linux/kernel/extable.c
// SPDX-License-Identifier: GPL-2.0-or-later
/* Rewritten by Rusty Russell, on the backs of many others...
   Copyright (C) 2001 Rusty Russell, 2002 Rusty Russell IBM.
*/
#include <linux/ftrace.h>
#include <linux/memory.h>
#include <linux/extable.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/init.h>
#include <linux/kprobes.h>
#include <linux/filter.h>

#include <asm/sections.h>
#include <linux/uaccess.h>
/*
 * Mutex protecting text section modification (dynamic code patching).
 * Some users need to sleep (allocating memory...) while they hold this lock.
 *
 * Note: Also protects SMP-alternatives modification on x86.
 *
 * NOT exported to modules - patching kernel text is a really delicate matter.
 */
DEFINE_MUTEX(text_mutex);
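
/*
 * Illustrative sketch, not part of this file: a hypothetical x86 caller
 * serializing a live code patch under text_mutex. text_poke() is the real
 * x86 primitive (it asserts that text_mutex is held); example_patch_insn()
 * and its parameters are made-up names for illustration only.
 */
#if 0
static void example_patch_insn(void *site, const void *insn, size_t len)
{
        mutex_lock(&text_mutex);        /* may sleep: not from atomic context */
        text_poke(site, insn, len);
        mutex_unlock(&text_mutex);
}
#endif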

extern struct exception_table_entry __start___ex_table[];
extern struct exception_table_entry __stop___ex_table[];

/* Cleared by build-time tools if the table is already sorted. */
u32 __initdata __visible main_extable_sort_needed = 1;

/* Sort the kernel's built-in exception table */
void __init sort_main_extable(void)
{
        if (main_extable_sort_needed &&
            &__stop___ex_table > &__start___ex_table) {
                pr_notice("Sorting __ex_table...\n");
                sort_extable(__start___ex_table, __stop___ex_table);
        }
}
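
/*
 * Illustrative note, not part of this file: sort_extable() (generic version
 * in lib/extable.c) orders the entries by the address of the faulting
 * instruction so the table can be binary-searched. With relative-format
 * tables, the absolute instruction address is recovered from a 32-bit
 * self-relative offset, mirroring the ex_to_insn() helper there:
 */
#if 0
static unsigned long ex_to_insn(const struct exception_table_entry *x)
{
        return (unsigned long)&x->insn + x->insn;
}
#endif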

/* Given an address, look for it in the kernel exception table */
const
struct exception_table_entry *search_kernel_exception_table(unsigned long addr)
{
        return search_extable(__start___ex_table,
                              __stop___ex_table - __start___ex_table, addr);
}
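
/*
 * Illustrative sketch, not part of this file: the generic search_extable()
 * in lib/extable.c is essentially a bsearch() over the sorted table,
 * comparing @addr against each entry's faulting-instruction address.
 * A simplified comparator, assuming absolute (non-relative) entries:
 */
#if 0
static int cmp_ex_search(const void *key, const void *elt)
{
        const struct exception_table_entry *e = elt;
        unsigned long addr = *(unsigned long *)key;

        if (addr < e->insn)
                return -1;
        if (addr > e->insn)
                return 1;
        return 0;
}
/* ... bsearch(&addr, base, num, sizeof(*base), cmp_ex_search) ... */
#endif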

/* Given an address, look for it in the exception tables. */
const struct exception_table_entry *search_exception_tables(unsigned long addr)
{
        const struct exception_table_entry *e;

        e = search_kernel_exception_table(addr);
        if (!e)
                e = search_module_extables(addr);
        if (!e)
                e = search_bpf_extables(addr);
        return e;
}
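
/*
 * Illustrative sketch, not part of this file: the typical consumer is an
 * architecture's page-fault path. Names follow x86 conventions (regs->ip,
 * classic relative fixup encoding); example_fixup_fault() is hypothetical.
 */
#if 0
static bool example_fixup_fault(struct pt_regs *regs)
{
        const struct exception_table_entry *e;

        e = search_exception_tables(regs->ip);
        if (!e)
                return false;   /* genuine kernel fault: oops */
        regs->ip = (unsigned long)&e->fixup + e->fixup;
        return true;            /* resume at the fixup stub */
}
#endif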

int init_kernel_text(unsigned long addr)
{
        if (addr >= (unsigned long)_sinittext &&
            addr < (unsigned long)_einittext)
                return 1;
        return 0;
}

int notrace core_kernel_text(unsigned long addr)
{
        if (addr >= (unsigned long)_stext &&
            addr < (unsigned long)_etext)
                return 1;

        if (system_state < SYSTEM_RUNNING &&
            init_kernel_text(addr))
                return 1;
        return 0;
}
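
/*
 * Illustrative note, not part of this file: core_kernel_text() only counts
 * .init.text before SYSTEM_RUNNING, because init memory is freed once boot
 * finishes. A hypothetical caller-side sketch of the distinction:
 */
#if 0
static const char *example_classify(unsigned long addr)
{
        if (core_kernel_text(addr))
                return "core text (or init text while still booting)";
        if (init_kernel_text(addr))
                return "init text - may already have been freed";
        return "not built-in kernel text";
}
#endif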

/**
 * core_kernel_data - tell if addr points to kernel data
 * @addr: address to test
 *
 * Returns true if @addr is within the core kernel's data section.
 *
 * Note: This may return true for core RODATA on some archs and false
 * on others, but it is always true for core RW data.
 */
int core_kernel_data(unsigned long addr)
{
        if (addr >= (unsigned long)_sdata &&
            addr < (unsigned long)_edata)
                return 1;
        return 0;
}
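
/*
 * Illustrative note, not part of this file: initialized static variables
 * in the core image land in .data, between the linker symbols _sdata and
 * _edata, so they satisfy this check. A hypothetical sketch:
 */
#if 0
static int example_var = 42;    /* initialized, so placed in .data */

/* ... WARN_ON(!core_kernel_data((unsigned long)&example_var)); ... */
#endif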

int __kernel_text_address(unsigned long addr)
{
        if (kernel_text_address(addr))
                return 1;
        /*
         * There might be init symbols in saved stacktraces.
         * Give those symbols a chance to be printed in
         * backtraces (such as lockdep traces).
         *
         * Since we are after the module-symbols check, there's
         * no danger of address overlap:
         */
        if (init_kernel_text(addr))
                return 1;
        return 0;
}
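
/*
 * Illustrative sketch, not part of this file: stack dumpers use this as a
 * cheap plausibility filter before symbolizing a value found on the stack.
 * example_show_frame() is a made-up name; %pS is the standard printk
 * symbolic-pointer format.
 */
#if 0
static void example_show_frame(unsigned long addr)
{
        if (__kernel_text_address(addr))
                pr_info(" [<%px>] %pS\n", (void *)addr, (void *)addr);
}
#endif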

int kernel_text_address(unsigned long addr)
{
        bool no_rcu;
        int ret = 1;

        if (core_kernel_text(addr))
                return 1;

        /*
         * If a stack dump happens while RCU is not watching, then
         * RCU needs to be notified that it must start watching again.
         * This can happen either via tracing that triggers a stack
         * trace, or via a WARN() issued while coming back from idle
         * or during CPU on- or offlining.
         *
         * is_module_text_address() as well as the kprobe slots,
         * is_bpf_text_address() and is_bpf_image_address() require
         * RCU to be watching.
         */
        no_rcu = !rcu_is_watching();

        /* Treat this like an NMI as it can happen anywhere */
        if (no_rcu)
                rcu_nmi_enter();

        if (is_module_text_address(addr))
                goto out;
        if (is_ftrace_trampoline(addr))
                goto out;
        if (is_kprobe_optinsn_slot(addr) || is_kprobe_insn_slot(addr))
                goto out;
        if (is_bpf_text_address(addr))
                goto out;
        ret = 0;
out:
        if (no_rcu)
                rcu_nmi_exit();

        return ret;
}
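
/*
 * Illustrative sketch, not part of this file: a raw stack scanner can call
 * this from any context, including NMI or early idle-exit, because the
 * function re-enables RCU watching itself. All names here are hypothetical.
 */
#if 0
static void example_scan_stack(unsigned long *sp, unsigned long *end)
{
        while (sp < end) {
                unsigned long word = *sp++;

                if (kernel_text_address(word))  /* safe if !rcu_is_watching() */
                        pr_cont(" %pS", (void *)word);
        }
}
#endif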

/*
 * On some architectures (PPC64, IA64) a function pointer is actually
 * only a token: it points at a descriptor that holds the real function
 * address. As a result, to find out whether a function pointer is part
 * of the kernel text, we need to do some special dereferencing first.
 */
int func_ptr_is_kernel_text(void *ptr)
{
        unsigned long addr;

        addr = (unsigned long)dereference_function_descriptor(ptr);
        if (core_kernel_text(addr))
                return 1;
        return is_module_text_address(addr);
}
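
/*
 * Illustrative sketch, not part of this file: on powerpc64 ELFv1, for
 * example, a "function pointer" addresses a descriptor roughly like the
 * one below, and dereference_function_descriptor() returns its ->addr.
 * Field names approximate arch/powerpc's function descriptor layout.
 */
#if 0
struct example_func_desc {
        unsigned long addr;     /* real entry point in .text */
        unsigned long toc;      /* callee's TOC base */
        unsigned long env;      /* environment pointer (unused by C) */
};
#endif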