/* Rewritten by Rusty Russell, on the backs of many others...
   Copyright (C) 2001 Rusty Russell, 2002 Rusty Russell IBM.

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program; if not, write to the Free Software
    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
*/
#include <linux/ftrace.h>
#include <linux/memory.h>
#include <linux/extable.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/init.h>
#include <linux/kprobes.h>
#include <linux/filter.h>

#include <asm/sections.h>
#include <linux/uaccess.h>

/*
 * mutex protecting text section modification (dynamic code patching).
 * some users need to sleep (allocating memory...) while they hold this lock.
 *
 * Note: Also protects SMP-alternatives modification on x86.
 *
 * NOT exported to modules - patching kernel text is a really delicate matter.
 */
DEFINE_MUTEX(text_mutex);

extern struct exception_table_entry __start___ex_table[];
extern struct exception_table_entry __stop___ex_table[];

/* Cleared by build time tools if the table is already sorted. */
u32 __initdata __visible main_extable_sort_needed = 1;

/* Sort the kernel's built-in exception table */
void __init sort_main_extable(void)
{
	if (main_extable_sort_needed && __stop___ex_table > __start___ex_table) {
		pr_notice("Sorting __ex_table...\n");
		sort_extable(__start___ex_table, __stop___ex_table);
	}
}

/* Given an address, look for it in the kernel exception table */
const
struct exception_table_entry *search_kernel_exception_table(unsigned long addr)
{
	return search_extable(__start___ex_table,
			      __stop___ex_table - __start___ex_table, addr);
}

/* Given an address, look for it in the exception tables. */
const struct exception_table_entry *search_exception_tables(unsigned long addr)
{
	const struct exception_table_entry *e;

	e = search_kernel_exception_table(addr);
	if (!e)
		e = search_module_extables(addr);
	if (!e)
		e = search_bpf_extables(addr);
	return e;
}

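/*
 * Illustrative sketch (not part of this file): the typical consumer of the
 * lookups above is an arch's fault path, which asks whether the faulting
 * instruction has a registered fixup and, if so, resumes there instead of
 * oopsing. The real entry point is the per-arch fixup_exception(), whose
 * signature varies; the simplified stand-in below assumes the relative
 * extable encoding (CONFIG_ARCH_HAS_RELATIVE_EXTABLE) and the generic
 * instruction_pointer()/instruction_pointer_set() ptrace helpers.
 */
#if 0
static int example_fixup_exception(struct pt_regs *regs)
{
	const struct exception_table_entry *e;

	e = search_exception_tables(instruction_pointer(regs));
	if (!e)
		return 0;	/* no fixup registered: the fault is fatal */

	/* Relative encoding: the fixup field stores an offset from itself. */
	instruction_pointer_set(regs, (unsigned long)&e->fixup + e->fixup);
	return 1;		/* resume execution at the fixup handler */
}
#endif
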
int init_kernel_text(unsigned long addr)
{
	if (addr >= (unsigned long)_sinittext &&
	    addr < (unsigned long)_einittext)
		return 1;
	return 0;
}

int notrace core_kernel_text(unsigned long addr)
{
	if (addr >= (unsigned long)_stext &&
	    addr < (unsigned long)_etext)
		return 1;

	if (system_state < SYSTEM_RUNNING &&
	    init_kernel_text(addr))
		return 1;
	return 0;
}

/**
 * core_kernel_data - tell if addr points to kernel data
 * @addr: address to test
 *
 * Returns true if @addr passed in is from the core kernel data
 * section.
 *
 * Note: On some archs this may return true for core RODATA, on others
 * it will not; it always returns true for core RW data.
 */
int core_kernel_data(unsigned long addr)
{
	if (addr >= (unsigned long)_sdata &&
	    addr < (unsigned long)_edata)
		return 1;
	return 0;
}

int __kernel_text_address(unsigned long addr)
{
	if (kernel_text_address(addr))
		return 1;
	/*
	 * There might be init symbols in saved stacktraces.
	 * Give those symbols a chance to be printed in
	 * backtraces (such as lockdep traces).
	 *
	 * Since we are after the module-symbols check, there's
	 * no danger of address overlap:
	 */
	if (init_kernel_text(addr))
		return 1;
	return 0;
}

int kernel_text_address(unsigned long addr)
{
	bool no_rcu;
	int ret = 1;

	if (core_kernel_text(addr))
		return 1;

	/*
	 * If a stack dump happens while RCU is not watching, then
	 * RCU needs to be told to start watching again. This can
	 * happen either through tracing that triggers a stack trace,
	 * or through a WARN() that fires while coming back from idle
	 * or while a CPU is coming online or going offline.
	 *
	 * is_module_text_address(), the kprobe slot checks,
	 * is_bpf_text_address() and is_bpf_image_address() all
	 * require RCU to be watching.
	 */
	no_rcu = !rcu_is_watching();

	/* Treat this like an NMI as it can happen anywhere */
	if (no_rcu)
		rcu_nmi_enter();

	if (is_module_text_address(addr))
		goto out;
	if (is_ftrace_trampoline(addr))
		goto out;
	if (is_kprobe_optinsn_slot(addr) || is_kprobe_insn_slot(addr))
		goto out;
	if (is_bpf_text_address(addr))
		goto out;
	ret = 0;
out:
	if (no_rcu)
		rcu_nmi_exit();

	return ret;
}

/*
 * On some architectures (PPC64, IA64) function pointers do not point
 * at code directly: they point to a function descriptor that holds
 * the real function address. As a result, to find out whether a
 * function pointer is part of the kernel text, we need to dereference
 * the descriptor first.
 */
int func_ptr_is_kernel_text(void *ptr)
{
	unsigned long addr;

	addr = (unsigned long) dereference_function_descriptor(ptr);
	if (core_kernel_text(addr))
		return 1;
	return is_module_text_address(addr);
}

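/*
 * Illustrative sketch (not part of this file): one plausible use of
 * func_ptr_is_kernel_text() is to sanity-check a callback pointer before
 * trusting it. example_run_callback() and its warning message are
 * hypothetical; only the lookup itself comes from this file.
 */
#if 0
static void example_run_callback(void (*cb)(void))
{
	if (!func_ptr_is_kernel_text((void *)cb)) {
		pr_warn("refusing callback %ps outside kernel text\n", cb);
		return;
	}
	cb();	/* safe to call: it resolves into kernel or module text */
}
#endif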