/* linux/arch/x86/kernel/module.c */
   1/*  Kernel module help for x86.
   2    Copyright (C) 2001 Rusty Russell.
   3
   4    This program is free software; you can redistribute it and/or modify
   5    it under the terms of the GNU General Public License as published by
   6    the Free Software Foundation; either version 2 of the License, or
   7    (at your option) any later version.
   8
   9    This program is distributed in the hope that it will be useful,
  10    but WITHOUT ANY WARRANTY; without even the implied warranty of
  11    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  12    GNU General Public License for more details.
  13
  14    You should have received a copy of the GNU General Public License
  15    along with this program; if not, write to the Free Software
  16    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
  17*/
  18
  19#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  20
  21#include <linux/moduleloader.h>
  22#include <linux/elf.h>
  23#include <linux/vmalloc.h>
  24#include <linux/fs.h>
  25#include <linux/string.h>
  26#include <linux/kernel.h>
  27#include <linux/bug.h>
  28#include <linux/mm.h>
  29#include <linux/gfp.h>
  30#include <linux/jump_label.h>
  31#include <linux/random.h>
  32
  33#include <asm/page.h>
  34#include <asm/pgtable.h>
  35#include <asm/setup.h>
  36
/*
 * DEBUGP(): compile-time-disabled debug printk.  Flip the "#if 0" to "#if 1"
 * to enable.  The disabled variant keeps the call inside a dead "if (0)"
 * branch so the format string and arguments are still type-checked by the
 * compiler, while the optimizer removes the code entirely.
 */
#if 0
#define DEBUGP(fmt, ...)                                \
        printk(KERN_DEBUG fmt, ##__VA_ARGS__)
#else
#define DEBUGP(fmt, ...)                                \
do {                                                    \
        if (0)                                          \
                printk(KERN_DEBUG fmt, ##__VA_ARGS__);  \
} while (0)
#endif
  47
  48#ifdef CONFIG_RANDOMIZE_BASE
  49static unsigned long module_load_offset;
  50
  51/* Mutex protects the module_load_offset. */
  52static DEFINE_MUTEX(module_kaslr_mutex);
  53
  54static unsigned long int get_module_load_offset(void)
  55{
  56        if (kaslr_enabled()) {
  57                mutex_lock(&module_kaslr_mutex);
  58                /*
  59                 * Calculate the module_load_offset the first time this
  60                 * code is called. Once calculated it stays the same until
  61                 * reboot.
  62                 */
  63                if (module_load_offset == 0)
  64                        module_load_offset =
  65                                (get_random_int() % 1024 + 1) * PAGE_SIZE;
  66                mutex_unlock(&module_kaslr_mutex);
  67        }
  68        return module_load_offset;
  69}
  70#else
/*
 * KASLR support compiled out: modules load from MODULES_VADDR with no
 * randomized offset.
 */
static unsigned long int get_module_load_offset(void)
{
        return 0;
}
  75#endif
  76
  77void *module_alloc(unsigned long size)
  78{
  79        if (PAGE_ALIGN(size) > MODULES_LEN)
  80                return NULL;
  81        return __vmalloc_node_range(size, 1,
  82                                    MODULES_VADDR + get_module_load_offset(),
  83                                    MODULES_END, GFP_KERNEL | __GFP_HIGHMEM,
  84                                    PAGE_KERNEL_EXEC, NUMA_NO_NODE,
  85                                    __builtin_return_address(0));
  86}
  87
  88#ifdef CONFIG_X86_32
  89int apply_relocate(Elf32_Shdr *sechdrs,
  90                   const char *strtab,
  91                   unsigned int symindex,
  92                   unsigned int relsec,
  93                   struct module *me)
  94{
  95        unsigned int i;
  96        Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr;
  97        Elf32_Sym *sym;
  98        uint32_t *location;
  99
 100        DEBUGP("Applying relocate section %u to %u\n",
 101               relsec, sechdrs[relsec].sh_info);
 102        for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
 103                /* This is where to make the change */
 104                location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
 105                        + rel[i].r_offset;
 106                /* This is the symbol it is referring to.  Note that all
 107                   undefined symbols have been resolved.  */
 108                sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
 109                        + ELF32_R_SYM(rel[i].r_info);
 110
 111                switch (ELF32_R_TYPE(rel[i].r_info)) {
 112                case R_386_32:
 113                        /* We add the value into the location given */
 114                        *location += sym->st_value;
 115                        break;
 116                case R_386_PC32:
 117                        /* Add the value, subtract its position */
 118                        *location += sym->st_value - (uint32_t)location;
 119                        break;
 120                default:
 121                        pr_err("%s: Unknown relocation: %u\n",
 122                               me->name, ELF32_R_TYPE(rel[i].r_info));
 123                        return -ENOEXEC;
 124                }
 125        }
 126        return 0;
 127}
 128#else /*X86_64*/
/*
 * Apply RELA relocations (x86-64) from section @relsec to the section it
 * targets (sechdrs[relsec].sh_info).  All undefined symbols have already
 * been resolved by the generic loader.  Each relocation target is required
 * to hold zero before patching (catches double-application / corrupt
 * images), and 32-bit relocations are checked for value overflow.
 *
 * RHEL-specific quirk: modules that declare RHEL version "7.0" and
 * reference the "kernel_stack" per-cpu symbol are redirected to the
 * "__kernel_stack_70__" symbol instead (see the comment in the loop).
 *
 * Returns 0 on success, -ENOEXEC (or the find_symbol() error) on failure.
 */
int apply_relocate_add(Elf64_Shdr *sechdrs,
                   const char *strtab,
                   unsigned int symindex,
                   unsigned int relsec,
                   struct module *me)
{
        unsigned int i;
        Elf64_Rela *rel = (void *)sechdrs[relsec].sh_addr;
        Elf64_Sym *sym;
        void *loc;
        u64 val;
        /* True when this module was built against RHEL 7.0. */
        bool rhel70 = check_module_rhelversion(me, "7.0");
        /* Log the kernel_stack fixup only once per module. */
        bool warned = false;

        DEBUGP("Applying relocate section %u to %u\n",
               relsec, sechdrs[relsec].sh_info);

        for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
                Elf64_Sym kstack_sym;
                bool apply_kstack_fixup = false;
                const char *symname;

                /* This is where to make the change */
                loc = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
                        + rel[i].r_offset;

                /* This is the symbol it is referring to.  Note that all
                   undefined symbols have been resolved.  */
                sym = (Elf64_Sym *)sechdrs[symindex].sh_addr
                        + ELF64_R_SYM(rel[i].r_info);
                symname = strtab + sym->st_name;

                DEBUGP("symname %s type %d st_value %Lx r_addend %Lx loc %Lx\n",
                       symname, (int)ELF64_R_TYPE(rel[i].r_info),
                       sym->st_value, rel[i].r_addend, (u64)loc);

                if (rhel70 && !strcmp(symname, "kernel_stack")) {
                        if (!warned)
                                printk(KERN_INFO "%s: applying kernel_stack fix up\n",
                                        me->name);
                        apply_kstack_fixup = true;
                        warned = true;
                }

                /* kernel_stack is referenced to access current_thread_info in
                 * a variety of places... if we're loading a module which
                 * expects an 8K stack, fix up the symbol reference to look
                 * at a second copy. Nobody should be using this symbol for
                 * any other purpose.
                 */
                if (apply_kstack_fixup) {
                        const struct kernel_symbol *ksym2;
                        /* Look up the compatibility copy of the symbol;
                         * only st_value is consumed below, so a stack-local
                         * Elf64_Sym with just that field set suffices. */
                        ksym2 = find_symbol("__kernel_stack_70__",
                                            NULL, NULL, true, true);
                        if (!IS_ERR(ksym2)) {
                                kstack_sym.st_value = ksym2->value;
                                sym = &kstack_sym;
                        } else
                                /* Propagate the lookup error; fall back to
                                 * -ENOEXEC if it was a bare NULL. */
                                return PTR_ERR(ksym2) ?: -ENOEXEC;
                }

                /* Fully resolved relocation value: S + A. */
                val = sym->st_value + rel[i].r_addend;

                switch (ELF64_R_TYPE(rel[i].r_info)) {
                case R_X86_64_NONE:
                        break;
                case R_X86_64_64:
                        /* Target must be zero before patching. */
                        if (*(u64 *)loc != 0)
                                goto invalid_relocation;
                        *(u64 *)loc = val;
                        break;
                case R_X86_64_32:
                        if (*(u32 *)loc != 0)
                                goto invalid_relocation;
                        *(u32 *)loc = val;
                        /* Value must zero-extend back to 64 bits. */
                        if (val != *(u32 *)loc)
                                goto overflow;
                        break;
                case R_X86_64_32S:
                        if (*(s32 *)loc != 0)
                                goto invalid_relocation;
                        *(s32 *)loc = val;
                        /* Value must sign-extend back to 64 bits. */
                        if ((s64)val != *(s32 *)loc)
                                goto overflow;
                        break;
                case R_X86_64_PC32:
                /* PLT32 is handled identically to PC32 here. */
                case R_X86_64_PLT32:
                        if (*(u32 *)loc != 0)
                                goto invalid_relocation;
                        /* PC-relative: S + A - P. */
                        val -= (u64)loc;
                        *(u32 *)loc = val;
                        /* NOTE(review): PC32 overflow check deliberately
                         * compiled out below — presumably to tolerate
                         * existing out-of-range modules; confirm before
                         * re-enabling. */
#if 0
                        if ((s64)val != *(s32 *)loc)
                                goto overflow;
#endif
                        break;
                default:
                        pr_err("%s: Unknown rela relocation: %llu\n",
                               me->name, ELF64_R_TYPE(rel[i].r_info));
                        return -ENOEXEC;
                }
        }
        return 0;

invalid_relocation:
        pr_err("x86/modules: Skipping invalid relocation target, existing value is nonzero for type %d, loc %p, val %Lx\n",
               (int)ELF64_R_TYPE(rel[i].r_info), loc, val);
        return -ENOEXEC;

overflow:
        pr_err("overflow in relocation type %d val %Lx\n",
               (int)ELF64_R_TYPE(rel[i].r_info), val);
        pr_err("`%s' likely not compiled with -mcmodel=kernel\n",
               me->name);
        return -ENOEXEC;
}
 245#endif
 246
 247int module_finalize(const Elf_Ehdr *hdr,
 248                    const Elf_Shdr *sechdrs,
 249                    struct module *me)
 250{
 251        const Elf_Shdr *s, *text = NULL, *alt = NULL, *locks = NULL,
 252                *para = NULL;
 253        char *secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
 254
 255        for (s = sechdrs; s < sechdrs + hdr->e_shnum; s++) {
 256                if (!strcmp(".text", secstrings + s->sh_name))
 257                        text = s;
 258                if (!strcmp(".altinstructions", secstrings + s->sh_name))
 259                        alt = s;
 260                if (!strcmp(".smp_locks", secstrings + s->sh_name))
 261                        locks = s;
 262                if (!strcmp(".parainstructions", secstrings + s->sh_name))
 263                        para = s;
 264        }
 265
 266        if (alt) {
 267                /* patch .altinstructions */
 268                void *aseg = (void *)alt->sh_addr;
 269                apply_alternatives(aseg, aseg + alt->sh_size);
 270        }
 271        if (locks && text) {
 272                void *lseg = (void *)locks->sh_addr;
 273                void *tseg = (void *)text->sh_addr;
 274                alternatives_smp_module_add(me, me->name,
 275                                            lseg, lseg + locks->sh_size,
 276                                            tseg, tseg + text->sh_size);
 277        }
 278
 279        if (para) {
 280                void *pseg = (void *)para->sh_addr;
 281                apply_paravirt(pseg, pseg + para->sh_size);
 282        }
 283
 284        /* make jump label nops */
 285        jump_label_apply_nops(me);
 286
 287        return 0;
 288}
 289
/*
 * Arch-specific teardown on module unload: unregister the module's
 * .smp_locks region that module_finalize() registered via
 * alternatives_smp_module_add().
 */
void module_arch_cleanup(struct module *mod)
{
        alternatives_smp_module_del(mod);
}
 294