linux/arch/arm64/kernel/module.c
/*
 * AArch64 loadable module support.
 *
 * Copyright (C) 2012 ARM Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * Author: Will Deacon <will.deacon@arm.com>
 */

#include <linux/bitops.h>
#include <linux/elf.h>
#include <linux/gfp.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/moduleloader.h>
#include <linux/vmalloc.h>
#include <asm/alternative.h>
#include <asm/insn.h>
#include <asm/sections.h>

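/*
 * Allocate module memory from the dedicated module region first, i.e.
 * [module_alloc_base, module_alloc_base + MODULES_VSIZE). When module
 * PLTs are enabled, we may fall back to the full vmalloc space, since
 * branches that end up out of range can be redirected through PLT
 * entries (see the R_AARCH64_JUMP26/CALL26 handling below).
 */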
void *module_alloc(unsigned long size)
{
        gfp_t gfp_mask = GFP_KERNEL;
        void *p;

        /* Silence the initial allocation */
        if (IS_ENABLED(CONFIG_ARM64_MODULE_PLTS))
                gfp_mask |= __GFP_NOWARN;

        p = __vmalloc_node_range(size, MODULE_ALIGN, module_alloc_base,
                                module_alloc_base + MODULES_VSIZE,
                                gfp_mask, PAGE_KERNEL_EXEC, 0,
                                NUMA_NO_NODE, __builtin_return_address(0));

        if (!p && IS_ENABLED(CONFIG_ARM64_MODULE_PLTS) &&
            !IS_ENABLED(CONFIG_KASAN))
                /*
                 * KASAN can only deal with module allocations being served
                 * from the reserved module region, since the remainder of
                 * the vmalloc region is already backed by zero shadow pages,
                 * and punching holes into it is non-trivial. Since the module
                 * region is not randomized when KASAN is enabled, it is even
                 * less likely that the module region gets exhausted, so we
                 * can simply omit this fallback in that case.
                 */
                p = __vmalloc_node_range(size, MODULE_ALIGN, VMALLOC_START,
                                VMALLOC_END, GFP_KERNEL, PAGE_KERNEL_EXEC, 0,
                                NUMA_NO_NODE, __builtin_return_address(0));

        if (p && (kasan_module_alloc(p, size) < 0)) {
                vfree(p);
                return NULL;
        }

        return p;
}

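/*
 * Relocation operations, following the expressions used in the "ELF for
 * the Arm 64-bit Architecture (AArch64)" document:
 *
 *   RELOC_OP_ABS:  S + A                  (absolute address of the symbol)
 *   RELOC_OP_PREL: S + A - P              (offset relative to the place)
 *   RELOC_OP_PAGE: Page(S + A) - Page(P), with Page(x) = x & ~0xfff
 *   RELOC_OP_NONE: no value
 */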
enum aarch64_reloc_op {
        RELOC_OP_NONE,
        RELOC_OP_ABS,
        RELOC_OP_PREL,
        RELOC_OP_PAGE,
};

static u64 do_reloc(enum aarch64_reloc_op reloc_op, __le32 *place, u64 val)
{
        switch (reloc_op) {
        case RELOC_OP_ABS:
                return val;
        case RELOC_OP_PREL:
                return val - (u64)place;
        case RELOC_OP_PAGE:
                return (val & ~0xfff) - ((u64)place & ~0xfff);
        case RELOC_OP_NONE:
                return 0;
        }

        pr_err("do_reloc: unknown relocation operation %d\n", reloc_op);
        return 0;
}

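/*
 * Patch a 16, 32 or 64-bit data relocation. The value is written first and
 * then range-checked: for the 16 and 32-bit cases we accept anything in
 * [Sxx_MIN, Uxx_MAX], i.e. both a signed and an unsigned interpretation of
 * the relocated quantity, and report -ERANGE otherwise.
 */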
static int reloc_data(enum aarch64_reloc_op op, void *place, u64 val, int len)
{
        s64 sval = do_reloc(op, place, val);

        switch (len) {
        case 16:
                *(s16 *)place = sval;
                if (sval < S16_MIN || sval > U16_MAX)
                        return -ERANGE;
                break;
        case 32:
                *(s32 *)place = sval;
                if (sval < S32_MIN || sval > U32_MAX)
                        return -ERANGE;
                break;
        case 64:
                *(s64 *)place = sval;
                break;
        default:
                pr_err("Invalid length (%d) for data relocation\n", len);
                return 0;
        }
        return 0;
}

enum aarch64_insn_movw_imm_type {
        AARCH64_INSN_IMM_MOVNZ,
        AARCH64_INSN_IMM_MOVKZ,
};

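/*
 * Patch the 16-bit immediate of a MOVZ/MOVK/MOVN instruction.
 *
 * AARCH64_INSN_IMM_MOVKZ: the selected 16 bits of the value are inserted
 * as-is (used for the unsigned MOVW_UABS_* groups and the *_NC variants).
 *
 * AARCH64_INSN_IMM_MOVNZ: the instruction may additionally be rewritten
 * depending on the sign of the value, by flipping the opc field in bits
 * [30:29]: MOVZ (opc == 10b) for a non-negative value, MOVN (opc == 00b)
 * with an inverted immediate for a negative one.
 */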
static int reloc_insn_movw(enum aarch64_reloc_op op, __le32 *place, u64 val,
                           int lsb, enum aarch64_insn_movw_imm_type imm_type)
{
        u64 imm;
        s64 sval;
        u32 insn = le32_to_cpu(*place);

        sval = do_reloc(op, place, val);
        imm = sval >> lsb;

        if (imm_type == AARCH64_INSN_IMM_MOVNZ) {
                /*
                 * For signed MOVW relocations, we have to manipulate the
                 * instruction encoding depending on whether or not the
                 * immediate is less than zero.
                 */
                insn &= ~(3 << 29);
                if (sval >= 0) {
                        /* >=0: Set the instruction to MOVZ (opcode 10b). */
                        insn |= 2 << 29;
                } else {
                        /*
                         * <0: Set the instruction to MOVN (opcode 00b).
                         *     Since we've masked the opcode already, we
                         *     don't need to do anything other than
                         *     inverting the new immediate field.
                         */
                        imm = ~imm;
                }
        }

        /* Update the instruction with the new encoding. */
        insn = aarch64_insn_encode_immediate(AARCH64_INSN_IMM_16, insn, imm);
        *place = cpu_to_le32(insn);

        if (imm > U16_MAX)
                return -ERANGE;

        return 0;
}

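/*
 * Patch a bounded immediate field of 'len' bits, taken from bit 'lsb' of
 * the relocation value. Overflow is detected by requiring that all bits
 * from the field's sign bit (bit len - 1 of the lsb-shifted value) upwards
 * are identical, i.e. that the shifted value fits in a signed len-bit
 * quantity. For example, a 19-bit field can hold [-2^18, 2^18 - 1]; for
 * anything else the final shift below leaves sval neither 0 nor -1 and we
 * return -ERANGE.
 */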
static int reloc_insn_imm(enum aarch64_reloc_op op, __le32 *place, u64 val,
                          int lsb, int len, enum aarch64_insn_imm_type imm_type)
{
        u64 imm, imm_mask;
        s64 sval;
        u32 insn = le32_to_cpu(*place);

        /* Calculate the relocation value. */
        sval = do_reloc(op, place, val);
        sval >>= lsb;

        /* Extract the value bits and shift them to bit 0. */
        imm_mask = (BIT(lsb + len) - 1) >> lsb;
        imm = sval & imm_mask;

        /* Update the instruction's immediate field. */
        insn = aarch64_insn_encode_immediate(imm_type, insn, imm);
        *place = cpu_to_le32(insn);

        /*
         * Extract the upper value bits (including the sign bit) and
         * shift them to bit 0.
         */
        sval = (s64)(sval & ~(imm_mask >> 1)) >> (len - 1);

        /*
         * Overflow has occurred if the upper bits are not all equal to
         * the sign bit of the value.
         */
        if ((u64)(sval + 1) >= 2)
                return -ERANGE;

        return 0;
}

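/*
 * Resolve the RELA relocations of one section against the final load
 * addresses. The *_NC ("no check") relocation types clear overflow_check
 * and then fall through to the handler of their checked counterpart, so
 * that only the instruction patching is shared; any -ERANGE result from
 * those cases is deliberately ignored.
 */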
int apply_relocate_add(Elf64_Shdr *sechdrs,
                       const char *strtab,
                       unsigned int symindex,
                       unsigned int relsec,
                       struct module *me)
{
        unsigned int i;
        int ovf;
        bool overflow_check;
        Elf64_Sym *sym;
        void *loc;
        u64 val;
        Elf64_Rela *rel = (void *)sechdrs[relsec].sh_addr;

        for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
                /* loc corresponds to P in the AArch64 ELF document. */
                loc = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
                        + rel[i].r_offset;

                /* sym is the ELF symbol we're referring to. */
                sym = (Elf64_Sym *)sechdrs[symindex].sh_addr
                        + ELF64_R_SYM(rel[i].r_info);

                /* val corresponds to (S + A) in the AArch64 ELF document. */
                val = sym->st_value + rel[i].r_addend;

                /* Check for overflow by default. */
                overflow_check = true;

                /* Perform the static relocation. */
                switch (ELF64_R_TYPE(rel[i].r_info)) {
                /* Null relocations. */
                case R_ARM_NONE:
                case R_AARCH64_NONE:
                        ovf = 0;
                        break;

                /* Data relocations. */
                case R_AARCH64_ABS64:
                        overflow_check = false;
                        ovf = reloc_data(RELOC_OP_ABS, loc, val, 64);
                        break;
                case R_AARCH64_ABS32:
                        ovf = reloc_data(RELOC_OP_ABS, loc, val, 32);
                        break;
                case R_AARCH64_ABS16:
                        ovf = reloc_data(RELOC_OP_ABS, loc, val, 16);
                        break;
                case R_AARCH64_PREL64:
                        overflow_check = false;
                        ovf = reloc_data(RELOC_OP_PREL, loc, val, 64);
                        break;
                case R_AARCH64_PREL32:
                        ovf = reloc_data(RELOC_OP_PREL, loc, val, 32);
                        break;
                case R_AARCH64_PREL16:
                        ovf = reloc_data(RELOC_OP_PREL, loc, val, 16);
                        break;

                /* MOVW instruction relocations. */
                case R_AARCH64_MOVW_UABS_G0_NC:
                        overflow_check = false;
                case R_AARCH64_MOVW_UABS_G0:
                        ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 0,
                                              AARCH64_INSN_IMM_MOVKZ);
                        break;
                case R_AARCH64_MOVW_UABS_G1_NC:
                        overflow_check = false;
                case R_AARCH64_MOVW_UABS_G1:
                        ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 16,
                                              AARCH64_INSN_IMM_MOVKZ);
                        break;
                case R_AARCH64_MOVW_UABS_G2_NC:
                        overflow_check = false;
                case R_AARCH64_MOVW_UABS_G2:
                        ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 32,
                                              AARCH64_INSN_IMM_MOVKZ);
                        break;
                case R_AARCH64_MOVW_UABS_G3:
                        /* We're using the top bits so we can't overflow. */
                        overflow_check = false;
                        ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 48,
                                              AARCH64_INSN_IMM_MOVKZ);
                        break;
                case R_AARCH64_MOVW_SABS_G0:
                        ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 0,
                                              AARCH64_INSN_IMM_MOVNZ);
                        break;
                case R_AARCH64_MOVW_SABS_G1:
                        ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 16,
                                              AARCH64_INSN_IMM_MOVNZ);
                        break;
                case R_AARCH64_MOVW_SABS_G2:
                        ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 32,
                                              AARCH64_INSN_IMM_MOVNZ);
                        break;
                case R_AARCH64_MOVW_PREL_G0_NC:
                        overflow_check = false;
                        ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 0,
                                              AARCH64_INSN_IMM_MOVKZ);
                        break;
                case R_AARCH64_MOVW_PREL_G0:
                        ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 0,
                                              AARCH64_INSN_IMM_MOVNZ);
                        break;
                case R_AARCH64_MOVW_PREL_G1_NC:
                        overflow_check = false;
                        ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 16,
                                              AARCH64_INSN_IMM_MOVKZ);
                        break;
                case R_AARCH64_MOVW_PREL_G1:
                        ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 16,
                                              AARCH64_INSN_IMM_MOVNZ);
                        break;
                case R_AARCH64_MOVW_PREL_G2_NC:
                        overflow_check = false;
                        ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 32,
                                              AARCH64_INSN_IMM_MOVKZ);
                        break;
                case R_AARCH64_MOVW_PREL_G2:
                        ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 32,
                                              AARCH64_INSN_IMM_MOVNZ);
                        break;
                case R_AARCH64_MOVW_PREL_G3:
                        /* We're using the top bits so we can't overflow. */
                        overflow_check = false;
                        ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 48,
                                              AARCH64_INSN_IMM_MOVNZ);
                        break;

                /* Immediate instruction relocations. */
                case R_AARCH64_LD_PREL_LO19:
                        ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 19,
                                             AARCH64_INSN_IMM_19);
                        break;
                case R_AARCH64_ADR_PREL_LO21:
                        ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 0, 21,
                                             AARCH64_INSN_IMM_ADR);
                        break;
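                /*
                 * With the Cortex-A53 erratum 843419 workaround enabled,
                 * modules are expected to be built without ADRP page
                 * relocations, so the cases below are compiled out and any
                 * such relocation is rejected as unsupported.
                 */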
#ifndef CONFIG_ARM64_ERRATUM_843419
                case R_AARCH64_ADR_PREL_PG_HI21_NC:
                        overflow_check = false;
                case R_AARCH64_ADR_PREL_PG_HI21:
                        ovf = reloc_insn_imm(RELOC_OP_PAGE, loc, val, 12, 21,
                                             AARCH64_INSN_IMM_ADR);
                        break;
#endif
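                /*
                 * The :lo12: relocations patch the unsigned 12-bit immediate
                 * of an ADD or load/store instruction. The LDSTn variants
                 * encode a byte offset scaled by the access size, hence
                 * lsb = log2(n / 8) and len = 12 - lsb below.
                 */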
                case R_AARCH64_ADD_ABS_LO12_NC:
                case R_AARCH64_LDST8_ABS_LO12_NC:
                        overflow_check = false;
                        ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 0, 12,
                                             AARCH64_INSN_IMM_12);
                        break;
                case R_AARCH64_LDST16_ABS_LO12_NC:
                        overflow_check = false;
                        ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 1, 11,
                                             AARCH64_INSN_IMM_12);
                        break;
                case R_AARCH64_LDST32_ABS_LO12_NC:
                        overflow_check = false;
                        ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 2, 10,
                                             AARCH64_INSN_IMM_12);
                        break;
                case R_AARCH64_LDST64_ABS_LO12_NC:
                        overflow_check = false;
                        ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 3, 9,
                                             AARCH64_INSN_IMM_12);
                        break;
                case R_AARCH64_LDST128_ABS_LO12_NC:
                        overflow_check = false;
                        ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 4, 8,
                                             AARCH64_INSN_IMM_12);
                        break;
                case R_AARCH64_TSTBR14:
                        ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 14,
                                             AARCH64_INSN_IMM_14);
                        break;
                case R_AARCH64_CONDBR19:
                        ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 19,
                                             AARCH64_INSN_IMM_19);
                        break;
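                /*
                 * B and BL have a +/-128 MiB range (a signed 26-bit word
                 * offset). If the target is further away and module PLTs
                 * are enabled, emit (or reuse) a PLT entry for the target
                 * and branch to that instead.
                 */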
                case R_AARCH64_JUMP26:
                case R_AARCH64_CALL26:
                        ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 26,
                                             AARCH64_INSN_IMM_26);

                        if (IS_ENABLED(CONFIG_ARM64_MODULE_PLTS) &&
                            ovf == -ERANGE) {
                                val = module_emit_plt_entry(me, loc, &rel[i], sym);
                                ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2,
                                                     26, AARCH64_INSN_IMM_26);
                        }
                        break;

                default:
                        pr_err("module %s: unsupported RELA relocation: %llu\n",
                               me->name, ELF64_R_TYPE(rel[i].r_info));
                        return -ENOEXEC;
                }

                if (overflow_check && ovf == -ERANGE)
                        goto overflow;

        }

        return 0;

overflow:
        pr_err("module %s: overflow in relocation type %d val %Lx\n",
               me->name, (int)ELF64_R_TYPE(rel[i].r_info), val);
        return -ENOEXEC;
}

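/*
 * Post-processing after all sections have been loaded and relocated:
 * apply the alternative instruction patches collected in the module's
 * .altinstructions section and, when dynamic ftrace is in use, record
 * the address of the module's ftrace trampoline section.
 */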
int module_finalize(const Elf_Ehdr *hdr,
                    const Elf_Shdr *sechdrs,
                    struct module *me)
{
        const Elf_Shdr *s, *se;
        const char *secstrs = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;

        for (s = sechdrs, se = sechdrs + hdr->e_shnum; s < se; s++) {
                if (strcmp(".altinstructions", secstrs + s->sh_name) == 0) {
                        apply_alternatives((void *)s->sh_addr, s->sh_size);
                }
#ifdef CONFIG_ARM64_MODULE_PLTS
                if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE) &&
                    !strcmp(".text.ftrace_trampoline", secstrs + s->sh_name))
                        me->arch.ftrace_trampoline = (void *)s->sh_addr;
#endif
        }

        return 0;
}