linux/arch/arm64/kernel/module.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * AArch64 loadable module support.
 *
 * Copyright (C) 2012 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 */

#include <linux/bitops.h>
#include <linux/elf.h>
#include <linux/ftrace.h>
#include <linux/gfp.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/moduleloader.h>
#include <linux/vmalloc.h>
#include <asm/alternative.h>
#include <asm/insn.h>
#include <asm/sections.h>

void *module_alloc(unsigned long size)
{
	u64 module_alloc_end = module_alloc_base + MODULES_VSIZE;
	gfp_t gfp_mask = GFP_KERNEL;
	void *p;

	/* Silence the initial allocation */
	if (IS_ENABLED(CONFIG_ARM64_MODULE_PLTS))
		gfp_mask |= __GFP_NOWARN;

	if (IS_ENABLED(CONFIG_KASAN_GENERIC) ||
	    IS_ENABLED(CONFIG_KASAN_SW_TAGS))
		/* don't exceed the static module region - see below */
		module_alloc_end = MODULES_END;

	p = __vmalloc_node_range(size, MODULE_ALIGN, module_alloc_base,
				module_alloc_end, gfp_mask, PAGE_KERNEL, 0,
				NUMA_NO_NODE, __builtin_return_address(0));

	if (!p && IS_ENABLED(CONFIG_ARM64_MODULE_PLTS) &&
	    (IS_ENABLED(CONFIG_KASAN_VMALLOC) ||
	     (!IS_ENABLED(CONFIG_KASAN_GENERIC) &&
	      !IS_ENABLED(CONFIG_KASAN_SW_TAGS))))
		/*
		 * KASAN without KASAN_VMALLOC can only deal with module
		 * allocations being served from the reserved module region,
		 * since the remainder of the vmalloc region is already
		 * backed by zero shadow pages, and punching holes into it
		 * is non-trivial. Since the module region is not randomized
		 * when KASAN is enabled without KASAN_VMALLOC, it is even
		 * less likely that the module region gets exhausted, so we
		 * can simply omit this fallback in that case.
		 */
		p = __vmalloc_node_range(size, MODULE_ALIGN, module_alloc_base,
				module_alloc_base + SZ_2G, GFP_KERNEL,
				PAGE_KERNEL, 0, NUMA_NO_NODE,
				__builtin_return_address(0));

	if (p && (kasan_module_alloc(p, size) < 0)) {
		vfree(p);
		return NULL;
	}

	return p;
}

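/*
 * Note: module_alloc() is called by the generic module loader to map a
 * module's text and data. The first attempt stays within the MODULES_VSIZE
 * region; the 2 GiB fallback above is only useful when
 * CONFIG_ARM64_MODULE_PLTS is enabled, since branches that then end up out
 * of range are redirected through PLT veneers during relocation.
 */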
enum aarch64_reloc_op {
	RELOC_OP_NONE,
	RELOC_OP_ABS,
	RELOC_OP_PREL,
	RELOC_OP_PAGE,
};

static u64 do_reloc(enum aarch64_reloc_op reloc_op, __le32 *place, u64 val)
{
	switch (reloc_op) {
	case RELOC_OP_ABS:
		return val;
	case RELOC_OP_PREL:
		return val - (u64)place;
	case RELOC_OP_PAGE:
		return (val & ~0xfff) - ((u64)place & ~0xfff);
	case RELOC_OP_NONE:
		return 0;
	}

	pr_err("do_reloc: unknown relocation operation %d\n", reloc_op);
	return 0;
}

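/*
 * Illustrative example (hypothetical addresses): with place =
 * 0xffff800008001234 and val = 0xffff800008003567, RELOC_OP_PREL yields
 * val - place = 0x2333, whereas RELOC_OP_PAGE first masks both down to
 * their 4 KiB pages and yields 0x3000 - 0x1000 = 0x2000, which is the
 * quantity an ADRP instruction consumes.
 */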
static int reloc_data(enum aarch64_reloc_op op, void *place, u64 val, int len)
{
	s64 sval = do_reloc(op, place, val);

	/*
	 * The ELF psABI for AArch64 documents the 16-bit and 32-bit place
	 * relative and absolute relocations as having a range of [-2^15, 2^16)
	 * or [-2^31, 2^32), respectively. However, in order to be able to
	 * detect overflows reliably, we have to choose whether we interpret
	 * such quantities as signed or as unsigned, and stick with it.
	 * The way we organize our address space requires a signed
	 * interpretation of 32-bit relative references, so let's use that
	 * for all R_AARCH64_PRELxx relocations. This means our upper
	 * bound for overflow detection should be Sxx_MAX rather than Uxx_MAX.
	 */

	switch (len) {
	case 16:
		*(s16 *)place = sval;
		switch (op) {
		case RELOC_OP_ABS:
			if (sval < 0 || sval > U16_MAX)
				return -ERANGE;
			break;
		case RELOC_OP_PREL:
			if (sval < S16_MIN || sval > S16_MAX)
				return -ERANGE;
			break;
		default:
			pr_err("Invalid 16-bit data relocation (%d)\n", op);
			return 0;
		}
		break;
	case 32:
		*(s32 *)place = sval;
		switch (op) {
		case RELOC_OP_ABS:
			if (sval < 0 || sval > U32_MAX)
				return -ERANGE;
			break;
		case RELOC_OP_PREL:
			if (sval < S32_MIN || sval > S32_MAX)
				return -ERANGE;
			break;
		default:
			pr_err("Invalid 32-bit data relocation (%d)\n", op);
			return 0;
		}
		break;
	case 64:
		*(s64 *)place = sval;
		break;
	default:
		pr_err("Invalid length (%d) for data relocation\n", len);
		return 0;
	}
	return 0;
}

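/*
 * Illustrative example: an R_AARCH64_PREL32 reference to a symbol that
 * lies 0x90000000 bytes (2.25 GiB) ahead of the place would fit the
 * psABI's [-2^31, 2^32) window, but under the signed interpretation
 * chosen above it exceeds S32_MAX and is rejected with -ERANGE rather
 * than silently wrapping to a negative offset.
 */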
enum aarch64_insn_movw_imm_type {
	AARCH64_INSN_IMM_MOVNZ,
	AARCH64_INSN_IMM_MOVKZ,
};

static int reloc_insn_movw(enum aarch64_reloc_op op, __le32 *place, u64 val,
			   int lsb, enum aarch64_insn_movw_imm_type imm_type)
{
	u64 imm;
	s64 sval;
	u32 insn = le32_to_cpu(*place);

	sval = do_reloc(op, place, val);
	imm = sval >> lsb;

	if (imm_type == AARCH64_INSN_IMM_MOVNZ) {
		/*
		 * For signed MOVW relocations, we have to manipulate the
		 * instruction encoding depending on whether or not the
		 * immediate is less than zero.
		 */
		insn &= ~(3 << 29);
		if (sval >= 0) {
			/* >=0: Set the instruction to MOVZ (opcode 10b). */
			insn |= 2 << 29;
		} else {
			/*
			 * <0: Set the instruction to MOVN (opcode 00b).
			 *     Since we've masked the opcode already, we
			 *     don't need to do anything other than
			 *     inverting the new immediate field.
			 */
			imm = ~imm;
		}
	}

	/* Update the instruction with the new encoding. */
	insn = aarch64_insn_encode_immediate(AARCH64_INSN_IMM_16, insn, imm);
	*place = cpu_to_le32(insn);

	if (imm > U16_MAX)
		return -ERANGE;

	return 0;
}

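/*
 * Illustrative example (hypothetical value): for R_AARCH64_MOVW_SABS_G0
 * with sval = -2, the raw 16-bit immediate would be 0xfffe. Since sval is
 * negative, the opcode bits are set to MOVN and the immediate is inverted
 * to 0x0001, so the patched instruction MOVN Xd, #0x1 still materialises
 * the value -2 in the destination register.
 */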
static int reloc_insn_imm(enum aarch64_reloc_op op, __le32 *place, u64 val,
			  int lsb, int len, enum aarch64_insn_imm_type imm_type)
{
	u64 imm, imm_mask;
	s64 sval;
	u32 insn = le32_to_cpu(*place);

	/* Calculate the relocation value. */
	sval = do_reloc(op, place, val);
	sval >>= lsb;

	/* Extract the value bits and shift them to bit 0. */
	imm_mask = (BIT(lsb + len) - 1) >> lsb;
	imm = sval & imm_mask;

	/* Update the instruction's immediate field. */
	insn = aarch64_insn_encode_immediate(imm_type, insn, imm);
	*place = cpu_to_le32(insn);

	/*
	 * Extract the upper value bits (including the sign bit) and
	 * shift them to bit 0.
	 */
	sval = (s64)(sval & ~(imm_mask >> 1)) >> (len - 1);

	/*
	 * Overflow has occurred if the upper bits are not all equal to
	 * the sign bit of the value.
	 */
	if ((u64)(sval + 1) >= 2)
		return -ERANGE;

	return 0;
}

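/*
 * Illustrative note on the overflow test above: after the masked shift,
 * sval holds only the bits that did not fit into the immediate field,
 * sign-extended. If the relocation value fits, those bits are either all
 * zeros or all ones, i.e. sval is 0 or -1, and (u64)(sval + 1) is 1 or 0.
 * Any other pattern yields a value >= 2 and the relocation is reported as
 * out of range.
 */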
static int reloc_insn_adrp(struct module *mod, Elf64_Shdr *sechdrs,
			   __le32 *place, u64 val)
{
	u32 insn;

	if (!is_forbidden_offset_for_adrp(place))
		return reloc_insn_imm(RELOC_OP_PAGE, place, val, 12, 21,
				      AARCH64_INSN_IMM_ADR);

	/* patch ADRP to ADR if it is in range */
	if (!reloc_insn_imm(RELOC_OP_PREL, place, val & ~0xfff, 0, 21,
			    AARCH64_INSN_IMM_ADR)) {
		insn = le32_to_cpu(*place);
		insn &= ~BIT(31);
	} else {
		/* out of range for ADR -> emit a veneer */
		val = module_emit_veneer_for_adrp(mod, sechdrs, place, val & ~0xfff);
		if (!val)
			return -ENOEXEC;
		insn = aarch64_insn_gen_branch_imm((u64)place, val,
						   AARCH64_INSN_BRANCH_NOLINK);
	}

	*place = cpu_to_le32(insn);
	return 0;
}

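/*
 * Note: the ADRP-to-ADR rewrite above relies on the two encodings
 * differing only in bit 31 (1 for ADRP, 0 for ADR), so clearing that bit
 * after re-encoding the 21-bit immediate is sufficient. The forbidden
 * offsets come from the Cortex-A53 erratum 843419 workaround, which avoids
 * ADRP instructions in the last two instruction slots of a 4 KiB page.
 */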
int apply_relocate_add(Elf64_Shdr *sechdrs,
		       const char *strtab,
		       unsigned int symindex,
		       unsigned int relsec,
		       struct module *me)
{
	unsigned int i;
	int ovf;
	bool overflow_check;
	Elf64_Sym *sym;
	void *loc;
	u64 val;
	Elf64_Rela *rel = (void *)sechdrs[relsec].sh_addr;

	for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
		/* loc corresponds to P in the AArch64 ELF document. */
		loc = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
			+ rel[i].r_offset;

		/* sym is the ELF symbol we're referring to. */
		sym = (Elf64_Sym *)sechdrs[symindex].sh_addr
			+ ELF64_R_SYM(rel[i].r_info);

		/* val corresponds to (S + A) in the AArch64 ELF document. */
		val = sym->st_value + rel[i].r_addend;

		/* Check for overflow by default. */
		overflow_check = true;

		/* Perform the static relocation. */
		switch (ELF64_R_TYPE(rel[i].r_info)) {
		/* Null relocations. */
		case R_ARM_NONE:
		case R_AARCH64_NONE:
			ovf = 0;
			break;

		/* Data relocations. */
		case R_AARCH64_ABS64:
			overflow_check = false;
			ovf = reloc_data(RELOC_OP_ABS, loc, val, 64);
			break;
		case R_AARCH64_ABS32:
			ovf = reloc_data(RELOC_OP_ABS, loc, val, 32);
			break;
		case R_AARCH64_ABS16:
			ovf = reloc_data(RELOC_OP_ABS, loc, val, 16);
			break;
		case R_AARCH64_PREL64:
			overflow_check = false;
			ovf = reloc_data(RELOC_OP_PREL, loc, val, 64);
			break;
		case R_AARCH64_PREL32:
			ovf = reloc_data(RELOC_OP_PREL, loc, val, 32);
			break;
		case R_AARCH64_PREL16:
			ovf = reloc_data(RELOC_OP_PREL, loc, val, 16);
			break;

		/* MOVW instruction relocations. */
		case R_AARCH64_MOVW_UABS_G0_NC:
			overflow_check = false;
			fallthrough;
		case R_AARCH64_MOVW_UABS_G0:
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 0,
					      AARCH64_INSN_IMM_MOVKZ);
			break;
		case R_AARCH64_MOVW_UABS_G1_NC:
			overflow_check = false;
			fallthrough;
		case R_AARCH64_MOVW_UABS_G1:
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 16,
					      AARCH64_INSN_IMM_MOVKZ);
			break;
		case R_AARCH64_MOVW_UABS_G2_NC:
			overflow_check = false;
			fallthrough;
		case R_AARCH64_MOVW_UABS_G2:
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 32,
					      AARCH64_INSN_IMM_MOVKZ);
			break;
		case R_AARCH64_MOVW_UABS_G3:
			/* We're using the top bits so we can't overflow. */
			overflow_check = false;
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 48,
					      AARCH64_INSN_IMM_MOVKZ);
			break;
		case R_AARCH64_MOVW_SABS_G0:
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 0,
					      AARCH64_INSN_IMM_MOVNZ);
			break;
		case R_AARCH64_MOVW_SABS_G1:
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 16,
					      AARCH64_INSN_IMM_MOVNZ);
			break;
		case R_AARCH64_MOVW_SABS_G2:
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 32,
					      AARCH64_INSN_IMM_MOVNZ);
			break;
		case R_AARCH64_MOVW_PREL_G0_NC:
			overflow_check = false;
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 0,
					      AARCH64_INSN_IMM_MOVKZ);
			break;
		case R_AARCH64_MOVW_PREL_G0:
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 0,
					      AARCH64_INSN_IMM_MOVNZ);
			break;
		case R_AARCH64_MOVW_PREL_G1_NC:
			overflow_check = false;
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 16,
					      AARCH64_INSN_IMM_MOVKZ);
			break;
		case R_AARCH64_MOVW_PREL_G1:
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 16,
					      AARCH64_INSN_IMM_MOVNZ);
			break;
		case R_AARCH64_MOVW_PREL_G2_NC:
			overflow_check = false;
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 32,
					      AARCH64_INSN_IMM_MOVKZ);
			break;
		case R_AARCH64_MOVW_PREL_G2:
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 32,
					      AARCH64_INSN_IMM_MOVNZ);
			break;
		case R_AARCH64_MOVW_PREL_G3:
			/* We're using the top bits so we can't overflow. */
			overflow_check = false;
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 48,
					      AARCH64_INSN_IMM_MOVNZ);
			break;

		/* Immediate instruction relocations. */
		case R_AARCH64_LD_PREL_LO19:
			ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 19,
					     AARCH64_INSN_IMM_19);
			break;
		case R_AARCH64_ADR_PREL_LO21:
			ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 0, 21,
					     AARCH64_INSN_IMM_ADR);
			break;
		case R_AARCH64_ADR_PREL_PG_HI21_NC:
			overflow_check = false;
			fallthrough;
		case R_AARCH64_ADR_PREL_PG_HI21:
			ovf = reloc_insn_adrp(me, sechdrs, loc, val);
			if (ovf && ovf != -ERANGE)
				return ovf;
			break;
		case R_AARCH64_ADD_ABS_LO12_NC:
		case R_AARCH64_LDST8_ABS_LO12_NC:
			overflow_check = false;
			ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 0, 12,
					     AARCH64_INSN_IMM_12);
			break;
		case R_AARCH64_LDST16_ABS_LO12_NC:
			overflow_check = false;
			ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 1, 11,
					     AARCH64_INSN_IMM_12);
			break;
		case R_AARCH64_LDST32_ABS_LO12_NC:
			overflow_check = false;
			ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 2, 10,
					     AARCH64_INSN_IMM_12);
			break;
		case R_AARCH64_LDST64_ABS_LO12_NC:
			overflow_check = false;
			ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 3, 9,
					     AARCH64_INSN_IMM_12);
			break;
		case R_AARCH64_LDST128_ABS_LO12_NC:
			overflow_check = false;
			ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 4, 8,
					     AARCH64_INSN_IMM_12);
			break;
		case R_AARCH64_TSTBR14:
			ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 14,
					     AARCH64_INSN_IMM_14);
			break;
		case R_AARCH64_CONDBR19:
			ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 19,
					     AARCH64_INSN_IMM_19);
			break;
		case R_AARCH64_JUMP26:
		case R_AARCH64_CALL26:
			ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 26,
					     AARCH64_INSN_IMM_26);

			if (IS_ENABLED(CONFIG_ARM64_MODULE_PLTS) &&
			    ovf == -ERANGE) {
				val = module_emit_plt_entry(me, sechdrs, loc, &rel[i], sym);
				if (!val)
					return -ENOEXEC;
				ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2,
						     26, AARCH64_INSN_IMM_26);
			}
			break;

		default:
			pr_err("module %s: unsupported RELA relocation: %llu\n",
			       me->name, ELF64_R_TYPE(rel[i].r_info));
			return -ENOEXEC;
		}

		if (overflow_check && ovf == -ERANGE)
			goto overflow;

	}

	return 0;

overflow:
	pr_err("module %s: overflow in relocation type %d val %Lx\n",
	       me->name, (int)ELF64_R_TYPE(rel[i].r_info), val);
	return -ENOEXEC;
}

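/*
 * Illustrative example: R_AARCH64_CALL26 encodes a 26-bit, word-aligned
 * branch offset, i.e. a range of roughly +/-128 MiB around the call site.
 * When a module ends up further than that from its target (for instance
 * after the fallback allocation in module_alloc()), the first
 * reloc_insn_imm() call returns -ERANGE and, with CONFIG_ARM64_MODULE_PLTS
 * enabled, the branch is redirected to a PLT veneer emitted within range,
 * against which the relocation is then applied again.
 */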
static const Elf_Shdr *find_section(const Elf_Ehdr *hdr,
				    const Elf_Shdr *sechdrs,
				    const char *name)
{
	const Elf_Shdr *s, *se;
	const char *secstrs = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;

	for (s = sechdrs, se = sechdrs + hdr->e_shnum; s < se; s++) {
		if (strcmp(name, secstrs + s->sh_name) == 0)
			return s;
	}

	return NULL;
}

static inline void __init_plt(struct plt_entry *plt, unsigned long addr)
{
	*plt = get_plt_entry(addr, plt);
}

static int module_init_ftrace_plt(const Elf_Ehdr *hdr,
				  const Elf_Shdr *sechdrs,
				  struct module *mod)
{
#if defined(CONFIG_ARM64_MODULE_PLTS) && defined(CONFIG_DYNAMIC_FTRACE)
	const Elf_Shdr *s;
	struct plt_entry *plts;

	s = find_section(hdr, sechdrs, ".text.ftrace_trampoline");
	if (!s)
		return -ENOEXEC;

	plts = (void *)s->sh_addr;

	__init_plt(&plts[FTRACE_PLT_IDX], FTRACE_ADDR);

	if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_REGS))
		__init_plt(&plts[FTRACE_REGS_PLT_IDX], FTRACE_REGS_ADDR);

	mod->arch.ftrace_trampolines = plts;
#endif
	return 0;
}

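/*
 * Note: the ftrace trampoline PLT entries initialised above give ftrace a
 * branch target that is guaranteed to be within range of the module's
 * patched call sites, since FTRACE_ADDR itself may lie outside the
 * +/-128 MiB branch range when the module was loaded far from the kernel
 * text.
 */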
int module_finalize(const Elf_Ehdr *hdr,
		    const Elf_Shdr *sechdrs,
		    struct module *me)
{
	const Elf_Shdr *s;
	s = find_section(hdr, sechdrs, ".altinstructions");
	if (s)
		apply_alternatives_module((void *)s->sh_addr, s->sh_size);

	return module_init_ftrace_plt(hdr, sechdrs, me);
}