linux/arch/arm64/kernel/module.c
/*
 * AArch64 loadable module support.
 *
 * Copyright (C) 2012 ARM Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * Author: Will Deacon <will.deacon@arm.com>
 */

#include <linux/bitops.h>
#include <linux/elf.h>
#include <linux/gfp.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/moduleloader.h>
#include <linux/vmalloc.h>
#include <asm/alternative.h>
#include <asm/insn.h>
#include <asm/sections.h>

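/*
 * Allocate memory for a module. The allocation is first attempted within
 * the dedicated module region; if that fails and module PLTs are
 * available, it falls back to the much larger 4 GiB window used by the
 * second __vmalloc_node_range() call below.
 */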
void *module_alloc(unsigned long size)
{
        gfp_t gfp_mask = GFP_KERNEL;
        void *p;

        /* Silence the initial allocation */
        if (IS_ENABLED(CONFIG_ARM64_MODULE_PLTS))
                gfp_mask |= __GFP_NOWARN;

        p = __vmalloc_node_range(size, MODULE_ALIGN, module_alloc_base,
                                module_alloc_base + MODULES_VSIZE,
                                gfp_mask, PAGE_KERNEL_EXEC, 0,
                                NUMA_NO_NODE, __builtin_return_address(0));

        if (!p && IS_ENABLED(CONFIG_ARM64_MODULE_PLTS) &&
            !IS_ENABLED(CONFIG_KASAN))
                /*
                 * KASAN can only deal with module allocations being served
                 * from the reserved module region, since the remainder of
                 * the vmalloc region is already backed by zero shadow pages,
                 * and punching holes into it is non-trivial. Since the module
                 * region is not randomized when KASAN is enabled, it is even
                 * less likely that the module region gets exhausted, so we
                 * can simply omit this fallback in that case.
                 */
                p = __vmalloc_node_range(size, MODULE_ALIGN, module_alloc_base,
                                module_alloc_base + SZ_4G, GFP_KERNEL,
                                PAGE_KERNEL_EXEC, 0, NUMA_NO_NODE,
                                __builtin_return_address(0));

        if (p && (kasan_module_alloc(p, size) < 0)) {
                vfree(p);
                return NULL;
        }

        return p;
}

enum aarch64_reloc_op {
        RELOC_OP_NONE,
        RELOC_OP_ABS,
        RELOC_OP_PREL,
        RELOC_OP_PAGE,
};

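/*
 * Compute the raw value of a relocation at @place. In the terms of the
 * AArch64 ELF specification, @val is (S + A), so the result is (S + A),
 * (S + A) - P or Page(S + A) - Page(P), depending on @reloc_op.
 */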
static u64 do_reloc(enum aarch64_reloc_op reloc_op, __le32 *place, u64 val)
{
        switch (reloc_op) {
        case RELOC_OP_ABS:
                return val;
        case RELOC_OP_PREL:
                return val - (u64)place;
        case RELOC_OP_PAGE:
                return (val & ~0xfff) - ((u64)place & ~0xfff);
        case RELOC_OP_NONE:
                return 0;
        }

        pr_err("do_reloc: unknown relocation operation %d\n", reloc_op);
        return 0;
}

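/*
 * Apply a data relocation of @len bits at @place. The value is stored
 * first and range-checked afterwards, so an out-of-range value is written
 * back truncated before -ERANGE is returned.
 */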
static int reloc_data(enum aarch64_reloc_op op, void *place, u64 val, int len)
{
        s64 sval = do_reloc(op, place, val);

        switch (len) {
        case 16:
                *(s16 *)place = sval;
                if (sval < S16_MIN || sval > U16_MAX)
                        return -ERANGE;
                break;
        case 32:
                *(s32 *)place = sval;
                if (sval < S32_MIN || sval > U32_MAX)
                        return -ERANGE;
                break;
        case 64:
                *(s64 *)place = sval;
                break;
        default:
                pr_err("Invalid length (%d) for data relocation\n", len);
                return 0;
        }
        return 0;
}

enum aarch64_insn_movw_imm_type {
        AARCH64_INSN_IMM_MOVNZ,
        AARCH64_INSN_IMM_MOVKZ,
};

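/*
 * Patch the 16-bit immediate of the MOVZ/MOVK/MOVN instruction at @place
 * with bits [lsb + 15:lsb] of the relocation value. For the signed
 * (MOVNZ) flavour, the opcode is rewritten to MOVZ or MOVN according to
 * the sign of the value.
 */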
static int reloc_insn_movw(enum aarch64_reloc_op op, __le32 *place, u64 val,
                           int lsb, enum aarch64_insn_movw_imm_type imm_type)
{
        u64 imm;
        s64 sval;
        u32 insn = le32_to_cpu(*place);

        sval = do_reloc(op, place, val);
        imm = sval >> lsb;

        if (imm_type == AARCH64_INSN_IMM_MOVNZ) {
                /*
                 * For signed MOVW relocations, we have to manipulate the
                 * instruction encoding depending on whether or not the
                 * immediate is less than zero.
                 */
                insn &= ~(3 << 29);
                if (sval >= 0) {
                        /* >=0: Set the instruction to MOVZ (opcode 10b). */
                        insn |= 2 << 29;
                } else {
                        /*
                         * <0: Set the instruction to MOVN (opcode 00b).
                         *     Since we've masked the opcode already, we
                         *     don't need to do anything other than
                         *     inverting the new immediate field.
                         */
                        imm = ~imm;
                }
        }

        /* Update the instruction with the new encoding. */
        insn = aarch64_insn_encode_immediate(AARCH64_INSN_IMM_16, insn, imm);
        *place = cpu_to_le32(insn);

        if (imm > U16_MAX)
                return -ERANGE;

        return 0;
}

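/*
 * Patch a @len-bit immediate field of the instruction at @place with bits
 * [lsb + len - 1:lsb] of the relocation value, failing with -ERANGE if
 * the bits discarded above the field are not a plain sign extension.
 */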
static int reloc_insn_imm(enum aarch64_reloc_op op, __le32 *place, u64 val,
                          int lsb, int len, enum aarch64_insn_imm_type imm_type)
{
        u64 imm, imm_mask;
        s64 sval;
        u32 insn = le32_to_cpu(*place);

        /* Calculate the relocation value. */
        sval = do_reloc(op, place, val);
        sval >>= lsb;

        /* Extract the value bits and shift them to bit 0. */
        imm_mask = (BIT(lsb + len) - 1) >> lsb;
        imm = sval & imm_mask;

        /* Update the instruction's immediate field. */
        insn = aarch64_insn_encode_immediate(imm_type, insn, imm);
        *place = cpu_to_le32(insn);

        /*
         * Extract the upper value bits (including the sign bit) and
         * shift them to bit 0.
         */
        sval = (s64)(sval & ~(imm_mask >> 1)) >> (len - 1);

        /*
         * Overflow has occurred if the upper bits are not all equal to
         * the sign bit of the value.
         */
        if ((u64)(sval + 1) >= 2)
                return -ERANGE;

        return 0;
}

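/*
 * Apply an ADRP page-relative relocation. Sites at offsets affected by
 * Cortex-A53 erratum #843419 must not use ADRP: those are patched to a
 * plain ADR when the target page is within range, and are otherwise
 * bounced through a veneer.
 */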
static int reloc_insn_adrp(struct module *mod, Elf64_Shdr *sechdrs,
                           __le32 *place, u64 val)
{
        u32 insn;

        if (!is_forbidden_offset_for_adrp(place))
                return reloc_insn_imm(RELOC_OP_PAGE, place, val, 12, 21,
                                      AARCH64_INSN_IMM_ADR);

        /* patch ADRP to ADR if it is in range */
        if (!reloc_insn_imm(RELOC_OP_PREL, place, val & ~0xfff, 0, 21,
                            AARCH64_INSN_IMM_ADR)) {
                insn = le32_to_cpu(*place);
                insn &= ~BIT(31);
        } else {
                /* out of range for ADR -> emit a veneer */
                val = module_emit_veneer_for_adrp(mod, sechdrs, place, val & ~0xfff);
                if (!val)
                        return -ENOEXEC;
                insn = aarch64_insn_gen_branch_imm((u64)place, val,
                                                   AARCH64_INSN_BRANCH_NOLINK);
        }

        *place = cpu_to_le32(insn);
        return 0;
}

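/*
 * Apply all RELA relocations in section @relsec of module @me, dispatching
 * each supported relocation type to one of the helpers above. Branches
 * whose target overflows the immediate field may be redirected through a
 * PLT entry when CONFIG_ARM64_MODULE_PLTS is enabled.
 */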
int apply_relocate_add(Elf64_Shdr *sechdrs,
                       const char *strtab,
                       unsigned int symindex,
                       unsigned int relsec,
                       struct module *me)
{
        unsigned int i;
        int ovf;
        bool overflow_check;
        Elf64_Sym *sym;
        void *loc;
        u64 val;
        Elf64_Rela *rel = (void *)sechdrs[relsec].sh_addr;

        for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
                /* loc corresponds to P in the AArch64 ELF document. */
                loc = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
                        + rel[i].r_offset;

                /* sym is the ELF symbol we're referring to. */
                sym = (Elf64_Sym *)sechdrs[symindex].sh_addr
                        + ELF64_R_SYM(rel[i].r_info);

                /* val corresponds to (S + A) in the AArch64 ELF document. */
                val = sym->st_value + rel[i].r_addend;

                /* Check for overflow by default. */
                overflow_check = true;

                /* Perform the static relocation. */
                switch (ELF64_R_TYPE(rel[i].r_info)) {
                /* Null relocations. */
                case R_ARM_NONE:
                case R_AARCH64_NONE:
                        ovf = 0;
                        break;

                /* Data relocations. */
                case R_AARCH64_ABS64:
                        overflow_check = false;
                        ovf = reloc_data(RELOC_OP_ABS, loc, val, 64);
                        break;
                case R_AARCH64_ABS32:
                        ovf = reloc_data(RELOC_OP_ABS, loc, val, 32);
                        break;
                case R_AARCH64_ABS16:
                        ovf = reloc_data(RELOC_OP_ABS, loc, val, 16);
                        break;
                case R_AARCH64_PREL64:
                        overflow_check = false;
                        ovf = reloc_data(RELOC_OP_PREL, loc, val, 64);
                        break;
                case R_AARCH64_PREL32:
                        ovf = reloc_data(RELOC_OP_PREL, loc, val, 32);
                        break;
                case R_AARCH64_PREL16:
                        ovf = reloc_data(RELOC_OP_PREL, loc, val, 16);
                        break;

                /* MOVW instruction relocations. */
                case R_AARCH64_MOVW_UABS_G0_NC:
                        overflow_check = false;
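                        /* Fall through: _NC only disables the overflow check. */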
                case R_AARCH64_MOVW_UABS_G0:
                        ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 0,
                                              AARCH64_INSN_IMM_MOVKZ);
                        break;
                case R_AARCH64_MOVW_UABS_G1_NC:
                        overflow_check = false;
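                        /* Fall through. */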
                case R_AARCH64_MOVW_UABS_G1:
                        ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 16,
                                              AARCH64_INSN_IMM_MOVKZ);
                        break;
                case R_AARCH64_MOVW_UABS_G2_NC:
                        overflow_check = false;
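                        /* Fall through. */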
                case R_AARCH64_MOVW_UABS_G2:
                        ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 32,
                                              AARCH64_INSN_IMM_MOVKZ);
                        break;
                case R_AARCH64_MOVW_UABS_G3:
                        /* We're using the top bits so we can't overflow. */
                        overflow_check = false;
                        ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 48,
                                              AARCH64_INSN_IMM_MOVKZ);
                        break;
                case R_AARCH64_MOVW_SABS_G0:
                        ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 0,
                                              AARCH64_INSN_IMM_MOVNZ);
                        break;
                case R_AARCH64_MOVW_SABS_G1:
                        ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 16,
                                              AARCH64_INSN_IMM_MOVNZ);
                        break;
                case R_AARCH64_MOVW_SABS_G2:
                        ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 32,
                                              AARCH64_INSN_IMM_MOVNZ);
                        break;
                case R_AARCH64_MOVW_PREL_G0_NC:
                        overflow_check = false;
                        ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 0,
                                              AARCH64_INSN_IMM_MOVKZ);
                        break;
                case R_AARCH64_MOVW_PREL_G0:
                        ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 0,
                                              AARCH64_INSN_IMM_MOVNZ);
                        break;
                case R_AARCH64_MOVW_PREL_G1_NC:
                        overflow_check = false;
                        ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 16,
                                              AARCH64_INSN_IMM_MOVKZ);
                        break;
                case R_AARCH64_MOVW_PREL_G1:
                        ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 16,
                                              AARCH64_INSN_IMM_MOVNZ);
                        break;
                case R_AARCH64_MOVW_PREL_G2_NC:
                        overflow_check = false;
                        ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 32,
                                              AARCH64_INSN_IMM_MOVKZ);
                        break;
                case R_AARCH64_MOVW_PREL_G2:
                        ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 32,
                                              AARCH64_INSN_IMM_MOVNZ);
                        break;
                case R_AARCH64_MOVW_PREL_G3:
                        /* We're using the top bits so we can't overflow. */
                        overflow_check = false;
                        ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 48,
                                              AARCH64_INSN_IMM_MOVNZ);
                        break;

                /* Immediate instruction relocations. */
                case R_AARCH64_LD_PREL_LO19:
                        ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 19,
                                             AARCH64_INSN_IMM_19);
                        break;
                case R_AARCH64_ADR_PREL_LO21:
                        ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 0, 21,
                                             AARCH64_INSN_IMM_ADR);
                        break;
                case R_AARCH64_ADR_PREL_PG_HI21_NC:
                        overflow_check = false;
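                        /* Fall through: _NC only disables the overflow check. */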
                case R_AARCH64_ADR_PREL_PG_HI21:
                        ovf = reloc_insn_adrp(me, sechdrs, loc, val);
                        if (ovf && ovf != -ERANGE)
                                return ovf;
                        break;
                case R_AARCH64_ADD_ABS_LO12_NC:
                case R_AARCH64_LDST8_ABS_LO12_NC:
                        overflow_check = false;
                        ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 0, 12,
                                             AARCH64_INSN_IMM_12);
                        break;
                case R_AARCH64_LDST16_ABS_LO12_NC:
                        overflow_check = false;
                        ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 1, 11,
                                             AARCH64_INSN_IMM_12);
                        break;
                case R_AARCH64_LDST32_ABS_LO12_NC:
                        overflow_check = false;
                        ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 2, 10,
                                             AARCH64_INSN_IMM_12);
                        break;
                case R_AARCH64_LDST64_ABS_LO12_NC:
                        overflow_check = false;
                        ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 3, 9,
                                             AARCH64_INSN_IMM_12);
                        break;
                case R_AARCH64_LDST128_ABS_LO12_NC:
                        overflow_check = false;
                        ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 4, 8,
                                             AARCH64_INSN_IMM_12);
                        break;
                case R_AARCH64_TSTBR14:
                        ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 14,
                                             AARCH64_INSN_IMM_14);
                        break;
                case R_AARCH64_CONDBR19:
                        ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 19,
                                             AARCH64_INSN_IMM_19);
                        break;
                case R_AARCH64_JUMP26:
                case R_AARCH64_CALL26:
                        ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 26,
                                             AARCH64_INSN_IMM_26);

                        if (IS_ENABLED(CONFIG_ARM64_MODULE_PLTS) &&
                            ovf == -ERANGE) {
                                val = module_emit_plt_entry(me, sechdrs, loc, &rel[i], sym);
                                if (!val)
                                        return -ENOEXEC;
                                ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2,
                                                     26, AARCH64_INSN_IMM_26);
                        }
                        break;

                default:
                        pr_err("module %s: unsupported RELA relocation: %llu\n",
                               me->name, ELF64_R_TYPE(rel[i].r_info));
                        return -ENOEXEC;
                }

                if (overflow_check && ovf == -ERANGE)
                        goto overflow;
        }

        return 0;

overflow:
        pr_err("module %s: overflow in relocation type %d val %Lx\n",
               me->name, (int)ELF64_R_TYPE(rel[i].r_info), val);
        return -ENOEXEC;
}

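/*
 * Called once loading and relocation are complete: apply the alternative
 * instruction patches carried by the module and, when module PLTs are in
 * use, record the module's ftrace trampoline section.
 */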
int module_finalize(const Elf_Ehdr *hdr,
                    const Elf_Shdr *sechdrs,
                    struct module *me)
{
        const Elf_Shdr *s, *se;
        const char *secstrs = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;

        for (s = sechdrs, se = sechdrs + hdr->e_shnum; s < se; s++) {
                if (strcmp(".altinstructions", secstrs + s->sh_name) == 0)
                        apply_alternatives_module((void *)s->sh_addr, s->sh_size);
#ifdef CONFIG_ARM64_MODULE_PLTS
                if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE) &&
                    !strcmp(".text.ftrace_trampoline", secstrs + s->sh_name))
                        me->arch.ftrace_trampoline = (void *)s->sh_addr;
#endif
        }

        return 0;
}