linux/arch/arm64/kernel/module.c
/*
 * AArch64 loadable module support.
 *
 * Copyright (C) 2012 ARM Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * Author: Will Deacon <will.deacon@arm.com>
 */

#include <linux/bitops.h>
#include <linux/elf.h>
#include <linux/gfp.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/moduleloader.h>
#include <linux/vmalloc.h>
#include <asm/alternative.h>
#include <asm/insn.h>
#include <asm/sections.h>

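/*
 * Allocate module memory from the (optionally randomized) module region
 * [module_alloc_base, module_alloc_base + MODULES_VSIZE). If that window
 * is exhausted and module PLTs are available to bridge branches beyond
 * the +/-128 MB range of B/BL, fall back to the entire vmalloc space.
 */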
void *module_alloc(unsigned long size)
{
	void *p;

	p = __vmalloc_node_range(size, MODULE_ALIGN, module_alloc_base,
				module_alloc_base + MODULES_VSIZE,
				GFP_KERNEL, PAGE_KERNEL_EXEC, 0,
				NUMA_NO_NODE, __builtin_return_address(0));

	if (!p && IS_ENABLED(CONFIG_ARM64_MODULE_PLTS) &&
	    !IS_ENABLED(CONFIG_KASAN))
		/*
		 * KASAN can only deal with module allocations being served
		 * from the reserved module region, since the remainder of
		 * the vmalloc region is already backed by zero shadow pages,
		 * and punching holes into it is non-trivial. Since the module
		 * region is not randomized when KASAN is enabled, it is even
		 * less likely that the module region gets exhausted, so we
		 * can simply omit this fallback in that case.
		 */
		p = __vmalloc_node_range(size, MODULE_ALIGN, VMALLOC_START,
				VMALLOC_END, GFP_KERNEL, PAGE_KERNEL_EXEC, 0,
				NUMA_NO_NODE, __builtin_return_address(0));

	if (p && (kasan_module_alloc(p, size) < 0)) {
		vfree(p);
		return NULL;
	}

	return p;
}

enum aarch64_reloc_op {
	RELOC_OP_NONE,
	RELOC_OP_ABS,
	RELOC_OP_PREL,
	RELOC_OP_PAGE,
};

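/*
 * Compute the raw relocation value in the AArch64 ELF document's terms,
 * where @val is S + A: RELOC_OP_ABS yields S + A, RELOC_OP_PREL yields
 * S + A - P, and RELOC_OP_PAGE yields Page(S + A) - Page(P), with
 * Page(expr) clearing the low 12 bits.
 */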
static u64 do_reloc(enum aarch64_reloc_op reloc_op, void *place, u64 val)
{
	switch (reloc_op) {
	case RELOC_OP_ABS:
		return val;
	case RELOC_OP_PREL:
		return val - (u64)place;
	case RELOC_OP_PAGE:
		return (val & ~0xfff) - ((u64)place & ~0xfff);
	case RELOC_OP_NONE:
		return 0;
	}

	pr_err("do_reloc: unknown relocation operation %d\n", reloc_op);
	return 0;
}

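/*
 * Emit an absolute or PC-relative data quantity of @len bits at @place.
 * The bounds checks accept the union of the signed and unsigned ranges
 * (e.g. [S16_MIN, U16_MAX] for 16 bits), since the ABS/PREL data
 * relocations may legitimately carry either interpretation.
 */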
static int reloc_data(enum aarch64_reloc_op op, void *place, u64 val, int len)
{
	s64 sval = do_reloc(op, place, val);

	switch (len) {
	case 16:
		*(s16 *)place = sval;
		if (sval < S16_MIN || sval > U16_MAX)
			return -ERANGE;
		break;
	case 32:
		*(s32 *)place = sval;
		if (sval < S32_MIN || sval > U32_MAX)
			return -ERANGE;
		break;
	case 64:
		*(s64 *)place = sval;
		break;
	default:
		pr_err("Invalid length (%d) for data relocation\n", len);
		return 0;
	}
	return 0;
}

enum aarch64_insn_movw_imm_type {
	AARCH64_INSN_IMM_MOVNZ,
	AARCH64_INSN_IMM_MOVKZ,
};

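/*
 * Patch the 16-bit immediate of the MOVZ/MOVK/MOVN instruction at @place.
 * AARCH64_INSN_IMM_MOVKZ leaves the opcode alone, while
 * AARCH64_INSN_IMM_MOVNZ flips the instruction between MOVZ and MOVN so
 * that negative values of signed MOVW relocations encode correctly.
 */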
static int reloc_insn_movw(enum aarch64_reloc_op op, void *place, u64 val,
			   int lsb, enum aarch64_insn_movw_imm_type imm_type)
{
	u64 imm;
	s64 sval;
	u32 insn = le32_to_cpu(*(u32 *)place);

	sval = do_reloc(op, place, val);
	imm = sval >> lsb;

	if (imm_type == AARCH64_INSN_IMM_MOVNZ) {
		/*
		 * For signed MOVW relocations, we have to manipulate the
		 * instruction encoding depending on whether or not the
		 * immediate is less than zero.
		 */
		insn &= ~(3 << 29);
		if (sval >= 0) {
			/* >=0: Set the instruction to MOVZ (opcode 10b). */
			insn |= 2 << 29;
		} else {
			/*
			 * <0: Set the instruction to MOVN (opcode 00b).
			 *     Since we've masked the opcode already, we
			 *     don't need to do anything other than
			 *     inverting the new immediate field.
			 */
			imm = ~imm;
		}
	}

	/* Update the instruction with the new encoding. */
	insn = aarch64_insn_encode_immediate(AARCH64_INSN_IMM_16, insn, imm);
	*(u32 *)place = cpu_to_le32(insn);

	if (imm > U16_MAX)
		return -ERANGE;

	return 0;
}

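/*
 * Patch a generic immediate field: drop the low @lsb bits of the
 * relocation value, insert the next @len bits into the instruction's
 * @imm_type field, and fail with -ERANGE if the discarded upper bits
 * are not a plain sign extension of what was encoded.
 */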
static int reloc_insn_imm(enum aarch64_reloc_op op, void *place, u64 val,
			  int lsb, int len, enum aarch64_insn_imm_type imm_type)
{
	u64 imm, imm_mask;
	s64 sval;
	u32 insn = le32_to_cpu(*(u32 *)place);

	/* Calculate the relocation value. */
	sval = do_reloc(op, place, val);
	sval >>= lsb;

	/* Extract the value bits and shift them to bit 0. */
	imm_mask = (BIT(lsb + len) - 1) >> lsb;
	imm = sval & imm_mask;

	/* Update the instruction's immediate field. */
	insn = aarch64_insn_encode_immediate(imm_type, insn, imm);
	*(u32 *)place = cpu_to_le32(insn);

	/*
	 * Extract the upper value bits (including the sign bit) and
	 * shift them to bit 0.
	 */
	sval = (s64)(sval & ~(imm_mask >> 1)) >> (len - 1);

	/*
	 * Overflow has occurred if the upper bits are not all equal to
	 * the sign bit of the value: with no overflow, the shift above
	 * leaves sval equal to 0 (all upper bits clear) or -1 (all upper
	 * bits set), so sval + 1, viewed as unsigned, must be 0 or 1.
	 */
	if ((u64)(sval + 1) >= 2)
		return -ERANGE;

	return 0;
}

int apply_relocate_add(Elf64_Shdr *sechdrs,
		       const char *strtab,
		       unsigned int symindex,
		       unsigned int relsec,
		       struct module *me)
{
	unsigned int i;
	int ovf;
	bool overflow_check;
	Elf64_Sym *sym;
	void *loc;
	u64 val;
	Elf64_Rela *rel = (void *)sechdrs[relsec].sh_addr;

	for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
		/* loc corresponds to P in the AArch64 ELF document. */
		loc = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
			+ rel[i].r_offset;

		/* sym is the ELF symbol we're referring to. */
		sym = (Elf64_Sym *)sechdrs[symindex].sh_addr
			+ ELF64_R_SYM(rel[i].r_info);

		/* val corresponds to (S + A) in the AArch64 ELF document. */
		val = sym->st_value + rel[i].r_addend;

		/* Check for overflow by default. */
		overflow_check = true;

		/* Perform the static relocation. */
		switch (ELF64_R_TYPE(rel[i].r_info)) {
		/* Null relocations. */
		case R_ARM_NONE:
		case R_AARCH64_NONE:
			ovf = 0;
			break;

		/* Data relocations. */
		case R_AARCH64_ABS64:
			overflow_check = false;
			ovf = reloc_data(RELOC_OP_ABS, loc, val, 64);
			break;
		case R_AARCH64_ABS32:
			ovf = reloc_data(RELOC_OP_ABS, loc, val, 32);
			break;
		case R_AARCH64_ABS16:
			ovf = reloc_data(RELOC_OP_ABS, loc, val, 16);
			break;
		case R_AARCH64_PREL64:
			overflow_check = false;
			ovf = reloc_data(RELOC_OP_PREL, loc, val, 64);
			break;
		case R_AARCH64_PREL32:
			ovf = reloc_data(RELOC_OP_PREL, loc, val, 32);
			break;
		case R_AARCH64_PREL16:
			ovf = reloc_data(RELOC_OP_PREL, loc, val, 16);
			break;

		/* MOVW instruction relocations. */
		case R_AARCH64_MOVW_UABS_G0_NC:
			overflow_check = false;
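			/* Fall through. */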
		case R_AARCH64_MOVW_UABS_G0:
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 0,
					      AARCH64_INSN_IMM_MOVKZ);
			break;
		case R_AARCH64_MOVW_UABS_G1_NC:
			overflow_check = false;
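			/* Fall through. */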
		case R_AARCH64_MOVW_UABS_G1:
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 16,
					      AARCH64_INSN_IMM_MOVKZ);
			break;
		case R_AARCH64_MOVW_UABS_G2_NC:
			overflow_check = false;
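			/* Fall through. */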
		case R_AARCH64_MOVW_UABS_G2:
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 32,
					      AARCH64_INSN_IMM_MOVKZ);
			break;
		case R_AARCH64_MOVW_UABS_G3:
			/* We're using the top bits so we can't overflow. */
			overflow_check = false;
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 48,
					      AARCH64_INSN_IMM_MOVKZ);
			break;
		case R_AARCH64_MOVW_SABS_G0:
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 0,
					      AARCH64_INSN_IMM_MOVNZ);
			break;
		case R_AARCH64_MOVW_SABS_G1:
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 16,
					      AARCH64_INSN_IMM_MOVNZ);
			break;
		case R_AARCH64_MOVW_SABS_G2:
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 32,
					      AARCH64_INSN_IMM_MOVNZ);
			break;
		case R_AARCH64_MOVW_PREL_G0_NC:
			overflow_check = false;
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 0,
					      AARCH64_INSN_IMM_MOVKZ);
			break;
		case R_AARCH64_MOVW_PREL_G0:
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 0,
					      AARCH64_INSN_IMM_MOVNZ);
			break;
		case R_AARCH64_MOVW_PREL_G1_NC:
			overflow_check = false;
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 16,
					      AARCH64_INSN_IMM_MOVKZ);
			break;
		case R_AARCH64_MOVW_PREL_G1:
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 16,
					      AARCH64_INSN_IMM_MOVNZ);
			break;
		case R_AARCH64_MOVW_PREL_G2_NC:
			overflow_check = false;
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 32,
					      AARCH64_INSN_IMM_MOVKZ);
			break;
		case R_AARCH64_MOVW_PREL_G2:
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 32,
					      AARCH64_INSN_IMM_MOVNZ);
			break;
		case R_AARCH64_MOVW_PREL_G3:
			/* We're using the top bits so we can't overflow. */
			overflow_check = false;
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 48,
					      AARCH64_INSN_IMM_MOVNZ);
			break;

		/* Immediate instruction relocations. */
		case R_AARCH64_LD_PREL_LO19:
			ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 19,
					     AARCH64_INSN_IMM_19);
			break;
		case R_AARCH64_ADR_PREL_LO21:
			ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 0, 21,
					     AARCH64_INSN_IMM_ADR);
			break;
#ifndef CONFIG_ARM64_ERRATUM_843419
		case R_AARCH64_ADR_PREL_PG_HI21_NC:
			overflow_check = false;
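			/* Fall through. */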
		case R_AARCH64_ADR_PREL_PG_HI21:
			ovf = reloc_insn_imm(RELOC_OP_PAGE, loc, val, 12, 21,
					     AARCH64_INSN_IMM_ADR);
			break;
#endif
		case R_AARCH64_ADD_ABS_LO12_NC:
		case R_AARCH64_LDST8_ABS_LO12_NC:
			overflow_check = false;
			ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 0, 12,
					     AARCH64_INSN_IMM_12);
			break;
		case R_AARCH64_LDST16_ABS_LO12_NC:
			overflow_check = false;
			ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 1, 11,
					     AARCH64_INSN_IMM_12);
			break;
		case R_AARCH64_LDST32_ABS_LO12_NC:
			overflow_check = false;
			ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 2, 10,
					     AARCH64_INSN_IMM_12);
			break;
		case R_AARCH64_LDST64_ABS_LO12_NC:
			overflow_check = false;
			ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 3, 9,
					     AARCH64_INSN_IMM_12);
			break;
		case R_AARCH64_LDST128_ABS_LO12_NC:
			overflow_check = false;
			ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 4, 8,
					     AARCH64_INSN_IMM_12);
			break;
		case R_AARCH64_TSTBR14:
			ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 14,
					     AARCH64_INSN_IMM_14);
			break;
		case R_AARCH64_CONDBR19:
			ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 19,
					     AARCH64_INSN_IMM_19);
			break;
		case R_AARCH64_JUMP26:
		case R_AARCH64_CALL26:
			ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 26,
					     AARCH64_INSN_IMM_26);

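			/*
			 * A branch target beyond the +/-128 MB range can be
			 * redirected through a PLT veneer emitted into the
			 * module, after which the relocation is retried
			 * against the veneer's address.
			 */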
			if (IS_ENABLED(CONFIG_ARM64_MODULE_PLTS) &&
			    ovf == -ERANGE) {
				val = module_emit_plt_entry(me, &rel[i], sym);
				ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2,
						     26, AARCH64_INSN_IMM_26);
			}
			break;

		default:
			pr_err("module %s: unsupported RELA relocation: %llu\n",
			       me->name, ELF64_R_TYPE(rel[i].r_info));
			return -ENOEXEC;
		}

		if (overflow_check && ovf == -ERANGE)
			goto overflow;

	}

	return 0;

overflow:
	pr_err("module %s: overflow in relocation type %d val %Lx\n",
	       me->name, (int)ELF64_R_TYPE(rel[i].r_info), val);
	return -ENOEXEC;
}

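/*
 * Post-relocation fixups: patch in any alternative instruction sequences
 * recorded in the module's .altinstructions section.
 */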
int module_finalize(const Elf_Ehdr *hdr,
		    const Elf_Shdr *sechdrs,
		    struct module *me)
{
	const Elf_Shdr *s, *se;
	const char *secstrs = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;

	for (s = sechdrs, se = sechdrs + hdr->e_shnum; s < se; s++) {
		if (strcmp(".altinstructions", secstrs + s->sh_name) == 0) {
			apply_alternatives((void *)s->sh_addr, s->sh_size);
			return 0;
		}
	}

	return 0;
}