linux/arch/arm64/kernel/module.c
/*
 * AArch64 loadable module support.
 *
 * Copyright (C) 2012 ARM Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * Author: Will Deacon <will.deacon@arm.com>
 */

#include <linux/bitops.h>
#include <linux/elf.h>
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/moduleloader.h>
#include <linux/vmalloc.h>
#include <asm/alternative.h>
#include <asm/insn.h>
#include <asm/sections.h>

#define AARCH64_INSN_IMM_MOVNZ		AARCH64_INSN_IMM_MAX
#define AARCH64_INSN_IMM_MOVK		AARCH64_INSN_IMM_16

void *module_alloc(unsigned long size)
{
	return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
				    GFP_KERNEL, PAGE_KERNEL_EXEC, NUMA_NO_NODE,
				    __builtin_return_address(0));
}
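
/*
 * Background note (added annotation): modules are allocated from the
 * dedicated MODULES_VADDR..MODULES_END window. This placement matters for
 * the branch relocations handled below: R_AARCH64_JUMP26 and R_AARCH64_CALL26
 * only encode a signed 26-bit word offset, i.e. a +/-128 MiB range, so module
 * code must land within that distance of anything it branches to directly.
 */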

enum aarch64_reloc_op {
	RELOC_OP_NONE,
	RELOC_OP_ABS,
	RELOC_OP_PREL,
	RELOC_OP_PAGE,
};

static u64 do_reloc(enum aarch64_reloc_op reloc_op, void *place, u64 val)
{
	switch (reloc_op) {
	case RELOC_OP_ABS:
		return val;
	case RELOC_OP_PREL:
		return val - (u64)place;
	case RELOC_OP_PAGE:
		return (val & ~0xfff) - ((u64)place & ~0xfff);
	case RELOC_OP_NONE:
		return 0;
	}

	pr_err("do_reloc: unknown relocation operation %d\n", reloc_op);
	return 0;
}
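
/*
 * For illustration, in AArch64 ELF notation (P = place, S + A = val) the
 * operations above compute:
 *
 *   RELOC_OP_ABS:   S + A
 *   RELOC_OP_PREL:  S + A - P
 *   RELOC_OP_PAGE:  Page(S + A) - Page(P), where Page(x) = x & ~0xfff
 *
 * e.g. with place = 0xffff000000001004 and val = 0xffff000000002008,
 * RELOC_OP_PREL yields 0x1004 and RELOC_OP_PAGE yields 0x1000.
 */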

static int reloc_data(enum aarch64_reloc_op op, void *place, u64 val, int len)
{
	/* Build the mask in two shifts to avoid an undefined shift for len == 64. */
	u64 imm_mask = (1UL << (len - 1) << 1) - 1;
	s64 sval = do_reloc(op, place, val);

	switch (len) {
	case 16:
		*(s16 *)place = sval;
		break;
	case 32:
		*(s32 *)place = sval;
		break;
	case 64:
		*(s64 *)place = sval;
		break;
	default:
		pr_err("Invalid length (%d) for data relocation\n", len);
		return 0;
	}

	/*
	 * Extract the upper value bits (including the sign bit) and
	 * shift them to bit 0.
	 */
	sval = (s64)(sval & ~(imm_mask >> 1)) >> (len - 1);

	/*
	 * Overflow has occurred if the value is not representable in
	 * len bits (i.e. the bottom len bits are not sign-extended and
	 * the top bits are not all zero).
	 */
	if ((u64)(sval + 1) > 2)
		return -ERANGE;

	return 0;
}
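
/*
 * Worked example of the check above (illustrative values): for a 16-bit data
 * relocation, imm_mask is 0xffff, so the bits at and above bit 15 are shifted
 * down to bit 0 before the test:
 *
 *   sval = 0x0000ffff -> upper bits =  1 -> sval + 1 == 2 -> accepted
 *   sval = 0x00010000 -> upper bits =  2 -> sval + 1 == 3 -> -ERANGE
 *   sval = -1         -> upper bits = -1 -> sval + 1 == 0 -> accepted
 *
 * i.e. the accepted range for the 16-bit relocations is [-2^15, 2^16),
 * matching the ranges the AArch64 ELF psABI documents for them.
 */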

static int reloc_insn_movw(enum aarch64_reloc_op op, void *place, u64 val,
			   int lsb, enum aarch64_insn_imm_type imm_type)
{
	u64 imm, limit = 0;
	s64 sval;
	u32 insn = le32_to_cpu(*(u32 *)place);

	sval = do_reloc(op, place, val);
	sval >>= lsb;
	imm = sval & 0xffff;

	if (imm_type == AARCH64_INSN_IMM_MOVNZ) {
		/*
		 * For signed MOVW relocations, we have to manipulate the
		 * instruction encoding depending on whether or not the
		 * relocated value is less than zero.
		 */
		insn &= ~(3 << 29);
		if (sval >= 0) {
			/* >=0: Set the instruction to MOVZ (opcode 10b). */
			insn |= 2 << 29;
		} else {
			/*
			 * <0: Set the instruction to MOVN (opcode 00b).
			 *     Since we've masked the opcode already, we
			 *     don't need to do anything other than
			 *     inverting the new immediate field.
			 */
			imm = ~imm;
		}
		imm_type = AARCH64_INSN_IMM_MOVK;
	}

	/* Update the instruction with the new encoding. */
	insn = aarch64_insn_encode_immediate(imm_type, insn, imm);
	*(u32 *)place = cpu_to_le32(insn);

	/* Shift out the immediate field. */
	sval >>= 16;

	/*
	 * For unsigned immediates, the overflow check is straightforward.
	 * For signed immediates, the sign bit is actually the bit past the
	 * most significant bit of the field.
	 * The AARCH64_INSN_IMM_16 immediate type is unsigned.
	 */
	if (imm_type != AARCH64_INSN_IMM_16) {
		sval++;
		limit++;
	}

	/* Check the upper bits depending on the sign of the immediate. */
	if ((u64)sval > limit)
		return -ERANGE;

	return 0;
}
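
/*
 * Illustrative example of the MOVN/MOVZ selection above: relocating
 * R_AARCH64_MOVW_SABS_G0 with a value of -2 gives sval = -2 and imm = 0xfffe.
 * As sval is negative, the opcode is flipped to MOVN and the immediate
 * inverted to 0x0001; MOVN Xd, #0x1 writes NOT(0x1) = 0xfffffffffffffffe (-2)
 * to the register. The remaining upper bits of sval (-1 after the shift by
 * 16) then pass the range check.
 */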

static int reloc_insn_imm(enum aarch64_reloc_op op, void *place, u64 val,
			  int lsb, int len, enum aarch64_insn_imm_type imm_type)
{
	u64 imm, imm_mask;
	s64 sval;
	u32 insn = le32_to_cpu(*(u32 *)place);

	/* Calculate the relocation value. */
	sval = do_reloc(op, place, val);
	sval >>= lsb;

	/* Extract the value bits and shift them to bit 0. */
	imm_mask = (BIT(lsb + len) - 1) >> lsb;
	imm = sval & imm_mask;

	/* Update the instruction's immediate field. */
	insn = aarch64_insn_encode_immediate(imm_type, insn, imm);
	*(u32 *)place = cpu_to_le32(insn);

	/*
	 * Extract the upper value bits (including the sign bit) and
	 * shift them to bit 0.
	 */
	sval = (s64)(sval & ~(imm_mask >> 1)) >> (len - 1);

	/*
	 * Overflow has occurred if the upper bits are not all equal to
	 * the sign bit of the value.
	 */
	if ((u64)(sval + 1) >= 2)
		return -ERANGE;

	return 0;
}
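
/*
 * For illustration: an R_AARCH64_CALL26 relocation uses op = RELOC_OP_PREL,
 * lsb = 2 and len = 26. A branch displacement of 0x101c - 0x1000 = 0x1c
 * becomes sval = 0x1c >> 2 = 7, which is inserted into the 26-bit immediate
 * field of the BL instruction; the final check rejects displacements outside
 * the +/-128 MiB range that a signed 26-bit word offset can express.
 */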

int apply_relocate_add(Elf64_Shdr *sechdrs,
		       const char *strtab,
		       unsigned int symindex,
		       unsigned int relsec,
		       struct module *me)
{
	unsigned int i;
	int ovf;
	bool overflow_check;
	Elf64_Sym *sym;
	void *loc;
	u64 val;
	Elf64_Rela *rel = (void *)sechdrs[relsec].sh_addr;

	for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
		/* loc corresponds to P in the AArch64 ELF document. */
		loc = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
			+ rel[i].r_offset;

		/* sym is the ELF symbol we're referring to. */
		sym = (Elf64_Sym *)sechdrs[symindex].sh_addr
			+ ELF64_R_SYM(rel[i].r_info);

		/* val corresponds to (S + A) in the AArch64 ELF document. */
		val = sym->st_value + rel[i].r_addend;

		/* Check for overflow by default. */
		overflow_check = true;

		/* Perform the static relocation. */
		switch (ELF64_R_TYPE(rel[i].r_info)) {
		/* Null relocations. */
		case R_ARM_NONE:
		case R_AARCH64_NONE:
			ovf = 0;
			break;

		/* Data relocations. */
		case R_AARCH64_ABS64:
			overflow_check = false;
			ovf = reloc_data(RELOC_OP_ABS, loc, val, 64);
			break;
		case R_AARCH64_ABS32:
			ovf = reloc_data(RELOC_OP_ABS, loc, val, 32);
			break;
		case R_AARCH64_ABS16:
			ovf = reloc_data(RELOC_OP_ABS, loc, val, 16);
			break;
		case R_AARCH64_PREL64:
			overflow_check = false;
			ovf = reloc_data(RELOC_OP_PREL, loc, val, 64);
			break;
		case R_AARCH64_PREL32:
			ovf = reloc_data(RELOC_OP_PREL, loc, val, 32);
			break;
		case R_AARCH64_PREL16:
			ovf = reloc_data(RELOC_OP_PREL, loc, val, 16);
			break;

		/* MOVW instruction relocations. */
		case R_AARCH64_MOVW_UABS_G0_NC:
			overflow_check = false;
			/* Fall through. */
		case R_AARCH64_MOVW_UABS_G0:
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 0,
					      AARCH64_INSN_IMM_16);
			break;
		case R_AARCH64_MOVW_UABS_G1_NC:
			overflow_check = false;
			/* Fall through. */
		case R_AARCH64_MOVW_UABS_G1:
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 16,
					      AARCH64_INSN_IMM_16);
			break;
		case R_AARCH64_MOVW_UABS_G2_NC:
			overflow_check = false;
			/* Fall through. */
		case R_AARCH64_MOVW_UABS_G2:
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 32,
					      AARCH64_INSN_IMM_16);
			break;
		case R_AARCH64_MOVW_UABS_G3:
			/* We're using the top bits so we can't overflow. */
			overflow_check = false;
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 48,
					      AARCH64_INSN_IMM_16);
			break;
		case R_AARCH64_MOVW_SABS_G0:
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 0,
					      AARCH64_INSN_IMM_MOVNZ);
			break;
		case R_AARCH64_MOVW_SABS_G1:
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 16,
					      AARCH64_INSN_IMM_MOVNZ);
			break;
		case R_AARCH64_MOVW_SABS_G2:
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 32,
					      AARCH64_INSN_IMM_MOVNZ);
			break;
		case R_AARCH64_MOVW_PREL_G0_NC:
			overflow_check = false;
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 0,
					      AARCH64_INSN_IMM_MOVK);
			break;
		case R_AARCH64_MOVW_PREL_G0:
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 0,
					      AARCH64_INSN_IMM_MOVNZ);
			break;
		case R_AARCH64_MOVW_PREL_G1_NC:
			overflow_check = false;
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 16,
					      AARCH64_INSN_IMM_MOVK);
			break;
		case R_AARCH64_MOVW_PREL_G1:
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 16,
					      AARCH64_INSN_IMM_MOVNZ);
			break;
		case R_AARCH64_MOVW_PREL_G2_NC:
			overflow_check = false;
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 32,
					      AARCH64_INSN_IMM_MOVK);
			break;
		case R_AARCH64_MOVW_PREL_G2:
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 32,
					      AARCH64_INSN_IMM_MOVNZ);
			break;
		case R_AARCH64_MOVW_PREL_G3:
			/* We're using the top bits so we can't overflow. */
			overflow_check = false;
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 48,
					      AARCH64_INSN_IMM_MOVNZ);
			break;

		/* Immediate instruction relocations. */
		case R_AARCH64_LD_PREL_LO19:
			ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 19,
					     AARCH64_INSN_IMM_19);
			break;
		case R_AARCH64_ADR_PREL_LO21:
			ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 0, 21,
					     AARCH64_INSN_IMM_ADR);
			break;
		case R_AARCH64_ADR_PREL_PG_HI21_NC:
			overflow_check = false;
			/* Fall through. */
		case R_AARCH64_ADR_PREL_PG_HI21:
			ovf = reloc_insn_imm(RELOC_OP_PAGE, loc, val, 12, 21,
					     AARCH64_INSN_IMM_ADR);
			break;
		case R_AARCH64_ADD_ABS_LO12_NC:
		case R_AARCH64_LDST8_ABS_LO12_NC:
			overflow_check = false;
			ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 0, 12,
					     AARCH64_INSN_IMM_12);
			break;
		case R_AARCH64_LDST16_ABS_LO12_NC:
			overflow_check = false;
			ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 1, 11,
					     AARCH64_INSN_IMM_12);
			break;
		case R_AARCH64_LDST32_ABS_LO12_NC:
			overflow_check = false;
			ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 2, 10,
					     AARCH64_INSN_IMM_12);
			break;
		case R_AARCH64_LDST64_ABS_LO12_NC:
			overflow_check = false;
			ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 3, 9,
					     AARCH64_INSN_IMM_12);
			break;
		case R_AARCH64_LDST128_ABS_LO12_NC:
			overflow_check = false;
			ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 4, 8,
					     AARCH64_INSN_IMM_12);
			break;
		case R_AARCH64_TSTBR14:
			ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 14,
					     AARCH64_INSN_IMM_14);
			break;
		case R_AARCH64_CONDBR19:
			ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 19,
					     AARCH64_INSN_IMM_19);
			break;
		case R_AARCH64_JUMP26:
		case R_AARCH64_CALL26:
			ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 26,
					     AARCH64_INSN_IMM_26);
			break;

		default:
			pr_err("module %s: unsupported RELA relocation: %llu\n",
			       me->name, ELF64_R_TYPE(rel[i].r_info));
			return -ENOEXEC;
		}

		if (overflow_check && ovf == -ERANGE)
			goto overflow;

	}

	return 0;

overflow:
	pr_err("module %s: overflow in relocation type %d val %Lx\n",
	       me->name, (int)ELF64_R_TYPE(rel[i].r_info), val);
	return -ENOEXEC;
}
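
/*
 * For orientation (illustrative only; exact compiler output varies): a
 * typical consumer of the MOVW_UABS relocations above is a movz/movk
 * sequence that materialises a 64-bit absolute address 16 bits at a time,
 * one relocation per instruction:
 *
 *   movz x0, #:abs_g3:sym		// bits [63:48], top group
 *   movk x0, #:abs_g2_nc:sym		// bits [47:32]
 *   movk x0, #:abs_g1_nc:sym		// bits [31:16]
 *   movk x0, #:abs_g0_nc:sym		// bits [15:0]
 *
 * Each relocation only patches the 16-bit immediate field of its own
 * instruction, which is why the _NC ("no check") variants skip the overflow
 * test in the switch above.
 */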

int module_finalize(const Elf_Ehdr *hdr,
		    const Elf_Shdr *sechdrs,
		    struct module *me)
{
	const Elf_Shdr *s, *se;
	const char *secstrs = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;

	for (s = sechdrs, se = sechdrs + hdr->e_shnum; s < se; s++) {
		if (strcmp(".altinstructions", secstrs + s->sh_name) == 0) {
			apply_alternatives((void *)s->sh_addr, s->sh_size);
			return 0;
		}
	}

	return 0;
}