linux/arch/arm/mm/alignment.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/arch/arm/mm/alignment.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 *  Modifications for ARM processor (c) 1995-2001 Russell King
 *  Thumb alignment fault fixups (c) 2004 MontaVista Software, Inc.
 *  - Adapted from gdb/sim/arm/thumbemu.c -- Thumb instruction emulation.
 *    Copyright (C) 1996, Cygnus Software Technologies Ltd.
 */
#include <linux/moduleparam.h>
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/sched/debug.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/sched/signal.h>
#include <linux/uaccess.h>

#include <asm/cp15.h>
#include <asm/system_info.h>
#include <asm/unaligned.h>
#include <asm/opcodes.h>

#include "fault.h"
#include "mm.h"

/*
 * 32-bit misaligned trap handler (c) 1998 San Mehat (CCC) -July 1998
 * /proc/sys/debug/alignment, modified and integrated into
 * Linux 2.1 by Russell King
 *
 * Speed optimisations and better fault handling by Russell King.
 *
 * *** NOTE ***
 * This code is not portable to processors with late data abort handling.
 */
#define CODING_BITS(i)  (i & 0x0e000000)
#define COND_BITS(i)    (i & 0xf0000000)

#define LDST_I_BIT(i)   (i & (1 << 26))         /* Immediate constant   */
#define LDST_P_BIT(i)   (i & (1 << 24))         /* Preindex             */
#define LDST_U_BIT(i)   (i & (1 << 23))         /* Add offset           */
#define LDST_W_BIT(i)   (i & (1 << 21))         /* Writeback            */
#define LDST_L_BIT(i)   (i & (1 << 20))         /* Load                 */

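/*
 * LDST_P_EQ_U() tests whether the P (pre-index) and U (add-offset) bits
 * hold the same value: XORing the instruction with itself shifted right
 * by one lines bit 24 (P) up with bit 23 (U), so the masked result is
 * zero exactly when P == U.  For example, LDMIA (P=0, U=1) and LDMDB
 * (P=1, U=0) give a non-zero result, while LDMIB (P=1, U=1) gives zero.
 */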
#define LDST_P_EQ_U(i)  ((((i) ^ ((i) >> 1)) & (1 << 23)) == 0)

#define LDSTHD_I_BIT(i) (i & (1 << 22))         /* double/half-word immed */
#define LDM_S_BIT(i)    (i & (1 << 22))         /* write CPSR from SPSR */

#define RN_BITS(i)      ((i >> 16) & 15)        /* Rn                   */
#define RD_BITS(i)      ((i >> 12) & 15)        /* Rd                   */
#define RM_BITS(i)      (i & 15)                /* Rm                   */

#define REGMASK_BITS(i) (i & 0xffff)
#define OFFSET_BITS(i)  (i & 0x0fff)

#define IS_SHIFT(i)     (i & 0x0ff0)
#define SHIFT_BITS(i)   ((i >> 7) & 0x1f)
#define SHIFT_TYPE(i)   (i & 0x60)
#define SHIFT_LSL       0x00
#define SHIFT_LSR       0x20
#define SHIFT_ASR       0x40
#define SHIFT_RORRRX    0x60

#define BAD_INSTR       0xdeadc0de

/* Thumb-2 32 bit format per ARMv7 DDI0406A A6.3: hi16 is e800h, f000h or f800h */
#define IS_T32(hi16) \
        (((hi16) & 0xe000) == 0xe000 && ((hi16) & 0x1800))
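/*
 * For example (illustrative), 0xe92d - the first halfword of a 32-bit
 * Thumb-2 STMDB sp! - matches IS_T32(), while 0xe7fe (a 16-bit
 * unconditional branch) does not, since bits [12:11] are clear there.
 */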

static unsigned long ai_user;
static unsigned long ai_sys;
static void *ai_sys_last_pc;
static unsigned long ai_skipped;
static unsigned long ai_half;
static unsigned long ai_word;
static unsigned long ai_dword;
static unsigned long ai_multi;
static int ai_usermode;
static unsigned long cr_no_alignment;

core_param(alignment, ai_usermode, int, 0600);

#define UM_WARN         (1 << 0)
#define UM_FIXUP        (1 << 1)
#define UM_SIGNAL       (1 << 2)
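/*
 * ai_usermode is a bitmask of the UM_* flags above, giving modes 0-5:
 * 0 = ignore, 1 = warn, 2 = fixup, 3 = fixup+warn, 4 = signal,
 * 5 = signal+warn.  It can be set with "alignment=N" on the kernel
 * command line (via core_param above) or by writing a digit to
 * /proc/cpu/alignment, e.g. "echo 3 > /proc/cpu/alignment".
 */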

/* Return true if and only if the ARMv6 unaligned access model is in use. */
static bool cpu_is_v6_unaligned(void)
{
        return cpu_architecture() >= CPU_ARCH_ARMv6 && get_cr() & CR_U;
}

static int safe_usermode(int new_usermode, bool warn)
{
        /*
         * ARMv6 and later CPUs can perform unaligned accesses for
         * most single load and store instructions up to word size.
         * LDM, STM, LDRD and STRD still need to be handled.
         *
         * Ignoring the alignment fault is not an option on these
         * CPUs since we spin re-faulting the instruction without
         * making any progress.
         */
        if (cpu_is_v6_unaligned() && !(new_usermode & (UM_FIXUP | UM_SIGNAL))) {
                new_usermode |= UM_FIXUP;

                if (warn)
                        pr_warn("alignment: ignoring faults is unsafe on this CPU.  Defaulting to fixup mode.\n");
        }

        return new_usermode;
}

#ifdef CONFIG_PROC_FS
static const char *usermode_action[] = {
        "ignored",
        "warn",
        "fixup",
        "fixup+warn",
        "signal",
        "signal+warn"
};

static int alignment_proc_show(struct seq_file *m, void *v)
{
        seq_printf(m, "User:\t\t%lu\n", ai_user);
        seq_printf(m, "System:\t\t%lu (%pS)\n", ai_sys, ai_sys_last_pc);
        seq_printf(m, "Skipped:\t%lu\n", ai_skipped);
        seq_printf(m, "Half:\t\t%lu\n", ai_half);
        seq_printf(m, "Word:\t\t%lu\n", ai_word);
        if (cpu_architecture() >= CPU_ARCH_ARMv5TE)
                seq_printf(m, "DWord:\t\t%lu\n", ai_dword);
        seq_printf(m, "Multi:\t\t%lu\n", ai_multi);
        seq_printf(m, "User faults:\t%i (%s)\n", ai_usermode,
                        usermode_action[ai_usermode]);

        return 0;
}

static int alignment_proc_open(struct inode *inode, struct file *file)
{
        return single_open(file, alignment_proc_show, NULL);
}

static ssize_t alignment_proc_write(struct file *file, const char __user *buffer,
                                    size_t count, loff_t *pos)
{
        char mode;

        if (count > 0) {
                if (get_user(mode, buffer))
                        return -EFAULT;
                if (mode >= '0' && mode <= '5')
                        ai_usermode = safe_usermode(mode - '0', true);
        }
        return count;
}

static const struct proc_ops alignment_proc_ops = {
        .proc_open      = alignment_proc_open,
        .proc_read      = seq_read,
        .proc_lseek     = seq_lseek,
        .proc_release   = single_release,
        .proc_write     = alignment_proc_write,
};
#endif /* CONFIG_PROC_FS */

union offset_union {
        unsigned long un;
          signed long sn;
};

#define TYPE_ERROR      0
#define TYPE_FAULT      1
#define TYPE_LDST       2
#define TYPE_DONE       3

#ifdef __ARMEB__
#define BE              1
#define FIRST_BYTE_16   "mov    %1, %1, ror #8\n"
#define FIRST_BYTE_32   "mov    %1, %1, ror #24\n"
#define NEXT_BYTE       "ror #24"
#else
#define BE              0
#define FIRST_BYTE_16
#define FIRST_BYTE_32
#define NEXT_BYTE       "lsr #8"
#endif

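/*
 * The helpers below access memory one byte at a time so that no single
 * access can itself fault on alignment.  Each load/store carries an
 * entry in __ex_table: e.g. in __get8_unaligned_check(), a fault at
 * label "1:" jumps to the fixup at label "3:", which sets "err" and
 * resumes at label "2:", letting the caller branch to its local
 * "fault" label instead of oopsing.  The "t" variants (ldrbt/strbt)
 * are used for userspace addresses.
 */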
#define __get8_unaligned_check(ins,val,addr,err)        \
        __asm__(                                        \
 ARM(   "1:     "ins"   %1, [%2], #1\n" )               \
 THUMB( "1:     "ins"   %1, [%2]\n"     )               \
 THUMB( "       add     %2, %2, #1\n"   )               \
        "2:\n"                                          \
        "       .pushsection .text.fixup,\"ax\"\n"      \
        "       .align  2\n"                            \
        "3:     mov     %0, #1\n"                       \
        "       b       2b\n"                           \
        "       .popsection\n"                          \
        "       .pushsection __ex_table,\"a\"\n"        \
        "       .align  3\n"                            \
        "       .long   1b, 3b\n"                       \
        "       .popsection\n"                          \
        : "=r" (err), "=&r" (val), "=r" (addr)          \
        : "0" (err), "2" (addr))

#define __get16_unaligned_check(ins,val,addr)                   \
        do {                                                    \
                unsigned int err = 0, v, a = addr;              \
                __get8_unaligned_check(ins,v,a,err);            \
                val =  v << ((BE) ? 8 : 0);                     \
                __get8_unaligned_check(ins,v,a,err);            \
                val |= v << ((BE) ? 0 : 8);                     \
                if (err)                                        \
                        goto fault;                             \
        } while (0)

#define get16_unaligned_check(val,addr) \
        __get16_unaligned_check("ldrb",val,addr)

#define get16t_unaligned_check(val,addr) \
        __get16_unaligned_check("ldrbt",val,addr)

#define __get32_unaligned_check(ins,val,addr)                   \
        do {                                                    \
                unsigned int err = 0, v, a = addr;              \
                __get8_unaligned_check(ins,v,a,err);            \
                val =  v << ((BE) ? 24 :  0);                   \
                __get8_unaligned_check(ins,v,a,err);            \
                val |= v << ((BE) ? 16 :  8);                   \
                __get8_unaligned_check(ins,v,a,err);            \
                val |= v << ((BE) ?  8 : 16);                   \
                __get8_unaligned_check(ins,v,a,err);            \
                val |= v << ((BE) ?  0 : 24);                   \
                if (err)                                        \
                        goto fault;                             \
        } while (0)

#define get32_unaligned_check(val,addr) \
        __get32_unaligned_check("ldrb",val,addr)

#define get32t_unaligned_check(val,addr) \
        __get32_unaligned_check("ldrbt",val,addr)

#define __put16_unaligned_check(ins,val,addr)                   \
        do {                                                    \
                unsigned int err = 0, v = val, a = addr;        \
                __asm__( FIRST_BYTE_16                          \
         ARM(   "1:     "ins"   %1, [%2], #1\n" )               \
         THUMB( "1:     "ins"   %1, [%2]\n"     )               \
         THUMB( "       add     %2, %2, #1\n"   )               \
                "       mov     %1, %1, "NEXT_BYTE"\n"          \
                "2:     "ins"   %1, [%2]\n"                     \
                "3:\n"                                          \
                "       .pushsection .text.fixup,\"ax\"\n"      \
                "       .align  2\n"                            \
                "4:     mov     %0, #1\n"                       \
                "       b       3b\n"                           \
                "       .popsection\n"                          \
                "       .pushsection __ex_table,\"a\"\n"        \
                "       .align  3\n"                            \
                "       .long   1b, 4b\n"                       \
                "       .long   2b, 4b\n"                       \
                "       .popsection\n"                          \
                : "=r" (err), "=&r" (v), "=&r" (a)              \
                : "0" (err), "1" (v), "2" (a));                 \
                if (err)                                        \
                        goto fault;                             \
        } while (0)

#define put16_unaligned_check(val,addr)  \
        __put16_unaligned_check("strb",val,addr)

#define put16t_unaligned_check(val,addr) \
        __put16_unaligned_check("strbt",val,addr)

#define __put32_unaligned_check(ins,val,addr)                   \
        do {                                                    \
                unsigned int err = 0, v = val, a = addr;        \
                __asm__( FIRST_BYTE_32                          \
         ARM(   "1:     "ins"   %1, [%2], #1\n" )               \
         THUMB( "1:     "ins"   %1, [%2]\n"     )               \
         THUMB( "       add     %2, %2, #1\n"   )               \
                "       mov     %1, %1, "NEXT_BYTE"\n"          \
         ARM(   "2:     "ins"   %1, [%2], #1\n" )               \
         THUMB( "2:     "ins"   %1, [%2]\n"     )               \
         THUMB( "       add     %2, %2, #1\n"   )               \
                "       mov     %1, %1, "NEXT_BYTE"\n"          \
         ARM(   "3:     "ins"   %1, [%2], #1\n" )               \
         THUMB( "3:     "ins"   %1, [%2]\n"     )               \
         THUMB( "       add     %2, %2, #1\n"   )               \
                "       mov     %1, %1, "NEXT_BYTE"\n"          \
                "4:     "ins"   %1, [%2]\n"                     \
                "5:\n"                                          \
                "       .pushsection .text.fixup,\"ax\"\n"      \
                "       .align  2\n"                            \
                "6:     mov     %0, #1\n"                       \
                "       b       5b\n"                           \
                "       .popsection\n"                          \
                "       .pushsection __ex_table,\"a\"\n"        \
                "       .align  3\n"                            \
                "       .long   1b, 6b\n"                       \
                "       .long   2b, 6b\n"                       \
                "       .long   3b, 6b\n"                       \
                "       .long   4b, 6b\n"                       \
                "       .popsection\n"                          \
                : "=r" (err), "=&r" (v), "=&r" (a)              \
                : "0" (err), "1" (v), "2" (a));                 \
                if (err)                                        \
                        goto fault;                             \
        } while (0)

#define put32_unaligned_check(val,addr) \
        __put32_unaligned_check("strb", val, addr)

#define put32t_unaligned_check(val,addr) \
        __put32_unaligned_check("strbt", val, addr)

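/*
 * Complete the addressing-mode side effects of a fixed-up load/store:
 * a post-indexed transfer (P clear) applies the offset to the base
 * register, and writeback also happens for pre-indexed transfers with
 * the W bit set (where the faulting address already includes the
 * offset).
 */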
static void
do_alignment_finish_ldst(unsigned long addr, u32 instr, struct pt_regs *regs, union offset_union offset)
{
        if (!LDST_U_BIT(instr))
                offset.un = -offset.un;

        if (!LDST_P_BIT(instr))
                addr += offset.un;

        if (!LDST_P_BIT(instr) || LDST_W_BIT(instr))
                regs->uregs[RN_BITS(instr)] = addr;
}

static int
do_alignment_ldrhstrh(unsigned long addr, u32 instr, struct pt_regs *regs)
{
        unsigned int rd = RD_BITS(instr);

        ai_half += 1;

        if (user_mode(regs))
                goto user;

        if (LDST_L_BIT(instr)) {
                unsigned long val;
                get16_unaligned_check(val, addr);

                /* signed half-word? */
                if (instr & 0x40)
                        val = (signed long)((signed short) val);

                regs->uregs[rd] = val;
        } else
                put16_unaligned_check(regs->uregs[rd], addr);

        return TYPE_LDST;

 user:
        if (LDST_L_BIT(instr)) {
                unsigned long val;
                unsigned int __ua_flags = uaccess_save_and_enable();

                get16t_unaligned_check(val, addr);
                uaccess_restore(__ua_flags);

                /* signed half-word? */
                if (instr & 0x40)
                        val = (signed long)((signed short) val);

                regs->uregs[rd] = val;
        } else {
                unsigned int __ua_flags = uaccess_save_and_enable();
                put16t_unaligned_check(regs->uregs[rd], addr);
                uaccess_restore(__ua_flags);
        }

        return TYPE_LDST;

 fault:
        return TYPE_FAULT;
}

static int
do_alignment_ldrdstrd(unsigned long addr, u32 instr, struct pt_regs *regs)
{
        unsigned int rd = RD_BITS(instr);
        unsigned int rd2;
        int load;

        if ((instr & 0xfe000000) == 0xe8000000) {
                /* ARMv7 Thumb-2 32-bit LDRD/STRD */
                rd2 = (instr >> 8) & 0xf;
                load = !!(LDST_L_BIT(instr));
        } else if (((rd & 1) == 1) || (rd == 14))
                goto bad;
        else {
                load = ((instr & 0xf0) == 0xd0);
                rd2 = rd + 1;
        }

        ai_dword += 1;

        if (user_mode(regs))
                goto user;

        if (load) {
                unsigned long val;
                get32_unaligned_check(val, addr);
                regs->uregs[rd] = val;
                get32_unaligned_check(val, addr + 4);
                regs->uregs[rd2] = val;
        } else {
                put32_unaligned_check(regs->uregs[rd], addr);
                put32_unaligned_check(regs->uregs[rd2], addr + 4);
        }

        return TYPE_LDST;

 user:
        if (load) {
                unsigned long val, val2;
                unsigned int __ua_flags = uaccess_save_and_enable();

                get32t_unaligned_check(val, addr);
                get32t_unaligned_check(val2, addr + 4);

                uaccess_restore(__ua_flags);

                regs->uregs[rd] = val;
                regs->uregs[rd2] = val2;
        } else {
                unsigned int __ua_flags = uaccess_save_and_enable();
                put32t_unaligned_check(regs->uregs[rd], addr);
                put32t_unaligned_check(regs->uregs[rd2], addr + 4);
                uaccess_restore(__ua_flags);
        }

        return TYPE_LDST;
 bad:
        return TYPE_ERROR;
 fault:
        return TYPE_FAULT;
}

static int
do_alignment_ldrstr(unsigned long addr, u32 instr, struct pt_regs *regs)
{
        unsigned int rd = RD_BITS(instr);

        ai_word += 1;

        if ((!LDST_P_BIT(instr) && LDST_W_BIT(instr)) || user_mode(regs))
                goto trans;

        if (LDST_L_BIT(instr)) {
                unsigned int val;
                get32_unaligned_check(val, addr);
                regs->uregs[rd] = val;
        } else
                put32_unaligned_check(regs->uregs[rd], addr);
        return TYPE_LDST;

 trans:
        if (LDST_L_BIT(instr)) {
                unsigned int val;
                unsigned int __ua_flags = uaccess_save_and_enable();
                get32t_unaligned_check(val, addr);
                uaccess_restore(__ua_flags);
                regs->uregs[rd] = val;
        } else {
                unsigned int __ua_flags = uaccess_save_and_enable();
                put32t_unaligned_check(regs->uregs[rd], addr);
                uaccess_restore(__ua_flags);
        }
        return TYPE_LDST;

 fault:
        return TYPE_FAULT;
}

/*
 * LDM/STM alignment handler.
 *
 * There are 4 variants of this instruction:
 *
 * B = rn pointer before instruction, A = rn pointer after instruction
 *              ------ increasing address ----->
 *              |    | r0 | r1 | ... | rx |    |
 * PU = 01             B                    A
 * PU = 11        B                    A
 * PU = 00        A                    B
 * PU = 10             A                    B
 */
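/*
 * Worked example (illustrative): for "ldmdb r0!, {r1-r3}" (P=1, U=0)
 * with r0 = 0x1000e, nr_regs is 12, so newaddr = eaddr = 0x10002 and
 * the words are transferred from 0x10002, 0x10006 and 0x1000a; since
 * P != U no extra adjustment of eaddr is needed, and writeback leaves
 * r0 = 0x10002.
 */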
static int
do_alignment_ldmstm(unsigned long addr, u32 instr, struct pt_regs *regs)
{
        unsigned int rd, rn, correction, nr_regs, regbits;
        unsigned long eaddr, newaddr;

        if (LDM_S_BIT(instr))
                goto bad;

        correction = 4; /* processor implementation defined */
        regs->ARM_pc += correction;

        ai_multi += 1;

        /* count the number of bytes to be transferred (4 per register in the mask) */
        nr_regs = hweight16(REGMASK_BITS(instr)) * 4;

        rn = RN_BITS(instr);
        newaddr = eaddr = regs->uregs[rn];

        if (!LDST_U_BIT(instr))
                nr_regs = -nr_regs;
        newaddr += nr_regs;
        if (!LDST_U_BIT(instr))
                eaddr = newaddr;

        if (LDST_P_EQ_U(instr)) /* U = P */
                eaddr += 4;

        /*
         * For alignment faults on the ARM922T/ARM920T the MMU makes
         * the FSR (and hence addr) equal to the updated base address
         * of the multiple access rather than the restored value.
         * Switch this message off if we've got an ARM92[02], otherwise
         * [ls]dm alignment faults are noisy!
         */
#if !(defined CONFIG_CPU_ARM922T) && !(defined CONFIG_CPU_ARM920T)
        /*
         * This is a "hint" - we already have eaddr worked out by the
         * processor for us.
         */
        if (addr != eaddr) {
                pr_err("LDMSTM: PC = %08lx, instr = %08x, "
                        "addr = %08lx, eaddr = %08lx\n",
                         instruction_pointer(regs), instr, addr, eaddr);
                show_regs(regs);
        }
#endif

        if (user_mode(regs)) {
                unsigned int __ua_flags = uaccess_save_and_enable();
                for (regbits = REGMASK_BITS(instr), rd = 0; regbits;
                     regbits >>= 1, rd += 1)
                        if (regbits & 1) {
                                if (LDST_L_BIT(instr)) {
                                        unsigned int val;
                                        get32t_unaligned_check(val, eaddr);
                                        regs->uregs[rd] = val;
                                } else
                                        put32t_unaligned_check(regs->uregs[rd], eaddr);
                                eaddr += 4;
                        }
                uaccess_restore(__ua_flags);
        } else {
                for (regbits = REGMASK_BITS(instr), rd = 0; regbits;
                     regbits >>= 1, rd += 1)
                        if (regbits & 1) {
                                if (LDST_L_BIT(instr)) {
                                        unsigned int val;
                                        get32_unaligned_check(val, eaddr);
                                        regs->uregs[rd] = val;
                                } else
                                        put32_unaligned_check(regs->uregs[rd], eaddr);
                                eaddr += 4;
                        }
        }

        if (LDST_W_BIT(instr))
                regs->uregs[rn] = newaddr;
        if (!LDST_L_BIT(instr) || !(REGMASK_BITS(instr) & (1 << 15)))
                regs->ARM_pc -= correction;
        return TYPE_DONE;

fault:
        regs->ARM_pc -= correction;
        return TYPE_FAULT;

bad:
        pr_err("Alignment trap: not handling ldm with s-bit set\n");
        return TYPE_ERROR;
}

/*
 * Convert Thumb ld/st instruction forms to equivalent ARM instructions so
 * we can reuse ARM userland alignment fault fixups for Thumb.
 *
 * This implementation was initially based on the algorithm found in
 * gdb/sim/arm/thumbemu.c. It is basically just a code reduction of the
 * same, converting only Thumb ld/st instruction forms to equivalent ARM
 * forms.
 *
 * NOTES:
 * 1. Comments below refer to ARM ARM DDI0100E Thumb Instruction sections.
 * 2. If for some reason we're passed a non-ld/st Thumb instruction to
 *    decode, we return 0xdeadc0de. This should never happen under normal
 *    circumstances but if it does, we've got other problems to deal with
 *    elsewhere and we obviously can't fix those problems here.
 */

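/*
 * Example (illustrative): the Thumb STR(1) encoding 0x6048, i.e.
 * "str r0, [r1, #4]", translates to the ARM encoding 0xe5810004,
 * which the ARM decoding paths in do_alignment() already handle.
 */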
static unsigned long
thumb2arm(u16 tinstr)
{
        u32 L = (tinstr & (1<<11)) >> 11;

        switch ((tinstr & 0xf800) >> 11) {
        /* 6.5.1 Format 1: */
        case 0x6000 >> 11:                              /* 7.1.52 STR(1) */
        case 0x6800 >> 11:                              /* 7.1.26 LDR(1) */
        case 0x7000 >> 11:                              /* 7.1.55 STRB(1) */
        case 0x7800 >> 11:                              /* 7.1.30 LDRB(1) */
                return 0xe5800000 |
                        ((tinstr & (1<<12)) << (22-12)) |       /* fixup */
                        (L<<20) |                               /* L==1? */
                        ((tinstr & (7<<0)) << (12-0)) |         /* Rd */
                        ((tinstr & (7<<3)) << (16-3)) |         /* Rn */
                        ((tinstr & (31<<6)) >>                  /* immed_5 */
                                (6 - ((tinstr & (1<<12)) ? 0 : 2)));
        case 0x8000 >> 11:                              /* 7.1.57 STRH(1) */
        case 0x8800 >> 11:                              /* 7.1.32 LDRH(1) */
                return 0xe1c000b0 |
                        (L<<20) |                               /* L==1? */
                        ((tinstr & (7<<0)) << (12-0)) |         /* Rd */
                        ((tinstr & (7<<3)) << (16-3)) |         /* Rn */
                        ((tinstr & (7<<6)) >> (6-1)) |   /* immed_5[2:0] */
                        ((tinstr & (3<<9)) >> (9-8));    /* immed_5[4:3] */

        /* 6.5.1 Format 2: */
        case 0x5000 >> 11:
        case 0x5800 >> 11:
                {
                        static const u32 subset[8] = {
                                0xe7800000,             /* 7.1.53 STR(2) */
                                0xe18000b0,             /* 7.1.58 STRH(2) */
                                0xe7c00000,             /* 7.1.56 STRB(2) */
                                0xe19000d0,             /* 7.1.34 LDRSB */
                                0xe7900000,             /* 7.1.27 LDR(2) */
                                0xe19000b0,             /* 7.1.33 LDRH(2) */
                                0xe7d00000,             /* 7.1.31 LDRB(2) */
                                0xe19000f0              /* 7.1.35 LDRSH */
                        };
                        return subset[(tinstr & (7<<9)) >> 9] |
                            ((tinstr & (7<<0)) << (12-0)) |     /* Rd */
                            ((tinstr & (7<<3)) << (16-3)) |     /* Rn */
                            ((tinstr & (7<<6)) >> (6-0));       /* Rm */
                }

        /* 6.5.1 Format 3: */
        case 0x4800 >> 11:                              /* 7.1.28 LDR(3) */
                /* NOTE: This case is not technically possible. We're
                 *       loading 32-bit memory data via PC relative
                 *       addressing mode. So we can and should eliminate
                 *       this case. But I'll leave it here for now.
                 */
                return 0xe59f0000 |
                    ((tinstr & (7<<8)) << (12-8)) |             /* Rd */
                    ((tinstr & 255) << (2-0));                  /* immed_8 */

        /* 6.5.1 Format 4: */
        case 0x9000 >> 11:                              /* 7.1.54 STR(3) */
        case 0x9800 >> 11:                              /* 7.1.29 LDR(4) */
                return 0xe58d0000 |
                        (L<<20) |                               /* L==1? */
                        ((tinstr & (7<<8)) << (12-8)) |         /* Rd */
                        ((tinstr & 255) << 2);                  /* immed_8 */

        /* 6.6.1 Format 1: */
        case 0xc000 >> 11:                              /* 7.1.51 STMIA */
        case 0xc800 >> 11:                              /* 7.1.25 LDMIA */
                {
                        u32 Rn = (tinstr & (7<<8)) >> 8;
                        u32 W = ((L<<Rn) & (tinstr&255)) ? 0 : 1<<21;

                        return 0xe8800000 | W | (L<<20) | (Rn<<16) |
                                (tinstr&255);
                }

        /* 6.6.1 Format 2: */
        case 0xb000 >> 11:                              /* 7.1.48 PUSH */
        case 0xb800 >> 11:                              /* 7.1.47 POP */
                if ((tinstr & (3 << 9)) == 0x0400) {
                        static const u32 subset[4] = {
                                0xe92d0000,     /* STMDB sp!,{registers} */
                                0xe92d4000,     /* STMDB sp!,{registers,lr} */
                                0xe8bd0000,     /* LDMIA sp!,{registers} */
                                0xe8bd8000      /* LDMIA sp!,{registers,pc} */
                        };
                        return subset[(L<<1) | ((tinstr & (1<<8)) >> 8)] |
                            (tinstr & 255);             /* register_list */
                }
                /* Else, fall through - for illegal instruction case */

        default:
                return BAD_INSTR;
        }
}

/*
 * Convert a Thumb-2 32 bit LDM, STM, LDRD or STRD instruction into an
 * equivalent instruction that the ARM alignment handler can process,
 * and find the corresponding handler, so that we can reuse the ARM
 * userland alignment fault fixups for Thumb.
 *
 * @pinstr: original Thumb-2 instruction; updated in place with the
 *          converted instruction
 * @regs: register context.
 * @poffset: return offset from faulted addr for later writeback
 *
 * NOTES:
 * 1. Comments below refer to ARMv7 DDI0406A Thumb Instruction sections.
 * 2. The ARMv7 register name Rt corresponds to Rd in the ARMv6 notation
 *    used here (Rd is Rt).
 */
static void *
do_alignment_t32_to_handler(u32 *pinstr, struct pt_regs *regs,
                            union offset_union *poffset)
{
        u32 instr = *pinstr;
        u16 tinst1 = (instr >> 16) & 0xffff;
        u16 tinst2 = instr & 0xffff;

        switch (tinst1 & 0xffe0) {
        /* A6.3.5 Load/Store multiple */
        case 0xe880:            /* STM/STMIA/STMEA,LDM/LDMIA, PUSH/POP T2 */
        case 0xe8a0:            /* ...above writeback version */
        case 0xe900:            /* STMDB/STMFD, LDMDB/LDMEA */
        case 0xe920:            /* ...above writeback version */
                /* no offset decision is needed since the handler calculates it */
                return do_alignment_ldmstm;

        case 0xf840:            /* POP/PUSH T3 (single register) */
                if (RN_BITS(instr) == 13 && (tinst2 & 0x09ff) == 0x0904) {
                        u32 L = !!(LDST_L_BIT(instr));
                        const u32 subset[2] = {
                                0xe92d0000,     /* STMDB sp!,{registers} */
                                0xe8bd0000,     /* LDMIA sp!,{registers} */
                        };
                        *pinstr = subset[L] | (1<<RD_BITS(instr));
                        return do_alignment_ldmstm;
                }
                /* Else fall through for illegal instruction case */
                break;

        /* A6.3.6 Load/store double, STRD/LDRD(immed, lit, reg) */
        case 0xe860:
        case 0xe960:
        case 0xe8e0:
        case 0xe9e0:
                poffset->un = (tinst2 & 0xff) << 2;
                /* Fall through */

        case 0xe940:
        case 0xe9c0:
                return do_alignment_ldrdstrd;

        /*
         * No need to handle load/store instructions up to word size
         * since ARMv6 and later CPUs can perform unaligned accesses.
         */
        default:
                break;
        }
        return NULL;
}

static int alignment_get_arm(struct pt_regs *regs, u32 *ip, u32 *inst)
{
        u32 instr = 0;
        int fault;

        if (user_mode(regs))
                fault = get_user(instr, ip);
        else
                fault = get_kernel_nofault(instr, ip);

        *inst = __mem_to_opcode_arm(instr);

        return fault;
}

static int alignment_get_thumb(struct pt_regs *regs, u16 *ip, u16 *inst)
{
        u16 instr = 0;
        int fault;

        if (user_mode(regs))
                fault = get_user(instr, ip);
        else
                fault = get_kernel_nofault(instr, ip);

        *inst = __mem_to_opcode_thumb16(instr);

        return fault;
}

static int
do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
{
        union offset_union uninitialized_var(offset);
        unsigned long instrptr;
        int (*handler)(unsigned long addr, u32 instr, struct pt_regs *regs);
        unsigned int type;
        u32 instr = 0;
        u16 tinstr = 0;
        int isize = 4;
        int thumb2_32b = 0;
        int fault;

        if (interrupts_enabled(regs))
                local_irq_enable();

        instrptr = instruction_pointer(regs);

        if (thumb_mode(regs)) {
                u16 *ptr = (u16 *)(instrptr & ~1);

                fault = alignment_get_thumb(regs, ptr, &tinstr);
                if (!fault) {
                        if (cpu_architecture() >= CPU_ARCH_ARMv7 &&
                            IS_T32(tinstr)) {
                                /* Thumb-2 32-bit */
                                u16 tinst2;
                                fault = alignment_get_thumb(regs, ptr + 1, &tinst2);
                                instr = __opcode_thumb32_compose(tinstr, tinst2);
                                thumb2_32b = 1;
                        } else {
                                isize = 2;
                                instr = thumb2arm(tinstr);
                        }
                }
        } else {
                fault = alignment_get_arm(regs, (void *)instrptr, &instr);
        }

        if (fault) {
                type = TYPE_FAULT;
                goto bad_or_fault;
        }

        if (user_mode(regs))
                goto user;

        ai_sys += 1;
        ai_sys_last_pc = (void *)instruction_pointer(regs);

 fixup:

        regs->ARM_pc += isize;

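        /*
         * Dispatch on bits 27-25 of the (possibly synthesized) ARM
         * encoding: 0x00000000 covers the extra load/store forms
         * (halfword, doubleword, SWP), 0x04000000 and 0x06000000 the
         * single-word immediate and register forms, and 0x08000000
         * block transfers or, on the Thumb-2 path, a 32-bit encoding
         * resolved by do_alignment_t32_to_handler().
         */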
        switch (CODING_BITS(instr)) {
        case 0x00000000:        /* 3.13.4 load/store instruction extensions */
                if (LDSTHD_I_BIT(instr))
                        offset.un = (instr & 0xf00) >> 4 | (instr & 15);
                else
                        offset.un = regs->uregs[RM_BITS(instr)];

                if ((instr & 0x000000f0) == 0x000000b0 || /* LDRH, STRH */
                    (instr & 0x001000f0) == 0x001000f0)   /* LDRSH */
                        handler = do_alignment_ldrhstrh;
                else if ((instr & 0x001000f0) == 0x000000d0 || /* LDRD */
                         (instr & 0x001000f0) == 0x000000f0)   /* STRD */
                        handler = do_alignment_ldrdstrd;
                else if ((instr & 0x01f00ff0) == 0x01000090) /* SWP */
                        goto swp;
                else
                        goto bad;
                break;

        case 0x04000000:        /* ldr or str immediate */
                if (COND_BITS(instr) == 0xf0000000) /* NEON VLDn, VSTn */
                        goto bad;
                offset.un = OFFSET_BITS(instr);
                handler = do_alignment_ldrstr;
                break;

        case 0x06000000:        /* ldr or str register */
                offset.un = regs->uregs[RM_BITS(instr)];

                if (IS_SHIFT(instr)) {
                        unsigned int shiftval = SHIFT_BITS(instr);

                        switch(SHIFT_TYPE(instr)) {
                        case SHIFT_LSL:
                                offset.un <<= shiftval;
                                break;

                        case SHIFT_LSR:
                                offset.un >>= shiftval;
                                break;

                        case SHIFT_ASR:
                                offset.sn >>= shiftval;
                                break;

                        case SHIFT_RORRRX:
                                if (shiftval == 0) {
                                        offset.un >>= 1;
                                        if (regs->ARM_cpsr & PSR_C_BIT)
                                                offset.un |= 1 << 31;
                                } else
                                        offset.un = offset.un >> shiftval |
                                                          offset.un << (32 - shiftval);
                                break;
                        }
                }
                handler = do_alignment_ldrstr;
                break;

        case 0x08000000:        /* ldm or stm, or thumb-2 32bit instruction */
                if (thumb2_32b) {
                        offset.un = 0;
                        handler = do_alignment_t32_to_handler(&instr, regs, &offset);
                } else {
                        offset.un = 0;
                        handler = do_alignment_ldmstm;
                }
                break;

        default:
                goto bad;
        }

        if (!handler)
                goto bad;
        type = handler(addr, instr, regs);

        if (type == TYPE_ERROR || type == TYPE_FAULT) {
                regs->ARM_pc -= isize;
                goto bad_or_fault;
        }

        if (type == TYPE_LDST)
                do_alignment_finish_ldst(addr, instr, regs, offset);

        return 0;

 bad_or_fault:
        if (type == TYPE_ERROR)
                goto bad;
        /*
         * We got a fault - fix it up, or die.
         */
        do_bad_area(addr, fsr, regs);
        return 0;

 swp:
        pr_err("Alignment trap: not handling swp instruction\n");

 bad:
        /*
         * Oops, we didn't handle the instruction.
         */
        pr_err("Alignment trap: not handling instruction "
                "%0*x at [<%08lx>]\n",
                isize << 1,
                isize == 2 ? tinstr : instr, instrptr);
        ai_skipped += 1;
        return 1;

 user:
        ai_user += 1;

        if (ai_usermode & UM_WARN)
                printk("Alignment trap: %s (%d) PC=0x%08lx Instr=0x%0*x "
                       "Address=0x%08lx FSR 0x%03x\n", current->comm,
                        task_pid_nr(current), instrptr,
                        isize << 1,
                        isize == 2 ? tinstr : instr,
                        addr, fsr);

        if (ai_usermode & UM_FIXUP)
                goto fixup;

        if (ai_usermode & UM_SIGNAL) {
                force_sig_fault(SIGBUS, BUS_ADRALN, (void __user *)addr);
        } else {
                /*
                 * We're about to disable the alignment trap and return to
                 * user space.  But if an interrupt occurs before actually
                 * reaching user space, then the IRQ vector entry code will
                 * notice that we were still in kernel space and therefore
                 * the alignment trap won't be re-enabled in that case as it
                 * is presumed to be always on from kernel space.
                 * Let's prevent that race by disabling interrupts here (they
                 * are disabled on the way back to user space anyway in
                 * entry-common.S) and disable the alignment trap only if
                 * there is no work pending for this thread.
                 */
                raw_local_irq_disable();
                if (!(current_thread_info()->flags & _TIF_WORK_MASK))
                        set_cr(cr_no_alignment);
        }

        return 0;
}

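/*
 * Booting with "noalign" on the kernel command line clears the A bit
 * in the system control register, turning hardware alignment fault
 * checking off altogether.
 */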
static int __init noalign_setup(char *__unused)
{
        set_cr(__clear_cr(CR_A));
        return 1;
}
__setup("noalign", noalign_setup);

/*
 * This needs to be done after sysctl_init, otherwise sys/ will be
 * overwritten.  Actually, this shouldn't be in sys/ at all since
 * it isn't a sysctl, and it doesn't contain sysctl information.
 * We now locate it in /proc/cpu/alignment instead.
 */
static int __init alignment_init(void)
{
#ifdef CONFIG_PROC_FS
        struct proc_dir_entry *res;

        res = proc_create("cpu/alignment", S_IWUSR | S_IRUGO, NULL,
                          &alignment_proc_ops);
        if (!res)
                return -ENOMEM;
#endif

        if (cpu_is_v6_unaligned()) {
                set_cr(__clear_cr(CR_A));
                ai_usermode = safe_usermode(ai_usermode, false);
        }

        cr_no_alignment = get_cr() & ~CR_A;

        hook_fault_code(FAULT_CODE_ALIGNMENT, do_alignment, SIGBUS, BUS_ADRALN,
                        "alignment exception");

        /*
         * ARMv6K and ARMv7 use fault status 3 (0b00011) as Access Flag section
         * fault, not as alignment error.
         *
         * TODO: handle ARMv6K properly. Runtime check for 'K' extension is
         * needed.
         */
        if (cpu_architecture() <= CPU_ARCH_ARMv6) {
                hook_fault_code(3, do_alignment, SIGBUS, BUS_ADRALN,
                                "alignment exception");
        }

        return 0;
}

fs_initcall(alignment_init);