linux/arch/arm/mm/alignment.c
/*
 *  linux/arch/arm/mm/alignment.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 *  Modifications for ARM processor (c) 1995-2001 Russell King
 *  Thumb alignment fault fixups (c) 2004 MontaVista Software, Inc.
 *  - Adapted from gdb/sim/arm/thumbemu.c -- Thumb instruction emulation.
 *    Copyright (C) 1996, Cygnus Software Technologies Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/moduleparam.h>
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/sched/debug.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/sched/signal.h>
#include <linux/uaccess.h>

#include <asm/cp15.h>
#include <asm/system_info.h>
#include <asm/unaligned.h>
#include <asm/opcodes.h>

#include "fault.h"
#include "mm.h"

/*
 * 32-bit misaligned trap handler (c) 1998 San Mehat (CCC) -July 1998
 * /proc/sys/debug/alignment, modified and integrated into
 * Linux 2.1 by Russell King
 *
 * Speed optimisations and better fault handling by Russell King.
 *
 * *** NOTE ***
 * This code is not portable to processors with late data abort handling.
 */
#define CODING_BITS(i)  (i & 0x0e000000)
#define COND_BITS(i)    (i & 0xf0000000)

#define LDST_I_BIT(i)   (i & (1 << 26))         /* Immediate constant   */
#define LDST_P_BIT(i)   (i & (1 << 24))         /* Preindex             */
#define LDST_U_BIT(i)   (i & (1 << 23))         /* Add offset           */
#define LDST_W_BIT(i)   (i & (1 << 21))         /* Writeback            */
#define LDST_L_BIT(i)   (i & (1 << 20))         /* Load                 */

#define LDST_P_EQ_U(i)  ((((i) ^ ((i) >> 1)) & (1 << 23)) == 0)

#define LDSTHD_I_BIT(i) (i & (1 << 22))         /* double/half-word immed */
#define LDM_S_BIT(i)    (i & (1 << 22))         /* write CPSR from SPSR */

#define RN_BITS(i)      ((i >> 16) & 15)        /* Rn                   */
#define RD_BITS(i)      ((i >> 12) & 15)        /* Rd                   */
#define RM_BITS(i)      (i & 15)                /* Rm                   */

#define REGMASK_BITS(i) (i & 0xffff)
#define OFFSET_BITS(i)  (i & 0x0fff)

#define IS_SHIFT(i)     (i & 0x0ff0)
#define SHIFT_BITS(i)   ((i >> 7) & 0x1f)
#define SHIFT_TYPE(i)   (i & 0x60)
#define SHIFT_LSL       0x00
#define SHIFT_LSR       0x20
#define SHIFT_ASR       0x40
#define SHIFT_RORRRX    0x60

#define BAD_INSTR       0xdeadc0de

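/*
 * Worked example (illustrative only): "ldr r3, [r2, #8]" encodes as
 * 0xe5923008, which the macros above decode as
 *
 *      CODING_BITS     0x04000000      (load/store immediate class)
 *      LDST_P_BIT      set             (pre-indexed)
 *      LDST_U_BIT      set             (offset is added)
 *      LDST_W_BIT      clear           (no base writeback)
 *      LDST_L_BIT      set             (load)
 *      RN_BITS 2, RD_BITS 3, OFFSET_BITS 0x008
 */
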
/* Thumb-2 32 bit format per ARMv7 DDI0406A A6.3: first halfword is e800h, f000h or f800h */
#define IS_T32(hi16) \
        (((hi16) & 0xe000) == 0xe000 && ((hi16) & 0x1800))

static unsigned long ai_user;
static unsigned long ai_sys;
static void *ai_sys_last_pc;
static unsigned long ai_skipped;
static unsigned long ai_half;
static unsigned long ai_word;
static unsigned long ai_dword;
static unsigned long ai_multi;
static int ai_usermode;
static unsigned long cr_no_alignment;

core_param(alignment, ai_usermode, int, 0600);

#define UM_WARN         (1 << 0)
#define UM_FIXUP        (1 << 1)
#define UM_SIGNAL       (1 << 2)

/* Return true if and only if the ARMv6 unaligned access model is in use. */
static bool cpu_is_v6_unaligned(void)
{
        return cpu_architecture() >= CPU_ARCH_ARMv6 && get_cr() & CR_U;
}

static int safe_usermode(int new_usermode, bool warn)
{
        /*
         * ARMv6 and later CPUs can perform unaligned accesses for
         * most single load and store instructions up to word size.
         * LDM, STM, LDRD and STRD still need to be handled.
         *
         * Ignoring the alignment fault is not an option on these
         * CPUs since we spin re-faulting the instruction without
         * making any progress.
         */
        if (cpu_is_v6_unaligned() && !(new_usermode & (UM_FIXUP | UM_SIGNAL))) {
                new_usermode |= UM_FIXUP;

                if (warn)
                        pr_warn("alignment: ignoring faults is unsafe on this CPU.  Defaulting to fixup mode.\n");
        }

        return new_usermode;
}

#ifdef CONFIG_PROC_FS
static const char *usermode_action[] = {
        "ignored",
        "warn",
        "fixup",
        "fixup+warn",
        "signal",
        "signal+warn"
};
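
/*
 * Note: the index into usermode_action[] is the UM_* bitmask itself
 * (UM_WARN == 1, UM_FIXUP == 2, UM_SIGNAL == 4), so "fixup+warn" is
 * mode 3 and "signal+warn" is mode 5.  alignment_proc_write() below
 * only accepts the values '0'..'5'.
 */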

static int alignment_proc_show(struct seq_file *m, void *v)
{
        seq_printf(m, "User:\t\t%lu\n", ai_user);
        seq_printf(m, "System:\t\t%lu (%pF)\n", ai_sys, ai_sys_last_pc);
        seq_printf(m, "Skipped:\t%lu\n", ai_skipped);
        seq_printf(m, "Half:\t\t%lu\n", ai_half);
        seq_printf(m, "Word:\t\t%lu\n", ai_word);
        if (cpu_architecture() >= CPU_ARCH_ARMv5TE)
                seq_printf(m, "DWord:\t\t%lu\n", ai_dword);
        seq_printf(m, "Multi:\t\t%lu\n", ai_multi);
        seq_printf(m, "User faults:\t%i (%s)\n", ai_usermode,
                        usermode_action[ai_usermode]);

        return 0;
}

static int alignment_proc_open(struct inode *inode, struct file *file)
{
        return single_open(file, alignment_proc_show, NULL);
}

static ssize_t alignment_proc_write(struct file *file, const char __user *buffer,
                                    size_t count, loff_t *pos)
{
        char mode;

        if (count > 0) {
                if (get_user(mode, buffer))
                        return -EFAULT;
                if (mode >= '0' && mode <= '5')
                        ai_usermode = safe_usermode(mode - '0', true);
        }
        return count;
}

static const struct file_operations alignment_proc_fops = {
        .open           = alignment_proc_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
        .write          = alignment_proc_write,
};
#endif /* CONFIG_PROC_FS */

union offset_union {
        unsigned long un;
          signed long sn;
};

#define TYPE_ERROR      0
#define TYPE_FAULT      1
#define TYPE_LDST       2
#define TYPE_DONE       3

#ifdef __ARMEB__
#define BE              1
#define FIRST_BYTE_16   "mov    %1, %1, ror #8\n"
#define FIRST_BYTE_32   "mov    %1, %1, ror #24\n"
#define NEXT_BYTE       "ror #24"
#else
#define BE              0
#define FIRST_BYTE_16
#define FIRST_BYTE_32
#define NEXT_BYTE       "lsr #8"
#endif

#define __get8_unaligned_check(ins,val,addr,err)        \
        __asm__(                                        \
 ARM(   "1:     "ins"   %1, [%2], #1\n" )               \
 THUMB( "1:     "ins"   %1, [%2]\n"     )               \
 THUMB( "       add     %2, %2, #1\n"   )               \
        "2:\n"                                          \
        "       .pushsection .text.fixup,\"ax\"\n"      \
        "       .align  2\n"                            \
        "3:     mov     %0, #1\n"                       \
        "       b       2b\n"                           \
        "       .popsection\n"                          \
        "       .pushsection __ex_table,\"a\"\n"        \
        "       .align  3\n"                            \
        "       .long   1b, 3b\n"                       \
        "       .popsection\n"                          \
        : "=r" (err), "=&r" (val), "=r" (addr)          \
        : "0" (err), "2" (addr))
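
/*
 * How the fixup above works: the __ex_table entry pairs the access at
 * label 1 with the fixup code at label 3.  If the load at 1 faults,
 * the exception handler branches to 3, which sets err and resumes at
 * label 2, so the caller sees a non-zero err instead of an oops.
 */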

#define __get16_unaligned_check(ins,val,addr)                   \
        do {                                                    \
                unsigned int err = 0, v, a = addr;              \
                __get8_unaligned_check(ins,v,a,err);            \
                val =  v << ((BE) ? 8 : 0);                     \
                __get8_unaligned_check(ins,v,a,err);            \
                val |= v << ((BE) ? 0 : 8);                     \
                if (err)                                        \
                        goto fault;                             \
        } while (0)

#define get16_unaligned_check(val,addr) \
        __get16_unaligned_check("ldrb",val,addr)

#define get16t_unaligned_check(val,addr) \
        __get16_unaligned_check("ldrbt",val,addr)

#define __get32_unaligned_check(ins,val,addr)                   \
        do {                                                    \
                unsigned int err = 0, v, a = addr;              \
                __get8_unaligned_check(ins,v,a,err);            \
                val =  v << ((BE) ? 24 :  0);                   \
                __get8_unaligned_check(ins,v,a,err);            \
                val |= v << ((BE) ? 16 :  8);                   \
                __get8_unaligned_check(ins,v,a,err);            \
                val |= v << ((BE) ?  8 : 16);                   \
                __get8_unaligned_check(ins,v,a,err);            \
                val |= v << ((BE) ?  0 : 24);                   \
                if (err)                                        \
                        goto fault;                             \
        } while (0)

#define get32_unaligned_check(val,addr) \
        __get32_unaligned_check("ldrb",val,addr)

#define get32t_unaligned_check(val,addr) \
        __get32_unaligned_check("ldrbt",val,addr)

#define __put16_unaligned_check(ins,val,addr)                   \
        do {                                                    \
                unsigned int err = 0, v = val, a = addr;        \
                __asm__( FIRST_BYTE_16                          \
 ARM(   "1:     "ins"   %1, [%2], #1\n" )               \
 THUMB( "1:     "ins"   %1, [%2]\n"     )               \
 THUMB( "       add     %2, %2, #1\n"   )               \
                "       mov     %1, %1, "NEXT_BYTE"\n"          \
                "2:     "ins"   %1, [%2]\n"                     \
                "3:\n"                                          \
                "       .pushsection .text.fixup,\"ax\"\n"      \
                "       .align  2\n"                            \
                "4:     mov     %0, #1\n"                       \
                "       b       3b\n"                           \
                "       .popsection\n"                          \
                "       .pushsection __ex_table,\"a\"\n"        \
                "       .align  3\n"                            \
                "       .long   1b, 4b\n"                       \
                "       .long   2b, 4b\n"                       \
                "       .popsection\n"                          \
                : "=r" (err), "=&r" (v), "=&r" (a)              \
                : "0" (err), "1" (v), "2" (a));                 \
                if (err)                                        \
                        goto fault;                             \
        } while (0)

#define put16_unaligned_check(val,addr)  \
        __put16_unaligned_check("strb",val,addr)

#define put16t_unaligned_check(val,addr) \
        __put16_unaligned_check("strbt",val,addr)

#define __put32_unaligned_check(ins,val,addr)                   \
        do {                                                    \
                unsigned int err = 0, v = val, a = addr;        \
                __asm__( FIRST_BYTE_32                          \
 ARM(   "1:     "ins"   %1, [%2], #1\n" )               \
 THUMB( "1:     "ins"   %1, [%2]\n"     )               \
 THUMB( "       add     %2, %2, #1\n"   )               \
                "       mov     %1, %1, "NEXT_BYTE"\n"          \
 ARM(   "2:     "ins"   %1, [%2], #1\n" )               \
 THUMB( "2:     "ins"   %1, [%2]\n"     )               \
 THUMB( "       add     %2, %2, #1\n"   )               \
                "       mov     %1, %1, "NEXT_BYTE"\n"          \
 ARM(   "3:     "ins"   %1, [%2], #1\n" )               \
 THUMB( "3:     "ins"   %1, [%2]\n"     )               \
 THUMB( "       add     %2, %2, #1\n"   )               \
                "       mov     %1, %1, "NEXT_BYTE"\n"          \
                "4:     "ins"   %1, [%2]\n"                     \
                "5:\n"                                          \
                "       .pushsection .text.fixup,\"ax\"\n"      \
                "       .align  2\n"                            \
                "6:     mov     %0, #1\n"                       \
                "       b       5b\n"                           \
                "       .popsection\n"                          \
                "       .pushsection __ex_table,\"a\"\n"        \
                "       .align  3\n"                            \
                "       .long   1b, 6b\n"                       \
                "       .long   2b, 6b\n"                       \
                "       .long   3b, 6b\n"                       \
                "       .long   4b, 6b\n"                       \
                "       .popsection\n"                          \
                : "=r" (err), "=&r" (v), "=&r" (a)              \
                : "0" (err), "1" (v), "2" (a));                 \
                if (err)                                        \
                        goto fault;                             \
        } while (0)

#define put32_unaligned_check(val,addr) \
        __put32_unaligned_check("strb", val, addr)

#define put32t_unaligned_check(val,addr) \
        __put32_unaligned_check("strbt", val, addr)

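/*
 * Usage sketch (illustrative): all of the accessors above "goto fault"
 * on an access error, so every caller must provide a local fault label,
 * as the handlers below do:
 *
 *      unsigned long val;
 *      get16_unaligned_check(val, addr);       - may branch to fault
 *      ...
 * fault:
 *      return TYPE_FAULT;
 */
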
static void
do_alignment_finish_ldst(unsigned long addr, unsigned long instr, struct pt_regs *regs, union offset_union offset)
{
        if (!LDST_U_BIT(instr))
                offset.un = -offset.un;

        if (!LDST_P_BIT(instr))
                addr += offset.un;

        if (!LDST_P_BIT(instr) || LDST_W_BIT(instr))
                regs->uregs[RN_BITS(instr)] = addr;
}
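
/*
 * Worked example (illustrative only): for the post-indexed
 * "ldr r0, [r1], #4" (P=0, U=1, W=0), addr arrives here as the original
 * r1; the offset is added because P is clear, and r1 is written back as
 * addr + 4, matching the architectural post-index writeback.
 */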

static int
do_alignment_ldrhstrh(unsigned long addr, unsigned long instr, struct pt_regs *regs)
{
        unsigned int rd = RD_BITS(instr);

        ai_half += 1;

        if (user_mode(regs))
                goto user;

        if (LDST_L_BIT(instr)) {
                unsigned long val;
                get16_unaligned_check(val, addr);

                /* signed half-word? */
                if (instr & 0x40)
                        val = (signed long)((signed short) val);

                regs->uregs[rd] = val;
        } else
                put16_unaligned_check(regs->uregs[rd], addr);

        return TYPE_LDST;

 user:
        if (LDST_L_BIT(instr)) {
                unsigned long val;
                unsigned int __ua_flags = uaccess_save_and_enable();

                get16t_unaligned_check(val, addr);
                uaccess_restore(__ua_flags);

                /* signed half-word? */
                if (instr & 0x40)
                        val = (signed long)((signed short) val);

                regs->uregs[rd] = val;
        } else {
                unsigned int __ua_flags = uaccess_save_and_enable();
                put16t_unaligned_check(regs->uregs[rd], addr);
                uaccess_restore(__ua_flags);
        }

        return TYPE_LDST;

 fault:
        return TYPE_FAULT;
}

static int
do_alignment_ldrdstrd(unsigned long addr, unsigned long instr,
                      struct pt_regs *regs)
{
        unsigned int rd = RD_BITS(instr);
        unsigned int rd2;
        int load;

        if ((instr & 0xfe000000) == 0xe8000000) {
                /* ARMv7 Thumb-2 32-bit LDRD/STRD */
                rd2 = (instr >> 8) & 0xf;
                load = !!(LDST_L_BIT(instr));
        } else if (((rd & 1) == 1) || (rd == 14))
                goto bad;
        else {
                load = ((instr & 0xf0) == 0xd0);
                rd2 = rd + 1;
        }

        ai_dword += 1;

        if (user_mode(regs))
                goto user;

        if (load) {
                unsigned long val;
                get32_unaligned_check(val, addr);
                regs->uregs[rd] = val;
                get32_unaligned_check(val, addr + 4);
                regs->uregs[rd2] = val;
        } else {
                put32_unaligned_check(regs->uregs[rd], addr);
                put32_unaligned_check(regs->uregs[rd2], addr + 4);
        }

        return TYPE_LDST;

 user:
        if (load) {
                unsigned long val, val2;
                unsigned int __ua_flags = uaccess_save_and_enable();

                get32t_unaligned_check(val, addr);
                get32t_unaligned_check(val2, addr + 4);

                uaccess_restore(__ua_flags);

                regs->uregs[rd] = val;
                regs->uregs[rd2] = val2;
        } else {
                unsigned int __ua_flags = uaccess_save_and_enable();
                put32t_unaligned_check(regs->uregs[rd], addr);
                put32t_unaligned_check(regs->uregs[rd2], addr + 4);
                uaccess_restore(__ua_flags);
        }

        return TYPE_LDST;
 bad:
        return TYPE_ERROR;
 fault:
        return TYPE_FAULT;
}

static int
do_alignment_ldrstr(unsigned long addr, unsigned long instr, struct pt_regs *regs)
{
        unsigned int rd = RD_BITS(instr);

        ai_word += 1;

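        /*
         * P=0 together with W=1 encodes LDRT/STRT, a "translated"
         * user-mode access, so it must use the user accessors even
         * when the fault was taken in kernel mode.
         */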
        if ((!LDST_P_BIT(instr) && LDST_W_BIT(instr)) || user_mode(regs))
                goto trans;

        if (LDST_L_BIT(instr)) {
                unsigned int val;
                get32_unaligned_check(val, addr);
                regs->uregs[rd] = val;
        } else
                put32_unaligned_check(regs->uregs[rd], addr);
        return TYPE_LDST;

 trans:
        if (LDST_L_BIT(instr)) {
                unsigned int val;
                unsigned int __ua_flags = uaccess_save_and_enable();
                get32t_unaligned_check(val, addr);
                uaccess_restore(__ua_flags);
                regs->uregs[rd] = val;
        } else {
                unsigned int __ua_flags = uaccess_save_and_enable();
                put32t_unaligned_check(regs->uregs[rd], addr);
                uaccess_restore(__ua_flags);
        }
        return TYPE_LDST;

 fault:
        return TYPE_FAULT;
}

/*
 * LDM/STM alignment handler.
 *
 * There are 4 variants of this instruction:
 *
 * B = rn pointer before instruction, A = rn pointer after instruction
 *              ------ increasing address ----->
 *              |    | r0 | r1 | ... | rx |    |
 * PU = 01             B                    A
 * PU = 11        B                    A
 * PU = 00        A                    B
 * PU = 10             A                    B
 */
static int
do_alignment_ldmstm(unsigned long addr, unsigned long instr, struct pt_regs *regs)
{
        unsigned int rd, rn, correction, nr_regs, regbits;
        unsigned long eaddr, newaddr;

        if (LDM_S_BIT(instr))
                goto bad;

        correction = 4; /* processor implementation defined */
        regs->ARM_pc += correction;

        ai_multi += 1;

        /* count the number of bytes to transfer: 4 per register in the mask */
        nr_regs = hweight16(REGMASK_BITS(instr)) * 4;

        rn = RN_BITS(instr);
        newaddr = eaddr = regs->uregs[rn];

        if (!LDST_U_BIT(instr))
                nr_regs = -nr_regs;
        newaddr += nr_regs;
        if (!LDST_U_BIT(instr))
                eaddr = newaddr;

        if (LDST_P_EQ_U(instr)) /* U = P */
                eaddr += 4;

        /*
         * For alignment faults on the ARM922T/ARM920T the MMU makes
         * the FSR (and hence addr) equal to the updated base address
         * of the multiple access rather than the restored value.
         * Switch this message off if we've got an ARM92[02], otherwise
         * [ls]dm alignment faults are noisy!
         */
#if !(defined CONFIG_CPU_ARM922T) && !(defined CONFIG_CPU_ARM920T)
        /*
         * This is a "hint" - we already have eaddr worked out by the
         * processor for us.
         */
        if (addr != eaddr) {
                pr_err("LDMSTM: PC = %08lx, instr = %08lx, "
                        "addr = %08lx, eaddr = %08lx\n",
                         instruction_pointer(regs), instr, addr, eaddr);
                show_regs(regs);
        }
#endif

        if (user_mode(regs)) {
                unsigned int __ua_flags = uaccess_save_and_enable();
                for (regbits = REGMASK_BITS(instr), rd = 0; regbits;
                     regbits >>= 1, rd += 1)
                        if (regbits & 1) {
                                if (LDST_L_BIT(instr)) {
                                        unsigned int val;
                                        get32t_unaligned_check(val, eaddr);
                                        regs->uregs[rd] = val;
                                } else
                                        put32t_unaligned_check(regs->uregs[rd], eaddr);
                                eaddr += 4;
                        }
                uaccess_restore(__ua_flags);
        } else {
                for (regbits = REGMASK_BITS(instr), rd = 0; regbits;
                     regbits >>= 1, rd += 1)
                        if (regbits & 1) {
                                if (LDST_L_BIT(instr)) {
                                        unsigned int val;
                                        get32_unaligned_check(val, eaddr);
                                        regs->uregs[rd] = val;
                                } else
                                        put32_unaligned_check(regs->uregs[rd], eaddr);
                                eaddr += 4;
                        }
        }

        if (LDST_W_BIT(instr))
                regs->uregs[rn] = newaddr;
        if (!LDST_L_BIT(instr) || !(REGMASK_BITS(instr) & (1 << 15)))
                regs->ARM_pc -= correction;
        return TYPE_DONE;

fault:
        regs->ARM_pc -= correction;
        return TYPE_FAULT;

bad:
        pr_err("Alignment trap: not handling ldm with s-bit set\n");
        return TYPE_ERROR;
}
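
/*
 * Worked example (illustrative only): for "ldmia r0!, {r1-r3}" with
 * r0 = 0x1001 (P=0, U=1, W=1), the code above computes nr_regs = 12,
 * eaddr = 0x1001 and newaddr = 0x100d; r1..r3 are read from eaddr,
 * eaddr + 4 and eaddr + 8, and the writeback leaves r0 = newaddr.
 */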

/*
 * Convert Thumb ld/st instruction forms to equivalent ARM instructions so
 * we can reuse ARM userland alignment fault fixups for Thumb.
 *
 * This implementation was initially based on the algorithm found in
 * gdb/sim/arm/thumbemu.c. It is essentially a reduction of that code,
 * converting only the Thumb ld/st instruction forms to their equivalent
 * ARM forms.
 *
 * NOTES:
 * 1. Comments below refer to ARM ARM DDI0100E Thumb Instruction sections.
 * 2. If for some reason we're passed a non-ld/st Thumb instruction to
 *    decode, we return 0xdeadc0de. This should never happen under normal
 *    circumstances but if it does, we've got other problems to deal with
 *    elsewhere and we obviously can't fix those problems here.
 */

static unsigned long
thumb2arm(u16 tinstr)
{
        u32 L = (tinstr & (1<<11)) >> 11;

        switch ((tinstr & 0xf800) >> 11) {
        /* 6.5.1 Format 1: */
        case 0x6000 >> 11:                              /* 7.1.52 STR(1) */
        case 0x6800 >> 11:                              /* 7.1.26 LDR(1) */
        case 0x7000 >> 11:                              /* 7.1.55 STRB(1) */
        case 0x7800 >> 11:                              /* 7.1.30 LDRB(1) */
                return 0xe5800000 |
                        ((tinstr & (1<<12)) << (22-12)) |       /* fixup */
                        (L<<20) |                               /* L==1? */
                        ((tinstr & (7<<0)) << (12-0)) |         /* Rd */
                        ((tinstr & (7<<3)) << (16-3)) |         /* Rn */
                        ((tinstr & (31<<6)) >>                  /* immed_5 */
                                (6 - ((tinstr & (1<<12)) ? 0 : 2)));
        case 0x8000 >> 11:                              /* 7.1.57 STRH(1) */
        case 0x8800 >> 11:                              /* 7.1.32 LDRH(1) */
                return 0xe1c000b0 |
                        (L<<20) |                               /* L==1? */
                        ((tinstr & (7<<0)) << (12-0)) |         /* Rd */
                        ((tinstr & (7<<3)) << (16-3)) |         /* Rn */
                        ((tinstr & (7<<6)) >> (6-1)) |   /* immed_5[2:0] */
                        ((tinstr & (3<<9)) >> (9-8));    /* immed_5[4:3] */

        /* 6.5.1 Format 2: */
        case 0x5000 >> 11:
        case 0x5800 >> 11:
                {
                        static const u32 subset[8] = {
                                0xe7800000,             /* 7.1.53 STR(2) */
                                0xe18000b0,             /* 7.1.58 STRH(2) */
                                0xe7c00000,             /* 7.1.56 STRB(2) */
                                0xe19000d0,             /* 7.1.34 LDRSB */
                                0xe7900000,             /* 7.1.27 LDR(2) */
                                0xe19000b0,             /* 7.1.33 LDRH(2) */
                                0xe7d00000,             /* 7.1.31 LDRB(2) */
                                0xe19000f0              /* 7.1.35 LDRSH */
                        };
                        return subset[(tinstr & (7<<9)) >> 9] |
                            ((tinstr & (7<<0)) << (12-0)) |     /* Rd */
                            ((tinstr & (7<<3)) << (16-3)) |     /* Rn */
                            ((tinstr & (7<<6)) >> (6-0));       /* Rm */
                }

        /* 6.5.1 Format 3: */
        case 0x4800 >> 11:                              /* 7.1.28 LDR(3) */
                /* NOTE: This case is not technically possible. We're
                 *       loading 32-bit memory data via PC relative
                 *       addressing mode. So we can and should eliminate
                 *       this case. But I'll leave it here for now.
                 */
                return 0xe59f0000 |
                    ((tinstr & (7<<8)) << (12-8)) |             /* Rd */
                    ((tinstr & 255) << (2-0));                  /* immed_8 */

        /* 6.5.1 Format 4: */
        case 0x9000 >> 11:                              /* 7.1.54 STR(3) */
        case 0x9800 >> 11:                              /* 7.1.29 LDR(4) */
                return 0xe58d0000 |
                        (L<<20) |                               /* L==1? */
                        ((tinstr & (7<<8)) << (12-8)) |         /* Rd */
                        ((tinstr & 255) << 2);                  /* immed_8 */

        /* 6.6.1 Format 1: */
        case 0xc000 >> 11:                              /* 7.1.51 STMIA */
        case 0xc800 >> 11:                              /* 7.1.25 LDMIA */
                {
                        u32 Rn = (tinstr & (7<<8)) >> 8;
                        u32 W = ((L<<Rn) & (tinstr&255)) ? 0 : 1<<21;

                        return 0xe8800000 | W | (L<<20) | (Rn<<16) |
                                (tinstr&255);
                }

        /* 6.6.1 Format 2: */
        case 0xb000 >> 11:                              /* 7.1.48 PUSH */
        case 0xb800 >> 11:                              /* 7.1.47 POP */
                if ((tinstr & (3 << 9)) == 0x0400) {
                        static const u32 subset[4] = {
                                0xe92d0000,     /* STMDB sp!,{registers} */
                                0xe92d4000,     /* STMDB sp!,{registers,lr} */
                                0xe8bd0000,     /* LDMIA sp!,{registers} */
                                0xe8bd8000      /* LDMIA sp!,{registers,pc} */
                        };
                        return subset[(L<<1) | ((tinstr & (1<<8)) >> 8)] |
                            (tinstr & 255);             /* register_list */
                }
                /* Else fall through for illegal instruction case */

        default:
                return BAD_INSTR;
        }
}
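
/*
 * Worked example (illustrative only): the Thumb instruction
 * "ldr r0, [r1, #4]" (0x6848, format 1 above) converts to 0xe5910004,
 * i.e. the ARM "ldr r0, [r1, #4]", which the ARM fixup code already
 * knows how to handle.
 */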

/*
 * Convert a Thumb-2 32-bit LDM, STM, LDRD or STRD instruction into an
 * equivalent that the ARM alignment handler can process, and find the
 * corresponding handler, so that we can reuse the ARM userland alignment
 * fault fixups for Thumb.
 *
 * @pinstr: original Thumb-2 instruction; updated in place with the
 *          converted instruction
 * @regs: register context.
 * @poffset: return offset from faulted addr for later writeback
 *
 * NOTES:
 * 1. Comments below refer to ARMv7 DDI0406A Thumb Instruction sections.
 * 2. Register name Rt from ARMv7 is the same as Rd from ARMv6 (Rd is Rt).
 */
static void *
do_alignment_t32_to_handler(unsigned long *pinstr, struct pt_regs *regs,
                            union offset_union *poffset)
{
        unsigned long instr = *pinstr;
        u16 tinst1 = (instr >> 16) & 0xffff;
        u16 tinst2 = instr & 0xffff;

        switch (tinst1 & 0xffe0) {
        /* A6.3.5 Load/Store multiple */
        case 0xe880:            /* STM/STMIA/STMEA,LDM/LDMIA, PUSH/POP T2 */
        case 0xe8a0:            /* ...above writeback version */
        case 0xe900:            /* STMDB/STMFD, LDMDB/LDMEA */
        case 0xe920:            /* ...above writeback version */
                /* no offset decision needed since the handler calculates it */
                return do_alignment_ldmstm;

        case 0xf840:            /* POP/PUSH T3 (single register) */
                if (RN_BITS(instr) == 13 && (tinst2 & 0x09ff) == 0x0904) {
                        u32 L = !!(LDST_L_BIT(instr));
                        const u32 subset[2] = {
                                0xe92d0000,     /* STMDB sp!,{registers} */
                                0xe8bd0000,     /* LDMIA sp!,{registers} */
                        };
                        *pinstr = subset[L] | (1<<RD_BITS(instr));
                        return do_alignment_ldmstm;
                }
                /* Else fall through for illegal instruction case */
                break;

        /* A6.3.6 Load/store double, STRD/LDRD(immed, lit, reg) */
        case 0xe860:
        case 0xe960:
        case 0xe8e0:
        case 0xe9e0:
                poffset->un = (tinst2 & 0xff) << 2;
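                /* fall through - the cases below use the same handler */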
        case 0xe940:
        case 0xe9c0:
                return do_alignment_ldrdstrd;

        /*
         * No need to handle load/store instructions up to word size
         * since ARMv6 and later CPUs can perform unaligned accesses.
         */
        default:
                break;
        }
        return NULL;
}

static int
do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
{
        union offset_union uninitialized_var(offset);
        unsigned long instr = 0, instrptr;
        int (*handler)(unsigned long addr, unsigned long instr, struct pt_regs *regs);
        unsigned int type;
        unsigned int fault;
        u16 tinstr = 0;
        int isize = 4;
        int thumb2_32b = 0;

        if (interrupts_enabled(regs))
                local_irq_enable();

        instrptr = instruction_pointer(regs);

        if (thumb_mode(regs)) {
                u16 *ptr = (u16 *)(instrptr & ~1);
                fault = probe_kernel_address(ptr, tinstr);
                tinstr = __mem_to_opcode_thumb16(tinstr);
                if (!fault) {
                        if (cpu_architecture() >= CPU_ARCH_ARMv7 &&
                            IS_T32(tinstr)) {
                                /* Thumb-2 32-bit */
                                u16 tinst2 = 0;
                                fault = probe_kernel_address(ptr + 1, tinst2);
                                tinst2 = __mem_to_opcode_thumb16(tinst2);
                                instr = __opcode_thumb32_compose(tinstr, tinst2);
                                thumb2_32b = 1;
                        } else {
                                isize = 2;
                                instr = thumb2arm(tinstr);
                        }
                }
        } else {
                fault = probe_kernel_address((void *)instrptr, instr);
                instr = __mem_to_opcode_arm(instr);
        }

        if (fault) {
                type = TYPE_FAULT;
                goto bad_or_fault;
        }

        if (user_mode(regs))
                goto user;

        ai_sys += 1;
        ai_sys_last_pc = (void *)instruction_pointer(regs);

 fixup:

        regs->ARM_pc += isize;

        switch (CODING_BITS(instr)) {
        case 0x00000000:        /* 3.13.4 load/store instruction extensions */
                if (LDSTHD_I_BIT(instr))
                        offset.un = (instr & 0xf00) >> 4 | (instr & 15);
                else
                        offset.un = regs->uregs[RM_BITS(instr)];

                if ((instr & 0x000000f0) == 0x000000b0 || /* LDRH, STRH */
                    (instr & 0x001000f0) == 0x001000f0)   /* LDRSH */
                        handler = do_alignment_ldrhstrh;
                else if ((instr & 0x001000f0) == 0x000000d0 || /* LDRD */
                         (instr & 0x001000f0) == 0x000000f0)   /* STRD */
                        handler = do_alignment_ldrdstrd;
                else if ((instr & 0x01f00ff0) == 0x01000090) /* SWP */
                        goto swp;
                else
                        goto bad;
                break;

        case 0x04000000:        /* ldr or str immediate */
                if (COND_BITS(instr) == 0xf0000000) /* NEON VLDn, VSTn */
                        goto bad;
                offset.un = OFFSET_BITS(instr);
                handler = do_alignment_ldrstr;
                break;

        case 0x06000000:        /* ldr or str register */
                offset.un = regs->uregs[RM_BITS(instr)];

                if (IS_SHIFT(instr)) {
                        unsigned int shiftval = SHIFT_BITS(instr);

                        switch(SHIFT_TYPE(instr)) {
                        case SHIFT_LSL:
                                offset.un <<= shiftval;
                                break;

                        case SHIFT_LSR:
                                offset.un >>= shiftval;
                                break;

                        case SHIFT_ASR:
                                offset.sn >>= shiftval;
                                break;

                        case SHIFT_RORRRX:
                                if (shiftval == 0) {
                                        offset.un >>= 1;
                                        if (regs->ARM_cpsr & PSR_C_BIT)
                                                offset.un |= 1 << 31;
                                } else
                                        offset.un = offset.un >> shiftval |
                                                          offset.un << (32 - shiftval);
                                break;
                        }
                }
                handler = do_alignment_ldrstr;
                break;

        case 0x08000000:        /* ldm or stm, or thumb-2 32bit instruction */
                if (thumb2_32b) {
                        offset.un = 0;
                        handler = do_alignment_t32_to_handler(&instr, regs, &offset);
                } else {
                        offset.un = 0;
                        handler = do_alignment_ldmstm;
                }
                break;

        default:
                goto bad;
        }

        if (!handler)
                goto bad;
        type = handler(addr, instr, regs);

        if (type == TYPE_ERROR || type == TYPE_FAULT) {
                regs->ARM_pc -= isize;
                goto bad_or_fault;
        }

        if (type == TYPE_LDST)
                do_alignment_finish_ldst(addr, instr, regs, offset);

        return 0;

 bad_or_fault:
        if (type == TYPE_ERROR)
                goto bad;
        /*
         * We got a fault - fix it up, or die.
         */
        do_bad_area(addr, fsr, regs);
        return 0;

 swp:
        pr_err("Alignment trap: not handling swp instruction\n");

 bad:
        /*
         * Oops, we didn't handle the instruction.
         */
        pr_err("Alignment trap: not handling instruction "
                "%0*lx at [<%08lx>]\n",
                isize << 1,
                isize == 2 ? tinstr : instr, instrptr);
        ai_skipped += 1;
        return 1;

 user:
        ai_user += 1;

        if (ai_usermode & UM_WARN)
                printk("Alignment trap: %s (%d) PC=0x%08lx Instr=0x%0*lx "
                       "Address=0x%08lx FSR 0x%03x\n", current->comm,
                        task_pid_nr(current), instrptr,
                        isize << 1,
                        isize == 2 ? tinstr : instr,
                        addr, fsr);

        if (ai_usermode & UM_FIXUP)
                goto fixup;

        if (ai_usermode & UM_SIGNAL) {
                force_sig_fault(SIGBUS, BUS_ADRALN, (void __user *)addr, current);
        } else {
                /*
                 * We're about to disable the alignment trap and return to
                 * user space.  But if an interrupt occurs before actually
                 * reaching user space, then the IRQ vector entry code will
                 * notice that we were still in kernel space and therefore
                 * the alignment trap won't be re-enabled in that case as it
                 * is presumed to be always on from kernel space.
                 * Let's prevent that race by disabling interrupts here (they
                 * are disabled on the way back to user space anyway in
                 * entry-common.S) and disable the alignment trap only if
                 * there is no work pending for this thread.
                 */
                raw_local_irq_disable();
                if (!(current_thread_info()->flags & _TIF_WORK_MASK))
                        set_cr(cr_no_alignment);
        }

        return 0;
}

static int __init noalign_setup(char *__unused)
{
        set_cr(__clear_cr(CR_A));
        return 1;
}
__setup("noalign", noalign_setup);
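
/*
 * Note: booting with "noalign" on the kernel command line clears the
 * alignment-check (A) bit in the system control register, so the CPU
 * stops raising alignment aborts altogether.
 */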

/*
 * This needs to be done after sysctl_init, otherwise sys/ will be
 * overwritten.  Actually, this shouldn't be in sys/ at all since
 * it isn't a sysctl, and it doesn't contain sysctl information.
 * We now locate it in /proc/cpu/alignment instead.
 */
static int __init alignment_init(void)
{
#ifdef CONFIG_PROC_FS
        struct proc_dir_entry *res;

        res = proc_create("cpu/alignment", S_IWUSR | S_IRUGO, NULL,
                          &alignment_proc_fops);
        if (!res)
                return -ENOMEM;
#endif

        if (cpu_is_v6_unaligned()) {
                set_cr(__clear_cr(CR_A));
                ai_usermode = safe_usermode(ai_usermode, false);
        }

        cr_no_alignment = get_cr() & ~CR_A;

        hook_fault_code(FAULT_CODE_ALIGNMENT, do_alignment, SIGBUS, BUS_ADRALN,
                        "alignment exception");

        /*
         * ARMv6K and ARMv7 use fault status 3 (0b00011) as Access Flag section
         * fault, not as alignment error.
         *
         * TODO: handle ARMv6K properly. Runtime check for 'K' extension is
         * needed.
         */
        if (cpu_architecture() <= CPU_ARCH_ARMv6) {
                hook_fault_code(3, do_alignment, SIGBUS, BUS_ADRALN,
                                "alignment exception");
        }

        return 0;
}

fs_initcall(alignment_init);
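
/*
 * Usage sketch from userspace (illustrative):
 *
 *      cat /proc/cpu/alignment         - dump the fixup statistics
 *      echo 3 > /proc/cpu/alignment    - fixup and warn on user faults
 */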