linux/arch/powerpc/kernel/align.c
   1/* align.c - handle alignment exceptions for the PowerPC.
   2 *
   3 * Copyright (c) 1996 Paul Mackerras <paulus@cs.anu.edu.au>
   4 * Copyright (c) 1998-1999 TiVo, Inc.
   5 *   PowerPC 403GCX modifications.
   6 * Copyright (c) 1999 Grant Erickson <grant@lcse.umn.edu>
   7 *   PowerPC 403GCX/405GP modifications.
   8 * Copyright (c) 2001-2002 PPC64 team, IBM Corp
   9 *   64-bit and Power4 support
  10 * Copyright (c) 2005 Benjamin Herrenschmidt, IBM Corp
  11 *                    <benh@kernel.crashing.org>
  12 *   Merge ppc32 and ppc64 implementations
  13 *
  14 * This program is free software; you can redistribute it and/or
  15 * modify it under the terms of the GNU General Public License
  16 * as published by the Free Software Foundation; either version
  17 * 2 of the License, or (at your option) any later version.
  18 */
  19
  20#include <linux/kernel.h>
  21#include <linux/mm.h>
  22#include <asm/processor.h>
  23#include <linux/uaccess.h>
  24#include <asm/cache.h>
  25#include <asm/cputable.h>
  26#include <asm/emulated_ops.h>
  27#include <asm/switch_to.h>
  28#include <asm/disassemble.h>
  29#include <asm/cpu_has_feature.h>
  30
  31struct aligninfo {
  32        unsigned char len;
  33        unsigned char flags;
  34};
  35
  36
  37#define INVALID { 0, 0 }
  38
  39/* Bits in the flags field */
  40#define LD      0       /* load */
  41#define ST      1       /* store */
  42#define SE      2       /* sign-extend value, or FP ld/st as word */
  43#define F       4       /* to/from fp regs */
  44#define U       8       /* update index register */
  45#define M       0x10    /* multiple load/store */
  46#define SW      0x20    /* byte swap */
  47#define S       0x40    /* single-precision fp or... */
  48#define SX      0x40    /* ... byte count in XER */
  49#define HARD    0x80    /* string, stwcx. */
  50#define E4      0x40    /* SPE endianness is word */
  51#define E8      0x80    /* SPE endianness is double word */
  52#define SPLT    0x80    /* VSX SPLAT load */
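    /*
     * Several flag values are reused and are distinguished by context:
     * S/SX/E4 all share 0x40 and HARD/E8/SPLT all share 0x80, depending on
     * whether an integer, FP, SPE or VSX access is being decoded.  As an
     * illustration of how the table entries below compose these flags:
     *
     *   { 4, LD+F+S+U }   4-byte single-precision FP load with update (lfsu)
     *   { 4, ST+M+HARD }  string store with the byte count taken from the
     *                     instruction (stswi)
     */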
  53
  54/* DSISR bits reported for a DCBZ instruction: */
  55#define DCBZ    0x5f    /* 8xx/82xx dcbz faults when cache not enabled */
  56
  57/*
  58 * The PowerPC stores certain bits of the instruction that caused the
  59 * alignment exception in the DSISR register.  This array maps those
  60 * bits to information about the operand length and what the
  61 * instruction would do.
  62 */
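    /*
     * The 7-bit index into this table is rebuilt in fix_alignment() as
     * ((dsisr >> 10) & 0x7f) | ((dsisr >> 13) & 0x60); the binary pattern
     * in each entry's comment (e.g. "00 0 0111" for lmw) is that index.
     */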
  63static struct aligninfo aligninfo[128] = {
  64        { 4, LD },              /* 00 0 0000: lwz / lwarx */
  65        INVALID,                /* 00 0 0001 */
  66        { 4, ST },              /* 00 0 0010: stw */
  67        INVALID,                /* 00 0 0011 */
  68        { 2, LD },              /* 00 0 0100: lhz */
  69        { 2, LD+SE },           /* 00 0 0101: lha */
  70        { 2, ST },              /* 00 0 0110: sth */
  71        { 4, LD+M },            /* 00 0 0111: lmw */
  72        { 4, LD+F+S },          /* 00 0 1000: lfs */
  73        { 8, LD+F },            /* 00 0 1001: lfd */
  74        { 4, ST+F+S },          /* 00 0 1010: stfs */
  75        { 8, ST+F },            /* 00 0 1011: stfd */
  76        { 16, LD },             /* 00 0 1100: lq */
  77        { 8, LD },              /* 00 0 1101: ld/ldu/lwa */
  78        INVALID,                /* 00 0 1110 */
  79        { 8, ST },              /* 00 0 1111: std/stdu */
  80        { 4, LD+U },            /* 00 1 0000: lwzu */
  81        INVALID,                /* 00 1 0001 */
  82        { 4, ST+U },            /* 00 1 0010: stwu */
  83        INVALID,                /* 00 1 0011 */
  84        { 2, LD+U },            /* 00 1 0100: lhzu */
  85        { 2, LD+SE+U },         /* 00 1 0101: lhau */
  86        { 2, ST+U },            /* 00 1 0110: sthu */
  87        { 4, ST+M },            /* 00 1 0111: stmw */
  88        { 4, LD+F+S+U },        /* 00 1 1000: lfsu */
  89        { 8, LD+F+U },          /* 00 1 1001: lfdu */
  90        { 4, ST+F+S+U },        /* 00 1 1010: stfsu */
  91        { 8, ST+F+U },          /* 00 1 1011: stfdu */
  92        { 16, LD+F },           /* 00 1 1100: lfdp */
  93        INVALID,                /* 00 1 1101 */
  94        { 16, ST+F },           /* 00 1 1110: stfdp */
  95        INVALID,                /* 00 1 1111 */
  96        { 8, LD },              /* 01 0 0000: ldx */
  97        INVALID,                /* 01 0 0001 */
  98        { 8, ST },              /* 01 0 0010: stdx */
  99        INVALID,                /* 01 0 0011 */
 100        INVALID,                /* 01 0 0100 */
 101        { 4, LD+SE },           /* 01 0 0101: lwax */
 102        INVALID,                /* 01 0 0110 */
 103        INVALID,                /* 01 0 0111 */
 104        { 4, LD+M+HARD+SX },    /* 01 0 1000: lswx */
 105        { 4, LD+M+HARD },       /* 01 0 1001: lswi */
 106        { 4, ST+M+HARD+SX },    /* 01 0 1010: stswx */
 107        { 4, ST+M+HARD },       /* 01 0 1011: stswi */
 108        INVALID,                /* 01 0 1100 */
 109        { 8, LD+U },            /* 01 0 1101: ldu */
 110        INVALID,                /* 01 0 1110 */
 111        { 8, ST+U },            /* 01 0 1111: stdu */
 112        { 8, LD+U },            /* 01 1 0000: ldux */
 113        INVALID,                /* 01 1 0001 */
 114        { 8, ST+U },            /* 01 1 0010: stdux */
 115        INVALID,                /* 01 1 0011 */
 116        INVALID,                /* 01 1 0100 */
 117        { 4, LD+SE+U },         /* 01 1 0101: lwaux */
 118        INVALID,                /* 01 1 0110 */
 119        INVALID,                /* 01 1 0111 */
 120        INVALID,                /* 01 1 1000 */
 121        INVALID,                /* 01 1 1001 */
 122        INVALID,                /* 01 1 1010 */
 123        INVALID,                /* 01 1 1011 */
 124        INVALID,                /* 01 1 1100 */
 125        INVALID,                /* 01 1 1101 */
 126        INVALID,                /* 01 1 1110 */
 127        INVALID,                /* 01 1 1111 */
 128        INVALID,                /* 10 0 0000 */
 129        INVALID,                /* 10 0 0001 */
 130        INVALID,                /* 10 0 0010: stwcx. */
 131        INVALID,                /* 10 0 0011 */
 132        INVALID,                /* 10 0 0100 */
 133        INVALID,                /* 10 0 0101 */
 134        INVALID,                /* 10 0 0110 */
 135        INVALID,                /* 10 0 0111 */
 136        { 4, LD+SW },           /* 10 0 1000: lwbrx */
 137        INVALID,                /* 10 0 1001 */
 138        { 4, ST+SW },           /* 10 0 1010: stwbrx */
 139        INVALID,                /* 10 0 1011 */
 140        { 2, LD+SW },           /* 10 0 1100: lhbrx */
  141        { 4, LD+SE },           /* 10 0 1101: lwa */
 142        { 2, ST+SW },           /* 10 0 1110: sthbrx */
 143        { 16, ST },             /* 10 0 1111: stq */
 144        INVALID,                /* 10 1 0000 */
 145        INVALID,                /* 10 1 0001 */
 146        INVALID,                /* 10 1 0010 */
 147        INVALID,                /* 10 1 0011 */
 148        INVALID,                /* 10 1 0100 */
 149        INVALID,                /* 10 1 0101 */
 150        INVALID,                /* 10 1 0110 */
 151        INVALID,                /* 10 1 0111 */
 152        INVALID,                /* 10 1 1000 */
 153        INVALID,                /* 10 1 1001 */
 154        INVALID,                /* 10 1 1010 */
 155        INVALID,                /* 10 1 1011 */
 156        INVALID,                /* 10 1 1100 */
 157        INVALID,                /* 10 1 1101 */
 158        INVALID,                /* 10 1 1110 */
 159        { 0, ST+HARD },         /* 10 1 1111: dcbz */
 160        { 4, LD },              /* 11 0 0000: lwzx */
 161        INVALID,                /* 11 0 0001 */
 162        { 4, ST },              /* 11 0 0010: stwx */
 163        INVALID,                /* 11 0 0011 */
 164        { 2, LD },              /* 11 0 0100: lhzx */
 165        { 2, LD+SE },           /* 11 0 0101: lhax */
 166        { 2, ST },              /* 11 0 0110: sthx */
 167        INVALID,                /* 11 0 0111 */
 168        { 4, LD+F+S },          /* 11 0 1000: lfsx */
 169        { 8, LD+F },            /* 11 0 1001: lfdx */
 170        { 4, ST+F+S },          /* 11 0 1010: stfsx */
 171        { 8, ST+F },            /* 11 0 1011: stfdx */
 172        { 16, LD+F },           /* 11 0 1100: lfdpx */
 173        { 4, LD+F+SE },         /* 11 0 1101: lfiwax */
 174        { 16, ST+F },           /* 11 0 1110: stfdpx */
 175        { 4, ST+F },            /* 11 0 1111: stfiwx */
 176        { 4, LD+U },            /* 11 1 0000: lwzux */
 177        INVALID,                /* 11 1 0001 */
 178        { 4, ST+U },            /* 11 1 0010: stwux */
 179        INVALID,                /* 11 1 0011 */
 180        { 2, LD+U },            /* 11 1 0100: lhzux */
 181        { 2, LD+SE+U },         /* 11 1 0101: lhaux */
 182        { 2, ST+U },            /* 11 1 0110: sthux */
 183        INVALID,                /* 11 1 0111 */
 184        { 4, LD+F+S+U },        /* 11 1 1000: lfsux */
 185        { 8, LD+F+U },          /* 11 1 1001: lfdux */
 186        { 4, ST+F+S+U },        /* 11 1 1010: stfsux */
 187        { 8, ST+F+U },          /* 11 1 1011: stfdux */
 188        INVALID,                /* 11 1 1100 */
 189        { 4, LD+F },            /* 11 1 1101: lfiwzx */
 190        INVALID,                /* 11 1 1110 */
 191        INVALID,                /* 11 1 1111 */
 192};
 193
 194/*
 195 * The dcbz (data cache block zero) instruction
 196 * gives an alignment fault if used on non-cacheable
 197 * memory.  We handle the fault mainly for the
 198 * case when we are running with the cache disabled
 199 * for debugging.
 200 */
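    /*
     * The block to clear is the cache block containing DAR, obtained by
     * rounding down: with a 32-byte block size, for example, DAR = 0x1234
     * gives 0x1234 & -32 = 0x1220, and the 32 bytes from there are zeroed
     * one long at a time with __put_user_inatomic().
     */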
 201static int emulate_dcbz(struct pt_regs *regs, unsigned char __user *addr)
 202{
 203        long __user *p;
 204        int i, size;
 205
 206#ifdef __powerpc64__
 207        size = ppc64_caches.dline_size;
 208#else
 209        size = L1_CACHE_BYTES;
 210#endif
 211        p = (long __user *) (regs->dar & -size);
 212        if (user_mode(regs) && !access_ok(VERIFY_WRITE, p, size))
 213                return -EFAULT;
 214        for (i = 0; i < size / sizeof(long); ++i)
 215                if (__put_user_inatomic(0, p+i))
 216                        return -EFAULT;
 217        return 1;
 218}
 219
 220/*
 221 * Emulate load & store multiple instructions
 222 * On 64-bit machines, these instructions only affect/use the
 223 * bottom 4 bytes of each register, and the loads clear the
 224 * top 4 bytes of the affected register.
 225 */
 226#ifdef __BIG_ENDIAN__
 227#ifdef CONFIG_PPC64
 228#define REG_BYTE(rp, i)         *((u8 *)((rp) + ((i) >> 2)) + ((i) & 3) + 4)
 229#else
 230#define REG_BYTE(rp, i)         *((u8 *)(rp) + (i))
 231#endif
 232#else
 233#define REG_BYTE(rp, i)         (*(((u8 *)((rp) + ((i)>>2)) + ((i)&3))))
 234#endif
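    /*
     * REG_BYTE(rp, i) addresses byte i of the packed 4-bytes-per-register
     * image used by the lmw/stmw/lswi/lswx emulation below.  On 64-bit
     * big-endian, for example, REG_BYTE(rp, 5) is (5 >> 2) = 1 long into
     * the array plus (5 & 3) + 4 = 5 bytes, i.e. byte 1 of the low 32-bit
     * half of the second register.
     */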
 235
 236#define SWIZ_PTR(p)             ((unsigned char __user *)((p) ^ swiz))
 237
 238static int emulate_multiple(struct pt_regs *regs, unsigned char __user *addr,
 239                            unsigned int reg, unsigned int nb,
 240                            unsigned int flags, unsigned int instr,
 241                            unsigned long swiz)
 242{
 243        unsigned long *rptr;
 244        unsigned int nb0, i, bswiz;
 245        unsigned long p;
 246
 247        /*
  248         * We do not try to emulate 8-byte multiples as they aren't really
  249         * available in our operating environments, and we don't try to
  250         * emulate multiple operations in kernel land as they should never
  251         * be used/generated there, at least not on unaligned boundaries.
 252         */
 253        if (unlikely((nb > 4) || !user_mode(regs)))
 254                return 0;
 255
 256        /* lmw, stmw, lswi/x, stswi/x */
 257        nb0 = 0;
 258        if (flags & HARD) {
 259                if (flags & SX) {
 260                        nb = regs->xer & 127;
 261                        if (nb == 0)
 262                                return 1;
 263                } else {
 264                        unsigned long pc = regs->nip ^ (swiz & 4);
 265
 266                        if (__get_user_inatomic(instr,
 267                                                (unsigned int __user *)pc))
 268                                return -EFAULT;
 269                        if (swiz == 0 && (flags & SW))
 270                                instr = cpu_to_le32(instr);
 271                        nb = (instr >> 11) & 0x1f;
 272                        if (nb == 0)
 273                                nb = 32;
 274                }
 275                if (nb + reg * 4 > 128) {
 276                        nb0 = nb + reg * 4 - 128;
 277                        nb = 128 - reg * 4;
 278                }
 279#ifdef __LITTLE_ENDIAN__
 280                /*
 281                 *  String instructions are endian neutral but the code
 282                 *  below is not.  Force byte swapping on so that the
 283                 *  effects of swizzling are undone in the load/store
 284                 *  loops below.
 285                 */
 286                flags ^= SW;
 287#endif
 288        } else {
  289        /* lmw, stmw */
 290                nb = (32 - reg) * 4;
 291        }
 292
 293        if (!access_ok((flags & ST ? VERIFY_WRITE: VERIFY_READ), addr, nb+nb0))
 294                return -EFAULT; /* bad address */
 295
 296        rptr = &regs->gpr[reg];
 297        p = (unsigned long) addr;
 298        bswiz = (flags & SW)? 3: 0;
 299
 300        if (!(flags & ST)) {
 301                /*
 302                 * This zeroes the top 4 bytes of the affected registers
 303                 * in 64-bit mode, and also zeroes out any remaining
 304                 * bytes of the last register for lsw*.
 305                 */
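                    /*
                     * For example, lswi with a byte count of 7 into r5 puts
                     * the first 4 bytes in r5 and the remaining 3 bytes
                     * left-justified in r6; the rest of r6 stays zero thanks
                     * to the memset below.
                     */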
 306                memset(rptr, 0, ((nb + 3) / 4) * sizeof(unsigned long));
 307                if (nb0 > 0)
 308                        memset(&regs->gpr[0], 0,
 309                               ((nb0 + 3) / 4) * sizeof(unsigned long));
 310
 311                for (i = 0; i < nb; ++i, ++p)
 312                        if (__get_user_inatomic(REG_BYTE(rptr, i ^ bswiz),
 313                                                SWIZ_PTR(p)))
 314                                return -EFAULT;
 315                if (nb0 > 0) {
 316                        rptr = &regs->gpr[0];
 317                        addr += nb;
 318                        for (i = 0; i < nb0; ++i, ++p)
 319                                if (__get_user_inatomic(REG_BYTE(rptr,
 320                                                                 i ^ bswiz),
 321                                                        SWIZ_PTR(p)))
 322                                        return -EFAULT;
 323                }
 324
 325        } else {
 326                for (i = 0; i < nb; ++i, ++p)
 327                        if (__put_user_inatomic(REG_BYTE(rptr, i ^ bswiz),
 328                                                SWIZ_PTR(p)))
 329                                return -EFAULT;
 330                if (nb0 > 0) {
 331                        rptr = &regs->gpr[0];
 332                        addr += nb;
 333                        for (i = 0; i < nb0; ++i, ++p)
 334                                if (__put_user_inatomic(REG_BYTE(rptr,
 335                                                                 i ^ bswiz),
 336                                                        SWIZ_PTR(p)))
 337                                        return -EFAULT;
 338                }
 339        }
 340        return 1;
 341}
 342
 343/*
 344 * Emulate floating-point pair loads and stores.
 345 * Only POWER6 has these instructions, and it does true little-endian,
 346 * so we don't need the address swizzling.
 347 */
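    /*
     * The 16 bytes of the operand map straight onto the even/odd FPR pair:
     * the first 8 bytes go to FRp and the next 8 to FRp+1.  When SW is set,
     * sw = 7 byte-reverses each 8-byte half as it is copied.
     */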
 348static int emulate_fp_pair(unsigned char __user *addr, unsigned int reg,
 349                           unsigned int flags)
 350{
 351        char *ptr0 = (char *) &current->thread.TS_FPR(reg);
 352        char *ptr1 = (char *) &current->thread.TS_FPR(reg+1);
 353        int i, ret, sw = 0;
 354
 355        if (reg & 1)
 356                return 0;       /* invalid form: FRS/FRT must be even */
 357        if (flags & SW)
 358                sw = 7;
 359        ret = 0;
 360        for (i = 0; i < 8; ++i) {
 361                if (!(flags & ST)) {
 362                        ret |= __get_user(ptr0[i^sw], addr + i);
 363                        ret |= __get_user(ptr1[i^sw], addr + i + 8);
 364                } else {
 365                        ret |= __put_user(ptr0[i^sw], addr + i);
 366                        ret |= __put_user(ptr1[i^sw], addr + i + 8);
 367                }
 368        }
 369        if (ret)
 370                return -EFAULT;
 371        return 1;       /* exception handled and fixed up */
 372}
 373
 374#ifdef CONFIG_PPC64
 375static int emulate_lq_stq(struct pt_regs *regs, unsigned char __user *addr,
 376                          unsigned int reg, unsigned int flags)
 377{
 378        char *ptr0 = (char *)&regs->gpr[reg];
 379        char *ptr1 = (char *)&regs->gpr[reg+1];
 380        int i, ret, sw = 0;
 381
 382        if (reg & 1)
 383                return 0;       /* invalid form: GPR must be even */
 384        if (flags & SW)
 385                sw = 7;
 386        ret = 0;
 387        for (i = 0; i < 8; ++i) {
 388                if (!(flags & ST)) {
 389                        ret |= __get_user(ptr0[i^sw], addr + i);
 390                        ret |= __get_user(ptr1[i^sw], addr + i + 8);
 391                } else {
 392                        ret |= __put_user(ptr0[i^sw], addr + i);
 393                        ret |= __put_user(ptr1[i^sw], addr + i + 8);
 394                }
 395        }
 396        if (ret)
 397                return -EFAULT;
 398        return 1;       /* exception handled and fixed up */
 399}
 400#endif /* CONFIG_PPC64 */
 401
 402#ifdef CONFIG_SPE
 403
 404static struct aligninfo spe_aligninfo[32] = {
 405        { 8, LD+E8 },           /* 0 00 00: evldd[x] */
 406        { 8, LD+E4 },           /* 0 00 01: evldw[x] */
 407        { 8, LD },              /* 0 00 10: evldh[x] */
 408        INVALID,                /* 0 00 11 */
 409        { 2, LD },              /* 0 01 00: evlhhesplat[x] */
 410        INVALID,                /* 0 01 01 */
 411        { 2, LD },              /* 0 01 10: evlhhousplat[x] */
 412        { 2, LD+SE },           /* 0 01 11: evlhhossplat[x] */
 413        { 4, LD },              /* 0 10 00: evlwhe[x] */
 414        INVALID,                /* 0 10 01 */
 415        { 4, LD },              /* 0 10 10: evlwhou[x] */
 416        { 4, LD+SE },           /* 0 10 11: evlwhos[x] */
 417        { 4, LD+E4 },           /* 0 11 00: evlwwsplat[x] */
 418        INVALID,                /* 0 11 01 */
 419        { 4, LD },              /* 0 11 10: evlwhsplat[x] */
 420        INVALID,                /* 0 11 11 */
 421
 422        { 8, ST+E8 },           /* 1 00 00: evstdd[x] */
 423        { 8, ST+E4 },           /* 1 00 01: evstdw[x] */
 424        { 8, ST },              /* 1 00 10: evstdh[x] */
 425        INVALID,                /* 1 00 11 */
 426        INVALID,                /* 1 01 00 */
 427        INVALID,                /* 1 01 01 */
 428        INVALID,                /* 1 01 10 */
 429        INVALID,                /* 1 01 11 */
 430        { 4, ST },              /* 1 10 00: evstwhe[x] */
 431        INVALID,                /* 1 10 01 */
 432        { 4, ST },              /* 1 10 10: evstwho[x] */
 433        INVALID,                /* 1 10 11 */
 434        { 4, ST+E4 },           /* 1 11 00: evstwwe[x] */
 435        INVALID,                /* 1 11 01 */
 436        { 4, ST+E4 },           /* 1 11 10: evstwwo[x] */
 437        INVALID,                /* 1 11 11 */
 438};
 439
 440#define EVLDD           0x00
 441#define EVLDW           0x01
 442#define EVLDH           0x02
 443#define EVLHHESPLAT     0x04
 444#define EVLHHOUSPLAT    0x06
 445#define EVLHHOSSPLAT    0x07
 446#define EVLWHE          0x08
 447#define EVLWHOU         0x0A
 448#define EVLWHOS         0x0B
 449#define EVLWWSPLAT      0x0C
 450#define EVLWHSPLAT      0x0E
 451#define EVSTDD          0x10
 452#define EVSTDW          0x11
 453#define EVSTDH          0x12
 454#define EVSTWHE         0x18
 455#define EVSTWHO         0x1A
 456#define EVSTWWE         0x1C
 457#define EVSTWWO         0x1E
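    /*
     * These are the values produced by emulate_spe()'s (instr >> 1) & 0x1f,
     * i.e. bits 1-5 of the instruction word with the low opcode bit masked
     * off, so each constant covers both the indexed and non-indexed forms
     * (evldd and evlddx, etc.), as the "[x]" comments in the table above
     * indicate.
     */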
 458
 459/*
 460 * Emulate SPE loads and stores.
 461 * Only Book-E has these instructions, and it does true little-endian,
 462 * so we don't need the address swizzling.
 463 */
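    /*
     * Note on the load path below: the switch on nb deliberately falls
     * through, so a load of nb bytes fills temp.v[8-nb]..temp.v[7]; the
     * per-instruction switch then spreads those bytes into the right
     * halfword/word positions of the result for the splat and merge forms.
     */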
 464static int emulate_spe(struct pt_regs *regs, unsigned int reg,
 465                       unsigned int instr)
 466{
 467        int ret;
 468        union {
 469                u64 ll;
 470                u32 w[2];
 471                u16 h[4];
 472                u8 v[8];
 473        } data, temp;
 474        unsigned char __user *p, *addr;
 475        unsigned long *evr = &current->thread.evr[reg];
 476        unsigned int nb, flags;
 477
 478        instr = (instr >> 1) & 0x1f;
 479
 480        /* DAR has the operand effective address */
 481        addr = (unsigned char __user *)regs->dar;
 482
 483        nb = spe_aligninfo[instr].len;
 484        flags = spe_aligninfo[instr].flags;
 485
 486        /* Verify the address of the operand */
 487        if (unlikely(user_mode(regs) &&
 488                     !access_ok((flags & ST ? VERIFY_WRITE : VERIFY_READ),
 489                                addr, nb)))
 490                return -EFAULT;
 491
 492        /* userland only */
 493        if (unlikely(!user_mode(regs)))
 494                return 0;
 495
 496        flush_spe_to_thread(current);
 497
 498        /* If we are loading, get the data from user space, else
 499         * get it from register values
 500         */
 501        if (flags & ST) {
 502                data.ll = 0;
 503                switch (instr) {
 504                case EVSTDD:
 505                case EVSTDW:
 506                case EVSTDH:
 507                        data.w[0] = *evr;
 508                        data.w[1] = regs->gpr[reg];
 509                        break;
 510                case EVSTWHE:
 511                        data.h[2] = *evr >> 16;
 512                        data.h[3] = regs->gpr[reg] >> 16;
 513                        break;
 514                case EVSTWHO:
 515                        data.h[2] = *evr & 0xffff;
 516                        data.h[3] = regs->gpr[reg] & 0xffff;
 517                        break;
 518                case EVSTWWE:
 519                        data.w[1] = *evr;
 520                        break;
 521                case EVSTWWO:
 522                        data.w[1] = regs->gpr[reg];
 523                        break;
 524                default:
 525                        return -EINVAL;
 526                }
 527        } else {
 528                temp.ll = data.ll = 0;
 529                ret = 0;
 530                p = addr;
 531
 532                switch (nb) {
 533                case 8:
 534                        ret |= __get_user_inatomic(temp.v[0], p++);
 535                        ret |= __get_user_inatomic(temp.v[1], p++);
 536                        ret |= __get_user_inatomic(temp.v[2], p++);
 537                        ret |= __get_user_inatomic(temp.v[3], p++);
 538                case 4:
 539                        ret |= __get_user_inatomic(temp.v[4], p++);
 540                        ret |= __get_user_inatomic(temp.v[5], p++);
 541                case 2:
 542                        ret |= __get_user_inatomic(temp.v[6], p++);
 543                        ret |= __get_user_inatomic(temp.v[7], p++);
 544                        if (unlikely(ret))
 545                                return -EFAULT;
 546                }
 547
 548                switch (instr) {
 549                case EVLDD:
 550                case EVLDW:
 551                case EVLDH:
 552                        data.ll = temp.ll;
 553                        break;
 554                case EVLHHESPLAT:
 555                        data.h[0] = temp.h[3];
 556                        data.h[2] = temp.h[3];
 557                        break;
 558                case EVLHHOUSPLAT:
 559                case EVLHHOSSPLAT:
 560                        data.h[1] = temp.h[3];
 561                        data.h[3] = temp.h[3];
 562                        break;
 563                case EVLWHE:
 564                        data.h[0] = temp.h[2];
 565                        data.h[2] = temp.h[3];
 566                        break;
 567                case EVLWHOU:
 568                case EVLWHOS:
 569                        data.h[1] = temp.h[2];
 570                        data.h[3] = temp.h[3];
 571                        break;
 572                case EVLWWSPLAT:
 573                        data.w[0] = temp.w[1];
 574                        data.w[1] = temp.w[1];
 575                        break;
 576                case EVLWHSPLAT:
 577                        data.h[0] = temp.h[2];
 578                        data.h[1] = temp.h[2];
 579                        data.h[2] = temp.h[3];
 580                        data.h[3] = temp.h[3];
 581                        break;
 582                default:
 583                        return -EINVAL;
 584                }
 585        }
 586
 587        if (flags & SW) {
 588                switch (flags & 0xf0) {
 589                case E8:
 590                        data.ll = swab64(data.ll);
 591                        break;
 592                case E4:
 593                        data.w[0] = swab32(data.w[0]);
 594                        data.w[1] = swab32(data.w[1]);
 595                        break;
  596                /* It's half-word endian */
 597                default:
 598                        data.h[0] = swab16(data.h[0]);
 599                        data.h[1] = swab16(data.h[1]);
 600                        data.h[2] = swab16(data.h[2]);
 601                        data.h[3] = swab16(data.h[3]);
 602                        break;
 603                }
 604        }
 605
 606        if (flags & SE) {
 607                data.w[0] = (s16)data.h[1];
 608                data.w[1] = (s16)data.h[3];
 609        }
 610
 611        /* Store result to memory or update registers */
 612        if (flags & ST) {
 613                ret = 0;
 614                p = addr;
 615                switch (nb) {
 616                case 8:
 617                        ret |= __put_user_inatomic(data.v[0], p++);
 618                        ret |= __put_user_inatomic(data.v[1], p++);
 619                        ret |= __put_user_inatomic(data.v[2], p++);
 620                        ret |= __put_user_inatomic(data.v[3], p++);
 621                case 4:
 622                        ret |= __put_user_inatomic(data.v[4], p++);
 623                        ret |= __put_user_inatomic(data.v[5], p++);
 624                case 2:
 625                        ret |= __put_user_inatomic(data.v[6], p++);
 626                        ret |= __put_user_inatomic(data.v[7], p++);
 627                }
 628                if (unlikely(ret))
 629                        return -EFAULT;
 630        } else {
 631                *evr = data.w[0];
 632                regs->gpr[reg] = data.w[1];
 633        }
 634
 635        return 1;
 636}
 637#endif /* CONFIG_SPE */
 638
 639#ifdef CONFIG_VSX
 640/*
 641 * Emulate VSX instructions...
 642 */
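    /*
     * reg is the 6-bit VSX register number: 0-31 alias the FPRs (fp_state),
     * 32-63 alias the VRs (vr_state).  length is the total access size in
     * bytes (8 or 16) and elsize the element size (4 or 8), which controls
     * how elements are reordered when running little-endian.
     */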
 643static int emulate_vsx(unsigned char __user *addr, unsigned int reg,
 644                       unsigned int areg, struct pt_regs *regs,
 645                       unsigned int flags, unsigned int length,
 646                       unsigned int elsize)
 647{
 648        char *ptr;
 649        unsigned long *lptr;
 650        int ret = 0;
 651        int sw = 0;
 652        int i, j;
 653
 654        /* userland only */
 655        if (unlikely(!user_mode(regs)))
 656                return 0;
 657
 658        flush_vsx_to_thread(current);
 659
 660        if (reg < 32)
 661                ptr = (char *) &current->thread.fp_state.fpr[reg][0];
 662        else
 663                ptr = (char *) &current->thread.vr_state.vr[reg - 32];
 664
 665        lptr = (unsigned long *) ptr;
 666
 667#ifdef __LITTLE_ENDIAN__
 668        if (flags & SW) {
 669                elsize = length;
 670                sw = length-1;
 671        } else {
 672                /*
 673                 * The elements are BE ordered, even in LE mode, so process
 674                 * them in reverse order.
 675                 */
 676                addr += length - elsize;
 677
 678                /* 8 byte memory accesses go in the top 8 bytes of the VR */
 679                if (length == 8)
 680                        ptr += 8;
 681        }
 682#else
 683        if (flags & SW)
 684                sw = elsize-1;
 685#endif
 686
 687        for (j = 0; j < length; j += elsize) {
 688                for (i = 0; i < elsize; ++i) {
 689                        if (flags & ST)
 690                                ret |= __put_user(ptr[i^sw], addr + i);
 691                        else
 692                                ret |= __get_user(ptr[i^sw], addr + i);
 693                }
 694                ptr  += elsize;
 695#ifdef __LITTLE_ENDIAN__
 696                addr -= elsize;
 697#else
 698                addr += elsize;
 699#endif
 700        }
 701
 702#ifdef __BIG_ENDIAN__
 703#define VSX_HI 0
 704#define VSX_LO 1
 705#else
 706#define VSX_HI 1
 707#define VSX_LO 0
 708#endif
 709
 710        if (!ret) {
 711                if (flags & U)
 712                        regs->gpr[areg] = regs->dar;
 713
 714                /* Splat load copies the same data to top and bottom 8 bytes */
 715                if (flags & SPLT)
 716                        lptr[VSX_LO] = lptr[VSX_HI];
 717                /* For 8 byte loads, zero the low 8 bytes */
 718                else if (!(flags & ST) && (8 == length))
 719                        lptr[VSX_LO] = 0;
 720        } else
 721                return -EFAULT;
 722
 723        return 1;
 724}
 725#endif
 726
 727/*
  728 * Called on alignment exception. Attempts to fix it up.
 729 *
 730 * Return 1 on success
 731 * Return 0 if unable to handle the interrupt
 732 * Return -EFAULT if data address is bad
 733 */
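    /*
     * The caller (the alignment exception handler in traps.c) typically
     * advances NIP past the instruction when 1 is returned and delivers a
     * SIGBUS/SIGSEGV to the task otherwise.
     */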
 734
 735int fix_alignment(struct pt_regs *regs)
 736{
 737        unsigned int instr, nb, flags, instruction = 0;
 738        unsigned int reg, areg;
 739        unsigned int dsisr;
 740        unsigned char __user *addr;
 741        unsigned long p, swiz;
 742        int ret, i;
 743        union data {
 744                u64 ll;
 745                double dd;
 746                unsigned char v[8];
 747                struct {
 748#ifdef __LITTLE_ENDIAN__
 749                        int      low32;
 750                        unsigned hi32;
 751#else
 752                        unsigned hi32;
 753                        int      low32;
 754#endif
 755                } x32;
 756                struct {
 757#ifdef __LITTLE_ENDIAN__
 758                        short         low16;
 759                        unsigned char hi48[6];
 760#else
 761                        unsigned char hi48[6];
 762                        short         low16;
 763#endif
 764                } x16;
 765        } data;
 766
 767        /*
  768         * We require a complete register set; if not, then our assembly
  769         * is broken.
 770         */
 771        CHECK_FULL_REGS(regs);
 772
 773        dsisr = regs->dsisr;
 774
  775        /* Some processors don't provide us with a DSISR we can use here,
  776         * so let's make one up from the instruction.
 777         */
 778        if (cpu_has_feature(CPU_FTR_NODSISRALIGN)) {
 779                unsigned long pc = regs->nip;
 780
 781                if (cpu_has_feature(CPU_FTR_PPC_LE) && (regs->msr & MSR_LE))
 782                        pc ^= 4;
 783                if (unlikely(__get_user_inatomic(instr,
 784                                                 (unsigned int __user *)pc)))
 785                        return -EFAULT;
 786                if (cpu_has_feature(CPU_FTR_REAL_LE) && (regs->msr & MSR_LE))
 787                        instr = cpu_to_le32(instr);
 788                dsisr = make_dsisr(instr);
 789                instruction = instr;
 790        }
 791
 792        /* extract the operation and registers from the dsisr */
 793        reg = (dsisr >> 5) & 0x1f;      /* source/dest register */
 794        areg = dsisr & 0x1f;            /* register to update */
 795
 796#ifdef CONFIG_SPE
 797        if ((instr >> 26) == 0x4) {
 798                PPC_WARN_ALIGNMENT(spe, regs);
 799                return emulate_spe(regs, reg, instr);
 800        }
 801#endif
 802
 803        instr = (dsisr >> 10) & 0x7f;
 804        instr |= (dsisr >> 13) & 0x60;
 805
 806        /* Lookup the operation in our table */
 807        nb = aligninfo[instr].len;
 808        flags = aligninfo[instr].flags;
 809
 810        /* ldbrx/stdbrx overlap lfs/stfs in the DSISR unfortunately */
 811        if (IS_XFORM(instruction) && ((instruction >> 1) & 0x3ff) == 532) {
 812                nb = 8;
 813                flags = LD+SW;
 814        } else if (IS_XFORM(instruction) &&
 815                   ((instruction >> 1) & 0x3ff) == 660) {
 816                nb = 8;
 817                flags = ST+SW;
 818        }
 819
 820        /* Byteswap little endian loads and stores */
 821        swiz = 0;
 822        if ((regs->msr & MSR_LE) != (MSR_KERNEL & MSR_LE)) {
 823                flags ^= SW;
 824#ifdef __BIG_ENDIAN__
 825                /*
 826                 * So-called "PowerPC little endian" mode works by
 827                 * swizzling addresses rather than by actually doing
 828                 * any byte-swapping.  To emulate this, we XOR each
 829                 * byte address with 7.  We also byte-swap, because
 830                 * the processor's address swizzling depends on the
 831                 * operand size (it xors the address with 7 for bytes,
 832                 * 6 for halfwords, 4 for words, 0 for doublewords) but
 833                 * we will xor with 7 and load/store each byte separately.
 834                 */
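                    /*
                     * For example, a 2-byte access at EA 0x1000 would be
                     * fetched by the hardware from 0x1006/0x1007 (EA xor 6);
                     * the emulation instead reads 0x1000^7 = 0x1007 and then
                     * 0x1001^7 = 0x1006, i.e. the same two bytes in the
                     * opposite order, and toggling SW above compensates by
                     * byte-swapping the assembled value.
                     */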
 835                if (cpu_has_feature(CPU_FTR_PPC_LE))
 836                        swiz = 7;
 837#endif
 838        }
 839
 840        /* DAR has the operand effective address */
 841        addr = (unsigned char __user *)regs->dar;
 842
 843#ifdef CONFIG_VSX
 844        if ((instruction & 0xfc00003e) == 0x7c000018) {
 845                unsigned int elsize;
 846
 847                /* Additional register addressing bit (64 VSX vs 32 FPR/GPR) */
 848                reg |= (instruction & 0x1) << 5;
 849                /* Simple inline decoder instead of a table */
 850                /* VSX has only 8 and 16 byte memory accesses */
 851                nb = 8;
 852                if (instruction & 0x200)
 853                        nb = 16;
 854
 855                /* Vector stores in little-endian mode swap individual
 856                   elements, so process them separately */
 857                elsize = 4;
 858                if (instruction & 0x80)
 859                        elsize = 8;
 860
 861                flags = 0;
 862                if ((regs->msr & MSR_LE) != (MSR_KERNEL & MSR_LE))
 863                        flags |= SW;
 864                if (instruction & 0x100)
 865                        flags |= ST;
 866                if (instruction & 0x040)
 867                        flags |= U;
 868                /* splat load needs a special decoder */
 869                if ((instruction & 0x400) == 0){
 870                        flags |= SPLT;
 871                        nb = 8;
 872                }
 873                PPC_WARN_ALIGNMENT(vsx, regs);
 874                return emulate_vsx(addr, reg, areg, regs, flags, nb, elsize);
 875        }
 876#endif
 877
 878        /*
 879         * ISA 3.0 (such as P9) copy, copy_first, paste and paste_last alignment
 880         * check.
 881         *
 882         * Send a SIGBUS to the process that caused the fault.
 883         *
 884         * We do not emulate these because paste may contain additional metadata
 885         * when pasting to a co-processor. Furthermore, paste_last is the
 886         * synchronisation point for preceding copy/paste sequences.
 887         */
 888        if ((instruction & 0xfc0006fe) == PPC_INST_COPY)
 889                return -EIO;
 890
 891        /* A size of 0 indicates an instruction we don't support, with
 892         * the exception of DCBZ which is handled as a special case here
 893         */
 894        if (instr == DCBZ) {
 895                PPC_WARN_ALIGNMENT(dcbz, regs);
 896                return emulate_dcbz(regs, addr);
 897        }
 898        if (unlikely(nb == 0))
 899                return 0;
 900
 901        /* Load/Store Multiple instructions are handled in their own
 902         * function
 903         */
 904        if (flags & M) {
 905                PPC_WARN_ALIGNMENT(multiple, regs);
 906                return emulate_multiple(regs, addr, reg, nb,
 907                                        flags, instr, swiz);
 908        }
 909
 910        /* Verify the address of the operand */
 911        if (unlikely(user_mode(regs) &&
 912                     !access_ok((flags & ST ? VERIFY_WRITE : VERIFY_READ),
 913                                addr, nb)))
 914                return -EFAULT;
 915
 916        /* Force the fprs into the save area so we can reference them */
 917        if (flags & F) {
 918                /* userland only */
 919                if (unlikely(!user_mode(regs)))
 920                        return 0;
 921                flush_fp_to_thread(current);
 922        }
 923
 924        if (nb == 16) {
 925                if (flags & F) {
 926                        /* Special case for 16-byte FP loads and stores */
 927                        PPC_WARN_ALIGNMENT(fp_pair, regs);
 928                        return emulate_fp_pair(addr, reg, flags);
 929                } else {
 930#ifdef CONFIG_PPC64
 931                        /* Special case for 16-byte loads and stores */
 932                        PPC_WARN_ALIGNMENT(lq_stq, regs);
 933                        return emulate_lq_stq(regs, addr, reg, flags);
 934#else
 935                        return 0;
 936#endif
 937                }
 938        }
 939
 940        PPC_WARN_ALIGNMENT(unaligned, regs);
 941
 942        /* If we are loading, get the data from user space, else
 943         * get it from register values
 944         */
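            /*
             * The x32.low32/x16.low16 members of the data union are placed
             * so that the offsetof() computations below always point at the
             * least significant nb bytes of data.ll, on either endianness;
             * the byte loop then reads or writes the operand into that slice.
             */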
 945        if (!(flags & ST)) {
 946                unsigned int start = 0;
 947
 948                switch (nb) {
 949                case 4:
 950                        start = offsetof(union data, x32.low32);
 951                        break;
 952                case 2:
 953                        start = offsetof(union data, x16.low16);
 954                        break;
 955                }
 956
 957                data.ll = 0;
 958                ret = 0;
 959                p = (unsigned long)addr;
 960
 961                for (i = 0; i < nb; i++)
 962                        ret |= __get_user_inatomic(data.v[start + i],
 963                                                   SWIZ_PTR(p++));
 964
 965                if (unlikely(ret))
 966                        return -EFAULT;
 967
 968        } else if (flags & F) {
 969                data.ll = current->thread.TS_FPR(reg);
 970                if (flags & S) {
 971                        /* Single-precision FP store requires conversion... */
 972#ifdef CONFIG_PPC_FPU
 973                        preempt_disable();
 974                        enable_kernel_fp();
 975                        cvt_df(&data.dd, (float *)&data.x32.low32);
 976                        disable_kernel_fp();
 977                        preempt_enable();
 978#else
 979                        return 0;
 980#endif
 981                }
 982        } else
 983                data.ll = regs->gpr[reg];
 984
 985        if (flags & SW) {
 986                switch (nb) {
 987                case 8:
 988                        data.ll = swab64(data.ll);
 989                        break;
 990                case 4:
 991                        data.x32.low32 = swab32(data.x32.low32);
 992                        break;
 993                case 2:
 994                        data.x16.low16 = swab16(data.x16.low16);
 995                        break;
 996                }
 997        }
 998
 999        /* Perform other misc operations like sign extension
1000         * or floating point single precision conversion
1001         */
1002        switch (flags & ~(U|SW)) {
1003        case LD+SE:     /* sign extending integer loads */
1004        case LD+F+SE:   /* sign extend for lfiwax */
1005                if ( nb == 2 )
1006                        data.ll = data.x16.low16;
1007                else    /* nb must be 4 */
1008                        data.ll = data.x32.low32;
1009                break;
1010
1011        /* Single-precision FP load requires conversion... */
1012        case LD+F+S:
1013#ifdef CONFIG_PPC_FPU
1014                preempt_disable();
1015                enable_kernel_fp();
1016                cvt_fd((float *)&data.x32.low32, &data.dd);
1017                disable_kernel_fp();
1018                preempt_enable();
1019#else
1020                return 0;
1021#endif
1022                break;
1023        }
1024
1025        /* Store result to memory or update registers */
1026        if (flags & ST) {
1027                unsigned int start = 0;
1028
1029                switch (nb) {
1030                case 4:
1031                        start = offsetof(union data, x32.low32);
1032                        break;
1033                case 2:
1034                        start = offsetof(union data, x16.low16);
1035                        break;
1036                }
1037
1038                ret = 0;
1039                p = (unsigned long)addr;
1040
1041                for (i = 0; i < nb; i++)
1042                        ret |= __put_user_inatomic(data.v[start + i],
1043                                                   SWIZ_PTR(p++));
1044
1045                if (unlikely(ret))
1046                        return -EFAULT;
1047        } else if (flags & F)
1048                current->thread.TS_FPR(reg) = data.ll;
1049        else
1050                regs->gpr[reg] = data.ll;
1051
1052        /* Update RA as needed */
1053        if (flags & U)
1054                regs->gpr[areg] = regs->dar;
1055
1056        return 1;
1057}
1058