qemu/target-arm/helper.c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include "cpu.h"
#include "gdbstub.h"
#include "helper.h"
#include "qemu-common.h"
#include "host-utils.h"
#if !defined(CONFIG_USER_ONLY)
#include "hw/loader.h"
#endif

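/* Reset values for the per-model CP15 c0 ID register blocks: the c0_c1
 * arrays hold the CRm=1 group (ID_PFR0..ID_MMFR3) and the c0_c2 arrays the
 * CRm=2 group (ID_ISAR0..), copied into env->cp15 on reset below.
 */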
static uint32_t cortexa9_cp15_c0_c1[8] =
{ 0x1031, 0x11, 0x000, 0, 0x00100103, 0x20000000, 0x01230000, 0x00002111 };

static uint32_t cortexa9_cp15_c0_c2[8] =
{ 0x00101111, 0x13112111, 0x21232041, 0x11112131, 0x00111142, 0, 0, 0 };

static uint32_t cortexa8_cp15_c0_c1[8] =
{ 0x1031, 0x11, 0x400, 0, 0x31100003, 0x20000000, 0x01202000, 0x11 };

static uint32_t cortexa8_cp15_c0_c2[8] =
{ 0x00101111, 0x12112111, 0x21232031, 0x11112131, 0x00111142, 0, 0, 0 };

static uint32_t mpcore_cp15_c0_c1[8] =
{ 0x111, 0x1, 0, 0x2, 0x01100103, 0x10020302, 0x01222000, 0 };

static uint32_t mpcore_cp15_c0_c2[8] =
{ 0x00100011, 0x12002111, 0x11221011, 0x01102131, 0x141, 0, 0, 0 };

static uint32_t arm1136_cp15_c0_c1[8] =
{ 0x111, 0x1, 0x2, 0x3, 0x01130003, 0x10030302, 0x01222110, 0 };

static uint32_t arm1136_cp15_c0_c2[8] =
{ 0x00140011, 0x12002111, 0x11231111, 0x01102131, 0x141, 0, 0, 0 };

static uint32_t arm1176_cp15_c0_c1[8] =
{ 0x111, 0x11, 0x33, 0, 0x01130003, 0x10030302, 0x01222100, 0 };

static uint32_t arm1176_cp15_c0_c2[8] =
{ 0x0140011, 0x12002111, 0x11231121, 0x01102131, 0x01141, 0, 0, 0 };

static uint32_t cpu_arm_find_by_name(const char *name);

static inline void set_feature(CPUARMState *env, int feature)
{
    env->features |= 1u << feature;
}

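/* Set the feature flags and the reset values of the cp15 registers
 * (ID registers, cache type, SCTLR, VFP IDs, ...) for the CPU model
 * identified by its main ID register value.
 */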
static void cpu_reset_model_id(CPUARMState *env, uint32_t id)
{
    env->cp15.c0_cpuid = id;
    switch (id) {
    case ARM_CPUID_ARM926:
        set_feature(env, ARM_FEATURE_V4T);
        set_feature(env, ARM_FEATURE_V5);
        set_feature(env, ARM_FEATURE_VFP);
        env->vfp.xregs[ARM_VFP_FPSID] = 0x41011090;
        env->cp15.c0_cachetype = 0x1dd20d2;
        env->cp15.c1_sys = 0x00090078;
        break;
    case ARM_CPUID_ARM946:
        set_feature(env, ARM_FEATURE_V4T);
        set_feature(env, ARM_FEATURE_V5);
        set_feature(env, ARM_FEATURE_MPU);
        env->cp15.c0_cachetype = 0x0f004006;
        env->cp15.c1_sys = 0x00000078;
        break;
    case ARM_CPUID_ARM1026:
        set_feature(env, ARM_FEATURE_V4T);
        set_feature(env, ARM_FEATURE_V5);
        set_feature(env, ARM_FEATURE_VFP);
        set_feature(env, ARM_FEATURE_AUXCR);
        env->vfp.xregs[ARM_VFP_FPSID] = 0x410110a0;
        env->cp15.c0_cachetype = 0x1dd20d2;
        env->cp15.c1_sys = 0x00090078;
        break;
    case ARM_CPUID_ARM1136:
        /* This is the 1136 r1, which is a v6K core */
        set_feature(env, ARM_FEATURE_V6K);
        /* Fall through */
    case ARM_CPUID_ARM1136_R2:
        /* What qemu calls "arm1136_r2" is actually the 1136 r0p2, ie an
         * older core than plain "arm1136". In particular this does not
         * have the v6K features.
         */
        set_feature(env, ARM_FEATURE_V4T);
        set_feature(env, ARM_FEATURE_V5);
        set_feature(env, ARM_FEATURE_V6);
        set_feature(env, ARM_FEATURE_VFP);
        set_feature(env, ARM_FEATURE_AUXCR);
        /* These ID register values are correct for 1136 but may be wrong
         * for 1136_r2 (in particular r0p2 does not actually implement most
         * of the ID registers).
         */
        env->vfp.xregs[ARM_VFP_FPSID] = 0x410120b4;
        env->vfp.xregs[ARM_VFP_MVFR0] = 0x11111111;
        env->vfp.xregs[ARM_VFP_MVFR1] = 0x00000000;
        memcpy(env->cp15.c0_c1, arm1136_cp15_c0_c1, 8 * sizeof(uint32_t));
        memcpy(env->cp15.c0_c2, arm1136_cp15_c0_c2, 8 * sizeof(uint32_t));
        env->cp15.c0_cachetype = 0x1dd20d2;
        env->cp15.c1_sys = 0x00050078;
        break;
    case ARM_CPUID_ARM1176:
        set_feature(env, ARM_FEATURE_V4T);
        set_feature(env, ARM_FEATURE_V5);
        set_feature(env, ARM_FEATURE_V6);
        set_feature(env, ARM_FEATURE_V6K);
        set_feature(env, ARM_FEATURE_VFP);
        set_feature(env, ARM_FEATURE_AUXCR);
        set_feature(env, ARM_FEATURE_VAPA);
        env->vfp.xregs[ARM_VFP_FPSID] = 0x410120b5;
        env->vfp.xregs[ARM_VFP_MVFR0] = 0x11111111;
        env->vfp.xregs[ARM_VFP_MVFR1] = 0x00000000;
        memcpy(env->cp15.c0_c1, arm1176_cp15_c0_c1, 8 * sizeof(uint32_t));
        memcpy(env->cp15.c0_c2, arm1176_cp15_c0_c2, 8 * sizeof(uint32_t));
        env->cp15.c0_cachetype = 0x1dd20d2;
        env->cp15.c1_sys = 0x00050078;
        break;
    case ARM_CPUID_ARM11MPCORE:
        set_feature(env, ARM_FEATURE_V4T);
        set_feature(env, ARM_FEATURE_V5);
        set_feature(env, ARM_FEATURE_V6);
        set_feature(env, ARM_FEATURE_V6K);
        set_feature(env, ARM_FEATURE_VFP);
        set_feature(env, ARM_FEATURE_AUXCR);
        set_feature(env, ARM_FEATURE_VAPA);
        env->vfp.xregs[ARM_VFP_FPSID] = 0x410120b4;
        env->vfp.xregs[ARM_VFP_MVFR0] = 0x11111111;
        env->vfp.xregs[ARM_VFP_MVFR1] = 0x00000000;
        memcpy(env->cp15.c0_c1, mpcore_cp15_c0_c1, 8 * sizeof(uint32_t));
        memcpy(env->cp15.c0_c2, mpcore_cp15_c0_c2, 8 * sizeof(uint32_t));
        env->cp15.c0_cachetype = 0x1dd20d2;
        break;
    case ARM_CPUID_CORTEXA8:
        set_feature(env, ARM_FEATURE_V4T);
        set_feature(env, ARM_FEATURE_V5);
        set_feature(env, ARM_FEATURE_V6);
        set_feature(env, ARM_FEATURE_V6K);
        set_feature(env, ARM_FEATURE_V7);
        set_feature(env, ARM_FEATURE_AUXCR);
        set_feature(env, ARM_FEATURE_THUMB2);
        set_feature(env, ARM_FEATURE_VFP);
        set_feature(env, ARM_FEATURE_VFP3);
        set_feature(env, ARM_FEATURE_NEON);
        set_feature(env, ARM_FEATURE_THUMB2EE);
        env->vfp.xregs[ARM_VFP_FPSID] = 0x410330c0;
        env->vfp.xregs[ARM_VFP_MVFR0] = 0x11110222;
        env->vfp.xregs[ARM_VFP_MVFR1] = 0x00011100;
        memcpy(env->cp15.c0_c1, cortexa8_cp15_c0_c1, 8 * sizeof(uint32_t));
        memcpy(env->cp15.c0_c2, cortexa8_cp15_c0_c2, 8 * sizeof(uint32_t));
        env->cp15.c0_cachetype = 0x82048004;
        env->cp15.c0_clid = (1 << 27) | (2 << 24) | 3;
        env->cp15.c0_ccsid[0] = 0xe007e01a; /* 16k L1 dcache. */
        env->cp15.c0_ccsid[1] = 0x2007e01a; /* 16k L1 icache. */
        env->cp15.c0_ccsid[2] = 0xf0000000; /* No L2 icache. */
        env->cp15.c1_sys = 0x00c50078;
        break;
    case ARM_CPUID_CORTEXA9:
        set_feature(env, ARM_FEATURE_V4T);
        set_feature(env, ARM_FEATURE_V5);
        set_feature(env, ARM_FEATURE_V6);
        set_feature(env, ARM_FEATURE_V6K);
        set_feature(env, ARM_FEATURE_V7);
        set_feature(env, ARM_FEATURE_AUXCR);
        set_feature(env, ARM_FEATURE_THUMB2);
        set_feature(env, ARM_FEATURE_VFP);
        set_feature(env, ARM_FEATURE_VFP3);
        set_feature(env, ARM_FEATURE_VFP_FP16);
        set_feature(env, ARM_FEATURE_NEON);
        set_feature(env, ARM_FEATURE_THUMB2EE);
        /* Note that A9 supports the MP extensions even for
         * A9UP and single-core A9MP (which are both different
         * and valid configurations; we don't model A9UP).
         */
        set_feature(env, ARM_FEATURE_V7MP);
        env->vfp.xregs[ARM_VFP_FPSID] = 0x41034000; /* Guess */
        env->vfp.xregs[ARM_VFP_MVFR0] = 0x11110222;
        env->vfp.xregs[ARM_VFP_MVFR1] = 0x01111111;
        memcpy(env->cp15.c0_c1, cortexa9_cp15_c0_c1, 8 * sizeof(uint32_t));
        memcpy(env->cp15.c0_c2, cortexa9_cp15_c0_c2, 8 * sizeof(uint32_t));
        env->cp15.c0_cachetype = 0x80038003;
        env->cp15.c0_clid = (1 << 27) | (1 << 24) | 3;
        env->cp15.c0_ccsid[0] = 0xe00fe015; /* 16k L1 dcache. */
        env->cp15.c0_ccsid[1] = 0x200fe015; /* 16k L1 icache. */
        env->cp15.c1_sys = 0x00c50078;
        break;
    case ARM_CPUID_CORTEXM3:
        set_feature(env, ARM_FEATURE_V4T);
        set_feature(env, ARM_FEATURE_V5);
        set_feature(env, ARM_FEATURE_V6);
        set_feature(env, ARM_FEATURE_THUMB2);
        set_feature(env, ARM_FEATURE_V7);
        set_feature(env, ARM_FEATURE_M);
        set_feature(env, ARM_FEATURE_THUMB_DIV);
        break;
    case ARM_CPUID_ANY: /* For userspace emulation.  */
        set_feature(env, ARM_FEATURE_V4T);
        set_feature(env, ARM_FEATURE_V5);
        set_feature(env, ARM_FEATURE_V6);
        set_feature(env, ARM_FEATURE_V6K);
        set_feature(env, ARM_FEATURE_V7);
        set_feature(env, ARM_FEATURE_THUMB2);
        set_feature(env, ARM_FEATURE_VFP);
        set_feature(env, ARM_FEATURE_VFP3);
        set_feature(env, ARM_FEATURE_VFP4);
        set_feature(env, ARM_FEATURE_VFP_FP16);
        set_feature(env, ARM_FEATURE_NEON);
        set_feature(env, ARM_FEATURE_THUMB2EE);
        set_feature(env, ARM_FEATURE_ARM_DIV);
        set_feature(env, ARM_FEATURE_V7MP);
        break;
    case ARM_CPUID_TI915T:
    case ARM_CPUID_TI925T:
        set_feature(env, ARM_FEATURE_V4T);
        set_feature(env, ARM_FEATURE_OMAPCP);
        env->cp15.c0_cpuid = ARM_CPUID_TI925T; /* Depends on wiring.  */
        env->cp15.c0_cachetype = 0x5109149;
        env->cp15.c1_sys = 0x00000070;
        env->cp15.c15_i_max = 0x000;
        env->cp15.c15_i_min = 0xff0;
        break;
    case ARM_CPUID_PXA250:
    case ARM_CPUID_PXA255:
    case ARM_CPUID_PXA260:
    case ARM_CPUID_PXA261:
    case ARM_CPUID_PXA262:
        set_feature(env, ARM_FEATURE_V4T);
        set_feature(env, ARM_FEATURE_V5);
        set_feature(env, ARM_FEATURE_XSCALE);
        /* JTAG_ID is ((id << 28) | 0x09265013) */
        env->cp15.c0_cachetype = 0xd172172;
        env->cp15.c1_sys = 0x00000078;
        break;
    case ARM_CPUID_PXA270_A0:
    case ARM_CPUID_PXA270_A1:
    case ARM_CPUID_PXA270_B0:
    case ARM_CPUID_PXA270_B1:
    case ARM_CPUID_PXA270_C0:
    case ARM_CPUID_PXA270_C5:
        set_feature(env, ARM_FEATURE_V4T);
        set_feature(env, ARM_FEATURE_V5);
        set_feature(env, ARM_FEATURE_XSCALE);
        /* JTAG_ID is ((id << 28) | 0x09265013) */
        set_feature(env, ARM_FEATURE_IWMMXT);
        env->iwmmxt.cregs[ARM_IWMMXT_wCID] = 0x69051000 | 'Q';
        env->cp15.c0_cachetype = 0xd172172;
        env->cp15.c1_sys = 0x00000078;
        break;
    case ARM_CPUID_SA1100:
    case ARM_CPUID_SA1110:
        set_feature(env, ARM_FEATURE_STRONGARM);
        env->cp15.c1_sys = 0x00000070;
        break;
    default:
        cpu_abort(env, "Bad CPU ID: %x\n", id);
        break;
    }

    /* Some features automatically imply others: */
    if (arm_feature(env, ARM_FEATURE_V7)) {
        set_feature(env, ARM_FEATURE_VAPA);
    }
    if (arm_feature(env, ARM_FEATURE_ARM_DIV)) {
        set_feature(env, ARM_FEATURE_THUMB_DIV);
    }
}

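/* Reset the CPU, preserving only the main ID register so that the
 * model-specific state can be reinstated by cpu_reset_model_id().
 */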
void cpu_reset(CPUARMState *env)
{
    uint32_t id;

    if (qemu_loglevel_mask(CPU_LOG_RESET)) {
        qemu_log("CPU Reset (CPU %d)\n", env->cpu_index);
        log_cpu_state(env, 0);
    }

    id = env->cp15.c0_cpuid;
    memset(env, 0, offsetof(CPUARMState, breakpoints));
    if (id)
        cpu_reset_model_id(env, id);
#if defined (CONFIG_USER_ONLY)
    env->uncached_cpsr = ARM_CPU_MODE_USR;
    /* For user mode we must enable access to coprocessors */
    env->vfp.xregs[ARM_VFP_FPEXC] = 1 << 30;
    if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
        env->cp15.c15_cpar = 3;
    } else if (arm_feature(env, ARM_FEATURE_XSCALE)) {
        env->cp15.c15_cpar = 1;
    }
#else
    /* SVC mode with interrupts disabled.  */
    env->uncached_cpsr = ARM_CPU_MODE_SVC | CPSR_A | CPSR_F | CPSR_I;
    /* On ARMv7-M the CPSR_I is the value of the PRIMASK register, and is
       clear at reset.  Initial SP and PC are loaded from ROM.  */
    if (IS_M(env)) {
        uint32_t pc;
        uint8_t *rom;
        env->uncached_cpsr &= ~CPSR_I;
        rom = rom_ptr(0);
        if (rom) {
            /* We should really use ldl_phys here, in case the guest
               modified flash and reset itself.  However images
               loaded via -kernel have not been copied yet, so load the
               values directly from there.  */
            env->regs[13] = ldl_p(rom);
            pc = ldl_p(rom + 4);
            env->thumb = pc & 1;
            env->regs[15] = pc & ~1;
        }
    }
    env->vfp.xregs[ARM_VFP_FPEXC] = 0;
    env->cp15.c2_base_mask = 0xffffc000u;
    /* v7 performance monitor control register: same implementor
     * field as main ID register, and we implement no event counters.
     */
    env->cp15.c9_pmcr = (id & 0xff000000);
#endif
    set_flush_to_zero(1, &env->vfp.standard_fp_status);
    set_flush_inputs_to_zero(1, &env->vfp.standard_fp_status);
    set_default_nan_mode(1, &env->vfp.standard_fp_status);
    set_float_detect_tininess(float_tininess_before_rounding,
                              &env->vfp.fp_status);
    set_float_detect_tininess(float_tininess_before_rounding,
                              &env->vfp.standard_fp_status);
    tlb_flush(env, 1);
}

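/* Read one VFP/Neon register for the gdbstub.  Returns the number of bytes
 * stored in buf, or 0 if reg is outside the coprocessor register set.
 */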
static int vfp_gdb_get_reg(CPUState *env, uint8_t *buf, int reg)
{
    int nregs;

    /* VFP data registers are always little-endian.  */
    nregs = arm_feature(env, ARM_FEATURE_VFP3) ? 32 : 16;
    if (reg < nregs) {
        stfq_le_p(buf, env->vfp.regs[reg]);
        return 8;
    }
    if (arm_feature(env, ARM_FEATURE_NEON)) {
        /* Aliases for Q regs.  */
        nregs += 16;
        if (reg < nregs) {
            stfq_le_p(buf, env->vfp.regs[(reg - 32) * 2]);
            stfq_le_p(buf + 8, env->vfp.regs[(reg - 32) * 2 + 1]);
            return 16;
        }
    }
    switch (reg - nregs) {
    case 0: stl_p(buf, env->vfp.xregs[ARM_VFP_FPSID]); return 4;
    case 1: stl_p(buf, env->vfp.xregs[ARM_VFP_FPSCR]); return 4;
    case 2: stl_p(buf, env->vfp.xregs[ARM_VFP_FPEXC]); return 4;
    }
    return 0;
}

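/* Write one VFP/Neon register from the gdbstub.  Returns the number of bytes
 * consumed from buf, or 0 if reg is outside the coprocessor register set.
 */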
static int vfp_gdb_set_reg(CPUState *env, uint8_t *buf, int reg)
{
    int nregs;

    nregs = arm_feature(env, ARM_FEATURE_VFP3) ? 32 : 16;
    if (reg < nregs) {
        env->vfp.regs[reg] = ldfq_le_p(buf);
        return 8;
    }
    if (arm_feature(env, ARM_FEATURE_NEON)) {
        nregs += 16;
        if (reg < nregs) {
            env->vfp.regs[(reg - 32) * 2] = ldfq_le_p(buf);
            env->vfp.regs[(reg - 32) * 2 + 1] = ldfq_le_p(buf + 8);
            return 16;
        }
    }
    switch (reg - nregs) {
    case 0: env->vfp.xregs[ARM_VFP_FPSID] = ldl_p(buf); return 4;
    case 1: env->vfp.xregs[ARM_VFP_FPSCR] = ldl_p(buf); return 4;
    case 2: env->vfp.xregs[ARM_VFP_FPEXC] = ldl_p(buf) & (1 << 30); return 4;
    }
    return 0;
}

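/* Allocate and initialise a CPU for the named model.  Returns NULL if the
 * model name is not recognised.
 */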
CPUARMState *cpu_arm_init(const char *cpu_model)
{
    CPUARMState *env;
    uint32_t id;
    static int inited = 0;

    id = cpu_arm_find_by_name(cpu_model);
    if (id == 0)
        return NULL;
    env = g_malloc0(sizeof(CPUARMState));
    cpu_exec_init(env);
    if (!inited) {
        inited = 1;
        arm_translate_init();
    }

    env->cpu_model_str = cpu_model;
    env->cp15.c0_cpuid = id;
    cpu_reset(env);
    if (arm_feature(env, ARM_FEATURE_NEON)) {
        gdb_register_coprocessor(env, vfp_gdb_get_reg, vfp_gdb_set_reg,
                                 51, "arm-neon.xml", 0);
    } else if (arm_feature(env, ARM_FEATURE_VFP3)) {
        gdb_register_coprocessor(env, vfp_gdb_get_reg, vfp_gdb_set_reg,
                                 35, "arm-vfp3.xml", 0);
    } else if (arm_feature(env, ARM_FEATURE_VFP)) {
        gdb_register_coprocessor(env, vfp_gdb_get_reg, vfp_gdb_set_reg,
                                 19, "arm-vfp.xml", 0);
    }
    qemu_init_vcpu(env);
    return env;
}

struct arm_cpu_t {
    uint32_t id;
    const char *name;
};

static const struct arm_cpu_t arm_cpu_names[] = {
    { ARM_CPUID_ARM926, "arm926"},
    { ARM_CPUID_ARM946, "arm946"},
    { ARM_CPUID_ARM1026, "arm1026"},
    { ARM_CPUID_ARM1136, "arm1136"},
    { ARM_CPUID_ARM1136_R2, "arm1136-r2"},
    { ARM_CPUID_ARM1176, "arm1176"},
    { ARM_CPUID_ARM11MPCORE, "arm11mpcore"},
    { ARM_CPUID_CORTEXM3, "cortex-m3"},
    { ARM_CPUID_CORTEXA8, "cortex-a8"},
    { ARM_CPUID_CORTEXA9, "cortex-a9"},
    { ARM_CPUID_TI925T, "ti925t" },
    { ARM_CPUID_PXA250, "pxa250" },
    { ARM_CPUID_SA1100,    "sa1100" },
    { ARM_CPUID_SA1110,    "sa1110" },
    { ARM_CPUID_PXA255, "pxa255" },
    { ARM_CPUID_PXA260, "pxa260" },
    { ARM_CPUID_PXA261, "pxa261" },
    { ARM_CPUID_PXA262, "pxa262" },
    { ARM_CPUID_PXA270, "pxa270" },
    { ARM_CPUID_PXA270_A0, "pxa270-a0" },
    { ARM_CPUID_PXA270_A1, "pxa270-a1" },
    { ARM_CPUID_PXA270_B0, "pxa270-b0" },
    { ARM_CPUID_PXA270_B1, "pxa270-b1" },
    { ARM_CPUID_PXA270_C0, "pxa270-c0" },
    { ARM_CPUID_PXA270_C5, "pxa270-c5" },
    { ARM_CPUID_ANY, "any"},
    { 0, NULL}
};

void arm_cpu_list(FILE *f, fprintf_function cpu_fprintf)
{
    int i;

    (*cpu_fprintf)(f, "Available CPUs:\n");
    for (i = 0; arm_cpu_names[i].name; i++) {
        (*cpu_fprintf)(f, "  %s\n", arm_cpu_names[i].name);
    }
}

/* return 0 if not found */
static uint32_t cpu_arm_find_by_name(const char *name)
{
    int i;
    uint32_t id;

    id = 0;
    for (i = 0; arm_cpu_names[i].name; i++) {
        if (strcmp(name, arm_cpu_names[i].name) == 0) {
            id = arm_cpu_names[i].id;
            break;
        }
    }
    return id;
}

void cpu_arm_close(CPUARMState *env)
{
    g_free(env);
}

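/* Recompose the CPSR from the uncached bits and the separately cached
 * NZCVQ, GE, IT and Thumb state.
 */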
uint32_t cpsr_read(CPUARMState *env)
{
    int ZF;
    ZF = (env->ZF == 0);
    return env->uncached_cpsr | (env->NF & 0x80000000) | (ZF << 30) |
        (env->CF << 29) | ((env->VF & 0x80000000) >> 3) | (env->QF << 27)
        | (env->thumb << 5) | ((env->condexec_bits & 3) << 25)
        | ((env->condexec_bits & 0xfc) << 8)
        | (env->GE << 16);
}

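/* Update the CPSR: only the bits selected by mask are written, and a mode
 * change triggers a register bank switch via switch_mode().
 */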
void cpsr_write(CPUARMState *env, uint32_t val, uint32_t mask)
{
    if (mask & CPSR_NZCV) {
        env->ZF = (~val) & CPSR_Z;
        env->NF = val;
        env->CF = (val >> 29) & 1;
        env->VF = (val << 3) & 0x80000000;
    }
    if (mask & CPSR_Q)
        env->QF = ((val & CPSR_Q) != 0);
    if (mask & CPSR_T)
        env->thumb = ((val & CPSR_T) != 0);
    if (mask & CPSR_IT_0_1) {
        env->condexec_bits &= ~3;
        env->condexec_bits |= (val >> 25) & 3;
    }
    if (mask & CPSR_IT_2_7) {
        env->condexec_bits &= 3;
        env->condexec_bits |= (val >> 8) & 0xfc;
    }
    if (mask & CPSR_GE) {
        env->GE = (val >> 16) & 0xf;
    }

    if ((env->uncached_cpsr ^ val) & mask & CPSR_M) {
        switch_mode(env, val & CPSR_M);
    }
    mask &= ~CACHED_CPSR_BITS;
    env->uncached_cpsr = (env->uncached_cpsr & ~mask) | (val & mask);
}

/* Sign/zero extend */
uint32_t HELPER(sxtb16)(uint32_t x)
{
    uint32_t res;
    res = (uint16_t)(int8_t)x;
    res |= (uint32_t)(int8_t)(x >> 16) << 16;
    return res;
}

uint32_t HELPER(uxtb16)(uint32_t x)
{
    uint32_t res;
    res = (uint16_t)(uint8_t)x;
    res |= (uint32_t)(uint8_t)(x >> 16) << 16;
    return res;
}

uint32_t HELPER(clz)(uint32_t x)
{
    return clz32(x);
}

int32_t HELPER(sdiv)(int32_t num, int32_t den)
{
    if (den == 0)
      return 0;
    if (num == INT_MIN && den == -1)
      return INT_MIN;
    return num / den;
}

uint32_t HELPER(udiv)(uint32_t num, uint32_t den)
{
    if (den == 0)
      return 0;
    return num / den;
}

uint32_t HELPER(rbit)(uint32_t x)
{
    x =  ((x & 0xff000000) >> 24)
       | ((x & 0x00ff0000) >> 8)
       | ((x & 0x0000ff00) << 8)
       | ((x & 0x000000ff) << 24);
    x =  ((x & 0xf0f0f0f0) >> 4)
       | ((x & 0x0f0f0f0f) << 4);
    x =  ((x & 0x88888888) >> 3)
       | ((x & 0x44444444) >> 1)
       | ((x & 0x22222222) << 1)
       | ((x & 0x11111111) << 3);
    return x;
}

uint32_t HELPER(abs)(uint32_t x)
{
    return ((int32_t)x < 0) ? -x : x;
}

#if defined(CONFIG_USER_ONLY)

void do_interrupt (CPUState *env)
{
    env->exception_index = -1;
}

int cpu_arm_handle_mmu_fault (CPUState *env, target_ulong address, int rw,
                              int mmu_idx)
{
    if (rw == 2) {
        env->exception_index = EXCP_PREFETCH_ABORT;
        env->cp15.c6_insn = address;
    } else {
        env->exception_index = EXCP_DATA_ABORT;
        env->cp15.c6_data = address;
    }
    return 1;
}

/* These should probably raise undefined insn exceptions.  */
void HELPER(set_cp)(CPUState *env, uint32_t insn, uint32_t val)
{
    int op1 = (insn >> 8) & 0xf;
    cpu_abort(env, "cp%i insn %08x\n", op1, insn);
    return;
}

uint32_t HELPER(get_cp)(CPUState *env, uint32_t insn)
{
    int op1 = (insn >> 8) & 0xf;
    cpu_abort(env, "cp%i insn %08x\n", op1, insn);
    return 0;
}

void HELPER(set_cp15)(CPUState *env, uint32_t insn, uint32_t val)
{
    cpu_abort(env, "cp15 insn %08x\n", insn);
}

uint32_t HELPER(get_cp15)(CPUState *env, uint32_t insn)
{
    cpu_abort(env, "cp15 insn %08x\n", insn);
}

/* These should probably raise undefined insn exceptions.  */
void HELPER(v7m_msr)(CPUState *env, uint32_t reg, uint32_t val)
{
    cpu_abort(env, "v7m_msr %d\n", reg);
}

uint32_t HELPER(v7m_mrs)(CPUState *env, uint32_t reg)
{
    cpu_abort(env, "v7m_mrs %d\n", reg);
    return 0;
}

void switch_mode(CPUState *env, int mode)
{
    if (mode != ARM_CPU_MODE_USR)
        cpu_abort(env, "Tried to switch out of user mode\n");
}

void HELPER(set_r13_banked)(CPUState *env, uint32_t mode, uint32_t val)
{
    cpu_abort(env, "banked r13 write\n");
}

uint32_t HELPER(get_r13_banked)(CPUState *env, uint32_t mode)
{
    cpu_abort(env, "banked r13 read\n");
    return 0;
}

#else

extern int semihosting_enabled;

/* Map CPU modes onto saved register banks.  */
static inline int bank_number (int mode)
{
    switch (mode) {
    case ARM_CPU_MODE_USR:
    case ARM_CPU_MODE_SYS:
        return 0;
    case ARM_CPU_MODE_SVC:
        return 1;
    case ARM_CPU_MODE_ABT:
        return 2;
    case ARM_CPU_MODE_UND:
        return 3;
    case ARM_CPU_MODE_IRQ:
        return 4;
    case ARM_CPU_MODE_FIQ:
        return 5;
    }
    cpu_abort(cpu_single_env, "Bad mode %x\n", mode);
    return -1;
}

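/* Switch the CPU to a new processor mode: save the outgoing mode's banked
 * r13/r14/SPSR (and the FIQ-banked r8-r12 where needed) and load the new
 * mode's copies.
 */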
void switch_mode(CPUState *env, int mode)
{
    int old_mode;
    int i;

    old_mode = env->uncached_cpsr & CPSR_M;
    if (mode == old_mode)
        return;

    if (old_mode == ARM_CPU_MODE_FIQ) {
        memcpy (env->fiq_regs, env->regs + 8, 5 * sizeof(uint32_t));
        memcpy (env->regs + 8, env->usr_regs, 5 * sizeof(uint32_t));
    } else if (mode == ARM_CPU_MODE_FIQ) {
        memcpy (env->usr_regs, env->regs + 8, 5 * sizeof(uint32_t));
        memcpy (env->regs + 8, env->fiq_regs, 5 * sizeof(uint32_t));
    }

    i = bank_number(old_mode);
    env->banked_r13[i] = env->regs[13];
    env->banked_r14[i] = env->regs[14];
    env->banked_spsr[i] = env->spsr;

    i = bank_number(mode);
    env->regs[13] = env->banked_r13[i];
    env->regs[14] = env->banked_r14[i];
    env->spsr = env->banked_spsr[i];
}

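/* Push/pop a 32-bit word on the stack addressed by the current v7M SP.  */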
static void v7m_push(CPUARMState *env, uint32_t val)
{
    env->regs[13] -= 4;
    stl_phys(env->regs[13], val);
}

static uint32_t v7m_pop(CPUARMState *env)
{
    uint32_t val;
    val = ldl_phys(env->regs[13]);
    env->regs[13] += 4;
    return val;
}

/* Switch to V7M main or process stack pointer.  */
static void switch_v7m_sp(CPUARMState *env, int process)
{
    uint32_t tmp;
    if (env->v7m.current_sp != process) {
        tmp = env->v7m.other_sp;
        env->v7m.other_sp = env->regs[13];
        env->regs[13] = tmp;
        env->v7m.current_sp = process;
    }
}

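/* Perform v7M exception return: unstack the registers that were pushed on
 * exception entry and switch back to the stack selected by the magic
 * EXC_RETURN value in the PC.
 */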
static void do_v7m_exception_exit(CPUARMState *env)
{
    uint32_t type;
    uint32_t xpsr;

    type = env->regs[15];
    if (env->v7m.exception != 0)
        armv7m_nvic_complete_irq(env->nvic, env->v7m.exception);

    /* Switch to the target stack.  */
    switch_v7m_sp(env, (type & 4) != 0);
    /* Pop registers.  */
    env->regs[0] = v7m_pop(env);
    env->regs[1] = v7m_pop(env);
    env->regs[2] = v7m_pop(env);
    env->regs[3] = v7m_pop(env);
    env->regs[12] = v7m_pop(env);
    env->regs[14] = v7m_pop(env);
    env->regs[15] = v7m_pop(env);
    xpsr = v7m_pop(env);
    xpsr_write(env, xpsr, 0xfffffdff);
    /* Undo stack alignment.  */
    if (xpsr & 0x200)
        env->regs[13] |= 4;
    /* ??? The exception return type specifies Thread/Handler mode.  However
       this is also implied by the xPSR value. Not sure what to do
       if there is a mismatch.  */
    /* ??? Likewise for mismatches between the CONTROL register and the stack
       pointer.  */
}

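/* Handle exception entry for v7M (Cortex-M) profile CPUs: pend the exception
 * on the NVIC where appropriate, otherwise stack the caller-saved registers
 * and vector to the handler.
 */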
static void do_interrupt_v7m(CPUARMState *env)
{
    uint32_t xpsr = xpsr_read(env);
    uint32_t lr;
    uint32_t addr;

    lr = 0xfffffff1;
    if (env->v7m.current_sp)
        lr |= 4;
    if (env->v7m.exception == 0)
        lr |= 8;

    /* For exceptions we just mark as pending on the NVIC, and let that
       handle it.  */
    /* TODO: Need to escalate if the current priority is higher than the
       one we're raising.  */
    switch (env->exception_index) {
    case EXCP_UDEF:
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE);
        return;
    case EXCP_SWI:
        env->regs[15] += 2;
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SVC);
        return;
    case EXCP_PREFETCH_ABORT:
    case EXCP_DATA_ABORT:
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_MEM);
        return;
    case EXCP_BKPT:
        if (semihosting_enabled) {
            int nr;
            nr = lduw_code(env->regs[15]) & 0xff;
            if (nr == 0xab) {
                env->regs[15] += 2;
                env->regs[0] = do_arm_semihosting(env);
                return;
            }
        }
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_DEBUG);
        return;
    case EXCP_IRQ:
        env->v7m.exception = armv7m_nvic_acknowledge_irq(env->nvic);
        break;
    case EXCP_EXCEPTION_EXIT:
        do_v7m_exception_exit(env);
        return;
    default:
        cpu_abort(env, "Unhandled exception 0x%x\n", env->exception_index);
        return; /* Never happens.  Keep compiler happy.  */
    }

    /* Align stack pointer.  */
    /* ??? Should only do this if Configuration Control Register
       STACKALIGN bit is set.  */
    if (env->regs[13] & 4) {
        env->regs[13] -= 4;
        xpsr |= 0x200;
    }
    /* Switch to the handler mode.  */
    v7m_push(env, xpsr);
    v7m_push(env, env->regs[15]);
    v7m_push(env, env->regs[14]);
    v7m_push(env, env->regs[12]);
    v7m_push(env, env->regs[3]);
    v7m_push(env, env->regs[2]);
    v7m_push(env, env->regs[1]);
    v7m_push(env, env->regs[0]);
    switch_v7m_sp(env, 0);
    env->uncached_cpsr &= ~CPSR_IT;
    env->regs[14] = lr;
    addr = ldl_phys(env->v7m.vecbase + env->v7m.exception * 4);
    env->regs[15] = addr & 0xfffffffe;
    env->thumb = addr & 1;
}

/* Handle a CPU exception.  */
void do_interrupt(CPUARMState *env)
{
    uint32_t addr;
    uint32_t mask;
    int new_mode;
    uint32_t offset;

    if (IS_M(env)) {
        do_interrupt_v7m(env);
        return;
    }
    /* TODO: Vectored interrupt controller.  */
    switch (env->exception_index) {
    case EXCP_UDEF:
        new_mode = ARM_CPU_MODE_UND;
        addr = 0x04;
        mask = CPSR_I;
        if (env->thumb)
            offset = 2;
        else
            offset = 4;
        break;
    case EXCP_SWI:
        if (semihosting_enabled) {
            /* Check for semihosting interrupt.  */
            if (env->thumb) {
                mask = lduw_code(env->regs[15] - 2) & 0xff;
            } else {
                mask = ldl_code(env->regs[15] - 4) & 0xffffff;
            }
            /* Only intercept calls from privileged modes, to provide some
               semblance of security.  */
            if (((mask == 0x123456 && !env->thumb)
                    || (mask == 0xab && env->thumb))
                  && (env->uncached_cpsr & CPSR_M) != ARM_CPU_MODE_USR) {
                env->regs[0] = do_arm_semihosting(env);
                return;
            }
        }
        new_mode = ARM_CPU_MODE_SVC;
        addr = 0x08;
        mask = CPSR_I;
        /* The PC already points to the next instruction.  */
        offset = 0;
        break;
    case EXCP_BKPT:
        /* See if this is a semihosting syscall.  */
        if (env->thumb && semihosting_enabled) {
            mask = lduw_code(env->regs[15]) & 0xff;
            if (mask == 0xab
                  && (env->uncached_cpsr & CPSR_M) != ARM_CPU_MODE_USR) {
                env->regs[15] += 2;
                env->regs[0] = do_arm_semihosting(env);
                return;
            }
        }
        env->cp15.c5_insn = 2;
        /* Fall through to prefetch abort.  */
    case EXCP_PREFETCH_ABORT:
        new_mode = ARM_CPU_MODE_ABT;
        addr = 0x0c;
        mask = CPSR_A | CPSR_I;
        offset = 4;
        break;
    case EXCP_DATA_ABORT:
        new_mode = ARM_CPU_MODE_ABT;
        addr = 0x10;
        mask = CPSR_A | CPSR_I;
        offset = 8;
        break;
    case EXCP_IRQ:
        new_mode = ARM_CPU_MODE_IRQ;
        addr = 0x18;
        /* Disable IRQ and imprecise data aborts.  */
        mask = CPSR_A | CPSR_I;
        offset = 4;
        break;
    case EXCP_FIQ:
        new_mode = ARM_CPU_MODE_FIQ;
        addr = 0x1c;
        /* Disable FIQ, IRQ and imprecise data aborts.  */
        mask = CPSR_A | CPSR_I | CPSR_F;
        offset = 4;
        break;
    default:
        cpu_abort(env, "Unhandled exception 0x%x\n", env->exception_index);
        return; /* Never happens.  Keep compiler happy.  */
    }
    /* High vectors.  */
    if (env->cp15.c1_sys & (1 << 13)) {
        addr += 0xffff0000;
    }
    switch_mode (env, new_mode);
    env->spsr = cpsr_read(env);
    /* Clear IT bits.  */
    env->condexec_bits = 0;
    /* Switch to the new mode, and to the correct instruction set.  */
    env->uncached_cpsr = (env->uncached_cpsr & ~CPSR_M) | new_mode;
    env->uncached_cpsr |= mask;
    /* This is a lie, as there was no c1_sys on V4T/V5, but who cares
     * and we should just guard the thumb mode on V4 */
    if (arm_feature(env, ARM_FEATURE_V4T)) {
        env->thumb = (env->cp15.c1_sys & (1 << 30)) != 0;
    }
    env->regs[14] = env->regs[15] + offset;
    env->regs[15] = addr;
    env->interrupt_request |= CPU_INTERRUPT_EXITTB;
}

/* Check section/page access permissions.
   Returns the page protection flags, or zero if the access is not
   permitted.  */
static inline int check_ap(CPUState *env, int ap, int domain, int access_type,
                           int is_user)
{
  int prot_ro;

  if (domain == 3)
    return PAGE_READ | PAGE_WRITE;

  if (access_type == 1)
      prot_ro = 0;
  else
      prot_ro = PAGE_READ;

  switch (ap) {
  case 0:
      if (access_type == 1)
          return 0;
      switch ((env->cp15.c1_sys >> 8) & 3) {
      case 1:
          return is_user ? 0 : PAGE_READ;
      case 2:
          return PAGE_READ;
      default:
          return 0;
      }
  case 1:
      return is_user ? 0 : PAGE_READ | PAGE_WRITE;
  case 2:
      if (is_user)
          return prot_ro;
      else
          return PAGE_READ | PAGE_WRITE;
  case 3:
      return PAGE_READ | PAGE_WRITE;
  case 4: /* Reserved.  */
      return 0;
  case 5:
      return is_user ? 0 : prot_ro;
  case 6:
      return prot_ro;
  case 7:
      if (!arm_feature (env, ARM_FEATURE_V6K))
          return 0;
      return prot_ro;
  default:
      abort();
  }
}

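/* Return the physical address of the level 1 descriptor for a virtual
 * address, using TTBR0 or TTBR1 as selected by the TTBCR split.
 */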
static uint32_t get_level1_table_address(CPUState *env, uint32_t address)
{
    uint32_t table;

    if (address & env->cp15.c2_mask)
        table = env->cp15.c2_base1 & 0xffffc000;
    else
        table = env->cp15.c2_base0 & env->cp15.c2_base_mask;

    table |= (address >> 18) & 0x3ffc;
    return table;
}

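/* Translate a virtual address using the ARMv5-format (short descriptor,
 * no XN) page tables.  Returns 0 on success and fills in *phys_ptr, *prot
 * and *page_size; on failure returns the fault status code with the domain
 * in bits [7:4].
 */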
static int get_phys_addr_v5(CPUState *env, uint32_t address, int access_type,
                            int is_user, uint32_t *phys_ptr, int *prot,
                            target_ulong *page_size)
{
    int code;
    uint32_t table;
    uint32_t desc;
    int type;
    int ap;
    int domain;
    uint32_t phys_addr;

    /* Pagetable walk.  */
    /* Lookup l1 descriptor.  */
    table = get_level1_table_address(env, address);
    desc = ldl_phys(table);
    type = (desc & 3);
    domain = (env->cp15.c3 >> ((desc >> 4) & 0x1e)) & 3;
    if (type == 0) {
        /* Section translation fault.  */
        code = 5;
        goto do_fault;
    }
    if (domain == 0 || domain == 2) {
        if (type == 2)
            code = 9; /* Section domain fault.  */
        else
            code = 11; /* Page domain fault.  */
        goto do_fault;
    }
    if (type == 2) {
        /* 1Mb section.  */
        phys_addr = (desc & 0xfff00000) | (address & 0x000fffff);
        ap = (desc >> 10) & 3;
        code = 13;
        *page_size = 1024 * 1024;
    } else {
        /* Lookup l2 entry.  */
        if (type == 1) {
            /* Coarse pagetable.  */
            table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc);
        } else {
            /* Fine pagetable.  */
            table = (desc & 0xfffff000) | ((address >> 8) & 0xffc);
        }
        desc = ldl_phys(table);
        switch (desc & 3) {
        case 0: /* Page translation fault.  */
            code = 7;
            goto do_fault;
        case 1: /* 64k page.  */
            phys_addr = (desc & 0xffff0000) | (address & 0xffff);
            ap = (desc >> (4 + ((address >> 13) & 6))) & 3;
            *page_size = 0x10000;
            break;
        case 2: /* 4k page.  */
            phys_addr = (desc & 0xfffff000) | (address & 0xfff);
            ap = (desc >> (4 + ((address >> 9) & 6))) & 3;
            *page_size = 0x1000;
            break;
        case 3: /* 1k page.  */
            if (type == 1) {
                if (arm_feature(env, ARM_FEATURE_XSCALE)) {
                    phys_addr = (desc & 0xfffff000) | (address & 0xfff);
                } else {
                    /* Page translation fault.  */
                    code = 7;
                    goto do_fault;
                }
            } else {
                phys_addr = (desc & 0xfffffc00) | (address & 0x3ff);
            }
            ap = (desc >> 4) & 3;
            *page_size = 0x400;
            break;
        default:
            /* Never happens, but compiler isn't smart enough to tell.  */
            abort();
        }
        code = 15;
    }
    *prot = check_ap(env, ap, domain, access_type, is_user);
    if (!*prot) {
        /* Access permission fault.  */
        goto do_fault;
    }
    *prot |= PAGE_EXEC;
    *phys_ptr = phys_addr;
    return 0;
do_fault:
    return code | (domain << 4);
}

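/* Translate a virtual address using the ARMv6/v7 short-descriptor page
 * tables (with XN, supersections and the simplified access model).
 * Same return convention as get_phys_addr_v5().
 */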
static int get_phys_addr_v6(CPUState *env, uint32_t address, int access_type,
                            int is_user, uint32_t *phys_ptr, int *prot,
                            target_ulong *page_size)
{
    int code;
    uint32_t table;
    uint32_t desc;
    uint32_t xn;
    int type;
    int ap;
    int domain;
    uint32_t phys_addr;

    /* Pagetable walk.  */
    /* Lookup l1 descriptor.  */
    table = get_level1_table_address(env, address);
    desc = ldl_phys(table);
    type = (desc & 3);
    if (type == 0) {
        /* Section translation fault.  */
        code = 5;
        domain = 0;
        goto do_fault;
    } else if (type == 2 && (desc & (1 << 18))) {
        /* Supersection.  */
        domain = 0;
    } else {
        /* Section or page.  */
        domain = (desc >> 4) & 0x1e;
    }
    domain = (env->cp15.c3 >> domain) & 3;
    if (domain == 0 || domain == 2) {
        if (type == 2)
            code = 9; /* Section domain fault.  */
        else
            code = 11; /* Page domain fault.  */
        goto do_fault;
    }
    if (type == 2) {
        if (desc & (1 << 18)) {
            /* Supersection.  */
            phys_addr = (desc & 0xff000000) | (address & 0x00ffffff);
            *page_size = 0x1000000;
        } else {
            /* Section.  */
            phys_addr = (desc & 0xfff00000) | (address & 0x000fffff);
            *page_size = 0x100000;
        }
        ap = ((desc >> 10) & 3) | ((desc >> 13) & 4);
        xn = desc & (1 << 4);
        code = 13;
    } else {
        /* Lookup l2 entry.  */
        table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc);
        desc = ldl_phys(table);
        ap = ((desc >> 4) & 3) | ((desc >> 7) & 4);
        switch (desc & 3) {
        case 0: /* Page translation fault.  */
            code = 7;
            goto do_fault;
        case 1: /* 64k page.  */
            phys_addr = (desc & 0xffff0000) | (address & 0xffff);
            xn = desc & (1 << 15);
            *page_size = 0x10000;
            break;
        case 2: case 3: /* 4k page.  */
            phys_addr = (desc & 0xfffff000) | (address & 0xfff);
            xn = desc & 1;
            *page_size = 0x1000;
            break;
        default:
            /* Never happens, but compiler isn't smart enough to tell.  */
            abort();
        }
        code = 15;
    }
    if (domain == 3) {
        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
    } else {
        if (xn && access_type == 2)
            goto do_fault;

        /* The simplified model uses AP[0] as an access control bit.  */
        if ((env->cp15.c1_sys & (1 << 29)) && (ap & 1) == 0) {
            /* Access flag fault.  */
            code = (code == 15) ? 6 : 3;
            goto do_fault;
        }
        *prot = check_ap(env, ap, domain, access_type, is_user);
        if (!*prot) {
            /* Access permission fault.  */
            goto do_fault;
        }
        if (!xn) {
            *prot |= PAGE_EXEC;
        }
    }
    *phys_ptr = phys_addr;
    return 0;
do_fault:
    return code | (domain << 4);
}

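/* Look up the MPU region covering an address and return the access
 * permissions for it; the MPU performs no address translation.
 */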
static int get_phys_addr_mpu(CPUState *env, uint32_t address, int access_type,
                             int is_user, uint32_t *phys_ptr, int *prot)
{
    int n;
    uint32_t mask;
    uint32_t base;

    *phys_ptr = address;
    for (n = 7; n >= 0; n--) {
        base = env->cp15.c6_region[n];
        if ((base & 1) == 0)
            continue;
        mask = 1 << ((base >> 1) & 0x1f);
        /* Keep this shift separate from the above to avoid an
           (undefined) << 32.  */
        mask = (mask << 1) - 1;
        if (((base ^ address) & ~mask) == 0)
            break;
    }
    if (n < 0)
        return 2;

    if (access_type == 2) {
        mask = env->cp15.c5_insn;
    } else {
        mask = env->cp15.c5_data;
    }
    mask = (mask >> (n * 4)) & 0xf;
    switch (mask) {
    case 0:
        return 1;
    case 1:
        if (is_user)
          return 1;
        *prot = PAGE_READ | PAGE_WRITE;
        break;
    case 2:
        *prot = PAGE_READ;
        if (!is_user)
            *prot |= PAGE_WRITE;
        break;
    case 3:
        *prot = PAGE_READ | PAGE_WRITE;
        break;
    case 5:
        if (is_user)
            return 1;
        *prot = PAGE_READ;
        break;
    case 6:
        *prot = PAGE_READ;
        break;
    default:
        /* Bad permission.  */
        return 1;
    }
    *prot |= PAGE_EXEC;
    return 0;
}

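/* Top-level address translation: handles the FCSE remapping and the
 * MMU-off case, then dispatches to the MPU, v6 or v5 table walker.
 */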
static inline int get_phys_addr(CPUState *env, uint32_t address,
                                int access_type, int is_user,
                                uint32_t *phys_ptr, int *prot,
                                target_ulong *page_size)
{
    /* Fast Context Switch Extension.  */
    if (address < 0x02000000)
        address += env->cp15.c13_fcse;

    if ((env->cp15.c1_sys & 1) == 0) {
        /* MMU/MPU disabled.  */
        *phys_ptr = address;
        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        *page_size = TARGET_PAGE_SIZE;
        return 0;
    } else if (arm_feature(env, ARM_FEATURE_MPU)) {
        *page_size = TARGET_PAGE_SIZE;
        return get_phys_addr_mpu(env, address, access_type, is_user, phys_ptr,
                                 prot);
    } else if (env->cp15.c1_sys & (1 << 23)) {
        return get_phys_addr_v6(env, address, access_type, is_user, phys_ptr,
                                prot, page_size);
    } else {
        return get_phys_addr_v5(env, address, access_type, is_user, phys_ptr,
                                prot, page_size);
    }
}

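/* Handle a TLB miss: translate the address and either install the mapping
 * in the QEMU TLB or record the fault in the cp15 fault status/address
 * registers and raise the appropriate abort exception.
 */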
int cpu_arm_handle_mmu_fault (CPUState *env, target_ulong address,
                              int access_type, int mmu_idx)
{
    uint32_t phys_addr;
    target_ulong page_size;
    int prot;
    int ret, is_user;

    is_user = mmu_idx == MMU_USER_IDX;
    ret = get_phys_addr(env, address, access_type, is_user, &phys_addr, &prot,
                        &page_size);
    if (ret == 0) {
        /* Map a single [sub]page.  */
        phys_addr &= ~(uint32_t)0x3ff;
        address &= ~(uint32_t)0x3ff;
        tlb_set_page (env, address, phys_addr, prot, mmu_idx, page_size);
        return 0;
    }

    if (access_type == 2) {
        env->cp15.c5_insn = ret;
        env->cp15.c6_insn = address;
        env->exception_index = EXCP_PREFETCH_ABORT;
    } else {
        env->cp15.c5_data = ret;
        if (access_type == 1 && arm_feature(env, ARM_FEATURE_V6))
            env->cp15.c5_data |= (1 << 11);
        env->cp15.c6_data = address;
        env->exception_index = EXCP_DATA_ABORT;
    }
    return 1;
}

target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
{
    uint32_t phys_addr;
    target_ulong page_size;
    int prot;
    int ret;

    ret = get_phys_addr(env, addr, 0, 0, &phys_addr, &prot, &page_size);

    if (ret != 0)
        return -1;

    return phys_addr;
}

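/* Dispatch generic coprocessor (MCR/MRC) accesses to the handlers that a
 * board model registered for that coprocessor, if any.
 */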
void HELPER(set_cp)(CPUState *env, uint32_t insn, uint32_t val)
{
    int cp_num = (insn >> 8) & 0xf;
    int cp_info = (insn >> 5) & 7;
    int src = (insn >> 16) & 0xf;
    int operand = insn & 0xf;

    if (env->cp[cp_num].cp_write)
        env->cp[cp_num].cp_write(env->cp[cp_num].opaque,
                                 cp_info, src, operand, val);
}

uint32_t HELPER(get_cp)(CPUState *env, uint32_t insn)
{
    int cp_num = (insn >> 8) & 0xf;
    int cp_info = (insn >> 5) & 7;
    int dest = (insn >> 16) & 0xf;
    int operand = insn & 0xf;

    if (env->cp[cp_num].cp_read)
        return env->cp[cp_num].cp_read(env->cp[cp_num].opaque,
                                       cp_info, dest, operand);
    return 0;
}

/* Return basic MPU access permission bits.  */
static uint32_t simple_mpu_ap_bits(uint32_t val)
{
    uint32_t ret;
    uint32_t mask;
    int i;
    ret = 0;
    mask = 3;
    for (i = 0; i < 16; i += 2) {
        ret |= (val >> i) & mask;
        mask <<= 2;
    }
    return ret;
}

/* Pad basic MPU access permission bits to extended format.  */
static uint32_t extended_mpu_ap_bits(uint32_t val)
{
    uint32_t ret;
    uint32_t mask;
    int i;
    ret = 0;
    mask = 3;
    for (i = 0; i < 16; i += 2) {
        ret |= (val & mask) << i;
        mask <<= 2;
    }
    return ret;
}

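/* Handle a write (MCR) to a cp15 system control coprocessor register.  */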
void HELPER(set_cp15)(CPUState *env, uint32_t insn, uint32_t val)
{
    int op1;
    int op2;
    int crm;

    op1 = (insn >> 21) & 7;
    op2 = (insn >> 5) & 7;
    crm = insn & 0xf;
    switch ((insn >> 16) & 0xf) {
    case 0:
        /* ID codes.  */
        if (arm_feature(env, ARM_FEATURE_XSCALE))
            break;
        if (arm_feature(env, ARM_FEATURE_OMAPCP))
            break;
        if (arm_feature(env, ARM_FEATURE_V7)
                && op1 == 2 && crm == 0 && op2 == 0) {
            env->cp15.c0_cssel = val & 0xf;
            break;
        }
        goto bad_reg;
    case 1: /* System configuration.  */
        if (arm_feature(env, ARM_FEATURE_OMAPCP))
            op2 = 0;
        switch (op2) {
        case 0:
            if (!arm_feature(env, ARM_FEATURE_XSCALE) || crm == 0)
                env->cp15.c1_sys = val;
            /* ??? Lots of these bits are not implemented.  */
            /* This may enable/disable the MMU, so do a TLB flush.  */
            tlb_flush(env, 1);
            break;
        case 1: /* Auxiliary control register.  */
            if (arm_feature(env, ARM_FEATURE_XSCALE)) {
                env->cp15.c1_xscaleauxcr = val;
                break;
            }
            /* Not implemented.  */
            break;
        case 2:
            if (arm_feature(env, ARM_FEATURE_XSCALE))
                goto bad_reg;
            if (env->cp15.c1_coproc != val) {
                env->cp15.c1_coproc = val;
                /* ??? Is this safe when called from within a TB?  */
                tb_flush(env);
            }
            break;
        default:
            goto bad_reg;
        }
        break;
    case 2: /* MMU Page table control / MPU cache control.  */
        if (arm_feature(env, ARM_FEATURE_MPU)) {
            switch (op2) {
            case 0:
                env->cp15.c2_data = val;
                break;
            case 1:
                env->cp15.c2_insn = val;
                break;
            default:
                goto bad_reg;
            }
        } else {
            switch (op2) {
            case 0:
                env->cp15.c2_base0 = val;
                break;
            case 1:
                env->cp15.c2_base1 = val;
                break;
            case 2:
                val &= 7;
                env->cp15.c2_control = val;
                env->cp15.c2_mask = ~(((uint32_t)0xffffffffu) >> val);
                env->cp15.c2_base_mask = ~((uint32_t)0x3fffu >> val);
                break;
            default:
                goto bad_reg;
            }
        }
        break;
    case 3: /* MMU Domain access control / MPU write buffer control.  */
        env->cp15.c3 = val;
        tlb_flush(env, 1); /* Flush TLB as domain not tracked in TLB */
        break;
    case 4: /* Reserved.  */
        goto bad_reg;
    case 5: /* MMU Fault status / MPU access permission.  */
        if (arm_feature(env, ARM_FEATURE_OMAPCP))
            op2 = 0;
        switch (op2) {
        case 0:
            if (arm_feature(env, ARM_FEATURE_MPU))
                val = extended_mpu_ap_bits(val);
            env->cp15.c5_data = val;
            break;
        case 1:
            if (arm_feature(env, ARM_FEATURE_MPU))
                val = extended_mpu_ap_bits(val);
            env->cp15.c5_insn = val;
            break;
        case 2:
            if (!arm_feature(env, ARM_FEATURE_MPU))
                goto bad_reg;
            env->cp15.c5_data = val;
            break;
        case 3:
            if (!arm_feature(env, ARM_FEATURE_MPU))
                goto bad_reg;
            env->cp15.c5_insn = val;
            break;
        default:
            goto bad_reg;
        }
        break;
    case 6: /* MMU Fault address / MPU base/size.  */
        if (arm_feature(env, ARM_FEATURE_MPU)) {
            if (crm >= 8)
                goto bad_reg;
            env->cp15.c6_region[crm] = val;
        } else {
            if (arm_feature(env, ARM_FEATURE_OMAPCP))
                op2 = 0;
            switch (op2) {
            case 0:
                env->cp15.c6_data = val;
                break;
            case 1: /* ??? This is WFAR on armv6 */
            case 2:
                env->cp15.c6_insn = val;
                break;
            default:
                goto bad_reg;
            }
        }
        break;
    case 7: /* Cache control.  */
        env->cp15.c15_i_max = 0x000;
        env->cp15.c15_i_min = 0xff0;
        if (op1 != 0) {
            goto bad_reg;
        }
        /* No cache, so nothing to do except VA->PA translations. */
        if (arm_feature(env, ARM_FEATURE_VAPA)) {
            switch (crm) {
            case 4:
                if (arm_feature(env, ARM_FEATURE_V7)) {
                    env->cp15.c7_par = val & 0xfffff6ff;
                } else {
                    env->cp15.c7_par = val & 0xfffff1ff;
                }
                break;
            case 8: {
                uint32_t phys_addr;
                target_ulong page_size;
                int prot;
                int ret, is_user = op2 & 2;
                int access_type = op2 & 1;

                if (op2 & 4) {
                    /* Other states are only available with TrustZone */
                    goto bad_reg;
                }
                ret = get_phys_addr(env, val, access_type, is_user,
                                    &phys_addr, &prot, &page_size);
                if (ret == 0) {
                    /* We do not set any attribute bits in the PAR */
                    if (page_size == (1 << 24)
                        && arm_feature(env, ARM_FEATURE_V7)) {
                        env->cp15.c7_par = (phys_addr & 0xff000000) | 1 << 1;
                    } else {
                        env->cp15.c7_par = phys_addr & 0xfffff000;
                    }
                } else {
                    env->cp15.c7_par = ((ret & (10 << 1)) >> 5) |
                                       ((ret & (12 << 1)) >> 6) |
                                       ((ret & 0xf) << 1) | 1;
                }
                break;
            }
            }
        }
        break;
    case 8: /* MMU TLB control.  */
        switch (op2) {
        case 0: /* Invalidate all.  */
            tlb_flush(env, 0);
            break;
        case 1: /* Invalidate single TLB entry.  */
            tlb_flush_page(env, val & TARGET_PAGE_MASK);
            break;
        case 2: /* Invalidate on ASID.  */
            tlb_flush(env, val == 0);
            break;
        case 3: /* Invalidate single entry on MVA.  */
            /* ??? This is like case 1, but ignores ASID.  */
            tlb_flush(env, 1);
            break;
        default:
            goto bad_reg;
        }
        break;
    case 9:
        if (arm_feature(env, ARM_FEATURE_OMAPCP))
            break;
        if (arm_feature(env, ARM_FEATURE_STRONGARM))
            break; /* Ignore ReadBuffer access */
        switch (crm) {
        case 0: /* Cache lockdown.  */
            switch (op1) {
            case 0: /* L1 cache.  */
                switch (op2) {
                case 0:
                    env->cp15.c9_data = val;
                    break;
                case 1:
                    env->cp15.c9_insn = val;
                    break;
                default:
                    goto bad_reg;
                }
                break;
            case 1: /* L2 cache.  */
                /* Ignore writes to L2 lockdown/auxiliary registers.  */
                break;
            default:
                goto bad_reg;
            }
            break;
        case 1: /* TCM memory region registers.  */
            /* Not implemented.  */
            goto bad_reg;
        case 12: /* Performance monitor control */
            /* Performance monitors are implementation defined in v7,
             * but with an ARM recommended set of registers, which we
             * follow (although we don't actually implement any counters)
             */
            if (!arm_feature(env, ARM_FEATURE_V7)) {
                goto bad_reg;
            }
            switch (op2) {
            case 0: /* performance monitor control register */
                /* only the DP, X, D and E bits are writable */
                env->cp15.c9_pmcr &= ~0x39;
                env->cp15.c9_pmcr |= (val & 0x39);
                break;
            case 1: /* Count enable set register */
                val &= (1 << 31);
                env->cp15.c9_pmcnten |= val;
                break;
            case 2: /* Count enable clear */
                val &= (1 << 31);
                env->cp15.c9_pmcnten &= ~val;
                break;
            case 3: /* Overflow flag status */
                env->cp15.c9_pmovsr &= ~val;
                break;
            case 4: /* Software increment */
                /* RAZ/WI since we don't implement the software-count event */
                break;
            case 5: /* Event counter selection register */
                /* Since we don't implement any events, writing to this register
                 * is actually UNPREDICTABLE. So we choose to RAZ/WI.
                 */
                break;
            default:
                goto bad_reg;
            }
            break;
        case 13: /* Performance counters */
            if (!arm_feature(env, ARM_FEATURE_V7)) {
                goto bad_reg;
            }
            switch (op2) {
            case 0: /* Cycle count register: not implemented, so RAZ/WI */
                break;
            case 1: /* Event type select */
                env->cp15.c9_pmxevtyper = val & 0xff;
                break;
            case 2: /* Event count register */
                /* Unimplemented (we have no events), RAZ/WI */
                break;
            default:
                goto bad_reg;
            }
            break;
        case 14: /* Performance monitor control */
            if (!arm_feature(env, ARM_FEATURE_V7)) {
                goto bad_reg;
            }
            switch (op2) {
            case 0: /* user enable */
                env->cp15.c9_pmuserenr = val & 1;
                /* changes access rights for cp registers, so flush tbs */
                tb_flush(env);
                break;
            case 1: /* interrupt enable set */
                /* We have no event counters so only the C bit can be changed */
                val &= (1 << 31);
                env->cp15.c9_pminten |= val;
                break;
            case 2: /* interrupt enable clear */
                val &= (1 << 31);
                env->cp15.c9_pminten &= ~val;
                break;
            }
            break;
        default:
            goto bad_reg;
        }
        break;
    case 10: /* MMU TLB lockdown.  */
        /* ??? TLB lockdown not implemented.  */
        break;
    case 12: /* Reserved.  */
        goto bad_reg;
    case 13: /* Process ID.  */
        switch (op2) {
        case 0:
            /* Unlike real hardware the qemu TLB uses virtual addresses,
               not modified virtual addresses, so this causes a TLB flush.
1727             */
1728            if (env->cp15.c13_fcse != val)
1729              tlb_flush(env, 1);
1730            env->cp15.c13_fcse = val;
1731            break;
1732        case 1:
1733            /* This changes the ASID, so do a TLB flush.  */
1734            if (env->cp15.c13_context != val
1735                && !arm_feature(env, ARM_FEATURE_MPU))
1736              tlb_flush(env, 0);
1737            env->cp15.c13_context = val;
1738            break;
1739        default:
1740            goto bad_reg;
1741        }
1742        break;
1743    case 14: /* Reserved.  */
1744        goto bad_reg;
1745    case 15: /* Implementation specific.  */
1746        if (arm_feature(env, ARM_FEATURE_XSCALE)) {
1747            if (op2 == 0 && crm == 1) {
1748                if (env->cp15.c15_cpar != (val & 0x3fff)) {
1749                    /* Changes cp0 to cp13 behavior, so needs a TB flush.  */
1750                    tb_flush(env);
1751                    env->cp15.c15_cpar = val & 0x3fff;
1752                }
1753                break;
1754            }
1755            goto bad_reg;
1756        }
1757        if (arm_feature(env, ARM_FEATURE_OMAPCP)) {
1758            switch (crm) {
1759            case 0:
1760                break;
1761            case 1: /* Set TI925T configuration.  */
1762                env->cp15.c15_ticonfig = val & 0xe7;
1763                env->cp15.c0_cpuid = (val & (1 << 5)) ? /* OS_TYPE bit */
1764                        ARM_CPUID_TI915T : ARM_CPUID_TI925T;
1765                break;
1766            case 2: /* Set I_max.  */
1767                env->cp15.c15_i_max = val;
1768                break;
1769            case 3: /* Set I_min.  */
1770                env->cp15.c15_i_min = val;
1771                break;
1772            case 4: /* Set thread-ID.  */
1773                env->cp15.c15_threadid = val & 0xffff;
1774                break;
1775            case 8: /* Wait-for-interrupt (deprecated).  */
1776                cpu_interrupt(env, CPU_INTERRUPT_HALT);
1777                break;
1778            default:
1779                goto bad_reg;
1780            }
1781        }
1782        break;
1783    }
1784    return;
1785bad_reg:
1786    /* ??? For debugging only.  Should raise illegal instruction exception.  */
1787    cpu_abort(env, "Unimplemented cp15 register write (c%d, c%d, {%d, %d})\n",
1788              (insn >> 16) & 0xf, crm, op1, op2);
1789}
1790
1791uint32_t HELPER(get_cp15)(CPUState *env, uint32_t insn)
1792{
1793    int op1;
1794    int op2;
1795    int crm;
1796
1797    op1 = (insn >> 21) & 7;
1798    op2 = (insn >> 5) & 7;
1799    crm = insn & 0xf;
1800    switch ((insn >> 16) & 0xf) {
1801    case 0: /* ID codes.  */
1802        switch (op1) {
1803        case 0:
1804            switch (crm) {
1805            case 0:
1806                switch (op2) {
1807                case 0: /* Device ID.  */
1808                    return env->cp15.c0_cpuid;
1809                case 1: /* Cache Type.  */
1810                    return env->cp15.c0_cachetype;
1811                case 2: /* TCM status.  */
1812                    return 0;
1813                case 3: /* TLB type register.  */
1814                    return 0; /* No lockable TLB entries.  */
1815                case 5: /* MPIDR */
1816                    /* The MPIDR was standardised in v7; prior to
1817                     * this it was implemented only in the 11MPCore.
1818                     * For all other pre-v7 cores it does not exist.
1819                     */
1820                    if (arm_feature(env, ARM_FEATURE_V7) ||
1821                        ARM_CPUID(env) == ARM_CPUID_ARM11MPCORE) {
1822                        int mpidr = env->cpu_index;
1823                        /* We don't support setting cluster ID ([8..11])
1824                         * so these bits always RAZ.
1825                         */
1826                        if (arm_feature(env, ARM_FEATURE_V7MP)) {
1827                            mpidr |= (1 << 31);
1828                            /* Cores which are uniprocessor (non-coherent)
1829                             * but still implement the MP extensions set
1830                             * bit 30. (For instance, A9UP.) However we do
1831                             * not currently model any of those cores.
1832                             */
1833                        }
1834                        return mpidr;
1835                    }
1836                    /* otherwise fall through to the unimplemented-reg case */
1837                default:
1838                    goto bad_reg;
1839                }
1840            case 1:
1841                if (!arm_feature(env, ARM_FEATURE_V6))
1842                    goto bad_reg;
1843                return env->cp15.c0_c1[op2];
1844            case 2:
1845                if (!arm_feature(env, ARM_FEATURE_V6))
1846                    goto bad_reg;
1847                return env->cp15.c0_c2[op2];
1848            case 3: case 4: case 5: case 6: case 7:
1849                return 0;
1850            default:
1851                goto bad_reg;
1852            }
1853        case 1:
1854            /* These registers aren't documented on arm11 cores.  However
1855               Linux looks at them anyway.  */
1856            if (!arm_feature(env, ARM_FEATURE_V6))
1857                goto bad_reg;
1858            if (crm != 0)
1859                goto bad_reg;
1860            if (!arm_feature(env, ARM_FEATURE_V7))
1861                return 0;
1862
1863            switch (op2) {
1864            case 0:
1865                return env->cp15.c0_ccsid[env->cp15.c0_cssel];
1866            case 1:
1867                return env->cp15.c0_clid;
1868            case 7:
1869                return 0;
1870            }
1871            goto bad_reg;
1872        case 2:
1873            if (op2 != 0 || crm != 0)
1874                goto bad_reg;
1875            return env->cp15.c0_cssel;
1876        default:
1877            goto bad_reg;
1878        }
1879    case 1: /* System configuration.  */
1880        if (arm_feature(env, ARM_FEATURE_OMAPCP))
1881            op2 = 0;
1882        switch (op2) {
1883        case 0: /* Control register.  */
1884            return env->cp15.c1_sys;
1885        case 1: /* Auxiliary control register.  */
1886            if (arm_feature(env, ARM_FEATURE_XSCALE))
1887                return env->cp15.c1_xscaleauxcr;
1888            if (!arm_feature(env, ARM_FEATURE_AUXCR))
1889                goto bad_reg;
1890            switch (ARM_CPUID(env)) {
1891            case ARM_CPUID_ARM1026:
1892                return 1;
1893            case ARM_CPUID_ARM1136:
1894            case ARM_CPUID_ARM1136_R2:
1895            case ARM_CPUID_ARM1176:
1896                return 7;
1897            case ARM_CPUID_ARM11MPCORE:
1898                return 1;
1899            case ARM_CPUID_CORTEXA8:
1900                return 2;
1901            case ARM_CPUID_CORTEXA9:
1902                return 0;
1903            default:
1904                goto bad_reg;
1905            }
1906        case 2: /* Coprocessor access register.  */
1907            if (arm_feature(env, ARM_FEATURE_XSCALE))
1908                goto bad_reg;
1909            return env->cp15.c1_coproc;
1910        default:
1911            goto bad_reg;
1912        }
1913    case 2: /* MMU Page table control / MPU cache control.  */
1914        if (arm_feature(env, ARM_FEATURE_MPU)) {
1915            switch (op2) {
1916            case 0:
1917                return env->cp15.c2_data;
1918                break;
1919            case 1:
1920                return env->cp15.c2_insn;
1921                break;
1922            default:
1923                goto bad_reg;
1924            }
1925        } else {
1926            switch (op2) {
1927            case 0:
1928                return env->cp15.c2_base0;
1929            case 1:
1930                return env->cp15.c2_base1;
1931            case 2:
1932                return env->cp15.c2_control;
1933            default:
1934                goto bad_reg;
1935            }
1936        }
1937    case 3: /* MMU Domain access control / MPU write buffer control.  */
1938        return env->cp15.c3;
1939    case 4: /* Reserved.  */
1940        goto bad_reg;
1941    case 5: /* MMU Fault status / MPU access permission.  */
1942        if (arm_feature(env, ARM_FEATURE_OMAPCP))
1943            op2 = 0;
1944        switch (op2) {
1945        case 0:
1946            if (arm_feature(env, ARM_FEATURE_MPU))
1947                return simple_mpu_ap_bits(env->cp15.c5_data);
1948            return env->cp15.c5_data;
1949        case 1:
1950            if (arm_feature(env, ARM_FEATURE_MPU))
1951                return simple_mpu_ap_bits(env->cp15.c5_insn);
1952            return env->cp15.c5_insn;
1953        case 2:
1954            if (!arm_feature(env, ARM_FEATURE_MPU))
1955                goto bad_reg;
1956            return env->cp15.c5_data;
1957        case 3:
1958            if (!arm_feature(env, ARM_FEATURE_MPU))
1959                goto bad_reg;
1960            return env->cp15.c5_insn;
1961        default:
1962            goto bad_reg;
1963        }
1964    case 6: /* MMU Fault address.  */
1965        if (arm_feature(env, ARM_FEATURE_MPU)) {
1966            if (crm >= 8)
1967                goto bad_reg;
1968            return env->cp15.c6_region[crm];
1969        } else {
1970            if (arm_feature(env, ARM_FEATURE_OMAPCP))
1971                op2 = 0;
1972            switch (op2) {
1973            case 0:
1974                return env->cp15.c6_data;
1975            case 1:
1976                if (arm_feature(env, ARM_FEATURE_V6)) {
1977                    /* Watchpoint Fault Address.  */
1978                    return 0; /* Not implemented.  */
1979                } else {
1980                    /* Instruction Fault Address.  */
1981                    /* Arm9 doesn't have an IFAR, but implementing it anyway
1982                       shouldn't do any harm.  */
1983                    return env->cp15.c6_insn;
1984                }
1985            case 2:
1986                if (arm_feature(env, ARM_FEATURE_V6)) {
1987                    /* Instruction Fault Address.  */
1988                    return env->cp15.c6_insn;
1989                } else {
1990                    goto bad_reg;
1991                }
1992            default:
1993                goto bad_reg;
1994            }
1995        }
1996    case 7: /* Cache control.  */
1997        if (crm == 4 && op1 == 0 && op2 == 0) {
1998            return env->cp15.c7_par;
1999        }
2000        /* FIXME: Should only clear Z flag if destination is r15.  */
2001        env->ZF = 0;
2002        return 0;
2003    case 8: /* MMU TLB control.  */
2004        goto bad_reg;
2005    case 9:
2006        switch (crm) {
2007        case 0: /* Cache lockdown */
2008            switch (op1) {
2009            case 0: /* L1 cache.  */
2010                if (arm_feature(env, ARM_FEATURE_OMAPCP)) {
2011                    return 0;
2012                }
2013                switch (op2) {
2014                case 0:
2015                    return env->cp15.c9_data;
2016                case 1:
2017                    return env->cp15.c9_insn;
2018                default:
2019                    goto bad_reg;
2020                }
2021            case 1: /* L2 cache */
2022                if (crm != 0) {
2023                    goto bad_reg;
2024                }
2025                /* L2 Lockdown and Auxiliary control.  */
2026                return 0;
2027            default:
2028                goto bad_reg;
2029            }
2030            break;
2031        case 12: /* Performance monitor control */
2032            if (!arm_feature(env, ARM_FEATURE_V7)) {
2033                goto bad_reg;
2034            }
2035            switch (op2) {
2036            case 0: /* performance monitor control register */
2037                return env->cp15.c9_pmcr;
2038            case 1: /* count enable set */
2039            case 2: /* count enable clear */
2040                return env->cp15.c9_pmcnten;
2041            case 3: /* overflow flag status */
2042                return env->cp15.c9_pmovsr;
2043            case 4: /* software increment */
2044            case 5: /* event counter selection register */
2045                return 0; /* Unimplemented, RAZ/WI */
2046            default:
2047                goto bad_reg;
2048            }
2049        case 13: /* Performance counters */
2050            if (!arm_feature(env, ARM_FEATURE_V7)) {
2051                goto bad_reg;
2052            }
2053            switch (op2) {
2054            case 1: /* Event type select */
2055                return env->cp15.c9_pmxevtyper;
2056            case 0: /* Cycle count register */
2057            case 2: /* Event count register */
2058                /* Unimplemented, so RAZ/WI */
2059                return 0;
2060            default:
2061                goto bad_reg;
2062            }
2063        case 14: /* Performance monitor control */
2064            if (!arm_feature(env, ARM_FEATURE_V7)) {
2065                goto bad_reg;
2066            }
2067            switch (op2) {
2068            case 0: /* user enable */
2069                return env->cp15.c9_pmuserenr;
2070            case 1: /* interrupt enable set */
2071            case 2: /* interrupt enable clear */
2072                return env->cp15.c9_pminten;
2073            default:
2074                goto bad_reg;
2075            }
2076        default:
2077            goto bad_reg;
2078        }
2079        break;
2080    case 10: /* MMU TLB lockdown.  */
2081        /* ??? TLB lockdown not implemented.  */
2082        return 0;
2083    case 11: /* TCM DMA control.  */
2084    case 12: /* Reserved.  */
2085        goto bad_reg;
2086    case 13: /* Process ID.  */
2087        switch (op2) {
2088        case 0:
2089            return env->cp15.c13_fcse;
2090        case 1:
2091            return env->cp15.c13_context;
2092        default:
2093            goto bad_reg;
2094        }
2095    case 14: /* Reserved.  */
2096        goto bad_reg;
2097    case 15: /* Implementation specific.  */
2098        if (arm_feature(env, ARM_FEATURE_XSCALE)) {
2099            if (op2 == 0 && crm == 1)
2100                return env->cp15.c15_cpar;
2101
2102            goto bad_reg;
2103        }
2104        if (arm_feature(env, ARM_FEATURE_OMAPCP)) {
2105            switch (crm) {
2106            case 0:
2107                return 0;
2108            case 1: /* Read TI925T configuration.  */
2109                return env->cp15.c15_ticonfig;
2110            case 2: /* Read I_max.  */
2111                return env->cp15.c15_i_max;
2112            case 3: /* Read I_min.  */
2113                return env->cp15.c15_i_min;
2114            case 4: /* Read thread-ID.  */
2115                return env->cp15.c15_threadid;
2116            case 8: /* TI925T_status */
2117                return 0;
2118            }
2119            /* TODO: Peripheral port remap register:
2120             * On OMAP2 mcr p15, 0, rn, c15, c2, 4 sets up the interrupt
2121             * controller base address at $rn & ~0xfff and map size of
2122             * 0x200 << ($rn & 0xfff), when MMU is off.  */
2123            goto bad_reg;
2124        }
2125        return 0;
2126    }
2127bad_reg:
2128    /* ??? For debugging only.  Should raise illegal instruction exception.  */
2129    cpu_abort(env, "Unimplemented cp15 register read (c%d, c%d, {%d, %d})\n",
2130              (insn >> 16) & 0xf, crm, op1, op2);
2131    return 0;
2132}
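
/* Illustrative sketch, added for exposition and not part of the original
 * helpers: the 'insn' argument above is decoded using the MRC/MCR field
 * layout, i.e. opc1 in bits [23:21], CRn in bits [19:16], opc2 in bits
 * [7:5] and CRm in bits [3:0].  The hypothetical function below (guarded
 * out so it is never compiled) shows how such a value could be assembled;
 * e.g. opc1=0, CRn=0, CRm=0, opc2=0 selects the Device ID case at the top
 * of get_cp15().
 */
#if 0
static uint32_t example_cp15_key(int opc1, int crn, int crm, int opc2)
{
    return ((opc1 & 7) << 21) | ((crn & 0xf) << 16)
           | ((opc2 & 7) << 5) | (crm & 0xf);
}
#endif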
2133
2134void HELPER(set_r13_banked)(CPUState *env, uint32_t mode, uint32_t val)
2135{
2136    if ((env->uncached_cpsr & CPSR_M) == mode) {
2137        env->regs[13] = val;
2138    } else {
2139        env->banked_r13[bank_number(mode)] = val;
2140    }
2141}
2142
2143uint32_t HELPER(get_r13_banked)(CPUState *env, uint32_t mode)
2144{
2145    if ((env->uncached_cpsr & CPSR_M) == mode) {
2146        return env->regs[13];
2147    } else {
2148        return env->banked_r13[bank_number(mode)];
2149    }
2150}
2151
2152uint32_t HELPER(v7m_mrs)(CPUState *env, uint32_t reg)
2153{
2154    switch (reg) {
2155    case 0: /* APSR */
2156        return xpsr_read(env) & 0xf8000000;
2157    case 1: /* IAPSR */
2158        return xpsr_read(env) & 0xf80001ff;
2159    case 2: /* EAPSR */
2160        return xpsr_read(env) & 0xff00fc00;
2161    case 3: /* xPSR */
2162        return xpsr_read(env) & 0xff00fdff;
2163    case 5: /* IPSR */
2164        return xpsr_read(env) & 0x000001ff;
2165    case 6: /* EPSR */
2166        return xpsr_read(env) & 0x0700fc00;
2167    case 7: /* IEPSR */
2168        return xpsr_read(env) & 0x0700edff;
2169    case 8: /* MSP */
2170        return env->v7m.current_sp ? env->v7m.other_sp : env->regs[13];
2171    case 9: /* PSP */
2172        return env->v7m.current_sp ? env->regs[13] : env->v7m.other_sp;
2173    case 16: /* PRIMASK */
2174        return (env->uncached_cpsr & CPSR_I) != 0;
2175    case 17: /* BASEPRI */
2176    case 18: /* BASEPRI_MAX */
2177        return env->v7m.basepri;
2178    case 19: /* FAULTMASK */
2179        return (env->uncached_cpsr & CPSR_F) != 0;
2180    case 20: /* CONTROL */
2181        return env->v7m.control;
2182    default:
2183        /* ??? For debugging only.  */
2184        cpu_abort(env, "Unimplemented system register read (%d)\n", reg);
2185        return 0;
2186    }
2187}
2188
2189void HELPER(v7m_msr)(CPUState *env, uint32_t reg, uint32_t val)
2190{
2191    switch (reg) {
2192    case 0: /* APSR */
2193        xpsr_write(env, val, 0xf8000000);
2194        break;
2195    case 1: /* IAPSR */
2196        xpsr_write(env, val, 0xf8000000);
2197        break;
2198    case 2: /* EAPSR */
2199        xpsr_write(env, val, 0xfe00fc00);
2200        break;
2201    case 3: /* xPSR */
2202        xpsr_write(env, val, 0xfe00fc00);
2203        break;
2204    case 5: /* IPSR */
2205        /* IPSR bits are readonly.  */
2206        break;
2207    case 6: /* EPSR */
2208        xpsr_write(env, val, 0x0600fc00);
2209        break;
2210    case 7: /* IEPSR */
2211        xpsr_write(env, val, 0x0600fc00);
2212        break;
2213    case 8: /* MSP */
2214        if (env->v7m.current_sp)
2215            env->v7m.other_sp = val;
2216        else
2217            env->regs[13] = val;
2218        break;
2219    case 9: /* PSP */
2220        if (env->v7m.current_sp)
2221            env->regs[13] = val;
2222        else
2223            env->v7m.other_sp = val;
2224        break;
2225    case 16: /* PRIMASK */
2226        if (val & 1)
2227            env->uncached_cpsr |= CPSR_I;
2228        else
2229            env->uncached_cpsr &= ~CPSR_I;
2230        break;
2231    case 17: /* BASEPRI */
2232        env->v7m.basepri = val & 0xff;
2233        break;
2234    case 18: /* BASEPRI_MAX */
2235        val &= 0xff;
2236        if (val != 0 && (val < env->v7m.basepri || env->v7m.basepri == 0))
2237            env->v7m.basepri = val;
2238        break;
2239    case 19: /* FAULTMASK */
2240        if (val & 1)
2241            env->uncached_cpsr |= CPSR_F;
2242        else
2243            env->uncached_cpsr &= ~CPSR_F;
2244        break;
2245    case 20: /* CONTROL */
2246        env->v7m.control = val & 3;
2247        switch_v7m_sp(env, (val & 2) != 0);
2248        break;
2249    default:
2250        /* ??? For debugging only.  */
2251        cpu_abort(env, "Unimplemented system register write (%d)\n", reg);
2252        return;
2253    }
2254}
2255
2256void cpu_arm_set_cp_io(CPUARMState *env, int cpnum,
2257                ARMReadCPFunc *cp_read, ARMWriteCPFunc *cp_write,
2258                void *opaque)
2259{
2260    if (cpnum < 0 || cpnum > 14) {
2261        cpu_abort(env, "Bad coprocessor number: %i\n", cpnum);
2262        return;
2263    }
2264
2265    env->cp[cpnum].cp_read = cp_read;
2266    env->cp[cpnum].cp_write = cp_write;
2267    env->cp[cpnum].opaque = opaque;
2268}
2269
2270#endif
2271
2272/* Note that signed overflow is undefined in C.  The following routines are
2273   careful to use unsigned types where modulo arithmetic is required.
2274   Failure to do so _will_ break on newer gcc.  */
2275
2276/* Signed saturating arithmetic.  */
2277
2278/* Perform 16-bit signed saturating addition.  */
2279static inline uint16_t add16_sat(uint16_t a, uint16_t b)
2280{
2281    uint16_t res;
2282
2283    res = a + b;
2284    if (((res ^ a) & 0x8000) && !((a ^ b) & 0x8000)) {
2285        if (a & 0x8000)
2286            res = 0x8000;
2287        else
2288            res = 0x7fff;
2289    }
2290    return res;
2291}
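
/* Illustrative worked example, added for exposition and not part of the
 * original file: with a = b = 0x7000 the true sum is 0xe000, whose sign
 * bit differs from the (positive) operands, so the result saturates to
 * 0x7fff; with a = b = 0x9000 (both negative) the truncated sum 0x2000
 * flips sign the other way and saturates to 0x8000.  The guarded check
 * below is a sketch only (it assumes <assert.h>) and is never compiled.
 */
#if 0
static void add16_sat_example(void)
{
    assert(add16_sat(0x7000, 0x7000) == 0x7fff); /* positive overflow */
    assert(add16_sat(0x9000, 0x9000) == 0x8000); /* negative overflow */
    assert(add16_sat(0x1234, 0x0001) == 0x1235); /* no saturation */
}
#endif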
2292
2293/* Perform 8-bit signed saturating addition.  */
2294static inline uint8_t add8_sat(uint8_t a, uint8_t b)
2295{
2296    uint8_t res;
2297
2298    res = a + b;
2299    if (((res ^ a) & 0x80) && !((a ^ b) & 0x80)) {
2300        if (a & 0x80)
2301            res = 0x80;
2302        else
2303            res = 0x7f;
2304    }
2305    return res;
2306}
2307
2308/* Perform 16-bit signed saturating subtraction.  */
2309static inline uint16_t sub16_sat(uint16_t a, uint16_t b)
2310{
2311    uint16_t res;
2312
2313    res = a - b;
2314    if (((res ^ a) & 0x8000) && ((a ^ b) & 0x8000)) {
2315        if (a & 0x8000)
2316            res = 0x8000;
2317        else
2318            res = 0x7fff;
2319    }
2320    return res;
2321}
2322
2323/* Perform 8-bit signed saturating subtraction.  */
2324static inline uint8_t sub8_sat(uint8_t a, uint8_t b)
2325{
2326    uint8_t res;
2327
2328    res = a - b;
2329    if (((res ^ a) & 0x80) && ((a ^ b) & 0x80)) {
2330        if (a & 0x80)
2331            res = 0x80;
2332        else
2333            res = 0x7f;
2334    }
2335    return res;
2336}
2337
2338#define ADD16(a, b, n) RESULT(add16_sat(a, b), n, 16);
2339#define SUB16(a, b, n) RESULT(sub16_sat(a, b), n, 16);
2340#define ADD8(a, b, n)  RESULT(add8_sat(a, b), n, 8);
2341#define SUB8(a, b, n)  RESULT(sub8_sat(a, b), n, 8);
2342#define PFX q
2343
2344#include "op_addsub.h"
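
/* Added note: each inclusion of "op_addsub.h" instantiates the packed
 * 32-bit parallel add/subtract helpers for the prefix named by PFX,
 * built from the per-lane ADD16/SUB16/ADD8/SUB8 macros defined above.
 * For this signed-saturating instantiation the operation applies to each
 * lane independently; e.g. adding 0x70008000 and 0x70008000 halfword-wise
 * gives 0x7fff8000 (the top lane saturates positive, the bottom lane
 * saturates negative).
 */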
2345
2346/* Unsigned saturating arithmetic.  */
2347static inline uint16_t add16_usat(uint16_t a, uint16_t b)
2348{
2349    uint16_t res;
2350    res = a + b;
2351    if (res < a)
2352        res = 0xffff;
2353    return res;
2354}
2355
2356static inline uint16_t sub16_usat(uint16_t a, uint16_t b)
2357{
2358    if (a > b)
2359        return a - b;
2360    else
2361        return 0;
2362}
2363
2364static inline uint8_t add8_usat(uint8_t a, uint8_t b)
2365{
2366    uint8_t res;
2367    res = a + b;
2368    if (res < a)
2369        res = 0xff;
2370    return res;
2371}
2372
2373static inline uint8_t sub8_usat(uint8_t a, uint8_t b)
2374{
2375    if (a > b)
2376        return a - b;
2377    else
2378        return 0;
2379}
2380
2381#define ADD16(a, b, n) RESULT(add16_usat(a, b), n, 16);
2382#define SUB16(a, b, n) RESULT(sub16_usat(a, b), n, 16);
2383#define ADD8(a, b, n)  RESULT(add8_usat(a, b), n, 8);
2384#define SUB8(a, b, n)  RESULT(sub8_usat(a, b), n, 8);
2385#define PFX uq
2386
2387#include "op_addsub.h"
2388
2389/* Signed modulo arithmetic.  */
2390#define SARITH16(a, b, n, op) do { \
2391    int32_t sum; \
2392    sum = (int32_t)(int16_t)(a) op (int32_t)(int16_t)(b); \
2393    RESULT(sum, n, 16); \
2394    if (sum >= 0) \
2395        ge |= 3 << (n * 2); \
2396    } while(0)
2397
2398#define SARITH8(a, b, n, op) do { \
2399    int32_t sum; \
2400    sum = (int32_t)(int8_t)(a) op (int32_t)(int8_t)(b); \
2401    RESULT(sum, n, 8); \
2402    if (sum >= 0) \
2403        ge |= 1 << n; \
2404    } while(0)
2405
2406
2407#define ADD16(a, b, n) SARITH16(a, b, n, +)
2408#define SUB16(a, b, n) SARITH16(a, b, n, -)
2409#define ADD8(a, b, n)  SARITH8(a, b, n, +)
2410#define SUB8(a, b, n)  SARITH8(a, b, n, -)
2411#define PFX s
2412#define ARITH_GE
2413
2414#include "op_addsub.h"
2415
2416/* Unsigned modulo arithmetic.  */
2417#define ADD16(a, b, n) do { \
2418    uint32_t sum; \
2419    sum = (uint32_t)(uint16_t)(a) + (uint32_t)(uint16_t)(b); \
2420    RESULT(sum, n, 16); \
2421    if ((sum >> 16) == 1) \
2422        ge |= 3 << (n * 2); \
2423    } while(0)
2424
2425#define ADD8(a, b, n) do { \
2426    uint32_t sum; \
2427    sum = (uint32_t)(uint8_t)(a) + (uint32_t)(uint8_t)(b); \
2428    RESULT(sum, n, 8); \
2429    if ((sum >> 8) == 1) \
2430        ge |= 1 << n; \
2431    } while(0)
2432
2433#define SUB16(a, b, n) do { \
2434    uint32_t sum; \
2435    sum = (uint32_t)(uint16_t)(a) - (uint32_t)(uint16_t)(b); \
2436    RESULT(sum, n, 16); \
2437    if ((sum >> 16) == 0) \
2438        ge |= 3 << (n * 2); \
2439    } while(0)
2440
2441#define SUB8(a, b, n) do { \
2442    uint32_t sum; \
2443    sum = (uint32_t)(uint8_t)(a) - (uint32_t)(uint8_t)(b); \
2444    RESULT(sum, n, 8); \
2445    if ((sum >> 8) == 0) \
2446        ge |= 1 << n; \
2447    } while(0)
2448
2449#define PFX u
2450#define ARITH_GE
2451
2452#include "op_addsub.h"
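
/* Added note: this instantiation (PFX u together with ARITH_GE) builds
 * the unsigned modulo parallel add/subtract helpers that also produce
 * the GE flags later consumed by SEL.  Per the macros above, a 16-bit
 * lane sets its two GE bits on addition when the lane carries out
 * (e.g. 0xf000 + 0x2000 = 0x11000, so the stored lane wraps to 0x1000
 * and GE is set) and on subtraction when no borrow occurs, i.e. when
 * a >= b.
 */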
2453
2454/* Halved signed arithmetic.  */
2455#define ADD16(a, b, n) \
2456  RESULT(((int32_t)(int16_t)(a) + (int32_t)(int16_t)(b)) >> 1, n, 16)
2457#define SUB16(a, b, n) \
2458  RESULT(((int32_t)(int16_t)(a) - (int32_t)(int16_t)(b)) >> 1, n, 16)
2459#define ADD8(a, b, n) \
2460  RESULT(((int32_t)(int8_t)(a) + (int32_t)(int8_t)(b)) >> 1, n, 8)
2461#define SUB8(a, b, n) \
2462  RESULT(((int32_t)(int8_t)(a) - (int32_t)(int8_t)(b)) >> 1, n, 8)
2463#define PFX sh
2464
2465#include "op_addsub.h"
2466
2467/* Halved unsigned arithmetic.  */
2468#define ADD16(a, b, n) \
2469  RESULT(((uint32_t)(uint16_t)(a) + (uint32_t)(uint16_t)(b)) >> 1, n, 16)
2470#define SUB16(a, b, n) \
2471  RESULT(((uint32_t)(uint16_t)(a) - (uint32_t)(uint16_t)(b)) >> 1, n, 16)
2472#define ADD8(a, b, n) \
2473  RESULT(((uint32_t)(uint8_t)(a) + (uint32_t)(uint8_t)(b)) >> 1, n, 8)
2474#define SUB8(a, b, n) \
2475  RESULT(((uint32_t)(uint8_t)(a) - (uint32_t)(uint8_t)(b)) >> 1, n, 8)
2476#define PFX uh
2477
2478#include "op_addsub.h"
2479
2480static inline uint8_t do_usad(uint8_t a, uint8_t b)
2481{
2482    if (a > b)
2483        return a - b;
2484    else
2485        return b - a;
2486}
2487
2488/* Unsigned sum of absolute byte differences.  */
2489uint32_t HELPER(usad8)(uint32_t a, uint32_t b)
2490{
2491    uint32_t sum;
2492    sum = do_usad(a, b);
2493    sum += do_usad(a >> 8, b >> 8);
2494    sum += do_usad(a >> 16, b >> 16);
2495    sum += do_usad(a >> 24, b >> 24);
2496    return sum;
2497}
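
/* Illustrative worked example, added for exposition and not part of the
 * original file: each byte lane contributes |a_i - b_i|, so
 * usad8(0x01020304, 0x04030201) = 3 + 1 + 1 + 3 = 8.  The guarded check
 * below is a sketch only (it assumes <assert.h>) and is never compiled.
 */
#if 0
static void usad8_example(void)
{
    assert(HELPER(usad8)(0x01020304, 0x04030201) == 8);
}
#endif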
2498
2499/* For ARMv6 SEL instruction.  */
2500uint32_t HELPER(sel_flags)(uint32_t flags, uint32_t a, uint32_t b)
2501{
2502    uint32_t mask;
2503
2504    mask = 0;
2505    if (flags & 1)
2506        mask |= 0xff;
2507    if (flags & 2)
2508        mask |= 0xff00;
2509    if (flags & 4)
2510        mask |= 0xff0000;
2511    if (flags & 8)
2512        mask |= 0xff000000;
2513    return (a & mask) | (b & ~mask);
2514}
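
/* Illustrative worked example, added for exposition and not part of the
 * original file: the low four bits of 'flags' are the GE[3:0] flags, one
 * per byte lane, and each set bit selects that byte from a rather than b.
 * With flags = 0x5 the mask is 0x00ff00ff, so
 * sel_flags(0x5, 0xaabbccdd, 0x11223344) == 0x11bb33dd.
 */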
2515
2516uint32_t HELPER(logicq_cc)(uint64_t val)
2517{
2518    return (val >> 32) | (val != 0);
2519}
2520
2521/* VFP support.  We follow the convention used for VFP instructions:
2522   Single precision routines have an "s" suffix, double precision a
2523   "d" suffix.  */
2524
2525/* Convert host exception flags to vfp form.  */
2526static inline int vfp_exceptbits_from_host(int host_bits)
2527{
2528    int target_bits = 0;
2529
2530    if (host_bits & float_flag_invalid)
2531        target_bits |= 1;
2532    if (host_bits & float_flag_divbyzero)
2533        target_bits |= 2;
2534    if (host_bits & float_flag_overflow)
2535        target_bits |= 4;
2536    if (host_bits & (float_flag_underflow | float_flag_output_denormal))
2537        target_bits |= 8;
2538    if (host_bits & float_flag_inexact)
2539        target_bits |= 0x10;
2540    if (host_bits & float_flag_input_denormal)
2541        target_bits |= 0x80;
2542    return target_bits;
2543}
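
/* Added note: the target-side values used above are the FPSCR cumulative
 * exception flag positions: bit 0 IOC (invalid operation), bit 1 DZC
 * (division by zero), bit 2 OFC (overflow), bit 3 UFC (underflow),
 * bit 4 IXC (inexact) and bit 7 IDC (input denormal).
 */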
2544
2545uint32_t HELPER(vfp_get_fpscr)(CPUState *env)
2546{
2547    int i;
2548    uint32_t fpscr;
2549
2550    fpscr = (env->vfp.xregs[ARM_VFP_FPSCR] & 0xffc8ffff)
2551            | (env->vfp.vec_len << 16)
2552            | (env->vfp.vec_stride << 20);
2553    i = get_float_exception_flags(&env->vfp.fp_status);
2554    i |= get_float_exception_flags(&env->vfp.standard_fp_status);
2555    fpscr |= vfp_exceptbits_from_host(i);
2556    return fpscr;
2557}
2558
2559uint32_t vfp_get_fpscr(CPUState *env)
2560{
2561    return HELPER(vfp_get_fpscr)(env);
2562}
2563
2564/* Convert vfp exception flags to host form.  */
2565static inline int vfp_exceptbits_to_host(int target_bits)
2566{
2567    int host_bits = 0;
2568
2569    if (target_bits & 1)
2570        host_bits |= float_flag_invalid;
2571    if (target_bits & 2)
2572        host_bits |= float_flag_divbyzero;
2573    if (target_bits & 4)
2574        host_bits |= float_flag_overflow;
2575    if (target_bits & 8)
2576        host_bits |= float_flag_underflow;
2577    if (target_bits & 0x10)
2578        host_bits |= float_flag_inexact;
2579    if (target_bits & 0x80)
2580        host_bits |= float_flag_input_denormal;
2581    return host_bits;
2582}
2583
2584void HELPER(vfp_set_fpscr)(CPUState *env, uint32_t val)
2585{
2586    int i;
2587    uint32_t changed;
2588
2589    changed = env->vfp.xregs[ARM_VFP_FPSCR];
2590    env->vfp.xregs[ARM_VFP_FPSCR] = (val & 0xffc8ffff);
2591    env->vfp.vec_len = (val >> 16) & 7;
2592    env->vfp.vec_stride = (val >> 20) & 3;
2593
2594    changed ^= val;
2595    if (changed & (3 << 22)) {
2596        i = (val >> 22) & 3;
2597        switch (i) {
2598        case 0:
2599            i = float_round_nearest_even;
2600            break;
2601        case 1:
2602            i = float_round_up;
2603            break;
2604        case 2:
2605            i = float_round_down;
2606            break;
2607        case 3:
2608            i = float_round_to_zero;
2609            break;
2610        }
2611        set_float_rounding_mode(i, &env->vfp.fp_status);
2612    }
2613    if (changed & (1 << 24)) {
2614        set_flush_to_zero((val & (1 << 24)) != 0, &env->vfp.fp_status);
2615        set_flush_inputs_to_zero((val & (1 << 24)) != 0, &env->vfp.fp_status);
2616    }
2617    if (changed & (1 << 25))
2618        set_default_nan_mode((val & (1 << 25)) != 0, &env->vfp.fp_status);
2619
2620    i = vfp_exceptbits_to_host(val);
2621    set_float_exception_flags(i, &env->vfp.fp_status);
2622    set_float_exception_flags(0, &env->vfp.standard_fp_status);
2623}
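
/* Added note: besides the cumulative exception flags, the fields handled
 * here follow the FPSCR layout: vector length Len in bits [18:16], vector
 * stride in bits [21:20], rounding mode RMode in bits [23:22],
 * flush-to-zero FZ in bit 24 and default-NaN DN in bit 25.  Bit 26 (AHP)
 * is kept in the raw register copy and consulted by the half-precision
 * conversion helpers below.
 */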
2624
2625void vfp_set_fpscr(CPUState *env, uint32_t val)
2626{
2627    HELPER(vfp_set_fpscr)(env, val);
2628}
2629
2630#define VFP_HELPER(name, p) HELPER(glue(glue(vfp_,name),p))
2631
2632#define VFP_BINOP(name) \
2633float32 VFP_HELPER(name, s)(float32 a, float32 b, void *fpstp) \
2634{ \
2635    float_status *fpst = fpstp; \
2636    return float32_ ## name(a, b, fpst); \
2637} \
2638float64 VFP_HELPER(name, d)(float64 a, float64 b, void *fpstp) \
2639{ \
2640    float_status *fpst = fpstp; \
2641    return float64_ ## name(a, b, fpst); \
2642}
2643VFP_BINOP(add)
2644VFP_BINOP(sub)
2645VFP_BINOP(mul)
2646VFP_BINOP(div)
2647#undef VFP_BINOP
2648
2649float32 VFP_HELPER(neg, s)(float32 a)
2650{
2651    return float32_chs(a);
2652}
2653
2654float64 VFP_HELPER(neg, d)(float64 a)
2655{
2656    return float64_chs(a);
2657}
2658
2659float32 VFP_HELPER(abs, s)(float32 a)
2660{
2661    return float32_abs(a);
2662}
2663
2664float64 VFP_HELPER(abs, d)(float64 a)
2665{
2666    return float64_abs(a);
2667}
2668
2669float32 VFP_HELPER(sqrt, s)(float32 a, CPUState *env)
2670{
2671    return float32_sqrt(a, &env->vfp.fp_status);
2672}
2673
2674float64 VFP_HELPER(sqrt, d)(float64 a, CPUState *env)
2675{
2676    return float64_sqrt(a, &env->vfp.fp_status);
2677}
2678
2679/* XXX: check quiet/signaling case */
2680#define DO_VFP_cmp(p, type) \
2681void VFP_HELPER(cmp, p)(type a, type b, CPUState *env)  \
2682{ \
2683    uint32_t flags; \
2684    switch(type ## _compare_quiet(a, b, &env->vfp.fp_status)) { \
2685    case 0: flags = 0x6; break; \
2686    case -1: flags = 0x8; break; \
2687    case 1: flags = 0x2; break; \
2688    default: case 2: flags = 0x3; break; \
2689    } \
2690    env->vfp.xregs[ARM_VFP_FPSCR] = (flags << 28) \
2691        | (env->vfp.xregs[ARM_VFP_FPSCR] & 0x0fffffff); \
2692} \
2693void VFP_HELPER(cmpe, p)(type a, type b, CPUState *env) \
2694{ \
2695    uint32_t flags; \
2696    switch(type ## _compare(a, b, &env->vfp.fp_status)) { \
2697    case 0: flags = 0x6; break; \
2698    case -1: flags = 0x8; break; \
2699    case 1: flags = 0x2; break; \
2700    default: case 2: flags = 0x3; break; \
2701    } \
2702    env->vfp.xregs[ARM_VFP_FPSCR] = (flags << 28) \
2703        | (env->vfp.xregs[ARM_VFP_FPSCR] & 0x0fffffff); \
2704}
2705DO_VFP_cmp(s, float32)
2706DO_VFP_cmp(d, float64)
2707#undef DO_VFP_cmp
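
/* Added note: the 'flags' values above are the ARM compare-result
 * encodings written to FPSCR NZCV (bits [31:28]): equal -> 0x6 (Z,C),
 * less than -> 0x8 (N), greater than -> 0x2 (C), unordered -> 0x3 (C,V).
 */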
2708
2709/* Integer to float and float to integer conversions */
2710
2711#define CONV_ITOF(name, fsz, sign) \
2712    float##fsz HELPER(name)(uint32_t x, void *fpstp) \
2713{ \
2714    float_status *fpst = fpstp; \
2715    return sign##int32_to_##float##fsz(x, fpst); \
2716}
2717
2718#define CONV_FTOI(name, fsz, sign, round) \
2719uint32_t HELPER(name)(float##fsz x, void *fpstp) \
2720{ \
2721    float_status *fpst = fpstp; \
2722    if (float##fsz##_is_any_nan(x)) { \
2723        float_raise(float_flag_invalid, fpst); \
2724        return 0; \
2725    } \
2726    return float##fsz##_to_##sign##int32##round(x, fpst); \
2727}
2728
2729#define FLOAT_CONVS(name, p, fsz, sign) \
2730CONV_ITOF(vfp_##name##to##p, fsz, sign) \
2731CONV_FTOI(vfp_to##name##p, fsz, sign, ) \
2732CONV_FTOI(vfp_to##name##z##p, fsz, sign, _round_to_zero)
2733
2734FLOAT_CONVS(si, s, 32, )
2735FLOAT_CONVS(si, d, 64, )
2736FLOAT_CONVS(ui, s, 32, u)
2737FLOAT_CONVS(ui, d, 64, u)
2738
2739#undef CONV_ITOF
2740#undef CONV_FTOI
2741#undef FLOAT_CONVS
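
/* Added note: each FLOAT_CONVS() line expands to three helpers, e.g.
 * FLOAT_CONVS(si, s, 32, ) yields HELPER(vfp_sitos) (int32 -> float32),
 * HELPER(vfp_tosis) (float32 -> int32 using the current rounding mode)
 * and HELPER(vfp_tosizs) (float32 -> int32, round towards zero).
 */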
2742
2743/* floating point conversion */
2744float64 VFP_HELPER(fcvtd, s)(float32 x, CPUState *env)
2745{
2746    float64 r = float32_to_float64(x, &env->vfp.fp_status);
2747    /* ARM requires that S<->D conversion of any kind of NaN generates
2748     * a quiet NaN by forcing the most significant frac bit to 1.
2749     */
2750    return float64_maybe_silence_nan(r);
2751}
2752
2753float32 VFP_HELPER(fcvts, d)(float64 x, CPUState *env)
2754{
2755    float32 r =  float64_to_float32(x, &env->vfp.fp_status);
2756    /* ARM requires that S<->D conversion of any kind of NaN generates
2757     * a quiet NaN by forcing the most significant frac bit to 1.
2758     */
2759    return float32_maybe_silence_nan(r);
2760}
2761
2762/* VFP3 fixed point conversion.  */
2763#define VFP_CONV_FIX(name, p, fsz, itype, sign) \
2764float##fsz HELPER(vfp_##name##to##p)(uint##fsz##_t  x, uint32_t shift, \
2765                                    void *fpstp) \
2766{ \
2767    float_status *fpst = fpstp; \
2768    float##fsz tmp; \
2769    tmp = sign##int32_to_##float##fsz((itype##_t)x, fpst); \
2770    return float##fsz##_scalbn(tmp, -(int)shift, fpst); \
2771} \
2772uint##fsz##_t HELPER(vfp_to##name##p)(float##fsz x, uint32_t shift, \
2773                                       void *fpstp) \
2774{ \
2775    float_status *fpst = fpstp; \
2776    float##fsz tmp; \
2777    if (float##fsz##_is_any_nan(x)) { \
2778        float_raise(float_flag_invalid, fpst); \
2779        return 0; \
2780    } \
2781    tmp = float##fsz##_scalbn(x, shift, fpst); \
2782    return float##fsz##_to_##itype##_round_to_zero(tmp, fpst); \
2783}
2784
2785VFP_CONV_FIX(sh, d, 64, int16, )
2786VFP_CONV_FIX(sl, d, 64, int32, )
2787VFP_CONV_FIX(uh, d, 64, uint16, u)
2788VFP_CONV_FIX(ul, d, 64, uint32, u)
2789VFP_CONV_FIX(sh, s, 32, int16, )
2790VFP_CONV_FIX(sl, s, 32, int32, )
2791VFP_CONV_FIX(uh, s, 32, uint16, u)
2792VFP_CONV_FIX(ul, s, 32, uint32, u)
2793#undef VFP_CONV_FIX
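
/* Illustrative worked example, added for exposition and not part of the
 * original file: the fixed-point helpers scale by 2^-shift on the way in
 * and by 2^shift on the way out.  For instance HELPER(vfp_sltos)(384, 8,
 * fpst) treats 384 as a value with 8 fraction bits and returns 1.5
 * (384 / 256), while HELPER(vfp_tosls) applied to 1.5 with shift 8 gives
 * back 384.
 */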
2794
2795/* Half precision conversions.  */
2796static float32 do_fcvt_f16_to_f32(uint32_t a, CPUState *env, float_status *s)
2797{
2798    int ieee = (env->vfp.xregs[ARM_VFP_FPSCR] & (1 << 26)) == 0;
2799    float32 r = float16_to_float32(make_float16(a), ieee, s);
2800    if (ieee) {
2801        return float32_maybe_silence_nan(r);
2802    }
2803    return r;
2804}
2805
2806static uint32_t do_fcvt_f32_to_f16(float32 a, CPUState *env, float_status *s)
2807{
2808    int ieee = (env->vfp.xregs[ARM_VFP_FPSCR] & (1 << 26)) == 0;
2809    float16 r = float32_to_float16(a, ieee, s);
2810    if (ieee) {
2811        r = float16_maybe_silence_nan(r);
2812    }
2813    return float16_val(r);
2814}
2815
2816float32 HELPER(neon_fcvt_f16_to_f32)(uint32_t a, CPUState *env)
2817{
2818    return do_fcvt_f16_to_f32(a, env, &env->vfp.standard_fp_status);
2819}
2820
2821uint32_t HELPER(neon_fcvt_f32_to_f16)(float32 a, CPUState *env)
2822{
2823    return do_fcvt_f32_to_f16(a, env, &env->vfp.standard_fp_status);
2824}
2825
2826float32 HELPER(vfp_fcvt_f16_to_f32)(uint32_t a, CPUState *env)
2827{
2828    return do_fcvt_f16_to_f32(a, env, &env->vfp.fp_status);
2829}
2830
2831uint32_t HELPER(vfp_fcvt_f32_to_f16)(float32 a, CPUState *env)
2832{
2833    return do_fcvt_f32_to_f16(a, env, &env->vfp.fp_status);
2834}
2835
2836#define float32_two make_float32(0x40000000)
2837#define float32_three make_float32(0x40400000)
2838#define float32_one_point_five make_float32(0x3fc00000)
2839
2840float32 HELPER(recps_f32)(float32 a, float32 b, CPUState *env)
2841{
2842    float_status *s = &env->vfp.standard_fp_status;
2843    if ((float32_is_infinity(a) && float32_is_zero_or_denormal(b)) ||
2844        (float32_is_infinity(b) && float32_is_zero_or_denormal(a))) {
2845        if (!(float32_is_zero(a) || float32_is_zero(b))) {
2846            float_raise(float_flag_input_denormal, s);
2847        }
2848        return float32_two;
2849    }
2850    return float32_sub(float32_two, float32_mul(a, b, s), s);
2851}
2852
2853float32 HELPER(rsqrts_f32)(float32 a, float32 b, CPUState *env)
2854{
2855    float_status *s = &env->vfp.standard_fp_status;
2856    float32 product;
2857    if ((float32_is_infinity(a) && float32_is_zero_or_denormal(b)) ||
2858        (float32_is_infinity(b) && float32_is_zero_or_denormal(a))) {
2859        if (!(float32_is_zero(a) || float32_is_zero(b))) {
2860            float_raise(float_flag_input_denormal, s);
2861        }
2862        return float32_one_point_five;
2863    }
2864    product = float32_mul(a, b, s);
2865    return float32_div(float32_sub(float32_three, product, s), float32_two, s);
2866}
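
/* Added note: these are the VRECPS/VRSQRTS step operations.  recps_f32
 * returns 2 - a*b, the correction factor of the Newton-Raphson iteration
 * x' = x * (2 - a*x) for refining an estimate of 1/a, and rsqrts_f32
 * returns (3 - a*b) / 2, the corresponding factor of
 * x' = x * (3 - a*x*x) / 2 for refining an estimate of 1/sqrt(a).
 */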
2867
2868/* NEON helpers.  */
2869
2870/* Constants 256 and 512 are used in some helpers; we avoid relying on
2871 * int->float conversions at run-time.  */
2872#define float64_256 make_float64(0x4070000000000000LL)
2873#define float64_512 make_float64(0x4080000000000000LL)
2874
2875/* The algorithm that must be used to calculate the estimate
2876 * is specified by the ARM ARM.
2877 */
2878static float64 recip_estimate(float64 a, CPUState *env)
2879{
2880    /* These calculations mustn't set any fp exception flags,
2881     * so we use a local copy of the fp_status.
2882     */
2883    float_status dummy_status = env->vfp.standard_fp_status;
2884    float_status *s = &dummy_status;
2885    /* q = (int)(a * 512.0) */
2886    float64 q = float64_mul(float64_512, a, s);
2887    int64_t q_int = float64_to_int64_round_to_zero(q, s);
2888
2889    /* r = 1.0 / (((double)q + 0.5) / 512.0) */
2890    q = int64_to_float64(q_int, s);
2891    q = float64_add(q, float64_half, s);
2892    q = float64_div(q, float64_512, s);
2893    q = float64_div(float64_one, q, s);
2894
2895    /* s = (int)(256.0 * r + 0.5) */
2896    q = float64_mul(q, float64_256, s);
2897    q = float64_add(q, float64_half, s);
2898    q_int = float64_to_int64_round_to_zero(q, s);
2899
2900    /* return (double)s / 256.0 */
2901    return float64_div(int64_to_float64(q_int, s), float64_256, s);
2902}
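
/* Illustrative worked example, added for exposition and not part of the
 * original file: for a = 0.5 the code computes q_int = 256, takes the
 * reciprocal of 256.5/512 (about 1.99610) and requantizes it to steps of
 * 1/256, giving 511/256 = 1.99609375 as the estimate.
 */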
2903
2904float32 HELPER(recpe_f32)(float32 a, CPUState *env)
2905{
2906    float_status *s = &env->vfp.standard_fp_status;
2907    float64 f64;
2908    uint32_t val32 = float32_val(a);
2909
2910    int result_exp;
2911    int a_exp = (val32  & 0x7f800000) >> 23;
2912    int sign = val32 & 0x80000000;
2913
2914    if (float32_is_any_nan(a)) {
2915        if (float32_is_signaling_nan(a)) {
2916            float_raise(float_flag_invalid, s);
2917        }
2918        return float32_default_nan;
2919    } else if (float32_is_infinity(a)) {
2920        return float32_set_sign(float32_zero, float32_is_neg(a));
2921    } else if (float32_is_zero_or_denormal(a)) {
2922        if (!float32_is_zero(a)) {
2923            float_raise(float_flag_input_denormal, s);
2924        }
2925        float_raise(float_flag_divbyzero, s);
2926        return float32_set_sign(float32_infinity, float32_is_neg(a));
2927    } else if (a_exp >= 253) {
2928        float_raise(float_flag_underflow, s);
2929        return float32_set_sign(float32_zero, float32_is_neg(a));
2930    }
2931
2932    f64 = make_float64((0x3feULL << 52)
2933                       | ((int64_t)(val32 & 0x7fffff) << 29));
2934
2935    result_exp = 253 - a_exp;
2936
2937    f64 = recip_estimate(f64, env);
2938
2939    val32 = sign
2940        | ((result_exp & 0xff) << 23)
2941        | ((float64_val(f64) >> 29) & 0x7fffff);
2942    return make_float32(val32);
2943}
2944
2945/* The algorithm that must be used to calculate the estimate
2946 * is specified by the ARM ARM.
2947 */
2948static float64 recip_sqrt_estimate(float64 a, CPUState *env)
2949{
2950    /* These calculations mustn't set any fp exception flags,
2951     * so we use a local copy of the fp_status.
2952     */
2953    float_status dummy_status = env->vfp.standard_fp_status;
2954    float_status *s = &dummy_status;
2955    float64 q;
2956    int64_t q_int;
2957
2958    if (float64_lt(a, float64_half, s)) {
2959        /* range 0.25 <= a < 0.5 */
2960
2961        /* a in units of 1/512 rounded down */
2962        /* q0 = (int)(a * 512.0);  */
2963        q = float64_mul(float64_512, a, s);
2964        q_int = float64_to_int64_round_to_zero(q, s);
2965
2966        /* reciprocal root r */
2967        /* r = 1.0 / sqrt(((double)q0 + 0.5) / 512.0);  */
2968        q = int64_to_float64(q_int, s);
2969        q = float64_add(q, float64_half, s);
2970        q = float64_div(q, float64_512, s);
2971        q = float64_sqrt(q, s);
2972        q = float64_div(float64_one, q, s);
2973    } else {
2974        /* range 0.5 <= a < 1.0 */
2975
2976        /* a in units of 1/256 rounded down */
2977        /* q1 = (int)(a * 256.0); */
2978        q = float64_mul(float64_256, a, s);
2979        q_int = float64_to_int64_round_to_zero(q, s);
2980
2981        /* reciprocal root r */
2982        /* r = 1.0 / sqrt(((double)q1 + 0.5) / 256.0); */
2983        q = int64_to_float64(q_int, s);
2984        q = float64_add(q, float64_half, s);
2985        q = float64_div(q, float64_256, s);
2986        q = float64_sqrt(q, s);
2987        q = float64_div(float64_one, q, s);
2988    }
2989    /* r in units of 1/256 rounded to nearest */
2990    /* s = (int)(256.0 * r + 0.5); */
2991
2992    q = float64_mul(q, float64_256, s);
2993    q = float64_add(q, float64_half, s);
2994    q_int = float64_to_int64_round_to_zero(q, s);
2995
2996    /* return (double)s / 256.0; */
2997    return float64_div(int64_to_float64(q_int, s), float64_256, s);
2998}
2999
3000float32 HELPER(rsqrte_f32)(float32 a, CPUState *env)
3001{
3002    float_status *s = &env->vfp.standard_fp_status;
3003    int result_exp;
3004    float64 f64;
3005    uint32_t val;
3006    uint64_t val64;
3007
3008    val = float32_val(a);
3009
3010    if (float32_is_any_nan(a)) {
3011        if (float32_is_signaling_nan(a)) {
3012            float_raise(float_flag_invalid, s);
3013        }
3014        return float32_default_nan;
3015    } else if (float32_is_zero_or_denormal(a)) {
3016        if (!float32_is_zero(a)) {
3017            float_raise(float_flag_input_denormal, s);
3018        }
3019        float_raise(float_flag_divbyzero, s);
3020        return float32_set_sign(float32_infinity, float32_is_neg(a));
3021    } else if (float32_is_neg(a)) {
3022        float_raise(float_flag_invalid, s);
3023        return float32_default_nan;
3024    } else if (float32_is_infinity(a)) {
3025        return float32_zero;
3026    }
3027
3028    /* Normalize to a double-precision value between 0.25 and 1.0,
3029     * preserving the parity of the exponent.  */
3030    if ((val & 0x800000) == 0) {
3031        f64 = make_float64(((uint64_t)(val & 0x80000000) << 32)
3032                           | (0x3feULL << 52)
3033                           | ((uint64_t)(val & 0x7fffff) << 29));
3034    } else {
3035        f64 = make_float64(((uint64_t)(val & 0x80000000) << 32)
3036                           | (0x3fdULL << 52)
3037                           | ((uint64_t)(val & 0x7fffff) << 29));
3038    }
3039
3040    result_exp = (380 - ((val & 0x7f800000) >> 23)) / 2;
3041
3042    f64 = recip_sqrt_estimate(f64, env);
3043
3044    val64 = float64_val(f64);
3045
3046    val = ((result_exp & 0xff) << 23)
3047        | ((val64 >> 29)  & 0x7fffff);
3048    return make_float32(val);
3049}
3050
3051uint32_t HELPER(recpe_u32)(uint32_t a, CPUState *env)
3052{
3053    float64 f64;
3054
3055    if ((a & 0x80000000) == 0) {
3056        return 0xffffffff;
3057    }
3058
3059    f64 = make_float64((0x3feULL << 52)
3060                       | ((int64_t)(a & 0x7fffffff) << 21));
3061
3062    f64 = recip_estimate(f64, env);
3063
3064    return 0x80000000 | ((float64_val(f64) >> 21) & 0x7fffffff);
3065}
3066
3067uint32_t HELPER(rsqrte_u32)(uint32_t a, CPUState *env)
3068{
3069    float64 f64;
3070
3071    if ((a & 0xc0000000) == 0) {
3072        return 0xffffffff;
3073    }
3074
3075    if (a & 0x80000000) {
3076        f64 = make_float64((0x3feULL << 52)
3077                           | ((uint64_t)(a & 0x7fffffff) << 21));
3078    } else { /* bits 31-30 == '01' */
3079        f64 = make_float64((0x3fdULL << 52)
3080                           | ((uint64_t)(a & 0x3fffffff) << 22));
3081    }
3082
3083    f64 = recip_sqrt_estimate(f64, env);
3084
3085    return 0x80000000 | ((float64_val(f64) >> 21) & 0x7fffffff);
3086}
3087
3088/* VFPv4 fused multiply-accumulate */
3089float32 VFP_HELPER(muladd, s)(float32 a, float32 b, float32 c, void *fpstp)
3090{
3091    float_status *fpst = fpstp;
3092    return float32_muladd(a, b, c, 0, fpst);
3093}
3094
3095float64 VFP_HELPER(muladd, d)(float64 a, float64 b, float64 c, void *fpstp)
3096{
3097    float_status *fpst = fpstp;
3098    return float64_muladd(a, b, c, 0, fpst);
3099}
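
/* Added note: float32_muladd()/float64_muladd() compute (a * b) + c with
 * a single rounding, as required for the VFPv4 fused multiply-accumulate
 * instructions; the 0 passed as the flags argument requests the plain
 * form with no operand or result negation.
 */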
3100
3101void HELPER(set_teecr)(CPUState *env, uint32_t val)
3102{
3103    val &= 1;
3104    if (env->teecr != val) {
3105        env->teecr = val;
3106        tb_flush(env);
3107    }
3108}
3109