qemu/target/alpha/helper.c
/*
 *  Alpha emulation cpu helpers for qemu.
 *
 *  Copyright (c) 2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"

#include "cpu.h"
#include "exec/exec-all.h"
#include "fpu/softfloat-types.h"
#include "exec/helper-proto.h"
#include "qemu/qemu-print.h"


#define CONVERT_BIT(X, SRC, DST) \
    (SRC > DST ? (X) / (SRC / DST) & (DST) : ((X) & SRC) * (DST / SRC))
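/*
 * CONVERT_BIT copies a single-bit flag from the position named by mask SRC
 * to the position named by mask DST: when SRC is the higher bit the value
 * is scaled down by the ratio SRC / DST, otherwise scaled up by DST / SRC.
 * For example, CONVERT_BIT(fpcr, FPCR_INED, FPCR_INE) moves the inexact
 * trap-disable bit onto the inexact status bit position.
 */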

uint64_t cpu_alpha_load_fpcr(CPUAlphaState *env)
{
    return (uint64_t)env->fpcr << 32;
}

void cpu_alpha_store_fpcr(CPUAlphaState *env, uint64_t val)
{
    static const uint8_t rm_map[] = {
        [FPCR_DYN_NORMAL >> FPCR_DYN_SHIFT] = float_round_nearest_even,
        [FPCR_DYN_CHOPPED >> FPCR_DYN_SHIFT] = float_round_to_zero,
        [FPCR_DYN_MINUS >> FPCR_DYN_SHIFT] = float_round_down,
        [FPCR_DYN_PLUS >> FPCR_DYN_SHIFT] = float_round_up,
    };
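    /*
     * rm_map translates the two-bit FPCR dynamic rounding-mode field into
     * the softfloat rounding-mode constants; the selected value is cached
     * in fpcr_dyn_round further below.
     */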

    uint32_t fpcr = val >> 32;
    uint32_t t = 0;

    /* Record the raw value before adjusting for linux-user.  */
    env->fpcr = fpcr;

#ifdef CONFIG_USER_ONLY
    /*
     * Override some of these bits with the contents of ENV->SWCR.
     * In system mode, some of these would trap to the kernel, at
     * which point the kernel's handler would emulate and apply
     * the software exception mask.
     */
    uint32_t soft_fpcr = alpha_ieee_swcr_to_fpcr(env->swcr) >> 32;
    fpcr |= soft_fpcr & (FPCR_STATUS_MASK | FPCR_DNZ);

    /*
     * The IOV exception is disabled by the kernel with SWCR_TRAP_ENABLE_INV,
     * which got mapped by alpha_ieee_swcr_to_fpcr to FPCR_INVD.
     * Add FPCR_IOV to fpcr_exc_enable so that it is handled identically.
     */
    t |= CONVERT_BIT(soft_fpcr, FPCR_INVD, FPCR_IOV);
#endif

    t |= CONVERT_BIT(fpcr, FPCR_INED, FPCR_INE);
    t |= CONVERT_BIT(fpcr, FPCR_UNFD, FPCR_UNF);
    t |= CONVERT_BIT(fpcr, FPCR_OVFD, FPCR_OVF);
    t |= CONVERT_BIT(fpcr, FPCR_DZED, FPCR_DZE);
    t |= CONVERT_BIT(fpcr, FPCR_INVD, FPCR_INV);
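    /*
     * t now holds the trap-disable bits remapped onto the corresponding
     * status-bit positions, so its complement (masked to the status bits)
     * is the set of exceptions that remain enabled.
     */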

    env->fpcr_exc_enable = ~t & FPCR_STATUS_MASK;

    env->fpcr_dyn_round = rm_map[(fpcr & FPCR_DYN_MASK) >> FPCR_DYN_SHIFT];
    env->fp_status.flush_inputs_to_zero = (fpcr & FPCR_DNZ) != 0;

    t = (fpcr & FPCR_UNFD) && (fpcr & FPCR_UNDZ);
#ifdef CONFIG_USER_ONLY
    t |= (env->swcr & SWCR_MAP_UMZ) != 0;
#endif
    env->fpcr_flush_to_zero = t;
}

uint64_t helper_load_fpcr(CPUAlphaState *env)
{
    return cpu_alpha_load_fpcr(env);
}

void helper_store_fpcr(CPUAlphaState *env, uint64_t val)
{
    cpu_alpha_store_fpcr(env, val);
}
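
/*
 * General-register accessors: in PALmode, r8-r14 and r25 are remapped onto
 * the PALcode shadow registers, giving PALcode scratch space without
 * clobbering the OS-visible values; otherwise (and in user-only emulation)
 * the normal register file is used.
 */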

static uint64_t *cpu_alpha_addr_gr(CPUAlphaState *env, unsigned reg)
{
#ifndef CONFIG_USER_ONLY
    if (env->flags & ENV_FLAG_PAL_MODE) {
        if (reg >= 8 && reg <= 14) {
            return &env->shadow[reg - 8];
        } else if (reg == 25) {
            return &env->shadow[7];
        }
    }
#endif
    return &env->ir[reg];
}

uint64_t cpu_alpha_load_gr(CPUAlphaState *env, unsigned reg)
{
    return *cpu_alpha_addr_gr(env, reg);
}

void cpu_alpha_store_gr(CPUAlphaState *env, unsigned reg, uint64_t val)
{
    *cpu_alpha_addr_gr(env, reg) = val;
}

#if defined(CONFIG_USER_ONLY)
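/*
 * In user-only emulation the guest address space is mapped directly by the
 * host, so this hook is reached only for a genuine access fault; record the
 * faulting address and report EXCP_MMFAULT to the cpu loop.
 */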
bool alpha_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
                        MMUAccessType access_type, int mmu_idx,
                        bool probe, uintptr_t retaddr)
{
    AlphaCPU *cpu = ALPHA_CPU(cs);

    cs->exception_index = EXCP_MMFAULT;
    cpu->env.trap_arg0 = address;
    cpu_loop_exit_restore(cs, retaddr);
}
#else
/* Returns the OSF/1 entMM failure indication, or -1 on success.  */
static int get_physical_address(CPUAlphaState *env, target_ulong addr,
                                int prot_need, int mmu_idx,
                                target_ulong *pphys, int *pprot)
{
    CPUState *cs = env_cpu(env);
    target_long saddr = addr;
    target_ulong phys = 0;
    target_ulong L1pte, L2pte, L3pte;
    target_ulong pt, index;
    int prot = 0;
    int ret = MM_K_ACV;

    /* Handle physical accesses.  */
    if (mmu_idx == MMU_PHYS_IDX) {
        phys = addr;
        prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        ret = -1;
        goto exit;
    }

    /* Ensure that the virtual address is properly sign-extended from
       the last implemented virtual address bit.  */
    if (saddr >> TARGET_VIRT_ADDR_SPACE_BITS != saddr >> 63) {
        goto exit;
    }

    /* Translate the superpage.  */
    /* ??? When we do more than emulate Unix PALcode, we'll need to
       determine which KSEG is actually active.  */
    if (saddr < 0 && ((saddr >> 41) & 3) == 2) {
        /* User-space cannot access KSEG addresses.  */
        if (mmu_idx != MMU_KERNEL_IDX) {
            goto exit;
        }

        /* For the benefit of the Typhoon chipset, move bit 40 to bit 43.
           We would not do this if the 48-bit KSEG is enabled.  */
        phys = saddr & ((1ull << 40) - 1);
        phys |= (saddr & (1ull << 40)) << 3;

        prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        ret = -1;
        goto exit;
    }

    /* Interpret the page table exactly like PALcode does.  */

    pt = env->ptbr;

    /* TODO: rather than using ldq_phys() to read the page table we should
     * use address_space_ldq() so that we can handle the case when
     * the page table read gives a bus fault, rather than ignoring it.
     * For the existing code the zero data that ldq_phys will return for
     * an access to invalid memory will result in our treating the page
     * table as invalid, which may even be the right behaviour.
     */
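    /*
     * Three-level table walk: each level consumes 10 bits of the virtual
     * page number (1024 eight-byte entries per table), and each valid PTE
     * carries the PFN of the next level (or of the final data page)
     * in bits 63:32.
     */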

    /* L1 page table read.  */
    index = (addr >> (TARGET_PAGE_BITS + 20)) & 0x3ff;
    L1pte = ldq_phys(cs->as, pt + index*8);

    if (unlikely((L1pte & PTE_VALID) == 0)) {
        ret = MM_K_TNV;
        goto exit;
    }
    if (unlikely((L1pte & PTE_KRE) == 0)) {
        goto exit;
    }
    pt = L1pte >> 32 << TARGET_PAGE_BITS;

    /* L2 page table read.  */
    index = (addr >> (TARGET_PAGE_BITS + 10)) & 0x3ff;
    L2pte = ldq_phys(cs->as, pt + index*8);

    if (unlikely((L2pte & PTE_VALID) == 0)) {
        ret = MM_K_TNV;
        goto exit;
    }
    if (unlikely((L2pte & PTE_KRE) == 0)) {
        goto exit;
    }
    pt = L2pte >> 32 << TARGET_PAGE_BITS;

    /* L3 page table read.  */
    index = (addr >> TARGET_PAGE_BITS) & 0x3ff;
    L3pte = ldq_phys(cs->as, pt + index*8);

    phys = L3pte >> 32 << TARGET_PAGE_BITS;
    if (unlikely((L3pte & PTE_VALID) == 0)) {
        ret = MM_K_TNV;
        goto exit;
    }

#if PAGE_READ != 1 || PAGE_WRITE != 2 || PAGE_EXEC != 4
# error page bits out of date
#endif
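    /*
     * The prot bits accumulated below rely on the PAGE_* values checked
     * above: PTE_KRE/PTE_KWE shifted by mmu_idx select the access-enable
     * bits for the current privilege level, and the PTE fault-on bits sit
     * one position above the matching PAGE_* bits, so "L3pte >> 1" lines
     * up with prot for the fault-on-operation test.
     */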

    /* Check access violations.  */
    if (L3pte & (PTE_KRE << mmu_idx)) {
        prot |= PAGE_READ | PAGE_EXEC;
    }
    if (L3pte & (PTE_KWE << mmu_idx)) {
        prot |= PAGE_WRITE;
    }
    if (unlikely((prot & prot_need) == 0 && prot_need)) {
        goto exit;
    }

    /* Check fault-on-operation violations.  */
    prot &= ~(L3pte >> 1);
    ret = -1;
    if (unlikely((prot & prot_need) == 0)) {
        ret = (prot_need & PAGE_EXEC ? MM_K_FOE :
               prot_need & PAGE_WRITE ? MM_K_FOW :
               prot_need & PAGE_READ ? MM_K_FOR : -1);
    }

 exit:
    *pphys = phys;
    *pprot = prot;
    return ret;
}

hwaddr alpha_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
{
    AlphaCPU *cpu = ALPHA_CPU(cs);
    target_ulong phys;
    int prot, fail;

    fail = get_physical_address(&cpu->env, addr, 0, 0, &phys, &prot);
    return (fail >= 0 ? -1 : phys);
}

bool alpha_cpu_tlb_fill(CPUState *cs, vaddr addr, int size,
                        MMUAccessType access_type, int mmu_idx,
                        bool probe, uintptr_t retaddr)
{
    AlphaCPU *cpu = ALPHA_CPU(cs);
    CPUAlphaState *env = &cpu->env;
    target_ulong phys;
    int prot, fail;

    fail = get_physical_address(env, addr, 1 << access_type,
                                mmu_idx, &phys, &prot);
    if (unlikely(fail >= 0)) {
        if (probe) {
            return false;
        }
        cs->exception_index = EXCP_MMFAULT;
        env->trap_arg0 = addr;
        env->trap_arg1 = fail;
        env->trap_arg2 = (access_type == MMU_DATA_LOAD ? 0ull :
                          access_type == MMU_DATA_STORE ? 1ull :
                          /* access_type == MMU_INST_FETCH */ -1ull);
        cpu_loop_exit_restore(cs, retaddr);
    }

    tlb_set_page(cs, addr & TARGET_PAGE_MASK, phys & TARGET_PAGE_MASK,
                 prot, mmu_idx, TARGET_PAGE_SIZE);
    return true;
}
#endif /* USER_ONLY */

void alpha_cpu_do_interrupt(CPUState *cs)
{
    AlphaCPU *cpu = ALPHA_CPU(cs);
    CPUAlphaState *env = &cpu->env;
    int i = cs->exception_index;

    if (qemu_loglevel_mask(CPU_LOG_INT)) {
        static int count;
        const char *name = "<unknown>";

        switch (i) {
        case EXCP_RESET:
            name = "reset";
            break;
        case EXCP_MCHK:
            name = "mchk";
            break;
        case EXCP_SMP_INTERRUPT:
            name = "smp_interrupt";
            break;
        case EXCP_CLK_INTERRUPT:
            name = "clk_interrupt";
            break;
        case EXCP_DEV_INTERRUPT:
            name = "dev_interrupt";
            break;
        case EXCP_MMFAULT:
            name = "mmfault";
            break;
        case EXCP_UNALIGN:
            name = "unalign";
            break;
        case EXCP_OPCDEC:
            name = "opcdec";
            break;
        case EXCP_ARITH:
            name = "arith";
            break;
        case EXCP_FEN:
            name = "fen";
            break;
        case EXCP_CALL_PAL:
            name = "call_pal";
            break;
        }
        qemu_log("INT %6d: %s(%#x) cpu=%d pc=%016"
                 PRIx64 " sp=%016" PRIx64 "\n",
                 ++count, name, env->error_code, cs->cpu_index,
                 env->pc, env->ir[IR_SP]);
    }

    cs->exception_index = -1;

#if !defined(CONFIG_USER_ONLY)
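    /*
     * Convert the exception number into the offset of its PALcode entry
     * point; execution resumes at env->palbr + offset in PALmode.  The
     * fault and interrupt entry points are spaced 0x80 bytes apart;
     * CALL_PAL entries are handled separately below.
     */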
    switch (i) {
    case EXCP_RESET:
        i = 0x0000;
        break;
    case EXCP_MCHK:
        i = 0x0080;
        break;
    case EXCP_SMP_INTERRUPT:
        i = 0x0100;
        break;
    case EXCP_CLK_INTERRUPT:
        i = 0x0180;
        break;
    case EXCP_DEV_INTERRUPT:
        i = 0x0200;
        break;
    case EXCP_MMFAULT:
        i = 0x0280;
        break;
    case EXCP_UNALIGN:
        i = 0x0300;
        break;
    case EXCP_OPCDEC:
        i = 0x0380;
        break;
    case EXCP_ARITH:
        i = 0x0400;
        break;
    case EXCP_FEN:
        i = 0x0480;
        break;
    case EXCP_CALL_PAL:
        i = env->error_code;
        /* There are 64 entry points for both privileged and unprivileged,
           with bit 0x80 indicating unprivileged.  Each entry point gets
           64 bytes to do its job.  */
        if (i & 0x80) {
            i = 0x2000 + (i - 0x80) * 64;
        } else {
            i = 0x1000 + i * 64;
        }
        break;
    default:
        cpu_abort(cs, "Unhandled CPU exception");
    }

    /* Remember where the exception happened.  Emulate real hardware in
       that the low bit of the PC indicates PALmode.  */
    env->exc_addr = env->pc | (env->flags & ENV_FLAG_PAL_MODE);

    /* Continue execution at the PALcode entry point.  */
    env->pc = env->palbr + i;

    /* Switch to PALmode.  */
    env->flags |= ENV_FLAG_PAL_MODE;
#endif /* !USER_ONLY */
}

bool alpha_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
    AlphaCPU *cpu = ALPHA_CPU(cs);
    CPUAlphaState *env = &cpu->env;
    int idx = -1;

    /* We never take interrupts while in PALmode.  */
    if (env->flags & ENV_FLAG_PAL_MODE) {
        return false;
    }

    /* Fall through the switch, collecting the highest priority
       interrupt that isn't masked by the processor status IPL.  */
    /* ??? This hard-codes the OSF/1 interrupt levels.  */
    switch ((env->flags >> ENV_FLAG_PS_SHIFT) & PS_INT_MASK) {
    case 0 ... 3:
        if (interrupt_request & CPU_INTERRUPT_HARD) {
            idx = EXCP_DEV_INTERRUPT;
        }
        /* FALLTHRU */
    case 4:
        if (interrupt_request & CPU_INTERRUPT_TIMER) {
            idx = EXCP_CLK_INTERRUPT;
        }
        /* FALLTHRU */
    case 5:
        if (interrupt_request & CPU_INTERRUPT_SMP) {
            idx = EXCP_SMP_INTERRUPT;
        }
        /* FALLTHRU */
    case 6:
        if (interrupt_request & CPU_INTERRUPT_MCHK) {
            idx = EXCP_MCHK;
        }
    }
    if (idx >= 0) {
        cs->exception_index = idx;
        env->error_code = 0;
        alpha_cpu_do_interrupt(cs);
        return true;
    }
    return false;
}
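
/*
 * The register dump below uses the OSF/Linux calling-convention names;
 * r31 is the architecturally zero register and is omitted, hence only
 * 31 named entries.
 */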

void alpha_cpu_dump_state(CPUState *cs, FILE *f, int flags)
{
    static const char linux_reg_names[31][4] = {
        "v0",  "t0",  "t1", "t2",  "t3", "t4", "t5", "t6",
        "t7",  "s0",  "s1", "s2",  "s3", "s4", "s5", "fp",
        "a0",  "a1",  "a2", "a3",  "a4", "a5", "t8", "t9",
        "t10", "t11", "ra", "t12", "at", "gp", "sp"
    };
    AlphaCPU *cpu = ALPHA_CPU(cs);
    CPUAlphaState *env = &cpu->env;
    int i;

    qemu_fprintf(f, "PC      " TARGET_FMT_lx " PS      %02x\n",
                 env->pc, extract32(env->flags, ENV_FLAG_PS_SHIFT, 8));
    for (i = 0; i < 31; i++) {
        qemu_fprintf(f, "%-8s" TARGET_FMT_lx "%c",
                     linux_reg_names[i], cpu_alpha_load_gr(env, i),
                     (i % 3) == 2 ? '\n' : ' ');
    }

    qemu_fprintf(f, "lock_a  " TARGET_FMT_lx " lock_v  " TARGET_FMT_lx "\n",
                 env->lock_addr, env->lock_value);

    if (flags & CPU_DUMP_FPU) {
        for (i = 0; i < 31; i++) {
            qemu_fprintf(f, "f%-7d%016" PRIx64 "%c", i, env->fir[i],
                         (i % 3) == 2 ? '\n' : ' ');
        }
        qemu_fprintf(f, "fpcr    %016" PRIx64 "\n", cpu_alpha_load_fpcr(env));
    }
    qemu_fprintf(f, "\n");
}

/* This should only be called from translate, via gen_excp.
   We expect that ENV->PC has already been updated.  */
void QEMU_NORETURN helper_excp(CPUAlphaState *env, int excp, int error)
{
    CPUState *cs = env_cpu(env);

    cs->exception_index = excp;
    env->error_code = error;
    cpu_loop_exit(cs);
}

/* This may be called from any of the helpers to set up EXCEPTION_INDEX.  */
void QEMU_NORETURN dynamic_excp(CPUAlphaState *env, uintptr_t retaddr,
                                int excp, int error)
{
    CPUState *cs = env_cpu(env);

    cs->exception_index = excp;
    env->error_code = error;
    if (retaddr) {
        cpu_restore_state(cs, retaddr, true);
        /* Floating-point exceptions (our only users) point to the next PC.  */
        env->pc += 4;
    }
    cpu_loop_exit(cs);
}

void QEMU_NORETURN arith_excp(CPUAlphaState *env, uintptr_t retaddr,
                              int exc, uint64_t mask)
{
    env->trap_arg0 = exc;
    env->trap_arg1 = mask;
    dynamic_excp(env, retaddr, EXCP_ARITH, 0);
}