/* qemu/target/alpha/helper.c */
   1/*
   2 *  Alpha emulation cpu helpers for qemu.
   3 *
   4 *  Copyright (c) 2007 Jocelyn Mayer
   5 *
   6 * This library is free software; you can redistribute it and/or
   7 * modify it under the terms of the GNU Lesser General Public
   8 * License as published by the Free Software Foundation; either
   9 * version 2.1 of the License, or (at your option) any later version.
  10 *
  11 * This library is distributed in the hope that it will be useful,
  12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  14 * Lesser General Public License for more details.
  15 *
  16 * You should have received a copy of the GNU Lesser General Public
  17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
  18 */
  19
  20#include "qemu/osdep.h"
  21#include "qemu/log.h"
  22#include "cpu.h"
  23#include "exec/exec-all.h"
  24#include "fpu/softfloat-types.h"
  25#include "exec/helper-proto.h"
  26#include "qemu/qemu-print.h"
  27
  28
  29#define CONVERT_BIT(X, SRC, DST) \
  30    (SRC > DST ? (X) / (SRC / DST) & (DST) : ((X) & SRC) * (DST / SRC))
  31
  32uint64_t cpu_alpha_load_fpcr(CPUAlphaState *env)
  33{
  34    return (uint64_t)env->fpcr << 32;
  35}
  36
/*
 * Store a new 64-bit FPCR value and derive the cached softfloat
 * settings from it: the dynamic rounding mode, the exception enable
 * mask, and the flush-to-zero / denormal-input handling flags.
 */
void cpu_alpha_store_fpcr(CPUAlphaState *env, uint64_t val)
{
    /* Map the FPCR dynamic rounding mode field to softfloat's
       rounding mode constants.  */
    static const uint8_t rm_map[] = {
        [FPCR_DYN_NORMAL >> FPCR_DYN_SHIFT] = float_round_nearest_even,
        [FPCR_DYN_CHOPPED >> FPCR_DYN_SHIFT] = float_round_to_zero,
        [FPCR_DYN_MINUS >> FPCR_DYN_SHIFT] = float_round_down,
        [FPCR_DYN_PLUS >> FPCR_DYN_SHIFT] = float_round_up,
    };

    /* The architected FPCR lives in the high 32 bits of VAL.  */
    uint32_t fpcr = val >> 32;
    uint32_t t = 0;

    /* Record the raw value before adjusting for linux-user.  */
    env->fpcr = fpcr;

#ifdef CONFIG_USER_ONLY
    /*
     * Override some of these bits with the contents of ENV->SWCR.
     * In system mode, some of these would trap to the kernel, at
     * which point the kernel's handler would emulate and apply
     * the software exception mask.
     */
    uint32_t soft_fpcr = alpha_ieee_swcr_to_fpcr(env->swcr) >> 32;
    fpcr |= soft_fpcr & (FPCR_STATUS_MASK | FPCR_DNZ);

    /*
     * The IOV exception is disabled by the kernel with SWCR_TRAP_ENABLE_INV,
     * which got mapped by alpha_ieee_swcr_to_fpcr to FPCR_INVD.
     * Add FPCR_IOV to fpcr_exc_enable so that it is handled identically.
     */
    t |= CONVERT_BIT(soft_fpcr, FPCR_INVD, FPCR_IOV);
#endif

    /* Gather each per-exception "disable" bit, relocated to the position
       of the corresponding status bit.  */
    t |= CONVERT_BIT(fpcr, FPCR_INED, FPCR_INE);
    t |= CONVERT_BIT(fpcr, FPCR_UNFD, FPCR_UNF);
    t |= CONVERT_BIT(fpcr, FPCR_OVFD, FPCR_OVF);
    t |= CONVERT_BIT(fpcr, FPCR_DZED, FPCR_DZE);
    t |= CONVERT_BIT(fpcr, FPCR_INVD, FPCR_INV);

    /* An exception is enabled exactly when its disable bit is clear.  */
    env->fpcr_exc_enable = ~t & FPCR_STATUS_MASK;

    env->fpcr_dyn_round = rm_map[(fpcr & FPCR_DYN_MASK) >> FPCR_DYN_SHIFT];
    env->fp_status.flush_inputs_to_zero = (fpcr & FPCR_DNZ) != 0;

    /* Flush results to zero when underflow traps are disabled (UNFD)
       and underflow-to-zero (UNDZ) is requested.  */
    t = (fpcr & FPCR_UNFD) && (fpcr & FPCR_UNDZ);
#ifdef CONFIG_USER_ONLY
    t |= (env->swcr & SWCR_MAP_UMZ) != 0;
#endif
    env->fpcr_flush_to_zero = t;
}
  87
/* TCG helper: read the guest-visible 64-bit FPCR.  */
uint64_t helper_load_fpcr(CPUAlphaState *env)
{
    return cpu_alpha_load_fpcr(env);
}
  92
/* TCG helper: write the FPCR and refresh the cached fp settings.  */
void helper_store_fpcr(CPUAlphaState *env, uint64_t val)
{
    cpu_alpha_store_fpcr(env, val);
}
  97
  98static uint64_t *cpu_alpha_addr_gr(CPUAlphaState *env, unsigned reg)
  99{
 100#ifndef CONFIG_USER_ONLY
 101    if (env->flags & ENV_FLAG_PAL_MODE) {
 102        if (reg >= 8 && reg <= 14) {
 103            return &env->shadow[reg - 8];
 104        } else if (reg == 25) {
 105            return &env->shadow[7];
 106        }
 107    }
 108#endif
 109    return &env->ir[reg];
 110}
 111
/* Read integer register REG, honoring PALmode shadow registers.  */
uint64_t cpu_alpha_load_gr(CPUAlphaState *env, unsigned reg)
{
    return *cpu_alpha_addr_gr(env, reg);
}
 116
/* Write VAL to integer register REG, honoring PALmode shadow registers.  */
void cpu_alpha_store_gr(CPUAlphaState *env, unsigned reg, uint64_t val)
{
    *cpu_alpha_addr_gr(env, reg) = val;
}
 121
 122#if defined(CONFIG_USER_ONLY)
/*
 * Record a user-mode memory fault for linux-user, mirroring what the
 * PALcode memory-management handler would pass to the kernel: the
 * faulting ADDRESS, an MMCSR code classifying the fault, and the
 * access cause (0 = read, 1 = write, -1 = execute).
 */
void alpha_cpu_record_sigsegv(CPUState *cs, vaddr address,
                              MMUAccessType access_type,
                              bool maperr, uintptr_t retaddr)
{
    AlphaCPU *cpu = ALPHA_CPU(cs);
    target_ulong mmcsr, cause;

    /* Assuming !maperr, infer the missing protection. */
    switch (access_type) {
    case MMU_DATA_LOAD:
        mmcsr = MM_K_FOR;
        cause = 0;
        break;
    case MMU_DATA_STORE:
        mmcsr = MM_K_FOW;
        cause = 1;
        break;
    case MMU_INST_FETCH:
        mmcsr = MM_K_FOE;
        cause = -1;
        break;
    default:
        g_assert_not_reached();
    }
    if (maperr) {
        if (address < BIT_ULL(TARGET_VIRT_ADDR_SPACE_BITS - 1)) {
            /* Userspace address, therefore page not mapped. */
            mmcsr = MM_K_TNV;
        } else {
            /* Kernel or invalid address. */
            mmcsr = MM_K_ACV;
        }
    }

    /* Record the arguments that PALcode would give to the kernel. */
    cpu->env.trap_arg0 = address;
    cpu->env.trap_arg1 = mmcsr;
    cpu->env.trap_arg2 = cause;
}
 162#else
/* Returns the OSF/1 entMM failure indication, or -1 on success.
   On return, *PPHYS holds the translated physical address and *PPROT
   the granted PAGE_{READ,WRITE,EXEC} permissions (both may be only
   partially filled in on failure).  */
static int get_physical_address(CPUAlphaState *env, target_ulong addr,
                                int prot_need, int mmu_idx,
                                target_ulong *pphys, int *pprot)
{
    CPUState *cs = env_cpu(env);
    target_long saddr = addr;
    target_ulong phys = 0;
    target_ulong L1pte, L2pte, L3pte;
    target_ulong pt, index;
    int prot = 0;
    int ret = MM_K_ACV;        /* default failure: access violation */

    /* Handle physical accesses.  */
    if (mmu_idx == MMU_PHYS_IDX) {
        phys = addr;
        prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        ret = -1;
        goto exit;
    }

    /* Ensure that the virtual address is properly sign-extended from
       the last implemented virtual address bit.  */
    if (saddr >> TARGET_VIRT_ADDR_SPACE_BITS != saddr >> 63) {
        goto exit;
    }

    /* Translate the superpage.  */
    /* ??? When we do more than emulate Unix PALcode, we'll need to
       determine which KSEG is actually active.  */
    if (saddr < 0 && ((saddr >> 41) & 3) == 2) {
        /* User-space cannot access KSEG addresses.  */
        if (mmu_idx != MMU_KERNEL_IDX) {
            goto exit;
        }

        /* For the benefit of the Typhoon chipset, move bit 40 to bit 43.
           We would not do this if the 48-bit KSEG is enabled.  */
        phys = saddr & ((1ull << 40) - 1);
        phys |= (saddr & (1ull << 40)) << 3;

        prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        ret = -1;
        goto exit;
    }

    /* Interpret the page table exactly like PALcode does.  */

    pt = env->ptbr;

    /* TODO: rather than using ldq_phys() to read the page table we should
     * use address_space_ldq() so that we can handle the case when
     * the page table read gives a bus fault, rather than ignoring it.
     * For the existing code the zero data that ldq_phys will return for
     * an access to invalid memory will result in our treating the page
     * table as invalid, which may even be the right behaviour.
     */

    /* L1 page table read.  */
    index = (addr >> (TARGET_PAGE_BITS + 20)) & 0x3ff;
    L1pte = ldq_phys(cs->as, pt + index*8);

    if (unlikely((L1pte & PTE_VALID) == 0)) {
        ret = MM_K_TNV;
        goto exit;
    }
    if (unlikely((L1pte & PTE_KRE) == 0)) {
        goto exit;
    }
    /* The PFN lives in the high 32 bits of the PTE.  */
    pt = L1pte >> 32 << TARGET_PAGE_BITS;

    /* L2 page table read.  */
    index = (addr >> (TARGET_PAGE_BITS + 10)) & 0x3ff;
    L2pte = ldq_phys(cs->as, pt + index*8);

    if (unlikely((L2pte & PTE_VALID) == 0)) {
        ret = MM_K_TNV;
        goto exit;
    }
    if (unlikely((L2pte & PTE_KRE) == 0)) {
        goto exit;
    }
    pt = L2pte >> 32 << TARGET_PAGE_BITS;

    /* L3 page table read.  */
    index = (addr >> TARGET_PAGE_BITS) & 0x3ff;
    L3pte = ldq_phys(cs->as, pt + index*8);

    phys = L3pte >> 32 << TARGET_PAGE_BITS;
    if (unlikely((L3pte & PTE_VALID) == 0)) {
        ret = MM_K_TNV;
        goto exit;
    }

#if PAGE_READ != 1 || PAGE_WRITE != 2 || PAGE_EXEC != 4
# error page bits out of date
#endif

    /* Check access violations.  */
    if (L3pte & (PTE_KRE << mmu_idx)) {
        prot |= PAGE_READ | PAGE_EXEC;
    }
    if (L3pte & (PTE_KWE << mmu_idx)) {
        prot |= PAGE_WRITE;
    }
    if (unlikely((prot & prot_need) == 0 && prot_need)) {
        goto exit;
    }

    /* Check fault-on-operation violations.  */
    prot &= ~(L3pte >> 1);
    ret = -1;
    if (unlikely((prot & prot_need) == 0)) {
        ret = (prot_need & PAGE_EXEC ? MM_K_FOE :
               prot_need & PAGE_WRITE ? MM_K_FOW :
               prot_need & PAGE_READ ? MM_K_FOR : -1);
    }

 exit:
    *pphys = phys;
    *pprot = prot;
    return ret;
}
 286
 287hwaddr alpha_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
 288{
 289    AlphaCPU *cpu = ALPHA_CPU(cs);
 290    target_ulong phys;
 291    int prot, fail;
 292
 293    fail = get_physical_address(&cpu->env, addr, 0, 0, &phys, &prot);
 294    return (fail >= 0 ? -1 : phys);
 295}
 296
/*
 * Handle a TLB miss: translate ADDR for MMU_IDX and install the
 * mapping, or raise EXCP_MMFAULT with the OSF/1 entMM arguments
 * (address, fault code, access cause).  When PROBE is set, report
 * failure by returning false instead of raising the exception.
 */
bool alpha_cpu_tlb_fill(CPUState *cs, vaddr addr, int size,
                        MMUAccessType access_type, int mmu_idx,
                        bool probe, uintptr_t retaddr)
{
    AlphaCPU *cpu = ALPHA_CPU(cs);
    CPUAlphaState *env = &cpu->env;
    target_ulong phys;
    int prot, fail;

    /* 1 << access_type maps MMU_DATA_LOAD/STORE/INST_FETCH onto
       PAGE_READ/WRITE/EXEC respectively.  */
    fail = get_physical_address(env, addr, 1 << access_type,
                                mmu_idx, &phys, &prot);
    if (unlikely(fail >= 0)) {
        if (probe) {
            return false;
        }
        cs->exception_index = EXCP_MMFAULT;
        env->trap_arg0 = addr;
        env->trap_arg1 = fail;
        env->trap_arg2 = (access_type == MMU_DATA_LOAD ? 0ull :
                          access_type == MMU_DATA_STORE ? 1ull :
                          /* access_type == MMU_INST_FETCH */ -1ull);
        cpu_loop_exit_restore(cs, retaddr);
    }

    tlb_set_page(cs, addr & TARGET_PAGE_MASK, phys & TARGET_PAGE_MASK,
                 prot, mmu_idx, TARGET_PAGE_SIZE);
    return true;
}
 325
/*
 * Deliver the exception recorded in CS->EXCEPTION_INDEX: save the
 * interrupted PC (with the PALmode flag in bit 0) into EXC_ADDR,
 * then continue execution at the corresponding PALcode entry point
 * offset from PALBR, in PALmode.
 */
void alpha_cpu_do_interrupt(CPUState *cs)
{
    AlphaCPU *cpu = ALPHA_CPU(cs);
    CPUAlphaState *env = &cpu->env;
    int i = cs->exception_index;

    if (qemu_loglevel_mask(CPU_LOG_INT)) {
        /* Running count of interrupts logged, for correlating traces.  */
        static int count;
        const char *name = "<unknown>";

        switch (i) {
        case EXCP_RESET:
            name = "reset";
            break;
        case EXCP_MCHK:
            name = "mchk";
            break;
        case EXCP_SMP_INTERRUPT:
            name = "smp_interrupt";
            break;
        case EXCP_CLK_INTERRUPT:
            name = "clk_interrupt";
            break;
        case EXCP_DEV_INTERRUPT:
            name = "dev_interrupt";
            break;
        case EXCP_MMFAULT:
            name = "mmfault";
            break;
        case EXCP_UNALIGN:
            name = "unalign";
            break;
        case EXCP_OPCDEC:
            name = "opcdec";
            break;
        case EXCP_ARITH:
            name = "arith";
            break;
        case EXCP_FEN:
            name = "fen";
            break;
        case EXCP_CALL_PAL:
            name = "call_pal";
            break;
        }
        qemu_log("INT %6d: %s(%#x) cpu=%d pc=%016"
                 PRIx64 " sp=%016" PRIx64 "\n",
                 ++count, name, env->error_code, cs->cpu_index,
                 env->pc, env->ir[IR_SP]);
    }

    cs->exception_index = -1;

    /* Convert the exception index into a PALcode entry point offset.  */
    switch (i) {
    case EXCP_RESET:
        i = 0x0000;
        break;
    case EXCP_MCHK:
        i = 0x0080;
        break;
    case EXCP_SMP_INTERRUPT:
        i = 0x0100;
        break;
    case EXCP_CLK_INTERRUPT:
        i = 0x0180;
        break;
    case EXCP_DEV_INTERRUPT:
        i = 0x0200;
        break;
    case EXCP_MMFAULT:
        i = 0x0280;
        break;
    case EXCP_UNALIGN:
        i = 0x0300;
        break;
    case EXCP_OPCDEC:
        i = 0x0380;
        break;
    case EXCP_ARITH:
        i = 0x0400;
        break;
    case EXCP_FEN:
        i = 0x0480;
        break;
    case EXCP_CALL_PAL:
        i = env->error_code;
        /* There are 64 entry points for both privileged and unprivileged,
           with bit 0x80 indicating unprivileged.  Each entry point gets
           64 bytes to do its job.  */
        if (i & 0x80) {
            i = 0x2000 + (i - 0x80) * 64;
        } else {
            i = 0x1000 + i * 64;
        }
        break;
    default:
        cpu_abort(cs, "Unhandled CPU exception");
    }

    /* Remember where the exception happened.  Emulate real hardware in
       that the low bit of the PC indicates PALmode.  */
    env->exc_addr = env->pc | (env->flags & ENV_FLAG_PAL_MODE);

    /* Continue execution at the PALcode entry point.  */
    env->pc = env->palbr + i;

    /* Switch to PALmode.  */
    env->flags |= ENV_FLAG_PAL_MODE;
}
 435
/*
 * Check for a pending interrupt that is not masked by the current
 * processor-status IPL; deliver the highest-priority one found via
 * alpha_cpu_do_interrupt.  Returns true when an interrupt was taken.
 */
bool alpha_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
    AlphaCPU *cpu = ALPHA_CPU(cs);
    CPUAlphaState *env = &cpu->env;
    int idx = -1;

    /* We never take interrupts while in PALmode.  */
    if (env->flags & ENV_FLAG_PAL_MODE) {
        return false;
    }

    /* Fall through the switch, collecting the highest priority
       interrupt that isn't masked by the processor status IPL.  */
    /* ??? This hard-codes the OSF/1 interrupt levels.  */
    switch ((env->flags >> ENV_FLAG_PS_SHIFT) & PS_INT_MASK) {
    case 0 ... 3:
        if (interrupt_request & CPU_INTERRUPT_HARD) {
            idx = EXCP_DEV_INTERRUPT;
        }
        /* FALLTHRU */
    case 4:
        if (interrupt_request & CPU_INTERRUPT_TIMER) {
            idx = EXCP_CLK_INTERRUPT;
        }
        /* FALLTHRU */
    case 5:
        if (interrupt_request & CPU_INTERRUPT_SMP) {
            idx = EXCP_SMP_INTERRUPT;
        }
        /* FALLTHRU */
    case 6:
        if (interrupt_request & CPU_INTERRUPT_MCHK) {
            idx = EXCP_MCHK;
        }
    }
    if (idx >= 0) {
        cs->exception_index = idx;
        env->error_code = 0;
        alpha_cpu_do_interrupt(cs);
        return true;
    }
    return false;
}
 479
 480#endif /* !CONFIG_USER_ONLY */
 481
/*
 * Dump CPU state to F: PC, processor status, the 31 integer registers
 * under their Linux ABI names, the load-locked address/value pair,
 * and (when CPU_DUMP_FPU is requested) the FP registers and FPCR.
 */
void alpha_cpu_dump_state(CPUState *cs, FILE *f, int flags)
{
    /* Linux ABI names for integer registers 0-30 ($31 is the zero reg).  */
    static const char linux_reg_names[31][4] = {
        "v0",  "t0",  "t1", "t2",  "t3", "t4", "t5", "t6",
        "t7",  "s0",  "s1", "s2",  "s3", "s4", "s5", "fp",
        "a0",  "a1",  "a2", "a3",  "a4", "a5", "t8", "t9",
        "t10", "t11", "ra", "t12", "at", "gp", "sp"
    };
    AlphaCPU *cpu = ALPHA_CPU(cs);
    CPUAlphaState *env = &cpu->env;
    int i;

    qemu_fprintf(f, "PC      " TARGET_FMT_lx " PS      %02x\n",
                 env->pc, extract32(env->flags, ENV_FLAG_PS_SHIFT, 8));
    for (i = 0; i < 31; i++) {
        /* Three registers per output line.  */
        qemu_fprintf(f, "%-8s" TARGET_FMT_lx "%c",
                     linux_reg_names[i], cpu_alpha_load_gr(env, i),
                     (i % 3) == 2 ? '\n' : ' ');
    }

    qemu_fprintf(f, "lock_a  " TARGET_FMT_lx " lock_v  " TARGET_FMT_lx "\n",
                 env->lock_addr, env->lock_value);

    if (flags & CPU_DUMP_FPU) {
        for (i = 0; i < 31; i++) {
            qemu_fprintf(f, "f%-7d%016" PRIx64 "%c", i, env->fir[i],
                         (i % 3) == 2 ? '\n' : ' ');
        }
        qemu_fprintf(f, "fpcr    %016" PRIx64 "\n", cpu_alpha_load_fpcr(env));
    }
    qemu_fprintf(f, "\n");
}
 514
 515/* This should only be called from translate, via gen_excp.
 516   We expect that ENV->PC has already been updated.  */
 517G_NORETURN void helper_excp(CPUAlphaState *env, int excp, int error)
 518{
 519    CPUState *cs = env_cpu(env);
 520
 521    cs->exception_index = excp;
 522    env->error_code = error;
 523    cpu_loop_exit(cs);
 524}
 525
/* This may be called from any of the helpers to set up EXCEPTION_INDEX.
   When RETADDR is non-zero, the guest PC is reconstructed from the
   host return address before the exception is raised.  */
G_NORETURN void dynamic_excp(CPUAlphaState *env, uintptr_t retaddr,
                             int excp, int error)
{
    CPUState *cs = env_cpu(env);

    cs->exception_index = excp;
    env->error_code = error;
    if (retaddr) {
        cpu_restore_state(cs, retaddr, true);
        /* Floating-point exceptions (our only users) point to the next PC.  */
        env->pc += 4;
    }
    cpu_loop_exit(cs);
}
 541
/* Raise EXCP_ARITH, recording the exception summary (EXC) and the
   destination register write mask as the OSF/1 entArith arguments.  */
G_NORETURN void arith_excp(CPUAlphaState *env, uintptr_t retaddr,
                           int exc, uint64_t mask)
{
    env->trap_arg0 = exc;
    env->trap_arg1 = mask;
    dynamic_excp(env, retaddr, EXCP_ARITH, 0);
}
 549