qemu/cpu-exec.c
/*
 *  i386 emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#include "exec.h"
#include "disas.h"
#include "tcg.h"
#include "kvm.h"
#include "qemu-barrier.h"

#if !defined(CONFIG_SOFTMMU)
#undef EAX
#undef ECX
#undef EDX
#undef EBX
#undef ESP
#undef EBP
#undef ESI
#undef EDI
#undef EIP
#include <signal.h>
#ifdef __linux__
#include <sys/ucontext.h>
#endif
#endif

#if defined(__sparc__) && !defined(CONFIG_SOLARIS)
// Work around ugly bugs in glibc that mangle global register contents
#undef env
#define env cpu_single_env
#endif
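
/* Note: in this build 'env' is normally a global register variable
   (mapped to a fixed host register by the target's exec.h), which is
   why the workaround above reloads it from cpu_single_env on hosts
   where glibc may clobber global registers. */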

int tb_invalidated_flag;

//#define CONFIG_DEBUG_EXEC
//#define DEBUG_SIGNAL

int qemu_cpu_has_work(CPUState *env)
{
    return cpu_has_work(env);
}

void cpu_loop_exit(void)
{
    env->current_tb = NULL;
    longjmp(env->jmp_env, 1);
}
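
/* Both cpu_loop_exit() and cpu_resume_from_signal() unwind with
   longjmp(): control returns to the setjmp(env->jmp_env) at the top of
   the main loop in cpu_exec(), which then re-examines
   env->exception_index. */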

/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
void cpu_resume_from_signal(CPUState *env1, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
#ifdef __linux__
    struct ucontext *uc = puc;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#endif
#endif

    env = env1;

    /* XXX: restore cpu registers saved in host registers */

#if !defined(CONFIG_SOFTMMU)
    if (puc) {
        /* XXX: use siglongjmp ? */
#ifdef __linux__
#ifdef __ia64
        sigprocmask(SIG_SETMASK, (sigset_t *)&uc->uc_sigmask, NULL);
#else
        sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
#endif
#elif defined(__OpenBSD__)
        sigprocmask(SIG_SETMASK, &uc->sc_mask, NULL);
#endif
    }
#endif
    env->exception_index = -1;
    longjmp(env->jmp_env, 1);
}

/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
static void cpu_exec_nocache(int max_cycles, TranslationBlock *orig_tb)
{
    unsigned long next_tb;
    TranslationBlock *tb;

    /* Should never happen.
       We only end up here when an existing TB is too long.  */
    if (max_cycles > CF_COUNT_MASK)
        max_cycles = CF_COUNT_MASK;

    tb = tb_gen_code(env, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                     max_cycles);
    env->current_tb = tb;
    /* execute the generated code */
    next_tb = tcg_qemu_tb_exec(tb->tc_ptr);
    env->current_tb = NULL;

    if ((next_tb & 3) == 2) {
        /* Restore PC.  This may happen if async event occurs before
           the TB starts executing.  */
        cpu_pc_from_tb(env, tb);
    }
    tb_phys_invalidate(tb, -1);
    tb_free(tb);
}

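/* Look up a TB in the physical hash table, translating a new one on a
   miss.  TBs are hashed by the physical address of their first
   instruction, so the cache stays valid across changes of the virtual
   address mapping. */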
static TranslationBlock *tb_find_slow(target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    TranslationBlock *tb, **ptb1;
    unsigned int h;
    tb_page_addr_t phys_pc, phys_page1, phys_page2;
    target_ulong virt_page2;

    tb_invalidated_flag = 0;

    /* find translated block using physical mappings */
    phys_pc = get_page_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    phys_page2 = -1;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_page_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_gen_code(env, pc, cs_base, flags, 0);

 found:
    /* we add the TB in the virtual pc hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}

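/* Fast path: env->tb_jmp_cache is a small direct-mapped cache indexed
   by a hash of the virtual PC.  On a miss, or if cs_base/flags do not
   match, fall back to tb_find_slow(). */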
static inline TranslationBlock *tb_find_fast(void)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    int flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
    tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                 tb->flags != flags)) {
        tb = tb_find_slow(pc, cs_base, flags);
    }
    return tb;
}

static CPUDebugExcpHandler *debug_excp_handler;

CPUDebugExcpHandler *cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler)
{
    CPUDebugExcpHandler *old_handler = debug_excp_handler;

    debug_excp_handler = handler;
    return old_handler;
}
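
/* A minimal usage sketch (my_debug_handler is hypothetical, not part
 * of this file):
 *
 *     static void my_debug_handler(CPUState *env) { ... }
 *     CPUDebugExcpHandler *old;
 *     old = cpu_set_debug_excp_handler(my_debug_handler);
 *
 * The previous handler is returned so a caller can chain to it or
 * restore it later. */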

static void cpu_handle_debug_exception(CPUState *env)
{
    CPUWatchpoint *wp;

    if (!env->watchpoint_hit)
        QTAILQ_FOREACH(wp, &env->watchpoints, entry)
            wp->flags &= ~BP_WATCHPOINT_HIT;

    if (debug_excp_handler)
        debug_excp_handler(env);
}

/* main execution loop */

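/* Set asynchronously (it is a sig_atomic_t, so e.g. from a signal
   handler or another thread) to make the executing CPU leave its loop;
   it is latched into env->exit_request at the top of cpu_exec(). */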
volatile sig_atomic_t exit_request;

int cpu_exec(CPUState *env1)
{
    volatile host_reg_t saved_env_reg;
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    unsigned long next_tb;

    if (cpu_halted(env1) == EXCP_HALTED)
        return EXCP_HALTED;

    cpu_single_env = env1;

    /* the access to env below is actually saving the global register's
       value, so that files not including target-xyz/exec.h are free to
       use it.  */
    QEMU_BUILD_BUG_ON (sizeof (saved_env_reg) != sizeof (env));
    saved_env_reg = (host_reg_t) env;
    barrier();
    env = env1;

    if (unlikely(exit_request)) {
        env->exit_request = 1;
    }

#if defined(TARGET_I386)
    if (!kvm_enabled()) {
        /* put eflags in CPU temporary format */
        CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
        DF = 1 - (2 * ((env->eflags >> 10) & 1));
        CC_OP = CC_OP_EFLAGS;
        env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    }
#elif defined(TARGET_SPARC)
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_PPC)
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    env->exception_index = -1;

    /* prepare setjmp context for exception handling */
    for(;;) {
        if (setjmp(env->jmp_env) == 0) {
#if defined(__sparc__) && !defined(CONFIG_SOLARIS)
#undef env
            env = cpu_single_env;
#define env cpu_single_env
#endif
            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    if (ret == EXCP_DEBUG)
                        cpu_handle_debug_exception(env);
                    break;
                } else {
#if defined(CONFIG_USER_ONLY)
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    do_interrupt_user(env->exception_index,
                                      env->exception_is_int,
                                      env->error_code,
                                      env->exception_next_eip);
                    /* successfully delivered */
                    env->old_exception = -1;
#endif
                    ret = env->exception_index;
                    break;
#else
#if defined(TARGET_I386)
                    /* simulate a real cpu exception. On i386, it can
                       trigger new exceptions, but we do not handle
                       double or triple faults yet. */
                    do_interrupt(env->exception_index,
                                 env->exception_is_int,
                                 env->error_code,
                                 env->exception_next_eip, 0);
                    /* successfully delivered */
                    env->old_exception = -1;
#elif defined(TARGET_PPC)
                    do_interrupt(env);
#elif defined(TARGET_MICROBLAZE)
                    do_interrupt(env);
#elif defined(TARGET_MIPS)
                    do_interrupt(env);
#elif defined(TARGET_SPARC)
                    do_interrupt(env);
#elif defined(TARGET_ARM)
                    do_interrupt(env);
#elif defined(TARGET_SH4)
                    do_interrupt(env);
#elif defined(TARGET_ALPHA)
                    do_interrupt(env);
#elif defined(TARGET_CRIS)
                    do_interrupt(env);
#elif defined(TARGET_M68K)
                    do_interrupt(0);
#endif
                    env->exception_index = -1;
#endif
                }
            }

            if (kvm_enabled()) {
                kvm_cpu_exec(env);
                longjmp(env->jmp_env, 1);
            }

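            /* next_tb holds the value returned by the last
               tcg_qemu_tb_exec() call: the address of the previously
               executed TB with the taken jump slot index (0 or 1)
               encoded in the two low bits, or 2 when execution stopped
               because the instruction counter expired.  Zero forces a
               fresh lookup with no chaining to a previous TB. */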
            next_tb = 0; /* force lookup of first TB */
            for(;;) {
                interrupt_request = env->interrupt_request;
                if (unlikely(interrupt_request)) {
                    if (unlikely(env->singlestep_enabled & SSTEP_NOIRQ)) {
                        /* Mask out external interrupts for this step. */
                        interrupt_request &= ~(CPU_INTERRUPT_HARD |
                                               CPU_INTERRUPT_FIQ |
                                               CPU_INTERRUPT_SMI |
                                               CPU_INTERRUPT_NMI);
                    }
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        env->exception_index = EXCP_DEBUG;
                        cpu_loop_exit();
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS) || \
    defined(TARGET_MICROBLAZE)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        env->halted = 1;
                        env->exception_index = EXCP_HLT;
                        cpu_loop_exit();
                    }
#endif
#if defined(TARGET_I386)
                    if (interrupt_request & CPU_INTERRUPT_INIT) {
                        svm_check_intercept(SVM_EXIT_INIT);
                        do_cpu_init(env);
                        env->exception_index = EXCP_HALTED;
                        cpu_loop_exit();
                    } else if (interrupt_request & CPU_INTERRUPT_SIPI) {
                        do_cpu_sipi(env);
                    } else if (env->hflags2 & HF2_GIF_MASK) {
                        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                            !(env->hflags & HF_SMM_MASK)) {
                            svm_check_intercept(SVM_EXIT_SMI);
                            env->interrupt_request &= ~CPU_INTERRUPT_SMI;
                            do_smm_enter();
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                                   !(env->hflags2 & HF2_NMI_MASK)) {
                            env->interrupt_request &= ~CPU_INTERRUPT_NMI;
                            env->hflags2 |= HF2_NMI_MASK;
                            do_interrupt(EXCP02_NMI, 0, 0, 0, 1);
                            next_tb = 0;
                        } else if (interrupt_request & CPU_INTERRUPT_MCE) {
                            env->interrupt_request &= ~CPU_INTERRUPT_MCE;
                            do_interrupt(EXCP12_MCHK, 0, 0, 0, 0);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                                   (((env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->hflags2 & HF2_HIF_MASK)) ||
                                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->eflags & IF_MASK &&
                                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
                            int intno;
                            svm_check_intercept(SVM_EXIT_INTR);
                            env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
                            intno = cpu_get_pic_interrupt(env);
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing hardware INT=0x%02x\n", intno);
#if defined(__sparc__) && !defined(CONFIG_SOLARIS)
#undef env
                            env = cpu_single_env;
#define env cpu_single_env
#endif
                            do_interrupt(intno, 0, 0, 0, 1);
                            /* ensure that no TB jump will be modified as
                               the program flow was changed */
                            next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                                   (env->eflags & IF_MASK) &&
                                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                            int intno;
                            /* FIXME: this should respect TPR */
                            svm_check_intercept(SVM_EXIT_VINTR);
                            intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing virtual hardware INT=0x%02x\n", intno);
                            do_interrupt(intno, 0, 0, 0, 1);
                            env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                            next_tb = 0;
#endif
                        }
                    }
#elif defined(TARGET_PPC)
#if 0
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_reset(env);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0)
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        next_tb = 0;
                    }
#elif defined(TARGET_MICROBLAZE)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->sregs[SR_MSR] & MSR_IE)
                        && !(env->sregs[SR_MSR] & (MSR_EIP | MSR_BIP))
                        && !(env->iflags & (D_FLAG | IMM_FLAG))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->CP0_Status & env->CP0_Cause & CP0Ca_IP_mask) &&
                        (env->CP0_Status & (1 << CP0St_IE)) &&
                        !(env->CP0_Status & (1 << CP0St_EXL)) &&
                        !(env->CP0_Status & (1 << CP0St_ERL)) &&
                        !(env->hflags & MIPS_HFLAG_DM)) {
                        /* Raise it */
                        env->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SPARC)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        if (cpu_interrupts_enabled(env) &&
                            env->interrupt_index > 0) {
                            int pil = env->interrupt_index & 0xf;
                            int type = env->interrupt_index & 0xf0;

                            if (((type == TT_EXTINT) &&
                                  cpu_pil_allowed(env, pil)) ||
                                  type != TT_EXTINT) {
                                env->exception_index = env->interrupt_index;
                                do_interrupt(env);
                                next_tb = 0;
                            }
                        }
                    } else if (interrupt_request & CPU_INTERRUPT_TIMER) {
                        //do_interrupt(0, 0, 0, 0, 0);
                        env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->uncached_cpsr & CPSR_F)) {
                        env->exception_index = EXCP_FIQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address.  */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->uncached_cpsr & CPSR_I))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && (env->pregs[PR_CCS] & I_FLAG)
                        && !env->locked_irq) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_NMI
                        && (env->pregs[PR_CCS] & M_FLAG)) {
                        env->exception_index = EXCP_NMI;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        env->exception_index = env->pending_vector;
                        do_interrupt(1);
                        next_tb = 0;
                    }
#endif
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                }
                if (unlikely(env->exit_request)) {
                    env->exit_request = 0;
                    env->exception_index = EXCP_INTERRUPT;
                    cpu_loop_exit();
                }
#if defined(DEBUG_DISAS) || defined(CONFIG_DEBUG_EXEC)
                if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
                    /* restore flags in standard format */
#if defined(TARGET_I386)
                    env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
                    log_cpu_state(env, X86_DUMP_CCOP);
                    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_M68K)
                    cpu_m68k_flush_flags(env, env->cc_op);
                    env->cc_op = CC_OP_FLAGS;
                    env->sr = (env->sr & 0xffe0)
                              | env->cc_dest | (env->cc_x << 4);
                    log_cpu_state(env, 0);
#else
                    log_cpu_state(env, 0);
#endif
                }
#endif /* DEBUG_DISAS || CONFIG_DEBUG_EXEC */
                spin_lock(&tb_lock);
                tb = tb_find_fast();
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                    tb_invalidated_flag = 0;
                }
#ifdef CONFIG_DEBUG_EXEC
                qemu_log_mask(CPU_LOG_EXEC, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
                             (long)tb->tc_ptr, tb->pc,
                             lookup_symbol(tb->pc));
#endif
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                if (next_tb != 0 && tb->page_addr[1] == -1) {
                    tb_add_jump((TranslationBlock *)(next_tb & ~3), next_tb & 3, tb);
                }
                spin_unlock(&tb_lock);

                /* cpu_interrupt might be called while translating the
                   TB, but before it is linked into a potentially
                   infinite loop and becomes env->current_tb. Avoid
                   starting execution if there is a pending interrupt. */
                env->current_tb = tb;
                barrier();
                if (likely(!env->exit_request)) {
                    tc_ptr = tb->tc_ptr;
                    /* execute the generated code */
#if defined(__sparc__) && !defined(CONFIG_SOLARIS)
#undef env
                    env = cpu_single_env;
#define env cpu_single_env
#endif
                    next_tb = tcg_qemu_tb_exec(tc_ptr);
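                    /* env->icount_decr is a union: translated code
                       decrements the 16-bit low half, while
                       icount_extra holds the instructions that did not
                       fit into it.  An expired counter shows up here as
                       the value 2 in the low bits of next_tb. */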
                    if ((next_tb & 3) == 2) {
                        /* Instruction counter expired.  */
                        int insns_left;
                        tb = (TranslationBlock *)(long)(next_tb & ~3);
                        /* Restore PC.  */
                        cpu_pc_from_tb(env, tb);
                        insns_left = env->icount_decr.u32;
                        if (env->icount_extra && insns_left >= 0) {
                            /* Refill decrementer and continue execution.  */
                            env->icount_extra += insns_left;
                            if (env->icount_extra > 0xffff) {
                                insns_left = 0xffff;
                            } else {
                                insns_left = env->icount_extra;
                            }
                            env->icount_extra -= insns_left;
                            env->icount_decr.u16.low = insns_left;
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions.  */
                                cpu_exec_nocache(insns_left, tb);
                            }
                            env->exception_index = EXCP_INTERRUPT;
                            next_tb = 0;
                            cpu_loop_exit();
                        }
                    }
                }
                env->current_tb = NULL;
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
            } /* for(;;) */
        }
    } /* for(;;) */

#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state?  */
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
    /* XXXXX */
#else
#error unsupported target CPU
#endif

    /* restore global registers */
    barrier();
    env = (void *) saved_env_reg;

    /* fail safe: never use cpu_single_env outside cpu_exec() */
    cpu_single_env = NULL;
    return ret;
}

/* must only be called from the generated code as an exception can be
   generated */
void tb_invalidate_page_range(target_ulong start, target_ulong end)
{
    /* XXX: cannot enable it yet because it yields to MMU exception
       where NIP != read address on PowerPC */
#if 0
    target_ulong phys_addr;
    phys_addr = get_phys_addr_code(env, start);
    tb_invalidate_phys_page_range(phys_addr, phys_addr + end - start, 0);
#endif
}

#if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)

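/* In real mode or vm86 mode the segment base is simply selector << 4;
   in protected mode the descriptor must be loaded from the GDT/LDT,
   which helper_load_seg() does (and which may raise a guest
   exception). */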
void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;
    if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
        selector &= 0xffff;
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               (selector << 4), 0xffff, 0);
    } else {
        helper_load_seg(seg_reg, selector);
    }
    env = saved_env;
}

void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_fsave(ptr, data32);

    env = saved_env;
}

void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_frstor(ptr, data32);

    env = saved_env;
}

#endif /* TARGET_I386 && CONFIG_USER_ONLY */

#if !defined(CONFIG_SOFTMMU)
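
/* Everything below implements fault handling for user-mode emulation:
   host signals (e.g. SIGSEGV) raised while executing generated code
   are mapped back onto guest CPU exceptions. */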

#if defined(TARGET_I386)
#define EXCEPTION_ACTION raise_exception_err(env->exception_index, env->error_code)
#else
#define EXCEPTION_ACTION cpu_loop_exit()
#endif

/* 'pc' is the host PC at which the exception was raised. 'address' is
   the effective address of the memory exception. 'is_write' is 1 if a
   write caused the exception and 0 otherwise. 'old_set' is the
   signal set which should be restored.  Returns 1 if the fault was
   handled, 0 if it was not a guest MMU fault. */
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
                pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }

    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    EXCEPTION_ACTION;

    /* never comes here */
    return 1;
}

#if defined(__i386__)

#if defined(__APPLE__)
# include <sys/ucontext.h>

# define EIP_sig(context)  (*((unsigned long*)&(context)->uc_mcontext->ss.eip))
# define TRAP_sig(context)    ((context)->uc_mcontext->es.trapno)
# define ERROR_sig(context)   ((context)->uc_mcontext->es.err)
# define MASK_sig(context)    ((context)->uc_sigmask)
#elif defined (__NetBSD__)
# include <ucontext.h>

# define EIP_sig(context)     ((context)->uc_mcontext.__gregs[_REG_EIP])
# define TRAP_sig(context)    ((context)->uc_mcontext.__gregs[_REG_TRAPNO])
# define ERROR_sig(context)   ((context)->uc_mcontext.__gregs[_REG_ERR])
# define MASK_sig(context)    ((context)->uc_sigmask)
#elif defined (__FreeBSD__) || defined(__DragonFly__)
# include <ucontext.h>

# define EIP_sig(context)  (*((unsigned long*)&(context)->uc_mcontext.mc_eip))
# define TRAP_sig(context)    ((context)->uc_mcontext.mc_trapno)
# define ERROR_sig(context)   ((context)->uc_mcontext.mc_err)
# define MASK_sig(context)    ((context)->uc_sigmask)
#elif defined(__OpenBSD__)
# define EIP_sig(context)     ((context)->sc_eip)
# define TRAP_sig(context)    ((context)->sc_trapno)
# define ERROR_sig(context)   ((context)->sc_err)
# define MASK_sig(context)    ((context)->sc_mask)
#else
# define EIP_sig(context)     ((context)->uc_mcontext.gregs[REG_EIP])
# define TRAP_sig(context)    ((context)->uc_mcontext.gregs[REG_TRAPNO])
# define ERROR_sig(context)   ((context)->uc_mcontext.gregs[REG_ERR])
# define MASK_sig(context)    ((context)->uc_sigmask)
#endif

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
#if defined(__NetBSD__) || defined (__FreeBSD__) || defined(__DragonFly__)
    ucontext_t *uc = puc;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#else
    struct ucontext *uc = puc;
#endif
    unsigned long pc;
    int trapno;

#ifndef REG_EIP
/* for glibc 2.1 */
#define REG_EIP    EIP
#define REG_ERR    ERR
#define REG_TRAPNO TRAPNO
#endif
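    /* Trap 0xe is the x86 page-fault exception; bit 1 of its error
       code is set when the faulting access was a write. */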
    pc = EIP_sig(uc);
    trapno = TRAP_sig(uc);
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             trapno == 0xe ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &MASK_sig(uc), puc);
}

#elif defined(__x86_64__)

#ifdef __NetBSD__
#define PC_sig(context)       _UC_MACHINE_PC(context)
#define TRAP_sig(context)     ((context)->uc_mcontext.__gregs[_REG_TRAPNO])
#define ERROR_sig(context)    ((context)->uc_mcontext.__gregs[_REG_ERR])
#define MASK_sig(context)     ((context)->uc_sigmask)
#elif defined(__OpenBSD__)
#define PC_sig(context)       ((context)->sc_rip)
#define TRAP_sig(context)     ((context)->sc_trapno)
#define ERROR_sig(context)    ((context)->sc_err)
#define MASK_sig(context)     ((context)->sc_mask)
#elif defined (__FreeBSD__) || defined(__DragonFly__)
#include <ucontext.h>

#define PC_sig(context)  (*((unsigned long*)&(context)->uc_mcontext.mc_rip))
#define TRAP_sig(context)     ((context)->uc_mcontext.mc_trapno)
#define ERROR_sig(context)    ((context)->uc_mcontext.mc_err)
#define MASK_sig(context)     ((context)->uc_sigmask)
#else
#define PC_sig(context)       ((context)->uc_mcontext.gregs[REG_RIP])
#define TRAP_sig(context)     ((context)->uc_mcontext.gregs[REG_TRAPNO])
#define ERROR_sig(context)    ((context)->uc_mcontext.gregs[REG_ERR])
#define MASK_sig(context)     ((context)->uc_sigmask)
#endif

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    unsigned long pc;
#if defined(__NetBSD__) || defined (__FreeBSD__) || defined(__DragonFly__)
    ucontext_t *uc = puc;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#else
    struct ucontext *uc = puc;
#endif

    pc = PC_sig(uc);
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             TRAP_sig(uc) == 0xe ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &MASK_sig(uc), puc);
}

#elif defined(_ARCH_PPC)

/***********************************************************************
 * signal context platform-specific definitions
 * From Wine
 */
#ifdef linux
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)             ((context)->uc_mcontext.regs->reg_name)
/* Gpr Registers access  */
# define GPR_sig(reg_num, context)              REG_sig(gpr[reg_num], context)
# define IAR_sig(context)                       REG_sig(nip, context)   /* Program counter */
# define MSR_sig(context)                       REG_sig(msr, context)   /* Machine State Register (Supervisor) */
# define CTR_sig(context)                       REG_sig(ctr, context)   /* Count register */
# define XER_sig(context)                       REG_sig(xer, context) /* User's integer exception register */
# define LR_sig(context)                        REG_sig(link, context) /* Link register */
# define CR_sig(context)                        REG_sig(ccr, context) /* Condition register */
/* Float Registers access  */
# define FLOAT_sig(reg_num, context)            (((double*)((char*)((context)->uc_mcontext.regs+48*4)))[reg_num])
# define FPSCR_sig(context)                     (*(int*)((char*)((context)->uc_mcontext.regs+(48+32*2)*4)))
/* Exception Registers access */
# define DAR_sig(context)                       REG_sig(dar, context)
# define DSISR_sig(context)                     REG_sig(dsisr, context)
# define TRAP_sig(context)                      REG_sig(trap, context)
#endif /* linux */

#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
#include <ucontext.h>
# define IAR_sig(context)               ((context)->uc_mcontext.mc_srr0)
# define MSR_sig(context)               ((context)->uc_mcontext.mc_srr1)
# define CTR_sig(context)               ((context)->uc_mcontext.mc_ctr)
# define XER_sig(context)               ((context)->uc_mcontext.mc_xer)
# define LR_sig(context)                ((context)->uc_mcontext.mc_lr)
# define CR_sig(context)                ((context)->uc_mcontext.mc_cr)
/* Exception Registers access */
# define DAR_sig(context)               ((context)->uc_mcontext.mc_dar)
# define DSISR_sig(context)             ((context)->uc_mcontext.mc_dsisr)
# define TRAP_sig(context)              ((context)->uc_mcontext.mc_exc)
#endif /* __FreeBSD__|| __FreeBSD_kernel__ */

#ifdef __APPLE__
# include <sys/ucontext.h>
typedef struct ucontext SIGCONTEXT;
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)             ((context)->uc_mcontext->ss.reg_name)
# define FLOATREG_sig(reg_name, context)        ((context)->uc_mcontext->fs.reg_name)
# define EXCEPREG_sig(reg_name, context)        ((context)->uc_mcontext->es.reg_name)
# define VECREG_sig(reg_name, context)          ((context)->uc_mcontext->vs.reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)              REG_sig(r##reg_num, context)
# define IAR_sig(context)                       REG_sig(srr0, context)  /* Program counter */
# define MSR_sig(context)                       REG_sig(srr1, context)  /* Machine State Register (Supervisor) */
# define CTR_sig(context)                       REG_sig(ctr, context)
# define XER_sig(context)                       REG_sig(xer, context) /* User's integer exception register */
# define LR_sig(context)                        REG_sig(lr, context)  /* Link register */
# define CR_sig(context)                        REG_sig(cr, context)  /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)            FLOATREG_sig(fpregs[reg_num], context)
# define FPSCR_sig(context)                     ((double)FLOATREG_sig(fpscr, context))
/* Exception Registers access */
# define DAR_sig(context)                       EXCEPREG_sig(dar, context)     /* Fault registers for coredump */
# define DSISR_sig(context)                     EXCEPREG_sig(dsisr, context)
# define TRAP_sig(context)                      EXCEPREG_sig(exception, context) /* number of powerpc exception taken */
#endif /* __APPLE__ */

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
    ucontext_t *uc = puc;
#else
    struct ucontext *uc = puc;
#endif
    unsigned long pc;
    int is_write;

    pc = IAR_sig(uc);
    is_write = 0;
#if 0
    /* ppc 4xx case */
    if (DSISR_sig(uc) & 0x00800000)
        is_write = 1;
#else
    if (TRAP_sig(uc) != 0x400 && (DSISR_sig(uc) & 0x02000000))
        is_write = 1;
#endif
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__alpha__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    uint32_t *pc = (uint32_t *)uc->uc_mcontext.sc_pc;
    uint32_t insn = *pc;
    int is_write = 0;

    /* XXX: need kernel patch to get write flag faster */
    switch (insn >> 26) {
    case 0x0d: // stw
    case 0x0e: // stb
    case 0x0f: // stq_u
    case 0x24: // stf
    case 0x25: // stg
    case 0x26: // sts
    case 0x27: // stt
    case 0x2c: // stl
    case 0x2d: // stq
    case 0x2e: // stl_c
    case 0x2f: // stq_c
        is_write = 1;
    }

    return handle_cpu_signal((unsigned long)pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__sparc__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    int is_write;
    uint32_t insn;
#if !defined(__arch64__) || defined(CONFIG_SOLARIS)
    uint32_t *regs = (uint32_t *)(info + 1);
    void *sigmask = (regs + 20);
    /* XXX: is there a standard glibc define ? */
    unsigned long pc = regs[1];
#else
#ifdef __linux__
    struct sigcontext *sc = puc;
    unsigned long pc = sc->sigc_regs.tpc;
    void *sigmask = (void *)sc->sigc_mask;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
    unsigned long pc = uc->sc_pc;
    void *sigmask = (void *)(long)uc->sc_mask;
#endif
#endif

    /* XXX: need kernel patch to get write flag faster */
    is_write = 0;
    insn = *(uint32_t *)pc;
    if ((insn >> 30) == 3) {
        switch((insn >> 19) & 0x3f) {
        case 0x05: // stb
        case 0x15: // stba
        case 0x06: // sth
        case 0x16: // stha
        case 0x04: // st
        case 0x14: // sta
        case 0x07: // std
        case 0x17: // stda
        case 0x0e: // stx
        case 0x1e: // stxa
        case 0x24: // stf
        case 0x34: // stfa
        case 0x27: // stdf
        case 0x37: // stdfa
        case 0x26: // stqf
        case 0x36: // stqfa
        case 0x25: // stfsr
        case 0x3c: // casa
        case 0x3e: // casxa
            is_write = 1;
            break;
        }
    }
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, sigmask, NULL);
}

#elif defined(__arm__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

#if (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3))
    pc = uc->uc_mcontext.gregs[R15];
#else
    pc = uc->uc_mcontext.arm_pc;
#endif
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__mc68000)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.gregs[16];
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__ia64)

#ifndef __ISR_VALID
  /* This ought to be in <bits/siginfo.h>... */
# define __ISR_VALID    1
#endif

int cpu_signal_handler(int host_signum, void *pinfo, void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long ip;
    int is_write = 0;

    ip = uc->uc_mcontext.sc_ip;
    switch (host_signum) {
    case SIGILL:
    case SIGFPE:
    case SIGSEGV:
    case SIGBUS:
    case SIGTRAP:
        if (info->si_code && (info->si_segvflags & __ISR_VALID))
            /* ISR.W (write-access) is bit 33:  */
            is_write = (info->si_isr >> 33) & 1;
        break;

    default:
        break;
    }
    return handle_cpu_signal(ip, (unsigned long)info->si_addr,
                             is_write,
                             (sigset_t *)&uc->uc_sigmask, puc);
}

#elif defined(__s390__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    uint16_t *pinsn;
    int is_write = 0;

    pc = uc->uc_mcontext.psw.addr;

    /* ??? On linux, the non-rt signal handler has 4 (!) arguments instead
       of the normal 2 arguments.  The 3rd argument contains the "int_code"
       from the hardware which does in fact contain the is_write value.
       The rt signal handler, as far as I can tell, does not give this value
       at all.  Not that we could get to it from here even if it were.  */
    /* ??? This is not even close to complete, since it ignores all
       of the read-modify-write instructions.  */
    pinsn = (uint16_t *)pc;
    switch (pinsn[0] >> 8) {
    case 0x50: /* ST */
    case 0x42: /* STC */
    case 0x40: /* STH */
        is_write = 1;
        break;
    case 0xc4: /* RIL format insns */
        switch (pinsn[0] & 0xf) {
        case 0xf: /* STRL */
        case 0xb: /* STGRL */
        case 0x7: /* STHRL */
            is_write = 1;
        }
        break;
    case 0xe3: /* RXY format insns */
        switch (pinsn[2] & 0xff) {
        case 0x50: /* STY */
        case 0x24: /* STG */
        case 0x72: /* STCY */
        case 0x70: /* STHY */
        case 0x8e: /* STPQ */
        case 0x3f: /* STRVH */
        case 0x3e: /* STRV */
        case 0x2f: /* STRVG */
            is_write = 1;
        }
        break;
    }
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__mips__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    greg_t pc = uc->uc_mcontext.pc;
    int is_write;

    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__hppa__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    struct siginfo *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc = uc->uc_mcontext.sc_iaoq[0];
    uint32_t insn = *(uint32_t *)pc;
    int is_write = 0;

    /* XXX: need kernel patch to get write flag faster.  */
    switch (insn >> 26) {
    case 0x1a: /* STW */
    case 0x19: /* STH */
    case 0x18: /* STB */
    case 0x1b: /* STWM */
        is_write = 1;
        break;

    case 0x09: /* CSTWX, FSTWX, FSTWS */
    case 0x0b: /* CSTDX, FSTDX, FSTDS */
        /* Distinguish from coprocessor load ... */
        is_write = (insn >> 9) & 1;
        break;

    case 0x03:
        switch ((insn >> 6) & 15) {
        case 0xa: /* STWS */
        case 0x9: /* STHS */
        case 0x8: /* STBS */
        case 0xe: /* STWAS */
        case 0xc: /* STBYS */
            is_write = 1;
        }
        break;
    }

    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#else

#error host CPU specific signal handler needed

#endif

#endif /* !defined(CONFIG_SOFTMMU) */