qemu/cpu-exec.c
/*
 *  emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#include "cpu.h"
#include "disas/disas.h"
#include "tcg.h"
#include "qemu/atomic.h"
#include "sysemu/qtest.h"

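/* Set during code generation when existing TBs may have been
 * invalidated (e.g. by memory exceptions while translating); cpu_exec()
 * checks it below and avoids chaining to a possibly stale next_tb. */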
int tb_invalidated_flag;

//#define CONFIG_DEBUG_EXEC

bool qemu_cpu_has_work(CPUState *cpu)
{
    return cpu_has_work(cpu);
}

void cpu_loop_exit(CPUArchState *env)
{
    env->current_tb = NULL;
    longjmp(env->jmp_env, 1);
}

/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
#if defined(CONFIG_SOFTMMU)
void cpu_resume_from_signal(CPUArchState *env, void *puc)
{
    /* XXX: restore cpu registers saved in host registers */

    env->exception_index = -1;
    longjmp(env->jmp_env, 1);
}
#endif

/* Execute a TB, and fix up the CPU state afterwards if necessary */
static inline tcg_target_ulong cpu_tb_exec(CPUArchState *env, uint8_t *tb_ptr)
{
    tcg_target_ulong next_tb = tcg_qemu_tb_exec(env, tb_ptr);
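    /* The low bits of next_tb encode the exit condition (TB_EXIT_MASK);
     * the remaining bits point at the last TranslationBlock that was
     * executing, which is how the two cases below are told apart. */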
    if ((next_tb & TB_EXIT_MASK) > TB_EXIT_IDX1) {
        /* We didn't start executing this TB (e.g. because the instruction
         * counter hit zero); we must restore the guest PC to the address
         * of the start of the TB.
         */
        TranslationBlock *tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
        cpu_pc_from_tb(env, tb);
    }
    if ((next_tb & TB_EXIT_MASK) == TB_EXIT_REQUESTED) {
        /* We were asked to stop executing TBs (probably because of a
         * pending interrupt). We've now stopped, so clear the flag.
         */
        env->tcg_exit_req = 0;
    }
    return next_tb;
}

/* Execute guest code without caching the generated host code. An
   interpreter could be used if available. */
static void cpu_exec_nocache(CPUArchState *env, int max_cycles,
                             TranslationBlock *orig_tb)
{
    TranslationBlock *tb;

    /* Should never happen.
       We only end up here when an existing TB is too long.  */
    if (max_cycles > CF_COUNT_MASK) {
        max_cycles = CF_COUNT_MASK;
    }

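    /* Generate a throwaway TB capped at max_cycles guest instructions;
     * it is executed once below, then invalidated and freed. */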
    tb = tb_gen_code(env, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                     max_cycles);
    env->current_tb = tb;
    /* execute the generated code */
    cpu_tb_exec(env, tb->tc_ptr);
    env->current_tb = NULL;
    tb_phys_invalidate(tb, -1);
    tb_free(tb);
}

static TranslationBlock *tb_find_slow(CPUArchState *env,
                                      target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    TranslationBlock *tb, **ptb1;
    unsigned int h;
    tb_page_addr_t phys_pc, phys_page1;
    target_ulong virt_page2;

    tb_invalidated_flag = 0;

    /* find translated block using physical mappings */
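    /* TBs are hashed by guest-physical PC so a block stays valid across
     * changes to the virtual mapping; the per-CPU tb_jmp_cache updated
     * below is only a fast, virtually indexed front end to this table. */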
    phys_pc = get_page_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tb_phys_hash[h];
    for (;;) {
        tb = *ptb1;
        if (!tb) {
            goto not_found;
        }
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                tb_page_addr_t phys_page2;

                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_page_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2) {
                    goto found;
                }
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code is available, translate it now */
    tb = tb_gen_code(env, pc, cs_base, flags, 0);

 found:
    /* Move the last found TB to the head of the list */
    if (likely(*ptb1)) {
        *ptb1 = tb->phys_hash_next;
        tb->phys_hash_next = tb_phys_hash[h];
        tb_phys_hash[h] = tb;
    }
    /* we add the TB in the virtual pc hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}

static inline TranslationBlock *tb_find_fast(CPUArchState *env)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    int flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
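    /* tb_jmp_cache is a small direct-mapped cache indexed by virtual PC;
     * a miss or a stale entry falls back to the physical hash lookup in
     * tb_find_slow(). */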
    tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                 tb->flags != flags)) {
        tb = tb_find_slow(env, pc, cs_base, flags);
    }
    return tb;
}

static CPUDebugExcpHandler *debug_excp_handler;

void cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler)
{
    debug_excp_handler = handler;
}

static void cpu_handle_debug_exception(CPUArchState *env)
{
    CPUWatchpoint *wp;

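    /* If the debug exception was raised by something other than a
     * watchpoint (e.g. a breakpoint or single-step), clear any stale
     * per-watchpoint hit flags. */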
    if (!env->watchpoint_hit) {
        QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
    if (debug_excp_handler) {
        debug_excp_handler(env);
    }
}

/* main execution loop */

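/* Set from a signal handler or another thread to ask the executing CPU
 * to leave the TB loop; sig_atomic_t keeps the access
 * async-signal-safe. */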
volatile sig_atomic_t exit_request;

int cpu_exec(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    tcg_target_ulong next_tb;

    if (env->halted) {
        if (!cpu_has_work(cpu)) {
            return EXCP_HALTED;
        }

        env->halted = 0;
    }

    cpu_single_env = env;

    if (unlikely(exit_request)) {
        env->exit_request = 1;
    }

#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
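    /* DF is kept as +1/-1 so string instructions can add it directly to
     * the index registers. */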
    DF = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_SPARC)
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_UNICORE32)
#elif defined(TARGET_PPC)
    env->reserve_addr = -1;
#elif defined(TARGET_LM32)
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_OPENRISC)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
#elif defined(TARGET_XTENSA)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    env->exception_index = -1;

    /* prepare setjmp context for exception handling */
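    /* Exceptions raised in translated code or in helpers longjmp() back
     * to the setjmp() below via cpu_loop_exit(); the loop then re-enters
     * with the new exception_index. */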
    for (;;) {
        if (setjmp(env->jmp_env) == 0) {
            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    if (ret == EXCP_DEBUG) {
                        cpu_handle_debug_exception(env);
                    }
                    break;
                } else {
#if defined(CONFIG_USER_ONLY)
                    /* In user-mode emulation we report a fake exception,
                       which is handled outside the cpu execution
                       loop. */
#if defined(TARGET_I386)
                    do_interrupt(env);
#endif
                    ret = env->exception_index;
                    break;
#else
                    do_interrupt(env);
                    env->exception_index = -1;
#endif
                }
            }

            next_tb = 0; /* force lookup of first TB */
            for (;;) {
                interrupt_request = env->interrupt_request;
                if (unlikely(interrupt_request)) {
                    if (unlikely(env->singlestep_enabled & SSTEP_NOIRQ)) {
                        /* Mask out external interrupts for this step. */
                        interrupt_request &= ~CPU_INTERRUPT_SSTEP_MASK;
                    }
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        env->exception_index = EXCP_DEBUG;
                        cpu_loop_exit(env);
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS) || \
    defined(TARGET_MICROBLAZE) || defined(TARGET_LM32) || defined(TARGET_UNICORE32)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        env->halted = 1;
                        env->exception_index = EXCP_HLT;
                        cpu_loop_exit(env);
                    }
#endif
#if defined(TARGET_I386)
#if !defined(CONFIG_USER_ONLY)
                    if (interrupt_request & CPU_INTERRUPT_POLL) {
                        env->interrupt_request &= ~CPU_INTERRUPT_POLL;
                        apic_poll_irq(env->apic_state);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_INIT) {
                        cpu_svm_check_intercept_param(env, SVM_EXIT_INIT,
                                                      0);
                        do_cpu_init(x86_env_get_cpu(env));
                        env->exception_index = EXCP_HALTED;
                        cpu_loop_exit(env);
                    } else if (interrupt_request & CPU_INTERRUPT_SIPI) {
                        do_cpu_sipi(x86_env_get_cpu(env));
                    } else if (env->hflags2 & HF2_GIF_MASK) {
                        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                            !(env->hflags & HF_SMM_MASK)) {
                            cpu_svm_check_intercept_param(env, SVM_EXIT_SMI,
                                                          0);
                            env->interrupt_request &= ~CPU_INTERRUPT_SMI;
                            do_smm_enter(env);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                                   !(env->hflags2 & HF2_NMI_MASK)) {
                            env->interrupt_request &= ~CPU_INTERRUPT_NMI;
                            env->hflags2 |= HF2_NMI_MASK;
                            do_interrupt_x86_hardirq(env, EXCP02_NMI, 1);
                            next_tb = 0;
                        } else if (interrupt_request & CPU_INTERRUPT_MCE) {
                            env->interrupt_request &= ~CPU_INTERRUPT_MCE;
                            do_interrupt_x86_hardirq(env, EXCP12_MCHK, 0);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                                   (((env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->hflags2 & HF2_HIF_MASK)) ||
                                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->eflags & IF_MASK &&
                                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
                            int intno;
                            cpu_svm_check_intercept_param(env, SVM_EXIT_INTR,
                                                          0);
                            env->interrupt_request &= ~(CPU_INTERRUPT_HARD |
                                                        CPU_INTERRUPT_VIRQ);
                            intno = cpu_get_pic_interrupt(env);
                            qemu_log_mask(CPU_LOG_TB_IN_ASM,
                                          "Servicing hardware INT=0x%02x\n",
                                          intno);
                            do_interrupt_x86_hardirq(env, intno, 1);
                            /* ensure that no TB jump will be modified as
                               the program flow was changed */
                            next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                                   (env->eflags & IF_MASK) &&
                                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                            int intno;
                            /* FIXME: this should respect TPR */
                            cpu_svm_check_intercept_param(env, SVM_EXIT_VINTR,
                                                          0);
                            intno = ldl_phys(env->vm_vmcb +
                                             offsetof(struct vmcb,
                                                      control.int_vector));
                            qemu_log_mask(CPU_LOG_TB_IN_ASM,
                                          "Servicing virtual hardware INT=0x%02x\n",
                                          intno);
                            do_interrupt_x86_hardirq(env, intno, 1);
                            env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                            next_tb = 0;
#endif
                        }
                    }
#elif defined(TARGET_PPC)
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_reset(cpu);
                    }
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0) {
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        }
                        next_tb = 0;
                    }
#elif defined(TARGET_LM32)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->ie & IE_IE)) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_MICROBLAZE)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->sregs[SR_MSR] & MSR_IE)
                        && !(env->sregs[SR_MSR] & (MSR_EIP | MSR_BIP))
                        && !(env->iflags & (D_FLAG | IMM_FLAG))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        cpu_mips_hw_interrupts_pending(env)) {
                        /* Raise it */
                        env->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_OPENRISC)
                    {
                        int idx = -1;
                        if ((interrupt_request & CPU_INTERRUPT_HARD)
                            && (env->sr & SR_IEE)) {
                            idx = EXCP_INT;
                        }
                        if ((interrupt_request & CPU_INTERRUPT_TIMER)
                            && (env->sr & SR_TEE)) {
                            idx = EXCP_TICK;
                        }
                        if (idx >= 0) {
                            env->exception_index = idx;
                            do_interrupt(env);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_SPARC)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        if (cpu_interrupts_enabled(env) &&
                            env->interrupt_index > 0) {
                            int pil = env->interrupt_index & 0xf;
                            int type = env->interrupt_index & 0xf0;

                            if (((type == TT_EXTINT) &&
                                  cpu_pil_allowed(env, pil)) ||
                                  type != TT_EXTINT) {
                                env->exception_index = env->interrupt_index;
                                do_interrupt(env);
                                next_tb = 0;
                            }
                        }
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->uncached_cpsr & CPSR_F)) {
                        env->exception_index = EXCP_FIQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address.  */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->uncached_cpsr & CPSR_I))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_UNICORE32)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && !(env->uncached_asr & ASR_I)) {
                        env->exception_index = UC32_EXCP_INTR;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    {
                        int idx = -1;
                        /* ??? This hard-codes the OSF/1 interrupt levels.  */
                        switch (env->pal_mode ? 7 : env->ps & PS_INT_MASK) {
                        case 0 ... 3:
                            if (interrupt_request & CPU_INTERRUPT_HARD) {
                                idx = EXCP_DEV_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 4:
                            if (interrupt_request & CPU_INTERRUPT_TIMER) {
                                idx = EXCP_CLK_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 5:
                            if (interrupt_request & CPU_INTERRUPT_SMP) {
                                idx = EXCP_SMP_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 6:
                            if (interrupt_request & CPU_INTERRUPT_MCHK) {
                                idx = EXCP_MCHK;
                            }
                        }
                        if (idx >= 0) {
                            env->exception_index = idx;
                            env->error_code = 0;
                            do_interrupt(env);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && (env->pregs[PR_CCS] & I_FLAG)
                        && !env->locked_irq) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_NMI) {
                        unsigned int m_flag_archval;
                        if (env->pregs[PR_VR] < 32) {
                            m_flag_archval = M_FLAG_V10;
                        } else {
                            m_flag_archval = M_FLAG_V32;
                        }
                        if ((env->pregs[PR_CCS] & m_flag_archval)) {
                            env->exception_index = EXCP_NMI;
                            do_interrupt(env);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        env->exception_index = env->pending_vector;
                        do_interrupt_m68k_hardirq(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_S390X) && !defined(CONFIG_USER_ONLY)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->psw.mask & PSW_MASK_EXT)) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_XTENSA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        env->exception_index = EXC_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#endif
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                }
                if (unlikely(env->exit_request)) {
                    env->exit_request = 0;
                    env->exception_index = EXCP_INTERRUPT;
                    cpu_loop_exit(env);
                }
#if defined(DEBUG_DISAS) || defined(CONFIG_DEBUG_EXEC)
                if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
                    /* restore flags in standard format */
#if defined(TARGET_I386)
                    env->eflags = env->eflags | cpu_cc_compute_all(env, CC_OP)
                        | (DF & DF_MASK);
                    log_cpu_state(env, CPU_DUMP_CCOP);
                    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_M68K)
                    cpu_m68k_flush_flags(env, env->cc_op);
                    env->cc_op = CC_OP_FLAGS;
                    env->sr = (env->sr & 0xffe0)
                              | env->cc_dest | (env->cc_x << 4);
                    log_cpu_state(env, 0);
#else
                    log_cpu_state(env, 0);
#endif
                }
#endif /* DEBUG_DISAS || CONFIG_DEBUG_EXEC */
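                /* tb_lock protects the TB hash tables and the direct-jump
                 * patching done below against concurrent code generation. */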
                spin_lock(&tb_lock);
                tb = tb_find_fast(env);
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                    tb_invalidated_flag = 0;
                }
#ifdef CONFIG_DEBUG_EXEC
                qemu_log_mask(CPU_LOG_EXEC, "Trace %p [" TARGET_FMT_lx "] %s\n",
                              tb->tc_ptr, tb->pc,
                              lookup_symbol(tb->pc));
#endif
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                if (next_tb != 0 && tb->page_addr[1] == -1) {
                    tb_add_jump((TranslationBlock *)(next_tb & ~TB_EXIT_MASK),
                                next_tb & TB_EXIT_MASK, tb);
                }
                spin_unlock(&tb_lock);

                /* cpu_interrupt might be called while translating the
                   TB, but before it is linked into a (potentially
                   infinite) chain of TBs and becomes env->current_tb.
                   Avoid starting execution if there is a pending
                   interrupt. */
                env->current_tb = tb;
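                /* Compiler barrier: make sure current_tb is set before
                 * exit_request is re-tested below. */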
                barrier();
                if (likely(!env->exit_request)) {
                    tc_ptr = tb->tc_ptr;
                    /* execute the generated code */
                    next_tb = cpu_tb_exec(env, tc_ptr);
                    switch (next_tb & TB_EXIT_MASK) {
                    case TB_EXIT_REQUESTED:
                        /* Something asked us to stop executing
                         * chained TBs; just continue round the main
                         * loop. Whatever requested the exit will also
                         * have set something else (e.g. exit_request or
                         * interrupt_request) which we will handle
                         * next time around the loop.
                         */
                        tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
                        next_tb = 0;
                        break;
                    case TB_EXIT_ICOUNT_EXPIRED:
                    {
                        /* Instruction counter expired.  */
                        int insns_left;
                        tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
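                        /* icount_decr.u16.low holds the 16-bit budget
                         * that translated code decrements; icount_extra
                         * holds the instructions that did not fit into
                         * that budget. */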
                        insns_left = env->icount_decr.u32;
                        if (env->icount_extra && insns_left >= 0) {
                            /* Refill decrementer and continue execution.  */
                            env->icount_extra += insns_left;
                            if (env->icount_extra > 0xffff) {
                                insns_left = 0xffff;
                            } else {
                                insns_left = env->icount_extra;
                            }
                            env->icount_extra -= insns_left;
                            env->icount_decr.u16.low = insns_left;
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions.  */
                                cpu_exec_nocache(env, insns_left, tb);
                            }
                            env->exception_index = EXCP_INTERRUPT;
                            next_tb = 0;
                            cpu_loop_exit(env);
                        }
                        break;
                    }
                    default:
                        break;
                    }
                }
                env->current_tb = NULL;
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
            } /* for(;;) */
        } else {
            /* Reload env after longjmp - the compiler may have smashed all
             * local variables as longjmp is marked 'noreturn'. */
            env = cpu_single_env;
        }
    } /* for(;;) */

#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | cpu_cc_compute_all(env, CC_OP)
        | (DF & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state?  */
#elif defined(TARGET_UNICORE32)
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#elif defined(TARGET_LM32)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_OPENRISC)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
#elif defined(TARGET_XTENSA)
    /* XXXXX */
#else
#error unsupported target CPU
#endif

    /* fail safe: never use cpu_single_env outside cpu_exec() */
    cpu_single_env = NULL;
    return ret;
}