qemu/cpu-exec.c
/*
 *  i386 emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#include "cpu.h"
#include "disas.h"
#include "tcg.h"
#include "qemu-barrier.h"

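/* Set when tb_gen_code() had to flush the whole translation cache because
   the code buffer filled up: any TB pointer obtained before that point
   (notably a pending next_tb chaining candidate) may be stale, so the main
   loop must skip direct block chaining once it sees this flag. */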
int tb_invalidated_flag;

//#define CONFIG_DEBUG_EXEC

bool qemu_cpu_has_work(CPUState *env)
{
    return cpu_has_work(env);
}

void cpu_loop_exit(CPUState *env)
{
    env->current_tb = NULL;
    longjmp(env->jmp_env, 1);
}

/* Exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator.
 */
#if defined(CONFIG_SOFTMMU)
void cpu_resume_from_signal(CPUState *env, void *puc)
{
    /* XXX: restore cpu registers saved in host registers */

    env->exception_index = -1;
    longjmp(env->jmp_env, 1);
}
#endif

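/* Used by the icount path below: when fewer instructions remain in the
   current timeslice than the next cached TB would execute, a one-off TB
   capped at max_cycles instructions is generated, run once and freed. */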
/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
static void cpu_exec_nocache(CPUState *env, int max_cycles,
                             TranslationBlock *orig_tb)
{
    unsigned long next_tb;
    TranslationBlock *tb;

    /* Should never happen.
       We only end up here when an existing TB is too long.  */
    if (max_cycles > CF_COUNT_MASK)
        max_cycles = CF_COUNT_MASK;

    tb = tb_gen_code(env, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                     max_cycles);
    env->current_tb = tb;
    /* execute the generated code */
    next_tb = tcg_qemu_tb_exec(env, tb->tc_ptr);
    env->current_tb = NULL;

    if ((next_tb & 3) == 2) {
        /* Restore PC.  This can happen if an async event occurred before
           the TB started executing.  */
        cpu_pc_from_tb(env, tb);
    }
    tb_phys_invalidate(tb, -1);
    tb_free(tb);
}

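/* TB lookup is two-level: tb_find_fast() first probes the per-CPU
   virtual-PC hash table (env->tb_jmp_cache); on a miss, tb_find_slow()
   walks the global physical hash chain, matching on guest physical PC,
   cs_base and the TB flags, and translates a fresh block if none fits. */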
static TranslationBlock *tb_find_slow(CPUState *env,
                                      target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    TranslationBlock *tb, **ptb1;
    unsigned int h;
    tb_page_addr_t phys_pc, phys_page1;
    target_ulong virt_page2;

    tb_invalidated_flag = 0;

    /* find translated block using physical mappings */
    phys_pc = get_page_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                tb_page_addr_t phys_page2;

                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_page_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code is available, translate it now */
    tb = tb_gen_code(env, pc, cs_base, flags, 0);

 found:
    /* Move the last found TB to the head of the list */
    if (likely(*ptb1)) {
        *ptb1 = tb->phys_hash_next;
        tb->phys_hash_next = tb_phys_hash[h];
        tb_phys_hash[h] = tb;
    }
    /* add the TB to the virtual PC hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}

static inline TranslationBlock *tb_find_fast(CPUState *env)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    int flags;

    /* We record a subset of the CPU state; it must be identical every
       time a given translated block is executed. */
    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
    tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                 tb->flags != flags)) {
        tb = tb_find_slow(env, pc, cs_base, flags);
    }
    return tb;
}

static CPUDebugExcpHandler *debug_excp_handler;

CPUDebugExcpHandler *cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler)
{
    CPUDebugExcpHandler *old_handler = debug_excp_handler;

    debug_excp_handler = handler;
    return old_handler;
}

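/* Called on EXCP_DEBUG before cpu_exec() returns: unless the stop was
   caused by a watchpoint, clear any stale BP_WATCHPOINT_HIT markers, then
   give the registered handler (if any) a chance to inspect the CPU. */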
static void cpu_handle_debug_exception(CPUState *env)
{
    CPUWatchpoint *wp;

    if (!env->watchpoint_hit) {
        QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
    if (debug_excp_handler) {
        debug_excp_handler(env);
    }
}

/* main execution loop */

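/* Set asynchronously (from a signal handler or another thread, hence
   volatile sig_atomic_t) to request that every CPU leave the execution
   loop; cpu_exec() latches it into env->exit_request on entry. */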
volatile sig_atomic_t exit_request;

int cpu_exec(CPUState *env)
{
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    unsigned long next_tb;

    if (env->halted) {
        if (!cpu_has_work(env)) {
            return EXCP_HALTED;
        }

        env->halted = 0;
    }

    cpu_single_env = env;

    if (unlikely(exit_request)) {
        env->exit_request = 1;
    }

#if defined(TARGET_I386)
    /* put eflags in CPU temporary format: the arithmetic flags live
       lazily in CC_SRC/CC_OP, and DF is kept as +1/-1 for string ops */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    DF = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_SPARC)
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_UNICORE32)
#elif defined(TARGET_PPC)
    env->reserve_addr = -1;
#elif defined(TARGET_LM32)
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
#elif defined(TARGET_XTENSA)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    env->exception_index = -1;

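    /* Structure of what follows: an outer for(;;) whose setjmp() re-arms
       exception handling (helpers longjmp() back here via cpu_loop_exit()),
       and an inner for(;;) that services pending interrupts, looks up or
       translates the next TB, optionally chains it to the previous one,
       and runs it. */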
    /* prepare setjmp context for exception handling */
    for(;;) {
        if (setjmp(env->jmp_env) == 0) {
            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    if (ret == EXCP_DEBUG) {
                        cpu_handle_debug_exception(env);
                    }
                    break;
                } else {
#if defined(CONFIG_USER_ONLY)
                    /* in user-mode emulation, we raise a fake exception
                       which is handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    do_interrupt(env);
#endif
                    ret = env->exception_index;
                    break;
#else
                    do_interrupt(env);
                    env->exception_index = -1;
#endif
                }
            }

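            /* next_tb carries the address of the TB that just ran, tagged in
               its low two bits: 0 or 1 name the jump slot of that TB to patch
               for direct chaining, 2 means execution stopped because the
               icount budget ran out.  A value of 0 disables chaining. */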
            next_tb = 0; /* force lookup of first TB */
            for(;;) {
                interrupt_request = env->interrupt_request;
                if (unlikely(interrupt_request)) {
                    if (unlikely(env->singlestep_enabled & SSTEP_NOIRQ)) {
                        /* Mask out external interrupts for this step. */
                        interrupt_request &= ~CPU_INTERRUPT_SSTEP_MASK;
                    }
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        env->exception_index = EXCP_DEBUG;
                        cpu_loop_exit(env);
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS) || \
    defined(TARGET_MICROBLAZE) || defined(TARGET_LM32) || defined(TARGET_UNICORE32)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        env->halted = 1;
                        env->exception_index = EXCP_HLT;
                        cpu_loop_exit(env);
                    }
#endif
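                    /* x86: INIT and SIPI are serviced even with the SVM
                       global interrupt flag (GIF) clear; SMI, NMI, MCE and
                       external interrupts below are all gated on GIF plus
                       their usual per-class masks. */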
#if defined(TARGET_I386)
                    if (interrupt_request & CPU_INTERRUPT_INIT) {
                        svm_check_intercept(env, SVM_EXIT_INIT);
                        do_cpu_init(env);
                        env->exception_index = EXCP_HALTED;
                        cpu_loop_exit(env);
                    } else if (interrupt_request & CPU_INTERRUPT_SIPI) {
                        do_cpu_sipi(env);
                    } else if (env->hflags2 & HF2_GIF_MASK) {
                        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                            !(env->hflags & HF_SMM_MASK)) {
                            svm_check_intercept(env, SVM_EXIT_SMI);
                            env->interrupt_request &= ~CPU_INTERRUPT_SMI;
                            do_smm_enter(env);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                                   !(env->hflags2 & HF2_NMI_MASK)) {
                            env->interrupt_request &= ~CPU_INTERRUPT_NMI;
                            env->hflags2 |= HF2_NMI_MASK;
                            do_interrupt_x86_hardirq(env, EXCP02_NMI, 1);
                            next_tb = 0;
                        } else if (interrupt_request & CPU_INTERRUPT_MCE) {
                            env->interrupt_request &= ~CPU_INTERRUPT_MCE;
                            do_interrupt_x86_hardirq(env, EXCP12_MCHK, 0);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                                   (((env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->hflags2 & HF2_HIF_MASK)) ||
                                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->eflags & IF_MASK &&
                                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
                            int intno;
                            svm_check_intercept(env, SVM_EXIT_INTR);
                            env->interrupt_request &= ~(CPU_INTERRUPT_HARD |
                                                        CPU_INTERRUPT_VIRQ);
                            intno = cpu_get_pic_interrupt(env);
                            qemu_log_mask(CPU_LOG_TB_IN_ASM,
                                          "Servicing hardware INT=0x%02x\n", intno);
                            do_interrupt_x86_hardirq(env, intno, 1);
                            /* ensure that no TB jump will be modified as
                               the program flow was changed */
                            next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                                   (env->eflags & IF_MASK) &&
                                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                            int intno;
                            /* FIXME: this should respect TPR */
                            svm_check_intercept(env, SVM_EXIT_VINTR);
                            intno = ldl_phys(env->vm_vmcb +
                                             offsetof(struct vmcb, control.int_vector));
                            qemu_log_mask(CPU_LOG_TB_IN_ASM,
                                          "Servicing virtual hardware INT=0x%02x\n", intno);
                            do_interrupt_x86_hardirq(env, intno, 1);
                            env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                            next_tb = 0;
#endif
                        }
                    }
#elif defined(TARGET_PPC)
#if 0
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_reset(env);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0)
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        next_tb = 0;
                    }
#elif defined(TARGET_LM32)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->ie & IE_IE)) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_MICROBLAZE)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->sregs[SR_MSR] & MSR_IE)
                        && !(env->sregs[SR_MSR] & (MSR_EIP | MSR_BIP))
                        && !(env->iflags & (D_FLAG | IMM_FLAG))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        cpu_mips_hw_interrupts_pending(env)) {
                        /* Raise it */
                        env->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SPARC)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        if (cpu_interrupts_enabled(env) &&
                            env->interrupt_index > 0) {
                            int pil = env->interrupt_index & 0xf;
                            int type = env->interrupt_index & 0xf0;

                            if (((type == TT_EXTINT) &&
                                  cpu_pil_allowed(env, pil)) ||
                                  type != TT_EXTINT) {
                                env->exception_index = env->interrupt_index;
                                do_interrupt(env);
                                next_tb = 0;
                            }
                        }
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->uncached_cpsr & CPSR_F)) {
                        env->exception_index = EXCP_FIQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address.  */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->uncached_cpsr & CPSR_I))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_UNICORE32)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && !(env->uncached_asr & ASR_I)) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    {
                        int idx = -1;
                        /* ??? This hard-codes the OSF/1 interrupt levels.  */
                        switch (env->pal_mode ? 7 : env->ps & PS_INT_MASK) {
                        case 0 ... 3:
                            if (interrupt_request & CPU_INTERRUPT_HARD) {
                                idx = EXCP_DEV_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 4:
                            if (interrupt_request & CPU_INTERRUPT_TIMER) {
                                idx = EXCP_CLK_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 5:
                            if (interrupt_request & CPU_INTERRUPT_SMP) {
                                idx = EXCP_SMP_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 6:
                            if (interrupt_request & CPU_INTERRUPT_MCHK) {
                                idx = EXCP_MCHK;
                            }
                        }
                        if (idx >= 0) {
                            env->exception_index = idx;
                            env->error_code = 0;
                            do_interrupt(env);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && (env->pregs[PR_CCS] & I_FLAG)
                        && !env->locked_irq) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_NMI
                        && (env->pregs[PR_CCS] & M_FLAG)) {
                        env->exception_index = EXCP_NMI;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        env->exception_index = env->pending_vector;
                        do_interrupt_m68k_hardirq(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_S390X) && !defined(CONFIG_USER_ONLY)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->psw.mask & PSW_MASK_EXT)) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_XTENSA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        env->exception_index = EXC_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#endif
                    /* Don't use the cached interrupt_request value:
                       do_interrupt may have updated the EXITTB flag. */
                    if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                }
                if (unlikely(env->exit_request)) {
                    env->exit_request = 0;
                    env->exception_index = EXCP_INTERRUPT;
                    cpu_loop_exit(env);
                }
#if defined(DEBUG_DISAS) || defined(CONFIG_DEBUG_EXEC)
                if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
                    /* restore flags in standard format */
#if defined(TARGET_I386)
                    env->eflags = env->eflags | cpu_cc_compute_all(env, CC_OP)
                        | (DF & DF_MASK);
                    log_cpu_state(env, X86_DUMP_CCOP);
                    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_M68K)
                    cpu_m68k_flush_flags(env, env->cc_op);
                    env->cc_op = CC_OP_FLAGS;
                    env->sr = (env->sr & 0xffe0)
                              | env->cc_dest | (env->cc_x << 4);
                    log_cpu_state(env, 0);
#else
                    log_cpu_state(env, 0);
#endif
                }
#endif /* DEBUG_DISAS || CONFIG_DEBUG_EXEC */
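                /* tb_lock protects the TB hash tables and the chaining done
                   below; user-mode guest threads can race here, while in
                   system mode (one TCG thread) the lock is effectively a
                   no-op. */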
                spin_lock(&tb_lock);
                tb = tb_find_fast(env);
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tb_invalidated_flag) {
                    /* Some TBs may have been invalidated (e.g. the whole
                       cache flushed) while generating the code, so the TB
                       recorded in next_tb may be stale: drop the chaining. */
                    next_tb = 0;
                    tb_invalidated_flag = 0;
                }
#ifdef CONFIG_DEBUG_EXEC
                qemu_log_mask(CPU_LOG_EXEC, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
                              (long)tb->tc_ptr, tb->pc,
                              lookup_symbol(tb->pc));
#endif
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
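                /* next_tb & ~3 is the previous TB; next_tb & 3 selects which
                   of its two jump slots gets patched to branch straight to
                   tb->tc_ptr, bypassing this loop on future executions. */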
                if (next_tb != 0 && tb->page_addr[1] == -1) {
                    tb_add_jump((TranslationBlock *)(next_tb & ~3), next_tb & 3, tb);
                }
                spin_unlock(&tb_lock);

                /* cpu_interrupt might be called while translating the
                   TB, but before it is linked into a potentially
                   infinite loop and becomes env->current_tb. Avoid
                   starting execution if there is a pending interrupt. */
                env->current_tb = tb;
                barrier();
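                /* barrier() is a compiler barrier (qemu-barrier.h): it keeps
                   the store to current_tb from being reordered past the
                   exit_request test below, so an asynchronous exit request
                   can reliably see and unlink the TB we are about to run. */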
                if (likely(!env->exit_request)) {
                    tc_ptr = tb->tc_ptr;
                    /* execute the generated code */
                    next_tb = tcg_qemu_tb_exec(env, tc_ptr);
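                    /* (next_tb & 3) == 2: the TB stopped early because the
                       icount budget (icount_decr) reached zero.  Either top
                       the decrementer back up from icount_extra and keep
                       going, or run the few remaining instructions with a
                       throwaway TB and exit so the pending timer or event
                       gets serviced. */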
                    if ((next_tb & 3) == 2) {
                        /* Instruction counter expired.  */
                        int insns_left;
                        tb = (TranslationBlock *)(long)(next_tb & ~3);
                        /* Restore PC.  */
                        cpu_pc_from_tb(env, tb);
                        insns_left = env->icount_decr.u32;
                        if (env->icount_extra && insns_left >= 0) {
                            /* Refill decrementer and continue execution.  */
                            env->icount_extra += insns_left;
                            if (env->icount_extra > 0xffff) {
                                insns_left = 0xffff;
                            } else {
                                insns_left = env->icount_extra;
                            }
                            env->icount_extra -= insns_left;
                            env->icount_decr.u16.low = insns_left;
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions.  */
                                cpu_exec_nocache(env, insns_left, tb);
                            }
                            env->exception_index = EXCP_INTERRUPT;
                            next_tb = 0;
                            cpu_loop_exit(env);
                        }
                    }
                }
                env->current_tb = NULL;
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
            } /* for(;;) */
        } else {
            /* Reload env after longjmp - the compiler may have smashed all
             * local variables as longjmp is marked 'noreturn'. */
            env = cpu_single_env;
        }
    } /* for(;;) */

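    /* We only get here via the break above, with ret holding the final
       exit code; fold the lazily tracked flags back into their
       architectural registers before returning to the caller. */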
#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | cpu_cc_compute_all(env, CC_OP)
        | (DF & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state?  */
#elif defined(TARGET_UNICORE32)
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#elif defined(TARGET_LM32)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
#elif defined(TARGET_XTENSA)
    /* XXXXX */
#else
#error unsupported target CPU
#endif

    /* fail safe: never use cpu_single_env outside cpu_exec() */
    cpu_single_env = NULL;
    return ret;
}