qemu/cpu-exec.c
/*
 *  emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#include "cpu.h"
#include "disas/disas.h"
#include "tcg.h"
#include "qemu/atomic.h"
#include "sysemu/qtest.h"

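/* Set when TBs may have been invalidated (or the whole translation
   buffer flushed) while code was being generated; the execution loop
   checks it and drops any cached TB pointer that could now be stale. */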
int tb_invalidated_flag;

//#define CONFIG_DEBUG_EXEC

bool qemu_cpu_has_work(CPUState *cpu)
{
    return cpu_has_work(cpu);
}

void cpu_loop_exit(CPUArchState *env)
{
    env->current_tb = NULL;
    longjmp(env->jmp_env, 1);
}

/* Exit the current TB from a signal handler.  The host registers are
   restored in a state compatible with the CPU emulator.
 */
#if defined(CONFIG_SOFTMMU)
void cpu_resume_from_signal(CPUArchState *env, void *puc)
{
    /* XXX: restore cpu registers saved in host registers */

    env->exception_index = -1;
    longjmp(env->jmp_env, 1);
}
#endif

/* Execute the code without caching the generated code.  An interpreter
   could be used if available. */
static void cpu_exec_nocache(CPUArchState *env, int max_cycles,
                             TranslationBlock *orig_tb)
{
    tcg_target_ulong next_tb;
    TranslationBlock *tb;

    /* Should never happen.
       We only end up here when an existing TB is too long.  */
    if (max_cycles > CF_COUNT_MASK) {
        max_cycles = CF_COUNT_MASK;
    }

    tb = tb_gen_code(env, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                     max_cycles);
    env->current_tb = tb;
    /* execute the generated code */
    next_tb = tcg_qemu_tb_exec(env, tb->tc_ptr);
    env->current_tb = NULL;

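    /* tcg_qemu_tb_exec() returns the last executed TB with status bits
       in its low two bits; a value of 2 there indicates the TB did not
       run to completion (here: the instruction counter expired). */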
    if ((next_tb & 3) == 2) {
        /* Restore PC.  This may happen if an async event occurs before
           the TB starts executing.  */
        cpu_pc_from_tb(env, tb);
    }
    tb_phys_invalidate(tb, -1);
    tb_free(tb);
}

static TranslationBlock *tb_find_slow(CPUArchState *env,
                                      target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    TranslationBlock *tb, **ptb1;
    unsigned int h;
    tb_page_addr_t phys_pc, phys_page1;
    target_ulong virt_page2;

    tb_invalidated_flag = 0;

    /* find translated block using physical mappings */
    phys_pc = get_page_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tb_phys_hash[h];
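    /* Walk the collision chain; a TB matches only if pc, cs_base, flags
       and the physical address of its first page all agree and, for a
       TB spanning two pages, the second page's physical address too. */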
    for(;;) {
        tb = *ptb1;
        if (!tb) {
            goto not_found;
        }
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                tb_page_addr_t phys_page2;

                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_page_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2) {
                    goto found;
                }
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code is available, translate it now */
    tb = tb_gen_code(env, pc, cs_base, flags, 0);

 found:
    /* Move the last found TB to the head of the list */
    if (likely(*ptb1)) {
        *ptb1 = tb->phys_hash_next;
        tb->phys_hash_next = tb_phys_hash[h];
        tb_phys_hash[h] = tb;
    }
    /* we add the TB to the virtual pc hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}

static inline TranslationBlock *tb_find_fast(CPUArchState *env)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    int flags;

    /* We record a subset of the CPU state; it will always be the same
       before a given translated block is executed. */
    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
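    /* tb_jmp_cache is a direct-mapped cache indexed by virtual PC; on a
       miss (or a stale entry) we fall back to the physical hash table. */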
    tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                 tb->flags != flags)) {
        tb = tb_find_slow(env, pc, cs_base, flags);
    }
    return tb;
}

static CPUDebugExcpHandler *debug_excp_handler;

void cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler)
{
    debug_excp_handler = handler;
}

static void cpu_handle_debug_exception(CPUArchState *env)
{
    CPUWatchpoint *wp;

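    /* If we stopped for a reason other than a watchpoint hit, clear any
       stale watchpoint-hit flags before entering the debug handler. */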
    if (!env->watchpoint_hit) {
        QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
    if (debug_excp_handler) {
        debug_excp_handler(env);
    }
}

/* main execution loop */

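/* Set asynchronously (e.g. from a signal handler or another thread) to
   make the CPU drop out of its execution loop as soon as possible. */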
volatile sig_atomic_t exit_request;

int cpu_exec(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    tcg_target_ulong next_tb;

    if (env->halted) {
        if (!cpu_has_work(cpu)) {
            return EXCP_HALTED;
        }

        env->halted = 0;
    }

    cpu_single_env = env;

    if (unlikely(exit_request)) {
        env->exit_request = 1;
    }

#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
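    /* QEMU evaluates x86 condition codes lazily: between TBs the
       arithmetic flags live in CC_SRC/CC_OP, and DF is kept as +1/-1 in
       a separate variable, so eflags itself holds only the other bits. */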
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    DF = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_SPARC)
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_UNICORE32)
#elif defined(TARGET_PPC)
    env->reserve_addr = -1;
#elif defined(TARGET_LM32)
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_OPENRISC)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
#elif defined(TARGET_XTENSA)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    env->exception_index = -1;

    /* prepare setjmp context for exception handling */
    for(;;) {
        if (setjmp(env->jmp_env) == 0) {
            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    if (ret == EXCP_DEBUG) {
                        cpu_handle_debug_exception(env);
                    }
                    break;
                } else {
#if defined(CONFIG_USER_ONLY)
                    /* In user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop. */
#if defined(TARGET_I386)
                    do_interrupt(env);
#endif
                    ret = env->exception_index;
                    break;
#else
                    do_interrupt(env);
                    env->exception_index = -1;
#endif
                }
            }

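            /* Inner loop: service any pending interrupt, look up (or
               translate) the next TB, chain it to the previous TB when
               safe, and execute it. */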
            next_tb = 0; /* force lookup of first TB */
            for(;;) {
                interrupt_request = env->interrupt_request;
                if (unlikely(interrupt_request)) {
                    if (unlikely(env->singlestep_enabled & SSTEP_NOIRQ)) {
                        /* Mask out external interrupts for this step. */
                        interrupt_request &= ~CPU_INTERRUPT_SSTEP_MASK;
                    }
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        env->exception_index = EXCP_DEBUG;
                        cpu_loop_exit(env);
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS) || \
    defined(TARGET_MICROBLAZE) || defined(TARGET_LM32) || defined(TARGET_UNICORE32)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        env->halted = 1;
                        env->exception_index = EXCP_HLT;
                        cpu_loop_exit(env);
                    }
#endif
#if defined(TARGET_I386)
#if !defined(CONFIG_USER_ONLY)
                    if (interrupt_request & CPU_INTERRUPT_POLL) {
                        env->interrupt_request &= ~CPU_INTERRUPT_POLL;
                        apic_poll_irq(env->apic_state);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_INIT) {
                        cpu_svm_check_intercept_param(env, SVM_EXIT_INIT,
                                                      0);
                        do_cpu_init(x86_env_get_cpu(env));
                        env->exception_index = EXCP_HALTED;
                        cpu_loop_exit(env);
                    } else if (interrupt_request & CPU_INTERRUPT_SIPI) {
                        do_cpu_sipi(x86_env_get_cpu(env));
                    } else if (env->hflags2 & HF2_GIF_MASK) {
                        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                            !(env->hflags & HF_SMM_MASK)) {
                            cpu_svm_check_intercept_param(env, SVM_EXIT_SMI,
                                                          0);
                            env->interrupt_request &= ~CPU_INTERRUPT_SMI;
                            do_smm_enter(env);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                                   !(env->hflags2 & HF2_NMI_MASK)) {
                            env->interrupt_request &= ~CPU_INTERRUPT_NMI;
                            env->hflags2 |= HF2_NMI_MASK;
                            do_interrupt_x86_hardirq(env, EXCP02_NMI, 1);
                            next_tb = 0;
                        } else if (interrupt_request & CPU_INTERRUPT_MCE) {
                            env->interrupt_request &= ~CPU_INTERRUPT_MCE;
                            do_interrupt_x86_hardirq(env, EXCP12_MCHK, 0);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                                   (((env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->hflags2 & HF2_HIF_MASK)) ||
                                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->eflags & IF_MASK &&
                                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
                            int intno;
                            cpu_svm_check_intercept_param(env, SVM_EXIT_INTR,
                                                          0);
                            env->interrupt_request &= ~(CPU_INTERRUPT_HARD |
                                                        CPU_INTERRUPT_VIRQ);
                            intno = cpu_get_pic_interrupt(env);
                            qemu_log_mask(CPU_LOG_TB_IN_ASM,
                                          "Servicing hardware INT=0x%02x\n",
                                          intno);
                            do_interrupt_x86_hardirq(env, intno, 1);
                            /* ensure that no TB jump will be modified,
                               since the program flow has changed */
                            next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                                   (env->eflags & IF_MASK) &&
                                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                            int intno;
                            /* FIXME: this should respect TPR */
                            cpu_svm_check_intercept_param(env, SVM_EXIT_VINTR,
                                                          0);
                            intno = ldl_phys(env->vm_vmcb +
                                             offsetof(struct vmcb,
                                                      control.int_vector));
                            qemu_log_mask(CPU_LOG_TB_IN_ASM,
                                          "Servicing virtual hardware INT=0x%02x\n",
                                          intno);
                            do_interrupt_x86_hardirq(env, intno, 1);
                            env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                            next_tb = 0;
#endif
                        }
                    }
#elif defined(TARGET_PPC)
                    if (interrupt_request & CPU_INTERRUPT_RESET) {
                        cpu_reset(cpu);
                    }
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0) {
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        }
                        next_tb = 0;
                    }
#elif defined(TARGET_LM32)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->ie & IE_IE)) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_MICROBLAZE)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->sregs[SR_MSR] & MSR_IE)
                        && !(env->sregs[SR_MSR] & (MSR_EIP | MSR_BIP))
                        && !(env->iflags & (D_FLAG | IMM_FLAG))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        cpu_mips_hw_interrupts_pending(env)) {
                        /* Raise it */
                        env->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_OPENRISC)
                    {
                        int idx = -1;
                        if ((interrupt_request & CPU_INTERRUPT_HARD)
                            && (env->sr & SR_IEE)) {
                            idx = EXCP_INT;
                        }
                        if ((interrupt_request & CPU_INTERRUPT_TIMER)
                            && (env->sr & SR_TEE)) {
                            idx = EXCP_TICK;
                        }
                        if (idx >= 0) {
                            env->exception_index = idx;
                            do_interrupt(env);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_SPARC)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        if (cpu_interrupts_enabled(env) &&
                            env->interrupt_index > 0) {
                            int pil = env->interrupt_index & 0xf;
                            int type = env->interrupt_index & 0xf0;

                            if (((type == TT_EXTINT) &&
                                 cpu_pil_allowed(env, pil)) ||
                                type != TT_EXTINT) {
                                env->exception_index = env->interrupt_index;
                                do_interrupt(env);
                                next_tb = 0;
                            }
                        }
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->uncached_cpsr & CPSR_F)) {
                        env->exception_index = EXCP_FIQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address.  */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->uncached_cpsr & CPSR_I))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_UNICORE32)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && !(env->uncached_asr & ASR_I)) {
                        env->exception_index = UC32_EXCP_INTR;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    {
                        int idx = -1;
                        /* ??? This hard-codes the OSF/1 interrupt levels.  */
                        switch (env->pal_mode ? 7 : env->ps & PS_INT_MASK) {
                        case 0 ... 3:
                            if (interrupt_request & CPU_INTERRUPT_HARD) {
                                idx = EXCP_DEV_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 4:
                            if (interrupt_request & CPU_INTERRUPT_TIMER) {
                                idx = EXCP_CLK_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 5:
                            if (interrupt_request & CPU_INTERRUPT_SMP) {
                                idx = EXCP_SMP_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 6:
                            if (interrupt_request & CPU_INTERRUPT_MCHK) {
                                idx = EXCP_MCHK;
                            }
                        }
                        if (idx >= 0) {
                            env->exception_index = idx;
                            env->error_code = 0;
                            do_interrupt(env);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && (env->pregs[PR_CCS] & I_FLAG)
                        && !env->locked_irq) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_NMI) {
                        unsigned int m_flag_archval;
                        if (env->pregs[PR_VR] < 32) {
                            m_flag_archval = M_FLAG_V10;
                        } else {
                            m_flag_archval = M_FLAG_V32;
                        }
                        if ((env->pregs[PR_CCS] & m_flag_archval)) {
                            env->exception_index = EXCP_NMI;
                            do_interrupt(env);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        env->exception_index = env->pending_vector;
                        do_interrupt_m68k_hardirq(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_S390X) && !defined(CONFIG_USER_ONLY)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->psw.mask & PSW_MASK_EXT)) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_XTENSA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        env->exception_index = EXC_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#endif
                    /* Don't use the cached interrupt_request value here:
                       do_interrupt may have updated the EXITTB flag. */
                    if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified,
                           since the program flow has changed */
                        next_tb = 0;
                    }
                }
                if (unlikely(env->exit_request)) {
                    env->exit_request = 0;
                    env->exception_index = EXCP_INTERRUPT;
                    cpu_loop_exit(env);
                }
#if defined(DEBUG_DISAS) || defined(CONFIG_DEBUG_EXEC)
                if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
                    /* restore flags in standard format */
#if defined(TARGET_I386)
                    env->eflags = env->eflags | cpu_cc_compute_all(env, CC_OP)
                        | (DF & DF_MASK);
                    log_cpu_state(env, CPU_DUMP_CCOP);
                    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_M68K)
                    cpu_m68k_flush_flags(env, env->cc_op);
                    env->cc_op = CC_OP_FLAGS;
                    env->sr = (env->sr & 0xffe0)
                              | env->cc_dest | (env->cc_x << 4);
                    log_cpu_state(env, 0);
#else
                    log_cpu_state(env, 0);
#endif
                }
#endif /* DEBUG_DISAS || CONFIG_DEBUG_EXEC */
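                /* tb_lock serializes access to the TB hash tables and
                   block chaining (it matters mainly for multi-threaded
                   user-mode emulation); hold it across the lookup and
                   the tb_add_jump() below. */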
                spin_lock(&tb_lock);
                tb = tb_find_fast(env);
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tb_invalidated_flag) {
                    /* Some TBs may have been invalidated by memory
                       exceptions while code was being generated, so the
                       chaining hint must be dropped here. */
                    next_tb = 0;
                    tb_invalidated_flag = 0;
                }
#ifdef CONFIG_DEBUG_EXEC
                qemu_log_mask(CPU_LOG_EXEC, "Trace %p [" TARGET_FMT_lx "] %s\n",
                              tb->tc_ptr, tb->pc,
                              lookup_symbol(tb->pc));
#endif
                /* See if we can patch the calling TB.  When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
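                /* next_tb encodes the previous TB's address with the
                   index of the jump slot to patch in its low two bits. */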
                if (next_tb != 0 && tb->page_addr[1] == -1) {
                    tb_add_jump((TranslationBlock *)(next_tb & ~3),
                                next_tb & 3, tb);
                }
                spin_unlock(&tb_lock);

                /* cpu_interrupt might be called while translating the
                   TB, but before it is linked into a potentially
                   infinite loop and becomes env->current_tb.  Avoid
                   starting execution if there is a pending interrupt. */
                env->current_tb = tb;
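                /* barrier() is a compiler barrier: it keeps the store to
                   current_tb above and the exit_request test below from
                   being reordered by the compiler. */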
                barrier();
                if (likely(!env->exit_request)) {
                    tc_ptr = tb->tc_ptr;
                    /* execute the generated code */
                    next_tb = tcg_qemu_tb_exec(env, tc_ptr);
                    if ((next_tb & 3) == 2) {
                        /* Instruction counter expired.  */
                        int insns_left;
                        tb = (TranslationBlock *)(next_tb & ~3);
                        /* Restore PC.  */
                        cpu_pc_from_tb(env, tb);
                        insns_left = env->icount_decr.u32;
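                        /* The decrementer is only 16 bits wide, so the
                           remaining instruction budget is banked in
                           icount_extra and refilled in chunks of at most
                           0xffff instructions. */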
                        if (env->icount_extra && insns_left >= 0) {
                            /* Refill decrementer and continue execution.  */
                            env->icount_extra += insns_left;
                            if (env->icount_extra > 0xffff) {
                                insns_left = 0xffff;
                            } else {
                                insns_left = env->icount_extra;
                            }
                            env->icount_extra -= insns_left;
                            env->icount_decr.u16.low = insns_left;
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions.  */
                                cpu_exec_nocache(env, insns_left, tb);
                            }
                            env->exception_index = EXCP_INTERRUPT;
                            next_tb = 0;
                            cpu_loop_exit(env);
                        }
                    }
                }
                env->current_tb = NULL;
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
            } /* for(;;) */
        } else {
            /* Reload env after longjmp - the compiler may have smashed all
             * local variables as longjmp is marked 'noreturn'. */
            env = cpu_single_env;
        }
    } /* for(;;) */

#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | cpu_cc_compute_all(env, CC_OP)
        | (DF & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state?  */
#elif defined(TARGET_UNICORE32)
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#elif defined(TARGET_LM32)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_OPENRISC)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
#elif defined(TARGET_XTENSA)
    /* XXXXX */
#else
#error unsupported target CPU
#endif

    /* fail safe: never use cpu_single_env outside cpu_exec() */
    cpu_single_env = NULL;
    return ret;
}