qemu/cpu-exec.c
/*
 *  emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#include "cpu.h"
#include "disas/disas.h"
#include "tcg.h"
#include "qemu/atomic.h"
#include "sysemu/qtest.h"

void cpu_loop_exit(CPUState *cpu)
{
    cpu->current_tb = NULL;
    siglongjmp(cpu->jmp_env, 1);
}
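
/* A rough sketch of how cpu_loop_exit() pairs with the sigsetjmp() in
 * cpu_exec() below:
 *
 *     if (sigsetjmp(cpu->jmp_env, 0) == 0) {
 *         ... execute TBs; a helper may call cpu_loop_exit(cpu) ...
 *     } else {
 *         ... we land back here, cpu->exception_index says why ...
 *     }
 *
 * Anything that must unwind out of generated code (exceptions,
 * interrupts, debug events) funnels through this longjmp.
 */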

/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
#if defined(CONFIG_SOFTMMU)
void cpu_resume_from_signal(CPUState *cpu, void *puc)
{
    /* XXX: restore cpu registers saved in host registers */

    cpu->exception_index = -1;
    siglongjmp(cpu->jmp_env, 1);
}
#endif

/* Execute a TB, and fix up the CPU state afterwards if necessary */
static inline tcg_target_ulong cpu_tb_exec(CPUState *cpu, uint8_t *tb_ptr)
{
    CPUArchState *env = cpu->env_ptr;
    uintptr_t next_tb;

#if defined(DEBUG_DISAS)
    if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
#if defined(TARGET_I386)
        log_cpu_state(cpu, CPU_DUMP_CCOP);
#elif defined(TARGET_M68K)
        /* ??? Should not modify env state for dumping.  */
        cpu_m68k_flush_flags(env, env->cc_op);
        env->cc_op = CC_OP_FLAGS;
        env->sr = (env->sr & 0xffe0) | env->cc_dest | (env->cc_x << 4);
        log_cpu_state(cpu, 0);
#else
        log_cpu_state(cpu, 0);
#endif
    }
#endif /* DEBUG_DISAS */

    next_tb = tcg_qemu_tb_exec(env, tb_ptr);
    if ((next_tb & TB_EXIT_MASK) > TB_EXIT_IDX1) {
        /* We didn't start executing this TB (e.g. because the
         * instruction counter hit zero); we must restore the guest PC
         * to the address of the start of the TB.
         */
        CPUClass *cc = CPU_GET_CLASS(cpu);
        TranslationBlock *tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
        if (cc->synchronize_from_tb) {
            cc->synchronize_from_tb(cpu, tb);
        } else {
            assert(cc->set_pc);
            cc->set_pc(cpu, tb->pc);
        }
    }
    if ((next_tb & TB_EXIT_MASK) == TB_EXIT_REQUESTED) {
        /* We were asked to stop executing TBs (probably a pending
         * interrupt). We've now stopped, so clear the flag.
         */
        cpu->tcg_exit_req = 0;
    }
    return next_tb;
}
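
/* A note on the encoding assumed above: tcg_qemu_tb_exec() returns the
 * address of the TB that was executing, with the exit reason OR'ed into
 * the low TB_EXIT_MASK bits (a taken goto_tb slot, TB_EXIT_REQUESTED,
 * or TB_EXIT_ICOUNT_EXPIRED), so callers decode it roughly as:
 *
 *     TranslationBlock *tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
 *     unsigned exit_reason = next_tb & TB_EXIT_MASK;
 */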

/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
static void cpu_exec_nocache(CPUArchState *env, int max_cycles,
                             TranslationBlock *orig_tb)
{
    CPUState *cpu = ENV_GET_CPU(env);
    TranslationBlock *tb;

    /* Should never happen.
       We only end up here when an existing TB is too long.  */
    if (max_cycles > CF_COUNT_MASK) {
        max_cycles = CF_COUNT_MASK;
    }

    tb = tb_gen_code(cpu, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                     max_cycles);
    cpu->current_tb = tb;
    /* execute the generated code */
    cpu_tb_exec(cpu, tb->tc_ptr);
    cpu->current_tb = NULL;
    tb_phys_invalidate(tb, -1);
    tb_free(tb);
}
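
/* A typical caller is the icount path at the bottom of cpu_exec(): when
 * fewer instructions remain in the budget than the cached TB would
 * execute, something like
 *
 *     cpu_exec_nocache(env, insns_left, tb);
 *
 * regenerates the block capped at insns_left instructions, runs it
 * once, and immediately invalidates and frees it again.
 */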

static TranslationBlock *tb_find_slow(CPUArchState *env,
                                      target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    CPUState *cpu = ENV_GET_CPU(env);
    TranslationBlock *tb, **ptb1;
    unsigned int h;
    tb_page_addr_t phys_pc, phys_page1;
    target_ulong virt_page2;

    tcg_ctx.tb_ctx.tb_invalidated_flag = 0;

    /* find translated block using physical mappings */
    phys_pc = get_page_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tcg_ctx.tb_ctx.tb_phys_hash[h];
    for (;;) {
        tb = *ptb1;
        if (!tb) {
            goto not_found;
        }
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                tb_page_addr_t phys_page2;

                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_page_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2) {
                    goto found;
                }
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_gen_code(cpu, pc, cs_base, flags, 0);

 found:
    /* Move the last found TB to the head of the list */
    if (likely(*ptb1)) {
        *ptb1 = tb->phys_hash_next;
        tb->phys_hash_next = tcg_ctx.tb_ctx.tb_phys_hash[h];
        tcg_ctx.tb_ctx.tb_phys_hash[h] = tb;
    }
    /* we add the TB in the virtual pc hash table */
    cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}
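
/* Lookup is two-level: tb_find_fast() below first consults the per-CPU
 * virtual-PC cache, and only on a miss falls back to this function,
 * which walks the physical-PC hash chain, roughly:
 *
 *     tb = cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];   // level 1
 *     tb = tb_find_slow(env, pc, cs_base, flags);           // level 2
 *
 * Hashing by physical PC keeps cached TBs valid across changes to the
 * guest's virtual memory mappings.
 */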

static inline TranslationBlock *tb_find_fast(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    int flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
    tb = cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                 tb->flags != flags)) {
        tb = tb_find_slow(env, pc, cs_base, flags);
    }
    return tb;
}

static CPUDebugExcpHandler *debug_excp_handler;

void cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler)
{
    debug_excp_handler = handler;
}

static void cpu_handle_debug_exception(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
    CPUWatchpoint *wp;

    if (!cpu->watchpoint_hit) {
        QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
    if (debug_excp_handler) {
        debug_excp_handler(env);
    }
}

/* main execution loop */

volatile sig_atomic_t exit_request;

int cpu_exec(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
#if !(defined(CONFIG_USER_ONLY) && \
      (defined(TARGET_M68K) || defined(TARGET_PPC) || defined(TARGET_S390X)))
    CPUClass *cc = CPU_GET_CLASS(cpu);
#endif
#ifdef TARGET_I386
    X86CPU *x86_cpu = X86_CPU(cpu);
#endif
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    uintptr_t next_tb;
    /* This must be volatile so it is not trashed by longjmp() */
    volatile bool have_tb_lock = false;

    if (cpu->halted) {
        if (!cpu_has_work(cpu)) {
            return EXCP_HALTED;
        }

        cpu->halted = 0;
    }

    current_cpu = cpu;

    /* As long as current_cpu is null, up to the assignment just above,
     * requests by other threads to exit the execution loop are expected to
     * be issued using the exit_request global. We must make sure that our
     * evaluation of the global value is performed past the current_cpu
     * value transition point, which requires a memory barrier as well as
     * an instruction scheduling constraint on modern architectures.  */
    smp_mb();
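
    /* Sketch of the handshake assumed here, with T being a thread that
     * wants us to stop:
     *
     *     this thread                     thread T
     *     -----------                     --------
     *     current_cpu = cpu;              exit_request = 1;
     *     smp_mb();                       smp_mb();
     *     if (exit_request) ...           if (current_cpu) kick it
     *
     * Whatever the interleaving, at least one side observes the other's
     * store, so an exit request issued around this point is not lost.
     */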

    if (unlikely(exit_request)) {
        cpu->exit_request = 1;
    }

#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    env->df = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_SPARC)
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_UNICORE32)
#elif defined(TARGET_PPC)
    env->reserve_addr = -1;
#elif defined(TARGET_LM32)
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_MOXIE)
#elif defined(TARGET_OPENRISC)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
#elif defined(TARGET_XTENSA)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
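
    /* For i386 and m68k, the block above switches the flags into the
     * lazy representation used by generated code: rather than
     * materializing the flags register after every instruction, TCG
     * records the last operation (CC_OP) and its operands and folds
     * them on demand, along the lines of:
     *
     *     eflags |= cpu_cc_compute_all(env, CC_OP);   // i386 fold
     *
     * The conversion back to the architectural format happens at the
     * bottom of this function.
     */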
    cpu->exception_index = -1;

    /* prepare setjmp context for exception handling */
    for (;;) {
        if (sigsetjmp(cpu->jmp_env, 0) == 0) {
            /* if an exception is pending, we execute it here */
            if (cpu->exception_index >= 0) {
                if (cpu->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = cpu->exception_index;
                    if (ret == EXCP_DEBUG) {
                        cpu_handle_debug_exception(env);
                    }
                    break;
                } else {
#if defined(CONFIG_USER_ONLY)
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    cc->do_interrupt(cpu);
#endif
                    ret = cpu->exception_index;
                    break;
#else
                    cc->do_interrupt(cpu);
                    cpu->exception_index = -1;
#endif
                }
            }

            next_tb = 0; /* force lookup of first TB */
            for (;;) {
                interrupt_request = cpu->interrupt_request;
                if (unlikely(interrupt_request)) {
                    if (unlikely(cpu->singlestep_enabled & SSTEP_NOIRQ)) {
                        /* Mask out external interrupts for this step. */
                        interrupt_request &= ~CPU_INTERRUPT_SSTEP_MASK;
                    }
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        cpu->exception_index = EXCP_DEBUG;
                        cpu_loop_exit(cpu);
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS) || \
    defined(TARGET_MICROBLAZE) || defined(TARGET_LM32) || \
    defined(TARGET_UNICORE32)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        cpu->halted = 1;
                        cpu->exception_index = EXCP_HLT;
                        cpu_loop_exit(cpu);
                    }
#endif
#if defined(TARGET_I386)
                    if (interrupt_request & CPU_INTERRUPT_INIT) {
                        cpu_svm_check_intercept_param(env, SVM_EXIT_INIT, 0);
                        do_cpu_init(x86_cpu);
                        cpu->exception_index = EXCP_HALTED;
                        cpu_loop_exit(cpu);
                    }
#else
                    if (interrupt_request & CPU_INTERRUPT_RESET) {
                        cpu_reset(cpu);
                    }
#endif
#if defined(TARGET_I386)
#if !defined(CONFIG_USER_ONLY)
                    if (interrupt_request & CPU_INTERRUPT_POLL) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_POLL;
                        apic_poll_irq(x86_cpu->apic_state);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_SIPI) {
                        do_cpu_sipi(x86_cpu);
                    } else if (env->hflags2 & HF2_GIF_MASK) {
                        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                            !(env->hflags & HF_SMM_MASK)) {
                            cpu_svm_check_intercept_param(env, SVM_EXIT_SMI,
                                                          0);
                            cpu->interrupt_request &= ~CPU_INTERRUPT_SMI;
                            do_smm_enter(x86_cpu);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                                   !(env->hflags2 & HF2_NMI_MASK)) {
                            cpu->interrupt_request &= ~CPU_INTERRUPT_NMI;
                            env->hflags2 |= HF2_NMI_MASK;
                            do_interrupt_x86_hardirq(env, EXCP02_NMI, 1);
                            next_tb = 0;
                        } else if (interrupt_request & CPU_INTERRUPT_MCE) {
                            cpu->interrupt_request &= ~CPU_INTERRUPT_MCE;
                            do_interrupt_x86_hardirq(env, EXCP12_MCHK, 0);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                                   (((env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->hflags2 & HF2_HIF_MASK)) ||
                                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->eflags & IF_MASK &&
                                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
                            int intno;
                            cpu_svm_check_intercept_param(env, SVM_EXIT_INTR,
                                                          0);
                            cpu->interrupt_request &= ~(CPU_INTERRUPT_HARD |
                                                        CPU_INTERRUPT_VIRQ);
                            intno = cpu_get_pic_interrupt(env);
                            qemu_log_mask(CPU_LOG_TB_IN_ASM,
                                          "Servicing hardware INT=0x%02x\n",
                                          intno);
                            do_interrupt_x86_hardirq(env, intno, 1);
                            /* ensure that no TB jump will be modified as
                               the program flow was changed */
                            next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                                   (env->eflags & IF_MASK) &&
                                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                            int intno;
                            /* FIXME: this should respect TPR */
                            cpu_svm_check_intercept_param(env, SVM_EXIT_VINTR,
                                                          0);
                            intno = ldl_phys(cpu->as,
                                             env->vm_vmcb
                                             + offsetof(struct vmcb,
                                                        control.int_vector));
                            qemu_log_mask(CPU_LOG_TB_IN_ASM,
                                          "Servicing virtual hardware "
                                          "INT=0x%02x\n", intno);
                            do_interrupt_x86_hardirq(env, intno, 1);
                            cpu->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                            next_tb = 0;
#endif
                        }
                    }
#elif defined(TARGET_PPC)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0) {
                            cpu->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        }
                        next_tb = 0;
                    }
#elif defined(TARGET_LM32)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->ie & IE_IE)) {
                        cpu->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_MICROBLAZE)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->sregs[SR_MSR] & MSR_IE)
                        && !(env->sregs[SR_MSR] & (MSR_EIP | MSR_BIP))
                        && !(env->iflags & (D_FLAG | IMM_FLAG))) {
                        cpu->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        cpu_mips_hw_interrupts_pending(env)) {
                        /* Raise it */
                        cpu->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_OPENRISC)
                    {
                        int idx = -1;
                        if ((interrupt_request & CPU_INTERRUPT_HARD)
                            && (env->sr & SR_IEE)) {
                            idx = EXCP_INT;
                        }
                        if ((interrupt_request & CPU_INTERRUPT_TIMER)
                            && (env->sr & SR_TEE)) {
                            idx = EXCP_TICK;
                        }
                        if (idx >= 0) {
                            cpu->exception_index = idx;
                            cc->do_interrupt(cpu);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_SPARC)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        if (cpu_interrupts_enabled(env) &&
                            env->interrupt_index > 0) {
                            int pil = env->interrupt_index & 0xf;
                            int type = env->interrupt_index & 0xf0;

                            if (((type == TT_EXTINT) &&
                                  cpu_pil_allowed(env, pil)) ||
                                  type != TT_EXTINT) {
                                cpu->exception_index = env->interrupt_index;
                                cc->do_interrupt(cpu);
                                next_tb = 0;
                            }
                        }
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->daif & PSTATE_F)) {
                        cpu->exception_index = EXCP_FIQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address.  */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->daif & PSTATE_I))) {
                        cpu->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_UNICORE32)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && !(env->uncached_asr & ASR_I)) {
                        cpu->exception_index = UC32_EXCP_INTR;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    {
                        int idx = -1;
                        /* ??? This hard-codes the OSF/1 interrupt levels.  */
                        switch (env->pal_mode ? 7 : env->ps & PS_INT_MASK) {
                        case 0 ... 3:
                            if (interrupt_request & CPU_INTERRUPT_HARD) {
                                idx = EXCP_DEV_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 4:
                            if (interrupt_request & CPU_INTERRUPT_TIMER) {
                                idx = EXCP_CLK_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 5:
                            if (interrupt_request & CPU_INTERRUPT_SMP) {
                                idx = EXCP_SMP_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 6:
                            if (interrupt_request & CPU_INTERRUPT_MCHK) {
                                idx = EXCP_MCHK;
                            }
                        }
                        if (idx >= 0) {
                            cpu->exception_index = idx;
                            env->error_code = 0;
                            cc->do_interrupt(cpu);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && (env->pregs[PR_CCS] & I_FLAG)
                        && !env->locked_irq) {
                        cpu->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_NMI) {
                        unsigned int m_flag_archval;
                        if (env->pregs[PR_VR] < 32) {
                            m_flag_archval = M_FLAG_V10;
                        } else {
                            m_flag_archval = M_FLAG_V32;
                        }
                        if ((env->pregs[PR_CCS] & m_flag_archval)) {
                            cpu->exception_index = EXCP_NMI;
                            cc->do_interrupt(cpu);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        cpu->exception_index = env->pending_vector;
                        do_interrupt_m68k_hardirq(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_S390X) && !defined(CONFIG_USER_ONLY)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->psw.mask & PSW_MASK_EXT)) {
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_XTENSA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        cpu->exception_index = EXC_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#endif
                    /* Don't use the cached interrupt_request value:
                       do_interrupt may have updated the EXITTB flag. */
                    if (cpu->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                }
                if (unlikely(cpu->exit_request)) {
                    cpu->exit_request = 0;
                    cpu->exception_index = EXCP_INTERRUPT;
                    cpu_loop_exit(cpu);
                }
                spin_lock(&tcg_ctx.tb_ctx.tb_lock);
                have_tb_lock = true;
                tb = tb_find_fast(env);
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tcg_ctx.tb_ctx.tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                    tcg_ctx.tb_ctx.tb_invalidated_flag = 0;
                }
                if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
                    qemu_log("Trace %p [" TARGET_FMT_lx "] %s\n",
                             tb->tc_ptr, tb->pc, lookup_symbol(tb->pc));
                }
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                if (next_tb != 0 && tb->page_addr[1] == -1) {
                    tb_add_jump((TranslationBlock *)(next_tb & ~TB_EXIT_MASK),
                                next_tb & TB_EXIT_MASK, tb);
                }
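                /* After tb_add_jump() the previous TB's goto_tb slot
                 * (selected by next_tb & TB_EXIT_MASK) branches straight
                 * to tb->tc_ptr, so later iterations can, in effect, run
                 * TB-to-TB without returning to this loop until
                 * something breaks the chain.
                 */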
                have_tb_lock = false;
                spin_unlock(&tcg_ctx.tb_ctx.tb_lock);

                /* cpu_interrupt might be called while translating the
                   TB, but before it is linked into a potentially
                   infinite loop and becomes cpu->current_tb. Avoid
                   starting execution if there is a pending interrupt. */
                cpu->current_tb = tb;
                barrier();
                if (likely(!cpu->exit_request)) {
                    tc_ptr = tb->tc_ptr;
                    /* execute the generated code */
                    next_tb = cpu_tb_exec(cpu, tc_ptr);
                    switch (next_tb & TB_EXIT_MASK) {
                    case TB_EXIT_REQUESTED:
                        /* Something asked us to stop executing
                         * chained TBs; just continue round the main
                         * loop. Whatever requested the exit will also
                         * have set something else (e.g. exit_request or
                         * interrupt_request) which we will handle
                         * next time around the loop.
                         */
                        tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
                        next_tb = 0;
                        break;
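                    /* The icount layout assumed by the case below:
                     * cpu->icount_decr.u32 overlays a 16-bit low half
                     * that generated code decrements, while
                     * cpu->icount_extra banks instructions beyond the
                     * 16-bit range, so a refill looks roughly like:
                     *
                     *     low = MIN(budget, 0xffff);
                     *     cpu->icount_decr.u16.low = low;
                     *     cpu->icount_extra = budget - low;
                     */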
                    case TB_EXIT_ICOUNT_EXPIRED:
                    {
                        /* Instruction counter expired.  */
                        int insns_left;
                        tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
                        insns_left = cpu->icount_decr.u32;
                        if (cpu->icount_extra && insns_left >= 0) {
                            /* Refill decrementer and continue execution.  */
                            cpu->icount_extra += insns_left;
                            if (cpu->icount_extra > 0xffff) {
                                insns_left = 0xffff;
                            } else {
                                insns_left = cpu->icount_extra;
                            }
                            cpu->icount_extra -= insns_left;
                            cpu->icount_decr.u16.low = insns_left;
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions.  */
                                cpu_exec_nocache(env, insns_left, tb);
                            }
                            cpu->exception_index = EXCP_INTERRUPT;
                            next_tb = 0;
                            cpu_loop_exit(cpu);
                        }
                        break;
                    }
                    default:
                        break;
                    }
                }
                cpu->current_tb = NULL;
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
            } /* for (;;) */
        } else {
            /* Reload env after longjmp - the compiler may have smashed all
             * local variables as longjmp is marked 'noreturn'. */
            cpu = current_cpu;
            env = cpu->env_ptr;
#if !(defined(CONFIG_USER_ONLY) && \
      (defined(TARGET_M68K) || defined(TARGET_PPC) || defined(TARGET_S390X)))
            cc = CPU_GET_CLASS(cpu);
#endif
#ifdef TARGET_I386
            x86_cpu = X86_CPU(cpu);
#endif
            if (have_tb_lock) {
                spin_unlock(&tcg_ctx.tb_ctx.tb_lock);
                have_tb_lock = false;
            }
        }
    } /* for (;;) */

#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags |= cpu_cc_compute_all(env, CC_OP) | (env->df & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state?  */
#elif defined(TARGET_UNICORE32)
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#elif defined(TARGET_LM32)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_MOXIE)
#elif defined(TARGET_OPENRISC)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
#elif defined(TARGET_XTENSA)
    /* XXXXX */
#else
#error unsupported target CPU
#endif

    /* fail safe: never use current_cpu outside cpu_exec() */
    current_cpu = NULL;
    return ret;
}