qemu/accel/tcg/cpu-exec.c
/*
 *  emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "cpu.h"
#include "trace.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg.h"
#include "qemu/atomic.h"
#include "sysemu/qtest.h"
#include "qemu/timer.h"
#include "qemu/rcu.h"
#include "exec/tb-hash.h"
#include "exec/tb-lookup.h"
#include "exec/log.h"
#include "qemu/main-loop.h"
#if defined(TARGET_I386) && !defined(CONFIG_USER_ONLY)
#include "hw/i386/apic.h"
#endif
#include "sysemu/cpus.h"
#include "sysemu/replay.h"

/* -icount align implementation. */

typedef struct SyncClocks {
    int64_t diff_clk;
    int64_t last_cpu_icount;
    int64_t realtime_clock;
} SyncClocks;

#if !defined(CONFIG_USER_ONLY)
/* Allow the guest to have a max 3ms advance.
 * The difference between the 2 clocks could therefore
 * oscillate around 0.
 */
#define VM_CLOCK_ADVANCE 3000000
#define THRESHOLD_REDUCE 1.5
#define MAX_DELAY_PRINT_RATE 2000000000LL
#define MAX_NB_PRINTS 100

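/* Sleep the host thread whenever the guest's virtual clock has run more
 * than VM_CLOCK_ADVANCE ahead of real time; no-op unless the
 * -icount align option was given.
 */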
static void align_clocks(SyncClocks *sc, const CPUState *cpu)
{
    int64_t cpu_icount;

    if (!icount_align_option) {
        return;
    }

    cpu_icount = cpu->icount_extra + cpu->icount_decr.u16.low;
    sc->diff_clk += cpu_icount_to_ns(sc->last_cpu_icount - cpu_icount);
    sc->last_cpu_icount = cpu_icount;

    if (sc->diff_clk > VM_CLOCK_ADVANCE) {
#ifndef _WIN32
        struct timespec sleep_delay, rem_delay;
        sleep_delay.tv_sec = sc->diff_clk / 1000000000LL;
        sleep_delay.tv_nsec = sc->diff_clk % 1000000000LL;
        if (nanosleep(&sleep_delay, &rem_delay) < 0) {
            sc->diff_clk = rem_delay.tv_sec * 1000000000LL + rem_delay.tv_nsec;
        } else {
            sc->diff_clk = 0;
        }
#else
        Sleep(sc->diff_clk / SCALE_MS);
        sc->diff_clk = 0;
#endif
    }
}

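/* Warn when the guest has fallen behind real time, at most once per
 * MAX_DELAY_PRINT_RATE ns and MAX_NB_PRINTS times in total.
 */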
static void print_delay(const SyncClocks *sc)
{
    static float threshold_delay;
    static int64_t last_realtime_clock;
    static int nb_prints;

    if (icount_align_option &&
        sc->realtime_clock - last_realtime_clock >= MAX_DELAY_PRINT_RATE &&
        nb_prints < MAX_NB_PRINTS) {
        if ((-sc->diff_clk / (float)1000000000LL > threshold_delay) ||
            (-sc->diff_clk / (float)1000000000LL <
             (threshold_delay - THRESHOLD_REDUCE))) {
            threshold_delay = (-sc->diff_clk / 1000000000LL) + 1;
            printf("Warning: The guest is now late by %.1f to %.1f seconds\n",
                   threshold_delay - 1,
                   threshold_delay);
            nb_prints++;
            last_realtime_clock = sc->realtime_clock;
        }
    }
}

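/* Record the initial offset between the guest's virtual clock and real
 * time, and update the global max_delay/max_advance statistics.
 */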
static void init_delay_params(SyncClocks *sc,
                              const CPUState *cpu)
{
    if (!icount_align_option) {
        return;
    }
    sc->realtime_clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL_RT);
    sc->diff_clk = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) - sc->realtime_clock;
    sc->last_cpu_icount = cpu->icount_extra + cpu->icount_decr.u16.low;
    if (sc->diff_clk < max_delay) {
        max_delay = sc->diff_clk;
    }
    if (sc->diff_clk > max_advance) {
        max_advance = sc->diff_clk;
    }

    /* Print at most every 2s if the guest is late. We limit the number
       of printed messages to MAX_NB_PRINTS (currently 100). */
    print_delay(sc);
}
#else
static void align_clocks(SyncClocks *sc, const CPUState *cpu)
{
}

static void init_delay_params(SyncClocks *sc, const CPUState *cpu)
{
}
#endif /* CONFIG_USER_ONLY */

/* Execute a TB, and fix up the CPU state afterwards if necessary */
static inline tcg_target_ulong cpu_tb_exec(CPUState *cpu, TranslationBlock *itb)
{
    CPUArchState *env = cpu->env_ptr;
    uintptr_t ret;
    TranslationBlock *last_tb;
    int tb_exit;
    uint8_t *tb_ptr = itb->tc.ptr;

    qemu_log_mask_and_addr(CPU_LOG_EXEC, itb->pc,
                           "Trace %d: %p ["
                           TARGET_FMT_lx "/" TARGET_FMT_lx "/%#x] %s\n",
                           cpu->cpu_index, itb->tc.ptr,
                           itb->cs_base, itb->pc, itb->flags,
                           lookup_symbol(itb->pc));

#if defined(DEBUG_DISAS)
    if (qemu_loglevel_mask(CPU_LOG_TB_CPU)
        && qemu_log_in_addr_range(itb->pc)) {
        qemu_log_lock();
        int flags = 0;
        if (qemu_loglevel_mask(CPU_LOG_TB_FPU)) {
            flags |= CPU_DUMP_FPU;
        }
#if defined(TARGET_I386)
        flags |= CPU_DUMP_CCOP;
#endif
        log_cpu_state(cpu, flags);
        qemu_log_unlock();
    }
#endif /* DEBUG_DISAS */

    cpu->can_do_io = !use_icount;
    ret = tcg_qemu_tb_exec(env, tb_ptr);
    cpu->can_do_io = 1;
    last_tb = (TranslationBlock *)(ret & ~TB_EXIT_MASK);
    tb_exit = ret & TB_EXIT_MASK;
    trace_exec_tb_exit(last_tb, tb_exit);

    if (tb_exit > TB_EXIT_IDX1) {
        /* We didn't start executing this TB (eg because the instruction
         * counter hit zero); we must restore the guest PC to the address
         * of the start of the TB.
         */
        CPUClass *cc = CPU_GET_CLASS(cpu);
        qemu_log_mask_and_addr(CPU_LOG_EXEC, last_tb->pc,
                               "Stopped execution of TB chain before %p ["
                               TARGET_FMT_lx "] %s\n",
                               last_tb->tc.ptr, last_tb->pc,
                               lookup_symbol(last_tb->pc));
        if (cc->synchronize_from_tb) {
            cc->synchronize_from_tb(cpu, last_tb);
        } else {
            assert(cc->set_pc);
            cc->set_pc(cpu, last_tb->pc);
        }
    }
    return ret;
}

#ifndef CONFIG_USER_ONLY
/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
static void cpu_exec_nocache(CPUState *cpu, int max_cycles,
                             TranslationBlock *orig_tb, bool ignore_icount)
{
    TranslationBlock *tb;
    uint32_t cflags = curr_cflags() | CF_NOCACHE;

    if (ignore_icount) {
        cflags &= ~CF_USE_ICOUNT;
    }

    /* Should never happen.
       We only end up here when an existing TB is too long.  */
    cflags |= MIN(max_cycles, CF_COUNT_MASK);

    mmap_lock();
    tb = tb_gen_code(cpu, orig_tb->pc, orig_tb->cs_base,
                     orig_tb->flags, cflags);
    tb->orig_tb = orig_tb;
    mmap_unlock();

    /* execute the generated code */
    trace_exec_tb_nocache(tb, tb->pc);
    cpu_tb_exec(cpu, tb);

    mmap_lock();
    tb_phys_invalidate(tb, -1);
    mmap_unlock();
    tcg_tb_remove(tb);
}
#endif

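/* Execute one guest instruction inside an exclusive region, with every
 * other vCPU stopped.  This is how atomic operations are emulated when
 * TCG runs vCPUs in parallel: cflags == 1 requests a
 * single-instruction TB.
 */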
void cpu_exec_step_atomic(CPUState *cpu)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    uint32_t flags;
    uint32_t cflags = 1;
    uint32_t cf_mask = cflags & CF_HASH_MASK;
    /* volatile because we modify it between setjmp and longjmp */
    volatile bool in_exclusive_region = false;

    if (sigsetjmp(cpu->jmp_env, 0) == 0) {
        tb = tb_lookup__cpu_state(cpu, &pc, &cs_base, &flags, cf_mask);
        if (tb == NULL) {
            mmap_lock();
            tb = tb_gen_code(cpu, pc, cs_base, flags, cflags);
            mmap_unlock();
        }

        start_exclusive();

        /* Since we got here, we know that parallel_cpus must be true.  */
        parallel_cpus = false;
        in_exclusive_region = true;
        cc->cpu_exec_enter(cpu);
        /* execute the generated code */
        trace_exec_tb(tb, pc);
        cpu_tb_exec(cpu, tb);
        cc->cpu_exec_exit(cpu);
    } else {
        /*
         * The mmap_lock is dropped by tb_gen_code if it runs out of
         * memory.
         */
#ifndef CONFIG_SOFTMMU
        tcg_debug_assert(!have_mmap_lock());
#endif
        if (qemu_mutex_iothread_locked()) {
            qemu_mutex_unlock_iothread();
        }
        assert_no_pages_locked();
    }

    if (in_exclusive_region) {
        /* We might longjump out of either the codegen or the
         * execution, so must make sure we only end the exclusive
         * region if we started it.
         */
        parallel_cpus = true;
        end_exclusive();
    }
}

struct tb_desc {
    target_ulong pc;
    target_ulong cs_base;
    CPUArchState *env;
    tb_page_addr_t phys_page1;
    uint32_t flags;
    uint32_t cf_mask;
    uint32_t trace_vcpu_dstate;
};

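/* qht comparison callback: check whether a candidate TB matches every
 * field of the lookup descriptor, including the physical address of the
 * second page for TBs that cross a page boundary.
 */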
static bool tb_lookup_cmp(const void *p, const void *d)
{
    const TranslationBlock *tb = p;
    const struct tb_desc *desc = d;

    if (tb->pc == desc->pc &&
        tb->page_addr[0] == desc->phys_page1 &&
        tb->cs_base == desc->cs_base &&
        tb->flags == desc->flags &&
        tb->trace_vcpu_dstate == desc->trace_vcpu_dstate &&
        (tb_cflags(tb) & (CF_HASH_MASK | CF_INVALID)) == desc->cf_mask) {
        /* check next page if needed */
        if (tb->page_addr[1] == -1) {
            return true;
        } else {
            tb_page_addr_t phys_page2;
            target_ulong virt_page2;

            virt_page2 = (desc->pc & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
            phys_page2 = get_page_addr_code(desc->env, virt_page2);
            if (tb->page_addr[1] == phys_page2) {
                return true;
            }
        }
    }
    return false;
}

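/* Look up a TB in the global hash table.  Returns NULL if the guest PC
 * does not map to executable RAM or no matching TB exists.
 */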
TranslationBlock *tb_htable_lookup(CPUState *cpu, target_ulong pc,
                                   target_ulong cs_base, uint32_t flags,
                                   uint32_t cf_mask)
{
    tb_page_addr_t phys_pc;
    struct tb_desc desc;
    uint32_t h;

    desc.env = (CPUArchState *)cpu->env_ptr;
    desc.cs_base = cs_base;
    desc.flags = flags;
    desc.cf_mask = cf_mask;
    desc.trace_vcpu_dstate = *cpu->trace_dstate;
    desc.pc = pc;
    phys_pc = get_page_addr_code(desc.env, pc);
    if (phys_pc == -1) {
        return NULL;
    }
    desc.phys_page1 = phys_pc & TARGET_PAGE_MASK;
    h = tb_hash_func(phys_pc, pc, flags, cf_mask, *cpu->trace_dstate);
    return qht_lookup_custom(&tb_ctx.htable, &desc, h, tb_lookup_cmp);
}

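/* Set direct-jump slot n of a TB to the given host address, either by
 * patching the generated code in place or by updating the address used
 * by an indirect jump, depending on the TCG backend.
 */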
void tb_set_jmp_target(TranslationBlock *tb, int n, uintptr_t addr)
{
    if (TCG_TARGET_HAS_direct_jump) {
        uintptr_t offset = tb->jmp_target_arg[n];
        uintptr_t tc_ptr = (uintptr_t)tb->tc.ptr;
        tb_target_set_jmp_target(tc_ptr, tc_ptr + offset, addr);
    } else {
        tb->jmp_target_arg[n] = addr;
    }
}

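/* Chain 'tb' to 'tb_next' through direct-jump slot n and record the
 * link in tb_next's jump list so it can be undone if tb_next is
 * invalidated.  The slot is claimed atomically; if another thread beat
 * us to it, or tb_next is already invalid, leave things unchanged.
 */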
static inline void tb_add_jump(TranslationBlock *tb, int n,
                               TranslationBlock *tb_next)
{
    uintptr_t old;

    assert(n < ARRAY_SIZE(tb->jmp_list_next));
    qemu_spin_lock(&tb_next->jmp_lock);

    /* make sure the destination TB is valid */
    if (tb_next->cflags & CF_INVALID) {
        goto out_unlock_next;
    }
    /* Atomically claim the jump destination slot only if it was NULL */
    old = atomic_cmpxchg(&tb->jmp_dest[n], (uintptr_t)NULL, (uintptr_t)tb_next);
    if (old) {
        goto out_unlock_next;
    }

    /* patch the native jump address */
    tb_set_jmp_target(tb, n, (uintptr_t)tb_next->tc.ptr);

    /* add in TB jmp list */
    tb->jmp_list_next[n] = tb_next->jmp_list_head;
    tb_next->jmp_list_head = (uintptr_t)tb | n;

    qemu_spin_unlock(&tb_next->jmp_lock);

    qemu_log_mask_and_addr(CPU_LOG_EXEC, tb->pc,
                           "Linking TBs %p [" TARGET_FMT_lx
                           "] index %d -> %p [" TARGET_FMT_lx "]\n",
                           tb->tc.ptr, tb->pc, n,
                           tb_next->tc.ptr, tb_next->pc);
    return;

 out_unlock_next:
    qemu_spin_unlock(&tb_next->jmp_lock);
    return;
}

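/* Find (or generate) the TB for the current CPU state and, when it is
 * safe to do so, chain the previous TB directly to it so that future
 * executions skip this lookup.
 */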
static inline TranslationBlock *tb_find(CPUState *cpu,
                                        TranslationBlock *last_tb,
                                        int tb_exit, uint32_t cf_mask)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    uint32_t flags;

    tb = tb_lookup__cpu_state(cpu, &pc, &cs_base, &flags, cf_mask);
    if (tb == NULL) {
        mmap_lock();
        tb = tb_gen_code(cpu, pc, cs_base, flags, cf_mask);
        mmap_unlock();
        /* We add the TB in the virtual pc hash table for the fast lookup */
        atomic_set(&cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)], tb);
    }
#ifndef CONFIG_USER_ONLY
    /* We don't take care of direct jumps when address mapping changes in
     * system emulation. So it's not safe to make a direct jump to a TB
     * spanning two pages because the mapping for the second page can change.
     */
    if (tb->page_addr[1] != -1) {
        last_tb = NULL;
    }
#endif
    /* See if we can patch the calling TB. */
    if (last_tb) {
        tb_add_jump(last_tb, tb_exit, tb);
    }
    return tb;
}

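/* Return true if the CPU is halted and has no pending work; otherwise
 * clear the halted state and return false so execution can continue.
 * On x86, a pending APIC poll request is serviced first, since it may
 * wake the CPU.
 */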
static inline bool cpu_handle_halt(CPUState *cpu)
{
    if (cpu->halted) {
#if defined(TARGET_I386) && !defined(CONFIG_USER_ONLY)
        if ((cpu->interrupt_request & CPU_INTERRUPT_POLL)
            && replay_interrupt()) {
            X86CPU *x86_cpu = X86_CPU(cpu);
            qemu_mutex_lock_iothread();
            apic_poll_irq(x86_cpu->apic_state);
            cpu_reset_interrupt(cpu, CPU_INTERRUPT_POLL);
            qemu_mutex_unlock_iothread();
        }
#endif
        if (!cpu_has_work(cpu)) {
            return true;
        }

        cpu->halted = 0;
    }

    return false;
}

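/* Clear stale watchpoint hit flags and hand the debug exception to the
 * per-target handler.
 */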
static inline void cpu_handle_debug_exception(CPUState *cpu)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);
    CPUWatchpoint *wp;

    if (!cpu->watchpoint_hit) {
        QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }

    cc->debug_excp_handler(cpu);
}

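/* Process any pending exception.  Returns true, with *ret set, when
 * cpu_exec() must return to its caller; false when execution can
 * continue.
 */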
static inline bool cpu_handle_exception(CPUState *cpu, int *ret)
{
    if (cpu->exception_index < 0) {
#ifndef CONFIG_USER_ONLY
        if (replay_has_exception()
               && cpu->icount_decr.u16.low + cpu->icount_extra == 0) {
            /* try to cause an exception pending in the log */
            cpu_exec_nocache(cpu, 1, tb_find(cpu, NULL, 0, curr_cflags()), true);
        }
#endif
        if (cpu->exception_index < 0) {
            return false;
        }
    }

    if (cpu->exception_index >= EXCP_INTERRUPT) {
        /* exit request from the cpu execution loop */
        *ret = cpu->exception_index;
        if (*ret == EXCP_DEBUG) {
            cpu_handle_debug_exception(cpu);
        }
        cpu->exception_index = -1;
        return true;
    } else {
#if defined(CONFIG_USER_ONLY)
        /* if user mode only, we simulate a fake exception
           which will be handled outside the cpu execution
           loop */
#if defined(TARGET_I386)
        CPUClass *cc = CPU_GET_CLASS(cpu);
        cc->do_interrupt(cpu);
#endif
        *ret = cpu->exception_index;
        cpu->exception_index = -1;
        return true;
#else
        if (replay_exception()) {
            CPUClass *cc = CPU_GET_CLASS(cpu);
            qemu_mutex_lock_iothread();
            cc->do_interrupt(cpu);
            qemu_mutex_unlock_iothread();
            cpu->exception_index = -1;
        } else if (!replay_has_interrupt()) {
            /* give a chance to iothread in replay mode */
            *ret = EXCP_INTERRUPT;
            return true;
        }
#endif
    }

    return false;
}

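/* Process pending interrupt and exit requests.  Returns true when the
 * main loop must stop executing TBs and return, false to keep going.
 */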
static inline bool cpu_handle_interrupt(CPUState *cpu,
                                        TranslationBlock **last_tb)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

    /* Clear the interrupt flag now since we're processing
     * cpu->interrupt_request and cpu->exit_request.
     * Ensure zeroing happens before reading cpu->exit_request or
     * cpu->interrupt_request (see also smp_wmb in cpu_exit())
     */
    atomic_mb_set(&cpu->icount_decr.u16.high, 0);

    if (unlikely(atomic_read(&cpu->interrupt_request))) {
        int interrupt_request;
        qemu_mutex_lock_iothread();
        interrupt_request = cpu->interrupt_request;
        if (unlikely(cpu->singlestep_enabled & SSTEP_NOIRQ)) {
            /* Mask out external interrupts for this step. */
            interrupt_request &= ~CPU_INTERRUPT_SSTEP_MASK;
        }
        if (interrupt_request & CPU_INTERRUPT_DEBUG) {
            cpu->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
            cpu->exception_index = EXCP_DEBUG;
            qemu_mutex_unlock_iothread();
            return true;
        }
        if (replay_mode == REPLAY_MODE_PLAY && !replay_has_interrupt()) {
            /* Do nothing */
        } else if (interrupt_request & CPU_INTERRUPT_HALT) {
            replay_interrupt();
            cpu->interrupt_request &= ~CPU_INTERRUPT_HALT;
            cpu->halted = 1;
            cpu->exception_index = EXCP_HLT;
            qemu_mutex_unlock_iothread();
            return true;
        }
#if defined(TARGET_I386)
        else if (interrupt_request & CPU_INTERRUPT_INIT) {
            X86CPU *x86_cpu = X86_CPU(cpu);
            CPUArchState *env = &x86_cpu->env;
            replay_interrupt();
            cpu_svm_check_intercept_param(env, SVM_EXIT_INIT, 0, 0);
            do_cpu_init(x86_cpu);
            cpu->exception_index = EXCP_HALTED;
            qemu_mutex_unlock_iothread();
            return true;
        }
#else
        else if (interrupt_request & CPU_INTERRUPT_RESET) {
            replay_interrupt();
            cpu_reset(cpu);
            qemu_mutex_unlock_iothread();
            return true;
        }
#endif
        /* The target hook has 3 exit conditions:
           False when the interrupt isn't processed,
           True when it is, and we should restart on a new TB,
           and by longjmp via cpu_loop_exit.  */
        else {
            if (cc->cpu_exec_interrupt(cpu, interrupt_request)) {
                replay_interrupt();
                cpu->exception_index = -1;
                *last_tb = NULL;
            }
            /* The target hook may have updated the 'cpu->interrupt_request';
             * reload the 'interrupt_request' value */
            interrupt_request = cpu->interrupt_request;
        }
        if (interrupt_request & CPU_INTERRUPT_EXITTB) {
            cpu->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
            /* ensure that no TB jump will be modified as
               the program flow was changed */
            *last_tb = NULL;
        }

        /* If we exit via cpu_loop_exit/longjmp it is reset in cpu_exec */
        qemu_mutex_unlock_iothread();
    }

    /* Finally, check if we need to exit to the main loop.  */
    if (unlikely(atomic_read(&cpu->exit_request)
        || (use_icount && cpu->icount_decr.u16.low + cpu->icount_extra == 0))) {
        atomic_set(&cpu->exit_request, 0);
        if (cpu->exception_index == -1) {
            cpu->exception_index = EXCP_INTERRUPT;
        }
        return true;
    }

    return false;
}

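/* Execute one TB.  If it exits with TB_EXIT_REQUESTED, either something
 * asked us to return to the main loop or the icount budget for this
 * time slice ran out; in the latter case refill the decrementer and, if
 * no extra budget remains, run the last few instructions in a
 * throw-away non-cached TB before returning to the main loop.
 */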
static inline void cpu_loop_exec_tb(CPUState *cpu, TranslationBlock *tb,
                                    TranslationBlock **last_tb, int *tb_exit)
{
    uintptr_t ret;
    int32_t insns_left;

    trace_exec_tb(tb, tb->pc);
    ret = cpu_tb_exec(cpu, tb);
    tb = (TranslationBlock *)(ret & ~TB_EXIT_MASK);
    *tb_exit = ret & TB_EXIT_MASK;
    if (*tb_exit != TB_EXIT_REQUESTED) {
        *last_tb = tb;
        return;
    }

    *last_tb = NULL;
    insns_left = atomic_read(&cpu->icount_decr.u32);
    if (insns_left < 0) {
        /* Something asked us to stop executing chained TBs; just
         * continue round the main loop. Whatever requested the exit
         * will also have set something else (eg exit_request or
         * interrupt_request) which will be handled by
         * cpu_handle_interrupt.  cpu_handle_interrupt will also
         * clear cpu->icount_decr.u16.high.
         */
        return;
    }

    /* Instruction counter expired.  */
    assert(use_icount);
#ifndef CONFIG_USER_ONLY
    /* Ensure global icount has gone forward */
    cpu_update_icount(cpu);
    /* Refill decrementer and continue execution.  */
    insns_left = MIN(0xffff, cpu->icount_budget);
    cpu->icount_decr.u16.low = insns_left;
    cpu->icount_extra = cpu->icount_budget - insns_left;
    if (!cpu->icount_extra) {
        /* Execute any remaining instructions, then let the main loop
         * handle the next event.
         */
        if (insns_left > 0) {
            cpu_exec_nocache(cpu, insns_left, tb, false);
        }
    }
#endif
}

/* main execution loop */

int cpu_exec(CPUState *cpu)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);
    int ret;
    SyncClocks sc = { 0 };

    /* replay_interrupt may need current_cpu */
    current_cpu = cpu;

    if (cpu_handle_halt(cpu)) {
        return EXCP_HALTED;
    }

    rcu_read_lock();

    cc->cpu_exec_enter(cpu);

    /* Calculate difference between guest clock and host clock.
     * This delay includes the delay of the last cycle, so
     * what we have to do is sleep until it is 0. As for the
     * advance/delay we gain here, we try to fix it next time.
     */
    init_delay_params(&sc, cpu);

    /* prepare setjmp context for exception handling */
    if (sigsetjmp(cpu->jmp_env, 0) != 0) {
#if defined(__clang__) || !QEMU_GNUC_PREREQ(4, 6)
        /* Some compilers wrongly smash all local variables after
         * siglongjmp. There were bug reports for gcc 4.5.0 and clang.
         * Reload essential local variables here for those compilers.
         * Newer versions of gcc would complain about this code (-Wclobbered). */
        cpu = current_cpu;
        cc = CPU_GET_CLASS(cpu);
#else /* buggy compiler */
        /* Assert that the compiler does not smash local variables. */
        g_assert(cpu == current_cpu);
        g_assert(cc == CPU_GET_CLASS(cpu));
#endif /* buggy compiler */
#ifndef CONFIG_SOFTMMU
        tcg_debug_assert(!have_mmap_lock());
#endif
        if (qemu_mutex_iothread_locked()) {
            qemu_mutex_unlock_iothread();
        }
        assert_no_pages_locked();
    }

    /* if an exception is pending, we execute it here */
    while (!cpu_handle_exception(cpu, &ret)) {
        TranslationBlock *last_tb = NULL;
        int tb_exit = 0;

        while (!cpu_handle_interrupt(cpu, &last_tb)) {
            uint32_t cflags = cpu->cflags_next_tb;
            TranslationBlock *tb;

            /* When requested, use an exact setting for cflags for the next
               execution.  This is used for icount, precise smc, and stop-
               after-access watchpoints.  Since this request should never
               have CF_INVALID set, -1 is a convenient invalid value that
               does not require tcg headers for cpu_common_reset.  */
            if (cflags == -1) {
                cflags = curr_cflags();
            } else {
                cpu->cflags_next_tb = -1;
            }

            tb = tb_find(cpu, last_tb, tb_exit, cflags);
            cpu_loop_exec_tb(cpu, tb, &last_tb, &tb_exit);
            /* Try to align the host and virtual clocks
               if the guest is ahead of real time */
            align_clocks(&sc, cpu);
        }
    }

    cc->cpu_exec_exit(cpu);
    rcu_read_unlock();

    return ret;
}