qemu/accel/tcg/cpu-exec.c
/*
 *  emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu-common.h"
#include "cpu.h"
#include "trace.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg.h"
#include "qemu/atomic.h"
#include "sysemu/qtest.h"
#include "qemu/timer.h"
#include "qemu/rcu.h"
#include "exec/tb-hash.h"
#include "exec/tb-lookup.h"
#include "exec/log.h"
#include "qemu/main-loop.h"
#if defined(TARGET_I386) && !defined(CONFIG_USER_ONLY)
#include "hw/i386/apic.h"
#endif
#include "sysemu/cpus.h"
#include "sysemu/replay.h"

/* -icount align implementation. */

typedef struct SyncClocks {
    int64_t diff_clk;
    int64_t last_cpu_icount;
    int64_t realtime_clock;
} SyncClocks;

#if !defined(CONFIG_USER_ONLY)
/* Allow the guest to have a max 3ms advance.
 * The difference between the 2 clocks could therefore
 * oscillate around 0.
 */
#define VM_CLOCK_ADVANCE 3000000
#define THRESHOLD_REDUCE 1.5
#define MAX_DELAY_PRINT_RATE 2000000000LL
#define MAX_NB_PRINTS 100

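/* If the guest clock has run ahead of real time by more than
 * VM_CLOCK_ADVANCE, sleep the host thread so the two clocks drift
 * back together. */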
static void align_clocks(SyncClocks *sc, CPUState *cpu)
{
    int64_t cpu_icount;

    if (!icount_align_option) {
        return;
    }

    cpu_icount = cpu->icount_extra + cpu_neg(cpu)->icount_decr.u16.low;
    sc->diff_clk += cpu_icount_to_ns(sc->last_cpu_icount - cpu_icount);
    sc->last_cpu_icount = cpu_icount;

    if (sc->diff_clk > VM_CLOCK_ADVANCE) {
#ifndef _WIN32
        struct timespec sleep_delay, rem_delay;
        sleep_delay.tv_sec = sc->diff_clk / 1000000000LL;
        sleep_delay.tv_nsec = sc->diff_clk % 1000000000LL;
        if (nanosleep(&sleep_delay, &rem_delay) < 0) {
            sc->diff_clk = rem_delay.tv_sec * 1000000000LL + rem_delay.tv_nsec;
        } else {
            sc->diff_clk = 0;
        }
#else
        Sleep(sc->diff_clk / SCALE_MS);
        sc->diff_clk = 0;
#endif
    }
}

static void print_delay(const SyncClocks *sc)
{
    static float threshold_delay;
    static int64_t last_realtime_clock;
    static int nb_prints;

    if (icount_align_option &&
        sc->realtime_clock - last_realtime_clock >= MAX_DELAY_PRINT_RATE &&
        nb_prints < MAX_NB_PRINTS) {
        if ((-sc->diff_clk / (float)1000000000LL > threshold_delay) ||
            (-sc->diff_clk / (float)1000000000LL <
             (threshold_delay - THRESHOLD_REDUCE))) {
            threshold_delay = (-sc->diff_clk / 1000000000LL) + 1;
            printf("Warning: The guest is now late by %.1f to %.1f seconds\n",
                   threshold_delay - 1,
                   threshold_delay);
            nb_prints++;
            last_realtime_clock = sc->realtime_clock;
        }
    }
}

static void init_delay_params(SyncClocks *sc, CPUState *cpu)
{
    if (!icount_align_option) {
        return;
    }
    sc->realtime_clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL_RT);
    sc->diff_clk = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) - sc->realtime_clock;
    sc->last_cpu_icount
        = cpu->icount_extra + cpu_neg(cpu)->icount_decr.u16.low;
    if (sc->diff_clk < max_delay) {
        max_delay = sc->diff_clk;
    }
    if (sc->diff_clk > max_advance) {
        max_advance = sc->diff_clk;
    }

    /* Print at most every 2s if the guest is late. We limit the number
       of printed messages to MAX_NB_PRINTS (currently 100). */
    print_delay(sc);
}
#else
static void align_clocks(SyncClocks *sc, const CPUState *cpu)
{
}

static void init_delay_params(SyncClocks *sc, const CPUState *cpu)
{
}
#endif /* CONFIG_USER_ONLY */

/* Execute a TB, and fix up the CPU state afterwards if necessary */
static inline tcg_target_ulong cpu_tb_exec(CPUState *cpu, TranslationBlock *itb)
{
    CPUArchState *env = cpu->env_ptr;
    uintptr_t ret;
    TranslationBlock *last_tb;
    int tb_exit;
    uint8_t *tb_ptr = itb->tc.ptr;

    qemu_log_mask_and_addr(CPU_LOG_EXEC, itb->pc,
                           "Trace %d: %p ["
                           TARGET_FMT_lx "/" TARGET_FMT_lx "/%#x] %s\n",
                           cpu->cpu_index, itb->tc.ptr,
                           itb->cs_base, itb->pc, itb->flags,
                           lookup_symbol(itb->pc));

#if defined(DEBUG_DISAS)
    if (qemu_loglevel_mask(CPU_LOG_TB_CPU)
        && qemu_log_in_addr_range(itb->pc)) {
        qemu_log_lock();
        int flags = 0;
        if (qemu_loglevel_mask(CPU_LOG_TB_FPU)) {
            flags |= CPU_DUMP_FPU;
        }
#if defined(TARGET_I386)
        flags |= CPU_DUMP_CCOP;
#endif
        log_cpu_state(cpu, flags);
        qemu_log_unlock();
    }
#endif /* DEBUG_DISAS */

    ret = tcg_qemu_tb_exec(env, tb_ptr);
    cpu->can_do_io = 1;
    last_tb = (TranslationBlock *)(ret & ~TB_EXIT_MASK);
    tb_exit = ret & TB_EXIT_MASK;
    trace_exec_tb_exit(last_tb, tb_exit);

    if (tb_exit > TB_EXIT_IDX1) {
        /* We didn't start executing this TB (eg because the instruction
         * counter hit zero); we must restore the guest PC to the address
         * of the start of the TB.
         */
        CPUClass *cc = CPU_GET_CLASS(cpu);
        qemu_log_mask_and_addr(CPU_LOG_EXEC, last_tb->pc,
                               "Stopped execution of TB chain before %p ["
                               TARGET_FMT_lx "] %s\n",
                               last_tb->tc.ptr, last_tb->pc,
                               lookup_symbol(last_tb->pc));
        if (cc->synchronize_from_tb) {
            cc->synchronize_from_tb(cpu, last_tb);
        } else {
            assert(cc->set_pc);
            cc->set_pc(cpu, last_tb->pc);
        }
    }
    return ret;
}

#ifndef CONFIG_USER_ONLY
/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
static void cpu_exec_nocache(CPUState *cpu, int max_cycles,
                             TranslationBlock *orig_tb, bool ignore_icount)
{
    TranslationBlock *tb;
    uint32_t cflags = curr_cflags() | CF_NOCACHE;

    if (ignore_icount) {
        cflags &= ~CF_USE_ICOUNT;
    }

    /* Should never happen.
       We only end up here when an existing TB is too long.  */
    cflags |= MIN(max_cycles, CF_COUNT_MASK);

    mmap_lock();
    tb = tb_gen_code(cpu, orig_tb->pc, orig_tb->cs_base,
                     orig_tb->flags, cflags);
    tb->orig_tb = orig_tb;
    mmap_unlock();

    /* execute the generated code */
    trace_exec_tb_nocache(tb, tb->pc);
    cpu_tb_exec(cpu, tb);

    mmap_lock();
    tb_phys_invalidate(tb, -1);
    mmap_unlock();
    tcg_tb_remove(tb);
}
#endif

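/* Execute a single guest instruction with all other vCPUs excluded
 * (start_exclusive/end_exclusive), so that operations which cannot be
 * emulated atomically run without interference. */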
void cpu_exec_step_atomic(CPUState *cpu)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    uint32_t flags;
    uint32_t cflags = 1;
    uint32_t cf_mask = cflags & CF_HASH_MASK;

    if (sigsetjmp(cpu->jmp_env, 0) == 0) {
        tb = tb_lookup__cpu_state(cpu, &pc, &cs_base, &flags, cf_mask);
        if (tb == NULL) {
            mmap_lock();
            tb = tb_gen_code(cpu, pc, cs_base, flags, cflags);
            mmap_unlock();
        }

        start_exclusive();

        /* Since we got here, we know that parallel_cpus must be true.  */
        parallel_cpus = false;
        cc->cpu_exec_enter(cpu);
        /* execute the generated code */
        trace_exec_tb(tb, pc);
        cpu_tb_exec(cpu, tb);
        cc->cpu_exec_exit(cpu);
    } else {
        /*
         * The mmap_lock is dropped by tb_gen_code if it runs out of
         * memory.
         */
#ifndef CONFIG_SOFTMMU
        tcg_debug_assert(!have_mmap_lock());
#endif
        if (qemu_mutex_iothread_locked()) {
            qemu_mutex_unlock_iothread();
        }
        assert_no_pages_locked();
        qemu_plugin_disable_mem_helpers(cpu);
    }

    if (cpu_in_exclusive_context(cpu)) {
        /* We might longjump out of either the codegen or the
         * execution, so must make sure we only end the exclusive
         * region if we started it.
         */
        parallel_cpus = true;
        end_exclusive();
    }
}

struct tb_desc {
    target_ulong pc;
    target_ulong cs_base;
    CPUArchState *env;
    tb_page_addr_t phys_page1;
    uint32_t flags;
    uint32_t cf_mask;
    uint32_t trace_vcpu_dstate;
};

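/* qht comparison callback: match a candidate TB against the lookup key
 * described by struct tb_desc, including the physical address of a
 * possible second guest page. */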
static bool tb_lookup_cmp(const void *p, const void *d)
{
    const TranslationBlock *tb = p;
    const struct tb_desc *desc = d;

    if (tb->pc == desc->pc &&
        tb->page_addr[0] == desc->phys_page1 &&
        tb->cs_base == desc->cs_base &&
        tb->flags == desc->flags &&
        tb->trace_vcpu_dstate == desc->trace_vcpu_dstate &&
        (tb_cflags(tb) & (CF_HASH_MASK | CF_INVALID)) == desc->cf_mask) {
        /* check next page if needed */
        if (tb->page_addr[1] == -1) {
            return true;
        } else {
            tb_page_addr_t phys_page2;
            target_ulong virt_page2;

            virt_page2 = (desc->pc & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
            phys_page2 = get_page_addr_code(desc->env, virt_page2);
            if (tb->page_addr[1] == phys_page2) {
                return true;
            }
        }
    }
    return false;
}

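/* Look up a TB in the global hash table by physical PC, virtual PC, flags,
 * cflags hash and trace state.  Returns NULL if the current PC is not
 * backed by RAM or no matching TB exists. */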
TranslationBlock *tb_htable_lookup(CPUState *cpu, target_ulong pc,
                                   target_ulong cs_base, uint32_t flags,
                                   uint32_t cf_mask)
{
    tb_page_addr_t phys_pc;
    struct tb_desc desc;
    uint32_t h;

    desc.env = (CPUArchState *)cpu->env_ptr;
    desc.cs_base = cs_base;
    desc.flags = flags;
    desc.cf_mask = cf_mask;
    desc.trace_vcpu_dstate = *cpu->trace_dstate;
    desc.pc = pc;
    phys_pc = get_page_addr_code(desc.env, pc);
    if (phys_pc == -1) {
        return NULL;
    }
    desc.phys_page1 = phys_pc & TARGET_PAGE_MASK;
    h = tb_hash_func(phys_pc, pc, flags, cf_mask, *cpu->trace_dstate);
    return qht_lookup_custom(&tb_ctx.htable, &desc, h, tb_lookup_cmp);
}

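/* Point jump slot 'n' of 'tb' at 'addr', either by patching the generated
 * code in place (direct jump) or by updating the stored indirect target. */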
void tb_set_jmp_target(TranslationBlock *tb, int n, uintptr_t addr)
{
    if (TCG_TARGET_HAS_direct_jump) {
        uintptr_t offset = tb->jmp_target_arg[n];
        uintptr_t tc_ptr = (uintptr_t)tb->tc.ptr;
        tb_target_set_jmp_target(tc_ptr, tc_ptr + offset, addr);
    } else {
        tb->jmp_target_arg[n] = addr;
    }
}

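/* Chain 'tb' to 'tb_next' through jump slot 'n': claim the slot, patch the
 * native jump, and record the link so it can be undone if 'tb_next' is
 * later invalidated. */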
static inline void tb_add_jump(TranslationBlock *tb, int n,
                               TranslationBlock *tb_next)
{
    uintptr_t old;

    assert(n < ARRAY_SIZE(tb->jmp_list_next));
    qemu_spin_lock(&tb_next->jmp_lock);

    /* make sure the destination TB is valid */
    if (tb_next->cflags & CF_INVALID) {
        goto out_unlock_next;
    }
    /* Atomically claim the jump destination slot only if it was NULL */
    old = atomic_cmpxchg(&tb->jmp_dest[n], (uintptr_t)NULL, (uintptr_t)tb_next);
    if (old) {
        goto out_unlock_next;
    }

    /* patch the native jump address */
    tb_set_jmp_target(tb, n, (uintptr_t)tb_next->tc.ptr);

    /* add in TB jmp list */
    tb->jmp_list_next[n] = tb_next->jmp_list_head;
    tb_next->jmp_list_head = (uintptr_t)tb | n;

    qemu_spin_unlock(&tb_next->jmp_lock);

    qemu_log_mask_and_addr(CPU_LOG_EXEC, tb->pc,
                           "Linking TBs %p [" TARGET_FMT_lx
                           "] index %d -> %p [" TARGET_FMT_lx "]\n",
                           tb->tc.ptr, tb->pc, n,
                           tb_next->tc.ptr, tb_next->pc);
    return;

 out_unlock_next:
    qemu_spin_unlock(&tb_next->jmp_lock);
    return;
}

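/* Find the TB for the current CPU state, translating it if it is not
 * already cached, and try to chain it to the previously executed TB. */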
static inline TranslationBlock *tb_find(CPUState *cpu,
                                        TranslationBlock *last_tb,
                                        int tb_exit, uint32_t cf_mask)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    uint32_t flags;

    tb = tb_lookup__cpu_state(cpu, &pc, &cs_base, &flags, cf_mask);
    if (tb == NULL) {
        mmap_lock();
        tb = tb_gen_code(cpu, pc, cs_base, flags, cf_mask);
        mmap_unlock();
        /* We add the TB to the virtual-PC hash table for fast lookup */
        atomic_set(&cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)], tb);
    }
#ifndef CONFIG_USER_ONLY
    /* We don't take care of direct jumps when address mapping changes in
     * system emulation. So it's not safe to make a direct jump to a TB
     * spanning two pages because the mapping for the second page can change.
     */
    if (tb->page_addr[1] != -1) {
        last_tb = NULL;
    }
#endif
    /* See if we can patch the calling TB. */
    if (last_tb) {
        tb_add_jump(last_tb, tb_exit, tb);
    }
    return tb;
}

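/* Returns true while the CPU remains halted; clears cpu->halted and returns
 * false once the CPU has work to do. */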
static inline bool cpu_handle_halt(CPUState *cpu)
{
    if (cpu->halted) {
#if defined(TARGET_I386) && !defined(CONFIG_USER_ONLY)
        if ((cpu->interrupt_request & CPU_INTERRUPT_POLL)
            && replay_interrupt()) {
            X86CPU *x86_cpu = X86_CPU(cpu);
            qemu_mutex_lock_iothread();
            apic_poll_irq(x86_cpu->apic_state);
            cpu_reset_interrupt(cpu, CPU_INTERRUPT_POLL);
            qemu_mutex_unlock_iothread();
        }
#endif
        if (!cpu_has_work(cpu)) {
            return true;
        }

        cpu->halted = 0;
    }

    return false;
}

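/* If no watchpoint was actually hit, clear any stale BP_WATCHPOINT_HIT
 * flags, then hand the debug exception to the per-target handler. */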
static inline void cpu_handle_debug_exception(CPUState *cpu)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);
    CPUWatchpoint *wp;

    if (!cpu->watchpoint_hit) {
        QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }

    cc->debug_excp_handler(cpu);
}

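/* Process a pending exception, if any.  Returns true when the execution
 * loop should return to the caller with *ret set, false to keep going. */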
static inline bool cpu_handle_exception(CPUState *cpu, int *ret)
{
    if (cpu->exception_index < 0) {
#ifndef CONFIG_USER_ONLY
        if (replay_has_exception()
            && cpu_neg(cpu)->icount_decr.u16.low + cpu->icount_extra == 0) {
            /* try to cause an exception pending in the log */
            cpu_exec_nocache(cpu, 1, tb_find(cpu, NULL, 0, curr_cflags()), true);
        }
#endif
        if (cpu->exception_index < 0) {
            return false;
        }
    }

    if (cpu->exception_index >= EXCP_INTERRUPT) {
        /* exit request from the cpu execution loop */
        *ret = cpu->exception_index;
        if (*ret == EXCP_DEBUG) {
            cpu_handle_debug_exception(cpu);
        }
        cpu->exception_index = -1;
        return true;
    } else {
#if defined(CONFIG_USER_ONLY)
        /* if user mode only, we simulate a fake exception
           which will be handled outside the cpu execution
           loop */
#if defined(TARGET_I386)
        CPUClass *cc = CPU_GET_CLASS(cpu);
        cc->do_interrupt(cpu);
#endif
        *ret = cpu->exception_index;
        cpu->exception_index = -1;
        return true;
#else
        if (replay_exception()) {
            CPUClass *cc = CPU_GET_CLASS(cpu);
            qemu_mutex_lock_iothread();
            cc->do_interrupt(cpu);
            qemu_mutex_unlock_iothread();
            cpu->exception_index = -1;
        } else if (!replay_has_interrupt()) {
            /* give a chance to iothread in replay mode */
            *ret = EXCP_INTERRUPT;
            return true;
        }
#endif
    }

    return false;
}

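/* Check for and service pending interrupts and exit requests.  Returns true
 * when the execution loop should break out and return to the main loop. */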
static inline bool cpu_handle_interrupt(CPUState *cpu,
                                        TranslationBlock **last_tb)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

    /* Clear the interrupt flag now since we're processing
     * cpu->interrupt_request and cpu->exit_request.
     * Ensure zeroing happens before reading cpu->exit_request or
     * cpu->interrupt_request (see also smp_wmb in cpu_exit())
     */
    atomic_mb_set(&cpu_neg(cpu)->icount_decr.u16.high, 0);

    if (unlikely(atomic_read(&cpu->interrupt_request))) {
        int interrupt_request;
        qemu_mutex_lock_iothread();
        interrupt_request = cpu->interrupt_request;
        if (unlikely(cpu->singlestep_enabled & SSTEP_NOIRQ)) {
            /* Mask out external interrupts for this step. */
            interrupt_request &= ~CPU_INTERRUPT_SSTEP_MASK;
        }
        if (interrupt_request & CPU_INTERRUPT_DEBUG) {
            cpu->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
            cpu->exception_index = EXCP_DEBUG;
            qemu_mutex_unlock_iothread();
            return true;
        }
        if (replay_mode == REPLAY_MODE_PLAY && !replay_has_interrupt()) {
            /* Do nothing */
        } else if (interrupt_request & CPU_INTERRUPT_HALT) {
            replay_interrupt();
            cpu->interrupt_request &= ~CPU_INTERRUPT_HALT;
            cpu->halted = 1;
            cpu->exception_index = EXCP_HLT;
            qemu_mutex_unlock_iothread();
            return true;
        }
#if defined(TARGET_I386)
        else if (interrupt_request & CPU_INTERRUPT_INIT) {
            X86CPU *x86_cpu = X86_CPU(cpu);
            CPUArchState *env = &x86_cpu->env;
            replay_interrupt();
            cpu_svm_check_intercept_param(env, SVM_EXIT_INIT, 0, 0);
            do_cpu_init(x86_cpu);
            cpu->exception_index = EXCP_HALTED;
            qemu_mutex_unlock_iothread();
            return true;
        }
#else
        else if (interrupt_request & CPU_INTERRUPT_RESET) {
            replay_interrupt();
            cpu_reset(cpu);
            qemu_mutex_unlock_iothread();
            return true;
        }
#endif
        /* The target hook has 3 exit conditions:
           False when the interrupt isn't processed,
           True when it is, and we should restart on a new TB,
           or it does not return at all, via longjmp through cpu_loop_exit.  */
        else {
            if (cc->cpu_exec_interrupt(cpu, interrupt_request)) {
                replay_interrupt();
                cpu->exception_index = -1;
                *last_tb = NULL;
            }
            /* The target hook may have updated the 'cpu->interrupt_request';
             * reload the 'interrupt_request' value */
            interrupt_request = cpu->interrupt_request;
        }
        if (interrupt_request & CPU_INTERRUPT_EXITTB) {
            cpu->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
            /* ensure that no TB jump will be modified as
               the program flow was changed */
            *last_tb = NULL;
        }

        /* If we exit via cpu_loop_exit/longjmp it is reset in cpu_exec */
        qemu_mutex_unlock_iothread();
    }

    /* Finally, check if we need to exit to the main loop.  */
    if (unlikely(atomic_read(&cpu->exit_request))
        || (use_icount
            && cpu_neg(cpu)->icount_decr.u16.low + cpu->icount_extra == 0)) {
        atomic_set(&cpu->exit_request, 0);
        if (cpu->exception_index == -1) {
            cpu->exception_index = EXCP_INTERRUPT;
        }
        return true;
    }

    return false;
}

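/* Execute one TB.  If the instruction counter expires, refill the icount
 * decrementer, or execute the remaining budgeted instructions without
 * caching the generated code. */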
static inline void cpu_loop_exec_tb(CPUState *cpu, TranslationBlock *tb,
                                    TranslationBlock **last_tb, int *tb_exit)
{
    uintptr_t ret;
    int32_t insns_left;

    trace_exec_tb(tb, tb->pc);
    ret = cpu_tb_exec(cpu, tb);
    tb = (TranslationBlock *)(ret & ~TB_EXIT_MASK);
    *tb_exit = ret & TB_EXIT_MASK;
    if (*tb_exit != TB_EXIT_REQUESTED) {
        *last_tb = tb;
        return;
    }

    *last_tb = NULL;
    insns_left = atomic_read(&cpu_neg(cpu)->icount_decr.u32);
    if (insns_left < 0) {
        /* Something asked us to stop executing chained TBs; just
         * continue round the main loop. Whatever requested the exit
         * will also have set something else (eg exit_request or
         * interrupt_request) which will be handled by
         * cpu_handle_interrupt.  cpu_handle_interrupt will also
         * clear cpu->icount_decr.u16.high.
         */
        return;
    }

    /* Instruction counter expired.  */
    assert(use_icount);
#ifndef CONFIG_USER_ONLY
    /* Ensure global icount has gone forward */
    cpu_update_icount(cpu);
    /* Refill decrementer and continue execution.  */
    insns_left = MIN(0xffff, cpu->icount_budget);
    cpu_neg(cpu)->icount_decr.u16.low = insns_left;
    cpu->icount_extra = cpu->icount_budget - insns_left;
    if (!cpu->icount_extra) {
        /* Execute any remaining instructions, then let the main loop
         * handle the next event.
         */
        if (insns_left > 0) {
            cpu_exec_nocache(cpu, insns_left, tb, false);
        }
    }
#endif
}

/* main execution loop */

int cpu_exec(CPUState *cpu)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);
    int ret;
    SyncClocks sc = { 0 };

    /* replay_interrupt may need current_cpu */
    current_cpu = cpu;

    if (cpu_handle_halt(cpu)) {
        return EXCP_HALTED;
    }

    rcu_read_lock();

    cc->cpu_exec_enter(cpu);

    /* Calculate difference between guest clock and host clock.
     * This delay includes the delay of the last cycle, so
     * what we have to do is sleep until it is 0. As for the
     * advance/delay we gain here, we try to fix it next time.
     */
    init_delay_params(&sc, cpu);

    /* prepare setjmp context for exception handling */
    if (sigsetjmp(cpu->jmp_env, 0) != 0) {
#if defined(__clang__) || !QEMU_GNUC_PREREQ(4, 6)
        /* Some compilers wrongly smash all local variables after
         * siglongjmp. There were bug reports for gcc 4.5.0 and clang.
         * Reload essential local variables here for those compilers.
         * Newer versions of gcc would complain about this code (-Wclobbered). */
        cpu = current_cpu;
        cc = CPU_GET_CLASS(cpu);
#else /* buggy compiler */
        /* Assert that the compiler does not smash local variables. */
        g_assert(cpu == current_cpu);
        g_assert(cc == CPU_GET_CLASS(cpu));
#endif /* buggy compiler */
#ifndef CONFIG_SOFTMMU
        tcg_debug_assert(!have_mmap_lock());
#endif
        if (qemu_mutex_iothread_locked()) {
            qemu_mutex_unlock_iothread();
        }
        qemu_plugin_disable_mem_helpers(cpu);

        assert_no_pages_locked();
    }

    /* if an exception is pending, we execute it here */
    while (!cpu_handle_exception(cpu, &ret)) {
        TranslationBlock *last_tb = NULL;
        int tb_exit = 0;

        while (!cpu_handle_interrupt(cpu, &last_tb)) {
            uint32_t cflags = cpu->cflags_next_tb;
            TranslationBlock *tb;

            /* When requested, use an exact setting for cflags for the next
               execution.  This is used for icount, precise smc, and stop-
               after-access watchpoints.  Since this request should never
               have CF_INVALID set, -1 is a convenient invalid value that
               does not require tcg headers for cpu_common_reset.  */
            if (cflags == -1) {
                cflags = curr_cflags();
            } else {
                cpu->cflags_next_tb = -1;
            }

            tb = tb_find(cpu, last_tb, tb_exit, cflags);
            cpu_loop_exec_tb(cpu, tb, &last_tb, &tb_exit);
            /* Try to align the host and virtual clocks
               if the guest is in advance */
            align_clocks(&sc, cpu);
        }
    }

    cc->cpu_exec_exit(cpu);
    rcu_read_unlock();

    return ret;
}