qemu/accel/tcg/cpu-exec.c
/*
 *  emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu-common.h"
#include "cpu.h"
#include "trace.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg/tcg.h"
#include "qemu/atomic.h"
#include "sysemu/qtest.h"
#include "qemu/timer.h"
#include "qemu/rcu.h"
#include "exec/tb-hash.h"
#include "exec/tb-lookup.h"
#include "exec/log.h"
#include "qemu/main-loop.h"
#if defined(TARGET_I386) && !defined(CONFIG_USER_ONLY)
#include "hw/i386/apic.h"
#endif
#include "sysemu/cpus.h"
#include "sysemu/replay.h"
#include "qemu/etrace.h"

/* -icount align implementation. */

typedef struct SyncClocks {
    int64_t diff_clk;
    int64_t last_cpu_icount;
    int64_t realtime_clock;
} SyncClocks;

#if !defined(CONFIG_USER_ONLY)
/* Allow the guest to have a max 3ms advance.
 * The difference between the 2 clocks could therefore
 * oscillate around 0.
 */
#define VM_CLOCK_ADVANCE 3000000
#define THRESHOLD_REDUCE 1.5
#define MAX_DELAY_PRINT_RATE 2000000000LL
#define MAX_NB_PRINTS 100

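/* Sleep on the host whenever the guest's virtual clock has run more than
 * VM_CLOCK_ADVANCE ns ahead of real time, so that with -icount align the
 * two clocks keep oscillating around each other.
 */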
static void align_clocks(SyncClocks *sc, CPUState *cpu)
{
    int64_t cpu_icount;

    if (!icount_align_option) {
        return;
    }

    cpu_icount = cpu->icount_extra + cpu_neg(cpu)->icount_decr.u16.low;
    sc->diff_clk += cpu_icount_to_ns(sc->last_cpu_icount - cpu_icount);
    sc->last_cpu_icount = cpu_icount;

    if (sc->diff_clk > VM_CLOCK_ADVANCE) {
#ifndef _WIN32
        struct timespec sleep_delay, rem_delay;
        sleep_delay.tv_sec = sc->diff_clk / 1000000000LL;
        sleep_delay.tv_nsec = sc->diff_clk % 1000000000LL;
        if (nanosleep(&sleep_delay, &rem_delay) < 0) {
            sc->diff_clk = rem_delay.tv_sec * 1000000000LL + rem_delay.tv_nsec;
        } else {
            sc->diff_clk = 0;
        }
#else
        Sleep(sc->diff_clk / SCALE_MS);
        sc->diff_clk = 0;
#endif
    }
}

static void print_delay(const SyncClocks *sc)
{
    static float threshold_delay;
    static int64_t last_realtime_clock;
    static int nb_prints;

    if (icount_align_option &&
        sc->realtime_clock - last_realtime_clock >= MAX_DELAY_PRINT_RATE &&
        nb_prints < MAX_NB_PRINTS) {
        if ((-sc->diff_clk / (float)1000000000LL > threshold_delay) ||
            (-sc->diff_clk / (float)1000000000LL <
             (threshold_delay - THRESHOLD_REDUCE))) {
            threshold_delay = (-sc->diff_clk / 1000000000LL) + 1;
            printf("Warning: The guest is now late by %.1f to %.1f seconds\n",
                   threshold_delay - 1,
                   threshold_delay);
            nb_prints++;
            last_realtime_clock = sc->realtime_clock;
        }
    }
}

static void init_delay_params(SyncClocks *sc, CPUState *cpu)
{
    if (!icount_align_option) {
        return;
    }
    sc->realtime_clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL_RT);
    sc->diff_clk = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) - sc->realtime_clock;
    sc->last_cpu_icount
        = cpu->icount_extra + cpu_neg(cpu)->icount_decr.u16.low;
    if (sc->diff_clk < max_delay) {
        max_delay = sc->diff_clk;
    }
    if (sc->diff_clk > max_advance) {
        max_advance = sc->diff_clk;
    }

    /* Print at most every 2s if the guest is late. We limit the number
       of printed messages to MAX_NB_PRINTS (currently 100). */
    print_delay(sc);
}
#else
static void align_clocks(SyncClocks *sc, const CPUState *cpu)
{
}

static void init_delay_params(SyncClocks *sc, const CPUState *cpu)
{
}
#endif /* CONFIG_USER_ONLY */

/* Execute a TB, and fix up the CPU state afterwards if necessary */
static inline tcg_target_ulong cpu_tb_exec(CPUState *cpu, TranslationBlock *itb)
{
    CPUArchState *env = cpu->env_ptr;
    uintptr_t ret;
    TranslationBlock *last_tb;
    int tb_exit;
    uint8_t *tb_ptr = itb->tc.ptr;

    qemu_log_mask_and_addr(CPU_LOG_EXEC, itb->pc,
                           "Trace %d: %p ["
                           TARGET_FMT_lx "/" TARGET_FMT_lx "/%#x] %s\n",
                           cpu->cpu_index, itb->tc.ptr,
                           itb->cs_base, itb->pc, itb->flags,
                           lookup_symbol(itb->pc));

    if (qemu_etrace_mask(ETRACE_F_EXEC)) {
        etrace_dump_exec_start(&qemu_etracer, cpu->cpu_index,
                               itb->pc);
    }

#if defined(DEBUG_DISAS)
    if (qemu_loglevel_mask(CPU_LOG_TB_CPU)
        && qemu_log_in_addr_range(itb->pc)) {
        FILE *logfile = qemu_log_lock();
        int flags = 0;
        if (qemu_loglevel_mask(CPU_LOG_TB_FPU)) {
            flags |= CPU_DUMP_FPU;
        }
#if defined(TARGET_I386)
        flags |= CPU_DUMP_CCOP;
#endif
        log_cpu_state(cpu, flags);
        qemu_log_unlock(logfile);
    }
#endif /* DEBUG_DISAS */

    ret = tcg_qemu_tb_exec(env, tb_ptr);
    cpu->can_do_io = 1;
    last_tb = (TranslationBlock *)(ret & ~TB_EXIT_MASK);
    tb_exit = ret & TB_EXIT_MASK;
    trace_exec_tb_exit(last_tb, tb_exit);

    if (tb_exit > TB_EXIT_IDX1) {
        /* We didn't start executing this TB (eg because the instruction
         * counter hit zero); we must restore the guest PC to the address
         * of the start of the TB.
         */
        CPUClass *cc = CPU_GET_CLASS(cpu);
        qemu_log_mask_and_addr(CPU_LOG_EXEC, last_tb->pc,
                               "Stopped execution of TB chain before %p ["
                               TARGET_FMT_lx "] %s\n",
                               last_tb->tc.ptr, last_tb->pc,
                               lookup_symbol(last_tb->pc));
        if (cc->synchronize_from_tb) {
            cc->synchronize_from_tb(cpu, last_tb);
        } else {
            assert(cc->set_pc);
            cc->set_pc(cpu, last_tb->pc);
        }
    }
    return ret;
}

#ifndef CONFIG_USER_ONLY
/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
static void cpu_exec_nocache(CPUState *cpu, int max_cycles,
                             TranslationBlock *orig_tb, bool ignore_icount)
{
    TranslationBlock *tb;
    uint32_t cflags = curr_cflags() | CF_NOCACHE;

    if (ignore_icount) {
        cflags &= ~CF_USE_ICOUNT;
    }

    /* Should never happen.
       We only end up here when an existing TB is too long.  */
    cflags |= MIN(max_cycles, CF_COUNT_MASK);

    mmap_lock();
    tb = tb_gen_code(cpu, orig_tb->pc, orig_tb->cs_base,
                     orig_tb->flags, cflags);
    tb->orig_tb = orig_tb;
    mmap_unlock();

    /* execute the generated code */
    trace_exec_tb_nocache(tb, tb->pc);
    cpu_tb_exec(cpu, tb);

    mmap_lock();
    tb_phys_invalidate(tb, -1);
    mmap_unlock();
    tcg_tb_remove(tb);
}
#endif

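/* Execute a single guest instruction for this CPU inside an exclusive
 * region: all other vCPUs are stopped while the one-instruction TB
 * (cflags == 1) is generated and run.  This is the fallback path used
 * when a TB must be executed atomically (EXCP_ATOMIC).
 */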
void cpu_exec_step_atomic(CPUState *cpu)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    uint32_t flags;
    uint32_t cflags = 1;
    uint32_t cf_mask = cflags & CF_HASH_MASK;

    if (sigsetjmp(cpu->jmp_env, 0) == 0) {
        start_exclusive();

        tb = tb_lookup__cpu_state(cpu, &pc, &cs_base, &flags, cf_mask);
        if (tb == NULL) {
            mmap_lock();
            tb = tb_gen_code(cpu, pc, cs_base, flags, cflags);
            mmap_unlock();
        }

        /* Since we got here, we know that parallel_cpus must be true.  */
        parallel_cpus = false;
        cc->cpu_exec_enter(cpu);
        /* execute the generated code */
        trace_exec_tb(tb, pc);
        cpu_tb_exec(cpu, tb);
        cc->cpu_exec_exit(cpu);
    } else {
        /*
         * The mmap_lock is dropped by tb_gen_code if it runs out of
         * memory.
         */
#ifndef CONFIG_SOFTMMU
        tcg_debug_assert(!have_mmap_lock());
#endif
        if (qemu_mutex_iothread_locked()) {
            qemu_mutex_unlock_iothread();
        }
        assert_no_pages_locked();
        qemu_plugin_disable_mem_helpers(cpu);
    }

    /*
     * As we start the exclusive region before codegen we must still
     * be in the region if we longjump out of either the codegen or
     * the execution.
     */
    g_assert(cpu_in_exclusive_context(cpu));
    parallel_cpus = true;
    end_exclusive();
}

struct tb_desc {
    target_ulong pc;
    target_ulong cs_base;
    CPUArchState *env;
    tb_page_addr_t phys_page1;
    uint32_t flags;
    uint32_t cf_mask;
    uint32_t trace_vcpu_dstate;
};

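/* qht comparison callback used by tb_htable_lookup(): match a candidate TB
 * against the lookup key in 'struct tb_desc', including the physical
 * address of the second page for TBs that cross a page boundary.
 */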
static bool tb_lookup_cmp(const void *p, const void *d)
{
    const TranslationBlock *tb = p;
    const struct tb_desc *desc = d;

    if (tb->pc == desc->pc &&
        tb->page_addr[0] == desc->phys_page1 &&
        tb->cs_base == desc->cs_base &&
        tb->flags == desc->flags &&
        tb->trace_vcpu_dstate == desc->trace_vcpu_dstate &&
        (tb_cflags(tb) & (CF_HASH_MASK | CF_INVALID)) == desc->cf_mask) {
        /* check next page if needed */
        if (tb->page_addr[1] == -1) {
            return true;
        } else {
            tb_page_addr_t phys_page2;
            target_ulong virt_page2;

            virt_page2 = (desc->pc & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
            phys_page2 = get_page_addr_code(desc->env, virt_page2);
            if (tb->page_addr[1] == phys_page2) {
                return true;
            }
        }
    }
    return false;
}

TranslationBlock *tb_htable_lookup(CPUState *cpu, target_ulong pc,
                                   target_ulong cs_base, uint32_t flags,
                                   uint32_t cf_mask)
{
    tb_page_addr_t phys_pc;
    struct tb_desc desc;
    uint32_t h;

    desc.env = (CPUArchState *)cpu->env_ptr;
    desc.cs_base = cs_base;
    desc.flags = flags;
    desc.cf_mask = cf_mask;
    desc.trace_vcpu_dstate = *cpu->trace_dstate;
    desc.pc = pc;
    phys_pc = get_page_addr_code(desc.env, pc);
    if (phys_pc == -1) {
        return NULL;
    }
    desc.phys_page1 = phys_pc & TARGET_PAGE_MASK;
    h = tb_hash_func(phys_pc, pc, flags, cf_mask, *cpu->trace_dstate);
    return qht_lookup_custom(&tb_ctx.htable, &desc, h, tb_lookup_cmp);
}

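/* Set the destination of direct jump 'n' out of 'tb': either patch the
 * generated code in place (hosts with direct jumps) or store the address
 * that the indirect jump will load.
 */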
void tb_set_jmp_target(TranslationBlock *tb, int n, uintptr_t addr)
{
    if (TCG_TARGET_HAS_direct_jump) {
        uintptr_t offset = tb->jmp_target_arg[n];
        uintptr_t tc_ptr = (uintptr_t)tb->tc.ptr;
        tb_target_set_jmp_target(tc_ptr, tc_ptr + offset, addr);
    } else {
        tb->jmp_target_arg[n] = addr;
    }
}

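/* Chain 'tb' to 'tb_next' through jump slot 'n', provided tb_next is still
 * valid and the slot has not already been claimed by another thread.
 */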
static inline void tb_add_jump(TranslationBlock *tb, int n,
                               TranslationBlock *tb_next)
{
    uintptr_t old;

    assert(n < ARRAY_SIZE(tb->jmp_list_next));
    qemu_spin_lock(&tb_next->jmp_lock);

    /* make sure the destination TB is valid */
    if (tb_next->cflags & CF_INVALID) {
        goto out_unlock_next;
    }
    /* Atomically claim the jump destination slot only if it was NULL */
    old = atomic_cmpxchg(&tb->jmp_dest[n], (uintptr_t)NULL, (uintptr_t)tb_next);
    if (old) {
        goto out_unlock_next;
    }

    /* patch the native jump address */
    tb_set_jmp_target(tb, n, (uintptr_t)tb_next->tc.ptr);

    /* add in TB jmp list */
    tb->jmp_list_next[n] = tb_next->jmp_list_head;
    tb_next->jmp_list_head = (uintptr_t)tb | n;

    qemu_spin_unlock(&tb_next->jmp_lock);

    qemu_log_mask_and_addr(CPU_LOG_EXEC, tb->pc,
                           "Linking TBs %p [" TARGET_FMT_lx
                           "] index %d -> %p [" TARGET_FMT_lx "]\n",
                           tb->tc.ptr, tb->pc, n,
                           tb_next->tc.ptr, tb_next->pc);
    return;

 out_unlock_next:
    qemu_spin_unlock(&tb_next->jmp_lock);
    return;
}

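/* Find the TB matching the current CPU state, translating it if it is not
 * already cached, and try to chain it to the previously executed TB so that
 * execution can stay inside generated code.
 */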
static inline TranslationBlock *tb_find(CPUState *cpu,
                                        TranslationBlock *last_tb,
                                        int tb_exit, uint32_t cf_mask)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    uint32_t flags;

    tb = tb_lookup__cpu_state(cpu, &pc, &cs_base, &flags, cf_mask);
    if (tb == NULL) {
        mmap_lock();
        tb = tb_gen_code(cpu, pc, cs_base, flags, cf_mask);
        mmap_unlock();
        /* Add the TB to the virtual-PC hash table for fast lookup. */
        atomic_set(&cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)], tb);
    }
#ifndef CONFIG_USER_ONLY
    /* We don't take care of direct jumps when address mapping changes in
     * system emulation. So it's not safe to make a direct jump to a TB
     * spanning two pages because the mapping for the second page can change.
     */
    if (tb->page_addr[1] != -1) {
        last_tb = NULL;
    }
#endif
    /* See if we can patch the calling TB. */
    if (last_tb) {
        tb_add_jump(last_tb, tb_exit, tb);
    }
    return tb;
}

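/* Return true if the CPU should remain halted; otherwise clear cpu->halted
 * (if it was set) and return false so execution can proceed.
 */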
static inline bool cpu_handle_halt(CPUState *cpu)
{
    if (cpu->halted) {
#if defined(TARGET_I386) && !defined(CONFIG_USER_ONLY)
        if ((cpu->interrupt_request & CPU_INTERRUPT_POLL)
            && replay_interrupt()) {
            X86CPU *x86_cpu = X86_CPU(cpu);
            qemu_mutex_lock_iothread();
            apic_poll_irq(x86_cpu->apic_state);
            cpu_reset_interrupt(cpu, CPU_INTERRUPT_POLL);
            qemu_mutex_unlock_iothread();
        }
#endif

        if (qemu_etrace_mask(ETRACE_F_EXEC)) {
            const char *dev_name = object_get_canonical_path(OBJECT(cpu));
            etrace_event_u64(&qemu_etracer, cpu->cpu_index,
                             ETRACE_EVU64_F_PREV_VAL,
                             dev_name, "sleep", 0, 1);
        }

        if (!cpu_has_work(cpu) || cpu->reset_pin) {
            return true;
        }

        cpu->halted = 0;
    }

    return false;
}

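/* A debug exception (EXCP_DEBUG) is being delivered: if no watchpoint was
 * actually hit, clear any stale BP_WATCHPOINT_HIT flags, then invoke the
 * target-specific debug exception handler.
 */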
static inline void cpu_handle_debug_exception(CPUState *cpu)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);
    CPUWatchpoint *wp;

    if (!cpu->watchpoint_hit) {
        QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }

    cc->debug_excp_handler(cpu);
}

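/* Deliver any pending exception.  Returns true when cpu_exec() must leave
 * the execution loop with *ret set, false when execution can continue.
 */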
static inline bool cpu_handle_exception(CPUState *cpu, int *ret)
{
    if (cpu->exception_index < 0) {
#ifndef CONFIG_USER_ONLY
        if (replay_has_exception()
            && cpu_neg(cpu)->icount_decr.u16.low + cpu->icount_extra == 0) {
            /* try to cause an exception pending in the log */
            cpu_exec_nocache(cpu, 1, tb_find(cpu, NULL, 0, curr_cflags()), true);
        }
#endif
        if (cpu->exception_index < 0) {
            return false;
        }
    }

    if (cpu->exception_index >= EXCP_INTERRUPT) {
        /* exit request from the cpu execution loop */
        *ret = cpu->exception_index;
        if (*ret == EXCP_DEBUG) {
            cpu_handle_debug_exception(cpu);
        }
        cpu->exception_index = -1;
        return true;
    } else {
#if defined(CONFIG_USER_ONLY)
        /* if user mode only, we simulate a fake exception
           which will be handled outside the cpu execution
           loop */
#if defined(TARGET_I386)
        CPUClass *cc = CPU_GET_CLASS(cpu);
        cc->do_interrupt(cpu);
#endif
        *ret = cpu->exception_index;
        cpu->exception_index = -1;
        return true;
#else
        if (replay_exception()) {
            CPUClass *cc = CPU_GET_CLASS(cpu);
            qemu_mutex_lock_iothread();
            cc->do_interrupt(cpu);
            qemu_mutex_unlock_iothread();
            cpu->exception_index = -1;

            if (unlikely(cpu->singlestep_enabled)) {
                /*
                 * After processing the exception, ensure an EXCP_DEBUG is
                 * raised when single-stepping so that GDB doesn't miss the
                 * next instruction.
                 */
                *ret = EXCP_DEBUG;
                cpu_handle_debug_exception(cpu);
                return true;
            }
        } else if (!replay_has_interrupt()) {
            /* give a chance to iothread in replay mode */
            *ret = EXCP_INTERRUPT;
            return true;
        }
#endif
    }

    return false;
}

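/* Process pending interrupt and exit requests.  Returns true when the
 * execution loop must be left (cpu->exception_index tells the caller why),
 * false to keep executing TBs; *last_tb is cleared whenever chaining to the
 * previous TB is no longer valid.
 */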
static inline bool cpu_handle_interrupt(CPUState *cpu,
                                        TranslationBlock **last_tb)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

    /* Clear the interrupt flag now since we're processing
     * cpu->interrupt_request and cpu->exit_request.
     * Ensure zeroing happens before reading cpu->exit_request or
     * cpu->interrupt_request (see also smp_wmb in cpu_exit())
     */
    atomic_mb_set(&cpu_neg(cpu)->icount_decr.u16.high, 0);

    if (unlikely(atomic_read(&cpu->interrupt_request))) {
        int interrupt_request;
        qemu_mutex_lock_iothread();
        interrupt_request = cpu->interrupt_request;
        if (unlikely(cpu->singlestep_enabled & SSTEP_NOIRQ)) {
            /* Mask out external interrupts for this step. */
            interrupt_request &= ~CPU_INTERRUPT_SSTEP_MASK;
        }
        if (interrupt_request & CPU_INTERRUPT_DEBUG) {
            cpu->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
            cpu->exception_index = EXCP_DEBUG;
            qemu_mutex_unlock_iothread();
            return true;
        }
        if (replay_mode == REPLAY_MODE_PLAY && !replay_has_interrupt()) {
            /* Do nothing */
        } else if (interrupt_request & CPU_INTERRUPT_HALT) {
            replay_interrupt();
            cpu->interrupt_request &= ~CPU_INTERRUPT_HALT;
            cpu->halted = 1;
            cpu->exception_index = EXCP_HLT;
            qemu_mutex_unlock_iothread();
            return true;
        }
#if defined(TARGET_I386)
        else if (interrupt_request & CPU_INTERRUPT_INIT) {
            X86CPU *x86_cpu = X86_CPU(cpu);
            CPUArchState *env = &x86_cpu->env;
            replay_interrupt();
            cpu_svm_check_intercept_param(env, SVM_EXIT_INIT, 0, 0);
            do_cpu_init(x86_cpu);
            cpu->exception_index = EXCP_HALTED;
            qemu_mutex_unlock_iothread();
            return true;
        }
#else
        else if (interrupt_request & CPU_INTERRUPT_RESET) {
            replay_interrupt();
            cpu_reset(cpu);
            qemu_mutex_unlock_iothread();
            return true;
        }
#endif
        /* The target hook has 3 exit conditions:
           false when the interrupt isn't processed,
           true when it is, and we should restart on a new TB,
           and a longjmp out via cpu_loop_exit().  */
        else {
            if (cc->cpu_exec_interrupt(cpu, interrupt_request)) {
                replay_interrupt();
                /*
                 * After processing the interrupt, ensure an EXCP_DEBUG is
                 * raised when single-stepping so that GDB doesn't miss the
                 * next instruction.
                 */
                cpu->exception_index =
                    (cpu->singlestep_enabled ? EXCP_DEBUG : -1);
                *last_tb = NULL;
            }
            /* The target hook may have updated the 'cpu->interrupt_request';
             * reload the 'interrupt_request' value */
            interrupt_request = cpu->interrupt_request;
        }
        if (interrupt_request & CPU_INTERRUPT_EXITTB) {
            cpu->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
            /* ensure that no TB jump will be modified as
               the program flow was changed */
            *last_tb = NULL;
        }

        /* If we exit via cpu_loop_exit/longjmp it is reset in cpu_exec */
        qemu_mutex_unlock_iothread();
    }

    /* Finally, check if we need to exit to the main loop.  */
    if (unlikely(atomic_read(&cpu->exit_request))
        || (use_icount
            && cpu_neg(cpu)->icount_decr.u16.low + cpu->icount_extra == 0)) {
        atomic_set(&cpu->exit_request, 0);
        if (cpu->exception_index == -1) {
            cpu->exception_index = EXCP_INTERRUPT;
        }
        return true;
    }

    return false;
}

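/* Execute one TB chain starting at 'tb'.  On return, *last_tb and *tb_exit
 * describe where and why execution stopped; when icount is in use, the
 * instruction-count decrementer is refilled here and any leftover budget is
 * executed through a temporary nocache TB.
 */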
static inline void cpu_loop_exec_tb(CPUState *cpu, TranslationBlock *tb,
                                    TranslationBlock **last_tb, int *tb_exit)
{
    uintptr_t ret;
    int32_t insns_left;

    trace_exec_tb(tb, tb->pc);
    ret = cpu_tb_exec(cpu, tb);
    tb = (TranslationBlock *)(ret & ~TB_EXIT_MASK);
    *tb_exit = ret & TB_EXIT_MASK;
    if (*tb_exit != TB_EXIT_REQUESTED) {
        *last_tb = tb;
        return;
    }

    *last_tb = NULL;
    insns_left = atomic_read(&cpu_neg(cpu)->icount_decr.u32);
    if (insns_left < 0) {
        /* Something asked us to stop executing chained TBs; just
         * continue round the main loop. Whatever requested the exit
         * will also have set something else (eg exit_request or
         * interrupt_request) which will be handled by
         * cpu_handle_interrupt.  cpu_handle_interrupt will also
         * clear cpu->icount_decr.u16.high.
         */
        return;
    }

    /* Instruction counter expired.  */
    assert(use_icount);
#ifndef CONFIG_USER_ONLY
    /* Ensure global icount has gone forward */
    cpu_update_icount(cpu);
    /* Refill decrementer and continue execution.  */
    insns_left = MIN(0xffff, cpu->icount_budget);
    cpu_neg(cpu)->icount_decr.u16.low = insns_left;
    cpu->icount_extra = cpu->icount_budget - insns_left;
    if (!cpu->icount_extra) {
        /* Execute any remaining instructions, then let the main loop
         * handle the next event.
         */
        if (insns_left > 0) {
            cpu_exec_nocache(cpu, insns_left, tb, false);
        }
    }
#endif
}

/* main execution loop */

int cpu_exec(CPUState *cpu)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);
    int ret;
    CPUArchState *env = (CPUArchState *)cpu->env_ptr;
    SyncClocks sc = { 0 };

    /* replay_interrupt may need current_cpu */
    current_cpu = cpu;

    if (cpu_handle_halt(cpu)) {
        return EXCP_HALTED;
    }

    rcu_read_lock();

    cc->cpu_exec_enter(cpu);

    /* Calculate difference between guest clock and host clock.
     * This delay includes the delay of the last cycle, so
     * what we have to do is sleep until it is 0. As for the
     * advance/delay we gain here, we try to fix it next time.
     */
    init_delay_params(&sc, cpu);

    /* prepare setjmp context for exception handling */
    if (sigsetjmp(cpu->jmp_env, 0) != 0) {
#if defined(__clang__) || !QEMU_GNUC_PREREQ(4, 6)
        /* Some compilers wrongly smash all local variables after
         * siglongjmp. There were bug reports for gcc 4.5.0 and clang.
         * Reload essential local variables here for those compilers.
         * Newer versions of gcc would complain about this code (-Wclobbered). */
        cpu = current_cpu;
        cc = CPU_GET_CLASS(cpu);
#else /* buggy compiler */
        /* Assert that the compiler does not smash local variables. */
        g_assert(cpu == current_cpu);
        g_assert(cc == CPU_GET_CLASS(cpu));
#endif /* buggy compiler */
#ifndef CONFIG_SOFTMMU
        tcg_debug_assert(!have_mmap_lock());
#endif
        if (qemu_mutex_iothread_locked()) {
            qemu_mutex_unlock_iothread();
        }

        if (qemu_etrace_mask(ETRACE_F_EXEC)
            && qemu_etracer.exec_start_valid) {
            target_ulong cs_base, pc;
            uint32_t flags;

            cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
            etrace_dump_exec_end(&qemu_etracer, cpu->cpu_index, pc);
        }
        qemu_plugin_disable_mem_helpers(cpu);

        assert_no_pages_locked();
    }

    /* if an exception is pending, we execute it here */
    while (!cpu_handle_exception(cpu, &ret)) {
        TranslationBlock *last_tb = NULL;
        int tb_exit = 0;

        while (!cpu_handle_interrupt(cpu, &last_tb)) {
            uint32_t cflags = cpu->cflags_next_tb;
            TranslationBlock *tb;

            /* When requested, use an exact setting for cflags for the next
               execution.  This is used for icount, precise smc, and stop-
               after-access watchpoints.  Since this request should never
               have CF_INVALID set, -1 is a convenient invalid value that
               does not require tcg headers for cpu_common_reset.  */
            if (cflags == -1) {
                cflags = curr_cflags();
            } else {
                cpu->cflags_next_tb = -1;
            }

            tb = tb_find(cpu, last_tb, tb_exit, cflags);
            cpu_loop_exec_tb(cpu, tb, &last_tb, &tb_exit);

            if (qemu_etrace_mask(ETRACE_F_EXEC)) {
                target_ulong cs_base, pc;
                uint32_t flags;

                if (tb_exit) {
                    /* TB early exit, ask for CPU state.  */
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
                } else {
                    /* TB didn't exit, assume we ran all of it.  */
                    pc = tb->pc + tb->size;
                }
                etrace_dump_exec_end(&qemu_etracer,
                                     cpu->cpu_index, pc);
            }

            qemu_etracer.exec_start_valid = false;

            /* Try to align the host and virtual clocks
               if the guest is running ahead */
            align_clocks(&sc, cpu);
        }
    }

    cc->cpu_exec_exit(cpu);
    rcu_read_unlock();

    return ret;
}