/*
 *  emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "cpu.h"
#include "trace.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg.h"
#include "qemu/atomic.h"
#include "sysemu/qtest.h"
#include "qemu/timer.h"
#include "exec/address-spaces.h"
#include "qemu/rcu.h"
#include "exec/tb-hash.h"
#include "exec/tb-lookup.h"
#include "exec/log.h"
#include "qemu/main-loop.h"
#if defined(TARGET_I386) && !defined(CONFIG_USER_ONLY)
#include "hw/i386/apic.h"
#endif
#include "sysemu/cpus.h"
#include "sysemu/replay.h"
#include "qemu/etrace.h"

/* -icount align implementation. */

typedef struct SyncClocks {
    int64_t diff_clk;
    int64_t last_cpu_icount;
    int64_t realtime_clock;
} SyncClocks;
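
/*
 * Field roles, as used by align_clocks() and init_delay_params() below:
 *   diff_clk        - accumulated difference between the guest (icount)
 *                     clock and the host clock, in ns; positive when the
 *                     guest has run ahead of real time;
 *   last_cpu_icount - instruction budget observed at the previous
 *                     alignment, so only the delta since then is
 *                     converted to ns;
 *   realtime_clock  - QEMU_CLOCK_VIRTUAL_RT value sampled at init time,
 *                     also used to rate-limit the late-guest warnings in
 *                     print_delay().
 */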

#if !defined(CONFIG_USER_ONLY)
/* Allow the guest to have a max 3ms advance.
 * The difference between the 2 clocks could therefore
 * oscillate around 0.
 */
#define VM_CLOCK_ADVANCE 3000000
#define THRESHOLD_REDUCE 1.5
#define MAX_DELAY_PRINT_RATE 2000000000LL
#define MAX_NB_PRINTS 100

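/*
 * align_clocks: if the guest (icount) clock has run ahead of the host
 * clock by more than VM_CLOCK_ADVANCE ns, put the host thread to sleep
 * until the two are level again.  Any un-slept remainder is carried over
 * in sc->diff_clk for the next call.
 */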
static void align_clocks(SyncClocks *sc, const CPUState *cpu)
{
    int64_t cpu_icount;

    if (!icount_align_option) {
        return;
    }

    cpu_icount = cpu->icount_extra + cpu->icount_decr.u16.low;
    sc->diff_clk += cpu_icount_to_ns(sc->last_cpu_icount - cpu_icount);
    sc->last_cpu_icount = cpu_icount;

    if (sc->diff_clk > VM_CLOCK_ADVANCE) {
#ifndef _WIN32
        struct timespec sleep_delay, rem_delay;
        sleep_delay.tv_sec = sc->diff_clk / 1000000000LL;
        sleep_delay.tv_nsec = sc->diff_clk % 1000000000LL;
        if (nanosleep(&sleep_delay, &rem_delay) < 0) {
            sc->diff_clk = rem_delay.tv_sec * 1000000000LL + rem_delay.tv_nsec;
        } else {
            sc->diff_clk = 0;
        }
#else
        Sleep(sc->diff_clk / SCALE_MS);
        sc->diff_clk = 0;
#endif
    }
}

static void print_delay(const SyncClocks *sc)
{
    static float threshold_delay;
    static int64_t last_realtime_clock;
    static int nb_prints;

    if (icount_align_option &&
        sc->realtime_clock - last_realtime_clock >= MAX_DELAY_PRINT_RATE &&
        nb_prints < MAX_NB_PRINTS) {
        if ((-sc->diff_clk / (float)1000000000LL > threshold_delay) ||
            (-sc->diff_clk / (float)1000000000LL <
             (threshold_delay - THRESHOLD_REDUCE))) {
            threshold_delay = (-sc->diff_clk / 1000000000LL) + 1;
            printf("Warning: The guest is now late by %.1f to %.1f seconds\n",
                   threshold_delay - 1,
                   threshold_delay);
            nb_prints++;
            last_realtime_clock = sc->realtime_clock;
        }
    }
}

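/*
 * init_delay_params: take the initial guest/host clock difference as the
 * starting point for alignment and record the extremes seen so far in the
 * global max_delay / max_advance variables.
 */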
static void init_delay_params(SyncClocks *sc,
                              const CPUState *cpu)
{
    if (!icount_align_option) {
        return;
    }
    sc->realtime_clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL_RT);
    sc->diff_clk = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) - sc->realtime_clock;
    sc->last_cpu_icount = cpu->icount_extra + cpu->icount_decr.u16.low;
    if (sc->diff_clk < max_delay) {
        max_delay = sc->diff_clk;
    }
    if (sc->diff_clk > max_advance) {
        max_advance = sc->diff_clk;
    }

    /* Print every 2s max if the guest is late. We limit the number
       of printed messages to MAX_NB_PRINTS (currently 100). */
    print_delay(sc);
}
#else
static void align_clocks(SyncClocks *sc, const CPUState *cpu)
{
}

static void init_delay_params(SyncClocks *sc, const CPUState *cpu)
{
}
#endif /* CONFIG_USER_ONLY */

/* Execute a TB, and fix up the CPU state afterwards if necessary */
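/*
 * The value returned by tcg_qemu_tb_exec() is the address of the last
 * executed TB with the exit reason encoded in its low TB_EXIT_MASK bits.
 * When the exit index is above TB_EXIT_IDX1 the TB was abandoned before
 * its first instruction, so the guest PC has to be restored from the TB
 * itself (via synchronize_from_tb or set_pc).
 */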
static inline tcg_target_ulong cpu_tb_exec(CPUState *cpu, TranslationBlock *itb)
{
    CPUArchState *env = cpu->env_ptr;
    uintptr_t ret;
    TranslationBlock *last_tb;
    int tb_exit;
    uint8_t *tb_ptr = itb->tc.ptr;

    qemu_log_mask_and_addr(CPU_LOG_EXEC, itb->pc,
                           "Trace %p [%d: " TARGET_FMT_lx "] %s\n",
                           itb->tc.ptr, cpu->cpu_index, itb->pc,
                           lookup_symbol(itb->pc));

    if (qemu_etrace_mask(ETRACE_F_CPU)) {
        /* Tag the trace stream with the executing CPU and dump its
           register state through the etrace backend.  */
        qemu_etracer.current_unit_id = cpu->cpu_index;
        cpu_dump_state(cpu, (void *) &qemu_etracer,
                       etrace_note_fprintf, 0);
    }
    if (qemu_etrace_mask(ETRACE_F_EXEC)) {
        etrace_dump_exec_start(&qemu_etracer, cpu->cpu_index,
                               itb->pc);
    }

#if defined(DEBUG_DISAS)
    if (qemu_loglevel_mask(CPU_LOG_TB_CPU)
        && qemu_log_in_addr_range(itb->pc)) {
        qemu_log_lock();
#if defined(TARGET_I386)
        log_cpu_state(cpu, CPU_DUMP_CCOP);
#else
        log_cpu_state(cpu, 0);
#endif
        qemu_log_unlock();
    }
#endif /* DEBUG_DISAS */

    cpu->can_do_io = !use_icount;
    ret = tcg_qemu_tb_exec(env, tb_ptr);
    cpu->can_do_io = 1;
    last_tb = (TranslationBlock *)(ret & ~TB_EXIT_MASK);
    tb_exit = ret & TB_EXIT_MASK;
    trace_exec_tb_exit(last_tb, tb_exit);

    if (tb_exit > TB_EXIT_IDX1) {
        /* We didn't start executing this TB (eg because the instruction
         * counter hit zero); we must restore the guest PC to the address
         * of the start of the TB.
         */
        CPUClass *cc = CPU_GET_CLASS(cpu);
        qemu_log_mask_and_addr(CPU_LOG_EXEC, last_tb->pc,
                               "Stopped execution of TB chain before %p ["
                               TARGET_FMT_lx "] %s\n",
                               last_tb->tc.ptr, last_tb->pc,
                               lookup_symbol(last_tb->pc));
        if (cc->synchronize_from_tb) {
            cc->synchronize_from_tb(cpu, last_tb);
        } else {
            assert(cc->set_pc);
            cc->set_pc(cpu, last_tb->pc);
        }
    }
    return ret;
}

#ifndef CONFIG_USER_ONLY
/* Execute the code without caching the generated code. An interpreter
   could be used if available (and useful for target migration), but the
   memory usage would be too much.  */
static void cpu_exec_nocache(CPUState *cpu, int max_cycles,
                             TranslationBlock *orig_tb, bool ignore_icount)
{
    TranslationBlock *tb;
    uint32_t cflags = curr_cflags() | CF_NOCACHE;

    if (ignore_icount) {
        cflags &= ~CF_USE_ICOUNT;
    }

    /* Should never happen.
       We only end up here when an existing TB is too long.  */
    cflags |= MIN(max_cycles, CF_COUNT_MASK);

    tb_lock();
    tb = tb_gen_code(cpu, orig_tb->pc, orig_tb->cs_base,
                     orig_tb->flags, cflags);
    tb->orig_tb = orig_tb;
    tb_unlock();

    /* execute the generated code */
    trace_exec_tb_nocache(tb, tb->pc);
    cpu_tb_exec(cpu, tb);

    tb_lock();
    tb_phys_invalidate(tb, -1);
    tb_remove(tb);
    tb_unlock();
}
#endif

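/*
 * cpu_exec_step_atomic: execute a single guest instruction for @cpu while
 * all other vCPUs are stopped (start_exclusive/end_exclusive), with
 * parallel_cpus temporarily cleared so the generated code does not have
 * to be safe against concurrent vCPUs.  cflags == 1 requests a
 * one-instruction TB.
 */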
void cpu_exec_step_atomic(CPUState *cpu)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    uint32_t flags;
    uint32_t cflags = 1;
    uint32_t cf_mask = cflags & CF_HASH_MASK;
    /* volatile because we modify it between setjmp and longjmp */
    volatile bool in_exclusive_region = false;

    if (sigsetjmp(cpu->jmp_env, 0) == 0) {
        tb = tb_lookup__cpu_state(cpu, &pc, &cs_base, &flags, cf_mask);
        if (tb == NULL) {
            mmap_lock();
            tb_lock();
            tb = tb_htable_lookup(cpu, pc, cs_base, flags, cf_mask);
            if (likely(tb == NULL)) {
                tb = tb_gen_code(cpu, pc, cs_base, flags, cflags);
            }
            tb_unlock();
            mmap_unlock();
        }

        start_exclusive();

        /* Since we got here, we know that parallel_cpus must be true.  */
        parallel_cpus = false;
        in_exclusive_region = true;
        cc->cpu_exec_enter(cpu);
        /* execute the generated code */
        trace_exec_tb(tb, pc);
        cpu_tb_exec(cpu, tb);
        cc->cpu_exec_exit(cpu);
    } else {
        /* We may have exited due to another problem here, so we need
         * to reset any tb_locks we may have taken but didn't release.
         * The mmap_lock is dropped by tb_gen_code if it runs out of
         * memory.
         */
#ifndef CONFIG_SOFTMMU
        tcg_debug_assert(!have_mmap_lock());
#endif
        tb_lock_reset();
    }

    if (in_exclusive_region) {
        /* We might longjump out of either the codegen or the TB execution,
         * so avoid defining end_exclusive in the middle of them.
         */
        parallel_cpus = true;
        end_exclusive();
    }
}

struct tb_desc {
    target_ulong pc;
    target_ulong cs_base;
    CPUArchState *env;
    tb_page_addr_t phys_page1;
    uint32_t flags;
    uint32_t cf_mask;
    uint32_t trace_vcpu_dstate;
};

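/*
 * tb_cmp: comparison callback for the global TB hash table (QHT).  A
 * candidate TB matches only if every lookup key agrees: guest pc, first
 * physical page, cs_base, flags, per-vCPU trace state and the relevant
 * cflags bits (which also rules out TBs marked CF_INVALID).  TBs that
 * span two pages additionally have their second physical page checked.
 */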
static bool tb_cmp(const void *p, const void *d)
{
    const TranslationBlock *tb = p;
    const struct tb_desc *desc = d;

    if (tb->pc == desc->pc &&
        tb->page_addr[0] == desc->phys_page1 &&
        tb->cs_base == desc->cs_base &&
        tb->flags == desc->flags &&
        tb->trace_vcpu_dstate == desc->trace_vcpu_dstate &&
        (tb_cflags(tb) & (CF_HASH_MASK | CF_INVALID)) == desc->cf_mask) {
        /* check next page if needed */
        if (tb->page_addr[1] == -1) {
            return true;
        } else {
            tb_page_addr_t phys_page2;
            target_ulong virt_page2;

            virt_page2 = (desc->pc & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
            phys_page2 = get_page_addr_code(desc->env, virt_page2);
            if (tb->page_addr[1] == phys_page2) {
                return true;
            }
        }
    }
    return false;
}

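/*
 * tb_htable_lookup: slow-path lookup in the global TB hash table.  The
 * hash is derived from the physical PC, virtual PC, flags, cflags mask
 * and trace state; tb_cmp() above resolves any collisions.
 */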
TranslationBlock *tb_htable_lookup(CPUState *cpu, target_ulong pc,
                                   target_ulong cs_base, uint32_t flags,
                                   uint32_t cf_mask)
{
    tb_page_addr_t phys_pc;
    struct tb_desc desc;
    uint32_t h;

    desc.env = (CPUArchState *)cpu->env_ptr;
    desc.cs_base = cs_base;
    desc.flags = flags;
    desc.cf_mask = cf_mask;
    desc.trace_vcpu_dstate = *cpu->trace_dstate;
    desc.pc = pc;
    phys_pc = get_page_addr_code(desc.env, pc);
    desc.phys_page1 = phys_pc & TARGET_PAGE_MASK;
    h = tb_hash_func(phys_pc, pc, flags, cf_mask, *cpu->trace_dstate);
    return qht_lookup(&tb_ctx.htable, tb_cmp, &desc, h);
}

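/*
 * tb_set_jmp_target: point outgoing jump @n of @tb at host address @addr.
 * With direct jumps, jmp_target_arg holds an offset into the generated
 * code and the jump instruction itself is patched in place; otherwise
 * jmp_target_arg holds the destination address that the generated code
 * jumps through indirectly.
 */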
void tb_set_jmp_target(TranslationBlock *tb, int n, uintptr_t addr)
{
    if (TCG_TARGET_HAS_direct_jump) {
        uintptr_t offset = tb->jmp_target_arg[n];
        uintptr_t tc_ptr = (uintptr_t)tb->tc.ptr;
        tb_target_set_jmp_target(tc_ptr, tc_ptr + offset, addr);
    } else {
        tb->jmp_target_arg[n] = addr;
    }
}

/* Called with tb_lock held.  */
static inline void tb_add_jump(TranslationBlock *tb, int n,
                               TranslationBlock *tb_next)
{
    assert(n < ARRAY_SIZE(tb->jmp_list_next));
    if (tb->jmp_list_next[n]) {
        /* Another thread has already done this while we were
         * outside of the lock; nothing to do in this case */
        return;
    }
    qemu_log_mask_and_addr(CPU_LOG_EXEC, tb->pc,
                           "Linking TBs %p [" TARGET_FMT_lx
                           "] index %d -> %p [" TARGET_FMT_lx "]\n",
                           tb->tc.ptr, tb->pc, n,
                           tb_next->tc.ptr, tb_next->pc);

    /* set the jump target */
    tb_set_jmp_target(tb, n, (uintptr_t)tb_next->tc.ptr);

    /* add in TB jmp circular list */
    tb->jmp_list_next[n] = tb_next->jmp_list_first;
    tb_next->jmp_list_first = (uintptr_t)tb | n;
}

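/*
 * tb_find: return the TB to execute for the current CPU state.  Lookup
 * order is the per-vCPU tb_jmp_cache, then the global hash table, and
 * finally tb_gen_code() to translate a new block.  When a valid previous
 * TB is supplied, the two blocks are chained so the next execution can
 * jump directly from one to the other without returning to this loop.
 */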
static inline TranslationBlock *tb_find(CPUState *cpu,
                                        TranslationBlock *last_tb,
                                        int tb_exit, uint32_t cf_mask)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    uint32_t flags;
    bool acquired_tb_lock = false;

    tb = tb_lookup__cpu_state(cpu, &pc, &cs_base, &flags, cf_mask);
    if (tb == NULL) {
        /* mmap_lock is needed by tb_gen_code, and mmap_lock must be
         * taken outside tb_lock.
         */
        mmap_lock();
        tb_lock();
        acquired_tb_lock = true;

        /* There's a chance that our desired tb has been translated while
         * taking the locks so we check again inside the lock.
         */
        tb = tb_htable_lookup(cpu, pc, cs_base, flags, cf_mask);
        if (likely(tb == NULL)) {
            /* if no translated code available, then translate it now */
            tb = tb_gen_code(cpu, pc, cs_base, flags, cf_mask);
        }

        mmap_unlock();
        /* We add the TB in the virtual pc hash table for the fast lookup */
        atomic_set(&cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)], tb);
    }
#ifndef CONFIG_USER_ONLY
    /* We don't take care of direct jumps when address mapping changes in
     * system emulation. So it's not safe to make a direct jump to a TB
     * spanning two pages because the mapping for the second page can change.
     */
    if (tb->page_addr[1] != -1) {
        last_tb = NULL;
    }
#endif
    /* See if we can patch the calling TB. */
    if (last_tb && !qemu_loglevel_mask(CPU_LOG_TB_NOCHAIN)) {
        if (!acquired_tb_lock) {
            tb_lock();
            acquired_tb_lock = true;
        }
        if (!(tb->cflags & CF_INVALID)) {
            tb_add_jump(last_tb, tb_exit, tb);
        }
    }
    if (acquired_tb_lock) {
        tb_unlock();
    }
    return tb;
}

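/*
 * cpu_handle_halt: return true (and skip TB execution) while the vCPU is
 * halted and has no work pending.  On x86 system emulation a pending APIC
 * poll request is serviced first so that a resulting interrupt can bring
 * the CPU out of halt.
 */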
static inline bool cpu_handle_halt(CPUState *cpu)
{
    if (cpu->halted) {
#if defined(TARGET_I386) && !defined(CONFIG_USER_ONLY)
        if ((cpu->interrupt_request & CPU_INTERRUPT_POLL)
            && replay_interrupt()) {
            X86CPU *x86_cpu = X86_CPU(cpu);
            qemu_mutex_lock_iothread();
            apic_poll_irq(x86_cpu->apic_state);
            cpu_reset_interrupt(cpu, CPU_INTERRUPT_POLL);
            qemu_mutex_unlock_iothread();
        }
#endif

        if (qemu_etrace_mask(ETRACE_F_EXEC)) {
            const char *dev_name = object_get_canonical_path(OBJECT(cpu));
            etrace_event_u64(&qemu_etracer, cpu->cpu_index,
                             ETRACE_EVU64_F_PREV_VAL,
                             dev_name, "sleep", 0, 1);
        }

        if (!cpu_has_work(cpu)) {
            return true;
        }

        cpu->halted = 0;
    }

    return false;
}

static inline void cpu_handle_debug_exception(CPUState *cpu)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);
    CPUWatchpoint *wp;

    if (!cpu->watchpoint_hit) {
        QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }

    cc->debug_excp_handler(cpu);
}

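/*
 * cpu_handle_exception: if an exception is pending, dispatch it.  Returns
 * true with *ret set when cpu_exec() should leave the execution loop
 * (EXCP_INTERRUPT and above are passed straight back to the caller);
 * returns false when TB execution can continue.
 */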
static inline bool cpu_handle_exception(CPUState *cpu, int *ret)
{
    if (cpu->exception_index < 0) {
#ifndef CONFIG_USER_ONLY
        if (replay_has_exception()
            && cpu->icount_decr.u16.low + cpu->icount_extra == 0) {
            /* try to cause an exception pending in the log */
            cpu_exec_nocache(cpu, 1, tb_find(cpu, NULL, 0, curr_cflags()), true);
        }
#endif
        if (cpu->exception_index < 0) {
            return false;
        }
    }

    if (cpu->exception_index >= EXCP_INTERRUPT) {
        /* exit request from the cpu execution loop */
        *ret = cpu->exception_index;
        if (*ret == EXCP_DEBUG) {
            cpu_handle_debug_exception(cpu);
        }
        cpu->exception_index = -1;
        return true;
    } else {
#if defined(CONFIG_USER_ONLY)
        /* if user mode only, we simulate a fake exception
           which will be handled outside the cpu execution
           loop */
#if defined(TARGET_I386)
        CPUClass *cc = CPU_GET_CLASS(cpu);
        cc->do_interrupt(cpu);
#endif
        *ret = cpu->exception_index;
        cpu->exception_index = -1;
        return true;
#else
        if (replay_exception()) {
            CPUClass *cc = CPU_GET_CLASS(cpu);
            qemu_mutex_lock_iothread();
            cc->do_interrupt(cpu);
            qemu_mutex_unlock_iothread();
            cpu->exception_index = -1;
        } else if (!replay_has_interrupt()) {
            /* give a chance to iothread in replay mode */
            *ret = EXCP_INTERRUPT;
            return true;
        }
#endif
    }

    return false;
}

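/*
 * cpu_handle_interrupt: process pending interrupt and exit requests.
 * Hardware interrupts are dispatched under the iothread mutex, either by
 * the generic handling here (debug, halt, init/reset) or by the
 * target-specific cpu_exec_interrupt hook.  Returns true when the main
 * loop should stop executing TBs and go back to cpu_handle_exception().
 */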
static inline bool cpu_handle_interrupt(CPUState *cpu,
                                        TranslationBlock **last_tb)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);
    int32_t insns_left;

    /* Clear the interrupt flag now since we're processing
     * cpu->interrupt_request and cpu->exit_request.
     * Ensure zeroing happens before reading cpu->exit_request or
     * cpu->interrupt_request (see also smp_wmb in cpu_exit())
     */
    insns_left = atomic_read(&cpu->icount_decr.u32);
    atomic_set(&cpu->icount_decr.u16.high, 0);
    if (unlikely(insns_left < 0)) {
        /* Ensure the zeroing of icount_decr.u16.high above is visible
         * before we read cpu->exit_request or cpu->interrupt_request.
         */
        smp_mb();
    }

    if (unlikely(atomic_read(&cpu->interrupt_request))) {
        int interrupt_request;
        qemu_mutex_lock_iothread();
        interrupt_request = cpu->interrupt_request;
        if (unlikely(cpu->singlestep_enabled & SSTEP_NOIRQ)) {
            /* Mask out external interrupts for this step. */
            interrupt_request &= ~CPU_INTERRUPT_SSTEP_MASK;
        }
        if (interrupt_request & CPU_INTERRUPT_DEBUG) {
            cpu->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
            cpu->exception_index = EXCP_DEBUG;
            qemu_mutex_unlock_iothread();
            return true;
        }
        if (replay_mode == REPLAY_MODE_PLAY && !replay_has_interrupt()) {
            /* Do nothing */
        } else if (interrupt_request & CPU_INTERRUPT_HALT) {
            replay_interrupt();
            cpu->interrupt_request &= ~CPU_INTERRUPT_HALT;
            cpu->halted = 1;
            cpu->exception_index = EXCP_HLT;
            qemu_mutex_unlock_iothread();
            return true;
        }
#if defined(TARGET_I386)
        else if (interrupt_request & CPU_INTERRUPT_INIT) {
            X86CPU *x86_cpu = X86_CPU(cpu);
            CPUArchState *env = &x86_cpu->env;
            replay_interrupt();
            cpu_svm_check_intercept_param(env, SVM_EXIT_INIT, 0, 0);
            do_cpu_init(x86_cpu);
            cpu->exception_index = EXCP_HALTED;
            qemu_mutex_unlock_iothread();
            return true;
        }
#else
        else if (interrupt_request & CPU_INTERRUPT_RESET) {
            replay_interrupt();
            cpu_reset(cpu);
            qemu_mutex_unlock_iothread();
            return true;
        }
#endif
        /* The target hook has the ability to stop the guest; if it
         * handles the interrupt request, any chained TB is no longer
         * valid, so force a new lookup by clearing *last_tb.
         */
        else {
            if (cc->cpu_exec_interrupt(cpu, interrupt_request)) {
                replay_interrupt();
                *last_tb = NULL;
            }
            /* The target hook may have updated the 'cpu->interrupt_request';
             * reload the 'interrupt_request' value to handle the new
             * requests from here.
             */
            interrupt_request = cpu->interrupt_request;
        }
        if (interrupt_request & CPU_INTERRUPT_EXITTB) {
            cpu->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
            /* ensure that no TB jump will be modified as
               the program flow was changed */
            *last_tb = NULL;
        }

        /* If we exit via cpu_loop_exit/longjmp it is reset in cpu_exec */
        qemu_mutex_unlock_iothread();
    }

    /* Finally, check if we need to exit to the main loop.  */
    if (unlikely(atomic_read(&cpu->exit_request)
        || (use_icount && cpu->icount_decr.u16.low + cpu->icount_extra == 0))) {
        atomic_set(&cpu->exit_request, 0);
        cpu->exception_index = EXCP_INTERRUPT;
        return true;
    }

    return false;
}

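/*
 * cpu_loop_exec_tb: execute one TB and record it in *last_tb so the next
 * block can be chained to it.  A TB_EXIT_REQUESTED exit means someone
 * asked us to stop: either an external request (handled next time round
 * in cpu_handle_interrupt) or an expired icount budget, in which case the
 * decrementer is refilled and any leftover instructions are run with a
 * temporary non-cached TB.
 */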
static inline void cpu_loop_exec_tb(CPUState *cpu, TranslationBlock *tb,
                                    TranslationBlock **last_tb, int *tb_exit)
{
    uintptr_t ret;
    int32_t insns_left;

    trace_exec_tb(tb, tb->pc);
    ret = cpu_tb_exec(cpu, tb);
    tb = (TranslationBlock *)(ret & ~TB_EXIT_MASK);
    *tb_exit = ret & TB_EXIT_MASK;
    if (*tb_exit != TB_EXIT_REQUESTED) {
        *last_tb = tb;
        return;
    }

    *last_tb = NULL;
    insns_left = atomic_read(&cpu->icount_decr.u32);
    if (insns_left < 0) {
        /* Something asked us to stop executing chained TBs; just
         * continue round the main loop. Whatever requested the exit
         * will also have set something else (eg exit_request or
         * interrupt_request) which will be handled by
         * cpu_handle_interrupt.  cpu_handle_interrupt will also
         * clear cpu->icount_decr.u16.high.
         */
        return;
    }

    /* Instruction counter expired.  */
    assert(use_icount);
#ifndef CONFIG_USER_ONLY
    /* Ensure global icount has gone forward */
    cpu_update_icount(cpu);
    /* Refill decrementer and continue execution.  */
    insns_left = MIN(0xffff, cpu->icount_budget);
    cpu->icount_decr.u16.low = insns_left;
    cpu->icount_extra = cpu->icount_budget - insns_left;
    if (!cpu->icount_extra) {
        /* Execute any remaining instructions, then let the main loop
         * handle the next event.
         */
        if (insns_left > 0) {
            cpu_exec_nocache(cpu, insns_left, tb, false);
        }
    }
#endif
}

/* main execution loop */
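/*
 * cpu_exec: run guest code on @cpu until something forces a return to the
 * main loop.  The outer loop drains pending exceptions, the inner loop
 * services interrupt/exit requests and then finds and executes one TB at
 * a time; siglongjmp via cpu->jmp_env restarts the loops after a
 * cpu_loop_exit() from anywhere inside TB execution.
 */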
int cpu_exec(CPUState *cpu)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);
    int ret;
    CPUArchState *env = (CPUArchState *)cpu->env_ptr;
    SyncClocks sc = { 0 };

    /* replay_interrupt may need current_cpu */
    current_cpu = cpu;

    if (cpu_handle_halt(cpu)) {
        return EXCP_HALTED;
    }

    rcu_read_lock();

    cc->cpu_exec_enter(cpu);

    /* Calculate difference between guest clock and host clock.
     * This delay includes the delay of the last cycle, so
     * what we have to do is sleep until it is 0. As for the
     * advance/delay we gain here, we try to fix it next time.
     */
    init_delay_params(&sc, cpu);

    /* prepare setjmp context for exception handling */
    if (sigsetjmp(cpu->jmp_env, 0) != 0) {
#if defined(__clang__) || !QEMU_GNUC_PREREQ(4, 6)
        /* Some compilers wrongly smash all local variables after
         * siglongjmp. There were bug reports for gcc 4.5.0 and clang.
         * Reload essential local variables here for those compilers.
         * Newer versions of gcc would complain about this code (-Wclobbered). */
        cpu = current_cpu;
        cc = CPU_GET_CLASS(cpu);
#else /* buggy compiler */
        /* Assert that the compiler does not smash local variables. */
        g_assert(cpu == current_cpu);
        g_assert(cc == CPU_GET_CLASS(cpu));
#endif /* buggy compiler */
        cpu->can_do_io = 1;
        tb_lock_reset();
        if (qemu_mutex_iothread_locked()) {
            qemu_mutex_unlock_iothread();
        }

        if (qemu_etrace_mask(ETRACE_F_EXEC)
            && qemu_etracer.exec_start_valid) {
            target_ulong cs_base, pc;
            uint32_t flags;

            cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
            etrace_dump_exec_end(&qemu_etracer, cpu->cpu_index, pc);
        }
    }

    /* if an exception is pending, we execute it here */
    while (!cpu_handle_exception(cpu, &ret)) {
        TranslationBlock *last_tb = NULL;
        int tb_exit = 0;

        while (!cpu_handle_interrupt(cpu, &last_tb)) {
            uint32_t cflags = cpu->cflags_next_tb;
            TranslationBlock *tb;

            /* When requested, use an exact setting for cflags for the next
               execution.  This is used for icount, precise smc, and stop-
               after-access watchpoints.  Since this request should never
               have CF_INVALID set, -1 is a convenient invalid value that
               does not require tcg headers for cpu_common_reset.  */
            if (cflags == -1) {
                cflags = curr_cflags();
            } else {
                cpu->cflags_next_tb = -1;
            }

            tb = tb_find(cpu, last_tb, tb_exit, cflags);
            cpu_loop_exec_tb(cpu, tb, &last_tb, &tb_exit);

            if (qemu_etrace_mask(ETRACE_F_EXEC)) {
                target_ulong cs_base, pc;
                uint32_t flags;

                if (tb_exit) {
                    /* TB exited early or via a side exit; read the end
                       PC from the CPU state.  */
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
                } else {
                    /* Normal fall-through exit; assume the end address
                       of the TB.  */
                    pc = tb->pc + tb->size;
                }
                etrace_dump_exec_end(&qemu_etracer,
                                     cpu->cpu_index, pc);
            }

            qemu_etracer.exec_start_valid = false;

            /* Try to align the host and virtual clocks
               if the guest is in advance */
            align_clocks(&sc, cpu);
        }
    }

    cc->cpu_exec_exit(cpu);
    rcu_read_unlock();

    return ret;
}