#include "qemu/osdep.h"
#include "qemu-common.h"
#include "cpu.h"
#include "trace.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg/tcg.h"
#include "qemu/atomic.h"
#include "sysemu/qtest.h"
#include "qemu/timer.h"
#include "qemu/rcu.h"
#include "exec/tb-hash.h"
#include "exec/tb-lookup.h"
#include "exec/log.h"
#include "qemu/main-loop.h"
#if defined(TARGET_I386) && !defined(CONFIG_USER_ONLY)
#include "hw/i386/apic.h"
#endif
#include "sysemu/cpus.h"
#include "sysemu/replay.h"
#include "qemu/etrace.h"

/* -icount align implementation. */

typedef struct SyncClocks {
    int64_t diff_clk;
    int64_t last_cpu_icount;
    int64_t realtime_clock;
} SyncClocks;

#if !defined(CONFIG_USER_ONLY)
/*
 * Allow the guest to have a max 3ms advance.
 * The difference between the two clocks could therefore
 * oscillate around 0.
 */
#define VM_CLOCK_ADVANCE 3000000
#define THRESHOLD_REDUCE 1.5
#define MAX_DELAY_PRINT_RATE 2000000000LL
#define MAX_NB_PRINTS 100

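/*
 * Throttle guest execution when -icount align is enabled: convert the
 * guest instruction count into nanoseconds, track how far the virtual
 * clock has drifted ahead of the host realtime clock, and sleep off any
 * advance larger than VM_CLOCK_ADVANCE.
 */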
static void align_clocks(SyncClocks *sc, CPUState *cpu)
{
    int64_t cpu_icount;

    if (!icount_align_option) {
        return;
    }

    cpu_icount = cpu->icount_extra + cpu_neg(cpu)->icount_decr.u16.low;
    sc->diff_clk += cpu_icount_to_ns(sc->last_cpu_icount - cpu_icount);
    sc->last_cpu_icount = cpu_icount;

    if (sc->diff_clk > VM_CLOCK_ADVANCE) {
#ifndef _WIN32
        struct timespec sleep_delay, rem_delay;
        sleep_delay.tv_sec = sc->diff_clk / 1000000000LL;
        sleep_delay.tv_nsec = sc->diff_clk % 1000000000LL;
        if (nanosleep(&sleep_delay, &rem_delay) < 0) {
            sc->diff_clk = rem_delay.tv_sec * 1000000000LL + rem_delay.tv_nsec;
        } else {
            sc->diff_clk = 0;
        }
#else
        Sleep(sc->diff_clk / SCALE_MS);
        sc->diff_clk = 0;
#endif
    }
}

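/*
 * Rate-limited warning when the guest falls behind real time: print at
 * most every MAX_DELAY_PRINT_RATE ns and at most MAX_NB_PRINTS times,
 * and only when the delay crosses the current threshold.
 */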
static void print_delay(const SyncClocks *sc)
{
    static float threshold_delay;
    static int64_t last_realtime_clock;
    static int nb_prints;

    if (icount_align_option &&
        sc->realtime_clock - last_realtime_clock >= MAX_DELAY_PRINT_RATE &&
        nb_prints < MAX_NB_PRINTS) {
        if ((-sc->diff_clk / (float)1000000000LL > threshold_delay) ||
            (-sc->diff_clk / (float)1000000000LL <
             (threshold_delay - THRESHOLD_REDUCE))) {
            threshold_delay = (-sc->diff_clk / 1000000000LL) + 1;
            printf("Warning: The guest is now late by %.1f to %.1f seconds\n",
                   threshold_delay - 1,
                   threshold_delay);
            nb_prints++;
            last_realtime_clock = sc->realtime_clock;
        }
    }
}

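/*
 * Capture the initial offset between QEMU_CLOCK_VIRTUAL and
 * QEMU_CLOCK_VIRTUAL_RT so that later align_clocks() calls work with the
 * drift relative to this starting point; also update the max_delay /
 * max_advance statistics.
 */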
static void init_delay_params(SyncClocks *sc, CPUState *cpu)
{
    if (!icount_align_option) {
        return;
    }
    sc->realtime_clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL_RT);
    sc->diff_clk = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) - sc->realtime_clock;
    sc->last_cpu_icount
        = cpu->icount_extra + cpu_neg(cpu)->icount_decr.u16.low;
    if (sc->diff_clk < max_delay) {
        max_delay = sc->diff_clk;
    }
    if (sc->diff_clk > max_advance) {
        max_advance = sc->diff_clk;
    }

    /*
     * Print every 2s max if the guest is late. We limit the number
     * of printed messages to MAX_NB_PRINTS. This way, if the guest is
     * really late, the guest's clock will not be printed.
     */
    print_delay(sc);
}
#else
static void align_clocks(SyncClocks *sc, const CPUState *cpu)
{
}

static void init_delay_params(SyncClocks *sc, const CPUState *cpu)
{
}
#endif

/* Execute a TB, and fix up the CPU state afterwards if necessary. */
static inline tcg_target_ulong cpu_tb_exec(CPUState *cpu, TranslationBlock *itb)
{
    CPUArchState *env = cpu->env_ptr;
    uintptr_t ret;
    TranslationBlock *last_tb;
    int tb_exit;
    uint8_t *tb_ptr = itb->tc.ptr;

    qemu_log_mask_and_addr(CPU_LOG_EXEC, itb->pc,
                           "Trace %d: %p ["
                           TARGET_FMT_lx "/" TARGET_FMT_lx "/%#x] %s\n",
                           cpu->cpu_index, itb->tc.ptr,
                           itb->cs_base, itb->pc, itb->flags,
                           lookup_symbol(itb->pc));

    if (qemu_etrace_mask(ETRACE_F_EXEC)) {
        etrace_dump_exec_start(&qemu_etracer, cpu->cpu_index,
                               itb->pc);
    }

#if defined(DEBUG_DISAS)
    if (qemu_loglevel_mask(CPU_LOG_TB_CPU)
        && qemu_log_in_addr_range(itb->pc)) {
        FILE *logfile = qemu_log_lock();
        int flags = 0;
        if (qemu_loglevel_mask(CPU_LOG_TB_FPU)) {
            flags |= CPU_DUMP_FPU;
        }
#if defined(TARGET_I386)
        flags |= CPU_DUMP_CCOP;
#endif
        log_cpu_state(cpu, flags);
        qemu_log_unlock(logfile);
    }
#endif /* DEBUG_DISAS */

    ret = tcg_qemu_tb_exec(env, tb_ptr);
    cpu->can_do_io = 1;
    last_tb = (TranslationBlock *)(ret & ~TB_EXIT_MASK);
    tb_exit = ret & TB_EXIT_MASK;
    trace_exec_tb_exit(last_tb, tb_exit);

    if (tb_exit > TB_EXIT_IDX1) {
        /*
         * We didn't start executing this TB (e.g. because the instruction
         * counter hit zero); we must restore the guest PC to the address
         * of the start of the TB.
         */
        CPUClass *cc = CPU_GET_CLASS(cpu);
        qemu_log_mask_and_addr(CPU_LOG_EXEC, last_tb->pc,
                               "Stopped execution of TB chain before %p ["
                               TARGET_FMT_lx "] %s\n",
                               last_tb->tc.ptr, last_tb->pc,
                               lookup_symbol(last_tb->pc));
        if (cc->synchronize_from_tb) {
            cc->synchronize_from_tb(cpu, last_tb);
        } else {
            assert(cc->set_pc);
            cc->set_pc(cpu, last_tb->pc);
        }
    }
    return ret;
}

#ifndef CONFIG_USER_ONLY
/*
 * Execute the code without caching the generated code. An interpreter
 * could be used if available.
 */
static void cpu_exec_nocache(CPUState *cpu, int max_cycles,
                             TranslationBlock *orig_tb, bool ignore_icount)
{
    TranslationBlock *tb;
    uint32_t cflags = curr_cflags() | CF_NOCACHE;

    if (ignore_icount) {
        cflags &= ~CF_USE_ICOUNT;
    }

    /*
     * Should never happen.
     * We only end up here when an existing TB is too long.
     */
    cflags |= MIN(max_cycles, CF_COUNT_MASK);

    mmap_lock();
    tb = tb_gen_code(cpu, orig_tb->pc, orig_tb->cs_base,
                     orig_tb->flags, cflags);
    tb->orig_tb = orig_tb;
    mmap_unlock();

    /* execute the generated code */
    trace_exec_tb_nocache(tb, tb->pc);
    cpu_tb_exec(cpu, tb);

    mmap_lock();
    tb_phys_invalidate(tb, -1);
    mmap_unlock();
    tcg_tb_remove(tb);
}
#endif

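/*
 * Execute a single guest instruction (cflags forces a one-insn TB) inside
 * an exclusive region, with all other vCPUs stopped and parallel_cpus
 * temporarily cleared, so the instruction is emulated without the
 * parallel-execution code paths.
 */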
void cpu_exec_step_atomic(CPUState *cpu)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    uint32_t flags;
    uint32_t cflags = 1;
    uint32_t cf_mask = cflags & CF_HASH_MASK;

    if (sigsetjmp(cpu->jmp_env, 0) == 0) {
        start_exclusive();

        tb = tb_lookup__cpu_state(cpu, &pc, &cs_base, &flags, cf_mask);
        if (tb == NULL) {
            mmap_lock();
            tb = tb_gen_code(cpu, pc, cs_base, flags, cflags);
            mmap_unlock();
        }

        /* Since we got here, we know that parallel_cpus must be true.  */
        parallel_cpus = false;
        cc->cpu_exec_enter(cpu);
        /* execute the generated code */
        trace_exec_tb(tb, pc);
        cpu_tb_exec(cpu, tb);
        cc->cpu_exec_exit(cpu);
    } else {
        /*
         * The mmap_lock is dropped by tb_gen_code if it runs out of
         * memory.
         */
#ifndef CONFIG_SOFTMMU
        tcg_debug_assert(!have_mmap_lock());
#endif
        if (qemu_mutex_iothread_locked()) {
            qemu_mutex_unlock_iothread();
        }
        assert_no_pages_locked();
        qemu_plugin_disable_mem_helpers(cpu);
    }

    /*
     * As we start the exclusive region before codegen we must still
     * be in the region if we longjump out of either the codegen or
     * the execution.
     */
    g_assert(cpu_in_exclusive_context(cpu));
    parallel_cpus = true;
    end_exclusive();
}

struct tb_desc {
    target_ulong pc;
    target_ulong cs_base;
    CPUArchState *env;
    tb_page_addr_t phys_page1;
    uint32_t flags;
    uint32_t cf_mask;
    uint32_t trace_vcpu_dstate;
};

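/*
 * qht comparison callback: match a candidate TB against the lookup key in
 * struct tb_desc, including the physical address of a possible second page
 * for TBs that span a page boundary.
 */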
static bool tb_lookup_cmp(const void *p, const void *d)
{
    const TranslationBlock *tb = p;
    const struct tb_desc *desc = d;

    if (tb->pc == desc->pc &&
        tb->page_addr[0] == desc->phys_page1 &&
        tb->cs_base == desc->cs_base &&
        tb->flags == desc->flags &&
        tb->trace_vcpu_dstate == desc->trace_vcpu_dstate &&
        (tb_cflags(tb) & (CF_HASH_MASK | CF_INVALID)) == desc->cf_mask) {
        /* check next page if needed */
        if (tb->page_addr[1] == -1) {
            return true;
        } else {
            tb_page_addr_t phys_page2;
            target_ulong virt_page2;

            virt_page2 = (desc->pc & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
            phys_page2 = get_page_addr_code(desc->env, virt_page2);
            if (tb->page_addr[1] == phys_page2) {
                return true;
            }
        }
    }
    return false;
}

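/*
 * Slow-path lookup in the global TB hash table. Returns NULL if the code
 * at 'pc' is not mapped executable or no matching TB has been generated.
 */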
TranslationBlock *tb_htable_lookup(CPUState *cpu, target_ulong pc,
                                   target_ulong cs_base, uint32_t flags,
                                   uint32_t cf_mask)
{
    tb_page_addr_t phys_pc;
    struct tb_desc desc;
    uint32_t h;

    desc.env = (CPUArchState *)cpu->env_ptr;
    desc.cs_base = cs_base;
    desc.flags = flags;
    desc.cf_mask = cf_mask;
    desc.trace_vcpu_dstate = *cpu->trace_dstate;
    desc.pc = pc;
    phys_pc = get_page_addr_code(desc.env, pc);
    if (phys_pc == -1) {
        return NULL;
    }
    desc.phys_page1 = phys_pc & TARGET_PAGE_MASK;
    h = tb_hash_func(phys_pc, pc, flags, cf_mask, *cpu->trace_dstate);
    return qht_lookup_custom(&tb_ctx.htable, &desc, h, tb_lookup_cmp);
}

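/*
 * Patch jump slot 'n' of 'tb' to branch to 'addr': either by rewriting the
 * jump instruction in place (direct-jump hosts) or by updating the indirect
 * jump target variable read by the generated code.
 */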
void tb_set_jmp_target(TranslationBlock *tb, int n, uintptr_t addr)
{
    if (TCG_TARGET_HAS_direct_jump) {
        uintptr_t offset = tb->jmp_target_arg[n];
        uintptr_t tc_ptr = (uintptr_t)tb->tc.ptr;
        tb_target_set_jmp_target(tc_ptr, tc_ptr + offset, addr);
    } else {
        tb->jmp_target_arg[n] = addr;
    }
}

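/*
 * Chain 'tb' to 'tb_next' through jump slot 'n', taking tb_next's jmp_lock
 * so the link can be undone safely if tb_next is later invalidated.
 */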
static inline void tb_add_jump(TranslationBlock *tb, int n,
                               TranslationBlock *tb_next)
{
    uintptr_t old;

    assert(n < ARRAY_SIZE(tb->jmp_list_next));
    qemu_spin_lock(&tb_next->jmp_lock);

    /* make sure the destination TB is valid */
    if (tb_next->cflags & CF_INVALID) {
        goto out_unlock_next;
    }
    /* Atomically claim the jump destination slot only if it was NULL */
    old = atomic_cmpxchg(&tb->jmp_dest[n], (uintptr_t)NULL, (uintptr_t)tb_next);
    if (old) {
        goto out_unlock_next;
    }

    /* patch the native jump address */
    tb_set_jmp_target(tb, n, (uintptr_t)tb_next->tc.ptr);

    /* add in TB jmp list */
    tb->jmp_list_next[n] = tb_next->jmp_list_head;
    tb_next->jmp_list_head = (uintptr_t)tb | n;

    qemu_spin_unlock(&tb_next->jmp_lock);

    qemu_log_mask_and_addr(CPU_LOG_EXEC, tb->pc,
                           "Linking TBs %p [" TARGET_FMT_lx
                           "] index %d -> %p [" TARGET_FMT_lx "]\n",
                           tb->tc.ptr, tb->pc, n,
                           tb_next->tc.ptr, tb_next->pc);
    return;

 out_unlock_next:
    qemu_spin_unlock(&tb_next->jmp_lock);
    return;
}

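/*
 * Fast-path TB lookup for the execution loop: consult the per-CPU
 * tb_jmp_cache and the hash table, generating a new TB if needed, and
 * chain it to 'last_tb' when it is safe to do so.
 */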
static inline TranslationBlock *tb_find(CPUState *cpu,
                                        TranslationBlock *last_tb,
                                        int tb_exit, uint32_t cf_mask)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    uint32_t flags;

    tb = tb_lookup__cpu_state(cpu, &pc, &cs_base, &flags, cf_mask);
    if (tb == NULL) {
        mmap_lock();
        tb = tb_gen_code(cpu, pc, cs_base, flags, cf_mask);
        mmap_unlock();
        /* We add the TB in the virtual pc hash table for the fast lookup */
        atomic_set(&cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)], tb);
    }
#ifndef CONFIG_USER_ONLY
    /*
     * We don't take care of direct jumps when address mapping changes in
     * system emulation. So it's not safe to make a direct jump to a TB
     * spanning two pages because the mapping for the second page can change.
     */
    if (tb->page_addr[1] != -1) {
        last_tb = NULL;
    }
#endif
    /* See if we can patch the calling TB. */
    if (last_tb) {
        tb_add_jump(last_tb, tb_exit, tb);
    }
    return tb;
}

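/*
 * Handle a halted vCPU: returns true if the CPU should stay halted
 * (no work pending, or it is held in reset), false once it has something
 * to do and execution can proceed.
 */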
static inline bool cpu_handle_halt(CPUState *cpu)
{
    if (cpu->halted) {
#if defined(TARGET_I386) && !defined(CONFIG_USER_ONLY)
        if ((cpu->interrupt_request & CPU_INTERRUPT_POLL)
            && replay_interrupt()) {
            X86CPU *x86_cpu = X86_CPU(cpu);
            qemu_mutex_lock_iothread();
            apic_poll_irq(x86_cpu->apic_state);
            cpu_reset_interrupt(cpu, CPU_INTERRUPT_POLL);
            qemu_mutex_unlock_iothread();
        }
#endif
        /* Record a sleep event for the execution tracer.  */
        if (qemu_etrace_mask(ETRACE_F_EXEC)) {
            const char *dev_name = object_get_canonical_path(OBJECT(cpu));
            etrace_event_u64(&qemu_etracer, cpu->cpu_index,
                             ETRACE_EVU64_F_PREV_VAL,
                             dev_name, "sleep", 0, 1);
        }

        if (!cpu_has_work(cpu) || cpu->reset_pin) {
            return true;
        }

        cpu->halted = 0;
    }

    return false;
}

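/*
 * Dispatch EXCP_DEBUG to the target's debug exception handler, clearing
 * stale watchpoint hit flags first if no watchpoint actually fired.
 */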
static inline void cpu_handle_debug_exception(CPUState *cpu)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);
    CPUWatchpoint *wp;

    if (!cpu->watchpoint_hit) {
        QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }

    cc->debug_excp_handler(cpu);
}

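/*
 * Process any pending exception. Returns true (with *ret set) when
 * cpu_exec() should return to its caller, false when the main loop can
 * continue translating and executing code.
 */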
static inline bool cpu_handle_exception(CPUState *cpu, int *ret)
{
    if (cpu->exception_index < 0) {
#ifndef CONFIG_USER_ONLY
        if (replay_has_exception()
            && cpu_neg(cpu)->icount_decr.u16.low + cpu->icount_extra == 0) {
            /* try to cause an exception pending in the log */
            cpu_exec_nocache(cpu, 1, tb_find(cpu, NULL, 0, curr_cflags()), true);
        }
#endif
        if (cpu->exception_index < 0) {
            return false;
        }
    }

    if (cpu->exception_index >= EXCP_INTERRUPT) {
        /* exit request from the cpu execution loop */
        *ret = cpu->exception_index;
        if (*ret == EXCP_DEBUG) {
            cpu_handle_debug_exception(cpu);
        }
        cpu->exception_index = -1;
        return true;
    } else {
#if defined(CONFIG_USER_ONLY)
        /*
         * If user mode only, we simulate a fake exception which will be
         * handled outside the cpu execution loop.
         */
#if defined(TARGET_I386)
        CPUClass *cc = CPU_GET_CLASS(cpu);
        cc->do_interrupt(cpu);
#endif
        *ret = cpu->exception_index;
        cpu->exception_index = -1;
        return true;
#else
        if (replay_exception()) {
            CPUClass *cc = CPU_GET_CLASS(cpu);
            qemu_mutex_lock_iothread();
            cc->do_interrupt(cpu);
            qemu_mutex_unlock_iothread();
            cpu->exception_index = -1;

            if (unlikely(cpu->singlestep_enabled)) {
                /*
                 * After processing the exception, ensure an EXCP_DEBUG is
                 * raised when single-stepping so that GDB doesn't miss the
                 * next instruction.
                 */
                *ret = EXCP_DEBUG;
                cpu_handle_debug_exception(cpu);
                return true;
            }
        } else if (!replay_has_interrupt()) {
            /* give a chance to iothread in replay mode */
            *ret = EXCP_INTERRUPT;
            return true;
        }
#endif
    }

    return false;
}

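/*
 * Check for and service pending interrupt/exit requests. Returns true
 * when the inner execution loop must be left (an exception index has been
 * set or an exit to the main loop was requested), false to keep executing
 * translated code; *last_tb is cleared whenever TB chaining must not be
 * resumed across the interrupt.
 */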
static inline bool cpu_handle_interrupt(CPUState *cpu,
                                        TranslationBlock **last_tb)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

    /*
     * Clear the interrupt flag now since we're processing
     * cpu->interrupt_request and cpu->exit_request.
     * Ensure zeroing happens before reading cpu->exit_request or
     * cpu->interrupt_request (see also smp_wmb in cpu_exit()).
     */
    atomic_mb_set(&cpu_neg(cpu)->icount_decr.u16.high, 0);

    if (unlikely(atomic_read(&cpu->interrupt_request))) {
        int interrupt_request;
        qemu_mutex_lock_iothread();
        interrupt_request = cpu->interrupt_request;
        if (unlikely(cpu->singlestep_enabled & SSTEP_NOIRQ)) {
            /* Mask out external interrupts for this step. */
            interrupt_request &= ~CPU_INTERRUPT_SSTEP_MASK;
        }
        if (interrupt_request & CPU_INTERRUPT_DEBUG) {
            cpu->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
            cpu->exception_index = EXCP_DEBUG;
            qemu_mutex_unlock_iothread();
            return true;
        }
        if (replay_mode == REPLAY_MODE_PLAY && !replay_has_interrupt()) {
            /* Do nothing */
        } else if (interrupt_request & CPU_INTERRUPT_HALT) {
            replay_interrupt();
            cpu->interrupt_request &= ~CPU_INTERRUPT_HALT;
            cpu->halted = 1;
            cpu->exception_index = EXCP_HLT;
            qemu_mutex_unlock_iothread();
            return true;
        }
#if defined(TARGET_I386)
        else if (interrupt_request & CPU_INTERRUPT_INIT) {
            X86CPU *x86_cpu = X86_CPU(cpu);
            CPUArchState *env = &x86_cpu->env;
            replay_interrupt();
            cpu_svm_check_intercept_param(env, SVM_EXIT_INIT, 0, 0);
            do_cpu_init(x86_cpu);
            cpu->exception_index = EXCP_HALTED;
            qemu_mutex_unlock_iothread();
            return true;
        }
#else
        else if (interrupt_request & CPU_INTERRUPT_RESET) {
            replay_interrupt();
            cpu_reset(cpu);
            qemu_mutex_unlock_iothread();
            return true;
        }
#endif
        /*
         * The target hook has 3 exit conditions:
         * False when the interrupt isn't processed,
         * True when it is, and we should restart on a new TB,
         * and via longjmp via cpu_loop_exit.
         */
        else {
            if (cc->cpu_exec_interrupt(cpu, interrupt_request)) {
                replay_interrupt();
                /*
                 * After processing the interrupt, ensure an EXCP_DEBUG is
                 * raised when single-stepping so that GDB doesn't miss the
                 * next instruction.
                 */
                cpu->exception_index =
                    (cpu->singlestep_enabled ? EXCP_DEBUG : -1);
                *last_tb = NULL;
            }
            /*
             * The target hook may have updated 'cpu->interrupt_request';
             * reload the 'interrupt_request' value.
             */
            interrupt_request = cpu->interrupt_request;
        }
        if (interrupt_request & CPU_INTERRUPT_EXITTB) {
            cpu->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
            /*
             * Ensure that no TB jump will be modified as
             * the program flow was changed.
             */
            *last_tb = NULL;
        }

        /* If we exit via cpu_loop_exit/longjmp it is reset in cpu_exec() */
        qemu_mutex_unlock_iothread();
    }

    /* Finally, check if we need to exit to the main loop.  */
    if (unlikely(atomic_read(&cpu->exit_request))
        || (use_icount
            && cpu_neg(cpu)->icount_decr.u16.low + cpu->icount_extra == 0)) {
        atomic_set(&cpu->exit_request, 0);
        if (cpu->exception_index == -1) {
            cpu->exception_index = EXCP_INTERRUPT;
        }
        return true;
    }

    return false;
}

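/*
 * Execute one TB and decide how the loop continues: record it as last_tb
 * for chaining on a normal exit, or, when the exit was requested because
 * the icount budget ran out, refill the decrementer and run any remaining
 * instructions without caching.
 */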
static inline void cpu_loop_exec_tb(CPUState *cpu, TranslationBlock *tb,
                                    TranslationBlock **last_tb, int *tb_exit)
{
    uintptr_t ret;
    int32_t insns_left;

    trace_exec_tb(tb, tb->pc);
    ret = cpu_tb_exec(cpu, tb);
    tb = (TranslationBlock *)(ret & ~TB_EXIT_MASK);
    *tb_exit = ret & TB_EXIT_MASK;
    if (*tb_exit != TB_EXIT_REQUESTED) {
        *last_tb = tb;
        return;
    }

    *last_tb = NULL;
    insns_left = atomic_read(&cpu_neg(cpu)->icount_decr.u32);
    if (insns_left < 0) {
        /*
         * Something asked us to stop executing chained TBs; just
         * continue round the main loop. Whatever requested the exit
         * will also have set something else (e.g. exit_request or
         * interrupt_request) which will be handled by
         * cpu_handle_interrupt. cpu_handle_interrupt will also
         * clear cpu->icount_decr.u16.high.
         */
        return;
    }

    /* Instruction counter expired.  */
    assert(use_icount);
#ifndef CONFIG_USER_ONLY
    /* Ensure global icount has gone forward */
    cpu_update_icount(cpu);
    /* Refill decrementer and continue execution.  */
    insns_left = MIN(0xffff, cpu->icount_budget);
    cpu_neg(cpu)->icount_decr.u16.low = insns_left;
    cpu->icount_extra = cpu->icount_budget - insns_left;
    if (!cpu->icount_extra) {
        /*
         * Execute any remaining instructions, then let the main loop
         * handle the next event.
         */
        if (insns_left > 0) {
            cpu_exec_nocache(cpu, insns_left, tb, false);
        }
    }
#endif
}

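/*
 * Main execution loop: repeatedly handle pending exceptions and
 * interrupts, then find (or translate) the next TB and execute it,
 * until a request to exit back to the caller is raised.
 */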
int cpu_exec(CPUState *cpu)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);
    int ret;
    CPUArchState *env = (CPUArchState *)cpu->env_ptr;
    SyncClocks sc = { 0 };

    /* replay_interrupt may need current_cpu */
    current_cpu = cpu;

    if (cpu_handle_halt(cpu)) {
        return EXCP_HALTED;
    }

    rcu_read_lock();

    cc->cpu_exec_enter(cpu);

    /*
     * Calculate difference between guest clock and host clock.
     * This delay includes the delay of the last cycle, so
     * what we have to do is sleep until it is 0. As for the
     * advance/delay we gain here, we try to fix it next time.
     */
    init_delay_params(&sc, cpu);

    /* prepare setjmp context for exception handling */
    if (sigsetjmp(cpu->jmp_env, 0) != 0) {
#if defined(__clang__) || !QEMU_GNUC_PREREQ(4, 6)
        /*
         * Some compilers wrongly smash all local variables after
         * siglongjmp. There were bug reports for gcc 4.5.0 and clang.
         * Reload essential local variables here for those compilers.
         * Newer versions of gcc would complain about this code (-Wclobbered).
         */
        cpu = current_cpu;
        cc = CPU_GET_CLASS(cpu);
#else /* buggy compiler */
        /* Assert that the compiler does not smash local variables. */
        g_assert(cpu == current_cpu);
        g_assert(cc == CPU_GET_CLASS(cpu));
#endif /* buggy compiler */
#ifndef CONFIG_SOFTMMU
        tcg_debug_assert(!have_mmap_lock());
#endif
        if (qemu_mutex_iothread_locked()) {
            qemu_mutex_unlock_iothread();
        }

        /* Close any execution-trace record left open by the longjmp.  */
        if (qemu_etrace_mask(ETRACE_F_EXEC)
            && qemu_etracer.exec_start_valid) {
            target_ulong cs_base, pc;
            uint32_t flags;

            cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
            etrace_dump_exec_end(&qemu_etracer, cpu->cpu_index, pc);
        }
        qemu_plugin_disable_mem_helpers(cpu);

        assert_no_pages_locked();
    }

    /* if an exception is pending, we execute it here */
    while (!cpu_handle_exception(cpu, &ret)) {
        TranslationBlock *last_tb = NULL;
        int tb_exit = 0;

        while (!cpu_handle_interrupt(cpu, &last_tb)) {
            uint32_t cflags = cpu->cflags_next_tb;
            TranslationBlock *tb;

            /*
             * When requested, use an exact setting for cflags for the next
             * execution.  This is used for icount, precise smc, and stop-
             * after-access watchpoints.  Since this request should never
             * have CF_INVALID set, -1 is a convenient invalid value that
             * does not require tcg headers for cpu_common_reset.
             */
            if (cflags == -1) {
                cflags = curr_cflags();
            } else {
                cpu->cflags_next_tb = -1;
            }

            tb = tb_find(cpu, last_tb, tb_exit, cflags);
            cpu_loop_exec_tb(cpu, tb, &last_tb, &tb_exit);

            if (qemu_etrace_mask(ETRACE_F_EXEC)) {
                target_ulong cs_base, pc;
                uint32_t flags;

                if (tb_exit) {
                    /* Ask the CPU for its current PC.  */
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
                } else {
                    /* Assume execution stopped at the end of the TB.  */
                    pc = tb->pc + tb->size;
                }
                etrace_dump_exec_end(&qemu_etracer,
                                     cpu->cpu_index, pc);
            }

            qemu_etracer.exec_start_valid = false;

            /*
             * Try to align the host and virtual clocks
             * if the guest is in advance.
             */
            align_clocks(&sc, cpu);
        }
    }

    cc->cpu_exec_exit(cpu);
    rcu_read_unlock();

    return ret;
}