/*
 *  emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "cpu.h"
#include "trace.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg.h"
#include "qemu/atomic.h"
#include "sysemu/qtest.h"
#include "qemu/timer.h"
#include "qemu/rcu.h"
#include "exec/tb-hash.h"
#include "exec/tb-lookup.h"
#include "exec/log.h"
#include "qemu/main-loop.h"
#if defined(TARGET_I386) && !defined(CONFIG_USER_ONLY)
#include "hw/i386/apic.h"
#endif
#include "sysemu/cpus.h"
#include "sysemu/replay.h"

/* -icount align implementation. */

typedef struct SyncClocks {
    int64_t diff_clk;        /* guest virtual clock minus host clock, in ns */
    int64_t last_cpu_icount; /* icount value at the previous alignment */
    int64_t realtime_clock;  /* host clock timestamp taken at init */
} SyncClocks;

#if !defined(CONFIG_USER_ONLY)
/* Allow the guest to have a max 3ms advance.
 * The difference between the 2 clocks could therefore
 * oscillate around 0.
 */
#define VM_CLOCK_ADVANCE 3000000
#define THRESHOLD_REDUCE 1.5
#define MAX_DELAY_PRINT_RATE 2000000000LL
#define MAX_NB_PRINTS 100

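/* Called after each TB to keep the host clock and the guest virtual
 * clock aligned: once the guest has run more than VM_CLOCK_ADVANCE ns
 * ahead of the host, sleep the host thread for the difference.  Any
 * un-slept remainder (e.g. when nanosleep is interrupted by a signal)
 * stays in sc->diff_clk and is compensated on a later call.
 */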
static void align_clocks(SyncClocks *sc, const CPUState *cpu)
{
    int64_t cpu_icount;

    if (!icount_align_option) {
        return;
    }

    cpu_icount = cpu->icount_extra + cpu->icount_decr.u16.low;
    sc->diff_clk += cpu_icount_to_ns(sc->last_cpu_icount - cpu_icount);
    sc->last_cpu_icount = cpu_icount;

    if (sc->diff_clk > VM_CLOCK_ADVANCE) {
#ifndef _WIN32
        struct timespec sleep_delay, rem_delay;
        sleep_delay.tv_sec = sc->diff_clk / 1000000000LL;
        sleep_delay.tv_nsec = sc->diff_clk % 1000000000LL;
        if (nanosleep(&sleep_delay, &rem_delay) < 0) {
            /* interrupted by a signal: carry the un-slept remainder over */
            sc->diff_clk = rem_delay.tv_sec * 1000000000LL + rem_delay.tv_nsec;
        } else {
            sc->diff_clk = 0;
        }
#else
        Sleep(sc->diff_clk / SCALE_MS);
        sc->diff_clk = 0;
#endif
    }
}

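/* Print a rate-limited warning when the guest runs behind the host
 * (sc->diff_clk negative): at most one message per MAX_DELAY_PRINT_RATE
 * ns and MAX_NB_PRINTS messages in total.  The static threshold ratchets
 * so that a new message is only printed when the delay grows, or shrinks
 * by more than THRESHOLD_REDUCE seconds.
 */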
static void print_delay(const SyncClocks *sc)
{
    static float threshold_delay;
    static int64_t last_realtime_clock;
    static int nb_prints;

    if (icount_align_option &&
        sc->realtime_clock - last_realtime_clock >= MAX_DELAY_PRINT_RATE &&
        nb_prints < MAX_NB_PRINTS) {
        if ((-sc->diff_clk / (float)1000000000LL > threshold_delay) ||
            (-sc->diff_clk / (float)1000000000LL <
             (threshold_delay - THRESHOLD_REDUCE))) {
            threshold_delay = (-sc->diff_clk / 1000000000LL) + 1;
            printf("Warning: The guest is now late by %.1f to %.1f seconds\n",
                   threshold_delay - 1,
                   threshold_delay);
            nb_prints++;
            last_realtime_clock = sc->realtime_clock;
        }
    }
}

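/* Record the initial guest/host clock offset for this execution slice
 * and fold it into the global max_delay/max_advance statistics.  Only
 * meaningful when running with the -icount align option enabled.
 */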
static void init_delay_params(SyncClocks *sc,
                              const CPUState *cpu)
{
    if (!icount_align_option) {
        return;
    }
    sc->realtime_clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL_RT);
    sc->diff_clk = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) - sc->realtime_clock;
    sc->last_cpu_icount = cpu->icount_extra + cpu->icount_decr.u16.low;
    if (sc->diff_clk < max_delay) {
        max_delay = sc->diff_clk;
    }
    if (sc->diff_clk > max_advance) {
        max_advance = sc->diff_clk;
    }

    /* Print every 2s max if the guest is late. We limit the number
       of printed messages to MAX_NB_PRINTS (currently 100). */
    print_delay(sc);
}
#else
static void align_clocks(SyncClocks *sc, const CPUState *cpu)
{
}

static void init_delay_params(SyncClocks *sc, const CPUState *cpu)
{
}
#endif /* !CONFIG_USER_ONLY */

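/* Execute a TB, and fix up the CPU state afterwards if necessary.
 *
 * The value returned by tcg_qemu_tb_exec() is the pointer to the last
 * TB that was executed, with the exit condition encoded in its low
 * TB_EXIT_MASK bits.  An exit index greater than TB_EXIT_IDX1 (such as
 * TB_EXIT_REQUESTED) means the last TB was never entered, so the guest
 * PC must be rewound to the start of that TB.
 */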
static inline tcg_target_ulong cpu_tb_exec(CPUState *cpu, TranslationBlock *itb)
{
    CPUArchState *env = cpu->env_ptr;
    uintptr_t ret;
    TranslationBlock *last_tb;
    int tb_exit;
    uint8_t *tb_ptr = itb->tc.ptr;

    qemu_log_mask_and_addr(CPU_LOG_EXEC, itb->pc,
                           "Trace %d: %p ["
                           TARGET_FMT_lx "/" TARGET_FMT_lx "/%#x] %s\n",
                           cpu->cpu_index, itb->tc.ptr,
                           itb->cs_base, itb->pc, itb->flags,
                           lookup_symbol(itb->pc));

#if defined(DEBUG_DISAS)
    if (qemu_loglevel_mask(CPU_LOG_TB_CPU)
        && qemu_log_in_addr_range(itb->pc)) {
        qemu_log_lock();
        int flags = 0;
        if (qemu_loglevel_mask(CPU_LOG_TB_FPU)) {
            flags |= CPU_DUMP_FPU;
        }
#if defined(TARGET_I386)
        flags |= CPU_DUMP_CCOP;
#endif
        log_cpu_state(cpu, flags);
        qemu_log_unlock();
    }
#endif /* DEBUG_DISAS */

    cpu->can_do_io = !use_icount;
    ret = tcg_qemu_tb_exec(env, tb_ptr);
    cpu->can_do_io = 1;
    last_tb = (TranslationBlock *)(ret & ~TB_EXIT_MASK);
    tb_exit = ret & TB_EXIT_MASK;
    trace_exec_tb_exit(last_tb, tb_exit);

    if (tb_exit > TB_EXIT_IDX1) {
        /* We didn't start executing this TB (eg because the instruction
         * counter hit zero); we must restore the guest PC to the address
         * of the start of the TB.
         */
        CPUClass *cc = CPU_GET_CLASS(cpu);
        qemu_log_mask_and_addr(CPU_LOG_EXEC, last_tb->pc,
                               "Stopped execution of TB chain before %p ["
                               TARGET_FMT_lx "] %s\n",
                               last_tb->tc.ptr, last_tb->pc,
                               lookup_symbol(last_tb->pc));
        if (cc->synchronize_from_tb) {
            cc->synchronize_from_tb(cpu, last_tb);
        } else {
            assert(cc->set_pc);
            cc->set_pc(cpu, last_tb->pc);
        }
    }
    return ret;
}

#ifndef CONFIG_USER_ONLY
/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
static void cpu_exec_nocache(CPUState *cpu, int max_cycles,
                             TranslationBlock *orig_tb, bool ignore_icount)
{
    TranslationBlock *tb;
    uint32_t cflags = curr_cflags() | CF_NOCACHE;

    if (ignore_icount) {
        cflags &= ~CF_USE_ICOUNT;
    }

    /* Should never happen.
       We only end up here when an existing TB is too long.  */
    cflags |= MIN(max_cycles, CF_COUNT_MASK);

    mmap_lock();
    tb = tb_gen_code(cpu, orig_tb->pc, orig_tb->cs_base,
                     orig_tb->flags, cflags);
    tb->orig_tb = orig_tb;
    mmap_unlock();

    /* execute the generated code */
    trace_exec_tb_nocache(tb, tb->pc);
    cpu_tb_exec(cpu, tb);

    mmap_lock();
    tb_phys_invalidate(tb, -1);
    mmap_unlock();
    tcg_tb_remove(tb);
}
#endif

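/* Execute exactly one guest instruction inside the exclusive region,
 * with all other vCPUs stopped.  This is the fallback used when an
 * atomic operation cannot be emulated in parallel mode (EXCP_ATOMIC);
 * cflags == 1 requests a TB that contains a single instruction.
 */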
void cpu_exec_step_atomic(CPUState *cpu)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    uint32_t flags;
    uint32_t cflags = 1;
    uint32_t cf_mask = cflags & CF_HASH_MASK;
    /* volatile because we modify it between setjmp and longjmp */
    volatile bool in_exclusive_region = false;

    if (sigsetjmp(cpu->jmp_env, 0) == 0) {
        tb = tb_lookup__cpu_state(cpu, &pc, &cs_base, &flags, cf_mask);
        if (tb == NULL) {
            mmap_lock();
            tb = tb_gen_code(cpu, pc, cs_base, flags, cflags);
            mmap_unlock();
        }

        start_exclusive();

        /* Since we got here, we know that parallel_cpus must be true.  */
        parallel_cpus = false;
        in_exclusive_region = true;
        cc->cpu_exec_enter(cpu);
        /* execute the generated code */
        trace_exec_tb(tb, pc);
        cpu_tb_exec(cpu, tb);
        cc->cpu_exec_exit(cpu);
    } else {
        /*
         * The mmap_lock is dropped by tb_gen_code if it runs out of
         * memory.
         */
#ifndef CONFIG_SOFTMMU
        tcg_debug_assert(!have_mmap_lock());
#endif
        if (qemu_mutex_iothread_locked()) {
            qemu_mutex_unlock_iothread();
        }
        assert_no_pages_locked();
    }

    if (in_exclusive_region) {
        /* We might longjump out of either the codegen or the
         * execution, so must make sure we only end the exclusive
         * region if we started it.
         */
        parallel_cpus = true;
        end_exclusive();
    }
}

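/* Lookup key for a TranslationBlock in the global QHT hash table;
 * candidates hashed to the same bucket are compared field by field in
 * tb_lookup_cmp() below.
 */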
struct tb_desc {
    target_ulong pc;
    target_ulong cs_base;
    CPUArchState *env;
    tb_page_addr_t phys_page1;
    uint32_t flags;
    uint32_t cf_mask;
    uint32_t trace_vcpu_dstate;
};

static bool tb_lookup_cmp(const void *p, const void *d)
{
    const TranslationBlock *tb = p;
    const struct tb_desc *desc = d;

    if (tb->pc == desc->pc &&
        tb->page_addr[0] == desc->phys_page1 &&
        tb->cs_base == desc->cs_base &&
        tb->flags == desc->flags &&
        tb->trace_vcpu_dstate == desc->trace_vcpu_dstate &&
        (tb_cflags(tb) & (CF_HASH_MASK | CF_INVALID)) == desc->cf_mask) {
        /* check next page if needed */
        if (tb->page_addr[1] == -1) {
            return true;
        } else {
            tb_page_addr_t phys_page2;
            target_ulong virt_page2;

            virt_page2 = (desc->pc & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
            phys_page2 = get_page_addr_code(desc->env, virt_page2);
            if (tb->page_addr[1] == phys_page2) {
                return true;
            }
        }
    }
    return false;
}

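/* Look up an existing TB by CPU state.  The hash is computed over the
 * physical PC, virtual PC, flags, the cflags hash bits and the vCPU
 * trace state; returns NULL when no TB matches or when the code page
 * is not mapped (get_page_addr_code() returned -1).
 */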
TranslationBlock *tb_htable_lookup(CPUState *cpu, target_ulong pc,
                                   target_ulong cs_base, uint32_t flags,
                                   uint32_t cf_mask)
{
    tb_page_addr_t phys_pc;
    struct tb_desc desc;
    uint32_t h;

    desc.env = (CPUArchState *)cpu->env_ptr;
    desc.cs_base = cs_base;
    desc.flags = flags;
    desc.cf_mask = cf_mask;
    desc.trace_vcpu_dstate = *cpu->trace_dstate;
    desc.pc = pc;
    phys_pc = get_page_addr_code(desc.env, pc);
    if (phys_pc == -1) {
        return NULL;
    }
    desc.phys_page1 = phys_pc & TARGET_PAGE_MASK;
    h = tb_hash_func(phys_pc, pc, flags, cf_mask, *cpu->trace_dstate);
    return qht_lookup_custom(&tb_ctx.htable, &desc, h, tb_lookup_cmp);
}

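/* Redirect outgoing jump slot n of @tb to @addr.  When the TCG backend
 * supports direct jumps, jmp_target_arg[n] holds the offset of the jump
 * within the generated code and the instruction itself is patched;
 * otherwise it holds the target address that the indirect jump loads.
 */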
void tb_set_jmp_target(TranslationBlock *tb, int n, uintptr_t addr)
{
    if (TCG_TARGET_HAS_direct_jump) {
        uintptr_t offset = tb->jmp_target_arg[n];
        uintptr_t tc_ptr = (uintptr_t)tb->tc.ptr;
        tb_target_set_jmp_target(tc_ptr, tc_ptr + offset, addr);
    } else {
        tb->jmp_target_arg[n] = addr;
    }
}

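/* Chain jump slot n of @tb to @tb_next.  The slot is claimed with an
 * atomic cmpxchg so concurrent vCPUs cannot patch it twice, and the
 * link is recorded in tb_next's incoming jump list under its jmp_lock
 * so that invalidating tb_next can unpatch this jump again.
 */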
static inline void tb_add_jump(TranslationBlock *tb, int n,
                               TranslationBlock *tb_next)
{
    uintptr_t old;

    assert(n < ARRAY_SIZE(tb->jmp_list_next));
    qemu_spin_lock(&tb_next->jmp_lock);

    /* make sure the destination TB is valid */
    if (tb_next->cflags & CF_INVALID) {
        goto out_unlock_next;
    }
    /* Atomically claim the jump destination slot only if it was NULL */
    old = atomic_cmpxchg(&tb->jmp_dest[n], (uintptr_t)NULL, (uintptr_t)tb_next);
    if (old) {
        goto out_unlock_next;
    }

    /* patch the native jump address */
    tb_set_jmp_target(tb, n, (uintptr_t)tb_next->tc.ptr);

    /* add in TB jmp list */
    tb->jmp_list_next[n] = tb_next->jmp_list_head;
    tb_next->jmp_list_head = (uintptr_t)tb | n;

    qemu_spin_unlock(&tb_next->jmp_lock);

    qemu_log_mask_and_addr(CPU_LOG_EXEC, tb->pc,
                           "Linking TBs %p [" TARGET_FMT_lx
                           "] index %d -> %p [" TARGET_FMT_lx "]\n",
                           tb->tc.ptr, tb->pc, n,
                           tb_next->tc.ptr, tb_next->pc);
    return;

 out_unlock_next:
    qemu_spin_unlock(&tb_next->jmp_lock);
    return;
}

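/* Find a TB for the current CPU state: try the per-CPU tb_jmp_cache and
 * the global hash table first, generating a new TB on a miss.  If we
 * arrived here from another TB, patch that TB's exit jump to point
 * directly at the one found, so the next iteration skips the lookup.
 */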
static inline TranslationBlock *tb_find(CPUState *cpu,
                                        TranslationBlock *last_tb,
                                        int tb_exit, uint32_t cf_mask)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    uint32_t flags;

    tb = tb_lookup__cpu_state(cpu, &pc, &cs_base, &flags, cf_mask);
    if (tb == NULL) {
        mmap_lock();
        tb = tb_gen_code(cpu, pc, cs_base, flags, cf_mask);
        mmap_unlock();
        /* We add the TB in the virtual pc hash table for the fast lookup */
        atomic_set(&cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)], tb);
    }
#ifndef CONFIG_USER_ONLY
    /* We don't take care of direct jumps when address mapping changes in
     * system emulation. So it's not safe to make a direct jump to a TB
     * spanning two pages because the mapping for the second page can change.
     */
    if (tb->page_addr[1] != -1) {
        last_tb = NULL;
    }
#endif
    /* See if we can patch the calling TB. */
    if (last_tb) {
        tb_add_jump(last_tb, tb_exit, tb);
    }
    return tb;
}

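/* Return true if the CPU is halted and should stay halted, in which
 * case the caller returns EXCP_HALTED.  On x86 system emulation a
 * pending APIC interrupt is polled first, since it may un-halt the CPU.
 */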
static inline bool cpu_handle_halt(CPUState *cpu)
{
    if (cpu->halted) {
#if defined(TARGET_I386) && !defined(CONFIG_USER_ONLY)
        if ((cpu->interrupt_request & CPU_INTERRUPT_POLL)
            && replay_interrupt()) {
            X86CPU *x86_cpu = X86_CPU(cpu);
            qemu_mutex_lock_iothread();
            apic_poll_irq(x86_cpu->apic_state);
            cpu_reset_interrupt(cpu, CPU_INTERRUPT_POLL);
            qemu_mutex_unlock_iothread();
        }
#endif
        if (!cpu_has_work(cpu)) {
            return true;
        }

        cpu->halted = 0;
    }

    return false;
}

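/* Invoked for EXCP_DEBUG: if the exception was not raised by a
 * watchpoint, clear any stale BP_WATCHPOINT_HIT flags before calling
 * the target's debug exception handler.
 */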
static inline void cpu_handle_debug_exception(CPUState *cpu)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);
    CPUWatchpoint *wp;

    if (!cpu->watchpoint_hit) {
        QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }

    cc->debug_excp_handler(cpu);
}

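/* Handle a pending exception, if any.  Returns true when the outer
 * execution loop must stop and return *ret to the caller: either an
 * internal request such as EXCP_INTERRUPT/EXCP_DEBUG, or, in user-mode
 * emulation, a guest exception to be delivered outside this loop.
 */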
static inline bool cpu_handle_exception(CPUState *cpu, int *ret)
{
    if (cpu->exception_index < 0) {
#ifndef CONFIG_USER_ONLY
        if (replay_has_exception()
            && cpu->icount_decr.u16.low + cpu->icount_extra == 0) {
            /* try to cause an exception pending in the log */
            cpu_exec_nocache(cpu, 1, tb_find(cpu, NULL, 0, curr_cflags()), true);
        }
#endif
        if (cpu->exception_index < 0) {
            return false;
        }
    }

    if (cpu->exception_index >= EXCP_INTERRUPT) {
        /* exit request from the cpu execution loop */
        *ret = cpu->exception_index;
        if (*ret == EXCP_DEBUG) {
            cpu_handle_debug_exception(cpu);
        }
        cpu->exception_index = -1;
        return true;
    } else {
#if defined(CONFIG_USER_ONLY)
        /* if user mode only, we simulate a fake exception
           which will be handled outside the cpu execution
           loop */
#if defined(TARGET_I386)
        CPUClass *cc = CPU_GET_CLASS(cpu);
        cc->do_interrupt(cpu);
#endif
        *ret = cpu->exception_index;
        cpu->exception_index = -1;
        return true;
#else
        if (replay_exception()) {
            CPUClass *cc = CPU_GET_CLASS(cpu);
            qemu_mutex_lock_iothread();
            cc->do_interrupt(cpu);
            qemu_mutex_unlock_iothread();
            cpu->exception_index = -1;
        } else if (!replay_has_interrupt()) {
            /* give a chance to the iothread in replay mode */
            *ret = EXCP_INTERRUPT;
            return true;
        }
#endif
    }

    return false;
}

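/* Service pending interrupt and exit requests.  Returns true when the
 * inner execution loop must be left.  *last_tb is cleared whenever the
 * program flow may have changed, so that tb_find() does not chain the
 * previous TB to the next one.
 */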
static inline bool cpu_handle_interrupt(CPUState *cpu,
                                        TranslationBlock **last_tb)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

    /* Clear the interrupt flag now since we're processing
     * cpu->interrupt_request and cpu->exit_request.
     * Ensure zeroing happens before reading cpu->exit_request or
     * cpu->interrupt_request (see also smp_wmb in cpu_exit())
     */
    atomic_mb_set(&cpu->icount_decr.u16.high, 0);

    if (unlikely(atomic_read(&cpu->interrupt_request))) {
        int interrupt_request;
        qemu_mutex_lock_iothread();
        interrupt_request = cpu->interrupt_request;
        if (unlikely(cpu->singlestep_enabled & SSTEP_NOIRQ)) {
            /* Mask out external interrupts for this step. */
            interrupt_request &= ~CPU_INTERRUPT_SSTEP_MASK;
        }
        if (interrupt_request & CPU_INTERRUPT_DEBUG) {
            cpu->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
            cpu->exception_index = EXCP_DEBUG;
            qemu_mutex_unlock_iothread();
            return true;
        }
        if (replay_mode == REPLAY_MODE_PLAY && !replay_has_interrupt()) {
            /* Do nothing */
        } else if (interrupt_request & CPU_INTERRUPT_HALT) {
            replay_interrupt();
            cpu->interrupt_request &= ~CPU_INTERRUPT_HALT;
            cpu->halted = 1;
            cpu->exception_index = EXCP_HLT;
            qemu_mutex_unlock_iothread();
            return true;
        }
#if defined(TARGET_I386)
        else if (interrupt_request & CPU_INTERRUPT_INIT) {
            X86CPU *x86_cpu = X86_CPU(cpu);
            CPUArchState *env = &x86_cpu->env;
            replay_interrupt();
            cpu_svm_check_intercept_param(env, SVM_EXIT_INIT, 0, 0);
            do_cpu_init(x86_cpu);
            cpu->exception_index = EXCP_HALTED;
            qemu_mutex_unlock_iothread();
            return true;
        }
#else
        else if (interrupt_request & CPU_INTERRUPT_RESET) {
            replay_interrupt();
            cpu_reset(cpu);
            qemu_mutex_unlock_iothread();
            return true;
        }
#endif
        /* The target hook has 3 exit conditions:
           False when the interrupt isn't processed,
           True when it is, and we should restart on a new TB,
           and via longjmp via cpu_loop_exit.  */
        else {
            if (cc->cpu_exec_interrupt(cpu, interrupt_request)) {
                replay_interrupt();
                cpu->exception_index = -1;
                *last_tb = NULL;
            }
            /* The target hook may have updated the 'cpu->interrupt_request';
             * reload the 'interrupt_request' value */
            interrupt_request = cpu->interrupt_request;
        }
        if (interrupt_request & CPU_INTERRUPT_EXITTB) {
            cpu->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
            /* ensure that no TB jump will be modified as
               the program flow was changed */
            *last_tb = NULL;
        }

        /* If we exit via cpu_loop_exit/longjmp it is reset in cpu_exec */
        qemu_mutex_unlock_iothread();
    }

    /* Finally, check if we need to exit to the main loop.  */
    if (unlikely(atomic_read(&cpu->exit_request)
        || (use_icount && cpu->icount_decr.u16.low + cpu->icount_extra == 0))) {
        atomic_set(&cpu->exit_request, 0);
        if (cpu->exception_index == -1) {
            cpu->exception_index = EXCP_INTERRUPT;
        }
        return true;
    }

    return false;
}

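/* Execute @tb, reporting the last TB that actually ran in *last_tb and
 * the exit condition in *tb_exit.  If the TB stopped because the icount
 * budget for this slice ran out, refill the decrementer from the
 * remaining budget; when fewer instructions remain than a TB may
 * contain, run them with a one-off uncached TB so the deadline is hit
 * exactly.
 */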
static inline void cpu_loop_exec_tb(CPUState *cpu, TranslationBlock *tb,
                                    TranslationBlock **last_tb, int *tb_exit)
{
    uintptr_t ret;
    int32_t insns_left;

    trace_exec_tb(tb, tb->pc);
    ret = cpu_tb_exec(cpu, tb);
    tb = (TranslationBlock *)(ret & ~TB_EXIT_MASK);
    *tb_exit = ret & TB_EXIT_MASK;
    if (*tb_exit != TB_EXIT_REQUESTED) {
        *last_tb = tb;
        return;
    }

    *last_tb = NULL;
    insns_left = atomic_read(&cpu->icount_decr.u32);
    if (insns_left < 0) {
        /* Something asked us to stop executing chained TBs; just
         * continue round the main loop. Whatever requested the exit
         * will also have set something else (eg exit_request or
         * interrupt_request) which will be handled by
         * cpu_handle_interrupt.  cpu_handle_interrupt will also
         * clear cpu->icount_decr.u16.high.
         */
        return;
    }

    /* Instruction counter expired.  */
    assert(use_icount);
#ifndef CONFIG_USER_ONLY
    /* Ensure global icount has gone forward */
    cpu_update_icount(cpu);
    /* Refill decrementer and continue execution.  */
    insns_left = MIN(0xffff, cpu->icount_budget);
    cpu->icount_decr.u16.low = insns_left;
    cpu->icount_extra = cpu->icount_budget - insns_left;
    if (!cpu->icount_extra) {
        /* Execute any remaining instructions, then let the main loop
         * handle the next event.
         */
        if (insns_left > 0) {
            cpu_exec_nocache(cpu, insns_left, tb, false);
        }
    }
#endif
}

/* main execution loop */

int cpu_exec(CPUState *cpu)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);
    int ret;
    SyncClocks sc = { 0 };

    /* replay_interrupt may need current_cpu */
    current_cpu = cpu;

    if (cpu_handle_halt(cpu)) {
        return EXCP_HALTED;
    }

    rcu_read_lock();

    cc->cpu_exec_enter(cpu);

    /* Calculate difference between guest clock and host clock.
     * This delay includes the delay of the last cycle, so
     * what we have to do is sleep until it is 0. As for the
     * advance/delay we gain here, we try to fix it next time.
     */
    init_delay_params(&sc, cpu);

    /* prepare setjmp context for exception handling */
    if (sigsetjmp(cpu->jmp_env, 0) != 0) {
#if defined(__clang__) || !QEMU_GNUC_PREREQ(4, 6)
        /* Some compilers wrongly smash all local variables after
         * siglongjmp. There were bug reports for gcc 4.5.0 and clang.
         * Reload essential local variables here for those compilers.
         * Newer versions of gcc would complain about this code (-Wclobbered). */
        cpu = current_cpu;
        cc = CPU_GET_CLASS(cpu);
#else
        /* Non-buggy compilers preserve these locals; assert their values. */
        g_assert(cpu == current_cpu);
        g_assert(cc == CPU_GET_CLASS(cpu));
#endif
#ifndef CONFIG_SOFTMMU
        tcg_debug_assert(!have_mmap_lock());
#endif
        if (qemu_mutex_iothread_locked()) {
            qemu_mutex_unlock_iothread();
        }
        assert_no_pages_locked();
    }

    /* if an exception is pending, we execute it here */
    while (!cpu_handle_exception(cpu, &ret)) {
        TranslationBlock *last_tb = NULL;
        int tb_exit = 0;

        while (!cpu_handle_interrupt(cpu, &last_tb)) {
            uint32_t cflags = cpu->cflags_next_tb;
            TranslationBlock *tb;

            /* When requested, use an exact setting for cflags for the next
               execution.  This is used for icount, precise smc, and stop-
               after-access watchpoints.  Since this request should never
               have CF_INVALID set, -1 is a convenient invalid value that
               we can put into the cflags field.  */
            if (cflags == -1) {
                cflags = curr_cflags();
            } else {
                cpu->cflags_next_tb = -1;
            }

            tb = tb_find(cpu, last_tb, tb_exit, cflags);
            cpu_loop_exec_tb(cpu, tb, &last_tb, &tb_exit);
            /* Try to align the host and virtual clocks
               if the guest is in advance */
            align_clocks(&sc, cpu);
        }
    }

    cc->cpu_exec_exit(cpu);
    rcu_read_unlock();

    return ret;
}