/*
 *  emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "cpu.h"
#include "trace.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg.h"
#include "qemu/atomic.h"
#include "sysemu/qtest.h"
#include "qemu/timer.h"
#include "exec/address-spaces.h"
#include "qemu/rcu.h"
#include "exec/tb-hash.h"
#include "exec/tb-lookup.h"
#include "exec/log.h"
#include "qemu/main-loop.h"
#if defined(TARGET_I386) && !defined(CONFIG_USER_ONLY)
#include "hw/i386/apic.h"
#endif
#include "sysemu/cpus.h"
#include "sysemu/replay.h"

/* -icount align implementation. */

typedef struct SyncClocks {
    int64_t diff_clk;
    int64_t last_cpu_icount;
    int64_t realtime_clock;
} SyncClocks;

#if !defined(CONFIG_USER_ONLY)
/* Allow the guest to have a max 3ms advance.
 * The difference between the 2 clocks could therefore
 * oscillate around 0.
 */
#define VM_CLOCK_ADVANCE 3000000
#define THRESHOLD_REDUCE 1.5
#define MAX_DELAY_PRINT_RATE 2000000000LL
#define MAX_NB_PRINTS 100

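/* Slow the host down when the guest clock runs ahead of real time: convert
 * the vCPU's remaining icount budget to nanoseconds, accumulate the drift
 * in sc->diff_clk, and sleep once the guest is more than VM_CLOCK_ADVANCE
 * ahead.  A nanosleep() interrupted by a signal leaves the unslept
 * remainder in diff_clk so it is retried on the next call.
 */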
static void align_clocks(SyncClocks *sc, const CPUState *cpu)
{
    int64_t cpu_icount;

    if (!icount_align_option) {
        return;
    }

    cpu_icount = cpu->icount_extra + cpu->icount_decr.u16.low;
    sc->diff_clk += cpu_icount_to_ns(sc->last_cpu_icount - cpu_icount);
    sc->last_cpu_icount = cpu_icount;

    if (sc->diff_clk > VM_CLOCK_ADVANCE) {
#ifndef _WIN32
        struct timespec sleep_delay, rem_delay;
        sleep_delay.tv_sec = sc->diff_clk / 1000000000LL;
        sleep_delay.tv_nsec = sc->diff_clk % 1000000000LL;
        if (nanosleep(&sleep_delay, &rem_delay) < 0) {
            sc->diff_clk = rem_delay.tv_sec * 1000000000LL + rem_delay.tv_nsec;
        } else {
            sc->diff_clk = 0;
        }
#else
        Sleep(sc->diff_clk / SCALE_MS);
        sc->diff_clk = 0;
#endif
    }
}

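/* Warn the user when the guest falls behind real time.  Messages are
 * rate-limited to one per MAX_DELAY_PRINT_RATE ns and capped at
 * MAX_NB_PRINTS overall; the threshold hysteresis (THRESHOLD_REDUCE)
 * avoids re-printing for small fluctuations of the delay.
 */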
static void print_delay(const SyncClocks *sc)
{
    static float threshold_delay;
    static int64_t last_realtime_clock;
    static int nb_prints;

    if (icount_align_option &&
        sc->realtime_clock - last_realtime_clock >= MAX_DELAY_PRINT_RATE &&
        nb_prints < MAX_NB_PRINTS) {
        if ((-sc->diff_clk / (float)1000000000LL > threshold_delay) ||
            (-sc->diff_clk / (float)1000000000LL <
             (threshold_delay - THRESHOLD_REDUCE))) {
            threshold_delay = (-sc->diff_clk / 1000000000LL) + 1;
            printf("Warning: The guest is now late by %.1f to %.1f seconds\n",
                   threshold_delay - 1,
                   threshold_delay);
            nb_prints++;
            last_realtime_clock = sc->realtime_clock;
        }
    }
}

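/* Initialise the clock-alignment state for this execution round: record
 * the host clock (QEMU_CLOCK_VIRTUAL_RT), the initial guest/host clock
 * difference and the vCPU's icount, and update the max_delay/max_advance
 * statistics before printing any pending warning.
 */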
static void init_delay_params(SyncClocks *sc,
                              const CPUState *cpu)
{
    if (!icount_align_option) {
        return;
    }
    sc->realtime_clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL_RT);
    sc->diff_clk = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) - sc->realtime_clock;
    sc->last_cpu_icount = cpu->icount_extra + cpu->icount_decr.u16.low;
    if (sc->diff_clk < max_delay) {
        max_delay = sc->diff_clk;
    }
    if (sc->diff_clk > max_advance) {
        max_advance = sc->diff_clk;
    }

    /* Print every 2s max if the guest is late. We limit the number
       of printed messages to NB_PRINT_MAX(currently 100) */
    print_delay(sc);
}
#else
static void align_clocks(SyncClocks *sc, const CPUState *cpu)
{
}

static void init_delay_params(SyncClocks *sc, const CPUState *cpu)
{
}
#endif /* CONFIG USER ONLY */

/* Execute a TB, and fix up the CPU state afterwards if necessary */
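/* The value returned by the TB encodes both the last executed TB and the
 * TB_EXIT_* index of the exit taken, packed into the low bits covered by
 * TB_EXIT_MASK.  Exits above TB_EXIT_IDX1 mean the TB never started
 * executing, so the guest PC is re-synchronized from the TB below.
 */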
static inline tcg_target_ulong cpu_tb_exec(CPUState *cpu, TranslationBlock *itb)
{
    CPUArchState *env = cpu->env_ptr;
    uintptr_t ret;
    TranslationBlock *last_tb;
    int tb_exit;
    uint8_t *tb_ptr = itb->tc.ptr;

    qemu_log_mask_and_addr(CPU_LOG_EXEC, itb->pc,
                           "Trace %p [%d: " TARGET_FMT_lx "] %s\n",
                           itb->tc.ptr, cpu->cpu_index, itb->pc,
                           lookup_symbol(itb->pc));

#if defined(DEBUG_DISAS)
    if (qemu_loglevel_mask(CPU_LOG_TB_CPU)
        && qemu_log_in_addr_range(itb->pc)) {
        qemu_log_lock();
#if defined(TARGET_I386)
        log_cpu_state(cpu, CPU_DUMP_CCOP);
#else
        log_cpu_state(cpu, 0);
#endif
        qemu_log_unlock();
    }
#endif /* DEBUG_DISAS */

    cpu->can_do_io = !use_icount;
    ret = tcg_qemu_tb_exec(env, tb_ptr);
    cpu->can_do_io = 1;
    last_tb = (TranslationBlock *)(ret & ~TB_EXIT_MASK);
    tb_exit = ret & TB_EXIT_MASK;
    trace_exec_tb_exit(last_tb, tb_exit);

    if (tb_exit > TB_EXIT_IDX1) {
        /* We didn't start executing this TB (eg because the instruction
         * counter hit zero); we must restore the guest PC to the address
         * of the start of the TB.
         */
        CPUClass *cc = CPU_GET_CLASS(cpu);
        qemu_log_mask_and_addr(CPU_LOG_EXEC, last_tb->pc,
                               "Stopped execution of TB chain before %p ["
                               TARGET_FMT_lx "] %s\n",
                               last_tb->tc.ptr, last_tb->pc,
                               lookup_symbol(last_tb->pc));
        if (cc->synchronize_from_tb) {
            cc->synchronize_from_tb(cpu, last_tb);
        } else {
            assert(cc->set_pc);
            cc->set_pc(cpu, last_tb->pc);
        }
    }
    return ret;
}

#ifndef CONFIG_USER_ONLY
/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
static void cpu_exec_nocache(CPUState *cpu, int max_cycles,
                             TranslationBlock *orig_tb, bool ignore_icount)
{
    TranslationBlock *tb;
    uint32_t cflags = curr_cflags() | CF_NOCACHE;

    if (ignore_icount) {
        cflags &= ~CF_USE_ICOUNT;
    }

    /* Should never happen.
       We only end up here when an existing TB is too long.  */
    cflags |= MIN(max_cycles, CF_COUNT_MASK);

    tb_lock();
    tb = tb_gen_code(cpu, orig_tb->pc, orig_tb->cs_base,
                     orig_tb->flags, cflags);
    tb->orig_tb = orig_tb;
    tb_unlock();

    /* execute the generated code */
    trace_exec_tb_nocache(tb, tb->pc);
    cpu_tb_exec(cpu, tb);

    tb_lock();
    tb_phys_invalidate(tb, -1);
    tb_remove(tb);
    tb_unlock();
}
#endif

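/* Execute exactly one TB for @cpu inside the exclusive region, with
 * parallel_cpus forced off, so that an instruction that cannot be emulated
 * safely in parallel (e.g. an atomic operation) runs while all other vCPUs
 * are stopped.  cflags is set to 1 so the TB contains a single guest
 * instruction.
 */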
void cpu_exec_step_atomic(CPUState *cpu)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    uint32_t flags;
    uint32_t cflags = 1;
    uint32_t cf_mask = cflags & CF_HASH_MASK;
    /* volatile because we modify it between setjmp and longjmp */
    volatile bool in_exclusive_region = false;

    if (sigsetjmp(cpu->jmp_env, 0) == 0) {
        tb = tb_lookup__cpu_state(cpu, &pc, &cs_base, &flags, cf_mask);
        if (tb == NULL) {
            mmap_lock();
            tb_lock();
            tb = tb_htable_lookup(cpu, pc, cs_base, flags, cf_mask);
            if (likely(tb == NULL)) {
                tb = tb_gen_code(cpu, pc, cs_base, flags, cflags);
            }
            tb_unlock();
            mmap_unlock();
        }

        start_exclusive();

        /* Since we got here, we know that parallel_cpus must be true.  */
        parallel_cpus = false;
        in_exclusive_region = true;
        cc->cpu_exec_enter(cpu);
        /* execute the generated code */
        trace_exec_tb(tb, pc);
        cpu_tb_exec(cpu, tb);
        cc->cpu_exec_exit(cpu);
    } else {
        /* We may have exited due to another problem here, so we need
         * to reset any tb_locks we may have taken but didn't release.
         * The mmap_lock is dropped by tb_gen_code if it runs out of
         * memory.
         */
#ifndef CONFIG_SOFTMMU
        tcg_debug_assert(!have_mmap_lock());
#endif
        tb_lock_reset();
    }

    if (in_exclusive_region) {
        /* We might longjump out of either the codegen or the
         * execution, so must make sure we only end the exclusive
         * region if we started it.
         */
        parallel_cpus = true;
        end_exclusive();
    }
}

struct tb_desc {
    target_ulong pc;
    target_ulong cs_base;
    CPUArchState *env;
    tb_page_addr_t phys_page1;
    uint32_t flags;
    uint32_t cf_mask;
    uint32_t trace_vcpu_dstate;
};

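/* QHT comparison function: returns true when the candidate TB @p matches
 * the lookup key @d on pc, physical page(s), cs_base, flags, trace state
 * and cflags.  A TB that spans two pages only matches when the second
 * physical page matches as well.
 */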
static bool tb_cmp(const void *p, const void *d)
{
    const TranslationBlock *tb = p;
    const struct tb_desc *desc = d;

    if (tb->pc == desc->pc &&
        tb->page_addr[0] == desc->phys_page1 &&
        tb->cs_base == desc->cs_base &&
        tb->flags == desc->flags &&
        tb->trace_vcpu_dstate == desc->trace_vcpu_dstate &&
        (tb_cflags(tb) & (CF_HASH_MASK | CF_INVALID)) == desc->cf_mask) {
        /* check next page if needed */
        if (tb->page_addr[1] == -1) {
            return true;
        } else {
            tb_page_addr_t phys_page2;
            target_ulong virt_page2;

            virt_page2 = (desc->pc & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
            phys_page2 = get_page_addr_code(desc->env, virt_page2);
            if (tb->page_addr[1] == phys_page2) {
                return true;
            }
        }
    }
    return false;
}

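/* Look up a TB in the global QHT hash table; called when the per-vCPU
 * tb_jmp_cache misses.  The hash is derived from the physical PC, virtual
 * PC, flags, cflags mask and the vCPU's trace state.
 */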
TranslationBlock *tb_htable_lookup(CPUState *cpu, target_ulong pc,
                                   target_ulong cs_base, uint32_t flags,
                                   uint32_t cf_mask)
{
    tb_page_addr_t phys_pc;
    struct tb_desc desc;
    uint32_t h;

    desc.env = (CPUArchState *)cpu->env_ptr;
    desc.cs_base = cs_base;
    desc.flags = flags;
    desc.cf_mask = cf_mask;
    desc.trace_vcpu_dstate = *cpu->trace_dstate;
    desc.pc = pc;
    phys_pc = get_page_addr_code(desc.env, pc);
    desc.phys_page1 = phys_pc & TARGET_PAGE_MASK;
    h = tb_hash_func(phys_pc, pc, flags, cf_mask, *cpu->trace_dstate);
    return qht_lookup(&tb_ctx.htable, tb_cmp, &desc, h);
}

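/* Redirect jump slot @n of @tb to @addr.  With direct jumps the host
 * instruction inside the generated code is patched in place; otherwise
 * the target address is stored and loaded indirectly when the jump
 * executes.
 */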
void tb_set_jmp_target(TranslationBlock *tb, int n, uintptr_t addr)
{
    if (TCG_TARGET_HAS_direct_jump) {
        uintptr_t offset = tb->jmp_target_arg[n];
        uintptr_t tc_ptr = (uintptr_t)tb->tc.ptr;
        tb_target_set_jmp_target(tc_ptr, tc_ptr + offset, addr);
    } else {
        tb->jmp_target_arg[n] = addr;
    }
}

/* Called with tb_lock held.  */
static inline void tb_add_jump(TranslationBlock *tb, int n,
                               TranslationBlock *tb_next)
{
    assert(n < ARRAY_SIZE(tb->jmp_list_next));
    if (tb->jmp_list_next[n]) {
        /* Another thread has already done this while we were
         * outside of the lock; nothing to do in this case */
        return;
    }
    qemu_log_mask_and_addr(CPU_LOG_EXEC, tb->pc,
                           "Linking TBs %p [" TARGET_FMT_lx
                           "] index %d -> %p [" TARGET_FMT_lx "]\n",
                           tb->tc.ptr, tb->pc, n,
                           tb_next->tc.ptr, tb_next->pc);

    /* patch the native jump address */
    tb_set_jmp_target(tb, n, (uintptr_t)tb_next->tc.ptr);

    /* add in TB jmp circular list */
    tb->jmp_list_next[n] = tb_next->jmp_list_first;
    tb_next->jmp_list_first = (uintptr_t)tb | n;
}

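/* Find the TB for the current CPU state, translating it first if
 * necessary, and chain @last_tb's exit @tb_exit to it when direct linking
 * is allowed.  Called from the main execution loop.
 */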
static inline TranslationBlock *tb_find(CPUState *cpu,
                                        TranslationBlock *last_tb,
                                        int tb_exit, uint32_t cf_mask)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    uint32_t flags;
    bool acquired_tb_lock = false;

    tb = tb_lookup__cpu_state(cpu, &pc, &cs_base, &flags, cf_mask);
    if (tb == NULL) {
        /* mmap_lock is needed by tb_gen_code, and mmap_lock must be
         * taken outside tb_lock. As system emulation is currently
         * single threaded the locks are NOPs.
         */
        mmap_lock();
        tb_lock();
        acquired_tb_lock = true;

        /* There's a chance that our desired tb has been translated while
         * taking the locks so we check again (this also helps reduce
         * blind chaining).  */
        tb = tb_htable_lookup(cpu, pc, cs_base, flags, cf_mask);
        if (likely(tb == NULL)) {
            /* if no translated code available, then translate it now */
            tb = tb_gen_code(cpu, pc, cs_base, flags, cf_mask);
        }

        mmap_unlock();
        /* We add the TB in the virtual pc hash table for the fast lookup */
        atomic_set(&cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)], tb);
    }
#ifndef CONFIG_USER_ONLY
    /* We don't take care of direct jumps when address mapping changes in
     * system emulation. So it's not safe to make a direct jump to a TB
     * spanning two pages because the mapping for the second page can
     * change.
     */
    if (tb->page_addr[1] != -1) {
        last_tb = NULL;
    }
#endif
    /* See if we can patch the calling TB. */
    if (last_tb && !qemu_loglevel_mask(CPU_LOG_TB_NOCHAIN)) {
        if (!acquired_tb_lock) {
            tb_lock();
            acquired_tb_lock = true;
        }
        if (!(tb->cflags & CF_INVALID)) {
            tb_add_jump(last_tb, tb_exit, tb);
        }
    }
    if (acquired_tb_lock) {
        tb_unlock();
    }
    return tb;
}

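/* Return true if the vCPU should remain halted (it is halted and has no
 * pending work).  On x86 system emulation a pending APIC poll is serviced
 * first, since it can wake the CPU up.
 */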
static inline bool cpu_handle_halt(CPUState *cpu)
{
    if (cpu->halted) {
#if defined(TARGET_I386) && !defined(CONFIG_USER_ONLY)
        if ((cpu->interrupt_request & CPU_INTERRUPT_POLL)
            && replay_interrupt()) {
            X86CPU *x86_cpu = X86_CPU(cpu);
            qemu_mutex_lock_iothread();
            apic_poll_irq(x86_cpu->apic_state);
            cpu_reset_interrupt(cpu, CPU_INTERRUPT_POLL);
            qemu_mutex_unlock_iothread();
        }
#endif
        if (!cpu_has_work(cpu)) {
            return true;
        }

        cpu->halted = 0;
    }

    return false;
}

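/* Clear stale watchpoint-hit flags, then invoke the target's debug
 * exception handler before EXCP_DEBUG is reported to the top level.
 */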
static inline void cpu_handle_debug_exception(CPUState *cpu)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);
    CPUWatchpoint *wp;

    if (!cpu->watchpoint_hit) {
        QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }

    cc->debug_excp_handler(cpu);
}

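/* Process a pending exception, if any.  Returns true (with *ret set) when
 * the outer loop in cpu_exec() should exit back to its caller, false when
 * execution can continue.
 */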
static inline bool cpu_handle_exception(CPUState *cpu, int *ret)
{
    if (cpu->exception_index < 0) {
#ifndef CONFIG_USER_ONLY
        if (replay_has_exception()
            && cpu->icount_decr.u16.low + cpu->icount_extra == 0) {
            /* try to cause an exception pending in the log */
            cpu_exec_nocache(cpu, 1, tb_find(cpu, NULL, 0, curr_cflags()), true);
        }
#endif
        if (cpu->exception_index < 0) {
            return false;
        }
    }

    if (cpu->exception_index >= EXCP_INTERRUPT) {
        /* exit request from the cpu execution loop */
        *ret = cpu->exception_index;
        if (*ret == EXCP_DEBUG) {
            cpu_handle_debug_exception(cpu);
        }
        cpu->exception_index = -1;
        return true;
    } else {
#if defined(CONFIG_USER_ONLY)
        /* if user mode only, we simulate a fake exception
           which will be handled outside the cpu execution
           loop */
#if defined(TARGET_I386)
        CPUClass *cc = CPU_GET_CLASS(cpu);
        cc->do_interrupt(cpu);
#endif
        *ret = cpu->exception_index;
        cpu->exception_index = -1;
        return true;
#else
        if (replay_exception()) {
            CPUClass *cc = CPU_GET_CLASS(cpu);
            qemu_mutex_lock_iothread();
            cc->do_interrupt(cpu);
            qemu_mutex_unlock_iothread();
            cpu->exception_index = -1;
        } else if (!replay_has_interrupt()) {
            /* give a chance to iothread in replay mode */
            *ret = EXCP_INTERRUPT;
            return true;
        }
#endif
    }

    return false;
}

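/* Process pending interrupt and exit requests.  Returns true (with
 * cpu->exception_index set) when the inner execution loop should stop,
 * false when another TB can be executed.  The iothread mutex is held
 * while the interrupt_request flags are examined and dispatched.
 */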
static inline bool cpu_handle_interrupt(CPUState *cpu,
                                        TranslationBlock **last_tb)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);
    int32_t insns_left;

    /* Clear the interrupt flag now since we're processing
     * cpu->interrupt_request and cpu->exit_request.
     * Ensure zeroing happens before reading cpu->exit_request or
     * cpu->interrupt_request (see also smp_wmb in cpu_exit())
     */
    insns_left = atomic_read(&cpu->icount_decr.u32);
    atomic_set(&cpu->icount_decr.u16.high, 0);
    if (unlikely(insns_left < 0)) {
        /* Ensure the zeroing of icount_decr.u16.high (see above) is
         * visible to other threads before reading the flags below.
         */
        smp_mb();
    }

    if (unlikely(atomic_read(&cpu->interrupt_request))) {
        int interrupt_request;
        qemu_mutex_lock_iothread();
        interrupt_request = cpu->interrupt_request;
        if (unlikely(cpu->singlestep_enabled & SSTEP_NOIRQ)) {
            /* Mask out external interrupts for this step. */
            interrupt_request &= ~CPU_INTERRUPT_SSTEP_MASK;
        }
        if (interrupt_request & CPU_INTERRUPT_DEBUG) {
            cpu->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
            cpu->exception_index = EXCP_DEBUG;
            qemu_mutex_unlock_iothread();
            return true;
        }
        if (replay_mode == REPLAY_MODE_PLAY && !replay_has_interrupt()) {
            /* Do nothing */
        } else if (interrupt_request & CPU_INTERRUPT_HALT) {
            replay_interrupt();
            cpu->interrupt_request &= ~CPU_INTERRUPT_HALT;
            cpu->halted = 1;
            cpu->exception_index = EXCP_HLT;
            qemu_mutex_unlock_iothread();
            return true;
        }
#if defined(TARGET_I386)
        else if (interrupt_request & CPU_INTERRUPT_INIT) {
            X86CPU *x86_cpu = X86_CPU(cpu);
            CPUArchState *env = &x86_cpu->env;
            replay_interrupt();
            cpu_svm_check_intercept_param(env, SVM_EXIT_INIT, 0, 0);
            do_cpu_init(x86_cpu);
            cpu->exception_index = EXCP_HALTED;
            qemu_mutex_unlock_iothread();
            return true;
        }
#else
        else if (interrupt_request & CPU_INTERRUPT_RESET) {
            replay_interrupt();
            cpu_reset(cpu);
            qemu_mutex_unlock_iothread();
            return true;
        }
#endif
        /* The target hook has 3 exit conditions:
           False when the interrupt isn't processed,
           True when it is, and we should restart on a new TB,
           and via longjmp via cpu_loop_exit.  */
        else {
            if (cc->cpu_exec_interrupt(cpu, interrupt_request)) {
                replay_interrupt();
                *last_tb = NULL;
            }
            /* The target hook may have updated the 'cpu->interrupt_request';
             * reload the 'interrupt_request' value */
            interrupt_request = cpu->interrupt_request;
        }
        if (interrupt_request & CPU_INTERRUPT_EXITTB) {
            cpu->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
            /* ensure that no TB jump will be modified as
               the program flow was changed */
            *last_tb = NULL;
        }

        /* If we exit via cpu_loop_exit/longjmp it is reset in cpu_exec */
        qemu_mutex_unlock_iothread();
    }

    /* Finally, check if we need to exit to the main loop.  */
    if (unlikely(atomic_read(&cpu->exit_request)
        || (use_icount && cpu->icount_decr.u16.low + cpu->icount_extra == 0))) {
        atomic_set(&cpu->exit_request, 0);
        cpu->exception_index = EXCP_INTERRUPT;
        return true;
    }

    return false;
}

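/* Execute @tb and update *last_tb/*tb_exit from the result.  When the
 * exit was forced (TB_EXIT_REQUESTED), TB chaining is abandoned; with
 * icount enabled the instruction budget is refilled, executing any short
 * remainder via cpu_exec_nocache().
 */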
static inline void cpu_loop_exec_tb(CPUState *cpu, TranslationBlock *tb,
                                    TranslationBlock **last_tb, int *tb_exit)
{
    uintptr_t ret;
    int32_t insns_left;

    trace_exec_tb(tb, tb->pc);
    ret = cpu_tb_exec(cpu, tb);
    tb = (TranslationBlock *)(ret & ~TB_EXIT_MASK);
    *tb_exit = ret & TB_EXIT_MASK;
    if (*tb_exit != TB_EXIT_REQUESTED) {
        *last_tb = tb;
        return;
    }

    *last_tb = NULL;
    insns_left = atomic_read(&cpu->icount_decr.u32);
    if (insns_left < 0) {
        /* Something asked us to stop executing chained TBs; just
         * continue round the main loop. Whatever requested the exit
         * will also have set something else (eg exit_request or
         * interrupt_request) which will be handled by
         * cpu_handle_interrupt.  cpu_handle_interrupt will also
         * clear cpu->icount_decr.u16.high.
         */
        return;
    }

    /* Instruction counter expired.  */
    assert(use_icount);
#ifndef CONFIG_USER_ONLY
    /* Ensure global icount has gone forward */
    cpu_update_icount(cpu);
    /* Refill decrementer and continue execution.  */
    insns_left = MIN(0xffff, cpu->icount_budget);
    cpu->icount_decr.u16.low = insns_left;
    cpu->icount_extra = cpu->icount_budget - insns_left;
    if (!cpu->icount_extra) {
        /* Execute any remaining instructions, then let the main loop
         * handle the next event.
         */
        if (insns_left > 0) {
            cpu_exec_nocache(cpu, insns_left, tb, false);
        }
    }
#endif
}

/* main execution loop */

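/* The outer loop handles pending exceptions; the inner loop handles
 * interrupt/exit requests and then finds and executes the next TB.  A
 * siglongjmp() to the setjmp context below is how guest exceptions and
 * exit requests unwind out of the generated code.
 */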
int cpu_exec(CPUState *cpu)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);
    int ret;
    SyncClocks sc = { 0 };

    /* replay_interrupt may need current_cpu */
    current_cpu = cpu;

    if (cpu_handle_halt(cpu)) {
        return EXCP_HALTED;
    }

    rcu_read_lock();

    cc->cpu_exec_enter(cpu);

    /* Calculate difference between guest clock and host clock.
     * This delay includes the delay of the last cycle, so
     * what we have to do is sleep until it is 0. As for the
     * advance/delay we gain here, we try to fix it next time.
     */
    init_delay_params(&sc, cpu);

    /* prepare setjmp context for exception handling */
    if (sigsetjmp(cpu->jmp_env, 0) != 0) {
#if defined(__clang__) || !QEMU_GNUC_PREREQ(4, 6)
        /* Some compilers wrongly smash all local variables after
         * siglongjmp. There were bug reports for gcc 4.5.0 and clang.
         * Restore them.
         */
        cpu = current_cpu;
        cc = CPU_GET_CLASS(cpu);
#else
        /* Non-buggy compilers preserve these locals; assert that
         * they have the correct value.
         */
        g_assert(cpu == current_cpu);
        g_assert(cc == CPU_GET_CLASS(cpu));
#endif
        cpu->can_do_io = 1;
        tb_lock_reset();
        if (qemu_mutex_iothread_locked()) {
            qemu_mutex_unlock_iothread();
        }
    }

    /* if an exception is pending, we execute it here */
    while (!cpu_handle_exception(cpu, &ret)) {
        TranslationBlock *last_tb = NULL;
        int tb_exit = 0;

        while (!cpu_handle_interrupt(cpu, &last_tb)) {
            uint32_t cflags = cpu->cflags_next_tb;
            TranslationBlock *tb;

            /* When requested, use an exact setting for cflags for the next
               execution.  This is used for icount, precise smc, and stop-
               after-access watchpoints.  Since this request should never
               have CF_INVALID set, -1 is a convenient invalid value that
               we can put in any TB's cflags.  */
            if (cflags == -1) {
                cflags = curr_cflags();
            } else {
                cpu->cflags_next_tb = -1;
            }

            tb = tb_find(cpu, last_tb, tb_exit, cflags);
            cpu_loop_exec_tb(cpu, tb, &last_tb, &tb_exit);
            /* Try to align the host and virtual clocks
               if the guest is in advance */
            align_clocks(&sc, cpu);
        }
    }

    cc->cpu_exec_exit(cpu);
    rcu_read_unlock();

    return ret;
}