/*
 *  emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "cpu.h"
#include "trace.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg.h"
#include "qemu/atomic.h"
#include "sysemu/qtest.h"
#include "qemu/timer.h"
#include "exec/address-spaces.h"
#include "qemu/rcu.h"
#include "exec/tb-hash.h"
#include "exec/tb-lookup.h"
#include "exec/log.h"
#include "qemu/main-loop.h"
#if defined(TARGET_I386) && !defined(CONFIG_USER_ONLY)
#include "hw/i386/apic.h"
#endif
#include "sysemu/cpus.h"
#include "sysemu/replay.h"

/* -icount align implementation. */

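/* Bookkeeping used by the -icount align code below:
 *   diff_clk        - ns by which the guest clock is ahead of the host
 *   last_cpu_icount - icount value at the previous alignment point
 *   realtime_clock  - QEMU_CLOCK_VIRTUAL_RT value captured at init
 */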
typedef struct SyncClocks {
    int64_t diff_clk;
    int64_t last_cpu_icount;
    int64_t realtime_clock;
} SyncClocks;

#if !defined(CONFIG_USER_ONLY)
/* Allow the guest to have a max 3ms advance.
 * The difference between the 2 clocks could therefore
 * oscillate around 0.
 */
#define VM_CLOCK_ADVANCE 3000000
#define THRESHOLD_REDUCE 1.5
#define MAX_DELAY_PRINT_RATE 2000000000LL
#define MAX_NB_PRINTS 100

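/* Sleep the host thread when the guest has run more than VM_CLOCK_ADVANCE
 * ns ahead of the host clock.  No-op unless -icount align is enabled.
 */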
static void align_clocks(SyncClocks *sc, const CPUState *cpu)
{
    int64_t cpu_icount;

    if (!icount_align_option) {
        return;
    }

    cpu_icount = cpu->icount_extra + cpu->icount_decr.u16.low;
    sc->diff_clk += cpu_icount_to_ns(sc->last_cpu_icount - cpu_icount);
    sc->last_cpu_icount = cpu_icount;

    if (sc->diff_clk > VM_CLOCK_ADVANCE) {
#ifndef _WIN32
        struct timespec sleep_delay, rem_delay;
        sleep_delay.tv_sec = sc->diff_clk / 1000000000LL;
        sleep_delay.tv_nsec = sc->diff_clk % 1000000000LL;
        if (nanosleep(&sleep_delay, &rem_delay) < 0) {
            sc->diff_clk = rem_delay.tv_sec * 1000000000LL + rem_delay.tv_nsec;
        } else {
            sc->diff_clk = 0;
        }
#else
        Sleep(sc->diff_clk / SCALE_MS);
        sc->diff_clk = 0;
#endif
    }
}

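/* Warn the user when the guest falls behind the host clock, printing at
 * most once every MAX_DELAY_PRINT_RATE ns and MAX_NB_PRINTS times total.
 */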
static void print_delay(const SyncClocks *sc)
{
    static float threshold_delay;
    static int64_t last_realtime_clock;
    static int nb_prints;

    if (icount_align_option &&
        sc->realtime_clock - last_realtime_clock >= MAX_DELAY_PRINT_RATE &&
        nb_prints < MAX_NB_PRINTS) {
        if ((-sc->diff_clk / (float)1000000000LL > threshold_delay) ||
            (-sc->diff_clk / (float)1000000000LL <
             (threshold_delay - THRESHOLD_REDUCE))) {
            threshold_delay = (-sc->diff_clk / 1000000000LL) + 1;
            printf("Warning: The guest is now late by %.1f to %.1f seconds\n",
                   threshold_delay - 1,
                   threshold_delay);
            nb_prints++;
            last_realtime_clock = sc->realtime_clock;
        }
    }
}

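/* Capture the initial host/guest clock offset and icount for the
 * alignment loop, and update the max_delay/max_advance statistics.
 */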
static void init_delay_params(SyncClocks *sc,
                              const CPUState *cpu)
{
    if (!icount_align_option) {
        return;
    }
    sc->realtime_clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL_RT);
    sc->diff_clk = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) - sc->realtime_clock;
    sc->last_cpu_icount = cpu->icount_extra + cpu->icount_decr.u16.low;
    if (sc->diff_clk < max_delay) {
        max_delay = sc->diff_clk;
    }
    if (sc->diff_clk > max_advance) {
        max_advance = sc->diff_clk;
    }

    /* Print every 2s max if the guest is late. We limit the number
       of printed messages to MAX_NB_PRINTS. This way, if the guest
       is really late, we don't print too much to the screen. */
    print_delay(sc);
}
#else
static void align_clocks(SyncClocks *sc, const CPUState *cpu)
{
}

static void init_delay_params(SyncClocks *sc, const CPUState *cpu)
{
}
#endif /* CONFIG_USER_ONLY */

/* Execute a TB, and fix up the CPU state afterwards if necessary */
static inline tcg_target_ulong cpu_tb_exec(CPUState *cpu, TranslationBlock *itb)
{
    CPUArchState *env = cpu->env_ptr;
    uintptr_t ret;
    TranslationBlock *last_tb;
    int tb_exit;
    uint8_t *tb_ptr = itb->tc.ptr;

    qemu_log_mask_and_addr(CPU_LOG_EXEC, itb->pc,
                           "Trace %d: %p ["
                           TARGET_FMT_lx "/" TARGET_FMT_lx "/%#x] %s\n",
                           cpu->cpu_index, itb->tc.ptr,
                           itb->cs_base, itb->pc, itb->flags,
                           lookup_symbol(itb->pc));

#if defined(DEBUG_DISAS)
    if (qemu_loglevel_mask(CPU_LOG_TB_CPU)
        && qemu_log_in_addr_range(itb->pc)) {
        qemu_log_lock();
#if defined(TARGET_I386)
        log_cpu_state(cpu, CPU_DUMP_CCOP);
#else
        log_cpu_state(cpu, 0);
#endif
        qemu_log_unlock();
    }
#endif /* DEBUG_DISAS */

    cpu->can_do_io = !use_icount;
    ret = tcg_qemu_tb_exec(env, tb_ptr);
    cpu->can_do_io = 1;
    last_tb = (TranslationBlock *)(ret & ~TB_EXIT_MASK);
    tb_exit = ret & TB_EXIT_MASK;
    trace_exec_tb_exit(last_tb, tb_exit);

    if (tb_exit > TB_EXIT_IDX1) {
        /* We didn't start executing this TB (eg because the instruction
         * counter hit zero); we must restore the guest PC to the address
         * of the start of the TB.
         */
        CPUClass *cc = CPU_GET_CLASS(cpu);
        qemu_log_mask_and_addr(CPU_LOG_EXEC, last_tb->pc,
                               "Stopped execution of TB chain before %p ["
                               TARGET_FMT_lx "] %s\n",
                               last_tb->tc.ptr, last_tb->pc,
                               lookup_symbol(last_tb->pc));
        if (cc->synchronize_from_tb) {
            cc->synchronize_from_tb(cpu, last_tb);
        } else {
            assert(cc->set_pc);
            cc->set_pc(cpu, last_tb->pc);
        }
    }
    return ret;
}

#ifndef CONFIG_USER_ONLY
/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
static void cpu_exec_nocache(CPUState *cpu, int max_cycles,
                             TranslationBlock *orig_tb, bool ignore_icount)
{
    TranslationBlock *tb;
    uint32_t cflags = curr_cflags() | CF_NOCACHE;

    if (ignore_icount) {
        cflags &= ~CF_USE_ICOUNT;
    }

    /* Should never happen.
       We only end up here when an existing TB is too long.  */
    cflags |= MIN(max_cycles, CF_COUNT_MASK);

    tb_lock();
    tb = tb_gen_code(cpu, orig_tb->pc, orig_tb->cs_base,
                     orig_tb->flags, cflags);
    tb->orig_tb = orig_tb;
    tb_unlock();

    /* execute the generated code */
    trace_exec_tb_nocache(tb, tb->pc);
    cpu_tb_exec(cpu, tb);

    tb_lock();
    tb_phys_invalidate(tb, -1);
    tb_remove(tb);
    tb_unlock();
}
#endif

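/* Execute a single-instruction TB (cflags == 1) inside an exclusive
 * region, with all other vCPUs stopped; used to implement EXCP_ATOMIC.
 * parallel_cpus is cleared so the code is generated without
 * parallel-execution constraints.
 */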
void cpu_exec_step_atomic(CPUState *cpu)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    uint32_t flags;
    uint32_t cflags = 1;
    uint32_t cf_mask = cflags & CF_HASH_MASK;
    /* volatile because we modify it between sigsetjmp and siglongjmp */
    volatile bool in_exclusive_region = false;

    if (sigsetjmp(cpu->jmp_env, 0) == 0) {
        tb = tb_lookup__cpu_state(cpu, &pc, &cs_base, &flags, cf_mask);
        if (tb == NULL) {
            mmap_lock();
            tb_lock();
            tb = tb_htable_lookup(cpu, pc, cs_base, flags, cf_mask);
            if (likely(tb == NULL)) {
                tb = tb_gen_code(cpu, pc, cs_base, flags, cflags);
            }
            tb_unlock();
            mmap_unlock();
        }

        start_exclusive();

        /* Since we got here, we know that parallel_cpus must be true.  */
        parallel_cpus = false;
        in_exclusive_region = true;
        cc->cpu_exec_enter(cpu);
        /* execute the generated code */
        trace_exec_tb(tb, pc);
        cpu_tb_exec(cpu, tb);
        cc->cpu_exec_exit(cpu);
    } else {
        /* We may have exited due to another problem here, so we need
         * to reset any tb_locks we may have taken but didn't release.
         * The mmap_lock is dropped by tb_gen_code if it runs out of
         * memory.
         */
#ifndef CONFIG_SOFTMMU
        tcg_debug_assert(!have_mmap_lock());
#endif
        tb_lock_reset();
    }

    if (in_exclusive_region) {
        /* We might longjump out of either the codegen or the
         * execution, so must make sure we only end the exclusive
         * region if we started it.
         */
        parallel_cpus = true;
        end_exclusive();
    }
}

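/* Lookup key for the physical TB hash table: tb_cmp() matches candidate
 * TBs against every field here, so TBs that differ in guest pc, physical
 * page, cs_base, flags, cflags hash bits or per-vCPU trace state are
 * considered distinct.
 */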
struct tb_desc {
    target_ulong pc;
    target_ulong cs_base;
    CPUArchState *env;
    tb_page_addr_t phys_page1;
    uint32_t flags;
    uint32_t cf_mask;
    uint32_t trace_vcpu_dstate;
};

static bool tb_cmp(const void *p, const void *d)
{
    const TranslationBlock *tb = p;
    const struct tb_desc *desc = d;

    if (tb->pc == desc->pc &&
        tb->page_addr[0] == desc->phys_page1 &&
        tb->cs_base == desc->cs_base &&
        tb->flags == desc->flags &&
        tb->trace_vcpu_dstate == desc->trace_vcpu_dstate &&
        (tb_cflags(tb) & (CF_HASH_MASK | CF_INVALID)) == desc->cf_mask) {
        /* check next page if needed */
        if (tb->page_addr[1] == -1) {
            return true;
        } else {
            tb_page_addr_t phys_page2;
            target_ulong virt_page2;

            virt_page2 = (desc->pc & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
            phys_page2 = get_page_addr_code(desc->env, virt_page2);
            if (tb->page_addr[1] == phys_page2) {
                return true;
            }
        }
    }
    return false;
}

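/* Look a TB up in the global physical-address hash table.  QHT lookups
 * are safe without holding tb_lock.
 */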
TranslationBlock *tb_htable_lookup(CPUState *cpu, target_ulong pc,
                                   target_ulong cs_base, uint32_t flags,
                                   uint32_t cf_mask)
{
    tb_page_addr_t phys_pc;
    struct tb_desc desc;
    uint32_t h;

    desc.env = (CPUArchState *)cpu->env_ptr;
    desc.cs_base = cs_base;
    desc.flags = flags;
    desc.cf_mask = cf_mask;
    desc.trace_vcpu_dstate = *cpu->trace_dstate;
    desc.pc = pc;
    phys_pc = get_page_addr_code(desc.env, pc);
    desc.phys_page1 = phys_pc & TARGET_PAGE_MASK;
    h = tb_hash_func(phys_pc, pc, flags, cf_mask, *cpu->trace_dstate);
    return qht_lookup(&tb_ctx.htable, tb_cmp, &desc, h);
}

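/* Redirect direct jump 'n' of 'tb' to 'addr': either patch the host
 * branch instruction in place, or store the target for an indirect
 * jump, depending on what the TCG backend supports.
 */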
void tb_set_jmp_target(TranslationBlock *tb, int n, uintptr_t addr)
{
    if (TCG_TARGET_HAS_direct_jump) {
        uintptr_t offset = tb->jmp_target_arg[n];
        uintptr_t tc_ptr = (uintptr_t)tb->tc.ptr;
        tb_target_set_jmp_target(tc_ptr, tc_ptr + offset, addr);
    } else {
        tb->jmp_target_arg[n] = addr;
    }
}

/* Called with tb_lock held.  */
static inline void tb_add_jump(TranslationBlock *tb, int n,
                               TranslationBlock *tb_next)
{
    assert(n < ARRAY_SIZE(tb->jmp_list_next));
    if (tb->jmp_list_next[n]) {
        /* Another thread has already done this while we were
         * outside of the lock; nothing to do in this case */
        return;
    }
    qemu_log_mask_and_addr(CPU_LOG_EXEC, tb->pc,
                           "Linking TBs %p [" TARGET_FMT_lx
                           "] index %d -> %p [" TARGET_FMT_lx "]\n",
                           tb->tc.ptr, tb->pc, n,
                           tb_next->tc.ptr, tb_next->pc);

    /* patch the native jump address */
    tb_set_jmp_target(tb, n, (uintptr_t)tb_next->tc.ptr);

    /* add in TB jmp circular list */
    tb->jmp_list_next[n] = tb_next->jmp_list_first;
    tb_next->jmp_list_first = (uintptr_t)tb | n;
}

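/* Find a TB matching the current CPU state, generating one if necessary,
 * and chain 'last_tb' to it when direct patching is safe.
 */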
static inline TranslationBlock *tb_find(CPUState *cpu,
                                        TranslationBlock *last_tb,
                                        int tb_exit, uint32_t cf_mask)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    uint32_t flags;
    bool acquired_tb_lock = false;

    tb = tb_lookup__cpu_state(cpu, &pc, &cs_base, &flags, cf_mask);
    if (tb == NULL) {
        /* mmap_lock is needed by tb_gen_code, and mmap_lock must be
         * taken outside tb_lock. As system emulation is currently
         * single threaded the locks are NOPs.
         */
        mmap_lock();
        tb_lock();
        acquired_tb_lock = true;

        /* There's a chance that our desired tb has been translated while
         * taking the locks so we check again inside the lock.
         */
        tb = tb_htable_lookup(cpu, pc, cs_base, flags, cf_mask);
        if (likely(tb == NULL)) {
            /* if no translated code available, then translate it now */
            tb = tb_gen_code(cpu, pc, cs_base, flags, cf_mask);
        }

        mmap_unlock();
        /* We add the TB in the virtual pc hash table for the fast lookup */
        atomic_set(&cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)], tb);
    }
#ifndef CONFIG_USER_ONLY
    /* We don't take care of direct jumps when address mapping changes in
     * system emulation. So it's not safe to make a direct jump to a TB
     * spanning two pages because the mapping for the second page can change.
     */
    if (tb->page_addr[1] != -1) {
        last_tb = NULL;
    }
#endif
    /* See if we can patch the calling TB. */
    if (last_tb && !qemu_loglevel_mask(CPU_LOG_TB_NOCHAIN)) {
        if (!acquired_tb_lock) {
            tb_lock();
            acquired_tb_lock = true;
        }
        if (!(tb->cflags & CF_INVALID)) {
            tb_add_jump(last_tb, tb_exit, tb);
        }
    }
    if (acquired_tb_lock) {
        tb_unlock();
    }
    return tb;
}

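/* Return true if the vCPU stays halted; on x86 a pending APIC interrupt
 * must be polled first, since it can bring the CPU out of halt.
 */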
static inline bool cpu_handle_halt(CPUState *cpu)
{
    if (cpu->halted) {
#if defined(TARGET_I386) && !defined(CONFIG_USER_ONLY)
        if ((cpu->interrupt_request & CPU_INTERRUPT_POLL)
            && replay_interrupt()) {
            X86CPU *x86_cpu = X86_CPU(cpu);
            qemu_mutex_lock_iothread();
            apic_poll_irq(x86_cpu->apic_state);
            cpu_reset_interrupt(cpu, CPU_INTERRUPT_POLL);
            qemu_mutex_unlock_iothread();
        }
#endif
        if (!cpu_has_work(cpu)) {
            return true;
        }

        cpu->halted = 0;
    }

    return false;
}

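/* Forward EXCP_DEBUG to the per-target handler, first clearing stale
 * watchpoint-hit flags when no watchpoint is currently reported.
 */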
static inline void cpu_handle_debug_exception(CPUState *cpu)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);
    CPUWatchpoint *wp;

    if (!cpu->watchpoint_hit) {
        QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }

    cc->debug_excp_handler(cpu);
}

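/* Return true (with *ret set) if the outer loop must exit for a pending
 * exception or exit request; in replay mode this also re-raises logged
 * exceptions at the recorded icount position.
 */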
static inline bool cpu_handle_exception(CPUState *cpu, int *ret)
{
    if (cpu->exception_index < 0) {
#ifndef CONFIG_USER_ONLY
        if (replay_has_exception()
            && cpu->icount_decr.u16.low + cpu->icount_extra == 0) {
            /* try to cause an exception pending in the log */
            cpu_exec_nocache(cpu, 1, tb_find(cpu, NULL, 0, curr_cflags()), true);
        }
#endif
        if (cpu->exception_index < 0) {
            return false;
        }
    }

    if (cpu->exception_index >= EXCP_INTERRUPT) {
        /* exit request from the cpu execution loop */
        *ret = cpu->exception_index;
        if (*ret == EXCP_DEBUG) {
            cpu_handle_debug_exception(cpu);
        }
        cpu->exception_index = -1;
        return true;
    } else {
#if defined(CONFIG_USER_ONLY)
        /* if user mode only, we simulate a fake exception
           which will be handled outside the cpu execution
           loop */
#if defined(TARGET_I386)
        CPUClass *cc = CPU_GET_CLASS(cpu);
        cc->do_interrupt(cpu);
#endif
        *ret = cpu->exception_index;
        cpu->exception_index = -1;
        return true;
#else
        if (replay_exception()) {
            CPUClass *cc = CPU_GET_CLASS(cpu);
            qemu_mutex_lock_iothread();
            cc->do_interrupt(cpu);
            qemu_mutex_unlock_iothread();
            cpu->exception_index = -1;
        } else if (!replay_has_interrupt()) {
            /* give a chance to iothread in replay mode */
            *ret = EXCP_INTERRUPT;
            return true;
        }
#endif
    }

    return false;
}

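/* Process pending interrupt_request flags and host exit requests under
 * the BQL.  Returns true when the inner loop must restart; *last_tb is
 * cleared whenever control flow may have changed, so no stale TB chain
 * is patched.
 */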
static inline bool cpu_handle_interrupt(CPUState *cpu,
                                        TranslationBlock **last_tb)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

    /* Clear the interrupt flag now since we're processing
     * cpu->interrupt_request and cpu->exit_request.
     * Ensure zeroing happens before reading cpu->exit_request or
     * cpu->interrupt_request (see also smp_wmb in cpu_exit())
     */
    atomic_mb_set(&cpu->icount_decr.u16.high, 0);

    if (unlikely(atomic_read(&cpu->interrupt_request))) {
        int interrupt_request;
        qemu_mutex_lock_iothread();
        interrupt_request = cpu->interrupt_request;
        if (unlikely(cpu->singlestep_enabled & SSTEP_NOIRQ)) {
            /* Mask out external interrupts for this step. */
            interrupt_request &= ~CPU_INTERRUPT_SSTEP_MASK;
        }
        if (interrupt_request & CPU_INTERRUPT_DEBUG) {
            cpu->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
            cpu->exception_index = EXCP_DEBUG;
            qemu_mutex_unlock_iothread();
            return true;
        }
        if (replay_mode == REPLAY_MODE_PLAY && !replay_has_interrupt()) {
            /* Do nothing */
        } else if (interrupt_request & CPU_INTERRUPT_HALT) {
            replay_interrupt();
            cpu->interrupt_request &= ~CPU_INTERRUPT_HALT;
            cpu->halted = 1;
            cpu->exception_index = EXCP_HLT;
            qemu_mutex_unlock_iothread();
            return true;
        }
#if defined(TARGET_I386)
        else if (interrupt_request & CPU_INTERRUPT_INIT) {
            X86CPU *x86_cpu = X86_CPU(cpu);
            CPUArchState *env = &x86_cpu->env;
            replay_interrupt();
            cpu_svm_check_intercept_param(env, SVM_EXIT_INIT, 0, 0);
            do_cpu_init(x86_cpu);
            cpu->exception_index = EXCP_HALTED;
            qemu_mutex_unlock_iothread();
            return true;
        }
#else
        else if (interrupt_request & CPU_INTERRUPT_RESET) {
            replay_interrupt();
            cpu_reset(cpu);
            qemu_mutex_unlock_iothread();
            return true;
        }
#endif
        /* The target hook has 3 exit conditions:
           False when the interrupt isn't processed,
           True when it is, and we should restart on a new TB,
           and via longjmp via cpu_loop_exit.  */
        else {
            if (cc->cpu_exec_interrupt(cpu, interrupt_request)) {
                replay_interrupt();
                cpu->exception_index = -1;
                *last_tb = NULL;
            }
            /* The target hook may have updated the 'cpu->interrupt_request';
             * reload the 'interrupt_request' value */
            interrupt_request = cpu->interrupt_request;
        }
        if (interrupt_request & CPU_INTERRUPT_EXITTB) {
            cpu->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
            /* ensure that no TB jump will be modified as
               the program flow was changed */
            *last_tb = NULL;
        }

        /* If we exit via cpu_loop_exit/longjmp it is reset in cpu_exec */
        qemu_mutex_unlock_iothread();
    }

    /* Finally, check if we need to exit to the main loop.  */
    if (unlikely(atomic_read(&cpu->exit_request)
        || (use_icount && cpu->icount_decr.u16.low + cpu->icount_extra == 0))) {
        atomic_set(&cpu->exit_request, 0);
        if (cpu->exception_index == -1) {
            cpu->exception_index = EXCP_INTERRUPT;
        }
        return true;
    }

    return false;
}

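/* Execute 'tb': remember it in *last_tb for chaining if it completed,
 * or, when the icount budget expired, refill the decrementer and run
 * the remaining instructions with a one-off uncached TB.
 */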
static inline void cpu_loop_exec_tb(CPUState *cpu, TranslationBlock *tb,
                                    TranslationBlock **last_tb, int *tb_exit)
{
    uintptr_t ret;
    int32_t insns_left;

    trace_exec_tb(tb, tb->pc);
    ret = cpu_tb_exec(cpu, tb);
    tb = (TranslationBlock *)(ret & ~TB_EXIT_MASK);
    *tb_exit = ret & TB_EXIT_MASK;
    if (*tb_exit != TB_EXIT_REQUESTED) {
        *last_tb = tb;
        return;
    }

    *last_tb = NULL;
    insns_left = atomic_read(&cpu->icount_decr.u32);
    if (insns_left < 0) {
        /* Something asked us to stop executing chained TBs; just
         * continue round the main loop. Whatever requested the exit
         * will also have set something else (eg exit_request or
         * interrupt_request) which will be handled by
         * cpu_handle_interrupt.  cpu_handle_interrupt will also
         * clear cpu->icount_decr.u16.high.
         */
        return;
    }

    /* Instruction counter expired.  */
    assert(use_icount);
#ifndef CONFIG_USER_ONLY
    /* Ensure global icount has gone forward */
    cpu_update_icount(cpu);
    /* Refill decrementer and continue execution.  */
    insns_left = MIN(0xffff, cpu->icount_budget);
    cpu->icount_decr.u16.low = insns_left;
    cpu->icount_extra = cpu->icount_budget - insns_left;
    if (!cpu->icount_extra) {
        /* Execute any remaining instructions, then let the main loop
         * handle the next event.
         */
        if (insns_left > 0) {
            cpu_exec_nocache(cpu, insns_left, tb, false);
        }
    }
#endif
}

/* main execution loop */

int cpu_exec(CPUState *cpu)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);
    int ret;
    SyncClocks sc = { 0 };

    /* replay_interrupt may need current_cpu */
    current_cpu = cpu;

    if (cpu_handle_halt(cpu)) {
        return EXCP_HALTED;
    }

    rcu_read_lock();

    cc->cpu_exec_enter(cpu);

    /* Calculate difference between guest clock and host clock.
     * This delay includes the delay of the last cycle, so
     * what we have to do is sleep until it is 0. As for the
     * advance/delay we gain here, we try to fix it next time.
     */
    init_delay_params(&sc, cpu);

    /* prepare setjmp context for exception handling */
    if (sigsetjmp(cpu->jmp_env, 0) != 0) {
#if defined(__clang__) || !QEMU_GNUC_PREREQ(4, 6)
        /* Some compilers wrongly smash all local variables after
         * siglongjmp. There were bug reports for gcc 4.5.0 and clang.
         * Reload essential local variables here for those compilers.
         * Newer versions of gcc would complain about this code (-Wclobbered).
         */
        cpu = current_cpu;
        cc = CPU_GET_CLASS(cpu);
#else
        /* Assert that the compiler does not smash local variables. */
        g_assert(cpu == current_cpu);
        g_assert(cc == CPU_GET_CLASS(cpu));
#endif
        tb_lock_reset();
        if (qemu_mutex_iothread_locked()) {
            qemu_mutex_unlock_iothread();
        }
    }

    /* if an exception is pending, we execute it here */
    while (!cpu_handle_exception(cpu, &ret)) {
        TranslationBlock *last_tb = NULL;
        int tb_exit = 0;

        while (!cpu_handle_interrupt(cpu, &last_tb)) {
            uint32_t cflags = cpu->cflags_next_tb;
            TranslationBlock *tb;

            /* When requested, use an exact setting for cflags for the next
               execution.  This is used for icount, precise smc, and stop-
               after-access watchpoints.  Since this request should never
               have CF_INVALID set, -1 is a convenient invalid value that
               does not require tcg headers for cpu_common_reset.  */
            if (cflags == -1) {
                cflags = curr_cflags();
            } else {
                cpu->cflags_next_tb = -1;
            }

            tb = tb_find(cpu, last_tb, tb_exit, cflags);
            cpu_loop_exec_tb(cpu, tb, &last_tb, &tb_exit);
            /* Try to align the host and virtual clocks
               if the guest is in advance */
            align_clocks(&sc, cpu);
        }
    }

    cc->cpu_exec_exit(cpu);
    rcu_read_unlock();

    return ret;
}