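/* emulator main execution loop */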
#include "qemu/osdep.h"
#include "qemu-common.h"
#include "cpu.h"
#include "trace.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg.h"
#include "qemu/atomic.h"
#include "sysemu/qtest.h"
#include "qemu/timer.h"
#include "qemu/rcu.h"
#include "exec/tb-hash.h"
#include "exec/tb-lookup.h"
#include "exec/log.h"
#include "qemu/main-loop.h"
#if defined(TARGET_I386) && !defined(CONFIG_USER_ONLY)
#include "hw/i386/apic.h"
#endif
#include "sysemu/cpus.h"
#include "sysemu/replay.h"

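/* -icount align implementation. */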
typedef struct SyncClocks {
    int64_t diff_clk;
    int64_t last_cpu_icount;
    int64_t realtime_clock;
} SyncClocks;

#if !defined(CONFIG_USER_ONLY)
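/* Allow the guest to have a max 3ms advance.
 * The difference between the 2 clocks could therefore
 * oscillate around 0.
 */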
#define VM_CLOCK_ADVANCE 3000000
#define THRESHOLD_REDUCE 1.5
#define MAX_DELAY_PRINT_RATE 2000000000LL
#define MAX_NB_PRINTS 100

static void align_clocks(SyncClocks *sc, CPUState *cpu)
{
    int64_t cpu_icount;

    if (!icount_align_option) {
        return;
    }

    cpu_icount = cpu->icount_extra + cpu_neg(cpu)->icount_decr.u16.low;
    sc->diff_clk += cpu_icount_to_ns(sc->last_cpu_icount - cpu_icount);
    sc->last_cpu_icount = cpu_icount;

    if (sc->diff_clk > VM_CLOCK_ADVANCE) {
#ifndef _WIN32
        struct timespec sleep_delay, rem_delay;
        sleep_delay.tv_sec = sc->diff_clk / 1000000000LL;
        sleep_delay.tv_nsec = sc->diff_clk % 1000000000LL;
        if (nanosleep(&sleep_delay, &rem_delay) < 0) {
            sc->diff_clk = rem_delay.tv_sec * 1000000000LL + rem_delay.tv_nsec;
        } else {
            sc->diff_clk = 0;
        }
#else
        Sleep(sc->diff_clk / SCALE_MS);
        sc->diff_clk = 0;
#endif
    }
}

static void print_delay(const SyncClocks *sc)
{
    static float threshold_delay;
    static int64_t last_realtime_clock;
    static int nb_prints;

    if (icount_align_option &&
        sc->realtime_clock - last_realtime_clock >= MAX_DELAY_PRINT_RATE &&
        nb_prints < MAX_NB_PRINTS) {
        if ((-sc->diff_clk / (float)1000000000LL > threshold_delay) ||
            (-sc->diff_clk / (float)1000000000LL <
             (threshold_delay - THRESHOLD_REDUCE))) {
            threshold_delay = (-sc->diff_clk / 1000000000LL) + 1;
            printf("Warning: The guest is now late by %.1f to %.1f seconds\n",
                   threshold_delay - 1,
                   threshold_delay);
            nb_prints++;
            last_realtime_clock = sc->realtime_clock;
        }
    }
}

static void init_delay_params(SyncClocks *sc, CPUState *cpu)
{
    if (!icount_align_option) {
        return;
    }
    sc->realtime_clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL_RT);
    sc->diff_clk = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) - sc->realtime_clock;
    sc->last_cpu_icount
        = cpu->icount_extra + cpu_neg(cpu)->icount_decr.u16.low;
    if (sc->diff_clk < max_delay) {
        max_delay = sc->diff_clk;
    }
    if (sc->diff_clk > max_advance) {
        max_advance = sc->diff_clk;
    }

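    /* Print every 2s max if the guest is late. We limit the number
       of printed messages to MAX_NB_PRINTS (currently 100). */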
    print_delay(sc);
}
#else
static void align_clocks(SyncClocks *sc, const CPUState *cpu)
{
}

static void init_delay_params(SyncClocks *sc, const CPUState *cpu)
{
}
#endif /* CONFIG_USER_ONLY */

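/* Execute a TB, and fix up the CPU state afterwards if necessary */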
static inline tcg_target_ulong cpu_tb_exec(CPUState *cpu, TranslationBlock *itb)
{
    CPUArchState *env = cpu->env_ptr;
    uintptr_t ret;
    TranslationBlock *last_tb;
    int tb_exit;
    uint8_t *tb_ptr = itb->tc.ptr;

    qemu_log_mask_and_addr(CPU_LOG_EXEC, itb->pc,
                           "Trace %d: %p ["
                           TARGET_FMT_lx "/" TARGET_FMT_lx "/%#x] %s\n",
                           cpu->cpu_index, itb->tc.ptr,
                           itb->cs_base, itb->pc, itb->flags,
                           lookup_symbol(itb->pc));

#if defined(DEBUG_DISAS)
    if (qemu_loglevel_mask(CPU_LOG_TB_CPU)
        && qemu_log_in_addr_range(itb->pc)) {
        qemu_log_lock();
        int flags = 0;
        if (qemu_loglevel_mask(CPU_LOG_TB_FPU)) {
            flags |= CPU_DUMP_FPU;
        }
#if defined(TARGET_I386)
        flags |= CPU_DUMP_CCOP;
#endif
        log_cpu_state(cpu, flags);
        qemu_log_unlock();
    }
#endif /* DEBUG_DISAS */

    cpu->can_do_io = !use_icount;
    ret = tcg_qemu_tb_exec(env, tb_ptr);
    cpu->can_do_io = 1;
    last_tb = (TranslationBlock *)(ret & ~TB_EXIT_MASK);
    tb_exit = ret & TB_EXIT_MASK;
    trace_exec_tb_exit(last_tb, tb_exit);

    if (tb_exit > TB_EXIT_IDX1) {
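        /* We didn't start executing this TB (eg because the instruction
         * counter hit zero); we must restore the guest PC to the address
         * of the start of the TB.
         */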
        CPUClass *cc = CPU_GET_CLASS(cpu);
        qemu_log_mask_and_addr(CPU_LOG_EXEC, last_tb->pc,
                               "Stopped execution of TB chain before %p ["
                               TARGET_FMT_lx "] %s\n",
                               last_tb->tc.ptr, last_tb->pc,
                               lookup_symbol(last_tb->pc));
        if (cc->synchronize_from_tb) {
            cc->synchronize_from_tb(cpu, last_tb);
        } else {
            assert(cc->set_pc);
            cc->set_pc(cpu, last_tb->pc);
        }
    }
    return ret;
}

#ifndef CONFIG_USER_ONLY
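/* Execute the code without caching the generated code. An interpreter
   could be used if available. */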
static void cpu_exec_nocache(CPUState *cpu, int max_cycles,
                             TranslationBlock *orig_tb, bool ignore_icount)
{
    TranslationBlock *tb;
    uint32_t cflags = curr_cflags() | CF_NOCACHE;

    if (ignore_icount) {
        cflags &= ~CF_USE_ICOUNT;
    }

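    /* Should never happen.
       We only end up here when an existing TB is too long.  */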
    cflags |= MIN(max_cycles, CF_COUNT_MASK);

    mmap_lock();
    tb = tb_gen_code(cpu, orig_tb->pc, orig_tb->cs_base,
                     orig_tb->flags, cflags);
    tb->orig_tb = orig_tb;
    mmap_unlock();

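    /* execute the generated code */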
    trace_exec_tb_nocache(tb, tb->pc);
    cpu_tb_exec(cpu, tb);

    mmap_lock();
    tb_phys_invalidate(tb, -1);
    mmap_unlock();
    tcg_tb_remove(tb);
}
#endif

void cpu_exec_step_atomic(CPUState *cpu)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    uint32_t flags;
    uint32_t cflags = 1;
    uint32_t cf_mask = cflags & CF_HASH_MASK;
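    /* volatile because we modify it between setjmp and longjmp */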
    volatile bool in_exclusive_region = false;

    if (sigsetjmp(cpu->jmp_env, 0) == 0) {
        tb = tb_lookup__cpu_state(cpu, &pc, &cs_base, &flags, cf_mask);
        if (tb == NULL) {
            mmap_lock();
            tb = tb_gen_code(cpu, pc, cs_base, flags, cflags);
            mmap_unlock();
        }

        start_exclusive();

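        /* Since we got here, we know that parallel_cpus must be true.  */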
        parallel_cpus = false;
        in_exclusive_region = true;
        cc->cpu_exec_enter(cpu);

        trace_exec_tb(tb, pc);
        cpu_tb_exec(cpu, tb);
        cc->cpu_exec_exit(cpu);
    } else {
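        /*
         * The mmap_lock is dropped by tb_gen_code if it runs out of
         * memory.
         */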
#ifndef CONFIG_SOFTMMU
        tcg_debug_assert(!have_mmap_lock());
#endif
        if (qemu_mutex_iothread_locked()) {
            qemu_mutex_unlock_iothread();
        }
        assert_no_pages_locked();
    }

    if (in_exclusive_region) {
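        /* We might longjump out of either the codegen or the
         * execution, so must make sure we only end the exclusive
         * region if we started it.
         */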
        parallel_cpus = true;
        end_exclusive();
    }
}

struct tb_desc {
    target_ulong pc;
    target_ulong cs_base;
    CPUArchState *env;
    tb_page_addr_t phys_page1;
    uint32_t flags;
    uint32_t cf_mask;
    uint32_t trace_vcpu_dstate;
};

static bool tb_lookup_cmp(const void *p, const void *d)
{
    const TranslationBlock *tb = p;
    const struct tb_desc *desc = d;

    if (tb->pc == desc->pc &&
        tb->page_addr[0] == desc->phys_page1 &&
        tb->cs_base == desc->cs_base &&
        tb->flags == desc->flags &&
        tb->trace_vcpu_dstate == desc->trace_vcpu_dstate &&
        (tb_cflags(tb) & (CF_HASH_MASK | CF_INVALID)) == desc->cf_mask) {
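        /* check next page if needed */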
        if (tb->page_addr[1] == -1) {
            return true;
        } else {
            tb_page_addr_t phys_page2;
            target_ulong virt_page2;

            virt_page2 = (desc->pc & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
            phys_page2 = get_page_addr_code(desc->env, virt_page2);
            if (tb->page_addr[1] == phys_page2) {
                return true;
            }
        }
    }
    return false;
}

TranslationBlock *tb_htable_lookup(CPUState *cpu, target_ulong pc,
                                   target_ulong cs_base, uint32_t flags,
                                   uint32_t cf_mask)
{
    tb_page_addr_t phys_pc;
    struct tb_desc desc;
    uint32_t h;

    desc.env = (CPUArchState *)cpu->env_ptr;
    desc.cs_base = cs_base;
    desc.flags = flags;
    desc.cf_mask = cf_mask;
    desc.trace_vcpu_dstate = *cpu->trace_dstate;
    desc.pc = pc;
    phys_pc = get_page_addr_code(desc.env, pc);
    if (phys_pc == -1) {
        return NULL;
    }
    desc.phys_page1 = phys_pc & TARGET_PAGE_MASK;
    h = tb_hash_func(phys_pc, pc, flags, cf_mask, *cpu->trace_dstate);
    return qht_lookup_custom(&tb_ctx.htable, &desc, h, tb_lookup_cmp);
}

void tb_set_jmp_target(TranslationBlock *tb, int n, uintptr_t addr)
{
    if (TCG_TARGET_HAS_direct_jump) {
        uintptr_t offset = tb->jmp_target_arg[n];
        uintptr_t tc_ptr = (uintptr_t)tb->tc.ptr;
        tb_target_set_jmp_target(tc_ptr, tc_ptr + offset, addr);
    } else {
        tb->jmp_target_arg[n] = addr;
    }
}

static inline void tb_add_jump(TranslationBlock *tb, int n,
                               TranslationBlock *tb_next)
{
    uintptr_t old;

    assert(n < ARRAY_SIZE(tb->jmp_list_next));
    qemu_spin_lock(&tb_next->jmp_lock);

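    /* make sure the destination TB is valid */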
    if (tb_next->cflags & CF_INVALID) {
        goto out_unlock_next;
    }

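    /* Atomically claim the jump destination slot only if it was NULL */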
    old = atomic_cmpxchg(&tb->jmp_dest[n], (uintptr_t)NULL, (uintptr_t)tb_next);
    if (old) {
        goto out_unlock_next;
    }

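    /* patch the native jump address */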
    tb_set_jmp_target(tb, n, (uintptr_t)tb_next->tc.ptr);

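    /* add in TB jmp list */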
    tb->jmp_list_next[n] = tb_next->jmp_list_head;
    tb_next->jmp_list_head = (uintptr_t)tb | n;

    qemu_spin_unlock(&tb_next->jmp_lock);

    qemu_log_mask_and_addr(CPU_LOG_EXEC, tb->pc,
                           "Linking TBs %p [" TARGET_FMT_lx
                           "] index %d -> %p [" TARGET_FMT_lx "]\n",
                           tb->tc.ptr, tb->pc, n,
                           tb_next->tc.ptr, tb_next->pc);
    return;

 out_unlock_next:
    qemu_spin_unlock(&tb_next->jmp_lock);
    return;
}

static inline TranslationBlock *tb_find(CPUState *cpu,
                                        TranslationBlock *last_tb,
                                        int tb_exit, uint32_t cf_mask)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    uint32_t flags;

    tb = tb_lookup__cpu_state(cpu, &pc, &cs_base, &flags, cf_mask);
    if (tb == NULL) {
        mmap_lock();
        tb = tb_gen_code(cpu, pc, cs_base, flags, cf_mask);
        mmap_unlock();
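        /* We add the TB in the virtual pc hash table for the fast lookup */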
        atomic_set(&cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)], tb);
    }
#ifndef CONFIG_USER_ONLY
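    /* We don't take care of direct jumps when address mapping changes in
     * system emulation. So it's not safe to make a direct jump to a TB
     * spanning two pages because the mapping for the second page can change.
     */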
    if (tb->page_addr[1] != -1) {
        last_tb = NULL;
    }
#endif
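    /* See if we can patch the calling TB. */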
    if (last_tb) {
        tb_add_jump(last_tb, tb_exit, tb);
    }
    return tb;
}

static inline bool cpu_handle_halt(CPUState *cpu)
{
    if (cpu->halted) {
#if defined(TARGET_I386) && !defined(CONFIG_USER_ONLY)
        if ((cpu->interrupt_request & CPU_INTERRUPT_POLL)
            && replay_interrupt()) {
            X86CPU *x86_cpu = X86_CPU(cpu);
            qemu_mutex_lock_iothread();
            apic_poll_irq(x86_cpu->apic_state);
            cpu_reset_interrupt(cpu, CPU_INTERRUPT_POLL);
            qemu_mutex_unlock_iothread();
        }
#endif
        if (!cpu_has_work(cpu)) {
            return true;
        }

        cpu->halted = 0;
    }

    return false;
}

static inline void cpu_handle_debug_exception(CPUState *cpu)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);
    CPUWatchpoint *wp;

    if (!cpu->watchpoint_hit) {
        QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }

    cc->debug_excp_handler(cpu);
}

static inline bool cpu_handle_exception(CPUState *cpu, int *ret)
{
    if (cpu->exception_index < 0) {
#ifndef CONFIG_USER_ONLY
        if (replay_has_exception()
            && cpu_neg(cpu)->icount_decr.u16.low + cpu->icount_extra == 0) {
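            /* try to cause an exception pending in the log */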
            cpu_exec_nocache(cpu, 1, tb_find(cpu, NULL, 0, curr_cflags()), true);
        }
#endif
        if (cpu->exception_index < 0) {
            return false;
        }
    }

    if (cpu->exception_index >= EXCP_INTERRUPT) {
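        /* exit request from the cpu execution loop */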
        *ret = cpu->exception_index;
        if (*ret == EXCP_DEBUG) {
            cpu_handle_debug_exception(cpu);
        }
        cpu->exception_index = -1;
        return true;
    } else {
#if defined(CONFIG_USER_ONLY)
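        /* if user mode only, we simulate a fake exception
           which will be handled outside the cpu execution
           loop */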
#if defined(TARGET_I386)
        CPUClass *cc = CPU_GET_CLASS(cpu);
        cc->do_interrupt(cpu);
#endif
        *ret = cpu->exception_index;
        cpu->exception_index = -1;
        return true;
#else
        if (replay_exception()) {
            CPUClass *cc = CPU_GET_CLASS(cpu);
            qemu_mutex_lock_iothread();
            cc->do_interrupt(cpu);
            qemu_mutex_unlock_iothread();
            cpu->exception_index = -1;
        } else if (!replay_has_interrupt()) {
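            /* give a chance to iothread in replay mode */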
            *ret = EXCP_INTERRUPT;
            return true;
        }
#endif
    }

    return false;
}

static inline bool cpu_handle_interrupt(CPUState *cpu,
                                        TranslationBlock **last_tb)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

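    /* Clear the interrupt flag now since we're processing
     * cpu->interrupt_request and cpu->exit_request.
     * Ensure zeroing happens before reading cpu->exit_request or
     * cpu->interrupt_request (see also smp_wmb in cpu_exit())
     */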
    atomic_mb_set(&cpu_neg(cpu)->icount_decr.u16.high, 0);

    if (unlikely(atomic_read(&cpu->interrupt_request))) {
        int interrupt_request;
        qemu_mutex_lock_iothread();
        interrupt_request = cpu->interrupt_request;
        if (unlikely(cpu->singlestep_enabled & SSTEP_NOIRQ)) {
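            /* Mask out external interrupts for this step. */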
            interrupt_request &= ~CPU_INTERRUPT_SSTEP_MASK;
        }
        if (interrupt_request & CPU_INTERRUPT_DEBUG) {
            cpu->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
            cpu->exception_index = EXCP_DEBUG;
            qemu_mutex_unlock_iothread();
            return true;
        }
        if (replay_mode == REPLAY_MODE_PLAY && !replay_has_interrupt()) {
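            /* Do nothing */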
        } else if (interrupt_request & CPU_INTERRUPT_HALT) {
            replay_interrupt();
            cpu->interrupt_request &= ~CPU_INTERRUPT_HALT;
            cpu->halted = 1;
            cpu->exception_index = EXCP_HLT;
            qemu_mutex_unlock_iothread();
            return true;
        }
#if defined(TARGET_I386)
        else if (interrupt_request & CPU_INTERRUPT_INIT) {
            X86CPU *x86_cpu = X86_CPU(cpu);
            CPUArchState *env = &x86_cpu->env;
            replay_interrupt();
            cpu_svm_check_intercept_param(env, SVM_EXIT_INIT, 0, 0);
            do_cpu_init(x86_cpu);
            cpu->exception_index = EXCP_HALTED;
            qemu_mutex_unlock_iothread();
            return true;
        }
#else
        else if (interrupt_request & CPU_INTERRUPT_RESET) {
            replay_interrupt();
            cpu_reset(cpu);
            qemu_mutex_unlock_iothread();
            return true;
        }
#endif
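        /* The target hook has 3 exit conditions:
           False when the interrupt isn't processed,
           True when it is, and we should restart on a new TB,
           and via longjmp via cpu_loop_exit.  */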
        else {
            if (cc->cpu_exec_interrupt(cpu, interrupt_request)) {
                replay_interrupt();
                cpu->exception_index = -1;
                *last_tb = NULL;
            }
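            /* The target hook may have updated the 'cpu->interrupt_request';
             * reload the 'interrupt_request' value */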
            interrupt_request = cpu->interrupt_request;
        }
        if (interrupt_request & CPU_INTERRUPT_EXITTB) {
            cpu->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
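            /* ensure that no TB jump will be modified as
               the program flow was changed */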
            *last_tb = NULL;
        }

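        /* If we exit via cpu_loop_exit/longjmp it is reset in cpu_exec */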
        qemu_mutex_unlock_iothread();
    }

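    /* Finally, check if we need to exit to the main loop.  */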
    if (unlikely(atomic_read(&cpu->exit_request))
        || (use_icount
            && cpu_neg(cpu)->icount_decr.u16.low + cpu->icount_extra == 0)) {
        atomic_set(&cpu->exit_request, 0);
        if (cpu->exception_index == -1) {
            cpu->exception_index = EXCP_INTERRUPT;
        }
        return true;
    }

    return false;
}

static inline void cpu_loop_exec_tb(CPUState *cpu, TranslationBlock *tb,
                                    TranslationBlock **last_tb, int *tb_exit)
{
    uintptr_t ret;
    int32_t insns_left;

    trace_exec_tb(tb, tb->pc);
    ret = cpu_tb_exec(cpu, tb);
    tb = (TranslationBlock *)(ret & ~TB_EXIT_MASK);
    *tb_exit = ret & TB_EXIT_MASK;
    if (*tb_exit != TB_EXIT_REQUESTED) {
        *last_tb = tb;
        return;
    }

    *last_tb = NULL;
    insns_left = atomic_read(&cpu_neg(cpu)->icount_decr.u32);
    if (insns_left < 0) {
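        /* Something asked us to stop executing chained TBs; just
         * continue round the main loop. Whatever requested the exit
         * will also have set something else (eg exit_request or
         * interrupt_request) which will be handled by
         * cpu_handle_interrupt.  cpu_handle_interrupt will also
         * clear cpu->icount_decr.u16.high.
         */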
        return;
    }

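    /* Instruction counter expired.  */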
    assert(use_icount);
#ifndef CONFIG_USER_ONLY
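    /* Ensure global icount has gone forward */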
    cpu_update_icount(cpu);
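    /* Refill decrementer and continue execution.  */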
    insns_left = MIN(0xffff, cpu->icount_budget);
    cpu_neg(cpu)->icount_decr.u16.low = insns_left;
    cpu->icount_extra = cpu->icount_budget - insns_left;
    if (!cpu->icount_extra) {
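        /* Execute any remaining instructions, then let the main loop
         * handle the next event.
         */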
        if (insns_left > 0) {
            cpu_exec_nocache(cpu, insns_left, tb, false);
        }
    }
#endif
}

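/* main execution loop */
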
int cpu_exec(CPUState *cpu)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);
    int ret;
    SyncClocks sc = { 0 };

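    /* replay_interrupt may need current_cpu */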
    current_cpu = cpu;

    if (cpu_handle_halt(cpu)) {
        return EXCP_HALTED;
    }

    rcu_read_lock();

    cc->cpu_exec_enter(cpu);

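    /* Calculate difference between guest clock and host clock.
     * This delay includes the delay of the last cycle, so
     * what we have to do is sleep until it is 0. As for the
     * advance/delay we gain here, we try to fix it next time.
     */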
    init_delay_params(&sc, cpu);

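    /* prepare setjmp context for exception handling */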
    if (sigsetjmp(cpu->jmp_env, 0) != 0) {
#if defined(__clang__) || !QEMU_GNUC_PREREQ(4, 6)
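        /* Some compilers wrongly smash all local variables after
         * siglongjmp. There were bug reports for gcc 4.5.0 and clang.
         * Reload essential local variables here for those compilers.
         * Newer versions of gcc would complain about this code (-Wclobbered). */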
        cpu = current_cpu;
        cc = CPU_GET_CLASS(cpu);
#else /* buggy compiler */
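        /* Assert that the compiler does not smash local variables. */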
        g_assert(cpu == current_cpu);
        g_assert(cc == CPU_GET_CLASS(cpu));
#endif /* buggy compiler */
#ifndef CONFIG_SOFTMMU
        tcg_debug_assert(!have_mmap_lock());
#endif
        if (qemu_mutex_iothread_locked()) {
            qemu_mutex_unlock_iothread();
        }
        assert_no_pages_locked();
    }

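    /* if an exception is pending, we execute it here */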
    while (!cpu_handle_exception(cpu, &ret)) {
        TranslationBlock *last_tb = NULL;
        int tb_exit = 0;

        while (!cpu_handle_interrupt(cpu, &last_tb)) {
            uint32_t cflags = cpu->cflags_next_tb;
            TranslationBlock *tb;

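            /* When requested, use an exact setting for cflags for the next
               execution.  This is used for icount, precise smc, and stop-
               after-access watchpoints.  Since this request should never
               have CF_INVALID set, -1 is a convenient invalid value that
               we can use here.
             */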
            if (cflags == -1) {
                cflags = curr_cflags();
            } else {
                cpu->cflags_next_tb = -1;
            }

            tb = tb_find(cpu, last_tb, tb_exit, cflags);
            cpu_loop_exec_tb(cpu, tb, &last_tb, &tb_exit);

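            /* Try to align the host and virtual clocks
               if the guest is in advance */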
            align_clocks(&sc, cpu);
        }
    }

    cc->cpu_exec_exit(cpu);
    rcu_read_unlock();

    return ret;
}