/*
 *  emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "cpu.h"
#include "trace.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg.h"
#include "qemu/atomic.h"
#include "sysemu/qtest.h"
#include "qemu/timer.h"
#include "qemu/rcu.h"
#include "exec/tb-hash.h"
#include "exec/tb-lookup.h"
#include "exec/log.h"
#include "qemu/main-loop.h"
#if defined(TARGET_I386) && !defined(CONFIG_USER_ONLY)
#include "hw/i386/apic.h"
#endif
#include "sysemu/cpus.h"
#include "sysemu/replay.h"

/* -icount align implementation. */

typedef struct SyncClocks {
    int64_t diff_clk;
    int64_t last_cpu_icount;
    int64_t realtime_clock;
} SyncClocks;

#if !defined(CONFIG_USER_ONLY)
/* Allow the guest to have a max 3ms advance.
 * The difference between the 2 clocks could therefore
 * oscillate around 0.
 */
#define VM_CLOCK_ADVANCE 3000000
#define THRESHOLD_REDUCE 1.5
#define MAX_DELAY_PRINT_RATE 2000000000LL
#define MAX_NB_PRINTS 100
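
/* In concrete terms: VM_CLOCK_ADVANCE is 3000000 ns, so the guest may run
 * at most 3 ms ahead of real time before align_clocks() below sleeps; e.g.
 * a diff_clk of 5000000 ns leads to a nanosleep of tv_sec = 0,
 * tv_nsec = 5000000.  MAX_DELAY_PRINT_RATE is 2000000000 ns, so lateness
 * warnings appear at most once every 2 s, and at most MAX_NB_PRINTS times.
 */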

static void align_clocks(SyncClocks *sc, const CPUState *cpu)
{
    int64_t cpu_icount;

    if (!icount_align_option) {
        return;
    }

    cpu_icount = cpu->icount_extra + cpu->icount_decr.u16.low;
    sc->diff_clk += cpu_icount_to_ns(sc->last_cpu_icount - cpu_icount);
    sc->last_cpu_icount = cpu_icount;

    if (sc->diff_clk > VM_CLOCK_ADVANCE) {
#ifndef _WIN32
        struct timespec sleep_delay, rem_delay;
        sleep_delay.tv_sec = sc->diff_clk / 1000000000LL;
        sleep_delay.tv_nsec = sc->diff_clk % 1000000000LL;
        if (nanosleep(&sleep_delay, &rem_delay) < 0) {
            sc->diff_clk = rem_delay.tv_sec * 1000000000LL + rem_delay.tv_nsec;
        } else {
            sc->diff_clk = 0;
        }
#else
        Sleep(sc->diff_clk / SCALE_MS);
        sc->diff_clk = 0;
#endif
    }
}

static void print_delay(const SyncClocks *sc)
{
    static float threshold_delay;
    static int64_t last_realtime_clock;
    static int nb_prints;

    if (icount_align_option &&
        sc->realtime_clock - last_realtime_clock >= MAX_DELAY_PRINT_RATE &&
        nb_prints < MAX_NB_PRINTS) {
        if ((-sc->diff_clk / (float)1000000000LL > threshold_delay) ||
            (-sc->diff_clk / (float)1000000000LL <
             (threshold_delay - THRESHOLD_REDUCE))) {
            threshold_delay = (-sc->diff_clk / 1000000000LL) + 1;
            printf("Warning: The guest is now late by %.1f to %.1f seconds\n",
                   threshold_delay - 1,
                   threshold_delay);
            nb_prints++;
            last_realtime_clock = sc->realtime_clock;
        }
    }
}

static void init_delay_params(SyncClocks *sc,
                              const CPUState *cpu)
{
    if (!icount_align_option) {
        return;
    }
    sc->realtime_clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL_RT);
    sc->diff_clk = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) - sc->realtime_clock;
    sc->last_cpu_icount = cpu->icount_extra + cpu->icount_decr.u16.low;
    if (sc->diff_clk < max_delay) {
        max_delay = sc->diff_clk;
    }
    if (sc->diff_clk > max_advance) {
        max_advance = sc->diff_clk;
    }

    /* Print every 2s max if the guest is late. We limit the number
       of printed messages to MAX_NB_PRINTS (currently 100). */
    print_delay(sc);
}
#else
static void align_clocks(SyncClocks *sc, const CPUState *cpu)
{
}

static void init_delay_params(SyncClocks *sc, const CPUState *cpu)
{
}
#endif /* CONFIG USER ONLY */

/* Execute a TB, and fix up the CPU state afterwards if necessary */
static inline tcg_target_ulong cpu_tb_exec(CPUState *cpu, TranslationBlock *itb)
{
    CPUArchState *env = cpu->env_ptr;
    uintptr_t ret;
    TranslationBlock *last_tb;
    int tb_exit;
    uint8_t *tb_ptr = itb->tc.ptr;

    qemu_log_mask_and_addr(CPU_LOG_EXEC, itb->pc,
                           "Trace %d: %p ["
                           TARGET_FMT_lx "/" TARGET_FMT_lx "/%#x] %s\n",
                           cpu->cpu_index, itb->tc.ptr,
                           itb->cs_base, itb->pc, itb->flags,
                           lookup_symbol(itb->pc));

#if defined(DEBUG_DISAS)
    if (qemu_loglevel_mask(CPU_LOG_TB_CPU)
        && qemu_log_in_addr_range(itb->pc)) {
        qemu_log_lock();
        int flags = 0;
        if (qemu_loglevel_mask(CPU_LOG_TB_FPU)) {
            flags |= CPU_DUMP_FPU;
        }
#if defined(TARGET_I386)
        flags |= CPU_DUMP_CCOP;
#endif
        log_cpu_state(cpu, flags);
        qemu_log_unlock();
    }
#endif /* DEBUG_DISAS */

    cpu->can_do_io = !use_icount;
    ret = tcg_qemu_tb_exec(env, tb_ptr);
    cpu->can_do_io = 1;
    last_tb = (TranslationBlock *)(ret & ~TB_EXIT_MASK);
    tb_exit = ret & TB_EXIT_MASK;
    trace_exec_tb_exit(last_tb, tb_exit);

    if (tb_exit > TB_EXIT_IDX1) {
        /* We didn't start executing this TB (eg because the instruction
         * counter hit zero); we must restore the guest PC to the address
         * of the start of the TB.
         */
        CPUClass *cc = CPU_GET_CLASS(cpu);
        qemu_log_mask_and_addr(CPU_LOG_EXEC, last_tb->pc,
                               "Stopped execution of TB chain before %p ["
                               TARGET_FMT_lx "] %s\n",
                               last_tb->tc.ptr, last_tb->pc,
                               lookup_symbol(last_tb->pc));
        if (cc->synchronize_from_tb) {
            cc->synchronize_from_tb(cpu, last_tb);
        } else {
            assert(cc->set_pc);
            cc->set_pc(cpu, last_tb->pc);
        }
    }
    return ret;
}
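
/* A sketch of how the return value is consumed (see TB_EXIT_* in tcg.h):
 * exit codes 0 and 1 mean the TB ended at an as-yet-unchained goto_tb
 * slot, decoded above as
 *     last_tb = (TranslationBlock *)(ret & ~TB_EXIT_MASK);
 *     tb_exit = ret & TB_EXIT_MASK;
 * so a caller can later chain that slot to the next TB, while
 * TB_EXIT_REQUESTED means something asked the CPU to stop and we must
 * return to the main loop rather than chain.
 */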

#ifndef CONFIG_USER_ONLY
/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
static void cpu_exec_nocache(CPUState *cpu, int max_cycles,
                             TranslationBlock *orig_tb, bool ignore_icount)
{
    TranslationBlock *tb;
    uint32_t cflags = curr_cflags() | CF_NOCACHE;

    if (ignore_icount) {
        cflags &= ~CF_USE_ICOUNT;
    }

    /* Should never happen.
       We only end up here when an existing TB is too long.  */
    cflags |= MIN(max_cycles, CF_COUNT_MASK);

    mmap_lock();
    tb = tb_gen_code(cpu, orig_tb->pc, orig_tb->cs_base,
                     orig_tb->flags, cflags);
    tb->orig_tb = orig_tb;
    mmap_unlock();

    /* execute the generated code */
    trace_exec_tb_nocache(tb, tb->pc);
    cpu_tb_exec(cpu, tb);

    mmap_lock();
    tb_phys_invalidate(tb, -1);
    mmap_unlock();
    tcg_tb_remove(tb);
}
#endif
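
/* In practice cpu_exec_nocache() is used in two places below: by
 * cpu_handle_exception() to raise an exception that is pending in the
 * record/replay log, and by cpu_loop_exec_tb() when icount must stop
 * after an exact number of instructions.  The throwaway TB is capped at
 * max_cycles guest instructions, run once, then invalidated and freed.
 */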

void cpu_exec_step_atomic(CPUState *cpu)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    uint32_t flags;
    uint32_t cflags = 1;
    uint32_t cf_mask = cflags & CF_HASH_MASK;
    /* volatile because we modify it between setjmp and longjmp */
    volatile bool in_exclusive_region = false;

    if (sigsetjmp(cpu->jmp_env, 0) == 0) {
        tb = tb_lookup__cpu_state(cpu, &pc, &cs_base, &flags, cf_mask);
        if (tb == NULL) {
            mmap_lock();
            tb = tb_gen_code(cpu, pc, cs_base, flags, cflags);
            mmap_unlock();
        }

        start_exclusive();

        /* Since we got here, we know that parallel_cpus must be true.  */
        parallel_cpus = false;
        in_exclusive_region = true;
        cc->cpu_exec_enter(cpu);
        /* execute the generated code */
        trace_exec_tb(tb, pc);
        cpu_tb_exec(cpu, tb);
        cc->cpu_exec_exit(cpu);
    } else {
        /*
         * The mmap_lock is dropped by tb_gen_code if it runs out of
         * memory.
         */
#ifndef CONFIG_SOFTMMU
        tcg_debug_assert(!have_mmap_lock());
#endif
        assert_no_pages_locked();
    }

    if (in_exclusive_region) {
        /* We might longjump out of either the codegen or the
         * execution, so must make sure we only end the exclusive
         * region if we started it.
         */
        parallel_cpus = true;
        end_exclusive();
    }
}
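
/* Roughly what this buys us: cflags == 1 requests a TB of exactly one
 * guest instruction (the count is carried in CF_COUNT_MASK), and that TB
 * runs between start_exclusive() and end_exclusive(), i.e. with every
 * other vCPU stopped.  This is the fallback used when an atomic operation
 * cannot be emulated in parallel and cpu_exec() returns EXCP_ATOMIC.
 */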

struct tb_desc {
    target_ulong pc;
    target_ulong cs_base;
    CPUArchState *env;
    tb_page_addr_t phys_page1;
    uint32_t flags;
    uint32_t cf_mask;
    uint32_t trace_vcpu_dstate;
};

static bool tb_lookup_cmp(const void *p, const void *d)
{
    const TranslationBlock *tb = p;
    const struct tb_desc *desc = d;

    if (tb->pc == desc->pc &&
        tb->page_addr[0] == desc->phys_page1 &&
        tb->cs_base == desc->cs_base &&
        tb->flags == desc->flags &&
        tb->trace_vcpu_dstate == desc->trace_vcpu_dstate &&
        (tb_cflags(tb) & (CF_HASH_MASK | CF_INVALID)) == desc->cf_mask) {
        /* check next page if needed */
        if (tb->page_addr[1] == -1) {
            return true;
        } else {
            tb_page_addr_t phys_page2;
            target_ulong virt_page2;

            virt_page2 = (desc->pc & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
            phys_page2 = get_page_addr_code(desc->env, virt_page2);
            if (tb->page_addr[1] == phys_page2) {
                return true;
            }
        }
    }
    return false;
}

TranslationBlock *tb_htable_lookup(CPUState *cpu, target_ulong pc,
                                   target_ulong cs_base, uint32_t flags,
                                   uint32_t cf_mask)
{
    tb_page_addr_t phys_pc;
    struct tb_desc desc;
    uint32_t h;

    desc.env = (CPUArchState *)cpu->env_ptr;
    desc.cs_base = cs_base;
    desc.flags = flags;
    desc.cf_mask = cf_mask;
    desc.trace_vcpu_dstate = *cpu->trace_dstate;
    desc.pc = pc;
    phys_pc = get_page_addr_code(desc.env, pc);
    if (phys_pc == -1) {
        return NULL;
    }
    desc.phys_page1 = phys_pc & TARGET_PAGE_MASK;
    h = tb_hash_func(phys_pc, pc, flags, cf_mask, *cpu->trace_dstate);
    return qht_lookup_custom(&tb_ctx.htable, &desc, h, tb_lookup_cmp);
}
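
/* For orientation: the hash key mixes the physical PC, virtual PC, flags,
 * cf_mask and per-vCPU trace state, so different translations of the same
 * guest address (say, with different cflags) coexist in tb_ctx.htable;
 * tb_lookup_cmp() above then re-checks every field, including the second
 * physical page for TBs that cross a guest page boundary.
 */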

void tb_set_jmp_target(TranslationBlock *tb, int n, uintptr_t addr)
{
    if (TCG_TARGET_HAS_direct_jump) {
        uintptr_t offset = tb->jmp_target_arg[n];
        uintptr_t tc_ptr = (uintptr_t)tb->tc.ptr;
        tb_target_set_jmp_target(tc_ptr, tc_ptr + offset, addr);
    } else {
        tb->jmp_target_arg[n] = addr;
    }
}
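
/* A sketch of the two flavours above: with TCG_TARGET_HAS_direct_jump,
 * jmp_target_arg[n] is the offset of the host branch instruction inside
 * the generated code, and tb_target_set_jmp_target() rewrites that branch
 * in place to land on addr; without it, jmp_target_arg[n] holds the
 * destination address itself and the generated code jumps through it
 * indirectly.
 */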

static inline void tb_add_jump(TranslationBlock *tb, int n,
                               TranslationBlock *tb_next)
{
    uintptr_t old;

    assert(n < ARRAY_SIZE(tb->jmp_list_next));
    qemu_spin_lock(&tb_next->jmp_lock);

    /* make sure the destination TB is valid */
    if (tb_next->cflags & CF_INVALID) {
        goto out_unlock_next;
    }
    /* Atomically claim the jump destination slot only if it was NULL */
    old = atomic_cmpxchg(&tb->jmp_dest[n], (uintptr_t)NULL, (uintptr_t)tb_next);
    if (old) {
        goto out_unlock_next;
    }

    /* patch the native jump address */
    tb_set_jmp_target(tb, n, (uintptr_t)tb_next->tc.ptr);

    /* add in TB jmp list */
    tb->jmp_list_next[n] = tb_next->jmp_list_head;
    tb_next->jmp_list_head = (uintptr_t)tb | n;

    qemu_spin_unlock(&tb_next->jmp_lock);

    qemu_log_mask_and_addr(CPU_LOG_EXEC, tb->pc,
                           "Linking TBs %p [" TARGET_FMT_lx
                           "] index %d -> %p [" TARGET_FMT_lx "]\n",
                           tb->tc.ptr, tb->pc, n,
                           tb_next->tc.ptr, tb_next->pc);
    return;

 out_unlock_next:
    qemu_spin_unlock(&tb_next->jmp_lock);
    return;
}
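
/* The list above uses tagged pointers: since n is 0 or 1 and
 * TranslationBlock pointers are aligned, the jump slot index fits in the
 * low bits of the pointer itself, as in
 *     tb_next->jmp_list_head = (uintptr_t)tb | n;
 * Walkers mask the low bits off to recover (tb, n), which is how TB
 * invalidation finds and resets the incoming jumps of a block.
 */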

static inline TranslationBlock *tb_find(CPUState *cpu,
                                        TranslationBlock *last_tb,
                                        int tb_exit, uint32_t cf_mask)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    uint32_t flags;

    tb = tb_lookup__cpu_state(cpu, &pc, &cs_base, &flags, cf_mask);
    if (tb == NULL) {
        mmap_lock();
        tb = tb_gen_code(cpu, pc, cs_base, flags, cf_mask);
        mmap_unlock();
        /* We add the TB in the virtual pc hash table for the fast lookup */
        atomic_set(&cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)], tb);
    }
#ifndef CONFIG_USER_ONLY
    /* We don't take care of direct jumps when address mapping changes in
     * system emulation. So it's not safe to make a direct jump to a TB
     * spanning two pages because the mapping for the second page can change.
     */
    if (tb->page_addr[1] != -1) {
        last_tb = NULL;
    }
#endif
    /* See if we can patch the calling TB. */
    if (last_tb) {
        tb_add_jump(last_tb, tb_exit, tb);
    }
    return tb;
}

static inline bool cpu_handle_halt(CPUState *cpu)
{
    if (cpu->halted) {
#if defined(TARGET_I386) && !defined(CONFIG_USER_ONLY)
        if ((cpu->interrupt_request & CPU_INTERRUPT_POLL)
            && replay_interrupt()) {
            X86CPU *x86_cpu = X86_CPU(cpu);
            qemu_mutex_lock_iothread();
            apic_poll_irq(x86_cpu->apic_state);
            cpu_reset_interrupt(cpu, CPU_INTERRUPT_POLL);
            qemu_mutex_unlock_iothread();
        }
#endif
        if (!cpu_has_work(cpu)) {
            return true;
        }

        cpu->halted = 0;
    }

    return false;
}

static inline void cpu_handle_debug_exception(CPUState *cpu)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);
    CPUWatchpoint *wp;

    if (!cpu->watchpoint_hit) {
        QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }

    cc->debug_excp_handler(cpu);
}

static inline bool cpu_handle_exception(CPUState *cpu, int *ret)
{
    if (cpu->exception_index < 0) {
#ifndef CONFIG_USER_ONLY
        if (replay_has_exception()
            && cpu->icount_decr.u16.low + cpu->icount_extra == 0) {
            /* try to cause an exception pending in the log */
            cpu_exec_nocache(cpu, 1, tb_find(cpu, NULL, 0, curr_cflags()), true);
        }
#endif
        if (cpu->exception_index < 0) {
            return false;
        }
    }

    if (cpu->exception_index >= EXCP_INTERRUPT) {
        /* exit request from the cpu execution loop */
        *ret = cpu->exception_index;
        if (*ret == EXCP_DEBUG) {
            cpu_handle_debug_exception(cpu);
        }
        cpu->exception_index = -1;
        return true;
    } else {
#if defined(CONFIG_USER_ONLY)
        /* if user mode only, we simulate a fake exception
           which will be handled outside the cpu execution
           loop */
#if defined(TARGET_I386)
        CPUClass *cc = CPU_GET_CLASS(cpu);
        cc->do_interrupt(cpu);
#endif
        *ret = cpu->exception_index;
        cpu->exception_index = -1;
        return true;
#else
        if (replay_exception()) {
            CPUClass *cc = CPU_GET_CLASS(cpu);
            qemu_mutex_lock_iothread();
            cc->do_interrupt(cpu);
            qemu_mutex_unlock_iothread();
            cpu->exception_index = -1;
        } else if (!replay_has_interrupt()) {
            /* give a chance to iothread in replay mode */
            *ret = EXCP_INTERRUPT;
            return true;
        }
#endif
    }

    return false;
}

static inline bool cpu_handle_interrupt(CPUState *cpu,
                                        TranslationBlock **last_tb)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

    /* Clear the interrupt flag now since we're processing
     * cpu->interrupt_request and cpu->exit_request.
     * Ensure zeroing happens before reading cpu->exit_request or
     * cpu->interrupt_request (see also smp_wmb in cpu_exit())
     */
    atomic_mb_set(&cpu->icount_decr.u16.high, 0);

    if (unlikely(atomic_read(&cpu->interrupt_request))) {
        int interrupt_request;
        qemu_mutex_lock_iothread();
        interrupt_request = cpu->interrupt_request;
        if (unlikely(cpu->singlestep_enabled & SSTEP_NOIRQ)) {
            /* Mask out external interrupts for this step. */
            interrupt_request &= ~CPU_INTERRUPT_SSTEP_MASK;
        }
        if (interrupt_request & CPU_INTERRUPT_DEBUG) {
            cpu->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
            cpu->exception_index = EXCP_DEBUG;
            qemu_mutex_unlock_iothread();
            return true;
        }
        if (replay_mode == REPLAY_MODE_PLAY && !replay_has_interrupt()) {
            /* Do nothing */
        } else if (interrupt_request & CPU_INTERRUPT_HALT) {
            replay_interrupt();
            cpu->interrupt_request &= ~CPU_INTERRUPT_HALT;
            cpu->halted = 1;
            cpu->exception_index = EXCP_HLT;
            qemu_mutex_unlock_iothread();
            return true;
        }
#if defined(TARGET_I386)
        else if (interrupt_request & CPU_INTERRUPT_INIT) {
            X86CPU *x86_cpu = X86_CPU(cpu);
            CPUArchState *env = &x86_cpu->env;
            replay_interrupt();
            cpu_svm_check_intercept_param(env, SVM_EXIT_INIT, 0, 0);
            do_cpu_init(x86_cpu);
            cpu->exception_index = EXCP_HALTED;
            qemu_mutex_unlock_iothread();
            return true;
        }
#else
        else if (interrupt_request & CPU_INTERRUPT_RESET) {
            replay_interrupt();
            cpu_reset(cpu);
            qemu_mutex_unlock_iothread();
            return true;
        }
#endif
        /* The target hook has 3 exit conditions:
           False when the interrupt isn't processed,
           True when it is, and we should restart on a new TB,
           and via longjmp via cpu_loop_exit.  */
        else {
            if (cc->cpu_exec_interrupt(cpu, interrupt_request)) {
                replay_interrupt();
                cpu->exception_index = -1;
                *last_tb = NULL;
            }
            /* The target hook may have updated the 'cpu->interrupt_request';
             * reload the 'interrupt_request' value */
            interrupt_request = cpu->interrupt_request;
        }
        if (interrupt_request & CPU_INTERRUPT_EXITTB) {
            cpu->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
            /* ensure that no TB jump will be modified as
               the program flow was changed */
            *last_tb = NULL;
        }

        /* If we exit via cpu_loop_exit/longjmp it is reset in cpu_exec */
        qemu_mutex_unlock_iothread();
    }

    /* Finally, check if we need to exit to the main loop.  */
    if (unlikely(atomic_read(&cpu->exit_request)
        || (use_icount && cpu->icount_decr.u16.low + cpu->icount_extra == 0))) {
        atomic_set(&cpu->exit_request, 0);
        if (cpu->exception_index == -1) {
            cpu->exception_index = EXCP_INTERRUPT;
        }
        return true;
    }

    return false;
}

static inline void cpu_loop_exec_tb(CPUState *cpu, TranslationBlock *tb,
                                    TranslationBlock **last_tb, int *tb_exit)
{
    uintptr_t ret;
    int32_t insns_left;

    trace_exec_tb(tb, tb->pc);
    ret = cpu_tb_exec(cpu, tb);
    tb = (TranslationBlock *)(ret & ~TB_EXIT_MASK);
    *tb_exit = ret & TB_EXIT_MASK;
    if (*tb_exit != TB_EXIT_REQUESTED) {
        *last_tb = tb;
        return;
    }

    *last_tb = NULL;
    insns_left = atomic_read(&cpu->icount_decr.u32);
    if (insns_left < 0) {
        /* Something asked us to stop executing chained TBs; just
         * continue round the main loop. Whatever requested the exit
         * will also have set something else (eg exit_request or
         * interrupt_request) which will be handled by
         * cpu_handle_interrupt.  cpu_handle_interrupt will also
         * clear cpu->icount_decr.u16.high.
         */
        return;
    }

    /* Instruction counter expired.  */
    assert(use_icount);
#ifndef CONFIG_USER_ONLY
    /* Ensure global icount has gone forward */
    cpu_update_icount(cpu);
    /* Refill decrementer and continue execution.  */
    insns_left = MIN(0xffff, cpu->icount_budget);
    cpu->icount_decr.u16.low = insns_left;
    cpu->icount_extra = cpu->icount_budget - insns_left;
    if (!cpu->icount_extra) {
        /* Execute any remaining instructions, then let the main loop
         * handle the next event.
         */
        if (insns_left > 0) {
            cpu_exec_nocache(cpu, insns_left, tb, false);
        }
    }
#endif
}
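
/* Worked example for the refill above (numbers illustrative): with
 * cpu->icount_budget == 100000, the 16-bit decrementer gets
 * u16.low = 0xffff (65535) and icount_extra becomes 34465; generated code
 * decrements u16.low as it runs, and each time it is exhausted we come
 * back here to hand out the next chunk of the budget.
 */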

/* main execution loop */

int cpu_exec(CPUState *cpu)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);
    int ret;
    SyncClocks sc = { 0 };

    /* replay_interrupt may need current_cpu */
    current_cpu = cpu;

    if (cpu_handle_halt(cpu)) {
        return EXCP_HALTED;
    }

    rcu_read_lock();

    cc->cpu_exec_enter(cpu);

    /* Calculate difference between guest clock and host clock.
     * This delay includes the delay of the last cycle, so
     * what we have to do is sleep until it is 0. As for the
     * advance/delay we gain here, we try to fix it next time.
     */
    init_delay_params(&sc, cpu);

    /* prepare setjmp context for exception handling */
    if (sigsetjmp(cpu->jmp_env, 0) != 0) {
#if defined(__clang__) || !QEMU_GNUC_PREREQ(4, 6)
        /* Some compilers wrongly smash all local variables after
         * siglongjmp. There were bug reports for gcc 4.5.0 and clang.
         * Reload essential local variables here for those compilers.
         * Newer versions of gcc would complain about this code (-Wclobbered). */
        cpu = current_cpu;
        cc = CPU_GET_CLASS(cpu);
#else /* buggy compiler */
        /* Assert that the compiler does not smash local variables. */
        g_assert(cpu == current_cpu);
        g_assert(cc == CPU_GET_CLASS(cpu));
#endif /* buggy compiler */
#ifndef CONFIG_SOFTMMU
        tcg_debug_assert(!have_mmap_lock());
#endif
        if (qemu_mutex_iothread_locked()) {
            qemu_mutex_unlock_iothread();
        }
    }

    /* if an exception is pending, we execute it here */
    while (!cpu_handle_exception(cpu, &ret)) {
        TranslationBlock *last_tb = NULL;
        int tb_exit = 0;

        while (!cpu_handle_interrupt(cpu, &last_tb)) {
            uint32_t cflags = cpu->cflags_next_tb;
            TranslationBlock *tb;

            /* When requested, use an exact setting for cflags for the next
               execution.  This is used for icount, precise smc, and stop-
               after-access watchpoints.  Since this request should never
               have CF_INVALID set, -1 is a convenient invalid value that
               we can use as an indicator that the exact value is unknown. */
            if (cflags == -1) {
                cflags = curr_cflags();
            } else {
                cpu->cflags_next_tb = -1;
            }

            tb = tb_find(cpu, last_tb, tb_exit, cflags);
            cpu_loop_exec_tb(cpu, tb, &last_tb, &tb_exit);
            /* Try to align the host and virtual clocks
               if the guest is in advance */
            align_clocks(&sc, cpu);
        }
    }

    cc->cpu_exec_exit(cpu);
    rcu_read_unlock();

    return ret;
}