/*
 *  emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
19#include "qemu/osdep.h"
20#include "cpu.h"
21#include "trace.h"
22#include "disas/disas.h"
23#include "exec/exec-all.h"
24#include "tcg.h"
25#include "qemu/atomic.h"
26#include "sysemu/qtest.h"
27#include "qemu/timer.h"
28#include "exec/address-spaces.h"
29#include "qemu/rcu.h"
30#include "exec/tb-hash.h"
31#include "exec/log.h"
32#if defined(TARGET_I386) && !defined(CONFIG_USER_ONLY)
33#include "hw/i386/apic.h"
34#endif
35#include "sysemu/replay.h"

/* -icount align implementation. */

typedef struct SyncClocks {
    int64_t diff_clk;
    int64_t last_cpu_icount;
    int64_t realtime_clock;
} SyncClocks;

#if !defined(CONFIG_USER_ONLY)
/* Allow the guest to have a max 3ms advance.
 * The difference between the 2 clocks could therefore
 * oscillate around 0.
 */
#define VM_CLOCK_ADVANCE 3000000
#define THRESHOLD_REDUCE 1.5
#define MAX_DELAY_PRINT_RATE 2000000000LL
#define MAX_NB_PRINTS 100
static void align_clocks(SyncClocks *sc, const CPUState *cpu)
{
    int64_t cpu_icount;

    if (!icount_align_option) {
        return;
    }

    cpu_icount = cpu->icount_extra + cpu->icount_decr.u16.low;
    sc->diff_clk += cpu_icount_to_ns(sc->last_cpu_icount - cpu_icount);
    sc->last_cpu_icount = cpu_icount;

    if (sc->diff_clk > VM_CLOCK_ADVANCE) {
#ifndef _WIN32
        struct timespec sleep_delay, rem_delay;
        sleep_delay.tv_sec = sc->diff_clk / 1000000000LL;
        sleep_delay.tv_nsec = sc->diff_clk % 1000000000LL;
        if (nanosleep(&sleep_delay, &rem_delay) < 0) {
            sc->diff_clk = rem_delay.tv_sec * 1000000000LL + rem_delay.tv_nsec;
        } else {
            sc->diff_clk = 0;
        }
#else
        Sleep(sc->diff_clk / SCALE_MS);
        sc->diff_clk = 0;
#endif
    }
}

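/* Rate-limited warning when the guest is running late: print at most
 * MAX_NB_PRINTS messages, no more than one per MAX_DELAY_PRINT_RATE ns.
 */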
static void print_delay(const SyncClocks *sc)
{
    static float threshold_delay;
    static int64_t last_realtime_clock;
    static int nb_prints;

    if (icount_align_option &&
        sc->realtime_clock - last_realtime_clock >= MAX_DELAY_PRINT_RATE &&
        nb_prints < MAX_NB_PRINTS) {
        if ((-sc->diff_clk / (float)1000000000LL > threshold_delay) ||
            (-sc->diff_clk / (float)1000000000LL <
             (threshold_delay - THRESHOLD_REDUCE))) {
            threshold_delay = (-sc->diff_clk / 1000000000LL) + 1;
            printf("Warning: The guest is now late by %.1f to %.1f seconds\n",
                   threshold_delay - 1,
                   threshold_delay);
            nb_prints++;
            last_realtime_clock = sc->realtime_clock;
        }
    }
}

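/* Capture the initial guest/host clock difference for this cpu_exec()
 * slice and update the max_delay/max_advance statistics.
 */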
static void init_delay_params(SyncClocks *sc,
                              const CPUState *cpu)
{
    if (!icount_align_option) {
        return;
    }
    sc->realtime_clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL_RT);
    sc->diff_clk = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) - sc->realtime_clock;
    sc->last_cpu_icount = cpu->icount_extra + cpu->icount_decr.u16.low;
    if (sc->diff_clk < max_delay) {
        max_delay = sc->diff_clk;
    }
    if (sc->diff_clk > max_advance) {
        max_advance = sc->diff_clk;
    }

    /* Print every 2s max if the guest is late. We limit the number
       of printed messages to MAX_NB_PRINTS (currently 100). */
    print_delay(sc);
}
#else
static void align_clocks(SyncClocks *sc, const CPUState *cpu)
{
}

static void init_delay_params(SyncClocks *sc, const CPUState *cpu)
{
}
#endif

/* Execute a TB, and fix up the CPU state afterwards if necessary */
static inline tcg_target_ulong cpu_tb_exec(CPUState *cpu, TranslationBlock *itb)
{
    CPUArchState *env = cpu->env_ptr;
    uintptr_t ret;
    TranslationBlock *last_tb;
    int tb_exit;
    uint8_t *tb_ptr = itb->tc_ptr;

    qemu_log_mask_and_addr(CPU_LOG_EXEC, itb->pc,
                           "Trace %p [%d: " TARGET_FMT_lx "] %s\n",
                           itb->tc_ptr, cpu->cpu_index, itb->pc,
                           lookup_symbol(itb->pc));

#if defined(DEBUG_DISAS)
    if (qemu_loglevel_mask(CPU_LOG_TB_CPU)
        && qemu_log_in_addr_range(itb->pc)) {
        qemu_log_lock();
#if defined(TARGET_I386)
        log_cpu_state(cpu, CPU_DUMP_CCOP);
#else
        log_cpu_state(cpu, 0);
#endif
        qemu_log_unlock();
    }
#endif

    cpu->can_do_io = !use_icount;
    ret = tcg_qemu_tb_exec(env, tb_ptr);
    cpu->can_do_io = 1;
    last_tb = (TranslationBlock *)(ret & ~TB_EXIT_MASK);
    tb_exit = ret & TB_EXIT_MASK;
    trace_exec_tb_exit(last_tb, tb_exit);

    if (tb_exit > TB_EXIT_IDX1) {
        /* We didn't start executing this TB (eg because the instruction
         * counter hit zero); we must restore the guest PC to the address
         * of the start of the TB.
         */
        CPUClass *cc = CPU_GET_CLASS(cpu);
        qemu_log_mask_and_addr(CPU_LOG_EXEC, last_tb->pc,
                               "Stopped execution of TB chain before %p ["
                               TARGET_FMT_lx "] %s\n",
                               last_tb->tc_ptr, last_tb->pc,
                               lookup_symbol(last_tb->pc));
        if (cc->synchronize_from_tb) {
            cc->synchronize_from_tb(cpu, last_tb);
        } else {
            assert(cc->set_pc);
            cc->set_pc(cpu, last_tb->pc);
        }
    }
    if (tb_exit == TB_EXIT_REQUESTED) {
        /* We were asked to stop executing TBs (probably a pending
         * interrupt). We've now stopped, so clear the flag.
         */
        atomic_set(&cpu->tcg_exit_req, 0);
    }
    return ret;
}

#ifndef CONFIG_USER_ONLY
/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
static void cpu_exec_nocache(CPUState *cpu, int max_cycles,
                             TranslationBlock *orig_tb, bool ignore_icount)
{
    TranslationBlock *tb;

    /* Should never happen.
       We only end up here when an existing TB is too long.  */
    if (max_cycles > CF_COUNT_MASK) {
        max_cycles = CF_COUNT_MASK;
    }

    tb_lock();
    tb = tb_gen_code(cpu, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                     max_cycles | CF_NOCACHE
                         | (ignore_icount ? CF_IGNORE_ICOUNT : 0));
    tb->orig_tb = orig_tb;
    tb_unlock();

    /* execute the generated code */
    trace_exec_tb_nocache(tb, tb->pc);
    cpu_tb_exec(cpu, tb);

    tb_lock();
    tb_phys_invalidate(tb, -1);
    tb_free(tb);
    tb_unlock();
}
#endif

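/* Translate and execute a single instruction as an uncached TB
 * (CF_NOCACHE), then discard the TB again.
 */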
static void cpu_exec_step(CPUState *cpu)
{
    CPUArchState *env = (CPUArchState *)cpu->env_ptr;
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    uint32_t flags;

    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
    tb = tb_gen_code(cpu, pc, cs_base, flags,
                     1 | CF_NOCACHE | CF_IGNORE_ICOUNT);
    tb->orig_tb = NULL;
    /* execute the generated code */
    trace_exec_tb_nocache(tb, pc);
    cpu_tb_exec(cpu, tb);
    tb_phys_invalidate(tb, -1);
    tb_free(tb);
}

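/* Run one instruction inside the exclusive section; parallel_cpus is
 * cleared so the TB is generated for serial execution, which is how
 * atomic instructions are emulated when they cannot run in parallel.
 */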
void cpu_exec_step_atomic(CPUState *cpu)
{
    start_exclusive();

    /* Since we got here, we know that parallel_cpus must be true.  */
    parallel_cpus = false;
    cpu_exec_step(cpu);
    parallel_cpus = true;

    end_exclusive();
}

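/* Lookup key for a TB in the global qht hash table.  */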
struct tb_desc {
    target_ulong pc;
    target_ulong cs_base;
    CPUArchState *env;
    tb_page_addr_t phys_page1;
    uint32_t flags;
};

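/* qht comparison function: true if the candidate TB matches the lookup
 * key, including the physical address of the second page when the TB
 * spans two pages.
 */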
static bool tb_cmp(const void *p, const void *d)
{
    const TranslationBlock *tb = p;
    const struct tb_desc *desc = d;

    if (tb->pc == desc->pc &&
        tb->page_addr[0] == desc->phys_page1 &&
        tb->cs_base == desc->cs_base &&
        tb->flags == desc->flags &&
        !atomic_read(&tb->invalid)) {
        /* check next page if needed */
        if (tb->page_addr[1] == -1) {
            return true;
        } else {
            tb_page_addr_t phys_page2;
            target_ulong virt_page2;

            virt_page2 = (desc->pc & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
            phys_page2 = get_page_addr_code(desc->env, virt_page2);
            if (tb->page_addr[1] == phys_page2) {
                return true;
            }
        }
    }
    return false;
}

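/* Look up a TB in the global hash table by physical PC, PC and flags.  */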
static TranslationBlock *tb_htable_lookup(CPUState *cpu,
                                          target_ulong pc,
                                          target_ulong cs_base,
                                          uint32_t flags)
{
    tb_page_addr_t phys_pc;
    struct tb_desc desc;
    uint32_t h;

    desc.env = (CPUArchState *)cpu->env_ptr;
    desc.cs_base = cs_base;
    desc.flags = flags;
    desc.pc = pc;
    phys_pc = get_page_addr_code(desc.env, pc);
    desc.phys_page1 = phys_pc & TARGET_PAGE_MASK;
    h = tb_hash_func(phys_pc, pc, flags);
    return qht_lookup(&tcg_ctx.tb_ctx.htable, tb_cmp, &desc, h);
}

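/* Find a TB for the current CPU state: first in the per-CPU jump cache,
 * then in the global hash table; translate a new TB if none is found.
 * When possible, chain the previous TB (last_tb) directly to the result.
 */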
static inline TranslationBlock *tb_find(CPUState *cpu,
                                        TranslationBlock *last_tb,
                                        int tb_exit)
{
    CPUArchState *env = (CPUArchState *)cpu->env_ptr;
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    uint32_t flags;
    bool have_tb_lock = false;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
    tb = atomic_rcu_read(&cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)]);
    if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                 tb->flags != flags)) {
        tb = tb_htable_lookup(cpu, pc, cs_base, flags);
        if (!tb) {

            /* mmap_lock is needed by tb_gen_code, and mmap_lock must be
             * taken outside tb_lock. As system emulation is currently
             * single threaded the locks are NOPs.
             */
            mmap_lock();
            tb_lock();
            have_tb_lock = true;

            /* There's a chance that our desired tb has been translated while
             * taking the locks so we check again inside the lock.
             */
            tb = tb_htable_lookup(cpu, pc, cs_base, flags);
            if (!tb) {
                /* if no translated code available, then translate it now */
                tb = tb_gen_code(cpu, pc, cs_base, flags, 0);
            }

            mmap_unlock();
        }

        /* We add the TB in the virtual pc hash table for the fast lookup */
        atomic_set(&cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)], tb);
    }
#ifndef CONFIG_USER_ONLY
    /* We don't take care of direct jumps when address mapping changes in
     * system emulation. So it's not safe to make a direct jump to a TB
     * spanning two pages because the mapping for the second page can change.
     */
    if (tb->page_addr[1] != -1) {
        last_tb = NULL;
    }
#endif
    /* See if we can patch the calling TB. */
    if (last_tb && !qemu_loglevel_mask(CPU_LOG_TB_NOCHAIN)) {
        if (!have_tb_lock) {
            tb_lock();
            have_tb_lock = true;
        }
        if (!tb->invalid) {
            tb_add_jump(last_tb, tb_exit, tb);
        }
    }
    if (have_tb_lock) {
        tb_unlock();
    }
    return tb;
}

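/* Return true if the CPU is halted and has nothing to do.  On x86 a
 * pending APIC interrupt may bring it out of the halted state first.
 */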
static inline bool cpu_handle_halt(CPUState *cpu)
{
    if (cpu->halted) {
#if defined(TARGET_I386) && !defined(CONFIG_USER_ONLY)
        if ((cpu->interrupt_request & CPU_INTERRUPT_POLL)
            && replay_interrupt()) {
            X86CPU *x86_cpu = X86_CPU(cpu);
            apic_poll_irq(x86_cpu->apic_state);
            cpu_reset_interrupt(cpu, CPU_INTERRUPT_POLL);
        }
#endif
        if (!cpu_has_work(cpu)) {
            current_cpu = NULL;
            return true;
        }

        cpu->halted = 0;
    }

    return false;
}

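/* Reset watchpoint hit flags (unless one was actually hit) and hand the
 * debug exception to the per-target handler.
 */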
static inline void cpu_handle_debug_exception(CPUState *cpu)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);
    CPUWatchpoint *wp;

    if (!cpu->watchpoint_hit) {
        QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }

    cc->debug_excp_handler(cpu);
}

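/* Handle a pending exception, if any; returns true (with *ret set) when
 * the main loop should exit.
 */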
static inline bool cpu_handle_exception(CPUState *cpu, int *ret)
{
    if (cpu->exception_index >= 0) {
        if (cpu->exception_index >= EXCP_INTERRUPT) {
            /* exit request from the cpu execution loop */
            *ret = cpu->exception_index;
            if (*ret == EXCP_DEBUG) {
                cpu_handle_debug_exception(cpu);
            }
            cpu->exception_index = -1;
            return true;
        } else {
#if defined(CONFIG_USER_ONLY)
            /* if user mode only, we simulate a fake exception
               which will be handled outside the cpu execution
               loop */
#if defined(TARGET_I386)
            CPUClass *cc = CPU_GET_CLASS(cpu);
            cc->do_interrupt(cpu);
#endif
            *ret = cpu->exception_index;
            cpu->exception_index = -1;
            return true;
#else
            if (replay_exception()) {
                CPUClass *cc = CPU_GET_CLASS(cpu);
                cc->do_interrupt(cpu);
                cpu->exception_index = -1;
            } else if (!replay_has_interrupt()) {
                /* give a chance to iothread in replay mode */
                *ret = EXCP_INTERRUPT;
                return true;
            }
#endif
        }
#ifndef CONFIG_USER_ONLY
    } else if (replay_has_exception()
               && cpu->icount_decr.u16.low + cpu->icount_extra == 0) {
        /* try to cause an exception pending in the log */
        cpu_exec_nocache(cpu, 1, tb_find(cpu, NULL, 0), true);
        *ret = -1;
        return true;
#endif
    }

    return false;
}

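/* Service pending interrupt requests; may exit the loop via
 * cpu_loop_exit().  *last_tb is cleared whenever the TB found next must
 * not be chained to the previous one.
 */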
static inline void cpu_handle_interrupt(CPUState *cpu,
                                        TranslationBlock **last_tb)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);
    int interrupt_request = cpu->interrupt_request;

    if (unlikely(interrupt_request)) {
        if (unlikely(cpu->singlestep_enabled & SSTEP_NOIRQ)) {
            /* Mask out external interrupts for this step. */
            interrupt_request &= ~CPU_INTERRUPT_SSTEP_MASK;
        }
        if (interrupt_request & CPU_INTERRUPT_DEBUG) {
            cpu->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
            cpu->exception_index = EXCP_DEBUG;
            cpu_loop_exit(cpu);
        }
        if (replay_mode == REPLAY_MODE_PLAY && !replay_has_interrupt()) {
            /* Do nothing */
        } else if (interrupt_request & CPU_INTERRUPT_HALT) {
            replay_interrupt();
            cpu->interrupt_request &= ~CPU_INTERRUPT_HALT;
            cpu->halted = 1;
            cpu->exception_index = EXCP_HLT;
            cpu_loop_exit(cpu);
        }
#if defined(TARGET_I386)
        else if (interrupt_request & CPU_INTERRUPT_INIT) {
            X86CPU *x86_cpu = X86_CPU(cpu);
            CPUArchState *env = &x86_cpu->env;
            replay_interrupt();
            cpu_svm_check_intercept_param(env, SVM_EXIT_INIT, 0, 0);
            do_cpu_init(x86_cpu);
            cpu->exception_index = EXCP_HALTED;
            cpu_loop_exit(cpu);
        }
#else
        else if (interrupt_request & CPU_INTERRUPT_RESET) {
            replay_interrupt();
            cpu_reset(cpu);
            cpu_loop_exit(cpu);
        }
#endif
        /* The target hook has 3 exit conditions:
           False when the interrupt isn't processed,
           True when it is, and we should restart on a new TB,
           and via longjmp via cpu_loop_exit.  */
        else {
            replay_interrupt();
            if (cc->cpu_exec_interrupt(cpu, interrupt_request)) {
                *last_tb = NULL;
            }
            /* The target hook may have updated the 'cpu->interrupt_request';
             * reload the 'interrupt_request' value */
            interrupt_request = cpu->interrupt_request;
        }
        if (interrupt_request & CPU_INTERRUPT_EXITTB) {
            cpu->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
            /* ensure that no TB jump will be modified as
               the program flow was changed */
            *last_tb = NULL;
        }
    }
    if (unlikely(atomic_read(&cpu->exit_request) || replay_has_interrupt())) {
        atomic_set(&cpu->exit_request, 0);
        cpu->exception_index = EXCP_INTERRUPT;
        cpu_loop_exit(cpu);
    }
}

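/* Execute one TB and dispatch on its exit status: an exit request, an
 * expired instruction counter (refill it or run the remaining
 * instructions uncached), or a normal exit that permits chaining.
 */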
static inline void cpu_loop_exec_tb(CPUState *cpu, TranslationBlock *tb,
                                    TranslationBlock **last_tb, int *tb_exit,
                                    SyncClocks *sc)
{
    uintptr_t ret;

    if (unlikely(atomic_read(&cpu->exit_request))) {
        return;
    }

    trace_exec_tb(tb, tb->pc);
    ret = cpu_tb_exec(cpu, tb);
    tb = (TranslationBlock *)(ret & ~TB_EXIT_MASK);
    *tb_exit = ret & TB_EXIT_MASK;
    switch (*tb_exit) {
    case TB_EXIT_REQUESTED:
        /* Something asked us to stop executing chained TBs; just
         * continue round the main loop. Whatever requested the exit
         * will also have set something else (eg exit_request or
         * interrupt_request) which we will handle next time around
         * the loop.  But we need to ensure the zeroing of tcg_exit_req
         * (see cpu_tb_exec) comes before the next read of
         * cpu->exit_request or cpu->interrupt_request.
         */
        smp_rmb();
        *last_tb = NULL;
        break;
    case TB_EXIT_ICOUNT_EXPIRED:
    {
        /* Instruction counter expired.  */
#ifdef CONFIG_USER_ONLY
        abort();
#else
        int insns_left = cpu->icount_decr.u32;
        *last_tb = NULL;
        if (cpu->icount_extra && insns_left >= 0) {
            /* Refill decrementer and continue execution.  */
            cpu->icount_extra += insns_left;
            insns_left = MIN(0xffff, cpu->icount_extra);
            cpu->icount_extra -= insns_left;
            cpu->icount_decr.u16.low = insns_left;
        } else {
            if (insns_left > 0) {
                /* Execute remaining instructions.  */
                cpu_exec_nocache(cpu, insns_left, tb, false);
                align_clocks(sc, cpu);
            }
            cpu->exception_index = EXCP_INTERRUPT;
            cpu_loop_exit(cpu);
        }
        break;
#endif
    }
    default:
        *last_tb = tb;
        break;
    }
}

/* main execution loop */

int cpu_exec(CPUState *cpu)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);
    int ret;
    SyncClocks sc;

    /* replay_interrupt may need current_cpu */
    current_cpu = cpu;

    if (cpu_handle_halt(cpu)) {
        return EXCP_HALTED;
    }

    atomic_mb_set(&tcg_current_cpu, cpu);
    rcu_read_lock();

    if (unlikely(atomic_mb_read(&exit_request))) {
        cpu->exit_request = 1;
    }

    cc->cpu_exec_enter(cpu);

    /* Calculate difference between guest clock and host clock.
     * This delay includes the delay of the last cycle, so
     * what we have to do is sleep until it is 0. As for the
     * advance/delay we gain here, we try to fix it next time.
     */
    init_delay_params(&sc, cpu);

    for (;;) {
        /* prepare setjmp context for exception handling */
        if (sigsetjmp(cpu->jmp_env, 0) == 0) {
            TranslationBlock *tb, *last_tb = NULL;
            int tb_exit = 0;

            /* if an exception is pending, we execute it here */
            if (cpu_handle_exception(cpu, &ret)) {
                break;
            }

            for (;;) {
                cpu_handle_interrupt(cpu, &last_tb);
                tb = tb_find(cpu, last_tb, tb_exit);
                cpu_loop_exec_tb(cpu, tb, &last_tb, &tb_exit, &sc);
                /* Try to align the host and virtual clocks
                   if the guest is in advance */
                align_clocks(&sc, cpu);
            }
        } else {
#if defined(__clang__) || !QEMU_GNUC_PREREQ(4, 6)
            /* Some compilers wrongly smash all local variables after
             * siglongjmp. There were bug reports for gcc 4.5.0 and clang.
             * Reload essential local variables here for those compilers.
             * Newer versions of gcc would complain about this code (-Wclobbered). */
            cpu = current_cpu;
            cc = CPU_GET_CLASS(cpu);
#else
            /* Assert that the compiler does not smash local variables. */
            g_assert(cpu == current_cpu);
            g_assert(cc == CPU_GET_CLASS(cpu));
#endif
            cpu->can_do_io = 1;
            tb_lock_reset();
        }
    }

    cc->cpu_exec_exit(cpu);
    rcu_read_unlock();

    /* fail safe : never use current_cpu outside cpu_exec() */
    current_cpu = NULL;

    /* Does not need atomic_mb_set because a spurious wakeup is okay.  */
    atomic_set(&tcg_current_cpu, NULL);
    return ret;
}