/*
 *  emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#include "cpu.h"
#include "disas/disas.h"
#include "tcg.h"
#include "qemu/atomic.h"
#include "sysemu/qtest.h"

bool qemu_cpu_has_work(CPUState *cpu)
{
    return cpu_has_work(cpu);
}

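/* Unwind back to the sigsetjmp() in cpu_exec(); the CPU is no longer
   executing a TB, so clear current_tb before the jump.  */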
void cpu_loop_exit(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);

    cpu->current_tb = NULL;
    siglongjmp(env->jmp_env, 1);
}

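/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator.  */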
#if defined(CONFIG_SOFTMMU)
void cpu_resume_from_signal(CPUArchState *env, void *puc)
{
    env->exception_index = -1;
    siglongjmp(env->jmp_env, 1);
}
#endif

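/* Execute a TB, and fix up the CPU state afterwards if necessary. */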
static inline tcg_target_ulong cpu_tb_exec(CPUState *cpu, uint8_t *tb_ptr)
{
    CPUArchState *env = cpu->env_ptr;
    tcg_target_ulong next_tb = tcg_qemu_tb_exec(env, tb_ptr);
    if ((next_tb & TB_EXIT_MASK) > TB_EXIT_IDX1) {
        /* We didn't start executing this TB (eg because the instruction
         * counter hit zero); we must restore the guest PC to the address
         * of the start of the TB.
         */
        TranslationBlock *tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
        cpu_pc_from_tb(env, tb);
    }
    if ((next_tb & TB_EXIT_MASK) == TB_EXIT_REQUESTED) {
        /* We were asked to stop executing TBs (probably a pending
         * interrupt). We've now stopped, so clear the flag.
         */
        cpu->tcg_exit_req = 0;
    }
    return next_tb;
}

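/* Execute the code without caching the generated code. An interpreter
   could be used if available. */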
static void cpu_exec_nocache(CPUArchState *env, int max_cycles,
                             TranslationBlock *orig_tb)
{
    CPUState *cpu = ENV_GET_CPU(env);
    TranslationBlock *tb;

    /* Should never happen.
       We only end up here when an existing TB is too long.  */
    if (max_cycles > CF_COUNT_MASK)
        max_cycles = CF_COUNT_MASK;

    tb = tb_gen_code(env, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                     max_cycles);
    cpu->current_tb = tb;
    /* execute the generated code */
    cpu_tb_exec(cpu, tb->tc_ptr);
    cpu->current_tb = NULL;
    tb_phys_invalidate(tb, -1);
    tb_free(tb);
}

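/* Look a TB up via the physical-address hash table, translating the
   code first if no TB exists for it yet.  */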
static TranslationBlock *tb_find_slow(CPUArchState *env,
                                      target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    TranslationBlock *tb, **ptb1;
    unsigned int h;
    tb_page_addr_t phys_pc, phys_page1;
    target_ulong virt_page2;

    tcg_ctx.tb_ctx.tb_invalidated_flag = 0;

    /* find translated block using physical mappings */
    phys_pc = get_page_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tcg_ctx.tb_ctx.tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                tb_page_addr_t phys_page2;

                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_page_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_gen_code(env, pc, cs_base, flags, 0);

 found:
    /* Move the TB to the head of the list */
    if (likely(*ptb1)) {
        *ptb1 = tb->phys_hash_next;
        tb->phys_hash_next = tcg_ctx.tb_ctx.tb_phys_hash[h];
        tcg_ctx.tb_ctx.tb_phys_hash[h] = tb;
    }
    /* we add the TB in the virtual pc hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}

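/* Fast-path TB lookup through the per-virtual-PC jump cache, falling
   back to tb_find_slow() on a miss.  */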
static inline TranslationBlock *tb_find_fast(CPUArchState *env)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    int flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
    tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                 tb->flags != flags)) {
        tb = tb_find_slow(env, pc, cs_base, flags);
    }
    return tb;
}

static CPUDebugExcpHandler *debug_excp_handler;

void cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler)
{
    debug_excp_handler = handler;
}

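/* If no watchpoint actually fired, clear any stale BP_WATCHPOINT_HIT
   flags; then dispatch to the registered debug exception handler.  */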
static void cpu_handle_debug_exception(CPUArchState *env)
{
    CPUWatchpoint *wp;

    if (!env->watchpoint_hit) {
        QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
    if (debug_excp_handler) {
        debug_excp_handler(env);
    }
}

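/* main execution loop */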
volatile sig_atomic_t exit_request;

int cpu_exec(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
#if !(defined(CONFIG_USER_ONLY) && \
      (defined(TARGET_M68K) || defined(TARGET_PPC) || defined(TARGET_S390X)))
    CPUClass *cc = CPU_GET_CLASS(cpu);
#endif
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    tcg_target_ulong next_tb;

    if (cpu->halted) {
        if (!cpu_has_work(cpu)) {
            return EXCP_HALTED;
        }

        cpu->halted = 0;
    }

    cpu_single_env = env;

    /* As long as cpu_single_env is null, up to the assignment just above,
     * requests by other threads to exit the execution loop are expected to
     * be issued using the exit_request global. We must make sure that our
     * evaluation of the global value is performed past the
     * cpu_single_env value transition point, which requires a memory
     * barrier as well as an instruction scheduling constraint on modern
     * architectures.  */
    smp_mb();

    if (unlikely(exit_request)) {
        cpu->exit_request = 1;
    }

#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    DF = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_SPARC)
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_UNICORE32)
#elif defined(TARGET_PPC)
    env->reserve_addr = -1;
#elif defined(TARGET_LM32)
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_MOXIE)
#elif defined(TARGET_OPENRISC)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
#elif defined(TARGET_XTENSA)
#else
#error unsupported target CPU
#endif
    env->exception_index = -1;

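    /* prepare setjmp context for exception handling */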
    for(;;) {
        if (sigsetjmp(env->jmp_env, 0) == 0) {
            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    if (ret == EXCP_DEBUG) {
                        cpu_handle_debug_exception(env);
                    }
                    break;
                } else {
#if defined(CONFIG_USER_ONLY)
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    cc->do_interrupt(cpu);
#endif
                    ret = env->exception_index;
                    break;
#else
                    cc->do_interrupt(cpu);
                    env->exception_index = -1;
#endif
                }
            }

            next_tb = 0; /* force lookup of first TB */
            for(;;) {
                interrupt_request = cpu->interrupt_request;
                if (unlikely(interrupt_request)) {
                    if (unlikely(env->singlestep_enabled & SSTEP_NOIRQ)) {
                        /* Mask out external interrupts for this step. */
                        interrupt_request &= ~CPU_INTERRUPT_SSTEP_MASK;
                    }
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        env->exception_index = EXCP_DEBUG;
                        cpu_loop_exit(env);
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS) || \
    defined(TARGET_MICROBLAZE) || defined(TARGET_LM32) || defined(TARGET_UNICORE32)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        cpu->halted = 1;
                        env->exception_index = EXCP_HLT;
                        cpu_loop_exit(env);
                    }
#endif
#if defined(TARGET_I386)
#if !defined(CONFIG_USER_ONLY)
                    if (interrupt_request & CPU_INTERRUPT_POLL) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_POLL;
                        apic_poll_irq(env->apic_state);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_INIT) {
                        cpu_svm_check_intercept_param(env, SVM_EXIT_INIT,
                                                      0);
                        do_cpu_init(x86_env_get_cpu(env));
                        env->exception_index = EXCP_HALTED;
                        cpu_loop_exit(env);
                    } else if (interrupt_request & CPU_INTERRUPT_SIPI) {
                        do_cpu_sipi(x86_env_get_cpu(env));
                    } else if (env->hflags2 & HF2_GIF_MASK) {
                        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                            !(env->hflags & HF_SMM_MASK)) {
                            cpu_svm_check_intercept_param(env, SVM_EXIT_SMI,
                                                          0);
                            cpu->interrupt_request &= ~CPU_INTERRUPT_SMI;
                            do_smm_enter(env);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                                   !(env->hflags2 & HF2_NMI_MASK)) {
                            cpu->interrupt_request &= ~CPU_INTERRUPT_NMI;
                            env->hflags2 |= HF2_NMI_MASK;
                            do_interrupt_x86_hardirq(env, EXCP02_NMI, 1);
                            next_tb = 0;
                        } else if (interrupt_request & CPU_INTERRUPT_MCE) {
                            cpu->interrupt_request &= ~CPU_INTERRUPT_MCE;
                            do_interrupt_x86_hardirq(env, EXCP12_MCHK, 0);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                                   (((env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->hflags2 & HF2_HIF_MASK)) ||
                                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->eflags & IF_MASK &&
                                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
                            int intno;
                            cpu_svm_check_intercept_param(env, SVM_EXIT_INTR,
                                                          0);
                            cpu->interrupt_request &= ~(CPU_INTERRUPT_HARD |
                                                        CPU_INTERRUPT_VIRQ);
                            intno = cpu_get_pic_interrupt(env);
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing hardware INT=0x%02x\n", intno);
                            do_interrupt_x86_hardirq(env, intno, 1);
                            /* ensure that no TB jump will be modified as
                               the program flow was changed */
                            next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                                   (env->eflags & IF_MASK) &&
                                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                            int intno;
                            /* FIXME: this should respect TPR */
                            cpu_svm_check_intercept_param(env, SVM_EXIT_VINTR,
                                                          0);
                            intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing virtual hardware INT=0x%02x\n", intno);
                            do_interrupt_x86_hardirq(env, intno, 1);
                            cpu->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                            next_tb = 0;
#endif
                        }
                    }
#elif defined(TARGET_PPC)
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_reset(cpu);
                    }
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0) {
                            cpu->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        }
                        next_tb = 0;
                    }
#elif defined(TARGET_LM32)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->ie & IE_IE)) {
                        env->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_MICROBLAZE)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->sregs[SR_MSR] & MSR_IE)
                        && !(env->sregs[SR_MSR] & (MSR_EIP | MSR_BIP))
                        && !(env->iflags & (D_FLAG | IMM_FLAG))) {
                        env->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        cpu_mips_hw_interrupts_pending(env)) {
                        /* Raise it */
                        env->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_OPENRISC)
                    {
                        int idx = -1;
                        if ((interrupt_request & CPU_INTERRUPT_HARD)
                            && (env->sr & SR_IEE)) {
                            idx = EXCP_INT;
                        }
                        if ((interrupt_request & CPU_INTERRUPT_TIMER)
                            && (env->sr & SR_TEE)) {
                            idx = EXCP_TICK;
                        }
                        if (idx >= 0) {
                            env->exception_index = idx;
                            cc->do_interrupt(cpu);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_SPARC)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        if (cpu_interrupts_enabled(env) &&
                            env->interrupt_index > 0) {
                            int pil = env->interrupt_index & 0xf;
                            int type = env->interrupt_index & 0xf0;

                            if (((type == TT_EXTINT) &&
                                 cpu_pil_allowed(env, pil)) ||
                                type != TT_EXTINT) {
                                env->exception_index = env->interrupt_index;
                                cc->do_interrupt(cpu);
                                next_tb = 0;
                            }
                        }
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->uncached_cpsr & CPSR_F)) {
                        env->exception_index = EXCP_FIQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address.  */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->uncached_cpsr & CPSR_I))) {
                        env->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_UNICORE32)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && !(env->uncached_asr & ASR_I)) {
                        env->exception_index = UC32_EXCP_INTR;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    {
                        int idx = -1;
                        /* ??? This hard-codes the OSF/1 interrupt levels.  */
                        switch (env->pal_mode ? 7 : env->ps & PS_INT_MASK) {
                        case 0 ... 3:
                            if (interrupt_request & CPU_INTERRUPT_HARD) {
                                idx = EXCP_DEV_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 4:
                            if (interrupt_request & CPU_INTERRUPT_TIMER) {
                                idx = EXCP_CLK_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 5:
                            if (interrupt_request & CPU_INTERRUPT_SMP) {
                                idx = EXCP_SMP_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 6:
                            if (interrupt_request & CPU_INTERRUPT_MCHK) {
                                idx = EXCP_MCHK;
                            }
                        }
                        if (idx >= 0) {
                            env->exception_index = idx;
                            env->error_code = 0;
                            cc->do_interrupt(cpu);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && (env->pregs[PR_CCS] & I_FLAG)
                        && !env->locked_irq) {
                        env->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_NMI) {
                        unsigned int m_flag_archval;
                        if (env->pregs[PR_VR] < 32) {
                            m_flag_archval = M_FLAG_V10;
                        } else {
                            m_flag_archval = M_FLAG_V32;
                        }
                        if ((env->pregs[PR_CCS] & m_flag_archval)) {
                            env->exception_index = EXCP_NMI;
                            cc->do_interrupt(cpu);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        env->exception_index = env->pending_vector;
                        do_interrupt_m68k_hardirq(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_S390X) && !defined(CONFIG_USER_ONLY)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->psw.mask & PSW_MASK_EXT)) {
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_XTENSA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        env->exception_index = EXC_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#endif
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (cpu->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                }
                if (unlikely(cpu->exit_request)) {
                    cpu->exit_request = 0;
                    env->exception_index = EXCP_INTERRUPT;
                    cpu_loop_exit(env);
                }
#if defined(DEBUG_DISAS)
                if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
                    /* restore flags in standard format */
#if defined(TARGET_I386)
                    log_cpu_state(env, CPU_DUMP_CCOP);
#elif defined(TARGET_M68K)
                    cpu_m68k_flush_flags(env, env->cc_op);
                    env->cc_op = CC_OP_FLAGS;
                    env->sr = (env->sr & 0xffe0)
                              | env->cc_dest | (env->cc_x << 4);
                    log_cpu_state(env, 0);
#else
                    log_cpu_state(env, 0);
#endif
                }
#endif /* DEBUG_DISAS */
                spin_lock(&tcg_ctx.tb_ctx.tb_lock);
                tb = tb_find_fast(env);
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tcg_ctx.tb_ctx.tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                    tcg_ctx.tb_ctx.tb_invalidated_flag = 0;
                }
                if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
                    qemu_log("Trace %p [" TARGET_FMT_lx "] %s\n",
                             tb->tc_ptr, tb->pc, lookup_symbol(tb->pc));
                }
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                if (next_tb != 0 && tb->page_addr[1] == -1) {
                    tb_add_jump((TranslationBlock *)(next_tb & ~TB_EXIT_MASK),
                                next_tb & TB_EXIT_MASK, tb);
                }
                spin_unlock(&tcg_ctx.tb_ctx.tb_lock);

                /* cpu_interrupt might be called while translating the
                   TB, but before it is linked into a potentially
                   infinite loop and becomes env->current_tb. Avoid
                   starting execution if there is a pending interrupt. */
                cpu->current_tb = tb;
                barrier();
                if (likely(!cpu->exit_request)) {
                    tc_ptr = tb->tc_ptr;
                    /* execute the generated code */
                    next_tb = cpu_tb_exec(cpu, tc_ptr);
                    switch (next_tb & TB_EXIT_MASK) {
                    case TB_EXIT_REQUESTED:
                        /* Something asked us to stop executing
                         * chained TBs; just continue round the main
                         * loop. Whatever requested the exit will also
                         * have set something else (eg exit_request or
                         * interrupt_request) which we will handle
                         * next time around the loop.
                         */
                        tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
                        next_tb = 0;
                        break;
                    case TB_EXIT_ICOUNT_EXPIRED:
                    {
                        /* Instruction counter expired.  */
                        int insns_left;
                        tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
                        insns_left = env->icount_decr.u32;
                        if (env->icount_extra && insns_left >= 0) {
                            /* Refill decrementer and continue execution.  */
                            env->icount_extra += insns_left;
                            if (env->icount_extra > 0xffff) {
                                insns_left = 0xffff;
                            } else {
                                insns_left = env->icount_extra;
                            }
                            env->icount_extra -= insns_left;
                            env->icount_decr.u16.low = insns_left;
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions.  */
                                cpu_exec_nocache(env, insns_left, tb);
                            }
                            env->exception_index = EXCP_INTERRUPT;
                            next_tb = 0;
                            cpu_loop_exit(env);
                        }
                        break;
                    }
                    default:
                        break;
                    }
                }
                cpu->current_tb = NULL;
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
            } /* for(;;) */
        } else {
            /* Reload env after longjmp - the compiler may have smashed all
             * local variables as longjmp is marked 'noreturn'. */
            env = cpu_single_env;
        }
    } /* for(;;) */

#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | cpu_cc_compute_all(env, CC_OP)
        | (DF & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state?.  */
#elif defined(TARGET_UNICORE32)
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#elif defined(TARGET_LM32)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_MOXIE)
#elif defined(TARGET_OPENRISC)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
#elif defined(TARGET_XTENSA)
#else
#error unsupported target CPU
#endif

    /* fail safe : never use cpu_single_env outside cpu_exec() */
    cpu_single_env = NULL;
    return ret;
}