/*
 *  emulation main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#include "cpu.h"
#include "disas.h"
#include "tcg.h"
#include "qemu-barrier.h"
#include "qtest.h"

int tb_invalidated_flag;

//#define CONFIG_DEBUG_EXEC

bool qemu_cpu_has_work(CPUArchState *env)
{
    return cpu_has_work(env);
}

void cpu_loop_exit(CPUArchState *env)
{
    env->current_tb = NULL;
    longjmp(env->jmp_env, 1);
}

/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
#if defined(CONFIG_SOFTMMU)
void cpu_resume_from_signal(CPUArchState *env, void *puc)
{
    /* XXX: restore cpu registers saved in host registers */

    env->exception_index = -1;
    longjmp(env->jmp_env, 1);
}
#endif

/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
static void cpu_exec_nocache(CPUArchState *env, int max_cycles,
                             TranslationBlock *orig_tb)
{
    tcg_target_ulong next_tb;
    TranslationBlock *tb;

    /* Should never happen.
       We only end up here when an existing TB is too long.  */
    if (max_cycles > CF_COUNT_MASK)
        max_cycles = CF_COUNT_MASK;

    tb = tb_gen_code(env, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                     max_cycles);
    env->current_tb = tb;
    /* execute the generated code */
    next_tb = tcg_qemu_tb_exec(env, tb->tc_ptr);
    env->current_tb = NULL;

    if ((next_tb & 3) == 2) {
        /* Restore PC.  This may happen if async event occurs before
           the TB starts executing.  */
        cpu_pc_from_tb(env, tb);
    }
    tb_phys_invalidate(tb, -1);
    tb_free(tb);
}
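
/* Note on tcg_qemu_tb_exec(): the low two bits of the returned
   next_tb value encode why control came back to the execution loop.
   Values 0 and 1 are the index of the jump slot in the TB that
   exited (used later with tb_add_jump() to patch a direct TB-to-TB
   jump), while 2 means the instruction counter expired mid-block.
   A next_tb of 0 is used throughout this file to mean "no previous
   TB, do not chain". */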

static TranslationBlock *tb_find_slow(CPUArchState *env,
                                      target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    TranslationBlock *tb, **ptb1;
    unsigned int h;
    tb_page_addr_t phys_pc, phys_page1;
    target_ulong virt_page2;

    tb_invalidated_flag = 0;

    /* find translated block using physical mappings */
    phys_pc = get_page_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                tb_page_addr_t phys_page2;

                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_page_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_gen_code(env, pc, cs_base, flags, 0);

 found:
    /* Move the last found TB to the head of the list */
    if (likely(*ptb1)) {
        *ptb1 = tb->phys_hash_next;
        tb->phys_hash_next = tb_phys_hash[h];
        tb_phys_hash[h] = tb;
    }
    /* we add the TB in the virtual pc hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}
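
/* The fast path: tb_find_fast() looks the current CPU state up in a
   small direct-mapped cache (env->tb_jmp_cache) hashed on the virtual
   PC, and only falls back to the physical-hash walk in tb_find_slow()
   on a miss.  A TB is only valid for a given (pc, cs_base, flags)
   triple, so all three fields are checked before the cached entry is
   trusted. */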
static inline TranslationBlock *tb_find_fast(CPUArchState *env)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    int flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
    tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                 tb->flags != flags)) {
        tb = tb_find_slow(env, pc, cs_base, flags);
    }
    return tb;
}

static CPUDebugExcpHandler *debug_excp_handler;

void cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler)
{
    debug_excp_handler = handler;
}

static void cpu_handle_debug_exception(CPUArchState *env)
{
    CPUWatchpoint *wp;

    if (!env->watchpoint_hit) {
        QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
    if (debug_excp_handler) {
        debug_excp_handler(env);
    }
}

/* main execution loop */

volatile sig_atomic_t exit_request;

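/* cpu_exec() is the heart of the emulator: it repeatedly finds (or
   translates) the TB matching the current CPU state, chains it to the
   previous TB when possible, and runs it.  setjmp() is armed once per
   outer-loop iteration; cpu_loop_exit() longjmp()s back here whenever
   a helper raises an exception or an interrupt must be serviced. */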
int cpu_exec(CPUArchState *env)
{
#ifdef TARGET_PPC
    CPUState *cpu = ENV_GET_CPU(env);
#endif
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    tcg_target_ulong next_tb;

    if (env->halted) {
        if (!cpu_has_work(env)) {
            return EXCP_HALTED;
        }

        env->halted = 0;
    }

    cpu_single_env = env;

    if (unlikely(exit_request)) {
        env->exit_request = 1;
    }

#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    DF = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_SPARC)
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_UNICORE32)
#elif defined(TARGET_PPC)
    env->reserve_addr = -1;
#elif defined(TARGET_LM32)
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_OPENRISC)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
#elif defined(TARGET_XTENSA)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    env->exception_index = -1;

    /* prepare setjmp context for exception handling */
    for(;;) {
        if (setjmp(env->jmp_env) == 0) {
            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    if (ret == EXCP_DEBUG) {
                        cpu_handle_debug_exception(env);
                    }
                    break;
                } else {
#if defined(CONFIG_USER_ONLY)
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    do_interrupt(env);
#endif
                    ret = env->exception_index;
                    break;
#else
                    do_interrupt(env);
                    env->exception_index = -1;
#endif
                }
            }

            next_tb = 0; /* force lookup of first TB */
            for(;;) {
                interrupt_request = env->interrupt_request;
                if (unlikely(interrupt_request)) {
                    if (unlikely(env->singlestep_enabled & SSTEP_NOIRQ)) {
                        /* Mask out external interrupts for this step. */
                        interrupt_request &= ~CPU_INTERRUPT_SSTEP_MASK;
                    }
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        env->exception_index = EXCP_DEBUG;
                        cpu_loop_exit(env);
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS) || \
    defined(TARGET_MICROBLAZE) || defined(TARGET_LM32) || defined(TARGET_UNICORE32)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        env->halted = 1;
                        env->exception_index = EXCP_HLT;
                        cpu_loop_exit(env);
                    }
#endif
#if defined(TARGET_I386)
#if !defined(CONFIG_USER_ONLY)
                    if (interrupt_request & CPU_INTERRUPT_POLL) {
                        env->interrupt_request &= ~CPU_INTERRUPT_POLL;
                        apic_poll_irq(env->apic_state);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_INIT) {
                        cpu_svm_check_intercept_param(env, SVM_EXIT_INIT,
                                                      0);
                        do_cpu_init(x86_env_get_cpu(env));
                        env->exception_index = EXCP_HALTED;
                        cpu_loop_exit(env);
                    } else if (interrupt_request & CPU_INTERRUPT_SIPI) {
                        do_cpu_sipi(x86_env_get_cpu(env));
                    } else if (env->hflags2 & HF2_GIF_MASK) {
                        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                            !(env->hflags & HF_SMM_MASK)) {
                            cpu_svm_check_intercept_param(env, SVM_EXIT_SMI,
                                                          0);
                            env->interrupt_request &= ~CPU_INTERRUPT_SMI;
                            do_smm_enter(env);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                                   !(env->hflags2 & HF2_NMI_MASK)) {
                            env->interrupt_request &= ~CPU_INTERRUPT_NMI;
                            env->hflags2 |= HF2_NMI_MASK;
                            do_interrupt_x86_hardirq(env, EXCP02_NMI, 1);
                            next_tb = 0;
                        } else if (interrupt_request & CPU_INTERRUPT_MCE) {
                            env->interrupt_request &= ~CPU_INTERRUPT_MCE;
                            do_interrupt_x86_hardirq(env, EXCP12_MCHK, 0);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                                   (((env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->hflags2 & HF2_HIF_MASK)) ||
                                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->eflags & IF_MASK &&
                                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
                            int intno;
                            cpu_svm_check_intercept_param(env, SVM_EXIT_INTR,
                                                          0);
                            env->interrupt_request &= ~(CPU_INTERRUPT_HARD |
                                                        CPU_INTERRUPT_VIRQ);
                            intno = cpu_get_pic_interrupt(env);
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing hardware INT=0x%02x\n", intno);
                            do_interrupt_x86_hardirq(env, intno, 1);
                            /* ensure that no TB jump will be modified as
                               the program flow was changed */
                            next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                                   (env->eflags & IF_MASK) &&
                                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                            int intno;
                            /* FIXME: this should respect TPR */
                            cpu_svm_check_intercept_param(env, SVM_EXIT_VINTR,
                                                          0);
                            intno = ldl_phys(env->vm_vmcb +
                                             offsetof(struct vmcb, control.int_vector));
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing virtual hardware INT=0x%02x\n", intno);
                            do_interrupt_x86_hardirq(env, intno, 1);
                            env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                            next_tb = 0;
#endif
                        }
                    }
#elif defined(TARGET_PPC)
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_reset(cpu);
                    }
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0)
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        next_tb = 0;
                    }
#elif defined(TARGET_LM32)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->ie & IE_IE)) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_MICROBLAZE)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->sregs[SR_MSR] & MSR_IE)
                        && !(env->sregs[SR_MSR] & (MSR_EIP | MSR_BIP))
                        && !(env->iflags & (D_FLAG | IMM_FLAG))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        cpu_mips_hw_interrupts_pending(env)) {
                        /* Raise it */
                        env->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_OPENRISC)
                    {
                        int idx = -1;
                        if ((interrupt_request & CPU_INTERRUPT_HARD)
                            && (env->sr & SR_IEE)) {
                            idx = EXCP_INT;
                        }
                        if ((interrupt_request & CPU_INTERRUPT_TIMER)
                            && (env->sr & SR_TEE)) {
                            idx = EXCP_TICK;
                        }
                        if (idx >= 0) {
                            env->exception_index = idx;
                            do_interrupt(env);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_SPARC)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        if (cpu_interrupts_enabled(env) &&
                            env->interrupt_index > 0) {
                            int pil = env->interrupt_index & 0xf;
                            int type = env->interrupt_index & 0xf0;

                            if (((type == TT_EXTINT) &&
                                  cpu_pil_allowed(env, pil)) ||
                                  type != TT_EXTINT) {
                                env->exception_index = env->interrupt_index;
                                do_interrupt(env);
                                next_tb = 0;
                            }
                        }
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->uncached_cpsr & CPSR_F)) {
                        env->exception_index = EXCP_FIQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address.  */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->uncached_cpsr & CPSR_I))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_UNICORE32)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && !(env->uncached_asr & ASR_I)) {
                        env->exception_index = UC32_EXCP_INTR;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    {
                        int idx = -1;
                        /* ??? This hard-codes the OSF/1 interrupt levels.  */
                        switch (env->pal_mode ? 7 : env->ps & PS_INT_MASK) {
                        case 0 ... 3:
                            if (interrupt_request & CPU_INTERRUPT_HARD) {
                                idx = EXCP_DEV_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 4:
                            if (interrupt_request & CPU_INTERRUPT_TIMER) {
                                idx = EXCP_CLK_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 5:
                            if (interrupt_request & CPU_INTERRUPT_SMP) {
                                idx = EXCP_SMP_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 6:
                            if (interrupt_request & CPU_INTERRUPT_MCHK) {
                                idx = EXCP_MCHK;
                            }
                        }
                        if (idx >= 0) {
                            env->exception_index = idx;
                            env->error_code = 0;
                            do_interrupt(env);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && (env->pregs[PR_CCS] & I_FLAG)
                        && !env->locked_irq) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_NMI) {
                        unsigned int m_flag_archval;
                        if (env->pregs[PR_VR] < 32) {
                            m_flag_archval = M_FLAG_V10;
                        } else {
                            m_flag_archval = M_FLAG_V32;
                        }
                        if ((env->pregs[PR_CCS] & m_flag_archval)) {
                            env->exception_index = EXCP_NMI;
                            do_interrupt(env);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        env->exception_index = env->pending_vector;
                        do_interrupt_m68k_hardirq(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_S390X) && !defined(CONFIG_USER_ONLY)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->psw.mask & PSW_MASK_EXT)) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_XTENSA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        env->exception_index = EXC_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#endif
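                    /* Note: whichever interrupt was delivered above, the
                       handler zeroes next_tb so that no direct TB-to-TB
                       jump is patched across the forced change in control
                       flow; the request bit is acknowledged either here or
                       inside the target's do_interrupt(). */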
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                }
                if (unlikely(env->exit_request)) {
                    env->exit_request = 0;
                    env->exception_index = EXCP_INTERRUPT;
                    cpu_loop_exit(env);
                }
#if defined(DEBUG_DISAS) || defined(CONFIG_DEBUG_EXEC)
                if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
                    /* restore flags in standard format */
#if defined(TARGET_I386)
                    env->eflags = env->eflags | cpu_cc_compute_all(env, CC_OP)
                        | (DF & DF_MASK);
                    log_cpu_state(env, X86_DUMP_CCOP);
                    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_M68K)
                    cpu_m68k_flush_flags(env, env->cc_op);
                    env->cc_op = CC_OP_FLAGS;
                    env->sr = (env->sr & 0xffe0)
                              | env->cc_dest | (env->cc_x << 4);
                    log_cpu_state(env, 0);
#else
                    log_cpu_state(env, 0);
#endif
                }
#endif
                spin_lock(&tb_lock);
                tb = tb_find_fast(env);
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                    tb_invalidated_flag = 0;
                }
#ifdef CONFIG_DEBUG_EXEC
                qemu_log_mask(CPU_LOG_EXEC, "Trace %p [" TARGET_FMT_lx "] %s\n",
                             tb->tc_ptr, tb->pc,
                             lookup_symbol(tb->pc));
#endif
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                if (next_tb != 0 && tb->page_addr[1] == -1) {
                    tb_add_jump((TranslationBlock *)(next_tb & ~3), next_tb & 3, tb);
                }
                spin_unlock(&tb_lock);

                /* cpu_interrupt might be called while translating the
                   TB, but before it is linked into a potentially
                   infinite loop and becomes env->current_tb. Avoid
                   starting execution if there is a pending interrupt. */
                env->current_tb = tb;
                barrier();
                if (likely(!env->exit_request)) {
                    tc_ptr = tb->tc_ptr;
                    /* execute the generated code */
                    next_tb = tcg_qemu_tb_exec(env, tc_ptr);
                    if ((next_tb & 3) == 2) {
                        /* Instruction counter expired.  */
                        int insns_left;
                        tb = (TranslationBlock *)(next_tb & ~3);
                        /* Restore PC.  */
                        cpu_pc_from_tb(env, tb);
                        insns_left = env->icount_decr.u32;
                        if (env->icount_extra && insns_left >= 0) {
                            /* Refill decrementer and continue execution.  */
                            env->icount_extra += insns_left;
                            if (env->icount_extra > 0xffff) {
                                insns_left = 0xffff;
                            } else {
                                insns_left = env->icount_extra;
                            }
                            env->icount_extra -= insns_left;
                            env->icount_decr.u16.low = insns_left;
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions.  */
                                cpu_exec_nocache(env, insns_left, tb);
                            }
                            env->exception_index = EXCP_INTERRUPT;
                            next_tb = 0;
                            cpu_loop_exit(env);
                        }
                    }
                }
                env->current_tb = NULL;
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
            } /* for(;;) */
        } else {
            /* Reload env after longjmp - the compiler may have smashed all
             * local variables as longjmp is marked 'noreturn'. */
            env = cpu_single_env;
        }
    } /* for(;;) */


#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | cpu_cc_compute_all(env, CC_OP)
        | (DF & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state?.  */
#elif defined(TARGET_UNICORE32)
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#elif defined(TARGET_LM32)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_OPENRISC)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
#elif defined(TARGET_XTENSA)
    /* XXXXX */
#else
#error unsupported target CPU
#endif

    /* fail safe : never use cpu_single_env outside cpu_exec() */
    cpu_single_env = NULL;
    return ret;
}