/*
 *  emulator main execution loop
 */
#include "config.h"
#include "cpu.h"
#include "disas.h"
#include "tcg.h"
#include "qemu-barrier.h"

int tb_invalidated_flag;

//#define CONFIG_DEBUG_EXEC

bool qemu_cpu_has_work(CPUState *env)
{
    return cpu_has_work(env);
}

void cpu_loop_exit(CPUState *env)
{
    env->current_tb = NULL;
    longjmp(env->jmp_env, 1);
}

/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
#if defined(CONFIG_SOFTMMU)
void cpu_resume_from_signal(CPUState *env, void *puc)
{
    /* XXX: restore cpu registers saved in host registers */

    env->exception_index = -1;
    longjmp(env->jmp_env, 1);
}
#endif

/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
static void cpu_exec_nocache(CPUState *env, int max_cycles,
                             TranslationBlock *orig_tb)
{
    unsigned long next_tb;
    TranslationBlock *tb;

    /* Should never happen.
       We only end up here when an existing TB is too long.  */
    if (max_cycles > CF_COUNT_MASK)
        max_cycles = CF_COUNT_MASK;

    tb = tb_gen_code(env, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                     max_cycles);
    env->current_tb = tb;
    /* execute the generated code */
    next_tb = tcg_qemu_tb_exec(env, tb->tc_ptr);
    env->current_tb = NULL;

    if ((next_tb & 3) == 2) {
        /* Restore PC.  This may happen if async event occurs before
           the TB starts executing.  */
        cpu_pc_from_tb(env, tb);
    }
    tb_phys_invalidate(tb, -1);
    tb_free(tb);
}
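
/*
 * Note: TB lookup is two-level.  tb_find_fast() below first probes the
 * per-CPU tb_jmp_cache, a direct-mapped cache indexed by a hash of the
 * virtual PC; on a miss it falls back to tb_find_slow(), which searches
 * the global tb_phys_hash chain keyed by the physical PC (so aliased
 * virtual mappings can share one translation) and, failing that,
 * translates a fresh block with tb_gen_code().
 */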

static TranslationBlock *tb_find_slow(CPUState *env,
                                      target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    TranslationBlock *tb, **ptb1;
    unsigned int h;
    tb_page_addr_t phys_pc, phys_page1;
    target_ulong virt_page2;

    tb_invalidated_flag = 0;

    /* find translated block using physical mappings */
    phys_pc = get_page_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                tb_page_addr_t phys_page2;

                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_page_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_gen_code(env, pc, cs_base, flags, 0);

 found:
    /* Move the last found TB to the head of the list */
    if (likely(*ptb1)) {
        *ptb1 = tb->phys_hash_next;
        tb->phys_hash_next = tb_phys_hash[h];
        tb_phys_hash[h] = tb;
    }
    /* we add the TB in the virtual pc hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}

static inline TranslationBlock *tb_find_fast(CPUState *env)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    int flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
    tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                 tb->flags != flags)) {
        tb = tb_find_slow(env, pc, cs_base, flags);
    }
    return tb;
}

static CPUDebugExcpHandler *debug_excp_handler;

CPUDebugExcpHandler *cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler)
{
    CPUDebugExcpHandler *old_handler = debug_excp_handler;

    debug_excp_handler = handler;
    return old_handler;
}

static void cpu_handle_debug_exception(CPUState *env)
{
    CPUWatchpoint *wp;

    if (!env->watchpoint_hit) {
        QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
    if (debug_excp_handler) {
        debug_excp_handler(env);
    }
}
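
/*
 * Usage sketch (hypothetical, not part of this file): a debug stub
 * registers its hook once at init time and can chain to whatever
 * handler was installed before it:
 *
 *     static CPUDebugExcpHandler *prev_handler;
 *
 *     static void my_debug_excp(CPUState *env)
 *     {
 *         // inspect env->watchpoint_hit, single-step state, ...
 *         if (prev_handler) {
 *             prev_handler(env);
 *         }
 *     }
 *
 *     prev_handler = cpu_set_debug_excp_handler(my_debug_excp);
 */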

/* main execution loop */

volatile sig_atomic_t exit_request;

int cpu_exec(CPUState *env)
{
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    unsigned long next_tb;

    if (env->halted) {
        if (!cpu_has_work(env)) {
            return EXCP_HALTED;
        }

        env->halted = 0;
    }

    cpu_single_env = env;

    if (unlikely(exit_request)) {
        env->exit_request = 1;
    }

#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    DF = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_SPARC)
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_UNICORE32)
#elif defined(TARGET_PPC)
    env->reserve_addr = -1;
#elif defined(TARGET_LM32)
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
#elif defined(TARGET_XTENSA)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    env->exception_index = -1;
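
    /*
     * Note: the target-specific blocks above load guest flag state into
     * the temporary format that generated code works on.  i386 and m68k
     * evaluate condition codes lazily: translated code tracks the last
     * flag-setting operation (CC_OP) and its operands instead of
     * materializing EFLAGS/SR after every instruction; the architectural
     * flags are folded back together on exit from cpu_exec() below.
     */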

    /* prepare setjmp context for exception handling */
    for(;;) {
        if (setjmp(env->jmp_env) == 0) {
            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    if (ret == EXCP_DEBUG) {
                        cpu_handle_debug_exception(env);
                    }
                    break;
                } else {
#if defined(CONFIG_USER_ONLY)
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    do_interrupt(env);
#endif
                    ret = env->exception_index;
                    break;
#else
                    do_interrupt(env);
                    env->exception_index = -1;
#endif
                }
            }

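            /*
             * next_tb encoding: tcg_qemu_tb_exec() returns the address
             * of the last executed TB with its two low bits overloaded.
             * Values 0 and 1 name the goto_tb jump slot that exited the
             * TB (used below to patch that slot for direct chaining);
             * the value 2 signals that the instruction counter expired.
             * next_tb = 0 means "no chaining candidate".
             */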
            next_tb = 0; /* force lookup of first TB */
            for(;;) {
                interrupt_request = env->interrupt_request;
                if (unlikely(interrupt_request)) {
                    if (unlikely(env->singlestep_enabled & SSTEP_NOIRQ)) {
                        /* Mask out external interrupts for this step. */
                        interrupt_request &= ~CPU_INTERRUPT_SSTEP_MASK;
                    }
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        env->exception_index = EXCP_DEBUG;
                        cpu_loop_exit(env);
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS) || \
    defined(TARGET_MICROBLAZE) || defined(TARGET_LM32) || defined(TARGET_UNICORE32)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        env->halted = 1;
                        env->exception_index = EXCP_HLT;
                        cpu_loop_exit(env);
                    }
#endif
#if defined(TARGET_I386)
                    if (interrupt_request & CPU_INTERRUPT_INIT) {
                        svm_check_intercept(env, SVM_EXIT_INIT);
                        do_cpu_init(env);
                        env->exception_index = EXCP_HALTED;
                        cpu_loop_exit(env);
                    } else if (interrupt_request & CPU_INTERRUPT_SIPI) {
                        do_cpu_sipi(env);
                    } else if (env->hflags2 & HF2_GIF_MASK) {
                        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                            !(env->hflags & HF_SMM_MASK)) {
                            svm_check_intercept(env, SVM_EXIT_SMI);
                            env->interrupt_request &= ~CPU_INTERRUPT_SMI;
                            do_smm_enter(env);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                                   !(env->hflags2 & HF2_NMI_MASK)) {
                            env->interrupt_request &= ~CPU_INTERRUPT_NMI;
                            env->hflags2 |= HF2_NMI_MASK;
                            do_interrupt_x86_hardirq(env, EXCP02_NMI, 1);
                            next_tb = 0;
                        } else if (interrupt_request & CPU_INTERRUPT_MCE) {
                            env->interrupt_request &= ~CPU_INTERRUPT_MCE;
                            do_interrupt_x86_hardirq(env, EXCP12_MCHK, 0);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                                   (((env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->hflags2 & HF2_HIF_MASK)) ||
                                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->eflags & IF_MASK &&
                                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
                            int intno;
                            svm_check_intercept(env, SVM_EXIT_INTR);
                            env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
                            intno = cpu_get_pic_interrupt(env);
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing hardware INT=0x%02x\n", intno);
                            do_interrupt_x86_hardirq(env, intno, 1);
                            /* ensure that no TB jump will be modified as
                               the program flow was changed */
                            next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                                   (env->eflags & IF_MASK) &&
                                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                            int intno;
                            /* FIXME: this should respect TPR */
                            svm_check_intercept(env, SVM_EXIT_VINTR);
                            intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing virtual hardware INT=0x%02x\n", intno);
                            do_interrupt_x86_hardirq(env, intno, 1);
                            env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                            next_tb = 0;
#endif
                        }
                    }
#elif defined(TARGET_PPC)
#if 0
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_reset(env);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0)
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        next_tb = 0;
                    }
#elif defined(TARGET_LM32)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->ie & IE_IE)) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_MICROBLAZE)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->sregs[SR_MSR] & MSR_IE)
                        && !(env->sregs[SR_MSR] & (MSR_EIP | MSR_BIP))
                        && !(env->iflags & (D_FLAG | IMM_FLAG))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        cpu_mips_hw_interrupts_pending(env)) {
                        /* Raise it */
                        env->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SPARC)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        if (cpu_interrupts_enabled(env) &&
                            env->interrupt_index > 0) {
                            int pil = env->interrupt_index & 0xf;
                            int type = env->interrupt_index & 0xf0;

                            if (((type == TT_EXTINT) &&
                                  cpu_pil_allowed(env, pil)) ||
                                  type != TT_EXTINT) {
                                env->exception_index = env->interrupt_index;
                                do_interrupt(env);
                                next_tb = 0;
                            }
                        }
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->uncached_cpsr & CPSR_F)) {
                        env->exception_index = EXCP_FIQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address.  */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->uncached_cpsr & CPSR_I))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_UNICORE32)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && !(env->uncached_asr & ASR_I)) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    {
                        int idx = -1;
                        /* ??? This hard-codes the OSF/1 interrupt levels.  */
                        switch (env->pal_mode ? 7 : env->ps & PS_INT_MASK) {
                        case 0 ... 3:
                            if (interrupt_request & CPU_INTERRUPT_HARD) {
                                idx = EXCP_DEV_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 4:
                            if (interrupt_request & CPU_INTERRUPT_TIMER) {
                                idx = EXCP_CLK_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 5:
                            if (interrupt_request & CPU_INTERRUPT_SMP) {
                                idx = EXCP_SMP_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 6:
                            if (interrupt_request & CPU_INTERRUPT_MCHK) {
                                idx = EXCP_MCHK;
                            }
                        }
                        if (idx >= 0) {
                            env->exception_index = idx;
                            env->error_code = 0;
                            do_interrupt(env);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && (env->pregs[PR_CCS] & I_FLAG)
                        && !env->locked_irq) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_NMI
                        && (env->pregs[PR_CCS] & M_FLAG)) {
                        env->exception_index = EXCP_NMI;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        env->exception_index = env->pending_vector;
                        do_interrupt_m68k_hardirq(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_S390X) && !defined(CONFIG_USER_ONLY)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->psw.mask & PSW_MASK_EXT)) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_XTENSA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        env->exception_index = EXC_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#endif
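                    /*
                     * Note: every target branch above follows one
                     * pattern: test its CPU_INTERRUPT_* bit against the
                     * guest's interrupt-enable state, deliver through
                     * do_interrupt(), and clear next_tb so the next TB
                     * is looked up afresh rather than reached through a
                     * chained jump, since the program flow has changed.
                     */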
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                }
                if (unlikely(env->exit_request)) {
                    env->exit_request = 0;
                    env->exception_index = EXCP_INTERRUPT;
                    cpu_loop_exit(env);
                }
#if defined(DEBUG_DISAS) || defined(CONFIG_DEBUG_EXEC)
                if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
                    /* restore flags in standard format */
#if defined(TARGET_I386)
                    env->eflags = env->eflags | cpu_cc_compute_all(env, CC_OP)
                        | (DF & DF_MASK);
                    log_cpu_state(env, X86_DUMP_CCOP);
                    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_M68K)
                    cpu_m68k_flush_flags(env, env->cc_op);
                    env->cc_op = CC_OP_FLAGS;
                    env->sr = (env->sr & 0xffe0)
                              | env->cc_dest | (env->cc_x << 4);
                    log_cpu_state(env, 0);
#else
                    log_cpu_state(env, 0);
#endif
                }
#endif /* DEBUG_DISAS || CONFIG_DEBUG_EXEC */
                spin_lock(&tb_lock);
                tb = tb_find_fast(env);
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                    tb_invalidated_flag = 0;
                }
#ifdef CONFIG_DEBUG_EXEC
                qemu_log_mask(CPU_LOG_EXEC, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
                              (long)tb->tc_ptr, tb->pc,
                              lookup_symbol(tb->pc));
#endif
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                if (next_tb != 0 && tb->page_addr[1] == -1) {
                    tb_add_jump((TranslationBlock *)(next_tb & ~3), next_tb & 3, tb);
                }
                spin_unlock(&tb_lock);
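                /*
                 * Note: tb_add_jump() above implements direct block
                 * chaining: it patches goto_tb slot (next_tb & 3) of the
                 * previously executed TB so generated code branches
                 * straight into the new TB on later runs, bypassing this
                 * lookup loop.  TBs spanning two guest pages are never
                 * chained to, since the second page may be remapped or
                 * invalidated independently of the first.
                 */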

                /* cpu_interrupt might be called while translating the
                   TB, but before it is linked into a potentially
                   infinite loop and becomes env->current_tb. Avoid
                   starting execution if there is a pending interrupt. */
                env->current_tb = tb;
                barrier();
                if (likely(!env->exit_request)) {
                    tc_ptr = tb->tc_ptr;
                    /* execute the generated code */
                    next_tb = tcg_qemu_tb_exec(env, tc_ptr);
                    if ((next_tb & 3) == 2) {
                        /* Instruction counter expired.  */
                        int insns_left;
                        tb = (TranslationBlock *)(long)(next_tb & ~3);
                        /* Restore PC.  */
                        cpu_pc_from_tb(env, tb);
                        insns_left = env->icount_decr.u32;
                        if (env->icount_extra && insns_left >= 0) {
                            /* Refill decrementer and continue execution.  */
                            env->icount_extra += insns_left;
                            if (env->icount_extra > 0xffff) {
                                insns_left = 0xffff;
                            } else {
                                insns_left = env->icount_extra;
                            }
                            env->icount_extra -= insns_left;
                            env->icount_decr.u16.low = insns_left;
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions.  */
                                cpu_exec_nocache(env, insns_left, tb);
                            }
                            env->exception_index = EXCP_INTERRUPT;
                            next_tb = 0;
                            cpu_loop_exit(env);
                        }
                    }
                }
                env->current_tb = NULL;
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
            } /* for(;;) */
        } else {
            /* Reload env after longjmp - the compiler may have smashed all
             * local variables as longjmp is marked 'noreturn'. */
            env = cpu_single_env;
        }
    } /* for(;;) */


#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | cpu_cc_compute_all(env, CC_OP)
        | (DF & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state?.  */
#elif defined(TARGET_UNICORE32)
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#elif defined(TARGET_LM32)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
#elif defined(TARGET_XTENSA)
    /* XXXXX */
#else
#error unsupported target CPU
#endif

    /* fail safe : never use cpu_single_env outside cpu_exec() */
    cpu_single_env = NULL;
    return ret;
}