1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19#ifdef _WIN32
20#include <windows.h>
21#else
22#include <sys/types.h>
23#include <sys/mman.h>
24#endif
25#include <stdarg.h>
26#include <stdlib.h>
27#include <stdio.h>
28#include <string.h>
29#include <inttypes.h>
30
31#include "config.h"
32
33#include "qemu-common.h"
34#define NO_CPU_IO_DEFS
35#include "cpu.h"
36#include "trace.h"
37#include "disas/disas.h"
38#include "tcg.h"
39#if defined(CONFIG_USER_ONLY)
40#include "qemu.h"
41#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
42#include <sys/param.h>
43#if __FreeBSD_version >= 700104
44#define HAVE_KINFO_GETVMMAP
45#define sigqueue sigqueue_freebsd
46#include <sys/time.h>
47#include <sys/proc.h>
48#include <machine/profile.h>
49#define _KERNEL
50#include <sys/user.h>
51#undef _KERNEL
52#undef sigqueue
53#include <libutil.h>
54#endif
55#endif
56#else
57#include "exec/address-spaces.h"
58#endif
59
60#include "exec/cputlb.h"
61#include "translate-all.h"
62#include "qemu/timer.h"
63
64
65
66
67
68
69#if !defined(CONFIG_USER_ONLY)
70
71#undef DEBUG_TB_CHECK
72#endif
73
74#define SMC_BITMAP_USE_THRESHOLD 10
75
/* Per-guest-page bookkeeping for the translated-code cache. */
typedef struct PageDesc {
    /* list of TBs intersecting this physical page (low 2 bits of each
       link encode which of the TB's two page slots the entry is in) */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;   /* one bit per byte of translated guest code */
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;    /* PAGE_* protection flags (user-mode only) */
#endif
} PageDesc;
87
88
89
90#if !defined(CONFIG_USER_ONLY)
91#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
92# define L1_MAP_ADDR_SPACE_BITS HOST_LONG_BITS
93#else
94# define L1_MAP_ADDR_SPACE_BITS TARGET_PHYS_ADDR_SPACE_BITS
95#endif
96#else
97# define L1_MAP_ADDR_SPACE_BITS TARGET_VIRT_ADDR_SPACE_BITS
98#endif
99
100
101#define V_L2_BITS 10
102#define V_L2_SIZE (1 << V_L2_BITS)
103
104
105#define V_L1_BITS_REM \
106 ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % V_L2_BITS)
107
108#if V_L1_BITS_REM < 4
109#define V_L1_BITS (V_L1_BITS_REM + V_L2_BITS)
110#else
111#define V_L1_BITS V_L1_BITS_REM
112#endif
113
114#define V_L1_SIZE ((target_ulong)1 << V_L1_BITS)
115
116#define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)
117
118uintptr_t qemu_real_host_page_size;
119uintptr_t qemu_host_page_size;
120uintptr_t qemu_host_page_mask;
121
122
123
124static void *l1_map[V_L1_SIZE];
125
126
127TCGContext tcg_ctx;
128
129static void tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
130 tb_page_addr_t phys_page2);
131static TranslationBlock *tb_find_pc(uintptr_t tc_ptr);
132
/* Initialize the global TCG translation context.  Called once at startup. */
void cpu_gen_init(void)
{
    tcg_context_init(&tcg_ctx);
}
137
138
139
140
141
142
143
/*
 * Translate the guest code of @tb into host machine code.
 *
 * Runs the target front end (gen_intermediate_code) to produce TCG ops,
 * then the TCG back end (tcg_gen_code) to emit host instructions at
 * tb->tc_ptr.  The emitted size is stored in *gen_code_size_ptr.
 * Always returns 0.
 */
int cpu_gen_code(CPUArchState *env, TranslationBlock *tb, int *gen_code_size_ptr)
{
    TCGContext *s = &tcg_ctx;
    tcg_insn_unit *gen_code_buf;
    int gen_code_size;
#ifdef CONFIG_PROFILER
    int64_t ti;
#endif

#ifdef CONFIG_PROFILER
    s->tb_count1++; /* includes aborted translations because of
                       exceptions */
    ti = profile_getclock();
#endif
    tcg_func_start(s);

    gen_intermediate_code(env, tb);

    trace_translate_block(tb, tb->pc, tb->tc_ptr);

    /* generate machine code */
    gen_code_buf = tb->tc_ptr;
    /* 0xffff marks a direct-jump slot that has not been patched yet
       (see tb_link_page/tb_reset_jump). */
    tb->tb_next_offset[0] = 0xffff;
    tb->tb_next_offset[1] = 0xffff;
    s->tb_next_offset = tb->tb_next_offset;
#ifdef USE_DIRECT_JUMP
    s->tb_jmp_offset = tb->tb_jmp_offset;
    s->tb_next = NULL;
#else
    s->tb_jmp_offset = NULL;
    s->tb_next = tb->tb_next;
#endif

#ifdef CONFIG_PROFILER
    s->tb_count++;
    s->interm_time += profile_getclock() - ti;
    s->code_time -= profile_getclock();
#endif
    gen_code_size = tcg_gen_code(s, gen_code_buf);
    *gen_code_size_ptr = gen_code_size;
#ifdef CONFIG_PROFILER
    s->code_time += profile_getclock();
    s->code_in_len += tb->size;
    s->code_out_len += gen_code_size;
#endif

#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_OUT_ASM)) {
        qemu_log("OUT: [size=%d]\n", gen_code_size);
        log_disas(tb->tc_ptr, gen_code_size);
        qemu_log("\n");
        qemu_log_flush();
    }
#endif
    return 0;
}
200
201
202
/* The cpu state corresponding to 'searched_pc' is restored.
 *
 * Re-runs the translator for @tb in search-pc mode to map the host
 * return address back to a guest instruction index, then restores the
 * CPU state for that instruction.  Returns 0 on success, -1 if
 * searched_pc does not fall inside this TB's generated code.
 */
static int cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb,
                                     uintptr_t searched_pc)
{
    CPUArchState *env = cpu->env_ptr;
    TCGContext *s = &tcg_ctx;
    int j;
    uintptr_t tc_ptr;
#ifdef CONFIG_PROFILER
    int64_t ti;
#endif

#ifdef CONFIG_PROFILER
    ti = profile_getclock();
#endif
    tcg_func_start(s);

    gen_intermediate_code_pc(env, tb);

    if (tb->cflags & CF_USE_ICOUNT) {
        /* Reset the cycle counter to the start of the block.  */
        cpu->icount_decr.u16.low += tb->icount;
        /* Clear the IO flag.  */
        cpu->can_do_io = 0;
    }

    /* find opc index corresponding to search_pc */
    tc_ptr = (uintptr_t)tb->tc_ptr;
    if (searched_pc < tc_ptr)
        return -1;

    s->tb_next_offset = tb->tb_next_offset;
#ifdef USE_DIRECT_JUMP
    s->tb_jmp_offset = tb->tb_jmp_offset;
    s->tb_next = NULL;
#else
    s->tb_jmp_offset = NULL;
    s->tb_next = tb->tb_next;
#endif
    j = tcg_gen_code_search_pc(s, (tcg_insn_unit *)tc_ptr,
                               searched_pc - tc_ptr);
    if (j < 0)
        return -1;
    /* back up to the start of the guest instruction containing op j */
    while (s->gen_opc_instr_start[j] == 0) {
        j--;
    }
    cpu->icount_decr.u16.low -= s->gen_opc_icount[j];

    restore_state_to_opc(env, tb, j);

#ifdef CONFIG_PROFILER
    s->restore_time += profile_getclock() - ti;
    s->restore_count++;
#endif
    return 0;
}
259
260bool cpu_restore_state(CPUState *cpu, uintptr_t retaddr)
261{
262 TranslationBlock *tb;
263
264 tb = tb_find_pc(retaddr);
265 if (tb) {
266 cpu_restore_state_from_tb(cpu, tb, retaddr);
267 if (tb->cflags & CF_NOCACHE) {
268
269 cpu->current_tb = NULL;
270 tb_phys_invalidate(tb, -1);
271 tb_free(tb);
272 }
273 return true;
274 }
275 return false;
276}
277
278#ifdef _WIN32
279static __attribute__((unused)) void map_exec(void *addr, long size)
280{
281 DWORD old_protect;
282 VirtualProtect(addr, size,
283 PAGE_EXECUTE_READWRITE, &old_protect);
284}
285#else
/* Make the host memory range [addr, addr+size) read/write/executable.
   POSIX variant: the range is rounded out to whole host pages before
   calling mprotect(), since protection is per-page. */
static __attribute__((unused)) void map_exec(void *addr, long size)
{
    unsigned long page_size = getpagesize();
    unsigned long first = (unsigned long)addr & ~(page_size - 1);
    unsigned long last =
        ((unsigned long)addr + size + page_size - 1) & ~(page_size - 1);

    mprotect((void *)first, last - first,
             PROT_READ | PROT_WRITE | PROT_EXEC);
}
301#endif
302
/* Initialize the qemu_host_page_* globals from the real host page size.
   NOTE: after this, qemu_host_page_size >= TARGET_PAGE_SIZE always holds. */
void page_size_init(void)
{
    qemu_real_host_page_size = getpagesize();
    /* qemu_host_page_size may have been forced earlier (e.g. by the
       command line); only default it if still unset. */
    if (qemu_host_page_size == 0) {
        qemu_host_page_size = qemu_real_host_page_size;
    }
    if (qemu_host_page_size < TARGET_PAGE_SIZE) {
        qemu_host_page_size = TARGET_PAGE_SIZE;
    }
    qemu_host_page_mask = ~(qemu_host_page_size - 1);
}
316
/* One-time page-table initialization.  On BSD user-mode builds, walk the
   host's existing mappings and mark them PAGE_RESERVED so the guest
   cannot be mapped on top of them. */
static void page_init(void)
{
    page_size_init();
#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
    {
#ifdef HAVE_KINFO_GETVMMAP
        /* FreeBSD >= 7.0.104: query mappings via kinfo_getvmmap(). */
        struct kinfo_vmentry *freep;
        int i, cnt;

        freep = kinfo_getvmmap(getpid(), &cnt);
        if (freep) {
            mmap_lock();
            for (i = 0; i < cnt; i++) {
                unsigned long startaddr, endaddr;

                startaddr = freep[i].kve_start;
                endaddr = freep[i].kve_end;
                if (h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                    } else {
                        /* end is outside the guest address space: reserve
                           up to the top of it, when it fits in our map */
#if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
                        endaddr = ~0ul;
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
#endif
                    }
                }
            }
            free(freep);
            mmap_unlock();
        }
#else
        /* Fallback: parse the Linux-compat maps file. */
        FILE *f;

        last_brk = (unsigned long)sbrk(0);

        f = fopen("/compat/linux/proc/self/maps", "r");
        if (f) {
            mmap_lock();

            do {
                unsigned long startaddr, endaddr;
                int n;

                n = fscanf(f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);

                if (n == 2 && h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                    } else {
                        endaddr = ~0ul;
                    }
                    page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                }
            } while (!feof(f));

            fclose(f);
            mmap_unlock();
        }
#endif
    }
#endif
}
385
/* Look up the PageDesc for guest page 'index' in the multi-level
 * l1_map radix table.  If 'alloc' is non-zero, missing intermediate
 * tables and the final PageDesc array are allocated on the way down;
 * otherwise NULL is returned as soon as any level is missing.
 */
static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
{
    PageDesc *pd;
    void **lp;
    int i;

#if defined(CONFIG_USER_ONLY)
    /* User-mode allocates with mmap rather than g_malloc0; presumably to
       avoid re-entering the allocator from a signal context — TODO confirm. */
# define ALLOC(P, SIZE)                                 \
    do {                                                \
        P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE,    \
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);   \
    } while (0)
#else
# define ALLOC(P, SIZE) \
    do { P = g_malloc0(SIZE); } while (0)
#endif

    /* Level 1.  Always allocated.  */
    lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));

    /* Level 2..N-1: arrays of pointers to the next level.  */
    for (i = V_L1_SHIFT / V_L2_BITS - 1; i > 0; i--) {
        void **p = *lp;

        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            ALLOC(p, sizeof(void *) * V_L2_SIZE);
            *lp = p;
        }

        lp = p + ((index >> (i * V_L2_BITS)) & (V_L2_SIZE - 1));
    }

    /* Leaf level: an array of PageDesc covering V_L2_SIZE pages.  */
    pd = *lp;
    if (pd == NULL) {
        if (!alloc) {
            return NULL;
        }
        ALLOC(pd, sizeof(PageDesc) * V_L2_SIZE);
        *lp = pd;
    }

#undef ALLOC

    return pd + (index & (V_L2_SIZE - 1));
}
435
/* Look up the PageDesc for guest page 'index'; NULL if never allocated. */
static inline PageDesc *page_find(tb_page_addr_t index)
{
    return page_find_alloc(index, 0);
}
440
441#if !defined(CONFIG_USER_ONLY)
442#define mmap_lock() do { } while (0)
443#define mmap_unlock() do { } while (0)
444#endif
445
446#if defined(CONFIG_USER_ONLY)
447
448
449
450
451#define USE_STATIC_CODE_GEN_BUFFER
452#endif
453
454
455#if (defined(__linux__) \
456 || defined(__FreeBSD__) || defined(__FreeBSD_kernel__) \
457 || defined(__DragonFly__) || defined(__OpenBSD__) \
458 || defined(__NetBSD__))
459# define USE_MMAP
460#endif
461
462
463
464#define MIN_CODE_GEN_BUFFER_SIZE (1024u * 1024)
465
466
467
468
469#if defined(__x86_64__)
470# define MAX_CODE_GEN_BUFFER_SIZE (2ul * 1024 * 1024 * 1024)
471#elif defined(__sparc__)
472# define MAX_CODE_GEN_BUFFER_SIZE (2ul * 1024 * 1024 * 1024)
473#elif defined(__aarch64__)
474# define MAX_CODE_GEN_BUFFER_SIZE (128ul * 1024 * 1024)
475#elif defined(__arm__)
476# define MAX_CODE_GEN_BUFFER_SIZE (16u * 1024 * 1024)
477#elif defined(__s390x__)
478
479# define MAX_CODE_GEN_BUFFER_SIZE (3ul * 1024 * 1024 * 1024)
480#elif defined(__mips__)
481
482
483# define MAX_CODE_GEN_BUFFER_SIZE (128ul * 1024 * 1024)
484#else
485# define MAX_CODE_GEN_BUFFER_SIZE ((size_t)-1)
486#endif
487
488#define DEFAULT_CODE_GEN_BUFFER_SIZE_1 (32u * 1024 * 1024)
489
490#define DEFAULT_CODE_GEN_BUFFER_SIZE \
491 (DEFAULT_CODE_GEN_BUFFER_SIZE_1 < MAX_CODE_GEN_BUFFER_SIZE \
492 ? DEFAULT_CODE_GEN_BUFFER_SIZE_1 : MAX_CODE_GEN_BUFFER_SIZE)
493
494static inline size_t size_code_gen_buffer(size_t tb_size)
495{
496
497 if (tb_size == 0) {
498#ifdef USE_STATIC_CODE_GEN_BUFFER
499 tb_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
500#else
501
502
503
504
505 tb_size = (unsigned long)(ram_size / 4);
506#endif
507 }
508 if (tb_size < MIN_CODE_GEN_BUFFER_SIZE) {
509 tb_size = MIN_CODE_GEN_BUFFER_SIZE;
510 }
511 if (tb_size > MAX_CODE_GEN_BUFFER_SIZE) {
512 tb_size = MAX_CODE_GEN_BUFFER_SIZE;
513 }
514 tcg_ctx.code_gen_buffer_size = tb_size;
515 return tb_size;
516}
517
518#ifdef __mips__
519
520
/* True if [addr, addr+size) straddles a 256 MiB alignment boundary
   (MIPS direct jumps can only reach within the same 256 MiB segment). */
static inline bool cross_256mb(void *addr, size_t size)
{
    uintptr_t lo = (uintptr_t)addr;
    uintptr_t hi = lo + size;

    return ((lo ^ hi) & 0xf0000000) != 0;
}
525
526
527
528
529static inline void *split_cross_256mb(void *buf1, size_t size1)
530{
531 void *buf2 = (void *)(((uintptr_t)buf1 + size1) & 0xf0000000);
532 size_t size2 = buf1 + size1 - buf2;
533
534 size1 = buf2 - buf1;
535 if (size1 < size2) {
536 size1 = size2;
537 buf1 = buf2;
538 }
539
540 tcg_ctx.code_gen_buffer_size = size1;
541 return buf1;
542}
543#endif
544
545#ifdef USE_STATIC_CODE_GEN_BUFFER
546static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
547 __attribute__((aligned(CODE_GEN_ALIGN)));
548
/* Static-buffer variant: hand out the BSS buffer, splitting it on MIPS
   if it happens to cross a 256 MiB boundary, and make it executable. */
static inline void *alloc_code_gen_buffer(void)
{
    void *buf = static_code_gen_buffer;
#ifdef __mips__
    if (cross_256mb(buf, tcg_ctx.code_gen_buffer_size)) {
        buf = split_cross_256mb(buf, tcg_ctx.code_gen_buffer_size);
    }
#endif
    map_exec(buf, tcg_ctx.code_gen_buffer_size);
    return buf;
}
560#elif defined(USE_MMAP)
/* mmap variant: map an anonymous RWX region, with a per-host preferred
   placement so that direct jumps from generated code can reach the
   main executable / stay within backend branch range. */
static inline void *alloc_code_gen_buffer(void)
{
    int flags = MAP_PRIVATE | MAP_ANONYMOUS;
    uintptr_t start = 0;
    void *buf;

    /* Constrain the position of the buffer based on the host cpu.
       Note that start is only a hint to the kernel. */
# if defined(__PIE__) || defined(__PIC__)
    /* Don't bother setting a preferred location if we're building
       a position-independent executable: letting the kernel choose
       is more likely to land near the main executable. */
# elif defined(__x86_64__) && defined(MAP_32BIT)
    /* Force the memory down into low memory with the executable.
       Leave the choice of exact location with the kernel. */
    flags |= MAP_32BIT;
    /* Cannot expect to map more than 800MB in low memory. */
    if (tcg_ctx.code_gen_buffer_size > 800u * 1024 * 1024) {
        tcg_ctx.code_gen_buffer_size = 800u * 1024 * 1024;
    }
# elif defined(__sparc__)
    start = 0x40000000ul;
# elif defined(__s390x__)
    start = 0x90000000ul;
# elif defined(__mips__)
    /* Pick a hint that keeps the buffer in one 256 MiB segment. */
# ifdef CONFIG_USER_ONLY
    start = 0x68000000ul;
# elif _MIPS_SIM == _ABI64
    start = 0x128000000ul;
# else
    start = 0x08000000ul;
# endif
# endif

    buf = mmap((void *)start, tcg_ctx.code_gen_buffer_size,
               PROT_WRITE | PROT_READ | PROT_EXEC, flags, -1, 0);
    if (buf == MAP_FAILED) {
        return NULL;
    }

#ifdef __mips__
    if (cross_256mb(buf, tcg_ctx.code_gen_buffer_size)) {
        /* Try again, with the original still mapped, for defensive
           reasons (the kernel must pick a different address). */
        size_t size2, size1 = tcg_ctx.code_gen_buffer_size;
        void *buf2 = mmap(NULL, size1, PROT_WRITE | PROT_READ | PROT_EXEC,
                          flags, -1, 0);
        if (buf2 != MAP_FAILED) {
            if (!cross_256mb(buf2, size1)) {
                /* Success!  Use the new buffer.  */
                munmap(buf, size1);
                return buf2;
            }
            /* Failure.  Work with what we had.  */
            munmap(buf2, size1);
        }

        /* Split the original buffer and unmap the smaller half.  */
        buf2 = split_cross_256mb(buf, size1);
        size2 = tcg_ctx.code_gen_buffer_size;
        munmap(buf + (buf == buf2 ? size2 : 0), size1 - size2);
        return buf2;
    }
#endif

    return buf;
}
631#else
632static inline void *alloc_code_gen_buffer(void)
633{
634 void *buf = g_try_malloc(tcg_ctx.code_gen_buffer_size);
635
636 if (buf == NULL) {
637 return NULL;
638 }
639
640#ifdef __mips__
641 if (cross_256mb(buf, tcg_ctx.code_gen_buffer_size)) {
642 void *buf2 = g_malloc(tcg_ctx.code_gen_buffer_size);
643 if (buf2 != NULL && !cross_256mb(buf2, size1)) {
644
645 free(buf);
646 buf = buf2;
647 } else {
648
649
650 free(buf2);
651 buf = split_cross_256mb(buf, tcg_ctx.code_gen_buffer_size);
652 }
653 }
654#endif
655
656 map_exec(buf, tcg_ctx.code_gen_buffer_size);
657 return buf;
658}
659#endif
660
/* Size and allocate the translated-code buffer, reserving space for the
   prologue at its tail, and allocate the TranslationBlock array. */
static inline void code_gen_alloc(size_t tb_size)
{
    tcg_ctx.code_gen_buffer_size = size_code_gen_buffer(tb_size);
    tcg_ctx.code_gen_buffer = alloc_code_gen_buffer();
    if (tcg_ctx.code_gen_buffer == NULL) {
        fprintf(stderr, "Could not allocate dynamic translator buffer\n");
        exit(1);
    }

    qemu_madvise(tcg_ctx.code_gen_buffer, tcg_ctx.code_gen_buffer_size,
            QEMU_MADV_HUGEPAGE);

    /* Steal 1 KiB off the end of the buffer for the prologue; keeping it
       inside the buffer means direct branches from TBs to the prologue
       stay in range and no extra region needs to be made executable. */
    tcg_ctx.code_gen_prologue = tcg_ctx.code_gen_buffer +
            tcg_ctx.code_gen_buffer_size - 1024;
    tcg_ctx.code_gen_buffer_size -= 1024;

    /* Leave headroom for one worst-case TB so generation can't overrun. */
    tcg_ctx.code_gen_buffer_max_size = tcg_ctx.code_gen_buffer_size -
        (TCG_MAX_OP_SIZE * OPC_BUF_SIZE);
    tcg_ctx.code_gen_max_blocks = tcg_ctx.code_gen_buffer_size /
            CODE_GEN_AVG_BLOCK_SIZE;
    tcg_ctx.tb_ctx.tbs =
            g_malloc(tcg_ctx.code_gen_max_blocks * sizeof(TranslationBlock));
}
689
690
691
692
/* Must be called before using the QEMU cpus.  'tb_size' is the size
   (in bytes) allocated to the translation buffer (0 = default). */
void tcg_exec_init(unsigned long tb_size)
{
    cpu_gen_init();
    code_gen_alloc(tb_size);
    tcg_ctx.code_gen_ptr = tcg_ctx.code_gen_buffer;
    tcg_register_jit(tcg_ctx.code_gen_buffer, tcg_ctx.code_gen_buffer_size);
    page_init();
#if !defined(CONFIG_USER_ONLY) || !defined(CONFIG_USE_GUEST_BASE)
    /* There's no guest base to take into account, so go ahead and
       initialize the prologue now.  */
    tcg_prologue_init(&tcg_ctx);
#endif
}
706
/* True once tcg_exec_init() has allocated the code buffer. */
bool tcg_enabled(void)
{
    return tcg_ctx.code_gen_buffer != NULL;
}
711
712
713
/* Allocate a new translation block for guest pc.  Returns NULL when the
   TB array or the code buffer is exhausted (caller must tb_flush). */
static TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (tcg_ctx.tb_ctx.nb_tbs >= tcg_ctx.code_gen_max_blocks ||
        (tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer) >=
         tcg_ctx.code_gen_buffer_max_size) {
        return NULL;
    }
    tb = &tcg_ctx.tb_ctx.tbs[tcg_ctx.tb_ctx.nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}
728
729void tb_free(TranslationBlock *tb)
730{
731
732
733
734 if (tcg_ctx.tb_ctx.nb_tbs > 0 &&
735 tb == &tcg_ctx.tb_ctx.tbs[tcg_ctx.tb_ctx.nb_tbs - 1]) {
736 tcg_ctx.code_gen_ptr = tb->tc_ptr;
737 tcg_ctx.tb_ctx.nb_tbs--;
738 }
739}
740
741static inline void invalidate_page_bitmap(PageDesc *p)
742{
743 if (p->code_bitmap) {
744 g_free(p->code_bitmap);
745 p->code_bitmap = NULL;
746 }
747 p->code_write_count = 0;
748}
749
750
/* Recursively clear the TB lists of every PageDesc below *lp.
   'level' counts intermediate table levels remaining; level 0 is the
   leaf PageDesc array. */
static void page_flush_tb_1(int level, void **lp)
{
    int i;

    if (*lp == NULL) {
        return;
    }
    if (level == 0) {
        PageDesc *pd = *lp;

        for (i = 0; i < V_L2_SIZE; ++i) {
            pd[i].first_tb = NULL;
            invalidate_page_bitmap(pd + i);
        }
    } else {
        void **pp = *lp;

        for (i = 0; i < V_L2_SIZE; ++i) {
            page_flush_tb_1(level - 1, pp + i);
        }
    }
}
773
774static void page_flush_tb(void)
775{
776 int i;
777
778 for (i = 0; i < V_L1_SIZE; i++) {
779 page_flush_tb_1(V_L1_SHIFT / V_L2_BITS - 1, l1_map + i);
780 }
781}
782
783
784
/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUArchState *env1)
{
    CPUState *cpu = ENV_GET_CPU(env1);

#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer),
           tcg_ctx.tb_ctx.nb_tbs, tcg_ctx.tb_ctx.nb_tbs > 0 ?
           ((unsigned long)(tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer)) /
           tcg_ctx.tb_ctx.nb_tbs : 0);
#endif
    if ((unsigned long)(tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer)
        > tcg_ctx.code_gen_buffer_size) {
        cpu_abort(cpu, "Internal error: code buffer overflow\n");
    }
    tcg_ctx.tb_ctx.nb_tbs = 0;

    /* invalidate every CPU's TB lookup cache */
    CPU_FOREACH(cpu) {
        memset(cpu->tb_jmp_cache, 0, sizeof(cpu->tb_jmp_cache));
    }

    memset(tcg_ctx.tb_ctx.tb_phys_hash, 0, sizeof(tcg_ctx.tb_ctx.tb_phys_hash));
    page_flush_tb();

    tcg_ctx.code_gen_ptr = tcg_ctx.code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tcg_ctx.tb_ctx.tb_flush_count++;
}
814
815#ifdef DEBUG_TB_CHECK
816
817static void tb_invalidate_check(target_ulong address)
818{
819 TranslationBlock *tb;
820 int i;
821
822 address &= TARGET_PAGE_MASK;
823 for (i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
824 for (tb = tb_ctx.tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
825 if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
826 address >= tb->pc + tb->size)) {
827 printf("ERROR invalidate: address=" TARGET_FMT_lx
828 " PC=%08lx size=%04x\n",
829 address, (long)tb->pc, tb->size);
830 }
831 }
832 }
833}
834
835
/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for (i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for (tb = tcg_ctx.tb_ctx.tb_phys_hash[i]; tb != NULL;
                tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            /* a page with translated code must never be writable */
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}
853
854#endif
855
856static inline void tb_hash_remove(TranslationBlock **ptb, TranslationBlock *tb)
857{
858 TranslationBlock *tb1;
859
860 for (;;) {
861 tb1 = *ptb;
862 if (tb1 == tb) {
863 *ptb = tb1->phys_hash_next;
864 break;
865 }
866 ptb = &tb1->phys_hash_next;
867 }
868}
869
/* Unlink @tb from a page's TB list headed at *ptb.  Each link carries
   the TB's page-slot index (0 or 1) in its low 2 bits, so entries must
   be untagged before comparison.  Assumes tb is present in the list. */
static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for (;;) {
        tb1 = *ptb;
        n1 = (uintptr_t)tb1 & 3;                        /* slot tag */
        tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3); /* real pointer */
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}
886
/* Remove @tb from the circular list of TBs jumping to the target of its
   direct-jump slot @n.  List links encode the jump-slot number in their
   low 2 bits; the value 2 tags the list head (see tb_phys_invalidate). */
static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for (;;) {
            tb1 = *ptb;
            n1 = (uintptr_t)tb1 & 3;
            tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
            if (n1 == n && tb1 == tb) {
                break;
            }
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}
915
916
917
/* Reset direct-jump slot @n of @tb so it falls through to the TB's own
   epilogue instead of chaining to another TB. */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (uintptr_t)(tb->tc_ptr + tb->tb_next_offset[n]));
}
922
923
/* invalidate one TB; page_addr is the page being written (or -1 when the
   caller has already handled the page lists) */
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
{
    CPUState *cpu;
    PageDesc *p;
    unsigned int h, n1;
    tb_page_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_hash_remove(&tcg_ctx.tb_ctx.tb_phys_hash[h], tb);

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tcg_ctx.tb_ctx.tb_invalidated_flag = 1;

    /* remove the TB from every CPU's jump cache */
    h = tb_jmp_cache_hash_func(tb->pc);
    CPU_FOREACH(cpu) {
        if (cpu->tb_jmp_cache[h] == tb) {
            cpu->tb_jmp_cache[h] = NULL;
        }
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for (;;) {
        n1 = (uintptr_t)tb1 & 3;
        if (n1 == 2) {          /* tag 2 = list head: done */
            break;
        }
        tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    /* reset the circular list to "empty" (head pointing at itself) */
    tb->jmp_first = (TranslationBlock *)((uintptr_t)tb | 2);

    tcg_ctx.tb_ctx.tb_phys_invalidate_count++;
}
980
/* Set @len consecutive bits in the bitmap @tab starting at bit @start. */
static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end = start + len;
    uint8_t *p = tab + (start >> 3);
    int head_mask = 0xff << (start & 7);

    if ((start & ~7) == (end & ~7)) {
        /* The whole run fits in a single byte. */
        if (start < end) {
            *p |= head_mask & ~(0xff << (end & 7));
        }
        return;
    }

    /* Partial leading byte. */
    *p++ |= head_mask;
    start = (start + 8) & ~7;

    /* Whole bytes in the middle. */
    while (start < (end & ~7)) {
        *p++ = 0xff;
        start += 8;
    }

    /* Partial trailing byte, if any bits remain. */
    if (start < end) {
        *p |= ~(0xff << (end & 7));
    }
}
1007
/* build a bitmap of the code present on the page, one bit per byte */
static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = g_malloc0(TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (uintptr_t)tb & 3;  /* which of the TB's pages this is */
        tb = (TranslationBlock *)((uintptr_t)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE) {
                tb_end = TARGET_PAGE_SIZE;
            }
        } else {
            /* second page: the TB covers the page from its start */
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}
1036
/* Translate a new TB for (pc, cs_base, flags, cflags) and register it in
   the physical page tables.  May flush the entire code cache when out of
   room, in which case tb_invalidated_flag is raised. */
TranslationBlock *tb_gen_code(CPUState *cpu,
                              target_ulong pc, target_ulong cs_base,
                              int flags, int cflags)
{
    CPUArchState *env = cpu->env_ptr;
    TranslationBlock *tb;
    tb_page_addr_t phys_pc, phys_page2;
    target_ulong virt_page2;
    int code_gen_size;

    phys_pc = get_page_addr_code(env, pc);
    if (use_icount) {
        cflags |= CF_USE_ICOUNT;
    }
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        /* Don't forget to invalidate previous TB info.  */
        tcg_ctx.tb_ctx.tb_invalidated_flag = 1;
    }
    tb->tc_ptr = tcg_ctx.code_gen_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, &code_gen_size);
    /* advance past the emitted code, keeping CODE_GEN_ALIGN alignment */
    tcg_ctx.code_gen_ptr = (void *)(((uintptr_t)tcg_ctx.code_gen_ptr +
            code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_page_addr_code(env, virt_page2);
    }
    tb_link_page(tb, phys_pc, phys_page2);
    return tb;
}
1077
1078
1079
1080
1081
1082
1083
1084
1085void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end,
1086 int is_cpu_write_access)
1087{
1088 while (start < end) {
1089 tb_invalidate_phys_page_range(start, end, is_cpu_write_access);
1090 start &= TARGET_PAGE_MASK;
1091 start += TARGET_PAGE_SIZE;
1092 }
1093}
1094
1095
1096
1097
1098
1099
1100
1101
1102void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
1103 int is_cpu_write_access)
1104{
1105 TranslationBlock *tb, *tb_next, *saved_tb;
1106 CPUState *cpu = current_cpu;
1107#if defined(TARGET_HAS_PRECISE_SMC)
1108 CPUArchState *env = NULL;
1109#endif
1110 tb_page_addr_t tb_start, tb_end;
1111 PageDesc *p;
1112 int n;
1113#ifdef TARGET_HAS_PRECISE_SMC
1114 int current_tb_not_found = is_cpu_write_access;
1115 TranslationBlock *current_tb = NULL;
1116 int current_tb_modified = 0;
1117 target_ulong current_pc = 0;
1118 target_ulong current_cs_base = 0;
1119 int current_flags = 0;
1120#endif
1121
1122 p = page_find(start >> TARGET_PAGE_BITS);
1123 if (!p) {
1124 return;
1125 }
1126 if (!p->code_bitmap &&
1127 ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
1128 is_cpu_write_access) {
1129
1130 build_page_bitmap(p);
1131 }
1132#if defined(TARGET_HAS_PRECISE_SMC)
1133 if (cpu != NULL) {
1134 env = cpu->env_ptr;
1135 }
1136#endif
1137
1138
1139
1140
1141 tb = p->first_tb;
1142 while (tb != NULL) {
1143 n = (uintptr_t)tb & 3;
1144 tb = (TranslationBlock *)((uintptr_t)tb & ~3);
1145 tb_next = tb->page_next[n];
1146
1147 if (n == 0) {
1148
1149
1150 tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
1151 tb_end = tb_start + tb->size;
1152 } else {
1153 tb_start = tb->page_addr[1];
1154 tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
1155 }
1156 if (!(tb_end <= start || tb_start >= end)) {
1157#ifdef TARGET_HAS_PRECISE_SMC
1158 if (current_tb_not_found) {
1159 current_tb_not_found = 0;
1160 current_tb = NULL;
1161 if (cpu->mem_io_pc) {
1162
1163 current_tb = tb_find_pc(cpu->mem_io_pc);
1164 }
1165 }
1166 if (current_tb == tb &&
1167 (current_tb->cflags & CF_COUNT_MASK) != 1) {
1168
1169
1170
1171
1172
1173
1174 current_tb_modified = 1;
1175 cpu_restore_state_from_tb(cpu, current_tb, cpu->mem_io_pc);
1176 cpu_get_tb_cpu_state(env, ¤t_pc, ¤t_cs_base,
1177 ¤t_flags);
1178 }
1179#endif
1180
1181
1182 saved_tb = NULL;
1183 if (cpu != NULL) {
1184 saved_tb = cpu->current_tb;
1185 cpu->current_tb = NULL;
1186 }
1187 tb_phys_invalidate(tb, -1);
1188 if (cpu != NULL) {
1189 cpu->current_tb = saved_tb;
1190 if (cpu->interrupt_request && cpu->current_tb) {
1191 cpu_interrupt(cpu, cpu->interrupt_request);
1192 }
1193 }
1194 }
1195 tb = tb_next;
1196 }
1197#if !defined(CONFIG_USER_ONLY)
1198
1199 if (!p->first_tb) {
1200 invalidate_page_bitmap(p);
1201 if (is_cpu_write_access) {
1202 tlb_unprotect_code_phys(cpu, start, cpu->mem_io_vaddr);
1203 }
1204 }
1205#endif
1206#ifdef TARGET_HAS_PRECISE_SMC
1207 if (current_tb_modified) {
1208
1209
1210
1211 cpu->current_tb = NULL;
1212 tb_gen_code(cpu, current_pc, current_cs_base, current_flags, 1);
1213 cpu_resume_from_signal(cpu, NULL);
1214 }
1215#endif
1216}
1217
1218
/* Fast path for small memory writes.  Consults the page's SMC code
   bitmap and only falls back to the full range invalidate when the
   written bytes actually overlap translated code.
   NOTE(review): the '(1 << len) - 1' mask implies len <= 8 — presumably
   callers also keep start aligned to len; confirm against call sites. */
void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
{
    PageDesc *p;
    int offset, b;

#if 0
    if (1) {
        qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                  cpu_single_env->mem_io_vaddr, len,
                  cpu_single_env->eip,
                  cpu_single_env->eip +
                  (intptr_t)cpu_single_env->segs[R_CS].base);
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p) {
        return;
    }
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1)) {
            /* some written byte is covered by translated code */
            goto do_invalidate;
        }
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}
1248
1249#if !defined(CONFIG_SOFTMMU)
1250static void tb_invalidate_phys_page(tb_page_addr_t addr,
1251 uintptr_t pc, void *puc,
1252 bool locked)
1253{
1254 TranslationBlock *tb;
1255 PageDesc *p;
1256 int n;
1257#ifdef TARGET_HAS_PRECISE_SMC
1258 TranslationBlock *current_tb = NULL;
1259 CPUState *cpu = current_cpu;
1260 CPUArchState *env = NULL;
1261 int current_tb_modified = 0;
1262 target_ulong current_pc = 0;
1263 target_ulong current_cs_base = 0;
1264 int current_flags = 0;
1265#endif
1266
1267 addr &= TARGET_PAGE_MASK;
1268 p = page_find(addr >> TARGET_PAGE_BITS);
1269 if (!p) {
1270 return;
1271 }
1272 tb = p->first_tb;
1273#ifdef TARGET_HAS_PRECISE_SMC
1274 if (tb && pc != 0) {
1275 current_tb = tb_find_pc(pc);
1276 }
1277 if (cpu != NULL) {
1278 env = cpu->env_ptr;
1279 }
1280#endif
1281 while (tb != NULL) {
1282 n = (uintptr_t)tb & 3;
1283 tb = (TranslationBlock *)((uintptr_t)tb & ~3);
1284#ifdef TARGET_HAS_PRECISE_SMC
1285 if (current_tb == tb &&
1286 (current_tb->cflags & CF_COUNT_MASK) != 1) {
1287
1288
1289
1290
1291
1292
1293 current_tb_modified = 1;
1294 cpu_restore_state_from_tb(cpu, current_tb, pc);
1295 cpu_get_tb_cpu_state(env, ¤t_pc, ¤t_cs_base,
1296 ¤t_flags);
1297 }
1298#endif
1299 tb_phys_invalidate(tb, addr);
1300 tb = tb->page_next[n];
1301 }
1302 p->first_tb = NULL;
1303#ifdef TARGET_HAS_PRECISE_SMC
1304 if (current_tb_modified) {
1305
1306
1307
1308 cpu->current_tb = NULL;
1309 tb_gen_code(cpu, current_pc, current_cs_base, current_flags, 1);
1310 if (locked) {
1311 mmap_unlock();
1312 }
1313 cpu_resume_from_signal(cpu, puc);
1314 }
1315#endif
1316}
1317#endif
1318
1319
/* add the tb in the target page and protect it if necessary
   'n' (0 or 1) selects which of the TB's two page slots is being set;
   it is also encoded into the page-list link's low bits. */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, tb_page_addr_t page_addr)
{
    PageDesc *p;
#ifndef CONFIG_USER_ONLY
    bool page_already_protected;
#endif

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
    tb->page_next[n] = p->first_tb;
#ifndef CONFIG_USER_ONLY
    page_already_protected = p->first_tb != NULL;
#endif
    p->first_tb = (TranslationBlock *)((uintptr_t)tb | n);
    invalidate_page_bitmap(p);

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        /* a host page may cover several target pages: collect the
           flags of all of them before mprotect'ing the host page */
        for (addr = page_addr; addr < page_addr + qemu_host_page_size;
            addr += TARGET_PAGE_SIZE) {

            p2 = page_find(addr >> TARGET_PAGE_BITS);
            if (!p2) {
                continue;
            }
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
        }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
               page_addr);
#endif
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!page_already_protected) {
        tlb_protect_code(page_addr);
    }
#endif
}
1373
1374
1375
/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
static void tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
                         tb_page_addr_t phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* Grab the mmap lock to stop another thread invalidating this TB
       before we are done.  */
    mmap_lock();
    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tcg_ctx.tb_ctx.tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1) {
        tb_alloc_page(tb, 1, phys_page2);
    } else {
        tb->page_addr[1] = -1;
    }

    /* initialize the incoming-jump list as empty (tag 2 = list head) */
    tb->jmp_first = (TranslationBlock *)((uintptr_t)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;

    /* init original jump addresses: 0xffff means the slot was never
       patched, so it falls through to the epilogue */
    if (tb->tb_next_offset[0] != 0xffff) {
        tb_reset_jump(tb, 0);
    }
    if (tb->tb_next_offset[1] != 0xffff) {
        tb_reset_jump(tb, 1);
    }

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
    mmap_unlock();
}
1416
1417
1418
/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
static TranslationBlock *tb_find_pc(uintptr_t tc_ptr)
{
    int m_min, m_max, m;
    uintptr_t v;
    TranslationBlock *tb;

    if (tcg_ctx.tb_ctx.nb_tbs <= 0) {
        return NULL;
    }
    if (tc_ptr < (uintptr_t)tcg_ctx.code_gen_buffer ||
        tc_ptr >= (uintptr_t)tcg_ctx.code_gen_ptr) {
        return NULL;
    }
    /* binary search (cf Knuth) — TBs are allocated in code-buffer
       order, so tbs[] is sorted by tc_ptr */
    m_min = 0;
    m_max = tcg_ctx.tb_ctx.nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tcg_ctx.tb_ctx.tbs[m];
        v = (uintptr_t)tb->tc_ptr;
        if (v == tc_ptr) {
            return tb;
        } else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    /* no exact match: m_max indexes the last TB starting before tc_ptr */
    return &tcg_ctx.tb_ctx.tbs[m_max];
}
1449
1450#if !defined(CONFIG_USER_ONLY)
1451void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr)
1452{
1453 ram_addr_t ram_addr;
1454 MemoryRegion *mr;
1455 hwaddr l = 1;
1456
1457 mr = address_space_translate(as, addr, &addr, &l, false);
1458 if (!(memory_region_is_ram(mr)
1459 || memory_region_is_romd(mr))) {
1460 return;
1461 }
1462 ram_addr = (memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK)
1463 + addr;
1464 tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
1465}
1466#endif
1467
1468void tb_check_watchpoint(CPUState *cpu)
1469{
1470 TranslationBlock *tb;
1471
1472 tb = tb_find_pc(cpu->mem_io_pc);
1473 if (!tb) {
1474 cpu_abort(cpu, "check_watchpoint: could not find TB for pc=%p",
1475 (void *)cpu->mem_io_pc);
1476 }
1477 cpu_restore_state_from_tb(cpu, tb, cpu->mem_io_pc);
1478 tb_phys_invalidate(tb, -1);
1479}
1480
1481#ifndef CONFIG_USER_ONLY
1482
1483static void tcg_handle_interrupt(CPUState *cpu, int mask)
1484{
1485 int old_mask;
1486
1487 old_mask = cpu->interrupt_request;
1488 cpu->interrupt_request |= mask;
1489
1490
1491
1492
1493
1494 if (!qemu_cpu_is_self(cpu)) {
1495 qemu_cpu_kick(cpu);
1496 return;
1497 }
1498
1499 if (use_icount) {
1500 cpu->icount_decr.u16.high = 0xffff;
1501 if (!cpu_can_do_io(cpu)
1502 && (mask & ~old_mask) != 0) {
1503 cpu_abort(cpu, "Raised interrupt while not in I/O function");
1504 }
1505 } else {
1506 cpu->tcg_exit_req = 1;
1507 }
1508}
1509
1510CPUInterruptHandler cpu_interrupt_handler = tcg_handle_interrupt;
1511
1512
1513
/* In deterministic (icount) execution an I/O access must be the last
 * instruction of its TB.  When I/O happens mid-block, retranslate the
 * TB so it stops right after the offending instruction, then restart
 * execution.  'retaddr' is the host return address inside the
 * generated code.  Does not return: ends in cpu_resume_from_signal. */
void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr)
{
#if defined(TARGET_MIPS) || defined(TARGET_SH4)
    CPUArchState *env = cpu->env_ptr;
#endif
    TranslationBlock *tb;
    uint32_t n, cflags;
    target_ulong pc, cs_base;
    uint64_t flags;

    tb = tb_find_pc(retaddr);
    if (!tb) {
        cpu_abort(cpu, "cpu_io_recompile: could not find TB for pc=%p",
                  (void *)retaddr);
    }
    n = cpu->icount_decr.u16.low + tb->icount;
    cpu_restore_state_from_tb(cpu, tb, retaddr);
    /* Compute the number of guest instructions executed so far within
       this TB, including the one that performed the I/O.  */
    n = n - cpu->icount_decr.u16.low;
    n++;

    /* On MIPS and SH4 an instruction sitting in a delay slot must not
       start a TB on its own: back up over the branch so both are
       re-executed together.  (NOTE(review): relies on
       cpu_restore_state_from_tb having restored hflags/flags — confirm.) */
#if defined(TARGET_MIPS)
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
        env->active_tc.PC -= (env->hflags & MIPS_HFLAG_B16 ? 2 : 4);
        cpu->icount_decr.u16.low++;
        env->hflags &= ~MIPS_HFLAG_BMASK;
    }
#elif defined(TARGET_SH4)
    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
        && n > 1) {
        env->pc -= 2;
        cpu->icount_decr.u16.low++;
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
    }
#endif

    /* The instruction count is encoded inside cflags.  */
    if (n > CF_COUNT_MASK) {
        cpu_abort(cpu, "TB too big during recompile");
    }

    cflags = n | CF_LAST_IO;
    pc = tb->pc;
    cs_base = tb->cs_base;
    flags = tb->flags;
    tb_phys_invalidate(tb, -1);
    /* Regenerate a TB that executes exactly n instructions, ending on
       the I/O instruction.  */
    tb_gen_code(cpu, pc, cs_base, flags, cflags);
    /* Jump back into the execution loop; the old (now invalid) TB is
       never resumed.  */
    cpu_resume_from_signal(cpu, NULL);
}
1574
1575void tb_flush_jmp_cache(CPUState *cpu, target_ulong addr)
1576{
1577 unsigned int i;
1578
1579
1580
1581 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1582 memset(&cpu->tb_jmp_cache[i], 0,
1583 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1584
1585 i = tb_jmp_cache_hash_page(addr);
1586 memset(&cpu->tb_jmp_cache[i], 0,
1587 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1588}
1589
1590void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
1591{
1592 int i, target_code_size, max_target_code_size;
1593 int direct_jmp_count, direct_jmp2_count, cross_page;
1594 TranslationBlock *tb;
1595
1596 target_code_size = 0;
1597 max_target_code_size = 0;
1598 cross_page = 0;
1599 direct_jmp_count = 0;
1600 direct_jmp2_count = 0;
1601 for (i = 0; i < tcg_ctx.tb_ctx.nb_tbs; i++) {
1602 tb = &tcg_ctx.tb_ctx.tbs[i];
1603 target_code_size += tb->size;
1604 if (tb->size > max_target_code_size) {
1605 max_target_code_size = tb->size;
1606 }
1607 if (tb->page_addr[1] != -1) {
1608 cross_page++;
1609 }
1610 if (tb->tb_next_offset[0] != 0xffff) {
1611 direct_jmp_count++;
1612 if (tb->tb_next_offset[1] != 0xffff) {
1613 direct_jmp2_count++;
1614 }
1615 }
1616 }
1617
1618 cpu_fprintf(f, "Translation buffer state:\n");
1619 cpu_fprintf(f, "gen code size %td/%zd\n",
1620 tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer,
1621 tcg_ctx.code_gen_buffer_max_size);
1622 cpu_fprintf(f, "TB count %d/%d\n",
1623 tcg_ctx.tb_ctx.nb_tbs, tcg_ctx.code_gen_max_blocks);
1624 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
1625 tcg_ctx.tb_ctx.nb_tbs ? target_code_size /
1626 tcg_ctx.tb_ctx.nb_tbs : 0,
1627 max_target_code_size);
1628 cpu_fprintf(f, "TB avg host size %td bytes (expansion ratio: %0.1f)\n",
1629 tcg_ctx.tb_ctx.nb_tbs ? (tcg_ctx.code_gen_ptr -
1630 tcg_ctx.code_gen_buffer) /
1631 tcg_ctx.tb_ctx.nb_tbs : 0,
1632 target_code_size ? (double) (tcg_ctx.code_gen_ptr -
1633 tcg_ctx.code_gen_buffer) /
1634 target_code_size : 0);
1635 cpu_fprintf(f, "cross page TB count %d (%d%%)\n", cross_page,
1636 tcg_ctx.tb_ctx.nb_tbs ? (cross_page * 100) /
1637 tcg_ctx.tb_ctx.nb_tbs : 0);
1638 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
1639 direct_jmp_count,
1640 tcg_ctx.tb_ctx.nb_tbs ? (direct_jmp_count * 100) /
1641 tcg_ctx.tb_ctx.nb_tbs : 0,
1642 direct_jmp2_count,
1643 tcg_ctx.tb_ctx.nb_tbs ? (direct_jmp2_count * 100) /
1644 tcg_ctx.tb_ctx.nb_tbs : 0);
1645 cpu_fprintf(f, "\nStatistics:\n");
1646 cpu_fprintf(f, "TB flush count %d\n", tcg_ctx.tb_ctx.tb_flush_count);
1647 cpu_fprintf(f, "TB invalidate count %d\n",
1648 tcg_ctx.tb_ctx.tb_phys_invalidate_count);
1649 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
1650 tcg_dump_info(f, cpu_fprintf);
1651}
1652
/* Print per-opcode TCG counters to 'f'; thin wrapper around
 * tcg_dump_op_count.  */
void dump_opcount_info(FILE *f, fprintf_function cpu_fprintf)
{
    tcg_dump_op_count(f, cpu_fprintf);
}
1657
1658#else
1659
/* User-mode variant of cpu_interrupt: record the pending request and
 * ask the execution loop to stop at the next TB boundary.  */
void cpu_interrupt(CPUState *cpu, int mask)
{
    cpu->interrupt_request |= mask;
    cpu->tcg_exit_req = 1;
}
1665
1666
1667
1668
1669
/* Accumulator used while walking the guest page table: consecutive
 * pages with identical protection are merged into one region before
 * the user callback is invoked.  */
struct walk_memory_regions_data {
    walk_memory_regions_fn fn;  /* callback invoked per merged region */
    void *priv;                 /* opaque argument forwarded to fn */
    target_ulong start;         /* start of open region, -1u if none */
    int prot;                   /* protection flags of the open region */
};
1676
1677static int walk_memory_regions_end(struct walk_memory_regions_data *data,
1678 target_ulong end, int new_prot)
1679{
1680 if (data->start != -1u) {
1681 int rc = data->fn(data->priv, data->start, end, data->prot);
1682 if (rc != 0) {
1683 return rc;
1684 }
1685 }
1686
1687 data->start = (new_prot ? end : -1u);
1688 data->prot = new_prot;
1689
1690 return 0;
1691}
1692
1693static int walk_memory_regions_1(struct walk_memory_regions_data *data,
1694 target_ulong base, int level, void **lp)
1695{
1696 target_ulong pa;
1697 int i, rc;
1698
1699 if (*lp == NULL) {
1700 return walk_memory_regions_end(data, base, 0);
1701 }
1702
1703 if (level == 0) {
1704 PageDesc *pd = *lp;
1705
1706 for (i = 0; i < V_L2_SIZE; ++i) {
1707 int prot = pd[i].flags;
1708
1709 pa = base | (i << TARGET_PAGE_BITS);
1710 if (prot != data->prot) {
1711 rc = walk_memory_regions_end(data, pa, prot);
1712 if (rc != 0) {
1713 return rc;
1714 }
1715 }
1716 }
1717 } else {
1718 void **pp = *lp;
1719
1720 for (i = 0; i < V_L2_SIZE; ++i) {
1721 pa = base | ((target_ulong)i <<
1722 (TARGET_PAGE_BITS + V_L2_BITS * level));
1723 rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
1724 if (rc != 0) {
1725 return rc;
1726 }
1727 }
1728 }
1729
1730 return 0;
1731}
1732
1733int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
1734{
1735 struct walk_memory_regions_data data;
1736 uintptr_t i;
1737
1738 data.fn = fn;
1739 data.priv = priv;
1740 data.start = -1u;
1741 data.prot = 0;
1742
1743 for (i = 0; i < V_L1_SIZE; i++) {
1744 int rc = walk_memory_regions_1(&data, (target_ulong)i << (V_L1_SHIFT + TARGET_PAGE_BITS),
1745 V_L1_SHIFT / V_L2_BITS - 1, l1_map + i);
1746 if (rc != 0) {
1747 return rc;
1748 }
1749 }
1750
1751 return walk_memory_regions_end(&data, 0, 0);
1752}
1753
1754static int dump_region(void *priv, target_ulong start,
1755 target_ulong end, unsigned long prot)
1756{
1757 FILE *f = (FILE *)priv;
1758
1759 (void) fprintf(f, TARGET_FMT_lx"-"TARGET_FMT_lx
1760 " "TARGET_FMT_lx" %c%c%c\n",
1761 start, end, end - start,
1762 ((prot & PAGE_READ) ? 'r' : '-'),
1763 ((prot & PAGE_WRITE) ? 'w' : '-'),
1764 ((prot & PAGE_EXEC) ? 'x' : '-'));
1765
1766 return 0;
1767}
1768
1769
1770void page_dump(FILE *f)
1771{
1772 const int length = sizeof(target_ulong) * 2;
1773 (void) fprintf(f, "%-*s %-*s %-*s %s\n",
1774 length, "start", length, "end", length, "size", "prot");
1775 walk_memory_regions(f, dump_region);
1776}
1777
1778int page_get_flags(target_ulong address)
1779{
1780 PageDesc *p;
1781
1782 p = page_find(address >> TARGET_PAGE_BITS);
1783 if (!p) {
1784 return 0;
1785 }
1786 return p->flags;
1787}
1788
1789
1790
1791
/* Modify the flags of the guest pages in [start, end) and invalidate
 * translated code where needed.  PAGE_WRITE_ORG is added automatically
 * when PAGE_WRITE is requested, so that code-page write protection can
 * later be undone by page_unprotect.  Callers are expected to hold the
 * mmap lock.  */
void page_set_flags(target_ulong start, target_ulong end, int flags)
{
    target_ulong addr, len;

    /* This function should never be called with addresses outside the
       guest address space; a firing assert probably indicates a missing
       validity check in the caller.  */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(end < ((target_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
#endif
    assert(start < end);

    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    if (flags & PAGE_WRITE) {
        /* Remember the original writability separately from the current
           (possibly code-protected) PAGE_WRITE bit.  */
        flags |= PAGE_WRITE_ORG;
    }

    /* Loop on the remaining length rather than 'addr < end' so the loop
       still terminates if page-aligning 'end' wrapped it to 0.  */
    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);

        /* A page that holds translated code and is becoming writable
           must drop that code first.  */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr, 0, NULL, false);
        }
        p->flags = flags;
    }
}
1826
/* Check that every guest page in [start, start+len) is mapped with at
 * least the access rights in 'flags' (PAGE_READ / PAGE_WRITE).  Pages
 * that were made read-only only because they contain translated code
 * are unprotected on the fly for write checks.  Returns 0 on success,
 * -1 on any violation or address wrap-around.  */
int page_check_range(target_ulong start, target_ulong len, int flags)
{
    PageDesc *p;
    target_ulong end;
    target_ulong addr;

    /* This function should never be called with addresses outside the
       guest address space; a firing assert probably indicates a missing
       validity check in the caller.  */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(start < ((target_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
#endif

    if (len == 0) {
        /* An empty range is trivially accessible.  */
        return 0;
    }
    if (start + len - 1 < start) {
        /* The range wraps around the top of the address space.  */
        return -1;
    }

    /* Align before masking so the page count is computed correctly.  */
    end = TARGET_PAGE_ALIGN(start + len);
    start = start & TARGET_PAGE_MASK;

    /* Loop on remaining length so a wrapped-to-zero 'end' still works. */
    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        p = page_find(addr >> TARGET_PAGE_BITS);
        if (!p) {
            return -1;
        }
        if (!(p->flags & PAGE_VALID)) {
            return -1;
        }

        if ((flags & PAGE_READ) && !(p->flags & PAGE_READ)) {
            return -1;
        }
        if (flags & PAGE_WRITE) {
            if (!(p->flags & PAGE_WRITE_ORG)) {
                return -1;
            }
            /* Unprotect the page if it was made read-only only because
               it contains translated code.  */
            if (!(p->flags & PAGE_WRITE)) {
                if (!page_unprotect(addr, 0, NULL)) {
                    return -1;
                }
            }
        }
    }
    return 0;
}
1881
1882
1883
/* Called (typically from the SEGV signal handler) when the guest wrote
 * to a page that was write-protected because it holds translated code:
 * invalidate that code and restore the page's original writability.
 * Returns 1 if the fault was handled, 0 if the page was genuinely not
 * writable.  */
int page_unprotect(target_ulong address, uintptr_t pc, void *puc)
{
    unsigned int prot;
    PageDesc *p;
    target_ulong host_start, host_end, addr;

    /* Taking a lock in a signal handler is not strictly safe, but this
       only ever runs from a synchronous SEGV, so in practice it is ok. */
    mmap_lock();

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p) {
        mmap_unlock();
        return 0;
    }

    /* Only act if the page was originally writable and is currently
       write-protected (i.e. protected for code tracking).  */
    if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
        host_start = address & qemu_host_page_mask;
        host_end = host_start + qemu_host_page_size;

        prot = 0;
        /* One host page may cover several target pages: restore
           PAGE_WRITE on each and drop the translated code it holds.
           NOTE(review): assumes every target page within a mapped host
           page has a PageDesc — confirm p cannot be NULL here.  */
        for (addr = host_start ; addr < host_end ; addr += TARGET_PAGE_SIZE) {
            p = page_find(addr >> TARGET_PAGE_BITS);
            p->flags |= PAGE_WRITE;
            prot |= p->flags;

            /* The page content is about to change, so its translated
               code must be invalidated.  */
            tb_invalidate_phys_page(addr, pc, puc, true);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(addr);
#endif
        }
        /* Re-apply the union of the target-page protections to the
           whole host page.  */
        mprotect((void *)g2h(host_start), qemu_host_page_size,
                 prot & PAGE_BITS);

        mmap_unlock();
        return 1;
    }
    mmap_unlock();
    return 0;
}
1929#endif
1930