/*
 *  Host code generation: creation, linking and invalidation of
 *  TranslationBlocks (TBs) for the TCG dynamic translator.
 */
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif
#include <stdarg.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <inttypes.h>

#include "config.h"

#include "qemu-common.h"
#define NO_CPU_IO_DEFS
#include "cpu.h"
#include "disas/disas.h"
#include "tcg.h"
#if defined(CONFIG_USER_ONLY)
#include "qemu.h"
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
#include <sys/param.h>
#if __FreeBSD_version >= 700104
#define HAVE_KINFO_GETVMMAP
#define sigqueue sigqueue_freebsd  /* avoid redefinition */
#include <sys/time.h>
#include <sys/proc.h>
#include <machine/profile.h>
#define _KERNEL
#include <sys/user.h>
#undef _KERNEL
#undef sigqueue
#include <libutil.h>
#endif
#endif
#else
#include "exec/address-spaces.h"
#endif

#include "exec/cputlb.h"
#include "translate-all.h"
#include "qemu/timer.h"
/* #define DEBUG_TB_INVALIDATE */
/* #define DEBUG_FLUSH */
/* make various TB consistency checks */
/* #define DEBUG_TB_CHECK */

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif

#define SMC_BITMAP_USE_THRESHOLD 10

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

/* In system mode we want L1_MAP to be based on ram offsets,
   while in user mode we want it to be based on virtual addresses.  */
#if !defined(CONFIG_USER_ONLY)
#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
# define L1_MAP_ADDR_SPACE_BITS HOST_LONG_BITS
#else
# define L1_MAP_ADDR_SPACE_BITS TARGET_PHYS_ADDR_SPACE_BITS
#endif
#else
# define L1_MAP_ADDR_SPACE_BITS TARGET_VIRT_ADDR_SPACE_BITS
#endif

/* Size of the L2 (and L3, etc) page tables.  */
#define V_L2_BITS 10
#define V_L2_SIZE (1 << V_L2_BITS)

/* The bits remaining after N lower levels of page tables.  */
#define V_L1_BITS_REM \
    ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % V_L2_BITS)

#if V_L1_BITS_REM < 4
#define V_L1_BITS (V_L1_BITS_REM + V_L2_BITS)
#else
#define V_L1_BITS V_L1_BITS_REM
#endif

#define V_L1_SIZE ((target_ulong)1 << V_L1_BITS)

#define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)
116
117uintptr_t qemu_real_host_page_size;
118uintptr_t qemu_host_page_size;
119uintptr_t qemu_host_page_mask;
120
121
122
123static void *l1_map[V_L1_SIZE];
124
125
126TCGContext tcg_ctx;
127
128static void tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
129 tb_page_addr_t phys_page2);
130static TranslationBlock *tb_find_pc(uintptr_t tc_ptr);
131
132void cpu_gen_init(void)
133{
134 tcg_context_init(&tcg_ctx);
135}
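
/* Overview (sketch): translation is driven by tb_gen_code() below, which
   calls cpu_gen_code().  That runs the target front end
   (gen_intermediate_code()) to produce TCG ops, then the host back end
   (tcg_gen_code()) to emit executable code into the code buffer. */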

/* '*gen_code_size_ptr' contains the size of the generated code
   (host code). */
int cpu_gen_code(CPUArchState *env, TranslationBlock *tb, int *gen_code_size_ptr)
{
    TCGContext *s = &tcg_ctx;
    uint8_t *gen_code_buf;
    int gen_code_size;
#ifdef CONFIG_PROFILER
    int64_t ti;
#endif

#ifdef CONFIG_PROFILER
    s->tb_count1++; /* includes aborted translations because of
                       exceptions */
    ti = profile_getclock();
#endif
    tcg_func_start(s);

    gen_intermediate_code(env, tb);

    /* generate machine code */
    gen_code_buf = tb->tc_ptr;
    tb->tb_next_offset[0] = 0xffff;
    tb->tb_next_offset[1] = 0xffff;
    s->tb_next_offset = tb->tb_next_offset;
#ifdef USE_DIRECT_JUMP
    s->tb_jmp_offset = tb->tb_jmp_offset;
    s->tb_next = NULL;
#else
    s->tb_jmp_offset = NULL;
    s->tb_next = tb->tb_next;
#endif

#ifdef CONFIG_PROFILER
    s->tb_count++;
    s->interm_time += profile_getclock() - ti;
    s->code_time -= profile_getclock();
#endif
    gen_code_size = tcg_gen_code(s, gen_code_buf);
    *gen_code_size_ptr = gen_code_size;
#ifdef CONFIG_PROFILER
    s->code_time += profile_getclock();
    s->code_in_len += tb->size;
    s->code_out_len += gen_code_size;
#endif

#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_OUT_ASM)) {
        qemu_log("OUT: [size=%d]\n", *gen_code_size_ptr);
        log_disas(tb->tc_ptr, *gen_code_size_ptr);
        qemu_log("\n");
        qemu_log_flush();
    }
#endif
    return 0;
}

/* The cpu state corresponding to 'searched_pc' is restored. */
static int cpu_restore_state_from_tb(TranslationBlock *tb, CPUArchState *env,
                                     uintptr_t searched_pc)
{
    TCGContext *s = &tcg_ctx;
    int j;
    uintptr_t tc_ptr;
#ifdef CONFIG_PROFILER
    int64_t ti;
#endif

#ifdef CONFIG_PROFILER
    ti = profile_getclock();
#endif
    tcg_func_start(s);

    gen_intermediate_code_pc(env, tb);

    if (use_icount) {
        /* Reset the cycle counter to the start of the block.  */
        env->icount_decr.u16.low += tb->icount;
        /* Clear the IO flag.  */
        env->can_do_io = 0;
    }

    /* find opc index corresponding to search_pc */
    tc_ptr = (uintptr_t)tb->tc_ptr;
    if (searched_pc < tc_ptr) {
        return -1;
    }

    s->tb_next_offset = tb->tb_next_offset;
#ifdef USE_DIRECT_JUMP
    s->tb_jmp_offset = tb->tb_jmp_offset;
    s->tb_next = NULL;
#else
    s->tb_jmp_offset = NULL;
    s->tb_next = tb->tb_next;
#endif
    j = tcg_gen_code_search_pc(s, (uint8_t *)tc_ptr, searched_pc - tc_ptr);
    if (j < 0) {
        return -1;
    }
    /* now find start of instruction before */
    while (s->gen_opc_instr_start[j] == 0) {
        j--;
    }
    env->icount_decr.u16.low -= s->gen_opc_icount[j];

    restore_state_to_opc(env, tb, j);

#ifdef CONFIG_PROFILER
    s->restore_time += profile_getclock() - ti;
    s->restore_count++;
#endif
    return 0;
}

/* Restore the CPU state for the TB containing the host PC 'retaddr';
   returns false if no TB covers that address. */
bool cpu_restore_state(CPUArchState *env, uintptr_t retaddr)
{
    TranslationBlock *tb;

    tb = tb_find_pc(retaddr);
    if (tb) {
        cpu_restore_state_from_tb(tb, env, retaddr);
        return true;
    }
    return false;
}

/* Make the given host memory range executable.  On POSIX hosts the range
   is first widened to host page boundaries, as mprotect() operates on
   whole pages. */
#ifdef _WIN32
static inline void map_exec(void *addr, long size)
{
    DWORD old_protect;
    VirtualProtect(addr, size,
                   PAGE_EXECUTE_READWRITE, &old_protect);
}
#else
static inline void map_exec(void *addr, long size)
{
    unsigned long start, end, page_size;

    page_size = getpagesize();
    start = (unsigned long)addr;
    start &= ~(page_size - 1);

    end = (unsigned long)addr + size;
    end += page_size - 1;
    end &= ~(page_size - 1);

    mprotect((void *)start, end - start,
             PROT_READ | PROT_WRITE | PROT_EXEC);
}
#endif

static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;
    }
#else
    qemu_real_host_page_size = getpagesize();
#endif
    if (qemu_host_page_size == 0) {
        qemu_host_page_size = qemu_real_host_page_size;
    }
    if (qemu_host_page_size < TARGET_PAGE_SIZE) {
        qemu_host_page_size = TARGET_PAGE_SIZE;
    }
    qemu_host_page_mask = ~(qemu_host_page_size - 1);

#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
    {
#ifdef HAVE_KINFO_GETVMMAP
        struct kinfo_vmentry *freep;
        int i, cnt;

        freep = kinfo_getvmmap(getpid(), &cnt);
        if (freep) {
            mmap_lock();
            for (i = 0; i < cnt; i++) {
                unsigned long startaddr, endaddr;

                startaddr = freep[i].kve_start;
                endaddr = freep[i].kve_end;
                if (h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                    } else {
#if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
                        endaddr = ~0ul;
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
#endif
                    }
                }
            }
            free(freep);
            mmap_unlock();
        }
#else
        FILE *f;

        last_brk = (unsigned long)sbrk(0);

        f = fopen("/compat/linux/proc/self/maps", "r");
        if (f) {
            mmap_lock();

            do {
                unsigned long startaddr, endaddr;
                int n;

                n = fscanf(f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);

                if (n == 2 && h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                    } else {
                        endaddr = ~0ul;
                    }
                    page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                }
            } while (!feof(f));

            fclose(f);
            mmap_unlock();
        }
#endif
    }
#endif
}
379
380static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
381{
382 PageDesc *pd;
383 void **lp;
384 int i;
385
386#if defined(CONFIG_USER_ONLY)
387
388# define ALLOC(P, SIZE) \
389 do { \
390 P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE, \
391 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); \
392 } while (0)
393#else
394# define ALLOC(P, SIZE) \
395 do { P = g_malloc0(SIZE); } while (0)
396#endif
397
398
399 lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));
400
401
402 for (i = V_L1_SHIFT / V_L2_BITS - 1; i > 0; i--) {
403 void **p = *lp;
404
405 if (p == NULL) {
406 if (!alloc) {
407 return NULL;
408 }
409 ALLOC(p, sizeof(void *) * V_L2_SIZE);
410 *lp = p;
411 }
412
413 lp = p + ((index >> (i * V_L2_BITS)) & (V_L2_SIZE - 1));
414 }
415
416 pd = *lp;
417 if (pd == NULL) {
418 if (!alloc) {
419 return NULL;
420 }
421 ALLOC(pd, sizeof(PageDesc) * V_L2_SIZE);
422 *lp = pd;
423 }
424
425#undef ALLOC
426
427 return pd + (index & (V_L2_SIZE - 1));
428}
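
/* Worked example (illustrative): for a 32-bit guest address space with
   TARGET_PAGE_BITS == 12 there are 20 page-index bits; with
   V_L2_BITS == 10 these split into a 10-bit L1 index plus a single
   10-bit leaf level, so the level loop above runs zero times and the
   PageDesc is reached after one indirection. */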

static inline PageDesc *page_find(tb_page_addr_t index)
{
    return page_find_alloc(index, 0);
}

#if !defined(CONFIG_USER_ONLY)
#define mmap_lock() do { } while (0)
#define mmap_unlock() do { } while (0)
#endif

#if defined(CONFIG_USER_ONLY)
/* Currently it is not recommended to allocate big chunks of data in
   user mode.  It will change when a dedicated libc will be used.  */
# define USE_STATIC_CODE_GEN_BUFFER
#endif

/* ??? Should configure for this, not list operating systems here.  */
#if (defined(__linux__) \
    || defined(__FreeBSD__) || defined(__FreeBSD_kernel__) \
    || defined(__DragonFly__) || defined(__OpenBSD__) \
    || defined(__NetBSD__))
# define USE_MMAP
#endif

/* Minimum size of the code gen buffer.  This number is randomly chosen,
   but not so small that we can't have a fair number of TB's live.  */
#define MIN_CODE_GEN_BUFFER_SIZE (1024u * 1024)

/* Maximum size of the code gen buffer we'd like to use.  Unless otherwise
   indicated, this is constrained by the range of direct branches on the
   host cpu, as used by the TCG implementation of goto_tb.  */
#if defined(__x86_64__)
# define MAX_CODE_GEN_BUFFER_SIZE (2ul * 1024 * 1024 * 1024)
#elif defined(__sparc__)
# define MAX_CODE_GEN_BUFFER_SIZE (2ul * 1024 * 1024 * 1024)
#elif defined(__aarch64__)
# define MAX_CODE_GEN_BUFFER_SIZE (128ul * 1024 * 1024)
#elif defined(__arm__)
# define MAX_CODE_GEN_BUFFER_SIZE (16u * 1024 * 1024)
#elif defined(__s390x__)
  /* We have a +- 4GB range on the branches; leave some slop.  */
# define MAX_CODE_GEN_BUFFER_SIZE (3ul * 1024 * 1024 * 1024)
#else
# define MAX_CODE_GEN_BUFFER_SIZE ((size_t)-1)
#endif

#define DEFAULT_CODE_GEN_BUFFER_SIZE_1 (32u * 1024 * 1024)

#define DEFAULT_CODE_GEN_BUFFER_SIZE \
    (DEFAULT_CODE_GEN_BUFFER_SIZE_1 < MAX_CODE_GEN_BUFFER_SIZE \
     ? DEFAULT_CODE_GEN_BUFFER_SIZE_1 : MAX_CODE_GEN_BUFFER_SIZE)
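
/* With the values above, the default is min(32MB, MAX_CODE_GEN_BUFFER_SIZE):
   32MB on hosts such as x86_64, but capped at 16MB on 32-bit ARM hosts. */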

static inline size_t size_code_gen_buffer(size_t tb_size)
{
    if (tb_size == 0) {
#ifdef USE_STATIC_CODE_GEN_BUFFER
        tb_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* Default to one quarter of guest RAM in system mode. */
        tb_size = (unsigned long)(ram_size / 4);
#endif
    }
    if (tb_size < MIN_CODE_GEN_BUFFER_SIZE) {
        tb_size = MIN_CODE_GEN_BUFFER_SIZE;
    }
    if (tb_size > MAX_CODE_GEN_BUFFER_SIZE) {
        tb_size = MAX_CODE_GEN_BUFFER_SIZE;
    }
    tcg_ctx.code_gen_buffer_size = tb_size;
    return tb_size;
}
507
508#ifdef USE_STATIC_CODE_GEN_BUFFER
509static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
510 __attribute__((aligned(CODE_GEN_ALIGN)));
511
512static inline void *alloc_code_gen_buffer(void)
513{
514 map_exec(static_code_gen_buffer, tcg_ctx.code_gen_buffer_size);
515 return static_code_gen_buffer;
516}
517#elif defined(USE_MMAP)
518static inline void *alloc_code_gen_buffer(void)
519{
520 int flags = MAP_PRIVATE | MAP_ANONYMOUS;
521 uintptr_t start = 0;
522 void *buf;
523
524
525
526
527# if defined(__PIE__) || defined(__PIC__)
528
529
530
531
532# elif defined(__x86_64__) && defined(MAP_32BIT)
533
534
535 flags |= MAP_32BIT;
536
537 if (tcg_ctx.code_gen_buffer_size > 800u * 1024 * 1024) {
538 tcg_ctx.code_gen_buffer_size = 800u * 1024 * 1024;
539 }
540# elif defined(__sparc__)
541 start = 0x40000000ul;
542# elif defined(__s390x__)
543 start = 0x90000000ul;
544# endif
545
546 buf = mmap((void *)start, tcg_ctx.code_gen_buffer_size,
547 PROT_WRITE | PROT_READ | PROT_EXEC, flags, -1, 0);
548 return buf == MAP_FAILED ? NULL : buf;
549}
550#else
551static inline void *alloc_code_gen_buffer(void)
552{
553 void *buf = g_malloc(tcg_ctx.code_gen_buffer_size);
554
555 if (buf) {
556 map_exec(buf, tcg_ctx.code_gen_buffer_size);
557 }
558 return buf;
559}
560#endif

static inline void code_gen_alloc(size_t tb_size)
{
    tcg_ctx.code_gen_buffer_size = size_code_gen_buffer(tb_size);
    tcg_ctx.code_gen_buffer = alloc_code_gen_buffer();
    if (tcg_ctx.code_gen_buffer == NULL) {
        fprintf(stderr, "Could not allocate dynamic translator buffer\n");
        exit(1);
    }

    qemu_madvise(tcg_ctx.code_gen_buffer, tcg_ctx.code_gen_buffer_size,
                 QEMU_MADV_HUGEPAGE);

    /* Steal room for the prologue at the end of the buffer.  This ensures
       (via the MAX_CODE_GEN_BUFFER_SIZE limits above) that direct branches
       from TB's to the prologue are going to be in range.  It also means
       that we don't need to mark (additional) portions of the data segment
       as executable.  */
    tcg_ctx.code_gen_prologue = tcg_ctx.code_gen_buffer +
            tcg_ctx.code_gen_buffer_size - 1024;
    tcg_ctx.code_gen_buffer_size -= 1024;

    tcg_ctx.code_gen_buffer_max_size = tcg_ctx.code_gen_buffer_size -
        (TCG_MAX_OP_SIZE * OPC_BUF_SIZE);
    tcg_ctx.code_gen_max_blocks = tcg_ctx.code_gen_buffer_size /
            CODE_GEN_AVG_BLOCK_SIZE;
    tcg_ctx.tb_ctx.tbs =
            g_malloc(tcg_ctx.code_gen_max_blocks * sizeof(TranslationBlock));
}
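
/* Resulting buffer layout: [ TB code ... | slack for the largest single
   TB (TCG_MAX_OP_SIZE * OPC_BUF_SIZE) | 1KB prologue at the very end ].
   Keeping the prologue inside the buffer keeps direct branches from TBs
   to it within range. */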

/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer. Zero means default
   size. */
void tcg_exec_init(unsigned long tb_size)
{
    cpu_gen_init();
    code_gen_alloc(tb_size);
    tcg_ctx.code_gen_ptr = tcg_ctx.code_gen_buffer;
    tcg_register_jit(tcg_ctx.code_gen_buffer, tcg_ctx.code_gen_buffer_size);
    page_init();
#if !defined(CONFIG_USER_ONLY) || !defined(CONFIG_USE_GUEST_BASE)
    /* There's no guest base to take into account, so go ahead and
       initialize the prologue now.  */
    tcg_prologue_init(&tcg_ctx);
#endif
}
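
/* Usage note: tcg_exec_init() must run once before any translation;
   afterwards tb_gen_code() fills the buffer lazily as the guest runs,
   and tb_flush() recycles the whole buffer when it fills up. */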

bool tcg_enabled(void)
{
    return tcg_ctx.code_gen_buffer != NULL;
}

/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
static TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (tcg_ctx.tb_ctx.nb_tbs >= tcg_ctx.code_gen_max_blocks ||
        (tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer) >=
         tcg_ctx.code_gen_buffer_max_size) {
        return NULL;
    }
    tb = &tcg_ctx.tb_ctx.tbs[tcg_ctx.tb_ctx.nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}

void tb_free(TranslationBlock *tb)
{
    /* In practice this is mostly used for single use temporary TB
       Ignore the hard cases and just back up if this TB happens to
       be the last one generated.  */
    if (tcg_ctx.tb_ctx.nb_tbs > 0 &&
            tb == &tcg_ctx.tb_ctx.tbs[tcg_ctx.tb_ctx.nb_tbs - 1]) {
        tcg_ctx.code_gen_ptr = tb->tc_ptr;
        tcg_ctx.tb_ctx.nb_tbs--;
    }
}

static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        g_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}

/* Set to NULL all the 'first_tb' fields in all PageDescs. */
static void page_flush_tb_1(int level, void **lp)
{
    int i;

    if (*lp == NULL) {
        return;
    }
    if (level == 0) {
        PageDesc *pd = *lp;

        for (i = 0; i < V_L2_SIZE; ++i) {
            pd[i].first_tb = NULL;
            invalidate_page_bitmap(pd + i);
        }
    } else {
        void **pp = *lp;

        for (i = 0; i < V_L2_SIZE; ++i) {
            page_flush_tb_1(level - 1, pp + i);
        }
    }
}

static void page_flush_tb(void)
{
    int i;

    for (i = 0; i < V_L1_SIZE; i++) {
        page_flush_tb_1(V_L1_SHIFT / V_L2_BITS - 1, l1_map + i);
    }
}

/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUArchState *env1)
{
    CPUState *cpu;

#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer),
           tcg_ctx.tb_ctx.nb_tbs, tcg_ctx.tb_ctx.nb_tbs > 0 ?
           ((unsigned long)(tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer)) /
           tcg_ctx.tb_ctx.nb_tbs : 0);
#endif
    if ((unsigned long)(tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer)
        > tcg_ctx.code_gen_buffer_size) {
        cpu_abort(env1, "Internal error: code buffer overflow\n");
    }
    tcg_ctx.tb_ctx.nb_tbs = 0;

    CPU_FOREACH(cpu) {
        CPUArchState *env = cpu->env_ptr;

        memset(env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof(void *));
    }

    memset(tcg_ctx.tb_ctx.tb_phys_hash, 0,
           CODE_GEN_PHYS_HASH_SIZE * sizeof(void *));
    page_flush_tb();

    tcg_ctx.code_gen_ptr = tcg_ctx.code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tcg_ctx.tb_ctx.tb_flush_count++;
}
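
/* Note: tb_flush() is invoked, e.g., from tb_gen_code() below when
   tb_alloc() fails because the TB array or code buffer has filled up;
   everything is then retranslated on demand. */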
718
719#ifdef DEBUG_TB_CHECK
720
721static void tb_invalidate_check(target_ulong address)
722{
723 TranslationBlock *tb;
724 int i;
725
726 address &= TARGET_PAGE_MASK;
727 for (i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
728 for (tb = tb_ctx.tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
729 if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
730 address >= tb->pc + tb->size)) {
731 printf("ERROR invalidate: address=" TARGET_FMT_lx
732 " PC=%08lx size=%04x\n",
733 address, (long)tb->pc, tb->size);
734 }
735 }
736 }
737}
738
739
740static void tb_page_check(void)
741{
742 TranslationBlock *tb;
743 int i, flags1, flags2;
744
745 for (i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
746 for (tb = tcg_ctx.tb_ctx.tb_phys_hash[i]; tb != NULL;
747 tb = tb->phys_hash_next) {
748 flags1 = page_get_flags(tb->pc);
749 flags2 = page_get_flags(tb->pc + tb->size - 1);
750 if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
751 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
752 (long)tb->pc, tb->size, flags1, flags2);
753 }
754 }
755 }
756}
757
758#endif

static inline void tb_hash_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;

    for (;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = tb1->phys_hash_next;
            break;
        }
        ptb = &tb1->phys_hash_next;
    }
}

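/* The per-page TB lists and the per-TB jump lists store a tag in the low
   two bits of each pointer: for page lists it selects which of the TB's
   two page slots (page_next[0] or page_next[1]) the link belongs to, and
   in the circular jump list the value 2 marks the list head.  Pointers
   are masked with ~3 before being dereferenced. */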
static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for (;;) {
        tb1 = *ptb;
        n1 = (uintptr_t)tb1 & 3;
        tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}

static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for (;;) {
            tb1 = *ptb;
            n1 = (uintptr_t)tb1 & 3;
            tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
            if (n1 == n && tb1 == tb) {
                break;
            }
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}

/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (uintptr_t)(tb->tc_ptr + tb->tb_next_offset[n]));
}

/* invalidate one TB */
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
{
    CPUState *cpu;
    PageDesc *p;
    unsigned int h, n1;
    tb_page_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_hash_remove(&tcg_ctx.tb_ctx.tb_phys_hash[h], tb);

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tcg_ctx.tb_ctx.tb_invalidated_flag = 1;

    /* remove the TB from the per-cpu jump caches */
    h = tb_jmp_cache_hash_func(tb->pc);
    CPU_FOREACH(cpu) {
        CPUArchState *env = cpu->env_ptr;

        if (env->tb_jmp_cache[h] == tb) {
            env->tb_jmp_cache[h] = NULL;
        }
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for (;;) {
        n1 = (uintptr_t)tb1 & 3;
        if (n1 == 2) {
            break;
        }
        tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((uintptr_t)tb | 2); /* fail safe */

    tcg_ctx.tb_ctx.tb_phys_invalidate_count++;
}

static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}
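
/* Example: set_bits(tab, 3, 11) marks bits 3..13: tab[0] is OR'd with
   0xf8 (bits 3-7) and tab[1] with 0x3f (bits 8-13). */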

static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = g_malloc0(TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (uintptr_t)tb & 3;
        tb = (TranslationBlock *)((uintptr_t)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE) {
                tb_end = TARGET_PAGE_SIZE;
            }
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}

TranslationBlock *tb_gen_code(CPUArchState *env,
                              target_ulong pc, target_ulong cs_base,
                              int flags, int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    tb_page_addr_t phys_pc, phys_page2;
    target_ulong virt_page2;
    int code_gen_size;

    phys_pc = get_page_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        /* Don't forget to invalidate previous TB info.  */
        tcg_ctx.tb_ctx.tb_invalidated_flag = 1;
    }
    tc_ptr = tcg_ctx.code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, &code_gen_size);
    tcg_ctx.code_gen_ptr = (void *)(((uintptr_t)tcg_ctx.code_gen_ptr +
            code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_page_addr_code(env, virt_page2);
    }
    tb_link_page(tb, phys_pc, phys_page2);
    return tb;
}
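
/* Note: a TB spans at most two guest pages; tb_link_page() receives
   phys_page2 == -1 when the block fits entirely in one page. */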

/*
 * Invalidate all TBs which intersect with the target physical address range
 * [start;end[. NOTE: start and end may refer to *different* physical pages.
 * 'is_cpu_write_access' should be true if called from a real cpu write
 * access: the virtual CPU will exit the current TB if code is modified inside
 * this TB.
 */
void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end,
                              int is_cpu_write_access)
{
    while (start < end) {
        tb_invalidate_phys_page_range(start, end, is_cpu_write_access);
        start &= TARGET_PAGE_MASK;
        start += TARGET_PAGE_SIZE;
    }
}

/*
 * Invalidate all TBs which intersect with the target physical address range
 * [start;end[. NOTE: start and end must refer to the *same* physical page.
 * 'is_cpu_write_access' should be true if called from a real cpu write
 * access: the virtual CPU will exit the current TB if code is modified inside
 * this TB.
 */
void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
                                   int is_cpu_write_access)
{
    TranslationBlock *tb, *tb_next, *saved_tb;
    CPUState *cpu = current_cpu;
#if defined(TARGET_HAS_PRECISE_SMC) || !defined(CONFIG_USER_ONLY)
    CPUArchState *env = NULL;
#endif
    tb_page_addr_t tb_start, tb_end;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    int current_tb_not_found = is_cpu_write_access;
    TranslationBlock *current_tb = NULL; /* avoid warning */
    int current_tb_modified = 0;
    target_ulong current_pc = 0; /* avoid warning */
    target_ulong current_cs_base = 0; /* avoid warning */
    int current_flags = 0; /* avoid warning */
#endif

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p) {
        return;
    }
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }
#if defined(TARGET_HAS_PRECISE_SMC) || !defined(CONFIG_USER_ONLY)
    if (cpu != NULL) {
        env = cpu->env_ptr;
    }
#endif

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all
       the code */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (uintptr_t)tb & 3;
        tb = (TranslationBlock *)((uintptr_t)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_io_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_io_pc);
                }
            }
            if (current_tb == tb &&
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */
                current_tb_modified = 1;
                cpu_restore_state_from_tb(current_tb, env, env->mem_io_pc);
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                     &current_flags);
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = NULL;
            if (cpu != NULL) {
                saved_tb = cpu->current_tb;
                cpu->current_tb = NULL;
            }
            tb_phys_invalidate(tb, -1);
            if (cpu != NULL) {
                cpu->current_tb = saved_tb;
                if (cpu->interrupt_request && cpu->current_tb) {
                    cpu_interrupt(cpu, cpu->interrupt_request);
                }
            }
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        cpu->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}

/* len must be <= 8 and start must be a multiple of len */
void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
{
    PageDesc *p;
    int offset, b;

#if 0
    if (1) {
        qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                 cpu_single_env->mem_io_vaddr, len,
                 cpu_single_env->eip,
                 cpu_single_env->eip +
                 (intptr_t)cpu_single_env->segs[R_CS].base);
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p) {
        return;
    }
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1)) {
            goto do_invalidate;
        }
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}
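
/* Example: for a 4-byte write at page offset 0x123, the bitmap test above
   checks bits 0x123..0x126, i.e. (p->code_bitmap[0x24] >> 3) & 0xf. */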
1152
1153#if !defined(CONFIG_SOFTMMU)
1154static void tb_invalidate_phys_page(tb_page_addr_t addr,
1155 uintptr_t pc, void *puc,
1156 bool locked)
1157{
1158 TranslationBlock *tb;
1159 PageDesc *p;
1160 int n;
1161#ifdef TARGET_HAS_PRECISE_SMC
1162 TranslationBlock *current_tb = NULL;
1163 CPUState *cpu = current_cpu;
1164 CPUArchState *env = NULL;
1165 int current_tb_modified = 0;
1166 target_ulong current_pc = 0;
1167 target_ulong current_cs_base = 0;
1168 int current_flags = 0;
1169#endif
1170
1171 addr &= TARGET_PAGE_MASK;
1172 p = page_find(addr >> TARGET_PAGE_BITS);
1173 if (!p) {
1174 return;
1175 }
1176 tb = p->first_tb;
1177#ifdef TARGET_HAS_PRECISE_SMC
1178 if (tb && pc != 0) {
1179 current_tb = tb_find_pc(pc);
1180 }
1181 if (cpu != NULL) {
1182 env = cpu->env_ptr;
1183 }
1184#endif
1185 while (tb != NULL) {
1186 n = (uintptr_t)tb & 3;
1187 tb = (TranslationBlock *)((uintptr_t)tb & ~3);
1188#ifdef TARGET_HAS_PRECISE_SMC
1189 if (current_tb == tb &&
1190 (current_tb->cflags & CF_COUNT_MASK) != 1) {
1191
1192
1193
1194
1195
1196
1197 current_tb_modified = 1;
1198 cpu_restore_state_from_tb(current_tb, env, pc);
1199 cpu_get_tb_cpu_state(env, ¤t_pc, ¤t_cs_base,
1200 ¤t_flags);
1201 }
1202#endif
1203 tb_phys_invalidate(tb, addr);
1204 tb = tb->page_next[n];
1205 }
1206 p->first_tb = NULL;
1207#ifdef TARGET_HAS_PRECISE_SMC
1208 if (current_tb_modified) {
1209
1210
1211
1212 cpu->current_tb = NULL;
1213 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
1214 if (locked) {
1215 mmap_unlock();
1216 }
1217 cpu_resume_from_signal(env, puc);
1218 }
1219#endif
1220}
1221#endif

/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, tb_page_addr_t page_addr)
{
    PageDesc *p;
#ifndef CONFIG_USER_ONLY
    bool page_already_protected;
#endif

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
    tb->page_next[n] = p->first_tb;
#ifndef CONFIG_USER_ONLY
    page_already_protected = p->first_tb != NULL;
#endif
    p->first_tb = (TranslationBlock *)((uintptr_t)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        for (addr = page_addr; addr < page_addr + qemu_host_page_size;
             addr += TARGET_PAGE_SIZE) {

            p2 = page_find(addr >> TARGET_PAGE_BITS);
            if (!p2) {
                continue;
            }
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
        }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
               page_addr);
#endif
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!page_already_protected) {
        tlb_protect_code(page_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}

/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
static void tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
                         tb_page_addr_t phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* Grab the mmap lock to stop another thread invalidating this TB
       before we are done.  */
    mmap_lock();
    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tcg_ctx.tb_ctx.tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1) {
        tb_alloc_page(tb, 1, phys_page2);
    } else {
        tb->page_addr[1] = -1;
    }

    tb->jmp_first = (TranslationBlock *)((uintptr_t)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff) {
        tb_reset_jump(tb, 0);
    }
    if (tb->tb_next_offset[1] != 0xffff) {
        tb_reset_jump(tb, 1);
    }

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
    mmap_unlock();
}

/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
static TranslationBlock *tb_find_pc(uintptr_t tc_ptr)
{
    int m_min, m_max, m;
    uintptr_t v;
    TranslationBlock *tb;

    if (tcg_ctx.tb_ctx.nb_tbs <= 0) {
        return NULL;
    }
    if (tc_ptr < (uintptr_t)tcg_ctx.code_gen_buffer ||
        tc_ptr >= (uintptr_t)tcg_ctx.code_gen_ptr) {
        return NULL;
    }
    /* binary search (cf Knuth) */
    m_min = 0;
    m_max = tcg_ctx.tb_ctx.nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tcg_ctx.tb_ctx.tbs[m];
        v = (uintptr_t)tb->tc_ptr;
        if (v == tc_ptr) {
            return tb;
        } else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    return &tcg_ctx.tb_ctx.tbs[m_max];
}
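
/* Note: TBs are carved sequentially out of the code buffer, so the tbs[]
   array is sorted by tc_ptr and the binary search above finds the last
   TB starting at or before tc_ptr. */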

#if defined(TARGET_HAS_ICE) && !defined(CONFIG_USER_ONLY)
void tb_invalidate_phys_addr(hwaddr addr)
{
    ram_addr_t ram_addr;
    MemoryRegion *mr;
    hwaddr l = 1;

    mr = address_space_translate(&address_space_memory, addr, &addr, &l, false);
    if (!(memory_region_is_ram(mr)
          || memory_region_is_romd(mr))) {
        return;
    }
    ram_addr = (memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK)
        + addr;
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
}
#endif /* TARGET_HAS_ICE && !CONFIG_USER_ONLY */

void tb_check_watchpoint(CPUArchState *env)
{
    TranslationBlock *tb;

    tb = tb_find_pc(env->mem_io_pc);
    if (!tb) {
        cpu_abort(env, "check_watchpoint: could not find TB for pc=%p",
                  (void *)env->mem_io_pc);
    }
    cpu_restore_state_from_tb(tb, env, env->mem_io_pc);
    tb_phys_invalidate(tb, -1);
}
1388
1389#ifndef CONFIG_USER_ONLY
1390
1391static void tcg_handle_interrupt(CPUState *cpu, int mask)
1392{
1393 CPUArchState *env = cpu->env_ptr;
1394 int old_mask;
1395
1396 old_mask = cpu->interrupt_request;
1397 cpu->interrupt_request |= mask;
1398
1399
1400
1401
1402
1403 if (!qemu_cpu_is_self(cpu)) {
1404 qemu_cpu_kick(cpu);
1405 return;
1406 }
1407
1408 if (use_icount) {
1409 env->icount_decr.u16.high = 0xffff;
1410 if (!can_do_io(env)
1411 && (mask & ~old_mask) != 0) {
1412 cpu_abort(env, "Raised interrupt while not in I/O function");
1413 }
1414 } else {
1415 cpu->tcg_exit_req = 1;
1416 }
1417}

CPUInterruptHandler cpu_interrupt_handler = tcg_handle_interrupt;

/* in deterministic execution mode, instructions doing device I/Os
   must be at the end of the TB */
void cpu_io_recompile(CPUArchState *env, uintptr_t retaddr)
{
    TranslationBlock *tb;
    uint32_t n, cflags;
    target_ulong pc, cs_base;
    uint64_t flags;

    tb = tb_find_pc(retaddr);
    if (!tb) {
        cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
                  (void *)retaddr);
    }
    n = env->icount_decr.u16.low + tb->icount;
    cpu_restore_state_from_tb(tb, env, retaddr);
    /* Calculate how many instructions had been executed before the fault
       occurred.  */
    n = n - env->icount_decr.u16.low;
    /* Generate a new TB ending on the I/O insn.  */
    n++;
    /* On MIPS and SH, delay slot instructions can only be restarted if
       they were already the first instruction in the TB.  If this is not
       the first instruction in a TB then re-execute the preceding
       branch.  */
#if defined(TARGET_MIPS)
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
        env->active_tc.PC -= 4;
        env->icount_decr.u16.low++;
        env->hflags &= ~MIPS_HFLAG_BMASK;
    }
#elif defined(TARGET_SH4)
    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
        && n > 1) {
        env->pc -= 2;
        env->icount_decr.u16.low++;
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
    }
#endif
    /* This should never happen.  */
    if (n > CF_COUNT_MASK) {
        cpu_abort(env, "TB too big during recompile");
    }

    cflags = n | CF_LAST_IO;
    pc = tb->pc;
    cs_base = tb->cs_base;
    flags = tb->flags;
    tb_phys_invalidate(tb, -1);
    /* FIXME: In theory this could raise an exception.  In practice
       we have already translated the block once so it's probably ok.  */
    tb_gen_code(env, pc, cs_base, flags, cflags);
    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
       the first in the TB) then we end up generating a whole new TB and
       repeating the fault, which is horribly inefficient.
       Better would be to execute just this insn uncached, or generate a
       second new TB.  */
    cpu_resume_from_signal(env, NULL);
}

void tb_flush_jmp_cache(CPUArchState *env, target_ulong addr)
{
    unsigned int i;

    /* Discard jump cache entries for any tb which might potentially
       overlap the flushed page.  */
    i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
    memset(&env->tb_jmp_cache[i], 0,
           TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));

    i = tb_jmp_cache_hash_page(addr);
    memset(&env->tb_jmp_cache[i], 0,
           TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
}

void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for (i = 0; i < tcg_ctx.tb_ctx.nb_tbs; i++) {
        tb = &tcg_ctx.tb_ctx.tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size) {
            max_target_code_size = tb->size;
        }
        if (tb->page_addr[1] != -1) {
            cross_page++;
        }
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }

    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "gen code size       %td/%zd\n",
                tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer,
                tcg_ctx.code_gen_buffer_max_size);
    cpu_fprintf(f, "TB count            %d/%d\n",
                tcg_ctx.tb_ctx.nb_tbs, tcg_ctx.code_gen_max_blocks);
    cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n",
                tcg_ctx.tb_ctx.nb_tbs ? target_code_size /
                        tcg_ctx.tb_ctx.nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size    %td bytes (expansion ratio: %0.1f)\n",
                tcg_ctx.tb_ctx.nb_tbs ? (tcg_ctx.code_gen_ptr -
                                         tcg_ctx.code_gen_buffer) /
                        tcg_ctx.tb_ctx.nb_tbs : 0,
                target_code_size ? (double) (tcg_ctx.code_gen_ptr -
                                             tcg_ctx.code_gen_buffer) /
                        target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n", cross_page,
                tcg_ctx.tb_ctx.nb_tbs ? (cross_page * 100) /
                        tcg_ctx.tb_ctx.nb_tbs : 0);
    cpu_fprintf(f, "direct jump count   %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                tcg_ctx.tb_ctx.nb_tbs ? (direct_jmp_count * 100) /
                        tcg_ctx.tb_ctx.nb_tbs : 0,
                direct_jmp2_count,
                tcg_ctx.tb_ctx.nb_tbs ? (direct_jmp2_count * 100) /
                        tcg_ctx.tb_ctx.nb_tbs : 0);
    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count      %d\n", tcg_ctx.tb_ctx.tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n",
                tcg_ctx.tb_ctx.tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count     %d\n", tlb_flush_count);
    tcg_dump_info(f, cpu_fprintf);
}

#else /* CONFIG_USER_ONLY */

void cpu_interrupt(CPUState *cpu, int mask)
{
    cpu->interrupt_request |= mask;
    cpu->tcg_exit_req = 1;
}

/*
 * Walks guest process memory "regions" one by one
 * and calls callback function 'fn' for each region.
 */
struct walk_memory_regions_data {
    walk_memory_regions_fn fn;
    void *priv;
    uintptr_t start;
    int prot;
};

static int walk_memory_regions_end(struct walk_memory_regions_data *data,
                                   abi_ulong end, int new_prot)
{
    if (data->start != -1ul) {
        int rc = data->fn(data->priv, data->start, end, data->prot);
        if (rc != 0) {
            return rc;
        }
    }

    data->start = (new_prot ? end : -1ul);
    data->prot = new_prot;

    return 0;
}

static int walk_memory_regions_1(struct walk_memory_regions_data *data,
                                 abi_ulong base, int level, void **lp)
{
    abi_ulong pa;
    int i, rc;

    if (*lp == NULL) {
        return walk_memory_regions_end(data, base, 0);
    }

    if (level == 0) {
        PageDesc *pd = *lp;

        for (i = 0; i < V_L2_SIZE; ++i) {
            int prot = pd[i].flags;

            pa = base | (i << TARGET_PAGE_BITS);
            if (prot != data->prot) {
                rc = walk_memory_regions_end(data, pa, prot);
                if (rc != 0) {
                    return rc;
                }
            }
        }
    } else {
        void **pp = *lp;

        for (i = 0; i < V_L2_SIZE; ++i) {
            pa = base | ((abi_ulong)i <<
                (TARGET_PAGE_BITS + V_L2_BITS * level));
            rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
            if (rc != 0) {
                return rc;
            }
        }
    }

    return 0;
}

int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
{
    struct walk_memory_regions_data data;
    uintptr_t i;

    data.fn = fn;
    data.priv = priv;
    data.start = -1ul;
    data.prot = 0;

    for (i = 0; i < V_L1_SIZE; i++) {
        int rc = walk_memory_regions_1(&data, (abi_ulong)i << V_L1_SHIFT,
                                       V_L1_SHIFT / V_L2_BITS - 1, l1_map + i);

        if (rc != 0) {
            return rc;
        }
    }

    return walk_memory_regions_end(&data, 0, 0);
}

static int dump_region(void *priv, abi_ulong start,
                       abi_ulong end, unsigned long prot)
{
    FILE *f = (FILE *)priv;

    (void) fprintf(f, TARGET_ABI_FMT_lx"-"TARGET_ABI_FMT_lx
                   " "TARGET_ABI_FMT_lx" %c%c%c\n",
                   start, end, end - start,
                   ((prot & PAGE_READ) ? 'r' : '-'),
                   ((prot & PAGE_WRITE) ? 'w' : '-'),
                   ((prot & PAGE_EXEC) ? 'x' : '-'));

    return 0;
}

/* dump memory mappings */
void page_dump(FILE *f)
{
    const int length = sizeof(abi_ulong) * 2;
    (void) fprintf(f, "%-*s %-*s %-*s %s\n",
                   length, "start", length, "end", length, "size", "prot");
    walk_memory_regions(f, dump_region);
}

int page_get_flags(target_ulong address)
{
    PageDesc *p;

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p) {
        return 0;
    }
    return p->flags;
}

/* Modify the flags of a page and invalidate the code if necessary.
   The flag PAGE_WRITE_ORG is positioned automatically depending
   on PAGE_WRITE.  The mmap_lock should be held.  */
void page_set_flags(target_ulong start, target_ulong end, int flags)
{
    target_ulong addr, len;

    /* This function should never be called with addresses outside the
       guest address space.  If this assert fires, it probably indicates
       a missing call to h2g_valid.  */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(end < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
#endif
    assert(start < end);

    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    if (flags & PAGE_WRITE) {
        flags |= PAGE_WRITE_ORG;
    }

    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);

        /* If the write protection bit is set, then we invalidate
           the code inside.  */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr, 0, NULL, false);
        }
        p->flags = flags;
    }
}

int page_check_range(target_ulong start, target_ulong len, int flags)
{
    PageDesc *p;
    target_ulong end;
    target_ulong addr;

    /* This function should never be called with addresses outside the
       guest address space.  If this assert fires, it probably indicates
       a missing call to h2g_valid.  */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(start < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
#endif

    if (len == 0) {
        return 0;
    }
    if (start + len - 1 < start) {
        /* We've wrapped around.  */
        return -1;
    }

    /* must do this before we lose bits in the next step */
    end = TARGET_PAGE_ALIGN(start + len);
    start = start & TARGET_PAGE_MASK;

    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        p = page_find(addr >> TARGET_PAGE_BITS);
        if (!p) {
            return -1;
        }
        if (!(p->flags & PAGE_VALID)) {
            return -1;
        }

        if ((flags & PAGE_READ) && !(p->flags & PAGE_READ)) {
            return -1;
        }
        if (flags & PAGE_WRITE) {
            if (!(p->flags & PAGE_WRITE_ORG)) {
                return -1;
            }
            /* unprotect the page if it was put read-only because it
               contains translated code */
            if (!(p->flags & PAGE_WRITE)) {
                if (!page_unprotect(addr, 0, NULL)) {
                    return -1;
                }
            }
            return 0;
        }
    }
    return 0;
}

/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(target_ulong address, uintptr_t pc, void *puc)
{
    unsigned int prot;
    PageDesc *p;
    target_ulong host_start, host_end, addr;

    /* Technically this isn't safe inside a signal handler.  However we
       know this only ever happens in a synchronous SEGV handler, so in
       practice it seems to be ok.  */
    mmap_lock();

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p) {
        mmap_unlock();
        return 0;
    }

    /* if the page was really writable, then we change its
       protection back to writable */
    if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
        host_start = address & qemu_host_page_mask;
        host_end = host_start + qemu_host_page_size;

        prot = 0;
        for (addr = host_start ; addr < host_end ; addr += TARGET_PAGE_SIZE) {
            p = page_find(addr >> TARGET_PAGE_BITS);
            p->flags |= PAGE_WRITE;
            prot |= p->flags;

            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            tb_invalidate_phys_page(addr, pc, puc, true);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(addr);
#endif
        }
        mprotect((void *)g2h(host_start), qemu_host_page_size,
                 prot & PAGE_BITS);

        mmap_unlock();
        return 1;
    }
    mmap_unlock();
    return 0;
}
#endif /* CONFIG_USER_ONLY */