/*
 *  Host code generation
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#ifdef _WIN32
#include <windows.h>
#endif
#include "qemu/osdep.h"


#include "qemu-common.h"
#define NO_CPU_IO_DEFS
#include "cpu.h"
#include "trace-root.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg.h"
#if defined(CONFIG_USER_ONLY)
#include "qemu.h"
#include "exec/exec-all.h"
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
#include <sys/param.h>
#if __FreeBSD_version >= 700104
#define HAVE_KINFO_GETVMMAP
#define sigqueue sigqueue_freebsd
#include <sys/proc.h>
#include <machine/profile.h>
#define _KERNEL
#include <sys/user.h>
#undef _KERNEL
#undef sigqueue
#include <libutil.h>
#endif
#endif
#else
#include "exec/address-spaces.h"
#endif

#include "exec/cputlb.h"
#include "exec/tb-hash.h"
#include "translate-all.h"
#include "qemu/bitmap.h"
#include "qemu/timer.h"
#include "qemu/main-loop.h"
#include "exec/log.h"
#include "sysemu/cpus.h"

/* #define DEBUG_TB_INVALIDATE */
/* #define DEBUG_TB_FLUSH */
/* make various TB consistency checks */
/* #define DEBUG_TB_CHECK */

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif

/* Access to the various translation structures needs to be serialised:
 * system-mode emulation relies on tb_lock, while user-mode emulation
 * protects the memory-related structures with mmap_lock.
 */
#ifdef CONFIG_SOFTMMU
#define assert_memory_lock() tcg_debug_assert(have_tb_lock)
#else
#define assert_memory_lock() tcg_debug_assert(have_mmap_lock())
#endif

#define SMC_BITMAP_USE_THRESHOLD 10

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
#ifdef CONFIG_SOFTMMU
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    unsigned long *code_bitmap;
#else
    unsigned long flags;
#endif
} PageDesc;

/* In system mode we want L1_MAP to be based on ram offsets,
   while in user mode we want it to be based on virtual addresses.  */
#if !defined(CONFIG_USER_ONLY)
#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
# define L1_MAP_ADDR_SPACE_BITS HOST_LONG_BITS
#else
# define L1_MAP_ADDR_SPACE_BITS TARGET_PHYS_ADDR_SPACE_BITS
#endif
#else
# define L1_MAP_ADDR_SPACE_BITS TARGET_VIRT_ADDR_SPACE_BITS
#endif

/* Size of the L2 (and L3, etc) page tables.  */
#define V_L2_BITS 10
#define V_L2_SIZE (1 << V_L2_BITS)

uintptr_t qemu_host_page_size;
intptr_t qemu_host_page_mask;

/*
 * L1 Mapping properties
 */
static int v_l1_size;
static int v_l1_shift;
static int v_l2_levels;

/* The bottom level has pointers to PageDesc, and is indexed by
 * anything from 4 to (V_L2_BITS + 3) bits.
 */
#define V_L1_MIN_BITS 4
#define V_L1_MAX_BITS (V_L2_BITS + 3)
#define V_L1_MAX_SIZE (1 << V_L1_MAX_BITS)

static void *l1_map[V_L1_MAX_SIZE];

/* code generation context */
TCGContext tcg_ctx;
bool parallel_cpus;

/* translation block context */
__thread int have_tb_lock;

static void page_table_config_init(void)
{
    uint32_t v_l1_bits;

    assert(TARGET_PAGE_BITS);
    /* The bits remaining after the fixed-size mid levels are split
     * between a variable-sized top level and the page offset.
     */
    v_l1_bits = (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % V_L2_BITS;
    if (v_l1_bits < V_L1_MIN_BITS) {
        v_l1_bits += V_L2_BITS;
    }

    v_l1_size = 1 << v_l1_bits;
    v_l1_shift = L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - v_l1_bits;
    v_l2_levels = v_l1_shift / V_L2_BITS - 1;

    assert(v_l1_bits <= V_L1_MAX_BITS);
    assert(v_l1_shift % V_L2_BITS == 0);
    assert(v_l2_levels >= 0);
}
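
/*
 * Worked example of the sizing above (illustrative only, not part of
 * the original source): with an l1 map covering 48 address-space bits
 * and 12-bit target pages, 36 bits remain to be resolved.
 * 36 % 10 == 6, and 6 >= V_L1_MIN_BITS, so the top level gets 6 bits
 * (v_l1_size == 64), v_l1_shift == 30, and v_l2_levels == 2: a lookup
 * walks one 64-entry top level and two 1024-entry mid levels before
 * reaching the PageDesc leaf.
 */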

#define assert_tb_locked() tcg_debug_assert(have_tb_lock)
#define assert_tb_unlocked() tcg_debug_assert(!have_tb_lock)

void tb_lock(void)
{
    assert_tb_unlocked();
    qemu_mutex_lock(&tcg_ctx.tb_ctx.tb_lock);
    have_tb_lock++;
}

void tb_unlock(void)
{
    assert_tb_locked();
    have_tb_lock--;
    qemu_mutex_unlock(&tcg_ctx.tb_ctx.tb_lock);
}

void tb_lock_reset(void)
{
    if (have_tb_lock) {
        qemu_mutex_unlock(&tcg_ctx.tb_ctx.tb_lock);
        have_tb_lock = 0;
    }
}

static TranslationBlock *tb_find_pc(uintptr_t tc_ptr);

void cpu_gen_init(void)
{
    tcg_context_init(&tcg_ctx);
}

/* Encode VAL as a signed leb128 sequence at P.
   Return P incremented past the encoded value.  */
static uint8_t *encode_sleb128(uint8_t *p, target_long val)
{
    int more, byte;

    do {
        byte = val & 0x7f;
        val >>= 7;
        more = !((val == 0 && (byte & 0x40) == 0)
                 || (val == -1 && (byte & 0x40) != 0));
        if (more) {
            byte |= 0x80;
        }
        *p++ = byte;
    } while (more);

    return p;
}

/* Decode a signed leb128 sequence at *PP; increment *PP past the
   decoded value.  Return the decoded value.  */
static target_long decode_sleb128(uint8_t **pp)
{
    uint8_t *p = *pp;
    target_long val = 0;
    int byte, shift = 0;

    do {
        byte = *p++;
        val |= (target_ulong)(byte & 0x7f) << shift;
        shift += 7;
    } while (byte & 0x80);
    if (shift < TARGET_LONG_BITS && (byte & 0x40)) {
        val |= -(target_ulong)1 << shift;
    }

    *pp = p;
    return val;
}
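
/*
 * Worked example (illustrative, not part of the original source):
 * encode_sleb128(p, -123) emits { 0x85, 0x7f }.  First pass:
 * -123 & 0x7f == 0x05, val becomes -1, and bit 6 of the byte is clear
 * so more data is needed and 0x85 is emitted.  Second pass: -1 & 0x7f
 * == 0x7f, bit 6 is set while val == -1, so 0x7f ends the sequence.
 * decode_sleb128 reverses this: 0x05 | (0x7f << 7) == 0x3f85, then the
 * set bit 6 of the last byte sign-extends from bit 14, giving -123.
 */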

/* Encode the data collected about the instructions while compiling TB.
   Place the data at BLOCK, and return the number of bytes consumed.

   The logical table consists of TARGET_INSN_START_WORDS target_ulong's,
   which come from the target's insn_start data, followed by a uintptr_t
   which comes from the host pc of the end of the code implementing the insn.

   Each line of the table is encoded as sleb128 deltas from the previous
   line.  The seed for the first line is { tb->pc, 0..., tb->tc_ptr }.
   That is, the first column is seeded with the guest pc, the last column
   with the host pc, and the middle columns with zeros.  */

static int encode_search(TranslationBlock *tb, uint8_t *block)
{
    uint8_t *highwater = tcg_ctx.code_gen_highwater;
    uint8_t *p = block;
    int i, j, n;

    tb->tc_search = block;

    for (i = 0, n = tb->icount; i < n; ++i) {
        target_ulong prev;

        for (j = 0; j < TARGET_INSN_START_WORDS; ++j) {
            if (i == 0) {
                prev = (j == 0 ? tb->pc : 0);
            } else {
                prev = tcg_ctx.gen_insn_data[i - 1][j];
            }
            p = encode_sleb128(p, tcg_ctx.gen_insn_data[i][j] - prev);
        }
        prev = (i == 0 ? 0 : tcg_ctx.gen_insn_end_off[i - 1]);
        p = encode_sleb128(p, tcg_ctx.gen_insn_end_off[i] - prev);

        /* Test for (pending) buffer overflow.  The assumption is that any
           one row beginning below the high water mark cannot overrun
           the buffer completely.  Thus we can test for overflow after
           encoding a row without having to check during encoding.  */
        if (unlikely(p > highwater)) {
            return -1;
        }
    }

    return p - block;
}
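
/*
 * Illustrative layout (an assumption for exposition, not from the
 * original source): for a TB of three guest instructions on a target
 * with TARGET_INSN_START_WORDS == 1, the search data is six sleb128
 * values -- { pc0 - tb->pc, end0 - 0, pc1 - pc0, end1 - end0,
 * pc2 - pc1, end2 - end1 } -- so consecutive-instruction deltas stay
 * small and usually encode in a single byte each.
 */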

/* The cpu state corresponding to 'searched_pc' is restored.
 * Called with tb_lock held.
 */
static int cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb,
                                     uintptr_t searched_pc)
{
    target_ulong data[TARGET_INSN_START_WORDS] = { tb->pc };
    uintptr_t host_pc = (uintptr_t)tb->tc_ptr;
    CPUArchState *env = cpu->env_ptr;
    uint8_t *p = tb->tc_search;
    int i, j, num_insns = tb->icount;
#ifdef CONFIG_PROFILER
    int64_t ti = profile_getclock();
#endif

    searched_pc -= GETPC_ADJ;

    if (searched_pc < host_pc) {
        return -1;
    }

    /* Reconstruct the stored insn data while looking for the point at
       which the end of the insn exceeds the searched_pc.  */
    for (i = 0; i < num_insns; ++i) {
        for (j = 0; j < TARGET_INSN_START_WORDS; ++j) {
            data[j] += decode_sleb128(&p);
        }
        host_pc += decode_sleb128(&p);
        if (host_pc > searched_pc) {
            goto found;
        }
    }
    return -1;

 found:
    if (tb->cflags & CF_USE_ICOUNT) {
        assert(use_icount);
        /* Reset the cycle counter to the start of the block.  */
        cpu->icount_decr.u16.low += num_insns;
        /* Clear the IO flag.  */
        cpu->can_do_io = 0;
    }
    cpu->icount_decr.u16.low -= i;
    restore_state_to_opc(env, tb, data);

#ifdef CONFIG_PROFILER
    tcg_ctx.restore_time += profile_getclock() - ti;
    tcg_ctx.restore_count++;
#endif
    return 0;
}

bool cpu_restore_state(CPUState *cpu, uintptr_t retaddr)
{
    TranslationBlock *tb;
    bool r = false;

    /* A retaddr of zero means there is no fault address to look up;
     * there is nothing sensible to restore, so return early rather
     * than taking tb_lock and searching for a TB.
     */
    if (!retaddr) {
        return r;
    }

    tb_lock();
    tb = tb_find_pc(retaddr);
    if (tb) {
        cpu_restore_state_from_tb(cpu, tb, retaddr);
        if (tb->cflags & CF_NOCACHE) {
            /* one-shot translation, invalidate it immediately */
            tb_phys_invalidate(tb, -1);
            tb_free(tb);
        }
        r = true;
    }
    tb_unlock();

    return r;
}

void page_size_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
    qemu_real_host_page_size = getpagesize();
    qemu_real_host_page_mask = -(intptr_t)qemu_real_host_page_size;
    if (qemu_host_page_size == 0) {
        qemu_host_page_size = qemu_real_host_page_size;
    }
    if (qemu_host_page_size < TARGET_PAGE_SIZE) {
        qemu_host_page_size = TARGET_PAGE_SIZE;
    }
    qemu_host_page_mask = -(intptr_t)qemu_host_page_size;
}

static void page_init(void)
{
    page_size_init();
    page_table_config_init();

#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
    {
#ifdef HAVE_KINFO_GETVMMAP
        struct kinfo_vmentry *freep;
        int i, cnt;

        freep = kinfo_getvmmap(getpid(), &cnt);
        if (freep) {
            mmap_lock();
            for (i = 0; i < cnt; i++) {
                unsigned long startaddr, endaddr;

                startaddr = freep[i].kve_start;
                endaddr = freep[i].kve_end;
                if (h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                    } else {
#if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
                        endaddr = ~0ul;
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
#endif
                    }
                }
            }
            free(freep);
            mmap_unlock();
        }
#else
        FILE *f;

        last_brk = (unsigned long)sbrk(0);

        f = fopen("/compat/linux/proc/self/maps", "r");
        if (f) {
            mmap_lock();

            do {
                unsigned long startaddr, endaddr;
                int n;

                n = fscanf(f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);

                if (n == 2 && h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                    } else {
                        endaddr = ~0ul;
                    }
                    page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                }
            } while (!feof(f));

            fclose(f);
            mmap_unlock();
        }
#endif
    }
#endif
}

/* If alloc=1:
 * Called with tb_lock held for system emulation.
 * Called with mmap_lock held for user-mode emulation.
 */
static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
{
    PageDesc *pd;
    void **lp;
    int i;

    if (alloc) {
        assert_memory_lock();
    }

    /* Level 1.  Always allocated.  */
    lp = l1_map + ((index >> v_l1_shift) & (v_l1_size - 1));

    /* Level 2..N-1.  */
    for (i = v_l2_levels; i > 0; i--) {
        void **p = atomic_rcu_read(lp);

        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            p = g_new0(void *, V_L2_SIZE);
            atomic_rcu_set(lp, p);
        }

        lp = p + ((index >> (i * V_L2_BITS)) & (V_L2_SIZE - 1));
    }

    pd = atomic_rcu_read(lp);
    if (pd == NULL) {
        if (!alloc) {
            return NULL;
        }
        pd = g_new0(PageDesc, V_L2_SIZE);
        atomic_rcu_set(lp, pd);
    }

    return pd + (index & (V_L2_SIZE - 1));
}

static inline PageDesc *page_find(tb_page_addr_t index)
{
    return page_find_alloc(index, 0);
}
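
/*
 * Index decomposition example (illustrative only, reusing the sample
 * configuration sketched after page_table_config_init: 48 address
 * bits, 12-bit pages, so v_l1_shift == 30 and v_l2_levels == 2): a
 * page index is consumed as bits [35:30] for l1_map, bits [29:20] and
 * [19:10] for the two intermediate levels, and bits [9:0] to select
 * the PageDesc within the leaf block of V_L2_SIZE entries.
 */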

#if defined(CONFIG_USER_ONLY)
/* Currently it is not recommended to allocate big chunks of data in
   user mode.  It will change when a dedicated libc will be used.  */
/* ??? 64-bit hosts ought to have no problem mmaping data outside the
   low 4GB.  */
#define USE_STATIC_CODE_GEN_BUFFER
#endif

/* Minimum size of the code gen buffer.  This number is randomly chosen,
   but not so small that we can't have a fair number of TB's live.  */
#define MIN_CODE_GEN_BUFFER_SIZE (1024u * 1024)

/* Maximum size of the code gen buffer we'd like to use.  Unless otherwise
   indicated, this is constrained by the range of direct branches on the
   host cpu, as used by the TCG implementation of goto_tb.  */
#if defined(__x86_64__)
# define MAX_CODE_GEN_BUFFER_SIZE (2ul * 1024 * 1024 * 1024)
#elif defined(__sparc__)
# define MAX_CODE_GEN_BUFFER_SIZE (2ul * 1024 * 1024 * 1024)
#elif defined(__powerpc64__)
# define MAX_CODE_GEN_BUFFER_SIZE (2ul * 1024 * 1024 * 1024)
#elif defined(__powerpc__)
# define MAX_CODE_GEN_BUFFER_SIZE (32u * 1024 * 1024)
#elif defined(__aarch64__)
# define MAX_CODE_GEN_BUFFER_SIZE (128ul * 1024 * 1024)
#elif defined(__arm__)
# define MAX_CODE_GEN_BUFFER_SIZE (16u * 1024 * 1024)
#elif defined(__s390x__)
  /* We have a +- 4GB range on the branches; leave some slop.  */
# define MAX_CODE_GEN_BUFFER_SIZE (3ul * 1024 * 1024 * 1024)
#elif defined(__mips__)
  /* We have a 256MB branch region, but leave room to make sure the
     main executable is also within that region.  */
# define MAX_CODE_GEN_BUFFER_SIZE (128ul * 1024 * 1024)
#else
# define MAX_CODE_GEN_BUFFER_SIZE ((size_t)-1)
#endif

#define DEFAULT_CODE_GEN_BUFFER_SIZE_1 (32u * 1024 * 1024)

#define DEFAULT_CODE_GEN_BUFFER_SIZE \
  (DEFAULT_CODE_GEN_BUFFER_SIZE_1 < MAX_CODE_GEN_BUFFER_SIZE \
   ? DEFAULT_CODE_GEN_BUFFER_SIZE_1 : MAX_CODE_GEN_BUFFER_SIZE)

static inline size_t size_code_gen_buffer(size_t tb_size)
{
    /* Size the buffer.  */
    if (tb_size == 0) {
#ifdef USE_STATIC_CODE_GEN_BUFFER
        tb_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* With no explicit size given, default to a quarter of guest
           RAM, clamped to the limits below.  */
        tb_size = (unsigned long)(ram_size / 4);
#endif
    }
    if (tb_size < MIN_CODE_GEN_BUFFER_SIZE) {
        tb_size = MIN_CODE_GEN_BUFFER_SIZE;
    }
    if (tb_size > MAX_CODE_GEN_BUFFER_SIZE) {
        tb_size = MAX_CODE_GEN_BUFFER_SIZE;
    }
    return tb_size;
}
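
/*
 * Example of the clamping above (illustrative): on an x86_64 host
 * (MAX_CODE_GEN_BUFFER_SIZE of 2GB) running a system-mode guest with
 * 8GB of RAM, the default request of ram_size/4 is 2GB, which already
 * sits at the maximum; a 512MB guest would instead get a 128MB buffer,
 * comfortably between the 1MB minimum and the 2GB maximum.
 */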

#ifdef __mips__
/* In order to use J and JAL within the code_gen_buffer, we require
   that the buffer not cross a 256MB boundary.  */
static inline bool cross_256mb(void *addr, size_t size)
{
    return ((uintptr_t)addr ^ ((uintptr_t)addr + size)) & ~0x0ffffffful;
}
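
/*
 * Illustrative check (not part of the original source): the XOR above
 * exposes any address bit that differs between the first and last byte
 * of the buffer; masking off the low 28 bits keeps only differences at
 * or above 256MB granularity.  E.g. addr = 0x0fff0000 with size =
 * 0x20000 gives 0x0fff0000 ^ 0x10010000 = 0x1ffe0000, which survives
 * the ~0x0fffffff mask, flagging a crossing.
 */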

/* We weren't able to allocate a buffer without crossing that boundary,
   so make do with the larger portion of the buffer that doesn't cross.
   Returns the new base of the buffer, and adjusts code_gen_buffer_size.  */
static inline void *split_cross_256mb(void *buf1, size_t size1)
{
    void *buf2 = (void *)(((uintptr_t)buf1 + size1) & ~0x0ffffffful);
    size_t size2 = buf1 + size1 - buf2;

    size1 = buf2 - buf1;
    if (size1 < size2) {
        size1 = size2;
        buf1 = buf2;
    }

    tcg_ctx.code_gen_buffer_size = size1;
    return buf1;
}
#endif

#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
    __attribute__((aligned(CODE_GEN_ALIGN)));

# ifdef _WIN32
static inline void do_protect(void *addr, long size, int prot)
{
    DWORD old_protect;
    VirtualProtect(addr, size, prot, &old_protect);
}

static inline void map_exec(void *addr, long size)
{
    do_protect(addr, size, PAGE_EXECUTE_READWRITE);
}

static inline void map_none(void *addr, long size)
{
    do_protect(addr, size, PAGE_NOACCESS);
}
# else
static inline void do_protect(void *addr, long size, int prot)
{
    uintptr_t start, end;

    start = (uintptr_t)addr;
    start &= qemu_real_host_page_mask;

    end = (uintptr_t)addr + size;
    end = ROUND_UP(end, qemu_real_host_page_size);

    mprotect((void *)start, end - start, prot);
}

static inline void map_exec(void *addr, long size)
{
    do_protect(addr, size, PROT_READ | PROT_WRITE | PROT_EXEC);
}

static inline void map_none(void *addr, long size)
{
    do_protect(addr, size, PROT_NONE);
}
# endif

static inline void *alloc_code_gen_buffer(void)
{
    void *buf = static_code_gen_buffer;
    size_t full_size, size;

    /* The size of the buffer, rounded down to end on a page boundary.  */
    full_size = (((uintptr_t)buf + sizeof(static_code_gen_buffer))
                 & qemu_real_host_page_mask) - (uintptr_t)buf;

    /* Reserve a guard page.  */
    size = full_size - qemu_real_host_page_size;

    /* Honor a command-line option limiting the size of the buffer.  */
    if (size > tcg_ctx.code_gen_buffer_size) {
        size = (((uintptr_t)buf + tcg_ctx.code_gen_buffer_size)
                & qemu_real_host_page_mask) - (uintptr_t)buf;
    }
    tcg_ctx.code_gen_buffer_size = size;

#ifdef __mips__
    if (cross_256mb(buf, size)) {
        buf = split_cross_256mb(buf, size);
        size = tcg_ctx.code_gen_buffer_size;
    }
#endif

    map_exec(buf, size);
    map_none(buf + size, qemu_real_host_page_size);
    qemu_madvise(buf, size, QEMU_MADV_HUGEPAGE);

    return buf;
}
#elif defined(_WIN32)
static inline void *alloc_code_gen_buffer(void)
{
    size_t size = tcg_ctx.code_gen_buffer_size;
    void *buf1, *buf2;

    /* Perform the allocation in two steps, so that the guard page
       is reserved but uncommitted.  */
    buf1 = VirtualAlloc(NULL, size + qemu_real_host_page_size,
                        MEM_RESERVE, PAGE_NOACCESS);
    if (buf1 != NULL) {
        buf2 = VirtualAlloc(buf1, size, MEM_COMMIT, PAGE_EXECUTE_READWRITE);
        assert(buf1 == buf2);
    }

    return buf1;
}
#else
static inline void *alloc_code_gen_buffer(void)
{
    int flags = MAP_PRIVATE | MAP_ANONYMOUS;
    uintptr_t start = 0;
    size_t size = tcg_ctx.code_gen_buffer_size;
    void *buf;

    /* Constrain the position of the buffer based on the host cpu.
       Note that these addresses are chosen in concert with the
       addresses assigned in the relevant linker script file.  */
# if defined(__PIE__) || defined(__PIC__)
    /* Don't bother setting a preferred location if we're building
       a position-independent executable.  We're more likely to get
       an address near the main executable if we let the kernel
       choose the address.  */
# elif defined(__x86_64__) && defined(MAP_32BIT)
    /* Force the memory down into low memory with the executable.
       Leave the choice of exact location with the kernel.  */
    flags |= MAP_32BIT;
    /* Cannot expect to map more than 800MB in low memory.  */
    if (size > 800u * 1024 * 1024) {
        tcg_ctx.code_gen_buffer_size = size = 800u * 1024 * 1024;
    }
# elif defined(__sparc__)
    start = 0x40000000ul;
# elif defined(__s390x__)
    start = 0x90000000ul;
# elif defined(__mips__)
#  if _MIPS_SIM == _ABI64
    start = 0x128000000ul;
#  else
    start = 0x08000000ul;
#  endif
# endif

    buf = mmap((void *)start, size + qemu_real_host_page_size,
               PROT_NONE, flags, -1, 0);
    if (buf == MAP_FAILED) {
        return NULL;
    }

#ifdef __mips__
    if (cross_256mb(buf, size)) {
        /* Try again, with the original still mapped, to avoid re-acquiring
           the same 256mb crossing.  */
        size_t size2;
        void *buf2 = mmap(NULL, size + qemu_real_host_page_size,
                          PROT_NONE, flags, -1, 0);
        switch ((int)(buf2 != MAP_FAILED)) {
        case 1:
            if (!cross_256mb(buf2, size)) {
                /* Success!  Use the new buffer.  */
                munmap(buf, size + qemu_real_host_page_size);
                break;
            }
            /* Failure.  Work with what we had.  */
            munmap(buf2, size + qemu_real_host_page_size);
            /* fallthru */
        default:
            /* Split the original buffer.  Free the smaller half.  */
            buf2 = split_cross_256mb(buf, size);
            size2 = tcg_ctx.code_gen_buffer_size;
            if (buf == buf2) {
                munmap(buf + size2 + qemu_real_host_page_size, size - size2);
            } else {
                munmap(buf, size - size2);
            }
            size = size2;
            break;
        }
        buf = buf2;
    }
#endif

    /* Make the final buffer accessible.  The guard page at the end
       will remain inaccessible with PROT_NONE.  */
    mprotect(buf, size, PROT_WRITE | PROT_READ | PROT_EXEC);

    /* Request large pages for the buffer.  */
    qemu_madvise(buf, size, QEMU_MADV_HUGEPAGE);

    return buf;
}
#endif /* USE_STATIC_CODE_GEN_BUFFER, WIN32, POSIX */

static inline void code_gen_alloc(size_t tb_size)
{
    tcg_ctx.code_gen_buffer_size = size_code_gen_buffer(tb_size);
    tcg_ctx.code_gen_buffer = alloc_code_gen_buffer();
    if (tcg_ctx.code_gen_buffer == NULL) {
        fprintf(stderr, "Could not allocate dynamic translator buffer\n");
        exit(1);
    }

    /* Estimate a good size for the number of TBs we can support.  We
       still haven't deducted the prologue from the buffer size here,
       but that's minimal and won't affect the estimate much anyway.  */
    tcg_ctx.code_gen_max_blocks
        = tcg_ctx.code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
    tcg_ctx.tb_ctx.tbs = g_new(TranslationBlock, tcg_ctx.code_gen_max_blocks);

    qemu_mutex_init(&tcg_ctx.tb_ctx.tb_lock);
}

static void tb_htable_init(void)
{
    unsigned int mode = QHT_MODE_AUTO_RESIZE;

    qht_init(&tcg_ctx.tb_ctx.htable, CODE_GEN_HTABLE_SIZE, mode);
}

/* Must be called before using the QEMU cpus.  'tb_size' is the size
   (in bytes) allocated to the translation buffer.  Zero means default
   size.  */
void tcg_exec_init(unsigned long tb_size)
{
    cpu_gen_init();
    page_init();
    tb_htable_init();
    code_gen_alloc(tb_size);
#if defined(CONFIG_SOFTMMU)
    /* There's no guest base to take into account, so go ahead and
       initialize the prologue now.  */
    tcg_prologue_init(&tcg_ctx);
#endif
}

bool tcg_enabled(void)
{
    return tcg_ctx.code_gen_buffer != NULL;
}

/*
 * Allocate a new translation block.  Flush the translation buffer if
 * too many translation blocks or too much generated code.
 *
 * Called with tb_lock held.
 */
static TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    assert_tb_locked();

    if (tcg_ctx.tb_ctx.nb_tbs >= tcg_ctx.code_gen_max_blocks) {
        return NULL;
    }
    tb = &tcg_ctx.tb_ctx.tbs[tcg_ctx.tb_ctx.nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    tb->invalid = false;
    return tb;
}

/* Called with tb_lock held.  */
void tb_free(TranslationBlock *tb)
{
    assert_tb_locked();

    /* In practice this is mostly used for single use temporary TBs.
       Ignore the hard cases and just back up if this TB happens to
       be the last one generated.  */
    if (tcg_ctx.tb_ctx.nb_tbs > 0 &&
            tb == &tcg_ctx.tb_ctx.tbs[tcg_ctx.tb_ctx.nb_tbs - 1]) {
        tcg_ctx.code_gen_ptr = tb->tc_ptr;
        tcg_ctx.tb_ctx.nb_tbs--;
    }
}

static inline void invalidate_page_bitmap(PageDesc *p)
{
#ifdef CONFIG_SOFTMMU
    g_free(p->code_bitmap);
    p->code_bitmap = NULL;
    p->code_write_count = 0;
#endif
}

/* Set to NULL all the 'first_tb' fields in all PageDescs.  */
static void page_flush_tb_1(int level, void **lp)
{
    int i;

    if (*lp == NULL) {
        return;
    }
    if (level == 0) {
        PageDesc *pd = *lp;

        for (i = 0; i < V_L2_SIZE; ++i) {
            pd[i].first_tb = NULL;
            invalidate_page_bitmap(pd + i);
        }
    } else {
        void **pp = *lp;

        for (i = 0; i < V_L2_SIZE; ++i) {
            page_flush_tb_1(level - 1, pp + i);
        }
    }
}

static void page_flush_tb(void)
{
    int i, l1_sz = v_l1_size;

    for (i = 0; i < l1_sz; i++) {
        page_flush_tb_1(v_l2_levels, l1_map + i);
    }
}

/* flush all the translation blocks */
static void do_tb_flush(CPUState *cpu, run_on_cpu_data tb_flush_count)
{
    tb_lock();

    /* If it has already been done on request of another CPU,
     * just retry.
     */
    if (tcg_ctx.tb_ctx.tb_flush_count != tb_flush_count.host_int) {
        goto done;
    }

#if defined(DEBUG_TB_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer),
           tcg_ctx.tb_ctx.nb_tbs, tcg_ctx.tb_ctx.nb_tbs > 0 ?
           ((unsigned long)(tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer)) /
           tcg_ctx.tb_ctx.nb_tbs : 0);
#endif
    if ((unsigned long)(tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer)
        > tcg_ctx.code_gen_buffer_size) {
        cpu_abort(cpu, "Internal error: code buffer overflow\n");
    }

    CPU_FOREACH(cpu) {
        int i;

        for (i = 0; i < TB_JMP_CACHE_SIZE; ++i) {
            atomic_set(&cpu->tb_jmp_cache[i], NULL);
        }
    }

    tcg_ctx.tb_ctx.nb_tbs = 0;
    qht_reset_size(&tcg_ctx.tb_ctx.htable, CODE_GEN_HTABLE_SIZE);
    page_flush_tb();

    tcg_ctx.code_gen_ptr = tcg_ctx.code_gen_buffer;
    /* Publish the new count so that a later tb_flush request can see
       that this one has already been done.  */
    atomic_mb_set(&tcg_ctx.tb_ctx.tb_flush_count,
                  tcg_ctx.tb_ctx.tb_flush_count + 1);

done:
    tb_unlock();
}

void tb_flush(CPUState *cpu)
{
    if (tcg_enabled()) {
        unsigned tb_flush_count = atomic_mb_read(&tcg_ctx.tb_ctx.tb_flush_count);
        async_safe_run_on_cpu(cpu, do_tb_flush,
                              RUN_ON_CPU_HOST_INT(tb_flush_count));
    }
}

#ifdef DEBUG_TB_CHECK

static void
do_tb_invalidate_check(struct qht *ht, void *p, uint32_t hash, void *userp)
{
    TranslationBlock *tb = p;
    target_ulong addr = *(target_ulong *)userp;

    if (!(addr + TARGET_PAGE_SIZE <= tb->pc || addr >= tb->pc + tb->size)) {
        printf("ERROR invalidate: address=" TARGET_FMT_lx
               " PC=%08lx size=%04x\n", addr, (long)tb->pc, tb->size);
    }
}

/* verify that no TB intersects the invalidated page
 *
 * Called with tb_lock held.
 */
static void tb_invalidate_check(target_ulong address)
{
    address &= TARGET_PAGE_MASK;
    qht_iter(&tcg_ctx.tb_ctx.htable, do_tb_invalidate_check, &address);
}

static void
do_tb_page_check(struct qht *ht, void *p, uint32_t hash, void *userp)
{
    TranslationBlock *tb = p;
    int flags1, flags2;

    flags1 = page_get_flags(tb->pc);
    flags2 = page_get_flags(tb->pc + tb->size - 1);
    if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
        printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
               (long)tb->pc, tb->size, flags1, flags2);
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    qht_iter(&tcg_ctx.tb_ctx.htable, do_tb_page_check, NULL);
}

#endif

static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for (;;) {
        tb1 = *ptb;
        n1 = (uintptr_t)tb1 & 3;
        tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}

/* remove the TB from the jump list that it belongs to via slot 'n' */
static inline void tb_remove_from_jmp_list(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1;
    uintptr_t *ptb, ntb;
    unsigned int n1;

    ptb = &tb->jmp_list_next[n];
    if (*ptb) {
        /* find tb(n) in circular list */
        for (;;) {
            ntb = *ptb;
            n1 = ntb & 3;
            tb1 = (TranslationBlock *)(ntb & ~3);
            if (n1 == n && tb1 == tb) {
                break;
            }
            if (n1 == 2) {
                ptb = &tb1->jmp_list_first;
            } else {
                ptb = &tb1->jmp_list_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_list_next[n];

        tb->jmp_list_next[n] = (uintptr_t)NULL;
    }
}

/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    uintptr_t addr = (uintptr_t)(tb->tc_ptr + tb->jmp_reset_offset[n]);
    tb_set_jmp_target(tb, n, addr);
}

/* remove any jumps to the TB */
static inline void tb_jmp_unlink(TranslationBlock *tb)
{
    TranslationBlock *tb1;
    uintptr_t *ptb, ntb;
    unsigned int n1;

    ptb = &tb->jmp_list_first;
    for (;;) {
        ntb = *ptb;
        n1 = ntb & 3;
        tb1 = (TranslationBlock *)(ntb & ~3);
        if (n1 == 2) {
            break;
        }
        tb_reset_jump(tb1, n1);
        *ptb = tb1->jmp_list_next[n1];
        tb1->jmp_list_next[n1] = (uintptr_t)NULL;
    }
}
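
/*
 * Note on the encoding walked above (a description added for clarity,
 * not from the original source): jump-list entries are
 * TranslationBlock pointers with the low two bits used as a tag,
 * relying on TBs being at least 4-byte aligned.  A tag of 0 or 1 says
 * the entry continues at the pointed-to TB's jmp_list_next[0] or [1],
 * and a tag of 2 marks the head entry stored in jmp_list_first, which
 * is how the loops above detect the end of the circular list.
 */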

/* invalidate one TB
 *
 * Called with tb_lock held.
 */
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
{
    CPUState *cpu;
    PageDesc *p;
    uint32_t h;
    tb_page_addr_t phys_pc;

    assert_tb_locked();

    atomic_set(&tb->invalid, true);

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_hash_func(phys_pc, tb->pc, tb->flags);
    qht_remove(&tcg_ctx.tb_ctx.htable, tb, h);

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    /* remove the TB from the per-CPU jump caches */
    h = tb_jmp_cache_hash_func(tb->pc);
    CPU_FOREACH(cpu) {
        if (atomic_read(&cpu->tb_jmp_cache[h]) == tb) {
            atomic_set(&cpu->tb_jmp_cache[h], NULL);
        }
    }

    /* suppress this TB from the two jump lists */
    tb_remove_from_jmp_list(tb, 0);
    tb_remove_from_jmp_list(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb_jmp_unlink(tb);

    tcg_ctx.tb_ctx.tb_phys_invalidate_count++;
}

#ifdef CONFIG_SOFTMMU
static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = bitmap_new(TARGET_PAGE_SIZE);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (uintptr_t)tb & 3;
        tb = (TranslationBlock *)((uintptr_t)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE) {
                tb_end = TARGET_PAGE_SIZE;
            }
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        bitmap_set(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}
#endif
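
/*
 * Example of what the bitmap buys us (illustrative, not from the
 * original source): suppose a 4KB guest page holds translated code
 * only in bytes [0x100, 0x180).  A guest store to offset 0x800 of that
 * page still traps into the SMC slow path, but
 * tb_invalidate_phys_page_fast can consult the bitmap, see that no
 * translation covers 0x800, and return without flushing any TB.
 */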

/* add the tb in the target page and protect it if necessary
 *
 * Called with mmap_lock held for user-mode emulation.
 */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, tb_page_addr_t page_addr)
{
    PageDesc *p;
#ifndef CONFIG_USER_ONLY
    bool page_already_protected;
#endif

    assert_memory_lock();

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
    tb->page_next[n] = p->first_tb;
#ifndef CONFIG_USER_ONLY
    page_already_protected = p->first_tb != NULL;
#endif
    p->first_tb = (TranslationBlock *)((uintptr_t)tb | n);
    invalidate_page_bitmap(p);

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        for (addr = page_addr; addr < page_addr + qemu_host_page_size;
            addr += TARGET_PAGE_SIZE) {

            p2 = page_find(addr >> TARGET_PAGE_BITS);
            if (!p2) {
                continue;
            }
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
        }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
               page_addr);
#endif
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!page_already_protected) {
        tlb_protect_code(page_addr);
    }
#endif
}

/* add a new TB and link it to the physical page tables. phys_page2 is
 * (-1) to indicate that only one page contains the TB.
 *
 * Called with mmap_lock held for user-mode emulation.
 */
static void tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
                         tb_page_addr_t phys_page2)
{
    uint32_t h;

    assert_memory_lock();

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1) {
        tb_alloc_page(tb, 1, phys_page2);
    } else {
        tb->page_addr[1] = -1;
    }

    /* add in the hash table */
    h = tb_hash_func(phys_pc, tb->pc, tb->flags);
    qht_insert(&tcg_ctx.tb_ctx.htable, tb, h);

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
}

/* Called with mmap_lock held for user mode emulation.  */
TranslationBlock *tb_gen_code(CPUState *cpu,
                              target_ulong pc, target_ulong cs_base,
                              uint32_t flags, int cflags)
{
    CPUArchState *env = cpu->env_ptr;
    TranslationBlock *tb;
    tb_page_addr_t phys_pc, phys_page2;
    target_ulong virt_page2;
    tcg_insn_unit *gen_code_buf;
    int gen_code_size, search_size;
#ifdef CONFIG_PROFILER
    int64_t ti;
#endif
    assert_memory_lock();

    phys_pc = get_page_addr_code(env, pc);
    if (use_icount && !(cflags & CF_IGNORE_ICOUNT)) {
        cflags |= CF_USE_ICOUNT;
    }

    tb = tb_alloc(pc);
    if (unlikely(!tb)) {
 buffer_overflow:
        /* flush must be done */
        tb_flush(cpu);
        mmap_unlock();
        /* Make the execution loop process the flush as soon as possible.  */
        cpu->exception_index = EXCP_INTERRUPT;
        cpu_loop_exit(cpu);
    }

    gen_code_buf = tcg_ctx.code_gen_ptr;
    tb->tc_ptr = gen_code_buf;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;

#ifdef CONFIG_PROFILER
    tcg_ctx.tb_count1++; /* includes aborted translations because of
                            exceptions */
    ti = profile_getclock();
#endif

    tcg_func_start(&tcg_ctx);

    tcg_ctx.cpu = ENV_GET_CPU(env);
    gen_intermediate_code(env, tb);
    tcg_ctx.cpu = NULL;

    trace_translate_block(tb, tb->pc, tb->tc_ptr);

    /* generate machine code */
    tb->jmp_reset_offset[0] = TB_JMP_RESET_OFFSET_INVALID;
    tb->jmp_reset_offset[1] = TB_JMP_RESET_OFFSET_INVALID;
    tcg_ctx.tb_jmp_reset_offset = tb->jmp_reset_offset;
#ifdef USE_DIRECT_JUMP
    tcg_ctx.tb_jmp_insn_offset = tb->jmp_insn_offset;
    tcg_ctx.tb_jmp_target_addr = NULL;
#else
    tcg_ctx.tb_jmp_insn_offset = NULL;
    tcg_ctx.tb_jmp_target_addr = tb->jmp_target_addr;
#endif

#ifdef CONFIG_PROFILER
    tcg_ctx.tb_count++;
    tcg_ctx.interm_time += profile_getclock() - ti;
    tcg_ctx.code_time -= profile_getclock();
#endif

    /* ??? Overflow could be handled better here.  In particular, we
       don't need to re-do gen_intermediate_code, nor should we re-do
       the tcg optimization currently hidden inside tcg_gen_code.  All
       that should be required is to flush the TBs, allocate a new TB,
       re-initialize it per above, and re-do the actual code generation.  */
    gen_code_size = tcg_gen_code(&tcg_ctx, tb);
    if (unlikely(gen_code_size < 0)) {
        goto buffer_overflow;
    }
    search_size = encode_search(tb, (void *)gen_code_buf + gen_code_size);
    if (unlikely(search_size < 0)) {
        goto buffer_overflow;
    }

#ifdef CONFIG_PROFILER
    tcg_ctx.code_time += profile_getclock();
    tcg_ctx.code_in_len += tb->size;
    tcg_ctx.code_out_len += gen_code_size;
    tcg_ctx.search_out_len += search_size;
#endif

#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_OUT_ASM) &&
        qemu_log_in_addr_range(tb->pc)) {
        qemu_log_lock();
        qemu_log("OUT: [size=%d]\n", gen_code_size);
        log_disas(tb->tc_ptr, gen_code_size);
        qemu_log("\n");
        qemu_log_flush();
        qemu_log_unlock();
    }
#endif

    tcg_ctx.code_gen_ptr = (void *)
        ROUND_UP((uintptr_t)gen_code_buf + gen_code_size + search_size,
                 CODE_GEN_ALIGN);

    /* init jump list */
    assert(((uintptr_t)tb & 3) == 0);
    tb->jmp_list_first = (uintptr_t)tb | 2;
    tb->jmp_list_next[0] = (uintptr_t)NULL;
    tb->jmp_list_next[1] = (uintptr_t)NULL;

    /* init original jump addresses which have been set during tcg_gen_code() */
    if (tb->jmp_reset_offset[0] != TB_JMP_RESET_OFFSET_INVALID) {
        tb_reset_jump(tb, 0);
    }
    if (tb->jmp_reset_offset[1] != TB_JMP_RESET_OFFSET_INVALID) {
        tb_reset_jump(tb, 1);
    }

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_page_addr_code(env, virt_page2);
    }

    /* As long as consistency of the TB structures is provided by tb_lock
     * in user mode and is implicit in single-threaded softmmu emulation,
     * no explicit memory barrier is required before tb_link_page() makes
     * the TB visible through the physical hash table and page list.
     */
    tb_link_page(tb, phys_pc, phys_page2);
    return tb;
}

/*
 * Invalidate all TBs which intersect with the target physical address range
 * [start;end[.  NOTE: start and end may refer to *different* physical pages.
 * 'is_cpu_write_access' should be true if called from a real cpu write
 * access: the virtual CPU will exit the current TB if code is modified inside
 * this TB.
 *
 * Called with mmap_lock held for user-mode emulation, grabs tb_lock
 * Called with tb_lock held in system-mode emulation
 */
static void tb_invalidate_phys_range_1(tb_page_addr_t start, tb_page_addr_t end)
{
    while (start < end) {
        tb_invalidate_phys_page_range(start, end, 0);
        start &= TARGET_PAGE_MASK;
        start += TARGET_PAGE_SIZE;
    }
}

#ifdef CONFIG_SOFTMMU
void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end)
{
    assert_tb_locked();
    tb_invalidate_phys_range_1(start, end);
}
#else
void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end)
{
    assert_memory_lock();
    tb_lock();
    tb_invalidate_phys_range_1(start, end);
    tb_unlock();
}
#endif

/*
 * Invalidate all TBs which intersect with the target physical address range
 * [start;end[.  NOTE: start and end must refer to the *same* physical page.
 * 'is_cpu_write_access' should be true if called from a real cpu write
 * access: the virtual CPU will exit the current TB if code is modified inside
 * this TB.
 *
 * Called with tb_lock/mmap_lock held for user-mode emulation
 * Called with tb_lock held in system-mode emulation
 */
void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
                                   int is_cpu_write_access)
{
    TranslationBlock *tb, *tb_next;
#if defined(TARGET_HAS_PRECISE_SMC)
    CPUState *cpu = current_cpu;
    CPUArchState *env = NULL;
#endif
    tb_page_addr_t tb_start, tb_end;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    int current_tb_not_found = is_cpu_write_access;
    TranslationBlock *current_tb = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    uint32_t current_flags = 0;
#endif /* TARGET_HAS_PRECISE_SMC */

    assert_memory_lock();
    assert_tb_locked();

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p) {
        return;
    }
#if defined(TARGET_HAS_PRECISE_SMC)
    if (cpu != NULL) {
        env = cpu->env_ptr;
    }
#endif

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all
       the code */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (uintptr_t)tb & 3;
        tb = (TranslationBlock *)((uintptr_t)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (cpu->mem_io_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(cpu->mem_io_pc);
                }
            }
            if (current_tb == tb &&
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */
                current_tb_modified = 1;
                cpu_restore_state_from_tb(cpu, current_tb, cpu->mem_io_pc);
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                     &current_flags);
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            tb_phys_invalidate(tb, -1);
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        tlb_unprotect_code(start);
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        tb_gen_code(cpu, current_pc, current_cs_base, current_flags, 1);
        cpu_loop_exit_noexc(cpu);
    }
#endif
}

#ifdef CONFIG_SOFTMMU
/* len must be <= 8 and start must be a multiple of len.
 * Called via softmmu_template.h when code areas are written to with
 * iothread mutex not held.
 */
void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
{
    PageDesc *p;

#if 0
    if (1) {
        qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                 cpu_single_env->mem_io_vaddr, len,
                 cpu_single_env->eip,
                 cpu_single_env->eip +
                 (intptr_t)cpu_single_env->segs[R_CS].base);
    }
#endif
    assert_memory_lock();

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p) {
        return;
    }
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD) {
        /* build code bitmap once the page has seen enough write
           faults to make the per-byte test worthwhile */
        build_page_bitmap(p);
    }
    if (p->code_bitmap) {
        unsigned int nr;
        unsigned long b;

        nr = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG - 1));
        if (b & ((1 << len) - 1)) {
            goto do_invalidate;
        }
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}
#else
/* Called with mmap_lock held.  If pc is not 0 then it indicates the
 * host PC of the faulting store instruction that caused this invalidate.
 * Returns true if the caller needs to abort execution of the current
 * TB (because it was modified by this store and the guest CPU has
 * precise-SMC semantics).
 */
static bool tb_invalidate_phys_page(tb_page_addr_t addr, uintptr_t pc)
{
    TranslationBlock *tb;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    TranslationBlock *current_tb = NULL;
    CPUState *cpu = current_cpu;
    CPUArchState *env = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    uint32_t current_flags = 0;
#endif

    assert_memory_lock();

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        return false;
    }

    tb_lock();
    tb = p->first_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
    if (cpu != NULL) {
        env = cpu->env_ptr;
    }
#endif
    while (tb != NULL) {
        n = (uintptr_t)tb & 3;
        tb = (TranslationBlock *)((uintptr_t)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            (current_tb->cflags & CF_COUNT_MASK) != 1) {
            /* If we are modifying the current TB, we must stop
               its execution. We could be more precise by checking
               that the modification is after the current PC, but it
               would require a specialized function to partially
               restore the CPU state */
            current_tb_modified = 1;
            cpu_restore_state_from_tb(cpu, current_tb, pc);
            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                 &current_flags);
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        tb_gen_code(cpu, current_pc, current_cs_base, current_flags, 1);
        /* tb_lock will be reset after cpu_loop_exit_noexc longjmps
         * back into the cpu_exec loop. */
        return true;
    }
#endif
    tb_unlock();

    return false;
}
#endif

/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
static TranslationBlock *tb_find_pc(uintptr_t tc_ptr)
{
    int m_min, m_max, m;
    uintptr_t v;
    TranslationBlock *tb;

    if (tcg_ctx.tb_ctx.nb_tbs <= 0) {
        return NULL;
    }
    if (tc_ptr < (uintptr_t)tcg_ctx.code_gen_buffer ||
        tc_ptr >= (uintptr_t)tcg_ctx.code_gen_ptr) {
        return NULL;
    }
    /* binary search (cf Knuth) */
    m_min = 0;
    m_max = tcg_ctx.tb_ctx.nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tcg_ctx.tb_ctx.tbs[m];
        v = (uintptr_t)tb->tc_ptr;
        if (v == tc_ptr) {
            return tb;
        } else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    return &tcg_ctx.tb_ctx.tbs[m_max];
}
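
/*
 * Why returning tbs[m_max] is correct (a note added for clarity): TBs
 * are carved out of code_gen_buffer by a bump allocator in the same
 * order they appear in the tbs[] array, so their tc_ptr values are
 * strictly increasing.  When the loop exits without an exact match,
 * m_max indexes the last TB whose tc_ptr lies below the searched host
 * address, i.e. the block containing it.  For example, with block
 * starts { 0x1000, 0x1400, 0x2000 }, a search for 0x1f00 lands on the
 * block starting at 0x1400.
 */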

#if !defined(CONFIG_USER_ONLY)
void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr)
{
    ram_addr_t ram_addr;
    MemoryRegion *mr;
    hwaddr l = 1;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr, &l, false);
    if (!(memory_region_is_ram(mr)
          || memory_region_is_romd(mr))) {
        rcu_read_unlock();
        return;
    }
    ram_addr = memory_region_get_ram_addr(mr) + addr;
    tb_lock();
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
    tb_unlock();
    rcu_read_unlock();
}
#endif /* !defined(CONFIG_USER_ONLY) */

/* Called with tb_lock held.  */
void tb_check_watchpoint(CPUState *cpu)
{
    TranslationBlock *tb;

    tb = tb_find_pc(cpu->mem_io_pc);
    if (tb) {
        /* We can use retranslation to find the PC.  */
        cpu_restore_state_from_tb(cpu, tb, cpu->mem_io_pc);
        tb_phys_invalidate(tb, -1);
    } else {
        /* The exception probably happened in a helper.  The CPU state should
           have been saved before calling it. Fetch the PC from there.  */
        CPUArchState *env = cpu->env_ptr;
        target_ulong pc, cs_base;
        tb_page_addr_t addr;
        uint32_t flags;

        cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
        addr = get_page_addr_code(env, pc);
        tb_invalidate_phys_range(addr, addr + 1);
    }
}

#ifndef CONFIG_USER_ONLY
/* in deterministic execution mode, instructions doing device I/Os
 * must be at the end of the TB.
 *
 * Called by softmmu_template.h, with iothread mutex not held.
 */
void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr)
{
#if defined(TARGET_MIPS) || defined(TARGET_SH4)
    CPUArchState *env = cpu->env_ptr;
#endif
    TranslationBlock *tb;
    uint32_t n, cflags;
    target_ulong pc, cs_base;
    uint32_t flags;

    tb_lock();
    tb = tb_find_pc(retaddr);
    if (!tb) {
        cpu_abort(cpu, "cpu_io_recompile: could not find TB for pc=%p",
                  (void *)retaddr);
    }
    n = cpu->icount_decr.u16.low + tb->icount;
    cpu_restore_state_from_tb(cpu, tb, retaddr);
    /* Calculate how many instructions had been executed before the fault
       occurred.  */
    n = n - cpu->icount_decr.u16.low;
    /* Generate a new TB ending on the I/O insn.  */
    n++;
    /* On MIPS and SH, delay slot instructions can only be restarted if
       they were already the first instruction in the TB.  If this is not
       the first instruction in a TB then re-execute the preceding
       branch.  */
#if defined(TARGET_MIPS)
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
        env->active_tc.PC -= (env->hflags & MIPS_HFLAG_B16 ? 2 : 4);
        cpu->icount_decr.u16.low++;
        env->hflags &= ~MIPS_HFLAG_BMASK;
    }
#elif defined(TARGET_SH4)
    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
        && n > 1) {
        env->pc -= 2;
        cpu->icount_decr.u16.low++;
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
    }
#endif
    /* This should never happen.  */
    if (n > CF_COUNT_MASK) {
        cpu_abort(cpu, "TB too big during recompile");
    }

    cflags = n | CF_LAST_IO;
    pc = tb->pc;
    cs_base = tb->cs_base;
    flags = tb->flags;
    tb_phys_invalidate(tb, -1);
    if (tb->cflags & CF_NOCACHE) {
        if (tb->orig_tb) {
            /* Invalidate original TB if this TB was generated in
             * cpu_exec_nocache() */
            tb_phys_invalidate(tb->orig_tb, -1);
        }
        tb_free(tb);
    }
    /* FIXME: In theory this could raise an exception.  In practice
       we have already translated the block once so it's probably ok.  */
    tb_gen_code(cpu, pc, cs_base, flags, cflags);

    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
     * the first in the TB) then we end up generating a whole new TB and
     * repeating the fault, which is horribly inefficient.
     * Better would be to execute just this insn uncached, or generate a
     * second new TB.
     *
     * cpu_loop_exit_noexc() will longjmp back to cpu_exec where the
     * tb_lock gets reset.
     */
    cpu_loop_exit_noexc(cpu);
}

void tb_flush_jmp_cache(CPUState *cpu, target_ulong addr)
{
    unsigned int i;

    /* Discard jump cache entries for any tb which might potentially
       overlap the flushed page.  */
    i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
    memset(&cpu->tb_jmp_cache[i], 0,
           TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));

    i = tb_jmp_cache_hash_page(addr);
    memset(&cpu->tb_jmp_cache[i], 0,
           TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
}

static void print_qht_statistics(FILE *f, fprintf_function cpu_fprintf,
                                 struct qht_stats hst)
{
    uint32_t hgram_opts;
    size_t hgram_bins;
    char *hgram;

    if (!hst.head_buckets) {
        return;
    }
    cpu_fprintf(f, "TB hash buckets %zu/%zu (%0.2f%% head buckets used)\n",
                hst.used_head_buckets, hst.head_buckets,
                (double)hst.used_head_buckets / hst.head_buckets * 100);

    hgram_opts = QDIST_PR_BORDER | QDIST_PR_LABELS;
    hgram_opts |= QDIST_PR_100X | QDIST_PR_PERCENT;
    if (qdist_xmax(&hst.occupancy) - qdist_xmin(&hst.occupancy) == 1) {
        hgram_opts |= QDIST_PR_NODECIMAL;
    }
    hgram = qdist_pr(&hst.occupancy, 10, hgram_opts);
    cpu_fprintf(f, "TB hash occupancy %0.2f%% avg chain occ. Histogram: %s\n",
                qdist_avg(&hst.occupancy) * 100, hgram);
    g_free(hgram);

    hgram_opts = QDIST_PR_BORDER | QDIST_PR_LABELS;
    hgram_bins = qdist_xmax(&hst.chain) - qdist_xmin(&hst.chain);
    if (hgram_bins > 10) {
        hgram_bins = 10;
    } else {
        hgram_bins = 0;
        hgram_opts |= QDIST_PR_NODECIMAL | QDIST_PR_NOBINRANGE;
    }
    hgram = qdist_pr(&hst.chain, hgram_bins, hgram_opts);
    cpu_fprintf(f, "TB hash avg chain %0.3f buckets. Histogram: %s\n",
                qdist_avg(&hst.chain), hgram);
    g_free(hgram);
}

void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;
    struct qht_stats hst;

    tb_lock();

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for (i = 0; i < tcg_ctx.tb_ctx.nb_tbs; i++) {
        tb = &tcg_ctx.tb_ctx.tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size) {
            max_target_code_size = tb->size;
        }
        if (tb->page_addr[1] != -1) {
            cross_page++;
        }
        if (tb->jmp_reset_offset[0] != TB_JMP_RESET_OFFSET_INVALID) {
            direct_jmp_count++;
            if (tb->jmp_reset_offset[1] != TB_JMP_RESET_OFFSET_INVALID) {
                direct_jmp2_count++;
            }
        }
    }

    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "gen code size %td/%zd\n",
                tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer,
                tcg_ctx.code_gen_highwater - tcg_ctx.code_gen_buffer);
    cpu_fprintf(f, "TB count %d/%d\n",
                tcg_ctx.tb_ctx.nb_tbs, tcg_ctx.code_gen_max_blocks);
    cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
                tcg_ctx.tb_ctx.nb_tbs ? target_code_size /
                tcg_ctx.tb_ctx.nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size %td bytes (expansion ratio: %0.1f)\n",
                tcg_ctx.tb_ctx.nb_tbs ? (tcg_ctx.code_gen_ptr -
                                         tcg_ctx.code_gen_buffer) /
                tcg_ctx.tb_ctx.nb_tbs : 0,
                target_code_size ? (double) (tcg_ctx.code_gen_ptr -
                                             tcg_ctx.code_gen_buffer) /
                target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n", cross_page,
                tcg_ctx.tb_ctx.nb_tbs ? (cross_page * 100) /
                tcg_ctx.tb_ctx.nb_tbs : 0);
    cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                tcg_ctx.tb_ctx.nb_tbs ? (direct_jmp_count * 100) /
                tcg_ctx.tb_ctx.nb_tbs : 0,
                direct_jmp2_count,
                tcg_ctx.tb_ctx.nb_tbs ? (direct_jmp2_count * 100) /
                tcg_ctx.tb_ctx.nb_tbs : 0);

    qht_statistics_init(&tcg_ctx.tb_ctx.htable, &hst);
    print_qht_statistics(f, cpu_fprintf, hst);
    qht_statistics_destroy(&hst);

    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count %u\n",
                atomic_read(&tcg_ctx.tb_ctx.tb_flush_count));
    cpu_fprintf(f, "TB invalidate count %d\n",
                tcg_ctx.tb_ctx.tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
    tcg_dump_info(f, cpu_fprintf);

    tb_unlock();
}

void dump_opcount_info(FILE *f, fprintf_function cpu_fprintf)
{
    tcg_dump_op_count(f, cpu_fprintf);
}
#else /* CONFIG_USER_ONLY */

void cpu_interrupt(CPUState *cpu, int mask)
{
    g_assert(qemu_mutex_iothread_locked());
    cpu->interrupt_request |= mask;
    cpu->icount_decr.u16.high = -1;
}

/*
 * Walks guest process memory "regions" one by one
 * and calls callback function 'fn' for each region.
 */
struct walk_memory_regions_data {
    walk_memory_regions_fn fn;
    void *priv;
    target_ulong start;
    int prot;
};

static int walk_memory_regions_end(struct walk_memory_regions_data *data,
                                   target_ulong end, int new_prot)
{
    if (data->start != -1u) {
        int rc = data->fn(data->priv, data->start, end, data->prot);
        if (rc != 0) {
            return rc;
        }
    }

    data->start = (new_prot ? end : -1u);
    data->prot = new_prot;

    return 0;
}

static int walk_memory_regions_1(struct walk_memory_regions_data *data,
                                 target_ulong base, int level, void **lp)
{
    target_ulong pa;
    int i, rc;

    if (*lp == NULL) {
        return walk_memory_regions_end(data, base, 0);
    }

    if (level == 0) {
        PageDesc *pd = *lp;

        for (i = 0; i < V_L2_SIZE; ++i) {
            int prot = pd[i].flags;

            pa = base | (i << TARGET_PAGE_BITS);
            if (prot != data->prot) {
                rc = walk_memory_regions_end(data, pa, prot);
                if (rc != 0) {
                    return rc;
                }
            }
        }
    } else {
        void **pp = *lp;

        for (i = 0; i < V_L2_SIZE; ++i) {
            pa = base | ((target_ulong)i <<
                (TARGET_PAGE_BITS + V_L2_BITS * level));
            rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
            if (rc != 0) {
                return rc;
            }
        }
    }

    return 0;
}

int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
{
    struct walk_memory_regions_data data;
    uintptr_t i, l1_sz = v_l1_size;

    data.fn = fn;
    data.priv = priv;
    data.start = -1u;
    data.prot = 0;

    for (i = 0; i < l1_sz; i++) {
        target_ulong base = i << (v_l1_shift + TARGET_PAGE_BITS);
        int rc = walk_memory_regions_1(&data, base, v_l2_levels, l1_map + i);
        if (rc != 0) {
            return rc;
        }
    }

    return walk_memory_regions_end(&data, 0, 0);
}

static int dump_region(void *priv, target_ulong start,
                       target_ulong end, unsigned long prot)
{
    FILE *f = (FILE *)priv;

    (void) fprintf(f, TARGET_FMT_lx"-"TARGET_FMT_lx
                   " "TARGET_FMT_lx" %c%c%c\n",
                   start, end, end - start,
                   ((prot & PAGE_READ) ? 'r' : '-'),
                   ((prot & PAGE_WRITE) ? 'w' : '-'),
                   ((prot & PAGE_EXEC) ? 'x' : '-'));

    return 0;
}

void page_dump(FILE *f)
{
    const int length = sizeof(target_ulong) * 2;
    (void) fprintf(f, "%-*s %-*s %-*s %s\n",
                   length, "start", length, "end", length, "size", "prot");
    walk_memory_regions(f, dump_region);
}

int page_get_flags(target_ulong address)
{
    PageDesc *p;

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p) {
        return 0;
    }
    return p->flags;
}

/* Modify the flags of a page and invalidate the code if necessary.
   The flag PAGE_WRITE_ORG is positioned automatically depending
   on PAGE_WRITE.  The mmap_lock should already be held.  */
void page_set_flags(target_ulong start, target_ulong end, int flags)
{
    target_ulong addr, len;

    /* This function should never be called with addresses outside the
       guest address space.  If this assert fires, it probably indicates
       a missing call to h2g_valid.  */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(end < ((target_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
#endif
    assert(start < end);
    assert_memory_lock();

    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    if (flags & PAGE_WRITE) {
        flags |= PAGE_WRITE_ORG;
    }

    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);

        /* If the page is becoming writable but still holds translated
           code, the code must be invalidated first.  */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr, 0);
        }
        p->flags = flags;
    }
}

int page_check_range(target_ulong start, target_ulong len, int flags)
{
    PageDesc *p;
    target_ulong end;
    target_ulong addr;

    /* This function should never be called with addresses outside the
       guest address space.  If this assert fires, it probably indicates
       a missing call to h2g_valid.  */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(start < ((target_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
#endif

    if (len == 0) {
        return 0;
    }
    if (start + len - 1 < start) {
        /* We've wrapped around.  */
        return -1;
    }

    /* must do before we lose bits in the next step */
    end = TARGET_PAGE_ALIGN(start + len);
    start = start & TARGET_PAGE_MASK;

    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        p = page_find(addr >> TARGET_PAGE_BITS);
        if (!p) {
            return -1;
        }
        if (!(p->flags & PAGE_VALID)) {
            return -1;
        }

        if ((flags & PAGE_READ) && !(p->flags & PAGE_READ)) {
            return -1;
        }
        if (flags & PAGE_WRITE) {
            if (!(p->flags & PAGE_WRITE_ORG)) {
                return -1;
            }
            /* unprotect the page if it was put read-only because it
               contains translated code */
            if (!(p->flags & PAGE_WRITE)) {
                if (!page_unprotect(addr, 0)) {
                    return -1;
                }
            }
        }
    }
    return 0;
}

/* called from signal handler: invalidate the code and unprotect the
 * page. Return 0 if the fault was not handled, 1 if it was handled,
 * and 2 if it was handled but the caller must cause the TB to be
 * immediately exited. (We can only return 2 if the 'pc' argument is
 * non-zero.)
 */
int page_unprotect(target_ulong address, uintptr_t pc)
{
    unsigned int prot;
    bool current_tb_invalidated;
    PageDesc *p;
    target_ulong host_start, host_end, addr;

    /* Technically this isn't safe inside a signal handler.  However we
       know this only ever happens in a synchronous SEGV handler, so in
       practice it seems to be ok.  */
    mmap_lock();

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p) {
        mmap_unlock();
        return 0;
    }

    /* if the page was really writable, then we change its
       protection back to writable */
    if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
        host_start = address & qemu_host_page_mask;
        host_end = host_start + qemu_host_page_size;

        prot = 0;
        current_tb_invalidated = false;
        for (addr = host_start ; addr < host_end ; addr += TARGET_PAGE_SIZE) {
            p = page_find(addr >> TARGET_PAGE_BITS);
            p->flags |= PAGE_WRITE;
            prot |= p->flags;

            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            current_tb_invalidated |= tb_invalidate_phys_page(addr, pc);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(addr);
#endif
        }
        mprotect((void *)g2h(host_start), qemu_host_page_size,
                 prot & PAGE_BITS);

        mmap_unlock();
        /* If current TB was invalidated return to main loop */
        return current_tb_invalidated ? 2 : 1;
    }
    mmap_unlock();
    return 0;
}
#endif /* CONFIG_USER_ONLY */