/*
 *  Host code generation
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#ifdef _WIN32
#include <windows.h>
#endif
#include "qemu/osdep.h"

#include "qemu-common.h"
#define NO_CPU_IO_DEFS
#include "cpu.h"
#include "trace.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg.h"
#if defined(CONFIG_USER_ONLY)
#include "qemu.h"
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
#include <sys/param.h>
#if __FreeBSD_version >= 700104
#define HAVE_KINFO_GETVMMAP
#define sigqueue sigqueue_freebsd  /* avoid redefinition */
#include <sys/proc.h>
#include <machine/profile.h>
#define _KERNEL
#include <sys/user.h>
#undef _KERNEL
#undef sigqueue
#include <libutil.h>
#endif
#endif
#else
#include "exec/address-spaces.h"
#endif

#include "exec/cputlb.h"
#include "exec/tb-hash.h"
#include "translate-all.h"
#include "qemu/bitmap.h"
#include "qemu/timer.h"
#include "exec/log.h"

/* #define DEBUG_TB_INVALIDATE */
/* #define DEBUG_TB_FLUSH */
/* #define DEBUG_LOCKING */
/* make various TB consistency checks */
/* #define DEBUG_TB_CHECK */

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif

/* Access to the various translation structures needs to be serialised
 * via locks for consistency.  This is automatic for SoftMMU based
 * system emulation due to its single threaded nature.  In user-mode
 * emulation access to the memory related structures is protected with
 * the mmap_lock.
 */
#ifdef DEBUG_LOCKING
#define DEBUG_MEM_LOCKS 1
#else
#define DEBUG_MEM_LOCKS 0
#endif

#ifdef CONFIG_SOFTMMU
#define assert_memory_lock() do { } while (0)
#else
#define assert_memory_lock() do {               \
        if (DEBUG_MEM_LOCKS) {                  \
            g_assert(have_mmap_lock());         \
        }                                       \
    } while (0)
#endif

#define SMC_BITMAP_USE_THRESHOLD 10

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
#ifdef CONFIG_SOFTMMU
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    unsigned long *code_bitmap;
#else
    unsigned long flags;
#endif
} PageDesc;

/* In system mode we want L1_MAP to be based on ram offsets,
   while in user mode we want it to be based on virtual addresses.  */
#if !defined(CONFIG_USER_ONLY)
#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
# define L1_MAP_ADDR_SPACE_BITS  HOST_LONG_BITS
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_PHYS_ADDR_SPACE_BITS
#endif
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_VIRT_ADDR_SPACE_BITS
#endif

/* Size of the L2 (and L3, etc) page tables.  */
#define V_L2_BITS 10
#define V_L2_SIZE (1 << V_L2_BITS)

uintptr_t qemu_host_page_size;
intptr_t qemu_host_page_mask;

/*
 * L1 Mapping properties
 */
static int v_l1_size;
static int v_l1_shift;
static int v_l2_levels;

/* The bottom level has pointers to PageDesc, and is indexed by
 * anything from 4 to (V_L2_BITS + 3) bits, depending on target page size.
 */
#define V_L1_MIN_BITS 4
#define V_L1_MAX_BITS (V_L2_BITS + 3)
#define V_L1_MAX_SIZE (1 << V_L1_MAX_BITS)

static void *l1_map[V_L1_MAX_SIZE];

/* code generation context */
TCGContext tcg_ctx;
bool parallel_cpus;

/* translation block context */
#ifdef CONFIG_USER_ONLY
__thread int have_tb_lock;
#endif

static void page_table_config_init(void)
{
    uint32_t v_l1_bits;

    assert(TARGET_PAGE_BITS);

    v_l1_bits = (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % V_L2_BITS;
    if (v_l1_bits < V_L1_MIN_BITS) {
        v_l1_bits += V_L2_BITS;
    }

    v_l1_size = 1 << v_l1_bits;
    v_l1_shift = L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - v_l1_bits;
    v_l2_levels = v_l1_shift / V_L2_BITS - 1;

    assert(v_l1_bits <= V_L1_MAX_BITS);
    assert(v_l1_shift % V_L2_BITS == 0);
    assert(v_l2_levels >= 0);
}
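
/*
 * Worked example (editorial, with assumed parameters): on a host with
 * L1_MAP_ADDR_SPACE_BITS == 64 and TARGET_PAGE_BITS == 12, we get
 * (64 - 12) % 10 == 2, which is below V_L1_MIN_BITS, so v_l1_bits is
 * bumped to 12.  That yields v_l1_size == 4096 entries, v_l1_shift ==
 * 40, and v_l2_levels == 40 / 10 - 1 == 3 intermediate radix levels.
 */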

void tb_lock(void)
{
#ifdef CONFIG_USER_ONLY
    assert(!have_tb_lock);
    qemu_mutex_lock(&tcg_ctx.tb_ctx.tb_lock);
    have_tb_lock++;
#endif
}

void tb_unlock(void)
{
#ifdef CONFIG_USER_ONLY
    assert(have_tb_lock);
    have_tb_lock--;
    qemu_mutex_unlock(&tcg_ctx.tb_ctx.tb_lock);
#endif
}

void tb_lock_reset(void)
{
#ifdef CONFIG_USER_ONLY
    if (have_tb_lock) {
        qemu_mutex_unlock(&tcg_ctx.tb_ctx.tb_lock);
        have_tb_lock = 0;
    }
#endif
}

#ifdef DEBUG_LOCKING
#define DEBUG_TB_LOCKS 1
#else
#define DEBUG_TB_LOCKS 0
#endif

#ifdef CONFIG_SOFTMMU
#define assert_tb_lock() do { } while (0)
#else
#define assert_tb_lock() do {               \
        if (DEBUG_TB_LOCKS) {               \
            g_assert(have_tb_lock);         \
        }                                   \
    } while (0)
#endif


static TranslationBlock *tb_find_pc(uintptr_t tc_ptr);

void cpu_gen_init(void)
{
    tcg_context_init(&tcg_ctx);
}

/* Encode VAL as a signed leb128 sequence at P.
   Return P incremented past the encoded value.  */
static uint8_t *encode_sleb128(uint8_t *p, target_long val)
{
    int more, byte;

    do {
        byte = val & 0x7f;
        val >>= 7;
        more = !((val == 0 && (byte & 0x40) == 0)
                 || (val == -1 && (byte & 0x40) != 0));
        if (more) {
            byte |= 0x80;
        }
        *p++ = byte;
    } while (more);

    return p;
}
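
/*
 * Encoding trace (editorial example, easy to verify by hand): val == 300
 * (0b1_0010_1100) emits 0xac (low seven bits 0x2c, continuation bit set)
 * followed by 0x02; val == -3 emits the single byte 0x7d, because after
 * one 7-bit group val has shifted down to -1 and bit 6 of the group is
 * set, so the decoder's sign extension reproduces the value.
 */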

/* Decode a signed leb128 sequence at *PP; increment *PP past the
   decoded value.  Return the decoded value.  */
static target_long decode_sleb128(uint8_t **pp)
{
    uint8_t *p = *pp;
    target_long val = 0;
    int byte, shift = 0;

    do {
        byte = *p++;
        val |= (target_ulong)(byte & 0x7f) << shift;
        shift += 7;
    } while (byte & 0x80);
    if (shift < TARGET_LONG_BITS && (byte & 0x40)) {
        val |= -(target_ulong)1 << shift;
    }

    *pp = p;
    return val;
}

/* Encode the data collected about the instructions while compiling TB.
   Place the data at BLOCK, and return the number of bytes consumed.

   The logical table consists of TARGET_INSN_START_WORDS target_ulong's,
   which come from the target's insn_start data, followed by a uintptr_t
   which comes from the host pc of the end of the code implementing the insn.

   Each line of the table is encoded as sleb128 deltas from the previous
   line.  The seed for the first line is { tb->pc, 0..., tb->tc_ptr }.
   That is, the first column is seeded with the guest pc, the last column
   with the host pc, and the middle columns with zeros.  */

static int encode_search(TranslationBlock *tb, uint8_t *block)
{
    uint8_t *highwater = tcg_ctx.code_gen_highwater;
    uint8_t *p = block;
    int i, j, n;

    tb->tc_search = block;

    for (i = 0, n = tb->icount; i < n; ++i) {
        target_ulong prev;

        for (j = 0; j < TARGET_INSN_START_WORDS; ++j) {
            if (i == 0) {
                prev = (j == 0 ? tb->pc : 0);
            } else {
                prev = tcg_ctx.gen_insn_data[i - 1][j];
            }
            p = encode_sleb128(p, tcg_ctx.gen_insn_data[i][j] - prev);
        }
        prev = (i == 0 ? 0 : tcg_ctx.gen_insn_end_off[i - 1]);
        p = encode_sleb128(p, tcg_ctx.gen_insn_end_off[i] - prev);

        /* Test for (pending) buffer overflow.  The assumption is that any
           one row beginning below the high water mark cannot overrun
           the buffer completely.  Thus we can test for overflow after
           encoding a row without having to check during encoding.  */
        if (unlikely(p > highwater)) {
            return -1;
        }
    }

    return p - block;
}
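
/*
 * Layout sketch (editorial): for a TB with two guest insns and
 * TARGET_INSN_START_WORDS == 1, the stream is
 *   sleb(pc0 - tb->pc), sleb(end0 - 0),
 *   sleb(pc1 - pc0),    sleb(end1 - end0)
 * i.e. one delta per insn-start word plus one host-code end-offset delta
 * per guest instruction, which is exactly what the decode loop in
 * cpu_restore_state_from_tb() below consumes.
 */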

/* The cpu state corresponding to 'searched_pc' is restored.
 */
static int cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb,
                                     uintptr_t searched_pc)
{
    target_ulong data[TARGET_INSN_START_WORDS] = { tb->pc };
    uintptr_t host_pc = (uintptr_t)tb->tc_ptr;
    CPUArchState *env = cpu->env_ptr;
    uint8_t *p = tb->tc_search;
    int i, j, num_insns = tb->icount;
#ifdef CONFIG_PROFILER
    int64_t ti = profile_getclock();
#endif

    searched_pc -= GETPC_ADJ;

    if (searched_pc < host_pc) {
        return -1;
    }

    /* Reconstruct the stored insn data while looking for the point at
       which the end of the insn exceeds the searched_pc.  */
    for (i = 0; i < num_insns; ++i) {
        for (j = 0; j < TARGET_INSN_START_WORDS; ++j) {
            data[j] += decode_sleb128(&p);
        }
        host_pc += decode_sleb128(&p);
        if (host_pc > searched_pc) {
            goto found;
        }
    }
    return -1;

 found:
    if (tb->cflags & CF_USE_ICOUNT) {
        assert(use_icount);
        /* Reset the cycle counter to the start of the block.  */
        cpu->icount_decr.u16.low += num_insns;
        /* Clear the IO flag.  */
        cpu->can_do_io = 0;
    }
    cpu->icount_decr.u16.low -= i;
    restore_state_to_opc(env, tb, data);

#ifdef CONFIG_PROFILER
    tcg_ctx.restore_time += profile_getclock() - ti;
    tcg_ctx.restore_count++;
#endif
    return 0;
}

bool cpu_restore_state(CPUState *cpu, uintptr_t retaddr)
{
    TranslationBlock *tb;
    bool r = false;

    tb_lock();
    tb = tb_find_pc(retaddr);
    if (tb) {
        cpu_restore_state_from_tb(cpu, tb, retaddr);
        if (tb->cflags & CF_NOCACHE) {
            /* one-shot translation, invalidate it immediately */
            tb_phys_invalidate(tb, -1);
            tb_free(tb);
        }
        r = true;
    }
    tb_unlock();

    return r;
}

void page_size_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
    qemu_real_host_page_size = getpagesize();
    qemu_real_host_page_mask = -(intptr_t)qemu_real_host_page_size;
    if (qemu_host_page_size == 0) {
        qemu_host_page_size = qemu_real_host_page_size;
    }
    if (qemu_host_page_size < TARGET_PAGE_SIZE) {
        qemu_host_page_size = TARGET_PAGE_SIZE;
    }
    qemu_host_page_mask = -(intptr_t)qemu_host_page_size;
}
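
/*
 * Example (editorial, assuming a 4 KiB host page and an 8 KiB target
 * page): qemu_host_page_size is raised to TARGET_PAGE_SIZE, so one
 * "host page" always covers a whole number of guest pages and the
 * mprotect() calls elsewhere in this file never split a guest page.
 * The masks are simply the negated sizes, e.g. -(intptr_t)0x2000 ==
 * ~(intptr_t)0x1fff.
 */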

static void page_init(void)
{
    page_size_init();
    page_table_config_init();

#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
    {
#ifdef HAVE_KINFO_GETVMMAP
        struct kinfo_vmentry *freep;
        int i, cnt;

        freep = kinfo_getvmmap(getpid(), &cnt);
        if (freep) {
            mmap_lock();
            for (i = 0; i < cnt; i++) {
                unsigned long startaddr, endaddr;

                startaddr = freep[i].kve_start;
                endaddr = freep[i].kve_end;
                if (h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                    } else {
#if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
                        endaddr = ~0ul;
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
#endif
                    }
                }
            }
            free(freep);
            mmap_unlock();
        }
#else
        FILE *f;

        last_brk = (unsigned long)sbrk(0);

        f = fopen("/compat/linux/proc/self/maps", "r");
        if (f) {
            mmap_lock();

            do {
                unsigned long startaddr, endaddr;
                int n;

                n = fscanf(f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);

                if (n == 2 && h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                    } else {
                        endaddr = ~0ul;
                    }
                    page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                }
            } while (!feof(f));

            fclose(f);
            mmap_unlock();
        }
#endif
    }
#endif
}

/* If alloc=1:
 * Called with tb_lock held for system emulation.
 * Called with mmap_lock held for user-mode emulation.
 */
static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
{
    PageDesc *pd;
    void **lp;
    int i;

    if (alloc) {
        assert_memory_lock();
    }

    /* Level 1.  Always allocated.  */
    lp = l1_map + ((index >> v_l1_shift) & (v_l1_size - 1));

    /* Level 2..N-1.  */
    for (i = v_l2_levels; i > 0; i--) {
        void **p = atomic_rcu_read(lp);

        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            p = g_new0(void *, V_L2_SIZE);
            atomic_rcu_set(lp, p);
        }

        lp = p + ((index >> (i * V_L2_BITS)) & (V_L2_SIZE - 1));
    }

    pd = atomic_rcu_read(lp);
    if (pd == NULL) {
        if (!alloc) {
            return NULL;
        }
        pd = g_new0(PageDesc, V_L2_SIZE);
        atomic_rcu_set(lp, pd);
    }

    return pd + (index & (V_L2_SIZE - 1));
}
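
/*
 * Index arithmetic, worked through (editorial, using the example
 * configuration above: V_L2_BITS == 10, v_l1_shift == 40, v_l2_levels
 * == 3): l1_map is indexed by page-index bits [51:40]; the three
 * intermediate levels consume bits [39:30], [29:20] and [19:10]; and
 * bits [9:0] select the PageDesc inside the final V_L2_SIZE block.
 */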

static inline PageDesc *page_find(tb_page_addr_t index)
{
    return page_find_alloc(index, 0);
}

#if defined(CONFIG_USER_ONLY)
/* Currently it is not recommended to allocate big chunks of data in
   user mode. It will change when a dedicated libc will be used.  */
/* ??? 64-bit hosts ought to have no problem mmaping data outside the
   region in which the guest needs to run.  Revisit this.  */
#define USE_STATIC_CODE_GEN_BUFFER
#endif

/* Minimum size of the code gen buffer.  This number is randomly chosen,
   but not so small that we can't have a fair number of TB's live.  */
#define MIN_CODE_GEN_BUFFER_SIZE     (1024u * 1024)

/* Maximum size of the code gen buffer we'd like to use.  Unless otherwise
   indicated, this is constrained by the range of direct branches on the
   host cpu, as used by the TCG implementation of goto_tb.  */
#if defined(__x86_64__)
# define MAX_CODE_GEN_BUFFER_SIZE  (2ul * 1024 * 1024 * 1024)
#elif defined(__sparc__)
# define MAX_CODE_GEN_BUFFER_SIZE  (2ul * 1024 * 1024 * 1024)
#elif defined(__powerpc64__)
# define MAX_CODE_GEN_BUFFER_SIZE  (2ul * 1024 * 1024 * 1024)
#elif defined(__powerpc__)
# define MAX_CODE_GEN_BUFFER_SIZE  (32u * 1024 * 1024)
#elif defined(__aarch64__)
# define MAX_CODE_GEN_BUFFER_SIZE  (128ul * 1024 * 1024)
#elif defined(__arm__)
# define MAX_CODE_GEN_BUFFER_SIZE  (16u * 1024 * 1024)
#elif defined(__s390x__)
  /* We have a +- 4GB range on the branches; leave some slop.  */
# define MAX_CODE_GEN_BUFFER_SIZE  (3ul * 1024 * 1024 * 1024)
#elif defined(__mips__)
  /* We have a 256MB branch region, but leave room to make sure the
     main executable is also within that region.  */
# define MAX_CODE_GEN_BUFFER_SIZE  (128ul * 1024 * 1024)
#else
# define MAX_CODE_GEN_BUFFER_SIZE  ((size_t)-1)
#endif

#define DEFAULT_CODE_GEN_BUFFER_SIZE_1 (32u * 1024 * 1024)

#define DEFAULT_CODE_GEN_BUFFER_SIZE \
  (DEFAULT_CODE_GEN_BUFFER_SIZE_1 < MAX_CODE_GEN_BUFFER_SIZE \
   ? DEFAULT_CODE_GEN_BUFFER_SIZE_1 : MAX_CODE_GEN_BUFFER_SIZE)

static inline size_t size_code_gen_buffer(size_t tb_size)
{
    /* Size the buffer.  */
    if (tb_size == 0) {
#ifdef USE_STATIC_CODE_GEN_BUFFER
        tb_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* ??? Needs adjustments.  */
        /* ??? If we relax the requirement that CONFIG_USER_ONLY use the
           static buffer, we could size this on RESERVED_VA, on the text
           segment size of the executable, or continue to use the default.  */
        tb_size = (unsigned long)(ram_size / 4);
#endif
    }
    if (tb_size < MIN_CODE_GEN_BUFFER_SIZE) {
        tb_size = MIN_CODE_GEN_BUFFER_SIZE;
    }
    if (tb_size > MAX_CODE_GEN_BUFFER_SIZE) {
        tb_size = MAX_CODE_GEN_BUFFER_SIZE;
    }
    return tb_size;
}
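
/*
 * Example (editorial): with no user-supplied size (tb_size == 0) and the
 * static buffer in use, this returns the 32 MiB default; a hypothetical
 * request for 4 GiB on an x86_64 host would be clamped to the 2 GiB
 * MAX_CODE_GEN_BUFFER_SIZE imposed by the +-2 GiB reach of the direct
 * branches emitted for goto_tb.
 */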

#ifdef __mips__
/* In order to use J and JAL within the code_gen_buffer, we require
   that the buffer not cross a 256MB boundary.  */
static inline bool cross_256mb(void *addr, size_t size)
{
    return ((uintptr_t)addr ^ ((uintptr_t)addr + size)) & ~0x0ffffffful;
}

/* We weren't able to allocate a buffer without crossing that boundary,
   so make do with the larger of the two pieces.  Returns the chosen
   piece and updates code_gen_buffer_size with its size.  */
static inline void *split_cross_256mb(void *buf1, size_t size1)
{
    void *buf2 = (void *)(((uintptr_t)buf1 + size1) & ~0x0ffffffful);
    size_t size2 = buf1 + size1 - buf2;

    size1 = buf2 - buf1;
    if (size1 < size2) {
        size1 = size2;
        buf1 = buf2;
    }

    tcg_ctx.code_gen_buffer_size = size1;
    return buf1;
}
#endif
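
/*
 * Illustrative case (editorial, made-up addresses): a 64 MiB buffer at
 * 0x0fe00000 ends at 0x13e00000, so bits 28 and up differ and
 * cross_256mb() is true.  split_cross_256mb() would keep the 62 MiB
 * above the 0x10000000 boundary in preference to the 2 MiB below it.
 */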

#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
    __attribute__((aligned(CODE_GEN_ALIGN)));

# ifdef _WIN32
static inline void do_protect(void *addr, long size, int prot)
{
    DWORD old_protect;
    VirtualProtect(addr, size, prot, &old_protect);
}

static inline void map_exec(void *addr, long size)
{
    do_protect(addr, size, PAGE_EXECUTE_READWRITE);
}

static inline void map_none(void *addr, long size)
{
    do_protect(addr, size, PAGE_NOACCESS);
}
# else
static inline void do_protect(void *addr, long size, int prot)
{
    uintptr_t start, end;

    start = (uintptr_t)addr;
    start &= qemu_real_host_page_mask;

    end = (uintptr_t)addr + size;
    end = ROUND_UP(end, qemu_real_host_page_size);

    mprotect((void *)start, end - start, prot);
}

static inline void map_exec(void *addr, long size)
{
    do_protect(addr, size, PROT_READ | PROT_WRITE | PROT_EXEC);
}

static inline void map_none(void *addr, long size)
{
    do_protect(addr, size, PROT_NONE);
}
# endif

static inline void *alloc_code_gen_buffer(void)
{
    void *buf = static_code_gen_buffer;
    size_t full_size, size;

    /* The size of the buffer, rounded down to end on a page boundary.  */
    full_size = (((uintptr_t)buf + sizeof(static_code_gen_buffer))
                 & qemu_real_host_page_mask) - (uintptr_t)buf;

    /* Reserve a guard page.  */
    size = full_size - qemu_real_host_page_size;

    /* Honor a command-line option limiting the size of the buffer.  */
    if (size > tcg_ctx.code_gen_buffer_size) {
        size = (((uintptr_t)buf + tcg_ctx.code_gen_buffer_size)
                & qemu_real_host_page_mask) - (uintptr_t)buf;
    }
    tcg_ctx.code_gen_buffer_size = size;

#ifdef __mips__
    if (cross_256mb(buf, size)) {
        buf = split_cross_256mb(buf, size);
        size = tcg_ctx.code_gen_buffer_size;
    }
#endif

    map_exec(buf, size);
    map_none(buf + size, qemu_real_host_page_size);
    qemu_madvise(buf, size, QEMU_MADV_HUGEPAGE);

    return buf;
}
#elif defined(_WIN32)
static inline void *alloc_code_gen_buffer(void)
{
    size_t size = tcg_ctx.code_gen_buffer_size;
    void *buf1, *buf2;

    /* Perform the allocation in two steps, so that the guard page
       is reserved but uncommitted.  */
    buf1 = VirtualAlloc(NULL, size + qemu_real_host_page_size,
                        MEM_RESERVE, PAGE_NOACCESS);
    if (buf1 != NULL) {
        buf2 = VirtualAlloc(buf1, size, MEM_COMMIT, PAGE_EXECUTE_READWRITE);
        assert(buf1 == buf2);
    }

    return buf1;
}
#else
static inline void *alloc_code_gen_buffer(void)
{
    int flags = MAP_PRIVATE | MAP_ANONYMOUS;
    uintptr_t start = 0;
    size_t size = tcg_ctx.code_gen_buffer_size;
    void *buf;

    /* Constrain the position of the buffer based on the host cpu.
       Note that these addresses are chosen in concert with the
       addresses assigned in the relevant linker script file.  */
# if defined(__PIE__) || defined(__PIC__)
    /* Don't bother setting a preferred location if we're building
       a position-independent executable.  We're more likely to get
       an address near the main executable if we let the kernel
       choose the address.  */
# elif defined(__x86_64__) && defined(MAP_32BIT)
    /* Force the memory down into low memory with the executable.
       Leave the choice of exact location with the kernel.  */
    flags |= MAP_32BIT;
    /* Cannot expect to map more than 800MB in low memory.  */
    if (size > 800u * 1024 * 1024) {
        tcg_ctx.code_gen_buffer_size = size = 800u * 1024 * 1024;
    }
# elif defined(__sparc__)
    start = 0x40000000ul;
# elif defined(__s390x__)
    start = 0x90000000ul;
# elif defined(__mips__)
#  if _MIPS_SIM == _ABI64
    start = 0x128000000ul;
#  else
    start = 0x08000000ul;
#  endif
# endif

    buf = mmap((void *)start, size + qemu_real_host_page_size,
               PROT_NONE, flags, -1, 0);
    if (buf == MAP_FAILED) {
        return NULL;
    }

#ifdef __mips__
    if (cross_256mb(buf, size)) {
        /* Try again, with the original still mapped, to avoid re-acquiring
           that 256mb crossing.  This time don't specify an address.  */
        size_t size2;
        void *buf2 = mmap(NULL, size + qemu_real_host_page_size,
                          PROT_NONE, flags, -1, 0);
        switch (buf2 != MAP_FAILED) {
        case 1:
            if (!cross_256mb(buf2, size)) {
                /* Success!  Use the new buffer.  */
                munmap(buf, size + qemu_real_host_page_size);
                break;
            }
            /* Failure.  Work with what we had.  */
            munmap(buf2, size + qemu_real_host_page_size);
            /* fallthru */
        default:
            /* Split the original buffer.  Free the smaller half.  */
            buf2 = split_cross_256mb(buf, size);
            size2 = tcg_ctx.code_gen_buffer_size;
            if (buf == buf2) {
                munmap(buf + size2 + qemu_real_host_page_size, size - size2);
            } else {
                munmap(buf, size - size2);
            }
            size = size2;
            break;
        }
        buf = buf2;
    }
#endif

    /* Make the final buffer accessible.  The guard page at the end
       will remain inaccessible with PROT_NONE.  */
    mprotect(buf, size, PROT_WRITE | PROT_READ | PROT_EXEC);

    /* Request large pages for the buffer.  */
    qemu_madvise(buf, size, QEMU_MADV_HUGEPAGE);

    return buf;
}
#endif /* USE_STATIC_CODE_GEN_BUFFER, _WIN32, POSIX */

static inline void code_gen_alloc(size_t tb_size)
{
    tcg_ctx.code_gen_buffer_size = size_code_gen_buffer(tb_size);
    tcg_ctx.code_gen_buffer = alloc_code_gen_buffer();
    if (tcg_ctx.code_gen_buffer == NULL) {
        fprintf(stderr, "Could not allocate dynamic translator buffer\n");
        exit(1);
    }

    /* Estimate a good size for the number of TBs we can support.  We
       still haven't deducted the prologue from the buffer size here,
       but that's minimal and won't affect the estimate much anyway.  */
    tcg_ctx.code_gen_max_blocks
        = tcg_ctx.code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
    tcg_ctx.tb_ctx.tbs = g_new(TranslationBlock, tcg_ctx.code_gen_max_blocks);

    qemu_mutex_init(&tcg_ctx.tb_ctx.tb_lock);
}
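
/*
 * Sizing note (editorial; the 128-byte figure is an assumed illustration,
 * the real value is whatever CODE_GEN_AVG_BLOCK_SIZE is defined to): a
 * 32 MiB buffer divided by an average block size of 128 bytes
 * pre-allocates room for roughly 256k TranslationBlock descriptors.
 * tb_alloc() below simply returns NULL once nb_tbs reaches this
 * estimate, and the caller responds by flushing the whole buffer.
 */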

static void tb_htable_init(void)
{
    unsigned int mode = QHT_MODE_AUTO_RESIZE;

    qht_init(&tcg_ctx.tb_ctx.htable, CODE_GEN_HTABLE_SIZE, mode);
}

/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer. Zero means default
   size. */
void tcg_exec_init(unsigned long tb_size)
{
    cpu_gen_init();
    page_init();
    tb_htable_init();
    code_gen_alloc(tb_size);
#if defined(CONFIG_SOFTMMU)
    /* There's no guest base to take into account, so go ahead and
       initialize the prologue now.  */
    tcg_prologue_init(&tcg_ctx);
#endif
}

bool tcg_enabled(void)
{
    return tcg_ctx.code_gen_buffer != NULL;
}

/*
 * Allocate a new translation block. Flush the translation buffer if
 * too many translation blocks or too much generated code.
 *
 * Called with tb_lock held.
 */
static TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    assert_tb_lock();

    if (tcg_ctx.tb_ctx.nb_tbs >= tcg_ctx.code_gen_max_blocks) {
        return NULL;
    }
    tb = &tcg_ctx.tb_ctx.tbs[tcg_ctx.tb_ctx.nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    tb->invalid = false;
    return tb;
}

/* Called with tb_lock held.  */
void tb_free(TranslationBlock *tb)
{
    assert_tb_lock();

    /* In practice this is mostly used for single-use temporary TBs.
       Ignore the hard cases and just back up if this TB happens to
       be the last one generated.  */
    if (tcg_ctx.tb_ctx.nb_tbs > 0 &&
            tb == &tcg_ctx.tb_ctx.tbs[tcg_ctx.tb_ctx.nb_tbs - 1]) {
        tcg_ctx.code_gen_ptr = tb->tc_ptr;
        tcg_ctx.tb_ctx.nb_tbs--;
    }
}

static inline void invalidate_page_bitmap(PageDesc *p)
{
#ifdef CONFIG_SOFTMMU
    g_free(p->code_bitmap);
    p->code_bitmap = NULL;
    p->code_write_count = 0;
#endif
}

/* Set to NULL all the 'first_tb' fields in all PageDescs. */
static void page_flush_tb_1(int level, void **lp)
{
    int i;

    if (*lp == NULL) {
        return;
    }
    if (level == 0) {
        PageDesc *pd = *lp;

        for (i = 0; i < V_L2_SIZE; ++i) {
            pd[i].first_tb = NULL;
            invalidate_page_bitmap(pd + i);
        }
    } else {
        void **pp = *lp;

        for (i = 0; i < V_L2_SIZE; ++i) {
            page_flush_tb_1(level - 1, pp + i);
        }
    }
}

static void page_flush_tb(void)
{
    int i, l1_sz = v_l1_size;

    for (i = 0; i < l1_sz; i++) {
        page_flush_tb_1(v_l2_levels, l1_map + i);
    }
}

/* flush all the translation blocks */
static void do_tb_flush(CPUState *cpu, run_on_cpu_data tb_flush_count)
{
    tb_lock();

    /* If it has already been done on request of another CPU,
     * just retry.
     */
    if (tcg_ctx.tb_ctx.tb_flush_count != tb_flush_count.host_int) {
        goto done;
    }

#if defined(DEBUG_TB_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer),
           tcg_ctx.tb_ctx.nb_tbs, tcg_ctx.tb_ctx.nb_tbs > 0 ?
           ((unsigned long)(tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer)) /
           tcg_ctx.tb_ctx.nb_tbs : 0);
#endif
    if ((unsigned long)(tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer)
        > tcg_ctx.code_gen_buffer_size) {
        cpu_abort(cpu, "Internal error: code buffer overflow\n");
    }

    CPU_FOREACH(cpu) {
        int i;

        for (i = 0; i < TB_JMP_CACHE_SIZE; ++i) {
            atomic_set(&cpu->tb_jmp_cache[i], NULL);
        }
    }

    tcg_ctx.tb_ctx.nb_tbs = 0;
    qht_reset_size(&tcg_ctx.tb_ctx.htable, CODE_GEN_HTABLE_SIZE);
    page_flush_tb();

    tcg_ctx.code_gen_ptr = tcg_ctx.code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    atomic_mb_set(&tcg_ctx.tb_ctx.tb_flush_count,
                  tcg_ctx.tb_ctx.tb_flush_count + 1);

done:
    tb_unlock();
}

void tb_flush(CPUState *cpu)
{
    if (tcg_enabled()) {
        unsigned tb_flush_count = atomic_mb_read(&tcg_ctx.tb_ctx.tb_flush_count);
        async_safe_run_on_cpu(cpu, do_tb_flush,
                              RUN_ON_CPU_HOST_INT(tb_flush_count));
    }
}

#ifdef DEBUG_TB_CHECK

static void
do_tb_invalidate_check(struct qht *ht, void *p, uint32_t hash, void *userp)
{
    TranslationBlock *tb = p;
    target_ulong addr = *(target_ulong *)userp;

    if (!(addr + TARGET_PAGE_SIZE <= tb->pc || addr >= tb->pc + tb->size)) {
        printf("ERROR invalidate: address=" TARGET_FMT_lx
               " PC=%08lx size=%04x\n", addr, (long)tb->pc, tb->size);
    }
}

/* verify that all the pages have correct rights for code
 *
 * Called with tb_lock held.
 */
static void tb_invalidate_check(target_ulong address)
{
    address &= TARGET_PAGE_MASK;
    qht_iter(&tcg_ctx.tb_ctx.htable, do_tb_invalidate_check, &address);
}

static void
do_tb_page_check(struct qht *ht, void *p, uint32_t hash, void *userp)
{
    TranslationBlock *tb = p;
    int flags1, flags2;

    flags1 = page_get_flags(tb->pc);
    flags2 = page_get_flags(tb->pc + tb->size - 1);
    if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
        printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
               (long)tb->pc, tb->size, flags1, flags2);
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    qht_iter(&tcg_ctx.tb_ctx.htable, do_tb_page_check, NULL);
}

#endif

static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for (;;) {
        tb1 = *ptb;
        n1 = (uintptr_t)tb1 & 3;
        tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}
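
/*
 * Pointer-tagging note (editorial): entries on a page's TB list, and on
 * the jump lists below, carry an index in their two low bits.  Values 0
 * and 1 select which of the TB's (up to) two physical pages or jump
 * slots the link belongs to, and 2 marks the list-head sentinel in
 * jmp_list_first.  The TranslationBlock pointer is recovered by masking
 * with ~3, which is why tb_gen_code() asserts 4-byte TB alignment.
 */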

/* remove the TB from a list of TBs jumping to the n-th jump target of the TB */
static inline void tb_remove_from_jmp_list(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1;
    uintptr_t *ptb, ntb;
    unsigned int n1;

    ptb = &tb->jmp_list_next[n];
    if (*ptb) {
        /* find tb(n) in circular list */
        for (;;) {
            ntb = *ptb;
            n1 = ntb & 3;
            tb1 = (TranslationBlock *)(ntb & ~3);
            if (n1 == n && tb1 == tb) {
                break;
            }
            if (n1 == 2) {
                ptb = &tb1->jmp_list_first;
            } else {
                ptb = &tb1->jmp_list_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_list_next[n];

        tb->jmp_list_next[n] = (uintptr_t)NULL;
    }
}

/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    uintptr_t addr = (uintptr_t)(tb->tc_ptr + tb->jmp_reset_offset[n]);
    tb_set_jmp_target(tb, n, addr);
}

/* remove any jumps to the TB */
static inline void tb_jmp_unlink(TranslationBlock *tb)
{
    TranslationBlock *tb1;
    uintptr_t *ptb, ntb;
    unsigned int n1;

    ptb = &tb->jmp_list_first;
    for (;;) {
        ntb = *ptb;
        n1 = ntb & 3;
        tb1 = (TranslationBlock *)(ntb & ~3);
        if (n1 == 2) {
            break;
        }
        tb_reset_jump(tb1, n1);
        *ptb = tb1->jmp_list_next[n1];
        tb1->jmp_list_next[n1] = (uintptr_t)NULL;
    }
}

/* invalidate one TB
 *
 * Called with tb_lock held.
 */
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
{
    CPUState *cpu;
    PageDesc *p;
    uint32_t h;
    tb_page_addr_t phys_pc;

    assert_tb_lock();

    atomic_set(&tb->invalid, true);

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_hash_func(phys_pc, tb->pc, tb->flags);
    qht_remove(&tcg_ctx.tb_ctx.htable, tb, h);

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    /* remove the TB from the per-CPU jump caches */
    h = tb_jmp_cache_hash_func(tb->pc);
    CPU_FOREACH(cpu) {
        if (atomic_read(&cpu->tb_jmp_cache[h]) == tb) {
            atomic_set(&cpu->tb_jmp_cache[h], NULL);
        }
    }

    /* suppress this TB from the two jump lists */
    tb_remove_from_jmp_list(tb, 0);
    tb_remove_from_jmp_list(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb_jmp_unlink(tb);

    tcg_ctx.tb_ctx.tb_phys_invalidate_count++;
}

#ifdef CONFIG_SOFTMMU
static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = bitmap_new(TARGET_PAGE_SIZE);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (uintptr_t)tb & 3;
        tb = (TranslationBlock *)((uintptr_t)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE) {
                tb_end = TARGET_PAGE_SIZE;
            }
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        bitmap_set(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}
#endif
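
/*
 * Example (editorial): a TB whose code begins 40 bytes before the end of
 * a page sets bits [TARGET_PAGE_SIZE - 40, TARGET_PAGE_SIZE) in that
 * page's bitmap, and, as the n == 1 entry of the following page, bits
 * [0, tb_end) there.  tb_invalidate_phys_page_fast() can then ignore
 * writes that do not hit any translated byte.
 */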

/* add the tb in the target page and protect it if necessary
 *
 * Called with mmap_lock held for user-mode emulation.
 */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, tb_page_addr_t page_addr)
{
    PageDesc *p;
#ifndef CONFIG_USER_ONLY
    bool page_already_protected;
#endif

    assert_memory_lock();

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
    tb->page_next[n] = p->first_tb;
#ifndef CONFIG_USER_ONLY
    page_already_protected = p->first_tb != NULL;
#endif
    p->first_tb = (TranslationBlock *)((uintptr_t)tb | n);
    invalidate_page_bitmap(p);

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        for (addr = page_addr; addr < page_addr + qemu_host_page_size;
             addr += TARGET_PAGE_SIZE) {

            p2 = page_find(addr >> TARGET_PAGE_BITS);
            if (!p2) {
                continue;
            }
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
        }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
               page_addr);
#endif
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!page_already_protected) {
        tlb_protect_code(page_addr);
    }
#endif
}

/* add a new TB and link it to the physical page tables. phys_page2 is
 * (-1) to indicate that only one page contains the TB.
 *
 * Called with mmap_lock held for user-mode emulation.
 */
static void tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
                         tb_page_addr_t phys_page2)
{
    uint32_t h;

    assert_memory_lock();

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1) {
        tb_alloc_page(tb, 1, phys_page2);
    } else {
        tb->page_addr[1] = -1;
    }

    /* add in the hash table */
    h = tb_hash_func(phys_pc, tb->pc, tb->flags);
    qht_insert(&tcg_ctx.tb_ctx.htable, tb, h);

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
}

/* Called with mmap_lock held for user mode emulation.  */
TranslationBlock *tb_gen_code(CPUState *cpu,
                              target_ulong pc, target_ulong cs_base,
                              uint32_t flags, int cflags)
{
    CPUArchState *env = cpu->env_ptr;
    TranslationBlock *tb;
    tb_page_addr_t phys_pc, phys_page2;
    target_ulong virt_page2;
    tcg_insn_unit *gen_code_buf;
    int gen_code_size, search_size;
#ifdef CONFIG_PROFILER
    int64_t ti;
#endif
    assert_memory_lock();

    phys_pc = get_page_addr_code(env, pc);
    if (use_icount && !(cflags & CF_IGNORE_ICOUNT)) {
        cflags |= CF_USE_ICOUNT;
    }

    tb = tb_alloc(pc);
    if (unlikely(!tb)) {
 buffer_overflow:
        /* flush must be done */
        tb_flush(cpu);
        mmap_unlock();
        cpu_loop_exit(cpu);
    }

    gen_code_buf = tcg_ctx.code_gen_ptr;
    tb->tc_ptr = gen_code_buf;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;

#ifdef CONFIG_PROFILER
    tcg_ctx.tb_count1++; /* includes aborted translations because of
                            exceptions */
    ti = profile_getclock();
#endif

    tcg_func_start(&tcg_ctx);

    tcg_ctx.cpu = ENV_GET_CPU(env);
    gen_intermediate_code(env, tb);
    tcg_ctx.cpu = NULL;

    trace_translate_block(tb, tb->pc, tb->tc_ptr);

    /* generate machine code */
    tb->jmp_reset_offset[0] = TB_JMP_RESET_OFFSET_INVALID;
    tb->jmp_reset_offset[1] = TB_JMP_RESET_OFFSET_INVALID;
    tcg_ctx.tb_jmp_reset_offset = tb->jmp_reset_offset;
#ifdef USE_DIRECT_JUMP
    tcg_ctx.tb_jmp_insn_offset = tb->jmp_insn_offset;
    tcg_ctx.tb_jmp_target_addr = NULL;
#else
    tcg_ctx.tb_jmp_insn_offset = NULL;
    tcg_ctx.tb_jmp_target_addr = tb->jmp_target_addr;
#endif

#ifdef CONFIG_PROFILER
    tcg_ctx.tb_count++;
    tcg_ctx.interm_time += profile_getclock() - ti;
    tcg_ctx.code_time -= profile_getclock();
#endif

    /* ??? Overflow could be handled better here.  In particular, we
       don't need to re-do gen_intermediate_code, nor should we re-do
       the tcg optimization currently hidden inside tcg_gen_code.  All
       that should be required is to flush the TBs, allocate a new TB,
       re-initialize it per above, and re-do the actual code generation.  */
    gen_code_size = tcg_gen_code(&tcg_ctx, tb);
    if (unlikely(gen_code_size < 0)) {
        goto buffer_overflow;
    }
    search_size = encode_search(tb, (void *)gen_code_buf + gen_code_size);
    if (unlikely(search_size < 0)) {
        goto buffer_overflow;
    }

#ifdef CONFIG_PROFILER
    tcg_ctx.code_time += profile_getclock();
    tcg_ctx.code_in_len += tb->size;
    tcg_ctx.code_out_len += gen_code_size;
    tcg_ctx.search_out_len += search_size;
#endif

#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_OUT_ASM) &&
        qemu_log_in_addr_range(tb->pc)) {
        qemu_log_lock();
        qemu_log("OUT: [size=%d]\n", gen_code_size);
        log_disas(tb->tc_ptr, gen_code_size);
        qemu_log("\n");
        qemu_log_flush();
        qemu_log_unlock();
    }
#endif

    tcg_ctx.code_gen_ptr = (void *)
        ROUND_UP((uintptr_t)gen_code_buf + gen_code_size + search_size,
                 CODE_GEN_ALIGN);

    /* init jump list */
    assert(((uintptr_t)tb & 3) == 0);
    tb->jmp_list_first = (uintptr_t)tb | 2;
    tb->jmp_list_next[0] = (uintptr_t)NULL;
    tb->jmp_list_next[1] = (uintptr_t)NULL;

    /* init original jump addresses which have been set during tcg_gen_code() */
    if (tb->jmp_reset_offset[0] != TB_JMP_RESET_OFFSET_INVALID) {
        tb_reset_jump(tb, 0);
    }
    if (tb->jmp_reset_offset[1] != TB_JMP_RESET_OFFSET_INVALID) {
        tb_reset_jump(tb, 1);
    }

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_page_addr_code(env, virt_page2);
    }

    /* As long as consistency of the TB stuff is provided by tb_lock in user
     * mode and is implicit in single-threaded softmmu emulation, no explicit
     * memory barrier is required before tb_link_page() makes the TB visible
     * through the physical hash table and physical page list.
     */
    tb_link_page(tb, phys_pc, phys_page2);
    return tb;
}

/*
 * Invalidate all TBs which intersect with the target physical address range
 * [start;end[. NOTE: start and end may refer to *different* physical pages.
 * 'is_cpu_write_access' should be true if called from a real cpu write
 * access: the virtual CPU will exit the current TB if code is modified inside
 * this TB.
 *
 * Called with mmap_lock held for user-mode emulation, grabs tb_lock
 * Called with tb_lock held in !user-mode
 */
static void tb_invalidate_phys_range_1(tb_page_addr_t start, tb_page_addr_t end)
{
    while (start < end) {
        tb_invalidate_phys_page_range(start, end, 0);
        start &= TARGET_PAGE_MASK;
        start += TARGET_PAGE_SIZE;
    }
}

#ifdef CONFIG_SOFTMMU
/* Called with tb_lock held.  */
void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end)
{
    assert_tb_lock();
    tb_invalidate_phys_range_1(start, end);
}
#else
/* Called with mmap_lock held.  */
void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end)
{
    assert_memory_lock();
    tb_lock();
    tb_invalidate_phys_range_1(start, end);
    tb_unlock();
}
#endif

/*
 * Invalidate all TBs which intersect with the target physical address range
 * [start;end[. NOTE: start and end must refer to the *same* physical page.
 * 'is_cpu_write_access' should be true if called from a real cpu write
 * access: the virtual CPU will exit the current TB if code is modified inside
 * this TB.
 *
 * Called with tb_lock/mmap_lock held for user-mode emulation
 * Called with tb_lock held in !user-mode
 */
void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
                                   int is_cpu_write_access)
{
    TranslationBlock *tb, *tb_next;
#if defined(TARGET_HAS_PRECISE_SMC)
    CPUState *cpu = current_cpu;
    CPUArchState *env = NULL;
#endif
    tb_page_addr_t tb_start, tb_end;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    int current_tb_not_found = is_cpu_write_access;
    TranslationBlock *current_tb = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    uint32_t current_flags = 0;
#endif

    assert_memory_lock();
    assert_tb_lock();

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p) {
        return;
    }
#if defined(TARGET_HAS_PRECISE_SMC)
    if (cpu != NULL) {
        env = cpu->env_ptr;
    }
#endif

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all
       the code */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (uintptr_t)tb & 3;
        tb = (TranslationBlock *)((uintptr_t)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (cpu->mem_io_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(cpu->mem_io_pc);
                }
            }
            if (current_tb == tb &&
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state_from_tb(cpu, current_tb, cpu->mem_io_pc);
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                     &current_flags);
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            tb_phys_invalidate(tb, -1);
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        tlb_unprotect_code(start);
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        tb_gen_code(cpu, current_pc, current_cs_base, current_flags, 1);
        cpu_loop_exit_noexc(cpu);
    }
#endif
}

#ifdef CONFIG_SOFTMMU
/* len must be <= 8 and start must be a multiple of len.
 * Called via softmmu_template.h when code areas are written to with
 * tb_lock held.
 */
void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
{
    PageDesc *p;

#if 0
    if (1) {
        qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                 cpu_single_env->mem_io_vaddr, len,
                 cpu_single_env->eip,
                 cpu_single_env->eip +
                 (intptr_t)cpu_single_env->segs[R_CS].base);
    }
#endif
    assert_memory_lock();

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p) {
        return;
    }
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD) {
        /* build code bitmap.  FIXME: writes should be protected by
         * tb_lock, reads by tb_lock or RCU.
         */
        build_page_bitmap(p);
    }
    if (p->code_bitmap) {
        unsigned int nr;
        unsigned long b;

        nr = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG - 1));
        if (b & ((1 << len) - 1)) {
            goto do_invalidate;
        }
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}
#else
/* Called with mmap_lock held. If pc is not 0 then it indicates the
 * host PC of the faulting store instruction that caused this invalidate.
 * Returns true if the caller needs to abort execution of the current
 * TB (because it was modified by this store and the guest CPU has
 * precise-SMC semantics).
 */
static bool tb_invalidate_phys_page(tb_page_addr_t addr, uintptr_t pc)
{
    TranslationBlock *tb;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    TranslationBlock *current_tb = NULL;
    CPUState *cpu = current_cpu;
    CPUArchState *env = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    uint32_t current_flags = 0;
#endif

    assert_memory_lock();

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        return false;
    }

    tb_lock();
    tb = p->first_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
    if (cpu != NULL) {
        env = cpu->env_ptr;
    }
#endif
    while (tb != NULL) {
        n = (uintptr_t)tb & 3;
        tb = (TranslationBlock *)((uintptr_t)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            (current_tb->cflags & CF_COUNT_MASK) != 1) {
            /* If we are modifying the current TB, we must stop
               its execution. We could be more precise by checking
               that the modification is after the current PC, but it
               would require a specialized function to partially
               restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state_from_tb(cpu, current_tb, pc);
            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                 &current_flags);
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        tb_gen_code(cpu, current_pc, current_cs_base, current_flags, 1);
        /* tb_lock will be reset after cpu_loop_exit_noexc longjmps
         * back into the cpu_exec loop. */
        return true;
    }
#endif
    tb_unlock();

    return false;
}
#endif

/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
static TranslationBlock *tb_find_pc(uintptr_t tc_ptr)
{
    int m_min, m_max, m;
    uintptr_t v;
    TranslationBlock *tb;

    if (tcg_ctx.tb_ctx.nb_tbs <= 0) {
        return NULL;
    }
    if (tc_ptr < (uintptr_t)tcg_ctx.code_gen_buffer ||
        tc_ptr >= (uintptr_t)tcg_ctx.code_gen_ptr) {
        return NULL;
    }
    /* binary search (cf Knuth) */
    m_min = 0;
    m_max = tcg_ctx.tb_ctx.nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tcg_ctx.tb_ctx.tbs[m];
        v = (uintptr_t)tb->tc_ptr;
        if (v == tc_ptr) {
            return tb;
        } else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    return &tcg_ctx.tb_ctx.tbs[m_max];
}
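
/*
 * Editorial note on why returning tbs[m_max] is safe: tbs[] is filled in
 * code_gen_ptr order, so tc_ptr values are sorted ascending and the loop
 * exits with m_max indexing the last TB whose code starts at or before
 * tc_ptr; addresses beyond the end of generated code were already
 * rejected by the range check against code_gen_ptr above.
 */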

#if !defined(CONFIG_USER_ONLY)
void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr)
{
    ram_addr_t ram_addr;
    MemoryRegion *mr;
    hwaddr l = 1;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr, &l, false);
    if (!(memory_region_is_ram(mr)
          || memory_region_is_romd(mr))) {
        rcu_read_unlock();
        return;
    }
    ram_addr = memory_region_get_ram_addr(mr) + addr;
    tb_lock();
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
    tb_unlock();
    rcu_read_unlock();
}
#endif /* !defined(CONFIG_USER_ONLY) */

void tb_check_watchpoint(CPUState *cpu)
{
    TranslationBlock *tb;

    tb = tb_find_pc(cpu->mem_io_pc);
    if (tb) {
        /* We can use retranslation to find the PC.  */
        cpu_restore_state_from_tb(cpu, tb, cpu->mem_io_pc);
        tb_phys_invalidate(tb, -1);
    } else {
        /* The exception probably happened in a helper.  The CPU state should
           have been saved before calling it. Fetch the PC from there.  */
        CPUArchState *env = cpu->env_ptr;
        target_ulong pc, cs_base;
        tb_page_addr_t addr;
        uint32_t flags;

        cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
        addr = get_page_addr_code(env, pc);
        tb_invalidate_phys_range(addr, addr + 1);
    }
}

#ifndef CONFIG_USER_ONLY
/* in deterministic execution mode, instructions doing device I/Os
   must be at the end of the TB */
void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr)
{
#if defined(TARGET_MIPS) || defined(TARGET_SH4)
    CPUArchState *env = cpu->env_ptr;
#endif
    TranslationBlock *tb;
    uint32_t n, cflags;
    target_ulong pc, cs_base;
    uint32_t flags;

    tb_lock();
    tb = tb_find_pc(retaddr);
    if (!tb) {
        cpu_abort(cpu, "cpu_io_recompile: could not find TB for pc=%p",
                  (void *)retaddr);
    }
    n = cpu->icount_decr.u16.low + tb->icount;
    cpu_restore_state_from_tb(cpu, tb, retaddr);
    /* Calculate how many instructions had been executed before the fault
       occurred.  */
    n = n - cpu->icount_decr.u16.low;
    /* Generate a new TB ending on the I/O insn.  */
    n++;
    /* On MIPS and SH, delay slot instructions can only be restarted if
       they were already the first instruction in the TB.  If this is not
       the first instruction in a TB then re-execute the preceding
       branch.  */
#if defined(TARGET_MIPS)
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
        env->active_tc.PC -= (env->hflags & MIPS_HFLAG_B16 ? 2 : 4);
        cpu->icount_decr.u16.low++;
        env->hflags &= ~MIPS_HFLAG_BMASK;
    }
#elif defined(TARGET_SH4)
    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
        && n > 1) {
        env->pc -= 2;
        cpu->icount_decr.u16.low++;
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
    }
#endif

    if (n > CF_COUNT_MASK) {
        cpu_abort(cpu, "TB too big during recompile");
    }

    cflags = n | CF_LAST_IO;
    pc = tb->pc;
    cs_base = tb->cs_base;
    flags = tb->flags;
    tb_phys_invalidate(tb, -1);
    if (tb->cflags & CF_NOCACHE) {
        if (tb->orig_tb) {
            /* Invalidate original TB if this TB was generated in
             * cpu_exec_nocache() */
            tb_phys_invalidate(tb->orig_tb, -1);
        }
        tb_free(tb);
    }
    /* FIXME: In theory this could raise an exception.  In practice
       we have already translated the block once so it's probably ok.  */
    tb_gen_code(cpu, pc, cs_base, flags, cflags);

    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
       the first in the TB) then we end up generating a whole new TB and
       repeating the fault, which is horribly inefficient.
       Better would be to execute just this insn uncached, or generate a
       second new TB.  */

    cpu_loop_exit_noexc(cpu);
}

void tb_flush_jmp_cache(CPUState *cpu, target_ulong addr)
{
    unsigned int i;

    /* Discard jump cache entries for any tb which might potentially
       overlap the flushed page.  */
    i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
    memset(&cpu->tb_jmp_cache[i], 0,
           TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));

    i = tb_jmp_cache_hash_page(addr);
    memset(&cpu->tb_jmp_cache[i], 0,
           TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
}

static void print_qht_statistics(FILE *f, fprintf_function cpu_fprintf,
                                 struct qht_stats hst)
{
    uint32_t hgram_opts;
    size_t hgram_bins;
    char *hgram;

    if (!hst.head_buckets) {
        return;
    }
    cpu_fprintf(f, "TB hash buckets     %zu/%zu (%0.2f%% head buckets used)\n",
                hst.used_head_buckets, hst.head_buckets,
                (double)hst.used_head_buckets / hst.head_buckets * 100);

    hgram_opts  = QDIST_PR_BORDER | QDIST_PR_LABELS;
    hgram_opts |= QDIST_PR_100X   | QDIST_PR_PERCENT;
    if (qdist_xmax(&hst.occupancy) - qdist_xmin(&hst.occupancy) == 1) {
        hgram_opts |= QDIST_PR_NODECIMAL;
    }
    hgram = qdist_pr(&hst.occupancy, 10, hgram_opts);
    cpu_fprintf(f, "TB hash occupancy   %0.2f%% avg chain occ. Histogram: %s\n",
                qdist_avg(&hst.occupancy) * 100, hgram);
    g_free(hgram);

    hgram_opts = QDIST_PR_BORDER | QDIST_PR_LABELS;
    hgram_bins = qdist_xmax(&hst.chain) - qdist_xmin(&hst.chain);
    if (hgram_bins > 10) {
        hgram_bins = 10;
    } else {
        hgram_bins = 0;
        hgram_opts |= QDIST_PR_NODECIMAL | QDIST_PR_NOBINRANGE;
    }
    hgram = qdist_pr(&hst.chain, hgram_bins, hgram_opts);
    cpu_fprintf(f, "TB hash avg chain   %0.3f buckets. Histogram: %s\n",
                qdist_avg(&hst.chain), hgram);
    g_free(hgram);
}

void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;
    struct qht_stats hst;

    tb_lock();

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for (i = 0; i < tcg_ctx.tb_ctx.nb_tbs; i++) {
        tb = &tcg_ctx.tb_ctx.tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size) {
            max_target_code_size = tb->size;
        }
        if (tb->page_addr[1] != -1) {
            cross_page++;
        }
        if (tb->jmp_reset_offset[0] != TB_JMP_RESET_OFFSET_INVALID) {
            direct_jmp_count++;
            if (tb->jmp_reset_offset[1] != TB_JMP_RESET_OFFSET_INVALID) {
                direct_jmp2_count++;
            }
        }
    }

    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "gen code size       %td/%zd\n",
                tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer,
                tcg_ctx.code_gen_highwater - tcg_ctx.code_gen_buffer);
    cpu_fprintf(f, "TB count            %d/%d\n",
                tcg_ctx.tb_ctx.nb_tbs, tcg_ctx.code_gen_max_blocks);
    cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n",
                tcg_ctx.tb_ctx.nb_tbs ? target_code_size /
                        tcg_ctx.tb_ctx.nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size    %td bytes (expansion ratio: %0.1f)\n",
                tcg_ctx.tb_ctx.nb_tbs ? (tcg_ctx.code_gen_ptr -
                                         tcg_ctx.code_gen_buffer) /
                                        tcg_ctx.tb_ctx.nb_tbs : 0,
                target_code_size ? (double) (tcg_ctx.code_gen_ptr -
                                             tcg_ctx.code_gen_buffer) /
                                   target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n", cross_page,
                tcg_ctx.tb_ctx.nb_tbs ? (cross_page * 100) /
                                        tcg_ctx.tb_ctx.nb_tbs : 0);
    cpu_fprintf(f, "direct jump count   %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                tcg_ctx.tb_ctx.nb_tbs ? (direct_jmp_count * 100) /
                                        tcg_ctx.tb_ctx.nb_tbs : 0,
                direct_jmp2_count,
                tcg_ctx.tb_ctx.nb_tbs ? (direct_jmp2_count * 100) /
                                        tcg_ctx.tb_ctx.nb_tbs : 0);

    qht_statistics_init(&tcg_ctx.tb_ctx.htable, &hst);
    print_qht_statistics(f, cpu_fprintf, hst);
    qht_statistics_destroy(&hst);

    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count      %u\n",
                atomic_read(&tcg_ctx.tb_ctx.tb_flush_count));
    cpu_fprintf(f, "TB invalidate count %d\n",
                tcg_ctx.tb_ctx.tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count     %d\n", tlb_flush_count);
    tcg_dump_info(f, cpu_fprintf);

    tb_unlock();
}

void dump_opcount_info(FILE *f, fprintf_function cpu_fprintf)
{
    tcg_dump_op_count(f, cpu_fprintf);
}

#else /* CONFIG_USER_ONLY */

void cpu_interrupt(CPUState *cpu, int mask)
{
    cpu->interrupt_request |= mask;
    cpu->tcg_exit_req = 1;
}

/*
 * Walks guest process memory "regions" one by one
 * and calls callback function 'fn' for each region.
 */
struct walk_memory_regions_data {
    walk_memory_regions_fn fn;
    void *priv;
    target_ulong start;
    int prot;
};

static int walk_memory_regions_end(struct walk_memory_regions_data *data,
                                   target_ulong end, int new_prot)
{
    if (data->start != -1u) {
        int rc = data->fn(data->priv, data->start, end, data->prot);
        if (rc != 0) {
            return rc;
        }
    }

    data->start = (new_prot ? end : -1u);
    data->prot = new_prot;

    return 0;
}

static int walk_memory_regions_1(struct walk_memory_regions_data *data,
                                 target_ulong base, int level, void **lp)
{
    target_ulong pa;
    int i, rc;

    if (*lp == NULL) {
        return walk_memory_regions_end(data, base, 0);
    }

    if (level == 0) {
        PageDesc *pd = *lp;

        for (i = 0; i < V_L2_SIZE; ++i) {
            int prot = pd[i].flags;

            pa = base | (i << TARGET_PAGE_BITS);
            if (prot != data->prot) {
                rc = walk_memory_regions_end(data, pa, prot);
                if (rc != 0) {
                    return rc;
                }
            }
        }
    } else {
        void **pp = *lp;

        for (i = 0; i < V_L2_SIZE; ++i) {
            pa = base | ((target_ulong)i <<
                (TARGET_PAGE_BITS + V_L2_BITS * level));
            rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
            if (rc != 0) {
                return rc;
            }
        }
    }

    return 0;
}

int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
{
    struct walk_memory_regions_data data;
    uintptr_t i, l1_sz = v_l1_size;

    data.fn = fn;
    data.priv = priv;
    data.start = -1u;
    data.prot = 0;

    for (i = 0; i < l1_sz; i++) {
        target_ulong base = i << (v_l1_shift + TARGET_PAGE_BITS);
        int rc = walk_memory_regions_1(&data, base, v_l2_levels, l1_map + i);
        if (rc != 0) {
            return rc;
        }
    }

    return walk_memory_regions_end(&data, 0, 0);
}

static int dump_region(void *priv, target_ulong start,
    target_ulong end, unsigned long prot)
{
    FILE *f = (FILE *)priv;

    (void) fprintf(f, TARGET_FMT_lx"-"TARGET_FMT_lx
        " "TARGET_FMT_lx" %c%c%c\n",
        start, end, end - start,
        ((prot & PAGE_READ) ? 'r' : '-'),
        ((prot & PAGE_WRITE) ? 'w' : '-'),
        ((prot & PAGE_EXEC) ? 'x' : '-'));

    return 0;
}

/* dump memory mappings */
void page_dump(FILE *f)
{
    const int length = sizeof(target_ulong) * 2;
    (void) fprintf(f, "%-*s %-*s %-*s %s\n",
            length, "start", length, "end", length, "size", "prot");
    walk_memory_regions(f, dump_region);
}

int page_get_flags(target_ulong address)
{
    PageDesc *p;

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p) {
        return 0;
    }
    return p->flags;
}

/* Modify the flags of a page and invalidate the code if necessary.
   The flag PAGE_WRITE_ORG is positioned automatically depending
   on PAGE_WRITE.  The mmap_lock should be held.  */
void page_set_flags(target_ulong start, target_ulong end, int flags)
{
    target_ulong addr, len;

    /* This function should never be called with addresses outside the
       guest address space.  If this assert fires, it probably indicates
       a missing call to h2g_valid.  */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(end < ((target_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
#endif
    assert(start < end);
    assert_memory_lock();

    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    if (flags & PAGE_WRITE) {
        flags |= PAGE_WRITE_ORG;
    }

    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);

        /* If the write protection bit is set, then we invalidate
           the code inside.  */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr, 0);
        }
        p->flags = flags;
    }
}

int page_check_range(target_ulong start, target_ulong len, int flags)
{
    PageDesc *p;
    target_ulong end;
    target_ulong addr;

    /* This function should never be called with addresses outside the
       guest address space.  If this assert fires, it probably indicates
       a missing call to h2g_valid.  */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(start < ((target_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
#endif

    if (len == 0) {
        return 0;
    }
    if (start + len - 1 < start) {
        /* We've wrapped around.  */
        return -1;
    }

    /* must do before we lose bits in the next step */
    end = TARGET_PAGE_ALIGN(start + len);
    start = start & TARGET_PAGE_MASK;

    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        p = page_find(addr >> TARGET_PAGE_BITS);
        if (!p) {
            return -1;
        }
        if (!(p->flags & PAGE_VALID)) {
            return -1;
        }

        if ((flags & PAGE_READ) && !(p->flags & PAGE_READ)) {
            return -1;
        }
        if (flags & PAGE_WRITE) {
            if (!(p->flags & PAGE_WRITE_ORG)) {
                return -1;
            }
            /* unprotect the page if it was put read-only because it
               contains translated code */
            if (!(p->flags & PAGE_WRITE)) {
                if (!page_unprotect(addr, 0)) {
                    return -1;
                }
            }
        }
    }
    return 0;
}

/* called from signal handler: invalidate the code and unprotect the
 * page. Return 0 if the fault was not handled, 1 if it was handled,
 * and 2 if it was handled but the caller must cause the TB to be
 * immediately exited. (We can only return 2 if the 'pc' argument is
 * non-zero.)
 */
int page_unprotect(target_ulong address, uintptr_t pc)
{
    unsigned int prot;
    bool current_tb_invalidated;
    PageDesc *p;
    target_ulong host_start, host_end, addr;

    /* Technically this isn't safe inside a signal handler.  However we
       know this only ever happens in a synchronous SEGV handler, so in
       practice it seems to be ok.  */
    mmap_lock();

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p) {
        mmap_unlock();
        return 0;
    }

    /* if the page was really writable, then we change its
       protection back to writable */
    if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
        host_start = address & qemu_host_page_mask;
        host_end = host_start + qemu_host_page_size;

        prot = 0;
        current_tb_invalidated = false;
        for (addr = host_start ; addr < host_end ; addr += TARGET_PAGE_SIZE) {
            p = page_find(addr >> TARGET_PAGE_BITS);
            p->flags |= PAGE_WRITE;
            prot |= p->flags;

            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            current_tb_invalidated |= tb_invalidate_phys_page(addr, pc);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(addr);
#endif
        }
        mprotect((void *)g2h(host_start), qemu_host_page_size,
                 prot & PAGE_BITS);

        mmap_unlock();
        /* If current TB was invalidated return to main loop */
        return current_tb_invalidated ? 2 : 1;
    }
    mmap_unlock();
    return 0;
}
#endif /* CONFIG_USER_ONLY */