1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19#ifdef _WIN32
20#include <windows.h>
21#endif
22#include "qemu/osdep.h"
23
24
25#include "qemu-common.h"
26#define NO_CPU_IO_DEFS
27#include "cpu.h"
28#include "trace.h"
29#include "disas/disas.h"
30#include "exec/exec-all.h"
31#include "tcg.h"
32#if defined(CONFIG_USER_ONLY)
33#include "qemu.h"
34#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
35#include <sys/param.h>
36#if __FreeBSD_version >= 700104
37#define HAVE_KINFO_GETVMMAP
38#define sigqueue sigqueue_freebsd
39#include <sys/proc.h>
40#include <machine/profile.h>
41#define _KERNEL
42#include <sys/user.h>
43#undef _KERNEL
44#undef sigqueue
45#include <libutil.h>
46#endif
47#endif
48#else
49#include "exec/address-spaces.h"
50#endif
51
52#include "exec/cputlb.h"
53#include "exec/tb-hash.h"
54#include "translate-all.h"
55#include "qemu/bitmap.h"
56#include "qemu/error-report.h"
57#include "qemu/timer.h"
58#include "qemu/main-loop.h"
59#include "exec/log.h"
60#include "sysemu/cpus.h"
61
62
63
64
65
66
67#ifdef DEBUG_TB_INVALIDATE
68#define DEBUG_TB_INVALIDATE_GATE 1
69#else
70#define DEBUG_TB_INVALIDATE_GATE 0
71#endif
72
73#ifdef DEBUG_TB_FLUSH
74#define DEBUG_TB_FLUSH_GATE 1
75#else
76#define DEBUG_TB_FLUSH_GATE 0
77#endif
78
79#if !defined(CONFIG_USER_ONLY)
80
81#undef DEBUG_TB_CHECK
82#endif
83
84#ifdef DEBUG_TB_CHECK
85#define DEBUG_TB_CHECK_GATE 1
86#else
87#define DEBUG_TB_CHECK_GATE 0
88#endif
89
90
91
92
93
94
95
96#ifdef CONFIG_SOFTMMU
97#define assert_memory_lock() tcg_debug_assert(have_tb_lock)
98#else
99#define assert_memory_lock() tcg_debug_assert(have_mmap_lock())
100#endif
101
102#define SMC_BITMAP_USE_THRESHOLD 10
103
typedef struct PageDesc {
    /* list of TBs intersecting this ram page; low 2 bits of each link
     * encode which of the TB's two pages this entry belongs to */
    TranslationBlock *first_tb;
#ifdef CONFIG_SOFTMMU
    /* in order to optimize self modifying code, we count the number
     * of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    unsigned long *code_bitmap;
#else
    unsigned long flags;
#endif
} PageDesc;
116
117
118
119#if !defined(CONFIG_USER_ONLY)
120#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
121# define L1_MAP_ADDR_SPACE_BITS HOST_LONG_BITS
122#else
123# define L1_MAP_ADDR_SPACE_BITS TARGET_PHYS_ADDR_SPACE_BITS
124#endif
125#else
126# define L1_MAP_ADDR_SPACE_BITS TARGET_VIRT_ADDR_SPACE_BITS
127#endif
128
129
130#define V_L2_BITS 10
131#define V_L2_SIZE (1 << V_L2_BITS)
132
133
134QEMU_BUILD_BUG_ON(CPU_TRACE_DSTATE_MAX_EVENTS >
135 sizeof(((TranslationBlock *)0)->trace_vcpu_dstate)
136 * BITS_PER_BYTE);
137
138
139
140
141static int v_l1_size;
142static int v_l1_shift;
143static int v_l2_levels;
144
145
146
147
148#define V_L1_MIN_BITS 4
149#define V_L1_MAX_BITS (V_L2_BITS + 3)
150#define V_L1_MAX_SIZE (1 << V_L1_MAX_BITS)
151
152static void *l1_map[V_L1_MAX_SIZE];
153
154
155TCGContext tcg_init_ctx;
156__thread TCGContext *tcg_ctx;
157TBContext tb_ctx;
158bool parallel_cpus;
159
160
161static __thread int have_tb_lock;
162
163static void page_table_config_init(void)
164{
165 uint32_t v_l1_bits;
166
167 assert(TARGET_PAGE_BITS);
168
169 v_l1_bits = (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % V_L2_BITS;
170 if (v_l1_bits < V_L1_MIN_BITS) {
171 v_l1_bits += V_L2_BITS;
172 }
173
174 v_l1_size = 1 << v_l1_bits;
175 v_l1_shift = L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - v_l1_bits;
176 v_l2_levels = v_l1_shift / V_L2_BITS - 1;
177
178 assert(v_l1_bits <= V_L1_MAX_BITS);
179 assert(v_l1_shift % V_L2_BITS == 0);
180 assert(v_l2_levels >= 0);
181}
182
183#define assert_tb_locked() tcg_debug_assert(have_tb_lock)
184#define assert_tb_unlocked() tcg_debug_assert(!have_tb_lock)
185
/* Acquire the global translation-block lock; must not already be held
 * by this thread (checked via the thread-local have_tb_lock counter). */
void tb_lock(void)
{
    assert_tb_unlocked();
    qemu_mutex_lock(&tb_ctx.tb_lock);
    have_tb_lock++;
}
192
/* Release the global translation-block lock; must currently be held. */
void tb_unlock(void)
{
    assert_tb_locked();
    have_tb_lock--;
    qemu_mutex_unlock(&tb_ctx.tb_lock);
}
199
/* Drop the tb lock if this thread holds it.  Used on error/longjmp paths
 * where the lock state is not statically known. */
void tb_lock_reset(void)
{
    if (have_tb_lock) {
        qemu_mutex_unlock(&tb_ctx.tb_lock);
        have_tb_lock = 0;
    }
}
207
208static TranslationBlock *tb_find_pc(uintptr_t tc_ptr);
209
/* One-time initialisation of the primary TCG context. */
void cpu_gen_init(void)
{
    tcg_context_init(&tcg_init_ctx);
}
214
215
216
217static uint8_t *encode_sleb128(uint8_t *p, target_long val)
218{
219 int more, byte;
220
221 do {
222 byte = val & 0x7f;
223 val >>= 7;
224 more = !((val == 0 && (byte & 0x40) == 0)
225 || (val == -1 && (byte & 0x40) != 0));
226 if (more) {
227 byte |= 0x80;
228 }
229 *p++ = byte;
230 } while (more);
231
232 return p;
233}
234
235
236
237static target_long decode_sleb128(uint8_t **pp)
238{
239 uint8_t *p = *pp;
240 target_long val = 0;
241 int byte, shift = 0;
242
243 do {
244 byte = *p++;
245 val |= (target_ulong)(byte & 0x7f) << shift;
246 shift += 7;
247 } while (byte & 0x80);
248 if (shift < TARGET_LONG_BITS && (byte & 0x40)) {
249 val |= -(target_ulong)1 << shift;
250 }
251
252 *pp = p;
253 return val;
254}
255
256
257
258
259
260
261
262
263
264
265
266
267
/* Encode the data collected about the instructions while compiling TB.
   Place the data at BLOCK, and return the number of bytes consumed.

   The logical table consists of TARGET_INSN_START_WORDS target_ulong's,
   which come from the target's insn_start data, followed by a uintptr_t
   which comes from the host pc of the end of the code implementing the insn.

   Each line of the table is encoded as sleb128 deltas from the previous
   line.  The seed for the first line is { tb->pc, 0..., tb->tc.ptr }.  */
static int encode_search(TranslationBlock *tb, uint8_t *block)
{
    uint8_t *highwater = tcg_ctx->code_gen_highwater;
    uint8_t *p = block;
    int i, j, n;

    for (i = 0, n = tb->icount; i < n; ++i) {
        target_ulong prev;

        for (j = 0; j < TARGET_INSN_START_WORDS; ++j) {
            if (i == 0) {
                prev = (j == 0 ? tb->pc : 0);
            } else {
                prev = tcg_ctx->gen_insn_data[i - 1][j];
            }
            p = encode_sleb128(p, tcg_ctx->gen_insn_data[i][j] - prev);
        }
        prev = (i == 0 ? 0 : tcg_ctx->gen_insn_end_off[i - 1]);
        p = encode_sleb128(p, tcg_ctx->gen_insn_end_off[i] - prev);

        /* Test for (pending) buffer overflow.  The assumption is that any
           one row beginning below the high water mark cannot overrun
           the buffer completely.  Thus we can test for overflow after
           encoding a row without having to check during encoding.  */
        if (unlikely(p > highwater)) {
            return -1;
        }
    }

    return p - block;
}
299
300
301
302
303
304
/* The cpu state corresponding to 'searched_pc' is restored.
 * When reset_icount is true, current TB will be interrupted and
 * icount should be recalculated.
 * Returns 0 on success, -1 if searched_pc does not map to an insn in TB.
 */
static int cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb,
                                     uintptr_t searched_pc, bool reset_icount)
{
    target_ulong data[TARGET_INSN_START_WORDS] = { tb->pc };
    uintptr_t host_pc = (uintptr_t)tb->tc.ptr;
    CPUArchState *env = cpu->env_ptr;
    /* The search data was written by encode_search() just past the code. */
    uint8_t *p = tb->tc.ptr + tb->tc.size;
    int i, j, num_insns = tb->icount;
#ifdef CONFIG_PROFILER
    TCGProfile *prof = &tcg_ctx->prof;
    int64_t ti = profile_getclock();
#endif

    searched_pc -= GETPC_ADJ;

    if (searched_pc < host_pc) {
        return -1;
    }

    /* Reconstruct the stored insn data while looking for the point at
       which the end of the insn exceeds the searched_pc.  */
    for (i = 0; i < num_insns; ++i) {
        for (j = 0; j < TARGET_INSN_START_WORDS; ++j) {
            data[j] += decode_sleb128(&p);
        }
        host_pc += decode_sleb128(&p);
        if (host_pc > searched_pc) {
            goto found;
        }
    }
    return -1;

 found:
    if (reset_icount && (tb->cflags & CF_USE_ICOUNT)) {
        assert(use_icount);
        /* Undo the decrement for the instructions in this TB that were
         * not actually executed (those at and after index i). */
        cpu->icount_decr.u16.low += num_insns - i;
    }
    restore_state_to_opc(env, tb, data);

#ifdef CONFIG_PROFILER
    atomic_set(&prof->restore_time,
               prof->restore_time + profile_getclock() - ti);
    atomic_set(&prof->restore_count, prof->restore_count + 1);
#endif
    return 0;
}
353
/* Restore guest CPU state for a fault taken at host address host_pc.
 * Returns true if host_pc belonged to a translated block and the state
 * was restored; false otherwise. */
bool cpu_restore_state(CPUState *cpu, uintptr_t host_pc, bool will_exit)
{
    TranslationBlock *tb;
    bool r = false;
    uintptr_t check_offset;

    /* The host_pc has to be in the region of the code buffer or we
     * cannot resolve it here.  The two cases where host_pc will not be
     * correct are a fault during translation (instruction fetch) and a
     * fault from a helper that did not use the GETPC() macro.
     *
     * We use unsigned arithmetic: if host_pc is below the buffer start,
     * check_offset wraps to a huge value above code_gen_buffer_size.
     */
    check_offset = host_pc - (uintptr_t) tcg_init_ctx.code_gen_buffer;

    if (check_offset < tcg_init_ctx.code_gen_buffer_size) {
        tb_lock();
        tb = tb_find_pc(host_pc);
        if (tb) {
            cpu_restore_state_from_tb(cpu, tb, host_pc, will_exit);
            if (tb->cflags & CF_NOCACHE) {
                /* one-shot translation, invalidate it immediately */
                tb_phys_invalidate(tb, -1);
                tb_remove(tb);
            }
            r = true;
        }
        tb_unlock();
    }

    return r;
}
393
/* Initialise page sizing and the page table; on BSD user-mode builds,
 * additionally mark already-mapped host regions as PAGE_RESERVED so the
 * guest mmap allocator avoids them. */
static void page_init(void)
{
    page_size_init();
    page_table_config_init();

#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
    {
#ifdef HAVE_KINFO_GETVMMAP
        struct kinfo_vmentry *freep;
        int i, cnt;

        freep = kinfo_getvmmap(getpid(), &cnt);
        if (freep) {
            mmap_lock();
            for (i = 0; i < cnt; i++) {
                unsigned long startaddr, endaddr;

                startaddr = freep[i].kve_start;
                endaddr = freep[i].kve_end;
                if (h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                    } else {
                        /* End is outside the guest address space: reserve
                         * up to the top when the guest space fits in the
                         * page table. */
#if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
                        endaddr = ~0ul;
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
#endif
                    }
                }
            }
            free(freep);
            mmap_unlock();
        }
#else
        /* Fallback: parse the Linux-compat maps file for mapped ranges. */
        FILE *f;

        last_brk = (unsigned long)sbrk(0);

        f = fopen("/compat/linux/proc/self/maps", "r");
        if (f) {
            mmap_lock();

            do {
                unsigned long startaddr, endaddr;
                int n;

                n = fscanf(f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);

                if (n == 2 && h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                    } else {
                        endaddr = ~0ul;
                    }
                    page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                }
            } while (!feof(f));

            fclose(f);
            mmap_unlock();
        }
#endif
    }
#endif
}
464
465
466
467
468
/* Walk the multi-level page table to the PageDesc for page number INDEX.
 * If ALLOC is nonzero, missing intermediate tables and the leaf PageDesc
 * array are allocated (requires the memory lock); otherwise return NULL
 * on a missing level.  Readers may race with allocators, hence the
 * RCU-style atomic accessors on each level pointer. */
static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
{
    PageDesc *pd;
    void **lp;
    int i;

    if (alloc) {
        assert_memory_lock();
    }

    /* Level 1.  Always allocated.  */
    lp = l1_map + ((index >> v_l1_shift) & (v_l1_size - 1));

    /* Level 2..N-1.  */
    for (i = v_l2_levels; i > 0; i--) {
        void **p = atomic_rcu_read(lp);

        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            p = g_new0(void *, V_L2_SIZE);
            atomic_rcu_set(lp, p);
        }

        lp = p + ((index >> (i * V_L2_BITS)) & (V_L2_SIZE - 1));
    }

    /* Leaf level: an array of V_L2_SIZE PageDescs. */
    pd = atomic_rcu_read(lp);
    if (pd == NULL) {
        if (!alloc) {
            return NULL;
        }
        pd = g_new0(PageDesc, V_L2_SIZE);
        atomic_rcu_set(lp, pd);
    }

    return pd + (index & (V_L2_SIZE - 1));
}
508
/* Lookup-only variant of page_find_alloc(): returns NULL when the page
 * has no descriptor yet; never allocates. */
static inline PageDesc *page_find(tb_page_addr_t index)
{
    return page_find_alloc(index, 0);
}
513
514#if defined(CONFIG_USER_ONLY)
515
516
517
518
519#define USE_STATIC_CODE_GEN_BUFFER
520#endif
521
522
523
524#define MIN_CODE_GEN_BUFFER_SIZE (1024u * 1024)
525
526
527
528
529#if defined(__x86_64__)
530# define MAX_CODE_GEN_BUFFER_SIZE (2ul * 1024 * 1024 * 1024)
531#elif defined(__sparc__)
532# define MAX_CODE_GEN_BUFFER_SIZE (2ul * 1024 * 1024 * 1024)
533#elif defined(__powerpc64__)
534# define MAX_CODE_GEN_BUFFER_SIZE (2ul * 1024 * 1024 * 1024)
535#elif defined(__powerpc__)
536# define MAX_CODE_GEN_BUFFER_SIZE (32u * 1024 * 1024)
537#elif defined(__aarch64__)
538# define MAX_CODE_GEN_BUFFER_SIZE (2ul * 1024 * 1024 * 1024)
539#elif defined(__s390x__)
540
541# define MAX_CODE_GEN_BUFFER_SIZE (3ul * 1024 * 1024 * 1024)
542#elif defined(__mips__)
543
544
545# define MAX_CODE_GEN_BUFFER_SIZE (128ul * 1024 * 1024)
546#else
547# define MAX_CODE_GEN_BUFFER_SIZE ((size_t)-1)
548#endif
549
550#define DEFAULT_CODE_GEN_BUFFER_SIZE_1 (32u * 1024 * 1024)
551
552#define DEFAULT_CODE_GEN_BUFFER_SIZE \
553 (DEFAULT_CODE_GEN_BUFFER_SIZE_1 < MAX_CODE_GEN_BUFFER_SIZE \
554 ? DEFAULT_CODE_GEN_BUFFER_SIZE_1 : MAX_CODE_GEN_BUFFER_SIZE)
555
556static inline size_t size_code_gen_buffer(size_t tb_size)
557{
558
559 if (tb_size == 0) {
560#ifdef USE_STATIC_CODE_GEN_BUFFER
561 tb_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
562#else
563
564
565
566
567 tb_size = (unsigned long)(ram_size / 4);
568#endif
569 }
570 if (tb_size < MIN_CODE_GEN_BUFFER_SIZE) {
571 tb_size = MIN_CODE_GEN_BUFFER_SIZE;
572 }
573 if (tb_size > MAX_CODE_GEN_BUFFER_SIZE) {
574 tb_size = MAX_CODE_GEN_BUFFER_SIZE;
575 }
576 return tb_size;
577}
578
579#ifdef __mips__
580
581
/* True if the range [addr, addr + size) straddles a 256MB boundary.
 * MIPS region-relative branches cannot cross such a boundary. */
static inline bool cross_256mb(void *addr, size_t size)
{
    uintptr_t start = (uintptr_t)addr;

    return (start ^ (start + size)) & ~0x0ffffffful;
}
586
587
588
589
/* The buffer [buf1, buf1 + size1) crosses a 256MB boundary; split it at
 * the boundary and keep the larger half.  Updates
 * tcg_ctx->code_gen_buffer_size to the kept size and returns the kept
 * buffer's start. */
static inline void *split_cross_256mb(void *buf1, size_t size1)
{
    /* buf2 is the first 256MB boundary at or below the end of the range.
     * NOTE(review): relies on GCC's void* arithmetic extension. */
    void *buf2 = (void *)(((uintptr_t)buf1 + size1) & ~0x0ffffffful);
    size_t size2 = buf1 + size1 - buf2;

    size1 = buf2 - buf1;
    if (size1 < size2) {
        /* The upper half is larger: keep it instead. */
        size1 = size2;
        buf1 = buf2;
    }

    tcg_ctx->code_gen_buffer_size = size1;
    return buf1;
}
604#endif
605
606#ifdef USE_STATIC_CODE_GEN_BUFFER
607static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
608 __attribute__((aligned(CODE_GEN_ALIGN)));
609
/* Static-buffer variant: carve the code buffer out of the BSS-resident
 * static_code_gen_buffer, aligned to host pages, make it RWX, and record
 * the final size in tcg_ctx->code_gen_buffer_size. */
static inline void *alloc_code_gen_buffer(void)
{
    void *buf = static_code_gen_buffer;
    void *end = static_code_gen_buffer + sizeof(static_code_gen_buffer);
    size_t size;

    /* page-align the beginning and end of the buffer */
    buf = QEMU_ALIGN_PTR_UP(buf, qemu_real_host_page_size);
    end = QEMU_ALIGN_PTR_DOWN(end, qemu_real_host_page_size);

    size = end - buf;

    /* Honor a smaller requested size, rounded down to whole pages. */
    if (size > tcg_ctx->code_gen_buffer_size) {
        size = QEMU_ALIGN_DOWN(tcg_ctx->code_gen_buffer_size,
                               qemu_real_host_page_size);
    }
    tcg_ctx->code_gen_buffer_size = size;

#ifdef __mips__
    if (cross_256mb(buf, size)) {
        buf = split_cross_256mb(buf, size);
        size = tcg_ctx->code_gen_buffer_size;
    }
#endif

    if (qemu_mprotect_rwx(buf, size)) {
        abort();
    }
    qemu_madvise(buf, size, QEMU_MADV_HUGEPAGE);

    return buf;
}
643#elif defined(_WIN32)
/* Win32 variant: allocate an RWX region of the configured size.
 * Returns NULL on failure (VirtualAlloc convention). */
static inline void *alloc_code_gen_buffer(void)
{
    size_t size = tcg_ctx->code_gen_buffer_size;
    void *buf;

    buf = VirtualAlloc(NULL, size, MEM_RESERVE | MEM_COMMIT,
                       PAGE_EXECUTE_READWRITE);
    return buf;
}
653#else
/* POSIX mmap variant: allocate an anonymous RWX mapping, hinting at a
 * host-specific address so the buffer stays within branch range of the
 * executable where that matters.  Returns NULL on failure. */
static inline void *alloc_code_gen_buffer(void)
{
    int prot = PROT_WRITE | PROT_READ | PROT_EXEC;
    int flags = MAP_PRIVATE | MAP_ANONYMOUS;
    uintptr_t start = 0;
    size_t size = tcg_ctx->code_gen_buffer_size;
    void *buf;

    /* Constrain the position of the buffer based on the host cpu.
       Note that these addresses are chosen in concert with the
       addresses assigned in the relevant linker script file.  */
# if defined(__PIE__) || defined(__PIC__)
    /* Don't bother setting a preferred location if we're building
       a position-independent executable.  We're more likely to get
       an address near the main executable if we let the kernel
       choose the address.  */
# elif defined(__x86_64__) && defined(MAP_32BIT)
    /* Force the memory down into low memory with the executable.
       Leave the choice of exact location with the kernel.  */
    flags |= MAP_32BIT;
    /* Cannot expect to map more than 800MB in low memory.  */
    if (size > 800u * 1024 * 1024) {
        tcg_ctx->code_gen_buffer_size = size = 800u * 1024 * 1024;
    }
# elif defined(__sparc__)
    start = 0x40000000ul;
# elif defined(__s390x__)
    start = 0x90000000ul;
# elif defined(__mips__)
# if _MIPS_SIM == _ABI64
    start = 0x128000000ul;
# else
    start = 0x08000000ul;
# endif
# endif

    buf = mmap((void *)start, size, prot, flags, -1, 0);
    if (buf == MAP_FAILED) {
        return NULL;
    }

#ifdef __mips__
    if (cross_256mb(buf, size)) {
        /* Try again, with the original still mapped, to avoid
           re-acquiring the same 256mb crossing.  */
        size_t size2;
        void *buf2 = mmap(NULL, size, prot, flags, -1, 0);
        switch ((int)(buf2 != MAP_FAILED)) {
        case 1:
            if (!cross_256mb(buf2, size)) {
                /* Success!  Use the new buffer.  */
                munmap(buf, size);
                break;
            }
            /* Failure.  Work with what we had.  */
            munmap(buf2, size);
            /* fallthru */
        default:
            /* Split the original buffer.  Free the smaller half.  */
            buf2 = split_cross_256mb(buf, size);
            size2 = tcg_ctx->code_gen_buffer_size;
            if (buf == buf2) {
                munmap(buf + size2, size - size2);
            } else {
                munmap(buf, size - size2);
            }
            size = size2;
            break;
        }
        buf = buf2;
    }
#endif

    /* Request large pages for the buffer.  */
    qemu_madvise(buf, size, QEMU_MADV_HUGEPAGE);

    return buf;
}
732#endif
733
734
735static int ptr_cmp_tb_tc(const void *ptr, const struct tb_tc *s)
736{
737 if (ptr >= s->ptr + s->size) {
738 return 1;
739 } else if (ptr < s->ptr) {
740 return -1;
741 }
742 return 0;
743}
744
745static gint tb_tc_cmp(gconstpointer ap, gconstpointer bp)
746{
747 const struct tb_tc *a = ap;
748 const struct tb_tc *b = bp;
749
750
751
752
753
754
755 if (likely(a->size && b->size)) {
756 if (a->ptr > b->ptr) {
757 return 1;
758 } else if (a->ptr < b->ptr) {
759 return -1;
760 }
761
762 g_assert(a->size == b->size);
763 return 0;
764 }
765
766
767
768
769
770 if (likely(a->size == 0)) {
771 return ptr_cmp_tb_tc(a->ptr, b);
772 }
773 return ptr_cmp_tb_tc(b->ptr, a);
774}
775
/* Allocate the code generation buffer and the TB bookkeeping structures
 * (the host-PC search tree and the tb lock).  Exits on allocation
 * failure. */
static inline void code_gen_alloc(size_t tb_size)
{
    tcg_ctx->code_gen_buffer_size = size_code_gen_buffer(tb_size);
    tcg_ctx->code_gen_buffer = alloc_code_gen_buffer();
    if (tcg_ctx->code_gen_buffer == NULL) {
        fprintf(stderr, "Could not allocate dynamic translator buffer\n");
        exit(1);
    }
    tb_ctx.tb_tree = g_tree_new(tb_tc_cmp);
    qemu_mutex_init(&tb_ctx.tb_lock);
}
787
788static void tb_htable_init(void)
789{
790 unsigned int mode = QHT_MODE_AUTO_RESIZE;
791
792 qht_init(&tb_ctx.htable, CODE_GEN_HTABLE_SIZE, mode);
793}
794
795
796
797
/* Must be called before using the QEMU cpus. 'tb_size' is the size
 * (in bytes) allocated to the translation buffer (0 selects the
 * default).  Initialisation order matters: the page tables and hash
 * table must exist before the code buffer is carved up. */
void tcg_exec_init(unsigned long tb_size)
{
    tcg_allowed = true;
    cpu_gen_init();
    page_init();
    tb_htable_init();
    code_gen_alloc(tb_size);
#if defined(CONFIG_SOFTMMU)
    /* There's no guest base to take into account, so go ahead and
       initialize the prologue now.  */
    tcg_prologue_init(tcg_ctx);
#endif
}
811
812
813
814
815
816
817
818static TranslationBlock *tb_alloc(target_ulong pc)
819{
820 TranslationBlock *tb;
821
822 assert_tb_locked();
823
824 tb = tcg_tb_alloc(tcg_ctx);
825 if (unlikely(tb == NULL)) {
826 return NULL;
827 }
828 return tb;
829}
830
831
/* Called with tb_lock held: remove TB from the host-PC search tree. */
void tb_remove(TranslationBlock *tb)
{
    assert_tb_locked();

    g_tree_remove(tb_ctx.tb_tree, &tb->tc);
}
838
/* Discard the SMC code bitmap for page P and reset its write counter;
 * a fresh bitmap will be rebuilt on demand by build_page_bitmap(). */
static inline void invalidate_page_bitmap(PageDesc *p)
{
#ifdef CONFIG_SOFTMMU
    g_free(p->code_bitmap);
    p->code_bitmap = NULL;
    p->code_write_count = 0;
#endif
}
847
848
/* Set to NULL all the 'first_tb' fields in all PageDescs, recursing
 * through the intermediate page-table levels.  Level 0 entries point at
 * PageDesc arrays; higher levels point at child tables. */
static void page_flush_tb_1(int level, void **lp)
{
    int i;

    if (*lp == NULL) {
        return;
    }
    if (level == 0) {
        PageDesc *pd = *lp;

        for (i = 0; i < V_L2_SIZE; ++i) {
            pd[i].first_tb = NULL;
            invalidate_page_bitmap(pd + i);
        }
    } else {
        void **pp = *lp;

        for (i = 0; i < V_L2_SIZE; ++i) {
            page_flush_tb_1(level - 1, pp + i);
        }
    }
}
871
872static void page_flush_tb(void)
873{
874 int i, l1_sz = v_l1_size;
875
876 for (i = 0; i < l1_sz; i++) {
877 page_flush_tb_1(v_l2_levels, l1_map + i);
878 }
879}
880
881static gboolean tb_host_size_iter(gpointer key, gpointer value, gpointer data)
882{
883 const TranslationBlock *tb = value;
884 size_t *size = data;
885
886 *size += tb->tc.size;
887 return false;
888}
889
890
/* flush all the translation blocks; runs as an async-safe CPU work item */
static void do_tb_flush(CPUState *cpu, run_on_cpu_data tb_flush_count)
{
    tb_lock();

    /* If it is already been done on request of another CPU,
     * just retry.
     */
    if (tb_ctx.tb_flush_count != tb_flush_count.host_int) {
        goto done;
    }

    if (DEBUG_TB_FLUSH_GATE) {
        size_t nb_tbs = g_tree_nnodes(tb_ctx.tb_tree);
        size_t host_size = 0;

        g_tree_foreach(tb_ctx.tb_tree, tb_host_size_iter, &host_size);
        printf("qemu: flush code_size=%zu nb_tbs=%zu avg_tb_size=%zu\n",
               tcg_code_size(), nb_tbs, nb_tbs > 0 ? host_size / nb_tbs : 0);
    }

    CPU_FOREACH(cpu) {
        cpu_tb_jmp_cache_clear(cpu);
    }

    /* Increment the refcount first so that destroy acts as a reset */
    g_tree_ref(tb_ctx.tb_tree);
    g_tree_destroy(tb_ctx.tb_tree);

    qht_reset_size(&tb_ctx.htable, CODE_GEN_HTABLE_SIZE);
    page_flush_tb();

    tcg_region_reset_all();
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    atomic_mb_set(&tb_ctx.tb_flush_count, tb_ctx.tb_flush_count + 1);

done:
    tb_unlock();
}
930
/* Schedule a full TB flush.  The current flush count is passed along so
 * that do_tb_flush() can detect and skip a flush another CPU already
 * performed in the meantime. */
void tb_flush(CPUState *cpu)
{
    if (tcg_enabled()) {
        unsigned tb_flush_count = atomic_mb_read(&tb_ctx.tb_flush_count);
        async_safe_run_on_cpu(cpu, do_tb_flush,
                              RUN_ON_CPU_HOST_INT(tb_flush_count));
    }
}
939
940
941
942
943
944
945
946#ifdef CONFIG_USER_ONLY
947
/* qht iterator: report any TB that still overlaps the page at *userp,
 * which should have been invalidated already. */
static void
do_tb_invalidate_check(struct qht *ht, void *p, uint32_t hash, void *userp)
{
    TranslationBlock *tb = p;
    target_ulong addr = *(target_ulong *)userp;

    if (!(addr + TARGET_PAGE_SIZE <= tb->pc || addr >= tb->pc + tb->size)) {
        printf("ERROR invalidate: address=" TARGET_FMT_lx
               " PC=%08lx size=%04x\n", addr, (long)tb->pc, tb->size);
    }
}
959
960
961
962
963
/* verify that all the pages have correct rights for code
 *
 * Called with tb_lock held.
 */
static void tb_invalidate_check(target_ulong address)
{
    address &= TARGET_PAGE_MASK;
    qht_iter(&tb_ctx.htable, do_tb_invalidate_check, &address);
}
969
/* qht iterator: report any TB whose guest pages are still writable —
 * translated code must live on write-protected pages. */
static void
do_tb_page_check(struct qht *ht, void *p, uint32_t hash, void *userp)
{
    TranslationBlock *tb = p;
    int flags1, flags2;

    flags1 = page_get_flags(tb->pc);
    flags2 = page_get_flags(tb->pc + tb->size - 1);
    if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
        printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
               (long)tb->pc, tb->size, flags1, flags2);
    }
}
983
984
/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    qht_iter(&tb_ctx.htable, do_tb_page_check, NULL);
}
989
990#endif
991
/* Unlink TB from the per-page singly-linked list starting at *ptb.
 * The low 2 bits of each link encode which of the TB's pages the link
 * belongs to, so they must be masked off before following it.
 * Assumes TB is present in the list. */
static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for (;;) {
        tb1 = *ptb;
        n1 = (uintptr_t)tb1 & 3;
        tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}
1008
1009
/* remove the TB from a list of TBs jumping to the n-th jump target of the TB */
static inline void tb_remove_from_jmp_list(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1;
    uintptr_t *ptb, ntb;
    unsigned int n1;

    ptb = &tb->jmp_list_next[n];
    if (*ptb) {
        /* find tb(n) in circular list */
        for (;;) {
            ntb = *ptb;
            n1 = ntb & 3;
            tb1 = (TranslationBlock *)(ntb & ~3);
            if (n1 == n && tb1 == tb) {
                break;
            }
            if (n1 == 2) {
                /* tag 2 marks the list head (jmp_list_first) */
                ptb = &tb1->jmp_list_first;
            } else {
                ptb = &tb1->jmp_list_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_list_next[n];

        tb->jmp_list_next[n] = (uintptr_t)NULL;
    }
}
1038
1039
1040
/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    uintptr_t addr = (uintptr_t)(tb->tc.ptr + tb->jmp_reset_offset[n]);
    tb_set_jmp_target(tb, n, addr);
}
1046
1047
/* remove any jumps to the TB: walk the circular list headed at
 * jmp_list_first and reset each incoming jump; tag 2 marks the head. */
static inline void tb_jmp_unlink(TranslationBlock *tb)
{
    TranslationBlock *tb1;
    uintptr_t *ptb, ntb;
    unsigned int n1;

    ptb = &tb->jmp_list_first;
    for (;;) {
        ntb = *ptb;
        n1 = ntb & 3;
        tb1 = (TranslationBlock *)(ntb & ~3);
        if (n1 == 2) {
            break;
        }
        tb_reset_jump(tb1, n1);
        *ptb = tb1->jmp_list_next[n1];
        tb1->jmp_list_next[n1] = (uintptr_t)NULL;
    }
}
1067
1068
1069
1070
1071
/* invalidate one TB
 *
 * Called with tb_lock held.
 */
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
{
    CPUState *cpu;
    PageDesc *p;
    uint32_t h;
    tb_page_addr_t phys_pc;

    assert_tb_locked();

    /* make sure no further TB lookup will return this TB */
    atomic_set(&tb->cflags, tb->cflags | CF_INVALID);

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_hash_func(phys_pc, tb->pc, tb->flags, tb->cflags & CF_HASH_MASK,
                     tb->trace_vcpu_dstate);
    if (!qht_remove(&tb_ctx.htable, tb, h)) {
        /* already removed by a concurrent invalidation */
        return;
    }

    /* remove the TB from the page list (skipping the page the caller
     * passed in, which it handles itself) */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    /* remove the TB from the per-CPU jump caches */
    h = tb_jmp_cache_hash_func(tb->pc);
    CPU_FOREACH(cpu) {
        if (atomic_read(&cpu->tb_jmp_cache[h]) == tb) {
            atomic_set(&cpu->tb_jmp_cache[h], NULL);
        }
    }

    /* suppress this TB from the two jump lists */
    tb_remove_from_jmp_list(tb, 0);
    tb_remove_from_jmp_list(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb_jmp_unlink(tb);

    tb_ctx.tb_phys_invalidate_count++;
}
1120
1121#ifdef CONFIG_SOFTMMU
/* Build the bitmap of bytes within page P that are covered by translated
 * code, by walking the page's TB list.  Used by
 * tb_invalidate_phys_page_fast() to skip writes that hit no code. */
static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = bitmap_new(TARGET_PAGE_SIZE);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (uintptr_t)tb & 3;
        tb = (TranslationBlock *)((uintptr_t)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE) {
                tb_end = TARGET_PAGE_SIZE;
            }
        } else {
            /* second page of the TB: covers the start of this page */
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        bitmap_set(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}
1150#endif
1151
1152
1153
1154
1155
/* add the tb in the target page and protect it if necessary
 *
 * Called with mmap_lock held for user-mode emulation.
 * N selects which of the TB's (up to two) pages is being linked.
 */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, tb_page_addr_t page_addr)
{
    PageDesc *p;
#ifndef CONFIG_USER_ONLY
    bool page_already_protected;
#endif

    assert_memory_lock();

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
    tb->page_next[n] = p->first_tb;
#ifndef CONFIG_USER_ONLY
    page_already_protected = p->first_tb != NULL;
#endif
    /* tag the link with n so list walkers know which page it is for */
    p->first_tb = (TranslationBlock *)((uintptr_t)tb | n);
    invalidate_page_bitmap(p);

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        for (addr = page_addr; addr < page_addr + qemu_host_page_size;
            addr += TARGET_PAGE_SIZE) {

            p2 = page_find(addr >> TARGET_PAGE_BITS);
            if (!p2) {
                continue;
            }
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
        }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
        if (DEBUG_TB_INVALIDATE_GATE) {
            printf("protecting code page: 0x" TB_PAGE_ADDR_FMT "\n", page_addr);
        }
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!page_already_protected) {
        tlb_protect_code(page_addr);
    }
#endif
}
1210
1211
1212
1213
1214
1215
/* add a new TB and link it to the physical page tables. phys_page2 is
 * (-1) to indicate that only one page contains the TB.
 *
 * Called with mmap_lock held for user-mode emulation.
 */
static void tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
                         tb_page_addr_t phys_page2)
{
    uint32_t h;

    assert_memory_lock();

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1) {
        tb_alloc_page(tb, 1, phys_page2);
    } else {
        tb->page_addr[1] = -1;
    }

    /* add in the hash table */
    h = tb_hash_func(phys_pc, tb->pc, tb->flags, tb->cflags & CF_HASH_MASK,
                     tb->trace_vcpu_dstate);
    qht_insert(&tb_ctx.htable, tb, h);

#ifdef CONFIG_USER_ONLY
    if (DEBUG_TB_CHECK_GATE) {
        tb_page_check();
    }
#endif
}
1242
1243
/* Called with mmap_lock held for user mode emulation.  Translate the
 * guest code starting at PC into a new TranslationBlock, link it into
 * the page lists / hash table / search tree, and return it. */
TranslationBlock *tb_gen_code(CPUState *cpu,
                              target_ulong pc, target_ulong cs_base,
                              uint32_t flags, int cflags)
{
    CPUArchState *env = cpu->env_ptr;
    TranslationBlock *tb;
    tb_page_addr_t phys_pc, phys_page2;
    target_ulong virt_page2;
    tcg_insn_unit *gen_code_buf;
    int gen_code_size, search_size;
#ifdef CONFIG_PROFILER
    TCGProfile *prof = &tcg_ctx->prof;
    int64_t ti;
#endif
    assert_memory_lock();

    phys_pc = get_page_addr_code(env, pc);

 buffer_overflow:
    tb = tb_alloc(pc);
    if (unlikely(!tb)) {
        /* flush must be done */
        tb_flush(cpu);
        mmap_unlock();
        /* Make the execution loop process the flush as soon as possible.  */
        cpu->exception_index = EXCP_INTERRUPT;
        cpu_loop_exit(cpu);
    }

    gen_code_buf = tcg_ctx->code_gen_ptr;
    tb->tc.ptr = gen_code_buf;
    tb->pc = pc;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    tb->trace_vcpu_dstate = *cpu->trace_dstate;
    tcg_ctx->tb_cflags = cflags;

#ifdef CONFIG_PROFILER
    /* includes aborted translations because of exceptions */
    atomic_set(&prof->tb_count1, prof->tb_count1 + 1);
    ti = profile_getclock();
#endif

    tcg_func_start(tcg_ctx);

    tcg_ctx->cpu = ENV_GET_CPU(env);
    gen_intermediate_code(cpu, tb);
    tcg_ctx->cpu = NULL;

    trace_translate_block(tb, tb->pc, tb->tc.ptr);

    /* generate machine code */
    tb->jmp_reset_offset[0] = TB_JMP_RESET_OFFSET_INVALID;
    tb->jmp_reset_offset[1] = TB_JMP_RESET_OFFSET_INVALID;
    tcg_ctx->tb_jmp_reset_offset = tb->jmp_reset_offset;
    if (TCG_TARGET_HAS_direct_jump) {
        tcg_ctx->tb_jmp_insn_offset = tb->jmp_target_arg;
        tcg_ctx->tb_jmp_target_addr = NULL;
    } else {
        tcg_ctx->tb_jmp_insn_offset = NULL;
        tcg_ctx->tb_jmp_target_addr = tb->jmp_target_arg;
    }

#ifdef CONFIG_PROFILER
    atomic_set(&prof->tb_count, prof->tb_count + 1);
    atomic_set(&prof->interm_time, prof->interm_time + profile_getclock() - ti);
    ti = profile_getclock();
#endif

    /* ??? Overflow could be handled better here.  In particular, we
       don't need to re-do gen_intermediate_code, nor should we re-do
       the tcg optimization currently hidden inside tcg_gen_code.  All
       that should be required is to flush the TBs, allocate a new TB,
       re-initialize it per above, and re-do the actual code generation.  */
    gen_code_size = tcg_gen_code(tcg_ctx, tb);
    if (unlikely(gen_code_size < 0)) {
        goto buffer_overflow;
    }
    search_size = encode_search(tb, (void *)gen_code_buf + gen_code_size);
    if (unlikely(search_size < 0)) {
        goto buffer_overflow;
    }
    tb->tc.size = gen_code_size;

#ifdef CONFIG_PROFILER
    atomic_set(&prof->code_time, prof->code_time + profile_getclock() - ti);
    atomic_set(&prof->code_in_len, prof->code_in_len + tb->size);
    atomic_set(&prof->code_out_len, prof->code_out_len + gen_code_size);
    atomic_set(&prof->search_out_len, prof->search_out_len + search_size);
#endif

#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_OUT_ASM) &&
        qemu_log_in_addr_range(tb->pc)) {
        qemu_log_lock();
        qemu_log("OUT: [size=%d]\n", gen_code_size);
        if (tcg_ctx->data_gen_ptr) {
            size_t code_size = tcg_ctx->data_gen_ptr - tb->tc.ptr;
            size_t data_size = gen_code_size - code_size;
            size_t i;

            log_disas(tb->tc.ptr, code_size);

            /* Dump the constant pool appended after the code. */
            for (i = 0; i < data_size; i += sizeof(tcg_target_ulong)) {
                if (sizeof(tcg_target_ulong) == 8) {
                    qemu_log("0x%08" PRIxPTR ": .quad 0x%016" PRIx64 "\n",
                             (uintptr_t)tcg_ctx->data_gen_ptr + i,
                             *(uint64_t *)(tcg_ctx->data_gen_ptr + i));
                } else {
                    qemu_log("0x%08" PRIxPTR ": .long 0x%08x\n",
                             (uintptr_t)tcg_ctx->data_gen_ptr + i,
                             *(uint32_t *)(tcg_ctx->data_gen_ptr + i));
                }
            }
        } else {
            log_disas(tb->tc.ptr, gen_code_size);
        }
        qemu_log("\n");
        qemu_log_flush();
        qemu_log_unlock();
    }
#endif

    atomic_set(&tcg_ctx->code_gen_ptr, (void *)
        ROUND_UP((uintptr_t)gen_code_buf + gen_code_size + search_size,
                 CODE_GEN_ALIGN));

    /* init jump list */
    assert(((uintptr_t)tb & 3) == 0);
    tb->jmp_list_first = (uintptr_t)tb | 2;
    tb->jmp_list_next[0] = (uintptr_t)NULL;
    tb->jmp_list_next[1] = (uintptr_t)NULL;

    /* init original jump addresses which have been set during tcg_gen_code() */
    if (tb->jmp_reset_offset[0] != TB_JMP_RESET_OFFSET_INVALID) {
        tb_reset_jump(tb, 0);
    }
    if (tb->jmp_reset_offset[1] != TB_JMP_RESET_OFFSET_INVALID) {
        tb_reset_jump(tb, 1);
    }

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_page_addr_code(env, virt_page2);
    }

    /* Publish the TB: add it to the page lists, the hash table, and the
     * host-PC search tree.  We hold the locks here, so no other thread
     * can observe a partially-initialised TB.
     */
    tb_link_page(tb, phys_pc, phys_page2);
    g_tree_insert(tb_ctx.tb_tree, &tb->tc, tb);
    return tb;
}
1401
1402
1403
1404
1405
1406
1407
1408
1409
1410
1411
/*
 * Invalidate all TBs which intersect with the target physical address range
 * [start;end[. NOTE: start and end may refer to *different* physical pages.
 * 'is_cpu_write_access' should be true if called from a real cpu write
 * access: the virtual CPU will exit the current TB if code is modified inside
 * this TB.
 *
 * Called with tb_lock/mmap_lock held for user-mode emulation
 * Called with tb_lock held for system-mode emulation
 */
static void tb_invalidate_phys_range_1(tb_page_addr_t start, tb_page_addr_t end)
{
    while (start < end) {
        tb_invalidate_phys_page_range(start, end, 0);
        /* advance to the start of the next page */
        start &= TARGET_PAGE_MASK;
        start += TARGET_PAGE_SIZE;
    }
}
1420
1421#ifdef CONFIG_SOFTMMU
/* System-mode variant: caller already holds tb_lock. */
void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end)
{
    assert_tb_locked();
    tb_invalidate_phys_range_1(start, end);
}
1427#else
/* User-mode variant: caller holds the mmap lock; take tb_lock here. */
void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end)
{
    assert_memory_lock();
    tb_lock();
    tb_invalidate_phys_range_1(start, end);
    tb_unlock();
}
1435#endif
1436
1437
1438
1439
1440
1441
1442
1443
1444
1445
/*
 * Invalidate all TBs which intersect with the target physical address range
 * [start;end[. NOTE: start and end must refer to the *same* physical page.
 * 'is_cpu_write_access' should be true if called from a real cpu write
 * access: the virtual CPU will exit the current TB if code is modified inside
 * this TB.
 *
 * Called with tb_lock/mmap_lock held for user-mode emulation
 * Called with tb_lock held for system-mode emulation
 */
void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
                                   int is_cpu_write_access)
{
    TranslationBlock *tb, *tb_next;
    tb_page_addr_t tb_start, tb_end;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    CPUState *cpu = current_cpu;
    CPUArchState *env = NULL;
    int current_tb_not_found = is_cpu_write_access;
    TranslationBlock *current_tb = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    uint32_t current_flags = 0;
#endif

    assert_memory_lock();
    assert_tb_locked();

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p) {
        return;
    }
#if defined(TARGET_HAS_PRECISE_SMC)
    if (cpu != NULL) {
        env = cpu->env_ptr;
    }
#endif

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all
       the code */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (uintptr_t)tb & 3;
        tb = (TranslationBlock *)((uintptr_t)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (cpu->mem_io_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(cpu->mem_io_pc);
                }
            }
            if (current_tb == tb &&
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                its execution. We could be more precise by checking
                that the modification is after the current PC, but it
                would require a specialized function to partially
                restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state_from_tb(cpu, current_tb,
                                          cpu->mem_io_pc, true);
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                     &current_flags);
            }
#endif
            tb_phys_invalidate(tb, -1);
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        tlb_unprotect_code(start);
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* Force execution of one insn next time.  */
        cpu->cflags_next_tb = 1 | curr_cflags();
        cpu_loop_exit_noexc(cpu);
    }
#endif
}
1539
1540#ifdef CONFIG_SOFTMMU
1541
1542
1543
1544
/*
 * Fast path for code writes in system mode: consult (or lazily build) the
 * page's code bitmap so that writes which touch no translated code avoid
 * the full invalidation walk.
 *
 * NOTE(review): the single-word bitmap test below assumes len is small
 * (len < BITS_PER_LONG and the write does not straddle a bitmap word) —
 * confirm against the callers' contract.
 */
void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
{
    PageDesc *p;

#if 0
    if (1) {
        qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                 cpu_single_env->mem_io_vaddr, len,
                 cpu_single_env->eip,
                 cpu_single_env->eip +
                 (intptr_t)cpu_single_env->segs[R_CS].base);
    }
#endif
    assert_memory_lock();

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p) {
        return;
    }
    /* Build the bitmap only once the page has seen enough code writes
     * to make the per-write bitmap test worthwhile. */
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD) {
        build_page_bitmap(p);
    }
    if (p->code_bitmap) {
        unsigned int nr;
        unsigned long b;

        /* Test the 'len' bitmap bits covering the written bytes. */
        nr = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG - 1));
        if (b & ((1 << len) - 1)) {
            goto do_invalidate;
        }
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}
1585#else
1586
1587
1588
1589
1590
1591
1592static bool tb_invalidate_phys_page(tb_page_addr_t addr, uintptr_t pc)
1593{
1594 TranslationBlock *tb;
1595 PageDesc *p;
1596 int n;
1597#ifdef TARGET_HAS_PRECISE_SMC
1598 TranslationBlock *current_tb = NULL;
1599 CPUState *cpu = current_cpu;
1600 CPUArchState *env = NULL;
1601 int current_tb_modified = 0;
1602 target_ulong current_pc = 0;
1603 target_ulong current_cs_base = 0;
1604 uint32_t current_flags = 0;
1605#endif
1606
1607 assert_memory_lock();
1608
1609 addr &= TARGET_PAGE_MASK;
1610 p = page_find(addr >> TARGET_PAGE_BITS);
1611 if (!p) {
1612 return false;
1613 }
1614
1615 tb_lock();
1616 tb = p->first_tb;
1617#ifdef TARGET_HAS_PRECISE_SMC
1618 if (tb && pc != 0) {
1619 current_tb = tb_find_pc(pc);
1620 }
1621 if (cpu != NULL) {
1622 env = cpu->env_ptr;
1623 }
1624#endif
1625 while (tb != NULL) {
1626 n = (uintptr_t)tb & 3;
1627 tb = (TranslationBlock *)((uintptr_t)tb & ~3);
1628#ifdef TARGET_HAS_PRECISE_SMC
1629 if (current_tb == tb &&
1630 (current_tb->cflags & CF_COUNT_MASK) != 1) {
1631
1632
1633
1634
1635
1636
1637 current_tb_modified = 1;
1638 cpu_restore_state_from_tb(cpu, current_tb, pc, true);
1639 cpu_get_tb_cpu_state(env, ¤t_pc, ¤t_cs_base,
1640 ¤t_flags);
1641 }
1642#endif
1643 tb_phys_invalidate(tb, addr);
1644 tb = tb->page_next[n];
1645 }
1646 p->first_tb = NULL;
1647#ifdef TARGET_HAS_PRECISE_SMC
1648 if (current_tb_modified) {
1649
1650 cpu->cflags_next_tb = 1 | curr_cflags();
1651
1652
1653 return true;
1654 }
1655#endif
1656 tb_unlock();
1657
1658 return false;
1659}
1660#endif
1661
1662
1663
1664
1665
1666
1667static TranslationBlock *tb_find_pc(uintptr_t tc_ptr)
1668{
1669 struct tb_tc s = { .ptr = (void *)tc_ptr };
1670
1671 return g_tree_lookup(tb_ctx.tb_tree, &s);
1672}
1673
1674#if !defined(CONFIG_USER_ONLY)
/*
 * Invalidate any TB covering the single byte at (as, addr).
 * Translates the address-space address to a RAM offset under the RCU
 * read lock; does nothing if the address does not resolve to RAM or a
 * ROM device.  Takes tb_lock around the actual invalidation.
 */
void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr)
{
    ram_addr_t ram_addr;
    MemoryRegion *mr;
    hwaddr l = 1;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr, &l, false);
    if (!(memory_region_is_ram(mr)
          || memory_region_is_romd(mr))) {
        rcu_read_unlock();
        return;
    }
    ram_addr = memory_region_get_ram_addr(mr) + addr;
    tb_lock();
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
    tb_unlock();
    rcu_read_unlock();
}
1694#endif
1695
1696
/* Invalidate the TB the CPU was executing when a watchpoint fired. */
void tb_check_watchpoint(CPUState *cpu)
{
    TranslationBlock *tb;

    tb = tb_find_pc(cpu->mem_io_pc);
    if (tb) {
        /* We know the TB: restore precise CPU state from it, then
         * throw it away so it is retranslated. */
        cpu_restore_state_from_tb(cpu, tb, cpu->mem_io_pc, true);
        tb_phys_invalidate(tb, -1);
    } else {
        /* No TB found for mem_io_pc — NOTE(review): presumably the
         * access came from a helper with CPU state already saved, so
         * fall back to invalidating the code page at the current
         * guest PC; confirm against the callers. */
        CPUArchState *env = cpu->env_ptr;
        target_ulong pc, cs_base;
        tb_page_addr_t addr;
        uint32_t flags;

        cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
        addr = get_page_addr_code(env, pc);
        tb_invalidate_phys_range(addr, addr + 1);
    }
}
1719
1720#ifndef CONFIG_USER_ONLY
1721
1722
1723
1724
1725
/*
 * Restart execution of the instruction at host return address 'retaddr'
 * in a fresh TB with CF_LAST_IO set, so an instruction doing device I/O
 * ends its TB.  Does not return: exits via cpu_loop_exit_noexc().
 */
void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr)
{
#if defined(TARGET_MIPS) || defined(TARGET_SH4)
    CPUArchState *env = cpu->env_ptr;
#endif
    TranslationBlock *tb;
    uint32_t n;

    tb_lock();
    tb = tb_find_pc(retaddr);
    if (!tb) {
        cpu_abort(cpu, "cpu_io_recompile: could not find TB for pc=%p",
                  (void *)retaddr);
    }
    cpu_restore_state_from_tb(cpu, tb, retaddr, true);

    /* On MIPS and SH4, if the restored PC is inside a delay slot
     * (i.e. not the first insn of the TB), back up to the branch and
     * re-execute two instructions instead of one: delay-slot insns
     * cannot be restarted in isolation. */
    n = 1;
#if defined(TARGET_MIPS)
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0
        && env->active_tc.PC != tb->pc) {
        env->active_tc.PC -= (env->hflags & MIPS_HFLAG_B16 ? 2 : 4);
        cpu->icount_decr.u16.low++;
        env->hflags &= ~MIPS_HFLAG_BMASK;
        n = 2;
    }
#elif defined(TARGET_SH4)
    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
        && env->pc != tb->pc) {
        env->pc -= 2;
        cpu->icount_decr.u16.low++;
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
        n = 2;
    }
#endif

    /* Next TB: execute exactly n insns, with I/O allowed on the last. */
    cpu->cflags_next_tb = curr_cflags() | CF_LAST_IO | n;

    if (tb->cflags & CF_NOCACHE) {
        if (tb->orig_tb) {
            /* This TB was a one-shot copy of another TB; invalidate
             * the original as well. */
            tb_phys_invalidate(tb->orig_tb, -1);
        }
        tb_remove(tb);
    }

    /* Longjmp back to the exec loop; the new cflags_next_tb take
     * effect there.  tb_lock is not released here — NOTE(review):
     * presumably reset by the exec loop after the longjmp; confirm. */
    cpu_loop_exit_noexc(cpu);
}
1788
1789static void tb_jmp_cache_clear_page(CPUState *cpu, target_ulong page_addr)
1790{
1791 unsigned int i, i0 = tb_jmp_cache_hash_page(page_addr);
1792
1793 for (i = 0; i < TB_JMP_PAGE_SIZE; i++) {
1794 atomic_set(&cpu->tb_jmp_cache[i0 + i], NULL);
1795 }
1796}
1797
1798void tb_flush_jmp_cache(CPUState *cpu, target_ulong addr)
1799{
1800
1801
1802 tb_jmp_cache_clear_page(cpu, addr - TARGET_PAGE_SIZE);
1803 tb_jmp_cache_clear_page(cpu, addr);
1804}
1805
1806static void print_qht_statistics(FILE *f, fprintf_function cpu_fprintf,
1807 struct qht_stats hst)
1808{
1809 uint32_t hgram_opts;
1810 size_t hgram_bins;
1811 char *hgram;
1812
1813 if (!hst.head_buckets) {
1814 return;
1815 }
1816 cpu_fprintf(f, "TB hash buckets %zu/%zu (%0.2f%% head buckets used)\n",
1817 hst.used_head_buckets, hst.head_buckets,
1818 (double)hst.used_head_buckets / hst.head_buckets * 100);
1819
1820 hgram_opts = QDIST_PR_BORDER | QDIST_PR_LABELS;
1821 hgram_opts |= QDIST_PR_100X | QDIST_PR_PERCENT;
1822 if (qdist_xmax(&hst.occupancy) - qdist_xmin(&hst.occupancy) == 1) {
1823 hgram_opts |= QDIST_PR_NODECIMAL;
1824 }
1825 hgram = qdist_pr(&hst.occupancy, 10, hgram_opts);
1826 cpu_fprintf(f, "TB hash occupancy %0.2f%% avg chain occ. Histogram: %s\n",
1827 qdist_avg(&hst.occupancy) * 100, hgram);
1828 g_free(hgram);
1829
1830 hgram_opts = QDIST_PR_BORDER | QDIST_PR_LABELS;
1831 hgram_bins = qdist_xmax(&hst.chain) - qdist_xmin(&hst.chain);
1832 if (hgram_bins > 10) {
1833 hgram_bins = 10;
1834 } else {
1835 hgram_bins = 0;
1836 hgram_opts |= QDIST_PR_NODECIMAL | QDIST_PR_NOBINRANGE;
1837 }
1838 hgram = qdist_pr(&hst.chain, hgram_bins, hgram_opts);
1839 cpu_fprintf(f, "TB hash avg chain %0.3f buckets. Histogram: %s\n",
1840 qdist_avg(&hst.chain), hgram);
1841 g_free(hgram);
1842}
1843
/* Aggregate statistics gathered over all TBs by tb_tree_stats_iter(). */
struct tb_tree_stats {
    size_t host_size;          /* total generated host code bytes (tc.size) */
    size_t target_size;        /* total guest code bytes (tb->size) */
    size_t max_target_size;    /* largest single TB in guest bytes */
    size_t direct_jmp_count;   /* TBs with at least one direct jump */
    size_t direct_jmp2_count;  /* TBs with two direct jumps */
    size_t cross_page;         /* TBs spanning two guest pages */
};
1852
1853static gboolean tb_tree_stats_iter(gpointer key, gpointer value, gpointer data)
1854{
1855 const TranslationBlock *tb = value;
1856 struct tb_tree_stats *tst = data;
1857
1858 tst->host_size += tb->tc.size;
1859 tst->target_size += tb->size;
1860 if (tb->size > tst->max_target_size) {
1861 tst->max_target_size = tb->size;
1862 }
1863 if (tb->page_addr[1] != -1) {
1864 tst->cross_page++;
1865 }
1866 if (tb->jmp_reset_offset[0] != TB_JMP_RESET_OFFSET_INVALID) {
1867 tst->direct_jmp_count++;
1868 if (tb->jmp_reset_offset[1] != TB_JMP_RESET_OFFSET_INVALID) {
1869 tst->direct_jmp2_count++;
1870 }
1871 }
1872 return false;
1873}
1874
/* Dump translation-buffer and hash-table statistics to 'f'. */
void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
{
    struct tb_tree_stats tst = {};
    struct qht_stats hst;
    size_t nb_tbs;

    tb_lock();

    nb_tbs = g_tree_nnodes(tb_ctx.tb_tree);
    g_tree_foreach(tb_ctx.tb_tree, tb_tree_stats_iter, &tst);

    cpu_fprintf(f, "Translation buffer state:\n");
    /* Code size here is the whole region (tcg_code_size vs capacity);
     * the per-TB averages below come from the precise tree stats. */
    cpu_fprintf(f, "gen code size %zu/%zu\n",
                tcg_code_size(), tcg_code_capacity());
    cpu_fprintf(f, "TB count %zu\n", nb_tbs);
    cpu_fprintf(f, "TB avg target size %zu max=%zu bytes\n",
                nb_tbs ? tst.target_size / nb_tbs : 0,
                tst.max_target_size);
    cpu_fprintf(f, "TB avg host size %zu bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? tst.host_size / nb_tbs : 0,
                tst.target_size ? (double)tst.host_size / tst.target_size : 0);
    cpu_fprintf(f, "cross page TB count %zu (%zu%%)\n", tst.cross_page,
                nb_tbs ? (tst.cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count %zu (%zu%%) (2 jumps=%zu %zu%%)\n",
                tst.direct_jmp_count,
                nb_tbs ? (tst.direct_jmp_count * 100) / nb_tbs : 0,
                tst.direct_jmp2_count,
                nb_tbs ? (tst.direct_jmp2_count * 100) / nb_tbs : 0);

    qht_statistics_init(&tb_ctx.htable, &hst);
    print_qht_statistics(f, cpu_fprintf, hst);
    qht_statistics_destroy(&hst);

    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count %u\n",
                atomic_read(&tb_ctx.tb_flush_count));
    cpu_fprintf(f, "TB invalidate count %d\n", tb_ctx.tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count %zu\n", tlb_flush_count());
    tcg_dump_info(f, cpu_fprintf);

    tb_unlock();
}
1922
/* Dump TCG opcode usage counts to 'f'; thin wrapper over tcg_dump_op_count. */
void dump_opcount_info(FILE *f, fprintf_function cpu_fprintf)
{
    tcg_dump_op_count(f, cpu_fprintf);
}
1927
1928#else
1929
/*
 * User-mode emulation: raise the interrupt bits in 'mask' on 'cpu'.
 * Requires the iothread mutex (asserted).  Setting icount_decr.u16.high
 * to -1 presumably makes the TCG execution loop notice the request —
 * NOTE(review): confirm against cpu_exec.
 */
void cpu_interrupt(CPUState *cpu, int mask)
{
    g_assert(qemu_mutex_iothread_locked());
    cpu->interrupt_request |= mask;
    cpu->icount_decr.u16.high = -1;
}
1936
1937
1938
1939
1940
/* State threaded through the recursive page-table walk in
 * walk_memory_regions_1(): 'start'/'prot' describe the region currently
 * being accumulated ('start' == -1u means no open region); 'fn'/'priv'
 * are the client callback and its opaque argument. */
struct walk_memory_regions_data {
    walk_memory_regions_fn fn;
    void *priv;
    target_ulong start;
    int prot;
};
1947
1948static int walk_memory_regions_end(struct walk_memory_regions_data *data,
1949 target_ulong end, int new_prot)
1950{
1951 if (data->start != -1u) {
1952 int rc = data->fn(data->priv, data->start, end, data->prot);
1953 if (rc != 0) {
1954 return rc;
1955 }
1956 }
1957
1958 data->start = (new_prot ? end : -1u);
1959 data->prot = new_prot;
1960
1961 return 0;
1962}
1963
/*
 * Recursive helper for walk_memory_regions(): descend one level of the
 * multi-level page map rooted at 'lp'.  At level 0 the entries are
 * PageDescs whose flags are compared against the open region; a change
 * in protection closes the region via walk_memory_regions_end().
 * Returns the first non-zero callback result, 0 on full traversal.
 */
static int walk_memory_regions_1(struct walk_memory_regions_data *data,
                                 target_ulong base, int level, void **lp)
{
    target_ulong pa;
    int i, rc;

    if (*lp == NULL) {
        /* Hole in the map: close any open region at 'base'. */
        return walk_memory_regions_end(data, base, 0);
    }

    if (level == 0) {
        PageDesc *pd = *lp;

        for (i = 0; i < V_L2_SIZE; ++i) {
            int prot = pd[i].flags;

            pa = base | (i << TARGET_PAGE_BITS);
            if (prot != data->prot) {
                rc = walk_memory_regions_end(data, pa, prot);
                if (rc != 0) {
                    return rc;
                }
            }
        }
    } else {
        void **pp = *lp;

        for (i = 0; i < V_L2_SIZE; ++i) {
            pa = base | ((target_ulong)i <<
                         (TARGET_PAGE_BITS + V_L2_BITS * level));
            rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
            if (rc != 0) {
                return rc;
            }
        }
    }

    return 0;
}
2003
2004int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
2005{
2006 struct walk_memory_regions_data data;
2007 uintptr_t i, l1_sz = v_l1_size;
2008
2009 data.fn = fn;
2010 data.priv = priv;
2011 data.start = -1u;
2012 data.prot = 0;
2013
2014 for (i = 0; i < l1_sz; i++) {
2015 target_ulong base = i << (v_l1_shift + TARGET_PAGE_BITS);
2016 int rc = walk_memory_regions_1(&data, base, v_l2_levels, l1_map + i);
2017 if (rc != 0) {
2018 return rc;
2019 }
2020 }
2021
2022 return walk_memory_regions_end(&data, 0, 0);
2023}
2024
2025static int dump_region(void *priv, target_ulong start,
2026 target_ulong end, unsigned long prot)
2027{
2028 FILE *f = (FILE *)priv;
2029
2030 (void) fprintf(f, TARGET_FMT_lx"-"TARGET_FMT_lx
2031 " "TARGET_FMT_lx" %c%c%c\n",
2032 start, end, end - start,
2033 ((prot & PAGE_READ) ? 'r' : '-'),
2034 ((prot & PAGE_WRITE) ? 'w' : '-'),
2035 ((prot & PAGE_EXEC) ? 'x' : '-'));
2036
2037 return 0;
2038}
2039
2040
2041void page_dump(FILE *f)
2042{
2043 const int length = sizeof(target_ulong) * 2;
2044 (void) fprintf(f, "%-*s %-*s %-*s %s\n",
2045 length, "start", length, "end", length, "size", "prot");
2046 walk_memory_regions(f, dump_region);
2047}
2048
2049int page_get_flags(target_ulong address)
2050{
2051 PageDesc *p;
2052
2053 p = page_find(address >> TARGET_PAGE_BITS);
2054 if (!p) {
2055 return 0;
2056 }
2057 return p->flags;
2058}
2059
2060
2061
2062
/*
 * Modify the flags of every page in [start, end) and invalidate any
 * translated code in pages losing write protection.  PAGE_WRITE_ORG is
 * set automatically whenever PAGE_WRITE is requested.  The memory lock
 * must be held (asserted).
 */
void page_set_flags(target_ulong start, target_ulong end, int flags)
{
    target_ulong addr, len;

    /* This function should never be called with addresses outside the
     * guest address space; when the L1 map cannot cover the full ABI
     * range, check the bound explicitly. */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(end <= ((target_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
#endif
    assert(start < end);
    assert_memory_lock();

    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    if (flags & PAGE_WRITE) {
        flags |= PAGE_WRITE_ORG;
    }

    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);

        /* A page becoming writable may hold translated code that could
         * now be modified behind our back: invalidate it first. */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr, 0);
        }
        p->flags = flags;
    }
}
2098
/*
 * Check that the guest range [start, start+len) is mapped with at least
 * the protections in 'flags'.  Returns 0 on success, -1 on any failure
 * (unmapped page, insufficient protection, or address wrap-around).
 * Pages that are read-only only because they contain translated code
 * are unprotected on demand via page_unprotect().
 */
int page_check_range(target_ulong start, target_ulong len, int flags)
{
    PageDesc *p;
    target_ulong end;
    target_ulong addr;

    /* This function should never be called with addresses outside the
     * guest address space; when the L1 map cannot cover the full ABI
     * range, check the bound explicitly. */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(start < ((target_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
#endif

    if (len == 0) {
        return 0;
    }
    if (start + len - 1 < start) {
        /* We've wrapped around.  */
        return -1;
    }

    /* Must do before we lose bits in the next step.  */
    end = TARGET_PAGE_ALIGN(start + len);
    start = start & TARGET_PAGE_MASK;

    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        p = page_find(addr >> TARGET_PAGE_BITS);
        if (!p) {
            return -1;
        }
        if (!(p->flags & PAGE_VALID)) {
            return -1;
        }

        if ((flags & PAGE_READ) && !(p->flags & PAGE_READ)) {
            return -1;
        }
        if (flags & PAGE_WRITE) {
            if (!(p->flags & PAGE_WRITE_ORG)) {
                return -1;
            }
            /* Unprotect the page if it was put read-only because it
             * contains translated code.  */
            if (!(p->flags & PAGE_WRITE)) {
                if (!page_unprotect(addr, 0)) {
                    return -1;
                }
            }
        }
    }
    return 0;
}
2153
2154
2155
2156
2157
2158
2159
/*
 * Called (e.g. from a SIGSEGV fault path) to restore write access to a
 * page that was write-protected because it holds translated code,
 * invalidating that code in the process.
 *
 * Returns 0 if the fault was not caused by our write protection,
 * 1 if it was handled, and 2 if it was handled but the currently
 * executing TB was itself invalidated (caller must exit the TB).
 */
int page_unprotect(target_ulong address, uintptr_t pc)
{
    unsigned int prot;
    bool current_tb_invalidated;
    PageDesc *p;
    target_ulong host_start, host_end, addr;

    /* Technically this isn't safe inside a signal handler.  However we
     * know this only ever happens in a synchronous SEGV handler, so in
     * practice it seems to be ok — NOTE(review): confirm the lock is
     * not already held on any path reaching here. */
    mmap_lock();

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p) {
        mmap_unlock();
        return 0;
    }

    /* If the page was really writable, then we change its protection
     * back to writable; PAGE_WRITE_ORG distinguishes "we protected it"
     * from "the guest mapped it read-only". */
    if (p->flags & PAGE_WRITE_ORG) {
        current_tb_invalidated = false;
        if (p->flags & PAGE_WRITE) {
            /* Another thread already unprotected the page between the
             * fault and our taking of mmap_lock; we only need to find
             * out whether our own TB got invalidated in the process. */
#ifdef TARGET_HAS_PRECISE_SMC
            TranslationBlock *current_tb = tb_find_pc(pc);
            if (current_tb) {
                current_tb_invalidated = tb_cflags(current_tb) & CF_INVALID;
            }
#endif
        } else {
            host_start = address & qemu_host_page_mask;
            host_end = host_start + qemu_host_page_size;

            prot = 0;
            /* A host page may cover several guest pages: restore
             * PAGE_WRITE on all of them and invalidate their code. */
            for (addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE) {
                p = page_find(addr >> TARGET_PAGE_BITS);
                p->flags |= PAGE_WRITE;
                prot |= p->flags;

                current_tb_invalidated |= tb_invalidate_phys_page(addr, pc);
#ifdef CONFIG_USER_ONLY
                if (DEBUG_TB_CHECK_GATE) {
                    tb_invalidate_check(addr);
                }
#endif
            }
            mprotect((void *)g2h(host_start), qemu_host_page_size,
                     prot & PAGE_BITS);
        }
        mmap_unlock();
        /* If current TB was invalidated return to main loop */
        return current_tb_invalidated ? 2 : 1;
    }
    mmap_unlock();
    return 0;
}
2222#endif
2223
2224
/* Flush the CPU's TLB under softmmu; a no-op in user-mode builds,
 * where there is no softmmu TLB. */
void tcg_flush_softmmu_tlb(CPUState *cs)
{
#ifdef CONFIG_SOFTMMU
    tlb_flush(cs);
#endif
}
2231