/*
 *  Host code generation
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#ifdef _WIN32
#include <windows.h>
#endif
#include "qemu/osdep.h"

#include "qemu-common.h"
#define NO_CPU_IO_DEFS
#include "cpu.h"
#include "trace.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg.h"
#if defined(CONFIG_USER_ONLY)
#include "qemu.h"
#include "exec/exec-all.h"
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
#include <sys/param.h>
#if __FreeBSD_version >= 700104
#define HAVE_KINFO_GETVMMAP
#define sigqueue sigqueue_freebsd
#include <sys/proc.h>
#include <machine/profile.h>
#define _KERNEL
#include <sys/user.h>
#undef _KERNEL
#undef sigqueue
#include <libutil.h>
#endif
#endif
#else
#include "exec/address-spaces.h"
#endif

#include "exec/cputlb.h"
#include "exec/tb-hash.h"
#include "translate-all.h"
#include "qemu/bitmap.h"
#include "qemu/error-report.h"
#include "qemu/timer.h"
#include "qemu/main-loop.h"
#include "exec/log.h"
#include "sysemu/cpus.h"

#ifdef DEBUG_TB_INVALIDATE
#define DEBUG_TB_INVALIDATE_GATE 1
#else
#define DEBUG_TB_INVALIDATE_GATE 0
#endif

#ifdef DEBUG_TB_FLUSH
#define DEBUG_TB_FLUSH_GATE 1
#else
#define DEBUG_TB_FLUSH_GATE 0
#endif

#if !defined(CONFIG_USER_ONLY)
#undef DEBUG_TB_CHECK
#endif

#ifdef DEBUG_TB_CHECK
#define DEBUG_TB_CHECK_GATE 1
#else
#define DEBUG_TB_CHECK_GATE 0
#endif

#ifdef CONFIG_SOFTMMU
#define assert_memory_lock() tcg_debug_assert(have_tb_lock)
#else
#define assert_memory_lock() tcg_debug_assert(have_mmap_lock())
#endif

#define SMC_BITMAP_USE_THRESHOLD 10

typedef struct PageDesc {
    TranslationBlock *first_tb;
#ifdef CONFIG_SOFTMMU
    unsigned int code_write_count;
    unsigned long *code_bitmap;
#else
    unsigned long flags;
#endif
} PageDesc;

#if !defined(CONFIG_USER_ONLY)
#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
# define L1_MAP_ADDR_SPACE_BITS HOST_LONG_BITS
#else
# define L1_MAP_ADDR_SPACE_BITS TARGET_PHYS_ADDR_SPACE_BITS
#endif
#else
# define L1_MAP_ADDR_SPACE_BITS TARGET_VIRT_ADDR_SPACE_BITS
#endif

#define V_L2_BITS 10
#define V_L2_SIZE (1 << V_L2_BITS)

QEMU_BUILD_BUG_ON(CPU_TRACE_DSTATE_MAX_EVENTS >
                  sizeof(((TranslationBlock *)0)->trace_vcpu_dstate)
                  * BITS_PER_BYTE);

static int v_l1_size;
static int v_l1_shift;
static int v_l2_levels;

#define V_L1_MIN_BITS 4
#define V_L1_MAX_BITS (V_L2_BITS + 3)
#define V_L1_MAX_SIZE (1 << V_L1_MAX_BITS)

static void *l1_map[V_L1_MAX_SIZE];

TCGContext tcg_init_ctx;
__thread TCGContext *tcg_ctx;
TBContext tb_ctx;
bool parallel_cpus;

static __thread int have_tb_lock;

static void page_table_config_init(void)
{
    uint32_t v_l1_bits;

    assert(TARGET_PAGE_BITS);
    v_l1_bits = (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % V_L2_BITS;
    if (v_l1_bits < V_L1_MIN_BITS) {
        v_l1_bits += V_L2_BITS;
    }

    v_l1_size = 1 << v_l1_bits;
    v_l1_shift = L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - v_l1_bits;
    v_l2_levels = v_l1_shift / V_L2_BITS - 1;

    assert(v_l1_bits <= V_L1_MAX_BITS);
    assert(v_l1_shift % V_L2_BITS == 0);
    assert(v_l2_levels >= 0);
}

#define assert_tb_locked() tcg_debug_assert(have_tb_lock)
#define assert_tb_unlocked() tcg_debug_assert(!have_tb_lock)

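/* The TB lock protects the translation structures (TB hash table, TB tree
 * and page descriptors).  The per-thread have_tb_lock counter backs the
 * assert_tb_locked()/assert_tb_unlocked() checks above.
 */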
void tb_lock(void)
{
    assert_tb_unlocked();
    qemu_mutex_lock(&tb_ctx.tb_lock);
    have_tb_lock++;
}

void tb_unlock(void)
{
    assert_tb_locked();
    have_tb_lock--;
    qemu_mutex_unlock(&tb_ctx.tb_lock);
}

void tb_lock_reset(void)
{
    if (have_tb_lock) {
        qemu_mutex_unlock(&tb_ctx.tb_lock);
        have_tb_lock = 0;
    }
}

static TranslationBlock *tb_find_pc(uintptr_t tc_ptr);

void cpu_gen_init(void)
{
    tcg_context_init(&tcg_init_ctx);
}

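/* Encode VAL as a signed LEB128 sequence at P.
   Return P incremented past the encoded value.  */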
static uint8_t *encode_sleb128(uint8_t *p, target_long val)
{
    int more, byte;

    do {
        byte = val & 0x7f;
        val >>= 7;
        more = !((val == 0 && (byte & 0x40) == 0)
                 || (val == -1 && (byte & 0x40) != 0));
        if (more) {
            byte |= 0x80;
        }
        *p++ = byte;
    } while (more);

    return p;
}

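/* Return the value encoded at *PP, and increment *PP past the encoded
   value.  */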
static target_long decode_sleb128(uint8_t **pp)
{
    uint8_t *p = *pp;
    target_long val = 0;
    int byte, shift = 0;

    do {
        byte = *p++;
        val |= (target_ulong)(byte & 0x7f) << shift;
        shift += 7;
    } while (byte & 0x80);
    if (shift < TARGET_LONG_BITS && (byte & 0x40)) {
        val |= -(target_ulong)1 << shift;
    }

    *pp = p;
    return val;
}

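/* Encode the data collected about the instructions while compiling TB.
   Place the data at BLOCK, and return the number of bytes consumed.

   Each row of the table covers one guest instruction and consists of
   TARGET_INSN_START_WORDS target_ulong's (the insn_start operands) plus
   the offset of the host code that ends the instruction.  Rows are stored
   as sleb128 deltas from the previous row; the seed for the first row is
   { tb->pc, 0, ..., 0 }.  */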
static int encode_search(TranslationBlock *tb, uint8_t *block)
{
    uint8_t *highwater = tcg_ctx->code_gen_highwater;
    uint8_t *p = block;
    int i, j, n;

    for (i = 0, n = tb->icount; i < n; ++i) {
        target_ulong prev;

        for (j = 0; j < TARGET_INSN_START_WORDS; ++j) {
            if (i == 0) {
                prev = (j == 0 ? tb->pc : 0);
            } else {
                prev = tcg_ctx->gen_insn_data[i - 1][j];
            }
            p = encode_sleb128(p, tcg_ctx->gen_insn_data[i][j] - prev);
        }
        prev = (i == 0 ? 0 : tcg_ctx->gen_insn_end_off[i - 1]);
        p = encode_sleb128(p, tcg_ctx->gen_insn_end_off[i] - prev);

        if (unlikely(p > highwater)) {
            return -1;
        }
    }

    return p - block;
}

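/* The cpu state corresponding to 'searched_pc' is restored.
 * Called with tb_lock held.
 */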
static int cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb,
                                     uintptr_t searched_pc)
{
    target_ulong data[TARGET_INSN_START_WORDS] = { tb->pc };
    uintptr_t host_pc = (uintptr_t)tb->tc.ptr;
    CPUArchState *env = cpu->env_ptr;
    uint8_t *p = tb->tc.ptr + tb->tc.size;
    int i, j, num_insns = tb->icount;
#ifdef CONFIG_PROFILER
    TCGProfile *prof = &tcg_ctx->prof;
    int64_t ti = profile_getclock();
#endif

    searched_pc -= GETPC_ADJ;

    if (searched_pc < host_pc) {
        return -1;
    }

    for (i = 0; i < num_insns; ++i) {
        for (j = 0; j < TARGET_INSN_START_WORDS; ++j) {
            data[j] += decode_sleb128(&p);
        }
        host_pc += decode_sleb128(&p);
        if (host_pc > searched_pc) {
            goto found;
        }
    }
    return -1;

 found:
    if (tb->cflags & CF_USE_ICOUNT) {
        assert(use_icount);
        cpu->icount_decr.u16.low += num_insns;
        cpu->can_do_io = 0;
    }
    cpu->icount_decr.u16.low -= i;
    restore_state_to_opc(env, tb, data);

#ifdef CONFIG_PROFILER
    atomic_set(&prof->restore_time,
               prof->restore_time + profile_getclock() - ti);
    atomic_set(&prof->restore_count, prof->restore_count + 1);
#endif
    return 0;
}

bool cpu_restore_state(CPUState *cpu, uintptr_t host_pc)
{
    TranslationBlock *tb;
    bool r = false;
    uintptr_t check_offset;

    check_offset = host_pc - (uintptr_t) tcg_init_ctx.code_gen_buffer;

    if (check_offset < tcg_init_ctx.code_gen_buffer_size) {
        tb_lock();
        tb = tb_find_pc(host_pc);
        if (tb) {
            cpu_restore_state_from_tb(cpu, tb, host_pc);
            if (tb->cflags & CF_NOCACHE) {
                tb_phys_invalidate(tb, -1);
                tb_remove(tb);
            }
            r = true;
        }
        tb_unlock();
    }

    return r;
}

static void page_init(void)
{
    page_size_init();
    page_table_config_init();

#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
    {
#ifdef HAVE_KINFO_GETVMMAP
        struct kinfo_vmentry *freep;
        int i, cnt;

        freep = kinfo_getvmmap(getpid(), &cnt);
        if (freep) {
            mmap_lock();
            for (i = 0; i < cnt; i++) {
                unsigned long startaddr, endaddr;

                startaddr = freep[i].kve_start;
                endaddr = freep[i].kve_end;
                if (h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                    } else {
#if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
                        endaddr = ~0ul;
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
#endif
                    }
                }
            }
            free(freep);
            mmap_unlock();
        }
#else
        FILE *f;

        last_brk = (unsigned long)sbrk(0);

        f = fopen("/compat/linux/proc/self/maps", "r");
        if (f) {
            mmap_lock();

            do {
                unsigned long startaddr, endaddr;
                int n;

                n = fscanf(f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);

                if (n == 2 && h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                    } else {
                        endaddr = ~0ul;
                    }
                    page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                }
            } while (!feof(f));

            fclose(f);
            mmap_unlock();
        }
#endif
    }
#endif
}

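/* Look up, and optionally allocate, the PageDesc for a given target page
 * index.  Returns NULL when alloc is 0 and the descriptor does not exist.
 * Lookups use RCU-style reads of the multi-level l1_map; allocation
 * requires the memory lock (see assert_memory_lock below).
 */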
static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
{
    PageDesc *pd;
    void **lp;
    int i;

    if (alloc) {
        assert_memory_lock();
    }

    lp = l1_map + ((index >> v_l1_shift) & (v_l1_size - 1));

    for (i = v_l2_levels; i > 0; i--) {
        void **p = atomic_rcu_read(lp);

        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            p = g_new0(void *, V_L2_SIZE);
            atomic_rcu_set(lp, p);
        }

        lp = p + ((index >> (i * V_L2_BITS)) & (V_L2_SIZE - 1));
    }

    pd = atomic_rcu_read(lp);
    if (pd == NULL) {
        if (!alloc) {
            return NULL;
        }
        pd = g_new0(PageDesc, V_L2_SIZE);
        atomic_rcu_set(lp, pd);
    }

    return pd + (index & (V_L2_SIZE - 1));
}

static inline PageDesc *page_find(tb_page_addr_t index)
{
    return page_find_alloc(index, 0);
}

#if defined(CONFIG_USER_ONLY)
#define USE_STATIC_CODE_GEN_BUFFER
#endif

#define MIN_CODE_GEN_BUFFER_SIZE (1024u * 1024)

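/* Maximum size of the code gen buffer we'd like to use.  Unless otherwise
   indicated, this is constrained by the range of direct branches on the
   host cpu, as used by the TCG implementation of goto_tb.  */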
#if defined(__x86_64__)
# define MAX_CODE_GEN_BUFFER_SIZE (2ul * 1024 * 1024 * 1024)
#elif defined(__sparc__)
# define MAX_CODE_GEN_BUFFER_SIZE (2ul * 1024 * 1024 * 1024)
#elif defined(__powerpc64__)
# define MAX_CODE_GEN_BUFFER_SIZE (2ul * 1024 * 1024 * 1024)
#elif defined(__powerpc__)
# define MAX_CODE_GEN_BUFFER_SIZE (32u * 1024 * 1024)
#elif defined(__aarch64__)
# define MAX_CODE_GEN_BUFFER_SIZE (2ul * 1024 * 1024 * 1024)
#elif defined(__s390x__)
# define MAX_CODE_GEN_BUFFER_SIZE (3ul * 1024 * 1024 * 1024)
#elif defined(__mips__)
# define MAX_CODE_GEN_BUFFER_SIZE (128ul * 1024 * 1024)
#else
# define MAX_CODE_GEN_BUFFER_SIZE ((size_t)-1)
#endif

#define DEFAULT_CODE_GEN_BUFFER_SIZE_1 (32u * 1024 * 1024)

#define DEFAULT_CODE_GEN_BUFFER_SIZE \
  (DEFAULT_CODE_GEN_BUFFER_SIZE_1 < MAX_CODE_GEN_BUFFER_SIZE \
   ? DEFAULT_CODE_GEN_BUFFER_SIZE_1 : MAX_CODE_GEN_BUFFER_SIZE)

static inline size_t size_code_gen_buffer(size_t tb_size)
{
    if (tb_size == 0) {
#ifdef USE_STATIC_CODE_GEN_BUFFER
        tb_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        tb_size = (unsigned long)(ram_size / 4);
#endif
    }
    if (tb_size < MIN_CODE_GEN_BUFFER_SIZE) {
        tb_size = MIN_CODE_GEN_BUFFER_SIZE;
    }
    if (tb_size > MAX_CODE_GEN_BUFFER_SIZE) {
        tb_size = MAX_CODE_GEN_BUFFER_SIZE;
    }
    return tb_size;
}

#ifdef __mips__
static inline bool cross_256mb(void *addr, size_t size)
{
    return ((uintptr_t)addr ^ ((uintptr_t)addr + size)) & ~0x0ffffffful;
}

static inline void *split_cross_256mb(void *buf1, size_t size1)
{
    void *buf2 = (void *)(((uintptr_t)buf1 + size1) & ~0x0ffffffful);
    size_t size2 = buf1 + size1 - buf2;

    size1 = buf2 - buf1;
    if (size1 < size2) {
        size1 = size2;
        buf1 = buf2;
    }

    tcg_ctx->code_gen_buffer_size = size1;
    return buf1;
}
#endif

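/* Three allocation strategies for the code generation buffer follow: a
 * static BSS array (user-only builds), VirtualAlloc on Win32, and an
 * anonymous mmap elsewhere.  Each returns an RWX region of
 * code_gen_buffer_size bytes, or NULL/abort on failure.
 */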
#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
    __attribute__((aligned(CODE_GEN_ALIGN)));

static inline void *alloc_code_gen_buffer(void)
{
    void *buf = static_code_gen_buffer;
    void *end = static_code_gen_buffer + sizeof(static_code_gen_buffer);
    size_t size;

    buf = QEMU_ALIGN_PTR_UP(buf, qemu_real_host_page_size);
    end = QEMU_ALIGN_PTR_DOWN(end, qemu_real_host_page_size);

    size = end - buf;

    if (size > tcg_ctx->code_gen_buffer_size) {
        size = QEMU_ALIGN_DOWN(tcg_ctx->code_gen_buffer_size,
                               qemu_real_host_page_size);
    }
    tcg_ctx->code_gen_buffer_size = size;

#ifdef __mips__
    if (cross_256mb(buf, size)) {
        buf = split_cross_256mb(buf, size);
        size = tcg_ctx->code_gen_buffer_size;
    }
#endif

    if (qemu_mprotect_rwx(buf, size)) {
        abort();
    }
    qemu_madvise(buf, size, QEMU_MADV_HUGEPAGE);

    return buf;
}
#elif defined(_WIN32)
static inline void *alloc_code_gen_buffer(void)
{
    size_t size = tcg_ctx->code_gen_buffer_size;
    void *buf;

    buf = VirtualAlloc(NULL, size, MEM_RESERVE | MEM_COMMIT,
                       PAGE_EXECUTE_READWRITE);
    return buf;
}
#else
static inline void *alloc_code_gen_buffer(void)
{
    int prot = PROT_WRITE | PROT_READ | PROT_EXEC;
    int flags = MAP_PRIVATE | MAP_ANONYMOUS;
    uintptr_t start = 0;
    size_t size = tcg_ctx->code_gen_buffer_size;
    void *buf;

# if defined(__PIE__) || defined(__PIC__)

# elif defined(__x86_64__) && defined(MAP_32BIT)
    flags |= MAP_32BIT;
    if (size > 800u * 1024 * 1024) {
        tcg_ctx->code_gen_buffer_size = size = 800u * 1024 * 1024;
    }
# elif defined(__sparc__)
    start = 0x40000000ul;
# elif defined(__s390x__)
    start = 0x90000000ul;
# elif defined(__mips__)
#  if _MIPS_SIM == _ABI64
    start = 0x128000000ul;
#  else
    start = 0x08000000ul;
#  endif
# endif

    buf = mmap((void *)start, size, prot, flags, -1, 0);
    if (buf == MAP_FAILED) {
        return NULL;
    }

#ifdef __mips__
    if (cross_256mb(buf, size)) {
        size_t size2;
        void *buf2 = mmap(NULL, size, prot, flags, -1, 0);
        switch ((int)(buf2 != MAP_FAILED)) {
        case 1:
            if (!cross_256mb(buf2, size)) {
                munmap(buf, size);
                break;
            }
            munmap(buf2, size);
        default:
            buf2 = split_cross_256mb(buf, size);
            size2 = tcg_ctx->code_gen_buffer_size;
            if (buf == buf2) {
                munmap(buf + size2, size - size2);
            } else {
                munmap(buf, size - size2);
            }
            size = size2;
            break;
        }
        buf = buf2;
    }
#endif

    qemu_madvise(buf, size, QEMU_MADV_HUGEPAGE);

    return buf;
}
#endif

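/* tb_ctx.tb_tree maps struct tb_tc keys, ordered by host code pointer, so
 * that a host PC can be mapped back to its TranslationBlock.  A lookup key
 * has size == 0, which the comparator treats as "find the block whose code
 * range contains this pointer".
 */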
static int ptr_cmp_tb_tc(const void *ptr, const struct tb_tc *s)
{
    if (ptr >= s->ptr + s->size) {
        return 1;
    } else if (ptr < s->ptr) {
        return -1;
    }
    return 0;
}

static gint tb_tc_cmp(gconstpointer ap, gconstpointer bp)
{
    const struct tb_tc *a = ap;
    const struct tb_tc *b = bp;

    if (likely(a->size && b->size)) {
        if (a->ptr > b->ptr) {
            return 1;
        } else if (a->ptr < b->ptr) {
            return -1;
        }
        g_assert(a->size == b->size);
        return 0;
    }

    if (likely(a->size == 0)) {
        return ptr_cmp_tb_tc(a->ptr, b);
    }
    return ptr_cmp_tb_tc(b->ptr, a);
}

static inline void code_gen_alloc(size_t tb_size)
{
    tcg_ctx->code_gen_buffer_size = size_code_gen_buffer(tb_size);
    tcg_ctx->code_gen_buffer = alloc_code_gen_buffer();
    if (tcg_ctx->code_gen_buffer == NULL) {
        fprintf(stderr, "Could not allocate dynamic translator buffer\n");
        exit(1);
    }
    tb_ctx.tb_tree = g_tree_new(tb_tc_cmp);
    qemu_mutex_init(&tb_ctx.tb_lock);
}

static void tb_htable_init(void)
{
    unsigned int mode = QHT_MODE_AUTO_RESIZE;

    qht_init(&tb_ctx.htable, CODE_GEN_HTABLE_SIZE, mode);
}

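/* Must be called before using the QEMU cpus.  'tb_size' is the size
   (in bytes) allocated to the translation buffer; zero means default
   size.  */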
void tcg_exec_init(unsigned long tb_size)
{
    tcg_allowed = true;
    cpu_gen_init();
    page_init();
    tb_htable_init();
    code_gen_alloc(tb_size);
#if defined(CONFIG_SOFTMMU)
    tcg_prologue_init(tcg_ctx);
#endif
}

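/* Allocate a new translation block.  Returns NULL when the code buffer is
 * exhausted, in which case the caller must flush.  Called with tb_lock held.
 */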
static TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    assert_tb_locked();

    tb = tcg_tb_alloc(tcg_ctx);
    if (unlikely(tb == NULL)) {
        return NULL;
    }
    return tb;
}

void tb_remove(TranslationBlock *tb)
{
    assert_tb_locked();

    g_tree_remove(tb_ctx.tb_tree, &tb->tc);
}

static inline void invalidate_page_bitmap(PageDesc *p)
{
#ifdef CONFIG_SOFTMMU
    g_free(p->code_bitmap);
    p->code_bitmap = NULL;
    p->code_write_count = 0;
#endif
}

static void page_flush_tb_1(int level, void **lp)
{
    int i;

    if (*lp == NULL) {
        return;
    }
    if (level == 0) {
        PageDesc *pd = *lp;

        for (i = 0; i < V_L2_SIZE; ++i) {
            pd[i].first_tb = NULL;
            invalidate_page_bitmap(pd + i);
        }
    } else {
        void **pp = *lp;

        for (i = 0; i < V_L2_SIZE; ++i) {
            page_flush_tb_1(level - 1, pp + i);
        }
    }
}

static void page_flush_tb(void)
{
    int i, l1_sz = v_l1_size;

    for (i = 0; i < l1_sz; i++) {
        page_flush_tb_1(v_l2_levels, l1_map + i);
    }
}

static gboolean tb_host_size_iter(gpointer key, gpointer value, gpointer data)
{
    const TranslationBlock *tb = value;
    size_t *size = data;

    *size += tb->tc.size;
    return false;
}

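/* Flush all translation blocks: drop every TB, reset the hash table and the
 * page tables, and clear each vCPU's jump cache.  Run as async safe work;
 * tb_flush_count is passed in so that concurrent requests flush only once.
 */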
static void do_tb_flush(CPUState *cpu, run_on_cpu_data tb_flush_count)
{
    tb_lock();

    if (tb_ctx.tb_flush_count != tb_flush_count.host_int) {
        goto done;
    }

    if (DEBUG_TB_FLUSH_GATE) {
        size_t nb_tbs = g_tree_nnodes(tb_ctx.tb_tree);
        size_t host_size = 0;

        g_tree_foreach(tb_ctx.tb_tree, tb_host_size_iter, &host_size);
        printf("qemu: flush code_size=%zu nb_tbs=%zu avg_tb_size=%zu\n",
               tcg_code_size(), nb_tbs, nb_tbs > 0 ? host_size / nb_tbs : 0);
    }

    CPU_FOREACH(cpu) {
        cpu_tb_jmp_cache_clear(cpu);
    }

    g_tree_ref(tb_ctx.tb_tree);
    g_tree_destroy(tb_ctx.tb_tree);

    qht_reset_size(&tb_ctx.htable, CODE_GEN_HTABLE_SIZE);
    page_flush_tb();

    tcg_region_reset_all();

    atomic_mb_set(&tb_ctx.tb_flush_count, tb_ctx.tb_flush_count + 1);

done:
    tb_unlock();
}

void tb_flush(CPUState *cpu)
{
    if (tcg_enabled()) {
        unsigned tb_flush_count = atomic_mb_read(&tb_ctx.tb_flush_count);
        async_safe_run_on_cpu(cpu, do_tb_flush,
                              RUN_ON_CPU_HOST_INT(tb_flush_count));
    }
}

#ifdef CONFIG_USER_ONLY

static void
do_tb_invalidate_check(struct qht *ht, void *p, uint32_t hash, void *userp)
{
    TranslationBlock *tb = p;
    target_ulong addr = *(target_ulong *)userp;

    if (!(addr + TARGET_PAGE_SIZE <= tb->pc || addr >= tb->pc + tb->size)) {
        printf("ERROR invalidate: address=" TARGET_FMT_lx
               " PC=%08lx size=%04x\n", addr, (long)tb->pc, tb->size);
    }
}

static void tb_invalidate_check(target_ulong address)
{
    address &= TARGET_PAGE_MASK;
    qht_iter(&tb_ctx.htable, do_tb_invalidate_check, &address);
}

static void
do_tb_page_check(struct qht *ht, void *p, uint32_t hash, void *userp)
{
    TranslationBlock *tb = p;
    int flags1, flags2;

    flags1 = page_get_flags(tb->pc);
    flags2 = page_get_flags(tb->pc + tb->size - 1);
    if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
        printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
               (long)tb->pc, tb->size, flags1, flags2);
    }
}

static void tb_page_check(void)
{
    qht_iter(&tb_ctx.htable, do_tb_page_check, NULL);
}

#endif

static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for (;;) {
        tb1 = *ptb;
        n1 = (uintptr_t)tb1 & 3;
        tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}

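/* Remove TB from the list of TBs jumping to the n-th jump target of TB.
 * The list is threaded through jmp_list_next/jmp_list_first, with the low
 * two bits of each link encoding which slot the link came from.
 */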
static inline void tb_remove_from_jmp_list(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1;
    uintptr_t *ptb, ntb;
    unsigned int n1;

    ptb = &tb->jmp_list_next[n];
    if (*ptb) {
        for (;;) {
            ntb = *ptb;
            n1 = ntb & 3;
            tb1 = (TranslationBlock *)(ntb & ~3);
            if (n1 == n && tb1 == tb) {
                break;
            }
            if (n1 == 2) {
                ptb = &tb1->jmp_list_first;
            } else {
                ptb = &tb1->jmp_list_next[n1];
            }
        }

        *ptb = tb->jmp_list_next[n];
        tb->jmp_list_next[n] = (uintptr_t)NULL;
    }
}

static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    uintptr_t addr = (uintptr_t)(tb->tc.ptr + tb->jmp_reset_offset[n]);
    tb_set_jmp_target(tb, n, addr);
}

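/* Remove any chained jumps into TB: reset each jumping TB's jump slot so it
 * exits to the main loop instead of branching directly into TB.
 */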
static inline void tb_jmp_unlink(TranslationBlock *tb)
{
    TranslationBlock *tb1;
    uintptr_t *ptb, ntb;
    unsigned int n1;

    ptb = &tb->jmp_list_first;
    for (;;) {
        ntb = *ptb;
        n1 = ntb & 3;
        tb1 = (TranslationBlock *)(ntb & ~3);
        if (n1 == 2) {
            break;
        }
        tb_reset_jump(tb1, n1);
        *ptb = tb1->jmp_list_next[n1];
        tb1->jmp_list_next[n1] = (uintptr_t)NULL;
    }
}

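/* Invalidate one TB: mark it CF_INVALID, unhash it, unlink it from its pages
 * and from the jump lists, and purge it from each vCPU's tb_jmp_cache.
 * Called with tb_lock held.
 */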
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
{
    CPUState *cpu;
    PageDesc *p;
    uint32_t h;
    tb_page_addr_t phys_pc;

    assert_tb_locked();

    atomic_set(&tb->cflags, tb->cflags | CF_INVALID);

    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_hash_func(phys_pc, tb->pc, tb->flags, tb->cflags & CF_HASH_MASK,
                     tb->trace_vcpu_dstate);
    if (!qht_remove(&tb_ctx.htable, tb, h)) {
        return;
    }

    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    h = tb_jmp_cache_hash_func(tb->pc);
    CPU_FOREACH(cpu) {
        if (atomic_read(&cpu->tb_jmp_cache[h]) == tb) {
            atomic_set(&cpu->tb_jmp_cache[h], NULL);
        }
    }

    tb_remove_from_jmp_list(tb, 0);
    tb_remove_from_jmp_list(tb, 1);

    tb_jmp_unlink(tb);

    tb_ctx.tb_phys_invalidate_count++;
}

#ifdef CONFIG_SOFTMMU
static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = bitmap_new(TARGET_PAGE_SIZE);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (uintptr_t)tb & 3;
        tb = (TranslationBlock *)((uintptr_t)tb & ~3);
        if (n == 0) {
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE) {
                tb_end = TARGET_PAGE_SIZE;
            }
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        bitmap_set(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}
#endif

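/* Add the TB to the page descriptor for page n (0 or 1) and write-protect
 * the page if it was not protected already.
 * Called with mmap_lock held for user-mode emulation.
 */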
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, tb_page_addr_t page_addr)
{
    PageDesc *p;
#ifndef CONFIG_USER_ONLY
    bool page_already_protected;
#endif

    assert_memory_lock();

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
    tb->page_next[n] = p->first_tb;
#ifndef CONFIG_USER_ONLY
    page_already_protected = p->first_tb != NULL;
#endif
    p->first_tb = (TranslationBlock *)((uintptr_t)tb | n);
    invalidate_page_bitmap(p);

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        page_addr &= qemu_host_page_mask;
        prot = 0;
        for (addr = page_addr; addr < page_addr + qemu_host_page_size;
             addr += TARGET_PAGE_SIZE) {
            p2 = page_find(addr >> TARGET_PAGE_BITS);
            if (!p2) {
                continue;
            }
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
        }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
        if (DEBUG_TB_INVALIDATE_GATE) {
            printf("protecting code page: 0x" TB_PAGE_ADDR_FMT "\n", page_addr);
        }
    }
#else
    if (!page_already_protected) {
        tlb_protect_code(page_addr);
    }
#endif
}

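/* Add a new TB and link it to the physical page tables.  phys_page2 is
 * (-1) to indicate that only one page contains the TB.
 * Called with mmap_lock held for user-mode emulation.
 */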
static void tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
                         tb_page_addr_t phys_page2)
{
    uint32_t h;

    assert_memory_lock();

    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1) {
        tb_alloc_page(tb, 1, phys_page2);
    } else {
        tb->page_addr[1] = -1;
    }

    h = tb_hash_func(phys_pc, tb->pc, tb->flags, tb->cflags & CF_HASH_MASK,
                     tb->trace_vcpu_dstate);
    qht_insert(&tb_ctx.htable, tb, h);

#ifdef CONFIG_USER_ONLY
    if (DEBUG_TB_CHECK_GATE) {
        tb_page_check();
    }
#endif
}

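/* Translate the guest code starting at pc and create a new TB for it.
 * Called with mmap_lock held for user-mode emulation.
 */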
TranslationBlock *tb_gen_code(CPUState *cpu,
                              target_ulong pc, target_ulong cs_base,
                              uint32_t flags, int cflags)
{
    CPUArchState *env = cpu->env_ptr;
    TranslationBlock *tb;
    tb_page_addr_t phys_pc, phys_page2;
    target_ulong virt_page2;
    tcg_insn_unit *gen_code_buf;
    int gen_code_size, search_size;
#ifdef CONFIG_PROFILER
    TCGProfile *prof = &tcg_ctx->prof;
    int64_t ti;
#endif
    assert_memory_lock();

    phys_pc = get_page_addr_code(env, pc);

 buffer_overflow:
    tb = tb_alloc(pc);
    if (unlikely(!tb)) {
        tb_flush(cpu);
        mmap_unlock();
        cpu->exception_index = EXCP_INTERRUPT;
        cpu_loop_exit(cpu);
    }

    gen_code_buf = tcg_ctx->code_gen_ptr;
    tb->tc.ptr = gen_code_buf;
    tb->pc = pc;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    tb->trace_vcpu_dstate = *cpu->trace_dstate;
    tcg_ctx->tb_cflags = cflags;

#ifdef CONFIG_PROFILER
    atomic_set(&prof->tb_count1, prof->tb_count1 + 1);
    ti = profile_getclock();
#endif

    tcg_func_start(tcg_ctx);

    tcg_ctx->cpu = ENV_GET_CPU(env);
    gen_intermediate_code(cpu, tb);
    tcg_ctx->cpu = NULL;

    trace_translate_block(tb, tb->pc, tb->tc.ptr);

    tb->jmp_reset_offset[0] = TB_JMP_RESET_OFFSET_INVALID;
    tb->jmp_reset_offset[1] = TB_JMP_RESET_OFFSET_INVALID;
    tcg_ctx->tb_jmp_reset_offset = tb->jmp_reset_offset;
    if (TCG_TARGET_HAS_direct_jump) {
        tcg_ctx->tb_jmp_insn_offset = tb->jmp_target_arg;
        tcg_ctx->tb_jmp_target_addr = NULL;
    } else {
        tcg_ctx->tb_jmp_insn_offset = NULL;
        tcg_ctx->tb_jmp_target_addr = tb->jmp_target_arg;
    }

#ifdef CONFIG_PROFILER
    atomic_set(&prof->tb_count, prof->tb_count + 1);
    atomic_set(&prof->interm_time, prof->interm_time + profile_getclock() - ti);
    ti = profile_getclock();
#endif

    gen_code_size = tcg_gen_code(tcg_ctx, tb);
    if (unlikely(gen_code_size < 0)) {
        goto buffer_overflow;
    }
    search_size = encode_search(tb, (void *)gen_code_buf + gen_code_size);
    if (unlikely(search_size < 0)) {
        goto buffer_overflow;
    }
    tb->tc.size = gen_code_size;

#ifdef CONFIG_PROFILER
    atomic_set(&prof->code_time, prof->code_time + profile_getclock() - ti);
    atomic_set(&prof->code_in_len, prof->code_in_len + tb->size);
    atomic_set(&prof->code_out_len, prof->code_out_len + gen_code_size);
    atomic_set(&prof->search_out_len, prof->search_out_len + search_size);
#endif

#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_OUT_ASM) &&
        qemu_log_in_addr_range(tb->pc)) {
        qemu_log_lock();
        qemu_log("OUT: [size=%d]\n", gen_code_size);
        if (tcg_ctx->data_gen_ptr) {
            size_t code_size = tcg_ctx->data_gen_ptr - tb->tc.ptr;
            size_t data_size = gen_code_size - code_size;
            size_t i;

            log_disas(tb->tc.ptr, code_size);

            for (i = 0; i < data_size; i += sizeof(tcg_target_ulong)) {
                if (sizeof(tcg_target_ulong) == 8) {
                    qemu_log("0x%08" PRIxPTR ": .quad 0x%016" PRIx64 "\n",
                             (uintptr_t)tcg_ctx->data_gen_ptr + i,
                             *(uint64_t *)(tcg_ctx->data_gen_ptr + i));
                } else {
                    qemu_log("0x%08" PRIxPTR ": .long 0x%08x\n",
                             (uintptr_t)tcg_ctx->data_gen_ptr + i,
                             *(uint32_t *)(tcg_ctx->data_gen_ptr + i));
                }
            }
        } else {
            log_disas(tb->tc.ptr, gen_code_size);
        }
        qemu_log("\n");
        qemu_log_flush();
        qemu_log_unlock();
    }
#endif

    atomic_set(&tcg_ctx->code_gen_ptr, (void *)
        ROUND_UP((uintptr_t)gen_code_buf + gen_code_size + search_size,
                 CODE_GEN_ALIGN));

    assert(((uintptr_t)tb & 3) == 0);
    tb->jmp_list_first = (uintptr_t)tb | 2;
    tb->jmp_list_next[0] = (uintptr_t)NULL;
    tb->jmp_list_next[1] = (uintptr_t)NULL;

    if (tb->jmp_reset_offset[0] != TB_JMP_RESET_OFFSET_INVALID) {
        tb_reset_jump(tb, 0);
    }
    if (tb->jmp_reset_offset[1] != TB_JMP_RESET_OFFSET_INVALID) {
        tb_reset_jump(tb, 1);
    }

    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_page_addr_code(env, virt_page2);
    }

    tb_link_page(tb, phys_pc, phys_page2);
    g_tree_insert(tb_ctx.tb_tree, &tb->tc, tb);
    return tb;
}

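/* Invalidate all TBs which intersect with the target physical address range
 * [start;end[.  NOTE: start and end may refer to *different* physical pages;
 * the helper below walks the range one page at a time.
 */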
static void tb_invalidate_phys_range_1(tb_page_addr_t start, tb_page_addr_t end)
{
    while (start < end) {
        tb_invalidate_phys_page_range(start, end, 0);
        start &= TARGET_PAGE_MASK;
        start += TARGET_PAGE_SIZE;
    }
}

#ifdef CONFIG_SOFTMMU
void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end)
{
    assert_tb_locked();
    tb_invalidate_phys_range_1(start, end);
}
#else
void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end)
{
    assert_memory_lock();
    tb_lock();
    tb_invalidate_phys_range_1(start, end);
    tb_unlock();
}
#endif

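/* Invalidate all TBs which intersect with the target physical address range
 * [start;end[.  NOTE: start and end must refer to the *same* physical page.
 * 'is_cpu_write_access' should be true if called from a real cpu write
 * access: the virtual CPU will exit the current TB if code is modified
 * inside this TB.  Called with tb_lock/mmap_lock held.
 */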
void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
                                   int is_cpu_write_access)
{
    TranslationBlock *tb, *tb_next;
    tb_page_addr_t tb_start, tb_end;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    CPUState *cpu = current_cpu;
    CPUArchState *env = NULL;
    int current_tb_not_found = is_cpu_write_access;
    TranslationBlock *current_tb = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    uint32_t current_flags = 0;
#endif

    assert_memory_lock();
    assert_tb_locked();

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p) {
        return;
    }
#if defined(TARGET_HAS_PRECISE_SMC)
    if (cpu != NULL) {
        env = cpu->env_ptr;
    }
#endif

    tb = p->first_tb;
    while (tb != NULL) {
        n = (uintptr_t)tb & 3;
        tb = (TranslationBlock *)((uintptr_t)tb & ~3);
        tb_next = tb->page_next[n];

        if (n == 0) {
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (cpu->mem_io_pc) {
                    current_tb = tb_find_pc(cpu->mem_io_pc);
                }
            }
            if (current_tb == tb &&
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
                current_tb_modified = 1;
                cpu_restore_state_from_tb(cpu, current_tb, cpu->mem_io_pc);
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                     &current_flags);
            }
#endif
            tb_phys_invalidate(tb, -1);
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        tlb_unprotect_code(start);
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        cpu->cflags_next_tb = 1 | curr_cflags();
        cpu_loop_exit_noexc(cpu);
    }
#endif
}

#ifdef CONFIG_SOFTMMU
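/* Fast path for writes detected by the softmmu: len must be <= 8 and start
 * must be a multiple of len.  Once enough writes have hit a page, a code
 * bitmap is built so that writes touching no translated code can be ignored.
 */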
void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
{
    PageDesc *p;

#if 0
    if (1) {
        qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                 cpu_single_env->mem_io_vaddr, len,
                 cpu_single_env->eip,
                 cpu_single_env->eip +
                 (intptr_t)cpu_single_env->segs[R_CS].base);
    }
#endif
    assert_memory_lock();

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p) {
        return;
    }
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD) {
        build_page_bitmap(p);
    }
    if (p->code_bitmap) {
        unsigned int nr;
        unsigned long b;

        nr = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG - 1));
        if (b & ((1 << len) - 1)) {
            goto do_invalidate;
        }
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}
#else

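/* User-mode only: invalidate all TBs on the given page.  Returns true if
 * the currently executing TB was invalidated (precise-SMC targets), in
 * which case the caller must not resume execution of that TB.
 */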
static bool tb_invalidate_phys_page(tb_page_addr_t addr, uintptr_t pc)
{
    TranslationBlock *tb;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    TranslationBlock *current_tb = NULL;
    CPUState *cpu = current_cpu;
    CPUArchState *env = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    uint32_t current_flags = 0;
#endif

    assert_memory_lock();

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        return false;
    }

    tb_lock();
    tb = p->first_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
    if (cpu != NULL) {
        env = cpu->env_ptr;
    }
#endif
    while (tb != NULL) {
        n = (uintptr_t)tb & 3;
        tb = (TranslationBlock *)((uintptr_t)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            (current_tb->cflags & CF_COUNT_MASK) != 1) {
            current_tb_modified = 1;
            cpu_restore_state_from_tb(cpu, current_tb, pc);
            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                 &current_flags);
        }
#endif
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        cpu->cflags_next_tb = 1 | curr_cflags();
        return true;
    }
#endif
    tb_unlock();

    return false;
}
#endif

static TranslationBlock *tb_find_pc(uintptr_t tc_ptr)
{
    struct tb_tc s = { .ptr = (void *)tc_ptr };

    return g_tree_lookup(tb_ctx.tb_tree, &s);
}

#if !defined(CONFIG_USER_ONLY)
void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr)
{
    ram_addr_t ram_addr;
    MemoryRegion *mr;
    hwaddr l = 1;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr, &l, false);
    if (!(memory_region_is_ram(mr)
          || memory_region_is_romd(mr))) {
        rcu_read_unlock();
        return;
    }
    ram_addr = memory_region_get_ram_addr(mr) + addr;
    tb_lock();
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
    tb_unlock();
    rcu_read_unlock();
}
#endif

void tb_check_watchpoint(CPUState *cpu)
{
    TranslationBlock *tb;

    tb = tb_find_pc(cpu->mem_io_pc);
    if (tb) {
        cpu_restore_state_from_tb(cpu, tb, cpu->mem_io_pc);
        tb_phys_invalidate(tb, -1);
    } else {
        CPUArchState *env = cpu->env_ptr;
        target_ulong pc, cs_base;
        tb_page_addr_t addr;
        uint32_t flags;

        cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
        addr = get_page_addr_code(env, pc);
        tb_invalidate_phys_range(addr, addr + 1);
    }
}

#ifndef CONFIG_USER_ONLY
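/* In deterministic (icount) execution mode, instructions doing device I/O
 * must be at the end of the TB.  Rewind the CPU state to the I/O instruction
 * and request that the next TB stop right after it (CF_LAST_IO | n).
 */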
void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr)
{
#if defined(TARGET_MIPS) || defined(TARGET_SH4)
    CPUArchState *env = cpu->env_ptr;
#endif
    TranslationBlock *tb;
    uint32_t n;

    tb_lock();
    tb = tb_find_pc(retaddr);
    if (!tb) {
        cpu_abort(cpu, "cpu_io_recompile: could not find TB for pc=%p",
                  (void *)retaddr);
    }
    n = cpu->icount_decr.u16.low + tb->icount;
    cpu_restore_state_from_tb(cpu, tb, retaddr);

    n = n - cpu->icount_decr.u16.low;

    n++;

#if defined(TARGET_MIPS)
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
        env->active_tc.PC -= (env->hflags & MIPS_HFLAG_B16 ? 2 : 4);
        cpu->icount_decr.u16.low++;
        env->hflags &= ~MIPS_HFLAG_BMASK;
    }
#elif defined(TARGET_SH4)
    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
        && n > 1) {
        env->pc -= 2;
        cpu->icount_decr.u16.low++;
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
    }
#endif

    if (n > CF_COUNT_MASK) {
        cpu_abort(cpu, "TB too big during recompile");
    }

    cpu->cflags_next_tb = curr_cflags() | CF_LAST_IO | n;

    if (tb->cflags & CF_NOCACHE) {
        if (tb->orig_tb) {
            tb_phys_invalidate(tb->orig_tb, -1);
        }
        tb_remove(tb);
    }

    cpu_loop_exit_noexc(cpu);
}

static void tb_jmp_cache_clear_page(CPUState *cpu, target_ulong page_addr)
{
    unsigned int i, i0 = tb_jmp_cache_hash_page(page_addr);

    for (i = 0; i < TB_JMP_PAGE_SIZE; i++) {
        atomic_set(&cpu->tb_jmp_cache[i0 + i], NULL);
    }
}

void tb_flush_jmp_cache(CPUState *cpu, target_ulong addr)
{
    tb_jmp_cache_clear_page(cpu, addr - TARGET_PAGE_SIZE);
    tb_jmp_cache_clear_page(cpu, addr);
}

static void print_qht_statistics(FILE *f, fprintf_function cpu_fprintf,
                                 struct qht_stats hst)
{
    uint32_t hgram_opts;
    size_t hgram_bins;
    char *hgram;

    if (!hst.head_buckets) {
        return;
    }
    cpu_fprintf(f, "TB hash buckets %zu/%zu (%0.2f%% head buckets used)\n",
                hst.used_head_buckets, hst.head_buckets,
                (double)hst.used_head_buckets / hst.head_buckets * 100);

    hgram_opts = QDIST_PR_BORDER | QDIST_PR_LABELS;
    hgram_opts |= QDIST_PR_100X | QDIST_PR_PERCENT;
    if (qdist_xmax(&hst.occupancy) - qdist_xmin(&hst.occupancy) == 1) {
        hgram_opts |= QDIST_PR_NODECIMAL;
    }
    hgram = qdist_pr(&hst.occupancy, 10, hgram_opts);
    cpu_fprintf(f, "TB hash occupancy %0.2f%% avg chain occ. Histogram: %s\n",
                qdist_avg(&hst.occupancy) * 100, hgram);
    g_free(hgram);

    hgram_opts = QDIST_PR_BORDER | QDIST_PR_LABELS;
    hgram_bins = qdist_xmax(&hst.chain) - qdist_xmin(&hst.chain);
    if (hgram_bins > 10) {
        hgram_bins = 10;
    } else {
        hgram_bins = 0;
        hgram_opts |= QDIST_PR_NODECIMAL | QDIST_PR_NOBINRANGE;
    }
    hgram = qdist_pr(&hst.chain, hgram_bins, hgram_opts);
    cpu_fprintf(f, "TB hash avg chain %0.3f buckets. Histogram: %s\n",
                qdist_avg(&hst.chain), hgram);
    g_free(hgram);
}

struct tb_tree_stats {
    size_t host_size;
    size_t target_size;
    size_t max_target_size;
    size_t direct_jmp_count;
    size_t direct_jmp2_count;
    size_t cross_page;
};

static gboolean tb_tree_stats_iter(gpointer key, gpointer value, gpointer data)
{
    const TranslationBlock *tb = value;
    struct tb_tree_stats *tst = data;

    tst->host_size += tb->tc.size;
    tst->target_size += tb->size;
    if (tb->size > tst->max_target_size) {
        tst->max_target_size = tb->size;
    }
    if (tb->page_addr[1] != -1) {
        tst->cross_page++;
    }
    if (tb->jmp_reset_offset[0] != TB_JMP_RESET_OFFSET_INVALID) {
        tst->direct_jmp_count++;
        if (tb->jmp_reset_offset[1] != TB_JMP_RESET_OFFSET_INVALID) {
            tst->direct_jmp2_count++;
        }
    }
    return false;
}

void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
{
    struct tb_tree_stats tst = {};
    struct qht_stats hst;
    size_t nb_tbs;

    tb_lock();

    nb_tbs = g_tree_nnodes(tb_ctx.tb_tree);
    g_tree_foreach(tb_ctx.tb_tree, tb_tree_stats_iter, &tst);

    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "gen code size %zu/%zu\n",
                tcg_code_size(), tcg_code_capacity());
    cpu_fprintf(f, "TB count %zu\n", nb_tbs);
    cpu_fprintf(f, "TB avg target size %zu max=%zu bytes\n",
                nb_tbs ? tst.target_size / nb_tbs : 0,
                tst.max_target_size);
    cpu_fprintf(f, "TB avg host size %zu bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? tst.host_size / nb_tbs : 0,
                tst.target_size ? (double)tst.host_size / tst.target_size : 0);
    cpu_fprintf(f, "cross page TB count %zu (%zu%%)\n", tst.cross_page,
                nb_tbs ? (tst.cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count %zu (%zu%%) (2 jumps=%zu %zu%%)\n",
                tst.direct_jmp_count,
                nb_tbs ? (tst.direct_jmp_count * 100) / nb_tbs : 0,
                tst.direct_jmp2_count,
                nb_tbs ? (tst.direct_jmp2_count * 100) / nb_tbs : 0);

    qht_statistics_init(&tb_ctx.htable, &hst);
    print_qht_statistics(f, cpu_fprintf, hst);
    qht_statistics_destroy(&hst);

    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count %u\n",
                atomic_read(&tb_ctx.tb_flush_count));
    cpu_fprintf(f, "TB invalidate count %d\n", tb_ctx.tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count %zu\n", tlb_flush_count());
    tcg_dump_info(f, cpu_fprintf);

    tb_unlock();
}

void dump_opcount_info(FILE *f, fprintf_function cpu_fprintf)
{
    tcg_dump_op_count(f, cpu_fprintf);
}

#else

void cpu_interrupt(CPUState *cpu, int mask)
{
    g_assert(qemu_mutex_iothread_locked());
    cpu->interrupt_request |= mask;
    cpu->icount_decr.u16.high = -1;
}

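/* Walk guest process memory "regions" (maximal ranges of pages with
 * identical protection flags) one by one and call 'fn' for each region.
 */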
struct walk_memory_regions_data {
    walk_memory_regions_fn fn;
    void *priv;
    target_ulong start;
    int prot;
};

static int walk_memory_regions_end(struct walk_memory_regions_data *data,
                                   target_ulong end, int new_prot)
{
    if (data->start != -1u) {
        int rc = data->fn(data->priv, data->start, end, data->prot);
        if (rc != 0) {
            return rc;
        }
    }

    data->start = (new_prot ? end : -1u);
    data->prot = new_prot;

    return 0;
}

static int walk_memory_regions_1(struct walk_memory_regions_data *data,
                                 target_ulong base, int level, void **lp)
{
    target_ulong pa;
    int i, rc;

    if (*lp == NULL) {
        return walk_memory_regions_end(data, base, 0);
    }

    if (level == 0) {
        PageDesc *pd = *lp;

        for (i = 0; i < V_L2_SIZE; ++i) {
            int prot = pd[i].flags;

            pa = base | (i << TARGET_PAGE_BITS);
            if (prot != data->prot) {
                rc = walk_memory_regions_end(data, pa, prot);
                if (rc != 0) {
                    return rc;
                }
            }
        }
    } else {
        void **pp = *lp;

        for (i = 0; i < V_L2_SIZE; ++i) {
            pa = base | ((target_ulong)i <<
                (TARGET_PAGE_BITS + V_L2_BITS * level));
            rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
            if (rc != 0) {
                return rc;
            }
        }
    }

    return 0;
}

int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
{
    struct walk_memory_regions_data data;
    uintptr_t i, l1_sz = v_l1_size;

    data.fn = fn;
    data.priv = priv;
    data.start = -1u;
    data.prot = 0;

    for (i = 0; i < l1_sz; i++) {
        target_ulong base = i << (v_l1_shift + TARGET_PAGE_BITS);
        int rc = walk_memory_regions_1(&data, base, v_l2_levels, l1_map + i);
        if (rc != 0) {
            return rc;
        }
    }

    return walk_memory_regions_end(&data, 0, 0);
}

static int dump_region(void *priv, target_ulong start,
                       target_ulong end, unsigned long prot)
{
    FILE *f = (FILE *)priv;

    (void) fprintf(f, TARGET_FMT_lx"-"TARGET_FMT_lx
                   " "TARGET_FMT_lx" %c%c%c\n",
                   start, end, end - start,
                   ((prot & PAGE_READ) ? 'r' : '-'),
                   ((prot & PAGE_WRITE) ? 'w' : '-'),
                   ((prot & PAGE_EXEC) ? 'x' : '-'));

    return 0;
}

void page_dump(FILE *f)
{
    const int length = sizeof(target_ulong) * 2;
    (void) fprintf(f, "%-*s %-*s %-*s %s\n",
                   length, "start", length, "end", length, "size", "prot");
    walk_memory_regions(f, dump_region);
}

int page_get_flags(target_ulong address)
{
    PageDesc *p;

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p) {
        return 0;
    }
    return p->flags;
}

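/* Modify the flags of a page and invalidate the code if necessary.
   The flag PAGE_WRITE_ORG is positioned automatically depending
   on PAGE_WRITE.  The mmap_lock should be held.  */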
void page_set_flags(target_ulong start, target_ulong end, int flags)
{
    target_ulong addr, len;

#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(end <= ((target_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
#endif
    assert(start < end);
    assert_memory_lock();

    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    if (flags & PAGE_WRITE) {
        flags |= PAGE_WRITE_ORG;
    }

    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);

        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr, 0);
        }
        p->flags = flags;
    }
}

int page_check_range(target_ulong start, target_ulong len, int flags)
{
    PageDesc *p;
    target_ulong end;
    target_ulong addr;

#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(start < ((target_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
#endif

    if (len == 0) {
        return 0;
    }
    if (start + len - 1 < start) {
        return -1;
    }

    end = TARGET_PAGE_ALIGN(start + len);
    start = start & TARGET_PAGE_MASK;

    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        p = page_find(addr >> TARGET_PAGE_BITS);
        if (!p) {
            return -1;
        }
        if (!(p->flags & PAGE_VALID)) {
            return -1;
        }

        if ((flags & PAGE_READ) && !(p->flags & PAGE_READ)) {
            return -1;
        }
        if (flags & PAGE_WRITE) {
            if (!(p->flags & PAGE_WRITE_ORG)) {
                return -1;
            }
            if (!(p->flags & PAGE_WRITE)) {
                if (!page_unprotect(addr, 0)) {
                    return -1;
                }
            }
        }
    }
    return 0;
}

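/* Called from the signal handler: invalidate the code and unprotect the
 * page.  Return 0 if the fault was not handled, 1 if it was handled, and
 * 2 if it was handled but the caller must cause the TB to be immediately
 * exited (the current TB was invalidated).
 */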
int page_unprotect(target_ulong address, uintptr_t pc)
{
    unsigned int prot;
    bool current_tb_invalidated;
    PageDesc *p;
    target_ulong host_start, host_end, addr;

    mmap_lock();

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p) {
        mmap_unlock();
        return 0;
    }

    if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
        host_start = address & qemu_host_page_mask;
        host_end = host_start + qemu_host_page_size;

        prot = 0;
        current_tb_invalidated = false;
        for (addr = host_start ; addr < host_end ; addr += TARGET_PAGE_SIZE) {
            p = page_find(addr >> TARGET_PAGE_BITS);
            p->flags |= PAGE_WRITE;
            prot |= p->flags;

            current_tb_invalidated |= tb_invalidate_phys_page(addr, pc);
#ifdef CONFIG_USER_ONLY
            if (DEBUG_TB_CHECK_GATE) {
                tb_invalidate_check(addr);
            }
#endif
        }
        mprotect((void *)g2h(host_start), qemu_host_page_size,
                 prot & PAGE_BITS);

        mmap_unlock();
        return current_tb_invalidated ? 2 : 1;
    }
    mmap_unlock();
    return 0;
}
#endif

void tcg_flush_softmmu_tlb(CPUState *cs)
{
#ifdef CONFIG_SOFTMMU
    tlb_flush(cs);
#endif
}