/*
 *  Host code generation
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
19#ifdef _WIN32
20#include <windows.h>
21#endif
22#include "qemu/osdep.h"
23
24
25#include "qemu-common.h"
26#define NO_CPU_IO_DEFS
27#include "cpu.h"
28#include "trace.h"
29#include "disas/disas.h"
30#include "exec/exec-all.h"
31#include "tcg.h"
32#if defined(CONFIG_USER_ONLY)
33#include "qemu.h"
34#include "exec/exec-all.h"
35#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
36#include <sys/param.h>
37#if __FreeBSD_version >= 700104
38#define HAVE_KINFO_GETVMMAP
39#define sigqueue sigqueue_freebsd
40#include <sys/proc.h>
41#include <machine/profile.h>
42#define _KERNEL
43#include <sys/user.h>
44#undef _KERNEL
45#undef sigqueue
46#include <libutil.h>
47#endif
48#endif
49#else
50#include "exec/address-spaces.h"
51#endif
52
53#include "exec/cputlb.h"
54#include "exec/tb-hash.h"
55#include "translate-all.h"
56#include "qemu/bitmap.h"
57#include "qemu/error-report.h"
58#include "qemu/timer.h"
59#include "qemu/main-loop.h"
60#include "exec/log.h"
61#include "qemu/etrace.h"
62#include "sysemu/cpus.h"
63
64
65
66
67
68
69#ifdef DEBUG_TB_INVALIDATE
70#define DEBUG_TB_INVALIDATE_GATE 1
71#else
72#define DEBUG_TB_INVALIDATE_GATE 0
73#endif
74
75#ifdef DEBUG_TB_FLUSH
76#define DEBUG_TB_FLUSH_GATE 1
77#else
78#define DEBUG_TB_FLUSH_GATE 0
79#endif
80
81#if !defined(CONFIG_USER_ONLY)
82
83#undef DEBUG_TB_CHECK
84#endif
85
86#ifdef DEBUG_TB_CHECK
87#define DEBUG_TB_CHECK_GATE 1
88#else
89#define DEBUG_TB_CHECK_GATE 0
90#endif
91
92
93
94
95
96
97
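/*
 * Writes to the page tables and TB lists must hold the "memory lock":
 * in system mode (softmmu) this is the tb_lock, in user mode it is the
 * mmap_lock.
 */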
98#ifdef CONFIG_SOFTMMU
99#define assert_memory_lock() tcg_debug_assert(have_tb_lock)
100#else
101#define assert_memory_lock() tcg_debug_assert(have_mmap_lock())
102#endif
103
104#define SMC_BITMAP_USE_THRESHOLD 10
105
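/*
 * Per guest-page descriptor.  first_tb heads the list of TBs intersecting
 * the page; the low bits of each link encode which of the TB's (up to two)
 * pages the link belongs to.  In system mode a bitmap of the bytes that
 * contain translated code is built once the page has seen enough code
 * writes (SMC_BITMAP_USE_THRESHOLD); in user mode the mmap protection
 * flags are kept here instead.
 */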
106typedef struct PageDesc {
107
108 TranslationBlock *first_tb;
109#ifdef CONFIG_SOFTMMU
110
111
112 unsigned int code_write_count;
113 unsigned long *code_bitmap;
114#else
115 unsigned long flags;
116#endif
117} PageDesc;
118
119
120
121#if !defined(CONFIG_USER_ONLY)
122#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
123# define L1_MAP_ADDR_SPACE_BITS HOST_LONG_BITS
124#else
125# define L1_MAP_ADDR_SPACE_BITS TARGET_PHYS_ADDR_SPACE_BITS
126#endif
127#else
128# define L1_MAP_ADDR_SPACE_BITS TARGET_VIRT_ADDR_SPACE_BITS
129#endif
130
131
132#define V_L2_BITS 10
133#define V_L2_SIZE (1 << V_L2_BITS)
134
135
136QEMU_BUILD_BUG_ON(CPU_TRACE_DSTATE_MAX_EVENTS >
137 sizeof(((TranslationBlock *)0)->trace_vcpu_dstate)
138 * BITS_PER_BYTE);
139
140
141
142
143static int v_l1_size;
144static int v_l1_shift;
145static int v_l2_levels;
146
147
148
149
150#define V_L1_MIN_BITS 4
151#define V_L1_MAX_BITS (V_L2_BITS + 3)
152#define V_L1_MAX_SIZE (1 << V_L1_MAX_BITS)
153
154static void *l1_map[V_L1_MAX_SIZE];
155
156
157TCGContext tcg_init_ctx;
158__thread TCGContext *tcg_ctx;
159TBContext tb_ctx;
160bool parallel_cpus;
161
162
163static __thread int have_tb_lock;
164
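/*
 * Compute the geometry of the l1_map radix tree: how many bits the L1
 * level covers and how many intermediate L2 levels are needed for the
 * configured address space size and page size.
 */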
165static void page_table_config_init(void)
166{
167 uint32_t v_l1_bits;
168
169 assert(TARGET_PAGE_BITS);
170
171 v_l1_bits = (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % V_L2_BITS;
172 if (v_l1_bits < V_L1_MIN_BITS) {
173 v_l1_bits += V_L2_BITS;
174 }
175
176 v_l1_size = 1 << v_l1_bits;
177 v_l1_shift = L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - v_l1_bits;
178 v_l2_levels = v_l1_shift / V_L2_BITS - 1;
179
180 assert(v_l1_bits <= V_L1_MAX_BITS);
181 assert(v_l1_shift % V_L2_BITS == 0);
182 assert(v_l2_levels >= 0);
183}
184
185#define assert_tb_locked() tcg_debug_assert(have_tb_lock)
186#define assert_tb_unlocked() tcg_debug_assert(!have_tb_lock)
187
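/*
 * tb_lock protects the translation structures: the TB hash table, the TB
 * tree, the per-page TB lists and the code buffer bookkeeping.
 */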
188void tb_lock(void)
189{
190 assert_tb_unlocked();
191 qemu_mutex_lock(&tb_ctx.tb_lock);
192 have_tb_lock++;
193}
194
195void tb_unlock(void)
196{
197 assert_tb_locked();
198 have_tb_lock--;
199 qemu_mutex_unlock(&tb_ctx.tb_lock);
200}
201
202void tb_lock_reset(void)
203{
204 if (have_tb_lock) {
205 qemu_mutex_unlock(&tb_ctx.tb_lock);
206 have_tb_lock = 0;
207 }
208}
209
210static TranslationBlock *tb_find_pc(uintptr_t tc_ptr);
211
212void cpu_gen_init(void)
213{
214 tcg_context_init(&tcg_init_ctx);
215}
216
217
218
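/* Encode VAL as a signed LEB128 sequence at P.
   Return P incremented past the encoded value.  */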
219static uint8_t *encode_sleb128(uint8_t *p, target_long val)
220{
221 int more, byte;
222
223 do {
224 byte = val & 0x7f;
225 val >>= 7;
226 more = !((val == 0 && (byte & 0x40) == 0)
227 || (val == -1 && (byte & 0x40) != 0));
228 if (more) {
229 byte |= 0x80;
230 }
231 *p++ = byte;
232 } while (more);
233
234 return p;
235}
236
237
238
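/* Decode a signed LEB128 sequence at *PP; increment *PP past the
   decoded value.  Return the decoded value.  */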
239static target_long decode_sleb128(uint8_t **pp)
240{
241 uint8_t *p = *pp;
242 target_long val = 0;
243 int byte, shift = 0;
244
245 do {
246 byte = *p++;
247 val |= (target_ulong)(byte & 0x7f) << shift;
248 shift += 7;
249 } while (byte & 0x80);
250 if (shift < TARGET_LONG_BITS && (byte & 0x40)) {
251 val |= -(target_ulong)1 << shift;
252 }
253
254 *pp = p;
255 return val;
256}
257
258
259
260
261
262
263
264
265
266
267
268
269
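/*
 * Encode the instruction-start data collected while translating TB into
 * the search buffer at BLOCK.  Each guest instruction contributes one row
 * of TARGET_INSN_START_WORDS target words followed by the offset of the
 * end of its host code; every row is stored as sleb128 deltas against the
 * previous row, with the first row seeded from tb->pc.  Returns the number
 * of bytes consumed, or -1 on (pending) buffer overflow.
 */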
270static int encode_search(TranslationBlock *tb, uint8_t *block)
271{
272 uint8_t *highwater = tcg_ctx->code_gen_highwater;
273 uint8_t *p = block;
274 int i, j, n;
275
276 for (i = 0, n = tb->icount; i < n; ++i) {
277 target_ulong prev;
278
279 for (j = 0; j < TARGET_INSN_START_WORDS; ++j) {
280 if (i == 0) {
281 prev = (j == 0 ? tb->pc : 0);
282 } else {
283 prev = tcg_ctx->gen_insn_data[i - 1][j];
284 }
285 p = encode_sleb128(p, tcg_ctx->gen_insn_data[i][j] - prev);
286 }
287 prev = (i == 0 ? 0 : tcg_ctx->gen_insn_end_off[i - 1]);
288 p = encode_sleb128(p, tcg_ctx->gen_insn_end_off[i] - prev);
289
290
291
292
293
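        /*
         * Test for (pending) buffer overflow.  The assumption is that any
         * single row starting below the high-water mark cannot overrun the
         * buffer completely, so the check only needs to run once per row.
         */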
294 if (unlikely(p > highwater)) {
295 return -1;
296 }
297 }
298
299 return p - block;
300}
301
302
303
304
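/*
 * Restore the guest CPU state for the instruction containing searched_pc
 * by replaying the sleb128-encoded search data of TB.  Returns 0 on
 * success, -1 if searched_pc does not fall inside this TB's code.
 * Called with tb_lock held.
 */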
305static int cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb,
306 uintptr_t searched_pc)
307{
308 target_ulong data[TARGET_INSN_START_WORDS] = { tb->pc };
309 uintptr_t host_pc = (uintptr_t)tb->tc.ptr;
310 CPUArchState *env = cpu->env_ptr;
311 uint8_t *p = tb->tc.ptr + tb->tc.size;
312 int i, j, num_insns = tb->icount;
313#ifdef CONFIG_PROFILER
314 TCGProfile *prof = &tcg_ctx->prof;
315 int64_t ti = profile_getclock();
316#endif
317
318 searched_pc -= GETPC_ADJ;
319
320 if (searched_pc < host_pc) {
321 return -1;
322 }
323
324
325
326 for (i = 0; i < num_insns; ++i) {
327 for (j = 0; j < TARGET_INSN_START_WORDS; ++j) {
328 data[j] += decode_sleb128(&p);
329 }
330 host_pc += decode_sleb128(&p);
331 if (host_pc > searched_pc) {
332 goto found;
333 }
334 }
335 return -1;
336
337 found:
338 if (tb->cflags & CF_USE_ICOUNT) {
339 assert(use_icount);
340
341 cpu->icount_decr.u16.low += num_insns;
342
343 cpu->can_do_io = 0;
344 }
345 cpu->icount_decr.u16.low -= i;
346 restore_state_to_opc(env, tb, data);
347
348#ifdef CONFIG_PROFILER
349 atomic_set(&prof->restore_time,
350 prof->restore_time + profile_getclock() - ti);
351 atomic_set(&prof->restore_count, prof->restore_count + 1);
352#endif
353 return 0;
354}
355
356bool cpu_restore_state(CPUState *cpu, uintptr_t host_pc)
357{
358 TranslationBlock *tb;
359 bool r = false;
360 uintptr_t check_offset;
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376 check_offset = host_pc - (uintptr_t) tcg_init_ctx.code_gen_buffer;
377
378 if (check_offset < tcg_init_ctx.code_gen_buffer_size) {
379 tb_lock();
380 tb = tb_find_pc(host_pc);
381 if (tb) {
382 cpu_restore_state_from_tb(cpu, tb, host_pc);
383 if (tb->cflags & CF_NOCACHE) {
384
385 tb_phys_invalidate(tb, -1);
386 tb_remove(tb);
387 }
388 r = true;
389 }
390 tb_unlock();
391 }
392
393 return r;
394}
395
396static void page_init(void)
397{
398 page_size_init();
399 page_table_config_init();
400
401#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
402 {
403#ifdef HAVE_KINFO_GETVMMAP
404 struct kinfo_vmentry *freep;
405 int i, cnt;
406
407 freep = kinfo_getvmmap(getpid(), &cnt);
408 if (freep) {
409 mmap_lock();
410 for (i = 0; i < cnt; i++) {
411 unsigned long startaddr, endaddr;
412
413 startaddr = freep[i].kve_start;
414 endaddr = freep[i].kve_end;
415 if (h2g_valid(startaddr)) {
416 startaddr = h2g(startaddr) & TARGET_PAGE_MASK;
417
418 if (h2g_valid(endaddr)) {
419 endaddr = h2g(endaddr);
420 page_set_flags(startaddr, endaddr, PAGE_RESERVED);
421 } else {
422#if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
423 endaddr = ~0ul;
424 page_set_flags(startaddr, endaddr, PAGE_RESERVED);
425#endif
426 }
427 }
428 }
429 free(freep);
430 mmap_unlock();
431 }
432#else
433 FILE *f;
434
435 last_brk = (unsigned long)sbrk(0);
436
437 f = fopen("/compat/linux/proc/self/maps", "r");
438 if (f) {
439 mmap_lock();
440
441 do {
442 unsigned long startaddr, endaddr;
443 int n;
444
445 n = fscanf(f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);
446
447 if (n == 2 && h2g_valid(startaddr)) {
448 startaddr = h2g(startaddr) & TARGET_PAGE_MASK;
449
450 if (h2g_valid(endaddr)) {
451 endaddr = h2g(endaddr);
452 } else {
453 endaddr = ~0ul;
454 }
455 page_set_flags(startaddr, endaddr, PAGE_RESERVED);
456 }
457 } while (!feof(f));
458
459 fclose(f);
460 mmap_unlock();
461 }
462#endif
463 }
464#endif
465}
466
467
468
469
470
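/*
 * Look up, and optionally allocate, the PageDesc for a given page index.
 * l1_map is a multi-level radix tree; intermediate nodes and leaf PageDesc
 * arrays are published with atomic_rcu_set() so lock-free readers always
 * observe fully initialised entries.  Allocation requires the memory lock.
 */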
471static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
472{
473 PageDesc *pd;
474 void **lp;
475 int i;
476
477 if (alloc) {
478 assert_memory_lock();
479 }
480
481
482 lp = l1_map + ((index >> v_l1_shift) & (v_l1_size - 1));
483
484
485 for (i = v_l2_levels; i > 0; i--) {
486 void **p = atomic_rcu_read(lp);
487
488 if (p == NULL) {
489 if (!alloc) {
490 return NULL;
491 }
492 p = g_new0(void *, V_L2_SIZE);
493 atomic_rcu_set(lp, p);
494 }
495
496 lp = p + ((index >> (i * V_L2_BITS)) & (V_L2_SIZE - 1));
497 }
498
499 pd = atomic_rcu_read(lp);
500 if (pd == NULL) {
501 if (!alloc) {
502 return NULL;
503 }
504 pd = g_new0(PageDesc, V_L2_SIZE);
505 atomic_rcu_set(lp, pd);
506 }
507
508 return pd + (index & (V_L2_SIZE - 1));
509}
510
511static inline PageDesc *page_find(tb_page_addr_t index)
512{
513 return page_find_alloc(index, 0);
514}
515
516#if defined(CONFIG_USER_ONLY)
517
518
519
520
521#define USE_STATIC_CODE_GEN_BUFFER
522#endif
523
524
525
526#define MIN_CODE_GEN_BUFFER_SIZE (1024u * 1024)
527
528
529
530
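/*
 * Maximum size of the code generation buffer we would like to use.
 * Unless otherwise indicated, this is constrained by the range of direct
 * branches on the host CPU, as used by the TCG implementation of goto_tb.
 */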
531#if defined(__x86_64__)
532# define MAX_CODE_GEN_BUFFER_SIZE (2ul * 1024 * 1024 * 1024)
533#elif defined(__sparc__)
534# define MAX_CODE_GEN_BUFFER_SIZE (2ul * 1024 * 1024 * 1024)
535#elif defined(__powerpc64__)
536# define MAX_CODE_GEN_BUFFER_SIZE (2ul * 1024 * 1024 * 1024)
537#elif defined(__powerpc__)
538# define MAX_CODE_GEN_BUFFER_SIZE (32u * 1024 * 1024)
539#elif defined(__aarch64__)
540# define MAX_CODE_GEN_BUFFER_SIZE (2ul * 1024 * 1024 * 1024)
541#elif defined(__s390x__)
542
543# define MAX_CODE_GEN_BUFFER_SIZE (3ul * 1024 * 1024 * 1024)
544#elif defined(__mips__)
545
546
547# define MAX_CODE_GEN_BUFFER_SIZE (128ul * 1024 * 1024)
548#else
549# define MAX_CODE_GEN_BUFFER_SIZE ((size_t)-1)
550#endif
551
552#define DEFAULT_CODE_GEN_BUFFER_SIZE_1 (32u * 1024 * 1024)
553
554#define DEFAULT_CODE_GEN_BUFFER_SIZE \
555 (DEFAULT_CODE_GEN_BUFFER_SIZE_1 < MAX_CODE_GEN_BUFFER_SIZE \
556 ? DEFAULT_CODE_GEN_BUFFER_SIZE_1 : MAX_CODE_GEN_BUFFER_SIZE)
557
558static inline size_t size_code_gen_buffer(size_t tb_size)
559{
560
561 if (tb_size == 0) {
562#ifdef USE_STATIC_CODE_GEN_BUFFER
563 tb_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
564#else
565
566
567
568
569 tb_size = (unsigned long)(ram_size / 4);
570#endif
571 }
572 if (tb_size < MIN_CODE_GEN_BUFFER_SIZE) {
573 tb_size = MIN_CODE_GEN_BUFFER_SIZE;
574 }
575 if (tb_size > MAX_CODE_GEN_BUFFER_SIZE) {
576 tb_size = MAX_CODE_GEN_BUFFER_SIZE;
577 }
578 return tb_size;
579}
580
581#ifdef __mips__
582
583
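/*
 * On MIPS, J and JAL can only reach targets within the same 256MB-aligned
 * region, so the code generation buffer must not cross such a boundary.
 */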
584static inline bool cross_256mb(void *addr, size_t size)
585{
586 return ((uintptr_t)addr ^ ((uintptr_t)addr + size)) & ~0x0ffffffful;
587}
588
589
590
591
592static inline void *split_cross_256mb(void *buf1, size_t size1)
593{
594 void *buf2 = (void *)(((uintptr_t)buf1 + size1) & ~0x0ffffffful);
595 size_t size2 = buf1 + size1 - buf2;
596
597 size1 = buf2 - buf1;
598 if (size1 < size2) {
599 size1 = size2;
600 buf1 = buf2;
601 }
602
603 tcg_ctx->code_gen_buffer_size = size1;
604 return buf1;
605}
606#endif
607
608#ifdef USE_STATIC_CODE_GEN_BUFFER
609static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
610 __attribute__((aligned(CODE_GEN_ALIGN)));
611
612static inline void *alloc_code_gen_buffer(void)
613{
614 void *buf = static_code_gen_buffer;
615 void *end = static_code_gen_buffer + sizeof(static_code_gen_buffer);
616 size_t size;
617
618
619 buf = QEMU_ALIGN_PTR_UP(buf, qemu_real_host_page_size);
620 end = QEMU_ALIGN_PTR_DOWN(end, qemu_real_host_page_size);
621
622 size = end - buf;
623
624
625 if (size > tcg_ctx->code_gen_buffer_size) {
626 size = QEMU_ALIGN_DOWN(tcg_ctx->code_gen_buffer_size,
627 qemu_real_host_page_size);
628 }
629 tcg_ctx->code_gen_buffer_size = size;
630
631#ifdef __mips__
632 if (cross_256mb(buf, size)) {
633 buf = split_cross_256mb(buf, size);
634 size = tcg_ctx->code_gen_buffer_size;
635 }
636#endif
637
638 if (qemu_mprotect_rwx(buf, size)) {
639 abort();
640 }
641 qemu_madvise(buf, size, QEMU_MADV_HUGEPAGE);
642
643 return buf;
644}
645#elif defined(_WIN32)
646static inline void *alloc_code_gen_buffer(void)
647{
648 size_t size = tcg_ctx->code_gen_buffer_size;
649 void *buf;
650
651 buf = VirtualAlloc(NULL, size, MEM_RESERVE | MEM_COMMIT,
652 PAGE_EXECUTE_READWRITE);
653 return buf;
654}
655#else
656static inline void *alloc_code_gen_buffer(void)
657{
658 int prot = PROT_WRITE | PROT_READ | PROT_EXEC;
659 int flags = MAP_PRIVATE | MAP_ANONYMOUS;
660 uintptr_t start = 0;
661 size_t size = tcg_ctx->code_gen_buffer_size;
662 void *buf;
663
664
665
666
667# if defined(__PIE__) || defined(__PIC__)
668
669
670
671
672# elif defined(__x86_64__) && defined(MAP_32BIT)
673
674
675 flags |= MAP_32BIT;
676
677 if (size > 800u * 1024 * 1024) {
678 tcg_ctx->code_gen_buffer_size = size = 800u * 1024 * 1024;
679 }
680# elif defined(__sparc__)
681 start = 0x40000000ul;
682# elif defined(__s390x__)
683 start = 0x90000000ul;
684# elif defined(__mips__)
685# if _MIPS_SIM == _ABI64
686 start = 0x128000000ul;
687# else
688 start = 0x08000000ul;
689# endif
690# endif
691
692 buf = mmap((void *)start, size, prot, flags, -1, 0);
693 if (buf == MAP_FAILED) {
694 return NULL;
695 }
696
697#ifdef __mips__
698 if (cross_256mb(buf, size)) {
699
700
701 size_t size2;
702 void *buf2 = mmap(NULL, size, prot, flags, -1, 0);
703 switch ((int)(buf2 != MAP_FAILED)) {
704 case 1:
705 if (!cross_256mb(buf2, size)) {
706
707 munmap(buf, size);
708 break;
709 }
710
711 munmap(buf2, size);
712
713 default:
714
715 buf2 = split_cross_256mb(buf, size);
716 size2 = tcg_ctx->code_gen_buffer_size;
717 if (buf == buf2) {
718 munmap(buf + size2, size - size2);
719 } else {
720 munmap(buf, size - size2);
721 }
722 size = size2;
723 break;
724 }
725 buf = buf2;
726 }
727#endif
728
729
730 qemu_madvise(buf, size, QEMU_MADV_HUGEPAGE);
731
732 return buf;
733}
734#endif
735
736
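/*
 * The TB tree (tb_ctx.tb_tree) is keyed by the location of the translated
 * code in the code generation buffer (struct tb_tc).  A key with size == 0
 * is a lookup address and compares equal to any TB whose code range
 * contains that pointer, which is how tb_find_pc() works.
 */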
737static int ptr_cmp_tb_tc(const void *ptr, const struct tb_tc *s)
738{
739 if (ptr >= s->ptr + s->size) {
740 return 1;
741 } else if (ptr < s->ptr) {
742 return -1;
743 }
744 return 0;
745}
746
747static gint tb_tc_cmp(gconstpointer ap, gconstpointer bp)
748{
749 const struct tb_tc *a = ap;
750 const struct tb_tc *b = bp;
751
752
753
754
755
756
757 if (likely(a->size && b->size)) {
758 if (a->ptr > b->ptr) {
759 return 1;
760 } else if (a->ptr < b->ptr) {
761 return -1;
762 }
763
764 g_assert(a->size == b->size);
765 return 0;
766 }
767
768
769
770
771
772 if (likely(a->size == 0)) {
773 return ptr_cmp_tb_tc(a->ptr, b);
774 }
775 return ptr_cmp_tb_tc(b->ptr, a);
776}
777
778static inline void code_gen_alloc(size_t tb_size)
779{
780 tcg_ctx->code_gen_buffer_size = size_code_gen_buffer(tb_size);
781 tcg_ctx->code_gen_buffer = alloc_code_gen_buffer();
782 if (tcg_ctx->code_gen_buffer == NULL) {
783 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
784 exit(1);
785 }
786 tb_ctx.tb_tree = g_tree_new(tb_tc_cmp);
787 qemu_mutex_init(&tb_ctx.tb_lock);
788}
789
790static void tb_htable_init(void)
791{
792 unsigned int mode = QHT_MODE_AUTO_RESIZE;
793
794 qht_init(&tb_ctx.htable, CODE_GEN_HTABLE_SIZE, mode);
795}
796
797
798
799
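/*
 * Must be called before using the QEMU cpus.  'tb_size' is the size
 * (in bytes) allocated to the translation buffer; zero means the default
 * size.
 */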
800void tcg_exec_init(unsigned long tb_size)
801{
802 tcg_allowed = true;
803 cpu_gen_init();
804 page_init();
805 tb_htable_init();
806 code_gen_alloc(tb_size);
807#if defined(CONFIG_SOFTMMU)
808
809
810 tcg_prologue_init(tcg_ctx);
811#endif
812}
813
814
815
816
817
818
819
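/*
 * Allocate a new translation block.  Returns NULL when the current TCG
 * region is exhausted, in which case the caller flushes the translation
 * cache.  Called with tb_lock held.
 */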
820static TranslationBlock *tb_alloc(target_ulong pc)
821{
822 TranslationBlock *tb;
823
824 assert_tb_locked();
825
826 tb = tcg_tb_alloc(tcg_ctx);
827 if (unlikely(tb == NULL)) {
828 return NULL;
829 }
830 return tb;
831}
832
833
834void tb_remove(TranslationBlock *tb)
835{
836 assert_tb_locked();
837
838 g_tree_remove(tb_ctx.tb_tree, &tb->tc);
839}
840
841static inline void invalidate_page_bitmap(PageDesc *p)
842{
843#ifdef CONFIG_SOFTMMU
844 g_free(p->code_bitmap);
845 p->code_bitmap = NULL;
846 p->code_write_count = 0;
847#endif
848}
849
850
851static void page_flush_tb_1(int level, void **lp)
852{
853 int i;
854
855 if (*lp == NULL) {
856 return;
857 }
858 if (level == 0) {
859 PageDesc *pd = *lp;
860
861 for (i = 0; i < V_L2_SIZE; ++i) {
862 pd[i].first_tb = NULL;
863 invalidate_page_bitmap(pd + i);
864 }
865 } else {
866 void **pp = *lp;
867
868 for (i = 0; i < V_L2_SIZE; ++i) {
869 page_flush_tb_1(level - 1, pp + i);
870 }
871 }
872}
873
874static void page_flush_tb(void)
875{
876 int i, l1_sz = v_l1_size;
877
878 for (i = 0; i < l1_sz; i++) {
879 page_flush_tb_1(v_l2_levels, l1_map + i);
880 }
881}
882
883static gboolean tb_host_size_iter(gpointer key, gpointer value, gpointer data)
884{
885 const TranslationBlock *tb = value;
886 size_t *size = data;
887
888 *size += tb->tc.size;
889 return false;
890}
891
892
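/*
 * Flush the whole translation cache: drop every TB, reset the hash table,
 * the per-page lists and the TCG regions.  Runs as "safe work" with all
 * vCPUs quiescent; tb_flush_count detects a flush that already happened
 * while this request was queued.
 */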
893static void do_tb_flush(CPUState *cpu, run_on_cpu_data tb_flush_count)
894{
895 tb_lock();
896
897
898
899
900 if (tb_ctx.tb_flush_count != tb_flush_count.host_int) {
901 goto done;
902 }
903
904 if (DEBUG_TB_FLUSH_GATE) {
905 size_t nb_tbs = g_tree_nnodes(tb_ctx.tb_tree);
906 size_t host_size = 0;
907
908 g_tree_foreach(tb_ctx.tb_tree, tb_host_size_iter, &host_size);
909 printf("qemu: flush code_size=%zu nb_tbs=%zu avg_tb_size=%zu\n",
910 tcg_code_size(), nb_tbs, nb_tbs > 0 ? host_size / nb_tbs : 0);
911 }
912
913 CPU_FOREACH(cpu) {
914 cpu_tb_jmp_cache_clear(cpu);
915 }
916
917
918 g_tree_ref(tb_ctx.tb_tree);
919 g_tree_destroy(tb_ctx.tb_tree);
920
921 qht_reset_size(&tb_ctx.htable, CODE_GEN_HTABLE_SIZE);
922 page_flush_tb();
923
924 tcg_region_reset_all();
925
926
927 atomic_mb_set(&tb_ctx.tb_flush_count, tb_ctx.tb_flush_count + 1);
928
929done:
930 tb_unlock();
931}
932
933void tb_flush(CPUState *cpu)
934{
935 if (tcg_enabled()) {
936 unsigned tb_flush_count = atomic_mb_read(&tb_ctx.tb_flush_count);
937 async_safe_run_on_cpu(cpu, do_tb_flush,
938 RUN_ON_CPU_HOST_INT(tb_flush_count));
939 }
940}
941
942
943
944
945
946
947
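/*
 * Debug-only consistency checks (user mode, DEBUG_TB_CHECK): verify that
 * no translated code remains for an invalidated address and that no TB
 * lives on a writable page.
 */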
948#ifdef CONFIG_USER_ONLY
949
950static void
951do_tb_invalidate_check(struct qht *ht, void *p, uint32_t hash, void *userp)
952{
953 TranslationBlock *tb = p;
954 target_ulong addr = *(target_ulong *)userp;
955
956 if (!(addr + TARGET_PAGE_SIZE <= tb->pc || addr >= tb->pc + tb->size)) {
957 printf("ERROR invalidate: address=" TARGET_FMT_lx
958 " PC=%08lx size=%04x\n", addr, (long)tb->pc, tb->size);
959 }
960}
961
962
963
964
965
966static void tb_invalidate_check(target_ulong address)
967{
968 address &= TARGET_PAGE_MASK;
969 qht_iter(&tb_ctx.htable, do_tb_invalidate_check, &address);
970}
971
972static void
973do_tb_page_check(struct qht *ht, void *p, uint32_t hash, void *userp)
974{
975 TranslationBlock *tb = p;
976 int flags1, flags2;
977
978 flags1 = page_get_flags(tb->pc);
979 flags2 = page_get_flags(tb->pc + tb->size - 1);
980 if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
981 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
982 (long)tb->pc, tb->size, flags1, flags2);
983 }
984}
985
986
987static void tb_page_check(void)
988{
989 qht_iter(&tb_ctx.htable, do_tb_page_check, NULL);
990}
991
992#endif
993
994static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
995{
996 TranslationBlock *tb1;
997 unsigned int n1;
998
999 for (;;) {
1000 tb1 = *ptb;
1001 n1 = (uintptr_t)tb1 & 3;
1002 tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
1003 if (tb1 == tb) {
1004 *ptb = tb1->page_next[n1];
1005 break;
1006 }
1007 ptb = &tb1->page_next[n1];
1008 }
1009}
1010
1011
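/*
 * Remove TB from the list of incoming jumps kept by the destination of
 * TB's n-th direct jump.
 */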
1012static inline void tb_remove_from_jmp_list(TranslationBlock *tb, int n)
1013{
1014 TranslationBlock *tb1;
1015 uintptr_t *ptb, ntb;
1016 unsigned int n1;
1017
1018 ptb = &tb->jmp_list_next[n];
1019 if (*ptb) {
1020
1021 for (;;) {
1022 ntb = *ptb;
1023 n1 = ntb & 3;
1024 tb1 = (TranslationBlock *)(ntb & ~3);
1025 if (n1 == n && tb1 == tb) {
1026 break;
1027 }
1028 if (n1 == 2) {
1029 ptb = &tb1->jmp_list_first;
1030 } else {
1031 ptb = &tb1->jmp_list_next[n1];
1032 }
1033 }
1034
1035 *ptb = tb->jmp_list_next[n];
1036
1037 tb->jmp_list_next[n] = (uintptr_t)NULL;
1038 }
1039}
1040
1041
1042
1043static inline void tb_reset_jump(TranslationBlock *tb, int n)
1044{
1045 uintptr_t addr = (uintptr_t)(tb->tc.ptr + tb->jmp_reset_offset[n]);
1046 tb_set_jmp_target(tb, n, addr);
1047}
1048
1049
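/* Reset and unlink every TB that jumps directly into TB.  */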
1050static inline void tb_jmp_unlink(TranslationBlock *tb)
1051{
1052 TranslationBlock *tb1;
1053 uintptr_t *ptb, ntb;
1054 unsigned int n1;
1055
1056 ptb = &tb->jmp_list_first;
1057 for (;;) {
1058 ntb = *ptb;
1059 n1 = ntb & 3;
1060 tb1 = (TranslationBlock *)(ntb & ~3);
1061 if (n1 == 2) {
1062 break;
1063 }
1064 tb_reset_jump(tb1, n1);
1065 *ptb = tb1->jmp_list_next[n1];
1066 tb1->jmp_list_next[n1] = (uintptr_t)NULL;
1067 }
1068}
1069
1070
1071
1072
1073
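/*
 * Invalidate one TB: mark it CF_INVALID, remove it from the hash table,
 * the per-page lists and the jump lists, and purge it from every CPU's
 * tb_jmp_cache.  Called with tb_lock held.
 */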
1074void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
1075{
1076 CPUState *cpu;
1077 PageDesc *p;
1078 uint32_t h;
1079 tb_page_addr_t phys_pc;
1080
1081 assert_tb_locked();
1082
1083 atomic_set(&tb->cflags, tb->cflags | CF_INVALID);
1084
1085
1086 phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
1087 h = tb_hash_func(phys_pc, tb->pc, tb->flags, tb->cflags & CF_HASH_MASK,
1088 tb->trace_vcpu_dstate);
1089 if (!qht_remove(&tb_ctx.htable, tb, h)) {
1090 return;
1091 }
1092
1093
1094 if (tb->page_addr[0] != page_addr) {
1095 p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
1096 tb_page_remove(&p->first_tb, tb);
1097 invalidate_page_bitmap(p);
1098 }
1099 if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
1100 p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
1101 tb_page_remove(&p->first_tb, tb);
1102 invalidate_page_bitmap(p);
1103 }
1104
1105
1106 h = tb_jmp_cache_hash_func(tb->pc);
1107 CPU_FOREACH(cpu) {
1108 if (atomic_read(&cpu->tb_jmp_cache[h]) == tb) {
1109 atomic_set(&cpu->tb_jmp_cache[h], NULL);
1110 }
1111 }
1112
1113
1114 tb_remove_from_jmp_list(tb, 0);
1115 tb_remove_from_jmp_list(tb, 1);
1116
1117
1118 tb_jmp_unlink(tb);
1119
1120 tb_ctx.tb_phys_invalidate_count++;
1121}
1122
1123#ifdef CONFIG_SOFTMMU
1124static void build_page_bitmap(PageDesc *p)
1125{
1126 int n, tb_start, tb_end;
1127 TranslationBlock *tb;
1128
1129 p->code_bitmap = bitmap_new(TARGET_PAGE_SIZE);
1130
1131 tb = p->first_tb;
1132 while (tb != NULL) {
1133 n = (uintptr_t)tb & 3;
1134 tb = (TranslationBlock *)((uintptr_t)tb & ~3);
1135
1136 if (n == 0) {
1137
1138
1139 tb_start = tb->pc & ~TARGET_PAGE_MASK;
1140 tb_end = tb_start + tb->size;
1141 if (tb_end > TARGET_PAGE_SIZE) {
1142 tb_end = TARGET_PAGE_SIZE;
1143 }
1144 } else {
1145 tb_start = 0;
1146 tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
1147 }
1148 bitmap_set(p->code_bitmap, tb_start, tb_end - tb_start);
1149 tb = tb->page_next[n];
1150 }
1151}
1152#endif
1153
1154
1155
1156
1157
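/*
 * Add TB to the list of TBs intersecting the page at 'page_addr' (slot n)
 * and write-protect the page so that self-modifying code is detected:
 * with mprotect() in user mode, through the TLB in system mode.
 * Called with mmap_lock held for user-mode emulation.
 */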
1158static inline void tb_alloc_page(TranslationBlock *tb,
1159 unsigned int n, tb_page_addr_t page_addr)
1160{
1161 PageDesc *p;
1162#ifndef CONFIG_USER_ONLY
1163 bool page_already_protected;
1164#endif
1165
1166 assert_memory_lock();
1167
1168 tb->page_addr[n] = page_addr;
1169 p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
1170 tb->page_next[n] = p->first_tb;
1171#ifndef CONFIG_USER_ONLY
1172 page_already_protected = p->first_tb != NULL;
1173#endif
1174 p->first_tb = (TranslationBlock *)((uintptr_t)tb | n);
1175 invalidate_page_bitmap(p);
1176
1177#if defined(CONFIG_USER_ONLY)
1178 if (p->flags & PAGE_WRITE) {
1179 target_ulong addr;
1180 PageDesc *p2;
1181 int prot;
1182
1183
1184
1185 page_addr &= qemu_host_page_mask;
1186 prot = 0;
1187 for (addr = page_addr; addr < page_addr + qemu_host_page_size;
1188 addr += TARGET_PAGE_SIZE) {
1189
1190 p2 = page_find(addr >> TARGET_PAGE_BITS);
1191 if (!p2) {
1192 continue;
1193 }
1194 prot |= p2->flags;
1195 p2->flags &= ~PAGE_WRITE;
1196 }
1197 mprotect(g2h(page_addr), qemu_host_page_size,
1198 (prot & PAGE_BITS) & ~PAGE_WRITE);
1199 if (DEBUG_TB_INVALIDATE_GATE) {
1200 printf("protecting code page: 0x" TB_PAGE_ADDR_FMT "\n", page_addr);
1201 }
1202 }
1203#else
1204
1205
1206
1207 if (!page_already_protected) {
1208 tlb_protect_code(page_addr);
1209 }
1210#endif
1211}
1212
1213
1214
1215
1216
1217
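/*
 * Add a new TB and link it to the physical page tables and the TB hash
 * table.  phys_page2 is (-1) when the TB only covers one guest page.
 * Called with mmap_lock held for user-mode emulation.
 */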
1218static void tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
1219 tb_page_addr_t phys_page2)
1220{
1221 uint32_t h;
1222
1223 assert_memory_lock();
1224
1225
1226 tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
1227 if (phys_page2 != -1) {
1228 tb_alloc_page(tb, 1, phys_page2);
1229 } else {
1230 tb->page_addr[1] = -1;
1231 }
1232
1233
1234 h = tb_hash_func(phys_pc, tb->pc, tb->flags, tb->cflags & CF_HASH_MASK,
1235 tb->trace_vcpu_dstate);
1236 qht_insert(&tb_ctx.htable, tb, h);
1237
1238#ifdef CONFIG_USER_ONLY
1239 if (DEBUG_TB_CHECK_GATE) {
1240 tb_page_check();
1241 }
1242#endif
1243}
1244
1245
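/* Called with mmap_lock held for user mode emulation.  */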
1246TranslationBlock *tb_gen_code(CPUState *cpu,
1247 target_ulong pc, target_ulong cs_base,
1248 uint32_t flags, int cflags)
1249{
1250 CPUArchState *env = cpu->env_ptr;
1251 TranslationBlock *tb;
1252 tb_page_addr_t phys_pc, phys_page2;
1253 target_ulong virt_page2;
1254 tcg_insn_unit *gen_code_buf;
1255 int gen_code_size, search_size;
1256#ifdef CONFIG_PROFILER
1257 TCGProfile *prof = &tcg_ctx->prof;
1258 int64_t ti;
1259#endif
1260 assert_memory_lock();
1261
1262 phys_pc = get_page_addr_code(env, pc);
1263
1264 buffer_overflow:
1265 tb = tb_alloc(pc);
1266 if (unlikely(!tb)) {
1267
1268 tb_flush(cpu);
1269 mmap_unlock();
1270
1271 cpu->exception_index = EXCP_INTERRUPT;
1272 cpu_loop_exit(cpu);
1273 }
1274
1275 gen_code_buf = tcg_ctx->code_gen_ptr;
1276 tb->tc.ptr = gen_code_buf;
1277 tb->pc = pc;
1278 tb->cs_base = cs_base;
1279 tb->flags = flags;
1280 tb->cflags = cflags;
1281 tb->trace_vcpu_dstate = *cpu->trace_dstate;
1282 tcg_ctx->tb_cflags = cflags;
1283
1284#ifdef CONFIG_PROFILER
1285
1286 atomic_set(&prof->tb_count1, prof->tb_count1 + 1);
1287 ti = profile_getclock();
1288#endif
1289
1290 tcg_func_start(tcg_ctx);
1291
1292 tcg_ctx->cpu = ENV_GET_CPU(env);
1293 gen_intermediate_code(cpu, tb);
1294 tcg_ctx->cpu = NULL;
1295
1296 trace_translate_block(tb, tb->pc, tb->tc.ptr);
1297
1298
1299 tb->jmp_reset_offset[0] = TB_JMP_RESET_OFFSET_INVALID;
1300 tb->jmp_reset_offset[1] = TB_JMP_RESET_OFFSET_INVALID;
1301 tcg_ctx->tb_jmp_reset_offset = tb->jmp_reset_offset;
1302 if (TCG_TARGET_HAS_direct_jump) {
1303 tcg_ctx->tb_jmp_insn_offset = tb->jmp_target_arg;
1304 tcg_ctx->tb_jmp_target_addr = NULL;
1305 } else {
1306 tcg_ctx->tb_jmp_insn_offset = NULL;
1307 tcg_ctx->tb_jmp_target_addr = tb->jmp_target_arg;
1308 }
1309
1310#ifdef CONFIG_PROFILER
1311 atomic_set(&prof->tb_count, prof->tb_count + 1);
1312 atomic_set(&prof->interm_time, prof->interm_time + profile_getclock() - ti);
1313 ti = profile_getclock();
1314#endif
1315
1316
1317
1318
1319
1320
1321 gen_code_size = tcg_gen_code(tcg_ctx, tb);
1322 if (unlikely(gen_code_size < 0)) {
1323 goto buffer_overflow;
1324 }
1325 search_size = encode_search(tb, (void *)gen_code_buf + gen_code_size);
1326 if (unlikely(search_size < 0)) {
1327 goto buffer_overflow;
1328 }
1329 tb->tc.size = gen_code_size;
1330
1331#ifdef CONFIG_PROFILER
1332 atomic_set(&prof->code_time, prof->code_time + profile_getclock() - ti);
1333 atomic_set(&prof->code_in_len, prof->code_in_len + tb->size);
1334 atomic_set(&prof->code_out_len, prof->code_out_len + gen_code_size);
1335 atomic_set(&prof->search_out_len, prof->search_out_len + search_size);
1336#endif
1337
1338#ifdef DEBUG_DISAS
1339 if (qemu_loglevel_mask(CPU_LOG_TB_OUT_ASM) &&
1340 qemu_log_in_addr_range(tb->pc)) {
1341 qemu_log_lock();
1342 qemu_log("OUT: [size=%d]\n", gen_code_size);
1343 if (tcg_ctx->data_gen_ptr) {
1344 size_t code_size = tcg_ctx->data_gen_ptr - tb->tc.ptr;
1345 size_t data_size = gen_code_size - code_size;
1346 size_t i;
1347
1348 log_disas(tb->tc.ptr, code_size);
1349
1350 for (i = 0; i < data_size; i += sizeof(tcg_target_ulong)) {
1351 if (sizeof(tcg_target_ulong) == 8) {
1352 qemu_log("0x%08" PRIxPTR ": .quad 0x%016" PRIx64 "\n",
1353 (uintptr_t)tcg_ctx->data_gen_ptr + i,
1354 *(uint64_t *)(tcg_ctx->data_gen_ptr + i));
1355 } else {
1356 qemu_log("0x%08" PRIxPTR ": .long 0x%08x\n",
1357 (uintptr_t)tcg_ctx->data_gen_ptr + i,
1358 *(uint32_t *)(tcg_ctx->data_gen_ptr + i));
1359 }
1360 }
1361 } else {
1362 log_disas(tb->tc.ptr, gen_code_size);
1363 }
1364 qemu_log("\n");
1365 qemu_log_flush();
1366 qemu_log_unlock();
1367 }
1368#endif
1369
1370 atomic_set(&tcg_ctx->code_gen_ptr, (void *)
1371 ROUND_UP((uintptr_t)gen_code_buf + gen_code_size + search_size,
1372 CODE_GEN_ALIGN));
1373
1374
1375 assert(((uintptr_t)tb & 3) == 0);
1376 tb->jmp_list_first = (uintptr_t)tb | 2;
1377 tb->jmp_list_next[0] = (uintptr_t)NULL;
1378 tb->jmp_list_next[1] = (uintptr_t)NULL;
1379
1380
1381 if (tb->jmp_reset_offset[0] != TB_JMP_RESET_OFFSET_INVALID) {
1382 tb_reset_jump(tb, 0);
1383 }
1384 if (tb->jmp_reset_offset[1] != TB_JMP_RESET_OFFSET_INVALID) {
1385 tb_reset_jump(tb, 1);
1386 }
1387
1388
1389 virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
1390 phys_page2 = -1;
1391 if ((pc & TARGET_PAGE_MASK) != virt_page2) {
1392 phys_page2 = get_page_addr_code(env, virt_page2);
1393 }
1394
1395
1396
1397
1398
1399 tb_link_page(tb, phys_pc, phys_page2);
1400 g_tree_insert(tb_ctx.tb_tree, &tb->tc, tb);
1401
1402 if (qemu_etrace_mask(ETRACE_F_TRANSLATION)) {
1403 CPUState *cpu = ENV_GET_CPU(env);
1404 hwaddr phys_addr = pc;
1405
1406#if !defined(CONFIG_USER_ONLY)
1407 phys_addr = cpu_get_phys_page_debug(cpu, pc & TARGET_PAGE_MASK);
1408 phys_addr += pc & ~TARGET_PAGE_MASK;
1409#endif
1410 etrace_dump_tb(&qemu_etracer, NULL, cpu->cpu_index,
1411 tb->pc, phys_addr, tb->size,
1412 tb->tc.ptr, gen_code_size);
1413 }
1414
1415 return tb;
1416}
1417
1418
1419
1420
1421
1422
1423
1424
1425
1426
1427
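/*
 * Invalidate all TBs which intersect the target physical address range
 * [start;end[.  start and end may refer to different physical pages; the
 * range is processed one page at a time.
 */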
1428static void tb_invalidate_phys_range_1(tb_page_addr_t start, tb_page_addr_t end)
1429{
1430 while (start < end) {
1431 tb_invalidate_phys_page_range(start, end, 0);
1432 start &= TARGET_PAGE_MASK;
1433 start += TARGET_PAGE_SIZE;
1434 }
1435}
1436
1437#ifdef CONFIG_SOFTMMU
1438void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end)
1439{
1440 assert_tb_locked();
1441 tb_invalidate_phys_range_1(start, end);
1442}
1443#else
1444void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end)
1445{
1446 assert_memory_lock();
1447 tb_lock();
1448 tb_invalidate_phys_range_1(start, end);
1449 tb_unlock();
1450}
1451#endif
1452
1453
1454
1455
1456
1457
1458
1459
1460
1461
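/*
 * Invalidate all TBs which intersect the target physical address range
 * [start;end[.  start and end must refer to the same physical page.
 * 'is_cpu_write_access' should be true if called from a real cpu write
 * access: the virtual CPU will exit the current TB if code is modified
 * inside this TB.  Called with tb_lock held.
 */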
1462void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
1463 int is_cpu_write_access)
1464{
1465 TranslationBlock *tb, *tb_next;
1466 tb_page_addr_t tb_start, tb_end;
1467 PageDesc *p;
1468 int n;
1469#ifdef TARGET_HAS_PRECISE_SMC
1470 CPUState *cpu = current_cpu;
1471 CPUArchState *env = NULL;
1472 int current_tb_not_found = is_cpu_write_access;
1473 TranslationBlock *current_tb = NULL;
1474 int current_tb_modified = 0;
1475 target_ulong current_pc = 0;
1476 target_ulong current_cs_base = 0;
1477 uint32_t current_flags = 0;
1478#endif
1479
1480 assert_memory_lock();
1481 assert_tb_locked();
1482
1483 p = page_find(start >> TARGET_PAGE_BITS);
1484 if (!p) {
1485 return;
1486 }
1487#if defined(TARGET_HAS_PRECISE_SMC)
1488 if (cpu != NULL) {
1489 env = cpu->env_ptr;
1490 }
1491#endif
1492
1493
1494
1495
1496 tb = p->first_tb;
1497 while (tb != NULL) {
1498 n = (uintptr_t)tb & 3;
1499 tb = (TranslationBlock *)((uintptr_t)tb & ~3);
1500 tb_next = tb->page_next[n];
1501
1502 if (n == 0) {
1503
1504
1505 tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
1506 tb_end = tb_start + tb->size;
1507 } else {
1508 tb_start = tb->page_addr[1];
1509 tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
1510 }
1511 if (!(tb_end <= start || tb_start >= end)) {
1512#ifdef TARGET_HAS_PRECISE_SMC
1513 if (current_tb_not_found) {
1514 current_tb_not_found = 0;
1515 current_tb = NULL;
1516 if (cpu->mem_io_pc) {
1517
1518 current_tb = tb_find_pc(cpu->mem_io_pc);
1519 }
1520 }
1521 if (current_tb == tb &&
1522 (current_tb->cflags & CF_COUNT_MASK) != 1) {
1523
1524
1525
1526
1527
1528
1529 current_tb_modified = 1;
1530 cpu_restore_state_from_tb(cpu, current_tb, cpu->mem_io_pc);
            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                 &current_flags);
1533 }
1534#endif
1535 tb_phys_invalidate(tb, -1);
1536 }
1537 tb = tb_next;
1538 }
1539#if !defined(CONFIG_USER_ONLY)
1540
1541 if (!p->first_tb) {
1542 invalidate_page_bitmap(p);
1543 tlb_unprotect_code(start);
1544 }
1545#endif
1546#ifdef TARGET_HAS_PRECISE_SMC
1547 if (current_tb_modified) {
1548
1549 cpu->cflags_next_tb = 1 | curr_cflags();
1550 cpu_loop_exit_noexc(cpu);
1551 }
1552#endif
1553}
1554
1555#ifdef CONFIG_SOFTMMU
1556
1557
1558
1559
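/*
 * Fast path for code writes in system mode.  'len' is expected to be small
 * (at most the width of one access) and 'start' aligned to it; the
 * per-page code bitmap is used to skip a full invalidation when no
 * translated code overlaps the written bytes.
 */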
1560void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
1561{
1562 PageDesc *p;
1563
1564#if 0
1565 if (1) {
1566 qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
1567 cpu_single_env->mem_io_vaddr, len,
1568 cpu_single_env->eip,
1569 cpu_single_env->eip +
1570 (intptr_t)cpu_single_env->segs[R_CS].base);
1571 }
1572#endif
1573 assert_memory_lock();
1574
1575 p = page_find(start >> TARGET_PAGE_BITS);
1576 if (!p) {
1577 return;
1578 }
1579 if (!p->code_bitmap &&
1580 ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD) {
1581
1582
1583
1584 build_page_bitmap(p);
1585 }
1586 if (p->code_bitmap) {
1587 unsigned int nr;
1588 unsigned long b;
1589
1590 nr = start & ~TARGET_PAGE_MASK;
1591 b = p->code_bitmap[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG - 1));
1592 if (b & ((1 << len) - 1)) {
1593 goto do_invalidate;
1594 }
1595 } else {
1596 do_invalidate:
1597 tb_invalidate_phys_page_range(start, start + len, 1);
1598 }
1599}
1600#else
1601
1602
1603
1604
1605
1606
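/*
 * User-mode variant: invalidate all TBs on the page containing 'addr'.
 * If 'pc' is non-zero it is the host PC of the faulting write and is used
 * to detect modification of the TB currently being executed.  Returns
 * true if the caller must abandon the current TB.  Called with mmap_lock
 * held.
 */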
1607static bool tb_invalidate_phys_page(tb_page_addr_t addr, uintptr_t pc)
1608{
1609 TranslationBlock *tb;
1610 PageDesc *p;
1611 int n;
1612#ifdef TARGET_HAS_PRECISE_SMC
1613 TranslationBlock *current_tb = NULL;
1614 CPUState *cpu = current_cpu;
1615 CPUArchState *env = NULL;
1616 int current_tb_modified = 0;
1617 target_ulong current_pc = 0;
1618 target_ulong current_cs_base = 0;
1619 uint32_t current_flags = 0;
1620#endif
1621
1622 assert_memory_lock();
1623
1624 addr &= TARGET_PAGE_MASK;
1625 p = page_find(addr >> TARGET_PAGE_BITS);
1626 if (!p) {
1627 return false;
1628 }
1629
1630 tb_lock();
1631 tb = p->first_tb;
1632#ifdef TARGET_HAS_PRECISE_SMC
1633 if (tb && pc != 0) {
1634 current_tb = tb_find_pc(pc);
1635 }
1636 if (cpu != NULL) {
1637 env = cpu->env_ptr;
1638 }
1639#endif
1640 while (tb != NULL) {
1641 n = (uintptr_t)tb & 3;
1642 tb = (TranslationBlock *)((uintptr_t)tb & ~3);
1643#ifdef TARGET_HAS_PRECISE_SMC
1644 if (current_tb == tb &&
1645 (current_tb->cflags & CF_COUNT_MASK) != 1) {
1646
1647
1648
1649
1650
1651
1652 current_tb_modified = 1;
1653 cpu_restore_state_from_tb(cpu, current_tb, pc);
            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                 &current_flags);
1656 }
1657#endif
1658 tb_phys_invalidate(tb, addr);
1659 tb = tb->page_next[n];
1660 }
1661 p->first_tb = NULL;
1662#ifdef TARGET_HAS_PRECISE_SMC
1663 if (current_tb_modified) {
1664
1665 cpu->cflags_next_tb = 1 | curr_cflags();
1666
1667
1668 return true;
1669 }
1670#endif
1671 tb_unlock();
1672
1673 return false;
1674}
1675#endif
1676
1677
1678
1679
1680
1681
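/*
 * Find the TB whose generated code contains the host address 'tc_ptr'.
 * Returns NULL if no TB matches.  Called with tb_lock held.
 */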
1682static TranslationBlock *tb_find_pc(uintptr_t tc_ptr)
1683{
1684 struct tb_tc s = { .ptr = (void *)tc_ptr };
1685
1686 return g_tree_lookup(tb_ctx.tb_tree, &s);
1687}
1688
1689#if !defined(CONFIG_USER_ONLY)
1690void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr)
1691{
1692 ram_addr_t ram_addr;
1693 MemoryRegion *mr;
1694 hwaddr l = 1;
1695
1696 rcu_read_lock();
1697 mr = address_space_translate(as, addr, &addr, &l, false);
1698 if (!(memory_region_is_ram(mr)
1699 || memory_region_is_romd(mr))) {
1700 rcu_read_unlock();
1701 return;
1702 }
1703 ram_addr = memory_region_get_ram_addr(mr) + addr;
1704 tb_lock();
1705 tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
1706 tb_unlock();
1707 rcu_read_unlock();
1708}
1709#endif
1710
1711
1712void tb_check_watchpoint(CPUState *cpu)
1713{
1714 TranslationBlock *tb;
1715
1716 tb = tb_find_pc(cpu->mem_io_pc);
1717 if (tb) {
1718
1719 cpu_restore_state_from_tb(cpu, tb, cpu->mem_io_pc);
1720 tb_phys_invalidate(tb, -1);
1721 } else {
1722
1723
1724 CPUArchState *env = cpu->env_ptr;
1725 target_ulong pc, cs_base;
1726 tb_page_addr_t addr;
1727 uint32_t flags;
1728
1729 cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
1730 addr = get_page_addr_code(env, pc);
1731 tb_invalidate_phys_range(addr, addr + 1);
1732 }
1733}
1734
1735#ifndef CONFIG_USER_ONLY
1736
1737
1738
1739
1740
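/*
 * In deterministic (icount) execution mode, instructions that perform
 * device I/O must be the last instruction of a TB.  Recompile the current
 * TB with a shorter instruction count so that the I/O instruction ends
 * the block, then restart execution.
 */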
1741void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr)
1742{
1743#if defined(TARGET_MIPS) || defined(TARGET_SH4)
1744 CPUArchState *env = cpu->env_ptr;
1745#endif
1746 TranslationBlock *tb;
1747 uint32_t n;
1748
1749 tb_lock();
1750 tb = tb_find_pc(retaddr);
1751 if (!tb) {
1752 cpu_abort(cpu, "cpu_io_recompile: could not find TB for pc=%p",
1753 (void *)retaddr);
1754 }
1755 n = cpu->icount_decr.u16.low + tb->icount;
1756 cpu_restore_state_from_tb(cpu, tb, retaddr);
1757
1758
1759 n = n - cpu->icount_decr.u16.low;
1760
1761 n++;
1762
1763
1764
1765
1766#if defined(TARGET_MIPS)
1767 if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
1768 env->active_tc.PC -= (env->hflags & MIPS_HFLAG_B16 ? 2 : 4);
1769 cpu->icount_decr.u16.low++;
1770 env->hflags &= ~MIPS_HFLAG_BMASK;
1771 }
1772#elif defined(TARGET_SH4)
1773 if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
1774 && n > 1) {
1775 env->pc -= 2;
1776 cpu->icount_decr.u16.low++;
1777 env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
1778 }
1779#endif
1780
1781 if (n > CF_COUNT_MASK) {
1782 cpu_abort(cpu, "TB too big during recompile");
1783 }
1784
1785
1786 cpu->cflags_next_tb = curr_cflags() | CF_LAST_IO | n;
1787
1788 if (tb->cflags & CF_NOCACHE) {
1789 if (tb->orig_tb) {
1790
1791
1792 tb_phys_invalidate(tb->orig_tb, -1);
1793 }
1794 tb_remove(tb);
1795 }
1796
1797
1798
1799
1800
1801
1802
1803
1804
1805
1806 cpu_loop_exit_noexc(cpu);
1807}
1808
1809static void tb_jmp_cache_clear_page(CPUState *cpu, target_ulong page_addr)
1810{
1811 unsigned int i, i0 = tb_jmp_cache_hash_page(page_addr);
1812
1813 for (i = 0; i < TB_JMP_PAGE_SIZE; i++) {
1814 atomic_set(&cpu->tb_jmp_cache[i0 + i], NULL);
1815 }
1816}
1817
1818void tb_flush_jmp_cache(CPUState *cpu, target_ulong addr)
1819{
1820
1821
1822 tb_jmp_cache_clear_page(cpu, addr - TARGET_PAGE_SIZE);
1823 tb_jmp_cache_clear_page(cpu, addr);
1824}
1825
1826static void print_qht_statistics(FILE *f, fprintf_function cpu_fprintf,
1827 struct qht_stats hst)
1828{
1829 uint32_t hgram_opts;
1830 size_t hgram_bins;
1831 char *hgram;
1832
1833 if (!hst.head_buckets) {
1834 return;
1835 }
1836 cpu_fprintf(f, "TB hash buckets %zu/%zu (%0.2f%% head buckets used)\n",
1837 hst.used_head_buckets, hst.head_buckets,
1838 (double)hst.used_head_buckets / hst.head_buckets * 100);
1839
1840 hgram_opts = QDIST_PR_BORDER | QDIST_PR_LABELS;
1841 hgram_opts |= QDIST_PR_100X | QDIST_PR_PERCENT;
1842 if (qdist_xmax(&hst.occupancy) - qdist_xmin(&hst.occupancy) == 1) {
1843 hgram_opts |= QDIST_PR_NODECIMAL;
1844 }
1845 hgram = qdist_pr(&hst.occupancy, 10, hgram_opts);
1846 cpu_fprintf(f, "TB hash occupancy %0.2f%% avg chain occ. Histogram: %s\n",
1847 qdist_avg(&hst.occupancy) * 100, hgram);
1848 g_free(hgram);
1849
1850 hgram_opts = QDIST_PR_BORDER | QDIST_PR_LABELS;
1851 hgram_bins = qdist_xmax(&hst.chain) - qdist_xmin(&hst.chain);
1852 if (hgram_bins > 10) {
1853 hgram_bins = 10;
1854 } else {
1855 hgram_bins = 0;
1856 hgram_opts |= QDIST_PR_NODECIMAL | QDIST_PR_NOBINRANGE;
1857 }
1858 hgram = qdist_pr(&hst.chain, hgram_bins, hgram_opts);
1859 cpu_fprintf(f, "TB hash avg chain %0.3f buckets. Histogram: %s\n",
1860 qdist_avg(&hst.chain), hgram);
1861 g_free(hgram);
1862}
1863
1864struct tb_tree_stats {
1865 size_t host_size;
1866 size_t target_size;
1867 size_t max_target_size;
1868 size_t direct_jmp_count;
1869 size_t direct_jmp2_count;
1870 size_t cross_page;
1871};
1872
1873static gboolean tb_tree_stats_iter(gpointer key, gpointer value, gpointer data)
1874{
1875 const TranslationBlock *tb = value;
1876 struct tb_tree_stats *tst = data;
1877
1878 tst->host_size += tb->tc.size;
1879 tst->target_size += tb->size;
1880 if (tb->size > tst->max_target_size) {
1881 tst->max_target_size = tb->size;
1882 }
1883 if (tb->page_addr[1] != -1) {
1884 tst->cross_page++;
1885 }
1886 if (tb->jmp_reset_offset[0] != TB_JMP_RESET_OFFSET_INVALID) {
1887 tst->direct_jmp_count++;
1888 if (tb->jmp_reset_offset[1] != TB_JMP_RESET_OFFSET_INVALID) {
1889 tst->direct_jmp2_count++;
1890 }
1891 }
1892 return false;
1893}
1894
1895void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
1896{
1897 struct tb_tree_stats tst = {};
1898 struct qht_stats hst;
1899 size_t nb_tbs;
1900
1901 tb_lock();
1902
1903 nb_tbs = g_tree_nnodes(tb_ctx.tb_tree);
1904 g_tree_foreach(tb_ctx.tb_tree, tb_tree_stats_iter, &tst);
1905
1906 cpu_fprintf(f, "Translation buffer state:\n");
1907
1908
1909
1910
1911
1912 cpu_fprintf(f, "gen code size %zu/%zu\n",
1913 tcg_code_size(), tcg_code_capacity());
1914 cpu_fprintf(f, "TB count %zu\n", nb_tbs);
1915 cpu_fprintf(f, "TB avg target size %zu max=%zu bytes\n",
1916 nb_tbs ? tst.target_size / nb_tbs : 0,
1917 tst.max_target_size);
1918 cpu_fprintf(f, "TB avg host size %zu bytes (expansion ratio: %0.1f)\n",
1919 nb_tbs ? tst.host_size / nb_tbs : 0,
1920 tst.target_size ? (double)tst.host_size / tst.target_size : 0);
1921 cpu_fprintf(f, "cross page TB count %zu (%zu%%)\n", tst.cross_page,
1922 nb_tbs ? (tst.cross_page * 100) / nb_tbs : 0);
1923 cpu_fprintf(f, "direct jump count %zu (%zu%%) (2 jumps=%zu %zu%%)\n",
1924 tst.direct_jmp_count,
1925 nb_tbs ? (tst.direct_jmp_count * 100) / nb_tbs : 0,
1926 tst.direct_jmp2_count,
1927 nb_tbs ? (tst.direct_jmp2_count * 100) / nb_tbs : 0);
1928
1929 qht_statistics_init(&tb_ctx.htable, &hst);
1930 print_qht_statistics(f, cpu_fprintf, hst);
1931 qht_statistics_destroy(&hst);
1932
1933 cpu_fprintf(f, "\nStatistics:\n");
1934 cpu_fprintf(f, "TB flush count %u\n",
1935 atomic_read(&tb_ctx.tb_flush_count));
1936 cpu_fprintf(f, "TB invalidate count %d\n", tb_ctx.tb_phys_invalidate_count);
1937 cpu_fprintf(f, "TLB flush count %zu\n", tlb_flush_count());
1938 tcg_dump_info(f, cpu_fprintf);
1939
1940 tb_unlock();
1941}
1942
1943void dump_opcount_info(FILE *f, fprintf_function cpu_fprintf)
1944{
1945 tcg_dump_op_count(f, cpu_fprintf);
1946}
1947
1948#else
1949
1950void cpu_interrupt(CPUState *cpu, int mask)
1951{
1952 g_assert(qemu_mutex_iothread_locked());
1953 cpu->interrupt_request |= mask;
1954 cpu->icount_decr.u16.high = -1;
1955}
1956
1957
1958
1959
1960
1961struct walk_memory_regions_data {
1962 walk_memory_regions_fn fn;
1963 void *priv;
1964 target_ulong start;
1965 int prot;
1966};
1967
1968static int walk_memory_regions_end(struct walk_memory_regions_data *data,
1969 target_ulong end, int new_prot)
1970{
1971 if (data->start != -1u) {
1972 int rc = data->fn(data->priv, data->start, end, data->prot);
1973 if (rc != 0) {
1974 return rc;
1975 }
1976 }
1977
1978 data->start = (new_prot ? end : -1u);
1979 data->prot = new_prot;
1980
1981 return 0;
1982}
1983
1984static int walk_memory_regions_1(struct walk_memory_regions_data *data,
1985 target_ulong base, int level, void **lp)
1986{
1987 target_ulong pa;
1988 int i, rc;
1989
1990 if (*lp == NULL) {
1991 return walk_memory_regions_end(data, base, 0);
1992 }
1993
1994 if (level == 0) {
1995 PageDesc *pd = *lp;
1996
1997 for (i = 0; i < V_L2_SIZE; ++i) {
1998 int prot = pd[i].flags;
1999
2000 pa = base | (i << TARGET_PAGE_BITS);
2001 if (prot != data->prot) {
2002 rc = walk_memory_regions_end(data, pa, prot);
2003 if (rc != 0) {
2004 return rc;
2005 }
2006 }
2007 }
2008 } else {
2009 void **pp = *lp;
2010
2011 for (i = 0; i < V_L2_SIZE; ++i) {
2012 pa = base | ((target_ulong)i <<
2013 (TARGET_PAGE_BITS + V_L2_BITS * level));
2014 rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
2015 if (rc != 0) {
2016 return rc;
2017 }
2018 }
2019 }
2020
2021 return 0;
2022}
2023
2024int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
2025{
2026 struct walk_memory_regions_data data;
2027 uintptr_t i, l1_sz = v_l1_size;
2028
2029 data.fn = fn;
2030 data.priv = priv;
2031 data.start = -1u;
2032 data.prot = 0;
2033
2034 for (i = 0; i < l1_sz; i++) {
2035 target_ulong base = i << (v_l1_shift + TARGET_PAGE_BITS);
2036 int rc = walk_memory_regions_1(&data, base, v_l2_levels, l1_map + i);
2037 if (rc != 0) {
2038 return rc;
2039 }
2040 }
2041
2042 return walk_memory_regions_end(&data, 0, 0);
2043}
2044
2045static int dump_region(void *priv, target_ulong start,
2046 target_ulong end, unsigned long prot)
2047{
2048 FILE *f = (FILE *)priv;
2049
2050 (void) fprintf(f, TARGET_FMT_lx"-"TARGET_FMT_lx
2051 " "TARGET_FMT_lx" %c%c%c\n",
2052 start, end, end - start,
2053 ((prot & PAGE_READ) ? 'r' : '-'),
2054 ((prot & PAGE_WRITE) ? 'w' : '-'),
2055 ((prot & PAGE_EXEC) ? 'x' : '-'));
2056
2057 return 0;
2058}
2059
2060
2061void page_dump(FILE *f)
2062{
2063 const int length = sizeof(target_ulong) * 2;
2064 (void) fprintf(f, "%-*s %-*s %-*s %s\n",
2065 length, "start", length, "end", length, "size", "prot");
2066 walk_memory_regions(f, dump_region);
2067}
2068
2069int page_get_flags(target_ulong address)
2070{
2071 PageDesc *p;
2072
2073 p = page_find(address >> TARGET_PAGE_BITS);
2074 if (!p) {
2075 return 0;
2076 }
2077 return p->flags;
2078}
2079
2080
2081
2082
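/*
 * Modify the flags of a page and invalidate the code if necessary.
 * The flag PAGE_WRITE_ORG is positioned automatically depending on
 * PAGE_WRITE.  The mmap_lock should be held.
 */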
2083void page_set_flags(target_ulong start, target_ulong end, int flags)
2084{
2085 target_ulong addr, len;
2086
2087
2088
2089
2090#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2091 assert(end <= ((target_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
2092#endif
2093 assert(start < end);
2094 assert_memory_lock();
2095
2096 start = start & TARGET_PAGE_MASK;
2097 end = TARGET_PAGE_ALIGN(end);
2098
2099 if (flags & PAGE_WRITE) {
2100 flags |= PAGE_WRITE_ORG;
2101 }
2102
2103 for (addr = start, len = end - start;
2104 len != 0;
2105 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
2106 PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2107
2108
2109
2110 if (!(p->flags & PAGE_WRITE) &&
2111 (flags & PAGE_WRITE) &&
2112 p->first_tb) {
2113 tb_invalidate_phys_page(addr, 0);
2114 }
2115 p->flags = flags;
2116 }
2117}
2118
2119int page_check_range(target_ulong start, target_ulong len, int flags)
2120{
2121 PageDesc *p;
2122 target_ulong end;
2123 target_ulong addr;
2124
2125
2126
2127
2128#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2129 assert(start < ((target_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
2130#endif
2131
2132 if (len == 0) {
2133 return 0;
2134 }
2135 if (start + len - 1 < start) {
2136
2137 return -1;
2138 }
2139
2140
2141 end = TARGET_PAGE_ALIGN(start + len);
2142 start = start & TARGET_PAGE_MASK;
2143
2144 for (addr = start, len = end - start;
2145 len != 0;
2146 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
2147 p = page_find(addr >> TARGET_PAGE_BITS);
2148 if (!p) {
2149 return -1;
2150 }
2151 if (!(p->flags & PAGE_VALID)) {
2152 return -1;
2153 }
2154
2155 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ)) {
2156 return -1;
2157 }
2158 if (flags & PAGE_WRITE) {
2159 if (!(p->flags & PAGE_WRITE_ORG)) {
2160 return -1;
2161 }
2162
2163
2164 if (!(p->flags & PAGE_WRITE)) {
2165 if (!page_unprotect(addr, 0)) {
2166 return -1;
2167 }
2168 }
2169 }
2170 }
2171 return 0;
2172}
2173
2174
2175
2176
2177
2178
2179
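/*
 * Called from the signal handler: invalidate the code and unprotect the
 * page.  Returns 0 if the fault was not handled, 1 if it was handled, and
 * 2 if it was handled but the caller must cause the TB to be immediately
 * exited (only possible when 'pc' is non-zero).
 */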
2180int page_unprotect(target_ulong address, uintptr_t pc)
2181{
2182 unsigned int prot;
2183 bool current_tb_invalidated;
2184 PageDesc *p;
2185 target_ulong host_start, host_end, addr;
2186
2187
2188
2189
2190 mmap_lock();
2191
2192 p = page_find(address >> TARGET_PAGE_BITS);
2193 if (!p) {
2194 mmap_unlock();
2195 return 0;
2196 }
2197
2198
2199
2200 if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
2201 host_start = address & qemu_host_page_mask;
2202 host_end = host_start + qemu_host_page_size;
2203
2204 prot = 0;
2205 current_tb_invalidated = false;
2206 for (addr = host_start ; addr < host_end ; addr += TARGET_PAGE_SIZE) {
2207 p = page_find(addr >> TARGET_PAGE_BITS);
2208 p->flags |= PAGE_WRITE;
2209 prot |= p->flags;
2210
2211
2212
2213 current_tb_invalidated |= tb_invalidate_phys_page(addr, pc);
2214#ifdef CONFIG_USER_ONLY
2215 if (DEBUG_TB_CHECK_GATE) {
2216 tb_invalidate_check(addr);
2217 }
2218#endif
2219 }
2220 mprotect((void *)g2h(host_start), qemu_host_page_size,
2221 prot & PAGE_BITS);
2222
2223 mmap_unlock();
2224
2225 return current_tb_invalidated ? 2 : 1;
2226 }
2227 mmap_unlock();
2228 return 0;
2229}
2230#endif
2231
2232
2233void tcg_flush_softmmu_tlb(CPUState *cs)
2234{
2235#ifdef CONFIG_SOFTMMU
2236 tlb_flush(cs);
2237#endif
2238}
2239