1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19#ifdef _WIN32
20#include <windows.h>
21#endif
22#include "qemu/osdep.h"
23
24
25#include "qemu-common.h"
26#define NO_CPU_IO_DEFS
27#include "cpu.h"
28#include "trace.h"
29#include "disas/disas.h"
30#include "exec/exec-all.h"
31#include "tcg.h"
32#if defined(CONFIG_USER_ONLY)
33#include "qemu.h"
34#include "exec/exec-all.h"
35#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
36#include <sys/param.h>
37#if __FreeBSD_version >= 700104
38#define HAVE_KINFO_GETVMMAP
39#define sigqueue sigqueue_freebsd
40#include <sys/proc.h>
41#include <machine/profile.h>
42#define _KERNEL
43#include <sys/user.h>
44#undef _KERNEL
45#undef sigqueue
46#include <libutil.h>
47#endif
48#endif
49#else
50#include "exec/address-spaces.h"
51#endif
52
53#include "exec/cputlb.h"
54#include "exec/tb-hash.h"
55#include "translate-all.h"
56#include "qemu/bitmap.h"
57#include "qemu/error-report.h"
58#include "qemu/timer.h"
59#include "qemu/main-loop.h"
60#include "exec/log.h"
61#include "sysemu/cpus.h"
62
63
64
65
66
67
68#if !defined(CONFIG_USER_ONLY)
69
70#undef DEBUG_TB_CHECK
71#endif
72
73
74
75
76
77
78
79#ifdef CONFIG_SOFTMMU
80#define assert_memory_lock() tcg_debug_assert(have_tb_lock)
81#else
82#define assert_memory_lock() tcg_debug_assert(have_mmap_lock())
83#endif
84
85#define SMC_BITMAP_USE_THRESHOLD 10
86
typedef struct PageDesc {
    /* list of TBs intersecting this ram page; the low 2 bits of the
       pointer tag which page_next[] slot of the TB continues the list */
    TranslationBlock *first_tb;
#ifdef CONFIG_SOFTMMU
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    unsigned long *code_bitmap;
#else
    unsigned long flags;
#endif
} PageDesc;
99
100
101
102#if !defined(CONFIG_USER_ONLY)
103#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
104# define L1_MAP_ADDR_SPACE_BITS HOST_LONG_BITS
105#else
106# define L1_MAP_ADDR_SPACE_BITS TARGET_PHYS_ADDR_SPACE_BITS
107#endif
108#else
109# define L1_MAP_ADDR_SPACE_BITS TARGET_VIRT_ADDR_SPACE_BITS
110#endif
111
112
113#define V_L2_BITS 10
114#define V_L2_SIZE (1 << V_L2_BITS)
115
116
117QEMU_BUILD_BUG_ON(CPU_TRACE_DSTATE_MAX_EVENTS >
118 sizeof(((TranslationBlock *)0)->trace_vcpu_dstate)
119 * BITS_PER_BYTE);
120
121
122
123
124static int v_l1_size;
125static int v_l1_shift;
126static int v_l2_levels;
127
128
129
130
131#define V_L1_MIN_BITS 4
132#define V_L1_MAX_BITS (V_L2_BITS + 3)
133#define V_L1_MAX_SIZE (1 << V_L1_MAX_BITS)
134
135static void *l1_map[V_L1_MAX_SIZE];
136
137
138TCGContext tcg_ctx;
139bool parallel_cpus;
140
141
142__thread int have_tb_lock;
143
/* Size the dynamically-sized levels of the guest page table: a
 * variable-size top level (L1) plus v_l2_levels intermediate levels of
 * V_L2_SIZE entries each, covering L1_MAP_ADDR_SPACE_BITS of address.
 */
static void page_table_config_init(void)
{
    uint32_t v_l1_bits;

    assert(TARGET_PAGE_BITS);
    /* The bits remaining after N lower levels are an exact multiple of
     * V_L2_BITS; grow the top level if it would be too small.
     */
    v_l1_bits = (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % V_L2_BITS;
    if (v_l1_bits < V_L1_MIN_BITS) {
        v_l1_bits += V_L2_BITS;
    }

    v_l1_size = 1 << v_l1_bits;
    v_l1_shift = L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - v_l1_bits;
    v_l2_levels = v_l1_shift / V_L2_BITS - 1;

    assert(v_l1_bits <= V_L1_MAX_BITS);
    assert(v_l1_shift % V_L2_BITS == 0);
    assert(v_l2_levels >= 0);
}
163
164#define assert_tb_locked() tcg_debug_assert(have_tb_lock)
165#define assert_tb_unlocked() tcg_debug_assert(!have_tb_lock)
166
/* Acquire the global TB lock; must not already be held by this thread. */
void tb_lock(void)
{
    assert_tb_unlocked();
    qemu_mutex_lock(&tcg_ctx.tb_ctx.tb_lock);
    have_tb_lock++;  /* per-thread ownership flag, checked by the asserts */
}
173
/* Release the global TB lock; must be held by this thread. */
void tb_unlock(void)
{
    assert_tb_locked();
    have_tb_lock--;  /* clear ownership before dropping the mutex */
    qemu_mutex_unlock(&tcg_ctx.tb_ctx.tb_lock);
}
180
/* Drop the TB lock if this thread holds it (e.g. after longjmp'ing out
 * of the translator); safe to call when the lock is not held.
 */
void tb_lock_reset(void)
{
    if (have_tb_lock) {
        qemu_mutex_unlock(&tcg_ctx.tb_ctx.tb_lock);
        have_tb_lock = 0;
    }
}
188
189static TranslationBlock *tb_find_pc(uintptr_t tc_ptr);
190
/* Initialize the global TCG translation context. */
void cpu_gen_init(void)
{
    tcg_context_init(&tcg_ctx);
}
195
196
197
198static uint8_t *encode_sleb128(uint8_t *p, target_long val)
199{
200 int more, byte;
201
202 do {
203 byte = val & 0x7f;
204 val >>= 7;
205 more = !((val == 0 && (byte & 0x40) == 0)
206 || (val == -1 && (byte & 0x40) != 0));
207 if (more) {
208 byte |= 0x80;
209 }
210 *p++ = byte;
211 } while (more);
212
213 return p;
214}
215
216
217
218static target_long decode_sleb128(uint8_t **pp)
219{
220 uint8_t *p = *pp;
221 target_long val = 0;
222 int byte, shift = 0;
223
224 do {
225 byte = *p++;
226 val |= (target_ulong)(byte & 0x7f) << shift;
227 shift += 7;
228 } while (byte & 0x80);
229 if (shift < TARGET_LONG_BITS && (byte & 0x40)) {
230 val |= -(target_ulong)1 << shift;
231 }
232
233 *pp = p;
234 return val;
235}
236
237
238
239
240
241
242
243
244
245
246
247
248
/* Encode the data collected about the instructions while compiling TB.
 * Place the data at BLOCK, and return the number of bytes consumed.
 *
 * The logical table consists of TARGET_INSN_START_WORDS target_ulong's,
 * which come from the target's insn_start data, followed by a uintptr_t
 * which comes from the host pc of the end of the code implementing the insn.
 *
 * Each line of the table is encoded as sleb128 deltas from the previous
 * line.  The seed for the first line is { tb->pc, 0..., 0 }.
 * Returns -1 if the encoded data overruns the code-gen buffer highwater.
 */
static int encode_search(TranslationBlock *tb, uint8_t *block)
{
    uint8_t *highwater = tcg_ctx.code_gen_highwater;
    uint8_t *p = block;
    int i, j, n;

    tb->tc_search = block;

    for (i = 0, n = tb->icount; i < n; ++i) {
        target_ulong prev;

        for (j = 0; j < TARGET_INSN_START_WORDS; ++j) {
            if (i == 0) {
                prev = (j == 0 ? tb->pc : 0);
            } else {
                prev = tcg_ctx.gen_insn_data[i - 1][j];
            }
            p = encode_sleb128(p, tcg_ctx.gen_insn_data[i][j] - prev);
        }
        prev = (i == 0 ? 0 : tcg_ctx.gen_insn_end_off[i - 1]);
        p = encode_sleb128(p, tcg_ctx.gen_insn_end_off[i] - prev);

        /* Test for (pending) buffer overflow.  The assumption is that any
           one row beginning below the high water mark cannot overrun
           the buffer completely.  Thus we can test for overflow after
           encoding a row without having to check during encoding.  */
        if (unlikely(p > highwater)) {
            return -1;
        }
    }

    return p - block;
}
282
283
284
285
/* The cpu state corresponding to 'searched_pc' is restored.
 *
 * Called with tb_lock held.
 * Returns 0 on success, -1 if 'searched_pc' is not covered by TB.
 */
static int cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb,
                                     uintptr_t searched_pc)
{
    target_ulong data[TARGET_INSN_START_WORDS] = { tb->pc };
    uintptr_t host_pc = (uintptr_t)tb->tc_ptr;
    CPUArchState *env = cpu->env_ptr;
    uint8_t *p = tb->tc_search;
    int i, j, num_insns = tb->icount;
#ifdef CONFIG_PROFILER
    int64_t ti = profile_getclock();
#endif

    /* searched_pc is a return address: step back into the call insn.  */
    searched_pc -= GETPC_ADJ;

    if (searched_pc < host_pc) {
        return -1;
    }

    /* Reconstruct the stored insn data while looking for the point at
       which the end of the insn exceeds the searched_pc.  */
    for (i = 0; i < num_insns; ++i) {
        for (j = 0; j < TARGET_INSN_START_WORDS; ++j) {
            data[j] += decode_sleb128(&p);
        }
        host_pc += decode_sleb128(&p);
        if (host_pc > searched_pc) {
            goto found;
        }
    }
    return -1;

 found:
    if (tb->cflags & CF_USE_ICOUNT) {
        assert(use_icount);
        /* Reset the cycle counter to the start of the block.  */
        cpu->icount_decr.u16.low += num_insns;
        /* Clear the IO flag.  */
        cpu->can_do_io = 0;
    }
    /* Account for the i insns already fully executed in this block.  */
    cpu->icount_decr.u16.low -= i;
    restore_state_to_opc(env, tb, data);

#ifdef CONFIG_PROFILER
    tcg_ctx.restore_time += profile_getclock() - ti;
    tcg_ctx.restore_count++;
#endif
    return 0;
}
334
/* Restore guest CPU state from the TB containing host pc RETADDR.
 * Returns true if a TB was found and state restored.
 */
bool cpu_restore_state(CPUState *cpu, uintptr_t retaddr)
{
    TranslationBlock *tb;
    bool r = false;

    /* A retaddr of zero is invalid so we really shouldn't have ended
     * up here. The target code has likely forgotten to check retaddr
     * != 0 before attempting to restore state. We return early to
     * avoid blowing up on a recursive tb_lock(). The target must have
     * previously survived a failed cpu_restore_state because
     * tb_find_pc(0) would have failed anyway. It still should be
     * fixed though.
     */
    if (!retaddr) {
        return r;
    }

    tb_lock();
    tb = tb_find_pc(retaddr);
    if (tb) {
        cpu_restore_state_from_tb(cpu, tb, retaddr);
        if (tb->cflags & CF_NOCACHE) {
            /* one-shot translation, invalidate it immediately */
            tb_phys_invalidate(tb, -1);
            tb_free(tb);
        }
        r = true;
    }
    tb_unlock();

    return r;
}
368
/* Initialize host/target page sizes and the page-table geometry.  On
 * BSD user-mode hosts, additionally mark the host's pre-existing
 * mappings PAGE_RESERVED so the guest cannot map over them.
 */
static void page_init(void)
{
    page_size_init();
    page_table_config_init();

#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
    {
#ifdef HAVE_KINFO_GETVMMAP
        struct kinfo_vmentry *freep;
        int i, cnt;

        /* Enumerate this process's mappings via libutil.  */
        freep = kinfo_getvmmap(getpid(), &cnt);
        if (freep) {
            mmap_lock();
            for (i = 0; i < cnt; i++) {
                unsigned long startaddr, endaddr;

                startaddr = freep[i].kve_start;
                endaddr = freep[i].kve_end;
                if (h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                    } else {
                        /* Region runs past the guest address space: reserve
                           through the top of it if it fits at all.  */
#if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
                        endaddr = ~0ul;
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
#endif
                    }
                }
            }
            free(freep);
            mmap_unlock();
        }
#else
        /* Fall back to parsing the Linux-compat maps file.  */
        FILE *f;

        last_brk = (unsigned long)sbrk(0);

        f = fopen("/compat/linux/proc/self/maps", "r");
        if (f) {
            mmap_lock();

            do {
                unsigned long startaddr, endaddr;
                int n;

                n = fscanf(f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);

                if (n == 2 && h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                    } else {
                        endaddr = ~0ul;
                    }
                    page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                }
            } while (!feof(f));

            fclose(f);
            mmap_unlock();
        }
#endif
    }
#endif
}
439
440
441
442
443
/* Look up (or, if ALLOC is set, create) the PageDesc for guest page
 * INDEX.  New levels are published with atomic_rcu_set so lock-free
 * readers always observe fully zero-initialized tables.
 * Returns NULL if the page has no descriptor and ALLOC is 0.
 */
static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
{
    PageDesc *pd;
    void **lp;
    int i;

    if (alloc) {
        assert_memory_lock();
    }

    /* Level 1.  Always allocated.  */
    lp = l1_map + ((index >> v_l1_shift) & (v_l1_size - 1));

    /* Level 2..N-1.  */
    for (i = v_l2_levels; i > 0; i--) {
        void **p = atomic_rcu_read(lp);

        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            p = g_new0(void *, V_L2_SIZE);
            atomic_rcu_set(lp, p);
        }

        lp = p + ((index >> (i * V_L2_BITS)) & (V_L2_SIZE - 1));
    }

    /* Last level: an array of PageDesc.  */
    pd = atomic_rcu_read(lp);
    if (pd == NULL) {
        if (!alloc) {
            return NULL;
        }
        pd = g_new0(PageDesc, V_L2_SIZE);
        atomic_rcu_set(lp, pd);
    }

    return pd + (index & (V_L2_SIZE - 1));
}
483
/* Look up the PageDesc for guest page INDEX without allocating. */
static inline PageDesc *page_find(tb_page_addr_t index)
{
    return page_find_alloc(index, 0);
}
488
489#if defined(CONFIG_USER_ONLY)
490
491
492
493
494#define USE_STATIC_CODE_GEN_BUFFER
495#endif
496
497
498
499#define MIN_CODE_GEN_BUFFER_SIZE (1024u * 1024)
500
501
502
503
504#if defined(__x86_64__)
505# define MAX_CODE_GEN_BUFFER_SIZE (2ul * 1024 * 1024 * 1024)
506#elif defined(__sparc__)
507# define MAX_CODE_GEN_BUFFER_SIZE (2ul * 1024 * 1024 * 1024)
508#elif defined(__powerpc64__)
509# define MAX_CODE_GEN_BUFFER_SIZE (2ul * 1024 * 1024 * 1024)
510#elif defined(__powerpc__)
511# define MAX_CODE_GEN_BUFFER_SIZE (32u * 1024 * 1024)
512#elif defined(__aarch64__)
513# define MAX_CODE_GEN_BUFFER_SIZE (2ul * 1024 * 1024 * 1024)
514#elif defined(__s390x__)
515
516# define MAX_CODE_GEN_BUFFER_SIZE (3ul * 1024 * 1024 * 1024)
517#elif defined(__mips__)
518
519
520# define MAX_CODE_GEN_BUFFER_SIZE (128ul * 1024 * 1024)
521#else
522# define MAX_CODE_GEN_BUFFER_SIZE ((size_t)-1)
523#endif
524
525#define DEFAULT_CODE_GEN_BUFFER_SIZE_1 (32u * 1024 * 1024)
526
527#define DEFAULT_CODE_GEN_BUFFER_SIZE \
528 (DEFAULT_CODE_GEN_BUFFER_SIZE_1 < MAX_CODE_GEN_BUFFER_SIZE \
529 ? DEFAULT_CODE_GEN_BUFFER_SIZE_1 : MAX_CODE_GEN_BUFFER_SIZE)
530
531static inline size_t size_code_gen_buffer(size_t tb_size)
532{
533
534 if (tb_size == 0) {
535#ifdef USE_STATIC_CODE_GEN_BUFFER
536 tb_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
537#else
538
539
540
541
542 tb_size = (unsigned long)(ram_size / 4);
543#endif
544 }
545 if (tb_size < MIN_CODE_GEN_BUFFER_SIZE) {
546 tb_size = MIN_CODE_GEN_BUFFER_SIZE;
547 }
548 if (tb_size > MAX_CODE_GEN_BUFFER_SIZE) {
549 tb_size = MAX_CODE_GEN_BUFFER_SIZE;
550 }
551 return tb_size;
552}
553
554#ifdef __mips__
555
556
/* True if [addr, addr + size) straddles a 256MB-aligned boundary, i.e.
   its start and its one-past-the-end address lie in different aligned
   256MB regions (a problem for the MIPS J/JAL reach).  */
static inline bool cross_256mb(void *addr, size_t size)
{
    uintptr_t region_of_start = (uintptr_t)addr & ~0x0ffffffful;
    uintptr_t region_of_end = ((uintptr_t)addr + size) & ~0x0ffffffful;

    return region_of_start != region_of_end;
}
561
562
563
564
565static inline void *split_cross_256mb(void *buf1, size_t size1)
566{
567 void *buf2 = (void *)(((uintptr_t)buf1 + size1) & ~0x0ffffffful);
568 size_t size2 = buf1 + size1 - buf2;
569
570 size1 = buf2 - buf1;
571 if (size1 < size2) {
572 size1 = size2;
573 buf1 = buf2;
574 }
575
576 tcg_ctx.code_gen_buffer_size = size1;
577 return buf1;
578}
579#endif
580
#ifdef USE_STATIC_CODE_GEN_BUFFER
/* Statically allocated code-gen buffer, used for user-mode emulation
   so generated code sits close to the executable for direct jumps.  */
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
    __attribute__((aligned(CODE_GEN_ALIGN)));

# ifdef _WIN32
/* Change the protection of [addr, addr + size) to PROT.  */
static inline void do_protect(void *addr, long size, int prot)
{
    DWORD old_protect;
    VirtualProtect(addr, size, prot, &old_protect);
}

/* Make [addr, addr + size) readable, writable and executable.  */
static inline void map_exec(void *addr, long size)
{
    do_protect(addr, size, PAGE_EXECUTE_READWRITE);
}

/* Make [addr, addr + size) inaccessible (guard page).  */
static inline void map_none(void *addr, long size)
{
    do_protect(addr, size, PAGE_NOACCESS);
}
# else
/* Change the protection of [addr, addr + size) to PROT, widened to
   host page boundaries as mprotect requires.  */
static inline void do_protect(void *addr, long size, int prot)
{
    uintptr_t start, end;

    start = (uintptr_t)addr;
    start &= qemu_real_host_page_mask;

    end = (uintptr_t)addr + size;
    end = ROUND_UP(end, qemu_real_host_page_size);

    mprotect((void *)start, end - start, prot);
}

/* Make [addr, addr + size) readable, writable and executable.  */
static inline void map_exec(void *addr, long size)
{
    do_protect(addr, size, PROT_READ | PROT_WRITE | PROT_EXEC);
}

/* Make [addr, addr + size) inaccessible (guard page).  */
static inline void map_none(void *addr, long size)
{
    do_protect(addr, size, PROT_NONE);
}
# endif
625
/* Carve the code-gen buffer out of the static buffer: round to host
 * page boundaries, reserve a trailing guard page, honour a requested
 * size limit, and apply protections.  Updates
 * tcg_ctx.code_gen_buffer_size with the usable size.
 */
static inline void *alloc_code_gen_buffer(void)
{
    void *buf = static_code_gen_buffer;
    size_t full_size, size;

    /* The size of the buffer, rounded down to end on a page boundary.  */
    full_size = (((uintptr_t)buf + sizeof(static_code_gen_buffer))
                 & qemu_real_host_page_mask) - (uintptr_t)buf;

    /* Reserve a guard page.  */
    size = full_size - qemu_real_host_page_size;

    /* Honor a command-line option limiting the size of the buffer.  */
    if (size > tcg_ctx.code_gen_buffer_size) {
        size = (((uintptr_t)buf + tcg_ctx.code_gen_buffer_size)
                & qemu_real_host_page_mask) - (uintptr_t)buf;
    }
    tcg_ctx.code_gen_buffer_size = size;

#ifdef __mips__
    /* Generated branches cannot cross a 256MB boundary on MIPS.  */
    if (cross_256mb(buf, size)) {
        buf = split_cross_256mb(buf, size);
        size = tcg_ctx.code_gen_buffer_size;
    }
#endif

    map_exec(buf, size);
    map_none(buf + size, qemu_real_host_page_size);
    qemu_madvise(buf, size, QEMU_MADV_HUGEPAGE);

    return buf;
}
658#elif defined(_WIN32)
/* Win32: allocate the code-gen buffer with VirtualAlloc, reserving one
 * extra page that stays uncommitted as a guard page.
 */
static inline void *alloc_code_gen_buffer(void)
{
    size_t size = tcg_ctx.code_gen_buffer_size;
    void *buf1, *buf2;

    /* Perform the allocation in two steps, so that the guard page
       is reserved but uncommitted.  */
    buf1 = VirtualAlloc(NULL, size + qemu_real_host_page_size,
                        MEM_RESERVE, PAGE_NOACCESS);
    if (buf1 != NULL) {
        buf2 = VirtualAlloc(buf1, size, MEM_COMMIT, PAGE_EXECUTE_READWRITE);
        /* Committing inside our own reservation cannot move the base.  */
        assert(buf1 == buf2);
    }

    return buf1;
}
675#else
/* POSIX: mmap the code-gen buffer (plus one guard page kept PROT_NONE),
 * with a host-cpu-specific placement hint so generated code stays
 * within branch reach of the translator.
 */
static inline void *alloc_code_gen_buffer(void)
{
    int flags = MAP_PRIVATE | MAP_ANONYMOUS;
    uintptr_t start = 0;
    size_t size = tcg_ctx.code_gen_buffer_size;
    void *buf;

    /* Constrain the position of the buffer based on the host cpu.
       Note that these addresses are chosen in concert with the
       addresses assigned in the relevant linker script file.  */
# if defined(__PIE__) || defined(__PIC__)
    /* Don't bother setting a preferred location if we're building
       a position-independent executable.  We're more likely to get
       an address near the main executable if we let the kernel
       choose the address.  */
# elif defined(__x86_64__) && defined(MAP_32BIT)
    /* Force the memory down into low memory with the executable.
       Leave the choice of exact location with the kernel.  */
    flags |= MAP_32BIT;
    /* Cannot expect to map more than 800MB in low memory.  */
    if (size > 800u * 1024 * 1024) {
        tcg_ctx.code_gen_buffer_size = size = 800u * 1024 * 1024;
    }
# elif defined(__sparc__)
    start = 0x40000000ul;
# elif defined(__s390x__)
    start = 0x90000000ul;
# elif defined(__mips__)
#  if _MIPS_SIM == _ABI64
    start = 0x128000000ul;
#  else
    start = 0x08000000ul;
#  endif
# endif

    buf = mmap((void *)start, size + qemu_real_host_page_size,
               PROT_NONE, flags, -1, 0);
    if (buf == MAP_FAILED) {
        return NULL;
    }

#ifdef __mips__
    if (cross_256mb(buf, size)) {
        /* Try again, with the original still mapped, at a sequential
           address just above.  */
        size_t size2;
        void *buf2 = mmap(NULL, size + qemu_real_host_page_size,
                          PROT_NONE, flags, -1, 0);
        switch ((int)(buf2 != MAP_FAILED)) {
        case 1:
            if (!cross_256mb(buf2, size)) {
                /* Success!  Release the first mapping.  */
                munmap(buf, size + qemu_real_host_page_size);
                break;
            }
            /* The second try crossed too; drop it and fall through
               to splitting the original mapping.  */
            munmap(buf2, size + qemu_real_host_page_size);
            /* fallthrough */
        default:
            /* Split the original buffer.  Free the smaller half.  */
            buf2 = split_cross_256mb(buf, size);
            size2 = tcg_ctx.code_gen_buffer_size;
            if (buf == buf2) {
                munmap(buf + size2 + qemu_real_host_page_size, size - size2);
            } else {
                munmap(buf, size - size2);
            }
            size = size2;
            break;
        }
        buf = buf2;
    }
#endif

    /* Make the final buffer accessible.  The guard page at the end
       will remain inaccessible with PROT_NONE.  */
    mprotect(buf, size, PROT_WRITE | PROT_READ | PROT_EXEC);

    /* Request large pages for the buffer.  */
    qemu_madvise(buf, size, QEMU_MADV_HUGEPAGE);

    return buf;
}
759#endif
760
/* Allocate the translation buffer, size the initial TB pointer array
 * (grown on demand by tb_alloc), and initialize the TB lock.
 * Exits the process if the buffer cannot be allocated.
 */
static inline void code_gen_alloc(size_t tb_size)
{
    tcg_ctx.code_gen_buffer_size = size_code_gen_buffer(tb_size);
    tcg_ctx.code_gen_buffer = alloc_code_gen_buffer();
    if (tcg_ctx.code_gen_buffer == NULL) {
        fprintf(stderr, "Could not allocate dynamic translator buffer\n");
        exit(1);
    }

    /* Estimate a good size for the number of TBs we can store.  We
       still haven't deducted the prologue from the buffer size here,
       but that's minor, and this is a conservative estimate anyway.  */
    tcg_ctx.tb_ctx.tbs_size =
        tcg_ctx.code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE / 8;
    if (unlikely(!tcg_ctx.tb_ctx.tbs_size)) {
        tcg_ctx.tb_ctx.tbs_size = 64 * 1024;
    }
    tcg_ctx.tb_ctx.tbs = g_new(TranslationBlock *, tcg_ctx.tb_ctx.tbs_size);

    qemu_mutex_init(&tcg_ctx.tb_ctx.tb_lock);
}
780
/* Initialize the QHT hash table used to look up TBs by
 * (phys_pc, pc, flags); auto-resizes as TBs accumulate.  */
static void tb_htable_init(void)
{
    unsigned int mode = QHT_MODE_AUTO_RESIZE;

    qht_init(&tcg_ctx.tb_ctx.htable, CODE_GEN_HTABLE_SIZE, mode);
}
787
788
789
790
/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer. Zero means default
   size. */
void tcg_exec_init(unsigned long tb_size)
{
    tcg_allowed = true;
    cpu_gen_init();
    page_init();
    tb_htable_init();
    code_gen_alloc(tb_size);
#if defined(CONFIG_SOFTMMU)
    /* There's no guest base to take into account, so go ahead and
       initialize the prologue now.  */
    tcg_prologue_init(&tcg_ctx);
#endif
}
804
805
806
807
808
809
810
/* Allocate a new translation block, growing the TB pointer array on
 * demand.  Returns NULL when the code-gen buffer is full (the caller
 * must flush and retry).
 *
 * Called with tb_lock held.
 */
static TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;
    TBContext *ctx;

    assert_tb_locked();

    tb = tcg_tb_alloc(&tcg_ctx);
    if (unlikely(tb == NULL)) {
        return NULL;
    }
    ctx = &tcg_ctx.tb_ctx;
    if (unlikely(ctx->nb_tbs == ctx->tbs_size)) {
        /* array full: double it */
        ctx->tbs_size *= 2;
        ctx->tbs = g_renew(TranslationBlock *, ctx->tbs, ctx->tbs_size);
    }
    ctx->tbs[ctx->nb_tbs++] = tb;
    return tb;
}
830
831
/* Called with tb_lock held.  */
void tb_free(TranslationBlock *tb)
{
    assert_tb_locked();

    /* In practice this is mostly used for single use temporary TB
       Ignore the hard cases and just back up if this TB happens to
       be the last one generated.  */
    if (tcg_ctx.tb_ctx.nb_tbs > 0 &&
        tb == tcg_ctx.tb_ctx.tbs[tcg_ctx.tb_ctx.nb_tbs - 1]) {
        /* The TB struct itself lives just below its code in the
           code-gen buffer; rewind the allocation pointer past both.  */
        size_t struct_size = ROUND_UP(sizeof(*tb), qemu_icache_linesize);

        tcg_ctx.code_gen_ptr = tb->tc_ptr - struct_size;
        tcg_ctx.tb_ctx.nb_tbs--;
    }
}
847
/* Discard the self-modifying-code tracking state of page P (softmmu
 * only); the bitmap is rebuilt lazily by build_page_bitmap.  */
static inline void invalidate_page_bitmap(PageDesc *p)
{
#ifdef CONFIG_SOFTMMU
    g_free(p->code_bitmap);
    p->code_bitmap = NULL;
    p->code_write_count = 0;
#endif
}
856
857
858static void page_flush_tb_1(int level, void **lp)
859{
860 int i;
861
862 if (*lp == NULL) {
863 return;
864 }
865 if (level == 0) {
866 PageDesc *pd = *lp;
867
868 for (i = 0; i < V_L2_SIZE; ++i) {
869 pd[i].first_tb = NULL;
870 invalidate_page_bitmap(pd + i);
871 }
872 } else {
873 void **pp = *lp;
874
875 for (i = 0; i < V_L2_SIZE; ++i) {
876 page_flush_tb_1(level - 1, pp + i);
877 }
878 }
879}
880
881static void page_flush_tb(void)
882{
883 int i, l1_sz = v_l1_size;
884
885 for (i = 0; i < l1_sz; i++) {
886 page_flush_tb_1(v_l2_levels, l1_map + i);
887 }
888}
889
890
/* Flush all translation blocks.  Runs as a "safe" work item, i.e. with
 * no vCPU executing generated code.  tb_flush_count carries the flush
 * generation observed by the requester, so redundant requests are
 * coalesced.
 */
static void do_tb_flush(CPUState *cpu, run_on_cpu_data tb_flush_count)
{
    tb_lock();

    /* If it is already been done on request of another CPU,
     * just retry.
     */
    if (tcg_ctx.tb_ctx.tb_flush_count != tb_flush_count.host_int) {
        goto done;
    }

#if defined(DEBUG_TB_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer),
           tcg_ctx.tb_ctx.nb_tbs, tcg_ctx.tb_ctx.nb_tbs > 0 ?
           ((unsigned long)(tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer)) /
           tcg_ctx.tb_ctx.nb_tbs : 0);
#endif
    if ((unsigned long)(tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer)
        > tcg_ctx.code_gen_buffer_size) {
        cpu_abort(cpu, "Internal error: code buffer overflow\n");
    }

    /* Throw away every vCPU's cached TB lookups.  */
    CPU_FOREACH(cpu) {
        cpu_tb_jmp_cache_clear(cpu);
    }

    tcg_ctx.tb_ctx.nb_tbs = 0;
    qht_reset_size(&tcg_ctx.tb_ctx.htable, CODE_GEN_HTABLE_SIZE);
    page_flush_tb();

    tcg_ctx.code_gen_ptr = tcg_ctx.code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    atomic_mb_set(&tcg_ctx.tb_ctx.tb_flush_count,
                  tcg_ctx.tb_ctx.tb_flush_count + 1);

done:
    tb_unlock();
}
931
/* Request a full TB flush; deferred via async_safe_run_on_cpu so it
 * executes when no vCPU is running translated code.  The current flush
 * count is captured so concurrent requests are performed only once.
 */
void tb_flush(CPUState *cpu)
{
    if (tcg_enabled()) {
        unsigned tb_flush_count = atomic_mb_read(&tcg_ctx.tb_ctx.tb_flush_count);
        async_safe_run_on_cpu(cpu, do_tb_flush,
                              RUN_ON_CPU_HOST_INT(tb_flush_count));
    }
}
940
#ifdef DEBUG_TB_CHECK

/* qht iterator: report any TB still overlapping the page at *userp;
 * after an invalidation no such TB should remain.  */
static void
do_tb_invalidate_check(struct qht *ht, void *p, uint32_t hash, void *userp)
{
    TranslationBlock *tb = p;
    target_ulong addr = *(target_ulong *)userp;

    if (!(addr + TARGET_PAGE_SIZE <= tb->pc || addr >= tb->pc + tb->size)) {
        printf("ERROR invalidate: address=" TARGET_FMT_lx
               " PC=%08lx size=%04x\n", addr, (long)tb->pc, tb->size);
    }
}

/* verify that all the pages have correct rights for code
 *
 * Called with tb_lock held.
 */
static void tb_invalidate_check(target_ulong address)
{
    address &= TARGET_PAGE_MASK;
    qht_iter(&tcg_ctx.tb_ctx.htable, do_tb_invalidate_check, &address);
}

/* qht iterator: report any TB whose pages are still writable.  */
static void
do_tb_page_check(struct qht *ht, void *p, uint32_t hash, void *userp)
{
    TranslationBlock *tb = p;
    int flags1, flags2;

    flags1 = page_get_flags(tb->pc);
    flags2 = page_get_flags(tb->pc + tb->size - 1);
    if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
        printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
               (long)tb->pc, tb->size, flags1, flags2);
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    qht_iter(&tcg_ctx.tb_ctx.htable, do_tb_page_check, NULL);
}

#endif
986
/* Unlink TB from the singly-linked per-page TB list rooted at *PTB.
 * The low two bits of each list pointer tag which page_next[] slot of
 * that TB continues the chain.  TB must be present in the list.
 */
static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for (;;) {
        tb1 = *ptb;
        n1 = (uintptr_t)tb1 & 3;
        tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}
1003
1004
/* remove the TB from a list of TBs jumping to the n-th jump target of the TB */
static inline void tb_remove_from_jmp_list(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1;
    uintptr_t *ptb, ntb;
    unsigned int n1;

    ptb = &tb->jmp_list_next[n];
    if (*ptb) {
        /* find tb(n) in circular list */
        for (;;) {
            ntb = *ptb;
            n1 = ntb & 3;
            tb1 = (TranslationBlock *)(ntb & ~3);
            if (n1 == n && tb1 == tb) {
                break;
            }
            if (n1 == 2) {
                /* tag 2 marks the list head inside the destination TB */
                ptb = &tb1->jmp_list_first;
            } else {
                ptb = &tb1->jmp_list_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_list_next[n];

        tb->jmp_list_next[n] = (uintptr_t)NULL;
    }
}
1033
1034
1035
/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    uintptr_t addr = (uintptr_t)(tb->tc_ptr + tb->jmp_reset_offset[n]);
    tb_set_jmp_target(tb, n, addr);
}
1041
1042
/* remove any jumps to the TB */
static inline void tb_jmp_unlink(TranslationBlock *tb)
{
    TranslationBlock *tb1;
    uintptr_t *ptb, ntb;
    unsigned int n1;

    ptb = &tb->jmp_list_first;
    for (;;) {
        ntb = *ptb;
        n1 = ntb & 3;
        tb1 = (TranslationBlock *)(ntb & ~3);
        if (n1 == 2) {
            /* tag 2 marks the end of the circular list */
            break;
        }
        /* Redirect tb1's n1-th jump back to its own epilogue and drop
           it from this list.  */
        tb_reset_jump(tb1, n1);
        *ptb = tb1->jmp_list_next[n1];
        tb1->jmp_list_next[n1] = (uintptr_t)NULL;
    }
}
1062
1063
1064
1065
1066
/* invalidate one TB
 *
 * Called with tb_lock held.
 */
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
{
    CPUState *cpu;
    PageDesc *p;
    uint32_t h;
    tb_page_addr_t phys_pc;

    assert_tb_locked();

    /* Mark invalid first so lock-free lookups reject the TB.  */
    atomic_set(&tb->invalid, true);

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_hash_func(phys_pc, tb->pc, tb->flags, tb->trace_vcpu_dstate);
    qht_remove(&tcg_ctx.tb_ctx.htable, tb, h);

    /* remove the TB from the page list (skip the page being
       invalidated wholesale by the caller, if any) */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    /* remove the TB from each vCPU's jump cache */
    h = tb_jmp_cache_hash_func(tb->pc);
    CPU_FOREACH(cpu) {
        if (atomic_read(&cpu->tb_jmp_cache[h]) == tb) {
            atomic_set(&cpu->tb_jmp_cache[h], NULL);
        }
    }

    /* suppress this TB from the two jump lists */
    tb_remove_from_jmp_list(tb, 0);
    tb_remove_from_jmp_list(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb_jmp_unlink(tb);

    tcg_ctx.tb_ctx.tb_phys_invalidate_count++;
}
1112
#ifdef CONFIG_SOFTMMU
/* Build the bitmap of bytes on page P covered by translated code; used
 * by tb_invalidate_phys_page_fast to filter out writes that cannot
 * touch any TB.  */
static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = bitmap_new(TARGET_PAGE_SIZE);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (uintptr_t)tb & 3;
        tb = (TranslationBlock *)((uintptr_t)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE) {
                tb_end = TARGET_PAGE_SIZE;
            }
        } else {
            /* TB started on the previous page; only its tail is here.  */
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        bitmap_set(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}
#endif
1143
1144
1145
1146
1147
/* add the tb in the target page and protect it if necessary
 *
 * Called with mmap_lock held for user-mode emulation.
 */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, tb_page_addr_t page_addr)
{
    PageDesc *p;
#ifndef CONFIG_USER_ONLY
    bool page_already_protected;
#endif

    assert_memory_lock();

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
    tb->page_next[n] = p->first_tb;
#ifndef CONFIG_USER_ONLY
    page_already_protected = p->first_tb != NULL;
#endif
    /* tag the list pointer with n so walkers know which page_next[]
       slot of this TB continues the chain */
    p->first_tb = (TranslationBlock *)((uintptr_t)tb | n);
    invalidate_page_bitmap(p);

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        for (addr = page_addr; addr < page_addr + qemu_host_page_size;
            addr += TARGET_PAGE_SIZE) {

            p2 = page_find(addr >> TARGET_PAGE_BITS);
            if (!p2) {
                continue;
            }
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
        }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
               page_addr);
#endif
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!page_already_protected) {
        tlb_protect_code(page_addr);
    }
#endif
}
1203
1204
1205
1206
1207
1208
/* add a new TB and link it to the physical page tables. phys_page2 is
 * (-1) to indicate that only one page contains the TB.
 *
 * Called with mmap_lock held for user-mode emulation.
 */
static void tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
                         tb_page_addr_t phys_page2)
{
    uint32_t h;

    assert_memory_lock();

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1) {
        tb_alloc_page(tb, 1, phys_page2);
    } else {
        tb->page_addr[1] = -1;
    }

    /* add in the hash table */
    h = tb_hash_func(phys_pc, tb->pc, tb->flags, tb->trace_vcpu_dstate);
    qht_insert(&tcg_ctx.tb_ctx.htable, tb, h);

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
}
1232
1233
/* Translate the guest code at (pc, cs_base, flags) into a new TB and
 * link it into the lookup structures.
 *
 * Called with mmap_lock held for user mode emulation.
 */
TranslationBlock *tb_gen_code(CPUState *cpu,
                              target_ulong pc, target_ulong cs_base,
                              uint32_t flags, int cflags)
{
    CPUArchState *env = cpu->env_ptr;
    TranslationBlock *tb;
    tb_page_addr_t phys_pc, phys_page2;
    target_ulong virt_page2;
    tcg_insn_unit *gen_code_buf;
    int gen_code_size, search_size;
#ifdef CONFIG_PROFILER
    int64_t ti;
#endif
    assert_memory_lock();

    phys_pc = get_page_addr_code(env, pc);
    if (use_icount && !(cflags & CF_IGNORE_ICOUNT)) {
        cflags |= CF_USE_ICOUNT;
    }

    tb = tb_alloc(pc);
    if (unlikely(!tb)) {
 buffer_overflow:
        /* flush must be done */
        tb_flush(cpu);
        mmap_unlock();
        /* Make the execution loop process the flush as soon as possible.  */
        cpu->exception_index = EXCP_INTERRUPT;
        cpu_loop_exit(cpu);
    }

    gen_code_buf = tcg_ctx.code_gen_ptr;
    tb->tc_ptr = gen_code_buf;
    tb->pc = pc;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    tb->trace_vcpu_dstate = *cpu->trace_dstate;
    tb->invalid = false;

#ifdef CONFIG_PROFILER
    tcg_ctx.tb_count1++; /* includes aborted translations because of
                            exceptions */
    ti = profile_getclock();
#endif

    tcg_func_start(&tcg_ctx);

    /* Translate guest insns to TCG ops.  */
    tcg_ctx.cpu = ENV_GET_CPU(env);
    gen_intermediate_code(cpu, tb);
    tcg_ctx.cpu = NULL;

    trace_translate_block(tb, tb->pc, tb->tc_ptr);

    /* generate machine code */
    tb->jmp_reset_offset[0] = TB_JMP_RESET_OFFSET_INVALID;
    tb->jmp_reset_offset[1] = TB_JMP_RESET_OFFSET_INVALID;
    tcg_ctx.tb_jmp_reset_offset = tb->jmp_reset_offset;
#ifdef USE_DIRECT_JUMP
    tcg_ctx.tb_jmp_insn_offset = tb->jmp_insn_offset;
    tcg_ctx.tb_jmp_target_addr = NULL;
#else
    tcg_ctx.tb_jmp_insn_offset = NULL;
    tcg_ctx.tb_jmp_target_addr = tb->jmp_target_addr;
#endif

#ifdef CONFIG_PROFILER
    tcg_ctx.tb_count++;
    tcg_ctx.interm_time += profile_getclock() - ti;
    tcg_ctx.code_time -= profile_getclock();
#endif

    /* ??? Overflow could be handled better here.  In particular, we
       don't need to re-do gen_intermediate_code, nor should we re-do
       the tcg optimization currently hidden inside tcg_gen_code.  All
       that should be required is to flush the TBs, allocate a new TB,
       re-initialize it per above, and re-do the actual code generation.  */
    gen_code_size = tcg_gen_code(&tcg_ctx, tb);
    if (unlikely(gen_code_size < 0)) {
        goto buffer_overflow;
    }
    search_size = encode_search(tb, (void *)gen_code_buf + gen_code_size);
    if (unlikely(search_size < 0)) {
        goto buffer_overflow;
    }

#ifdef CONFIG_PROFILER
    tcg_ctx.code_time += profile_getclock();
    tcg_ctx.code_in_len += tb->size;
    tcg_ctx.code_out_len += gen_code_size;
    tcg_ctx.search_out_len += search_size;
#endif

#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_OUT_ASM) &&
        qemu_log_in_addr_range(tb->pc)) {
        qemu_log_lock();
        qemu_log("OUT: [size=%d]\n", gen_code_size);
        log_disas(tb->tc_ptr, gen_code_size);
        qemu_log("\n");
        qemu_log_flush();
        qemu_log_unlock();
    }
#endif

    /* Advance past generated code plus the search data, keeping the
       pointer aligned for the next TB.  */
    tcg_ctx.code_gen_ptr = (void *)
        ROUND_UP((uintptr_t)gen_code_buf + gen_code_size + search_size,
                 CODE_GEN_ALIGN);

    /* init jump list */
    assert(((uintptr_t)tb & 3) == 0);
    tb->jmp_list_first = (uintptr_t)tb | 2;
    tb->jmp_list_next[0] = (uintptr_t)NULL;
    tb->jmp_list_next[1] = (uintptr_t)NULL;

    /* init original jump addresses which have been set during tcg_gen_code() */
    if (tb->jmp_reset_offset[0] != TB_JMP_RESET_OFFSET_INVALID) {
        tb_reset_jump(tb, 0);
    }
    if (tb->jmp_reset_offset[1] != TB_JMP_RESET_OFFSET_INVALID) {
        tb_reset_jump(tb, 1);
    }

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_page_addr_code(env, virt_page2);
    }
    /* As long as consistency of the TB stuff is provided by tb_lock in user
     * mode and is implicit in single-threaded softmmu emulation, no explicit
     * memory barrier is required before tb_link_page() makes the TB visible
     * through the physical hash table and physical page list.
     */
    tb_link_page(tb, phys_pc, phys_page2);
    return tb;
}
1371
1372
1373
1374
1375
1376
1377
1378
1379
1380
1381
1382static void tb_invalidate_phys_range_1(tb_page_addr_t start, tb_page_addr_t end)
1383{
1384 while (start < end) {
1385 tb_invalidate_phys_page_range(start, end, 0);
1386 start &= TARGET_PAGE_MASK;
1387 start += TARGET_PAGE_SIZE;
1388 }
1389}
1390
#ifdef CONFIG_SOFTMMU
/* Invalidate all TBs intersecting the physical range [start, end).
 *
 * Called with tb_lock held.
 */
void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end)
{
    assert_tb_locked();
    tb_invalidate_phys_range_1(start, end);
}
#else
/* User-mode variant: takes tb_lock itself.
 *
 * Called with mmap_lock held.
 */
void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end)
{
    assert_memory_lock();
    tb_lock();
    tb_invalidate_phys_range_1(start, end);
    tb_unlock();
}
#endif
1406
1407
1408
1409
1410
1411
1412
1413
1414
1415
/* invalidate all TBs which intersect with the target physical address range
 * [start;end[. NOTE: start and end must refer to the *same* physical page.
 * 'is_cpu_write_access' should be true if called from a real cpu write
 * access: the virtual CPU will exit the current TB if code is modified inside
 * this TB.
 *
 * Called with tb_lock/mmap_lock held for user-mode emulation
 * Called with tb_lock held for system-mode emulation
 */
void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
                                   int is_cpu_write_access)
{
    TranslationBlock *tb, *tb_next;
#if defined(TARGET_HAS_PRECISE_SMC)
    CPUState *cpu = current_cpu;
    CPUArchState *env = NULL;
#endif
    tb_page_addr_t tb_start, tb_end;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    int current_tb_not_found = is_cpu_write_access;
    TranslationBlock *current_tb = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    uint32_t current_flags = 0;
#endif /* TARGET_HAS_PRECISE_SMC */

    assert_memory_lock();
    assert_tb_locked();

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p) {
        return;
    }
#if defined(TARGET_HAS_PRECISE_SMC)
    if (cpu != NULL) {
        env = cpu->env_ptr;
    }
#endif

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all
       the code */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (uintptr_t)tb & 3;
        tb = (TranslationBlock *)((uintptr_t)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (cpu->mem_io_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(cpu->mem_io_pc);
                }
            }
            if (current_tb == tb &&
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state_from_tb(cpu, current_tb, cpu->mem_io_pc);
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                     &current_flags);
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            tb_phys_invalidate(tb, -1);
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        tlb_unprotect_code(start);
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        tb_gen_code(cpu, current_pc, current_cs_base, current_flags, 1);
        cpu_loop_exit_noexc(cpu);
    }
#endif
}
1512
1513#ifdef CONFIG_SOFTMMU
1514
1515
1516
1517
1518void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
1519{
1520 PageDesc *p;
1521
1522#if 0
1523 if (1) {
1524 qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
1525 cpu_single_env->mem_io_vaddr, len,
1526 cpu_single_env->eip,
1527 cpu_single_env->eip +
1528 (intptr_t)cpu_single_env->segs[R_CS].base);
1529 }
1530#endif
1531 assert_memory_lock();
1532
1533 p = page_find(start >> TARGET_PAGE_BITS);
1534 if (!p) {
1535 return;
1536 }
1537 if (!p->code_bitmap &&
1538 ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD) {
1539
1540
1541
1542 build_page_bitmap(p);
1543 }
1544 if (p->code_bitmap) {
1545 unsigned int nr;
1546 unsigned long b;
1547
1548 nr = start & ~TARGET_PAGE_MASK;
1549 b = p->code_bitmap[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG - 1));
1550 if (b & ((1 << len) - 1)) {
1551 goto do_invalidate;
1552 }
1553 } else {
1554 do_invalidate:
1555 tb_invalidate_phys_page_range(start, start + len, 1);
1556 }
1557}
1558#else
1559
1560
1561
1562
1563
1564
1565static bool tb_invalidate_phys_page(tb_page_addr_t addr, uintptr_t pc)
1566{
1567 TranslationBlock *tb;
1568 PageDesc *p;
1569 int n;
1570#ifdef TARGET_HAS_PRECISE_SMC
1571 TranslationBlock *current_tb = NULL;
1572 CPUState *cpu = current_cpu;
1573 CPUArchState *env = NULL;
1574 int current_tb_modified = 0;
1575 target_ulong current_pc = 0;
1576 target_ulong current_cs_base = 0;
1577 uint32_t current_flags = 0;
1578#endif
1579
1580 assert_memory_lock();
1581
1582 addr &= TARGET_PAGE_MASK;
1583 p = page_find(addr >> TARGET_PAGE_BITS);
1584 if (!p) {
1585 return false;
1586 }
1587
1588 tb_lock();
1589 tb = p->first_tb;
1590#ifdef TARGET_HAS_PRECISE_SMC
1591 if (tb && pc != 0) {
1592 current_tb = tb_find_pc(pc);
1593 }
1594 if (cpu != NULL) {
1595 env = cpu->env_ptr;
1596 }
1597#endif
1598 while (tb != NULL) {
1599 n = (uintptr_t)tb & 3;
1600 tb = (TranslationBlock *)((uintptr_t)tb & ~3);
1601#ifdef TARGET_HAS_PRECISE_SMC
1602 if (current_tb == tb &&
1603 (current_tb->cflags & CF_COUNT_MASK) != 1) {
1604
1605
1606
1607
1608
1609
1610 current_tb_modified = 1;
1611 cpu_restore_state_from_tb(cpu, current_tb, pc);
1612 cpu_get_tb_cpu_state(env, ¤t_pc, ¤t_cs_base,
1613 ¤t_flags);
1614 }
1615#endif
1616 tb_phys_invalidate(tb, addr);
1617 tb = tb->page_next[n];
1618 }
1619 p->first_tb = NULL;
1620#ifdef TARGET_HAS_PRECISE_SMC
1621 if (current_tb_modified) {
1622
1623
1624
1625 tb_gen_code(cpu, current_pc, current_cs_base, current_flags, 1);
1626
1627
1628 return true;
1629 }
1630#endif
1631 tb_unlock();
1632
1633 return false;
1634}
1635#endif
1636
1637
1638
1639static TranslationBlock *tb_find_pc(uintptr_t tc_ptr)
1640{
1641 int m_min, m_max, m;
1642 uintptr_t v;
1643 TranslationBlock *tb;
1644
1645 if (tcg_ctx.tb_ctx.nb_tbs <= 0) {
1646 return NULL;
1647 }
1648 if (tc_ptr < (uintptr_t)tcg_ctx.code_gen_buffer ||
1649 tc_ptr >= (uintptr_t)tcg_ctx.code_gen_ptr) {
1650 return NULL;
1651 }
1652
1653 m_min = 0;
1654 m_max = tcg_ctx.tb_ctx.nb_tbs - 1;
1655 while (m_min <= m_max) {
1656 m = (m_min + m_max) >> 1;
1657 tb = tcg_ctx.tb_ctx.tbs[m];
1658 v = (uintptr_t)tb->tc_ptr;
1659 if (v == tc_ptr) {
1660 return tb;
1661 } else if (tc_ptr < v) {
1662 m_max = m - 1;
1663 } else {
1664 m_min = m + 1;
1665 }
1666 }
1667 return tcg_ctx.tb_ctx.tbs[m_max];
1668}
1669
1670#if !defined(CONFIG_USER_ONLY)
1671void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr)
1672{
1673 ram_addr_t ram_addr;
1674 MemoryRegion *mr;
1675 hwaddr l = 1;
1676
1677 rcu_read_lock();
1678 mr = address_space_translate(as, addr, &addr, &l, false);
1679 if (!(memory_region_is_ram(mr)
1680 || memory_region_is_romd(mr))) {
1681 rcu_read_unlock();
1682 return;
1683 }
1684 ram_addr = memory_region_get_ram_addr(mr) + addr;
1685 tb_lock();
1686 tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
1687 tb_unlock();
1688 rcu_read_unlock();
1689}
1690#endif
1691
1692
1693void tb_check_watchpoint(CPUState *cpu)
1694{
1695 TranslationBlock *tb;
1696
1697 tb = tb_find_pc(cpu->mem_io_pc);
1698 if (tb) {
1699
1700 cpu_restore_state_from_tb(cpu, tb, cpu->mem_io_pc);
1701 tb_phys_invalidate(tb, -1);
1702 } else {
1703
1704
1705 CPUArchState *env = cpu->env_ptr;
1706 target_ulong pc, cs_base;
1707 tb_page_addr_t addr;
1708 uint32_t flags;
1709
1710 cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
1711 addr = get_page_addr_code(env, pc);
1712 tb_invalidate_phys_range(addr, addr + 1);
1713 }
1714}
1715
1716#ifndef CONFIG_USER_ONLY
1717
1718
1719
1720
1721
/* The TB at host return address 'retaddr' performed device I/O mid-block.
 * Retranslate it so the I/O instruction ends the block, then restart
 * execution.  Takes tb_lock itself.
 */
void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr)
{
#if defined(TARGET_MIPS) || defined(TARGET_SH4)
    CPUArchState *env = cpu->env_ptr;
#endif
    TranslationBlock *tb;
    uint32_t n, cflags;
    target_ulong pc, cs_base;
    uint32_t flags;

    tb_lock();
    tb = tb_find_pc(retaddr);
    if (!tb) {
        cpu_abort(cpu, "cpu_io_recompile: could not find TB for pc=%p",
                  (void *)retaddr);
    }
    /* Total guest instructions that were budgeted for this TB. */
    n = cpu->icount_decr.u16.low + tb->icount;
    cpu_restore_state_from_tb(cpu, tb, retaddr);
    /* After the state restore, icount_decr holds the remaining budget, so
     * this subtraction yields the instructions executed before the I/O. */
    n = n - cpu->icount_decr.u16.low;
    /* Include the I/O instruction itself in the new, shorter TB. */
    n++;
    /* MIPS/SH4: an instruction in a delay slot can only be restarted
     * together with its branch; if the I/O insn was not the first in the
     * TB, back up to re-execute the preceding branch instead. */
#if defined(TARGET_MIPS)
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
        env->active_tc.PC -= (env->hflags & MIPS_HFLAG_B16 ? 2 : 4);
        cpu->icount_decr.u16.low++;
        env->hflags &= ~MIPS_HFLAG_BMASK;
    }
#elif defined(TARGET_SH4)
    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
        && n > 1) {
        env->pc -= 2;
        cpu->icount_decr.u16.low++;
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
    }
#endif
    /* The instruction count must fit in the cflags count field. */
    if (n > CF_COUNT_MASK) {
        cpu_abort(cpu, "TB too big during recompile");
    }

    /* CF_LAST_IO: the final instruction of the new TB is the I/O one. */
    cflags = n | CF_LAST_IO;
    /* Capture the TB's keys before invalidating/freeing it. */
    pc = tb->pc;
    cs_base = tb->cs_base;
    flags = tb->flags;
    tb_phys_invalidate(tb, -1);
    if (tb->cflags & CF_NOCACHE) {
        if (tb->orig_tb) {
            /* A one-shot TB shadows an original cached TB; invalidate
             * that one as well before freeing the shadow. */
            tb_phys_invalidate(tb->orig_tb, -1);
        }
        tb_free(tb);
    }

    /* Generate the replacement TB ending at the I/O instruction. */
    tb_gen_code(cpu, pc, cs_base, flags, cflags);

    /* NOTE(review): tb_lock taken above is not explicitly released here;
     * presumably cpu_loop_exit_noexc longjmps back to the exec loop where
     * the lock state is reset — confirm against cpu_exec. */
    cpu_loop_exit_noexc(cpu);
}
1796
1797static void tb_jmp_cache_clear_page(CPUState *cpu, target_ulong page_addr)
1798{
1799 unsigned int i, i0 = tb_jmp_cache_hash_page(page_addr);
1800
1801 for (i = 0; i < TB_JMP_PAGE_SIZE; i++) {
1802 atomic_set(&cpu->tb_jmp_cache[i0 + i], NULL);
1803 }
1804}
1805
1806void tb_flush_jmp_cache(CPUState *cpu, target_ulong addr)
1807{
1808
1809
1810 tb_jmp_cache_clear_page(cpu, addr - TARGET_PAGE_SIZE);
1811 tb_jmp_cache_clear_page(cpu, addr);
1812}
1813
/* Pretty-print qht hash-table statistics ('hst') to 'f' via cpu_fprintf:
 * head-bucket utilisation plus occupancy and chain-length histograms. */
static void print_qht_statistics(FILE *f, fprintf_function cpu_fprintf,
                                 struct qht_stats hst)
{
    uint32_t hgram_opts;
    size_t hgram_bins;
    char *hgram;

    /* An empty table has nothing meaningful to report. */
    if (!hst.head_buckets) {
        return;
    }
    cpu_fprintf(f, "TB hash buckets %zu/%zu (%0.2f%% head buckets used)\n",
                hst.used_head_buckets, hst.head_buckets,
                (double)hst.used_head_buckets / hst.head_buckets * 100);

    /* Occupancy histogram: values are fractions, printed as percentages. */
    hgram_opts = QDIST_PR_BORDER | QDIST_PR_LABELS;
    hgram_opts |= QDIST_PR_100X | QDIST_PR_PERCENT;
    if (qdist_xmax(&hst.occupancy) - qdist_xmin(&hst.occupancy) == 1) {
        hgram_opts |= QDIST_PR_NODECIMAL;
    }
    hgram = qdist_pr(&hst.occupancy, 10, hgram_opts);
    cpu_fprintf(f, "TB hash occupancy %0.2f%% avg chain occ. Histogram: %s\n",
                qdist_avg(&hst.occupancy) * 100, hgram);
    g_free(hgram);

    /* Chain-length histogram: cap at 10 bins; for narrow ranges let qdist
     * pick one bin per integer value. */
    hgram_opts = QDIST_PR_BORDER | QDIST_PR_LABELS;
    hgram_bins = qdist_xmax(&hst.chain) - qdist_xmin(&hst.chain);
    if (hgram_bins > 10) {
        hgram_bins = 10;
    } else {
        hgram_bins = 0;
        hgram_opts |= QDIST_PR_NODECIMAL | QDIST_PR_NOBINRANGE;
    }
    hgram = qdist_pr(&hst.chain, hgram_bins, hgram_opts);
    cpu_fprintf(f, "TB hash avg chain %0.3f buckets. Histogram: %s\n",
                qdist_avg(&hst.chain), hgram);
    g_free(hgram);
}
1851
/* Dump translation-buffer statistics (TB counts/sizes, cross-page and
 * direct-jump counts, hash-table and flush statistics) to 'f'.
 * Takes tb_lock while iterating the TB array. */
void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;
    struct qht_stats hst;

    tb_lock();

    /* Single pass over all TBs to accumulate the aggregate counters. */
    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for (i = 0; i < tcg_ctx.tb_ctx.nb_tbs; i++) {
        tb = tcg_ctx.tb_ctx.tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size) {
            max_target_code_size = tb->size;
        }
        /* A valid second page address means the TB spans two pages. */
        if (tb->page_addr[1] != -1) {
            cross_page++;
        }
        if (tb->jmp_reset_offset[0] != TB_JMP_RESET_OFFSET_INVALID) {
            direct_jmp_count++;
            if (tb->jmp_reset_offset[1] != TB_JMP_RESET_OFFSET_INVALID) {
                direct_jmp2_count++;
            }
        }
    }

    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "gen code size %td/%zd\n",
                tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer,
                tcg_ctx.code_gen_highwater - tcg_ctx.code_gen_buffer);
    cpu_fprintf(f, "TB count %d\n", tcg_ctx.tb_ctx.nb_tbs);
    /* All ratio lines guard against division by zero when no TBs exist. */
    cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
                tcg_ctx.tb_ctx.nb_tbs ? target_code_size /
                tcg_ctx.tb_ctx.nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size %td bytes (expansion ratio: %0.1f)\n",
                tcg_ctx.tb_ctx.nb_tbs ? (tcg_ctx.code_gen_ptr -
                tcg_ctx.code_gen_buffer) /
                tcg_ctx.tb_ctx.nb_tbs : 0,
                target_code_size ? (double) (tcg_ctx.code_gen_ptr -
                tcg_ctx.code_gen_buffer) /
                target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n", cross_page,
                tcg_ctx.tb_ctx.nb_tbs ? (cross_page * 100) /
                tcg_ctx.tb_ctx.nb_tbs : 0);
    cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                tcg_ctx.tb_ctx.nb_tbs ? (direct_jmp_count * 100) /
                tcg_ctx.tb_ctx.nb_tbs : 0,
                direct_jmp2_count,
                tcg_ctx.tb_ctx.nb_tbs ? (direct_jmp2_count * 100) /
                tcg_ctx.tb_ctx.nb_tbs : 0);

    qht_statistics_init(&tcg_ctx.tb_ctx.htable, &hst);
    print_qht_statistics(f, cpu_fprintf, hst);
    qht_statistics_destroy(&hst);

    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count %u\n",
                atomic_read(&tcg_ctx.tb_ctx.tb_flush_count));
    cpu_fprintf(f, "TB invalidate count %d\n",
                tcg_ctx.tb_ctx.tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
    tcg_dump_info(f, cpu_fprintf);

    tb_unlock();
}
1924
/* Dump per-opcode TCG counters to 'f'; thin wrapper over the TCG core. */
void dump_opcount_info(FILE *f, fprintf_function cpu_fprintf)
{
    tcg_dump_op_count(f, cpu_fprintf);
}
1929
1930#else
1931
/* user-mode implementation of cpu_interrupt: record the pending interrupt
 * mask and force icount_decr.u16.high to all-ones so the CPU leaves the
 * translated-code loop (NOTE: exit mechanism inferred from the -1 store;
 * confirm against cpu_exec).  Caller must hold the iothread mutex. */
void cpu_interrupt(CPUState *cpu, int mask)
{
    g_assert(qemu_mutex_iothread_locked());
    cpu->interrupt_request |= mask;
    cpu->icount_decr.u16.high = -1;
}
1938
1939
1940
1941
1942
/* Accumulator for walk_memory_regions(): tracks the contiguous run of
 * pages with identical protection currently being coalesced. */
struct walk_memory_regions_data {
    walk_memory_regions_fn fn;  /* callback invoked once per region */
    void *priv;                 /* opaque argument forwarded to fn */
    target_ulong start;         /* start of current run; -1u when none open */
    int prot;                   /* protection flags of the current run */
};
1949
1950static int walk_memory_regions_end(struct walk_memory_regions_data *data,
1951 target_ulong end, int new_prot)
1952{
1953 if (data->start != -1u) {
1954 int rc = data->fn(data->priv, data->start, end, data->prot);
1955 if (rc != 0) {
1956 return rc;
1957 }
1958 }
1959
1960 data->start = (new_prot ? end : -1u);
1961 data->prot = new_prot;
1962
1963 return 0;
1964}
1965
1966static int walk_memory_regions_1(struct walk_memory_regions_data *data,
1967 target_ulong base, int level, void **lp)
1968{
1969 target_ulong pa;
1970 int i, rc;
1971
1972 if (*lp == NULL) {
1973 return walk_memory_regions_end(data, base, 0);
1974 }
1975
1976 if (level == 0) {
1977 PageDesc *pd = *lp;
1978
1979 for (i = 0; i < V_L2_SIZE; ++i) {
1980 int prot = pd[i].flags;
1981
1982 pa = base | (i << TARGET_PAGE_BITS);
1983 if (prot != data->prot) {
1984 rc = walk_memory_regions_end(data, pa, prot);
1985 if (rc != 0) {
1986 return rc;
1987 }
1988 }
1989 }
1990 } else {
1991 void **pp = *lp;
1992
1993 for (i = 0; i < V_L2_SIZE; ++i) {
1994 pa = base | ((target_ulong)i <<
1995 (TARGET_PAGE_BITS + V_L2_BITS * level));
1996 rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
1997 if (rc != 0) {
1998 return rc;
1999 }
2000 }
2001 }
2002
2003 return 0;
2004}
2005
2006int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
2007{
2008 struct walk_memory_regions_data data;
2009 uintptr_t i, l1_sz = v_l1_size;
2010
2011 data.fn = fn;
2012 data.priv = priv;
2013 data.start = -1u;
2014 data.prot = 0;
2015
2016 for (i = 0; i < l1_sz; i++) {
2017 target_ulong base = i << (v_l1_shift + TARGET_PAGE_BITS);
2018 int rc = walk_memory_regions_1(&data, base, v_l2_levels, l1_map + i);
2019 if (rc != 0) {
2020 return rc;
2021 }
2022 }
2023
2024 return walk_memory_regions_end(&data, 0, 0);
2025}
2026
2027static int dump_region(void *priv, target_ulong start,
2028 target_ulong end, unsigned long prot)
2029{
2030 FILE *f = (FILE *)priv;
2031
2032 (void) fprintf(f, TARGET_FMT_lx"-"TARGET_FMT_lx
2033 " "TARGET_FMT_lx" %c%c%c\n",
2034 start, end, end - start,
2035 ((prot & PAGE_READ) ? 'r' : '-'),
2036 ((prot & PAGE_WRITE) ? 'w' : '-'),
2037 ((prot & PAGE_EXEC) ? 'x' : '-'));
2038
2039 return 0;
2040}
2041
2042
/* Dump the guest page-protection map to 'f', one line per region. */
void page_dump(FILE *f)
{
    /* Column width = number of hex digits in a target_ulong. */
    const int length = sizeof(target_ulong) * 2;
    (void) fprintf(f, "%-*s %-*s %-*s %s\n",
                   length, "start", length, "end", length, "size", "prot");
    walk_memory_regions(f, dump_region);
}
2050
2051int page_get_flags(target_ulong address)
2052{
2053 PageDesc *p;
2054
2055 p = page_find(address >> TARGET_PAGE_BITS);
2056 if (!p) {
2057 return 0;
2058 }
2059 return p->flags;
2060}
2061
2062
2063
2064
2065void page_set_flags(target_ulong start, target_ulong end, int flags)
2066{
2067 target_ulong addr, len;
2068
2069
2070
2071
2072#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2073 assert(end < ((target_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
2074#endif
2075 assert(start < end);
2076 assert_memory_lock();
2077
2078 start = start & TARGET_PAGE_MASK;
2079 end = TARGET_PAGE_ALIGN(end);
2080
2081 if (flags & PAGE_WRITE) {
2082 flags |= PAGE_WRITE_ORG;
2083 }
2084
2085 for (addr = start, len = end - start;
2086 len != 0;
2087 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
2088 PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2089
2090
2091
2092 if (!(p->flags & PAGE_WRITE) &&
2093 (flags & PAGE_WRITE) &&
2094 p->first_tb) {
2095 tb_invalidate_phys_page(addr, 0);
2096 }
2097 p->flags = flags;
2098 }
2099}
2100
/* Check that every page in [start, start+len) is valid and grants the
 * access bits in 'flags' (PAGE_READ / PAGE_WRITE).  Write checks may
 * unprotect pages that were write-protected for SMC detection.
 * Returns 0 on success, -1 on any failure. */
int page_check_range(target_ulong start, target_ulong len, int flags)
{
    PageDesc *p;
    target_ulong end;
    target_ulong addr;

    /* This function should never be called with addresses outside the
     * guest address space; see the matching assert in page_set_flags(). */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(start < ((target_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
#endif

    if (len == 0) {
        return 0;
    }
    if (start + len - 1 < start) {
        /* We've wrapped around: the range overflows the address space. */
        return -1;
    }

    /* Round the range outward to whole pages. */
    end = TARGET_PAGE_ALIGN(start + len);
    start = start & TARGET_PAGE_MASK;

    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        p = page_find(addr >> TARGET_PAGE_BITS);
        if (!p) {
            return -1;
        }
        if (!(p->flags & PAGE_VALID)) {
            return -1;
        }

        if ((flags & PAGE_READ) && !(p->flags & PAGE_READ)) {
            return -1;
        }
        if (flags & PAGE_WRITE) {
            if (!(p->flags & PAGE_WRITE_ORG)) {
                return -1;
            }
            /* The page was originally writable but is currently protected
             * (e.g. for SMC detection): unprotect it now. */
            if (!(p->flags & PAGE_WRITE)) {
                if (!page_unprotect(addr, 0)) {
                    return -1;
                }
            }
        }
    }
    return 0;
}
2155
2156
2157
2158
2159
2160
2161
/* Restore write access to a page that was write-protected because it
 * contained translated code, invalidating that code in the process.
 * 'pc' is the host PC of the faulting write (0 if unknown).
 *
 * Returns 0 when there was nothing to do, 1 when the page was unprotected,
 * and 2 when additionally the currently-executing TB was invalidated (the
 * caller must then restart execution).
 */
int page_unprotect(target_ulong address, uintptr_t pc)
{
    unsigned int prot;
    bool current_tb_invalidated;
    PageDesc *p;
    target_ulong host_start, host_end, addr;

    /* Serialize against page_set_flags()/tb_invalidate_phys_page() by
     * taking the mmap lock for the whole operation. */
    mmap_lock();

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p) {
        mmap_unlock();
        return 0;
    }

    /* Only act on pages that were originally writable but are currently
     * protected (i.e. protected for SMC detection, not by the guest). */
    if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
        /* mprotect() works at host-page granularity, which may cover
         * several target pages; process them all. */
        host_start = address & qemu_host_page_mask;
        host_end = host_start + qemu_host_page_size;

        prot = 0;
        current_tb_invalidated = false;
        for (addr = host_start ; addr < host_end ; addr += TARGET_PAGE_SIZE) {
            p = page_find(addr >> TARGET_PAGE_BITS);
            p->flags |= PAGE_WRITE;
            prot |= p->flags;

            /* Drop any translated code on this target page; remember if
             * the currently-executing TB was among the casualties. */
            current_tb_invalidated |= tb_invalidate_phys_page(addr, pc);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(addr);
#endif
        }
        mprotect((void *)g2h(host_start), qemu_host_page_size,
                 prot & PAGE_BITS);

        mmap_unlock();
        /* 2 tells the caller it must restart the faulting instruction. */
        return current_tb_invalidated ? 2 : 1;
    }
    mmap_unlock();
    return 0;
}
2210#endif
2211
2212
/* Flush the whole softmmu TLB of 'cs'.  No-op in user-mode builds, which
 * have no softmmu TLB. */
void tcg_flush_softmmu_tlb(CPUState *cs)
{
#ifdef CONFIG_SOFTMMU
    tlb_flush(cs);
#endif
}
2219