/*
 *  Host code generation
 *
 *  Copyright (c) 2003 Fabrice Bellard
 */

#include "qemu/osdep.h"
#include "qemu/units.h"
#include "qemu-common.h"

#define NO_CPU_IO_DEFS
#include "cpu.h"
#include "trace.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg/tcg.h"
#if defined(CONFIG_USER_ONLY)
#include "qemu.h"
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
#include <sys/param.h>
#if __FreeBSD_version >= 700104
#define HAVE_KINFO_GETVMMAP
#define sigqueue sigqueue_freebsd
#include <sys/proc.h>
#include <machine/profile.h>
#define _KERNEL
#include <sys/user.h>
#undef _KERNEL
#undef sigqueue
#include <libutil.h>
#endif
#endif
#else
#include "exec/ram_addr.h"
#endif

#include "exec/cputlb.h"
#include "exec/tb-hash.h"
#include "exec/translate-all.h"
#include "qemu/bitmap.h"
#include "qemu/error-report.h"
#include "qemu/qemu-print.h"
#include "qemu/timer.h"
#include "qemu/main-loop.h"
#include "exec/log.h"
#include "sysemu/cpus.h"
#include "sysemu/cpu-timers.h"
#include "sysemu/tcg.h"
#include "qapi/error.h"
#include "hw/core/tcg-cpu-ops.h"
#include "internal.h"

/* #define DEBUG_TB_INVALIDATE */
/* #define DEBUG_TB_FLUSH */
/* make various TB consistency checks */
/* #define DEBUG_TB_CHECK */

#ifdef DEBUG_TB_INVALIDATE
#define DEBUG_TB_INVALIDATE_GATE 1
#else
#define DEBUG_TB_INVALIDATE_GATE 0
#endif

#ifdef DEBUG_TB_FLUSH
#define DEBUG_TB_FLUSH_GATE 1
#else
#define DEBUG_TB_FLUSH_GATE 0
#endif

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif

#ifdef DEBUG_TB_CHECK
#define DEBUG_TB_CHECK_GATE 1
#else
#define DEBUG_TB_CHECK_GATE 0
#endif

/*
 * Access to the various translation structures needs to be serialised.
 * In user-mode emulation access to the memory-related structures is
 * protected with mmap_lock; in !user-mode we use per-page locks.
 */
#ifdef CONFIG_SOFTMMU
#define assert_memory_lock()
#else
#define assert_memory_lock() tcg_debug_assert(have_mmap_lock())
#endif

#define SMC_BITMAP_USE_THRESHOLD 10

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    uintptr_t first_tb;
#ifdef CONFIG_SOFTMMU
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned long *code_bitmap;
    unsigned int code_write_count;
#else
    unsigned long flags;
    void *target_data;
#endif
#ifndef CONFIG_USER_ONLY
    QemuSpin lock;
#endif
} PageDesc;

/*
 * struct page_entry - page descriptor entry
 * @pd:     pointer to the &struct PageDesc of the page this entry represents
 * @index:  page index of the page
 * @locked: whether the page is locked
 *
 * This struct helps us keep track of the locked state of a page, without
 * bloating &struct PageDesc.
 *
 * A page lock protects accesses to all fields of &struct PageDesc.
 *
 * See also: &struct page_collection.
 */
struct page_entry {
    PageDesc *pd;
    tb_page_addr_t index;
    bool locked;
};

/*
 * struct page_collection - tracks a set of pages (i.e. &struct page_entry's)
 * @tree:   Binary search tree (BST) of the pages, with key == page index
 * @max:    Pointer to the page in @tree with the highest page index
 *
 * To avoid deadlock we lock pages in ascending order of page index.
 * When operating on a set of pages, we need to keep track of them so that
 * we can lock them in order and also unlock them later.  For this we collect
 * pages (i.e. &struct page_entry's) in a binary search tree, and keep track
 * of the @max entry so that a page already in the set can be detected
 * cheaply before attempting an out-of-order lock.
 */
struct page_collection {
    GTree *tree;
    struct page_entry *max;
};

/* list iterators for lists of tagged pointers in TranslationBlock */
#define TB_FOR_EACH_TAGGED(head, tb, n, field)                          \
    for (n = (head) & 1, tb = (TranslationBlock *)((head) & ~1);        \
         tb; tb = (TranslationBlock *)tb->field[n], n = (uintptr_t)tb & 1, \
             tb = (TranslationBlock *)((uintptr_t)tb & ~1))

#define PAGE_FOR_EACH_TB(pagedesc, tb, n)                       \
    TB_FOR_EACH_TAGGED((pagedesc)->first_tb, tb, n, page_next)

#define TB_FOR_EACH_JMP(head_tb, tb, n)                                 \
    TB_FOR_EACH_TAGGED((head_tb)->jmp_list_head, tb, n, jmp_list_next)
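
/*
 * Illustrative sketch (kept out of the build): how the tagged-pointer
 * lists above are encoded.  Bit 0 of each link, which is always clear
 * in an aligned pointer, stores which of the TB's two page slots the
 * link belongs to.  The helper name below is hypothetical.
 */
#if 0
static void tagged_list_example(PageDesc *pd, TranslationBlock *tb)
{
    TranslationBlock *it;
    unsigned int n;

    /* Link @tb at the head of @pd's list, tagged as the TB's page 1. */
    tb->page_next[1] = pd->first_tb;
    pd->first_tb = (uintptr_t)tb | 1;

    /* Walking the list recovers both the pointer and the tag. */
    PAGE_FOR_EACH_TB(pd, it, n) {
        /* first iteration: it == tb, n == 1 */
    }
}
#endif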

/*
 * In system mode we want L1_MAP to be based on ram offsets,
 * while in user mode we want it to be based on virtual addresses.
 */
#if !defined(CONFIG_USER_ONLY)
#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
# define L1_MAP_ADDR_SPACE_BITS  HOST_LONG_BITS
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_PHYS_ADDR_SPACE_BITS
#endif
#else
# define L1_MAP_ADDR_SPACE_BITS  MIN(HOST_LONG_BITS, TARGET_ABI_BITS)
#endif

/* Size of the L2 (and L3, etc) page tables.  */
#define V_L2_BITS 10
#define V_L2_SIZE (1 << V_L2_BITS)

/* Make sure all possible CPU event bits fit in tb->trace_vcpu_dstate */
QEMU_BUILD_BUG_ON(CPU_TRACE_DSTATE_MAX_EVENTS >
                  sizeof_field(TranslationBlock, trace_vcpu_dstate)
                  * BITS_PER_BYTE);

/*
 * L1 Mapping properties
 */
static int v_l1_size;
static int v_l1_shift;
static int v_l2_levels;

/*
 * The bottom level has pointers to PageDesc, and is indexed by
 * anything from 4 to (V_L2_BITS + 3) bits, depending on target page size.
 */
#define V_L1_MIN_BITS 4
#define V_L1_MAX_BITS (V_L2_BITS + 3)
#define V_L1_MAX_SIZE (1 << V_L1_MAX_BITS)

static void *l1_map[V_L1_MAX_SIZE];

/* code generation context */
TCGContext tcg_init_ctx;
__thread TCGContext *tcg_ctx;
TBContext tb_ctx;

static void page_table_config_init(void)
{
    uint32_t v_l1_bits;

    assert(TARGET_PAGE_BITS);
    /* The remaining index bits split into one top-level chunk plus as
       many V_L2_BITS-wide levels as needed.  */
    v_l1_bits = (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % V_L2_BITS;
    if (v_l1_bits < V_L1_MIN_BITS) {
        v_l1_bits += V_L2_BITS;
    }

    v_l1_size = 1 << v_l1_bits;
    v_l1_shift = L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - v_l1_bits;
    v_l2_levels = v_l1_shift / V_L2_BITS - 1;

    assert(v_l1_bits <= V_L1_MAX_BITS);
    assert(v_l1_shift % V_L2_BITS == 0);
    assert(v_l2_levels >= 0);
}
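
/*
 * Worked sizing example (hypothetical configuration): with
 * L1_MAP_ADDR_SPACE_BITS == 47, TARGET_PAGE_BITS == 12 and
 * V_L2_BITS == 10 there are 35 index bits; 35 % 10 == 5 >= V_L1_MIN_BITS,
 * so v_l1_size == 32, v_l1_shift == 30 and v_l2_levels == 2, i.e. a
 * 32-entry top table over two further 1024-entry radix levels.
 */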

static void cpu_gen_init(void)
{
    tcg_context_init(&tcg_init_ctx);
}

/* Encode VAL as a signed leb128 sequence at P.
   Return P incremented past the encoded value.  */
static uint8_t *encode_sleb128(uint8_t *p, target_long val)
{
    int more, byte;

    do {
        byte = val & 0x7f;
        val >>= 7;
        more = !((val == 0 && (byte & 0x40) == 0)
                 || (val == -1 && (byte & 0x40) != 0));
        if (more) {
            byte |= 0x80;
        }
        *p++ = byte;
    } while (more);

    return p;
}

/* Decode a signed leb128 sequence at *PP; increment *PP past the
   decoded value.  Return the decoded value.  */
static target_long decode_sleb128(const uint8_t **pp)
{
    const uint8_t *p = *pp;
    target_long val = 0;
    int byte, shift = 0;

    do {
        byte = *p++;
        val |= (target_ulong)(byte & 0x7f) << shift;
        shift += 7;
    } while (byte & 0x80);
    if (shift < TARGET_LONG_BITS && (byte & 0x40)) {
        val |= -(target_ulong)1 << shift;
    }

    *pp = p;
    return val;
}
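
/*
 * Byte-level sketch of the format (illustrative, not compiled): -2 fits
 * in one byte because its sign bit (0x40) is set with no continuation,
 * while 200 needs a continuation byte.
 */
#if 0
static void sleb128_example(void)
{
    uint8_t buf[8];
    const uint8_t *q = buf;

    g_assert(encode_sleb128(buf, -2) == buf + 1 && buf[0] == 0x7e);
    g_assert(decode_sleb128(&q) == -2);

    g_assert(encode_sleb128(buf, 200) == buf + 2);
    g_assert(buf[0] == 0xc8 && buf[1] == 0x01);
}
#endif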

/* Encode the data collected about the instructions while compiling TB.
   Place the data at BLOCK, and return the number of bytes consumed.

   The logical table consists of TARGET_INSN_START_WORDS target_ulong's,
   which come from the target's insn_start data, followed by a uintptr_t
   which comes from the host pc of the end of the code implementing the insn.

   Each line of the table is encoded as sleb128 deltas from the previous
   line.  The seed for the first line is { tb->pc, 0..., tb->tc.ptr }.
   That is, the first column is seeded with the guest pc, the last column
   with the host code start, and the middle columns with zeros.  */

static int encode_search(TranslationBlock *tb, uint8_t *block)
{
    uint8_t *highwater = tcg_ctx->code_gen_highwater;
    uint8_t *p = block;
    int i, j, n;

    for (i = 0, n = tb->icount; i < n; ++i) {
        target_ulong prev;

        for (j = 0; j < TARGET_INSN_START_WORDS; ++j) {
            if (i == 0) {
                prev = (j == 0 ? tb->pc : 0);
            } else {
                prev = tcg_ctx->gen_insn_data[i - 1][j];
            }
            p = encode_sleb128(p, tcg_ctx->gen_insn_data[i][j] - prev);
        }
        prev = (i == 0 ? 0 : tcg_ctx->gen_insn_end_off[i - 1]);
        p = encode_sleb128(p, tcg_ctx->gen_insn_end_off[i] - prev);

        /* Test for (pending) buffer overflow.  The assumption is that any
           one row beginning below the high water mark cannot overrun
           the buffer completely.  Thus we can test for overflow after
           encoding a row without having to check during encoding.  */
        if (unlikely(p > highwater)) {
            return -1;
        }
    }

    return p - block;
}
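
/*
 * Illustrative layout (hypothetical numbers): for a two-insn TB with
 * TARGET_INSN_START_WORDS == 1, guest pc 0x1000, insn sizes 2 and 4,
 * and host code chunks ending at offsets 8 and 20, the stream holds
 * the sleb128 deltas { 0, 8, 2, 12 }:
 *   row 0: pc 0x1000 - 0x1000 = 0,  end_off  8 - 0 = 8
 *   row 1: pc 0x1002 - 0x1000 = 2,  end_off 20 - 8 = 12
 */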

/* The cpu state corresponding to 'searched_pc' is restored.
 * When reset_icount is true, the current TB will be interrupted and
 * icount should be recalculated.
 */
static int cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb,
                                     uintptr_t searched_pc, bool reset_icount)
{
    target_ulong data[TARGET_INSN_START_WORDS] = { tb->pc };
    uintptr_t host_pc = (uintptr_t)tb->tc.ptr;
    CPUArchState *env = cpu->env_ptr;
    const uint8_t *p = tb->tc.ptr + tb->tc.size;
    int i, j, num_insns = tb->icount;
#ifdef CONFIG_PROFILER
    TCGProfile *prof = &tcg_ctx->prof;
    int64_t ti = profile_getclock();
#endif

    searched_pc -= GETPC_ADJ;

    if (searched_pc < host_pc) {
        return -1;
    }

    /* Reconstruct the stored insn data while looking for the point at
       which the end of the insn exceeds the searched_pc.  */
    for (i = 0; i < num_insns; ++i) {
        for (j = 0; j < TARGET_INSN_START_WORDS; ++j) {
            data[j] += decode_sleb128(&p);
        }
        host_pc += decode_sleb128(&p);
        if (host_pc > searched_pc) {
            goto found;
        }
    }
    return -1;

 found:
    if (reset_icount && (tb_cflags(tb) & CF_USE_ICOUNT)) {
        assert(icount_enabled());
        /* Reset the cycle counter to the start of the block
           and shift it by the number of actually executed instructions.  */
        cpu_neg(cpu)->icount_decr.u16.low += num_insns - i;
    }
    restore_state_to_opc(env, tb, data);

#ifdef CONFIG_PROFILER
    qatomic_set(&prof->restore_time,
                prof->restore_time + profile_getclock() - ti);
    qatomic_set(&prof->restore_count, prof->restore_count + 1);
#endif
    return 0;
}

void tb_destroy(TranslationBlock *tb)
{
    qemu_spin_destroy(&tb->jmp_lock);
}

bool cpu_restore_state(CPUState *cpu, uintptr_t host_pc, bool will_exit)
{
    /*
     * The host_pc has to be in the rx region of the code buffer.
     * If it is not we will not be able to resolve it here.
     * The two cases where host_pc will not be correct are:
     *
     *  - fault during translation (instruction fetch)
     *  - fault from helper (not using GETPC() macro)
     *
     * Either way we need to return early as we can't resolve it here.
     */
    if (in_code_gen_buffer((const void *)(host_pc - tcg_splitwx_diff))) {
        TranslationBlock *tb = tcg_tb_lookup(host_pc);
        if (tb) {
            cpu_restore_state_from_tb(cpu, tb, host_pc, will_exit);
            return true;
        }
    }
    return false;
}

static void page_init(void)
{
    page_size_init();
    page_table_config_init();

#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
    {
#ifdef HAVE_KINFO_GETVMMAP
        struct kinfo_vmentry *freep;
        int i, cnt;

        freep = kinfo_getvmmap(getpid(), &cnt);
        if (freep) {
            mmap_lock();
            for (i = 0; i < cnt; i++) {
                unsigned long startaddr, endaddr;

                startaddr = freep[i].kve_start;
                endaddr = freep[i].kve_end;
                if (h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                    } else {
#if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
                        endaddr = ~0ul;
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
#endif
                    }
                }
            }
            free(freep);
            mmap_unlock();
        }
#else
        FILE *f;

        last_brk = (unsigned long)sbrk(0);

        f = fopen("/compat/linux/proc/self/maps", "r");
        if (f) {
            mmap_lock();

            do {
                unsigned long startaddr, endaddr;
                int n;

                n = fscanf(f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);

                if (n == 2 && h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                    } else {
                        endaddr = ~0ul;
                    }
                    page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                }
            } while (!feof(f));

            fclose(f);
            mmap_unlock();
        }
#endif
    }
#endif
}

static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
{
    PageDesc *pd;
    void **lp;
    int i;

    /* Level 1.  Always allocated.  */
    lp = l1_map + ((index >> v_l1_shift) & (v_l1_size - 1));

    /* Level 2..N-1.  */
    for (i = v_l2_levels; i > 0; i--) {
        void **p = qatomic_rcu_read(lp);

        if (p == NULL) {
            void *existing;

            if (!alloc) {
                return NULL;
            }
            p = g_new0(void *, V_L2_SIZE);
            existing = qatomic_cmpxchg(lp, NULL, p);
            if (unlikely(existing)) {
                g_free(p);
                p = existing;
            }
        }

        lp = p + ((index >> (i * V_L2_BITS)) & (V_L2_SIZE - 1));
    }

    pd = qatomic_rcu_read(lp);
    if (pd == NULL) {
        void *existing;

        if (!alloc) {
            return NULL;
        }
        pd = g_new0(PageDesc, V_L2_SIZE);
#ifndef CONFIG_USER_ONLY
        {
            int i;

            for (i = 0; i < V_L2_SIZE; i++) {
                qemu_spin_init(&pd[i].lock);
            }
        }
#endif
        existing = qatomic_cmpxchg(lp, NULL, pd);
        if (unlikely(existing)) {
#ifndef CONFIG_USER_ONLY
            {
                int i;

                for (i = 0; i < V_L2_SIZE; i++) {
                    qemu_spin_destroy(&pd[i].lock);
                }
            }
#endif
            g_free(pd);
            pd = existing;
        }
    }

    return pd + (index & (V_L2_SIZE - 1));
}
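
/*
 * The lock-free publish pattern used at each radix level above, shown
 * in isolation (a sketch, not compiled): readers use qatomic_rcu_read()
 * and never block, while racing allocators resolve via qatomic_cmpxchg()
 * and the loser frees its table.  The helper name is hypothetical.
 */
#if 0
static void *publish_once(void **slot, size_t nelem)
{
    void *p = qatomic_rcu_read(slot);

    if (p == NULL) {
        void *fresh = g_new0(void *, nelem);
        void *existing = qatomic_cmpxchg(slot, NULL, fresh);

        if (existing) {
            /* Another thread won the race; use its table. */
            g_free(fresh);
            p = existing;
        } else {
            p = fresh;
        }
    }
    return p;
}
#endif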

static inline PageDesc *page_find(tb_page_addr_t index)
{
    return page_find_alloc(index, 0);
}

static void page_lock_pair(PageDesc **ret_p1, tb_page_addr_t phys1,
                           PageDesc **ret_p2, tb_page_addr_t phys2, int alloc);

/* In user-mode page locks aren't used; mmap_lock is enough */
#ifdef CONFIG_USER_ONLY

#define assert_page_locked(pd) tcg_debug_assert(have_mmap_lock())

static inline void page_lock(PageDesc *pd)
{ }

static inline void page_unlock(PageDesc *pd)
{ }

static inline void page_lock_tb(const TranslationBlock *tb)
{ }

static inline void page_unlock_tb(const TranslationBlock *tb)
{ }

struct page_collection *
page_collection_lock(tb_page_addr_t start, tb_page_addr_t end)
{
    return NULL;
}

void page_collection_unlock(struct page_collection *set)
{ }
#else /* !CONFIG_USER_ONLY */

#ifdef CONFIG_DEBUG_TCG

static __thread GHashTable *ht_pages_locked_debug;

static void ht_pages_locked_debug_init(void)
{
    if (ht_pages_locked_debug) {
        return;
    }
    ht_pages_locked_debug = g_hash_table_new(NULL, NULL);
}

static bool page_is_locked(const PageDesc *pd)
{
    PageDesc *found;

    ht_pages_locked_debug_init();
    found = g_hash_table_lookup(ht_pages_locked_debug, pd);
    return !!found;
}

static void page_lock__debug(PageDesc *pd)
{
    ht_pages_locked_debug_init();
    g_assert(!page_is_locked(pd));
    g_hash_table_insert(ht_pages_locked_debug, pd, pd);
}

static void page_unlock__debug(const PageDesc *pd)
{
    bool removed;

    ht_pages_locked_debug_init();
    g_assert(page_is_locked(pd));
    removed = g_hash_table_remove(ht_pages_locked_debug, pd);
    g_assert(removed);
}

static void
do_assert_page_locked(const PageDesc *pd, const char *file, int line)
{
    if (unlikely(!page_is_locked(pd))) {
        error_report("assert_page_lock: PageDesc %p not locked @ %s:%d",
                     pd, file, line);
        abort();
    }
}

#define assert_page_locked(pd) do_assert_page_locked(pd, __FILE__, __LINE__)

void assert_no_pages_locked(void)
{
    ht_pages_locked_debug_init();
    g_assert(g_hash_table_size(ht_pages_locked_debug) == 0);
}

#else /* !CONFIG_DEBUG_TCG */

#define assert_page_locked(pd)

static inline void page_lock__debug(const PageDesc *pd)
{
}

static inline void page_unlock__debug(const PageDesc *pd)
{
}

#endif /* CONFIG_DEBUG_TCG */

static inline void page_lock(PageDesc *pd)
{
    page_lock__debug(pd);
    qemu_spin_lock(&pd->lock);
}

static inline void page_unlock(PageDesc *pd)
{
    qemu_spin_unlock(&pd->lock);
    page_unlock__debug(pd);
}

/* lock the page(s) of a TB in the correct acquisition order */
static inline void page_lock_tb(const TranslationBlock *tb)
{
    page_lock_pair(NULL, tb->page_addr[0], NULL, tb->page_addr[1], 0);
}

static inline void page_unlock_tb(const TranslationBlock *tb)
{
    PageDesc *p1 = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);

    page_unlock(p1);
    if (unlikely(tb->page_addr[1] != -1)) {
        PageDesc *p2 = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);

        if (p2 != p1) {
            page_unlock(p2);
        }
    }
}

static inline struct page_entry *
page_entry_new(PageDesc *pd, tb_page_addr_t index)
{
    struct page_entry *pe = g_malloc(sizeof(*pe));

    pe->index = index;
    pe->pd = pd;
    pe->locked = false;
    return pe;
}

static void page_entry_destroy(gpointer p)
{
    struct page_entry *pe = p;

    g_assert(pe->locked);
    page_unlock(pe->pd);
    g_free(pe);
}

/* returns false on success */
static bool page_entry_trylock(struct page_entry *pe)
{
    bool busy;

    busy = qemu_spin_trylock(&pe->pd->lock);
    if (!busy) {
        g_assert(!pe->locked);
        pe->locked = true;
        page_lock__debug(pe->pd);
    }
    return busy;
}

static void do_page_entry_lock(struct page_entry *pe)
{
    page_lock(pe->pd);
    g_assert(!pe->locked);
    pe->locked = true;
}

static gboolean page_entry_lock(gpointer key, gpointer value, gpointer data)
{
    struct page_entry *pe = value;

    do_page_entry_lock(pe);
    return FALSE;
}

static gboolean page_entry_unlock(gpointer key, gpointer value, gpointer data)
{
    struct page_entry *pe = value;

    if (pe->locked) {
        pe->locked = false;
        page_unlock(pe->pd);
    }
    return FALSE;
}

/*
 * Trylock a page, and if successful, add the page to a collection.
 * Returns true ("busy") if the page could not be locked; false otherwise.
 */
static bool page_trylock_add(struct page_collection *set, tb_page_addr_t addr)
{
    tb_page_addr_t index = addr >> TARGET_PAGE_BITS;
    struct page_entry *pe;
    PageDesc *pd;

    pe = g_tree_lookup(set->tree, &index);
    if (pe) {
        return false;
    }

    pd = page_find(index);
    if (pd == NULL) {
        return false;
    }

    pe = page_entry_new(pd, index);
    g_tree_insert(set->tree, &pe->index, pe);

    /*
     * If this is either (1) the first insertion or (2) a page whose index
     * is higher than any other so far, just lock the page and move on.
     */
    if (set->max == NULL || pe->index > set->max->index) {
        set->max = pe;
        do_page_entry_lock(pe);
        return false;
    }
    /*
     * Try to acquire out-of-order lock; if busy, return busy so that we
     * acquire locks in order.
     */
    return page_entry_trylock(pe);
}

static gint tb_page_addr_cmp(gconstpointer ap, gconstpointer bp, gpointer udata)
{
    tb_page_addr_t a = *(const tb_page_addr_t *)ap;
    tb_page_addr_t b = *(const tb_page_addr_t *)bp;

    if (a == b) {
        return 0;
    } else if (a < b) {
        return -1;
    }
    return 1;
}

/*
 * Lock a range of pages ([@start,@end[) as well as the pages of all
 * intersecting TBs.
 * Locking order: acquire locks in ascending order of page index.
 */
struct page_collection *
page_collection_lock(tb_page_addr_t start, tb_page_addr_t end)
{
    struct page_collection *set = g_malloc(sizeof(*set));
    tb_page_addr_t index;
    PageDesc *pd;

    start >>= TARGET_PAGE_BITS;
    end >>= TARGET_PAGE_BITS;
    g_assert(start <= end);

    set->tree = g_tree_new_full(tb_page_addr_cmp, NULL, NULL,
                                page_entry_destroy);
    set->max = NULL;
    assert_no_pages_locked();

 retry:
    g_tree_foreach(set->tree, page_entry_lock, NULL);

    for (index = start; index <= end; index++) {
        TranslationBlock *tb;
        int n;

        pd = page_find(index);
        if (pd == NULL) {
            continue;
        }
        if (page_trylock_add(set, index << TARGET_PAGE_BITS)) {
            g_tree_foreach(set->tree, page_entry_unlock, NULL);
            goto retry;
        }
        assert_page_locked(pd);
        PAGE_FOR_EACH_TB(pd, tb, n) {
            if (page_trylock_add(set, tb->page_addr[0]) ||
                (tb->page_addr[1] != -1 &&
                 page_trylock_add(set, tb->page_addr[1]))) {
                /* drop all locks, and reacquire in order */
                g_tree_foreach(set->tree, page_entry_unlock, NULL);
                goto retry;
            }
        }
    }
    return set;
}
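
/*
 * Deadlock-avoidance sketch (illustrative): locks are only taken in
 * ascending page-index order.  A page below the current maximum is
 * acquired with trylock, and on contention every lock is dropped and
 * the walk restarts.  Locking the set {5, 3} therefore proceeds as:
 *   lock 5 (new max) -> trylock 3 -> busy? unlock all -> retry in order.
 */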

void page_collection_unlock(struct page_collection *set)
{
    /* entries are unlocked and freed via page_entry_destroy */
    g_tree_destroy(set->tree);
    g_free(set);
}

#endif /* !CONFIG_USER_ONLY */

static void page_lock_pair(PageDesc **ret_p1, tb_page_addr_t phys1,
                           PageDesc **ret_p2, tb_page_addr_t phys2, int alloc)
{
    PageDesc *p1, *p2;
    tb_page_addr_t page1;
    tb_page_addr_t page2;

    assert_memory_lock();
    g_assert(phys1 != -1);

    page1 = phys1 >> TARGET_PAGE_BITS;
    page2 = phys2 >> TARGET_PAGE_BITS;

    p1 = page_find_alloc(page1, alloc);
    if (ret_p1) {
        *ret_p1 = p1;
    }
    if (likely(phys2 == -1)) {
        page_lock(p1);
        return;
    } else if (page1 == page2) {
        page_lock(p1);
        if (ret_p2) {
            *ret_p2 = p1;
        }
        return;
    }
    p2 = page_find_alloc(page2, alloc);
    if (ret_p2) {
        *ret_p2 = p2;
    }
    /* lock in ascending page-index order to avoid deadlock */
    if (page1 < page2) {
        page_lock(p1);
        page_lock(p2);
    } else {
        page_lock(p2);
        page_lock(p1);
    }
}

/* Minimum size of the code gen buffer.  This number is randomly chosen,
   but not so small that we can't have a fair number of TB's live.  */
#define MIN_CODE_GEN_BUFFER_SIZE     (1 * MiB)

/* Maximum size of the code gen buffer we'd like to use.  Unless otherwise
   indicated, this is constrained by the range of direct branches on the
   host cpu, as used by the TCG implementation of goto_tb.  */
#if defined(__x86_64__)
# define MAX_CODE_GEN_BUFFER_SIZE  (2 * GiB)
#elif defined(__sparc__)
# define MAX_CODE_GEN_BUFFER_SIZE  (2 * GiB)
#elif defined(__powerpc64__)
# define MAX_CODE_GEN_BUFFER_SIZE  (2 * GiB)
#elif defined(__powerpc__)
# define MAX_CODE_GEN_BUFFER_SIZE  (32 * MiB)
#elif defined(__aarch64__)
# define MAX_CODE_GEN_BUFFER_SIZE  (2 * GiB)
#elif defined(__s390x__)
  /* We have a +- 4GB range on the branches; leave some slop.  */
# define MAX_CODE_GEN_BUFFER_SIZE  (3 * GiB)
#elif defined(__mips__)
  /* We have a 256MB branch region, but leave room to make sure the
     main executable is also within that region.  */
# define MAX_CODE_GEN_BUFFER_SIZE  (128 * MiB)
#else
# define MAX_CODE_GEN_BUFFER_SIZE  ((size_t)-1)
#endif

#if TCG_TARGET_REG_BITS == 32
#define DEFAULT_CODE_GEN_BUFFER_SIZE_1 (32 * MiB)
#ifdef CONFIG_USER_ONLY
/*
 * For user mode on smaller 32 bit systems we may run into trouble
 * allocating big chunks of data in the right place.  On these systems
 * we utilise a static code generation buffer directly in the binary.
 */
#define USE_STATIC_CODE_GEN_BUFFER
#endif
#else /* TCG_TARGET_REG_BITS == 64 */
#ifdef CONFIG_USER_ONLY
/*
 * As user-mode emulation typically means running multiple instances
 * of the translator, don't go overboard with the default code gen
 * buffer lest we make things too hard for the OS.
 */
#define DEFAULT_CODE_GEN_BUFFER_SIZE_1 (128 * MiB)
#else
/*
 * We expect most system emulation to run one or two guests per host.
 * Users running large scale system emulation may want to tweak their
 * runtime setup via the tb-size control on the command line.
 */
#define DEFAULT_CODE_GEN_BUFFER_SIZE_1 (1 * GiB)
#endif
#endif

#define DEFAULT_CODE_GEN_BUFFER_SIZE \
  (DEFAULT_CODE_GEN_BUFFER_SIZE_1 < MAX_CODE_GEN_BUFFER_SIZE \
   ? DEFAULT_CODE_GEN_BUFFER_SIZE_1 : MAX_CODE_GEN_BUFFER_SIZE)

static size_t size_code_gen_buffer(size_t tb_size)
{
    /* Size the buffer.  */
    if (tb_size == 0) {
        size_t phys_mem = qemu_get_host_physmem();
        if (phys_mem == 0) {
            tb_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
        } else {
            tb_size = MIN(DEFAULT_CODE_GEN_BUFFER_SIZE, phys_mem / 8);
        }
    }
    if (tb_size < MIN_CODE_GEN_BUFFER_SIZE) {
        tb_size = MIN_CODE_GEN_BUFFER_SIZE;
    }
    if (tb_size > MAX_CODE_GEN_BUFFER_SIZE) {
        tb_size = MAX_CODE_GEN_BUFFER_SIZE;
    }
    return tb_size;
}
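
/*
 * Worked example (hypothetical host): with 16 GiB of RAM and no explicit
 * tb_size, this picks MIN(DEFAULT_CODE_GEN_BUFFER_SIZE, 16 GiB / 8),
 * i.e. 1 GiB for a 64-bit system-mode build, then clamps the result to
 * [MIN_CODE_GEN_BUFFER_SIZE, MAX_CODE_GEN_BUFFER_SIZE].
 */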

#ifdef __mips__
/* In order to use J and JAL within the code_gen_buffer, we require
   that the buffer not cross a 256MB boundary.  */
static inline bool cross_256mb(void *addr, size_t size)
{
    return ((uintptr_t)addr ^ ((uintptr_t)addr + size)) & ~0x0ffffffful;
}

/* We weren't able to allocate a buffer without crossing that boundary,
   so make do with the larger portion of the buffer that doesn't cross.
   Returns the new base of the buffer, and adjusts code_gen_buffer_size.  */
static inline void *split_cross_256mb(void *buf1, size_t size1)
{
    void *buf2 = (void *)(((uintptr_t)buf1 + size1) & ~0x0ffffffful);
    size_t size2 = buf1 + size1 - buf2;

    size1 = buf2 - buf1;
    if (size1 < size2) {
        size1 = size2;
        buf1 = buf2;
    }

    tcg_ctx->code_gen_buffer_size = size1;
    return buf1;
}
#endif
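
/*
 * Worked example (illustrative addresses): a 64 MiB buffer starting at
 * 0x0fc00000 crosses into the next 256 MiB region at 0x10000000.
 * split_cross_256mb() then keeps the larger piece -- the 60 MiB above
 * the boundary -- so that J/JAL targets stay within one region.
 */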

#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
    __attribute__((aligned(CODE_GEN_ALIGN)));

static bool alloc_code_gen_buffer(size_t tb_size, int splitwx, Error **errp)
{
    void *buf, *end;
    size_t size;

    if (splitwx > 0) {
        error_setg(errp, "jit split-wx not supported");
        return false;
    }

    /* page-align the beginning and end of the buffer */
    buf = static_code_gen_buffer;
    end = static_code_gen_buffer + sizeof(static_code_gen_buffer);
    buf = QEMU_ALIGN_PTR_UP(buf, qemu_real_host_page_size);
    end = QEMU_ALIGN_PTR_DOWN(end, qemu_real_host_page_size);

    size = end - buf;

    /* Honor a command-line option limiting the size of the buffer.  */
    if (size > tb_size) {
        size = QEMU_ALIGN_DOWN(tb_size, qemu_real_host_page_size);
    }
    tcg_ctx->code_gen_buffer_size = size;

#ifdef __mips__
    if (cross_256mb(buf, size)) {
        buf = split_cross_256mb(buf, size);
        size = tcg_ctx->code_gen_buffer_size;
    }
#endif

    if (qemu_mprotect_rwx(buf, size)) {
        error_setg_errno(errp, errno, "mprotect of jit buffer");
        return false;
    }
    qemu_madvise(buf, size, QEMU_MADV_HUGEPAGE);

    tcg_ctx->code_gen_buffer = buf;
    return true;
}
#elif defined(_WIN32)
static bool alloc_code_gen_buffer(size_t size, int splitwx, Error **errp)
{
    void *buf;

    if (splitwx > 0) {
        error_setg(errp, "jit split-wx not supported");
        return false;
    }

    buf = VirtualAlloc(NULL, size, MEM_RESERVE | MEM_COMMIT,
                       PAGE_EXECUTE_READWRITE);
    if (buf == NULL) {
        error_setg_win32(errp, GetLastError(),
                         "allocate %zu bytes for jit buffer", size);
        return false;
    }

    tcg_ctx->code_gen_buffer = buf;
    tcg_ctx->code_gen_buffer_size = size;
    return true;
}
#else
static bool alloc_code_gen_buffer_anon(size_t size, int prot,
                                       int flags, Error **errp)
{
    void *buf;

    buf = mmap(NULL, size, prot, flags, -1, 0);
    if (buf == MAP_FAILED) {
        error_setg_errno(errp, errno,
                         "allocate %zu bytes for jit buffer", size);
        return false;
    }
    tcg_ctx->code_gen_buffer_size = size;

#ifdef __mips__
    if (cross_256mb(buf, size)) {
        /*
         * Try again, with the original still mapped, to avoid
         * re-acquiring the same 256mb crossing.
         */
        size_t size2;
        void *buf2 = mmap(NULL, size, prot, flags, -1, 0);
        switch ((int)(buf2 != MAP_FAILED)) {
        case 1:
            if (!cross_256mb(buf2, size)) {
                /* Success!  Use the new buffer.  */
                munmap(buf, size);
                break;
            }
            /* Failure.  Work with what we had.  */
            munmap(buf2, size);
            /* fallthru */
        default:
            /* Split the original buffer.  Free the smaller half.  */
            buf2 = split_cross_256mb(buf, size);
            size2 = tcg_ctx->code_gen_buffer_size;
            if (buf == buf2) {
                munmap(buf + size2, size - size2);
            } else {
                munmap(buf, size - size2);
            }
            size = size2;
            break;
        }
        buf = buf2;
    }
#endif

    /* Request large pages for the buffer.  */
    qemu_madvise(buf, size, QEMU_MADV_HUGEPAGE);

    tcg_ctx->code_gen_buffer = buf;
    return true;
}

#ifndef CONFIG_TCG_INTERPRETER
#ifdef CONFIG_POSIX
#include "qemu/memfd.h"

static bool alloc_code_gen_buffer_splitwx_memfd(size_t size, Error **errp)
{
    void *buf_rw = NULL, *buf_rx = MAP_FAILED;
    int fd = -1;

#ifdef __mips__
    /* Find space for the RX mapping, vs the 256MiB regions.  */
    if (!alloc_code_gen_buffer_anon(size, PROT_NONE,
                                    MAP_PRIVATE | MAP_ANONYMOUS |
                                    MAP_NORESERVE, errp)) {
        return false;
    }
    /* The size of the mapping may have been adjusted.  */
    size = tcg_ctx->code_gen_buffer_size;
    buf_rx = tcg_ctx->code_gen_buffer;
#endif

    buf_rw = qemu_memfd_alloc("tcg-jit", size, 0, &fd, errp);
    if (buf_rw == NULL) {
        goto fail;
    }

#ifdef __mips__
    void *tmp = mmap(buf_rx, size, PROT_READ | PROT_EXEC,
                     MAP_SHARED | MAP_FIXED, fd, 0);
    if (tmp != buf_rx) {
        goto fail_rx;
    }
#else
    buf_rx = mmap(NULL, size, PROT_READ | PROT_EXEC, MAP_SHARED, fd, 0);
    if (buf_rx == MAP_FAILED) {
        goto fail_rx;
    }
#endif

    close(fd);
    tcg_ctx->code_gen_buffer = buf_rw;
    tcg_ctx->code_gen_buffer_size = size;
    tcg_splitwx_diff = buf_rx - buf_rw;

    /* Request large pages for the buffer and the splitwx.  */
    qemu_madvise(buf_rw, size, QEMU_MADV_HUGEPAGE);
    qemu_madvise(buf_rx, size, QEMU_MADV_HUGEPAGE);
    return true;

 fail_rx:
    error_setg_errno(errp, errno, "failed to map shared memory for execute");
 fail:
    if (buf_rx != MAP_FAILED) {
        munmap(buf_rx, size);
    }
    if (buf_rw) {
        munmap(buf_rw, size);
    }
    if (fd >= 0) {
        close(fd);
    }
    return false;
}
#endif /* CONFIG_POSIX */

#ifdef CONFIG_DARWIN
#include <mach/mach.h>

extern kern_return_t mach_vm_remap(vm_map_t target_task,
                                   mach_vm_address_t *target_address,
                                   mach_vm_size_t size,
                                   mach_vm_offset_t mask,
                                   int flags,
                                   vm_map_t src_task,
                                   mach_vm_address_t src_address,
                                   boolean_t copy,
                                   vm_prot_t *cur_protection,
                                   vm_prot_t *max_protection,
                                   vm_inherit_t inheritance);

static bool alloc_code_gen_buffer_splitwx_vmremap(size_t size, Error **errp)
{
    kern_return_t ret;
    mach_vm_address_t buf_rw, buf_rx;
    vm_prot_t cur_prot, max_prot;

    /* Map the read-write portion via normal anon memory.  */
    if (!alloc_code_gen_buffer_anon(size, PROT_READ | PROT_WRITE,
                                    MAP_PRIVATE | MAP_ANONYMOUS, errp)) {
        return false;
    }

    buf_rw = (mach_vm_address_t)tcg_ctx->code_gen_buffer;
    buf_rx = 0;
    ret = mach_vm_remap(mach_task_self(),
                        &buf_rx,
                        size,
                        0,
                        VM_FLAGS_ANYWHERE,
                        mach_task_self(),
                        buf_rw,
                        false,
                        &cur_prot,
                        &max_prot,
                        VM_INHERIT_NONE);
    if (ret != KERN_SUCCESS) {
        /* TODO: Convert "ret" to a human readable error message.  */
        error_setg(errp, "vm_remap for jit splitwx failed");
        munmap((void *)buf_rw, size);
        return false;
    }

    if (mprotect((void *)buf_rx, size, PROT_READ | PROT_EXEC) != 0) {
        error_setg_errno(errp, errno, "mprotect for jit splitwx");
        munmap((void *)buf_rx, size);
        munmap((void *)buf_rw, size);
        return false;
    }

    tcg_splitwx_diff = buf_rx - buf_rw;
    return true;
}
#endif /* CONFIG_DARWIN */
#endif /* CONFIG_TCG_INTERPRETER */

static bool alloc_code_gen_buffer_splitwx(size_t size, Error **errp)
{
#ifndef CONFIG_TCG_INTERPRETER
# ifdef CONFIG_DARWIN
    return alloc_code_gen_buffer_splitwx_vmremap(size, errp);
# endif
# ifdef CONFIG_POSIX
    return alloc_code_gen_buffer_splitwx_memfd(size, errp);
# endif
#endif
    error_setg(errp, "jit split-wx not supported");
    return false;
}

static bool alloc_code_gen_buffer(size_t size, int splitwx, Error **errp)
{
    ERRP_GUARD();
    int prot, flags;

    if (splitwx) {
        if (alloc_code_gen_buffer_splitwx(size, errp)) {
            return true;
        }
        /*
         * If splitwx force-on (1), fail;
         * if splitwx default-on (-1), fall through to splitwx off.
         */
        if (splitwx > 0) {
            return false;
        }
        error_free_or_abort(errp);
    }

    prot = PROT_READ | PROT_WRITE | PROT_EXEC;
    flags = MAP_PRIVATE | MAP_ANONYMOUS;
#ifdef CONFIG_TCG_INTERPRETER
    /* The tcg interpreter does not need execute permission. */
    prot = PROT_READ | PROT_WRITE;
#elif defined(CONFIG_DARWIN)
    /* Applicable to both iOS and macOS (Apple Silicon). */
    if (!splitwx) {
        flags |= MAP_JIT;
    }
#endif

    return alloc_code_gen_buffer_anon(size, prot, flags, errp);
}
#endif /* USE_STATIC_CODE_GEN_BUFFER, WIN32, POSIX */

static bool tb_cmp(const void *ap, const void *bp)
{
    const TranslationBlock *a = ap;
    const TranslationBlock *b = bp;

    return a->pc == b->pc &&
        a->cs_base == b->cs_base &&
        a->flags == b->flags &&
        (tb_cflags(a) & ~CF_INVALID) == (tb_cflags(b) & ~CF_INVALID) &&
        a->trace_vcpu_dstate == b->trace_vcpu_dstate &&
        a->page_addr[0] == b->page_addr[0] &&
        a->page_addr[1] == b->page_addr[1];
}
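
/*
 * Note (illustrative): lookups build a partially filled TranslationBlock
 * "descriptor" and probe the QHT with tb_hash_func(phys_pc, pc, flags,
 * cflags, trace_vcpu_dstate); the comparator then filters any hash
 * collisions field by field.  The lookup side lives in cpu-exec.c
 * (tb_htable_lookup), which uses a custom comparator because
 * page_addr[1] of a cross-page TB is not known before translation.
 */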

static void tb_htable_init(void)
{
    unsigned int mode = QHT_MODE_AUTO_RESIZE;

    qht_init(&tb_ctx.htable, tb_cmp, CODE_GEN_HTABLE_SIZE, mode);
}

/* Must be called before using the QEMU cpus.  'tb_size' is the size
   (in bytes) allocated to the translation buffer.  Zero means default
   size. */
void tcg_exec_init(unsigned long tb_size, int splitwx)
{
    bool ok;

    tcg_allowed = true;
    cpu_gen_init();
    page_init();
    tb_htable_init();

    ok = alloc_code_gen_buffer(size_code_gen_buffer(tb_size),
                               splitwx, &error_fatal);
    assert(ok);

#if defined(CONFIG_SOFTMMU)
    /* There's no guest base to take into account, so go ahead and
       initialize the prologue now.  */
    tcg_prologue_init(tcg_ctx);
#endif
}

/* call with @p->lock held */
static inline void invalidate_page_bitmap(PageDesc *p)
{
    assert_page_locked(p);
#ifdef CONFIG_SOFTMMU
    g_free(p->code_bitmap);
    p->code_bitmap = NULL;
    p->code_write_count = 0;
#endif
}

/* Set to NULL all the 'first_tb' fields in all PageDescs. */
static void page_flush_tb_1(int level, void **lp)
{
    int i;

    if (*lp == NULL) {
        return;
    }
    if (level == 0) {
        PageDesc *pd = *lp;

        for (i = 0; i < V_L2_SIZE; ++i) {
            page_lock(&pd[i]);
            pd[i].first_tb = (uintptr_t)NULL;
            invalidate_page_bitmap(pd + i);
            page_unlock(&pd[i]);
        }
    } else {
        void **pp = *lp;

        for (i = 0; i < V_L2_SIZE; ++i) {
            page_flush_tb_1(level - 1, pp + i);
        }
    }
}

static void page_flush_tb(void)
{
    int i, l1_sz = v_l1_size;

    for (i = 0; i < l1_sz; i++) {
        page_flush_tb_1(v_l2_levels, l1_map + i);
    }
}

static gboolean tb_host_size_iter(gpointer key, gpointer value, gpointer data)
{
    const TranslationBlock *tb = value;
    size_t *size = data;

    *size += tb->tc.size;
    return false;
}

/* flush all the translation blocks */
static void do_tb_flush(CPUState *cpu, run_on_cpu_data tb_flush_count)
{
    bool did_flush = false;

    mmap_lock();
    /* If it is already been done on request of another CPU,
     * just retry.
     */
    if (tb_ctx.tb_flush_count != tb_flush_count.host_int) {
        goto done;
    }
    did_flush = true;

    if (DEBUG_TB_FLUSH_GATE) {
        size_t nb_tbs = tcg_nb_tbs();
        size_t host_size = 0;

        tcg_tb_foreach(tb_host_size_iter, &host_size);
        printf("qemu: flush code_size=%zu nb_tbs=%zu avg_tb_size=%zu\n",
               tcg_code_size(), nb_tbs, nb_tbs > 0 ? host_size / nb_tbs : 0);
    }

    CPU_FOREACH(cpu) {
        cpu_tb_jmp_cache_clear(cpu);
    }

    qht_reset_size(&tb_ctx.htable, CODE_GEN_HTABLE_SIZE);
    page_flush_tb();

    tcg_region_reset_all();
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    qatomic_mb_set(&tb_ctx.tb_flush_count, tb_ctx.tb_flush_count + 1);

done:
    mmap_unlock();
    if (did_flush) {
        qemu_plugin_flush_cb();
    }
}

void tb_flush(CPUState *cpu)
{
    if (tcg_enabled()) {
        unsigned tb_flush_count = qatomic_mb_read(&tb_ctx.tb_flush_count);

        if (cpu_in_exclusive_context(cpu)) {
            do_tb_flush(cpu, RUN_ON_CPU_HOST_INT(tb_flush_count));
        } else {
            async_safe_run_on_cpu(cpu, do_tb_flush,
                                  RUN_ON_CPU_HOST_INT(tb_flush_count));
        }
    }
}

/*
 * Formerly ifdef DEBUG_TB_CHECK.  These debug functions are user-mode-only,
 * so in order to prevent bit rot we compile them unconditionally in user-mode,
 * and let the optimizer get rid of them by wrapping their user-only callers
 * with if (DEBUG_TB_CHECK_GATE).
 */
#ifdef CONFIG_USER_ONLY

static void do_tb_invalidate_check(void *p, uint32_t hash, void *userp)
{
    TranslationBlock *tb = p;
    target_ulong addr = *(target_ulong *)userp;

    if (!(addr + TARGET_PAGE_SIZE <= tb->pc || addr >= tb->pc + tb->size)) {
        printf("ERROR invalidate: address=" TARGET_FMT_lx
               " PC=%08lx size=%04x\n", addr, (long)tb->pc, tb->size);
    }
}

/*
 * verify that all the pages have correct rights for code
 *
 * Called with mmap_lock held.
 */
static void tb_invalidate_check(target_ulong address)
{
    address &= TARGET_PAGE_MASK;
    qht_iter(&tb_ctx.htable, do_tb_invalidate_check, &address);
}

static void do_tb_page_check(void *p, uint32_t hash, void *userp)
{
    TranslationBlock *tb = p;
    int flags1, flags2;

    flags1 = page_get_flags(tb->pc);
    flags2 = page_get_flags(tb->pc + tb->size - 1);
    if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
        printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
               (long)tb->pc, tb->size, flags1, flags2);
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    qht_iter(&tb_ctx.htable, do_tb_page_check, NULL);
}

#endif /* CONFIG_USER_ONLY */

/*
 * user-mode: call with mmap_lock held
 * !user-mode: call with @pd->lock held
 */
static inline void tb_page_remove(PageDesc *pd, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    uintptr_t *pprev;
    unsigned int n1;

    assert_page_locked(pd);
    pprev = &pd->first_tb;
    PAGE_FOR_EACH_TB(pd, tb1, n1) {
        if (tb1 == tb) {
            *pprev = tb1->page_next[n1];
            return;
        }
        pprev = &tb1->page_next[n1];
    }
    g_assert_not_reached();
}

/* remove @orig from its @n_orig-th jump list */
static inline void tb_remove_from_jmp_list(TranslationBlock *orig, int n_orig)
{
    uintptr_t ptr, ptr_locked;
    TranslationBlock *dest;
    TranslationBlock *tb;
    uintptr_t *pprev;
    int n;

    /* mark the LSB of jmp_dest[] so that no further jumps can be inserted */
    ptr = qatomic_or_fetch(&orig->jmp_dest[n_orig], 1);
    dest = (TranslationBlock *)(ptr & ~1);
    if (dest == NULL) {
        return;
    }

    qemu_spin_lock(&dest->jmp_lock);
    /*
     * While acquiring the lock, the jump might have been removed if the
     * destination TB was invalidated; check again.
     */
    ptr_locked = qatomic_read(&orig->jmp_dest[n_orig]);
    if (ptr_locked != ptr) {
        qemu_spin_unlock(&dest->jmp_lock);
        /*
         * The only possibility is that the jump was unlinked via
         * tb_jmp_unlink(dest).  Seeing here another destination would be a bug,
         * because we set the LSB above.
         */
        g_assert(ptr_locked == 1 && dest->cflags & CF_INVALID);
        return;
    }
    /*
     * We first acquired the lock, and since the destination pointer matches,
     * we know for sure that @orig is in the jmp list.
     */
    pprev = &dest->jmp_list_head;
    TB_FOR_EACH_JMP(dest, tb, n) {
        if (tb == orig && n == n_orig) {
            *pprev = tb->jmp_list_next[n];
            /* no need to set orig->jmp_dest[n_orig]; setting the LSB was enough */
            qemu_spin_unlock(&dest->jmp_lock);
            return;
        }
        pprev = &tb->jmp_list_next[n];
    }
    g_assert_not_reached();
}

/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    uintptr_t addr = (uintptr_t)(tb->tc.ptr + tb->jmp_reset_offset[n]);
    tb_set_jmp_target(tb, n, addr);
}

/* remove any jumps to the TB */
static inline void tb_jmp_unlink(TranslationBlock *dest)
{
    TranslationBlock *tb;
    int n;

    qemu_spin_lock(&dest->jmp_lock);

    TB_FOR_EACH_JMP(dest, tb, n) {
        tb_reset_jump(tb, n);
        qatomic_and(&tb->jmp_dest[n], (uintptr_t)NULL | 1);
        /* No need to clear the list entry; setting the dest ptr is enough */
    }
    dest->jmp_list_head = (uintptr_t)NULL;

    qemu_spin_unlock(&dest->jmp_lock);
}
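
/*
 * Lifecycle sketch (illustrative): when TB A is directly chained to TB B,
 * A->jmp_dest[n] points at B and A sits on B's jmp_list_head list.
 * Invalidating B walks that list, re-points A's epilogue at its own
 * "exit to main loop" stub via tb_reset_jump(A, n), and collapses
 * A->jmp_dest[n] down to the tag bit so that concurrent unchaining
 * observes a consistent value.
 */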

/*
 * In user-mode, call with mmap_lock held.
 * In !user-mode, if @rm_from_page_list is set, call with the TB's pages'
 * locks held.
 */
static void do_tb_phys_invalidate(TranslationBlock *tb, bool rm_from_page_list)
{
    CPUState *cpu;
    PageDesc *p;
    uint32_t h;
    tb_page_addr_t phys_pc;
    uint32_t orig_cflags = tb_cflags(tb);

    assert_memory_lock();

    /* make sure no further incoming jumps will be chained to this TB */
    qemu_spin_lock(&tb->jmp_lock);
    qatomic_set(&tb->cflags, tb->cflags | CF_INVALID);
    qemu_spin_unlock(&tb->jmp_lock);

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_hash_func(phys_pc, tb->pc, tb->flags, orig_cflags,
                     tb->trace_vcpu_dstate);
    if (!qht_remove(&tb_ctx.htable, tb, h)) {
        return;
    }

    /* remove the TB from the page list */
    if (rm_from_page_list) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(p, tb);
        invalidate_page_bitmap(p);
        if (tb->page_addr[1] != -1) {
            p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
            tb_page_remove(p, tb);
            invalidate_page_bitmap(p);
        }
    }

    /* remove the TB from the per-CPU jump caches */
    h = tb_jmp_cache_hash_func(tb->pc);
    CPU_FOREACH(cpu) {
        if (qatomic_read(&cpu->tb_jmp_cache[h]) == tb) {
            qatomic_set(&cpu->tb_jmp_cache[h], NULL);
        }
    }

    /* suppress this TB from the two jump lists */
    tb_remove_from_jmp_list(tb, 0);
    tb_remove_from_jmp_list(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb_jmp_unlink(tb);

    qatomic_set(&tcg_ctx->tb_phys_invalidate_count,
                tcg_ctx->tb_phys_invalidate_count + 1);
}

static void tb_phys_invalidate__locked(TranslationBlock *tb)
{
    qemu_thread_jit_write();
    do_tb_phys_invalidate(tb, true);
    qemu_thread_jit_execute();
}

/*
 * invalidate one TB
 *
 * Called with mmap_lock held in user-mode emulation
 */
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
{
    if (page_addr == -1 && tb->page_addr[0] != -1) {
        page_lock_tb(tb);
        do_tb_phys_invalidate(tb, true);
        page_unlock_tb(tb);
    } else {
        do_tb_phys_invalidate(tb, false);
    }
}

#ifdef CONFIG_SOFTMMU
/* call with @p->lock held */
static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    assert_page_locked(p);
    p->code_bitmap = bitmap_new(TARGET_PAGE_SIZE);

    PAGE_FOR_EACH_TB(p, tb, n) {
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE) {
                tb_end = TARGET_PAGE_SIZE;
            }
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        bitmap_set(p->code_bitmap, tb_start, tb_end - tb_start);
    }
}
#endif
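
/*
 * Worked example (illustrative): with 4 KiB pages, a 0x30-byte TB
 * starting at guest page offset 0xff0 sets bits [0xff0, 0xfff] on its
 * first page (n == 0, clipped at TARGET_PAGE_SIZE) and bits [0, 0x1f]
 * on its second page (n == 1).
 */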

/*
 * add the tb in the target page and protect it if necessary
 *
 * Called with mmap_lock held for user-mode emulation.
 * Called with @p->lock held in !user-mode.
 */
static inline void tb_page_add(PageDesc *p, TranslationBlock *tb,
                               unsigned int n, tb_page_addr_t page_addr)
{
#ifndef CONFIG_USER_ONLY
    bool page_already_protected;
#endif

    assert_page_locked(p);

    tb->page_addr[n] = page_addr;
    tb->page_next[n] = p->first_tb;
#ifndef CONFIG_USER_ONLY
    page_already_protected = p->first_tb != (uintptr_t)NULL;
#endif
    p->first_tb = (uintptr_t)tb | n;
    invalidate_page_bitmap(p);

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        for (addr = page_addr; addr < page_addr + qemu_host_page_size;
             addr += TARGET_PAGE_SIZE) {

            p2 = page_find(addr >> TARGET_PAGE_BITS);
            if (!p2) {
                continue;
            }
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
        }
        mprotect(g2h_untagged(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
        if (DEBUG_TB_INVALIDATE_GATE) {
            printf("protecting code page: 0x" TB_PAGE_ADDR_FMT "\n", page_addr);
        }
    }
#else
    /* if some code is already present, then the pages are already
       protected.  So we handle the case where only the first TB is
       allocated in a physical page */
    if (!page_already_protected) {
        tlb_protect_code(page_addr);
    }
#endif
}

/*
 * Add a new TB and link it to the physical page tables.  phys_page2 is
 * (-1) to indicate that only one page contains the TB.
 *
 * Called with mmap_lock held for user-mode emulation.
 *
 * Returns a pointer @tb, or a pointer to an existing TB that matches @tb.
 * Note that in !user-mode, another thread might have already added a TB
 * for the same block of guest code that @tb corresponds to.  In that case,
 * the caller should discard the original @tb, and use instead the returned TB.
 */
static TranslationBlock *
tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
             tb_page_addr_t phys_page2)
{
    PageDesc *p;
    PageDesc *p2 = NULL;
    void *existing_tb = NULL;
    uint32_t h;

    assert_memory_lock();
    tcg_debug_assert(!(tb->cflags & CF_INVALID));

    /*
     * Add the TB to the page list, acquiring first the pages's locks.
     * We keep the locks held until after inserting the TB in the hash table,
     * so that if the insertion fails we know for sure that the TBs are still
     * in the page lists.
     */
    page_lock_pair(&p, phys_pc, &p2, phys_page2, 1);
    tb_page_add(p, tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (p2) {
        tb_page_add(p2, tb, 1, phys_page2);
    } else {
        tb->page_addr[1] = -1;
    }

    /* add in the hash table */
    h = tb_hash_func(phys_pc, tb->pc, tb->flags, tb->cflags,
                     tb->trace_vcpu_dstate);
    qht_insert(&tb_ctx.htable, tb, h, &existing_tb);

    /* remove TB from the page(s) if we couldn't insert it */
    if (unlikely(existing_tb)) {
        tb_page_remove(p, tb);
        invalidate_page_bitmap(p);
        if (p2) {
            tb_page_remove(p2, tb);
            invalidate_page_bitmap(p2);
        }
        tb = existing_tb;
    }

    if (p2 && p2 != p) {
        page_unlock(p2);
    }
    page_unlock(p);

#ifdef CONFIG_USER_ONLY
    if (DEBUG_TB_CHECK_GATE) {
        tb_page_check();
    }
#endif
    return tb;
}

/* Called with mmap_lock held for user mode emulation.  */
TranslationBlock *tb_gen_code(CPUState *cpu,
                              target_ulong pc, target_ulong cs_base,
                              uint32_t flags, int cflags)
{
    CPUArchState *env = cpu->env_ptr;
    TranslationBlock *tb, *existing_tb;
    tb_page_addr_t phys_pc, phys_page2;
    target_ulong virt_page2;
    tcg_insn_unit *gen_code_buf;
    int gen_code_size, search_size, max_insns;
#ifdef CONFIG_PROFILER
    TCGProfile *prof = &tcg_ctx->prof;
    int64_t ti;
#endif

    assert_memory_lock();
    qemu_thread_jit_write();

    phys_pc = get_page_addr_code(env, pc);

    if (phys_pc == -1) {
        /* Generate a one-shot TB with 1 insn in it */
        cflags = (cflags & ~CF_COUNT_MASK) | CF_LAST_IO | 1;
    }

    max_insns = cflags & CF_COUNT_MASK;
    if (max_insns == 0) {
        max_insns = CF_COUNT_MASK;
    }
    if (max_insns > TCG_MAX_INSNS) {
        max_insns = TCG_MAX_INSNS;
    }
    if (cpu->singlestep_enabled || singlestep) {
        max_insns = 1;
    }

 buffer_overflow:
    tb = tcg_tb_alloc(tcg_ctx);
    if (unlikely(!tb)) {
        /* flush must be done */
        tb_flush(cpu);
        mmap_unlock();
        /* Make the execution loop process the flush as soon as possible.  */
        cpu->exception_index = EXCP_INTERRUPT;
        cpu_loop_exit(cpu);
    }

    gen_code_buf = tcg_ctx->code_gen_ptr;
    tb->tc.ptr = tcg_splitwx_to_rx(gen_code_buf);
    tb->pc = pc;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    tb->trace_vcpu_dstate = *cpu->trace_dstate;
    tcg_ctx->tb_cflags = cflags;
 tb_overflow:

#ifdef CONFIG_PROFILER
    /* includes aborted translations because of exceptions */
    qatomic_set(&prof->tb_count1, prof->tb_count1 + 1);
    ti = profile_getclock();
#endif

    gen_code_size = sigsetjmp(tcg_ctx->jmp_trans, 0);
    if (unlikely(gen_code_size != 0)) {
        goto error_return;
    }

    tcg_func_start(tcg_ctx);

    tcg_ctx->cpu = env_cpu(env);
    gen_intermediate_code(cpu, tb, max_insns);
    tcg_ctx->cpu = NULL;
    max_insns = tb->icount;

    trace_translate_block(tb, tb->pc, tb->tc.ptr);

    /* generate machine code */
    tb->jmp_reset_offset[0] = TB_JMP_RESET_OFFSET_INVALID;
    tb->jmp_reset_offset[1] = TB_JMP_RESET_OFFSET_INVALID;
    tcg_ctx->tb_jmp_reset_offset = tb->jmp_reset_offset;
    if (TCG_TARGET_HAS_direct_jump) {
        tcg_ctx->tb_jmp_insn_offset = tb->jmp_target_arg;
        tcg_ctx->tb_jmp_target_addr = NULL;
    } else {
        tcg_ctx->tb_jmp_insn_offset = NULL;
        tcg_ctx->tb_jmp_target_addr = tb->jmp_target_arg;
    }

#ifdef CONFIG_PROFILER
    qatomic_set(&prof->tb_count, prof->tb_count + 1);
    qatomic_set(&prof->interm_time,
                prof->interm_time + profile_getclock() - ti);
    ti = profile_getclock();
#endif

    gen_code_size = tcg_gen_code(tcg_ctx, tb);
    if (unlikely(gen_code_size < 0)) {
 error_return:
        switch (gen_code_size) {
        case -1:
            /*
             * Overflow of code_gen_buffer, or the current slice of it.
             *
             * TODO: We don't need to re-do gen_intermediate_code, nor
             * should this re-do the tcg optimization currently hidden
             * inside tcg_gen_code.  All that should be required is to
             * flush the TBs, allocate a new TB, re-initialize it per
             * above, and re-do the actual code generation.
             */
            qemu_log_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT,
                          "Restarting code generation for "
                          "code_gen_buffer overflow\n");
            goto buffer_overflow;

        case -2:
            /*
             * The code generated for the TranslationBlock is too large.
             * The maximum size allowed by the unwind info is 64k.
             * There may be stricter constraints from relocations
             * in the tcg backend.
             *
             * Try again with half as many insns -- it's impossible
             * to request 0 insns, so we'd assert for that case.
             */
            assert(max_insns > 1);
            max_insns /= 2;
            qemu_log_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT,
                          "Restarting code generation with "
                          "smaller translation block (max %d insns)\n",
                          max_insns);
            goto tb_overflow;

        default:
            g_assert_not_reached();
        }
    }
    search_size = encode_search(tb, (void *)gen_code_buf + gen_code_size);
    if (unlikely(search_size < 0)) {
        goto buffer_overflow;
    }
    tb->tc.size = gen_code_size;

#ifdef CONFIG_PROFILER
    qatomic_set(&prof->code_time, prof->code_time + profile_getclock() - ti);
    qatomic_set(&prof->code_in_len, prof->code_in_len + tb->size);
    qatomic_set(&prof->code_out_len, prof->code_out_len + gen_code_size);
    qatomic_set(&prof->search_out_len, prof->search_out_len + search_size);
#endif

#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_OUT_ASM) &&
        qemu_log_in_addr_range(tb->pc)) {
        FILE *logfile = qemu_log_lock();
        int code_size, data_size;
        const tcg_target_ulong *rx_data_gen_ptr;
        size_t chunk_start;
        int insn = 0;

        if (tcg_ctx->data_gen_ptr) {
            rx_data_gen_ptr = tcg_splitwx_to_rx(tcg_ctx->data_gen_ptr);
            code_size = (const void *)rx_data_gen_ptr - tb->tc.ptr;
            data_size = gen_code_size - code_size;
        } else {
            rx_data_gen_ptr = 0;
            code_size = gen_code_size;
            data_size = 0;
        }

        /* Dump header and the first instruction */
        qemu_log("OUT: [size=%d]\n", gen_code_size);
        qemu_log("  -- guest addr 0x" TARGET_FMT_lx " + tb prologue\n",
                 tcg_ctx->gen_insn_data[insn][0]);
        chunk_start = tcg_ctx->gen_insn_end_off[insn];
        log_disas(tb->tc.ptr, chunk_start);

        /*
         * Dump each instruction chunk, wrapping up empty chunks into
         * the next instruction.  The whole array is offset so the
         * first entry is the beginning of the next instruction.
         */
        while (insn < tb->icount) {
            size_t chunk_end = tcg_ctx->gen_insn_end_off[insn];
            if (chunk_end > chunk_start) {
                qemu_log("  -- guest addr 0x" TARGET_FMT_lx "\n",
                         tcg_ctx->gen_insn_data[insn][0]);
                log_disas(tb->tc.ptr + chunk_start, chunk_end - chunk_start);
                chunk_start = chunk_end;
            }
            insn++;
        }

        if (chunk_start < code_size) {
            qemu_log("  -- tb slow paths + alignment\n");
            log_disas(tb->tc.ptr + chunk_start, code_size - chunk_start);
        }

        /* Finally dump any data we may have after the block */
        if (data_size) {
            int i;
            qemu_log("  data: [size=%d]\n", data_size);
            for (i = 0; i < data_size / sizeof(tcg_target_ulong); i++) {
                qemu_log("0x%08" PRIxPTR ":  .quad  0x%" TCG_PRIlx "\n",
                         (uintptr_t)&rx_data_gen_ptr[i], rx_data_gen_ptr[i]);
            }
        }
        qemu_log("\n");
        qemu_log_flush();
        qemu_log_unlock(logfile);
    }
#endif

    qatomic_set(&tcg_ctx->code_gen_ptr, (void *)
        ROUND_UP((uintptr_t)gen_code_buf + gen_code_size + search_size,
                 CODE_GEN_ALIGN));

    /* init jump list */
    qemu_spin_init(&tb->jmp_lock);
    tb->jmp_list_head = (uintptr_t)NULL;
    tb->jmp_list_next[0] = (uintptr_t)NULL;
    tb->jmp_list_next[1] = (uintptr_t)NULL;
    tb->jmp_dest[0] = (uintptr_t)NULL;
    tb->jmp_dest[1] = (uintptr_t)NULL;

    /* init original jump addresses which have been set during tcg_gen_code() */
    if (tb->jmp_reset_offset[0] != TB_JMP_RESET_OFFSET_INVALID) {
        tb_reset_jump(tb, 0);
    }
    if (tb->jmp_reset_offset[1] != TB_JMP_RESET_OFFSET_INVALID) {
        tb_reset_jump(tb, 1);
    }

    /*
     * If the TB is not associated with a physical RAM page then
     * it must be a temporary one-insn TB.  We have nothing to do
     * except fill in the page_addr[], and must not insert the TB
     * into the structures that are searched for matching TBs.
     */
    if (phys_pc == -1) {
        tb->page_addr[0] = tb->page_addr[1] = -1;
        return tb;
    }

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_page_addr_code(env, virt_page2);
    }
    /*
     * No explicit memory barrier is required -- tb_link_page() makes the
     * TB visible in a consistent state.
     */
    existing_tb = tb_link_page(tb, phys_pc, phys_page2);
    /* if the TB already exists, discard what we just translated */
    if (unlikely(existing_tb != tb)) {
        uintptr_t orig_aligned = (uintptr_t)gen_code_buf;

        orig_aligned -= ROUND_UP(sizeof(*tb), qemu_icache_linesize);
        qatomic_set(&tcg_ctx->code_gen_ptr, (void *)orig_aligned);
        tb_destroy(tb);
        return existing_tb;
    }
    tcg_tb_insert(tb);
    return tb;
}
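
/*
 * Caller-side sketch (not compiled; mirrors tb_find() in cpu-exec.c):
 * translation runs under mmap_lock, and the TB actually returned may be
 * a pre-existing equivalent rather than the freshly generated one.
 */
#if 0
    mmap_lock();
    tb = tb_gen_code(cpu, pc, cs_base, flags, cflags);
    mmap_unlock();
    /* Cache the result for the fast path. */
    qatomic_set(&cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)], tb);
#endif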

/*
 * @p must be non-NULL.
 * user-mode: call with mmap_lock held.
 * !user-mode: call with all @pages locked.
 */
static void
tb_invalidate_phys_page_range__locked(struct page_collection *pages,
                                      PageDesc *p, tb_page_addr_t start,
                                      tb_page_addr_t end,
                                      uintptr_t retaddr)
{
    TranslationBlock *tb;
    tb_page_addr_t tb_start, tb_end;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    CPUState *cpu = current_cpu;
    CPUArchState *env = NULL;
    bool current_tb_not_found = retaddr != 0;
    bool current_tb_modified = false;
    TranslationBlock *current_tb = NULL;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    uint32_t current_flags = 0;
#endif /* TARGET_HAS_PRECISE_SMC */

    assert_page_locked(p);

#if defined(TARGET_HAS_PRECISE_SMC)
    if (cpu != NULL) {
        env = cpu->env_ptr;
    }
#endif

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all
       the code */
    PAGE_FOR_EACH_TB(p, tb, n) {
        assert_page_locked(p);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = false;
                /* now we have a real cpu fault */
                current_tb = tcg_tb_lookup(retaddr);
            }
            if (current_tb == tb &&
                (tb_cflags(current_tb) & CF_COUNT_MASK) != 1) {
                /*
                 * If we are modifying the current TB, we must stop
                 * its execution.  We could be more precise by checking
                 * that the modification is after the current PC, but it
                 * would require a specialized function to partially
                 * restore the CPU state.
                 */
                current_tb_modified = true;
                cpu_restore_state_from_tb(cpu, current_tb, retaddr, true);
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                     &current_flags);
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            tb_phys_invalidate__locked(tb);
        }
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        tlb_unprotect_code(start);
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        page_collection_unlock(pages);
        /* Force execution of one insn next time.  */
        cpu->cflags_next_tb = 1 | curr_cflags(cpu);
        mmap_unlock();
        cpu_loop_exit_noexc(cpu);
    }
#endif
}

/*
 * Invalidate all TBs which intersect with the target physical address range
 * [start;end[.  NOTE: start and end must refer to the *same* physical page.
 *
 * Called with mmap_lock held for user-mode emulation
 */
void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end)
{
    struct page_collection *pages;
    PageDesc *p;

    assert_memory_lock();

    p = page_find(start >> TARGET_PAGE_BITS);
    if (p == NULL) {
        return;
    }
    pages = page_collection_lock(start, end);
    tb_invalidate_phys_page_range__locked(pages, p, start, end, 0);
    page_collection_unlock(pages);
}

/*
 * Invalidate all TBs which intersect with the target physical address range
 * [start;end[.  NOTE: start and end may refer to *different* physical pages.
 *
 * Called with mmap_lock held for user-mode emulation.
 */
#ifdef CONFIG_SOFTMMU
void tb_invalidate_phys_range(ram_addr_t start, ram_addr_t end)
#else
void tb_invalidate_phys_range(target_ulong start, target_ulong end)
#endif
{
    struct page_collection *pages;
    tb_page_addr_t next;

    assert_memory_lock();

    pages = page_collection_lock(start, end);
    for (next = (start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
         start < end;
         start = next, next += TARGET_PAGE_SIZE) {
        PageDesc *pd = page_find(start >> TARGET_PAGE_BITS);
        tb_page_addr_t bound = MIN(next, end);

        if (pd == NULL) {
            continue;
        }
        tb_invalidate_phys_page_range__locked(pages, pd, start, bound, 0);
    }
    page_collection_unlock(pages);
}

#ifdef CONFIG_SOFTMMU
/* len must be <= 8 and start must be a multiple of len.
 * Called via softmmu_template.h when code areas are written to with
 * iothread mutex not held.
 *
 * Call with all @pages in the range [@start, @start + @len[ locked.
 */
void tb_invalidate_phys_page_fast(struct page_collection *pages,
                                  tb_page_addr_t start, int len,
                                  uintptr_t retaddr)
{
    PageDesc *p;

    assert_memory_lock();

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p) {
        return;
    }

    assert_page_locked(p);
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD) {
        build_page_bitmap(p);
    }
    if (p->code_bitmap) {
        unsigned int nr;
        unsigned long b;

        nr = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG - 1));
        if (b & ((1 << len) - 1)) {
            goto do_invalidate;
        }
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range__locked(pages, p, start, start + len,
                                              retaddr);
    }
}
#else
/* Called with mmap_lock held.  If @pc is not 0, it is the host PC of the
 * store that triggered the invalidation.  Returns true if the caller
 * needs to abort execution of the current TB (because it was modified
 * by this store and the guest CPU has precise-SMC semantics).
 */
static bool tb_invalidate_phys_page(tb_page_addr_t addr, uintptr_t pc)
{
    TranslationBlock *tb;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    TranslationBlock *current_tb = NULL;
    CPUState *cpu = current_cpu;
    CPUArchState *env = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    uint32_t current_flags = 0;
#endif

    assert_memory_lock();

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        return false;
    }

#ifdef TARGET_HAS_PRECISE_SMC
    if (p->first_tb && pc != 0) {
        current_tb = tcg_tb_lookup(pc);
    }
    if (cpu != NULL) {
        env = cpu->env_ptr;
    }
#endif
    assert_page_locked(p);
    PAGE_FOR_EACH_TB(p, tb, n) {
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            (tb_cflags(current_tb) & CF_COUNT_MASK) != 1) {
            /*
             * If we are modifying the current TB, we must stop its execution.
             * We could be more precise by checking that the modification is
             * after the current PC, but it would require a specialized
             * function to partially restore the CPU state.
             */
            current_tb_modified = 1;
            cpu_restore_state_from_tb(cpu, current_tb, pc, true);
            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                 &current_flags);
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
    }
    p->first_tb = (uintptr_t)NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* Force execution of one insn next time.  */
        cpu->cflags_next_tb = 1 | curr_cflags(cpu);
        return true;
    }
#endif

    return false;
}
#endif

/* user-mode: call with mmap_lock held */
void tb_check_watchpoint(CPUState *cpu, uintptr_t retaddr)
{
    TranslationBlock *tb;

    assert_memory_lock();

    tb = tcg_tb_lookup(retaddr);
    if (tb) {
        /* We can use retranslation to find the PC.  */
        cpu_restore_state_from_tb(cpu, tb, retaddr, true);
        tb_phys_invalidate(tb, -1);
    } else {
        /* The exception probably happened in a helper.  The CPU state
           should have been saved before calling it; fetch the PC from
           there.  */
        CPUArchState *env = cpu->env_ptr;
        target_ulong pc, cs_base;
        tb_page_addr_t addr;
        uint32_t flags;

        cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
        addr = get_page_addr_code(env, pc);
        if (addr != -1) {
            tb_invalidate_phys_range(addr, addr + 1);
        }
    }
}

#ifndef CONFIG_USER_ONLY
/* in deterministic execution mode, instructions doing device I/Os
 * must be at the end of the TB.
 *
 * Called by softmmu_template.h, with iothread mutex not held.
 */
void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr)
{
    TranslationBlock *tb;
    CPUClass *cc;
    uint32_t n;

    tb = tcg_tb_lookup(retaddr);
    if (!tb) {
        cpu_abort(cpu, "cpu_io_recompile: could not find TB for pc=%p",
                  (void *)retaddr);
    }
    cpu_restore_state_from_tb(cpu, tb, retaddr, true);

    /*
     * Some guests must re-execute the branch when re-executing a delay
     * slot instruction.  When this is the case, adjust icount and N
     * to account for the re-execution of the branch.
     */
    n = 1;
    cc = CPU_GET_CLASS(cpu);
    if (cc->tcg_ops->io_recompile_replay_branch &&
        cc->tcg_ops->io_recompile_replay_branch(cpu, tb)) {
        cpu_neg(cpu)->icount_decr.u16.low++;
        n = 2;
    }

    /*
     * Exit the loop and potentially generate a new TB executing the
     * I/O insns.  We also limit instrumentation to memory operations
     * only (which execute after completion) so we don't double
     * instrument the instruction.
     */
    cpu->cflags_next_tb = curr_cflags(cpu) | CF_MEMI_ONLY | CF_LAST_IO | n;

    qemu_log_mask_and_addr(CPU_LOG_EXEC, tb->pc,
                           "cpu_io_recompile: rewound execution of TB to "
                           TARGET_FMT_lx "\n", tb->pc);

    cpu_loop_exit_noexc(cpu);
}
2447
2448static void print_qht_statistics(struct qht_stats hst)
2449{
2450 uint32_t hgram_opts;
2451 size_t hgram_bins;
2452 char *hgram;
2453
2454 if (!hst.head_buckets) {
2455 return;
2456 }
2457 qemu_printf("TB hash buckets %zu/%zu (%0.2f%% head buckets used)\n",
2458 hst.used_head_buckets, hst.head_buckets,
2459 (double)hst.used_head_buckets / hst.head_buckets * 100);
2460
2461 hgram_opts = QDIST_PR_BORDER | QDIST_PR_LABELS;
2462 hgram_opts |= QDIST_PR_100X | QDIST_PR_PERCENT;
2463 if (qdist_xmax(&hst.occupancy) - qdist_xmin(&hst.occupancy) == 1) {
2464 hgram_opts |= QDIST_PR_NODECIMAL;
2465 }
2466 hgram = qdist_pr(&hst.occupancy, 10, hgram_opts);
2467 qemu_printf("TB hash occupancy %0.2f%% avg chain occ. Histogram: %s\n",
2468 qdist_avg(&hst.occupancy) * 100, hgram);
2469 g_free(hgram);
2470
2471 hgram_opts = QDIST_PR_BORDER | QDIST_PR_LABELS;
2472 hgram_bins = qdist_xmax(&hst.chain) - qdist_xmin(&hst.chain);
2473 if (hgram_bins > 10) {
2474 hgram_bins = 10;
2475 } else {
2476 hgram_bins = 0;
2477 hgram_opts |= QDIST_PR_NODECIMAL | QDIST_PR_NOBINRANGE;
2478 }
2479 hgram = qdist_pr(&hst.chain, hgram_bins, hgram_opts);
2480 qemu_printf("TB hash avg chain %0.3f buckets. Histogram: %s\n",
2481 qdist_avg(&hst.chain), hgram);
2482 g_free(hgram);
2483}
2484
2485struct tb_tree_stats {
2486 size_t nb_tbs;
2487 size_t host_size;
2488 size_t target_size;
2489 size_t max_target_size;
2490 size_t direct_jmp_count;
2491 size_t direct_jmp2_count;
2492 size_t cross_page;
2493};
2494
2495static gboolean tb_tree_stats_iter(gpointer key, gpointer value, gpointer data)
2496{
2497 const TranslationBlock *tb = value;
2498 struct tb_tree_stats *tst = data;
2499
2500 tst->nb_tbs++;
2501 tst->host_size += tb->tc.size;
2502 tst->target_size += tb->size;
2503 if (tb->size > tst->max_target_size) {
2504 tst->max_target_size = tb->size;
2505 }
2506 if (tb->page_addr[1] != -1) {
2507 tst->cross_page++;
2508 }
2509 if (tb->jmp_reset_offset[0] != TB_JMP_RESET_OFFSET_INVALID) {
2510 tst->direct_jmp_count++;
2511 if (tb->jmp_reset_offset[1] != TB_JMP_RESET_OFFSET_INVALID) {
2512 tst->direct_jmp2_count++;
2513 }
2514 }
2515 return false;
2516}
2517
2518void dump_exec_info(void)
2519{
2520 struct tb_tree_stats tst = {};
2521 struct qht_stats hst;
2522 size_t nb_tbs, flush_full, flush_part, flush_elide;
2523
2524 tcg_tb_foreach(tb_tree_stats_iter, &tst);
2525 nb_tbs = tst.nb_tbs;
2526
2527 qemu_printf("Translation buffer state:\n");
2528
2529
2530
2531
2532
    qemu_printf("gen code size       %zu/%zu\n",
                tcg_code_size(), tcg_code_capacity());
    qemu_printf("TB count            %zu\n", nb_tbs);
    qemu_printf("TB avg target size  %zu max=%zu bytes\n",
                nb_tbs ? tst.target_size / nb_tbs : 0,
                tst.max_target_size);
    qemu_printf("TB avg host size    %zu bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? tst.host_size / nb_tbs : 0,
                tst.target_size ? (double)tst.host_size / tst.target_size : 0);
    qemu_printf("cross page TB count %zu (%zu%%)\n", tst.cross_page,
                nb_tbs ? (tst.cross_page * 100) / nb_tbs : 0);
    qemu_printf("direct jump count   %zu (%zu%%) (2 jumps=%zu %zu%%)\n",
                tst.direct_jmp_count,
                nb_tbs ? (tst.direct_jmp_count * 100) / nb_tbs : 0,
                tst.direct_jmp2_count,
                nb_tbs ? (tst.direct_jmp2_count * 100) / nb_tbs : 0);

    qht_statistics_init(&tb_ctx.htable, &hst);
    print_qht_statistics(hst);
    qht_statistics_destroy(&hst);

    qemu_printf("\nStatistics:\n");
    qemu_printf("TB flush count      %u\n",
                qatomic_read(&tb_ctx.tb_flush_count));
    qemu_printf("TB invalidate count %zu\n",
                tcg_tb_phys_invalidate_count());

    tlb_flush_counts(&flush_full, &flush_part, &flush_elide);
    qemu_printf("TLB full flushes    %zu\n", flush_full);
    qemu_printf("TLB partial flushes %zu\n", flush_part);
    qemu_printf("TLB elided flushes  %zu\n", flush_elide);
    tcg_dump_info();
}

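/*
 * Dump per-opcode execution counts collected by TCG
 * (the "info opcount" monitor command).
 */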
void dump_opcount_info(void)
{
    tcg_dump_op_count();
}

#else /* CONFIG_USER_ONLY */

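/*
 * User-mode version of cpu_interrupt: record the request and kick the
 * vCPU out of the TCG execution loop by making icount_decr go negative.
 */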
void cpu_interrupt(CPUState *cpu, int mask)
{
    g_assert(qemu_mutex_iothread_locked());
    cpu->interrupt_request |= mask;
    qatomic_set(&cpu_neg(cpu)->icount_decr.u16.high, -1);
}

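/*
 * Walks guest process memory "regions" one by one
 * and calls callback function 'fn' for each region.
 */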
struct walk_memory_regions_data {
    walk_memory_regions_fn fn;
    void *priv;
    target_ulong start;
    int prot;
};

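/* Flush the accumulated region [data->start, end), then start a new one. */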
static int walk_memory_regions_end(struct walk_memory_regions_data *data,
                                   target_ulong end, int new_prot)
{
    if (data->start != -1u) {
        int rc = data->fn(data->priv, data->start, end, data->prot);
        if (rc != 0) {
            return rc;
        }
    }

    data->start = (new_prot ? end : -1u);
    data->prot = new_prot;

    return 0;
}

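/* Recursively walk one level of the l1_map radix tree. */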
static int walk_memory_regions_1(struct walk_memory_regions_data *data,
                                 target_ulong base, int level, void **lp)
{
    target_ulong pa;
    int i, rc;

    if (*lp == NULL) {
        return walk_memory_regions_end(data, base, 0);
    }

    if (level == 0) {
        PageDesc *pd = *lp;

        for (i = 0; i < V_L2_SIZE; ++i) {
            int prot = pd[i].flags;

            pa = base | (i << TARGET_PAGE_BITS);
            if (prot != data->prot) {
                rc = walk_memory_regions_end(data, pa, prot);
                if (rc != 0) {
                    return rc;
                }
            }
        }
    } else {
        void **pp = *lp;

        for (i = 0; i < V_L2_SIZE; ++i) {
            pa = base | ((target_ulong)i <<
                (TARGET_PAGE_BITS + V_L2_BITS * level));
            rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
            if (rc != 0) {
                return rc;
            }
        }
    }

    return 0;
}

int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
{
    struct walk_memory_regions_data data;
    uintptr_t i, l1_sz = v_l1_size;

    data.fn = fn;
    data.priv = priv;
    data.start = -1u;
    data.prot = 0;

    for (i = 0; i < l1_sz; i++) {
        target_ulong base = i << (v_l1_shift + TARGET_PAGE_BITS);
        int rc = walk_memory_regions_1(&data, base, v_l2_levels, l1_map + i);
        if (rc != 0) {
            return rc;
        }
    }

    return walk_memory_regions_end(&data, 0, 0);
}

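/* walk_memory_regions() callback: print one region as "start-end size prot". */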
static int dump_region(void *priv, target_ulong start,
                       target_ulong end, unsigned long prot)
{
    FILE *f = (FILE *)priv;

    (void) fprintf(f, TARGET_FMT_lx"-"TARGET_FMT_lx
                   " "TARGET_FMT_lx" %c%c%c\n",
                   start, end, end - start,
                   ((prot & PAGE_READ) ? 'r' : '-'),
                   ((prot & PAGE_WRITE) ? 'w' : '-'),
                   ((prot & PAGE_EXEC) ? 'x' : '-'));

    return 0;
}

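/*
 * Dump guest memory mappings to 'f', one "start-end size prot" line
 * per region, preceded by a header line.
 */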
void page_dump(FILE *f)
{
    const int length = sizeof(target_ulong) * 2;

    (void) fprintf(f, "%-*s %-*s %-*s %s\n",
                   length, "start", length, "end", length, "size", "prot");
    walk_memory_regions(f, dump_region);
}

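/* Return the PAGE_* flags of the page containing 'address', or 0 if unmapped. */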
int page_get_flags(target_ulong address)
{
    PageDesc *p;

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p) {
        return 0;
    }
    return p->flags;
}

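/*
 * Modify the flags of a page and invalidate the code if necessary.
 * The flag PAGE_WRITE_ORG is positioned automatically depending
 * on PAGE_WRITE.  The mmap_lock should already be held.
 */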
void page_set_flags(target_ulong start, target_ulong end, int flags)
{
    target_ulong addr, len;
    bool reset_target_data;

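    /*
     * This function should never be called with addresses outside the
     * guest address space.  If this assert fires, it probably indicates
     * a missing call to h2g_valid.
     */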
    assert(end - 1 <= GUEST_ADDR_MAX);
    assert(start < end);
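    /* Only set PAGE_ANON with new mappings. */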
    assert(!(flags & PAGE_ANON) || (flags & PAGE_RESET));
    assert_memory_lock();

    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    if (flags & PAGE_WRITE) {
        flags |= PAGE_WRITE_ORG;
    }
    reset_target_data = !(flags & PAGE_VALID) || (flags & PAGE_RESET);
    flags &= ~PAGE_RESET;

    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);

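        /*
         * If the write protection bit is set, then we invalidate
         * the code inside.
         */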
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr, 0);
        }
        if (reset_target_data) {
            g_free(p->target_data);
            p->target_data = NULL;
            p->flags = flags;
        } else {
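            /* Using mprotect on a page does not change sticky bits. */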
            p->flags = (p->flags & PAGE_ANON) | flags;
        }
    }
}

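/* Return the per-page target data for 'address', or NULL if none is set. */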
void *page_get_target_data(target_ulong address)
{
    PageDesc *p = page_find(address >> TARGET_PAGE_BITS);
    return p ? p->target_data : NULL;
}

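/*
 * Return the target data for 'address', allocating 'size' zeroed bytes on
 * first use; return NULL if the page is not valid.
 */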
void *page_alloc_target_data(target_ulong address, size_t size)
{
    PageDesc *p = page_find(address >> TARGET_PAGE_BITS);
    void *ret = NULL;

    if (p->flags & PAGE_VALID) {
        ret = p->target_data;
        if (!ret) {
            p->target_data = ret = g_malloc0(size);
        }
    }
    return ret;
}

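/*
 * Check that the pages in [start, start + len) may be accessed with the
 * protection in 'flags', unprotecting pages that only lost PAGE_WRITE
 * because they contain translated code.  Return 0 on success, -1 on failure.
 */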
int page_check_range(target_ulong start, target_ulong len, int flags)
{
    PageDesc *p;
    target_ulong end;
    target_ulong addr;

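    /*
     * This function should never be called with addresses outside the
     * guest address space.  If this assert fires, it probably indicates
     * a missing call to h2g_valid.
     */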
    if (TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS) {
        assert(start < ((target_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
    }

    if (len == 0) {
        return 0;
    }
    if (start + len - 1 < start) {
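        /* We've wrapped around. */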
        return -1;
    }

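    /* Must be done before we lose bits in the next step. */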
    end = TARGET_PAGE_ALIGN(start + len);
    start = start & TARGET_PAGE_MASK;

    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        p = page_find(addr >> TARGET_PAGE_BITS);
        if (!p) {
            return -1;
        }
        if (!(p->flags & PAGE_VALID)) {
            return -1;
        }

        if ((flags & PAGE_READ) && !(p->flags & PAGE_READ)) {
            return -1;
        }
        if (flags & PAGE_WRITE) {
            if (!(p->flags & PAGE_WRITE_ORG)) {
                return -1;
            }
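            /*
             * Unprotect the page if it was put read-only because it
             * contains translated code.
             */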
            if (!(p->flags & PAGE_WRITE)) {
                if (!page_unprotect(addr, 0)) {
                    return -1;
                }
            }
        }
    }
    return 0;
}

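/*
 * Called from the signal handler: invalidate the code and unprotect the
 * page.  Return 0 if the fault was not handled, 1 if it was handled, and
 * 2 if it was handled but the caller must cause the TB to be immediately
 * exited.  (We can only return 2 if the 'pc' argument is non-zero.)
 */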
int page_unprotect(target_ulong address, uintptr_t pc)
{
    unsigned int prot;
    bool current_tb_invalidated;
    PageDesc *p;
    target_ulong host_start, host_end, addr;

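    /*
     * Technically this isn't safe inside a signal handler.  However we
     * know this only ever happens in a synchronous SEGV handler, so in
     * practice it seems to be ok.
     */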
    mmap_lock();

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p) {
        mmap_unlock();
        return 0;
    }

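    /*
     * If the page was really writable, then we change its protection
     * back to writable.
     */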
    if (p->flags & PAGE_WRITE_ORG) {
        current_tb_invalidated = false;
        if (p->flags & PAGE_WRITE) {
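            /*
             * If the page is actually marked WRITE then assume this is because
             * this thread raced with another one which got here first and
             * set the page to PAGE_WRITE and did the TB invalidate for us.
             */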
#ifdef TARGET_HAS_PRECISE_SMC
            TranslationBlock *current_tb = tcg_tb_lookup(pc);
            if (current_tb) {
                current_tb_invalidated = tb_cflags(current_tb) & CF_INVALID;
            }
#endif
        } else {
            host_start = address & qemu_host_page_mask;
            host_end = host_start + qemu_host_page_size;

            prot = 0;
            for (addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE) {
                p = page_find(addr >> TARGET_PAGE_BITS);
                p->flags |= PAGE_WRITE;
                prot |= p->flags;

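                /*
                 * Since the content will be modified, we must invalidate
                 * the corresponding translated code.
                 */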
                current_tb_invalidated |= tb_invalidate_phys_page(addr, pc);
#ifdef CONFIG_USER_ONLY
                if (DEBUG_TB_CHECK_GATE) {
                    tb_invalidate_check(addr);
                }
#endif
            }
            mprotect((void *)g2h_untagged(host_start), qemu_host_page_size,
                     prot & PAGE_BITS);
        }
        mmap_unlock();
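        /* If the current TB was invalidated, return to the main loop. */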
        return current_tb_invalidated ? 2 : 1;
    }
    mmap_unlock();
    return 0;
}
#endif /* CONFIG_USER_ONLY */

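/* This is a wrapper for common code that can not use CONFIG_SOFTMMU */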
void tcg_flush_softmmu_tlb(CPUState *cs)
{
#ifdef CONFIG_SOFTMMU
    tlb_flush(cs);
#endif
}