/*
 *  Host code generation
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#ifdef _WIN32
#include <windows.h>
#endif
#include "qemu/osdep.h"

#include "qemu-common.h"
#define NO_CPU_IO_DEFS
#include "cpu.h"
#include "trace.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg.h"
#if defined(CONFIG_USER_ONLY)
#include "qemu.h"
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
#include <sys/param.h>
#if __FreeBSD_version >= 700104
#define HAVE_KINFO_GETVMMAP
#define sigqueue sigqueue_freebsd
#include <sys/proc.h>
#include <machine/profile.h>
#define _KERNEL
#include <sys/user.h>
#undef _KERNEL
#undef sigqueue
#include <libutil.h>
#endif
#endif
#else
#include "exec/ram_addr.h"
#endif

#include "exec/cputlb.h"
#include "exec/tb-hash.h"
#include "translate-all.h"
#include "qemu/bitmap.h"
#include "qemu/error-report.h"
#include "qemu/timer.h"
#include "qemu/main-loop.h"
#include "exec/log.h"
#include "sysemu/cpus.h"

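/* #define DEBUG_TB_INVALIDATE */
/* #define DEBUG_TB_FLUSH */
/* make various TB consistency checks */
/* #define DEBUG_TB_CHECK */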
#ifdef DEBUG_TB_INVALIDATE
#define DEBUG_TB_INVALIDATE_GATE 1
#else
#define DEBUG_TB_INVALIDATE_GATE 0
#endif

#ifdef DEBUG_TB_FLUSH
#define DEBUG_TB_FLUSH_GATE 1
#else
#define DEBUG_TB_FLUSH_GATE 0
#endif

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif

#ifdef DEBUG_TB_CHECK
#define DEBUG_TB_CHECK_GATE 1
#else
#define DEBUG_TB_CHECK_GATE 0
#endif

/* Access to the various translations structures need to be serialised via
 * locks for consistency.
 * In user-mode emulation access to the memory-related structures is
 * protected with mmap_lock.
 * In !user-mode we use per-page locks.
 */
#ifdef CONFIG_SOFTMMU
#define assert_memory_lock()
#else
#define assert_memory_lock() tcg_debug_assert(have_mmap_lock())
#endif

#define SMC_BITMAP_USE_THRESHOLD 10

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    uintptr_t first_tb;
#ifdef CONFIG_SOFTMMU
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned long *code_bitmap;
    unsigned int code_write_count;
#else
    unsigned long flags;
#endif
#ifndef CONFIG_USER_ONLY
    QemuSpin lock;
#endif
} PageDesc;

/**
 * struct page_entry - page descriptor entry
 * @pd:     pointer to the &struct PageDesc of the page this entry represents
 * @index:  page index of the page
 * @locked: whether the page is locked
 *
 * This struct helps us keep track of the locked state of a page, without
 * bloating &struct PageDesc.
 *
 * A page lock protects accesses to all fields of &struct PageDesc.
 *
 * See also: &struct page_collection.
 */
struct page_entry {
    PageDesc *pd;
    tb_page_addr_t index;
    bool locked;
};

/**
 * struct page_collection - tracks a set of pages (i.e. &struct page_entry's)
 * @tree:   Binary search tree (BST) of the pages, with key == page index
 * @max:    Pointer to the page in @tree with the highest page index
 *
 * To avoid deadlock we lock pages in ascending order of page index.
 * When operating on a set of pages, we need to keep track of them so that
 * we can lock them in order and also unlock them later. For this we collect
 * pages (i.e. &struct page_entry's) in a binary search @tree. Given that the
 * @tree implementation we use does not provide an O(1) operation to obtain
 * the highest-ranked element, we use @max to keep track of the inserted page
 * with the highest index. This is valuable because if a page is not in the
 * tree and its index is higher than @max's, then we can lock it without
 * breaking the locking order rule.
 *
 * See also: page_collection_lock().
 */
struct page_collection {
    GTree *tree;
    struct page_entry *max;
};

/* list iterators for lists of tagged pointers in TranslationBlock */
#define TB_FOR_EACH_TAGGED(head, tb, n, field)                          \
    for (n = (head) & 1, tb = (TranslationBlock *)((head) & ~1);        \
         tb; tb = (TranslationBlock *)tb->field[n], n = (uintptr_t)tb & 1, \
             tb = (TranslationBlock *)((uintptr_t)tb & ~1))

#define PAGE_FOR_EACH_TB(pagedesc, tb, n)                       \
    TB_FOR_EACH_TAGGED((pagedesc)->first_tb, tb, n, page_next)

#define TB_FOR_EACH_JMP(head_tb, tb, n)                                 \
    TB_FOR_EACH_TAGGED((head_tb)->jmp_list_head, tb, n, jmp_list_next)

/* In system mode we want L1_MAP to be based on ram offsets,
   while in user mode we want it to be based on virtual addresses.  */
#if !defined(CONFIG_USER_ONLY)
#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
# define L1_MAP_ADDR_SPACE_BITS HOST_LONG_BITS
#else
# define L1_MAP_ADDR_SPACE_BITS TARGET_PHYS_ADDR_SPACE_BITS
#endif
#else
# define L1_MAP_ADDR_SPACE_BITS TARGET_VIRT_ADDR_SPACE_BITS
#endif

/* Size of the L2 (and L3, etc) page tables.  */
#define V_L2_BITS 10
#define V_L2_SIZE (1 << V_L2_BITS)

/* Make sure all possible CPU event bits fit in tb->trace_vcpu_dstate */
QEMU_BUILD_BUG_ON(CPU_TRACE_DSTATE_MAX_EVENTS >
                  sizeof_field(TranslationBlock, trace_vcpu_dstate)
                  * BITS_PER_BYTE);

/*
 * L1 Mapping properties
 */
static int v_l1_size;
static int v_l1_shift;
static int v_l2_levels;

/* The bottom level has pointers to PageDesc, and is indexed by
 * anything from 4 to (V_L2_BITS + 3) bits.
 */
#define V_L1_MIN_BITS 4
#define V_L1_MAX_BITS (V_L2_BITS + 3)
#define V_L1_MAX_SIZE (1 << V_L1_MAX_BITS)

static void *l1_map[V_L1_MAX_SIZE];

/* code generation context */
TCGContext tcg_init_ctx;
__thread TCGContext *tcg_ctx;
TBContext tb_ctx;
bool parallel_cpus;

static void page_table_config_init(void)
{
    uint32_t v_l1_bits;

    assert(TARGET_PAGE_BITS);
    /* Split the index bits: the remainder after the L2 levels goes to L1,
       topped up by one more L2 level's worth if it would be too small.  */
    v_l1_bits = (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % V_L2_BITS;
    if (v_l1_bits < V_L1_MIN_BITS) {
        v_l1_bits += V_L2_BITS;
    }

    v_l1_size = 1 << v_l1_bits;
    v_l1_shift = L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - v_l1_bits;
    v_l2_levels = v_l1_shift / V_L2_BITS - 1;

    assert(v_l1_bits <= V_L1_MAX_BITS);
    assert(v_l1_shift % V_L2_BITS == 0);
    assert(v_l2_levels >= 0);
}
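
/*
 * Worked example (illustrative values, not fixed by this file): with
 * L1_MAP_ADDR_SPACE_BITS = 32 and TARGET_PAGE_BITS = 12 there are 20 index
 * bits; (32 - 12) % 10 == 0 is below V_L1_MIN_BITS, so v_l1_bits becomes 10.
 * That yields a 1024-entry L1 table, v_l1_shift = 10 and v_l2_levels = 0,
 * i.e. each L1 entry points directly at a leaf array of PageDesc.
 */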

void cpu_gen_init(void)
{
    tcg_context_init(&tcg_init_ctx);
}

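/*
 * Signed LEB128 encoding keeps the searched_pc table small: each byte
 * carries seven payload bits plus a continuation bit, least significant
 * group first, with the final byte's 0x40 bit giving the sign.  For
 * example, the value 624485 encodes to the three bytes 0xE5 0x8E 0x26.
 */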
static uint8_t *encode_sleb128(uint8_t *p, target_long val)
{
    int more, byte;

    do {
        byte = val & 0x7f;
        val >>= 7;
        more = !((val == 0 && (byte & 0x40) == 0)
                 || (val == -1 && (byte & 0x40) != 0));
        if (more) {
            byte |= 0x80;
        }
        *p++ = byte;
    } while (more);

    return p;
}

/* The inverse: decode one sleb128 value from *pp and advance past it.  */
static target_long decode_sleb128(uint8_t **pp)
{
    uint8_t *p = *pp;
    target_long val = 0;
    int byte, shift = 0;

    do {
        byte = *p++;
        val |= (target_ulong)(byte & 0x7f) << shift;
        shift += 7;
    } while (byte & 0x80);
    if (shift < TARGET_LONG_BITS && (byte & 0x40)) {
        val |= -(target_ulong)1 << shift;
    }

    *pp = p;
    return val;
}

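/*
 * Encode the data in the searched_pc table.  For each guest instruction of
 * the TB we emit TARGET_INSN_START_WORDS target data words (the first of
 * which is the guest PC) followed by the offset at which the host code for
 * that instruction ends; every value is written as a sleb128 delta against
 * the corresponding value of the previous instruction, with the TB's own
 * pc and host code start as the initial bases.
 */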
static int encode_search(TranslationBlock *tb, uint8_t *block)
{
    uint8_t *highwater = tcg_ctx->code_gen_highwater;
    uint8_t *p = block;
    int i, j, n;

    for (i = 0, n = tb->icount; i < n; ++i) {
        target_ulong prev;

        for (j = 0; j < TARGET_INSN_START_WORDS; ++j) {
            if (i == 0) {
                prev = (j == 0 ? tb->pc : 0);
            } else {
                prev = tcg_ctx->gen_insn_data[i - 1][j];
            }
            p = encode_sleb128(p, tcg_ctx->gen_insn_data[i][j] - prev);
        }
        prev = (i == 0 ? 0 : tcg_ctx->gen_insn_end_off[i - 1]);
        p = encode_sleb128(p, tcg_ctx->gen_insn_end_off[i] - prev);

        /* Test for (pending) buffer overflow.  The assumption is that any
           one row beginning below the high water mark cannot overrun
           the buffer completely.  Thus we can test for overflow after
           encoding a row without having to check during encoding.  */
        if (unlikely(p > highwater)) {
            return -1;
        }
    }

    return p - block;
}

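/* The cpu state corresponding to 'searched_pc' is restored.
 * When reset_icount is true, current TB will be interrupted and
 * icount should be recalculated.
 */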
static int cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb,
                                     uintptr_t searched_pc, bool reset_icount)
{
    target_ulong data[TARGET_INSN_START_WORDS] = { tb->pc };
    uintptr_t host_pc = (uintptr_t)tb->tc.ptr;
    CPUArchState *env = cpu->env_ptr;
    uint8_t *p = tb->tc.ptr + tb->tc.size;
    int i, j, num_insns = tb->icount;
#ifdef CONFIG_PROFILER
    TCGProfile *prof = &tcg_ctx->prof;
    int64_t ti = profile_getclock();
#endif

    searched_pc -= GETPC_ADJ;

    if (searched_pc < host_pc) {
        return -1;
    }

    /* Reconstruct the stored insn data while looking for the point at
       which the end of the insn exceeds the searched_pc.  */
    for (i = 0; i < num_insns; ++i) {
        for (j = 0; j < TARGET_INSN_START_WORDS; ++j) {
            data[j] += decode_sleb128(&p);
        }
        host_pc += decode_sleb128(&p);
        if (host_pc > searched_pc) {
            goto found;
        }
    }
    return -1;

 found:
    if (reset_icount && (tb_cflags(tb) & CF_USE_ICOUNT)) {
        assert(use_icount);
        /* Reset the cycle counter to the start of the block
           and shift it by the number of actually executed instructions.  */
        cpu->icount_decr.u16.low += num_insns - i;
    }
    restore_state_to_opc(env, tb, data);

#ifdef CONFIG_PROFILER
    atomic_set(&prof->restore_time,
               prof->restore_time + profile_getclock() - ti);
    atomic_set(&prof->restore_count, prof->restore_count + 1);
#endif
    return 0;
}

bool cpu_restore_state(CPUState *cpu, uintptr_t host_pc, bool will_exit)
{
    TranslationBlock *tb;
    bool r = false;
    uintptr_t check_offset;

    /* The host_pc has to be in the region of the current code buffer.
     * If it is not we will not be able to resolve it here.  The two
     * cases where host_pc has to be in the region of the current code
     * buffer are:
     *  - fault during translation (instruction fetch)
     *  - fault from helper (not using GETPC() macro)
     *
     * Either way we need to return early as we can't resolve it here.
     *
     * We are using unsigned arithmetic so if host_pc <
     * tcg_init_ctx.code_gen_buffer, check_offset will wrap to way
     * above code_gen_buffer_size.
     */
    check_offset = host_pc - (uintptr_t) tcg_init_ctx.code_gen_buffer;

    if (check_offset < tcg_init_ctx.code_gen_buffer_size) {
        tb = tcg_tb_lookup(host_pc);
        if (tb) {
            cpu_restore_state_from_tb(cpu, tb, host_pc, will_exit);
            if (tb_cflags(tb) & CF_NOCACHE) {
                /* one-shot translation, invalidate it immediately */
                tb_phys_invalidate(tb, -1);
                tcg_tb_remove(tb);
            }
            r = true;
        }
    }

    return r;
}

static void page_init(void)
{
    page_size_init();
    page_table_config_init();

#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
    {
#ifdef HAVE_KINFO_GETVMMAP
        struct kinfo_vmentry *freep;
        int i, cnt;

        freep = kinfo_getvmmap(getpid(), &cnt);
        if (freep) {
            mmap_lock();
            for (i = 0; i < cnt; i++) {
                unsigned long startaddr, endaddr;

                startaddr = freep[i].kve_start;
                endaddr = freep[i].kve_end;
                if (h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                    } else {
#if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
                        endaddr = ~0ul;
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
#endif
                    }
                }
            }
            free(freep);
            mmap_unlock();
        }
#else
        FILE *f;

        last_brk = (unsigned long)sbrk(0);

        f = fopen("/compat/linux/proc/self/maps", "r");
        if (f) {
            mmap_lock();

            do {
                unsigned long startaddr, endaddr;
                int n;

                n = fscanf(f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);

                if (n == 2 && h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                    } else {
                        endaddr = ~0ul;
                    }
                    page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                }
            } while (!feof(f));

            fclose(f);
            mmap_unlock();
        }
#endif
    }
#endif
}

static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
{
    PageDesc *pd;
    void **lp;
    int i;

    /* Level 1.  Always allocated.  */
    lp = l1_map + ((index >> v_l1_shift) & (v_l1_size - 1));

    /* Level 2..N-1.  */
    for (i = v_l2_levels; i > 0; i--) {
        void **p = atomic_rcu_read(lp);

        if (p == NULL) {
            void *existing;

            if (!alloc) {
                return NULL;
            }
            p = g_new0(void *, V_L2_SIZE);
            existing = atomic_cmpxchg(lp, NULL, p);
            if (unlikely(existing)) {
                g_free(p);
                p = existing;
            }
        }

        lp = p + ((index >> (i * V_L2_BITS)) & (V_L2_SIZE - 1));
    }

    pd = atomic_rcu_read(lp);
    if (pd == NULL) {
        void *existing;

        if (!alloc) {
            return NULL;
        }
        pd = g_new0(PageDesc, V_L2_SIZE);
#ifndef CONFIG_USER_ONLY
        {
            int i;

            for (i = 0; i < V_L2_SIZE; i++) {
                qemu_spin_init(&pd[i].lock);
            }
        }
#endif
        existing = atomic_cmpxchg(lp, NULL, pd);
        if (unlikely(existing)) {
            g_free(pd);
            pd = existing;
        }
    }

    return pd + (index & (V_L2_SIZE - 1));
}

static inline PageDesc *page_find(tb_page_addr_t index)
{
    return page_find_alloc(index, 0);
}

static void page_lock_pair(PageDesc **ret_p1, tb_page_addr_t phys1,
                           PageDesc **ret_p2, tb_page_addr_t phys2, int alloc);

/* In user-mode page locks aren't used; mmap_lock is enough */
#ifdef CONFIG_USER_ONLY

#define assert_page_locked(pd) tcg_debug_assert(have_mmap_lock())

static inline void page_lock(PageDesc *pd)
{ }

static inline void page_unlock(PageDesc *pd)
{ }

static inline void page_lock_tb(const TranslationBlock *tb)
{ }

static inline void page_unlock_tb(const TranslationBlock *tb)
{ }

struct page_collection *
page_collection_lock(tb_page_addr_t start, tb_page_addr_t end)
{
    return NULL;
}

void page_collection_unlock(struct page_collection *set)
{ }
#else /* !CONFIG_USER_ONLY */

#ifdef CONFIG_DEBUG_TCG

static __thread GHashTable *ht_pages_locked_debug;

static void ht_pages_locked_debug_init(void)
{
    if (ht_pages_locked_debug) {
        return;
    }
    ht_pages_locked_debug = g_hash_table_new(NULL, NULL);
}

static bool page_is_locked(const PageDesc *pd)
{
    PageDesc *found;

    ht_pages_locked_debug_init();
    found = g_hash_table_lookup(ht_pages_locked_debug, pd);
    return !!found;
}

static void page_lock__debug(PageDesc *pd)
{
    ht_pages_locked_debug_init();
    g_assert(!page_is_locked(pd));
    g_hash_table_insert(ht_pages_locked_debug, pd, pd);
}

static void page_unlock__debug(const PageDesc *pd)
{
    bool removed;

    ht_pages_locked_debug_init();
    g_assert(page_is_locked(pd));
    removed = g_hash_table_remove(ht_pages_locked_debug, pd);
    g_assert(removed);
}

static void
do_assert_page_locked(const PageDesc *pd, const char *file, int line)
{
    if (unlikely(!page_is_locked(pd))) {
        error_report("assert_page_lock: PageDesc %p not locked @ %s:%d",
                     pd, file, line);
        abort();
    }
}

#define assert_page_locked(pd) do_assert_page_locked(pd, __FILE__, __LINE__)

void assert_no_pages_locked(void)
{
    ht_pages_locked_debug_init();
    g_assert(g_hash_table_size(ht_pages_locked_debug) == 0);
}

#else /* !CONFIG_DEBUG_TCG */

#define assert_page_locked(pd)

static inline void page_lock__debug(const PageDesc *pd)
{
}

static inline void page_unlock__debug(const PageDesc *pd)
{
}

#endif /* CONFIG_DEBUG_TCG */

static inline void page_lock(PageDesc *pd)
{
    page_lock__debug(pd);
    qemu_spin_lock(&pd->lock);
}

static inline void page_unlock(PageDesc *pd)
{
    qemu_spin_unlock(&pd->lock);
    page_unlock__debug(pd);
}

/* lock the page(s) of a TB in the correct acquisition order */
static inline void page_lock_tb(const TranslationBlock *tb)
{
    page_lock_pair(NULL, tb->page_addr[0], NULL, tb->page_addr[1], 0);
}

static inline void page_unlock_tb(const TranslationBlock *tb)
{
    PageDesc *p1 = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);

    page_unlock(p1);
    if (unlikely(tb->page_addr[1] != -1)) {
        PageDesc *p2 = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);

        if (p2 != p1) {
            page_unlock(p2);
        }
    }
}

static inline struct page_entry *
page_entry_new(PageDesc *pd, tb_page_addr_t index)
{
    struct page_entry *pe = g_malloc(sizeof(*pe));

    pe->index = index;
    pe->pd = pd;
    pe->locked = false;
    return pe;
}

static void page_entry_destroy(gpointer p)
{
    struct page_entry *pe = p;

    g_assert(pe->locked);
    page_unlock(pe->pd);
    g_free(pe);
}

/* returns false on success */
static bool page_entry_trylock(struct page_entry *pe)
{
    bool busy;

    busy = qemu_spin_trylock(&pe->pd->lock);
    if (!busy) {
        g_assert(!pe->locked);
        pe->locked = true;
        page_lock__debug(pe->pd);
    }
    return busy;
}

static void do_page_entry_lock(struct page_entry *pe)
{
    page_lock(pe->pd);
    g_assert(!pe->locked);
    pe->locked = true;
}

static gboolean page_entry_lock(gpointer key, gpointer value, gpointer data)
{
    struct page_entry *pe = value;

    do_page_entry_lock(pe);
    return FALSE;
}

static gboolean page_entry_unlock(gpointer key, gpointer value, gpointer data)
{
    struct page_entry *pe = value;

    if (pe->locked) {
        pe->locked = false;
        page_unlock(pe->pd);
    }
    return FALSE;
}

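/*
 * Trylock a page, and if successful, add the page to a collection.
 * Returns true ("busy") if the page could not be locked; false otherwise.
 */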
static bool page_trylock_add(struct page_collection *set, tb_page_addr_t addr)
{
    tb_page_addr_t index = addr >> TARGET_PAGE_BITS;
    struct page_entry *pe;
    PageDesc *pd;

    pe = g_tree_lookup(set->tree, &index);
    if (pe) {
        return false;
    }

    pd = page_find(index);
    if (pd == NULL) {
        return false;
    }

    pe = page_entry_new(pd, index);
    g_tree_insert(set->tree, &pe->index, pe);

    /*
     * If this is either (1) the first insertion or (2) a page whose index
     * is higher than any other so far, just lock the page and move on.
     */
    if (set->max == NULL || pe->index > set->max->index) {
        set->max = pe;
        do_page_entry_lock(pe);
        return false;
    }
    /*
     * Try to acquire out-of-order lock; if busy, return busy so that we
     * acquire locks in order.
     */
    return page_entry_trylock(pe);
}

static gint tb_page_addr_cmp(gconstpointer ap, gconstpointer bp, gpointer udata)
{
    tb_page_addr_t a = *(const tb_page_addr_t *)ap;
    tb_page_addr_t b = *(const tb_page_addr_t *)bp;

    if (a == b) {
        return 0;
    } else if (a < b) {
        return -1;
    }
    return 1;
}

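/*
 * Lock a range of pages ([@start,@end[) as well as the pages of all
 * intersecting TBs.
 * Locking order: acquire locks in ascending order of page index.
 */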
struct page_collection *
page_collection_lock(tb_page_addr_t start, tb_page_addr_t end)
{
    struct page_collection *set = g_malloc(sizeof(*set));
    tb_page_addr_t index;
    PageDesc *pd;

    start >>= TARGET_PAGE_BITS;
    end >>= TARGET_PAGE_BITS;
    g_assert(start <= end);

    set->tree = g_tree_new_full(tb_page_addr_cmp, NULL, NULL,
                                page_entry_destroy);
    set->max = NULL;
    assert_no_pages_locked();

 retry:
    g_tree_foreach(set->tree, page_entry_lock, NULL);

    for (index = start; index <= end; index++) {
        TranslationBlock *tb;
        int n;

        pd = page_find(index);
        if (pd == NULL) {
            continue;
        }
        if (page_trylock_add(set, index << TARGET_PAGE_BITS)) {
            g_tree_foreach(set->tree, page_entry_unlock, NULL);
            goto retry;
        }
        assert_page_locked(pd);
        PAGE_FOR_EACH_TB(pd, tb, n) {
            if (page_trylock_add(set, tb->page_addr[0]) ||
                (tb->page_addr[1] != -1 &&
                 page_trylock_add(set, tb->page_addr[1]))) {
                /* drop all locks, and reacquire in order */
                g_tree_foreach(set->tree, page_entry_unlock, NULL);
                goto retry;
            }
        }
    }
    return set;
}

void page_collection_unlock(struct page_collection *set)
{
    /* entries are unlocked and freed via page_entry_destroy */
    g_tree_destroy(set->tree);
    g_free(set);
}

#endif /* !CONFIG_USER_ONLY */

static void page_lock_pair(PageDesc **ret_p1, tb_page_addr_t phys1,
                           PageDesc **ret_p2, tb_page_addr_t phys2, int alloc)
{
    PageDesc *p1, *p2;
    tb_page_addr_t page1;
    tb_page_addr_t page2;

    assert_memory_lock();
    g_assert(phys1 != -1);

    page1 = phys1 >> TARGET_PAGE_BITS;
    page2 = phys2 >> TARGET_PAGE_BITS;

    p1 = page_find_alloc(page1, alloc);
    if (ret_p1) {
        *ret_p1 = p1;
    }
    if (likely(phys2 == -1)) {
        page_lock(p1);
        return;
    } else if (page1 == page2) {
        page_lock(p1);
        if (ret_p2) {
            *ret_p2 = p1;
        }
        return;
    }
    p2 = page_find_alloc(page2, alloc);
    if (ret_p2) {
        *ret_p2 = p2;
    }
    if (page1 < page2) {
        page_lock(p1);
        page_lock(p2);
    } else {
        page_lock(p2);
        page_lock(p1);
    }
}

#if defined(CONFIG_USER_ONLY)
/* Currently it is not recommended to allocate big chunks of data in
   user mode. It will change when a dedicated libc will be used.  */
/* ??? 64-bit hosts ought to have no problem mmaping data outside the
   region in which the guest needs to run.  Revisit this.  */
#define USE_STATIC_CODE_GEN_BUFFER
#endif

/* Minimum size of the code gen buffer.  This number is randomly chosen,
   but not so small that we can't have a fair number of TB's live.  */
#define MIN_CODE_GEN_BUFFER_SIZE     (1024u * 1024)

/* Maximum size of the code gen buffer we'd like to use.  Unless otherwise
   indicated, this is constrained by the range of direct branches on the
   host cpu, as used by the TCG implementation of goto_tb.  */
#if defined(__x86_64__)
# define MAX_CODE_GEN_BUFFER_SIZE  (2ul * 1024 * 1024 * 1024)
#elif defined(__sparc__)
# define MAX_CODE_GEN_BUFFER_SIZE  (2ul * 1024 * 1024 * 1024)
#elif defined(__powerpc64__)
# define MAX_CODE_GEN_BUFFER_SIZE  (2ul * 1024 * 1024 * 1024)
#elif defined(__powerpc__)
# define MAX_CODE_GEN_BUFFER_SIZE  (32u * 1024 * 1024)
#elif defined(__aarch64__)
# define MAX_CODE_GEN_BUFFER_SIZE  (2ul * 1024 * 1024 * 1024)
#elif defined(__s390x__)
  /* We have a +- 4GB range on the branches; leave some slop.  */
# define MAX_CODE_GEN_BUFFER_SIZE  (3ul * 1024 * 1024 * 1024)
#elif defined(__mips__)
  /* We have a 256MB branch region, but leave room to make sure the
     main executable is also within that region.  */
# define MAX_CODE_GEN_BUFFER_SIZE  (128ul * 1024 * 1024)
#else
# define MAX_CODE_GEN_BUFFER_SIZE  ((size_t)-1)
#endif

#define DEFAULT_CODE_GEN_BUFFER_SIZE_1 (32u * 1024 * 1024)

#define DEFAULT_CODE_GEN_BUFFER_SIZE \
  (DEFAULT_CODE_GEN_BUFFER_SIZE_1 < MAX_CODE_GEN_BUFFER_SIZE \
   ? DEFAULT_CODE_GEN_BUFFER_SIZE_1 : MAX_CODE_GEN_BUFFER_SIZE)

static inline size_t size_code_gen_buffer(size_t tb_size)
{
    /* Size the buffer.  */
    if (tb_size == 0) {
#ifdef USE_STATIC_CODE_GEN_BUFFER
        tb_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* ??? Needs adjustments.  */
        /* ??? If we relax the requirement that CONFIG_USER_ONLY use the
           static buffer, we could size this on RESERVED_VA, on the text
           segment size of the executable, or continue to use the default.  */
        tb_size = (unsigned long)(ram_size / 4);
#endif
    }
    if (tb_size < MIN_CODE_GEN_BUFFER_SIZE) {
        tb_size = MIN_CODE_GEN_BUFFER_SIZE;
    }
    if (tb_size > MAX_CODE_GEN_BUFFER_SIZE) {
        tb_size = MAX_CODE_GEN_BUFFER_SIZE;
    }
    return tb_size;
}

#ifdef __mips__
/* In order to use J and JAL within the code_gen_buffer, we require
   that the buffer not cross a 256MB boundary.  */
static inline bool cross_256mb(void *addr, size_t size)
{
    return ((uintptr_t)addr ^ ((uintptr_t)addr + size)) & ~0x0ffffffful;
}

/* We weren't able to allocate a buffer without crossing that boundary,
   so make do with the larger portion of the buffer that doesn't cross.
   Returns the new base of the buffer, and adjusts code_gen_buffer_size.  */
static inline void *split_cross_256mb(void *buf1, size_t size1)
{
    void *buf2 = (void *)(((uintptr_t)buf1 + size1) & ~0x0ffffffful);
    size_t size2 = buf1 + size1 - buf2;

    size1 = buf2 - buf1;
    if (size1 < size2) {
        size1 = size2;
        buf1 = buf2;
    }

    tcg_ctx->code_gen_buffer_size = size1;
    return buf1;
}
#endif

#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
    __attribute__((aligned(CODE_GEN_ALIGN)));

static inline void *alloc_code_gen_buffer(void)
{
    void *buf = static_code_gen_buffer;
    void *end = static_code_gen_buffer + sizeof(static_code_gen_buffer);
    size_t size;

    /* page-align the beginning and end of the buffer */
    buf = QEMU_ALIGN_PTR_UP(buf, qemu_real_host_page_size);
    end = QEMU_ALIGN_PTR_DOWN(end, qemu_real_host_page_size);

    size = end - buf;

    /* Honor a command-line option limiting the size of the buffer.  */
    if (size > tcg_ctx->code_gen_buffer_size) {
        size = QEMU_ALIGN_DOWN(tcg_ctx->code_gen_buffer_size,
                               qemu_real_host_page_size);
    }
    tcg_ctx->code_gen_buffer_size = size;

#ifdef __mips__
    if (cross_256mb(buf, size)) {
        buf = split_cross_256mb(buf, size);
        size = tcg_ctx->code_gen_buffer_size;
    }
#endif

    if (qemu_mprotect_rwx(buf, size)) {
        abort();
    }
    qemu_madvise(buf, size, QEMU_MADV_HUGEPAGE);

    return buf;
}
#elif defined(_WIN32)
static inline void *alloc_code_gen_buffer(void)
{
    size_t size = tcg_ctx->code_gen_buffer_size;
    return VirtualAlloc(NULL, size, MEM_RESERVE | MEM_COMMIT,
                        PAGE_EXECUTE_READWRITE);
}
#else
static inline void *alloc_code_gen_buffer(void)
{
    int prot = PROT_WRITE | PROT_READ | PROT_EXEC;
    int flags = MAP_PRIVATE | MAP_ANONYMOUS;
    uintptr_t start = 0;
    size_t size = tcg_ctx->code_gen_buffer_size;
    void *buf;

    /* Constrain the position of the buffer based on the host cpu.
       Note that these addresses are chosen in concert with the
       addresses assigned in the relevant linker script file.  */
# if defined(__PIE__) || defined(__PIC__)
    /* Don't bother setting a preferred location if we're building
       a position-independent executable.  We're more likely to get
       an address near the main executable if we let the kernel
       choose the address.  */
# elif defined(__x86_64__) && defined(MAP_32BIT)
    /* Force the memory down into low memory with the executable.
       Leave the choice of exact location with the kernel.  */
    flags |= MAP_32BIT;
    /* Cannot expect to map more than 800MB in low memory.  */
    if (size > 800u * 1024 * 1024) {
        tcg_ctx->code_gen_buffer_size = size = 800u * 1024 * 1024;
    }
# elif defined(__sparc__)
    start = 0x40000000ul;
# elif defined(__s390x__)
    start = 0x90000000ul;
# elif defined(__mips__)
#  if _MIPS_SIM == _ABI64
    start = 0x128000000ul;
#  else
    start = 0x08000000ul;
#  endif
# endif

    buf = mmap((void *)start, size, prot, flags, -1, 0);
    if (buf == MAP_FAILED) {
        return NULL;
    }

#ifdef __mips__
    if (cross_256mb(buf, size)) {
        /* Try again, with the original still mapped, to avoid re-acquiring
           that 256mb crossing.  This time don't specify an address.  */
        size_t size2;
        void *buf2 = mmap(NULL, size, prot, flags, -1, 0);
        switch ((int)(buf2 != MAP_FAILED)) {
        case 1:
            if (!cross_256mb(buf2, size)) {
                /* Success!  Use the new buffer.  */
                munmap(buf, size);
                break;
            }
            /* Failure.  Work with what we had.  */
            munmap(buf2, size);
            /* fallthru */
        default:
            /* Split the original buffer.  Free the smaller half.  */
            buf2 = split_cross_256mb(buf, size);
            size2 = tcg_ctx->code_gen_buffer_size;
            if (buf == buf2) {
                munmap(buf + size2, size - size2);
            } else {
                munmap(buf, size - size2);
            }
            size = size2;
            break;
        }
        buf = buf2;
    }
#endif

    /* Request large pages for the buffer.  */
    qemu_madvise(buf, size, QEMU_MADV_HUGEPAGE);

    return buf;
}
#endif /* USE_STATIC_CODE_GEN_BUFFER, WIN32, POSIX */

static inline void code_gen_alloc(size_t tb_size)
{
    tcg_ctx->code_gen_buffer_size = size_code_gen_buffer(tb_size);
    tcg_ctx->code_gen_buffer = alloc_code_gen_buffer();
    if (tcg_ctx->code_gen_buffer == NULL) {
        fprintf(stderr, "Could not allocate dynamic translator buffer\n");
        exit(1);
    }
}

static bool tb_cmp(const void *ap, const void *bp)
{
    const TranslationBlock *a = ap;
    const TranslationBlock *b = bp;

    return a->pc == b->pc &&
        a->cs_base == b->cs_base &&
        a->flags == b->flags &&
        (tb_cflags(a) & CF_HASH_MASK) == (tb_cflags(b) & CF_HASH_MASK) &&
        a->trace_vcpu_dstate == b->trace_vcpu_dstate &&
        a->page_addr[0] == b->page_addr[0] &&
        a->page_addr[1] == b->page_addr[1];
}

static void tb_htable_init(void)
{
    unsigned int mode = QHT_MODE_AUTO_RESIZE;

    qht_init(&tb_ctx.htable, tb_cmp, CODE_GEN_HTABLE_SIZE, mode);
}
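/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer. Zero means default
   size. */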
void tcg_exec_init(unsigned long tb_size)
{
    tcg_allowed = true;
    cpu_gen_init();
    page_init();
    tb_htable_init();
    code_gen_alloc(tb_size);
#if defined(CONFIG_SOFTMMU)
    /* There's no guest base to take into account, so go ahead and
       initialize the prologue now.  */
    tcg_prologue_init(tcg_ctx);
#endif
}

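/*
 * Allocate a new translation block. Flush the translation buffer if
 * too many translation blocks or too much generated code.
 */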
static TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    assert_memory_lock();

    tb = tcg_tb_alloc(tcg_ctx);
    if (unlikely(tb == NULL)) {
        return NULL;
    }
    return tb;
}

/* call with @p->lock held */
static inline void invalidate_page_bitmap(PageDesc *p)
{
    assert_page_locked(p);
#ifdef CONFIG_SOFTMMU
    g_free(p->code_bitmap);
    p->code_bitmap = NULL;
    p->code_write_count = 0;
#endif
}

/* Set to NULL all the 'first_tb' fields in all PageDescs. */
static void page_flush_tb_1(int level, void **lp)
{
    int i;

    if (*lp == NULL) {
        return;
    }
    if (level == 0) {
        PageDesc *pd = *lp;

        for (i = 0; i < V_L2_SIZE; ++i) {
            page_lock(&pd[i]);
            pd[i].first_tb = (uintptr_t)NULL;
            invalidate_page_bitmap(pd + i);
            page_unlock(&pd[i]);
        }
    } else {
        void **pp = *lp;

        for (i = 0; i < V_L2_SIZE; ++i) {
            page_flush_tb_1(level - 1, pp + i);
        }
    }
}

static void page_flush_tb(void)
{
    int i, l1_sz = v_l1_size;

    for (i = 0; i < l1_sz; i++) {
        page_flush_tb_1(v_l2_levels, l1_map + i);
    }
}

static gboolean tb_host_size_iter(gpointer key, gpointer value, gpointer data)
{
    const TranslationBlock *tb = value;
    size_t *size = data;

    *size += tb->tc.size;
    return false;
}

/* flush all the translation blocks */
static void do_tb_flush(CPUState *cpu, run_on_cpu_data tb_flush_count)
{
    mmap_lock();
    /* If it has already been done on request of another CPU,
     * just retry.
     */
    if (tb_ctx.tb_flush_count != tb_flush_count.host_int) {
        goto done;
    }

    if (DEBUG_TB_FLUSH_GATE) {
        size_t nb_tbs = tcg_nb_tbs();
        size_t host_size = 0;

        tcg_tb_foreach(tb_host_size_iter, &host_size);
        printf("qemu: flush code_size=%zu nb_tbs=%zu avg_tb_size=%zu\n",
               tcg_code_size(), nb_tbs, nb_tbs > 0 ? host_size / nb_tbs : 0);
    }

    CPU_FOREACH(cpu) {
        cpu_tb_jmp_cache_clear(cpu);
    }

    qht_reset_size(&tb_ctx.htable, CODE_GEN_HTABLE_SIZE);
    page_flush_tb();

    tcg_region_reset_all();
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    atomic_mb_set(&tb_ctx.tb_flush_count, tb_ctx.tb_flush_count + 1);

done:
    mmap_unlock();
}

void tb_flush(CPUState *cpu)
{
    if (tcg_enabled()) {
        unsigned tb_flush_count = atomic_mb_read(&tb_ctx.tb_flush_count);
        async_safe_run_on_cpu(cpu, do_tb_flush,
                              RUN_ON_CPU_HOST_INT(tb_flush_count));
    }
}

/*
 * Formerly ifdef DEBUG_TB_CHECK. These debug functions are user-mode-only,
 * so in order to prevent bit rot we compile them unconditionally in user-mode,
 * and let the optimizer get rid of them by wrapping their user-only callers
 * with if (DEBUG_TB_CHECK_GATE).
 */
#ifdef CONFIG_USER_ONLY

static void do_tb_invalidate_check(void *p, uint32_t hash, void *userp)
{
    TranslationBlock *tb = p;
    target_ulong addr = *(target_ulong *)userp;

    if (!(addr + TARGET_PAGE_SIZE <= tb->pc || addr >= tb->pc + tb->size)) {
        printf("ERROR invalidate: address=" TARGET_FMT_lx
               " PC=%08lx size=%04x\n", addr, (long)tb->pc, tb->size);
    }
}

/* verify that all the pages have correct rights for code
 *
 * Called with mmap_lock held.
 */
static void tb_invalidate_check(target_ulong address)
{
    address &= TARGET_PAGE_MASK;
    qht_iter(&tb_ctx.htable, do_tb_invalidate_check, &address);
}

static void do_tb_page_check(void *p, uint32_t hash, void *userp)
{
    TranslationBlock *tb = p;
    int flags1, flags2;

    flags1 = page_get_flags(tb->pc);
    flags2 = page_get_flags(tb->pc + tb->size - 1);
    if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
        printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
               (long)tb->pc, tb->size, flags1, flags2);
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    qht_iter(&tb_ctx.htable, do_tb_page_check, NULL);
}

#endif /* CONFIG_USER_ONLY */

/*
 * user-mode: call with mmap_lock held
 * !user-mode: call with @pd->lock held
 */
static inline void tb_page_remove(PageDesc *pd, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    uintptr_t *pprev;
    unsigned int n1;

    assert_page_locked(pd);
    pprev = &pd->first_tb;
    PAGE_FOR_EACH_TB(pd, tb1, n1) {
        if (tb1 == tb) {
            *pprev = tb1->page_next[n1];
            return;
        }
        pprev = &tb1->page_next[n1];
    }
    g_assert_not_reached();
}

/* remove @orig from its @n_orig-th jump list */
static inline void tb_remove_from_jmp_list(TranslationBlock *orig, int n_orig)
{
    uintptr_t ptr, ptr_locked;
    TranslationBlock *dest;
    TranslationBlock *tb;
    uintptr_t *pprev;
    int n;

    /* mark the LSB of jmp_dest[] so that no further jumps can be inserted */
    ptr = atomic_or_fetch(&orig->jmp_dest[n_orig], 1);
    dest = (TranslationBlock *)(ptr & ~1);
    if (dest == NULL) {
        return;
    }

    qemu_spin_lock(&dest->jmp_lock);
    /*
     * While acquiring the lock, the jump might have been removed if the
     * destination TB was invalidated; check again.
     */
    ptr_locked = atomic_read(&orig->jmp_dest[n_orig]);
    if (ptr_locked != ptr) {
        qemu_spin_unlock(&dest->jmp_lock);
        /*
         * The only possibility is that the jump was unlinked via
         * tb_jmp_unlink(dest). Seeing here another destination would be a bug,
         * because we set the LSB above.
         */
        g_assert(ptr_locked == 1 && dest->cflags & CF_INVALID);
        return;
    }
    /*
     * We first acquired the lock, and since the destination pointer matches,
     * we know for sure that @orig is in the jmp list.
     */
    pprev = &dest->jmp_list_head;
    TB_FOR_EACH_JMP(dest, tb, n) {
        if (tb == orig && n == n_orig) {
            *pprev = tb->jmp_list_next[n];
            /* no need to set orig->jmp_dest[n_orig]; setting the LSB was enough */
            qemu_spin_unlock(&dest->jmp_lock);
            return;
        }
        pprev = &tb->jmp_list_next[n];
    }
    g_assert_not_reached();
}

/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    uintptr_t addr = (uintptr_t)(tb->tc.ptr + tb->jmp_reset_offset[n]);
    tb_set_jmp_target(tb, n, addr);
}

/* remove any jumps to the TB */
static inline void tb_jmp_unlink(TranslationBlock *dest)
{
    TranslationBlock *tb;
    int n;

    qemu_spin_lock(&dest->jmp_lock);

    TB_FOR_EACH_JMP(dest, tb, n) {
        tb_reset_jump(tb, n);
        atomic_and(&tb->jmp_dest[n], (uintptr_t)NULL | 1);
        /* No need to clear the list entry; setting the dest ptr is enough */
    }
    dest->jmp_list_head = (uintptr_t)NULL;

    qemu_spin_unlock(&dest->jmp_lock);
}

/*
 * In user-mode, call with mmap_lock held.
 * In !user-mode, if @rm_from_page_list is set, call with the TB's pages'
 * locks held.
 */
static void do_tb_phys_invalidate(TranslationBlock *tb, bool rm_from_page_list)
{
    CPUState *cpu;
    PageDesc *p;
    uint32_t h;
    tb_page_addr_t phys_pc;

    assert_memory_lock();

    /* make sure no further incoming jumps will be chained to this TB */
    qemu_spin_lock(&tb->jmp_lock);
    atomic_set(&tb->cflags, tb->cflags | CF_INVALID);
    qemu_spin_unlock(&tb->jmp_lock);

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_hash_func(phys_pc, tb->pc, tb->flags, tb_cflags(tb) & CF_HASH_MASK,
                     tb->trace_vcpu_dstate);
    if (!(tb->cflags & CF_NOCACHE) &&
        !qht_remove(&tb_ctx.htable, tb, h)) {
        return;
    }

    /* remove the TB from the page list */
    if (rm_from_page_list) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(p, tb);
        invalidate_page_bitmap(p);
        if (tb->page_addr[1] != -1) {
            p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
            tb_page_remove(p, tb);
            invalidate_page_bitmap(p);
        }
    }

    /* remove the TB from the per-CPU jump caches */
    h = tb_jmp_cache_hash_func(tb->pc);
    CPU_FOREACH(cpu) {
        if (atomic_read(&cpu->tb_jmp_cache[h]) == tb) {
            atomic_set(&cpu->tb_jmp_cache[h], NULL);
        }
    }

    /* suppress this TB from the two jump lists */
    tb_remove_from_jmp_list(tb, 0);
    tb_remove_from_jmp_list(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb_jmp_unlink(tb);

    atomic_set(&tcg_ctx->tb_phys_invalidate_count,
               tcg_ctx->tb_phys_invalidate_count + 1);
}

static void tb_phys_invalidate__locked(TranslationBlock *tb)
{
    do_tb_phys_invalidate(tb, true);
}

/* invalidate one TB
 *
 * Called with mmap_lock held in user-mode emulation
 */
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
{
    if (page_addr == -1 && tb->page_addr[0] != -1) {
        page_lock_tb(tb);
        do_tb_phys_invalidate(tb, true);
        page_unlock_tb(tb);
    } else {
        do_tb_phys_invalidate(tb, false);
    }
}

#ifdef CONFIG_SOFTMMU
/* call with @p->lock held */
static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    assert_page_locked(p);
    p->code_bitmap = bitmap_new(TARGET_PAGE_SIZE);

    PAGE_FOR_EACH_TB(p, tb, n) {
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE) {
                tb_end = TARGET_PAGE_SIZE;
            }
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        bitmap_set(p->code_bitmap, tb_start, tb_end - tb_start);
    }
}
#endif

/* add the tb in the target page and protect it if necessary
 *
 * Called with mmap_lock held for user-mode emulation.
 * Called with @p->lock held in !user-mode.
 */
static inline void tb_page_add(PageDesc *p, TranslationBlock *tb,
                               unsigned int n, tb_page_addr_t page_addr)
{
#ifndef CONFIG_USER_ONLY
    bool page_already_protected;
#endif

    assert_page_locked(p);

    tb->page_addr[n] = page_addr;
    tb->page_next[n] = p->first_tb;
#ifndef CONFIG_USER_ONLY
    page_already_protected = p->first_tb != (uintptr_t)NULL;
#endif
    p->first_tb = (uintptr_t)tb | n;
    invalidate_page_bitmap(p);

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        for (addr = page_addr; addr < page_addr + qemu_host_page_size;
             addr += TARGET_PAGE_SIZE) {

            p2 = page_find(addr >> TARGET_PAGE_BITS);
            if (!p2) {
                continue;
            }
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
        }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
        if (DEBUG_TB_INVALIDATE_GATE) {
            printf("protecting code page: 0x" TB_PAGE_ADDR_FMT "\n", page_addr);
        }
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!page_already_protected) {
        tlb_protect_code(page_addr);
    }
#endif
}
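/*
 * Add a new TB and link it to the physical page tables. phys_page2 is
 * (-1) to indicate that only one page contains the TB.
 *
 * Called with mmap_lock held for user-mode emulation.
 *
 * Returns a pointer @tb, or a pointer to an existing TB that matches @tb.
 * Note that in !user-mode, another thread might have already added a TB
 * for the same block of guest code that @tb corresponds to. In that case,
 * the caller should discard the original @tb, and use instead the returned TB.
 */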
static TranslationBlock *
tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
             tb_page_addr_t phys_page2)
{
    PageDesc *p;
    PageDesc *p2 = NULL;

    assert_memory_lock();

    if (phys_pc == -1) {
        /*
         * If the TB is not associated with a physical RAM page then
         * it must be a temporary one-insn TB, and we have nothing to do
         * except fill in the page_addr[] fields.
         */
        assert(tb->cflags & CF_NOCACHE);
        tb->page_addr[0] = tb->page_addr[1] = -1;
        return tb;
    }

    /*
     * Add the TB to the page list, acquiring first the pages's locks.
     * We keep the locks held until after inserting the TB in the hash table,
     * so that if the insertion fails we know for sure that the TBs are still
     * in the page lists.
     */
    page_lock_pair(&p, phys_pc, &p2, phys_page2, 1);
    tb_page_add(p, tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (p2) {
        tb_page_add(p2, tb, 1, phys_page2);
    } else {
        tb->page_addr[1] = -1;
    }

    if (!(tb->cflags & CF_NOCACHE)) {
        void *existing_tb = NULL;
        uint32_t h;

        /* add in the hash table */
        h = tb_hash_func(phys_pc, tb->pc, tb->flags, tb->cflags & CF_HASH_MASK,
                         tb->trace_vcpu_dstate);
        qht_insert(&tb_ctx.htable, tb, h, &existing_tb);

        /* remove TB from the page(s) if we couldn't insert it */
        if (unlikely(existing_tb)) {
            tb_page_remove(p, tb);
            invalidate_page_bitmap(p);
            if (p2) {
                tb_page_remove(p2, tb);
                invalidate_page_bitmap(p2);
            }
            tb = existing_tb;
        }
    }

    if (p2 && p2 != p) {
        page_unlock(p2);
    }
    page_unlock(p);

#ifdef CONFIG_USER_ONLY
    if (DEBUG_TB_CHECK_GATE) {
        tb_page_check();
    }
#endif
    return tb;
}

/* Called with mmap_lock held for user mode emulation.  */
TranslationBlock *tb_gen_code(CPUState *cpu,
                              target_ulong pc, target_ulong cs_base,
                              uint32_t flags, int cflags)
{
    CPUArchState *env = cpu->env_ptr;
    TranslationBlock *tb, *existing_tb;
    tb_page_addr_t phys_pc, phys_page2;
    target_ulong virt_page2;
    tcg_insn_unit *gen_code_buf;
    int gen_code_size, search_size;
#ifdef CONFIG_PROFILER
    TCGProfile *prof = &tcg_ctx->prof;
    int64_t ti;
#endif
    assert_memory_lock();

    phys_pc = get_page_addr_code(env, pc);

    if (phys_pc == -1) {
        /* Generate a temporary TB with 1 insn in it */
        cflags &= ~CF_COUNT_MASK;
        cflags |= CF_NOCACHE | 1;
    }

 buffer_overflow:
    tb = tb_alloc(pc);
    if (unlikely(!tb)) {
        /* flush must be done */
        tb_flush(cpu);
        mmap_unlock();
        /* Make the execution loop process the flush as soon as possible.  */
        cpu->exception_index = EXCP_INTERRUPT;
        cpu_loop_exit(cpu);
    }

    gen_code_buf = tcg_ctx->code_gen_ptr;
    tb->tc.ptr = gen_code_buf;
    tb->pc = pc;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    tb->trace_vcpu_dstate = *cpu->trace_dstate;
    tcg_ctx->tb_cflags = cflags;

#ifdef CONFIG_PROFILER
    /* includes aborted translations because of exceptions */
    atomic_set(&prof->tb_count1, prof->tb_count1 + 1);
    ti = profile_getclock();
#endif

    tcg_func_start(tcg_ctx);

    tcg_ctx->cpu = ENV_GET_CPU(env);
    gen_intermediate_code(cpu, tb);
    tcg_ctx->cpu = NULL;

    trace_translate_block(tb, tb->pc, tb->tc.ptr);

    /* generate machine code */
    tb->jmp_reset_offset[0] = TB_JMP_RESET_OFFSET_INVALID;
    tb->jmp_reset_offset[1] = TB_JMP_RESET_OFFSET_INVALID;
    tcg_ctx->tb_jmp_reset_offset = tb->jmp_reset_offset;
    if (TCG_TARGET_HAS_direct_jump) {
        tcg_ctx->tb_jmp_insn_offset = tb->jmp_target_arg;
        tcg_ctx->tb_jmp_target_addr = NULL;
    } else {
        tcg_ctx->tb_jmp_insn_offset = NULL;
        tcg_ctx->tb_jmp_target_addr = tb->jmp_target_arg;
    }

#ifdef CONFIG_PROFILER
    atomic_set(&prof->tb_count, prof->tb_count + 1);
    atomic_set(&prof->interm_time, prof->interm_time + profile_getclock() - ti);
    ti = profile_getclock();
#endif

    /* ??? Overflow could be handled better here.  In particular, we
       don't need to re-do gen_intermediate_code, nor should we re-do
       the tcg optimization currently hidden inside tcg_gen_code.  All
       that should be required is to flush the TBs, allocate a new TB,
       re-initialize it per above, and re-do the actual code generation.  */
    gen_code_size = tcg_gen_code(tcg_ctx, tb);
    if (unlikely(gen_code_size < 0)) {
        goto buffer_overflow;
    }
    search_size = encode_search(tb, (void *)gen_code_buf + gen_code_size);
    if (unlikely(search_size < 0)) {
        goto buffer_overflow;
    }
    tb->tc.size = gen_code_size;

#ifdef CONFIG_PROFILER
    atomic_set(&prof->code_time, prof->code_time + profile_getclock() - ti);
    atomic_set(&prof->code_in_len, prof->code_in_len + tb->size);
    atomic_set(&prof->code_out_len, prof->code_out_len + gen_code_size);
    atomic_set(&prof->search_out_len, prof->search_out_len + search_size);
#endif

#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_OUT_ASM) &&
        qemu_log_in_addr_range(tb->pc)) {
        qemu_log_lock();
        qemu_log("OUT: [size=%d]\n", gen_code_size);
        if (tcg_ctx->data_gen_ptr) {
            size_t code_size = tcg_ctx->data_gen_ptr - tb->tc.ptr;
            size_t data_size = gen_code_size - code_size;
            size_t i;

            log_disas(tb->tc.ptr, code_size);

            for (i = 0; i < data_size; i += sizeof(tcg_target_ulong)) {
                if (sizeof(tcg_target_ulong) == 8) {
                    qemu_log("0x%08" PRIxPTR ":  .quad  0x%016" PRIx64 "\n",
                             (uintptr_t)tcg_ctx->data_gen_ptr + i,
                             *(uint64_t *)(tcg_ctx->data_gen_ptr + i));
                } else {
                    qemu_log("0x%08" PRIxPTR ":  .long  0x%08x\n",
                             (uintptr_t)tcg_ctx->data_gen_ptr + i,
                             *(uint32_t *)(tcg_ctx->data_gen_ptr + i));
                }
            }
        } else {
            log_disas(tb->tc.ptr, gen_code_size);
        }
        qemu_log("\n");
        qemu_log_flush();
        qemu_log_unlock();
    }
#endif

    atomic_set(&tcg_ctx->code_gen_ptr, (void *)
        ROUND_UP((uintptr_t)gen_code_buf + gen_code_size + search_size,
                 CODE_GEN_ALIGN));

    /* init jump list */
    qemu_spin_init(&tb->jmp_lock);
    tb->jmp_list_head = (uintptr_t)NULL;
    tb->jmp_list_next[0] = (uintptr_t)NULL;
    tb->jmp_list_next[1] = (uintptr_t)NULL;
    tb->jmp_dest[0] = (uintptr_t)NULL;
    tb->jmp_dest[1] = (uintptr_t)NULL;

    /* init original jump addresses which have been set during tcg_gen_code() */
    if (tb->jmp_reset_offset[0] != TB_JMP_RESET_OFFSET_INVALID) {
        tb_reset_jump(tb, 0);
    }
    if (tb->jmp_reset_offset[1] != TB_JMP_RESET_OFFSET_INVALID) {
        tb_reset_jump(tb, 1);
    }

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_page_addr_code(env, virt_page2);
    }
    /*
     * No explicit memory barrier is required -- tb_link_page() makes the
     * TB visible in a consistent state.
     */
    existing_tb = tb_link_page(tb, phys_pc, phys_page2);
    /* if the TB already exists, discard what we just translated */
    if (unlikely(existing_tb != tb)) {
        uintptr_t orig_aligned = (uintptr_t)gen_code_buf;

        orig_aligned -= ROUND_UP(sizeof(*tb), qemu_icache_linesize);
        atomic_set(&tcg_ctx->code_gen_ptr, (void *)orig_aligned);
        return existing_tb;
    }
    tcg_tb_insert(tb);
    return tb;
}

/*
 * @p must be non-NULL.
 * user-mode: call with mmap_lock held.
 * !user-mode: call with all @pages locked.
 */
static void
tb_invalidate_phys_page_range__locked(struct page_collection *pages,
                                      PageDesc *p, tb_page_addr_t start,
                                      tb_page_addr_t end,
                                      int is_cpu_write_access)
{
    TranslationBlock *tb;
    tb_page_addr_t tb_start, tb_end;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    CPUState *cpu = current_cpu;
    CPUArchState *env = NULL;
    int current_tb_not_found = is_cpu_write_access;
    TranslationBlock *current_tb = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    uint32_t current_flags = 0;
#endif /* TARGET_HAS_PRECISE_SMC */

    assert_page_locked(p);

#if defined(TARGET_HAS_PRECISE_SMC)
    if (cpu != NULL) {
        env = cpu->env_ptr;
    }
#endif

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all
       the code */
    PAGE_FOR_EACH_TB(p, tb, n) {
        assert_page_locked(p);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (cpu->mem_io_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tcg_tb_lookup(cpu->mem_io_pc);
                }
            }
            if (current_tb == tb &&
                (tb_cflags(current_tb) & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */
                current_tb_modified = 1;
                cpu_restore_state_from_tb(cpu, current_tb,
                                          cpu->mem_io_pc, true);
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                     &current_flags);
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            tb_phys_invalidate__locked(tb);
        }
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        tlb_unprotect_code(start);
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        page_collection_unlock(pages);
        /* Force execution of one insn next time.  */
        cpu->cflags_next_tb = 1 | curr_cflags();
        mmap_unlock();
        cpu_loop_exit_noexc(cpu);
    }
#endif
}

/*
 * Invalidate all TBs which intersect with the target physical address range
 * [start;end[. NOTE: start and end must refer to the *same* physical page.
 * 'is_cpu_write_access' should be true if called from a real cpu write
 * access: the virtual CPU will exit the current TB if code is modified inside
 * this TB.
 *
 * Called with mmap_lock held for user-mode emulation.
 */
void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
                                   int is_cpu_write_access)
{
    struct page_collection *pages;
    PageDesc *p;

    assert_memory_lock();

    p = page_find(start >> TARGET_PAGE_BITS);
    if (p == NULL) {
        return;
    }
    pages = page_collection_lock(start, end);
    tb_invalidate_phys_page_range__locked(pages, p, start, end,
                                          is_cpu_write_access);
    page_collection_unlock(pages);
}

/*
 * Invalidate all TBs which intersect with the target physical address range
 * [start;end[. NOTE: start and end may refer to *different* physical pages.
 * 'is_cpu_write_access' should be true if called from a real cpu write
 * access: the virtual CPU will exit the current TB if code is modified inside
 * this TB.
 *
 * Called with mmap_lock held for user-mode emulation.
 */
#ifdef CONFIG_SOFTMMU
void tb_invalidate_phys_range(ram_addr_t start, ram_addr_t end)
#else
void tb_invalidate_phys_range(target_ulong start, target_ulong end)
#endif
{
    struct page_collection *pages;
    tb_page_addr_t next;

    assert_memory_lock();

    pages = page_collection_lock(start, end);
    for (next = (start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
         start < end;
         start = next, next += TARGET_PAGE_SIZE) {
        PageDesc *pd = page_find(start >> TARGET_PAGE_BITS);
        tb_page_addr_t bound = MIN(next, end);

        if (pd == NULL) {
            continue;
        }
        tb_invalidate_phys_page_range__locked(pages, pd, start, bound, 0);
    }
    page_collection_unlock(pages);
}

#ifdef CONFIG_SOFTMMU
/* len must be <= 8 and start must be a multiple of len.
 * Called via softmmu_template.h when code areas are written to with
 * iothread mutex not held.
 *
 * Call with all @pages in the range [@start, @start + len[ locked.
 */
void tb_invalidate_phys_page_fast(struct page_collection *pages,
                                  tb_page_addr_t start, int len)
{
    PageDesc *p;

    assert_memory_lock();

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p) {
        return;
    }

    assert_page_locked(p);
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD) {
        build_page_bitmap(p);
    }
    if (p->code_bitmap) {
        unsigned int nr;
        unsigned long b;

        nr = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG - 1));
        if (b & ((1 << len) - 1)) {
            goto do_invalidate;
        }
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range__locked(pages, p, start, start + len, 1);
    }
}
#else
/* Called with mmap_lock held. If pc is not 0 then it indicates the
 * host PC of the faulting store instruction that caused this invalidate.
 * Returns true if the caller needs to abort execution of the current
 * TB (because it was modified by this store and the guest CPU has
 * precise-SMC semantics).
 */
static bool tb_invalidate_phys_page(tb_page_addr_t addr, uintptr_t pc)
{
    TranslationBlock *tb;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    TranslationBlock *current_tb = NULL;
    CPUState *cpu = current_cpu;
    CPUArchState *env = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    uint32_t current_flags = 0;
#endif

    assert_memory_lock();

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        return false;
    }

#ifdef TARGET_HAS_PRECISE_SMC
    if (p->first_tb && pc != 0) {
        current_tb = tcg_tb_lookup(pc);
    }
    if (cpu != NULL) {
        env = cpu->env_ptr;
    }
#endif
    assert_page_locked(p);
    PAGE_FOR_EACH_TB(p, tb, n) {
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            (tb_cflags(current_tb) & CF_COUNT_MASK) != 1) {
            /* If we are modifying the current TB, we must stop
               its execution. We could be more precise by checking
               that the modification is after the current PC, but it
               would require a specialized function to partially
               restore the CPU state */
            current_tb_modified = 1;
            cpu_restore_state_from_tb(cpu, current_tb, pc, true);
            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                 &current_flags);
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
    }
    p->first_tb = (uintptr_t)NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* Force execution of one insn next time.  */
        cpu->cflags_next_tb = 1 | curr_cflags();
        return true;
    }
#endif

    return false;
}
#endif

/* user-mode: call with mmap_lock held */
void tb_check_watchpoint(CPUState *cpu)
{
    TranslationBlock *tb;

    assert_memory_lock();

    tb = tcg_tb_lookup(cpu->mem_io_pc);
    if (tb) {
        /* We can use retranslation to find the PC.  */
        cpu_restore_state_from_tb(cpu, tb, cpu->mem_io_pc, true);
        tb_phys_invalidate(tb, -1);
    } else {
        /* The exception probably happened in a helper.  The CPU state should
           have been saved before calling it. Fetch the PC from there.  */
        CPUArchState *env = cpu->env_ptr;
        target_ulong pc, cs_base;
        tb_page_addr_t addr;
        uint32_t flags;

        cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
        addr = get_page_addr_code(env, pc);
        if (addr != -1) {
            tb_invalidate_phys_range(addr, addr + 1);
        }
    }
}

#ifndef CONFIG_USER_ONLY
/* in deterministic execution mode, instructions doing device I/Os
 * must be at the end of the TB.
 *
 * Called by softmmu_template.h, with iothread mutex not held.
 */
void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr)
{
#if defined(TARGET_MIPS) || defined(TARGET_SH4)
    CPUArchState *env = cpu->env_ptr;
#endif
    TranslationBlock *tb;
    uint32_t n;

    tb = tcg_tb_lookup(retaddr);
    if (!tb) {
        cpu_abort(cpu, "cpu_io_recompile: could not find TB for pc=%p",
                  (void *)retaddr);
    }
    cpu_restore_state_from_tb(cpu, tb, retaddr, true);

    /* On MIPS and SH, delay slot instructions can only be restarted if
       they were already the first instruction in the TB.  If this is not
       the first instruction in a TB then re-execute the preceding
       branch.  */
    n = 1;
#if defined(TARGET_MIPS)
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0
        && env->active_tc.PC != tb->pc) {
        env->active_tc.PC -= (env->hflags & MIPS_HFLAG_B16 ? 2 : 4);
        cpu->icount_decr.u16.low++;
        env->hflags &= ~MIPS_HFLAG_BMASK;
        n = 2;
    }
#elif defined(TARGET_SH4)
    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
        && env->pc != tb->pc) {
        env->pc -= 2;
        cpu->icount_decr.u16.low++;
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
        n = 2;
    }
#endif

    /* Generate a new TB executing the I/O insn.  */
    cpu->cflags_next_tb = curr_cflags() | CF_LAST_IO | n;

    if (tb_cflags(tb) & CF_NOCACHE) {
        if (tb->orig_tb) {
            /* Invalidate original TB if this TB was generated in
             * cpu_exec_nocache() */
            tb_phys_invalidate(tb->orig_tb, -1);
        }
        tcg_tb_remove(tb);
    }

    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
     * the first in the TB) then we end up generating a whole new TB and
     * repeating the fault, which is horribly inefficient.
     * Better would be to execute just this insn uncached, or generate a
     * second new TB.
     */
    cpu_loop_exit_noexc(cpu);
}

static void tb_jmp_cache_clear_page(CPUState *cpu, target_ulong page_addr)
{
    unsigned int i, i0 = tb_jmp_cache_hash_page(page_addr);

    for (i = 0; i < TB_JMP_PAGE_SIZE; i++) {
        atomic_set(&cpu->tb_jmp_cache[i0 + i], NULL);
    }
}

void tb_flush_jmp_cache(CPUState *cpu, target_ulong addr)
{
    /* Discard jump cache entries for any tb which might potentially
       overlap the flushed page.  */
    tb_jmp_cache_clear_page(cpu, addr - TARGET_PAGE_SIZE);
    tb_jmp_cache_clear_page(cpu, addr);
}

static void print_qht_statistics(FILE *f, fprintf_function cpu_fprintf,
                                 struct qht_stats hst)
{
    uint32_t hgram_opts;
    size_t hgram_bins;
    char *hgram;

    if (!hst.head_buckets) {
        return;
    }
    cpu_fprintf(f, "TB hash buckets     %zu/%zu (%0.2f%% head buckets used)\n",
                hst.used_head_buckets, hst.head_buckets,
                (double)hst.used_head_buckets / hst.head_buckets * 100);

    hgram_opts =  QDIST_PR_BORDER | QDIST_PR_LABELS;
    hgram_opts |= QDIST_PR_100X   | QDIST_PR_PERCENT;
    if (qdist_xmax(&hst.occupancy) - qdist_xmin(&hst.occupancy) == 1) {
        hgram_opts |= QDIST_PR_NODECIMAL;
    }
    hgram = qdist_pr(&hst.occupancy, 10, hgram_opts);
    cpu_fprintf(f, "TB hash occupancy   %0.2f%% avg chain occ. Histogram: %s\n",
                qdist_avg(&hst.occupancy) * 100, hgram);
    g_free(hgram);

    hgram_opts = QDIST_PR_BORDER | QDIST_PR_LABELS;
    hgram_bins = qdist_xmax(&hst.chain) - qdist_xmin(&hst.chain);
    if (hgram_bins > 10) {
        hgram_bins = 10;
    } else {
        hgram_bins = 0;
        hgram_opts |= QDIST_PR_NODECIMAL | QDIST_PR_NOBINRANGE;
    }
    hgram = qdist_pr(&hst.chain, hgram_bins, hgram_opts);
    cpu_fprintf(f, "TB hash avg chain   %0.3f buckets. Histogram: %s\n",
                qdist_avg(&hst.chain), hgram);
    g_free(hgram);
}

struct tb_tree_stats {
    size_t nb_tbs;
    size_t host_size;
    size_t target_size;
    size_t max_target_size;
    size_t direct_jmp_count;
    size_t direct_jmp2_count;
    size_t cross_page;
};

static gboolean tb_tree_stats_iter(gpointer key, gpointer value, gpointer data)
{
    const TranslationBlock *tb = value;
    struct tb_tree_stats *tst = data;

    tst->nb_tbs++;
    tst->host_size += tb->tc.size;
    tst->target_size += tb->size;
    if (tb->size > tst->max_target_size) {
        tst->max_target_size = tb->size;
    }
    if (tb->page_addr[1] != -1) {
        tst->cross_page++;
    }
    if (tb->jmp_reset_offset[0] != TB_JMP_RESET_OFFSET_INVALID) {
        tst->direct_jmp_count++;
        if (tb->jmp_reset_offset[1] != TB_JMP_RESET_OFFSET_INVALID) {
            tst->direct_jmp2_count++;
        }
    }
    return false;
}

void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
{
    struct tb_tree_stats tst = {};
    struct qht_stats hst;
    size_t nb_tbs, flush_full, flush_part, flush_elide;

    tcg_tb_foreach(tb_tree_stats_iter, &tst);
    nb_tbs = tst.nb_tbs;
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "Translation buffer state:\n");
    /*
     * Report total code size including the padding and TB structs;
     * otherwise users might think "-tb-size" is not honoured.
     * For avg host size we use the precise numbers from tb_tree_stats though.
     */
    cpu_fprintf(f, "gen code size       %zu/%zu\n",
                tcg_code_size(), tcg_code_capacity());
    cpu_fprintf(f, "TB count            %zu\n", nb_tbs);
    cpu_fprintf(f, "TB avg target size  %zu max=%zu bytes\n",
                nb_tbs ? tst.target_size / nb_tbs : 0,
                tst.max_target_size);
    cpu_fprintf(f, "TB avg host size    %zu bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? tst.host_size / nb_tbs : 0,
                tst.target_size ? (double)tst.host_size / tst.target_size : 0);
    cpu_fprintf(f, "cross page TB count %zu (%zu%%)\n", tst.cross_page,
                nb_tbs ? (tst.cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count   %zu (%zu%%) (2 jumps=%zu %zu%%)\n",
                tst.direct_jmp_count,
                nb_tbs ? (tst.direct_jmp_count * 100) / nb_tbs : 0,
                tst.direct_jmp2_count,
                nb_tbs ? (tst.direct_jmp2_count * 100) / nb_tbs : 0);

    qht_statistics_init(&tb_ctx.htable, &hst);
    print_qht_statistics(f, cpu_fprintf, hst);
    qht_statistics_destroy(&hst);

    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count      %u\n",
                atomic_read(&tb_ctx.tb_flush_count));
    cpu_fprintf(f, "TB invalidate count %zu\n", tcg_tb_phys_invalidate_count());

    tlb_flush_counts(&flush_full, &flush_part, &flush_elide);
    cpu_fprintf(f, "TLB full flushes    %zu\n", flush_full);
    cpu_fprintf(f, "TLB partial flushes %zu\n", flush_part);
    cpu_fprintf(f, "TLB elided flushes  %zu\n", flush_elide);
    tcg_dump_info(f, cpu_fprintf);
}

void dump_opcount_info(FILE *f, fprintf_function cpu_fprintf)
{
    tcg_dump_op_count(f, cpu_fprintf);
}

#else /* CONFIG_USER_ONLY */

void cpu_interrupt(CPUState *cpu, int mask)
{
    g_assert(qemu_mutex_iothread_locked());
    cpu->interrupt_request |= mask;
    atomic_set(&cpu->icount_decr.u16.high, -1);
}

/*
 * Walks guest process memory "regions" one by one
 * and calls callback function 'fn' for each region.
 */
struct walk_memory_regions_data {
    walk_memory_regions_fn fn;
    void *priv;
    target_ulong start;
    int prot;
};

static int walk_memory_regions_end(struct walk_memory_regions_data *data,
                                   target_ulong end, int new_prot)
{
    if (data->start != -1u) {
        int rc = data->fn(data->priv, data->start, end, data->prot);
        if (rc != 0) {
            return rc;
        }
    }

    data->start = (new_prot ? end : -1u);
    data->prot = new_prot;

    return 0;
}

static int walk_memory_regions_1(struct walk_memory_regions_data *data,
                                 target_ulong base, int level, void **lp)
{
    target_ulong pa;
    int i, rc;

    if (*lp == NULL) {
        return walk_memory_regions_end(data, base, 0);
    }

    if (level == 0) {
        PageDesc *pd = *lp;

        for (i = 0; i < V_L2_SIZE; ++i) {
            int prot = pd[i].flags;

            pa = base | (i << TARGET_PAGE_BITS);
            if (prot != data->prot) {
                rc = walk_memory_regions_end(data, pa, prot);
                if (rc != 0) {
                    return rc;
                }
            }
        }
    } else {
        void **pp = *lp;

        for (i = 0; i < V_L2_SIZE; ++i) {
            pa = base | ((target_ulong)i <<
                (TARGET_PAGE_BITS + V_L2_BITS * level));
            rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
            if (rc != 0) {
                return rc;
            }
        }
    }

    return 0;
}

int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
{
    struct walk_memory_regions_data data;
    uintptr_t i, l1_sz = v_l1_size;

    data.fn = fn;
    data.priv = priv;
    data.start = -1u;
    data.prot = 0;

    for (i = 0; i < l1_sz; i++) {
        target_ulong base = i << (v_l1_shift + TARGET_PAGE_BITS);
        int rc = walk_memory_regions_1(&data, base, v_l2_levels, l1_map + i);
        if (rc != 0) {
            return rc;
        }
    }

    return walk_memory_regions_end(&data, 0, 0);
}

static int dump_region(void *priv, target_ulong start,
                       target_ulong end, unsigned long prot)
{
    FILE *f = (FILE *)priv;

    (void) fprintf(f, TARGET_FMT_lx"-"TARGET_FMT_lx
                   " "TARGET_FMT_lx" %c%c%c\n",
                   start, end, end - start,
                   ((prot & PAGE_READ) ? 'r' : '-'),
                   ((prot & PAGE_WRITE) ? 'w' : '-'),
                   ((prot & PAGE_EXEC) ? 'x' : '-'));

    return 0;
}

/* dump memory mappings */
void page_dump(FILE *f)
{
    const int length = sizeof(target_ulong) * 2;
    (void) fprintf(f, "%-*s %-*s %-*s %s\n",
                   length, "start", length, "end", length, "size", "prot");
    walk_memory_regions(f, dump_region);
}

int page_get_flags(target_ulong address)
{
    PageDesc *p;

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p) {
        return 0;
    }
    return p->flags;
}

/* Modify the flags of a page and invalidate the code if necessary.
   The flag PAGE_WRITE_ORG is positioned automatically depending
   on PAGE_WRITE.  The mmap_lock should already be held.  */
void page_set_flags(target_ulong start, target_ulong end, int flags)
{
    target_ulong addr, len;

    /* This function should never be called with addresses outside the
       guest address space.  If this assert fires, it probably indicates
       a missing call to h2g_valid.  */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(end <= ((target_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
#endif
    assert(start < end);
    assert_memory_lock();

    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    if (flags & PAGE_WRITE) {
        flags |= PAGE_WRITE_ORG;
    }

    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);

        /* If the write protection bit is set, then we invalidate
           the code inside.  */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr, 0);
        }
        p->flags = flags;
    }
}

int page_check_range(target_ulong start, target_ulong len, int flags)
{
    PageDesc *p;
    target_ulong end;
    target_ulong addr;

    /* This function should never be called with addresses outside the
       guest address space.  If this assert fires, it probably indicates
       a missing call to h2g_valid.  */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(start < ((target_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
#endif

    if (len == 0) {
        return 0;
    }
    if (start + len - 1 < start) {
        /* We've wrapped around.  */
        return -1;
    }

    /* must do before we lose bits in the next step */
    end = TARGET_PAGE_ALIGN(start + len);
    start = start & TARGET_PAGE_MASK;

    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        p = page_find(addr >> TARGET_PAGE_BITS);
        if (!p) {
            return -1;
        }
        if (!(p->flags & PAGE_VALID)) {
            return -1;
        }

        if ((flags & PAGE_READ) && !(p->flags & PAGE_READ)) {
            return -1;
        }
        if (flags & PAGE_WRITE) {
            if (!(p->flags & PAGE_WRITE_ORG)) {
                return -1;
            }
            /* unprotect the page if it was put read-only because it
               contains translated code */
            if (!(p->flags & PAGE_WRITE)) {
                if (!page_unprotect(addr, 0)) {
                    return -1;
                }
            }
        }
    }
    return 0;
}

/* called from signal handler: invalidate the code and unprotect the
 * page. Return 0 if the fault was not handled, 1 if it was handled,
 * and 2 if it was handled but the caller must cause the TB to be
 * immediately exited. (We can only return 2 if the 'pc' argument is
 * non-zero.)
 */
int page_unprotect(target_ulong address, uintptr_t pc)
{
    unsigned int prot;
    bool current_tb_invalidated;
    PageDesc *p;
    target_ulong host_start, host_end, addr;

    /* Technically this isn't safe inside a signal handler.  However we
       know this only ever happens in a synchronous SEGV handler, so in
       practice it seems to be ok.  */
    mmap_lock();

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p) {
        mmap_unlock();
        return 0;
    }

    /* if the page was really writable, then we change its
       protection back to writable */
    if (p->flags & PAGE_WRITE_ORG) {
        current_tb_invalidated = false;
        if (p->flags & PAGE_WRITE) {
            /* If the page is actually marked WRITE then assume this is because
             * this thread raced with another one which got here first and
             * set the page to PAGE_WRITE and did the TB invalidate for us.
             */
#ifdef TARGET_HAS_PRECISE_SMC
            TranslationBlock *current_tb = tcg_tb_lookup(pc);
            if (current_tb) {
                current_tb_invalidated = tb_cflags(current_tb) & CF_INVALID;
            }
#endif
        } else {
            host_start = address & qemu_host_page_mask;
            host_end = host_start + qemu_host_page_size;

            prot = 0;
            for (addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE) {
                p = page_find(addr >> TARGET_PAGE_BITS);
                p->flags |= PAGE_WRITE;
                prot |= p->flags;

                /* and since the content will be modified, we must invalidate
                   the corresponding translated code. */
                current_tb_invalidated |= tb_invalidate_phys_page(addr, pc);
#ifdef CONFIG_USER_ONLY
                if (DEBUG_TB_CHECK_GATE) {
                    tb_invalidate_check(addr);
                }
#endif
            }
            mprotect((void *)g2h(host_start), qemu_host_page_size,
                     prot & PAGE_BITS);
        }
        mmap_unlock();
        /* If current TB was invalidated return to main loop */
        return current_tb_invalidated ? 2 : 1;
    }
    mmap_unlock();
    return 0;
}
#endif /* CONFIG_USER_ONLY */

/* This is a wrapper for common code that can not use CONFIG_SOFTMMU */
void tcg_flush_softmmu_tlb(CPUState *cs)
{
#ifdef CONFIG_SOFTMMU
    tlb_flush(cs);
#endif
}