/*
 *  Host code generation
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qemu-common.h"

#define NO_CPU_IO_DEFS
#include "trace.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg/tcg.h"
#if defined(CONFIG_USER_ONLY)
#include "qemu.h"
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
#include <sys/param.h>
#if __FreeBSD_version >= 700104
#define HAVE_KINFO_GETVMMAP
#define sigqueue sigqueue_freebsd  /* avoid redefinition */
#include <sys/proc.h>
#include <machine/profile.h>
#define _KERNEL
#include <sys/user.h>
#undef _KERNEL
#undef sigqueue
#include <libutil.h>
#endif
#endif
#else
#include "exec/ram_addr.h"
#endif

#include "exec/cputlb.h"
#include "exec/translate-all.h"
#include "qemu/bitmap.h"
#include "qemu/qemu-print.h"
#include "qemu/timer.h"
#include "qemu/main-loop.h"
#include "exec/log.h"
#include "qemu/etrace.h"
#include "sysemu/cpus.h"
#include "sysemu/cpu-timers.h"
#include "sysemu/tcg.h"
#include "qapi/error.h"
#include "hw/core/tcg-cpu-ops.h"
#include "tb-hash.h"
#include "tb-context.h"
#include "internal.h"

/* #define DEBUG_TB_INVALIDATE */
/* #define DEBUG_TB_FLUSH */
/* make various TB consistency checks */
/* #define DEBUG_TB_CHECK */

#ifdef DEBUG_TB_INVALIDATE
#define DEBUG_TB_INVALIDATE_GATE 1
#else
#define DEBUG_TB_INVALIDATE_GATE 0
#endif

#ifdef DEBUG_TB_FLUSH
#define DEBUG_TB_FLUSH_GATE 1
#else
#define DEBUG_TB_FLUSH_GATE 0
#endif

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif

#ifdef DEBUG_TB_CHECK
#define DEBUG_TB_CHECK_GATE 1
#else
#define DEBUG_TB_CHECK_GATE 0
#endif

/*
 * Access to the various translation structures needs to be serialised
 * via locks for consistency.
 * In user-mode emulation access to the memory-related structures is
 * protected with mmap_lock.
 * In !user-mode we use per-page locks.
 */
#ifdef CONFIG_SOFTMMU
#define assert_memory_lock()
#else
#define assert_memory_lock() tcg_debug_assert(have_mmap_lock())
#endif

#define SMC_BITMAP_USE_THRESHOLD 10

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    uintptr_t first_tb;
#ifdef CONFIG_SOFTMMU
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned long *code_bitmap;
    unsigned int code_write_count;
#else
    unsigned long flags;
    void *target_data;
#endif
#ifndef CONFIG_USER_ONLY
    QemuSpin lock;
#endif
} PageDesc;

/**
 * struct page_entry - page descriptor entry
 * @pd:     pointer to the &struct PageDesc of the page this entry represents
 * @index:  page index of the page
 * @locked: whether the page is locked
 *
 * This struct helps us keep track of the locked state of a page, without
 * bloating &struct PageDesc.
 *
 * A page lock protects accesses to all fields of &struct PageDesc.
 *
 * See also: &struct page_collection.
 */
struct page_entry {
    PageDesc *pd;
    tb_page_addr_t index;
    bool locked;
};

/**
 * struct page_collection - tracks a set of pages (i.e. &struct page_entry's)
 * @tree:   Binary search tree (BST) of the pages, with key == page index
 * @max:    Pointer to the page in @tree with the highest page index
 *
 * To avoid deadlock we lock pages in ascending order of page index.
 * When operating on a range of pages, we need to lock the pages that
 * intersect the range as well as any extra pages brought in by the TBs
 * found on those pages.  Pages are kept in the BST keyed by page index,
 * and we track the maximum index seen so far: pages above the current
 * maximum can simply be locked, whereas pages at or below it must be
 * trylock'd to preserve the locking order (see page_trylock_add()).
 *
 * Destroying the tree (see page_collection_unlock()) unlocks and frees
 * all of its entries via page_entry_destroy().
 */
struct page_collection {
    GTree *tree;
    struct page_entry *max;
};

/* list iterators for lists of tagged pointers in TranslationBlock */
#define TB_FOR_EACH_TAGGED(head, tb, n, field)                          \
    for (n = (head) & 1, tb = (TranslationBlock *)((head) & ~1);        \
         tb; tb = (TranslationBlock *)tb->field[n], n = (uintptr_t)tb & 1, \
             tb = (TranslationBlock *)((uintptr_t)tb & ~1))

#define PAGE_FOR_EACH_TB(pagedesc, tb, n)                       \
    TB_FOR_EACH_TAGGED((pagedesc)->first_tb, tb, n, page_next)

#define TB_FOR_EACH_JMP(head_tb, tb, n)                                 \
    TB_FOR_EACH_TAGGED((head_tb)->jmp_list_head, tb, n, jmp_list_next)

/*
 * In system mode we want L1_MAP to be based on ram offsets,
 * while in user mode we want it to be based on virtual addresses.
 */
#if !defined(CONFIG_USER_ONLY)
#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
# define L1_MAP_ADDR_SPACE_BITS  HOST_LONG_BITS
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_PHYS_ADDR_SPACE_BITS
#endif
#else
# define L1_MAP_ADDR_SPACE_BITS  MIN(HOST_LONG_BITS, TARGET_ABI_BITS)
#endif

/* Size of the L2 (and L3, etc) page tables.  */
#define V_L2_BITS 10
#define V_L2_SIZE (1 << V_L2_BITS)

/* Make sure all possible CPU event bits fit in tb->trace_vcpu_dstate */
QEMU_BUILD_BUG_ON(CPU_TRACE_DSTATE_MAX_EVENTS >
                  sizeof_field(TranslationBlock, trace_vcpu_dstate)
                  * BITS_PER_BYTE);

/*
 * L1 Mapping properties
 */
static int v_l1_size;
static int v_l1_shift;
static int v_l2_levels;

/*
 * The bottom level has pointers to PageDesc, and is indexed by
 * anything from 4 to (V_L2_BITS + 3) bits.
 */
#define V_L1_MIN_BITS 4
#define V_L1_MAX_BITS (V_L2_BITS + 3)
#define V_L1_MAX_SIZE (1 << V_L1_MAX_BITS)

static void *l1_map[V_L1_MAX_SIZE];

TBContext tb_ctx;

static void page_table_config_init(void)
{
    uint32_t v_l1_bits;

    assert(TARGET_PAGE_BITS);

    v_l1_bits = (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % V_L2_BITS;
    if (v_l1_bits < V_L1_MIN_BITS) {
        v_l1_bits += V_L2_BITS;
    }

    v_l1_size = 1 << v_l1_bits;
    v_l1_shift = L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - v_l1_bits;
    v_l2_levels = v_l1_shift / V_L2_BITS - 1;

    assert(v_l1_bits <= V_L1_MAX_BITS);
    assert(v_l1_shift % V_L2_BITS == 0);
    assert(v_l2_levels >= 0);
}
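
/*
 * Worked example (an illustration, not normative): on a 64-bit host running
 * a 64-bit user-mode guest with 4 KiB pages, L1_MAP_ADDR_SPACE_BITS is 64
 * and TARGET_PAGE_BITS is 12, so (64 - 12) % 10 == 2, which is below
 * V_L1_MIN_BITS and gets bumped to 12.  That yields v_l1_size == 4096,
 * v_l1_shift == 40 and v_l2_levels == 3: a page index is consumed as one
 * 12-bit L1 slice followed by three 10-bit radix levels.
 */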

/* Encode VAL as a signed leb128, store the result in P.
   Return P incremented past the encoded value.  */
static uint8_t *encode_sleb128(uint8_t *p, target_long val)
{
    int more, byte;

    do {
        byte = val & 0x7f;
        val >>= 7;
        more = !((val == 0 && (byte & 0x40) == 0)
                 || (val == -1 && (byte & 0x40) != 0));
        if (more) {
            byte |= 0x80;
        }
        *p++ = byte;
    } while (more);

    return p;
}

/* Decode a signed leb128 sequence at *PP; increment *PP past the
   decoded value.  Return the decoded value.  */
static target_long decode_sleb128(const uint8_t **pp)
{
    const uint8_t *p = *pp;
    target_long val = 0;
    int byte, shift = 0;

    do {
        byte = *p++;
        val |= (target_ulong)(byte & 0x7f) << shift;
        shift += 7;
    } while (byte & 0x80);
    if (shift < TARGET_LONG_BITS && (byte & 0x40)) {
        val |= -(target_ulong)1 << shift;
    }

    *pp = p;
    return val;
}
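
/*
 * Round-trip example (illustrative only): encode_sleb128(p, -1) emits the
 * single byte 0x7f, since after the first iteration val == -1 and bit 6
 * of the byte is set.  Encoding +64 needs two bytes, 0xc0 0x00: bit 6 of
 * the first byte is set, so a zero continuation byte is required to mark
 * the value as positive.  decode_sleb128() reverses both by
 * sign-extending from bit 6 of the last byte consumed.
 */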

/* Encode the data collected about the instructions while compiling TB.
   Place the data at BLOCK, and return the number of bytes consumed.

   The logical table consists of TARGET_INSN_START_WORDS target_ulong's,
   which come from the target's insn_start data, followed by a uintptr_t
   which comes from the host pc of the end of the code implementing the insn.

   Each line of the table is encoded as sleb128 deltas from the previous
   line.  The seed for the first line is { tb->pc, 0..., tb->tc.ptr }.
   That is, the first column is seeded with the guest pc, the last column
   with the host pc, and the middle columns with zeros.  */

static int encode_search(TranslationBlock *tb, uint8_t *block)
{
    uint8_t *highwater = tcg_ctx->code_gen_highwater;
    uint8_t *p = block;
    int i, j, n;

    for (i = 0, n = tb->icount; i < n; ++i) {
        target_ulong prev;

        for (j = 0; j < TARGET_INSN_START_WORDS; ++j) {
            if (i == 0) {
                prev = (j == 0 ? tb->pc : 0);
            } else {
                prev = tcg_ctx->gen_insn_data[i - 1][j];
            }
            p = encode_sleb128(p, tcg_ctx->gen_insn_data[i][j] - prev);
        }
        prev = (i == 0 ? 0 : tcg_ctx->gen_insn_end_off[i - 1]);
        p = encode_sleb128(p, tcg_ctx->gen_insn_end_off[i] - prev);

        /* Test for (pending) buffer overflow.  The assumption is that any
           one row beginning below the high water mark cannot overrun
           the buffer completely.  Thus we can test for overflow after
           encoding a row without having to check during encoding.  */
        if (unlikely(p > highwater)) {
            return -1;
        }
    }

    return p - block;
}

/*
 * The cpu state corresponding to 'searched_pc' is restored.
 * When reset_icount is true, the current TB will be interrupted and
 * icount should be recalculated.
 */
static int cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb,
                                     uintptr_t searched_pc, bool reset_icount)
{
    target_ulong data[TARGET_INSN_START_WORDS] = { tb->pc };
    uintptr_t host_pc = (uintptr_t)tb->tc.ptr;
    CPUArchState *env = cpu->env_ptr;
    const uint8_t *p = tb->tc.ptr + tb->tc.size;
    int i, j, num_insns = tb->icount;
#ifdef CONFIG_PROFILER
    TCGProfile *prof = &tcg_ctx->prof;
    int64_t ti = profile_getclock();
#endif

    searched_pc -= GETPC_ADJ;

    if (searched_pc < host_pc) {
        return -1;
    }

    /* Reconstruct the stored insn data while looking for the point at
       which the end of the insn exceeds the searched_pc.  */
    for (i = 0; i < num_insns; ++i) {
        for (j = 0; j < TARGET_INSN_START_WORDS; ++j) {
            data[j] += decode_sleb128(&p);
        }
        host_pc += decode_sleb128(&p);
        if (host_pc > searched_pc) {
            goto found;
        }
    }
    return -1;

 found:
    if (reset_icount && (tb_cflags(tb) & CF_USE_ICOUNT)) {
        assert(icount_enabled());
        /* Reset the cycle counter to the start of the block and
           shift it by the number of actually executed instructions.  */
        cpu_neg(cpu)->icount_decr.u16.low += num_insns - i;
    }
    restore_state_to_opc(env, tb, data);

#ifdef CONFIG_PROFILER
    qatomic_set(&prof->restore_time,
                prof->restore_time + profile_getclock() - ti);
    qatomic_set(&prof->restore_count, prof->restore_count + 1);
#endif
    return 0;
}

bool cpu_restore_state(CPUState *cpu, uintptr_t host_pc, bool will_exit)
{
    /*
     * The host_pc has to be in the rx region of the code buffer.
     * If it is not we will not be able to resolve it here.
     * The two cases where host_pc will not be correct are:
     *
     *  - fault during translation (instruction fetch)
     *  - fault from helper (not using GETPC() macro)
     *
     * Either way we need return early as we can't resolve it here.
     */
    if (in_code_gen_buffer((const void *)(host_pc - tcg_splitwx_diff))) {
        TranslationBlock *tb = tcg_tb_lookup(host_pc);
        if (tb) {
            cpu_restore_state_from_tb(cpu, tb, host_pc, will_exit);
            return true;
        }
    }
    return false;
}
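
/*
 * Usage sketch (hypothetical caller): a helper that is about to raise a
 * guest exception typically does
 *
 *     cpu_restore_state(cs, GETPC(), true);
 *
 * so that the guest PC (and related fields) are rewound from the
 * generated-code return address before the exception is delivered.
 */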

void page_init(void)
{
    page_size_init();
    page_table_config_init();

#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
    {
#ifdef HAVE_KINFO_GETVMMAP
        struct kinfo_vmentry *freep;
        int i, cnt;

        freep = kinfo_getvmmap(getpid(), &cnt);
        if (freep) {
            mmap_lock();
            for (i = 0; i < cnt; i++) {
                unsigned long startaddr, endaddr;

                startaddr = freep[i].kve_start;
                endaddr = freep[i].kve_end;
                if (h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                    } else {
#if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
                        endaddr = ~0ul;
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
#endif
                    }
                }
            }
            free(freep);
            mmap_unlock();
        }
#else
        FILE *f;

        last_brk = (unsigned long)sbrk(0);

        f = fopen("/compat/linux/proc/self/maps", "r");
        if (f) {
            mmap_lock();

            do {
                unsigned long startaddr, endaddr;
                int n;

                n = fscanf(f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);

                if (n == 2 && h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                    } else {
                        endaddr = ~0ul;
                    }
                    page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                }
            } while (!feof(f));

            fclose(f);
            mmap_unlock();
        }
#endif
    }
#endif
}

static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
{
    PageDesc *pd;
    void **lp;
    int i;

    /* Level 1.  Always allocated.  */
    lp = l1_map + ((index >> v_l1_shift) & (v_l1_size - 1));

    /* Level 2..N-1.  */
    for (i = v_l2_levels; i > 0; i--) {
        void **p = qatomic_rcu_read(lp);

        if (p == NULL) {
            void *existing;

            if (!alloc) {
                return NULL;
            }
            p = g_new0(void *, V_L2_SIZE);
            existing = qatomic_cmpxchg(lp, NULL, p);
            if (unlikely(existing)) {
                g_free(p);
                p = existing;
            }
        }

        lp = p + ((index >> (i * V_L2_BITS)) & (V_L2_SIZE - 1));
    }

    pd = qatomic_rcu_read(lp);
    if (pd == NULL) {
        void *existing;

        if (!alloc) {
            return NULL;
        }
        pd = g_new0(PageDesc, V_L2_SIZE);
#ifndef CONFIG_USER_ONLY
        {
            int i;

            for (i = 0; i < V_L2_SIZE; i++) {
                qemu_spin_init(&pd[i].lock);
            }
        }
#endif
        existing = qatomic_cmpxchg(lp, NULL, pd);
        if (unlikely(existing)) {
#ifndef CONFIG_USER_ONLY
            {
                int i;

                for (i = 0; i < V_L2_SIZE; i++) {
                    qemu_spin_destroy(&pd[i].lock);
                }
            }
#endif
            g_free(pd);
            pd = existing;
        }
    }

    return pd + (index & (V_L2_SIZE - 1));
}
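
/*
 * Allocation above is lock-free: each level is installed with a
 * compare-and-swap against NULL, and a losing thread simply frees its
 * freshly allocated table and continues down the winner's.  Readers only
 * need qatomic_rcu_read() because a published table is never replaced.
 */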

static inline PageDesc *page_find(tb_page_addr_t index)
{
    return page_find_alloc(index, 0);
}

static void page_lock_pair(PageDesc **ret_p1, tb_page_addr_t phys1,
                           PageDesc **ret_p2, tb_page_addr_t phys2, int alloc);

/* In user-mode page locks aren't used; mmap_lock is enough */
#ifdef CONFIG_USER_ONLY

#define assert_page_locked(pd) tcg_debug_assert(have_mmap_lock())

static inline void page_lock(PageDesc *pd)
{ }

static inline void page_unlock(PageDesc *pd)
{ }

static inline void page_lock_tb(const TranslationBlock *tb)
{ }

static inline void page_unlock_tb(const TranslationBlock *tb)
{ }

struct page_collection *
page_collection_lock(tb_page_addr_t start, tb_page_addr_t end)
{
    return NULL;
}

void page_collection_unlock(struct page_collection *set)
{ }
#else /* !CONFIG_USER_ONLY */

#ifdef CONFIG_DEBUG_TCG

/* per-thread set of PageDescs currently locked, for lock-order checking */
static __thread GHashTable *ht_pages_locked_debug;

static void ht_pages_locked_debug_init(void)
{
    if (ht_pages_locked_debug) {
        return;
    }
    ht_pages_locked_debug = g_hash_table_new(NULL, NULL);
}

static bool page_is_locked(const PageDesc *pd)
{
    PageDesc *found;

    ht_pages_locked_debug_init();
    found = g_hash_table_lookup(ht_pages_locked_debug, pd);
    return !!found;
}

static void page_lock__debug(PageDesc *pd)
{
    ht_pages_locked_debug_init();
    g_assert(!page_is_locked(pd));
    g_hash_table_insert(ht_pages_locked_debug, pd, pd);
}

static void page_unlock__debug(const PageDesc *pd)
{
    bool removed;

    ht_pages_locked_debug_init();
    g_assert(page_is_locked(pd));
    removed = g_hash_table_remove(ht_pages_locked_debug, pd);
    g_assert(removed);
}

static void
do_assert_page_locked(const PageDesc *pd, const char *file, int line)
{
    if (unlikely(!page_is_locked(pd))) {
        error_report("assert_page_lock: PageDesc %p not locked @ %s:%d",
                     pd, file, line);
        abort();
    }
}

#define assert_page_locked(pd) do_assert_page_locked(pd, __FILE__, __LINE__)

void assert_no_pages_locked(void)
{
    ht_pages_locked_debug_init();
    g_assert(g_hash_table_size(ht_pages_locked_debug) == 0);
}

#else /* !CONFIG_DEBUG_TCG */

#define assert_page_locked(pd)

static inline void page_lock__debug(const PageDesc *pd)
{
}

static inline void page_unlock__debug(const PageDesc *pd)
{
}

#endif /* CONFIG_DEBUG_TCG */

static inline void page_lock(PageDesc *pd)
{
    page_lock__debug(pd);
    qemu_spin_lock(&pd->lock);
}

static inline void page_unlock(PageDesc *pd)
{
    qemu_spin_unlock(&pd->lock);
    page_unlock__debug(pd);
}

/* lock the page(s) of a TB in the correct acquisition order */
static inline void page_lock_tb(const TranslationBlock *tb)
{
    page_lock_pair(NULL, tb->page_addr[0], NULL, tb->page_addr[1], 0);
}

static inline void page_unlock_tb(const TranslationBlock *tb)
{
    PageDesc *p1 = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);

    page_unlock(p1);
    if (unlikely(tb->page_addr[1] != -1)) {
        PageDesc *p2 = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);

        if (p2 != p1) {
            page_unlock(p2);
        }
    }
}

static inline struct page_entry *
page_entry_new(PageDesc *pd, tb_page_addr_t index)
{
    struct page_entry *pe = g_malloc(sizeof(*pe));

    pe->index = index;
    pe->pd = pd;
    pe->locked = false;
    return pe;
}

static void page_entry_destroy(gpointer p)
{
    struct page_entry *pe = p;

    g_assert(pe->locked);
    page_unlock(pe->pd);
    g_free(pe);
}

/* returns false on success */
static bool page_entry_trylock(struct page_entry *pe)
{
    bool busy;

    busy = qemu_spin_trylock(&pe->pd->lock);
    if (!busy) {
        g_assert(!pe->locked);
        pe->locked = true;
        page_lock__debug(pe->pd);
    }
    return busy;
}

static void do_page_entry_lock(struct page_entry *pe)
{
    page_lock(pe->pd);
    g_assert(!pe->locked);
    pe->locked = true;
}

static gboolean page_entry_lock(gpointer key, gpointer value, gpointer data)
{
    struct page_entry *pe = value;

    do_page_entry_lock(pe);
    return FALSE;
}

static gboolean page_entry_unlock(gpointer key, gpointer value, gpointer data)
{
    struct page_entry *pe = value;

    if (pe->locked) {
        pe->locked = false;
        page_unlock(pe->pd);
    }
    return FALSE;
}

/*
 * Trylock a page, and if successful, add the page to a collection.
 * Returns true ("busy") if the page could not be locked; false otherwise.
 */
static bool page_trylock_add(struct page_collection *set, tb_page_addr_t addr)
{
    tb_page_addr_t index = addr >> TARGET_PAGE_BITS;
    struct page_entry *pe;
    PageDesc *pd;

    pe = g_tree_lookup(set->tree, &index);
    if (pe) {
        return false;
    }

    pd = page_find(index);
    if (pd == NULL) {
        return false;
    }

    pe = page_entry_new(pd, index);
    g_tree_insert(set->tree, &pe->index, pe);

    /*
     * If this is either (1) the first insertion or (2) a page whose index
     * is higher than any other so far, just lock the page and move on.
     */
    if (set->max == NULL || pe->index > set->max->index) {
        set->max = pe;
        do_page_entry_lock(pe);
        return false;
    }
    /*
     * Try to acquire out-of-order lock; if busy, return busy so that we
     * acquire locks in order.
     */
    return page_entry_trylock(pe);
}

static gint tb_page_addr_cmp(gconstpointer ap, gconstpointer bp, gpointer udata)
{
    tb_page_addr_t a = *(const tb_page_addr_t *)ap;
    tb_page_addr_t b = *(const tb_page_addr_t *)bp;

    if (a == b) {
        return 0;
    } else if (a < b) {
        return -1;
    }
    return 1;
}

/*
 * Lock a range of pages ([@start,@end[) as well as the pages of all
 * intersecting TBs.
 * Locking order: acquire locks in ascending order of page index.
 */
struct page_collection *
page_collection_lock(tb_page_addr_t start, tb_page_addr_t end)
{
    struct page_collection *set = g_malloc(sizeof(*set));
    tb_page_addr_t index;
    PageDesc *pd;

    start >>= TARGET_PAGE_BITS;
    end >>= TARGET_PAGE_BITS;
    g_assert(start <= end);

    set->tree = g_tree_new_full(tb_page_addr_cmp, NULL, NULL,
                                page_entry_destroy);
    set->max = NULL;
    assert_no_pages_locked();

 retry:
    g_tree_foreach(set->tree, page_entry_lock, NULL);

    for (index = start; index <= end; index++) {
        TranslationBlock *tb;
        int n;

        pd = page_find(index);
        if (pd == NULL) {
            continue;
        }
        if (page_trylock_add(set, index << TARGET_PAGE_BITS)) {
            g_tree_foreach(set->tree, page_entry_unlock, NULL);
            goto retry;
        }
        assert_page_locked(pd);
        PAGE_FOR_EACH_TB(pd, tb, n) {
            if (page_trylock_add(set, tb->page_addr[0]) ||
                (tb->page_addr[1] != -1 &&
                 page_trylock_add(set, tb->page_addr[1]))) {
                /* drop all locks, and reacquire in order */
                g_tree_foreach(set->tree, page_entry_unlock, NULL);
                goto retry;
            }
        }
    }
    return set;
}

void page_collection_unlock(struct page_collection *set)
{
    /* entries are unlocked and freed via page_entry_destroy */
    g_tree_destroy(set->tree);
    g_free(set);
}

#endif /* !CONFIG_USER_ONLY */
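
/*
 * Usage sketch (mirrors tb_invalidate_phys_range() below): callers bracket
 * work on a physical range with the collection lock, e.g.
 *
 *     struct page_collection *pages = page_collection_lock(start, end);
 *     ...invalidate or inspect TBs in [start, end)...
 *     page_collection_unlock(pages);
 *
 * The retry loop above means acquisition can restart from scratch, so the
 * caller must not hold any page lock on entry (see assert_no_pages_locked()).
 */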

static void page_lock_pair(PageDesc **ret_p1, tb_page_addr_t phys1,
                           PageDesc **ret_p2, tb_page_addr_t phys2, int alloc)
{
    PageDesc *p1, *p2;
    tb_page_addr_t page1;
    tb_page_addr_t page2;

    assert_memory_lock();
    g_assert(phys1 != -1);

    page1 = phys1 >> TARGET_PAGE_BITS;
    page2 = phys2 >> TARGET_PAGE_BITS;

    p1 = page_find_alloc(page1, alloc);
    if (ret_p1) {
        *ret_p1 = p1;
    }
    if (likely(phys2 == -1)) {
        page_lock(p1);
        return;
    } else if (page1 == page2) {
        page_lock(p1);
        if (ret_p2) {
            *ret_p2 = p1;
        }
        return;
    }
    p2 = page_find_alloc(page2, alloc);
    if (ret_p2) {
        *ret_p2 = p2;
    }
    /* lock in ascending order of page index to avoid deadlock */
    if (page1 < page2) {
        page_lock(p1);
        page_lock(p2);
    } else {
        page_lock(p2);
        page_lock(p1);
    }
}

static bool tb_cmp(const void *ap, const void *bp)
{
    const TranslationBlock *a = ap;
    const TranslationBlock *b = bp;

    return a->pc == b->pc &&
        a->cs_base == b->cs_base &&
        a->flags == b->flags &&
        (tb_cflags(a) & ~CF_INVALID) == (tb_cflags(b) & ~CF_INVALID) &&
        a->trace_vcpu_dstate == b->trace_vcpu_dstate &&
        a->page_addr[0] == b->page_addr[0] &&
        a->page_addr[1] == b->page_addr[1];
}

void tb_htable_init(void)
{
    unsigned int mode = QHT_MODE_AUTO_RESIZE;

    qht_init(&tb_ctx.htable, tb_cmp, CODE_GEN_HTABLE_SIZE, mode);
}
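
/*
 * Note on lookup (descriptive only): the QHT hash is computed from
 * (phys_pc, pc, flags, cflags, trace_vcpu_dstate) -- see the tb_hash_func()
 * calls in tb_link_page() and do_tb_phys_invalidate() -- while tb_cmp()
 * above additionally checks cs_base and both page addresses to resolve
 * collisions within a bucket.
 */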

/* call with @p->lock held */
static inline void invalidate_page_bitmap(PageDesc *p)
{
    assert_page_locked(p);
#ifdef CONFIG_SOFTMMU
    g_free(p->code_bitmap);
    p->code_bitmap = NULL;
    p->code_write_count = 0;
#endif
}

/* Set to NULL all the 'first_tb' fields in all PageDescs. */
static void page_flush_tb_1(int level, void **lp)
{
    int i;

    if (*lp == NULL) {
        return;
    }
    if (level == 0) {
        PageDesc *pd = *lp;

        for (i = 0; i < V_L2_SIZE; ++i) {
            page_lock(&pd[i]);
            pd[i].first_tb = (uintptr_t)NULL;
            invalidate_page_bitmap(pd + i);
            page_unlock(&pd[i]);
        }
    } else {
        void **pp = *lp;

        for (i = 0; i < V_L2_SIZE; ++i) {
            page_flush_tb_1(level - 1, pp + i);
        }
    }
}

static void page_flush_tb(void)
{
    int i, l1_sz = v_l1_size;

    for (i = 0; i < l1_sz; i++) {
        page_flush_tb_1(v_l2_levels, l1_map + i);
    }
}

static gboolean tb_host_size_iter(gpointer key, gpointer value, gpointer data)
{
    const TranslationBlock *tb = value;
    size_t *size = data;

    *size += tb->tc.size;
    return false;
}

/* flush all the translation blocks */
static void do_tb_flush(CPUState *cpu, run_on_cpu_data tb_flush_count)
{
    bool did_flush = false;

    mmap_lock();
    /* If it is already been done on request of another CPU,
     * just retry.
     */
    if (tb_ctx.tb_flush_count != tb_flush_count.host_int) {
        goto done;
    }
    did_flush = true;

    if (DEBUG_TB_FLUSH_GATE) {
        size_t nb_tbs = tcg_nb_tbs();
        size_t host_size = 0;

        tcg_tb_foreach(tb_host_size_iter, &host_size);
        printf("qemu: flush code_size=%zu nb_tbs=%zu avg_tb_size=%zu\n",
               tcg_code_size(), nb_tbs, nb_tbs > 0 ? host_size / nb_tbs : 0);
    }

    CPU_FOREACH(cpu) {
        cpu_tb_jmp_cache_clear(cpu);
    }

    qht_reset_size(&tb_ctx.htable, CODE_GEN_HTABLE_SIZE);
    page_flush_tb();

    tcg_region_reset_all();
    /* XXX: flush processor icache at this point if cache flush is expensive */
    qatomic_mb_set(&tb_ctx.tb_flush_count, tb_ctx.tb_flush_count + 1);

done:
    mmap_unlock();
    if (did_flush) {
        qemu_plugin_flush_cb();
    }
}

void tb_flush(CPUState *cpu)
{
    if (tcg_enabled()) {
        unsigned tb_flush_count = qatomic_mb_read(&tb_ctx.tb_flush_count);

        if (cpu_in_exclusive_context(cpu)) {
            do_tb_flush(cpu, RUN_ON_CPU_HOST_INT(tb_flush_count));
        } else {
            async_safe_run_on_cpu(cpu, do_tb_flush,
                                  RUN_ON_CPU_HOST_INT(tb_flush_count));
        }
    }
}
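
/*
 * The tb_flush_count snapshot above makes the flush idempotent: the count
 * is read before scheduling the flush, and do_tb_flush() bails out if the
 * counter has already moved on, so several vCPUs racing to request a flush
 * result in a single one.  async_safe_run_on_cpu() defers the work until
 * all vCPUs are quiescent, since no TB may be executing while the code
 * buffer is reset.
 */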

/*
 * Formerly ifdef DEBUG_TB_CHECK. These debug functions are user-mode-only,
 * so in order to prevent bit rot we compile them unconditionally in user-mode,
 * and let the optimizer get rid of them by wrapping their user-only callers
 * with if (DEBUG_TB_CHECK_GATE).
 */
#ifdef CONFIG_USER_ONLY

static void do_tb_invalidate_check(void *p, uint32_t hash, void *userp)
{
    TranslationBlock *tb = p;
    target_ulong addr = *(target_ulong *)userp;

    if (!(addr + TARGET_PAGE_SIZE <= tb->pc || addr >= tb->pc + tb->size)) {
        printf("ERROR invalidate: address=" TARGET_FMT_lx
               " PC=%08lx size=%04x\n", addr, (long)tb->pc, tb->size);
    }
}

/*
 * verify that all the pages have correct rights for code
 *
 * Called with mmap_lock held.
 */
static void tb_invalidate_check(target_ulong address)
{
    address &= TARGET_PAGE_MASK;
    qht_iter(&tb_ctx.htable, do_tb_invalidate_check, &address);
}

static void do_tb_page_check(void *p, uint32_t hash, void *userp)
{
    TranslationBlock *tb = p;
    int flags1, flags2;

    flags1 = page_get_flags(tb->pc);
    flags2 = page_get_flags(tb->pc + tb->size - 1);
    if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
        printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
               (long)tb->pc, tb->size, flags1, flags2);
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    qht_iter(&tb_ctx.htable, do_tb_page_check, NULL);
}

#endif /* CONFIG_USER_ONLY */

/*
 * user-mode: call with mmap_lock held
 * !user-mode: call with @pd->lock held
 */
static inline void tb_page_remove(PageDesc *pd, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    uintptr_t *pprev;
    unsigned int n1;

    assert_page_locked(pd);
    pprev = &pd->first_tb;
    PAGE_FOR_EACH_TB(pd, tb1, n1) {
        if (tb1 == tb) {
            *pprev = tb1->page_next[n1];
            return;
        }
        pprev = &tb1->page_next[n1];
    }
    g_assert_not_reached();
}

/* remove @orig from its @n_orig-th jump list */
static inline void tb_remove_from_jmp_list(TranslationBlock *orig, int n_orig)
{
    uintptr_t ptr, ptr_locked;
    TranslationBlock *dest;
    TranslationBlock *tb;
    uintptr_t *pprev;
    int n;

    /* mark the LSB of jmp_dest[] so that no further jumps can be inserted */
    ptr = qatomic_or_fetch(&orig->jmp_dest[n_orig], 1);
    dest = (TranslationBlock *)(ptr & ~1);
    if (dest == NULL) {
        return;
    }

    qemu_spin_lock(&dest->jmp_lock);
    /*
     * While acquiring the lock, the jump might have been removed if the
     * destination TB was invalidated; check again.
     */
    ptr_locked = qatomic_read(&orig->jmp_dest[n_orig]);
    if (ptr_locked != ptr) {
        qemu_spin_unlock(&dest->jmp_lock);
        /*
         * The only possibility is that the jump was unlinked via
         * tb_jmp_unlink(dest). Seeing here another destination would be a bug,
         * because we set the LSB above.
         */
        g_assert(ptr_locked == 1 && dest->cflags & CF_INVALID);
        return;
    }
    /*
     * We first acquired the lock, and since the destination pointer matches,
     * we know for sure that @orig is in the jmp list.
     */
    pprev = &dest->jmp_list_head;
    TB_FOR_EACH_JMP(dest, tb, n) {
        if (tb == orig && n == n_orig) {
            *pprev = tb->jmp_list_next[n];
            /* no need to clear orig->jmp_dest[]; the LSB is already set */
            qemu_spin_unlock(&dest->jmp_lock);
            return;
        }
        pprev = &tb->jmp_list_next[n];
    }
    g_assert_not_reached();
}

/*
 * reset the jump entry 'n' of a TB so that it is not chained to another TB
 */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    uintptr_t addr = (uintptr_t)(tb->tc.ptr + tb->jmp_reset_offset[n]);
    tb_set_jmp_target(tb, n, addr);
}

/* remove any jumps to the TB */
static inline void tb_jmp_unlink(TranslationBlock *dest)
{
    TranslationBlock *tb;
    int n;

    qemu_spin_lock(&dest->jmp_lock);

    TB_FOR_EACH_JMP(dest, tb, n) {
        tb_reset_jump(tb, n);
        qatomic_and(&tb->jmp_dest[n], (uintptr_t)NULL | 1);
        /* No need to clear the list entry; setting the dest ptr is enough */
    }
    dest->jmp_list_head = (uintptr_t)NULL;

    qemu_spin_unlock(&dest->jmp_lock);
}
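
/*
 * A quick map of the jump-linking state touched above (descriptive only):
 * each TB has up to two outgoing direct jumps.  For slot n, jmp_dest[n] is
 * the destination TB (low bit set once the link is being torn down), and
 * jmp_list_next[n] threads this TB onto the destination's jmp_list_head,
 * the tagged list of every TB that jumps into it.  Invalidation walks that
 * incoming list with tb_jmp_unlink() to patch the jumps back to their
 * reset targets.
 */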

/*
 * In user-mode, call with mmap_lock held.
 * In !user-mode, if @rm_from_page_list is set, call with the TB's pages'
 * locks held.
 */
static void do_tb_phys_invalidate(TranslationBlock *tb, bool rm_from_page_list)
{
    CPUState *cpu;
    PageDesc *p;
    uint32_t h;
    tb_page_addr_t phys_pc;
    uint32_t orig_cflags = tb_cflags(tb);

    assert_memory_lock();

    /* make sure no further incoming jumps will be chained to this TB */
    qemu_spin_lock(&tb->jmp_lock);
    qatomic_set(&tb->cflags, tb->cflags | CF_INVALID);
    qemu_spin_unlock(&tb->jmp_lock);

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_hash_func(phys_pc, tb->pc, tb->flags, orig_cflags,
                     tb->trace_vcpu_dstate);
    if (!qht_remove(&tb_ctx.htable, tb, h)) {
        return;
    }

    /* remove the TB from the page list */
    if (rm_from_page_list) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(p, tb);
        invalidate_page_bitmap(p);
        if (tb->page_addr[1] != -1) {
            p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
            tb_page_remove(p, tb);
            invalidate_page_bitmap(p);
        }
    }

    /* remove the TB from the per-CPU jump caches */
    h = tb_jmp_cache_hash_func(tb->pc);
    CPU_FOREACH(cpu) {
        if (qatomic_read(&cpu->tb_jmp_cache[h]) == tb) {
            qatomic_set(&cpu->tb_jmp_cache[h], NULL);
        }
    }

    /* suppress this TB from the two jump lists */
    tb_remove_from_jmp_list(tb, 0);
    tb_remove_from_jmp_list(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb_jmp_unlink(tb);

    qatomic_set(&tb_ctx.tb_phys_invalidate_count,
                tb_ctx.tb_phys_invalidate_count + 1);
}

static void tb_phys_invalidate__locked(TranslationBlock *tb)
{
    qemu_thread_jit_write();
    do_tb_phys_invalidate(tb, true);
    qemu_thread_jit_execute();
}

/*
 * invalidate one TB
 *
 * Called with mmap_lock held in user-mode emulation
 */
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
{
    if (page_addr == -1 && tb->page_addr[0] != -1) {
        page_lock_tb(tb);
        do_tb_phys_invalidate(tb, true);
        page_unlock_tb(tb);
    } else {
        do_tb_phys_invalidate(tb, false);
    }
}

#ifdef CONFIG_SOFTMMU
/* call with @p->lock held */
static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    assert_page_locked(p);
    p->code_bitmap = bitmap_new(TARGET_PAGE_SIZE);

    PAGE_FOR_EACH_TB(p, tb, n) {
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE) {
                tb_end = TARGET_PAGE_SIZE;
            }
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        bitmap_set(p->code_bitmap, tb_start, tb_end - tb_start);
    }
}
#endif

/*
 * add the tb in the target page and protect it if necessary
 *
 * Called with mmap_lock held for user-mode emulation.
 * Called with @p->lock held in !user-mode.
 */
static inline void tb_page_add(PageDesc *p, TranslationBlock *tb,
                               unsigned int n, tb_page_addr_t page_addr)
{
#ifndef CONFIG_USER_ONLY
    bool page_already_protected;
#endif

    assert_page_locked(p);

    tb->page_addr[n] = page_addr;
    tb->page_next[n] = p->first_tb;
#ifndef CONFIG_USER_ONLY
    page_already_protected = p->first_tb != (uintptr_t)NULL;
#endif
    p->first_tb = (uintptr_t)tb | n;
    invalidate_page_bitmap(p);

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        for (addr = page_addr; addr < page_addr + qemu_host_page_size;
            addr += TARGET_PAGE_SIZE) {

            p2 = page_find(addr >> TARGET_PAGE_BITS);
            if (!p2) {
                continue;
            }
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
        }
        mprotect(g2h_untagged(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
        if (DEBUG_TB_INVALIDATE_GATE) {
            printf("protecting code page: 0x" TB_PAGE_ADDR_FMT "\n", page_addr);
        }
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!page_already_protected) {
        tlb_protect_code(page_addr);
    }
#endif
}

/*
 * Add a new TB and link it to the physical page tables. phys_page2 is
 * (-1) to indicate that only one page contains the TB.
 *
 * Called with mmap_lock held for user-mode emulation.
 *
 * Returns a pointer @tb, or a pointer to an existing TB that matches @tb.
 * Note that in !user-mode, another thread might have already added a TB
 * for the same block of guest code that @tb corresponds to. In that case,
 * the caller should discard the original @tb, and use instead the returned TB.
 */
static TranslationBlock *
tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
             tb_page_addr_t phys_page2)
{
    PageDesc *p;
    PageDesc *p2 = NULL;
    void *existing_tb = NULL;
    uint32_t h;

    assert_memory_lock();
    tcg_debug_assert(!(tb->cflags & CF_INVALID));

    /*
     * Add the TB to the page list, acquiring first the pages's locks.
     * We keep the locks held until after inserting the TB in the hash table,
     * so that if the insertion fails we know for sure that the TBs are still
     * in the page lists.
     */
    page_lock_pair(&p, phys_pc, &p2, phys_page2, 1);
    tb_page_add(p, tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (p2) {
        tb_page_add(p2, tb, 1, phys_page2);
    } else {
        tb->page_addr[1] = -1;
    }

    /* add in the hash table */
    h = tb_hash_func(phys_pc, tb->pc, tb->flags, tb->cflags,
                     tb->trace_vcpu_dstate);
    qht_insert(&tb_ctx.htable, tb, h, &existing_tb);

    /* remove TB from the page(s) if we couldn't insert it */
    if (unlikely(existing_tb)) {
        tb_page_remove(p, tb);
        invalidate_page_bitmap(p);
        if (p2) {
            tb_page_remove(p2, tb);
            invalidate_page_bitmap(p2);
        }
        tb = existing_tb;
    }

    if (p2 && p2 != p) {
        page_unlock(p2);
    }
    page_unlock(p);

#ifdef CONFIG_USER_ONLY
    if (DEBUG_TB_CHECK_GATE) {
        tb_page_check();
    }
#endif
    return tb;
}

/* Called with mmap_lock held for user mode emulation.  */
TranslationBlock *tb_gen_code(CPUState *cpu,
                              target_ulong pc, target_ulong cs_base,
                              uint32_t flags, int cflags)
{
    CPUArchState *env = cpu->env_ptr;
    TranslationBlock *tb, *existing_tb;
    tb_page_addr_t phys_pc, phys_page2;
    target_ulong virt_page2;
    tcg_insn_unit *gen_code_buf;
    int gen_code_size, search_size, max_insns;
#ifdef CONFIG_PROFILER
    TCGProfile *prof = &tcg_ctx->prof;
    int64_t ti;
#endif

    assert_memory_lock();
    qemu_thread_jit_write();

    phys_pc = get_page_addr_code(env, pc);

    if (phys_pc == -1) {
        /* Generate a one-shot TB with 1 insn in it */
        cflags = (cflags & ~CF_COUNT_MASK) | CF_LAST_IO | 1;
    }

    max_insns = cflags & CF_COUNT_MASK;
    if (max_insns == 0) {
        max_insns = TCG_MAX_INSNS;
    }
    QEMU_BUILD_BUG_ON(CF_COUNT_MASK + 1 != TCG_MAX_INSNS);

 buffer_overflow:
    tb = tcg_tb_alloc(tcg_ctx);
    if (unlikely(!tb)) {
        /* flush must be done */
        tb_flush(cpu);
        mmap_unlock();
        /* Make the execution loop process the flush as soon as possible.  */
        cpu->exception_index = EXCP_INTERRUPT;
        cpu_loop_exit(cpu);
    }

    gen_code_buf = tcg_ctx->code_gen_ptr;
    tb->tc.ptr = tcg_splitwx_to_rx(gen_code_buf);
    tb->pc = pc;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    tb->trace_vcpu_dstate = *cpu->trace_dstate;
    tcg_ctx->tb_cflags = cflags;
 tb_overflow:

#ifdef CONFIG_PROFILER
    /* includes aborted translations because of exceptions */
    qatomic_set(&prof->tb_count1, prof->tb_count1 + 1);
    ti = profile_getclock();
#endif

    gen_code_size = sigsetjmp(tcg_ctx->jmp_trans, 0);
    if (unlikely(gen_code_size != 0)) {
        goto error_return;
    }

    tcg_func_start(tcg_ctx);

    tcg_ctx->cpu = env_cpu(env);
    gen_intermediate_code(cpu, tb, max_insns);
    assert(tb->size != 0);
    tcg_ctx->cpu = NULL;
    max_insns = tb->icount;

    trace_translate_block(tb, tb->pc, tb->tc.ptr);

    /* generate machine code */
    tb->jmp_reset_offset[0] = TB_JMP_RESET_OFFSET_INVALID;
    tb->jmp_reset_offset[1] = TB_JMP_RESET_OFFSET_INVALID;
    tcg_ctx->tb_jmp_reset_offset = tb->jmp_reset_offset;
    if (TCG_TARGET_HAS_direct_jump) {
        tcg_ctx->tb_jmp_insn_offset = tb->jmp_target_arg;
        tcg_ctx->tb_jmp_target_addr = NULL;
    } else {
        tcg_ctx->tb_jmp_insn_offset = NULL;
        tcg_ctx->tb_jmp_target_addr = tb->jmp_target_arg;
    }

#ifdef CONFIG_PROFILER
    qatomic_set(&prof->tb_count, prof->tb_count + 1);
    qatomic_set(&prof->interm_time,
                prof->interm_time + profile_getclock() - ti);
    ti = profile_getclock();
#endif

    gen_code_size = tcg_gen_code(tcg_ctx, tb);
    if (unlikely(gen_code_size < 0)) {
 error_return:
        switch (gen_code_size) {
        case -1:
            /*
             * Overflow of code_gen_buffer, or the current slice of it.
             *
             * TODO: We don't need to re-do gen_intermediate_code, nor
             * should we re-do the tcg optimization currently hidden
             * inside tcg_gen_code.  All that should be required is to
             * flush the TBs, allocate a new TB, re-initialize it per
             * above, and re-do the actual code generation.
             */
            qemu_log_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT,
                          "Restarting code generation for "
                          "code_gen_buffer overflow\n");
            goto buffer_overflow;

        case -2:
            /*
             * The code generated for the TranslationBlock is too large.
             * The maximum size allowed by the unwind info is 64k.
             * There may be stricter constraints from relocations
             * in the tcg backend.
             *
             * Try again with half as many insns as we attempted this time.
             * If a single insn overflows, there's a bug somewhere...
             */
            assert(max_insns > 1);
            max_insns /= 2;
            qemu_log_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT,
                          "Restarting code generation with "
                          "smaller translation block (max %d insns)\n",
                          max_insns);
            goto tb_overflow;

        default:
            g_assert_not_reached();
        }
    }
    search_size = encode_search(tb, (void *)gen_code_buf + gen_code_size);
    if (unlikely(search_size < 0)) {
        goto buffer_overflow;
    }
    tb->tc.size = gen_code_size;

#ifdef CONFIG_PROFILER
    qatomic_set(&prof->code_time, prof->code_time + profile_getclock() - ti);
    qatomic_set(&prof->code_in_len, prof->code_in_len + tb->size);
    qatomic_set(&prof->code_out_len, prof->code_out_len + gen_code_size);
    qatomic_set(&prof->search_out_len, prof->search_out_len + search_size);
#endif

#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_OUT_ASM) &&
        qemu_log_in_addr_range(tb->pc)) {
        FILE *logfile = qemu_log_lock();
        int code_size, data_size;
        const tcg_target_ulong *rx_data_gen_ptr;
        size_t chunk_start;
        int insn = 0;

        if (tcg_ctx->data_gen_ptr) {
            rx_data_gen_ptr = tcg_splitwx_to_rx(tcg_ctx->data_gen_ptr);
            code_size = (const void *)rx_data_gen_ptr - tb->tc.ptr;
            data_size = gen_code_size - code_size;
        } else {
            rx_data_gen_ptr = 0;
            code_size = gen_code_size;
            data_size = 0;
        }

        /* Dump header and the first instruction */
        qemu_log("OUT: [size=%d]\n", gen_code_size);
        qemu_log("  -- guest addr 0x" TARGET_FMT_lx " + tb prologue\n",
                 tcg_ctx->gen_insn_data[insn][0]);
        chunk_start = tcg_ctx->gen_insn_end_off[insn];
        log_disas(tb->tc.ptr, chunk_start);

        /*
         * Dump each instruction chunk, wrapping up empty chunks into
         * the next instruction. The whole array is offset so the
         * first entry is the beginning of the 2nd instruction.
         */
        while (insn < tb->icount) {
            size_t chunk_end = tcg_ctx->gen_insn_end_off[insn];
            if (chunk_end > chunk_start) {
                qemu_log("  -- guest addr 0x" TARGET_FMT_lx "\n",
                         tcg_ctx->gen_insn_data[insn][0]);
                log_disas(tb->tc.ptr + chunk_start, chunk_end - chunk_start);
                chunk_start = chunk_end;
            }
            insn++;
        }

        if (chunk_start < code_size) {
            qemu_log("  -- tb slow paths + alignment\n");
            log_disas(tb->tc.ptr + chunk_start, code_size - chunk_start);
        }

        /* Finally dump any data we may have after the block */
        if (data_size) {
            int i;
            qemu_log("  data: [size=%d]\n", data_size);
            for (i = 0; i < data_size / sizeof(tcg_target_ulong); i++) {
                if (sizeof(tcg_target_ulong) == 8) {
                    qemu_log("0x%08" PRIxPTR ":  .quad  0x%016" TCG_PRIlx "\n",
                             (uintptr_t)&rx_data_gen_ptr[i], rx_data_gen_ptr[i]);
                } else if (sizeof(tcg_target_ulong) == 4) {
                    qemu_log("0x%08" PRIxPTR ":  .long  0x%08" TCG_PRIlx "\n",
                             (uintptr_t)&rx_data_gen_ptr[i], rx_data_gen_ptr[i]);
                } else {
                    qemu_build_not_reached();
                }
            }
        }
        qemu_log("\n");
        qemu_log_flush();
        qemu_log_unlock(logfile);
    }
#endif

    qatomic_set(&tcg_ctx->code_gen_ptr, (void *)
        ROUND_UP((uintptr_t)gen_code_buf + gen_code_size + search_size,
                 CODE_GEN_ALIGN));

    /* init jump list */
    qemu_spin_init(&tb->jmp_lock);
    tb->jmp_list_head = (uintptr_t)NULL;
    tb->jmp_list_next[0] = (uintptr_t)NULL;
    tb->jmp_list_next[1] = (uintptr_t)NULL;
    tb->jmp_dest[0] = (uintptr_t)NULL;
    tb->jmp_dest[1] = (uintptr_t)NULL;

    /* init original jump addresses which have been set during tcg_gen_code() */
    if (tb->jmp_reset_offset[0] != TB_JMP_RESET_OFFSET_INVALID) {
        tb_reset_jump(tb, 0);
    }
    if (tb->jmp_reset_offset[1] != TB_JMP_RESET_OFFSET_INVALID) {
        tb_reset_jump(tb, 1);
    }

    /*
     * If the TB is not associated with a physical RAM page then
     * it must be a temporary one-insn TB, and we have nothing to do
     * except fill in the page_addr[].  Return early before attempting
     * to link to other TBs or add to the lookup table.
     */
    if (phys_pc == -1) {
        tb->page_addr[0] = tb->page_addr[1] = -1;
        return tb;
    }

    /*
     * Insert TB into the corresponding region tree before publishing it
     * through QHT. Otherwise rewinding happened in the TB might fail to
     * lookup itself using host PC.
     */
    tcg_tb_insert(tb);

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_page_addr_code(env, virt_page2);
    }
    /*
     * No explicit memory barrier is required -- tb_link_page() makes the
     * TB visible in a consistent state.
     */
    existing_tb = tb_link_page(tb, phys_pc, phys_page2);
    /* if the TB already exists, discard what we have just translated */
    if (unlikely(existing_tb != tb)) {
        uintptr_t orig_aligned = (uintptr_t)gen_code_buf;

        orig_aligned -= ROUND_UP(sizeof(*tb), qemu_icache_linesize);
        qatomic_set(&tcg_ctx->code_gen_ptr, (void *)orig_aligned);
        tcg_tb_remove(tb);
        return existing_tb;
    }
    return tb;
}

/*
 * @p must be non-NULL.
 * user-mode: call with mmap_lock held.
 * !user-mode: call with all @pages locked.
 */
static void
tb_invalidate_phys_page_range__locked(struct page_collection *pages,
                                      PageDesc *p, tb_page_addr_t start,
                                      tb_page_addr_t end,
                                      uintptr_t retaddr)
{
    TranslationBlock *tb;
    tb_page_addr_t tb_start, tb_end;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    CPUState *cpu = current_cpu;
    CPUArchState *env = NULL;
    bool current_tb_not_found = retaddr != 0;
    bool current_tb_modified = false;
    TranslationBlock *current_tb = NULL;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    uint32_t current_flags = 0;
#endif /* TARGET_HAS_PRECISE_SMC */

    assert_page_locked(p);

#if defined(TARGET_HAS_PRECISE_SMC)
    if (cpu != NULL) {
        env = cpu->env_ptr;
    }
#endif

    /*
     * We remove all the TBs in the range [start, end[.
     * XXX: see if in some cases it could be faster to invalidate all the code
     */
    PAGE_FOR_EACH_TB(p, tb, n) {
        assert_page_locked(p);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = false;
                /* now we have a real cpu fault */
                current_tb = tcg_tb_lookup(retaddr);
            }
            if (current_tb == tb &&
                (tb_cflags(current_tb) & CF_COUNT_MASK) != 1) {
                /*
                 * If we are modifying the current TB, we must stop
                 * its execution. We could be more precise by checking
                 * that the modification is after the current PC, but it
                 * would require a specialized function to partially
                 * restore the CPU state.
                 */
                current_tb_modified = true;
                cpu_restore_state_from_tb(cpu, current_tb, retaddr, true);
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                     &current_flags);
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            tb_phys_invalidate__locked(tb);
        }
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        tlb_unprotect_code(start);
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        page_collection_unlock(pages);
        /* Force execution of one insn next time.  */
        cpu->cflags_next_tb = 1 | curr_cflags(cpu);
        mmap_unlock();
        cpu_loop_exit_noexc(cpu);
    }
#endif
}

/*
 * Invalidate all TBs which intersect with the target physical address range
 * [start;end[.  NOTE: start and end must refer to the *same* physical page.
 *
 * Called with mmap_lock held for user-mode emulation.
 */
void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end)
{
    struct page_collection *pages;
    PageDesc *p;

    assert_memory_lock();

    p = page_find(start >> TARGET_PAGE_BITS);
    if (p == NULL) {
        return;
    }
    pages = page_collection_lock(start, end);
    tb_invalidate_phys_page_range__locked(pages, p, start, end, 0);
    page_collection_unlock(pages);
}

/*
 * Invalidate all TBs which intersect with the target physical address range
 * [start;end[.  NOTE: start and end may refer to *different* physical pages.
 *
 * Called with mmap_lock held for user-mode emulation.
 */
#ifdef CONFIG_SOFTMMU
void tb_invalidate_phys_range(ram_addr_t start, ram_addr_t end)
#else
void tb_invalidate_phys_range(target_ulong start, target_ulong end)
#endif
{
    struct page_collection *pages;
    tb_page_addr_t next;

    assert_memory_lock();

    pages = page_collection_lock(start, end);
    for (next = (start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
         start < end;
         start = next, next += TARGET_PAGE_SIZE) {
        PageDesc *pd = page_find(start >> TARGET_PAGE_BITS);
        tb_page_addr_t bound = MIN(next, end);

        if (pd == NULL) {
            continue;
        }
        tb_invalidate_phys_page_range__locked(pages, pd, start, bound, 0);
    }
    page_collection_unlock(pages);
}

#ifdef CONFIG_SOFTMMU
/*
 * len must be <= 8 and start must be a multiple of len.
 * Called via softmmu_template.h when code areas are written to with
 * iothread mutex not held.
 *
 * Call with all @pages in the range [@start, @start + len[ locked.
 */
void tb_invalidate_phys_page_fast(struct page_collection *pages,
                                  tb_page_addr_t start, int len,
                                  uintptr_t retaddr)
{
    PageDesc *p;

    assert_memory_lock();

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p) {
        return;
    }

    assert_page_locked(p);
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD) {
        build_page_bitmap(p);
    }
    if (p->code_bitmap) {
        unsigned int nr;
        unsigned long b;

        nr = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG - 1));
        if (b & ((1 << len) - 1)) {
            goto do_invalidate;
        }
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range__locked(pages, p, start, start + len,
                                              retaddr);
    }
}
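
/*
 * Bitmap check example (illustrative): for a 2-byte write at page offset
 * 0x40, nr == 0x40, so on a 64-bit host the word p->code_bitmap[1] is
 * shifted right by 0 and masked with 0b11; the slow invalidation path is
 * taken only if one of those two bytes lies inside a TB.  Note the bitmap
 * is built lazily, after SMC_BITMAP_USE_THRESHOLD writes to the page.
 */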
#else
/*
 * Called with mmap_lock held. If pc is not 0 then it indicates the
 * host PC of the faulting store instruction that caused this invalidate.
 * Returns true if the caller needs to abort execution of the current
 * TB (because it was modified by this store and the guest CPU has
 * precise-SMC semantics).
 */
static bool tb_invalidate_phys_page(tb_page_addr_t addr, uintptr_t pc)
{
    TranslationBlock *tb;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    TranslationBlock *current_tb = NULL;
    CPUState *cpu = current_cpu;
    CPUArchState *env = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    uint32_t current_flags = 0;
#endif

    assert_memory_lock();

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        return false;
    }

#ifdef TARGET_HAS_PRECISE_SMC
    if (p->first_tb && pc != 0) {
        current_tb = tcg_tb_lookup(pc);
    }
    if (cpu != NULL) {
        env = cpu->env_ptr;
    }
#endif
    assert_page_locked(p);
    PAGE_FOR_EACH_TB(p, tb, n) {
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            (tb_cflags(current_tb) & CF_COUNT_MASK) != 1) {
            /*
             * If we are modifying the current TB, we must stop its execution.
             * We could be more precise by checking that the modification is
             * after the current PC, but it would require a specialized
             * function to partially restore the CPU state.
             */
            current_tb_modified = 1;
            cpu_restore_state_from_tb(cpu, current_tb, pc, true);
            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                 &current_flags);
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
    }
    p->first_tb = (uintptr_t)NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* Force execution of one insn next time.  */
        cpu->cflags_next_tb = 1 | curr_cflags(cpu);
        return true;
    }
#endif

    return false;
}
#endif

/* user-mode: call with mmap_lock held */
void tb_check_watchpoint(CPUState *cpu, uintptr_t retaddr)
{
    TranslationBlock *tb;

    assert_memory_lock();

    tb = tcg_tb_lookup(retaddr);
    if (tb) {
        /* We can use retranslation to find the PC.  */
        cpu_restore_state_from_tb(cpu, tb, retaddr, true);
        tb_phys_invalidate(tb, -1);
    } else {
        /* The exception probably happened in a helper.  The CPU state should
           have been saved before calling it. Fetch the PC from there.  */
        CPUArchState *env = cpu->env_ptr;
        target_ulong pc, cs_base;
        tb_page_addr_t addr;
        uint32_t flags;

        cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
        addr = get_page_addr_code(env, pc);
        if (addr != -1) {
            tb_invalidate_phys_range(addr, addr + 1);
        }
    }
}

#ifndef CONFIG_USER_ONLY
/*
 * In deterministic execution mode, instructions doing device I/Os
 * must be at the end of the TB.
 *
 * Called by softmmu_template.h, with iothread mutex not held.
 */
void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr)
{
    TranslationBlock *tb;
    CPUClass *cc;
    uint32_t n;

    tb = tcg_tb_lookup(retaddr);
    if (!tb) {
        cpu_abort(cpu, "cpu_io_recompile: could not find TB for pc=%p",
                  (void *)retaddr);
    }
    cpu_restore_state_from_tb(cpu, tb, retaddr, true);

    /*
     * Some guests must re-execute the branch when re-executing a delay
     * slot instruction.  When this is the case, adjust icount and N
     * to account for the re-execution of the branch.
     */
    n = 1;
    cc = CPU_GET_CLASS(cpu);
    if (cc->tcg_ops->io_recompile_replay_branch &&
        cc->tcg_ops->io_recompile_replay_branch(cpu, tb)) {
        cpu_neg(cpu)->icount_decr.u16.low++;
        n = 2;
    }

    /*
     * Exit the loop and potentially generate a new TB executing just
     * the I/O insns.  We also limit instrumentation to memory
     * operations only (which execute after completion) so we don't
     * double instrument the instruction.
     */
    cpu->cflags_next_tb = curr_cflags(cpu) | CF_MEMI_ONLY | CF_LAST_IO | n;

    qemu_log_mask_and_addr(CPU_LOG_EXEC, tb->pc,
                           "cpu_io_recompile: rewound execution of TB to "
                           TARGET_FMT_lx "\n", tb->pc);

    cpu_loop_exit_noexc(cpu);
}

static void print_qht_statistics(struct qht_stats hst)
{
    uint32_t hgram_opts;
    size_t hgram_bins;
    char *hgram;

    if (!hst.head_buckets) {
        return;
    }
    qemu_printf("TB hash buckets     %zu/%zu (%0.2f%% head buckets used)\n",
                hst.used_head_buckets, hst.head_buckets,
                (double)hst.used_head_buckets / hst.head_buckets * 100);

    hgram_opts  = QDIST_PR_BORDER | QDIST_PR_LABELS;
    hgram_opts |= QDIST_PR_100X   | QDIST_PR_PERCENT;
    if (qdist_xmax(&hst.occupancy) - qdist_xmin(&hst.occupancy) == 1) {
        hgram_opts |= QDIST_PR_NODECIMAL;
    }
    hgram = qdist_pr(&hst.occupancy, 10, hgram_opts);
    qemu_printf("TB hash occupancy   %0.2f%% avg chain occ. Histogram: %s\n",
                qdist_avg(&hst.occupancy) * 100, hgram);
    g_free(hgram);

    hgram_opts = QDIST_PR_BORDER | QDIST_PR_LABELS;
    hgram_bins = qdist_xmax(&hst.chain) - qdist_xmin(&hst.chain);
    if (hgram_bins > 10) {
        hgram_bins = 10;
    } else {
        hgram_bins = 0;
        hgram_opts |= QDIST_PR_NODECIMAL | QDIST_PR_NOBINRANGE;
    }
    hgram = qdist_pr(&hst.chain, hgram_bins, hgram_opts);
    qemu_printf("TB hash avg chain   %0.3f buckets. Histogram: %s\n",
                qdist_avg(&hst.chain), hgram);
    g_free(hgram);
}

struct tb_tree_stats {
    size_t nb_tbs;
    size_t host_size;
    size_t target_size;
    size_t max_target_size;
    size_t direct_jmp_count;
    size_t direct_jmp2_count;
    size_t cross_page;
};

static gboolean tb_tree_stats_iter(gpointer key, gpointer value, gpointer data)
{
    const TranslationBlock *tb = value;
    struct tb_tree_stats *tst = data;

    tst->nb_tbs++;
    tst->host_size += tb->tc.size;
    tst->target_size += tb->size;
    if (tb->size > tst->max_target_size) {
        tst->max_target_size = tb->size;
    }
    if (tb->page_addr[1] != -1) {
        tst->cross_page++;
    }
    if (tb->jmp_reset_offset[0] != TB_JMP_RESET_OFFSET_INVALID) {
        tst->direct_jmp_count++;
        if (tb->jmp_reset_offset[1] != TB_JMP_RESET_OFFSET_INVALID) {
            tst->direct_jmp2_count++;
        }
    }
    return false;
}

void dump_exec_info(void)
{
    struct tb_tree_stats tst = {};
    struct qht_stats hst;
    size_t nb_tbs, flush_full, flush_part, flush_elide;

    tcg_tb_foreach(tb_tree_stats_iter, &tst);
    nb_tbs = tst.nb_tbs;

    qemu_printf("Translation buffer state:\n");
    /*
     * Report total code size including the padding and TB structs;
     * otherwise users might think "-accel tcg,tb-size" is not honoured.
     * For avg host size we use the precise numbers from tb_tree_stats though.
     */
    qemu_printf("gen code size       %zu/%zu\n",
                tcg_code_size(), tcg_code_capacity());
    qemu_printf("TB count            %zu\n", nb_tbs);
    qemu_printf("TB avg target size  %zu max=%zu bytes\n",
                nb_tbs ? tst.target_size / nb_tbs : 0,
                tst.max_target_size);
    qemu_printf("TB avg host size    %zu bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? tst.host_size / nb_tbs : 0,
                tst.target_size ? (double)tst.host_size / tst.target_size : 0);
    qemu_printf("cross page TB count %zu (%zu%%)\n", tst.cross_page,
                nb_tbs ? (tst.cross_page * 100) / nb_tbs : 0);
    qemu_printf("direct jump count   %zu (%zu%%) (2 jumps=%zu %zu%%)\n",
                tst.direct_jmp_count,
                nb_tbs ? (tst.direct_jmp_count * 100) / nb_tbs : 0,
                tst.direct_jmp2_count,
                nb_tbs ? (tst.direct_jmp2_count * 100) / nb_tbs : 0);

    qht_statistics_init(&tb_ctx.htable, &hst);
    print_qht_statistics(hst);
    qht_statistics_destroy(&hst);

    qemu_printf("\nStatistics:\n");
    qemu_printf("TB flush count      %u\n",
                qatomic_read(&tb_ctx.tb_flush_count));
    qemu_printf("TB invalidate count %u\n",
                qatomic_read(&tb_ctx.tb_phys_invalidate_count));

    tlb_flush_counts(&flush_full, &flush_part, &flush_elide);
    qemu_printf("TLB full flushes    %zu\n", flush_full);
    qemu_printf("TLB partial flushes %zu\n", flush_part);
    qemu_printf("TLB elided flushes  %zu\n", flush_elide);
    tcg_dump_info();
}

void dump_opcount_info(void)
{
    tcg_dump_op_count();
}

#else /* CONFIG_USER_ONLY */

void cpu_interrupt(CPUState *cpu, int mask)
{
    g_assert(qemu_mutex_iothread_locked());
    cpu->interrupt_request |= mask;
    qatomic_set(&cpu_neg(cpu)->icount_decr.u16.high, -1);
}

/*
 * Walks guest process memory "regions" one by one
 * and calls callback function 'fn' for each region.
 */
struct walk_memory_regions_data {
    walk_memory_regions_fn fn;
    void *priv;
    target_ulong start;
    int prot;
};

static int walk_memory_regions_end(struct walk_memory_regions_data *data,
                                   target_ulong end, int new_prot)
{
    if (data->start != -1u) {
        int rc = data->fn(data->priv, data->start, end, data->prot);
        if (rc != 0) {
            return rc;
        }
    }

    data->start = (new_prot ? end : -1u);
    data->prot = new_prot;

    return 0;
}

static int walk_memory_regions_1(struct walk_memory_regions_data *data,
                                 target_ulong base, int level, void **lp)
{
    target_ulong pa;
    int i, rc;

    if (*lp == NULL) {
        return walk_memory_regions_end(data, base, 0);
    }

    if (level == 0) {
        PageDesc *pd = *lp;

        for (i = 0; i < V_L2_SIZE; ++i) {
            int prot = pd[i].flags;

            pa = base | (i << TARGET_PAGE_BITS);
            if (prot != data->prot) {
                rc = walk_memory_regions_end(data, pa, prot);
                if (rc != 0) {
                    return rc;
                }
            }
        }
    } else {
        void **pp = *lp;

        for (i = 0; i < V_L2_SIZE; ++i) {
            pa = base | ((target_ulong)i <<
                (TARGET_PAGE_BITS + V_L2_BITS * level));
            rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
            if (rc != 0) {
                return rc;
            }
        }
    }

    return 0;
}

int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
{
    struct walk_memory_regions_data data;
    uintptr_t i, l1_sz = v_l1_size;

    data.fn = fn;
    data.priv = priv;
    data.start = -1u;
    data.prot = 0;

    for (i = 0; i < l1_sz; i++) {
        target_ulong base = i << (v_l1_shift + TARGET_PAGE_BITS);
        int rc = walk_memory_regions_1(&data, base, v_l2_levels, l1_map + i);
        if (rc != 0) {
            return rc;
        }
    }

    return walk_memory_regions_end(&data, 0, 0);
}

static int dump_region(void *priv, target_ulong start,
                       target_ulong end, unsigned long prot)
{
    FILE *f = (FILE *)priv;

    (void) fprintf(f, TARGET_FMT_lx"-"TARGET_FMT_lx
                   " "TARGET_FMT_lx" %c%c%c\n",
                   start, end, end - start,
                   ((prot & PAGE_READ) ? 'r' : '-'),
                   ((prot & PAGE_WRITE) ? 'w' : '-'),
                   ((prot & PAGE_EXEC) ? 'x' : '-'));

    return 0;
}

/* dump memory mappings */
void page_dump(FILE *f)
{
    const int length = sizeof(target_ulong) * 2;
    (void) fprintf(f, "%-*s %-*s %-*s %s\n",
                   length, "start", length, "end", length, "size", "prot");
    walk_memory_regions(f, dump_region);
}

int page_get_flags(target_ulong address)
{
    PageDesc *p;

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p) {
        return 0;
    }
    return p->flags;
}

/*
 * Modify the flags of a page and invalidate the code if necessary.
 * The flag PAGE_WRITE_ORG is positioned automatically depending
 * on PAGE_WRITE.  The mmap_lock should be held.
 */
void page_set_flags(target_ulong start, target_ulong end, int flags)
{
    target_ulong addr, len;
    bool reset_target_data;

    /* This function should never be called with addresses outside the
       guest address space.  If this assert fires, it probably indicates
       a missing call to h2g_valid.  */
    assert(end - 1 <= GUEST_ADDR_MAX);
    assert(start < end);
    /* Only set PAGE_ANON with new mappings. */
    assert(!(flags & PAGE_ANON) || (flags & PAGE_RESET));
    assert_memory_lock();

    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    if (flags & PAGE_WRITE) {
        flags |= PAGE_WRITE_ORG;
    }
    reset_target_data = !(flags & PAGE_VALID) || (flags & PAGE_RESET);
    flags &= ~PAGE_RESET;

    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);

        /* If the write protection bit is set, then we invalidate
           the code inside.  */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr, 0);
        }
        if (reset_target_data) {
            g_free(p->target_data);
            p->target_data = NULL;
            p->flags = flags;
        } else {
            /* Using mprotect on a page does not change sticky bits. */
            p->flags = (p->flags & PAGE_ANON) | flags;
        }
    }
}

void *page_get_target_data(target_ulong address)
{
    PageDesc *p = page_find(address >> TARGET_PAGE_BITS);
    return p ? p->target_data : NULL;
}

void *page_alloc_target_data(target_ulong address, size_t size)
{
    PageDesc *p = page_find(address >> TARGET_PAGE_BITS);
    void *ret = NULL;

    if (p->flags & PAGE_VALID) {
        ret = p->target_data;
        if (!ret) {
            p->target_data = ret = g_malloc0(size);
        }
    }
    return ret;
}

int page_check_range(target_ulong start, target_ulong len, int flags)
{
    PageDesc *p;
    target_ulong end;
    target_ulong addr;

    /* This function should never be called with addresses outside the
       guest address space.  If this assert fires, it probably indicates
       a missing call to h2g_valid.  */
    if (TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS) {
        assert(start < ((target_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
    }

    if (len == 0) {
        return 0;
    }
    if (start + len - 1 < start) {
        /* We've wrapped around.  */
        return -1;
    }

    /* must do before we lose bits in the next step */
    end = TARGET_PAGE_ALIGN(start + len);
    start = start & TARGET_PAGE_MASK;

    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        p = page_find(addr >> TARGET_PAGE_BITS);
        if (!p) {
            return -1;
        }
        if (!(p->flags & PAGE_VALID)) {
            return -1;
        }

        if ((flags & PAGE_READ) && !(p->flags & PAGE_READ)) {
            return -1;
        }
        if (flags & PAGE_WRITE) {
            if (!(p->flags & PAGE_WRITE_ORG)) {
                return -1;
            }
            /* unprotect the page if it was put read-only because it
               contains translated code */
            if (!(p->flags & PAGE_WRITE)) {
                if (!page_unprotect(addr, 0)) {
                    return -1;
                }
            }
        }
    }
    return 0;
}
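
/*
 * Usage sketch (hypothetical caller): syscall emulation validates guest
 * buffers with something like
 *
 *     if (page_check_range(guest_addr, size, PAGE_READ | PAGE_WRITE) < 0) {
 *         return -TARGET_EFAULT;
 *     }
 *
 * relying on the page_unprotect() call above to transparently re-enable
 * writes to pages that were made read-only because they contain code.
 */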

/*
 * Called from signal handler: invalidate the code and unprotect the
 * page. Return 0 if the fault was not handled, 1 if it was handled,
 * and 2 if it was handled but the caller must cause the TB to be
 * immediately exited. (We can only return 2 if the 'pc' argument is
 * non-zero.)
 */
int page_unprotect(target_ulong address, uintptr_t pc)
{
    unsigned int prot;
    bool current_tb_invalidated;
    PageDesc *p;
    target_ulong host_start, host_end, addr;

    /*
     * Technically this isn't safe inside a signal handler.  However we
     * know this only ever happens in a synchronous SEGV handler, so in
     * practice it seems to be ok.
     */
    mmap_lock();

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p) {
        mmap_unlock();
        return 0;
    }

    /* if the page was really writable, then we change its
       protection back to writable */
    if (p->flags & PAGE_WRITE_ORG) {
        current_tb_invalidated = false;
        if (p->flags & PAGE_WRITE) {
            /*
             * If the page is actually marked WRITE then assume this is because
             * this thread raced with another one which got here first and
             * set the page to PAGE_WRITE and did the TB invalidate for us.
             */
#ifdef TARGET_HAS_PRECISE_SMC
            TranslationBlock *current_tb = tcg_tb_lookup(pc);
            if (current_tb) {
                current_tb_invalidated = tb_cflags(current_tb) & CF_INVALID;
            }
#endif
        } else {
            host_start = address & qemu_host_page_mask;
            host_end = host_start + qemu_host_page_size;

            prot = 0;
            for (addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE) {
                p = page_find(addr >> TARGET_PAGE_BITS);
                p->flags |= PAGE_WRITE;
                prot |= p->flags;

                /* and since the content will be modified, we must
                   invalidate the corresponding translated code. */
                current_tb_invalidated |= tb_invalidate_phys_page(addr, pc);
#ifdef CONFIG_USER_ONLY
                if (DEBUG_TB_CHECK_GATE) {
                    tb_invalidate_check(addr);
                }
#endif
            }
            mprotect((void *)g2h_untagged(host_start), qemu_host_page_size,
                     prot & PAGE_BITS);
        }
        mmap_unlock();
        /* If current TB was invalidated, return to the main loop */
        return current_tb_invalidated ? 2 : 1;
    }
    mmap_unlock();
    return 0;
}
#endif /* CONFIG_USER_ONLY */

/* This is a wrapper for common code that can not use CONFIG_SOFTMMU */
void tcg_flush_softmmu_tlb(CPUState *cs)
{
#ifdef CONFIG_SOFTMMU
    tlb_flush(cs);
#endif
}