1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19#ifdef _WIN32
20#include <windows.h>
21#else
22#include <sys/mman.h>
23#endif
24#include "qemu/osdep.h"
25
26
27#include "qemu-common.h"
28#define NO_CPU_IO_DEFS
29#include "cpu.h"
30#include "trace.h"
31#include "disas/disas.h"
32#include "tcg.h"
33#if defined(CONFIG_USER_ONLY)
34#include "qemu.h"
35#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
36#include <sys/param.h>
37#if __FreeBSD_version >= 700104
38#define HAVE_KINFO_GETVMMAP
39#define sigqueue sigqueue_freebsd
40#include <sys/proc.h>
41#include <machine/profile.h>
42#define _KERNEL
43#include <sys/user.h>
44#undef _KERNEL
45#undef sigqueue
46#include <libutil.h>
47#endif
48#endif
49#else
50#include "exec/address-spaces.h"
51#endif
52
53#include "exec/cputlb.h"
54#include "exec/tb-hash.h"
55#include "translate-all.h"
56#include "qemu/bitmap.h"
57#include "qemu/timer.h"
58#include "exec/log.h"
59#include "qemu/etrace.h"
60
61
62
63
64
65
66#if !defined(CONFIG_USER_ONLY)
67
68#undef DEBUG_TB_CHECK
69#endif
70
71#define SMC_BITMAP_USE_THRESHOLD 10
72
typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    unsigned long *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;
84
85
86
87#if !defined(CONFIG_USER_ONLY)
88#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
89# define L1_MAP_ADDR_SPACE_BITS HOST_LONG_BITS
90#else
91# define L1_MAP_ADDR_SPACE_BITS TARGET_PHYS_ADDR_SPACE_BITS
92#endif
93#else
94# define L1_MAP_ADDR_SPACE_BITS TARGET_VIRT_ADDR_SPACE_BITS
95#endif
96
97
98#define V_L2_BITS 10
99#define V_L2_SIZE (1 << V_L2_BITS)
100
101
102#define V_L1_BITS_REM \
103 ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % V_L2_BITS)
104
105#if V_L1_BITS_REM < 4
106#define V_L1_BITS (V_L1_BITS_REM + V_L2_BITS)
107#else
108#define V_L1_BITS V_L1_BITS_REM
109#endif
110
111#define V_L1_SIZE ((target_ulong)1 << V_L1_BITS)
112
113#define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)
114
/* Host page geometry as used by the guest mapping code; both are filled
 * in by page_size_init().  qemu_host_page_size is raised to at least
 * TARGET_PAGE_SIZE. */
uintptr_t qemu_host_page_size;
intptr_t qemu_host_page_mask;

/* Top level of the radix tree mapping guest pages to PageDesc. */
static void *l1_map[V_L1_SIZE];

/* code generation context */
TCGContext tcg_ctx;

/* Per-thread flag tracking ownership of tb_lock (user-mode only). */
#ifdef CONFIG_USER_ONLY
__thread int have_tb_lock;
#endif
128
/* Acquire the translation-block lock.  Compiled out (no-op) unless
 * CONFIG_USER_ONLY; the lock is not recursive. */
void tb_lock(void)
{
#ifdef CONFIG_USER_ONLY
    assert(!have_tb_lock);
    qemu_mutex_lock(&tcg_ctx.tb_ctx.tb_lock);
    have_tb_lock++;
#endif
}
137
/* Release the translation-block lock; caller must hold it
 * (asserted).  No-op unless CONFIG_USER_ONLY. */
void tb_unlock(void)
{
#ifdef CONFIG_USER_ONLY
    assert(have_tb_lock);
    have_tb_lock--;
    qemu_mutex_unlock(&tcg_ctx.tb_ctx.tb_lock);
#endif
}
146
/* Drop the translation-block lock if this thread happens to hold it.
 * Used on non-local exits (e.g. longjmp paths) where the lock state is
 * uncertain.  No-op unless CONFIG_USER_ONLY. */
void tb_lock_reset(void)
{
#ifdef CONFIG_USER_ONLY
    if (!have_tb_lock) {
        return;
    }
    qemu_mutex_unlock(&tcg_ctx.tb_ctx.tb_lock);
    have_tb_lock = 0;
#endif
}
156
157static void tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
158 tb_page_addr_t phys_page2);
159static TranslationBlock *tb_find_pc(uintptr_t tc_ptr);
160
/* Initialise the translator: set up the global TCG context. */
void cpu_gen_init(void)
{
    tcg_context_init(&tcg_ctx);
}
165
166
167
168static uint8_t *encode_sleb128(uint8_t *p, target_long val)
169{
170 int more, byte;
171
172 do {
173 byte = val & 0x7f;
174 val >>= 7;
175 more = !((val == 0 && (byte & 0x40) == 0)
176 || (val == -1 && (byte & 0x40) != 0));
177 if (more) {
178 byte |= 0x80;
179 }
180 *p++ = byte;
181 } while (more);
182
183 return p;
184}
185
186
187
188static target_long decode_sleb128(uint8_t **pp)
189{
190 uint8_t *p = *pp;
191 target_long val = 0;
192 int byte, shift = 0;
193
194 do {
195 byte = *p++;
196 val |= (target_ulong)(byte & 0x7f) << shift;
197 shift += 7;
198 } while (byte & 0x80);
199 if (shift < TARGET_LONG_BITS && (byte & 0x40)) {
200 val |= -(target_ulong)1 << shift;
201 }
202
203 *pp = p;
204 return val;
205}
206
207
208
209
210
211
212
213
214
215
216
217
218
/* Encode the data collected about the instructions while compiling TB.
   Place the data at BLOCK, and return the number of bytes consumed.

   The logical table consists of TARGET_INSN_START_WORDS target_ulong's,
   which come from the target's insn_start data, followed by a uintptr_t
   which comes from the host pc of the end of the code implementing the
   insn.  Each line of the table is encoded as sleb128 deltas from the
   previous line.  The seed for the first line is
   { tb->pc, 0..., tb->tc_ptr }.  Returns -1 on buffer overflow. */
static int encode_search(TranslationBlock *tb, uint8_t *block)
{
    uint8_t *highwater = tcg_ctx.code_gen_highwater;
    uint8_t *p = block;
    int i, j, n;

    tb->tc_search = block;

    for (i = 0, n = tb->icount; i < n; ++i) {
        target_ulong prev;

        for (j = 0; j < TARGET_INSN_START_WORDS; ++j) {
            if (i == 0) {
                prev = (j == 0 ? tb->pc : 0);
            } else {
                prev = tcg_ctx.gen_insn_data[i - 1][j];
            }
            p = encode_sleb128(p, tcg_ctx.gen_insn_data[i][j] - prev);
        }
        prev = (i == 0 ? 0 : tcg_ctx.gen_insn_end_off[i - 1]);
        p = encode_sleb128(p, tcg_ctx.gen_insn_end_off[i] - prev);

        /* Test for (pending) buffer overflow.  The assumption is that any
           one row beginning below the high water mark cannot overrun
           the buffer completely.  Thus we can test for overflow after
           encoding a row without having to check during encoding.  */
        if (unlikely(p > highwater)) {
            return -1;
        }
    }

    return p - block;
}
252
253
/* The cpu state corresponding to 'searched_pc' is restored.
 * Returns 0 on success, -1 if 'searched_pc' is not covered by this TB. */
static int cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb,
                                     uintptr_t searched_pc)
{
    target_ulong data[TARGET_INSN_START_WORDS] = { tb->pc };
    uintptr_t host_pc = (uintptr_t)tb->tc_ptr;
    CPUArchState *env = cpu->env_ptr;
    uint8_t *p = tb->tc_search;
    int i, j, num_insns = tb->icount;
#ifdef CONFIG_PROFILER
    int64_t ti = profile_getclock();
#endif

    if (searched_pc < host_pc) {
        return -1;
    }

    /* Reconstruct the stored insn data while looking for the point at
       which the end of the insn exceeds the searched_pc.  */
    for (i = 0; i < num_insns; ++i) {
        for (j = 0; j < TARGET_INSN_START_WORDS; ++j) {
            data[j] += decode_sleb128(&p);
        }
        host_pc += decode_sleb128(&p);
        if (host_pc > searched_pc) {
            goto found;
        }
    }
    return -1;

 found:
    if (tb->cflags & CF_USE_ICOUNT) {
        assert(use_icount);
        /* Reset the cycle counter to the start of the block.  */
        cpu->icount_decr.u16.low += num_insns;
        /* Clear the IO flag.  */
        cpu->can_do_io = 0;
    }
    cpu->icount_decr.u16.low -= i;
    restore_state_to_opc(env, tb, data);

#ifdef CONFIG_PROFILER
    tcg_ctx.restore_time += profile_getclock() - ti;
    tcg_ctx.restore_count++;
#endif
    return 0;
}
300
301bool cpu_restore_state(CPUState *cpu, uintptr_t retaddr)
302{
303 TranslationBlock *tb;
304
305 tb = tb_find_pc(retaddr);
306 if (tb) {
307 cpu_restore_state_from_tb(cpu, tb, retaddr);
308 if (tb->cflags & CF_NOCACHE) {
309
310 cpu->current_tb = NULL;
311 tb_phys_invalidate(tb, -1);
312 tb_free(tb);
313 }
314 return true;
315 }
316 return false;
317}
318
319void page_size_init(void)
320{
321
322
323 qemu_real_host_page_size = getpagesize();
324 qemu_real_host_page_mask = -(intptr_t)qemu_real_host_page_size;
325 if (qemu_host_page_size == 0) {
326 qemu_host_page_size = qemu_real_host_page_size;
327 }
328 if (qemu_host_page_size < TARGET_PAGE_SIZE) {
329 qemu_host_page_size = TARGET_PAGE_SIZE;
330 }
331 qemu_host_page_mask = -(intptr_t)qemu_host_page_size;
332}
333
/* One-time page bookkeeping setup.  On BSD user-mode emulation, walk the
 * host's existing mappings and mark them PAGE_RESERVED so the guest
 * cannot map over them. */
static void page_init(void)
{
    page_size_init();
#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
    {
#ifdef HAVE_KINFO_GETVMMAP
        struct kinfo_vmentry *freep;
        int i, cnt;

        freep = kinfo_getvmmap(getpid(), &cnt);
        if (freep) {
            mmap_lock();
            for (i = 0; i < cnt; i++) {
                unsigned long startaddr, endaddr;

                startaddr = freep[i].kve_start;
                endaddr = freep[i].kve_end;
                if (h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                    } else {
                        /* End is outside the guest address space: reserve
                           to the top, when that fits in the map.  */
#if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
                        endaddr = ~0ul;
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
#endif
                    }
                }
            }
            free(freep);
            mmap_unlock();
        }
#else
        /* Fallback: parse the Linux-compat maps file.  */
        FILE *f;

        last_brk = (unsigned long)sbrk(0);

        f = fopen("/compat/linux/proc/self/maps", "r");
        if (f) {
            mmap_lock();

            do {
                unsigned long startaddr, endaddr;
                int n;

                n = fscanf(f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);

                if (n == 2 && h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                    } else {
                        endaddr = ~0ul;
                    }
                    page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                }
            } while (!feof(f));

            fclose(f);
            mmap_unlock();
        }
#endif
    }
#endif
}
402
403
404
405
/* Walk the multi-level page map down to the PageDesc for @index
 * (a page number).  When @alloc is set, missing intermediate tables and
 * leaves are created (zeroed); otherwise NULL is returned on a miss.
 * Updates are published with RCU-style set/read. */
static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
{
    PageDesc *pd;
    void **lp;
    int i;

    /* Level 1.  Always allocated.  */
    lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));

    /* Level 2..N-1.  */
    for (i = V_L1_SHIFT / V_L2_BITS - 1; i > 0; i--) {
        void **p = atomic_rcu_read(lp);

        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            p = g_new0(void *, V_L2_SIZE);
            atomic_rcu_set(lp, p);
        }

        lp = p + ((index >> (i * V_L2_BITS)) & (V_L2_SIZE - 1));
    }

    /* Leaf level: an array of PageDesc.  */
    pd = atomic_rcu_read(lp);
    if (pd == NULL) {
        if (!alloc) {
            return NULL;
        }
        pd = g_new0(PageDesc, V_L2_SIZE);
        atomic_rcu_set(lp, pd);
    }

    return pd + (index & (V_L2_SIZE - 1));
}
441
/* Look up the PageDesc for page number @index without allocating;
 * returns NULL when no descriptor exists yet. */
static inline PageDesc *page_find(tb_page_addr_t index)
{
    return page_find_alloc(index, 0);
}
446
447#if defined(CONFIG_USER_ONLY)
448
449
450
451
452#define USE_STATIC_CODE_GEN_BUFFER
453#endif
454
455
456
457#define MIN_CODE_GEN_BUFFER_SIZE (1024u * 1024)
458
459
460
461
462#if defined(__x86_64__)
463# define MAX_CODE_GEN_BUFFER_SIZE (2ul * 1024 * 1024 * 1024)
464#elif defined(__sparc__)
465# define MAX_CODE_GEN_BUFFER_SIZE (2ul * 1024 * 1024 * 1024)
466#elif defined(__powerpc64__)
467# define MAX_CODE_GEN_BUFFER_SIZE (2ul * 1024 * 1024 * 1024)
468#elif defined(__aarch64__)
469# define MAX_CODE_GEN_BUFFER_SIZE (128ul * 1024 * 1024)
470#elif defined(__arm__)
471# define MAX_CODE_GEN_BUFFER_SIZE (16u * 1024 * 1024)
472#elif defined(__s390x__)
473
474# define MAX_CODE_GEN_BUFFER_SIZE (3ul * 1024 * 1024 * 1024)
475#elif defined(__mips__)
476
477
478# define MAX_CODE_GEN_BUFFER_SIZE (128ul * 1024 * 1024)
479#else
480# define MAX_CODE_GEN_BUFFER_SIZE ((size_t)-1)
481#endif
482
483#define DEFAULT_CODE_GEN_BUFFER_SIZE_1 (32u * 1024 * 1024)
484
485#define DEFAULT_CODE_GEN_BUFFER_SIZE \
486 (DEFAULT_CODE_GEN_BUFFER_SIZE_1 < MAX_CODE_GEN_BUFFER_SIZE \
487 ? DEFAULT_CODE_GEN_BUFFER_SIZE_1 : MAX_CODE_GEN_BUFFER_SIZE)
488
489static inline size_t size_code_gen_buffer(size_t tb_size)
490{
491
492 if (tb_size == 0) {
493#ifdef USE_STATIC_CODE_GEN_BUFFER
494 tb_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
495#else
496
497
498
499
500 tb_size = (unsigned long)(ram_size / 4);
501#endif
502 }
503 if (tb_size < MIN_CODE_GEN_BUFFER_SIZE) {
504 tb_size = MIN_CODE_GEN_BUFFER_SIZE;
505 }
506 if (tb_size > MAX_CODE_GEN_BUFFER_SIZE) {
507 tb_size = MAX_CODE_GEN_BUFFER_SIZE;
508 }
509 tcg_ctx.code_gen_buffer_size = tb_size;
510 return tb_size;
511}
512
513#ifdef __mips__
514
515
/* True if [addr, addr+size) straddles a 256MB-aligned boundary
 * (the top nibble of the low 32 bits differs between the endpoints). */
static inline bool cross_256mb(void *addr, size_t size)
{
    uintptr_t lo = (uintptr_t)addr;
    uintptr_t hi = lo + size;

    return ((lo ^ hi) & 0xf0000000) != 0;
}
520
521
522
523
/* Given a buffer [buf1, buf1+size1) known to cross a 256MB boundary,
 * split it at that boundary, keep the larger half, record its size in
 * tcg_ctx.code_gen_buffer_size, and return its start. */
static inline void *split_cross_256mb(void *buf1, size_t size1)
{
    /* buf2 = start of the 256MB-aligned region containing the end. */
    void *buf2 = (void *)(((uintptr_t)buf1 + size1) & 0xf0000000);
    size_t size2 = buf1 + size1 - buf2;

    size1 = buf2 - buf1;
    if (size1 < size2) {
        /* The upper half is larger; use it instead. */
        size1 = size2;
        buf1 = buf2;
    }

    tcg_ctx.code_gen_buffer_size = size1;
    return buf1;
}
538#endif
539
540#ifdef USE_STATIC_CODE_GEN_BUFFER
541static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
542 __attribute__((aligned(CODE_GEN_ALIGN)));
543
544# ifdef _WIN32
/* Change the protection of [addr, addr+size) (Windows). */
static inline void do_protect(void *addr, long size, int prot)
{
    DWORD old_protect;
    /* The previous protection is required by the API but unused here. */
    VirtualProtect(addr, size, prot, &old_protect);
}
550
/* Make [addr, addr+size) readable, writable and executable. */
static inline void map_exec(void *addr, long size)
{
    do_protect(addr, size, PAGE_EXECUTE_READWRITE);
}
555
/* Make [addr, addr+size) inaccessible (guard region). */
static inline void map_none(void *addr, long size)
{
    do_protect(addr, size, PAGE_NOACCESS);
}
560# else
561static inline void do_protect(void *addr, long size, int prot)
562{
563 uintptr_t start, end;
564
565 start = (uintptr_t)addr;
566 start &= qemu_real_host_page_mask;
567
568 end = (uintptr_t)addr + size;
569 end = ROUND_UP(end, qemu_real_host_page_size);
570
571 mprotect((void *)start, end - start, prot);
572}
573
/* Make [addr, addr+size) readable, writable and executable. */
static inline void map_exec(void *addr, long size)
{
    do_protect(addr, size, PROT_READ | PROT_WRITE | PROT_EXEC);
}
578
/* Make [addr, addr+size) inaccessible (guard region). */
static inline void map_none(void *addr, long size)
{
    do_protect(addr, size, PROT_NONE);
}
583# endif
584
/* Set up the statically allocated code buffer: trim it to page
 * boundaries, honor the requested size, keep a trailing guard page,
 * and make the usable part executable. */
static inline void *alloc_code_gen_buffer(void)
{
    void *buf = static_code_gen_buffer;
    size_t full_size, size;

    /* The size of the buffer, rounded down to end on a page boundary.  */
    full_size = (((uintptr_t)buf + sizeof(static_code_gen_buffer))
                 & qemu_real_host_page_mask) - (uintptr_t)buf;

    /* Reserve a guard page.  */
    size = full_size - qemu_real_host_page_size;

    /* Honor a command-line option limiting the size of the buffer.  */
    if (size > tcg_ctx.code_gen_buffer_size) {
        size = (((uintptr_t)buf + tcg_ctx.code_gen_buffer_size)
                & qemu_real_host_page_mask) - (uintptr_t)buf;
    }
    tcg_ctx.code_gen_buffer_size = size;

#ifdef __mips__
    if (cross_256mb(buf, size)) {
        buf = split_cross_256mb(buf, size);
        size = tcg_ctx.code_gen_buffer_size;
    }
#endif

    map_exec(buf, size);
    /* Trap on overruns into the guard page.  */
    map_none(buf + size, qemu_real_host_page_size);
    qemu_madvise(buf, size, QEMU_MADV_HUGEPAGE);

    return buf;
}
617#elif defined(_WIN32)
/* Allocate the code buffer with VirtualAlloc.  Perform the allocation
 * in two steps so that the guard page at the end is reserved but
 * never committed. */
static inline void *alloc_code_gen_buffer(void)
{
    size_t size = tcg_ctx.code_gen_buffer_size;
    void *buf1, *buf2;

    buf1 = VirtualAlloc(NULL, size + qemu_real_host_page_size,
                        MEM_RESERVE, PAGE_NOACCESS);
    if (buf1 != NULL) {
        buf2 = VirtualAlloc(buf1, size, MEM_COMMIT, PAGE_EXECUTE_READWRITE);
        /* Committing inside our own reservation cannot move the block. */
        assert(buf1 == buf2);
    }

    return buf1;
}
634#else
/* Allocate the code buffer with mmap, with a PROT_NONE guard page at
 * the end.  Returns NULL on failure. */
static inline void *alloc_code_gen_buffer(void)
{
    int flags = MAP_PRIVATE | MAP_ANONYMOUS;
    uintptr_t start = 0;
    size_t size = tcg_ctx.code_gen_buffer_size;
    void *buf;

    /* Constrain the position of the buffer based on the host cpu.
       Note that these addresses are chosen in concert with the
       addresses assigned in the relevant linker script file.  */
# if defined(__PIE__) || defined(__PIC__)
    /* Don't bother setting a preferred location if we're building
       a position-independent executable.  We're more likely to get
       an address near the main executable if we let the kernel
       choose the address.  */
# elif defined(__x86_64__) && defined(MAP_32BIT)
    /* Force the memory down into low memory with the executable.
       Leave the choice of exact location with the kernel.  */
    flags |= MAP_32BIT;
    /* Cannot expect to map more than 800MB in low memory.  */
    if (size > 800u * 1024 * 1024) {
        tcg_ctx.code_gen_buffer_size = size = 800u * 1024 * 1024;
    }
# elif defined(__sparc__)
    start = 0x40000000ul;
# elif defined(__s390x__)
    start = 0x90000000ul;
# elif defined(__mips__)
#  if _MIPS_SIM == _ABI64
    start = 0x128000000ul;
#  else
    start = 0x08000000ul;
#  endif
# endif

    buf = mmap((void *)start, size + qemu_real_host_page_size,
               PROT_NONE, flags, -1, 0);
    if (buf == MAP_FAILED) {
        return NULL;
    }

#ifdef __mips__
    if (cross_256mb(buf, size)) {
        /* Try again, with the original still mapped, to avoid
           re-acquiring the same 256mb crossing.  */
        size_t size2;
        void *buf2 = mmap(NULL, size + qemu_real_host_page_size,
                          PROT_NONE, flags, -1, 0);
        switch (buf2 != MAP_FAILED) {
        case 1:
            if (!cross_256mb(buf2, size)) {
                /* Success!  Use the new buffer.  */
                munmap(buf, size);
                break;
            }
            /* Failure.  Work with what we had.  */
            munmap(buf2, size);
            /* fallthrough */
        default:
            /* Split the original buffer.  Free the smaller half.  */
            buf2 = split_cross_256mb(buf, size);
            size2 = tcg_ctx.code_gen_buffer_size;
            if (buf == buf2) {
                munmap(buf + size2 + qemu_real_host_page_size, size - size2);
            } else {
                munmap(buf, size - size2);
            }
            size = size2;
            break;
        }
        buf = buf2;
    }
#endif

    /* Make the final buffer accessible.  The guard page at the end
       will remain inaccessible with PROT_NONE.  */
    mprotect(buf, size, PROT_WRITE | PROT_READ | PROT_EXEC);

    /* Request large pages for the buffer.  */
    qemu_madvise(buf, size, QEMU_MADV_HUGEPAGE);

    return buf;
}
718#endif
719
/* Allocate the translation buffer and the TranslationBlock array, and
 * initialise the TB lock.  Exits the process on allocation failure. */
static inline void code_gen_alloc(size_t tb_size)
{
    tcg_ctx.code_gen_buffer_size = size_code_gen_buffer(tb_size);
    tcg_ctx.code_gen_buffer = alloc_code_gen_buffer();
    if (tcg_ctx.code_gen_buffer == NULL) {
        fprintf(stderr, "Could not allocate dynamic translator buffer\n");
        exit(1);
    }

    /* Estimate a good size for the number of TBs we can support.  We
       still haven't deducted the prologue from the buffer size here,
       but that's minimal and won't affect the estimate much.  */
    tcg_ctx.code_gen_max_blocks
        = tcg_ctx.code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
    tcg_ctx.tb_ctx.tbs = g_new(TranslationBlock, tcg_ctx.code_gen_max_blocks);

    qemu_mutex_init(&tcg_ctx.tb_ctx.tb_lock);
}
738
739
740
741
/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer. Zero means default
   size. */
void tcg_exec_init(unsigned long tb_size)
{
    cpu_gen_init();
    page_init();
    code_gen_alloc(tb_size);
#if defined(CONFIG_SOFTMMU)
    /* There's no guest base to take into account, so go ahead and
       initialize the prologue now.  */
    tcg_prologue_init(&tcg_ctx);
#endif
}
753
754bool tcg_enabled(void)
755{
756 return tcg_ctx.code_gen_buffer != NULL;
757}
758
759
760
761static TranslationBlock *tb_alloc(target_ulong pc)
762{
763 TranslationBlock *tb;
764
765 if (tcg_ctx.tb_ctx.nb_tbs >= tcg_ctx.code_gen_max_blocks) {
766 return NULL;
767 }
768 tb = &tcg_ctx.tb_ctx.tbs[tcg_ctx.tb_ctx.nb_tbs++];
769 tb->pc = pc;
770 tb->cflags = 0;
771 return tb;
772}
773
void tb_free(TranslationBlock *tb)
{
    /* In practice this is mostly used for single use temporary TB
       Ignore the hard cases and just back up if this TB happens to
       be the last one generated.  */
    if (tcg_ctx.tb_ctx.nb_tbs > 0 &&
            tb == &tcg_ctx.tb_ctx.tbs[tcg_ctx.tb_ctx.nb_tbs - 1]) {
        tcg_ctx.code_gen_ptr = tb->tc_ptr;
        tcg_ctx.tb_ctx.nb_tbs--;
    }
}
785
786static inline void invalidate_page_bitmap(PageDesc *p)
787{
788 g_free(p->code_bitmap);
789 p->code_bitmap = NULL;
790 p->code_write_count = 0;
791}
792
793
/* Recursively clear all TB lists and code bitmaps in the page-map
 * subtree at *lp; @level is the number of levels above the leaves. */
static void page_flush_tb_1(int level, void **lp)
{
    int i;

    if (*lp == NULL) {
        return;
    }
    if (level == 0) {
        /* Leaf: an array of PageDesc. */
        PageDesc *pd = *lp;

        for (i = 0; i < V_L2_SIZE; ++i) {
            pd[i].first_tb = NULL;
            invalidate_page_bitmap(pd + i);
        }
    } else {
        /* Interior node: recurse into each child table. */
        void **pp = *lp;

        for (i = 0; i < V_L2_SIZE; ++i) {
            page_flush_tb_1(level - 1, pp + i);
        }
    }
}
816
817static void page_flush_tb(void)
818{
819 int i;
820
821 for (i = 0; i < V_L1_SIZE; i++) {
822 page_flush_tb_1(V_L1_SHIFT / V_L2_BITS - 1, l1_map + i);
823 }
824}
825
826
827
/* Flush all translation blocks: reset the TB array, every CPU's jump
 * cache, the physical hash table and the per-page TB lists, and rewind
 * the code generation pointer.  XXX: not thread safe. */
void tb_flush(CPUState *cpu)
{
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer),
           tcg_ctx.tb_ctx.nb_tbs, tcg_ctx.tb_ctx.nb_tbs > 0 ?
           ((unsigned long)(tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer)) /
           tcg_ctx.tb_ctx.nb_tbs : 0);
#endif
    if ((unsigned long)(tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer)
        > tcg_ctx.code_gen_buffer_size) {
        cpu_abort(cpu, "Internal error: code buffer overflow\n");
    }
    tcg_ctx.tb_ctx.nb_tbs = 0;

    /* Invalidate every CPU's direct TB lookup cache. */
    CPU_FOREACH(cpu) {
        memset(cpu->tb_jmp_cache, 0, sizeof(cpu->tb_jmp_cache));
    }

    memset(tcg_ctx.tb_ctx.tb_phys_hash, 0, sizeof(tcg_ctx.tb_ctx.tb_phys_hash));
    page_flush_tb();

    tcg_ctx.code_gen_ptr = tcg_ctx.code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tcg_ctx.tb_ctx.tb_flush_count++;
}
855
856#ifdef DEBUG_TB_CHECK
857
/* Debug check: report any TB still in the physical hash table that
 * overlaps the (page-aligned) invalidated @address. */
static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *tb;
    int i;

    address &= TARGET_PAGE_MASK;
    for (i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for (tb = tcg_ctx.tb_ctx.tb_phys_hash[i]; tb != NULL;
             tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=" TARGET_FMT_lx
                       " PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}
876
877
/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for (i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for (tb = tcg_ctx.tb_ctx.tb_phys_hash[i]; tb != NULL;
             tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            /* Pages containing translated code must not be writable. */
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}
895
896#endif
897
898static inline void tb_hash_remove(TranslationBlock **ptb, TranslationBlock *tb)
899{
900 TranslationBlock *tb1;
901
902 for (;;) {
903 tb1 = *ptb;
904 if (tb1 == tb) {
905 *ptb = tb1->phys_hash_next;
906 break;
907 }
908 ptb = &tb1->phys_hash_next;
909 }
910}
911
912static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
913{
914 TranslationBlock *tb1;
915 unsigned int n1;
916
917 for (;;) {
918 tb1 = *ptb;
919 n1 = (uintptr_t)tb1 & 3;
920 tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
921 if (tb1 == tb) {
922 *ptb = tb1->page_next[n1];
923 break;
924 }
925 ptb = &tb1->page_next[n1];
926 }
927}
928
/* Remove tb's jump slot @n from the circular list of TBs jumping to
 * its target.  List pointers are tagged in their low 2 bits with the
 * jump-slot number (2 marks the list head, tb->jmp_first). */
static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for (;;) {
            tb1 = *ptb;
            n1 = (uintptr_t)tb1 & 3;
            tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
            if (n1 == n && tb1 == tb) {
                break;
            }
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}
957
958
959
/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (uintptr_t)(tb->tc_ptr + tb->tb_next_offset[n]));
}
964
965
/* invalidate one TB: remove it from the hash tables, the per-page
 * lists and every CPU's jump cache, and unchain any TBs jumping to it.
 * @page_addr names the page being invalidated (-1 for none): the TB is
 * not removed from that page's list, which the caller owns. */
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
{
    CPUState *cpu;
    PageDesc *p;
    unsigned int h, n1;
    tb_page_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_hash_remove(&tcg_ctx.tb_ctx.tb_phys_hash[h], tb);

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tcg_ctx.tb_ctx.tb_invalidated_flag = 1;

    /* remove the TB from the hash list */
    h = tb_jmp_cache_hash_func(tb->pc);
    CPU_FOREACH(cpu) {
        if (cpu->tb_jmp_cache[h] == tb) {
            cpu->tb_jmp_cache[h] = NULL;
        }
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for (;;) {
        n1 = (uintptr_t)tb1 & 3;
        if (n1 == 2) {
            break;
        }
        tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    /* fail safe: mark the circular list as empty */
    tb->jmp_first = (TranslationBlock *)((uintptr_t)tb | 2);

    tcg_ctx.tb_ctx.tb_phys_invalidate_count++;
}
1022
/* Build the bitmap of intra-page offsets covered by translated code,
 * used by tb_invalidate_phys_page_fast() to filter SMC writes. */
static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = bitmap_new(TARGET_PAGE_SIZE);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (uintptr_t)tb & 3;
        tb = (TranslationBlock *)((uintptr_t)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* TB starts in this page; clip its end to the page. */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE) {
                tb_end = TARGET_PAGE_SIZE;
            }
        } else {
            /* TB started in the previous page; it covers [0, end). */
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        bitmap_set(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}
1051
1052
/* Translate one guest basic block starting at (pc, cs_base, flags) and
 * link the resulting TB into the hash/page tables.  Called with
 * mmap_lock held for user-mode emulation. */
TranslationBlock *tb_gen_code(CPUState *cpu,
                              target_ulong pc, target_ulong cs_base,
                              int flags, int cflags)
{
    CPUArchState *env = cpu->env_ptr;
    TranslationBlock *tb;
    tb_page_addr_t phys_pc, phys_page2;
    target_ulong virt_page2;
    tcg_insn_unit *gen_code_buf;
    int gen_code_size, search_size;
#ifdef CONFIG_PROFILER
    int64_t ti;
#endif

    phys_pc = get_page_addr_code(env, pc);
    if (use_icount && !(cflags & CF_IGNORE_ICOUNT)) {
        cflags |= CF_USE_ICOUNT;
    }

    tb = tb_alloc(pc);
    if (unlikely(!tb)) {
 buffer_overflow:
        /* flush must be done */
        tb_flush(cpu);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        assert(tb != NULL);
        /* Don't forget to invalidate previous TB info.  */
        tcg_ctx.tb_ctx.tb_invalidated_flag = 1;
    }

    gen_code_buf = tcg_ctx.code_gen_ptr;
    tb->tc_ptr = gen_code_buf;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;

#ifdef CONFIG_PROFILER
    /* includes aborted translations because of exceptions */
    tcg_ctx.tb_count1++;
    ti = profile_getclock();
#endif

    tcg_func_start(&tcg_ctx);

    gen_intermediate_code(env, tb);

    trace_translate_block(tb, tb->pc, tb->tc_ptr);

    /* generate machine code */
    tb->tb_next_offset[0] = 0xffff;
    tb->tb_next_offset[1] = 0xffff;
    tcg_ctx.tb_next_offset = tb->tb_next_offset;
#ifdef USE_DIRECT_JUMP
    tcg_ctx.tb_jmp_offset = tb->tb_jmp_offset;
    tcg_ctx.tb_next = NULL;
#else
    tcg_ctx.tb_jmp_offset = NULL;
    tcg_ctx.tb_next = tb->tb_next;
#endif

#ifdef CONFIG_PROFILER
    tcg_ctx.tb_count++;
    tcg_ctx.interm_time += profile_getclock() - ti;
    tcg_ctx.code_time -= profile_getclock();
#endif

    /* ??? Overflow could be handled better here.  In particular, we
       don't need to re-do gen_intermediate_code, nor should we re-do
       the tcg optimization currently hidden inside tcg_gen_code.  All
       that should be required is to flush the TBs, allocate a new TB,
       re-initialize it per above, and re-do the actual code generation.  */
    gen_code_size = tcg_gen_code(&tcg_ctx, tb);
    if (unlikely(gen_code_size < 0)) {
        goto buffer_overflow;
    }
    search_size = encode_search(tb, (void *)gen_code_buf + gen_code_size);
    if (unlikely(search_size < 0)) {
        goto buffer_overflow;
    }

#ifdef CONFIG_PROFILER
    tcg_ctx.code_time += profile_getclock();
    tcg_ctx.code_in_len += tb->size;
    tcg_ctx.code_out_len += gen_code_size;
    tcg_ctx.search_out_len += search_size;
#endif

#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_OUT_ASM) &&
        qemu_log_in_addr_range(tb->pc)) {
        qemu_log("OUT: [size=%d]\n", gen_code_size);
        log_disas(tb->tc_ptr, gen_code_size);
        qemu_log("\n");
        qemu_log_flush();
    }
#endif

    /* Advance past both the generated code and the search data. */
    tcg_ctx.code_gen_ptr = (void *)
        ROUND_UP((uintptr_t)gen_code_buf + gen_code_size + search_size,
                 CODE_GEN_ALIGN);

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_page_addr_code(env, virt_page2);
    }
    tb_link_page(tb, phys_pc, phys_page2);

    /* Xilinx etrace hook: emit the translated block to the trace. */
    if (qemu_etrace_mask(ETRACE_F_TRANSLATION)) {
        CPUState *cpu = ENV_GET_CPU(env);
        hwaddr phys_addr = pc;

#if !defined(CONFIG_USER_ONLY)
        phys_addr = cpu_get_phys_page_debug(cpu, pc & TARGET_PAGE_MASK);
        phys_addr += pc & ~TARGET_PAGE_MASK;
#endif
        etrace_dump_tb(&qemu_etracer, NULL, cpu->cpu_index,
                       tb->pc, phys_addr, tb->size,
                       tb->tc_ptr, gen_code_size);
    }

    return tb;
}
1178
1179
1180
1181
1182
1183
1184
1185
1186
1187
1188void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end)
1189{
1190 while (start < end) {
1191 tb_invalidate_phys_page_range(start, end, 0);
1192 start &= TARGET_PAGE_MASK;
1193 start += TARGET_PAGE_SIZE;
1194 }
1195}
1196
1197
1198
1199
1200
1201
1202
1203
1204
1205
1206void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
1207 int is_cpu_write_access)
1208{
1209 TranslationBlock *tb, *tb_next, *saved_tb;
1210 CPUState *cpu = current_cpu;
1211#if defined(TARGET_HAS_PRECISE_SMC)
1212 CPUArchState *env = NULL;
1213#endif
1214 tb_page_addr_t tb_start, tb_end;
1215 PageDesc *p;
1216 int n;
1217#ifdef TARGET_HAS_PRECISE_SMC
1218 int current_tb_not_found = is_cpu_write_access;
1219 TranslationBlock *current_tb = NULL;
1220 int current_tb_modified = 0;
1221 target_ulong current_pc = 0;
1222 target_ulong current_cs_base = 0;
1223 int current_flags = 0;
1224#endif
1225
1226 p = page_find(start >> TARGET_PAGE_BITS);
1227 if (!p) {
1228 return;
1229 }
1230#if defined(TARGET_HAS_PRECISE_SMC)
1231 if (cpu != NULL) {
1232 env = cpu->env_ptr;
1233 }
1234#endif
1235
1236
1237
1238
1239 tb = p->first_tb;
1240 while (tb != NULL) {
1241 n = (uintptr_t)tb & 3;
1242 tb = (TranslationBlock *)((uintptr_t)tb & ~3);
1243 tb_next = tb->page_next[n];
1244
1245 if (n == 0) {
1246
1247
1248 tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
1249 tb_end = tb_start + tb->size;
1250 } else {
1251 tb_start = tb->page_addr[1];
1252 tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
1253 }
1254 if (!(tb_end <= start || tb_start >= end)) {
1255#ifdef TARGET_HAS_PRECISE_SMC
1256 if (current_tb_not_found) {
1257 current_tb_not_found = 0;
1258 current_tb = NULL;
1259 if (cpu->mem_io_pc) {
1260
1261 current_tb = tb_find_pc(cpu->mem_io_pc);
1262 }
1263 }
1264 if (current_tb == tb &&
1265 (current_tb->cflags & CF_COUNT_MASK) != 1) {
1266
1267
1268
1269
1270
1271
1272 current_tb_modified = 1;
1273 cpu_restore_state_from_tb(cpu, current_tb, cpu->mem_io_pc);
1274 cpu_get_tb_cpu_state(env, ¤t_pc, ¤t_cs_base,
1275 ¤t_flags);
1276 }
1277#endif
1278
1279
1280 saved_tb = NULL;
1281 if (cpu != NULL) {
1282 saved_tb = cpu->current_tb;
1283 cpu->current_tb = NULL;
1284 }
1285 tb_phys_invalidate(tb, -1);
1286 if (cpu != NULL) {
1287 cpu->current_tb = saved_tb;
1288 if (cpu->interrupt_request && cpu->current_tb) {
1289 cpu_interrupt(cpu, cpu->interrupt_request);
1290 }
1291 }
1292 }
1293 tb = tb_next;
1294 }
1295#if !defined(CONFIG_USER_ONLY)
1296
1297 if (!p->first_tb) {
1298 invalidate_page_bitmap(p);
1299 tlb_unprotect_code(start);
1300 }
1301#endif
1302#ifdef TARGET_HAS_PRECISE_SMC
1303 if (current_tb_modified) {
1304
1305
1306
1307 cpu->current_tb = NULL;
1308 tb_gen_code(cpu, current_pc, current_cs_base, current_flags, 1);
1309 cpu_resume_from_signal(cpu, NULL);
1310 }
1311#endif
1312}
1313
1314
/* Fast path for code-write invalidation: consult the page's code
 * bitmap (building it after SMC_BITMAP_USE_THRESHOLD writes) and only
 * invalidate when the written bytes overlap translated code.
 * NOTE(review): the single bitmap-word test assumes len is small enough
 * that all tested bits come from one word read — confirm callers pass
 * len <= 8 with start aligned to len. */
void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
{
    PageDesc *p;

#if 0
    if (1) {
        qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                 cpu_single_env->mem_io_vaddr, len,
                 cpu_single_env->eip,
                 cpu_single_env->eip +
                 (intptr_t)cpu_single_env->segs[R_CS].base);
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p) {
        return;
    }
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD) {
        /* build code bitmap */
        build_page_bitmap(p);
    }
    if (p->code_bitmap) {
        unsigned int nr;
        unsigned long b;

        nr = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG - 1));
        if (b & ((1 << len) - 1)) {
            goto do_invalidate;
        }
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}
1351
1352#if !defined(CONFIG_SOFTMMU)
1353
1354static void tb_invalidate_phys_page(tb_page_addr_t addr,
1355 uintptr_t pc, void *puc,
1356 bool locked)
1357{
1358 TranslationBlock *tb;
1359 PageDesc *p;
1360 int n;
1361#ifdef TARGET_HAS_PRECISE_SMC
1362 TranslationBlock *current_tb = NULL;
1363 CPUState *cpu = current_cpu;
1364 CPUArchState *env = NULL;
1365 int current_tb_modified = 0;
1366 target_ulong current_pc = 0;
1367 target_ulong current_cs_base = 0;
1368 int current_flags = 0;
1369#endif
1370
1371 addr &= TARGET_PAGE_MASK;
1372 p = page_find(addr >> TARGET_PAGE_BITS);
1373 if (!p) {
1374 return;
1375 }
1376 tb = p->first_tb;
1377#ifdef TARGET_HAS_PRECISE_SMC
1378 if (tb && pc != 0) {
1379 current_tb = tb_find_pc(pc);
1380 }
1381 if (cpu != NULL) {
1382 env = cpu->env_ptr;
1383 }
1384#endif
1385 while (tb != NULL) {
1386 n = (uintptr_t)tb & 3;
1387 tb = (TranslationBlock *)((uintptr_t)tb & ~3);
1388#ifdef TARGET_HAS_PRECISE_SMC
1389 if (current_tb == tb &&
1390 (current_tb->cflags & CF_COUNT_MASK) != 1) {
1391
1392
1393
1394
1395
1396
1397 current_tb_modified = 1;
1398 cpu_restore_state_from_tb(cpu, current_tb, pc);
1399 cpu_get_tb_cpu_state(env, ¤t_pc, ¤t_cs_base,
1400 ¤t_flags);
1401 }
1402#endif
1403 tb_phys_invalidate(tb, addr);
1404 tb = tb->page_next[n];
1405 }
1406 p->first_tb = NULL;
1407#ifdef TARGET_HAS_PRECISE_SMC
1408 if (current_tb_modified) {
1409
1410
1411
1412 cpu->current_tb = NULL;
1413 tb_gen_code(cpu, current_pc, current_cs_base, current_flags, 1);
1414 if (locked) {
1415 mmap_unlock();
1416 }
1417 cpu_resume_from_signal(cpu, puc);
1418 }
1419#endif
1420}
1421#endif
1422
1423
1424
1425
1426
/* add the tb in the target page and protect it if necessary.
 * @n is the page slot (0 or 1) of the TB; it is encoded into the low
 * bits of the per-page list pointer. */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, tb_page_addr_t page_addr)
{
    PageDesc *p;
#ifndef CONFIG_USER_ONLY
    bool page_already_protected;
#endif

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
    tb->page_next[n] = p->first_tb;
#ifndef CONFIG_USER_ONLY
    page_already_protected = p->first_tb != NULL;
#endif
    p->first_tb = (TranslationBlock *)((uintptr_t)tb | n);
    invalidate_page_bitmap(p);

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        for (addr = page_addr; addr < page_addr + qemu_host_page_size;
            addr += TARGET_PAGE_SIZE) {

            p2 = page_find(addr >> TARGET_PAGE_BITS);
            if (!p2) {
                continue;
            }
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
        }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
               page_addr);
#endif
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!page_already_protected) {
        tlb_protect_code(page_addr);
    }
#endif
}
1480
1481
1482
1483
1484
1485
1486static void tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
1487 tb_page_addr_t phys_page2)
1488{
1489 unsigned int h;
1490 TranslationBlock **ptb;
1491
1492
1493 h = tb_phys_hash_func(phys_pc);
1494 ptb = &tcg_ctx.tb_ctx.tb_phys_hash[h];
1495 tb->phys_hash_next = *ptb;
1496 *ptb = tb;
1497
1498
1499 tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
1500 if (phys_page2 != -1) {
1501 tb_alloc_page(tb, 1, phys_page2);
1502 } else {
1503 tb->page_addr[1] = -1;
1504 }
1505
1506 tb->jmp_first = (TranslationBlock *)((uintptr_t)tb | 2);
1507 tb->jmp_next[0] = NULL;
1508 tb->jmp_next[1] = NULL;
1509
1510
1511 if (tb->tb_next_offset[0] != 0xffff) {
1512 tb_reset_jump(tb, 0);
1513 }
1514 if (tb->tb_next_offset[1] != 0xffff) {
1515 tb_reset_jump(tb, 1);
1516 }
1517
1518#ifdef DEBUG_TB_CHECK
1519 tb_page_check();
1520#endif
1521}
1522
1523
1524
1525static TranslationBlock *tb_find_pc(uintptr_t tc_ptr)
1526{
1527 int m_min, m_max, m;
1528 uintptr_t v;
1529 TranslationBlock *tb;
1530
1531 if (tcg_ctx.tb_ctx.nb_tbs <= 0) {
1532 return NULL;
1533 }
1534 if (tc_ptr < (uintptr_t)tcg_ctx.code_gen_buffer ||
1535 tc_ptr >= (uintptr_t)tcg_ctx.code_gen_ptr) {
1536 return NULL;
1537 }
1538
1539 m_min = 0;
1540 m_max = tcg_ctx.tb_ctx.nb_tbs - 1;
1541 while (m_min <= m_max) {
1542 m = (m_min + m_max) >> 1;
1543 tb = &tcg_ctx.tb_ctx.tbs[m];
1544 v = (uintptr_t)tb->tc_ptr;
1545 if (v == tc_ptr) {
1546 return tb;
1547 } else if (tc_ptr < v) {
1548 m_max = m - 1;
1549 } else {
1550 m_min = m + 1;
1551 }
1552 }
1553 return &tcg_ctx.tb_ctx.tbs[m_max];
1554}
1555
1556#if !defined(CONFIG_USER_ONLY)
1557void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr)
1558{
1559 ram_addr_t ram_addr;
1560 MemoryRegion *mr;
1561 hwaddr l = 1;
1562
1563 rcu_read_lock();
1564 mr = address_space_translate(as, addr, &addr, &l, false);
1565 if (!(memory_region_is_ram(mr)
1566 || memory_region_is_romd(mr))) {
1567 rcu_read_unlock();
1568 return;
1569 }
1570 ram_addr = (memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK)
1571 + addr;
1572 tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
1573 rcu_read_unlock();
1574}
1575#endif
1576
1577void tb_check_watchpoint(CPUState *cpu)
1578{
1579 TranslationBlock *tb;
1580
1581 tb = tb_find_pc(cpu->mem_io_pc);
1582 if (tb) {
1583
1584 cpu_restore_state_from_tb(cpu, tb, cpu->mem_io_pc);
1585 tb_phys_invalidate(tb, -1);
1586 } else {
1587
1588
1589 CPUArchState *env = cpu->env_ptr;
1590 target_ulong pc, cs_base;
1591 tb_page_addr_t addr;
1592 int flags;
1593
1594 cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
1595 addr = get_page_addr_code(env, pc);
1596 tb_invalidate_phys_range(addr, addr + 1);
1597 }
1598}
1599
1600#ifndef CONFIG_USER_ONLY
1601
1602
/* In icount mode an instruction that performs device I/O must end its TB
 * so that the instruction count stays deterministic.  Regenerate the
 * current TB so that it terminates right after the offending instruction
 * and restart execution from it.  @retaddr is the host return address
 * inside the generated code.
 */
void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr)
{
#if defined(TARGET_MIPS) || defined(TARGET_SH4)
    CPUArchState *env = cpu->env_ptr;
#endif
    TranslationBlock *tb;
    uint32_t n, cflags;
    target_ulong pc, cs_base;
    uint64_t flags;

    tb = tb_find_pc(retaddr);
    if (!tb) {
        cpu_abort(cpu, "cpu_io_recompile: could not find TB for pc=%p",
                  (void *)retaddr);
    }
    n = cpu->icount_decr.u16.low + tb->icount;
    cpu_restore_state_from_tb(cpu, tb, retaddr);
    /* Calculate how many instructions had been executed before the fault
       occurred.  */
    n = n - cpu->icount_decr.u16.low;
    /* Generate a new TB ending right on the I/O instruction.  */
    n++;
    /* On MIPS and SH4 a delay-slot instruction cannot be restarted in
       isolation: if the faulting instruction sits in a delay slot (and
       was not the first instruction of the TB), back up to the branch
       and account for the extra instruction.  */
#if defined(TARGET_MIPS)
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
        env->active_tc.PC -= (env->hflags & MIPS_HFLAG_B16 ? 2 : 4);
        cpu->icount_decr.u16.low++;
        env->hflags &= ~MIPS_HFLAG_BMASK;
    }
#elif defined(TARGET_SH4)
    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
        && n > 1) {
        env->pc -= 2;
        cpu->icount_decr.u16.low++;
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
    }
#endif
    /* The instruction count must fit in the cflags count field.  */
    if (n > CF_COUNT_MASK) {
        cpu_abort(cpu, "TB too big during recompile");
    }

    cflags = n | CF_LAST_IO;
    pc = tb->pc;
    cs_base = tb->cs_base;
    flags = tb->flags;
    tb_phys_invalidate(tb, -1);
    if (tb->cflags & CF_NOCACHE) {
        if (tb->orig_tb) {
            /* Invalidate the original TB as well if this one was
               generated by cpu_exec_nocache().  */
            tb_phys_invalidate(tb->orig_tb, -1);
        }
        tb_free(tb);
    }
    /* Regenerate the TB, this time stopping at the I/O instruction.  */
    tb_gen_code(cpu, pc, cs_base, flags, cflags);

    /* Restart guest execution; does not return.  */
    cpu_resume_from_signal(cpu, NULL);
}
1671
1672void tb_flush_jmp_cache(CPUState *cpu, target_ulong addr)
1673{
1674 unsigned int i;
1675
1676
1677
1678 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1679 memset(&cpu->tb_jmp_cache[i], 0,
1680 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1681
1682 i = tb_jmp_cache_hash_page(addr);
1683 memset(&cpu->tb_jmp_cache[i], 0,
1684 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1685}
1686
/* Print a summary of the translation buffer state and per-TB statistics
 * through @cpu_fprintf (e.g. for a monitor "info jit" command).
 */
void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    /* Scan all TBs, accumulating size, page-crossing and jump counters. */
    for (i = 0; i < tcg_ctx.tb_ctx.nb_tbs; i++) {
        tb = &tcg_ctx.tb_ctx.tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size) {
            max_target_code_size = tb->size;
        }
        if (tb->page_addr[1] != -1) {
            cross_page++;
        }
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* Averages are guarded against nb_tbs == 0 with ternaries below. */
    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "gen code size       %td/%zd\n",
                tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer,
                tcg_ctx.code_gen_highwater - tcg_ctx.code_gen_buffer);
    cpu_fprintf(f, "TB count            %d/%d\n",
                tcg_ctx.tb_ctx.nb_tbs, tcg_ctx.code_gen_max_blocks);
    cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n",
                tcg_ctx.tb_ctx.nb_tbs ? target_code_size /
                        tcg_ctx.tb_ctx.nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size    %td bytes (expansion ratio: %0.1f)\n",
                tcg_ctx.tb_ctx.nb_tbs ? (tcg_ctx.code_gen_ptr -
                                         tcg_ctx.code_gen_buffer) /
                                         tcg_ctx.tb_ctx.nb_tbs : 0,
                target_code_size ? (double) (tcg_ctx.code_gen_ptr -
                                             tcg_ctx.code_gen_buffer) /
                                             target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n", cross_page,
            tcg_ctx.tb_ctx.nb_tbs ? (cross_page * 100) /
                                    tcg_ctx.tb_ctx.nb_tbs : 0);
    cpu_fprintf(f, "direct jump count   %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                tcg_ctx.tb_ctx.nb_tbs ? (direct_jmp_count * 100) /
                        tcg_ctx.tb_ctx.nb_tbs : 0,
                direct_jmp2_count,
                tcg_ctx.tb_ctx.nb_tbs ? (direct_jmp2_count * 100) /
                        tcg_ctx.tb_ctx.nb_tbs : 0);
    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count      %d\n", tcg_ctx.tb_ctx.tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n",
            tcg_ctx.tb_ctx.tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count     %d\n", tlb_flush_count);
    tcg_dump_info(f, cpu_fprintf);
}
1749
/* Print per-opcode TCG usage statistics; thin wrapper over the TCG core. */
void dump_opcount_info(FILE *f, fprintf_function cpu_fprintf)
{
    tcg_dump_op_count(f, cpu_fprintf);
}
1754
1755#else
1756
/* User-mode emulation variant: record the requested interrupt bits and
 * ask the TCG execution loop to exit at the end of the current TB.
 */
void cpu_interrupt(CPUState *cpu, int mask)
{
    cpu->interrupt_request |= mask;
    cpu->tcg_exit_req = 1;
}
1762
1763
1764
1765
1766
/* Accumulator used while walking the page table: adjacent pages with
 * identical protection are merged into single [start, end) regions
 * before the user callback is invoked.
 */
struct walk_memory_regions_data {
    walk_memory_regions_fn fn;  /* user callback invoked per region */
    void *priv;                 /* opaque argument forwarded to fn */
    target_ulong start;         /* start of current run, or -1 if none open */
    int prot;                   /* protection flags of the current run */
};
1773
1774static int walk_memory_regions_end(struct walk_memory_regions_data *data,
1775 target_ulong end, int new_prot)
1776{
1777 if (data->start != -1u) {
1778 int rc = data->fn(data->priv, data->start, end, data->prot);
1779 if (rc != 0) {
1780 return rc;
1781 }
1782 }
1783
1784 data->start = (new_prot ? end : -1u);
1785 data->prot = new_prot;
1786
1787 return 0;
1788}
1789
/* Recursively walk one level of the multi-level page table rooted at
 * *lp.  @base is the guest virtual address covered by this subtree; at
 * @level == 0 the table holds PageDesc entries, otherwise pointers to
 * the next level down.  Region boundaries are reported through
 * walk_memory_regions_end(); a non-zero callback result aborts the walk.
 */
static int walk_memory_regions_1(struct walk_memory_regions_data *data,
                                 target_ulong base, int level, void **lp)
{
    target_ulong pa;
    int i, rc;

    if (*lp == NULL) {
        /* Empty subtree: close any open run at the start of this range. */
        return walk_memory_regions_end(data, base, 0);
    }

    if (level == 0) {
        PageDesc *pd = *lp;

        for (i = 0; i < V_L2_SIZE; ++i) {
            int prot = pd[i].flags;

            pa = base | (i << TARGET_PAGE_BITS);
            if (prot != data->prot) {
                /* Protection changed: end the old run, start a new one. */
                rc = walk_memory_regions_end(data, pa, prot);
                if (rc != 0) {
                    return rc;
                }
            }
        }
    } else {
        void **pp = *lp;

        for (i = 0; i < V_L2_SIZE; ++i) {
            /* Each entry at this level covers V_L2_BITS * level extra
               address bits above the page offset. */
            pa = base | ((target_ulong)i <<
                         (TARGET_PAGE_BITS + V_L2_BITS * level));
            rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
            if (rc != 0) {
                return rc;
            }
        }
    }

    return 0;
}
1829
1830int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
1831{
1832 struct walk_memory_regions_data data;
1833 uintptr_t i;
1834
1835 data.fn = fn;
1836 data.priv = priv;
1837 data.start = -1u;
1838 data.prot = 0;
1839
1840 for (i = 0; i < V_L1_SIZE; i++) {
1841 int rc = walk_memory_regions_1(&data, (target_ulong)i << (V_L1_SHIFT + TARGET_PAGE_BITS),
1842 V_L1_SHIFT / V_L2_BITS - 1, l1_map + i);
1843 if (rc != 0) {
1844 return rc;
1845 }
1846 }
1847
1848 return walk_memory_regions_end(&data, 0, 0);
1849}
1850
1851static int dump_region(void *priv, target_ulong start,
1852 target_ulong end, unsigned long prot)
1853{
1854 FILE *f = (FILE *)priv;
1855
1856 (void) fprintf(f, TARGET_FMT_lx"-"TARGET_FMT_lx
1857 " "TARGET_FMT_lx" %c%c%c\n",
1858 start, end, end - start,
1859 ((prot & PAGE_READ) ? 'r' : '-'),
1860 ((prot & PAGE_WRITE) ? 'w' : '-'),
1861 ((prot & PAGE_EXEC) ? 'x' : '-'));
1862
1863 return 0;
1864}
1865
1866
1867void page_dump(FILE *f)
1868{
1869 const int length = sizeof(target_ulong) * 2;
1870 (void) fprintf(f, "%-*s %-*s %-*s %s\n",
1871 length, "start", length, "end", length, "size", "prot");
1872 walk_memory_regions(f, dump_region);
1873}
1874
1875int page_get_flags(target_ulong address)
1876{
1877 PageDesc *p;
1878
1879 p = page_find(address >> TARGET_PAGE_BITS);
1880 if (!p) {
1881 return 0;
1882 }
1883 return p->flags;
1884}
1885
1886
1887
1888
1889void page_set_flags(target_ulong start, target_ulong end, int flags)
1890{
1891 target_ulong addr, len;
1892
1893
1894
1895
1896#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
1897 assert(end < ((target_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
1898#endif
1899 assert(start < end);
1900
1901 start = start & TARGET_PAGE_MASK;
1902 end = TARGET_PAGE_ALIGN(end);
1903
1904 if (flags & PAGE_WRITE) {
1905 flags |= PAGE_WRITE_ORG;
1906 }
1907
1908 for (addr = start, len = end - start;
1909 len != 0;
1910 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
1911 PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
1912
1913
1914
1915 if (!(p->flags & PAGE_WRITE) &&
1916 (flags & PAGE_WRITE) &&
1917 p->first_tb) {
1918 tb_invalidate_phys_page(addr, 0, NULL, false);
1919 }
1920 p->flags = flags;
1921 }
1922}
1923
/* Check that every page in [start, start + len) carries the protection
 * bits requested in @flags (PAGE_READ / PAGE_WRITE).  For write checks,
 * pages that were made read-only only because they hold translated code
 * are unprotected on the fly via page_unprotect().  Returns 0 when the
 * whole range is accessible, -1 otherwise.
 */
int page_check_range(target_ulong start, target_ulong len, int flags)
{
    PageDesc *p;
    target_ulong end;
    target_ulong addr;

    /* The starting address must lie inside the part of the address space
       the page table covers.  */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(start < ((target_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
#endif

    if (len == 0) {
        return 0;
    }
    if (start + len - 1 < start) {
        /* The range wraps around the top of the address space.  */
        return -1;
    }

    /* Compute the aligned end before masking bits off start.  */
    end = TARGET_PAGE_ALIGN(start + len);
    start = start & TARGET_PAGE_MASK;

    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        p = page_find(addr >> TARGET_PAGE_BITS);
        if (!p) {
            return -1;
        }
        if (!(p->flags & PAGE_VALID)) {
            return -1;
        }

        if ((flags & PAGE_READ) && !(p->flags & PAGE_READ)) {
            return -1;
        }
        if (flags & PAGE_WRITE) {
            if (!(p->flags & PAGE_WRITE_ORG)) {
                return -1;
            }
            /* Unprotect the page if it was made read-only only because
               it contains translated code.  */
            if (!(p->flags & PAGE_WRITE)) {
                if (!page_unprotect(addr, 0, NULL)) {
                    return -1;
                }
            }
        }
    }
    return 0;
}
1978
1979
1980
/* Restore write access to a page that was write-protected because it
 * contains translated code, invalidating the affected TBs.  Called on a
 * write fault (and from page_check_range()).  Returns 1 if the fault was
 * handled here and the write can be retried, 0 if the page was not
 * protected by us and the caller must raise the fault.
 */
int page_unprotect(target_ulong address, uintptr_t pc, void *puc)
{
    unsigned int prot;
    PageDesc *p;
    target_ulong host_start, host_end, addr;

    /* NOTE(review): taking mmap_lock here looks intended to serialize
       against concurrent mmap/page-table changes even when reached from
       a fault handler — confirm against the callers.  */
    mmap_lock();

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p) {
        mmap_unlock();
        return 0;
    }

    /* Only act if the page was originally writable and is currently
       write-protected (i.e. protected by us for code tracking).  */
    if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
        host_start = address & qemu_host_page_mask;
        host_end = host_start + qemu_host_page_size;

        prot = 0;
        /* Re-enable PAGE_WRITE on every target page sharing this host
           page, accumulating the union of their flags for mprotect.  */
        for (addr = host_start ; addr < host_end ; addr += TARGET_PAGE_SIZE) {
            p = page_find(addr >> TARGET_PAGE_BITS);
            p->flags |= PAGE_WRITE;
            prot |= p->flags;

            /* Since the content will be modified, any translated code on
               the page must be invalidated.  */
            tb_invalidate_phys_page(addr, pc, puc, true);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(addr);
#endif
        }
        mprotect((void *)g2h(host_start), qemu_host_page_size,
                 prot & PAGE_BITS);

        mmap_unlock();
        return 1;
    }
    mmap_unlock();
    return 0;
}
2026#endif
2027