/*
 * Common CPU TLB handling.
 */
#include "qemu/osdep.h"
#include "qemu/main-loop.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "exec/memory.h"
#include "exec/address-spaces.h"
#include "exec/cpu_ldst.h"
#include "exec/cputlb.h"
#include "exec/memory-internal.h"
#include "exec/ram_addr.h"
#include "tcg/tcg.h"
#include "qemu/error-report.h"
#include "exec/log.h"
#include "exec/helper-proto.h"
#include "qemu/atomic.h"

/* Define DEBUG_TLB (and optionally DEBUG_TLB_LOG) to enable TLB debugging. */
/* #define DEBUG_TLB */
/* #define DEBUG_TLB_LOG */

#ifdef DEBUG_TLB
# define DEBUG_TLB_GATE 1
# ifdef DEBUG_TLB_LOG
#  define DEBUG_TLB_LOG_GATE 1
# else
#  define DEBUG_TLB_LOG_GATE 0
# endif
#else
# define DEBUG_TLB_GATE 0
# define DEBUG_TLB_LOG_GATE 0
#endif

#define tlb_debug(fmt, ...) do { \
    if (DEBUG_TLB_LOG_GATE) { \
        qemu_log_mask(CPU_LOG_MMU, "%s: " fmt, __func__, \
                      ## __VA_ARGS__); \
    } else if (DEBUG_TLB_GATE) { \
        fprintf(stderr, "%s: " fmt, __func__, ## __VA_ARGS__); \
    } \
} while (0)

#define assert_cpu_is_self(this_cpu) do { \
        if (DEBUG_TLB_GATE) { \
            g_assert(!(this_cpu)->created || qemu_cpu_is_self(this_cpu)); \
        } \
    } while (0)

/* run_on_cpu_data must be big enough to carry a target_ulong, since the
 * async flush helpers below pass addresses through data.target_ptr. */
QEMU_BUILD_BUG_ON(sizeof(target_ulong) > sizeof(run_on_cpu_data));

/* The mmu_idx flush bitmaps (idxmap) are carried in a uint16_t. */
QEMU_BUILD_BUG_ON(NB_MMU_MODES > 16);
#define ALL_MMUIDX_BITS ((1 << NB_MMU_MODES) - 1)

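/* Run @fn with argument @d on every vCPU other than @src.  The work is
 * queued asynchronously; callers that also need it to run on @src invoke
 * @fn (or async_safe_run_on_cpu) on @src themselves. */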
static void flush_all_helper(CPUState *src, run_on_cpu_func fn,
                             run_on_cpu_data d)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (cpu != src) {
            async_run_on_cpu(cpu, fn, d);
        }
    }
}

/* statistics */
int tlb_flush_count;

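/* Flush this vCPU's entire TLB.  This must run on the vCPU's own thread
 * (or before the vCPU has been created); cross-vCPU callers go through
 * tlb_flush() below, which schedules this as async work. */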
static void tlb_flush_nocheck(CPUState *cpu)
{
    CPUArchState *env = cpu->env_ptr;

    /* If TCG is not in use there is no TLB state to flush. */
    if (!tcg_enabled()) {
        return;
    }

    assert_cpu_is_self(cpu);
    tlb_debug("(count: %d)\n", tlb_flush_count++);

    tb_lock();

    memset(env->tlb_table, -1, sizeof(env->tlb_table));
    memset(env->tlb_v_table, -1, sizeof(env->tlb_v_table));
    cpu_tb_jmp_cache_clear(cpu);

    env->vtlb_index = 0;
    env->tlb_flush_addr = -1;
    env->tlb_flush_mask = 0;

    tb_unlock();

    atomic_mb_set(&cpu->pending_tlb_flush, 0);
}

static void tlb_flush_global_async_work(CPUState *cpu, run_on_cpu_data data)
{
    tlb_flush_nocheck(cpu);
}

void tlb_flush(CPUState *cpu)
{
    if (cpu->created && !qemu_cpu_is_self(cpu)) {
        if (atomic_mb_read(&cpu->pending_tlb_flush) != ALL_MMUIDX_BITS) {
            atomic_mb_set(&cpu->pending_tlb_flush, ALL_MMUIDX_BITS);
            async_run_on_cpu(cpu, tlb_flush_global_async_work,
                             RUN_ON_CPU_NULL);
        }
    } else {
        tlb_flush_nocheck(cpu);
    }
}

void tlb_flush_all_cpus(CPUState *src_cpu)
{
    const run_on_cpu_func fn = tlb_flush_global_async_work;
    flush_all_helper(src_cpu, fn, RUN_ON_CPU_NULL);
    fn(src_cpu, RUN_ON_CPU_NULL);
}

void tlb_flush_all_cpus_synced(CPUState *src_cpu)
{
    const run_on_cpu_func fn = tlb_flush_global_async_work;
    flush_all_helper(src_cpu, fn, RUN_ON_CPU_NULL);
    async_safe_run_on_cpu(src_cpu, fn, RUN_ON_CPU_NULL);
}

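/* Flush the TLB entries of the MMU modes named in the bitmap carried in
 * data.host_int.  Runs on the target vCPU's thread; the public entry
 * points below take a uint16_t idxmap and route to this helper. */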
static void tlb_flush_by_mmuidx_async_work(CPUState *cpu, run_on_cpu_data data)
{
    CPUArchState *env = cpu->env_ptr;
    unsigned long mmu_idx_bitmask = data.host_int;
    int mmu_idx;

    assert_cpu_is_self(cpu);

    tb_lock();

    tlb_debug("start: mmu_idx:0x%04lx\n", mmu_idx_bitmask);

    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        if (test_bit(mmu_idx, &mmu_idx_bitmask)) {
            tlb_debug("%d\n", mmu_idx);

            memset(env->tlb_table[mmu_idx], -1, sizeof(env->tlb_table[0]));
            memset(env->tlb_v_table[mmu_idx], -1, sizeof(env->tlb_v_table[0]));
        }
    }

    cpu_tb_jmp_cache_clear(cpu);

    tlb_debug("done\n");

    tb_unlock();
}

void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap)
{
    tlb_debug("mmu_idx: 0x%" PRIx16 "\n", idxmap);

    if (!qemu_cpu_is_self(cpu)) {
        uint16_t pending_flushes = idxmap;
        pending_flushes &= ~atomic_mb_read(&cpu->pending_tlb_flush);

        if (pending_flushes) {
            tlb_debug("reduced mmu_idx: 0x%" PRIx16 "\n", pending_flushes);

            atomic_or(&cpu->pending_tlb_flush, pending_flushes);
            async_run_on_cpu(cpu, tlb_flush_by_mmuidx_async_work,
                             RUN_ON_CPU_HOST_INT(pending_flushes));
        }
    } else {
        tlb_flush_by_mmuidx_async_work(cpu,
                                       RUN_ON_CPU_HOST_INT(idxmap));
    }
}

void tlb_flush_by_mmuidx_all_cpus(CPUState *src_cpu, uint16_t idxmap)
{
    const run_on_cpu_func fn = tlb_flush_by_mmuidx_async_work;

    tlb_debug("mmu_idx: 0x%"PRIx16"\n", idxmap);

    flush_all_helper(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
    fn(src_cpu, RUN_ON_CPU_HOST_INT(idxmap));
}

void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
                                         uint16_t idxmap)
{
    const run_on_cpu_func fn = tlb_flush_by_mmuidx_async_work;

    tlb_debug("mmu_idx: 0x%"PRIx16"\n", idxmap);

    flush_all_helper(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
    async_safe_run_on_cpu(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
}

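/* Invalidate a single TLB entry if it caches @addr for read, write or
 * code access (the comparison includes TLB_INVALID_MASK, so entries that
 * are already invalid never match). */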
static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
{
    if (addr == (tlb_entry->addr_read &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_write &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_code &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        memset(tlb_entry, -1, sizeof(*tlb_entry));
    }
}

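/* Flush all TLB entries for a single page on this vCPU.  If the page
 * falls inside a range previously covered by a large page we cannot tell
 * which entries belong to it, so fall back to a full flush. */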
static void tlb_flush_page_async_work(CPUState *cpu, run_on_cpu_data data)
{
    CPUArchState *env = cpu->env_ptr;
    target_ulong addr = (target_ulong) data.target_ptr;
    int i;
    int mmu_idx;

    assert_cpu_is_self(cpu);

    tlb_debug("page :" TARGET_FMT_lx "\n", addr);

    if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
        tlb_debug("forcing full flush ("
                  TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
                  env->tlb_flush_addr, env->tlb_flush_mask);

        tlb_flush(cpu);
        return;
    }

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);
    }

    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        int k;
        for (k = 0; k < CPU_VTLB_SIZE; k++) {
            tlb_flush_entry(&env->tlb_v_table[mmu_idx][k], addr);
        }
    }

    tb_flush_jmp_cache(cpu, addr);
}

void tlb_flush_page(CPUState *cpu, target_ulong addr)
{
    tlb_debug("page :" TARGET_FMT_lx "\n", addr);

    if (!qemu_cpu_is_self(cpu)) {
        async_run_on_cpu(cpu, tlb_flush_page_async_work,
                         RUN_ON_CPU_TARGET_PTR(addr));
    } else {
        tlb_flush_page_async_work(cpu, RUN_ON_CPU_TARGET_PTR(addr));
    }
}

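/* The page-plus-mmuidx flush helpers pack the mmu_idx bitmap into the low
 * bits of the page-aligned address, so there must be at least NB_MMU_MODES
 * bits below the smallest supported page size. */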
QEMU_BUILD_BUG_ON(NB_MMU_MODES > TARGET_PAGE_BITS_MIN);

static void tlb_flush_page_by_mmuidx_async_work(CPUState *cpu,
                                                run_on_cpu_data data)
{
    CPUArchState *env = cpu->env_ptr;
    target_ulong addr_and_mmuidx = (target_ulong) data.target_ptr;
    target_ulong addr = addr_and_mmuidx & TARGET_PAGE_MASK;
    unsigned long mmu_idx_bitmap = addr_and_mmuidx & ALL_MMUIDX_BITS;
    int page = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    int mmu_idx;
    int i;

    assert_cpu_is_self(cpu);

    tlb_debug("page:%d addr:"TARGET_FMT_lx" mmu_idx:0x%lx\n",
              page, addr, mmu_idx_bitmap);

    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        if (test_bit(mmu_idx, &mmu_idx_bitmap)) {
            tlb_flush_entry(&env->tlb_table[mmu_idx][page], addr);

            for (i = 0; i < CPU_VTLB_SIZE; i++) {
                tlb_flush_entry(&env->tlb_v_table[mmu_idx][i], addr);
            }
        }
    }

    tb_flush_jmp_cache(cpu, addr);
}

static void tlb_check_page_and_flush_by_mmuidx_async_work(CPUState *cpu,
                                                          run_on_cpu_data data)
{
    CPUArchState *env = cpu->env_ptr;
    target_ulong addr_and_mmuidx = (target_ulong) data.target_ptr;
    target_ulong addr = addr_and_mmuidx & TARGET_PAGE_MASK;
    unsigned long mmu_idx_bitmap = addr_and_mmuidx & ALL_MMUIDX_BITS;

    tlb_debug("addr:"TARGET_FMT_lx" mmu_idx: %04lx\n", addr, mmu_idx_bitmap);

    if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
        tlb_debug("forced full flush ("
                  TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
                  env->tlb_flush_addr, env->tlb_flush_mask);

        tlb_flush_by_mmuidx_async_work(cpu,
                                       RUN_ON_CPU_HOST_INT(mmu_idx_bitmap));
    } else {
        tlb_flush_page_by_mmuidx_async_work(cpu, data);
    }
}

void tlb_flush_page_by_mmuidx(CPUState *cpu, target_ulong addr, uint16_t idxmap)
{
    target_ulong addr_and_mmu_idx;

    tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%" PRIx16 "\n", addr, idxmap);

    addr_and_mmu_idx = addr & TARGET_PAGE_MASK;
    addr_and_mmu_idx |= idxmap;

    if (!qemu_cpu_is_self(cpu)) {
        async_run_on_cpu(cpu, tlb_check_page_and_flush_by_mmuidx_async_work,
                         RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
    } else {
        tlb_check_page_and_flush_by_mmuidx_async_work(
            cpu, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
    }
}

void tlb_flush_page_by_mmuidx_all_cpus(CPUState *src_cpu, target_ulong addr,
                                       uint16_t idxmap)
{
    const run_on_cpu_func fn = tlb_check_page_and_flush_by_mmuidx_async_work;
    target_ulong addr_and_mmu_idx;

    tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%"PRIx16"\n", addr, idxmap);

    addr_and_mmu_idx = addr & TARGET_PAGE_MASK;
    addr_and_mmu_idx |= idxmap;

    flush_all_helper(src_cpu, fn, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
    fn(src_cpu, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
}

void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
                                              target_ulong addr,
                                              uint16_t idxmap)
{
    const run_on_cpu_func fn = tlb_check_page_and_flush_by_mmuidx_async_work;
    target_ulong addr_and_mmu_idx;

    tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%"PRIx16"\n", addr, idxmap);

    addr_and_mmu_idx = addr & TARGET_PAGE_MASK;
    addr_and_mmu_idx |= idxmap;

    flush_all_helper(src_cpu, fn, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
    async_safe_run_on_cpu(src_cpu, fn, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
}

void tlb_flush_page_all_cpus(CPUState *src, target_ulong addr)
{
    const run_on_cpu_func fn = tlb_flush_page_async_work;

    flush_all_helper(src, fn, RUN_ON_CPU_TARGET_PTR(addr));
    fn(src, RUN_ON_CPU_TARGET_PTR(addr));
}

void tlb_flush_page_all_cpus_synced(CPUState *src,
                                    target_ulong addr)
{
    const run_on_cpu_func fn = tlb_flush_page_async_work;

    flush_all_helper(src, fn, RUN_ON_CPU_TARGET_PTR(addr));
    async_safe_run_on_cpu(src, fn, RUN_ON_CPU_TARGET_PTR(addr));
}

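/* tlb_protect_code() clears the DIRTY_MEMORY_CODE bit for a page that now
 * contains translated code, so the next write to it is trapped and the
 * translations can be invalidated; tlb_unprotect_code() marks the page
 * dirty again once that has happened. */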
void tlb_protect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_test_and_clear_dirty(ram_addr, TARGET_PAGE_SIZE,
                                             DIRTY_MEMORY_CODE);
}

void tlb_unprotect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_set_dirty_flag(ram_addr, DIRTY_MEMORY_CODE);
}

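/* Set TLB_NOTDIRTY on a writable TLB entry whose backing host address lies
 * inside [start, start + length), so the next guest write takes the slow
 * path and is seen by dirty tracking.  The cmpxchg keeps the update safe
 * against concurrent refills by the owning vCPU; TCG_OVERSIZED_GUEST
 * builds cannot update the field atomically and use a plain
 * read-modify-write instead. */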
static void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry, uintptr_t start,
                                  uintptr_t length)
{
#if TCG_OVERSIZED_GUEST
    uintptr_t addr = tlb_entry->addr_write;

    if ((addr & (TLB_INVALID_MASK | TLB_MMIO | TLB_NOTDIRTY)) == 0) {
        addr &= TARGET_PAGE_MASK;
        addr += tlb_entry->addend;
        if ((addr - start) < length) {
            tlb_entry->addr_write |= TLB_NOTDIRTY;
        }
    }
#else
    uintptr_t orig_addr = atomic_mb_read(&tlb_entry->addr_write);
    uintptr_t addr = orig_addr;

    if ((addr & (TLB_INVALID_MASK | TLB_MMIO | TLB_NOTDIRTY)) == 0) {
        addr &= TARGET_PAGE_MASK;
        addr += atomic_read(&tlb_entry->addend);
        if ((addr - start) < length) {
            uintptr_t notdirty_addr = orig_addr | TLB_NOTDIRTY;
            atomic_cmpxchg(&tlb_entry->addr_write, orig_addr, notdirty_addr);
        }
    }
#endif
}

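/* Copy one TLB entry into another.  With atomic_set == true the
 * addr_write/addend fields are stored atomically so that a concurrent
 * tlb_reset_dirty_range() sees a consistent entry.  Note that the bool
 * parameter shares its name with the atomic_set() macro; the function-like
 * macro only expands when followed by a parenthesis, so the two do not
 * clash. */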
static inline void copy_tlb_helper(CPUTLBEntry *d, CPUTLBEntry *s,
                                   bool atomic_set)
{
#if TCG_OVERSIZED_GUEST
    *d = *s;
#else
    if (atomic_set) {
        d->addr_read = s->addr_read;
        d->addr_code = s->addr_code;
        atomic_set(&d->addend, atomic_read(&s->addend));
        /* Pairs with the flag setting in tlb_reset_dirty_range. */
        atomic_mb_set(&d->addr_write, atomic_read(&s->addr_write));
    } else {
        d->addr_read = s->addr_read;
        d->addr_write = atomic_read(&s->addr_write);
        d->addr_code = s->addr_code;
        d->addend = atomic_read(&s->addend);
    }
#endif
}

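/* Flag as TLB_NOTDIRTY every TLB and victim-TLB entry of @cpu whose
 * translated host address falls within @length bytes of @start1, so that
 * subsequent writes to those pages go through the slow path. */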
void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length)
{
    CPUArchState *env;

    int mmu_idx;

    env = cpu->env_ptr;
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        unsigned int i;

        for (i = 0; i < CPU_TLB_SIZE; i++) {
            tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
                                  start1, length);
        }

        for (i = 0; i < CPU_VTLB_SIZE; i++) {
            tlb_reset_dirty_range(&env->tlb_v_table[mmu_idx][i],
                                  start1, length);
        }
    }
}

static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
{
    if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY)) {
        tlb_entry->addr_write = vaddr;
    }
}

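/* Once a page has been made dirty, drop the TLB_NOTDIRTY flag from any
 * entry caching @vaddr so subsequent writes use the fast path again. */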
void tlb_set_dirty(CPUState *cpu, target_ulong vaddr)
{
    CPUArchState *env = cpu->env_ptr;
    int i;
    int mmu_idx;

    assert_cpu_is_self(cpu);

    vaddr &= TARGET_PAGE_MASK;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
    }

    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        int k;
        for (k = 0; k < CPU_VTLB_SIZE; k++) {
            tlb_set_dirty1(&env->tlb_v_table[mmu_idx][k], vaddr);
        }
    }
}

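/* Our TLB does not support large pages directly, so remember the
 * address/mask of the region mapped with a page size above
 * TARGET_PAGE_SIZE; flushes of any page in that region force a full TLB
 * flush (see tlb_flush_page_async_work). */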
static void tlb_add_large_page(CPUArchState *env, target_ulong vaddr,
                               target_ulong size)
{
    target_ulong mask = ~(size - 1);

    if (env->tlb_flush_addr == (target_ulong)-1) {
        env->tlb_flush_addr = vaddr & mask;
        env->tlb_flush_mask = mask;
        return;
    }

    /* Extend the existing region to include the new page.  This is a
     * compromise between unnecessary flushes and the cost of maintaining
     * a full variable-size TLB. */
    mask &= env->tlb_flush_mask;
    while (((env->tlb_flush_addr ^ vaddr) & mask) != 0) {
        mask <<= 1;
    }
    env->tlb_flush_addr &= mask;
    env->tlb_flush_mask = mask;
}

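/* Add a new TLB entry mapping @vaddr to @paddr with protection @prot in
 * MMU mode @mmu_idx.  At most one entry per virtual page is kept in the
 * main table; the previous entry is moved into the victim TLB.  @size
 * must be at least TARGET_PAGE_SIZE; larger sizes only feed
 * tlb_add_large_page().  Called only from the vCPU's own thread. */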
void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
                             hwaddr paddr, MemTxAttrs attrs, int prot,
                             int mmu_idx, target_ulong size)
{
    CPUArchState *env = cpu->env_ptr;
    MemoryRegionSection *section;
    unsigned int index;
    target_ulong address;
    target_ulong code_address;
    uintptr_t addend;
    CPUTLBEntry *te, *tv, tn;
    hwaddr iotlb, xlat, sz;
    unsigned vidx = env->vtlb_index++ % CPU_VTLB_SIZE;
    int asidx = cpu_asidx_from_attrs(cpu, attrs);

    assert_cpu_is_self(cpu);
    assert(size >= TARGET_PAGE_SIZE);
    if (size != TARGET_PAGE_SIZE) {
        tlb_add_large_page(env, vaddr, size);
    }

    sz = size;
    section = address_space_translate_for_iotlb(cpu, asidx, paddr, &xlat, &sz);
    assert(sz >= TARGET_PAGE_SIZE);

    tlb_debug("vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx
              " prot=%x idx=%d\n",
              vaddr, paddr, prot, mmu_idx);

    address = vaddr;
    if (!memory_region_is_ram(section->mr) &&
        !memory_region_is_romd(section->mr)) {
        /* IO memory case: there is no host pointer to add to the address. */
        address |= TLB_MMIO;
        addend = 0;
    } else {
        addend = (uintptr_t)memory_region_get_ram_ptr(section->mr) + xlat;
    }

    code_address = address;
    iotlb = memory_region_section_get_iotlb(cpu, section, vaddr, paddr, xlat,
                                            prot, &address);

    index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    te = &env->tlb_table[mmu_idx][index];

    tv = &env->tlb_v_table[mmu_idx][vidx];

    /* Save the old entry (and its iotlb data) into the victim TLB. */
    copy_tlb_helper(tv, te, true);

    env->iotlb_v[mmu_idx][vidx] = env->iotlb[mmu_idx][index];

    /* Refill the iotlb for the new entry. */
    env->iotlb[mmu_idx][index].addr = iotlb - vaddr;
    env->iotlb[mmu_idx][index].attrs = attrs;

    /* Build the new TLB entry, then install it below. */
    tn.addend = addend - vaddr;
    if (prot & PAGE_READ) {
        tn.addr_read = address;
    } else {
        tn.addr_read = -1;
    }

    if (prot & PAGE_EXEC) {
        tn.addr_code = code_address;
    } else {
        tn.addr_code = -1;
    }

    tn.addr_write = -1;
    if (prot & PAGE_WRITE) {
        if ((memory_region_is_ram(section->mr) && section->readonly)
            || memory_region_is_romd(section->mr)) {
            /* Write accesses must go through the MMIO slow path. */
            tn.addr_write = address | TLB_MMIO;
        } else if (memory_region_is_ram(section->mr)
                   && cpu_physical_memory_is_clean(
                       memory_region_get_ram_addr(section->mr) + xlat)) {
            tn.addr_write = address | TLB_NOTDIRTY;
        } else {
            tn.addr_write = address;
        }
    }

    copy_tlb_helper(te, &tn, true);
}

/* Add a new TLB entry without specifying the memory transaction
 * attributes to be used. */
void tlb_set_page(CPUState *cpu, target_ulong vaddr,
                  hwaddr paddr, int prot,
                  int mmu_idx, target_ulong size)
{
    tlb_set_page_with_attrs(cpu, vaddr, paddr, MEMTXATTRS_UNSPECIFIED,
                            prot, mmu_idx, size);
}

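/* Executing outside RAM or ROM is almost always user error rather than a
 * QEMU bug, so report it prominently with hints on likely causes, and also
 * send it (with a register dump) to the guest-error log. */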
static void report_bad_exec(CPUState *cpu, target_ulong addr)
{
    error_report("Trying to execute code outside RAM or ROM at 0x"
                 TARGET_FMT_lx, addr);
    error_printf("This usually means one of the following happened:\n\n"
                 "(1) You told QEMU to execute a kernel for the wrong machine "
                 "type, and it crashed on startup (eg trying to run a "
                 "raspberry pi kernel on a versatilepb QEMU machine)\n"
                 "(2) You didn't give QEMU a kernel or BIOS filename at all, "
                 "and QEMU executed a ROM full of no-op instructions until "
                 "it fell off the end\n"
                 "(3) Your guest kernel has a bug and crashed by jumping "
                 "off into nowhere\n\n"
                 "This is almost always one of the first two, so check your "
                 "command line and that you are using the right type of kernel "
                 "for this machine.\n"
                 "If you think option (3) is likely then you can try debugging "
                 "your guest with the -d debug options; in particular "
                 "-d guest_errors will cause the log to include a dump of the "
                 "guest register state at this point.\n\n"
                 "Execution cannot continue; stopping here.\n\n");

    /* Report also to the logs, with more detail including register dump. */
    qemu_log_mask(LOG_GUEST_ERROR, "qemu: fatal: Trying to execute code "
                  "outside RAM or ROM at 0x" TARGET_FMT_lx "\n", addr);
    log_cpu_state_mask(LOG_GUEST_ERROR, cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
}

static inline ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
{
    ram_addr_t ram_addr;

    ram_addr = qemu_ram_addr_from_host(ptr);
    if (ram_addr == RAM_ADDR_INVALID) {
        error_report("Bad ram pointer %p", ptr);
        abort();
    }
    return ram_addr;
}

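/* io_readx()/io_writex() perform an MMIO access through the memory region
 * referenced by the iotlb entry.  The iothread lock is taken around the
 * dispatch when the region requires the global lock, and cpu_io_recompile()
 * is invoked if the access happens while the CPU cannot yet do I/O
 * (cpu->can_do_io clear). */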
static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
                         target_ulong addr, uintptr_t retaddr, int size)
{
    CPUState *cpu = ENV_GET_CPU(env);
    hwaddr physaddr = iotlbentry->addr;
    MemoryRegion *mr = iotlb_to_region(cpu, physaddr, iotlbentry->attrs);
    uint64_t val;
    bool locked = false;

    physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
    cpu->mem_io_pc = retaddr;
    if (mr != &io_mem_rom && mr != &io_mem_notdirty && !cpu->can_do_io) {
        cpu_io_recompile(cpu, retaddr);
    }

    cpu->mem_io_vaddr = addr;

    if (mr->global_locking) {
        qemu_mutex_lock_iothread();
        locked = true;
    }
    memory_region_dispatch_read(mr, physaddr, &val, size, iotlbentry->attrs);
    if (locked) {
        qemu_mutex_unlock_iothread();
    }

    return val;
}

static void io_writex(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
                      uint64_t val, target_ulong addr,
                      uintptr_t retaddr, int size)
{
    CPUState *cpu = ENV_GET_CPU(env);
    hwaddr physaddr = iotlbentry->addr;
    MemoryRegion *mr = iotlb_to_region(cpu, physaddr, iotlbentry->attrs);
    bool locked = false;

    physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
    if (mr != &io_mem_rom && mr != &io_mem_notdirty && !cpu->can_do_io) {
        cpu_io_recompile(cpu, retaddr);
    }
    cpu->mem_io_vaddr = addr;
    cpu->mem_io_pc = retaddr;

    if (mr->global_locking) {
        qemu_mutex_lock_iothread();
        locked = true;
    }
    memory_region_dispatch_write(mr, physaddr, val, size, iotlbentry->attrs);
    if (locked) {
        qemu_mutex_unlock_iothread();
    }
}

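/* Return true if ADDR (masked to its page) is present in the victim TLB,
 * in which case the entry has been swapped back into the main TLB.  The
 * comparison field (addr_read/addr_write/addr_code) is selected via
 * elt_ofs. */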
static bool victim_tlb_hit(CPUArchState *env, size_t mmu_idx, size_t index,
                           size_t elt_ofs, target_ulong page)
{
    size_t vidx;
    for (vidx = 0; vidx < CPU_VTLB_SIZE; ++vidx) {
        CPUTLBEntry *vtlb = &env->tlb_v_table[mmu_idx][vidx];
        target_ulong cmp = *(target_ulong *)((uintptr_t)vtlb + elt_ofs);

        if (cmp == page) {
            /* Found entry in victim tlb, swap it with the main tlb entry. */
            CPUTLBEntry tmptlb, *tlb = &env->tlb_table[mmu_idx][index];

            copy_tlb_helper(&tmptlb, tlb, false);
            copy_tlb_helper(tlb, vtlb, true);
            copy_tlb_helper(vtlb, &tmptlb, true);

            CPUIOTLBEntry tmpio, *io = &env->iotlb[mmu_idx][index];
            CPUIOTLBEntry *vio = &env->iotlb_v[mmu_idx][vidx];
            tmpio = *io; *io = *vio; *vio = tmpio;
            return true;
        }
    }
    return false;
}

/* Macro to call the above, with the local variables from the use context. */
#define VICTIM_TLB_HIT(TY, ADDR) \
    victim_tlb_hit(env, mmu_idx, index, offsetof(CPUTLBEntry, TY), \
                   (ADDR) & TARGET_PAGE_MASK)

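/* Translate a guest virtual address holding executable code into the
 * ram_addr_t used as a tb_page_addr_t.  On a TLB miss the entry is filled
 * via tlb_fill() (which may raise a guest exception); attempting to
 * execute from a region that is not RAM or ROM reports a fatal error and
 * exits. */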
tb_page_addr_t get_page_addr_code(CPUArchState *env, target_ulong addr)
{
    int mmu_idx, index, pd;
    void *p;
    MemoryRegion *mr;
    CPUState *cpu = ENV_GET_CPU(env);
    CPUIOTLBEntry *iotlbentry;

    index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    mmu_idx = cpu_mmu_index(env, true);
    if (unlikely(env->tlb_table[mmu_idx][index].addr_code !=
                 (addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK)))) {
        if (!VICTIM_TLB_HIT(addr_code, addr)) {
            tlb_fill(ENV_GET_CPU(env), addr, MMU_INST_FETCH, mmu_idx, 0);
        }
    }
    iotlbentry = &env->iotlb[mmu_idx][index];
    pd = iotlbentry->addr & ~TARGET_PAGE_MASK;
    mr = iotlb_to_region(cpu, pd, iotlbentry->attrs);
    if (memory_region_is_unassigned(mr)) {
        qemu_mutex_lock_iothread();
        if (memory_region_request_mmio_ptr(mr, addr)) {
            qemu_mutex_unlock_iothread();
            /* A memory region may have been added, so retry the lookup. */
            return get_page_addr_code(env, addr);
        }
        qemu_mutex_unlock_iothread();

        cpu_unassigned_access(cpu, addr, false, true, 0, 4);
        /* The CPU's unassigned access hook might have longjumped out with
         * an exception.  If it didn't (or there was no hook) then we can't
         * proceed further. */
        report_bad_exec(cpu, addr);
        exit(1);
    }
    p = (void *)((uintptr_t)addr + env->tlb_table[mmu_idx][index].addend);
    return qemu_ram_addr_from_host_nofail(p);
}

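/* Probe an address to check whether a write with the given mmu_idx would
 * fault, filling the TLB (and possibly raising the fault) without actually
 * performing any store. */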
void probe_write(CPUArchState *env, target_ulong addr, int mmu_idx,
                 uintptr_t retaddr)
{
    int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    target_ulong tlb_addr = env->tlb_table[mmu_idx][index].addr_write;

    if ((addr & TARGET_PAGE_MASK)
        != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if (!VICTIM_TLB_HIT(addr_write, addr)) {
            tlb_fill(ENV_GET_CPU(env), addr, MMU_DATA_STORE, mmu_idx, retaddr);
        }
    }
}

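/* Resolve the guest virtual address of an atomic operation into a host
 * pointer, enforcing alignment and write permission.  Cases that cannot be
 * handled inline (unaligned accesses, MMIO pages, pages without read
 * permission) bail out to cpu_loop_exit_atomic(), i.e. the stop-the-world
 * slow path. */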
static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
                               TCGMemOpIdx oi, uintptr_t retaddr)
{
    size_t mmu_idx = get_mmuidx(oi);
    size_t index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    CPUTLBEntry *tlbe = &env->tlb_table[mmu_idx][index];
    target_ulong tlb_addr = tlbe->addr_write;
    TCGMemOp mop = get_memop(oi);
    int a_bits = get_alignment_bits(mop);
    int s_bits = mop & MO_SIZE;

    /* Adjust the given return address.  */
    retaddr -= GETPC_ADJ;

    /* Enforce guest-required alignment.  */
    if (unlikely(a_bits > 0 && (addr & ((1 << a_bits) - 1)))) {
        cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
                             mmu_idx, retaddr);
    }

    /* Enforce qemu-required alignment: an access that is not naturally
     * aligned for its size cannot be handled atomically here.  */
    if (unlikely(addr & ((1 << s_bits) - 1))) {
        goto stop_the_world;
    }

    /* Check the TLB entry and enforce page permissions.  */
    if ((addr & TARGET_PAGE_MASK)
        != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if (!VICTIM_TLB_HIT(addr_write, addr)) {
            tlb_fill(ENV_GET_CPU(env), addr, MMU_DATA_STORE, mmu_idx, retaddr);
        }
        tlb_addr = tlbe->addr_write;
    }

    /* Check notdirty.  */
    if (unlikely(tlb_addr & TLB_NOTDIRTY)) {
        tlb_set_dirty(ENV_GET_CPU(env), addr);
        tlb_addr = tlb_addr & ~TLB_NOTDIRTY;
    }

    /* Notice an IO access: it cannot be performed inline.  */
    if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
        goto stop_the_world;
    }

    /* Let the guest notice RMW on a write-only page.  */
    if (unlikely(tlbe->addr_read != tlb_addr)) {
        tlb_fill(ENV_GET_CPU(env), addr, MMU_DATA_LOAD, mmu_idx, retaddr);
        /* Since we don't support reads and writes to different addresses,
         * and we do have the proper page loaded for write, this shouldn't
         * ever return.  But just in case, handle via stop-the-world.  */
        goto stop_the_world;
    }

    return (void *)((uintptr_t)addr + tlbe->addend);

 stop_the_world:
    cpu_loop_exit_atomic(ENV_GET_CPU(env), retaddr);
}

#ifdef TARGET_WORDS_BIGENDIAN
# define TGT_BE(X)  (X)
# define TGT_LE(X)  BSWAP(X)
#else
# define TGT_BE(X)  BSWAP(X)
# define TGT_LE(X)  (X)
#endif

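/* Generate the softmmu load/store helpers for each access size by
 * repeatedly including softmmu_template.h. */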
#define MMUSUFFIX _mmu

#define DATA_SIZE 1
#include "softmmu_template.h"

#define DATA_SIZE 2
#include "softmmu_template.h"

#define DATA_SIZE 4
#include "softmmu_template.h"

#define DATA_SIZE 8
#include "softmmu_template.h"

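/* First set of atomic helpers: called with the TCGMemOpIdx and the return
 * address passed in explicitly. */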
#define EXTRA_ARGS , TCGMemOpIdx oi, uintptr_t retaddr
#define ATOMIC_NAME(X) \
    HELPER(glue(glue(glue(atomic_ ## X, SUFFIX), END), _mmu))
#define ATOMIC_MMU_LOOKUP  atomic_mmu_lookup(env, addr, oi, retaddr)

#define DATA_SIZE 1
#include "atomic_template.h"

#define DATA_SIZE 2
#include "atomic_template.h"

#define DATA_SIZE 4
#include "atomic_template.h"

#ifdef CONFIG_ATOMIC64
#define DATA_SIZE 8
#include "atomic_template.h"
#endif

#ifdef CONFIG_ATOMIC128
#define DATA_SIZE 16
#include "atomic_template.h"
#endif

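/* Second set of atomic helpers: called without an explicit return-address
 * argument; GETPC() is used to recover it instead. */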
#undef EXTRA_ARGS
#undef ATOMIC_NAME
#undef ATOMIC_MMU_LOOKUP
#define EXTRA_ARGS         , TCGMemOpIdx oi
#define ATOMIC_NAME(X)     HELPER(glue(glue(atomic_ ## X, SUFFIX), END))
#define ATOMIC_MMU_LOOKUP  atomic_mmu_lookup(env, addr, oi, GETPC())

#define DATA_SIZE 1
#include "atomic_template.h"

#define DATA_SIZE 2
#include "atomic_template.h"

#define DATA_SIZE 4
#include "atomic_template.h"

#ifdef CONFIG_ATOMIC64
#define DATA_SIZE 8
#include "atomic_template.h"
#endif

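/* Code access functions: the _cmmu helpers used for instruction fetch.
 * GETPC() is stubbed out to 0 so faults are not attributed to a
 * translated-code return address. */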
#undef MMUSUFFIX
#define MMUSUFFIX _cmmu
#undef GETPC
#define GETPC() ((uintptr_t)0)
#define SOFTMMU_CODE_ACCESS

#define DATA_SIZE 1
#include "softmmu_template.h"

#define DATA_SIZE 2
#include "softmmu_template.h"

#define DATA_SIZE 4
#include "softmmu_template.h"

#define DATA_SIZE 8
#include "softmmu_template.h"