/*
 *  Common CPU TLB handling
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/main-loop.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "exec/memory.h"
#include "exec/address-spaces.h"
#include "exec/cpu_ldst.h"
#include "exec/cputlb.h"
#include "exec/memory-internal.h"
#include "exec/ram_addr.h"
#include "tcg/tcg.h"
#include "qemu/error-report.h"
#include "exec/log.h"
#include "exec/helper-proto.h"
#include "qemu/atomic.h"

/* DEBUG defines, enable DEBUG_TLB_LOG to log to the CPU_LOG_MMU target */
/* #define DEBUG_TLB */
/* #define DEBUG_TLB_LOG */
#ifdef DEBUG_TLB
# define DEBUG_TLB_GATE 1
# ifdef DEBUG_TLB_LOG
#  define DEBUG_TLB_LOG_GATE 1
# else
#  define DEBUG_TLB_LOG_GATE 0
# endif
#else
# define DEBUG_TLB_GATE 0
# define DEBUG_TLB_LOG_GATE 0
#endif

#define tlb_debug(fmt, ...) do { \
    if (DEBUG_TLB_LOG_GATE) { \
        qemu_log_mask(CPU_LOG_MMU, "%s: " fmt, __func__, \
                      ## __VA_ARGS__); \
    } else if (DEBUG_TLB_GATE) { \
        fprintf(stderr, "%s: " fmt, __func__, ## __VA_ARGS__); \
    } \
} while (0)

/* The macro parameter was previously named this_cpu while the body
 * referenced a non-existent 'cpu'; name the parameter 'cpu' so the
 * assertion actually compiles when DEBUG_TLB is enabled. */
#define assert_cpu_is_self(cpu) do {                              \
        if (DEBUG_TLB_GATE) {                                     \
            g_assert(!(cpu)->created || qemu_cpu_is_self(cpu));   \
        }                                                         \
    } while (0)

/* run_on_cpu_data.target_ptr should always be big enough for a
 * target_ulong even on 32 bit builds */
QEMU_BUILD_BUG_ON(sizeof(target_ulong) > sizeof(run_on_cpu_data));

/* We currently can't handle more than 16 bits in the MMUIDX bitmask. */
QEMU_BUILD_BUG_ON(NB_MMU_MODES > 16);
#define ALL_MMUIDX_BITS ((1 << NB_MMU_MODES) - 1)

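/* flush_all_helper: queue fn as async work on every vCPU except src;
 * the caller is responsible for running (or queueing) fn on src itself.
 */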
static void flush_all_helper(CPUState *src, run_on_cpu_func fn,
                             run_on_cpu_data d)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (cpu != src) {
            async_run_on_cpu(cpu, fn, d);
        }
    }
}

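/* statistics: report the total number of full TLB flushes performed
 * by all vCPUs so far.
 */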
size_t tlb_flush_count(void)
{
    CPUState *cpu;
    size_t count = 0;

    CPU_FOREACH(cpu) {
        CPUArchState *env = cpu->env_ptr;

        count += atomic_read(&env->tlb_flush_count);
    }
    return count;
}

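/* This is OK because CPU architectures generally permit an
 * implementation to drop entries from the TLB at any time, so
 * flushing more entries than required is only an efficiency issue,
 * not a correctness one.
 */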
static void tlb_flush_nocheck(CPUState *cpu)
{
    CPUArchState *env = cpu->env_ptr;

    /* The QOM tests will trigger tlb_flushes without setting up TCG
     * so we bug out here in that case.
     */
    if (!tcg_enabled()) {
        return;
    }

    assert_cpu_is_self(cpu);
    atomic_set(&env->tlb_flush_count, env->tlb_flush_count + 1);
    tlb_debug("(count: %zu)\n", tlb_flush_count());

    tb_lock();

    memset(env->tlb_table, -1, sizeof(env->tlb_table));
    memset(env->tlb_v_table, -1, sizeof(env->tlb_v_table));
    cpu_tb_jmp_cache_clear(cpu);

    env->vtlb_index = 0;
    env->tlb_flush_addr = -1;
    env->tlb_flush_mask = 0;

    tb_unlock();

    atomic_mb_set(&cpu->pending_tlb_flush, 0);
}

static void tlb_flush_global_async_work(CPUState *cpu, run_on_cpu_data data)
{
    tlb_flush_nocheck(cpu);
}

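/* Flush the entire TLB of a vCPU.  When called from a thread other than
 * the target vCPU's, the flush is deferred as async work; repeated
 * requests are coalesced via the pending_tlb_flush bitmask.
 */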
void tlb_flush(CPUState *cpu)
{
    if (cpu->created && !qemu_cpu_is_self(cpu)) {
        if (atomic_mb_read(&cpu->pending_tlb_flush) != ALL_MMUIDX_BITS) {
            atomic_mb_set(&cpu->pending_tlb_flush, ALL_MMUIDX_BITS);
            async_run_on_cpu(cpu, tlb_flush_global_async_work,
                             RUN_ON_CPU_NULL);
        }
    } else {
        tlb_flush_nocheck(cpu);
    }
}

void tlb_flush_all_cpus(CPUState *src_cpu)
{
    const run_on_cpu_func fn = tlb_flush_global_async_work;
    flush_all_helper(src_cpu, fn, RUN_ON_CPU_NULL);
    fn(src_cpu, RUN_ON_CPU_NULL);
}

void tlb_flush_all_cpus_synced(CPUState *src_cpu)
{
    const run_on_cpu_func fn = tlb_flush_global_async_work;
    flush_all_helper(src_cpu, fn, RUN_ON_CPU_NULL);
    async_safe_run_on_cpu(src_cpu, fn, RUN_ON_CPU_NULL);
}

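/* Flush TLB entries for the MMU indexes named in the bitmask carried in
 * data.host_int; runs on the vCPU being flushed.
 */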
static void tlb_flush_by_mmuidx_async_work(CPUState *cpu, run_on_cpu_data data)
{
    CPUArchState *env = cpu->env_ptr;
    unsigned long mmu_idx_bitmask = data.host_int;
    int mmu_idx;

    assert_cpu_is_self(cpu);

    tb_lock();

    tlb_debug("start: mmu_idx:0x%04lx\n", mmu_idx_bitmask);

    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {

        if (test_bit(mmu_idx, &mmu_idx_bitmask)) {
            tlb_debug("%d\n", mmu_idx);

            memset(env->tlb_table[mmu_idx], -1, sizeof(env->tlb_table[0]));
            memset(env->tlb_v_table[mmu_idx], -1, sizeof(env->tlb_v_table[0]));
        }
    }

    cpu_tb_jmp_cache_clear(cpu);

    tlb_debug("done\n");

    tb_unlock();
}

void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap)
{
    tlb_debug("mmu_idx: 0x%" PRIx16 "\n", idxmap);

    if (!qemu_cpu_is_self(cpu)) {
        uint16_t pending_flushes = idxmap;
        pending_flushes &= ~atomic_mb_read(&cpu->pending_tlb_flush);

        if (pending_flushes) {
            tlb_debug("reduced mmu_idx: 0x%" PRIx16 "\n", pending_flushes);

            atomic_or(&cpu->pending_tlb_flush, pending_flushes);
            async_run_on_cpu(cpu, tlb_flush_by_mmuidx_async_work,
                             RUN_ON_CPU_HOST_INT(pending_flushes));
        }
    } else {
        tlb_flush_by_mmuidx_async_work(cpu,
                                       RUN_ON_CPU_HOST_INT(idxmap));
    }
}

void tlb_flush_by_mmuidx_all_cpus(CPUState *src_cpu, uint16_t idxmap)
{
    const run_on_cpu_func fn = tlb_flush_by_mmuidx_async_work;

    tlb_debug("mmu_idx: 0x%"PRIx16"\n", idxmap);

    flush_all_helper(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
    fn(src_cpu, RUN_ON_CPU_HOST_INT(idxmap));
}

void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
                                         uint16_t idxmap)
{
    const run_on_cpu_func fn = tlb_flush_by_mmuidx_async_work;

    tlb_debug("mmu_idx: 0x%"PRIx16"\n", idxmap);

    flush_all_helper(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
    async_safe_run_on_cpu(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
}

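/* Invalidate a single TLB entry if it caches a mapping (read, write or
 * code fetch) for the given page-aligned address.
 */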
static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
{
    if (addr == (tlb_entry->addr_read &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_write &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_code &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        memset(tlb_entry, -1, sizeof(*tlb_entry));
    }
}

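/* Flush one page from the TLB of the current vCPU; run as async work
 * with the target address in data.target_ptr.
 */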
static void tlb_flush_page_async_work(CPUState *cpu, run_on_cpu_data data)
{
    CPUArchState *env = cpu->env_ptr;
    target_ulong addr = (target_ulong) data.target_ptr;
    int i;
    int mmu_idx;

    assert_cpu_is_self(cpu);

    tlb_debug("page :" TARGET_FMT_lx "\n", addr);

    /* Check if we need to flush due to large pages.  */
    if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
        tlb_debug("forcing full flush ("
                  TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
                  env->tlb_flush_addr, env->tlb_flush_mask);

        tlb_flush(cpu);
        return;
    }

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);
    }

    /* check whether there are entries that need to be flushed in the vtlb */
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        int k;
        for (k = 0; k < CPU_VTLB_SIZE; k++) {
            tlb_flush_entry(&env->tlb_v_table[mmu_idx][k], addr);
        }
    }

    tb_flush_jmp_cache(cpu, addr);
}

void tlb_flush_page(CPUState *cpu, target_ulong addr)
{
    tlb_debug("page :" TARGET_FMT_lx "\n", addr);

    if (!qemu_cpu_is_self(cpu)) {
        async_run_on_cpu(cpu, tlb_flush_page_async_work,
                         RUN_ON_CPU_TARGET_PTR(addr));
    } else {
        tlb_flush_page_async_work(cpu, RUN_ON_CPU_TARGET_PTR(addr));
    }
}

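/* As we are going to hijack the bottom bits of the page address for a
 * mmuidx bit mask we need to fail to build if we can't do that
 */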
QEMU_BUILD_BUG_ON(NB_MMU_MODES > TARGET_PAGE_BITS_MIN);

static void tlb_flush_page_by_mmuidx_async_work(CPUState *cpu,
                                                run_on_cpu_data data)
{
    CPUArchState *env = cpu->env_ptr;
    target_ulong addr_and_mmuidx = (target_ulong) data.target_ptr;
    target_ulong addr = addr_and_mmuidx & TARGET_PAGE_MASK;
    unsigned long mmu_idx_bitmap = addr_and_mmuidx & ALL_MMUIDX_BITS;
    int page = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    int mmu_idx;
    int i;

    assert_cpu_is_self(cpu);

    tlb_debug("page:%d addr:"TARGET_FMT_lx" mmu_idx:0x%lx\n",
              page, addr, mmu_idx_bitmap);

    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        if (test_bit(mmu_idx, &mmu_idx_bitmap)) {
            tlb_flush_entry(&env->tlb_table[mmu_idx][page], addr);

            /* check whether there are victim TLB entries to flush */
            for (i = 0; i < CPU_VTLB_SIZE; i++) {
                tlb_flush_entry(&env->tlb_v_table[mmu_idx][i], addr);
            }
        }
    }

    tb_flush_jmp_cache(cpu, addr);
}

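/* Check the large-page case before flushing one page for a set of MMU
 * indexes; a hit on the large-page range forces a full flush of those
 * indexes instead.
 */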
static void tlb_check_page_and_flush_by_mmuidx_async_work(CPUState *cpu,
                                                          run_on_cpu_data data)
{
    CPUArchState *env = cpu->env_ptr;
    target_ulong addr_and_mmuidx = (target_ulong) data.target_ptr;
    target_ulong addr = addr_and_mmuidx & TARGET_PAGE_MASK;
    unsigned long mmu_idx_bitmap = addr_and_mmuidx & ALL_MMUIDX_BITS;

    tlb_debug("addr:"TARGET_FMT_lx" mmu_idx: %04lx\n", addr, mmu_idx_bitmap);

    /* Check if we need to flush due to large pages.  */
    if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
        tlb_debug("forced full flush ("
                  TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
                  env->tlb_flush_addr, env->tlb_flush_mask);

        tlb_flush_by_mmuidx_async_work(cpu,
                                       RUN_ON_CPU_HOST_INT(mmu_idx_bitmap));
    } else {
        tlb_flush_page_by_mmuidx_async_work(cpu, data);
    }
}

void tlb_flush_page_by_mmuidx(CPUState *cpu, target_ulong addr, uint16_t idxmap)
{
    target_ulong addr_and_mmu_idx;

    tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%" PRIx16 "\n", addr, idxmap);

    /* This should already be page aligned */
    addr_and_mmu_idx = addr & TARGET_PAGE_MASK;
    addr_and_mmu_idx |= idxmap;

    if (!qemu_cpu_is_self(cpu)) {
        async_run_on_cpu(cpu, tlb_check_page_and_flush_by_mmuidx_async_work,
                         RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
    } else {
        tlb_check_page_and_flush_by_mmuidx_async_work(
            cpu, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
    }
}

void tlb_flush_page_by_mmuidx_all_cpus(CPUState *src_cpu, target_ulong addr,
                                       uint16_t idxmap)
{
    const run_on_cpu_func fn = tlb_check_page_and_flush_by_mmuidx_async_work;
    target_ulong addr_and_mmu_idx;

    tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%"PRIx16"\n", addr, idxmap);

    /* This should already be page aligned */
    addr_and_mmu_idx = addr & TARGET_PAGE_MASK;
    addr_and_mmu_idx |= idxmap;

    flush_all_helper(src_cpu, fn, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
    fn(src_cpu, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
}

void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
                                              target_ulong addr,
                                              uint16_t idxmap)
{
    const run_on_cpu_func fn = tlb_check_page_and_flush_by_mmuidx_async_work;
    target_ulong addr_and_mmu_idx;

    tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%"PRIx16"\n", addr, idxmap);

    /* This should already be page aligned */
    addr_and_mmu_idx = addr & TARGET_PAGE_MASK;
    addr_and_mmu_idx |= idxmap;

    flush_all_helper(src_cpu, fn, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
    async_safe_run_on_cpu(src_cpu, fn, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
}

void tlb_flush_page_all_cpus(CPUState *src, target_ulong addr)
{
    const run_on_cpu_func fn = tlb_flush_page_async_work;

    flush_all_helper(src, fn, RUN_ON_CPU_TARGET_PTR(addr));
    fn(src, RUN_ON_CPU_TARGET_PTR(addr));
}

void tlb_flush_page_all_cpus_synced(CPUState *src,
                                    target_ulong addr)
{
    const run_on_cpu_func fn = tlb_flush_page_async_work;

    flush_all_helper(src, fn, RUN_ON_CPU_TARGET_PTR(addr));
    async_safe_run_on_cpu(src, fn, RUN_ON_CPU_TARGET_PTR(addr));
}

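/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */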
void tlb_protect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_test_and_clear_dirty(ram_addr, TARGET_PAGE_SIZE,
                                             DIRTY_MEMORY_CODE);
}

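/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */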
void tlb_unprotect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_set_dirty_flag(ram_addr, DIRTY_MEMORY_CODE);
}

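/*
 * Dirty write flag handling
 *
 * Until a page has been written to, its TLB entry carries TLB_NOTDIRTY
 * so that stores take the slow path and the dirty bitmap can be
 * updated.  tlb_reset_dirty_range re-arms TLB_NOTDIRTY on any entry
 * whose backing host address falls in [start, start + length).  It may
 * be called from another vCPU's thread, so addr_write is updated with
 * atomic primitives where the host supports them; on oversized guests
 * (target_ulong wider than the host pointer) a plain update is the
 * best we can do.
 */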
static void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry, uintptr_t start,
                                  uintptr_t length)
{
#if TCG_OVERSIZED_GUEST
    uintptr_t addr = tlb_entry->addr_write;

    if ((addr & (TLB_INVALID_MASK | TLB_MMIO | TLB_NOTDIRTY)) == 0) {
        addr &= TARGET_PAGE_MASK;
        addr += tlb_entry->addend;
        if ((addr - start) < length) {
            tlb_entry->addr_write |= TLB_NOTDIRTY;
        }
    }
#else
    /* paired with atomic_mb_set in tlb_set_page_with_attrs */
    uintptr_t orig_addr = atomic_mb_read(&tlb_entry->addr_write);
    uintptr_t addr = orig_addr;

    if ((addr & (TLB_INVALID_MASK | TLB_MMIO | TLB_NOTDIRTY)) == 0) {
        addr &= TARGET_PAGE_MASK;
        addr += atomic_read(&tlb_entry->addend);
        if ((addr - start) < length) {
            uintptr_t notdirty_addr = orig_addr | TLB_NOTDIRTY;
            atomic_cmpxchg(&tlb_entry->addr_write, orig_addr, notdirty_addr);
        }
    }
#endif
}

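/* For atomic correctness when running MTTCG we need to use the right
 * primitives when copying entries */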
static inline void copy_tlb_helper(CPUTLBEntry *d, CPUTLBEntry *s,
                                   bool atomic_set)
{
#if TCG_OVERSIZED_GUEST
    *d = *s;
#else
    if (atomic_set) {
        d->addr_read = s->addr_read;
        d->addr_code = s->addr_code;
        atomic_set(&d->addend, atomic_read(&s->addend));
        /* Pairs with flag setting in tlb_reset_dirty_range */
        atomic_mb_set(&d->addr_write, atomic_read(&s->addr_write));
    } else {
        d->addr_read = s->addr_read;
        d->addr_write = atomic_read(&s->addr_write);
        d->addr_code = s->addr_code;
        d->addend = atomic_read(&s->addend);
    }
#endif
}

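/* This is a cross vCPU call (i.e. another vCPU resetting the flags of
 * the target vCPU).  Only the ->addr_write flags are changed, and that
 * is done atomically inside tlb_reset_dirty_range.
 */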
void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length)
{
    CPUArchState *env;

    int mmu_idx;

    env = cpu->env_ptr;
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        unsigned int i;

        for (i = 0; i < CPU_TLB_SIZE; i++) {
            tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
                                  start1, length);
        }

        for (i = 0; i < CPU_VTLB_SIZE; i++) {
            tlb_reset_dirty_range(&env->tlb_v_table[mmu_idx][i],
                                  start1, length);
        }
    }
}

static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
{
    if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY)) {
        tlb_entry->addr_write = vaddr;
    }
}

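/* update the TLB corresponding to virtual page vaddr
   so that it is no longer dirty */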
void tlb_set_dirty(CPUState *cpu, target_ulong vaddr)
{
    CPUArchState *env = cpu->env_ptr;
    int i;
    int mmu_idx;

    assert_cpu_is_self(cpu);

    vaddr &= TARGET_PAGE_MASK;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
    }

    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        int k;
        for (k = 0; k < CPU_VTLB_SIZE; k++) {
            tlb_set_dirty1(&env->tlb_v_table[mmu_idx][k], vaddr);
        }
    }
}

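/* Our TLB does not support large pages, so remember the area covered by
   large pages and trigger a full TLB flush if these are invalidated.  */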
static void tlb_add_large_page(CPUArchState *env, target_ulong vaddr,
                               target_ulong size)
{
    target_ulong mask = ~(size - 1);

    if (env->tlb_flush_addr == (target_ulong)-1) {
        env->tlb_flush_addr = vaddr & mask;
        env->tlb_flush_mask = mask;
        return;
    }

    /* Extend the existing region to include the new page.
       This is a compromise between unnecessary flushes and
       the cost of maintaining a full variable size TLB.  */
    mask &= env->tlb_flush_mask;
    while (((env->tlb_flush_addr ^ vaddr) & mask) != 0) {
        mask <<= 1;
    }
    env->tlb_flush_addr &= mask;
    env->tlb_flush_mask = mask;
}

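/* Add a new TLB entry. At most one entry for a given virtual address
 * is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the
 * supplied size is only used by tlb_flush_page.
 *
 * Called from TCG-generated code, which is under an RCU read-side
 * critical section.
 */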
void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
                             hwaddr paddr, MemTxAttrs attrs, int prot,
                             int mmu_idx, target_ulong size)
{
    CPUArchState *env = cpu->env_ptr;
    MemoryRegionSection *section;
    unsigned int index;
    target_ulong address;
    target_ulong code_address;
    uintptr_t addend;
    CPUTLBEntry *te, *tv, tn;
    hwaddr iotlb, xlat, sz;
    unsigned vidx = env->vtlb_index++ % CPU_VTLB_SIZE;
    int asidx = cpu_asidx_from_attrs(cpu, attrs);

    assert_cpu_is_self(cpu);
    assert(size >= TARGET_PAGE_SIZE);
    if (size != TARGET_PAGE_SIZE) {
        tlb_add_large_page(env, vaddr, size);
    }

    sz = size;
    section = address_space_translate_for_iotlb(cpu, asidx, paddr, &xlat, &sz);
    assert(sz >= TARGET_PAGE_SIZE);

    tlb_debug("vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx
              " prot=%x idx=%d\n",
              vaddr, paddr, prot, mmu_idx);

    address = vaddr;
    if (!memory_region_is_ram(section->mr) &&
        !memory_region_is_romd(section->mr)) {
        /* IO memory case */
        address |= TLB_MMIO;
        addend = 0;
    } else {
        /* TLB_MMIO for rom/romd handled below */
        addend = (uintptr_t)memory_region_get_ram_ptr(section->mr) + xlat;
    }

    code_address = address;
    iotlb = memory_region_section_get_iotlb(cpu, section, vaddr, paddr, xlat,
                                            prot, &address);

    index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    te = &env->tlb_table[mmu_idx][index];
    /* do not discard the translation in te, evict it into a victim tlb */
    tv = &env->tlb_v_table[mmu_idx][vidx];

    /* addr_write can race with tlb_reset_dirty_range */
    copy_tlb_helper(tv, te, true);

    env->iotlb_v[mmu_idx][vidx] = env->iotlb[mmu_idx][index];

    /* refill the tlb */
    env->iotlb[mmu_idx][index].addr = iotlb - vaddr;
    env->iotlb[mmu_idx][index].attrs = attrs;

    /* Now calculate the new entry */
    tn.addend = addend - vaddr;
    if (prot & PAGE_READ) {
        tn.addr_read = address;
    } else {
        tn.addr_read = -1;
    }

    if (prot & PAGE_EXEC) {
        tn.addr_code = code_address;
    } else {
        tn.addr_code = -1;
    }

    tn.addr_write = -1;
    if (prot & PAGE_WRITE) {
        if ((memory_region_is_ram(section->mr) && section->readonly)
            || memory_region_is_romd(section->mr)) {
            /* Write access calls the I/O callback.  */
            tn.addr_write = address | TLB_MMIO;
        } else if (memory_region_is_ram(section->mr)
                   && cpu_physical_memory_is_clean(
                       memory_region_get_ram_addr(section->mr) + xlat)) {
            tn.addr_write = address | TLB_NOTDIRTY;
        } else {
            tn.addr_write = address;
        }
        if (prot & PAGE_WRITE_INV) {
            tn.addr_write |= TLB_INVALID_MASK;
        }
    }

    /* Pairs with flag setting in tlb_reset_dirty_range */
    copy_tlb_helper(te, &tn, true);
}

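/* Add a new TLB entry, but without specifying the memory
 * transaction attributes to be used.
 */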
void tlb_set_page(CPUState *cpu, target_ulong vaddr,
                  hwaddr paddr, int prot,
                  int mmu_idx, target_ulong size)
{
    tlb_set_page_with_attrs(cpu, vaddr, paddr, MEMTXATTRS_UNSPECIFIED,
                            prot, mmu_idx, size);
}

static void report_bad_exec(CPUState *cpu, target_ulong addr)
{
    /* Accidentally executing outside RAM or ROM is quite common for
     * several user-error situations, so report it in a way that
     * makes it clear that this isn't a QEMU bug and provide
     * suggestions about what a user could do to fix things.
     */
    error_report("Trying to execute code outside RAM or ROM at 0x"
                 TARGET_FMT_lx, addr);
    error_printf("This usually means one of the following happened:\n\n"
                 "(1) You told QEMU to execute a kernel for the wrong machine "
                 "type, and it crashed on startup (eg trying to run a "
                 "raspberry pi kernel on a versatilepb QEMU machine)\n"
                 "(2) You didn't give QEMU a kernel or BIOS filename at all, "
                 "and QEMU executed a ROM full of no-op instructions until "
                 "it fell off the end\n"
                 "(3) Your guest kernel has a bug and crashed by jumping "
                 "off into nowhere\n\n"
                 "This is almost always one of the first two, so check your "
                 "command line and that you are using the right type of kernel "
                 "for this machine.\n"
                 "If you think option (3) is likely then you can try debugging "
                 "your guest with the -d debug options; in particular "
                 "-d guest_errors will cause the log to include a dump of the "
                 "guest register state at this point.\n\n"
                 "Execution cannot continue; stopping here.\n\n");

    /* Report also to the logs, with more detail including register dump */
    qemu_log_mask(LOG_GUEST_ERROR, "qemu: fatal: Trying to execute code "
                  "outside RAM or ROM at 0x" TARGET_FMT_lx "\n", addr);
    log_cpu_state_mask(LOG_GUEST_ERROR, cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
}

static inline ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
{
    ram_addr_t ram_addr;

    ram_addr = qemu_ram_addr_from_host(ptr);
    if (ram_addr == RAM_ADDR_INVALID) {
        error_report("Bad ram pointer %p", ptr);
        abort();
    }
    return ram_addr;
}

static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
                         int mmu_idx,
                         target_ulong addr, uintptr_t retaddr, int size)
{
    CPUState *cpu = ENV_GET_CPU(env);
    hwaddr physaddr = iotlbentry->addr;
    MemoryRegion *mr = iotlb_to_region(cpu, physaddr, iotlbentry->attrs);
    uint64_t val;
    bool locked = false;
    MemTxResult r;

    physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
    cpu->mem_io_pc = retaddr;
    if (mr != &io_mem_rom && mr != &io_mem_notdirty && !cpu->can_do_io) {
        cpu_io_recompile(cpu, retaddr);
    }

    cpu->mem_io_vaddr = addr;

    if (mr->global_locking && !qemu_mutex_iothread_locked()) {
        qemu_mutex_lock_iothread();
        locked = true;
    }
    r = memory_region_dispatch_read(mr, physaddr,
                                    &val, size, iotlbentry->attrs);
    if (r != MEMTX_OK) {
        cpu_transaction_failed(cpu, physaddr, addr, size, MMU_DATA_LOAD,
                               mmu_idx, iotlbentry->attrs, r, retaddr);
    }
    if (locked) {
        qemu_mutex_unlock_iothread();
    }

    return val;
}

static void io_writex(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
                      int mmu_idx,
                      uint64_t val, target_ulong addr,
                      uintptr_t retaddr, int size)
{
    CPUState *cpu = ENV_GET_CPU(env);
    hwaddr physaddr = iotlbentry->addr;
    MemoryRegion *mr = iotlb_to_region(cpu, physaddr, iotlbentry->attrs);
    bool locked = false;
    MemTxResult r;

    physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
    if (mr != &io_mem_rom && mr != &io_mem_notdirty && !cpu->can_do_io) {
        cpu_io_recompile(cpu, retaddr);
    }
    cpu->mem_io_vaddr = addr;
    cpu->mem_io_pc = retaddr;

    if (mr->global_locking && !qemu_mutex_iothread_locked()) {
        qemu_mutex_lock_iothread();
        locked = true;
    }
    r = memory_region_dispatch_write(mr, physaddr,
                                     val, size, iotlbentry->attrs);
    if (r != MEMTX_OK) {
        cpu_transaction_failed(cpu, physaddr, addr, size, MMU_DATA_STORE,
                               mmu_idx, iotlbentry->attrs, r, retaddr);
    }
    if (locked) {
        qemu_mutex_unlock_iothread();
    }
}

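/* Return true if ADDR is present in the victim tlb, and has been copied
   back to the main tlb.  */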
static bool victim_tlb_hit(CPUArchState *env, size_t mmu_idx, size_t index,
                           size_t elt_ofs, target_ulong page)
{
    size_t vidx;
    for (vidx = 0; vidx < CPU_VTLB_SIZE; ++vidx) {
        CPUTLBEntry *vtlb = &env->tlb_v_table[mmu_idx][vidx];
        target_ulong cmp = *(target_ulong *)((uintptr_t)vtlb + elt_ofs);

        if (cmp == page) {
            /* Found entry in victim tlb, swap tlb and iotlb.  */
            CPUTLBEntry tmptlb, *tlb = &env->tlb_table[mmu_idx][index];

            copy_tlb_helper(&tmptlb, tlb, false);
            copy_tlb_helper(tlb, vtlb, true);
            copy_tlb_helper(vtlb, &tmptlb, true);

            CPUIOTLBEntry tmpio, *io = &env->iotlb[mmu_idx][index];
            CPUIOTLBEntry *vio = &env->iotlb_v[mmu_idx][vidx];
            tmpio = *io; *io = *vio; *vio = tmpio;
            return true;
        }
    }
    return false;
}

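/* Macro to call the above, with local variables from the use context.  */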
#define VICTIM_TLB_HIT(TY, ADDR) \
  victim_tlb_hit(env, mmu_idx, index, offsetof(CPUTLBEntry, TY), \
                 (ADDR) & TARGET_PAGE_MASK)

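/* NOTE: this function can trigger an exception */
/* NOTE2: the returned address is not exactly the physical address: it
 * is actually a ram_addr_t (in system mode; the user mode emulation
 * version of this function returns a guest virtual address).
 */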
tb_page_addr_t get_page_addr_code(CPUArchState *env, target_ulong addr)
{
    int mmu_idx, index, pd;
    void *p;
    MemoryRegion *mr;
    CPUState *cpu = ENV_GET_CPU(env);
    CPUIOTLBEntry *iotlbentry;
    hwaddr physaddr;

    index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    mmu_idx = cpu_mmu_index(env, true);
    if (unlikely(env->tlb_table[mmu_idx][index].addr_code !=
                 (addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK)))) {
        /* A code fetch must match on addr_code, not addr_read, to stay
         * consistent with the comparison above.  */
        if (!VICTIM_TLB_HIT(addr_code, addr)) {
            tlb_fill(ENV_GET_CPU(env), addr, MMU_INST_FETCH, mmu_idx, 0);
        }
    }
    iotlbentry = &env->iotlb[mmu_idx][index];
    pd = iotlbentry->addr & ~TARGET_PAGE_MASK;
    mr = iotlb_to_region(cpu, pd, iotlbentry->attrs);
    if (memory_region_is_unassigned(mr)) {
        qemu_mutex_lock_iothread();
        if (memory_region_request_mmio_ptr(mr, addr)) {
            qemu_mutex_unlock_iothread();
            /* A MemoryRegion is potentially added so re-run the
             * get_page_addr_code.
             */
            return get_page_addr_code(env, addr);
        }
        qemu_mutex_unlock_iothread();

        /* Give the new-style cpu_transaction_failed() hook first chance
         * to handle this.  This is not the ideal place to detect and
         * generate CPU exceptions for instruction fetch failure (for
         * instance we don't know the length of the access that the CPU
         * would use), but it is the best we can do here.
         */
        physaddr = (iotlbentry->addr & TARGET_PAGE_MASK) + addr;
        cpu_transaction_failed(cpu, physaddr, addr, 0, MMU_INST_FETCH, mmu_idx,
                               iotlbentry->attrs, MEMTX_DECODE_ERROR, 0);

        cpu_unassigned_access(cpu, addr, false, true, 0, 4);
        /* The CPU's unassigned access hook might have longjumped out
         * with an exception.  If it didn't (or there was no hook) then
         * we can't proceed further.
         */
        report_bad_exec(cpu, addr);
        exit(1);
    }
    p = (void *)((uintptr_t)addr + env->tlb_table[mmu_idx][index].addend);
    return qemu_ram_addr_from_host_nofail(p);
}

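/* Probe for whether the specified guest write access is permitted.
 * If it is not permitted then an exception will be taken in the same
 * way as if this were a real write access (and we will not return).
 * Otherwise the function will return, and there will be a valid
 * entry in the TLB for this access.
 */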
void probe_write(CPUArchState *env, target_ulong addr, int mmu_idx,
                 uintptr_t retaddr)
{
    int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    target_ulong tlb_addr = env->tlb_table[mmu_idx][index].addr_write;

    if ((addr & TARGET_PAGE_MASK)
        != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        /* TLB entry is for a different page */
        if (!VICTIM_TLB_HIT(addr_write, addr)) {
            tlb_fill(ENV_GET_CPU(env), addr, MMU_DATA_STORE, mmu_idx, retaddr);
        }
    }
}

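/* Probe for a read-modify-write atomic operation.  Do not allow unaligned
 * operations, or io operations to proceed.  Return the host address.  */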
static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
                               TCGMemOpIdx oi, uintptr_t retaddr,
                               NotDirtyInfo *ndi)
{
    size_t mmu_idx = get_mmuidx(oi);
    size_t index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    CPUTLBEntry *tlbe = &env->tlb_table[mmu_idx][index];
    target_ulong tlb_addr = tlbe->addr_write;
    TCGMemOp mop = get_memop(oi);
    int a_bits = get_alignment_bits(mop);
    int s_bits = mop & MO_SIZE;
    void *hostaddr;

    /* Adjust the given return address.  */
    retaddr -= GETPC_ADJ;

    /* Enforce guest required alignment.  */
    if (unlikely(a_bits > 0 && (addr & ((1 << a_bits) - 1)))) {
        /* ??? Maybe indicate atomic op to cpu_unaligned_access */
        cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
                             mmu_idx, retaddr);
    }

    /* Enforce qemu required alignment.  */
    if (unlikely(addr & ((1 << s_bits) - 1))) {
        /* We get here if guest alignment was not requested,
           or was not enforced by cpu_unaligned_access above.
           We might widen the access and emulate, but for now
           mark an exception and exit the cpu loop.  */
        goto stop_the_world;
    }

    /* Check TLB entry and enforce page permissions.  */
    if ((addr & TARGET_PAGE_MASK)
        != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if (!VICTIM_TLB_HIT(addr_write, addr)) {
            tlb_fill(ENV_GET_CPU(env), addr, MMU_DATA_STORE, mmu_idx, retaddr);
        }
        tlb_addr = tlbe->addr_write & ~TLB_INVALID_MASK;
    }

    /* Notice an IO access  */
    if (unlikely(tlb_addr & TLB_MMIO)) {
        /* There's really nothing that can be done to
           support this apart from stop-the-world.  */
        goto stop_the_world;
    }

    /* Let the guest notice RMW on a write-only page.  */
    if (unlikely(tlbe->addr_read != (tlb_addr & ~TLB_NOTDIRTY))) {
        tlb_fill(ENV_GET_CPU(env), addr, MMU_DATA_LOAD, mmu_idx, retaddr);
        /* Since we don't support reads and writes to different addresses,
           and we do have the proper page loaded for write, this shouldn't
           ever return.  But just in case, handle via stop-the-world.  */
        goto stop_the_world;
    }

    hostaddr = (void *)((uintptr_t)addr + tlbe->addend);

    ndi->active = false;
    if (unlikely(tlb_addr & TLB_NOTDIRTY)) {
        ndi->active = true;
        memory_notdirty_write_prepare(ndi, ENV_GET_CPU(env), addr,
                                      qemu_ram_addr_from_host_nofail(hostaddr),
                                      1 << s_bits);
    }

    return hostaddr;

 stop_the_world:
    cpu_loop_exit_atomic(ENV_GET_CPU(env), retaddr);
}

#ifdef TARGET_WORDS_BIGENDIAN
# define TGT_BE(X)  (X)
# define TGT_LE(X)  BSWAP(X)
#else
# define TGT_BE(X)  BSWAP(X)
# define TGT_LE(X)  (X)
#endif

#define MMUSUFFIX _mmu

#define DATA_SIZE 1
#include "softmmu_template.h"

#define DATA_SIZE 2
#include "softmmu_template.h"

#define DATA_SIZE 4
#include "softmmu_template.h"

#define DATA_SIZE 8
#include "softmmu_template.h"

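/* First set of helpers allows passing in of OI and RETADDR.  This makes
   them callable from other helpers.  */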
#define EXTRA_ARGS , TCGMemOpIdx oi, uintptr_t retaddr
#define ATOMIC_NAME(X) \
    HELPER(glue(glue(glue(atomic_ ## X, SUFFIX), END), _mmu))
#define ATOMIC_MMU_DECLS NotDirtyInfo ndi
#define ATOMIC_MMU_LOOKUP atomic_mmu_lookup(env, addr, oi, retaddr, &ndi)
#define ATOMIC_MMU_CLEANUP                              \
    do {                                                \
        if (unlikely(ndi.active)) {                     \
            memory_notdirty_write_complete(&ndi);       \
        }                                               \
    } while (0)

#define DATA_SIZE 1
#include "atomic_template.h"

#define DATA_SIZE 2
#include "atomic_template.h"

#define DATA_SIZE 4
#include "atomic_template.h"

#ifdef CONFIG_ATOMIC64
#define DATA_SIZE 8
#include "atomic_template.h"
#endif

#ifdef CONFIG_ATOMIC128
#define DATA_SIZE 16
#include "atomic_template.h"
#endif

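/* Second set of helpers are directly callable from TCG as helpers.  */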
#undef EXTRA_ARGS
#undef ATOMIC_NAME
#undef ATOMIC_MMU_LOOKUP
#define EXTRA_ARGS         , TCGMemOpIdx oi
#define ATOMIC_NAME(X)     HELPER(glue(glue(atomic_ ## X, SUFFIX), END))
#define ATOMIC_MMU_LOOKUP  atomic_mmu_lookup(env, addr, oi, GETPC(), &ndi)

#define DATA_SIZE 1
#include "atomic_template.h"

#define DATA_SIZE 2
#include "atomic_template.h"

#define DATA_SIZE 4
#include "atomic_template.h"

#ifdef CONFIG_ATOMIC64
#define DATA_SIZE 8
#include "atomic_template.h"
#endif

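/* Code access functions.  */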
#undef MMUSUFFIX
#define MMUSUFFIX _cmmu
#undef GETPC
#define GETPC() ((uintptr_t)0)
#define SOFTMMU_CODE_ACCESS

#define DATA_SIZE 1
#include "softmmu_template.h"

#define DATA_SIZE 2
#include "softmmu_template.h"

#define DATA_SIZE 4
#include "softmmu_template.h"

#define DATA_SIZE 8
#include "softmmu_template.h"