/*
 *  Common CPU TLB handling
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/main-loop.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "exec/memory.h"
#include "exec/address-spaces.h"
#include "exec/cpu_ldst.h"
#include "exec/cputlb.h"
#include "exec/memory-internal.h"
#include "exec/ram_addr.h"
#include "tcg/tcg.h"
#include "qemu/error-report.h"
#include "exec/log.h"
#include "exec/helper-proto.h"
#include "qemu/atomic.h"
#include "qemu/etrace.h"

/* DEBUG defines, enable DEBUG_TLB_LOG to log to the CPU_LOG_MMU target */
/* #define DEBUG_TLB */
/* #define DEBUG_TLB_LOG */

#ifdef DEBUG_TLB
# define DEBUG_TLB_GATE 1
# ifdef DEBUG_TLB_LOG
#  define DEBUG_TLB_LOG_GATE 1
# else
#  define DEBUG_TLB_LOG_GATE 0
# endif
#else
# define DEBUG_TLB_GATE 0
# define DEBUG_TLB_LOG_GATE 0
#endif

#define tlb_debug(fmt, ...) do { \
    if (DEBUG_TLB_LOG_GATE) { \
        qemu_log_mask(CPU_LOG_MMU, "%s: " fmt, __func__, \
                      ## __VA_ARGS__); \
    } else if (DEBUG_TLB_GATE) { \
        fprintf(stderr, "%s: " fmt, __func__, ## __VA_ARGS__); \
    } \
} while (0)

#define assert_cpu_is_self(this_cpu) do {                         \
        if (DEBUG_TLB_GATE) {                                     \
            g_assert(!(this_cpu)->created ||                      \
                     qemu_cpu_is_self(this_cpu));                 \
        }                                                         \
    } while (0)

/* run_on_cpu_data.target_ptr should always be big enough for a
 * target_ulong even on 32 bit builds */
QEMU_BUILD_BUG_ON(sizeof(target_ulong) > sizeof(run_on_cpu_data));

/* We currently can't handle more than 16 bits in the MMUIDX bitmask.
 */
QEMU_BUILD_BUG_ON(NB_MMU_MODES > 16);
#define ALL_MMUIDX_BITS ((1 << NB_MMU_MODES) - 1)
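
/*
 * Illustrative usage sketch (not from the original source): an idxmap
 * is a bitmask of MMU indexes, so a caller wanting to flush MMU
 * indexes 0 and 2 on a given CPU would pass
 *
 *     tlb_flush_by_mmuidx(cpu, (1 << 0) | (1 << 2));
 *
 * Bits at or above NB_MMU_MODES would be meaningless, hence the
 * build-time limits asserted above.
 */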

/* flush_all_helper: run fn across all cpus
 *
 * Queues fn as async work on every CPU other than src; the caller
 * runs (or queues) fn on src itself.  The *_synced variants below
 * additionally queue the src CPU's work as "safe" work, creating a
 * synchronisation point where all queued work completes before
 * execution resumes.
 */
static void flush_all_helper(CPUState *src, run_on_cpu_func fn,
                             run_on_cpu_data d)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (cpu != src) {
            async_run_on_cpu(cpu, fn, d);
        }
    }
}

size_t tlb_flush_count(void)
{
    CPUState *cpu;
    size_t count = 0;

    CPU_FOREACH(cpu) {
        CPUArchState *env = cpu->env_ptr;

        count += atomic_read(&env->tlb_flush_count);
    }
    return count;
}

/* This is OK because CPU architectures generally permit an
 * implementation to drop entries from the TLB at any time, so
 * flushing more entries than required is only an efficiency issue,
 * not a correctness issue.
 */
static void tlb_flush_nocheck(CPUState *cpu)
{
    CPUArchState *env = cpu->env_ptr;

    /* The QOM tests will trigger tlb_flushes without setting up TCG
     * so we bug out here in that case.
     */
    if (!tcg_enabled()) {
        return;
    }

    assert_cpu_is_self(cpu);
    atomic_set(&env->tlb_flush_count, env->tlb_flush_count + 1);
    tlb_debug("(count: %zu)\n", tlb_flush_count());

    tb_lock();

    memset(env->tlb_table, -1, sizeof(env->tlb_table));
    memset(env->tlb_v_table, -1, sizeof(env->tlb_v_table));
    cpu_tb_jmp_cache_clear(cpu);

    env->vtlb_index = 0;
    env->tlb_flush_addr = -1;
    env->tlb_flush_mask = 0;

    tb_unlock();

    atomic_mb_set(&cpu->pending_tlb_flush, 0);
}

static void tlb_flush_global_async_work(CPUState *cpu, run_on_cpu_data data)
{
    tlb_flush_nocheck(cpu);
}

void tlb_flush(CPUState *cpu)
{
    if (cpu->created && !qemu_cpu_is_self(cpu)) {
        if (atomic_mb_read(&cpu->pending_tlb_flush) != ALL_MMUIDX_BITS) {
            atomic_mb_set(&cpu->pending_tlb_flush, ALL_MMUIDX_BITS);
            async_run_on_cpu(cpu, tlb_flush_global_async_work,
                             RUN_ON_CPU_NULL);
        }
    } else {
        tlb_flush_nocheck(cpu);
    }
}

void tlb_flush_all_cpus(CPUState *src_cpu)
{
    const run_on_cpu_func fn = tlb_flush_global_async_work;
    flush_all_helper(src_cpu, fn, RUN_ON_CPU_NULL);
    fn(src_cpu, RUN_ON_CPU_NULL);
}

void tlb_flush_all_cpus_synced(CPUState *src_cpu)
{
    const run_on_cpu_func fn = tlb_flush_global_async_work;
    flush_all_helper(src_cpu, fn, RUN_ON_CPU_NULL);
    async_safe_run_on_cpu(src_cpu, fn, RUN_ON_CPU_NULL);
}

static void tlb_flush_by_mmuidx_async_work(CPUState *cpu, run_on_cpu_data data)
{
    CPUArchState *env = cpu->env_ptr;
    unsigned long mmu_idx_bitmask = data.host_int;
    int mmu_idx;

    assert_cpu_is_self(cpu);

    tb_lock();

    tlb_debug("start: mmu_idx:0x%04lx\n", mmu_idx_bitmask);

    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {

        if (test_bit(mmu_idx, &mmu_idx_bitmask)) {
            tlb_debug("%d\n", mmu_idx);

            memset(env->tlb_table[mmu_idx], -1, sizeof(env->tlb_table[0]));
            memset(env->tlb_v_table[mmu_idx], -1, sizeof(env->tlb_v_table[0]));
        }
    }

    cpu_tb_jmp_cache_clear(cpu);

    tlb_debug("done\n");

    tb_unlock();
}

void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap)
{
    tlb_debug("mmu_idx: 0x%" PRIx16 "\n", idxmap);

    if (!qemu_cpu_is_self(cpu)) {
        uint16_t pending_flushes = idxmap;
        pending_flushes &= ~atomic_mb_read(&cpu->pending_tlb_flush);

        if (pending_flushes) {
            tlb_debug("reduced mmu_idx: 0x%" PRIx16 "\n", pending_flushes);

            atomic_or(&cpu->pending_tlb_flush, pending_flushes);
            async_run_on_cpu(cpu, tlb_flush_by_mmuidx_async_work,
                             RUN_ON_CPU_HOST_INT(pending_flushes));
        }
    } else {
        tlb_flush_by_mmuidx_async_work(cpu,
                                       RUN_ON_CPU_HOST_INT(idxmap));
    }
}

void tlb_flush_by_mmuidx_all_cpus(CPUState *src_cpu, uint16_t idxmap)
{
    const run_on_cpu_func fn = tlb_flush_by_mmuidx_async_work;

    tlb_debug("mmu_idx: 0x%"PRIx16"\n", idxmap);

    flush_all_helper(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
    fn(src_cpu, RUN_ON_CPU_HOST_INT(idxmap));
}

void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
                                         uint16_t idxmap)
{
    const run_on_cpu_func fn = tlb_flush_by_mmuidx_async_work;

    tlb_debug("mmu_idx: 0x%"PRIx16"\n", idxmap);

    flush_all_helper(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
    async_safe_run_on_cpu(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
}

static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
{
    if (addr == (tlb_entry->addr_read &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_write &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_code &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        memset(tlb_entry, -1, sizeof(*tlb_entry));
    }
}

static void tlb_flush_page_async_work(CPUState *cpu, run_on_cpu_data data)
{
    CPUArchState *env = cpu->env_ptr;
    target_ulong addr = (target_ulong) data.target_ptr;
    int i;
    int mmu_idx;

    assert_cpu_is_self(cpu);

    tlb_debug("page :" TARGET_FMT_lx "\n", addr);

    /* Check if we need to flush due to large pages.  */
    if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
        tlb_debug("forcing full flush ("
                  TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
                  env->tlb_flush_addr, env->tlb_flush_mask);

        tlb_flush(cpu);
        return;
    }

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);
    }

    /* check whether there are entries that need to be flushed in the vtlb */
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        int k;
        for (k = 0; k < CPU_VTLB_SIZE; k++) {
            tlb_flush_entry(&env->tlb_v_table[mmu_idx][k], addr);
        }
    }

    tb_flush_jmp_cache(cpu, addr);
}

void tlb_flush_page(CPUState *cpu, target_ulong addr)
{
    tlb_debug("page :" TARGET_FMT_lx "\n", addr);

    if (!qemu_cpu_is_self(cpu)) {
        async_run_on_cpu(cpu, tlb_flush_page_async_work,
                         RUN_ON_CPU_TARGET_PTR(addr));
    } else {
        tlb_flush_page_async_work(cpu, RUN_ON_CPU_TARGET_PTR(addr));
    }
}

/* As we are going to hijack the bottom bits of the page address for a
 * mmuidx bit mask we need to fail to build if we can't do that
 */
QEMU_BUILD_BUG_ON(NB_MMU_MODES > TARGET_PAGE_BITS_MIN);

static void tlb_flush_page_by_mmuidx_async_work(CPUState *cpu,
                                                run_on_cpu_data data)
{
    CPUArchState *env = cpu->env_ptr;
    target_ulong addr_and_mmuidx = (target_ulong) data.target_ptr;
    target_ulong addr = addr_and_mmuidx & TARGET_PAGE_MASK;
    unsigned long mmu_idx_bitmap = addr_and_mmuidx & ALL_MMUIDX_BITS;
    int page = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    int mmu_idx;
    int i;

    assert_cpu_is_self(cpu);

    tlb_debug("page:%d addr:"TARGET_FMT_lx" mmu_idx:0x%lx\n",
              page, addr, mmu_idx_bitmap);

    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        if (test_bit(mmu_idx, &mmu_idx_bitmap)) {
            tlb_flush_entry(&env->tlb_table[mmu_idx][page], addr);

            /* check whether there are vtlb entries that need to be flushed */
            for (i = 0; i < CPU_VTLB_SIZE; i++) {
                tlb_flush_entry(&env->tlb_v_table[mmu_idx][i], addr);
            }
        }
    }

    tb_flush_jmp_cache(cpu, addr);
}

static void tlb_check_page_and_flush_by_mmuidx_async_work(CPUState *cpu,
                                                          run_on_cpu_data data)
{
    CPUArchState *env = cpu->env_ptr;
    target_ulong addr_and_mmuidx = (target_ulong) data.target_ptr;
    target_ulong addr = addr_and_mmuidx & TARGET_PAGE_MASK;
    unsigned long mmu_idx_bitmap = addr_and_mmuidx & ALL_MMUIDX_BITS;

    tlb_debug("addr:"TARGET_FMT_lx" mmu_idx: %04lx\n", addr, mmu_idx_bitmap);

    /* Check if we need to flush due to large pages.  */
    if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
        tlb_debug("forced full flush ("
                  TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
                  env->tlb_flush_addr, env->tlb_flush_mask);

        tlb_flush_by_mmuidx_async_work(cpu,
                                       RUN_ON_CPU_HOST_INT(mmu_idx_bitmap));
    } else {
        tlb_flush_page_by_mmuidx_async_work(cpu, data);
    }
}

void tlb_flush_page_by_mmuidx(CPUState *cpu, target_ulong addr, uint16_t idxmap)
{
    target_ulong addr_and_mmu_idx;

    tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%" PRIx16 "\n", addr, idxmap);

    /* This should already be page aligned */
    addr_and_mmu_idx = addr & TARGET_PAGE_MASK;
    addr_and_mmu_idx |= idxmap;

    if (!qemu_cpu_is_self(cpu)) {
        async_run_on_cpu(cpu, tlb_check_page_and_flush_by_mmuidx_async_work,
                         RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
    } else {
        tlb_check_page_and_flush_by_mmuidx_async_work(
            cpu, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
    }
}

void tlb_flush_page_by_mmuidx_all_cpus(CPUState *src_cpu, target_ulong addr,
                                       uint16_t idxmap)
{
    const run_on_cpu_func fn = tlb_check_page_and_flush_by_mmuidx_async_work;
    target_ulong addr_and_mmu_idx;

    tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%"PRIx16"\n", addr, idxmap);

    /* This should already be page aligned */
    addr_and_mmu_idx = addr & TARGET_PAGE_MASK;
    addr_and_mmu_idx |= idxmap;

    flush_all_helper(src_cpu, fn, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
    fn(src_cpu, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
}

void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
                                              target_ulong addr,
                                              uint16_t idxmap)
{
    const run_on_cpu_func fn = tlb_check_page_and_flush_by_mmuidx_async_work;
    target_ulong addr_and_mmu_idx;

    tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%"PRIx16"\n", addr, idxmap);

    /* This should already be page aligned */
    addr_and_mmu_idx = addr & TARGET_PAGE_MASK;
    addr_and_mmu_idx |= idxmap;

    flush_all_helper(src_cpu, fn, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
    async_safe_run_on_cpu(src_cpu, fn, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
}

void tlb_flush_page_all_cpus(CPUState *src, target_ulong addr)
{
    const run_on_cpu_func fn = tlb_flush_page_async_work;

    flush_all_helper(src, fn, RUN_ON_CPU_TARGET_PTR(addr));
    fn(src, RUN_ON_CPU_TARGET_PTR(addr));
}

void tlb_flush_page_all_cpus_synced(CPUState *src,
                                    target_ulong addr)
{
    const run_on_cpu_func fn = tlb_flush_page_async_work;

    flush_all_helper(src, fn, RUN_ON_CPU_TARGET_PTR(addr));
    async_safe_run_on_cpu(src, fn, RUN_ON_CPU_TARGET_PTR(addr));
}

/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
void tlb_protect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_test_and_clear_dirty(ram_addr, TARGET_PAGE_SIZE,
                                             DIRTY_MEMORY_CODE);
}

/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
void tlb_unprotect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_set_dirty_flag(ram_addr, DIRTY_MEMORY_CODE);
}

/*
 * Dirty write flag handling
 *
 * When the TCG code writes to a location it looks up the address in
 * the TLB and uses that data to compute the final address. If any of
 * the lower bits of the address are set then the slow path is forced.
 * There are a number of reasons to do this but for normal RAM the
 * most usual is detecting writes to code regions which may invalidate
 * generated code.
 *
 * Because we want other vCPUs to respond to changes straight away we
 * update the te->addr_write field atomically. If the TLB entry has
 * been changed by the vCPU in the meantime we skip the update.
 *
 * As this function uses atomic accesses we also need to ensure
 * updates to tlb_entries follow the same access rules. We don't need
 * to worry about this for oversized guests as MTTCG is disabled for
 * them.
 */

static void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry, uintptr_t start,
                                  uintptr_t length)
{
#if TCG_OVERSIZED_GUEST
    uintptr_t addr = tlb_entry->addr_write;

    if ((addr & (TLB_INVALID_MASK | TLB_MMIO | TLB_NOTDIRTY)) == 0) {
        addr &= TARGET_PAGE_MASK;
        addr += tlb_entry->addend;
        if ((addr - start) < length) {
            tlb_entry->addr_write |= TLB_NOTDIRTY;
        }
    }
#else
    /* paired with atomic_mb_set in tlb_set_page_with_attrs */
    uintptr_t orig_addr = atomic_mb_read(&tlb_entry->addr_write);
    uintptr_t addr = orig_addr;

    if ((addr & (TLB_INVALID_MASK | TLB_MMIO | TLB_NOTDIRTY)) == 0) {
        addr &= TARGET_PAGE_MASK;
        addr += atomic_read(&tlb_entry->addend);
        if ((addr - start) < length) {
            uintptr_t notdirty_addr = orig_addr | TLB_NOTDIRTY;
            atomic_cmpxchg(&tlb_entry->addr_write, orig_addr, notdirty_addr);
        }
    }
#endif
}

/* For atomic correctness when running MTTCG we need to use the right
 * primitives when copying entries */
static inline void copy_tlb_helper(CPUTLBEntry *d, CPUTLBEntry *s,
                                   bool atomic)
{
#if TCG_OVERSIZED_GUEST
    *d = *s;
#else
    if (atomic) {
        d->addr_read = s->addr_read;
        d->addr_code = s->addr_code;
        atomic_set(&d->addend, atomic_read(&s->addend));
        /* Pairs with flag setting in tlb_reset_dirty_range */
        atomic_mb_set(&d->addr_write, atomic_read(&s->addr_write));
    } else {
        d->addr_read = s->addr_read;
        d->addr_write = atomic_read(&s->addr_write);
        d->addr_code = s->addr_code;
        d->addend = atomic_read(&s->addend);
    }
#endif
}

/* This is a cross vCPU call (i.e. another vCPU resetting the flags of
 * the target vCPU). As such care needs to be taken that we don't
 * dangerously race with another vCPU update. The only thing actually
 * updated is the target TLB entry ->addr_write flags.
 */
void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length)
{
    CPUArchState *env = cpu->env_ptr;
    int mmu_idx;

    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        unsigned int i;

        for (i = 0; i < CPU_TLB_SIZE; i++) {
            tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
                                  start1, length);
        }

        for (i = 0; i < CPU_VTLB_SIZE; i++) {
            tlb_reset_dirty_range(&env->tlb_v_table[mmu_idx][i],
                                  start1, length);
        }
    }
}

static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
{
    if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY)) {
        tlb_entry->addr_write = vaddr;
    }
}

/* update the TLB corresponding to virtual page vaddr
   so that it is no longer dirty */
void tlb_set_dirty(CPUState *cpu, target_ulong vaddr)
{
    CPUArchState *env = cpu->env_ptr;
    int i;
    int mmu_idx;

    assert_cpu_is_self(cpu);

    vaddr &= TARGET_PAGE_MASK;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
    }

    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        int k;
        for (k = 0; k < CPU_VTLB_SIZE; k++) {
            tlb_set_dirty1(&env->tlb_v_table[mmu_idx][k], vaddr);
        }
    }
}

/* Our TLB does not support large pages, so remember the area covered by
   large pages and trigger a full TLB flush if these are invalidated.  */
static void tlb_add_large_page(CPUArchState *env, target_ulong vaddr,
                               target_ulong size)
{
    target_ulong mask = ~(size - 1);

    if (env->tlb_flush_addr == (target_ulong)-1) {
        env->tlb_flush_addr = vaddr & mask;
        env->tlb_flush_mask = mask;
        return;
    }

    /* Extend the existing region to include the new page.
       This is a compromise between unnecessary flushes and the cost
       of maintaining a full variable size TLB.  */
    mask &= env->tlb_flush_mask;
    while (((env->tlb_flush_addr ^ vaddr) & mask) != 0) {
        mask <<= 1;
    }
    env->tlb_flush_addr &= mask;
    env->tlb_flush_mask = mask;
}
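
/*
 * A worked example (illustrative, not from the original source), on a
 * 32-bit target: if the tracked region is addr=0x200000 mask=0xffe00000
 * (one 2MB page) and a second 2MB page at 0x400000 is added, the loop
 * above widens the mask until both addresses agree under it:
 *
 *     0x200000 ^ 0x400000 = 0x600000
 *     mask: 0xffe00000 -> 0xffc00000 -> 0xff800000  (0x600000 & mask == 0)
 *
 * leaving addr=0x000000 mask=0xff800000, i.e. a single 8MB covering
 * region whose invalidation forces a full flush.
 */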

/* Add a new TLB entry. At most one entry for a given virtual address
 * is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the
 * supplied size is only used by tlb_flush_page.
 *
 * Called from TCG-generated code, which is under an RCU read-side
 * critical section.
 */
void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
                             hwaddr paddr, MemTxAttrs attrs, int prot,
                             int mmu_idx, target_ulong size)
{
    CPUArchState *env = cpu->env_ptr;
    MemoryRegionSection *section;
    unsigned int index;
    target_ulong address;
    target_ulong code_address;
    uintptr_t addend;
    CPUTLBEntry *te, *tv, tn;
    hwaddr iotlb, xlat, sz;
    unsigned vidx = env->vtlb_index++ % CPU_VTLB_SIZE;
    int asidx = cpu_asidx_from_attrs(cpu, attrs);

    /* Memory transaction attributes are tracked per security state.  */
    CPUIOTLBEntry *attr = &env->memattr[attrs.secure];

    assert_cpu_is_self(cpu);
    assert(size >= TARGET_PAGE_SIZE);
    if (size != TARGET_PAGE_SIZE) {
        tlb_add_large_page(env, vaddr, size);
    }

    sz = size;
    section = address_space_translate_for_iotlb(cpu, asidx, paddr, &xlat, &sz,
                                                &prot, &attr->attrs);
    assert(sz >= TARGET_PAGE_SIZE);

    tlb_debug("vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx
              " prot=%x idx=%d sec=%d.%d\n",
              vaddr, paddr, prot, mmu_idx, attr->attrs.secure, attrs.secure);

    address = vaddr;
    if (!memory_region_is_ram(section->mr) &&
        !memory_region_is_romd(section->mr)) {
        /* IO memory case */
        address |= TLB_MMIO;
        addend = 0;
    } else {
        /* TLB_MMIO for rom/romd handled below */
        addend = (uintptr_t)memory_region_get_ram_ptr(section->mr) + xlat;
    }

    code_address = address;
    iotlb = memory_region_section_get_iotlb(cpu, section, vaddr, paddr, xlat,
                                            prot, &address);

    index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    te = &env->tlb_table[mmu_idx][index];
    /* do not discard the translation in te, evict it into a victim tlb */
    tv = &env->tlb_v_table[mmu_idx][vidx];

    /* addr_write can race with tlb_reset_dirty_range */
    copy_tlb_helper(tv, te, true);

    env->iotlb_v[mmu_idx][vidx] = env->iotlb[mmu_idx][index];

    /* refill the tlb */
    env->iotlb[mmu_idx][index].addr = iotlb - vaddr;
    env->iotlb[mmu_idx][index].attrs = attr->attrs;

    /* Now calculate the new entry */
    tn.addend = addend - vaddr;
    if (prot & PAGE_READ) {
        tn.addr_read = address;
    } else {
        tn.addr_read = -1;
    }

    if (prot & PAGE_EXEC) {
        tn.addr_code = code_address;
    } else {
        tn.addr_code = -1;
    }

    tn.addr_write = -1;
    if (prot & PAGE_WRITE) {
        if ((memory_region_is_ram(section->mr) && section->readonly)
            || memory_region_is_romd(section->mr)) {
            /* Write access calls the I/O callback.  */
            tn.addr_write = address | TLB_MMIO;
        } else if (memory_region_is_ram(section->mr)
                   && cpu_physical_memory_is_clean(
                       memory_region_get_ram_addr(section->mr) + xlat)) {
            tn.addr_write = address | TLB_NOTDIRTY;
        } else {
            tn.addr_write = address;
        }
        if (prot & PAGE_WRITE_INV) {
            tn.addr_write |= TLB_INVALID_MASK;
        }
    }

    /* Pairs with flag setting in tlb_reset_dirty_range */
    copy_tlb_helper(te, &tn, true);
}

/* Add a new TLB entry, but without specifying the memory
 * transaction attributes to be used.
 */
void tlb_set_page(CPUState *cpu, target_ulong vaddr,
                  hwaddr paddr, int prot,
                  int mmu_idx, target_ulong size)
{
    tlb_set_page_with_attrs(cpu, vaddr, paddr, MEMTXATTRS_UNSPECIFIED,
                            prot, mmu_idx, size);
}

static void report_bad_exec(CPUState *cpu, target_ulong addr)
{
    /* Accidentally executing outside RAM or ROM is quite common for
     * several user-error situations, so report it in a way that
     * makes it clear that this isn't a QEMU bug and provide
     * suggestions about what a user could do to fix things.
     */
    error_report("Trying to execute code outside RAM or ROM at 0x"
                 TARGET_FMT_lx, addr);
    error_printf("This usually means one of the following happened:\n\n"
                 "(1) You told QEMU to execute a kernel for the wrong machine "
                 "type, and it crashed on startup (eg trying to run a "
                 "raspberry pi kernel on a versatilepb QEMU machine)\n"
                 "(2) You didn't give QEMU a kernel or BIOS filename at all, "
                 "and QEMU executed a ROM full of no-op instructions until "
                 "it fell off the end\n"
                 "(3) Your guest kernel has a bug and crashed by jumping "
                 "off into nowhere\n\n"
                 "This is almost always one of the first two, so check your "
                 "command line and that you are using the right type of kernel "
                 "for this machine.\n"
                 "If you think option (3) is likely then you can try debugging "
                 "your guest with the -d debug options; in particular "
                 "-d guest_errors will cause the log to include a dump of the "
                 "guest register state at this point.\n\n"
                 "Execution cannot continue; stopping here.\n\n");

    /* Report also to the logs, with more detail including register dump */
    qemu_log_mask(LOG_GUEST_ERROR, "qemu: fatal: Trying to execute code "
                  "outside RAM or ROM at 0x" TARGET_FMT_lx "\n", addr);
    log_cpu_state_mask(LOG_GUEST_ERROR, cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
}

static inline ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
{
    ram_addr_t ram_addr;

    ram_addr = qemu_ram_addr_from_host(ptr);
    if (ram_addr == RAM_ADDR_INVALID) {
        error_report("Bad ram pointer %p", ptr);
        abort();
    }
    return ram_addr;
}

static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
                         int mmu_idx,
                         target_ulong addr, uintptr_t retaddr, int size)
{
    CPUState *cpu = ENV_GET_CPU(env);
    hwaddr physaddr = iotlbentry->addr;
    MemoryRegion *mr = iotlb_to_region(cpu, physaddr, iotlbentry->attrs);
    uint64_t val;
    bool locked = false;
    MemTxResult r;

    physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
    cpu->mem_io_pc = retaddr;
    if (mr != &io_mem_rom && mr != &io_mem_notdirty && !cpu->can_do_io) {
        cpu_io_recompile(cpu, retaddr);
    }

    cpu->mem_io_vaddr = addr;

    if (mr->global_locking && !qemu_mutex_iothread_locked()) {
        qemu_mutex_lock_iothread();
        locked = true;
    }

    /* If the region sits behind an IOMMU, route the access through the
     * CPU's address space so the IOMMU translation is applied;
     * otherwise dispatch directly to the memory region.
     */
    if (memory_region_get_iommu(mr)) {
        r = address_space_rw(cpu->as, physaddr, iotlbentry->attrs,
                             (void *) &val, size, false);
    } else {
        r = memory_region_dispatch_read(mr, physaddr,
                                        &val, size, iotlbentry->attrs);
    }

    if (qemu_etrace_mask(ETRACE_F_MEM)) {
        etrace_mem_access(&qemu_etracer, 0, 0,
                          addr, size, MEM_READ, val);
    }

    if (r != MEMTX_OK) {
        cpu_transaction_failed(cpu, physaddr, addr, size, MMU_DATA_LOAD,
                               mmu_idx, iotlbentry->attrs, r, retaddr);
    }

    if (locked) {
        qemu_mutex_unlock_iothread();
    }

    return val;
}

static void io_writex(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
                      int mmu_idx,
                      uint64_t val, target_ulong addr,
                      uintptr_t retaddr, int size)
{
    CPUState *cpu = ENV_GET_CPU(env);
    hwaddr physaddr = iotlbentry->addr;
    MemoryRegion *mr = iotlb_to_region(cpu, physaddr, iotlbentry->attrs);
    bool locked = false;
    MemTxResult r;

    physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
    if (mr != &io_mem_rom && mr != &io_mem_notdirty && !cpu->can_do_io) {
        cpu_io_recompile(cpu, retaddr);
    }
    cpu->mem_io_vaddr = addr;
    cpu->mem_io_pc = retaddr;

    if (mr->global_locking && !qemu_mutex_iothread_locked()) {
        qemu_mutex_lock_iothread();
        locked = true;
    }

    /* If the region sits behind an IOMMU, route the access through the
     * CPU's address space so the IOMMU translation is applied;
     * otherwise dispatch directly to the memory region.
     */
    if (memory_region_get_iommu(mr)) {
        r = address_space_rw(cpu->as, physaddr, iotlbentry->attrs,
                             (void *) &val, size, true);
    } else {
        r = memory_region_dispatch_write(mr, physaddr,
                                         val, size, iotlbentry->attrs);
    }

    if (qemu_etrace_mask(ETRACE_F_MEM)) {
        etrace_mem_access(&qemu_etracer, 0, 0,
                          addr, size, MEM_WRITE, val);
    }

    if (r != MEMTX_OK) {
        cpu_transaction_failed(cpu, physaddr, addr, size, MMU_DATA_STORE,
                               mmu_idx, iotlbentry->attrs, r, retaddr);
    }

    if (locked) {
        qemu_mutex_unlock_iothread();
    }
}

/* Return true if ADDR is present in the victim tlb, and has been copied
   back to the main tlb.  */
static bool victim_tlb_hit(CPUArchState *env, size_t mmu_idx, size_t index,
                           size_t elt_ofs, target_ulong page)
{
    size_t vidx;
    for (vidx = 0; vidx < CPU_VTLB_SIZE; ++vidx) {
        CPUTLBEntry *vtlb = &env->tlb_v_table[mmu_idx][vidx];
        target_ulong cmp = *(target_ulong *)((uintptr_t)vtlb + elt_ofs);

        if (cmp == page) {
            /* Found entry in victim tlb, swap tlb and iotlb.  */
            CPUTLBEntry tmptlb, *tlb = &env->tlb_table[mmu_idx][index];

            copy_tlb_helper(&tmptlb, tlb, false);
            copy_tlb_helper(tlb, vtlb, true);
            copy_tlb_helper(vtlb, &tmptlb, true);

            CPUIOTLBEntry tmpio, *io = &env->iotlb[mmu_idx][index];
            CPUIOTLBEntry *vio = &env->iotlb_v[mmu_idx][vidx];
            tmpio = *io; *io = *vio; *vio = tmpio;
            return true;
        }
    }
    return false;
}

/* Macro to call the above, with local variables from the use context.  */
#define VICTIM_TLB_HIT(TY, ADDR) \
  victim_tlb_hit(env, mmu_idx, index, offsetof(CPUTLBEntry, TY), \
                 (ADDR) & TARGET_PAGE_MASK)
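
/*
 * Usage sketch (illustrative, not from the original source): within a
 * slow path where "env", "mmu_idx" and "index" are in scope, a store
 * probes the victim TLB before taking the full fill path:
 *
 *     if (!VICTIM_TLB_HIT(addr_write, addr)) {
 *         tlb_fill(ENV_GET_CPU(env), addr, MMU_DATA_STORE, mmu_idx, retaddr);
 *     }
 *
 * The TY argument selects which CPUTLBEntry comparator (addr_read,
 * addr_write or addr_code) must match the page address.
 */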

/* NOTE: this function can trigger an exception */
/* NOTE2: the returned address is not exactly the physical address: it
 * is actually a ram_addr_t (in system mode; the user mode emulation
 * version of this function returns a guest virtual address).
 */
tb_page_addr_t get_page_addr_code(CPUArchState *env, target_ulong addr)
{
    int mmu_idx, index, pd;
    void *p;
    MemoryRegion *mr;
    CPUState *cpu = ENV_GET_CPU(env);
    CPUIOTLBEntry *iotlbentry;
    hwaddr physaddr;

    index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    mmu_idx = cpu_mmu_index(env, true);
    if (unlikely(env->tlb_table[mmu_idx][index].addr_code !=
                 (addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK)))) {
        if (!VICTIM_TLB_HIT(addr_code, addr)) {
            tlb_fill(ENV_GET_CPU(env), addr, MMU_INST_FETCH, mmu_idx, 0);
        }
    }
    iotlbentry = &env->iotlb[mmu_idx][index];
    pd = iotlbentry->addr & ~TARGET_PAGE_MASK;
    mr = iotlb_to_region(cpu, pd, iotlbentry->attrs);
    if (memory_region_is_unassigned(mr)) {
        qemu_mutex_lock_iothread();
        if (memory_region_request_mmio_ptr(mr, addr)) {
            qemu_mutex_unlock_iothread();
            /* A MemoryRegion is potentially added so re-run the
             * get_page_addr_code.
             */
            return get_page_addr_code(env, addr);
        }
        qemu_mutex_unlock_iothread();

        /* Give the new-style cpu_transaction_failed() hook first chance
         * to handle this; it may raise an exception and not return.
         */
        physaddr = (iotlbentry->addr & TARGET_PAGE_MASK) + addr;
        cpu_transaction_failed(cpu, physaddr, addr, 0, MMU_INST_FETCH, mmu_idx,
                               iotlbentry->attrs, MEMTX_DECODE_ERROR, 0);

        cpu_unassigned_access(cpu, addr, false, true, 0, 4);
        /* The CPU's unassigned access hook might have longjumped out
         * with an exception. If it didn't (or there was no hook) then
         * we can't proceed further.
         */
        report_bad_exec(cpu, addr);
        exit(1);
    }
    p = (void *)((uintptr_t)addr + env->tlb_table[mmu_idx][index].addend);
    return qemu_ram_addr_from_host_nofail(p);
}

/* Probe for whether the specified guest write access is permitted.
 * If it is not permitted then an exception will be taken in the same
 * way as if this were a real write access (and we will not return).
 * Otherwise the function will return, and there will be a valid
 * entry in the TLB for this access.
 */
void probe_write(CPUArchState *env, target_ulong addr, int mmu_idx,
                 uintptr_t retaddr)
{
    int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    target_ulong tlb_addr = env->tlb_table[mmu_idx][index].addr_write;

    if ((addr & TARGET_PAGE_MASK)
        != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        /* TLB entry is for a different page */
        if (!VICTIM_TLB_HIT(addr_write, addr)) {
            tlb_fill(ENV_GET_CPU(env), addr, MMU_DATA_STORE, mmu_idx, retaddr);
        }
    }
}
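
/*
 * Usage sketch (illustrative, not from the original source): a target
 * helper that is about to modify guest memory in place can validate
 * the access up front, e.g.
 *
 *     probe_write(env, addr, cpu_mmu_index(env, false), GETPC());
 *
 * so that any fault is raised before partial side effects occur.
 */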

/* Probe for a read-modify-write atomic operation.  Do not allow unaligned
 * operations, or io operations to proceed.  Return the host address.  */
static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
                               TCGMemOpIdx oi, uintptr_t retaddr,
                               NotDirtyInfo *ndi)
{
    size_t mmu_idx = get_mmuidx(oi);
    size_t index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    CPUTLBEntry *tlbe = &env->tlb_table[mmu_idx][index];
    target_ulong tlb_addr = tlbe->addr_write;
    TCGMemOp mop = get_memop(oi);
    int a_bits = get_alignment_bits(mop);
    int s_bits = mop & MO_SIZE;
    void *hostaddr;

    /* Adjust the given return address.  */
    retaddr -= GETPC_ADJ;

    /* Enforce guest required alignment.  */
    if (unlikely(a_bits > 0 && (addr & ((1 << a_bits) - 1)))) {
        /* ??? Maybe indicate atomic op to cpu_unaligned_access */
        cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
                             mmu_idx, retaddr);
    }

    /* Enforce qemu required alignment.  */
    if (unlikely(addr & ((1 << s_bits) - 1))) {
        /* We get here if guest alignment was not requested,
           or was not enforced by cpu_unaligned_access above.
           We might widen the access and emulate, but for now
           mark an exception and exit the cpu loop.  */
        goto stop_the_world;
    }

    /* Check TLB entry and enforce page permissions.  */
    if ((addr & TARGET_PAGE_MASK)
        != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if (!VICTIM_TLB_HIT(addr_write, addr)) {
            tlb_fill(ENV_GET_CPU(env), addr, MMU_DATA_STORE, mmu_idx, retaddr);
        }
        tlb_addr = tlbe->addr_write & ~TLB_INVALID_MASK;
    }

    /* Notice an IO access.  */
    if (unlikely(tlb_addr & TLB_MMIO)) {
        /* There's really nothing that can be done to
           support this apart from stop-the-world.  */
        goto stop_the_world;
    }

    /* Let the guest notice RMW on a write-only page.  */
    if (unlikely(tlbe->addr_read != (tlb_addr & ~TLB_NOTDIRTY))) {
        tlb_fill(ENV_GET_CPU(env), addr, MMU_DATA_LOAD, mmu_idx, retaddr);
        /* Since we don't support reads and writes to different addresses,
           and we do have the proper page loaded for write, this shouldn't
           ever return.  But just in case, handle via stop-the-world.  */
        goto stop_the_world;
    }

    hostaddr = (void *)((uintptr_t)addr + tlbe->addend);

    ndi->active = false;
    if (unlikely(tlb_addr & TLB_NOTDIRTY)) {
        ndi->active = true;
        memory_notdirty_write_prepare(ndi, ENV_GET_CPU(env), addr,
                                      qemu_ram_addr_from_host_nofail(hostaddr),
                                      1 << s_bits);
    }

    return hostaddr;

 stop_the_world:
    cpu_loop_exit_atomic(ENV_GET_CPU(env), retaddr);
}

#ifdef TARGET_WORDS_BIGENDIAN
# define TGT_BE(X)  (X)
# define TGT_LE(X)  BSWAP(X)
#else
# define TGT_BE(X)  BSWAP(X)
# define TGT_LE(X)  (X)
#endif

#define MMUSUFFIX _mmu

#define DATA_SIZE 1
#include "softmmu_template.h"

#define DATA_SIZE 2
#include "softmmu_template.h"

#define DATA_SIZE 4
#include "softmmu_template.h"

#define DATA_SIZE 8
#include "softmmu_template.h"

/* First set of helpers allows passing in of OI and RETADDR.  This makes
   them callable from other helpers.  */

#define EXTRA_ARGS , TCGMemOpIdx oi, uintptr_t retaddr
#define ATOMIC_NAME(X) \
    HELPER(glue(glue(glue(atomic_ ## X, SUFFIX), END), _mmu))
#define ATOMIC_MMU_DECLS NotDirtyInfo ndi
#define ATOMIC_MMU_LOOKUP atomic_mmu_lookup(env, addr, oi, retaddr, &ndi)
#define ATOMIC_MMU_CLEANUP                              \
    do {                                                \
        if (unlikely(ndi.active)) {                     \
            memory_notdirty_write_complete(&ndi);       \
        }                                               \
    } while (0)
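
/*
 * Sketch of how atomic_template.h consumes these macros (illustrative,
 * not from the original source): for DATA_SIZE 4 it emits, roughly,
 *
 *     uint32_t ATOMIC_NAME(cmpxchg)(CPUArchState *env, target_ulong addr,
 *                                   uint32_t cmpv, uint32_t newv EXTRA_ARGS)
 *     {
 *         ATOMIC_MMU_DECLS;
 *         uint32_t *haddr = ATOMIC_MMU_LOOKUP;
 *         uint32_t ret = atomic_cmpxchg__nocheck(haddr, cmpv, newv);
 *         ATOMIC_MMU_CLEANUP;
 *         return ret;
 *     }
 *
 * so the lookup/cleanup pair brackets every generated atomic helper.
 */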

#define DATA_SIZE 1
#include "atomic_template.h"

#define DATA_SIZE 2
#include "atomic_template.h"

#define DATA_SIZE 4
#include "atomic_template.h"

#ifdef CONFIG_ATOMIC64
#define DATA_SIZE 8
#include "atomic_template.h"
#endif

#ifdef CONFIG_ATOMIC128
#define DATA_SIZE 16
#include "atomic_template.h"
#endif

/* Second set of helpers are directly callable from TCG as helpers.  */

#undef EXTRA_ARGS
#undef ATOMIC_NAME
#undef ATOMIC_MMU_LOOKUP
#define EXTRA_ARGS         , TCGMemOpIdx oi
#define ATOMIC_NAME(X)     HELPER(glue(glue(atomic_ ## X, SUFFIX), END))
#define ATOMIC_MMU_LOOKUP  atomic_mmu_lookup(env, addr, oi, GETPC(), &ndi)

#define DATA_SIZE 1
#include "atomic_template.h"

#define DATA_SIZE 2
#include "atomic_template.h"

#define DATA_SIZE 4
#include "atomic_template.h"

#ifdef CONFIG_ATOMIC64
#define DATA_SIZE 8
#include "atomic_template.h"
#endif

/* Code access functions.  */

#undef MMUSUFFIX
#define MMUSUFFIX _cmmu
#undef GETPC
#define GETPC() ((uintptr_t)0)
#define SOFTMMU_CODE_ACCESS

#define DATA_SIZE 1
#include "softmmu_template.h"

#define DATA_SIZE 2
#include "softmmu_template.h"

#define DATA_SIZE 4
#include "softmmu_template.h"

#define DATA_SIZE 8
#include "softmmu_template.h"