#include "qemu/osdep.h"
#include "qemu/main-loop.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "exec/memory.h"
#include "exec/address-spaces.h"
#include "exec/cpu_ldst.h"
#include "exec/cputlb.h"
#include "exec/memory-internal.h"
#include "exec/ram_addr.h"
#include "tcg/tcg.h"
#include "qemu/error-report.h"
#include "exec/log.h"
#include "exec/helper-proto.h"
#include "qemu/atomic.h"
#include "qemu/atomic128.h"

/* DEBUG defines, enable DEBUG_TLB_LOG to log to the CPU_LOG_MMU target */
/* #define DEBUG_TLB */
/* #define DEBUG_TLB_LOG */

#ifdef DEBUG_TLB
# define DEBUG_TLB_GATE 1
# ifdef DEBUG_TLB_LOG
#  define DEBUG_TLB_LOG_GATE 1
# else
#  define DEBUG_TLB_LOG_GATE 0
# endif
#else
# define DEBUG_TLB_GATE 0
# define DEBUG_TLB_LOG_GATE 0
#endif

#define tlb_debug(fmt, ...) do { \
    if (DEBUG_TLB_LOG_GATE) { \
        qemu_log_mask(CPU_LOG_MMU, "%s: " fmt, __func__, \
                      ## __VA_ARGS__); \
    } else if (DEBUG_TLB_GATE) { \
        fprintf(stderr, "%s: " fmt, __func__, ## __VA_ARGS__); \
    } \
} while (0)

#define assert_cpu_is_self(cpu) do {                              \
        if (DEBUG_TLB_GATE) {                                     \
            g_assert(!(cpu)->created || qemu_cpu_is_self(cpu));   \
        }                                                         \
    } while (0)

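/* run_on_cpu_data.target_ptr should always be big enough for a
 * target_ulong even on 32 bit builds
 */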
QEMU_BUILD_BUG_ON(sizeof(target_ulong) > sizeof(run_on_cpu_data));

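/* We currently can't handle more than 16 bits in the MMUIDX bitmask.
 */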
QEMU_BUILD_BUG_ON(NB_MMU_MODES > 16);
#define ALL_MMUIDX_BITS ((1 << NB_MMU_MODES) - 1)

void tlb_init(CPUState *cpu)
{
    CPUArchState *env = cpu->env_ptr;

    qemu_spin_init(&env->tlb_c.lock);

    /* Ensure that cpu_reset performs a full flush.  */
    env->tlb_c.dirty = ALL_MMUIDX_BITS;
}

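/*
 * flush_all_helper: run @fn on all CPUs other than @src.
 *
 * The work is queued asynchronously on each remote CPU; callers that need
 * a synchronisation point additionally run @fn (or queue it as "safe" work)
 * on @src themselves, as the *_all_cpus{,_synced} wrappers below do.
 */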
static void flush_all_helper(CPUState *src, run_on_cpu_func fn,
                             run_on_cpu_data d)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (cpu != src) {
            async_run_on_cpu(cpu, fn, d);
        }
    }
}

void tlb_flush_counts(size_t *pfull, size_t *ppart, size_t *pelide)
{
    CPUState *cpu;
    size_t full = 0, part = 0, elide = 0;

    CPU_FOREACH(cpu) {
        CPUArchState *env = cpu->env_ptr;

        full += atomic_read(&env->tlb_c.full_flush_count);
        part += atomic_read(&env->tlb_c.part_flush_count);
        elide += atomic_read(&env->tlb_c.elide_flush_count);
    }
    *pfull = full;
    *ppart = part;
    *pelide = elide;
}

static void tlb_flush_one_mmuidx_locked(CPUArchState *env, int mmu_idx)
{
    memset(env->tlb_table[mmu_idx], -1, sizeof(env->tlb_table[0]));
    memset(env->tlb_v_table[mmu_idx], -1, sizeof(env->tlb_v_table[0]));
    env->tlb_d[mmu_idx].large_page_addr = -1;
    env->tlb_d[mmu_idx].large_page_mask = -1;
    env->tlb_d[mmu_idx].vindex = 0;
}

static void tlb_flush_by_mmuidx_async_work(CPUState *cpu, run_on_cpu_data data)
{
    CPUArchState *env = cpu->env_ptr;
    uint16_t asked = data.host_int;
    uint16_t all_dirty, work, to_clean;

    assert_cpu_is_self(cpu);

    tlb_debug("mmu_idx:0x%04" PRIx16 "\n", asked);

    qemu_spin_lock(&env->tlb_c.lock);

    all_dirty = env->tlb_c.dirty;
    to_clean = asked & all_dirty;
    all_dirty &= ~to_clean;
    env->tlb_c.dirty = all_dirty;

    for (work = to_clean; work != 0; work &= work - 1) {
        int mmu_idx = ctz32(work);
        tlb_flush_one_mmuidx_locked(env, mmu_idx);
    }

    qemu_spin_unlock(&env->tlb_c.lock);

    cpu_tb_jmp_cache_clear(cpu);

    if (to_clean == ALL_MMUIDX_BITS) {
        atomic_set(&env->tlb_c.full_flush_count,
                   env->tlb_c.full_flush_count + 1);
    } else {
        atomic_set(&env->tlb_c.part_flush_count,
                   env->tlb_c.part_flush_count + ctpop16(to_clean));
        if (to_clean != asked) {
            atomic_set(&env->tlb_c.elide_flush_count,
                       env->tlb_c.elide_flush_count +
                       ctpop16(asked & ~to_clean));
        }
    }
}

void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap)
{
    tlb_debug("mmu_idx: 0x%" PRIx16 "\n", idxmap);

    if (cpu->created && !qemu_cpu_is_self(cpu)) {
        async_run_on_cpu(cpu, tlb_flush_by_mmuidx_async_work,
                         RUN_ON_CPU_HOST_INT(idxmap));
    } else {
        tlb_flush_by_mmuidx_async_work(cpu, RUN_ON_CPU_HOST_INT(idxmap));
    }
}

void tlb_flush(CPUState *cpu)
{
    tlb_flush_by_mmuidx(cpu, ALL_MMUIDX_BITS);
}

void tlb_flush_by_mmuidx_all_cpus(CPUState *src_cpu, uint16_t idxmap)
{
    const run_on_cpu_func fn = tlb_flush_by_mmuidx_async_work;

    tlb_debug("mmu_idx: 0x%"PRIx16"\n", idxmap);

    flush_all_helper(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
    fn(src_cpu, RUN_ON_CPU_HOST_INT(idxmap));
}

void tlb_flush_all_cpus(CPUState *src_cpu)
{
    tlb_flush_by_mmuidx_all_cpus(src_cpu, ALL_MMUIDX_BITS);
}

void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *src_cpu, uint16_t idxmap)
{
    const run_on_cpu_func fn = tlb_flush_by_mmuidx_async_work;

    tlb_debug("mmu_idx: 0x%"PRIx16"\n", idxmap);

    flush_all_helper(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
    async_safe_run_on_cpu(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
}

void tlb_flush_all_cpus_synced(CPUState *src_cpu)
{
    tlb_flush_by_mmuidx_all_cpus_synced(src_cpu, ALL_MMUIDX_BITS);
}

static inline bool tlb_hit_page_anyprot(CPUTLBEntry *tlb_entry,
                                        target_ulong page)
{
    return tlb_hit_page(tlb_entry->addr_read, page) ||
           tlb_hit_page(tlb_addr_write(tlb_entry), page) ||
           tlb_hit_page(tlb_entry->addr_code, page);
}

/* Called with tlb_c.lock held */
static inline void tlb_flush_entry_locked(CPUTLBEntry *tlb_entry,
                                          target_ulong page)
{
    if (tlb_hit_page_anyprot(tlb_entry, page)) {
        memset(tlb_entry, -1, sizeof(*tlb_entry));
    }
}

/* Called with tlb_c.lock held */
static inline void tlb_flush_vtlb_page_locked(CPUArchState *env, int mmu_idx,
                                              target_ulong page)
{
    int k;

    assert_cpu_is_self(ENV_GET_CPU(env));
    for (k = 0; k < CPU_VTLB_SIZE; k++) {
        tlb_flush_entry_locked(&env->tlb_v_table[mmu_idx][k], page);
    }
}

static void tlb_flush_page_locked(CPUArchState *env, int midx,
                                  target_ulong page)
{
    target_ulong lp_addr = env->tlb_d[midx].large_page_addr;
    target_ulong lp_mask = env->tlb_d[midx].large_page_mask;

    /* Check if we need to flush due to large pages.  */
    if ((page & lp_mask) == lp_addr) {
        tlb_debug("forcing full flush midx %d ("
                  TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
                  midx, lp_addr, lp_mask);
        tlb_flush_one_mmuidx_locked(env, midx);
    } else {
        tlb_flush_entry_locked(tlb_entry(env, midx, page), page);
        tlb_flush_vtlb_page_locked(env, midx, page);
    }
}

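/* As we are going to hijack the bottom bits of the page address for a
 * mmuidx bit mask we need to fail to build if we can't do that
 */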
QEMU_BUILD_BUG_ON(NB_MMU_MODES > TARGET_PAGE_BITS_MIN);

static void tlb_flush_page_by_mmuidx_async_work(CPUState *cpu,
                                                run_on_cpu_data data)
{
    CPUArchState *env = cpu->env_ptr;
    target_ulong addr_and_mmuidx = (target_ulong) data.target_ptr;
    target_ulong addr = addr_and_mmuidx & TARGET_PAGE_MASK;
    unsigned long mmu_idx_bitmap = addr_and_mmuidx & ALL_MMUIDX_BITS;
    int mmu_idx;

    assert_cpu_is_self(cpu);

    tlb_debug("page addr:" TARGET_FMT_lx " mmu_map:0x%lx\n",
              addr, mmu_idx_bitmap);

    qemu_spin_lock(&env->tlb_c.lock);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        if (test_bit(mmu_idx, &mmu_idx_bitmap)) {
            tlb_flush_page_locked(env, mmu_idx, addr);
        }
    }
    qemu_spin_unlock(&env->tlb_c.lock);

    tb_flush_jmp_cache(cpu, addr);
}

void tlb_flush_page_by_mmuidx(CPUState *cpu, target_ulong addr, uint16_t idxmap)
{
    target_ulong addr_and_mmu_idx;

    tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%" PRIx16 "\n", addr, idxmap);

    /* This should already be page aligned */
    addr_and_mmu_idx = addr & TARGET_PAGE_MASK;
    addr_and_mmu_idx |= idxmap;

    if (!qemu_cpu_is_self(cpu)) {
        async_run_on_cpu(cpu, tlb_flush_page_by_mmuidx_async_work,
                         RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
    } else {
        tlb_flush_page_by_mmuidx_async_work(
            cpu, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
    }
}

void tlb_flush_page(CPUState *cpu, target_ulong addr)
{
    tlb_flush_page_by_mmuidx(cpu, addr, ALL_MMUIDX_BITS);
}

void tlb_flush_page_by_mmuidx_all_cpus(CPUState *src_cpu, target_ulong addr,
                                       uint16_t idxmap)
{
    const run_on_cpu_func fn = tlb_flush_page_by_mmuidx_async_work;
    target_ulong addr_and_mmu_idx;

    tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%"PRIx16"\n", addr, idxmap);

    /* This should already be page aligned */
    addr_and_mmu_idx = addr & TARGET_PAGE_MASK;
    addr_and_mmu_idx |= idxmap;

    flush_all_helper(src_cpu, fn, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
    fn(src_cpu, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
}

void tlb_flush_page_all_cpus(CPUState *src, target_ulong addr)
{
    tlb_flush_page_by_mmuidx_all_cpus(src, addr, ALL_MMUIDX_BITS);
}

void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
                                              target_ulong addr,
                                              uint16_t idxmap)
{
    const run_on_cpu_func fn = tlb_flush_page_by_mmuidx_async_work;
    target_ulong addr_and_mmu_idx;

    tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%"PRIx16"\n", addr, idxmap);

    /* This should already be page aligned */
    addr_and_mmu_idx = addr & TARGET_PAGE_MASK;
    addr_and_mmu_idx |= idxmap;

    flush_all_helper(src_cpu, fn, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
    async_safe_run_on_cpu(src_cpu, fn, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
}

void tlb_flush_page_all_cpus_synced(CPUState *src, target_ulong addr)
{
    tlb_flush_page_by_mmuidx_all_cpus_synced(src, addr, ALL_MMUIDX_BITS);
}

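/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */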
void tlb_protect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_test_and_clear_dirty(ram_addr, TARGET_PAGE_SIZE,
                                             DIRTY_MEMORY_CODE);
}

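/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */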
void tlb_unprotect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_set_dirty_flag(ram_addr, DIRTY_MEMORY_CODE);
}

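/*
 * Dirty write flag handling.
 *
 * When memory in [start, start + length) is flagged as clean, any TLB
 * entry whose addr_write points into that range gets TLB_NOTDIRTY set,
 * forcing subsequent guest writes through the slow path where the dirty
 * bitmap (and any TBs for the page) can be updated.
 *
 * This can be called from a vCPU other than the owner, so addr_write is
 * updated with atomic_set, except on TCG_OVERSIZED_GUEST hosts where a
 * TLB entry word is wider than what the host can store atomically.
 */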
static void tlb_reset_dirty_range_locked(CPUTLBEntry *tlb_entry,
                                         uintptr_t start, uintptr_t length)
{
    uintptr_t addr = tlb_entry->addr_write;

    if ((addr & (TLB_INVALID_MASK | TLB_MMIO | TLB_NOTDIRTY)) == 0) {
        addr &= TARGET_PAGE_MASK;
        addr += tlb_entry->addend;
        if ((addr - start) < length) {
#if TCG_OVERSIZED_GUEST
            tlb_entry->addr_write |= TLB_NOTDIRTY;
#else
            atomic_set(&tlb_entry->addr_write,
                       tlb_entry->addr_write | TLB_NOTDIRTY);
#endif
        }
    }
}

/* Called with tlb_c.lock held */
static inline void copy_tlb_helper_locked(CPUTLBEntry *d, const CPUTLBEntry *s)
{
    *d = *s;
}

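/* This is a cross vcpu call (i.e. another vcpu resetting the flags of
 * the target vCPU).
 * We must take tlb_c.lock to avoid racing with another vCPU update. The only
 * thing actually updated is the target TLB entry ->addr_write flags.
 */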
void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length)
{
    CPUArchState *env;

    int mmu_idx;

    env = cpu->env_ptr;
    qemu_spin_lock(&env->tlb_c.lock);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        unsigned int i;

        for (i = 0; i < CPU_TLB_SIZE; i++) {
            tlb_reset_dirty_range_locked(&env->tlb_table[mmu_idx][i], start1,
                                         length);
        }

        for (i = 0; i < CPU_VTLB_SIZE; i++) {
            tlb_reset_dirty_range_locked(&env->tlb_v_table[mmu_idx][i], start1,
                                         length);
        }
    }
    qemu_spin_unlock(&env->tlb_c.lock);
}

/* Called with tlb_c.lock held */
static inline void tlb_set_dirty1_locked(CPUTLBEntry *tlb_entry,
                                         target_ulong vaddr)
{
    if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY)) {
        tlb_entry->addr_write = vaddr;
    }
}

/* Clear TLB_NOTDIRTY for the virtual page 'vaddr': the page has been
   marked dirty, so writes may take the fast path again. */
void tlb_set_dirty(CPUState *cpu, target_ulong vaddr)
{
    CPUArchState *env = cpu->env_ptr;
    int mmu_idx;

    assert_cpu_is_self(cpu);

    vaddr &= TARGET_PAGE_MASK;
    qemu_spin_lock(&env->tlb_c.lock);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        tlb_set_dirty1_locked(tlb_entry(env, mmu_idx, vaddr), vaddr);
    }

    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        int k;
        for (k = 0; k < CPU_VTLB_SIZE; k++) {
            tlb_set_dirty1_locked(&env->tlb_v_table[mmu_idx][k], vaddr);
        }
    }
    qemu_spin_unlock(&env->tlb_c.lock);
}

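/* Remember a large (multi-page) mapping so that tlb_flush_page_locked can
 * fall back to a full flush of the mmu_idx when a page inside it is flushed.
 */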
static void tlb_add_large_page(CPUArchState *env, int mmu_idx,
                               target_ulong vaddr, target_ulong size)
{
    target_ulong lp_addr = env->tlb_d[mmu_idx].large_page_addr;
    target_ulong lp_mask = ~(size - 1);

    if (lp_addr == (target_ulong)-1) {
        /* No previous large page.  */
        lp_addr = vaddr;
    } else {
        /* Extend the existing region to include the new page.
           This is a compromise between unnecessary flushes and
           the cost of maintaining a full variable size TLB.  */
        lp_mask &= env->tlb_d[mmu_idx].large_page_mask;
        while (((lp_addr ^ vaddr) & lp_mask) != 0) {
            lp_mask <<= 1;
        }
    }
    env->tlb_d[mmu_idx].large_page_addr = lp_addr & lp_mask;
    env->tlb_d[mmu_idx].large_page_mask = lp_mask;
}

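/* Add a new TLB entry. At most one entry for a given virtual address
 * is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the
 * supplied size is only used by tlb_flush_page.
 *
 * Called from TCG-generated code, which is under an RCU read-side
 * critical section.
 */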
void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
                             hwaddr paddr, MemTxAttrs attrs, int prot,
                             int mmu_idx, target_ulong size)
{
    CPUArchState *env = cpu->env_ptr;
    MemoryRegionSection *section;
    unsigned int index;
    target_ulong address;
    target_ulong code_address;
    uintptr_t addend;
    CPUTLBEntry *te, tn;
    hwaddr iotlb, xlat, sz, paddr_page;
    target_ulong vaddr_page;
    int asidx = cpu_asidx_from_attrs(cpu, attrs);

    assert_cpu_is_self(cpu);

    if (size <= TARGET_PAGE_SIZE) {
        sz = TARGET_PAGE_SIZE;
    } else {
        tlb_add_large_page(env, mmu_idx, vaddr, size);
        sz = size;
    }
    vaddr_page = vaddr & TARGET_PAGE_MASK;
    paddr_page = paddr & TARGET_PAGE_MASK;

    section = address_space_translate_for_iotlb(cpu, asidx, paddr_page,
                                                &xlat, &sz, attrs, &prot);
    assert(sz >= TARGET_PAGE_SIZE);

    tlb_debug("vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx
              " prot=%x idx=%d\n",
              vaddr, paddr, prot, mmu_idx);

    address = vaddr_page;
    if (size < TARGET_PAGE_SIZE) {
        /*
         * The mapping covers less than a target page: slow-path the
         * TLB entry so the MMU check and TLB fill are repeated on
         * every access.
         */
        address |= TLB_RECHECK;
    }
    if (!memory_region_is_ram(section->mr) &&
        !memory_region_is_romd(section->mr)) {
        /* IO memory case */
        address |= TLB_MMIO;
        addend = 0;
    } else {
        /* TLB_MMIO for rom/romd handled below */
        addend = (uintptr_t)memory_region_get_ram_ptr(section->mr) + xlat;
    }

    code_address = address;
    iotlb = memory_region_section_get_iotlb(cpu, section, vaddr_page,
                                            paddr_page, xlat, prot, &address);

    index = tlb_index(env, mmu_idx, vaddr_page);
    te = tlb_entry(env, mmu_idx, vaddr_page);

    /*
     * Hold the TLB lock for the rest of the function. We could acquire/release
     * the lock several times in the function, but it is faster to amortize the
     * acquisition cost by acquiring it just once. Note that this leads to
     * a longer critical section, but this is not a concern since the TLB lock
     * is unlikely to be contended.
     */
    qemu_spin_lock(&env->tlb_c.lock);

    /* Note that the tlb is no longer clean.  */
    env->tlb_c.dirty |= 1 << mmu_idx;

    /* Make sure there's no cached translation for the new page.  */
    tlb_flush_vtlb_page_locked(env, mmu_idx, vaddr_page);

    /*
     * Only evict the old entry to the victim tlb if it's for a
     * different page; otherwise just overwrite the stale data.
     */
    if (!tlb_hit_page_anyprot(te, vaddr_page)) {
        unsigned vidx = env->tlb_d[mmu_idx].vindex++ % CPU_VTLB_SIZE;
        CPUTLBEntry *tv = &env->tlb_v_table[mmu_idx][vidx];

        /* Evict the old entry into the victim tlb.  */
        copy_tlb_helper_locked(tv, te);
        env->iotlb_v[mmu_idx][vidx] = env->iotlb[mmu_idx][index];
    }

    /*
     * Refill the TLB. At this point iotlb contains a physical section
     * number in the lower TARGET_PAGE_BITS, and either
     *  + the ram_addr_t of the page base of the target RAM (if NOTDIRTY or ROM)
     *  + the offset within section->mr of the page base (otherwise)
     * We subtract the vaddr_page (which is page aligned and thus won't
     * disturb the low bits) to give an offset which can be added to the
     * (non-page-aligned) vaddr of the eventual memory access to get
     * the MemoryRegion offset for the access.
     */
    env->iotlb[mmu_idx][index].addr = iotlb - vaddr_page;
    env->iotlb[mmu_idx][index].attrs = attrs;

    /* Now calculate the new entry */
    tn.addend = addend - vaddr_page;
    if (prot & PAGE_READ) {
        tn.addr_read = address;
    } else {
        tn.addr_read = -1;
    }

    if (prot & PAGE_EXEC) {
        tn.addr_code = code_address;
    } else {
        tn.addr_code = -1;
    }

    tn.addr_write = -1;
    if (prot & PAGE_WRITE) {
        if ((memory_region_is_ram(section->mr) && section->readonly)
            || memory_region_is_romd(section->mr)) {
            /* Write access calls the I/O callback.  */
            tn.addr_write = address | TLB_MMIO;
        } else if (memory_region_is_ram(section->mr)
                   && cpu_physical_memory_is_clean(
                       memory_region_get_ram_addr(section->mr) + xlat)) {
            tn.addr_write = address | TLB_NOTDIRTY;
        } else {
            tn.addr_write = address;
        }
        if (prot & PAGE_WRITE_INV) {
            tn.addr_write |= TLB_INVALID_MASK;
        }
    }

    copy_tlb_helper_locked(te, &tn);
    qemu_spin_unlock(&env->tlb_c.lock);
}

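/* Add a new TLB entry, but without specifying the memory
 * transaction attributes to be used.
 */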
void tlb_set_page(CPUState *cpu, target_ulong vaddr,
                  hwaddr paddr, int prot,
                  int mmu_idx, target_ulong size)
{
    tlb_set_page_with_attrs(cpu, vaddr, paddr, MEMTXATTRS_UNSPECIFIED,
                            prot, mmu_idx, size);
}

static inline ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
{
    ram_addr_t ram_addr;

    ram_addr = qemu_ram_addr_from_host(ptr);
    if (ram_addr == RAM_ADDR_INVALID) {
        error_report("Bad ram pointer %p", ptr);
        abort();
    }
    return ram_addr;
}

static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
                         int mmu_idx,
                         target_ulong addr, uintptr_t retaddr,
                         bool recheck, MMUAccessType access_type, int size)
{
    CPUState *cpu = ENV_GET_CPU(env);
    hwaddr mr_offset;
    MemoryRegionSection *section;
    MemoryRegion *mr;
    uint64_t val;
    bool locked = false;
    MemTxResult r;

    if (recheck) {
        /*
         * This is a TLB_RECHECK access, where the MMU protection
         * covers a smaller range than a target page, and we must
         * repeat the MMU check here. This tlb_fill() call might
         * longjump out if this access should cause a guest exception.
         */
        CPUTLBEntry *entry;
        target_ulong tlb_addr;

        tlb_fill(cpu, addr, size, MMU_DATA_LOAD, mmu_idx, retaddr);

        entry = tlb_entry(env, mmu_idx, addr);
        tlb_addr = entry->addr_read;
        if (!(tlb_addr & ~(TARGET_PAGE_MASK | TLB_RECHECK))) {
            /* RAM access */
            uintptr_t haddr = addr + entry->addend;

            return ldn_p((void *)haddr, size);
        }
        /* Fall through for handling IO accesses */
    }

    section = iotlb_to_section(cpu, iotlbentry->addr, iotlbentry->attrs);
    mr = section->mr;
    mr_offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr;
    cpu->mem_io_pc = retaddr;
    if (mr != &io_mem_rom && mr != &io_mem_notdirty && !cpu->can_do_io) {
        cpu_io_recompile(cpu, retaddr);
    }

    cpu->mem_io_vaddr = addr;
    cpu->mem_io_access_type = access_type;

    if (mr->global_locking && !qemu_mutex_iothread_locked()) {
        qemu_mutex_lock_iothread();
        locked = true;
    }
    r = memory_region_dispatch_read(mr, mr_offset,
                                    &val, size, iotlbentry->attrs);
    if (r != MEMTX_OK) {
        hwaddr physaddr = mr_offset +
            section->offset_within_address_space -
            section->offset_within_region;

        cpu_transaction_failed(cpu, physaddr, addr, size, access_type,
                               mmu_idx, iotlbentry->attrs, r, retaddr);
    }
    if (locked) {
        qemu_mutex_unlock_iothread();
    }

    return val;
}

static void io_writex(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
                      int mmu_idx,
                      uint64_t val, target_ulong addr,
                      uintptr_t retaddr, bool recheck, int size)
{
    CPUState *cpu = ENV_GET_CPU(env);
    hwaddr mr_offset;
    MemoryRegionSection *section;
    MemoryRegion *mr;
    bool locked = false;
    MemTxResult r;

    if (recheck) {
        /*
         * This is a TLB_RECHECK access, where the MMU protection
         * covers a smaller range than a target page, and we must
         * repeat the MMU check here. This tlb_fill() call might
         * longjump out if this access should cause a guest exception.
         */
        CPUTLBEntry *entry;
        target_ulong tlb_addr;

        tlb_fill(cpu, addr, size, MMU_DATA_STORE, mmu_idx, retaddr);

        entry = tlb_entry(env, mmu_idx, addr);
        tlb_addr = tlb_addr_write(entry);
        if (!(tlb_addr & ~(TARGET_PAGE_MASK | TLB_RECHECK))) {
            /* RAM access */
            uintptr_t haddr = addr + entry->addend;

            stn_p((void *)haddr, size, val);
            return;
        }
        /* Fall through for handling IO accesses */
    }

    section = iotlb_to_section(cpu, iotlbentry->addr, iotlbentry->attrs);
    mr = section->mr;
    mr_offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr;
    if (mr != &io_mem_rom && mr != &io_mem_notdirty && !cpu->can_do_io) {
        cpu_io_recompile(cpu, retaddr);
    }
    cpu->mem_io_vaddr = addr;
    cpu->mem_io_pc = retaddr;

    if (mr->global_locking && !qemu_mutex_iothread_locked()) {
        qemu_mutex_lock_iothread();
        locked = true;
    }
    r = memory_region_dispatch_write(mr, mr_offset,
                                     val, size, iotlbentry->attrs);
    if (r != MEMTX_OK) {
        hwaddr physaddr = mr_offset +
            section->offset_within_address_space -
            section->offset_within_region;

        cpu_transaction_failed(cpu, physaddr, addr, size, MMU_DATA_STORE,
                               mmu_idx, iotlbentry->attrs, r, retaddr);
    }
    if (locked) {
        qemu_mutex_unlock_iothread();
    }
}

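/* Return true if ADDR is present in the victim tlb, and has been copied
   back to the main tlb.  */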
static bool victim_tlb_hit(CPUArchState *env, size_t mmu_idx, size_t index,
                           size_t elt_ofs, target_ulong page)
{
    size_t vidx;

    assert_cpu_is_self(ENV_GET_CPU(env));
    for (vidx = 0; vidx < CPU_VTLB_SIZE; ++vidx) {
        CPUTLBEntry *vtlb = &env->tlb_v_table[mmu_idx][vidx];
        target_ulong cmp;

        /* elt_ofs might correspond to .addr_write, so use atomic_read */
#if TCG_OVERSIZED_GUEST
        cmp = *(target_ulong *)((uintptr_t)vtlb + elt_ofs);
#else
        cmp = atomic_read((target_ulong *)((uintptr_t)vtlb + elt_ofs));
#endif

        if (cmp == page) {
            /* Found entry in victim tlb, swap tlb and iotlb.  */
            CPUTLBEntry tmptlb, *tlb = &env->tlb_table[mmu_idx][index];

            qemu_spin_lock(&env->tlb_c.lock);
            copy_tlb_helper_locked(&tmptlb, tlb);
            copy_tlb_helper_locked(tlb, vtlb);
            copy_tlb_helper_locked(vtlb, &tmptlb);
            qemu_spin_unlock(&env->tlb_c.lock);

            CPUIOTLBEntry tmpio, *io = &env->iotlb[mmu_idx][index];
            CPUIOTLBEntry *vio = &env->iotlb_v[mmu_idx][vidx];
            tmpio = *io; *io = *vio; *vio = tmpio;
            return true;
        }
    }
    return false;
}

/* Macro to call the above, with local variables from the use context.  */
#define VICTIM_TLB_HIT(TY, ADDR) \
  victim_tlb_hit(env, mmu_idx, index, offsetof(CPUTLBEntry, TY), \
                 (ADDR) & TARGET_PAGE_MASK)

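/*
 * Return a ram_addr_t for the virtual address for execution.
 *
 * Return -1 if we can't translate and execute from an entire page
 * of RAM.  This will force us to execute by loading and translating
 * one insn at a time, without caching.
 *
 * NOTE: This function will trigger an exception if the page is
 * not executable.
 */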
tb_page_addr_t get_page_addr_code(CPUArchState *env, target_ulong addr)
{
    uintptr_t mmu_idx = cpu_mmu_index(env, true);
    uintptr_t index = tlb_index(env, mmu_idx, addr);
    CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
    void *p;

    if (unlikely(!tlb_hit(entry->addr_code, addr))) {
        if (!VICTIM_TLB_HIT(addr_code, addr)) {
            tlb_fill(ENV_GET_CPU(env), addr, 0, MMU_INST_FETCH, mmu_idx, 0);
        }
        assert(tlb_hit(entry->addr_code, addr));
    }

    if (unlikely(entry->addr_code & (TLB_RECHECK | TLB_MMIO))) {
        /*
         * Return -1 if we can't translate and execute from an entire
         * page of RAM here, which will cause us to execute by loading
         * and translating one insn at a time, without caching:
         *  - TLB_RECHECK: means the MMU protection covers a smaller range
         *    than a target page, so we must redo the MMU check every insn
         *  - TLB_MMIO: region is not backed by RAM
         */
        return -1;
    }

    p = (void *)((uintptr_t)addr + entry->addend);
    return qemu_ram_addr_from_host_nofail(p);
}

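/* Probe for whether the specified guest write access is permitted.
 * If it is not permitted then an exception will be taken in the same
 * way as if this were a real write access (and we will not return).
 * Otherwise the function will return, and there will be a valid
 * entry in the TLB for this access.
 */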
void probe_write(CPUArchState *env, target_ulong addr, int size, int mmu_idx,
                 uintptr_t retaddr)
{
    uintptr_t index = tlb_index(env, mmu_idx, addr);
    CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);

    if (!tlb_hit(tlb_addr_write(entry), addr)) {
        /* TLB entry is for a different page */
        if (!VICTIM_TLB_HIT(addr_write, addr)) {
            tlb_fill(ENV_GET_CPU(env), addr, size, MMU_DATA_STORE,
                     mmu_idx, retaddr);
        }
    }
}

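/* Probe for a read-modify-write atomic operation.  Do not allow unaligned
 * operations, or io operations to proceed.  Return the host address.  */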
static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
                               TCGMemOpIdx oi, uintptr_t retaddr,
                               NotDirtyInfo *ndi)
{
    size_t mmu_idx = get_mmuidx(oi);
    uintptr_t index = tlb_index(env, mmu_idx, addr);
    CPUTLBEntry *tlbe = tlb_entry(env, mmu_idx, addr);
    target_ulong tlb_addr = tlb_addr_write(tlbe);
    TCGMemOp mop = get_memop(oi);
    int a_bits = get_alignment_bits(mop);
    int s_bits = mop & MO_SIZE;
    void *hostaddr;

    /* Adjust the given return address.  */
    retaddr -= GETPC_ADJ;

    /* Enforce guest required alignment.  */
    if (unlikely(a_bits > 0 && (addr & ((1 << a_bits) - 1)))) {
        /* ??? Maybe indicate atomic op to cpu_unaligned_access */
        cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
                             mmu_idx, retaddr);
    }

    /* Enforce qemu required alignment.  */
    if (unlikely(addr & ((1 << s_bits) - 1))) {
        /* We get here if guest alignment was not requested,
           or was not enforced by cpu_unaligned_access above.
           We might widen the access and emulate, but for now
           mark an exception and exit the cpu loop.  */
        goto stop_the_world;
    }

    /* Check TLB entry and enforce page permissions.  */
    if (!tlb_hit(tlb_addr, addr)) {
        if (!VICTIM_TLB_HIT(addr_write, addr)) {
            tlb_fill(ENV_GET_CPU(env), addr, 1 << s_bits, MMU_DATA_STORE,
                     mmu_idx, retaddr);
        }
        tlb_addr = tlb_addr_write(tlbe) & ~TLB_INVALID_MASK;
    }

    /* Notice an IO access or a needs-MMU-lookup access */
    if (unlikely(tlb_addr & (TLB_MMIO | TLB_RECHECK))) {
        /* There's really nothing that can be done to
           support this apart from stop-the-world.  */
        goto stop_the_world;
    }

    /* Let the guest notice RMW on a write-only page.  */
    if (unlikely(tlbe->addr_read != (tlb_addr & ~TLB_NOTDIRTY))) {
        tlb_fill(ENV_GET_CPU(env), addr, 1 << s_bits, MMU_DATA_LOAD,
                 mmu_idx, retaddr);
        /* Since we don't support reads and writes to different addresses,
           and we do have the proper page loaded for write, this shouldn't
           ever return.  But just in case, handle via stop-the-world.  */
        goto stop_the_world;
    }

    hostaddr = (void *)((uintptr_t)addr + tlbe->addend);

    ndi->active = false;
    if (unlikely(tlb_addr & TLB_NOTDIRTY)) {
        ndi->active = true;
        memory_notdirty_write_prepare(ndi, ENV_GET_CPU(env), addr,
                                      qemu_ram_addr_from_host_nofail(hostaddr),
                                      1 << s_bits);
    }

    return hostaddr;

 stop_the_world:
    cpu_loop_exit_atomic(ENV_GET_CPU(env), retaddr);
}

#ifdef TARGET_WORDS_BIGENDIAN
# define TGT_BE(X)  (X)
# define TGT_LE(X)  BSWAP(X)
#else
# define TGT_BE(X)  BSWAP(X)
# define TGT_LE(X)  (X)
#endif

#define MMUSUFFIX _mmu

#define DATA_SIZE 1
#include "softmmu_template.h"

#define DATA_SIZE 2
#include "softmmu_template.h"

#define DATA_SIZE 4
#include "softmmu_template.h"

#define DATA_SIZE 8
#include "softmmu_template.h"

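/* First set of helpers allows passing in of OI and RETADDR.  This makes
   them callable from other helpers.  */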
#define EXTRA_ARGS , TCGMemOpIdx oi, uintptr_t retaddr
#define ATOMIC_NAME(X) \
    HELPER(glue(glue(glue(atomic_ ## X, SUFFIX), END), _mmu))
#define ATOMIC_MMU_DECLS NotDirtyInfo ndi
#define ATOMIC_MMU_LOOKUP atomic_mmu_lookup(env, addr, oi, retaddr, &ndi)
#define ATOMIC_MMU_CLEANUP                              \
    do {                                                \
        if (unlikely(ndi.active)) {                     \
            memory_notdirty_write_complete(&ndi);       \
        }                                               \
    } while (0)

#define DATA_SIZE 1
#include "atomic_template.h"

#define DATA_SIZE 2
#include "atomic_template.h"

#define DATA_SIZE 4
#include "atomic_template.h"

#ifdef CONFIG_ATOMIC64
#define DATA_SIZE 8
#include "atomic_template.h"
#endif

#if HAVE_CMPXCHG128 || HAVE_ATOMIC128
#define DATA_SIZE 16
#include "atomic_template.h"
#endif

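/* Second set of helpers are directly callable from TCG as helpers.  */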
#undef EXTRA_ARGS
#undef ATOMIC_NAME
#undef ATOMIC_MMU_LOOKUP
#define EXTRA_ARGS         , TCGMemOpIdx oi
#define ATOMIC_NAME(X)     HELPER(glue(glue(atomic_ ## X, SUFFIX), END))
#define ATOMIC_MMU_LOOKUP  atomic_mmu_lookup(env, addr, oi, GETPC(), &ndi)

#define DATA_SIZE 1
#include "atomic_template.h"

#define DATA_SIZE 2
#include "atomic_template.h"

#define DATA_SIZE 4
#include "atomic_template.h"

#ifdef CONFIG_ATOMIC64
#define DATA_SIZE 8
#include "atomic_template.h"
#endif

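/* Code access functions.  */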
#undef MMUSUFFIX
#define MMUSUFFIX _cmmu
#undef GETPC
#define GETPC() ((uintptr_t)0)
#define SOFTMMU_CODE_ACCESS

#define DATA_SIZE 1
#include "softmmu_template.h"

#define DATA_SIZE 2
#include "softmmu_template.h"

#define DATA_SIZE 4
#include "softmmu_template.h"

#define DATA_SIZE 8
#include "softmmu_template.h"