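/*
 * Common CPU TLB handling for the TCG softmmu path: per-mmu_idx dynamic
 * TLBs, a victim TLB, flush and dirty-tracking helpers, and the out-of-line
 * load/store/atomic helpers called from generated code.
 */
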
#include "qemu/osdep.h"
#include "qemu/main-loop.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "exec/memory.h"
#include "exec/address-spaces.h"
#include "exec/cpu_ldst.h"
#include "exec/cputlb.h"
#include "exec/memory-internal.h"
#include "exec/ram_addr.h"
#include "tcg/tcg.h"
#include "qemu/error-report.h"
#include "exec/log.h"
#include "exec/helper-proto.h"
#include "qemu/atomic.h"
#include "qemu/atomic128.h"

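/* Define DEBUG_TLB (and optionally DEBUG_TLB_LOG) to get verbose TLB debug output. */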
#ifdef DEBUG_TLB
# define DEBUG_TLB_GATE 1
# ifdef DEBUG_TLB_LOG
#  define DEBUG_TLB_LOG_GATE 1
# else
#  define DEBUG_TLB_LOG_GATE 0
# endif
#else
# define DEBUG_TLB_GATE 0
# define DEBUG_TLB_LOG_GATE 0
#endif

#define tlb_debug(fmt, ...) do { \
    if (DEBUG_TLB_LOG_GATE) { \
        qemu_log_mask(CPU_LOG_MMU, "%s: " fmt, __func__, \
                      ## __VA_ARGS__); \
    } else if (DEBUG_TLB_GATE) { \
        fprintf(stderr, "%s: " fmt, __func__, ## __VA_ARGS__); \
    } \
} while (0)

#define assert_cpu_is_self(cpu) do {                              \
        if (DEBUG_TLB_GATE) {                                     \
            g_assert(!(cpu)->created || qemu_cpu_is_self(cpu));   \
        }                                                         \
    } while (0)

/* run_on_cpu_data must be large enough to hold a target_ulong. */
QEMU_BUILD_BUG_ON(sizeof(target_ulong) > sizeof(run_on_cpu_data));

/* The mmu_idx bitmaps passed around below are only 16 bits wide. */
QEMU_BUILD_BUG_ON(NB_MMU_MODES > 16);
#define ALL_MMUIDX_BITS ((1 << NB_MMU_MODES) - 1)

/* Total size, in bytes, of the fast-path TLB table for @mmu_idx. */
static inline size_t sizeof_tlb(CPUArchState *env, uintptr_t mmu_idx)
{
    return env_tlb(env)->f[mmu_idx].mask + (1 << CPU_TLB_ENTRY_BITS);
}

/* Start a new usage-tracking window for the dynamic TLB resizing. */
static void tlb_window_reset(CPUTLBDesc *desc, int64_t ns,
                             size_t max_entries)
{
    desc->window_begin_ns = ns;
    desc->window_max_entries = max_entries;
}

/* Allocate each per-mmu_idx TLB at its default dynamic size. */
static void tlb_dyn_init(CPUArchState *env)
{
    int i;

    for (i = 0; i < NB_MMU_MODES; i++) {
        CPUTLBDesc *desc = &env_tlb(env)->d[i];
        size_t n_entries = 1 << CPU_TLB_DYN_DEFAULT_BITS;

        tlb_window_reset(desc, get_clock_realtime(), 0);
        desc->n_used_entries = 0;
        env_tlb(env)->f[i].mask = (n_entries - 1) << CPU_TLB_ENTRY_BITS;
        env_tlb(env)->f[i].table = g_new(CPUTLBEntry, n_entries);
        env_tlb(env)->d[i].iotlb = g_new(CPUIOTLBEntry, n_entries);
    }
}
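
/*
 * Dynamic TLB resizing, based on how many entries were used during the
 * previous window (roughly 100 ms):
 *
 *  - If more than 70% of the current table was in use, double the size
 *    (capped at 1 << CPU_TLB_DYN_MAX_BITS).
 *  - If less than 30% was in use and the window has expired, shrink the
 *    table towards the power of two that fits the observed maximum usage
 *    (never below 1 << CPU_TLB_DYN_MIN_BITS).
 *
 * Called with tlb->c.lock held, as the _locked suffix indicates.
 */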
static void tlb_mmu_resize_locked(CPUArchState *env, int mmu_idx)
{
    CPUTLBDesc *desc = &env_tlb(env)->d[mmu_idx];
    size_t old_size = tlb_n_entries(env, mmu_idx);
    size_t rate;
    size_t new_size = old_size;
    int64_t now = get_clock_realtime();
    int64_t window_len_ms = 100;
    int64_t window_len_ns = window_len_ms * 1000 * 1000;
    bool window_expired = now > desc->window_begin_ns + window_len_ns;

    if (desc->n_used_entries > desc->window_max_entries) {
        desc->window_max_entries = desc->n_used_entries;
    }
    rate = desc->window_max_entries * 100 / old_size;

    if (rate > 70) {
        new_size = MIN(old_size << 1, 1 << CPU_TLB_DYN_MAX_BITS);
    } else if (rate < 30 && window_expired) {
        size_t ceil = pow2ceil(desc->window_max_entries);
        size_t expected_rate = desc->window_max_entries * 100 / ceil;

        /*
         * Avoid undersizing when shrinking: if the rounded-up size would
         * itself be more than 70% full, grow one further step so that we
         * do not have to resize again almost immediately.
         */
        if (expected_rate > 70) {
            ceil *= 2;
        }
        new_size = MAX(ceil, 1 << CPU_TLB_DYN_MIN_BITS);
    }

    if (new_size == old_size) {
        if (window_expired) {
            tlb_window_reset(desc, now, desc->n_used_entries);
        }
        return;
    }

    g_free(env_tlb(env)->f[mmu_idx].table);
    g_free(env_tlb(env)->d[mmu_idx].iotlb);

    tlb_window_reset(desc, now, 0);
    /* desc->n_used_entries is cleared by the caller, tlb_table_flush_by_mmuidx. */
    env_tlb(env)->f[mmu_idx].mask = (new_size - 1) << CPU_TLB_ENTRY_BITS;
    env_tlb(env)->f[mmu_idx].table = g_try_new(CPUTLBEntry, new_size);
    env_tlb(env)->d[mmu_idx].iotlb = g_try_new(CPUIOTLBEntry, new_size);

    /*
     * If an allocation fails, retry with progressively smaller sizes;
     * abort only if even the minimum supported size cannot be allocated.
     */
    while (env_tlb(env)->f[mmu_idx].table == NULL ||
           env_tlb(env)->d[mmu_idx].iotlb == NULL) {
        if (new_size == (1 << CPU_TLB_DYN_MIN_BITS)) {
            error_report("%s: %s", __func__, strerror(errno));
            abort();
        }
        new_size = MAX(new_size >> 1, 1 << CPU_TLB_DYN_MIN_BITS);
        env_tlb(env)->f[mmu_idx].mask = (new_size - 1) << CPU_TLB_ENTRY_BITS;

        g_free(env_tlb(env)->f[mmu_idx].table);
        g_free(env_tlb(env)->d[mmu_idx].iotlb);
        env_tlb(env)->f[mmu_idx].table = g_try_new(CPUTLBEntry, new_size);
        env_tlb(env)->d[mmu_idx].iotlb = g_try_new(CPUIOTLBEntry, new_size);
    }
}

/* Flush (and possibly resize) one TLB; called with tlb->c.lock held. */
static inline void tlb_table_flush_by_mmuidx(CPUArchState *env, int mmu_idx)
{
    tlb_mmu_resize_locked(env, mmu_idx);
    memset(env_tlb(env)->f[mmu_idx].table, -1, sizeof_tlb(env, mmu_idx));
    env_tlb(env)->d[mmu_idx].n_used_entries = 0;
}

static inline void tlb_n_used_entries_inc(CPUArchState *env, uintptr_t mmu_idx)
{
    env_tlb(env)->d[mmu_idx].n_used_entries++;
}

static inline void tlb_n_used_entries_dec(CPUArchState *env, uintptr_t mmu_idx)
{
    env_tlb(env)->d[mmu_idx].n_used_entries--;
}

void tlb_init(CPUState *cpu)
{
    CPUArchState *env = cpu->env_ptr;

    qemu_spin_init(&env_tlb(env)->c.lock);

    /* Mark all mmu indexes dirty so the first requested flush is not elided. */
    env_tlb(env)->c.dirty = ALL_MMUIDX_BITS;

    tlb_dyn_init(env);
}
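
/* Queue @fn(@d) to run asynchronously on every vCPU other than @src. */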
static void flush_all_helper(CPUState *src, run_on_cpu_func fn,
                             run_on_cpu_data d)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (cpu != src) {
            async_run_on_cpu(cpu, fn, d);
        }
    }
}

void tlb_flush_counts(size_t *pfull, size_t *ppart, size_t *pelide)
{
    CPUState *cpu;
    size_t full = 0, part = 0, elide = 0;

    CPU_FOREACH(cpu) {
        CPUArchState *env = cpu->env_ptr;

        full += atomic_read(&env_tlb(env)->c.full_flush_count);
        part += atomic_read(&env_tlb(env)->c.part_flush_count);
        elide += atomic_read(&env_tlb(env)->c.elide_flush_count);
    }
    *pfull = full;
    *ppart = part;
    *pelide = elide;
}

static void tlb_flush_one_mmuidx_locked(CPUArchState *env, int mmu_idx)
{
    tlb_table_flush_by_mmuidx(env, mmu_idx);
    env_tlb(env)->d[mmu_idx].large_page_addr = -1;
    env_tlb(env)->d[mmu_idx].large_page_mask = -1;
    env_tlb(env)->d[mmu_idx].vindex = 0;
    memset(env_tlb(env)->d[mmu_idx].vtable, -1,
           sizeof(env_tlb(env)->d[0].vtable));
}

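/*
 * Flush, on this vCPU, every mmu_idx named in data.host_int that still has
 * (possibly) valid entries. Clean indexes are skipped and counted as elided
 * flushes; a flush of all indexes is counted as a full flush.
 */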
static void tlb_flush_by_mmuidx_async_work(CPUState *cpu, run_on_cpu_data data)
{
    CPUArchState *env = cpu->env_ptr;
    uint16_t asked = data.host_int;
    uint16_t all_dirty, work, to_clean;

    assert_cpu_is_self(cpu);

    tlb_debug("mmu_idx:0x%04" PRIx16 "\n", asked);

    qemu_spin_lock(&env_tlb(env)->c.lock);

    all_dirty = env_tlb(env)->c.dirty;
    to_clean = asked & all_dirty;
    all_dirty &= ~to_clean;
    env_tlb(env)->c.dirty = all_dirty;

    for (work = to_clean; work != 0; work &= work - 1) {
        int mmu_idx = ctz32(work);
        tlb_flush_one_mmuidx_locked(env, mmu_idx);
    }

    qemu_spin_unlock(&env_tlb(env)->c.lock);

    cpu_tb_jmp_cache_clear(cpu);

    if (to_clean == ALL_MMUIDX_BITS) {
        atomic_set(&env_tlb(env)->c.full_flush_count,
                   env_tlb(env)->c.full_flush_count + 1);
    } else {
        atomic_set(&env_tlb(env)->c.part_flush_count,
                   env_tlb(env)->c.part_flush_count + ctpop16(to_clean));
        if (to_clean != asked) {
            atomic_set(&env_tlb(env)->c.elide_flush_count,
                       env_tlb(env)->c.elide_flush_count +
                       ctpop16(asked & ~to_clean));
        }
    }
}

void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap)
{
    tlb_debug("mmu_idx: 0x%" PRIx16 "\n", idxmap);

    if (cpu->created && !qemu_cpu_is_self(cpu)) {
        async_run_on_cpu(cpu, tlb_flush_by_mmuidx_async_work,
                         RUN_ON_CPU_HOST_INT(idxmap));
    } else {
        tlb_flush_by_mmuidx_async_work(cpu, RUN_ON_CPU_HOST_INT(idxmap));
    }
}

void tlb_flush(CPUState *cpu)
{
    tlb_flush_by_mmuidx(cpu, ALL_MMUIDX_BITS);
}

void tlb_flush_by_mmuidx_all_cpus(CPUState *src_cpu, uint16_t idxmap)
{
    const run_on_cpu_func fn = tlb_flush_by_mmuidx_async_work;

    tlb_debug("mmu_idx: 0x%"PRIx16"\n", idxmap);

    flush_all_helper(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
    fn(src_cpu, RUN_ON_CPU_HOST_INT(idxmap));
}

void tlb_flush_all_cpus(CPUState *src_cpu)
{
    tlb_flush_by_mmuidx_all_cpus(src_cpu, ALL_MMUIDX_BITS);
}

void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *src_cpu, uint16_t idxmap)
{
    const run_on_cpu_func fn = tlb_flush_by_mmuidx_async_work;

    tlb_debug("mmu_idx: 0x%"PRIx16"\n", idxmap);

    flush_all_helper(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
    async_safe_run_on_cpu(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
}

void tlb_flush_all_cpus_synced(CPUState *src_cpu)
{
    tlb_flush_by_mmuidx_all_cpus_synced(src_cpu, ALL_MMUIDX_BITS);
}

static inline bool tlb_hit_page_anyprot(CPUTLBEntry *tlb_entry,
                                        target_ulong page)
{
    return tlb_hit_page(tlb_entry->addr_read, page) ||
           tlb_hit_page(tlb_addr_write(tlb_entry), page) ||
           tlb_hit_page(tlb_entry->addr_code, page);
}

/* Return true if the entry is not in use (all comparators are -1). */
static inline bool tlb_entry_is_empty(const CPUTLBEntry *te)
{
    return te->addr_read == -1 && te->addr_write == -1 && te->addr_code == -1;
}

/* Called with tlb->c.lock held. Returns true if the entry was flushed. */
static inline bool tlb_flush_entry_locked(CPUTLBEntry *tlb_entry,
                                          target_ulong page)
{
    if (tlb_hit_page_anyprot(tlb_entry, page)) {
        memset(tlb_entry, -1, sizeof(*tlb_entry));
        return true;
    }
    return false;
}

/* Flush any victim TLB entries matching @page. Called with tlb->c.lock held. */
static inline void tlb_flush_vtlb_page_locked(CPUArchState *env, int mmu_idx,
                                              target_ulong page)
{
    CPUTLBDesc *d = &env_tlb(env)->d[mmu_idx];
    int k;

    assert_cpu_is_self(env_cpu(env));
    for (k = 0; k < CPU_VTLB_SIZE; k++) {
        if (tlb_flush_entry_locked(&d->vtable[k], page)) {
            tlb_n_used_entries_dec(env, mmu_idx);
        }
    }
}

static void tlb_flush_page_locked(CPUArchState *env, int midx,
                                  target_ulong page)
{
    target_ulong lp_addr = env_tlb(env)->d[midx].large_page_addr;
    target_ulong lp_mask = env_tlb(env)->d[midx].large_page_mask;

    /* Check whether the page falls inside the recorded large-page range. */
    if ((page & lp_mask) == lp_addr) {
        tlb_debug("forcing full flush midx %d ("
                  TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
                  midx, lp_addr, lp_mask);
        tlb_flush_one_mmuidx_locked(env, midx);
    } else {
        if (tlb_flush_entry_locked(tlb_entry(env, midx, page), page)) {
            tlb_n_used_entries_dec(env, midx);
        }
        tlb_flush_vtlb_page_locked(env, midx, page);
    }
}

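/*
 * The low bits of a page-aligned address are reused below to carry an
 * mmu_idx bitmap, so all NB_MMU_MODES bits must fit within the page offset.
 */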
QEMU_BUILD_BUG_ON(NB_MMU_MODES > TARGET_PAGE_BITS_MIN);

static void tlb_flush_page_by_mmuidx_async_work(CPUState *cpu,
                                                run_on_cpu_data data)
{
    CPUArchState *env = cpu->env_ptr;
    target_ulong addr_and_mmuidx = (target_ulong) data.target_ptr;
    target_ulong addr = addr_and_mmuidx & TARGET_PAGE_MASK;
    unsigned long mmu_idx_bitmap = addr_and_mmuidx & ALL_MMUIDX_BITS;
    int mmu_idx;

    assert_cpu_is_self(cpu);

    tlb_debug("page addr:" TARGET_FMT_lx " mmu_map:0x%lx\n",
              addr, mmu_idx_bitmap);

    qemu_spin_lock(&env_tlb(env)->c.lock);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        if (test_bit(mmu_idx, &mmu_idx_bitmap)) {
            tlb_flush_page_locked(env, mmu_idx, addr);
        }
    }
    qemu_spin_unlock(&env_tlb(env)->c.lock);

    tb_flush_jmp_cache(cpu, addr);
}

void tlb_flush_page_by_mmuidx(CPUState *cpu, target_ulong addr, uint16_t idxmap)
{
    target_ulong addr_and_mmu_idx;

    tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%" PRIx16 "\n", addr, idxmap);

    /* Pack the page-aligned address and the mmu_idx bitmap into one word. */
    addr_and_mmu_idx = addr & TARGET_PAGE_MASK;
    addr_and_mmu_idx |= idxmap;

    if (!qemu_cpu_is_self(cpu)) {
        async_run_on_cpu(cpu, tlb_flush_page_by_mmuidx_async_work,
                         RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
    } else {
        tlb_flush_page_by_mmuidx_async_work(
            cpu, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
    }
}

void tlb_flush_page(CPUState *cpu, target_ulong addr)
{
    tlb_flush_page_by_mmuidx(cpu, addr, ALL_MMUIDX_BITS);
}

void tlb_flush_page_by_mmuidx_all_cpus(CPUState *src_cpu, target_ulong addr,
                                       uint16_t idxmap)
{
    const run_on_cpu_func fn = tlb_flush_page_by_mmuidx_async_work;
    target_ulong addr_and_mmu_idx;

    tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%"PRIx16"\n", addr, idxmap);

    /* Pack the page-aligned address and the mmu_idx bitmap into one word. */
    addr_and_mmu_idx = addr & TARGET_PAGE_MASK;
    addr_and_mmu_idx |= idxmap;

    flush_all_helper(src_cpu, fn, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
    fn(src_cpu, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
}

void tlb_flush_page_all_cpus(CPUState *src, target_ulong addr)
{
    tlb_flush_page_by_mmuidx_all_cpus(src, addr, ALL_MMUIDX_BITS);
}

void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
                                              target_ulong addr,
                                              uint16_t idxmap)
{
    const run_on_cpu_func fn = tlb_flush_page_by_mmuidx_async_work;
    target_ulong addr_and_mmu_idx;

    tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%"PRIx16"\n", addr, idxmap);

    /* Pack the page-aligned address and the mmu_idx bitmap into one word. */
    addr_and_mmu_idx = addr & TARGET_PAGE_MASK;
    addr_and_mmu_idx |= idxmap;

    flush_all_helper(src_cpu, fn, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
    async_safe_run_on_cpu(src_cpu, fn, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
}

void tlb_flush_page_all_cpus_synced(CPUState *src, target_ulong addr)
{
    tlb_flush_page_by_mmuidx_all_cpus_synced(src, addr, ALL_MMUIDX_BITS);
}

/*
 * Clear the DIRTY_MEMORY_CODE flag for the physical page at @ram_addr so
 * that writes to code in that page can be detected.
 */
void tlb_protect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_test_and_clear_dirty(ram_addr, TARGET_PAGE_SIZE,
                                             DIRTY_MEMORY_CODE);
}

/* Writes to the physical page at @ram_addr no longer need to be tracked. */
void tlb_unprotect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_set_dirty_flag(ram_addr, DIRTY_MEMORY_CODE);
}
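
/*
 * Set TLB_NOTDIRTY on a RAM-backed, writable TLB entry whose target falls
 * inside [start, start + length), so that the next write through it takes
 * the slow path and can update the dirty bitmap. Called with tlb->c.lock
 * held; concurrent readers use atomic_read on addr_write.
 */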
static void tlb_reset_dirty_range_locked(CPUTLBEntry *tlb_entry,
                                         uintptr_t start, uintptr_t length)
{
    uintptr_t addr = tlb_entry->addr_write;

    if ((addr & (TLB_INVALID_MASK | TLB_MMIO | TLB_NOTDIRTY)) == 0) {
        addr &= TARGET_PAGE_MASK;
        addr += tlb_entry->addend;
        if ((addr - start) < length) {
#if TCG_OVERSIZED_GUEST
            tlb_entry->addr_write |= TLB_NOTDIRTY;
#else
            atomic_set(&tlb_entry->addr_write,
                       tlb_entry->addr_write | TLB_NOTDIRTY);
#endif
        }
    }
}

/*
 * Called with tlb->c.lock held.
 * Called only from the vCPU context, i.e. the TLB's owner thread.
 */
static inline void copy_tlb_helper_locked(CPUTLBEntry *d, const CPUTLBEntry *s)
{
    *d = *s;
}

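/*
 * After the dirty bitmap for [start1, start1 + length) has been reset, mark
 * every TLB entry (in all mmu indexes) that maps RAM in that range with
 * TLB_NOTDIRTY so the next write goes through the slow path. Takes
 * tlb->c.lock, so it is safe even when invoked from a thread other than
 * this TLB's vCPU.
 */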
void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length)
{
    CPUArchState *env;

    int mmu_idx;

    env = cpu->env_ptr;
    qemu_spin_lock(&env_tlb(env)->c.lock);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        unsigned int i;
        unsigned int n = tlb_n_entries(env, mmu_idx);

        for (i = 0; i < n; i++) {
            tlb_reset_dirty_range_locked(&env_tlb(env)->f[mmu_idx].table[i],
                                         start1, length);
        }

        for (i = 0; i < CPU_VTLB_SIZE; i++) {
            tlb_reset_dirty_range_locked(&env_tlb(env)->d[mmu_idx].vtable[i],
                                         start1, length);
        }
    }
    qemu_spin_unlock(&env_tlb(env)->c.lock);
}

/* Called with tlb->c.lock held. */
static inline void tlb_set_dirty1_locked(CPUTLBEntry *tlb_entry,
                                         target_ulong vaddr)
{
    if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY)) {
        tlb_entry->addr_write = vaddr;
    }
}

/*
 * Clear TLB_NOTDIRTY from the entries for virtual page @vaddr so that
 * subsequent writes can use the fast path again.
 */
void tlb_set_dirty(CPUState *cpu, target_ulong vaddr)
{
    CPUArchState *env = cpu->env_ptr;
    int mmu_idx;

    assert_cpu_is_self(cpu);

    vaddr &= TARGET_PAGE_MASK;
    qemu_spin_lock(&env_tlb(env)->c.lock);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        tlb_set_dirty1_locked(tlb_entry(env, mmu_idx, vaddr), vaddr);
    }

    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        int k;
        for (k = 0; k < CPU_VTLB_SIZE; k++) {
            tlb_set_dirty1_locked(&env_tlb(env)->d[mmu_idx].vtable[k], vaddr);
        }
    }
    qemu_spin_unlock(&env_tlb(env)->c.lock);
}

/*
 * Our TLB does not natively support large pages, so remember the area
 * covered by large pages and trigger a full flush of the mmu_idx if one
 * of those pages is invalidated (see tlb_flush_page_locked).
 */
static void tlb_add_large_page(CPUArchState *env, int mmu_idx,
                               target_ulong vaddr, target_ulong size)
{
    target_ulong lp_addr = env_tlb(env)->d[mmu_idx].large_page_addr;
    target_ulong lp_mask = ~(size - 1);

    if (lp_addr == (target_ulong)-1) {
        /* No previous large page.  */
        lp_addr = vaddr;
    } else {
        /*
         * Extend the existing region to include the new page; widening
         * the mask until both addresses fall within one aligned region.
         */
        lp_mask &= env_tlb(env)->d[mmu_idx].large_page_mask;
        while (((lp_addr ^ vaddr) & lp_mask) != 0) {
            lp_mask <<= 1;
        }
    }
    env_tlb(env)->d[mmu_idx].large_page_addr = lp_addr & lp_mask;
    env_tlb(env)->d[mmu_idx].large_page_mask = lp_mask;
}
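
/*
 * Add a new TLB entry. At most one entry for a given virtual address is
 * permitted per mmu_idx; any existing non-matching entry for the page is
 * pushed into the victim TLB first. @size is either TARGET_PAGE_SIZE or
 * the size of a larger target page.
 *
 * Called from the vCPU's own thread; takes tlb->c.lock while updating the
 * entry so that concurrent flushes from other threads stay consistent.
 */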
void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
                             hwaddr paddr, MemTxAttrs attrs, int prot,
                             int mmu_idx, target_ulong size)
{
    CPUArchState *env = cpu->env_ptr;
    CPUTLB *tlb = env_tlb(env);
    CPUTLBDesc *desc = &tlb->d[mmu_idx];
    MemoryRegionSection *section;
    unsigned int index;
    target_ulong address;
    target_ulong code_address;
    uintptr_t addend;
    CPUTLBEntry *te, tn;
    hwaddr iotlb, xlat, sz, paddr_page;
    target_ulong vaddr_page;
    int asidx = cpu_asidx_from_attrs(cpu, attrs);

    assert_cpu_is_self(cpu);

    if (size <= TARGET_PAGE_SIZE) {
        sz = TARGET_PAGE_SIZE;
    } else {
        tlb_add_large_page(env, mmu_idx, vaddr, size);
        sz = size;
    }
    vaddr_page = vaddr & TARGET_PAGE_MASK;
    paddr_page = paddr & TARGET_PAGE_MASK;

    section = address_space_translate_for_iotlb(cpu, asidx, paddr_page,
                                                &xlat, &sz, attrs, &prot);
    assert(sz >= TARGET_PAGE_SIZE);

    tlb_debug("vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx
              " prot=%x idx=%d\n",
              vaddr, paddr, prot, mmu_idx);

    address = vaddr_page;
    if (size < TARGET_PAGE_SIZE) {
        /*
         * The mapping is smaller than a full target page: force the slow
         * path so the MMU check and TLB fill are repeated on every access.
         */
        address |= TLB_RECHECK;
    }
    if (!memory_region_is_ram(section->mr) &&
        !memory_region_is_romd(section->mr)) {
        /* I/O access: there is no host address to add. */
        address |= TLB_MMIO;
        addend = 0;
    } else {
        /* RAM or ROMD: record the host address for the fast path. */
        addend = (uintptr_t)memory_region_get_ram_ptr(section->mr) + xlat;
    }

    code_address = address;
    iotlb = memory_region_section_get_iotlb(cpu, section, vaddr_page,
                                            paddr_page, xlat, prot, &address);

    index = tlb_index(env, mmu_idx, vaddr_page);
    te = tlb_entry(env, mmu_idx, vaddr_page);

    /*
     * Hold the TLB lock for the rest of the function. We could acquire and
     * release it around each update, but taking it once amortizes the cost;
     * the critical section is short and the lock is rarely contended.
     */
    qemu_spin_lock(&tlb->c.lock);

    /* Note that this mmu_idx is no longer clean. */
    tlb->c.dirty |= 1 << mmu_idx;

    /* Make sure there is no cached translation for the new page. */
    tlb_flush_vtlb_page_locked(env, mmu_idx, vaddr_page);

    /*
     * Only evict the old entry to the victim tlb if it's for a
     * different page; otherwise just overwrite the stale data.
     */
    if (!tlb_hit_page_anyprot(te, vaddr_page) && !tlb_entry_is_empty(te)) {
        unsigned vidx = desc->vindex++ % CPU_VTLB_SIZE;
        CPUTLBEntry *tv = &desc->vtable[vidx];

        /* Evict the old entry into the victim tlb. */
        copy_tlb_helper_locked(tv, te);
        desc->viotlb[vidx] = desc->iotlb[index];
        tlb_n_used_entries_dec(env, mmu_idx);
    }

    /*
     * Refill the TLB. The iotlb entry stores (iotlb - vaddr_page) so that
     * adding the (non page-aligned) vaddr of the eventual access, as
     * io_readx()/io_writex() do, yields the offset to dispatch into the
     * MemoryRegion.
     */
    desc->iotlb[index].addr = iotlb - vaddr_page;
    desc->iotlb[index].attrs = attrs;

    /* Build the new entry before copying it into place. */
    tn.addend = addend - vaddr_page;
    if (prot & PAGE_READ) {
        tn.addr_read = address;
    } else {
        tn.addr_read = -1;
    }

    if (prot & PAGE_EXEC) {
        tn.addr_code = code_address;
    } else {
        tn.addr_code = -1;
    }

    tn.addr_write = -1;
    if (prot & PAGE_WRITE) {
        if ((memory_region_is_ram(section->mr) && section->readonly)
            || memory_region_is_romd(section->mr)) {
            /* Write access goes through the I/O callbacks. */
            tn.addr_write = address | TLB_MMIO;
        } else if (memory_region_is_ram(section->mr)
                   && cpu_physical_memory_is_clean(
                       memory_region_get_ram_addr(section->mr) + xlat)) {
            tn.addr_write = address | TLB_NOTDIRTY;
        } else {
            tn.addr_write = address;
        }
        if (prot & PAGE_WRITE_INV) {
            tn.addr_write |= TLB_INVALID_MASK;
        }
    }

    copy_tlb_helper_locked(te, &tn);
    tlb_n_used_entries_inc(env, mmu_idx);
    qemu_spin_unlock(&tlb->c.lock);
}

/*
 * Add a new TLB entry, but without specifying the memory transaction
 * attributes to be used.
 */
void tlb_set_page(CPUState *cpu, target_ulong vaddr,
                  hwaddr paddr, int prot,
                  int mmu_idx, target_ulong size)
{
    tlb_set_page_with_attrs(cpu, vaddr, paddr, MEMTXATTRS_UNSPECIFIED,
                            prot, mmu_idx, size);
}

static inline ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
{
    ram_addr_t ram_addr;

    ram_addr = qemu_ram_addr_from_host(ptr);
    if (ram_addr == RAM_ADDR_INVALID) {
        error_report("Bad ram pointer %p", ptr);
        abort();
    }
    return ram_addr;
}
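
/*
 * Ask the target's tlb_fill hook to install a TLB entry for @addr, raising
 * a guest exception (and exiting through the CPU loop) if the access is
 * not permitted.
 */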
static void tlb_fill(CPUState *cpu, target_ulong addr, int size,
                     MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);
    bool ok;

    /*
     * This is not a probe, so only a successful return is valid; failure
     * should have resulted in an exception plus a longjmp to the cpu loop.
     */
    ok = cc->tlb_fill(cpu, addr, size, access_type, mmu_idx, false, retaddr);
    assert(ok);
}

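/*
 * Dispatch an MMIO load through the memory API, taking the BQL if the
 * region requires it and reporting transaction failures to the guest.
 */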
static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
                         int mmu_idx, target_ulong addr, uintptr_t retaddr,
                         MMUAccessType access_type, int size)
{
    CPUState *cpu = env_cpu(env);
    hwaddr mr_offset;
    MemoryRegionSection *section;
    MemoryRegion *mr;
    uint64_t val;
    bool locked = false;
    MemTxResult r;

    section = iotlb_to_section(cpu, iotlbentry->addr, iotlbentry->attrs);
    mr = section->mr;
    mr_offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr;
    cpu->mem_io_pc = retaddr;
    if (mr != &io_mem_rom && mr != &io_mem_notdirty && !cpu->can_do_io) {
        cpu_io_recompile(cpu, retaddr);
    }

    cpu->mem_io_vaddr = addr;
    cpu->mem_io_access_type = access_type;

    if (mr->global_locking && !qemu_mutex_iothread_locked()) {
        qemu_mutex_lock_iothread();
        locked = true;
    }
    r = memory_region_dispatch_read(mr, mr_offset,
                                    &val, size, iotlbentry->attrs);
    if (r != MEMTX_OK) {
        hwaddr physaddr = mr_offset +
            section->offset_within_address_space -
            section->offset_within_region;

        cpu_transaction_failed(cpu, physaddr, addr, size, access_type,
                               mmu_idx, iotlbentry->attrs, r, retaddr);
    }
    if (locked) {
        qemu_mutex_unlock_iothread();
    }

    return val;
}

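/* The store counterpart of io_readx(). */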
static void io_writex(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
                      int mmu_idx, uint64_t val, target_ulong addr,
                      uintptr_t retaddr, int size)
{
    CPUState *cpu = env_cpu(env);
    hwaddr mr_offset;
    MemoryRegionSection *section;
    MemoryRegion *mr;
    bool locked = false;
    MemTxResult r;

    section = iotlb_to_section(cpu, iotlbentry->addr, iotlbentry->attrs);
    mr = section->mr;
    mr_offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr;
    if (mr != &io_mem_rom && mr != &io_mem_notdirty && !cpu->can_do_io) {
        cpu_io_recompile(cpu, retaddr);
    }
    cpu->mem_io_vaddr = addr;
    cpu->mem_io_pc = retaddr;

    if (mr->global_locking && !qemu_mutex_iothread_locked()) {
        qemu_mutex_lock_iothread();
        locked = true;
    }
    r = memory_region_dispatch_write(mr, mr_offset,
                                     val, size, iotlbentry->attrs);
    if (r != MEMTX_OK) {
        hwaddr physaddr = mr_offset +
            section->offset_within_address_space -
            section->offset_within_region;

        cpu_transaction_failed(cpu, physaddr, addr, size, MMU_DATA_STORE,
                               mmu_idx, iotlbentry->attrs, r, retaddr);
    }
    if (locked) {
        qemu_mutex_unlock_iothread();
    }
}

static inline target_ulong tlb_read_ofs(CPUTLBEntry *entry, size_t ofs)
{
#if TCG_OVERSIZED_GUEST
    return *(target_ulong *)((uintptr_t)entry + ofs);
#else
    /* ofs might correspond to .addr_write, so use atomic_read.  */
    return atomic_read((target_ulong *)((uintptr_t)entry + ofs));
#endif
}
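
/*
 * Return true if ADDR is present in the victim tlb, and has been copied
 * back to the main tlb.
 */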
static bool victim_tlb_hit(CPUArchState *env, size_t mmu_idx, size_t index,
                           size_t elt_ofs, target_ulong page)
{
    size_t vidx;

    assert_cpu_is_self(env_cpu(env));
    for (vidx = 0; vidx < CPU_VTLB_SIZE; ++vidx) {
        CPUTLBEntry *vtlb = &env_tlb(env)->d[mmu_idx].vtable[vidx];
        target_ulong cmp;

        /* elt_ofs might correspond to .addr_write, so use atomic_read.  */
#if TCG_OVERSIZED_GUEST
        cmp = *(target_ulong *)((uintptr_t)vtlb + elt_ofs);
#else
        cmp = atomic_read((target_ulong *)((uintptr_t)vtlb + elt_ofs));
#endif

        if (cmp == page) {
            /* Found entry in victim tlb, swap tlb and iotlb.  */
            CPUTLBEntry tmptlb, *tlb = &env_tlb(env)->f[mmu_idx].table[index];

            qemu_spin_lock(&env_tlb(env)->c.lock);
            copy_tlb_helper_locked(&tmptlb, tlb);
            copy_tlb_helper_locked(tlb, vtlb);
            copy_tlb_helper_locked(vtlb, &tmptlb);
            qemu_spin_unlock(&env_tlb(env)->c.lock);

            CPUIOTLBEntry tmpio, *io = &env_tlb(env)->d[mmu_idx].iotlb[index];
            CPUIOTLBEntry *vio = &env_tlb(env)->d[mmu_idx].viotlb[vidx];
            tmpio = *io; *io = *vio; *vio = tmpio;
            return true;
        }
    }
    return false;
}

/* Macro to call the above, with local variables from the use context.  */
#define VICTIM_TLB_HIT(TY, ADDR) \
    victim_tlb_hit(env, mmu_idx, index, offsetof(CPUTLBEntry, TY), \
                   (ADDR) & TARGET_PAGE_MASK)
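
/*
 * Translate a guest virtual code address into an address usable for TB
 * lookup (a ram_addr_t in system mode). Note: this can trigger a guest
 * exception via tlb_fill().
 */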
tb_page_addr_t get_page_addr_code(CPUArchState *env, target_ulong addr)
{
    uintptr_t mmu_idx = cpu_mmu_index(env, true);
    uintptr_t index = tlb_index(env, mmu_idx, addr);
    CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
    void *p;

    if (unlikely(!tlb_hit(entry->addr_code, addr))) {
        if (!VICTIM_TLB_HIT(addr_code, addr)) {
            tlb_fill(env_cpu(env), addr, 0, MMU_INST_FETCH, mmu_idx, 0);
            index = tlb_index(env, mmu_idx, addr);
            entry = tlb_entry(env, mmu_idx, addr);
        }
        assert(tlb_hit(entry->addr_code, addr));
    }

    if (unlikely(entry->addr_code & (TLB_RECHECK | TLB_MMIO))) {
        /*
         * Return -1 if we cannot translate and execute from a whole page
         * of RAM here: the caller will then fetch and translate one
         * instruction at a time, without caching.
         *  - TLB_RECHECK: the protection covers less than a full page.
         *  - TLB_MMIO: the region is not backed by RAM.
         */
        return -1;
    }

    p = (void *)((uintptr_t)addr + entry->addend);
    return qemu_ram_addr_from_host_nofail(p);
}
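
/*
 * Probe for whether the specified guest write access is permitted. If it is
 * not permitted then a guest exception is raised and we do not return.
 * Otherwise there will be a valid entry in the TLB for this access on return.
 */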
void probe_write(CPUArchState *env, target_ulong addr, int size, int mmu_idx,
                 uintptr_t retaddr)
{
    uintptr_t index = tlb_index(env, mmu_idx, addr);
    CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);

    if (!tlb_hit(tlb_addr_write(entry), addr)) {
        /* TLB entry is for a different page.  */
        if (!VICTIM_TLB_HIT(addr_write, addr)) {
            tlb_fill(env_cpu(env), addr, size, MMU_DATA_STORE,
                     mmu_idx, retaddr);
        }
    }
}

void *tlb_vaddr_to_host(CPUArchState *env, abi_ptr addr,
                        MMUAccessType access_type, int mmu_idx)
{
    CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
    uintptr_t tlb_addr, page;
    size_t elt_ofs;

    switch (access_type) {
    case MMU_DATA_LOAD:
        elt_ofs = offsetof(CPUTLBEntry, addr_read);
        break;
    case MMU_DATA_STORE:
        elt_ofs = offsetof(CPUTLBEntry, addr_write);
        break;
    case MMU_INST_FETCH:
        elt_ofs = offsetof(CPUTLBEntry, addr_code);
        break;
    default:
        g_assert_not_reached();
    }

    page = addr & TARGET_PAGE_MASK;
    tlb_addr = tlb_read_ofs(entry, elt_ofs);

    if (!tlb_hit_page(tlb_addr, page)) {
        uintptr_t index = tlb_index(env, mmu_idx, addr);

        if (!victim_tlb_hit(env, mmu_idx, index, elt_ofs, page)) {
            CPUState *cs = env_cpu(env);
            CPUClass *cc = CPU_GET_CLASS(cs);

            if (!cc->tlb_fill(cs, addr, 0, access_type, mmu_idx, true, 0)) {
                /* Non-faulting page table read failed.  */
                return NULL;
            }

            /* TLB resize via tlb_fill may have moved the entry.  */
            entry = tlb_entry(env, mmu_idx, addr);
        }
        tlb_addr = tlb_read_ofs(entry, elt_ofs);
    }

    if (tlb_addr & ~TARGET_PAGE_MASK) {
        /* IO access */
        return NULL;
    }

    return (void *)((uintptr_t)addr + entry->addend);
}
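
/*
 * Probe for an atomic read-modify-write operation: find the host address
 * for @addr, performing the same alignment and permission checks as a
 * store. Anything that cannot be handled inline (MMIO, pages needing a
 * recheck, unaligned atomics) falls back to a stop-the-world exit via
 * cpu_loop_exit_atomic().
 */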
static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
                               TCGMemOpIdx oi, uintptr_t retaddr,
                               NotDirtyInfo *ndi)
{
    size_t mmu_idx = get_mmuidx(oi);
    uintptr_t index = tlb_index(env, mmu_idx, addr);
    CPUTLBEntry *tlbe = tlb_entry(env, mmu_idx, addr);
    target_ulong tlb_addr = tlb_addr_write(tlbe);
    TCGMemOp mop = get_memop(oi);
    int a_bits = get_alignment_bits(mop);
    int s_bits = mop & MO_SIZE;
    void *hostaddr;

    /* Adjust the given return address.  */
    retaddr -= GETPC_ADJ;

    /* Enforce guest required alignment.  */
    if (unlikely(a_bits > 0 && (addr & ((1 << a_bits) - 1)))) {
        cpu_unaligned_access(env_cpu(env), addr, MMU_DATA_STORE,
                             mmu_idx, retaddr);
    }

    /* Enforce qemu required alignment.  */
    if (unlikely(addr & ((1 << s_bits) - 1))) {
        /*
         * The access is not naturally aligned, so it cannot be emulated
         * atomically here; mark an exception and exit the cpu loop.
         */
        goto stop_the_world;
    }

    /* Check the TLB entry and enforce page permissions.  */
    if (!tlb_hit(tlb_addr, addr)) {
        if (!VICTIM_TLB_HIT(addr_write, addr)) {
            tlb_fill(env_cpu(env), addr, 1 << s_bits, MMU_DATA_STORE,
                     mmu_idx, retaddr);
            index = tlb_index(env, mmu_idx, addr);
            tlbe = tlb_entry(env, mmu_idx, addr);
        }
        tlb_addr = tlb_addr_write(tlbe) & ~TLB_INVALID_MASK;
    }

    /* Notice an IO access or an access that needs a per-access recheck.  */
    if (unlikely(tlb_addr & (TLB_MMIO | TLB_RECHECK))) {
        /* Nothing can be done here apart from stop-the-world.  */
        goto stop_the_world;
    }

    /* Let the guest notice an RMW on a write-only page.  */
    if (unlikely(tlbe->addr_read != (tlb_addr & ~TLB_NOTDIRTY))) {
        tlb_fill(env_cpu(env), addr, 1 << s_bits, MMU_DATA_LOAD,
                 mmu_idx, retaddr);
        /*
         * If tlb_fill returns, the page is in fact readable; fall back to
         * stop-the-world anyway to keep the RMW handling simple.
         */
        goto stop_the_world;
    }

    hostaddr = (void *)((uintptr_t)addr + tlbe->addend);

    ndi->active = false;
    if (unlikely(tlb_addr & TLB_NOTDIRTY)) {
        ndi->active = true;
        memory_notdirty_write_prepare(ndi, env_cpu(env), addr,
                                      qemu_ram_addr_from_host_nofail(hostaddr),
                                      1 << s_bits);
    }

    return hostaddr;

 stop_the_world:
    cpu_loop_exit_atomic(env_cpu(env), retaddr);
}

#ifdef TARGET_WORDS_BIGENDIAN
#define NEED_BE_BSWAP 0
#define NEED_LE_BSWAP 1
#else
#define NEED_BE_BSWAP 1
#define NEED_LE_BSWAP 0
#endif
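
/*
 * Byte-swap @val when the requested access endianness does not match the
 * target's native endianness. The conditions are compile-time constants,
 * so the unused branches are optimized away.
 */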
static inline uint64_t handle_bswap(uint64_t val, int size, bool big_endian)
{
    if ((big_endian && NEED_BE_BSWAP) || (!big_endian && NEED_LE_BSWAP)) {
        switch (size) {
        case 1: return val;
        case 2: return bswap16(val);
        case 4: return bswap32(val);
        case 8: return bswap64(val);
        default:
            g_assert_not_reached();
        }
    } else {
        return val;
    }
}
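
/*
 * Load Helpers
 *
 * All of the load functions below funnel through load_helper(). Size,
 * endianness and the code_read flag are compile-time constants at each
 * call site, so with always_inline the compiler generates a specialized
 * body for every helper. The matching full_* helper is passed in as
 * @full_load so that the unaligned slow path can recurse with the same
 * specialization.
 */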
typedef uint64_t FullLoadHelper(CPUArchState *env, target_ulong addr,
                                TCGMemOpIdx oi, uintptr_t retaddr);

static inline uint64_t __attribute__((always_inline))
load_helper(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi,
            uintptr_t retaddr, size_t size, bool big_endian, bool code_read,
            FullLoadHelper *full_load)
{
    uintptr_t mmu_idx = get_mmuidx(oi);
    uintptr_t index = tlb_index(env, mmu_idx, addr);
    CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
    target_ulong tlb_addr = code_read ? entry->addr_code : entry->addr_read;
    const size_t tlb_off = code_read ?
        offsetof(CPUTLBEntry, addr_code) : offsetof(CPUTLBEntry, addr_read);
    const MMUAccessType access_type =
        code_read ? MMU_INST_FETCH : MMU_DATA_LOAD;
    unsigned a_bits = get_alignment_bits(get_memop(oi));
    void *haddr;
    uint64_t res;

    /* Handle CPU specific unaligned behaviour.  */
    if (addr & ((1 << a_bits) - 1)) {
        cpu_unaligned_access(env_cpu(env), addr, access_type,
                             mmu_idx, retaddr);
    }

    /* If the TLB entry is for a different page, reload and try again.  */
    if (!tlb_hit(tlb_addr, addr)) {
        if (!victim_tlb_hit(env, mmu_idx, index, tlb_off,
                            addr & TARGET_PAGE_MASK)) {
            tlb_fill(env_cpu(env), addr, size,
                     access_type, mmu_idx, retaddr);
            index = tlb_index(env, mmu_idx, addr);
            entry = tlb_entry(env, mmu_idx, addr);
        }
        tlb_addr = code_read ? entry->addr_code : entry->addr_read;
    }

    /* Handle an IO access.  */
    if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
        if ((addr & (size - 1)) != 0) {
            goto do_unaligned_access;
        }

        if (tlb_addr & TLB_RECHECK) {
            /*
             * This is a TLB_RECHECK access, where the MMU protection
             * covers a smaller range than a target page. Repeat the MMU
             * check here, which will fill the TLB again if necessary.
             */
            tlb_fill(env_cpu(env), addr, size,
                     access_type, mmu_idx, retaddr);
            index = tlb_index(env, mmu_idx, addr);
            entry = tlb_entry(env, mmu_idx, addr);

            tlb_addr = code_read ? entry->addr_code : entry->addr_read;
            tlb_addr &= ~TLB_RECHECK;
            if (!(tlb_addr & ~TARGET_PAGE_MASK)) {
                /* RAM access */
                goto do_aligned_access;
            }
        }

        res = io_readx(env, &env_tlb(env)->d[mmu_idx].iotlb[index],
                       mmu_idx, addr, retaddr, access_type, size);
        return handle_bswap(res, size, big_endian);
    }

    /* Handle slow unaligned access (it spans two pages or IO).  */
    if (size > 1
        && unlikely((addr & ~TARGET_PAGE_MASK) + size - 1
                    >= TARGET_PAGE_SIZE)) {
        target_ulong addr1, addr2;
        uint64_t r1, r2;
        unsigned shift;
    do_unaligned_access:
        addr1 = addr & ~((target_ulong)size - 1);
        addr2 = addr1 + size;
        r1 = full_load(env, addr1, oi, retaddr);
        r2 = full_load(env, addr2, oi, retaddr);
        shift = (addr & (size - 1)) * 8;

        if (big_endian) {
            /* Big-endian combine.  */
            res = (r1 << shift) | (r2 >> ((size * 8) - shift));
        } else {
            /* Little-endian combine.  */
            res = (r1 >> shift) | (r2 << ((size * 8) - shift));
        }
        return res & MAKE_64BIT_MASK(0, size * 8);
    }

 do_aligned_access:
    haddr = (void *)((uintptr_t)addr + entry->addend);
    switch (size) {
    case 1:
        res = ldub_p(haddr);
        break;
    case 2:
        if (big_endian) {
            res = lduw_be_p(haddr);
        } else {
            res = lduw_le_p(haddr);
        }
        break;
    case 4:
        if (big_endian) {
            res = (uint32_t)ldl_be_p(haddr);
        } else {
            res = (uint32_t)ldl_le_p(haddr);
        }
        break;
    case 8:
        if (big_endian) {
            res = ldq_be_p(haddr);
        } else {
            res = ldq_le_p(haddr);
        }
        break;
    default:
        g_assert_not_reached();
    }

    return res;
}
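
/*
 * Each helper_*_mmu entry point simply forwards to a static full_* wrapper;
 * the wrapper exists so that load_helper() can pass a pointer to the
 * correctly specialized function when it recurses for an unaligned access.
 */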
static uint64_t full_ldub_mmu(CPUArchState *env, target_ulong addr,
                              TCGMemOpIdx oi, uintptr_t retaddr)
{
    return load_helper(env, addr, oi, retaddr, 1, false, false,
                       full_ldub_mmu);
}

tcg_target_ulong helper_ret_ldub_mmu(CPUArchState *env, target_ulong addr,
                                     TCGMemOpIdx oi, uintptr_t retaddr)
{
    return full_ldub_mmu(env, addr, oi, retaddr);
}

static uint64_t full_le_lduw_mmu(CPUArchState *env, target_ulong addr,
                                 TCGMemOpIdx oi, uintptr_t retaddr)
{
    return load_helper(env, addr, oi, retaddr, 2, false, false,
                       full_le_lduw_mmu);
}

tcg_target_ulong helper_le_lduw_mmu(CPUArchState *env, target_ulong addr,
                                    TCGMemOpIdx oi, uintptr_t retaddr)
{
    return full_le_lduw_mmu(env, addr, oi, retaddr);
}

static uint64_t full_be_lduw_mmu(CPUArchState *env, target_ulong addr,
                                 TCGMemOpIdx oi, uintptr_t retaddr)
{
    return load_helper(env, addr, oi, retaddr, 2, true, false,
                       full_be_lduw_mmu);
}

tcg_target_ulong helper_be_lduw_mmu(CPUArchState *env, target_ulong addr,
                                    TCGMemOpIdx oi, uintptr_t retaddr)
{
    return full_be_lduw_mmu(env, addr, oi, retaddr);
}

static uint64_t full_le_ldul_mmu(CPUArchState *env, target_ulong addr,
                                 TCGMemOpIdx oi, uintptr_t retaddr)
{
    return load_helper(env, addr, oi, retaddr, 4, false, false,
                       full_le_ldul_mmu);
}

tcg_target_ulong helper_le_ldul_mmu(CPUArchState *env, target_ulong addr,
                                    TCGMemOpIdx oi, uintptr_t retaddr)
{
    return full_le_ldul_mmu(env, addr, oi, retaddr);
}

static uint64_t full_be_ldul_mmu(CPUArchState *env, target_ulong addr,
                                 TCGMemOpIdx oi, uintptr_t retaddr)
{
    return load_helper(env, addr, oi, retaddr, 4, true, false,
                       full_be_ldul_mmu);
}

tcg_target_ulong helper_be_ldul_mmu(CPUArchState *env, target_ulong addr,
                                    TCGMemOpIdx oi, uintptr_t retaddr)
{
    return full_be_ldul_mmu(env, addr, oi, retaddr);
}

uint64_t helper_le_ldq_mmu(CPUArchState *env, target_ulong addr,
                           TCGMemOpIdx oi, uintptr_t retaddr)
{
    return load_helper(env, addr, oi, retaddr, 8, false, false,
                       helper_le_ldq_mmu);
}

uint64_t helper_be_ldq_mmu(CPUArchState *env, target_ulong addr,
                           TCGMemOpIdx oi, uintptr_t retaddr)
{
    return load_helper(env, addr, oi, retaddr, 8, true, false,
                       helper_be_ldq_mmu);
}
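
/* Signed versions of the load routines: sign-extend the unsigned results. */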
tcg_target_ulong helper_ret_ldsb_mmu(CPUArchState *env, target_ulong addr,
                                     TCGMemOpIdx oi, uintptr_t retaddr)
{
    return (int8_t)helper_ret_ldub_mmu(env, addr, oi, retaddr);
}

tcg_target_ulong helper_le_ldsw_mmu(CPUArchState *env, target_ulong addr,
                                    TCGMemOpIdx oi, uintptr_t retaddr)
{
    return (int16_t)helper_le_lduw_mmu(env, addr, oi, retaddr);
}

tcg_target_ulong helper_be_ldsw_mmu(CPUArchState *env, target_ulong addr,
                                    TCGMemOpIdx oi, uintptr_t retaddr)
{
    return (int16_t)helper_be_lduw_mmu(env, addr, oi, retaddr);
}

tcg_target_ulong helper_le_ldsl_mmu(CPUArchState *env, target_ulong addr,
                                    TCGMemOpIdx oi, uintptr_t retaddr)
{
    return (int32_t)helper_le_ldul_mmu(env, addr, oi, retaddr);
}

tcg_target_ulong helper_be_ldsl_mmu(CPUArchState *env, target_ulong addr,
                                    TCGMemOpIdx oi, uintptr_t retaddr)
{
    return (int32_t)helper_be_ldul_mmu(env, addr, oi, retaddr);
}
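
/*
 * Store Helpers
 *
 * As with the load helpers, all stores funnel through store_helper();
 * size and endianness are compile-time constants at each call site, so
 * with always_inline each helper gets a specialized body.
 */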
static inline void __attribute__((always_inline))
store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
             TCGMemOpIdx oi, uintptr_t retaddr, size_t size, bool big_endian)
{
    uintptr_t mmu_idx = get_mmuidx(oi);
    uintptr_t index = tlb_index(env, mmu_idx, addr);
    CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
    target_ulong tlb_addr = tlb_addr_write(entry);
    const size_t tlb_off = offsetof(CPUTLBEntry, addr_write);
    unsigned a_bits = get_alignment_bits(get_memop(oi));
    void *haddr;

    /* Handle CPU specific unaligned behaviour.  */
    if (addr & ((1 << a_bits) - 1)) {
        cpu_unaligned_access(env_cpu(env), addr, MMU_DATA_STORE,
                             mmu_idx, retaddr);
    }

    /* If the TLB entry is for a different page, reload and try again.  */
    if (!tlb_hit(tlb_addr, addr)) {
        if (!victim_tlb_hit(env, mmu_idx, index, tlb_off,
                            addr & TARGET_PAGE_MASK)) {
            tlb_fill(env_cpu(env), addr, size, MMU_DATA_STORE,
                     mmu_idx, retaddr);
            index = tlb_index(env, mmu_idx, addr);
            entry = tlb_entry(env, mmu_idx, addr);
        }
        tlb_addr = tlb_addr_write(entry) & ~TLB_INVALID_MASK;
    }

    /* Handle an IO access.  */
    if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
        if ((addr & (size - 1)) != 0) {
            goto do_unaligned_access;
        }

        if (tlb_addr & TLB_RECHECK) {
            /*
             * This is a TLB_RECHECK access, where the MMU protection
             * covers a smaller range than a target page. Repeat the MMU
             * check here, which will fill the TLB again if necessary.
             */
            tlb_fill(env_cpu(env), addr, size, MMU_DATA_STORE,
                     mmu_idx, retaddr);
            index = tlb_index(env, mmu_idx, addr);
            entry = tlb_entry(env, mmu_idx, addr);

            tlb_addr = tlb_addr_write(entry);
            tlb_addr &= ~TLB_RECHECK;
            if (!(tlb_addr & ~TARGET_PAGE_MASK)) {
                /* RAM access */
                goto do_aligned_access;
            }
        }

        io_writex(env, &env_tlb(env)->d[mmu_idx].iotlb[index], mmu_idx,
                  handle_bswap(val, size, big_endian),
                  addr, retaddr, size);
        return;
    }

    /* Handle slow unaligned access (it spans two pages or IO).  */
    if (size > 1
        && unlikely((addr & ~TARGET_PAGE_MASK) + size - 1
                    >= TARGET_PAGE_SIZE)) {
        int i;
        uintptr_t index2;
        CPUTLBEntry *entry2;
        target_ulong page2, tlb_addr2;
    do_unaligned_access:
        /* Ensure the second page is in the TLB before doing the byte stores. */
        page2 = (addr + size) & TARGET_PAGE_MASK;
        index2 = tlb_index(env, mmu_idx, page2);
        entry2 = tlb_entry(env, mmu_idx, page2);
        tlb_addr2 = tlb_addr_write(entry2);
        if (!tlb_hit_page(tlb_addr2, page2)
            && !victim_tlb_hit(env, mmu_idx, index2, tlb_off,
                               page2 & TARGET_PAGE_MASK)) {
            tlb_fill(env_cpu(env), page2, size, MMU_DATA_STORE,
                     mmu_idx, retaddr);
        }

        /* Store the value one byte at a time, in forward order; simple but slow. */
        for (i = 0; i < size; ++i) {
            uint8_t val8;
            if (big_endian) {
                /* Big-endian extract.  */
                val8 = val >> (((size - 1) * 8) - (i * 8));
            } else {
                /* Little-endian extract.  */
                val8 = val >> (i * 8);
            }
            helper_ret_stb_mmu(env, addr + i, val8, oi, retaddr);
        }
        return;
    }

 do_aligned_access:
    haddr = (void *)((uintptr_t)addr + entry->addend);
    switch (size) {
    case 1:
        stb_p(haddr, val);
        break;
    case 2:
        if (big_endian) {
            stw_be_p(haddr, val);
        } else {
            stw_le_p(haddr, val);
        }
        break;
    case 4:
        if (big_endian) {
            stl_be_p(haddr, val);
        } else {
            stl_le_p(haddr, val);
        }
        break;
    case 8:
        if (big_endian) {
            stq_be_p(haddr, val);
        } else {
            stq_le_p(haddr, val);
        }
        break;
    default:
        g_assert_not_reached();
        break;
    }
}

void helper_ret_stb_mmu(CPUArchState *env, target_ulong addr, uint8_t val,
                        TCGMemOpIdx oi, uintptr_t retaddr)
{
    store_helper(env, addr, val, oi, retaddr, 1, false);
}

void helper_le_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
                       TCGMemOpIdx oi, uintptr_t retaddr)
{
    store_helper(env, addr, val, oi, retaddr, 2, false);
}

void helper_be_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
                       TCGMemOpIdx oi, uintptr_t retaddr)
{
    store_helper(env, addr, val, oi, retaddr, 2, true);
}

void helper_le_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
                       TCGMemOpIdx oi, uintptr_t retaddr)
{
    store_helper(env, addr, val, oi, retaddr, 4, false);
}

void helper_be_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
                       TCGMemOpIdx oi, uintptr_t retaddr)
{
    store_helper(env, addr, val, oi, retaddr, 4, true);
}

void helper_le_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
                       TCGMemOpIdx oi, uintptr_t retaddr)
{
    store_helper(env, addr, val, oi, retaddr, 8, false);
}

void helper_be_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
                       TCGMemOpIdx oi, uintptr_t retaddr)
{
    store_helper(env, addr, val, oi, retaddr, 8, true);
}

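/*
 * First set of atomic helpers: these take an explicit TCGMemOpIdx and
 * return address (the _mmu variants), so they can also be called from
 * other helpers. atomic_template.h instantiates one family per DATA_SIZE.
 */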
#define EXTRA_ARGS , TCGMemOpIdx oi, uintptr_t retaddr
#define ATOMIC_NAME(X) \
    HELPER(glue(glue(glue(atomic_ ## X, SUFFIX), END), _mmu))
#define ATOMIC_MMU_DECLS NotDirtyInfo ndi
#define ATOMIC_MMU_LOOKUP atomic_mmu_lookup(env, addr, oi, retaddr, &ndi)
#define ATOMIC_MMU_CLEANUP                              \
    do {                                                \
        if (unlikely(ndi.active)) {                     \
            memory_notdirty_write_complete(&ndi);       \
        }                                               \
    } while (0)

#define DATA_SIZE 1
#include "atomic_template.h"

#define DATA_SIZE 2
#include "atomic_template.h"

#define DATA_SIZE 4
#include "atomic_template.h"

#ifdef CONFIG_ATOMIC64
#define DATA_SIZE 8
#include "atomic_template.h"
#endif

#if HAVE_CMPXCHG128 || HAVE_ATOMIC128
#define DATA_SIZE 16
#include "atomic_template.h"
#endif

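/*
 * Second set of atomic helpers: invoked directly from generated code, so
 * the host return address is recovered with GETPC().
 */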
#undef EXTRA_ARGS
#undef ATOMIC_NAME
#undef ATOMIC_MMU_LOOKUP
#define EXTRA_ARGS         , TCGMemOpIdx oi
#define ATOMIC_NAME(X)     HELPER(glue(glue(atomic_ ## X, SUFFIX), END))
#define ATOMIC_MMU_LOOKUP  atomic_mmu_lookup(env, addr, oi, GETPC(), &ndi)

#define DATA_SIZE 1
#include "atomic_template.h"

#define DATA_SIZE 2
#include "atomic_template.h"

#define DATA_SIZE 4
#include "atomic_template.h"

#ifdef CONFIG_ATOMIC64
#define DATA_SIZE 8
#include "atomic_template.h"
#endif
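
/* Code access functions: loads used when fetching or translating guest code. */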
static uint64_t full_ldub_cmmu(CPUArchState *env, target_ulong addr,
                               TCGMemOpIdx oi, uintptr_t retaddr)
{
    return load_helper(env, addr, oi, retaddr, 1, false, true,
                       full_ldub_cmmu);
}

uint8_t helper_ret_ldb_cmmu(CPUArchState *env, target_ulong addr,
                            TCGMemOpIdx oi, uintptr_t retaddr)
{
    return full_ldub_cmmu(env, addr, oi, retaddr);
}

static uint64_t full_le_lduw_cmmu(CPUArchState *env, target_ulong addr,
                                  TCGMemOpIdx oi, uintptr_t retaddr)
{
    return load_helper(env, addr, oi, retaddr, 2, false, true,
                       full_le_lduw_cmmu);
}

uint16_t helper_le_ldw_cmmu(CPUArchState *env, target_ulong addr,
                            TCGMemOpIdx oi, uintptr_t retaddr)
{
    return full_le_lduw_cmmu(env, addr, oi, retaddr);
}

static uint64_t full_be_lduw_cmmu(CPUArchState *env, target_ulong addr,
                                  TCGMemOpIdx oi, uintptr_t retaddr)
{
    return load_helper(env, addr, oi, retaddr, 2, true, true,
                       full_be_lduw_cmmu);
}

uint16_t helper_be_ldw_cmmu(CPUArchState *env, target_ulong addr,
                            TCGMemOpIdx oi, uintptr_t retaddr)
{
    return full_be_lduw_cmmu(env, addr, oi, retaddr);
}

static uint64_t full_le_ldul_cmmu(CPUArchState *env, target_ulong addr,
                                  TCGMemOpIdx oi, uintptr_t retaddr)
{
    return load_helper(env, addr, oi, retaddr, 4, false, true,
                       full_le_ldul_cmmu);
}

uint32_t helper_le_ldl_cmmu(CPUArchState *env, target_ulong addr,
                            TCGMemOpIdx oi, uintptr_t retaddr)
{
    return full_le_ldul_cmmu(env, addr, oi, retaddr);
}

static uint64_t full_be_ldul_cmmu(CPUArchState *env, target_ulong addr,
                                  TCGMemOpIdx oi, uintptr_t retaddr)
{
    return load_helper(env, addr, oi, retaddr, 4, true, true,
                       full_be_ldul_cmmu);
}

uint32_t helper_be_ldl_cmmu(CPUArchState *env, target_ulong addr,
                            TCGMemOpIdx oi, uintptr_t retaddr)
{
    return full_be_ldul_cmmu(env, addr, oi, retaddr);
}

uint64_t helper_le_ldq_cmmu(CPUArchState *env, target_ulong addr,
                            TCGMemOpIdx oi, uintptr_t retaddr)
{
    return load_helper(env, addr, oi, retaddr, 8, false, true,
                       helper_le_ldq_cmmu);
}

uint64_t helper_be_ldq_cmmu(CPUArchState *env, target_ulong addr,
                            TCGMemOpIdx oi, uintptr_t retaddr)
{
    return load_helper(env, addr, oi, retaddr, 8, true, true,
                       helper_be_ldq_cmmu);
}