20#include "qemu/osdep.h"
21#include "qemu/main-loop.h"
22#include "hw/core/tcg-cpu-ops.h"
23#include "exec/exec-all.h"
24#include "exec/memory.h"
25#include "exec/cpu_ldst.h"
26#include "exec/cputlb.h"
27#include "exec/memory-internal.h"
28#include "exec/ram_addr.h"
29#include "tcg/tcg.h"
30#include "qemu/error-report.h"
31#include "exec/log.h"
32#include "exec/helper-proto.h"
33#include "qemu/atomic.h"
34#include "qemu/atomic128.h"
35#include "exec/translate-all.h"
36#include "trace/trace-root.h"
37#include "tb-hash.h"
38#include "internal.h"
39#ifdef CONFIG_PLUGIN
40#include "qemu/plugin-memory.h"
41#endif
42#include "tcg/tcg-ldst.h"

/* DEBUG defines, enable DEBUG_TLB_LOG to log to the CPU_LOG_MMU target */
/* #define DEBUG_TLB */
/* #define DEBUG_TLB_LOG */

48#ifdef DEBUG_TLB
49# define DEBUG_TLB_GATE 1
50# ifdef DEBUG_TLB_LOG
51# define DEBUG_TLB_LOG_GATE 1
52# else
53# define DEBUG_TLB_LOG_GATE 0
54# endif
55#else
56# define DEBUG_TLB_GATE 0
57# define DEBUG_TLB_LOG_GATE 0
58#endif
59
60#define tlb_debug(fmt, ...) do { \
61 if (DEBUG_TLB_LOG_GATE) { \
62 qemu_log_mask(CPU_LOG_MMU, "%s: " fmt, __func__, \
63 ## __VA_ARGS__); \
64 } else if (DEBUG_TLB_GATE) { \
65 fprintf(stderr, "%s: " fmt, __func__, ## __VA_ARGS__); \
66 } \
67} while (0)
68
69#define assert_cpu_is_self(cpu) do { \
70 if (DEBUG_TLB_GATE) { \
71 g_assert(!(cpu)->created || qemu_cpu_is_self(cpu)); \
72 } \
73 } while (0)
74

/* run_on_cpu_data must be large enough to hold a target_ulong. */
77QEMU_BUILD_BUG_ON(sizeof(target_ulong) > sizeof(run_on_cpu_data));

/*
 * The mmu_idx bitmasks (idxmap) used below are uint16_t, so we can
 * support at most 16 MMU modes.
 */
81QEMU_BUILD_BUG_ON(NB_MMU_MODES > 16);
82#define ALL_MMUIDX_BITS ((1 << NB_MMU_MODES) - 1)
83
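/*
 * Note: fast->mask holds (n_entries - 1) << CPU_TLB_ENTRY_BITS, so both
 * the entry count and the byte size of the table can be derived from it.
 */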
84static inline size_t tlb_n_entries(CPUTLBDescFast *fast)
85{
86 return (fast->mask >> CPU_TLB_ENTRY_BITS) + 1;
87}
88
89static inline size_t sizeof_tlb(CPUTLBDescFast *fast)
90{
91 return fast->mask + (1 << CPU_TLB_ENTRY_BITS);
92}
93
94static void tlb_window_reset(CPUTLBDesc *desc, int64_t ns,
95 size_t max_entries)
96{
97 desc->window_begin_ns = ns;
98 desc->window_max_entries = max_entries;
99}
100
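/* Clear the tb_jmp_cache entries that could map to the given guest page. */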
101static void tb_jmp_cache_clear_page(CPUState *cpu, target_ulong page_addr)
102{
103 unsigned int i, i0 = tb_jmp_cache_hash_page(page_addr);
104
105 for (i = 0; i < TB_JMP_PAGE_SIZE; i++) {
106 qatomic_set(&cpu->tb_jmp_cache[i0 + i], NULL);
107 }
108}
109
110static void tb_flush_jmp_cache(CPUState *cpu, target_ulong addr)
111{
    /*
     * Discard jump cache entries for any tb which might potentially
     * overlap the flushed page.
     */
114 tb_jmp_cache_clear_page(cpu, addr - TARGET_PAGE_SIZE);
115 tb_jmp_cache_clear_page(cpu, addr);
116}
117

/**
 * tlb_mmu_resize_locked() - perform TLB resize bookkeeping; resize if necessary
 * @desc: The CPUTLBDesc portion of the TLB
 * @fast: The CPUTLBDescFast portion of the same TLB
 *
 * Called with tlb_lock held.
 *
 * The TLB is only resized at flush time, based on the use rate observed
 * over a sliding window of window_len_ms:
 *
 * - If the peak use rate in the window exceeds 70%, double the size,
 *   capped at 1 << CPU_TLB_DYN_MAX_BITS.
 * - If the use rate stays below 30% and the window has expired, shrink
 *   towards pow2ceil(window_max_entries), but never below
 *   1 << CPU_TLB_DYN_MIN_BITS.
 *
 * The window keeps the TLB from thrashing between sizes when the guest's
 * working set changes only briefly.
 */
158static void tlb_mmu_resize_locked(CPUTLBDesc *desc, CPUTLBDescFast *fast,
159 int64_t now)
160{
161 size_t old_size = tlb_n_entries(fast);
162 size_t rate;
163 size_t new_size = old_size;
164 int64_t window_len_ms = 100;
165 int64_t window_len_ns = window_len_ms * 1000 * 1000;
166 bool window_expired = now > desc->window_begin_ns + window_len_ns;
167
168 if (desc->n_used_entries > desc->window_max_entries) {
169 desc->window_max_entries = desc->n_used_entries;
170 }
171 rate = desc->window_max_entries * 100 / old_size;
172
173 if (rate > 70) {
174 new_size = MIN(old_size << 1, 1 << CPU_TLB_DYN_MAX_BITS);
175 } else if (rate < 30 && window_expired) {
176 size_t ceil = pow2ceil(desc->window_max_entries);
177 size_t expected_rate = desc->window_max_entries * 100 / ceil;
178

        /*
         * Avoid ping-pong resizing: if shrinking to the next power of two
         * above window_max_entries would leave the expected use rate above
         * 70%, pick the next size up instead.
         */
189 if (expected_rate > 70) {
190 ceil *= 2;
191 }
192 new_size = MAX(ceil, 1 << CPU_TLB_DYN_MIN_BITS);
193 }
194
195 if (new_size == old_size) {
196 if (window_expired) {
197 tlb_window_reset(desc, now, desc->n_used_entries);
198 }
199 return;
200 }
201
202 g_free(fast->table);
203 g_free(desc->iotlb);
204
205 tlb_window_reset(desc, now, 0);
206
207 fast->mask = (new_size - 1) << CPU_TLB_ENTRY_BITS;
208 fast->table = g_try_new(CPUTLBEntry, new_size);
209 desc->iotlb = g_try_new(CPUIOTLBEntry, new_size);
210

    /*
     * If the allocations fail, try smaller sizes.  We just freed some
     * memory, so going back to half of new_size has a good chance of
     * working.  Increased memory pressure elsewhere in the system might
     * still lead to failure; abort once we hit the minimum size.
     */
218 while (fast->table == NULL || desc->iotlb == NULL) {
219 if (new_size == (1 << CPU_TLB_DYN_MIN_BITS)) {
220 error_report("%s: %s", __func__, strerror(errno));
221 abort();
222 }
223 new_size = MAX(new_size >> 1, 1 << CPU_TLB_DYN_MIN_BITS);
224 fast->mask = (new_size - 1) << CPU_TLB_ENTRY_BITS;
225
226 g_free(fast->table);
227 g_free(desc->iotlb);
228 fast->table = g_try_new(CPUTLBEntry, new_size);
229 desc->iotlb = g_try_new(CPUIOTLBEntry, new_size);
230 }
231}
232
233static void tlb_mmu_flush_locked(CPUTLBDesc *desc, CPUTLBDescFast *fast)
234{
235 desc->n_used_entries = 0;
236 desc->large_page_addr = -1;
237 desc->large_page_mask = -1;
238 desc->vindex = 0;
239 memset(fast->table, -1, sizeof_tlb(fast));
240 memset(desc->vtable, -1, sizeof(desc->vtable));
241}
242
243static void tlb_flush_one_mmuidx_locked(CPUArchState *env, int mmu_idx,
244 int64_t now)
245{
246 CPUTLBDesc *desc = &env_tlb(env)->d[mmu_idx];
247 CPUTLBDescFast *fast = &env_tlb(env)->f[mmu_idx];
248
249 tlb_mmu_resize_locked(desc, fast, now);
250 tlb_mmu_flush_locked(desc, fast);
251}
252
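/* Allocate one mmu_idx's TLB at its default dynamic size, initially flushed. */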
253static void tlb_mmu_init(CPUTLBDesc *desc, CPUTLBDescFast *fast, int64_t now)
254{
255 size_t n_entries = 1 << CPU_TLB_DYN_DEFAULT_BITS;
256
257 tlb_window_reset(desc, now, 0);
258 desc->n_used_entries = 0;
259 fast->mask = (n_entries - 1) << CPU_TLB_ENTRY_BITS;
260 fast->table = g_new(CPUTLBEntry, n_entries);
261 desc->iotlb = g_new(CPUIOTLBEntry, n_entries);
262 tlb_mmu_flush_locked(desc, fast);
263}
264
265static inline void tlb_n_used_entries_inc(CPUArchState *env, uintptr_t mmu_idx)
266{
267 env_tlb(env)->d[mmu_idx].n_used_entries++;
268}
269
270static inline void tlb_n_used_entries_dec(CPUArchState *env, uintptr_t mmu_idx)
271{
272 env_tlb(env)->d[mmu_idx].n_used_entries--;
273}
274
275void tlb_init(CPUState *cpu)
276{
277 CPUArchState *env = cpu->env_ptr;
278 int64_t now = get_clock_realtime();
279 int i;
280
281 qemu_spin_init(&env_tlb(env)->c.lock);

    /* All tlbs are initialized flushed. */
284 env_tlb(env)->c.dirty = 0;
285
286 for (i = 0; i < NB_MMU_MODES; i++) {
287 tlb_mmu_init(&env_tlb(env)->d[i], &env_tlb(env)->f[i], now);
288 }
289}
290
291void tlb_destroy(CPUState *cpu)
292{
293 CPUArchState *env = cpu->env_ptr;
294 int i;
295
296 qemu_spin_destroy(&env_tlb(env)->c.lock);
297 for (i = 0; i < NB_MMU_MODES; i++) {
298 CPUTLBDesc *desc = &env_tlb(env)->d[i];
299 CPUTLBDescFast *fast = &env_tlb(env)->f[i];
300
301 g_free(fast->table);
302 g_free(desc->iotlb);
303 }
304}
305

/*
 * flush_all_helper: queue @fn on every vCPU other than @src.
 * The caller is responsible for running (or queueing) the work on
 * @src itself.
 */
313static void flush_all_helper(CPUState *src, run_on_cpu_func fn,
314 run_on_cpu_data d)
315{
316 CPUState *cpu;
317
318 CPU_FOREACH(cpu) {
319 if (cpu != src) {
320 async_run_on_cpu(cpu, fn, d);
321 }
322 }
323}
324
325void tlb_flush_counts(size_t *pfull, size_t *ppart, size_t *pelide)
326{
327 CPUState *cpu;
328 size_t full = 0, part = 0, elide = 0;
329
330 CPU_FOREACH(cpu) {
331 CPUArchState *env = cpu->env_ptr;
332
333 full += qatomic_read(&env_tlb(env)->c.full_flush_count);
334 part += qatomic_read(&env_tlb(env)->c.part_flush_count);
335 elide += qatomic_read(&env_tlb(env)->c.elide_flush_count);
336 }
337 *pfull = full;
338 *ppart = part;
339 *pelide = elide;
340}
341
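/*
 * Flush, on the current cpu, the TLBs for the mmu indexes selected by the
 * bitmask in @data, skipping any index that is not marked dirty (i.e. has
 * seen no new entries since its last flush).
 */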
342static void tlb_flush_by_mmuidx_async_work(CPUState *cpu, run_on_cpu_data data)
343{
344 CPUArchState *env = cpu->env_ptr;
345 uint16_t asked = data.host_int;
346 uint16_t all_dirty, work, to_clean;
347 int64_t now = get_clock_realtime();
348
349 assert_cpu_is_self(cpu);
350
351 tlb_debug("mmu_idx:0x%04" PRIx16 "\n", asked);
352
353 qemu_spin_lock(&env_tlb(env)->c.lock);
354
355 all_dirty = env_tlb(env)->c.dirty;
356 to_clean = asked & all_dirty;
357 all_dirty &= ~to_clean;
358 env_tlb(env)->c.dirty = all_dirty;
359
360 for (work = to_clean; work != 0; work &= work - 1) {
361 int mmu_idx = ctz32(work);
362 tlb_flush_one_mmuidx_locked(env, mmu_idx, now);
363 }
364
365 qemu_spin_unlock(&env_tlb(env)->c.lock);
366
367 cpu_tb_jmp_cache_clear(cpu);
368
369 if (to_clean == ALL_MMUIDX_BITS) {
370 qatomic_set(&env_tlb(env)->c.full_flush_count,
371 env_tlb(env)->c.full_flush_count + 1);
372 } else {
373 qatomic_set(&env_tlb(env)->c.part_flush_count,
374 env_tlb(env)->c.part_flush_count + ctpop16(to_clean));
375 if (to_clean != asked) {
376 qatomic_set(&env_tlb(env)->c.elide_flush_count,
377 env_tlb(env)->c.elide_flush_count +
378 ctpop16(asked & ~to_clean));
379 }
380 }
381}
382
383void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap)
384{
385 tlb_debug("mmu_idx: 0x%" PRIx16 "\n", idxmap);
386
387 if (cpu->created && !qemu_cpu_is_self(cpu)) {
388 async_run_on_cpu(cpu, tlb_flush_by_mmuidx_async_work,
389 RUN_ON_CPU_HOST_INT(idxmap));
390 } else {
391 tlb_flush_by_mmuidx_async_work(cpu, RUN_ON_CPU_HOST_INT(idxmap));
392 }
393}
394
395void tlb_flush(CPUState *cpu)
396{
397 tlb_flush_by_mmuidx(cpu, ALL_MMUIDX_BITS);
398}
399
400void tlb_flush_by_mmuidx_all_cpus(CPUState *src_cpu, uint16_t idxmap)
401{
402 const run_on_cpu_func fn = tlb_flush_by_mmuidx_async_work;
403
404 tlb_debug("mmu_idx: 0x%"PRIx16"\n", idxmap);
405
406 flush_all_helper(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
407 fn(src_cpu, RUN_ON_CPU_HOST_INT(idxmap));
408}
409
410void tlb_flush_all_cpus(CPUState *src_cpu)
411{
412 tlb_flush_by_mmuidx_all_cpus(src_cpu, ALL_MMUIDX_BITS);
413}
414
415void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *src_cpu, uint16_t idxmap)
416{
417 const run_on_cpu_func fn = tlb_flush_by_mmuidx_async_work;
418
419 tlb_debug("mmu_idx: 0x%"PRIx16"\n", idxmap);
420
421 flush_all_helper(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
422 async_safe_run_on_cpu(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
423}
424
425void tlb_flush_all_cpus_synced(CPUState *src_cpu)
426{
427 tlb_flush_by_mmuidx_all_cpus_synced(src_cpu, ALL_MMUIDX_BITS);
428}
429
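/*
 * Return true if @page (restricted to the significant bits in @mask)
 * matches the entry for any access type: read, write or execute.
 */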
430static bool tlb_hit_page_mask_anyprot(CPUTLBEntry *tlb_entry,
431 target_ulong page, target_ulong mask)
432{
433 page &= mask;
434 mask &= TARGET_PAGE_MASK | TLB_INVALID_MASK;
435
436 return (page == (tlb_entry->addr_read & mask) ||
437 page == (tlb_addr_write(tlb_entry) & mask) ||
438 page == (tlb_entry->addr_code & mask));
439}
440
441static inline bool tlb_hit_page_anyprot(CPUTLBEntry *tlb_entry,
442 target_ulong page)
443{
444 return tlb_hit_page_mask_anyprot(tlb_entry, page, -1);
445}
446

/**
 * tlb_entry_is_empty - return true if the entry is not in use
 * @te: pointer to CPUTLBEntry
 */
451static inline bool tlb_entry_is_empty(const CPUTLBEntry *te)
452{
453 return te->addr_read == -1 && te->addr_write == -1 && te->addr_code == -1;
454}

/* Called with tlb_c.lock held */
457static bool tlb_flush_entry_mask_locked(CPUTLBEntry *tlb_entry,
458 target_ulong page,
459 target_ulong mask)
460{
461 if (tlb_hit_page_mask_anyprot(tlb_entry, page, mask)) {
462 memset(tlb_entry, -1, sizeof(*tlb_entry));
463 return true;
464 }
465 return false;
466}
467
468static inline bool tlb_flush_entry_locked(CPUTLBEntry *tlb_entry,
469 target_ulong page)
470{
471 return tlb_flush_entry_mask_locked(tlb_entry, page, -1);
472}

/* Called with tlb_c.lock held */
475static void tlb_flush_vtlb_page_mask_locked(CPUArchState *env, int mmu_idx,
476 target_ulong page,
477 target_ulong mask)
478{
479 CPUTLBDesc *d = &env_tlb(env)->d[mmu_idx];
480 int k;
481
482 assert_cpu_is_self(env_cpu(env));
483 for (k = 0; k < CPU_VTLB_SIZE; k++) {
484 if (tlb_flush_entry_mask_locked(&d->vtable[k], page, mask)) {
485 tlb_n_used_entries_dec(env, mmu_idx);
486 }
487 }
488}
489
490static inline void tlb_flush_vtlb_page_locked(CPUArchState *env, int mmu_idx,
491 target_ulong page)
492{
493 tlb_flush_vtlb_page_mask_locked(env, mmu_idx, page, -1);
494}
495
496static void tlb_flush_page_locked(CPUArchState *env, int midx,
497 target_ulong page)
498{
499 target_ulong lp_addr = env_tlb(env)->d[midx].large_page_addr;
500 target_ulong lp_mask = env_tlb(env)->d[midx].large_page_mask;

    /* Check if we need to flush due to large pages.  */
503 if ((page & lp_mask) == lp_addr) {
504 tlb_debug("forcing full flush midx %d ("
505 TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
506 midx, lp_addr, lp_mask);
507 tlb_flush_one_mmuidx_locked(env, midx, get_clock_realtime());
508 } else {
509 if (tlb_flush_entry_locked(tlb_entry(env, midx, page), page)) {
510 tlb_n_used_entries_dec(env, midx);
511 }
512 tlb_flush_vtlb_page_locked(env, midx, page);
513 }
514}
515

/**
 * tlb_flush_page_by_mmuidx_async_0:
 * @cpu: cpu on which to flush
 * @addr: page of virtual address to flush
 * @idxmap: set of mmu_idx to flush
 *
 * Helper for tlb_flush_page_by_mmuidx and friends, flush one page
 * at @addr from the tlbs indicated by @idxmap from @cpu.
 */
525static void tlb_flush_page_by_mmuidx_async_0(CPUState *cpu,
526 target_ulong addr,
527 uint16_t idxmap)
528{
529 CPUArchState *env = cpu->env_ptr;
530 int mmu_idx;
531
532 assert_cpu_is_self(cpu);
533
534 tlb_debug("page addr:" TARGET_FMT_lx " mmu_map:0x%x\n", addr, idxmap);
535
536 qemu_spin_lock(&env_tlb(env)->c.lock);
537 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
538 if ((idxmap >> mmu_idx) & 1) {
539 tlb_flush_page_locked(env, mmu_idx, addr);
540 }
541 }
542 qemu_spin_unlock(&env_tlb(env)->c.lock);
543
544 tb_flush_jmp_cache(cpu, addr);
545}

/**
 * tlb_flush_page_by_mmuidx_async_1:
 * @cpu: cpu on which to flush
 * @data: encoded addr + idxmap
 *
 * Helper for tlb_flush_page_by_mmuidx and friends, called through
 * async_run_on_cpu.  The idxmap parameter is encoded in the page
 * offset of the target_ptr field, which limits the set of mmu_idx
 * that can be passed via this method.
 */
557static void tlb_flush_page_by_mmuidx_async_1(CPUState *cpu,
558 run_on_cpu_data data)
559{
560 target_ulong addr_and_idxmap = (target_ulong) data.target_ptr;
561 target_ulong addr = addr_and_idxmap & TARGET_PAGE_MASK;
562 uint16_t idxmap = addr_and_idxmap & ~TARGET_PAGE_MASK;
563
564 tlb_flush_page_by_mmuidx_async_0(cpu, addr, idxmap);
565}
566
567typedef struct {
568 target_ulong addr;
569 uint16_t idxmap;
570} TLBFlushPageByMMUIdxData;

/**
 * tlb_flush_page_by_mmuidx_async_2:
 * @cpu: cpu on which to flush
 * @data: allocated addr + idxmap
 *
 * Helper for tlb_flush_page_by_mmuidx and friends, called through
 * async_run_on_cpu.  The addr + idxmap parameters are stored in a
 * TLBFlushPageByMMUIdxData structure that has been allocated
 * specifically for this helper, and is freed here once consumed.
 */
582static void tlb_flush_page_by_mmuidx_async_2(CPUState *cpu,
583 run_on_cpu_data data)
584{
585 TLBFlushPageByMMUIdxData *d = data.host_ptr;
586
587 tlb_flush_page_by_mmuidx_async_0(cpu, d->addr, d->idxmap);
588 g_free(d);
589}
590
591void tlb_flush_page_by_mmuidx(CPUState *cpu, target_ulong addr, uint16_t idxmap)
592{
593 tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%" PRIx16 "\n", addr, idxmap);

    /* This should already be page aligned */
596 addr &= TARGET_PAGE_MASK;
597
598 if (qemu_cpu_is_self(cpu)) {
599 tlb_flush_page_by_mmuidx_async_0(cpu, addr, idxmap);
600 } else if (idxmap < TARGET_PAGE_SIZE) {
        /*
         * The low bits of a page-aligned address are free, so when the
         * idxmap fits below TARGET_PAGE_SIZE we can encode it there and
         * avoid a heap allocation for the async work item.
         */
606 async_run_on_cpu(cpu, tlb_flush_page_by_mmuidx_async_1,
607 RUN_ON_CPU_TARGET_PTR(addr | idxmap));
608 } else {
609 TLBFlushPageByMMUIdxData *d = g_new(TLBFlushPageByMMUIdxData, 1);
610
611
612 d->addr = addr;
613 d->idxmap = idxmap;
614 async_run_on_cpu(cpu, tlb_flush_page_by_mmuidx_async_2,
615 RUN_ON_CPU_HOST_PTR(d));
616 }
617}
618
619void tlb_flush_page(CPUState *cpu, target_ulong addr)
620{
621 tlb_flush_page_by_mmuidx(cpu, addr, ALL_MMUIDX_BITS);
622}
623
624void tlb_flush_page_by_mmuidx_all_cpus(CPUState *src_cpu, target_ulong addr,
625 uint16_t idxmap)
626{
627 tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%"PRIx16"\n", addr, idxmap);
628
629
630 addr &= TARGET_PAGE_MASK;
631

    /*
     * Allocate memory to hold addr+idxmap only when needed.
     * See tlb_flush_page_by_mmuidx for details.
     */
636 if (idxmap < TARGET_PAGE_SIZE) {
637 flush_all_helper(src_cpu, tlb_flush_page_by_mmuidx_async_1,
638 RUN_ON_CPU_TARGET_PTR(addr | idxmap));
639 } else {
640 CPUState *dst_cpu;
641
642
643 CPU_FOREACH(dst_cpu) {
644 if (dst_cpu != src_cpu) {
645 TLBFlushPageByMMUIdxData *d
646 = g_new(TLBFlushPageByMMUIdxData, 1);
647
648 d->addr = addr;
649 d->idxmap = idxmap;
650 async_run_on_cpu(dst_cpu, tlb_flush_page_by_mmuidx_async_2,
651 RUN_ON_CPU_HOST_PTR(d));
652 }
653 }
654 }
655
656 tlb_flush_page_by_mmuidx_async_0(src_cpu, addr, idxmap);
657}
658
659void tlb_flush_page_all_cpus(CPUState *src, target_ulong addr)
660{
661 tlb_flush_page_by_mmuidx_all_cpus(src, addr, ALL_MMUIDX_BITS);
662}
663
664void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
665 target_ulong addr,
666 uint16_t idxmap)
667{
668 tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%"PRIx16"\n", addr, idxmap);
669
670
671 addr &= TARGET_PAGE_MASK;

    /*
     * Allocate memory to hold addr+idxmap only when needed.
     * See tlb_flush_page_by_mmuidx for details.
     */
677 if (idxmap < TARGET_PAGE_SIZE) {
678 flush_all_helper(src_cpu, tlb_flush_page_by_mmuidx_async_1,
679 RUN_ON_CPU_TARGET_PTR(addr | idxmap));
680 async_safe_run_on_cpu(src_cpu, tlb_flush_page_by_mmuidx_async_1,
681 RUN_ON_CPU_TARGET_PTR(addr | idxmap));
682 } else {
683 CPUState *dst_cpu;
684 TLBFlushPageByMMUIdxData *d;
685
686
687 CPU_FOREACH(dst_cpu) {
688 if (dst_cpu != src_cpu) {
689 d = g_new(TLBFlushPageByMMUIdxData, 1);
690 d->addr = addr;
691 d->idxmap = idxmap;
692 async_run_on_cpu(dst_cpu, tlb_flush_page_by_mmuidx_async_2,
693 RUN_ON_CPU_HOST_PTR(d));
694 }
695 }
696
697 d = g_new(TLBFlushPageByMMUIdxData, 1);
698 d->addr = addr;
699 d->idxmap = idxmap;
700 async_safe_run_on_cpu(src_cpu, tlb_flush_page_by_mmuidx_async_2,
701 RUN_ON_CPU_HOST_PTR(d));
702 }
703}
704
705void tlb_flush_page_all_cpus_synced(CPUState *src, target_ulong addr)
706{
707 tlb_flush_page_by_mmuidx_all_cpus_synced(src, addr, ALL_MMUIDX_BITS);
708}
709
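/*
 * Flush every TLB entry for mmu index @midx whose page lies within
 * [addr, addr + len), treating only the low @bits of the address as
 * significant.  Falls back to a full flush of @midx when that would
 * be cheaper (see the checks below).
 */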
710static void tlb_flush_range_locked(CPUArchState *env, int midx,
711 target_ulong addr, target_ulong len,
712 unsigned bits)
713{
714 CPUTLBDesc *d = &env_tlb(env)->d[midx];
715 CPUTLBDescFast *f = &env_tlb(env)->f[midx];
716 target_ulong mask = MAKE_64BIT_MASK(0, bits);

    /*
     * If @bits is smaller than the tlb size, there may be multiple entries
     * within the TLB; otherwise all addresses that match under @mask hit
     * the same TLB entry.  For now, just flush the entire TLB in that case.
     *
     * If @len is larger than the tlb size, then it will take longer to
     * flush one entry at a time than it will to flush all, so do that too.
     */
728 if (mask < f->mask || len > f->mask) {
729 tlb_debug("forcing full flush midx %d ("
730 TARGET_FMT_lx "/" TARGET_FMT_lx "+" TARGET_FMT_lx ")\n",
731 midx, addr, mask, len);
732 tlb_flush_one_mmuidx_locked(env, midx, get_clock_realtime());
733 return;
734 }

    /*
     * Check if we need to flush due to large pages.
     * Because large_page_mask contains all 1's from the msb,
     * we only need to test the end of the range.
     */
741 if (((addr + len - 1) & d->large_page_mask) == d->large_page_addr) {
742 tlb_debug("forcing full flush midx %d ("
743 TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
744 midx, d->large_page_addr, d->large_page_mask);
745 tlb_flush_one_mmuidx_locked(env, midx, get_clock_realtime());
746 return;
747 }
748
749 for (target_ulong i = 0; i < len; i += TARGET_PAGE_SIZE) {
750 target_ulong page = addr + i;
751 CPUTLBEntry *entry = tlb_entry(env, midx, page);
752
753 if (tlb_flush_entry_mask_locked(entry, page, mask)) {
754 tlb_n_used_entries_dec(env, midx);
755 }
756 tlb_flush_vtlb_page_mask_locked(env, midx, page, mask);
757 }
758}
759
760typedef struct {
761 target_ulong addr;
762 target_ulong len;
763 uint16_t idxmap;
764 uint16_t bits;
765} TLBFlushRangeData;
766
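/*
 * Worker for the range flushes: apply tlb_flush_range_locked to every
 * mmu index named in @d.idxmap, then drop any cached TB jumps for the
 * affected pages.
 */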
767static void tlb_flush_range_by_mmuidx_async_0(CPUState *cpu,
768 TLBFlushRangeData d)
769{
770 CPUArchState *env = cpu->env_ptr;
771 int mmu_idx;
772
773 assert_cpu_is_self(cpu);
774
775 tlb_debug("range:" TARGET_FMT_lx "/%u+" TARGET_FMT_lx " mmu_map:0x%x\n",
776 d.addr, d.bits, d.len, d.idxmap);
777
778 qemu_spin_lock(&env_tlb(env)->c.lock);
779 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
780 if ((d.idxmap >> mmu_idx) & 1) {
781 tlb_flush_range_locked(env, mmu_idx, d.addr, d.len, d.bits);
782 }
783 }
784 qemu_spin_unlock(&env_tlb(env)->c.lock);

    /*
     * If the length is larger than the jump cache size, then it will take
     * longer to clear each entry individually than it will to clear it all.
     */
790 if (d.len >= (TARGET_PAGE_SIZE * TB_JMP_CACHE_SIZE)) {
791 cpu_tb_jmp_cache_clear(cpu);
792 return;
793 }
794
795 for (target_ulong i = 0; i < d.len; i += TARGET_PAGE_SIZE) {
796 tb_flush_jmp_cache(cpu, d.addr + i);
797 }
798}
799
800static void tlb_flush_range_by_mmuidx_async_1(CPUState *cpu,
801 run_on_cpu_data data)
802{
803 TLBFlushRangeData *d = data.host_ptr;
804 tlb_flush_range_by_mmuidx_async_0(cpu, *d);
805 g_free(d);
806}
807
808void tlb_flush_range_by_mmuidx(CPUState *cpu, target_ulong addr,
809 target_ulong len, uint16_t idxmap,
810 unsigned bits)
811{
812 TLBFlushRangeData d;

    /*
     * If all bits are significant, and len is small,
     * this devolves to tlb_flush_page.
     */
818 if (bits >= TARGET_LONG_BITS && len <= TARGET_PAGE_SIZE) {
819 tlb_flush_page_by_mmuidx(cpu, addr, idxmap);
820 return;
821 }
822
823 if (bits < TARGET_PAGE_BITS) {
824 tlb_flush_by_mmuidx(cpu, idxmap);
825 return;
826 }
827
828
829 d.addr = addr & TARGET_PAGE_MASK;
830 d.len = len;
831 d.idxmap = idxmap;
832 d.bits = bits;
833
834 if (qemu_cpu_is_self(cpu)) {
835 tlb_flush_range_by_mmuidx_async_0(cpu, d);
836 } else {
837
838 TLBFlushRangeData *p = g_memdup(&d, sizeof(d));
839 async_run_on_cpu(cpu, tlb_flush_range_by_mmuidx_async_1,
840 RUN_ON_CPU_HOST_PTR(p));
841 }
842}
843
844void tlb_flush_page_bits_by_mmuidx(CPUState *cpu, target_ulong addr,
845 uint16_t idxmap, unsigned bits)
846{
847 tlb_flush_range_by_mmuidx(cpu, addr, TARGET_PAGE_SIZE, idxmap, bits);
848}
849
850void tlb_flush_range_by_mmuidx_all_cpus(CPUState *src_cpu,
851 target_ulong addr, target_ulong len,
852 uint16_t idxmap, unsigned bits)
853{
854 TLBFlushRangeData d;
855 CPUState *dst_cpu;

    /*
     * If all bits are significant, and len is small,
     * this devolves to tlb_flush_page.
     */
861 if (bits >= TARGET_LONG_BITS && len <= TARGET_PAGE_SIZE) {
862 tlb_flush_page_by_mmuidx_all_cpus(src_cpu, addr, idxmap);
863 return;
864 }
865
866 if (bits < TARGET_PAGE_BITS) {
867 tlb_flush_by_mmuidx_all_cpus(src_cpu, idxmap);
868 return;
869 }
870
871
872 d.addr = addr & TARGET_PAGE_MASK;
873 d.len = len;
874 d.idxmap = idxmap;
875 d.bits = bits;
876
877
878 CPU_FOREACH(dst_cpu) {
879 if (dst_cpu != src_cpu) {
880 TLBFlushRangeData *p = g_memdup(&d, sizeof(d));
881 async_run_on_cpu(dst_cpu,
882 tlb_flush_range_by_mmuidx_async_1,
883 RUN_ON_CPU_HOST_PTR(p));
884 }
885 }
886
887 tlb_flush_range_by_mmuidx_async_0(src_cpu, d);
888}
889
890void tlb_flush_page_bits_by_mmuidx_all_cpus(CPUState *src_cpu,
891 target_ulong addr,
892 uint16_t idxmap, unsigned bits)
893{
894 tlb_flush_range_by_mmuidx_all_cpus(src_cpu, addr, TARGET_PAGE_SIZE,
895 idxmap, bits);
896}
897
898void tlb_flush_range_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
899 target_ulong addr,
900 target_ulong len,
901 uint16_t idxmap,
902 unsigned bits)
903{
904 TLBFlushRangeData d, *p;
905 CPUState *dst_cpu;

    /*
     * If all bits are significant, and len is small,
     * this devolves to tlb_flush_page.
     */
911 if (bits >= TARGET_LONG_BITS && len <= TARGET_PAGE_SIZE) {
912 tlb_flush_page_by_mmuidx_all_cpus_synced(src_cpu, addr, idxmap);
913 return;
914 }
915
916 if (bits < TARGET_PAGE_BITS) {
917 tlb_flush_by_mmuidx_all_cpus_synced(src_cpu, idxmap);
918 return;
919 }
920
921
922 d.addr = addr & TARGET_PAGE_MASK;
923 d.len = len;
924 d.idxmap = idxmap;
925 d.bits = bits;
926
927
928 CPU_FOREACH(dst_cpu) {
929 if (dst_cpu != src_cpu) {
930 p = g_memdup(&d, sizeof(d));
931 async_run_on_cpu(dst_cpu, tlb_flush_range_by_mmuidx_async_1,
932 RUN_ON_CPU_HOST_PTR(p));
933 }
934 }
935
936 p = g_memdup(&d, sizeof(d));
937 async_safe_run_on_cpu(src_cpu, tlb_flush_range_by_mmuidx_async_1,
938 RUN_ON_CPU_HOST_PTR(p));
939}
940
941void tlb_flush_page_bits_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
942 target_ulong addr,
943 uint16_t idxmap,
944 unsigned bits)
945{
946 tlb_flush_range_by_mmuidx_all_cpus_synced(src_cpu, addr, TARGET_PAGE_SIZE,
947 idxmap, bits);
948}

/*
 * Clear the DIRTY_MEMORY_CODE flag for the RAM page at @ram_addr, so that
 * subsequent writes to code in that page are trapped and can invalidate
 * the translations derived from it.
 */
952void tlb_protect_code(ram_addr_t ram_addr)
953{
954 cpu_physical_memory_test_and_clear_dirty(ram_addr, TARGET_PAGE_SIZE,
955 DIRTY_MEMORY_CODE);
956}

/*
 * Mark the RAM page at @ram_addr dirty for DIRTY_MEMORY_CODE, so that
 * writes to it are no longer tested for self-modifying code.
 */
960void tlb_unprotect_code(ram_addr_t ram_addr)
961{
962 cpu_physical_memory_set_dirty_flag(ram_addr, DIRTY_MEMORY_CODE);
963}

/*
 * Dirty write flag handling.
 *
 * When the TCG fast path writes to a page it consults the TLB entry; if
 * any of the low TLB_* bits of addr_write are set, the slow path is taken,
 * which is how clean pages (and pages containing translated code) trap
 * their first write.  tlb_reset_dirty_range_locked re-arms that trap by
 * setting TLB_NOTDIRTY on every writable entry that maps into the given
 * ram range.
 *
 * Called with tlb_c.lock held.  Because another thread may read the entry
 * concurrently, addr_write is updated with qatomic_set, except for
 * TCG_OVERSIZED_GUEST builds where an atomic update is not possible.
 */
982static void tlb_reset_dirty_range_locked(CPUTLBEntry *tlb_entry,
983 uintptr_t start, uintptr_t length)
984{
985 uintptr_t addr = tlb_entry->addr_write;
986
987 if ((addr & (TLB_INVALID_MASK | TLB_MMIO |
988 TLB_DISCARD_WRITE | TLB_NOTDIRTY)) == 0) {
989 addr &= TARGET_PAGE_MASK;
990 addr += tlb_entry->addend;
991 if ((addr - start) < length) {
992#if TCG_OVERSIZED_GUEST
993 tlb_entry->addr_write |= TLB_NOTDIRTY;
994#else
995 qatomic_set(&tlb_entry->addr_write,
996 tlb_entry->addr_write | TLB_NOTDIRTY);
997#endif
998 }
999 }
1000}

/*
 * Called with tlb_c.lock held.
 * Called only from the vCPU context, i.e. the TLB's owner thread.
 */
1006static inline void copy_tlb_helper_locked(CPUTLBEntry *d, const CPUTLBEntry *s)
1007{
1008 *d = *s;
1009}

/*
 * Mark writable TLB entries that map into [start1, start1 + length) as
 * TLB_NOTDIRTY, so that the next write to those pages takes the slow path.
 */
1016void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length)
1017{
1018 CPUArchState *env;
1019
1020 int mmu_idx;
1021
1022 env = cpu->env_ptr;
1023 qemu_spin_lock(&env_tlb(env)->c.lock);
1024 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
1025 unsigned int i;
1026 unsigned int n = tlb_n_entries(&env_tlb(env)->f[mmu_idx]);
1027
1028 for (i = 0; i < n; i++) {
1029 tlb_reset_dirty_range_locked(&env_tlb(env)->f[mmu_idx].table[i],
1030 start1, length);
1031 }
1032
1033 for (i = 0; i < CPU_VTLB_SIZE; i++) {
1034 tlb_reset_dirty_range_locked(&env_tlb(env)->d[mmu_idx].vtable[i],
1035 start1, length);
1036 }
1037 }
1038 qemu_spin_unlock(&env_tlb(env)->c.lock);
1039}

/* Called with tlb_c.lock held */
1042static inline void tlb_set_dirty1_locked(CPUTLBEntry *tlb_entry,
1043 target_ulong vaddr)
1044{
1045 if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY)) {
1046 tlb_entry->addr_write = vaddr;
1047 }
1048}

/*
 * Update the TLB entries for virtual page @vaddr so that they are no
 * longer marked TLB_NOTDIRTY.
 */
1052void tlb_set_dirty(CPUState *cpu, target_ulong vaddr)
1053{
1054 CPUArchState *env = cpu->env_ptr;
1055 int mmu_idx;
1056
1057 assert_cpu_is_self(cpu);
1058
1059 vaddr &= TARGET_PAGE_MASK;
1060 qemu_spin_lock(&env_tlb(env)->c.lock);
1061 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
1062 tlb_set_dirty1_locked(tlb_entry(env, mmu_idx, vaddr), vaddr);
1063 }
1064
1065 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
1066 int k;
1067 for (k = 0; k < CPU_VTLB_SIZE; k++) {
1068 tlb_set_dirty1_locked(&env_tlb(env)->d[mmu_idx].vtable[k], vaddr);
1069 }
1070 }
1071 qemu_spin_unlock(&env_tlb(env)->c.lock);
1072}

/*
 * Our TLB does not support large pages, so remember the area covered by
 * large pages and trigger a full TLB flush if these are invalidated.
 */
1076static void tlb_add_large_page(CPUArchState *env, int mmu_idx,
1077 target_ulong vaddr, target_ulong size)
1078{
1079 target_ulong lp_addr = env_tlb(env)->d[mmu_idx].large_page_addr;
1080 target_ulong lp_mask = ~(size - 1);
1081
1082 if (lp_addr == (target_ulong)-1) {
1083
1084 lp_addr = vaddr;
1085 } else {
1086
1087
1088
1089 lp_mask &= env_tlb(env)->d[mmu_idx].large_page_mask;
1090 while (((lp_addr ^ vaddr) & lp_mask) != 0) {
1091 lp_mask <<= 1;
1092 }
1093 }
1094 env_tlb(env)->d[mmu_idx].large_page_addr = lp_addr & lp_mask;
1095 env_tlb(env)->d[mmu_idx].large_page_mask = lp_mask;
1096}

/*
 * Add a new TLB entry. At most one entry for a given virtual address
 * is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the
 * supplied size is only used by tlb_flush_page.
 *
 * Called from TCG-generated code, which is under an RCU read-side
 * critical section.
 */
1105void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
1106 hwaddr paddr, MemTxAttrs attrs, int prot,
1107 int mmu_idx, target_ulong size)
1108{
1109 CPUArchState *env = cpu->env_ptr;
1110 CPUTLB *tlb = env_tlb(env);
1111 CPUTLBDesc *desc = &tlb->d[mmu_idx];
1112 MemoryRegionSection *section;
1113 unsigned int index;
1114 target_ulong address;
1115 target_ulong write_address;
1116 uintptr_t addend;
1117 CPUTLBEntry *te, tn;
1118 hwaddr iotlb, xlat, sz, paddr_page;
1119 target_ulong vaddr_page;
1120 int asidx = cpu_asidx_from_attrs(cpu, attrs);
1121 int wp_flags;
1122 bool is_ram, is_romd;
1123
1124 assert_cpu_is_self(cpu);
1125
1126 if (size <= TARGET_PAGE_SIZE) {
1127 sz = TARGET_PAGE_SIZE;
1128 } else {
1129 tlb_add_large_page(env, mmu_idx, vaddr, size);
1130 sz = size;
1131 }
1132 vaddr_page = vaddr & TARGET_PAGE_MASK;
1133 paddr_page = paddr & TARGET_PAGE_MASK;
1134
1135 section = address_space_translate_for_iotlb(cpu, asidx, paddr_page,
1136 &xlat, &sz, attrs, &prot);
1137 assert(sz >= TARGET_PAGE_SIZE);
1138
1139 tlb_debug("vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx
1140 " prot=%x idx=%d\n",
1141 vaddr, paddr, prot, mmu_idx);
1142
1143 address = vaddr_page;
1144 if (size < TARGET_PAGE_SIZE) {
1145
1146 address |= TLB_INVALID_MASK;
1147 }
1148 if (attrs.byte_swap) {
1149 address |= TLB_BSWAP;
1150 }
1151
1152 is_ram = memory_region_is_ram(section->mr);
1153 is_romd = memory_region_is_romd(section->mr);
1154
1155 if (is_ram || is_romd) {
1156
1157 addend = (uintptr_t)memory_region_get_ram_ptr(section->mr) + xlat;
1158 } else {
1159
1160 addend = 0;
1161 }
1162
1163 write_address = address;
1164 if (is_ram) {
1165 iotlb = memory_region_get_ram_addr(section->mr) + xlat;

        /*
         * Computing is_clean is expensive; avoid all that unless
         * the page is actually writable.
         */
1170 if (prot & PAGE_WRITE) {
1171 if (section->readonly) {
1172 write_address |= TLB_DISCARD_WRITE;
1173 } else if (cpu_physical_memory_is_clean(iotlb)) {
1174 write_address |= TLB_NOTDIRTY;
1175 }
1176 }
1177 } else {
1178
1179 iotlb = memory_region_section_get_iotlb(cpu, section) + xlat;

        /*
         * Writes to romd devices must go through MMIO to enable write.
         * Reads to romd devices go through the ram_ptr found above,
         * but of course reads to I/O must go through MMIO.
         */
1185 write_address |= TLB_MMIO;
1186 if (!is_romd) {
1187 address = write_address;
1188 }
1189 }
1190
1191 wp_flags = cpu_watchpoint_address_matches(cpu, vaddr_page,
1192 TARGET_PAGE_SIZE);
1193
1194 index = tlb_index(env, mmu_idx, vaddr_page);
1195 te = tlb_entry(env, mmu_idx, vaddr_page);

    /*
     * Hold the TLB lock for the rest of the function. We could acquire/release
     * the lock several times in the function, but it is faster to amortize the
     * acquisition cost by acquiring it just once. Note that this leads to
     * a longer critical section, but this is not a concern since the TLB lock
     * is very short.
     */
1204 qemu_spin_lock(&tlb->c.lock);
1205
1206
1207 tlb->c.dirty |= 1 << mmu_idx;
1208
1209
1210 tlb_flush_vtlb_page_locked(env, mmu_idx, vaddr_page);
1211
    /*
     * Only evict the old entry to the victim tlb if it's for a
     * different page; otherwise just overwrite the stale data.
     */
1216 if (!tlb_hit_page_anyprot(te, vaddr_page) && !tlb_entry_is_empty(te)) {
1217 unsigned vidx = desc->vindex++ % CPU_VTLB_SIZE;
1218 CPUTLBEntry *tv = &desc->vtable[vidx];
1219
1220
1221 copy_tlb_helper_locked(tv, te);
1222 desc->viotlb[vidx] = desc->iotlb[index];
1223 tlb_n_used_entries_dec(env, mmu_idx);
1224 }

    /* refill the tlb */
    /*
     * At this point iotlb contains a physical section number in the lower
     * TARGET_PAGE_BITS, and either
     *  + the ram_addr_t of the page base of the target RAM (RAM)
     *  + the offset within section->mr of the page base (I/O, ROMD)
     * We subtract the vaddr_page (which is page aligned and thus won't
     * disturb the low bits) to give an offset which can be added to the
     * (non-page-aligned) vaddr of the eventual memory access to get
     * the MemoryRegion offset for the access. Note that the vaddr we
     * subtract here is that of the page base, and not the same as the
     * vaddr we add back in io_readx()/io_writex()/get_page_addr_code().
     */
1239 desc->iotlb[index].addr = iotlb - vaddr_page;
1240 desc->iotlb[index].attrs = attrs;
1241
1242
1243 tn.addend = addend - vaddr_page;
1244 if (prot & PAGE_READ) {
1245 tn.addr_read = address;
1246 if (wp_flags & BP_MEM_READ) {
1247 tn.addr_read |= TLB_WATCHPOINT;
1248 }
1249 } else {
1250 tn.addr_read = -1;
1251 }
1252
1253 if (prot & PAGE_EXEC) {
1254 tn.addr_code = address;
1255 } else {
1256 tn.addr_code = -1;
1257 }
1258
1259 tn.addr_write = -1;
1260 if (prot & PAGE_WRITE) {
1261 tn.addr_write = write_address;
1262 if (prot & PAGE_WRITE_INV) {
1263 tn.addr_write |= TLB_INVALID_MASK;
1264 }
1265 if (wp_flags & BP_MEM_WRITE) {
1266 tn.addr_write |= TLB_WATCHPOINT;
1267 }
1268 }
1269
1270 copy_tlb_helper_locked(te, &tn);
1271 tlb_n_used_entries_inc(env, mmu_idx);
1272 qemu_spin_unlock(&tlb->c.lock);
1273}

/*
 * Add a new TLB entry, but without specifying the memory
 * transaction attributes to be used.
 */
1278void tlb_set_page(CPUState *cpu, target_ulong vaddr,
1279 hwaddr paddr, int prot,
1280 int mmu_idx, target_ulong size)
1281{
1282 tlb_set_page_with_attrs(cpu, vaddr, paddr, MEMTXATTRS_UNSPECIFIED,
1283 prot, mmu_idx, size);
1284}
1285
1286static inline ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
1287{
1288 ram_addr_t ram_addr;
1289
1290 ram_addr = qemu_ram_addr_from_host(ptr);
1291 if (ram_addr == RAM_ADDR_INVALID) {
1292 error_report("Bad ram pointer %p", ptr);
1293 abort();
1294 }
1295 return ram_addr;
1296}
1297

/*
 * Note: tlb_fill() can trigger a resize of the TLB. This means that all of
 * the caller's prior references to the TLB table (e.g. CPUTLBEntry pointers)
 * must be discarded and looked up again (e.g. via tlb_entry()).
 */
1303static void tlb_fill(CPUState *cpu, target_ulong addr, int size,
1304 MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
1305{
1306 CPUClass *cc = CPU_GET_CLASS(cpu);
1307 bool ok;
1308

    /*
     * This is not a probe, so only valid return is success; failure
     * should result in exception + longjmp to the cpu loop.
     */
1313 ok = cc->tcg_ops->tlb_fill(cpu, addr, size,
1314 access_type, mmu_idx, false, retaddr);
1315 assert(ok);
1316}
1317
1318static inline void cpu_unaligned_access(CPUState *cpu, vaddr addr,
1319 MMUAccessType access_type,
1320 int mmu_idx, uintptr_t retaddr)
1321{
1322 CPUClass *cc = CPU_GET_CLASS(cpu);
1323
1324 cc->tcg_ops->do_unaligned_access(cpu, addr, access_type, mmu_idx, retaddr);
1325}
1326
1327static inline void cpu_transaction_failed(CPUState *cpu, hwaddr physaddr,
1328 vaddr addr, unsigned size,
1329 MMUAccessType access_type,
1330 int mmu_idx, MemTxAttrs attrs,
1331 MemTxResult response,
1332 uintptr_t retaddr)
1333{
1334 CPUClass *cc = CPU_GET_CLASS(cpu);
1335
1336 if (!cpu->ignore_memory_transaction_failures &&
1337 cc->tcg_ops->do_transaction_failed) {
1338 cc->tcg_ops->do_transaction_failed(cpu, physaddr, addr, size,
1339 access_type, mmu_idx, attrs,
1340 response, retaddr);
1341 }
1342}
1343
1344static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
1345 int mmu_idx, target_ulong addr, uintptr_t retaddr,
1346 MMUAccessType access_type, MemOp op)
1347{
1348 CPUState *cpu = env_cpu(env);
1349 hwaddr mr_offset;
1350 MemoryRegionSection *section;
1351 MemoryRegion *mr;
1352 uint64_t val;
1353 bool locked = false;
1354 MemTxResult r;
1355
1356 section = iotlb_to_section(cpu, iotlbentry->addr, iotlbentry->attrs);
1357 mr = section->mr;
1358 mr_offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr;
1359 cpu->mem_io_pc = retaddr;
1360 if (!cpu->can_do_io) {
1361 cpu_io_recompile(cpu, retaddr);
1362 }
1363
1364 if (!qemu_mutex_iothread_locked()) {
1365 qemu_mutex_lock_iothread();
1366 locked = true;
1367 }
1368 r = memory_region_dispatch_read(mr, mr_offset, &val, op, iotlbentry->attrs);
1369 if (r != MEMTX_OK) {
1370 hwaddr physaddr = mr_offset +
1371 section->offset_within_address_space -
1372 section->offset_within_region;
1373
1374 cpu_transaction_failed(cpu, physaddr, addr, memop_size(op), access_type,
1375 mmu_idx, iotlbentry->attrs, r, retaddr);
1376 }
1377 if (locked) {
1378 qemu_mutex_unlock_iothread();
1379 }
1380
1381 return val;
1382}
1383

/*
 * Save a potentially trashed IOTLB entry for later lookup by plugin.  This
 * is read by tlb_plugin_lookup if the iotlb entry no longer matches by the
 * time the memory callback runs (e.g. because io_writex triggered a TLB
 * flush or resize).
 */
1389static void save_iotlb_data(CPUState *cs, hwaddr addr,
1390 MemoryRegionSection *section, hwaddr mr_offset)
1391{
1392#ifdef CONFIG_PLUGIN
1393 SavedIOTLB *saved = &cs->saved_iotlb;
1394 saved->addr = addr;
1395 saved->section = section;
1396 saved->mr_offset = mr_offset;
1397#endif
1398}
1399
1400static void io_writex(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
1401 int mmu_idx, uint64_t val, target_ulong addr,
1402 uintptr_t retaddr, MemOp op)
1403{
1404 CPUState *cpu = env_cpu(env);
1405 hwaddr mr_offset;
1406 MemoryRegionSection *section;
1407 MemoryRegion *mr;
1408 bool locked = false;
1409 MemTxResult r;
1410
1411 section = iotlb_to_section(cpu, iotlbentry->addr, iotlbentry->attrs);
1412 mr = section->mr;
1413 mr_offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr;
1414 if (!cpu->can_do_io) {
1415 cpu_io_recompile(cpu, retaddr);
1416 }
1417 cpu->mem_io_pc = retaddr;

    /*
     * The memory_region_dispatch may trigger a flush/resize
     * so for plugins we save the iotlb_data just in case.
     */
1423 save_iotlb_data(cpu, iotlbentry->addr, section, mr_offset);
1424
1425 if (!qemu_mutex_iothread_locked()) {
1426 qemu_mutex_lock_iothread();
1427 locked = true;
1428 }
1429 r = memory_region_dispatch_write(mr, mr_offset, val, op, iotlbentry->attrs);
1430 if (r != MEMTX_OK) {
1431 hwaddr physaddr = mr_offset +
1432 section->offset_within_address_space -
1433 section->offset_within_region;
1434
1435 cpu_transaction_failed(cpu, physaddr, addr, memop_size(op),
1436 MMU_DATA_STORE, mmu_idx, iotlbentry->attrs, r,
1437 retaddr);
1438 }
1439 if (locked) {
1440 qemu_mutex_unlock_iothread();
1441 }
1442}
1443
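/*
 * Read the comparator word at byte offset @ofs within a TLB entry.
 * The write comparator can be updated concurrently (see
 * tlb_reset_dirty_range_locked), hence the atomic read when possible.
 */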
1444static inline target_ulong tlb_read_ofs(CPUTLBEntry *entry, size_t ofs)
1445{
1446#if TCG_OVERSIZED_GUEST
1447 return *(target_ulong *)((uintptr_t)entry + ofs);
1448#else
1449
1450 return qatomic_read((target_ulong *)((uintptr_t)entry + ofs));
1451#endif
1452}
1453

/*
 * Return true if ADDR is present in the victim tlb, and has been copied
 * back to the main tlb.
 */
1456static bool victim_tlb_hit(CPUArchState *env, size_t mmu_idx, size_t index,
1457 size_t elt_ofs, target_ulong page)
1458{
1459 size_t vidx;
1460
1461 assert_cpu_is_self(env_cpu(env));
1462 for (vidx = 0; vidx < CPU_VTLB_SIZE; ++vidx) {
1463 CPUTLBEntry *vtlb = &env_tlb(env)->d[mmu_idx].vtable[vidx];
1464 target_ulong cmp;
1465
1466
1467#if TCG_OVERSIZED_GUEST
1468 cmp = *(target_ulong *)((uintptr_t)vtlb + elt_ofs);
1469#else
1470 cmp = qatomic_read((target_ulong *)((uintptr_t)vtlb + elt_ofs));
1471#endif
1472
1473 if (cmp == page) {
1474
1475 CPUTLBEntry tmptlb, *tlb = &env_tlb(env)->f[mmu_idx].table[index];
1476
1477 qemu_spin_lock(&env_tlb(env)->c.lock);
1478 copy_tlb_helper_locked(&tmptlb, tlb);
1479 copy_tlb_helper_locked(tlb, vtlb);
1480 copy_tlb_helper_locked(vtlb, &tmptlb);
1481 qemu_spin_unlock(&env_tlb(env)->c.lock);
1482
1483 CPUIOTLBEntry tmpio, *io = &env_tlb(env)->d[mmu_idx].iotlb[index];
1484 CPUIOTLBEntry *vio = &env_tlb(env)->d[mmu_idx].viotlb[vidx];
1485 tmpio = *io; *io = *vio; *vio = tmpio;
1486 return true;
1487 }
1488 }
1489 return false;
1490}
1491
1492
1493#define VICTIM_TLB_HIT(TY, ADDR) \
1494 victim_tlb_hit(env, mmu_idx, index, offsetof(CPUTLBEntry, TY), \
1495 (ADDR) & TARGET_PAGE_MASK)
1496

/*
 * Return a ram_addr_t for the virtual address for execution.
 *
 * Return -1 if we can't translate and execute from an entire page
 * of RAM.  This will force us to execute by loading and translating
 * one insn at a time, without caching.
 *
 * NOTE: This function will trigger an exception if the page is
 * not executable.
 */
1507tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, target_ulong addr,
1508 void **hostp)
1509{
1510 uintptr_t mmu_idx = cpu_mmu_index(env, true);
1511 uintptr_t index = tlb_index(env, mmu_idx, addr);
1512 CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
1513 void *p;
1514
1515 if (unlikely(!tlb_hit(entry->addr_code, addr))) {
1516 if (!VICTIM_TLB_HIT(addr_code, addr)) {
1517 tlb_fill(env_cpu(env), addr, 0, MMU_INST_FETCH, mmu_idx, 0);
1518 index = tlb_index(env, mmu_idx, addr);
1519 entry = tlb_entry(env, mmu_idx, addr);
1520
1521 if (unlikely(entry->addr_code & TLB_INVALID_MASK)) {
            /*
             * The MMU protection covers a smaller range than a target
             * page, so we must redo the MMU check for every insn.
             */
1526 return -1;
1527 }
1528 }
1529 assert(tlb_hit(entry->addr_code, addr));
1530 }
1531
1532 if (unlikely(entry->addr_code & TLB_MMIO)) {
1533
1534 if (hostp) {
1535 *hostp = NULL;
1536 }
1537 return -1;
1538 }
1539
1540 p = (void *)((uintptr_t)addr + entry->addend);
1541 if (hostp) {
1542 *hostp = p;
1543 }
1544 return qemu_ram_addr_from_host_nofail(p);
1545}
1546
1547tb_page_addr_t get_page_addr_code(CPUArchState *env, target_ulong addr)
1548{
1549 return get_page_addr_code_hostp(env, addr, NULL);
1550}
1551
1552static void notdirty_write(CPUState *cpu, vaddr mem_vaddr, unsigned size,
1553 CPUIOTLBEntry *iotlbentry, uintptr_t retaddr)
1554{
1555 ram_addr_t ram_addr = mem_vaddr + iotlbentry->addr;
1556
1557 trace_memory_notdirty_write_access(mem_vaddr, ram_addr, size);
1558
1559 if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
1560 struct page_collection *pages
1561 = page_collection_lock(ram_addr, ram_addr + size);
1562 tb_invalidate_phys_page_fast(pages, ram_addr, size, retaddr);
1563 page_collection_unlock(pages);
1564 }
1565
    /*
     * Set both VGA and migration bits for simplicity and to remove
     * the notdirty callback faster.
     */
1570 cpu_physical_memory_set_dirty_range(ram_addr, size, DIRTY_CLIENTS_NOCODE);

    /* We remove the notdirty callback only if the code has been flushed. */
1573 if (!cpu_physical_memory_is_clean(ram_addr)) {
1574 trace_memory_notdirty_set_dirty(mem_vaddr);
1575 tlb_set_dirty(cpu, mem_vaddr);
1576 }
1577}
1578
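/*
 * Common guts of probe_access* below: look up (and if necessary fill) the
 * TLB entry for @addr.  On success *phost is set to the host address and
 * the TLB_* flags of the entry are returned; a nonfaulting miss returns
 * TLB_INVALID_MASK with *phost cleared.
 */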
1579static int probe_access_internal(CPUArchState *env, target_ulong addr,
1580 int fault_size, MMUAccessType access_type,
1581 int mmu_idx, bool nonfault,
1582 void **phost, uintptr_t retaddr)
1583{
1584 uintptr_t index = tlb_index(env, mmu_idx, addr);
1585 CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
1586 target_ulong tlb_addr, page_addr;
1587 size_t elt_ofs;
1588 int flags;
1589
1590 switch (access_type) {
1591 case MMU_DATA_LOAD:
1592 elt_ofs = offsetof(CPUTLBEntry, addr_read);
1593 break;
1594 case MMU_DATA_STORE:
1595 elt_ofs = offsetof(CPUTLBEntry, addr_write);
1596 break;
1597 case MMU_INST_FETCH:
1598 elt_ofs = offsetof(CPUTLBEntry, addr_code);
1599 break;
1600 default:
1601 g_assert_not_reached();
1602 }
1603 tlb_addr = tlb_read_ofs(entry, elt_ofs);
1604
1605 page_addr = addr & TARGET_PAGE_MASK;
1606 if (!tlb_hit_page(tlb_addr, page_addr)) {
1607 if (!victim_tlb_hit(env, mmu_idx, index, elt_ofs, page_addr)) {
1608 CPUState *cs = env_cpu(env);
1609 CPUClass *cc = CPU_GET_CLASS(cs);
1610
1611 if (!cc->tcg_ops->tlb_fill(cs, addr, fault_size, access_type,
1612 mmu_idx, nonfault, retaddr)) {
1613
1614 *phost = NULL;
1615 return TLB_INVALID_MASK;
1616 }
1617
1618
1619 entry = tlb_entry(env, mmu_idx, addr);
1620 }
1621 tlb_addr = tlb_read_ofs(entry, elt_ofs);
1622 }
1623 flags = tlb_addr & TLB_FLAGS_MASK;
1624
1625
1626 if (unlikely(flags & ~(TLB_WATCHPOINT | TLB_NOTDIRTY))) {
1627 *phost = NULL;
1628 return TLB_MMIO;
1629 }
1630
1631
1632 *phost = (void *)((uintptr_t)addr + entry->addend);
1633 return flags;
1634}
1635
1636int probe_access_flags(CPUArchState *env, target_ulong addr,
1637 MMUAccessType access_type, int mmu_idx,
1638 bool nonfault, void **phost, uintptr_t retaddr)
1639{
1640 int flags;
1641
1642 flags = probe_access_internal(env, addr, 0, access_type, mmu_idx,
1643 nonfault, phost, retaddr);
1644
1645
1646 if (unlikely(flags & TLB_NOTDIRTY)) {
1647 uintptr_t index = tlb_index(env, mmu_idx, addr);
1648 CPUIOTLBEntry *iotlbentry = &env_tlb(env)->d[mmu_idx].iotlb[index];
1649
1650 notdirty_write(env_cpu(env), addr, 1, iotlbentry, retaddr);
1651 flags &= ~TLB_NOTDIRTY;
1652 }
1653
1654 return flags;
1655}
1656
1657void *probe_access(CPUArchState *env, target_ulong addr, int size,
1658 MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
1659{
1660 void *host;
1661 int flags;
1662
1663 g_assert(-(addr | TARGET_PAGE_MASK) >= size);
1664
1665 flags = probe_access_internal(env, addr, size, access_type, mmu_idx,
1666 false, &host, retaddr);
1667
1668
1669 if (size == 0) {
1670 return NULL;
1671 }
1672
1673 if (unlikely(flags & (TLB_NOTDIRTY | TLB_WATCHPOINT))) {
1674 uintptr_t index = tlb_index(env, mmu_idx, addr);
1675 CPUIOTLBEntry *iotlbentry = &env_tlb(env)->d[mmu_idx].iotlb[index];
1676
1677
1678 if (flags & TLB_WATCHPOINT) {
1679 int wp_access = (access_type == MMU_DATA_STORE
1680 ? BP_MEM_WRITE : BP_MEM_READ);
1681 cpu_check_watchpoint(env_cpu(env), addr, size,
1682 iotlbentry->attrs, wp_access, retaddr);
1683 }
1684
1685
1686 if (flags & TLB_NOTDIRTY) {
1687 notdirty_write(env_cpu(env), addr, 1, iotlbentry, retaddr);
1688 }
1689 }
1690
1691 return host;
1692}
1693
1694void *tlb_vaddr_to_host(CPUArchState *env, abi_ptr addr,
1695 MMUAccessType access_type, int mmu_idx)
1696{
1697 void *host;
1698 int flags;
1699
1700 flags = probe_access_internal(env, addr, 0, access_type,
1701 mmu_idx, true, &host, 0);
1702
1703
1704 return flags ? NULL : host;
1705}
1706
1707#ifdef CONFIG_PLUGIN
/*
 * Perform a TLB lookup and populate the qemu_plugin_hwaddr structure.
 * This should be a hot path as we will have just looked this path up
 * in the softmmu lookup code (or helper). We don't handle re-fills or
 * checking the victim table. This is purely informational.
 *
 * This almost never fails as the memory access being instrumented
 * should have just filled the TLB. The one corner case is io_writex
 * which can cause TLB flushes and potential resizing of the TLBs
 * losing the information we need. In those cases we need to recover
 * data from a copy of the iotlbentry. As long as this always occurs
 * from the same thread (which a mem callback will be) this is safe.
 */
1722bool tlb_plugin_lookup(CPUState *cpu, target_ulong addr, int mmu_idx,
1723 bool is_store, struct qemu_plugin_hwaddr *data)
1724{
1725 CPUArchState *env = cpu->env_ptr;
1726 CPUTLBEntry *tlbe = tlb_entry(env, mmu_idx, addr);
1727 uintptr_t index = tlb_index(env, mmu_idx, addr);
1728 target_ulong tlb_addr = is_store ? tlb_addr_write(tlbe) : tlbe->addr_read;
1729
1730 if (likely(tlb_hit(tlb_addr, addr))) {
1731
1732 if (tlb_addr & TLB_MMIO) {
1733 CPUIOTLBEntry *iotlbentry;
1734 iotlbentry = &env_tlb(env)->d[mmu_idx].iotlb[index];
1735 data->is_io = true;
1736 data->v.io.section = iotlb_to_section(cpu, iotlbentry->addr, iotlbentry->attrs);
1737 data->v.io.offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr;
1738 } else {
1739 data->is_io = false;
1740 data->v.ram.hostaddr = (void *)((uintptr_t)addr + tlbe->addend);
1741 }
1742 return true;
1743 } else {
1744 SavedIOTLB *saved = &cpu->saved_iotlb;
1745 data->is_io = true;
1746 data->v.io.section = saved->section;
1747 data->v.io.offset = saved->mr_offset;
1748 return true;
1749 }
1750}
1751
1752#endif
1753
/*
 * Probe for an atomic operation.  Do not allow unaligned operations,
 * or io operations to proceed.  Return the host address.
 *
 * @prot may be PAGE_READ, PAGE_WRITE, or PAGE_READ|PAGE_WRITE.
 */
1760static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
1761 MemOpIdx oi, int size, int prot,
1762 uintptr_t retaddr)
1763{
1764 uintptr_t mmu_idx = get_mmuidx(oi);
1765 MemOp mop = get_memop(oi);
1766 int a_bits = get_alignment_bits(mop);
1767 uintptr_t index;
1768 CPUTLBEntry *tlbe;
1769 target_ulong tlb_addr;
1770 void *hostaddr;
1771
1772 tcg_debug_assert(mmu_idx < NB_MMU_MODES);
1773
1774
1775 retaddr -= GETPC_ADJ;
1776
1777
1778 if (unlikely(a_bits > 0 && (addr & ((1 << a_bits) - 1)))) {
1779
1780 cpu_unaligned_access(env_cpu(env), addr, MMU_DATA_STORE,
1781 mmu_idx, retaddr);
1782 }
1783
1784
1785 if (unlikely(addr & (size - 1))) {
1786
1787
1788
1789
1790 goto stop_the_world;
1791 }
1792
1793 index = tlb_index(env, mmu_idx, addr);
1794 tlbe = tlb_entry(env, mmu_idx, addr);
1795
1796
1797 if (prot & PAGE_WRITE) {
1798 tlb_addr = tlb_addr_write(tlbe);
1799 if (!tlb_hit(tlb_addr, addr)) {
1800 if (!VICTIM_TLB_HIT(addr_write, addr)) {
1801 tlb_fill(env_cpu(env), addr, size,
1802 MMU_DATA_STORE, mmu_idx, retaddr);
1803 index = tlb_index(env, mmu_idx, addr);
1804 tlbe = tlb_entry(env, mmu_idx, addr);
1805 }
1806 tlb_addr = tlb_addr_write(tlbe) & ~TLB_INVALID_MASK;
1807 }
1808
1809
1810 if ((prot & PAGE_READ) &&
1811 unlikely(tlbe->addr_read != (tlb_addr & ~TLB_NOTDIRTY))) {
1812 tlb_fill(env_cpu(env), addr, size,
1813 MMU_DATA_LOAD, mmu_idx, retaddr);
1814
1815
1816
1817
1818
1819 goto stop_the_world;
1820 }
1821 } else {
1822 tlb_addr = tlbe->addr_read;
1823 if (!tlb_hit(tlb_addr, addr)) {
1824 if (!VICTIM_TLB_HIT(addr_write, addr)) {
1825 tlb_fill(env_cpu(env), addr, size,
1826 MMU_DATA_LOAD, mmu_idx, retaddr);
1827 index = tlb_index(env, mmu_idx, addr);
1828 tlbe = tlb_entry(env, mmu_idx, addr);
1829 }
1830 tlb_addr = tlbe->addr_read & ~TLB_INVALID_MASK;
1831 }
1832 }
1833
1834
1835 if (unlikely(tlb_addr & TLB_MMIO)) {
1836
1837
1838 goto stop_the_world;
1839 }
1840
1841 hostaddr = (void *)((uintptr_t)addr + tlbe->addend);
1842
1843 if (unlikely(tlb_addr & TLB_NOTDIRTY)) {
1844 notdirty_write(env_cpu(env), addr, size,
1845 &env_tlb(env)->d[mmu_idx].iotlb[index], retaddr);
1846 }
1847
1848 return hostaddr;
1849
1850 stop_the_world:
1851 cpu_loop_exit_atomic(env_cpu(env), retaddr);
1852}
1853

/*
 * Sanity-check the MemOpIdx that was passed to a load/store helper: the
 * size and byte-swap encoded in it must match what that particular helper
 * expects.  Only checked in CONFIG_DEBUG_TCG builds.
 */
1865static void validate_memop(MemOpIdx oi, MemOp expected)
1866{
1867#ifdef CONFIG_DEBUG_TCG
1868 MemOp have = get_memop(oi) & (MO_SIZE | MO_BSWAP);
1869 assert(have == expected);
1870#endif
1871}
1872

/*
 * Load Helpers
 *
 * We support two different access types. SOFTMMU_CODE_ACCESS is
 * specifically for reading instructions from system memory. It is
 * called by the translation loop and in some helpers where the code
 * is disassembled. It shouldn't be called directly by guest code.
 */
1882typedef uint64_t FullLoadHelper(CPUArchState *env, target_ulong addr,
1883 MemOpIdx oi, uintptr_t retaddr);
1884
1885static inline uint64_t QEMU_ALWAYS_INLINE
1886load_memop(const void *haddr, MemOp op)
1887{
1888 switch (op) {
1889 case MO_UB:
1890 return ldub_p(haddr);
1891 case MO_BEUW:
1892 return lduw_be_p(haddr);
1893 case MO_LEUW:
1894 return lduw_le_p(haddr);
1895 case MO_BEUL:
1896 return (uint32_t)ldl_be_p(haddr);
1897 case MO_LEUL:
1898 return (uint32_t)ldl_le_p(haddr);
1899 case MO_BEUQ:
1900 return ldq_be_p(haddr);
1901 case MO_LEUQ:
1902 return ldq_le_p(haddr);
1903 default:
1904 qemu_build_not_reached();
1905 }
1906}
1907
1908static inline uint64_t QEMU_ALWAYS_INLINE
1909load_helper(CPUArchState *env, target_ulong addr, MemOpIdx oi,
1910 uintptr_t retaddr, MemOp op, bool code_read,
1911 FullLoadHelper *full_load)
1912{
1913 const size_t tlb_off = code_read ?
1914 offsetof(CPUTLBEntry, addr_code) : offsetof(CPUTLBEntry, addr_read);
1915 const MMUAccessType access_type =
1916 code_read ? MMU_INST_FETCH : MMU_DATA_LOAD;
1917 const unsigned a_bits = get_alignment_bits(get_memop(oi));
1918 const size_t size = memop_size(op);
1919 uintptr_t mmu_idx = get_mmuidx(oi);
1920 uintptr_t index;
1921 CPUTLBEntry *entry;
1922 target_ulong tlb_addr;
1923 void *haddr;
1924 uint64_t res;
1925
1926 tcg_debug_assert(mmu_idx < NB_MMU_MODES);
1927
1928
1929 if (addr & ((1 << a_bits) - 1)) {
1930 cpu_unaligned_access(env_cpu(env), addr, access_type,
1931 mmu_idx, retaddr);
1932 }
1933
1934 index = tlb_index(env, mmu_idx, addr);
1935 entry = tlb_entry(env, mmu_idx, addr);
1936 tlb_addr = code_read ? entry->addr_code : entry->addr_read;
1937
1938
1939 if (!tlb_hit(tlb_addr, addr)) {
1940 if (!victim_tlb_hit(env, mmu_idx, index, tlb_off,
1941 addr & TARGET_PAGE_MASK)) {
1942 tlb_fill(env_cpu(env), addr, size,
1943 access_type, mmu_idx, retaddr);
1944 index = tlb_index(env, mmu_idx, addr);
1945 entry = tlb_entry(env, mmu_idx, addr);
1946 }
1947 tlb_addr = code_read ? entry->addr_code : entry->addr_read;
1948 tlb_addr &= ~TLB_INVALID_MASK;
1949 }
1950
1951
1952 if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
1953 CPUIOTLBEntry *iotlbentry;
1954 bool need_swap;
1955
1956
1957 if ((addr & (size - 1)) != 0) {
1958 goto do_unaligned_access;
1959 }
1960
1961 iotlbentry = &env_tlb(env)->d[mmu_idx].iotlb[index];
1962
1963
1964 if (unlikely(tlb_addr & TLB_WATCHPOINT)) {
1965
1966 cpu_check_watchpoint(env_cpu(env), addr, size,
1967 iotlbentry->attrs, BP_MEM_READ, retaddr);
1968 }
1969
1970 need_swap = size > 1 && (tlb_addr & TLB_BSWAP);
1971
1972
1973 if (likely(tlb_addr & TLB_MMIO)) {
1974 return io_readx(env, iotlbentry, mmu_idx, addr, retaddr,
1975 access_type, op ^ (need_swap * MO_BSWAP));
1976 }
1977
1978 haddr = (void *)((uintptr_t)addr + entry->addend);
1979
1980
1981
1982
1983
1984
1985 if (unlikely(need_swap)) {
1986 return load_memop(haddr, op ^ MO_BSWAP);
1987 }
1988 return load_memop(haddr, op);
1989 }
1990
1991
1992 if (size > 1
1993 && unlikely((addr & ~TARGET_PAGE_MASK) + size - 1
1994 >= TARGET_PAGE_SIZE)) {
1995 target_ulong addr1, addr2;
1996 uint64_t r1, r2;
1997 unsigned shift;
1998 do_unaligned_access:
1999 addr1 = addr & ~((target_ulong)size - 1);
2000 addr2 = addr1 + size;
2001 r1 = full_load(env, addr1, oi, retaddr);
2002 r2 = full_load(env, addr2, oi, retaddr);
2003 shift = (addr & (size - 1)) * 8;
2004
2005 if (memop_big_endian(op)) {
2006
2007 res = (r1 << shift) | (r2 >> ((size * 8) - shift));
2008 } else {
2009
2010 res = (r1 >> shift) | (r2 << ((size * 8) - shift));
2011 }
2012 return res & MAKE_64BIT_MASK(0, size * 8);
2013 }
2014
2015 haddr = (void *)((uintptr_t)addr + entry->addend);
2016 return load_memop(haddr, op);
2017}
2018

/*
 * For the benefit of TCG generated code, we want to avoid the
 * complication of ABI-specific return type promotion and always
 * return a value extended to the register size of the host. This is
 * tcg_target_long, except in the case of a 32-bit host and 64-bit
 * data, and for that we always have uint64_t.
 *
 * We don't bother with this widened value for SOFTMMU_CODE_ACCESS.
 */
2029static uint64_t full_ldub_mmu(CPUArchState *env, target_ulong addr,
2030 MemOpIdx oi, uintptr_t retaddr)
2031{
2032 validate_memop(oi, MO_UB);
2033 return load_helper(env, addr, oi, retaddr, MO_UB, false, full_ldub_mmu);
2034}
2035
2036tcg_target_ulong helper_ret_ldub_mmu(CPUArchState *env, target_ulong addr,
2037 MemOpIdx oi, uintptr_t retaddr)
2038{
2039 return full_ldub_mmu(env, addr, oi, retaddr);
2040}
2041
2042static uint64_t full_le_lduw_mmu(CPUArchState *env, target_ulong addr,
2043 MemOpIdx oi, uintptr_t retaddr)
2044{
2045 validate_memop(oi, MO_LEUW);
2046 return load_helper(env, addr, oi, retaddr, MO_LEUW, false,
2047 full_le_lduw_mmu);
2048}
2049
2050tcg_target_ulong helper_le_lduw_mmu(CPUArchState *env, target_ulong addr,
2051 MemOpIdx oi, uintptr_t retaddr)
2052{
2053 return full_le_lduw_mmu(env, addr, oi, retaddr);
2054}
2055
2056static uint64_t full_be_lduw_mmu(CPUArchState *env, target_ulong addr,
2057 MemOpIdx oi, uintptr_t retaddr)
2058{
2059 validate_memop(oi, MO_BEUW);
2060 return load_helper(env, addr, oi, retaddr, MO_BEUW, false,
2061 full_be_lduw_mmu);
2062}
2063
2064tcg_target_ulong helper_be_lduw_mmu(CPUArchState *env, target_ulong addr,
2065 MemOpIdx oi, uintptr_t retaddr)
2066{
2067 return full_be_lduw_mmu(env, addr, oi, retaddr);
2068}
2069
2070static uint64_t full_le_ldul_mmu(CPUArchState *env, target_ulong addr,
2071 MemOpIdx oi, uintptr_t retaddr)
2072{
2073 validate_memop(oi, MO_LEUL);
2074 return load_helper(env, addr, oi, retaddr, MO_LEUL, false,
2075 full_le_ldul_mmu);
2076}
2077
2078tcg_target_ulong helper_le_ldul_mmu(CPUArchState *env, target_ulong addr,
2079 MemOpIdx oi, uintptr_t retaddr)
2080{
2081 return full_le_ldul_mmu(env, addr, oi, retaddr);
2082}
2083
2084static uint64_t full_be_ldul_mmu(CPUArchState *env, target_ulong addr,
2085 MemOpIdx oi, uintptr_t retaddr)
2086{
2087 validate_memop(oi, MO_BEUL);
2088 return load_helper(env, addr, oi, retaddr, MO_BEUL, false,
2089 full_be_ldul_mmu);
2090}
2091
2092tcg_target_ulong helper_be_ldul_mmu(CPUArchState *env, target_ulong addr,
2093 MemOpIdx oi, uintptr_t retaddr)
2094{
2095 return full_be_ldul_mmu(env, addr, oi, retaddr);
2096}
2097
2098uint64_t helper_le_ldq_mmu(CPUArchState *env, target_ulong addr,
2099 MemOpIdx oi, uintptr_t retaddr)
2100{
2101 validate_memop(oi, MO_LEUQ);
2102 return load_helper(env, addr, oi, retaddr, MO_LEUQ, false,
2103 helper_le_ldq_mmu);
2104}
2105
2106uint64_t helper_be_ldq_mmu(CPUArchState *env, target_ulong addr,
2107 MemOpIdx oi, uintptr_t retaddr)
2108{
2109 validate_memop(oi, MO_BEUQ);
2110 return load_helper(env, addr, oi, retaddr, MO_BEUQ, false,
2111 helper_be_ldq_mmu);
2112}
2113

/*
 * Provide signed versions of the load routines as well.  We can of course
 * avoid this for 64-bit data, or for 32-bit data on 32-bit host.
 */
2120tcg_target_ulong helper_ret_ldsb_mmu(CPUArchState *env, target_ulong addr,
2121 MemOpIdx oi, uintptr_t retaddr)
2122{
2123 return (int8_t)helper_ret_ldub_mmu(env, addr, oi, retaddr);
2124}
2125
2126tcg_target_ulong helper_le_ldsw_mmu(CPUArchState *env, target_ulong addr,
2127 MemOpIdx oi, uintptr_t retaddr)
2128{
2129 return (int16_t)helper_le_lduw_mmu(env, addr, oi, retaddr);
2130}
2131
2132tcg_target_ulong helper_be_ldsw_mmu(CPUArchState *env, target_ulong addr,
2133 MemOpIdx oi, uintptr_t retaddr)
2134{
2135 return (int16_t)helper_be_lduw_mmu(env, addr, oi, retaddr);
2136}
2137
2138tcg_target_ulong helper_le_ldsl_mmu(CPUArchState *env, target_ulong addr,
2139 MemOpIdx oi, uintptr_t retaddr)
2140{
2141 return (int32_t)helper_le_ldul_mmu(env, addr, oi, retaddr);
2142}
2143
2144tcg_target_ulong helper_be_ldsl_mmu(CPUArchState *env, target_ulong addr,
2145 MemOpIdx oi, uintptr_t retaddr)
2146{
2147 return (int32_t)helper_be_ldul_mmu(env, addr, oi, retaddr);
2148}
2149

/*
 * Load helpers for cpu_ldst.h: perform the load and then fire the
 * plugin memory callback.
 */
2154static inline uint64_t cpu_load_helper(CPUArchState *env, abi_ptr addr,
2155 MemOpIdx oi, uintptr_t retaddr,
2156 FullLoadHelper *full_load)
2157{
2158 uint64_t ret;
2159
2160 ret = full_load(env, addr, oi, retaddr);
2161 qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
2162 return ret;
2163}
2164
2165uint8_t cpu_ldb_mmu(CPUArchState *env, abi_ptr addr, MemOpIdx oi, uintptr_t ra)
2166{
2167 return cpu_load_helper(env, addr, oi, ra, full_ldub_mmu);
2168}
2169
2170uint16_t cpu_ldw_be_mmu(CPUArchState *env, abi_ptr addr,
2171 MemOpIdx oi, uintptr_t ra)
2172{
2173 return cpu_load_helper(env, addr, oi, ra, full_be_lduw_mmu);
2174}
2175
2176uint32_t cpu_ldl_be_mmu(CPUArchState *env, abi_ptr addr,
2177 MemOpIdx oi, uintptr_t ra)
2178{
2179 return cpu_load_helper(env, addr, oi, ra, full_be_ldul_mmu);
2180}
2181
2182uint64_t cpu_ldq_be_mmu(CPUArchState *env, abi_ptr addr,
2183 MemOpIdx oi, uintptr_t ra)
2184{
2185 return cpu_load_helper(env, addr, oi, ra, helper_be_ldq_mmu);
2186}
2187
2188uint16_t cpu_ldw_le_mmu(CPUArchState *env, abi_ptr addr,
2189 MemOpIdx oi, uintptr_t ra)
2190{
2191 return cpu_load_helper(env, addr, oi, ra, full_le_lduw_mmu);
2192}
2193
2194uint32_t cpu_ldl_le_mmu(CPUArchState *env, abi_ptr addr,
2195 MemOpIdx oi, uintptr_t ra)
2196{
2197 return cpu_load_helper(env, addr, oi, ra, full_le_ldul_mmu);
2198}
2199
2200uint64_t cpu_ldq_le_mmu(CPUArchState *env, abi_ptr addr,
2201 MemOpIdx oi, uintptr_t ra)
2202{
2203 return cpu_load_helper(env, addr, oi, ra, helper_le_ldq_mmu);
2204}
2205

/*
 * Store Helpers
 */
2210static inline void QEMU_ALWAYS_INLINE
2211store_memop(void *haddr, uint64_t val, MemOp op)
2212{
2213 switch (op) {
2214 case MO_UB:
2215 stb_p(haddr, val);
2216 break;
2217 case MO_BEUW:
2218 stw_be_p(haddr, val);
2219 break;
2220 case MO_LEUW:
2221 stw_le_p(haddr, val);
2222 break;
2223 case MO_BEUL:
2224 stl_be_p(haddr, val);
2225 break;
2226 case MO_LEUL:
2227 stl_le_p(haddr, val);
2228 break;
2229 case MO_BEUQ:
2230 stq_be_p(haddr, val);
2231 break;
2232 case MO_LEUQ:
2233 stq_le_p(haddr, val);
2234 break;
2235 default:
2236 qemu_build_not_reached();
2237 }
2238}
2239
2240static void full_stb_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
2241 MemOpIdx oi, uintptr_t retaddr);
2242
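/*
 * Slow path for a store that spans two pages (or otherwise needs to be
 * split): the value is written out one byte at a time via full_stb_mmu,
 * after both pages have been brought into the TLB and watchpoints checked.
 */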
2243static void __attribute__((noinline))
2244store_helper_unaligned(CPUArchState *env, target_ulong addr, uint64_t val,
2245 uintptr_t retaddr, size_t size, uintptr_t mmu_idx,
2246 bool big_endian)
2247{
2248 const size_t tlb_off = offsetof(CPUTLBEntry, addr_write);
2249 uintptr_t index, index2;
2250 CPUTLBEntry *entry, *entry2;
2251 target_ulong page1, page2, tlb_addr, tlb_addr2;
2252 MemOpIdx oi;
2253 size_t size2;
2254 int i;
2255

    /*
     * Ensure the second page is in the TLB.  Note that the first page
     * is already guaranteed to be filled, and that the second page
     * cannot evict the first.
     */
2262 page1 = addr & TARGET_PAGE_MASK;
2263 page2 = (addr + size) & TARGET_PAGE_MASK;
2264 size2 = (addr + size) & ~TARGET_PAGE_MASK;
2265 index2 = tlb_index(env, mmu_idx, page2);
2266 entry2 = tlb_entry(env, mmu_idx, page2);
2267
2268 tlb_addr2 = tlb_addr_write(entry2);
2269 if (page1 != page2 && !tlb_hit_page(tlb_addr2, page2)) {
2270 if (!victim_tlb_hit(env, mmu_idx, index2, tlb_off, page2)) {
2271 tlb_fill(env_cpu(env), page2, size2, MMU_DATA_STORE,
2272 mmu_idx, retaddr);
2273 index2 = tlb_index(env, mmu_idx, page2);
2274 entry2 = tlb_entry(env, mmu_idx, page2);
2275 }
2276 tlb_addr2 = tlb_addr_write(entry2);
2277 }
2278
2279 index = tlb_index(env, mmu_idx, addr);
2280 entry = tlb_entry(env, mmu_idx, addr);
2281 tlb_addr = tlb_addr_write(entry);
2282

    /*
     * Handle watchpoints.  Since this may trap, all checks
     * must happen before any store.
     */
2287 if (unlikely(tlb_addr & TLB_WATCHPOINT)) {
2288 cpu_check_watchpoint(env_cpu(env), addr, size - size2,
2289 env_tlb(env)->d[mmu_idx].iotlb[index].attrs,
2290 BP_MEM_WRITE, retaddr);
2291 }
2292 if (unlikely(tlb_addr2 & TLB_WATCHPOINT)) {
2293 cpu_check_watchpoint(env_cpu(env), page2, size2,
2294 env_tlb(env)->d[mmu_idx].iotlb[index2].attrs,
2295 BP_MEM_WRITE, retaddr);
2296 }
2297

    /*
     * XXX: not efficient, but simple.
     * This loop must go in the forward direction to avoid issues
     * with self-modifying code in Windows 64-bit.
     */
2303 oi = make_memop_idx(MO_UB, mmu_idx);
2304 if (big_endian) {
2305 for (i = 0; i < size; ++i) {
            /* Big-endian extract.  */
2307 uint8_t val8 = val >> (((size - 1) * 8) - (i * 8));
2308 full_stb_mmu(env, addr + i, val8, oi, retaddr);
2309 }
2310 } else {
2311 for (i = 0; i < size; ++i) {
            /* Little-endian extract.  */
2313 uint8_t val8 = val >> (i * 8);
2314 full_stb_mmu(env, addr + i, val8, oi, retaddr);
2315 }
2316 }
2317}
2318
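/*
 * Common store helper: look up (filling on miss) the TLB entry for @addr,
 * then either write directly to host memory, dispatch to io_writex for
 * MMIO, or fall back to store_helper_unaligned for page-crossing stores.
 */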
static inline void QEMU_ALWAYS_INLINE
store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
             MemOpIdx oi, uintptr_t retaddr, MemOp op)
{
    const size_t tlb_off = offsetof(CPUTLBEntry, addr_write);
    const unsigned a_bits = get_alignment_bits(get_memop(oi));
    const size_t size = memop_size(op);
    uintptr_t mmu_idx = get_mmuidx(oi);
    uintptr_t index;
    CPUTLBEntry *entry;
    target_ulong tlb_addr;
    void *haddr;

    tcg_debug_assert(mmu_idx < NB_MMU_MODES);

    /* Handle CPU specific unaligned behaviour */
    if (addr & ((1 << a_bits) - 1)) {
        cpu_unaligned_access(env_cpu(env), addr, MMU_DATA_STORE,
                             mmu_idx, retaddr);
    }

    index = tlb_index(env, mmu_idx, addr);
    entry = tlb_entry(env, mmu_idx, addr);
    tlb_addr = tlb_addr_write(entry);

    /* If the TLB entry is for a different page, reload and try again.  */
    if (!tlb_hit(tlb_addr, addr)) {
        if (!victim_tlb_hit(env, mmu_idx, index, tlb_off,
                            addr & TARGET_PAGE_MASK)) {
            tlb_fill(env_cpu(env), addr, size, MMU_DATA_STORE,
                     mmu_idx, retaddr);
            index = tlb_index(env, mmu_idx, addr);
            entry = tlb_entry(env, mmu_idx, addr);
        }
        tlb_addr = tlb_addr_write(entry) & ~TLB_INVALID_MASK;
    }

    /* Handle anything that isn't just a straight memory access.  */
    if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
        CPUIOTLBEntry *iotlbentry;
        bool need_swap;

        /* For anything that is unaligned, recurse through byte stores.  */
        if ((addr & (size - 1)) != 0) {
            goto do_unaligned_access;
        }

        iotlbentry = &env_tlb(env)->d[mmu_idx].iotlb[index];

        /* Handle watchpoints.  */
        if (unlikely(tlb_addr & TLB_WATCHPOINT)) {
            /* On watchpoint hit, this will longjmp out.  */
            cpu_check_watchpoint(env_cpu(env), addr, size,
                                 iotlbentry->attrs, BP_MEM_WRITE, retaddr);
        }

        need_swap = size > 1 && (tlb_addr & TLB_BSWAP);

        /* Handle I/O access.  */
        if (tlb_addr & TLB_MMIO) {
            io_writex(env, iotlbentry, mmu_idx, val, addr, retaddr,
                      op ^ (need_swap * MO_BSWAP));
            return;
        }

        /* Ignore writes to ROM.  */
        if (unlikely(tlb_addr & TLB_DISCARD_WRITE)) {
            return;
        }

        /* Handle clean RAM pages.  */
        if (tlb_addr & TLB_NOTDIRTY) {
            notdirty_write(env_cpu(env), addr, size, iotlbentry, retaddr);
        }

        haddr = (void *)((uintptr_t)addr + entry->addend);

        /*
         * Keep these two store_memop separate to ensure that the compiler
         * is able to fold the entire function to a single instruction.
         * There is a build-time assert inside to remind you of this.  ;-)
         */
        if (unlikely(need_swap)) {
            store_memop(haddr, val, op ^ MO_BSWAP);
        } else {
            store_memop(haddr, val, op);
        }
        return;
    }

    /* Handle slow unaligned access (it spans two pages or IO).  */
    if (size > 1
        && unlikely((addr & ~TARGET_PAGE_MASK) + size - 1
                    >= TARGET_PAGE_SIZE)) {
    do_unaligned_access:
        store_helper_unaligned(env, addr, val, retaddr, size,
                               mmu_idx, memop_big_endian(op));
        return;
    }

    haddr = (void *)((uintptr_t)addr + entry->addend);
    store_memop(haddr, val, op);
}

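/*
 * The static full_*_mmu functions take a uint64_t value regardless of
 * operand size so that they match the FullStoreHelper signature used by
 * the cpu_st*_mmu wrappers further down.  The helper_*_mmu entry points
 * called from TCG-generated code forward to them; for 64-bit stores the
 * helper already has the right signature and is used directly.
 */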
static void __attribute__((noinline))
full_stb_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
             MemOpIdx oi, uintptr_t retaddr)
{
    validate_memop(oi, MO_UB);
    store_helper(env, addr, val, oi, retaddr, MO_UB);
}

void helper_ret_stb_mmu(CPUArchState *env, target_ulong addr, uint8_t val,
                        MemOpIdx oi, uintptr_t retaddr)
{
    full_stb_mmu(env, addr, val, oi, retaddr);
}

static void full_le_stw_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
                            MemOpIdx oi, uintptr_t retaddr)
{
    validate_memop(oi, MO_LEUW);
    store_helper(env, addr, val, oi, retaddr, MO_LEUW);
}

void helper_le_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
                       MemOpIdx oi, uintptr_t retaddr)
{
    full_le_stw_mmu(env, addr, val, oi, retaddr);
}

static void full_be_stw_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
                            MemOpIdx oi, uintptr_t retaddr)
{
    validate_memop(oi, MO_BEUW);
    store_helper(env, addr, val, oi, retaddr, MO_BEUW);
}

void helper_be_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
                       MemOpIdx oi, uintptr_t retaddr)
{
    full_be_stw_mmu(env, addr, val, oi, retaddr);
}

static void full_le_stl_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
                            MemOpIdx oi, uintptr_t retaddr)
{
    validate_memop(oi, MO_LEUL);
    store_helper(env, addr, val, oi, retaddr, MO_LEUL);
}

void helper_le_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
                       MemOpIdx oi, uintptr_t retaddr)
{
    full_le_stl_mmu(env, addr, val, oi, retaddr);
}

static void full_be_stl_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
                            MemOpIdx oi, uintptr_t retaddr)
{
    validate_memop(oi, MO_BEUL);
    store_helper(env, addr, val, oi, retaddr, MO_BEUL);
}

void helper_be_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
                       MemOpIdx oi, uintptr_t retaddr)
{
    full_be_stl_mmu(env, addr, val, oi, retaddr);
}

void helper_le_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
                       MemOpIdx oi, uintptr_t retaddr)
{
    validate_memop(oi, MO_LEUQ);
    store_helper(env, addr, val, oi, retaddr, MO_LEUQ);
}

void helper_be_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
                       MemOpIdx oi, uintptr_t retaddr)
{
    validate_memop(oi, MO_BEUQ);
    store_helper(env, addr, val, oi, retaddr, MO_BEUQ);
}

/*
 * Store Helpers for cpu_ldst.h
 */

typedef void FullStoreHelper(CPUArchState *env, target_ulong addr,
                             uint64_t val, MemOpIdx oi, uintptr_t retaddr);

static inline void cpu_store_helper(CPUArchState *env, target_ulong addr,
                                    uint64_t val, MemOpIdx oi, uintptr_t ra,
                                    FullStoreHelper *full_store)
{
    full_store(env, addr, val, oi, ra);
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
}
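
/*
 * Illustrative sketch only: a target helper that wants plugin-visible
 * store semantics would typically build the MemOpIdx itself and pass
 * its own return address, e.g.
 *
 *     MemOpIdx oi = make_memop_idx(MO_LEUL | MO_ALIGN, mmu_idx);
 *     cpu_stl_le_mmu(env, addr, val, oi, GETPC());
 */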

void cpu_stb_mmu(CPUArchState *env, target_ulong addr, uint8_t val,
                 MemOpIdx oi, uintptr_t retaddr)
{
    cpu_store_helper(env, addr, val, oi, retaddr, full_stb_mmu);
}

void cpu_stw_be_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
                    MemOpIdx oi, uintptr_t retaddr)
{
    cpu_store_helper(env, addr, val, oi, retaddr, full_be_stw_mmu);
}

void cpu_stl_be_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
                    MemOpIdx oi, uintptr_t retaddr)
{
    cpu_store_helper(env, addr, val, oi, retaddr, full_be_stl_mmu);
}

void cpu_stq_be_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
                    MemOpIdx oi, uintptr_t retaddr)
{
    cpu_store_helper(env, addr, val, oi, retaddr, helper_be_stq_mmu);
}

void cpu_stw_le_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
                    MemOpIdx oi, uintptr_t retaddr)
{
    cpu_store_helper(env, addr, val, oi, retaddr, full_le_stw_mmu);
}

void cpu_stl_le_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
                    MemOpIdx oi, uintptr_t retaddr)
{
    cpu_store_helper(env, addr, val, oi, retaddr, full_le_stl_mmu);
}

void cpu_stq_le_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
                    MemOpIdx oi, uintptr_t retaddr)
{
    cpu_store_helper(env, addr, val, oi, retaddr, helper_le_stq_mmu);
}

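/*
 * The remaining cpu_ld* and cpu_st* convenience wrappers (the _data, _ra
 * and _mmuidx_ra variants declared in cpu_ldst.h) are provided by the
 * shared include below, built on the cpu_*_mmu functions above.
 */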
#include "ldst_common.c.inc"

/*
 * First set of functions passes in OI and RETADDR.
 * This makes them callable from other helpers.
 */

#define ATOMIC_NAME(X) \
    glue(glue(glue(cpu_atomic_ ## X, SUFFIX), END), _mmu)

#define ATOMIC_MMU_CLEANUP

#include "atomic_common.c.inc"

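/*
 * Instantiate the cpu_atomic_* helpers for each operand size by
 * re-including atomic_template.h with DATA_SIZE defined; the template
 * undefines DATA_SIZE again once its helpers have been emitted.
 */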
#define DATA_SIZE 1
#include "atomic_template.h"

#define DATA_SIZE 2
#include "atomic_template.h"

#define DATA_SIZE 4
#include "atomic_template.h"

#ifdef CONFIG_ATOMIC64
#define DATA_SIZE 8
#include "atomic_template.h"
#endif

#if HAVE_CMPXCHG128 || HAVE_ATOMIC128
#define DATA_SIZE 16
#include "atomic_template.h"
#endif

/* Code access functions.  */

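/*
 * These helpers are used for instruction fetch at translation time: they
 * use the "ifetch" MMU index (cpu_mmu_index(env, true)), and the public
 * cpu_ld*_code wrappers pass a retaddr of 0 because they are not called
 * from within a translated block.
 */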
static uint64_t full_ldub_code(CPUArchState *env, target_ulong addr,
                               MemOpIdx oi, uintptr_t retaddr)
{
    return load_helper(env, addr, oi, retaddr, MO_8, true, full_ldub_code);
}

uint32_t cpu_ldub_code(CPUArchState *env, abi_ptr addr)
{
    MemOpIdx oi = make_memop_idx(MO_UB, cpu_mmu_index(env, true));
    return full_ldub_code(env, addr, oi, 0);
}

static uint64_t full_lduw_code(CPUArchState *env, target_ulong addr,
                               MemOpIdx oi, uintptr_t retaddr)
{
    return load_helper(env, addr, oi, retaddr, MO_TEUW, true, full_lduw_code);
}

uint32_t cpu_lduw_code(CPUArchState *env, abi_ptr addr)
{
    MemOpIdx oi = make_memop_idx(MO_TEUW, cpu_mmu_index(env, true));
    return full_lduw_code(env, addr, oi, 0);
}

static uint64_t full_ldl_code(CPUArchState *env, target_ulong addr,
                              MemOpIdx oi, uintptr_t retaddr)
{
    return load_helper(env, addr, oi, retaddr, MO_TEUL, true, full_ldl_code);
}

uint32_t cpu_ldl_code(CPUArchState *env, abi_ptr addr)
{
    MemOpIdx oi = make_memop_idx(MO_TEUL, cpu_mmu_index(env, true));
    return full_ldl_code(env, addr, oi, 0);
}

static uint64_t full_ldq_code(CPUArchState *env, target_ulong addr,
                              MemOpIdx oi, uintptr_t retaddr)
{
    return load_helper(env, addr, oi, retaddr, MO_TEUQ, true, full_ldq_code);
}

uint64_t cpu_ldq_code(CPUArchState *env, abi_ptr addr)
{
    MemOpIdx oi = make_memop_idx(MO_TEUQ, cpu_mmu_index(env, true));
    return full_ldq_code(env, addr, oi, 0);
}