/*
 *  Common CPU TLB handling
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
20#include "qemu/osdep.h"
21#include "qemu/main-loop.h"
22#include "hw/core/tcg-cpu-ops.h"
23#include "exec/exec-all.h"
24#include "exec/memory.h"
25#include "exec/cpu_ldst.h"
26#include "exec/cputlb.h"
27#include "exec/memory-internal.h"
28#include "exec/ram_addr.h"
29#include "tcg/tcg.h"
30#include "qemu/error-report.h"
31#include "exec/log.h"
32#include "exec/helper-proto.h"
33#include "qemu/atomic.h"
34#include "qemu/atomic128.h"
35#include "exec/translate-all.h"
36#include "trace/trace-root.h"
37#include "tb-hash.h"
38#include "internal.h"
39#ifdef CONFIG_PLUGIN
40#include "qemu/plugin-memory.h"
41#endif
42#include "tcg/tcg-ldst.h"

/* DEBUG defines, enable DEBUG_TLB_LOG to log to the CPU_LOG_MMU target */
/* #define DEBUG_TLB */
/* #define DEBUG_TLB_LOG */

48#ifdef DEBUG_TLB
49# define DEBUG_TLB_GATE 1
50# ifdef DEBUG_TLB_LOG
51# define DEBUG_TLB_LOG_GATE 1
52# else
53# define DEBUG_TLB_LOG_GATE 0
54# endif
55#else
56# define DEBUG_TLB_GATE 0
57# define DEBUG_TLB_LOG_GATE 0
58#endif
59
60#define tlb_debug(fmt, ...) do { \
61 if (DEBUG_TLB_LOG_GATE) { \
62 qemu_log_mask(CPU_LOG_MMU, "%s: " fmt, __func__, \
63 ## __VA_ARGS__); \
64 } else if (DEBUG_TLB_GATE) { \
65 fprintf(stderr, "%s: " fmt, __func__, ## __VA_ARGS__); \
66 } \
67} while (0)
68
69#define assert_cpu_is_self(cpu) do { \
70 if (DEBUG_TLB_GATE) { \
71 g_assert(!(cpu)->created || qemu_cpu_is_self(cpu)); \
72 } \
73 } while (0)
74

/* run_on_cpu_data.target_ptr should always be big enough for a
 * target_ulong even on 32 bit builds
 */
77QEMU_BUILD_BUG_ON(sizeof(target_ulong) > sizeof(run_on_cpu_data));

/* We currently can't handle more than 16 bits in the MMUIDX bitmask.
 */
81QEMU_BUILD_BUG_ON(NB_MMU_MODES > 16);
82#define ALL_MMUIDX_BITS ((1 << NB_MMU_MODES) - 1)
83
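/*
 * fast->mask doubles as the index mask and an encoding of the table size:
 * mask == (n_entries - 1) << CPU_TLB_ENTRY_BITS, so the helpers below can
 * recover both the number of entries and the size of the table in bytes.
 */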
84static inline size_t tlb_n_entries(CPUTLBDescFast *fast)
85{
86 return (fast->mask >> CPU_TLB_ENTRY_BITS) + 1;
87}
88
89static inline size_t sizeof_tlb(CPUTLBDescFast *fast)
90{
91 return fast->mask + (1 << CPU_TLB_ENTRY_BITS);
92}
93
94static void tlb_window_reset(CPUTLBDesc *desc, int64_t ns,
95 size_t max_entries)
96{
97 desc->window_begin_ns = ns;
98 desc->window_max_entries = max_entries;
99}
100
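/*
 * Zap the TB_JMP_PAGE_SIZE consecutive jump-cache slots that can hold
 * translation blocks starting on @page_addr.
 */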
101static void tb_jmp_cache_clear_page(CPUState *cpu, target_ulong page_addr)
102{
103 unsigned int i, i0 = tb_jmp_cache_hash_page(page_addr);
104
105 for (i = 0; i < TB_JMP_PAGE_SIZE; i++) {
106 qatomic_set(&cpu->tb_jmp_cache[i0 + i], NULL);
107 }
108}
109
110static void tb_flush_jmp_cache(CPUState *cpu, target_ulong addr)
111{
    /* Discard jump cache entries for any tb which might potentially
       overlap the flushed page.  */
114 tb_jmp_cache_clear_page(cpu, addr - TARGET_PAGE_SIZE);
115 tb_jmp_cache_clear_page(cpu, addr);
116}

/**
 * tlb_mmu_resize_locked() - perform TLB resize bookkeeping; resize if necessary
 * @desc: The CPUTLBDesc portion of the TLB
 * @fast: The CPUTLBDescFast portion of the same TLB
 *
 * Called with tlb_lock held.
 *
 * We have two main constraints when resizing a TLB: (1) we only resize it
 * on a TLB flush (otherwise we'd have to take a perf hit by either rehashing
 * the array or unnecessarily flushing it), which means we do not control how
 * frequently the resizing can occur; (2) we don't have access to the guest's
 * future scheduling decisions, and therefore have to decide the magnitude of
 * the resize based on past observations.
 *
 * In general, a memory-hungry process can benefit greatly from an
 * appropriately sized TLB, since a guest process faulting in many memory
 * addresses generates misses that all take the slow path, whereas an
 * oversized TLB wastes memory and takes longer to flush (flushing is a
 * memset of the whole table).
 *
 * To achieve near-optimal performance for all kinds of workloads, we:
 *
 * 1. Aggressively increase the size of the TLB when the use rate of the
 * TLB being flushed is high, since it is likely that in the near future this
 * memory-hungry process will execute again, and its memory hungriness will
 * probably be similar.
 *
 * 2. Slowly reduce the size of the TLB as the use rate declines over a
 * reasonably large time window (100 ms).  The rationale is that if in such
 * a window we have not observed a high use rate, it is unlikely that we will
 * in the near future; once the window expires we downsize based on the
 * maximum use rate seen during the window.
 *
 * 3. Try to keep the maximum use rate in a time window in the 30-70% range,
 * so that the TLB is neither oversized nor undersized.
 */
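/*
 * Worked example of the heuristic above (illustrative numbers only):
 * with old_size == 1024, a window_max_entries of 800 is a 78% use rate,
 * so the table doubles to 2048 (capped at 1 << CPU_TLB_DYN_MAX_BITS).
 * With only 200 entries used (19%) and the 100 ms window expired,
 * pow2ceil(200) == 256 would give an expected 78% use rate, so the
 * 70% guard below doubles it and the table shrinks to 512 instead.
 */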
158static void tlb_mmu_resize_locked(CPUTLBDesc *desc, CPUTLBDescFast *fast,
159 int64_t now)
160{
161 size_t old_size = tlb_n_entries(fast);
162 size_t rate;
163 size_t new_size = old_size;
164 int64_t window_len_ms = 100;
165 int64_t window_len_ns = window_len_ms * 1000 * 1000;
166 bool window_expired = now > desc->window_begin_ns + window_len_ns;
167
168 if (desc->n_used_entries > desc->window_max_entries) {
169 desc->window_max_entries = desc->n_used_entries;
170 }
171 rate = desc->window_max_entries * 100 / old_size;
172
173 if (rate > 70) {
174 new_size = MIN(old_size << 1, 1 << CPU_TLB_DYN_MAX_BITS);
175 } else if (rate < 30 && window_expired) {
176 size_t ceil = pow2ceil(desc->window_max_entries);
177 size_t expected_rate = desc->window_max_entries * 100 / ceil;

        /*
         * Avoid undersizing when the max number of entries seen is just below
         * a pow2.  For instance, if max_entries == 1025, the expected use rate
         * would be 1025/2048 == 50%.  However, if max_entries == 1023, we'd
         * get 1023/1024 == 99.9% use rate, so we'd likely end up doubling the
         * size again soon.  Thus, make sure that the expected use rate remains
         * below 70%, which is important for performance.
         */
189 if (expected_rate > 70) {
190 ceil *= 2;
191 }
192 new_size = MAX(ceil, 1 << CPU_TLB_DYN_MIN_BITS);
193 }
194
195 if (new_size == old_size) {
196 if (window_expired) {
197 tlb_window_reset(desc, now, desc->n_used_entries);
198 }
199 return;
200 }
201
202 g_free(fast->table);
203 g_free(desc->iotlb);
204
205 tlb_window_reset(desc, now, 0);
206
207 fast->mask = (new_size - 1) << CPU_TLB_ENTRY_BITS;
208 fast->table = g_try_new(CPUTLBEntry, new_size);
209 desc->iotlb = g_try_new(CPUIOTLBEntry, new_size);
210

    /*
     * If the allocations fail, try smaller sizes.  We just freed some
     * memory, so going back to half of new_size has a good chance of
     * working.  Increased memory pressure elsewhere in the system might
     * cause the allocations to fail though, so we progressively reduce
     * the allocation size, aborting if we cannot even allocate the
     * smallest TLB we support.
     */
218 while (fast->table == NULL || desc->iotlb == NULL) {
219 if (new_size == (1 << CPU_TLB_DYN_MIN_BITS)) {
220 error_report("%s: %s", __func__, strerror(errno));
221 abort();
222 }
223 new_size = MAX(new_size >> 1, 1 << CPU_TLB_DYN_MIN_BITS);
224 fast->mask = (new_size - 1) << CPU_TLB_ENTRY_BITS;
225
226 g_free(fast->table);
227 g_free(desc->iotlb);
228 fast->table = g_try_new(CPUTLBEntry, new_size);
229 desc->iotlb = g_try_new(CPUIOTLBEntry, new_size);
230 }
231}
232
233static void tlb_mmu_flush_locked(CPUTLBDesc *desc, CPUTLBDescFast *fast)
234{
235 desc->n_used_entries = 0;
236 desc->large_page_addr = -1;
237 desc->large_page_mask = -1;
238 desc->vindex = 0;
239 memset(fast->table, -1, sizeof_tlb(fast));
240 memset(desc->vtable, -1, sizeof(desc->vtable));
241}
242
243static void tlb_flush_one_mmuidx_locked(CPUArchState *env, int mmu_idx,
244 int64_t now)
245{
246 CPUTLBDesc *desc = &env_tlb(env)->d[mmu_idx];
247 CPUTLBDescFast *fast = &env_tlb(env)->f[mmu_idx];
248
249 tlb_mmu_resize_locked(desc, fast, now);
250 tlb_mmu_flush_locked(desc, fast);
251}
252
253static void tlb_mmu_init(CPUTLBDesc *desc, CPUTLBDescFast *fast, int64_t now)
254{
255 size_t n_entries = 1 << CPU_TLB_DYN_DEFAULT_BITS;
256
257 tlb_window_reset(desc, now, 0);
258 desc->n_used_entries = 0;
259 fast->mask = (n_entries - 1) << CPU_TLB_ENTRY_BITS;
260 fast->table = g_new(CPUTLBEntry, n_entries);
261 desc->iotlb = g_new(CPUIOTLBEntry, n_entries);
262 tlb_mmu_flush_locked(desc, fast);
263}
264
265static inline void tlb_n_used_entries_inc(CPUArchState *env, uintptr_t mmu_idx)
266{
267 env_tlb(env)->d[mmu_idx].n_used_entries++;
268}
269
270static inline void tlb_n_used_entries_dec(CPUArchState *env, uintptr_t mmu_idx)
271{
272 env_tlb(env)->d[mmu_idx].n_used_entries--;
273}
274
275void tlb_init(CPUState *cpu)
276{
277 CPUArchState *env = cpu->env_ptr;
278 int64_t now = get_clock_realtime();
279 int i;
280
281 qemu_spin_init(&env_tlb(env)->c.lock);

    /* All tlbs are initialized flushed. */
284 env_tlb(env)->c.dirty = 0;
285
286 for (i = 0; i < NB_MMU_MODES; i++) {
287 tlb_mmu_init(&env_tlb(env)->d[i], &env_tlb(env)->f[i], now);
288 }
289}
290
291void tlb_destroy(CPUState *cpu)
292{
293 CPUArchState *env = cpu->env_ptr;
294 int i;
295
296 qemu_spin_destroy(&env_tlb(env)->c.lock);
297 for (i = 0; i < NB_MMU_MODES; i++) {
298 CPUTLBDesc *desc = &env_tlb(env)->d[i];
299 CPUTLBDescFast *fast = &env_tlb(env)->f[i];
300
301 g_free(fast->table);
302 g_free(desc->iotlb);
303 }
304}

/*
 * flush_all_helper: run fn across all cpus
 *
 * Queue @fn as async work on every cpu other than @src; the calling
 * (source) cpu is expected to run @fn itself, either directly or as
 * "safe" work, creating the synchronisation point required by the
 * *_all_cpus_synced variants below.
 */
313static void flush_all_helper(CPUState *src, run_on_cpu_func fn,
314 run_on_cpu_data d)
315{
316 CPUState *cpu;
317
318 CPU_FOREACH(cpu) {
319 if (cpu != src) {
320 async_run_on_cpu(cpu, fn, d);
321 }
322 }
323}
324
325void tlb_flush_counts(size_t *pfull, size_t *ppart, size_t *pelide)
326{
327 CPUState *cpu;
328 size_t full = 0, part = 0, elide = 0;
329
330 CPU_FOREACH(cpu) {
331 CPUArchState *env = cpu->env_ptr;
332
333 full += qatomic_read(&env_tlb(env)->c.full_flush_count);
334 part += qatomic_read(&env_tlb(env)->c.part_flush_count);
335 elide += qatomic_read(&env_tlb(env)->c.elide_flush_count);
336 }
337 *pfull = full;
338 *ppart = part;
339 *pelide = elide;
340}
341
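/*
 * Worker run on the TLB's owning vCPU: flush every mmu_idx requested in
 * @data.host_int that is still marked dirty, clear the TB jump cache,
 * and account the flush as full, partial or elided.
 */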
342static void tlb_flush_by_mmuidx_async_work(CPUState *cpu, run_on_cpu_data data)
343{
344 CPUArchState *env = cpu->env_ptr;
345 uint16_t asked = data.host_int;
346 uint16_t all_dirty, work, to_clean;
347 int64_t now = get_clock_realtime();
348
349 assert_cpu_is_self(cpu);
350
351 tlb_debug("mmu_idx:0x%04" PRIx16 "\n", asked);
352
353 qemu_spin_lock(&env_tlb(env)->c.lock);
354
355 all_dirty = env_tlb(env)->c.dirty;
356 to_clean = asked & all_dirty;
357 all_dirty &= ~to_clean;
358 env_tlb(env)->c.dirty = all_dirty;
359
360 for (work = to_clean; work != 0; work &= work - 1) {
361 int mmu_idx = ctz32(work);
362 tlb_flush_one_mmuidx_locked(env, mmu_idx, now);
363 }
364
365 qemu_spin_unlock(&env_tlb(env)->c.lock);
366
367 cpu_tb_jmp_cache_clear(cpu);
368
369 if (to_clean == ALL_MMUIDX_BITS) {
370 qatomic_set(&env_tlb(env)->c.full_flush_count,
371 env_tlb(env)->c.full_flush_count + 1);
372 } else {
373 qatomic_set(&env_tlb(env)->c.part_flush_count,
374 env_tlb(env)->c.part_flush_count + ctpop16(to_clean));
375 if (to_clean != asked) {
376 qatomic_set(&env_tlb(env)->c.elide_flush_count,
377 env_tlb(env)->c.elide_flush_count +
378 ctpop16(asked & ~to_clean));
379 }
380 }
381}
382
383void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap)
384{
385 tlb_debug("mmu_idx: 0x%" PRIx16 "\n", idxmap);
386
387 if (cpu->created && !qemu_cpu_is_self(cpu)) {
388 async_run_on_cpu(cpu, tlb_flush_by_mmuidx_async_work,
389 RUN_ON_CPU_HOST_INT(idxmap));
390 } else {
391 tlb_flush_by_mmuidx_async_work(cpu, RUN_ON_CPU_HOST_INT(idxmap));
392 }
393}
394
395void tlb_flush(CPUState *cpu)
396{
397 tlb_flush_by_mmuidx(cpu, ALL_MMUIDX_BITS);
398}
399
400void tlb_flush_by_mmuidx_all_cpus(CPUState *src_cpu, uint16_t idxmap)
401{
402 const run_on_cpu_func fn = tlb_flush_by_mmuidx_async_work;
403
404 tlb_debug("mmu_idx: 0x%"PRIx16"\n", idxmap);
405
406 flush_all_helper(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
407 fn(src_cpu, RUN_ON_CPU_HOST_INT(idxmap));
408}
409
410void tlb_flush_all_cpus(CPUState *src_cpu)
411{
412 tlb_flush_by_mmuidx_all_cpus(src_cpu, ALL_MMUIDX_BITS);
413}
414
415void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *src_cpu, uint16_t idxmap)
416{
417 const run_on_cpu_func fn = tlb_flush_by_mmuidx_async_work;
418
419 tlb_debug("mmu_idx: 0x%"PRIx16"\n", idxmap);
420
421 flush_all_helper(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
422 async_safe_run_on_cpu(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
423}
424
425void tlb_flush_all_cpus_synced(CPUState *src_cpu)
426{
427 tlb_flush_by_mmuidx_all_cpus_synced(src_cpu, ALL_MMUIDX_BITS);
428}
429
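/*
 * Return true if @page (pre-masked by @mask) matches any of the three
 * comparators of @tlb_entry.  TLB_INVALID_MASK is kept in the comparison
 * so that an invalidated entry never matches.
 */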
430static bool tlb_hit_page_mask_anyprot(CPUTLBEntry *tlb_entry,
431 target_ulong page, target_ulong mask)
432{
433 page &= mask;
434 mask &= TARGET_PAGE_MASK | TLB_INVALID_MASK;
435
436 return (page == (tlb_entry->addr_read & mask) ||
437 page == (tlb_addr_write(tlb_entry) & mask) ||
438 page == (tlb_entry->addr_code & mask));
439}
440
441static inline bool tlb_hit_page_anyprot(CPUTLBEntry *tlb_entry,
442 target_ulong page)
443{
444 return tlb_hit_page_mask_anyprot(tlb_entry, page, -1);
445}

/**
 * tlb_entry_is_empty - return true if the entry is not in use
 * @te: pointer to CPUTLBEntry
 */
451static inline bool tlb_entry_is_empty(const CPUTLBEntry *te)
452{
453 return te->addr_read == -1 && te->addr_write == -1 && te->addr_code == -1;
454}

/* Called with tlb_c.lock held */
457static bool tlb_flush_entry_mask_locked(CPUTLBEntry *tlb_entry,
458 target_ulong page,
459 target_ulong mask)
460{
461 if (tlb_hit_page_mask_anyprot(tlb_entry, page, mask)) {
462 memset(tlb_entry, -1, sizeof(*tlb_entry));
463 return true;
464 }
465 return false;
466}
467
468static inline bool tlb_flush_entry_locked(CPUTLBEntry *tlb_entry,
469 target_ulong page)
470{
471 return tlb_flush_entry_mask_locked(tlb_entry, page, -1);
472}

/* Called with tlb_c.lock held */
475static void tlb_flush_vtlb_page_mask_locked(CPUArchState *env, int mmu_idx,
476 target_ulong page,
477 target_ulong mask)
478{
479 CPUTLBDesc *d = &env_tlb(env)->d[mmu_idx];
480 int k;
481
482 assert_cpu_is_self(env_cpu(env));
483 for (k = 0; k < CPU_VTLB_SIZE; k++) {
484 if (tlb_flush_entry_mask_locked(&d->vtable[k], page, mask)) {
485 tlb_n_used_entries_dec(env, mmu_idx);
486 }
487 }
488}
489
490static inline void tlb_flush_vtlb_page_locked(CPUArchState *env, int mmu_idx,
491 target_ulong page)
492{
493 tlb_flush_vtlb_page_mask_locked(env, mmu_idx, page, -1);
494}
495
496static void tlb_flush_page_locked(CPUArchState *env, int midx,
497 target_ulong page)
498{
499 target_ulong lp_addr = env_tlb(env)->d[midx].large_page_addr;
500 target_ulong lp_mask = env_tlb(env)->d[midx].large_page_mask;

    /* Check if we need to flush due to large pages.  */
503 if ((page & lp_mask) == lp_addr) {
504 tlb_debug("forcing full flush midx %d ("
505 TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
506 midx, lp_addr, lp_mask);
507 tlb_flush_one_mmuidx_locked(env, midx, get_clock_realtime());
508 } else {
509 if (tlb_flush_entry_locked(tlb_entry(env, midx, page), page)) {
510 tlb_n_used_entries_dec(env, midx);
511 }
512 tlb_flush_vtlb_page_locked(env, midx, page);
513 }
514}

/**
 * tlb_flush_page_by_mmuidx_async_0:
 * @cpu: cpu on which to flush
 * @addr: page of virtual address to flush
 * @idxmap: set of mmu_idx to flush
 *
 * Helper for tlb_flush_page_by_mmuidx and friends, flush one page
 * at @addr from the tlbs indicated by @idxmap from @cpu.
 */
525static void tlb_flush_page_by_mmuidx_async_0(CPUState *cpu,
526 target_ulong addr,
527 uint16_t idxmap)
528{
529 CPUArchState *env = cpu->env_ptr;
530 int mmu_idx;
531
532 assert_cpu_is_self(cpu);
533
534 tlb_debug("page addr:" TARGET_FMT_lx " mmu_map:0x%x\n", addr, idxmap);
535
536 qemu_spin_lock(&env_tlb(env)->c.lock);
537 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
538 if ((idxmap >> mmu_idx) & 1) {
539 tlb_flush_page_locked(env, mmu_idx, addr);
540 }
541 }
542 qemu_spin_unlock(&env_tlb(env)->c.lock);
543
544 tb_flush_jmp_cache(cpu, addr);
545}

/**
 * tlb_flush_page_by_mmuidx_async_1:
 * @cpu: cpu on which to flush
 * @data: encoded addr + idxmap
 *
 * Helper for tlb_flush_page_by_mmuidx and friends, called through
 * async_run_on_cpu.  The idxmap parameter is encoded in the page
 * offset of the target_ptr field, which limits the set of mmu_idx
 * that can be passed via this method.
 */
557static void tlb_flush_page_by_mmuidx_async_1(CPUState *cpu,
558 run_on_cpu_data data)
559{
560 target_ulong addr_and_idxmap = (target_ulong) data.target_ptr;
561 target_ulong addr = addr_and_idxmap & TARGET_PAGE_MASK;
562 uint16_t idxmap = addr_and_idxmap & ~TARGET_PAGE_MASK;
563
564 tlb_flush_page_by_mmuidx_async_0(cpu, addr, idxmap);
565}
566
567typedef struct {
568 target_ulong addr;
569 uint16_t idxmap;
570} TLBFlushPageByMMUIdxData;

/**
 * tlb_flush_page_by_mmuidx_async_2:
 * @cpu: cpu on which to flush
 * @data: allocated addr + idxmap
 *
 * Helper for tlb_flush_page_by_mmuidx and friends, called through
 * async_run_on_cpu.  The addr+idxmap parameters are stored in a
 * TLBFlushPageByMMUIdxData structure that has been allocated
 * specifically for this helper.  Free the structure when done.
 */
582static void tlb_flush_page_by_mmuidx_async_2(CPUState *cpu,
583 run_on_cpu_data data)
584{
585 TLBFlushPageByMMUIdxData *d = data.host_ptr;
586
587 tlb_flush_page_by_mmuidx_async_0(cpu, d->addr, d->idxmap);
588 g_free(d);
589}
590
591void tlb_flush_page_by_mmuidx(CPUState *cpu, target_ulong addr, uint16_t idxmap)
592{
593 tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%" PRIx16 "\n", addr, idxmap);

    /* This should already be page aligned */
596 addr &= TARGET_PAGE_MASK;
597
598 if (qemu_cpu_is_self(cpu)) {
599 tlb_flush_page_by_mmuidx_async_0(cpu, addr, idxmap);
600 } else if (idxmap < TARGET_PAGE_SIZE) {
        /*
         * Most targets have only a few mmu_idx.  In the case where we
         * can stuff idxmap into the low TARGET_PAGE_BITS, avoid
         * allocating memory for this operation.
         */
606 async_run_on_cpu(cpu, tlb_flush_page_by_mmuidx_async_1,
607 RUN_ON_CPU_TARGET_PTR(addr | idxmap));
608 } else {
609 TLBFlushPageByMMUIdxData *d = g_new(TLBFlushPageByMMUIdxData, 1);

        /* Otherwise allocate a structure, freed by the worker.  */
612 d->addr = addr;
613 d->idxmap = idxmap;
614 async_run_on_cpu(cpu, tlb_flush_page_by_mmuidx_async_2,
615 RUN_ON_CPU_HOST_PTR(d));
616 }
617}
618
619void tlb_flush_page(CPUState *cpu, target_ulong addr)
620{
621 tlb_flush_page_by_mmuidx(cpu, addr, ALL_MMUIDX_BITS);
622}
623
624void tlb_flush_page_by_mmuidx_all_cpus(CPUState *src_cpu, target_ulong addr,
625 uint16_t idxmap)
626{
627 tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%"PRIx16"\n", addr, idxmap);
628
629
630 addr &= TARGET_PAGE_MASK;
631
632
633
634
635
636 if (idxmap < TARGET_PAGE_SIZE) {
637 flush_all_helper(src_cpu, tlb_flush_page_by_mmuidx_async_1,
638 RUN_ON_CPU_TARGET_PTR(addr | idxmap));
639 } else {
640 CPUState *dst_cpu;
641
642
643 CPU_FOREACH(dst_cpu) {
644 if (dst_cpu != src_cpu) {
645 TLBFlushPageByMMUIdxData *d
646 = g_new(TLBFlushPageByMMUIdxData, 1);
647
648 d->addr = addr;
649 d->idxmap = idxmap;
650 async_run_on_cpu(dst_cpu, tlb_flush_page_by_mmuidx_async_2,
651 RUN_ON_CPU_HOST_PTR(d));
652 }
653 }
654 }
655
656 tlb_flush_page_by_mmuidx_async_0(src_cpu, addr, idxmap);
657}
658
659void tlb_flush_page_all_cpus(CPUState *src, target_ulong addr)
660{
661 tlb_flush_page_by_mmuidx_all_cpus(src, addr, ALL_MMUIDX_BITS);
662}
663
664void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
665 target_ulong addr,
666 uint16_t idxmap)
667{
668 tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%"PRIx16"\n", addr, idxmap);
669
670
671 addr &= TARGET_PAGE_MASK;
672
673
674
675
676
677 if (idxmap < TARGET_PAGE_SIZE) {
678 flush_all_helper(src_cpu, tlb_flush_page_by_mmuidx_async_1,
679 RUN_ON_CPU_TARGET_PTR(addr | idxmap));
680 async_safe_run_on_cpu(src_cpu, tlb_flush_page_by_mmuidx_async_1,
681 RUN_ON_CPU_TARGET_PTR(addr | idxmap));
682 } else {
683 CPUState *dst_cpu;
684 TLBFlushPageByMMUIdxData *d;
685
686
687 CPU_FOREACH(dst_cpu) {
688 if (dst_cpu != src_cpu) {
689 d = g_new(TLBFlushPageByMMUIdxData, 1);
690 d->addr = addr;
691 d->idxmap = idxmap;
692 async_run_on_cpu(dst_cpu, tlb_flush_page_by_mmuidx_async_2,
693 RUN_ON_CPU_HOST_PTR(d));
694 }
695 }
696
697 d = g_new(TLBFlushPageByMMUIdxData, 1);
698 d->addr = addr;
699 d->idxmap = idxmap;
700 async_safe_run_on_cpu(src_cpu, tlb_flush_page_by_mmuidx_async_2,
701 RUN_ON_CPU_HOST_PTR(d));
702 }
703}
704
705void tlb_flush_page_all_cpus_synced(CPUState *src, target_ulong addr)
706{
707 tlb_flush_page_by_mmuidx_all_cpus_synced(src, addr, ALL_MMUIDX_BITS);
708}
709
710static void tlb_flush_range_locked(CPUArchState *env, int midx,
711 target_ulong addr, target_ulong len,
712 unsigned bits)
713{
714 CPUTLBDesc *d = &env_tlb(env)->d[midx];
715 CPUTLBDescFast *f = &env_tlb(env)->f[midx];
716 target_ulong mask = MAKE_64BIT_MASK(0, bits);

    /*
     * If @bits is smaller than the tlb size, there may be multiple entries
     * within the TLB; otherwise all addresses that match under @mask hit
     * the same TLB entry.
     * TODO: Perhaps allow bits to be a few bits less than the size.
     * For now, just flush the entire TLB.
     *
     * If @len is larger than the tlb size, then it will take longer to
     * test all of the entries in the TLB than it will to flush it all.
     */
728 if (mask < f->mask || len > f->mask) {
729 tlb_debug("forcing full flush midx %d ("
730 TARGET_FMT_lx "/" TARGET_FMT_lx "+" TARGET_FMT_lx ")\n",
731 midx, addr, mask, len);
732 tlb_flush_one_mmuidx_locked(env, midx, get_clock_realtime());
733 return;
734 }

    /*
     * Check if we need to flush due to large pages.
     * Because large_page_mask contains all 1's from the msb,
     * we only need to test the end of the range.
     */
741 if (((addr + len - 1) & d->large_page_mask) == d->large_page_addr) {
742 tlb_debug("forcing full flush midx %d ("
743 TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
744 midx, d->large_page_addr, d->large_page_mask);
745 tlb_flush_one_mmuidx_locked(env, midx, get_clock_realtime());
746 return;
747 }
748
749 for (target_ulong i = 0; i < len; i += TARGET_PAGE_SIZE) {
750 target_ulong page = addr + i;
751 CPUTLBEntry *entry = tlb_entry(env, midx, page);
752
753 if (tlb_flush_entry_mask_locked(entry, page, mask)) {
754 tlb_n_used_entries_dec(env, midx);
755 }
756 tlb_flush_vtlb_page_mask_locked(env, midx, page, mask);
757 }
758}
759
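/*
 * Parameters for a ranged flush: the start @addr and @len of the virtual
 * range, the @idxmap of MMU indexes to flush, and the number of
 * significant address @bits used when masking comparators.
 */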
760typedef struct {
761 target_ulong addr;
762 target_ulong len;
763 uint16_t idxmap;
764 uint16_t bits;
765} TLBFlushRangeData;
766
767static void tlb_flush_range_by_mmuidx_async_0(CPUState *cpu,
768 TLBFlushRangeData d)
769{
770 CPUArchState *env = cpu->env_ptr;
771 int mmu_idx;
772
773 assert_cpu_is_self(cpu);
774
775 tlb_debug("range:" TARGET_FMT_lx "/%u+" TARGET_FMT_lx " mmu_map:0x%x\n",
776 d.addr, d.bits, d.len, d.idxmap);
777
778 qemu_spin_lock(&env_tlb(env)->c.lock);
779 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
780 if ((d.idxmap >> mmu_idx) & 1) {
781 tlb_flush_range_locked(env, mmu_idx, d.addr, d.len, d.bits);
782 }
783 }
784 qemu_spin_unlock(&env_tlb(env)->c.lock);

    /*
     * If the length is larger than the jump cache size, then it will take
     * longer to clear each entry individually than it will to clear it all.
     */
790 if (d.len >= (TARGET_PAGE_SIZE * TB_JMP_CACHE_SIZE)) {
791 cpu_tb_jmp_cache_clear(cpu);
792 return;
793 }
794
795 for (target_ulong i = 0; i < d.len; i += TARGET_PAGE_SIZE) {
796 tb_flush_jmp_cache(cpu, d.addr + i);
797 }
798}
799
800static void tlb_flush_range_by_mmuidx_async_1(CPUState *cpu,
801 run_on_cpu_data data)
802{
803 TLBFlushRangeData *d = data.host_ptr;
804 tlb_flush_range_by_mmuidx_async_0(cpu, *d);
805 g_free(d);
806}
807
808void tlb_flush_range_by_mmuidx(CPUState *cpu, target_ulong addr,
809 target_ulong len, uint16_t idxmap,
810 unsigned bits)
811{
812 TLBFlushRangeData d;

    /*
     * If all bits are significant, and len is small,
     * this devolves to tlb_flush_page.
     */
818 if (bits >= TARGET_LONG_BITS && len <= TARGET_PAGE_SIZE) {
819 tlb_flush_page_by_mmuidx(cpu, addr, idxmap);
820 return;
821 }
    /* If no page bits are significant, this devolves to tlb_flush. */
823 if (bits < TARGET_PAGE_BITS) {
824 tlb_flush_by_mmuidx(cpu, idxmap);
825 return;
826 }
827
828
829 d.addr = addr & TARGET_PAGE_MASK;
830 d.len = len;
831 d.idxmap = idxmap;
832 d.bits = bits;
833
834 if (qemu_cpu_is_self(cpu)) {
835 tlb_flush_range_by_mmuidx_async_0(cpu, d);
836 } else {
837
838 TLBFlushRangeData *p = g_memdup(&d, sizeof(d));
839 async_run_on_cpu(cpu, tlb_flush_range_by_mmuidx_async_1,
840 RUN_ON_CPU_HOST_PTR(p));
841 }
842}
843
844void tlb_flush_page_bits_by_mmuidx(CPUState *cpu, target_ulong addr,
845 uint16_t idxmap, unsigned bits)
846{
847 tlb_flush_range_by_mmuidx(cpu, addr, TARGET_PAGE_SIZE, idxmap, bits);
848}
849
850void tlb_flush_range_by_mmuidx_all_cpus(CPUState *src_cpu,
851 target_ulong addr, target_ulong len,
852 uint16_t idxmap, unsigned bits)
853{
854 TLBFlushRangeData d;
855 CPUState *dst_cpu;
856
857
858
859
860
861 if (bits >= TARGET_LONG_BITS && len <= TARGET_PAGE_SIZE) {
862 tlb_flush_page_by_mmuidx_all_cpus(src_cpu, addr, idxmap);
863 return;
864 }
865
866 if (bits < TARGET_PAGE_BITS) {
867 tlb_flush_by_mmuidx_all_cpus(src_cpu, idxmap);
868 return;
869 }
870
871
872 d.addr = addr & TARGET_PAGE_MASK;
873 d.len = len;
874 d.idxmap = idxmap;
875 d.bits = bits;
876
877
878 CPU_FOREACH(dst_cpu) {
879 if (dst_cpu != src_cpu) {
880 TLBFlushRangeData *p = g_memdup(&d, sizeof(d));
881 async_run_on_cpu(dst_cpu,
882 tlb_flush_range_by_mmuidx_async_1,
883 RUN_ON_CPU_HOST_PTR(p));
884 }
885 }
886
887 tlb_flush_range_by_mmuidx_async_0(src_cpu, d);
888}
889
890void tlb_flush_page_bits_by_mmuidx_all_cpus(CPUState *src_cpu,
891 target_ulong addr,
892 uint16_t idxmap, unsigned bits)
893{
894 tlb_flush_range_by_mmuidx_all_cpus(src_cpu, addr, TARGET_PAGE_SIZE,
895 idxmap, bits);
896}
897
898void tlb_flush_range_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
899 target_ulong addr,
900 target_ulong len,
901 uint16_t idxmap,
902 unsigned bits)
903{
904 TLBFlushRangeData d, *p;
905 CPUState *dst_cpu;
906
907
908
909
910
911 if (bits >= TARGET_LONG_BITS && len <= TARGET_PAGE_SIZE) {
912 tlb_flush_page_by_mmuidx_all_cpus_synced(src_cpu, addr, idxmap);
913 return;
914 }
915
916 if (bits < TARGET_PAGE_BITS) {
917 tlb_flush_by_mmuidx_all_cpus_synced(src_cpu, idxmap);
918 return;
919 }
920
921
922 d.addr = addr & TARGET_PAGE_MASK;
923 d.len = len;
924 d.idxmap = idxmap;
925 d.bits = bits;
926
927
928 CPU_FOREACH(dst_cpu) {
929 if (dst_cpu != src_cpu) {
930 p = g_memdup(&d, sizeof(d));
931 async_run_on_cpu(dst_cpu, tlb_flush_range_by_mmuidx_async_1,
932 RUN_ON_CPU_HOST_PTR(p));
933 }
934 }
935
936 p = g_memdup(&d, sizeof(d));
937 async_safe_run_on_cpu(src_cpu, tlb_flush_range_by_mmuidx_async_1,
938 RUN_ON_CPU_HOST_PTR(p));
939}
940
941void tlb_flush_page_bits_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
942 target_ulong addr,
943 uint16_t idxmap,
944 unsigned bits)
945{
946 tlb_flush_range_by_mmuidx_all_cpus_synced(src_cpu, addr, TARGET_PAGE_SIZE,
947 idxmap, bits);
948}
949
950
951
952void tlb_protect_code(ram_addr_t ram_addr)
953{
954 cpu_physical_memory_test_and_clear_dirty(ram_addr, TARGET_PAGE_SIZE,
955 DIRTY_MEMORY_CODE);
956}
957
958
959
960void tlb_unprotect_code(ram_addr_t ram_addr)
961{
962 cpu_physical_memory_set_dirty_flag(ram_addr, DIRTY_MEMORY_CODE);
963}

/*
 * Dirty write flag handling
 *
 * RAM pages that are being tracked as clean (for code generation or for
 * dirty-memory logging) are entered into the TLB with TLB_NOTDIRTY set in
 * addr_write, which forces stores through the slow path so that
 * notdirty_write() can update the dirty bitmaps before the page is
 * written to.
 *
 * tlb_reset_dirty_range_locked() re-arms that trap: for any entry whose
 * backing host address falls within [start, start + length) it sets
 * TLB_NOTDIRTY again so that the next write is intercepted.
 *
 * Called with tlb_c.lock held.  The owning vCPU may read addr_write
 * concurrently from generated code, so the flag is set with qatomic_set;
 * with TCG_OVERSIZED_GUEST the comparator cannot be read atomically
 * anyway, so a plain update is used.
 */
982static void tlb_reset_dirty_range_locked(CPUTLBEntry *tlb_entry,
983 uintptr_t start, uintptr_t length)
984{
985 uintptr_t addr = tlb_entry->addr_write;
986
987 if ((addr & (TLB_INVALID_MASK | TLB_MMIO |
988 TLB_DISCARD_WRITE | TLB_NOTDIRTY)) == 0) {
989 addr &= TARGET_PAGE_MASK;
990 addr += tlb_entry->addend;
991 if ((addr - start) < length) {
992#if TCG_OVERSIZED_GUEST
993 tlb_entry->addr_write |= TLB_NOTDIRTY;
994#else
995 qatomic_set(&tlb_entry->addr_write,
996 tlb_entry->addr_write | TLB_NOTDIRTY);
997#endif
998 }
999 }
1000}

/*
 * Called with tlb_c.lock held.
 * Called only from the vCPU context, i.e. the TLB's owner thread.
 */
1006static inline void copy_tlb_helper_locked(CPUTLBEntry *d, const CPUTLBEntry *s)
1007{
1008 *d = *s;
1009}

/* This is a cross vCPU call (i.e. another vCPU resetting the flags of
 * the target vCPU).
 * We must take tlb_c.lock to avoid racing with another vCPU update. The only
 * thing actually updated is the target TLB entry ->addr_write flags.
 */
1016void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length)
1017{
1018 CPUArchState *env;
1019
1020 int mmu_idx;
1021
1022 env = cpu->env_ptr;
1023 qemu_spin_lock(&env_tlb(env)->c.lock);
1024 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
1025 unsigned int i;
1026 unsigned int n = tlb_n_entries(&env_tlb(env)->f[mmu_idx]);
1027
1028 for (i = 0; i < n; i++) {
1029 tlb_reset_dirty_range_locked(&env_tlb(env)->f[mmu_idx].table[i],
1030 start1, length);
1031 }
1032
1033 for (i = 0; i < CPU_VTLB_SIZE; i++) {
1034 tlb_reset_dirty_range_locked(&env_tlb(env)->d[mmu_idx].vtable[i],
1035 start1, length);
1036 }
1037 }
1038 qemu_spin_unlock(&env_tlb(env)->c.lock);
1039}
1040
1041
1042static inline void tlb_set_dirty1_locked(CPUTLBEntry *tlb_entry,
1043 target_ulong vaddr)
1044{
1045 if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY)) {
1046 tlb_entry->addr_write = vaddr;
1047 }
1048}
1049
1050
1051
1052void tlb_set_dirty(CPUState *cpu, target_ulong vaddr)
1053{
1054 CPUArchState *env = cpu->env_ptr;
1055 int mmu_idx;
1056
1057 assert_cpu_is_self(cpu);
1058
1059 vaddr &= TARGET_PAGE_MASK;
1060 qemu_spin_lock(&env_tlb(env)->c.lock);
1061 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
1062 tlb_set_dirty1_locked(tlb_entry(env, mmu_idx, vaddr), vaddr);
1063 }
1064
1065 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
1066 int k;
1067 for (k = 0; k < CPU_VTLB_SIZE; k++) {
1068 tlb_set_dirty1_locked(&env_tlb(env)->d[mmu_idx].vtable[k], vaddr);
1069 }
1070 }
1071 qemu_spin_unlock(&env_tlb(env)->c.lock);
1072}

/* Our TLB does not support large pages, so remember the area covered by
   large pages and trigger a full TLB flush if these are invalidated.  */
1076static void tlb_add_large_page(CPUArchState *env, int mmu_idx,
1077 target_ulong vaddr, target_ulong size)
1078{
1079 target_ulong lp_addr = env_tlb(env)->d[mmu_idx].large_page_addr;
1080 target_ulong lp_mask = ~(size - 1);
1081
1082 if (lp_addr == (target_ulong)-1) {
1083
1084 lp_addr = vaddr;
1085 } else {
1086
1087
1088
1089 lp_mask &= env_tlb(env)->d[mmu_idx].large_page_mask;
1090 while (((lp_addr ^ vaddr) & lp_mask) != 0) {
1091 lp_mask <<= 1;
1092 }
1093 }
1094 env_tlb(env)->d[mmu_idx].large_page_addr = lp_addr & lp_mask;
1095 env_tlb(env)->d[mmu_idx].large_page_mask = lp_mask;
1096}

/*
 * Add a new TLB entry. At most one entry for a given virtual address
 * is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the
 * supplied size is only used by tlb_flush_page.
 *
 * Called from TCG-generated code, which is under an RCU read-side
 * critical section.
 */
1105void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
1106 hwaddr paddr, MemTxAttrs attrs, int prot,
1107 int mmu_idx, target_ulong size)
1108{
1109 CPUArchState *env = cpu->env_ptr;
1110 CPUTLB *tlb = env_tlb(env);
1111 CPUTLBDesc *desc = &tlb->d[mmu_idx];
1112 MemoryRegionSection *section;
1113 unsigned int index;
1114 target_ulong address;
1115 target_ulong write_address;
1116 uintptr_t addend;
1117 CPUTLBEntry *te, tn;
1118 hwaddr iotlb, xlat, sz, paddr_page;
1119 target_ulong vaddr_page;
1120 int asidx = cpu_asidx_from_attrs(cpu, attrs);
1121 int wp_flags;
1122 bool is_ram, is_romd;
1123
1124 assert_cpu_is_self(cpu);
1125
1126 if (size <= TARGET_PAGE_SIZE) {
1127 sz = TARGET_PAGE_SIZE;
1128 } else {
1129 tlb_add_large_page(env, mmu_idx, vaddr, size);
1130 sz = size;
1131 }
1132 vaddr_page = vaddr & TARGET_PAGE_MASK;
1133 paddr_page = paddr & TARGET_PAGE_MASK;
1134
1135 section = address_space_translate_for_iotlb(cpu, asidx, paddr_page,
1136 &xlat, &sz, attrs, &prot);
1137 assert(sz >= TARGET_PAGE_SIZE);
1138
1139 tlb_debug("vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx
1140 " prot=%x idx=%d\n",
1141 vaddr, paddr, prot, mmu_idx);
1142
1143 address = vaddr_page;
1144 if (size < TARGET_PAGE_SIZE) {
1145
1146 address |= TLB_INVALID_MASK;
1147 }
1148 if (attrs.byte_swap) {
1149 address |= TLB_BSWAP;
1150 }
1151
1152 is_ram = memory_region_is_ram(section->mr);
1153 is_romd = memory_region_is_romd(section->mr);
1154
1155 if (is_ram || is_romd) {
1156
1157 addend = (uintptr_t)memory_region_get_ram_ptr(section->mr) + xlat;
1158 } else {
1159
1160 addend = 0;
1161 }
1162
1163 write_address = address;
1164 if (is_ram) {
1165 iotlb = memory_region_get_ram_addr(section->mr) + xlat;
        /*
         * Computing is_clean is expensive; avoid all that unless
         * the page is actually writable.
         */
1170 if (prot & PAGE_WRITE) {
1171 if (section->readonly) {
1172 write_address |= TLB_DISCARD_WRITE;
1173 } else if (cpu_physical_memory_is_clean(iotlb)) {
1174 write_address |= TLB_NOTDIRTY;
1175 }
1176 }
1177 } else {
1178
1179 iotlb = memory_region_section_get_iotlb(cpu, section) + xlat;
        /*
         * Writes to romd devices must go through MMIO to enable write.
         * Reads to romd devices go through the ram_ptr found above,
         * but of course reads to I/O must go through MMIO.
         */
1185 write_address |= TLB_MMIO;
1186 if (!is_romd) {
1187 address = write_address;
1188 }
1189 }
1190
1191 wp_flags = cpu_watchpoint_address_matches(cpu, vaddr_page,
1192 TARGET_PAGE_SIZE);
1193
1194 index = tlb_index(env, mmu_idx, vaddr_page);
1195 te = tlb_entry(env, mmu_idx, vaddr_page);

    /*
     * Hold the TLB lock for the rest of the function. We could acquire/release
     * the lock several times in the function, but it is faster to amortize the
     * acquisition cost by acquiring it just once. Note that this leads to
     * a longer critical section, but this is not a concern since the TLB lock
     * is unlikely to be contended.
     */
1204 qemu_spin_lock(&tlb->c.lock);
1205
1206
1207 tlb->c.dirty |= 1 << mmu_idx;
1208
1209
1210 tlb_flush_vtlb_page_locked(env, mmu_idx, vaddr_page);

    /*
     * Only evict the old entry to the victim tlb if it's for a
     * different page; otherwise just overwrite the stale data.
     */
1216 if (!tlb_hit_page_anyprot(te, vaddr_page) && !tlb_entry_is_empty(te)) {
1217 unsigned vidx = desc->vindex++ % CPU_VTLB_SIZE;
1218 CPUTLBEntry *tv = &desc->vtable[vidx];
1219
1220
1221 copy_tlb_helper_locked(tv, te);
1222 desc->viotlb[vidx] = desc->iotlb[index];
1223 tlb_n_used_entries_dec(env, mmu_idx);
1224 }

    /*
     * At this point iotlb contains a physical section number in the lower
     * TARGET_PAGE_BITS, and either
     *  + the ram_addr_t of the page base of the target RAM (RAM)
     *  + the offset within section->mr of the page base (I/O, ROMD)
     * We subtract the vaddr_page (which is page aligned and thus won't
     * disturb the low bits) to give an offset which can be added to the
     * (non-page-aligned) vaddr of the eventual memory access to get
     * the MemoryRegion offset for the access.  Note that the vaddr we
     * subtract here is that of the page base, and not the same as the
     * vaddr we add back in io_readx()/io_writex()/get_page_addr_code().
     */
1239 desc->iotlb[index].addr = iotlb - vaddr_page;
1240 desc->iotlb[index].attrs = attrs;
1241
1242
1243 tn.addend = addend - vaddr_page;
1244 if (prot & PAGE_READ) {
1245 tn.addr_read = address;
1246 if (wp_flags & BP_MEM_READ) {
1247 tn.addr_read |= TLB_WATCHPOINT;
1248 }
1249 } else {
1250 tn.addr_read = -1;
1251 }
1252
1253 if (prot & PAGE_EXEC) {
1254 tn.addr_code = address;
1255 } else {
1256 tn.addr_code = -1;
1257 }
1258
1259 tn.addr_write = -1;
1260 if (prot & PAGE_WRITE) {
1261 tn.addr_write = write_address;
1262 if (prot & PAGE_WRITE_INV) {
1263 tn.addr_write |= TLB_INVALID_MASK;
1264 }
1265 if (wp_flags & BP_MEM_WRITE) {
1266 tn.addr_write |= TLB_WATCHPOINT;
1267 }
1268 }
1269
1270 copy_tlb_helper_locked(te, &tn);
1271 tlb_n_used_entries_inc(env, mmu_idx);
1272 qemu_spin_unlock(&tlb->c.lock);
1273}

/* Add a new TLB entry, but without specifying the memory
 * transaction attributes to be used.
 */
1278void tlb_set_page(CPUState *cpu, target_ulong vaddr,
1279 hwaddr paddr, int prot,
1280 int mmu_idx, target_ulong size)
1281{
1282 tlb_set_page_with_attrs(cpu, vaddr, paddr, MEMTXATTRS_UNSPECIFIED,
1283 prot, mmu_idx, size);
1284}
1285
1286static inline ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
1287{
1288 ram_addr_t ram_addr;
1289
1290 ram_addr = qemu_ram_addr_from_host(ptr);
1291 if (ram_addr == RAM_ADDR_INVALID) {
1292 error_report("Bad ram pointer %p", ptr);
1293 abort();
1294 }
1295 return ram_addr;
1296}

/*
 * Note: tlb_fill() can trigger a resize of the TLB. This means that all of
 * the caller's prior references to the TLB table (e.g. CPUTLBEntry pointers)
 * must be discarded and looked up again (e.g. via tlb_entry()).
 */
1303static void tlb_fill(CPUState *cpu, target_ulong addr, int size,
1304 MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
1305{
1306 CPUClass *cc = CPU_GET_CLASS(cpu);
1307 bool ok;

    /*
     * This is not a probe, so only valid return is success; failure
     * should result in exception + longjmp to the cpu loop.
     */
1313 ok = cc->tcg_ops->tlb_fill(cpu, addr, size,
1314 access_type, mmu_idx, false, retaddr);
1315 assert(ok);
1316}
1317
1318static inline void cpu_unaligned_access(CPUState *cpu, vaddr addr,
1319 MMUAccessType access_type,
1320 int mmu_idx, uintptr_t retaddr)
1321{
1322 CPUClass *cc = CPU_GET_CLASS(cpu);
1323
1324 cc->tcg_ops->do_unaligned_access(cpu, addr, access_type, mmu_idx, retaddr);
1325}
1326
1327static inline void cpu_transaction_failed(CPUState *cpu, hwaddr physaddr,
1328 vaddr addr, unsigned size,
1329 MMUAccessType access_type,
1330 int mmu_idx, MemTxAttrs attrs,
1331 MemTxResult response,
1332 uintptr_t retaddr)
1333{
1334 CPUClass *cc = CPU_GET_CLASS(cpu);
1335
1336 if (!cpu->ignore_memory_transaction_failures &&
1337 cc->tcg_ops->do_transaction_failed) {
1338 cc->tcg_ops->do_transaction_failed(cpu, physaddr, addr, size,
1339 access_type, mmu_idx, attrs,
1340 response, retaddr);
1341 }
1342}
1343
1344static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
1345 int mmu_idx, target_ulong addr, uintptr_t retaddr,
1346 MMUAccessType access_type, MemOp op)
1347{
1348 CPUState *cpu = env_cpu(env);
1349 hwaddr mr_offset;
1350 MemoryRegionSection *section;
1351 MemoryRegion *mr;
1352 uint64_t val;
1353 bool locked = false;
1354 MemTxResult r;
1355
1356 section = iotlb_to_section(cpu, iotlbentry->addr, iotlbentry->attrs);
1357 mr = section->mr;
1358 mr_offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr;
1359 cpu->mem_io_pc = retaddr;
1360 if (!cpu->can_do_io) {
1361 cpu_io_recompile(cpu, retaddr);
1362 }
1363
1364 if (!qemu_mutex_iothread_locked()) {
1365 qemu_mutex_lock_iothread();
1366 locked = true;
1367 }
1368 r = memory_region_dispatch_read(mr, mr_offset, &val, op, iotlbentry->attrs);
1369 if (r != MEMTX_OK) {
1370 hwaddr physaddr = mr_offset +
1371 section->offset_within_address_space -
1372 section->offset_within_region;
1373
1374 cpu_transaction_failed(cpu, physaddr, addr, memop_size(op), access_type,
1375 mmu_idx, iotlbentry->attrs, r, retaddr);
1376 }
1377 if (locked) {
1378 qemu_mutex_unlock_iothread();
1379 }
1380
1381 return val;
1382}

/*
 * Save a potentially trashed IOTLB entry for later lookup by plugin.
 * This is read by tlb_plugin_lookup if the iotlb entry doesn't match
 * because of the side effect of io_writex changing memory layout.
 */
1389static void save_iotlb_data(CPUState *cs, hwaddr addr,
1390 MemoryRegionSection *section, hwaddr mr_offset)
1391{
1392#ifdef CONFIG_PLUGIN
1393 SavedIOTLB *saved = &cs->saved_iotlb;
1394 saved->addr = addr;
1395 saved->section = section;
1396 saved->mr_offset = mr_offset;
1397#endif
1398}
1399
1400static void io_writex(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
1401 int mmu_idx, uint64_t val, target_ulong addr,
1402 uintptr_t retaddr, MemOp op)
1403{
1404 CPUState *cpu = env_cpu(env);
1405 hwaddr mr_offset;
1406 MemoryRegionSection *section;
1407 MemoryRegion *mr;
1408 bool locked = false;
1409 MemTxResult r;
1410
1411 section = iotlb_to_section(cpu, iotlbentry->addr, iotlbentry->attrs);
1412 mr = section->mr;
1413 mr_offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr;
1414 if (!cpu->can_do_io) {
1415 cpu_io_recompile(cpu, retaddr);
1416 }
1417 cpu->mem_io_pc = retaddr;
1418
1419
1420
1421
1422
1423 save_iotlb_data(cpu, iotlbentry->addr, section, mr_offset);
1424
1425 if (!qemu_mutex_iothread_locked()) {
1426 qemu_mutex_lock_iothread();
1427 locked = true;
1428 }
1429 r = memory_region_dispatch_write(mr, mr_offset, val, op, iotlbentry->attrs);
1430 if (r != MEMTX_OK) {
1431 hwaddr physaddr = mr_offset +
1432 section->offset_within_address_space -
1433 section->offset_within_region;
1434
1435 cpu_transaction_failed(cpu, physaddr, addr, memop_size(op),
1436 MMU_DATA_STORE, mmu_idx, iotlbentry->attrs, r,
1437 retaddr);
1438 }
1439 if (locked) {
1440 qemu_mutex_unlock_iothread();
1441 }
1442}
1443
1444static inline target_ulong tlb_read_ofs(CPUTLBEntry *entry, size_t ofs)
1445{
1446#if TCG_OVERSIZED_GUEST
1447 return *(target_ulong *)((uintptr_t)entry + ofs);
1448#else
1449
1450 return qatomic_read((target_ulong *)((uintptr_t)entry + ofs));
1451#endif
1452}

/* Return true if ADDR is present in the victim tlb, and has been copied
   back to the main tlb.  */
1456static bool victim_tlb_hit(CPUArchState *env, size_t mmu_idx, size_t index,
1457 size_t elt_ofs, target_ulong page)
1458{
1459 size_t vidx;
1460
1461 assert_cpu_is_self(env_cpu(env));
1462 for (vidx = 0; vidx < CPU_VTLB_SIZE; ++vidx) {
1463 CPUTLBEntry *vtlb = &env_tlb(env)->d[mmu_idx].vtable[vidx];
1464 target_ulong cmp;
1465
1466
1467#if TCG_OVERSIZED_GUEST
1468 cmp = *(target_ulong *)((uintptr_t)vtlb + elt_ofs);
1469#else
1470 cmp = qatomic_read((target_ulong *)((uintptr_t)vtlb + elt_ofs));
1471#endif
1472
1473 if (cmp == page) {
1474
1475 CPUTLBEntry tmptlb, *tlb = &env_tlb(env)->f[mmu_idx].table[index];
1476
1477 qemu_spin_lock(&env_tlb(env)->c.lock);
1478 copy_tlb_helper_locked(&tmptlb, tlb);
1479 copy_tlb_helper_locked(tlb, vtlb);
1480 copy_tlb_helper_locked(vtlb, &tmptlb);
1481 qemu_spin_unlock(&env_tlb(env)->c.lock);
1482
1483 CPUIOTLBEntry tmpio, *io = &env_tlb(env)->d[mmu_idx].iotlb[index];
1484 CPUIOTLBEntry *vio = &env_tlb(env)->d[mmu_idx].viotlb[vidx];
1485 tmpio = *io; *io = *vio; *vio = tmpio;
1486 return true;
1487 }
1488 }
1489 return false;
1490}

/* Macro to call the above, with local variables from the use context.  */
1493#define VICTIM_TLB_HIT(TY, ADDR) \
1494 victim_tlb_hit(env, mmu_idx, index, offsetof(CPUTLBEntry, TY), \
1495 (ADDR) & TARGET_PAGE_MASK)

/*
 * Return a ram_addr_t for the virtual address for execution.
 *
 * Return -1 if we can't translate and execute from an entire page
 * of RAM.  This will force us to execute by loading and translating
 * one insn at a time, without caching.
 *
 * NOTE: This function will trigger an exception if the page is
 * not executable.
 */
1507tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, target_ulong addr,
1508 void **hostp)
1509{
1510 uintptr_t mmu_idx = cpu_mmu_index(env, true);
1511 uintptr_t index = tlb_index(env, mmu_idx, addr);
1512 CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
1513 void *p;
1514
1515 if (unlikely(!tlb_hit(entry->addr_code, addr))) {
1516 if (!VICTIM_TLB_HIT(addr_code, addr)) {
1517 tlb_fill(env_cpu(env), addr, 0, MMU_INST_FETCH, mmu_idx, 0);
1518 index = tlb_index(env, mmu_idx, addr);
1519 entry = tlb_entry(env, mmu_idx, addr);
1520
1521 if (unlikely(entry->addr_code & TLB_INVALID_MASK)) {
                /*
                 * The MMU protection covers a smaller range than a target
                 * page, so we must redo the MMU check for every insn.
                 */
1526 return -1;
1527 }
1528 }
1529 assert(tlb_hit(entry->addr_code, addr));
1530 }
1531
1532 if (unlikely(entry->addr_code & TLB_MMIO)) {
1533
1534 if (hostp) {
1535 *hostp = NULL;
1536 }
1537 return -1;
1538 }
1539
1540 p = (void *)((uintptr_t)addr + entry->addend);
1541 if (hostp) {
1542 *hostp = p;
1543 }
1544 return qemu_ram_addr_from_host_nofail(p);
1545}
1546
1547tb_page_addr_t get_page_addr_code(CPUArchState *env, target_ulong addr)
1548{
1549 return get_page_addr_code_hostp(env, addr, NULL);
1550}
1551
1552static void notdirty_write(CPUState *cpu, vaddr mem_vaddr, unsigned size,
1553 CPUIOTLBEntry *iotlbentry, uintptr_t retaddr)
1554{
1555 ram_addr_t ram_addr = mem_vaddr + iotlbentry->addr;
1556
1557 trace_memory_notdirty_write_access(mem_vaddr, ram_addr, size);
1558
1559 if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
1560 struct page_collection *pages
1561 = page_collection_lock(ram_addr, ram_addr + size);
1562 tb_invalidate_phys_page_fast(pages, ram_addr, size, retaddr);
1563 page_collection_unlock(pages);
1564 }
1565
1566
1567
1568
1569
1570 cpu_physical_memory_set_dirty_range(ram_addr, size, DIRTY_CLIENTS_NOCODE);
1571
1572
1573 if (!cpu_physical_memory_is_clean(ram_addr)) {
1574 trace_memory_notdirty_set_dirty(mem_vaddr);
1575 tlb_set_dirty(cpu, mem_vaddr);
1576 }
1577}
1578
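/*
 * Shared guts of probe_access_flags, probe_access and tlb_vaddr_to_host:
 * look up @addr for @access_type in @mmu_idx, filling the TLB if required
 * (unless @nonfault), and return the TLB_* flags plus, via @phost, a host
 * pointer when the page is directly addressable.
 */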
1579static int probe_access_internal(CPUArchState *env, target_ulong addr,
1580 int fault_size, MMUAccessType access_type,
1581 int mmu_idx, bool nonfault,
1582 void **phost, uintptr_t retaddr)
1583{
1584 uintptr_t index = tlb_index(env, mmu_idx, addr);
1585 CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
1586 target_ulong tlb_addr, page_addr;
1587 size_t elt_ofs;
1588 int flags;
1589
1590 switch (access_type) {
1591 case MMU_DATA_LOAD:
1592 elt_ofs = offsetof(CPUTLBEntry, addr_read);
1593 break;
1594 case MMU_DATA_STORE:
1595 elt_ofs = offsetof(CPUTLBEntry, addr_write);
1596 break;
1597 case MMU_INST_FETCH:
1598 elt_ofs = offsetof(CPUTLBEntry, addr_code);
1599 break;
1600 default:
1601 g_assert_not_reached();
1602 }
1603 tlb_addr = tlb_read_ofs(entry, elt_ofs);
1604
1605 page_addr = addr & TARGET_PAGE_MASK;
1606 if (!tlb_hit_page(tlb_addr, page_addr)) {
1607 if (!victim_tlb_hit(env, mmu_idx, index, elt_ofs, page_addr)) {
1608 CPUState *cs = env_cpu(env);
1609 CPUClass *cc = CPU_GET_CLASS(cs);
1610
1611 if (!cc->tcg_ops->tlb_fill(cs, addr, fault_size, access_type,
1612 mmu_idx, nonfault, retaddr)) {
1613
1614 *phost = NULL;
1615 return TLB_INVALID_MASK;
1616 }
1617
1618
1619 entry = tlb_entry(env, mmu_idx, addr);
1620 }
1621 tlb_addr = tlb_read_ofs(entry, elt_ofs);
1622 }
1623 flags = tlb_addr & TLB_FLAGS_MASK;
1624
1625
1626 if (unlikely(flags & ~(TLB_WATCHPOINT | TLB_NOTDIRTY))) {
1627 *phost = NULL;
1628 return TLB_MMIO;
1629 }
1630
1631
1632 *phost = (void *)((uintptr_t)addr + entry->addend);
1633 return flags;
1634}
1635
1636int probe_access_flags(CPUArchState *env, target_ulong addr,
1637 MMUAccessType access_type, int mmu_idx,
1638 bool nonfault, void **phost, uintptr_t retaddr)
1639{
1640 int flags;
1641
1642 flags = probe_access_internal(env, addr, 0, access_type, mmu_idx,
1643 nonfault, phost, retaddr);
1644
1645
1646 if (unlikely(flags & TLB_NOTDIRTY)) {
1647 uintptr_t index = tlb_index(env, mmu_idx, addr);
1648 CPUIOTLBEntry *iotlbentry = &env_tlb(env)->d[mmu_idx].iotlb[index];
1649
1650 notdirty_write(env_cpu(env), addr, 1, iotlbentry, retaddr);
1651 flags &= ~TLB_NOTDIRTY;
1652 }
1653
1654 return flags;
1655}
1656
1657void *probe_access(CPUArchState *env, target_ulong addr, int size,
1658 MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
1659{
1660 void *host;
1661 int flags;
1662
1663 g_assert(-(addr | TARGET_PAGE_MASK) >= size);
1664
1665 flags = probe_access_internal(env, addr, size, access_type, mmu_idx,
1666 false, &host, retaddr);
1667
1668
1669 if (size == 0) {
1670 return NULL;
1671 }
1672
1673 if (unlikely(flags & (TLB_NOTDIRTY | TLB_WATCHPOINT))) {
1674 uintptr_t index = tlb_index(env, mmu_idx, addr);
1675 CPUIOTLBEntry *iotlbentry = &env_tlb(env)->d[mmu_idx].iotlb[index];
1676
1677
1678 if (flags & TLB_WATCHPOINT) {
1679 int wp_access = (access_type == MMU_DATA_STORE
1680 ? BP_MEM_WRITE : BP_MEM_READ);
1681 cpu_check_watchpoint(env_cpu(env), addr, size,
1682 iotlbentry->attrs, wp_access, retaddr);
1683 }
1684
1685
1686 if (flags & TLB_NOTDIRTY) {
1687 notdirty_write(env_cpu(env), addr, 1, iotlbentry, retaddr);
1688 }
1689 }
1690
1691 return host;
1692}
1693
1694void *tlb_vaddr_to_host(CPUArchState *env, abi_ptr addr,
1695 MMUAccessType access_type, int mmu_idx)
1696{
1697 void *host;
1698 int flags;
1699
1700 flags = probe_access_internal(env, addr, 0, access_type,
1701 mmu_idx, true, &host, 0);
1702
1703
1704 return flags ? NULL : host;
1705}
1706
1707#ifdef CONFIG_PLUGIN
/*
 * Perform a TLB lookup and populate the qemu_plugin_hwaddr structure.
 * This should be a hot path as we will have just looked this path up
 * in the softmmu lookup code (or helper).  We don't handle re-fills or
 * checking the victim table.  This is purely informational.
 *
 * This almost never fails as the memory access being instrumented
 * should have just filled the TLB.  The one corner case is io_writex,
 * which can cause TLB flushes and potential resizing of the TLBs,
 * losing the information we need.  In those cases we recover the data
 * from the saved copy of the iotlb entry.  As long as this always
 * happens from the same thread, the memory access that caused the TLB
 * lookup is the one we get back in the SavedIOTLB.
 */
1722bool tlb_plugin_lookup(CPUState *cpu, target_ulong addr, int mmu_idx,
1723 bool is_store, struct qemu_plugin_hwaddr *data)
1724{
1725 CPUArchState *env = cpu->env_ptr;
1726 CPUTLBEntry *tlbe = tlb_entry(env, mmu_idx, addr);
1727 uintptr_t index = tlb_index(env, mmu_idx, addr);
1728 target_ulong tlb_addr = is_store ? tlb_addr_write(tlbe) : tlbe->addr_read;
1729
1730 if (likely(tlb_hit(tlb_addr, addr))) {
1731
1732 if (tlb_addr & TLB_MMIO) {
1733 CPUIOTLBEntry *iotlbentry;
1734 iotlbentry = &env_tlb(env)->d[mmu_idx].iotlb[index];
1735 data->is_io = true;
1736 data->v.io.section = iotlb_to_section(cpu, iotlbentry->addr, iotlbentry->attrs);
1737 data->v.io.offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr;
1738 } else {
1739 data->is_io = false;
1740 data->v.ram.hostaddr = (void *)((uintptr_t)addr + tlbe->addend);
1741 }
1742 return true;
1743 } else {
1744 SavedIOTLB *saved = &cpu->saved_iotlb;
1745 data->is_io = true;
1746 data->v.io.section = saved->section;
1747 data->v.io.offset = saved->mr_offset;
1748 return true;
1749 }
1750}
1751
1752#endif

/*
 * Probe for an atomic operation.  Do not allow unaligned operations,
 * or io operations to proceed.  Return the host address.
 *
 * @prot may be PAGE_READ, PAGE_WRITE, or PAGE_READ|PAGE_WRITE.
 */
1760static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
1761 MemOpIdx oi, int size, int prot,
1762 uintptr_t retaddr)
1763{
1764 size_t mmu_idx = get_mmuidx(oi);
1765 MemOp mop = get_memop(oi);
1766 int a_bits = get_alignment_bits(mop);
1767 uintptr_t index;
1768 CPUTLBEntry *tlbe;
1769 target_ulong tlb_addr;
1770 void *hostaddr;
1771
1772
1773 retaddr -= GETPC_ADJ;
1774
1775
1776 if (unlikely(a_bits > 0 && (addr & ((1 << a_bits) - 1)))) {
1777
1778 cpu_unaligned_access(env_cpu(env), addr, MMU_DATA_STORE,
1779 mmu_idx, retaddr);
1780 }
1781
1782
1783 if (unlikely(addr & (size - 1))) {
1784
1785
1786
1787
1788 goto stop_the_world;
1789 }
1790
1791 index = tlb_index(env, mmu_idx, addr);
1792 tlbe = tlb_entry(env, mmu_idx, addr);
1793
1794
1795 if (prot & PAGE_WRITE) {
1796 tlb_addr = tlb_addr_write(tlbe);
1797 if (!tlb_hit(tlb_addr, addr)) {
1798 if (!VICTIM_TLB_HIT(addr_write, addr)) {
1799 tlb_fill(env_cpu(env), addr, size,
1800 MMU_DATA_STORE, mmu_idx, retaddr);
1801 index = tlb_index(env, mmu_idx, addr);
1802 tlbe = tlb_entry(env, mmu_idx, addr);
1803 }
1804 tlb_addr = tlb_addr_write(tlbe) & ~TLB_INVALID_MASK;
1805 }
1806
1807
1808 if ((prot & PAGE_READ) &&
1809 unlikely(tlbe->addr_read != (tlb_addr & ~TLB_NOTDIRTY))) {
1810 tlb_fill(env_cpu(env), addr, size,
1811 MMU_DATA_LOAD, mmu_idx, retaddr);
1812
1813
1814
1815
1816
1817 goto stop_the_world;
1818 }
1819 } else {
1820 tlb_addr = tlbe->addr_read;
1821 if (!tlb_hit(tlb_addr, addr)) {
1822 if (!VICTIM_TLB_HIT(addr_write, addr)) {
1823 tlb_fill(env_cpu(env), addr, size,
1824 MMU_DATA_LOAD, mmu_idx, retaddr);
1825 index = tlb_index(env, mmu_idx, addr);
1826 tlbe = tlb_entry(env, mmu_idx, addr);
1827 }
1828 tlb_addr = tlbe->addr_read & ~TLB_INVALID_MASK;
1829 }
1830 }
1831
1832
1833 if (unlikely(tlb_addr & TLB_MMIO)) {
1834
1835
1836 goto stop_the_world;
1837 }
1838
1839 hostaddr = (void *)((uintptr_t)addr + tlbe->addend);
1840
1841 if (unlikely(tlb_addr & TLB_NOTDIRTY)) {
1842 notdirty_write(env_cpu(env), addr, size,
1843 &env_tlb(env)->d[mmu_idx].iotlb[index], retaddr);
1844 }
1845
1846 return hostaddr;
1847
1848 stop_the_world:
1849 cpu_loop_exit_atomic(env_cpu(env), retaddr);
1850}

/*
 * Verify that we have passed the correct MemOp to the correct function.
 *
 * In the case of the helper_*_mmu functions, we will have done this by
 * using the MemOp to look up the helper during code generation.
 *
 * In the case of the cpu_*_mmu functions, this is up to the caller.
 * We could present one function to target code, and dispatch based on
 * the MemOp, but so far we have worked hard to avoid an indirect function
 * call along the memory path.
 */
1863static void validate_memop(MemOpIdx oi, MemOp expected)
1864{
1865#ifdef CONFIG_DEBUG_TCG
1866 MemOp have = get_memop(oi) & (MO_SIZE | MO_BSWAP);
1867 assert(have == expected);
1868#endif
1869}

/*
 * Load Helpers
 *
 * We support two different access types: data loads and code reads
 * (instruction fetches).  Both funnel through load_helper below; the
 * code_read flag selects the addr_code comparator and the
 * MMU_INST_FETCH access type.
 */
1880typedef uint64_t FullLoadHelper(CPUArchState *env, target_ulong addr,
1881 MemOpIdx oi, uintptr_t retaddr);
1882
1883static inline uint64_t QEMU_ALWAYS_INLINE
1884load_memop(const void *haddr, MemOp op)
1885{
1886 switch (op) {
1887 case MO_UB:
1888 return ldub_p(haddr);
1889 case MO_BEUW:
1890 return lduw_be_p(haddr);
1891 case MO_LEUW:
1892 return lduw_le_p(haddr);
1893 case MO_BEUL:
1894 return (uint32_t)ldl_be_p(haddr);
1895 case MO_LEUL:
1896 return (uint32_t)ldl_le_p(haddr);
1897 case MO_BEUQ:
1898 return ldq_be_p(haddr);
1899 case MO_LEUQ:
1900 return ldq_le_p(haddr);
1901 default:
1902 qemu_build_not_reached();
1903 }
1904}
1905
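/*
 * Generic load path: look up @addr in the softmmu TLB (refilling via the
 * victim TLB or tlb_fill on a miss), dispatch to io_readx for MMIO and
 * other special pages, recurse through @full_load for accesses that
 * straddle a page boundary, and otherwise read directly from host memory.
 */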
1906static inline uint64_t QEMU_ALWAYS_INLINE
1907load_helper(CPUArchState *env, target_ulong addr, MemOpIdx oi,
1908 uintptr_t retaddr, MemOp op, bool code_read,
1909 FullLoadHelper *full_load)
1910{
1911 uintptr_t mmu_idx = get_mmuidx(oi);
1912 uintptr_t index = tlb_index(env, mmu_idx, addr);
1913 CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
1914 target_ulong tlb_addr = code_read ? entry->addr_code : entry->addr_read;
1915 const size_t tlb_off = code_read ?
1916 offsetof(CPUTLBEntry, addr_code) : offsetof(CPUTLBEntry, addr_read);
1917 const MMUAccessType access_type =
1918 code_read ? MMU_INST_FETCH : MMU_DATA_LOAD;
1919 unsigned a_bits = get_alignment_bits(get_memop(oi));
1920 void *haddr;
1921 uint64_t res;
1922 size_t size = memop_size(op);
1923
1924
1925 if (addr & ((1 << a_bits) - 1)) {
1926 cpu_unaligned_access(env_cpu(env), addr, access_type,
1927 mmu_idx, retaddr);
1928 }
1929
1930
1931 if (!tlb_hit(tlb_addr, addr)) {
1932 if (!victim_tlb_hit(env, mmu_idx, index, tlb_off,
1933 addr & TARGET_PAGE_MASK)) {
1934 tlb_fill(env_cpu(env), addr, size,
1935 access_type, mmu_idx, retaddr);
1936 index = tlb_index(env, mmu_idx, addr);
1937 entry = tlb_entry(env, mmu_idx, addr);
1938 }
1939 tlb_addr = code_read ? entry->addr_code : entry->addr_read;
1940 tlb_addr &= ~TLB_INVALID_MASK;
1941 }
1942
1943
1944 if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
1945 CPUIOTLBEntry *iotlbentry;
1946 bool need_swap;
1947
1948
1949 if ((addr & (size - 1)) != 0) {
1950 goto do_unaligned_access;
1951 }
1952
1953 iotlbentry = &env_tlb(env)->d[mmu_idx].iotlb[index];
1954
1955
1956 if (unlikely(tlb_addr & TLB_WATCHPOINT)) {
1957
1958 cpu_check_watchpoint(env_cpu(env), addr, size,
1959 iotlbentry->attrs, BP_MEM_READ, retaddr);
1960 }
1961
1962 need_swap = size > 1 && (tlb_addr & TLB_BSWAP);
1963
1964
1965 if (likely(tlb_addr & TLB_MMIO)) {
1966 return io_readx(env, iotlbentry, mmu_idx, addr, retaddr,
1967 access_type, op ^ (need_swap * MO_BSWAP));
1968 }
1969
1970 haddr = (void *)((uintptr_t)addr + entry->addend);
1971
1972
1973
1974
1975
1976
1977 if (unlikely(need_swap)) {
1978 return load_memop(haddr, op ^ MO_BSWAP);
1979 }
1980 return load_memop(haddr, op);
1981 }
1982
1983
1984 if (size > 1
1985 && unlikely((addr & ~TARGET_PAGE_MASK) + size - 1
1986 >= TARGET_PAGE_SIZE)) {
1987 target_ulong addr1, addr2;
1988 uint64_t r1, r2;
1989 unsigned shift;
1990 do_unaligned_access:
1991 addr1 = addr & ~((target_ulong)size - 1);
1992 addr2 = addr1 + size;
1993 r1 = full_load(env, addr1, oi, retaddr);
1994 r2 = full_load(env, addr2, oi, retaddr);
1995 shift = (addr & (size - 1)) * 8;
1996
1997 if (memop_big_endian(op)) {
1998
1999 res = (r1 << shift) | (r2 >> ((size * 8) - shift));
2000 } else {
2001
2002 res = (r1 >> shift) | (r2 << ((size * 8) - shift));
2003 }
2004 return res & MAKE_64BIT_MASK(0, size * 8);
2005 }
2006
2007 haddr = (void *)((uintptr_t)addr + entry->addend);
2008 return load_memop(haddr, op);
2009}

/*
 * For the benefit of TCG generated code, we want to avoid the
 * complication of ABI-specific return type promotion and always
 * return a value extended to the register size of the host. This is
 * tcg_target_long, except in the case of a 32-bit host and 64-bit
 * data, and for that we always have uint64_t.
 *
 * We don't bother with this widened value for SOFTMMU_CODE_ACCESS.
 */
2021static uint64_t full_ldub_mmu(CPUArchState *env, target_ulong addr,
2022 MemOpIdx oi, uintptr_t retaddr)
2023{
2024 validate_memop(oi, MO_UB);
2025 return load_helper(env, addr, oi, retaddr, MO_UB, false, full_ldub_mmu);
2026}
2027
2028tcg_target_ulong helper_ret_ldub_mmu(CPUArchState *env, target_ulong addr,
2029 MemOpIdx oi, uintptr_t retaddr)
2030{
2031 return full_ldub_mmu(env, addr, oi, retaddr);
2032}
2033
2034static uint64_t full_le_lduw_mmu(CPUArchState *env, target_ulong addr,
2035 MemOpIdx oi, uintptr_t retaddr)
2036{
2037 validate_memop(oi, MO_LEUW);
2038 return load_helper(env, addr, oi, retaddr, MO_LEUW, false,
2039 full_le_lduw_mmu);
2040}
2041
2042tcg_target_ulong helper_le_lduw_mmu(CPUArchState *env, target_ulong addr,
2043 MemOpIdx oi, uintptr_t retaddr)
2044{
2045 return full_le_lduw_mmu(env, addr, oi, retaddr);
2046}
2047
2048static uint64_t full_be_lduw_mmu(CPUArchState *env, target_ulong addr,
2049 MemOpIdx oi, uintptr_t retaddr)
2050{
2051 validate_memop(oi, MO_BEUW);
2052 return load_helper(env, addr, oi, retaddr, MO_BEUW, false,
2053 full_be_lduw_mmu);
2054}
2055
2056tcg_target_ulong helper_be_lduw_mmu(CPUArchState *env, target_ulong addr,
2057 MemOpIdx oi, uintptr_t retaddr)
2058{
2059 return full_be_lduw_mmu(env, addr, oi, retaddr);
2060}
2061
2062static uint64_t full_le_ldul_mmu(CPUArchState *env, target_ulong addr,
2063 MemOpIdx oi, uintptr_t retaddr)
2064{
2065 validate_memop(oi, MO_LEUL);
2066 return load_helper(env, addr, oi, retaddr, MO_LEUL, false,
2067 full_le_ldul_mmu);
2068}
2069
2070tcg_target_ulong helper_le_ldul_mmu(CPUArchState *env, target_ulong addr,
2071 MemOpIdx oi, uintptr_t retaddr)
2072{
2073 return full_le_ldul_mmu(env, addr, oi, retaddr);
2074}
2075
2076static uint64_t full_be_ldul_mmu(CPUArchState *env, target_ulong addr,
2077 MemOpIdx oi, uintptr_t retaddr)
2078{
2079 validate_memop(oi, MO_BEUL);
2080 return load_helper(env, addr, oi, retaddr, MO_BEUL, false,
2081 full_be_ldul_mmu);
2082}
2083
2084tcg_target_ulong helper_be_ldul_mmu(CPUArchState *env, target_ulong addr,
2085 MemOpIdx oi, uintptr_t retaddr)
2086{
2087 return full_be_ldul_mmu(env, addr, oi, retaddr);
2088}
2089
2090uint64_t helper_le_ldq_mmu(CPUArchState *env, target_ulong addr,
2091 MemOpIdx oi, uintptr_t retaddr)
2092{
2093 validate_memop(oi, MO_LEUQ);
2094 return load_helper(env, addr, oi, retaddr, MO_LEUQ, false,
2095 helper_le_ldq_mmu);
2096}
2097
2098uint64_t helper_be_ldq_mmu(CPUArchState *env, target_ulong addr,
2099 MemOpIdx oi, uintptr_t retaddr)
2100{
2101 validate_memop(oi, MO_BEUQ);
2102 return load_helper(env, addr, oi, retaddr, MO_BEUQ, false,
2103 helper_be_ldq_mmu);
2104}

/*
 * Provide signed versions of the load routines as well.  We can of course
 * avoid this for 64-bit data, or for 32-bit data on 32-bit host.
 */
2112tcg_target_ulong helper_ret_ldsb_mmu(CPUArchState *env, target_ulong addr,
2113 MemOpIdx oi, uintptr_t retaddr)
2114{
2115 return (int8_t)helper_ret_ldub_mmu(env, addr, oi, retaddr);
2116}
2117
2118tcg_target_ulong helper_le_ldsw_mmu(CPUArchState *env, target_ulong addr,
2119 MemOpIdx oi, uintptr_t retaddr)
2120{
2121 return (int16_t)helper_le_lduw_mmu(env, addr, oi, retaddr);
2122}
2123
2124tcg_target_ulong helper_be_ldsw_mmu(CPUArchState *env, target_ulong addr,
2125 MemOpIdx oi, uintptr_t retaddr)
2126{
2127 return (int16_t)helper_be_lduw_mmu(env, addr, oi, retaddr);
2128}
2129
2130tcg_target_ulong helper_le_ldsl_mmu(CPUArchState *env, target_ulong addr,
2131 MemOpIdx oi, uintptr_t retaddr)
2132{
2133 return (int32_t)helper_le_ldul_mmu(env, addr, oi, retaddr);
2134}
2135
2136tcg_target_ulong helper_be_ldsl_mmu(CPUArchState *env, target_ulong addr,
2137 MemOpIdx oi, uintptr_t retaddr)
2138{
2139 return (int32_t)helper_be_ldul_mmu(env, addr, oi, retaddr);
2140}

/*
 * Load helpers for cpu_ldst.h.
 */
2146static inline uint64_t cpu_load_helper(CPUArchState *env, abi_ptr addr,
2147 MemOpIdx oi, uintptr_t retaddr,
2148 FullLoadHelper *full_load)
2149{
2150 uint64_t ret;
2151
2152 ret = full_load(env, addr, oi, retaddr);
2153 qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
2154 return ret;
2155}
2156
2157uint8_t cpu_ldb_mmu(CPUArchState *env, abi_ptr addr, MemOpIdx oi, uintptr_t ra)
2158{
2159 return cpu_load_helper(env, addr, oi, ra, full_ldub_mmu);
2160}
2161
2162uint16_t cpu_ldw_be_mmu(CPUArchState *env, abi_ptr addr,
2163 MemOpIdx oi, uintptr_t ra)
2164{
2165 return cpu_load_helper(env, addr, oi, ra, full_be_lduw_mmu);
2166}
2167
2168uint32_t cpu_ldl_be_mmu(CPUArchState *env, abi_ptr addr,
2169 MemOpIdx oi, uintptr_t ra)
2170{
2171 return cpu_load_helper(env, addr, oi, ra, full_be_ldul_mmu);
2172}
2173
2174uint64_t cpu_ldq_be_mmu(CPUArchState *env, abi_ptr addr,
2175 MemOpIdx oi, uintptr_t ra)
2176{
2177 return cpu_load_helper(env, addr, oi, ra, helper_be_ldq_mmu);
2178}
2179
2180uint16_t cpu_ldw_le_mmu(CPUArchState *env, abi_ptr addr,
2181 MemOpIdx oi, uintptr_t ra)
2182{
2183 return cpu_load_helper(env, addr, oi, ra, full_le_lduw_mmu);
2184}
2185
2186uint32_t cpu_ldl_le_mmu(CPUArchState *env, abi_ptr addr,
2187 MemOpIdx oi, uintptr_t ra)
2188{
2189 return cpu_load_helper(env, addr, oi, ra, full_le_ldul_mmu);
2190}
2191
2192uint64_t cpu_ldq_le_mmu(CPUArchState *env, abi_ptr addr,
2193 MemOpIdx oi, uintptr_t ra)
2194{
2195 return cpu_load_helper(env, addr, oi, ra, helper_le_ldq_mmu);
2196}

/*
 * Store Helpers
 */
2202static inline void QEMU_ALWAYS_INLINE
2203store_memop(void *haddr, uint64_t val, MemOp op)
2204{
2205 switch (op) {
2206 case MO_UB:
2207 stb_p(haddr, val);
2208 break;
2209 case MO_BEUW:
2210 stw_be_p(haddr, val);
2211 break;
2212 case MO_LEUW:
2213 stw_le_p(haddr, val);
2214 break;
2215 case MO_BEUL:
2216 stl_be_p(haddr, val);
2217 break;
2218 case MO_LEUL:
2219 stl_le_p(haddr, val);
2220 break;
2221 case MO_BEUQ:
2222 stq_be_p(haddr, val);
2223 break;
2224 case MO_LEUQ:
2225 stq_le_p(haddr, val);
2226 break;
2227 default:
2228 qemu_build_not_reached();
2229 }
2230}
2231
2232static void full_stb_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
2233 MemOpIdx oi, uintptr_t retaddr);
2234
2235static void __attribute__((noinline))
2236store_helper_unaligned(CPUArchState *env, target_ulong addr, uint64_t val,
2237 uintptr_t retaddr, size_t size, uintptr_t mmu_idx,
2238 bool big_endian)
2239{
2240 const size_t tlb_off = offsetof(CPUTLBEntry, addr_write);
2241 uintptr_t index, index2;
2242 CPUTLBEntry *entry, *entry2;
2243 target_ulong page2, tlb_addr, tlb_addr2;
2244 MemOpIdx oi;
2245 size_t size2;
2246 int i;

    /*
     * Ensure the second page is in the TLB.  Note that the first page
     * is already guaranteed to be filled, and that the second page
     * cannot evict the first.
     */
2253 page2 = (addr + size) & TARGET_PAGE_MASK;
2254 size2 = (addr + size) & ~TARGET_PAGE_MASK;
2255 index2 = tlb_index(env, mmu_idx, page2);
2256 entry2 = tlb_entry(env, mmu_idx, page2);
2257
2258 tlb_addr2 = tlb_addr_write(entry2);
2259 if (!tlb_hit_page(tlb_addr2, page2)) {
2260 if (!victim_tlb_hit(env, mmu_idx, index2, tlb_off, page2)) {
2261 tlb_fill(env_cpu(env), page2, size2, MMU_DATA_STORE,
2262 mmu_idx, retaddr);
2263 index2 = tlb_index(env, mmu_idx, page2);
2264 entry2 = tlb_entry(env, mmu_idx, page2);
2265 }
2266 tlb_addr2 = tlb_addr_write(entry2);
2267 }
2268
2269 index = tlb_index(env, mmu_idx, addr);
2270 entry = tlb_entry(env, mmu_idx, addr);
2271 tlb_addr = tlb_addr_write(entry);

    /*
     * Handle watchpoints.  Since this may trap, all checks
     * must happen before any store.
     */
2277 if (unlikely(tlb_addr & TLB_WATCHPOINT)) {
2278 cpu_check_watchpoint(env_cpu(env), addr, size - size2,
2279 env_tlb(env)->d[mmu_idx].iotlb[index].attrs,
2280 BP_MEM_WRITE, retaddr);
2281 }
2282 if (unlikely(tlb_addr2 & TLB_WATCHPOINT)) {
2283 cpu_check_watchpoint(env_cpu(env), page2, size2,
2284 env_tlb(env)->d[mmu_idx].iotlb[index2].attrs,
2285 BP_MEM_WRITE, retaddr);
2286 }

    /*
     * This is inefficient but simple: issue the store one byte at a time
     * through the full byte-store helper, so that each byte gets the
     * correct MMIO/notdirty handling on whichever page it lands in.
     */
2293 oi = make_memop_idx(MO_UB, mmu_idx);
2294 if (big_endian) {
2295 for (i = 0; i < size; ++i) {
2296
2297 uint8_t val8 = val >> (((size - 1) * 8) - (i * 8));
2298 full_stb_mmu(env, addr + i, val8, oi, retaddr);
2299 }
2300 } else {
2301 for (i = 0; i < size; ++i) {
2302
2303 uint8_t val8 = val >> (i * 8);
2304 full_stb_mmu(env, addr + i, val8, oi, retaddr);
2305 }
2306 }
2307}
2308
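/*
 * Generic store path, mirroring load_helper: TLB lookup and refill,
 * watchpoint and MMIO handling, dirty-page tracking via notdirty_write,
 * and a byte-wise slow path (store_helper_unaligned) for stores that
 * cross a page boundary.
 */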
static inline void QEMU_ALWAYS_INLINE
store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
             MemOpIdx oi, uintptr_t retaddr, MemOp op)
{
    uintptr_t mmu_idx = get_mmuidx(oi);
    uintptr_t index = tlb_index(env, mmu_idx, addr);
    CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
    target_ulong tlb_addr = tlb_addr_write(entry);
    const size_t tlb_off = offsetof(CPUTLBEntry, addr_write);
    unsigned a_bits = get_alignment_bits(get_memop(oi));
    void *haddr;
    size_t size = memop_size(op);

    /* Handle CPU specific unaligned behaviour.  */
    if (addr & ((1 << a_bits) - 1)) {
        cpu_unaligned_access(env_cpu(env), addr, MMU_DATA_STORE,
                             mmu_idx, retaddr);
    }

    /* If the TLB entry is for a different page, reload and try again.  */
    if (!tlb_hit(tlb_addr, addr)) {
        if (!victim_tlb_hit(env, mmu_idx, index, tlb_off,
            addr & TARGET_PAGE_MASK)) {
            tlb_fill(env_cpu(env), addr, size, MMU_DATA_STORE,
                     mmu_idx, retaddr);
            index = tlb_index(env, mmu_idx, addr);
            entry = tlb_entry(env, mmu_idx, addr);
        }
        tlb_addr = tlb_addr_write(entry) & ~TLB_INVALID_MASK;
    }

    /* Handle anything that isn't just a straight memory access.  */
    if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
        CPUIOTLBEntry *iotlbentry;
        bool need_swap;

        /* For anything that is unaligned, recurse through byte stores.  */
        if ((addr & (size - 1)) != 0) {
            goto do_unaligned_access;
        }

        iotlbentry = &env_tlb(env)->d[mmu_idx].iotlb[index];

        /* Handle watchpoints.  */
        if (unlikely(tlb_addr & TLB_WATCHPOINT)) {
            /* On watchpoint hit, this will longjmp out.  */
            cpu_check_watchpoint(env_cpu(env), addr, size,
                                 iotlbentry->attrs, BP_MEM_WRITE, retaddr);
        }

        need_swap = size > 1 && (tlb_addr & TLB_BSWAP);

        /* Handle I/O access.  */
        if (tlb_addr & TLB_MMIO) {
            io_writex(env, iotlbentry, mmu_idx, val, addr, retaddr,
                      op ^ (need_swap * MO_BSWAP));
            return;
        }

        /* Ignore writes to ROM.  */
        if (unlikely(tlb_addr & TLB_DISCARD_WRITE)) {
            return;
        }

        /* Handle clean RAM pages.  */
        if (tlb_addr & TLB_NOTDIRTY) {
            notdirty_write(env_cpu(env), addr, size, iotlbentry, retaddr);
        }

        haddr = (void *)((uintptr_t)addr + entry->addend);

        /*
         * Keep these two store_memop separate to ensure that the compiler
         * is able to fold the entire function to a single instance.
         * There is a build-time assert inside to remind you of this.  ;-)
         */
        if (unlikely(need_swap)) {
            store_memop(haddr, val, op ^ MO_BSWAP);
        } else {
            store_memop(haddr, val, op);
        }
        return;
    }

    /* Handle slow unaligned access (it spans two pages or IO).  */
    if (size > 1
        && unlikely((addr & ~TARGET_PAGE_MASK) + size - 1
                     >= TARGET_PAGE_SIZE)) {
    do_unaligned_access:
        store_helper_unaligned(env, addr, val, retaddr, size,
                               mmu_idx, memop_big_endian(op));
        return;
    }

    haddr = (void *)((uintptr_t)addr + entry->addend);
    store_memop(haddr, val, op);
}

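/*
 * The full_*_mmu functions below take an explicit MemOpIdx and return
 * address, which makes them callable both from the out-of-line TCG
 * helper entry points and from store_helper_unaligned() above, which
 * issues byte-by-byte stores for accesses that span two pages.
 */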
static void __attribute__((noinline))
full_stb_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
             MemOpIdx oi, uintptr_t retaddr)
{
    validate_memop(oi, MO_UB);
    store_helper(env, addr, val, oi, retaddr, MO_UB);
}

void helper_ret_stb_mmu(CPUArchState *env, target_ulong addr, uint8_t val,
                        MemOpIdx oi, uintptr_t retaddr)
{
    full_stb_mmu(env, addr, val, oi, retaddr);
}

static void full_le_stw_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
                            MemOpIdx oi, uintptr_t retaddr)
{
    validate_memop(oi, MO_LEUW);
    store_helper(env, addr, val, oi, retaddr, MO_LEUW);
}

void helper_le_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
                       MemOpIdx oi, uintptr_t retaddr)
{
    full_le_stw_mmu(env, addr, val, oi, retaddr);
}

static void full_be_stw_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
                            MemOpIdx oi, uintptr_t retaddr)
{
    validate_memop(oi, MO_BEUW);
    store_helper(env, addr, val, oi, retaddr, MO_BEUW);
}

void helper_be_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
                       MemOpIdx oi, uintptr_t retaddr)
{
    full_be_stw_mmu(env, addr, val, oi, retaddr);
}

static void full_le_stl_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
                            MemOpIdx oi, uintptr_t retaddr)
{
    validate_memop(oi, MO_LEUL);
    store_helper(env, addr, val, oi, retaddr, MO_LEUL);
}

void helper_le_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
                       MemOpIdx oi, uintptr_t retaddr)
{
    full_le_stl_mmu(env, addr, val, oi, retaddr);
}

static void full_be_stl_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
                            MemOpIdx oi, uintptr_t retaddr)
{
    validate_memop(oi, MO_BEUL);
    store_helper(env, addr, val, oi, retaddr, MO_BEUL);
}

void helper_be_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
                       MemOpIdx oi, uintptr_t retaddr)
{
    full_be_stl_mmu(env, addr, val, oi, retaddr);
}

void helper_le_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
                       MemOpIdx oi, uintptr_t retaddr)
{
    validate_memop(oi, MO_LEUQ);
    store_helper(env, addr, val, oi, retaddr, MO_LEUQ);
}

void helper_be_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
                       MemOpIdx oi, uintptr_t retaddr)
{
    validate_memop(oi, MO_BEUQ);
    store_helper(env, addr, val, oi, retaddr, MO_BEUQ);
}

/*
 * Store Helpers for cpu_ldst.h
 */

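/*
 * Each wrapper below calls the corresponding full_* or helper_* store
 * and then notifies any loaded TCG plugins of the completed write via
 * qemu_plugin_vcpu_mem_cb().
 */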
typedef void FullStoreHelper(CPUArchState *env, target_ulong addr,
                             uint64_t val, MemOpIdx oi, uintptr_t retaddr);

static inline void cpu_store_helper(CPUArchState *env, target_ulong addr,
                                    uint64_t val, MemOpIdx oi, uintptr_t ra,
                                    FullStoreHelper *full_store)
{
    full_store(env, addr, val, oi, ra);
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
}

void cpu_stb_mmu(CPUArchState *env, target_ulong addr, uint8_t val,
                 MemOpIdx oi, uintptr_t retaddr)
{
    cpu_store_helper(env, addr, val, oi, retaddr, full_stb_mmu);
}

void cpu_stw_be_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
                    MemOpIdx oi, uintptr_t retaddr)
{
    cpu_store_helper(env, addr, val, oi, retaddr, full_be_stw_mmu);
}

void cpu_stl_be_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
                    MemOpIdx oi, uintptr_t retaddr)
{
    cpu_store_helper(env, addr, val, oi, retaddr, full_be_stl_mmu);
}

void cpu_stq_be_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
                    MemOpIdx oi, uintptr_t retaddr)
{
    cpu_store_helper(env, addr, val, oi, retaddr, helper_be_stq_mmu);
}

void cpu_stw_le_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
                    MemOpIdx oi, uintptr_t retaddr)
{
    cpu_store_helper(env, addr, val, oi, retaddr, full_le_stw_mmu);
}

void cpu_stl_le_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
                    MemOpIdx oi, uintptr_t retaddr)
{
    cpu_store_helper(env, addr, val, oi, retaddr, full_le_stl_mmu);
}

void cpu_stq_le_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
                    MemOpIdx oi, uintptr_t retaddr)
{
    cpu_store_helper(env, addr, val, oi, retaddr, helper_le_stq_mmu);
}

#include "ldst_common.c.inc"

/*
 * First set of functions passes in OI and RETADDR.
 * This makes them callable from other helpers.
 */

#define ATOMIC_NAME(X) \
    glue(glue(glue(cpu_atomic_ ## X, SUFFIX), END), _mmu)

#define ATOMIC_MMU_CLEANUP
#define ATOMIC_MMU_IDX   get_mmuidx(oi)

#include "atomic_common.c.inc"

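/*
 * Instantiate the atomic helpers once per operand size.  Each inclusion
 * of atomic_template.h consumes the DATA_SIZE defined just before it.
 */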
#define DATA_SIZE 1
#include "atomic_template.h"

#define DATA_SIZE 2
#include "atomic_template.h"

#define DATA_SIZE 4
#include "atomic_template.h"

#ifdef CONFIG_ATOMIC64
#define DATA_SIZE 8
#include "atomic_template.h"
#endif

#if HAVE_CMPXCHG128 || HAVE_ATOMIC128
#define DATA_SIZE 16
#include "atomic_template.h"
#endif

/* Code access functions.  */

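/*
 * These are used for instruction fetch: the address is looked up in the
 * MMU index returned by cpu_mmu_index(env, true), and load_helper() is
 * invoked with its code-read flag set.
 */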
static uint64_t full_ldub_code(CPUArchState *env, target_ulong addr,
                               MemOpIdx oi, uintptr_t retaddr)
{
    return load_helper(env, addr, oi, retaddr, MO_8, true, full_ldub_code);
}

uint32_t cpu_ldub_code(CPUArchState *env, abi_ptr addr)
{
    MemOpIdx oi = make_memop_idx(MO_UB, cpu_mmu_index(env, true));
    return full_ldub_code(env, addr, oi, 0);
}

static uint64_t full_lduw_code(CPUArchState *env, target_ulong addr,
                               MemOpIdx oi, uintptr_t retaddr)
{
    return load_helper(env, addr, oi, retaddr, MO_TEUW, true, full_lduw_code);
}

uint32_t cpu_lduw_code(CPUArchState *env, abi_ptr addr)
{
    MemOpIdx oi = make_memop_idx(MO_TEUW, cpu_mmu_index(env, true));
    return full_lduw_code(env, addr, oi, 0);
}

static uint64_t full_ldl_code(CPUArchState *env, target_ulong addr,
                              MemOpIdx oi, uintptr_t retaddr)
{
    return load_helper(env, addr, oi, retaddr, MO_TEUL, true, full_ldl_code);
}

uint32_t cpu_ldl_code(CPUArchState *env, abi_ptr addr)
{
    MemOpIdx oi = make_memop_idx(MO_TEUL, cpu_mmu_index(env, true));
    return full_ldl_code(env, addr, oi, 0);
}

static uint64_t full_ldq_code(CPUArchState *env, target_ulong addr,
                              MemOpIdx oi, uintptr_t retaddr)
{
    return load_helper(env, addr, oi, retaddr, MO_TEUQ, true, full_ldq_code);
}

uint64_t cpu_ldq_code(CPUArchState *env, abi_ptr addr)
{
    MemOpIdx oi = make_memop_idx(MO_TEUQ, cpu_mmu_index(env, true));
    return full_ldq_code(env, addr, oi, 0);
}