/*
 *  Common CPU TLB handling
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/main-loop.h"
#include "hw/core/tcg-cpu-ops.h"
#include "exec/exec-all.h"
#include "exec/memory.h"
#include "exec/cpu_ldst.h"
#include "exec/cputlb.h"
#include "exec/memory-internal.h"
#include "exec/ram_addr.h"
#include "tcg/tcg.h"
#include "qemu/error-report.h"
#include "exec/log.h"
#include "exec/helper-proto.h"
#include "qemu/atomic.h"
#include "qemu/atomic128.h"
#include "exec/translate-all.h"
#include "trace/trace-root.h"
#include "tb-hash.h"
#include "internal.h"
#ifdef CONFIG_PLUGIN
#include "qemu/plugin-memory.h"
#endif
#include "tcg/tcg-ldst.h"

/* DEBUG defines, enable DEBUG_TLB_LOG to log to the CPU_LOG_MMU target */
/* #define DEBUG_TLB */
/* #define DEBUG_TLB_LOG */

#ifdef DEBUG_TLB
# define DEBUG_TLB_GATE 1
# ifdef DEBUG_TLB_LOG
#  define DEBUG_TLB_LOG_GATE 1
# else
#  define DEBUG_TLB_LOG_GATE 0
# endif
#else
# define DEBUG_TLB_GATE 0
# define DEBUG_TLB_LOG_GATE 0
#endif

#define tlb_debug(fmt, ...) do { \
    if (DEBUG_TLB_LOG_GATE) { \
        qemu_log_mask(CPU_LOG_MMU, "%s: " fmt, __func__, \
                      ## __VA_ARGS__); \
    } else if (DEBUG_TLB_GATE) { \
        fprintf(stderr, "%s: " fmt, __func__, ## __VA_ARGS__); \
    } \
} while (0)

#define assert_cpu_is_self(cpu) do {                              \
        if (DEBUG_TLB_GATE) {                                     \
            g_assert(!(cpu)->created || qemu_cpu_is_self(cpu));   \
        }                                                         \
    } while (0)

/* run_on_cpu_data.target_ptr should always be big enough for a
 * target_ulong even on 32 bit builds */
QEMU_BUILD_BUG_ON(sizeof(target_ulong) > sizeof(run_on_cpu_data));

/* We currently can't handle more than 16 bits in the MMUIDX bitmask.
 */
QEMU_BUILD_BUG_ON(NB_MMU_MODES > 16);
#define ALL_MMUIDX_BITS ((1 << NB_MMU_MODES) - 1)

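/*
 * CPUTLBDescFast.mask holds (n_entries - 1) << CPU_TLB_ENTRY_BITS, i.e. the
 * byte offset of the last entry in the table.  For example, with 256 entries
 * of 32 bytes each, mask == 255 * 32, so tlb_n_entries() below recovers 256
 * and sizeof_tlb() recovers the 8192-byte table size.
 */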
static inline size_t tlb_n_entries(CPUTLBDescFast *fast)
{
    return (fast->mask >> CPU_TLB_ENTRY_BITS) + 1;
}

static inline size_t sizeof_tlb(CPUTLBDescFast *fast)
{
    return fast->mask + (1 << CPU_TLB_ENTRY_BITS);
}

static void tlb_window_reset(CPUTLBDesc *desc, int64_t ns,
                             size_t max_entries)
{
    desc->window_begin_ns = ns;
    desc->window_max_entries = max_entries;
}

static void tb_jmp_cache_clear_page(CPUState *cpu, target_ulong page_addr)
{
    unsigned int i, i0 = tb_jmp_cache_hash_page(page_addr);

    for (i = 0; i < TB_JMP_PAGE_SIZE; i++) {
        qatomic_set(&cpu->tb_jmp_cache[i0 + i], NULL);
    }
}

static void tb_flush_jmp_cache(CPUState *cpu, target_ulong addr)
{
    /* Discard jump cache entries for any tb which might potentially
       overlap the flushed page.  */
    tb_jmp_cache_clear_page(cpu, addr - TARGET_PAGE_SIZE);
    tb_jmp_cache_clear_page(cpu, addr);
}

/**
 * tlb_mmu_resize_locked() - perform TLB resize bookkeeping; resize if necessary
 * @desc: The CPUTLBDesc portion of the TLB
 * @fast: The CPUTLBDescFast portion of the same TLB
 *
 * Called with tlb_lock_held.
 *
 * We have two main constraints when resizing a TLB: (1) we only resize it
 * on a TLB flush (otherwise we'd have to take a perf hit by either rehashing
 * the array or unnecessarily flushing it), which means we do not control how
 * frequently the resizing can occur; (2) we don't have access to the guest's
 * future scheduling decisions, and therefore have to decide the magnitude of
 * the resize based on past observations.
 *
 * In general, a memory-hungry process can benefit greatly from an
 * appropriately sized TLB, since a guest TLB miss is very expensive. This
 * doesn't mean that we just have to grow the TLB arbitrarily large; to find
 * the optimal size, we need to balance the benefits of memory-hungry
 * processes against the memory cost of maintaining a large TLB.
 *
 * To achieve near-optimal performance for all kinds of workloads, we:
 *
 * 1. Aggressively increase the size of the TLB when the use rate of the
 * TLB being flushed is high, since it is likely that in the near future this
 * memory-hungry process will execute again, and its memory hungriness will
 * probably be similar.
 *
 * 2. Slowly reduce the size of the TLB as the use rate declines over a
 * reasonably large time window. The rationale is that if in such a time
 * window we have not observed a high TLB use rate, it is likely that we
 * won't observe it in the near future. In that case, once a time window
 * expires we downsize the TLB to match the maximum use rate observed in
 * the window.
 *
 * 3. Try to keep the maximum use rate in a time window in the 30-70% range,
 * since in that range performance is likely near-optimal. Recall that the
 * TLB is direct mapped, so we want the use rate to be low (or at least not
 * too high), since otherwise we are likely to have a significant amount of
 * conflict misses.
 */
static void tlb_mmu_resize_locked(CPUTLBDesc *desc, CPUTLBDescFast *fast,
                                  int64_t now)
{
    size_t old_size = tlb_n_entries(fast);
    size_t rate;
    size_t new_size = old_size;
    int64_t window_len_ms = 100;
    int64_t window_len_ns = window_len_ms * 1000 * 1000;
    bool window_expired = now > desc->window_begin_ns + window_len_ns;

    if (desc->n_used_entries > desc->window_max_entries) {
        desc->window_max_entries = desc->n_used_entries;
    }
    rate = desc->window_max_entries * 100 / old_size;

    if (rate > 70) {
        new_size = MIN(old_size << 1, 1 << CPU_TLB_DYN_MAX_BITS);
    } else if (rate < 30 && window_expired) {
        size_t ceil = pow2ceil(desc->window_max_entries);
        size_t expected_rate = desc->window_max_entries * 100 / ceil;

        /*
         * Avoid undersizing when the max number of entries seen is just below
         * a pow2. For instance, if max_entries == 1025, the expected use rate
         * would be 1025/2048==50%. However, if max_entries == 1023, we'd get
         * 1023/1024==99.9% use rate, so we'd likely end up doubling the size
         * later. Thus, make sure that the expected use rate remains below 70%.
         * (and since we double the size, that means the lowest rate we'd
         * expect to get is 35%, which is still in the 30-70% range where
         * we consider that the size is appropriate.)
         */
        if (expected_rate > 70) {
            ceil *= 2;
        }
        new_size = MAX(ceil, 1 << CPU_TLB_DYN_MIN_BITS);
    }

    if (new_size == old_size) {
        if (window_expired) {
            tlb_window_reset(desc, now, desc->n_used_entries);
        }
        return;
    }

    g_free(fast->table);
    g_free(desc->iotlb);

    tlb_window_reset(desc, now, 0);
    /* desc->n_used_entries is cleared by the caller */
    fast->mask = (new_size - 1) << CPU_TLB_ENTRY_BITS;
    fast->table = g_try_new(CPUTLBEntry, new_size);
    desc->iotlb = g_try_new(CPUIOTLBEntry, new_size);

    /*
     * If the allocations fail, try smaller sizes. We just freed some
     * memory, so going back to half of new_size has a good chance of working.
     * Increased memory pressure elsewhere in the system might cause the
     * allocations to fail though, so we progressively reduce the allocation
     * size, aborting if we cannot even allocate the smallest TLB we support.
     */
    while (fast->table == NULL || desc->iotlb == NULL) {
        if (new_size == (1 << CPU_TLB_DYN_MIN_BITS)) {
            error_report("%s: %s", __func__, strerror(errno));
            abort();
        }
        new_size = MAX(new_size >> 1, 1 << CPU_TLB_DYN_MIN_BITS);
        fast->mask = (new_size - 1) << CPU_TLB_ENTRY_BITS;

        g_free(fast->table);
        g_free(desc->iotlb);
        fast->table = g_try_new(CPUTLBEntry, new_size);
        desc->iotlb = g_try_new(CPUIOTLBEntry, new_size);
    }
}
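
/*
 * A worked example of the sizing heuristic above: with a 256-entry TLB and
 * window_max_entries == 190, rate == 74, so the table doubles to 512 entries.
 * If over the next 100ms window the maximum use drops to, say, 40 entries
 * (rate == 7), the table is shrunk to pow2ceil(40) == 64 entries (clamped to
 * at least 1 << CPU_TLB_DYN_MIN_BITS).
 */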

static void tlb_mmu_flush_locked(CPUTLBDesc *desc, CPUTLBDescFast *fast)
{
    desc->n_used_entries = 0;
    desc->large_page_addr = -1;
    desc->large_page_mask = -1;
    desc->vindex = 0;
    memset(fast->table, -1, sizeof_tlb(fast));
    memset(desc->vtable, -1, sizeof(desc->vtable));
}

static void tlb_flush_one_mmuidx_locked(CPUArchState *env, int mmu_idx,
                                        int64_t now)
{
    CPUTLBDesc *desc = &env_tlb(env)->d[mmu_idx];
    CPUTLBDescFast *fast = &env_tlb(env)->f[mmu_idx];

    tlb_mmu_resize_locked(desc, fast, now);
    tlb_mmu_flush_locked(desc, fast);
}

static void tlb_mmu_init(CPUTLBDesc *desc, CPUTLBDescFast *fast, int64_t now)
{
    size_t n_entries = 1 << CPU_TLB_DYN_DEFAULT_BITS;

    tlb_window_reset(desc, now, 0);
    desc->n_used_entries = 0;
    fast->mask = (n_entries - 1) << CPU_TLB_ENTRY_BITS;
    fast->table = g_new(CPUTLBEntry, n_entries);
    desc->iotlb = g_new(CPUIOTLBEntry, n_entries);
    tlb_mmu_flush_locked(desc, fast);
}

static inline void tlb_n_used_entries_inc(CPUArchState *env, uintptr_t mmu_idx)
{
    env_tlb(env)->d[mmu_idx].n_used_entries++;
}

static inline void tlb_n_used_entries_dec(CPUArchState *env, uintptr_t mmu_idx)
{
    env_tlb(env)->d[mmu_idx].n_used_entries--;
}

void tlb_init(CPUState *cpu)
{
    CPUArchState *env = cpu->env_ptr;
    int64_t now = get_clock_realtime();
    int i;

    qemu_spin_init(&env_tlb(env)->c.lock);

    /* All tlbs are initialized flushed. */
    env_tlb(env)->c.dirty = 0;

    for (i = 0; i < NB_MMU_MODES; i++) {
        tlb_mmu_init(&env_tlb(env)->d[i], &env_tlb(env)->f[i], now);
    }
}

void tlb_destroy(CPUState *cpu)
{
    CPUArchState *env = cpu->env_ptr;
    int i;

    qemu_spin_destroy(&env_tlb(env)->c.lock);
    for (i = 0; i < NB_MMU_MODES; i++) {
        CPUTLBDesc *desc = &env_tlb(env)->d[i];
        CPUTLBDescFast *fast = &env_tlb(env)->f[i];

        g_free(fast->table);
        g_free(desc->iotlb);
    }
}

/* flush_all_helper: run fn across all cpus
 *
 * If the wait flag is set then the src cpu's helper will be queued as
 * "safe" work and the loop exited creating a synchronisation point
 * where all queued work will be finished before execution starts
 * again.
 */
static void flush_all_helper(CPUState *src, run_on_cpu_func fn,
                             run_on_cpu_data d)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (cpu != src) {
            async_run_on_cpu(cpu, fn, d);
        }
    }
}

void tlb_flush_counts(size_t *pfull, size_t *ppart, size_t *pelide)
{
    CPUState *cpu;
    size_t full = 0, part = 0, elide = 0;

    CPU_FOREACH(cpu) {
        CPUArchState *env = cpu->env_ptr;

        full += qatomic_read(&env_tlb(env)->c.full_flush_count);
        part += qatomic_read(&env_tlb(env)->c.part_flush_count);
        elide += qatomic_read(&env_tlb(env)->c.elide_flush_count);
    }
    *pfull = full;
    *ppart = part;
    *pelide = elide;
}
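
/*
 * Flush the TLBs for the mmu indexes in @data on @cpu.  Only the modes that
 * are marked dirty in tlb_c.dirty are actually flushed; flushes of modes
 * that are already clean are elided and counted as such.
 */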
static void tlb_flush_by_mmuidx_async_work(CPUState *cpu, run_on_cpu_data data)
{
    CPUArchState *env = cpu->env_ptr;
    uint16_t asked = data.host_int;
    uint16_t all_dirty, work, to_clean;
    int64_t now = get_clock_realtime();

    assert_cpu_is_self(cpu);

    tlb_debug("mmu_idx:0x%04" PRIx16 "\n", asked);

    qemu_spin_lock(&env_tlb(env)->c.lock);

    all_dirty = env_tlb(env)->c.dirty;
    to_clean = asked & all_dirty;
    all_dirty &= ~to_clean;
    env_tlb(env)->c.dirty = all_dirty;

    for (work = to_clean; work != 0; work &= work - 1) {
        int mmu_idx = ctz32(work);
        tlb_flush_one_mmuidx_locked(env, mmu_idx, now);
    }

    qemu_spin_unlock(&env_tlb(env)->c.lock);

    cpu_tb_jmp_cache_clear(cpu);

    if (to_clean == ALL_MMUIDX_BITS) {
        qatomic_set(&env_tlb(env)->c.full_flush_count,
                    env_tlb(env)->c.full_flush_count + 1);
    } else {
        qatomic_set(&env_tlb(env)->c.part_flush_count,
                    env_tlb(env)->c.part_flush_count + ctpop16(to_clean));
        if (to_clean != asked) {
            qatomic_set(&env_tlb(env)->c.elide_flush_count,
                        env_tlb(env)->c.elide_flush_count +
                        ctpop16(asked & ~to_clean));
        }
    }
}

void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap)
{
    tlb_debug("mmu_idx: 0x%" PRIx16 "\n", idxmap);

    if (cpu->created && !qemu_cpu_is_self(cpu)) {
        async_run_on_cpu(cpu, tlb_flush_by_mmuidx_async_work,
                         RUN_ON_CPU_HOST_INT(idxmap));
    } else {
        tlb_flush_by_mmuidx_async_work(cpu, RUN_ON_CPU_HOST_INT(idxmap));
    }
}

void tlb_flush(CPUState *cpu)
{
    tlb_flush_by_mmuidx(cpu, ALL_MMUIDX_BITS);
}

void tlb_flush_by_mmuidx_all_cpus(CPUState *src_cpu, uint16_t idxmap)
{
    const run_on_cpu_func fn = tlb_flush_by_mmuidx_async_work;

    tlb_debug("mmu_idx: 0x%"PRIx16"\n", idxmap);

    flush_all_helper(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
    fn(src_cpu, RUN_ON_CPU_HOST_INT(idxmap));
}

void tlb_flush_all_cpus(CPUState *src_cpu)
{
    tlb_flush_by_mmuidx_all_cpus(src_cpu, ALL_MMUIDX_BITS);
}

void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *src_cpu, uint16_t idxmap)
{
    const run_on_cpu_func fn = tlb_flush_by_mmuidx_async_work;

    tlb_debug("mmu_idx: 0x%"PRIx16"\n", idxmap);

    flush_all_helper(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
    async_safe_run_on_cpu(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
}

void tlb_flush_all_cpus_synced(CPUState *src_cpu)
{
    tlb_flush_by_mmuidx_all_cpus_synced(src_cpu, ALL_MMUIDX_BITS);
}
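
/*
 * Return true if @page, masked by @mask, matches the given TLB entry under
 * any of its read/write/code comparators.  TLB_INVALID_MASK is kept in the
 * comparison mask so that an invalidated entry never matches.
 */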
static bool tlb_hit_page_mask_anyprot(CPUTLBEntry *tlb_entry,
                                      target_ulong page, target_ulong mask)
{
    page &= mask;
    mask &= TARGET_PAGE_MASK | TLB_INVALID_MASK;

    return (page == (tlb_entry->addr_read & mask) ||
            page == (tlb_addr_write(tlb_entry) & mask) ||
            page == (tlb_entry->addr_code & mask));
}

static inline bool tlb_hit_page_anyprot(CPUTLBEntry *tlb_entry,
                                        target_ulong page)
{
    return tlb_hit_page_mask_anyprot(tlb_entry, page, -1);
}

/**
 * tlb_entry_is_empty - return true if the entry is not in use
 * @te: pointer to CPUTLBEntry
 */
static inline bool tlb_entry_is_empty(const CPUTLBEntry *te)
{
    return te->addr_read == -1 && te->addr_write == -1 && te->addr_code == -1;
}

/* Called with tlb_c.lock held */
static bool tlb_flush_entry_mask_locked(CPUTLBEntry *tlb_entry,
                                        target_ulong page,
                                        target_ulong mask)
{
    if (tlb_hit_page_mask_anyprot(tlb_entry, page, mask)) {
        memset(tlb_entry, -1, sizeof(*tlb_entry));
        return true;
    }
    return false;
}

static inline bool tlb_flush_entry_locked(CPUTLBEntry *tlb_entry,
                                          target_ulong page)
{
    return tlb_flush_entry_mask_locked(tlb_entry, page, -1);
}

/* Called with tlb_c.lock held */
static void tlb_flush_vtlb_page_mask_locked(CPUArchState *env, int mmu_idx,
                                            target_ulong page,
                                            target_ulong mask)
{
    CPUTLBDesc *d = &env_tlb(env)->d[mmu_idx];
    int k;

    assert_cpu_is_self(env_cpu(env));
    for (k = 0; k < CPU_VTLB_SIZE; k++) {
        if (tlb_flush_entry_mask_locked(&d->vtable[k], page, mask)) {
            tlb_n_used_entries_dec(env, mmu_idx);
        }
    }
}

static inline void tlb_flush_vtlb_page_locked(CPUArchState *env, int mmu_idx,
                                              target_ulong page)
{
    tlb_flush_vtlb_page_mask_locked(env, mmu_idx, page, -1);
}

static void tlb_flush_page_locked(CPUArchState *env, int midx,
                                  target_ulong page)
{
    target_ulong lp_addr = env_tlb(env)->d[midx].large_page_addr;
    target_ulong lp_mask = env_tlb(env)->d[midx].large_page_mask;

    /* Check if we need to flush due to large pages.  */
    if ((page & lp_mask) == lp_addr) {
        tlb_debug("forcing full flush midx %d ("
                  TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
                  midx, lp_addr, lp_mask);
        tlb_flush_one_mmuidx_locked(env, midx, get_clock_realtime());
    } else {
        if (tlb_flush_entry_locked(tlb_entry(env, midx, page), page)) {
            tlb_n_used_entries_dec(env, midx);
        }
        tlb_flush_vtlb_page_locked(env, midx, page);
    }
}

/**
 * tlb_flush_page_by_mmuidx_async_0:
 * @cpu: cpu on which to flush
 * @addr: page of virtual address to flush
 * @idxmap: set of mmu_idx to flush
 *
 * Helper for tlb_flush_page_by_mmuidx and friends, flush one page
 * at @addr from the tlbs indicated by @idxmap from @cpu.
 */
static void tlb_flush_page_by_mmuidx_async_0(CPUState *cpu,
                                             target_ulong addr,
                                             uint16_t idxmap)
{
    CPUArchState *env = cpu->env_ptr;
    int mmu_idx;

    assert_cpu_is_self(cpu);

    tlb_debug("page addr:" TARGET_FMT_lx " mmu_map:0x%x\n", addr, idxmap);

    qemu_spin_lock(&env_tlb(env)->c.lock);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        if ((idxmap >> mmu_idx) & 1) {
            tlb_flush_page_locked(env, mmu_idx, addr);
        }
    }
    qemu_spin_unlock(&env_tlb(env)->c.lock);

    tb_flush_jmp_cache(cpu, addr);
}

/**
 * tlb_flush_page_by_mmuidx_async_1:
 * @cpu: cpu on which to flush
 * @data: encoded addr + idxmap
 *
 * Helper for tlb_flush_page_by_mmuidx and friends, called through
 * async_run_on_cpu.  The idxmap parameter is encoded in the page
 * offset of the target_ptr field.  This limits the set of mmu_idx
 * that can be passed via this method.
 */
static void tlb_flush_page_by_mmuidx_async_1(CPUState *cpu,
                                             run_on_cpu_data data)
{
    target_ulong addr_and_idxmap = (target_ulong) data.target_ptr;
    target_ulong addr = addr_and_idxmap & TARGET_PAGE_MASK;
    uint16_t idxmap = addr_and_idxmap & ~TARGET_PAGE_MASK;

    tlb_flush_page_by_mmuidx_async_0(cpu, addr, idxmap);
}

typedef struct {
    target_ulong addr;
    uint16_t idxmap;
} TLBFlushPageByMMUIdxData;

/**
 * tlb_flush_page_by_mmuidx_async_2:
 * @cpu: cpu on which to flush
 * @data: allocated addr + idxmap
 *
 * Helper for tlb_flush_page_by_mmuidx and friends, called through
 * async_run_on_cpu.  The addr+idxmap parameters are stored in a
 * TLBFlushPageByMMUIdxData structure that has been allocated
 * specifically for this helper.  Free the structure when done.
 */
static void tlb_flush_page_by_mmuidx_async_2(CPUState *cpu,
                                             run_on_cpu_data data)
{
    TLBFlushPageByMMUIdxData *d = data.host_ptr;

    tlb_flush_page_by_mmuidx_async_0(cpu, d->addr, d->idxmap);
    g_free(d);
}

void tlb_flush_page_by_mmuidx(CPUState *cpu, target_ulong addr, uint16_t idxmap)
{
    tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%" PRIx16 "\n", addr, idxmap);

    /* This should already be page aligned */
    addr &= TARGET_PAGE_MASK;

    if (qemu_cpu_is_self(cpu)) {
        tlb_flush_page_by_mmuidx_async_0(cpu, addr, idxmap);
    } else if (idxmap < TARGET_PAGE_SIZE) {
        /*
         * Most targets have only a few mmu_idx.  In the case where
         * we can stuff idxmap into the low TARGET_PAGE_BITS, avoid
         * allocating memory for this operation.
         */
        async_run_on_cpu(cpu, tlb_flush_page_by_mmuidx_async_1,
                         RUN_ON_CPU_TARGET_PTR(addr | idxmap));
    } else {
        TLBFlushPageByMMUIdxData *d = g_new(TLBFlushPageByMMUIdxData, 1);

        /* Otherwise allocate a structure, freed by the worker.  */
        d->addr = addr;
        d->idxmap = idxmap;
        async_run_on_cpu(cpu, tlb_flush_page_by_mmuidx_async_2,
                         RUN_ON_CPU_HOST_PTR(d));
    }
}

void tlb_flush_page(CPUState *cpu, target_ulong addr)
{
    tlb_flush_page_by_mmuidx(cpu, addr, ALL_MMUIDX_BITS);
}

void tlb_flush_page_by_mmuidx_all_cpus(CPUState *src_cpu, target_ulong addr,
                                       uint16_t idxmap)
{
    tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%"PRIx16"\n", addr, idxmap);

    /* This should already be page aligned */
    addr &= TARGET_PAGE_MASK;

    /*
     * Allocate memory to hold addr+idxmap only when needed.
     * See tlb_flush_page_by_mmuidx for details.
     */
    if (idxmap < TARGET_PAGE_SIZE) {
        flush_all_helper(src_cpu, tlb_flush_page_by_mmuidx_async_1,
                         RUN_ON_CPU_TARGET_PTR(addr | idxmap));
    } else {
        CPUState *dst_cpu;

        /* Allocate a separate data block for each destination cpu.  */
        CPU_FOREACH(dst_cpu) {
            if (dst_cpu != src_cpu) {
                TLBFlushPageByMMUIdxData *d
                    = g_new(TLBFlushPageByMMUIdxData, 1);

                d->addr = addr;
                d->idxmap = idxmap;
                async_run_on_cpu(dst_cpu, tlb_flush_page_by_mmuidx_async_2,
                                 RUN_ON_CPU_HOST_PTR(d));
            }
        }
    }

    tlb_flush_page_by_mmuidx_async_0(src_cpu, addr, idxmap);
}

void tlb_flush_page_all_cpus(CPUState *src, target_ulong addr)
{
    tlb_flush_page_by_mmuidx_all_cpus(src, addr, ALL_MMUIDX_BITS);
}

void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
                                              target_ulong addr,
                                              uint16_t idxmap)
{
    tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%"PRIx16"\n", addr, idxmap);

    /* This should already be page aligned */
    addr &= TARGET_PAGE_MASK;

    /*
     * Allocate memory to hold addr+idxmap only when needed.
     * See tlb_flush_page_by_mmuidx for details.
     */
    if (idxmap < TARGET_PAGE_SIZE) {
        flush_all_helper(src_cpu, tlb_flush_page_by_mmuidx_async_1,
                         RUN_ON_CPU_TARGET_PTR(addr | idxmap));
        async_safe_run_on_cpu(src_cpu, tlb_flush_page_by_mmuidx_async_1,
                              RUN_ON_CPU_TARGET_PTR(addr | idxmap));
    } else {
        CPUState *dst_cpu;
        TLBFlushPageByMMUIdxData *d;

        /* Allocate a separate data block for each destination cpu.  */
        CPU_FOREACH(dst_cpu) {
            if (dst_cpu != src_cpu) {
                d = g_new(TLBFlushPageByMMUIdxData, 1);
                d->addr = addr;
                d->idxmap = idxmap;
                async_run_on_cpu(dst_cpu, tlb_flush_page_by_mmuidx_async_2,
                                 RUN_ON_CPU_HOST_PTR(d));
            }
        }

        d = g_new(TLBFlushPageByMMUIdxData, 1);
        d->addr = addr;
        d->idxmap = idxmap;
        async_safe_run_on_cpu(src_cpu, tlb_flush_page_by_mmuidx_async_2,
                              RUN_ON_CPU_HOST_PTR(d));
    }
}

void tlb_flush_page_all_cpus_synced(CPUState *src, target_ulong addr)
{
    tlb_flush_page_by_mmuidx_all_cpus_synced(src, addr, ALL_MMUIDX_BITS);
}

static void tlb_flush_range_locked(CPUArchState *env, int midx,
                                   target_ulong addr, target_ulong len,
                                   unsigned bits)
{
    CPUTLBDesc *d = &env_tlb(env)->d[midx];
    CPUTLBDescFast *f = &env_tlb(env)->f[midx];
    target_ulong mask = MAKE_64BIT_MASK(0, bits);

    /*
     * If @bits is smaller than the tlb size, there may be multiple entries
     * within the TLB; otherwise all addresses that match under @mask hit
     * the same TLB entry.
     * TODO: Perhaps allow bits to be a few bits less than the size.
     * For now, just flush the entire TLB.
     *
     * If @len is larger than the tlb size, then it will take longer to
     * test all of the entries in the TLB than it will to flush it all.
     */
    if (mask < f->mask || len > f->mask) {
        tlb_debug("forcing full flush midx %d ("
                  TARGET_FMT_lx "/" TARGET_FMT_lx "+" TARGET_FMT_lx ")\n",
                  midx, addr, mask, len);
        tlb_flush_one_mmuidx_locked(env, midx, get_clock_realtime());
        return;
    }

    /*
     * Check if we need to flush due to large pages.
     * Because large_page_mask contains all 1's from the msb,
     * we only need to test the end of the range.
     */
    if (((addr + len - 1) & d->large_page_mask) == d->large_page_addr) {
        tlb_debug("forcing full flush midx %d ("
                  TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
                  midx, d->large_page_addr, d->large_page_mask);
        tlb_flush_one_mmuidx_locked(env, midx, get_clock_realtime());
        return;
    }

    for (target_ulong i = 0; i < len; i += TARGET_PAGE_SIZE) {
        target_ulong page = addr + i;
        CPUTLBEntry *entry = tlb_entry(env, midx, page);

        if (tlb_flush_entry_mask_locked(entry, page, mask)) {
            tlb_n_used_entries_dec(env, midx);
        }
        tlb_flush_vtlb_page_mask_locked(env, midx, page, mask);
    }
}
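
/*
 * The @bits argument above encodes how many low address bits are significant
 * for the match: e.g. with a 64-bit target_ulong, bits == 48 compares only
 * the low 48 bits of each TLB comparator against the page, which is how
 * targets with tagged addresses (such as Arm TBI) flush untagged ranges.
 */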

typedef struct {
    target_ulong addr;
    target_ulong len;
    uint16_t idxmap;
    uint16_t bits;
} TLBFlushRangeData;

static void tlb_flush_range_by_mmuidx_async_0(CPUState *cpu,
                                              TLBFlushRangeData d)
{
    CPUArchState *env = cpu->env_ptr;
    int mmu_idx;

    assert_cpu_is_self(cpu);

    tlb_debug("range:" TARGET_FMT_lx "/%u+" TARGET_FMT_lx " mmu_map:0x%x\n",
              d.addr, d.bits, d.len, d.idxmap);

    qemu_spin_lock(&env_tlb(env)->c.lock);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        if ((d.idxmap >> mmu_idx) & 1) {
            tlb_flush_range_locked(env, mmu_idx, d.addr, d.len, d.bits);
        }
    }
    qemu_spin_unlock(&env_tlb(env)->c.lock);

    for (target_ulong i = 0; i < d.len; i += TARGET_PAGE_SIZE) {
        tb_flush_jmp_cache(cpu, d.addr + i);
    }
}

static void tlb_flush_range_by_mmuidx_async_1(CPUState *cpu,
                                              run_on_cpu_data data)
{
    TLBFlushRangeData *d = data.host_ptr;
    tlb_flush_range_by_mmuidx_async_0(cpu, *d);
    g_free(d);
}

void tlb_flush_range_by_mmuidx(CPUState *cpu, target_ulong addr,
                               target_ulong len, uint16_t idxmap,
                               unsigned bits)
{
    TLBFlushRangeData d;

    /*
     * If all bits are significant, and len is small,
     * this devolves to tlb_flush_page.
     */
    if (bits >= TARGET_LONG_BITS && len <= TARGET_PAGE_SIZE) {
        tlb_flush_page_by_mmuidx(cpu, addr, idxmap);
        return;
    }
    /* If no page bits are significant, this devolves to tlb_flush. */
    if (bits < TARGET_PAGE_BITS) {
        tlb_flush_by_mmuidx(cpu, idxmap);
        return;
    }

    /* This should already be page aligned */
    d.addr = addr & TARGET_PAGE_MASK;
    d.len = len;
    d.idxmap = idxmap;
    d.bits = bits;

    if (qemu_cpu_is_self(cpu)) {
        tlb_flush_range_by_mmuidx_async_0(cpu, d);
    } else {
        /* Otherwise allocate a structure, freed by the worker.  */
        TLBFlushRangeData *p = g_memdup(&d, sizeof(d));
        async_run_on_cpu(cpu, tlb_flush_range_by_mmuidx_async_1,
                         RUN_ON_CPU_HOST_PTR(p));
    }
}

void tlb_flush_page_bits_by_mmuidx(CPUState *cpu, target_ulong addr,
                                   uint16_t idxmap, unsigned bits)
{
    tlb_flush_range_by_mmuidx(cpu, addr, TARGET_PAGE_SIZE, idxmap, bits);
}

void tlb_flush_range_by_mmuidx_all_cpus(CPUState *src_cpu,
                                        target_ulong addr, target_ulong len,
                                        uint16_t idxmap, unsigned bits)
{
    TLBFlushRangeData d;
    CPUState *dst_cpu;

    /*
     * If all bits are significant, and len is small,
     * this devolves to tlb_flush_page.
     */
    if (bits >= TARGET_LONG_BITS && len <= TARGET_PAGE_SIZE) {
        tlb_flush_page_by_mmuidx_all_cpus(src_cpu, addr, idxmap);
        return;
    }
    /* If no page bits are significant, this devolves to tlb_flush. */
    if (bits < TARGET_PAGE_BITS) {
        tlb_flush_by_mmuidx_all_cpus(src_cpu, idxmap);
        return;
    }

    /* This should already be page aligned */
    d.addr = addr & TARGET_PAGE_MASK;
    d.len = len;
    d.idxmap = idxmap;
    d.bits = bits;

    /* Allocate a separate data block for each destination cpu.  */
    CPU_FOREACH(dst_cpu) {
        if (dst_cpu != src_cpu) {
            TLBFlushRangeData *p = g_memdup(&d, sizeof(d));
            async_run_on_cpu(dst_cpu,
                             tlb_flush_range_by_mmuidx_async_1,
                             RUN_ON_CPU_HOST_PTR(p));
        }
    }

    tlb_flush_range_by_mmuidx_async_0(src_cpu, d);
}

void tlb_flush_page_bits_by_mmuidx_all_cpus(CPUState *src_cpu,
                                            target_ulong addr,
                                            uint16_t idxmap, unsigned bits)
{
    tlb_flush_range_by_mmuidx_all_cpus(src_cpu, addr, TARGET_PAGE_SIZE,
                                       idxmap, bits);
}

void tlb_flush_range_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
                                               target_ulong addr,
                                               target_ulong len,
                                               uint16_t idxmap,
                                               unsigned bits)
{
    TLBFlushRangeData d, *p;
    CPUState *dst_cpu;

    /*
     * If all bits are significant, and len is small,
     * this devolves to tlb_flush_page.
     */
    if (bits >= TARGET_LONG_BITS && len <= TARGET_PAGE_SIZE) {
        tlb_flush_page_by_mmuidx_all_cpus_synced(src_cpu, addr, idxmap);
        return;
    }
    /* If no page bits are significant, this devolves to tlb_flush. */
    if (bits < TARGET_PAGE_BITS) {
        tlb_flush_by_mmuidx_all_cpus_synced(src_cpu, idxmap);
        return;
    }

    /* This should already be page aligned */
    d.addr = addr & TARGET_PAGE_MASK;
    d.len = len;
    d.idxmap = idxmap;
    d.bits = bits;

    /* Allocate a separate data block for each destination cpu.  */
    CPU_FOREACH(dst_cpu) {
        if (dst_cpu != src_cpu) {
            p = g_memdup(&d, sizeof(d));
            async_run_on_cpu(dst_cpu, tlb_flush_range_by_mmuidx_async_1,
                             RUN_ON_CPU_HOST_PTR(p));
        }
    }

    p = g_memdup(&d, sizeof(d));
    async_safe_run_on_cpu(src_cpu, tlb_flush_range_by_mmuidx_async_1,
                          RUN_ON_CPU_HOST_PTR(p));
}

void tlb_flush_page_bits_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
                                                   target_ulong addr,
                                                   uint16_t idxmap,
                                                   unsigned bits)
{
    tlb_flush_range_by_mmuidx_all_cpus_synced(src_cpu, addr, TARGET_PAGE_SIZE,
                                              idxmap, bits);
}

/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
void tlb_protect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_test_and_clear_dirty(ram_addr, TARGET_PAGE_SIZE,
                                             DIRTY_MEMORY_CODE);
}

/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
void tlb_unprotect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_set_dirty_flag(ram_addr, DIRTY_MEMORY_CODE);
}

/*
 * Dirty write flag handling
 *
 * When the TCG code writes to a location it looks up the address in
 * the TLB and uses that data to compute the final address. If any of
 * the lower bits of the address are set then the slow path is forced.
 * There are a number of reasons to do this but for normal RAM the
 * most usual is detecting writes to code regions which may invalidate
 * generated code.
 *
 * Other vCPUs might be reading their TLBs during guest execution, so we update
 * te->addr_write with qatomic_set. We don't need to worry about this for
 * oversized guests as MTTCG is disabled for them.
 *
 * Called with tlb_c.lock held.
 */
static void tlb_reset_dirty_range_locked(CPUTLBEntry *tlb_entry,
                                         uintptr_t start, uintptr_t length)
{
    uintptr_t addr = tlb_entry->addr_write;

    if ((addr & (TLB_INVALID_MASK | TLB_MMIO |
                 TLB_DISCARD_WRITE | TLB_NOTDIRTY)) == 0) {
        addr &= TARGET_PAGE_MASK;
        addr += tlb_entry->addend;
        if ((addr - start) < length) {
#if TCG_OVERSIZED_GUEST
            tlb_entry->addr_write |= TLB_NOTDIRTY;
#else
            qatomic_set(&tlb_entry->addr_write,
                        tlb_entry->addr_write | TLB_NOTDIRTY);
#endif
        }
    }
}

/*
 * Called with tlb_c.lock held.
 * Called only from the vCPU context, i.e. the TLB's owner thread.
 */
static inline void copy_tlb_helper_locked(CPUTLBEntry *d, const CPUTLBEntry *s)
{
    *d = *s;
}

/* This is a cross vCPU call (i.e. another vCPU resetting the flags of
 * the target vCPU).
 * We must take tlb_c.lock to avoid racing with another vCPU update. The only
 * thing actually updated is the target TLB entry ->addr_write flags.
 */
void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length)
{
    CPUArchState *env;

    int mmu_idx;

    env = cpu->env_ptr;
    qemu_spin_lock(&env_tlb(env)->c.lock);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        unsigned int i;
        unsigned int n = tlb_n_entries(&env_tlb(env)->f[mmu_idx]);

        for (i = 0; i < n; i++) {
            tlb_reset_dirty_range_locked(&env_tlb(env)->f[mmu_idx].table[i],
                                         start1, length);
        }

        for (i = 0; i < CPU_VTLB_SIZE; i++) {
            tlb_reset_dirty_range_locked(&env_tlb(env)->d[mmu_idx].vtable[i],
                                         start1, length);
        }
    }
    qemu_spin_unlock(&env_tlb(env)->c.lock);
}

/* Called with tlb_c.lock held */
static inline void tlb_set_dirty1_locked(CPUTLBEntry *tlb_entry,
                                         target_ulong vaddr)
{
    if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY)) {
        tlb_entry->addr_write = vaddr;
    }
}

/* update the TLB corresponding to virtual page vaddr
   so that it is no longer dirty */
void tlb_set_dirty(CPUState *cpu, target_ulong vaddr)
{
    CPUArchState *env = cpu->env_ptr;
    int mmu_idx;

    assert_cpu_is_self(cpu);

    vaddr &= TARGET_PAGE_MASK;
    qemu_spin_lock(&env_tlb(env)->c.lock);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        tlb_set_dirty1_locked(tlb_entry(env, mmu_idx, vaddr), vaddr);
    }

    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        int k;
        for (k = 0; k < CPU_VTLB_SIZE; k++) {
            tlb_set_dirty1_locked(&env_tlb(env)->d[mmu_idx].vtable[k], vaddr);
        }
    }
    qemu_spin_unlock(&env_tlb(env)->c.lock);
}

/* Our TLB does not support large pages, so remember the area covered by
   large pages and trigger a full TLB flush if these are invalidated.  */
static void tlb_add_large_page(CPUArchState *env, int mmu_idx,
                               target_ulong vaddr, target_ulong size)
{
    target_ulong lp_addr = env_tlb(env)->d[mmu_idx].large_page_addr;
    target_ulong lp_mask = ~(size - 1);

    if (lp_addr == (target_ulong)-1) {
        /* No previous large page.  */
        lp_addr = vaddr;
    } else {
        /* Extend the existing region to include the new page.
           This is a compromise between unnecessary flushes and
           the cost of maintaining a full variable size TLB.  */
        lp_mask &= env_tlb(env)->d[mmu_idx].large_page_mask;
        while (((lp_addr ^ vaddr) & lp_mask) != 0) {
            lp_mask <<= 1;
        }
    }
    env_tlb(env)->d[mmu_idx].large_page_addr = lp_addr & lp_mask;
    env_tlb(env)->d[mmu_idx].large_page_mask = lp_mask;
}

/* Add a new TLB entry. At most one entry for a given virtual address
 * is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the
 * supplied size is only used by tlb_flush_page.
 *
 * Called from TCG-generated code, which is under an RCU read-side
 * critical section.
 */
void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
                             hwaddr paddr, MemTxAttrs attrs, int prot,
                             int mmu_idx, target_ulong size)
{
    CPUArchState *env = cpu->env_ptr;
    CPUTLB *tlb = env_tlb(env);
    CPUTLBDesc *desc = &tlb->d[mmu_idx];
    MemoryRegionSection *section;
    unsigned int index;
    target_ulong address;
    target_ulong write_address;
    uintptr_t addend;
    CPUTLBEntry *te, tn;
    hwaddr iotlb, xlat, sz, paddr_page;
    target_ulong vaddr_page;
    int asidx = cpu_asidx_from_attrs(cpu, attrs);
    int wp_flags;
    bool is_ram, is_romd;

    assert_cpu_is_self(cpu);

    if (size <= TARGET_PAGE_SIZE) {
        sz = TARGET_PAGE_SIZE;
    } else {
        tlb_add_large_page(env, mmu_idx, vaddr, size);
        sz = size;
    }
    vaddr_page = vaddr & TARGET_PAGE_MASK;
    paddr_page = paddr & TARGET_PAGE_MASK;

    section = address_space_translate_for_iotlb(cpu, asidx, paddr_page,
                                                &xlat, &sz, attrs, &prot);
    assert(sz >= TARGET_PAGE_SIZE);

    tlb_debug("vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx
              " prot=%x idx=%d\n",
              vaddr, paddr, prot, mmu_idx);

    address = vaddr_page;
    if (size < TARGET_PAGE_SIZE) {
        /* Repeat the MMU check and TLB fill on every access.  */
        address |= TLB_INVALID_MASK;
    }
    if (attrs.byte_swap) {
        address |= TLB_BSWAP;
    }

    is_ram = memory_region_is_ram(section->mr);
    is_romd = memory_region_is_romd(section->mr);

    if (is_ram || is_romd) {
        /* RAM and ROMD regions must have an associated host pointer. */
        addend = (uintptr_t)memory_region_get_ram_ptr(section->mr) + xlat;
    } else {
        /* I/O does not; force the host address to NULL. */
        addend = 0;
    }

    write_address = address;
    if (is_ram) {
        iotlb = memory_region_get_ram_addr(section->mr) + xlat;
        /*
         * Computing is_clean is expensive; avoid all that unless
         * the page is actually writable.
         */
        if (prot & PAGE_WRITE) {
            if (section->readonly) {
                write_address |= TLB_DISCARD_WRITE;
            } else if (cpu_physical_memory_is_clean(iotlb)) {
                write_address |= TLB_NOTDIRTY;
            }
        }
    } else {
        /* I/O or ROMD */
        iotlb = memory_region_section_get_iotlb(cpu, section) + xlat;
        /*
         * Writes to romd devices must go through MMIO to enable write.
         * Reads to romd devices go through the ram_ptr found above,
         * but of course reads to I/O must go through MMIO.
         */
        write_address |= TLB_MMIO;
        if (!is_romd) {
            address = write_address;
        }
    }

    wp_flags = cpu_watchpoint_address_matches(cpu, vaddr_page,
                                              TARGET_PAGE_SIZE);

    index = tlb_index(env, mmu_idx, vaddr_page);
    te = tlb_entry(env, mmu_idx, vaddr_page);

    /*
     * Hold the TLB lock for the rest of the function. We could acquire/release
     * the lock several times in the function, but it is faster to amortize the
     * acquisition cost by acquiring it just once. Note that this leads to
     * a longer critical section, but this is not a concern since the TLB lock
     * is rarely released.
     */
    qemu_spin_lock(&tlb->c.lock);

    /* Note that the tlb is no longer clean.  */
    tlb->c.dirty |= 1 << mmu_idx;

    /* Make sure there's no cached translation for the new page.  */
    tlb_flush_vtlb_page_locked(env, mmu_idx, vaddr_page);

    /*
     * Only evict the old entry to the victim tlb if it's for a
     * different page; otherwise just overwrite the stale data.
     */
    if (!tlb_hit_page_anyprot(te, vaddr_page) && !tlb_entry_is_empty(te)) {
        unsigned vidx = desc->vindex++ % CPU_VTLB_SIZE;
        CPUTLBEntry *tv = &desc->vtable[vidx];

        /* Evict the old entry into the victim tlb.  */
        copy_tlb_helper_locked(tv, te);
        desc->viotlb[vidx] = desc->iotlb[index];
        tlb_n_used_entries_dec(env, mmu_idx);
    }

    /* refill the tlb */
    /*
     * At this point iotlb contains a physical section number in the lower
     * TARGET_PAGE_BITS, and either
     *  + the ram_addr_t of the page base of the target RAM (RAM)
     *  + the offset within section->mr of the page base (I/O, ROMD)
     * We subtract the vaddr_page (which is page aligned and thus won't
     * disturb the low bits) to give an offset which can be added to the
     * (non-page-aligned) vaddr of the eventual memory access to get
     * the MemoryRegion offset for the access. Note that the vaddr we
     * subtract here is that of the page base, and not the same as the
     * vaddr we add back in io_readx()/io_writex()/get_page_addr_code().
     */
    desc->iotlb[index].addr = iotlb - vaddr_page;
    desc->iotlb[index].attrs = attrs;

    /* Now calculate the new entry */
    tn.addend = addend - vaddr_page;
    if (prot & PAGE_READ) {
        tn.addr_read = address;
        if (wp_flags & BP_MEM_READ) {
            tn.addr_read |= TLB_WATCHPOINT;
        }
    } else {
        tn.addr_read = -1;
    }

    if (prot & PAGE_EXEC) {
        tn.addr_code = address;
    } else {
        tn.addr_code = -1;
    }

    tn.addr_write = -1;
    if (prot & PAGE_WRITE) {
        tn.addr_write = write_address;
        if (prot & PAGE_WRITE_INV) {
            tn.addr_write |= TLB_INVALID_MASK;
        }
        if (wp_flags & BP_MEM_WRITE) {
            tn.addr_write |= TLB_WATCHPOINT;
        }
    }

    copy_tlb_helper_locked(te, &tn);
    tlb_n_used_entries_inc(env, mmu_idx);
    qemu_spin_unlock(&tlb->c.lock);
}

/* Add a new TLB entry, but without specifying the memory
 * transaction attributes to be used.
 */
void tlb_set_page(CPUState *cpu, target_ulong vaddr,
                  hwaddr paddr, int prot,
                  int mmu_idx, target_ulong size)
{
    tlb_set_page_with_attrs(cpu, vaddr, paddr, MEMTXATTRS_UNSPECIFIED,
                            prot, mmu_idx, size);
}

static inline ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
{
    ram_addr_t ram_addr;

    ram_addr = qemu_ram_addr_from_host(ptr);
    if (ram_addr == RAM_ADDR_INVALID) {
        error_report("Bad ram pointer %p", ptr);
        abort();
    }
    return ram_addr;
}

/*
 * Note: tlb_fill() can trigger a resize of the TLB. This means that all of the
 * caller's prior references to the TLB table (e.g. CPUTLBEntry pointers) must
 * be discarded and looked up again (e.g. via tlb_entry()).
 */
static void tlb_fill(CPUState *cpu, target_ulong addr, int size,
                     MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);
    bool ok;

    /*
     * This is not a probe, so only valid return is success; failure
     * should result in exception + longjmp to the cpu loop.
     */
    ok = cc->tcg_ops->tlb_fill(cpu, addr, size,
                               access_type, mmu_idx, false, retaddr);
    assert(ok);
}

static inline void cpu_unaligned_access(CPUState *cpu, vaddr addr,
                                        MMUAccessType access_type,
                                        int mmu_idx, uintptr_t retaddr)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

    cc->tcg_ops->do_unaligned_access(cpu, addr, access_type, mmu_idx, retaddr);
}

static inline void cpu_transaction_failed(CPUState *cpu, hwaddr physaddr,
                                          vaddr addr, unsigned size,
                                          MMUAccessType access_type,
                                          int mmu_idx, MemTxAttrs attrs,
                                          MemTxResult response,
                                          uintptr_t retaddr)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

    if (!cpu->ignore_memory_transaction_failures &&
        cc->tcg_ops->do_transaction_failed) {
        cc->tcg_ops->do_transaction_failed(cpu, physaddr, addr, size,
                                           access_type, mmu_idx, attrs,
                                           response, retaddr);
    }
}

static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
                         int mmu_idx, target_ulong addr, uintptr_t retaddr,
                         MMUAccessType access_type, MemOp op)
{
    CPUState *cpu = env_cpu(env);
    hwaddr mr_offset;
    MemoryRegionSection *section;
    MemoryRegion *mr;
    uint64_t val;
    bool locked = false;
    MemTxResult r;

    section = iotlb_to_section(cpu, iotlbentry->addr, iotlbentry->attrs);
    mr = section->mr;
    mr_offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr;
    cpu->mem_io_pc = retaddr;
    if (!cpu->can_do_io) {
        cpu_io_recompile(cpu, retaddr);
    }

    if (!qemu_mutex_iothread_locked()) {
        qemu_mutex_lock_iothread();
        locked = true;
    }
    r = memory_region_dispatch_read(mr, mr_offset, &val, op, iotlbentry->attrs);
    if (r != MEMTX_OK) {
        hwaddr physaddr = mr_offset +
            section->offset_within_address_space -
            section->offset_within_region;

        cpu_transaction_failed(cpu, physaddr, addr, memop_size(op), access_type,
                               mmu_idx, iotlbentry->attrs, r, retaddr);
    }
    if (locked) {
        qemu_mutex_unlock_iothread();
    }

    return val;
}

/*
 * Save a potentially trashed IOTLB entry for later lookup by plugin.
 * This is read by tlb_plugin_lookup if the iotlb entry doesn't match
 * because of the side effect of io_writex changing memory layout.
 */
static void save_iotlb_data(CPUState *cs, hwaddr addr,
                            MemoryRegionSection *section, hwaddr mr_offset)
{
#ifdef CONFIG_PLUGIN
    SavedIOTLB *saved = &cs->saved_iotlb;
    saved->addr = addr;
    saved->section = section;
    saved->mr_offset = mr_offset;
#endif
}

static void io_writex(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
                      int mmu_idx, uint64_t val, target_ulong addr,
                      uintptr_t retaddr, MemOp op)
{
    CPUState *cpu = env_cpu(env);
    hwaddr mr_offset;
    MemoryRegionSection *section;
    MemoryRegion *mr;
    bool locked = false;
    MemTxResult r;

    section = iotlb_to_section(cpu, iotlbentry->addr, iotlbentry->attrs);
    mr = section->mr;
    mr_offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr;
    if (!cpu->can_do_io) {
        cpu_io_recompile(cpu, retaddr);
    }
    cpu->mem_io_pc = retaddr;

    /*
     * The memory_region_dispatch may trigger a flush/resize
     * so for plugins we save the iotlb_data just in case.
     */
    save_iotlb_data(cpu, iotlbentry->addr, section, mr_offset);

    if (!qemu_mutex_iothread_locked()) {
        qemu_mutex_lock_iothread();
        locked = true;
    }
    r = memory_region_dispatch_write(mr, mr_offset, val, op, iotlbentry->attrs);
    if (r != MEMTX_OK) {
        hwaddr physaddr = mr_offset +
            section->offset_within_address_space -
            section->offset_within_region;

        cpu_transaction_failed(cpu, physaddr, addr, memop_size(op),
                               MMU_DATA_STORE, mmu_idx, iotlbentry->attrs, r,
                               retaddr);
    }
    if (locked) {
        qemu_mutex_unlock_iothread();
    }
}

static inline target_ulong tlb_read_ofs(CPUTLBEntry *entry, size_t ofs)
{
#if TCG_OVERSIZED_GUEST
    return *(target_ulong *)((uintptr_t)entry + ofs);
#else
    /* ofs might correspond to .addr_write, so use qatomic_read */
    return qatomic_read((target_ulong *)((uintptr_t)entry + ofs));
#endif
}

/* Return true if ADDR is present in the victim tlb, and has been copied
   back to the main tlb.  */
static bool victim_tlb_hit(CPUArchState *env, size_t mmu_idx, size_t index,
                           size_t elt_ofs, target_ulong page)
{
    size_t vidx;

    assert_cpu_is_self(env_cpu(env));
    for (vidx = 0; vidx < CPU_VTLB_SIZE; ++vidx) {
        CPUTLBEntry *vtlb = &env_tlb(env)->d[mmu_idx].vtable[vidx];
        target_ulong cmp;

        /* elt_ofs might correspond to .addr_write, so use qatomic_read */
#if TCG_OVERSIZED_GUEST
        cmp = *(target_ulong *)((uintptr_t)vtlb + elt_ofs);
#else
        cmp = qatomic_read((target_ulong *)((uintptr_t)vtlb + elt_ofs));
#endif

        if (cmp == page) {
            /* Found entry in victim tlb, swap tlb and iotlb.  */
            CPUTLBEntry tmptlb, *tlb = &env_tlb(env)->f[mmu_idx].table[index];

            qemu_spin_lock(&env_tlb(env)->c.lock);
            copy_tlb_helper_locked(&tmptlb, tlb);
            copy_tlb_helper_locked(tlb, vtlb);
            copy_tlb_helper_locked(vtlb, &tmptlb);
            qemu_spin_unlock(&env_tlb(env)->c.lock);

            CPUIOTLBEntry tmpio, *io = &env_tlb(env)->d[mmu_idx].iotlb[index];
            CPUIOTLBEntry *vio = &env_tlb(env)->d[mmu_idx].viotlb[vidx];
            tmpio = *io; *io = *vio; *vio = tmpio;
            return true;
        }
    }
    return false;
}

/* Macro to call the above, with local variables from the use context.  */
#define VICTIM_TLB_HIT(TY, ADDR) \
  victim_tlb_hit(env, mmu_idx, index, offsetof(CPUTLBEntry, TY), \
                 (ADDR) & TARGET_PAGE_MASK)

/*
 * Return a ram_addr_t for the virtual address for execution.
 *
 * Return -1 if we can't translate and execute from an entire page
 * of RAM.  This will force us to execute by loading and translating
 * one insn at a time, without caching.
 *
 * NOTE: This function will trigger an exception if the page is
 * not executable.
 */
tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, target_ulong addr,
                                        void **hostp)
{
    uintptr_t mmu_idx = cpu_mmu_index(env, true);
    uintptr_t index = tlb_index(env, mmu_idx, addr);
    CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
    void *p;

    if (unlikely(!tlb_hit(entry->addr_code, addr))) {
        if (!VICTIM_TLB_HIT(addr_code, addr)) {
            tlb_fill(env_cpu(env), addr, 0, MMU_INST_FETCH, mmu_idx, 0);
            index = tlb_index(env, mmu_idx, addr);
            entry = tlb_entry(env, mmu_idx, addr);

            if (unlikely(entry->addr_code & TLB_INVALID_MASK)) {
                /*
                 * The MMU protection covers a smaller range than a target
                 * page, so we must redo the MMU check for every insn.
                 */
                return -1;
            }
        }
        assert(tlb_hit(entry->addr_code, addr));
    }

    if (unlikely(entry->addr_code & TLB_MMIO)) {
        /* The region is not backed by RAM.  */
        if (hostp) {
            *hostp = NULL;
        }
        return -1;
    }

    p = (void *)((uintptr_t)addr + entry->addend);
    if (hostp) {
        *hostp = p;
    }
    return qemu_ram_addr_from_host_nofail(p);
}

tb_page_addr_t get_page_addr_code(CPUArchState *env, target_ulong addr)
{
    return get_page_addr_code_hostp(env, addr, NULL);
}

static void notdirty_write(CPUState *cpu, vaddr mem_vaddr, unsigned size,
                           CPUIOTLBEntry *iotlbentry, uintptr_t retaddr)
{
    ram_addr_t ram_addr = mem_vaddr + iotlbentry->addr;

    trace_memory_notdirty_write_access(mem_vaddr, ram_addr, size);

    if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
        struct page_collection *pages
            = page_collection_lock(ram_addr, ram_addr + size);
        tb_invalidate_phys_page_fast(pages, ram_addr, size, retaddr);
        page_collection_unlock(pages);
    }

    /*
     * Set both VGA and migration bits for simplicity and to remove
     * the notdirty callback faster.
     */
    cpu_physical_memory_set_dirty_range(ram_addr, size, DIRTY_CLIENTS_NOCODE);

    /* We remove the notdirty callback only if the code has been flushed. */
    if (!cpu_physical_memory_is_clean(ram_addr)) {
        trace_memory_notdirty_set_dirty(mem_vaddr);
        tlb_set_dirty(cpu, mem_vaddr);
    }
}
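
/*
 * Look up the TLB entry for (@addr, @access_type) without faulting when
 * @nonfault is set.  On success, *@phost receives the host address and the
 * return value is the TLB_* flags for the entry; TLB_INVALID_MASK is
 * returned when a non-faulting lookup fails, and TLB_MMIO when the page
 * is not backed by directly addressable RAM.
 */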
static int probe_access_internal(CPUArchState *env, target_ulong addr,
                                 int fault_size, MMUAccessType access_type,
                                 int mmu_idx, bool nonfault,
                                 void **phost, uintptr_t retaddr)
{
    uintptr_t index = tlb_index(env, mmu_idx, addr);
    CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
    target_ulong tlb_addr, page_addr;
    size_t elt_ofs;
    int flags;

    switch (access_type) {
    case MMU_DATA_LOAD:
        elt_ofs = offsetof(CPUTLBEntry, addr_read);
        break;
    case MMU_DATA_STORE:
        elt_ofs = offsetof(CPUTLBEntry, addr_write);
        break;
    case MMU_INST_FETCH:
        elt_ofs = offsetof(CPUTLBEntry, addr_code);
        break;
    default:
        g_assert_not_reached();
    }
    tlb_addr = tlb_read_ofs(entry, elt_ofs);

    page_addr = addr & TARGET_PAGE_MASK;
    if (!tlb_hit_page(tlb_addr, page_addr)) {
        if (!victim_tlb_hit(env, mmu_idx, index, elt_ofs, page_addr)) {
            CPUState *cs = env_cpu(env);
            CPUClass *cc = CPU_GET_CLASS(cs);

            if (!cc->tcg_ops->tlb_fill(cs, addr, fault_size, access_type,
                                       mmu_idx, nonfault, retaddr)) {
                /* Non-faulting page table read failed.  */
                *phost = NULL;
                return TLB_INVALID_MASK;
            }

            /* TLB resize via tlb_fill may have moved the entry.  */
            entry = tlb_entry(env, mmu_idx, addr);
        }
        tlb_addr = tlb_read_ofs(entry, elt_ofs);
    }
    flags = tlb_addr & TLB_FLAGS_MASK;

    /* Fold all "mmio-like" bits into TLB_MMIO.  This is not RAM.  */
    if (unlikely(flags & ~(TLB_WATCHPOINT | TLB_NOTDIRTY))) {
        *phost = NULL;
        return TLB_MMIO;
    }

    /* Everything else is RAM. */
    *phost = (void *)((uintptr_t)addr + entry->addend);
    return flags;
}

int probe_access_flags(CPUArchState *env, target_ulong addr,
                       MMUAccessType access_type, int mmu_idx,
                       bool nonfault, void **phost, uintptr_t retaddr)
{
    int flags;

    flags = probe_access_internal(env, addr, 0, access_type, mmu_idx,
                                  nonfault, phost, retaddr);

    /* Handle clean RAM pages.  */
    if (unlikely(flags & TLB_NOTDIRTY)) {
        uintptr_t index = tlb_index(env, mmu_idx, addr);
        CPUIOTLBEntry *iotlbentry = &env_tlb(env)->d[mmu_idx].iotlb[index];

        notdirty_write(env_cpu(env), addr, 1, iotlbentry, retaddr);
        flags &= ~TLB_NOTDIRTY;
    }

    return flags;
}

void *probe_access(CPUArchState *env, target_ulong addr, int size,
                   MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
{
    void *host;
    int flags;

    g_assert(-(addr | TARGET_PAGE_MASK) >= size);

    flags = probe_access_internal(env, addr, size, access_type, mmu_idx,
                                  false, &host, retaddr);

    /* Per the interface, size == 0 merely faults the access. */
    if (size == 0) {
        return NULL;
    }

    if (unlikely(flags & (TLB_NOTDIRTY | TLB_WATCHPOINT))) {
        uintptr_t index = tlb_index(env, mmu_idx, addr);
        CPUIOTLBEntry *iotlbentry = &env_tlb(env)->d[mmu_idx].iotlb[index];

        /* Handle watchpoints.  */
        if (flags & TLB_WATCHPOINT) {
            int wp_access = (access_type == MMU_DATA_STORE
                             ? BP_MEM_WRITE : BP_MEM_READ);
            cpu_check_watchpoint(env_cpu(env), addr, size,
                                 iotlbentry->attrs, wp_access, retaddr);
        }

        /* Handle clean RAM pages.  */
        if (flags & TLB_NOTDIRTY) {
            notdirty_write(env_cpu(env), addr, 1, iotlbentry, retaddr);
        }
    }

    return host;
}
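
/*
 * Illustrative use of probe_access() (not a function in this file): a target
 * helper that must not fault halfway through a multi-byte store can probe
 * the whole range up front and then write through the returned host pointer:
 *
 *     void *p = probe_access(env, addr, 8, MMU_DATA_STORE, mmu_idx, GETPC());
 *     if (p) {
 *         stq_p(p, val);   // direct RAM access; watchpoints/dirty handled
 *     }
 *
 * A NULL return means the access must go through the slow path (e.g. MMIO).
 */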

void *tlb_vaddr_to_host(CPUArchState *env, abi_ptr addr,
                        MMUAccessType access_type, int mmu_idx)
{
    void *host;
    int flags;

    flags = probe_access_internal(env, addr, 0, access_type,
                                  mmu_idx, true, &host, 0);

    /* No combination of flags are expected by the caller. */
    return flags ? NULL : host;
}

#ifdef CONFIG_PLUGIN
/*
 * Perform a TLB lookup and populate the qemu_plugin_hwaddr structure.
 * This should be a hot path as we will have just looked this path up
 * in the softmmu lookup code (or helper). We don't handle re-fills or
 * checking the victim table. This is purely informational.
 *
 * This almost never fails as the memory access being instrumented
 * should have just filled the TLB. The one corner case is io_writex
 * which can cause TLB flushes and potential resizing of the TLBs
 * losing the information we need. In those cases we need to recover
 * data from a copy of the iotlbentry. As long as this always occurs
 * from the same thread (which a mem callback will be) this is safe.
 */
bool tlb_plugin_lookup(CPUState *cpu, target_ulong addr, int mmu_idx,
                       bool is_store, struct qemu_plugin_hwaddr *data)
{
    CPUArchState *env = cpu->env_ptr;
    CPUTLBEntry *tlbe = tlb_entry(env, mmu_idx, addr);
    uintptr_t index = tlb_index(env, mmu_idx, addr);
    target_ulong tlb_addr = is_store ? tlb_addr_write(tlbe) : tlbe->addr_read;

    if (likely(tlb_hit(tlb_addr, addr))) {
        /* We must have an iotlb entry for MMIO */
        if (tlb_addr & TLB_MMIO) {
            CPUIOTLBEntry *iotlbentry;
            iotlbentry = &env_tlb(env)->d[mmu_idx].iotlb[index];
            data->is_io = true;
            data->v.io.section =
                iotlb_to_section(cpu, iotlbentry->addr, iotlbentry->attrs);
            data->v.io.offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr;
        } else {
            data->is_io = false;
            data->v.ram.hostaddr = (void *)((uintptr_t)addr + tlbe->addend);
        }
        return true;
    } else {
        SavedIOTLB *saved = &cpu->saved_iotlb;
        data->is_io = true;
        data->v.io.section = saved->section;
        data->v.io.offset = saved->mr_offset;
        return true;
    }
}

#endif

/*
 * Probe for an atomic operation.  Do not allow unaligned operations,
 * or io operations to proceed.  Return the host address.
 *
 * @prot may be PAGE_READ, PAGE_WRITE, or PAGE_READ|PAGE_WRITE.
 */
static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
                               MemOpIdx oi, int size, int prot,
                               uintptr_t retaddr)
{
    size_t mmu_idx = get_mmuidx(oi);
    MemOp mop = get_memop(oi);
    int a_bits = get_alignment_bits(mop);
    uintptr_t index;
    CPUTLBEntry *tlbe;
    target_ulong tlb_addr;
    void *hostaddr;

    /* Adjust the given return address.  */
    retaddr -= GETPC_ADJ;

    /* Enforce guest required alignment.  */
    if (unlikely(a_bits > 0 && (addr & ((1 << a_bits) - 1)))) {
        /* ??? Maybe indicate atomic op to cpu_unaligned_access */
        cpu_unaligned_access(env_cpu(env), addr, MMU_DATA_STORE,
                             mmu_idx, retaddr);
    }

    /* Enforce qemu required alignment.  */
    if (unlikely(addr & (size - 1))) {
        /* We get here if guest alignment was not requested,
           or was not enforced by cpu_unaligned_access above.
           We might widen the access and emulate, but for now
           mark an exception and exit the cpu loop.  */
        goto stop_the_world;
    }

    index = tlb_index(env, mmu_idx, addr);
    tlbe = tlb_entry(env, mmu_idx, addr);

    /* Check TLB entry and enforce page permissions.  */
    if (prot & PAGE_WRITE) {
        tlb_addr = tlb_addr_write(tlbe);
        if (!tlb_hit(tlb_addr, addr)) {
            if (!VICTIM_TLB_HIT(addr_write, addr)) {
                tlb_fill(env_cpu(env), addr, size,
                         MMU_DATA_STORE, mmu_idx, retaddr);
                index = tlb_index(env, mmu_idx, addr);
                tlbe = tlb_entry(env, mmu_idx, addr);
            }
            tlb_addr = tlb_addr_write(tlbe) & ~TLB_INVALID_MASK;
        }

        /* Let the guest notice RMW on a write-only page.  */
        if ((prot & PAGE_READ) &&
            unlikely(tlbe->addr_read != (tlb_addr & ~TLB_NOTDIRTY))) {
            tlb_fill(env_cpu(env), addr, size,
                     MMU_DATA_LOAD, mmu_idx, retaddr);
            /*
             * Since we don't support reads and writes to different addresses,
             * and we do have the proper page loaded for write, this shouldn't
             * ever return.  But just in case, handle via stop-the-world.
             */
            goto stop_the_world;
        }
    } else /* if (prot & PAGE_READ) */ {
        tlb_addr = tlbe->addr_read;
        if (!tlb_hit(tlb_addr, addr)) {
            if (!VICTIM_TLB_HIT(addr_write, addr)) {
                tlb_fill(env_cpu(env), addr, size,
                         MMU_DATA_LOAD, mmu_idx, retaddr);
                index = tlb_index(env, mmu_idx, addr);
                tlbe = tlb_entry(env, mmu_idx, addr);
            }
            tlb_addr = tlbe->addr_read & ~TLB_INVALID_MASK;
        }
    }

    /* Notice an IO access or a needs-MMU-lookup access */
    if (unlikely(tlb_addr & TLB_MMIO)) {
        /* There's really nothing that can be done to
           support this apart from stop-the-world.  */
        goto stop_the_world;
    }

    hostaddr = (void *)((uintptr_t)addr + tlbe->addend);

    if (unlikely(tlb_addr & TLB_NOTDIRTY)) {
        notdirty_write(env_cpu(env), addr, size,
                       &env_tlb(env)->d[mmu_idx].iotlb[index], retaddr);
    }

    return hostaddr;

 stop_the_world:
    cpu_loop_exit_atomic(env_cpu(env), retaddr);
}
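
/*
 * Sketch of how an atomic helper uses atomic_mmu_lookup (illustrative only;
 * the real callers are generated from atomic_template.h):
 *
 *     uint32_t atomic_cmpxchgl_le_mmu(CPUArchState *env, target_ulong addr,
 *                                     uint32_t cmpv, uint32_t newv,
 *                                     MemOpIdx oi, uintptr_t ra)
 *     {
 *         uint32_t *haddr = atomic_mmu_lookup(env, addr, oi, 4,
 *                                             PAGE_READ | PAGE_WRITE, ra);
 *         return qatomic_cmpxchg(haddr, cmpv, newv);
 *     }
 *
 * Unaligned or MMIO-backed addresses never reach the qatomic op; they exit
 * via cpu_loop_exit_atomic() and are re-executed with exclusive access.
 */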

/*
 * Verify that we have passed the correct MemOp to the correct function.
 *
 * In the case of the helper_*_mmu functions, we will have done this by
 * examining the signature.
 *
 * In the case of the cpu_*_mmu functions, this is up to the caller.
 * We could present one function to target code, and dispatch based on
 * the MemOp, but so far we have worked hard to avoid an indirect function
 * call along the memory path.
 */
static void validate_memop(MemOpIdx oi, MemOp expected)
{
#ifdef CONFIG_DEBUG_TCG
    MemOp have = get_memop(oi) & (MO_SIZE | MO_BSWAP);
    assert(have == expected);
#endif
}

/*
 * Load Helpers
 *
 * We support two different access types. SOFTMMU_CODE_ACCESS is
 * specifically for reading instructions from system memory. It is
 * called by the translation loop and in some helpers where the code
 * is disassembled. It shouldn't be called directly by guest code.
 */
typedef uint64_t FullLoadHelper(CPUArchState *env, target_ulong addr,
                                MemOpIdx oi, uintptr_t retaddr);

static inline uint64_t QEMU_ALWAYS_INLINE
load_memop(const void *haddr, MemOp op)
{
    switch (op) {
    case MO_UB:
        return ldub_p(haddr);
    case MO_BEUW:
        return lduw_be_p(haddr);
    case MO_LEUW:
        return lduw_le_p(haddr);
    case MO_BEUL:
        return (uint32_t)ldl_be_p(haddr);
    case MO_LEUL:
        return (uint32_t)ldl_le_p(haddr);
    case MO_BEQ:
        return ldq_be_p(haddr);
    case MO_LEQ:
        return ldq_le_p(haddr);
    default:
        qemu_build_not_reached();
    }
}

static inline uint64_t QEMU_ALWAYS_INLINE
load_helper(CPUArchState *env, target_ulong addr, MemOpIdx oi,
            uintptr_t retaddr, MemOp op, bool code_read,
            FullLoadHelper *full_load)
{
    uintptr_t mmu_idx = get_mmuidx(oi);
    uintptr_t index = tlb_index(env, mmu_idx, addr);
    CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
    target_ulong tlb_addr = code_read ? entry->addr_code : entry->addr_read;
    const size_t tlb_off = code_read ?
        offsetof(CPUTLBEntry, addr_code) : offsetof(CPUTLBEntry, addr_read);
    const MMUAccessType access_type =
        code_read ? MMU_INST_FETCH : MMU_DATA_LOAD;
    unsigned a_bits = get_alignment_bits(get_memop(oi));
    void *haddr;
    uint64_t res;
    size_t size = memop_size(op);

    /* Handle CPU specific unaligned behaviour */
    if (addr & ((1 << a_bits) - 1)) {
        cpu_unaligned_access(env_cpu(env), addr, access_type,
                             mmu_idx, retaddr);
    }

    /* If the TLB entry is for a different page, reload and try again.  */
    if (!tlb_hit(tlb_addr, addr)) {
        if (!victim_tlb_hit(env, mmu_idx, index, tlb_off,
                            addr & TARGET_PAGE_MASK)) {
            tlb_fill(env_cpu(env), addr, size,
                     access_type, mmu_idx, retaddr);
            index = tlb_index(env, mmu_idx, addr);
            entry = tlb_entry(env, mmu_idx, addr);
        }
        tlb_addr = code_read ? entry->addr_code : entry->addr_read;
        tlb_addr &= ~TLB_INVALID_MASK;
    }

    /* Handle anything that isn't just a straight memory access.  */
    if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
        CPUIOTLBEntry *iotlbentry;
        bool need_swap;

        /* For anything that is unaligned, recurse through full_load.  */
        if ((addr & (size - 1)) != 0) {
            goto do_unaligned_access;
        }

        iotlbentry = &env_tlb(env)->d[mmu_idx].iotlb[index];

        /* Handle watchpoints.  */
        if (unlikely(tlb_addr & TLB_WATCHPOINT)) {
            /* On watchpoint hit, this will longjmp out.  */
            cpu_check_watchpoint(env_cpu(env), addr, size,
                                 iotlbentry->attrs, BP_MEM_READ, retaddr);
        }

        need_swap = size > 1 && (tlb_addr & TLB_BSWAP);

        /* Handle I/O access.  */
        if (likely(tlb_addr & TLB_MMIO)) {
            return io_readx(env, iotlbentry, mmu_idx, addr, retaddr,
                            access_type, op ^ (need_swap * MO_BSWAP));
        }

        haddr = (void *)((uintptr_t)addr + entry->addend);

        /*
         * Keep these two load_memop separate to ensure that the compiler
         * is able to fold the entire function to a single instruction.
         * There is a build-time assert inside to remind you of this.  ;-)
         */
        if (unlikely(need_swap)) {
            return load_memop(haddr, op ^ MO_BSWAP);
        }
        return load_memop(haddr, op);
    }

    /* Handle slow unaligned access (it spans two pages or IO).  */
    if (size > 1
        && unlikely((addr & ~TARGET_PAGE_MASK) + size - 1
                    >= TARGET_PAGE_SIZE)) {
        target_ulong addr1, addr2;
        uint64_t r1, r2;
        unsigned shift;
    do_unaligned_access:
        addr1 = addr & ~((target_ulong)size - 1);
        addr2 = addr1 + size;
        r1 = full_load(env, addr1, oi, retaddr);
        r2 = full_load(env, addr2, oi, retaddr);
        shift = (addr & (size - 1)) * 8;

        if (memop_big_endian(op)) {
            /* Big-endian combine.  */
            res = (r1 << shift) | (r2 >> ((size * 8) - shift));
        } else {
            /* Little-endian combine.  */
            res = (r1 >> shift) | (r2 << ((size * 8) - shift));
        }
        return res & MAKE_64BIT_MASK(0, size * 8);
    }

    haddr = (void *)((uintptr_t)addr + entry->addend);
    return load_memop(haddr, op);
}
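
/*
 * Each access size below gets its own full_*_mmu function so that it can be
 * passed to load_helper as the @full_load callback: the unaligned path above
 * recurses through it for each of the two pages an access may straddle.
 */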

/*
 * For the benefit of TCG generated code, we want to avoid the
 * complication of ABI-specific return type promotion and always
 * return a value extended to the register size of the host. This is
 * tcg_target_long, except in the case of a 32-bit host and 64-bit
 * data, and for that we always have uint64_t.
 *
 * We don't bother with this widened value for SOFTMMU_CODE_ACCESS.
 */
static uint64_t full_ldub_mmu(CPUArchState *env, target_ulong addr,
                              MemOpIdx oi, uintptr_t retaddr)
{
    validate_memop(oi, MO_UB);
    return load_helper(env, addr, oi, retaddr, MO_UB, false, full_ldub_mmu);
}

tcg_target_ulong helper_ret_ldub_mmu(CPUArchState *env, target_ulong addr,
                                     MemOpIdx oi, uintptr_t retaddr)
{
    return full_ldub_mmu(env, addr, oi, retaddr);
}

static uint64_t full_le_lduw_mmu(CPUArchState *env, target_ulong addr,
                                 MemOpIdx oi, uintptr_t retaddr)
{
    validate_memop(oi, MO_LEUW);
    return load_helper(env, addr, oi, retaddr, MO_LEUW, false,
                       full_le_lduw_mmu);
}

tcg_target_ulong helper_le_lduw_mmu(CPUArchState *env, target_ulong addr,
                                    MemOpIdx oi, uintptr_t retaddr)
{
    return full_le_lduw_mmu(env, addr, oi, retaddr);
}

static uint64_t full_be_lduw_mmu(CPUArchState *env, target_ulong addr,
                                 MemOpIdx oi, uintptr_t retaddr)
{
    validate_memop(oi, MO_BEUW);
    return load_helper(env, addr, oi, retaddr, MO_BEUW, false,
                       full_be_lduw_mmu);
}

tcg_target_ulong helper_be_lduw_mmu(CPUArchState *env, target_ulong addr,
                                    MemOpIdx oi, uintptr_t retaddr)
{
    return full_be_lduw_mmu(env, addr, oi, retaddr);
}

static uint64_t full_le_ldul_mmu(CPUArchState *env, target_ulong addr,
                                 MemOpIdx oi, uintptr_t retaddr)
{
    validate_memop(oi, MO_LEUL);
    return load_helper(env, addr, oi, retaddr, MO_LEUL, false,
                       full_le_ldul_mmu);
}

tcg_target_ulong helper_le_ldul_mmu(CPUArchState *env, target_ulong addr,
                                    MemOpIdx oi, uintptr_t retaddr)
{
    return full_le_ldul_mmu(env, addr, oi, retaddr);
}

static uint64_t full_be_ldul_mmu(CPUArchState *env, target_ulong addr,
                                 MemOpIdx oi, uintptr_t retaddr)
{
    validate_memop(oi, MO_BEUL);
    return load_helper(env, addr, oi, retaddr, MO_BEUL, false,
                       full_be_ldul_mmu);
}

tcg_target_ulong helper_be_ldul_mmu(CPUArchState *env, target_ulong addr,
                                    MemOpIdx oi, uintptr_t retaddr)
{
    return full_be_ldul_mmu(env, addr, oi, retaddr);
}

uint64_t helper_le_ldq_mmu(CPUArchState *env, target_ulong addr,
                           MemOpIdx oi, uintptr_t retaddr)
{
    validate_memop(oi, MO_LEQ);
    return load_helper(env, addr, oi, retaddr, MO_LEQ, false,
                       helper_le_ldq_mmu);
}

uint64_t helper_be_ldq_mmu(CPUArchState *env, target_ulong addr,
                           MemOpIdx oi, uintptr_t retaddr)
{
    validate_memop(oi, MO_BEQ);
    return load_helper(env, addr, oi, retaddr, MO_BEQ, false,
                       helper_be_ldq_mmu);
}

/*
 * Provide signed versions of the load routines as well.  We can of course
 * avoid this for 64-bit data, or for 32-bit data on 32-bit host.
 */

tcg_target_ulong helper_ret_ldsb_mmu(CPUArchState *env, target_ulong addr,
                                     MemOpIdx oi, uintptr_t retaddr)
{
    return (int8_t)helper_ret_ldub_mmu(env, addr, oi, retaddr);
}

tcg_target_ulong helper_le_ldsw_mmu(CPUArchState *env, target_ulong addr,
                                    MemOpIdx oi, uintptr_t retaddr)
{
    return (int16_t)helper_le_lduw_mmu(env, addr, oi, retaddr);
}

tcg_target_ulong helper_be_ldsw_mmu(CPUArchState *env, target_ulong addr,
                                    MemOpIdx oi, uintptr_t retaddr)
{
    return (int16_t)helper_be_lduw_mmu(env, addr, oi, retaddr);
}

tcg_target_ulong helper_le_ldsl_mmu(CPUArchState *env, target_ulong addr,
                                    MemOpIdx oi, uintptr_t retaddr)
{
    return (int32_t)helper_le_ldul_mmu(env, addr, oi, retaddr);
}

tcg_target_ulong helper_be_ldsl_mmu(CPUArchState *env, target_ulong addr,
                                    MemOpIdx oi, uintptr_t retaddr)
{
    return (int32_t)helper_be_ldul_mmu(env, addr, oi, retaddr);
}

/*
 * Load helpers for cpu_ldst.h.
 */
static inline uint64_t cpu_load_helper(CPUArchState *env, abi_ptr addr,
                                       MemOpIdx oi, uintptr_t retaddr,
                                       FullLoadHelper *full_load)
{
    uint64_t ret;

    trace_guest_ld_before_exec(env_cpu(env), addr, oi);
    ret = full_load(env, addr, oi, retaddr);
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
    return ret;
}
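
/*
 * Illustrative call from a target helper (not part of this file): reading a
 * big-endian 64-bit word via these wrappers looks like
 *
 *     MemOpIdx oi = make_memop_idx(MO_BEQ, mmu_idx);
 *     uint64_t v = cpu_ldq_be_mmu(env, addr, oi, GETPC());
 *
 * The MemOp packed into @oi must match the function used, which
 * validate_memop() checks under CONFIG_DEBUG_TCG.
 */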

uint8_t cpu_ldb_mmu(CPUArchState *env, abi_ptr addr, MemOpIdx oi, uintptr_t ra)
{
    return cpu_load_helper(env, addr, oi, ra, full_ldub_mmu);
}

uint16_t cpu_ldw_be_mmu(CPUArchState *env, abi_ptr addr,
                        MemOpIdx oi, uintptr_t ra)
{
    return cpu_load_helper(env, addr, oi, ra, full_be_lduw_mmu);
}

uint32_t cpu_ldl_be_mmu(CPUArchState *env, abi_ptr addr,
                        MemOpIdx oi, uintptr_t ra)
{
    return cpu_load_helper(env, addr, oi, ra, full_be_ldul_mmu);
}

uint64_t cpu_ldq_be_mmu(CPUArchState *env, abi_ptr addr,
                        MemOpIdx oi, uintptr_t ra)
{
    return cpu_load_helper(env, addr, oi, ra, helper_be_ldq_mmu);
}

uint16_t cpu_ldw_le_mmu(CPUArchState *env, abi_ptr addr,
                        MemOpIdx oi, uintptr_t ra)
{
    return cpu_load_helper(env, addr, oi, ra, full_le_lduw_mmu);
}

uint32_t cpu_ldl_le_mmu(CPUArchState *env, abi_ptr addr,
                        MemOpIdx oi, uintptr_t ra)
{
    return cpu_load_helper(env, addr, oi, ra, full_le_ldul_mmu);
}

uint64_t cpu_ldq_le_mmu(CPUArchState *env, abi_ptr addr,
                        MemOpIdx oi, uintptr_t ra)
{
    return cpu_load_helper(env, addr, oi, ra, helper_le_ldq_mmu);
}

/*
 * Store Helpers
 */

static inline void QEMU_ALWAYS_INLINE
store_memop(void *haddr, uint64_t val, MemOp op)
{
    switch (op) {
    case MO_UB:
        stb_p(haddr, val);
        break;
    case MO_BEUW:
        stw_be_p(haddr, val);
        break;
    case MO_LEUW:
        stw_le_p(haddr, val);
        break;
    case MO_BEUL:
        stl_be_p(haddr, val);
        break;
    case MO_LEUL:
        stl_le_p(haddr, val);
        break;
    case MO_BEQ:
        stq_be_p(haddr, val);
        break;
    case MO_LEQ:
        stq_le_p(haddr, val);
        break;
    default:
        qemu_build_not_reached();
    }
}

static void full_stb_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
                         MemOpIdx oi, uintptr_t retaddr);

static void __attribute__((noinline))
store_helper_unaligned(CPUArchState *env, target_ulong addr, uint64_t val,
                       uintptr_t retaddr, size_t size, uintptr_t mmu_idx,
                       bool big_endian)
{
    const size_t tlb_off = offsetof(CPUTLBEntry, addr_write);
    uintptr_t index, index2;
    CPUTLBEntry *entry, *entry2;
    target_ulong page2, tlb_addr, tlb_addr2;
    MemOpIdx oi;
    size_t size2;
    int i;

    /*
     * Ensure the second page is in the TLB.  Note that the first page
     * is already guaranteed to be filled, and that the second page
     * cannot evict the first.
     */
    page2 = (addr + size) & TARGET_PAGE_MASK;
    size2 = (addr + size) & ~TARGET_PAGE_MASK;
    index2 = tlb_index(env, mmu_idx, page2);
    entry2 = tlb_entry(env, mmu_idx, page2);

    tlb_addr2 = tlb_addr_write(entry2);
    if (!tlb_hit_page(tlb_addr2, page2)) {
        if (!victim_tlb_hit(env, mmu_idx, index2, tlb_off, page2)) {
            tlb_fill(env_cpu(env), page2, size2, MMU_DATA_STORE,
                     mmu_idx, retaddr);
            index2 = tlb_index(env, mmu_idx, page2);
            entry2 = tlb_entry(env, mmu_idx, page2);
        }
        tlb_addr2 = tlb_addr_write(entry2);
    }

    index = tlb_index(env, mmu_idx, addr);
    entry = tlb_entry(env, mmu_idx, addr);
    tlb_addr = tlb_addr_write(entry);

    /*
     * Handle watchpoints.  Since this may trap, all checks
     * must happen before any store.
     */
    if (unlikely(tlb_addr & TLB_WATCHPOINT)) {
        cpu_check_watchpoint(env_cpu(env), addr, size - size2,
                             env_tlb(env)->d[mmu_idx].iotlb[index].attrs,
                             BP_MEM_WRITE, retaddr);
    }
    if (unlikely(tlb_addr2 & TLB_WATCHPOINT)) {
        cpu_check_watchpoint(env_cpu(env), page2, size2,
                             env_tlb(env)->d[mmu_idx].iotlb[index2].attrs,
                             BP_MEM_WRITE, retaddr);
    }

    /*
     * XXX: not efficient, but simple.
     * This loop must go in the forward direction to avoid issues
     * with self-modifying code in Windows 64-bit.
     */
    oi = make_memop_idx(MO_UB, mmu_idx);
    if (big_endian) {
        for (i = 0; i < size; ++i) {
            /* Big-endian extract.  */
            uint8_t val8 = val >> (((size - 1) * 8) - (i * 8));
            full_stb_mmu(env, addr + i, val8, oi, retaddr);
        }
    } else {
        for (i = 0; i < size; ++i) {
            /* Little-endian extract.  */
            uint8_t val8 = val >> (i * 8);
            full_stb_mmu(env, addr + i, val8, oi, retaddr);
        }
    }
}
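
/*
 * Worked example for the split above: a 4-byte store at the last byte of a
 * page (addr == page_end - 1) gives page2 == the next page, size2 == 3, so
 * 1 byte (size - size2) lands on the first page and 3 bytes on the second,
 * all issued as individual byte stores through full_stb_mmu.
 */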
2300
2301static inline void QEMU_ALWAYS_INLINE
2302store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
2303 MemOpIdx oi, uintptr_t retaddr, MemOp op)
2304{
2305 uintptr_t mmu_idx = get_mmuidx(oi);
2306 uintptr_t index = tlb_index(env, mmu_idx, addr);
2307 CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
2308 target_ulong tlb_addr = tlb_addr_write(entry);
2309 const size_t tlb_off = offsetof(CPUTLBEntry, addr_write);
2310 unsigned a_bits = get_alignment_bits(get_memop(oi));
2311 void *haddr;
2312 size_t size = memop_size(op);
2313
2314
2315 if (addr & ((1 << a_bits) - 1)) {
2316 cpu_unaligned_access(env_cpu(env), addr, MMU_DATA_STORE,
2317 mmu_idx, retaddr);
2318 }
2319
2320
2321 if (!tlb_hit(tlb_addr, addr)) {
2322 if (!victim_tlb_hit(env, mmu_idx, index, tlb_off,
2323 addr & TARGET_PAGE_MASK)) {
2324 tlb_fill(env_cpu(env), addr, size, MMU_DATA_STORE,
2325 mmu_idx, retaddr);
2326 index = tlb_index(env, mmu_idx, addr);
2327 entry = tlb_entry(env, mmu_idx, addr);
2328 }
2329 tlb_addr = tlb_addr_write(entry) & ~TLB_INVALID_MASK;
2330 }

    /* Handle anything that isn't just a straight memory access.  */
    if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
        CPUIOTLBEntry *iotlbentry;
        bool need_swap;

        /* For anything that is unaligned, recurse through byte stores.  */
        if ((addr & (size - 1)) != 0) {
            goto do_unaligned_access;
        }

        iotlbentry = &env_tlb(env)->d[mmu_idx].iotlb[index];

        /* Handle watchpoints.  */
        if (unlikely(tlb_addr & TLB_WATCHPOINT)) {
            /* On watchpoint hit, this will longjmp out.  */
            cpu_check_watchpoint(env_cpu(env), addr, size,
                                 iotlbentry->attrs, BP_MEM_WRITE, retaddr);
        }

        need_swap = size > 1 && (tlb_addr & TLB_BSWAP);

        /* Handle I/O access.  */
        if (tlb_addr & TLB_MMIO) {
            io_writex(env, iotlbentry, mmu_idx, val, addr, retaddr,
                      op ^ (need_swap * MO_BSWAP));
            return;
        }

        /* Ignore writes to ROM.  */
        if (unlikely(tlb_addr & TLB_DISCARD_WRITE)) {
            return;
        }

        /* Handle clean RAM pages.  */
        if (tlb_addr & TLB_NOTDIRTY) {
            notdirty_write(env_cpu(env), addr, size, iotlbentry, retaddr);
        }

        haddr = (void *)((uintptr_t)addr + entry->addend);

        /*
         * Keep these two store_memop separate to ensure that the compiler
         * is able to fold the entire function to a single instruction.
         * There is a build-time assert inside to remind you of this.  ;-)
         */
        if (unlikely(need_swap)) {
            store_memop(haddr, val, op ^ MO_BSWAP);
        } else {
            store_memop(haddr, val, op);
        }
        return;
    }

    /* Handle slow unaligned access (it spans two pages or IO).  */
    if (size > 1
        && unlikely((addr & ~TARGET_PAGE_MASK) + size - 1
                    >= TARGET_PAGE_SIZE)) {
    do_unaligned_access:
        store_helper_unaligned(env, addr, val, retaddr, size,
                               mmu_idx, memop_big_endian(op));
        return;
    }

    haddr = (void *)((uintptr_t)addr + entry->addend);
    store_memop(haddr, val, op);
}
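
/*
 * Note that store_helper relies on all of the TLB_* slow-path flags
 * living in the low bits of tlb_addr, below TARGET_PAGE_MASK, so the
 * single test against ~TARGET_PAGE_MASK above dispatches the MMIO,
 * watchpoint, ROM, byte-swap and dirty-tracking cases at once.
 */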

static void __attribute__((noinline))
full_stb_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
             MemOpIdx oi, uintptr_t retaddr)
{
    validate_memop(oi, MO_UB);
    store_helper(env, addr, val, oi, retaddr, MO_UB);
}

void helper_ret_stb_mmu(CPUArchState *env, target_ulong addr, uint8_t val,
                        MemOpIdx oi, uintptr_t retaddr)
{
    full_stb_mmu(env, addr, val, oi, retaddr);
}
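
/*
 * The pattern for the remaining sizes mirrors the byte case above:
 * a static full_*_mmu function takes the value as uint64_t and
 * validates the MemOpIdx against the expected MemOp, while the
 * helper_*_mmu wrapper is the entry point called from TCG-generated
 * code with the properly-sized argument.
 */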

static void full_le_stw_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
                            MemOpIdx oi, uintptr_t retaddr)
{
    validate_memop(oi, MO_LEUW);
    store_helper(env, addr, val, oi, retaddr, MO_LEUW);
}

void helper_le_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
                       MemOpIdx oi, uintptr_t retaddr)
{
    full_le_stw_mmu(env, addr, val, oi, retaddr);
}

static void full_be_stw_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
                            MemOpIdx oi, uintptr_t retaddr)
{
    validate_memop(oi, MO_BEUW);
    store_helper(env, addr, val, oi, retaddr, MO_BEUW);
}

void helper_be_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
                       MemOpIdx oi, uintptr_t retaddr)
{
    full_be_stw_mmu(env, addr, val, oi, retaddr);
}

static void full_le_stl_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
                            MemOpIdx oi, uintptr_t retaddr)
{
    validate_memop(oi, MO_LEUL);
    store_helper(env, addr, val, oi, retaddr, MO_LEUL);
}

void helper_le_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
                       MemOpIdx oi, uintptr_t retaddr)
{
    full_le_stl_mmu(env, addr, val, oi, retaddr);
}

static void full_be_stl_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
                            MemOpIdx oi, uintptr_t retaddr)
{
    validate_memop(oi, MO_BEUL);
    store_helper(env, addr, val, oi, retaddr, MO_BEUL);
}

void helper_be_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
                       MemOpIdx oi, uintptr_t retaddr)
{
    full_be_stl_mmu(env, addr, val, oi, retaddr);
}

void helper_le_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
                       MemOpIdx oi, uintptr_t retaddr)
{
    validate_memop(oi, MO_LEQ);
    store_helper(env, addr, val, oi, retaddr, MO_LEQ);
}

void helper_be_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
                       MemOpIdx oi, uintptr_t retaddr)
{
    validate_memop(oi, MO_BEQ);
    store_helper(env, addr, val, oi, retaddr, MO_BEQ);
}
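
/*
 * For 64-bit stores no separate full_ function is needed: the value
 * argument is already uint64_t, so helper_le_stq_mmu and
 * helper_be_stq_mmu serve as the full variants below.
 */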

/*
 * Store Helpers for cpu_ldst.h
 */

typedef void FullStoreHelper(CPUArchState *env, target_ulong addr,
                             uint64_t val, MemOpIdx oi, uintptr_t retaddr);

static inline void cpu_store_helper(CPUArchState *env, target_ulong addr,
                                    uint64_t val, MemOpIdx oi, uintptr_t ra,
                                    FullStoreHelper *full_store)
{
    trace_guest_st_before_exec(env_cpu(env), addr, oi);
    full_store(env, addr, val, oi, ra);
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
}

void cpu_stb_mmu(CPUArchState *env, target_ulong addr, uint8_t val,
                 MemOpIdx oi, uintptr_t retaddr)
{
    cpu_store_helper(env, addr, val, oi, retaddr, full_stb_mmu);
}

void cpu_stw_be_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
                    MemOpIdx oi, uintptr_t retaddr)
{
    cpu_store_helper(env, addr, val, oi, retaddr, full_be_stw_mmu);
}

void cpu_stl_be_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
                    MemOpIdx oi, uintptr_t retaddr)
{
    cpu_store_helper(env, addr, val, oi, retaddr, full_be_stl_mmu);
}

void cpu_stq_be_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
                    MemOpIdx oi, uintptr_t retaddr)
{
    cpu_store_helper(env, addr, val, oi, retaddr, helper_be_stq_mmu);
}

void cpu_stw_le_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
                    MemOpIdx oi, uintptr_t retaddr)
{
    cpu_store_helper(env, addr, val, oi, retaddr, full_le_stw_mmu);
}

void cpu_stl_le_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
                    MemOpIdx oi, uintptr_t retaddr)
{
    cpu_store_helper(env, addr, val, oi, retaddr, full_le_stl_mmu);
}

void cpu_stq_le_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
                    MemOpIdx oi, uintptr_t retaddr)
{
    cpu_store_helper(env, addr, val, oi, retaddr, helper_le_stq_mmu);
}

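/*
 * Usage sketch (hypothetical target code): a target helper performing
 * a 32-bit little-endian store with the current data mmu index could do
 *
 *     MemOpIdx oi = make_memop_idx(MO_LEUL, cpu_mmu_index(env, false));
 *     cpu_stl_le_mmu(env, addr, val, oi, GETPC());
 *
 * which also feeds the tracing and plugin callbacks above.
 */
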
#include "ldst_common.c.inc"
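
/*
 * The include above pulls in the remaining cpu_ld* and cpu_st*
 * wrappers (the _data and _data_ra variants), built on the _mmu
 * functions defined in this file.
 */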

/*
 * First set of functions passes in OI and RETADDR.
 * This makes them callable from other helpers.
 */

#define ATOMIC_NAME(X) \
    glue(glue(glue(cpu_atomic_ ## X, SUFFIX), END), _mmu)

#define ATOMIC_MMU_CLEANUP
#define ATOMIC_MMU_IDX   get_mmuidx(oi)

#include "atomic_common.c.inc"

#define DATA_SIZE 1
#include "atomic_template.h"

#define DATA_SIZE 2
#include "atomic_template.h"

#define DATA_SIZE 4
#include "atomic_template.h"

#ifdef CONFIG_ATOMIC64
#define DATA_SIZE 8
#include "atomic_template.h"
#endif

#if HAVE_CMPXCHG128 || HAVE_ATOMIC128
#define DATA_SIZE 16
#include "atomic_template.h"
#endif
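
/*
 * Each inclusion of "atomic_template.h" above expands to the
 * cpu_atomic_*_mmu helpers (compare-and-swap, exchange and the
 * fetch-and-op variants, as available for the size) for the given
 * DATA_SIZE; the template undefines DATA_SIZE at the end so that it
 * can be included again.
 */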

/* Code access functions.  */

static uint64_t full_ldub_code(CPUArchState *env, target_ulong addr,
                               MemOpIdx oi, uintptr_t retaddr)
{
    return load_helper(env, addr, oi, retaddr, MO_8, true, full_ldub_code);
}

uint32_t cpu_ldub_code(CPUArchState *env, abi_ptr addr)
{
    MemOpIdx oi = make_memop_idx(MO_UB, cpu_mmu_index(env, true));
    return full_ldub_code(env, addr, oi, 0);
}

static uint64_t full_lduw_code(CPUArchState *env, target_ulong addr,
                               MemOpIdx oi, uintptr_t retaddr)
{
    return load_helper(env, addr, oi, retaddr, MO_TEUW, true, full_lduw_code);
}

uint32_t cpu_lduw_code(CPUArchState *env, abi_ptr addr)
{
    MemOpIdx oi = make_memop_idx(MO_TEUW, cpu_mmu_index(env, true));
    return full_lduw_code(env, addr, oi, 0);
}

static uint64_t full_ldl_code(CPUArchState *env, target_ulong addr,
                              MemOpIdx oi, uintptr_t retaddr)
{
    return load_helper(env, addr, oi, retaddr, MO_TEUL, true, full_ldl_code);
}

uint32_t cpu_ldl_code(CPUArchState *env, abi_ptr addr)
{
    MemOpIdx oi = make_memop_idx(MO_TEUL, cpu_mmu_index(env, true));
    return full_ldl_code(env, addr, oi, 0);
}

static uint64_t full_ldq_code(CPUArchState *env, target_ulong addr,
                              MemOpIdx oi, uintptr_t retaddr)
{
    return load_helper(env, addr, oi, retaddr, MO_TEQ, true, full_ldq_code);
}

uint64_t cpu_ldq_code(CPUArchState *env, abi_ptr addr)
{
    MemOpIdx oi = make_memop_idx(MO_TEQ, cpu_mmu_index(env, true));
    return full_ldq_code(env, addr, oi, 0);
}

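/*
 * Usage sketch (hypothetical target code): a translator fetching a
 * 32-bit guest instruction would typically do
 *
 *     uint32_t insn = cpu_ldl_code(env, pc);
 *
 * which selects the instruction-fetch mmu index via
 * cpu_mmu_index(env, true) and performs the access as a code read.
 */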