/*
 *  Common CPU TLB handling
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 *  This library is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU Lesser General Public
 *  License as published by the Free Software Foundation; either
 *  version 2.1 of the License, or (at your option) any later version.
 *
 *  This library is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  Lesser General Public License for more details.
 *
 *  You should have received a copy of the GNU Lesser General Public
 *  License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qemu/main-loop.h"
#include "hw/core/tcg-cpu-ops.h"
#include "exec/exec-all.h"
#include "exec/memory.h"
#include "exec/cpu_ldst.h"
#include "exec/cputlb.h"
#include "exec/memory-internal.h"
#include "exec/ram_addr.h"
#include "tcg/tcg.h"
#include "qemu/error-report.h"
#include "exec/log.h"
#include "exec/helper-proto.h"
#include "qemu/atomic.h"
#include "qemu/atomic128.h"
#include "exec/translate-all.h"
#include "trace/trace-root.h"
#include "tb-hash.h"
#include "internal.h"
#ifdef CONFIG_PLUGIN
#include "qemu/plugin-memory.h"
#endif
#include "tcg/tcg-ldst.h"

/* DEBUG defines, enable DEBUG_TLB_LOG to log to the CPU_LOG_MMU target */
/* #define DEBUG_TLB */
/* #define DEBUG_TLB_LOG */

#ifdef DEBUG_TLB
# define DEBUG_TLB_GATE 1
# ifdef DEBUG_TLB_LOG
#  define DEBUG_TLB_LOG_GATE 1
# else
#  define DEBUG_TLB_LOG_GATE 0
# endif
#else
# define DEBUG_TLB_GATE 0
# define DEBUG_TLB_LOG_GATE 0
#endif

#define tlb_debug(fmt, ...) do { \
    if (DEBUG_TLB_LOG_GATE) { \
        qemu_log_mask(CPU_LOG_MMU, "%s: " fmt, __func__, \
                      ## __VA_ARGS__); \
    } else if (DEBUG_TLB_GATE) { \
        fprintf(stderr, "%s: " fmt, __func__, ## __VA_ARGS__); \
    } \
} while (0)

#define assert_cpu_is_self(cpu) do {                              \
        if (DEBUG_TLB_GATE) {                                     \
            g_assert(!(cpu)->created || qemu_cpu_is_self(cpu));   \
        }                                                         \
    } while (0)

/* run_on_cpu_data.target_ptr should always be big enough for a
 * target_ulong even on 32 bit builds */
77QEMU_BUILD_BUG_ON(sizeof(target_ulong) > sizeof(run_on_cpu_data));

/* We currently can't handle more than 16 bits in the MMUIDX bitmask. */

81QEMU_BUILD_BUG_ON(NB_MMU_MODES > 16);
82#define ALL_MMUIDX_BITS ((1 << NB_MMU_MODES) - 1)
83
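/* Return the number of entries in the fast TLB, as encoded in its mask. */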
84static inline size_t tlb_n_entries(CPUTLBDescFast *fast)
85{
86 return (fast->mask >> CPU_TLB_ENTRY_BITS) + 1;
87}
88
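/* Return the size in bytes of the fast TLB table. */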
89static inline size_t sizeof_tlb(CPUTLBDescFast *fast)
90{
91 return fast->mask + (1 << CPU_TLB_ENTRY_BITS);
92}
93
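/* Start a new TLB-usage observation window for dynamic resizing. */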
94static void tlb_window_reset(CPUTLBDesc *desc, int64_t ns,
95 size_t max_entries)
96{
97 desc->window_begin_ns = ns;
98 desc->window_max_entries = max_entries;
99}
100
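/* Invalidate the portion of the TB jump cache that can hold TBs from @page_addr. */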
101static void tb_jmp_cache_clear_page(CPUState *cpu, target_ulong page_addr)
102{
103 int i, i0 = tb_jmp_cache_hash_page(page_addr);
104 CPUJumpCache *jc = cpu->tb_jmp_cache;
105
106 for (i = 0; i < TB_JMP_PAGE_SIZE; i++) {
107 qatomic_set(&jc->array[i0 + i].tb, NULL);
108 }
109}
110
/*
 * Dynamic TLB sizing.
 *
 * tlb_mmu_resize_locked() performs the resize bookkeeping for one TLB
 * and reallocates it if necessary.  It is called, with the TLB lock
 * held, on every flush of that TLB.  The policy implemented below is:
 *
 * 1. Track the maximum number of entries in use over a sliding window
 *    of 100 ms (desc->window_begin_ns / desc->window_max_entries).
 *
 * 2. If the use rate of the current table exceeds 70%, double its size,
 *    up to 1 << CPU_TLB_DYN_MAX_BITS entries.
 *
 * 3. If the use rate drops below 30% and the window has expired, shrink
 *    the table to the smallest power of two that keeps the observed
 *    maximum below the 70% threshold, but never below
 *    1 << CPU_TLB_DYN_MIN_BITS entries.
 *
 * Keeping the use rate of this direct-mapped table in the 30-70% range
 * is a compromise between conflict misses (table too small) and the
 * cost of flushing and walking an oversized table.
 */
151static void tlb_mmu_resize_locked(CPUTLBDesc *desc, CPUTLBDescFast *fast,
152 int64_t now)
153{
154 size_t old_size = tlb_n_entries(fast);
155 size_t rate;
156 size_t new_size = old_size;
157 int64_t window_len_ms = 100;
158 int64_t window_len_ns = window_len_ms * 1000 * 1000;
159 bool window_expired = now > desc->window_begin_ns + window_len_ns;
160
161 if (desc->n_used_entries > desc->window_max_entries) {
162 desc->window_max_entries = desc->n_used_entries;
163 }
164 rate = desc->window_max_entries * 100 / old_size;
165
166 if (rate > 70) {
167 new_size = MIN(old_size << 1, 1 << CPU_TLB_DYN_MAX_BITS);
168 } else if (rate < 30 && window_expired) {
169 size_t ceil = pow2ceil(desc->window_max_entries);
170 size_t expected_rate = desc->window_max_entries * 100 / ceil;

        /*
         * Before shrinking to pow2ceil(window_max_entries), check that
         * the projected use rate at that size would not already exceed
         * the 70% growth threshold and force an immediate resize; if it
         * would, pick the next power of two up front.  For example, with
         * window_max_entries == 769 the use rate at 1024 entries would
         * be ~75%, so we go straight to 2048.
         */
182 if (expected_rate > 70) {
183 ceil *= 2;
184 }
185 new_size = MAX(ceil, 1 << CPU_TLB_DYN_MIN_BITS);
186 }
187
188 if (new_size == old_size) {
189 if (window_expired) {
190 tlb_window_reset(desc, now, desc->n_used_entries);
191 }
192 return;
193 }
194
195 g_free(fast->table);
196 g_free(desc->fulltlb);
197
198 tlb_window_reset(desc, now, 0);
199
200 fast->mask = (new_size - 1) << CPU_TLB_ENTRY_BITS;
201 fast->table = g_try_new(CPUTLBEntry, new_size);
202 desc->fulltlb = g_try_new(CPUTLBEntryFull, new_size);

    /*
     * If the allocations fail, try smaller sizes. We just freed some
     * memory, so going back to half of new_size has a good chance of
     * working.  Increased memory pressure elsewhere in the system might
     * cause the allocations to fail though, so we progressively reduce
     * the allocation size, aborting if we cannot even allocate the
     * smallest TLB we support.
     */
211 while (fast->table == NULL || desc->fulltlb == NULL) {
212 if (new_size == (1 << CPU_TLB_DYN_MIN_BITS)) {
213 error_report("%s: %s", __func__, strerror(errno));
214 abort();
215 }
216 new_size = MAX(new_size >> 1, 1 << CPU_TLB_DYN_MIN_BITS);
217 fast->mask = (new_size - 1) << CPU_TLB_ENTRY_BITS;
218
219 g_free(fast->table);
220 g_free(desc->fulltlb);
221 fast->table = g_try_new(CPUTLBEntry, new_size);
222 desc->fulltlb = g_try_new(CPUTLBEntryFull, new_size);
223 }
224}
225
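/* Reset one TLB's fast table and victim table to the empty state. */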
226static void tlb_mmu_flush_locked(CPUTLBDesc *desc, CPUTLBDescFast *fast)
227{
228 desc->n_used_entries = 0;
229 desc->large_page_addr = -1;
230 desc->large_page_mask = -1;
231 desc->vindex = 0;
232 memset(fast->table, -1, sizeof_tlb(fast));
233 memset(desc->vtable, -1, sizeof(desc->vtable));
234}
235
236static void tlb_flush_one_mmuidx_locked(CPUArchState *env, int mmu_idx,
237 int64_t now)
238{
239 CPUTLBDesc *desc = &env_tlb(env)->d[mmu_idx];
240 CPUTLBDescFast *fast = &env_tlb(env)->f[mmu_idx];
241
242 tlb_mmu_resize_locked(desc, fast, now);
243 tlb_mmu_flush_locked(desc, fast);
244}
245
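/* Allocate and initialise one TLB at its default dynamic size. */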
246static void tlb_mmu_init(CPUTLBDesc *desc, CPUTLBDescFast *fast, int64_t now)
247{
248 size_t n_entries = 1 << CPU_TLB_DYN_DEFAULT_BITS;
249
250 tlb_window_reset(desc, now, 0);
251 desc->n_used_entries = 0;
252 fast->mask = (n_entries - 1) << CPU_TLB_ENTRY_BITS;
253 fast->table = g_new(CPUTLBEntry, n_entries);
254 desc->fulltlb = g_new(CPUTLBEntryFull, n_entries);
255 tlb_mmu_flush_locked(desc, fast);
256}
257
258static inline void tlb_n_used_entries_inc(CPUArchState *env, uintptr_t mmu_idx)
259{
260 env_tlb(env)->d[mmu_idx].n_used_entries++;
261}
262
263static inline void tlb_n_used_entries_dec(CPUArchState *env, uintptr_t mmu_idx)
264{
265 env_tlb(env)->d[mmu_idx].n_used_entries--;
266}
267
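/* Initialise all of a vCPU's TLBs and the lock protecting them. */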
268void tlb_init(CPUState *cpu)
269{
270 CPUArchState *env = cpu->env_ptr;
271 int64_t now = get_clock_realtime();
272 int i;
273
274 qemu_spin_init(&env_tlb(env)->c.lock);
275
276
277 env_tlb(env)->c.dirty = 0;
278
279 for (i = 0; i < NB_MMU_MODES; i++) {
280 tlb_mmu_init(&env_tlb(env)->d[i], &env_tlb(env)->f[i], now);
281 }
282}
283
284void tlb_destroy(CPUState *cpu)
285{
286 CPUArchState *env = cpu->env_ptr;
287 int i;
288
289 qemu_spin_destroy(&env_tlb(env)->c.lock);
290 for (i = 0; i < NB_MMU_MODES; i++) {
291 CPUTLBDesc *desc = &env_tlb(env)->d[i];
292 CPUTLBDescFast *fast = &env_tlb(env)->f[i];
293
294 g_free(fast->table);
295 g_free(desc->fulltlb);
296 }
297}
298
/*
 * flush_all_helper: queue @fn as asynchronous work on every vCPU but @src.
 * The caller then runs @fn on @src itself, either directly or as "safe"
 * work, which creates a synchronisation point once all queued work has
 * completed.
 */
306static void flush_all_helper(CPUState *src, run_on_cpu_func fn,
307 run_on_cpu_data d)
308{
309 CPUState *cpu;
310
311 CPU_FOREACH(cpu) {
312 if (cpu != src) {
313 async_run_on_cpu(cpu, fn, d);
314 }
315 }
316}
317
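/* Sum the full, partial and elided flush counts across all vCPUs. */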
318void tlb_flush_counts(size_t *pfull, size_t *ppart, size_t *pelide)
319{
320 CPUState *cpu;
321 size_t full = 0, part = 0, elide = 0;
322
323 CPU_FOREACH(cpu) {
324 CPUArchState *env = cpu->env_ptr;
325
326 full += qatomic_read(&env_tlb(env)->c.full_flush_count);
327 part += qatomic_read(&env_tlb(env)->c.part_flush_count);
328 elide += qatomic_read(&env_tlb(env)->c.elide_flush_count);
329 }
330 *pfull = full;
331 *ppart = part;
332 *pelide = elide;
333}
334
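/*
 * Flush, on the current vCPU, the subset of @data.host_int's mmu_idx
 * bitmap that actually has dirty entries, and update the flush statistics.
 */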
335static void tlb_flush_by_mmuidx_async_work(CPUState *cpu, run_on_cpu_data data)
336{
337 CPUArchState *env = cpu->env_ptr;
338 uint16_t asked = data.host_int;
339 uint16_t all_dirty, work, to_clean;
340 int64_t now = get_clock_realtime();
341
342 assert_cpu_is_self(cpu);
343
344 tlb_debug("mmu_idx:0x%04" PRIx16 "\n", asked);
345
346 qemu_spin_lock(&env_tlb(env)->c.lock);
347
348 all_dirty = env_tlb(env)->c.dirty;
349 to_clean = asked & all_dirty;
350 all_dirty &= ~to_clean;
351 env_tlb(env)->c.dirty = all_dirty;
352
353 for (work = to_clean; work != 0; work &= work - 1) {
354 int mmu_idx = ctz32(work);
355 tlb_flush_one_mmuidx_locked(env, mmu_idx, now);
356 }
357
358 qemu_spin_unlock(&env_tlb(env)->c.lock);
359
360 tcg_flush_jmp_cache(cpu);
361
362 if (to_clean == ALL_MMUIDX_BITS) {
363 qatomic_set(&env_tlb(env)->c.full_flush_count,
364 env_tlb(env)->c.full_flush_count + 1);
365 } else {
366 qatomic_set(&env_tlb(env)->c.part_flush_count,
367 env_tlb(env)->c.part_flush_count + ctpop16(to_clean));
368 if (to_clean != asked) {
369 qatomic_set(&env_tlb(env)->c.elide_flush_count,
370 env_tlb(env)->c.elide_flush_count +
371 ctpop16(asked & ~to_clean));
372 }
373 }
374}
375
376void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap)
377{
378 tlb_debug("mmu_idx: 0x%" PRIx16 "\n", idxmap);
379
380 if (cpu->created && !qemu_cpu_is_self(cpu)) {
381 async_run_on_cpu(cpu, tlb_flush_by_mmuidx_async_work,
382 RUN_ON_CPU_HOST_INT(idxmap));
383 } else {
384 tlb_flush_by_mmuidx_async_work(cpu, RUN_ON_CPU_HOST_INT(idxmap));
385 }
386}
387
388void tlb_flush(CPUState *cpu)
389{
390 tlb_flush_by_mmuidx(cpu, ALL_MMUIDX_BITS);
391}
392
393void tlb_flush_by_mmuidx_all_cpus(CPUState *src_cpu, uint16_t idxmap)
394{
395 const run_on_cpu_func fn = tlb_flush_by_mmuidx_async_work;
396
397 tlb_debug("mmu_idx: 0x%"PRIx16"\n", idxmap);
398
399 flush_all_helper(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
400 fn(src_cpu, RUN_ON_CPU_HOST_INT(idxmap));
401}
402
403void tlb_flush_all_cpus(CPUState *src_cpu)
404{
405 tlb_flush_by_mmuidx_all_cpus(src_cpu, ALL_MMUIDX_BITS);
406}
407
408void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *src_cpu, uint16_t idxmap)
409{
410 const run_on_cpu_func fn = tlb_flush_by_mmuidx_async_work;
411
412 tlb_debug("mmu_idx: 0x%"PRIx16"\n", idxmap);
413
414 flush_all_helper(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
415 async_safe_run_on_cpu(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
416}
417
418void tlb_flush_all_cpus_synced(CPUState *src_cpu)
419{
420 tlb_flush_by_mmuidx_all_cpus_synced(src_cpu, ALL_MMUIDX_BITS);
421}
422
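/* Return true if @page matches the entry under @mask for any access type. */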
423static bool tlb_hit_page_mask_anyprot(CPUTLBEntry *tlb_entry,
424 target_ulong page, target_ulong mask)
425{
426 page &= mask;
427 mask &= TARGET_PAGE_MASK | TLB_INVALID_MASK;
428
429 return (page == (tlb_entry->addr_read & mask) ||
430 page == (tlb_addr_write(tlb_entry) & mask) ||
431 page == (tlb_entry->addr_code & mask));
432}
433
434static inline bool tlb_hit_page_anyprot(CPUTLBEntry *tlb_entry,
435 target_ulong page)
436{
437 return tlb_hit_page_mask_anyprot(tlb_entry, page, -1);
438}
439
/**
 * tlb_entry_is_empty - return true if the entry is not in use
 * @te: pointer to CPUTLBEntry
 */
444static inline bool tlb_entry_is_empty(const CPUTLBEntry *te)
445{
446 return te->addr_read == -1 && te->addr_write == -1 && te->addr_code == -1;
447}
448
/* Called with tlb_c.lock held */
450static bool tlb_flush_entry_mask_locked(CPUTLBEntry *tlb_entry,
451 target_ulong page,
452 target_ulong mask)
453{
454 if (tlb_hit_page_mask_anyprot(tlb_entry, page, mask)) {
455 memset(tlb_entry, -1, sizeof(*tlb_entry));
456 return true;
457 }
458 return false;
459}
460
461static inline bool tlb_flush_entry_locked(CPUTLBEntry *tlb_entry,
462 target_ulong page)
463{
464 return tlb_flush_entry_mask_locked(tlb_entry, page, -1);
465}
466
/* Called with tlb_c.lock held */
468static void tlb_flush_vtlb_page_mask_locked(CPUArchState *env, int mmu_idx,
469 target_ulong page,
470 target_ulong mask)
471{
472 CPUTLBDesc *d = &env_tlb(env)->d[mmu_idx];
473 int k;
474
475 assert_cpu_is_self(env_cpu(env));
476 for (k = 0; k < CPU_VTLB_SIZE; k++) {
477 if (tlb_flush_entry_mask_locked(&d->vtable[k], page, mask)) {
478 tlb_n_used_entries_dec(env, mmu_idx);
479 }
480 }
481}
482
483static inline void tlb_flush_vtlb_page_locked(CPUArchState *env, int mmu_idx,
484 target_ulong page)
485{
486 tlb_flush_vtlb_page_mask_locked(env, mmu_idx, page, -1);
487}
488
489static void tlb_flush_page_locked(CPUArchState *env, int midx,
490 target_ulong page)
491{
492 target_ulong lp_addr = env_tlb(env)->d[midx].large_page_addr;
493 target_ulong lp_mask = env_tlb(env)->d[midx].large_page_mask;
494
    /* Check if we need to flush due to large pages.  */
496 if ((page & lp_mask) == lp_addr) {
497 tlb_debug("forcing full flush midx %d ("
498 TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
499 midx, lp_addr, lp_mask);
500 tlb_flush_one_mmuidx_locked(env, midx, get_clock_realtime());
501 } else {
502 if (tlb_flush_entry_locked(tlb_entry(env, midx, page), page)) {
503 tlb_n_used_entries_dec(env, midx);
504 }
505 tlb_flush_vtlb_page_locked(env, midx, page);
506 }
507}
508
/**
 * tlb_flush_page_by_mmuidx_async_0:
 * @cpu: cpu on which to flush
 * @addr: page of virtual address to flush
 * @idxmap: set of mmu_idx to flush
 *
 * Helper for tlb_flush_page_by_mmuidx and friends, flush one page
 * at @addr from the tlbs indicated by @idxmap from @cpu.
 */
518static void tlb_flush_page_by_mmuidx_async_0(CPUState *cpu,
519 target_ulong addr,
520 uint16_t idxmap)
521{
522 CPUArchState *env = cpu->env_ptr;
523 int mmu_idx;
524
525 assert_cpu_is_self(cpu);
526
527 tlb_debug("page addr:" TARGET_FMT_lx " mmu_map:0x%x\n", addr, idxmap);
528
529 qemu_spin_lock(&env_tlb(env)->c.lock);
530 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
531 if ((idxmap >> mmu_idx) & 1) {
532 tlb_flush_page_locked(env, mmu_idx, addr);
533 }
534 }
535 qemu_spin_unlock(&env_tlb(env)->c.lock);

    /*
     * Discard jump cache entries for any tb which might potentially
     * overlap the flushed page, which includes the previous.
     */
541 tb_jmp_cache_clear_page(cpu, addr - TARGET_PAGE_SIZE);
542 tb_jmp_cache_clear_page(cpu, addr);
543}
544
/**
 * tlb_flush_page_by_mmuidx_async_1:
 * @cpu: cpu on which to flush
 * @data: encoded addr + idxmap
 *
 * Helper for tlb_flush_page_by_mmuidx and friends, called through
 * async_run_on_cpu.  The idxmap parameter is encoded in the page
 * offset of the target_ptr field.  This limits the set of mmu_idx
 * that can be passed via this method.
 */
555static void tlb_flush_page_by_mmuidx_async_1(CPUState *cpu,
556 run_on_cpu_data data)
557{
558 target_ulong addr_and_idxmap = (target_ulong) data.target_ptr;
559 target_ulong addr = addr_and_idxmap & TARGET_PAGE_MASK;
560 uint16_t idxmap = addr_and_idxmap & ~TARGET_PAGE_MASK;
561
562 tlb_flush_page_by_mmuidx_async_0(cpu, addr, idxmap);
563}
564
565typedef struct {
566 target_ulong addr;
567 uint16_t idxmap;
568} TLBFlushPageByMMUIdxData;
569
/**
 * tlb_flush_page_by_mmuidx_async_2:
 * @cpu: cpu on which to flush
 * @data: allocated addr + idxmap
 *
 * Helper for tlb_flush_page_by_mmuidx and friends, called through
 * async_run_on_cpu.  The addr+idxmap parameters are stored in a
 * TLBFlushPageByMMUIdxData structure that has been allocated
 * specifically for this helper.  Free the structure when done.
 */
580static void tlb_flush_page_by_mmuidx_async_2(CPUState *cpu,
581 run_on_cpu_data data)
582{
583 TLBFlushPageByMMUIdxData *d = data.host_ptr;
584
585 tlb_flush_page_by_mmuidx_async_0(cpu, d->addr, d->idxmap);
586 g_free(d);
587}
588
589void tlb_flush_page_by_mmuidx(CPUState *cpu, target_ulong addr, uint16_t idxmap)
590{
591 tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%" PRIx16 "\n", addr, idxmap);
592
593
594 addr &= TARGET_PAGE_MASK;
595
596 if (qemu_cpu_is_self(cpu)) {
597 tlb_flush_page_by_mmuidx_async_0(cpu, addr, idxmap);
598 } else if (idxmap < TARGET_PAGE_SIZE) {
        /*
         * Most targets have only a few mmu_idx.  In the case where we can
         * stuff idxmap into the low TARGET_PAGE_BITS, avoid allocating
         * memory for this operation.
         */
604 async_run_on_cpu(cpu, tlb_flush_page_by_mmuidx_async_1,
605 RUN_ON_CPU_TARGET_PTR(addr | idxmap));
606 } else {
607 TLBFlushPageByMMUIdxData *d = g_new(TLBFlushPageByMMUIdxData, 1);
608
609
610 d->addr = addr;
611 d->idxmap = idxmap;
612 async_run_on_cpu(cpu, tlb_flush_page_by_mmuidx_async_2,
613 RUN_ON_CPU_HOST_PTR(d));
614 }
615}
616
617void tlb_flush_page(CPUState *cpu, target_ulong addr)
618{
619 tlb_flush_page_by_mmuidx(cpu, addr, ALL_MMUIDX_BITS);
620}
621
622void tlb_flush_page_by_mmuidx_all_cpus(CPUState *src_cpu, target_ulong addr,
623 uint16_t idxmap)
624{
625 tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%"PRIx16"\n", addr, idxmap);
626
627
628 addr &= TARGET_PAGE_MASK;
629
630
631
632
633
634 if (idxmap < TARGET_PAGE_SIZE) {
635 flush_all_helper(src_cpu, tlb_flush_page_by_mmuidx_async_1,
636 RUN_ON_CPU_TARGET_PTR(addr | idxmap));
637 } else {
638 CPUState *dst_cpu;
639
640
641 CPU_FOREACH(dst_cpu) {
642 if (dst_cpu != src_cpu) {
643 TLBFlushPageByMMUIdxData *d
644 = g_new(TLBFlushPageByMMUIdxData, 1);
645
646 d->addr = addr;
647 d->idxmap = idxmap;
648 async_run_on_cpu(dst_cpu, tlb_flush_page_by_mmuidx_async_2,
649 RUN_ON_CPU_HOST_PTR(d));
650 }
651 }
652 }
653
654 tlb_flush_page_by_mmuidx_async_0(src_cpu, addr, idxmap);
655}
656
657void tlb_flush_page_all_cpus(CPUState *src, target_ulong addr)
658{
659 tlb_flush_page_by_mmuidx_all_cpus(src, addr, ALL_MMUIDX_BITS);
660}
661
662void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
663 target_ulong addr,
664 uint16_t idxmap)
665{
666 tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%"PRIx16"\n", addr, idxmap);
667
668
669 addr &= TARGET_PAGE_MASK;
670
671
672
673
674
675 if (idxmap < TARGET_PAGE_SIZE) {
676 flush_all_helper(src_cpu, tlb_flush_page_by_mmuidx_async_1,
677 RUN_ON_CPU_TARGET_PTR(addr | idxmap));
678 async_safe_run_on_cpu(src_cpu, tlb_flush_page_by_mmuidx_async_1,
679 RUN_ON_CPU_TARGET_PTR(addr | idxmap));
680 } else {
681 CPUState *dst_cpu;
682 TLBFlushPageByMMUIdxData *d;
683
684
685 CPU_FOREACH(dst_cpu) {
686 if (dst_cpu != src_cpu) {
687 d = g_new(TLBFlushPageByMMUIdxData, 1);
688 d->addr = addr;
689 d->idxmap = idxmap;
690 async_run_on_cpu(dst_cpu, tlb_flush_page_by_mmuidx_async_2,
691 RUN_ON_CPU_HOST_PTR(d));
692 }
693 }
694
695 d = g_new(TLBFlushPageByMMUIdxData, 1);
696 d->addr = addr;
697 d->idxmap = idxmap;
698 async_safe_run_on_cpu(src_cpu, tlb_flush_page_by_mmuidx_async_2,
699 RUN_ON_CPU_HOST_PTR(d));
700 }
701}
702
703void tlb_flush_page_all_cpus_synced(CPUState *src, target_ulong addr)
704{
705 tlb_flush_page_by_mmuidx_all_cpus_synced(src, addr, ALL_MMUIDX_BITS);
706}
707
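/*
 * Flush entries for pages in [addr, addr + len) from one mmu_idx,
 * comparing addresses only on their low @bits; called with tlb_c.lock
 * held.  Falls back to a full flush of the mmu_idx for oversized ranges
 * or when the range overlaps a tracked large page.
 */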
708static void tlb_flush_range_locked(CPUArchState *env, int midx,
709 target_ulong addr, target_ulong len,
710 unsigned bits)
711{
712 CPUTLBDesc *d = &env_tlb(env)->d[midx];
713 CPUTLBDescFast *f = &env_tlb(env)->f[midx];
714 target_ulong mask = MAKE_64BIT_MASK(0, bits);

    /*
     * If @mask is smaller than the TLB mask, then we cannot use a single
     * comparison to discard all the entries that match; for now just
     * flush the entire mmu_idx.
     * TODO: Perhaps allow @bits to be a few bits less than the size.
     *
     * If @len is larger than the TLB size, then it will take longer to
     * flush each entry individually than it will to flush it all.
     */
726 if (mask < f->mask || len > f->mask) {
727 tlb_debug("forcing full flush midx %d ("
728 TARGET_FMT_lx "/" TARGET_FMT_lx "+" TARGET_FMT_lx ")\n",
729 midx, addr, mask, len);
730 tlb_flush_one_mmuidx_locked(env, midx, get_clock_realtime());
731 return;
732 }
733
    /*
     * Check if we need to flush due to large pages.
     * Because large_page_mask contains all 1's from the msb,
     * we only need to test the end of the range.
     */
739 if (((addr + len - 1) & d->large_page_mask) == d->large_page_addr) {
740 tlb_debug("forcing full flush midx %d ("
741 TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
742 midx, d->large_page_addr, d->large_page_mask);
743 tlb_flush_one_mmuidx_locked(env, midx, get_clock_realtime());
744 return;
745 }
746
747 for (target_ulong i = 0; i < len; i += TARGET_PAGE_SIZE) {
748 target_ulong page = addr + i;
749 CPUTLBEntry *entry = tlb_entry(env, midx, page);
750
751 if (tlb_flush_entry_mask_locked(entry, page, mask)) {
752 tlb_n_used_entries_dec(env, midx);
753 }
754 tlb_flush_vtlb_page_mask_locked(env, midx, page, mask);
755 }
756}
757
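/* Parameters for a (possibly deferred) range flush. */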
758typedef struct {
759 target_ulong addr;
760 target_ulong len;
761 uint16_t idxmap;
762 uint16_t bits;
763} TLBFlushRangeData;
764
765static void tlb_flush_range_by_mmuidx_async_0(CPUState *cpu,
766 TLBFlushRangeData d)
767{
768 CPUArchState *env = cpu->env_ptr;
769 int mmu_idx;
770
771 assert_cpu_is_self(cpu);
772
773 tlb_debug("range:" TARGET_FMT_lx "/%u+" TARGET_FMT_lx " mmu_map:0x%x\n",
774 d.addr, d.bits, d.len, d.idxmap);
775
776 qemu_spin_lock(&env_tlb(env)->c.lock);
777 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
778 if ((d.idxmap >> mmu_idx) & 1) {
779 tlb_flush_range_locked(env, mmu_idx, d.addr, d.len, d.bits);
780 }
781 }
782 qemu_spin_unlock(&env_tlb(env)->c.lock);
783
    /*
     * If the length is larger than the jump cache size, then it will take
     * longer to clear each entry individually than it will to clear it all.
     */
788 if (d.len >= (TARGET_PAGE_SIZE * TB_JMP_CACHE_SIZE)) {
789 tcg_flush_jmp_cache(cpu);
790 return;
791 }
792
    /*
     * Discard jump cache entries for any tb which might potentially
     * overlap the flushed pages, which includes the previous.
     */
797 d.addr -= TARGET_PAGE_SIZE;
798 for (target_ulong i = 0, n = d.len / TARGET_PAGE_SIZE + 1; i < n; i++) {
799 tb_jmp_cache_clear_page(cpu, d.addr);
800 d.addr += TARGET_PAGE_SIZE;
801 }
802}
803
804static void tlb_flush_range_by_mmuidx_async_1(CPUState *cpu,
805 run_on_cpu_data data)
806{
807 TLBFlushRangeData *d = data.host_ptr;
808 tlb_flush_range_by_mmuidx_async_0(cpu, *d);
809 g_free(d);
810}
811
812void tlb_flush_range_by_mmuidx(CPUState *cpu, target_ulong addr,
813 target_ulong len, uint16_t idxmap,
814 unsigned bits)
815{
816 TLBFlushRangeData d;
817
    /*
     * If all bits are significant, and len is small,
     * this devolves to tlb_flush_page.
     */
822 if (bits >= TARGET_LONG_BITS && len <= TARGET_PAGE_SIZE) {
823 tlb_flush_page_by_mmuidx(cpu, addr, idxmap);
824 return;
825 }
    /* If no page bits are significant, this devolves to tlb_flush. */
827 if (bits < TARGET_PAGE_BITS) {
828 tlb_flush_by_mmuidx(cpu, idxmap);
829 return;
830 }
831
832
833 d.addr = addr & TARGET_PAGE_MASK;
834 d.len = len;
835 d.idxmap = idxmap;
836 d.bits = bits;
837
838 if (qemu_cpu_is_self(cpu)) {
839 tlb_flush_range_by_mmuidx_async_0(cpu, d);
840 } else {
841
842 TLBFlushRangeData *p = g_memdup(&d, sizeof(d));
843 async_run_on_cpu(cpu, tlb_flush_range_by_mmuidx_async_1,
844 RUN_ON_CPU_HOST_PTR(p));
845 }
846}
847
848void tlb_flush_page_bits_by_mmuidx(CPUState *cpu, target_ulong addr,
849 uint16_t idxmap, unsigned bits)
850{
851 tlb_flush_range_by_mmuidx(cpu, addr, TARGET_PAGE_SIZE, idxmap, bits);
852}
853
854void tlb_flush_range_by_mmuidx_all_cpus(CPUState *src_cpu,
855 target_ulong addr, target_ulong len,
856 uint16_t idxmap, unsigned bits)
857{
858 TLBFlushRangeData d;
859 CPUState *dst_cpu;
860
861
862
863
864
865 if (bits >= TARGET_LONG_BITS && len <= TARGET_PAGE_SIZE) {
866 tlb_flush_page_by_mmuidx_all_cpus(src_cpu, addr, idxmap);
867 return;
868 }
869
870 if (bits < TARGET_PAGE_BITS) {
871 tlb_flush_by_mmuidx_all_cpus(src_cpu, idxmap);
872 return;
873 }
874
875
876 d.addr = addr & TARGET_PAGE_MASK;
877 d.len = len;
878 d.idxmap = idxmap;
879 d.bits = bits;
880
881
882 CPU_FOREACH(dst_cpu) {
883 if (dst_cpu != src_cpu) {
884 TLBFlushRangeData *p = g_memdup(&d, sizeof(d));
885 async_run_on_cpu(dst_cpu,
886 tlb_flush_range_by_mmuidx_async_1,
887 RUN_ON_CPU_HOST_PTR(p));
888 }
889 }
890
891 tlb_flush_range_by_mmuidx_async_0(src_cpu, d);
892}
893
894void tlb_flush_page_bits_by_mmuidx_all_cpus(CPUState *src_cpu,
895 target_ulong addr,
896 uint16_t idxmap, unsigned bits)
897{
898 tlb_flush_range_by_mmuidx_all_cpus(src_cpu, addr, TARGET_PAGE_SIZE,
899 idxmap, bits);
900}
901
902void tlb_flush_range_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
903 target_ulong addr,
904 target_ulong len,
905 uint16_t idxmap,
906 unsigned bits)
907{
908 TLBFlushRangeData d, *p;
909 CPUState *dst_cpu;
910
911
912
913
914
915 if (bits >= TARGET_LONG_BITS && len <= TARGET_PAGE_SIZE) {
916 tlb_flush_page_by_mmuidx_all_cpus_synced(src_cpu, addr, idxmap);
917 return;
918 }
919
920 if (bits < TARGET_PAGE_BITS) {
921 tlb_flush_by_mmuidx_all_cpus_synced(src_cpu, idxmap);
922 return;
923 }
924
925
926 d.addr = addr & TARGET_PAGE_MASK;
927 d.len = len;
928 d.idxmap = idxmap;
929 d.bits = bits;
930
931
932 CPU_FOREACH(dst_cpu) {
933 if (dst_cpu != src_cpu) {
934 p = g_memdup(&d, sizeof(d));
935 async_run_on_cpu(dst_cpu, tlb_flush_range_by_mmuidx_async_1,
936 RUN_ON_CPU_HOST_PTR(p));
937 }
938 }
939
940 p = g_memdup(&d, sizeof(d));
941 async_safe_run_on_cpu(src_cpu, tlb_flush_range_by_mmuidx_async_1,
942 RUN_ON_CPU_HOST_PTR(p));
943}
944
945void tlb_flush_page_bits_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
946 target_ulong addr,
947 uint16_t idxmap,
948 unsigned bits)
949{
950 tlb_flush_range_by_mmuidx_all_cpus_synced(src_cpu, addr, TARGET_PAGE_SIZE,
951 idxmap, bits);
952}
/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
956void tlb_protect_code(ram_addr_t ram_addr)
957{
958 cpu_physical_memory_test_and_clear_dirty(ram_addr & TARGET_PAGE_MASK,
959 TARGET_PAGE_SIZE,
960 DIRTY_MEMORY_CODE);
961}
962
/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
965void tlb_unprotect_code(ram_addr_t ram_addr)
966{
967 cpu_physical_memory_set_dirty_flag(ram_addr, DIRTY_MEMORY_CODE);
968}
969
/*
 * Dirty write flag handling.
 *
 * tlb_reset_dirty_range_locked() re-arms the TLB_NOTDIRTY slow path for
 * every writable RAM entry whose host address falls within
 * [start, start + length).  It can be called from a thread other than the
 * TLB's owner (see tlb_reset_dirty below), so the flag must be OR'ed into
 * addr_write atomically; with TCG_OVERSIZED_GUEST the entry is wider than
 * the host word and a plain store is the best that can be done.
 */
987static void tlb_reset_dirty_range_locked(CPUTLBEntry *tlb_entry,
988 uintptr_t start, uintptr_t length)
989{
990 uintptr_t addr = tlb_entry->addr_write;
991
992 if ((addr & (TLB_INVALID_MASK | TLB_MMIO |
993 TLB_DISCARD_WRITE | TLB_NOTDIRTY)) == 0) {
994 addr &= TARGET_PAGE_MASK;
995 addr += tlb_entry->addend;
996 if ((addr - start) < length) {
997#if TCG_OVERSIZED_GUEST
998 tlb_entry->addr_write |= TLB_NOTDIRTY;
999#else
1000 qatomic_set(&tlb_entry->addr_write,
1001 tlb_entry->addr_write | TLB_NOTDIRTY);
1002#endif
1003 }
1004 }
1005}
1006
/*
 * Called with tlb_c.lock held.
 * Called only from the vCPU context, i.e. the TLB's owner thread.
 */
1011static inline void copy_tlb_helper_locked(CPUTLBEntry *d, const CPUTLBEntry *s)
1012{
1013 *d = *s;
1014}
1015
/* This is a cross vCPU call (i.e. another vCPU resetting the flags of
 * the target vCPU).
 * We must take tlb_c.lock to avoid racing with another vCPU update. The only
 * thing actually updated is the target TLB entry ->addr_write flags.
 */
1021void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length)
1022{
1023 CPUArchState *env;
1024
1025 int mmu_idx;
1026
1027 env = cpu->env_ptr;
1028 qemu_spin_lock(&env_tlb(env)->c.lock);
1029 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
1030 unsigned int i;
1031 unsigned int n = tlb_n_entries(&env_tlb(env)->f[mmu_idx]);
1032
1033 for (i = 0; i < n; i++) {
1034 tlb_reset_dirty_range_locked(&env_tlb(env)->f[mmu_idx].table[i],
1035 start1, length);
1036 }
1037
1038 for (i = 0; i < CPU_VTLB_SIZE; i++) {
1039 tlb_reset_dirty_range_locked(&env_tlb(env)->d[mmu_idx].vtable[i],
1040 start1, length);
1041 }
1042 }
1043 qemu_spin_unlock(&env_tlb(env)->c.lock);
1044}
1045
1046
1047static inline void tlb_set_dirty1_locked(CPUTLBEntry *tlb_entry,
1048 target_ulong vaddr)
1049{
1050 if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY)) {
1051 tlb_entry->addr_write = vaddr;
1052 }
1053}
1054
/* update the TLB corresponding to virtual page vaddr
   so that it is no longer dirty */
1057void tlb_set_dirty(CPUState *cpu, target_ulong vaddr)
1058{
1059 CPUArchState *env = cpu->env_ptr;
1060 int mmu_idx;
1061
1062 assert_cpu_is_self(cpu);
1063
1064 vaddr &= TARGET_PAGE_MASK;
1065 qemu_spin_lock(&env_tlb(env)->c.lock);
1066 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
1067 tlb_set_dirty1_locked(tlb_entry(env, mmu_idx, vaddr), vaddr);
1068 }
1069
1070 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
1071 int k;
1072 for (k = 0; k < CPU_VTLB_SIZE; k++) {
1073 tlb_set_dirty1_locked(&env_tlb(env)->d[mmu_idx].vtable[k], vaddr);
1074 }
1075 }
1076 qemu_spin_unlock(&env_tlb(env)->c.lock);
1077}
1078
/* Our TLB does not support large pages, so remember the area covered by
   large pages and trigger a full TLB flush if these are invalidated.  */
1081static void tlb_add_large_page(CPUArchState *env, int mmu_idx,
1082 target_ulong vaddr, target_ulong size)
1083{
1084 target_ulong lp_addr = env_tlb(env)->d[mmu_idx].large_page_addr;
1085 target_ulong lp_mask = ~(size - 1);
1086
1087 if (lp_addr == (target_ulong)-1) {
1088
1089 lp_addr = vaddr;
1090 } else {
1091
1092
1093
1094 lp_mask &= env_tlb(env)->d[mmu_idx].large_page_mask;
1095 while (((lp_addr ^ vaddr) & lp_mask) != 0) {
1096 lp_mask <<= 1;
1097 }
1098 }
1099 env_tlb(env)->d[mmu_idx].large_page_addr = lp_addr & lp_mask;
1100 env_tlb(env)->d[mmu_idx].large_page_mask = lp_mask;
1101}
1102
/*
 * Add a new TLB entry.  At most one entry for a given virtual address
 * is permitted.  Only a single TARGET_PAGE_SIZE region is mapped, the
 * supplied size is only used by tlb_flush_page.
 *
 * Called from TCG-generated code, which is under an RCU read-side
 * critical section.
 */
1111void tlb_set_page_full(CPUState *cpu, int mmu_idx,
1112 target_ulong vaddr, CPUTLBEntryFull *full)
1113{
1114 CPUArchState *env = cpu->env_ptr;
1115 CPUTLB *tlb = env_tlb(env);
1116 CPUTLBDesc *desc = &tlb->d[mmu_idx];
1117 MemoryRegionSection *section;
1118 unsigned int index;
1119 target_ulong address;
1120 target_ulong write_address;
1121 uintptr_t addend;
1122 CPUTLBEntry *te, tn;
1123 hwaddr iotlb, xlat, sz, paddr_page;
1124 target_ulong vaddr_page;
1125 int asidx, wp_flags, prot;
1126 bool is_ram, is_romd;
1127
1128 assert_cpu_is_self(cpu);
1129
1130 if (full->lg_page_size <= TARGET_PAGE_BITS) {
1131 sz = TARGET_PAGE_SIZE;
1132 } else {
1133 sz = (hwaddr)1 << full->lg_page_size;
1134 tlb_add_large_page(env, mmu_idx, vaddr, sz);
1135 }
1136 vaddr_page = vaddr & TARGET_PAGE_MASK;
1137 paddr_page = full->phys_addr & TARGET_PAGE_MASK;
1138
1139 prot = full->prot;
1140 asidx = cpu_asidx_from_attrs(cpu, full->attrs);
1141 section = address_space_translate_for_iotlb(cpu, asidx, paddr_page,
1142 &xlat, &sz, full->attrs, &prot);
1143 assert(sz >= TARGET_PAGE_SIZE);
1144
1145 tlb_debug("vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx
1146 " prot=%x idx=%d\n",
1147 vaddr, full->phys_addr, prot, mmu_idx);
1148
1149 address = vaddr_page;
1150 if (full->lg_page_size < TARGET_PAGE_BITS) {
1151
1152 address |= TLB_INVALID_MASK;
1153 }
1154 if (full->attrs.byte_swap) {
1155 address |= TLB_BSWAP;
1156 }
1157
1158 is_ram = memory_region_is_ram(section->mr);
1159 is_romd = memory_region_is_romd(section->mr);
1160
1161 if (is_ram || is_romd) {
1162
1163 addend = (uintptr_t)memory_region_get_ram_ptr(section->mr) + xlat;
1164 } else {
1165
1166 addend = 0;
1167 }
1168
1169 write_address = address;
1170 if (is_ram) {
1171 iotlb = memory_region_get_ram_addr(section->mr) + xlat;
1172
1173
1174
1175
1176 if (prot & PAGE_WRITE) {
1177 if (section->readonly) {
1178 write_address |= TLB_DISCARD_WRITE;
1179 } else if (cpu_physical_memory_is_clean(iotlb)) {
1180 write_address |= TLB_NOTDIRTY;
1181 }
1182 }
1183 } else {
1184
1185 iotlb = memory_region_section_get_iotlb(cpu, section) + xlat;
1186
1187
1188
1189
1190
1191 write_address |= TLB_MMIO;
1192 if (!is_romd) {
1193 address = write_address;
1194 }
1195 }
1196
1197 wp_flags = cpu_watchpoint_address_matches(cpu, vaddr_page,
1198 TARGET_PAGE_SIZE);
1199
1200 index = tlb_index(env, mmu_idx, vaddr_page);
1201 te = tlb_entry(env, mmu_idx, vaddr_page);
1202

    /*
     * Hold the TLB lock for the rest of the function.  We could acquire/
     * release the lock several times in the function, but it is faster to
     * amortize the acquisition cost by acquiring it just once.  Note that
     * this leads to a longer critical section, but this is not a concern
     * since the TLB lock is rarely contended.
     */
1210 qemu_spin_lock(&tlb->c.lock);
1211
1212
1213 tlb->c.dirty |= 1 << mmu_idx;
1214
1215
1216 tlb_flush_vtlb_page_locked(env, mmu_idx, vaddr_page);
1217
    /*
     * Only evict the old entry to the victim tlb if it's for a
     * different page; otherwise just overwrite the stale data.
     */
1222 if (!tlb_hit_page_anyprot(te, vaddr_page) && !tlb_entry_is_empty(te)) {
1223 unsigned vidx = desc->vindex++ % CPU_VTLB_SIZE;
1224 CPUTLBEntry *tv = &desc->vtable[vidx];
1225
1226
1227 copy_tlb_helper_locked(tv, te);
1228 desc->vfulltlb[vidx] = desc->fulltlb[index];
1229 tlb_n_used_entries_dec(env, mmu_idx);
1230 }
1231
    /*
     * At this point iotlb contains a physical section number in the lower
     * TARGET_PAGE_BITS, and either
     *  + the ram_addr_t of the page base of the target RAM (RAM)
     *  + the offset within section->mr of the page base (I/O, ROMD)
     * We subtract vaddr_page (which is page aligned and thus won't
     * disturb the low bits) to give an offset which can be added to the
     * (non-page-aligned) vaddr of the eventual memory access to get
     * the MemoryRegion offset for the access.  Note that the vaddr we
     * subtract here is that of the page base, and not the same as the
     * vaddr we add back in io_readx()/io_writex()/get_page_addr_code().
     */
1245 desc->fulltlb[index] = *full;
1246 desc->fulltlb[index].xlat_section = iotlb - vaddr_page;
1247 desc->fulltlb[index].phys_addr = paddr_page;
1248 desc->fulltlb[index].prot = prot;
1249
1250
1251 tn.addend = addend - vaddr_page;
1252 if (prot & PAGE_READ) {
1253 tn.addr_read = address;
1254 if (wp_flags & BP_MEM_READ) {
1255 tn.addr_read |= TLB_WATCHPOINT;
1256 }
1257 } else {
1258 tn.addr_read = -1;
1259 }
1260
1261 if (prot & PAGE_EXEC) {
1262 tn.addr_code = address;
1263 } else {
1264 tn.addr_code = -1;
1265 }
1266
1267 tn.addr_write = -1;
1268 if (prot & PAGE_WRITE) {
1269 tn.addr_write = write_address;
1270 if (prot & PAGE_WRITE_INV) {
1271 tn.addr_write |= TLB_INVALID_MASK;
1272 }
1273 if (wp_flags & BP_MEM_WRITE) {
1274 tn.addr_write |= TLB_WATCHPOINT;
1275 }
1276 }
1277
1278 copy_tlb_helper_locked(te, &tn);
1279 tlb_n_used_entries_inc(env, mmu_idx);
1280 qemu_spin_unlock(&tlb->c.lock);
1281}
1282
1283void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
1284 hwaddr paddr, MemTxAttrs attrs, int prot,
1285 int mmu_idx, target_ulong size)
1286{
1287 CPUTLBEntryFull full = {
1288 .phys_addr = paddr,
1289 .attrs = attrs,
1290 .prot = prot,
1291 .lg_page_size = ctz64(size)
1292 };
1293
1294 assert(is_power_of_2(size));
1295 tlb_set_page_full(cpu, mmu_idx, vaddr, &full);
1296}
1297
1298void tlb_set_page(CPUState *cpu, target_ulong vaddr,
1299 hwaddr paddr, int prot,
1300 int mmu_idx, target_ulong size)
1301{
1302 tlb_set_page_with_attrs(cpu, vaddr, paddr, MEMTXATTRS_UNSPECIFIED,
1303 prot, mmu_idx, size);
1304}
1305
/*
 * Note: tlb_fill() can trigger a resize of the TLB.  This means that all of
 * the caller's prior references to the TLB table (e.g. CPUTLBEntry pointers)
 * must be discarded and looked up again (e.g. via tlb_entry()).
 */
1311static void tlb_fill(CPUState *cpu, target_ulong addr, int size,
1312 MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
1313{
1314 bool ok;
1315
    /*
     * This is not a probe, so only valid return is success; failure
     * should result in exception + longjmp to the cpu loop.
     */
1320 ok = cpu->cc->tcg_ops->tlb_fill(cpu, addr, size,
1321 access_type, mmu_idx, false, retaddr);
1322 assert(ok);
1323}
1324
1325static inline void cpu_unaligned_access(CPUState *cpu, vaddr addr,
1326 MMUAccessType access_type,
1327 int mmu_idx, uintptr_t retaddr)
1328{
1329 cpu->cc->tcg_ops->do_unaligned_access(cpu, addr, access_type,
1330 mmu_idx, retaddr);
1331}
1332
1333static inline void cpu_transaction_failed(CPUState *cpu, hwaddr physaddr,
1334 vaddr addr, unsigned size,
1335 MMUAccessType access_type,
1336 int mmu_idx, MemTxAttrs attrs,
1337 MemTxResult response,
1338 uintptr_t retaddr)
1339{
1340 CPUClass *cc = CPU_GET_CLASS(cpu);
1341
1342 if (!cpu->ignore_memory_transaction_failures &&
1343 cc->tcg_ops->do_transaction_failed) {
1344 cc->tcg_ops->do_transaction_failed(cpu, physaddr, addr, size,
1345 access_type, mmu_idx, attrs,
1346 response, retaddr);
1347 }
1348}
1349
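/* Perform an MMIO load for a TLB_MMIO entry, taking the iothread lock if not already held. */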
1350static uint64_t io_readx(CPUArchState *env, CPUTLBEntryFull *full,
1351 int mmu_idx, target_ulong addr, uintptr_t retaddr,
1352 MMUAccessType access_type, MemOp op)
1353{
1354 CPUState *cpu = env_cpu(env);
1355 hwaddr mr_offset;
1356 MemoryRegionSection *section;
1357 MemoryRegion *mr;
1358 uint64_t val;
1359 bool locked = false;
1360 MemTxResult r;
1361
1362 section = iotlb_to_section(cpu, full->xlat_section, full->attrs);
1363 mr = section->mr;
1364 mr_offset = (full->xlat_section & TARGET_PAGE_MASK) + addr;
1365 cpu->mem_io_pc = retaddr;
1366 if (!cpu->can_do_io) {
1367 cpu_io_recompile(cpu, retaddr);
1368 }
1369
1370 if (!qemu_mutex_iothread_locked()) {
1371 qemu_mutex_lock_iothread();
1372 locked = true;
1373 }
1374 r = memory_region_dispatch_read(mr, mr_offset, &val, op, full->attrs);
1375 if (r != MEMTX_OK) {
1376 hwaddr physaddr = mr_offset +
1377 section->offset_within_address_space -
1378 section->offset_within_region;
1379
1380 cpu_transaction_failed(cpu, physaddr, addr, memop_size(op), access_type,
1381 mmu_idx, full->attrs, r, retaddr);
1382 }
1383 if (locked) {
1384 qemu_mutex_unlock_iothread();
1385 }
1386
1387 return val;
1388}
1389
/*
 * Save a potentially trashed CPUTLBEntryFull for later lookup by plugin.
 * This is read by tlb_plugin_lookup if the fulltlb entry doesn't match
 * the memory access being instrumented, e.g. because the MMIO dispatch
 * that follows triggered a TLB flush or resize.
 */
1395static void save_iotlb_data(CPUState *cs, MemoryRegionSection *section,
1396 hwaddr mr_offset)
1397{
1398#ifdef CONFIG_PLUGIN
1399 SavedIOTLB *saved = &cs->saved_iotlb;
1400 saved->section = section;
1401 saved->mr_offset = mr_offset;
1402#endif
1403}
1404
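/* Perform an MMIO store for a TLB_MMIO entry, taking the iothread lock if not already held. */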
1405static void io_writex(CPUArchState *env, CPUTLBEntryFull *full,
1406 int mmu_idx, uint64_t val, target_ulong addr,
1407 uintptr_t retaddr, MemOp op)
1408{
1409 CPUState *cpu = env_cpu(env);
1410 hwaddr mr_offset;
1411 MemoryRegionSection *section;
1412 MemoryRegion *mr;
1413 bool locked = false;
1414 MemTxResult r;
1415
1416 section = iotlb_to_section(cpu, full->xlat_section, full->attrs);
1417 mr = section->mr;
1418 mr_offset = (full->xlat_section & TARGET_PAGE_MASK) + addr;
1419 if (!cpu->can_do_io) {
1420 cpu_io_recompile(cpu, retaddr);
1421 }
1422 cpu->mem_io_pc = retaddr;
1423
    /*
     * The memory_region_dispatch may trigger a flush/resize
     * so for plugins we save the iotlb_data just in case.
     */
1428 save_iotlb_data(cpu, section, mr_offset);
1429
1430 if (!qemu_mutex_iothread_locked()) {
1431 qemu_mutex_lock_iothread();
1432 locked = true;
1433 }
1434 r = memory_region_dispatch_write(mr, mr_offset, val, op, full->attrs);
1435 if (r != MEMTX_OK) {
1436 hwaddr physaddr = mr_offset +
1437 section->offset_within_address_space -
1438 section->offset_within_region;
1439
1440 cpu_transaction_failed(cpu, physaddr, addr, memop_size(op),
1441 MMU_DATA_STORE, mmu_idx, full->attrs, r,
1442 retaddr);
1443 }
1444 if (locked) {
1445 qemu_mutex_unlock_iothread();
1446 }
1447}
1448
1449static inline target_ulong tlb_read_ofs(CPUTLBEntry *entry, size_t ofs)
1450{
1451#if TCG_OVERSIZED_GUEST
1452 return *(target_ulong *)((uintptr_t)entry + ofs);
1453#else
1454
1455 return qatomic_read((target_ulong *)((uintptr_t)entry + ofs));
1456#endif
1457}
1458
/* Return true if ADDR is present in the victim tlb, and has been copied
   back to the main tlb.  */
1461static bool victim_tlb_hit(CPUArchState *env, size_t mmu_idx, size_t index,
1462 size_t elt_ofs, target_ulong page)
1463{
1464 size_t vidx;
1465
1466 assert_cpu_is_self(env_cpu(env));
1467 for (vidx = 0; vidx < CPU_VTLB_SIZE; ++vidx) {
1468 CPUTLBEntry *vtlb = &env_tlb(env)->d[mmu_idx].vtable[vidx];
1469 target_ulong cmp;
1470
1471
1472#if TCG_OVERSIZED_GUEST
1473 cmp = *(target_ulong *)((uintptr_t)vtlb + elt_ofs);
1474#else
1475 cmp = qatomic_read((target_ulong *)((uintptr_t)vtlb + elt_ofs));
1476#endif
1477
1478 if (cmp == page) {
1479
1480 CPUTLBEntry tmptlb, *tlb = &env_tlb(env)->f[mmu_idx].table[index];
1481
1482 qemu_spin_lock(&env_tlb(env)->c.lock);
1483 copy_tlb_helper_locked(&tmptlb, tlb);
1484 copy_tlb_helper_locked(tlb, vtlb);
1485 copy_tlb_helper_locked(vtlb, &tmptlb);
1486 qemu_spin_unlock(&env_tlb(env)->c.lock);
1487
1488 CPUTLBEntryFull *f1 = &env_tlb(env)->d[mmu_idx].fulltlb[index];
1489 CPUTLBEntryFull *f2 = &env_tlb(env)->d[mmu_idx].vfulltlb[vidx];
1490 CPUTLBEntryFull tmpf;
1491 tmpf = *f1; *f1 = *f2; *f2 = tmpf;
1492 return true;
1493 }
1494 }
1495 return false;
1496}
1497
1498
1499#define VICTIM_TLB_HIT(TY, ADDR) \
1500 victim_tlb_hit(env, mmu_idx, index, offsetof(CPUTLBEntry, TY), \
1501 (ADDR) & TARGET_PAGE_MASK)
1502
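/*
 * Handle a write to a page with TLB_NOTDIRTY set: invalidate any TBs on
 * the page, mark the range dirty and, once the page is dirty for all
 * clients, remove the TLB_NOTDIRTY slow path via tlb_set_dirty().
 */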
1503static void notdirty_write(CPUState *cpu, vaddr mem_vaddr, unsigned size,
1504 CPUTLBEntryFull *full, uintptr_t retaddr)
1505{
1506 ram_addr_t ram_addr = mem_vaddr + full->xlat_section;
1507
1508 trace_memory_notdirty_write_access(mem_vaddr, ram_addr, size);
1509
1510 if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
1511 struct page_collection *pages
1512 = page_collection_lock(ram_addr, ram_addr + size);
1513 tb_invalidate_phys_page_fast(pages, ram_addr, size, retaddr);
1514 page_collection_unlock(pages);
1515 }
1516
    /*
     * Set both VGA and migration bits for simplicity and to remove
     * the notdirty callback faster.
     */
1521 cpu_physical_memory_set_dirty_range(ram_addr, size, DIRTY_CLIENTS_NOCODE);
1522
1523
1524 if (!cpu_physical_memory_is_clean(ram_addr)) {
1525 trace_memory_notdirty_set_dirty(mem_vaddr);
1526 tlb_set_dirty(cpu, mem_vaddr);
1527 }
1528}
1529
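/*
 * Look up @addr for @access_type without performing the memory access.
 * Returns the TLB flags for the page and sets *@phost / *@pfull; when
 * @nonfault is set, a failed page-table walk returns TLB_INVALID_MASK
 * instead of raising an exception.
 */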
1530static int probe_access_internal(CPUArchState *env, target_ulong addr,
1531 int fault_size, MMUAccessType access_type,
1532 int mmu_idx, bool nonfault,
1533 void **phost, CPUTLBEntryFull **pfull,
1534 uintptr_t retaddr)
1535{
1536 uintptr_t index = tlb_index(env, mmu_idx, addr);
1537 CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
1538 target_ulong tlb_addr, page_addr;
1539 size_t elt_ofs;
1540 int flags;
1541
1542 switch (access_type) {
1543 case MMU_DATA_LOAD:
1544 elt_ofs = offsetof(CPUTLBEntry, addr_read);
1545 break;
1546 case MMU_DATA_STORE:
1547 elt_ofs = offsetof(CPUTLBEntry, addr_write);
1548 break;
1549 case MMU_INST_FETCH:
1550 elt_ofs = offsetof(CPUTLBEntry, addr_code);
1551 break;
1552 default:
1553 g_assert_not_reached();
1554 }
1555 tlb_addr = tlb_read_ofs(entry, elt_ofs);
1556
1557 flags = TLB_FLAGS_MASK;
1558 page_addr = addr & TARGET_PAGE_MASK;
1559 if (!tlb_hit_page(tlb_addr, page_addr)) {
1560 if (!victim_tlb_hit(env, mmu_idx, index, elt_ofs, page_addr)) {
1561 CPUState *cs = env_cpu(env);
1562
1563 if (!cs->cc->tcg_ops->tlb_fill(cs, addr, fault_size, access_type,
1564 mmu_idx, nonfault, retaddr)) {
1565
1566 *phost = NULL;
1567 *pfull = NULL;
1568 return TLB_INVALID_MASK;
1569 }
1570
1571
1572 index = tlb_index(env, mmu_idx, addr);
1573 entry = tlb_entry(env, mmu_idx, addr);
1574
1575
1576
1577
1578
1579
1580 flags &= ~TLB_INVALID_MASK;
1581 }
1582 tlb_addr = tlb_read_ofs(entry, elt_ofs);
1583 }
1584 flags &= tlb_addr;
1585
1586 *pfull = &env_tlb(env)->d[mmu_idx].fulltlb[index];
1587
1588
1589 if (unlikely(flags & ~(TLB_WATCHPOINT | TLB_NOTDIRTY))) {
1590 *phost = NULL;
1591 return TLB_MMIO;
1592 }
1593
1594
1595 *phost = (void *)((uintptr_t)addr + entry->addend);
1596 return flags;
1597}
1598
1599int probe_access_full(CPUArchState *env, target_ulong addr,
1600 MMUAccessType access_type, int mmu_idx,
1601 bool nonfault, void **phost, CPUTLBEntryFull **pfull,
1602 uintptr_t retaddr)
1603{
1604 int flags = probe_access_internal(env, addr, 0, access_type, mmu_idx,
1605 nonfault, phost, pfull, retaddr);
1606
1607
1608 if (unlikely(flags & TLB_NOTDIRTY)) {
1609 notdirty_write(env_cpu(env), addr, 1, *pfull, retaddr);
1610 flags &= ~TLB_NOTDIRTY;
1611 }
1612
1613 return flags;
1614}
1615
1616int probe_access_flags(CPUArchState *env, target_ulong addr,
1617 MMUAccessType access_type, int mmu_idx,
1618 bool nonfault, void **phost, uintptr_t retaddr)
1619{
1620 CPUTLBEntryFull *full;
1621
1622 return probe_access_full(env, addr, access_type, mmu_idx,
1623 nonfault, phost, &full, retaddr);
1624}
1625
1626void *probe_access(CPUArchState *env, target_ulong addr, int size,
1627 MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
1628{
1629 CPUTLBEntryFull *full;
1630 void *host;
1631 int flags;
1632
1633 g_assert(-(addr | TARGET_PAGE_MASK) >= size);
1634
1635 flags = probe_access_internal(env, addr, size, access_type, mmu_idx,
1636 false, &host, &full, retaddr);
1637
1638
1639 if (size == 0) {
1640 return NULL;
1641 }
1642
1643 if (unlikely(flags & (TLB_NOTDIRTY | TLB_WATCHPOINT))) {
1644
1645 if (flags & TLB_WATCHPOINT) {
1646 int wp_access = (access_type == MMU_DATA_STORE
1647 ? BP_MEM_WRITE : BP_MEM_READ);
1648 cpu_check_watchpoint(env_cpu(env), addr, size,
1649 full->attrs, wp_access, retaddr);
1650 }
1651
1652
1653 if (flags & TLB_NOTDIRTY) {
1654 notdirty_write(env_cpu(env), addr, 1, full, retaddr);
1655 }
1656 }
1657
1658 return host;
1659}
1660
1661void *tlb_vaddr_to_host(CPUArchState *env, abi_ptr addr,
1662 MMUAccessType access_type, int mmu_idx)
1663{
1664 CPUTLBEntryFull *full;
1665 void *host;
1666 int flags;
1667
1668 flags = probe_access_internal(env, addr, 0, access_type,
1669 mmu_idx, true, &host, &full, 0);
1670
1671
1672 return flags ? NULL : host;
1673}
1674
/*
 * get_page_addr_code_hostp()
 * @env: CPUArchState
 * @addr: guest virtual address of guest code
 *
 * Return the ram_addr_t corresponding to guest code at @addr, or -1 if
 * @addr is not backed by executable RAM.  When @hostp is non-NULL and the
 * lookup succeeds, *@hostp is set to the host address of @addr's contents.
 */
1685tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, target_ulong addr,
1686 void **hostp)
1687{
1688 CPUTLBEntryFull *full;
1689 void *p;
1690
1691 (void)probe_access_internal(env, addr, 1, MMU_INST_FETCH,
1692 cpu_mmu_index(env, true), false, &p, &full, 0);
1693 if (p == NULL) {
1694 return -1;
1695 }
1696 if (hostp) {
1697 *hostp = p;
1698 }
1699 return qemu_ram_addr_from_host_nofail(p);
1700}
1701
1702#ifdef CONFIG_PLUGIN

/*
 * Return info about the TLB entry for an address, for use by plugins.
 * This should almost never fail, since the access being instrumented has
 * just filled the TLB.  The one corner case is io_writex(), which can
 * trigger a TLB flush or resize and lose the entry; in that case we
 * recover the data from the copy saved in cpu->saved_iotlb by io_writex().
 */
1717bool tlb_plugin_lookup(CPUState *cpu, target_ulong addr, int mmu_idx,
1718 bool is_store, struct qemu_plugin_hwaddr *data)
1719{
1720 CPUArchState *env = cpu->env_ptr;
1721 CPUTLBEntry *tlbe = tlb_entry(env, mmu_idx, addr);
1722 uintptr_t index = tlb_index(env, mmu_idx, addr);
1723 target_ulong tlb_addr = is_store ? tlb_addr_write(tlbe) : tlbe->addr_read;
1724
1725 if (likely(tlb_hit(tlb_addr, addr))) {
1726
1727 if (tlb_addr & TLB_MMIO) {
1728 CPUTLBEntryFull *full;
1729 full = &env_tlb(env)->d[mmu_idx].fulltlb[index];
1730 data->is_io = true;
1731 data->v.io.section =
1732 iotlb_to_section(cpu, full->xlat_section, full->attrs);
1733 data->v.io.offset = (full->xlat_section & TARGET_PAGE_MASK) + addr;
1734 } else {
1735 data->is_io = false;
1736 data->v.ram.hostaddr = (void *)((uintptr_t)addr + tlbe->addend);
1737 }
1738 return true;
1739 } else {
1740 SavedIOTLB *saved = &cpu->saved_iotlb;
1741 data->is_io = true;
1742 data->v.io.section = saved->section;
1743 data->v.io.offset = saved->mr_offset;
1744 return true;
1745 }
1746}
1747
1748#endif
1749
/*
 * Probe for an atomic operation.  Do not allow unaligned operations,
 * or io operations to proceed.  Return the host address.
 *
 * @prot may be PAGE_READ, PAGE_WRITE, or PAGE_READ|PAGE_WRITE.
 */
1756static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
1757 MemOpIdx oi, int size, int prot,
1758 uintptr_t retaddr)
1759{
1760 uintptr_t mmu_idx = get_mmuidx(oi);
1761 MemOp mop = get_memop(oi);
1762 int a_bits = get_alignment_bits(mop);
1763 uintptr_t index;
1764 CPUTLBEntry *tlbe;
1765 target_ulong tlb_addr;
1766 void *hostaddr;
1767
1768 tcg_debug_assert(mmu_idx < NB_MMU_MODES);
1769
1770
1771 retaddr -= GETPC_ADJ;
1772
1773
1774 if (unlikely(a_bits > 0 && (addr & ((1 << a_bits) - 1)))) {
1775
1776 cpu_unaligned_access(env_cpu(env), addr, MMU_DATA_STORE,
1777 mmu_idx, retaddr);
1778 }
1779
1780
1781 if (unlikely(addr & (size - 1))) {
1782
1783
1784
1785
1786 goto stop_the_world;
1787 }
1788
1789 index = tlb_index(env, mmu_idx, addr);
1790 tlbe = tlb_entry(env, mmu_idx, addr);
1791
1792
1793 if (prot & PAGE_WRITE) {
1794 tlb_addr = tlb_addr_write(tlbe);
1795 if (!tlb_hit(tlb_addr, addr)) {
1796 if (!VICTIM_TLB_HIT(addr_write, addr)) {
1797 tlb_fill(env_cpu(env), addr, size,
1798 MMU_DATA_STORE, mmu_idx, retaddr);
1799 index = tlb_index(env, mmu_idx, addr);
1800 tlbe = tlb_entry(env, mmu_idx, addr);
1801 }
1802 tlb_addr = tlb_addr_write(tlbe) & ~TLB_INVALID_MASK;
1803 }
1804
1805
1806 if ((prot & PAGE_READ) &&
1807 unlikely(tlbe->addr_read != (tlb_addr & ~TLB_NOTDIRTY))) {
1808 tlb_fill(env_cpu(env), addr, size,
1809 MMU_DATA_LOAD, mmu_idx, retaddr);
1810
1811
1812
1813
1814
1815 goto stop_the_world;
1816 }
1817 } else {
1818 tlb_addr = tlbe->addr_read;
1819 if (!tlb_hit(tlb_addr, addr)) {
1820 if (!VICTIM_TLB_HIT(addr_write, addr)) {
1821 tlb_fill(env_cpu(env), addr, size,
1822 MMU_DATA_LOAD, mmu_idx, retaddr);
1823 index = tlb_index(env, mmu_idx, addr);
1824 tlbe = tlb_entry(env, mmu_idx, addr);
1825 }
1826 tlb_addr = tlbe->addr_read & ~TLB_INVALID_MASK;
1827 }
1828 }
1829
1830
1831 if (unlikely(tlb_addr & TLB_MMIO)) {
1832
1833
1834 goto stop_the_world;
1835 }
1836
1837 hostaddr = (void *)((uintptr_t)addr + tlbe->addend);
1838
1839 if (unlikely(tlb_addr & TLB_NOTDIRTY)) {
1840 notdirty_write(env_cpu(env), addr, size,
1841 &env_tlb(env)->d[mmu_idx].fulltlb[index], retaddr);
1842 }
1843
1844 return hostaddr;
1845
1846 stop_the_world:
1847 cpu_loop_exit_atomic(env_cpu(env), retaddr);
1848}
1849
/*
 * Verify that we have passed the correct MemOp to the correct function.
 *
 * In the case of the helper_*_mmu functions the MemOp is implied by the
 * function itself; the cpu_*_mmu functions receive a MemOpIdx from the
 * caller, so check (in debug builds) that its size and endianness match.
 */
1861static void validate_memop(MemOpIdx oi, MemOp expected)
1862{
1863#ifdef CONFIG_DEBUG_TCG
1864 MemOp have = get_memop(oi) & (MO_SIZE | MO_BSWAP);
1865 assert(have == expected);
1866#endif
1867}
1868
/*
 * Load Helpers
 *
 * We support two different access types.  SOFTMMU_CODE_ACCESS is
 * specifically for reading instructions from system memory.  It is
 * called by the translation loop and in some helpers where the code
 * is disassembled.  It shouldn't be called directly by guest code.
 */
1878typedef uint64_t FullLoadHelper(CPUArchState *env, target_ulong addr,
1879 MemOpIdx oi, uintptr_t retaddr);
1880
1881static inline uint64_t QEMU_ALWAYS_INLINE
1882load_memop(const void *haddr, MemOp op)
1883{
1884 switch (op) {
1885 case MO_UB:
1886 return ldub_p(haddr);
1887 case MO_BEUW:
1888 return lduw_be_p(haddr);
1889 case MO_LEUW:
1890 return lduw_le_p(haddr);
1891 case MO_BEUL:
1892 return (uint32_t)ldl_be_p(haddr);
1893 case MO_LEUL:
1894 return (uint32_t)ldl_le_p(haddr);
1895 case MO_BEUQ:
1896 return ldq_be_p(haddr);
1897 case MO_LEUQ:
1898 return ldq_le_p(haddr);
1899 default:
1900 qemu_build_not_reached();
1901 }
1902}
1903
1904static inline uint64_t QEMU_ALWAYS_INLINE
1905load_helper(CPUArchState *env, target_ulong addr, MemOpIdx oi,
1906 uintptr_t retaddr, MemOp op, bool code_read,
1907 FullLoadHelper *full_load)
1908{
1909 const size_t tlb_off = code_read ?
1910 offsetof(CPUTLBEntry, addr_code) : offsetof(CPUTLBEntry, addr_read);
1911 const MMUAccessType access_type =
1912 code_read ? MMU_INST_FETCH : MMU_DATA_LOAD;
1913 const unsigned a_bits = get_alignment_bits(get_memop(oi));
1914 const size_t size = memop_size(op);
1915 uintptr_t mmu_idx = get_mmuidx(oi);
1916 uintptr_t index;
1917 CPUTLBEntry *entry;
1918 target_ulong tlb_addr;
1919 void *haddr;
1920 uint64_t res;
1921
1922 tcg_debug_assert(mmu_idx < NB_MMU_MODES);
1923
1924
1925 if (addr & ((1 << a_bits) - 1)) {
1926 cpu_unaligned_access(env_cpu(env), addr, access_type,
1927 mmu_idx, retaddr);
1928 }
1929
1930 index = tlb_index(env, mmu_idx, addr);
1931 entry = tlb_entry(env, mmu_idx, addr);
1932 tlb_addr = code_read ? entry->addr_code : entry->addr_read;
1933
1934
1935 if (!tlb_hit(tlb_addr, addr)) {
1936 if (!victim_tlb_hit(env, mmu_idx, index, tlb_off,
1937 addr & TARGET_PAGE_MASK)) {
1938 tlb_fill(env_cpu(env), addr, size,
1939 access_type, mmu_idx, retaddr);
1940 index = tlb_index(env, mmu_idx, addr);
1941 entry = tlb_entry(env, mmu_idx, addr);
1942 }
1943 tlb_addr = code_read ? entry->addr_code : entry->addr_read;
1944 tlb_addr &= ~TLB_INVALID_MASK;
1945 }
1946
1947
1948 if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
1949 CPUTLBEntryFull *full;
1950 bool need_swap;
1951
1952
1953 if ((addr & (size - 1)) != 0) {
1954 goto do_unaligned_access;
1955 }
1956
1957 full = &env_tlb(env)->d[mmu_idx].fulltlb[index];
1958
1959
1960 if (unlikely(tlb_addr & TLB_WATCHPOINT)) {
1961
1962 cpu_check_watchpoint(env_cpu(env), addr, size,
1963 full->attrs, BP_MEM_READ, retaddr);
1964 }
1965
1966 need_swap = size > 1 && (tlb_addr & TLB_BSWAP);
1967
1968
1969 if (likely(tlb_addr & TLB_MMIO)) {
1970 return io_readx(env, full, mmu_idx, addr, retaddr,
1971 access_type, op ^ (need_swap * MO_BSWAP));
1972 }
1973
1974 haddr = (void *)((uintptr_t)addr + entry->addend);
1975
        /*
         * Keep these two load_memop separate to ensure that the compiler
         * is able to fold the entire function to a single instruction.
         * There is a build-time assert inside to remind you of this.  ;-)
         */
1981 if (unlikely(need_swap)) {
1982 return load_memop(haddr, op ^ MO_BSWAP);
1983 }
1984 return load_memop(haddr, op);
1985 }
1986
1987
1988 if (size > 1
1989 && unlikely((addr & ~TARGET_PAGE_MASK) + size - 1
1990 >= TARGET_PAGE_SIZE)) {
1991 target_ulong addr1, addr2;
1992 uint64_t r1, r2;
1993 unsigned shift;
1994 do_unaligned_access:
1995 addr1 = addr & ~((target_ulong)size - 1);
1996 addr2 = addr1 + size;
1997 r1 = full_load(env, addr1, oi, retaddr);
1998 r2 = full_load(env, addr2, oi, retaddr);
1999 shift = (addr & (size - 1)) * 8;
2000
2001 if (memop_big_endian(op)) {
2002
2003 res = (r1 << shift) | (r2 >> ((size * 8) - shift));
2004 } else {
2005
2006 res = (r1 >> shift) | (r2 << ((size * 8) - shift));
2007 }
2008 return res & MAKE_64BIT_MASK(0, size * 8);
2009 }
2010
2011 haddr = (void *)((uintptr_t)addr + entry->addend);
2012 return load_memop(haddr, op);
2013}
2014
/*
 * For the benefit of TCG generated code, we want to avoid the
 * complication of ABI-specific return type promotion and always
 * return a value extended to the register size of the host.  This is
 * tcg_target_long, except in the case of a 32-bit host and 64-bit
 * data, and for that we always have uint64_t.
 *
 * We don't bother with this widened value for SOFTMMU_CODE_ACCESS.
 */
2025static uint64_t full_ldub_mmu(CPUArchState *env, target_ulong addr,
2026 MemOpIdx oi, uintptr_t retaddr)
2027{
2028 validate_memop(oi, MO_UB);
2029 return load_helper(env, addr, oi, retaddr, MO_UB, false, full_ldub_mmu);
2030}
2031
2032tcg_target_ulong helper_ret_ldub_mmu(CPUArchState *env, target_ulong addr,
2033 MemOpIdx oi, uintptr_t retaddr)
2034{
2035 return full_ldub_mmu(env, addr, oi, retaddr);
2036}
2037
2038static uint64_t full_le_lduw_mmu(CPUArchState *env, target_ulong addr,
2039 MemOpIdx oi, uintptr_t retaddr)
2040{
2041 validate_memop(oi, MO_LEUW);
2042 return load_helper(env, addr, oi, retaddr, MO_LEUW, false,
2043 full_le_lduw_mmu);
2044}
2045
2046tcg_target_ulong helper_le_lduw_mmu(CPUArchState *env, target_ulong addr,
2047 MemOpIdx oi, uintptr_t retaddr)
2048{
2049 return full_le_lduw_mmu(env, addr, oi, retaddr);
2050}
2051
2052static uint64_t full_be_lduw_mmu(CPUArchState *env, target_ulong addr,
2053 MemOpIdx oi, uintptr_t retaddr)
2054{
2055 validate_memop(oi, MO_BEUW);
2056 return load_helper(env, addr, oi, retaddr, MO_BEUW, false,
2057 full_be_lduw_mmu);
2058}
2059
2060tcg_target_ulong helper_be_lduw_mmu(CPUArchState *env, target_ulong addr,
2061 MemOpIdx oi, uintptr_t retaddr)
2062{
2063 return full_be_lduw_mmu(env, addr, oi, retaddr);
2064}
2065
2066static uint64_t full_le_ldul_mmu(CPUArchState *env, target_ulong addr,
2067 MemOpIdx oi, uintptr_t retaddr)
2068{
2069 validate_memop(oi, MO_LEUL);
2070 return load_helper(env, addr, oi, retaddr, MO_LEUL, false,
2071 full_le_ldul_mmu);
2072}
2073
2074tcg_target_ulong helper_le_ldul_mmu(CPUArchState *env, target_ulong addr,
2075 MemOpIdx oi, uintptr_t retaddr)
2076{
2077 return full_le_ldul_mmu(env, addr, oi, retaddr);
2078}
2079
2080static uint64_t full_be_ldul_mmu(CPUArchState *env, target_ulong addr,
2081 MemOpIdx oi, uintptr_t retaddr)
2082{
2083 validate_memop(oi, MO_BEUL);
2084 return load_helper(env, addr, oi, retaddr, MO_BEUL, false,
2085 full_be_ldul_mmu);
2086}
2087
2088tcg_target_ulong helper_be_ldul_mmu(CPUArchState *env, target_ulong addr,
2089 MemOpIdx oi, uintptr_t retaddr)
2090{
2091 return full_be_ldul_mmu(env, addr, oi, retaddr);
2092}
2093
2094uint64_t helper_le_ldq_mmu(CPUArchState *env, target_ulong addr,
2095 MemOpIdx oi, uintptr_t retaddr)
2096{
2097 validate_memop(oi, MO_LEUQ);
2098 return load_helper(env, addr, oi, retaddr, MO_LEUQ, false,
2099 helper_le_ldq_mmu);
2100}
2101
2102uint64_t helper_be_ldq_mmu(CPUArchState *env, target_ulong addr,
2103 MemOpIdx oi, uintptr_t retaddr)
2104{
2105 validate_memop(oi, MO_BEUQ);
2106 return load_helper(env, addr, oi, retaddr, MO_BEUQ, false,
2107 helper_be_ldq_mmu);
2108}
2109
2110
/*
 * Provide signed versions of the load routines as well.  We can of course
 * avoid this for 64-bit data, or for 32-bit data on 32-bit host.
 */
2116tcg_target_ulong helper_ret_ldsb_mmu(CPUArchState *env, target_ulong addr,
2117 MemOpIdx oi, uintptr_t retaddr)
2118{
2119 return (int8_t)helper_ret_ldub_mmu(env, addr, oi, retaddr);
2120}
2121
2122tcg_target_ulong helper_le_ldsw_mmu(CPUArchState *env, target_ulong addr,
2123 MemOpIdx oi, uintptr_t retaddr)
2124{
2125 return (int16_t)helper_le_lduw_mmu(env, addr, oi, retaddr);
2126}
2127
2128tcg_target_ulong helper_be_ldsw_mmu(CPUArchState *env, target_ulong addr,
2129 MemOpIdx oi, uintptr_t retaddr)
2130{
2131 return (int16_t)helper_be_lduw_mmu(env, addr, oi, retaddr);
2132}
2133
2134tcg_target_ulong helper_le_ldsl_mmu(CPUArchState *env, target_ulong addr,
2135 MemOpIdx oi, uintptr_t retaddr)
2136{
2137 return (int32_t)helper_le_ldul_mmu(env, addr, oi, retaddr);
2138}
2139
2140tcg_target_ulong helper_be_ldsl_mmu(CPUArchState *env, target_ulong addr,
2141 MemOpIdx oi, uintptr_t retaddr)
2142{
2143 return (int32_t)helper_be_ldul_mmu(env, addr, oi, retaddr);
2144}
2145
2146
2147
2148
2149
2150static inline uint64_t cpu_load_helper(CPUArchState *env, abi_ptr addr,
2151 MemOpIdx oi, uintptr_t retaddr,
2152 FullLoadHelper *full_load)
2153{
2154 uint64_t ret;
2155
2156 ret = full_load(env, addr, oi, retaddr);
2157 qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
2158 return ret;
2159}
2160
2161uint8_t cpu_ldb_mmu(CPUArchState *env, abi_ptr addr, MemOpIdx oi, uintptr_t ra)
2162{
2163 return cpu_load_helper(env, addr, oi, ra, full_ldub_mmu);
2164}
2165
2166uint16_t cpu_ldw_be_mmu(CPUArchState *env, abi_ptr addr,
2167 MemOpIdx oi, uintptr_t ra)
2168{
2169 return cpu_load_helper(env, addr, oi, ra, full_be_lduw_mmu);
2170}
2171
2172uint32_t cpu_ldl_be_mmu(CPUArchState *env, abi_ptr addr,
2173 MemOpIdx oi, uintptr_t ra)
2174{
2175 return cpu_load_helper(env, addr, oi, ra, full_be_ldul_mmu);
2176}
2177
2178uint64_t cpu_ldq_be_mmu(CPUArchState *env, abi_ptr addr,
2179 MemOpIdx oi, uintptr_t ra)
2180{
2181 return cpu_load_helper(env, addr, oi, ra, helper_be_ldq_mmu);
2182}
2183
2184uint16_t cpu_ldw_le_mmu(CPUArchState *env, abi_ptr addr,
2185 MemOpIdx oi, uintptr_t ra)
2186{
2187 return cpu_load_helper(env, addr, oi, ra, full_le_lduw_mmu);
2188}
2189
2190uint32_t cpu_ldl_le_mmu(CPUArchState *env, abi_ptr addr,
2191 MemOpIdx oi, uintptr_t ra)
2192{
2193 return cpu_load_helper(env, addr, oi, ra, full_le_ldul_mmu);
2194}
2195
2196uint64_t cpu_ldq_le_mmu(CPUArchState *env, abi_ptr addr,
2197 MemOpIdx oi, uintptr_t ra)
2198{
2199 return cpu_load_helper(env, addr, oi, ra, helper_le_ldq_mmu);
2200}
2201
2202
/*
 * Store Helpers
 */
2206static inline void QEMU_ALWAYS_INLINE
2207store_memop(void *haddr, uint64_t val, MemOp op)
2208{
2209 switch (op) {
2210 case MO_UB:
2211 stb_p(haddr, val);
2212 break;
2213 case MO_BEUW:
2214 stw_be_p(haddr, val);
2215 break;
2216 case MO_LEUW:
2217 stw_le_p(haddr, val);
2218 break;
2219 case MO_BEUL:
2220 stl_be_p(haddr, val);
2221 break;
2222 case MO_LEUL:
2223 stl_le_p(haddr, val);
2224 break;
2225 case MO_BEUQ:
2226 stq_be_p(haddr, val);
2227 break;
2228 case MO_LEUQ:
2229 stq_le_p(haddr, val);
2230 break;
2231 default:
2232 qemu_build_not_reached();
2233 }
2234}
2235
2236static void full_stb_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
2237 MemOpIdx oi, uintptr_t retaddr);
2238
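/*
 * Slow path for a store that crosses a page boundary: make sure both
 * pages are present in the TLB, check watchpoints on each, then store
 * the value byte by byte.
 */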
2239static void __attribute__((noinline))
2240store_helper_unaligned(CPUArchState *env, target_ulong addr, uint64_t val,
2241 uintptr_t retaddr, size_t size, uintptr_t mmu_idx,
2242 bool big_endian)
2243{
2244 const size_t tlb_off = offsetof(CPUTLBEntry, addr_write);
2245 uintptr_t index, index2;
2246 CPUTLBEntry *entry, *entry2;
2247 target_ulong page1, page2, tlb_addr, tlb_addr2;
2248 MemOpIdx oi;
2249 size_t size2;
2250 int i;
2251
2252
2253
2254
2255
2256
2257
2258 page1 = addr & TARGET_PAGE_MASK;
2259 page2 = (addr + size) & TARGET_PAGE_MASK;
2260 size2 = (addr + size) & ~TARGET_PAGE_MASK;
2261 index2 = tlb_index(env, mmu_idx, page2);
2262 entry2 = tlb_entry(env, mmu_idx, page2);
2263
2264 tlb_addr2 = tlb_addr_write(entry2);
2265 if (page1 != page2 && !tlb_hit_page(tlb_addr2, page2)) {
2266 if (!victim_tlb_hit(env, mmu_idx, index2, tlb_off, page2)) {
2267 tlb_fill(env_cpu(env), page2, size2, MMU_DATA_STORE,
2268 mmu_idx, retaddr);
2269 index2 = tlb_index(env, mmu_idx, page2);
2270 entry2 = tlb_entry(env, mmu_idx, page2);
2271 }
2272 tlb_addr2 = tlb_addr_write(entry2);
2273 }
2274
2275 index = tlb_index(env, mmu_idx, addr);
2276 entry = tlb_entry(env, mmu_idx, addr);
2277 tlb_addr = tlb_addr_write(entry);
2278
    /*
     * Handle watchpoints.  Since this may trap, all checks
     * must happen before any store.
     */
2283 if (unlikely(tlb_addr & TLB_WATCHPOINT)) {
2284 cpu_check_watchpoint(env_cpu(env), addr, size - size2,
2285 env_tlb(env)->d[mmu_idx].fulltlb[index].attrs,
2286 BP_MEM_WRITE, retaddr);
2287 }
2288 if (unlikely(tlb_addr2 & TLB_WATCHPOINT)) {
2289 cpu_check_watchpoint(env_cpu(env), page2, size2,
2290 env_tlb(env)->d[mmu_idx].fulltlb[index2].attrs,
2291 BP_MEM_WRITE, retaddr);
2292 }
2293
    /*
     * XXX: not efficient, but simple.
     * This loop must go in the forward direction to avoid issues
     * with self-modifying code in Windows 64-bit.
     */
2299 oi = make_memop_idx(MO_UB, mmu_idx);
2300 if (big_endian) {
2301 for (i = 0; i < size; ++i) {
2302
2303 uint8_t val8 = val >> (((size - 1) * 8) - (i * 8));
2304 full_stb_mmu(env, addr + i, val8, oi, retaddr);
2305 }
2306 } else {
2307 for (i = 0; i < size; ++i) {
2308
2309 uint8_t val8 = val >> (i * 8);
2310 full_stb_mmu(env, addr + i, val8, oi, retaddr);
2311 }
2312 }
2313}
2314
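/*
 * Common store path: TLB lookup (with victim-TLB and tlb_fill fallback),
 * then either MMIO, the notdirty/watchpoint slow paths, or a direct
 * host store.
 */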
static inline void QEMU_ALWAYS_INLINE
store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
             MemOpIdx oi, uintptr_t retaddr, MemOp op)
{
    const size_t tlb_off = offsetof(CPUTLBEntry, addr_write);
    const unsigned a_bits = get_alignment_bits(get_memop(oi));
    const size_t size = memop_size(op);
    uintptr_t mmu_idx = get_mmuidx(oi);
    uintptr_t index;
    CPUTLBEntry *entry;
    target_ulong tlb_addr;
    void *haddr;

    tcg_debug_assert(mmu_idx < NB_MMU_MODES);

    /* Handle CPU specific unaligned behaviour */
    if (addr & ((1 << a_bits) - 1)) {
        cpu_unaligned_access(env_cpu(env), addr, MMU_DATA_STORE,
                             mmu_idx, retaddr);
    }

    index = tlb_index(env, mmu_idx, addr);
    entry = tlb_entry(env, mmu_idx, addr);
    tlb_addr = tlb_addr_write(entry);

    /* If the TLB entry is for a different page, reload and try again.  */
    if (!tlb_hit(tlb_addr, addr)) {
        if (!victim_tlb_hit(env, mmu_idx, index, tlb_off,
                            addr & TARGET_PAGE_MASK)) {
            tlb_fill(env_cpu(env), addr, size, MMU_DATA_STORE,
                     mmu_idx, retaddr);
            index = tlb_index(env, mmu_idx, addr);
            entry = tlb_entry(env, mmu_idx, addr);
        }
        tlb_addr = tlb_addr_write(entry) & ~TLB_INVALID_MASK;
    }

    /* Handle anything that isn't just a straight memory access.  */
    if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
        CPUTLBEntryFull *full;
        bool need_swap;

        /* For anything that is unaligned, recurse through byte stores.  */
        if ((addr & (size - 1)) != 0) {
            goto do_unaligned_access;
        }

        full = &env_tlb(env)->d[mmu_idx].fulltlb[index];

        /* Handle watchpoints.  */
        if (unlikely(tlb_addr & TLB_WATCHPOINT)) {
            /* On watchpoint hit, this will longjmp out.  */
            cpu_check_watchpoint(env_cpu(env), addr, size,
                                 full->attrs, BP_MEM_WRITE, retaddr);
        }

        need_swap = size > 1 && (tlb_addr & TLB_BSWAP);

        /* Handle I/O access.  */
        if (tlb_addr & TLB_MMIO) {
            io_writex(env, full, mmu_idx, val, addr, retaddr,
                      op ^ (need_swap * MO_BSWAP));
            return;
        }

        /* Ignore writes to ROM.  */
        if (unlikely(tlb_addr & TLB_DISCARD_WRITE)) {
            return;
        }

        /* Handle clean RAM pages.  */
        if (tlb_addr & TLB_NOTDIRTY) {
            notdirty_write(env_cpu(env), addr, size, full, retaddr);
        }

        haddr = (void *)((uintptr_t)addr + entry->addend);

        /*
         * Keep these two store_memop calls separate to ensure that the
         * compiler is able to fold the entire function to a single instance.
         */
        if (unlikely(need_swap)) {
            store_memop(haddr, val, op ^ MO_BSWAP);
        } else {
            store_memop(haddr, val, op);
        }
        return;
    }

    /* Handle slow unaligned access (it spans two pages or IO).  */
    if (size > 1
        && unlikely((addr & ~TARGET_PAGE_MASK) + size - 1
                    >= TARGET_PAGE_SIZE)) {
    do_unaligned_access:
        store_helper_unaligned(env, addr, val, retaddr, size,
                               mmu_idx, memop_big_endian(op));
        return;
    }

    haddr = (void *)((uintptr_t)addr + entry->addend);
    store_memop(haddr, val, op);
}

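/*
 * The full_*_mmu functions below pin the MemOp for store_helper; they are
 * shared by the TCG helper_*_mmu entry points and by the cpu_st*_mmu
 * wrappers further down (the 64-bit wrappers reuse helper_*_stq_mmu
 * directly).
 */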
static void __attribute__((noinline))
full_stb_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
             MemOpIdx oi, uintptr_t retaddr)
{
    validate_memop(oi, MO_UB);
    store_helper(env, addr, val, oi, retaddr, MO_UB);
}

void helper_ret_stb_mmu(CPUArchState *env, target_ulong addr, uint8_t val,
                        MemOpIdx oi, uintptr_t retaddr)
{
    full_stb_mmu(env, addr, val, oi, retaddr);
}

static void full_le_stw_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
                            MemOpIdx oi, uintptr_t retaddr)
{
    validate_memop(oi, MO_LEUW);
    store_helper(env, addr, val, oi, retaddr, MO_LEUW);
}

void helper_le_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
                       MemOpIdx oi, uintptr_t retaddr)
{
    full_le_stw_mmu(env, addr, val, oi, retaddr);
}

static void full_be_stw_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
                            MemOpIdx oi, uintptr_t retaddr)
{
    validate_memop(oi, MO_BEUW);
    store_helper(env, addr, val, oi, retaddr, MO_BEUW);
}

void helper_be_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
                       MemOpIdx oi, uintptr_t retaddr)
{
    full_be_stw_mmu(env, addr, val, oi, retaddr);
}

static void full_le_stl_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
                            MemOpIdx oi, uintptr_t retaddr)
{
    validate_memop(oi, MO_LEUL);
    store_helper(env, addr, val, oi, retaddr, MO_LEUL);
}

void helper_le_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
                       MemOpIdx oi, uintptr_t retaddr)
{
    full_le_stl_mmu(env, addr, val, oi, retaddr);
}

static void full_be_stl_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
                            MemOpIdx oi, uintptr_t retaddr)
{
    validate_memop(oi, MO_BEUL);
    store_helper(env, addr, val, oi, retaddr, MO_BEUL);
}

void helper_be_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
                       MemOpIdx oi, uintptr_t retaddr)
{
    full_be_stl_mmu(env, addr, val, oi, retaddr);
}

void helper_le_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
                       MemOpIdx oi, uintptr_t retaddr)
{
    validate_memop(oi, MO_LEUQ);
    store_helper(env, addr, val, oi, retaddr, MO_LEUQ);
}

void helper_be_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
                       MemOpIdx oi, uintptr_t retaddr)
{
    validate_memop(oi, MO_BEUQ);
    store_helper(env, addr, val, oi, retaddr, MO_BEUQ);
}

/*
 * Store Helpers for cpu_ldst.h
 */

typedef void FullStoreHelper(CPUArchState *env, target_ulong addr,
                             uint64_t val, MemOpIdx oi, uintptr_t retaddr);

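/* Perform the store and then report it to any memory-tracing plugins. */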
static inline void cpu_store_helper(CPUArchState *env, target_ulong addr,
                                    uint64_t val, MemOpIdx oi, uintptr_t ra,
                                    FullStoreHelper *full_store)
{
    full_store(env, addr, val, oi, ra);
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
}

void cpu_stb_mmu(CPUArchState *env, target_ulong addr, uint8_t val,
                 MemOpIdx oi, uintptr_t retaddr)
{
    cpu_store_helper(env, addr, val, oi, retaddr, full_stb_mmu);
}

void cpu_stw_be_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
                    MemOpIdx oi, uintptr_t retaddr)
{
    cpu_store_helper(env, addr, val, oi, retaddr, full_be_stw_mmu);
}

void cpu_stl_be_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
                    MemOpIdx oi, uintptr_t retaddr)
{
    cpu_store_helper(env, addr, val, oi, retaddr, full_be_stl_mmu);
}

void cpu_stq_be_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
                    MemOpIdx oi, uintptr_t retaddr)
{
    cpu_store_helper(env, addr, val, oi, retaddr, helper_be_stq_mmu);
}

void cpu_stw_le_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
                    MemOpIdx oi, uintptr_t retaddr)
{
    cpu_store_helper(env, addr, val, oi, retaddr, full_le_stw_mmu);
}

void cpu_stl_le_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
                    MemOpIdx oi, uintptr_t retaddr)
{
    cpu_store_helper(env, addr, val, oi, retaddr, full_le_stl_mmu);
}

void cpu_stq_le_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
                    MemOpIdx oi, uintptr_t retaddr)
{
    cpu_store_helper(env, addr, val, oi, retaddr, helper_le_stq_mmu);
}

#include "ldst_common.c.inc"

/*
 * First set of functions passes in OI and RETADDR.
 * This makes them callable from other helpers.
 */

#define ATOMIC_NAME(X) \
    glue(glue(glue(cpu_atomic_ ## X, SUFFIX), END), _mmu)

#define ATOMIC_MMU_CLEANUP

#include "atomic_common.c.inc"

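/* Instantiate the atomic helpers once for each operand size. */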
#define DATA_SIZE 1
#include "atomic_template.h"

#define DATA_SIZE 2
#include "atomic_template.h"

#define DATA_SIZE 4
#include "atomic_template.h"

#ifdef CONFIG_ATOMIC64
#define DATA_SIZE 8
#include "atomic_template.h"
#endif

#if HAVE_CMPXCHG128 || HAVE_ATOMIC128
#define DATA_SIZE 16
#include "atomic_template.h"
#endif

/* Code access functions.  */

static uint64_t full_ldub_code(CPUArchState *env, target_ulong addr,
                               MemOpIdx oi, uintptr_t retaddr)
{
    return load_helper(env, addr, oi, retaddr, MO_8, true, full_ldub_code);
}

uint32_t cpu_ldub_code(CPUArchState *env, abi_ptr addr)
{
    MemOpIdx oi = make_memop_idx(MO_UB, cpu_mmu_index(env, true));
    return full_ldub_code(env, addr, oi, 0);
}

static uint64_t full_lduw_code(CPUArchState *env, target_ulong addr,
                               MemOpIdx oi, uintptr_t retaddr)
{
    return load_helper(env, addr, oi, retaddr, MO_TEUW, true, full_lduw_code);
}

uint32_t cpu_lduw_code(CPUArchState *env, abi_ptr addr)
{
    MemOpIdx oi = make_memop_idx(MO_TEUW, cpu_mmu_index(env, true));
    return full_lduw_code(env, addr, oi, 0);
}

static uint64_t full_ldl_code(CPUArchState *env, target_ulong addr,
                              MemOpIdx oi, uintptr_t retaddr)
{
    return load_helper(env, addr, oi, retaddr, MO_TEUL, true, full_ldl_code);
}

uint32_t cpu_ldl_code(CPUArchState *env, abi_ptr addr)
{
    MemOpIdx oi = make_memop_idx(MO_TEUL, cpu_mmu_index(env, true));
    return full_ldl_code(env, addr, oi, 0);
}

static uint64_t full_ldq_code(CPUArchState *env, target_ulong addr,
                              MemOpIdx oi, uintptr_t retaddr)
{
    return load_helper(env, addr, oi, retaddr, MO_TEUQ, true, full_ldq_code);
}

uint64_t cpu_ldq_code(CPUArchState *env, abi_ptr addr)
{
    MemOpIdx oi = make_memop_idx(MO_TEUQ, cpu_mmu_index(env, true));
    return full_ldq_code(env, addr, oi, 0);
}