#include "qemu/osdep.h"
#include "qemu/main-loop.h"
#include "hw/core/tcg-cpu-ops.h"
#include "exec/exec-all.h"
#include "exec/memory.h"
#include "exec/cpu_ldst.h"
#include "exec/cputlb.h"
#include "exec/memory-internal.h"
#include "exec/ram_addr.h"
#include "tcg/tcg.h"
#include "qemu/error-report.h"
#include "exec/log.h"
#include "exec/helper-proto-common.h"
#include "qemu/atomic.h"
#include "qemu/atomic128.h"
#include "exec/translate-all.h"
#include "trace.h"
#include "tb-hash.h"
#include "internal.h"
#ifdef CONFIG_PLUGIN
#include "qemu/plugin-memory.h"
#endif
#include "tcg/tcg-ldst.h"
#include "tcg/oversized-guest.h"
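
/*
 * Debug knobs: defining DEBUG_TLB (and optionally DEBUG_TLB_LOG) enables
 * tlb_debug() tracing through the gates below.
 */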
#ifdef DEBUG_TLB
# define DEBUG_TLB_GATE 1
# ifdef DEBUG_TLB_LOG
#  define DEBUG_TLB_LOG_GATE 1
# else
#  define DEBUG_TLB_LOG_GATE 0
# endif
#else
# define DEBUG_TLB_GATE 0
# define DEBUG_TLB_LOG_GATE 0
#endif

#define tlb_debug(fmt, ...) do { \
    if (DEBUG_TLB_LOG_GATE) { \
        qemu_log_mask(CPU_LOG_MMU, "%s: " fmt, __func__, \
                      ## __VA_ARGS__); \
    } else if (DEBUG_TLB_GATE) { \
        fprintf(stderr, "%s: " fmt, __func__, ## __VA_ARGS__); \
    } \
} while (0)

#define assert_cpu_is_self(cpu) do {                              \
        if (DEBUG_TLB_GATE) {                                     \
            g_assert(!(cpu)->created || qemu_cpu_is_self(cpu));   \
        }                                                         \
    } while (0)
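
/*
 * The asynchronous flush helpers pass guest addresses through
 * run_on_cpu_data (see tlb_flush_page_by_mmuidx_async_1), so it must be
 * at least as wide as target_ulong.
 */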
QEMU_BUILD_BUG_ON(sizeof(target_ulong) > sizeof(run_on_cpu_data));
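
/*
 * The flush interfaces take the set of MMU indexes as a 16-bit bitmap,
 * so at most 16 MMU modes can be supported.
 */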
QEMU_BUILD_BUG_ON(NB_MMU_MODES > 16);
#define ALL_MMUIDX_BITS ((1 << NB_MMU_MODES) - 1)

static inline size_t tlb_n_entries(CPUTLBDescFast *fast)
{
    return (fast->mask >> CPU_TLB_ENTRY_BITS) + 1;
}

static inline size_t sizeof_tlb(CPUTLBDescFast *fast)
{
    return fast->mask + (1 << CPU_TLB_ENTRY_BITS);
}

static void tlb_window_reset(CPUTLBDesc *desc, int64_t ns,
                             size_t max_entries)
{
    desc->window_begin_ns = ns;
    desc->window_max_entries = max_entries;
}
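
/* Drop any jump-cache entries whose slots could map to the given guest page. */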
static void tb_jmp_cache_clear_page(CPUState *cpu, vaddr page_addr)
{
    CPUJumpCache *jc = cpu->tb_jmp_cache;
    int i, i0;

    if (unlikely(!jc)) {
        return;
    }

    i0 = tb_jmp_cache_hash_page(page_addr);
    for (i = 0; i < TB_JMP_PAGE_SIZE; i++) {
        qatomic_set(&jc->array[i0 + i].tb, NULL);
    }
}
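
/*
 * Dynamically resize this TLB to match recent use (a summary of the
 * heuristic implemented below): the maximum number of in-use entries is
 * tracked over a 100ms window; the table is doubled when utilisation
 * exceeds 70% (up to CPU_TLB_DYN_MAX_BITS) and shrunk towards the observed
 * working set when it drops below 30% and the window has expired (down to
 * CPU_TLB_DYN_MIN_BITS).
 */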
157static void tlb_mmu_resize_locked(CPUTLBDesc *desc, CPUTLBDescFast *fast,
158 int64_t now)
159{
160 size_t old_size = tlb_n_entries(fast);
161 size_t rate;
162 size_t new_size = old_size;
163 int64_t window_len_ms = 100;
164 int64_t window_len_ns = window_len_ms * 1000 * 1000;
165 bool window_expired = now > desc->window_begin_ns + window_len_ns;
166
167 if (desc->n_used_entries > desc->window_max_entries) {
168 desc->window_max_entries = desc->n_used_entries;
169 }
170 rate = desc->window_max_entries * 100 / old_size;
171
172 if (rate > 70) {
173 new_size = MIN(old_size << 1, 1 << CPU_TLB_DYN_MAX_BITS);
174 } else if (rate < 30 && window_expired) {
175 size_t ceil = pow2ceil(desc->window_max_entries);
176 size_t expected_rate = desc->window_max_entries * 100 / ceil;
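        /*
         * Don't shrink so far that we would immediately be back above the
         * grow threshold; in that case settle for one power of two larger.
         */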
188 if (expected_rate > 70) {
189 ceil *= 2;
190 }
191 new_size = MAX(ceil, 1 << CPU_TLB_DYN_MIN_BITS);
192 }
193
194 if (new_size == old_size) {
195 if (window_expired) {
196 tlb_window_reset(desc, now, desc->n_used_entries);
197 }
198 return;
199 }
200
201 g_free(fast->table);
202 g_free(desc->fulltlb);
203
204 tlb_window_reset(desc, now, 0);
205
206 fast->mask = (new_size - 1) << CPU_TLB_ENTRY_BITS;
207 fast->table = g_try_new(CPUTLBEntry, new_size);
208 desc->fulltlb = g_try_new(CPUTLBEntryFull, new_size);
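    /*
     * If allocation fails, retry at progressively smaller sizes; only give
     * up (and abort) once even the minimum-size TLB cannot be allocated.
     */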
217 while (fast->table == NULL || desc->fulltlb == NULL) {
218 if (new_size == (1 << CPU_TLB_DYN_MIN_BITS)) {
219 error_report("%s: %s", __func__, strerror(errno));
220 abort();
221 }
222 new_size = MAX(new_size >> 1, 1 << CPU_TLB_DYN_MIN_BITS);
223 fast->mask = (new_size - 1) << CPU_TLB_ENTRY_BITS;
224
225 g_free(fast->table);
226 g_free(desc->fulltlb);
227 fast->table = g_try_new(CPUTLBEntry, new_size);
228 desc->fulltlb = g_try_new(CPUTLBEntryFull, new_size);
229 }
230}
231
232static void tlb_mmu_flush_locked(CPUTLBDesc *desc, CPUTLBDescFast *fast)
233{
234 desc->n_used_entries = 0;
235 desc->large_page_addr = -1;
236 desc->large_page_mask = -1;
237 desc->vindex = 0;
238 memset(fast->table, -1, sizeof_tlb(fast));
239 memset(desc->vtable, -1, sizeof(desc->vtable));
240}
241
242static void tlb_flush_one_mmuidx_locked(CPUArchState *env, int mmu_idx,
243 int64_t now)
244{
245 CPUTLBDesc *desc = &env_tlb(env)->d[mmu_idx];
246 CPUTLBDescFast *fast = &env_tlb(env)->f[mmu_idx];
247
248 tlb_mmu_resize_locked(desc, fast, now);
249 tlb_mmu_flush_locked(desc, fast);
250}
251
252static void tlb_mmu_init(CPUTLBDesc *desc, CPUTLBDescFast *fast, int64_t now)
253{
254 size_t n_entries = 1 << CPU_TLB_DYN_DEFAULT_BITS;
255
256 tlb_window_reset(desc, now, 0);
257 desc->n_used_entries = 0;
258 fast->mask = (n_entries - 1) << CPU_TLB_ENTRY_BITS;
259 fast->table = g_new(CPUTLBEntry, n_entries);
260 desc->fulltlb = g_new(CPUTLBEntryFull, n_entries);
261 tlb_mmu_flush_locked(desc, fast);
262}
263
264static inline void tlb_n_used_entries_inc(CPUArchState *env, uintptr_t mmu_idx)
265{
266 env_tlb(env)->d[mmu_idx].n_used_entries++;
267}
268
269static inline void tlb_n_used_entries_dec(CPUArchState *env, uintptr_t mmu_idx)
270{
271 env_tlb(env)->d[mmu_idx].n_used_entries--;
272}
273
274void tlb_init(CPUState *cpu)
275{
276 CPUArchState *env = cpu->env_ptr;
277 int64_t now = get_clock_realtime();
278 int i;
279
280 qemu_spin_init(&env_tlb(env)->c.lock);
281
282
283 env_tlb(env)->c.dirty = 0;
284
285 for (i = 0; i < NB_MMU_MODES; i++) {
286 tlb_mmu_init(&env_tlb(env)->d[i], &env_tlb(env)->f[i], now);
287 }
288}
289
290void tlb_destroy(CPUState *cpu)
291{
292 CPUArchState *env = cpu->env_ptr;
293 int i;
294
295 qemu_spin_destroy(&env_tlb(env)->c.lock);
296 for (i = 0; i < NB_MMU_MODES; i++) {
297 CPUTLBDesc *desc = &env_tlb(env)->d[i];
298 CPUTLBDescFast *fast = &env_tlb(env)->f[i];
299
300 g_free(fast->table);
301 g_free(desc->fulltlb);
302 }
303}
304
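/*
 * Queue fn on every vCPU other than the source; used by the *_all_cpus
 * flush variants below.
 */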
312static void flush_all_helper(CPUState *src, run_on_cpu_func fn,
313 run_on_cpu_data d)
314{
315 CPUState *cpu;
316
317 CPU_FOREACH(cpu) {
318 if (cpu != src) {
319 async_run_on_cpu(cpu, fn, d);
320 }
321 }
322}
323
324void tlb_flush_counts(size_t *pfull, size_t *ppart, size_t *pelide)
325{
326 CPUState *cpu;
327 size_t full = 0, part = 0, elide = 0;
328
329 CPU_FOREACH(cpu) {
330 CPUArchState *env = cpu->env_ptr;
331
332 full += qatomic_read(&env_tlb(env)->c.full_flush_count);
333 part += qatomic_read(&env_tlb(env)->c.part_flush_count);
334 elide += qatomic_read(&env_tlb(env)->c.elide_flush_count);
335 }
336 *pfull = full;
337 *ppart = part;
338 *pelide = elide;
339}
340
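/*
 * Flush, on the calling vCPU, the TLBs for the MMU indexes requested in
 * data.host_int, skipping any that are not marked dirty, and update the
 * full/partial/elided flush counters.
 */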
341static void tlb_flush_by_mmuidx_async_work(CPUState *cpu, run_on_cpu_data data)
342{
343 CPUArchState *env = cpu->env_ptr;
344 uint16_t asked = data.host_int;
345 uint16_t all_dirty, work, to_clean;
346 int64_t now = get_clock_realtime();
347
348 assert_cpu_is_self(cpu);
349
350 tlb_debug("mmu_idx:0x%04" PRIx16 "\n", asked);
351
352 qemu_spin_lock(&env_tlb(env)->c.lock);
353
354 all_dirty = env_tlb(env)->c.dirty;
355 to_clean = asked & all_dirty;
356 all_dirty &= ~to_clean;
357 env_tlb(env)->c.dirty = all_dirty;
358
359 for (work = to_clean; work != 0; work &= work - 1) {
360 int mmu_idx = ctz32(work);
361 tlb_flush_one_mmuidx_locked(env, mmu_idx, now);
362 }
363
364 qemu_spin_unlock(&env_tlb(env)->c.lock);
365
366 tcg_flush_jmp_cache(cpu);
367
368 if (to_clean == ALL_MMUIDX_BITS) {
369 qatomic_set(&env_tlb(env)->c.full_flush_count,
370 env_tlb(env)->c.full_flush_count + 1);
371 } else {
372 qatomic_set(&env_tlb(env)->c.part_flush_count,
373 env_tlb(env)->c.part_flush_count + ctpop16(to_clean));
374 if (to_clean != asked) {
375 qatomic_set(&env_tlb(env)->c.elide_flush_count,
376 env_tlb(env)->c.elide_flush_count +
377 ctpop16(asked & ~to_clean));
378 }
379 }
380}
381
382void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap)
383{
384 tlb_debug("mmu_idx: 0x%" PRIx16 "\n", idxmap);
385
386 if (cpu->created && !qemu_cpu_is_self(cpu)) {
387 async_run_on_cpu(cpu, tlb_flush_by_mmuidx_async_work,
388 RUN_ON_CPU_HOST_INT(idxmap));
389 } else {
390 tlb_flush_by_mmuidx_async_work(cpu, RUN_ON_CPU_HOST_INT(idxmap));
391 }
392}
393
394void tlb_flush(CPUState *cpu)
395{
396 tlb_flush_by_mmuidx(cpu, ALL_MMUIDX_BITS);
397}
398
399void tlb_flush_by_mmuidx_all_cpus(CPUState *src_cpu, uint16_t idxmap)
400{
401 const run_on_cpu_func fn = tlb_flush_by_mmuidx_async_work;
402
403 tlb_debug("mmu_idx: 0x%"PRIx16"\n", idxmap);
404
405 flush_all_helper(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
406 fn(src_cpu, RUN_ON_CPU_HOST_INT(idxmap));
407}
408
409void tlb_flush_all_cpus(CPUState *src_cpu)
410{
411 tlb_flush_by_mmuidx_all_cpus(src_cpu, ALL_MMUIDX_BITS);
412}
413
414void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *src_cpu, uint16_t idxmap)
415{
416 const run_on_cpu_func fn = tlb_flush_by_mmuidx_async_work;
417
418 tlb_debug("mmu_idx: 0x%"PRIx16"\n", idxmap);
419
420 flush_all_helper(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
421 async_safe_run_on_cpu(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
422}
423
424void tlb_flush_all_cpus_synced(CPUState *src_cpu)
425{
426 tlb_flush_by_mmuidx_all_cpus_synced(src_cpu, ALL_MMUIDX_BITS);
427}
428
429static bool tlb_hit_page_mask_anyprot(CPUTLBEntry *tlb_entry,
430 vaddr page, vaddr mask)
431{
432 page &= mask;
433 mask &= TARGET_PAGE_MASK | TLB_INVALID_MASK;
434
435 return (page == (tlb_entry->addr_read & mask) ||
436 page == (tlb_addr_write(tlb_entry) & mask) ||
437 page == (tlb_entry->addr_code & mask));
438}
439
440static inline bool tlb_hit_page_anyprot(CPUTLBEntry *tlb_entry, vaddr page)
441{
442 return tlb_hit_page_mask_anyprot(tlb_entry, page, -1);
443}
444
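/* A fully invalidated TLB entry has all three comparators set to -1. */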
449static inline bool tlb_entry_is_empty(const CPUTLBEntry *te)
450{
451 return te->addr_read == -1 && te->addr_write == -1 && te->addr_code == -1;
452}
453
454
455static bool tlb_flush_entry_mask_locked(CPUTLBEntry *tlb_entry,
456 vaddr page,
457 vaddr mask)
458{
459 if (tlb_hit_page_mask_anyprot(tlb_entry, page, mask)) {
460 memset(tlb_entry, -1, sizeof(*tlb_entry));
461 return true;
462 }
463 return false;
464}
465
466static inline bool tlb_flush_entry_locked(CPUTLBEntry *tlb_entry, vaddr page)
467{
468 return tlb_flush_entry_mask_locked(tlb_entry, page, -1);
469}
470
471
472static void tlb_flush_vtlb_page_mask_locked(CPUArchState *env, int mmu_idx,
473 vaddr page,
474 vaddr mask)
475{
476 CPUTLBDesc *d = &env_tlb(env)->d[mmu_idx];
477 int k;
478
479 assert_cpu_is_self(env_cpu(env));
480 for (k = 0; k < CPU_VTLB_SIZE; k++) {
481 if (tlb_flush_entry_mask_locked(&d->vtable[k], page, mask)) {
482 tlb_n_used_entries_dec(env, mmu_idx);
483 }
484 }
485}
486
487static inline void tlb_flush_vtlb_page_locked(CPUArchState *env, int mmu_idx,
488 vaddr page)
489{
490 tlb_flush_vtlb_page_mask_locked(env, mmu_idx, page, -1);
491}
492
493static void tlb_flush_page_locked(CPUArchState *env, int midx, vaddr page)
494{
495 vaddr lp_addr = env_tlb(env)->d[midx].large_page_addr;
496 vaddr lp_mask = env_tlb(env)->d[midx].large_page_mask;
497
498
499 if ((page & lp_mask) == lp_addr) {
500 tlb_debug("forcing full flush midx %d (%016"
501 VADDR_PRIx "/%016" VADDR_PRIx ")\n",
502 midx, lp_addr, lp_mask);
503 tlb_flush_one_mmuidx_locked(env, midx, get_clock_realtime());
504 } else {
505 if (tlb_flush_entry_locked(tlb_entry(env, midx, page), page)) {
506 tlb_n_used_entries_dec(env, midx);
507 }
508 tlb_flush_vtlb_page_locked(env, midx, page);
509 }
510}
511
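/*
 * Flush one page from the TLBs of this CPU, for the MMU indexes given in
 * idxmap.  Runs on the vCPU's own thread, either directly or via one of
 * the async wrappers below.
 */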
521static void tlb_flush_page_by_mmuidx_async_0(CPUState *cpu,
522 vaddr addr,
523 uint16_t idxmap)
524{
525 CPUArchState *env = cpu->env_ptr;
526 int mmu_idx;
527
528 assert_cpu_is_self(cpu);
529
530 tlb_debug("page addr: %016" VADDR_PRIx " mmu_map:0x%x\n", addr, idxmap);
531
532 qemu_spin_lock(&env_tlb(env)->c.lock);
533 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
534 if ((idxmap >> mmu_idx) & 1) {
535 tlb_flush_page_locked(env, mmu_idx, addr);
536 }
537 }
538 qemu_spin_unlock(&env_tlb(env)->c.lock);
539
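    /*
     * Discard jump-cache entries that could refer to TBs overlapping the
     * flushed page; a TB may start on the preceding page and run into this
     * one, hence both pages are cleared.
     */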
544 tb_jmp_cache_clear_page(cpu, addr - TARGET_PAGE_SIZE);
545 tb_jmp_cache_clear_page(cpu, addr);
546}
547
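
/*
 * Variant that packs the page address and the mmu-idx bitmap into a single
 * target pointer; only usable when idxmap fits in the low TARGET_PAGE_BITS.
 */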
558static void tlb_flush_page_by_mmuidx_async_1(CPUState *cpu,
559 run_on_cpu_data data)
560{
561 vaddr addr_and_idxmap = data.target_ptr;
562 vaddr addr = addr_and_idxmap & TARGET_PAGE_MASK;
563 uint16_t idxmap = addr_and_idxmap & ~TARGET_PAGE_MASK;
564
565 tlb_flush_page_by_mmuidx_async_0(cpu, addr, idxmap);
566}
567
568typedef struct {
569 vaddr addr;
570 uint16_t idxmap;
571} TLBFlushPageByMMUIdxData;
572
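
/*
 * Variant that carries the page address and idxmap in a heap-allocated
 * TLBFlushPageByMMUIdxData, freed after use; needed when the bitmap does
 * not fit beside the address.
 */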
583static void tlb_flush_page_by_mmuidx_async_2(CPUState *cpu,
584 run_on_cpu_data data)
585{
586 TLBFlushPageByMMUIdxData *d = data.host_ptr;
587
588 tlb_flush_page_by_mmuidx_async_0(cpu, d->addr, d->idxmap);
589 g_free(d);
590}
591
592void tlb_flush_page_by_mmuidx(CPUState *cpu, vaddr addr, uint16_t idxmap)
593{
594 tlb_debug("addr: %016" VADDR_PRIx " mmu_idx:%" PRIx16 "\n", addr, idxmap);
595
596
597 addr &= TARGET_PAGE_MASK;
598
599 if (qemu_cpu_is_self(cpu)) {
600 tlb_flush_page_by_mmuidx_async_0(cpu, addr, idxmap);
601 } else if (idxmap < TARGET_PAGE_SIZE) {
602
603
604
605
606
607 async_run_on_cpu(cpu, tlb_flush_page_by_mmuidx_async_1,
608 RUN_ON_CPU_TARGET_PTR(addr | idxmap));
609 } else {
610 TLBFlushPageByMMUIdxData *d = g_new(TLBFlushPageByMMUIdxData, 1);
611
612
613 d->addr = addr;
614 d->idxmap = idxmap;
615 async_run_on_cpu(cpu, tlb_flush_page_by_mmuidx_async_2,
616 RUN_ON_CPU_HOST_PTR(d));
617 }
618}
619
620void tlb_flush_page(CPUState *cpu, vaddr addr)
621{
622 tlb_flush_page_by_mmuidx(cpu, addr, ALL_MMUIDX_BITS);
623}
624
625void tlb_flush_page_by_mmuidx_all_cpus(CPUState *src_cpu, vaddr addr,
626 uint16_t idxmap)
627{
628 tlb_debug("addr: %016" VADDR_PRIx " mmu_idx:%"PRIx16"\n", addr, idxmap);
629
630
631 addr &= TARGET_PAGE_MASK;
632
633
634
635
636
637 if (idxmap < TARGET_PAGE_SIZE) {
638 flush_all_helper(src_cpu, tlb_flush_page_by_mmuidx_async_1,
639 RUN_ON_CPU_TARGET_PTR(addr | idxmap));
640 } else {
641 CPUState *dst_cpu;
642
643
644 CPU_FOREACH(dst_cpu) {
645 if (dst_cpu != src_cpu) {
646 TLBFlushPageByMMUIdxData *d
647 = g_new(TLBFlushPageByMMUIdxData, 1);
648
649 d->addr = addr;
650 d->idxmap = idxmap;
651 async_run_on_cpu(dst_cpu, tlb_flush_page_by_mmuidx_async_2,
652 RUN_ON_CPU_HOST_PTR(d));
653 }
654 }
655 }
656
657 tlb_flush_page_by_mmuidx_async_0(src_cpu, addr, idxmap);
658}
659
660void tlb_flush_page_all_cpus(CPUState *src, vaddr addr)
661{
662 tlb_flush_page_by_mmuidx_all_cpus(src, addr, ALL_MMUIDX_BITS);
663}
664
665void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
666 vaddr addr,
667 uint16_t idxmap)
668{
669 tlb_debug("addr: %016" VADDR_PRIx " mmu_idx:%"PRIx16"\n", addr, idxmap);
670
671
672 addr &= TARGET_PAGE_MASK;
673
674
675
676
677
678 if (idxmap < TARGET_PAGE_SIZE) {
679 flush_all_helper(src_cpu, tlb_flush_page_by_mmuidx_async_1,
680 RUN_ON_CPU_TARGET_PTR(addr | idxmap));
681 async_safe_run_on_cpu(src_cpu, tlb_flush_page_by_mmuidx_async_1,
682 RUN_ON_CPU_TARGET_PTR(addr | idxmap));
683 } else {
684 CPUState *dst_cpu;
685 TLBFlushPageByMMUIdxData *d;
686
687
688 CPU_FOREACH(dst_cpu) {
689 if (dst_cpu != src_cpu) {
690 d = g_new(TLBFlushPageByMMUIdxData, 1);
691 d->addr = addr;
692 d->idxmap = idxmap;
693 async_run_on_cpu(dst_cpu, tlb_flush_page_by_mmuidx_async_2,
694 RUN_ON_CPU_HOST_PTR(d));
695 }
696 }
697
698 d = g_new(TLBFlushPageByMMUIdxData, 1);
699 d->addr = addr;
700 d->idxmap = idxmap;
701 async_safe_run_on_cpu(src_cpu, tlb_flush_page_by_mmuidx_async_2,
702 RUN_ON_CPU_HOST_PTR(d));
703 }
704}
705
706void tlb_flush_page_all_cpus_synced(CPUState *src, vaddr addr)
707{
708 tlb_flush_page_by_mmuidx_all_cpus_synced(src, addr, ALL_MMUIDX_BITS);
709}
710
711static void tlb_flush_range_locked(CPUArchState *env, int midx,
712 vaddr addr, vaddr len,
713 unsigned bits)
714{
715 CPUTLBDesc *d = &env_tlb(env)->d[midx];
716 CPUTLBDescFast *f = &env_tlb(env)->f[midx];
717 vaddr mask = MAKE_64BIT_MASK(0, bits);
718
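    /*
     * If the significant-bit mask covers less than the TLB, or the range is
     * longer than the TLB, several entries could be affected per page of the
     * range; fall back to flushing this mmu index entirely.
     */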
729 if (mask < f->mask || len > f->mask) {
730 tlb_debug("forcing full flush midx %d ("
731 "%016" VADDR_PRIx "/%016" VADDR_PRIx "+%016" VADDR_PRIx ")\n",
732 midx, addr, mask, len);
733 tlb_flush_one_mmuidx_locked(env, midx, get_clock_realtime());
734 return;
735 }
736
737
738
739
740
741
742 if (((addr + len - 1) & d->large_page_mask) == d->large_page_addr) {
743 tlb_debug("forcing full flush midx %d ("
744 "%016" VADDR_PRIx "/%016" VADDR_PRIx ")\n",
745 midx, d->large_page_addr, d->large_page_mask);
746 tlb_flush_one_mmuidx_locked(env, midx, get_clock_realtime());
747 return;
748 }
749
750 for (vaddr i = 0; i < len; i += TARGET_PAGE_SIZE) {
751 vaddr page = addr + i;
752 CPUTLBEntry *entry = tlb_entry(env, midx, page);
753
754 if (tlb_flush_entry_mask_locked(entry, page, mask)) {
755 tlb_n_used_entries_dec(env, midx);
756 }
757 tlb_flush_vtlb_page_mask_locked(env, midx, page, mask);
758 }
759}
760
761typedef struct {
762 vaddr addr;
763 vaddr len;
764 uint16_t idxmap;
765 uint16_t bits;
766} TLBFlushRangeData;
767
768static void tlb_flush_range_by_mmuidx_async_0(CPUState *cpu,
769 TLBFlushRangeData d)
770{
771 CPUArchState *env = cpu->env_ptr;
772 int mmu_idx;
773
774 assert_cpu_is_self(cpu);
775
776 tlb_debug("range: %016" VADDR_PRIx "/%u+%016" VADDR_PRIx " mmu_map:0x%x\n",
777 d.addr, d.bits, d.len, d.idxmap);
778
779 qemu_spin_lock(&env_tlb(env)->c.lock);
780 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
781 if ((d.idxmap >> mmu_idx) & 1) {
782 tlb_flush_range_locked(env, mmu_idx, d.addr, d.len, d.bits);
783 }
784 }
785 qemu_spin_unlock(&env_tlb(env)->c.lock);
786
787
788
789
790
791 if (d.len >= (TARGET_PAGE_SIZE * TB_JMP_CACHE_SIZE)) {
792 tcg_flush_jmp_cache(cpu);
793 return;
794 }
795
796
797
798
799
800 d.addr -= TARGET_PAGE_SIZE;
801 for (vaddr i = 0, n = d.len / TARGET_PAGE_SIZE + 1; i < n; i++) {
802 tb_jmp_cache_clear_page(cpu, d.addr);
803 d.addr += TARGET_PAGE_SIZE;
804 }
805}
806
807static void tlb_flush_range_by_mmuidx_async_1(CPUState *cpu,
808 run_on_cpu_data data)
809{
810 TLBFlushRangeData *d = data.host_ptr;
811 tlb_flush_range_by_mmuidx_async_0(cpu, *d);
812 g_free(d);
813}
814
815void tlb_flush_range_by_mmuidx(CPUState *cpu, vaddr addr,
816 vaddr len, uint16_t idxmap,
817 unsigned bits)
818{
819 TLBFlushRangeData d;
820
821
822
823
824
825 if (bits >= TARGET_LONG_BITS && len <= TARGET_PAGE_SIZE) {
826 tlb_flush_page_by_mmuidx(cpu, addr, idxmap);
827 return;
828 }
829
830 if (bits < TARGET_PAGE_BITS) {
831 tlb_flush_by_mmuidx(cpu, idxmap);
832 return;
833 }
834
835
836 d.addr = addr & TARGET_PAGE_MASK;
837 d.len = len;
838 d.idxmap = idxmap;
839 d.bits = bits;
840
841 if (qemu_cpu_is_self(cpu)) {
842 tlb_flush_range_by_mmuidx_async_0(cpu, d);
843 } else {
844
845 TLBFlushRangeData *p = g_memdup(&d, sizeof(d));
846 async_run_on_cpu(cpu, tlb_flush_range_by_mmuidx_async_1,
847 RUN_ON_CPU_HOST_PTR(p));
848 }
849}
850
851void tlb_flush_page_bits_by_mmuidx(CPUState *cpu, vaddr addr,
852 uint16_t idxmap, unsigned bits)
853{
854 tlb_flush_range_by_mmuidx(cpu, addr, TARGET_PAGE_SIZE, idxmap, bits);
855}
856
857void tlb_flush_range_by_mmuidx_all_cpus(CPUState *src_cpu,
858 vaddr addr, vaddr len,
859 uint16_t idxmap, unsigned bits)
860{
861 TLBFlushRangeData d;
862 CPUState *dst_cpu;
863
864
865
866
867
868 if (bits >= TARGET_LONG_BITS && len <= TARGET_PAGE_SIZE) {
869 tlb_flush_page_by_mmuidx_all_cpus(src_cpu, addr, idxmap);
870 return;
871 }
872
873 if (bits < TARGET_PAGE_BITS) {
874 tlb_flush_by_mmuidx_all_cpus(src_cpu, idxmap);
875 return;
876 }
877
878
879 d.addr = addr & TARGET_PAGE_MASK;
880 d.len = len;
881 d.idxmap = idxmap;
882 d.bits = bits;
883
884
885 CPU_FOREACH(dst_cpu) {
886 if (dst_cpu != src_cpu) {
887 TLBFlushRangeData *p = g_memdup(&d, sizeof(d));
888 async_run_on_cpu(dst_cpu,
889 tlb_flush_range_by_mmuidx_async_1,
890 RUN_ON_CPU_HOST_PTR(p));
891 }
892 }
893
894 tlb_flush_range_by_mmuidx_async_0(src_cpu, d);
895}
896
897void tlb_flush_page_bits_by_mmuidx_all_cpus(CPUState *src_cpu,
898 vaddr addr, uint16_t idxmap,
899 unsigned bits)
900{
901 tlb_flush_range_by_mmuidx_all_cpus(src_cpu, addr, TARGET_PAGE_SIZE,
902 idxmap, bits);
903}
904
905void tlb_flush_range_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
906 vaddr addr,
907 vaddr len,
908 uint16_t idxmap,
909 unsigned bits)
910{
911 TLBFlushRangeData d, *p;
912 CPUState *dst_cpu;
913
914
915
916
917
918 if (bits >= TARGET_LONG_BITS && len <= TARGET_PAGE_SIZE) {
919 tlb_flush_page_by_mmuidx_all_cpus_synced(src_cpu, addr, idxmap);
920 return;
921 }
922
923 if (bits < TARGET_PAGE_BITS) {
924 tlb_flush_by_mmuidx_all_cpus_synced(src_cpu, idxmap);
925 return;
926 }
927
928
929 d.addr = addr & TARGET_PAGE_MASK;
930 d.len = len;
931 d.idxmap = idxmap;
932 d.bits = bits;
933
934
935 CPU_FOREACH(dst_cpu) {
936 if (dst_cpu != src_cpu) {
937 p = g_memdup(&d, sizeof(d));
938 async_run_on_cpu(dst_cpu, tlb_flush_range_by_mmuidx_async_1,
939 RUN_ON_CPU_HOST_PTR(p));
940 }
941 }
942
943 p = g_memdup(&d, sizeof(d));
944 async_safe_run_on_cpu(src_cpu, tlb_flush_range_by_mmuidx_async_1,
945 RUN_ON_CPU_HOST_PTR(p));
946}
947
948void tlb_flush_page_bits_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
949 vaddr addr,
950 uint16_t idxmap,
951 unsigned bits)
952{
953 tlb_flush_range_by_mmuidx_all_cpus_synced(src_cpu, addr, TARGET_PAGE_SIZE,
954 idxmap, bits);
955}
956
957
958
959void tlb_protect_code(ram_addr_t ram_addr)
960{
961 cpu_physical_memory_test_and_clear_dirty(ram_addr & TARGET_PAGE_MASK,
962 TARGET_PAGE_SIZE,
963 DIRTY_MEMORY_CODE);
964}
965
966
967
968void tlb_unprotect_code(ram_addr_t ram_addr)
969{
970 cpu_physical_memory_set_dirty_flag(ram_addr, DIRTY_MEMORY_CODE);
971}
972
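
/*
 * If the entry maps writable RAM inside [start, start + length), set
 * TLB_NOTDIRTY in its write comparator so the next store takes the slow
 * path.  The comparator is updated with qatomic_set (or a plain store for
 * oversized guests) because other threads may read it concurrently.
 */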
990static void tlb_reset_dirty_range_locked(CPUTLBEntry *tlb_entry,
991 uintptr_t start, uintptr_t length)
992{
993 uintptr_t addr = tlb_entry->addr_write;
994
995 if ((addr & (TLB_INVALID_MASK | TLB_MMIO |
996 TLB_DISCARD_WRITE | TLB_NOTDIRTY)) == 0) {
997 addr &= TARGET_PAGE_MASK;
998 addr += tlb_entry->addend;
999 if ((addr - start) < length) {
1000#if TARGET_LONG_BITS == 32
1001 uint32_t *ptr_write = (uint32_t *)&tlb_entry->addr_write;
1002 ptr_write += HOST_BIG_ENDIAN;
1003 qatomic_set(ptr_write, *ptr_write | TLB_NOTDIRTY);
1004#elif TCG_OVERSIZED_GUEST
1005 tlb_entry->addr_write |= TLB_NOTDIRTY;
1006#else
1007 qatomic_set(&tlb_entry->addr_write,
1008 tlb_entry->addr_write | TLB_NOTDIRTY);
1009#endif
1010 }
1011 }
1012}
1013
1014
1015
1016
1017
1018static inline void copy_tlb_helper_locked(CPUTLBEntry *d, const CPUTLBEntry *s)
1019{
1020 *d = *s;
1021}
1022
1023
1024
1025
1026
1027
1028void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length)
1029{
1030 CPUArchState *env;
1031
1032 int mmu_idx;
1033
1034 env = cpu->env_ptr;
1035 qemu_spin_lock(&env_tlb(env)->c.lock);
1036 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
1037 unsigned int i;
1038 unsigned int n = tlb_n_entries(&env_tlb(env)->f[mmu_idx]);
1039
1040 for (i = 0; i < n; i++) {
1041 tlb_reset_dirty_range_locked(&env_tlb(env)->f[mmu_idx].table[i],
1042 start1, length);
1043 }
1044
1045 for (i = 0; i < CPU_VTLB_SIZE; i++) {
1046 tlb_reset_dirty_range_locked(&env_tlb(env)->d[mmu_idx].vtable[i],
1047 start1, length);
1048 }
1049 }
1050 qemu_spin_unlock(&env_tlb(env)->c.lock);
1051}
1052
1053
1054static inline void tlb_set_dirty1_locked(CPUTLBEntry *tlb_entry,
1055 vaddr addr)
1056{
1057 if (tlb_entry->addr_write == (addr | TLB_NOTDIRTY)) {
1058 tlb_entry->addr_write = addr;
1059 }
1060}
1061
1062
1063
1064void tlb_set_dirty(CPUState *cpu, vaddr addr)
1065{
1066 CPUArchState *env = cpu->env_ptr;
1067 int mmu_idx;
1068
1069 assert_cpu_is_self(cpu);
1070
1071 addr &= TARGET_PAGE_MASK;
1072 qemu_spin_lock(&env_tlb(env)->c.lock);
1073 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
1074 tlb_set_dirty1_locked(tlb_entry(env, mmu_idx, addr), addr);
1075 }
1076
1077 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
1078 int k;
1079 for (k = 0; k < CPU_VTLB_SIZE; k++) {
1080 tlb_set_dirty1_locked(&env_tlb(env)->d[mmu_idx].vtable[k], addr);
1081 }
1082 }
1083 qemu_spin_unlock(&env_tlb(env)->c.lock);
1084}
1085
1086
1087
1088static void tlb_add_large_page(CPUArchState *env, int mmu_idx,
1089 vaddr addr, uint64_t size)
1090{
1091 vaddr lp_addr = env_tlb(env)->d[mmu_idx].large_page_addr;
1092 vaddr lp_mask = ~(size - 1);
1093
1094 if (lp_addr == (vaddr)-1) {
1095
1096 lp_addr = addr;
1097 } else {
1098
1099
1100
1101 lp_mask &= env_tlb(env)->d[mmu_idx].large_page_mask;
1102 while (((lp_addr ^ addr) & lp_mask) != 0) {
1103 lp_mask <<= 1;
1104 }
1105 }
1106 env_tlb(env)->d[mmu_idx].large_page_addr = lp_addr & lp_mask;
1107 env_tlb(env)->d[mmu_idx].large_page_mask = lp_mask;
1108}
1109
1110static inline void tlb_set_compare(CPUTLBEntryFull *full, CPUTLBEntry *ent,
1111 target_ulong address, int flags,
1112 MMUAccessType access_type, bool enable)
1113{
1114 if (enable) {
1115 address |= flags & TLB_FLAGS_MASK;
1116 flags &= TLB_SLOW_FLAGS_MASK;
1117 if (flags) {
1118 address |= TLB_FORCE_SLOW;
1119 }
1120 } else {
1121 address = -1;
1122 flags = 0;
1123 }
1124 ent->addr_idx[access_type] = address;
1125 full->slow_flags[access_type] = flags;
1126}
1127
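
/*
 * Install a new TLB entry for (mmu_idx, addr).  At most one entry per
 * virtual address is kept; only a single TARGET_PAGE_SIZE region is mapped,
 * with larger page sizes recorded separately so tlb_flush_page can force a
 * wider flush when needed.
 */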
1136void tlb_set_page_full(CPUState *cpu, int mmu_idx,
1137 vaddr addr, CPUTLBEntryFull *full)
1138{
1139 CPUArchState *env = cpu->env_ptr;
1140 CPUTLB *tlb = env_tlb(env);
1141 CPUTLBDesc *desc = &tlb->d[mmu_idx];
1142 MemoryRegionSection *section;
1143 unsigned int index, read_flags, write_flags;
1144 uintptr_t addend;
1145 CPUTLBEntry *te, tn;
1146 hwaddr iotlb, xlat, sz, paddr_page;
1147 vaddr addr_page;
1148 int asidx, wp_flags, prot;
1149 bool is_ram, is_romd;
1150
1151 assert_cpu_is_self(cpu);
1152
1153 if (full->lg_page_size <= TARGET_PAGE_BITS) {
1154 sz = TARGET_PAGE_SIZE;
1155 } else {
1156 sz = (hwaddr)1 << full->lg_page_size;
1157 tlb_add_large_page(env, mmu_idx, addr, sz);
1158 }
1159 addr_page = addr & TARGET_PAGE_MASK;
1160 paddr_page = full->phys_addr & TARGET_PAGE_MASK;
1161
1162 prot = full->prot;
1163 asidx = cpu_asidx_from_attrs(cpu, full->attrs);
1164 section = address_space_translate_for_iotlb(cpu, asidx, paddr_page,
1165 &xlat, &sz, full->attrs, &prot);
1166 assert(sz >= TARGET_PAGE_SIZE);
1167
1168 tlb_debug("vaddr=%016" VADDR_PRIx " paddr=0x" HWADDR_FMT_plx
1169 " prot=%x idx=%d\n",
1170 addr, full->phys_addr, prot, mmu_idx);
1171
1172 read_flags = 0;
1173 if (full->lg_page_size < TARGET_PAGE_BITS) {
1174
1175 read_flags |= TLB_INVALID_MASK;
1176 }
1177 if (full->attrs.byte_swap) {
1178 read_flags |= TLB_BSWAP;
1179 }
1180
1181 is_ram = memory_region_is_ram(section->mr);
1182 is_romd = memory_region_is_romd(section->mr);
1183
1184 if (is_ram || is_romd) {
1185
1186 addend = (uintptr_t)memory_region_get_ram_ptr(section->mr) + xlat;
1187 } else {
1188
1189 addend = 0;
1190 }
1191
1192 write_flags = read_flags;
1193 if (is_ram) {
1194 iotlb = memory_region_get_ram_addr(section->mr) + xlat;
1195
1196
1197
1198
1199 if (prot & PAGE_WRITE) {
1200 if (section->readonly) {
1201 write_flags |= TLB_DISCARD_WRITE;
1202 } else if (cpu_physical_memory_is_clean(iotlb)) {
1203 write_flags |= TLB_NOTDIRTY;
1204 }
1205 }
1206 } else {
1207
1208 iotlb = memory_region_section_get_iotlb(cpu, section) + xlat;
1209
1210
1211
1212
1213
1214 write_flags |= TLB_MMIO;
1215 if (!is_romd) {
1216 read_flags = write_flags;
1217 }
1218 }
1219
1220 wp_flags = cpu_watchpoint_address_matches(cpu, addr_page,
1221 TARGET_PAGE_SIZE);
1222
1223 index = tlb_index(env, mmu_idx, addr_page);
1224 te = tlb_entry(env, mmu_idx, addr_page);
1225
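    /*
     * Hold c.lock across the whole update so that concurrent readers
     * (e.g. tlb_reset_dirty or a cross-vCPU flush) never observe a
     * partially written entry.
     */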
1233 qemu_spin_lock(&tlb->c.lock);
1234
1235
1236 tlb->c.dirty |= 1 << mmu_idx;
1237
1238
1239 tlb_flush_vtlb_page_locked(env, mmu_idx, addr_page);
1240
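    /*
     * Only evict the old entry to the victim TLB if it is for a different
     * page and is not already empty; otherwise simply overwrite it below.
     */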
1245 if (!tlb_hit_page_anyprot(te, addr_page) && !tlb_entry_is_empty(te)) {
1246 unsigned vidx = desc->vindex++ % CPU_VTLB_SIZE;
1247 CPUTLBEntry *tv = &desc->vtable[vidx];
1248
1249
1250 copy_tlb_helper_locked(tv, te);
1251 desc->vfulltlb[vidx] = desc->fulltlb[index];
1252 tlb_n_used_entries_dec(env, mmu_idx);
1253 }
1254
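    /*
     * Refill the full entry before publishing the fast comparators: record
     * the section offset (iotlb - addr_page) and the physical page address
     * so the slow path can reconstruct the access later.
     */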
1268 desc->fulltlb[index] = *full;
1269 full = &desc->fulltlb[index];
1270 full->xlat_section = iotlb - addr_page;
1271 full->phys_addr = paddr_page;
1272
1273
1274 tn.addend = addend - addr_page;
1275
1276 tlb_set_compare(full, &tn, addr_page, read_flags,
1277 MMU_INST_FETCH, prot & PAGE_EXEC);
1278
1279 if (wp_flags & BP_MEM_READ) {
1280 read_flags |= TLB_WATCHPOINT;
1281 }
1282 tlb_set_compare(full, &tn, addr_page, read_flags,
1283 MMU_DATA_LOAD, prot & PAGE_READ);
1284
1285 if (prot & PAGE_WRITE_INV) {
1286 write_flags |= TLB_INVALID_MASK;
1287 }
1288 if (wp_flags & BP_MEM_WRITE) {
1289 write_flags |= TLB_WATCHPOINT;
1290 }
1291 tlb_set_compare(full, &tn, addr_page, write_flags,
1292 MMU_DATA_STORE, prot & PAGE_WRITE);
1293
1294 copy_tlb_helper_locked(te, &tn);
1295 tlb_n_used_entries_inc(env, mmu_idx);
1296 qemu_spin_unlock(&tlb->c.lock);
1297}
1298
1299void tlb_set_page_with_attrs(CPUState *cpu, vaddr addr,
1300 hwaddr paddr, MemTxAttrs attrs, int prot,
1301 int mmu_idx, uint64_t size)
1302{
1303 CPUTLBEntryFull full = {
1304 .phys_addr = paddr,
1305 .attrs = attrs,
1306 .prot = prot,
1307 .lg_page_size = ctz64(size)
1308 };
1309
1310 assert(is_power_of_2(size));
1311 tlb_set_page_full(cpu, mmu_idx, addr, &full);
1312}
1313
1314void tlb_set_page(CPUState *cpu, vaddr addr,
1315 hwaddr paddr, int prot,
1316 int mmu_idx, uint64_t size)
1317{
1318 tlb_set_page_with_attrs(cpu, addr, paddr, MEMTXATTRS_UNSPECIFIED,
1319 prot, mmu_idx, size);
1320}
1321
1322
1323
1324
1325
1326
1327static void tlb_fill(CPUState *cpu, vaddr addr, int size,
1328 MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
1329{
1330 bool ok;
1331
1332
1333
1334
1335
1336 ok = cpu->cc->tcg_ops->tlb_fill(cpu, addr, size,
1337 access_type, mmu_idx, false, retaddr);
1338 assert(ok);
1339}
1340
1341static inline void cpu_unaligned_access(CPUState *cpu, vaddr addr,
1342 MMUAccessType access_type,
1343 int mmu_idx, uintptr_t retaddr)
1344{
1345 cpu->cc->tcg_ops->do_unaligned_access(cpu, addr, access_type,
1346 mmu_idx, retaddr);
1347}
1348
1349static inline void cpu_transaction_failed(CPUState *cpu, hwaddr physaddr,
1350 vaddr addr, unsigned size,
1351 MMUAccessType access_type,
1352 int mmu_idx, MemTxAttrs attrs,
1353 MemTxResult response,
1354 uintptr_t retaddr)
1355{
1356 CPUClass *cc = CPU_GET_CLASS(cpu);
1357
1358 if (!cpu->ignore_memory_transaction_failures &&
1359 cc->tcg_ops->do_transaction_failed) {
1360 cc->tcg_ops->do_transaction_failed(cpu, physaddr, addr, size,
1361 access_type, mmu_idx, attrs,
1362 response, retaddr);
1363 }
1364}
1365
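
/*
 * Remember the MMIO section and offset of the current access so that
 * tlb_plugin_lookup can still report it if the TLB entry is replaced
 * before the plugin callback runs (CONFIG_PLUGIN builds only).
 */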
1371static void save_iotlb_data(CPUState *cs, MemoryRegionSection *section,
1372 hwaddr mr_offset)
1373{
1374#ifdef CONFIG_PLUGIN
1375 SavedIOTLB *saved = &cs->saved_iotlb;
1376 saved->section = section;
1377 saved->mr_offset = mr_offset;
1378#endif
1379}
1380
1381static uint64_t io_readx(CPUArchState *env, CPUTLBEntryFull *full,
1382 int mmu_idx, vaddr addr, uintptr_t retaddr,
1383 MMUAccessType access_type, MemOp op)
1384{
1385 CPUState *cpu = env_cpu(env);
1386 hwaddr mr_offset;
1387 MemoryRegionSection *section;
1388 MemoryRegion *mr;
1389 uint64_t val;
1390 MemTxResult r;
1391
1392 section = iotlb_to_section(cpu, full->xlat_section, full->attrs);
1393 mr = section->mr;
1394 mr_offset = (full->xlat_section & TARGET_PAGE_MASK) + addr;
1395 cpu->mem_io_pc = retaddr;
1396 if (!cpu->can_do_io) {
1397 cpu_io_recompile(cpu, retaddr);
1398 }
1399
1400
1401
1402
1403
1404 save_iotlb_data(cpu, section, mr_offset);
1405
1406 {
1407 QEMU_IOTHREAD_LOCK_GUARD();
1408 r = memory_region_dispatch_read(mr, mr_offset, &val, op, full->attrs);
1409 }
1410
1411 if (r != MEMTX_OK) {
1412 hwaddr physaddr = mr_offset +
1413 section->offset_within_address_space -
1414 section->offset_within_region;
1415
1416 cpu_transaction_failed(cpu, physaddr, addr, memop_size(op), access_type,
1417 mmu_idx, full->attrs, r, retaddr);
1418 }
1419 return val;
1420}
1421
1422static void io_writex(CPUArchState *env, CPUTLBEntryFull *full,
1423 int mmu_idx, uint64_t val, vaddr addr,
1424 uintptr_t retaddr, MemOp op)
1425{
1426 CPUState *cpu = env_cpu(env);
1427 hwaddr mr_offset;
1428 MemoryRegionSection *section;
1429 MemoryRegion *mr;
1430 MemTxResult r;
1431
1432 section = iotlb_to_section(cpu, full->xlat_section, full->attrs);
1433 mr = section->mr;
1434 mr_offset = (full->xlat_section & TARGET_PAGE_MASK) + addr;
1435 if (!cpu->can_do_io) {
1436 cpu_io_recompile(cpu, retaddr);
1437 }
1438 cpu->mem_io_pc = retaddr;
1439
1440
1441
1442
1443
1444 save_iotlb_data(cpu, section, mr_offset);
1445
1446 {
1447 QEMU_IOTHREAD_LOCK_GUARD();
1448 r = memory_region_dispatch_write(mr, mr_offset, val, op, full->attrs);
1449 }
1450
1451 if (r != MEMTX_OK) {
1452 hwaddr physaddr = mr_offset +
1453 section->offset_within_address_space -
1454 section->offset_within_region;
1455
1456 cpu_transaction_failed(cpu, physaddr, addr, memop_size(op),
1457 MMU_DATA_STORE, mmu_idx, full->attrs, r,
1458 retaddr);
1459 }
1460}
1461
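/*
 * Return true if the page is present in the victim TLB; on a hit, swap the
 * victim entry (and its CPUTLBEntryFull data) into the main TLB slot.
 */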
1464static bool victim_tlb_hit(CPUArchState *env, size_t mmu_idx, size_t index,
1465 MMUAccessType access_type, vaddr page)
1466{
1467 size_t vidx;
1468
1469 assert_cpu_is_self(env_cpu(env));
1470 for (vidx = 0; vidx < CPU_VTLB_SIZE; ++vidx) {
1471 CPUTLBEntry *vtlb = &env_tlb(env)->d[mmu_idx].vtable[vidx];
1472 uint64_t cmp = tlb_read_idx(vtlb, access_type);
1473
1474 if (cmp == page) {
1475
1476 CPUTLBEntry tmptlb, *tlb = &env_tlb(env)->f[mmu_idx].table[index];
1477
1478 qemu_spin_lock(&env_tlb(env)->c.lock);
1479 copy_tlb_helper_locked(&tmptlb, tlb);
1480 copy_tlb_helper_locked(tlb, vtlb);
1481 copy_tlb_helper_locked(vtlb, &tmptlb);
1482 qemu_spin_unlock(&env_tlb(env)->c.lock);
1483
1484 CPUTLBEntryFull *f1 = &env_tlb(env)->d[mmu_idx].fulltlb[index];
1485 CPUTLBEntryFull *f2 = &env_tlb(env)->d[mmu_idx].vfulltlb[vidx];
1486 CPUTLBEntryFull tmpf;
1487 tmpf = *f1; *f1 = *f2; *f2 = tmpf;
1488 return true;
1489 }
1490 }
1491 return false;
1492}
1493
1494static void notdirty_write(CPUState *cpu, vaddr mem_vaddr, unsigned size,
1495 CPUTLBEntryFull *full, uintptr_t retaddr)
1496{
1497 ram_addr_t ram_addr = mem_vaddr + full->xlat_section;
1498
1499 trace_memory_notdirty_write_access(mem_vaddr, ram_addr, size);
1500
1501 if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
1502 tb_invalidate_phys_range_fast(ram_addr, size, retaddr);
1503 }
1504
1505
1506
1507
1508
1509 cpu_physical_memory_set_dirty_range(ram_addr, size, DIRTY_CLIENTS_NOCODE);
1510
1511
1512 if (!cpu_physical_memory_is_clean(ram_addr)) {
1513 trace_memory_notdirty_set_dirty(mem_vaddr);
1514 tlb_set_dirty(cpu, mem_vaddr);
1515 }
1516}
1517
1518static int probe_access_internal(CPUArchState *env, vaddr addr,
1519 int fault_size, MMUAccessType access_type,
1520 int mmu_idx, bool nonfault,
1521 void **phost, CPUTLBEntryFull **pfull,
1522 uintptr_t retaddr, bool check_mem_cbs)
1523{
1524 uintptr_t index = tlb_index(env, mmu_idx, addr);
1525 CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
1526 uint64_t tlb_addr = tlb_read_idx(entry, access_type);
1527 vaddr page_addr = addr & TARGET_PAGE_MASK;
1528 int flags = TLB_FLAGS_MASK & ~TLB_FORCE_SLOW;
1529 bool force_mmio = check_mem_cbs && cpu_plugin_mem_cbs_enabled(env_cpu(env));
1530 CPUTLBEntryFull *full;
1531
1532 if (!tlb_hit_page(tlb_addr, page_addr)) {
1533 if (!victim_tlb_hit(env, mmu_idx, index, access_type, page_addr)) {
1534 CPUState *cs = env_cpu(env);
1535
1536 if (!cs->cc->tcg_ops->tlb_fill(cs, addr, fault_size, access_type,
1537 mmu_idx, nonfault, retaddr)) {
1538
1539 *phost = NULL;
1540 *pfull = NULL;
1541 return TLB_INVALID_MASK;
1542 }
1543
1544
1545 index = tlb_index(env, mmu_idx, addr);
1546 entry = tlb_entry(env, mmu_idx, addr);
1547
1548
1549
1550
1551
1552
1553 flags &= ~TLB_INVALID_MASK;
1554 }
1555 tlb_addr = tlb_read_idx(entry, access_type);
1556 }
1557 flags &= tlb_addr;
1558
1559 *pfull = full = &env_tlb(env)->d[mmu_idx].fulltlb[index];
1560 flags |= full->slow_flags[access_type];
1561
1562
1563 if (unlikely(flags & ~(TLB_WATCHPOINT | TLB_NOTDIRTY))
1564 ||
1565 (access_type != MMU_INST_FETCH && force_mmio)) {
1566 *phost = NULL;
1567 return TLB_MMIO;
1568 }
1569
1570
1571 *phost = (void *)((uintptr_t)addr + entry->addend);
1572 return flags;
1573}
1574
1575int probe_access_full(CPUArchState *env, vaddr addr, int size,
1576 MMUAccessType access_type, int mmu_idx,
1577 bool nonfault, void **phost, CPUTLBEntryFull **pfull,
1578 uintptr_t retaddr)
1579{
1580 int flags = probe_access_internal(env, addr, size, access_type, mmu_idx,
1581 nonfault, phost, pfull, retaddr, true);
1582
1583
1584 if (unlikely(flags & TLB_NOTDIRTY)) {
1585 notdirty_write(env_cpu(env), addr, 1, *pfull, retaddr);
1586 flags &= ~TLB_NOTDIRTY;
1587 }
1588
1589 return flags;
1590}
1591
1592int probe_access_full_mmu(CPUArchState *env, vaddr addr, int size,
1593 MMUAccessType access_type, int mmu_idx,
1594 void **phost, CPUTLBEntryFull **pfull)
1595{
1596 void *discard_phost;
1597 CPUTLBEntryFull *discard_tlb;
1598
1599
1600 phost = phost ? phost : &discard_phost;
1601 pfull = pfull ? pfull : &discard_tlb;
1602
1603 int flags = probe_access_internal(env, addr, size, access_type, mmu_idx,
1604 true, phost, pfull, 0, false);
1605
1606
1607 if (unlikely(flags & TLB_NOTDIRTY)) {
1608 notdirty_write(env_cpu(env), addr, 1, *pfull, 0);
1609 flags &= ~TLB_NOTDIRTY;
1610 }
1611
1612 return flags;
1613}
1614
1615int probe_access_flags(CPUArchState *env, vaddr addr, int size,
1616 MMUAccessType access_type, int mmu_idx,
1617 bool nonfault, void **phost, uintptr_t retaddr)
1618{
1619 CPUTLBEntryFull *full;
1620 int flags;
1621
1622 g_assert(-(addr | TARGET_PAGE_MASK) >= size);
1623
1624 flags = probe_access_internal(env, addr, size, access_type, mmu_idx,
1625 nonfault, phost, &full, retaddr, true);
1626
1627
1628 if (unlikely(flags & TLB_NOTDIRTY)) {
1629 notdirty_write(env_cpu(env), addr, 1, full, retaddr);
1630 flags &= ~TLB_NOTDIRTY;
1631 }
1632
1633 return flags;
1634}
1635
1636void *probe_access(CPUArchState *env, vaddr addr, int size,
1637 MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
1638{
1639 CPUTLBEntryFull *full;
1640 void *host;
1641 int flags;
1642
1643 g_assert(-(addr | TARGET_PAGE_MASK) >= size);
1644
1645 flags = probe_access_internal(env, addr, size, access_type, mmu_idx,
1646 false, &host, &full, retaddr, true);
1647
1648
1649 if (size == 0) {
1650 return NULL;
1651 }
1652
1653 if (unlikely(flags & (TLB_NOTDIRTY | TLB_WATCHPOINT))) {
1654
1655 if (flags & TLB_WATCHPOINT) {
1656 int wp_access = (access_type == MMU_DATA_STORE
1657 ? BP_MEM_WRITE : BP_MEM_READ);
1658 cpu_check_watchpoint(env_cpu(env), addr, size,
1659 full->attrs, wp_access, retaddr);
1660 }
1661
1662
1663 if (flags & TLB_NOTDIRTY) {
1664 notdirty_write(env_cpu(env), addr, 1, full, retaddr);
1665 }
1666 }
1667
1668 return host;
1669}
1670
1671void *tlb_vaddr_to_host(CPUArchState *env, abi_ptr addr,
1672 MMUAccessType access_type, int mmu_idx)
1673{
1674 CPUTLBEntryFull *full;
1675 void *host;
1676 int flags;
1677
1678 flags = probe_access_internal(env, addr, 0, access_type,
1679 mmu_idx, true, &host, &full, 0, false);
1680
1681
1682 return flags ? NULL : host;
1683}
1684
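
/*
 * Translate a guest virtual address used for execution into a ram_addr_t,
 * optionally returning the host pointer; returns -1 if the address is not
 * backed by RAM/ROMD or the backing page is smaller than TARGET_PAGE_SIZE.
 */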
1695tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, vaddr addr,
1696 void **hostp)
1697{
1698 CPUTLBEntryFull *full;
1699 void *p;
1700
1701 (void)probe_access_internal(env, addr, 1, MMU_INST_FETCH,
1702 cpu_mmu_index(env, true), false,
1703 &p, &full, 0, false);
1704 if (p == NULL) {
1705 return -1;
1706 }
1707
1708 if (full->lg_page_size < TARGET_PAGE_BITS) {
1709 return -1;
1710 }
1711
1712 if (hostp) {
1713 *hostp = p;
1714 }
1715 return qemu_ram_addr_from_host_nofail(p);
1716}
1717
1718
1719#include "ldst_atomicity.c.inc"
1720
1721#ifdef CONFIG_PLUGIN
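
/*
 * Plugin helper: report where a just-completed access went, either a host
 * address for RAM or an MMIO section/offset, falling back to the section
 * saved by save_iotlb_data when the TLB entry no longer matches.
 */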
1736bool tlb_plugin_lookup(CPUState *cpu, vaddr addr, int mmu_idx,
1737 bool is_store, struct qemu_plugin_hwaddr *data)
1738{
1739 CPUArchState *env = cpu->env_ptr;
1740 CPUTLBEntry *tlbe = tlb_entry(env, mmu_idx, addr);
1741 uintptr_t index = tlb_index(env, mmu_idx, addr);
1742 uint64_t tlb_addr = is_store ? tlb_addr_write(tlbe) : tlbe->addr_read;
1743
1744 if (likely(tlb_hit(tlb_addr, addr))) {
1745
1746 if (tlb_addr & TLB_MMIO) {
1747 CPUTLBEntryFull *full;
1748 full = &env_tlb(env)->d[mmu_idx].fulltlb[index];
1749 data->is_io = true;
1750 data->v.io.section =
1751 iotlb_to_section(cpu, full->xlat_section, full->attrs);
1752 data->v.io.offset = (full->xlat_section & TARGET_PAGE_MASK) + addr;
1753 } else {
1754 data->is_io = false;
1755 data->v.ram.hostaddr = (void *)((uintptr_t)addr + tlbe->addend);
1756 }
1757 return true;
1758 } else {
1759 SavedIOTLB *saved = &cpu->saved_iotlb;
1760 data->is_io = true;
1761 data->v.io.section = saved->section;
1762 data->v.io.offset = saved->mr_offset;
1763 return true;
1764 }
1765}
1766
1767#endif
1768
1769
1770
1771
1772
1773
1774typedef struct MMULookupPageData {
1775 CPUTLBEntryFull *full;
1776 void *haddr;
1777 vaddr addr;
1778 int flags;
1779 int size;
1780} MMULookupPageData;
1781
1782typedef struct MMULookupLocals {
1783 MMULookupPageData page[2];
1784 MemOp memop;
1785 int mmu_idx;
1786} MMULookupLocals;
1787
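
/*
 * Resolve one page of a guest access: consult the fast TLB, fall back to
 * the victim TLB, and finally call tlb_fill.  Fills in data->full,
 * data->flags and data->haddr, and returns true if tlb_fill was invoked
 * (so the TLB may have been resized and cached pointers must be refreshed).
 */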
1801static bool mmu_lookup1(CPUArchState *env, MMULookupPageData *data,
1802 int mmu_idx, MMUAccessType access_type, uintptr_t ra)
1803{
1804 vaddr addr = data->addr;
1805 uintptr_t index = tlb_index(env, mmu_idx, addr);
1806 CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
1807 uint64_t tlb_addr = tlb_read_idx(entry, access_type);
1808 bool maybe_resized = false;
1809 CPUTLBEntryFull *full;
1810 int flags;
1811
1812
1813 if (!tlb_hit(tlb_addr, addr)) {
1814 if (!victim_tlb_hit(env, mmu_idx, index, access_type,
1815 addr & TARGET_PAGE_MASK)) {
1816 tlb_fill(env_cpu(env), addr, data->size, access_type, mmu_idx, ra);
1817 maybe_resized = true;
1818 index = tlb_index(env, mmu_idx, addr);
1819 entry = tlb_entry(env, mmu_idx, addr);
1820 }
1821 tlb_addr = tlb_read_idx(entry, access_type) & ~TLB_INVALID_MASK;
1822 }
1823
1824 full = &env_tlb(env)->d[mmu_idx].fulltlb[index];
1825 flags = tlb_addr & (TLB_FLAGS_MASK & ~TLB_FORCE_SLOW);
1826 flags |= full->slow_flags[access_type];
1827
1828 data->full = full;
1829 data->flags = flags;
1830
1831 data->haddr = (void *)((uintptr_t)addr + entry->addend);
1832
1833 return maybe_resized;
1834}
1835
1836
1837
1838
1839
1840
1841
1842
1843
1844
1845
1846static void mmu_watch_or_dirty(CPUArchState *env, MMULookupPageData *data,
1847 MMUAccessType access_type, uintptr_t ra)
1848{
1849 CPUTLBEntryFull *full = data->full;
1850 vaddr addr = data->addr;
1851 int flags = data->flags;
1852 int size = data->size;
1853
1854
1855 if (flags & TLB_WATCHPOINT) {
1856 int wp = access_type == MMU_DATA_STORE ? BP_MEM_WRITE : BP_MEM_READ;
1857 cpu_check_watchpoint(env_cpu(env), addr, size, full->attrs, wp, ra);
1858 flags &= ~TLB_WATCHPOINT;
1859 }
1860
1861
1862 if (flags & TLB_NOTDIRTY) {
1863 notdirty_write(env_cpu(env), addr, size, full, ra);
1864 flags &= ~TLB_NOTDIRTY;
1865 }
1866 data->flags = flags;
1867}
1868
1869
1870
1871
1872
1873
1874
1875
1876
1877
1878
1879
1880
1881static bool mmu_lookup(CPUArchState *env, vaddr addr, MemOpIdx oi,
1882 uintptr_t ra, MMUAccessType type, MMULookupLocals *l)
1883{
1884 unsigned a_bits;
1885 bool crosspage;
1886 int flags;
1887
1888 l->memop = get_memop(oi);
1889 l->mmu_idx = get_mmuidx(oi);
1890
1891 tcg_debug_assert(l->mmu_idx < NB_MMU_MODES);
1892
1893
1894 a_bits = get_alignment_bits(l->memop);
1895 if (addr & ((1 << a_bits) - 1)) {
1896 cpu_unaligned_access(env_cpu(env), addr, type, l->mmu_idx, ra);
1897 }
1898
1899 l->page[0].addr = addr;
1900 l->page[0].size = memop_size(l->memop);
1901 l->page[1].addr = (addr + l->page[0].size - 1) & TARGET_PAGE_MASK;
1902 l->page[1].size = 0;
1903 crosspage = (addr ^ l->page[1].addr) & TARGET_PAGE_MASK;
1904
1905 if (likely(!crosspage)) {
1906 mmu_lookup1(env, &l->page[0], l->mmu_idx, type, ra);
1907
1908 flags = l->page[0].flags;
1909 if (unlikely(flags & (TLB_WATCHPOINT | TLB_NOTDIRTY))) {
1910 mmu_watch_or_dirty(env, &l->page[0], type, ra);
1911 }
1912 if (unlikely(flags & TLB_BSWAP)) {
1913 l->memop ^= MO_BSWAP;
1914 }
1915 } else {
1916
1917 int size0 = l->page[1].addr - addr;
1918 l->page[1].size = l->page[0].size - size0;
1919 l->page[0].size = size0;
1920
1921
1922
1923
1924
1925 mmu_lookup1(env, &l->page[0], l->mmu_idx, type, ra);
1926 if (mmu_lookup1(env, &l->page[1], l->mmu_idx, type, ra)) {
1927 uintptr_t index = tlb_index(env, l->mmu_idx, addr);
1928 l->page[0].full = &env_tlb(env)->d[l->mmu_idx].fulltlb[index];
1929 }
1930
1931 flags = l->page[0].flags | l->page[1].flags;
1932 if (unlikely(flags & (TLB_WATCHPOINT | TLB_NOTDIRTY))) {
1933 mmu_watch_or_dirty(env, &l->page[0], type, ra);
1934 mmu_watch_or_dirty(env, &l->page[1], type, ra);
1935 }
1936
1937
1938
1939
1940
1941
1942 tcg_debug_assert((flags & TLB_BSWAP) == 0);
1943 }
1944
1945 return crosspage;
1946}
1947
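
/*
 * Probe for an atomic read-modify-write operation: validate that the page
 * is both writable and readable and return the host address, or exit to
 * the "stop the world" path for unaligned or I/O accesses.
 */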
1952static void *atomic_mmu_lookup(CPUArchState *env, vaddr addr, MemOpIdx oi,
1953 int size, uintptr_t retaddr)
1954{
1955 uintptr_t mmu_idx = get_mmuidx(oi);
1956 MemOp mop = get_memop(oi);
1957 int a_bits = get_alignment_bits(mop);
1958 uintptr_t index;
1959 CPUTLBEntry *tlbe;
1960 vaddr tlb_addr;
1961 void *hostaddr;
1962 CPUTLBEntryFull *full;
1963
1964 tcg_debug_assert(mmu_idx < NB_MMU_MODES);
1965
1966
1967 retaddr -= GETPC_ADJ;
1968
1969
1970 if (unlikely(a_bits > 0 && (addr & ((1 << a_bits) - 1)))) {
1971
1972 cpu_unaligned_access(env_cpu(env), addr, MMU_DATA_STORE,
1973 mmu_idx, retaddr);
1974 }
1975
1976
1977 if (unlikely(addr & (size - 1))) {
1978
1979
1980
1981
1982 goto stop_the_world;
1983 }
1984
1985 index = tlb_index(env, mmu_idx, addr);
1986 tlbe = tlb_entry(env, mmu_idx, addr);
1987
1988
1989 tlb_addr = tlb_addr_write(tlbe);
1990 if (!tlb_hit(tlb_addr, addr)) {
1991 if (!victim_tlb_hit(env, mmu_idx, index, MMU_DATA_STORE,
1992 addr & TARGET_PAGE_MASK)) {
1993 tlb_fill(env_cpu(env), addr, size,
1994 MMU_DATA_STORE, mmu_idx, retaddr);
1995 index = tlb_index(env, mmu_idx, addr);
1996 tlbe = tlb_entry(env, mmu_idx, addr);
1997 }
1998 tlb_addr = tlb_addr_write(tlbe) & ~TLB_INVALID_MASK;
1999 }
2000
2001
2002
2003
2004
2005
2006
2007 if (unlikely(tlbe->addr_read == -1)) {
2008 tlb_fill(env_cpu(env), addr, size, MMU_DATA_LOAD, mmu_idx, retaddr);
2009
2010
2011
2012
2013
2014
2015 goto stop_the_world;
2016 }
2017
2018 tlb_addr |= tlbe->addr_read;
2019
2020
2021 if (unlikely(tlb_addr & (TLB_MMIO | TLB_DISCARD_WRITE))) {
2022
2023
2024 goto stop_the_world;
2025 }
2026
2027 hostaddr = (void *)((uintptr_t)addr + tlbe->addend);
2028 full = &env_tlb(env)->d[mmu_idx].fulltlb[index];
2029
2030 if (unlikely(tlb_addr & TLB_NOTDIRTY)) {
2031 notdirty_write(env_cpu(env), addr, size, full, retaddr);
2032 }
2033
2034 if (unlikely(tlb_addr & TLB_FORCE_SLOW)) {
2035 int wp_flags = 0;
2036
2037 if (full->slow_flags[MMU_DATA_STORE] & TLB_WATCHPOINT) {
2038 wp_flags |= BP_MEM_WRITE;
2039 }
2040 if (full->slow_flags[MMU_DATA_LOAD] & TLB_WATCHPOINT) {
2041 wp_flags |= BP_MEM_READ;
2042 }
2043 if (wp_flags) {
2044 cpu_check_watchpoint(env_cpu(env), addr, size,
2045 full->attrs, wp_flags, retaddr);
2046 }
2047 }
2048
2049 return hostaddr;
2050
2051 stop_the_world:
2052 cpu_loop_exit_atomic(env_cpu(env), retaddr);
2053}
2054
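
/*
 * Load @size bytes (1..8) from an MMIO region in big-endian order,
 * accumulating into @ret_be, issuing the widest naturally aligned reads
 * that the current address and remaining size allow.
 */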
2086static uint64_t do_ld_mmio_beN(CPUArchState *env, CPUTLBEntryFull *full,
2087 uint64_t ret_be, vaddr addr, int size,
2088 int mmu_idx, MMUAccessType type, uintptr_t ra)
2089{
2090 uint64_t t;
2091
2092 tcg_debug_assert(size > 0 && size <= 8);
2093 do {
2094
2095 switch ((size | (int)addr) & 7) {
2096 case 1:
2097 case 3:
2098 case 5:
2099 case 7:
2100 t = io_readx(env, full, mmu_idx, addr, ra, type, MO_UB);
2101 ret_be = (ret_be << 8) | t;
2102 size -= 1;
2103 addr += 1;
2104 break;
2105 case 2:
2106 case 6:
2107 t = io_readx(env, full, mmu_idx, addr, ra, type, MO_BEUW);
2108 ret_be = (ret_be << 16) | t;
2109 size -= 2;
2110 addr += 2;
2111 break;
2112 case 4:
2113 t = io_readx(env, full, mmu_idx, addr, ra, type, MO_BEUL);
2114 ret_be = (ret_be << 32) | t;
2115 size -= 4;
2116 addr += 4;
2117 break;
2118 case 0:
2119 return io_readx(env, full, mmu_idx, addr, ra, type, MO_BEUQ);
2120 default:
2121 qemu_build_not_reached();
2122 }
2123 } while (size);
2124 return ret_be;
2125}
2126
2127
2128
2129
2130
2131
2132
2133
2134
2135static uint64_t do_ld_bytes_beN(MMULookupPageData *p, uint64_t ret_be)
2136{
2137 uint8_t *haddr = p->haddr;
2138 int i, size = p->size;
2139
2140 for (i = 0; i < size; i++) {
2141 ret_be = (ret_be << 8) | haddr[i];
2142 }
2143 return ret_be;
2144}
2145
2146
2147
2148
2149
2150
2151
2152
2153static uint64_t do_ld_parts_beN(MMULookupPageData *p, uint64_t ret_be)
2154{
2155 void *haddr = p->haddr;
2156 int size = p->size;
2157
2158 do {
2159 uint64_t x;
2160 int n;
2161
2162
2163
2164
2165
2166
2167
2168 switch (((uintptr_t)haddr | size) & 7) {
2169 case 4:
2170 x = cpu_to_be32(load_atomic4(haddr));
2171 ret_be = (ret_be << 32) | x;
2172 n = 4;
2173 break;
2174 case 2:
2175 case 6:
2176 x = cpu_to_be16(load_atomic2(haddr));
2177 ret_be = (ret_be << 16) | x;
2178 n = 2;
2179 break;
2180 default:
2181 x = *(uint8_t *)haddr;
2182 ret_be = (ret_be << 8) | x;
2183 n = 1;
2184 break;
2185 case 0:
2186 g_assert_not_reached();
2187 }
2188 haddr += n;
2189 size -= n;
2190 } while (size != 0);
2191 return ret_be;
2192}
2193
2194
2195
2196
2197
2198
2199
2200
2201
2202static uint64_t do_ld_whole_be4(MMULookupPageData *p, uint64_t ret_be)
2203{
2204 int o = p->addr & 3;
2205 uint32_t x = load_atomic4(p->haddr - o);
2206
2207 x = cpu_to_be32(x);
2208 x <<= o * 8;
2209 x >>= (4 - p->size) * 8;
2210 return (ret_be << (p->size * 8)) | x;
2211}
2212
2213
2214
2215
2216
2217
2218
2219
2220
2221static uint64_t do_ld_whole_be8(CPUArchState *env, uintptr_t ra,
2222 MMULookupPageData *p, uint64_t ret_be)
2223{
2224 int o = p->addr & 7;
2225 uint64_t x = load_atomic8_or_exit(env, ra, p->haddr - o);
2226
2227 x = cpu_to_be64(x);
2228 x <<= o * 8;
2229 x >>= (8 - p->size) * 8;
2230 return (ret_be << (p->size * 8)) | x;
2231}
2232
2233
2234
2235
2236
2237
2238
2239
2240
2241static Int128 do_ld_whole_be16(CPUArchState *env, uintptr_t ra,
2242 MMULookupPageData *p, uint64_t ret_be)
2243{
2244 int o = p->addr & 15;
2245 Int128 x, y = load_atomic16_or_exit(env, ra, p->haddr - o);
2246 int size = p->size;
2247
2248 if (!HOST_BIG_ENDIAN) {
2249 y = bswap128(y);
2250 }
2251 y = int128_lshift(y, o * 8);
2252 y = int128_urshift(y, (16 - size) * 8);
2253 x = int128_make64(ret_be);
2254 x = int128_lshift(x, size * 8);
2255 return int128_or(x, y);
2256}
2257
2258
2259
2260
2261static uint64_t do_ld_beN(CPUArchState *env, MMULookupPageData *p,
2262 uint64_t ret_be, int mmu_idx, MMUAccessType type,
2263 MemOp mop, uintptr_t ra)
2264{
2265 MemOp atom;
2266 unsigned tmp, half_size;
2267
2268 if (unlikely(p->flags & TLB_MMIO)) {
2269 QEMU_IOTHREAD_LOCK_GUARD();
2270 return do_ld_mmio_beN(env, p->full, ret_be, p->addr, p->size,
2271 mmu_idx, type, ra);
2272 }
2273
2274
2275
2276
2277
2278 atom = mop & MO_ATOM_MASK;
2279 switch (atom) {
2280 case MO_ATOM_SUBALIGN:
2281 return do_ld_parts_beN(p, ret_be);
2282
2283 case MO_ATOM_IFALIGN_PAIR:
2284 case MO_ATOM_WITHIN16_PAIR:
2285 tmp = mop & MO_SIZE;
2286 tmp = tmp ? tmp - 1 : 0;
2287 half_size = 1 << tmp;
2288 if (atom == MO_ATOM_IFALIGN_PAIR
2289 ? p->size == half_size
2290 : p->size >= half_size) {
2291 if (!HAVE_al8_fast && p->size < 4) {
2292 return do_ld_whole_be4(p, ret_be);
2293 } else {
2294 return do_ld_whole_be8(env, ra, p, ret_be);
2295 }
2296 }
2297
2298
2299 case MO_ATOM_IFALIGN:
2300 case MO_ATOM_WITHIN16:
2301 case MO_ATOM_NONE:
2302 return do_ld_bytes_beN(p, ret_be);
2303
2304 default:
2305 g_assert_not_reached();
2306 }
2307}
2308
2309
2310
2311
2312static Int128 do_ld16_beN(CPUArchState *env, MMULookupPageData *p,
2313 uint64_t a, int mmu_idx, MemOp mop, uintptr_t ra)
2314{
2315 int size = p->size;
2316 uint64_t b;
2317 MemOp atom;
2318
2319 if (unlikely(p->flags & TLB_MMIO)) {
2320 QEMU_IOTHREAD_LOCK_GUARD();
2321 a = do_ld_mmio_beN(env, p->full, a, p->addr, size - 8,
2322 mmu_idx, MMU_DATA_LOAD, ra);
2323 b = do_ld_mmio_beN(env, p->full, 0, p->addr + 8, 8,
2324 mmu_idx, MMU_DATA_LOAD, ra);
2325 return int128_make128(b, a);
2326 }
2327
2328
2329
2330
2331
2332 atom = mop & MO_ATOM_MASK;
2333 switch (atom) {
2334 case MO_ATOM_SUBALIGN:
2335 p->size = size - 8;
2336 a = do_ld_parts_beN(p, a);
2337 p->haddr += size - 8;
2338 p->size = 8;
2339 b = do_ld_parts_beN(p, 0);
2340 break;
2341
2342 case MO_ATOM_WITHIN16_PAIR:
2343
2344 return do_ld_whole_be16(env, ra, p, a);
2345
2346 case MO_ATOM_IFALIGN_PAIR:
2347
2348
2349
2350
2351 case MO_ATOM_IFALIGN:
2352 case MO_ATOM_WITHIN16:
2353 case MO_ATOM_NONE:
2354 p->size = size - 8;
2355 a = do_ld_bytes_beN(p, a);
2356 b = ldq_be_p(p->haddr + size - 8);
2357 break;
2358
2359 default:
2360 g_assert_not_reached();
2361 }
2362
2363 return int128_make128(b, a);
2364}
2365
2366static uint8_t do_ld_1(CPUArchState *env, MMULookupPageData *p, int mmu_idx,
2367 MMUAccessType type, uintptr_t ra)
2368{
2369 if (unlikely(p->flags & TLB_MMIO)) {
2370 return io_readx(env, p->full, mmu_idx, p->addr, ra, type, MO_UB);
2371 } else {
2372 return *(uint8_t *)p->haddr;
2373 }
2374}
2375
2376static uint16_t do_ld_2(CPUArchState *env, MMULookupPageData *p, int mmu_idx,
2377 MMUAccessType type, MemOp memop, uintptr_t ra)
2378{
2379 uint16_t ret;
2380
2381 if (unlikely(p->flags & TLB_MMIO)) {
2382 QEMU_IOTHREAD_LOCK_GUARD();
2383 ret = do_ld_mmio_beN(env, p->full, 0, p->addr, 2, mmu_idx, type, ra);
2384 if ((memop & MO_BSWAP) == MO_LE) {
2385 ret = bswap16(ret);
2386 }
2387 } else {
2388
2389 ret = load_atom_2(env, ra, p->haddr, memop);
2390 if (memop & MO_BSWAP) {
2391 ret = bswap16(ret);
2392 }
2393 }
2394 return ret;
2395}
2396
2397static uint32_t do_ld_4(CPUArchState *env, MMULookupPageData *p, int mmu_idx,
2398 MMUAccessType type, MemOp memop, uintptr_t ra)
2399{
2400 uint32_t ret;
2401
2402 if (unlikely(p->flags & TLB_MMIO)) {
2403 QEMU_IOTHREAD_LOCK_GUARD();
2404 ret = do_ld_mmio_beN(env, p->full, 0, p->addr, 4, mmu_idx, type, ra);
2405 if ((memop & MO_BSWAP) == MO_LE) {
2406 ret = bswap32(ret);
2407 }
2408 } else {
2409
2410 ret = load_atom_4(env, ra, p->haddr, memop);
2411 if (memop & MO_BSWAP) {
2412 ret = bswap32(ret);
2413 }
2414 }
2415 return ret;
2416}
2417
2418static uint64_t do_ld_8(CPUArchState *env, MMULookupPageData *p, int mmu_idx,
2419 MMUAccessType type, MemOp memop, uintptr_t ra)
2420{
2421 uint64_t ret;
2422
2423 if (unlikely(p->flags & TLB_MMIO)) {
2424 QEMU_IOTHREAD_LOCK_GUARD();
2425 ret = do_ld_mmio_beN(env, p->full, 0, p->addr, 8, mmu_idx, type, ra);
2426 if ((memop & MO_BSWAP) == MO_LE) {
2427 ret = bswap64(ret);
2428 }
2429 } else {
2430
2431 ret = load_atom_8(env, ra, p->haddr, memop);
2432 if (memop & MO_BSWAP) {
2433 ret = bswap64(ret);
2434 }
2435 }
2436 return ret;
2437}
2438
2439static uint8_t do_ld1_mmu(CPUArchState *env, vaddr addr, MemOpIdx oi,
2440 uintptr_t ra, MMUAccessType access_type)
2441{
2442 MMULookupLocals l;
2443 bool crosspage;
2444
2445 cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
2446 crosspage = mmu_lookup(env, addr, oi, ra, access_type, &l);
2447 tcg_debug_assert(!crosspage);
2448
2449 return do_ld_1(env, &l.page[0], l.mmu_idx, access_type, ra);
2450}
2451
2452tcg_target_ulong helper_ldub_mmu(CPUArchState *env, uint64_t addr,
2453 MemOpIdx oi, uintptr_t retaddr)
2454{
2455 tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_8);
2456 return do_ld1_mmu(env, addr, oi, retaddr, MMU_DATA_LOAD);
2457}
2458
2459static uint16_t do_ld2_mmu(CPUArchState *env, vaddr addr, MemOpIdx oi,
2460 uintptr_t ra, MMUAccessType access_type)
2461{
2462 MMULookupLocals l;
2463 bool crosspage;
2464 uint16_t ret;
2465 uint8_t a, b;
2466
2467 cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
2468 crosspage = mmu_lookup(env, addr, oi, ra, access_type, &l);
2469 if (likely(!crosspage)) {
2470 return do_ld_2(env, &l.page[0], l.mmu_idx, access_type, l.memop, ra);
2471 }
2472
2473 a = do_ld_1(env, &l.page[0], l.mmu_idx, access_type, ra);
2474 b = do_ld_1(env, &l.page[1], l.mmu_idx, access_type, ra);
2475
2476 if ((l.memop & MO_BSWAP) == MO_LE) {
2477 ret = a | (b << 8);
2478 } else {
2479 ret = b | (a << 8);
2480 }
2481 return ret;
2482}
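/*
 * For example (illustrative values): a little-endian 2-byte load that
 * crosses a page, with 0x34 in the last byte of the first page and 0x12
 * in the first byte of the second, yields a = 0x34 and b = 0x12, hence
 * ret = a | (b << 8) = 0x1234.
 */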
2483
2484tcg_target_ulong helper_lduw_mmu(CPUArchState *env, uint64_t addr,
2485 MemOpIdx oi, uintptr_t retaddr)
2486{
2487 tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_16);
2488 return do_ld2_mmu(env, addr, oi, retaddr, MMU_DATA_LOAD);
2489}
2490
2491static uint32_t do_ld4_mmu(CPUArchState *env, vaddr addr, MemOpIdx oi,
2492 uintptr_t ra, MMUAccessType access_type)
2493{
2494 MMULookupLocals l;
2495 bool crosspage;
2496 uint32_t ret;
2497
2498 cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
2499 crosspage = mmu_lookup(env, addr, oi, ra, access_type, &l);
2500 if (likely(!crosspage)) {
2501 return do_ld_4(env, &l.page[0], l.mmu_idx, access_type, l.memop, ra);
2502 }
2503
2504 ret = do_ld_beN(env, &l.page[0], 0, l.mmu_idx, access_type, l.memop, ra);
2505 ret = do_ld_beN(env, &l.page[1], ret, l.mmu_idx, access_type, l.memop, ra);
2506 if ((l.memop & MO_BSWAP) == MO_LE) {
2507 ret = bswap32(ret);
2508 }
2509 return ret;
2510}
2511
2512tcg_target_ulong helper_ldul_mmu(CPUArchState *env, uint64_t addr,
2513 MemOpIdx oi, uintptr_t retaddr)
2514{
2515 tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_32);
2516 return do_ld4_mmu(env, addr, oi, retaddr, MMU_DATA_LOAD);
2517}
2518
2519static uint64_t do_ld8_mmu(CPUArchState *env, vaddr addr, MemOpIdx oi,
2520 uintptr_t ra, MMUAccessType access_type)
2521{
2522 MMULookupLocals l;
2523 bool crosspage;
2524 uint64_t ret;
2525
2526 cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
2527 crosspage = mmu_lookup(env, addr, oi, ra, access_type, &l);
2528 if (likely(!crosspage)) {
2529 return do_ld_8(env, &l.page[0], l.mmu_idx, access_type, l.memop, ra);
2530 }
2531
2532 ret = do_ld_beN(env, &l.page[0], 0, l.mmu_idx, access_type, l.memop, ra);
2533 ret = do_ld_beN(env, &l.page[1], ret, l.mmu_idx, access_type, l.memop, ra);
2534 if ((l.memop & MO_BSWAP) == MO_LE) {
2535 ret = bswap64(ret);
2536 }
2537 return ret;
2538}
2539
2540uint64_t helper_ldq_mmu(CPUArchState *env, uint64_t addr,
2541 MemOpIdx oi, uintptr_t retaddr)
2542{
2543 tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_64);
2544 return do_ld8_mmu(env, addr, oi, retaddr, MMU_DATA_LOAD);
2545}
2546
/*
 * Provide signed versions of the load routines as well.  These simply
 * sign-extend the corresponding unsigned helpers.
 */
2551
2552tcg_target_ulong helper_ldsb_mmu(CPUArchState *env, uint64_t addr,
2553 MemOpIdx oi, uintptr_t retaddr)
2554{
2555 return (int8_t)helper_ldub_mmu(env, addr, oi, retaddr);
2556}
2557
2558tcg_target_ulong helper_ldsw_mmu(CPUArchState *env, uint64_t addr,
2559 MemOpIdx oi, uintptr_t retaddr)
2560{
2561 return (int16_t)helper_lduw_mmu(env, addr, oi, retaddr);
2562}
2563
2564tcg_target_ulong helper_ldsl_mmu(CPUArchState *env, uint64_t addr,
2565 MemOpIdx oi, uintptr_t retaddr)
2566{
2567 return (int32_t)helper_ldul_mmu(env, addr, oi, retaddr);
2568}
2569
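/*
 * do_ld16_mmu handles a 16-byte load.  Within one page it dispatches on
 * TLB_MMIO just like the smaller sizes; across a page boundary, an even
 * 8/8 split is handled as two 8-byte loads, while any other split is
 * assembled big-endian and byte-swapped at the end if the memop is
 * little-endian.
 */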
2570static Int128 do_ld16_mmu(CPUArchState *env, vaddr addr,
2571 MemOpIdx oi, uintptr_t ra)
2572{
2573 MMULookupLocals l;
2574 bool crosspage;
2575 uint64_t a, b;
2576 Int128 ret;
2577 int first;
2578
2579 cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
2580 crosspage = mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD, &l);
2581 if (likely(!crosspage)) {
2582 if (unlikely(l.page[0].flags & TLB_MMIO)) {
2583 QEMU_IOTHREAD_LOCK_GUARD();
2584 a = do_ld_mmio_beN(env, l.page[0].full, 0, addr, 8,
2585 l.mmu_idx, MMU_DATA_LOAD, ra);
2586 b = do_ld_mmio_beN(env, l.page[0].full, 0, addr + 8, 8,
2587 l.mmu_idx, MMU_DATA_LOAD, ra);
2588 ret = int128_make128(b, a);
2589 if ((l.memop & MO_BSWAP) == MO_LE) {
2590 ret = bswap128(ret);
2591 }
2592 } else {
            /* Perform the load host endian. */
2594 ret = load_atom_16(env, ra, l.page[0].haddr, l.memop);
2595 if (l.memop & MO_BSWAP) {
2596 ret = bswap128(ret);
2597 }
2598 }
2599 return ret;
2600 }
2601
2602 first = l.page[0].size;
2603 if (first == 8) {
2604 MemOp mop8 = (l.memop & ~MO_SIZE) | MO_64;
2605
2606 a = do_ld_8(env, &l.page[0], l.mmu_idx, MMU_DATA_LOAD, mop8, ra);
2607 b = do_ld_8(env, &l.page[1], l.mmu_idx, MMU_DATA_LOAD, mop8, ra);
2608 if ((mop8 & MO_BSWAP) == MO_LE) {
2609 ret = int128_make128(a, b);
2610 } else {
2611 ret = int128_make128(b, a);
2612 }
2613 return ret;
2614 }
2615
2616 if (first < 8) {
2617 a = do_ld_beN(env, &l.page[0], 0, l.mmu_idx,
2618 MMU_DATA_LOAD, l.memop, ra);
2619 ret = do_ld16_beN(env, &l.page[1], a, l.mmu_idx, l.memop, ra);
2620 } else {
2621 ret = do_ld16_beN(env, &l.page[0], 0, l.mmu_idx, l.memop, ra);
2622 b = int128_getlo(ret);
2623 ret = int128_lshift(ret, l.page[1].size * 8);
2624 a = int128_gethi(ret);
2625 b = do_ld_beN(env, &l.page[1], b, l.mmu_idx,
2626 MMU_DATA_LOAD, l.memop, ra);
2627 ret = int128_make128(b, a);
2628 }
2629 if ((l.memop & MO_BSWAP) == MO_LE) {
2630 ret = bswap128(ret);
2631 }
2632 return ret;
2633}
2634
2635Int128 helper_ld16_mmu(CPUArchState *env, uint64_t addr,
2636 uint32_t oi, uintptr_t retaddr)
2637{
2638 tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_128);
2639 return do_ld16_mmu(env, addr, oi, retaddr);
2640}
2641
2642Int128 helper_ld_i128(CPUArchState *env, uint64_t addr, uint32_t oi)
2643{
2644 return helper_ld16_mmu(env, addr, oi, GETPC());
2645}
2646
/*
 * Load helpers for cpu_ldst.h.
 */
2650
2651static void plugin_load_cb(CPUArchState *env, abi_ptr addr, MemOpIdx oi)
2652{
2653 qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
2654}
2655
2656uint8_t cpu_ldb_mmu(CPUArchState *env, abi_ptr addr, MemOpIdx oi, uintptr_t ra)
2657{
2658 uint8_t ret;
2659
2660 tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_UB);
2661 ret = do_ld1_mmu(env, addr, oi, ra, MMU_DATA_LOAD);
2662 plugin_load_cb(env, addr, oi);
2663 return ret;
2664}
2665
2666uint16_t cpu_ldw_mmu(CPUArchState *env, abi_ptr addr,
2667 MemOpIdx oi, uintptr_t ra)
2668{
2669 uint16_t ret;
2670
2671 tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_16);
2672 ret = do_ld2_mmu(env, addr, oi, ra, MMU_DATA_LOAD);
2673 plugin_load_cb(env, addr, oi);
2674 return ret;
2675}
2676
2677uint32_t cpu_ldl_mmu(CPUArchState *env, abi_ptr addr,
2678 MemOpIdx oi, uintptr_t ra)
2679{
2680 uint32_t ret;
2681
2682 tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_32);
2683 ret = do_ld4_mmu(env, addr, oi, ra, MMU_DATA_LOAD);
2684 plugin_load_cb(env, addr, oi);
2685 return ret;
2686}
2687
2688uint64_t cpu_ldq_mmu(CPUArchState *env, abi_ptr addr,
2689 MemOpIdx oi, uintptr_t ra)
2690{
2691 uint64_t ret;
2692
2693 tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_64);
2694 ret = do_ld8_mmu(env, addr, oi, ra, MMU_DATA_LOAD);
2695 plugin_load_cb(env, addr, oi);
2696 return ret;
2697}
2698
2699Int128 cpu_ld16_mmu(CPUArchState *env, abi_ptr addr,
2700 MemOpIdx oi, uintptr_t ra)
2701{
2702 Int128 ret;
2703
2704 tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_128);
2705 ret = do_ld16_mmu(env, addr, oi, ra);
2706 plugin_load_cb(env, addr, oi);
2707 return ret;
2708}
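/*
 * A minimal usage sketch for the cpu_ld*_mmu helpers above; "mmu_idx"
 * and "addr" merely stand for values the caller already has:
 *
 *     MemOpIdx oi = make_memop_idx(MO_LEUL, mmu_idx);
 *     uint32_t val = cpu_ldl_mmu(env, addr, oi, GETPC());
 *
 * The size encoded in the MemOpIdx must match the helper being called,
 * as checked by the tcg_debug_assert in each helper; pass GETPC() when
 * calling from a TCG helper, or 0 when not called from generated code.
 */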
2709
/*
 * Store Helpers
 */

/**
 * do_st_mmio_leN:
 * @env: cpu context
 * @full: page parameters
 * @val_le: data to store
 * @addr: virtual address
 * @size: number of bytes
 * @mmu_idx: virtual address context
 * @ra: return address into tcg generated code, or 0
 * Context: iothread lock held
 *
 * Store @size bytes at @addr, which is memory-mapped i/o.
 * The bytes to store are extracted in little-endian order from @val_le;
 * return the bytes of @val_le beyond @size that have not been stored.
 */
2729static uint64_t do_st_mmio_leN(CPUArchState *env, CPUTLBEntryFull *full,
2730 uint64_t val_le, vaddr addr, int size,
2731 int mmu_idx, uintptr_t ra)
2732{
2733 tcg_debug_assert(size > 0 && size <= 8);
2734
2735 do {
        /* Store aligned pieces up to 8 bytes. */
2737 switch ((size | (int)addr) & 7) {
2738 case 1:
2739 case 3:
2740 case 5:
2741 case 7:
2742 io_writex(env, full, mmu_idx, val_le, addr, ra, MO_UB);
2743 val_le >>= 8;
2744 size -= 1;
2745 addr += 1;
2746 break;
2747 case 2:
2748 case 6:
2749 io_writex(env, full, mmu_idx, val_le, addr, ra, MO_LEUW);
2750 val_le >>= 16;
2751 size -= 2;
2752 addr += 2;
2753 break;
2754 case 4:
2755 io_writex(env, full, mmu_idx, val_le, addr, ra, MO_LEUL);
2756 val_le >>= 32;
2757 size -= 4;
2758 addr += 4;
2759 break;
2760 case 0:
2761 io_writex(env, full, mmu_idx, val_le, addr, ra, MO_LEUQ);
2762 return 0;
2763 default:
2764 qemu_build_not_reached();
2765 }
2766 } while (size);
2767
2768 return val_le;
2769}
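/*
 * For example (illustrative values): a 5-byte store to an address whose
 * low three bits are 3 hits case 7 first (a single MO_UB write), leaving
 * a 4-byte-aligned remainder that is finished with one MO_LEUL write.
 */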
2770
/*
 * Wrapper for the above: store @p->size bytes at @p->addr, taken
 * little-endian from @val_le, and return the bytes of @val_le that
 * were not consumed.
 */
2774static uint64_t do_st_leN(CPUArchState *env, MMULookupPageData *p,
2775 uint64_t val_le, int mmu_idx,
2776 MemOp mop, uintptr_t ra)
2777{
2778 MemOp atom;
2779 unsigned tmp, half_size;
2780
2781 if (unlikely(p->flags & TLB_MMIO)) {
2782 QEMU_IOTHREAD_LOCK_GUARD();
2783 return do_st_mmio_leN(env, p->full, val_le, p->addr,
2784 p->size, mmu_idx, ra);
2785 } else if (unlikely(p->flags & TLB_DISCARD_WRITE)) {
2786 return val_le >> (p->size * 8);
2787 }
2788
    /*
     * It is a given that we cross a page and therefore there is no atomicity
     * for the store as a whole, but subobjects may need attention.
     */
2793 atom = mop & MO_ATOM_MASK;
2794 switch (atom) {
2795 case MO_ATOM_SUBALIGN:
2796 return store_parts_leN(p->haddr, p->size, val_le);
2797
2798 case MO_ATOM_IFALIGN_PAIR:
2799 case MO_ATOM_WITHIN16_PAIR:
2800 tmp = mop & MO_SIZE;
2801 tmp = tmp ? tmp - 1 : 0;
2802 half_size = 1 << tmp;
2803 if (atom == MO_ATOM_IFALIGN_PAIR
2804 ? p->size == half_size
2805 : p->size >= half_size) {
2806 if (!HAVE_al8_fast && p->size <= 4) {
2807 return store_whole_le4(p->haddr, p->size, val_le);
2808 } else if (HAVE_al8) {
2809 return store_whole_le8(p->haddr, p->size, val_le);
2810 } else {
2811 cpu_loop_exit_atomic(env_cpu(env), ra);
2812 }
2813 }
        /* fall through */
2815
2816 case MO_ATOM_IFALIGN:
2817 case MO_ATOM_WITHIN16:
2818 case MO_ATOM_NONE:
2819 return store_bytes_leN(p->haddr, p->size, val_le);
2820
2821 default:
2822 g_assert_not_reached();
2823 }
2824}
2825
/*
 * Wrapper for the above, for 8 < size < 16.
 */
2829static uint64_t do_st16_leN(CPUArchState *env, MMULookupPageData *p,
2830 Int128 val_le, int mmu_idx,
2831 MemOp mop, uintptr_t ra)
2832{
2833 int size = p->size;
2834 MemOp atom;
2835
2836 if (unlikely(p->flags & TLB_MMIO)) {
2837 QEMU_IOTHREAD_LOCK_GUARD();
2838 do_st_mmio_leN(env, p->full, int128_getlo(val_le),
2839 p->addr, 8, mmu_idx, ra);
2840 return do_st_mmio_leN(env, p->full, int128_gethi(val_le),
2841 p->addr + 8, size - 8, mmu_idx, ra);
2842 } else if (unlikely(p->flags & TLB_DISCARD_WRITE)) {
2843 return int128_gethi(val_le) >> ((size - 8) * 8);
2844 }
2845
    /*
     * It is a given that we cross a page and therefore there is no atomicity
     * for the store as a whole, but subobjects may need attention.
     */
2850 atom = mop & MO_ATOM_MASK;
2851 switch (atom) {
2852 case MO_ATOM_SUBALIGN:
2853 store_parts_leN(p->haddr, 8, int128_getlo(val_le));
2854 return store_parts_leN(p->haddr + 8, p->size - 8,
2855 int128_gethi(val_le));
2856
2857 case MO_ATOM_WITHIN16_PAIR:
        /* Since size > 8, this is the half that must be atomic. */
2859 if (!HAVE_ATOMIC128_RW) {
2860 cpu_loop_exit_atomic(env_cpu(env), ra);
2861 }
2862 return store_whole_le16(p->haddr, p->size, val_le);
2863
2864 case MO_ATOM_IFALIGN_PAIR:
        /*
         * Since size > 8, both halves are misaligned,
         * and so neither is atomic.
         */
2869 case MO_ATOM_IFALIGN:
2870 case MO_ATOM_WITHIN16:
2871 case MO_ATOM_NONE:
2872 stq_le_p(p->haddr, int128_getlo(val_le));
2873 return store_bytes_leN(p->haddr + 8, p->size - 8,
2874 int128_gethi(val_le));
2875
2876 default:
2877 g_assert_not_reached();
2878 }
2879}
2880
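/*
 * The do_st_{1,2,4,8} helpers below are the store-side mirror of the
 * load helpers above: an MMIO page is written via io_writex or, under
 * the iothread lock, via do_st_mmio_leN with the value first swapped to
 * little-endian; a page marked TLB_DISCARD_WRITE is silently ignored;
 * a RAM page is written via store_atom_N with the value first swapped
 * to host endianness.
 */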
2881static void do_st_1(CPUArchState *env, MMULookupPageData *p, uint8_t val,
2882 int mmu_idx, uintptr_t ra)
2883{
2884 if (unlikely(p->flags & TLB_MMIO)) {
2885 io_writex(env, p->full, mmu_idx, val, p->addr, ra, MO_UB);
2886 } else if (unlikely(p->flags & TLB_DISCARD_WRITE)) {
        /* nothing */
2888 } else {
2889 *(uint8_t *)p->haddr = val;
2890 }
2891}
2892
2893static void do_st_2(CPUArchState *env, MMULookupPageData *p, uint16_t val,
2894 int mmu_idx, MemOp memop, uintptr_t ra)
2895{
2896 if (unlikely(p->flags & TLB_MMIO)) {
2897 if ((memop & MO_BSWAP) != MO_LE) {
2898 val = bswap16(val);
2899 }
2900 QEMU_IOTHREAD_LOCK_GUARD();
2901 do_st_mmio_leN(env, p->full, val, p->addr, 2, mmu_idx, ra);
2902 } else if (unlikely(p->flags & TLB_DISCARD_WRITE)) {
        /* nothing */
2904 } else {
        /* Swap to host endian if necessary, then store. */
2906 if (memop & MO_BSWAP) {
2907 val = bswap16(val);
2908 }
2909 store_atom_2(env, ra, p->haddr, memop, val);
2910 }
2911}
2912
2913static void do_st_4(CPUArchState *env, MMULookupPageData *p, uint32_t val,
2914 int mmu_idx, MemOp memop, uintptr_t ra)
2915{
2916 if (unlikely(p->flags & TLB_MMIO)) {
2917 if ((memop & MO_BSWAP) != MO_LE) {
2918 val = bswap32(val);
2919 }
2920 QEMU_IOTHREAD_LOCK_GUARD();
2921 do_st_mmio_leN(env, p->full, val, p->addr, 4, mmu_idx, ra);
2922 } else if (unlikely(p->flags & TLB_DISCARD_WRITE)) {
        /* nothing */
2924 } else {
        /* Swap to host endian if necessary, then store. */
2926 if (memop & MO_BSWAP) {
2927 val = bswap32(val);
2928 }
2929 store_atom_4(env, ra, p->haddr, memop, val);
2930 }
2931}
2932
2933static void do_st_8(CPUArchState *env, MMULookupPageData *p, uint64_t val,
2934 int mmu_idx, MemOp memop, uintptr_t ra)
2935{
2936 if (unlikely(p->flags & TLB_MMIO)) {
2937 if ((memop & MO_BSWAP) != MO_LE) {
2938 val = bswap64(val);
2939 }
2940 QEMU_IOTHREAD_LOCK_GUARD();
2941 do_st_mmio_leN(env, p->full, val, p->addr, 8, mmu_idx, ra);
2942 } else if (unlikely(p->flags & TLB_DISCARD_WRITE)) {
        /* nothing */
2944 } else {
        /* Swap to host endian if necessary, then store. */
2946 if (memop & MO_BSWAP) {
2947 val = bswap64(val);
2948 }
2949 store_atom_8(env, ra, p->haddr, memop, val);
2950 }
2951}
2952
2953void helper_stb_mmu(CPUArchState *env, uint64_t addr, uint32_t val,
2954 MemOpIdx oi, uintptr_t ra)
2955{
2956 MMULookupLocals l;
2957 bool crosspage;
2958
2959 tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_8);
2960 cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
2961 crosspage = mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE, &l);
2962 tcg_debug_assert(!crosspage);
2963
2964 do_st_1(env, &l.page[0], val, l.mmu_idx, ra);
2965}
2966
2967static void do_st2_mmu(CPUArchState *env, vaddr addr, uint16_t val,
2968 MemOpIdx oi, uintptr_t ra)
2969{
2970 MMULookupLocals l;
2971 bool crosspage;
2972 uint8_t a, b;
2973
2974 cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
2975 crosspage = mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE, &l);
2976 if (likely(!crosspage)) {
2977 do_st_2(env, &l.page[0], val, l.mmu_idx, l.memop, ra);
2978 return;
2979 }
2980
2981 if ((l.memop & MO_BSWAP) == MO_LE) {
2982 a = val, b = val >> 8;
2983 } else {
2984 b = val, a = val >> 8;
2985 }
2986 do_st_1(env, &l.page[0], a, l.mmu_idx, ra);
2987 do_st_1(env, &l.page[1], b, l.mmu_idx, ra);
2988}
2989
2990void helper_stw_mmu(CPUArchState *env, uint64_t addr, uint32_t val,
2991 MemOpIdx oi, uintptr_t retaddr)
2992{
2993 tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_16);
2994 do_st2_mmu(env, addr, val, oi, retaddr);
2995}
2996
2997static void do_st4_mmu(CPUArchState *env, vaddr addr, uint32_t val,
2998 MemOpIdx oi, uintptr_t ra)
2999{
3000 MMULookupLocals l;
3001 bool crosspage;
3002
3003 cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
3004 crosspage = mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE, &l);
3005 if (likely(!crosspage)) {
3006 do_st_4(env, &l.page[0], val, l.mmu_idx, l.memop, ra);
3007 return;
3008 }
3009
    /* Swap to little endian for simplicity, then store by bytes. */
3011 if ((l.memop & MO_BSWAP) != MO_LE) {
3012 val = bswap32(val);
3013 }
3014 val = do_st_leN(env, &l.page[0], val, l.mmu_idx, l.memop, ra);
3015 (void) do_st_leN(env, &l.page[1], val, l.mmu_idx, l.memop, ra);
3016}
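/*
 * For example (illustrative values): storing the 32-bit value 0x04030201
 * with three bytes left on the first page writes bytes 01 02 03 there;
 * do_st_leN then returns val >> 24 == 0x04, which becomes the first byte
 * of the second page.
 */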
3017
3018void helper_stl_mmu(CPUArchState *env, uint64_t addr, uint32_t val,
3019 MemOpIdx oi, uintptr_t retaddr)
3020{
3021 tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_32);
3022 do_st4_mmu(env, addr, val, oi, retaddr);
3023}
3024
3025static void do_st8_mmu(CPUArchState *env, vaddr addr, uint64_t val,
3026 MemOpIdx oi, uintptr_t ra)
3027{
3028 MMULookupLocals l;
3029 bool crosspage;
3030
3031 cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
3032 crosspage = mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE, &l);
3033 if (likely(!crosspage)) {
3034 do_st_8(env, &l.page[0], val, l.mmu_idx, l.memop, ra);
3035 return;
3036 }
3037
    /* Swap to little endian for simplicity, then store by bytes. */
3039 if ((l.memop & MO_BSWAP) != MO_LE) {
3040 val = bswap64(val);
3041 }
3042 val = do_st_leN(env, &l.page[0], val, l.mmu_idx, l.memop, ra);
3043 (void) do_st_leN(env, &l.page[1], val, l.mmu_idx, l.memop, ra);
3044}
3045
3046void helper_stq_mmu(CPUArchState *env, uint64_t addr, uint64_t val,
3047 MemOpIdx oi, uintptr_t retaddr)
3048{
3049 tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_64);
3050 do_st8_mmu(env, addr, val, oi, retaddr);
3051}
3052
3053static void do_st16_mmu(CPUArchState *env, vaddr addr, Int128 val,
3054 MemOpIdx oi, uintptr_t ra)
3055{
3056 MMULookupLocals l;
3057 bool crosspage;
3058 uint64_t a, b;
3059 int first;
3060
3061 cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
3062 crosspage = mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE, &l);
3063 if (likely(!crosspage)) {
3064 if (unlikely(l.page[0].flags & TLB_MMIO)) {
3065 if ((l.memop & MO_BSWAP) != MO_LE) {
3066 val = bswap128(val);
3067 }
3068 a = int128_getlo(val);
3069 b = int128_gethi(val);
3070 QEMU_IOTHREAD_LOCK_GUARD();
3071 do_st_mmio_leN(env, l.page[0].full, a, addr, 8, l.mmu_idx, ra);
3072 do_st_mmio_leN(env, l.page[0].full, b, addr + 8, 8, l.mmu_idx, ra);
3073 } else if (unlikely(l.page[0].flags & TLB_DISCARD_WRITE)) {
            /* nothing */
3075 } else {
            /* Swap to host endian if necessary, then store. */
3077 if (l.memop & MO_BSWAP) {
3078 val = bswap128(val);
3079 }
3080 store_atom_16(env, ra, l.page[0].haddr, l.memop, val);
3081 }
3082 return;
3083 }
3084
3085 first = l.page[0].size;
3086 if (first == 8) {
3087 MemOp mop8 = (l.memop & ~(MO_SIZE | MO_BSWAP)) | MO_64;
3088
3089 if (l.memop & MO_BSWAP) {
3090 val = bswap128(val);
3091 }
3092 if (HOST_BIG_ENDIAN) {
3093 b = int128_getlo(val), a = int128_gethi(val);
3094 } else {
3095 a = int128_getlo(val), b = int128_gethi(val);
3096 }
3097 do_st_8(env, &l.page[0], a, l.mmu_idx, mop8, ra);
3098 do_st_8(env, &l.page[1], b, l.mmu_idx, mop8, ra);
3099 return;
3100 }
3101
3102 if ((l.memop & MO_BSWAP) != MO_LE) {
3103 val = bswap128(val);
3104 }
3105 if (first < 8) {
3106 do_st_leN(env, &l.page[0], int128_getlo(val), l.mmu_idx, l.memop, ra);
3107 val = int128_urshift(val, first * 8);
3108 do_st16_leN(env, &l.page[1], val, l.mmu_idx, l.memop, ra);
3109 } else {
3110 b = do_st16_leN(env, &l.page[0], val, l.mmu_idx, l.memop, ra);
3111 do_st_leN(env, &l.page[1], b, l.mmu_idx, l.memop, ra);
3112 }
3113}
3114
3115void helper_st16_mmu(CPUArchState *env, uint64_t addr, Int128 val,
3116 MemOpIdx oi, uintptr_t retaddr)
3117{
3118 tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_128);
3119 do_st16_mmu(env, addr, val, oi, retaddr);
3120}
3121
3122void helper_st_i128(CPUArchState *env, uint64_t addr, Int128 val, MemOpIdx oi)
3123{
3124 helper_st16_mmu(env, addr, val, oi, GETPC());
3125}
3126
/*
 * Store helpers for cpu_ldst.h.
 */
3130
3131static void plugin_store_cb(CPUArchState *env, abi_ptr addr, MemOpIdx oi)
3132{
3133 qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
3134}
3135
3136void cpu_stb_mmu(CPUArchState *env, target_ulong addr, uint8_t val,
3137 MemOpIdx oi, uintptr_t retaddr)
3138{
3139 helper_stb_mmu(env, addr, val, oi, retaddr);
3140 plugin_store_cb(env, addr, oi);
3141}
3142
3143void cpu_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
3144 MemOpIdx oi, uintptr_t retaddr)
3145{
3146 tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_16);
3147 do_st2_mmu(env, addr, val, oi, retaddr);
3148 plugin_store_cb(env, addr, oi);
3149}
3150
3151void cpu_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
3152 MemOpIdx oi, uintptr_t retaddr)
3153{
3154 tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_32);
3155 do_st4_mmu(env, addr, val, oi, retaddr);
3156 plugin_store_cb(env, addr, oi);
3157}
3158
3159void cpu_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
3160 MemOpIdx oi, uintptr_t retaddr)
3161{
3162 tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_64);
3163 do_st8_mmu(env, addr, val, oi, retaddr);
3164 plugin_store_cb(env, addr, oi);
3165}
3166
3167void cpu_st16_mmu(CPUArchState *env, target_ulong addr, Int128 val,
3168 MemOpIdx oi, uintptr_t retaddr)
3169{
3170 tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_128);
3171 do_st16_mmu(env, addr, val, oi, retaddr);
3172 plugin_store_cb(env, addr, oi);
3173}
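/*
 * As with the loads, a minimal usage sketch for the cpu_st*_mmu helpers
 * above; "mmu_idx" and "addr" merely stand for values the caller
 * already has:
 *
 *     MemOpIdx oi = make_memop_idx(MO_BEUW, mmu_idx);
 *     cpu_stw_mmu(env, addr, val, oi, GETPC());
 *
 * These wrappers perform the store and then fire the plugin write
 * callback; the helper_st*_mmu entry points are the ones called
 * directly from generated code.
 */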
3174
3175#include "ldst_common.c.inc"
3176
/*
 * These atomic helpers take the MemOpIdx and return address as explicit
 * arguments, which makes them callable from other helpers as well as
 * from generated code.
 */
3181
3182#define ATOMIC_NAME(X) \
3183 glue(glue(glue(cpu_atomic_ ## X, SUFFIX), END), _mmu)
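/*
 * For example, including atomic_template.h with DATA_SIZE 4 instantiates
 * ATOMIC_NAME(cmpxchg) as cpu_atomic_cmpxchgl_le_mmu and
 * cpu_atomic_cmpxchgl_be_mmu, matching the declarations in cpu_ldst.h.
 */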
3184
3185#define ATOMIC_MMU_CLEANUP
3186
3187#include "atomic_common.c.inc"
3188
3189#define DATA_SIZE 1
3190#include "atomic_template.h"
3191
3192#define DATA_SIZE 2
3193#include "atomic_template.h"
3194
3195#define DATA_SIZE 4
3196#include "atomic_template.h"
3197
3198#ifdef CONFIG_ATOMIC64
3199#define DATA_SIZE 8
3200#include "atomic_template.h"
3201#endif
3202
3203#if defined(CONFIG_ATOMIC128) || HAVE_CMPXCHG128
3204#define DATA_SIZE 16
3205#include "atomic_template.h"
3206#endif
3207
/* Code access functions.  */
3209
3210uint32_t cpu_ldub_code(CPUArchState *env, abi_ptr addr)
3211{
3212 MemOpIdx oi = make_memop_idx(MO_UB, cpu_mmu_index(env, true));
3213 return do_ld1_mmu(env, addr, oi, 0, MMU_INST_FETCH);
3214}
3215
3216uint32_t cpu_lduw_code(CPUArchState *env, abi_ptr addr)
3217{
3218 MemOpIdx oi = make_memop_idx(MO_TEUW, cpu_mmu_index(env, true));
3219 return do_ld2_mmu(env, addr, oi, 0, MMU_INST_FETCH);
3220}
3221
3222uint32_t cpu_ldl_code(CPUArchState *env, abi_ptr addr)
3223{
3224 MemOpIdx oi = make_memop_idx(MO_TEUL, cpu_mmu_index(env, true));
3225 return do_ld4_mmu(env, addr, oi, 0, MMU_INST_FETCH);
3226}
3227
3228uint64_t cpu_ldq_code(CPUArchState *env, abi_ptr addr)
3229{
3230 MemOpIdx oi = make_memop_idx(MO_TEUQ, cpu_mmu_index(env, true));
3231 return do_ld8_mmu(env, addr, oi, 0, MMU_INST_FETCH);
3232}
3233
3234uint8_t cpu_ldb_code_mmu(CPUArchState *env, abi_ptr addr,
3235 MemOpIdx oi, uintptr_t retaddr)
3236{
3237 return do_ld1_mmu(env, addr, oi, retaddr, MMU_INST_FETCH);
3238}
3239
3240uint16_t cpu_ldw_code_mmu(CPUArchState *env, abi_ptr addr,
3241 MemOpIdx oi, uintptr_t retaddr)
3242{
3243 return do_ld2_mmu(env, addr, oi, retaddr, MMU_INST_FETCH);
3244}
3245
3246uint32_t cpu_ldl_code_mmu(CPUArchState *env, abi_ptr addr,
3247 MemOpIdx oi, uintptr_t retaddr)
3248{
3249 return do_ld4_mmu(env, addr, oi, retaddr, MMU_INST_FETCH);
3250}
3251
3252uint64_t cpu_ldq_code_mmu(CPUArchState *env, abi_ptr addr,
3253 MemOpIdx oi, uintptr_t retaddr)
3254{
3255 return do_ld8_mmu(env, addr, oi, retaddr, MMU_INST_FETCH);
3256}
3257