/*
 * User emulation cpu run-time memory access functions.
 */

#include "qemu/osdep.h"
#include "hw/core/tcg-cpu-ops.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg/tcg.h"
#include "qemu/bitops.h"
#include "qemu/rcu.h"
#include "exec/cpu_ldst.h"
#include "exec/translate-all.h"
#include "exec/helper-proto.h"
#include "qemu/atomic128.h"
#include "trace/trace-root.h"
#include "tcg/tcg-ldst.h"
#include "internal.h"

__thread uintptr_t helper_retaddr;

/*
 * Adjust the pc saved by the host signal handler so that it can be
 * passed to cpu_restore_state, and classify the faulting access.
 */
MMUAccessType adjust_signal_pc(uintptr_t *pc, bool is_write)
{
    switch (helper_retaddr) {
    default:
        /*
         * Fault during a host memory operation inside a helper function:
         * helper_retaddr holds the return address into generated code,
         * which is exactly what the unwinder needs.
         */
        *pc = helper_retaddr;
        break;

    case 0:
        /*
         * Fault during a host memory operation inside generated code.
         * The pc from the signal frame points at the faulting host insn,
         * whereas cpu_restore_state expects a GETPC()-style return
         * address (just past a call insn) and will subtract GETPC_ADJ;
         * pre-add GETPC_ADJ here to compensate.
         */
        *pc += GETPC_ADJ;
        break;

    case 1:
        /*
         * Fault during a host read for translation, i.e. an instruction
         * fetch.  The guest pc already points at the start of the TB
         * being translated, so there is nothing to unwind.
         */
        *pc = 0;
        return MMU_INST_FETCH;
    }

    return is_write ? MMU_DATA_STORE : MMU_DATA_LOAD;
}

/**
 * handle_sigsegv_accerr_write:
 * @cpu: the cpu context
 * @old_set: the sigset_t from the signal ucontext_t
 * @host_pc: the host pc, adjusted for the signal
 * @guest_addr: the guest address of the fault
 *
 * Return true if the write fault has been handled and the access
 * should be re-tried.
 */
bool handle_sigsegv_accerr_write(CPUState *cpu, sigset_t *old_set,
                                 uintptr_t host_pc, abi_ptr guest_addr)
{
    switch (page_unprotect(guest_addr, host_pc)) {
    case 0:
        /*
         * Fault not caused by a page marked read-only to protect
         * cached translations; pass it on to the guest.
         */
        return false;
    case 1:
        /*
         * Fault caused by protection of cached translation; TBs
         * invalidated, so resume execution.
         */
        return true;
    case 2:
        /*
         * Fault caused by protection of cached translation, and the
         * currently executing TB was modified and must be exited
         * immediately.
         */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        cpu_loop_exit_noexc(cpu);
        /* NORETURN */
    default:
        g_assert_not_reached();
    }
}
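
/*
 * How the two helpers above fit together (sketch): the per-host SIGSEGV
 * handler, which lives outside this file, is expected to do roughly
 *
 *     MMUAccessType access_type = adjust_signal_pc(&host_pc, is_write);
 *     if (is_write && info->si_code == SEGV_ACCERR &&
 *         handle_sigsegv_accerr_write(cpu, &old_set, host_pc, guest_addr)) {
 *         return;   // the page was unprotected; retry the access
 *     }
 *     cpu_loop_exit_sigsegv(cpu, guest_addr, access_type, maperr, host_pc);
 *
 * Variable names here (is_write, maperr, etc.) are illustrative only.
 */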

typedef struct PageFlagsNode {
    struct rcu_head rcu;
    IntervalTreeNode itree;
    int flags;
} PageFlagsNode;

static IntervalTreeRoot pageflags_root;

static PageFlagsNode *pageflags_find(target_ulong start, target_ulong last)
{
    IntervalTreeNode *n;

    n = interval_tree_iter_first(&pageflags_root, start, last);
    return n ? container_of(n, PageFlagsNode, itree) : NULL;
}

static PageFlagsNode *pageflags_next(PageFlagsNode *p, target_ulong start,
                                     target_ulong last)
{
    IntervalTreeNode *n;

    n = interval_tree_iter_next(&p->itree, start, last);
    return n ? container_of(n, PageFlagsNode, itree) : NULL;
}

int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
{
    IntervalTreeNode *n;
    int rc = 0;

    mmap_lock();
    for (n = interval_tree_iter_first(&pageflags_root, 0, -1);
         n != NULL;
         n = interval_tree_iter_next(n, 0, -1)) {
        PageFlagsNode *p = container_of(n, PageFlagsNode, itree);

        rc = fn(priv, n->start, n->last + 1, p->flags);
        if (rc != 0) {
            break;
        }
    }
    mmap_unlock();

    return rc;
}

static int dump_region(void *priv, target_ulong start,
                       target_ulong end, unsigned long prot)
{
    FILE *f = (FILE *)priv;

    fprintf(f, TARGET_FMT_lx"-"TARGET_FMT_lx" "TARGET_FMT_lx" %c%c%c\n",
            start, end, end - start,
            ((prot & PAGE_READ) ? 'r' : '-'),
            ((prot & PAGE_WRITE) ? 'w' : '-'),
            ((prot & PAGE_EXEC) ? 'x' : '-'));
    return 0;
}

/* dump memory mappings */
void page_dump(FILE *f)
{
    const int length = sizeof(target_ulong) * 2;

    fprintf(f, "%-*s %-*s %-*s %s\n",
            length, "start", length, "end", length, "size", "prot");
    walk_memory_regions(f, dump_region);
}

int page_get_flags(target_ulong address)
{
    PageFlagsNode *p = pageflags_find(address, address);

    /*
     * See util/interval-tree.c re lockless lookups: no false positives
     * but there are false negatives.  If we find nothing, retry with
     * the mmap lock acquired.
     */
    if (p) {
        return p->flags;
    }
    if (have_mmap_lock()) {
        return 0;
    }

    mmap_lock();
    p = pageflags_find(address, address);
    mmap_unlock();
    return p ? p->flags : 0;
}

/* A subroutine of page_set_flags: insert a new node for [start,last]. */
static void pageflags_create(target_ulong start, target_ulong last, int flags)
{
    PageFlagsNode *p = g_new(PageFlagsNode, 1);

    p->itree.start = start;
    p->itree.last = last;
    p->flags = flags;
    interval_tree_insert(&p->itree, &pageflags_root);
}

/*
 * A subroutine of page_set_flags: remove everything in [start,last].
 * Returns true if any removed range was executable, in which case the
 * caller must invalidate cached translations.
 */
static bool pageflags_unset(target_ulong start, target_ulong last)
{
    bool inval_tb = false;

    while (true) {
        PageFlagsNode *p = pageflags_find(start, last);
        target_ulong p_last;

        if (!p) {
            break;
        }

        if (p->flags & PAGE_EXEC) {
            inval_tb = true;
        }

        interval_tree_remove(&p->itree, &pageflags_root);
        p_last = p->itree.last;

        if (p->itree.start < start) {
            /* Truncate the node from the end, or split out the middle. */
            p->itree.last = start - 1;
            interval_tree_insert(&p->itree, &pageflags_root);
            if (last < p_last) {
                pageflags_create(last + 1, p_last, p->flags);
                break;
            }
        } else if (p_last <= last) {
            /* The range completely covers the node -- remove it. */
            g_free_rcu(p, rcu);
        } else {
            /* Truncate the node from the start. */
            p->itree.start = last + 1;
            interval_tree_insert(&p->itree, &pageflags_root);
            break;
        }
    }

    return inval_tb;
}

/*
 * A subroutine of page_set_flags: nothing overlaps [start,last], but
 * check adjacent mappings and merge into a single range if the flags
 * are identical.
 */
static void pageflags_create_merge(target_ulong start, target_ulong last,
                                   int flags)
{
    PageFlagsNode *next = NULL, *prev = NULL;

    if (start > 0) {
        prev = pageflags_find(start - 1, start - 1);
        if (prev) {
            if (prev->flags == flags) {
                interval_tree_remove(&prev->itree, &pageflags_root);
            } else {
                prev = NULL;
            }
        }
    }
    if (last + 1 != 0) {
        next = pageflags_find(last + 1, last + 1);
        if (next) {
            if (next->flags == flags) {
                interval_tree_remove(&next->itree, &pageflags_root);
            } else {
                next = NULL;
            }
        }
    }

    if (prev) {
        if (next) {
            prev->itree.last = next->itree.last;
            g_free_rcu(next, rcu);
        } else {
            prev->itree.last = last;
        }
        interval_tree_insert(&prev->itree, &pageflags_root);
    } else if (next) {
        next->itree.start = start;
        interval_tree_insert(&next->itree, &pageflags_root);
    } else {
        pageflags_create(start, last, flags);
    }
}

/*
 * Flags that are preserved across a page_set_flags() call that does not
 * include PAGE_RESET.  Targets may extend the set via PAGE_TARGET_STICKY.
 */
#ifndef PAGE_TARGET_STICKY
#define PAGE_TARGET_STICKY  0
#endif
#define PAGE_STICKY  (PAGE_ANON | PAGE_PASSTHROUGH | PAGE_TARGET_STICKY)

/*
 * A subroutine of page_set_flags: add/remove flags within [start,last].
 * Returns true if cached translations need to be invalidated.
 */
static bool pageflags_set_clear(target_ulong start, target_ulong last,
                                int set_flags, int clear_flags)
{
    PageFlagsNode *p;
    target_ulong p_start, p_last;
    int p_flags, merge_flags;
    bool inval_tb = false;

 restart:
    p = pageflags_find(start, last);
    if (!p) {
        if (set_flags) {
            pageflags_create_merge(start, last, set_flags);
        }
        goto done;
    }

    p_start = p->itree.start;
    p_last = p->itree.last;
    p_flags = p->flags;
    /* Using mprotect on a page does not change sticky bits. */
    merge_flags = (p_flags & ~clear_flags) | set_flags;

    /*
     * Need to flush if an existing executable page loses PAGE_EXEC,
     * or gains PAGE_WRITE (for self-modifying code detection).
     */
    if ((p_flags & PAGE_EXEC)
        && (!(merge_flags & PAGE_EXEC)
            || (merge_flags & ~p_flags & PAGE_WRITE))) {
        inval_tb = true;
    }

    /*
     * If the range matches the node exactly, update it in place,
     * or remove the node entirely if all flags are cleared.
     */
    if (start == p_start && last == p_last) {
        if (merge_flags) {
            p->flags = merge_flags;
        } else {
            interval_tree_remove(&p->itree, &pageflags_root);
            g_free_rcu(p, rcu);
        }
        goto done;
    }

    /*
     * If the merged flags differ from the flags a brand-new range would
     * receive, the existing node must be split so that only the
     * overlapping part receives merge_flags.
     */
    if (set_flags != merge_flags) {
        if (p_start < start) {
            interval_tree_remove(&p->itree, &pageflags_root);
            p->itree.last = start - 1;
            interval_tree_insert(&p->itree, &pageflags_root);

            if (last < p_last) {
                if (merge_flags) {
                    pageflags_create(start, last, merge_flags);
                }
                pageflags_create(last + 1, p_last, p_flags);
            } else {
                if (merge_flags) {
                    pageflags_create(start, p_last, merge_flags);
                }
                if (p_last < last) {
                    start = p_last + 1;
                    goto restart;
                }
            }
        } else {
            if (start < p_start && set_flags) {
                pageflags_create(start, p_start - 1, set_flags);
            }
            if (last < p_last) {
                interval_tree_remove(&p->itree, &pageflags_root);
                p->itree.start = last + 1;
                interval_tree_insert(&p->itree, &pageflags_root);
                if (merge_flags) {
                    pageflags_create(start, last, merge_flags);
                }
            } else {
                if (merge_flags) {
                    p->flags = merge_flags;
                } else {
                    interval_tree_remove(&p->itree, &pageflags_root);
                    g_free_rcu(p, rcu);
                }
                if (p_last < last) {
                    start = p_last + 1;
                    goto restart;
                }
            }
        }
        goto done;
    }

    /* If flags are not changing for this range, incorporate it. */
    if (set_flags == p_flags) {
        if (start < p_start) {
            interval_tree_remove(&p->itree, &pageflags_root);
            p->itree.start = start;
            interval_tree_insert(&p->itree, &pageflags_root);
        }
        if (p_last < last) {
            start = p_last + 1;
            goto restart;
        }
        goto done;
    }

    /* Maybe split out head and/or tail ranges with the original flags. */
    interval_tree_remove(&p->itree, &pageflags_root);
    if (p_start < start) {
        p->itree.last = start - 1;
        interval_tree_insert(&p->itree, &pageflags_root);

        if (p_last < last) {
            goto restart;
        }
        if (last < p_last) {
            pageflags_create(last + 1, p_last, p_flags);
        }
    } else if (last < p_last) {
        p->itree.start = last + 1;
        interval_tree_insert(&p->itree, &pageflags_root);
    } else {
        g_free_rcu(p, rcu);
        goto restart;
    }
    if (set_flags) {
        pageflags_create(start, last, set_flags);
    }

 done:
    return inval_tb;
}
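
/*
 * Worked example for pageflags_set_clear(): given one node covering
 * [0x1000, 0x3fff] with flags rwx (PAGE_READ|PAGE_WRITE|PAGE_EXEC), the
 * call pageflags_set_clear(0x2000, 0x2fff, PAGE_READ, rwx) computes
 * merge_flags == PAGE_READ, which differs from the node's flags, so the
 * node is split:
 *
 *     before:  [0x1000 ...................... 0x3fff]  rwx
 *     after:   [0x1000 .. 0x1fff]  rwx
 *              [0x2000 .. 0x2fff]  r
 *              [0x3000 .. 0x3fff]  rwx
 *
 * PAGE_EXEC is lost on the middle range, so the function returns true
 * and page_set_flags() then invalidates translations in that range.
 */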

/*
 * Modify the flags of a page and invalidate cached translations if
 * necessary.  PAGE_WRITE_ORG is set automatically from PAGE_WRITE.
 * The mmap_lock must already be held.
 */
void page_set_flags(target_ulong start, target_ulong last, int flags)
{
    bool reset = false;
    bool inval_tb = false;

    /*
     * This function should never be called with addresses outside the
     * guest address space.  If this assert fires, it probably indicates
     * a missing call to h2g_valid.
     */
    assert(start <= last);
    assert(last <= GUEST_ADDR_MAX);
    /* Only set PAGE_ANON with new mappings. */
    assert(!(flags & PAGE_ANON) || (flags & PAGE_RESET));
    assert_memory_lock();

    start &= TARGET_PAGE_MASK;
    last |= ~TARGET_PAGE_MASK;

    if (!(flags & PAGE_VALID)) {
        flags = 0;
    } else {
        reset = flags & PAGE_RESET;
        flags &= ~PAGE_RESET;
        if (flags & PAGE_WRITE) {
            flags |= PAGE_WRITE_ORG;
        }
    }

    if (!flags || reset) {
        page_reset_target_data(start, last);
        inval_tb |= pageflags_unset(start, last);
    }
    if (flags) {
        inval_tb |= pageflags_set_clear(start, last, flags,
                                        ~(reset ? 0 : PAGE_STICKY));
    }
    if (inval_tb) {
        tb_invalidate_phys_range(start, last);
    }
}
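
/*
 * Typical use (sketch): the guest mmap/munmap/mprotect emulation converts
 * the result of the host syscall into page flags for the affected range,
 * along the lines of
 *
 *     page_set_flags(start, start + len - 1,
 *                    page_prot | PAGE_VALID | PAGE_RESET);
 *
 * for a new mapping, or the same call without PAGE_RESET for mprotect so
 * that sticky flags such as PAGE_ANON survive.  The real call sites live
 * in the per-OS mmap emulation, not in this file; names above are only
 * illustrative.
 */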

int page_check_range(target_ulong start, target_ulong len, int flags)
{
    target_ulong last;
    int locked;  /* tri-state: =0: unlocked, +1: global, -1: local */
    int ret;

    if (len == 0) {
        return 0;  /* trivial length */
    }

    last = start + len - 1;
    if (last < start) {
        return -1;  /* wrap around */
    }

    locked = have_mmap_lock();
    while (true) {
        PageFlagsNode *p = pageflags_find(start, last);
        int missing;

        if (!p) {
            if (!locked) {
                /*
                 * Lockless lookups have false negatives.
                 * Retry with the lock held.
                 */
                mmap_lock();
                locked = -1;
                p = pageflags_find(start, last);
            }
            if (!p) {
                ret = -1;  /* entire region invalid */
                break;
            }
        }
        if (start < p->itree.start) {
            ret = -1;  /* initial bytes invalid */
            break;
        }

        missing = flags & ~p->flags;
        if (missing & PAGE_READ) {
            ret = -1;  /* page not readable */
            break;
        }
        if (missing & PAGE_WRITE) {
            if (!(p->flags & PAGE_WRITE_ORG)) {
                ret = -1;  /* page not writable */
                break;
            }
            /* Asking about writable, but has been protected: undo. */
            if (!page_unprotect(start, 0)) {
                ret = -1;
                break;
            }
            /* TODO: page_unprotect should take a range, not a single page. */
            if (last - start < TARGET_PAGE_SIZE) {
                ret = 0;  /* ok */
                break;
            }
            start += TARGET_PAGE_SIZE;
            continue;
        }

        if (last <= p->itree.last) {
            ret = 0;  /* ok */
            break;
        }
        start = p->itree.last + 1;
    }

    /* Release the lock if acquired locally. */
    if (locked < 0) {
        mmap_unlock();
    }
    return ret;
}
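
/*
 * Sketch of intended use: syscall emulation can validate a guest buffer
 * before copying to or from it, e.g.
 *
 *     if (page_check_range(guest_addr, size, PAGE_READ | PAGE_WRITE) < 0) {
 *         return -TARGET_EFAULT;
 *     }
 *
 * Checking PAGE_WRITE also transparently undoes the write protection that
 * page_protect() places on pages containing translated code.  The actual
 * access_ok()-style wrappers live in the linux-user/bsd-user code.
 */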

void page_protect(tb_page_addr_t address)
{
    PageFlagsNode *p;
    target_ulong start, last;
    int prot;

    assert_memory_lock();

    if (qemu_host_page_size <= TARGET_PAGE_SIZE) {
        start = address & TARGET_PAGE_MASK;
        last = start + TARGET_PAGE_SIZE - 1;
    } else {
        start = address & qemu_host_page_mask;
        last = start + qemu_host_page_size - 1;
    }

    p = pageflags_find(start, last);
    if (!p) {
        return;
    }
    prot = p->flags;

    if (unlikely(p->itree.last < last)) {
        /* More than one protection region covers the one host page. */
        assert(TARGET_PAGE_SIZE < qemu_host_page_size);
        while ((p = pageflags_next(p, start, last)) != NULL) {
            prot |= p->flags;
        }
    }

    if (prot & PAGE_WRITE) {
        pageflags_set_clear(start, last, 0, PAGE_WRITE);
        mprotect(g2h_untagged(start), qemu_host_page_size,
                 prot & (PAGE_READ | PAGE_EXEC) ? PROT_READ : PROT_NONE);
    }
}

/*
 * Called from the SIGSEGV handler: write-enable the page and invalidate
 * any translations it contains.
 *
 * Returns 0 if the fault was not caused by our write protection (the
 * guest really did write to a read-only page), 1 if it was handled, and
 * 2 if it was handled but the currently executing TB was invalidated and
 * the caller must force an immediate exit from the TB.
 * (2 can only be returned when the 'pc' argument is non-zero.)
 */
int page_unprotect(target_ulong address, uintptr_t pc)
{
    PageFlagsNode *p;
    bool current_tb_invalidated;

    /*
     * Technically this isn't safe inside a signal handler.  However we
     * know this only ever happens in a synchronous SEGV handler, so in
     * practice it seems to be ok.
     */
    mmap_lock();

    p = pageflags_find(address, address);

    /* If this address was not really writable, nothing to do. */
    if (!p || !(p->flags & PAGE_WRITE_ORG)) {
        mmap_unlock();
        return 0;
    }

    current_tb_invalidated = false;
    if (p->flags & PAGE_WRITE) {
        /*
         * If the page is actually marked WRITE then assume this is because
         * this thread raced with another one which got here first and
         * set the page to PAGE_WRITE and did the TB invalidate for us.
         */
#ifdef TARGET_HAS_PRECISE_SMC
        TranslationBlock *current_tb = tcg_tb_lookup(pc);
        if (current_tb) {
            current_tb_invalidated = tb_cflags(current_tb) & CF_INVALID;
        }
#endif
    } else {
        target_ulong start, len, i;
        int prot;

        if (qemu_host_page_size <= TARGET_PAGE_SIZE) {
            start = address & TARGET_PAGE_MASK;
            len = TARGET_PAGE_SIZE;
            prot = p->flags | PAGE_WRITE;
            pageflags_set_clear(start, start + len - 1, PAGE_WRITE, 0);
            current_tb_invalidated = tb_invalidate_phys_page_unwind(start, pc);
        } else {
            start = address & qemu_host_page_mask;
            len = qemu_host_page_size;
            prot = 0;

            for (i = 0; i < len; i += TARGET_PAGE_SIZE) {
                target_ulong addr = start + i;

                p = pageflags_find(addr, addr);
                if (p) {
                    prot |= p->flags;
                    if (p->flags & PAGE_WRITE_ORG) {
                        prot |= PAGE_WRITE;
                        pageflags_set_clear(addr, addr + TARGET_PAGE_SIZE - 1,
                                            PAGE_WRITE, 0);
                    }
                }
                /*
                 * Since the content will be modified, we must invalidate
                 * the corresponding translated code.
                 */
                current_tb_invalidated |=
                    tb_invalidate_phys_page_unwind(addr, pc);
            }
        }
        if (prot & PAGE_EXEC) {
            prot = (prot & ~PAGE_EXEC) | PAGE_READ;
        }
        mprotect((void *)g2h_untagged(start), len, prot & PAGE_BITS);
    }
    mmap_unlock();

    /* If the current TB was invalidated, return to the main loop. */
    return current_tb_invalidated ? 2 : 1;
}

static int probe_access_internal(CPUArchState *env, target_ulong addr,
                                 int fault_size, MMUAccessType access_type,
                                 bool nonfault, uintptr_t ra)
{
    int acc_flag;
    bool maperr;

    switch (access_type) {
    case MMU_DATA_STORE:
        acc_flag = PAGE_WRITE_ORG;
        break;
    case MMU_DATA_LOAD:
        acc_flag = PAGE_READ;
        break;
    case MMU_INST_FETCH:
        acc_flag = PAGE_EXEC;
        break;
    default:
        g_assert_not_reached();
    }

    if (guest_addr_valid_untagged(addr)) {
        int page_flags = page_get_flags(addr);
        if (page_flags & acc_flag) {
            return 0;
        }
        maperr = !(page_flags & PAGE_VALID);
    } else {
        maperr = true;
    }

    if (nonfault) {
        return TLB_INVALID_MASK;
    }

    cpu_loop_exit_sigsegv(env_cpu(env), addr, access_type, maperr, ra);
}

int probe_access_flags(CPUArchState *env, target_ulong addr, int size,
                       MMUAccessType access_type, int mmu_idx,
                       bool nonfault, void **phost, uintptr_t ra)
{
    int flags;

    g_assert(-(addr | TARGET_PAGE_MASK) >= size);
    flags = probe_access_internal(env, addr, size, access_type, nonfault, ra);
    *phost = flags ? NULL : g2h(env_cpu(env), addr);
    return flags;
}

void *probe_access(CPUArchState *env, target_ulong addr, int size,
                   MMUAccessType access_type, int mmu_idx, uintptr_t ra)
{
    int flags;

    g_assert(-(addr | TARGET_PAGE_MASK) >= size);
    flags = probe_access_internal(env, addr, size, access_type, false, ra);
    g_assert(flags == 0);

    return size ? g2h(env_cpu(env), addr) : NULL;
}

tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, target_ulong addr,
                                        void **hostp)
{
    int flags;

    flags = probe_access_internal(env, addr, 1, MMU_INST_FETCH, false, 0);
    g_assert(flags == 0);

    if (hostp) {
        *hostp = g2h_untagged(addr);
    }
    return addr;
}

#ifdef TARGET_PAGE_DATA_SIZE
/*
 * Allocate target data in chunks of 64 pages, so that the interval tree
 * and RCU bookkeeping is amortised over many pages rather than paid for
 * each page individually.
 */
#define TPD_PAGES  64
#define TBD_MASK   (TARGET_PAGE_MASK * TPD_PAGES)

typedef struct TargetPageDataNode {
    struct rcu_head rcu;
    IntervalTreeNode itree;
    char data[TPD_PAGES][TARGET_PAGE_DATA_SIZE] __attribute__((aligned));
} TargetPageDataNode;

static IntervalTreeRoot targetdata_root;

void page_reset_target_data(target_ulong start, target_ulong last)
{
    IntervalTreeNode *n, *next;

    assert_memory_lock();

    start &= TARGET_PAGE_MASK;
    last |= ~TARGET_PAGE_MASK;

    for (n = interval_tree_iter_first(&targetdata_root, start, last),
         next = n ? interval_tree_iter_next(n, start, last) : NULL;
         n != NULL;
         n = next,
         next = next ? interval_tree_iter_next(n, start, last) : NULL) {
        target_ulong n_start, n_last, p_ofs, p_len;
        TargetPageDataNode *t = container_of(n, TargetPageDataNode, itree);

        if (n->start >= start && n->last <= last) {
            interval_tree_remove(n, &targetdata_root);
            g_free_rcu(t, rcu);
            continue;
        }

        if (n->start < start) {
            n_start = start;
            p_ofs = (start - n->start) >> TARGET_PAGE_BITS;
        } else {
            n_start = n->start;
            p_ofs = 0;
        }
        n_last = MIN(last, n->last);
        p_len = (n_last + 1 - n_start) >> TARGET_PAGE_BITS;

        memset(t->data[p_ofs], 0, p_len * TARGET_PAGE_DATA_SIZE);
    }
}

void *page_get_target_data(target_ulong address)
{
    IntervalTreeNode *n;
    TargetPageDataNode *t;
    target_ulong page, region;

    page = address & TARGET_PAGE_MASK;
    region = address & TBD_MASK;

    n = interval_tree_iter_first(&targetdata_root, page, page);
    if (!n) {
        /*
         * See util/interval-tree.c re lockless lookups: no false positives
         * but there are false negatives.  If we find nothing, retry with
         * the mmap lock acquired.  We also need the lock for the
         * allocation + insert.
         */
        mmap_lock();
        n = interval_tree_iter_first(&targetdata_root, page, page);
        if (!n) {
            t = g_new0(TargetPageDataNode, 1);
            n = &t->itree;
            n->start = region;
            n->last = region | ~TBD_MASK;
            interval_tree_insert(n, &targetdata_root);
        }
        mmap_unlock();
    }

    t = container_of(n, TargetPageDataNode, itree);
    return t->data[(page - region) >> TARGET_PAGE_BITS];
}
#else
void page_reset_target_data(target_ulong start, target_ulong last) { }
#endif /* TARGET_PAGE_DATA_SIZE */
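
/*
 * TARGET_PAGE_DATA_SIZE is defined by targets that want a small per-page
 * blob attached to every guest page; for example, Arm's memory tagging
 * extension stores its allocation tags this way.  A target-side sketch
 * (names are illustrative only):
 *
 *     uint8_t *tags = page_get_target_data(guest_addr);   // zeroed on alloc
 *     tags[tag_index] = new_tag;
 *
 * The data is cleared by page_reset_target_data() whenever the page is
 * unmapped or replaced via page_set_flags(..., PAGE_RESET).
 */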

/* The softmmu versions of these helpers are in cputlb.c. */

/*
 * Verify that we have passed the correct MemOp to the correct function.
 * Only checked in debug builds.
 */
static void validate_memop(MemOpIdx oi, MemOp expected)
{
#ifdef CONFIG_DEBUG_TCG
    MemOp have = get_memop(oi) & (MO_SIZE | MO_BSWAP);
    assert(have == expected);
#endif
}

void helper_unaligned_ld(CPUArchState *env, target_ulong addr)
{
    cpu_loop_exit_sigbus(env_cpu(env), addr, MMU_DATA_LOAD, GETPC());
}

void helper_unaligned_st(CPUArchState *env, target_ulong addr)
{
    cpu_loop_exit_sigbus(env_cpu(env), addr, MMU_DATA_STORE, GETPC());
}

static void *cpu_mmu_lookup(CPUArchState *env, target_ulong addr,
                            MemOpIdx oi, uintptr_t ra, MMUAccessType type)
{
    MemOp mop = get_memop(oi);
    int a_bits = get_alignment_bits(mop);
    void *ret;

    /* Enforce guest required alignment. */
    if (unlikely(addr & ((1 << a_bits) - 1))) {
        cpu_loop_exit_sigbus(env_cpu(env), addr, type, ra);
    }

    ret = g2h(env_cpu(env), addr);
    set_helper_retaddr(ra);
    return ret;
}

uint8_t cpu_ldb_mmu(CPUArchState *env, abi_ptr addr,
                    MemOpIdx oi, uintptr_t ra)
{
    void *haddr;
    uint8_t ret;

    validate_memop(oi, MO_UB);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
    ret = ldub_p(haddr);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
    return ret;
}

uint16_t cpu_ldw_be_mmu(CPUArchState *env, abi_ptr addr,
                        MemOpIdx oi, uintptr_t ra)
{
    void *haddr;
    uint16_t ret;

    validate_memop(oi, MO_BEUW);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
    ret = lduw_be_p(haddr);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
    return ret;
}

uint32_t cpu_ldl_be_mmu(CPUArchState *env, abi_ptr addr,
                        MemOpIdx oi, uintptr_t ra)
{
    void *haddr;
    uint32_t ret;

    validate_memop(oi, MO_BEUL);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
    ret = ldl_be_p(haddr);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
    return ret;
}

uint64_t cpu_ldq_be_mmu(CPUArchState *env, abi_ptr addr,
                        MemOpIdx oi, uintptr_t ra)
{
    void *haddr;
    uint64_t ret;

    validate_memop(oi, MO_BEUQ);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
    ret = ldq_be_p(haddr);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
    return ret;
}

uint16_t cpu_ldw_le_mmu(CPUArchState *env, abi_ptr addr,
                        MemOpIdx oi, uintptr_t ra)
{
    void *haddr;
    uint16_t ret;

    validate_memop(oi, MO_LEUW);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
    ret = lduw_le_p(haddr);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
    return ret;
}

uint32_t cpu_ldl_le_mmu(CPUArchState *env, abi_ptr addr,
                        MemOpIdx oi, uintptr_t ra)
{
    void *haddr;
    uint32_t ret;

    validate_memop(oi, MO_LEUL);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
    ret = ldl_le_p(haddr);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
    return ret;
}

uint64_t cpu_ldq_le_mmu(CPUArchState *env, abi_ptr addr,
                        MemOpIdx oi, uintptr_t ra)
{
    void *haddr;
    uint64_t ret;

    validate_memop(oi, MO_LEUQ);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
    ret = ldq_le_p(haddr);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
    return ret;
}

Int128 cpu_ld16_be_mmu(CPUArchState *env, abi_ptr addr,
                       MemOpIdx oi, uintptr_t ra)
{
    void *haddr;
    Int128 ret;

    validate_memop(oi, MO_128 | MO_BE);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
    memcpy(&ret, haddr, 16);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);

    if (!HOST_BIG_ENDIAN) {
        ret = bswap128(ret);
    }
    return ret;
}

Int128 cpu_ld16_le_mmu(CPUArchState *env, abi_ptr addr,
                       MemOpIdx oi, uintptr_t ra)
{
    void *haddr;
    Int128 ret;

    validate_memop(oi, MO_128 | MO_LE);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
    memcpy(&ret, haddr, 16);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);

    if (HOST_BIG_ENDIAN) {
        ret = bswap128(ret);
    }
    return ret;
}

void cpu_stb_mmu(CPUArchState *env, abi_ptr addr, uint8_t val,
                 MemOpIdx oi, uintptr_t ra)
{
    void *haddr;

    validate_memop(oi, MO_UB);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
    stb_p(haddr, val);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
}

void cpu_stw_be_mmu(CPUArchState *env, abi_ptr addr, uint16_t val,
                    MemOpIdx oi, uintptr_t ra)
{
    void *haddr;

    validate_memop(oi, MO_BEUW);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
    stw_be_p(haddr, val);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
}

void cpu_stl_be_mmu(CPUArchState *env, abi_ptr addr, uint32_t val,
                    MemOpIdx oi, uintptr_t ra)
{
    void *haddr;

    validate_memop(oi, MO_BEUL);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
    stl_be_p(haddr, val);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
}

void cpu_stq_be_mmu(CPUArchState *env, abi_ptr addr, uint64_t val,
                    MemOpIdx oi, uintptr_t ra)
{
    void *haddr;

    validate_memop(oi, MO_BEUQ);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
    stq_be_p(haddr, val);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
}

void cpu_stw_le_mmu(CPUArchState *env, abi_ptr addr, uint16_t val,
                    MemOpIdx oi, uintptr_t ra)
{
    void *haddr;

    validate_memop(oi, MO_LEUW);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
    stw_le_p(haddr, val);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
}

void cpu_stl_le_mmu(CPUArchState *env, abi_ptr addr, uint32_t val,
                    MemOpIdx oi, uintptr_t ra)
{
    void *haddr;

    validate_memop(oi, MO_LEUL);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
    stl_le_p(haddr, val);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
}

void cpu_stq_le_mmu(CPUArchState *env, abi_ptr addr, uint64_t val,
                    MemOpIdx oi, uintptr_t ra)
{
    void *haddr;

    validate_memop(oi, MO_LEUQ);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
    stq_le_p(haddr, val);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
}

void cpu_st16_be_mmu(CPUArchState *env, abi_ptr addr,
                     Int128 val, MemOpIdx oi, uintptr_t ra)
{
    void *haddr;

    validate_memop(oi, MO_128 | MO_BE);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
    if (!HOST_BIG_ENDIAN) {
        val = bswap128(val);
    }
    memcpy(haddr, &val, 16);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
}

void cpu_st16_le_mmu(CPUArchState *env, abi_ptr addr,
                     Int128 val, MemOpIdx oi, uintptr_t ra)
{
    void *haddr;

    validate_memop(oi, MO_128 | MO_LE);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
    if (HOST_BIG_ENDIAN) {
        val = bswap128(val);
    }
    memcpy(haddr, &val, 16);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
}

/*
 * The *_code loads below set helper_retaddr to the sentinel value 1,
 * which adjust_signal_pc() above treats as an instruction fetch.
 */
uint32_t cpu_ldub_code(CPUArchState *env, abi_ptr ptr)
{
    uint32_t ret;

    set_helper_retaddr(1);
    ret = ldub_p(g2h_untagged(ptr));
    clear_helper_retaddr();
    return ret;
}

uint32_t cpu_lduw_code(CPUArchState *env, abi_ptr ptr)
{
    uint32_t ret;

    set_helper_retaddr(1);
    ret = lduw_p(g2h_untagged(ptr));
    clear_helper_retaddr();
    return ret;
}

uint32_t cpu_ldl_code(CPUArchState *env, abi_ptr ptr)
{
    uint32_t ret;

    set_helper_retaddr(1);
    ret = ldl_p(g2h_untagged(ptr));
    clear_helper_retaddr();
    return ret;
}

uint64_t cpu_ldq_code(CPUArchState *env, abi_ptr ptr)
{
    uint64_t ret;

    set_helper_retaddr(1);
    ret = ldq_p(g2h_untagged(ptr));
    clear_helper_retaddr();
    return ret;
}

#include "ldst_common.c.inc"

/*
 * Do not allow unaligned operations to proceed.  Return the host address.
 *
 * @prot may be PAGE_READ, PAGE_WRITE, or PAGE_READ|PAGE_WRITE.
 */
static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
                               MemOpIdx oi, int size, int prot,
                               uintptr_t retaddr)
{
    MemOp mop = get_memop(oi);
    int a_bits = get_alignment_bits(mop);
    void *ret;

    /* Enforce guest required alignment. */
    if (unlikely(addr & ((1 << a_bits) - 1))) {
        MMUAccessType t = prot == PAGE_READ ? MMU_DATA_LOAD : MMU_DATA_STORE;
        cpu_loop_exit_sigbus(env_cpu(env), addr, t, retaddr);
    }

    /* Enforce qemu required alignment. */
    if (unlikely(addr & (size - 1))) {
        cpu_loop_exit_atomic(env_cpu(env), retaddr);
    }

    ret = g2h(env_cpu(env), addr);
    set_helper_retaddr(retaddr);
    return ret;
}

#include "atomic_common.c.inc"

/* Macros consumed by atomic_template.h below. */
#define ATOMIC_NAME(X) \
    glue(glue(glue(cpu_atomic_ ## X, SUFFIX), END), _mmu)
#define ATOMIC_MMU_CLEANUP do { clear_helper_retaddr(); } while (0)
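
/*
 * Each inclusion of atomic_template.h below generates one family of
 * helpers for the given DATA_SIZE; the template defines SUFFIX and END,
 * so that, for example, DATA_SIZE 4 turns ATOMIC_NAME(cmpxchg) into
 * names such as cpu_atomic_cmpxchgl_le_mmu.  Every generated helper runs
 * ATOMIC_MMU_CLEANUP on exit to clear the helper_retaddr set by
 * atomic_mmu_lookup().
 */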

#define DATA_SIZE 1
#include "atomic_template.h"

#define DATA_SIZE 2
#include "atomic_template.h"

#define DATA_SIZE 4
#include "atomic_template.h"

#ifdef CONFIG_ATOMIC64
#define DATA_SIZE 8
#include "atomic_template.h"
#endif

#if HAVE_ATOMIC128 || HAVE_CMPXCHG128
#define DATA_SIZE 16
#include "atomic_template.h"
#endif