1
2
3
4
5
6
7
8
9#include "qemu/osdep.h"
10#include "qemu/log.h"
11#include "qemu/range.h"
12#include "cpu.h"
13#include "internals.h"
14#include "idau.h"
15
16
17static bool get_phys_addr_lpae(CPUARMState *env, uint64_t address,
18 MMUAccessType access_type, ARMMMUIdx mmu_idx,
19 bool s1_is_el0, hwaddr *phys_ptr,
20 MemTxAttrs *txattrs, int *prot,
21 target_ulong *page_size_ptr,
22 ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs)
23 __attribute__((nonnull));
24
25
/*
 * Map of the ID_AA64MMFR0.PARANGE field encoding onto the physical
 * address size in bits.  Encodings 0..6 are the only ones handled;
 * callers assert the index is within this table (see arm_pamax and
 * get_phys_addr_lpae).
 */
static const uint8_t pamax_map[] = {
    [0] = 32,
    [1] = 36,
    [2] = 40,
    [3] = 42,
    [4] = 44,
    [5] = 48,
    [6] = 52,
};
35
36
/* Return the maximum physical address size, in bits, supported by @cpu. */
unsigned int arm_pamax(ARMCPU *cpu)
{
    if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
        unsigned int parange =
            FIELD_EX64(cpu->isar.id_aa64mmfr0, ID_AA64MMFR0, PARANGE);

        /*
         * id_aa64mmfr0 is a read-only register so a PARANGE value outside
         * the supported mappings can be considered an implementation error.
         */
        assert(parange < ARRAY_SIZE(pamax_map));
        return pamax_map[parange];
    }

    /*
     * AArch32-only CPU: the PA size is fixed by architecture level.
     * NOTE(review): V7VE is checked explicitly as well as LPAE, presumably
     * because this can be called before feature propagation has set LPAE
     * from V7VE — confirm against the CPU realize path.
     */
    if (arm_feature(&cpu->env, ARM_FEATURE_LPAE) ||
        arm_feature(&cpu->env, ARM_FEATURE_V7VE)) {
        /* v7 with LPAE: 40-bit physical addresses */
        return 40;
    }
    /* Everything older is limited to a 32-bit physical address space. */
    return 32;
}
63
64
65
66
/*
 * Convert a combined stage 1+2 MMU index into the corresponding
 * stage-1-only MMU index; any other index is returned unchanged.
 */
ARMMMUIdx stage_1_mmu_idx(ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_SE10_0:
        return ARMMMUIdx_Stage1_SE0;
    case ARMMMUIdx_SE10_1:
        return ARMMMUIdx_Stage1_SE1;
    case ARMMMUIdx_SE10_1_PAN:
        return ARMMMUIdx_Stage1_SE1_PAN;
    case ARMMMUIdx_E10_0:
        return ARMMMUIdx_Stage1_E0;
    case ARMMMUIdx_E10_1:
        return ARMMMUIdx_Stage1_E1;
    case ARMMMUIdx_E10_1_PAN:
        return ARMMMUIdx_Stage1_E1_PAN;
    default:
        /* Already a pure stage 1 (or stage 2 / M-profile) index. */
        return mmu_idx;
    }
}
86
/* Return the stage 1 MMU index for the CPU's current translation regime. */
ARMMMUIdx arm_stage1_mmu_idx(CPUARMState *env)
{
    return stage_1_mmu_idx(arm_mmu_idx(env));
}
91
92static bool regime_translation_big_endian(CPUARMState *env, ARMMMUIdx mmu_idx)
93{
94 return (regime_sctlr(env, mmu_idx) & SCTLR_EE) != 0;
95}
96
/* Return true if this translation regime is an unprivileged (user) regime. */
static bool regime_is_user(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_SE10_0:
    case ARMMMUIdx_E20_0:
    case ARMMMUIdx_SE20_0:
    case ARMMMUIdx_Stage1_E0:
    case ARMMMUIdx_Stage1_SE0:
    case ARMMMUIdx_MUser:
    case ARMMMUIdx_MSUser:
    case ARMMMUIdx_MUserNegPri:
    case ARMMMUIdx_MSUserNegPri:
        return true;
    default:
        return false;
    /*
     * NOTE(review): combined E10 stage 1+2 indexes are asserted to never
     * reach here — presumably callers must first convert them with
     * stage_1_mmu_idx(); confirm against the call sites.
     */
    case ARMMMUIdx_E10_0:
    case ARMMMUIdx_E10_1:
    case ARMMMUIdx_E10_1_PAN:
        g_assert_not_reached();
    }
}
118
119
120static uint64_t regime_ttbr(CPUARMState *env, ARMMMUIdx mmu_idx, int ttbrn)
121{
122 if (mmu_idx == ARMMMUIdx_Stage2) {
123 return env->cp15.vttbr_el2;
124 }
125 if (mmu_idx == ARMMMUIdx_Stage2_S) {
126 return env->cp15.vsttbr_el2;
127 }
128 if (ttbrn == 0) {
129 return env->cp15.ttbr0_el[regime_el(env, mmu_idx)];
130 } else {
131 return env->cp15.ttbr1_el[regime_el(env, mmu_idx)];
132 }
133}
134
135
/* Return true if the specified stage of address translation is disabled. */
static bool regime_translation_disabled(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    uint64_t hcr_el2;

    if (arm_feature(env, ARM_FEATURE_M)) {
        /* M profile: decode MPU_CTRL.{ENABLE,HFNMIENA} for this security state */
        switch (env->v7m.mpu_ctrl[regime_is_secure(env, mmu_idx)] &
                (R_V7M_MPU_CTRL_ENABLE_MASK | R_V7M_MPU_CTRL_HFNMIENA_MASK)) {
        case R_V7M_MPU_CTRL_ENABLE_MASK:
            /* Enabled, but not for HardFault and NMI (negative priority) */
            return mmu_idx & ARM_MMU_IDX_M_NEGPRI;
        case R_V7M_MPU_CTRL_ENABLE_MASK | R_V7M_MPU_CTRL_HFNMIENA_MASK:
            /* Enabled for all cases */
            return false;
        case 0:
        default:
            /*
             * HFNMIENA set without ENABLE is UNPREDICTABLE; we treat the
             * MPU as disabled.  NOTE(review): presumably the guest was
             * warned about this combination when it wrote MPU_CTRL —
             * confirm in the NVIC register-write code.
             */
            return true;
        }
    }

    hcr_el2 = arm_hcr_el2_eff(env);

    if (mmu_idx == ARMMMUIdx_Stage2 || mmu_idx == ARMMMUIdx_Stage2_S) {
        /* HCR.DC means HCR.VM behaves as 1 */
        return (hcr_el2 & (HCR_DC | HCR_VM)) == 0;
    }

    if (hcr_el2 & HCR_TGE) {
        /* TGE means that NS EL0/1 acts as if SCTLR_EL1.M is zero */
        if (!regime_is_secure(env, mmu_idx) && regime_el(env, mmu_idx) == 1) {
            return true;
        }
    }

    if ((hcr_el2 & HCR_DC) && arm_mmu_idx_is_stage1_of_2(mmu_idx)) {
        /* HCR.DC means SCTLR_EL1.M behaves as 0 */
        return true;
    }

    return (regime_sctlr(env, mmu_idx) & SCTLR_M) == 0;
}
180
/*
 * Return true if the combined memory attributes for a stage 1 page table
 * walk resolve to Device memory, given the stage 2 cache attributes.
 *
 * With HCR_EL2.FWB == 0, stage 2 attrs bits [3:2] == 0b00 encode Device;
 * with FWB == 1, attrs bit [2] == 0 encodes Device.  Only stage-2-format
 * attributes are meaningful here, hence the assert.
 */
static bool ptw_attrs_are_device(CPUARMState *env, ARMCacheAttrs cacheattrs)
{
    assert(cacheattrs.is_s2_format);
    if (arm_hcr_el2_eff(env) & HCR_FWB) {
        return (cacheattrs.attrs & 0x4) == 0;
    } else {
        return (cacheattrs.attrs & 0xc) == 0;
    }
}
199
200
/*
 * Translate a stage 1 page table walk address through stage 2, if stage 2
 * is enabled for this regime.  On failure, fills in @fi (marking the fault
 * as a stage-2 fault on an S1 walk) and returns ~0.  On success may flip
 * *@is_secure according to VSTCR.SW / VTCR.NSW.
 */
static hwaddr S1_ptw_translate(CPUARMState *env, ARMMMUIdx mmu_idx,
                               hwaddr addr, bool *is_secure,
                               ARMMMUFaultInfo *fi)
{
    if (arm_mmu_idx_is_stage1_of_2(mmu_idx) &&
        !regime_translation_disabled(env, ARMMMUIdx_Stage2)) {
        target_ulong s2size;
        hwaddr s2pa;
        int s2prot;
        int ret;
        ARMMMUIdx s2_mmu_idx = *is_secure ? ARMMMUIdx_Stage2_S
                                          : ARMMMUIdx_Stage2;
        ARMCacheAttrs cacheattrs = {};
        MemTxAttrs txattrs = {};

        ret = get_phys_addr_lpae(env, addr, MMU_DATA_LOAD, s2_mmu_idx, false,
                                 &s2pa, &txattrs, &s2prot, &s2size, fi,
                                 &cacheattrs);
        if (ret) {
            /* Stage 2 translation of the walk address itself faulted. */
            assert(fi->type != ARMFault_None);
            fi->s2addr = addr;
            fi->stage2 = true;
            fi->s1ptw = true;
            fi->s1ns = !*is_secure;
            return ~0;
        }
        if ((arm_hcr_el2_eff(env) & HCR_PTW) &&
            ptw_attrs_are_device(env, cacheattrs)) {
            /*
             * HCR.PTW: a stage 1 walk to stage-2-Device memory is
             * reported as a stage 2 Permission fault.
             */
            fi->type = ARMFault_Permission;
            fi->s2addr = addr;
            fi->stage2 = true;
            fi->s1ptw = true;
            fi->s1ns = !*is_secure;
            return ~0;
        }

        if (arm_is_secure_below_el3(env)) {
            /* Check if page table walk is to secure or non-secure PA space. */
            if (*is_secure) {
                *is_secure = !(env->cp15.vstcr_el2 & VSTCR_SW);
            } else {
                *is_secure = !(env->cp15.vtcr_el2 & VTCR_NSW);
            }
        } else {
            assert(!*is_secure);
        }

        addr = s2pa;
    }
    return addr;
}
256
257
/*
 * Load a 32-bit descriptor during a page table walk, translating the
 * walk address through stage 2 first if required.  Honours the regime's
 * table-walk endianness (SCTLR.EE).  On external abort, fills in @fi and
 * returns 0; if the stage 2 translation of @addr faulted (fi->s1ptw set),
 * returns 0 with @fi already populated by S1_ptw_translate.
 */
static uint32_t arm_ldl_ptw(CPUARMState *env, hwaddr addr, bool is_secure,
                            ARMMMUIdx mmu_idx, ARMMMUFaultInfo *fi)
{
    CPUState *cs = env_cpu(env);
    MemTxAttrs attrs = {};
    MemTxResult result = MEMTX_OK;
    AddressSpace *as;
    uint32_t data;

    addr = S1_ptw_translate(env, mmu_idx, addr, &is_secure, fi);
    attrs.secure = is_secure;
    as = arm_addressspace(cs, attrs);
    if (fi->s1ptw) {
        return 0;
    }
    if (regime_translation_big_endian(env, mmu_idx)) {
        data = address_space_ldl_be(as, addr, attrs, &result);
    } else {
        data = address_space_ldl_le(as, addr, attrs, &result);
    }
    if (result == MEMTX_OK) {
        return data;
    }
    /* External abort on the descriptor load itself. */
    fi->type = ARMFault_SyncExternalOnWalk;
    fi->ea = arm_extabort_type(result);
    return 0;
}
285
/*
 * Load a 64-bit (long-format) descriptor during a page table walk.
 * Same contract as arm_ldl_ptw but for 8-byte descriptors.
 */
static uint64_t arm_ldq_ptw(CPUARMState *env, hwaddr addr, bool is_secure,
                            ARMMMUIdx mmu_idx, ARMMMUFaultInfo *fi)
{
    CPUState *cs = env_cpu(env);
    MemTxAttrs attrs = {};
    MemTxResult result = MEMTX_OK;
    AddressSpace *as;
    uint64_t data;

    addr = S1_ptw_translate(env, mmu_idx, addr, &is_secure, fi);
    attrs.secure = is_secure;
    as = arm_addressspace(cs, attrs);
    if (fi->s1ptw) {
        return 0;
    }
    if (regime_translation_big_endian(env, mmu_idx)) {
        data = address_space_ldq_be(as, addr, attrs, &result);
    } else {
        data = address_space_ldq_le(as, addr, attrs, &result);
    }
    if (result == MEMTX_OK) {
        return data;
    }
    /* External abort on the descriptor load itself. */
    fi->type = ARMFault_SyncExternalOnWalk;
    fi->ea = arm_extabort_type(result);
    return 0;
}
313
/*
 * Compute the level 1 descriptor address for a short-descriptor walk of
 * @address: selects TTBR0 or TTBR1 according to the TTBCR.N split, and
 * adds the level 1 index.  Returns false (translation fault) if the
 * selected table walk is disabled via TTBCR.PD0/PD1.
 */
static bool get_level1_table_address(CPUARMState *env, ARMMMUIdx mmu_idx,
                                     uint32_t *table, uint32_t address)
{
    /* Note that we can only get here for an AArch32 PL0/PL1 lookup */
    uint64_t tcr = regime_tcr(env, mmu_idx);
    int maskshift = extract32(tcr, 0, 3);
    uint32_t mask = ~(((uint32_t)0xffffffffu) >> maskshift);
    uint32_t base_mask;

    if (address & mask) {
        /* Address is above the TTBCR.N split: use TTBR1. */
        if (tcr & TTBCR_PD1) {
            /* Translation table walk disabled for TTBR1 */
            return false;
        }
        *table = regime_ttbr(env, mmu_idx, 1) & 0xffffc000;
    } else {
        if (tcr & TTBCR_PD0) {
            /* Translation table walk disabled for TTBR0 */
            return false;
        }
        /* TTBR0's alignment requirement shrinks as N grows. */
        base_mask = ~((uint32_t)0x3fffu >> maskshift);
        *table = regime_ttbr(env, mmu_idx, 0) & base_mask;
    }
    /* Add the level 1 index: VA bits [31:20], 4 bytes per descriptor. */
    *table |= (address >> 18) & 0x3ffc;
    return true;
}
340
341
342
343
344
345
346
347
/*
 * Translate section/page access permissions to page R/W protection flags.
 *
 * @env:         CPUARMState
 * @mmu_idx:     MMU index indicating required translation regime
 * @ap:          The 3-bit access permissions (AP[2:0])
 * @domain_prot: The 2-bit domain access permissions
 */
static int ap_to_rw_prot(CPUARMState *env, ARMMMUIdx mmu_idx,
                         int ap, int domain_prot)
{
    bool is_user = regime_is_user(env, mmu_idx);

    if (domain_prot == 3) {
        /* Manager domain: accesses are not checked against permissions. */
        return PAGE_READ | PAGE_WRITE;
    }

    switch (ap) {
    case 0:
        if (arm_feature(env, ARM_FEATURE_V7)) {
            return 0;
        }
        /* Pre-v7: AP==0 behaviour depends on SCTLR.S/R (System/ROM bits). */
        switch (regime_sctlr(env, mmu_idx) & (SCTLR_S | SCTLR_R)) {
        case SCTLR_S:
            return is_user ? 0 : PAGE_READ;
        case SCTLR_R:
            return PAGE_READ;
        default:
            return 0;
        }
    case 1:
        return is_user ? 0 : PAGE_READ | PAGE_WRITE;
    case 2:
        if (is_user) {
            return PAGE_READ;
        } else {
            return PAGE_READ | PAGE_WRITE;
        }
    case 3:
        return PAGE_READ | PAGE_WRITE;
    case 4: /* Reserved. */
        return 0;
    case 5:
        return is_user ? 0 : PAGE_READ;
    case 6:
        return PAGE_READ;
    case 7:
        /* AP 0b111 is only valid from v6K onward. */
        if (!arm_feature(env, ARM_FEATURE_V6K)) {
            return 0;
        }
        return PAGE_READ;
    default:
        g_assert_not_reached();
    }
}
395
396
397
398
399
400
401static int simple_ap_to_rw_prot_is_user(int ap, bool is_user)
402{
403 switch (ap) {
404 case 0:
405 return is_user ? 0 : PAGE_READ | PAGE_WRITE;
406 case 1:
407 return PAGE_READ | PAGE_WRITE;
408 case 2:
409 return is_user ? 0 : PAGE_READ;
410 case 3:
411 return PAGE_READ;
412 default:
413 g_assert_not_reached();
414 }
415}
416
/* As simple_ap_to_rw_prot_is_user, deriving the privilege from @mmu_idx. */
static int simple_ap_to_rw_prot(CPUARMState *env, ARMMMUIdx mmu_idx, int ap)
{
    return simple_ap_to_rw_prot_is_user(ap, regime_is_user(env, mmu_idx));
}
421
/*
 * Perform a page table walk in the ARMv5 (pre-v6) short-descriptor format.
 * Returns false on success, filling in *phys_ptr/*prot/*page_size; returns
 * true on failure, filling in @fi (type, domain, level).
 */
static bool get_phys_addr_v5(CPUARMState *env, uint32_t address,
                             MMUAccessType access_type, ARMMMUIdx mmu_idx,
                             hwaddr *phys_ptr, int *prot,
                             target_ulong *page_size,
                             ARMMMUFaultInfo *fi)
{
    int level = 1;
    uint32_t table;
    uint32_t desc;
    int type;
    int ap;
    int domain = 0;
    int domain_prot;
    hwaddr phys_addr;
    uint32_t dacr;

    /* Pagetable walk.  */
    /* Lookup l1 descriptor.  */
    if (!get_level1_table_address(env, mmu_idx, &table, address)) {
        /* Section translation fault if page walk is disabled by PD0 or PD1 */
        fi->type = ARMFault_Translation;
        goto do_fault;
    }
    desc = arm_ldl_ptw(env, table, regime_is_secure(env, mmu_idx),
                       mmu_idx, fi);
    if (fi->type != ARMFault_None) {
        goto do_fault;
    }
    type = (desc & 3);
    domain = (desc >> 5) & 0x0f;
    if (regime_el(env, mmu_idx) == 1) {
        dacr = env->cp15.dacr_ns;
    } else {
        dacr = env->cp15.dacr_s;
    }
    domain_prot = (dacr >> (domain * 2)) & 3;
    if (type == 0) {
        /* Section translation fault.  */
        fi->type = ARMFault_Translation;
        goto do_fault;
    }
    if (type != 2) {
        level = 2;
    }
    if (domain_prot == 0 || domain_prot == 2) {
        fi->type = ARMFault_Domain;
        goto do_fault;
    }
    if (type == 2) {
        /* 1Mb section.  */
        phys_addr = (desc & 0xfff00000) | (address & 0x000fffff);
        ap = (desc >> 10) & 3;
        *page_size = 1024 * 1024;
    } else {
        /* Lookup l2 entry.  */
        if (type == 1) {
            /* Coarse pagetable.  */
            table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc);
        } else {
            /* Fine pagetable.  */
            table = (desc & 0xfffff000) | ((address >> 8) & 0xffc);
        }
        desc = arm_ldl_ptw(env, table, regime_is_secure(env, mmu_idx),
                           mmu_idx, fi);
        if (fi->type != ARMFault_None) {
            goto do_fault;
        }
        switch (desc & 3) {
        case 0: /* Page translation fault.  */
            fi->type = ARMFault_Translation;
            goto do_fault;
        case 1: /* 64k page.  */
            phys_addr = (desc & 0xffff0000) | (address & 0xffff);
            ap = (desc >> (4 + ((address >> 13) & 6))) & 3;
            *page_size = 0x10000;
            break;
        case 2: /* 4k page.  */
            phys_addr = (desc & 0xfffff000) | (address & 0xfff);
            ap = (desc >> (4 + ((address >> 9) & 6))) & 3;
            *page_size = 0x1000;
            break;
        case 3: /* 1k page, or ARMv6/XScale "extended small (4k) page" */
            if (type == 1) {
                /* ARMv6/XScale extended small page format */
                if (arm_feature(env, ARM_FEATURE_XSCALE)
                    || arm_feature(env, ARM_FEATURE_V6)) {
                    phys_addr = (desc & 0xfffff000) | (address & 0xfff);
                    *page_size = 0x1000;
                } else {
                    /*
                     * UNPREDICTABLE in ARMv5; we choose to take a
                     * page translation fault.
                     */
                    fi->type = ARMFault_Translation;
                    goto do_fault;
                }
            } else {
                phys_addr = (desc & 0xfffffc00) | (address & 0x3ff);
                *page_size = 0x400;
            }
            ap = (desc >> 4) & 3;
            break;
        default:
            /* Never happens, but compiler isn't smart enough to tell.  */
            g_assert_not_reached();
        }
    }
    *prot = ap_to_rw_prot(env, mmu_idx, ap, domain_prot);
    /* In this format any readable/writable page is also executable. */
    *prot |= *prot ? PAGE_EXEC : 0;
    if (!(*prot & (1 << access_type))) {
        /* Access permission fault.  */
        fi->type = ARMFault_Permission;
        goto do_fault;
    }
    *phys_ptr = phys_addr;
    return false;
do_fault:
    fi->domain = domain;
    fi->level = level;
    return true;
}
543
/*
 * Perform a page table walk in the ARMv6+ short-descriptor format
 * (sections, supersections, large and small pages, XN/PXN/NS bits).
 * Returns false on success; true on failure with @fi filled in.
 */
static bool get_phys_addr_v6(CPUARMState *env, uint32_t address,
                             MMUAccessType access_type, ARMMMUIdx mmu_idx,
                             hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot,
                             target_ulong *page_size, ARMMMUFaultInfo *fi)
{
    ARMCPU *cpu = env_archcpu(env);
    int level = 1;
    uint32_t table;
    uint32_t desc;
    uint32_t xn;
    uint32_t pxn = 0;
    int type;
    int ap;
    int domain = 0;
    int domain_prot;
    hwaddr phys_addr;
    uint32_t dacr;
    bool ns;

    /* Pagetable walk.  */
    /* Lookup l1 descriptor.  */
    if (!get_level1_table_address(env, mmu_idx, &table, address)) {
        /* Section translation fault if page walk is disabled by PD0 or PD1 */
        fi->type = ARMFault_Translation;
        goto do_fault;
    }
    desc = arm_ldl_ptw(env, table, regime_is_secure(env, mmu_idx),
                       mmu_idx, fi);
    if (fi->type != ARMFault_None) {
        goto do_fault;
    }
    type = (desc & 3);
    if (type == 0 || (type == 3 && !cpu_isar_feature(aa32_pxn, cpu))) {
        /*
         * Section translation fault, or attempt to use the encoding
         * which is Reserved on implementations without PXN.
         */
        fi->type = ARMFault_Translation;
        goto do_fault;
    }
    if ((type == 1) || !(desc & (1 << 18))) {
        /* Page or Section.  */
        domain = (desc >> 5) & 0x0f;
    }
    if (regime_el(env, mmu_idx) == 1) {
        dacr = env->cp15.dacr_ns;
    } else {
        dacr = env->cp15.dacr_s;
    }
    if (type == 1) {
        level = 2;
    }
    domain_prot = (dacr >> (domain * 2)) & 3;
    if (domain_prot == 0 || domain_prot == 2) {
        /* Section or Page domain fault */
        fi->type = ARMFault_Domain;
        goto do_fault;
    }
    if (type != 1) {
        if (desc & (1 << 18)) {
            /* Supersection: 16MB, with extended physical address bits. */
            phys_addr = (desc & 0xff000000) | (address & 0x00ffffff);
            phys_addr |= (uint64_t)extract32(desc, 20, 4) << 32;
            phys_addr |= (uint64_t)extract32(desc, 5, 4) << 36;
            *page_size = 0x1000000;
        } else {
            /* Section.  */
            phys_addr = (desc & 0xfff00000) | (address & 0x000fffff);
            *page_size = 0x100000;
        }
        ap = ((desc >> 10) & 3) | ((desc >> 13) & 4);
        xn = desc & (1 << 4);
        pxn = desc & 1;
        ns = extract32(desc, 19, 1);
    } else {
        if (cpu_isar_feature(aa32_pxn, cpu)) {
            pxn = (desc >> 2) & 1;
        }
        ns = extract32(desc, 3, 1);
        /* Lookup l2 entry.  */
        table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc);
        desc = arm_ldl_ptw(env, table, regime_is_secure(env, mmu_idx),
                           mmu_idx, fi);
        if (fi->type != ARMFault_None) {
            goto do_fault;
        }
        ap = ((desc >> 4) & 3) | ((desc >> 7) & 4);
        switch (desc & 3) {
        case 0: /* Page translation fault.  */
            fi->type = ARMFault_Translation;
            goto do_fault;
        case 1: /* 64k page.  */
            phys_addr = (desc & 0xffff0000) | (address & 0xffff);
            xn = desc & (1 << 15);
            *page_size = 0x10000;
            break;
        case 2: case 3: /* 4k page.  */
            phys_addr = (desc & 0xfffff000) | (address & 0xfff);
            xn = desc & 1;
            *page_size = 0x1000;
            break;
        default:
            /* Never happens, but compiler isn't smart enough to tell.  */
            g_assert_not_reached();
        }
    }
    if (domain_prot == 3) {
        /* Manager domain: accesses are not checked. */
        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
    } else {
        if (pxn && !regime_is_user(env, mmu_idx)) {
            xn = 1;
        }
        if (xn && access_type == MMU_INST_FETCH) {
            fi->type = ARMFault_Permission;
            goto do_fault;
        }

        if (arm_feature(env, ARM_FEATURE_V6K) &&
            (regime_sctlr(env, mmu_idx) & SCTLR_AFE)) {
            /* The simplified model uses AP[0] as an access control bit.  */
            if ((ap & 1) == 0) {
                /* Access flag fault.  */
                fi->type = ARMFault_AccessFlag;
                goto do_fault;
            }
            *prot = simple_ap_to_rw_prot(env, mmu_idx, ap >> 1);
        } else {
            *prot = ap_to_rw_prot(env, mmu_idx, ap, domain_prot);
        }
        if (*prot && !xn) {
            *prot |= PAGE_EXEC;
        }
        if (!(*prot & (1 << access_type))) {
            /* Access permission fault.  */
            fi->type = ARMFault_Permission;
            goto do_fault;
        }
    }
    if (ns) {
        /*
         * The NS bit will (as required by the architecture) have no effect
         * if the CPU doesn't support TZ or this is a non-secure translation
         * regime, because the output attribute will already be non-secure.
         */
        attrs->secure = false;
    }
    *phys_ptr = phys_addr;
    return false;
do_fault:
    fi->domain = domain;
    fi->level = level;
    return true;
}
695
696
697
698
699
700
701
702
/*
 * Translate S2 section/page access permissions to protection flags.
 *
 * @env:       CPUARMState
 * @s2ap:      The 2-bit stage 2 access permissions (S2AP)
 * @xn:        XN (execute-never) bits
 * @s1_is_el0: true if this is the stage 2 of a stage 1+2 walk for EL0
 */
static int get_S2prot(CPUARMState *env, int s2ap, int xn, bool s1_is_el0)
{
    int prot = 0;

    if (s2ap & 1) {
        prot |= PAGE_READ;
    }
    if (s2ap & 2) {
        prot |= PAGE_WRITE;
    }

    if (cpu_isar_feature(any_tts2uxn, env_archcpu(env))) {
        /* With TTS2UXN, XN is a 2-bit field distinguishing EL0 vs EL1. */
        switch (xn) {
        case 0:
            prot |= PAGE_EXEC;
            break;
        case 1:
            if (s1_is_el0) {
                prot |= PAGE_EXEC;
            }
            break;
        case 2:
            break;
        case 3:
            if (!s1_is_el0) {
                prot |= PAGE_EXEC;
            }
            break;
        default:
            g_assert_not_reached();
        }
    } else {
        if (!extract32(xn, 1, 1)) {
            /*
             * NOTE(review): for AArch32 EL2, exec additionally requires
             * read permission here — confirm against the architecture's
             * stage 2 permission rules.
             */
            if (arm_el_is_aa64(env, 2) || prot & PAGE_READ) {
                prot |= PAGE_EXEC;
            }
        }
    }
    return prot;
}
743
744
745
746
747
748
749
750
751
752
753
/*
 * Translate section/page access permissions to protection flags.
 *
 * @env:     CPUARMState
 * @mmu_idx: MMU index indicating required translation regime
 * @is_aa64: TRUE if AArch64
 * @ap:      The 2-bit simple AP (AP[2:1])
 * @ns:      NS (non-secure) bit
 * @xn:      XN (execute-never) bit
 * @pxn:     PXN (privileged execute-never) bit
 */
static int get_S1prot(CPUARMState *env, ARMMMUIdx mmu_idx, bool is_aa64,
                      int ap, int ns, int xn, int pxn)
{
    bool is_user = regime_is_user(env, mmu_idx);
    int prot_rw, user_rw;
    bool have_wxn;
    int wxn = 0;

    /* Stage 2 permissions are handled by get_S2prot instead. */
    assert(mmu_idx != ARMMMUIdx_Stage2);
    assert(mmu_idx != ARMMMUIdx_Stage2_S);

    user_rw = simple_ap_to_rw_prot_is_user(ap, true);
    if (is_user) {
        prot_rw = user_rw;
    } else {
        if (user_rw && regime_is_pan(env, mmu_idx)) {
            /* PAN forbids privileged data access to user-accessible pages. */
            prot_rw = 0;
        } else {
            prot_rw = simple_ap_to_rw_prot_is_user(ap, false);
        }
    }

    /* SCR.SIF: secure instruction fetch from non-secure memory prohibited. */
    if (ns && arm_is_secure(env) && (env->cp15.scr_el3 & SCR_SIF)) {
        return prot_rw;
    }

    /*
     * NOTE(review): WXN is gated on ARM_FEATURE_LPAE here; presumably
     * that is an approximation of the architectural condition for
     * SCTLR.WXN existing — confirm against the SCTLR definition.
     */
    have_wxn = arm_feature(env, ARM_FEATURE_LPAE);

    if (have_wxn) {
        wxn = regime_sctlr(env, mmu_idx) & SCTLR_WXN;
    }

    if (is_aa64) {
        if (regime_has_2_ranges(mmu_idx) && !is_user) {
            /* Writable-by-EL0 pages are never privileged-executable. */
            xn = pxn || (user_rw & PAGE_WRITE);
        }
    } else if (arm_feature(env, ARM_FEATURE_V7)) {
        switch (regime_el(env, mmu_idx)) {
        case 1:
        case 3:
            if (is_user) {
                xn = xn || !(user_rw & PAGE_READ);
            } else {
                int uwxn = 0;
                if (have_wxn) {
                    uwxn = regime_sctlr(env, mmu_idx) & SCTLR_UWXN;
                }
                xn = xn || !(prot_rw & PAGE_READ) || pxn ||
                     (uwxn && (user_rw & PAGE_WRITE));
            }
            break;
        case 2:
            break;
        }
    } else {
        /* Pre-v7: no XN/WXN controls. */
        xn = wxn = 0;
    }

    if (xn || (wxn && (prot_rw & PAGE_WRITE))) {
        return prot_rw;
    }
    return prot_rw | PAGE_EXEC;
}
823
/*
 * Decode the AArch32 long-descriptor translation control parameters
 * (TxSZ, TTBR select, EPD, HPD) for @va in the given regime.
 */
static ARMVAParameters aa32_va_parameters(CPUARMState *env, uint32_t va,
                                          ARMMMUIdx mmu_idx)
{
    uint64_t tcr = regime_tcr(env, mmu_idx);
    uint32_t el = regime_el(env, mmu_idx);
    int select, tsz;
    bool epd, hpd;

    assert(mmu_idx != ARMMMUIdx_Stage2_S);

    if (mmu_idx == ARMMMUIdx_Stage2) {
        /* VTCR */
        bool sext = extract32(tcr, 4, 1);
        bool sign = extract32(tcr, 3, 1);

        /*
         * If the sign-extend bit is not the same as t0sz[3], the result
         * is unpredictable. Flag this as a guest error.
         */
        if (sign != sext) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "AArch32: VTCR.S / VTCR.T0SZ[3] mismatch\n");
        }
        tsz = sextract32(tcr, 0, 4) + 8;
        select = 0;
        hpd = false;
        epd = false;
    } else if (el == 2) {
        /* HTCR: single VA range, translated via TTBR0. */
        tsz = extract32(tcr, 0, 3);
        select = 0;
        hpd = extract64(tcr, 24, 1);
        epd = false;
    } else {
        int t0sz = extract32(tcr, 0, 3);
        int t1sz = extract32(tcr, 16, 3);

        if (t1sz == 0) {
            select = va > (0xffffffffu >> t0sz);
        } else {
            /* Note that we will detect errors later.  */
            select = va >= ~(0xffffffffu >> t1sz);
        }
        if (!select) {
            tsz = t0sz;
            epd = extract32(tcr, 7, 1);
            hpd = extract64(tcr, 41, 1);
        } else {
            tsz = t1sz;
            epd = extract32(tcr, 23, 1);
            hpd = extract64(tcr, 42, 1);
        }
        /* For aarch32, hpd0 is not enabled without t2e as well.  */
        hpd &= extract32(tcr, 6, 1);
    }

    return (ARMVAParameters) {
        .tsz = tsz,
        .select = select,
        .epd = epd,
        .hpd = hpd,
    };
}
887
888
889
890
891
892
893
894
895
896
897
898
/*
 * check_s2_mmu_setup
 * @cpu:        ARMCPU
 * @is_aa64:    True if the translation regime is in AArch64 state
 * @level:      Suggested starting level of the stage 2 walk
 * @inputsize:  Bitsize of IPAs
 * @stride:     Page-table stride (see the ARM ARM)
 * @outputsize: Bitsize of PAs
 *
 * Returns true if the suggested stage 2 translation parameters are OK
 * and false otherwise.
 */
static bool check_s2_mmu_setup(ARMCPU *cpu, bool is_aa64, int level,
                               int inputsize, int stride, int outputsize)
{
    const int grainsize = stride + 3;
    int startsizecheck;

    /*
     * Negative levels are usually not allowed; the exception is level -1,
     * which is only valid for a 52-bit input with 4k granule
     * (i.e. FEAT_LPA2-style walks).
     */
    if (level < (inputsize == 52 && stride == 9 ? -1 : 0)) {
        return false;
    }

    startsizecheck = inputsize - ((3 - level) * stride + grainsize);
    if (startsizecheck < 1 || startsizecheck > stride + 4) {
        return false;
    }

    if (is_aa64) {
        switch (stride) {
        case 13: /* 64KB Pages.  */
            if (level == 0 || (level == 1 && outputsize <= 42)) {
                return false;
            }
            break;
        case 11: /* 16KB Pages.  */
            if (level == 0 || (level == 1 && outputsize <= 40)) {
                return false;
            }
            break;
        case 9: /* 4KB Pages.  */
            if (level == 0 && outputsize <= 42) {
                return false;
            }
            break;
        default:
            g_assert_not_reached();
        }

        /* Inputsize checks.  */
        if (inputsize > outputsize &&
            (arm_el_is_aa64(&cpu->env, 1) || inputsize > 40)) {
            /* This is CONSTRAINED UNPREDICTABLE and we choose to fault.  */
            return false;
        }
    } else {
        /* AArch32 only supports 4KB pages. Assert on that.  */
        assert(stride == 9);

        if (level == 0) {
            return false;
        }
    }
    return true;
}
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
/**
 * get_phys_addr_lpae: perform one stage of page table walk, LPAE format
 *
 * Returns false if the translation was successful.  Otherwise, phys_ptr,
 * attrs, prot and page_size may not be filled in, and @fi describes why
 * the translation aborted.
 *
 * @env: CPUARMState
 * @address: virtual address to get physical address for
 * @access_type: MMU_DATA_LOAD, MMU_DATA_STORE or MMU_INST_FETCH
 * @mmu_idx: MMU index indicating required translation regime
 * @s1_is_el0: if this is a stage 2 walk, true when it is stage 2 of a
 *             stage 1+2 walk for an EL0 access; ignored otherwise
 * @phys_ptr: set to the physical address corresponding to the virtual address
 * @txattrs: set to the memory transaction attributes to use
 * @prot: set to the permissions for the page containing phys_ptr
 * @page_size_ptr: set to the size of the page containing phys_ptr
 * @fi: set to fault info if the translation fails
 * @cacheattrs: set to the cacheability/shareability attributes
 */
static bool get_phys_addr_lpae(CPUARMState *env, uint64_t address,
                               MMUAccessType access_type, ARMMMUIdx mmu_idx,
                               bool s1_is_el0, hwaddr *phys_ptr,
                               MemTxAttrs *txattrs, int *prot,
                               target_ulong *page_size_ptr,
                               ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs)
{
    ARMCPU *cpu = env_archcpu(env);
    /* Read an LPAE long-descriptor translation table. */
    ARMFaultType fault_type = ARMFault_Translation;
    uint32_t level;
    ARMVAParameters param;
    uint64_t ttbr;
    hwaddr descaddr, indexmask, indexmask_grainsize;
    uint32_t tableattrs;
    target_ulong page_size;
    uint32_t attrs;
    int32_t stride;
    int addrsize, inputsize, outputsize;
    uint64_t tcr = regime_tcr(env, mmu_idx);
    int ap, ns, xn, pxn;
    uint32_t el = regime_el(env, mmu_idx);
    uint64_t descaddrmask;
    bool aarch64 = arm_el_is_aa64(env, el);
    bool guarded = false;

    /* TODO: This code does not support shareability levels. */
    if (aarch64) {
        int ps;

        param = aa64_va_parameters(env, address, mmu_idx,
                                   access_type != MMU_INST_FETCH);
        level = 0;

        /*
         * If TxSZ was programmed out of bounds it is IMPLEMENTATION
         * DEFINED whether to clamp or fault; we choose to always raise
         * a level 0 Translation fault (tsz_oob is computed by
         * aa64_va_parameters).
         */
        if (param.tsz_oob) {
            fault_type = ARMFault_Translation;
            goto do_fault;
        }

        addrsize = 64 - 8 * param.tbi;
        inputsize = 64 - param.tsz;

        /*
         * Bound PS by PARANGE to find the effective output address size.
         * id_aa64mmfr0 is a read-only register so values outside of the
         * supported mappings can be considered an implementation error.
         */
        ps = FIELD_EX64(cpu->isar.id_aa64mmfr0, ID_AA64MMFR0, PARANGE);
        ps = MIN(ps, param.ps);
        assert(ps < ARRAY_SIZE(pamax_map));
        outputsize = pamax_map[ps];
    } else {
        param = aa32_va_parameters(env, address, mmu_idx);
        level = 1;
        addrsize = (mmu_idx == ARMMMUIdx_Stage2 ? 40 : 32);
        inputsize = addrsize - param.tsz;
        outputsize = 40;
    }

    /*
     * We determined the region when collecting the parameters, but we
     * have not yet validated that the address is valid for the region.
     * Extract the top bits and verify that they all match select.
     *
     * For aa32, if inputsize == addrsize, then we have selected the
     * region by exclusion in aa32_va_parameters and there is no more
     * validation to do here.
     */
    if (inputsize < addrsize) {
        target_ulong top_bits = sextract64(address, inputsize,
                                           addrsize - inputsize);
        if (-top_bits != param.select) {
            /* The gap between the two regions is a Translation fault */
            fault_type = ARMFault_Translation;
            goto do_fault;
        }
    }

    /* Stride: number of VA bits consumed per walk level. */
    if (param.using64k) {
        stride = 13;
    } else if (param.using16k) {
        stride = 11;
    } else {
        stride = 9;
    }

    /*
     * Select TTBR0 or TTBR1 according to the half of the address space
     * determined above.  NOTE(review): QEMU appears to ignore the SH/ORGN/
     * IRGN walk-attribute fields of the TCR here — presumably walk
     * cacheability is not modelled; confirm elsewhere in the target code.
     */
    ttbr = regime_ttbr(env, mmu_idx, param.select);

    /*
     * Here we should have set up all the parameters for the translation:
     * inputsize, ttbr, epd, stride.
     */
    if (param.epd) {
        /*
         * Translation table walk disabled => Translation fault on TLB miss
         * Note: This is always 0 on 64-bit EL2 and EL3.
         */
        goto do_fault;
    }

    if (mmu_idx != ARMMMUIdx_Stage2 && mmu_idx != ARMMMUIdx_Stage2_S) {
        /*
         * The starting level depends on the virtual address size (which
         * can be up to 48 bits) and the translation granule size.  It
         * indicates the number of strides needed to consume the bits of
         * the virtual address.  In the pseudocode this is:
         *   level = 4 - RoundUp((inputsize - grainsize) / stride)
         * where 'grainsize' is our 'stride + 3'.  Applying the usual
         * "rounded up m/n is (m+n-1)/n" and simplifying:
         *   = 4 - (inputsize - stride - 3 + stride - 1) / stride
         *   = 4 - (inputsize - 4) / stride
         */
        level = 4 - (inputsize - 4) / stride;
    } else {
        /*
         * For stage 2 translations the starting level is specified by the
         * VTCR_EL2.SL0 field (whose interpretation depends on the page
         * size); SL2 extends this when 52-bit (DS) 4k walks are in use.
         */
        uint32_t sl0 = extract32(tcr, 6, 2);
        uint32_t sl2 = extract64(tcr, 33, 1);
        uint32_t startlevel;
        bool ok;

        /* SL2 is special only for 4k granule with DS enabled. */
        if (param.ds && stride == 9 && sl2) {
            if (sl0 != 0) {
                level = 0;
                fault_type = ARMFault_Translation;
                goto do_fault;
            }
            startlevel = -1;
        } else if (!aarch64 || stride == 9) {
            /* AArch32 or 4KB pages */
            startlevel = 2 - sl0;

            if (cpu_isar_feature(aa64_st, cpu)) {
                startlevel &= 3;
            }
        } else {
            /* 16KB or 64KB pages */
            startlevel = 3 - sl0;
        }

        /* Check that the starting level is valid. */
        ok = check_s2_mmu_setup(cpu, aarch64, startlevel,
                                inputsize, stride, outputsize);
        if (!ok) {
            fault_type = ARMFault_Translation;
            goto do_fault;
        }
        level = startlevel;
    }

    indexmask_grainsize = MAKE_64BIT_MASK(0, stride + 3);
    indexmask = MAKE_64BIT_MASK(0, inputsize - (stride * (4 - level)));

    /* Now we can extract the actual base address from the TTBR */
    descaddr = extract64(ttbr, 0, 48);

    /*
     * When the output size exceeds 48 bits, bits [51:48] of the base
     * address come from TTBR bits [5:2].  Otherwise, if the base address
     * has bits set above the output size, raise an AddressSize fault
     * (level 0, since we have not walked anything yet).
     */
    if (outputsize > 48) {
        descaddr |= extract64(ttbr, 2, 4) << 48;
    } else if (descaddr >> outputsize) {
        level = 0;
        fault_type = ARMFault_AddressSize;
        goto do_fault;
    }

    /*
     * Clear the low index bits of the table base; this also masks out
     * any RES0 / CnP bits at the bottom of the TTBR.
     */
    descaddr &= ~indexmask;

    /*
     * Mask that selects the output-address field of a descriptor:
     * up to bit 49 when DS is in effect, bit 47 for v8, bit 39 for
     * pre-v8 LPAE.  The low grain-sized bits are table/page aligned
     * and masked off separately.
     */
    if (param.ds) {
        descaddrmask = MAKE_64BIT_MASK(0, 50);
    } else if (arm_feature(env, ARM_FEATURE_V8)) {
        descaddrmask = MAKE_64BIT_MASK(0, 48);
    } else {
        descaddrmask = MAKE_64BIT_MASK(0, 40);
    }
    descaddrmask &= ~indexmask_grainsize;

    /*
     * Secure accesses start with the page table in secure memory and
     * can be downgraded to non-secure at any step.  Non-secure accesses
     * remain non-secure.  We implement this by just ORing in the NSTable/NS
     * bits at each step.
     */
    tableattrs = regime_is_secure(env, mmu_idx) ? 0 : (1 << 4);
    for (;;) {
        uint64_t descriptor;
        bool nstable;

        descaddr |= (address >> (stride * (4 - level))) & indexmask;
        descaddr &= ~7ULL;
        nstable = extract32(tableattrs, 4, 1);
        descriptor = arm_ldq_ptw(env, descaddr, !nstable, mmu_idx, fi);
        if (fi->type != ARMFault_None) {
            goto do_fault;
        }

        /* Invalid descriptor, or the reserved level 3 block encoding. */
        if (!(descriptor & 1) ||
            (!(descriptor & 2) && (level == 3))) {
            /* Invalid, or the Reserved level 3 encoding */
            goto do_fault;
        }

        descaddr = descriptor & descaddrmask;

        /*
         * For FEAT_LPA/LPA2, the extra output-address bits of the
         * descriptor live in bits [9:8] (DS) or [15:12] (PS == 52),
         * shifted up to [51:50] / [51:48].  Otherwise, fault if the
         * descriptor address exceeds the output size.
         */
        if (outputsize > 48) {
            if (param.ds) {
                descaddr |= extract64(descriptor, 8, 2) << 50;
            } else {
                descaddr |= extract64(descriptor, 12, 4) << 48;
            }
        } else if (descaddr >> outputsize) {
            fault_type = ARMFault_AddressSize;
            goto do_fault;
        }

        if ((descriptor & 2) && (level < 3)) {
            /*
             * Table entry.  The top five bits are attributes which may
             * propagate down through lower levels of the table (and
             * which are all arranged so that 0 means "no effect", so
             * we can gather them up by ORing in the bits at each level).
             */
            tableattrs |= extract64(descriptor, 59, 5);
            level++;
            indexmask = indexmask_grainsize;
            continue;
        }

        /*
         * Block entry at level 1 or 2, or page entry at level 3.
         * These are basically the same thing, although the number
         * of bits we pull in from the vaddr varies.  Note that although
         * descaddrmask masks enough of the low bits of the descriptor
         * to give a correct page or table address, the address field
         * in a block descriptor is smaller; so we need to explicitly
         * clear the lower bits here before ORing in the low vaddr bits.
         */
        page_size = (1ULL << ((stride * (4 - level)) + 3));
        descaddr &= ~(hwaddr)(page_size - 1);
        descaddr |= (address & (page_size - 1));
        /* Extract attributes from the descriptor */
        attrs = extract64(descriptor, 2, 10)
            | (extract64(descriptor, 52, 12) << 10);

        if (mmu_idx == ARMMMUIdx_Stage2 || mmu_idx == ARMMMUIdx_Stage2_S) {
            /* Stage 2 table descriptors do not include any attribute fields */
            break;
        }
        /* Merge in attributes from table descriptors */
        attrs |= nstable << 3; /* NS */
        guarded = extract64(descriptor, 50, 1); /* GP */
        if (param.hpd) {
            /* HPD disables all the table attributes except NSTable.  */
            break;
        }
        attrs |= extract32(tableattrs, 0, 2) << 11; /* XN, PXN */
        /*
         * The sense of AP[1] vs APTable[0] is reversed, as APTable[0] == 1
         * means "force PL1 access only", which means forcing AP[1] to 0.
         */
        attrs &= ~(extract32(tableattrs, 2, 1) << 4);   /* !APT[0] => AP[1] */
        attrs |= extract32(tableattrs, 3, 1) << 5;      /* APT[1] => AP[2] */
        break;
    }

    /*
     * Here descaddr is the final physical address, and attributes
     * are all in attrs.
     */
    fault_type = ARMFault_AccessFlag;
    if ((attrs & (1 << 8)) == 0) {
        /* Access flag */
        goto do_fault;
    }

    ap = extract32(attrs, 4, 2);

    if (mmu_idx == ARMMMUIdx_Stage2 || mmu_idx == ARMMMUIdx_Stage2_S) {
        ns = mmu_idx == ARMMMUIdx_Stage2;
        xn = extract32(attrs, 11, 2);
        *prot = get_S2prot(env, ap, xn, s1_is_el0);
    } else {
        ns = extract32(attrs, 3, 1);
        xn = extract32(attrs, 12, 1);
        pxn = extract32(attrs, 11, 1);
        *prot = get_S1prot(env, mmu_idx, aarch64, ap, ns, xn, pxn);
    }

    fault_type = ARMFault_Permission;
    if (!(*prot & (1 << access_type))) {
        goto do_fault;
    }

    if (ns) {
        /*
         * The NS bit will (as required by the architecture) have no effect
         * if the CPU doesn't support TZ or this is a non-secure translation
         * regime, because the attribute will already be non-secure.
         */
        txattrs->secure = false;
    }
    /* When in aarch64 mode, and BTI is enabled, remember GP in the TLB. */
    if (aarch64 && guarded && cpu_isar_feature(aa64_bti, cpu)) {
        arm_tlb_bti_gp(txattrs) = true;
    }

    if (mmu_idx == ARMMMUIdx_Stage2 || mmu_idx == ARMMMUIdx_Stage2_S) {
        cacheattrs->is_s2_format = true;
        cacheattrs->attrs = extract32(attrs, 0, 4);
    } else {
        /* Index into MAIR registers for cache attributes */
        uint8_t attrindx = extract32(attrs, 0, 3);
        uint64_t mair = env->cp15.mair_el[regime_el(env, mmu_idx)];
        assert(attrindx <= 7);
        cacheattrs->is_s2_format = false;
        cacheattrs->attrs = extract64(mair, attrindx * 8, 8);
    }

    /*
     * When DS is in effect the descriptor SH field was re-purposed for
     * output address bits, so the shareability comes from the TCR
     * (already decoded into param.sh); otherwise take it from the
     * descriptor attributes.
     */
    if (param.ds) {
        cacheattrs->shareability = param.sh;
    } else {
        cacheattrs->shareability = extract32(attrs, 6, 2);
    }

    *phys_ptr = descaddr;
    *page_size_ptr = page_size;
    return false;

do_fault:
    fi->type = fault_type;
    fi->level = level;
    /* Tag the error as S2 for failed S1 PTW at S2 or ordinary S2.  */
    fi->stage2 = fi->s1ptw || (mmu_idx == ARMMMUIdx_Stage2 ||
                               mmu_idx == ARMMMUIdx_Stage2_S);
    fi->s1ns = mmu_idx == ARMMMUIdx_Stage2;
    return true;
}
1367
/*
 * PMSAv5 (ARMv5 MPU) lookup: find the highest-numbered enabled region
 * containing @address and apply its access permissions.  Returns false
 * on success (flat mapping, *prot filled in), true on fault with @fi set.
 */
static bool get_phys_addr_pmsav5(CPUARMState *env, uint32_t address,
                                 MMUAccessType access_type, ARMMMUIdx mmu_idx,
                                 hwaddr *phys_ptr, int *prot,
                                 ARMMMUFaultInfo *fi)
{
    int n;
    uint32_t mask;
    uint32_t base;
    bool is_user = regime_is_user(env, mmu_idx);

    if (regime_translation_disabled(env, mmu_idx)) {
        /* MPU disabled.  */
        *phys_ptr = address;
        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        return false;
    }

    *phys_ptr = address;
    /* Higher-numbered regions take priority: search from 7 down. */
    for (n = 7; n >= 0; n--) {
        base = env->cp15.c6_region[n];
        if ((base & 1) == 0) {
            /* Region disabled. */
            continue;
        }
        mask = 1 << ((base >> 1) & 0x1f);
        /*
         * Keep this in sync with the MPU enable/size handling:
         * region size field N encodes a 2^(N+1)-byte region.
         */
        mask = (mask << 1) - 1;
        if (((base ^ address) & ~mask) == 0) {
            break;
        }
    }
    if (n < 0) {
        /* No region matched: background fault. */
        fi->type = ARMFault_Background;
        return true;
    }

    if (access_type == MMU_INST_FETCH) {
        mask = env->cp15.pmsav5_insn_ap;
    } else {
        mask = env->cp15.pmsav5_data_ap;
    }
    /* 4-bit access permission field per region. */
    mask = (mask >> (n * 4)) & 0xf;
    switch (mask) {
    case 0:
        fi->type = ARMFault_Permission;
        fi->level = 1;
        return true;
    case 1:
        if (is_user) {
            fi->type = ARMFault_Permission;
            fi->level = 1;
            return true;
        }
        *prot = PAGE_READ | PAGE_WRITE;
        break;
    case 2:
        *prot = PAGE_READ;
        if (!is_user) {
            *prot |= PAGE_WRITE;
        }
        break;
    case 3:
        *prot = PAGE_READ | PAGE_WRITE;
        break;
    case 5:
        if (is_user) {
            fi->type = ARMFault_Permission;
            fi->level = 1;
            return true;
        }
        *prot = PAGE_READ;
        break;
    case 6:
        *prot = PAGE_READ;
        break;
    default:
        /* Bad permission.  */
        fi->type = ARMFault_Permission;
        fi->level = 1;
        return true;
    }
    *prot |= PAGE_EXEC;
    return false;
}
1452
/*
 * Fill in *prot with the default (background) memory-map permissions
 * for @address when no MPU region applies.
 */
static void get_phys_addr_pmsav7_default(CPUARMState *env, ARMMMUIdx mmu_idx,
                                         int32_t address, int *prot)
{
    if (!arm_feature(env, ARM_FEATURE_M)) {
        /* R profile default map: RW everywhere, XN for the top region
         * unless high vectors (SCTLR.V) are enabled.
         */
        *prot = PAGE_READ | PAGE_WRITE;
        switch (address) {
        case 0xF0000000 ... 0xFFFFFFFF:
            if (regime_sctlr(env, mmu_idx) & SCTLR_V) {
                /* hivecs execing is ok */
                *prot |= PAGE_EXEC;
            }
            break;
        case 0x00000000 ... 0x7FFFFFFF:
            *prot |= PAGE_EXEC;
            break;
        }
    } else {
        /*
         * M profile default system address map: Code, SRAM and external
         * RAM regions are executable; Peripheral, external device and
         * System regions are not.
         */
        switch (address) {
        case 0x00000000 ... 0x1fffffff: /* ROM */
        case 0x20000000 ... 0x3fffffff: /* SRAM */
        case 0x60000000 ... 0x7fffffff: /* RAM */
        case 0x80000000 ... 0x9fffffff: /* RAM */
            *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
            break;
        case 0x40000000 ... 0x5fffffff: /* Peripheral */
        case 0xa0000000 ... 0xbfffffff: /* Device */
        case 0xc0000000 ... 0xdfffffff: /* Device */
        case 0xe0000000 ... 0xffffffff: /* System */
            *prot = PAGE_READ | PAGE_WRITE;
            break;
        default:
            g_assert_not_reached();
        }
    }
}
1492
1493static bool m_is_ppb_region(CPUARMState *env, uint32_t address)
1494{
1495
1496 return arm_feature(env, ARM_FEATURE_M) &&
1497 extract32(address, 20, 12) == 0xe00;
1498}
1499
1500static bool m_is_system_region(CPUARMState *env, uint32_t address)
1501{
1502
1503
1504
1505
1506 return arm_feature(env, ARM_FEATURE_M) && extract32(address, 29, 3) == 0x7;
1507}
1508
/*
 * Return true if we should use the default (background) memory map as a
 * fallback when no MPU region matches.  Unprivileged accesses never use
 * the background map; privileged accesses use it when PRIVDEFENA
 * (M profile) or SCTLR.BR (R profile) is set.
 */
static bool pmsav7_use_background_region(ARMCPU *cpu, ARMMMUIdx mmu_idx,
                                         bool is_user)
{
    CPUARMState *env = &cpu->env;

    if (is_user) {
        return false;
    }

    if (arm_feature(env, ARM_FEATURE_M)) {
        return env->v7m.mpu_ctrl[regime_is_secure(env, mmu_idx)]
            & R_V7M_MPU_CTRL_PRIVDEFENA_MASK;
    } else {
        return regime_sctlr(env, mmu_idx) & SCTLR_BR;
    }
}
1529
/*
 * PMSAv7 MPU translation. The mapping is always flat (phys == virt);
 * the MPU only determines access permissions and the effective "page"
 * size to report to the TLB. Returns true on a fault (fi filled in),
 * false on success (*prot filled in).
 */
static bool get_phys_addr_pmsav7(CPUARMState *env, uint32_t address,
                                 MMUAccessType access_type, ARMMMUIdx mmu_idx,
                                 hwaddr *phys_ptr, int *prot,
                                 target_ulong *page_size,
                                 ARMMMUFaultInfo *fi)
{
    ARMCPU *cpu = env_archcpu(env);
    int n;
    bool is_user = regime_is_user(env, mmu_idx);

    *phys_ptr = address;
    *page_size = TARGET_PAGE_SIZE;
    *prot = 0;

    if (regime_translation_disabled(env, mmu_idx) ||
        m_is_ppb_region(env, address)) {
        /*
         * MPU disabled, or an M-profile PPB access: use the default
         * memory map permissions.
         */
        get_phys_addr_pmsav7_default(env, mmu_idx, address, prot);
    } else {
        /* Scan regions highest-numbered first: higher regions win */
        for (n = (int)cpu->pmsav7_dregion - 1; n >= 0; n--) {
            /* region search */
            uint32_t base = env->pmsav7.drbar[n];
            uint32_t rsize = extract32(env->pmsav7.drsr[n], 1, 5);
            uint32_t rmask;
            bool srdis = false;

            if (!(env->pmsav7.drsr[n] & 0x1)) {
                /* Region disabled (DRSR.En clear) */
                continue;
            }

            if (!rsize) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "DRSR[%d]: Rsize field cannot be 0\n", n);
                continue;
            }
            /* DRSR.Rsize == N encodes a region of 2^(N+1) bytes */
            rsize++;
            rmask = (1ull << rsize) - 1;

            if (base & rmask) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "DRBAR[%d]: 0x%" PRIx32 " misaligned "
                              "to DRSR region size, mask = 0x%" PRIx32 "\n",
                              n, base, rmask);
                continue;
            }

            if (address < base || address > base + rmask) {
                /*
                 * Address not in this region. If this region overlaps
                 * the TARGET_PAGE containing the address then different
                 * parts of that page have different permissions, so we
                 * must report a tiny page size to stop the TLB from
                 * caching a whole-page entry.
                 */
                if (ranges_overlap(base, rmask,
                                   address & TARGET_PAGE_MASK,
                                   TARGET_PAGE_SIZE)) {
                    *page_size = 1;
                }
                continue;
            }

            /* Region matched */

            if (rsize >= 8) { /* no subregions for regions < 256 bytes */
                int i, snd;
                uint32_t srdis_mask;

                rsize -= 3; /* sub region size (power of 2) */
                snd = ((address - base) >> rsize) & 0x7;
                srdis = extract32(env->pmsav7.drsr[n], snd + 8, 1);

                srdis_mask = srdis ? 0x3 : 0x0;
                for (i = 2; i <= 8 && rsize < TARGET_PAGE_BITS; i *= 2) {
                    /*
                     * Check in widening groups (2, 4, then 8 subregions)
                     * whether the SRD bits around this subregion agree;
                     * each consistent doubling lets us report a page one
                     * power of two larger, up to TARGET_PAGE_BITS.
                     */
                    int snd_rounded = snd & ~(i - 1);
                    uint32_t srdis_multi = extract32(env->pmsav7.drsr[n],
                                                     snd_rounded + 8, i);
                    if (srdis_mask ^ srdis_multi) {
                        break;
                    }
                    srdis_mask = (srdis_mask << i) | srdis_mask;
                    rsize++;
                }
            }
            if (srdis) {
                /* Matching subregion is disabled: treat as no match */
                continue;
            }
            if (rsize < TARGET_PAGE_BITS) {
                *page_size = 1 << rsize;
            }
            break;
        }

        if (n == -1) { /* no hits */
            if (!pmsav7_use_background_region(cpu, mmu_idx, is_user)) {
                /* background fault */
                fi->type = ARMFault_Background;
                return true;
            }
            get_phys_addr_pmsav7_default(env, mmu_idx, address, prot);
        } else { /* a MPU hit! */
            uint32_t ap = extract32(env->pmsav7.dracr[n], 8, 3);
            uint32_t xn = extract32(env->pmsav7.dracr[n], 12, 1);

            if (m_is_system_region(env, address)) {
                /* System space is always execute-never */
                xn = 1;
            }

            if (is_user) { /* User mode AP bit decoding */
                switch (ap) {
                case 0:
                case 1:
                case 5:
                    break; /* no access */
                case 3:
                    *prot |= PAGE_WRITE;
                    /* fall through */
                case 2:
                case 6:
                    *prot |= PAGE_READ | PAGE_EXEC;
                    break;
                case 7:
                    /* for v7M, same as 6; reserved otherwise */
                    if (arm_feature(env, ARM_FEATURE_M)) {
                        *prot |= PAGE_READ | PAGE_EXEC;
                        break;
                    }
                    /* fall through */
                default:
                    qemu_log_mask(LOG_GUEST_ERROR,
                                  "DRACR[%d]: Bad value for AP bits: 0x%"
                                  PRIx32 "\n", n, ap);
                }
            } else { /* Priv. mode AP bits decoding */
                switch (ap) {
                case 0:
                    break; /* no access */
                case 1:
                case 2:
                case 3:
                    *prot |= PAGE_WRITE;
                    /* fall through */
                case 5:
                case 6:
                    *prot |= PAGE_READ | PAGE_EXEC;
                    break;
                case 7:
                    /* for v7M, same as 6; reserved otherwise */
                    if (arm_feature(env, ARM_FEATURE_M)) {
                        *prot |= PAGE_READ | PAGE_EXEC;
                        break;
                    }
                    /* fall through */
                default:
                    qemu_log_mask(LOG_GUEST_ERROR,
                                  "DRACR[%d]: Bad value for AP bits: 0x%"
                                  PRIx32 "\n", n, ap);
                }
            }

            /* execute never */
            if (xn) {
                *prot &= ~PAGE_EXEC;
            }
        }
    }

    /*
     * Report a permission fault iff the access type's PAGE_* bit is
     * missing: (1 << access_type) maps MMU_DATA_LOAD/STORE/INST_FETCH
     * onto PAGE_READ/WRITE/EXEC. fi is only meaningful if we return true.
     */
    fi->type = ARMFault_Permission;
    fi->level = 1;
    return !(*prot & (1 << access_type));
}
1718
/*
 * v8M MPU lookup. The mapping is flat (phys == virt); this determines
 * permissions and whether the result covers a full TARGET_PAGE.
 *
 * *is_subpage is set true when the matched region (or an overlapping
 * one) does not cover the whole page containing the address, so the
 * caller must report a 1-byte page size to avoid bad TLB caching.
 *
 * If mregion is non-NULL it receives the number of the matched MPU
 * region, or -1 for no match / background map; in that case the caller
 * presumably only wants the region number (NOTE(review): looks like the
 * TT-instruction helper path — confirm against callers).
 *
 * Returns true on a fault (fi filled in), false on success.
 */
bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address,
                       MMUAccessType access_type, ARMMMUIdx mmu_idx,
                       hwaddr *phys_ptr, MemTxAttrs *txattrs,
                       int *prot, bool *is_subpage,
                       ARMMMUFaultInfo *fi, uint32_t *mregion)
{
    ARMCPU *cpu = env_archcpu(env);
    bool is_user = regime_is_user(env, mmu_idx);
    uint32_t secure = regime_is_secure(env, mmu_idx);
    int n;
    int matchregion = -1;
    bool hit = false;
    uint32_t addr_page_base = address & TARGET_PAGE_MASK;
    uint32_t addr_page_limit = addr_page_base + (TARGET_PAGE_SIZE - 1);

    *is_subpage = false;
    *phys_ptr = address;
    *prot = 0;
    if (mregion) {
        *mregion = -1;
    }

    /*
     * MPU disabled or PPB access: treat as a hit on the default map.
     * Otherwise scan all regions; note that unlike PMSAv7, v8M regions
     * may not overlap, so multiple matches are a fault, and the
     * background map (if enabled) only applies when nothing matches.
     */
    if (regime_translation_disabled(env, mmu_idx)) {
        hit = true;
    } else if (m_is_ppb_region(env, address)) {
        hit = true;
    } else {
        if (pmsav7_use_background_region(cpu, mmu_idx, is_user)) {
            hit = true;
        }

        for (n = (int)cpu->pmsav7_dregion - 1; n >= 0; n--) {
            /*
             * RBAR/RLAR hold 32-byte-aligned base/limit; the low 5 bits
             * of RBAR are flags and the low 5 bits of RLAR make the
             * limit inclusive of the last 32-byte granule.
             */
            uint32_t base = env->pmsav8.rbar[secure][n] & ~0x1f;
            uint32_t limit = env->pmsav8.rlar[secure][n] | 0x1f;

            if (!(env->pmsav8.rlar[secure][n] & 0x1)) {
                /* Region disabled */
                continue;
            }

            if (address < base || address > limit) {
                /*
                 * Address not in this region. If the region overlaps the
                 * page containing the address, permissions differ within
                 * the page, so flag it as a subpage. (limit >= base also
                 * guards ranges_overlap() against a malformed descending
                 * region.)
                 */
                if (limit >= base &&
                    ranges_overlap(base, limit - base + 1,
                                   addr_page_base,
                                   TARGET_PAGE_SIZE)) {
                    *is_subpage = true;
                }
                continue;
            }

            if (base > addr_page_base || limit < addr_page_limit) {
                *is_subpage = true;
            }

            if (matchregion != -1) {
                /*
                 * Multiple regions match: architecturally this access
                 * is faulted.
                 */
                fi->type = ARMFault_Permission;
                fi->level = 1;
                return true;
            }

            matchregion = n;
            hit = true;
        }
    }

    if (!hit) {
        /* No region match and background map not usable */
        fi->type = ARMFault_Background;
        return true;
    }

    if (matchregion == -1) {
        /* hit using the background region */
        get_phys_addr_pmsav7_default(env, mmu_idx, address, prot);
    } else {
        uint32_t ap = extract32(env->pmsav8.rbar[secure][matchregion], 1, 2);
        uint32_t xn = extract32(env->pmsav8.rbar[secure][matchregion], 0, 1);
        bool pxn = false;

        if (arm_feature(env, ARM_FEATURE_V8_1M)) {
            pxn = extract32(env->pmsav8.rlar[secure][matchregion], 4, 1);
        }

        if (m_is_system_region(env, address)) {
            /* System space is always execute-never */
            xn = 1;
        }

        /* XN (and v8.1M privileged-XN) strip exec permission */
        *prot = simple_ap_to_rw_prot(env, mmu_idx, ap);
        if (*prot && !xn && !(pxn && !is_user)) {
            *prot |= PAGE_EXEC;
        }

        /* Report the matched region number for callers that want it */
        if (mregion) {
            *mregion = matchregion;
        }
    }

    /* Permission fault iff the requested access type's bit is missing */
    fi->type = ARMFault_Permission;
    fi->level = 1;
    return !(*prot & (1 << access_type));
}
1859
1860static bool v8m_is_sau_exempt(CPUARMState *env,
1861 uint32_t address, MMUAccessType access_type)
1862{
1863
1864
1865
1866
1867 return
1868 (access_type == MMU_INST_FETCH && m_is_system_region(env, address)) ||
1869 (address >= 0xe0000000 && address <= 0xe0002fff) ||
1870 (address >= 0xe000e000 && address <= 0xe000efff) ||
1871 (address >= 0xe002e000 && address <= 0xe002efff) ||
1872 (address >= 0xe0040000 && address <= 0xe0041fff) ||
1873 (address >= 0xe00ff000 && address <= 0xe00fffff);
1874}
1875
/*
 * v8M security attribution lookup: fill in *sattrs (assumed
 * zero-initialized by the caller) with the Security Attribution Unit /
 * IDAU result for this address: ns, nsc, and the matched SAU/IDAU
 * region numbers where valid. sattrs->subpage is set when the matched
 * or overlapping SAU region does not cover the whole TARGET_PAGE.
 */
void v8m_security_lookup(CPUARMState *env, uint32_t address,
                         MMUAccessType access_type, ARMMMUIdx mmu_idx,
                         V8M_SAttributes *sattrs)
{
    ARMCPU *cpu = env_archcpu(env);
    int r;
    bool idau_exempt = false, idau_ns = true, idau_nsc = true;
    int idau_region = IREGION_NOTVALID;
    uint32_t addr_page_base = address & TARGET_PAGE_MASK;
    uint32_t addr_page_limit = addr_page_base + (TARGET_PAGE_SIZE - 1);

    if (cpu->idau) {
        IDAUInterfaceClass *iic = IDAU_INTERFACE_GET_CLASS(cpu->idau);
        IDAUInterface *ii = IDAU_INTERFACE(cpu->idau);

        iic->check(ii, address, &idau_region, &idau_exempt, &idau_ns,
                   &idau_nsc);
    }

    if (access_type == MMU_INST_FETCH && extract32(address, 28, 4) == 0xf) {
        /* Instruction fetches from 0xf0000000..: leave defaults (Secure) */
        return;
    }

    if (idau_exempt || v8m_is_sau_exempt(env, address, access_type)) {
        /* Exempt accesses take the security state of the CPU */
        sattrs->ns = !regime_is_secure(env, mmu_idx);
        return;
    }

    if (idau_region != IREGION_NOTVALID) {
        sattrs->irvalid = true;
        sattrs->iregion = idau_region;
    }

    /* SAU_CTRL: bit 0 is ENABLE, bit 1 is ALLNS */
    switch (env->sau.ctrl & 3) {
    case 0: /* SAU.ENABLE == 0, SAU.ALLNS == 0: everything is Secure */
        break;
    case 2: /* SAU.ENABLE == 0, SAU.ALLNS == 1: everything is NonSecure */
        sattrs->ns = true;
        break;
    default: /* SAU.ENABLE == 1: look the address up in the SAU regions */
        for (r = 0; r < cpu->sau_sregion; r++) {
            if (env->sau.rlar[r] & 1) {
                uint32_t base = env->sau.rbar[r] & ~0x1f;
                uint32_t limit = env->sau.rlar[r] | 0x1f;

                if (base <= address && limit >= address) {
                    if (base > addr_page_base || limit < addr_page_limit) {
                        sattrs->subpage = true;
                    }
                    if (sattrs->srvalid) {
                        /*
                         * A second SAU region also matches: the
                         * attribution collapses to Secure, non-NSC,
                         * with no valid region number.
                         */
                        sattrs->ns = false;
                        sattrs->nsc = false;
                        sattrs->sregion = 0;
                        sattrs->srvalid = false;
                        break;
                    } else {
                        if (env->sau.rlar[r] & 2) {
                            sattrs->nsc = true;
                        } else {
                            sattrs->ns = true;
                        }
                        sattrs->srvalid = true;
                        sattrs->sregion = r;
                    }
                } else {
                    /*
                     * No match, but if this region overlaps the page
                     * containing the address then attribution differs
                     * within the page: flag subpage so the TLB entry
                     * is not cached for the whole page.
                     */
                    if (limit >= base &&
                        ranges_overlap(base, limit - base + 1,
                                       addr_page_base,
                                       TARGET_PAGE_SIZE)) {
                        sattrs->subpage = true;
                    }
                }
            }
        }
        break;
    }

    /*
     * IDAU can only make things more Secure: an IDAU Secure result
     * overrides an SAU NonSecure result, and IDAU non-NSC overrides
     * SAU NSC.
     */
    if (!idau_ns) {
        if (sattrs->ns || (!idau_nsc && sattrs->nsc)) {
            sattrs->ns = false;
            sattrs->nsc = idau_nsc;
        }
    }
}
1984
/*
 * v8M combined security-attribution + MPU translation. First runs the
 * SAU/IDAU lookup (on cores with the Security Extension) and faults
 * accesses that cross the security boundary illegally, then does the
 * MPU permission lookup. Returns true on a fault.
 */
static bool get_phys_addr_pmsav8(CPUARMState *env, uint32_t address,
                                 MMUAccessType access_type, ARMMMUIdx mmu_idx,
                                 hwaddr *phys_ptr, MemTxAttrs *txattrs,
                                 int *prot, target_ulong *page_size,
                                 ARMMMUFaultInfo *fi)
{
    uint32_t secure = regime_is_secure(env, mmu_idx);
    V8M_SAttributes sattrs = {};
    bool ret;
    bool mpu_is_subpage;

    if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
        v8m_security_lookup(env, address, access_type, mmu_idx, &sattrs);
        if (access_type == MMU_INST_FETCH) {
            /*
             * Instruction fetch where the attribution of the address
             * disagrees with the current security state: if the target
             * is NSC this may be a legitimate secure-gateway entry, so
             * report the special NSCExec fault type and let the
             * exception code sort it out; otherwise it is a
             * SecureFault. Either way, no permissions and a subpage-
             * sized TLB entry if the SAU region was smaller than a page.
             */
            if (sattrs.ns != !secure) {
                if (sattrs.nsc) {
                    fi->type = ARMFault_QEMU_NSCExec;
                } else {
                    fi->type = ARMFault_QEMU_SFault;
                }
                *page_size = sattrs.subpage ? 1 : TARGET_PAGE_SIZE;
                *phys_ptr = address;
                *prot = 0;
                return true;
            }
        } else {
            /*
             * Data accesses: a NonSecure-attributed address is accessed
             * with non-secure transaction attributes; a Secure address
             * touched from NonSecure state is a SecureFault.
             */
            if (sattrs.ns) {
                txattrs->secure = false;
            } else if (!secure) {
                /*
                 * NS access to S memory: fault with no permissions;
                 * honour the SAU subpage flag for the reported size.
                 */
                fi->type = ARMFault_QEMU_SFault;
                *page_size = sattrs.subpage ? 1 : TARGET_PAGE_SIZE;
                *phys_ptr = address;
                *prot = 0;
                return true;
            }
        }
    }

    /* Security checks passed (or not applicable): do the MPU lookup */
    ret = pmsav8_mpu_lookup(env, address, access_type, mmu_idx, phys_ptr,
                            txattrs, prot, &mpu_is_subpage, fi, NULL);
    *page_size = sattrs.subpage || mpu_is_subpage ? 1 : TARGET_PAGE_SIZE;
    return ret;
}
2061
2062
2063
2064
2065
2066
2067
2068
2069
2070
2071static uint8_t convert_stage2_attrs(CPUARMState *env, uint8_t s2attrs)
2072{
2073 uint8_t hiattr = extract32(s2attrs, 2, 2);
2074 uint8_t loattr = extract32(s2attrs, 0, 2);
2075 uint8_t hihint = 0, lohint = 0;
2076
2077 if (hiattr != 0) {
2078 if (arm_hcr_el2_eff(env) & HCR_CD) {
2079 hiattr = loattr = 1;
2080 } else {
2081 if (hiattr != 1) {
2082 hihint = 3;
2083 }
2084 if (loattr != 1) {
2085 lohint = 3;
2086 }
2087 }
2088 }
2089
2090 return (hiattr << 6) | (hihint << 4) | (loattr << 2) | lohint;
2091}
2092
2093
2094
2095
2096
2097
2098
2099
2100
2101static uint8_t combine_cacheattr_nibble(uint8_t s1, uint8_t s2)
2102{
2103 if (s1 == 4 || s2 == 4) {
2104
2105 return 4;
2106 } else if (extract32(s1, 2, 2) == 0 || extract32(s1, 2, 2) == 2) {
2107
2108 return s1;
2109 } else if (extract32(s2, 2, 2) == 2) {
2110
2111
2112
2113 return (2 << 2) | extract32(s1, 0, 2);
2114 } else {
2115 return s1;
2116 }
2117}
2118
2119
2120
2121
2122
2123
/*
 * Combine stage-1 and stage-2 cacheability attributes when
 * HCR_EL2.FWB == 0, returning an 8-bit MAIR-format value.
 */
static uint8_t combined_attrs_nofwb(CPUARMState *env,
                                    ARMCacheAttrs s1, ARMCacheAttrs s2)
{
    uint8_t s1lo, s2lo, s1hi, s2hi, s2_mair_attrs, ret_attrs;

    /* Convert stage-2 attrs to MAIR format so nibbles line up */
    s2_mair_attrs = convert_stage2_attrs(env, s2.attrs);

    s1lo = extract32(s1.attrs, 0, 4);
    s2lo = extract32(s2_mair_attrs, 0, 4);
    s1hi = extract32(s1.attrs, 4, 4);
    s2hi = extract32(s2_mair_attrs, 4, 4);

    /* A zero high nibble from either stage means Device memory */
    if (s1hi == 0 || s2hi == 0) {
        /* Device wins over Normal; the more restrictive type wins */
        if (s1lo == 0 || s2lo == 0) {
            /* nGnRnE has precedence over anything */
            ret_attrs = 0;
        } else if (s1lo == 4 || s2lo == 4) {
            /* non-Reordering has precedence over Reordering */
            ret_attrs = 4;
        } else if (s1lo == 8 || s2lo == 8) {
            /* non-Gathering has precedence over Gathering */
            ret_attrs = 8;
        } else {
            ret_attrs = 0xc;
        }
    } else {
        /* Both Normal: combine outer and inner nibbles independently */
        ret_attrs = combine_cacheattr_nibble(s1hi, s2hi) << 4
            | combine_cacheattr_nibble(s1lo, s2lo);
    }
    return ret_attrs;
}
2158
2159static uint8_t force_cacheattr_nibble_wb(uint8_t attr)
2160{
2161
2162
2163
2164
2165
2166
2167 if (attr == 0 || attr == 4) {
2168
2169
2170
2171
2172
2173 return 0xf;
2174 }
2175
2176 return attr | 4;
2177}
2178
2179
2180
2181
2182
2183
/*
 * Combine stage-1 and stage-2 cacheability attributes when
 * HCR_EL2.FWB == 1, where the stage-2 attribute field uses the FWB
 * encoding rather than the usual stage-2 one.
 */
static uint8_t combined_attrs_fwb(CPUARMState *env,
                                  ARMCacheAttrs s1, ARMCacheAttrs s2)
{
    switch (s2.attrs) {
    case 7:
        /* Use stage-1 attributes unchanged */
        return s1.attrs;
    case 6:
        /*
         * Force Normal Write-Back. If stage 1 is Device (outer nibble
         * zero), the result is WB R/W-allocate non-transient for both
         * nibbles; otherwise force each nibble to its WB equivalent.
         */
        if ((s1.attrs & 0xf0) == 0) {
            return 0xff;
        }
        return force_cacheattr_nibble_wb(s1.attrs & 0xf) |
               force_cacheattr_nibble_wb(s1.attrs >> 4) << 4;
    case 5:
        /* Keep stage-1 Device attrs; otherwise Normal Non-cacheable */
        if ((s1.attrs & 0xf0) == 0) {
            return s1.attrs;
        }
        return 0x44;
    case 0 ... 3:
        /* FWB Device types map to MAIR Device values 0/4/8/0xc */
        return s2.attrs << 2;
    default:
        /*
         * Reserved stage-2 encoding (guest-controllable, so we must
         * not assert): treat as Device-nGnRnE.
         */
        return 0;
    }
}
2221
2222
2223
2224
2225
2226
2227
2228
2229
/*
 * Combine the stage-1 (MAIR-format) and stage-2 (stage-2-format)
 * cache attributes and shareability into the final attributes seen
 * by the access.
 */
static ARMCacheAttrs combine_cacheattrs(CPUARMState *env,
                                        ARMCacheAttrs s1, ARMCacheAttrs s2)
{
    ARMCacheAttrs ret;
    bool tagged = false;

    assert(s2.is_s2_format && !s1.is_s2_format);
    ret.is_s2_format = false;

    if (s1.attrs == 0xf0) {
        /* Tagged Normal: combine as Normal WB, restore tag at the end */
        tagged = true;
        s1.attrs = 0xff;
    }

    /* Combine shareability: the more shareable attribute wins */
    if (s1.shareability == 2 || s2.shareability == 2) {
        /* Outer Shareable from either stage wins */
        ret.shareability = 2;
    } else if (s1.shareability == 3 || s2.shareability == 3) {
        /* Inner Shareable from either stage */
        ret.shareability = 3;
    } else {
        /* Both Non-shareable */
        ret.shareability = 0;
    }

    /* Combine the memory type/cacheability per HCR_EL2.FWB */
    if (arm_hcr_el2_eff(env) & HCR_FWB) {
        ret.attrs = combined_attrs_fwb(env, s1, s2);
    } else {
        ret.attrs = combined_attrs_nofwb(env, s1, s2);
    }

    /*
     * Device (outer nibble zero) and Normal Non-cacheable (0x44)
     * results are treated as Outer Shareable regardless of the
     * combined shareability computed above.
     */
    if ((ret.attrs & 0xf0) == 0 || ret.attrs == 0x44) {
        ret.shareability = 2;
    }

    /* If the input was Tagged and the result stayed WB, restore the tag */
    if (tagged && ret.attrs == 0xff) {
        ret.attrs = 0xf0;
    }

    return ret;
}
2282
2283
2284
2285
2286
2287
2288
2289
2290
2291
2292
2293
2294
2295
2296
2297
2298
2299
2300
2301
2302
2303
2304
2305
2306
2307
2308
2309
/*
 * Top-level virtual-to-physical translation for mmu_idx. Fills in
 * *phys_ptr, *attrs, *prot, *page_size and *cacheattrs on success;
 * returns true and fills in *fi on a fault. For a two-stage regime
 * it recurses for stage 1 and then applies stage 2 itself.
 */
bool get_phys_addr(CPUARMState *env, target_ulong address,
                   MMUAccessType access_type, ARMMMUIdx mmu_idx,
                   hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot,
                   target_ulong *page_size,
                   ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs)
{
    ARMMMUIdx s1_mmu_idx = stage_1_mmu_idx(mmu_idx);

    if (mmu_idx != s1_mmu_idx) {
        /*
         * This is a two-stage regime: translate via stage 1, then
         * (if EL2 exists and stage 2 is enabled) via stage 2.
         */
        if (arm_feature(env, ARM_FEATURE_EL2)) {
            hwaddr ipa;
            int s2_prot;
            int ret;
            bool ipa_secure;
            ARMCacheAttrs cacheattrs2 = {};
            ARMMMUIdx s2_mmu_idx;
            bool is_el0;

            ret = get_phys_addr(env, address, access_type, s1_mmu_idx, &ipa,
                                attrs, prot, page_size, fi, cacheattrs);

            /* Stage-1 fault, or stage 2 disabled: we are done */
            if (ret || regime_translation_disabled(env, ARMMMUIdx_Stage2)) {
                *phys_ptr = ipa;
                return ret;
            }

            /*
             * Pick the NS bit for the stage-2 walk itself: in Secure
             * state the VSTCR/VTCR SW/NSW bits control which space the
             * stage-2 tables are fetched from.
             */
            ipa_secure = attrs->secure;
            if (arm_is_secure_below_el3(env)) {
                if (ipa_secure) {
                    attrs->secure = !(env->cp15.vstcr_el2 & VSTCR_SW);
                } else {
                    attrs->secure = !(env->cp15.vtcr_el2 & VTCR_NSW);
                }
            } else {
                assert(!ipa_secure);
            }

            s2_mmu_idx = attrs->secure ? ARMMMUIdx_Stage2_S : ARMMMUIdx_Stage2;
            is_el0 = mmu_idx == ARMMMUIdx_E10_0 || mmu_idx == ARMMMUIdx_SE10_0;

            /* S1 is done: now do the stage-2 translation of the IPA */
            ret = get_phys_addr_lpae(env, ipa, access_type, s2_mmu_idx, is_el0,
                                     phys_ptr, attrs, &s2_prot,
                                     page_size, fi, &cacheattrs2);
            fi->s2addr = ipa;

            /* Combined permission is the intersection of both stages */
            *prot &= s2_prot;

            /* If S2 fails, return early */
            if (ret) {
                return ret;
            }

            /* Combine the S1 and S2 cacheability attributes */
            if (arm_hcr_el2_eff(env) & HCR_DC) {
                /*
                 * HCR.DC forces the stage-1 attributes to Normal WB
                 * (0xff), except that Tagged (0xf0) is left alone;
                 * shareability is forced to Non-shareable.
                 */
                if (cacheattrs->attrs != 0xf0) {
                    cacheattrs->attrs = 0xff;
                }
                cacheattrs->shareability = 0;
            }
            *cacheattrs = combine_cacheattrs(env, *cacheattrs, cacheattrs2);

            /*
             * Now recompute the final NS attribute of the access itself
             * from the SA/SW (and NSA/NSW) controls.
             */
            if (arm_is_secure_below_el3(env)) {
                if (ipa_secure) {
                    attrs->secure =
                        !(env->cp15.vstcr_el2 & (VSTCR_SA | VSTCR_SW));
                } else {
                    attrs->secure =
                        !((env->cp15.vtcr_el2 & (VTCR_NSA | VTCR_NSW))
                          || (env->cp15.vstcr_el2 & (VSTCR_SA | VSTCR_SW)));
                }
            }
            return 0;
        } else {
            /*
             * No EL2: stage 2 cannot exist, so just do the stage-1
             * translation under the stage-1 mmu_idx.
             */
            mmu_idx = stage_1_mmu_idx(mmu_idx);
        }
    }

    /*
     * The page table entries may downgrade secure to non-secure, but
     * cannot upgrade a non-secure translation regime's attributes
     * to secure.
     */
    attrs->secure = regime_is_secure(env, mmu_idx);
    attrs->user = regime_is_user(env, mmu_idx);

    /*
     * Fast Context Switch Extension (pre-v8 only): remap the bottom
     * 32MB of the address space via FCSEIDR.
     */
    if (address < 0x02000000 && mmu_idx != ARMMMUIdx_Stage2
        && !arm_feature(env, ARM_FEATURE_V8)) {
        if (regime_el(env, mmu_idx) == 3) {
            address += env->cp15.fcseidr_s;
        } else {
            address += env->cp15.fcseidr_ns;
        }
    }

    if (arm_feature(env, ARM_FEATURE_PMSA)) {
        bool ret;
        *page_size = TARGET_PAGE_SIZE;

        if (arm_feature(env, ARM_FEATURE_V8)) {
            /* PMSAv8 */
            ret = get_phys_addr_pmsav8(env, address, access_type, mmu_idx,
                                       phys_ptr, attrs, prot, page_size, fi);
        } else if (arm_feature(env, ARM_FEATURE_V7)) {
            /* PMSAv7 */
            ret = get_phys_addr_pmsav7(env, address, access_type, mmu_idx,
                                       phys_ptr, prot, page_size, fi);
        } else {
            /* Pre-v7 MPU */
            ret = get_phys_addr_pmsav5(env, address, access_type, mmu_idx,
                                       phys_ptr, prot, fi);
        }
        qemu_log_mask(CPU_LOG_MMU, "PMSA MPU lookup for %s at 0x%08" PRIx32
                      " mmu_idx %u -> %s (prot %c%c%c)\n",
                      access_type == MMU_DATA_LOAD ? "reading" :
                      (access_type == MMU_DATA_STORE ? "writing" : "execute"),
                      (uint32_t)address, mmu_idx,
                      ret ? "Miss" : "Hit",
                      *prot & PAGE_READ ? 'r' : '-',
                      *prot & PAGE_WRITE ? 'w' : '-',
                      *prot & PAGE_EXEC ? 'x' : '-');

        return ret;
    }

    /* Definitely a real MMU, not an MPU */

    if (regime_translation_disabled(env, mmu_idx)) {
        uint64_t hcr;
        uint8_t memattr;

        /*
         * MMU disabled: flat mapping, but for AArch64 regimes we must
         * still check that the address fits in PAMax (taking TBI into
         * account) and report an AddressSize fault if not.
         */
        if (mmu_idx != ARMMMUIdx_Stage2 && mmu_idx != ARMMMUIdx_Stage2_S) {
            int r_el = regime_el(env, mmu_idx);
            if (arm_el_is_aa64(env, r_el)) {
                int pamax = arm_pamax(env_archcpu(env));
                uint64_t tcr = env->cp15.tcr_el[r_el];
                int addrtop, tbi;

                tbi = aa64_va_parameter_tbi(tcr, mmu_idx);
                if (access_type == MMU_INST_FETCH) {
                    tbi &= ~aa64_va_parameter_tbid(tcr, mmu_idx);
                }
                tbi = (tbi >> extract64(address, 55, 1)) & 1;
                addrtop = (tbi ? 55 : 63);

                if (extract64(address, pamax, addrtop - pamax + 1) != 0) {
                    fi->type = ARMFault_AddressSize;
                    fi->level = 0;
                    fi->stage2 = false;
                    return 1;
                }

                /*
                 * The address-size check above ensures bits [63:52]
                 * carry no information, so drop them.
                 */
                address = extract64(address, 0, 52);
            }
        }
        *phys_ptr = address;
        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        *page_size = TARGET_PAGE_SIZE;

        /* Fill in cacheattrs a-la AArch64.TranslateAddressS1Off */
        hcr = arm_hcr_el2_eff(env);
        cacheattrs->shareability = 0;
        cacheattrs->is_s2_format = false;
        if (hcr & HCR_DC) {
            if (hcr & HCR_DCT) {
                /* HCR.DC && HCR.DCT: Tagged Normal WB */
                memattr = 0xf0;
            } else {
                /* HCR.DC: Normal Write-Back */
                memattr = 0xff;
            }
        } else if (access_type == MMU_INST_FETCH) {
            if (regime_sctlr(env, mmu_idx) & SCTLR_I) {
                memattr = 0xee; /* Normal, WT, RA, NT */
            } else {
                memattr = 0x44; /* Normal, NC, No */
            }
            cacheattrs->shareability = 2; /* outer shareable */
        } else {
            memattr = 0x00; /* Device, nGnRnE */
        }
        cacheattrs->attrs = memattr;
        return 0;
    }

    /* Dispatch on the page-table format in use for this regime */
    if (regime_using_lpae_format(env, mmu_idx)) {
        return get_phys_addr_lpae(env, address, access_type, mmu_idx, false,
                                  phys_ptr, attrs, prot, page_size,
                                  fi, cacheattrs);
    } else if (regime_sctlr(env, mmu_idx) & SCTLR_XP) {
        return get_phys_addr_v6(env, address, access_type, mmu_idx,
                                phys_ptr, attrs, prot, page_size, fi);
    } else {
        return get_phys_addr_v5(env, address, access_type, mmu_idx,
                                phys_ptr, prot, page_size, fi);
    }
}
2535
2536hwaddr arm_cpu_get_phys_page_attrs_debug(CPUState *cs, vaddr addr,
2537 MemTxAttrs *attrs)
2538{
2539 ARMCPU *cpu = ARM_CPU(cs);
2540 CPUARMState *env = &cpu->env;
2541 hwaddr phys_addr;
2542 target_ulong page_size;
2543 int prot;
2544 bool ret;
2545 ARMMMUFaultInfo fi = {};
2546 ARMMMUIdx mmu_idx = arm_mmu_idx(env);
2547 ARMCacheAttrs cacheattrs = {};
2548
2549 *attrs = (MemTxAttrs) {};
2550
2551 ret = get_phys_addr(env, addr, MMU_DATA_LOAD, mmu_idx, &phys_addr,
2552 attrs, &prot, &page_size, &fi, &cacheattrs);
2553
2554 if (ret) {
2555 return -1;
2556 }
2557 return phys_addr;
2558}
2559