/*
 * QEMU ARM CPU -- internal functions and types
 *
 * Copyright (c) 2014 Linaro Ltd
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see
 * <http://www.gnu.org/licenses/>.
 *
 * This header defines functions, types, etc which need to be shared
 * between different source files within target/arm/ but which are
 * private to it and not required by the rest of the top-level directory.
 */

#ifndef TARGET_ARM_INTERNALS_H
#define TARGET_ARM_INTERNALS_H

#include "hw/registerfields.h"
#include "syndrome.h"

/* register banks for CPU modes */
#define BANK_USRSYS 0
#define BANK_SVC    1
#define BANK_ABT    2
#define BANK_UND    3
#define BANK_IRQ    4
#define BANK_FIQ    5
#define BANK_HYP    6
#define BANK_MON    7

static inline bool excp_is_internal(int excp)
{
    /*
     * Return true if this exception number represents a QEMU-internal
     * exception that will not be passed to the guest.
     */
    return excp == EXCP_INTERRUPT
        || excp == EXCP_HLT
        || excp == EXCP_DEBUG
        || excp == EXCP_HALTED
        || excp == EXCP_EXCEPTION_EXIT
        || excp == EXCP_KERNEL_TRAP
        || excp == EXCP_SEMIHOST;
}

/*
 * Scale factor for generic timers, i.e. number of ns per tick.
 * This gives a 62.5MHz timer.
 */
#define GTIMER_SCALE 16

/* Bit definitions for the v7M CONTROL register */
FIELD(V7M_CONTROL, NPRIV, 0, 1)
FIELD(V7M_CONTROL, SPSEL, 1, 1)
FIELD(V7M_CONTROL, FPCA, 2, 1)
FIELD(V7M_CONTROL, SFPA, 3, 1)

/* Bit definitions for v7M exception return payload */
FIELD(V7M_EXCRET, ES, 0, 1)
FIELD(V7M_EXCRET, RES0, 1, 1)
FIELD(V7M_EXCRET, SPSEL, 2, 1)
FIELD(V7M_EXCRET, MODE, 3, 1)
FIELD(V7M_EXCRET, FTYPE, 4, 1)
FIELD(V7M_EXCRET, DCRS, 5, 1)
FIELD(V7M_EXCRET, S, 6, 1)
FIELD(V7M_EXCRET, RES1, 7, 25) /* including the must-be-1 prefix */

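/*
 * Illustrative decode (not quoted from QEMU source): the common EXC_RETURN
 * value 0xfffffffd decodes via the fields above as ES=1 (Secure, when the
 * Security Extension is present), SPSEL=1 (process stack), MODE=1 (Thread
 * mode) and FTYPE=1 (standard, non-FP, stack frame).
 */
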
/* Minimum value which is a magic number for exception return */
#define EXC_RETURN_MIN_MAGIC 0xff000000
/* Minimum number which is a magic number for function or exception return
 * when using v8M security extension
 */
#define FNC_RETURN_MIN_MAGIC 0xfefffffe

/* We use a few fake FSR values for internal purposes in M profile.
 * M profile cores don't have A/R format FSRs, but currently our
 * get_phys_addr() code assumes A/R profile and reports failures via
 * an A/R format FSR value. We then translate that into the proper
 * M profile exception and FSR status bit in arm_v7m_cpu_do_interrupt().
 * Mostly the FSR values we use for this are those defined for v7PMSA,
 * since we share some of that codepath. A few kinds of fault are
 * only for M profile and have no A/R equivalent, though, so we have
 * to pick a value from the reserved range (which we never otherwise
 * generate) to use for these.
 * These values will never be visible to the guest.
 */
#define M_FAKE_FSR_NSC_EXEC 0xf /* NS executing in S&NSC memory */
#define M_FAKE_FSR_SFAULT 0xe /* SecureFault INVTRAN, INVEP or AUVIOL */

/**
 * raise_exception: Raise the specified exception.
 * Raise a guest exception with the specified value, syndrome register
 * and target exception level. This should be called from helper functions,
 * and never returns because we will longjump back up to the CPU main loop.
 */
void QEMU_NORETURN raise_exception(CPUARMState *env, uint32_t excp,
                                   uint32_t syndrome, uint32_t target_el);

/*
 * Similarly, but also use unwinding to restore cpu state.
 */
void QEMU_NORETURN raise_exception_ra(CPUARMState *env, uint32_t excp,
                                      uint32_t syndrome, uint32_t target_el,
                                      uintptr_t ra);

/*
 * For AArch64, map a given EL to an index in the banked_spsr array.
 * Note that this mapping and the AArch32 mapping defined in bank_number()
 * must agree such that the AArch64<->AArch32 SPSRs have the architecturally
 * mandated mapping between each AArch64 EL and the AArch32 equivalent.
 */
static inline unsigned int aarch64_banked_spsr_index(unsigned int el)
{
    static const unsigned int map[4] = {
        [1] = BANK_SVC, /* EL1.  */
        [2] = BANK_HYP, /* EL2.  */
        [3] = BANK_MON, /* EL3.  */
    };
    assert(el >= 1 && el <= 3);
    return map[el];
}

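/*
 * Example (illustrative): aarch64_banked_spsr_index(2) yields BANK_HYP,
 * so SPSR_EL2 shares its storage slot with the AArch32 Hyp mode SPSR,
 * matching the architecturally mandated mapping noted above.
 */
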
/* Map CPU modes onto saved register banks.  */
static inline int bank_number(int mode)
{
    switch (mode) {
    case ARM_CPU_MODE_USR:
    case ARM_CPU_MODE_SYS:
        return BANK_USRSYS;
    case ARM_CPU_MODE_SVC:
        return BANK_SVC;
    case ARM_CPU_MODE_ABT:
        return BANK_ABT;
    case ARM_CPU_MODE_UND:
        return BANK_UND;
    case ARM_CPU_MODE_IRQ:
        return BANK_IRQ;
    case ARM_CPU_MODE_FIQ:
        return BANK_FIQ;
    case ARM_CPU_MODE_HYP:
        return BANK_HYP;
    case ARM_CPU_MODE_MON:
        return BANK_MON;
    }
    g_assert_not_reached();
}

/**
 * r14_bank_number: Map CPU mode onto register bank for r14
 *
 * Given an AArch32 CPU mode, return the index into the saved register
 * banks to use for the R14 (LR) in that mode. This is the same as
 * bank_number(), except for the special case of Hyp mode, where
 * R14 is shared with USR and SYS, unlike its R13 and SPSR.
 * This should be used as the index into env->banked_r14[], and
 * bank_number() used for the index into env->banked_r13[] and
 * env->banked_spsr[].
 */
static inline int r14_bank_number(int mode)
{
    return (mode == ARM_CPU_MODE_HYP) ? BANK_USRSYS : bank_number(mode);
}

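/*
 * For example (illustrative): for ARM_CPU_MODE_HYP, bank_number() returns
 * BANK_HYP (the banked R13/SPSR slot) while r14_bank_number() returns
 * BANK_USRSYS, because Hyp mode has no banked R14 of its own.
 */
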
void arm_cpu_register_gdb_regs_for_features(ARMCPU *cpu);
void arm_translate_init(void);

#ifdef CONFIG_TCG
void arm_cpu_synchronize_from_tb(CPUState *cs, const TranslationBlock *tb);
#endif

enum arm_fprounding {
    FPROUNDING_TIEEVEN,
    FPROUNDING_POSINF,
    FPROUNDING_NEGINF,
    FPROUNDING_ZERO,
    FPROUNDING_TIEAWAY,
    FPROUNDING_ODD
};

int arm_rmode_to_sf(int rmode);

/* Copy the working stack pointer (xregs[31]) out to the SP_ELx slot
 * selected by PSTATE.SP.
 */
static inline void aarch64_save_sp(CPUARMState *env, int el)
{
    if (env->pstate & PSTATE_SP) {
        env->sp_el[el] = env->xregs[31];
    } else {
        env->sp_el[0] = env->xregs[31];
    }
}

/* Load xregs[31] from the SP_ELx slot selected by PSTATE.SP. */
static inline void aarch64_restore_sp(CPUARMState *env, int el)
{
    if (env->pstate & PSTATE_SP) {
        env->xregs[31] = env->sp_el[el];
    } else {
        env->xregs[31] = env->sp_el[0];
    }
}

static inline void update_spsel(CPUARMState *env, uint32_t imm)
{
    unsigned int cur_el = arm_current_el(env);
    /* Update PSTATE SPSel bit; this requires us to update the
     * working stack pointer in xregs[31].
     */
    if (!((imm ^ env->pstate) & PSTATE_SP)) {
        return;
    }
    aarch64_save_sp(env, cur_el);
    env->pstate = deposit32(env->pstate, 0, 1, imm);

    /* We rely on illegal updates to SPsel from EL0 to get trapped
     * at translation time.
     */
    assert(cur_el >= 1 && cur_el <= 3);
    aarch64_restore_sp(env, cur_el);
}

/*
 * arm_pamax
 * @cpu: ARMCPU
 *
 * Returns the implementation defined bit-width of physical addresses.
 * The ARMv8 reference manuals refer to this as PAMax().
 */
static inline unsigned int arm_pamax(ARMCPU *cpu)
{
    static const unsigned int pamax_map[] = {
        [0] = 32,
        [1] = 36,
        [2] = 40,
        [3] = 42,
        [4] = 44,
        [5] = 48,
    };
    unsigned int parange =
        FIELD_EX64(cpu->isar.id_aa64mmfr0, ID_AA64MMFR0, PARANGE);

    /* id_aa64mmfr0 is a read-only register so values outside of the
     * supported mappings can be considered an implementation error.
     */
    assert(parange < ARRAY_SIZE(pamax_map));
    return pamax_map[parange];
}

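/*
 * Example (illustrative): a CPU reporting ID_AA64MMFR0_EL1.PARange == 5
 * has a 48-bit physical address space, so arm_pamax() returns 48.
 */
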
/* Return true if extended addresses are enabled.
 * This is always the case if EL1 is AArch64; for AArch32 it depends on
 * LPAE being implemented and TTBCR.EAE being set.
 */
static inline bool extended_addresses_enabled(CPUARMState *env)
{
    TCR *tcr = &env->cp15.tcr_el[arm_is_secure(env) ? 3 : 1];
    return arm_el_is_aa64(env, 1) ||
           (arm_feature(env, ARM_FEATURE_LPAE) && (tcr->raw_tcr & TTBCR_EAE));
}

/* Update a QEMU watchpoint based on the information the guest has set in the
 * DBGWCR<n>_EL1 and DBGWVR<n>_EL1 registers.
 */
void hw_watchpoint_update(ARMCPU *cpu, int n);
/* Update the QEMU watchpoints for every guest watchpoint. This does a
 * complete delete-and-reinstate of the QEMU watchpoint list and so is
 * suitable for use after migration or on reset.
 */
void hw_watchpoint_update_all(ARMCPU *cpu);
/* Update a QEMU breakpoint based on the information the guest has set in the
 * DBGBCR<n>_EL1 and DBGBVR<n>_EL1 registers.
 */
void hw_breakpoint_update(ARMCPU *cpu, int n);
/* Update the QEMU breakpoints for every guest breakpoint. This does a
 * complete delete-and-reinstate of the QEMU breakpoint list and so is
 * suitable for use after migration or on reset.
 */
void hw_breakpoint_update_all(ARMCPU *cpu);

/* Callback function for checking if a watchpoint should trigger. */
bool arm_debug_check_watchpoint(CPUState *cs, CPUWatchpoint *wp);

/* Adjust addresses (in BE32 mode) before testing against watchpoint
 * addresses.
 */
vaddr arm_adjust_watchpoint_address(CPUState *cs, vaddr addr, int len);

/* Callback function for when a watchpoint or breakpoint triggers. */
void arm_debug_excp_handler(CPUState *cs);

#if defined(CONFIG_USER_ONLY) || !defined(CONFIG_TCG)
static inline bool arm_is_psci_call(ARMCPU *cpu, int excp_type)
{
    return false;
}
static inline void arm_handle_psci_call(ARMCPU *cpu)
{
    g_assert_not_reached();
}
#else
/* Return true if this exception is a PSCI SMC/HVC call which QEMU's
 * built-in PSCI emulation should handle rather than the guest's EL2/EL3.
 */
bool arm_is_psci_call(ARMCPU *cpu, int excp_type);
/* Actually handle a PSCI call */
void arm_handle_psci_call(ARMCPU *cpu);
#endif

/**
 * arm_clear_exclusive: clear the exclusive monitor
 * @env: CPU env
 * Clear the CPU's exclusive monitor, like the guest CLREX instruction.
 */
static inline void arm_clear_exclusive(CPUARMState *env)
{
    env->exclusive_addr = -1;
}

/**
 * ARMFaultType: type of an ARM MMU fault
 * This corresponds to the v8A pseudocode's Fault enumeration,
 * with extensions for QEMU internal conditions.
 */
typedef enum ARMFaultType {
    ARMFault_None,
    ARMFault_AccessFlag,
    ARMFault_Alignment,
    ARMFault_Background,
    ARMFault_Domain,
    ARMFault_Permission,
    ARMFault_Translation,
    ARMFault_AddressSize,
    ARMFault_SyncExternal,
    ARMFault_SyncExternalOnWalk,
    ARMFault_SyncParity,
    ARMFault_SyncParityOnWalk,
    ARMFault_AsyncParity,
    ARMFault_AsyncExternal,
    ARMFault_Debug,
    ARMFault_TLBConflict,
    ARMFault_Lockdown,
    ARMFault_Exclusive,
    ARMFault_ICacheMaint,
    ARMFault_QEMU_NSCExec, /* v8M: NS executing in S&NSC memory */
    ARMFault_QEMU_SFault, /* v8M: SecureFault INVTRAN, INVEP or AUVIOL */
} ARMFaultType;

/**
 * ARMMMUFaultInfo: Information describing an ARM MMU Fault
 * @type: Type of fault
 * @level: Table walk level (for translation, access flag and permission faults)
 * @domain: Domain of the fault address (for non-LPAE CPUs only)
 * @s2addr: Address that caused a fault at stage 2
 * @stage2: True if we faulted at stage 2
 * @s1ptw: True if we faulted at stage 2 while doing a stage 1 page-table walk
 * @s1ns: True if we faulted on a non-secure IPA while in secure state
 * @ea: True if we should set the EA (external abort type) bit in syndrome
 */
typedef struct ARMMMUFaultInfo ARMMMUFaultInfo;
struct ARMMMUFaultInfo {
    ARMFaultType type;
    target_ulong s2addr;
    int level;
    int domain;
    bool stage2;
    bool s1ptw;
    bool s1ns;
    bool ea;
};

/*
 * arm_fi_to_sfsc: Convert fault info struct to short-format FSC
 * Compare pseudocode EncodeSDFSC().
 */
static inline uint32_t arm_fi_to_sfsc(ARMMMUFaultInfo *fi)
{
    uint32_t fsc;

    switch (fi->type) {
    case ARMFault_None:
        return 0;
    case ARMFault_AccessFlag:
        fsc = fi->level == 1 ? 0x3 : 0x6;
        break;
    case ARMFault_Alignment:
        fsc = 0x1;
        break;
    case ARMFault_Permission:
        fsc = fi->level == 1 ? 0xd : 0xf;
        break;
    case ARMFault_Domain:
        fsc = fi->level == 1 ? 0x9 : 0xb;
        break;
    case ARMFault_Translation:
        fsc = fi->level == 1 ? 0x5 : 0x7;
        break;
    case ARMFault_SyncExternal:
        fsc = 0x8 | (fi->ea << 12);
        break;
    case ARMFault_SyncExternalOnWalk:
        fsc = fi->level == 1 ? 0xc : 0xe;
        fsc |= (fi->ea << 12);
        break;
    case ARMFault_SyncParity:
        fsc = 0x409;
        break;
    case ARMFault_SyncParityOnWalk:
        fsc = fi->level == 1 ? 0x40c : 0x40e;
        break;
    case ARMFault_AsyncParity:
        fsc = 0x408;
        break;
    case ARMFault_AsyncExternal:
        fsc = 0x406 | (fi->ea << 12);
        break;
    case ARMFault_Debug:
        fsc = 0x2;
        break;
    case ARMFault_TLBConflict:
        fsc = 0x400;
        break;
    case ARMFault_Lockdown:
        fsc = 0x404;
        break;
    case ARMFault_Exclusive:
        fsc = 0x405;
        break;
    case ARMFault_ICacheMaint:
        fsc = 0x4;
        break;
    case ARMFault_Background:
        fsc = 0x0;
        break;
    case ARMFault_QEMU_NSCExec:
        fsc = M_FAKE_FSR_NSC_EXEC;
        break;
    case ARMFault_QEMU_SFault:
        fsc = M_FAKE_FSR_SFAULT;
        break;
    default:
        /* Other faults can't occur in a context that requires a
         * short-format status code.
         */
        g_assert_not_reached();
    }

    fsc |= (fi->domain << 4);
    return fsc;
}

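/*
 * Worked example (illustrative): a level-1 translation fault in domain 9
 * encodes as fsc = 0x5 | (9 << 4) = 0x95.
 */
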
/*
 * arm_fi_to_lfsc: Convert fault info struct to long-format FSC
 * Compare pseudocode EncodeLDFSC().
 */
static inline uint32_t arm_fi_to_lfsc(ARMMMUFaultInfo *fi)
{
    uint32_t fsc;

    switch (fi->type) {
    case ARMFault_None:
        return 0;
    case ARMFault_AddressSize:
        fsc = fi->level & 3;
        break;
    case ARMFault_AccessFlag:
        fsc = (fi->level & 3) | (0x2 << 2);
        break;
    case ARMFault_Permission:
        fsc = (fi->level & 3) | (0x3 << 2);
        break;
    case ARMFault_Translation:
        fsc = (fi->level & 3) | (0x1 << 2);
        break;
    case ARMFault_SyncExternal:
        fsc = 0x10 | (fi->ea << 12);
        break;
    case ARMFault_SyncExternalOnWalk:
        fsc = (fi->level & 3) | (0x5 << 2) | (fi->ea << 12);
        break;
    case ARMFault_SyncParity:
        fsc = 0x18;
        break;
    case ARMFault_SyncParityOnWalk:
        fsc = (fi->level & 3) | (0x7 << 2);
        break;
    case ARMFault_AsyncParity:
        fsc = 0x19;
        break;
    case ARMFault_AsyncExternal:
        fsc = 0x11 | (fi->ea << 12);
        break;
    case ARMFault_Alignment:
        fsc = 0x21;
        break;
    case ARMFault_Debug:
        fsc = 0x22;
        break;
    case ARMFault_TLBConflict:
        fsc = 0x30;
        break;
    case ARMFault_Lockdown:
        fsc = 0x34;
        break;
    case ARMFault_Exclusive:
        fsc = 0x35;
        break;
    default:
        /* Other faults can't occur in a context that requires a
         * long-format status code.
         */
        g_assert_not_reached();
    }

    fsc |= 1 << 9; /* LPAE */
    return fsc;
}

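/*
 * Worked example (illustrative): a level-3 permission fault encodes as
 * fsc = (3 & 3) | (0x3 << 2) = 0xf, and the LPAE bit makes the result 0x20f.
 */
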
static inline bool arm_extabort_type(MemTxResult result)
{
    /* The EA bit in syndromes and fault status registers is an
     * IMPDEF classification of external aborts. ARM implementations
     * usually use this to indicate AXI bus Decode error (0) or
     * Slave error (1); in QEMU we follow that.
     */
    return result != MEMTX_DECODE_ERROR;
}

bool arm_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
                      MMUAccessType access_type, int mmu_idx,
                      bool probe, uintptr_t retaddr);

static inline int arm_to_core_mmu_idx(ARMMMUIdx mmu_idx)
{
    return mmu_idx & ARM_MMU_IDX_COREIDX_MASK;
}

static inline ARMMMUIdx core_to_arm_mmu_idx(CPUARMState *env, int mmu_idx)
{
    if (arm_feature(env, ARM_FEATURE_M)) {
        return mmu_idx | ARM_MMU_IDX_M;
    } else {
        return mmu_idx | ARM_MMU_IDX_A;
    }
}

static inline ARMMMUIdx core_to_aa64_mmu_idx(int mmu_idx)
{
    /* AArch64 is always a-profile. */
    return mmu_idx | ARM_MMU_IDX_A;
}

int arm_mmu_idx_to_el(ARMMMUIdx mmu_idx);

/*
 * Return the MMU index for a v7M CPU with all relevant information
 * manually specified.
 */
ARMMMUIdx arm_v7m_mmu_idx_all(CPUARMState *env,
                              bool secstate, bool priv, bool negpri);

/*
 * Return the MMU index for a v7M CPU in the specified security and
 * privilege state.
 */
ARMMMUIdx arm_v7m_mmu_idx_for_secstate_and_priv(CPUARMState *env,
                                                bool secstate, bool priv);

/* Return the MMU index for a v7M CPU in the specified security state */
ARMMMUIdx arm_v7m_mmu_idx_for_secstate(CPUARMState *env, bool secstate);

/* Return true if the stage 1 translation regime is using LPAE format page
 * tables.
 */
bool arm_s1_regime_using_lpae_format(CPUARMState *env, ARMMMUIdx mmu_idx);

/* Raise a data fault alignment exception for the specified virtual address */
void arm_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr,
                                 MMUAccessType access_type,
                                 int mmu_idx, uintptr_t retaddr);

/*
 * arm_cpu_do_transaction_failed: handle a memory system error response
 * (eg external abort) by raising an external abort exception
 */
void arm_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
                                   vaddr addr, unsigned size,
                                   MMUAccessType access_type,
                                   int mmu_idx, MemTxAttrs attrs,
                                   MemTxResult response, uintptr_t retaddr);

/* Call any registered EL change hooks */
static inline void arm_call_pre_el_change_hook(ARMCPU *cpu)
{
    ARMELChangeHook *hook, *next;
    QLIST_FOREACH_SAFE(hook, &cpu->pre_el_change_hooks, node, next) {
        hook->hook(cpu, hook->opaque);
    }
}
static inline void arm_call_el_change_hook(ARMCPU *cpu)
{
    ARMELChangeHook *hook, *next;
    QLIST_FOREACH_SAFE(hook, &cpu->el_change_hooks, node, next) {
        hook->hook(cpu, hook->opaque);
    }
}

/* Return true if this address translation regime has two ranges.  */
static inline bool regime_has_2_ranges(ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_Stage1_E0:
    case ARMMMUIdx_Stage1_E1:
    case ARMMMUIdx_Stage1_E1_PAN:
    case ARMMMUIdx_Stage1_SE0:
    case ARMMMUIdx_Stage1_SE1:
    case ARMMMUIdx_Stage1_SE1_PAN:
    case ARMMMUIdx_E10_0:
    case ARMMMUIdx_E10_1:
    case ARMMMUIdx_E10_1_PAN:
    case ARMMMUIdx_E20_0:
    case ARMMMUIdx_E20_2:
    case ARMMMUIdx_E20_2_PAN:
    case ARMMMUIdx_SE10_0:
    case ARMMMUIdx_SE10_1:
    case ARMMMUIdx_SE10_1_PAN:
    case ARMMMUIdx_SE20_0:
    case ARMMMUIdx_SE20_2:
    case ARMMMUIdx_SE20_2_PAN:
        return true;
    default:
        return false;
    }
}

/* Return true if this address translation regime is secure */
static inline bool regime_is_secure(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_E10_0:
    case ARMMMUIdx_E10_1:
    case ARMMMUIdx_E10_1_PAN:
    case ARMMMUIdx_E20_0:
    case ARMMMUIdx_E20_2:
    case ARMMMUIdx_E20_2_PAN:
    case ARMMMUIdx_Stage1_E0:
    case ARMMMUIdx_Stage1_E1:
    case ARMMMUIdx_Stage1_E1_PAN:
    case ARMMMUIdx_E2:
    case ARMMMUIdx_Stage2:
    case ARMMMUIdx_MPrivNegPri:
    case ARMMMUIdx_MUserNegPri:
    case ARMMMUIdx_MPriv:
    case ARMMMUIdx_MUser:
        return false;
    case ARMMMUIdx_SE3:
    case ARMMMUIdx_SE10_0:
    case ARMMMUIdx_SE10_1:
    case ARMMMUIdx_SE10_1_PAN:
    case ARMMMUIdx_SE20_0:
    case ARMMMUIdx_SE20_2:
    case ARMMMUIdx_SE20_2_PAN:
    case ARMMMUIdx_Stage1_SE0:
    case ARMMMUIdx_Stage1_SE1:
    case ARMMMUIdx_Stage1_SE1_PAN:
    case ARMMMUIdx_SE2:
    case ARMMMUIdx_Stage2_S:
    case ARMMMUIdx_MSPrivNegPri:
    case ARMMMUIdx_MSUserNegPri:
    case ARMMMUIdx_MSPriv:
    case ARMMMUIdx_MSUser:
        return true;
    default:
        g_assert_not_reached();
    }
}

/* Return true if this translation regime has PAN (Privileged Access Never) applied */
static inline bool regime_is_pan(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_Stage1_E1_PAN:
    case ARMMMUIdx_Stage1_SE1_PAN:
    case ARMMMUIdx_E10_1_PAN:
    case ARMMMUIdx_E20_2_PAN:
    case ARMMMUIdx_SE10_1_PAN:
    case ARMMMUIdx_SE20_2_PAN:
        return true;
    default:
        return false;
    }
}

/* Return the exception level which controls this address translation regime */
static inline uint32_t regime_el(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_SE20_0:
    case ARMMMUIdx_SE20_2:
    case ARMMMUIdx_SE20_2_PAN:
    case ARMMMUIdx_E20_0:
    case ARMMMUIdx_E20_2:
    case ARMMMUIdx_E20_2_PAN:
    case ARMMMUIdx_Stage2:
    case ARMMMUIdx_Stage2_S:
    case ARMMMUIdx_SE2:
    case ARMMMUIdx_E2:
        return 2;
    case ARMMMUIdx_SE3:
        return 3;
    case ARMMMUIdx_SE10_0:
    case ARMMMUIdx_Stage1_SE0:
        return arm_el_is_aa64(env, 3) ? 1 : 3;
    case ARMMMUIdx_SE10_1:
    case ARMMMUIdx_SE10_1_PAN:
    case ARMMMUIdx_Stage1_E0:
    case ARMMMUIdx_Stage1_E1:
    case ARMMMUIdx_Stage1_E1_PAN:
    case ARMMMUIdx_Stage1_SE1:
    case ARMMMUIdx_Stage1_SE1_PAN:
    case ARMMMUIdx_E10_0:
    case ARMMMUIdx_E10_1:
    case ARMMMUIdx_E10_1_PAN:
    case ARMMMUIdx_MPrivNegPri:
    case ARMMMUIdx_MUserNegPri:
    case ARMMMUIdx_MPriv:
    case ARMMMUIdx_MUser:
    case ARMMMUIdx_MSPrivNegPri:
    case ARMMMUIdx_MSUserNegPri:
    case ARMMMUIdx_MSPriv:
    case ARMMMUIdx_MSUser:
        return 1;
    default:
        g_assert_not_reached();
    }
}

/* Return the TCR controlling this translation regime */
static inline TCR *regime_tcr(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    if (mmu_idx == ARMMMUIdx_Stage2) {
        return &env->cp15.vtcr_el2;
    }
    if (mmu_idx == ARMMMUIdx_Stage2_S) {
        /*
         * Note: Secure stage 2 nominally shares fields from VTCR_EL2, but
         * those are not currently used by QEMU, so just return VSTCR_EL2.
         */
        return &env->cp15.vstcr_el2;
    }
    return &env->cp15.tcr_el[regime_el(env, mmu_idx)];
}

/* Return the FSR value for a debug exception (watchpoint, hardware
 * breakpoint or BKPT insn) targeting the specified exception level.
 */
static inline uint32_t arm_debug_exception_fsr(CPUARMState *env)
{
    ARMMMUFaultInfo fi = { .type = ARMFault_Debug };
    int target_el = arm_debug_target_el(env);
    bool using_lpae = false;

    if (target_el == 2 || arm_el_is_aa64(env, target_el)) {
        using_lpae = true;
    } else {
        if (arm_feature(env, ARM_FEATURE_LPAE) &&
            (env->cp15.tcr_el[target_el].raw_tcr & TTBCR_EAE)) {
            using_lpae = true;
        }
    }

    if (using_lpae) {
        return arm_fi_to_lfsc(&fi);
    } else {
        return arm_fi_to_sfsc(&fi);
    }
}

/**
 * arm_num_brps: Return number of implemented breakpoints.
 * Note that the ID register BRPS field is "number of bps - 1",
 * and we return the actual number of breakpoints.
 */
static inline int arm_num_brps(ARMCPU *cpu)
{
    if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
        return FIELD_EX64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, BRPS) + 1;
    } else {
        return FIELD_EX32(cpu->isar.dbgdidr, DBGDIDR, BRPS) + 1;
    }
}

/**
 * arm_num_wrps: Return number of implemented watchpoints.
 * Note that the ID register WRPS field is "number of wps - 1",
 * and we return the actual number of watchpoints.
 */
static inline int arm_num_wrps(ARMCPU *cpu)
{
    if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
        return FIELD_EX64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, WRPS) + 1;
    } else {
        return FIELD_EX32(cpu->isar.dbgdidr, DBGDIDR, WRPS) + 1;
    }
}

/**
 * arm_num_ctx_cmps: Return number of implemented context comparators.
 * Note that the ID register CTX_CMPS field is "number of cmps - 1",
 * and we return the actual number of comparators.
 */
static inline int arm_num_ctx_cmps(ARMCPU *cpu)
{
    if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
        return FIELD_EX64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, CTX_CMPS) + 1;
    } else {
        return FIELD_EX32(cpu->isar.dbgdidr, DBGDIDR, CTX_CMPS) + 1;
    }
}

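/*
 * Example (illustrative): an ID_AA64DFR0_EL1.BRPs field of 5 means the CPU
 * implements 6 hardware breakpoints, so arm_num_brps() returns 6; the WRPs
 * and CTX_CMPs fields follow the same "minus 1" convention.
 */
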
/**
 * v7m_using_psp: Return true if using process stack pointer
 * Return true if the CPU is currently using the process stack
 * pointer, or false if it is using the main stack pointer.
 */
static inline bool v7m_using_psp(CPUARMState *env)
{
    /* Handler mode always uses the main stack; for thread mode
     * the CONTROL.SPSEL bit determines the answer.
     * Note that in v7M it is not possible to be in Handler mode with
     * CONTROL.SPSEL non-zero, but in v8M it is, so we must check both.
     */
    return !arm_v7m_is_handler_mode(env) &&
        env->v7m.control[env->v7m.secure] & R_V7M_CONTROL_SPSEL_MASK;
}

/**
 * v7m_sp_limit: Return SP limit for current CPU state
 * Return the SP limit value for the current CPU security state
 * and stack pointer.
 */
static inline uint32_t v7m_sp_limit(CPUARMState *env)
{
    if (v7m_using_psp(env)) {
        return env->v7m.psplim[env->v7m.secure];
    } else {
        return env->v7m.msplim[env->v7m.secure];
    }
}

/**
 * v7m_cpacr_pass:
 * Return true if the v7M CPACR permits access to the FPU for the specified
 * security state and privilege level.
 */
static inline bool v7m_cpacr_pass(CPUARMState *env,
                                  bool is_secure, bool is_priv)
{
    switch (extract32(env->v7m.cpacr[is_secure], 20, 2)) {
    case 0:
    case 2: /* UNPREDICTABLE: we treat like 0 */
        return false;
    case 1:
        return is_priv;
    case 3:
        return true;
    default:
        g_assert_not_reached();
    }
}

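/*
 * Example (illustrative): a CPACR.CP10 value of 1 grants FPU access only to
 * privileged code, so v7m_cpacr_pass(env, s, false) is false in that case.
 */
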
/**
 * aarch32_mode_name(): Return name of the AArch32 CPU mode
 * @psr: Program Status Register indicating CPU mode
 *
 * Returns, for debug logging purposes, a printable representation
 * of the AArch32 CPU mode ("svc", "usr", etc) as indicated by
 * the low bits of the specified PSR.
 */
static inline const char *aarch32_mode_name(uint32_t psr)
{
    static const char cpu_mode_names[16][4] = {
        "usr", "fiq", "irq", "svc", "???", "???", "mon", "abt",
        "???", "???", "hyp", "und", "???", "???", "???", "sys"
    };

    return cpu_mode_names[psr & 0xf];
}

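/*
 * Example (illustrative): for a PSR with mode bits 0x13 (Supervisor),
 * aarch32_mode_name() returns "svc".
 */
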
/**
 * arm_cpu_update_virq: Update CPU_INTERRUPT_VIRQ bit in cs->interrupt_request
 *
 * Update the CPU_INTERRUPT_VIRQ bit in cs->interrupt_request, following
 * a change to either the input VIRQ line from the GIC or the HCR_EL2.VI bit.
 * Must be called with the iothread lock held.
 */
void arm_cpu_update_virq(ARMCPU *cpu);

/**
 * arm_cpu_update_vfiq: Update CPU_INTERRUPT_VFIQ bit in cs->interrupt_request
 *
 * Update the CPU_INTERRUPT_VFIQ bit in cs->interrupt_request, following
 * a change to either the input VFIQ line from the GIC or the HCR_EL2.VF bit.
 * Must be called with the iothread lock held.
 */
void arm_cpu_update_vfiq(ARMCPU *cpu);

/**
 * arm_mmu_idx_el:
 * @env: The cpu environment
 * @el: The EL to use.
 *
 * Return the full ARMMMUIdx for the translation regime for EL.
 */
ARMMMUIdx arm_mmu_idx_el(CPUARMState *env, int el);

/**
 * arm_mmu_idx:
 * @env: The cpu environment
 *
 * Return the full ARMMMUIdx for the current translation regime.
 */
ARMMMUIdx arm_mmu_idx(CPUARMState *env);

/**
 * arm_stage1_mmu_idx:
 * @env: The cpu environment
 *
 * Return the ARMMMUIdx for the stage1 traversal for the current regime.
 */
#ifdef CONFIG_USER_ONLY
static inline ARMMMUIdx arm_stage1_mmu_idx(CPUARMState *env)
{
    return ARMMMUIdx_Stage1_E0;
}
#else
ARMMMUIdx arm_stage1_mmu_idx(CPUARMState *env);
#endif

/*
 * arm_mmu_idx_is_stage1_of_2:
 * @mmu_idx: The ARMMMUIdx to test
 *
 * Return true if @mmu_idx is a NOTLB mmu_idx that is the
 * first stage of a two stage regime.
 */
static inline bool arm_mmu_idx_is_stage1_of_2(ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_Stage1_E0:
    case ARMMMUIdx_Stage1_E1:
    case ARMMMUIdx_Stage1_E1_PAN:
    case ARMMMUIdx_Stage1_SE0:
    case ARMMMUIdx_Stage1_SE1:
    case ARMMMUIdx_Stage1_SE1_PAN:
        return true;
    default:
        return false;
    }
}

/*
 * Return the set of CPSR bits that are valid (architecturally defined)
 * for the given CPU features and ID registers; writes to the CPSR should
 * be limited to this mask.
 */
static inline uint32_t aarch32_cpsr_valid_mask(uint64_t features,
                                               const ARMISARegisters *id)
{
    uint32_t valid = CPSR_M | CPSR_AIF | CPSR_IL | CPSR_NZCV;

    if ((features >> ARM_FEATURE_V4T) & 1) {
        valid |= CPSR_T;
    }
    if ((features >> ARM_FEATURE_V5) & 1) {
        valid |= CPSR_Q;
    }
    if ((features >> ARM_FEATURE_V6) & 1) {
        valid |= CPSR_E | CPSR_GE;
    }
    if ((features >> ARM_FEATURE_THUMB2) & 1) {
        valid |= CPSR_IT;
    }
    if (isar_feature_aa32_jazelle(id)) {
        valid |= CPSR_J;
    }
    if (isar_feature_aa32_pan(id)) {
        valid |= CPSR_PAN;
    }
    if (isar_feature_aa32_dit(id)) {
        valid |= CPSR_DIT;
    }
    if (isar_feature_aa32_ssbs(id)) {
        valid |= CPSR_SSBS;
    }

    return valid;
}

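/*
 * Illustrative use (a sketch, not a quote of QEMU's callers): a guest
 * value written to the CPSR is first reduced to its valid bits:
 *
 *     uint32_t mask = aarch32_cpsr_valid_mask(env->features, &cpu->isar);
 *     val &= mask;
 */
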
/* As above, but for the AArch64 PSTATE: the set of valid (defined) bits. */
static inline uint32_t aarch64_pstate_valid_mask(const ARMISARegisters *id)
{
    uint32_t valid;

    valid = PSTATE_M | PSTATE_DAIF | PSTATE_IL | PSTATE_SS | PSTATE_NZCV;
    if (isar_feature_aa64_bti(id)) {
        valid |= PSTATE_BTYPE;
    }
    if (isar_feature_aa64_pan(id)) {
        valid |= PSTATE_PAN;
    }
    if (isar_feature_aa64_uao(id)) {
        valid |= PSTATE_UAO;
    }
    if (isar_feature_aa64_dit(id)) {
        valid |= PSTATE_DIT;
    }
    if (isar_feature_aa64_ssbs(id)) {
        valid |= PSTATE_SSBS;
    }
    if (isar_feature_aa64_mte(id)) {
        valid |= PSTATE_TCO;
    }

    return valid;
}

/*
 * Parameters of a given virtual address, as extracted from the
 * translation control register (TCR) for a given regime.
 */
typedef struct ARMVAParameters {
    unsigned tsz    : 8;
    unsigned select : 1;
    bool tbi        : 1;
    bool epd        : 1;
    bool hpd        : 1;
    bool using16k   : 1;
    bool using64k   : 1;
} ARMVAParameters;

ARMVAParameters aa64_va_parameters(CPUARMState *env, uint64_t va,
                                   ARMMMUIdx mmu_idx, bool data);

static inline int exception_target_el(CPUARMState *env)
{
    int target_el = MAX(1, arm_current_el(env));

    /*
     * No such thing as secure EL1 if EL3 is aarch32,
     * so update the target EL to EL3 in this case.
     */
    if (arm_is_secure(env) && !arm_el_is_aa64(env, 3) && target_el == 1) {
        target_el = 3;
    }

    return target_el;
}

/* Determine if allocation tags are available.  */
static inline bool allocation_tag_access_enabled(CPUARMState *env, int el,
                                                 uint64_t sctlr)
{
    if (el < 3
        && arm_feature(env, ARM_FEATURE_EL3)
        && !(env->cp15.scr_el3 & SCR_ATA)) {
        return false;
    }
    if (el < 2 && arm_feature(env, ARM_FEATURE_EL2)) {
        uint64_t hcr = arm_hcr_el2_eff(env);
        if (!(hcr & HCR_ATA) && (!(hcr & HCR_E2H) || !(hcr & HCR_TGE))) {
            return false;
        }
    }
    sctlr &= (el == 0 ? SCTLR_ATA0 : SCTLR_ATA);
    return sctlr != 0;
}

#ifndef CONFIG_USER_ONLY

/* Security attributes for an address, as returned by v8m_security_lookup. */
typedef struct V8M_SAttributes {
    bool subpage; /* true if these attrs don't cover the whole TARGET_PAGE */
    bool ns;
    bool nsc;
    uint8_t sregion;
    bool srvalid;
    uint8_t iregion;
    bool irvalid;
} V8M_SAttributes;

void v8m_security_lookup(CPUARMState *env, uint32_t address,
                         MMUAccessType access_type, ARMMMUIdx mmu_idx,
                         V8M_SAttributes *sattrs);

bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address,
                       MMUAccessType access_type, ARMMMUIdx mmu_idx,
                       hwaddr *phys_ptr, MemTxAttrs *txattrs,
                       int *prot, bool *is_subpage,
                       ARMMMUFaultInfo *fi, uint32_t *mregion);

/* Cacheability and shareability attributes for a memory access */
typedef struct ARMCacheAttrs {
    unsigned int attrs:8; /* as in the MAIR register encoding */
    unsigned int shareability:2; /* as in the SH field of the VMSAv8-64 PTEs */
} ARMCacheAttrs;

bool get_phys_addr(CPUARMState *env, target_ulong address,
                   MMUAccessType access_type, ARMMMUIdx mmu_idx,
                   hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot,
                   target_ulong *page_size,
                   ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs)
    __attribute__((nonnull));

void arm_log_exception(int idx);

#endif /* !CONFIG_USER_ONLY */

/*
 * The log2 of the words in the tag block, for GMID_EL1.BS.
 * This is the maximum, 256 bytes, which manipulates 64-bits of tags.
 */
#define GMID_EL1_BS  6

/* We associate one allocation tag per 16 bytes, the minimum.  */
#define LOG2_TAG_GRANULE 4
#define TAG_GRANULE      (1 << LOG2_TAG_GRANULE)

/*
 * SVE predicates are 1/8 the size of SVE vectors, and cannot use
 * the same simd_desc() encoding due to restrictions on size.
 * Use these instead.
 */
FIELD(PREDDESC, OPRSZ, 0, 6)
FIELD(PREDDESC, ESZ, 6, 2)
FIELD(PREDDESC, DATA, 8, 24)

/*
 * The SVE simd_data field, for memory ops, contains either
 * rd (5 bits) or a shift count (2 bits), so the MTE descriptor
 * is shifted up above those bits.
 */
#define SVE_MTEDESC_SHIFT 5

/* Bits within a descriptor passed to the helper_mte_check* functions. */
FIELD(MTEDESC, MIDX,  0, 4)
FIELD(MTEDESC, TBI,   4, 2)
FIELD(MTEDESC, TCMA,  6, 2)
FIELD(MTEDESC, WRITE, 8, 1)
FIELD(MTEDESC, ESIZE, 9, 5)
FIELD(MTEDESC, TSIZE, 14, 10)  /* mte_checkN only */

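/*
 * Illustrative packing (a sketch, not quoted from a caller): a descriptor
 * for a 16-byte store through mmu_idx 2, assuming the TBI/TCMA bits have
 * already been filled in elsewhere:
 *
 *     uint32_t desc = 0;
 *     desc = FIELD_DP32(desc, MTEDESC, MIDX, 2);
 *     desc = FIELD_DP32(desc, MTEDESC, WRITE, 1);
 *     desc = FIELD_DP32(desc, MTEDESC, ESIZE, 16);
 */
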
bool mte_probe1(CPUARMState *env, uint32_t desc, uint64_t ptr);
uint64_t mte_check1(CPUARMState *env, uint32_t desc,
                    uint64_t ptr, uintptr_t ra);
uint64_t mte_checkN(CPUARMState *env, uint32_t desc,
                    uint64_t ptr, uintptr_t ra);

/* Extract the allocation tag from bits [59:56] of the pointer. */
static inline int allocation_tag_from_addr(uint64_t ptr)
{
    return extract64(ptr, 56, 4);
}

/* Deposit @rtag into bits [59:56] of @ptr. */
static inline uint64_t address_with_allocation_tag(uint64_t ptr, int rtag)
{
    return deposit64(ptr, 56, 4, rtag);
}
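
/*
 * Example (illustrative): for ptr = 0x3400ffffeeddccbb,
 * allocation_tag_from_addr() returns 0x4 (bits [59:56]), and
 * address_with_allocation_tag(ptr, 0xa) yields 0x3a00ffffeeddccbb.
 */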

/* Return true if tbi bits mean that the access is checked.  */
static inline bool tbi_check(uint32_t desc, int bit55)
{
    return (desc >> (R_MTEDESC_TBI_SHIFT + bit55)) & 1;
}

/* Return true if tcma bits mean that the access is unchecked.  */
static inline bool tcma_check(uint32_t desc, int bit55, int ptr_tag)
{
    /*
     * We had extracted bit55 and ptr_tag for other reasons, so fold
     * (ptr<59:55> == 00000 || ptr<59:55> == 11111) into a single test.
     */
    bool match = ((ptr_tag + bit55) & 0xf) == 0;
    bool tcma = (desc >> (R_MTEDESC_TCMA_SHIFT + bit55)) & 1;
    return tcma && match;
}
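
/*
 * Example (illustrative): with bit55 = 1 and ptr_tag = 0xf,
 * (0xf + 1) & 0xf == 0, so a kernel-half pointer carrying the all-ones
 * tag is treated as unchecked whenever the TCMA bit for that half is set.
 */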

/*
 * For TBI, ideally, we would do nothing.  Proper behaviour on fault is
 * for the tag to be present in the FAR_ELx register.  But for user-only
 * mode, we do not have a TLB with which to implement this, so we must
 * remove the top byte now.
 */
static inline uint64_t useronly_clean_ptr(uint64_t ptr)
{
#ifdef CONFIG_USER_ONLY
    /* TBI0 is known to be enabled, while TBI1 is disabled. */
    ptr &= sextract64(ptr, 0, 56);
#endif
    return ptr;
}
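
/*
 * Example (illustrative): for a user-space pointer 0x5a007fffffff1234
 * (bit 55 clear), useronly_clean_ptr() returns 0x00007fffffff1234,
 * discarding the 0x5a tag byte.
 */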

static inline uint64_t useronly_maybe_clean_ptr(uint32_t desc, uint64_t ptr)
{
#ifdef CONFIG_USER_ONLY
    int64_t clean_ptr = sextract64(ptr, 0, 56);
    if (tbi_check(desc, clean_ptr < 0)) {
        ptr = clean_ptr;
    }
#endif
    return ptr;
}

#endif