/*
 * QEMU ARM CPU -- internal functions and types
 *
 * This header declares functionality shared between the ARM helper,
 * debug and translation code; it is internal to the target/arm
 * implementation and not a public CPU API.
 */

#ifndef TARGET_ARM_INTERNALS_H
#define TARGET_ARM_INTERNALS_H

#include "hw/registerfields.h"
#include "tcg/tcg-gvec-desc.h"
#include "syndrome.h"

/* Register banks indexed by AArch32 CPU mode; see bank_number() below. */
#define BANK_USRSYS 0
#define BANK_SVC    1
#define BANK_ABT    2
#define BANK_UND    3
#define BANK_IRQ    4
#define BANK_FIQ    5
#define BANK_HYP    6
#define BANK_MON    7

static inline bool excp_is_internal(int excp)
{
    /*
     * Return true if this exception number represents a QEMU-internal
     * exception that will not be passed to the guest.
     */
    return excp == EXCP_INTERRUPT
        || excp == EXCP_HLT
        || excp == EXCP_DEBUG
        || excp == EXCP_HALTED
        || excp == EXCP_EXCEPTION_EXIT
        || excp == EXCP_KERNEL_TRAP
        || excp == EXCP_SEMIHOST;
}

/*
 * Scale factor for generic timers, i.e. number of ns per tick.
 * This gives a 62.5MHz timer.
 */
#define GTIMER_SCALE 16

/* Bit definitions for the v7M CONTROL register */
FIELD(V7M_CONTROL, NPRIV, 0, 1)
FIELD(V7M_CONTROL, SPSEL, 1, 1)
FIELD(V7M_CONTROL, FPCA, 2, 1)
FIELD(V7M_CONTROL, SFPA, 3, 1)

/* Bit definitions for v7M exception return payload */
FIELD(V7M_EXCRET, ES, 0, 1)
FIELD(V7M_EXCRET, RES0, 1, 1)
FIELD(V7M_EXCRET, SPSEL, 2, 1)
FIELD(V7M_EXCRET, MODE, 3, 1)
FIELD(V7M_EXCRET, FTYPE, 4, 1)
FIELD(V7M_EXCRET, DCRS, 5, 1)
FIELD(V7M_EXCRET, S, 6, 1)
FIELD(V7M_EXCRET, RES1, 7, 25) /* including the must-be-1 prefix */
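
/*
 * For example (an illustrative sketch, not part of this header's API):
 * these FIELD definitions are consumed with the FIELD_EX32/FIELD_DP32
 * macros from "hw/registerfields.h", so decoding an EXC_RETURN payload
 * looks like:
 *
 *     uint32_t excret = 0xfffffffd;
 *     bool to_secure = FIELD_EX32(excret, V7M_EXCRET, S);
 *     bool ftype = FIELD_EX32(excret, V7M_EXCRET, FTYPE);
 */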

/* Minimum value which is a magic number for exception return */
#define EXC_RETURN_MIN_MAGIC 0xff000000

/* Minimum number which is a magic number for function return */
#define FNC_RETURN_MIN_MAGIC 0xfefffffe

/* Bit definitions for DBGWCRn and DBGWCRn_EL1 */
FIELD(DBGWCR, E, 0, 1)
FIELD(DBGWCR, PAC, 1, 2)
FIELD(DBGWCR, LSC, 3, 2)
FIELD(DBGWCR, BAS, 5, 8)
FIELD(DBGWCR, HMC, 13, 1)
FIELD(DBGWCR, SSC, 14, 2)
FIELD(DBGWCR, LBN, 16, 4)
FIELD(DBGWCR, WT, 20, 1)
FIELD(DBGWCR, MASK, 24, 5)
FIELD(DBGWCR, SSCE, 29, 1)

/*
 * We use a few fake FSR values for internal purposes in M profile.
 * M profile cores don't have A/R format FSRs, but currently our
 * get_phys_addr() code assumes A/R profile and reports failures via
 * an A/R format FSR value. We then translate that into the proper
 * M profile exception and FSR status bit in arm_v7m_cpu_do_interrupt().
 * Mostly the FSR values we use for this are those defined for v7PMSA,
 * since we share some of that codepath. A few kinds of fault are
 * only for M profile and have no A/R equivalent, though, so we have
 * to pick a value from the reserved range (which we never otherwise
 * generate) to use for these.
 * These values will never be visible to the guest.
 */
#define M_FAKE_FSR_NSC_EXEC 0xf /* NS executing in S&NSC memory */
#define M_FAKE_FSR_SFAULT 0xe /* SecureFault INVTRAN, INVEP or AUVIOL */

/**
 * raise_exception: Raise the specified exception.
 * Raise a guest exception with the specified value, syndrome register
 * and target exception level. This should be called from helper functions,
 * and never returns because we will longjump back up to the CPU main loop.
 */
G_NORETURN void raise_exception(CPUARMState *env, uint32_t excp,
                                uint32_t syndrome, uint32_t target_el);

/*
 * Similarly, but also use unwinding to restore cpu state.
 */
G_NORETURN void raise_exception_ra(CPUARMState *env, uint32_t excp,
                                   uint32_t syndrome, uint32_t target_el,
                                   uintptr_t ra);

/*
 * For AArch64, map a given EL to an index in the banked_spsr array.
 * Note that this mapping and the AArch32 mapping defined in bank_number()
 * must agree such that the AArch64<->AArch32 SPSRs have the architecturally
 * mandated mapping between each other.
 */
static inline unsigned int aarch64_banked_spsr_index(unsigned int el)
{
    static const unsigned int map[4] = {
        [1] = BANK_SVC, /* EL1 */
        [2] = BANK_HYP, /* EL2 */
        [3] = BANK_MON, /* EL3 */
    };
    assert(el >= 1 && el <= 3);
    return map[el];
}

/* Map CPU modes onto saved register banks. */
static inline int bank_number(int mode)
{
    switch (mode) {
    case ARM_CPU_MODE_USR:
    case ARM_CPU_MODE_SYS:
        return BANK_USRSYS;
    case ARM_CPU_MODE_SVC:
        return BANK_SVC;
    case ARM_CPU_MODE_ABT:
        return BANK_ABT;
    case ARM_CPU_MODE_UND:
        return BANK_UND;
    case ARM_CPU_MODE_IRQ:
        return BANK_IRQ;
    case ARM_CPU_MODE_FIQ:
        return BANK_FIQ;
    case ARM_CPU_MODE_HYP:
        return BANK_HYP;
    case ARM_CPU_MODE_MON:
        return BANK_MON;
    }
    g_assert_not_reached();
}

/**
 * r14_bank_number: Map CPU mode onto register bank for r14
 *
 * Given an AArch32 CPU mode, return the index into the saved register
 * banks to use for the R14 (LR) in that mode. This is the same as
 * bank_number(), except for the special case of Hyp mode, where
 * R14 is shared with USR and SYS, unlike its R13 and SPSR.
 * This should be used as the index into env->banked_r14[], and
 * bank_number() used for the index into env->banked_r13[] and
 * env->banked_spsr[].
 */
static inline int r14_bank_number(int mode)
{
    return (mode == ARM_CPU_MODE_HYP) ? BANK_USRSYS : bank_number(mode);
}

void arm_cpu_register_gdb_regs_for_features(ARMCPU *cpu);
void arm_translate_init(void);

void arm_restore_state_to_opc(CPUState *cs,
                              const TranslationBlock *tb,
                              const uint64_t *data);

#ifdef CONFIG_TCG
void arm_cpu_synchronize_from_tb(CPUState *cs, const TranslationBlock *tb);
#endif /* CONFIG_TCG */

typedef enum ARMFPRounding {
    FPROUNDING_TIEEVEN,
    FPROUNDING_POSINF,
    FPROUNDING_NEGINF,
    FPROUNDING_ZERO,
    FPROUNDING_TIEAWAY,
    FPROUNDING_ODD
} ARMFPRounding;

/* Map an ARMFPRounding value to the corresponding softfloat rounding mode. */
extern const FloatRoundMode arm_rmode_to_sf_map[6];

static inline FloatRoundMode arm_rmode_to_sf(ARMFPRounding rmode)
{
    assert((unsigned)rmode < ARRAY_SIZE(arm_rmode_to_sf_map));
    return arm_rmode_to_sf_map[rmode];
}
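
/*
 * Usage sketch (illustrative only): callers translate an architectural
 * rounding-mode choice into the softfloat encoding before installing it
 * on a float_status, e.g.:
 *
 *     FloatRoundMode sf = arm_rmode_to_sf(FPROUNDING_TIEAWAY);
 *     set_float_rounding_mode(sf, &env->vfp.fp_status);
 */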

/* Save the currently selected stack pointer back to its SP_EL banked copy. */
static inline void aarch64_save_sp(CPUARMState *env, int el)
{
    if (env->pstate & PSTATE_SP) {
        env->sp_el[el] = env->xregs[31];
    } else {
        env->sp_el[0] = env->xregs[31];
    }
}

/* Load xregs[31] from the banked stack pointer selected by PSTATE.SP. */
static inline void aarch64_restore_sp(CPUARMState *env, int el)
{
    if (env->pstate & PSTATE_SP) {
        env->xregs[31] = env->sp_el[el];
    } else {
        env->xregs[31] = env->sp_el[0];
    }
}

static inline void update_spsel(CPUARMState *env, uint32_t imm)
{
    unsigned int cur_el = arm_current_el(env);
    /*
     * Update PSTATE SPSel bit; this requires us to update the
     * working stack pointer in xregs[31].
     */
    if (!((imm ^ env->pstate) & PSTATE_SP)) {
        return;
    }
    aarch64_save_sp(env, cur_el);
    env->pstate = deposit32(env->pstate, 0, 1, imm);

    /*
     * We rely on illegal updates to SPsel from EL0 to get trapped
     * at translation time.
     */
    assert(cur_el >= 1 && cur_el <= 3);
    aarch64_restore_sp(env, cur_el);
}

/*
 * arm_pamax
 * @cpu: ARMCPU
 *
 * Returns the implementation-defined bit width of physical addresses.
 * The ARMv8 reference manuals refer to this as PAMax().
 */
unsigned int arm_pamax(ARMCPU *cpu);

/*
 * Return true if extended addresses are enabled.
 * This is always the case if we are in aarch64 mode; for aarch32 we
 * check the PMSAv8 case and the TTBCR.EAE bit of the relevant TCR.
 */
static inline bool extended_addresses_enabled(CPUARMState *env)
{
    uint64_t tcr = env->cp15.tcr_el[arm_is_secure(env) ? 3 : 1];
    if (arm_feature(env, ARM_FEATURE_PMSA) &&
        arm_feature(env, ARM_FEATURE_V8)) {
        return true;
    }
    return arm_el_is_aa64(env, 1) ||
           (arm_feature(env, ARM_FEATURE_LPAE) && (tcr & TTBCR_EAE));
}

/*
 * hw_watchpoint_update:
 * @cpu: ARMCPU
 * @n: watchpoint number
 *
 * Synchronize the QEMU watchpoint for watchpoint @n with the
 * current DBGWVR/DBGWCR register state.
 */
void hw_watchpoint_update(ARMCPU *cpu, int n);

/*
 * hw_watchpoint_update_all:
 * @cpu: ARMCPU
 *
 * Synchronize all QEMU watchpoints with the guest debug register state.
 */
void hw_watchpoint_update_all(ARMCPU *cpu);

/*
 * hw_breakpoint_update:
 * @cpu: ARMCPU
 * @n: breakpoint number
 *
 * Synchronize the QEMU breakpoint for breakpoint @n with the
 * current DBGBVR/DBGBCR register state.
 */
void hw_breakpoint_update(ARMCPU *cpu, int n);

/*
 * hw_breakpoint_update_all:
 * @cpu: ARMCPU
 *
 * Synchronize all QEMU breakpoints with the guest debug register state.
 */
void hw_breakpoint_update_all(ARMCPU *cpu);

/* Returns true if the debug exception should actually be taken. */
bool arm_debug_check_breakpoint(CPUState *cs);

/* Returns true if the watchpoint should actually fire. */
bool arm_debug_check_watchpoint(CPUState *cs, CPUWatchpoint *wp);

/*
 * Adjust a watchpoint address for the byte-address-select handling;
 * returns the address QEMU should actually watch.
 */
vaddr arm_adjust_watchpoint_address(CPUState *cs, vaddr addr, int len);

/* Handle a CPU debug exception (breakpoint, watchpoint, single-step). */
void arm_debug_excp_handler(CPUState *cs);

#if defined(CONFIG_USER_ONLY) || !defined(CONFIG_TCG)
static inline bool arm_is_psci_call(ARMCPU *cpu, int excp_type)
{
    return false;
}
static inline void arm_handle_psci_call(ARMCPU *cpu)
{
    g_assert_not_reached();
}
#else
/* Return true if the exception entry is really a PSCI call. */
bool arm_is_psci_call(ARMCPU *cpu, int excp_type);
/* Actually handle a PSCI call. */
void arm_handle_psci_call(ARMCPU *cpu);
#endif

/**
 * arm_clear_exclusive: clear the exclusive monitor
 * @env: CPU env
 * Clear the CPU's exclusive monitor state.
 */
static inline void arm_clear_exclusive(CPUARMState *env)
{
    env->exclusive_addr = -1;
}

/**
 * ARMFaultType: type of an ARM MMU fault
 * This corresponds to the v8A pseudocode's Fault enumeration,
 * with extensions for QEMU internal conditions.
 */
typedef enum ARMFaultType {
    ARMFault_None,
    ARMFault_AccessFlag,
    ARMFault_Alignment,
    ARMFault_Background,
    ARMFault_Domain,
    ARMFault_Permission,
    ARMFault_Translation,
    ARMFault_AddressSize,
    ARMFault_SyncExternal,
    ARMFault_SyncExternalOnWalk,
    ARMFault_SyncParity,
    ARMFault_SyncParityOnWalk,
    ARMFault_AsyncParity,
    ARMFault_AsyncExternal,
    ARMFault_Debug,
    ARMFault_TLBConflict,
    ARMFault_UnsuppAtomicUpdate,
    ARMFault_Lockdown,
    ARMFault_Exclusive,
    ARMFault_ICacheMaint,
    ARMFault_QEMU_NSCExec, /* v8M: NS executing in S&NSC memory */
    ARMFault_QEMU_SFault,  /* v8M: SecureFault */
} ARMFaultType;

/**
 * struct ARMMMUFaultInfo: Information describing an ARM MMU Fault
 * @type: Type of fault
 * @s2addr: Address that caused a fault at stage 2
 * @level: Table walk level (for translation, access flag and permission faults)
 * @domain: Domain of the fault address (for non-LPAE CPUs only)
 * @stage2: True if we faulted at stage 2
 * @s1ptw: True if we faulted at stage 2 while doing a stage 1 page-table walk
 * @s1ns: True if we faulted on a non-secure IPA while in secure state
 * @ea: True if we should set the EA (external abort type) bit in syndrome
 */
typedef struct ARMMMUFaultInfo ARMMMUFaultInfo;
struct ARMMMUFaultInfo {
    ARMFaultType type;
    target_ulong s2addr;
    int level;
    int domain;
    bool stage2;
    bool s1ptw;
    bool s1ns;
    bool ea;
};

/**
 * arm_fi_to_sfsc: Convert fault info struct to short-format FSC
 * Compare pseudocode EncodeSDFSC(), though unlike that function
 * we set up a whole FSR-format code including domain field and
 * putting the high bit of the FSC into bit 10.
 */
static inline uint32_t arm_fi_to_sfsc(ARMMMUFaultInfo *fi)
{
    uint32_t fsc;

    switch (fi->type) {
    case ARMFault_None:
        return 0;
    case ARMFault_AccessFlag:
        fsc = fi->level == 1 ? 0x3 : 0x6;
        break;
    case ARMFault_Alignment:
        fsc = 0x1;
        break;
    case ARMFault_Permission:
        fsc = fi->level == 1 ? 0xd : 0xf;
        break;
    case ARMFault_Domain:
        fsc = fi->level == 1 ? 0x9 : 0xb;
        break;
    case ARMFault_Translation:
        fsc = fi->level == 1 ? 0x5 : 0x7;
        break;
    case ARMFault_SyncExternal:
        fsc = 0x8 | (fi->ea << 12);
        break;
    case ARMFault_SyncExternalOnWalk:
        fsc = fi->level == 1 ? 0xc : 0xe;
        fsc |= (fi->ea << 12);
        break;
    case ARMFault_SyncParity:
        fsc = 0x409;
        break;
    case ARMFault_SyncParityOnWalk:
        fsc = fi->level == 1 ? 0x40c : 0x40e;
        break;
    case ARMFault_AsyncParity:
        fsc = 0x408;
        break;
    case ARMFault_AsyncExternal:
        fsc = 0x406 | (fi->ea << 12);
        break;
    case ARMFault_Debug:
        fsc = 0x2;
        break;
    case ARMFault_TLBConflict:
        fsc = 0x400;
        break;
    case ARMFault_Lockdown:
        fsc = 0x404;
        break;
    case ARMFault_Exclusive:
        fsc = 0x405;
        break;
    case ARMFault_ICacheMaint:
        fsc = 0x4;
        break;
    case ARMFault_Background:
        fsc = 0x0;
        break;
    case ARMFault_QEMU_NSCExec:
        fsc = M_FAKE_FSR_NSC_EXEC;
        break;
    case ARMFault_QEMU_SFault:
        fsc = M_FAKE_FSR_SFAULT;
        break;
    default:
        /*
         * Other faults can't occur in a context that requires a
         * short-format status code.
         */
        g_assert_not_reached();
    }

    fsc |= (fi->domain << 4);
    return fsc;
}
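
/*
 * Worked example (illustrative only): a first-level translation fault
 * in domain 9 encodes as 0x5 | (9 << 4) == 0x95, matching the
 * short-descriptor DFSR layout (FS in bits [3:0], domain in [7:4]):
 *
 *     ARMMMUFaultInfo fi = {
 *         .type = ARMFault_Translation,
 *         .level = 1,
 *         .domain = 9,
 *     };
 *     assert(arm_fi_to_sfsc(&fi) == 0x95);
 */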

/**
 * arm_fi_to_lfsc: Convert fault info struct to long-format FSC
 * Compare pseudocode EncodeLDFSC(), though unlike that function
 * we fill in also the LPAE bit 9 of a DFSR format.
 */
static inline uint32_t arm_fi_to_lfsc(ARMMMUFaultInfo *fi)
{
    uint32_t fsc;

    switch (fi->type) {
    case ARMFault_None:
        return 0;
    case ARMFault_AddressSize:
        assert(fi->level >= -1 && fi->level <= 3);
        if (fi->level < 0) {
            fsc = 0b101001;
        } else {
            fsc = fi->level;
        }
        break;
    case ARMFault_AccessFlag:
        assert(fi->level >= 0 && fi->level <= 3);
        fsc = 0b001000 | fi->level;
        break;
    case ARMFault_Permission:
        assert(fi->level >= 0 && fi->level <= 3);
        fsc = 0b001100 | fi->level;
        break;
    case ARMFault_Translation:
        assert(fi->level >= -1 && fi->level <= 3);
        if (fi->level < 0) {
            fsc = 0b101011;
        } else {
            fsc = 0b000100 | fi->level;
        }
        break;
    case ARMFault_SyncExternal:
        fsc = 0x10 | (fi->ea << 12);
        break;
    case ARMFault_SyncExternalOnWalk:
        assert(fi->level >= -1 && fi->level <= 3);
        if (fi->level < 0) {
            fsc = 0b010011;
        } else {
            fsc = 0b010100 | fi->level;
        }
        fsc |= fi->ea << 12;
        break;
    case ARMFault_SyncParity:
        fsc = 0x18;
        break;
    case ARMFault_SyncParityOnWalk:
        assert(fi->level >= -1 && fi->level <= 3);
        if (fi->level < 0) {
            fsc = 0b011011;
        } else {
            fsc = 0b011100 | fi->level;
        }
        break;
    case ARMFault_AsyncParity:
        fsc = 0x19;
        break;
    case ARMFault_AsyncExternal:
        fsc = 0x11 | (fi->ea << 12);
        break;
    case ARMFault_Alignment:
        fsc = 0x21;
        break;
    case ARMFault_Debug:
        fsc = 0x22;
        break;
    case ARMFault_TLBConflict:
        fsc = 0x30;
        break;
    case ARMFault_UnsuppAtomicUpdate:
        fsc = 0x31;
        break;
    case ARMFault_Lockdown:
        fsc = 0x34;
        break;
    case ARMFault_Exclusive:
        fsc = 0x35;
        break;
    default:
        /*
         * Other faults can't occur in a context that requires a
         * long-format status code.
         */
        g_assert_not_reached();
    }

    fsc |= 1 << 9;
    return fsc;
}
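
/*
 * Worked example (illustrative only): a level-3 permission fault encodes
 * as 0b001100 | 3 == 0xf, and bit 9 (the LPAE-format bit in DFSR terms)
 * is always set on the way out:
 *
 *     ARMMMUFaultInfo fi = { .type = ARMFault_Permission, .level = 3 };
 *     assert(arm_fi_to_lfsc(&fi) == ((1 << 9) | 0xf));
 */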

static inline bool arm_extabort_type(MemTxResult result)
{
    /*
     * The EA bit in syndromes and fault status registers is an
     * IMPDEF classification of external aborts. ARM implementations
     * usually use this to indicate AXI bus Decode error (0) or
     * Slave error (1); in QEMU we follow that.
     */
    return result != MEMTX_DECODE_ERROR;
}

#ifdef CONFIG_USER_ONLY
/* Record a memory fault for later delivery as a signal (user-only). */
void arm_cpu_record_sigsegv(CPUState *cpu, vaddr addr,
                            MMUAccessType access_type,
                            bool maperr, uintptr_t ra);
void arm_cpu_record_sigbus(CPUState *cpu, vaddr addr,
                           MMUAccessType access_type, uintptr_t ra);
#else
bool arm_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
                      MMUAccessType access_type, int mmu_idx,
                      bool probe, uintptr_t retaddr);
#endif

static inline int arm_to_core_mmu_idx(ARMMMUIdx mmu_idx)
{
    return mmu_idx & ARM_MMU_IDX_COREIDX_MASK;
}

static inline ARMMMUIdx core_to_arm_mmu_idx(CPUARMState *env, int mmu_idx)
{
    if (arm_feature(env, ARM_FEATURE_M)) {
        return mmu_idx | ARM_MMU_IDX_M;
    } else {
        return mmu_idx | ARM_MMU_IDX_A;
    }
}

static inline ARMMMUIdx core_to_aa64_mmu_idx(int mmu_idx)
{
    /* AArch64 is always a-profile. */
    return mmu_idx | ARM_MMU_IDX_A;
}

int arm_mmu_idx_to_el(ARMMMUIdx mmu_idx);

/* Return the MMU index for a v7M CPU in the specified security state */
ARMMMUIdx arm_v7m_mmu_idx_for_secstate(CPUARMState *env, bool secstate);

/*
 * Return true if the stage 1 translation regime is using LPAE
 * format page tables
 */
bool arm_s1_regime_using_lpae_format(CPUARMState *env, ARMMMUIdx mmu_idx);

/* Raise a data fault alignment exception for the specified virtual address */
G_NORETURN void arm_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr,
                                            MMUAccessType access_type,
                                            int mmu_idx, uintptr_t retaddr);

#ifndef CONFIG_USER_ONLY
/*
 * arm_cpu_do_transaction_failed: handle a memory system error response
 * (eg external abort) in a way appropriate to the CPU.
 * Called synchronously from the vCPU thread.
 */
void arm_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
                                   vaddr addr, unsigned size,
                                   MMUAccessType access_type,
                                   int mmu_idx, MemTxAttrs attrs,
                                   MemTxResult response, uintptr_t retaddr);
#endif

/* Call any registered EL change hooks */
static inline void arm_call_pre_el_change_hook(ARMCPU *cpu)
{
    ARMELChangeHook *hook, *next;
    QLIST_FOREACH_SAFE(hook, &cpu->pre_el_change_hooks, node, next) {
        hook->hook(cpu, hook->opaque);
    }
}
static inline void arm_call_el_change_hook(ARMCPU *cpu)
{
    ARMELChangeHook *hook, *next;
    QLIST_FOREACH_SAFE(hook, &cpu->el_change_hooks, node, next) {
        hook->hook(cpu, hook->opaque);
    }
}

/* Return true if this address translation regime has two ranges. */
static inline bool regime_has_2_ranges(ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_Stage1_E0:
    case ARMMMUIdx_Stage1_E1:
    case ARMMMUIdx_Stage1_E1_PAN:
    case ARMMMUIdx_E10_0:
    case ARMMMUIdx_E10_1:
    case ARMMMUIdx_E10_1_PAN:
    case ARMMMUIdx_E20_0:
    case ARMMMUIdx_E20_2:
    case ARMMMUIdx_E20_2_PAN:
        return true;
    default:
        return false;
    }
}

/* Return true if this translation regime has PAN semantics applied. */
static inline bool regime_is_pan(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_Stage1_E1_PAN:
    case ARMMMUIdx_E10_1_PAN:
    case ARMMMUIdx_E20_2_PAN:
        return true;
    default:
        return false;
    }
}

/* Return true if this translation regime is a stage 2 regime. */
static inline bool regime_is_stage2(ARMMMUIdx mmu_idx)
{
    return mmu_idx == ARMMMUIdx_Stage2 || mmu_idx == ARMMMUIdx_Stage2_S;
}

/* Return the exception level which controls this address translation regime */
static inline uint32_t regime_el(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_E20_0:
    case ARMMMUIdx_E20_2:
    case ARMMMUIdx_E20_2_PAN:
    case ARMMMUIdx_Stage2:
    case ARMMMUIdx_Stage2_S:
    case ARMMMUIdx_E2:
        return 2;
    case ARMMMUIdx_E3:
        return 3;
    case ARMMMUIdx_E10_0:
    case ARMMMUIdx_Stage1_E0:
        return arm_el_is_aa64(env, 3) || !arm_is_secure_below_el3(env) ? 1 : 3;
    case ARMMMUIdx_Stage1_E1:
    case ARMMMUIdx_Stage1_E1_PAN:
    case ARMMMUIdx_E10_1:
    case ARMMMUIdx_E10_1_PAN:
    case ARMMMUIdx_MPrivNegPri:
    case ARMMMUIdx_MUserNegPri:
    case ARMMMUIdx_MPriv:
    case ARMMMUIdx_MUser:
    case ARMMMUIdx_MSPrivNegPri:
    case ARMMMUIdx_MSUserNegPri:
    case ARMMMUIdx_MSPriv:
    case ARMMMUIdx_MSUser:
        return 1;
    default:
        g_assert_not_reached();
    }
}

/* Return true if this translation regime is an unprivileged regime. */
static inline bool regime_is_user(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_E20_0:
    case ARMMMUIdx_Stage1_E0:
    case ARMMMUIdx_MUser:
    case ARMMMUIdx_MSUser:
    case ARMMMUIdx_MUserNegPri:
    case ARMMMUIdx_MSUserNegPri:
        return true;
    default:
        return false;
    case ARMMMUIdx_E10_0:
    case ARMMMUIdx_E10_1:
    case ARMMMUIdx_E10_1_PAN:
        /*
         * The EL1&0 indexes are expected to have been converted to
         * their Stage1_E* equivalents before querying regime properties,
         * so reaching here indicates a bug.
         */
        g_assert_not_reached();
    }
}

/* Return the SCTLR value which controls this address translation regime */
static inline uint64_t regime_sctlr(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    return env->cp15.sctlr_el[regime_el(env, mmu_idx)];
}

/*
 * These are the fields in VTCR_EL2 which affect both the Secure stage 2
 * and the Non-Secure stage 2 translation regimes (and hence are not
 * present in VSTCR_EL2).
 */
#define VTCR_SHARED_FIELD_MASK \
    (R_VTCR_IRGN0_MASK | R_VTCR_ORGN0_MASK | R_VTCR_SH0_MASK | \
     R_VTCR_PS_MASK | R_VTCR_VS_MASK | R_VTCR_HA_MASK | R_VTCR_HD_MASK | \
     R_VTCR_DS_MASK)

/* Return the value of the TCR controlling this translation regime */
static inline uint64_t regime_tcr(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    if (mmu_idx == ARMMMUIdx_Stage2) {
        return env->cp15.vtcr_el2;
    }
    if (mmu_idx == ARMMMUIdx_Stage2_S) {
        /*
         * Secure stage 2 shares fields from VTCR_EL2. We merge those
         * in with the VSTCR_EL2 value to synthesize a single VTCR_EL2 format
         * value so the callers don't need to special case this.
         *
         * If a future architecture change defines bits in VSTCR_EL2 that
         * overlap with these VTCR_EL2 fields we may need to revisit this.
         */
        uint64_t v = env->cp15.vstcr_el2 & ~VTCR_SHARED_FIELD_MASK;
        v |= env->cp15.vtcr_el2 & VTCR_SHARED_FIELD_MASK;
        return v;
    }
    return env->cp15.tcr_el[regime_el(env, mmu_idx)];
}

/* Return true if the translation regime is using LPAE format page tables */
static inline bool regime_using_lpae_format(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    int el = regime_el(env, mmu_idx);
    if (el == 2 || arm_el_is_aa64(env, el)) {
        return true;
    }
    if (arm_feature(env, ARM_FEATURE_PMSA) &&
        arm_feature(env, ARM_FEATURE_V8)) {
        return true;
    }
    if (arm_feature(env, ARM_FEATURE_LPAE)
        && (regime_tcr(env, mmu_idx) & TTBCR_EAE)) {
        return true;
    }
    return false;
}

/**
 * arm_num_brps: Return number of implemented breakpoints.
 * Note that the ID register BRPS field is "number of bps - 1",
 * and we return the actual number of breakpoints.
 */
static inline int arm_num_brps(ARMCPU *cpu)
{
    if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
        return FIELD_EX64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, BRPS) + 1;
    } else {
        return FIELD_EX32(cpu->isar.dbgdidr, DBGDIDR, BRPS) + 1;
    }
}

/**
 * arm_num_wrps: Return number of implemented watchpoints.
 * Note that the ID register WRPS field is "number of wps - 1",
 * and we return the actual number of watchpoints.
 */
static inline int arm_num_wrps(ARMCPU *cpu)
{
    if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
        return FIELD_EX64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, WRPS) + 1;
    } else {
        return FIELD_EX32(cpu->isar.dbgdidr, DBGDIDR, WRPS) + 1;
    }
}

/**
 * arm_num_ctx_cmps: Return number of implemented context comparators.
 * Note that the ID register CTX_CMPS field is "number of cmps - 1",
 * and we return the actual number of comparators.
 */
static inline int arm_num_ctx_cmps(ARMCPU *cpu)
{
    if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
        return FIELD_EX64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, CTX_CMPS) + 1;
    } else {
        return FIELD_EX32(cpu->isar.dbgdidr, DBGDIDR, CTX_CMPS) + 1;
    }
}

/**
 * v7m_using_psp: Return true if using process stack pointer
 * Return true if the CPU is currently using the process stack
 * pointer, or false if it is using the main stack pointer.
 */
static inline bool v7m_using_psp(CPUARMState *env)
{
    /*
     * Handler mode always uses the main stack; for thread mode
     * the CONTROL.SPSEL bit determines the answer.
     * Note that in v7M it is not possible to be in Handler mode with
     * CONTROL.SPSEL non-zero, but in v8M it is, so we must check both.
     */
    return !arm_v7m_is_handler_mode(env) &&
        env->v7m.control[env->v7m.secure] & R_V7M_CONTROL_SPSEL_MASK;
}

/**
 * v7m_sp_limit: Return SP limit for current CPU state
 * Return the SP limit value for the current CPU security state
 * and stack pointer.
 */
static inline uint32_t v7m_sp_limit(CPUARMState *env)
{
    if (v7m_using_psp(env)) {
        return env->v7m.psplim[env->v7m.secure];
    } else {
        return env->v7m.msplim[env->v7m.secure];
    }
}

/**
 * v7m_cpacr_pass:
 * Return true if the v7M CPACR permits access to the FPU for the specified
 * security state and privilege level.
 */
static inline bool v7m_cpacr_pass(CPUARMState *env,
                                  bool is_secure, bool is_priv)
{
    switch (extract32(env->v7m.cpacr[is_secure], 20, 2)) {
    case 0:
    case 2: /* UNPREDICTABLE: we treat like 0 */
        return false;
    case 1:
        return is_priv;
    case 3:
        return true;
    default:
        g_assert_not_reached();
    }
}
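
/*
 * Worked example (illustrative only): the CPACR.CP10 field is at
 * bits [21:20], so a value of 1 << 20 ("privileged access only")
 * permits FPU use only when is_priv is true:
 *
 *     env->v7m.cpacr[secure] = 1 << 20;
 *     v7m_cpacr_pass(env, secure, true);    -> true
 *     v7m_cpacr_pass(env, secure, false);   -> false
 */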

/**
 * aarch32_mode_name(): Return name of the AArch32 CPU mode
 * @psr: Program Status Register indicating CPU mode
 *
 * Returns, for debug logging purposes, a printable representation
 * of the AArch32 CPU mode ("svc", "usr", etc) as indicated by
 * the low bits of the specified PSR.
 */
static inline const char *aarch32_mode_name(uint32_t psr)
{
    static const char cpu_mode_names[16][4] = {
        "usr", "fiq", "irq", "svc", "???", "???", "mon", "abt",
        "???", "???", "hyp", "und", "???", "???", "???", "sys"
    };

    return cpu_mode_names[psr & 0xf];
}
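
/*
 * Usage sketch (illustrative only), e.g. for debug tracing:
 *
 *     qemu_log("entering %s mode\n", aarch32_mode_name(env->uncached_cpsr));
 *
 * A PSR of 0x13 (ARM_CPU_MODE_SVC) yields "svc".
 */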

/**
 * arm_cpu_update_virq: Update CPU_INTERRUPT_VIRQ bit in cs->interrupt_request
 *
 * Update the CPU_INTERRUPT_VIRQ bit in cs->interrupt_request, following
 * a change to either the input VIRQ line from the GIC or the HCR_EL2.VI bit.
 * Must be called with the iothread lock held.
 */
void arm_cpu_update_virq(ARMCPU *cpu);

/**
 * arm_cpu_update_vfiq: Update CPU_INTERRUPT_VFIQ bit in cs->interrupt_request
 *
 * Update the CPU_INTERRUPT_VFIQ bit in cs->interrupt_request, following
 * a change to either the input VFIQ line from the GIC or the HCR_EL2.VF bit.
 * Must be called with the iothread lock held.
 */
void arm_cpu_update_vfiq(ARMCPU *cpu);

/**
 * arm_cpu_update_vserr: Update CPU_INTERRUPT_VSERR bit
 *
 * Update the CPU_INTERRUPT_VSERR bit in cs->interrupt_request,
 * following a change to the HCR_EL2.VSE bit.
 */
void arm_cpu_update_vserr(ARMCPU *cpu);

/**
 * arm_mmu_idx_el:
 * @env: The cpu environment
 * @el: The EL to use.
 *
 * Return the full ARMMMUIdx for the translation regime for EL.
 */
ARMMMUIdx arm_mmu_idx_el(CPUARMState *env, int el);

/**
 * arm_mmu_idx:
 * @env: The cpu environment
 *
 * Return the full ARMMMUIdx for the current translation regime.
 */
ARMMMUIdx arm_mmu_idx(CPUARMState *env);

/*
 * Return the ARMMMUIdx for the stage 1 traversal of the given regime
 * (or, for arm_stage1_mmu_idx, of the current regime).
 */
#ifdef CONFIG_USER_ONLY
static inline ARMMMUIdx stage_1_mmu_idx(ARMMMUIdx mmu_idx)
{
    return ARMMMUIdx_Stage1_E0;
}
static inline ARMMMUIdx arm_stage1_mmu_idx(CPUARMState *env)
{
    return ARMMMUIdx_Stage1_E0;
}
#else
ARMMMUIdx stage_1_mmu_idx(ARMMMUIdx mmu_idx);
ARMMMUIdx arm_stage1_mmu_idx(CPUARMState *env);
#endif

/**
 * arm_mmu_idx_is_stage1_of_2:
 * @mmu_idx: The ARMMMUIdx to test
 *
 * Return true if @mmu_idx is a NOTLB mmu_idx that is the
 * first stage of a two stage regime.
 */
static inline bool arm_mmu_idx_is_stage1_of_2(ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_Stage1_E0:
    case ARMMMUIdx_Stage1_E1:
    case ARMMMUIdx_Stage1_E1_PAN:
        return true;
    default:
        return false;
    }
}

/* Return the mask of CPSR bits which are valid for this CPU's features. */
static inline uint32_t aarch32_cpsr_valid_mask(uint64_t features,
                                               const ARMISARegisters *id)
{
    uint32_t valid = CPSR_M | CPSR_AIF | CPSR_IL | CPSR_NZCV;

    if ((features >> ARM_FEATURE_V4T) & 1) {
        valid |= CPSR_T;
    }
    if ((features >> ARM_FEATURE_V5) & 1) {
        valid |= CPSR_Q; /* V5TE in reality */
    }
    if ((features >> ARM_FEATURE_V6) & 1) {
        valid |= CPSR_E | CPSR_GE;
    }
    if ((features >> ARM_FEATURE_THUMB2) & 1) {
        valid |= CPSR_IT;
    }
    if (isar_feature_aa32_jazelle(id)) {
        valid |= CPSR_J;
    }
    if (isar_feature_aa32_pan(id)) {
        valid |= CPSR_PAN;
    }
    if (isar_feature_aa32_dit(id)) {
        valid |= CPSR_DIT;
    }
    if (isar_feature_aa32_ssbs(id)) {
        valid |= CPSR_SSBS;
    }

    return valid;
}

/* Return the mask of PSTATE bits which are valid for this CPU's features. */
static inline uint32_t aarch64_pstate_valid_mask(const ARMISARegisters *id)
{
    uint32_t valid;

    valid = PSTATE_M | PSTATE_DAIF | PSTATE_IL | PSTATE_SS | PSTATE_NZCV;
    if (isar_feature_aa64_bti(id)) {
        valid |= PSTATE_BTYPE;
    }
    if (isar_feature_aa64_pan(id)) {
        valid |= PSTATE_PAN;
    }
    if (isar_feature_aa64_uao(id)) {
        valid |= PSTATE_UAO;
    }
    if (isar_feature_aa64_dit(id)) {
        valid |= PSTATE_DIT;
    }
    if (isar_feature_aa64_ssbs(id)) {
        valid |= PSTATE_SSBS;
    }
    if (isar_feature_aa64_mte(id)) {
        valid |= PSTATE_TCO;
    }

    return valid;
}

typedef enum ARMGranuleSize {
    /* Same order as TG0 encoding */
    Gran4K,
    Gran64K,
    Gran16K,
    GranInvalid,
} ARMGranuleSize;

/**
 * arm_granule_bits: Return address size of the granule in bits
 *
 * Return the address size of the granule in bits. This corresponds
 * to the pseudocode TGxGranuleBits().
 */
static inline int arm_granule_bits(ARMGranuleSize gran)
{
    switch (gran) {
    case Gran64K:
        return 16;
    case Gran16K:
        return 14;
    case Gran4K:
        return 12;
    default:
        g_assert_not_reached();
    }
}
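
/*
 * Worked example (illustrative only): the returned value is log2 of the
 * page size, so the granule's page size and offset mask follow directly:
 *
 *     int bits = arm_granule_bits(Gran16K);   -> 14
 *     uint64_t page_size = 1ULL << bits;      -> 16K
 *     uint64_t offset_mask = page_size - 1;
 */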

/*
 * Parameters of a given virtual address, as extracted from the
 * translation control register (TCR) for a given regime.
 */
typedef struct ARMVAParameters {
    unsigned tsz    : 8;
    unsigned ps     : 3;
    unsigned sh     : 2;
    unsigned select : 1;
    bool tbi        : 1;
    bool epd        : 1;
    bool hpd        : 1;
    bool tsz_oob    : 1;  /* tsz has been clamped to legal range */
    bool ds         : 1;
    bool ha         : 1;
    bool hd         : 1;
    ARMGranuleSize gran : 2;
} ARMVAParameters;

/**
 * aa64_va_parameters: Return parameters for an AArch64 virtual address
 * @env: CPU
 * @va: virtual address to look up
 * @mmu_idx: determines translation regime
 * @data: true for data accesses, false for instruction fetches
 * @el1_is_aa32: true if we are asking about stage 2 when EL1 is AArch32
 *               (ignored if @mmu_idx is for a stage 1 regime)
 */
ARMVAParameters aa64_va_parameters(CPUARMState *env, uint64_t va,
                                   ARMMMUIdx mmu_idx, bool data,
                                   bool el1_is_aa32);

int aa64_va_parameter_tbi(uint64_t tcr, ARMMMUIdx mmu_idx);
int aa64_va_parameter_tbid(uint64_t tcr, ARMMMUIdx mmu_idx);
int aa64_va_parameter_tcma(uint64_t tcr, ARMMMUIdx mmu_idx);

/* Determine if allocation tags are available. */
static inline bool allocation_tag_access_enabled(CPUARMState *env, int el,
                                                 uint64_t sctlr)
{
    if (el < 3
        && arm_feature(env, ARM_FEATURE_EL3)
        && !(env->cp15.scr_el3 & SCR_ATA)) {
        return false;
    }
    if (el < 2 && arm_is_el2_enabled(env)) {
        uint64_t hcr = arm_hcr_el2_eff(env);
        if (!(hcr & HCR_ATA) && (!(hcr & HCR_E2H) || !(hcr & HCR_TGE))) {
            return false;
        }
    }
    sctlr &= (el == 0 ? SCTLR_ATA0 : SCTLR_ATA);
    return sctlr != 0;
}

#ifndef CONFIG_USER_ONLY

/* Security attributes for an address, as returned by v8m_security_lookup. */
typedef struct V8M_SAttributes {
    bool subpage; /* true if these attrs don't cover the whole TARGET_PAGE */
    bool ns;
    bool nsc;
    uint8_t sregion;
    bool srvalid;
    uint8_t iregion;
    bool irvalid;
} V8M_SAttributes;

void v8m_security_lookup(CPUARMState *env, uint32_t address,
                         MMUAccessType access_type, ARMMMUIdx mmu_idx,
                         bool secure, V8M_SAttributes *sattrs);

/* Cacheability and shareability attributes for a memory access */
typedef struct ARMCacheAttrs {
    /*
     * If is_s2_format is true, attrs is the S2 descriptor bits [5:2].
     * Otherwise, attrs is the same as the MAIR_EL1 8-bit format.
     */
    unsigned int attrs:8;
    unsigned int shareability:2; /* as in the SH field of the VMSAv8-64 PTEs */
    bool is_s2_format:1;
    bool guarded:1;              /* guarded bit of the v8-64 PTE */
} ARMCacheAttrs;

/* Fields that are valid upon success. */
typedef struct GetPhysAddrResult {
    CPUTLBEntryFull f;
    ARMCacheAttrs cacheattrs;
} GetPhysAddrResult;

/**
 * get_phys_addr_with_secure: get the physical address for a virtual address
 * @env: CPUARMState
 * @address: virtual address to get physical address for
 * @access_type: 0 for read, 1 for write, 2 for execute
 * @mmu_idx: MMU index indicating required translation regime
 * @is_secure: security state for the access
 * @result: set on translation success
 * @fi: set to fault info if the translation fails
 *
 * Find the physical address corresponding to the given virtual address,
 * by doing a translation table walk on MMU based systems or using the
 * MPU state on MPU based systems.
 *
 * Returns false if the translation was successful. Otherwise, @result may
 * not be filled in, and the populated fsc value provides information on
 * why the translation aborted, in the format of a DFSR/IFSR fault register,
 * with the following caveats:
 *  * we honour the short vs long DFSR format differences.
 *  * the WnR bit is never set (the caller must do this).
 *  * for PSMAv5 based systems we don't bother to return a full FSR format
 *    value.
 */
bool get_phys_addr_with_secure(CPUARMState *env, target_ulong address,
                               MMUAccessType access_type,
                               ARMMMUIdx mmu_idx, bool is_secure,
                               GetPhysAddrResult *result, ARMMMUFaultInfo *fi)
    __attribute__((nonnull));

/**
 * get_phys_addr: get the physical address for a virtual address
 * @env: CPUARMState
 * @address: virtual address to get physical address for
 * @access_type: 0 for read, 1 for write, 2 for execute
 * @mmu_idx: MMU index indicating required translation regime
 * @result: set on translation success
 * @fi: set to fault info if the translation fails
 *
 * Similarly, but use the security regime of @mmu_idx.
 */
bool get_phys_addr(CPUARMState *env, target_ulong address,
                   MMUAccessType access_type, ARMMMUIdx mmu_idx,
                   GetPhysAddrResult *result, ARMMMUFaultInfo *fi)
    __attribute__((nonnull));

bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address,
                       MMUAccessType access_type, ARMMMUIdx mmu_idx,
                       bool is_secure, GetPhysAddrResult *result,
                       ARMMMUFaultInfo *fi, uint32_t *mregion);

void arm_log_exception(CPUState *cs);

#endif /* !CONFIG_USER_ONLY */

/*
 * The log2 of the words in the tag block, for GMID_EL1.BS.
 * This is the maximum, 32 bytes, which manipulates two tags.
 */
#define GMID_EL1_BS 6

/*
 * SVE predicates are 1/8 the size of SVE vectors, and cannot use
 * the same simd_desc() encoding because of the difference in size.
 * Instead, express the vector length as a multiple of 16 bytes, or
 * the number of predicate bits as a multiple of 2.
 */
FIELD(PREDDESC, OPRSZ, 0, 6)
FIELD(PREDDESC, ESZ, 6, 2)
FIELD(PREDDESC, DATA, 8, 24)

/*
 * The MTE descriptor (below) is packed into the SVE memory-op simd_data
 * field above its low 5 bits, which hold the Zt register number.
 */
#define SVE_MTEDESC_SHIFT 5

/* Bits within a descriptor passed to the helper_mte_check* functions. */
FIELD(MTEDESC, MIDX, 0, 4)
FIELD(MTEDESC, TBI, 4, 2)
FIELD(MTEDESC, TCMA, 6, 2)
FIELD(MTEDESC, WRITE, 8, 1)
FIELD(MTEDESC, SIZEM1, 9, SIMD_DATA_BITS - 9)  /* size - 1 */

bool mte_probe(CPUARMState *env, uint32_t desc, uint64_t ptr);
uint64_t mte_check(CPUARMState *env, uint32_t desc, uint64_t ptr, uintptr_t ra);

/* Extract the logical (allocation) tag from bits [59:56] of a pointer. */
static inline int allocation_tag_from_addr(uint64_t ptr)
{
    return extract64(ptr, 56, 4);
}

/* Insert @rtag as the logical tag in bits [59:56] of @ptr. */
static inline uint64_t address_with_allocation_tag(uint64_t ptr, int rtag)
{
    return deposit64(ptr, 56, 4, rtag);
}
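
/*
 * Round-trip sketch (illustrative only): the allocation tag lives in
 * bits [59:56] of the pointer, so inserting and re-extracting a tag is
 * lossless:
 *
 *     uint64_t tagged = address_with_allocation_tag(ptr, 0xa);
 *     assert(allocation_tag_from_addr(tagged) == 0xa);
 */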

/* Return true if TBI is enabled for the address half selected by bit 55. */
static inline bool tbi_check(uint32_t desc, int bit55)
{
    return (desc >> (R_MTEDESC_TBI_SHIFT + bit55)) & 1;
}

/* Return true if TCMA is enabled and the logical tag matches bit 55. */
static inline bool tcma_check(uint32_t desc, int bit55, int ptr_tag)
{
    /*
     * TCMA elides tag checks for addresses whose tag matches the
     * "natural" tag of their half of the address space: 0b0000 when
     * bit 55 is 0, 0b1111 when bit 55 is 1. In both cases adding
     * bit55 to the tag and masking to 4 bits yields zero.
     */
    bool match = ((ptr_tag + bit55) & 0xf) == 0;
    bool tcma = (desc >> (R_MTEDESC_TCMA_SHIFT + bit55)) & 1;
    return tcma && match;
}
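
/*
 * Worked example (illustrative only): with bit55 == 1 the matching tag
 * is 0b1111, because (0xf + 1) & 0xf == 0; with bit55 == 0 the matching
 * tag is 0b0000. So for a high-half ("kernel") pointer:
 *
 *     tcma_check(desc, 1, 0xf);   -> true iff the TCMA1 bit is set in desc
 *     tcma_check(desc, 1, 0x3);   -> always false: tag does not match
 */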

/*
 * For TBI, ideally, we would do nothing. Proper behaviour on fault is
 * for the tag to be present in the FAR_ELx register. But for user-only
 * mode, we do not have a TLB with which to implement this, so we must
 * remove the top byte.
 */
static inline uint64_t useronly_clean_ptr(uint64_t ptr)
{
#ifdef CONFIG_USER_ONLY
    /* TBI0 is known to be enabled, while TBI1 is disabled. */
    ptr &= sextract64(ptr, 0, 56);
#endif
    return ptr;
}

static inline uint64_t useronly_maybe_clean_ptr(uint32_t desc, uint64_t ptr)
{
#ifdef CONFIG_USER_ONLY
    int64_t clean_ptr = sextract64(ptr, 0, 56);
    if (tbi_check(desc, clean_ptr < 0)) {
        ptr = clean_ptr;
    }
#endif
    return ptr;
}

/* Values for M-profile PSR.ECI for MVE insns */
enum MVEECIState {
    ECI_NONE = 0, /* No completed beats */
    ECI_A0 = 1, /* Completed: A0 */
    ECI_A0A1 = 2, /* Completed: A0, A1 */
    /* 3 is reserved */
    ECI_A0A1A2 = 4, /* Completed: A0, A1, A2 */
    ECI_A0A1A2B0 = 5, /* Completed: A0, A1, A2, B0 */
    /* All other values reserved */
};

/* Definitions for the PMU registers */
#define PMCRN_MASK  0xf800
#define PMCRN_SHIFT 11
#define PMCRLP  0x80
#define PMCRLC  0x40
#define PMCRDP  0x20
#define PMCRX   0x10
#define PMCRD   0x8
#define PMCRC   0x4
#define PMCRP   0x2
#define PMCRE   0x1
/*
 * Mask of PMCR bits writable by guest (not including WO bits like C, P,
 * which can be written as 1 to trigger behaviour but which stay RAZ).
 */
#define PMCR_WRITABLE_MASK (PMCRLP | PMCRLC | PMCRDP | PMCRX | PMCRD | PMCRE)

#define PMXEVTYPER_P          0x80000000
#define PMXEVTYPER_U          0x40000000
#define PMXEVTYPER_NSK        0x20000000
#define PMXEVTYPER_NSU        0x10000000
#define PMXEVTYPER_NSH        0x08000000
#define PMXEVTYPER_M          0x04000000
#define PMXEVTYPER_MT         0x02000000
#define PMXEVTYPER_EVTCOUNT   0x0000ffff
#define PMXEVTYPER_MASK       (PMXEVTYPER_P | PMXEVTYPER_U | PMXEVTYPER_NSK | \
                               PMXEVTYPER_NSU | PMXEVTYPER_NSH | \
                               PMXEVTYPER_M | PMXEVTYPER_MT | \
                               PMXEVTYPER_EVTCOUNT)

#define PMCCFILTR             0xf8000000
#define PMCCFILTR_M           PMXEVTYPER_M
#define PMCCFILTR_EL0         (PMCCFILTR | PMCCFILTR_M)

/* Return the number of implemented PMU event counters (PMCR.N). */
static inline uint32_t pmu_num_counters(CPUARMState *env)
{
    ARMCPU *cpu = env_archcpu(env);

    return (cpu->isar.reset_pmcr_el0 & PMCRN_MASK) >> PMCRN_SHIFT;
}

/* Bits allowed to be set/cleared for PMCNTEN* and PMINTEN* */
static inline uint64_t pmu_counter_mask(CPUARMState *env)
{
    return (1ULL << 31) | ((1ULL << pmu_num_counters(env)) - 1);
}
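
/*
 * Worked example (illustrative only): with PMCR.N == 4 event counters,
 * the mask covers counters 0..3 plus the cycle counter in bit 31:
 *
 *     pmu_counter_mask(env) == (1ULL << 31) | 0xf;   -> 0x8000000f
 */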

#ifdef TARGET_AARCH64
int arm_gen_dynamic_svereg_xml(CPUState *cpu, int base_reg);
int aarch64_gdb_get_sve_reg(CPUARMState *env, GByteArray *buf, int reg);
int aarch64_gdb_set_sve_reg(CPUARMState *env, uint8_t *buf, int reg);
int aarch64_gdb_get_fpu_reg(CPUARMState *env, GByteArray *buf, int reg);
int aarch64_gdb_set_fpu_reg(CPUARMState *env, uint8_t *buf, int reg);
int aarch64_gdb_get_pauth_reg(CPUARMState *env, GByteArray *buf, int reg);
int aarch64_gdb_set_pauth_reg(CPUARMState *env, uint8_t *buf, int reg);
void arm_cpu_sve_finalize(ARMCPU *cpu, Error **errp);
void arm_cpu_sme_finalize(ARMCPU *cpu, Error **errp);
void arm_cpu_pauth_finalize(ARMCPU *cpu, Error **errp);
void arm_cpu_lpa2_finalize(ARMCPU *cpu, Error **errp);
#endif

/* Read the CONTROL register as the MRS instruction would. */
uint32_t arm_v7m_mrs_control(CPUARMState *env, uint32_t secure);

/*
 * Return a pointer to the location where we currently store the
 * stack pointer for the requested security state and thread mode.
 * This pointer will become invalid if the CPU state is updated
 * such that the stack pointers are switched around (eg changing
 * the SPSEL control bit).
 */
uint32_t *arm_v7m_get_sp_ptr(CPUARMState *env, bool secure,
                             bool threadmode, bool spsel);

#ifdef CONFIG_USER_ONLY
static inline void define_cortex_a72_a57_a53_cp_reginfo(ARMCPU *cpu) { }
#else
void define_cortex_a72_a57_a53_cp_reginfo(ARMCPU *cpu);
#endif

bool el_is_in_host(CPUARMState *env, int el);

void aa32_max_features(ARMCPU *cpu);
int exception_target_el(CPUARMState *env);
bool arm_singlestep_active(CPUARMState *env);
bool arm_generate_debug_exceptions(CPUARMState *env);

/**
 * pauth_ptr_mask:
 * @param: parameters defining the MMU setup
 *
 * Return a mask of the address bits that contain the authentication code,
 * given the MMU config defined by @param.
 */
static inline uint64_t pauth_ptr_mask(ARMVAParameters param)
{
    int bot_pac_bit = 64 - param.tsz;
    int top_pac_bit = 64 - 8 * param.tbi;

    return MAKE_64BIT_MASK(bot_pac_bit, top_pac_bit - bot_pac_bit);
}
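
/*
 * Worked example (illustrative only): with a 48-bit VA space (tsz == 16)
 * and TBI enabled, the PAC occupies bits [55:48]:
 *
 *     ARMVAParameters p = { .tsz = 16, .tbi = true };
 *     pauth_ptr_mask(p) == MAKE_64BIT_MASK(48, 8);   -> 0x00ff000000000000
 */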

/* Add the cpreg definitions for debug related system registers */
void define_debug_regs(ARMCPU *cpu);

/* Effective value of MDCR_EL2 */
static inline uint64_t arm_mdcr_el2_eff(CPUARMState *env)
{
    return arm_is_el2_enabled(env) ? env->cp15.mdcr_el2 : 0;
}

/* Powers of 2 for sve_vq_map et al. */
#define SVE_VQ_POW2_MAP                                 \
    ((1 << (1 - 1)) | (1 << (2 - 1)) |                  \
     (1 << (4 - 1)) | (1 << (8 - 1)) | (1 << (16 - 1)))

/*
 * Return true if it is possible to take a fine-grained-trap to EL2.
 */
static inline bool arm_fgt_active(CPUARMState *env, int el)
{
    /*
     * The Arm ARM only requires the "{E2H,TGE} != {1,1}" test for traps
     * that can affect EL0, but it is harmless to do the test also for
     * traps that can only affect EL1.
     */
    return cpu_isar_feature(aa64_fgt, env_archcpu(env)) &&
        el < 2 && arm_is_el2_enabled(env) &&
        arm_el_is_aa64(env, 1) &&
        (arm_hcr_el2_eff(env) & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE) &&
        (!arm_feature(env, ARM_FEATURE_EL3) || (env->cp15.scr_el3 & SCR_FGTEN));
}

void assert_hflags_rebuild_correctly(CPUARMState *env);
#endif /* TARGET_ARM_INTERNALS_H */