1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25#ifndef TARGET_ARM_INTERNALS_H
26#define TARGET_ARM_INTERNALS_H
27
28#include "hw/registerfields.h"
29#include "tcg/tcg-gvec-desc.h"
30#include "syndrome.h"
31
32
33#define BANK_USRSYS 0
34#define BANK_SVC 1
35#define BANK_ABT 2
36#define BANK_UND 3
37#define BANK_IRQ 4
38#define BANK_FIQ 5
39#define BANK_HYP 6
40#define BANK_MON 7
41
42static inline bool excp_is_internal(int excp)
43{
44
45
46
47 return excp == EXCP_INTERRUPT
48 || excp == EXCP_HLT
49 || excp == EXCP_DEBUG
50 || excp == EXCP_HALTED
51 || excp == EXCP_EXCEPTION_EXIT
52 || excp == EXCP_KERNEL_TRAP
53 || excp == EXCP_SEMIHOST;
54}
55
56
57
58
59#define GTIMER_SCALE 16
60
61
62FIELD(V7M_CONTROL, NPRIV, 0, 1)
63FIELD(V7M_CONTROL, SPSEL, 1, 1)
64FIELD(V7M_CONTROL, FPCA, 2, 1)
65FIELD(V7M_CONTROL, SFPA, 3, 1)
66
67
68FIELD(V7M_EXCRET, ES, 0, 1)
69FIELD(V7M_EXCRET, RES0, 1, 1)
70FIELD(V7M_EXCRET, SPSEL, 2, 1)
71FIELD(V7M_EXCRET, MODE, 3, 1)
72FIELD(V7M_EXCRET, FTYPE, 4, 1)
73FIELD(V7M_EXCRET, DCRS, 5, 1)
74FIELD(V7M_EXCRET, S, 6, 1)
75FIELD(V7M_EXCRET, RES1, 7, 25)
76
77
78#define EXC_RETURN_MIN_MAGIC 0xff000000
79
80
81
82#define FNC_RETURN_MIN_MAGIC 0xfefffffe
83
84
85FIELD(DBGWCR, E, 0, 1)
86FIELD(DBGWCR, PAC, 1, 2)
87FIELD(DBGWCR, LSC, 3, 2)
88FIELD(DBGWCR, BAS, 5, 8)
89FIELD(DBGWCR, HMC, 13, 1)
90FIELD(DBGWCR, SSC, 14, 2)
91FIELD(DBGWCR, LBN, 16, 4)
92FIELD(DBGWCR, WT, 20, 1)
93FIELD(DBGWCR, MASK, 24, 5)
94FIELD(DBGWCR, SSCE, 29, 1)
95
96
97
98
99
100
101
102
103
104
105
106
107
108#define M_FAKE_FSR_NSC_EXEC 0xf
109#define M_FAKE_FSR_SFAULT 0xe
110
111
112
113
114
115
116
117G_NORETURN void raise_exception(CPUARMState *env, uint32_t excp,
118 uint32_t syndrome, uint32_t target_el);
119
120
121
122
123G_NORETURN void raise_exception_ra(CPUARMState *env, uint32_t excp,
124 uint32_t syndrome, uint32_t target_el,
125 uintptr_t ra);
126
127
128
129
130
131
132
133static inline unsigned int aarch64_banked_spsr_index(unsigned int el)
134{
135 static const unsigned int map[4] = {
136 [1] = BANK_SVC,
137 [2] = BANK_HYP,
138 [3] = BANK_MON,
139 };
140 assert(el >= 1 && el <= 3);
141 return map[el];
142}
143
144
/*
 * Map an AArch32 CPU mode value (CPSR.M) onto the index of the register
 * bank that holds that mode's banked registers.  Aborts on any mode
 * value not listed below.
 */
static inline int bank_number(int mode)
{
    switch (mode) {
    case ARM_CPU_MODE_USR:
    case ARM_CPU_MODE_SYS:
        /* User and System share the same (un-banked) registers */
        return BANK_USRSYS;
    case ARM_CPU_MODE_SVC:
        return BANK_SVC;
    case ARM_CPU_MODE_ABT:
        return BANK_ABT;
    case ARM_CPU_MODE_UND:
        return BANK_UND;
    case ARM_CPU_MODE_IRQ:
        return BANK_IRQ;
    case ARM_CPU_MODE_FIQ:
        return BANK_FIQ;
    case ARM_CPU_MODE_HYP:
        return BANK_HYP;
    case ARM_CPU_MODE_MON:
        return BANK_MON;
    }
    g_assert_not_reached();
}
168
169
170
171
172
173
174
175
176
177
178
179
180static inline int r14_bank_number(int mode)
181{
182 return (mode == ARM_CPU_MODE_HYP) ? BANK_USRSYS : bank_number(mode);
183}
184
185void arm_cpu_register_gdb_regs_for_features(ARMCPU *cpu);
186void arm_translate_init(void);
187
188void arm_restore_state_to_opc(CPUState *cs,
189 const TranslationBlock *tb,
190 const uint64_t *data);
191
192#ifdef CONFIG_TCG
193void arm_cpu_synchronize_from_tb(CPUState *cs, const TranslationBlock *tb);
194#endif
195
196typedef enum ARMFPRounding {
197 FPROUNDING_TIEEVEN,
198 FPROUNDING_POSINF,
199 FPROUNDING_NEGINF,
200 FPROUNDING_ZERO,
201 FPROUNDING_TIEAWAY,
202 FPROUNDING_ODD
203} ARMFPRounding;
204
205extern const FloatRoundMode arm_rmode_to_sf_map[6];
206
/* Convert an ARMFPRounding value to the corresponding softfloat rounding mode. */
static inline FloatRoundMode arm_rmode_to_sf(ARMFPRounding rmode)
{
    assert((unsigned)rmode < ARRAY_SIZE(arm_rmode_to_sf_map));
    return arm_rmode_to_sf_map[rmode];
}
212
213static inline void aarch64_save_sp(CPUARMState *env, int el)
214{
215 if (env->pstate & PSTATE_SP) {
216 env->sp_el[el] = env->xregs[31];
217 } else {
218 env->sp_el[0] = env->xregs[31];
219 }
220}
221
222static inline void aarch64_restore_sp(CPUARMState *env, int el)
223{
224 if (env->pstate & PSTATE_SP) {
225 env->xregs[31] = env->sp_el[el];
226 } else {
227 env->xregs[31] = env->sp_el[0];
228 }
229}
230
/*
 * Update the PSTATE.SPSel bit to @imm (bit 0); this requires us to also
 * swap the working stack pointer in xregs[31] between SP_EL0 and SP_ELx.
 */
static inline void update_spsel(CPUARMState *env, uint32_t imm)
{
    unsigned int cur_el = arm_current_el(env);

    /* If the bit is unchanged there is nothing to do. */
    if (!((imm ^ env->pstate) & PSTATE_SP)) {
        return;
    }
    /* Write the old SP back before flipping the selection bit... */
    aarch64_save_sp(env, cur_el);
    env->pstate = deposit32(env->pstate, 0, 1, imm);

    /*
     * ...then reload xregs[31] from the newly-selected bank.
     * We rely on illegal updates to SPsel from EL0 having been
     * rejected earlier (e.g. trapped at translate time).
     */
    assert(cur_el >= 1 && cur_el <= 3);
    aarch64_restore_sp(env, cur_el);
}
249
250
251
252
253
254
255
256
257unsigned int arm_pamax(ARMCPU *cpu);
258
259
260
261
262
/*
 * Return true if extended addresses (LPAE long-descriptor format) are
 * enabled.  Always true if EL1 is AArch64 or for v8 PMSA; for AArch32
 * VMSA it depends on the LPAE feature and TTBCR.EAE.
 */
static inline bool extended_addresses_enabled(CPUARMState *env)
{
    uint64_t tcr = env->cp15.tcr_el[arm_is_secure(env) ? 3 : 1];
    if (arm_feature(env, ARM_FEATURE_PMSA) &&
        arm_feature(env, ARM_FEATURE_V8)) {
        return true;
    }
    return arm_el_is_aa64(env, 1) ||
           (arm_feature(env, ARM_FEATURE_LPAE) && (tcr & TTBCR_EAE));
}
273
274
275
276
277void hw_watchpoint_update(ARMCPU *cpu, int n);
278
279
280
281
282void hw_watchpoint_update_all(ARMCPU *cpu);
283
284
285
286void hw_breakpoint_update(ARMCPU *cpu, int n);
287
288
289
290
291void hw_breakpoint_update_all(ARMCPU *cpu);
292
293
294bool arm_debug_check_breakpoint(CPUState *cs);
295
296
297bool arm_debug_check_watchpoint(CPUState *cs, CPUWatchpoint *wp);
298
299
300
301
302vaddr arm_adjust_watchpoint_address(CPUState *cs, vaddr addr, int len);
303
304
305void arm_debug_excp_handler(CPUState *cs);
306
307#if defined(CONFIG_USER_ONLY) || !defined(CONFIG_TCG)
/* Stub for user-only / non-TCG builds: exceptions are never PSCI calls. */
static inline bool arm_is_psci_call(ARMCPU *cpu, int excp_type)
{
    return false;
}
/* Stub: must be unreachable since arm_is_psci_call() always returns false. */
static inline void arm_handle_psci_call(ARMCPU *cpu)
{
    g_assert_not_reached();
}
316#else
317
318bool arm_is_psci_call(ARMCPU *cpu, int excp_type);
319
320void arm_handle_psci_call(ARMCPU *cpu);
321#endif
322
323
324
325
326
327
/*
 * arm_clear_exclusive: clear the exclusive monitor.
 * -1 is used as the "no outstanding exclusive access" marker.
 */
static inline void arm_clear_exclusive(CPUARMState *env)
{
    env->exclusive_addr = -1;
}
332
333
334
335
336
337
/*
 * ARMFaultType: MMU fault type, recorded in ARMMMUFaultInfo and later
 * encoded into a short- or long-format FSC by arm_fi_to_sfsc()/arm_fi_to_lfsc().
 */
typedef enum ARMFaultType {
    ARMFault_None,
    ARMFault_AccessFlag,
    ARMFault_Alignment,
    ARMFault_Background,
    ARMFault_Domain,
    ARMFault_Permission,
    ARMFault_Translation,
    ARMFault_AddressSize,
    ARMFault_SyncExternal,
    ARMFault_SyncExternalOnWalk,
    ARMFault_SyncParity,
    ARMFault_SyncParityOnWalk,
    ARMFault_AsyncParity,
    ARMFault_AsyncExternal,
    ARMFault_Debug,
    ARMFault_TLBConflict,
    ARMFault_UnsuppAtomicUpdate,
    ARMFault_Lockdown,
    ARMFault_Exclusive,
    ARMFault_ICacheMaint,
    ARMFault_QEMU_NSCExec, /* M-profile pseudo-fault: see M_FAKE_FSR_NSC_EXEC */
    ARMFault_QEMU_SFault,  /* M-profile pseudo-fault: see M_FAKE_FSR_SFAULT */
    ARMFault_GPCFOnWalk,
    ARMFault_GPCFOnOutput,
} ARMFaultType;
364
/* Subtype qualifier for ARMFault_GPCFOn{Walk,Output} (granule protection check). */
typedef enum ARMGPCF {
    GPCF_None,
    GPCF_AddressSize,
    GPCF_Walk,
    GPCF_EABT,
    GPCF_Fail,
} ARMGPCF;
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
/**
 * ARMMMUFaultInfo: Information describing an ARM MMU fault.
 * @type: type of the fault
 * @gpcf: subtype for ARMFault_GPCFOn{Walk,Output}
 * @s2addr: address that faulted at stage 2
 * @paddr: physical address involved in a GPC fault
 * @paddr_space: physical address space of @paddr
 * @level: table walk level (translation/access-flag/permission faults)
 * @domain: domain of the fault address (short-descriptor translation only)
 * @stage2: true if we faulted at stage 2
 * @s1ptw: true if we faulted at stage 2 during a stage 1 page-table walk
 * @s1ns: NS attribute of the stage 1 access (see syndrome S1NS reporting)
 * @ea: true if the ExT (external abort type) bit should be set in the syndrome
 */
typedef struct ARMMMUFaultInfo ARMMMUFaultInfo;
struct ARMMMUFaultInfo {
    ARMFaultType type;
    ARMGPCF gpcf;
    target_ulong s2addr;
    target_ulong paddr;
    ARMSecuritySpace paddr_space;
    int level;
    int domain;
    bool stage2;
    bool s1ptw;
    bool s1ns;
    bool ea;
};
401
402
403
404
405
406
407
/**
 * arm_fi_to_sfsc: Convert fault info struct to short-descriptor format FSC.
 * The returned value includes the domain field (bits [7:4]) and, for
 * external/parity aborts, the ExT bit at bit 12.
 */
static inline uint32_t arm_fi_to_sfsc(ARMMMUFaultInfo *fi)
{
    uint32_t fsc;

    switch (fi->type) {
    case ARMFault_None:
        return 0;
    case ARMFault_AccessFlag:
        fsc = fi->level == 1 ? 0x3 : 0x6;
        break;
    case ARMFault_Alignment:
        fsc = 0x1;
        break;
    case ARMFault_Permission:
        fsc = fi->level == 1 ? 0xd : 0xf;
        break;
    case ARMFault_Domain:
        fsc = fi->level == 1 ? 0x9 : 0xb;
        break;
    case ARMFault_Translation:
        fsc = fi->level == 1 ? 0x5 : 0x7;
        break;
    case ARMFault_SyncExternal:
        fsc = 0x8 | (fi->ea << 12);
        break;
    case ARMFault_SyncExternalOnWalk:
        fsc = fi->level == 1 ? 0xc : 0xe;
        fsc |= (fi->ea << 12);
        break;
    case ARMFault_SyncParity:
        fsc = 0x409;
        break;
    case ARMFault_SyncParityOnWalk:
        fsc = fi->level == 1 ? 0x40c : 0x40e;
        break;
    case ARMFault_AsyncParity:
        fsc = 0x408;
        break;
    case ARMFault_AsyncExternal:
        fsc = 0x406 | (fi->ea << 12);
        break;
    case ARMFault_Debug:
        fsc = 0x2;
        break;
    case ARMFault_TLBConflict:
        fsc = 0x400;
        break;
    case ARMFault_Lockdown:
        fsc = 0x404;
        break;
    case ARMFault_Exclusive:
        fsc = 0x405;
        break;
    case ARMFault_ICacheMaint:
        fsc = 0x4;
        break;
    case ARMFault_Background:
        fsc = 0x0;
        break;
    case ARMFault_QEMU_NSCExec:
        fsc = M_FAKE_FSR_NSC_EXEC;
        break;
    case ARMFault_QEMU_SFault:
        fsc = M_FAKE_FSR_SFAULT;
        break;
    default:
        /*
         * Other faults can't occur in a context that requires a
         * short-format status code.
         */
        g_assert_not_reached();
    }

    fsc |= (fi->domain << 4);
    return fsc;
}
483
484
485
486
487
488
/**
 * arm_fi_to_lfsc: Convert fault info struct to long-descriptor format FSC.
 * For fault types that encode the walk level, level -1 selects the
 * distinct "level -1" encoding; otherwise the level is OR'd into the
 * low bits.  Bit 9 (the LPAE format bit) is always set in the result.
 */
static inline uint32_t arm_fi_to_lfsc(ARMMMUFaultInfo *fi)
{
    uint32_t fsc;

    switch (fi->type) {
    case ARMFault_None:
        return 0;
    case ARMFault_AddressSize:
        assert(fi->level >= -1 && fi->level <= 3);
        if (fi->level < 0) {
            fsc = 0b101001;
        } else {
            fsc = fi->level;
        }
        break;
    case ARMFault_AccessFlag:
        assert(fi->level >= 0 && fi->level <= 3);
        fsc = 0b001000 | fi->level;
        break;
    case ARMFault_Permission:
        assert(fi->level >= 0 && fi->level <= 3);
        fsc = 0b001100 | fi->level;
        break;
    case ARMFault_Translation:
        assert(fi->level >= -1 && fi->level <= 3);
        if (fi->level < 0) {
            fsc = 0b101011;
        } else {
            fsc = 0b000100 | fi->level;
        }
        break;
    case ARMFault_SyncExternal:
        fsc = 0x10 | (fi->ea << 12);
        break;
    case ARMFault_SyncExternalOnWalk:
        assert(fi->level >= -1 && fi->level <= 3);
        if (fi->level < 0) {
            fsc = 0b010011;
        } else {
            fsc = 0b010100 | fi->level;
        }
        fsc |= fi->ea << 12;
        break;
    case ARMFault_SyncParity:
        fsc = 0x18;
        break;
    case ARMFault_SyncParityOnWalk:
        assert(fi->level >= -1 && fi->level <= 3);
        if (fi->level < 0) {
            fsc = 0b011011;
        } else {
            fsc = 0b011100 | fi->level;
        }
        break;
    case ARMFault_AsyncParity:
        fsc = 0x19;
        break;
    case ARMFault_AsyncExternal:
        fsc = 0x11 | (fi->ea << 12);
        break;
    case ARMFault_Alignment:
        fsc = 0x21;
        break;
    case ARMFault_Debug:
        fsc = 0x22;
        break;
    case ARMFault_TLBConflict:
        fsc = 0x30;
        break;
    case ARMFault_UnsuppAtomicUpdate:
        fsc = 0x31;
        break;
    case ARMFault_Lockdown:
        fsc = 0x34;
        break;
    case ARMFault_Exclusive:
        fsc = 0x35;
        break;
    case ARMFault_GPCFOnWalk:
        assert(fi->level >= -1 && fi->level <= 3);
        if (fi->level < 0) {
            fsc = 0b100011;
        } else {
            fsc = 0b100100 | fi->level;
        }
        break;
    case ARMFault_GPCFOnOutput:
        fsc = 0b101000;
        break;
    default:
        /*
         * Other faults can't occur in a context that requires a
         * long-format status code.
         */
        g_assert_not_reached();
    }

    fsc |= 1 << 9; /* LPAE format */
    return fsc;
}
588
static inline bool arm_extabort_type(MemTxResult result)
{
    /*
     * The EA (ExT) bit in fault status registers is an IMPDEF
     * classification of external aborts: here we report a decode error
     * as 0 and everything else (e.g. slave/device error) as 1.
     */
    return result != MEMTX_DECODE_ERROR;
}
598
599#ifdef CONFIG_USER_ONLY
600void arm_cpu_record_sigsegv(CPUState *cpu, vaddr addr,
601 MMUAccessType access_type,
602 bool maperr, uintptr_t ra);
603void arm_cpu_record_sigbus(CPUState *cpu, vaddr addr,
604 MMUAccessType access_type, uintptr_t ra);
605#else
606bool arm_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
607 MMUAccessType access_type, int mmu_idx,
608 bool probe, uintptr_t retaddr);
609#endif
610
/* Strip the ARM_MMU_IDX_* type bits, leaving the core (softmmu) index. */
static inline int arm_to_core_mmu_idx(ARMMMUIdx mmu_idx)
{
    return mmu_idx & ARM_MMU_IDX_COREIDX_MASK;
}
615
616static inline ARMMMUIdx core_to_arm_mmu_idx(CPUARMState *env, int mmu_idx)
617{
618 if (arm_feature(env, ARM_FEATURE_M)) {
619 return mmu_idx | ARM_MMU_IDX_M;
620 } else {
621 return mmu_idx | ARM_MMU_IDX_A;
622 }
623}
624
static inline ARMMMUIdx core_to_aa64_mmu_idx(int mmu_idx)
{
    /* AArch64 is always a-profile. */
    return mmu_idx | ARM_MMU_IDX_A;
}
630
631int arm_mmu_idx_to_el(ARMMMUIdx mmu_idx);
632
633
634ARMMMUIdx arm_v7m_mmu_idx_for_secstate(CPUARMState *env, bool secstate);
635
636
637
638
639
640bool arm_s1_regime_using_lpae_format(CPUARMState *env, ARMMMUIdx mmu_idx);
641
642
643G_NORETURN void arm_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr,
644 MMUAccessType access_type,
645 int mmu_idx, uintptr_t retaddr);
646
647#ifndef CONFIG_USER_ONLY
648
649
650
651
652void arm_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
653 vaddr addr, unsigned size,
654 MMUAccessType access_type,
655 int mmu_idx, MemTxAttrs attrs,
656 MemTxResult response, uintptr_t retaddr);
657#endif
658
659
/* Run all registered pre-EL-change hooks (FOREACH_SAFE: a hook may unregister itself). */
static inline void arm_call_pre_el_change_hook(ARMCPU *cpu)
{
    ARMELChangeHook *hook, *next;
    QLIST_FOREACH_SAFE(hook, &cpu->pre_el_change_hooks, node, next) {
        hook->hook(cpu, hook->opaque);
    }
}
/* Run all registered EL-change hooks (FOREACH_SAFE: a hook may unregister itself). */
static inline void arm_call_el_change_hook(ARMCPU *cpu)
{
    ARMELChangeHook *hook, *next;
    QLIST_FOREACH_SAFE(hook, &cpu->el_change_hooks, node, next) {
        hook->hook(cpu, hook->opaque);
    }
}
674
675
/* Return true if this translation regime has two VA ranges (TTBR0 and TTBR1). */
static inline bool regime_has_2_ranges(ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_Stage1_E0:
    case ARMMMUIdx_Stage1_E1:
    case ARMMMUIdx_Stage1_E1_PAN:
    case ARMMMUIdx_E10_0:
    case ARMMMUIdx_E10_1:
    case ARMMMUIdx_E10_1_PAN:
    case ARMMMUIdx_E20_0:
    case ARMMMUIdx_E20_2:
    case ARMMMUIdx_E20_2_PAN:
        return true;
    default:
        return false;
    }
}
693
/* Return true if this mmu index is a PAN (Privileged Access Never) variant. */
static inline bool regime_is_pan(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_Stage1_E1_PAN:
    case ARMMMUIdx_E10_1_PAN:
    case ARMMMUIdx_E20_2_PAN:
        return true;
    default:
        return false;
    }
}
705
706static inline bool regime_is_stage2(ARMMMUIdx mmu_idx)
707{
708 return mmu_idx == ARMMMUIdx_Stage2 || mmu_idx == ARMMMUIdx_Stage2_S;
709}
710
711
/* Return the exception level which controls this address translation regime */
static inline uint32_t regime_el(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_E20_0:
    case ARMMMUIdx_E20_2:
    case ARMMMUIdx_E20_2_PAN:
    case ARMMMUIdx_Stage2:
    case ARMMMUIdx_Stage2_S:
    case ARMMMUIdx_E2:
        return 2;
    case ARMMMUIdx_E3:
        return 3;
    case ARMMMUIdx_E10_0:
    case ARMMMUIdx_Stage1_E0:
        /* When EL3 is AArch32, secure EL1&0 is controlled by EL3. */
        return arm_el_is_aa64(env, 3) || !arm_is_secure_below_el3(env) ? 1 : 3;
    case ARMMMUIdx_Stage1_E1:
    case ARMMMUIdx_Stage1_E1_PAN:
    case ARMMMUIdx_E10_1:
    case ARMMMUIdx_E10_1_PAN:
    case ARMMMUIdx_MPrivNegPri:
    case ARMMMUIdx_MUserNegPri:
    case ARMMMUIdx_MPriv:
    case ARMMMUIdx_MUser:
    case ARMMMUIdx_MSPrivNegPri:
    case ARMMMUIdx_MSUserNegPri:
    case ARMMMUIdx_MSPriv:
    case ARMMMUIdx_MSUser:
        return 1;
    default:
        g_assert_not_reached();
    }
}
744
/* Return true if this translation regime describes unprivileged accesses. */
static inline bool regime_is_user(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_E20_0:
    case ARMMMUIdx_Stage1_E0:
    case ARMMMUIdx_MUser:
    case ARMMMUIdx_MSUser:
    case ARMMMUIdx_MUserNegPri:
    case ARMMMUIdx_MSUserNegPri:
        return true;
    default:
        return false;
    /*
     * NOTE(review): E10_* indexes appear to be expected to have been
     * converted to their Stage1_* equivalents before asking this
     * question, so reaching here with them is a bug.
     */
    case ARMMMUIdx_E10_0:
    case ARMMMUIdx_E10_1:
    case ARMMMUIdx_E10_1_PAN:
        g_assert_not_reached();
    }
}
763
764
/* Return the SCTLR value which controls this address translation regime */
static inline uint64_t regime_sctlr(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    return env->cp15.sctlr_el[regime_el(env, mmu_idx)];
}
769
770
771
772
773
774
775#define VTCR_SHARED_FIELD_MASK \
776 (R_VTCR_IRGN0_MASK | R_VTCR_ORGN0_MASK | R_VTCR_SH0_MASK | \
777 R_VTCR_PS_MASK | R_VTCR_VS_MASK | R_VTCR_HA_MASK | R_VTCR_HD_MASK | \
778 R_VTCR_DS_MASK)
779
780
/* Return the value of the TCR controlling this translation regime */
static inline uint64_t regime_tcr(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    if (mmu_idx == ARMMMUIdx_Stage2) {
        return env->cp15.vtcr_el2;
    }
    if (mmu_idx == ARMMMUIdx_Stage2_S) {
        /*
         * Secure stage 2 shares fields from VTCR_EL2. We merge those
         * in with the VSTCR_EL2 value to synthesize a single VTCR_EL2
         * format value so the callers don't need to special case this.
         *
         * If a future architecture change defines bits in VSTCR_EL2
         * that overlap with these VTCR_EL2 fields we may need to
         * revisit this.
         */
        uint64_t v = env->cp15.vstcr_el2 & ~VTCR_SHARED_FIELD_MASK;
        v |= env->cp15.vtcr_el2 & VTCR_SHARED_FIELD_MASK;
        return v;
    }
    return env->cp15.tcr_el[regime_el(env, mmu_idx)];
}
801
802
/*
 * Return true if this address translation regime is using LPAE
 * (long-descriptor) format page tables: always for EL2 and AArch64
 * regimes and for v8 PMSA, otherwise dependent on TCR.EAE.
 */
static inline bool regime_using_lpae_format(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    int el = regime_el(env, mmu_idx);
    if (el == 2 || arm_el_is_aa64(env, el)) {
        return true;
    }
    if (arm_feature(env, ARM_FEATURE_PMSA) &&
        arm_feature(env, ARM_FEATURE_V8)) {
        return true;
    }
    if (arm_feature(env, ARM_FEATURE_LPAE)
        && (regime_tcr(env, mmu_idx) & TTBCR_EAE)) {
        return true;
    }
    return false;
}
819
820
821
822
823
824
825static inline int arm_num_brps(ARMCPU *cpu)
826{
827 if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
828 return FIELD_EX64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, BRPS) + 1;
829 } else {
830 return FIELD_EX32(cpu->isar.dbgdidr, DBGDIDR, BRPS) + 1;
831 }
832}
833
834
835
836
837
838
839static inline int arm_num_wrps(ARMCPU *cpu)
840{
841 if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
842 return FIELD_EX64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, WRPS) + 1;
843 } else {
844 return FIELD_EX32(cpu->isar.dbgdidr, DBGDIDR, WRPS) + 1;
845 }
846}
847
848
849
850
851
852
853static inline int arm_num_ctx_cmps(ARMCPU *cpu)
854{
855 if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
856 return FIELD_EX64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, CTX_CMPS) + 1;
857 } else {
858 return FIELD_EX32(cpu->isar.dbgdidr, DBGDIDR, CTX_CMPS) + 1;
859 }
860}
861
862
863
864
865
866
/* Return true if the M-profile CPU is currently using the Process stack. */
static inline bool v7m_using_psp(CPUARMState *env)
{
    /*
     * Handler mode always uses the main stack; for thread mode the
     * CONTROL.SPSEL bit (of the current security state) determines the
     * answer.
     */
    return !arm_v7m_is_handler_mode(env) &&
        env->v7m.control[env->v7m.secure] & R_V7M_CONTROL_SPSEL_MASK;
}
877
878
879
880
881
882
883static inline uint32_t v7m_sp_limit(CPUARMState *env)
884{
885 if (v7m_using_psp(env)) {
886 return env->v7m.psplim[env->v7m.secure];
887 } else {
888 return env->v7m.msplim[env->v7m.secure];
889 }
890}
891
892
893
894
895
896
/*
 * Return true if the CPACR of the given security state permits access
 * to the FPU (coprocessor field at bits [21:20]) at this privilege level.
 */
static inline bool v7m_cpacr_pass(CPUARMState *env,
                                  bool is_secure, bool is_priv)
{
    switch (extract32(env->v7m.cpacr[is_secure], 20, 2)) {
    case 0:
    case 2: /* treated the same as 0 (access denied) */
        return false;
    case 1: /* privileged access only */
        return is_priv;
    case 3: /* full access */
        return true;
    default:
        g_assert_not_reached();
    }
}
912
913
914
915
916
917
918
919
920
921static inline const char *aarch32_mode_name(uint32_t psr)
922{
923 static const char cpu_mode_names[16][4] = {
924 "usr", "fiq", "irq", "svc", "???", "???", "mon", "abt",
925 "???", "???", "hyp", "und", "???", "???", "???", "sys"
926 };
927
928 return cpu_mode_names[psr & 0xf];
929}
930
931
932
933
934
935
936
937
938void arm_cpu_update_virq(ARMCPU *cpu);
939
940
941
942
943
944
945
946
947void arm_cpu_update_vfiq(ARMCPU *cpu);
948
949
950
951
952
953
954
955void arm_cpu_update_vserr(ARMCPU *cpu);
956
957
958
959
960
961
962
963
964ARMMMUIdx arm_mmu_idx_el(CPUARMState *env, int el);
965
966
967
968
969
970
971
972ARMMMUIdx arm_mmu_idx(CPUARMState *env);
973
974
975
976
977
978
979
980#ifdef CONFIG_USER_ONLY
/* User-only emulation: the only translation regime is Stage1_E0. */
static inline ARMMMUIdx stage_1_mmu_idx(ARMMMUIdx mmu_idx)
{
    return ARMMMUIdx_Stage1_E0;
}
/* User-only emulation: the only translation regime is Stage1_E0. */
static inline ARMMMUIdx arm_stage1_mmu_idx(CPUARMState *env)
{
    return ARMMMUIdx_Stage1_E0;
}
989#else
990ARMMMUIdx stage_1_mmu_idx(ARMMMUIdx mmu_idx);
991ARMMMUIdx arm_stage1_mmu_idx(CPUARMState *env);
992#endif
993
994
995
996
997
998
999
1000
/* Return true if this mmu index is the stage 1 of a 2-stage translation. */
static inline bool arm_mmu_idx_is_stage1_of_2(ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_Stage1_E0:
    case ARMMMUIdx_Stage1_E1:
    case ARMMMUIdx_Stage1_E1_PAN:
        return true;
    default:
        return false;
    }
}
1012
/*
 * Return the mask of CPSR bits that are valid to write for a CPU with
 * the given feature bits and ID registers; bits outside this mask are
 * RES0 for that CPU.
 */
static inline uint32_t aarch32_cpsr_valid_mask(uint64_t features,
                                               const ARMISARegisters *id)
{
    uint32_t valid = CPSR_M | CPSR_AIF | CPSR_IL | CPSR_NZCV;

    /* Thumb state bit */
    if ((features >> ARM_FEATURE_V4T) & 1) {
        valid |= CPSR_T;
    }
    /* Saturation flag */
    if ((features >> ARM_FEATURE_V5) & 1) {
        valid |= CPSR_Q;
    }
    /* Endianness and SIMD GE flags */
    if ((features >> ARM_FEATURE_V6) & 1) {
        valid |= CPSR_E | CPSR_GE;
    }
    /* IT state bits */
    if ((features >> ARM_FEATURE_THUMB2) & 1) {
        valid |= CPSR_IT;
    }
    if (isar_feature_aa32_jazelle(id)) {
        valid |= CPSR_J;
    }
    if (isar_feature_aa32_pan(id)) {
        valid |= CPSR_PAN;
    }
    if (isar_feature_aa32_dit(id)) {
        valid |= CPSR_DIT;
    }
    if (isar_feature_aa32_ssbs(id)) {
        valid |= CPSR_SSBS;
    }

    return valid;
}
1045
/*
 * Return the mask of PSTATE bits (SPSR format) that are valid for a CPU
 * with the given ID registers; optional bits require the matching feature.
 */
static inline uint32_t aarch64_pstate_valid_mask(const ARMISARegisters *id)
{
    uint32_t valid;

    valid = PSTATE_M | PSTATE_DAIF | PSTATE_IL | PSTATE_SS | PSTATE_NZCV;
    if (isar_feature_aa64_bti(id)) {
        valid |= PSTATE_BTYPE;
    }
    if (isar_feature_aa64_pan(id)) {
        valid |= PSTATE_PAN;
    }
    if (isar_feature_aa64_uao(id)) {
        valid |= PSTATE_UAO;
    }
    if (isar_feature_aa64_dit(id)) {
        valid |= PSTATE_DIT;
    }
    if (isar_feature_aa64_ssbs(id)) {
        valid |= PSTATE_SSBS;
    }
    if (isar_feature_aa64_mte(id)) {
        valid |= PSTATE_TCO;
    }

    return valid;
}
1072
1073
typedef enum ARMGranuleSize {
    /* Same order as TG0 encoding */
    Gran4K,
    Gran64K,
    Gran16K,
    GranInvalid,
} ARMGranuleSize;
1081
1082
1083
1084
1085
1086
1087
1088static inline int arm_granule_bits(ARMGranuleSize gran)
1089{
1090 switch (gran) {
1091 case Gran64K:
1092 return 16;
1093 case Gran16K:
1094 return 14;
1095 case Gran4K:
1096 return 12;
1097 default:
1098 g_assert_not_reached();
1099 }
1100}
1101
1102
1103
1104
1105
/*
 * Parameters of a given virtual address, as extracted from the
 * translation control register (TCR) for a given regime.
 */
typedef struct ARMVAParameters {
    unsigned tsz    : 8;  /* TnSZ: size offset of the VA region */
    unsigned ps     : 3;  /* physical address size field */
    unsigned sh     : 2;  /* shareability field */
    unsigned select : 1;  /* which TTBR / VA range was selected */
    bool tbi        : 1;  /* top byte ignore */
    bool epd        : 1;  /* table walk disable */
    bool hpd        : 1;  /* hierarchical permission disable */
    bool tsz_oob    : 1;  /* tsz was out of range (clamped) */
    bool ds         : 1;
    bool ha         : 1;  /* hardware access flag update */
    bool hd         : 1;  /* hardware dirty state update */
    ARMGranuleSize gran : 2;
} ARMVAParameters;
1120
1121
1122
1123
1124
1125
1126
1127
1128
1129
1130ARMVAParameters aa64_va_parameters(CPUARMState *env, uint64_t va,
1131 ARMMMUIdx mmu_idx, bool data,
1132 bool el1_is_aa32);
1133
1134int aa64_va_parameter_tbi(uint64_t tcr, ARMMMUIdx mmu_idx);
1135int aa64_va_parameter_tbid(uint64_t tcr, ARMMMUIdx mmu_idx);
1136int aa64_va_parameter_tcma(uint64_t tcr, ARMMMUIdx mmu_idx);
1137
1138
/*
 * Return true if MTE allocation-tag access is enabled at @el, given the
 * relevant @sctlr value.  Access may be disabled by SCR_EL3.ATA,
 * HCR_EL2.ATA (when EL2 is enabled and not in the E2H+TGE regime), or
 * the SCTLR ATA/ATA0 bit appropriate to the EL.
 */
static inline bool allocation_tag_access_enabled(CPUARMState *env, int el,
                                                 uint64_t sctlr)
{
    if (el < 3
        && arm_feature(env, ARM_FEATURE_EL3)
        && !(env->cp15.scr_el3 & SCR_ATA)) {
        return false;
    }
    if (el < 2 && arm_is_el2_enabled(env)) {
        uint64_t hcr = arm_hcr_el2_eff(env);
        if (!(hcr & HCR_ATA) && (!(hcr & HCR_E2H) || !(hcr & HCR_TGE))) {
            return false;
        }
    }
    /* EL0 is controlled by ATA0, other ELs by ATA */
    sctlr &= (el == 0 ? SCTLR_ATA0 : SCTLR_ATA);
    return sctlr != 0;
}
1156
1157#ifndef CONFIG_USER_ONLY
1158
1159
/* Security attributes for an address, as returned by v8m_security_lookup(). */
typedef struct V8M_SAttributes {
    bool subpage;     /* true if these attrs don't cover the whole TARGET_PAGE */
    bool ns;          /* Non-secure */
    bool nsc;         /* Non-secure callable */
    uint8_t sregion;  /* matching SAU region number */
    bool srvalid;     /* true if sregion is valid */
    uint8_t iregion;  /* matching IDAU region number */
    bool irvalid;     /* true if iregion is valid */
} V8M_SAttributes;
1169
1170void v8m_security_lookup(CPUARMState *env, uint32_t address,
1171 MMUAccessType access_type, ARMMMUIdx mmu_idx,
1172 bool secure, V8M_SAttributes *sattrs);
1173
1174
/* Cacheability and shareability attributes for a memory access */
typedef struct ARMCacheAttrs {
    /*
     * If is_s2_format is true, attrs is the S2 descriptor bits [5:2];
     * otherwise, attrs is the same as the MAIR_EL1 8-bit format.
     */
    unsigned int attrs:8;
    unsigned int shareability:2; /* as in the SH field of the VMSAv8-64 PTEs */
    bool is_s2_format:1;
    bool guarded:1;              /* guarded bit of the v8-64 PTE */
} ARMCacheAttrs;
1185
1186
/* Fields that are valid upon success of get_phys_addr() / lookups. */
typedef struct GetPhysAddrResult {
    CPUTLBEntryFull f;
    ARMCacheAttrs cacheattrs;
} GetPhysAddrResult;
1191
1192
1193
1194
1195
1196
1197
1198
1199
1200
1201
1202
1203
1204
1205
1206
1207
1208
1209
1210
1211
1212
1213
1214
1215bool get_phys_addr_with_secure(CPUARMState *env, target_ulong address,
1216 MMUAccessType access_type,
1217 ARMMMUIdx mmu_idx, bool is_secure,
1218 GetPhysAddrResult *result, ARMMMUFaultInfo *fi)
1219 __attribute__((nonnull));
1220
1221
1222
1223
1224
1225
1226
1227
1228
1229
1230
1231
1232bool get_phys_addr(CPUARMState *env, target_ulong address,
1233 MMUAccessType access_type, ARMMMUIdx mmu_idx,
1234 GetPhysAddrResult *result, ARMMMUFaultInfo *fi)
1235 __attribute__((nonnull));
1236
1237bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address,
1238 MMUAccessType access_type, ARMMMUIdx mmu_idx,
1239 bool is_secure, GetPhysAddrResult *result,
1240 ARMMMUFaultInfo *fi, uint32_t *mregion);
1241
1242void arm_log_exception(CPUState *cs);
1243
1244#endif
1245
1246
1247
1248
1249
1250#define GMID_EL1_BS 6
1251
1252
1253
1254
1255
1256
1257FIELD(PREDDESC, OPRSZ, 0, 6)
1258FIELD(PREDDESC, ESZ, 6, 2)
1259FIELD(PREDDESC, DATA, 8, 24)
1260
1261
1262
1263
1264
1265#define SVE_MTEDESC_SHIFT 5
1266
1267
1268FIELD(MTEDESC, MIDX, 0, 4)
1269FIELD(MTEDESC, TBI, 4, 2)
1270FIELD(MTEDESC, TCMA, 6, 2)
1271FIELD(MTEDESC, WRITE, 8, 1)
1272FIELD(MTEDESC, ALIGN, 9, 3)
1273FIELD(MTEDESC, SIZEM1, 12, SIMD_DATA_BITS - 12)
1274
1275bool mte_probe(CPUARMState *env, uint32_t desc, uint64_t ptr);
1276uint64_t mte_check(CPUARMState *env, uint32_t desc, uint64_t ptr, uintptr_t ra);
1277
/* Return the MTE allocation tag held in bits [59:56] of the pointer. */
static inline int allocation_tag_from_addr(uint64_t ptr)
{
    return extract64(ptr, 56, 4);
}
1282
/* Return a copy of @ptr with the allocation tag (bits [59:56]) replaced by @rtag. */
static inline uint64_t address_with_allocation_tag(uint64_t ptr, int rtag)
{
    return deposit64(ptr, 56, 4, rtag);
}
1287
1288
/* Return true if TBI is enabled for the address half selected by bit 55. */
static inline bool tbi_check(uint32_t desc, int bit55)
{
    return (desc >> (R_MTEDESC_TBI_SHIFT + bit55)) & 1;
}
1293
1294
/* Return true if the TCMA condition excludes this pointer from tag checking. */
static inline bool tcma_check(uint32_t desc, int bit55, int ptr_tag)
{
    /*
     * The pointer "matches" when (ptr_tag + bit55) mod 16 == 0, i.e.
     * tag 0b0000 with bit55 clear or tag 0b1111 with bit55 set, and the
     * TCMA bit for that address half is enabled.
     */
    bool match = ((ptr_tag + bit55) & 0xf) == 0;
    bool tcma = (desc >> (R_MTEDESC_TCMA_SHIFT + bit55)) & 1;
    return tcma && match;
}
1305
1306
1307
1308
1309
1310
1311
/*
 * For TBI, ideally we would do nothing.  Proper behaviour on fault is for
 * the tag to be present in the FAR_ELx register.  But for user-only mode
 * we do not have a TLB with which to implement this, so we must remove
 * the top byte now.
 */
static inline uint64_t useronly_clean_ptr(uint64_t ptr)
{
#ifdef CONFIG_USER_ONLY
    /* TBI0 is known to be enabled, while TBI1 is disabled. */
    ptr &= sextract64(ptr, 0, 56);
#endif
    return ptr;
}
1320
/* As useronly_clean_ptr(), but only clean if TBI is enabled for bit 55's half. */
static inline uint64_t useronly_maybe_clean_ptr(uint32_t desc, uint64_t ptr)
{
#ifdef CONFIG_USER_ONLY
    int64_t clean_ptr = sextract64(ptr, 0, 56);
    if (tbi_check(desc, clean_ptr < 0)) {
        ptr = clean_ptr;
    }
#endif
    return ptr;
}
1331
1332
/* Values for the MVE ECI (beats completed) execution state field */
enum MVEECIState {
    ECI_NONE = 0,     /* No completed beats */
    ECI_A0 = 1,       /* Completed: A0 */
    ECI_A0A1 = 2,     /* Completed: A0, A1 */
    /* 3 is reserved */
    ECI_A0A1A2 = 4,   /* Completed: A0, A1, A2 */
    ECI_A0A1A2B0 = 5, /* Completed: A0, A1, A2, B0 */
    /* All other values reserved */
};
1342
1343
1344#define PMCRN_MASK 0xf800
1345#define PMCRN_SHIFT 11
1346#define PMCRLP 0x80
1347#define PMCRLC 0x40
1348#define PMCRDP 0x20
1349#define PMCRX 0x10
1350#define PMCRD 0x8
1351#define PMCRC 0x4
1352#define PMCRP 0x2
1353#define PMCRE 0x1
1354
1355
1356
1357
1358#define PMCR_WRITABLE_MASK (PMCRLP | PMCRLC | PMCRDP | PMCRX | PMCRD | PMCRE)
1359
1360#define PMXEVTYPER_P 0x80000000
1361#define PMXEVTYPER_U 0x40000000
1362#define PMXEVTYPER_NSK 0x20000000
1363#define PMXEVTYPER_NSU 0x10000000
1364#define PMXEVTYPER_NSH 0x08000000
1365#define PMXEVTYPER_M 0x04000000
1366#define PMXEVTYPER_MT 0x02000000
1367#define PMXEVTYPER_EVTCOUNT 0x0000ffff
1368#define PMXEVTYPER_MASK (PMXEVTYPER_P | PMXEVTYPER_U | PMXEVTYPER_NSK | \
1369 PMXEVTYPER_NSU | PMXEVTYPER_NSH | \
1370 PMXEVTYPER_M | PMXEVTYPER_MT | \
1371 PMXEVTYPER_EVTCOUNT)
1372
1373#define PMCCFILTR 0xf8000000
1374#define PMCCFILTR_M PMXEVTYPER_M
1375#define PMCCFILTR_EL0 (PMCCFILTR | PMCCFILTR_M)
1376
1377static inline uint32_t pmu_num_counters(CPUARMState *env)
1378{
1379 ARMCPU *cpu = env_archcpu(env);
1380
1381 return (cpu->isar.reset_pmcr_el0 & PMCRN_MASK) >> PMCRN_SHIFT;
1382}
1383
1384
/* Bits allowed to be set/cleared for PMCNTEN* and PMINTEN* (bit 31 = cycle counter). */
static inline uint64_t pmu_counter_mask(CPUARMState *env)
{
    return (1ULL << 31) | ((1ULL << pmu_num_counters(env)) - 1);
}
1389
1390#ifdef TARGET_AARCH64
1391int arm_gen_dynamic_svereg_xml(CPUState *cpu, int base_reg);
1392int aarch64_gdb_get_sve_reg(CPUARMState *env, GByteArray *buf, int reg);
1393int aarch64_gdb_set_sve_reg(CPUARMState *env, uint8_t *buf, int reg);
1394int aarch64_gdb_get_fpu_reg(CPUARMState *env, GByteArray *buf, int reg);
1395int aarch64_gdb_set_fpu_reg(CPUARMState *env, uint8_t *buf, int reg);
1396int aarch64_gdb_get_pauth_reg(CPUARMState *env, GByteArray *buf, int reg);
1397int aarch64_gdb_set_pauth_reg(CPUARMState *env, uint8_t *buf, int reg);
1398void arm_cpu_sve_finalize(ARMCPU *cpu, Error **errp);
1399void arm_cpu_sme_finalize(ARMCPU *cpu, Error **errp);
1400void arm_cpu_pauth_finalize(ARMCPU *cpu, Error **errp);
1401void arm_cpu_lpa2_finalize(ARMCPU *cpu, Error **errp);
1402void aarch64_max_tcg_initfn(Object *obj);
1403void aarch64_add_pauth_properties(Object *obj);
1404void aarch64_add_sve_properties(Object *obj);
1405void aarch64_add_sme_properties(Object *obj);
1406#endif
1407
1408
1409uint32_t arm_v7m_mrs_control(CPUARMState *env, uint32_t secure);
1410
1411
1412
1413
1414
1415
1416
1417
1418uint32_t *arm_v7m_get_sp_ptr(CPUARMState *env, bool secure,
1419 bool threadmode, bool spsel);
1420
1421bool el_is_in_host(CPUARMState *env, int el);
1422
1423void aa32_max_features(ARMCPU *cpu);
1424int exception_target_el(CPUARMState *env);
1425bool arm_singlestep_active(CPUARMState *env);
1426bool arm_generate_debug_exceptions(CPUARMState *env);
1427
1428
1429
1430
1431
1432
1433
1434
/*
 * pauth_ptr_mask:
 * @param: parameters describing the translation regime
 *
 * Return a mask of the address bits that hold the pointer authentication
 * code: from the top of the VA region (64 - tsz) up to, but excluding,
 * the top byte when TBI is enabled.
 */
static inline uint64_t pauth_ptr_mask(ARMVAParameters param)
{
    int bot_pac_bit = 64 - param.tsz;
    int top_pac_bit = 64 - 8 * param.tbi;

    return MAKE_64BIT_MASK(bot_pac_bit, top_pac_bit - bot_pac_bit);
}
1442
1443
1444void define_debug_regs(ARMCPU *cpu);
1445
1446
1447static inline uint64_t arm_mdcr_el2_eff(CPUARMState *env)
1448{
1449 return arm_is_el2_enabled(env) ? env->cp15.mdcr_el2 : 0;
1450}
1451
1452
1453#define SVE_VQ_POW2_MAP \
1454 ((1 << (1 - 1)) | (1 << (2 - 1)) | \
1455 (1 << (4 - 1)) | (1 << (8 - 1)) | (1 << (16 - 1)))
1456
1457
1458
1459
/* Return true if the fine-grained trap (FEAT_FGT) bits are active for @el. */
static inline bool arm_fgt_active(CPUARMState *env, int el)
{
    /*
     * The fine-grained traps apply only when all of the following hold:
     *  - the CPU implements FEAT_FGT
     *  - we are below EL2 and EL2 is enabled
     *  - EL1 is AArch64
     *  - we are not in the HCR_EL2.{E2H,TGE} == {1,1} regime
     *  - there is no EL3, or SCR_EL3.FGTEN is set
     */
    return cpu_isar_feature(aa64_fgt, env_archcpu(env)) &&
        el < 2 && arm_is_el2_enabled(env) &&
        arm_el_is_aa64(env, 1) &&
        (arm_hcr_el2_eff(env) & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE) &&
        (!arm_feature(env, ARM_FEATURE_EL3) || (env->cp15.scr_el3 & SCR_FGTEN));
}
1476
1477void assert_hflags_rebuild_correctly(CPUARMState *env);
1478
1479
1480
1481
1482
1483
1484
1485
1486
1487
1488
1489
1490
1491
1492
1493
/*
 * State of one hardware breakpoint: the guest-visible DBGBCR (control)
 * and DBGBVR (value) register contents.
 */
typedef struct {
    uint64_t bcr;
    uint64_t bvr;
} HWBreakpoint;
1498
1499
1500
1501
1502
1503
1504
/*
 * State of one hardware watchpoint: the guest-visible DBGWCR (control)
 * and DBGWVR (value) register contents, plus the matching QEMU
 * CPUWatchpoint details.
 */
typedef struct {
    uint64_t wcr;
    uint64_t wvr;
    CPUWatchpoint details;
} HWWatchpoint;
1510
1511
1512extern int max_hw_bps, max_hw_wps;
1513extern GArray *hw_breakpoints, *hw_watchpoints;
1514
1515#define cur_hw_wps (hw_watchpoints->len)
1516#define cur_hw_bps (hw_breakpoints->len)
1517#define get_hw_bp(i) (&g_array_index(hw_breakpoints, HWBreakpoint, i))
1518#define get_hw_wp(i) (&g_array_index(hw_watchpoints, HWWatchpoint, i))
1519
1520bool find_hw_breakpoint(CPUState *cpu, target_ulong pc);
1521int insert_hw_breakpoint(target_ulong pc);
1522int delete_hw_breakpoint(target_ulong pc);
1523
1524bool check_watchpoint_in_range(int i, target_ulong addr);
1525CPUWatchpoint *find_hw_watchpoint(CPUState *cpu, target_ulong addr);
1526int insert_hw_watchpoint(target_ulong addr, target_ulong len, int type);
1527int delete_hw_watchpoint(target_ulong addr, target_ulong len, int type);
1528#endif
1529