1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25#ifndef TARGET_ARM_INTERNALS_H
26#define TARGET_ARM_INTERNALS_H
27
28#include "hw/registerfields.h"
29#include "tcg/tcg-gvec-desc.h"
30#include "syndrome.h"
31
32
33#define BANK_USRSYS 0
34#define BANK_SVC 1
35#define BANK_ABT 2
36#define BANK_UND 3
37#define BANK_IRQ 4
38#define BANK_FIQ 5
39#define BANK_HYP 6
40#define BANK_MON 7
41
42static inline bool excp_is_internal(int excp)
43{
44
45
46
47 return excp == EXCP_INTERRUPT
48 || excp == EXCP_HLT
49 || excp == EXCP_DEBUG
50 || excp == EXCP_HALTED
51 || excp == EXCP_EXCEPTION_EXIT
52 || excp == EXCP_KERNEL_TRAP
53 || excp == EXCP_SEMIHOST;
54}
55
56
57
58
59#define GTIMER_SCALE 16
60
61
62FIELD(V7M_CONTROL, NPRIV, 0, 1)
63FIELD(V7M_CONTROL, SPSEL, 1, 1)
64FIELD(V7M_CONTROL, FPCA, 2, 1)
65FIELD(V7M_CONTROL, SFPA, 3, 1)
66
67
68FIELD(V7M_EXCRET, ES, 0, 1)
69FIELD(V7M_EXCRET, RES0, 1, 1)
70FIELD(V7M_EXCRET, SPSEL, 2, 1)
71FIELD(V7M_EXCRET, MODE, 3, 1)
72FIELD(V7M_EXCRET, FTYPE, 4, 1)
73FIELD(V7M_EXCRET, DCRS, 5, 1)
74FIELD(V7M_EXCRET, S, 6, 1)
75FIELD(V7M_EXCRET, RES1, 7, 25)
76
77
78#define EXC_RETURN_MIN_MAGIC 0xff000000
79
80
81
82#define FNC_RETURN_MIN_MAGIC 0xfefffffe
83
84
85FIELD(DBGWCR, E, 0, 1)
86FIELD(DBGWCR, PAC, 1, 2)
87FIELD(DBGWCR, LSC, 3, 2)
88FIELD(DBGWCR, BAS, 5, 8)
89FIELD(DBGWCR, HMC, 13, 1)
90FIELD(DBGWCR, SSC, 14, 2)
91FIELD(DBGWCR, LBN, 16, 4)
92FIELD(DBGWCR, WT, 20, 1)
93FIELD(DBGWCR, MASK, 24, 5)
94FIELD(DBGWCR, SSCE, 29, 1)
95
96
97
98
99
100
101
102
103
104
105
106
107
108#define M_FAKE_FSR_NSC_EXEC 0xf
109#define M_FAKE_FSR_SFAULT 0xe
110
111
112
113
114
115
116
117G_NORETURN void raise_exception(CPUARMState *env, uint32_t excp,
118 uint32_t syndrome, uint32_t target_el);
119
120
121
122
123G_NORETURN void raise_exception_ra(CPUARMState *env, uint32_t excp,
124 uint32_t syndrome, uint32_t target_el,
125 uintptr_t ra);
126
127
128
129
130
131
132
133static inline unsigned int aarch64_banked_spsr_index(unsigned int el)
134{
135 static const unsigned int map[4] = {
136 [1] = BANK_SVC,
137 [2] = BANK_HYP,
138 [3] = BANK_MON,
139 };
140 assert(el >= 1 && el <= 3);
141 return map[el];
142}
143
144
/*
 * Map a CPSR mode value (ARM_CPU_MODE_*) to an index into the banked
 * register arrays (BANK_*). Aborts on an invalid mode value.
 */
static inline int bank_number(int mode)
{
    switch (mode) {
    case ARM_CPU_MODE_USR:
    case ARM_CPU_MODE_SYS:
        /* USR and SYS share one register bank */
        return BANK_USRSYS;
    case ARM_CPU_MODE_SVC:
        return BANK_SVC;
    case ARM_CPU_MODE_ABT:
        return BANK_ABT;
    case ARM_CPU_MODE_UND:
        return BANK_UND;
    case ARM_CPU_MODE_IRQ:
        return BANK_IRQ;
    case ARM_CPU_MODE_FIQ:
        return BANK_FIQ;
    case ARM_CPU_MODE_HYP:
        return BANK_HYP;
    case ARM_CPU_MODE_MON:
        return BANK_MON;
    }
    g_assert_not_reached();
}
168
169
170
171
172
173
174
175
176
177
178
179
180static inline int r14_bank_number(int mode)
181{
182 return (mode == ARM_CPU_MODE_HYP) ? BANK_USRSYS : bank_number(mode);
183}
184
185void arm_cpu_register_gdb_regs_for_features(ARMCPU *cpu);
186void arm_translate_init(void);
187
188#ifdef CONFIG_TCG
189void arm_cpu_synchronize_from_tb(CPUState *cs, const TranslationBlock *tb);
190#endif
191
192enum arm_fprounding {
193 FPROUNDING_TIEEVEN,
194 FPROUNDING_POSINF,
195 FPROUNDING_NEGINF,
196 FPROUNDING_ZERO,
197 FPROUNDING_TIEAWAY,
198 FPROUNDING_ODD
199};
200
201int arm_rmode_to_sf(int rmode);
202
203static inline void aarch64_save_sp(CPUARMState *env, int el)
204{
205 if (env->pstate & PSTATE_SP) {
206 env->sp_el[el] = env->xregs[31];
207 } else {
208 env->sp_el[0] = env->xregs[31];
209 }
210}
211
212static inline void aarch64_restore_sp(CPUARMState *env, int el)
213{
214 if (env->pstate & PSTATE_SP) {
215 env->xregs[31] = env->sp_el[el];
216 } else {
217 env->xregs[31] = env->sp_el[0];
218 }
219}
220
/*
 * Update PSTATE.SPSel from @imm (only bit 0 is used). Because xregs[31]
 * holds the currently-selected SP, a SPSel change must spill the old SP
 * to its bank and reload the new one.
 */
static inline void update_spsel(CPUARMState *env, uint32_t imm)
{
    unsigned int cur_el = arm_current_el(env);

    /* If the bit is not actually changing there is nothing to do. */
    if (!((imm ^ env->pstate) & PSTATE_SP)) {
        return;
    }
    aarch64_save_sp(env, cur_el);
    env->pstate = deposit32(env->pstate, 0, 1, imm);

    /*
     * EL0 cannot legally change SPSel; we rely on that being rejected
     * before we get here, hence the assertion.
     */
    assert(cur_el >= 1 && cur_el <= 3);
    aarch64_restore_sp(env, cur_el);
}
239
240
241
242
243
244
245
246
247unsigned int arm_pamax(ARMCPU *cpu);
248
249
250
251
252
/*
 * Return true if the current translation regime uses the LPAE
 * (long-descriptor) page table format: always true when EL1 is AArch64,
 * otherwise true when LPAE is implemented and TTBCR.EAE is set.
 */
static inline bool extended_addresses_enabled(CPUARMState *env)
{
    /* Uses the secure banked TCR when in Secure state. */
    uint64_t tcr = env->cp15.tcr_el[arm_is_secure(env) ? 3 : 1];
    return arm_el_is_aa64(env, 1) ||
           (arm_feature(env, ARM_FEATURE_LPAE) && (tcr & TTBCR_EAE));
}
259
260
261
262
263void hw_watchpoint_update(ARMCPU *cpu, int n);
264
265
266
267
268void hw_watchpoint_update_all(ARMCPU *cpu);
269
270
271
272void hw_breakpoint_update(ARMCPU *cpu, int n);
273
274
275
276
277void hw_breakpoint_update_all(ARMCPU *cpu);
278
279
280bool arm_debug_check_breakpoint(CPUState *cs);
281
282
283bool arm_debug_check_watchpoint(CPUState *cs, CPUWatchpoint *wp);
284
285
286
287
288vaddr arm_adjust_watchpoint_address(CPUState *cs, vaddr addr, int len);
289
290
291void arm_debug_excp_handler(CPUState *cs);
292
#if defined(CONFIG_USER_ONLY) || !defined(CONFIG_TCG)
/* PSCI emulation requires system-mode TCG; stub it out otherwise. */
static inline bool arm_is_psci_call(ARMCPU *cpu, int excp_type)
{
    return false;
}
static inline void arm_handle_psci_call(ARMCPU *cpu)
{
    g_assert_not_reached();
}
#else
/* Return true if the exception represents a PSCI call from the guest. */
bool arm_is_psci_call(ARMCPU *cpu, int excp_type);
/* Actually handle a PSCI call from the guest. */
void arm_handle_psci_call(ARMCPU *cpu);
#endif
308
309
310
311
312
313
/*
 * arm_clear_exclusive: clear the CPU's exclusive monitor by marking
 * the tracked exclusive address as invalid (-1).
 */
static inline void arm_clear_exclusive(CPUARMState *env)
{
    env->exclusive_addr = -1;
}
318
319
320
321
322
323
/*
 * ARMFaultType: the cause of an MMU fault, as recorded in
 * ARMMMUFaultInfo. These are converted to architectural fault status
 * codes by arm_fi_to_sfsc() / arm_fi_to_lfsc() below. The two
 * ARMFault_QEMU_* values are QEMU-internal M-profile security faults
 * that map onto the M_FAKE_FSR_* pseudo fault-status values.
 */
typedef enum ARMFaultType {
    ARMFault_None,
    ARMFault_AccessFlag,
    ARMFault_Alignment,
    ARMFault_Background,
    ARMFault_Domain,
    ARMFault_Permission,
    ARMFault_Translation,
    ARMFault_AddressSize,
    ARMFault_SyncExternal,
    ARMFault_SyncExternalOnWalk,
    ARMFault_SyncParity,
    ARMFault_SyncParityOnWalk,
    ARMFault_AsyncParity,
    ARMFault_AsyncExternal,
    ARMFault_Debug,
    ARMFault_TLBConflict,
    ARMFault_Lockdown,
    ARMFault_Exclusive,
    ARMFault_ICacheMaint,
    ARMFault_QEMU_NSCExec, /* v8M: NS executing in S&NSC memory */
    ARMFault_QEMU_SFault,  /* v8M: general SecureFault */
} ARMFaultType;
347
348
349
350
351
352
353
354
355
356
357
358
/*
 * ARMMMUFaultInfo: information describing an MMU fault, filled in by
 * the page-table walk code and converted to an architectural FSC via
 * arm_fi_to_sfsc() / arm_fi_to_lfsc().
 */
typedef struct ARMMMUFaultInfo ARMMMUFaultInfo;
struct ARMMMUFaultInfo {
    ARMFaultType type;    /* cause of the fault */
    target_ulong s2addr;  /* address at stage 2 — presumably valid only
                           * when stage2/s1ptw are set; confirm at caller */
    int level;            /* lookup level at which the fault occurred */
    int domain;           /* domain, used for short-format FSC (see arm_fi_to_sfsc) */
    bool stage2;          /* NOTE(review): looks like "fault was at stage 2" — confirm */
    bool s1ptw;           /* NOTE(review): stage-2 fault on a stage-1 walk — confirm */
    bool s1ns;            /* NOTE(review): stage 1 was NonSecure — confirm */
    bool ea;              /* external-abort EA bit (see arm_extabort_type) */
};
370
371
372
373
374
375
376
/*
 * arm_fi_to_sfsc: convert fault info to a short-format (VMSAv7) fault
 * status code, including the EA bit (bit 12) and domain (bits 7:4)
 * where applicable.
 */
static inline uint32_t arm_fi_to_sfsc(ARMMMUFaultInfo *fi)
{
    uint32_t fsc;

    switch (fi->type) {
    case ARMFault_None:
        return 0;
    case ARMFault_AccessFlag:
        /* Level-1 vs level-2 faults have distinct short-format codes. */
        fsc = fi->level == 1 ? 0x3 : 0x6;
        break;
    case ARMFault_Alignment:
        fsc = 0x1;
        break;
    case ARMFault_Permission:
        fsc = fi->level == 1 ? 0xd : 0xf;
        break;
    case ARMFault_Domain:
        fsc = fi->level == 1 ? 0x9 : 0xb;
        break;
    case ARMFault_Translation:
        fsc = fi->level == 1 ? 0x5 : 0x7;
        break;
    case ARMFault_SyncExternal:
        fsc = 0x8 | (fi->ea << 12);
        break;
    case ARMFault_SyncExternalOnWalk:
        fsc = fi->level == 1 ? 0xc : 0xe;
        fsc |= (fi->ea << 12);
        break;
    case ARMFault_SyncParity:
        fsc = 0x409;
        break;
    case ARMFault_SyncParityOnWalk:
        fsc = fi->level == 1 ? 0x40c : 0x40e;
        break;
    case ARMFault_AsyncParity:
        fsc = 0x408;
        break;
    case ARMFault_AsyncExternal:
        fsc = 0x406 | (fi->ea << 12);
        break;
    case ARMFault_Debug:
        fsc = 0x2;
        break;
    case ARMFault_TLBConflict:
        fsc = 0x400;
        break;
    case ARMFault_Lockdown:
        fsc = 0x404;
        break;
    case ARMFault_Exclusive:
        fsc = 0x405;
        break;
    case ARMFault_ICacheMaint:
        fsc = 0x4;
        break;
    case ARMFault_Background:
        fsc = 0x0;
        break;
    case ARMFault_QEMU_NSCExec:
        fsc = M_FAKE_FSR_NSC_EXEC;
        break;
    case ARMFault_QEMU_SFault:
        fsc = M_FAKE_FSR_SFAULT;
        break;
    default:
        /*
         * Other faults cannot be generated in a context that uses the
         * short-descriptor format; reaching here is a QEMU bug.
         */
        g_assert_not_reached();
    }

    fsc |= (fi->domain << 4);
    return fsc;
}
452
453
454
455
456
457
/*
 * arm_fi_to_lfsc: convert fault info to a long-format (LPAE) fault
 * status code. Bit 9 is set to flag the long-descriptor format, and the
 * EA bit (bit 12) is merged in for external aborts.
 */
static inline uint32_t arm_fi_to_lfsc(ARMMMUFaultInfo *fi)
{
    uint32_t fsc;

    switch (fi->type) {
    case ARMFault_None:
        return 0;
    case ARMFault_AddressSize:
        assert(fi->level >= -1 && fi->level <= 3);
        /* level -1 has its own dedicated encoding */
        if (fi->level < 0) {
            fsc = 0b101001;
        } else {
            fsc = fi->level;
        }
        break;
    case ARMFault_AccessFlag:
        assert(fi->level >= 0 && fi->level <= 3);
        fsc = 0b001000 | fi->level;
        break;
    case ARMFault_Permission:
        assert(fi->level >= 0 && fi->level <= 3);
        fsc = 0b001100 | fi->level;
        break;
    case ARMFault_Translation:
        assert(fi->level >= -1 && fi->level <= 3);
        if (fi->level < 0) {
            fsc = 0b101011;
        } else {
            fsc = 0b000100 | fi->level;
        }
        break;
    case ARMFault_SyncExternal:
        fsc = 0x10 | (fi->ea << 12);
        break;
    case ARMFault_SyncExternalOnWalk:
        assert(fi->level >= -1 && fi->level <= 3);
        if (fi->level < 0) {
            fsc = 0b010011;
        } else {
            fsc = 0b010100 | fi->level;
        }
        fsc |= fi->ea << 12;
        break;
    case ARMFault_SyncParity:
        fsc = 0x18;
        break;
    case ARMFault_SyncParityOnWalk:
        assert(fi->level >= -1 && fi->level <= 3);
        if (fi->level < 0) {
            fsc = 0b011011;
        } else {
            fsc = 0b011100 | fi->level;
        }
        break;
    case ARMFault_AsyncParity:
        fsc = 0x19;
        break;
    case ARMFault_AsyncExternal:
        fsc = 0x11 | (fi->ea << 12);
        break;
    case ARMFault_Alignment:
        fsc = 0x21;
        break;
    case ARMFault_Debug:
        fsc = 0x22;
        break;
    case ARMFault_TLBConflict:
        fsc = 0x30;
        break;
    case ARMFault_Lockdown:
        fsc = 0x34;
        break;
    case ARMFault_Exclusive:
        fsc = 0x35;
        break;
    default:
        /*
         * Other faults cannot be generated in a context that uses the
         * long-descriptor format; reaching here is a QEMU bug.
         */
        g_assert_not_reached();
    }

    fsc |= 1 << 9; /* flag: long-descriptor format */
    return fsc;
}
543
static inline bool arm_extabort_type(MemTxResult result)
{
    /*
     * Compute the EA (external abort type) bit for a fault syndrome:
     * 0 for a decode error (bad address), 1 for every other bus error.
     * The precise SLVERR/DECERR classification is IMPDEF; this is the
     * distinction QEMU chooses to make.
     */
    return result != MEMTX_DECODE_ERROR;
}
553
554#ifdef CONFIG_USER_ONLY
555void arm_cpu_record_sigsegv(CPUState *cpu, vaddr addr,
556 MMUAccessType access_type,
557 bool maperr, uintptr_t ra);
558void arm_cpu_record_sigbus(CPUState *cpu, vaddr addr,
559 MMUAccessType access_type, uintptr_t ra);
560#else
561bool arm_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
562 MMUAccessType access_type, int mmu_idx,
563 bool probe, uintptr_t retaddr);
564#endif
565
/* Strip the profile flag bits, leaving the core (softmmu) mmu index. */
static inline int arm_to_core_mmu_idx(ARMMMUIdx mmu_idx)
{
    return mmu_idx & ARM_MMU_IDX_COREIDX_MASK;
}
570
571static inline ARMMMUIdx core_to_arm_mmu_idx(CPUARMState *env, int mmu_idx)
572{
573 if (arm_feature(env, ARM_FEATURE_M)) {
574 return mmu_idx | ARM_MMU_IDX_M;
575 } else {
576 return mmu_idx | ARM_MMU_IDX_A;
577 }
578}
579
/* Reconstruct an ARMMMUIdx for an AArch64 regime from a core mmu index. */
static inline ARMMMUIdx core_to_aa64_mmu_idx(int mmu_idx)
{
    /* AArch64 is always A-profile, so no need to consult the CPU state. */
    return mmu_idx | ARM_MMU_IDX_A;
}
585
586int arm_mmu_idx_to_el(ARMMMUIdx mmu_idx);
587
588
589
590
591
592ARMMMUIdx arm_v7m_mmu_idx_all(CPUARMState *env,
593 bool secstate, bool priv, bool negpri);
594
595
596
597
598
599ARMMMUIdx arm_v7m_mmu_idx_for_secstate_and_priv(CPUARMState *env,
600 bool secstate, bool priv);
601
602
603ARMMMUIdx arm_v7m_mmu_idx_for_secstate(CPUARMState *env, bool secstate);
604
605
606bool regime_using_lpae_format(CPUARMState *env, ARMMMUIdx mmu_idx);
607
608
609
610
611
612bool arm_s1_regime_using_lpae_format(CPUARMState *env, ARMMMUIdx mmu_idx);
613
614
615G_NORETURN void arm_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr,
616 MMUAccessType access_type,
617 int mmu_idx, uintptr_t retaddr);
618
619
620
621
622
623void arm_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
624 vaddr addr, unsigned size,
625 MMUAccessType access_type,
626 int mmu_idx, MemTxAttrs attrs,
627 MemTxResult response, uintptr_t retaddr);
628
629
/* Invoke all registered pre-EL-change hooks with their opaque data. */
static inline void arm_call_pre_el_change_hook(ARMCPU *cpu)
{
    ARMELChangeHook *hook, *next;
    /* _SAFE iteration in case a hook unregisters itself while running. */
    QLIST_FOREACH_SAFE(hook, &cpu->pre_el_change_hooks, node, next) {
        hook->hook(cpu, hook->opaque);
    }
}
/* Invoke all registered (post) EL-change hooks with their opaque data. */
static inline void arm_call_el_change_hook(ARMCPU *cpu)
{
    ARMELChangeHook *hook, *next;
    /* _SAFE iteration in case a hook unregisters itself while running. */
    QLIST_FOREACH_SAFE(hook, &cpu->el_change_hooks, node, next) {
        hook->hook(cpu, hook->opaque);
    }
}
644
645
/* Return true if this address translation regime has two VA ranges. */
static inline bool regime_has_2_ranges(ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_Stage1_E0:
    case ARMMMUIdx_Stage1_E1:
    case ARMMMUIdx_Stage1_E1_PAN:
    case ARMMMUIdx_Stage1_SE0:
    case ARMMMUIdx_Stage1_SE1:
    case ARMMMUIdx_Stage1_SE1_PAN:
    case ARMMMUIdx_E10_0:
    case ARMMMUIdx_E10_1:
    case ARMMMUIdx_E10_1_PAN:
    case ARMMMUIdx_E20_0:
    case ARMMMUIdx_E20_2:
    case ARMMMUIdx_E20_2_PAN:
    case ARMMMUIdx_SE10_0:
    case ARMMMUIdx_SE10_1:
    case ARMMMUIdx_SE10_1_PAN:
    case ARMMMUIdx_SE20_0:
    case ARMMMUIdx_SE20_2:
    case ARMMMUIdx_SE20_2_PAN:
        return true;
    default:
        return false;
    }
}
672
673
/* Return true if this address translation regime is secure. */
static inline bool regime_is_secure(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_E10_0:
    case ARMMMUIdx_E10_1:
    case ARMMMUIdx_E10_1_PAN:
    case ARMMMUIdx_E20_0:
    case ARMMMUIdx_E20_2:
    case ARMMMUIdx_E20_2_PAN:
    case ARMMMUIdx_Stage1_E0:
    case ARMMMUIdx_Stage1_E1:
    case ARMMMUIdx_Stage1_E1_PAN:
    case ARMMMUIdx_E2:
    case ARMMMUIdx_Stage2:
    case ARMMMUIdx_MPrivNegPri:
    case ARMMMUIdx_MUserNegPri:
    case ARMMMUIdx_MPriv:
    case ARMMMUIdx_MUser:
        return false;
    case ARMMMUIdx_SE3:
    case ARMMMUIdx_SE10_0:
    case ARMMMUIdx_SE10_1:
    case ARMMMUIdx_SE10_1_PAN:
    case ARMMMUIdx_SE20_0:
    case ARMMMUIdx_SE20_2:
    case ARMMMUIdx_SE20_2_PAN:
    case ARMMMUIdx_Stage1_SE0:
    case ARMMMUIdx_Stage1_SE1:
    case ARMMMUIdx_Stage1_SE1_PAN:
    case ARMMMUIdx_SE2:
    case ARMMMUIdx_Stage2_S:
    case ARMMMUIdx_MSPrivNegPri:
    case ARMMMUIdx_MSUserNegPri:
    case ARMMMUIdx_MSPriv:
    case ARMMMUIdx_MSUser:
        return true;
    default:
        g_assert_not_reached();
    }
}
714
/* Return true if this translation regime has PAN (Privileged Access Never)
 * semantics applied. */
static inline bool regime_is_pan(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_Stage1_E1_PAN:
    case ARMMMUIdx_Stage1_SE1_PAN:
    case ARMMMUIdx_E10_1_PAN:
    case ARMMMUIdx_E20_2_PAN:
    case ARMMMUIdx_SE10_1_PAN:
    case ARMMMUIdx_SE20_2_PAN:
        return true;
    default:
        return false;
    }
}
729
730
/* Return the exception level which controls this address translation
 * regime. */
static inline uint32_t regime_el(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_SE20_0:
    case ARMMMUIdx_SE20_2:
    case ARMMMUIdx_SE20_2_PAN:
    case ARMMMUIdx_E20_0:
    case ARMMMUIdx_E20_2:
    case ARMMMUIdx_E20_2_PAN:
    case ARMMMUIdx_Stage2:
    case ARMMMUIdx_Stage2_S:
    case ARMMMUIdx_SE2:
    case ARMMMUIdx_E2:
        return 2;
    case ARMMMUIdx_SE3:
        return 3;
    case ARMMMUIdx_SE10_0:
    case ARMMMUIdx_Stage1_SE0:
        /* Secure EL1&0: controlled from EL3 when EL3 is AArch32. */
        return arm_el_is_aa64(env, 3) ? 1 : 3;
    case ARMMMUIdx_SE10_1:
    case ARMMMUIdx_SE10_1_PAN:
    case ARMMMUIdx_Stage1_E0:
    case ARMMMUIdx_Stage1_E1:
    case ARMMMUIdx_Stage1_E1_PAN:
    case ARMMMUIdx_Stage1_SE1:
    case ARMMMUIdx_Stage1_SE1_PAN:
    case ARMMMUIdx_E10_0:
    case ARMMMUIdx_E10_1:
    case ARMMMUIdx_E10_1_PAN:
    case ARMMMUIdx_MPrivNegPri:
    case ARMMMUIdx_MUserNegPri:
    case ARMMMUIdx_MPriv:
    case ARMMMUIdx_MUser:
    case ARMMMUIdx_MSPrivNegPri:
    case ARMMMUIdx_MSUserNegPri:
    case ARMMMUIdx_MSPriv:
    case ARMMMUIdx_MSUser:
        return 1;
    default:
        g_assert_not_reached();
    }
}
773
774
/* Return the SCTLR value which controls this address translation regime. */
static inline uint64_t regime_sctlr(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    return env->cp15.sctlr_el[regime_el(env, mmu_idx)];
}
779
780
781
782
783
784
785#define VTCR_SHARED_FIELD_MASK \
786 (R_VTCR_IRGN0_MASK | R_VTCR_ORGN0_MASK | R_VTCR_SH0_MASK | \
787 R_VTCR_PS_MASK | R_VTCR_VS_MASK | R_VTCR_HA_MASK | R_VTCR_HD_MASK | \
788 R_VTCR_DS_MASK)
789
790
/* Return the TCR value controlling this address translation regime. */
static inline uint64_t regime_tcr(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    if (mmu_idx == ARMMMUIdx_Stage2) {
        return env->cp15.vtcr_el2;
    }
    if (mmu_idx == ARMMMUIdx_Stage2_S) {
        /*
         * Secure stage 2 shares some fields with VTCR_EL2 (the ones in
         * VTCR_SHARED_FIELD_MASK): merge them into the VSTCR_EL2 value
         * so callers get a single, complete VTCR-format register and
         * need not special-case the secure stage 2 regime.
         */
        uint64_t v = env->cp15.vstcr_el2 & ~VTCR_SHARED_FIELD_MASK;
        v |= env->cp15.vtcr_el2 & VTCR_SHARED_FIELD_MASK;
        return v;
    }
    return env->cp15.tcr_el[regime_el(env, mmu_idx)];
}
811
812
813
814
815
816
817static inline int arm_num_brps(ARMCPU *cpu)
818{
819 if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
820 return FIELD_EX64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, BRPS) + 1;
821 } else {
822 return FIELD_EX32(cpu->isar.dbgdidr, DBGDIDR, BRPS) + 1;
823 }
824}
825
826
827
828
829
830
831static inline int arm_num_wrps(ARMCPU *cpu)
832{
833 if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
834 return FIELD_EX64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, WRPS) + 1;
835 } else {
836 return FIELD_EX32(cpu->isar.dbgdidr, DBGDIDR, WRPS) + 1;
837 }
838}
839
840
841
842
843
844
845static inline int arm_num_ctx_cmps(ARMCPU *cpu)
846{
847 if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
848 return FIELD_EX64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, CTX_CMPS) + 1;
849 } else {
850 return FIELD_EX32(cpu->isar.dbgdidr, DBGDIDR, CTX_CMPS) + 1;
851 }
852}
853
854
855
856
857
858
/* Return true if the M-profile CPU is currently using the Process
 * Stack Pointer (PSP) rather than the Main Stack Pointer (MSP). */
static inline bool v7m_using_psp(CPUARMState *env)
{
    /*
     * Handler mode always uses the main stack; in thread mode the
     * CONTROL.SPSEL bit for the current security state selects which
     * stack pointer is in use.
     */
    return !arm_v7m_is_handler_mode(env) &&
        env->v7m.control[env->v7m.secure] & R_V7M_CONTROL_SPSEL_MASK;
}
869
870
871
872
873
874
875static inline uint32_t v7m_sp_limit(CPUARMState *env)
876{
877 if (v7m_using_psp(env)) {
878 return env->v7m.psplim[env->v7m.secure];
879 } else {
880 return env->v7m.msplim[env->v7m.secure];
881 }
882}
883
884
885
886
887
888
/*
 * Return true if the CPACR setting (bits [21:20] of the banked CPACR
 * for @is_secure) permits access at privilege level @is_priv:
 * 0b00/0b10 deny, 0b01 allows privileged only, 0b11 allows all.
 */
static inline bool v7m_cpacr_pass(CPUARMState *env,
                                  bool is_secure, bool is_priv)
{
    switch (extract32(env->v7m.cpacr[is_secure], 20, 2)) {
    case 0:
    case 2: /* reserved value, treated the same as 0 (no access) */
        return false;
    case 1:
        return is_priv;
    case 3:
        return true;
    default:
        g_assert_not_reached();
    }
}
904
905
906
907
908
909
910
911
912
913static inline const char *aarch32_mode_name(uint32_t psr)
914{
915 static const char cpu_mode_names[16][4] = {
916 "usr", "fiq", "irq", "svc", "???", "???", "mon", "abt",
917 "???", "???", "hyp", "und", "???", "???", "???", "sys"
918 };
919
920 return cpu_mode_names[psr & 0xf];
921}
922
923
924
925
926
927
928
929
930void arm_cpu_update_virq(ARMCPU *cpu);
931
932
933
934
935
936
937
938
939void arm_cpu_update_vfiq(ARMCPU *cpu);
940
941
942
943
944
945
946
947void arm_cpu_update_vserr(ARMCPU *cpu);
948
949
950
951
952
953
954
955
956ARMMMUIdx arm_mmu_idx_el(CPUARMState *env, int el);
957
958
959
960
961
962
963
964ARMMMUIdx arm_mmu_idx(CPUARMState *env);
965
966
967
968
969
970
971
#ifdef CONFIG_USER_ONLY
/* User-mode emulation has only one translation regime: stage 1, EL0. */
static inline ARMMMUIdx stage_1_mmu_idx(ARMMMUIdx mmu_idx)
{
    return ARMMMUIdx_Stage1_E0;
}
static inline ARMMMUIdx arm_stage1_mmu_idx(CPUARMState *env)
{
    return ARMMMUIdx_Stage1_E0;
}
#else
/* Return the stage 1 mmu index corresponding to @mmu_idx. */
ARMMMUIdx stage_1_mmu_idx(ARMMMUIdx mmu_idx);
/* Return the stage 1 mmu index for the CPU's current translation regime. */
ARMMMUIdx arm_stage1_mmu_idx(CPUARMState *env);
#endif
985
986
987
988
989
990
991
992
/* Return true if @mmu_idx is a stage 1 regime of a two-stage translation. */
static inline bool arm_mmu_idx_is_stage1_of_2(ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_Stage1_E0:
    case ARMMMUIdx_Stage1_E1:
    case ARMMMUIdx_Stage1_E1_PAN:
    case ARMMMUIdx_Stage1_SE0:
    case ARMMMUIdx_Stage1_SE1:
    case ARMMMUIdx_Stage1_SE1_PAN:
        return true;
    default:
        return false;
    }
}
1007
/*
 * Build the mask of CPSR bits that are defined for a CPU with the given
 * feature bits and ID registers: the always-present base bits plus each
 * bit gated on the architecture feature that introduced it.
 */
static inline uint32_t aarch32_cpsr_valid_mask(uint64_t features,
                                               const ARMISARegisters *id)
{
    uint32_t valid = CPSR_M | CPSR_AIF | CPSR_IL | CPSR_NZCV;

    if ((features >> ARM_FEATURE_V4T) & 1) {
        valid |= CPSR_T;
    }
    if ((features >> ARM_FEATURE_V5) & 1) {
        valid |= CPSR_Q; /* V5TE in reality */
    }
    if ((features >> ARM_FEATURE_V6) & 1) {
        valid |= CPSR_E | CPSR_GE;
    }
    if ((features >> ARM_FEATURE_THUMB2) & 1) {
        valid |= CPSR_IT;
    }
    if (isar_feature_aa32_jazelle(id)) {
        valid |= CPSR_J;
    }
    if (isar_feature_aa32_pan(id)) {
        valid |= CPSR_PAN;
    }
    if (isar_feature_aa32_dit(id)) {
        valid |= CPSR_DIT;
    }
    if (isar_feature_aa32_ssbs(id)) {
        valid |= CPSR_SSBS;
    }

    return valid;
}
1040
/*
 * Build the mask of PSTATE bits that are defined for a CPU with the
 * given ID registers: the always-present base bits plus each bit gated
 * on the architecture feature that introduced it.
 */
static inline uint32_t aarch64_pstate_valid_mask(const ARMISARegisters *id)
{
    uint32_t valid;

    valid = PSTATE_M | PSTATE_DAIF | PSTATE_IL | PSTATE_SS | PSTATE_NZCV;
    if (isar_feature_aa64_bti(id)) {
        valid |= PSTATE_BTYPE;
    }
    if (isar_feature_aa64_pan(id)) {
        valid |= PSTATE_PAN;
    }
    if (isar_feature_aa64_uao(id)) {
        valid |= PSTATE_UAO;
    }
    if (isar_feature_aa64_dit(id)) {
        valid |= PSTATE_DIT;
    }
    if (isar_feature_aa64_ssbs(id)) {
        valid |= PSTATE_SSBS;
    }
    if (isar_feature_aa64_mte(id)) {
        valid |= PSTATE_TCO;
    }

    return valid;
}
1067
1068
1069
1070
1071
/*
 * Parameters of a given virtual address, as extracted from the
 * translation control register (TCR) for a given regime.
 * Filled in by aa64_va_parameters() below.
 */
typedef struct ARMVAParameters {
    unsigned tsz    : 8;  /* TnSZ: size (offset) field for this VA range */
    unsigned ps     : 3;  /* physical address size field */
    unsigned sh     : 2;  /* shareability field */
    unsigned select : 1;  /* which VA range (TTBR0=0 / TTBR1=1) was selected */
    bool tbi        : 1;  /* top byte ignore enabled */
    bool epd        : 1;  /* EPDn: table walk disabled for this range */
    bool hpd        : 1;  /* presumably hierarchical permission disable — confirm */
    bool using16k   : 1;  /* 16k translation granule in use */
    bool using64k   : 1;  /* 64k translation granule in use */
    bool tsz_oob    : 1;  /* NOTE(review): tsz was out of range — confirm */
    bool ds         : 1;  /* NOTE(review): looks like the DS (FEAT_LPA2) bit — confirm */
} ARMVAParameters;
1085
1086ARMVAParameters aa64_va_parameters(CPUARMState *env, uint64_t va,
1087 ARMMMUIdx mmu_idx, bool data);
1088
1089int aa64_va_parameter_tbi(uint64_t tcr, ARMMMUIdx mmu_idx);
1090int aa64_va_parameter_tbid(uint64_t tcr, ARMMMUIdx mmu_idx);
1091
1092
/*
 * Return true if access to MTE allocation tags is enabled at @el, given
 * the effective @sctlr value: requires SCR_EL3.ATA when EL3 exists,
 * HCR_EL2.ATA when EL2 is enabled (unless E2H+TGE route EL0/1 to EL2),
 * and the relevant SCTLR ATA/ATA0 bit for the EL itself.
 */
static inline bool allocation_tag_access_enabled(CPUARMState *env, int el,
                                                 uint64_t sctlr)
{
    if (el < 3
        && arm_feature(env, ARM_FEATURE_EL3)
        && !(env->cp15.scr_el3 & SCR_ATA)) {
        return false;
    }
    if (el < 2 && arm_is_el2_enabled(env)) {
        uint64_t hcr = arm_hcr_el2_eff(env);
        if (!(hcr & HCR_ATA) && (!(hcr & HCR_E2H) || !(hcr & HCR_TGE))) {
            return false;
        }
    }
    /* ATA0 gates EL0 accesses, ATA gates EL1+ accesses. */
    sctlr &= (el == 0 ? SCTLR_ATA0 : SCTLR_ATA);
    return sctlr != 0;
}
1110
1111#ifndef CONFIG_USER_ONLY
1112
1113
/*
 * Security attributes for a v8M address, as filled in by
 * v8m_security_lookup() below.
 */
typedef struct V8M_SAttributes {
    bool subpage;     /* NOTE(review): region covers only part of the page — confirm */
    bool ns;          /* address is NonSecure */
    bool nsc;         /* address is NonSecure-Callable */
    uint8_t sregion;  /* SAU region number (valid when srvalid) */
    bool srvalid;     /* sregion field is valid */
    uint8_t iregion;  /* IDAU region number (valid when irvalid) */
    bool irvalid;     /* iregion field is valid */
} V8M_SAttributes;
1123
1124void v8m_security_lookup(CPUARMState *env, uint32_t address,
1125 MMUAccessType access_type, ARMMMUIdx mmu_idx,
1126 V8M_SAttributes *sattrs);
1127
1128bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address,
1129 MMUAccessType access_type, ARMMMUIdx mmu_idx,
1130 hwaddr *phys_ptr, MemTxAttrs *txattrs,
1131 int *prot, bool *is_subpage,
1132 ARMMMUFaultInfo *fi, uint32_t *mregion);
1133
1134
/* Cacheability and shareability attributes for a memory access */
typedef struct ARMCacheAttrs {
    /*
     * If is_s2_format is true, attrs is the stage-2 descriptor format;
     * otherwise it is the (stage 1) MAIR format.
     */
    unsigned int attrs:8;
    unsigned int shareability:2; /* as in the SH field of the VMSAv8-64 PTEs */
    bool is_s2_format:1;
} ARMCacheAttrs;
1144
1145bool get_phys_addr(CPUARMState *env, target_ulong address,
1146 MMUAccessType access_type, ARMMMUIdx mmu_idx,
1147 hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot,
1148 target_ulong *page_size,
1149 ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs)
1150 __attribute__((nonnull));
1151
1152void arm_log_exception(CPUState *cs);
1153
1154#endif
1155
1156
1157
1158
1159
1160#define GMID_EL1_BS 6
1161
1162
1163#define LOG2_TAG_GRANULE 4
1164#define TAG_GRANULE (1 << LOG2_TAG_GRANULE)
1165
1166
1167
1168
1169
1170
1171FIELD(PREDDESC, OPRSZ, 0, 6)
1172FIELD(PREDDESC, ESZ, 6, 2)
1173FIELD(PREDDESC, DATA, 8, 24)
1174
1175
1176
1177
1178
1179#define SVE_MTEDESC_SHIFT 5
1180
1181
1182FIELD(MTEDESC, MIDX, 0, 4)
1183FIELD(MTEDESC, TBI, 4, 2)
1184FIELD(MTEDESC, TCMA, 6, 2)
1185FIELD(MTEDESC, WRITE, 8, 1)
1186FIELD(MTEDESC, SIZEM1, 9, SIMD_DATA_BITS - 9)
1187
1188bool mte_probe(CPUARMState *env, uint32_t desc, uint64_t ptr);
1189uint64_t mte_check(CPUARMState *env, uint32_t desc, uint64_t ptr, uintptr_t ra);
1190
1191static inline int allocation_tag_from_addr(uint64_t ptr)
1192{
1193 return extract64(ptr, 56, 4);
1194}
1195
1196static inline uint64_t address_with_allocation_tag(uint64_t ptr, int rtag)
1197{
1198 return deposit64(ptr, 56, 4, rtag);
1199}
1200
1201
/* Return true if TBI (top-byte-ignore) applies for this address half:
 * the MTE descriptor holds one TBI bit per value of address bit 55. */
static inline bool tbi_check(uint32_t desc, int bit55)
{
    return (desc >> (R_MTEDESC_TBI_SHIFT + bit55)) & 1;
}
1206
1207
/*
 * Return true if the access is tag-unchecked because of TCMA: the TCMA
 * bit selected by address bit 55 must be set and the pointer tag must be
 * the "match-all" value for that half (0x0 for bit55 == 0, 0xf for
 * bit55 == 1).
 */
static inline bool tcma_check(uint32_t desc, int bit55, int ptr_tag)
{
    /*
     * (ptr_tag + bit55) & 0xf == 0 exactly when ptr_tag is 0x0
     * (bit55 == 0) or 0xf (bit55 == 1).
     */
    bool match = ((ptr_tag + bit55) & 0xf) == 0;
    bool tcma = (desc >> (R_MTEDESC_TCMA_SHIFT + bit55)) & 1;
    return tcma && match;
}
1218
1219
1220
1221
1222
1223
1224
/*
 * For user-only emulation, strip the top byte of @ptr by replicating
 * bit 55 upward; elsewhere the pointer is returned unchanged.
 */
static inline uint64_t useronly_clean_ptr(uint64_t ptr)
{
#ifdef CONFIG_USER_ONLY
    /*
     * AND-ing with the sign-extension from bit 55 clears bits [63:56]
     * when bit 55 is 0 and preserves them when bit 55 is 1.
     */
    ptr &= sextract64(ptr, 0, 56);
#endif
    return ptr;
}
1233
/*
 * Like useronly_clean_ptr(), but only cleans the pointer when the MTE
 * descriptor says TBI is enabled for the address half selected by bit 55.
 */
static inline uint64_t useronly_maybe_clean_ptr(uint32_t desc, uint64_t ptr)
{
#ifdef CONFIG_USER_ONLY
    /* clean_ptr < 0 iff bit 55 of ptr is set */
    int64_t clean_ptr = sextract64(ptr, 0, 56);
    if (tbi_check(desc, clean_ptr < 0)) {
        ptr = clean_ptr;
    }
#endif
    return ptr;
}
1244
1245
/* Values for M-profile ECI (which beats of an MVE insn have completed) */
enum MVEECIState {
    ECI_NONE = 0, /* No completed beats */
    ECI_A0 = 1, /* Completed: A0 */
    ECI_A0A1 = 2, /* Completed: A0, A1 */
    /* 3 is reserved */
    ECI_A0A1A2 = 4, /* Completed: A0, A1, A2 */
    ECI_A0A1A2B0 = 5, /* Completed: A0, A1, A2, B0 */
    /* All other values reserved */
};
1255
1256
1257#define PMCRN_MASK 0xf800
1258#define PMCRN_SHIFT 11
1259#define PMCRLC 0x40
1260#define PMCRDP 0x20
1261#define PMCRX 0x10
1262#define PMCRD 0x8
1263#define PMCRC 0x4
1264#define PMCRP 0x2
1265#define PMCRE 0x1
1266
1267
1268
1269
1270#define PMCR_WRITABLE_MASK (PMCRLC | PMCRDP | PMCRX | PMCRD | PMCRE)
1271
1272#define PMXEVTYPER_P 0x80000000
1273#define PMXEVTYPER_U 0x40000000
1274#define PMXEVTYPER_NSK 0x20000000
1275#define PMXEVTYPER_NSU 0x10000000
1276#define PMXEVTYPER_NSH 0x08000000
1277#define PMXEVTYPER_M 0x04000000
1278#define PMXEVTYPER_MT 0x02000000
1279#define PMXEVTYPER_EVTCOUNT 0x0000ffff
1280#define PMXEVTYPER_MASK (PMXEVTYPER_P | PMXEVTYPER_U | PMXEVTYPER_NSK | \
1281 PMXEVTYPER_NSU | PMXEVTYPER_NSH | \
1282 PMXEVTYPER_M | PMXEVTYPER_MT | \
1283 PMXEVTYPER_EVTCOUNT)
1284
1285#define PMCCFILTR 0xf8000000
1286#define PMCCFILTR_M PMXEVTYPER_M
1287#define PMCCFILTR_EL0 (PMCCFILTR | PMCCFILTR_M)
1288
/* Return the number of implemented PMU event counters (PMCR.N, taken
 * from the reset value of PMCR_EL0). */
static inline uint32_t pmu_num_counters(CPUARMState *env)
{
    ARMCPU *cpu = env_archcpu(env);

    return (cpu->isar.reset_pmcr_el0 & PMCRN_MASK) >> PMCRN_SHIFT;
}
1295
1296
1297static inline uint64_t pmu_counter_mask(CPUARMState *env)
1298{
1299 return (1 << 31) | ((1 << pmu_num_counters(env)) - 1);
1300}
1301
1302#ifdef TARGET_AARCH64
1303int arm_gdb_get_svereg(CPUARMState *env, GByteArray *buf, int reg);
1304int arm_gdb_set_svereg(CPUARMState *env, uint8_t *buf, int reg);
1305int aarch64_fpu_gdb_get_reg(CPUARMState *env, GByteArray *buf, int reg);
1306int aarch64_fpu_gdb_set_reg(CPUARMState *env, uint8_t *buf, int reg);
1307void arm_cpu_sve_finalize(ARMCPU *cpu, Error **errp);
1308void arm_cpu_sme_finalize(ARMCPU *cpu, Error **errp);
1309void arm_cpu_pauth_finalize(ARMCPU *cpu, Error **errp);
1310void arm_cpu_lpa2_finalize(ARMCPU *cpu, Error **errp);
1311#endif
1312
1313#ifdef CONFIG_USER_ONLY
1314static inline void define_cortex_a72_a57_a53_cp_reginfo(ARMCPU *cpu) { }
1315#else
1316void define_cortex_a72_a57_a53_cp_reginfo(ARMCPU *cpu);
1317#endif
1318
1319bool el_is_in_host(CPUARMState *env, int el);
1320
1321void aa32_max_features(ARMCPU *cpu);
1322int exception_target_el(CPUARMState *env);
1323bool arm_singlestep_active(CPUARMState *env);
1324bool arm_generate_debug_exceptions(CPUARMState *env);
1325
1326
1327void define_debug_regs(ARMCPU *cpu);
1328
1329
1330static inline uint64_t arm_mdcr_el2_eff(CPUARMState *env)
1331{
1332 return arm_is_el2_enabled(env) ? env->cp15.mdcr_el2 : 0;
1333}
1334
1335
1336#define SVE_VQ_POW2_MAP \
1337 ((1 << (1 - 1)) | (1 << (2 - 1)) | \
1338 (1 << (4 - 1)) | (1 << (8 - 1)) | (1 << (16 - 1)))
1339
1340#endif
1341