1#include "qemu/osdep.h"
2#include "target/arm/idau.h"
3#include "trace.h"
4#include "cpu.h"
5#include "internals.h"
6#include "exec/gdbstub.h"
7#include "exec/helper-proto.h"
8#include "qemu/host-utils.h"
9#include "sysemu/arch_init.h"
10#include "sysemu/sysemu.h"
11#include "qemu/bitops.h"
12#include "qemu/crc32c.h"
13#include "exec/exec-all.h"
14#include "exec/cpu_ldst.h"
15#include "arm_ldst.h"
16#include <zlib.h>
17#include "exec/semihost.h"
18#include "sysemu/kvm.h"
19#include "fpu/softfloat.h"
20#include "qemu/range.h"
21
22#define ARM_CPU_FREQ 1000000000
23
24#ifndef CONFIG_USER_ONLY
25
/* Cacheability/shareability attributes returned by the page-table walkers:
 * an 8-bit attribute byte plus a 2-bit shareability field.
 * NOTE(review): attrs is presumably a MAIR-style encoding — confirm
 * against the producers (get_phys_addr_lpae and friends).
 */
typedef struct ARMCacheAttrs {
    unsigned int attrs:8;
    unsigned int shareability:2;
} ARMCacheAttrs;
30
31static bool get_phys_addr(CPUARMState *env, target_ulong address,
32 MMUAccessType access_type, ARMMMUIdx mmu_idx,
33 hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot,
34 target_ulong *page_size,
35 ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs);
36
37static bool get_phys_addr_lpae(CPUARMState *env, target_ulong address,
38 MMUAccessType access_type, ARMMMUIdx mmu_idx,
39 hwaddr *phys_ptr, MemTxAttrs *txattrs, int *prot,
40 target_ulong *page_size_ptr,
41 ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs);
42
43
/* Result of a v8M security attribution lookup for an address
 * (filled in by v8m_security_lookup() below).
 */
typedef struct V8M_SAttributes {
    bool subpage;    /* assumed: attribution narrower than a page — confirm */
    bool ns;         /* Non-secure */
    bool nsc;        /* Non-secure callable */
    uint8_t sregion; /* matching SAU region number (valid if srvalid) */
    bool srvalid;
    uint8_t iregion; /* matching IDAU region number (valid if irvalid) */
    bool irvalid;
} V8M_SAttributes;
53
54static void v8m_security_lookup(CPUARMState *env, uint32_t address,
55 MMUAccessType access_type, ARMMMUIdx mmu_idx,
56 V8M_SAttributes *sattrs);
57#endif
58
59static void switch_mode(CPUARMState *env, int mode);
60
61static int vfp_gdb_get_reg(CPUARMState *env, uint8_t *buf, int reg)
62{
63 int nregs;
64
65
66 nregs = arm_feature(env, ARM_FEATURE_VFP3) ? 32 : 16;
67 if (reg < nregs) {
68 stq_le_p(buf, *aa32_vfp_dreg(env, reg));
69 return 8;
70 }
71 if (arm_feature(env, ARM_FEATURE_NEON)) {
72
73 nregs += 16;
74 if (reg < nregs) {
75 uint64_t *q = aa32_vfp_qreg(env, reg - 32);
76 stq_le_p(buf, q[0]);
77 stq_le_p(buf + 8, q[1]);
78 return 16;
79 }
80 }
81 switch (reg - nregs) {
82 case 0: stl_p(buf, env->vfp.xregs[ARM_VFP_FPSID]); return 4;
83 case 1: stl_p(buf, env->vfp.xregs[ARM_VFP_FPSCR]); return 4;
84 case 2: stl_p(buf, env->vfp.xregs[ARM_VFP_FPEXC]); return 4;
85 }
86 return 0;
87}
88
89static int vfp_gdb_set_reg(CPUARMState *env, uint8_t *buf, int reg)
90{
91 int nregs;
92
93 nregs = arm_feature(env, ARM_FEATURE_VFP3) ? 32 : 16;
94 if (reg < nregs) {
95 *aa32_vfp_dreg(env, reg) = ldq_le_p(buf);
96 return 8;
97 }
98 if (arm_feature(env, ARM_FEATURE_NEON)) {
99 nregs += 16;
100 if (reg < nregs) {
101 uint64_t *q = aa32_vfp_qreg(env, reg - 32);
102 q[0] = ldq_le_p(buf);
103 q[1] = ldq_le_p(buf + 8);
104 return 16;
105 }
106 }
107 switch (reg - nregs) {
108 case 0: env->vfp.xregs[ARM_VFP_FPSID] = ldl_p(buf); return 4;
109 case 1: env->vfp.xregs[ARM_VFP_FPSCR] = ldl_p(buf); return 4;
110 case 2: env->vfp.xregs[ARM_VFP_FPEXC] = ldl_p(buf) & (1 << 30); return 4;
111 }
112 return 0;
113}
114
115static int aarch64_fpu_gdb_get_reg(CPUARMState *env, uint8_t *buf, int reg)
116{
117 switch (reg) {
118 case 0 ... 31:
119
120 {
121 uint64_t *q = aa64_vfp_qreg(env, reg);
122 stq_le_p(buf, q[0]);
123 stq_le_p(buf + 8, q[1]);
124 return 16;
125 }
126 case 32:
127
128 stl_p(buf, vfp_get_fpsr(env));
129 return 4;
130 case 33:
131
132 stl_p(buf, vfp_get_fpcr(env));
133 return 4;
134 default:
135 return 0;
136 }
137}
138
139static int aarch64_fpu_gdb_set_reg(CPUARMState *env, uint8_t *buf, int reg)
140{
141 switch (reg) {
142 case 0 ... 31:
143
144 {
145 uint64_t *q = aa64_vfp_qreg(env, reg);
146 q[0] = ldq_le_p(buf);
147 q[1] = ldq_le_p(buf + 8);
148 return 16;
149 }
150 case 32:
151
152 vfp_set_fpsr(env, ldl_p(buf));
153 return 4;
154 case 33:
155
156 vfp_set_fpcr(env, ldl_p(buf));
157 return 4;
158 default:
159 return 0;
160 }
161}
162
163static uint64_t raw_read(CPUARMState *env, const ARMCPRegInfo *ri)
164{
165 assert(ri->fieldoffset);
166 if (cpreg_field_is_64bit(ri)) {
167 return CPREG_FIELD64(env, ri);
168 } else {
169 return CPREG_FIELD32(env, ri);
170 }
171}
172
173static void raw_write(CPUARMState *env, const ARMCPRegInfo *ri,
174 uint64_t value)
175{
176 assert(ri->fieldoffset);
177 if (cpreg_field_is_64bit(ri)) {
178 CPREG_FIELD64(env, ri) = value;
179 } else {
180 CPREG_FIELD32(env, ri) = value;
181 }
182}
183
184static void *raw_ptr(CPUARMState *env, const ARMCPRegInfo *ri)
185{
186 return (char *)env + ri->fieldoffset;
187}
188
189uint64_t read_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri)
190{
191
192 if (ri->type & ARM_CP_CONST) {
193 return ri->resetvalue;
194 } else if (ri->raw_readfn) {
195 return ri->raw_readfn(env, ri);
196 } else if (ri->readfn) {
197 return ri->readfn(env, ri);
198 } else {
199 return raw_read(env, ri);
200 }
201}
202
203static void write_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri,
204 uint64_t v)
205{
206
207
208
209
210
211 if (ri->type & ARM_CP_CONST) {
212 return;
213 } else if (ri->raw_writefn) {
214 ri->raw_writefn(env, ri, v);
215 } else if (ri->writefn) {
216 ri->writefn(env, ri, v);
217 } else {
218 raw_write(env, ri, v);
219 }
220}
221
222static int arm_gdb_get_sysreg(CPUARMState *env, uint8_t *buf, int reg)
223{
224 ARMCPU *cpu = arm_env_get_cpu(env);
225 const ARMCPRegInfo *ri;
226 uint32_t key;
227
228 key = cpu->dyn_xml.cpregs_keys[reg];
229 ri = get_arm_cp_reginfo(cpu->cp_regs, key);
230 if (ri) {
231 if (cpreg_field_is_64bit(ri)) {
232 return gdb_get_reg64(buf, (uint64_t)read_raw_cp_reg(env, ri));
233 } else {
234 return gdb_get_reg32(buf, (uint32_t)read_raw_cp_reg(env, ri));
235 }
236 }
237 return 0;
238}
239
static int arm_gdb_set_sysreg(CPUARMState *env, uint8_t *buf, int reg)
{
    /* System register writes from the gdbstub are not supported;
     * returning 0 reports the write as ignored.
     */
    return 0;
}
244
static bool raw_accessors_invalid(const ARMCPRegInfo *ri)
{
    /* Return true if calling read_raw_cp_reg()/write_raw_cp_reg() on this
     * regdef would be invalid (no usable raw access path), i.e. the
     * register must not appear in the raw/migration list.
     * Raw access is fine if the register is a constant, or has a backing
     * field, or has both some usable read accessor and some usable write
     * accessor.
     */
    if ((ri->type & ARM_CP_CONST) ||
        ri->fieldoffset ||
        ((ri->raw_writefn || ri->writefn) && (ri->raw_readfn || ri->readfn))) {
        return false;
    }
    return true;
}
265
266bool write_cpustate_to_list(ARMCPU *cpu)
267{
268
269 int i;
270 bool ok = true;
271
272 for (i = 0; i < cpu->cpreg_array_len; i++) {
273 uint32_t regidx = kvm_to_cpreg_id(cpu->cpreg_indexes[i]);
274 const ARMCPRegInfo *ri;
275
276 ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
277 if (!ri) {
278 ok = false;
279 continue;
280 }
281 if (ri->type & ARM_CP_NO_RAW) {
282 continue;
283 }
284 cpu->cpreg_values[i] = read_raw_cp_reg(&cpu->env, ri);
285 }
286 return ok;
287}
288
289bool write_list_to_cpustate(ARMCPU *cpu)
290{
291 int i;
292 bool ok = true;
293
294 for (i = 0; i < cpu->cpreg_array_len; i++) {
295 uint32_t regidx = kvm_to_cpreg_id(cpu->cpreg_indexes[i]);
296 uint64_t v = cpu->cpreg_values[i];
297 const ARMCPRegInfo *ri;
298
299 ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
300 if (!ri) {
301 ok = false;
302 continue;
303 }
304 if (ri->type & ARM_CP_NO_RAW) {
305 continue;
306 }
307
308
309
310
311 write_raw_cp_reg(&cpu->env, ri, v);
312 if (read_raw_cp_reg(&cpu->env, ri) != v) {
313 ok = false;
314 }
315 }
316 return ok;
317}
318
319static void add_cpreg_to_list(gpointer key, gpointer opaque)
320{
321 ARMCPU *cpu = opaque;
322 uint64_t regidx;
323 const ARMCPRegInfo *ri;
324
325 regidx = *(uint32_t *)key;
326 ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
327
328 if (!(ri->type & (ARM_CP_NO_RAW|ARM_CP_ALIAS))) {
329 cpu->cpreg_indexes[cpu->cpreg_array_len] = cpreg_to_kvm_id(regidx);
330
331 cpu->cpreg_array_len++;
332 }
333}
334
335static void count_cpreg(gpointer key, gpointer opaque)
336{
337 ARMCPU *cpu = opaque;
338 uint64_t regidx;
339 const ARMCPRegInfo *ri;
340
341 regidx = *(uint32_t *)key;
342 ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
343
344 if (!(ri->type & (ARM_CP_NO_RAW|ARM_CP_ALIAS))) {
345 cpu->cpreg_array_len++;
346 }
347}
348
349static gint cpreg_key_compare(gconstpointer a, gconstpointer b)
350{
351 uint64_t aidx = cpreg_to_kvm_id(*(uint32_t *)a);
352 uint64_t bidx = cpreg_to_kvm_id(*(uint32_t *)b);
353
354 if (aidx > bidx) {
355 return 1;
356 }
357 if (aidx < bidx) {
358 return -1;
359 }
360 return 0;
361}
362
363void init_cpreg_list(ARMCPU *cpu)
364{
365
366
367
368 GList *keys;
369 int arraylen;
370
371 keys = g_hash_table_get_keys(cpu->cp_regs);
372 keys = g_list_sort(keys, cpreg_key_compare);
373
374 cpu->cpreg_array_len = 0;
375
376 g_list_foreach(keys, count_cpreg, cpu);
377
378 arraylen = cpu->cpreg_array_len;
379 cpu->cpreg_indexes = g_new(uint64_t, arraylen);
380 cpu->cpreg_values = g_new(uint64_t, arraylen);
381 cpu->cpreg_vmstate_indexes = g_new(uint64_t, arraylen);
382 cpu->cpreg_vmstate_values = g_new(uint64_t, arraylen);
383 cpu->cpreg_vmstate_array_len = cpu->cpreg_array_len;
384 cpu->cpreg_array_len = 0;
385
386 g_list_foreach(keys, add_cpreg_to_list, cpu);
387
388 assert(cpu->cpreg_array_len == arraylen);
389
390 g_list_free(keys);
391}
392
393
394
395
396
397
398
399
400static CPAccessResult access_el3_aa32ns(CPUARMState *env,
401 const ARMCPRegInfo *ri,
402 bool isread)
403{
404 bool secure = arm_is_secure_below_el3(env);
405
406 assert(!arm_el_is_aa64(env, 3));
407 if (secure) {
408 return CP_ACCESS_TRAP_UNCATEGORIZED;
409 }
410 return CP_ACCESS_OK;
411}
412
413static CPAccessResult access_el3_aa32ns_aa64any(CPUARMState *env,
414 const ARMCPRegInfo *ri,
415 bool isread)
416{
417 if (!arm_el_is_aa64(env, 3)) {
418 return access_el3_aa32ns(env, ri, isread);
419 }
420 return CP_ACCESS_OK;
421}
422
423
424
425
426
427
428static CPAccessResult access_trap_aa32s_el1(CPUARMState *env,
429 const ARMCPRegInfo *ri,
430 bool isread)
431{
432 if (arm_current_el(env) == 3) {
433 return CP_ACCESS_OK;
434 }
435 if (arm_is_secure_below_el3(env)) {
436 return CP_ACCESS_TRAP_EL3;
437 }
438
439 return CP_ACCESS_TRAP_UNCATEGORIZED;
440}
441
442
443
444
445static CPAccessResult access_tdosa(CPUARMState *env, const ARMCPRegInfo *ri,
446 bool isread)
447{
448 int el = arm_current_el(env);
449 bool mdcr_el2_tdosa = (env->cp15.mdcr_el2 & MDCR_TDOSA) ||
450 (env->cp15.mdcr_el2 & MDCR_TDE) ||
451 (env->cp15.hcr_el2 & HCR_TGE);
452
453 if (el < 2 && mdcr_el2_tdosa && !arm_is_secure_below_el3(env)) {
454 return CP_ACCESS_TRAP_EL2;
455 }
456 if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDOSA)) {
457 return CP_ACCESS_TRAP_EL3;
458 }
459 return CP_ACCESS_OK;
460}
461
462
463
464
465static CPAccessResult access_tdra(CPUARMState *env, const ARMCPRegInfo *ri,
466 bool isread)
467{
468 int el = arm_current_el(env);
469 bool mdcr_el2_tdra = (env->cp15.mdcr_el2 & MDCR_TDRA) ||
470 (env->cp15.mdcr_el2 & MDCR_TDE) ||
471 (env->cp15.hcr_el2 & HCR_TGE);
472
473 if (el < 2 && mdcr_el2_tdra && !arm_is_secure_below_el3(env)) {
474 return CP_ACCESS_TRAP_EL2;
475 }
476 if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDA)) {
477 return CP_ACCESS_TRAP_EL3;
478 }
479 return CP_ACCESS_OK;
480}
481
482
483
484
485static CPAccessResult access_tda(CPUARMState *env, const ARMCPRegInfo *ri,
486 bool isread)
487{
488 int el = arm_current_el(env);
489 bool mdcr_el2_tda = (env->cp15.mdcr_el2 & MDCR_TDA) ||
490 (env->cp15.mdcr_el2 & MDCR_TDE) ||
491 (env->cp15.hcr_el2 & HCR_TGE);
492
493 if (el < 2 && mdcr_el2_tda && !arm_is_secure_below_el3(env)) {
494 return CP_ACCESS_TRAP_EL2;
495 }
496 if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDA)) {
497 return CP_ACCESS_TRAP_EL3;
498 }
499 return CP_ACCESS_OK;
500}
501
502
503
504
505static CPAccessResult access_tpm(CPUARMState *env, const ARMCPRegInfo *ri,
506 bool isread)
507{
508 int el = arm_current_el(env);
509
510 if (el < 2 && (env->cp15.mdcr_el2 & MDCR_TPM)
511 && !arm_is_secure_below_el3(env)) {
512 return CP_ACCESS_TRAP_EL2;
513 }
514 if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TPM)) {
515 return CP_ACCESS_TRAP_EL3;
516 }
517 return CP_ACCESS_OK;
518}
519
520static void dacr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
521{
522 ARMCPU *cpu = arm_env_get_cpu(env);
523
524 raw_write(env, ri, value);
525 tlb_flush(CPU(cpu));
526}
527
528static void fcse_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
529{
530 ARMCPU *cpu = arm_env_get_cpu(env);
531
532 if (raw_read(env, ri) != value) {
533
534
535
536 tlb_flush(CPU(cpu));
537 raw_write(env, ri, value);
538 }
539}
540
541static void contextidr_write(CPUARMState *env, const ARMCPRegInfo *ri,
542 uint64_t value)
543{
544 ARMCPU *cpu = arm_env_get_cpu(env);
545
546 if (raw_read(env, ri) != value && !arm_feature(env, ARM_FEATURE_PMSA)
547 && !extended_addresses_enabled(env)) {
548
549
550
551
552 tlb_flush(CPU(cpu));
553 }
554 raw_write(env, ri, value);
555}
556
557
558static void tlbiall_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
559 uint64_t value)
560{
561 CPUState *cs = ENV_GET_CPU(env);
562
563 tlb_flush_all_cpus_synced(cs);
564}
565
566static void tlbiasid_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
567 uint64_t value)
568{
569 CPUState *cs = ENV_GET_CPU(env);
570
571 tlb_flush_all_cpus_synced(cs);
572}
573
574static void tlbimva_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
575 uint64_t value)
576{
577 CPUState *cs = ENV_GET_CPU(env);
578
579 tlb_flush_page_all_cpus_synced(cs, value & TARGET_PAGE_MASK);
580}
581
582static void tlbimvaa_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
583 uint64_t value)
584{
585 CPUState *cs = ENV_GET_CPU(env);
586
587 tlb_flush_page_all_cpus_synced(cs, value & TARGET_PAGE_MASK);
588}
589
590
591
592
593
594
595static bool tlb_force_broadcast(CPUARMState *env)
596{
597 return (env->cp15.hcr_el2 & HCR_FB) &&
598 arm_current_el(env) == 1 && arm_is_secure_below_el3(env);
599}
600
601static void tlbiall_write(CPUARMState *env, const ARMCPRegInfo *ri,
602 uint64_t value)
603{
604
605 ARMCPU *cpu = arm_env_get_cpu(env);
606
607 if (tlb_force_broadcast(env)) {
608 tlbiall_is_write(env, NULL, value);
609 return;
610 }
611
612 tlb_flush(CPU(cpu));
613}
614
615static void tlbimva_write(CPUARMState *env, const ARMCPRegInfo *ri,
616 uint64_t value)
617{
618
619 ARMCPU *cpu = arm_env_get_cpu(env);
620
621 if (tlb_force_broadcast(env)) {
622 tlbimva_is_write(env, NULL, value);
623 return;
624 }
625
626 tlb_flush_page(CPU(cpu), value & TARGET_PAGE_MASK);
627}
628
629static void tlbiasid_write(CPUARMState *env, const ARMCPRegInfo *ri,
630 uint64_t value)
631{
632
633 ARMCPU *cpu = arm_env_get_cpu(env);
634
635 if (tlb_force_broadcast(env)) {
636 tlbiasid_is_write(env, NULL, value);
637 return;
638 }
639
640 tlb_flush(CPU(cpu));
641}
642
643static void tlbimvaa_write(CPUARMState *env, const ARMCPRegInfo *ri,
644 uint64_t value)
645{
646
647 ARMCPU *cpu = arm_env_get_cpu(env);
648
649 if (tlb_force_broadcast(env)) {
650 tlbimvaa_is_write(env, NULL, value);
651 return;
652 }
653
654 tlb_flush_page(CPU(cpu), value & TARGET_PAGE_MASK);
655}
656
657static void tlbiall_nsnh_write(CPUARMState *env, const ARMCPRegInfo *ri,
658 uint64_t value)
659{
660 CPUState *cs = ENV_GET_CPU(env);
661
662 tlb_flush_by_mmuidx(cs,
663 ARMMMUIdxBit_S12NSE1 |
664 ARMMMUIdxBit_S12NSE0 |
665 ARMMMUIdxBit_S2NS);
666}
667
668static void tlbiall_nsnh_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
669 uint64_t value)
670{
671 CPUState *cs = ENV_GET_CPU(env);
672
673 tlb_flush_by_mmuidx_all_cpus_synced(cs,
674 ARMMMUIdxBit_S12NSE1 |
675 ARMMMUIdxBit_S12NSE0 |
676 ARMMMUIdxBit_S2NS);
677}
678
679static void tlbiipas2_write(CPUARMState *env, const ARMCPRegInfo *ri,
680 uint64_t value)
681{
682
683
684
685
686
687
688 CPUState *cs = ENV_GET_CPU(env);
689 uint64_t pageaddr;
690
691 if (!arm_feature(env, ARM_FEATURE_EL2) || !(env->cp15.scr_el3 & SCR_NS)) {
692 return;
693 }
694
695 pageaddr = sextract64(value << 12, 0, 40);
696
697 tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_S2NS);
698}
699
700static void tlbiipas2_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
701 uint64_t value)
702{
703 CPUState *cs = ENV_GET_CPU(env);
704 uint64_t pageaddr;
705
706 if (!arm_feature(env, ARM_FEATURE_EL2) || !(env->cp15.scr_el3 & SCR_NS)) {
707 return;
708 }
709
710 pageaddr = sextract64(value << 12, 0, 40);
711
712 tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
713 ARMMMUIdxBit_S2NS);
714}
715
716static void tlbiall_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri,
717 uint64_t value)
718{
719 CPUState *cs = ENV_GET_CPU(env);
720
721 tlb_flush_by_mmuidx(cs, ARMMMUIdxBit_S1E2);
722}
723
724static void tlbiall_hyp_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
725 uint64_t value)
726{
727 CPUState *cs = ENV_GET_CPU(env);
728
729 tlb_flush_by_mmuidx_all_cpus_synced(cs, ARMMMUIdxBit_S1E2);
730}
731
732static void tlbimva_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri,
733 uint64_t value)
734{
735 CPUState *cs = ENV_GET_CPU(env);
736 uint64_t pageaddr = value & ~MAKE_64BIT_MASK(0, 12);
737
738 tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_S1E2);
739}
740
741static void tlbimva_hyp_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
742 uint64_t value)
743{
744 CPUState *cs = ENV_GET_CPU(env);
745 uint64_t pageaddr = value & ~MAKE_64BIT_MASK(0, 12);
746
747 tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
748 ARMMMUIdxBit_S1E2);
749}
750
static const ARMCPRegInfo cp_reginfo[] = {
    /* The secure and non-secure FCSEIDR views are defined as two separate
     * regdefs (SECSTATE_NS / SECSTATE_S) rather than one banked regdef,
     * so each bank has its own backing field, reset and migration state.
     * A change flushes the TLB (see fcse_write).
     */
    { .name = "FCSEIDR",
      .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 0,
      .access = PL1_RW, .secure = ARM_CP_SECSTATE_NS,
      .fieldoffset = offsetof(CPUARMState, cp15.fcseidr_ns),
      .resetvalue = 0, .writefn = fcse_write, .raw_writefn = raw_write, },
    { .name = "FCSEIDR_S",
      .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 0,
      .access = PL1_RW, .secure = ARM_CP_SECSTATE_S,
      .fieldoffset = offsetof(CPUARMState, cp15.fcseidr_s),
      .resetvalue = 0, .writefn = fcse_write, .raw_writefn = raw_write, },
    /* CONTEXTIDR is split the same way: the NS view is shared with the
     * AArch64 CONTEXTIDR_EL1 (STATE_BOTH, backing contextidr_el[1]),
     * while the secure AArch32 view has its own storage (contextidr_s).
     * Writes may flush the TLB (see contextidr_write).
     */
    { .name = "CONTEXTIDR_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 1,
      .access = PL1_RW, .secure = ARM_CP_SECSTATE_NS,
      .fieldoffset = offsetof(CPUARMState, cp15.contextidr_el[1]),
      .resetvalue = 0, .writefn = contextidr_write, .raw_writefn = raw_write, },
    { .name = "CONTEXTIDR_S", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 1,
      .access = PL1_RW, .secure = ARM_CP_SECSTATE_S,
      .fieldoffset = offsetof(CPUARMState, cp15.contextidr_s),
      .resetvalue = 0, .writefn = contextidr_write, .raw_writefn = raw_write, },
    REGINFO_SENTINEL
};
785
static const ARMCPRegInfo not_v8_cp_reginfo[] = {
    /* Wildcard (CP_ANY) regdefs used only before v8; v8 defines these
     * encodings individually.
     */
    /* MMU Domain access control (banked S/NS, low 32 bits) */
    { .name = "DACR",
      .cp = 15, .opc1 = CP_ANY, .crn = 3, .crm = CP_ANY, .opc2 = CP_ANY,
      .access = PL1_RW, .resetvalue = 0,
      .writefn = dacr_write, .raw_writefn = raw_write,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dacr_s),
                             offsetoflow32(CPUARMState, cp15.dacr_ns) } },
    /* TLB lockdown register ranges: not modelled, treated as NOP. */
    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 0,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 1,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 4,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 8,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
    /* Cache maintenance ops (crn=7): NOPs here; OVERRIDE lets later
     * regdefs claim specific encodings in this space.
     */
    { .name = "CACHEMAINT", .cp = 15, .crn = 7, .crm = CP_ANY,
      .opc1 = 0, .opc2 = CP_ANY, .access = PL1_W,
      .type = ARM_CP_NOP | ARM_CP_OVERRIDE },
    REGINFO_SENTINEL
};
814
static const ARMCPRegInfo not_v6_cp_reginfo[] = {
    /* The pre-v6 WFI encoding (c7, c8, opc2=2). Registered for all
     * pre-v6 cores even though not every one implemented it, so this
     * is slightly over-broad.
     */
    { .name = "WFI_v5", .cp = 15, .crn = 7, .crm = 8, .opc1 = 0, .opc2 = 2,
      .access = PL1_W, .type = ARM_CP_WFI },
    REGINFO_SENTINEL
};
823
static const ARMCPRegInfo not_v7_cp_reginfo[] = {
    /* The v6 WFI encoding (c7, c0, opc2=4); superseded in v7. */
    { .name = "WFI_v6", .cp = 15, .crn = 7, .crm = 0, .opc1 = 0, .opc2 = 4,
      .access = PL1_W, .type = ARM_CP_WFI },
    /* L1 cache lockdown: the value is stored (c9_data / c9_insn) so it
     * reads back as written, but it has no effect on emulation.
     */
    { .name = "DLOCKDOWN", .cp = 15, .crn = 9, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_data),
      .resetvalue = 0 },
    { .name = "ILOCKDOWN", .cp = 15, .crn = 9, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_insn),
      .resetvalue = 0 },
    /* crn=0 opc1=1 implementation-defined ID space: reads-as-zero
     * constant, excluded from the raw/migration list.
     */
    { .name = "DUMMY", .cp = 15, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = CP_ANY,
      .access = PL1_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
      .resetvalue = 0 },
    /* cp14 debug ID register: modelled as RAZ constant here. */
    { .name = "DBGDIDR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL0_R, .type = ARM_CP_CONST, .resetvalue = 0 },
    /* Pre-v7 unified TLB invalidate operations: crm and opc1 are
     * wildcarded, dispatching on opc2 only.
     */
    { .name = "TLBIALL", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 0, .access = PL1_W, .writefn = tlbiall_write,
      .type = ARM_CP_NO_RAW },
    { .name = "TLBIMVA", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 1, .access = PL1_W, .writefn = tlbimva_write,
      .type = ARM_CP_NO_RAW },
    { .name = "TLBIASID", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 2, .access = PL1_W, .writefn = tlbiasid_write,
      .type = ARM_CP_NO_RAW },
    { .name = "TLBIMVAA", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 3, .access = PL1_W, .writefn = tlbimvaa_write,
      .type = ARM_CP_NO_RAW },
    /* Memory remap registers: modelled as NOP. */
    { .name = "PRRR", .cp = 15, .crn = 10, .crm = 2,
      .opc1 = 0, .opc2 = 0, .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "NMRR", .cp = 15, .crn = 10, .crm = 2,
      .opc1 = 0, .opc2 = 1, .access = PL1_RW, .type = ARM_CP_NOP },
    REGINFO_SENTINEL
};
872
static void cpacr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    uint32_t mask = 0;

    /* In v8 no masking is applied here (most other bits are RES0 anyway). */
    if (!arm_feature(env, ARM_FEATURE_V8)) {
        /* Pre-v8: only the cp10/cp11 enables [23:20] plus ASEDIS [31]
         * and D32DIS [30] are kept, and only if VFP is present at all;
         * everything else is treated as RAZ/WI.
         */
        if (arm_feature(env, ARM_FEATURE_VFP)) {
            /* VFP coprocessor: cp10 & cp11 [23:20], ASEDIS, D32DIS */
            mask |= (1 << 31) | (1 << 30) | (0xf << 20);

            if (!arm_feature(env, ARM_FEATURE_NEON)) {
                /* ASEDIS [31] is RAO/WI without Neon */
                value |= (1 << 31);
            }

            /* D32DIS [30] is RAO/WI unless the 32-double-register set
             * is implemented (Neon together with VFPv3+).
             */
            if (!arm_feature(env, ARM_FEATURE_NEON) ||
                !arm_feature(env, ARM_FEATURE_VFP3)) {
                value |= (1 << 30);
            }
        }
        value &= mask;
    }
    env->cp15.cpacr_el1 = value;
}
906
static void cpacr_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* Reset through cpacr_write() so that the RAO/WI bits (ASEDIS,
     * D32DIS) come up set correctly for this CPU's feature set rather
     * than as a plain zero.
     */
    cpacr_write(env, ri, 0);
}
914
915static CPAccessResult cpacr_access(CPUARMState *env, const ARMCPRegInfo *ri,
916 bool isread)
917{
918 if (arm_feature(env, ARM_FEATURE_V8)) {
919
920 if (arm_current_el(env) == 1 &&
921 (env->cp15.cptr_el[2] & CPTR_TCPAC) && !arm_is_secure(env)) {
922 return CP_ACCESS_TRAP_EL2;
923
924 } else if (arm_current_el(env) < 3 &&
925 (env->cp15.cptr_el[3] & CPTR_TCPAC)) {
926 return CP_ACCESS_TRAP_EL3;
927 }
928 }
929
930 return CP_ACCESS_OK;
931}
932
933static CPAccessResult cptr_access(CPUARMState *env, const ARMCPRegInfo *ri,
934 bool isread)
935{
936
937 if (arm_current_el(env) == 2 && (env->cp15.cptr_el[3] & CPTR_TCPAC)) {
938 return CP_ACCESS_TRAP_EL3;
939 }
940
941 return CP_ACCESS_OK;
942}
943
static const ARMCPRegInfo v6_cp_reginfo[] = {
    /* Prefetch by MVA: nothing to do in emulation, so NOP. */
    { .name = "MVA_prefetch",
      .cp = 15, .crn = 7, .crm = 13, .opc1 = 0, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NOP },
    /* ISB uses arm_cp_write_ignore() rather than ARM_CP_NOP — the
     * NO_RAW+writefn combination means the write is executed as a real
     * (ignored) op; presumably so the insn ends the translation block,
     * see arm_cp_write_ignore for the details.
     */
    { .name = "ISB", .cp = 15, .crn = 7, .crm = 5, .opc1 = 0, .opc2 = 4,
      .access = PL0_W, .type = ARM_CP_NO_RAW, .writefn = arm_cp_write_ignore },
    /* Barrier ops: nothing to do for TCG, so plain NOPs. */
    { .name = "DSB", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 4,
      .access = PL0_W, .type = ARM_CP_NOP },
    { .name = "DMB", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 5,
      .access = PL0_W, .type = ARM_CP_NOP },
    /* Instruction Fault Address Register, banked S/NS. */
    { .name = "IFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ifar_s),
                             offsetof(CPUARMState, cp15.ifar_ns) },
      .resetvalue = 0, },
    /* Watchpoint Fault Address Register: modelled as RAZ/WI constant. */
    { .name = "WFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0, },
    /* Coprocessor access control; feature-dependent masking and traps
     * are handled by cpacr_write/cpacr_reset/cpacr_access above.
     */
    { .name = "CPACR", .state = ARM_CP_STATE_BOTH, .opc0 = 3,
      .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 2, .accessfn = cpacr_access,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.cpacr_el1),
      .resetfn = cpacr_reset, .writefn = cpacr_write },
    REGINFO_SENTINEL
};
975
976
977#define PMCRN_MASK 0xf800
978#define PMCRN_SHIFT 11
979#define PMCRD 0x8
980#define PMCRC 0x4
981#define PMCRE 0x1
982
983static inline uint32_t pmu_num_counters(CPUARMState *env)
984{
985 return (env->cp15.c9_pmcr & PMCRN_MASK) >> PMCRN_SHIFT;
986}
987
988
989static inline uint64_t pmu_counter_mask(CPUARMState *env)
990{
991 return (1 << 31) | ((1 << pmu_num_counters(env)) - 1);
992}
993
994static CPAccessResult pmreg_access(CPUARMState *env, const ARMCPRegInfo *ri,
995 bool isread)
996{
997
998
999
1000
1001 int el = arm_current_el(env);
1002
1003 if (el == 0 && !(env->cp15.c9_pmuserenr & 1)) {
1004 return CP_ACCESS_TRAP;
1005 }
1006 if (el < 2 && (env->cp15.mdcr_el2 & MDCR_TPM)
1007 && !arm_is_secure_below_el3(env)) {
1008 return CP_ACCESS_TRAP_EL2;
1009 }
1010 if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TPM)) {
1011 return CP_ACCESS_TRAP_EL3;
1012 }
1013
1014 return CP_ACCESS_OK;
1015}
1016
1017static CPAccessResult pmreg_access_xevcntr(CPUARMState *env,
1018 const ARMCPRegInfo *ri,
1019 bool isread)
1020{
1021
1022 if (arm_feature(env, ARM_FEATURE_V8)
1023 && arm_current_el(env) == 0
1024 && (env->cp15.c9_pmuserenr & (1 << 3)) != 0
1025 && isread) {
1026 return CP_ACCESS_OK;
1027 }
1028
1029 return pmreg_access(env, ri, isread);
1030}
1031
1032static CPAccessResult pmreg_access_swinc(CPUARMState *env,
1033 const ARMCPRegInfo *ri,
1034 bool isread)
1035{
1036
1037 if (arm_feature(env, ARM_FEATURE_V8)
1038 && arm_current_el(env) == 0
1039 && (env->cp15.c9_pmuserenr & (1 << 1)) != 0
1040 && !isread) {
1041 return CP_ACCESS_OK;
1042 }
1043
1044 return pmreg_access(env, ri, isread);
1045}
1046
1047#ifndef CONFIG_USER_ONLY
1048
1049static CPAccessResult pmreg_access_selr(CPUARMState *env,
1050 const ARMCPRegInfo *ri,
1051 bool isread)
1052{
1053
1054 if (arm_feature(env, ARM_FEATURE_V8)
1055 && arm_current_el(env) == 0
1056 && (env->cp15.c9_pmuserenr & (1 << 3)) != 0) {
1057 return CP_ACCESS_OK;
1058 }
1059
1060 return pmreg_access(env, ri, isread);
1061}
1062
1063static CPAccessResult pmreg_access_ccntr(CPUARMState *env,
1064 const ARMCPRegInfo *ri,
1065 bool isread)
1066{
1067
1068 if (arm_feature(env, ARM_FEATURE_V8)
1069 && arm_current_el(env) == 0
1070 && (env->cp15.c9_pmuserenr & (1 << 2)) != 0
1071 && isread) {
1072 return CP_ACCESS_OK;
1073 }
1074
1075 return pmreg_access(env, ri, isread);
1076}
1077
1078static inline bool arm_ccnt_enabled(CPUARMState *env)
1079{
1080
1081
1082 if (!(env->cp15.c9_pmcr & PMCRE) || !(env->cp15.c9_pmcnten & (1 << 31))) {
1083 return false;
1084 }
1085
1086 return true;
1087}
1088
void pmccntr_sync(CPUARMState *env)
{
    /* Toggle c15_ccnt between its two representations: while the counter
     * is enabled it holds (ticks_now - count), i.e. a start offset; a
     * second call converts back. Callers bracket a state/config change
     * with two syncs so the visible count stays consistent. Ticks come
     * from the virtual clock scaled to a fixed ARM_CPU_FREQ.
     */
    uint64_t temp_ticks;

    temp_ticks = muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
                          ARM_CPU_FREQ, NANOSECONDS_PER_SECOND);

    if (env->cp15.c9_pmcr & PMCRD) {
        /* PMCR.D: the counter ticks once every 64 clock cycles */
        temp_ticks /= 64;
    }

    if (arm_ccnt_enabled(env)) {
        env->cp15.c15_ccnt = temp_ticks - env->cp15.c15_ccnt;
    }
}
1105
static void pmcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                       uint64_t value)
{
    /* Bracket the control change with pmccntr_sync() so the enable and
     * divider bits take effect at the correct count (see pmccntr_sync).
     */
    pmccntr_sync(env);

    if (value & PMCRC) {
        /* PMCR.C: reset the cycle counter */
        env->cp15.c15_ccnt = 0;
    }

    /* Only bits 0,3,4,5 (E, D, X, DP — mask 0x39) are writable */
    env->cp15.c9_pmcr &= ~0x39;
    env->cp15.c9_pmcr |= (value & 0x39);

    pmccntr_sync(env);
}
1122
1123static uint64_t pmccntr_read(CPUARMState *env, const ARMCPRegInfo *ri)
1124{
1125 uint64_t total_ticks;
1126
1127 if (!arm_ccnt_enabled(env)) {
1128
1129 return env->cp15.c15_ccnt;
1130 }
1131
1132 total_ticks = muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
1133 ARM_CPU_FREQ, NANOSECONDS_PER_SECOND);
1134
1135 if (env->cp15.c9_pmcr & PMCRD) {
1136
1137 total_ticks /= 64;
1138 }
1139 return total_ticks - env->cp15.c15_ccnt;
1140}
1141
1142static void pmselr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1143 uint64_t value)
1144{
1145
1146
1147
1148
1149
1150 env->cp15.c9_pmselr = value & 0x1f;
1151}
1152
1153static void pmccntr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1154 uint64_t value)
1155{
1156 uint64_t total_ticks;
1157
1158 if (!arm_ccnt_enabled(env)) {
1159
1160 env->cp15.c15_ccnt = value;
1161 return;
1162 }
1163
1164 total_ticks = muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
1165 ARM_CPU_FREQ, NANOSECONDS_PER_SECOND);
1166
1167 if (env->cp15.c9_pmcr & PMCRD) {
1168
1169 total_ticks /= 64;
1170 }
1171 env->cp15.c15_ccnt = total_ticks - value;
1172}
1173
1174static void pmccntr_write32(CPUARMState *env, const ARMCPRegInfo *ri,
1175 uint64_t value)
1176{
1177 uint64_t cur_val = pmccntr_read(env, NULL);
1178
1179 pmccntr_write(env, ri, deposit64(cur_val, 0, 32, value));
1180}
1181
1182#else
1183
void pmccntr_sync(CPUARMState *env)
{
    /* CONFIG_USER_ONLY stub: no cycle-counter state to synchronise. */
}
1187
1188#endif
1189
static void pmccfiltr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    /* Only the top filter bits [31:26] (mask 0xfc000000) are kept.
     * Bracket the change with pmccntr_sync() so the counter state stays
     * consistent across the filter update.
     */
    pmccntr_sync(env);
    env->cp15.pmccfiltr_el0 = value & 0xfc000000;
    pmccntr_sync(env);
}
1197
1198static void pmcntenset_write(CPUARMState *env, const ARMCPRegInfo *ri,
1199 uint64_t value)
1200{
1201 value &= pmu_counter_mask(env);
1202 env->cp15.c9_pmcnten |= value;
1203}
1204
1205static void pmcntenclr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1206 uint64_t value)
1207{
1208 value &= pmu_counter_mask(env);
1209 env->cp15.c9_pmcnten &= ~value;
1210}
1211
1212static void pmovsr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1213 uint64_t value)
1214{
1215 value &= pmu_counter_mask(env);
1216 env->cp15.c9_pmovsr &= ~value;
1217}
1218
1219static void pmxevtyper_write(CPUARMState *env, const ARMCPRegInfo *ri,
1220 uint64_t value)
1221{
1222
1223
1224
1225
1226 if (env->cp15.c9_pmselr == 0x1f) {
1227 pmccfiltr_write(env, ri, value);
1228 }
1229}
1230
1231static uint64_t pmxevtyper_read(CPUARMState *env, const ARMCPRegInfo *ri)
1232{
1233
1234
1235
1236 if (env->cp15.c9_pmselr == 0x1f) {
1237 return env->cp15.pmccfiltr_el0;
1238 } else {
1239 return 0;
1240 }
1241}
1242
1243static void pmuserenr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1244 uint64_t value)
1245{
1246 if (arm_feature(env, ARM_FEATURE_V8)) {
1247 env->cp15.c9_pmuserenr = value & 0xf;
1248 } else {
1249 env->cp15.c9_pmuserenr = value & 1;
1250 }
1251}
1252
1253static void pmintenset_write(CPUARMState *env, const ARMCPRegInfo *ri,
1254 uint64_t value)
1255{
1256
1257 value &= pmu_counter_mask(env);
1258 env->cp15.c9_pminten |= value;
1259}
1260
1261static void pmintenclr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1262 uint64_t value)
1263{
1264 value &= pmu_counter_mask(env);
1265 env->cp15.c9_pminten &= ~value;
1266}
1267
static void vbar_write(CPUARMState *env, const ARMCPRegInfo *ri,
                       uint64_t value)
{
    /* The low 5 bits of the vector base are masked off (treated as
     * RES0), forcing at least 32-byte alignment of the stored address.
     */
    raw_write(env, ri, value & ~0x1FULL);
}
1279
static void scr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    /* Start from the union of the AArch64 and AArch32 writable-bit masks,
     * then remove bits whose controlling feature is absent.
     */
    uint32_t valid_mask = SCR_AARCH64_MASK | SCR_AARCH32_MASK;

    if (!arm_feature(env, ARM_FEATURE_EL2)) {
        valid_mask &= ~SCR_HCE;

        /* SMD is additionally cleared on v7 (but not v8) when EL2 is
         * absent — NOTE(review): presumably because v7 treats it as
         * UNK/SBZP without EL2 while v8 always implements it; confirm
         * against the architecture manual.
         */
        if (arm_feature(env, ARM_FEATURE_V7) &&
            !arm_feature(env, ARM_FEATURE_V8)) {
            valid_mask &= ~SCR_SMD;
        }
    }

    /* Clear all RES0 bits before storing */
    value &= valid_mask;
    raw_write(env, ri, value);
}
1307
static uint64_t ccsidr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    ARMCPU *cpu = arm_env_get_cpu(env);

    /* Index into the precomputed ccsidr[] table using the CSSELR value
     * from the bank matching the secure state of this access.
     */
    uint32_t index = A32_BANKED_REG_GET(env, csselr,
                                        ri->secure & ARM_CP_SECSTATE_S);

    return cpu->ccsidr[index];
}
1320
1321static void csselr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1322 uint64_t value)
1323{
1324 raw_write(env, ri, value & 0xf);
1325}
1326
1327static uint64_t isr_read(CPUARMState *env, const ARMCPRegInfo *ri)
1328{
1329 CPUState *cs = ENV_GET_CPU(env);
1330 uint64_t ret = 0;
1331
1332 if (arm_hcr_el2_imo(env)) {
1333 if (cs->interrupt_request & CPU_INTERRUPT_VIRQ) {
1334 ret |= CPSR_I;
1335 }
1336 } else {
1337 if (cs->interrupt_request & CPU_INTERRUPT_HARD) {
1338 ret |= CPSR_I;
1339 }
1340 }
1341
1342 if (arm_hcr_el2_fmo(env)) {
1343 if (cs->interrupt_request & CPU_INTERRUPT_VFIQ) {
1344 ret |= CPSR_F;
1345 }
1346 } else {
1347 if (cs->interrupt_request & CPU_INTERRUPT_FIQ) {
1348 ret |= CPSR_F;
1349 }
1350 }
1351
1352
1353 return ret;
1354}
1355
static const ARMCPRegInfo v7_cp_reginfo[] = {
    /* the old v6 WFI, UNPREDICTABLE in v7 but we choose to NOP */
    { .name = "NOP", .cp = 15, .crn = 7, .crm = 0, .opc1 = 0, .opc2 = 4,
      .access = PL1_W, .type = ARM_CP_NOP },
    /* Performance monitors are implementation defined in v7,
     * but with an ARM recommended set of registers, which we
     * follow.
     *
     * Performance registers fall into three categories:
     *  (a) always UNDEF in PL0, RW in PL1 (PMINTENSET, PMINTENCLR)
     *  (b) RO in PL0 (ie UNDEF after PMUSERENR.EN written), RW in PL1
     *  (c) UNDEF in PL0 if PMUSERENR.EN==0, otherwise accessible (all others)
     * For the cases controlled by PMUSERENR we must set .access to PL0_RW
     * or PL0_R as appropriate and then check PMUSERENR in the accessfn.
     */
    { .name = "PMCNTENSET", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 1,
      .access = PL0_RW, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcnten),
      .writefn = pmcntenset_write,
      .accessfn = pmreg_access,
      .raw_writefn = raw_write },
    { .name = "PMCNTENSET_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 1,
      .access = PL0_RW, .accessfn = pmreg_access,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcnten), .resetvalue = 0,
      .writefn = pmcntenset_write, .raw_writefn = raw_write },
    { .name = "PMCNTENCLR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 2,
      .access = PL0_RW,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcnten),
      .accessfn = pmreg_access,
      .writefn = pmcntenclr_write,
      .type = ARM_CP_ALIAS },
    { .name = "PMCNTENCLR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 2,
      .access = PL0_RW, .accessfn = pmreg_access,
      .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcnten),
      .writefn = pmcntenclr_write },
    { .name = "PMOVSR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 3,
      .access = PL0_RW,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmovsr),
      .accessfn = pmreg_access,
      .writefn = pmovsr_write,
      .raw_writefn = raw_write },
    { .name = "PMOVSCLR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 3,
      .access = PL0_RW, .accessfn = pmreg_access,
      .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmovsr),
      .writefn = pmovsr_write,
      .raw_writefn = raw_write },
    /* Software increment is unimplemented here, so write-ignore (NOP). */
    { .name = "PMSWINC", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 4,
      .access = PL0_W, .accessfn = pmreg_access_swinc, .type = ARM_CP_NOP },
#ifndef CONFIG_USER_ONLY
    { .name = "PMSELR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 5,
      .access = PL0_RW, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmselr),
      .accessfn = pmreg_access_selr, .writefn = pmselr_write,
      .raw_writefn = raw_write},
    { .name = "PMSELR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 5,
      .access = PL0_RW, .accessfn = pmreg_access_selr,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmselr),
      .writefn = pmselr_write, .raw_writefn = raw_write, },
    { .name = "PMCCNTR", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 0,
      .access = PL0_RW, .resetvalue = 0, .type = ARM_CP_ALIAS | ARM_CP_IO,
      .readfn = pmccntr_read, .writefn = pmccntr_write32,
      .accessfn = pmreg_access_ccntr },
    { .name = "PMCCNTR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 0,
      .access = PL0_RW, .accessfn = pmreg_access_ccntr,
      .type = ARM_CP_IO,
      .readfn = pmccntr_read, .writefn = pmccntr_write, },
#endif
    { .name = "PMCCFILTR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 15, .opc2 = 7,
      .writefn = pmccfiltr_write,
      .access = PL0_RW, .accessfn = pmreg_access,
      .type = ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.pmccfiltr_el0),
      .resetvalue = 0, },
    { .name = "PMXEVTYPER", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 1,
      .access = PL0_RW, .type = ARM_CP_NO_RAW, .accessfn = pmreg_access,
      .writefn = pmxevtyper_write, .readfn = pmxevtyper_read },
    { .name = "PMXEVTYPER_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 1,
      .access = PL0_RW, .type = ARM_CP_NO_RAW, .accessfn = pmreg_access,
      .writefn = pmxevtyper_write, .readfn = pmxevtyper_read },
    /* Event counters are unimplemented here: PMXEVCNTR is RAZ/WI. */
    { .name = "PMXEVCNTR", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 2,
      .access = PL0_RW, .type = ARM_CP_CONST, .resetvalue = 0,
      .accessfn = pmreg_access_xevcntr },
    { .name = "PMUSERENR", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 0,
      .access = PL0_R | PL1_RW, .accessfn = access_tpm,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmuserenr),
      .resetvalue = 0,
      .writefn = pmuserenr_write, .raw_writefn = raw_write },
    { .name = "PMUSERENR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 14, .opc2 = 0,
      .access = PL0_R | PL1_RW, .accessfn = access_tpm, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmuserenr),
      .resetvalue = 0,
      .writefn = pmuserenr_write, .raw_writefn = raw_write },
    { .name = "PMINTENSET", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_tpm,
      .type = ARM_CP_ALIAS | ARM_CP_IO,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pminten),
      .resetvalue = 0,
      .writefn = pmintenset_write, .raw_writefn = raw_write },
    { .name = "PMINTENSET_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_tpm,
      .type = ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
      .writefn = pmintenset_write, .raw_writefn = raw_write,
      .resetvalue = 0x0 },
    { .name = "PMINTENCLR", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW, .accessfn = access_tpm,
      .type = ARM_CP_ALIAS | ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
      .writefn = pmintenclr_write, },
    { .name = "PMINTENCLR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 2,
      .access = PL1_RW, .accessfn = access_tpm,
      .type = ARM_CP_ALIAS | ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
      .writefn = pmintenclr_write },
    /* Cache ID registers: CCSIDR reads via the banked CSSELR selection. */
    { .name = "CCSIDR", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = 0,
      .access = PL1_R, .readfn = ccsidr_read, .type = ARM_CP_NO_RAW },
    { .name = "CSSELR", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 2, .opc2 = 0,
      .access = PL1_RW, .writefn = csselr_write, .resetvalue = 0,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.csselr_s),
                             offsetof(CPUARMState, cp15.csselr_ns) } },
    /* Auxiliary ID register: IMPDEF in hardware, but reads-as-zero
     * for all cores here.
     */
    { .name = "AIDR", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 1, .crn = 0, .crm = 0, .opc2 = 7,
      .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
    /* Auxiliary fault status registers: also IMPDEF, implemented as
     * RAZ/WI for all cores.
     */
    { .name = "AFSR0_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 1, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "AFSR1_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 1, .opc2 = 1,
      .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    /* Memory attribute registers: simple reads-as-written state (no
     * readfn/writefn, just a backing field).
     */
    { .name = "MAIR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 0,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.mair_el[1]),
      .resetvalue = 0 },
    { .name = "MAIR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 10, .crm = 2, .opc2 = 0,
      .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.mair_el[3]),
      .resetvalue = 0 },
    /* AArch32 32-bit views of the memory attribute state, banked by
     * security state. Reset is deliberately ignored here.
     * NOTE(review): presumably mair0/1_{s,ns} alias halves of MAIR_EL1
     * via a union in CPUARMState, so MAIR_EL1's reset covers these --
     * confirm against the cpu.h field layout.
     */
    { .name = "MAIR0", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 0, .access = PL1_RW,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.mair0_s),
                             offsetof(CPUARMState, cp15.mair0_ns) },
      .resetfn = arm_cp_reset_ignore },
    { .name = "MAIR1", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 1, .access = PL1_RW,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.mair1_s),
                             offsetof(CPUARMState, cp15.mair1_ns) },
      .resetfn = arm_cp_reset_ignore },
    /* Interrupt status register: read-only, computed from the CPU's
     * pending interrupt lines by isr_read().
     */
    { .name = "ISR_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 1, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL1_R, .readfn = isr_read },
    /* 32 bit ITLB invalidates */
    { .name = "ITLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiall_write },
    { .name = "ITLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_write },
    { .name = "ITLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 2,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiasid_write },
    /* 32 bit DTLB invalidates */
    { .name = "DTLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiall_write },
    { .name = "DTLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_write },
    { .name = "DTLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 2,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiasid_write },
    /* 32 bit unified TLB invalidates */
    { .name = "TLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiall_write },
    { .name = "TLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_write },
    { .name = "TLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 2,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiasid_write },
    { .name = "TLBIMVAA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 3,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimvaa_write },
    REGINFO_SENTINEL
};
1563
static const ARMCPRegInfo v7mp_cp_reginfo[] = {
    /* 32 bit TLB invalidates, Inner Shareable variants */
    { .name = "TLBIALLIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiall_is_write },
    { .name = "TLBIMVAIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_is_write },
    { .name = "TLBIASIDIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 2,
      .type = ARM_CP_NO_RAW, .access = PL1_W,
      .writefn = tlbiasid_is_write },
    { .name = "TLBIMVAAIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 3,
      .type = ARM_CP_NO_RAW, .access = PL1_W,
      .writefn = tlbimvaa_is_write },
    REGINFO_SENTINEL
};
1578
1579static void teecr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1580 uint64_t value)
1581{
1582 value &= 1;
1583 env->teecr = value;
1584}
1585
1586static CPAccessResult teehbr_access(CPUARMState *env, const ARMCPRegInfo *ri,
1587 bool isread)
1588{
1589 if (arm_current_el(env) == 0 && (env->teecr & 1)) {
1590 return CP_ACCESS_TRAP;
1591 }
1592 return CP_ACCESS_OK;
1593}
1594
/* ThumbEE registers: TEECR (PL1, masked write) and TEEHBR, whose EL0
 * accessibility is gated by TEECR via teehbr_access().
 */
static const ARMCPRegInfo t2ee_cp_reginfo[] = {
    { .name = "TEECR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 6, .opc2 = 0,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, teecr),
      .resetvalue = 0,
      .writefn = teecr_write },
    { .name = "TEEHBR", .cp = 14, .crn = 1, .crm = 0, .opc1 = 6, .opc2 = 0,
      .access = PL0_RW, .fieldoffset = offsetof(CPUARMState, teehbr),
      .accessfn = teehbr_access, .resetvalue = 0 },
    REGINFO_SENTINEL
};
1605
/* v6K software thread ID registers (AArch64 views plus the AArch32
 * banked views). The 32-bit aliases use arm_cp_reset_ignore where the
 * 64-bit view carries the reset value.
 */
static const ARMCPRegInfo v6k_cp_reginfo[] = {
    /* EL0 read/write thread ID register, AArch64 view */
    { .name = "TPIDR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .opc2 = 2, .crn = 13, .crm = 0,
      .access = PL0_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[0]), .resetvalue = 0 },
    /* AArch32 banked view of the EL0 RW thread ID register */
    { .name = "TPIDRURW", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL0_RW,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidrurw_s),
                             offsetoflow32(CPUARMState, cp15.tpidrurw_ns) },
      .resetfn = arm_cp_reset_ignore },
    /* Thread ID register that is read-only at EL0, writable at PL1 */
    { .name = "TPIDRRO_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .opc2 = 3, .crn = 13, .crm = 0,
      .access = PL0_R|PL1_W,
      .fieldoffset = offsetof(CPUARMState, cp15.tpidrro_el[0]),
      .resetvalue = 0},
    /* AArch32 banked view of the EL0-RO thread ID register */
    { .name = "TPIDRURO", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 3,
      .access = PL0_R|PL1_W,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidruro_s),
                             offsetoflow32(CPUARMState, cp15.tpidruro_ns) },
      .resetfn = arm_cp_reset_ignore },
    /* Privileged thread ID register, AArch64 view */
    { .name = "TPIDR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .opc2 = 4, .crn = 13, .crm = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[1]), .resetvalue = 0 },
    /* AArch32 banked view of the privileged thread ID register */
    { .name = "TPIDRPRW", .opc1 = 0, .cp = 15, .crn = 13, .crm = 0, .opc2 = 4,
      .access = PL1_RW,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidrprw_s),
                             offsetoflow32(CPUARMState, cp15.tpidrprw_ns) },
      .resetvalue = 0 },
    REGINFO_SENTINEL
};
1637
1638#ifndef CONFIG_USER_ONLY
1639
static CPAccessResult gt_cntfrq_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                       bool isread)
{
    /* CNTFRQ: not visible from EL0 if both counter-enable bits in
     * CNTKCTL (bits 0 and 1) are zero; writable only from the highest
     * implemented exception level.
     */
    int el = arm_current_el(env);

    switch (el) {
    case 0:
        /* Trap if neither of CNTKCTL bits [1:0] is set. */
        if (!extract32(env->cp15.c14_cntkctl, 0, 2)) {
            return CP_ACCESS_TRAP;
        }
        break;
    case 1:
        if (!isread && ri->state == ARM_CP_STATE_AA32 &&
            arm_is_secure_below_el3(env)) {
            /* Writes from 32-bit Secure EL1 UNDEF (not a trap to EL3). */
            return CP_ACCESS_TRAP_UNCATEGORIZED;
        }
        break;
    case 2:
    case 3:
        break;
    }

    /* Read-only from anywhere below the highest implemented EL. */
    if (!isread && el < arm_highest_el(env)) {
        return CP_ACCESS_TRAP_UNCATEGORIZED;
    }

    return CP_ACCESS_OK;
}
1672
1673static CPAccessResult gt_counter_access(CPUARMState *env, int timeridx,
1674 bool isread)
1675{
1676 unsigned int cur_el = arm_current_el(env);
1677 bool secure = arm_is_secure(env);
1678
1679
1680 if (cur_el == 0 &&
1681 !extract32(env->cp15.c14_cntkctl, timeridx, 1)) {
1682 return CP_ACCESS_TRAP;
1683 }
1684
1685 if (arm_feature(env, ARM_FEATURE_EL2) &&
1686 timeridx == GTIMER_PHYS && !secure && cur_el < 2 &&
1687 !extract32(env->cp15.cnthctl_el2, 0, 1)) {
1688 return CP_ACCESS_TRAP_EL2;
1689 }
1690 return CP_ACCESS_OK;
1691}
1692
1693static CPAccessResult gt_timer_access(CPUARMState *env, int timeridx,
1694 bool isread)
1695{
1696 unsigned int cur_el = arm_current_el(env);
1697 bool secure = arm_is_secure(env);
1698
1699
1700
1701
1702 if (cur_el == 0 &&
1703 !extract32(env->cp15.c14_cntkctl, 9 - timeridx, 1)) {
1704 return CP_ACCESS_TRAP;
1705 }
1706
1707 if (arm_feature(env, ARM_FEATURE_EL2) &&
1708 timeridx == GTIMER_PHYS && !secure && cur_el < 2 &&
1709 !extract32(env->cp15.cnthctl_el2, 1, 1)) {
1710 return CP_ACCESS_TRAP_EL2;
1711 }
1712 return CP_ACCESS_OK;
1713}
1714
/* accessfn adapters binding gt_counter_access()/gt_timer_access() to
 * the ARMCPRegInfo callback signature for the phys/virt timers.
 */
static CPAccessResult gt_pct_access(CPUARMState *env,
                                    const ARMCPRegInfo *ri,
                                    bool isread)
{
    return gt_counter_access(env, GTIMER_PHYS, isread);
}

static CPAccessResult gt_vct_access(CPUARMState *env,
                                    const ARMCPRegInfo *ri,
                                    bool isread)
{
    return gt_counter_access(env, GTIMER_VIRT, isread);
}

static CPAccessResult gt_ptimer_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                       bool isread)
{
    return gt_timer_access(env, GTIMER_PHYS, isread);
}

static CPAccessResult gt_vtimer_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                       bool isread)
{
    return gt_timer_access(env, GTIMER_VIRT, isread);
}
1740
1741static CPAccessResult gt_stimer_access(CPUARMState *env,
1742 const ARMCPRegInfo *ri,
1743 bool isread)
1744{
1745
1746
1747
1748
1749 switch (arm_current_el(env)) {
1750 case 1:
1751 if (!arm_is_secure(env)) {
1752 return CP_ACCESS_TRAP;
1753 }
1754 if (!(env->cp15.scr_el3 & SCR_ST)) {
1755 return CP_ACCESS_TRAP_EL3;
1756 }
1757 return CP_ACCESS_OK;
1758 case 0:
1759 case 2:
1760 return CP_ACCESS_TRAP;
1761 case 3:
1762 return CP_ACCESS_OK;
1763 default:
1764 g_assert_not_reached();
1765 }
1766}
1767
1768static uint64_t gt_get_countervalue(CPUARMState *env)
1769{
1770 return qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) / GTIMER_SCALE;
1771}
1772
/*
 * Recompute the state of one generic timer: the ISTATUS bit, the
 * output IRQ line, and the QEMU timer that models the next ISTATUS
 * transition.
 */
static void gt_recalc_timer(ARMCPU *cpu, int timeridx)
{
    ARMGenericTimer *gt = &cpu->env.cp15.c14_timer[timeridx];

    if (gt->ctl & 1) {
        /* Timer enabled: calculate and set current ISTATUS, irq, and
         * reset timer to when ISTATUS next has to change.
         * Only the virtual timer has a (CNTVOFF_EL2) counter offset.
         */
        uint64_t offset = timeridx == GTIMER_VIRT ?
                                      cpu->env.cp15.cntvoff_el2 : 0;
        uint64_t count = gt_get_countervalue(&cpu->env);
        /* Note that this must be unsigned 64 bit arithmetic: */
        int istatus = count - offset >= gt->cval;
        uint64_t nexttick;
        int irqstate;

        /* ISTATUS is bit 2 of the CTL register. */
        gt->ctl = deposit32(gt->ctl, 2, 1, istatus);

        /* Assert the IRQ line if the condition holds and IMASK (bit 1)
         * is clear.
         */
        irqstate = (istatus && !(gt->ctl & 2));
        qemu_set_irq(cpu->gt_timer_outputs[timeridx], irqstate);

        if (istatus) {
            /* Next transition is when count rolls back over to zero */
            nexttick = UINT64_MAX;
        } else {
            /* Next transition is when we hit cval */
            nexttick = gt->cval + offset;
        }
        /* The desired expiry might exceed what a signed-64-bit
         * QEMUTimer deadline can represent once scaled; clamp it.
         * NOTE(review): assumes cpu->gt_timer[] was created with scale
         * GTIMER_SCALE so timer_mod() takes tick units -- confirm at
         * the timer_new() call site.
         */
        if (nexttick > INT64_MAX / GTIMER_SCALE) {
            nexttick = INT64_MAX / GTIMER_SCALE;
        }
        timer_mod(cpu->gt_timer[timeridx], nexttick);
        trace_arm_gt_recalc(timeridx, irqstate, nexttick);
    } else {
        /* Timer disabled: clear ISTATUS (bit 2), drop the IRQ line and
         * cancel any pending QEMU timer.
         */
        gt->ctl &= ~4;
        qemu_set_irq(cpu->gt_timer_outputs[timeridx], 0);
        timer_del(cpu->gt_timer[timeridx]);
        trace_arm_gt_recalc_disabled(timeridx);
    }
}
1819
1820static void gt_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri,
1821 int timeridx)
1822{
1823 ARMCPU *cpu = arm_env_get_cpu(env);
1824
1825 timer_del(cpu->gt_timer[timeridx]);
1826}
1827
/* CNTPCT: the raw physical counter value. */
static uint64_t gt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return gt_get_countervalue(env);
}

/* CNTVCT: the physical count minus the EL2-programmed virtual offset. */
static uint64_t gt_virt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return gt_get_countervalue(env) - env->cp15.cntvoff_el2;
}
1837
/* Write the 64-bit compare value (CVAL) and reprogram the timer. */
static void gt_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          int timeridx,
                          uint64_t value)
{
    trace_arm_gt_cval_write(timeridx, value);
    env->cp15.c14_timer[timeridx].cval = value;
    gt_recalc_timer(arm_env_get_cpu(env), timeridx);
}
1846
1847static uint64_t gt_tval_read(CPUARMState *env, const ARMCPRegInfo *ri,
1848 int timeridx)
1849{
1850 uint64_t offset = timeridx == GTIMER_VIRT ? env->cp15.cntvoff_el2 : 0;
1851
1852 return (uint32_t)(env->cp15.c14_timer[timeridx].cval -
1853 (gt_get_countervalue(env) - offset));
1854}
1855
1856static void gt_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
1857 int timeridx,
1858 uint64_t value)
1859{
1860 uint64_t offset = timeridx == GTIMER_VIRT ? env->cp15.cntvoff_el2 : 0;
1861
1862 trace_arm_gt_tval_write(timeridx, value);
1863 env->cp15.c14_timer[timeridx].cval = gt_get_countervalue(env) - offset +
1864 sextract64(value, 0, 32);
1865 gt_recalc_timer(arm_env_get_cpu(env), timeridx);
1866}
1867
/* Write the timer CTL register: only ENABLE (bit 0) and IMASK (bit 1)
 * are writable; ISTATUS (bit 2) is maintained by gt_recalc_timer().
 */
static void gt_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         int timeridx,
                         uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    uint32_t oldval = env->cp15.c14_timer[timeridx].ctl;

    trace_arm_gt_ctl_write(timeridx, value);
    env->cp15.c14_timer[timeridx].ctl = deposit64(oldval, 0, 2, value);
    if ((oldval ^ value) & 1) {
        /* Enable toggled: fully recompute timer state */
        gt_recalc_timer(cpu, timeridx);
    } else if ((oldval ^ value) & 2) {
        /* IMASK toggled: no need to recalculate, just drive the
         * interrupt line from the previously computed ISTATUS and the
         * new mask bit.
         */
        int irqstate = (oldval & 4) && !(value & 2);

        trace_arm_gt_imask_toggle(timeridx, irqstate);
        qemu_set_irq(cpu->gt_timer_outputs[timeridx], irqstate);
    }
}
1890
/* Physical timer: trampolines binding the shared generic-timer helpers
 * to GTIMER_PHYS in the shape ARMCPRegInfo callbacks require.
 */
static void gt_phys_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    gt_timer_reset(env, ri, GTIMER_PHYS);
}

static void gt_phys_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    gt_cval_write(env, ri, GTIMER_PHYS, value);
}

static uint64_t gt_phys_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return gt_tval_read(env, ri, GTIMER_PHYS);
}

static void gt_phys_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    gt_tval_write(env, ri, GTIMER_PHYS, value);
}

static void gt_phys_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    gt_ctl_write(env, ri, GTIMER_PHYS, value);
}
1918
/* Virtual timer: trampolines binding the shared generic-timer helpers
 * to GTIMER_VIRT.
 */
static void gt_virt_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    gt_timer_reset(env, ri, GTIMER_VIRT);
}

static void gt_virt_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    gt_cval_write(env, ri, GTIMER_VIRT, value);
}

static uint64_t gt_virt_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return gt_tval_read(env, ri, GTIMER_VIRT);
}

static void gt_virt_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    gt_tval_write(env, ri, GTIMER_VIRT, value);
}

static void gt_virt_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    gt_ctl_write(env, ri, GTIMER_VIRT, value);
}
1946
/* Writing CNTVOFF_EL2 moves the virtual counter, so the virtual timer
 * must be reprogrammed against the new counter value.
 */
static void gt_cntvoff_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);

    trace_arm_gt_cntvoff_write(value);
    raw_write(env, ri, value);
    gt_recalc_timer(cpu, GTIMER_VIRT);
}
1956
/* Hypervisor (EL2) timer: trampolines binding the shared helpers to
 * GTIMER_HYP.
 */
static void gt_hyp_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    gt_timer_reset(env, ri, GTIMER_HYP);
}

static void gt_hyp_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    gt_cval_write(env, ri, GTIMER_HYP, value);
}

static uint64_t gt_hyp_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return gt_tval_read(env, ri, GTIMER_HYP);
}

static void gt_hyp_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    gt_tval_write(env, ri, GTIMER_HYP, value);
}

static void gt_hyp_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    gt_ctl_write(env, ri, GTIMER_HYP, value);
}
1984
/* Secure physical timer: trampolines binding the shared helpers to
 * GTIMER_SEC.
 */
static void gt_sec_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    gt_timer_reset(env, ri, GTIMER_SEC);
}

static void gt_sec_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    gt_cval_write(env, ri, GTIMER_SEC, value);
}

static uint64_t gt_sec_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return gt_tval_read(env, ri, GTIMER_SEC);
}

static void gt_sec_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    gt_tval_write(env, ri, GTIMER_SEC, value);
}

static void gt_sec_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    gt_ctl_write(env, ri, GTIMER_SEC, value);
}
2012
/* QEMUTimer expiry callbacks: when a timer's deadline fires, recompute
 * its ISTATUS/IRQ state and next deadline via gt_recalc_timer().
 */
void arm_gt_ptimer_cb(void *opaque)
{
    ARMCPU *cpu = opaque;

    gt_recalc_timer(cpu, GTIMER_PHYS);
}

void arm_gt_vtimer_cb(void *opaque)
{
    ARMCPU *cpu = opaque;

    gt_recalc_timer(cpu, GTIMER_VIRT);
}

void arm_gt_htimer_cb(void *opaque)
{
    ARMCPU *cpu = opaque;

    gt_recalc_timer(cpu, GTIMER_HYP);
}

void arm_gt_stimer_cb(void *opaque)
{
    ARMCPU *cpu = opaque;

    gt_recalc_timer(cpu, GTIMER_SEC);
}
2040
static const ARMCPRegInfo generic_timer_cp_reginfo[] = {
    /* Note that CNTFRQ is purely reads-as-written for the benefit
     * of software; writing it doesn't actually change the timer
     * frequency. The reset value matches the fixed rate implied by
     * GTIMER_SCALE ns per tick.
     */
    { .name = "CNTFRQ", .cp = 15, .crn = 14, .crm = 0, .opc1 = 0, .opc2 = 0,
      .type = ARM_CP_ALIAS,
      .access = PL1_RW | PL0_R, .accessfn = gt_cntfrq_access,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c14_cntfrq),
    },
    { .name = "CNTFRQ_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 0,
      .access = PL1_RW | PL0_R, .accessfn = gt_cntfrq_access,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_cntfrq),
      .resetvalue = (1000 * 1000 * 1000) / GTIMER_SCALE,
    },
    /* overall control: mostly access permissions */
    { .name = "CNTKCTL", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 14, .crm = 1, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_cntkctl),
      .resetvalue = 0,
    },
    /* per-timer control registers */
    { .name = "CNTP_CTL", .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 1,
      .secure = ARM_CP_SECSTATE_NS,
      .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL1_RW | PL0_R,
      .accessfn = gt_ptimer_access,
      .fieldoffset = offsetoflow32(CPUARMState,
                                   cp15.c14_timer[GTIMER_PHYS].ctl),
      .writefn = gt_phys_ctl_write, .raw_writefn = raw_write,
    },
    /* Secure-banked view of CNTP_CTL, backed by the GTIMER_SEC timer */
    { .name = "CNTP_CTL_S",
      .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 1,
      .secure = ARM_CP_SECSTATE_S,
      .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL1_RW | PL0_R,
      .accessfn = gt_ptimer_access,
      .fieldoffset = offsetoflow32(CPUARMState,
                                   cp15.c14_timer[GTIMER_SEC].ctl),
      .writefn = gt_sec_ctl_write, .raw_writefn = raw_write,
    },
    { .name = "CNTP_CTL_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 1,
      .type = ARM_CP_IO, .access = PL1_RW | PL0_R,
      .accessfn = gt_ptimer_access,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].ctl),
      .resetvalue = 0,
      .writefn = gt_phys_ctl_write, .raw_writefn = raw_write,
    },
    { .name = "CNTV_CTL", .cp = 15, .crn = 14, .crm = 3, .opc1 = 0, .opc2 = 1,
      .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL1_RW | PL0_R,
      .accessfn = gt_vtimer_access,
      .fieldoffset = offsetoflow32(CPUARMState,
                                   cp15.c14_timer[GTIMER_VIRT].ctl),
      .writefn = gt_virt_ctl_write, .raw_writefn = raw_write,
    },
    { .name = "CNTV_CTL_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 1,
      .type = ARM_CP_IO, .access = PL1_RW | PL0_R,
      .accessfn = gt_vtimer_access,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].ctl),
      .resetvalue = 0,
      .writefn = gt_virt_ctl_write, .raw_writefn = raw_write,
    },
    /* TimerValue views: a 32 bit downcounting view of the underlying
     * (counter, compare-value) state.
     */
    { .name = "CNTP_TVAL", .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 0,
      .secure = ARM_CP_SECSTATE_NS,
      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW | PL0_R,
      .accessfn = gt_ptimer_access,
      .readfn = gt_phys_tval_read, .writefn = gt_phys_tval_write,
    },
    { .name = "CNTP_TVAL_S",
      .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 0,
      .secure = ARM_CP_SECSTATE_S,
      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW | PL0_R,
      .accessfn = gt_ptimer_access,
      .readfn = gt_sec_tval_read, .writefn = gt_sec_tval_write,
    },
    { .name = "CNTP_TVAL_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 0,
      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW | PL0_R,
      .accessfn = gt_ptimer_access, .resetfn = gt_phys_timer_reset,
      .readfn = gt_phys_tval_read, .writefn = gt_phys_tval_write,
    },
    { .name = "CNTV_TVAL", .cp = 15, .crn = 14, .crm = 3, .opc1 = 0, .opc2 = 0,
      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW | PL0_R,
      .accessfn = gt_vtimer_access,
      .readfn = gt_virt_tval_read, .writefn = gt_virt_tval_write,
    },
    { .name = "CNTV_TVAL_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 0,
      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW | PL0_R,
      .accessfn = gt_vtimer_access, .resetfn = gt_virt_timer_reset,
      .readfn = gt_virt_tval_read, .writefn = gt_virt_tval_write,
    },
    /* The counter itself */
    { .name = "CNTPCT", .cp = 15, .crm = 14, .opc1 = 0,
      .access = PL0_R, .type = ARM_CP_64BIT | ARM_CP_NO_RAW | ARM_CP_IO,
      .accessfn = gt_pct_access,
      .readfn = gt_cnt_read, .resetfn = arm_cp_reset_ignore,
    },
    { .name = "CNTPCT_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 1,
      .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO,
      .accessfn = gt_pct_access, .readfn = gt_cnt_read,
    },
    { .name = "CNTVCT", .cp = 15, .crm = 14, .opc1 = 1,
      .access = PL0_R, .type = ARM_CP_64BIT | ARM_CP_NO_RAW | ARM_CP_IO,
      .accessfn = gt_vct_access,
      .readfn = gt_virt_cnt_read, .resetfn = arm_cp_reset_ignore,
    },
    { .name = "CNTVCT_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 2,
      .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO,
      .accessfn = gt_vct_access, .readfn = gt_virt_cnt_read,
    },
    /* Comparison value, indicating when the timer goes off */
    { .name = "CNTP_CVAL", .cp = 15, .crm = 14, .opc1 = 2,
      .secure = ARM_CP_SECSTATE_NS,
      .access = PL1_RW | PL0_R,
      .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].cval),
      .accessfn = gt_ptimer_access,
      .writefn = gt_phys_cval_write, .raw_writefn = raw_write,
    },
    { .name = "CNTP_CVAL_S", .cp = 15, .crm = 14, .opc1 = 2,
      .secure = ARM_CP_SECSTATE_S,
      .access = PL1_RW | PL0_R,
      .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].cval),
      .accessfn = gt_ptimer_access,
      .writefn = gt_sec_cval_write, .raw_writefn = raw_write,
    },
    { .name = "CNTP_CVAL_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 2,
      .access = PL1_RW | PL0_R,
      .type = ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].cval),
      .resetvalue = 0, .accessfn = gt_ptimer_access,
      .writefn = gt_phys_cval_write, .raw_writefn = raw_write,
    },
    { .name = "CNTV_CVAL", .cp = 15, .crm = 14, .opc1 = 3,
      .access = PL1_RW | PL0_R,
      .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].cval),
      .accessfn = gt_vtimer_access,
      .writefn = gt_virt_cval_write, .raw_writefn = raw_write,
    },
    { .name = "CNTV_CVAL_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 2,
      .access = PL1_RW | PL0_R,
      .type = ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].cval),
      .resetvalue = 0, .accessfn = gt_vtimer_access,
      .writefn = gt_virt_cval_write, .raw_writefn = raw_write,
    },
    /* Secure timer -- restricted to EL3 and (configurably, via
     * SCR_EL3.ST as checked in gt_stimer_access) Secure EL1.
     */
    { .name = "CNTPS_TVAL_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 0,
      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW,
      .accessfn = gt_stimer_access,
      .readfn = gt_sec_tval_read,
      .writefn = gt_sec_tval_write,
      .resetfn = gt_sec_timer_reset,
    },
    { .name = "CNTPS_CTL_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 1,
      .type = ARM_CP_IO, .access = PL1_RW,
      .accessfn = gt_stimer_access,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].ctl),
      .resetvalue = 0,
      .writefn = gt_sec_ctl_write, .raw_writefn = raw_write,
    },
    { .name = "CNTPS_CVAL_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 2,
      .type = ARM_CP_IO, .access = PL1_RW,
      .accessfn = gt_stimer_access,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].cval),
      .writefn = gt_sec_cval_write, .raw_writefn = raw_write,
    },
    REGINFO_SENTINEL
};
2225
2226#else
2227
2228
2229
2230
2231
static uint64_t gt_virt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* user-only build: there is no QEMUTimer machinery here, so the
     * virtual counter is derived directly from the host clock at the
     * same GTIMER_SCALE ns-per-tick rate used by the system-emulation
     * code.
     */
    return cpu_get_clock() / GTIMER_SCALE;
}
2240
static const ARMCPRegInfo generic_timer_cp_reginfo[] = {
    /* user-only: expose just the registers guest userspace may touch
     * directly -- CNTFRQ_EL0 as a constant and CNTVCT_EL0 backed by
     * the host clock.
     */
    { .name = "CNTFRQ_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 0,
      .type = ARM_CP_CONST, .access = PL0_R ,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_cntfrq),
      .resetvalue = NANOSECONDS_PER_SECOND / GTIMER_SCALE,
    },
    { .name = "CNTVCT_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 2,
      .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO,
      .readfn = gt_virt_cnt_read,
    },
    REGINFO_SENTINEL
};
2255
2256#endif
2257
2258static void par_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
2259{
2260 if (arm_feature(env, ARM_FEATURE_LPAE)) {
2261 raw_write(env, ri, value);
2262 } else if (arm_feature(env, ARM_FEATURE_V7)) {
2263 raw_write(env, ri, value & 0xfffff6ff);
2264 } else {
2265 raw_write(env, ri, value & 0xfffff1ff);
2266 }
2267}
2268
2269#ifndef CONFIG_USER_ONLY
2270
2271
static CPAccessResult ats_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                 bool isread)
{
    if (ri->opc2 & 4) {
        /*
         * The ATS12NSO* operations (opc2 bit 2 set select the "other
         * security state" stage 1+2 translations) are not available from
         * EL1: from Secure EL1 they trap to EL3 as an uncategorized
         * exception, and are otherwise UNDEFINED.  From EL2/EL3 they are
         * permitted.
         */
        if (arm_current_el(env) == 1) {
            if (arm_is_secure_below_el3(env)) {
                return CP_ACCESS_TRAP_UNCATEGORIZED_EL3;
            }
            return CP_ACCESS_TRAP_UNCATEGORIZED;
        }
    }
    return CP_ACCESS_OK;
}
2290
/*
 * Perform the translation for an ATS (address translate) operation:
 * run the page-table walk for @value in regime @mmu_idx and encode the
 * result (or the fault) in the PAR format.  Returns the value to store
 * into PAR/PAR_EL1; whether the 64-bit (long-descriptor) or the 32-bit
 * (short-descriptor) PAR format is used depends on the current state.
 */
static uint64_t do_ats_write(CPUARMState *env, uint64_t value,
                             MMUAccessType access_type, ARMMMUIdx mmu_idx)
{
    hwaddr phys_addr;
    target_ulong page_size;
    int prot;
    bool ret;
    uint64_t par64;
    bool format64 = false;
    MemTxAttrs attrs = {};
    ARMMMUFaultInfo fi = {};
    ARMCacheAttrs cacheattrs = {};

    ret = get_phys_addr(env, value, access_type, mmu_idx, &phys_addr, &attrs,
                        &prot, &page_size, &fi, &cacheattrs);

    if (is_a64(env)) {
        format64 = true;
    } else if (arm_feature(env, ARM_FEATURE_LPAE)) {
        /*
         * Choose the PAR format for an AArch32 ATS result:
         *  - 64-bit if the stage 1 regime being queried is using the
         *    long-descriptor translation table format;
         *  - additionally, with EL2 present, stage 1+2 lookups report in
         *    64-bit format when stage 2 is in use (HCR.VM) or forced
         *    cacheable (HCR.DC), and accesses from EL2 itself always
         *    report in 64-bit format.
         * NOTE(review): this mirrors the architected "which PAR format"
         * rules for AArch32 ATS; confirm against the ARM ARM PAR
         * description if modifying.
         */
        format64 = arm_s1_regime_using_lpae_format(env, mmu_idx);

        if (arm_feature(env, ARM_FEATURE_EL2)) {
            if (mmu_idx == ARMMMUIdx_S12NSE0 || mmu_idx == ARMMMUIdx_S12NSE1) {
                format64 |= env->cp15.hcr_el2 & (HCR_VM | HCR_DC);
            } else {
                format64 |= arm_current_el(env) == 2;
            }
        }
    }

    if (format64) {
        /* Create a 64-bit PAR: bit 11 (LPAE) is always set. */
        par64 = (1 << 11);
        if (!ret) {
            /* Successful translation: PA, NS bit and memory attributes. */
            par64 |= phys_addr & ~0xfffULL;
            if (!attrs.secure) {
                par64 |= (1 << 9); /* NS */
            }
            par64 |= (uint64_t)cacheattrs.attrs << 56; /* ATTR */
            par64 |= cacheattrs.shareability << 7; /* SH */
        } else {
            /* Fault: F bit, long-descriptor fault status, stage info. */
            uint32_t fsr = arm_fi_to_lfsc(&fi);

            par64 |= 1; /* F */
            par64 |= (fsr & 0x3f) << 1; /* FS */
            if (fi.stage2) {
                par64 |= (1 << 9); /* S2WLK */
            }
            if (fi.s1ptw) {
                par64 |= (1 << 8); /* PTW */
            }
        }
    } else {
        /*
         * fsr is a DFSR/IFSR value for the short-descriptor translation
         * table format (with WnR always clear); convert it to a 32-bit
         * PAR.
         */
        if (!ret) {
            /* We do not set any attribute bits in the PAR. */
            if (page_size == (1 << 24)
                && arm_feature(env, ARM_FEATURE_V7)) {
                /* Supersection: bits [31:24] of PA, SS indicator. */
                par64 = (phys_addr & 0xff000000) | (1 << 1);
            } else {
                par64 = phys_addr & 0xfffff000;
            }
            if (!attrs.secure) {
                par64 |= (1 << 9); /* NS */
            }
        } else {
            uint32_t fsr = arm_fi_to_sfsc(&fi);

            /* F bit plus FS[4] (bit 10 -> 6) and FS[3:0] (bits 4:1). */
            par64 = ((fsr & (1 << 10)) >> 5) | ((fsr & (1 << 12)) >> 6) |
                    ((fsr & 0xf) << 1) | 1;
        }
    }
    return par64;
}
2382
/*
 * AArch32 ATS operation: decode which translation regime the op refers
 * to from opc2 and the current EL/security state, run the translation
 * and store the result into the banked PAR.
 */
static void ats_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    /* opc2 bit 0 distinguishes the write (W) from the read (R) variant. */
    MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD;
    uint64_t par64;
    ARMMMUIdx mmu_idx;
    int el = arm_current_el(env);
    bool secure = arm_is_secure_below_el3(env);

    switch (ri->opc2 & 6) {
    case 0:
        /* Stage 1, current state PL1: ATS1CPR, ATS1CPW */
        switch (el) {
        case 3:
            mmu_idx = ARMMMUIdx_S1E3;
            break;
        case 2:
            /* From EL2 this queries the NS EL1 stage-1 regime. */
            mmu_idx = ARMMMUIdx_S1NSE1;
            break;
        case 1:
            mmu_idx = secure ? ARMMMUIdx_S1SE1 : ARMMMUIdx_S1NSE1;
            break;
        default:
            g_assert_not_reached();
        }
        break;
    case 2:
        /* Stage 1, current state PL0: ATS1CUR, ATS1CUW */
        switch (el) {
        case 3:
            mmu_idx = ARMMMUIdx_S1SE0;
            break;
        case 2:
            mmu_idx = ARMMMUIdx_S1NSE0;
            break;
        case 1:
            mmu_idx = secure ? ARMMMUIdx_S1SE0 : ARMMMUIdx_S1NSE0;
            break;
        default:
            g_assert_not_reached();
        }
        break;
    case 4:
        /* Stage 1+2 NonSecure PL1: ATS12NSOPR, ATS12NSOPW */
        mmu_idx = ARMMMUIdx_S12NSE1;
        break;
    case 6:
        /* Stage 1+2 NonSecure PL0: ATS12NSOUR, ATS12NSOUW */
        mmu_idx = ARMMMUIdx_S12NSE0;
        break;
    default:
        g_assert_not_reached();
    }

    par64 = do_ats_write(env, value, access_type, mmu_idx);

    A32_BANKED_CURRENT_REG_SET(env, par, par64);
}
2440
2441static void ats1h_write(CPUARMState *env, const ARMCPRegInfo *ri,
2442 uint64_t value)
2443{
2444 MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD;
2445 uint64_t par64;
2446
2447 par64 = do_ats_write(env, value, access_type, ARMMMUIdx_S1E2);
2448
2449 A32_BANKED_CURRENT_REG_SET(env, par, par64);
2450}
2451
2452static CPAccessResult at_s1e2_access(CPUARMState *env, const ARMCPRegInfo *ri,
2453 bool isread)
2454{
2455 if (arm_current_el(env) == 3 && !(env->cp15.scr_el3 & SCR_NS)) {
2456 return CP_ACCESS_TRAP;
2457 }
2458 return CP_ACCESS_OK;
2459}
2460
/*
 * AArch64 AT (address translate) operation: decode the target regime
 * from opc2/opc1 plus the current security state, then translate and
 * store the result in PAR_EL1.
 */
static void ats_write64(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    /* opc2 bit 0 distinguishes the write (W) from the read (R) variant. */
    MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD;
    ARMMMUIdx mmu_idx;
    int secure = arm_is_secure_below_el3(env);

    switch (ri->opc2 & 6) {
    case 0:
        switch (ri->opc1) {
        case 0: /* AT S1E1R, AT S1E1W */
            mmu_idx = secure ? ARMMMUIdx_S1SE1 : ARMMMUIdx_S1NSE1;
            break;
        case 4: /* AT S1E2R, AT S1E2W */
            mmu_idx = ARMMMUIdx_S1E2;
            break;
        case 6: /* AT S1E3R, AT S1E3W */
            mmu_idx = ARMMMUIdx_S1E3;
            break;
        default:
            g_assert_not_reached();
        }
        break;
    case 2: /* AT S1E0R, AT S1E0W */
        mmu_idx = secure ? ARMMMUIdx_S1SE0 : ARMMMUIdx_S1NSE0;
        break;
    case 4: /* AT S12E1R, AT S12E1W (stage 2 only exists Non-secure) */
        mmu_idx = secure ? ARMMMUIdx_S1SE1 : ARMMMUIdx_S12NSE1;
        break;
    case 6: /* AT S12E0R, AT S12E0W */
        mmu_idx = secure ? ARMMMUIdx_S1SE0 : ARMMMUIdx_S12NSE0;
        break;
    default:
        g_assert_not_reached();
    }

    env->cp15.par_el[1] = do_ats_write(env, value, access_type, mmu_idx);
}
2499#endif
2500
/* PAR and the VA-to-PA (ATS) operations for the cp15 c7 encoding space. */
static const ARMCPRegInfo vapa_cp_reginfo[] = {
    { .name = "PAR", .cp = 15, .crn = 7, .crm = 4, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .resetvalue = 0,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.par_s),
                             offsetoflow32(CPUARMState, cp15.par_ns) },
      .writefn = par_write },
#ifndef CONFIG_USER_ONLY
    /*
     * This underdecodes (opc2 = CP_ANY covers all the ATS variants);
     * that is safe because the reginfo is marked ARM_CP_NO_RAW and
     * ats_write() decodes opc2 itself.
     */
    { .name = "ATS", .cp = 15, .crn = 7, .crm = 8, .opc1 = 0, .opc2 = CP_ANY,
      .access = PL1_W, .accessfn = ats_access,
      .writefn = ats_write, .type = ARM_CP_NO_RAW },
#endif
    REGINFO_SENTINEL
};
2515
2516
/*
 * Repack eight 2-bit access-permission fields from the extended layout
 * (one field per nibble, bits [4r+1:4r]) into the contiguous 16-bit
 * simple layout (bits [2r+1:2r]).  Inverse of extended_mpu_ap_bits().
 */
static uint32_t simple_mpu_ap_bits(uint32_t val)
{
    uint32_t ret = 0;
    int region;

    for (region = 0; region < 8; region++) {
        ret |= ((val >> (4 * region)) & 3) << (2 * region);
    }
    return ret;
}
2530
2531
/*
 * Spread eight 2-bit access-permission fields from the contiguous 16-bit
 * simple layout (bits [2r+1:2r]) into the extended layout (one field per
 * nibble, bits [4r+1:4r]).  Inverse of simple_mpu_ap_bits().
 */
static uint32_t extended_mpu_ap_bits(uint32_t val)
{
    uint32_t ret = 0;
    int region;

    for (region = 0; region < 8; region++) {
        ret |= ((val >> (2 * region)) & 3) << (4 * region);
    }
    return ret;
}
2545
/* DATA_AP write: store the value expanded to the extended AP layout. */
static void pmsav5_data_ap_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    env->cp15.pmsav5_data_ap = extended_mpu_ap_bits(value);
}
2551
/* DATA_AP read: state is kept in extended layout; compress for reads. */
static uint64_t pmsav5_data_ap_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return simple_mpu_ap_bits(env->cp15.pmsav5_data_ap);
}
2556
/* INSN_AP write: store the value expanded to the extended AP layout. */
static void pmsav5_insn_ap_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    env->cp15.pmsav5_insn_ap = extended_mpu_ap_bits(value);
}
2562
/* INSN_AP read: state is kept in extended layout; compress for reads. */
static uint64_t pmsav5_insn_ap_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return simple_mpu_ap_bits(env->cp15.pmsav5_insn_ap);
}
2567
2568static uint64_t pmsav7_read(CPUARMState *env, const ARMCPRegInfo *ri)
2569{
2570 uint32_t *u32p = *(uint32_t **)raw_ptr(env, ri);
2571
2572 if (!u32p) {
2573 return 0;
2574 }
2575
2576 u32p += env->pmsav7.rnr[M_REG_NS];
2577 return *u32p;
2578}
2579
2580static void pmsav7_write(CPUARMState *env, const ARMCPRegInfo *ri,
2581 uint64_t value)
2582{
2583 ARMCPU *cpu = arm_env_get_cpu(env);
2584 uint32_t *u32p = *(uint32_t **)raw_ptr(env, ri);
2585
2586 if (!u32p) {
2587 return;
2588 }
2589
2590 u32p += env->pmsav7.rnr[M_REG_NS];
2591 tlb_flush(CPU(cpu));
2592 *u32p = value;
2593}
2594
2595static void pmsav7_rgnr_write(CPUARMState *env, const ARMCPRegInfo *ri,
2596 uint64_t value)
2597{
2598 ARMCPU *cpu = arm_env_get_cpu(env);
2599 uint32_t nrgs = cpu->pmsav7_dregion;
2600
2601 if (value >= nrgs) {
2602 qemu_log_mask(LOG_GUEST_ERROR,
2603 "PMSAv7 RGNR write >= # supported regions, %" PRIu32
2604 " > %" PRIu32 "\n", (uint32_t)value, nrgs);
2605 return;
2606 }
2607
2608 raw_write(env, ri, value);
2609}
2610
static const ARMCPRegInfo pmsav7_cp_reginfo[] = {
    /*
     * Reset for these registers is handled elsewhere (resetfn is
     * arm_cp_reset_ignore): the PMSAv7 register state is shared with
     * M-profile CPUs, which reset it via their own path rather than
     * through the cpreg reset machinery.
     */
    { .name = "DRBAR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_NO_RAW,
      .fieldoffset = offsetof(CPUARMState, pmsav7.drbar),
      .readfn = pmsav7_read, .writefn = pmsav7_write,
      .resetfn = arm_cp_reset_ignore },
    { .name = "DRSR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 2,
      .access = PL1_RW, .type = ARM_CP_NO_RAW,
      .fieldoffset = offsetof(CPUARMState, pmsav7.drsr),
      .readfn = pmsav7_read, .writefn = pmsav7_write,
      .resetfn = arm_cp_reset_ignore },
    { .name = "DRACR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 4,
      .access = PL1_RW, .type = ARM_CP_NO_RAW,
      .fieldoffset = offsetof(CPUARMState, pmsav7.dracr),
      .readfn = pmsav7_read, .writefn = pmsav7_write,
      .resetfn = arm_cp_reset_ignore },
    { .name = "RGNR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 2, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, pmsav7.rnr[M_REG_NS]),
      .writefn = pmsav7_rgnr_write,
      .resetfn = arm_cp_reset_ignore },
    REGINFO_SENTINEL
};
2638
/* PMSAv5 (legacy MPU) registers: access permissions, cache config,
 * and the ARM946-style protection region registers.
 */
static const ARMCPRegInfo pmsav5_cp_reginfo[] = {
    /* AP registers are ALIASes: the backing state is the *_EXT_AP form. */
    { .name = "DATA_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_data_ap),
      .readfn = pmsav5_data_ap_read, .writefn = pmsav5_data_ap_write, },
    { .name = "INSN_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_insn_ap),
      .readfn = pmsav5_insn_ap_read, .writefn = pmsav5_insn_ap_write, },
    { .name = "DATA_EXT_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_data_ap),
      .resetvalue = 0, },
    { .name = "INSN_EXT_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 3,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_insn_ap),
      .resetvalue = 0, },
    { .name = "DCACHE_CFG", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c2_data), .resetvalue = 0, },
    { .name = "ICACHE_CFG", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c2_insn), .resetvalue = 0, },
    /* Protection region base and size registers (ARM946 style). */
    { .name = "946_PRBS0", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[0]) },
    { .name = "946_PRBS1", .cp = 15, .crn = 6, .crm = 1, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[1]) },
    { .name = "946_PRBS2", .cp = 15, .crn = 6, .crm = 2, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[2]) },
    { .name = "946_PRBS3", .cp = 15, .crn = 6, .crm = 3, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[3]) },
    { .name = "946_PRBS4", .cp = 15, .crn = 6, .crm = 4, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[4]) },
    { .name = "946_PRBS5", .cp = 15, .crn = 6, .crm = 5, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[5]) },
    { .name = "946_PRBS6", .cp = 15, .crn = 6, .crm = 6, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[6]) },
    { .name = "946_PRBS7", .cp = 15, .crn = 6, .crm = 7, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[7]) },
    REGINFO_SENTINEL
};
2689
/*
 * Raw TTBCR write: mask off bits that are not writable on this CPU and
 * update the cached translation masks derived from TTBCR.N.
 */
static void vmsa_ttbcr_raw_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    TCR *tcr = raw_ptr(env, ri);
    int maskshift = extract32(value, 0, 3); /* TTBCR.N */

    if (!arm_feature(env, ARM_FEATURE_V8)) {
        if (arm_feature(env, ARM_FEATURE_LPAE) && (value & TTBCR_EAE)) {
            /*
             * Pre-v8, bits [21:19], [15:14] and [6:3] are UNK/SBZP when
             * using the long-descriptor translation table format.
             */
            value &= ~((7 << 19) | (3 << 14) | (0xf << 3));
        } else if (arm_feature(env, ARM_FEATURE_EL3)) {
            /*
             * With the Security Extensions TTBCR gains the PD0 (bit 4)
             * and PD1 (bit 5) fields for the short-descriptor format.
             */
            value &= TTBCR_PD1 | TTBCR_PD0 | TTBCR_N;
        } else {
            value &= TTBCR_N;
        }
    }

    /*
     * Update the masks corresponding to the TCR bank being written.
     * mask/base_mask are only meaningful for short-descriptor tables
     * (EAE clear); for long-descriptor tables the TCR fields are used
     * differently and these cached values are ignored, but we always
     * compute them for simplicity.
     */
    tcr->raw_tcr = value;
    tcr->mask = ~(((uint32_t)0xffffffffu) >> maskshift);
    tcr->base_mask = ~((uint32_t)0x3fffu >> maskshift);
}
2722
static void vmsa_ttbcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);

    if (arm_feature(env, ARM_FEATURE_LPAE)) {
        /*
         * With LPAE a TTBCR write can change how translation works (for
         * example which TTBR supplies the ASID), so conservatively flush
         * the whole TLB before applying the new value.
         */
        tlb_flush(CPU(cpu));
    }
    vmsa_ttbcr_raw_write(env, ri, value);
}
2736
static void vmsa_ttbcr_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    TCR *tcr = raw_ptr(env, ri);

    /*
     * Reset TTBCR/TCR to zero and recompute the cached masks for N == 0:
     * mask becomes 0 and base_mask selects a 16KB-aligned table base
     * (the standard short-descriptor first-level table alignment).
     */
    tcr->raw_tcr = 0;
    tcr->mask = 0;
    tcr->base_mask = 0xffffc000u;
}
2748
static void vmsa_tcr_el1_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    TCR *tcr = raw_ptr(env, ri);

    /*
     * TCR_EL1 changes can affect any aspect of translation, so flush
     * the whole TLB.  Unlike the AArch32 path, no derived masks need
     * updating for the AArch64 register.
     */
    tlb_flush(CPU(cpu));
    tcr->raw_tcr = value;
}
2759
static void vmsa_ttbr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    /*
     * For a 64-bit TTBR write, bits [63:48] hold the ASID; if it changes
     * we must flush the TLB since cached translations are tagged with it.
     * 32-bit writes cannot change the ASID, so no flush is needed then.
     */
    if (cpreg_field_is_64bit(ri) &&
        extract64(raw_read(env, ri) ^ value, 48, 16) != 0) {
        ARMCPU *cpu = arm_env_get_cpu(env);
        tlb_flush(CPU(cpu));
    }
    raw_write(env, ri, value);
}
2771
static void vttbr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    CPUState *cs = CPU(cpu);

    /*
     * A VTTBR change can change the VMID (and the stage 2 table base),
     * so flush every TLB that can hold stage 1+2 or stage 2 entries.
     * Skip the flush if the value is unchanged.
     */
    if (raw_read(env, ri) != value) {
        tlb_flush_by_mmuidx(cs,
                            ARMMMUIdxBit_S12NSE1 |
                            ARMMMUIdxBit_S12NSE0 |
                            ARMMMUIdxBit_S2NS);
        raw_write(env, ri, value);
    }
}
2787
/* Fault status/address registers common to VMSA and PMSA cores. */
static const ARMCPRegInfo vmsa_pmsa_cp_reginfo[] = {
    /* DFSR is an ALIAS: it shares storage with (the low half of) ESR_EL1. */
    { .name = "DFSR", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_ALIAS,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dfsr_s),
                             offsetoflow32(CPUARMState, cp15.dfsr_ns) }, },
    { .name = "IFSR", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .resetvalue = 0,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.ifsr_s),
                             offsetoflow32(CPUARMState, cp15.ifsr_ns) } },
    { .name = "DFAR", .cp = 15, .opc1 = 0, .crn = 6, .crm = 0, .opc2 = 0,
      .access = PL1_RW, .resetvalue = 0,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.dfar_s),
                             offsetof(CPUARMState, cp15.dfar_ns) } },
    { .name = "FAR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.far_el[1]),
      .resetvalue = 0, },
    REGINFO_SENTINEL
};
2807
/* MMU translation-control registers for VMSA (page-table based) cores. */
static const ARMCPRegInfo vmsa_cp_reginfo[] = {
    { .name = "ESR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .crn = 5, .crm = 2, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.esr_el[1]), .resetvalue = 0, },
    { .name = "TTBR0_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 0, .opc2 = 0,
      .access = PL1_RW, .writefn = vmsa_ttbr_write, .resetvalue = 0,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr0_s),
                             offsetof(CPUARMState, cp15.ttbr0_ns) } },
    { .name = "TTBR1_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 0, .opc2 = 1,
      .access = PL1_RW, .writefn = vmsa_ttbr_write, .resetvalue = 0,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr1_s),
                             offsetof(CPUARMState, cp15.ttbr1_ns) } },
    { .name = "TCR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW, .writefn = vmsa_tcr_el1_write,
      .resetfn = vmsa_ttbcr_reset, .raw_writefn = raw_write,
      .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[1]) },
    /*
     * The AArch32 TTBCR is an ALIAS onto the low 32 bits of the banked
     * TCR_EL1/TCR_EL3 state; it has its own writefn to maintain the
     * cached translation masks.
     */
    { .name = "TTBCR", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW, .type = ARM_CP_ALIAS, .writefn = vmsa_ttbcr_write,
      .raw_writefn = vmsa_ttbcr_raw_write,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tcr_el[3]),
                             offsetoflow32(CPUARMState, cp15.tcr_el[1])} },
    REGINFO_SENTINEL
};
2835
2836static void omap_ticonfig_write(CPUARMState *env, const ARMCPRegInfo *ri,
2837 uint64_t value)
2838{
2839 env->cp15.c15_ticonfig = value & 0xe7;
2840
2841 env->cp15.c0_cpuid = (value & (1 << 5)) ?
2842 ARM_CPUID_TI915T : ARM_CPUID_TI925T;
2843}
2844
/* THREADID register: only the low 16 bits are implemented. */
static void omap_threadid_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                uint64_t value)
{
    env->cp15.c15_threadid = value & 0xffff;
}
2850
static void omap_wfi_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value)
{
    /* Writing this register implements wait-for-interrupt: halt the CPU. */
    cpu_interrupt(CPU(arm_env_get_cpu(env)), CPU_INTERRUPT_HALT);
}
2857
static void omap_cachemaint_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                  uint64_t value)
{
    /*
     * On OMAP, cache maintenance operations reset the IMAX/IMIN registers
     * that track the range of dcache lines which may contain dirty data.
     * We emulate only that side effect: IMAX goes to its minimum (0x000)
     * and IMIN to its maximum (0xff0), meaning "no dirty lines".
     */
    env->cp15.c15_i_max = 0x000;
    env->cp15.c15_i_min = 0xff0;
}
2867
/* TI OMAP (TI925T) implementation-defined cp15 registers. */
static const ARMCPRegInfo omap_cp_reginfo[] = {
    /* Overrides the standard DFSR decode for the whole crn=5 space. */
    { .name = "DFSR", .cp = 15, .crn = 5, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_OVERRIDE,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.esr_el[1]),
      .resetvalue = 0, },
    { .name = "", .cp = 15, .crn = 15, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "TICONFIG", .cp = 15, .crn = 15, .crm = 1, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c15_ticonfig), .resetvalue = 0,
      .writefn = omap_ticonfig_write },
    { .name = "IMAX", .cp = 15, .crn = 15, .crm = 2, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c15_i_max), .resetvalue = 0, },
    { .name = "IMIN", .cp = 15, .crn = 15, .crm = 3, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .resetvalue = 0xff0,
      .fieldoffset = offsetof(CPUARMState, cp15.c15_i_min) },
    { .name = "THREADID", .cp = 15, .crn = 15, .crm = 4, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c15_threadid), .resetvalue = 0,
      .writefn = omap_threadid_write },
    /* Reads as zero; a write behaves as wait-for-interrupt. */
    { .name = "TI925T_STATUS", .cp = 15, .crn = 15,
      .crm = 8, .opc1 = 0, .opc2 = 0, .access = PL1_RW,
      .type = ARM_CP_NO_RAW,
      .readfn = arm_cp_read_zero, .writefn = omap_wfi_write, },
    /*
     * Override the whole crn=7 cache-maintenance space: OMAP cache ops
     * have the side effect of resetting IMAX/IMIN, which
     * omap_cachemaint_write() models.
     */
    { .name = "OMAP_CACHEMAINT", .cp = 15, .crn = 7, .crm = CP_ANY,
      .opc1 = 0, .opc2 = CP_ANY, .access = PL1_W,
      .type = ARM_CP_OVERRIDE | ARM_CP_NO_RAW,
      .writefn = omap_cachemaint_write },
    /* RAZ/WI the whole crn=9 space. */
    { .name = "C9", .cp = 15, .crn = 9,
      .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW,
      .type = ARM_CP_CONST | ARM_CP_OVERRIDE, .resetvalue = 0 },
    REGINFO_SENTINEL
};
2907
/* XScale CPAR: coprocessor access rights; only bits [13:0] exist. */
static void xscale_cpar_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    env->cp15.c15_cpar = value & 0x3fff;
}
2913
/* Intel XScale implementation-defined cp15 registers. */
static const ARMCPRegInfo xscale_cp_reginfo[] = {
    { .name = "XSCALE_CPAR",
      .cp = 15, .crn = 15, .crm = 1, .opc1 = 0, .opc2 = 0, .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c15_cpar), .resetvalue = 0,
      .writefn = xscale_cpar_write, },
    { .name = "XSCALE_AUXCR",
      .cp = 15, .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 1, .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c1_xscaleauxcr),
      .resetvalue = 0, },
    /*
     * XScale cache lockdown operations: since we model no caches, these
     * are NOPs and we rely on the guest not depending on real cache
     * lockdown behaviour.
     */
    { .name = "XSCALE_LOCK_ICACHE_LINE",
      .cp = 15, .opc1 = 0, .crn = 9, .crm = 1, .opc2 = 0,
      .access = PL1_W, .type = ARM_CP_NOP },
    { .name = "XSCALE_UNLOCK_ICACHE",
      .cp = 15, .opc1 = 0, .crn = 9, .crm = 1, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NOP },
    { .name = "XSCALE_DCACHE_LOCK",
      .cp = 15, .opc1 = 0, .crn = 9, .crm = 2, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "XSCALE_UNLOCK_DCACHE",
      .cp = 15, .opc1 = 0, .crn = 9, .crm = 2, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NOP },
    REGINFO_SENTINEL
};
2940
static const ARMCPRegInfo dummy_c15_cp_reginfo[] = {
    /*
     * RAZ/WI the whole crn=15 (implementation-defined) space for CPUs
     * where we have no more specific definition of those registers.
     * ARM_CP_OVERRIDE lets more specific definitions registered later
     * take precedence over this catch-all.
     */
    { .name = "C15_IMPDEF", .cp = 15, .crn = 15,
      .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY,
      .access = PL1_RW,
      .type = ARM_CP_CONST | ARM_CP_NO_RAW | ARM_CP_OVERRIDE,
      .resetvalue = 0 },
    REGINFO_SENTINEL
};
2954
static const ARMCPRegInfo cache_dirty_status_cp_reginfo[] = {
    /* Cache dirty status: RAZ, since we model no cache it is always clean. */
    { .name = "CDSR", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 6,
      .access = PL1_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
      .resetvalue = 0 },
    REGINFO_SENTINEL
};
2962
static const ARMCPRegInfo cache_block_ops_cp_reginfo[] = {
    /* We never have a block transfer operation in progress. */
    { .name = "BXSR", .cp = 15, .crn = 7, .crm = 12, .opc1 = 0, .opc2 = 4,
      .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
      .resetvalue = 0 },
    /* The cache ops themselves: these all NOP for QEMU. */
    { .name = "IICR", .cp = 15, .crm = 5, .opc1 = 0,
      .access = PL1_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
    { .name = "IDCR", .cp = 15, .crm = 6, .opc1 = 0,
      .access = PL1_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
    { .name = "CDCR", .cp = 15, .crm = 12, .opc1 = 0,
      .access = PL0_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
    { .name = "PIR", .cp = 15, .crm = 12, .opc1 = 1,
      .access = PL0_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
    { .name = "PDR", .cp = 15, .crm = 12, .opc1 = 2,
      .access = PL0_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
    { .name = "CIDCR", .cp = 15, .crm = 14, .opc1 = 0,
      .access = PL1_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
    REGINFO_SENTINEL
};
2983
static const ARMCPRegInfo cache_test_clean_cp_reginfo[] = {
    /*
     * The cache test-and-clean instructions always return (1 << 30),
     * indicating that there are no dirty cache lines (we model no cache).
     */
    { .name = "TC_DCACHE", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 3,
      .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
      .resetvalue = (1 << 30) },
    { .name = "TCI_DCACHE", .cp = 15, .crn = 7, .crm = 14, .opc1 = 0, .opc2 = 3,
      .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
      .resetvalue = (1 << 30) },
    REGINFO_SENTINEL
};
2996
static const ARMCPRegInfo strongarm_cp_reginfo[] = {
    /* Ignore ReadBuffer accesses: RAZ/WI across the whole crn=9 space. */
    { .name = "C9_READBUFFER", .cp = 15, .crn = 9,
      .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY,
      .access = PL1_RW, .resetvalue = 0,
      .type = ARM_CP_CONST | ARM_CP_OVERRIDE | ARM_CP_NO_RAW },
    REGINFO_SENTINEL
};
3005
static uint64_t midr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /*
     * MIDR read: when EL2 is implemented and we are at Non-secure EL1,
     * the read is virtualized and returns VPIDR_EL2 instead of the real
     * MIDR value.
     */
    ARMCPU *cpu = arm_env_get_cpu(env);
    unsigned int cur_el = arm_current_el(env);
    bool secure = arm_is_secure(env);

    if (arm_feature(&cpu->env, ARM_FEATURE_EL2) && !secure && cur_el == 1) {
        return env->cp15.vpidr_el2;
    }
    return raw_read(env, ri);
}
3017
static uint64_t mpidr_read_val(CPUARMState *env)
{
    /*
     * Build the MPIDR value from the CPU's affinity fields.  On
     * multiprocessing-capable (v7MP) cores, set the M bit (31), and the
     * U bit (30) when the core is the uniprocessor variant of an MP
     * implementation.
     */
    ARMCPU *cpu = ARM_CPU(arm_env_get_cpu(env));
    uint64_t mpidr = cpu->mp_affinity;

    if (arm_feature(env, ARM_FEATURE_V7MP)) {
        mpidr |= (1U << 31);
        if (cpu->mp_is_up) {
            mpidr |= (1u << 30);
        }
    }
    return mpidr;
}
3035
static uint64_t mpidr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /*
     * MPIDR read: like MIDR, reads from Non-secure EL1 with EL2 present
     * are virtualized and return VMPIDR_EL2 instead of the real value.
     */
    unsigned int cur_el = arm_current_el(env);
    bool secure = arm_is_secure(env);

    if (arm_feature(env, ARM_FEATURE_EL2) && !secure && cur_el == 1) {
        return env->cp15.vmpidr_el2;
    }
    return mpidr_read_val(env);
}
3046
static const ARMCPRegInfo mpidr_cp_reginfo[] = {
    { .name = "MPIDR", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 5,
      .access = PL1_R, .readfn = mpidr_read, .type = ARM_CP_NO_RAW },
    REGINFO_SENTINEL
};
3053
/* Registers added by the LPAE extension. */
static const ARMCPRegInfo lpae_cp_reginfo[] = {
    /* AMAIR0 is mapped to AMAIR_EL1[31:0]; we model it as constant 0. */
    { .name = "AMAIR0", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .crn = 10, .crm = 3, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    /* AMAIR1 is mapped to AMAIR_EL1[63:32]; also constant 0. */
    { .name = "AMAIR1", .cp = 15, .crn = 10, .crm = 3, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    /* 64-bit (MRRC/MCRR) view of the banked PAR. */
    { .name = "PAR", .cp = 15, .crm = 7, .opc1 = 0,
      .access = PL1_RW, .type = ARM_CP_64BIT, .resetvalue = 0,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.par_s),
                             offsetof(CPUARMState, cp15.par_ns)} },
    /* 64-bit views of the TTBRs, aliasing the primary definitions. */
    { .name = "TTBR0", .cp = 15, .crm = 2, .opc1 = 0,
      .access = PL1_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr0_s),
                             offsetof(CPUARMState, cp15.ttbr0_ns) },
      .writefn = vmsa_ttbr_write, },
    { .name = "TTBR1", .cp = 15, .crm = 2, .opc1 = 1,
      .access = PL1_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr1_s),
                             offsetof(CPUARMState, cp15.ttbr1_ns) },
      .writefn = vmsa_ttbr_write, },
    REGINFO_SENTINEL
};
3080
/* FPCR: the control half of the AArch32 FPSCR. */
static uint64_t aa64_fpcr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return vfp_get_fpcr(env);
}
3085
/* FPCR write: updates the control bits of the shared FP status state. */
static void aa64_fpcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    vfp_set_fpcr(env, value);
}
3091
/* FPSR: the status half of the AArch32 FPSCR. */
static uint64_t aa64_fpsr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return vfp_get_fpsr(env);
}
3096
/* FPSR write: updates the status bits of the shared FP status state. */
static void aa64_fpsr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    vfp_set_fpsr(env, value);
}
3102
static CPAccessResult aa64_daif_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                       bool isread)
{
    /* DAIF accesses from EL0 trap unless SCTLR_EL1.UMA permits them. */
    if (arm_current_el(env) == 0 && !(env->cp15.sctlr_el[1] & SCTLR_UMA)) {
        return CP_ACCESS_TRAP;
    }
    return CP_ACCESS_OK;
}
3111
/* DAIF write: only the D, A, I and F mask bits are writable. */
static void aa64_daif_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    env->daif = value & PSTATE_DAIF;
}
3117
static CPAccessResult aa64_cacheop_access(CPUARMState *env,
                                          const ARMCPRegInfo *ri,
                                          bool isread)
{
    /*
     * The cache maintenance operations are NOPs in our model, but EL0
     * use of them must still trap unless SCTLR_EL1.UCI enables it.
     */
    if (arm_current_el(env) == 0 && !(env->cp15.sctlr_el[1] & SCTLR_UCI)) {
        return CP_ACCESS_TRAP;
    }
    return CP_ACCESS_OK;
}
3130
3131
3132
3133
3134
static void tlbi_aa64_vmalle1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                      uint64_t value)
{
    /*
     * TLBI VMALLE1IS: invalidate all stage-1 EL1&0 entries, broadcast to
     * all CPUs.  Which set of mmu_idx to flush depends on whether we are
     * currently Secure.
     */
    CPUState *cs = ENV_GET_CPU(env);
    bool sec = arm_is_secure_below_el3(env);

    if (sec) {
        tlb_flush_by_mmuidx_all_cpus_synced(cs,
                                            ARMMMUIdxBit_S1SE1 |
                                            ARMMMUIdxBit_S1SE0);
    } else {
        tlb_flush_by_mmuidx_all_cpus_synced(cs,
                                            ARMMMUIdxBit_S12NSE1 |
                                            ARMMMUIdxBit_S12NSE0);
    }
}
3151
static void tlbi_aa64_vmalle1_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                    uint64_t value)
{
    /*
     * TLBI VMALLE1: local-CPU variant of VMALLE1IS.  If the current
     * configuration forces broadcast TLB maintenance (tlb_force_broadcast,
     * e.g. under hypervisor control — see that helper), delegate to the
     * Inner Shareable handler instead.
     */
    CPUState *cs = ENV_GET_CPU(env);

    if (tlb_force_broadcast(env)) {
        tlbi_aa64_vmalle1is_write(env, NULL, value);
        return;
    }

    if (arm_is_secure_below_el3(env)) {
        tlb_flush_by_mmuidx(cs,
                            ARMMMUIdxBit_S1SE1 |
                            ARMMMUIdxBit_S1SE0);
    } else {
        tlb_flush_by_mmuidx(cs,
                            ARMMMUIdxBit_S12NSE1 |
                            ARMMMUIdxBit_S12NSE0);
    }
}
3172
static void tlbi_aa64_alle1_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                  uint64_t value)
{
    /*
     * TLBI ALLE1: invalidate all EL1&0 entries on this CPU.  Unlike the
     * VMALLE1 scope, "ALL" must also invalidate stage-2 (S2NS) entries,
     * which only exist when EL2 is implemented and we are Non-secure.
     */
    ARMCPU *cpu = arm_env_get_cpu(env);
    CPUState *cs = CPU(cpu);

    if (arm_is_secure_below_el3(env)) {
        tlb_flush_by_mmuidx(cs,
                            ARMMMUIdxBit_S1SE1 |
                            ARMMMUIdxBit_S1SE0);
    } else {
        if (arm_feature(env, ARM_FEATURE_EL2)) {
            tlb_flush_by_mmuidx(cs,
                                ARMMMUIdxBit_S12NSE1 |
                                ARMMMUIdxBit_S12NSE0 |
                                ARMMMUIdxBit_S2NS);
        } else {
            tlb_flush_by_mmuidx(cs,
                                ARMMMUIdxBit_S12NSE1 |
                                ARMMMUIdxBit_S12NSE0);
        }
    }
}
3200
3201static void tlbi_aa64_alle2_write(CPUARMState *env, const ARMCPRegInfo *ri,
3202 uint64_t value)
3203{
3204 ARMCPU *cpu = arm_env_get_cpu(env);
3205 CPUState *cs = CPU(cpu);
3206
3207 tlb_flush_by_mmuidx(cs, ARMMMUIdxBit_S1E2);
3208}
3209
3210static void tlbi_aa64_alle3_write(CPUARMState *env, const ARMCPRegInfo *ri,
3211 uint64_t value)
3212{
3213 ARMCPU *cpu = arm_env_get_cpu(env);
3214 CPUState *cs = CPU(cpu);
3215
3216 tlb_flush_by_mmuidx(cs, ARMMMUIdxBit_S1E3);
3217}
3218
static void tlbi_aa64_alle1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                    uint64_t value)
{
    /*
     * TLBI ALLE1IS: broadcast version of ALLE1 — invalidate all EL1&0
     * entries on every CPU, including stage-2 (S2NS) entries when EL2
     * exists and we are Non-secure.
     */
    CPUState *cs = ENV_GET_CPU(env);
    bool sec = arm_is_secure_below_el3(env);
    bool has_el2 = arm_feature(env, ARM_FEATURE_EL2);

    if (sec) {
        tlb_flush_by_mmuidx_all_cpus_synced(cs,
                                            ARMMMUIdxBit_S1SE1 |
                                            ARMMMUIdxBit_S1SE0);
    } else if (has_el2) {
        tlb_flush_by_mmuidx_all_cpus_synced(cs,
                                            ARMMMUIdxBit_S12NSE1 |
                                            ARMMMUIdxBit_S12NSE0 |
                                            ARMMMUIdxBit_S2NS);
    } else {
        tlb_flush_by_mmuidx_all_cpus_synced(cs,
                                            ARMMMUIdxBit_S12NSE1 |
                                            ARMMMUIdxBit_S12NSE0);
    }
}
3245
/* TLBI ALLE2IS: drop all EL2 stage-1 entries on every CPU. */
static void tlbi_aa64_alle2is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                    uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);

    tlb_flush_by_mmuidx_all_cpus_synced(cs, ARMMMUIdxBit_S1E2);
}
3253
/* TLBI ALLE3IS: drop all EL3 entries on every CPU. */
static void tlbi_aa64_alle3is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                    uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);

    tlb_flush_by_mmuidx_all_cpus_synced(cs, ARMMMUIdxBit_S1E3);
}
3261
3262static void tlbi_aa64_vae2_write(CPUARMState *env, const ARMCPRegInfo *ri,
3263 uint64_t value)
3264{
3265
3266
3267
3268
3269 ARMCPU *cpu = arm_env_get_cpu(env);
3270 CPUState *cs = CPU(cpu);
3271 uint64_t pageaddr = sextract64(value << 12, 0, 56);
3272
3273 tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_S1E2);
3274}
3275
3276static void tlbi_aa64_vae3_write(CPUARMState *env, const ARMCPRegInfo *ri,
3277 uint64_t value)
3278{
3279
3280
3281
3282
3283 ARMCPU *cpu = arm_env_get_cpu(env);
3284 CPUState *cs = CPU(cpu);
3285 uint64_t pageaddr = sextract64(value << 12, 0, 56);
3286
3287 tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_S1E3);
3288}
3289
static void tlbi_aa64_vae1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                   uint64_t value)
{
    /*
     * TLBI VAE1IS: invalidate by virtual address for the EL1&0 regime,
     * broadcast to all CPUs.  Secure vs Non-secure selects which mmu_idx
     * set is flushed.
     */
    ARMCPU *cpu = arm_env_get_cpu(env);
    CPUState *cs = CPU(cpu);
    bool sec = arm_is_secure_below_el3(env);
    uint64_t pageaddr = sextract64(value << 12, 0, 56);

    if (sec) {
        tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
                                                 ARMMMUIdxBit_S1SE1 |
                                                 ARMMMUIdxBit_S1SE0);
    } else {
        tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
                                                 ARMMMUIdxBit_S12NSE1 |
                                                 ARMMMUIdxBit_S12NSE0);
    }
}
3308
static void tlbi_aa64_vae1_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    /*
     * TLBI VAE1: local-CPU invalidate-by-VA for the EL1&0 regime.
     * If the configuration forces broadcast maintenance
     * (tlb_force_broadcast), delegate to the Inner Shareable variant.
     */
    ARMCPU *cpu = arm_env_get_cpu(env);
    CPUState *cs = CPU(cpu);
    uint64_t pageaddr = sextract64(value << 12, 0, 56);

    if (tlb_force_broadcast(env)) {
        tlbi_aa64_vae1is_write(env, NULL, value);
        return;
    }

    if (arm_is_secure_below_el3(env)) {
        tlb_flush_page_by_mmuidx(cs, pageaddr,
                                 ARMMMUIdxBit_S1SE1 |
                                 ARMMMUIdxBit_S1SE0);
    } else {
        tlb_flush_page_by_mmuidx(cs, pageaddr,
                                 ARMMMUIdxBit_S12NSE1 |
                                 ARMMMUIdxBit_S12NSE0);
    }
}
3336
3337static void tlbi_aa64_vae2is_write(CPUARMState *env, const ARMCPRegInfo *ri,
3338 uint64_t value)
3339{
3340 CPUState *cs = ENV_GET_CPU(env);
3341 uint64_t pageaddr = sextract64(value << 12, 0, 56);
3342
3343 tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
3344 ARMMMUIdxBit_S1E2);
3345}
3346
3347static void tlbi_aa64_vae3is_write(CPUARMState *env, const ARMCPRegInfo *ri,
3348 uint64_t value)
3349{
3350 CPUState *cs = ENV_GET_CPU(env);
3351 uint64_t pageaddr = sextract64(value << 12, 0, 56);
3352
3353 tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
3354 ARMMMUIdxBit_S1E3);
3355}
3356
3357static void tlbi_aa64_ipas2e1_write(CPUARMState *env, const ARMCPRegInfo *ri,
3358 uint64_t value)
3359{
3360
3361
3362
3363
3364
3365
3366 ARMCPU *cpu = arm_env_get_cpu(env);
3367 CPUState *cs = CPU(cpu);
3368 uint64_t pageaddr;
3369
3370 if (!arm_feature(env, ARM_FEATURE_EL2) || !(env->cp15.scr_el3 & SCR_NS)) {
3371 return;
3372 }
3373
3374 pageaddr = sextract64(value << 12, 0, 48);
3375
3376 tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_S2NS);
3377}
3378
3379static void tlbi_aa64_ipas2e1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
3380 uint64_t value)
3381{
3382 CPUState *cs = ENV_GET_CPU(env);
3383 uint64_t pageaddr;
3384
3385 if (!arm_feature(env, ARM_FEATURE_EL2) || !(env->cp15.scr_el3 & SCR_NS)) {
3386 return;
3387 }
3388
3389 pageaddr = sextract64(value << 12, 0, 48);
3390
3391 tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
3392 ARMMMUIdxBit_S2NS);
3393}
3394
3395static CPAccessResult aa64_zva_access(CPUARMState *env, const ARMCPRegInfo *ri,
3396 bool isread)
3397{
3398
3399
3400
3401 if (arm_current_el(env) == 0 && !(env->cp15.sctlr_el[1] & SCTLR_DZE)) {
3402 return CP_ACCESS_TRAP;
3403 }
3404 return CP_ACCESS_OK;
3405}
3406
3407static uint64_t aa64_dczid_read(CPUARMState *env, const ARMCPRegInfo *ri)
3408{
3409 ARMCPU *cpu = arm_env_get_cpu(env);
3410 int dzp_bit = 1 << 4;
3411
3412
3413 if (aa64_zva_access(env, NULL, false) == CP_ACCESS_OK) {
3414 dzp_bit = 0;
3415 }
3416 return cpu->dcz_blocksize | dzp_bit;
3417}
3418
3419static CPAccessResult sp_el0_access(CPUARMState *env, const ARMCPRegInfo *ri,
3420 bool isread)
3421{
3422 if (!(env->pstate & PSTATE_SP)) {
3423
3424
3425
3426 return CP_ACCESS_TRAP_UNCATEGORIZED;
3427 }
3428 return CP_ACCESS_OK;
3429}
3430
3431static uint64_t spsel_read(CPUARMState *env, const ARMCPRegInfo *ri)
3432{
3433 return env->pstate & PSTATE_SP;
3434}
3435
/* SPSel write: delegate to update_spsel(), which switches the active
 * stack pointer register as a side effect of changing PSTATE.SP.
 */
static void spsel_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t val)
{
    update_spsel(env, val);
}
3440
3441static void sctlr_write(CPUARMState *env, const ARMCPRegInfo *ri,
3442 uint64_t value)
3443{
3444 ARMCPU *cpu = arm_env_get_cpu(env);
3445
3446 if (raw_read(env, ri) == value) {
3447
3448
3449
3450 return;
3451 }
3452
3453 if (arm_feature(env, ARM_FEATURE_PMSA) && !cpu->has_mpu) {
3454
3455 value &= ~SCTLR_M;
3456 }
3457
3458 raw_write(env, ri, value);
3459
3460
3461 tlb_flush(CPU(cpu));
3462}
3463
3464static CPAccessResult fpexc32_access(CPUARMState *env, const ARMCPRegInfo *ri,
3465 bool isread)
3466{
3467 if ((env->cp15.cptr_el[2] & CPTR_TFP) && arm_current_el(env) == 2) {
3468 return CP_ACCESS_TRAP_FP_EL2;
3469 }
3470 if (env->cp15.cptr_el[3] & CPTR_TFP) {
3471 return CP_ACCESS_TRAP_FP_EL3;
3472 }
3473 return CP_ACCESS_OK;
3474}
3475
/* SDCR write: SDCR is the AArch32 view of MDCR_EL3; keep only the
 * architecturally defined bits when storing into the shared state.
 */
static void sdcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                       uint64_t value)
{
    env->cp15.mdcr_el3 = value & SDCR_VALID_MASK;
}
3481
static const ARMCPRegInfo v8_cp_reginfo[] = {
    /*
     * Registers common to all ARMv8 CPUs: flag/condition views, FP
     * status, cache and TLB maintenance ops, and the EL1/EL2-banked
     * SPSR/ELR aliases.
     */
    { .name = "NZCV", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .opc2 = 0, .crn = 4, .crm = 2,
      .access = PL0_RW, .type = ARM_CP_NZCV },
    { .name = "DAIF", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 4, .crm = 2,
      .type = ARM_CP_NO_RAW,
      .access = PL0_RW, .accessfn = aa64_daif_access,
      .fieldoffset = offsetof(CPUARMState, daif),
      .writefn = aa64_daif_write, .resetfn = arm_cp_reset_ignore },
    { .name = "FPCR", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .opc2 = 0, .crn = 4, .crm = 4,
      .access = PL0_RW, .type = ARM_CP_FPU | ARM_CP_SUPPRESS_TB_END,
      .readfn = aa64_fpcr_read, .writefn = aa64_fpcr_write },
    { .name = "FPSR", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 4, .crm = 4,
      .access = PL0_RW, .type = ARM_CP_FPU | ARM_CP_SUPPRESS_TB_END,
      .readfn = aa64_fpsr_read, .writefn = aa64_fpsr_write },
    { .name = "DCZID_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .opc2 = 7, .crn = 0, .crm = 0,
      .access = PL0_R, .type = ARM_CP_NO_RAW,
      .readfn = aa64_dczid_read },
    { .name = "DC_ZVA", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 4, .opc2 = 1,
      .access = PL0_W, .type = ARM_CP_DC_ZVA,
#ifndef CONFIG_USER_ONLY
      /* In user mode the access always passes; skip the check there. */
      .accessfn = aa64_zva_access,
#endif
    },
    { .name = "CURRENTEL", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .opc2 = 2, .crn = 4, .crm = 2,
      .access = PL1_R, .type = ARM_CP_CURRENTEL },
    /* Cache ops: all NOPs since QEMU does not emulate caches. */
    { .name = "IC_IALLUIS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 0,
      .access = PL1_W, .type = ARM_CP_NOP },
    { .name = "IC_IALLU", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 0,
      .access = PL1_W, .type = ARM_CP_NOP },
    { .name = "IC_IVAU", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 5, .opc2 = 1,
      .access = PL0_W, .type = ARM_CP_NOP,
      .accessfn = aa64_cacheop_access },
    { .name = "DC_IVAC", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NOP },
    { .name = "DC_ISW", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 2,
      .access = PL1_W, .type = ARM_CP_NOP },
    { .name = "DC_CVAC", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 10, .opc2 = 1,
      .access = PL0_W, .type = ARM_CP_NOP,
      .accessfn = aa64_cacheop_access },
    { .name = "DC_CSW", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 2,
      .access = PL1_W, .type = ARM_CP_NOP },
    { .name = "DC_CVAU", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 11, .opc2 = 1,
      .access = PL0_W, .type = ARM_CP_NOP,
      .accessfn = aa64_cacheop_access },
    { .name = "DC_CIVAC", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 14, .opc2 = 1,
      .access = PL0_W, .type = ARM_CP_NOP,
      .accessfn = aa64_cacheop_access },
    { .name = "DC_CISW", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 2,
      .access = PL1_W, .type = ARM_CP_NOP },
    /* TLBI operations */
    { .name = "TLBI_VMALLE1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 0,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vmalle1is_write },
    { .name = "TLBI_VAE1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae1is_write },
    { .name = "TLBI_ASIDE1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 2,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vmalle1is_write },
    { .name = "TLBI_VAAE1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 3,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae1is_write },
    { .name = "TLBI_VALE1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 5,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae1is_write },
    { .name = "TLBI_VAALE1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 7,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae1is_write },
    { .name = "TLBI_VMALLE1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 0,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vmalle1_write },
    { .name = "TLBI_VAE1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae1_write },
    { .name = "TLBI_ASIDE1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 2,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vmalle1_write },
    { .name = "TLBI_VAAE1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 3,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae1_write },
    { .name = "TLBI_VALE1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 5,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae1_write },
    { .name = "TLBI_VAALE1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 7,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae1_write },
    { .name = "TLBI_IPAS2E1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 1,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_ipas2e1is_write },
    { .name = "TLBI_IPAS2LE1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 5,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_ipas2e1is_write },
    { .name = "TLBI_ALLE1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 4,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_alle1is_write },
    { .name = "TLBI_VMALLS12E1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 6,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_alle1is_write },
    { .name = "TLBI_IPAS2E1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 1,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_ipas2e1_write },
    { .name = "TLBI_IPAS2LE1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 5,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_ipas2e1_write },
    { .name = "TLBI_ALLE1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 4,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_alle1_write },
    /* NOTE(review): this non-IS op uses the IS (broadcast) writefn.
     * Over-invalidation is architecturally harmless, but the local
     * tlbi_aa64_alle1_write would suffice -- confirm against upstream.
     */
    { .name = "TLBI_VMALLS12E1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 6,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_alle1is_write },
#ifndef CONFIG_USER_ONLY
    /* 64 bit address translation (AT) operations */
    { .name = "AT_S1E1R", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 0,
      .access = PL1_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
    { .name = "AT_S1E1W", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
    { .name = "AT_S1E0R", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 2,
      .access = PL1_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
    { .name = "AT_S1E0W", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 3,
      .access = PL1_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
    { .name = "AT_S12E1R", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 4,
      .access = PL2_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
    { .name = "AT_S12E1W", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 5,
      .access = PL2_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
    { .name = "AT_S12E0R", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 6,
      .access = PL2_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
    { .name = "AT_S12E0W", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 7,
      .access = PL2_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
    /* AT S1E2* are registered elsewhere (they depend on EL2 presence). */
    { .name = "AT_S1E3R", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 7, .crm = 8, .opc2 = 0,
      .access = PL3_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
    { .name = "AT_S1E3W", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 7, .crm = 8, .opc2 = 1,
      .access = PL3_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
    { .name = "PAR_EL1", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 0, .crn = 7, .crm = 4, .opc2 = 0,
      .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.par_el[1]),
      .writefn = par_write },
#endif
    /* TLB invalidate by "last level" -- treated as full invalidates here. */
    { .name = "TLBIMVALIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 5,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_is_write },
    { .name = "TLBIMVAALIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 7,
      .type = ARM_CP_NO_RAW, .access = PL1_W,
      .writefn = tlbimvaa_is_write },
    { .name = "TLBIMVAL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 5,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_write },
    { .name = "TLBIMVAAL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 7,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimvaa_write },
    { .name = "TLBIMVALH", .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 5,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbimva_hyp_write },
    { .name = "TLBIMVALHIS",
      .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 5,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbimva_hyp_is_write },
    { .name = "TLBIIPAS2",
      .cp = 15, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbiipas2_write },
    { .name = "TLBIIPAS2IS",
      .cp = 15, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbiipas2_is_write },
    { .name = "TLBIIPAS2L",
      .cp = 15, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 5,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbiipas2_write },
    { .name = "TLBIIPAS2LIS",
      .cp = 15, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 5,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbiipas2_is_write },
    /* 32 bit cache operations: NOPs since caches are not emulated. */
    { .name = "ICIALLUIS", .cp = 15, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 0,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "BPIALLUIS", .cp = 15, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 6,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "ICIALLU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 0,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "ICIMVAU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 1,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "BPIALL", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 6,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "BPIMVA", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 7,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "DCIMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 1,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "DCISW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 2,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "DCCMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 1,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "DCCSW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 2,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "DCCMVAU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 11, .opc2 = 1,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "DCCIMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 1,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "DCCISW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 2,
      .type = ARM_CP_NOP, .access = PL1_W },
    /* MMU Domain access control, banked between Secure/Non-secure. */
    { .name = "DACR", .cp = 15, .opc1 = 0, .crn = 3, .crm = 0, .opc2 = 0,
      .access = PL1_RW, .resetvalue = 0,
      .writefn = dacr_write, .raw_writefn = raw_write,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dacr_s),
                             offsetoflow32(CPUARMState, cp15.dacr_ns) } },
    { .name = "ELR_EL1", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 0, .opc2 = 1,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, elr_el[1]) },
    { .name = "SPSR_EL1", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 0, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_SVC]) },
    /*
     * SP_EL0 is accessible only when it is not the active stack
     * pointer; sp_el0_access enforces that, so reading the saved
     * field here is always consistent.
     */
    { .name = "SP_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 1, .opc2 = 0,
      .access = PL1_RW, .accessfn = sp_el0_access,
      .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, sp_el[0]) },
    { .name = "SP_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 1, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, sp_el[1]) },
    { .name = "SPSel", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 2, .opc2 = 0,
      .type = ARM_CP_NO_RAW,
      .access = PL1_RW, .readfn = spsel_read, .writefn = spsel_write },
    { .name = "FPEXC32_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 3, .opc2 = 0,
      .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, vfp.xregs[ARM_VFP_FPEXC]),
      .access = PL2_RW, .accessfn = fpexc32_access },
    { .name = "DACR32_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 3, .crm = 0, .opc2 = 0,
      .access = PL2_RW, .resetvalue = 0,
      .writefn = dacr_write, .raw_writefn = raw_write,
      .fieldoffset = offsetof(CPUARMState, cp15.dacr32_el2) },
    { .name = "IFSR32_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 0, .opc2 = 1,
      .access = PL2_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.ifsr32_el2) },
    { .name = "SPSR_IRQ", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 0,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_IRQ]) },
    { .name = "SPSR_ABT", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 1,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_ABT]) },
    { .name = "SPSR_UND", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 2,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_UND]) },
    { .name = "SPSR_FIQ", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 3,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_FIQ]) },
    { .name = "MDCR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 3, .opc2 = 1,
      .resetvalue = 0,
      .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.mdcr_el3) },
    /* SDCR: AArch32 alias of the low half of MDCR_EL3. */
    { .name = "SDCR", .type = ARM_CP_ALIAS,
      .cp = 15, .opc1 = 0, .crn = 1, .crm = 3, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_trap_aa32s_el1,
      .writefn = sdcr_write,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.mdcr_el3) },
    REGINFO_SENTINEL
};
3812
3813
3814static const ARMCPRegInfo el3_no_el2_cp_reginfo[] = {
3815 { .name = "VBAR_EL2", .state = ARM_CP_STATE_BOTH,
3816 .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 0,
3817 .access = PL2_RW,
3818 .readfn = arm_cp_read_zero, .writefn = arm_cp_write_ignore },
3819 { .name = "HCR_EL2", .state = ARM_CP_STATE_BOTH,
3820 .type = ARM_CP_NO_RAW,
3821 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0,
3822 .access = PL2_RW,
3823 .type = ARM_CP_CONST, .resetvalue = 0 },
3824 { .name = "ESR_EL2", .state = ARM_CP_STATE_BOTH,
3825 .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 2, .opc2 = 0,
3826 .access = PL2_RW,
3827 .type = ARM_CP_CONST, .resetvalue = 0 },
3828 { .name = "CPTR_EL2", .state = ARM_CP_STATE_BOTH,
3829 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 2,
3830 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
3831 { .name = "MAIR_EL2", .state = ARM_CP_STATE_BOTH,
3832 .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 0,
3833 .access = PL2_RW, .type = ARM_CP_CONST,
3834 .resetvalue = 0 },
3835 { .name = "HMAIR1", .state = ARM_CP_STATE_AA32,
3836 .cp = 15, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 1,
3837 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
3838 { .name = "AMAIR_EL2", .state = ARM_CP_STATE_BOTH,
3839 .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 0,
3840 .access = PL2_RW, .type = ARM_CP_CONST,
3841 .resetvalue = 0 },
3842 { .name = "HAMAIR1", .state = ARM_CP_STATE_AA32,
3843 .cp = 15, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 1,
3844 .access = PL2_RW, .type = ARM_CP_CONST,
3845 .resetvalue = 0 },
3846 { .name = "AFSR0_EL2", .state = ARM_CP_STATE_BOTH,
3847 .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 0,
3848 .access = PL2_RW, .type = ARM_CP_CONST,
3849 .resetvalue = 0 },
3850 { .name = "AFSR1_EL2", .state = ARM_CP_STATE_BOTH,
3851 .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 1,
3852 .access = PL2_RW, .type = ARM_CP_CONST,
3853 .resetvalue = 0 },
3854 { .name = "TCR_EL2", .state = ARM_CP_STATE_BOTH,
3855 .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 2,
3856 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
3857 { .name = "VTCR_EL2", .state = ARM_CP_STATE_BOTH,
3858 .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 2,
3859 .access = PL2_RW, .accessfn = access_el3_aa32ns_aa64any,
3860 .type = ARM_CP_CONST, .resetvalue = 0 },
3861 { .name = "VTTBR", .state = ARM_CP_STATE_AA32,
3862 .cp = 15, .opc1 = 6, .crm = 2,
3863 .access = PL2_RW, .accessfn = access_el3_aa32ns,
3864 .type = ARM_CP_CONST | ARM_CP_64BIT, .resetvalue = 0 },
3865 { .name = "VTTBR_EL2", .state = ARM_CP_STATE_AA64,
3866 .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 0,
3867 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
3868 { .name = "SCTLR_EL2", .state = ARM_CP_STATE_BOTH,
3869 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 0,
3870 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
3871 { .name = "TPIDR_EL2", .state = ARM_CP_STATE_BOTH,
3872 .opc0 = 3, .opc1 = 4, .crn = 13, .crm = 0, .opc2 = 2,
3873 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
3874 { .name = "TTBR0_EL2", .state = ARM_CP_STATE_AA64,
3875 .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 0,
3876 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
3877 { .name = "HTTBR", .cp = 15, .opc1 = 4, .crm = 2,
3878 .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_CONST,
3879 .resetvalue = 0 },
3880 { .name = "CNTHCTL_EL2", .state = ARM_CP_STATE_BOTH,
3881 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 1, .opc2 = 0,
3882 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
3883 { .name = "CNTVOFF_EL2", .state = ARM_CP_STATE_AA64,
3884 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 0, .opc2 = 3,
3885 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
3886 { .name = "CNTVOFF", .cp = 15, .opc1 = 4, .crm = 14,
3887 .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_CONST,
3888 .resetvalue = 0 },
3889 { .name = "CNTHP_CVAL_EL2", .state = ARM_CP_STATE_AA64,
3890 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 2,
3891 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
3892 { .name = "CNTHP_CVAL", .cp = 15, .opc1 = 6, .crm = 14,
3893 .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_CONST,
3894 .resetvalue = 0 },
3895 { .name = "CNTHP_TVAL_EL2", .state = ARM_CP_STATE_BOTH,
3896 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 0,
3897 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
3898 { .name = "CNTHP_CTL_EL2", .state = ARM_CP_STATE_BOTH,
3899 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 1,
3900 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
3901 { .name = "MDCR_EL2", .state = ARM_CP_STATE_BOTH,
3902 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 1,
3903 .access = PL2_RW, .accessfn = access_tda,
3904 .type = ARM_CP_CONST, .resetvalue = 0 },
3905 { .name = "HPFAR_EL2", .state = ARM_CP_STATE_BOTH,
3906 .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 4,
3907 .access = PL2_RW, .accessfn = access_el3_aa32ns_aa64any,
3908 .type = ARM_CP_CONST, .resetvalue = 0 },
3909 { .name = "HSTR_EL2", .state = ARM_CP_STATE_BOTH,
3910 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 3,
3911 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
3912 { .name = "FAR_EL2", .state = ARM_CP_STATE_BOTH,
3913 .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 0,
3914 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
3915 { .name = "HIFAR", .state = ARM_CP_STATE_AA32,
3916 .type = ARM_CP_CONST,
3917 .cp = 15, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 2,
3918 .access = PL2_RW, .resetvalue = 0 },
3919 REGINFO_SENTINEL
3920};
3921
3922
/* ARMv8-only RAZ/WI views of EL2 registers, for EL3-without-EL2 CPUs. */
static const ARMCPRegInfo el3_no_el2_v8_cp_reginfo[] = {
    { .name = "HCR2", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 4,
      .access = PL2_RW,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    REGINFO_SENTINEL
};
3930
/*
 * Write handler for HCR_EL2: mask out RES0 bits, flush the TLB when
 * bits affecting translation change, and recompute the virtual IRQ/FIQ
 * lines. Statement order matters: the flush decision compares the old
 * env->cp15.hcr_el2 against the new value *before* the store.
 */
static void hcr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    uint64_t valid_mask = HCR_MASK;

    if (arm_feature(env, ARM_FEATURE_EL3)) {
        /* With EL3 present, HCR.HCD is RES0 (SCR_EL3.HCE governs HVC). */
        valid_mask &= ~HCR_HCD;
    } else if (cpu->psci_conduit != QEMU_PSCI_CONDUIT_SMC) {
        /*
         * Architecturally HCR.TSC is RES0 when EL3 is absent. But when
         * QEMU itself provides PSCI over the SMC conduit it is acting
         * as the EL3 firmware, so the guest hypervisor must keep the
         * ability to trap SMC from EL1; TSC stays writable in that
         * case, and is cleared from the valid mask only otherwise.
         */
        valid_mask &= ~HCR_TSC;
    }

    /* Clear RES0 bits. */
    value &= valid_mask;

    /*
     * VM, PTW and DC all change how addresses translate (stage-2
     * enable, page-table-walk restrictions, stage-1 disable), so any
     * change to them invalidates cached translations on this CPU.
     */
    if ((env->cp15.hcr_el2 ^ value) & (HCR_VM | HCR_PTW | HCR_DC)) {
        tlb_flush(CPU(cpu));
    }
    env->cp15.hcr_el2 = value;

    /*
     * The VI/VF bits feed into the virtual IRQ/FIQ line state, so they
     * must be re-derived after every HCR write. This requires the
     * iothread lock (the reginfo entries are marked ARM_CP_IO for
     * exactly this reason); assert it rather than silently racing.
     */
    g_assert(qemu_mutex_iothread_locked());
    arm_cpu_update_virq(cpu);
    arm_cpu_update_vfiq(cpu);
}
3977
3978static void hcr_writehigh(CPUARMState *env, const ARMCPRegInfo *ri,
3979 uint64_t value)
3980{
3981
3982 value = deposit64(env->cp15.hcr_el2, 32, 32, value);
3983 hcr_write(env, NULL, value);
3984}
3985
3986static void hcr_writelow(CPUARMState *env, const ARMCPRegInfo *ri,
3987 uint64_t value)
3988{
3989
3990 value = deposit64(env->cp15.hcr_el2, 0, 32, value);
3991 hcr_write(env, NULL, value);
3992}
3993
3994static const ARMCPRegInfo el2_cp_reginfo[] = {
3995 { .name = "HCR_EL2", .state = ARM_CP_STATE_AA64,
3996 .type = ARM_CP_IO,
3997 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0,
3998 .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.hcr_el2),
3999 .writefn = hcr_write },
4000 { .name = "HCR", .state = ARM_CP_STATE_AA32,
4001 .type = ARM_CP_ALIAS | ARM_CP_IO,
4002 .cp = 15, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0,
4003 .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.hcr_el2),
4004 .writefn = hcr_writelow },
4005 { .name = "ELR_EL2", .state = ARM_CP_STATE_AA64,
4006 .type = ARM_CP_ALIAS,
4007 .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 0, .opc2 = 1,
4008 .access = PL2_RW,
4009 .fieldoffset = offsetof(CPUARMState, elr_el[2]) },
4010 { .name = "ESR_EL2", .state = ARM_CP_STATE_BOTH,
4011 .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 2, .opc2 = 0,
4012 .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.esr_el[2]) },
4013 { .name = "FAR_EL2", .state = ARM_CP_STATE_BOTH,
4014 .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 0,
4015 .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.far_el[2]) },
4016 { .name = "HIFAR", .state = ARM_CP_STATE_AA32,
4017 .type = ARM_CP_ALIAS,
4018 .cp = 15, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 2,
4019 .access = PL2_RW,
4020 .fieldoffset = offsetofhigh32(CPUARMState, cp15.far_el[2]) },
4021 { .name = "SPSR_EL2", .state = ARM_CP_STATE_AA64,
4022 .type = ARM_CP_ALIAS,
4023 .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 0, .opc2 = 0,
4024 .access = PL2_RW,
4025 .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_HYP]) },
4026 { .name = "VBAR_EL2", .state = ARM_CP_STATE_BOTH,
4027 .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 0,
4028 .access = PL2_RW, .writefn = vbar_write,
4029 .fieldoffset = offsetof(CPUARMState, cp15.vbar_el[2]),
4030 .resetvalue = 0 },
4031 { .name = "SP_EL2", .state = ARM_CP_STATE_AA64,
4032 .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 1, .opc2 = 0,
4033 .access = PL3_RW, .type = ARM_CP_ALIAS,
4034 .fieldoffset = offsetof(CPUARMState, sp_el[2]) },
4035 { .name = "CPTR_EL2", .state = ARM_CP_STATE_BOTH,
4036 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 2,
4037 .access = PL2_RW, .accessfn = cptr_access, .resetvalue = 0,
4038 .fieldoffset = offsetof(CPUARMState, cp15.cptr_el[2]) },
4039 { .name = "MAIR_EL2", .state = ARM_CP_STATE_BOTH,
4040 .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 0,
4041 .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.mair_el[2]),
4042 .resetvalue = 0 },
4043 { .name = "HMAIR1", .state = ARM_CP_STATE_AA32,
4044 .cp = 15, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 1,
4045 .access = PL2_RW, .type = ARM_CP_ALIAS,
4046 .fieldoffset = offsetofhigh32(CPUARMState, cp15.mair_el[2]) },
4047 { .name = "AMAIR_EL2", .state = ARM_CP_STATE_BOTH,
4048 .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 0,
4049 .access = PL2_RW, .type = ARM_CP_CONST,
4050 .resetvalue = 0 },
4051
4052 { .name = "HAMAIR1", .state = ARM_CP_STATE_AA32,
4053 .cp = 15, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 1,
4054 .access = PL2_RW, .type = ARM_CP_CONST,
4055 .resetvalue = 0 },
4056 { .name = "AFSR0_EL2", .state = ARM_CP_STATE_BOTH,
4057 .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 0,
4058 .access = PL2_RW, .type = ARM_CP_CONST,
4059 .resetvalue = 0 },
4060 { .name = "AFSR1_EL2", .state = ARM_CP_STATE_BOTH,
4061 .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 1,
4062 .access = PL2_RW, .type = ARM_CP_CONST,
4063 .resetvalue = 0 },
4064 { .name = "TCR_EL2", .state = ARM_CP_STATE_BOTH,
4065 .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 2,
4066 .access = PL2_RW,
4067
4068
4069
4070 .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[2]) },
4071 { .name = "VTCR", .state = ARM_CP_STATE_AA32,
4072 .cp = 15, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 2,
4073 .type = ARM_CP_ALIAS,
4074 .access = PL2_RW, .accessfn = access_el3_aa32ns,
4075 .fieldoffset = offsetof(CPUARMState, cp15.vtcr_el2) },
4076 { .name = "VTCR_EL2", .state = ARM_CP_STATE_AA64,
4077 .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 2,
4078 .access = PL2_RW,
4079
4080
4081
4082 .fieldoffset = offsetof(CPUARMState, cp15.vtcr_el2) },
4083 { .name = "VTTBR", .state = ARM_CP_STATE_AA32,
4084 .cp = 15, .opc1 = 6, .crm = 2,
4085 .type = ARM_CP_64BIT | ARM_CP_ALIAS,
4086 .access = PL2_RW, .accessfn = access_el3_aa32ns,
4087 .fieldoffset = offsetof(CPUARMState, cp15.vttbr_el2),
4088 .writefn = vttbr_write },
4089 { .name = "VTTBR_EL2", .state = ARM_CP_STATE_AA64,
4090 .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 0,
4091 .access = PL2_RW, .writefn = vttbr_write,
4092 .fieldoffset = offsetof(CPUARMState, cp15.vttbr_el2) },
4093 { .name = "SCTLR_EL2", .state = ARM_CP_STATE_BOTH,
4094 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 0,
4095 .access = PL2_RW, .raw_writefn = raw_write, .writefn = sctlr_write,
4096 .fieldoffset = offsetof(CPUARMState, cp15.sctlr_el[2]) },
4097 { .name = "TPIDR_EL2", .state = ARM_CP_STATE_BOTH,
4098 .opc0 = 3, .opc1 = 4, .crn = 13, .crm = 0, .opc2 = 2,
4099 .access = PL2_RW, .resetvalue = 0,
4100 .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[2]) },
4101 { .name = "TTBR0_EL2", .state = ARM_CP_STATE_AA64,
4102 .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 0,
4103 .access = PL2_RW, .resetvalue = 0,
4104 .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[2]) },
4105 { .name = "HTTBR", .cp = 15, .opc1 = 4, .crm = 2,
4106 .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS,
4107 .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[2]) },
4108 { .name = "TLBIALLNSNH",
4109 .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 4,
4110 .type = ARM_CP_NO_RAW, .access = PL2_W,
4111 .writefn = tlbiall_nsnh_write },
4112 { .name = "TLBIALLNSNHIS",
4113 .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 4,
4114 .type = ARM_CP_NO_RAW, .access = PL2_W,
4115 .writefn = tlbiall_nsnh_is_write },
4116 { .name = "TLBIALLH", .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 0,
4117 .type = ARM_CP_NO_RAW, .access = PL2_W,
4118 .writefn = tlbiall_hyp_write },
4119 { .name = "TLBIALLHIS", .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 0,
4120 .type = ARM_CP_NO_RAW, .access = PL2_W,
4121 .writefn = tlbiall_hyp_is_write },
4122 { .name = "TLBIMVAH", .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 1,
4123 .type = ARM_CP_NO_RAW, .access = PL2_W,
4124 .writefn = tlbimva_hyp_write },
4125 { .name = "TLBIMVAHIS", .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 1,
4126 .type = ARM_CP_NO_RAW, .access = PL2_W,
4127 .writefn = tlbimva_hyp_is_write },
4128 { .name = "TLBI_ALLE2", .state = ARM_CP_STATE_AA64,
4129 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 0,
4130 .type = ARM_CP_NO_RAW, .access = PL2_W,
4131 .writefn = tlbi_aa64_alle2_write },
4132 { .name = "TLBI_VAE2", .state = ARM_CP_STATE_AA64,
4133 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 1,
4134 .type = ARM_CP_NO_RAW, .access = PL2_W,
4135 .writefn = tlbi_aa64_vae2_write },
4136 { .name = "TLBI_VALE2", .state = ARM_CP_STATE_AA64,
4137 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 5,
4138 .access = PL2_W, .type = ARM_CP_NO_RAW,
4139 .writefn = tlbi_aa64_vae2_write },
4140 { .name = "TLBI_ALLE2IS", .state = ARM_CP_STATE_AA64,
4141 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 0,
4142 .access = PL2_W, .type = ARM_CP_NO_RAW,
4143 .writefn = tlbi_aa64_alle2is_write },
4144 { .name = "TLBI_VAE2IS", .state = ARM_CP_STATE_AA64,
4145 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 1,
4146 .type = ARM_CP_NO_RAW, .access = PL2_W,
4147 .writefn = tlbi_aa64_vae2is_write },
4148 { .name = "TLBI_VALE2IS", .state = ARM_CP_STATE_AA64,
4149 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 5,
4150 .access = PL2_W, .type = ARM_CP_NO_RAW,
4151 .writefn = tlbi_aa64_vae2is_write },
4152#ifndef CONFIG_USER_ONLY
4153
4154
4155
4156
4157 { .name = "AT_S1E2R", .state = ARM_CP_STATE_AA64,
4158 .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 0,
4159 .access = PL2_W, .accessfn = at_s1e2_access,
4160 .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
4161 { .name = "AT_S1E2W", .state = ARM_CP_STATE_AA64,
4162 .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 1,
4163 .access = PL2_W, .accessfn = at_s1e2_access,
4164 .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
4165
4166
4167
4168
4169
4170 { .name = "ATS1HR", .cp = 15, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 0,
4171 .access = PL2_W,
4172 .writefn = ats1h_write, .type = ARM_CP_NO_RAW },
4173 { .name = "ATS1HW", .cp = 15, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 1,
4174 .access = PL2_W,
4175 .writefn = ats1h_write, .type = ARM_CP_NO_RAW },
4176 { .name = "CNTHCTL_EL2", .state = ARM_CP_STATE_BOTH,
4177 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 1, .opc2 = 0,
4178
4179
4180
4181
4182 .access = PL2_RW, .resetvalue = 3,
4183 .fieldoffset = offsetof(CPUARMState, cp15.cnthctl_el2) },
4184 { .name = "CNTVOFF_EL2", .state = ARM_CP_STATE_AA64,
4185 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 0, .opc2 = 3,
4186 .access = PL2_RW, .type = ARM_CP_IO, .resetvalue = 0,
4187 .writefn = gt_cntvoff_write,
4188 .fieldoffset = offsetof(CPUARMState, cp15.cntvoff_el2) },
4189 { .name = "CNTVOFF", .cp = 15, .opc1 = 4, .crm = 14,
4190 .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS | ARM_CP_IO,
4191 .writefn = gt_cntvoff_write,
4192 .fieldoffset = offsetof(CPUARMState, cp15.cntvoff_el2) },
4193 { .name = "CNTHP_CVAL_EL2", .state = ARM_CP_STATE_AA64,
4194 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 2,
4195 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYP].cval),
4196 .type = ARM_CP_IO, .access = PL2_RW,
4197 .writefn = gt_hyp_cval_write, .raw_writefn = raw_write },
4198 { .name = "CNTHP_CVAL", .cp = 15, .opc1 = 6, .crm = 14,
4199 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYP].cval),
4200 .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_IO,
4201 .writefn = gt_hyp_cval_write, .raw_writefn = raw_write },
4202 { .name = "CNTHP_TVAL_EL2", .state = ARM_CP_STATE_BOTH,
4203 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 0,
4204 .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL2_RW,
4205 .resetfn = gt_hyp_timer_reset,
4206 .readfn = gt_hyp_tval_read, .writefn = gt_hyp_tval_write },
4207 { .name = "CNTHP_CTL_EL2", .state = ARM_CP_STATE_BOTH,
4208 .type = ARM_CP_IO,
4209 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 1,
4210 .access = PL2_RW,
4211 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYP].ctl),
4212 .resetvalue = 0,
4213 .writefn = gt_hyp_ctl_write, .raw_writefn = raw_write },
4214#endif
4215
4216
4217
4218
4219
4220 { .name = "MDCR_EL2", .state = ARM_CP_STATE_BOTH,
4221 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 1,
4222 .access = PL2_RW, .resetvalue = 0,
4223 .fieldoffset = offsetof(CPUARMState, cp15.mdcr_el2), },
4224 { .name = "HPFAR", .state = ARM_CP_STATE_AA32,
4225 .cp = 15, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 4,
4226 .access = PL2_RW, .accessfn = access_el3_aa32ns,
4227 .fieldoffset = offsetof(CPUARMState, cp15.hpfar_el2) },
4228 { .name = "HPFAR_EL2", .state = ARM_CP_STATE_AA64,
4229 .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 4,
4230 .access = PL2_RW,
4231 .fieldoffset = offsetof(CPUARMState, cp15.hpfar_el2) },
4232 { .name = "HSTR_EL2", .state = ARM_CP_STATE_BOTH,
4233 .cp = 15, .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 3,
4234 .access = PL2_RW,
4235 .fieldoffset = offsetof(CPUARMState, cp15.hstr_el2) },
4236 REGINFO_SENTINEL
4237};
4238
/* EL2 registers which only exist from ARMv8 onward. */
static const ARMCPRegInfo el2_v8_cp_reginfo[] = {
    /*
     * HCR2 is the AArch32 view of the high half of HCR_EL2.
     * ARM_CP_ALIAS because the backing state lives in hcr_el2; writes go
     * through hcr_writehigh so side effects of HCR bits are applied.
     */
    { .name = "HCR2", .state = ARM_CP_STATE_AA32,
      .type = ARM_CP_ALIAS | ARM_CP_IO,
      .cp = 15, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 4,
      .access = PL2_RW,
      .fieldoffset = offsetofhigh32(CPUARMState, cp15.hcr_el2),
      .writefn = hcr_writehigh },
    REGINFO_SENTINEL
};
4248
4249static CPAccessResult nsacr_access(CPUARMState *env, const ARMCPRegInfo *ri,
4250 bool isread)
4251{
4252
4253
4254
4255 if (arm_current_el(env) == 3) {
4256 return CP_ACCESS_OK;
4257 }
4258 if (arm_is_secure_below_el3(env)) {
4259 return CP_ACCESS_TRAP_EL3;
4260 }
4261
4262 if (isread) {
4263 return CP_ACCESS_OK;
4264 }
4265 return CP_ACCESS_TRAP_UNCATEGORIZED;
4266}
4267
/* Registers present only on CPUs which implement EL3 (Secure Monitor). */
static const ARMCPRegInfo el3_cp_reginfo[] = {
    { .name = "SCR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 0,
      .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.scr_el3),
      .resetvalue = 0, .writefn = scr_write },
    /* AArch32 SCR aliases the low 32 bits of SCR_EL3. */
    { .name = "SCR",  .type = ARM_CP_ALIAS,
      .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_trap_aa32s_el1,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.scr_el3),
      .writefn = scr_write },
    { .name = "SDER32_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 1,
      .access = PL3_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.sder) },
    { .name = "SDER",
      .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 1,
      .access = PL3_RW, .resetvalue = 0,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.sder) },
    /* Monitor Vector Base Address Register (AArch32 Security Extensions). */
    { .name = "MVBAR", .cp = 15, .opc1 = 0, .crn = 12, .crm = 0, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_trap_aa32s_el1,
      .writefn = vbar_write, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.mvbar) },
    { .name = "TTBR0_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 2, .crm = 0, .opc2 = 0,
      .access = PL3_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[3]) },
    { .name = "TCR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 2, .crm = 0, .opc2 = 2,
      .access = PL3_RW,
      /*
       * No .writefn here; reset and raw (migration) writes reuse the
       * shared TTBCR helpers.  NOTE(review): inferred from the hookup
       * alone -- presumably TCR_EL3 writes need no extra side effects
       * beyond what the raw write performs; confirm against TCR_EL1's
       * definition elsewhere in this file.
       */
      .resetfn = vmsa_ttbcr_reset, .raw_writefn = vmsa_ttbcr_raw_write,
      .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[3]) },
    /* ARM_CP_ALIAS: ELR_EL3 state is saved/restored via other machinery. */
    { .name = "ELR_EL3", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 0, .opc2 = 1,
      .access = PL3_RW,
      .fieldoffset = offsetof(CPUARMState, elr_el[3]) },
    { .name = "ESR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 2, .opc2 = 0,
      .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.esr_el[3]) },
    { .name = "FAR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 6, .crm = 0, .opc2 = 0,
      .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.far_el[3]) },
    /* SPSR_EL3 maps onto the Monitor-mode banked SPSR. */
    { .name = "SPSR_EL3", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 0, .opc2 = 0,
      .access = PL3_RW,
      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_MON]) },
    { .name = "VBAR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 12, .crm = 0, .opc2 = 0,
      .access = PL3_RW, .writefn = vbar_write,
      .fieldoffset = offsetof(CPUARMState, cp15.vbar_el[3]),
      .resetvalue = 0 },
    { .name = "CPTR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 2,
      .access = PL3_RW, .accessfn = cptr_access, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.cptr_el[3]) },
    { .name = "TPIDR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 13, .crm = 0, .opc2 = 2,
      .access = PL3_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[3]) },
    /* AMAIR/AFSR registers are implemented RAZ/WI (ARM_CP_CONST, 0). */
    { .name = "AMAIR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 10, .crm = 3, .opc2 = 0,
      .access = PL3_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "AFSR0_EL3", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 1, .opc2 = 0,
      .access = PL3_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "AFSR1_EL3", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 1, .opc2 = 1,
      .access = PL3_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    /* EL3 TLB maintenance operations (inner-shareable and local forms). */
    { .name = "TLBI_ALLE3IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 3, .opc2 = 0,
      .access = PL3_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_alle3is_write },
    { .name = "TLBI_VAE3IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 3, .opc2 = 1,
      .access = PL3_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae3is_write },
    { .name = "TLBI_VALE3IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 3, .opc2 = 5,
      .access = PL3_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae3is_write },
    { .name = "TLBI_ALLE3", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 0,
      .access = PL3_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_alle3_write },
    { .name = "TLBI_VAE3", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 1,
      .access = PL3_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae3_write },
    { .name = "TLBI_VALE3", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 5,
      .access = PL3_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae3_write },
    REGINFO_SENTINEL
};
4371
4372static CPAccessResult ctr_el0_access(CPUARMState *env, const ARMCPRegInfo *ri,
4373 bool isread)
4374{
4375
4376
4377
4378 if (arm_current_el(env) == 0 && !(env->cp15.sctlr_el[1] & SCTLR_UCT)) {
4379 return CP_ACCESS_TRAP;
4380 }
4381 return CP_ACCESS_OK;
4382}
4383
4384static void oslar_write(CPUARMState *env, const ARMCPRegInfo *ri,
4385 uint64_t value)
4386{
4387
4388
4389
4390 int oslock;
4391
4392 if (ri->state == ARM_CP_STATE_AA32) {
4393 oslock = (value == 0xC5ACCE55);
4394 } else {
4395 oslock = value & 1;
4396 }
4397
4398 env->cp15.oslsr_el1 = deposit32(env->cp15.oslsr_el1, 1, 1, oslock);
4399}
4400
static const ARMCPRegInfo debug_cp_reginfo[] = {
    /*
     * DBGDRAR, DBGDSAR: RAZ, since no memory-mapped debug components
     * are implemented here (ARM_CP_CONST, resetvalue 0).  The AArch64
     * counterpart of DBGDRAR is MDRAR_EL1; note it is PL1_R while the
     * AArch32 forms are readable from PL0.  DBGDSAR has no AArch64
     * accessor in this table.
     */
    { .name = "DBGDRAR", .cp = 14, .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL0_R, .accessfn = access_tdra,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "MDRAR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 0,
      .access = PL1_R, .accessfn = access_tdra,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "DBGDSAR", .cp = 14, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL0_R, .accessfn = access_tdra,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    /* Monitor debug system control register (MDSCR_EL1). */
    { .name = "MDSCR_EL1", .state = ARM_CP_STATE_BOTH,
      .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 2,
      .access = PL1_RW, .accessfn = access_tda,
      .fieldoffset = offsetof(CPUARMState, cp15.mdscr_el1),
      .resetvalue = 0 },
    /*
     * MDCCSR_EL0: implemented as a read-only view of MDSCR_EL1's state
     * (ARM_CP_ALIAS onto the same field, PL1_R).
     */
    { .name = "MDCCSR_EL0", .state = ARM_CP_STATE_BOTH,
      .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 0,
      .type = ARM_CP_ALIAS,
      .access = PL1_R, .accessfn = access_tda,
      .fieldoffset = offsetof(CPUARMState, cp15.mdscr_el1), },
    /* OS Lock Access / Status: oslar_write maintains OSLSR bit 1. */
    { .name = "OSLAR_EL1", .state = ARM_CP_STATE_BOTH,
      .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 4,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .accessfn = access_tdosa,
      .writefn = oslar_write },
    { .name = "OSLSR_EL1", .state = ARM_CP_STATE_BOTH,
      .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 4,
      .access = PL1_R, .resetvalue = 10,
      .accessfn = access_tdosa,
      .fieldoffset = offsetof(CPUARMState, cp15.oslsr_el1) },
    /* OS Double Lock: implemented as a NOP. */
    { .name = "OSDLR_EL1", .state = ARM_CP_STATE_BOTH,
      .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 3, .opc2 = 4,
      .access = PL1_RW, .accessfn = access_tdosa,
      .type = ARM_CP_NOP },
    /*
     * DBGVCR: NOP.  NOTE(review): presumably present because guest OSes
     * write it at boot even though vector-catch debug events are not
     * implemented here -- confirm.
     */
    { .name = "DBGVCR",
      .cp = 14, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_tda,
      .type = ARM_CP_NOP },
    /*
     * DBGVCR32_EL2: the AArch64 view of DBGVCR for a 64-bit hypervisor
     * managing a 32-bit guest; also a NOP here.
     */
    { .name = "DBGVCR32_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 2, .opc1 = 4, .crn = 0, .crm = 7, .opc2 = 0,
      .access = PL2_RW, .accessfn = access_tda,
      .type = ARM_CP_NOP },
    /*
     * MDCCINT_EL1: NOP.  The Debug Communications Channel is not
     * implemented, but guests may still access this register.
     */
    { .name = "MDCCINT_EL1", .state = ARM_CP_STATE_BOTH,
      .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_tda,
      .type = ARM_CP_NOP },
    REGINFO_SENTINEL
};
4471
static const ARMCPRegInfo debug_lpae_cp_reginfo[] = {
    /* 64-bit access versions of the (dummy, RAZ) debug base registers. */
    { .name = "DBGDRAR", .cp = 14, .crm = 1, .opc1 = 0,
      .access = PL0_R, .type = ARM_CP_CONST|ARM_CP_64BIT, .resetvalue = 0 },
    { .name = "DBGDSAR", .cp = 14, .crm = 2, .opc1 = 0,
      .access = PL0_R, .type = ARM_CP_CONST|ARM_CP_64BIT, .resetvalue = 0 },
    REGINFO_SENTINEL
};
4480
4481
4482
4483
4484
4485
4486
/*
 * Return the exception level to which an SVE access trap from @el should
 * be taken, or 0 if SVE is enabled (or if the access should instead be
 * reported through the FP-disabled trap path).
 */
int sve_exception_el(CPUARMState *env, int el)
{
#ifndef CONFIG_USER_ONLY
    if (el <= 1) {
        bool disabled = false;

        /*
         * CPACR_EL1.ZEN (bits [17:16]) controls EL0/EL1 SVE access:
         *   bit 16 clear          : trap EL0 and EL1 accesses
         *   bit 16 set, 17 clear  : trap only EL0 accesses
         *   both set              : no trap
         */
        if (!extract32(env->cp15.cpacr_el1, 16, 1)) {
            disabled = true;
        } else if (!extract32(env->cp15.cpacr_el1, 17, 1)) {
            disabled = el == 0;
        }
        if (disabled) {
            /* Route to EL2 if Non-secure with HCR_EL2.TGE set, else EL1. */
            return (arm_feature(env, ARM_FEATURE_EL2)
                    && !arm_is_secure(env)
                    && (env->cp15.hcr_el2 & HCR_TGE) ? 2 : 1);
        }

        /*
         * CPACR_EL1.FPEN (bits [21:20]), same encoding as ZEN: if FP is
         * disabled, return 0 so the FP trap takes precedence.
         */
        if (!extract32(env->cp15.cpacr_el1, 20, 1)) {
            disabled = true;
        } else if (!extract32(env->cp15.cpacr_el1, 21, 1)) {
            disabled = el == 0;
        }
        if (disabled) {
            return 0;
        }
    }

    /*
     * CPTR_EL2: TZ and TFP are trap-when-set bits, so both read as zero
     * when EL2 is not implemented and these checks fall through.
     */
    if (el <= 2 && !arm_is_secure_below_el3(env)) {
        if (env->cp15.cptr_el[2] & CPTR_TZ) {
            return 2;
        }
        if (env->cp15.cptr_el[2] & CPTR_TFP) {
            return 0;
        }
    }

    /* CPTR_EL3.EZ is an enable bit, so EL3 presence must be checked. */
    if (arm_feature(env, ARM_FEATURE_EL3)
        && !(env->cp15.cptr_el[3] & CPTR_EZ)) {
        return 3;
    }
#endif
    return 0;
}
4541
4542
4543
4544
4545uint32_t sve_zcr_len_for_el(CPUARMState *env, int el)
4546{
4547 ARMCPU *cpu = arm_env_get_cpu(env);
4548 uint32_t zcr_len = cpu->sve_max_vq - 1;
4549
4550 if (el <= 1) {
4551 zcr_len = MIN(zcr_len, 0xf & (uint32_t)env->vfp.zcr_el[1]);
4552 }
4553 if (el < 2 && arm_feature(env, ARM_FEATURE_EL2)) {
4554 zcr_len = MIN(zcr_len, 0xf & (uint32_t)env->vfp.zcr_el[2]);
4555 }
4556 if (el < 3 && arm_feature(env, ARM_FEATURE_EL3)) {
4557 zcr_len = MIN(zcr_len, 0xf & (uint32_t)env->vfp.zcr_el[3]);
4558 }
4559 return zcr_len;
4560}
4561
4562static void zcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
4563 uint64_t value)
4564{
4565 int cur_el = arm_current_el(env);
4566 int old_len = sve_zcr_len_for_el(env, cur_el);
4567 int new_len;
4568
4569
4570 raw_write(env, ri, value & 0xf);
4571
4572
4573
4574
4575
4576 new_len = sve_zcr_len_for_el(env, cur_el);
4577 if (new_len < old_len) {
4578 aarch64_sve_narrow_vq(env, new_len + 1);
4579 }
4580}
4581
/* ZCR_EL1: SVE vector-length control visible at EL1. */
static const ARMCPRegInfo zcr_el1_reginfo = {
    .name = "ZCR_EL1", .state = ARM_CP_STATE_AA64,
    .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 2, .opc2 = 0,
    .access = PL1_RW, .type = ARM_CP_SVE,
    .fieldoffset = offsetof(CPUARMState, vfp.zcr_el[1]),
    /* zcr_write narrows SVE state if the effective length shrinks. */
    .writefn = zcr_write, .raw_writefn = raw_write
};
4589
/* ZCR_EL2: SVE vector-length control at EL2 (used when EL2 exists). */
static const ARMCPRegInfo zcr_el2_reginfo = {
    .name = "ZCR_EL2", .state = ARM_CP_STATE_AA64,
    .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 2, .opc2 = 0,
    .access = PL2_RW, .type = ARM_CP_SVE,
    .fieldoffset = offsetof(CPUARMState, vfp.zcr_el[2]),
    .writefn = zcr_write, .raw_writefn = raw_write
};
4597
/*
 * ZCR_EL2 variant registered when EL2 is not implemented: reads as
 * zero and ignores writes, with no backing state.
 */
static const ARMCPRegInfo zcr_no_el2_reginfo = {
    .name = "ZCR_EL2", .state = ARM_CP_STATE_AA64,
    .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 2, .opc2 = 0,
    .access = PL2_RW, .type = ARM_CP_SVE,
    .readfn = arm_cp_read_zero, .writefn = arm_cp_write_ignore
};
4604
/* ZCR_EL3: SVE vector-length control at EL3 (used when EL3 exists). */
static const ARMCPRegInfo zcr_el3_reginfo = {
    .name = "ZCR_EL3", .state = ARM_CP_STATE_AA64,
    .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 2, .opc2 = 0,
    .access = PL3_RW, .type = ARM_CP_SVE,
    .fieldoffset = offsetof(CPUARMState, vfp.zcr_el[3]),
    .writefn = zcr_write, .raw_writefn = raw_write
};
4612
/*
 * Re-derive the TCG watchpoint for debug register pair @n from the
 * current DBGWVR[n]/DBGWCR[n] contents, removing any previous one.
 */
void hw_watchpoint_update(ARMCPU *cpu, int n)
{
    CPUARMState *env = &cpu->env;
    vaddr len = 0;
    vaddr wvr = env->cp15.dbgwvr[n];
    uint64_t wcr = env->cp15.dbgwcr[n];
    int mask;
    int flags = BP_CPU | BP_STOP_BEFORE_ACCESS;

    if (env->cpu_watchpoint[n]) {
        cpu_watchpoint_remove_by_ref(CPU(cpu), env->cpu_watchpoint[n]);
        env->cpu_watchpoint[n] = NULL;
    }

    if (!extract64(wcr, 0, 1)) {
        /* E bit clear : watchpoint disabled */
        return;
    }

    /* LSC field (bits [4:3]) selects load/store accesses to match. */
    switch (extract64(wcr, 3, 2)) {
    case 0:
        /* LSC 00 is reserved; treat as if the watchpoint were disabled */
        return;
    case 1:
        flags |= BP_MEM_READ;
        break;
    case 2:
        flags |= BP_MEM_WRITE;
        break;
    case 3:
        flags |= BP_MEM_ACCESS;
        break;
    }

    /*
     * MASK (bits [27:24]) selects a power-of-two region; when nonzero
     * it takes precedence here and BAS is ignored.  NOTE(review):
     * using MASK and BAS together is presumably a CONSTRAINED
     * UNPREDICTABLE case -- ignoring BAS is this implementation's
     * choice; confirm against the Arm ARM.
     */
    mask = extract64(wcr, 24, 4);
    if (mask == 1 || mask == 2) {
        /*
         * MASK values 1 and 2 are reserved; we choose to behave as if
         * the watchpoint were disabled.
         */
        return;
    } else if (mask) {
        /* Watchpoint covers an aligned area of 2^MASK bytes. */
        len = 1ULL << mask;
        /*
         * Force-align the base address; any nonzero masked bits in WVR
         * are simply discarded here.
         */
        wvr &= ~(len - 1);
    } else {
        /* Watchpoint covers the bytes selected by BAS (bits [12:5]). */
        int bas = extract64(wcr, 5, 8);
        int basstart;

        if (bas == 0) {
            /* No bytes selected: acts as if the watchpoint is disabled */
            return;
        }

        if (extract64(wvr, 2, 1)) {
            /*
             * Address is only 4-aligned: use BAS[3:0] only.
             * NOTE(review): presumably the deprecated 4-aligned WVR
             * form where BAS[7:4] are ignored -- confirm.
             */
            bas &= 0xf;
        }

        /*
         * Treat BAS as a contiguous run of set bits: the run's offset
         * becomes the address adjustment and its length the watched
         * length.  (ctz32/cto32 extract the run start and width.)
         */
        basstart = ctz32(bas);
        len = cto32(bas >> basstart);
        wvr += basstart;
    }

    cpu_watchpoint_insert(CPU(cpu), wvr, len, flags,
                          &env->cpu_watchpoint[n]);
}
4695
4696void hw_watchpoint_update_all(ARMCPU *cpu)
4697{
4698 int i;
4699 CPUARMState *env = &cpu->env;
4700
4701
4702
4703
4704 cpu_watchpoint_remove_all(CPU(cpu), BP_CPU);
4705 memset(env->cpu_watchpoint, 0, sizeof(env->cpu_watchpoint));
4706
4707 for (i = 0; i < ARRAY_SIZE(cpu->env.cpu_watchpoint); i++) {
4708 hw_watchpoint_update(cpu, i);
4709 }
4710}
4711
4712static void dbgwvr_write(CPUARMState *env, const ARMCPRegInfo *ri,
4713 uint64_t value)
4714{
4715 ARMCPU *cpu = arm_env_get_cpu(env);
4716 int i = ri->crm;
4717
4718
4719
4720
4721
4722 value = sextract64(value, 0, 49) & ~3ULL;
4723
4724 raw_write(env, ri, value);
4725 hw_watchpoint_update(cpu, i);
4726}
4727
4728static void dbgwcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
4729 uint64_t value)
4730{
4731 ARMCPU *cpu = arm_env_get_cpu(env);
4732 int i = ri->crm;
4733
4734 raw_write(env, ri, value);
4735 hw_watchpoint_update(cpu, i);
4736}
4737
/*
 * Re-derive the TCG breakpoint for debug register pair @n from the
 * current DBGBVR[n]/DBGBCR[n] contents, removing any previous one.
 */
void hw_breakpoint_update(ARMCPU *cpu, int n)
{
    CPUARMState *env = &cpu->env;
    uint64_t bvr = env->cp15.dbgbvr[n];
    uint64_t bcr = env->cp15.dbgbcr[n];
    vaddr addr;
    int bt;
    int flags = BP_CPU;

    if (env->cpu_breakpoint[n]) {
        cpu_breakpoint_remove_by_ref(CPU(cpu), env->cpu_breakpoint[n]);
        env->cpu_breakpoint[n] = NULL;
    }

    if (!extract64(bcr, 0, 1)) {
        /* E bit clear : breakpoint disabled */
        return;
    }

    /* BT field (bits [23:20]) selects the breakpoint type. */
    bt = extract64(bcr, 20, 4);

    switch (bt) {
    case 4: /* unlinked address mismatch */
    case 5: /* linked address mismatch */
        qemu_log_mask(LOG_UNIMP,
                      "arm: address mismatch breakpoint types not implemented\n");
        return;
    case 0: /* unlinked address match */
    case 1: /* linked address match */
    {
        /*
         * Bits [63:49] behave as a sign-extension of bit 48 and bits
         * [1:0] are RES0, hence the sextract64/~3 canonicalisation.
         * The BAS field (bits [8:5]) selects which halfwords of the
         * word-aligned address the breakpoint covers, allowing
         * breakpoints on 16-bit instructions:
         *   0x0 : never matches -> treat as disabled
         *   0xc : second halfword -> bump the address by 2
         *   anything else : use the word-aligned address as-is.
         * NOTE(review): non-contiguous/partial BAS values thus match
         * the whole word -- presumably a CONSTRAINED UNPREDICTABLE
         * choice; confirm against the Arm ARM.
         */
        int bas = extract64(bcr, 5, 4);
        addr = sextract64(bvr, 0, 49) & ~3ULL;
        if (bas == 0) {
            return;
        }
        if (bas == 0xc) {
            addr += 2;
        }
        break;
    }
    case 2: /* unlinked context ID match */
    case 8: /* unlinked VMID match */
    case 10: /* unlinked context ID and VMID match */
        qemu_log_mask(LOG_UNIMP,
                      "arm: unlinked context breakpoint types not implemented\n");
        return;
    case 9: /* linked VMID match */
    case 11: /* linked context ID and VMID match */
    case 3: /* linked context ID match */
    default:
        /*
         * Linked context matches generate no events of their own (they
         * only take effect through the breakpoints/watchpoints that
         * link to them); reserved values also generate no events.
         */
        return;
    }

    cpu_breakpoint_insert(CPU(cpu), addr, flags, &env->cpu_breakpoint[n]);
}
4813
4814void hw_breakpoint_update_all(ARMCPU *cpu)
4815{
4816 int i;
4817 CPUARMState *env = &cpu->env;
4818
4819
4820
4821
4822 cpu_breakpoint_remove_all(CPU(cpu), BP_CPU);
4823 memset(env->cpu_breakpoint, 0, sizeof(env->cpu_breakpoint));
4824
4825 for (i = 0; i < ARRAY_SIZE(cpu->env.cpu_breakpoint); i++) {
4826 hw_breakpoint_update(cpu, i);
4827 }
4828}
4829
4830static void dbgbvr_write(CPUARMState *env, const ARMCPRegInfo *ri,
4831 uint64_t value)
4832{
4833 ARMCPU *cpu = arm_env_get_cpu(env);
4834 int i = ri->crm;
4835
4836 raw_write(env, ri, value);
4837 hw_breakpoint_update(cpu, i);
4838}
4839
static void dbgbcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    int i = ri->crm;

    /*
     * BAS[3] is a read-only copy of BAS[2], and BAS[1] a read-only copy
     * of BAS[0]; mirror the written bits into the stored value so reads
     * observe the architected aliasing.
     */
    value = deposit64(value, 6, 1, extract64(value, 5, 1));
    value = deposit64(value, 8, 1, extract64(value, 7, 1));

    raw_write(env, ri, value);
    hw_breakpoint_update(cpu, i);
}
4855
/*
 * Define the architectural debug registers (DBGDIDR, the common debug
 * register set, and per-index breakpoint/watchpoint register pairs)
 * according to the counts advertised in the CPU's DBGDIDR value.
 */
static void define_debug_regs(ARMCPU *cpu)
{
    int i;
    int wrps, brps, ctx_cmps;
    ARMCPRegInfo dbgdidr = {
        .name = "DBGDIDR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 0,
        .access = PL0_R, .accessfn = access_tda,
        .type = ARM_CP_CONST, .resetvalue = cpu->dbgdidr,
    };

    /* These DBGDIDR fields all hold "number of Xs minus 1". */
    brps = extract32(cpu->dbgdidr, 24, 4);
    wrps = extract32(cpu->dbgdidr, 28, 4);
    ctx_cmps = extract32(cpu->dbgdidr, 20, 4);

    assert(ctx_cmps <= brps);

    /*
     * DBGDIDR and ID_AA64DFR0_EL1 both describe the debug resources;
     * on an AArch64-capable CPU check that they agree.
     */
    if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
        assert(extract32(cpu->id_aa64dfr0, 12, 4) == brps);
        assert(extract32(cpu->id_aa64dfr0, 20, 4) == wrps);
        assert(extract32(cpu->id_aa64dfr0, 28, 4) == ctx_cmps);
    }

    define_one_arm_cp_reg(cpu, &dbgdidr);
    define_arm_cp_regs(cpu, debug_cp_reginfo);

    if (arm_feature(&cpu->env, ARM_FEATURE_LPAE)) {
        define_arm_cp_regs(cpu, debug_lpae_cp_reginfo);
    }

    /* One DBGBVR/DBGBCR pair per implemented breakpoint. */
    for (i = 0; i < brps + 1; i++) {
        ARMCPRegInfo dbgregs[] = {
            { .name = "DBGBVR", .state = ARM_CP_STATE_BOTH,
              .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 4,
              .access = PL1_RW, .accessfn = access_tda,
              .fieldoffset = offsetof(CPUARMState, cp15.dbgbvr[i]),
              .writefn = dbgbvr_write, .raw_writefn = raw_write
            },
            { .name = "DBGBCR", .state = ARM_CP_STATE_BOTH,
              .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 5,
              .access = PL1_RW, .accessfn = access_tda,
              .fieldoffset = offsetof(CPUARMState, cp15.dbgbcr[i]),
              .writefn = dbgbcr_write, .raw_writefn = raw_write
            },
            REGINFO_SENTINEL
        };
        define_arm_cp_regs(cpu, dbgregs);
    }

    /* One DBGWVR/DBGWCR pair per implemented watchpoint. */
    for (i = 0; i < wrps + 1; i++) {
        ARMCPRegInfo dbgregs[] = {
            { .name = "DBGWVR", .state = ARM_CP_STATE_BOTH,
              .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 6,
              .access = PL1_RW, .accessfn = access_tda,
              .fieldoffset = offsetof(CPUARMState, cp15.dbgwvr[i]),
              .writefn = dbgwvr_write, .raw_writefn = raw_write
            },
            { .name = "DBGWCR", .state = ARM_CP_STATE_BOTH,
              .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 7,
              .access = PL1_RW, .accessfn = access_tda,
              .fieldoffset = offsetof(CPUARMState, cp15.dbgwcr[i]),
              .writefn = dbgwcr_write, .raw_writefn = raw_write
            },
            REGINFO_SENTINEL
        };
        define_arm_cp_regs(cpu, dbgregs);
    }
}
4931
4932
4933
4934
4935
4936
4937static uint64_t id_pfr1_read(CPUARMState *env, const ARMCPRegInfo *ri)
4938{
4939 ARMCPU *cpu = arm_env_get_cpu(env);
4940 uint64_t pfr1 = cpu->id_pfr1;
4941
4942 if (env->gicv3state) {
4943 pfr1 |= 1 << 28;
4944 }
4945 return pfr1;
4946}
4947
4948static uint64_t id_aa64pfr0_read(CPUARMState *env, const ARMCPRegInfo *ri)
4949{
4950 ARMCPU *cpu = arm_env_get_cpu(env);
4951 uint64_t pfr0 = cpu->isar.id_aa64pfr0;
4952
4953 if (env->gicv3state) {
4954 pfr0 |= 1 << 24;
4955 }
4956 return pfr0;
4957}
4958
4959void register_cp_regs_for_features(ARMCPU *cpu)
4960{
4961
4962 CPUARMState *env = &cpu->env;
4963 if (arm_feature(env, ARM_FEATURE_M)) {
4964
4965 return;
4966 }
4967
4968 define_arm_cp_regs(cpu, cp_reginfo);
4969 if (!arm_feature(env, ARM_FEATURE_V8)) {
4970
4971
4972
4973 define_arm_cp_regs(cpu, not_v8_cp_reginfo);
4974 }
4975
4976 if (arm_feature(env, ARM_FEATURE_V6)) {
4977
4978 ARMCPRegInfo v6_idregs[] = {
4979 { .name = "ID_PFR0", .state = ARM_CP_STATE_BOTH,
4980 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 0,
4981 .access = PL1_R, .type = ARM_CP_CONST,
4982 .resetvalue = cpu->id_pfr0 },
4983
4984
4985
4986 { .name = "ID_PFR1", .state = ARM_CP_STATE_BOTH,
4987 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 1,
4988 .access = PL1_R, .type = ARM_CP_NO_RAW,
4989 .readfn = id_pfr1_read,
4990 .writefn = arm_cp_write_ignore },
4991 { .name = "ID_DFR0", .state = ARM_CP_STATE_BOTH,
4992 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 2,
4993 .access = PL1_R, .type = ARM_CP_CONST,
4994 .resetvalue = cpu->id_dfr0 },
4995 { .name = "ID_AFR0", .state = ARM_CP_STATE_BOTH,
4996 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 3,
4997 .access = PL1_R, .type = ARM_CP_CONST,
4998 .resetvalue = cpu->id_afr0 },
4999 { .name = "ID_MMFR0", .state = ARM_CP_STATE_BOTH,
5000 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 4,
5001 .access = PL1_R, .type = ARM_CP_CONST,
5002 .resetvalue = cpu->id_mmfr0 },
5003 { .name = "ID_MMFR1", .state = ARM_CP_STATE_BOTH,
5004 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 5,
5005 .access = PL1_R, .type = ARM_CP_CONST,
5006 .resetvalue = cpu->id_mmfr1 },
5007 { .name = "ID_MMFR2", .state = ARM_CP_STATE_BOTH,
5008 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 6,
5009 .access = PL1_R, .type = ARM_CP_CONST,
5010 .resetvalue = cpu->id_mmfr2 },
5011 { .name = "ID_MMFR3", .state = ARM_CP_STATE_BOTH,
5012 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 7,
5013 .access = PL1_R, .type = ARM_CP_CONST,
5014 .resetvalue = cpu->id_mmfr3 },
5015 { .name = "ID_ISAR0", .state = ARM_CP_STATE_BOTH,
5016 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 0,
5017 .access = PL1_R, .type = ARM_CP_CONST,
5018 .resetvalue = cpu->isar.id_isar0 },
5019 { .name = "ID_ISAR1", .state = ARM_CP_STATE_BOTH,
5020 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 1,
5021 .access = PL1_R, .type = ARM_CP_CONST,
5022 .resetvalue = cpu->isar.id_isar1 },
5023 { .name = "ID_ISAR2", .state = ARM_CP_STATE_BOTH,
5024 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 2,
5025 .access = PL1_R, .type = ARM_CP_CONST,
5026 .resetvalue = cpu->isar.id_isar2 },
5027 { .name = "ID_ISAR3", .state = ARM_CP_STATE_BOTH,
5028 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 3,
5029 .access = PL1_R, .type = ARM_CP_CONST,
5030 .resetvalue = cpu->isar.id_isar3 },
5031 { .name = "ID_ISAR4", .state = ARM_CP_STATE_BOTH,
5032 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 4,
5033 .access = PL1_R, .type = ARM_CP_CONST,
5034 .resetvalue = cpu->isar.id_isar4 },
5035 { .name = "ID_ISAR5", .state = ARM_CP_STATE_BOTH,
5036 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 5,
5037 .access = PL1_R, .type = ARM_CP_CONST,
5038 .resetvalue = cpu->isar.id_isar5 },
5039 { .name = "ID_MMFR4", .state = ARM_CP_STATE_BOTH,
5040 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 6,
5041 .access = PL1_R, .type = ARM_CP_CONST,
5042 .resetvalue = cpu->id_mmfr4 },
5043 { .name = "ID_ISAR6", .state = ARM_CP_STATE_BOTH,
5044 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 7,
5045 .access = PL1_R, .type = ARM_CP_CONST,
5046 .resetvalue = cpu->isar.id_isar6 },
5047 REGINFO_SENTINEL
5048 };
5049 define_arm_cp_regs(cpu, v6_idregs);
5050 define_arm_cp_regs(cpu, v6_cp_reginfo);
5051 } else {
5052 define_arm_cp_regs(cpu, not_v6_cp_reginfo);
5053 }
5054 if (arm_feature(env, ARM_FEATURE_V6K)) {
5055 define_arm_cp_regs(cpu, v6k_cp_reginfo);
5056 }
5057 if (arm_feature(env, ARM_FEATURE_V7MP) &&
5058 !arm_feature(env, ARM_FEATURE_PMSA)) {
5059 define_arm_cp_regs(cpu, v7mp_cp_reginfo);
5060 }
5061 if (arm_feature(env, ARM_FEATURE_V7)) {
5062
5063
5064
5065
5066#ifndef CONFIG_USER_ONLY
5067 ARMCPRegInfo pmcr = {
5068 .name = "PMCR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 0,
5069 .access = PL0_RW,
5070 .type = ARM_CP_IO | ARM_CP_ALIAS,
5071 .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcr),
5072 .accessfn = pmreg_access, .writefn = pmcr_write,
5073 .raw_writefn = raw_write,
5074 };
5075 ARMCPRegInfo pmcr64 = {
5076 .name = "PMCR_EL0", .state = ARM_CP_STATE_AA64,
5077 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 0,
5078 .access = PL0_RW, .accessfn = pmreg_access,
5079 .type = ARM_CP_IO,
5080 .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcr),
5081 .resetvalue = cpu->midr & 0xff000000,
5082 .writefn = pmcr_write, .raw_writefn = raw_write,
5083 };
5084 define_one_arm_cp_reg(cpu, &pmcr);
5085 define_one_arm_cp_reg(cpu, &pmcr64);
5086#endif
5087 ARMCPRegInfo clidr = {
5088 .name = "CLIDR", .state = ARM_CP_STATE_BOTH,
5089 .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = 1,
5090 .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = cpu->clidr
5091 };
5092 define_one_arm_cp_reg(cpu, &clidr);
5093 define_arm_cp_regs(cpu, v7_cp_reginfo);
5094 define_debug_regs(cpu);
5095 } else {
5096 define_arm_cp_regs(cpu, not_v7_cp_reginfo);
5097 }
5098 if (arm_feature(env, ARM_FEATURE_V8)) {
5099
5100
5101
5102
5103
5104 ARMCPRegInfo v8_idregs[] = {
5105
5106
5107
5108
5109 { .name = "ID_AA64PFR0_EL1", .state = ARM_CP_STATE_AA64,
5110 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 0,
5111 .access = PL1_R, .type = ARM_CP_NO_RAW,
5112 .readfn = id_aa64pfr0_read,
5113 .writefn = arm_cp_write_ignore },
5114 { .name = "ID_AA64PFR1_EL1", .state = ARM_CP_STATE_AA64,
5115 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 1,
5116 .access = PL1_R, .type = ARM_CP_CONST,
5117 .resetvalue = cpu->isar.id_aa64pfr1},
5118 { .name = "ID_AA64PFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
5119 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 2,
5120 .access = PL1_R, .type = ARM_CP_CONST,
5121 .resetvalue = 0 },
5122 { .name = "ID_AA64PFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
5123 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 3,
5124 .access = PL1_R, .type = ARM_CP_CONST,
5125 .resetvalue = 0 },
5126 { .name = "ID_AA64ZFR0_EL1", .state = ARM_CP_STATE_AA64,
5127 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 4,
5128 .access = PL1_R, .type = ARM_CP_CONST,
5129
5130 .resetvalue = 0 },
5131 { .name = "ID_AA64PFR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
5132 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 5,
5133 .access = PL1_R, .type = ARM_CP_CONST,
5134 .resetvalue = 0 },
5135 { .name = "ID_AA64PFR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
5136 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 6,
5137 .access = PL1_R, .type = ARM_CP_CONST,
5138 .resetvalue = 0 },
5139 { .name = "ID_AA64PFR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
5140 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 7,
5141 .access = PL1_R, .type = ARM_CP_CONST,
5142 .resetvalue = 0 },
5143 { .name = "ID_AA64DFR0_EL1", .state = ARM_CP_STATE_AA64,
5144 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 0,
5145 .access = PL1_R, .type = ARM_CP_CONST,
5146 .resetvalue = cpu->id_aa64dfr0 },
5147 { .name = "ID_AA64DFR1_EL1", .state = ARM_CP_STATE_AA64,
5148 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 1,
5149 .access = PL1_R, .type = ARM_CP_CONST,
5150 .resetvalue = cpu->id_aa64dfr1 },
5151 { .name = "ID_AA64DFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
5152 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 2,
5153 .access = PL1_R, .type = ARM_CP_CONST,
5154 .resetvalue = 0 },
5155 { .name = "ID_AA64DFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
5156 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 3,
5157 .access = PL1_R, .type = ARM_CP_CONST,
5158 .resetvalue = 0 },
5159 { .name = "ID_AA64AFR0_EL1", .state = ARM_CP_STATE_AA64,
5160 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 4,
5161 .access = PL1_R, .type = ARM_CP_CONST,
5162 .resetvalue = cpu->id_aa64afr0 },
5163 { .name = "ID_AA64AFR1_EL1", .state = ARM_CP_STATE_AA64,
5164 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 5,
5165 .access = PL1_R, .type = ARM_CP_CONST,
5166 .resetvalue = cpu->id_aa64afr1 },
5167 { .name = "ID_AA64AFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
5168 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 6,
5169 .access = PL1_R, .type = ARM_CP_CONST,
5170 .resetvalue = 0 },
5171 { .name = "ID_AA64AFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
5172 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 7,
5173 .access = PL1_R, .type = ARM_CP_CONST,
5174 .resetvalue = 0 },
5175 { .name = "ID_AA64ISAR0_EL1", .state = ARM_CP_STATE_AA64,
5176 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 0,
5177 .access = PL1_R, .type = ARM_CP_CONST,
5178 .resetvalue = cpu->isar.id_aa64isar0 },
5179 { .name = "ID_AA64ISAR1_EL1", .state = ARM_CP_STATE_AA64,
5180 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 1,
5181 .access = PL1_R, .type = ARM_CP_CONST,
5182 .resetvalue = cpu->isar.id_aa64isar1 },
5183 { .name = "ID_AA64ISAR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
5184 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 2,
5185 .access = PL1_R, .type = ARM_CP_CONST,
5186 .resetvalue = 0 },
5187 { .name = "ID_AA64ISAR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
5188 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 3,
5189 .access = PL1_R, .type = ARM_CP_CONST,
5190 .resetvalue = 0 },
5191 { .name = "ID_AA64ISAR4_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
5192 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 4,
5193 .access = PL1_R, .type = ARM_CP_CONST,
5194 .resetvalue = 0 },
5195 { .name = "ID_AA64ISAR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
5196 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 5,
5197 .access = PL1_R, .type = ARM_CP_CONST,
5198 .resetvalue = 0 },
5199 { .name = "ID_AA64ISAR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
5200 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 6,
5201 .access = PL1_R, .type = ARM_CP_CONST,
5202 .resetvalue = 0 },
5203 { .name = "ID_AA64ISAR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
5204 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 7,
5205 .access = PL1_R, .type = ARM_CP_CONST,
5206 .resetvalue = 0 },
5207 { .name = "ID_AA64MMFR0_EL1", .state = ARM_CP_STATE_AA64,
5208 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 0,
5209 .access = PL1_R, .type = ARM_CP_CONST,
5210 .resetvalue = cpu->id_aa64mmfr0 },
5211 { .name = "ID_AA64MMFR1_EL1", .state = ARM_CP_STATE_AA64,
5212 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 1,
5213 .access = PL1_R, .type = ARM_CP_CONST,
5214 .resetvalue = cpu->id_aa64mmfr1 },
5215 { .name = "ID_AA64MMFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
5216 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 2,
5217 .access = PL1_R, .type = ARM_CP_CONST,
5218 .resetvalue = 0 },
5219 { .name = "ID_AA64MMFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
5220 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 3,
5221 .access = PL1_R, .type = ARM_CP_CONST,
5222 .resetvalue = 0 },
5223 { .name = "ID_AA64MMFR4_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
5224 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 4,
5225 .access = PL1_R, .type = ARM_CP_CONST,
5226 .resetvalue = 0 },
5227 { .name = "ID_AA64MMFR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
5228 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 5,
5229 .access = PL1_R, .type = ARM_CP_CONST,
5230 .resetvalue = 0 },
5231 { .name = "ID_AA64MMFR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
5232 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 6,
5233 .access = PL1_R, .type = ARM_CP_CONST,
5234 .resetvalue = 0 },
5235 { .name = "ID_AA64MMFR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
5236 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 7,
5237 .access = PL1_R, .type = ARM_CP_CONST,
5238 .resetvalue = 0 },
5239 { .name = "MVFR0_EL1", .state = ARM_CP_STATE_AA64,
5240 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 0,
5241 .access = PL1_R, .type = ARM_CP_CONST,
5242 .resetvalue = cpu->isar.mvfr0 },
5243 { .name = "MVFR1_EL1", .state = ARM_CP_STATE_AA64,
5244 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 1,
5245 .access = PL1_R, .type = ARM_CP_CONST,
5246 .resetvalue = cpu->isar.mvfr1 },
5247 { .name = "MVFR2_EL1", .state = ARM_CP_STATE_AA64,
5248 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 2,
5249 .access = PL1_R, .type = ARM_CP_CONST,
5250 .resetvalue = cpu->isar.mvfr2 },
5251 { .name = "MVFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
5252 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 3,
5253 .access = PL1_R, .type = ARM_CP_CONST,
5254 .resetvalue = 0 },
5255 { .name = "MVFR4_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
5256 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 4,
5257 .access = PL1_R, .type = ARM_CP_CONST,
5258 .resetvalue = 0 },
5259 { .name = "MVFR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
5260 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 5,
5261 .access = PL1_R, .type = ARM_CP_CONST,
5262 .resetvalue = 0 },
5263 { .name = "MVFR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
5264 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 6,
5265 .access = PL1_R, .type = ARM_CP_CONST,
5266 .resetvalue = 0 },
5267 { .name = "MVFR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
5268 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 7,
5269 .access = PL1_R, .type = ARM_CP_CONST,
5270 .resetvalue = 0 },
5271 { .name = "PMCEID0", .state = ARM_CP_STATE_AA32,
5272 .cp = 15, .opc1 = 0, .crn = 9, .crm = 12, .opc2 = 6,
5273 .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
5274 .resetvalue = cpu->pmceid0 },
5275 { .name = "PMCEID0_EL0", .state = ARM_CP_STATE_AA64,
5276 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 6,
5277 .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
5278 .resetvalue = cpu->pmceid0 },
5279 { .name = "PMCEID1", .state = ARM_CP_STATE_AA32,
5280 .cp = 15, .opc1 = 0, .crn = 9, .crm = 12, .opc2 = 7,
5281 .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
5282 .resetvalue = cpu->pmceid1 },
5283 { .name = "PMCEID1_EL0", .state = ARM_CP_STATE_AA64,
5284 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 7,
5285 .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
5286 .resetvalue = cpu->pmceid1 },
5287 REGINFO_SENTINEL
5288 };
5289
5290 if (!arm_feature(env, ARM_FEATURE_EL3) &&
5291 !arm_feature(env, ARM_FEATURE_EL2)) {
5292 ARMCPRegInfo rvbar = {
5293 .name = "RVBAR_EL1", .state = ARM_CP_STATE_AA64,
5294 .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 0, .opc2 = 1,
5295 .type = ARM_CP_CONST, .access = PL1_R, .resetvalue = cpu->rvbar
5296 };
5297 define_one_arm_cp_reg(cpu, &rvbar);
5298 }
5299 define_arm_cp_regs(cpu, v8_idregs);
5300 define_arm_cp_regs(cpu, v8_cp_reginfo);
5301 }
5302 if (arm_feature(env, ARM_FEATURE_EL2)) {
5303 uint64_t vmpidr_def = mpidr_read_val(env);
5304 ARMCPRegInfo vpidr_regs[] = {
5305 { .name = "VPIDR", .state = ARM_CP_STATE_AA32,
5306 .cp = 15, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 0,
5307 .access = PL2_RW, .accessfn = access_el3_aa32ns,
5308 .resetvalue = cpu->midr, .type = ARM_CP_ALIAS,
5309 .fieldoffset = offsetoflow32(CPUARMState, cp15.vpidr_el2) },
5310 { .name = "VPIDR_EL2", .state = ARM_CP_STATE_AA64,
5311 .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 0,
5312 .access = PL2_RW, .resetvalue = cpu->midr,
5313 .fieldoffset = offsetof(CPUARMState, cp15.vpidr_el2) },
5314 { .name = "VMPIDR", .state = ARM_CP_STATE_AA32,
5315 .cp = 15, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 5,
5316 .access = PL2_RW, .accessfn = access_el3_aa32ns,
5317 .resetvalue = vmpidr_def, .type = ARM_CP_ALIAS,
5318 .fieldoffset = offsetoflow32(CPUARMState, cp15.vmpidr_el2) },
5319 { .name = "VMPIDR_EL2", .state = ARM_CP_STATE_AA64,
5320 .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 5,
5321 .access = PL2_RW,
5322 .resetvalue = vmpidr_def,
5323 .fieldoffset = offsetof(CPUARMState, cp15.vmpidr_el2) },
5324 REGINFO_SENTINEL
5325 };
5326 define_arm_cp_regs(cpu, vpidr_regs);
5327 define_arm_cp_regs(cpu, el2_cp_reginfo);
5328 if (arm_feature(env, ARM_FEATURE_V8)) {
5329 define_arm_cp_regs(cpu, el2_v8_cp_reginfo);
5330 }
5331
5332 if (!arm_feature(env, ARM_FEATURE_EL3)) {
5333 ARMCPRegInfo rvbar = {
5334 .name = "RVBAR_EL2", .state = ARM_CP_STATE_AA64,
5335 .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 1,
5336 .type = ARM_CP_CONST, .access = PL2_R, .resetvalue = cpu->rvbar
5337 };
5338 define_one_arm_cp_reg(cpu, &rvbar);
5339 }
5340 } else {
5341
5342
5343
5344 if (arm_feature(env, ARM_FEATURE_EL3)) {
5345
5346
5347
5348 ARMCPRegInfo vpidr_regs[] = {
5349 { .name = "VPIDR_EL2", .state = ARM_CP_STATE_BOTH,
5350 .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 0,
5351 .access = PL2_RW, .accessfn = access_el3_aa32ns_aa64any,
5352 .type = ARM_CP_CONST, .resetvalue = cpu->midr,
5353 .fieldoffset = offsetof(CPUARMState, cp15.vpidr_el2) },
5354 { .name = "VMPIDR_EL2", .state = ARM_CP_STATE_BOTH,
5355 .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 5,
5356 .access = PL2_RW, .accessfn = access_el3_aa32ns_aa64any,
5357 .type = ARM_CP_NO_RAW,
5358 .writefn = arm_cp_write_ignore, .readfn = mpidr_read },
5359 REGINFO_SENTINEL
5360 };
5361 define_arm_cp_regs(cpu, vpidr_regs);
5362 define_arm_cp_regs(cpu, el3_no_el2_cp_reginfo);
5363 if (arm_feature(env, ARM_FEATURE_V8)) {
5364 define_arm_cp_regs(cpu, el3_no_el2_v8_cp_reginfo);
5365 }
5366 }
5367 }
5368 if (arm_feature(env, ARM_FEATURE_EL3)) {
5369 define_arm_cp_regs(cpu, el3_cp_reginfo);
5370 ARMCPRegInfo el3_regs[] = {
5371 { .name = "RVBAR_EL3", .state = ARM_CP_STATE_AA64,
5372 .opc0 = 3, .opc1 = 6, .crn = 12, .crm = 0, .opc2 = 1,
5373 .type = ARM_CP_CONST, .access = PL3_R, .resetvalue = cpu->rvbar },
5374 { .name = "SCTLR_EL3", .state = ARM_CP_STATE_AA64,
5375 .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 0, .opc2 = 0,
5376 .access = PL3_RW,
5377 .raw_writefn = raw_write, .writefn = sctlr_write,
5378 .fieldoffset = offsetof(CPUARMState, cp15.sctlr_el[3]),
5379 .resetvalue = cpu->reset_sctlr },
5380 REGINFO_SENTINEL
5381 };
5382
5383 define_arm_cp_regs(cpu, el3_regs);
5384 }
5385
5386
5387
5388
5389
5390
5391
5392
5393 if (arm_feature(env, ARM_FEATURE_EL3)) {
5394 if (arm_feature(env, ARM_FEATURE_AARCH64)) {
5395 ARMCPRegInfo nsacr = {
5396 .name = "NSACR", .type = ARM_CP_CONST,
5397 .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 2,
5398 .access = PL1_RW, .accessfn = nsacr_access,
5399 .resetvalue = 0xc00
5400 };
5401 define_one_arm_cp_reg(cpu, &nsacr);
5402 } else {
5403 ARMCPRegInfo nsacr = {
5404 .name = "NSACR",
5405 .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 2,
5406 .access = PL3_RW | PL1_R,
5407 .resetvalue = 0,
5408 .fieldoffset = offsetof(CPUARMState, cp15.nsacr)
5409 };
5410 define_one_arm_cp_reg(cpu, &nsacr);
5411 }
5412 } else {
5413 if (arm_feature(env, ARM_FEATURE_V8)) {
5414 ARMCPRegInfo nsacr = {
5415 .name = "NSACR", .type = ARM_CP_CONST,
5416 .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 2,
5417 .access = PL1_R,
5418 .resetvalue = 0xc00
5419 };
5420 define_one_arm_cp_reg(cpu, &nsacr);
5421 }
5422 }
5423
5424 if (arm_feature(env, ARM_FEATURE_PMSA)) {
5425 if (arm_feature(env, ARM_FEATURE_V6)) {
5426
5427 assert(arm_feature(env, ARM_FEATURE_V7));
5428 define_arm_cp_regs(cpu, vmsa_pmsa_cp_reginfo);
5429 define_arm_cp_regs(cpu, pmsav7_cp_reginfo);
5430 } else {
5431 define_arm_cp_regs(cpu, pmsav5_cp_reginfo);
5432 }
5433 } else {
5434 define_arm_cp_regs(cpu, vmsa_pmsa_cp_reginfo);
5435 define_arm_cp_regs(cpu, vmsa_cp_reginfo);
5436 }
5437 if (arm_feature(env, ARM_FEATURE_THUMB2EE)) {
5438 define_arm_cp_regs(cpu, t2ee_cp_reginfo);
5439 }
5440 if (arm_feature(env, ARM_FEATURE_GENERIC_TIMER)) {
5441 define_arm_cp_regs(cpu, generic_timer_cp_reginfo);
5442 }
5443 if (arm_feature(env, ARM_FEATURE_VAPA)) {
5444 define_arm_cp_regs(cpu, vapa_cp_reginfo);
5445 }
5446 if (arm_feature(env, ARM_FEATURE_CACHE_TEST_CLEAN)) {
5447 define_arm_cp_regs(cpu, cache_test_clean_cp_reginfo);
5448 }
5449 if (arm_feature(env, ARM_FEATURE_CACHE_DIRTY_REG)) {
5450 define_arm_cp_regs(cpu, cache_dirty_status_cp_reginfo);
5451 }
5452 if (arm_feature(env, ARM_FEATURE_CACHE_BLOCK_OPS)) {
5453 define_arm_cp_regs(cpu, cache_block_ops_cp_reginfo);
5454 }
5455 if (arm_feature(env, ARM_FEATURE_OMAPCP)) {
5456 define_arm_cp_regs(cpu, omap_cp_reginfo);
5457 }
5458 if (arm_feature(env, ARM_FEATURE_STRONGARM)) {
5459 define_arm_cp_regs(cpu, strongarm_cp_reginfo);
5460 }
5461 if (arm_feature(env, ARM_FEATURE_XSCALE)) {
5462 define_arm_cp_regs(cpu, xscale_cp_reginfo);
5463 }
5464 if (arm_feature(env, ARM_FEATURE_DUMMY_C15_REGS)) {
5465 define_arm_cp_regs(cpu, dummy_c15_cp_reginfo);
5466 }
5467 if (arm_feature(env, ARM_FEATURE_LPAE)) {
5468 define_arm_cp_regs(cpu, lpae_cp_reginfo);
5469 }
5470
5471
5472
5473
5474 {
5475 ARMCPRegInfo id_pre_v8_midr_cp_reginfo[] = {
5476
5477
5478
5479
5480
5481
5482
5483
5484
5485 { .name = "MIDR",
5486 .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = CP_ANY,
5487 .access = PL1_R, .resetvalue = cpu->midr,
5488 .writefn = arm_cp_write_ignore, .raw_writefn = raw_write,
5489 .readfn = midr_read,
5490 .fieldoffset = offsetof(CPUARMState, cp15.c0_cpuid),
5491 .type = ARM_CP_OVERRIDE },
5492
5493 { .name = "DUMMY",
5494 .cp = 15, .crn = 0, .crm = 3, .opc1 = 0, .opc2 = CP_ANY,
5495 .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
5496 { .name = "DUMMY",
5497 .cp = 15, .crn = 0, .crm = 4, .opc1 = 0, .opc2 = CP_ANY,
5498 .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
5499 { .name = "DUMMY",
5500 .cp = 15, .crn = 0, .crm = 5, .opc1 = 0, .opc2 = CP_ANY,
5501 .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
5502 { .name = "DUMMY",
5503 .cp = 15, .crn = 0, .crm = 6, .opc1 = 0, .opc2 = CP_ANY,
5504 .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
5505 { .name = "DUMMY",
5506 .cp = 15, .crn = 0, .crm = 7, .opc1 = 0, .opc2 = CP_ANY,
5507 .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
5508 REGINFO_SENTINEL
5509 };
5510 ARMCPRegInfo id_v8_midr_cp_reginfo[] = {
5511 { .name = "MIDR_EL1", .state = ARM_CP_STATE_BOTH,
5512 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 0, .opc2 = 0,
5513 .access = PL1_R, .type = ARM_CP_NO_RAW, .resetvalue = cpu->midr,
5514 .fieldoffset = offsetof(CPUARMState, cp15.c0_cpuid),
5515 .readfn = midr_read },
5516
5517 { .name = "MIDR", .type = ARM_CP_ALIAS | ARM_CP_CONST,
5518 .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 4,
5519 .access = PL1_R, .resetvalue = cpu->midr },
5520 { .name = "MIDR", .type = ARM_CP_ALIAS | ARM_CP_CONST,
5521 .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 7,
5522 .access = PL1_R, .resetvalue = cpu->midr },
5523 { .name = "REVIDR_EL1", .state = ARM_CP_STATE_BOTH,
5524 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 0, .opc2 = 6,
5525 .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = cpu->revidr },
5526 REGINFO_SENTINEL
5527 };
5528 ARMCPRegInfo id_cp_reginfo[] = {
5529
5530 { .name = "CTR",
5531 .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 1,
5532 .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = cpu->ctr },
5533 { .name = "CTR_EL0", .state = ARM_CP_STATE_AA64,
5534 .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 0, .crm = 0,
5535 .access = PL0_R, .accessfn = ctr_el0_access,
5536 .type = ARM_CP_CONST, .resetvalue = cpu->ctr },
5537
5538 { .name = "TCMTR",
5539 .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 2,
5540 .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
5541 REGINFO_SENTINEL
5542 };
5543
5544 ARMCPRegInfo id_tlbtr_reginfo = {
5545 .name = "TLBTR",
5546 .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 3,
5547 .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0,
5548 };
5549
5550 ARMCPRegInfo id_mpuir_reginfo = {
5551 .name = "MPUIR",
5552 .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 4,
5553 .access = PL1_R, .type = ARM_CP_CONST,
5554 .resetvalue = cpu->pmsav7_dregion << 8
5555 };
5556 ARMCPRegInfo crn0_wi_reginfo = {
5557 .name = "CRN0_WI", .cp = 15, .crn = 0, .crm = CP_ANY,
5558 .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_W,
5559 .type = ARM_CP_NOP | ARM_CP_OVERRIDE
5560 };
5561 if (arm_feature(env, ARM_FEATURE_OMAPCP) ||
5562 arm_feature(env, ARM_FEATURE_STRONGARM)) {
5563 ARMCPRegInfo *r;
5564
5565
5566
5567
5568
5569 define_one_arm_cp_reg(cpu, &crn0_wi_reginfo);
5570 for (r = id_pre_v8_midr_cp_reginfo;
5571 r->type != ARM_CP_SENTINEL; r++) {
5572 r->access = PL1_RW;
5573 }
5574 for (r = id_cp_reginfo; r->type != ARM_CP_SENTINEL; r++) {
5575 r->access = PL1_RW;
5576 }
5577 id_mpuir_reginfo.access = PL1_RW;
5578 id_tlbtr_reginfo.access = PL1_RW;
5579 }
5580 if (arm_feature(env, ARM_FEATURE_V8)) {
5581 define_arm_cp_regs(cpu, id_v8_midr_cp_reginfo);
5582 } else {
5583 define_arm_cp_regs(cpu, id_pre_v8_midr_cp_reginfo);
5584 }
5585 define_arm_cp_regs(cpu, id_cp_reginfo);
5586 if (!arm_feature(env, ARM_FEATURE_PMSA)) {
5587 define_one_arm_cp_reg(cpu, &id_tlbtr_reginfo);
5588 } else if (arm_feature(env, ARM_FEATURE_V7)) {
5589 define_one_arm_cp_reg(cpu, &id_mpuir_reginfo);
5590 }
5591 }
5592
5593 if (arm_feature(env, ARM_FEATURE_MPIDR)) {
5594 define_arm_cp_regs(cpu, mpidr_cp_reginfo);
5595 }
5596
5597 if (arm_feature(env, ARM_FEATURE_AUXCR)) {
5598 ARMCPRegInfo auxcr_reginfo[] = {
5599 { .name = "ACTLR_EL1", .state = ARM_CP_STATE_BOTH,
5600 .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 1,
5601 .access = PL1_RW, .type = ARM_CP_CONST,
5602 .resetvalue = cpu->reset_auxcr },
5603 { .name = "ACTLR_EL2", .state = ARM_CP_STATE_BOTH,
5604 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 1,
5605 .access = PL2_RW, .type = ARM_CP_CONST,
5606 .resetvalue = 0 },
5607 { .name = "ACTLR_EL3", .state = ARM_CP_STATE_AA64,
5608 .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 0, .opc2 = 1,
5609 .access = PL3_RW, .type = ARM_CP_CONST,
5610 .resetvalue = 0 },
5611 REGINFO_SENTINEL
5612 };
5613 define_arm_cp_regs(cpu, auxcr_reginfo);
5614 if (arm_feature(env, ARM_FEATURE_V8)) {
5615
5616 ARMCPRegInfo hactlr2_reginfo = {
5617 .name = "HACTLR2", .state = ARM_CP_STATE_AA32,
5618 .cp = 15, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 3,
5619 .access = PL2_RW, .type = ARM_CP_CONST,
5620 .resetvalue = 0
5621 };
5622 define_one_arm_cp_reg(cpu, &hactlr2_reginfo);
5623 }
5624 }
5625
5626 if (arm_feature(env, ARM_FEATURE_CBAR)) {
5627 if (arm_feature(env, ARM_FEATURE_AARCH64)) {
5628
5629 uint32_t cbar32 = (extract64(cpu->reset_cbar, 18, 14) << 18)
5630 | extract64(cpu->reset_cbar, 32, 12);
5631 ARMCPRegInfo cbar_reginfo[] = {
5632 { .name = "CBAR",
5633 .type = ARM_CP_CONST,
5634 .cp = 15, .crn = 15, .crm = 0, .opc1 = 4, .opc2 = 0,
5635 .access = PL1_R, .resetvalue = cpu->reset_cbar },
5636 { .name = "CBAR_EL1", .state = ARM_CP_STATE_AA64,
5637 .type = ARM_CP_CONST,
5638 .opc0 = 3, .opc1 = 1, .crn = 15, .crm = 3, .opc2 = 0,
5639 .access = PL1_R, .resetvalue = cbar32 },
5640 REGINFO_SENTINEL
5641 };
5642
5643 assert(arm_feature(env, ARM_FEATURE_CBAR_RO));
5644 define_arm_cp_regs(cpu, cbar_reginfo);
5645 } else {
5646 ARMCPRegInfo cbar = {
5647 .name = "CBAR",
5648 .cp = 15, .crn = 15, .crm = 0, .opc1 = 4, .opc2 = 0,
5649 .access = PL1_R|PL3_W, .resetvalue = cpu->reset_cbar,
5650 .fieldoffset = offsetof(CPUARMState,
5651 cp15.c15_config_base_address)
5652 };
5653 if (arm_feature(env, ARM_FEATURE_CBAR_RO)) {
5654 cbar.access = PL1_R;
5655 cbar.fieldoffset = 0;
5656 cbar.type = ARM_CP_CONST;
5657 }
5658 define_one_arm_cp_reg(cpu, &cbar);
5659 }
5660 }
5661
5662 if (arm_feature(env, ARM_FEATURE_VBAR)) {
5663 ARMCPRegInfo vbar_cp_reginfo[] = {
5664 { .name = "VBAR", .state = ARM_CP_STATE_BOTH,
5665 .opc0 = 3, .crn = 12, .crm = 0, .opc1 = 0, .opc2 = 0,
5666 .access = PL1_RW, .writefn = vbar_write,
5667 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.vbar_s),
5668 offsetof(CPUARMState, cp15.vbar_ns) },
5669 .resetvalue = 0 },
5670 REGINFO_SENTINEL
5671 };
5672 define_arm_cp_regs(cpu, vbar_cp_reginfo);
5673 }
5674
5675
5676 {
5677 ARMCPRegInfo sctlr = {
5678 .name = "SCTLR", .state = ARM_CP_STATE_BOTH,
5679 .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 0,
5680 .access = PL1_RW,
5681 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.sctlr_s),
5682 offsetof(CPUARMState, cp15.sctlr_ns) },
5683 .writefn = sctlr_write, .resetvalue = cpu->reset_sctlr,
5684 .raw_writefn = raw_write,
5685 };
5686 if (arm_feature(env, ARM_FEATURE_XSCALE)) {
5687
5688
5689
5690
5691 sctlr.type |= ARM_CP_SUPPRESS_TB_END;
5692 }
5693 define_one_arm_cp_reg(cpu, &sctlr);
5694 }
5695
5696 if (cpu_isar_feature(aa64_sve, cpu)) {
5697 define_one_arm_cp_reg(cpu, &zcr_el1_reginfo);
5698 if (arm_feature(env, ARM_FEATURE_EL2)) {
5699 define_one_arm_cp_reg(cpu, &zcr_el2_reginfo);
5700 } else {
5701 define_one_arm_cp_reg(cpu, &zcr_no_el2_reginfo);
5702 }
5703 if (arm_feature(env, ARM_FEATURE_EL3)) {
5704 define_one_arm_cp_reg(cpu, &zcr_el3_reginfo);
5705 }
5706 }
5707}
5708
5709void arm_cpu_register_gdb_regs_for_features(ARMCPU *cpu)
5710{
5711 CPUState *cs = CPU(cpu);
5712 CPUARMState *env = &cpu->env;
5713
5714 if (arm_feature(env, ARM_FEATURE_AARCH64)) {
5715 gdb_register_coprocessor(cs, aarch64_fpu_gdb_get_reg,
5716 aarch64_fpu_gdb_set_reg,
5717 34, "aarch64-fpu.xml", 0);
5718 } else if (arm_feature(env, ARM_FEATURE_NEON)) {
5719 gdb_register_coprocessor(cs, vfp_gdb_get_reg, vfp_gdb_set_reg,
5720 51, "arm-neon.xml", 0);
5721 } else if (arm_feature(env, ARM_FEATURE_VFP3)) {
5722 gdb_register_coprocessor(cs, vfp_gdb_get_reg, vfp_gdb_set_reg,
5723 35, "arm-vfp3.xml", 0);
5724 } else if (arm_feature(env, ARM_FEATURE_VFP)) {
5725 gdb_register_coprocessor(cs, vfp_gdb_get_reg, vfp_gdb_set_reg,
5726 19, "arm-vfp.xml", 0);
5727 }
5728 gdb_register_coprocessor(cs, arm_gdb_get_sysreg, arm_gdb_set_sysreg,
5729 arm_gen_dynamic_xml(cs),
5730 "system-registers.xml", 0);
5731}
5732
5733
5734static gint arm_cpu_list_compare(gconstpointer a, gconstpointer b)
5735{
5736 ObjectClass *class_a = (ObjectClass *)a;
5737 ObjectClass *class_b = (ObjectClass *)b;
5738 const char *name_a, *name_b;
5739
5740 name_a = object_class_get_name(class_a);
5741 name_b = object_class_get_name(class_b);
5742 if (strcmp(name_a, "any-" TYPE_ARM_CPU) == 0) {
5743 return 1;
5744 } else if (strcmp(name_b, "any-" TYPE_ARM_CPU) == 0) {
5745 return -1;
5746 } else {
5747 return strcmp(name_a, name_b);
5748 }
5749}
5750
5751static void arm_cpu_list_entry(gpointer data, gpointer user_data)
5752{
5753 ObjectClass *oc = data;
5754 CPUListState *s = user_data;
5755 const char *typename;
5756 char *name;
5757
5758 typename = object_class_get_name(oc);
5759 name = g_strndup(typename, strlen(typename) - strlen("-" TYPE_ARM_CPU));
5760 (*s->cpu_fprintf)(s->file, " %s\n",
5761 name);
5762 g_free(name);
5763}
5764
5765void arm_cpu_list(FILE *f, fprintf_function cpu_fprintf)
5766{
5767 CPUListState s = {
5768 .file = f,
5769 .cpu_fprintf = cpu_fprintf,
5770 };
5771 GSList *list;
5772
5773 list = object_class_get_list(TYPE_ARM_CPU, false);
5774 list = g_slist_sort(list, arm_cpu_list_compare);
5775 (*cpu_fprintf)(f, "Available CPUs:\n");
5776 g_slist_foreach(list, arm_cpu_list_entry, &s);
5777 g_slist_free(list);
5778}
5779
5780static void arm_cpu_add_definition(gpointer data, gpointer user_data)
5781{
5782 ObjectClass *oc = data;
5783 CpuDefinitionInfoList **cpu_list = user_data;
5784 CpuDefinitionInfoList *entry;
5785 CpuDefinitionInfo *info;
5786 const char *typename;
5787
5788 typename = object_class_get_name(oc);
5789 info = g_malloc0(sizeof(*info));
5790 info->name = g_strndup(typename,
5791 strlen(typename) - strlen("-" TYPE_ARM_CPU));
5792 info->q_typename = g_strdup(typename);
5793
5794 entry = g_malloc0(sizeof(*entry));
5795 entry->value = info;
5796 entry->next = *cpu_list;
5797 *cpu_list = entry;
5798}
5799
5800CpuDefinitionInfoList *arch_query_cpu_definitions(Error **errp)
5801{
5802 CpuDefinitionInfoList *cpu_list = NULL;
5803 GSList *list;
5804
5805 list = object_class_get_list(TYPE_ARM_CPU, false);
5806 g_slist_foreach(list, arm_cpu_add_definition, &cpu_list);
5807 g_slist_free(list);
5808
5809 return cpu_list;
5810}
5811
/*
 * Private utility used by define_one_arm_cp_reg_with_opaque(): install
 * one concrete (state, secstate, crm, opc1, opc2) instance of the
 * register template @r into cpu->cp_regs, keyed by the encoded register
 * number.  Wildcards (CP_ANY) have already been expanded by the caller,
 * so crm/opc1/opc2 here are specific values.
 */
static void add_cpreg_to_hashtable(ARMCPU *cpu, const ARMCPRegInfo *r,
                                   void *opaque, int state, int secstate,
                                   int crm, int opc1, int opc2,
                                   const char *name)
{
    /*
     * Copy the template so we can tweak it per state/secstate without
     * modifying the caller's (possibly shared, const) definition.
     */
    uint32_t *key = g_new(uint32_t, 1);
    ARMCPRegInfo *r2 = g_memdup(r, sizeof(ARMCPRegInfo));
    int is64 = (r->type & ARM_CP_64BIT) ? 1 : 0;
    int ns = (secstate & ARM_CP_SECSTATE_NS) ? 1 : 0;

    r2->name = g_strdup(name);

    /* Record which security state this particular instance describes. */
    r2->secure = secstate;

    if (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1]) {
        /*
         * Register is banked (using both entries in the array):
         * overwrite fieldoffset with the entry for this security state,
         * since only fieldoffset is used from here on.
         */
        r2->fieldoffset = r->bank_fieldoffsets[ns];
    }

    if (state == ARM_CP_STATE_AA32) {
        if (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1]) {
            /*
             * Banked register: mark as an alias (not migrated/reset via
             * this instance) either when this is the NS view of a
             * BOTH-state register (the AArch64 instance owns the state),
             * or, on v8, for the secure view.  NOTE(review): inferred
             * from the condition below; confirm against the migration
             * conventions documented for ARM_CP_ALIAS.
             */
            if ((r->state == ARM_CP_STATE_BOTH && ns) ||
                (arm_feature(&cpu->env, ARM_FEATURE_V8) && !ns)) {
                r2->type |= ARM_CP_ALIAS;
            }
        } else if ((secstate != r->secure) && !ns) {
            /*
             * The register is not banked, so only the non-secure
             * instance should be migrated; mark this one as an alias.
             */
            r2->type |= ARM_CP_ALIAS;
        }

        if (r->state == ARM_CP_STATE_BOTH) {
            /* An unset .cp field on a BOTH-state register means cp15. */
            if (r2->cp == 0) {
                r2->cp = 15;
            }

#ifdef HOST_WORDS_BIGENDIAN
            /*
             * The 32-bit view of a shared 64-bit state field is the low
             * half; on big-endian hosts that is the second word.
             */
            if (r2->fieldoffset) {
                r2->fieldoffset += sizeof(uint32_t);
            }
#endif
        }
    }
    if (state == ARM_CP_STATE_AA64) {
        /*
         * Treat cp == 0 (and .state == BOTH) as "standard guest-visible
         * sysreg" so that reginfo definitions can be abbreviated.
         */
        if (r->cp == 0 || r->state == ARM_CP_STATE_BOTH) {
            r2->cp = CP_REG_ARM64_SYSREG_CP;
        }
        *key = ENCODE_AA64_CP_REG(r2->cp, r2->crn, crm,
                                  r2->opc0, opc1, opc2);
    } else {
        *key = ENCODE_CP_REG(r2->cp, is64, ns, r2->crn, crm, opc1, opc2);
    }
    if (opaque) {
        r2->opaque = opaque;
    }
    /*
     * Overwrite the template's state with the concrete one for this
     * instance (the template may have been ARM_CP_STATE_BOTH).
     */
    r2->state = state;
    /*
     * Make sure the reginfo passed to helpers carries the concrete
     * crm/opc1/opc2 for this instance, not CP_ANY wildcards.
     */
    r2->crm = crm;
    r2->opc1 = opc1;
    r2->opc2 = opc2;
    /*
     * For wildcarded registers only the first expanded entry is used
     * for migration; the others are marked ALIAS below so the state is
     * not transferred multiple times.  Special registers (e.g. NOP/WFI)
     * are never raw-accessible at all.
     */
    if ((r->type & ARM_CP_SPECIAL)) {
        r2->type |= ARM_CP_NO_RAW;
    }
    if (((r->crm == CP_ANY) && crm != 0) ||
        ((r->opc1 == CP_ANY) && opc1 != 0) ||
        ((r->opc2 == CP_ANY) && opc2 != 0)) {
        r2->type |= ARM_CP_ALIAS | ARM_CP_NO_GDB;
    }

    /*
     * Check that raw accesses are either forbidden or handled.  This
     * cannot be asserted earlier because fieldoffset for banked
     * registers is only fixed up above.
     */
    if (!(r2->type & ARM_CP_NO_RAW)) {
        assert(!raw_accessors_invalid(r2));
    }

    /*
     * Redefining an existing register is a bug unless one of the two
     * definitions explicitly opted in with ARM_CP_OVERRIDE.
     */
    if (!(r->type & ARM_CP_OVERRIDE)) {
        ARMCPRegInfo *oldreg;
        oldreg = g_hash_table_lookup(cpu->cp_regs, key);
        if (oldreg && !(oldreg->type & ARM_CP_OVERRIDE)) {
            fprintf(stderr, "Register redefined: cp=%d %d bit "
                    "crn=%d crm=%d opc1=%d opc2=%d, "
                    "was %s, now %s\n", r2->cp, 32 + 32 * is64,
                    r2->crn, r2->crm, r2->opc1, r2->opc2,
                    oldreg->name, r2->name);
            g_assert_not_reached();
        }
    }
    g_hash_table_insert(cpu->cp_regs, key, r2);
}
5945
5946
/*
 * Define a single coprocessor/system register from the template @r.
 *
 * CP_ANY wildcards in crm/opc1/opc2 are expanded into every concrete
 * encoding; a .state of ARM_CP_STATE_BOTH is expanded into separate
 * AArch32 and AArch64 instances; and an AArch32 register whose .secure
 * field is left unset is expanded into both a secure ("<name>_S") and
 * a non-secure instance.  Each expansion is installed via
 * add_cpreg_to_hashtable().  @opaque, if non-NULL, is stored in each
 * instance for use by read/write callbacks.
 */
void define_one_arm_cp_reg_with_opaque(ARMCPU *cpu,
                                       const ARMCPRegInfo *r, void *opaque)
{
    /* Expand each CP_ANY wildcard into its full min..max range. */
    int crm, opc1, opc2, state;
    int crmmin = (r->crm == CP_ANY) ? 0 : r->crm;
    int crmmax = (r->crm == CP_ANY) ? 15 : r->crm;
    int opc1min = (r->opc1 == CP_ANY) ? 0 : r->opc1;
    int opc1max = (r->opc1 == CP_ANY) ? 7 : r->opc1;
    int opc2min = (r->opc2 == CP_ANY) ? 0 : r->opc2;
    int opc2max = (r->opc2 == CP_ANY) ? 7 : r->opc2;

    /* 64 bit (AArch32) registers have no CRn or opc2. */
    assert(!((r->type & ARM_CP_64BIT) && (r->opc2 || r->crn)));
    /* opc0 only exists in the AArch64 encoding. */
    assert((r->state != ARM_CP_STATE_AA32) || (r->opc0 == 0));
    /* AArch64 registers are never "64 bit" in the AArch32 sense. */
    assert((r->state != ARM_CP_STATE_AA64) || !(r->type & ARM_CP_64BIT));

    /*
     * For registers visible from AArch64, the permitted access levels
     * are constrained by opc1; check the template doesn't claim more
     * access than its encoding allows.
     */
    if (r->state != ARM_CP_STATE_AA32) {
        int mask = 0;
        switch (r->opc1) {
        case 0: case 1: case 2:
            /* min_EL EL1 */
            mask = PL1_RW;
            break;
        case 3:
            /* min_EL EL0 */
            mask = PL0_RW;
            break;
        case 4:
            /* min_EL EL2 */
            mask = PL2_RW;
            break;
        case 5:
            /* unallocated encoding */
            assert(false);
            break;
        case 6:
            /* min_EL EL3 */
            mask = PL3_RW;
            break;
        case 7:
            /* min_EL EL1 */
            mask = PL1_RW;
            break;
        default:
            /* broken reginfo with out-of-range opc1 */
            assert(false);
            break;
        }
        /* assert our permissions are not too lax (stricter is fine) */
        assert((r->access & ~mask) == 0);
    }

    /*
     * Non-special, non-constant registers must supply either a state
     * field (fieldoffset or banked offsets) or explicit read/write
     * callbacks for each direction they permit.
     */
    if (!(r->type & (ARM_CP_SPECIAL|ARM_CP_CONST))) {
        if (r->access & PL3_R) {
            assert((r->fieldoffset ||
                   (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1])) ||
                   r->readfn);
        }
        if (r->access & PL3_W) {
            assert((r->fieldoffset ||
                   (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1])) ||
                   r->writefn);
        }
    }

    assert(cptype_valid(r->type));
    for (crm = crmmin; crm <= crmmax; crm++) {
        for (opc1 = opc1min; opc1 <= opc1max; opc1++) {
            for (opc2 = opc2min; opc2 <= opc2max; opc2++) {
                for (state = ARM_CP_STATE_AA32;
                     state <= ARM_CP_STATE_AA64; state++) {
                    if (r->state != state && r->state != ARM_CP_STATE_BOTH) {
                        continue;
                    }
                    if (state == ARM_CP_STATE_AA32) {
                        /*
                         * Under AArch32, a register is defined per
                         * security state; an unset .secure field means
                         * "both", so define two instances.
                         */
                        char *name;

                        switch (r->secure) {
                        case ARM_CP_SECSTATE_S:
                        case ARM_CP_SECSTATE_NS:
                            add_cpreg_to_hashtable(cpu, r, opaque, state,
                                                   r->secure, crm, opc1, opc2,
                                                   r->name);
                            break;
                        default:
                            /* Secure instance gets a "_S" name suffix. */
                            name = g_strdup_printf("%s_S", r->name);
                            add_cpreg_to_hashtable(cpu, r, opaque, state,
                                                   ARM_CP_SECSTATE_S,
                                                   crm, opc1, opc2, name);
                            g_free(name);
                            add_cpreg_to_hashtable(cpu, r, opaque, state,
                                                   ARM_CP_SECSTATE_NS,
                                                   crm, opc1, opc2, r->name);
                            break;
                        }
                    } else {
                        /*
                         * AArch64 registers get mapped to the
                         * non-secure instance of the AArch32 state.
                         */
                        add_cpreg_to_hashtable(cpu, r, opaque, state,
                                               ARM_CP_SECSTATE_NS,
                                               crm, opc1, opc2, r->name);
                    }
                }
            }
        }
    }
}
6089
6090void define_arm_cp_regs_with_opaque(ARMCPU *cpu,
6091 const ARMCPRegInfo *regs, void *opaque)
6092{
6093
6094 const ARMCPRegInfo *r;
6095 for (r = regs; r->type != ARM_CP_SENTINEL; r++) {
6096 define_one_arm_cp_reg_with_opaque(cpu, r, opaque);
6097 }
6098}
6099
6100const ARMCPRegInfo *get_arm_cp_reginfo(GHashTable *cpregs, uint32_t encoded_cp)
6101{
6102 return g_hash_table_lookup(cpregs, &encoded_cp);
6103}
6104
void arm_cp_write_ignore(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    /* Helper coprocessor write function for write-ignore registers:
     * intentionally discard the written value.
     */
}
6110
uint64_t arm_cp_read_zero(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* Helper coprocessor read function for read-as-zero registers. */
    return 0;
}
6116
void arm_cp_reset_ignore(CPUARMState *env, const ARMCPRegInfo *opaque)
{
    /* Helper coprocessor reset function for do-nothing-on-reset registers. */
}
6121
6122static int bad_mode_switch(CPUARMState *env, int mode, CPSRWriteType write_type)
6123{
6124
6125
6126
6127
6128
6129
6130 if (write_type == CPSRWriteByInstr &&
6131 ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_HYP ||
6132 mode == ARM_CPU_MODE_HYP)) {
6133 return 1;
6134 }
6135
6136 switch (mode) {
6137 case ARM_CPU_MODE_USR:
6138 return 0;
6139 case ARM_CPU_MODE_SYS:
6140 case ARM_CPU_MODE_SVC:
6141 case ARM_CPU_MODE_ABT:
6142 case ARM_CPU_MODE_UND:
6143 case ARM_CPU_MODE_IRQ:
6144 case ARM_CPU_MODE_FIQ:
6145
6146
6147
6148
6149
6150
6151 if (write_type == CPSRWriteByInstr &&
6152 (env->cp15.hcr_el2 & HCR_TGE) &&
6153 (env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_MON &&
6154 !arm_is_secure_below_el3(env)) {
6155 return 1;
6156 }
6157 return 0;
6158 case ARM_CPU_MODE_HYP:
6159 return !arm_feature(env, ARM_FEATURE_EL2)
6160 || arm_current_el(env) < 2 || arm_is_secure(env);
6161 case ARM_CPU_MODE_MON:
6162 return arm_current_el(env) < 3;
6163 default:
6164 return 1;
6165 }
6166}
6167
6168uint32_t cpsr_read(CPUARMState *env)
6169{
6170 int ZF;
6171 ZF = (env->ZF == 0);
6172 return env->uncached_cpsr | (env->NF & 0x80000000) | (ZF << 30) |
6173 (env->CF << 29) | ((env->VF & 0x80000000) >> 3) | (env->QF << 27)
6174 | (env->thumb << 5) | ((env->condexec_bits & 3) << 25)
6175 | ((env->condexec_bits & 0xfc) << 8)
6176 | (env->GE << 16) | (env->daif & CPSR_AIF);
6177}
6178
void cpsr_write(CPUARMState *env, uint32_t val, uint32_t mask,
                CPSRWriteType write_type)
{
    /* Write @val to the CPSR; only the bits set in @mask are updated.
     * @write_type distinguishes raw (migration/reset) writes, instruction
     * writes, exception returns and gdbstub writes, which have different
     * legality checking.  NOTE: the order of operations below matters:
     * @mask is progressively narrowed as checks fail, so the final
     * uncached_cpsr update must come last.
     */
    uint32_t changed_daif;

    if (mask & CPSR_NZCV) {
        /* Condition flags are stored in split form for fast access. */
        env->ZF = (~val) & CPSR_Z;
        env->NF = val;
        env->CF = (val >> 29) & 1;
        env->VF = (val << 3) & 0x80000000;
    }
    if (mask & CPSR_Q)
        env->QF = ((val & CPSR_Q) != 0);
    if (mask & CPSR_T)
        env->thumb = ((val & CPSR_T) != 0);
    if (mask & CPSR_IT_0_1) {
        env->condexec_bits &= ~3;
        env->condexec_bits |= (val >> 25) & 3;
    }
    if (mask & CPSR_IT_2_7) {
        env->condexec_bits &= 3;
        env->condexec_bits |= (val >> 8) & 0xfc;
    }
    if (mask & CPSR_GE) {
        env->GE = (val >> 16) & 0xf;
    }

    /* In a V7 implementation that includes the security extensions but does
     * not include Virtualization Extensions the SCR.FW and SCR.AW bits
     * control whether non-secure software is allowed to change the CPSR_F
     * and CPSR_A bits; see the checks below.  (Not applicable on v8,
     * where SCR.AW/FW only affect exception masking, not CPSR writes.)
     */
    if (write_type != CPSRWriteRaw && !arm_feature(env, ARM_FEATURE_V8) &&
        arm_feature(env, ARM_FEATURE_EL3) &&
        !arm_feature(env, ARM_FEATURE_EL2) &&
        !arm_is_secure(env)) {

        changed_daif = (env->daif ^ val) & mask;

        if (changed_daif & CPSR_A) {
            /* SCR.AW gates non-secure writes to the A (async abort mask)
             * bit; silently ignore disallowed changes.
             */
            if (!(env->cp15.scr_el3 & SCR_AW)) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "Ignoring attempt to switch CPSR_A flag from "
                              "non-secure world with SCR.AW bit clear\n");
                mask &= ~CPSR_A;
            }
        }

        if (changed_daif & CPSR_F) {
            /* SCR.FW gates non-secure writes to the F (FIQ mask) bit. */
            if (!(env->cp15.scr_el3 & SCR_FW)) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "Ignoring attempt to switch CPSR_F flag from "
                              "non-secure world with SCR.FW bit clear\n");
                mask &= ~CPSR_F;
            }

            /* With non-maskable FIQ support (SCTLR.NMFI set), software may
             * clear but never set CPSR_F.
             */
            if ((A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_NMFI) &&
                (val & CPSR_F)) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "Ignoring attempt to enable CPSR_F flag "
                              "(non-maskable FIQ [NMFI] support enabled)\n");
                mask &= ~CPSR_F;
            }
        }
    }

    env->daif &= ~(CPSR_AIF & mask);
    env->daif |= val & CPSR_AIF & mask;

    if (write_type != CPSRWriteRaw &&
        ((env->uncached_cpsr ^ val) & mask & CPSR_M)) {
        if ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_USR) {
            /* Attempted mode change from User mode: CONSTRAINED
             * UNPREDICTABLE; treated here as ignoring the mode bits.
             */
            mask &= ~CPSR_M;
        } else if (bad_mode_switch(env, val & CPSR_M, write_type)) {
            /* Illegal mode change: leave the mode unchanged and, on v8,
             * set the Illegal Execution state bit (unless this write came
             * from the gdbstub, which is allowed to forge state).
             */
            mask &= ~CPSR_M;
            if (write_type != CPSRWriteByGDBStub &&
                arm_feature(env, ARM_FEATURE_V8)) {
                mask |= CPSR_IL;
                val |= CPSR_IL;
            }
            qemu_log_mask(LOG_GUEST_ERROR,
                          "Illegal AArch32 mode switch attempt from %s to %s\n",
                          aarch32_mode_name(env->uncached_cpsr),
                          aarch32_mode_name(val));
        } else {
            qemu_log_mask(CPU_LOG_INT, "%s %s to %s PC 0x%" PRIx32 "\n",
                          write_type == CPSRWriteExceptionReturn ?
                          "Exception return from AArch32" :
                          "AArch32 mode switch from",
                          aarch32_mode_name(env->uncached_cpsr),
                          aarch32_mode_name(val), env->regs[15]);
            /* Legal change: rebank the registers for the new mode. */
            switch_mode(env, val & CPSR_M);
        }
    }
    /* The bits held in split form above must not be written here. */
    mask &= ~CACHED_CPSR_BITS;
    env->uncached_cpsr = (env->uncached_cpsr & ~mask) | (val & mask);
}
6303
6304
6305uint32_t HELPER(sxtb16)(uint32_t x)
6306{
6307 uint32_t res;
6308 res = (uint16_t)(int8_t)x;
6309 res |= (uint32_t)(int8_t)(x >> 16) << 16;
6310 return res;
6311}
6312
6313uint32_t HELPER(uxtb16)(uint32_t x)
6314{
6315 uint32_t res;
6316 res = (uint16_t)(uint8_t)x;
6317 res |= (uint32_t)(uint8_t)(x >> 16) << 16;
6318 return res;
6319}
6320
6321int32_t HELPER(sdiv)(int32_t num, int32_t den)
6322{
6323 if (den == 0)
6324 return 0;
6325 if (num == INT_MIN && den == -1)
6326 return INT_MIN;
6327 return num / den;
6328}
6329
6330uint32_t HELPER(udiv)(uint32_t num, uint32_t den)
6331{
6332 if (den == 0)
6333 return 0;
6334 return num / den;
6335}
6336
uint32_t HELPER(rbit)(uint32_t x)
{
    /* RBIT: reverse the bit order of a 32-bit word. */
    return revbit32(x);
}
6341
6342#if defined(CONFIG_USER_ONLY)
6343
6344
void HELPER(v7m_msr)(CPUARMState *env, uint32_t reg, uint32_t val)
{
    /* User-mode-only (CONFIG_USER_ONLY) stub: M-profile system register
     * writes cannot occur in linux-user emulation.
     */
    ARMCPU *cpu = arm_env_get_cpu(env);

    cpu_abort(CPU(cpu), "v7m_msr %d\n", reg);
}
6351
uint32_t HELPER(v7m_mrs)(CPUARMState *env, uint32_t reg)
{
    /* User-mode-only stub: M-profile system register reads cannot occur
     * in linux-user emulation.  The return is unreachable.
     */
    ARMCPU *cpu = arm_env_get_cpu(env);

    cpu_abort(CPU(cpu), "v7m_mrs %d\n", reg);
    return 0;
}
6359
void HELPER(v7m_bxns)(CPUARMState *env, uint32_t dest)
{
    /* User-mode-only stub: BXNS requires the M-profile security extension,
     * which is not available in linux-user emulation.
     */
    g_assert_not_reached();
}
6365
void HELPER(v7m_blxns)(CPUARMState *env, uint32_t dest)
{
    /* User-mode-only stub: BLXNS requires the M-profile security extension,
     * which is not available in linux-user emulation.
     */
    g_assert_not_reached();
}
6371
uint32_t HELPER(v7m_tt)(CPUARMState *env, uint32_t addr, uint32_t op)
{
    /* User-mode-only stub for the TT/TTA/TTT/TTAT instructions.
     *
     * The TT instruction queries the MPU and (with the security extension)
     * SAU configuration for an address; in linux-user emulation there is
     * no MPU or SAU, so the query returns an all-zeroes result, meaning
     * "no region information available".
     */
    return 0;
}
6393
6394static void switch_mode(CPUARMState *env, int mode)
6395{
6396 ARMCPU *cpu = arm_env_get_cpu(env);
6397
6398 if (mode != ARM_CPU_MODE_USR) {
6399 cpu_abort(CPU(cpu), "Tried to switch out of user mode\n");
6400 }
6401}
6402
uint32_t arm_phys_excp_target_el(CPUState *cs, uint32_t excp_idx,
                                 uint32_t cur_el, bool secure)
{
    /* User-mode-only variant: physical interrupts always "target" EL1,
     * since there is no EL2/EL3 in linux-user emulation.
     */
    return 1;
}
6408
void aarch64_sync_64_to_32(CPUARMState *env)
{
    /* User-mode-only stub: AArch64->AArch32 state synchronisation only
     * happens on exception entry/exit, which linux-user does not model.
     */
    g_assert_not_reached();
}
6413
6414#else
6415
6416static void switch_mode(CPUARMState *env, int mode)
6417{
6418 int old_mode;
6419 int i;
6420
6421 old_mode = env->uncached_cpsr & CPSR_M;
6422 if (mode == old_mode)
6423 return;
6424
6425 if (old_mode == ARM_CPU_MODE_FIQ) {
6426 memcpy (env->fiq_regs, env->regs + 8, 5 * sizeof(uint32_t));
6427 memcpy (env->regs + 8, env->usr_regs, 5 * sizeof(uint32_t));
6428 } else if (mode == ARM_CPU_MODE_FIQ) {
6429 memcpy (env->usr_regs, env->regs + 8, 5 * sizeof(uint32_t));
6430 memcpy (env->regs + 8, env->fiq_regs, 5 * sizeof(uint32_t));
6431 }
6432
6433 i = bank_number(old_mode);
6434 env->banked_r13[i] = env->regs[13];
6435 env->banked_spsr[i] = env->spsr;
6436
6437 i = bank_number(mode);
6438 env->regs[13] = env->banked_r13[i];
6439 env->spsr = env->banked_spsr[i];
6440
6441 env->banked_r14[r14_bank_number(old_mode)] = env->regs[14];
6442 env->regs[14] = env->banked_r14[r14_bank_number(mode)];
6443}
6444
6445
6446
6447
6448
6449
6450
6451
6452
6453
6454
6455
6456
6457
6458
6459
6460
6461
6462
6463
6464
6465
6466
6467
6468
6469
6470
6471
6472
6473
6474
6475
6476
6477
6478
6479
6480
6481
/* Table mapping the current execution context to the exception level a
 * physical IRQ/FIQ/abort is routed to, indexed (outermost to innermost) as
 * target_el_table[is64][scr][rw][hcr][secure][cur_el]; see its only user,
 * arm_phys_excp_target_el().  A value of -1 marks a combination that
 * cannot occur (it would trip the assert in the caller).
 */
static const int8_t target_el_table[2][2][2][2][2][4] = {
    {{{{{ 1, 1, 2, -1 },{ 3, -1, -1, 3 },},
       {{ 2, 2, 2, -1 },{ 3, -1, -1, 3 },},},
      {{{ 1, 1, 2, -1 },{ 3, -1, -1, 3 },},
       {{ 2, 2, 2, -1 },{ 3, -1, -1, 3 },},},},
     {{{{ 3, 3, 3, -1 },{ 3, -1, -1, 3 },},
       {{ 3, 3, 3, -1 },{ 3, -1, -1, 3 },},},
      {{{ 3, 3, 3, -1 },{ 3, -1, -1, 3 },},
       {{ 3, 3, 3, -1 },{ 3, -1, -1, 3 },},},},},
    {{{{{ 1, 1, 2, -1 },{ 1, 1, -1, 1 },},
       {{ 2, 2, 2, -1 },{ 1, 1, -1, 1 },},},
      {{{ 1, 1, 1, -1 },{ 1, 1, -1, 1 },},
       {{ 2, 2, 2, -1 },{ 1, 1, -1, 1 },},},},
     {{{{ 3, 3, 3, -1 },{ 3, 3, -1, 3 },},
       {{ 3, 3, 3, -1 },{ 3, 3, -1, 3 },},},
      {{{ 3, 3, 3, -1 },{ 3, 3, -1, 3 },},
       {{ 3, 3, 3, -1 },{ 3, 3, -1, 3 },},},},},
};
6500
6501
6502
6503
6504uint32_t arm_phys_excp_target_el(CPUState *cs, uint32_t excp_idx,
6505 uint32_t cur_el, bool secure)
6506{
6507 CPUARMState *env = cs->env_ptr;
6508 int rw;
6509 int scr;
6510 int hcr;
6511 int target_el;
6512
6513 int is64 = arm_feature(env, ARM_FEATURE_AARCH64);
6514
6515 if (arm_feature(env, ARM_FEATURE_EL3)) {
6516 rw = ((env->cp15.scr_el3 & SCR_RW) == SCR_RW);
6517 } else {
6518
6519
6520
6521
6522 rw = is64;
6523 }
6524
6525 switch (excp_idx) {
6526 case EXCP_IRQ:
6527 scr = ((env->cp15.scr_el3 & SCR_IRQ) == SCR_IRQ);
6528 hcr = arm_hcr_el2_imo(env);
6529 break;
6530 case EXCP_FIQ:
6531 scr = ((env->cp15.scr_el3 & SCR_FIQ) == SCR_FIQ);
6532 hcr = arm_hcr_el2_fmo(env);
6533 break;
6534 default:
6535 scr = ((env->cp15.scr_el3 & SCR_EA) == SCR_EA);
6536 hcr = arm_hcr_el2_amo(env);
6537 break;
6538 };
6539
6540
6541 hcr |= ((env->cp15.hcr_el2 & HCR_TGE) == HCR_TGE);
6542
6543
6544 target_el = target_el_table[is64][scr][rw][hcr][secure][cur_el];
6545
6546 assert(target_el > 0);
6547
6548 return target_el;
6549}
6550
/* Write a 32-bit word to the v7M exception stack frame at @addr.
 * Returns true on success.  On failure the appropriate derived exception
 * is pended (unless @ignfault) and false is returned.
 */
static bool v7m_stack_write(ARMCPU *cpu, uint32_t addr, uint32_t value,
                            ARMMMUIdx mmu_idx, bool ignfault)
{
    CPUState *cs = CPU(cpu);
    CPUARMState *env = &cpu->env;
    MemTxAttrs attrs = {};
    MemTxResult txres;
    target_ulong page_size;
    hwaddr physaddr;
    int prot;
    ARMMMUFaultInfo fi = {};
    bool secure = mmu_idx & ARM_MMU_IDX_M_S;
    int exc;
    bool exc_secure;

    if (get_phys_addr(env, addr, MMU_DATA_STORE, mmu_idx, &physaddr,
                      &attrs, &prot, &page_size, &fi, NULL)) {
        /* MPU/SAU lookup failed; decide what kind of fault to report. */
        if (fi.type == ARMFault_QEMU_SFault) {
            qemu_log_mask(CPU_LOG_INT,
                          "...SecureFault with SFSR.AUVIOL during stacking\n");
            env->v7m.sfsr |= R_V7M_SFSR_AUVIOL_MASK | R_V7M_SFSR_SFARVALID_MASK;
            env->v7m.sfar = addr;
            exc = ARMV7M_EXCP_SECURE;
            exc_secure = false;
        } else {
            qemu_log_mask(CPU_LOG_INT, "...MemManageFault with CFSR.MSTKERR\n");
            env->v7m.cfsr[secure] |= R_V7M_CFSR_MSTKERR_MASK;
            exc = ARMV7M_EXCP_MEM;
            exc_secure = secure;
        }
        goto pend_fault;
    }
    address_space_stl_le(arm_addressspace(cs, attrs), physaddr, value,
                         attrs, &txres);
    if (txres != MEMTX_OK) {
        /* The transaction itself failed: BusFault (always non-banked). */
        qemu_log_mask(CPU_LOG_INT, "...BusFault with BFSR.STKERR\n");
        env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_STKERR_MASK;
        exc = ARMV7M_EXCP_BUS;
        exc_secure = false;
        goto pend_fault;
    }
    return true;

pend_fault:
    /* By pending the exception at this point we are making
     * the IMPDEF choice "overridden exceptions pended" (see the
     * MergeExcInfo() pseudocode).  When @ignfault is set (used for
     * some cases of derived exceptions during exception entry) the
     * fault status registers are still updated above but no new
     * exception is pended.
     */
    if (!ignfault) {
        armv7m_nvic_set_pending_derived(env->nvic, exc, exc_secure);
    }
    return false;
}
6612
/* Read a 32-bit word from the v7M exception stack frame at @addr into
 * @dest.  Returns true on success; on failure the appropriate derived
 * exception is pended and false is returned (*dest is left unwritten).
 */
static bool v7m_stack_read(ARMCPU *cpu, uint32_t *dest, uint32_t addr,
                           ARMMMUIdx mmu_idx)
{
    CPUState *cs = CPU(cpu);
    CPUARMState *env = &cpu->env;
    MemTxAttrs attrs = {};
    MemTxResult txres;
    target_ulong page_size;
    hwaddr physaddr;
    int prot;
    ARMMMUFaultInfo fi = {};
    bool secure = mmu_idx & ARM_MMU_IDX_M_S;
    int exc;
    bool exc_secure;
    uint32_t value;

    if (get_phys_addr(env, addr, MMU_DATA_LOAD, mmu_idx, &physaddr,
                      &attrs, &prot, &page_size, &fi, NULL)) {
        /* MPU/SAU lookup failed; decide what kind of fault to report. */
        if (fi.type == ARMFault_QEMU_SFault) {
            qemu_log_mask(CPU_LOG_INT,
                          "...SecureFault with SFSR.AUVIOL during unstack\n");
            env->v7m.sfsr |= R_V7M_SFSR_AUVIOL_MASK | R_V7M_SFSR_SFARVALID_MASK;
            env->v7m.sfar = addr;
            exc = ARMV7M_EXCP_SECURE;
            exc_secure = false;
        } else {
            qemu_log_mask(CPU_LOG_INT,
                          "...MemManageFault with CFSR.MUNSTKERR\n");
            env->v7m.cfsr[secure] |= R_V7M_CFSR_MUNSTKERR_MASK;
            exc = ARMV7M_EXCP_MEM;
            exc_secure = secure;
        }
        goto pend_fault;
    }

    value = address_space_ldl(arm_addressspace(cs, attrs), physaddr,
                              attrs, &txres);
    if (txres != MEMTX_OK) {
        /* The transaction itself failed: BusFault (always non-banked). */
        qemu_log_mask(CPU_LOG_INT, "...BusFault with BFSR.UNSTKERR\n");
        env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_UNSTKERR_MASK;
        exc = ARMV7M_EXCP_BUS;
        exc_secure = false;
        goto pend_fault;
    }

    *dest = value;
    return true;

pend_fault:
    /* By pending the exception at this point we are making
     * the IMPDEF choice "overridden exceptions pended" (see the
     * MergeExcInfo() pseudocode).
     */
    armv7m_nvic_set_pending(env->nvic, exc, exc_secure);
    return false;
}
6673
6674
6675
6676
6677
6678
/* Write to v7M CONTROL.SPSEL bit for the security state @secstate.
 * This may change the current stack pointer between Main and Process
 * stack pointers if the write is for the current security state.
 * NOTE: old_is_psp must be sampled before the CONTROL update below.
 */
static void write_v7m_control_spsel_for_secstate(CPUARMState *env,
                                                 bool new_spsel,
                                                 bool secstate)
{
    bool old_is_psp = v7m_using_psp(env);

    env->v7m.control[secstate] =
        deposit32(env->v7m.control[secstate],
                  R_V7M_CONTROL_SPSEL_SHIFT,
                  R_V7M_CONTROL_SPSEL_LENGTH, new_spsel);

    if (secstate == env->v7m.secure) {
        bool new_is_psp = v7m_using_psp(env);
        uint32_t tmp;

        /* Swap r13 with the inactive SP if the selection changed. */
        if (old_is_psp != new_is_psp) {
            tmp = env->v7m.other_sp;
            env->v7m.other_sp = env->regs[13];
            env->regs[13] = tmp;
        }
    }
}
6701
6702
6703
6704
/* Write to v7M CONTROL.SPSEL bit for the CURRENT security state;
 * convenience wrapper around write_v7m_control_spsel_for_secstate().
 */
static void write_v7m_control_spsel(CPUARMState *env, bool new_spsel)
{
    write_v7m_control_spsel_for_secstate(env, new_spsel, env->v7m.secure);
}
6709
void write_v7m_exception(CPUARMState *env, uint32_t new_exc)
{
    /* Write a new value to v7m.exception (the IPSR exception number).
     * Because v7m_using_psp() depends on the exception number (Handler
     * mode always uses MSP), this may switch r13 between Main and
     * Process stack pointers; old_is_psp must be sampled before the
     * update and new_is_psp after it.
     */
    bool new_is_psp, old_is_psp = v7m_using_psp(env);
    uint32_t tmp;

    env->v7m.exception = new_exc;

    new_is_psp = v7m_using_psp(env);

    if (old_is_psp != new_is_psp) {
        tmp = env->v7m.other_sp;
        env->v7m.other_sp = env->regs[13];
        env->regs[13] = tmp;
    }
}
6728
/* Switch M profile security state between NS and S; no-op if unchanged.
 * The four stack pointers (S/NS x MSP/PSP) are kept in r13, other_sp
 * and other_ss_{msp,psp}; the save/restore order below is critical.
 */
static void switch_v7m_security_state(CPUARMState *env, bool new_secstate)
{
    uint32_t new_ss_msp, new_ss_psp;

    if (env->v7m.secure == new_secstate) {
        return;
    }

    /* All the banked state is accessed by looking at env->v7m.secure
     * except other_ss_msp and other_ss_psp, which hold the SPs of the
     * opposite security state: sample them before we overwrite them.
     */
    new_ss_msp = env->v7m.other_ss_msp;
    new_ss_psp = env->v7m.other_ss_psp;

    /* Stash the outgoing state's SPs into the other_ss_* slots. */
    if (v7m_using_psp(env)) {
        env->v7m.other_ss_psp = env->regs[13];
        env->v7m.other_ss_msp = env->v7m.other_sp;
    } else {
        env->v7m.other_ss_msp = env->regs[13];
        env->v7m.other_ss_psp = env->v7m.other_sp;
    }

    env->v7m.secure = new_secstate;

    /* Load the incoming state's SPs; v7m_using_psp() is re-evaluated
     * against the new security state's CONTROL.SPSEL.
     */
    if (v7m_using_psp(env)) {
        env->regs[13] = new_ss_psp;
        env->v7m.other_sp = new_ss_msp;
    } else {
        env->regs[13] = new_ss_msp;
        env->v7m.other_sp = new_ss_psp;
    }
}
6762
void HELPER(v7m_bxns)(CPUARMState *env, uint32_t dest)
{
    /* Handle v7M BXNS:
     *  - if the destination is in the special exception-return /
     *    function-return magic range, treat it as such;
     *  - otherwise branch, switching to NonSecure if bit 0 is clear.
     */
    uint32_t min_magic;

    if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
        /* Covers FNC_RETURN and EXC_RETURN magic values. */
        min_magic = FNC_RETURN_MIN_MAGIC;
    } else {
        /* Covers only EXC_RETURN magic values. */
        min_magic = EXC_RETURN_MIN_MAGIC;
    }

    if (dest >= min_magic) {
        /* This is an exception return magic value; put it where
         * do_v7m_exception_exit() expects and raise EXCEPTION_EXIT.
         * Note that if we ever delete FEATURE_V8M this will be
         * unreachable.  exception_internal() does not return.
         */
        env->regs[15] = dest & ~1;
        env->thumb = dest & 1;
        HELPER(exception_internal)(env, EXCP_EXCEPTION_EXIT);
        /* notreached */
    }

    /* translate.c should have made BXNS UNDEF unless we're secure */
    assert(env->v7m.secure);

    switch_v7m_security_state(env, dest & 1);
    env->thumb = 1;
    env->regs[15] = dest & ~1;
}
6799
void HELPER(v7m_blxns)(CPUARMState *env, uint32_t dest)
{
    /* Handle v7M BLXNS: a secure function call which may cross into the
     * NonSecure state.  If bit 0 of @dest is clear we stack the return
     * address and partial PSR, set LR to the FNC_RETURN magic value and
     * switch to NonSecure.
     */
    uint32_t nextinst = env->regs[15] | 1;
    uint32_t sp = env->regs[13] - 8;
    uint32_t saved_psr;

    /* translate.c will have made BLXNS UNDEF unless we're secure */
    assert(env->v7m.secure);

    if (dest & 1) {
        /* Target is Secure: this is just a normal BLX, except that the
         * low bit of the target PC must not actually be set.
         */
        env->regs[14] = nextinst;
        env->thumb = 1;
        env->regs[15] = dest & ~1;
        return;
    }

    /* Target is NonSecure: first push a stack frame. */
    if (!QEMU_IS_ALIGNED(sp, 8)) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "BLXNS with misaligned SP is UNPREDICTABLE\n");
    }

    if (sp < v7m_sp_limit(env)) {
        raise_exception(env, EXCP_STKOF, 0, 1);
    }

    saved_psr = env->v7m.exception;
    if (env->v7m.control[M_REG_S] & R_V7M_CONTROL_SFPA_MASK) {
        saved_psr |= XPSR_SFPA;
    }

    /* Note that these stores can throw exceptions on MPU faults */
    cpu_stl_data(env, sp, nextinst);
    cpu_stl_data(env, sp + 4, saved_psr);

    env->regs[13] = sp;
    env->regs[14] = 0xfeffffff;   /* FNC_RETURN magic value */
    if (arm_v7m_is_handler_mode(env)) {
        /* Mark the "call came from Handler mode" case by setting the
         * exception number to 1 (HardFault can't actually be returned
         * to via FNC_RETURN, so this value is a flag, not a real IPSR).
         */
        write_v7m_exception(env, 1);
    }
    switch_v7m_security_state(env, 0);
    env->thumb = 1;
    env->regs[15] = dest;
}
6856
6857static uint32_t *get_v7m_sp_ptr(CPUARMState *env, bool secure, bool threadmode,
6858 bool spsel)
6859{
6860
6861
6862
6863
6864
6865
6866
6867
6868
6869
6870
6871
6872
6873
6874
6875 bool want_psp = threadmode && spsel;
6876
6877 if (secure == env->v7m.secure) {
6878 if (want_psp == v7m_using_psp(env)) {
6879 return &env->regs[13];
6880 } else {
6881 return &env->v7m.other_sp;
6882 }
6883 } else {
6884 if (want_psp) {
6885 return &env->v7m.other_ss_psp;
6886 } else {
6887 return &env->v7m.other_ss_msp;
6888 }
6889 }
6890}
6891
/* Load the exception vector for exception number @exc from the vector
 * table of the @targets_secure security state into *@pvec.  Returns true
 * on success; on failure a derived HardFault is pended and false returned.
 */
static bool arm_v7m_load_vector(ARMCPU *cpu, int exc, bool targets_secure,
                                uint32_t *pvec)
{
    CPUState *cs = CPU(cpu);
    CPUARMState *env = &cpu->env;
    MemTxResult result;
    uint32_t addr = env->v7m.vecbase[targets_secure] + exc * 4;
    uint32_t vector_entry;
    MemTxAttrs attrs = {};
    ARMMMUIdx mmu_idx;
    bool exc_secure;

    mmu_idx = arm_v7m_mmu_idx_for_secstate_and_priv(env, targets_secure, true);

    /* We don't do a get_phys_addr() here because the rules for vector
     * loads are special: they always use the default memory map, and
     * the default memory map permits reads from all addresses.
     * Since there's no easy way to pass through to pmsav8_mpu_lookup()
     * that we want this special case which would always say "yes",
     * we just do the SAU lookup here followed by a direct physical load.
     */
    attrs.secure = targets_secure;
    attrs.user = false;

    if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
        V8M_SAttributes sattrs = {};

        v8m_security_lookup(env, addr, MMU_DATA_LOAD, mmu_idx, &sattrs);
        if (sattrs.ns) {
            attrs.secure = false;
        } else if (!targets_secure) {
            /* NS access to S memory: fault the vector load. */
            goto load_fail;
        }
    }

    vector_entry = address_space_ldl(arm_addressspace(cs, attrs), addr,
                                     attrs, &result);
    if (result != MEMTX_OK) {
        goto load_fail;
    }
    *pvec = vector_entry;
    return true;

load_fail:
    /* All vector table fetch fails are reported as HardFault, in the
     * appropriate security state (see the v8M ARM pseudocode).  A
     * secure vector fetch escalates to secure HardFault; a NonSecure
     * one escalates to secure HardFault only if AIRCR.BFHFNMINS is 0.
     */
    exc_secure = targets_secure ||
        !(cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK);
    env->v7m.hfsr |= R_V7M_HFSR_VECTTBL_MASK | R_V7M_HFSR_FORCED_MASK;
    armv7m_nvic_set_pending_derived(env->nvic, ARMV7M_EXCP_HARD, exc_secure);
    return false;
}
6950
/* Push the callee-saves registers (r4-r11) plus the integrity signature
 * onto the Secure stack, as part of v8M exception entry when the DCRS
 * rules require it.  Returns true if a derived exception occurred and
 * was not ignored (i.e. stacking failed), false on success.
 */
static bool v7m_push_callee_stack(ARMCPU *cpu, uint32_t lr, bool dotailchain,
                                  bool ignore_faults)
{
    /* For v8M, push the callee-saves register part of the stack frame.
     * Compare the v8M pseudocode PushCalleeStack().
     * In the tailchaining case this may not be the current stack.
     */
    CPUARMState *env = &cpu->env;
    uint32_t *frame_sp_p;
    uint32_t frameptr;
    ARMMMUIdx mmu_idx;
    bool stacked_ok;
    uint32_t limit;
    bool want_psp;

    if (dotailchain) {
        /* Tailchain: derive the stack and privilege from the EXC_RETURN
         * we are chaining from, not from the current execution state.
         */
        bool mode = lr & R_V7M_EXCRET_MODE_MASK;
        bool priv = !(env->v7m.control[M_REG_S] & R_V7M_CONTROL_NPRIV_MASK) ||
            !mode;

        mmu_idx = arm_v7m_mmu_idx_for_secstate_and_priv(env, M_REG_S, priv);
        frame_sp_p = get_v7m_sp_ptr(env, M_REG_S, mode,
                                    lr & R_V7M_EXCRET_SPSEL_MASK);
        want_psp = mode && (lr & R_V7M_EXCRET_SPSEL_MASK);
        if (want_psp) {
            limit = env->v7m.psplim[M_REG_S];
        } else {
            limit = env->v7m.msplim[M_REG_S];
        }
    } else {
        mmu_idx = core_to_arm_mmu_idx(env, cpu_mmu_index(env, false));
        frame_sp_p = &env->regs[13];
        limit = v7m_sp_limit(env);
    }

    frameptr = *frame_sp_p - 0x28;
    if (frameptr < limit) {
        /* Stack limit violation: take a Secure UsageFault (STKOF).
         * Stack pointer is set to the limit and no memory is written.
         */
        qemu_log_mask(CPU_LOG_INT,
                      "...STKOF during callee-saves register stacking\n");
        env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_STKOF_MASK;
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
                                env->v7m.secure);
        *frame_sp_p = limit;
        return true;
    }

    /* Write as much of the stack frame as we can.  A write failure may
     * cause us to pend a derived exception; the short-circuit && keeps
     * writing once a failure is seen only from being attempted at all.
     * The integrity signature (0xfefa125b) goes at the lowest address.
     */
    stacked_ok =
        v7m_stack_write(cpu, frameptr, 0xfefa125b, mmu_idx, ignore_faults) &&
        v7m_stack_write(cpu, frameptr + 0x8, env->regs[4], mmu_idx,
                        ignore_faults) &&
        v7m_stack_write(cpu, frameptr + 0xc, env->regs[5], mmu_idx,
                        ignore_faults) &&
        v7m_stack_write(cpu, frameptr + 0x10, env->regs[6], mmu_idx,
                        ignore_faults) &&
        v7m_stack_write(cpu, frameptr + 0x14, env->regs[7], mmu_idx,
                        ignore_faults) &&
        v7m_stack_write(cpu, frameptr + 0x18, env->regs[8], mmu_idx,
                        ignore_faults) &&
        v7m_stack_write(cpu, frameptr + 0x1c, env->regs[9], mmu_idx,
                        ignore_faults) &&
        v7m_stack_write(cpu, frameptr + 0x20, env->regs[10], mmu_idx,
                        ignore_faults) &&
        v7m_stack_write(cpu, frameptr + 0x24, env->regs[11], mmu_idx,
                        ignore_faults);

    /* Update SP regardless of whether any of the stack accesses failed. */
    *frame_sp_p = frameptr;

    return !stacked_ok;
}
7030
/* Do the "take the exception" parts of v7M exception entry: acknowledge
 * the pending exception, adjust the EXC_RETURN value in @lr, possibly
 * push the callee-saves registers (v8M DCRS rules), load the vector and
 * jump to the handler.  May recurse (bounded) to take derived exceptions.
 */
static void v7m_exception_taken(ARMCPU *cpu, uint32_t lr, bool dotailchain,
                                bool ignore_stackfaults)
{
    CPUARMState *env = &cpu->env;
    uint32_t addr;
    bool targets_secure;
    int exc;
    bool push_failed = false;

    armv7m_nvic_get_pending_irq_info(env->nvic, &exc, &targets_secure);
    qemu_log_mask(CPU_LOG_INT, "...taking pending %s exception %d\n",
                  targets_secure ? "secure" : "nonsecure", exc);

    if (arm_feature(env, ARM_FEATURE_V8)) {
        if (arm_feature(env, ARM_FEATURE_M_SECURITY) &&
            (lr & R_V7M_EXCRET_S_MASK)) {
            /* The background code (the owner of the registers in the
             * exception frame) is Secure.  This means it may either:
             *  - push the callee-saves registers and clear DCRS, or
             *  - not push and leave DCRS set, depending on whether the
             *    new exception targets Secure state or not.
             */
            if (targets_secure) {
                if (dotailchain && !(lr & R_V7M_EXCRET_ES_MASK)) {
                    /* We took an exception from the NS handler we were
                     * tailchaining from; the callee-saves were already
                     * pushed, so note that in the new EXC_RETURN.
                     */
                    lr &= ~R_V7M_EXCRET_DCRS_MASK;
                }
            } else {
                /* New exception is NonSecure: push the callee-saves
                 * registers unless they were already pushed for the
                 * exception we are tailchaining from.
                 */
                if (lr & R_V7M_EXCRET_DCRS_MASK &&
                    !(dotailchain && !(lr & R_V7M_EXCRET_ES_MASK))) {
                    push_failed = v7m_push_callee_stack(cpu, lr, dotailchain,
                                                        ignore_stackfaults);
                }
                lr |= R_V7M_EXCRET_DCRS_MASK;
            }
        }

        /* Recompute the ES and SPSEL bits of EXC_RETURN to match the
         * state the exception targets.
         */
        lr &= ~R_V7M_EXCRET_ES_MASK;
        if (targets_secure || !arm_feature(env, ARM_FEATURE_M_SECURITY)) {
            lr |= R_V7M_EXCRET_ES_MASK;
        }
        lr &= ~R_V7M_EXCRET_SPSEL_MASK;
        if (env->v7m.control[targets_secure] & R_V7M_CONTROL_SPSEL_MASK) {
            lr |= R_V7M_EXCRET_SPSEL_MASK;
        }

        /* Clear registers if necessary to prevent non-secure exception
         * code being able to see register values from secure code.
         * Where register values become architecturally UNKNOWN we leave
         * them with their previous values.
         */
        if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
            if (!targets_secure) {
                /* Always clear the caller-saved registers (they have
                 * been pushed to the stack earlier in v7m_push_stack()).
                 * Clear callee-saved registers only if the background
                 * code is Secure (in which case those were pushed too).
                 */
                int i;

                for (i = 0; i < 13; i++) {
                    /* r4..r11 are callee-saves, zero only if EXCRET.S == 1 */
                    if (i < 4 || i > 11 || (lr & R_V7M_EXCRET_S_MASK)) {
                        env->regs[i] = 0;
                    }
                }
                /* Clear N, Z, C, V, Q, GE and the IT bits. */
                xpsr_write(env, 0, XPSR_NZCV | XPSR_Q | XPSR_GE | XPSR_IT);
            }
        }
    }

    if (push_failed && !ignore_stackfaults) {
        /* Derived exception on callee-saves register stacking:
         * we might now want to take a different exception which
         * targets a different security state, so try again from the top.
         */
        qemu_log_mask(CPU_LOG_INT,
                      "...derived exception on callee-saves register stacking");
        v7m_exception_taken(cpu, lr, true, true);
        return;
    }

    if (!arm_v7m_load_vector(cpu, exc, targets_secure, &addr)) {
        /* Vector load failed: derived exception */
        qemu_log_mask(CPU_LOG_INT, "...derived exception on vector table load");
        v7m_exception_taken(cpu, lr, true, true);
        return;
    }

    /* Now we've done everything that might cause a derived exception
     * we can go ahead and activate whichever exception we're going to
     * take (which might now be the derived exception).
     */
    armv7m_nvic_acknowledge_irq(env->nvic);

    /* Switch to target security state -- must do this before writing SPSEL */
    switch_v7m_security_state(env, targets_secure);
    write_v7m_control_spsel(env, 0);
    arm_clear_exclusive(env);

    /* Clear IT bits */
    env->condexec_bits = 0;
    env->regs[14] = lr;
    env->regs[15] = addr & 0xfffffffe;
    env->thumb = addr & 1;
}
7149
/* Push the basic v7M exception stack frame (r0-r3, r12, lr, pc, xPSR)
 * onto the current stack.  Returns true if a derived exception occurred
 * (i.e. stacking failed or hit the stack limit), false on success.
 */
static bool v7m_push_stack(ARMCPU *cpu)
{
    bool stacked_ok;
    CPUARMState *env = &cpu->env;
    uint32_t xpsr = xpsr_read(env);
    uint32_t frameptr = env->regs[13];
    ARMMMUIdx mmu_idx = core_to_arm_mmu_idx(env, cpu_mmu_index(env, false));

    /* Align stack pointer if the guest wants that (CCR.STKALIGN);
     * record the realignment in xPSR so the exit code can undo it.
     */
    if ((frameptr & 4) &&
        (env->v7m.ccr[env->v7m.secure] & R_V7M_CCR_STKALIGN_MASK)) {
        frameptr -= 4;
        xpsr |= XPSR_SPREALIGN;
    }

    frameptr -= 0x20;

    if (arm_feature(env, ARM_FEATURE_V8)) {
        uint32_t limit = v7m_sp_limit(env);

        if (frameptr < limit) {
            /* Stack limit failure: set SP to the limit value, and generate
             * STKOF UsageFault.  Stack pushes below the limit must not be
             * performed.  It is IMPDEF whether pushes above the limit are
             * performed; we choose not to.
             */
            qemu_log_mask(CPU_LOG_INT,
                          "...STKOF during stacking\n");
            env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_STKOF_MASK;
            armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
                                    env->v7m.secure);
            env->regs[13] = limit;
            return true;
        }
    }

    /* Write as much of the stack frame as we can.  If we fail a stack
     * write this will result in a derived exception being pended
     * (which may be taken in preference to the one we started with
     * if it has higher priority).
     */
    stacked_ok =
        v7m_stack_write(cpu, frameptr, env->regs[0], mmu_idx, false) &&
        v7m_stack_write(cpu, frameptr + 4, env->regs[1], mmu_idx, false) &&
        v7m_stack_write(cpu, frameptr + 8, env->regs[2], mmu_idx, false) &&
        v7m_stack_write(cpu, frameptr + 12, env->regs[3], mmu_idx, false) &&
        v7m_stack_write(cpu, frameptr + 16, env->regs[12], mmu_idx, false) &&
        v7m_stack_write(cpu, frameptr + 20, env->regs[14], mmu_idx, false) &&
        v7m_stack_write(cpu, frameptr + 24, env->regs[15], mmu_idx, false) &&
        v7m_stack_write(cpu, frameptr + 28, xpsr, mmu_idx, false);

    /* Update SP regardless of whether any of the stack accesses failed. */
    env->regs[13] = frameptr;

    return !stacked_ok;
}
7213
/* Perform a v7M exception return: validate the EXC_RETURN magic value
 * currently in r15, deactivate the exception, pop (unstack) the frame
 * and resume the interrupted code -- or take a new (derived or
 * tailchained) exception if any of the many integrity checks fail.
 */
static void do_v7m_exception_exit(ARMCPU *cpu)
{
    CPUARMState *env = &cpu->env;
    uint32_t excret;
    uint32_t xpsr;
    bool ufault = false;
    bool sfault = false;
    bool return_to_sp_process;
    bool return_to_handler;
    bool rettobase = false;
    bool exc_secure = false;
    bool return_to_secure;

    /* If we're not in Handler mode then jumps to magic exception-exit
     * addresses don't have magic behaviour.  However for the v8M
     * security extensions the magic secure-function-return has to
     * work in thread mode too, so that case is handled elsewhere;
     * here we just silently ignore the jump.
     */
    if (!arm_v7m_is_handler_mode(env)) {
        return;
    }

    /* In the spec pseudocode ExceptionReturn() is called directly
     * from BXWritePC() and gets the full target PC value including
     * bit zero.  In QEMU's implementation we treat it as a normal
     * jump-to-register (which is then caught later on), and so split
     * the target value up between env->regs[15] and env->thumb in
     * gen_bx().  Reconstitute it.
     */
    excret = env->regs[15];
    if (env->thumb) {
        excret |= 1;
    }

    qemu_log_mask(CPU_LOG_INT, "Exception return: magic PC %" PRIx32
                  " previous exception %d\n",
                  excret, env->v7m.exception);

    if ((excret & R_V7M_EXCRET_RES1_MASK) != R_V7M_EXCRET_RES1_MASK) {
        qemu_log_mask(LOG_GUEST_ERROR, "M profile: zero high bits in exception "
                      "exit PC value 0x%" PRIx32 " are UNPREDICTABLE\n",
                      excret);
    }

    if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
        /* EXC_RETURN.ES validation check (R_SMFL).  We must do this
         * before we pick which FAULTMASK to clear.
         */
        if (!env->v7m.secure &&
            ((excret & R_V7M_EXCRET_ES_MASK) ||
             !(excret & R_V7M_EXCRET_DCRS_MASK))) {
            sfault = 1;
            /* For all other purposes, treat ES as 0 (R_HXSR) */
            excret &= ~R_V7M_EXCRET_ES_MASK;
        }
        exc_secure = excret & R_V7M_EXCRET_ES_MASK;
    }

    if (env->v7m.exception != ARMV7M_EXCP_NMI) {
        /* Auto-clear FAULTMASK on return from other than NMI.
         * If the security extension is implemented then this only
         * happens if the raw execution priority is >= 0; the
         * value of the ES bit in the exception return value indicates
         * which security state's faultmask to clear. (v8M ARM pseudocode
         * DeActivate())
         */
        if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
            if (armv7m_nvic_raw_execution_priority(env->nvic) >= 0) {
                env->v7m.faultmask[exc_secure] = 0;
            }
        } else {
            env->v7m.faultmask[M_REG_NS] = 0;
        }
    }

    switch (armv7m_nvic_complete_irq(env->nvic, env->v7m.exception,
                                     exc_secure)) {
    case -1:
        /* attempt to exit an exception that isn't active */
        ufault = true;
        break;
    case 0:
        /* still an irq active now */
        break;
    case 1:
        /* We returned to base exception level, no nesting.
         * (In the pseudocode this is written using "NestedActivation != 1"
         * where we have 'rettobase == false'.)
         */
        rettobase = true;
        break;
    default:
        g_assert_not_reached();
    }

    return_to_handler = !(excret & R_V7M_EXCRET_MODE_MASK);
    return_to_sp_process = excret & R_V7M_EXCRET_SPSEL_MASK;
    return_to_secure = arm_feature(env, ARM_FEATURE_M_SECURITY) &&
        (excret & R_V7M_EXCRET_S_MASK);

    if (arm_feature(env, ARM_FEATURE_V8)) {
        if (!arm_feature(env, ARM_FEATURE_M_SECURITY)) {
            /* UNPREDICTABLE if S == 1 or DCRS == 0 or ES == 1 (R_XLCP);
             * we choose to take the UsageFault.
             */
            if ((excret & R_V7M_EXCRET_S_MASK) ||
                (excret & R_V7M_EXCRET_ES_MASK) ||
                !(excret & R_V7M_EXCRET_DCRS_MASK)) {
                ufault = true;
            }
        }
        if (excret & R_V7M_EXCRET_RES0_MASK) {
            ufault = true;
        }
    } else {
        /* For v7M we only recognize certain combinations of the low bits */
        switch (excret & 0xf) {
        case 1: /* Return to Handler */
            break;
        case 13: /* Return to Thread using Process stack */
        case 9: /* Return to Thread using Main stack */
            /* We only need to check NONBASETHRDENA for v7M, because in
             * v8M this bit does not exist (R_LDGJ).
             */
            if (!rettobase &&
                !(env->v7m.ccr[env->v7m.secure] &
                  R_V7M_CCR_NONBASETHRDENA_MASK)) {
                ufault = true;
            }
            break;
        default:
            ufault = true;
        }
    }

    /* Set CONTROL.SPSEL from excret.SPSEL. Since we're still in
     * Handler mode (and will be until we write the new XPSR.Interrupt
     * field) this does not switch around the current stack pointer.
     * We must do this before we do any kind of tailchaining, including
     * for the derived exceptions on integrity check failures, or we
     * will give the guest an incorrect EXCRET.SPSEL value on exception
     * entry.
     */
    write_v7m_control_spsel_for_secstate(env, return_to_sp_process, exc_secure);

    if (sfault) {
        env->v7m.sfsr |= R_V7M_SFSR_INVER_MASK;
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
        qemu_log_mask(CPU_LOG_INT, "...taking SecureFault on existing "
                      "stackframe: failed EXC_RETURN.ES validity check\n");
        v7m_exception_taken(cpu, excret, true, false);
        return;
    }

    if (ufault) {
        /* Bad exception return: instead of popping the exception
         * stack, directly take a usage fault on the current stack.
         */
        env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK;
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
        qemu_log_mask(CPU_LOG_INT, "...taking UsageFault on existing "
                      "stackframe: failed exception return integrity check\n");
        v7m_exception_taken(cpu, excret, true, false);
        return;
    }

    /* Tailchaining: if there is currently a pending exception that
     * is high enough priority to preempt execution at the level we're
     * about to return to, then just directly take that exception now,
     * avoiding an unstack-and-then-stack.  Note that now we have
     * deactivated the previous exception by calling armv7m_nvic_complete_irq()
     * the current execution priority will be the priority of whatever
     * exception is still active (or base level if none is); this will
     * give the right answer from can_take_pending_exception().
     */
    if (armv7m_nvic_can_take_pending_exception(env->nvic)) {
        qemu_log_mask(CPU_LOG_INT, "...tailchaining to pending exception\n");
        v7m_exception_taken(cpu, excret, true, false);
        return;
    }

    switch_v7m_security_state(env, return_to_secure);

    {
        /* The stack pointer we should be reading the exception frame from
         * depends on bits in the magic exception return type value (and
         * for v8M isn't necessarily the stack pointer we will eventually
         * end up resuming execution with). Get a pointer to the location
         * in the CPU state struct where the SP we need is currently being
         * stored; we will use and modify it in place.
         * We use this limited C variable scope so we don't accidentally
         * use 'frame_sp_p' after we do something that makes it invalid.
         */
        uint32_t *frame_sp_p = get_v7m_sp_ptr(env,
                                              return_to_secure,
                                              !return_to_handler,
                                              return_to_sp_process);
        uint32_t frameptr = *frame_sp_p;
        bool pop_ok = true;
        ARMMMUIdx mmu_idx;
        bool return_to_priv = return_to_handler ||
            !(env->v7m.control[return_to_secure] & R_V7M_CONTROL_NPRIV_MASK);

        mmu_idx = arm_v7m_mmu_idx_for_secstate_and_priv(env, return_to_secure,
                                                        return_to_priv);

        if (!QEMU_IS_ALIGNED(frameptr, 8) &&
            arm_feature(env, ARM_FEATURE_V8)) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "M profile exception return with non-8-aligned SP "
                          "for destination state is UNPREDICTABLE\n");
        }

        /* Do we need to pop callee-saved registers? */
        if (return_to_secure &&
            ((excret & R_V7M_EXCRET_ES_MASK) == 0 ||
             (excret & R_V7M_EXCRET_DCRS_MASK) == 0)) {
            uint32_t expected_sig = 0xfefa125b;
            uint32_t actual_sig;

            pop_ok = v7m_stack_read(cpu, &actual_sig, frameptr, mmu_idx);

            if (pop_ok && expected_sig != actual_sig) {
                /* Take a SecureFault on the current stack */
                env->v7m.sfsr |= R_V7M_SFSR_INVIS_MASK;
                armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
                qemu_log_mask(CPU_LOG_INT, "...taking SecureFault on existing "
                              "stackframe: failed exception return integrity "
                              "signature check\n");
                v7m_exception_taken(cpu, excret, true, false);
                return;
            }

            pop_ok = pop_ok &&
                v7m_stack_read(cpu, &env->regs[4], frameptr + 0x8, mmu_idx) &&
                v7m_stack_read(cpu, &env->regs[5], frameptr + 0xc, mmu_idx) &&
                v7m_stack_read(cpu, &env->regs[6], frameptr + 0x10, mmu_idx) &&
                v7m_stack_read(cpu, &env->regs[7], frameptr + 0x14, mmu_idx) &&
                v7m_stack_read(cpu, &env->regs[8], frameptr + 0x18, mmu_idx) &&
                v7m_stack_read(cpu, &env->regs[9], frameptr + 0x1c, mmu_idx) &&
                v7m_stack_read(cpu, &env->regs[10], frameptr + 0x20, mmu_idx) &&
                v7m_stack_read(cpu, &env->regs[11], frameptr + 0x24, mmu_idx);

            frameptr += 0x28;
        }

        /* Pop registers */
        pop_ok = pop_ok &&
            v7m_stack_read(cpu, &env->regs[0], frameptr, mmu_idx) &&
            v7m_stack_read(cpu, &env->regs[1], frameptr + 0x4, mmu_idx) &&
            v7m_stack_read(cpu, &env->regs[2], frameptr + 0x8, mmu_idx) &&
            v7m_stack_read(cpu, &env->regs[3], frameptr + 0xc, mmu_idx) &&
            v7m_stack_read(cpu, &env->regs[12], frameptr + 0x10, mmu_idx) &&
            v7m_stack_read(cpu, &env->regs[14], frameptr + 0x14, mmu_idx) &&
            v7m_stack_read(cpu, &env->regs[15], frameptr + 0x18, mmu_idx) &&
            v7m_stack_read(cpu, &xpsr, frameptr + 0x1c, mmu_idx);

        if (!pop_ok) {
            /* v7m_stack_read() pended a fault, so take it (as a tail
             * chained exception on the same stack frame)
             */
            qemu_log_mask(CPU_LOG_INT, "...derived exception on unstacking\n");
            v7m_exception_taken(cpu, excret, true, false);
            return;
        }

        /* Returning from an exception with a PC with bit 0 set is defined
         * behaviour on v8M (bit 0 is ignored), but for v7M it was specified
         * to be UNPREDICTABLE. In practice actual v7M hardware seems to ignore
         * the lsbit, and there are several RTOSes out there which incorrectly
         * assume the r15 in the stack frame should be a Thumb-style "lsbit
         * indicates ARM/Thumb" value, so ignore the bit on v7M as well, but
         * complain about the badly behaved guest.
         */
        if (env->regs[15] & 1) {
            env->regs[15] &= ~1U;
            if (!arm_feature(env, ARM_FEATURE_V8)) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "M profile return from interrupt with misaligned "
                              "PC is UNPREDICTABLE on v7M\n");
            }
        }

        if (arm_feature(env, ARM_FEATURE_V8)) {
            /* For v8M we have to check whether the xPSR exception field
             * matches the EXCRET value for return to handler/thread
             * before we commit to changing the SP and xPSR.
             */
            bool will_be_handler = (xpsr & XPSR_EXCP) != 0;
            if (return_to_handler != will_be_handler) {
                /* Take an INVPC UsageFault on the current stack.
                 * By this point we will have switched to the security state
                 * for the background state, so this UsageFault will target
                 * that state.
                 */
                armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
                                        env->v7m.secure);
                env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK;
                qemu_log_mask(CPU_LOG_INT, "...taking UsageFault on existing "
                              "stackframe: failed exception return integrity "
                              "check\n");
                v7m_exception_taken(cpu, excret, true, false);
                return;
            }
        }

        /* Commit to consuming the stack frame */
        frameptr += 0x20;

        /* Undo stack alignment (the SPREALIGN bit indicates that the
         * original pre-exception SP had bit 2 set and was realigned
         * downwards on entry; restoring it just sets that bit again).
         */
        if (xpsr & XPSR_SPREALIGN) {
            frameptr |= 4;
        }
        *frame_sp_p = frameptr;
    }

    /* This xpsr_write() will invalidate frame_sp_p as it may switch stack */
    xpsr_write(env, xpsr, ~XPSR_SPREALIGN);

    /* The restored xPSR exception field will be zero if we're
     * resuming in Thread mode. If that doesn't match what the
     * exception return excret specified then this is a UsageFault.
     * v7M requires we make this check here; v8M did it earlier.
     */
    if (return_to_handler != arm_v7m_is_handler_mode(env)) {
        /* Take an INVPC UsageFault by pushing the stack again;
         * we know we're v7M so this is never a Secure UsageFault.
         */
        bool ignore_stackfaults;

        assert(!arm_feature(env, ARM_FEATURE_V8));
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, false);
        env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK;
        ignore_stackfaults = v7m_push_stack(cpu);
        qemu_log_mask(CPU_LOG_INT, "...taking UsageFault on new stackframe: "
                      "failed exception return integrity check\n");
        v7m_exception_taken(cpu, excret, false, ignore_stackfaults);
        return;
    }

    /* Otherwise, we have a successful exception exit. */
    arm_clear_exclusive(env);
    qemu_log_mask(CPU_LOG_INT, "...successful exception return\n");
}
7566
static bool do_v7m_function_return(ARMCPU *cpu)
{
    /*
     * v8M security extensions: handle a "function return" -- the
     * return from a callee reached via BLXNS, identified by a magic
     * FNC_RETURN value in the PC. We pop the return PC and PSR from
     * the Secure stack, check the new IPSR for consistency with the
     * current exception number, and switch to the Secure security
     * state. Returns true on success; returns false if we pended an
     * INVPC UsageFault, in which case the caller must take the
     * pending exception.
     */
    CPUARMState *env = &cpu->env;
    uint32_t newpc, newpsr, newpsr_exc;

    qemu_log_mask(CPU_LOG_INT, "...really v7M secure function return\n");

    {
        bool threadmode, spsel;
        TCGMemOpIdx oi;
        ARMMMUIdx mmu_idx;
        uint32_t *frame_sp_p;
        uint32_t frameptr;

        /* Pull the return address and IPSR from the Secure stack */
        threadmode = !arm_v7m_is_handler_mode(env);
        spsel = env->v7m.control[M_REG_S] & R_V7M_CONTROL_SPSEL_MASK;

        frame_sp_p = get_v7m_sp_ptr(env, true, threadmode, spsel);
        frameptr = *frame_sp_p;

        /*
         * These loads may raise a derived exception (e.g. an MPU fault);
         * we do them with the Secure MMU index so permissions are
         * checked against the Secure state.
         */
        mmu_idx = arm_v7m_mmu_idx_for_secstate(env, true);
        oi = make_memop_idx(MO_LE, arm_to_core_mmu_idx(mmu_idx));
        newpc = helper_le_ldul_mmu(env, frameptr, oi, 0);
        newpsr = helper_le_ldul_mmu(env, frameptr + 4, oi, 0);

        /*
         * Consistency check: the new IPSR exception number must agree
         * with the current one (0 stays 0; exception 1 must return to
         * a non-zero exception number).
         */
        newpsr_exc = newpsr & XPSR_EXCP;
        if (!((env->v7m.exception == 0 && newpsr_exc == 0) ||
              (env->v7m.exception == 1 && newpsr_exc != 0))) {
            /* Pend the fault and tell our caller to take it */
            env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK;
            armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
                                    env->v7m.secure);
            qemu_log_mask(CPU_LOG_INT,
                          "...taking INVPC UsageFault: "
                          "IPSR consistency check failed\n");
            return false;
        }

        /* Pop the two words we loaded */
        *frame_sp_p = frameptr + 8;
    }

    /*
     * NOTE: after this security state switch, frame_sp_p is no longer
     * valid (the banked SP selection has changed), which is why the
     * stack update above is done first.
     */
    switch_v7m_security_state(env, true);
    env->v7m.exception = newpsr_exc;
    /* Propagate the SFPA bit from the popped PSR into CONTROL_S */
    env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_SFPA_MASK;
    if (newpsr & XPSR_SFPA) {
        env->v7m.control[M_REG_S] |= R_V7M_CONTROL_SFPA_MASK;
    }
    /* Clear the IT bits; keep the rest of XPSR unchanged */
    xpsr_write(env, 0, XPSR_IT);
    env->thumb = newpc & 1;
    env->regs[15] = newpc & ~1;

    qemu_log_mask(CPU_LOG_INT, "...function return successful\n");
    return true;
}
7638
7639static void arm_log_exception(int idx)
7640{
7641 if (qemu_loglevel_mask(CPU_LOG_INT)) {
7642 const char *exc = NULL;
7643 static const char * const excnames[] = {
7644 [EXCP_UDEF] = "Undefined Instruction",
7645 [EXCP_SWI] = "SVC",
7646 [EXCP_PREFETCH_ABORT] = "Prefetch Abort",
7647 [EXCP_DATA_ABORT] = "Data Abort",
7648 [EXCP_IRQ] = "IRQ",
7649 [EXCP_FIQ] = "FIQ",
7650 [EXCP_BKPT] = "Breakpoint",
7651 [EXCP_EXCEPTION_EXIT] = "QEMU v7M exception exit",
7652 [EXCP_KERNEL_TRAP] = "QEMU intercept of kernel commpage",
7653 [EXCP_HVC] = "Hypervisor Call",
7654 [EXCP_HYP_TRAP] = "Hypervisor Trap",
7655 [EXCP_SMC] = "Secure Monitor Call",
7656 [EXCP_VIRQ] = "Virtual IRQ",
7657 [EXCP_VFIQ] = "Virtual FIQ",
7658 [EXCP_SEMIHOST] = "Semihosting call",
7659 [EXCP_NOCP] = "v7M NOCP UsageFault",
7660 [EXCP_INVSTATE] = "v7M INVSTATE UsageFault",
7661 [EXCP_STKOF] = "v8M STKOF UsageFault",
7662 };
7663
7664 if (idx >= 0 && idx < ARRAY_SIZE(excnames)) {
7665 exc = excnames[idx];
7666 }
7667 if (!exc) {
7668 exc = "unknown";
7669 }
7670 qemu_log_mask(CPU_LOG_INT, "Taking exception %d [%s]\n", idx, exc);
7671 }
7672}
7673
static bool v7m_read_half_insn(ARMCPU *cpu, ARMMMUIdx mmu_idx,
                               uint32_t addr, uint16_t *insn)
{
    /*
     * Load a 16-bit portion of a v7M instruction, returning true on
     * success or false on failure (in which case we will have pended
     * the appropriate exception: SecureFault, MemManage or BusFault).
     * We do the SAU lookup and the MPU walk "by hand" here rather than
     * with a single guest load, so that each failure mode can set the
     * architecturally-correct fault status bits before returning.
     */
    CPUState *cs = CPU(cpu);
    CPUARMState *env = &cpu->env;
    V8M_SAttributes sattrs = {};
    MemTxAttrs attrs = {};
    ARMMMUFaultInfo fi = {};
    MemTxResult txres;
    target_ulong page_size;
    hwaddr physaddr;
    int prot;

    /* The fetch is only legal from Secure & Non-Secure-Callable memory */
    v8m_security_lookup(env, addr, MMU_INST_FETCH, mmu_idx, &sattrs);
    if (!sattrs.nsc || sattrs.ns) {
        /*
         * This must be the second half of the insn, and it straddles a
         * region boundary with the second half not being S&NSC: pend
         * a SecureFault with SFSR.INVEP.
         */
        env->v7m.sfsr |= R_V7M_SFSR_INVEP_MASK;
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
        qemu_log_mask(CPU_LOG_INT,
                      "...really SecureFault with SFSR.INVEP\n");
        return false;
    }
    if (get_phys_addr(env, addr, MMU_INST_FETCH, mmu_idx,
                      &physaddr, &attrs, &prot, &page_size, &fi, NULL)) {
        /* MPU lookup failed: pend a MemManage fault */
        env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_IACCVIOL_MASK;
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_MEM, env->v7m.secure);
        qemu_log_mask(CPU_LOG_INT, "...really MemManage with CFSR.IACCVIOL\n");
        return false;
    }
    *insn = address_space_lduw_le(arm_addressspace(cs, attrs), physaddr,
                                  attrs, &txres);
    if (txres != MEMTX_OK) {
        /* The physical access itself failed: pend a BusFault */
        env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_IBUSERR_MASK;
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_BUS, false);
        qemu_log_mask(CPU_LOG_INT, "...really BusFault with CFSR.IBUSERR\n");
        return false;
    }
    return true;
}
7727
static bool v7m_handle_execute_nsc(ARMCPU *cpu)
{
    /*
     * Check whether this attempt to execute code in a Secure &
     * NS-Callable memory region is for an SG instruction; if so,
     * emulate the effect of the SG instruction and return true.
     * Otherwise pend a SecureFault (SFSR.INVEP) and return false.
     */
    CPUARMState *env = &cpu->env;
    ARMMMUIdx mmu_idx;
    uint16_t insn;

    /*
     * We should only get here for a Non-secure CPU attempting to execute
     * from S&NSC memory (that is what raises M_FAKE_FSR_NSC_EXEC).
     */
    assert(!env->v7m.secure);
    assert(arm_feature(env, ARM_FEATURE_M_SECURITY));

    /* We want to do the MPU lookup as Secure; work out the mmu_idx for that */
    mmu_idx = arm_v7m_mmu_idx_for_secstate(env, true);

    if (!v7m_read_half_insn(cpu, mmu_idx, env->regs[15], &insn)) {
        return false;
    }

    /* Not executing Thumb code cannot be an SG instruction */
    if (!env->thumb) {
        goto gen_invep;
    }

    if (insn != 0xe97f) {
        /* First halfword is not the SG encoding */
        goto gen_invep;
    }

    if (!v7m_read_half_insn(cpu, mmu_idx, env->regs[15] + 2, &insn)) {
        return false;
    }

    if (insn != 0xe97f) {
        /*
         * Second halfword is not the SG encoding (both halves of the
         * 32-bit SG insn have the same 0xe97f value).
         */
        goto gen_invep;
    }

    /*
     * Confirmed SG: emulate it. Clear LR bit 0 (marking the return as a
     * function return to Secure), switch to Secure state, clear the IT
     * bits, and step past the 4-byte instruction.
     */
    qemu_log_mask(CPU_LOG_INT, "...really an SG instruction at 0x%08" PRIx32
                  ", executing it\n", env->regs[15]);
    env->regs[14] &= ~1;
    switch_v7m_security_state(env, true);
    xpsr_write(env, 0, XPSR_IT);
    env->regs[15] += 4;
    return true;

gen_invep:
    env->v7m.sfsr |= R_V7M_SFSR_INVEP_MASK;
    armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
    qemu_log_mask(CPU_LOG_INT,
                  "...really SecureFault with SFSR.INVEP\n");
    return false;
}
7792
void arm_v7m_cpu_do_interrupt(CPUState *cs)
{
    /*
     * M-profile exception entry. Map the QEMU internal exception index
     * onto the correct NVIC pending exception, then push a stack frame
     * and take the exception. Some cases (semihosting, exception exit,
     * SG emulation) are fully handled here and return early.
     */
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    uint32_t lr;
    bool ignore_stackfaults;

    arm_log_exception(cs->exception_index);

    /* For most exceptions we just mark them pending on the NVIC */
    switch (cs->exception_index) {
    case EXCP_UDEF:
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
        env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_UNDEFINSTR_MASK;
        break;
    case EXCP_NOCP:
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
        env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_NOCP_MASK;
        break;
    case EXCP_INVSTATE:
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
        env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVSTATE_MASK;
        break;
    case EXCP_STKOF:
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
        env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_STKOF_MASK;
        break;
    case EXCP_SWI:
        /* The PC already points to the next instruction.  */
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SVC, env->v7m.secure);
        break;
    case EXCP_PREFETCH_ABORT:
    case EXCP_DATA_ABORT:
        /*
         * M profile has no guest-facing FSR, but env->exception.fsr is
         * populated by the fault-raising code (in the A-profile
         * short-descriptor format) so we can distinguish fault causes.
         */
        switch (env->exception.fsr & 0xf) {
        case M_FAKE_FSR_NSC_EXEC:
            /*
             * Attempted execution at an address marked Secure &
             * Non-Secure-Callable while the CPU is Non-secure. The only
             * instruction which may execute like this is SG; anything
             * else becomes a SecureFault.
             */
            if (v7m_handle_execute_nsc(cpu)) {
                return;
            }
            break;
        case M_FAKE_FSR_SFAULT:
            /*
             * Various flavours of SecureFault for attempts to execute
             * or access data in the wrong security state.
             */
            switch (cs->exception_index) {
            case EXCP_PREFETCH_ABORT:
                if (env->v7m.secure) {
                    env->v7m.sfsr |= R_V7M_SFSR_INVTRAN_MASK;
                    qemu_log_mask(CPU_LOG_INT,
                                  "...really SecureFault with SFSR.INVTRAN\n");
                } else {
                    env->v7m.sfsr |= R_V7M_SFSR_INVEP_MASK;
                    qemu_log_mask(CPU_LOG_INT,
                                  "...really SecureFault with SFSR.INVEP\n");
                }
                break;
            case EXCP_DATA_ABORT:
                /* This must be an NS access to S memory */
                env->v7m.sfsr |= R_V7M_SFSR_AUVIOL_MASK;
                qemu_log_mask(CPU_LOG_INT,
                              "...really SecureFault with SFSR.AUVIOL\n");
                break;
            }
            armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
            break;
        case 0x8: /* External abort -> BusFault */
            switch (cs->exception_index) {
            case EXCP_PREFETCH_ABORT:
                env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_IBUSERR_MASK;
                qemu_log_mask(CPU_LOG_INT, "...with CFSR.IBUSERR\n");
                break;
            case EXCP_DATA_ABORT:
                env->v7m.cfsr[M_REG_NS] |=
                    (R_V7M_CFSR_PRECISERR_MASK | R_V7M_CFSR_BFARVALID_MASK);
                env->v7m.bfar = env->exception.vaddress;
                qemu_log_mask(CPU_LOG_INT,
                              "...with CFSR.PRECISERR and BFAR 0x%x\n",
                              env->v7m.bfar);
                break;
            }
            armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_BUS, false);
            break;
        default:
            /* All other FSR values are treated as MPU faults here */
            switch (cs->exception_index) {
            case EXCP_PREFETCH_ABORT:
                env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_IACCVIOL_MASK;
                qemu_log_mask(CPU_LOG_INT, "...with CFSR.IACCVIOL\n");
                break;
            case EXCP_DATA_ABORT:
                env->v7m.cfsr[env->v7m.secure] |=
                    (R_V7M_CFSR_DACCVIOL_MASK | R_V7M_CFSR_MMARVALID_MASK);
                env->v7m.mmfar[env->v7m.secure] = env->exception.vaddress;
                qemu_log_mask(CPU_LOG_INT,
                              "...with CFSR.DACCVIOL and MMFAR 0x%x\n",
                              env->v7m.mmfar[env->v7m.secure]);
                break;
            }
            armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_MEM,
                                    env->v7m.secure);
            break;
        }
        break;
    case EXCP_BKPT:
        /* BKPT 0xab is the Thumb semihosting trap when enabled */
        if (semihosting_enabled()) {
            int nr;
            nr = arm_lduw_code(env, env->regs[15], arm_sctlr_b(env)) & 0xff;
            if (nr == 0xab) {
                env->regs[15] += 2;
                qemu_log_mask(CPU_LOG_INT,
                              "...handling as semihosting call 0x%x\n",
                              env->regs[0]);
                env->regs[0] = do_arm_semihosting(env);
                return;
            }
        }
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_DEBUG, false);
        break;
    case EXCP_IRQ:
        break;
    case EXCP_EXCEPTION_EXIT:
        if (env->regs[15] < EXC_RETURN_MIN_MAGIC) {
            /* Must be v8M security extension function return */
            assert(env->regs[15] >= FNC_RETURN_MIN_MAGIC);
            assert(arm_feature(env, ARM_FEATURE_M_SECURITY));
            if (do_v7m_function_return(cpu)) {
                return;
            }
        } else {
            do_v7m_exception_exit(cpu);
            return;
        }
        break;
    default:
        cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
        return; /* Never happens.  Keep compiler happy.  */
    }

    /* Build the EXC_RETURN value to put in LR on exception entry */
    if (arm_feature(env, ARM_FEATURE_V8)) {
        lr = R_V7M_EXCRET_RES1_MASK |
            R_V7M_EXCRET_DCRS_MASK |
            R_V7M_EXCRET_FTYPE_MASK;
        /*
         * The S bit indicates whether we should return to Secure
         * or NonSecure (ie our current state). The ES and SPSEL bits
         * are filled in later (presumably by v7m_exception_taken() --
         * not visible here; confirm against that function).
         */
        if (env->v7m.secure) {
            lr |= R_V7M_EXCRET_S_MASK;
        }
    } else {
        lr = R_V7M_EXCRET_RES1_MASK |
            R_V7M_EXCRET_S_MASK |
            R_V7M_EXCRET_DCRS_MASK |
            R_V7M_EXCRET_FTYPE_MASK |
            R_V7M_EXCRET_ES_MASK;
        if (env->v7m.control[M_REG_NS] & R_V7M_CONTROL_SPSEL_MASK) {
            lr |= R_V7M_EXCRET_SPSEL_MASK;
        }
    }
    if (!arm_v7m_is_handler_mode(env)) {
        lr |= R_V7M_EXCRET_MODE_MASK;
    }

    ignore_stackfaults = v7m_push_stack(cpu);
    v7m_exception_taken(cpu, lr, false, ignore_stackfaults);
}
7980
7981
7982
7983
7984
/*
 * Synchronize QEMU's AArch64 register set (xregs) from the AArch32
 * register set (regs plus the mode-banked copies). Needed when
 * switching from AArch32 to AArch64 execution state.
 */
void aarch64_sync_32_to_64(CPUARMState *env)
{
    int i;
    uint32_t mode = env->uncached_cpsr & CPSR_M;

    /* We can blanket copy R[0:7] to X[0:7] */
    for (i = 0; i < 8; i++) {
        env->xregs[i] = env->regs[i];
    }

    /*
     * X[8:12] map to the user-mode r8-r12: if we are in FIQ mode the
     * live regs hold the FIQ bank, so take the saved user copies instead.
     */
    if (mode == ARM_CPU_MODE_FIQ) {
        for (i = 8; i < 13; i++) {
            env->xregs[i] = env->usr_regs[i - 8];
        }
    } else {
        for (i = 8; i < 13; i++) {
            env->xregs[i] = env->regs[i];
        }
    }

    /*
     * X[13:14] map to the User/System r13-r14: copy the live registers
     * only if we are in one of those modes, otherwise use the banked
     * user copies.
     */
    if (mode == ARM_CPU_MODE_USR || mode == ARM_CPU_MODE_SYS) {
        env->xregs[13] = env->regs[13];
        env->xregs[14] = env->regs[14];
    } else {
        env->xregs[13] = env->banked_r13[bank_number(ARM_CPU_MODE_USR)];
        /* HYP shares r14 with User, so the live r14 is correct there */
        if (mode == ARM_CPU_MODE_HYP) {
            env->xregs[14] = env->regs[14];
        } else {
            env->xregs[14] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_USR)];
        }
    }

    /* X[15] maps to the Hyp-mode SP (r13) */
    if (mode == ARM_CPU_MODE_HYP) {
        env->xregs[15] = env->regs[13];
    } else {
        env->xregs[15] = env->banked_r13[bank_number(ARM_CPU_MODE_HYP)];
    }

    /* X[16:17] map to IRQ-mode r14/r13 */
    if (mode == ARM_CPU_MODE_IRQ) {
        env->xregs[16] = env->regs[14];
        env->xregs[17] = env->regs[13];
    } else {
        env->xregs[16] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_IRQ)];
        env->xregs[17] = env->banked_r13[bank_number(ARM_CPU_MODE_IRQ)];
    }

    /* X[18:19] map to SVC-mode r14/r13 */
    if (mode == ARM_CPU_MODE_SVC) {
        env->xregs[18] = env->regs[14];
        env->xregs[19] = env->regs[13];
    } else {
        env->xregs[18] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_SVC)];
        env->xregs[19] = env->banked_r13[bank_number(ARM_CPU_MODE_SVC)];
    }

    /* X[20:21] map to ABT-mode r14/r13 */
    if (mode == ARM_CPU_MODE_ABT) {
        env->xregs[20] = env->regs[14];
        env->xregs[21] = env->regs[13];
    } else {
        env->xregs[20] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_ABT)];
        env->xregs[21] = env->banked_r13[bank_number(ARM_CPU_MODE_ABT)];
    }

    /* X[22:23] map to UND-mode r14/r13 */
    if (mode == ARM_CPU_MODE_UND) {
        env->xregs[22] = env->regs[14];
        env->xregs[23] = env->regs[13];
    } else {
        env->xregs[22] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_UND)];
        env->xregs[23] = env->banked_r13[bank_number(ARM_CPU_MODE_UND)];
    }

    /*
     * X[24:30] map to the FIQ-mode r8-r14: take the live registers if we
     * are in FIQ mode, otherwise the saved FIQ bank (r8-r12 from
     * fiq_regs, r13/r14 from the banked arrays).
     */
    if (mode == ARM_CPU_MODE_FIQ) {
        for (i = 24; i < 31; i++) {
            env->xregs[i] = env->regs[i - 16];   /* X[24:30] <- R[8:14] */
        }
    } else {
        for (i = 24; i < 29; i++) {
            env->xregs[i] = env->fiq_regs[i - 24];
        }
        env->xregs[29] = env->banked_r13[bank_number(ARM_CPU_MODE_FIQ)];
        env->xregs[30] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_FIQ)];
    }

    env->pc = env->regs[15];
}
8081
8082
8083
8084
8085
/*
 * Synchronize QEMU's AArch32 register set (regs plus the mode-banked
 * copies) from the AArch64 register set (xregs). This is the inverse
 * of aarch64_sync_32_to_64(), using the same X<->banked-R mapping.
 */
void aarch64_sync_64_to_32(CPUARMState *env)
{
    int i;
    uint32_t mode = env->uncached_cpsr & CPSR_M;

    /* We can blanket copy X[0:7] to R[0:7] */
    for (i = 0; i < 8; i++) {
        env->regs[i] = env->xregs[i];
    }

    /*
     * X[8:12] are the user-mode r8-r12: if we are in FIQ mode the live
     * regs hold the FIQ bank, so write the saved user copies instead.
     */
    if (mode == ARM_CPU_MODE_FIQ) {
        for (i = 8; i < 13; i++) {
            env->usr_regs[i - 8] = env->xregs[i];
        }
    } else {
        for (i = 8; i < 13; i++) {
            env->regs[i] = env->xregs[i];
        }
    }

    /*
     * X[13:14] are the User/System r13-r14: write the live registers
     * only if we are in one of those modes, otherwise the banked user
     * copies. (HYP shares r14 with User, so the live r14 is written
     * there.)
     */
    if (mode == ARM_CPU_MODE_USR || mode == ARM_CPU_MODE_SYS) {
        env->regs[13] = env->xregs[13];
        env->regs[14] = env->xregs[14];
    } else {
        env->banked_r13[bank_number(ARM_CPU_MODE_USR)] = env->xregs[13];

        if (mode == ARM_CPU_MODE_HYP) {
            env->regs[14] = env->xregs[14];
        } else {
            env->banked_r14[r14_bank_number(ARM_CPU_MODE_USR)] = env->xregs[14];
        }
    }

    /* X[15] is the Hyp-mode SP (r13) */
    if (mode == ARM_CPU_MODE_HYP) {
        env->regs[13] = env->xregs[15];
    } else {
        env->banked_r13[bank_number(ARM_CPU_MODE_HYP)] = env->xregs[15];
    }

    /* X[16:17] are IRQ-mode r14/r13 */
    if (mode == ARM_CPU_MODE_IRQ) {
        env->regs[14] = env->xregs[16];
        env->regs[13] = env->xregs[17];
    } else {
        env->banked_r14[r14_bank_number(ARM_CPU_MODE_IRQ)] = env->xregs[16];
        env->banked_r13[bank_number(ARM_CPU_MODE_IRQ)] = env->xregs[17];
    }

    /* X[18:19] are SVC-mode r14/r13 */
    if (mode == ARM_CPU_MODE_SVC) {
        env->regs[14] = env->xregs[18];
        env->regs[13] = env->xregs[19];
    } else {
        env->banked_r14[r14_bank_number(ARM_CPU_MODE_SVC)] = env->xregs[18];
        env->banked_r13[bank_number(ARM_CPU_MODE_SVC)] = env->xregs[19];
    }

    /* X[20:21] are ABT-mode r14/r13 */
    if (mode == ARM_CPU_MODE_ABT) {
        env->regs[14] = env->xregs[20];
        env->regs[13] = env->xregs[21];
    } else {
        env->banked_r14[r14_bank_number(ARM_CPU_MODE_ABT)] = env->xregs[20];
        env->banked_r13[bank_number(ARM_CPU_MODE_ABT)] = env->xregs[21];
    }

    /* X[22:23] are UND-mode r14/r13 */
    if (mode == ARM_CPU_MODE_UND) {
        env->regs[14] = env->xregs[22];
        env->regs[13] = env->xregs[23];
    } else {
        env->banked_r14[r14_bank_number(ARM_CPU_MODE_UND)] = env->xregs[22];
        env->banked_r13[bank_number(ARM_CPU_MODE_UND)] = env->xregs[23];
    }

    /*
     * X[24:30] are the FIQ-mode r8-r14: write the live registers if we
     * are in FIQ mode, otherwise the saved FIQ bank.
     */
    if (mode == ARM_CPU_MODE_FIQ) {
        for (i = 24; i < 31; i++) {
            env->regs[i - 16] = env->xregs[i];   /* R[8:14] <- X[24:30] */
        }
    } else {
        for (i = 24; i < 29; i++) {
            env->fiq_regs[i - 24] = env->xregs[i];
        }
        env->banked_r13[bank_number(ARM_CPU_MODE_FIQ)] = env->xregs[29];
        env->banked_r14[r14_bank_number(ARM_CPU_MODE_FIQ)] = env->xregs[30];
    }

    env->regs[15] = env->pc;
}
8186
static void take_aarch32_exception(CPUARMState *env, int new_mode,
                                   uint32_t mask, uint32_t offset,
                                   uint32_t newpc)
{
    /*
     * Perform the actual AArch32 exception entry: switch to new_mode,
     * save the old CPSR to the new mode's SPSR, mask the interrupts in
     * 'mask', set LR to the preferred return address (PC + offset,
     * except in Hyp mode where ELR_EL2 is used), and branch to newpc.
     */
    switch_mode(env, new_mode);

    /*
     * Clear the software-step bit before reading CPSR so that it is
     * zero in both the new PSTATE and the SPSR value we save.
     */
    env->uncached_cpsr &= ~PSTATE_SS;
    env->spsr = cpsr_read(env);
    /* Clear IT bits.  */
    env->condexec_bits = 0;
    /* Switch to the new mode */
    env->uncached_cpsr = (env->uncached_cpsr & ~CPSR_M) | new_mode;
    /* Set new mode endianness from SCTLR.EE */
    env->uncached_cpsr &= ~CPSR_E;
    if (env->cp15.sctlr_el[arm_current_el(env)] & SCTLR_EE) {
        env->uncached_cpsr |= CPSR_E;
    }
    /* J and IL must be cleared for exception entry */
    env->uncached_cpsr &= ~(CPSR_IL | CPSR_J);
    env->daif |= mask;

    if (new_mode == ARM_CPU_MODE_HYP) {
        /* Hyp mode: instruction set from SCTLR_EL2.TE, return in ELR_EL2 */
        env->thumb = (env->cp15.sctlr_el[2] & SCTLR_TE) != 0;
        env->elr_el[2] = env->regs[15];
    } else {
        /*
         * SCTLR.TE selects the post-exception instruction set; only
         * consulted on v4T and later (pre-v4T CPUs have no Thumb).
         */
        if (arm_feature(env, ARM_FEATURE_V4T)) {
            env->thumb =
                (A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_TE) != 0;
        }
        env->regs[14] = env->regs[15] + offset;
    }
    env->regs[15] = newpc;
}
8228
8229static void arm_cpu_do_interrupt_aarch32_hyp(CPUState *cs)
8230{
8231
8232
8233
8234
8235
8236
8237
8238
8239
8240
8241
8242 uint32_t addr, mask;
8243 ARMCPU *cpu = ARM_CPU(cs);
8244 CPUARMState *env = &cpu->env;
8245
8246 switch (cs->exception_index) {
8247 case EXCP_UDEF:
8248 addr = 0x04;
8249 break;
8250 case EXCP_SWI:
8251 addr = 0x14;
8252 break;
8253 case EXCP_BKPT:
8254
8255 case EXCP_PREFETCH_ABORT:
8256 env->cp15.ifar_s = env->exception.vaddress;
8257 qemu_log_mask(CPU_LOG_INT, "...with HIFAR 0x%x\n",
8258 (uint32_t)env->exception.vaddress);
8259 addr = 0x0c;
8260 break;
8261 case EXCP_DATA_ABORT:
8262 env->cp15.dfar_s = env->exception.vaddress;
8263 qemu_log_mask(CPU_LOG_INT, "...with HDFAR 0x%x\n",
8264 (uint32_t)env->exception.vaddress);
8265 addr = 0x10;
8266 break;
8267 case EXCP_IRQ:
8268 addr = 0x18;
8269 break;
8270 case EXCP_FIQ:
8271 addr = 0x1c;
8272 break;
8273 case EXCP_HVC:
8274 addr = 0x08;
8275 break;
8276 case EXCP_HYP_TRAP:
8277 addr = 0x14;
8278 default:
8279 cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
8280 }
8281
8282 if (cs->exception_index != EXCP_IRQ && cs->exception_index != EXCP_FIQ) {
8283 if (!arm_feature(env, ARM_FEATURE_V8)) {
8284
8285
8286
8287
8288
8289 if (cs->exception_index == EXCP_PREFETCH_ABORT ||
8290 (cs->exception_index == EXCP_DATA_ABORT &&
8291 !(env->exception.syndrome & ARM_EL_ISV)) ||
8292 syn_get_ec(env->exception.syndrome) == EC_UNCATEGORIZED) {
8293 env->exception.syndrome &= ~ARM_EL_IL;
8294 }
8295 }
8296 env->cp15.esr_el[2] = env->exception.syndrome;
8297 }
8298
8299 if (arm_current_el(env) != 2 && addr < 0x14) {
8300 addr = 0x14;
8301 }
8302
8303 mask = 0;
8304 if (!(env->cp15.scr_el3 & SCR_EA)) {
8305 mask |= CPSR_A;
8306 }
8307 if (!(env->cp15.scr_el3 & SCR_IRQ)) {
8308 mask |= CPSR_I;
8309 }
8310 if (!(env->cp15.scr_el3 & SCR_FIQ)) {
8311 mask |= CPSR_F;
8312 }
8313
8314 addr += env->cp15.hvbar;
8315
8316 take_aarch32_exception(env, ARM_CPU_MODE_HYP, mask, 0, addr);
8317}
8318
static void arm_cpu_do_interrupt_aarch32(CPUState *cs)
{
    /*
     * Handle exception entry to an AArch32 target EL (EL1 or EL3;
     * EL2 entry is delegated to arm_cpu_do_interrupt_aarch32_hyp).
     * Selects the new mode, vector offset, interrupt masks and the
     * LR adjustment for the exception type, then performs the entry.
     */
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    uint32_t addr;
    uint32_t mask;
    int new_mode;
    uint32_t offset;
    uint32_t moe;

    /* If this is a debug exception we must update the DBGDSCR.MOE bits */
    switch (syn_get_ec(env->exception.syndrome)) {
    case EC_BREAKPOINT:
    case EC_BREAKPOINT_SAME_EL:
        moe = 1;
        break;
    case EC_WATCHPOINT:
    case EC_WATCHPOINT_SAME_EL:
        moe = 10;
        break;
    case EC_AA32_BKPT:
        moe = 3;
        break;
    case EC_VECTORCATCH:
        moe = 5;
        break;
    default:
        moe = 0;
        break;
    }

    if (moe) {
        env->cp15.mdscr_el1 = deposit64(env->cp15.mdscr_el1, 2, 4, moe);
    }

    if (env->exception.target_el == 2) {
        arm_cpu_do_interrupt_aarch32_hyp(cs);
        return;
    }

    switch (cs->exception_index) {
    case EXCP_UDEF:
        new_mode = ARM_CPU_MODE_UND;
        addr = 0x04;
        mask = CPSR_I;
        /* LR must point at the undefined insn: back up by insn width */
        if (env->thumb)
            offset = 2;
        else
            offset = 4;
        break;
    case EXCP_SWI:
        new_mode = ARM_CPU_MODE_SVC;
        addr = 0x08;
        mask = CPSR_I;
        /* The PC already points to the next instruction.  */
        offset = 0;
        break;
    case EXCP_BKPT:
        /* Fall through to prefetch abort.  */
    case EXCP_PREFETCH_ABORT:
        A32_BANKED_CURRENT_REG_SET(env, ifsr, env->exception.fsr);
        A32_BANKED_CURRENT_REG_SET(env, ifar, env->exception.vaddress);
        qemu_log_mask(CPU_LOG_INT, "...with IFSR 0x%x IFAR 0x%x\n",
                      env->exception.fsr, (uint32_t)env->exception.vaddress);
        new_mode = ARM_CPU_MODE_ABT;
        addr = 0x0c;
        mask = CPSR_A | CPSR_I;
        offset = 4;
        break;
    case EXCP_DATA_ABORT:
        A32_BANKED_CURRENT_REG_SET(env, dfsr, env->exception.fsr);
        A32_BANKED_CURRENT_REG_SET(env, dfar, env->exception.vaddress);
        qemu_log_mask(CPU_LOG_INT, "...with DFSR 0x%x DFAR 0x%x\n",
                      env->exception.fsr,
                      (uint32_t)env->exception.vaddress);
        new_mode = ARM_CPU_MODE_ABT;
        addr = 0x10;
        mask = CPSR_A | CPSR_I;
        offset = 8;
        break;
    case EXCP_IRQ:
        new_mode = ARM_CPU_MODE_IRQ;
        addr = 0x18;
        /* Disable IRQ and imprecise data aborts.  */
        mask = CPSR_A | CPSR_I;
        offset = 4;
        if (env->cp15.scr_el3 & SCR_IRQ) {
            /* IRQ routed to monitor mode */
            new_mode = ARM_CPU_MODE_MON;
            mask |= CPSR_F;
        }
        break;
    case EXCP_FIQ:
        new_mode = ARM_CPU_MODE_FIQ;
        addr = 0x1c;
        /* Disable FIQ, IRQ and imprecise data aborts.  */
        mask = CPSR_A | CPSR_I | CPSR_F;
        if (env->cp15.scr_el3 & SCR_FIQ) {
            /* FIQ routed to monitor mode */
            new_mode = ARM_CPU_MODE_MON;
        }
        offset = 4;
        break;
    case EXCP_VIRQ:
        new_mode = ARM_CPU_MODE_IRQ;
        addr = 0x18;
        /* Disable IRQ and imprecise data aborts.  */
        mask = CPSR_A | CPSR_I;
        offset = 4;
        break;
    case EXCP_VFIQ:
        new_mode = ARM_CPU_MODE_FIQ;
        addr = 0x1c;
        /* Disable FIQ, IRQ and imprecise data aborts.  */
        mask = CPSR_A | CPSR_I | CPSR_F;
        offset = 4;
        break;
    case EXCP_SMC:
        new_mode = ARM_CPU_MODE_MON;
        addr = 0x08;
        mask = CPSR_A | CPSR_I | CPSR_F;
        offset = 0;
        break;
    default:
        cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
        return; /* Never happens.  Keep compiler happy.  */
    }

    if (new_mode == ARM_CPU_MODE_MON) {
        addr += env->cp15.mvbar;
    } else if (A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_V) {
        /* High vectors (SCTLR.V): vector base fixed at 0xffff0000 */
        addr += 0xffff0000;
    } else {
        /* Normal vectors: base comes from the banked VBAR */
        addr += A32_BANKED_CURRENT_REG_GET(env, vbar);
    }

    /* Entering Monitor mode switches to the Secure state */
    if ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_MON) {
        env->cp15.scr_el3 &= ~SCR_NS;
    }

    take_aarch32_exception(env, new_mode, mask, offset, addr);
}
8467
8468
/* Handle exception entry to a target EL which is using AArch64 */
static void arm_cpu_do_interrupt_aarch64(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    unsigned int new_el = env->exception.target_el;
    target_ulong addr = env->cp15.vbar_el[new_el];
    unsigned int new_mode = aarch64_pstate_mode(new_el, true);
    unsigned int cur_el = arm_current_el(env);

    /* Give the SVE state a chance to adjust for the EL change */
    aarch64_sve_change_el(env, cur_el, new_el, is_a64(env));

    if (cur_el < new_el) {
        /*
         * Exception taken from a lower EL: the vector offset depends on
         * whether the EL immediately below the target is using AArch64
         * (0x400) or AArch32 (0x600).
         */
        bool is_aa64;

        switch (new_el) {
        case 3:
            is_aa64 = (env->cp15.scr_el3 & SCR_RW) != 0;
            break;
        case 2:
            is_aa64 = (env->cp15.hcr_el2 & HCR_RW) != 0;
            break;
        case 1:
            is_aa64 = is_a64(env);
            break;
        default:
            g_assert_not_reached();
        }

        if (is_aa64) {
            addr += 0x400;
        } else {
            addr += 0x600;
        }
    } else if (pstate_read(env) & PSTATE_SP) {
        /* Same EL, using SP_ELx: the SPx vector entries start at 0x200 */
        addr += 0x200;
    }

    switch (cs->exception_index) {
    case EXCP_PREFETCH_ABORT:
    case EXCP_DATA_ABORT:
        env->cp15.far_el[new_el] = env->exception.vaddress;
        qemu_log_mask(CPU_LOG_INT, "...with FAR 0x%" PRIx64 "\n",
                      env->cp15.far_el[new_el]);
        /* fall through -- aborts also record the syndrome in ESR */
    case EXCP_BKPT:
    case EXCP_UDEF:
    case EXCP_SWI:
    case EXCP_HVC:
    case EXCP_HYP_TRAP:
    case EXCP_SMC:
        if (syn_get_ec(env->exception.syndrome) == EC_ADVSIMDFPACCESSTRAP) {
            /*
             * The low 20 bits of an AArch32 FP/SIMD access-trap syndrome
             * are not valid for an exception taken to AArch64; mask
             * them out to get a valid AArch64 syndrome.
             */
            env->exception.syndrome &= ~MAKE_64BIT_MASK(0, 20);
        }
        env->cp15.esr_el[new_el] = env->exception.syndrome;
        break;
    case EXCP_IRQ:
    case EXCP_VIRQ:
        addr += 0x80;
        break;
    case EXCP_FIQ:
    case EXCP_VFIQ:
        addr += 0x100;
        break;
    case EXCP_SEMIHOST:
        qemu_log_mask(CPU_LOG_INT,
                      "...handling as semihosting call 0x%" PRIx64 "\n",
                      env->xregs[0]);
        env->xregs[0] = do_arm_semihosting(env);
        return;
    default:
        cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
    }

    /* Save the old state to SPSR/ELR of the target EL */
    if (is_a64(env)) {
        env->banked_spsr[aarch64_banked_spsr_index(new_el)] = pstate_read(env);
        aarch64_save_sp(env, arm_current_el(env));
        env->elr_el[new_el] = env->pc;
    } else {
        env->banked_spsr[aarch64_banked_spsr_index(new_el)] = cpsr_read(env);
        env->elr_el[new_el] = env->regs[15];

        /* Coming from AArch32: mirror the 32-bit regs into xregs */
        aarch64_sync_32_to_64(env);

        env->condexec_bits = 0;
    }
    qemu_log_mask(CPU_LOG_INT, "...with ELR 0x%" PRIx64 "\n",
                  env->elr_el[new_el]);

    /* Enter the target EL with all DAIF exceptions masked */
    pstate_write(env, PSTATE_DAIF | new_mode);
    env->aarch64 = 1;
    aarch64_restore_sp(env, new_el);

    env->pc = addr;

    qemu_log_mask(CPU_LOG_INT, "...to EL%d PC 0x%" PRIx64 " PSTATE 0x%x\n",
                  new_el, env->pc, pstate_read(env));
}
8579
static inline bool check_for_semihosting(CPUState *cs)
{
    /*
     * Check whether this exception is a semihosting call; if so
     * then handle it and return true; otherwise return false.
     */
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;

    if (is_a64(env)) {
        if (cs->exception_index == EXCP_SEMIHOST) {
            /*
             * 64-bit semihosting: EXCP_SEMIHOST is only raised when the
             * call is legitimate (presumably validated at translate
             * time -- confirm against the translator), so handle it
             * unconditionally here.
             */
            qemu_log_mask(CPU_LOG_INT,
                          "...handling as semihosting call 0x%" PRIx64 "\n",
                          env->xregs[0]);
            env->xregs[0] = do_arm_semihosting(env);
            return true;
        }
        return false;
    } else {
        uint32_t imm;

        /*
         * 32-bit: only intercept when semihosting is enabled and the
         * caller is privileged (never from User mode), except for
         * EXCP_SEMIHOST which is pre-validated.
         */
        if (cs->exception_index != EXCP_SEMIHOST &&
            (!semihosting_enabled() ||
             ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_USR))) {
            return false;
        }

        switch (cs->exception_index) {
        case EXCP_SEMIHOST:
            /* Always a semihosting call; no immediate to check */
            break;
        case EXCP_SWI:
            /* SVC with the semihosting immediate (0xab Thumb / 0x123456 ARM) */
            if (env->thumb) {
                imm = arm_lduw_code(env, env->regs[15] - 2, arm_sctlr_b(env))
                    & 0xff;
                if (imm == 0xab) {
                    break;
                }
            } else {
                imm = arm_ldl_code(env, env->regs[15] - 4, arm_sctlr_b(env))
                    & 0xffffff;
                if (imm == 0x123456) {
                    break;
                }
            }
            return false;
        case EXCP_BKPT:
            /* Thumb BKPT 0xab; PC still points at the BKPT, so step over it */
            if (env->thumb) {
                imm = arm_lduw_code(env, env->regs[15], arm_sctlr_b(env))
                    & 0xff;
                if (imm == 0xab) {
                    env->regs[15] += 2;
                    break;
                }
            }
            return false;
        default:
            return false;
        }

        qemu_log_mask(CPU_LOG_INT,
                      "...handling as semihosting call 0x%x\n",
                      env->regs[0]);
        env->regs[0] = do_arm_semihosting(env);
        return true;
    }
}
8658
8659
8660
8661
8662
8663
/*
 * Handle a CPU exception for A and R profile CPUs (M profile uses
 * arm_v7m_cpu_do_interrupt; note the assert below). PSCI calls and
 * semihosting are intercepted here; everything else is dispatched to
 * the AArch32 or AArch64 entry code according to the target EL's
 * register width.
 */
void arm_cpu_do_interrupt(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    unsigned int new_el = env->exception.target_el;

    assert(!arm_feature(env, ARM_FEATURE_M));

    arm_log_exception(cs->exception_index);
    qemu_log_mask(CPU_LOG_INT, "...from EL%d to EL%d\n", arm_current_el(env),
                  new_el);
    if (qemu_loglevel_mask(CPU_LOG_INT)
        && !excp_is_internal(cs->exception_index)) {
        qemu_log_mask(CPU_LOG_INT, "...with ESR 0x%x/0x%" PRIx32 "\n",
                      syn_get_ec(env->exception.syndrome),
                      env->exception.syndrome);
    }

    /* SMC/HVC used as a PSCI conduit is emulated, not delivered */
    if (arm_is_psci_call(cpu, cs->exception_index)) {
        arm_handle_psci_call(cpu);
        qemu_log_mask(CPU_LOG_INT, "...handled as PSCI call\n");
        return;
    }

    /* Semihosting calls are emulated directly rather than taken */
    if (check_for_semihosting(cs)) {
        return;
    }

    /* Hooks and CPU state changes below require the iothread lock */
    g_assert(qemu_mutex_iothread_locked());

    arm_call_pre_el_change_hook(cpu);

    assert(!excp_is_internal(cs->exception_index));
    if (arm_el_is_aa64(env, new_el)) {
        arm_cpu_do_interrupt_aarch64(cs);
    } else {
        arm_cpu_do_interrupt_aarch32(cs);
    }

    arm_call_el_change_hook(cpu);

    /* Force a TB lookup after an EL change (not needed under KVM) */
    if (!kvm_enabled()) {
        cs->interrupt_request |= CPU_INTERRUPT_EXITTB;
    }
}
8717
8718
8719static inline uint32_t regime_el(CPUARMState *env, ARMMMUIdx mmu_idx)
8720{
8721 switch (mmu_idx) {
8722 case ARMMMUIdx_S2NS:
8723 case ARMMMUIdx_S1E2:
8724 return 2;
8725 case ARMMMUIdx_S1E3:
8726 return 3;
8727 case ARMMMUIdx_S1SE0:
8728 return arm_el_is_aa64(env, 3) ? 1 : 3;
8729 case ARMMMUIdx_S1SE1:
8730 case ARMMMUIdx_S1NSE0:
8731 case ARMMMUIdx_S1NSE1:
8732 case ARMMMUIdx_MPrivNegPri:
8733 case ARMMMUIdx_MUserNegPri:
8734 case ARMMMUIdx_MPriv:
8735 case ARMMMUIdx_MUser:
8736 case ARMMMUIdx_MSPrivNegPri:
8737 case ARMMMUIdx_MSUserNegPri:
8738 case ARMMMUIdx_MSPriv:
8739 case ARMMMUIdx_MSUser:
8740 return 1;
8741 default:
8742 g_assert_not_reached();
8743 }
8744}
8745
8746
8747static inline uint32_t regime_sctlr(CPUARMState *env, ARMMMUIdx mmu_idx)
8748{
8749 return env->cp15.sctlr_el[regime_el(env, mmu_idx)];
8750}
8751
8752
/* Return true if the specified stage of address translation is disabled */
static inline bool regime_translation_disabled(CPUARMState *env,
                                               ARMMMUIdx mmu_idx)
{
    if (arm_feature(env, ARM_FEATURE_M)) {
        /* M profile: the MPU_CTRL ENABLE/HFNMIENA bits decide */
        switch (env->v7m.mpu_ctrl[regime_is_secure(env, mmu_idx)] &
                (R_V7M_MPU_CTRL_ENABLE_MASK | R_V7M_MPU_CTRL_HFNMIENA_MASK)) {
        case R_V7M_MPU_CTRL_ENABLE_MASK:
            /* Enabled, but not for HardFault and NMI (negative priority) */
            return mmu_idx & ARM_MMU_IDX_M_NEGPRI;
        case R_V7M_MPU_CTRL_ENABLE_MASK | R_V7M_MPU_CTRL_HFNMIENA_MASK:
            /* Enabled for all cases */
            return false;
        case 0:
        default:
            /*
             * HFNMIENA set and ENABLE clear is UNPREDICTABLE; we choose
             * to behave as if the MPU is disabled.
             */
            return true;
        }
    }

    if (mmu_idx == ARMMMUIdx_S2NS) {
        /* HCR.DC means HCR.VM behaves as 1 */
        return (env->cp15.hcr_el2 & (HCR_DC | HCR_VM)) == 0;
    }

    if (env->cp15.hcr_el2 & HCR_TGE) {
        /* TGE means the NS EL0/EL1 stage-1 translation is disabled */
        if (!regime_is_secure(env, mmu_idx) && regime_el(env, mmu_idx) == 1) {
            return true;
        }
    }

    if ((env->cp15.hcr_el2 & HCR_DC) &&
        (mmu_idx == ARMMMUIdx_S1NSE0 || mmu_idx == ARMMMUIdx_S1NSE1)) {
        /* HCR.DC means SCTLR_EL1.M behaves as 0 */
        return true;
    }

    /* Otherwise the regime's SCTLR.M bit decides */
    return (regime_sctlr(env, mmu_idx) & SCTLR_M) == 0;
}
8794
8795static inline bool regime_translation_big_endian(CPUARMState *env,
8796 ARMMMUIdx mmu_idx)
8797{
8798 return (regime_sctlr(env, mmu_idx) & SCTLR_EE) != 0;
8799}
8800
8801
8802static inline TCR *regime_tcr(CPUARMState *env, ARMMMUIdx mmu_idx)
8803{
8804 if (mmu_idx == ARMMMUIdx_S2NS) {
8805 return &env->cp15.vtcr_el2;
8806 }
8807 return &env->cp15.tcr_el[regime_el(env, mmu_idx)];
8808}
8809
8810
8811
8812
8813static inline ARMMMUIdx stage_1_mmu_idx(ARMMMUIdx mmu_idx)
8814{
8815 if (mmu_idx == ARMMMUIdx_S12NSE0 || mmu_idx == ARMMMUIdx_S12NSE1) {
8816 mmu_idx += (ARMMMUIdx_S1NSE0 - ARMMMUIdx_S12NSE0);
8817 }
8818 return mmu_idx;
8819}
8820
8821
8822uint32_t arm_regime_tbi0(CPUARMState *env, ARMMMUIdx mmu_idx)
8823{
8824 TCR *tcr;
8825 uint32_t el;
8826
8827
8828
8829
8830 mmu_idx = stage_1_mmu_idx(mmu_idx);
8831
8832 tcr = regime_tcr(env, mmu_idx);
8833 el = regime_el(env, mmu_idx);
8834
8835 if (el > 1) {
8836 return extract64(tcr->raw_tcr, 20, 1);
8837 } else {
8838 return extract64(tcr->raw_tcr, 37, 1);
8839 }
8840}
8841
8842
8843uint32_t arm_regime_tbi1(CPUARMState *env, ARMMMUIdx mmu_idx)
8844{
8845 TCR *tcr;
8846 uint32_t el;
8847
8848
8849
8850
8851 mmu_idx = stage_1_mmu_idx(mmu_idx);
8852
8853 tcr = regime_tcr(env, mmu_idx);
8854 el = regime_el(env, mmu_idx);
8855
8856 if (el > 1) {
8857 return 0;
8858 } else {
8859 return extract64(tcr->raw_tcr, 38, 1);
8860 }
8861}
8862
8863
8864static inline uint64_t regime_ttbr(CPUARMState *env, ARMMMUIdx mmu_idx,
8865 int ttbrn)
8866{
8867 if (mmu_idx == ARMMMUIdx_S2NS) {
8868 return env->cp15.vttbr_el2;
8869 }
8870 if (ttbrn == 0) {
8871 return env->cp15.ttbr0_el[regime_el(env, mmu_idx)];
8872 } else {
8873 return env->cp15.ttbr1_el[regime_el(env, mmu_idx)];
8874 }
8875}
8876
8877
8878static inline bool regime_using_lpae_format(CPUARMState *env,
8879 ARMMMUIdx mmu_idx)
8880{
8881 int el = regime_el(env, mmu_idx);
8882 if (el == 2 || arm_el_is_aa64(env, el)) {
8883 return true;
8884 }
8885 if (arm_feature(env, ARM_FEATURE_LPAE)
8886 && (regime_tcr(env, mmu_idx)->raw_tcr & TTBCR_EAE)) {
8887 return true;
8888 }
8889 return false;
8890}
8891
8892
8893
8894
8895bool arm_s1_regime_using_lpae_format(CPUARMState *env, ARMMMUIdx mmu_idx)
8896{
8897 mmu_idx = stage_1_mmu_idx(mmu_idx);
8898
8899 return regime_using_lpae_format(env, mmu_idx);
8900}
8901
8902static inline bool regime_is_user(CPUARMState *env, ARMMMUIdx mmu_idx)
8903{
8904 switch (mmu_idx) {
8905 case ARMMMUIdx_S1SE0:
8906 case ARMMMUIdx_S1NSE0:
8907 case ARMMMUIdx_MUser:
8908 case ARMMMUIdx_MSUser:
8909 case ARMMMUIdx_MUserNegPri:
8910 case ARMMMUIdx_MSUserNegPri:
8911 return true;
8912 default:
8913 return false;
8914 case ARMMMUIdx_S12NSE0:
8915 case ARMMMUIdx_S12NSE1:
8916 g_assert_not_reached();
8917 }
8918}
8919
8920
8921
8922
8923
8924
8925
8926
8927
8928static inline int ap_to_rw_prot(CPUARMState *env, ARMMMUIdx mmu_idx,
8929 int ap, int domain_prot)
8930{
8931 bool is_user = regime_is_user(env, mmu_idx);
8932
8933 if (domain_prot == 3) {
8934 return PAGE_READ | PAGE_WRITE;
8935 }
8936
8937 switch (ap) {
8938 case 0:
8939 if (arm_feature(env, ARM_FEATURE_V7)) {
8940 return 0;
8941 }
8942 switch (regime_sctlr(env, mmu_idx) & (SCTLR_S | SCTLR_R)) {
8943 case SCTLR_S:
8944 return is_user ? 0 : PAGE_READ;
8945 case SCTLR_R:
8946 return PAGE_READ;
8947 default:
8948 return 0;
8949 }
8950 case 1:
8951 return is_user ? 0 : PAGE_READ | PAGE_WRITE;
8952 case 2:
8953 if (is_user) {
8954 return PAGE_READ;
8955 } else {
8956 return PAGE_READ | PAGE_WRITE;
8957 }
8958 case 3:
8959 return PAGE_READ | PAGE_WRITE;
8960 case 4:
8961 return 0;
8962 case 5:
8963 return is_user ? 0 : PAGE_READ;
8964 case 6:
8965 return PAGE_READ;
8966 case 7:
8967 if (!arm_feature(env, ARM_FEATURE_V6K)) {
8968 return 0;
8969 }
8970 return PAGE_READ;
8971 default:
8972 g_assert_not_reached();
8973 }
8974}
8975
8976
8977
8978
8979
8980
8981
8982static inline int simple_ap_to_rw_prot_is_user(int ap, bool is_user)
8983{
8984 switch (ap) {
8985 case 0:
8986 return is_user ? 0 : PAGE_READ | PAGE_WRITE;
8987 case 1:
8988 return PAGE_READ | PAGE_WRITE;
8989 case 2:
8990 return is_user ? 0 : PAGE_READ;
8991 case 3:
8992 return PAGE_READ;
8993 default:
8994 g_assert_not_reached();
8995 }
8996}
8997
8998static inline int
8999simple_ap_to_rw_prot(CPUARMState *env, ARMMMUIdx mmu_idx, int ap)
9000{
9001 return simple_ap_to_rw_prot_is_user(ap, regime_is_user(env, mmu_idx));
9002}
9003
9004
9005
9006
9007
9008
9009
9010static int get_S2prot(CPUARMState *env, int s2ap, int xn)
9011{
9012 int prot = 0;
9013
9014 if (s2ap & 1) {
9015 prot |= PAGE_READ;
9016 }
9017 if (s2ap & 2) {
9018 prot |= PAGE_WRITE;
9019 }
9020 if (!xn) {
9021 if (arm_el_is_aa64(env, 2) || prot & PAGE_READ) {
9022 prot |= PAGE_EXEC;
9023 }
9024 }
9025 return prot;
9026}
9027
9028
9029
9030
9031
9032
9033
9034
9035
9036
9037
/* Translate stage 1 long-descriptor AP/NS/XN/PXN attribute bits into
 * page protection flags for the given regime, applying the WXN/UWXN
 * "write implies never-execute" controls where relevant.
 * Only valid for stage 1 regimes (asserted below).
 */
static int get_S1prot(CPUARMState *env, ARMMMUIdx mmu_idx, bool is_aa64,
                      int ap, int ns, int xn, int pxn)
{
    bool is_user = regime_is_user(env, mmu_idx);
    int prot_rw, user_rw;
    bool have_wxn;
    int wxn = 0;

    assert(mmu_idx != ARMMMUIdx_S2NS);

    user_rw = simple_ap_to_rw_prot_is_user(ap, true);
    if (is_user) {
        prot_rw = user_rw;
    } else {
        prot_rw = simple_ap_to_rw_prot_is_user(ap, false);
    }

    /* Secure regime with SCR.SIF set: never executable if the page is
     * marked non-secure, so return the R/W bits without PAGE_EXEC.
     */
    if (ns && arm_is_secure(env) && (env->cp15.scr_el3 & SCR_SIF)) {
        return prot_rw;
    }

    /* NOTE(review): have_wxn is approximated by ARM_FEATURE_LPAE here;
     * strictly the [U]WXN controls belong to v7-with-EL2 / v8 — this
     * assumes all LPAE-capable CPUs we model have them. Confirm if
     * ARM_FEATURE_EL2 gating is ever introduced.
     */
    have_wxn = arm_feature(env, ARM_FEATURE_LPAE);

    if (have_wxn) {
        wxn = regime_sctlr(env, mmu_idx) & SCTLR_WXN;
    }

    if (is_aa64) {
        switch (regime_el(env, mmu_idx)) {
        case 1:
            /* At EL1, privileged execute is also denied if the page is
             * user-writable (in addition to the explicit PXN bit).
             */
            if (!is_user) {
                xn = pxn || (user_rw & PAGE_WRITE);
            }
            break;
        case 2:
        case 3:
            break;
        }
    } else if (arm_feature(env, ARM_FEATURE_V7)) {
        switch (regime_el(env, mmu_idx)) {
        case 1:
        case 3:
            if (is_user) {
                /* User execute also requires user read permission. */
                xn = xn || !(user_rw & PAGE_READ);
            } else {
                int uwxn = 0;
                if (have_wxn) {
                    uwxn = regime_sctlr(env, mmu_idx) & SCTLR_UWXN;
                }
                /* Privileged execute requires privileged read, PXN
                 * clear, and (with UWXN) no user-writable mapping.
                 */
                xn = xn || !(prot_rw & PAGE_READ) || pxn ||
                     (uwxn && (user_rw & PAGE_WRITE));
            }
            break;
        case 2:
            break;
        }
    } else {
        /* Pre-v7: no XN/WXN controls in this decode. */
        xn = wxn = 0;
    }

    if (xn || (wxn && (prot_rw & PAGE_WRITE))) {
        return prot_rw;
    }
    return prot_rw | PAGE_EXEC;
}
9108
/* Compute the level 1 translation table address for a short-descriptor
 * (AArch32 PL0/PL1) walk, selecting TTBR0 or TTBR1 by the TTBCR-derived
 * address mask. Returns false if the selected walk is disabled by
 * TTBCR.PD0/PD1, true on success with *table set.
 */
static bool get_level1_table_address(CPUARMState *env, ARMMMUIdx mmu_idx,
                                     uint32_t *table, uint32_t address)
{
    TCR *tcr = regime_tcr(env, mmu_idx);

    if (address & tcr->mask) {
        if (tcr->raw_tcr & TTBCR_PD1) {
            /* Translation table walk disabled for TTBR1 */
            return false;
        }
        *table = regime_ttbr(env, mmu_idx, 1) & 0xffffc000;
    } else {
        if (tcr->raw_tcr & TTBCR_PD0) {
            /* Translation table walk disabled for TTBR0 */
            return false;
        }
        *table = regime_ttbr(env, mmu_idx, 0) & tcr->base_mask;
    }
    /* Index by VA bits [31:20] into the level 1 table. */
    *table |= (address >> 18) & 0x3ffc;
    return true;
}
9131
9132
/* Translate a stage 1 page-table walk access through stage 2 if needed.
 * Returns the stage-2-translated address, or ~0 on a stage 2 fault
 * (in which case *fi has been filled in with s1ptw/stage2 set).
 */
static hwaddr S1_ptw_translate(CPUARMState *env, ARMMMUIdx mmu_idx,
                               hwaddr addr, MemTxAttrs txattrs,
                               ARMMMUFaultInfo *fi)
{
    if ((mmu_idx == ARMMMUIdx_S1NSE0 || mmu_idx == ARMMMUIdx_S1NSE1) &&
        !regime_translation_disabled(env, ARMMMUIdx_S2NS)) {
        target_ulong s2size;
        hwaddr s2pa;
        int s2prot;
        int ret;
        ARMCacheAttrs cacheattrs = {};
        ARMCacheAttrs *pcacheattrs = NULL;

        if (env->cp15.hcr_el2 & HCR_PTW) {
            /* HCR_PTW: stage 1 walks to Device memory must fault, so
             * request the stage 2 cache attributes so we can check the
             * memory type below.
             */
            pcacheattrs = &cacheattrs;
        }

        ret = get_phys_addr_lpae(env, addr, 0, ARMMMUIdx_S2NS, &s2pa,
                                 &txattrs, &s2prot, &s2size, fi, pcacheattrs);
        if (ret) {
            /* Stage 2 translation of the walk address itself faulted. */
            assert(fi->type != ARMFault_None);
            fi->s2addr = addr;
            fi->stage2 = true;
            fi->s1ptw = true;
            return ~0;
        }
        if (pcacheattrs && (pcacheattrs->attrs & 0xf0) == 0) {
            /* Access was to Device memory: generate a Permission fault
             * (attrs high nibble 0 is the Device encoding in MAIR format).
             */
            fi->type = ARMFault_Permission;
            fi->s2addr = addr;
            fi->stage2 = true;
            fi->s1ptw = true;
            return ~0;
        }
        addr = s2pa;
    }
    return addr;
}
9176
9177
9178static uint32_t arm_ldl_ptw(CPUState *cs, hwaddr addr, bool is_secure,
9179 ARMMMUIdx mmu_idx, ARMMMUFaultInfo *fi)
9180{
9181 ARMCPU *cpu = ARM_CPU(cs);
9182 CPUARMState *env = &cpu->env;
9183 MemTxAttrs attrs = {};
9184 MemTxResult result = MEMTX_OK;
9185 AddressSpace *as;
9186 uint32_t data;
9187
9188 attrs.secure = is_secure;
9189 as = arm_addressspace(cs, attrs);
9190 addr = S1_ptw_translate(env, mmu_idx, addr, attrs, fi);
9191 if (fi->s1ptw) {
9192 return 0;
9193 }
9194 if (regime_translation_big_endian(env, mmu_idx)) {
9195 data = address_space_ldl_be(as, addr, attrs, &result);
9196 } else {
9197 data = address_space_ldl_le(as, addr, attrs, &result);
9198 }
9199 if (result == MEMTX_OK) {
9200 return data;
9201 }
9202 fi->type = ARMFault_SyncExternalOnWalk;
9203 fi->ea = arm_extabort_type(result);
9204 return 0;
9205}
9206
9207static uint64_t arm_ldq_ptw(CPUState *cs, hwaddr addr, bool is_secure,
9208 ARMMMUIdx mmu_idx, ARMMMUFaultInfo *fi)
9209{
9210 ARMCPU *cpu = ARM_CPU(cs);
9211 CPUARMState *env = &cpu->env;
9212 MemTxAttrs attrs = {};
9213 MemTxResult result = MEMTX_OK;
9214 AddressSpace *as;
9215 uint64_t data;
9216
9217 attrs.secure = is_secure;
9218 as = arm_addressspace(cs, attrs);
9219 addr = S1_ptw_translate(env, mmu_idx, addr, attrs, fi);
9220 if (fi->s1ptw) {
9221 return 0;
9222 }
9223 if (regime_translation_big_endian(env, mmu_idx)) {
9224 data = address_space_ldq_be(as, addr, attrs, &result);
9225 } else {
9226 data = address_space_ldq_le(as, addr, attrs, &result);
9227 }
9228 if (result == MEMTX_OK) {
9229 return data;
9230 }
9231 fi->type = ARMFault_SyncExternalOnWalk;
9232 fi->ea = arm_extabort_type(result);
9233 return 0;
9234}
9235
/* Short-descriptor (pre-v6) table walk: translate a virtual address to
 * a physical address and protection flags. Returns true on a fault,
 * with *fi filled in; false on success with *phys_ptr/*prot/*page_size
 * set.
 */
static bool get_phys_addr_v5(CPUARMState *env, uint32_t address,
                             MMUAccessType access_type, ARMMMUIdx mmu_idx,
                             hwaddr *phys_ptr, int *prot,
                             target_ulong *page_size,
                             ARMMMUFaultInfo *fi)
{
    CPUState *cs = CPU(arm_env_get_cpu(env));
    int level = 1;
    uint32_t table;
    uint32_t desc;
    int type;
    int ap;
    int domain = 0;
    int domain_prot;
    hwaddr phys_addr;
    uint32_t dacr;

    /* Pagetable walk. */
    /* Lookup l1 descriptor. */
    if (!get_level1_table_address(env, mmu_idx, &table, address)) {
        /* Section translation fault if page walk is disabled by PD0 or PD1 */
        fi->type = ARMFault_Translation;
        goto do_fault;
    }
    desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx),
                       mmu_idx, fi);
    if (fi->type != ARMFault_None) {
        goto do_fault;
    }
    type = (desc & 3);
    domain = (desc >> 5) & 0x0f;
    if (regime_el(env, mmu_idx) == 1) {
        dacr = env->cp15.dacr_ns;
    } else {
        dacr = env->cp15.dacr_s;
    }
    domain_prot = (dacr >> (domain * 2)) & 3;
    if (type == 0) {
        /* Section translation fault.  */
        fi->type = ARMFault_Translation;
        goto do_fault;
    }
    if (type != 2) {
        level = 2;
    }
    if (domain_prot == 0 || domain_prot == 2) {
        fi->type = ARMFault_Domain;
        goto do_fault;
    }
    if (type == 2) {
        /* 1Mb section.  */
        phys_addr = (desc & 0xfff00000) | (address & 0x000fffff);
        ap = (desc >> 10) & 3;
        *page_size = 1024 * 1024;
    } else {
        /* Lookup l2 entry.  */
        if (type == 1) {
            /* Coarse pagetable.  */
            table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc);
        } else {
            /* Fine pagetable.  */
            table = (desc & 0xfffff000) | ((address >> 8) & 0xffc);
        }
        desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx),
                           mmu_idx, fi);
        if (fi->type != ARMFault_None) {
            goto do_fault;
        }
        switch (desc & 3) {
        case 0: /* Page translation fault.  */
            fi->type = ARMFault_Translation;
            goto do_fault;
        case 1: /* 64k page.  */
            phys_addr = (desc & 0xffff0000) | (address & 0xffff);
            ap = (desc >> (4 + ((address >> 13) & 6))) & 3;
            *page_size = 0x10000;
            break;
        case 2: /* 4k page.  */
            phys_addr = (desc & 0xfffff000) | (address & 0xfff);
            ap = (desc >> (4 + ((address >> 9) & 6))) & 3;
            *page_size = 0x1000;
            break;
        case 3: /* 1k page, or v6/XScale "extended small (4k) page" */
            if (type == 1) {
                /* Only supported from v6 / XScale onwards. */
                if (arm_feature(env, ARM_FEATURE_XSCALE)
                    || arm_feature(env, ARM_FEATURE_V6)) {
                    phys_addr = (desc & 0xfffff000) | (address & 0xfff);
                    *page_size = 0x1000;
                } else {
                    /* UNPREDICTABLE in earlier architectures; we choose
                     * to take a page translation fault.
                     */
                    fi->type = ARMFault_Translation;
                    goto do_fault;
                }
            } else {
                phys_addr = (desc & 0xfffffc00) | (address & 0x3ff);
                *page_size = 0x400;
            }
            ap = (desc >> 4) & 3;
            break;
        default:
            /* Never happens, but compiler isn't smart enough to tell.  */
            abort();
        }
    }
    *prot = ap_to_rw_prot(env, mmu_idx, ap, domain_prot);
    /* Any readable/writable page is also executable in this format. */
    *prot |= *prot ? PAGE_EXEC : 0;
    if (!(*prot & (1 << access_type))) {
        /* Access permission fault.  */
        fi->type = ARMFault_Permission;
        goto do_fault;
    }
    *phys_ptr = phys_addr;
    return false;
do_fault:
    fi->domain = domain;
    fi->level = level;
    return true;
}
9357
/* Short-descriptor (v6+) table walk: translate a virtual address to a
 * physical address, protection flags and memory attributes. Returns
 * true on a fault with *fi filled in; false on success.
 */
static bool get_phys_addr_v6(CPUARMState *env, uint32_t address,
                             MMUAccessType access_type, ARMMMUIdx mmu_idx,
                             hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot,
                             target_ulong *page_size, ARMMMUFaultInfo *fi)
{
    CPUState *cs = CPU(arm_env_get_cpu(env));
    int level = 1;
    uint32_t table;
    uint32_t desc;
    uint32_t xn;
    uint32_t pxn = 0;
    int type;
    int ap;
    int domain = 0;
    int domain_prot;
    hwaddr phys_addr;
    uint32_t dacr;
    bool ns;

    /* Pagetable walk. */
    /* Lookup l1 descriptor. */
    if (!get_level1_table_address(env, mmu_idx, &table, address)) {
        /* Section translation fault if page walk is disabled by PD0 or PD1 */
        fi->type = ARMFault_Translation;
        goto do_fault;
    }
    desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx),
                       mmu_idx, fi);
    if (fi->type != ARMFault_None) {
        goto do_fault;
    }
    type = (desc & 3);
    if (type == 0 || (type == 3 && !arm_feature(env, ARM_FEATURE_PXN))) {
        /* Section translation fault, or attempt to use the encoding
         * which is Reserved on implementations without PXN.
         */
        fi->type = ARMFault_Translation;
        goto do_fault;
    }
    if ((type == 1) || !(desc & (1 << 18))) {
        /* Page or Section (not Supersection): domain bits are valid. */
        domain = (desc >> 5) & 0x0f;
    }
    if (regime_el(env, mmu_idx) == 1) {
        dacr = env->cp15.dacr_ns;
    } else {
        dacr = env->cp15.dacr_s;
    }
    if (type == 1) {
        level = 2;
    }
    domain_prot = (dacr >> (domain * 2)) & 3;
    if (domain_prot == 0 || domain_prot == 2) {
        /* Section or Page domain fault */
        fi->type = ARMFault_Domain;
        goto do_fault;
    }
    if (type != 1) {
        if (desc & (1 << 18)) {
            /* Supersection: 16MB, with extended base address bits. */
            phys_addr = (desc & 0xff000000) | (address & 0x00ffffff);
            phys_addr |= (uint64_t)extract32(desc, 20, 4) << 32;
            phys_addr |= (uint64_t)extract32(desc, 5, 4) << 36;
            *page_size = 0x1000000;
        } else {
            /* Section: 1MB. */
            phys_addr = (desc & 0xfff00000) | (address & 0x000fffff);
            *page_size = 0x100000;
        }
        ap = ((desc >> 10) & 3) | ((desc >> 13) & 4);
        xn = desc & (1 << 4);
        pxn = desc & 1;
        ns = extract32(desc, 19, 1);
    } else {
        if (arm_feature(env, ARM_FEATURE_PXN)) {
            pxn = (desc >> 2) & 1;
        }
        ns = extract32(desc, 3, 1);
        /* Lookup l2 entry.  */
        table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc);
        desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx),
                           mmu_idx, fi);
        if (fi->type != ARMFault_None) {
            goto do_fault;
        }
        ap = ((desc >> 4) & 3) | ((desc >> 7) & 4);
        switch (desc & 3) {
        case 0: /* Page translation fault.  */
            fi->type = ARMFault_Translation;
            goto do_fault;
        case 1: /* 64k page.  */
            phys_addr = (desc & 0xffff0000) | (address & 0xffff);
            xn = desc & (1 << 15);
            *page_size = 0x10000;
            break;
        case 2: case 3: /* 4k page.  */
            phys_addr = (desc & 0xfffff000) | (address & 0xfff);
            xn = desc & 1;
            *page_size = 0x1000;
            break;
        default:
            /* Never happens, but compiler isn't smart enough to tell.  */
            abort();
        }
    }
    if (domain_prot == 3) {
        /* Domain "manager": all accesses permitted. */
        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
    } else {
        if (pxn && !regime_is_user(env, mmu_idx)) {
            xn = 1;
        }
        if (xn && access_type == MMU_INST_FETCH) {
            fi->type = ARMFault_Permission;
            goto do_fault;
        }

        if (arm_feature(env, ARM_FEATURE_V6K) &&
            (regime_sctlr(env, mmu_idx) & SCTLR_AFE)) {
            /* Simplified (AFE) model: AP[0] is the access flag. */
            if ((ap & 1) == 0) {
                /* Access flag fault.  */
                fi->type = ARMFault_AccessFlag;
                goto do_fault;
            }
            *prot = simple_ap_to_rw_prot(env, mmu_idx, ap >> 1);
        } else {
            *prot = ap_to_rw_prot(env, mmu_idx, ap, domain_prot);
        }
        if (*prot && !xn) {
            *prot |= PAGE_EXEC;
        }
        if (!(*prot & (1 << access_type))) {
            /* Access permission fault.  */
            fi->type = ARMFault_Permission;
            goto do_fault;
        }
    }
    if (ns) {
        /* The NS bit will (as required by the architecture) have no effect
         * if the CPU doesn't support TZ or this is a non-secure translation
         * regime, because the attribute will already be non-secure.
         */
        attrs->secure = false;
    }
    *phys_ptr = phys_addr;
    return false;
do_fault:
    fi->domain = domain;
    fi->level = level;
    return true;
}
9509
9510
9511
9512
9513
9514
9515
9516
9517
9518
9519
9520
/* Check that the stage 2 translation configuration (starting level,
 * input address size and granule stride) is architecturally valid.
 * Returns true if the combination is acceptable, false if it should
 * result in a Translation fault.
 */
static bool check_s2_mmu_setup(ARMCPU *cpu, bool is_aa64, int level,
                               int inputsize, int stride)
{
    const int grainsize = stride + 3;
    int startsizecheck;

    /* Negative levels are never allowed.  */
    if (level < 0) {
        return false;
    }

    /* The input size must resolve to a whole number of entries in the
     * starting-level table: between 2 (startsizecheck == 1) and a full
     * concatenated table (startsizecheck == stride + 4).
     */
    startsizecheck = inputsize - ((3 - level) * stride + grainsize);
    if (startsizecheck < 1 || startsizecheck > stride + 4) {
        return false;
    }

    if (is_aa64) {
        CPUARMState *env = &cpu->env;
        unsigned int pamax = arm_pamax(cpu);

        switch (stride) {
        case 13: /* 64KB pages */
            if (level == 0 || (level == 1 && pamax <= 42)) {
                return false;
            }
            break;
        case 11: /* 16KB pages */
            if (level == 0 || (level == 1 && pamax <= 40)) {
                return false;
            }
            break;
        case 9: /* 4KB pages */
            if (level == 0 && pamax <= 42) {
                return false;
            }
            break;
        default:
            g_assert_not_reached();
        }

        /* Inputsize checks.  */
        if (inputsize > pamax &&
            (arm_el_is_aa64(env, 1) || inputsize > 40)) {
            /* This is CONSTRAINED UNPREDICTABLE and we choose to fault.  */
            return false;
        }
    } else {
        /* AArch32 only supports 4KB pages. Assert on that.  */
        assert(stride == 9);

        if (level == 0) {
            return false;
        }
    }
    return true;
}
9577
9578
9579
9580
9581
9582
9583
9584
9585
9586static uint8_t convert_stage2_attrs(CPUARMState *env, uint8_t s2attrs)
9587{
9588 uint8_t hiattr = extract32(s2attrs, 2, 2);
9589 uint8_t loattr = extract32(s2attrs, 0, 2);
9590 uint8_t hihint = 0, lohint = 0;
9591
9592 if (hiattr != 0) {
9593 if ((env->cp15.hcr_el2 & HCR_CD) != 0) {
9594 hiattr = loattr = 1;
9595 } else {
9596 if (hiattr != 1) {
9597 hihint = 3;
9598 }
9599 if (loattr != 1) {
9600 lohint = 3;
9601 }
9602 }
9603 }
9604
9605 return (hiattr << 6) | (hihint << 4) | (loattr << 2) | lohint;
9606}
9607
/* Long-descriptor (LPAE / VMSAv8-64) table walk for both stage 1 and
 * stage 2 (mmu_idx == ARMMMUIdx_S2NS) translations. Returns true on a
 * fault with *fi filled in; false on success with *phys_ptr, *txattrs,
 * *prot, *page_size_ptr (and optionally *cacheattrs) set.
 */
static bool get_phys_addr_lpae(CPUARMState *env, target_ulong address,
                               MMUAccessType access_type, ARMMMUIdx mmu_idx,
                               hwaddr *phys_ptr, MemTxAttrs *txattrs, int *prot,
                               target_ulong *page_size_ptr,
                               ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    CPUState *cs = CPU(cpu);
    /* Read an LPAE long-descriptor translation table. */
    ARMFaultType fault_type = ARMFault_Translation;
    uint32_t level;
    uint32_t epd = 0;
    int32_t t0sz, t1sz;
    uint32_t tg;
    uint64_t ttbr;
    int ttbr_select;
    hwaddr descaddr, indexmask, indexmask_grainsize;
    uint32_t tableattrs;
    target_ulong page_size;
    uint32_t attrs;
    int32_t stride = 9;
    int32_t addrsize;
    int inputsize;
    int32_t tbi = 0;
    TCR *tcr = regime_tcr(env, mmu_idx);
    int ap, ns, xn, pxn;
    uint32_t el = regime_el(env, mmu_idx);
    bool ttbr1_valid = true;
    uint64_t descaddrmask;
    bool aarch64 = arm_el_is_aa64(env, el);

    /* Determine the address size and the top-byte-ignore (TBI) setting
     * for this regime.
     */
    if (aarch64) {
        level = 0;
        addrsize = 64;
        if (el > 1) {
            /* TCR_EL2/EL3 single TBI bit; stage 2 has no TBI. */
            if (mmu_idx != ARMMMUIdx_S2NS) {
                tbi = extract64(tcr->raw_tcr, 20, 1);
            }
        } else {
            /* TCR_EL1: TBI1 for high addresses, TBI0 for low ones. */
            if (extract64(address, 55, 1)) {
                tbi = extract64(tcr->raw_tcr, 38, 1);
            } else {
                tbi = extract64(tcr->raw_tcr, 37, 1);
            }
        }
        tbi *= 8;

        /* If we are in 64-bit EL2 or EL3 then there is no TTBR1, so mark
         * it invalid.
         */
        if (el > 1) {
            ttbr1_valid = false;
        }
    } else {
        level = 1;
        addrsize = 32;
        /* There is no TTBR1 for EL2 */
        if (el == 2) {
            ttbr1_valid = false;
        }
    }

    /* Extract T0SZ/T1SZ and determine whether the address lies in the
     * TTBR0 region, the TTBR1 region, or neither (fault).
     */
    if (aarch64) {
        /* AArch64: clamp T0SZ to the architectural 16..39 range. */
        t0sz = extract32(tcr->raw_tcr, 0, 6);
        t0sz = MIN(t0sz, 39);
        t0sz = MAX(t0sz, 16);
    } else if (mmu_idx != ARMMMUIdx_S2NS) {
        /* AArch32 stage 1 translation.  */
        t0sz = extract32(tcr->raw_tcr, 0, 3);
    } else {
        /* AArch32 stage 2 translation: 40-bit input address, with a
         * signed T0SZ field in VTCR.
         */
        bool sext = extract32(tcr->raw_tcr, 4, 1);
        bool sign = extract32(tcr->raw_tcr, 3, 1);
        /* Stage 2 lookups use a fixed 40-bit input address size. */
        addrsize = 40;
        t0sz = sextract32(tcr->raw_tcr, 0, 4) + 8;

        /* VTCR.S must equal VTCR.T0SZ[3]; warn on a guest that sets
         * them inconsistently.
         */
        if (sign != sext) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "AArch32: VTCR.S / VTCR.T0SZ[3] mismatch\n");
        }
    }
    t1sz = extract32(tcr->raw_tcr, 16, 6);
    if (aarch64) {
        t1sz = MIN(t1sz, 39);
        t1sz = MAX(t1sz, 16);
    }
    if (t0sz && !extract64(address, addrsize - t0sz, t0sz - tbi)) {
        /* there is a ttbr0 region and we are in it (high bits all zero) */
        ttbr_select = 0;
    } else if (ttbr1_valid && t1sz &&
               !extract64(~address, addrsize - t1sz, t1sz - tbi)) {
        /* there is a ttbr1 region and we are in it (high bits all one) */
        ttbr_select = 1;
    } else if (!t0sz) {
        /* ttbr0 region is "everything not in the ttbr1 region" */
        ttbr_select = 0;
    } else if (!t1sz && ttbr1_valid) {
        /* ttbr1 region is "everything not in the ttbr0 region" */
        ttbr_select = 1;
    } else {
        /* in the gap between the two regions: Translation fault */
        fault_type = ARMFault_Translation;
        goto do_fault;
    }

    /* Gather the remaining walk parameters (TTBR, EPD, input size and
     * granule stride) for the selected region.
     */
    if (ttbr_select == 0) {
        ttbr = regime_ttbr(env, mmu_idx, 0);
        if (el < 2) {
            epd = extract32(tcr->raw_tcr, 7, 1);
        }
        inputsize = addrsize - t0sz;

        tg = extract32(tcr->raw_tcr, 14, 2);
        if (tg == 1) { /* 64KB pages */
            stride = 13;
        }
        if (tg == 2) { /* 16KB pages */
            stride = 11;
        }
    } else {
        /* We should only be here if TTBR1 is valid */
        assert(ttbr1_valid);

        ttbr = regime_ttbr(env, mmu_idx, 1);
        epd = extract32(tcr->raw_tcr, 23, 1);
        inputsize = addrsize - t1sz;

        /* Note TG1 uses a different encoding from TG0. */
        tg = extract32(tcr->raw_tcr, 30, 2);
        if (tg == 3) { /* 64KB pages */
            stride = 13;
        }
        if (tg == 1) { /* 16KB pages */
            stride = 11;
        }
    }

    /* Here we should have set up all the parameters for the translation:
     * inputsize, ttbr, epd, stride, tbi.
     */
    if (epd) {
        /* Translation table walk disabled => Translation fault on TLB miss.
         * Note: This is always 0 on 64-bit EL2 and EL3.
         */
        goto do_fault;
    }

    if (mmu_idx != ARMMMUIdx_S2NS) {
        /* The starting level for a stage 1 walk depends on the input
         * address size and the granule stride.
         */
        level = 4 - (inputsize - 4) / stride;
    } else {
        /* For stage 2 translations the starting level is specified by the
         * VTCR_EL2.SL0 field (whose interpretation depends on the page
         * size).
         */
        uint32_t sl0 = extract32(tcr->raw_tcr, 6, 2);
        uint32_t startlevel;
        bool ok;

        if (!aarch64 || stride == 9) {
            /* AArch32 or 4KB pages */
            startlevel = 2 - sl0;
        } else {
            /* 16KB or 64KB pages */
            startlevel = 3 - sl0;
        }

        /* Check that the starting level is valid. */
        ok = check_s2_mmu_setup(cpu, aarch64, startlevel,
                                inputsize, stride);
        if (!ok) {
            fault_type = ARMFault_Translation;
            goto do_fault;
        }
        level = startlevel;
    }

    indexmask_grainsize = (1ULL << (stride + 3)) - 1;
    indexmask = (1ULL << (inputsize - (stride * (4 - level)))) - 1;

    /* Now we can extract the actual base address from the TTBR. */
    descaddr = extract64(ttbr, 0, 48);
    descaddr &= ~indexmask;

    /* The address field in the descriptor goes up to bit 39 for ARMv7
     * but up to bit 47 for ARMv8, so select the mask accordingly.
     */
    descaddrmask = ((1ull << (aarch64 ? 48 : 40)) - 1) &
                   ~indexmask_grainsize;

    /* Secure accesses start with the page table in secure memory and
     * can be downgraded to non-secure at any step; non-secure accesses
     * remain non-secure. Track this by ORing NSTable/NS bits into
     * tableattrs at each level.
     */
    tableattrs = regime_is_secure(env, mmu_idx) ? 0 : (1 << 4);
    for (;;) {
        uint64_t descriptor;
        bool nstable;

        descaddr |= (address >> (stride * (4 - level))) & indexmask;
        descaddr &= ~7ULL;
        nstable = extract32(tableattrs, 4, 1);
        descriptor = arm_ldq_ptw(cs, descaddr, !nstable, mmu_idx, fi);
        if (fi->type != ARMFault_None) {
            goto do_fault;
        }

        if (!(descriptor & 1) ||
            (!(descriptor & 2) && (level == 3))) {
            /* Invalid, or the Reserved level 3 encoding */
            goto do_fault;
        }
        descaddr = descriptor & descaddrmask;

        if ((descriptor & 2) && (level < 3)) {
            /* Table entry: accumulate the hierarchical attribute bits
             * (descriptor bits [63:59]) and descend one level.
             */
            tableattrs |= extract64(descriptor, 59, 5);
            level++;
            indexmask = indexmask_grainsize;
            continue;
        }
        /* Block entry at level 1 or 2, or page entry at level 3:
         * the output address and attributes come from this descriptor.
         */
        page_size = (1ULL << ((stride * (4 - level)) + 3));
        descaddr |= (address & (page_size - 1));
        /* Extract the lower and upper attribute fields. */
        attrs = extract64(descriptor, 2, 10)
            | (extract64(descriptor, 52, 12) << 10);

        if (mmu_idx == ARMMMUIdx_S2NS) {
            /* Stage 2 table descriptors do not include any attribute
             * fields, so there is nothing to merge in.
             */
            break;
        }
        /* Merge in attributes from table descriptors */
        attrs |= extract32(tableattrs, 0, 2) << 11; /* XN, PXN */
        attrs |= extract32(tableattrs, 3, 1) << 5; /* APTable[1] => AP[2] */
        /* The sense of AP[1] vs APTable[0] is reversed: APTable[0] == 1
         * means "force PL1 access only", which clears the AP[1] user bit.
         */
        if (extract32(tableattrs, 2, 1)) {
            attrs &= ~(1 << 4);
        }
        attrs |= nstable << 3; /* NS */
        break;
    }
    /* Here descaddr is the final physical address, and attributes are
     * all in attrs.
     */
    fault_type = ARMFault_AccessFlag;
    if ((attrs & (1 << 8)) == 0) {
        /* Access flag clear */
        goto do_fault;
    }

    ap = extract32(attrs, 4, 2);
    xn = extract32(attrs, 12, 1);

    if (mmu_idx == ARMMMUIdx_S2NS) {
        ns = true;
        *prot = get_S2prot(env, ap, xn);
    } else {
        ns = extract32(attrs, 3, 1);
        pxn = extract32(attrs, 11, 1);
        *prot = get_S1prot(env, mmu_idx, aarch64, ap, ns, xn, pxn);
    }

    fault_type = ARMFault_Permission;
    if (!(*prot & (1 << access_type))) {
        goto do_fault;
    }

    if (ns) {
        /* The NS bit will (as required by the architecture) have no effect
         * if the CPU doesn't support TZ or this is a non-secure translation
         * regime, because the attribute will already be non-secure.
         */
        txattrs->secure = false;
    }

    if (cacheattrs != NULL) {
        if (mmu_idx == ARMMMUIdx_S2NS) {
            cacheattrs->attrs = convert_stage2_attrs(env,
                                                     extract32(attrs, 0, 4));
        } else {
            /* Index into MAIR registers for cache attributes */
            uint8_t attrindx = extract32(attrs, 0, 3);
            uint64_t mair = env->cp15.mair_el[regime_el(env, mmu_idx)];
            assert(attrindx <= 7);
            cacheattrs->attrs = extract64(mair, attrindx * 8, 8);
        }
        cacheattrs->shareability = extract32(attrs, 6, 2);
    }

    *phys_ptr = descaddr;
    *page_size_ptr = page_size;
    return false;

do_fault:
    fi->type = fault_type;
    fi->level = level;
    /* Tag the fault as stage 2 for a failed stage 1 PTW or a stage 2 walk */
    fi->stage2 = fi->s1ptw || (mmu_idx == ARMMMUIdx_S2NS);
    return true;
}
9955
/* Fill in *prot with the default memory map permissions for the given
 * address, used when the MPU is disabled or for background/default
 * mappings.
 */
static inline void get_phys_addr_pmsav7_default(CPUARMState *env,
                                                ARMMMUIdx mmu_idx,
                                                int32_t address, int *prot)
{
    if (!arm_feature(env, ARM_FEATURE_M)) {
        *prot = PAGE_READ | PAGE_WRITE;
        switch (address) {
        case 0xF0000000 ... 0xFFFFFFFF:
            if (regime_sctlr(env, mmu_idx) & SCTLR_V) {
                /* hivecs execing is ok */
                *prot |= PAGE_EXEC;
            }
            break;
        case 0x00000000 ... 0x7FFFFFFF:
            *prot |= PAGE_EXEC;
            break;
        }
    } else {
        /* Default system address map for M profile cores.
         * The architecture specifies which regions are execute-never;
         * everything is read/write at this level.
         */
        switch (address) {
        case 0x00000000 ... 0x1fffffff: /* ROM */
        case 0x20000000 ... 0x3fffffff: /* SRAM */
        case 0x60000000 ... 0x7fffffff: /* RAM */
        case 0x80000000 ... 0x9fffffff: /* RAM */
            *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
            break;
        case 0x40000000 ... 0x5fffffff: /* Peripheral */
        case 0xa0000000 ... 0xbfffffff: /* Device */
        case 0xc0000000 ... 0xdfffffff: /* Device */
        case 0xe0000000 ... 0xffffffff: /* System */
            *prot = PAGE_READ | PAGE_WRITE;
            break;
        default:
            g_assert_not_reached();
        }
    }
}
9996
9997static bool pmsav7_use_background_region(ARMCPU *cpu,
9998 ARMMMUIdx mmu_idx, bool is_user)
9999{
10000
10001
10002
10003 CPUARMState *env = &cpu->env;
10004
10005 if (is_user) {
10006 return false;
10007 }
10008
10009 if (arm_feature(env, ARM_FEATURE_M)) {
10010 return env->v7m.mpu_ctrl[regime_is_secure(env, mmu_idx)]
10011 & R_V7M_MPU_CTRL_PRIVDEFENA_MASK;
10012 } else {
10013 return regime_sctlr(env, mmu_idx) & SCTLR_BR;
10014 }
10015}
10016
10017static inline bool m_is_ppb_region(CPUARMState *env, uint32_t address)
10018{
10019
10020 return arm_feature(env, ARM_FEATURE_M) &&
10021 extract32(address, 20, 12) == 0xe00;
10022}
10023
10024static inline bool m_is_system_region(CPUARMState *env, uint32_t address)
10025{
10026
10027
10028
10029 return arm_feature(env, ARM_FEATURE_M) && extract32(address, 29, 3) == 0x7;
10030}
10031
/* PMSAv7 MPU lookup: determine the protection flags for the access and
 * whether it faults. PMSA never remaps addresses, so *phys_ptr is
 * always the input address. Returns true on a fault with *fi filled in.
 */
static bool get_phys_addr_pmsav7(CPUARMState *env, uint32_t address,
                                 MMUAccessType access_type, ARMMMUIdx mmu_idx,
                                 hwaddr *phys_ptr, int *prot,
                                 target_ulong *page_size,
                                 ARMMMUFaultInfo *fi)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    int n;
    bool is_user = regime_is_user(env, mmu_idx);

    *phys_ptr = address;
    *page_size = TARGET_PAGE_SIZE;
    *prot = 0;

    if (regime_translation_disabled(env, mmu_idx) ||
        m_is_ppb_region(env, address)) {
        /* MPU disabled, or a PPB access: use the default memory map.
         * (M-profile PPB accesses are never subject to MPU checks.)
         */
        get_phys_addr_pmsav7_default(env, mmu_idx, address, prot);
    } else {
        /* Region search: the highest-numbered matching region wins. */
        for (n = (int)cpu->pmsav7_dregion - 1; n >= 0; n--) {
            uint32_t base = env->pmsav7.drbar[n];
            uint32_t rsize = extract32(env->pmsav7.drsr[n], 1, 5);
            uint32_t rmask;
            bool srdis = false;

            if (!(env->pmsav7.drsr[n] & 0x1)) {
                /* Region disabled */
                continue;
            }

            if (!rsize) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "DRSR[%d]: Rsize field cannot be 0\n", n);
                continue;
            }
            rsize++;
            rmask = (1ull << rsize) - 1;

            if (base & rmask) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "DRBAR[%d]: 0x%" PRIx32 " misaligned "
                              "to DRSR region size, mask = 0x%" PRIx32 "\n",
                              n, base, rmask);
                continue;
            }

            if (address < base || address > base + rmask) {
                /* Address not in this region. We must check whether the
                 * region covers addresses in the same page as our address.
                 * In that case we must not report a size that covers the
                 * whole page for a subsequent hit against a different MPU
                 * region or the background region, because it would result
                 * in incorrect TLB hits for subsequent accesses to
                 * addresses that are in this MPU region.
                 */
                if (ranges_overlap(base, rmask,
                                   address & TARGET_PAGE_MASK,
                                   TARGET_PAGE_SIZE)) {
                    *page_size = 1;
                }
                continue;
            }

            /* Region matched: check subregion disable bits (only for
             * regions of 256 bytes or larger).
             */
            if (rsize >= 8) {
                int i, snd;
                uint32_t srdis_mask;

                rsize -= 3; /* sub region size (power of 2) */
                snd = ((address - base) >> rsize) & 0x7;
                srdis = extract32(env->pmsav7.drsr[n], snd + 8, 1);

                srdis_mask = srdis ? 0x3 : 0x0;
                for (i = 2; i <= 8 && rsize < TARGET_PAGE_BITS; i *= 2) {
                    /* This will check in groups of 2, 4 and then 8,
                     * whether the subregion bits are consistent. rsize is
                     * incremented back up to give the region size,
                     * considering consistent adjacent subregions as one
                     * region. Stop testing if rsize is already big enough
                     * for an entire QEMU page.
                     */
                    int snd_rounded = snd & ~(i - 1);
                    uint32_t srdis_multi = extract32(env->pmsav7.drsr[n],
                                                     snd_rounded + 8, i);
                    if (srdis_mask ^ srdis_multi) {
                        break;
                    }
                    srdis_mask = (srdis_mask << i) | srdis_mask;
                    rsize++;
                }
            }
            if (srdis) {
                /* The matching subregion is disabled: keep searching. */
                continue;
            }
            if (rsize < TARGET_PAGE_BITS) {
                *page_size = 1 << rsize;
            }
            break;
        }

        if (n == -1) { /* no hits */
            if (!pmsav7_use_background_region(cpu, mmu_idx, is_user)) {
                /* background fault */
                fi->type = ARMFault_Background;
                return true;
            }
            get_phys_addr_pmsav7_default(env, mmu_idx, address, prot);
        } else { /* an MPU hit! */
            uint32_t ap = extract32(env->pmsav7.dracr[n], 8, 3);
            uint32_t xn = extract32(env->pmsav7.dracr[n], 12, 1);

            if (m_is_system_region(env, address)) {
                /* System space is always execute never */
                xn = 1;
            }

            if (is_user) { /* User mode AP bit decoding */
                switch (ap) {
                case 0:
                case 1:
                case 5:
                    break; /* no access */
                case 3:
                    *prot |= PAGE_WRITE;
                    /* fall through */
                case 2:
                case 6:
                    *prot |= PAGE_READ | PAGE_EXEC;
                    break;
                case 7:
                    /* for v7M, same as 6 */
                    if (arm_feature(env, ARM_FEATURE_M)) {
                        *prot |= PAGE_READ | PAGE_EXEC;
                        break;
                    }
                    /* fall through */
                default:
                    qemu_log_mask(LOG_GUEST_ERROR,
                                  "DRACR[%d]: Bad value for AP bits: 0x%"
                                  PRIx32 "\n", n, ap);
                }
            } else { /* Priv. mode AP bits decoding */
                switch (ap) {
                case 0:
                    break; /* no access */
                case 1:
                case 2:
                case 3:
                    *prot |= PAGE_WRITE;
                    /* fall through */
                case 5:
                case 6:
                    *prot |= PAGE_READ | PAGE_EXEC;
                    break;
                case 7:
                    /* for v7M, same as 6 */
                    if (arm_feature(env, ARM_FEATURE_M)) {
                        *prot |= PAGE_READ | PAGE_EXEC;
                        break;
                    }
                    /* fall through */
                default:
                    qemu_log_mask(LOG_GUEST_ERROR,
                                  "DRACR[%d]: Bad value for AP bits: 0x%"
                                  PRIx32 "\n", n, ap);
                }
            }

            /* execute never */
            if (xn) {
                *prot &= ~PAGE_EXEC;
            }
        }
    }

    fi->type = ARMFault_Permission;
    fi->level = 1;
    return !(*prot & (1 << access_type));
}
10218
10219static bool v8m_is_sau_exempt(CPUARMState *env,
10220 uint32_t address, MMUAccessType access_type)
10221{
10222
10223
10224
10225 return
10226 (access_type == MMU_INST_FETCH && m_is_system_region(env, address)) ||
10227 (address >= 0xe0000000 && address <= 0xe0002fff) ||
10228 (address >= 0xe000e000 && address <= 0xe000efff) ||
10229 (address >= 0xe002e000 && address <= 0xe002efff) ||
10230 (address >= 0xe0040000 && address <= 0xe0041fff) ||
10231 (address >= 0xe00ff000 && address <= 0xe00fffff);
10232}
10233
/* Look up the v8M security attributes (SAU and IDAU) for an address
 * and fill in *sattrs accordingly. Assumes the caller has
 * zero-initialized *sattrs.
 */
static void v8m_security_lookup(CPUARMState *env, uint32_t address,
                                MMUAccessType access_type, ARMMMUIdx mmu_idx,
                                V8M_SAttributes *sattrs)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    int r;
    bool idau_exempt = false, idau_ns = true, idau_nsc = true;
    int idau_region = IREGION_NOTVALID;
    uint32_t addr_page_base = address & TARGET_PAGE_MASK;
    uint32_t addr_page_limit = addr_page_base + (TARGET_PAGE_SIZE - 1);

    if (cpu->idau) {
        /* Consult the board-provided IDAU for this address. */
        IDAUInterfaceClass *iic = IDAU_INTERFACE_GET_CLASS(cpu->idau);
        IDAUInterface *ii = IDAU_INTERFACE(cpu->idau);

        iic->check(ii, address, &idau_region, &idau_exempt, &idau_ns,
                   &idau_nsc);
    }

    if (access_type == MMU_INST_FETCH && extract32(address, 28, 4) == 0xf) {
        /* Instruction fetches from 0xf0000000..0xffffffff are always
         * treated as Secure (sattrs stays zero-initialized).
         */
        return;
    }

    if (idau_exempt || v8m_is_sau_exempt(env, address, access_type)) {
        /* Exempt accesses take the security state of the regime. */
        sattrs->ns = !regime_is_secure(env, mmu_idx);
        return;
    }

    if (idau_region != IREGION_NOTVALID) {
        sattrs->irvalid = true;
        sattrs->iregion = idau_region;
    }

    switch (env->sau.ctrl & 3) {
    case 0: /* SAU.ENABLE == 0, SAU.ALLNS == 0 */
        break;
    case 2: /* SAU.ENABLE == 0, SAU.ALLNS == 1 */
        sattrs->ns = true;
        break;
    default: /* SAU.ENABLE == 1 */
        for (r = 0; r < cpu->sau_sregion; r++) {
            if (env->sau.rlar[r] & 1) {
                uint32_t base = env->sau.rbar[r] & ~0x1f;
                uint32_t limit = env->sau.rlar[r] | 0x1f;

                if (base <= address && limit >= address) {
                    if (base > addr_page_base || limit < addr_page_limit) {
                        /* Region does not cover the whole QEMU page. */
                        sattrs->subpage = true;
                    }
                    if (sattrs->srvalid) {
                        /* If we hit in more than one region then we must
                         * report as Secure, not NS-Callable, with no valid
                         * region number.
                         */
                        sattrs->ns = false;
                        sattrs->nsc = false;
                        sattrs->sregion = 0;
                        sattrs->srvalid = false;
                        break;
                    } else {
                        if (env->sau.rlar[r] & 2) {
                            sattrs->nsc = true;
                        } else {
                            sattrs->ns = true;
                        }
                        sattrs->srvalid = true;
                        sattrs->sregion = r;
                    }
                } else {
                    /* Address not in this region. We must check whether the
                     * region covers addresses in the same page as our
                     * address. In that case we must not report a size that
                     * covers the whole page for a subsequent hit against a
                     * different region, because it would result in incorrect
                     * TLB hits for subsequent accesses to addresses that are
                     * in this SAU region.
                     */
                    if (limit >= base &&
                        ranges_overlap(base, limit - base + 1,
                                       addr_page_base,
                                       TARGET_PAGE_SIZE)) {
                        sattrs->subpage = true;
                    }
                }
            }
        }

        /* The IDAU result overrides the SAU lookup where the IDAU
         * specifies a higher security than the SAU does.
         */
        if (!idau_ns) {
            if (sattrs->ns || (!idau_nsc && sattrs->nsc)) {
                sattrs->ns = false;
                sattrs->nsc = idau_nsc;
            }
        }
        break;
    }
}
10339
static bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address,
                              MMUAccessType access_type, ARMMMUIdx mmu_idx,
                              hwaddr *phys_ptr, MemTxAttrs *txattrs,
                              int *prot, bool *is_subpage,
                              ARMMMUFaultInfo *fi, uint32_t *mregion)
{
    /*
     * Perform a PMSAv8 MPU lookup (without the SAU security check).
     * Returns true on an MPU fault (details in *fi), false on success.
     * *is_subpage is set if the matched region does not cover the whole
     * TARGET_PAGE containing the address.  If mregion is non-NULL the
     * matched region number is stored there ((uint32_t)-1 if no region
     * matched); this is used by the TT instruction.
     */
    ARMCPU *cpu = arm_env_get_cpu(env);
    bool is_user = regime_is_user(env, mmu_idx);
    uint32_t secure = regime_is_secure(env, mmu_idx);
    int n;
    int matchregion = -1;
    bool hit = false;
    uint32_t addr_page_base = address & TARGET_PAGE_MASK;
    uint32_t addr_page_limit = addr_page_base + (TARGET_PAGE_SIZE - 1);

    *is_subpage = false;
    *phys_ptr = address;
    *prot = 0;
    if (mregion) {
        *mregion = -1;
    }

    /*
     * A "hit" with matchregion == -1 means "use the default memory map"
     * (MPU disabled, PPB accesses, or background region in use);
     * a hit with matchregion >= 0 means a specific region matched.
     */
    if (regime_translation_disabled(env, mmu_idx)) {
        hit = true;
    } else if (m_is_ppb_region(env, address)) {
        /* PPB accesses always use the default memory map */
        hit = true;
    } else if (pmsav7_use_background_region(cpu, mmu_idx, is_user)) {
        hit = true;
    } else {
        for (n = (int)cpu->pmsav7_dregion - 1; n >= 0; n--) {
            /* Region base is RBAR with the low 5 bits masked off;
             * region limit is RLAR with the low 5 bits set
             * (regions are 32-byte aligned and sized).
             */
            uint32_t base = env->pmsav8.rbar[secure][n] & ~0x1f;
            uint32_t limit = env->pmsav8.rlar[secure][n] | 0x1f;

            if (!(env->pmsav8.rlar[secure][n] & 0x1)) {
                /* Region disabled */
                continue;
            }

            if (address < base || address > limit) {
                /* Address not in this region; but if the region overlaps
                 * the page the address is in, any result is only valid
                 * for a sub-page of that page.
                 */
                if (limit >= base &&
                    ranges_overlap(base, limit - base + 1,
                                   addr_page_base,
                                   TARGET_PAGE_SIZE)) {
                    *is_subpage = true;
                }
                continue;
            }

            if (base > addr_page_base || limit < addr_page_limit) {
                *is_subpage = true;
            }

            if (hit) {
                /* Multiple matching regions: this is always a fault
                 * in PMSAv8 (unlike PMSAv7 where priority decides).
                 */
                fi->type = ARMFault_Permission;
                fi->level = 1;
                return true;
            }

            matchregion = n;
            hit = true;
        }
    }

    if (!hit) {
        /* No region matched and no default map in use: background fault */
        fi->type = ARMFault_Background;
        return true;
    }

    if (matchregion == -1) {
        /* Default memory map permissions */
        get_phys_addr_pmsav7_default(env, mmu_idx, address, prot);
    } else {
        uint32_t ap = extract32(env->pmsav8.rbar[secure][matchregion], 1, 2);
        uint32_t xn = extract32(env->pmsav8.rbar[secure][matchregion], 0, 1);

        if (m_is_system_region(env, address)) {
            /* System space is always execute-never */
            xn = 1;
        }

        *prot = simple_ap_to_rw_prot(env, mmu_idx, ap);
        if (*prot && !xn) {
            *prot |= PAGE_EXEC;
        }

        /* We don't need to look the attribute up in the MAIR0/MAIR1
         * registers because QEMU doesn't model cacheability here.
         */
        if (mregion) {
            *mregion = matchregion;
        }
    }

    /* Report a permission fault if the access type isn't allowed */
    fi->type = ARMFault_Permission;
    fi->level = 1;
    return !(*prot & (1 << access_type));
}
10468
10469
static bool get_phys_addr_pmsav8(CPUARMState *env, uint32_t address,
                                 MMUAccessType access_type, ARMMMUIdx mmu_idx,
                                 hwaddr *phys_ptr, MemTxAttrs *txattrs,
                                 int *prot, target_ulong *page_size,
                                 ARMMMUFaultInfo *fi)
{
    /*
     * PMSAv8 translation: SAU/IDAU security check (if the Security
     * Extension is implemented) followed by the MPU lookup.
     * Returns true on a fault, false on success.
     */
    uint32_t secure = regime_is_secure(env, mmu_idx);
    V8M_SAttributes sattrs = {};
    bool ret;
    bool mpu_is_subpage;

    if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
        v8m_security_lookup(env, address, access_type, mmu_idx, &sattrs);
        if (access_type == MMU_INST_FETCH) {
            /*
             * Instruction fetch from an address whose security attribute
             * does not match the current security state: report a
             * QEMU-internal fault type so the caller can distinguish
             * a fetch from an NSC location (where an SG instruction
             * would be legal) from a plain SecureFault.
             */
            if (sattrs.ns != !secure) {
                if (sattrs.nsc) {
                    fi->type = ARMFault_QEMU_NSCExec;
                } else {
                    fi->type = ARMFault_QEMU_SFault;
                }
                *page_size = sattrs.subpage ? 1 : TARGET_PAGE_SIZE;
                *phys_ptr = address;
                *prot = 0;
                return true;
            }
        } else {
            /*
             * Data access: an NS-attributed location is accessed with
             * non-secure transaction attributes regardless of CPU state;
             * a Secure location accessed while Non-secure is a
             * SecureFault.
             */
            if (sattrs.ns) {
                txattrs->secure = false;
            } else if (!secure) {
                /* NS access to S memory */
                fi->type = ARMFault_QEMU_SFault;
                *page_size = sattrs.subpage ? 1 : TARGET_PAGE_SIZE;
                *phys_ptr = address;
                *prot = 0;
                return true;
            }
        }
    }

    ret = pmsav8_mpu_lookup(env, address, access_type, mmu_idx, phys_ptr,
                            txattrs, prot, &mpu_is_subpage, fi, NULL);
    /* If either the SAU or MPU result covers less than a full page,
     * install only a 1-byte TLB entry.
     */
    *page_size = sattrs.subpage || mpu_is_subpage ? 1 : TARGET_PAGE_SIZE;
    return ret;
}
10543
static bool get_phys_addr_pmsav5(CPUARMState *env, uint32_t address,
                                 MMUAccessType access_type, ARMMMUIdx mmu_idx,
                                 hwaddr *phys_ptr, int *prot,
                                 ARMMMUFaultInfo *fi)
{
    /*
     * Pre-v7 MPU translation (flat address map, region-based access
     * permissions from the cp15 c6 region registers).
     * Returns true on a fault, false on success.
     */
    int n;
    uint32_t mask;
    uint32_t base;
    bool is_user = regime_is_user(env, mmu_idx);

    if (regime_translation_disabled(env, mmu_idx)) {
        /* MPU disabled: flat mapping with full access */
        *phys_ptr = address;
        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        return false;
    }

    *phys_ptr = address;
    /* Highest-numbered matching region takes priority */
    for (n = 7; n >= 0; n--) {
        base = env->cp15.c6_region[n];
        if ((base & 1) == 0) {
            /* Region disabled (bit 0 is the enable bit) */
            continue;
        }
        /* Bits [5:1] encode the region size as a power of two */
        mask = 1 << ((base >> 1) & 0x1f);
        /* Keep in mind the region size encoding gives size = 2^(N+1),
         * hence the extra doubling here.
         */
        mask = (mask << 1) - 1;
        if (((base ^ address) & ~mask) == 0) {
            /* Address falls inside this region */
            break;
        }
    }
    if (n < 0) {
        fi->type = ARMFault_Background;
        return true;
    }

    /* Extract the 4-bit access permission field for region n */
    if (access_type == MMU_INST_FETCH) {
        mask = env->cp15.pmsav5_insn_ap;
    } else {
        mask = env->cp15.pmsav5_data_ap;
    }
    mask = (mask >> (n * 4)) & 0xf;
    switch (mask) {
    case 0: /* no access at all */
        fi->type = ARMFault_Permission;
        fi->level = 1;
        return true;
    case 1: /* privileged read/write, unprivileged no access */
        if (is_user) {
            fi->type = ARMFault_Permission;
            fi->level = 1;
            return true;
        }
        *prot = PAGE_READ | PAGE_WRITE;
        break;
    case 2: /* privileged read/write, unprivileged read-only */
        *prot = PAGE_READ;
        if (!is_user) {
            *prot |= PAGE_WRITE;
        }
        break;
    case 3: /* full read/write access */
        *prot = PAGE_READ | PAGE_WRITE;
        break;
    case 5: /* privileged read-only, unprivileged no access */
        if (is_user) {
            fi->type = ARMFault_Permission;
            fi->level = 1;
            return true;
        }
        *prot = PAGE_READ;
        break;
    case 6: /* read-only for everyone */
        *prot = PAGE_READ;
        break;
    default:
        /* Reserved AP encoding: treat as a permission fault */
        fi->type = ARMFault_Permission;
        fi->level = 1;
        return true;
    }
    /* This MPU has no execute-never control; readable implies executable */
    *prot |= PAGE_EXEC;
    return false;
}
10628
10629
10630
10631
10632
10633
10634
10635
static uint8_t combine_cacheattr_nibble(uint8_t s1, uint8_t s2)
{
    /*
     * Combine one (inner or outer) MAIR-format attribute nibble from
     * stage 1 with the corresponding nibble from stage 2.
     * 0b0100 (Non-cacheable) always dominates; otherwise a
     * write-through type at either stage takes precedence over
     * write-back, with the allocation hint bits [1:0] taken from
     * stage 1 in all cases.
     */
    uint8_t s1_type = (s1 >> 2) & 3;   /* cacheability type bits [3:2] */
    uint8_t s2_type = (s2 >> 2) & 3;
    uint8_t s1_hint = s1 & 3;          /* allocation hint bits [1:0] */

    if (s1 == 4 || s2 == 4) {
        /* Non-cacheable at either stage overrides everything */
        return 4;
    }
    if (s1_type == 0 || s1_type == 2) {
        /* Stage 1 (transient or non-transient) write-through wins */
        return s1;
    }
    if (s2_type == 2) {
        /* Stage 2 write-through wins, keeping the stage 1 hint */
        return (uint8_t)((2 << 2) | s1_hint);
    }
    /* Both write-back: keep the stage 1 attributes */
    return s1;
}
10653
10654
10655
10656
10657
10658
10659
/*
 * Combine the stage 1 and stage 2 memory attributes and shareability.
 * The attrs byte is in MAIR format: outer attribute nibble in bits
 * [7:4], inner nibble in bits [3:0].
 */
static ARMCacheAttrs combine_cacheattrs(ARMCacheAttrs s1, ARMCacheAttrs s2)
{
    uint8_t s1lo = extract32(s1.attrs, 0, 4), s2lo = extract32(s2.attrs, 0, 4);
    uint8_t s1hi = extract32(s1.attrs, 4, 4), s2hi = extract32(s2.attrs, 4, 4);
    ARMCacheAttrs ret;

    /* Combine shareability: the more constraining value wins */
    if (s1.shareability == 2 || s2.shareability == 2) {
        /* Outer Shareable dominates everything */
        ret.shareability = 2;
    } else if (s1.shareability == 3 || s2.shareability == 3) {
        /* Inner Shareable beats Non-shareable */
        ret.shareability = 3;
    } else {
        /* Both Non-shareable */
        ret.shareability = 0;
    }

    /* Combine the memory type and cacheability attributes */
    if (s1hi == 0 || s2hi == 0) {
        /* Device memory at either stage: result is Device, using the
         * most restrictive (numerically lowest) device sub-type.
         */
        if (s1lo == 0 || s2lo == 0) {
            /* Device-nGnRnE */
            ret.attrs = 0;
        } else if (s1lo == 4 || s2lo == 4) {
            /* Device-nGnRE */
            ret.attrs = 4;
        } else if (s1lo == 8 || s2lo == 8) {
            /* Device-nGRE */
            ret.attrs = 8;
        } else {
            /* Device-GRE */
            ret.attrs = 0xc;
        }

        /* Any Device memory result is treated as Outer Shareable
         * regardless of the stage 1/stage 2 shareability fields.
         */
        ret.shareability = 2;
    } else {
        /* Normal memory: combine the inner and outer nibbles separately */
        ret.attrs = combine_cacheattr_nibble(s1hi, s2hi) << 4
                  | combine_cacheattr_nibble(s1lo, s2lo);

        if (ret.attrs == 0x44) {
            /* Normal Inner+Outer Non-cacheable is also treated as
             * Outer Shareable.
             */
            ret.shareability = 2;
        }
    }

    return ret;
}
10714
10715
10716
10717
10718
10719
10720
10721
10722
10723
10724
10725
10726
10727
10728
10729
10730
10731
10732
10733
10734
10735
10736
10737
10738
10739
10740
10741
/*
 * Translate a virtual address to a physical address, access permissions
 * and (for LPAE regimes) cache attributes, dispatching on the
 * translation regime: combined stage 1+2 (when EL2 exists), M/R-profile
 * MPU lookups, or VMSA short/long-descriptor table walks.
 *
 * Returns false on success; returns true on a translation fault, with
 * the fault details filled in via *fi.  On success *phys_ptr, *attrs,
 * *prot and *page_size are valid.
 */
static bool get_phys_addr(CPUARMState *env, target_ulong address,
                          MMUAccessType access_type, ARMMMUIdx mmu_idx,
                          hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot,
                          target_ulong *page_size,
                          ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs)
{
    if (mmu_idx == ARMMMUIdx_S12NSE0 || mmu_idx == ARMMMUIdx_S12NSE1) {
        /* Combined stage 1 and stage 2 regime */
        if (arm_feature(env, ARM_FEATURE_EL2)) {
            hwaddr ipa;
            int s2_prot;
            int ret;
            ARMCacheAttrs cacheattrs2 = {};

            /* Stage 1: VA -> IPA */
            ret = get_phys_addr(env, address, access_type,
                                stage_1_mmu_idx(mmu_idx), &ipa, attrs,
                                prot, page_size, fi, cacheattrs);

            /* If S1 faulted, or S2 is disabled, the IPA is the result */
            if (ret || regime_translation_disabled(env, ARMMMUIdx_S2NS)) {
                *phys_ptr = ipa;
                return ret;
            }

            /* Stage 2: IPA -> PA (always an LPAE-format walk) */
            ret = get_phys_addr_lpae(env, ipa, access_type, ARMMMUIdx_S2NS,
                                     phys_ptr, attrs, &s2_prot,
                                     page_size, fi,
                                     cacheattrs != NULL ? &cacheattrs2 : NULL);
            fi->s2addr = ipa;
            /* Combine the S1 and S2 permissions */
            *prot &= s2_prot;

            /* Combine the S1 and S2 cache attributes, if requested */
            if (!ret && cacheattrs != NULL) {
                if (env->cp15.hcr_el2 & HCR_DC) {
                    /*
                     * HCR.DC forces the stage 1 attributes to
                     * Normal Non-Shareable,
                     * Inner Write-Back Read-Allocate Write-Allocate,
                     * Outer Write-Back Read-Allocate Write-Allocate.
                     */
                    cacheattrs->attrs = 0xff;
                    cacheattrs->shareability = 0;
                }
                *cacheattrs = combine_cacheattrs(*cacheattrs, cacheattrs2);
            }

            return ret;
        } else {
            /* Without EL2, a stage 1+2 translation is just stage 1 */
            mmu_idx = stage_1_mmu_idx(mmu_idx);
        }
    }

    /* NB: a non-secure regime's attributes stay non-secure; the table
     * walk below may further downgrade but never upgrade them.
     */
    attrs->secure = regime_is_secure(env, mmu_idx);
    attrs->user = regime_is_user(env, mmu_idx);

    /* Fast Context Switch Extension: remaps low addresses via FCSEIDR.
     * Removed in v8; does not apply to stage 2 translation.
     */
    if (address < 0x02000000 && mmu_idx != ARMMMUIdx_S2NS
        && !arm_feature(env, ARM_FEATURE_V8)) {
        if (regime_el(env, mmu_idx) == 3) {
            address += env->cp15.fcseidr_s;
        } else {
            address += env->cp15.fcseidr_ns;
        }
    }

    if (arm_feature(env, ARM_FEATURE_PMSA)) {
        /* MPU-based (PMSA) translation: flat mapping, permissions only */
        bool ret;
        *page_size = TARGET_PAGE_SIZE;

        if (arm_feature(env, ARM_FEATURE_V8)) {
            /* PMSAv8 */
            ret = get_phys_addr_pmsav8(env, address, access_type, mmu_idx,
                                       phys_ptr, attrs, prot, page_size, fi);
        } else if (arm_feature(env, ARM_FEATURE_V7)) {
            /* PMSAv7 */
            ret = get_phys_addr_pmsav7(env, address, access_type, mmu_idx,
                                       phys_ptr, prot, page_size, fi);
        } else {
            /* Pre-v7 MPU */
            ret = get_phys_addr_pmsav5(env, address, access_type, mmu_idx,
                                       phys_ptr, prot, fi);
        }
        qemu_log_mask(CPU_LOG_MMU, "PMSA MPU lookup for %s at 0x%08" PRIx32
                      " mmu_idx %u -> %s (prot %c%c%c)\n",
                      access_type == MMU_DATA_LOAD ? "reading" :
                      (access_type == MMU_DATA_STORE ? "writing" : "execute"),
                      (uint32_t)address, mmu_idx,
                      ret ? "Miss" : "Hit",
                      *prot & PAGE_READ ? 'r' : '-',
                      *prot & PAGE_WRITE ? 'w' : '-',
                      *prot & PAGE_EXEC ? 'x' : '-');

        return ret;
    }

    /* Definitely a real MMU, not an MPU */

    if (regime_translation_disabled(env, mmu_idx)) {
        /* MMU disabled: identity mapping with full access */
        *phys_ptr = address;
        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        *page_size = TARGET_PAGE_SIZE;
        return 0;
    }

    if (regime_using_lpae_format(env, mmu_idx)) {
        return get_phys_addr_lpae(env, address, access_type, mmu_idx,
                                  phys_ptr, attrs, prot, page_size,
                                  fi, cacheattrs);
    } else if (regime_sctlr(env, mmu_idx) & SCTLR_XP) {
        return get_phys_addr_v6(env, address, access_type, mmu_idx,
                                phys_ptr, attrs, prot, page_size, fi);
    } else {
        return get_phys_addr_v5(env, address, access_type, mmu_idx,
                                phys_ptr, prot, page_size, fi);
    }
}
10872
10873
10874
10875
10876
10877bool arm_tlb_fill(CPUState *cs, vaddr address,
10878 MMUAccessType access_type, int mmu_idx,
10879 ARMMMUFaultInfo *fi)
10880{
10881 ARMCPU *cpu = ARM_CPU(cs);
10882 CPUARMState *env = &cpu->env;
10883 hwaddr phys_addr;
10884 target_ulong page_size;
10885 int prot;
10886 int ret;
10887 MemTxAttrs attrs = {};
10888
10889 ret = get_phys_addr(env, address, access_type,
10890 core_to_arm_mmu_idx(env, mmu_idx), &phys_addr,
10891 &attrs, &prot, &page_size, fi, NULL);
10892 if (!ret) {
10893
10894
10895
10896
10897
10898 if (page_size >= TARGET_PAGE_SIZE) {
10899 phys_addr &= TARGET_PAGE_MASK;
10900 address &= TARGET_PAGE_MASK;
10901 }
10902 tlb_set_page_with_attrs(cs, address, phys_addr, attrs,
10903 prot, mmu_idx, page_size);
10904 return 0;
10905 }
10906
10907 return ret;
10908}
10909
/*
 * Translate an address for debugger access, returning the physical
 * address and memory transaction attributes, or -1 if the address
 * cannot be translated in the current regime.
 */
hwaddr arm_cpu_get_phys_page_attrs_debug(CPUState *cs, vaddr addr,
                                         MemTxAttrs *attrs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    hwaddr phys_addr;
    target_ulong page_size;
    int prot;
    bool ret;
    ARMMMUFaultInfo fi = {};
    ARMMMUIdx mmu_idx = core_to_arm_mmu_idx(env, cpu_mmu_index(env, false));

    *attrs = (MemTxAttrs) {};

    /* Translate as a data load (access_type 0 == MMU_DATA_LOAD); cache
     * attributes are not needed for a debug access.
     */
    ret = get_phys_addr(env, addr, 0, mmu_idx, &phys_addr,
                        attrs, &prot, &page_size, &fi, NULL);

    if (ret) {
        /* Translation faulted */
        return -1;
    }
    return phys_addr;
}
10932
uint32_t HELPER(v7m_mrs)(CPUARMState *env, uint32_t reg)
{
    /* Implement the v7M/v8M MRS instruction; reg is the SYSm value. */
    uint32_t mask;
    unsigned el = arm_current_el(env);

    /* First handle the registers which unprivileged code can read */
    switch (reg) {
    case 0 ... 7: /* xPSR sub-fields */
        mask = 0;
        if ((reg & 1) && el) {
            mask |= XPSR_EXCP; /* IPSR (unprivileged reads it as zero) */
        }
        if (!(reg & 4)) {
            mask |= XPSR_NZCV | XPSR_Q; /* APSR */
        }
        /* EPSR reads as zero */
        return xpsr_read(env) & mask;
        break; /* not reached */
    case 20: /* CONTROL (banked by security state) */
        return env->v7m.control[env->v7m.secure];
    case 0x94: /* CONTROL_NS */
        /* Handled here rather than below because unprivileged Secure
         * code may read the NS CONTROL register.
         */
        if (!env->v7m.secure) {
            return 0;
        }
        return env->v7m.control[M_REG_NS];
    }

    if (el == 0) {
        return 0; /* all other registers read as zero when unprivileged */
    }

    if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
        /* 0x88..0x98: Secure-state-only aliases of the NS banked regs */
        switch (reg) {
        case 0x88: /* MSP_NS */
            if (!env->v7m.secure) {
                return 0;
            }
            return env->v7m.other_ss_msp;
        case 0x89: /* PSP_NS */
            if (!env->v7m.secure) {
                return 0;
            }
            return env->v7m.other_ss_psp;
        case 0x8a: /* MSPLIM_NS */
            if (!env->v7m.secure) {
                return 0;
            }
            return env->v7m.msplim[M_REG_NS];
        case 0x8b: /* PSPLIM_NS */
            if (!env->v7m.secure) {
                return 0;
            }
            return env->v7m.psplim[M_REG_NS];
        case 0x90: /* PRIMASK_NS */
            if (!env->v7m.secure) {
                return 0;
            }
            return env->v7m.primask[M_REG_NS];
        case 0x91: /* BASEPRI_NS */
            if (!env->v7m.secure) {
                return 0;
            }
            return env->v7m.basepri[M_REG_NS];
        case 0x93: /* FAULTMASK_NS */
            if (!env->v7m.secure) {
                return 0;
            }
            return env->v7m.faultmask[M_REG_NS];
        case 0x98: /* SP_NS */
        {
            /* This gives the non-secure SP selected according to the
             * current mode and the NS CONTROL.SPSEL bit.
             */
            bool spsel = env->v7m.control[M_REG_NS] & R_V7M_CONTROL_SPSEL_MASK;

            if (!env->v7m.secure) {
                return 0;
            }
            if (!arm_v7m_is_handler_mode(env) && spsel) {
                return env->v7m.other_ss_psp;
            } else {
                return env->v7m.other_ss_msp;
            }
        }
        default:
            break;
        }
    }

    switch (reg) {
    case 8: /* MSP */
        return v7m_using_psp(env) ? env->v7m.other_sp : env->regs[13];
    case 9: /* PSP */
        return v7m_using_psp(env) ? env->regs[13] : env->v7m.other_sp;
    case 10: /* MSPLIM (v8M only) */
        if (!arm_feature(env, ARM_FEATURE_V8)) {
            goto bad_reg;
        }
        return env->v7m.msplim[env->v7m.secure];
    case 11: /* PSPLIM (v8M only) */
        if (!arm_feature(env, ARM_FEATURE_V8)) {
            goto bad_reg;
        }
        return env->v7m.psplim[env->v7m.secure];
    case 16: /* PRIMASK */
        return env->v7m.primask[env->v7m.secure];
    case 17: /* BASEPRI */
    case 18: /* BASEPRI_MAX (reads the same value as BASEPRI) */
        return env->v7m.basepri[env->v7m.secure];
    case 19: /* FAULTMASK */
        return env->v7m.faultmask[env->v7m.secure];
    default:
    bad_reg:
        qemu_log_mask(LOG_GUEST_ERROR, "Attempt to read unknown special"
                      " register %d\n", reg);
        return 0;
    }
}
11055
void HELPER(v7m_msr)(CPUARMState *env, uint32_t maskreg, uint32_t val)
{
    /*
     * Implement the v7M/v8M MSR instruction for special registers.
     * maskreg packs the instruction's mask field (used only for xPSR
     * writes) in bits [11:8] and the SYSm register number in bits [7:0].
     */
    uint32_t mask = extract32(maskreg, 8, 4);
    uint32_t reg = extract32(maskreg, 0, 8);

    if (arm_current_el(env) == 0 && reg > 7) {
        /* Unprivileged code may only write the xPSR sub-fields */
        return;
    }

    if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
        /* 0x88..0x98: Secure-state-only aliases of the NS banked regs */
        switch (reg) {
        case 0x88: /* MSP_NS */
            if (!env->v7m.secure) {
                return;
            }
            env->v7m.other_ss_msp = val;
            return;
        case 0x89: /* PSP_NS */
            if (!env->v7m.secure) {
                return;
            }
            env->v7m.other_ss_psp = val;
            return;
        case 0x8a: /* MSPLIM_NS (limits are 8-byte aligned) */
            if (!env->v7m.secure) {
                return;
            }
            env->v7m.msplim[M_REG_NS] = val & ~7;
            return;
        case 0x8b: /* PSPLIM_NS */
            if (!env->v7m.secure) {
                return;
            }
            env->v7m.psplim[M_REG_NS] = val & ~7;
            return;
        case 0x90: /* PRIMASK_NS */
            if (!env->v7m.secure) {
                return;
            }
            env->v7m.primask[M_REG_NS] = val & 1;
            return;
        case 0x91: /* BASEPRI_NS (Mainline profile only) */
            if (!env->v7m.secure || !arm_feature(env, ARM_FEATURE_M_MAIN)) {
                return;
            }
            env->v7m.basepri[M_REG_NS] = val & 0xff;
            return;
        case 0x93: /* FAULTMASK_NS (Mainline profile only) */
            if (!env->v7m.secure || !arm_feature(env, ARM_FEATURE_M_MAIN)) {
                return;
            }
            env->v7m.faultmask[M_REG_NS] = val & 1;
            return;
        case 0x94: /* CONTROL_NS */
            if (!env->v7m.secure) {
                return;
            }
            write_v7m_control_spsel_for_secstate(env,
                                                 val & R_V7M_CONTROL_SPSEL_MASK,
                                                 M_REG_NS);
            if (arm_feature(env, ARM_FEATURE_M_MAIN)) {
                env->v7m.control[M_REG_NS] &= ~R_V7M_CONTROL_NPRIV_MASK;
                env->v7m.control[M_REG_NS] |= val & R_V7M_CONTROL_NPRIV_MASK;
            }
            return;
        case 0x98: /* SP_NS */
        {
            /* This writes the non-secure SP selected according to the
             * current mode and the NS CONTROL.SPSEL bit.
             */
            bool spsel = env->v7m.control[M_REG_NS] & R_V7M_CONTROL_SPSEL_MASK;
            bool is_psp = !arm_v7m_is_handler_mode(env) && spsel;
            uint32_t limit;

            if (!env->v7m.secure) {
                return;
            }

            limit = is_psp ? env->v7m.psplim[false] : env->v7m.msplim[false];

            if (val < limit) {
                /* Stack limit violation: raise a synchronous STKOF
                 * UsageFault from the guest PC.
                 */
                CPUState *cs = CPU(arm_env_get_cpu(env));

                cpu_restore_state(cs, GETPC(), true);
                raise_exception(env, EXCP_STKOF, 0, 1);
            }

            if (is_psp) {
                env->v7m.other_ss_psp = val;
            } else {
                env->v7m.other_ss_msp = val;
            }
            return;
        }
        default:
            break;
        }
    }

    switch (reg) {
    case 0 ... 7: /* xPSR sub-fields: only the APSR bits are writable */
        if (!(reg & 4)) {
            uint32_t apsrmask = 0;

            if (mask & 8) {
                apsrmask |= XPSR_NZCV | XPSR_Q;
            }
            if ((mask & 4) && arm_feature(env, ARM_FEATURE_THUMB_DSP)) {
                apsrmask |= XPSR_GE;
            }
            xpsr_write(env, val, apsrmask);
        }
        break;
    case 8: /* MSP */
        if (v7m_using_psp(env)) {
            env->v7m.other_sp = val;
        } else {
            env->regs[13] = val;
        }
        break;
    case 9: /* PSP */
        if (v7m_using_psp(env)) {
            env->regs[13] = val;
        } else {
            env->v7m.other_sp = val;
        }
        break;
    case 10: /* MSPLIM (v8M only; limits are 8-byte aligned) */
        if (!arm_feature(env, ARM_FEATURE_V8)) {
            goto bad_reg;
        }
        env->v7m.msplim[env->v7m.secure] = val & ~7;
        break;
    case 11: /* PSPLIM (v8M only) */
        if (!arm_feature(env, ARM_FEATURE_V8)) {
            goto bad_reg;
        }
        env->v7m.psplim[env->v7m.secure] = val & ~7;
        break;
    case 16: /* PRIMASK */
        env->v7m.primask[env->v7m.secure] = val & 1;
        break;
    case 17: /* BASEPRI (Mainline profile only) */
        if (!arm_feature(env, ARM_FEATURE_M_MAIN)) {
            goto bad_reg;
        }
        env->v7m.basepri[env->v7m.secure] = val & 0xff;
        break;
    case 18: /* BASEPRI_MAX */
        if (!arm_feature(env, ARM_FEATURE_M_MAIN)) {
            goto bad_reg;
        }
        val &= 0xff;
        /* Only update if the new value raises the priority, i.e. is
         * numerically lower than the current nonzero BASEPRI.
         */
        if (val != 0 && (val < env->v7m.basepri[env->v7m.secure]
                         || env->v7m.basepri[env->v7m.secure] == 0)) {
            env->v7m.basepri[env->v7m.secure] = val;
        }
        break;
    case 19: /* FAULTMASK (Mainline profile only) */
        if (!arm_feature(env, ARM_FEATURE_M_MAIN)) {
            goto bad_reg;
        }
        env->v7m.faultmask[env->v7m.secure] = val & 1;
        break;
    case 20: /* CONTROL */
        /* The SPSEL write is delegated to write_v7m_control_spsel() and
         * is only performed for v8M or when in thread mode (for v7M an
         * explicit SPSEL write in handler mode is ignored); the NPRIV
         * bit is updated directly on Mainline-profile CPUs.
         */
        if (arm_feature(env, ARM_FEATURE_V8) ||
            !arm_v7m_is_handler_mode(env)) {
            write_v7m_control_spsel(env, (val & R_V7M_CONTROL_SPSEL_MASK) != 0);
        }
        if (arm_feature(env, ARM_FEATURE_M_MAIN)) {
            env->v7m.control[env->v7m.secure] &= ~R_V7M_CONTROL_NPRIV_MASK;
            env->v7m.control[env->v7m.secure] |= val & R_V7M_CONTROL_NPRIV_MASK;
        }
        break;
    default:
    bad_reg:
        qemu_log_mask(LOG_GUEST_ERROR, "Attempt to write unknown special"
                      " register %d\n", reg);
        return;
    }
}
11253
uint32_t HELPER(v7m_tt)(CPUARMState *env, uint32_t addr, uint32_t op)
{
    /*
     * Implement the v8M TT instruction family (TT/TTT/TTA/TTAT):
     * query the MPU (and, if present, SAU) attributes of addr.
     * op bit 0 set = force unprivileged check (TTT/TTAT);
     * op bit 1 set = query the other security state (TTA/TTAT).
     */
    bool forceunpriv = op & 1;
    bool alt = op & 2;
    V8M_SAttributes sattrs = {};
    uint32_t tt_resp;
    bool r, rw, nsr, nsrw, mrvalid;
    int prot;
    ARMMMUFaultInfo fi = {};
    MemTxAttrs attrs = {};
    hwaddr phys_addr;
    ARMMMUIdx mmu_idx;
    uint32_t mregion;
    bool targetpriv;
    bool targetsec = env->v7m.secure;
    bool is_subpage;

    /* Work out which security state and privilege level the query is
     * asking about.
     */
    if (alt) {
        targetsec = !targetsec;
    }

    if (forceunpriv) {
        targetpriv = false;
    } else {
        targetpriv = arm_v7m_is_handler_mode(env) ||
            !(env->v7m.control[targetsec] & R_V7M_CONTROL_NPRIV_MASK);
    }

    /* ...and map that to an MMU index for the lookups */
    mmu_idx = arm_v7m_mmu_idx_for_secstate_and_priv(env, targetsec, targetpriv);

    /* MPU region information is only returned when the caller is
     * privileged or is inspecting the alternate security state; the
     * lookup is done as a data load (the MPU result we want doesn't
     * depend on the access type beyond not being an insn fetch).
     */
    if (arm_current_el(env) != 0 || alt) {
        /* The return value can be ignored: prot is always filled in */
        pmsav8_mpu_lookup(env, addr, MMU_DATA_LOAD, mmu_idx,
                          &phys_addr, &attrs, &prot, &is_subpage,
                          &fi, &mregion);
        if (mregion == -1) {
            /* (uint32_t)-1 from the lookup means "no region matched" */
            mrvalid = false;
            mregion = 0;
        } else {
            mrvalid = true;
        }
        r = prot & PAGE_READ;
        rw = prot & PAGE_WRITE;
    } else {
        r = false;
        rw = false;
        mrvalid = false;
        mregion = 0;
    }

    if (env->v7m.secure) {
        v8m_security_lookup(env, addr, MMU_DATA_LOAD, mmu_idx, &sattrs);
        nsr = sattrs.ns && r;
        nsrw = sattrs.ns && rw;
    } else {
        sattrs.ns = true;
        nsr = false;
        nsrw = false;
    }

    /* Assemble the TT response word:
     * IREGION[31:24] IRVALID[23] S[22] NSRW[21] NSR[20] RW[19] R[18]
     * SRVALID[17] MRVALID[16] SREGION[15:8] MREGION[7:0]
     */
    tt_resp = (sattrs.iregion << 24) |
        (sattrs.irvalid << 23) |
        ((!sattrs.ns) << 22) |
        (nsrw << 21) |
        (nsr << 20) |
        (rw << 19) |
        (r << 18) |
        (sattrs.srvalid << 17) |
        (mrvalid << 16) |
        (sattrs.sregion << 8) |
        mregion;

    return tt_resp;
}
11341
11342#endif
11343
void HELPER(dc_zva)(CPUARMState *env, uint64_t vaddr_in)
{
    /*
     * Implement DC ZVA: zero the naturally-aligned block of memory
     * (of size 4 << DCZID.BS) containing vaddr_in.
     */
    ARMCPU *cpu = arm_env_get_cpu(env);
    uint64_t blocklen = 4 << cpu->dcz_blocksize;
    uint64_t vaddr = vaddr_in & ~(blocklen - 1);

#ifndef CONFIG_USER_ONLY
    {
        /* The block may span multiple TARGET_PAGEs.  Fast path: resolve
         * every page of the block to a host address via the TLB and
         * memset it directly.  If any page isn't resident, do a byte
         * store to fault it in and retry once; anything still
         * unresolved (e.g. MMIO) falls through to byte-at-a-time
         * stores.
         */
        int maxidx = DIV_ROUND_UP(blocklen, TARGET_PAGE_SIZE);
        void *hostaddr[maxidx];
        int try, i;
        unsigned mmu_idx = cpu_mmu_index(env, false);
        TCGMemOpIdx oi = make_memop_idx(MO_UB, mmu_idx);

        for (try = 0; try < 2; try++) {
            /* Look up a host pointer for each page of the block */
            for (i = 0; i < maxidx; i++) {
                hostaddr[i] = tlb_vaddr_to_host(env,
                                                vaddr + TARGET_PAGE_SIZE * i,
                                                1, mmu_idx);
                if (!hostaddr[i]) {
                    break;
                }
            }
            if (i == maxidx) {
                /* Every page resolved: zero whole pages, then the
                 * (possibly partial) final page of the block.
                 */
                for (i = 0; i < maxidx - 1; i++) {
                    memset(hostaddr[i], 0, TARGET_PAGE_SIZE);
                }
                memset(hostaddr[i], 0, blocklen - (i * TARGET_PAGE_SIZE));
                return;
            }
            /* Page i wasn't resident: do one byte store to the original
             * address to take any fault and refill the TLB, then touch
             * the remaining pages of the block the same way before
             * retrying the fast path.
             */
            helper_ret_stb_mmu(env, vaddr_in, 0, oi, GETPC());

            for (i = 0; i < maxidx; i++) {
                uint64_t va = vaddr + TARGET_PAGE_SIZE * i;
                if (va != (vaddr_in & TARGET_PAGE_MASK)) {
                    helper_ret_stb_mmu(env, va, 0, oi, GETPC());
                }
            }
        }

        /* Slow path: the destination still isn't directly addressable
         * (e.g. it is MMIO), so store zero one byte at a time.
         */
        for (i = 0; i < blocklen; i++) {
            helper_ret_stb_mmu(env, vaddr + i, 0, oi, GETPC());
        }
    }
#else
    /* User-mode emulation: guest memory is directly addressable */
    memset(g2h(vaddr), 0, blocklen);
#endif
}
11427
11428
11429
11430
11431
11432
11433
11434
static inline uint16_t add16_sat(uint16_t a, uint16_t b)
{
    /* Signed saturating 16-bit add: clamp the exact sum to
     * [-0x8000, 0x7fff] and return it as an unsigned bit pattern.
     */
    int32_t sum = (int32_t)(int16_t)a + (int32_t)(int16_t)b;

    if (sum > 0x7fff) {
        sum = 0x7fff;
    } else if (sum < -0x8000) {
        sum = -0x8000;
    }
    return (uint16_t)sum;
}
11448
11449
static inline uint8_t add8_sat(uint8_t a, uint8_t b)
{
    /* Signed saturating 8-bit add: clamp the exact sum to
     * [-0x80, 0x7f] and return it as an unsigned bit pattern.
     */
    int32_t sum = (int32_t)(int8_t)a + (int32_t)(int8_t)b;

    if (sum > 0x7f) {
        sum = 0x7f;
    } else if (sum < -0x80) {
        sum = -0x80;
    }
    return (uint8_t)sum;
}
11463
11464
static inline uint16_t sub16_sat(uint16_t a, uint16_t b)
{
    /* Signed saturating 16-bit subtract: clamp the exact difference
     * to [-0x8000, 0x7fff] and return it as an unsigned bit pattern.
     */
    int32_t diff = (int32_t)(int16_t)a - (int32_t)(int16_t)b;

    if (diff > 0x7fff) {
        diff = 0x7fff;
    } else if (diff < -0x8000) {
        diff = -0x8000;
    }
    return (uint16_t)diff;
}
11478
11479
static inline uint8_t sub8_sat(uint8_t a, uint8_t b)
{
    /* Signed saturating 8-bit subtract: clamp the exact difference
     * to [-0x80, 0x7f] and return it as an unsigned bit pattern.
     */
    int32_t diff = (int32_t)(int8_t)a - (int32_t)(int8_t)b;

    if (diff > 0x7f) {
        diff = 0x7f;
    } else if (diff < -0x80) {
        diff = -0x80;
    }
    return (uint8_t)diff;
}
11493
/* Signed saturating parallel add/sub ('q' prefix): instantiate the
 * op_addsub.h template with the saturating helpers above.
 */
#define ADD16(a, b, n) RESULT(add16_sat(a, b), n, 16);
#define SUB16(a, b, n) RESULT(sub16_sat(a, b), n, 16);
#define ADD8(a, b, n) RESULT(add8_sat(a, b), n, 8);
#define SUB8(a, b, n) RESULT(sub8_sat(a, b), n, 8);
#define PFX q

#include "op_addsub.h"
11501
11502
static inline uint16_t add16_usat(uint16_t a, uint16_t b)
{
    /* Unsigned saturating 16-bit add: clamp to 0xffff on overflow. */
    uint32_t sum = (uint32_t)a + b;

    return sum > 0xffff ? 0xffff : (uint16_t)sum;
}
11511
static inline uint16_t sub16_usat(uint16_t a, uint16_t b)
{
    /* Unsigned saturating 16-bit subtract: floor the result at zero. */
    return a > b ? (uint16_t)(a - b) : 0;
}
11519
static inline uint8_t add8_usat(uint8_t a, uint8_t b)
{
    /* Unsigned saturating 8-bit add: clamp to 0xff on overflow. */
    uint32_t sum = (uint32_t)a + b;

    return sum > 0xff ? 0xff : (uint8_t)sum;
}
11528
static inline uint8_t sub8_usat(uint8_t a, uint8_t b)
{
    /* Unsigned saturating 8-bit subtract: floor the result at zero. */
    return a > b ? (uint8_t)(a - b) : 0;
}
11536
/* Unsigned saturating parallel add/sub ('uq' prefix): instantiate the
 * op_addsub.h template with the unsigned saturating helpers above.
 */
#define ADD16(a, b, n) RESULT(add16_usat(a, b), n, 16);
#define SUB16(a, b, n) RESULT(sub16_usat(a, b), n, 16);
#define ADD8(a, b, n) RESULT(add8_usat(a, b), n, 8);
#define SUB8(a, b, n) RESULT(sub8_usat(a, b), n, 8);
#define PFX uq

#include "op_addsub.h"
11544
11545
/* Signed modulo parallel add/sub with GE flag setting ('s' prefix):
 * the GE bits for a lane are set when its (widened) result is >= 0.
 */
#define SARITH16(a, b, n, op) do { \
    int32_t sum; \
    sum = (int32_t)(int16_t)(a) op (int32_t)(int16_t)(b); \
    RESULT(sum, n, 16); \
    if (sum >= 0) \
        ge |= 3 << (n * 2); \
    } while(0)

#define SARITH8(a, b, n, op) do { \
    int32_t sum; \
    sum = (int32_t)(int8_t)(a) op (int32_t)(int8_t)(b); \
    RESULT(sum, n, 8); \
    if (sum >= 0) \
        ge |= 1 << n; \
    } while(0)


#define ADD16(a, b, n) SARITH16(a, b, n, +)
#define SUB16(a, b, n) SARITH16(a, b, n, -)
#define ADD8(a, b, n)  SARITH8(a, b, n, +)
#define SUB8(a, b, n)  SARITH8(a, b, n, -)
#define PFX s
#define ARITH_GE

#include "op_addsub.h"
11571
11572
/* Unsigned modulo parallel add/sub with GE flag setting ('u' prefix):
 * the GE bits come from the carry out of an add / no-borrow of a sub.
 */
#define ADD16(a, b, n) do { \
    uint32_t sum; \
    sum = (uint32_t)(uint16_t)(a) + (uint32_t)(uint16_t)(b); \
    RESULT(sum, n, 16); \
    if ((sum >> 16) == 1) \
        ge |= 3 << (n * 2); \
    } while(0)

#define ADD8(a, b, n) do { \
    uint32_t sum; \
    sum = (uint32_t)(uint8_t)(a) + (uint32_t)(uint8_t)(b); \
    RESULT(sum, n, 8); \
    if ((sum >> 8) == 1) \
        ge |= 1 << n; \
    } while(0)

#define SUB16(a, b, n) do { \
    uint32_t sum; \
    sum = (uint32_t)(uint16_t)(a) - (uint32_t)(uint16_t)(b); \
    RESULT(sum, n, 16); \
    if ((sum >> 16) == 0) \
        ge |= 3 << (n * 2); \
    } while(0)

#define SUB8(a, b, n) do { \
    uint32_t sum; \
    sum = (uint32_t)(uint8_t)(a) - (uint32_t)(uint8_t)(b); \
    RESULT(sum, n, 8); \
    if ((sum >> 8) == 0) \
        ge |= 1 << n; \
    } while(0)

#define PFX u
#define ARITH_GE

#include "op_addsub.h"
11609
11610
/* Halving signed parallel add/sub ('sh' prefix): each lane result is
 * the exact signed sum/difference shifted right by one (no overflow
 * is possible in the widened arithmetic).
 */
#define ADD16(a, b, n) \
    RESULT(((int32_t)(int16_t)(a) + (int32_t)(int16_t)(b)) >> 1, n, 16)
#define SUB16(a, b, n) \
    RESULT(((int32_t)(int16_t)(a) - (int32_t)(int16_t)(b)) >> 1, n, 16)
#define ADD8(a, b, n) \
    RESULT(((int32_t)(int8_t)(a) + (int32_t)(int8_t)(b)) >> 1, n, 8)
#define SUB8(a, b, n) \
    RESULT(((int32_t)(int8_t)(a) - (int32_t)(int8_t)(b)) >> 1, n, 8)
#define PFX sh

#include "op_addsub.h"
11622
11623
/* Halving unsigned parallel add/sub ('uh' prefix): like 'sh' but the
 * lanes are treated as unsigned.
 */
#define ADD16(a, b, n) \
    RESULT(((uint32_t)(uint16_t)(a) + (uint32_t)(uint16_t)(b)) >> 1, n, 16)
#define SUB16(a, b, n) \
    RESULT(((uint32_t)(uint16_t)(a) - (uint32_t)(uint16_t)(b)) >> 1, n, 16)
#define ADD8(a, b, n) \
    RESULT(((uint32_t)(uint8_t)(a) + (uint32_t)(uint8_t)(b)) >> 1, n, 8)
#define SUB8(a, b, n) \
    RESULT(((uint32_t)(uint8_t)(a) - (uint32_t)(uint8_t)(b)) >> 1, n, 8)
#define PFX uh

#include "op_addsub.h"
11635
static inline uint8_t do_usad(uint8_t a, uint8_t b)
{
    /* Absolute difference of two unsigned bytes. */
    return a < b ? b - a : a - b;
}
11643
11644
11645uint32_t HELPER(usad8)(uint32_t a, uint32_t b)
11646{
11647 uint32_t sum;
11648 sum = do_usad(a, b);
11649 sum += do_usad(a >> 8, b >> 8);
11650 sum += do_usad(a >> 16, b >>16);
11651 sum += do_usad(a >> 24, b >> 24);
11652 return sum;
11653}
11654
11655
11656uint32_t HELPER(sel_flags)(uint32_t flags, uint32_t a, uint32_t b)
11657{
11658 uint32_t mask;
11659
11660 mask = 0;
11661 if (flags & 1)
11662 mask |= 0xff;
11663 if (flags & 2)
11664 mask |= 0xff00;
11665 if (flags & 4)
11666 mask |= 0xff0000;
11667 if (flags & 8)
11668 mask |= 0xff000000;
11669 return (a & mask) | (b & ~mask);
11670}
11671
11672
11673
11674
11675
11676
11677static inline int vfp_exceptbits_from_host(int host_bits)
11678{
11679 int target_bits = 0;
11680
11681 if (host_bits & float_flag_invalid)
11682 target_bits |= 1;
11683 if (host_bits & float_flag_divbyzero)
11684 target_bits |= 2;
11685 if (host_bits & float_flag_overflow)
11686 target_bits |= 4;
11687 if (host_bits & (float_flag_underflow | float_flag_output_denormal))
11688 target_bits |= 8;
11689 if (host_bits & float_flag_inexact)
11690 target_bits |= 0x10;
11691 if (host_bits & float_flag_input_denormal)
11692 target_bits |= 0x80;
11693 return target_bits;
11694}
11695
uint32_t HELPER(vfp_get_fpscr)(CPUARMState *env)
{
    /* Assemble the FPSCR value from the stored sticky bits, the live
     * vector length/stride fields, and the accumulated softfloat
     * exception flags from all three float_status structures.
     */
    int i;
    uint32_t fpscr;

    fpscr = (env->vfp.xregs[ARM_VFP_FPSCR] & 0xffc8ffff)
            | (env->vfp.vec_len << 16)
            | (env->vfp.vec_stride << 20);

    i = get_float_exception_flags(&env->vfp.fp_status);
    i |= get_float_exception_flags(&env->vfp.standard_fp_status);
    /* FP16 flushing with FZ16 must not report an input denormal */
    i |= (get_float_exception_flags(&env->vfp.fp_status_f16)
          & ~float_flag_input_denormal);

    fpscr |= vfp_exceptbits_from_host(i);
    return fpscr;
}
11714
11715uint32_t vfp_get_fpscr(CPUARMState *env)
11716{
11717 return HELPER(vfp_get_fpscr)(env);
11718}
11719
11720
11721static inline int vfp_exceptbits_to_host(int target_bits)
11722{
11723 int host_bits = 0;
11724
11725 if (target_bits & 1)
11726 host_bits |= float_flag_invalid;
11727 if (target_bits & 2)
11728 host_bits |= float_flag_divbyzero;
11729 if (target_bits & 4)
11730 host_bits |= float_flag_overflow;
11731 if (target_bits & 8)
11732 host_bits |= float_flag_underflow;
11733 if (target_bits & 0x10)
11734 host_bits |= float_flag_inexact;
11735 if (target_bits & 0x80)
11736 host_bits |= float_flag_input_denormal;
11737 return host_bits;
11738}
11739
/* Write the FPSCR: store the register value, cache LEN/STRIDE, and
 * propagate any changed control bits (rounding mode, FZ, FZ16, DN)
 * down into the relevant softfloat float_status structures.
 */
void HELPER(vfp_set_fpscr)(CPUARMState *env, uint32_t val)
{
    int i;
    uint32_t changed;

    /* FZ16 is ignored when half-precision FP is not supported. */
    if (!cpu_isar_feature(aa64_fp16, arm_env_get_cpu(env))) {
        val &= ~FPCR_FZ16;
    }

    changed = env->vfp.xregs[ARM_VFP_FPSCR];
    env->vfp.xregs[ARM_VFP_FPSCR] = (val & 0xffc8ffff);
    env->vfp.vec_len = (val >> 16) & 7;
    env->vfp.vec_stride = (val >> 20) & 3;

    /* Only touch the softfloat state for bits that actually changed. */
    changed ^= val;
    if (changed & (3 << 22)) {
        /* RMode field (bits [23:22]) changed: translate the FPROUNDING
         * encoding to the softfloat rounding-mode constant.
         */
        i = (val >> 22) & 3;
        switch (i) {
        case FPROUNDING_TIEEVEN:
            i = float_round_nearest_even;
            break;
        case FPROUNDING_POSINF:
            i = float_round_up;
            break;
        case FPROUNDING_NEGINF:
            i = float_round_down;
            break;
        case FPROUNDING_ZERO:
            i = float_round_to_zero;
            break;
        }
        set_float_rounding_mode(i, &env->vfp.fp_status);
        set_float_rounding_mode(i, &env->vfp.fp_status_f16);
    }
    if (changed & FPCR_FZ16) {
        /* FZ16 controls flushing for half-precision only. */
        bool ftz_enabled = val & FPCR_FZ16;
        set_flush_to_zero(ftz_enabled, &env->vfp.fp_status_f16);
        set_flush_inputs_to_zero(ftz_enabled, &env->vfp.fp_status_f16);
    }
    if (changed & FPCR_FZ) {
        bool ftz_enabled = val & FPCR_FZ;
        set_flush_to_zero(ftz_enabled, &env->vfp.fp_status);
        set_flush_inputs_to_zero(ftz_enabled, &env->vfp.fp_status);
    }
    if (changed & FPCR_DN) {
        bool dnan_enabled = val & FPCR_DN;
        set_default_nan_mode(dnan_enabled, &env->vfp.fp_status);
        set_default_nan_mode(dnan_enabled, &env->vfp.fp_status_f16);
    }

    /* The exception-status bits are held in the main fp_status; the
     * fp16 and standard statuses are cleared (their flags were merged
     * on the preceding read in vfp_get_fpscr()).
     */
    i = vfp_exceptbits_to_host(val);
    set_float_exception_flags(i, &env->vfp.fp_status);
    set_float_exception_flags(0, &env->vfp.fp_status_f16);
    set_float_exception_flags(0, &env->vfp.standard_fp_status);
}

/* Non-helper-framework entry point for writing the FPSCR. */
void vfp_set_fpscr(CPUARMState *env, uint32_t val)
{
    HELPER(vfp_set_fpscr)(env, val);
}
11805
/* Helper-name glue: VFP_HELPER(foo, s) expands to helper_vfp_foos etc. */
#define VFP_HELPER(name, p) HELPER(glue(glue(vfp_,name),p))

/* Generate the single- and double-precision binary-op helpers, each
 * taking an explicit float_status pointer.
 */
#define VFP_BINOP(name) \
float32 VFP_HELPER(name, s)(float32 a, float32 b, void *fpstp) \
{ \
    float_status *fpst = fpstp; \
    return float32_ ## name(a, b, fpst); \
} \
float64 VFP_HELPER(name, d)(float64 a, float64 b, void *fpstp) \
{ \
    float_status *fpst = fpstp; \
    return float64_ ## name(a, b, fpst); \
}
VFP_BINOP(add)
VFP_BINOP(sub)
VFP_BINOP(mul)
VFP_BINOP(div)
VFP_BINOP(min)
VFP_BINOP(max)
VFP_BINOP(minnum)
VFP_BINOP(maxnum)
#undef VFP_BINOP
11828
/* Negate: flip the sign bit (no exceptions, NaNs pass through). */
float32 VFP_HELPER(neg, s)(float32 a)
{
    return float32_chs(a);
}

float64 VFP_HELPER(neg, d)(float64 a)
{
    return float64_chs(a);
}

/* Absolute value: clear the sign bit (no exceptions). */
float32 VFP_HELPER(abs, s)(float32 a)
{
    return float32_abs(a);
}

float64 VFP_HELPER(abs, d)(float64 a)
{
    return float64_abs(a);
}

/* Square root, using the main (FPSCR-controlled) fp_status. */
float32 VFP_HELPER(sqrt, s)(float32 a, CPUARMState *env)
{
    return float32_sqrt(a, &env->vfp.fp_status);
}

float64 VFP_HELPER(sqrt, d)(float64 a, CPUARMState *env)
{
    return float64_sqrt(a, &env->vfp.fp_status);
}
11858
11859
/* Comparison helpers: write the result into the FPSCR NZCV flags
 * (bits [31:28]).  Flag encodings: equal -> 0110 (ZC), less-than ->
 * 1000 (N), greater-than -> 0010 (C), unordered -> 0011 (CV).
 * "cmp" uses the quiet compare (only signaling NaNs trap); "cmpe"
 * uses the signaling compare (any NaN raises InvalidOp).
 */
#define DO_VFP_cmp(p, type) \
void VFP_HELPER(cmp, p)(type a, type b, CPUARMState *env)  \
{ \
    uint32_t flags; \
    switch(type ## _compare_quiet(a, b, &env->vfp.fp_status)) { \
    case 0: flags = 0x6; break; \
    case -1: flags = 0x8; break; \
    case 1: flags = 0x2; break; \
    default: case 2: flags = 0x3; break; \
    } \
    env->vfp.xregs[ARM_VFP_FPSCR] = (flags << 28) \
        | (env->vfp.xregs[ARM_VFP_FPSCR] & 0x0fffffff); \
} \
void VFP_HELPER(cmpe, p)(type a, type b, CPUARMState *env) \
{ \
    uint32_t flags; \
    switch(type ## _compare(a, b, &env->vfp.fp_status)) { \
    case 0: flags = 0x6; break; \
    case -1: flags = 0x8; break; \
    case 1: flags = 0x2; break; \
    default: case 2: flags = 0x3; break; \
    } \
    env->vfp.xregs[ARM_VFP_FPSCR] = (flags << 28) \
        | (env->vfp.xregs[ARM_VFP_FPSCR] & 0x0fffffff); \
}
DO_VFP_cmp(s, float32)
DO_VFP_cmp(d, float64)
#undef DO_VFP_cmp
11888
11889
11890
/* Integer <-> float conversion helpers.
 * CONV_ITOF: (u)int32 -> float of the given size.
 * CONV_FTOI: float -> (u)int32; NaN inputs raise InvalidOp and
 * convert to 0 rather than using softfloat's NaN result.
 * FLOAT_CONVS instantiates both directions plus a round-to-zero
 * float-to-int variant for each precision.
 */
#define CONV_ITOF(name, ftype, fsz, sign) \
ftype HELPER(name)(uint32_t x, void *fpstp) \
{ \
    float_status *fpst = fpstp; \
    return sign##int32_to_##float##fsz((sign##int32_t)x, fpst); \
}

#define CONV_FTOI(name, ftype, fsz, sign, round) \
sign##int32_t HELPER(name)(ftype x, void *fpstp) \
{ \
    float_status *fpst = fpstp; \
    if (float##fsz##_is_any_nan(x)) { \
        float_raise(float_flag_invalid, fpst); \
        return 0; \
    } \
    return float##fsz##_to_##sign##int32##round(x, fpst); \
}

#define FLOAT_CONVS(name, p, ftype, fsz, sign) \
    CONV_ITOF(vfp_##name##to##p, ftype, fsz, sign) \
    CONV_FTOI(vfp_to##name##p, ftype, fsz, sign, ) \
    CONV_FTOI(vfp_to##name##z##p, ftype, fsz, sign, _round_to_zero)

FLOAT_CONVS(si, h, uint32_t, 16, )
FLOAT_CONVS(si, s, float32, 32, )
FLOAT_CONVS(si, d, float64, 64, )
FLOAT_CONVS(ui, h, uint32_t, 16, u)
FLOAT_CONVS(ui, s, float32, 32, u)
FLOAT_CONVS(ui, d, float64, 64, u)

#undef CONV_ITOF
#undef CONV_FTOI
#undef FLOAT_CONVS

/* Single <-> double precision conversions. */
float64 VFP_HELPER(fcvtd, s)(float32 x, CPUARMState *env)
{
    return float32_to_float64(x, &env->vfp.fp_status);
}

float32 VFP_HELPER(fcvts, d)(float64 x, CPUARMState *env)
{
    return float64_to_float32(x, &env->vfp.fp_status);
}
11935
11936
/* Fixed-point <-> floating-point conversion helpers.  The fixed-point
 * value has "shift" fraction bits, implemented by scaling with
 * 2^-shift (int to float) or 2^+shift (float to int) via _scalbn.
 */
#define VFP_CONV_FIX_FLOAT(name, p, fsz, isz, itype) \
float##fsz HELPER(vfp_##name##to##p)(uint##isz##_t  x, uint32_t shift, \
                                     void *fpstp) \
{ return itype##_to_##float##fsz##_scalbn(x, -shift, fpstp); }

/* Float to fixed-point with an explicit rounding mode; NaN inputs
 * raise InvalidOp and return 0.
 */
#define VFP_CONV_FLOAT_FIX_ROUND(name, p, fsz, isz, itype, ROUND, suff) \
uint##isz##_t HELPER(vfp_to##name##p##suff)(float##fsz x, uint32_t shift, \
                                            void *fpst) \
{ \
    if (unlikely(float##fsz##_is_any_nan(x))) { \
        float_raise(float_flag_invalid, fpst); \
        return 0; \
    } \
    return float##fsz##_to_##itype##_scalbn(x, ROUND, shift, fpst); \
}

/* AArch32 gets both a round-to-zero and a current-rounding-mode
 * variant; A64 (VFP_CONV_FIX_A64) only needs the latter.
 */
#define VFP_CONV_FIX(name, p, fsz, isz, itype) \
VFP_CONV_FIX_FLOAT(name, p, fsz, isz, itype) \
VFP_CONV_FLOAT_FIX_ROUND(name, p, fsz, isz, itype, \
                         float_round_to_zero, _round_to_zero) \
VFP_CONV_FLOAT_FIX_ROUND(name, p, fsz, isz, itype, \
                         get_float_rounding_mode(fpst), )

#define VFP_CONV_FIX_A64(name, p, fsz, isz, itype) \
VFP_CONV_FIX_FLOAT(name, p, fsz, isz, itype) \
VFP_CONV_FLOAT_FIX_ROUND(name, p, fsz, isz, itype, \
                         get_float_rounding_mode(fpst), )

VFP_CONV_FIX(sh, d, 64, 64, int16)
VFP_CONV_FIX(sl, d, 64, 64, int32)
VFP_CONV_FIX_A64(sq, d, 64, 64, int64)
VFP_CONV_FIX(uh, d, 64, 64, uint16)
VFP_CONV_FIX(ul, d, 64, 64, uint32)
VFP_CONV_FIX_A64(uq, d, 64, 64, uint64)
VFP_CONV_FIX(sh, s, 32, 32, int16)
VFP_CONV_FIX(sl, s, 32, 32, int32)
VFP_CONV_FIX_A64(sq, s, 32, 64, int64)
VFP_CONV_FIX(uh, s, 32, 32, uint16)
VFP_CONV_FIX(ul, s, 32, 32, uint32)
VFP_CONV_FIX_A64(uq, s, 32, 64, uint64)

#undef VFP_CONV_FIX
#undef VFP_CONV_FIX_FLOAT
#undef VFP_CONV_FLOAT_FIX_ROUND
#undef VFP_CONV_FIX_A64
11982
/* Integer to half-precision conversions with a fixed-point shift:
 * the integer input is scaled by 2^-shift during the conversion.
 */
uint32_t HELPER(vfp_sltoh)(uint32_t x, uint32_t shift, void *fpst)
{
    return int32_to_float16_scalbn(x, -shift, fpst);
}

uint32_t HELPER(vfp_ultoh)(uint32_t x, uint32_t shift, void *fpst)
{
    return uint32_to_float16_scalbn(x, -shift, fpst);
}

uint32_t HELPER(vfp_sqtoh)(uint64_t x, uint32_t shift, void *fpst)
{
    return int64_to_float16_scalbn(x, -shift, fpst);
}

uint32_t HELPER(vfp_uqtoh)(uint64_t x, uint32_t shift, void *fpst)
{
    return uint64_to_float16_scalbn(x, -shift, fpst);
}

/* Half-precision to fixed-point integer conversions, using the current
 * rounding mode.  NaN inputs raise InvalidOp and convert to 0 instead
 * of softfloat's default NaN result.
 */
uint32_t HELPER(vfp_toshh)(uint32_t x, uint32_t shift, void *fpst)
{
    if (unlikely(float16_is_any_nan(x))) {
        float_raise(float_flag_invalid, fpst);
        return 0;
    }
    return float16_to_int16_scalbn(x, get_float_rounding_mode(fpst),
                                   shift, fpst);
}

uint32_t HELPER(vfp_touhh)(uint32_t x, uint32_t shift, void *fpst)
{
    if (unlikely(float16_is_any_nan(x))) {
        float_raise(float_flag_invalid, fpst);
        return 0;
    }
    return float16_to_uint16_scalbn(x, get_float_rounding_mode(fpst),
                                    shift, fpst);
}

uint32_t HELPER(vfp_toslh)(uint32_t x, uint32_t shift, void *fpst)
{
    if (unlikely(float16_is_any_nan(x))) {
        float_raise(float_flag_invalid, fpst);
        return 0;
    }
    return float16_to_int32_scalbn(x, get_float_rounding_mode(fpst),
                                   shift, fpst);
}

uint32_t HELPER(vfp_toulh)(uint32_t x, uint32_t shift, void *fpst)
{
    if (unlikely(float16_is_any_nan(x))) {
        float_raise(float_flag_invalid, fpst);
        return 0;
    }
    return float16_to_uint32_scalbn(x, get_float_rounding_mode(fpst),
                                    shift, fpst);
}

uint64_t HELPER(vfp_tosqh)(uint32_t x, uint32_t shift, void *fpst)
{
    if (unlikely(float16_is_any_nan(x))) {
        float_raise(float_flag_invalid, fpst);
        return 0;
    }
    return float16_to_int64_scalbn(x, get_float_rounding_mode(fpst),
                                   shift, fpst);
}

uint64_t HELPER(vfp_touqh)(uint32_t x, uint32_t shift, void *fpst)
{
    if (unlikely(float16_is_any_nan(x))) {
        float_raise(float_flag_invalid, fpst);
        return 0;
    }
    return float16_to_uint64_scalbn(x, get_float_rounding_mode(fpst),
                                    shift, fpst);
}
12062
12063
12064
12065
12066uint32_t HELPER(set_rmode)(uint32_t rmode, void *fpstp)
12067{
12068 float_status *fp_status = fpstp;
12069
12070 uint32_t prev_rmode = get_float_rounding_mode(fp_status);
12071 set_float_rounding_mode(rmode, fp_status);
12072
12073 return prev_rmode;
12074}
12075
12076
12077
12078
12079
12080
12081
12082
12083uint32_t HELPER(set_neon_rmode)(uint32_t rmode, CPUARMState *env)
12084{
12085 float_status *fp_status = &env->vfp.standard_fp_status;
12086
12087 uint32_t prev_rmode = get_float_rounding_mode(fp_status);
12088 set_float_rounding_mode(rmode, fp_status);
12089
12090 return prev_rmode;
12091}
12092
12093
/* Half <-> single/double conversions.  ahp_mode selects the ARM
 * Alternative Half-Precision format; the inverted value is passed to
 * softfloat as its "ieee" flag.  Input/output flushing is temporarily
 * disabled around each conversion so that half-precision denormals
 * convert exactly regardless of the FZ/FZ16 settings.
 */
float32 HELPER(vfp_fcvt_f16_to_f32)(uint32_t a, void *fpstp, uint32_t ahp_mode)
{
    float_status *fpst = fpstp;
    flag save = get_flush_inputs_to_zero(fpst);
    set_flush_inputs_to_zero(false, fpst);
    float32 r = float16_to_float32(a, !ahp_mode, fpst);
    set_flush_inputs_to_zero(save, fpst);
    return r;
}

uint32_t HELPER(vfp_fcvt_f32_to_f16)(float32 a, void *fpstp, uint32_t ahp_mode)
{
    /* For the narrowing direction it is the output flush that must be
     * suppressed, so tiny results become f16 denormals rather than zero.
     */
    float_status *fpst = fpstp;
    flag save = get_flush_to_zero(fpst);
    set_flush_to_zero(false, fpst);
    float16 r = float32_to_float16(a, !ahp_mode, fpst);
    set_flush_to_zero(save, fpst);
    return r;
}

float64 HELPER(vfp_fcvt_f16_to_f64)(uint32_t a, void *fpstp, uint32_t ahp_mode)
{
    float_status *fpst = fpstp;
    flag save = get_flush_inputs_to_zero(fpst);
    set_flush_inputs_to_zero(false, fpst);
    float64 r = float16_to_float64(a, !ahp_mode, fpst);
    set_flush_inputs_to_zero(save, fpst);
    return r;
}

uint32_t HELPER(vfp_fcvt_f64_to_f16)(float64 a, void *fpstp, uint32_t ahp_mode)
{
    float_status *fpst = fpstp;
    flag save = get_flush_to_zero(fpst);
    set_flush_to_zero(false, fpst);
    float16 r = float64_to_float16(a, !ahp_mode, fpst);
    set_flush_to_zero(save, fpst);
    return r;
}
12145
#define float32_two make_float32(0x40000000)            /* 2.0f */
#define float32_three make_float32(0x40400000)          /* 3.0f */
#define float32_one_point_five make_float32(0x3fc00000) /* 1.5f */

/* VRECPS: one Newton-Raphson reciprocal step, 2.0 - a*b, on the Neon
 * standard float_status.  The inf * (zero-or-denormal) combination is
 * special-cased to return exactly 2.0; input-denormal is raised only
 * when the small operand was a denormal rather than a true zero.
 */
float32 HELPER(recps_f32)(float32 a, float32 b, CPUARMState *env)
{
    float_status *s = &env->vfp.standard_fp_status;
    if ((float32_is_infinity(a) && float32_is_zero_or_denormal(b)) ||
        (float32_is_infinity(b) && float32_is_zero_or_denormal(a))) {
        if (!(float32_is_zero(a) || float32_is_zero(b))) {
            float_raise(float_flag_input_denormal, s);
        }
        return float32_two;
    }
    return float32_sub(float32_two, float32_mul(a, b, s), s);
}

/* VRSQRTS: one Newton-Raphson reciprocal-square-root step,
 * (3.0 - a*b) / 2.0, with the same inf * (zero-or-denormal)
 * special case returning exactly 1.5.
 */
float32 HELPER(rsqrts_f32)(float32 a, float32 b, CPUARMState *env)
{
    float_status *s = &env->vfp.standard_fp_status;
    float32 product;
    if ((float32_is_infinity(a) && float32_is_zero_or_denormal(b)) ||
        (float32_is_infinity(b) && float32_is_zero_or_denormal(a))) {
        if (!(float32_is_zero(a) || float32_is_zero(b))) {
            float_raise(float_flag_input_denormal, s);
        }
        return float32_one_point_five;
    }
    product = float32_mul(a, b, s);
    return float32_div(float32_sub(float32_three, product, s), float32_two, s);
}
12177
12178
12179
12180
12181
/* Constants used by the reciprocal / reciprocal-sqrt estimate helpers. */
#define float64_256 make_float64(0x4070000000000000LL)  /* 256.0 */
#define float64_512 make_float64(0x4080000000000000LL)  /* 512.0 */
/* Largest finite (non-infinite) value of each precision. */
#define float16_maxnorm make_float16(0x7bff)
#define float32_maxnorm make_float32(0x7f7fffff)
#define float64_maxnorm make_float64(0x7fefffffffffffffLL)
12187
12188
12189
12190
12191
12192
12193
12194
12195
12196
12197
12198
12199
12200
/* 8-bit reciprocal estimate: given an input in [256, 512) representing
 * a fraction in [0.5, 1.0), return the reciprocal estimate in the same
 * [256, 512) fixed-point range (UnsignedRecipEstimate in the ARM ARM).
 */
static int recip_estimate(int input)
{
    int doubled, quotient, estimate;

    assert(256 <= input && input < 512);
    doubled = 2 * input + 1;            /* mid-point of the input bucket */
    quotient = (1 << 19) / doubled;
    estimate = (quotient + 1) >> 1;     /* round to nearest */
    assert(256 <= estimate && estimate < 512);
    return estimate;
}
12211
12212
12213
12214
12215
12216
12217
12218
12219
12220
/* Common wrapper around recip_estimate() for the float recpe helpers.
 * Normalizes a subnormal input (*exp == 0), runs the 8-bit estimate on
 * the top fraction bits, computes the result exponent as exp_off - exp,
 * and denormalizes results whose exponent would be 0 or -1.  Returns
 * the result fraction in float64 position (bits [51:44] significant);
 * *exp is updated in place to the result exponent.
 */
static uint64_t call_recip_estimate(int *exp, int exp_off, uint64_t frac)
{
    uint32_t scaled, estimate;
    uint64_t result_frac;
    int result_exp;

    /* Handle subnormals: shift the fraction up until bit 51 is the
     * leading bit, adjusting the effective exponent.
     */
    if (*exp == 0) {
        if (extract64(frac, 51, 1) == 0) {
            *exp = -1;
            frac <<= 2;
        } else {
            frac <<= 1;
        }
    }

    /* Build the 9-bit estimate input: implicit 1 plus fraction bits [51:44]. */
    scaled = deposit32(1 << 8, 0, 8, extract64(frac, 44, 8));
    estimate = recip_estimate(scaled);

    result_exp = exp_off - *exp;
    result_frac = deposit64(0, 44, 8, estimate);
    /* A result exponent of 0 or -1 means a subnormal result: shift the
     * fraction down and insert the now-explicit leading one.
     */
    if (result_exp == 0) {
        result_frac = deposit64(result_frac >> 1, 51, 1, 1);
    } else if (result_exp == -1) {
        result_frac = deposit64(result_frac >> 2, 50, 2, 1);
        result_exp = 0;
    }

    *exp = result_exp;

    return result_frac;
}
12254
/* Should an overflowing estimate round to infinity (true) or saturate
 * at the largest finite value (false), given the current rounding mode
 * and the sign of the result?
 */
static bool round_to_inf(float_status *fpst, bool sign_bit)
{
    switch (fpst->float_rounding_mode) {
    case float_round_nearest_even:      /* Round to Nearest */
        return true;
    case float_round_up:                /* Round towards +inf */
        return !sign_bit;
    case float_round_down:              /* Round towards -inf */
        return sign_bit;
    case float_round_to_zero:           /* Round towards zero */
        return false;
    }

    /* Other rounding modes are not legal here. */
    g_assert_not_reached();
}
12270
/* FRECPE / VRECPE: reciprocal estimate, half precision.  Special cases
 * per the ARM pseudocode: NaNs propagate (signaling NaNs raise
 * InvalidOp and are quietened, default NaN in DN mode), +/-inf -> +/-0,
 * +/-0 -> +/-inf with DivideByZero, very small magnitudes overflow to
 * inf or maxnorm depending on rounding mode, and near-maximum exponents
 * underflow to zero when flush-to-zero is set.
 */
uint32_t HELPER(recpe_f16)(uint32_t input, void *fpstp)
{
    float_status *fpst = fpstp;
    float16 f16 = float16_squash_input_denormal(input, fpst);
    uint32_t f16_val = float16_val(f16);
    uint32_t f16_sign = float16_is_neg(f16);
    int f16_exp = extract32(f16_val, 10, 5);
    uint32_t f16_frac = extract32(f16_val, 0, 10);
    uint64_t f64_frac;

    if (float16_is_any_nan(f16)) {
        float16 nan = f16;
        if (float16_is_signaling_nan(f16, fpst)) {
            float_raise(float_flag_invalid, fpst);
            nan = float16_silence_nan(f16, fpst);
        }
        if (fpst->default_nan_mode) {
            nan = float16_default_nan(fpst);
        }
        return nan;
    } else if (float16_is_infinity(f16)) {
        return float16_set_sign(float16_zero, float16_is_neg(f16));
    } else if (float16_is_zero(f16)) {
        float_raise(float_flag_divbyzero, fpst);
        return float16_set_sign(float16_infinity, float16_is_neg(f16));
    } else if (float16_abs(f16) < (1 << 8)) {
        /* Input magnitude too small: the reciprocal overflows. */
        float_raise(float_flag_overflow | float_flag_inexact, fpst);
        if (round_to_inf(fpst, f16_sign)) {
            return float16_set_sign(float16_infinity, f16_sign);
        } else {
            return float16_set_sign(float16_maxnorm, f16_sign);
        }
    } else if (f16_exp >= 29 && fpst->flush_to_zero) {
        float_raise(float_flag_underflow, fpst);
        return float16_set_sign(float16_zero, float16_is_neg(f16));
    }

    /* Run the estimate with the fraction promoted to float64 position. */
    f64_frac = call_recip_estimate(&f16_exp, 29,
                                   ((uint64_t) f16_frac) << (52 - 10));

    /* result = sign : result_exp<4:0> : fraction<51:42> */
    f16_val = deposit32(0, 15, 1, f16_sign);
    f16_val = deposit32(f16_val, 10, 5, f16_exp);
    f16_val = deposit32(f16_val, 0, 10, extract64(f64_frac, 52 - 10, 10));
    return make_float16(f16_val);
}

/* As recpe_f16, for single precision. */
float32 HELPER(recpe_f32)(float32 input, void *fpstp)
{
    float_status *fpst = fpstp;
    float32 f32 = float32_squash_input_denormal(input, fpst);
    uint32_t f32_val = float32_val(f32);
    bool f32_sign = float32_is_neg(f32);
    int f32_exp = extract32(f32_val, 23, 8);
    uint32_t f32_frac = extract32(f32_val, 0, 23);
    uint64_t f64_frac;

    if (float32_is_any_nan(f32)) {
        float32 nan = f32;
        if (float32_is_signaling_nan(f32, fpst)) {
            float_raise(float_flag_invalid, fpst);
            nan = float32_silence_nan(f32, fpst);
        }
        if (fpst->default_nan_mode) {
            nan = float32_default_nan(fpst);
        }
        return nan;
    } else if (float32_is_infinity(f32)) {
        return float32_set_sign(float32_zero, float32_is_neg(f32));
    } else if (float32_is_zero(f32)) {
        float_raise(float_flag_divbyzero, fpst);
        return float32_set_sign(float32_infinity, float32_is_neg(f32));
    } else if (float32_abs(f32) < (1ULL << 21)) {
        /* Input magnitude too small: the reciprocal overflows. */
        float_raise(float_flag_overflow | float_flag_inexact, fpst);
        if (round_to_inf(fpst, f32_sign)) {
            return float32_set_sign(float32_infinity, f32_sign);
        } else {
            return float32_set_sign(float32_maxnorm, f32_sign);
        }
    } else if (f32_exp >= 253 && fpst->flush_to_zero) {
        float_raise(float_flag_underflow, fpst);
        return float32_set_sign(float32_zero, float32_is_neg(f32));
    }

    f64_frac = call_recip_estimate(&f32_exp, 253,
                                   ((uint64_t) f32_frac) << (52 - 23));

    /* result = sign : result_exp<7:0> : fraction<51:29> */
    f32_val = deposit32(0, 31, 1, f32_sign);
    f32_val = deposit32(f32_val, 23, 8, f32_exp);
    f32_val = deposit32(f32_val, 0, 23, extract64(f64_frac, 52 - 23, 23));
    return make_float32(f32_val);
}

/* As recpe_f16, for double precision. */
float64 HELPER(recpe_f64)(float64 input, void *fpstp)
{
    float_status *fpst = fpstp;
    float64 f64 = float64_squash_input_denormal(input, fpst);
    uint64_t f64_val = float64_val(f64);
    bool f64_sign = float64_is_neg(f64);
    int f64_exp = extract64(f64_val, 52, 11);
    uint64_t f64_frac = extract64(f64_val, 0, 52);

    /* Deal with any special cases */
    if (float64_is_any_nan(f64)) {
        float64 nan = f64;
        if (float64_is_signaling_nan(f64, fpst)) {
            float_raise(float_flag_invalid, fpst);
            nan = float64_silence_nan(f64, fpst);
        }
        if (fpst->default_nan_mode) {
            nan = float64_default_nan(fpst);
        }
        return nan;
    } else if (float64_is_infinity(f64)) {
        return float64_set_sign(float64_zero, float64_is_neg(f64));
    } else if (float64_is_zero(f64)) {
        float_raise(float_flag_divbyzero, fpst);
        return float64_set_sign(float64_infinity, float64_is_neg(f64));
    } else if ((f64_val & ~(1ULL << 63)) < (1ULL << 50)) {
        /* Input magnitude too small: the reciprocal overflows. */
        float_raise(float_flag_overflow | float_flag_inexact, fpst);
        if (round_to_inf(fpst, f64_sign)) {
            return float64_set_sign(float64_infinity, f64_sign);
        } else {
            return float64_set_sign(float64_maxnorm, f64_sign);
        }
    } else if (f64_exp >= 2045 && fpst->flush_to_zero) {
        float_raise(float_flag_underflow, fpst);
        return float64_set_sign(float64_zero, float64_is_neg(f64));
    }

    f64_frac = call_recip_estimate(&f64_exp, 2045, f64_frac);

    /* result = sign : result_exp<10:0> : fraction<51:0> */
    f64_val = deposit64(0, 63, 1, f64_sign);
    f64_val = deposit64(f64_val, 52, 11, f64_exp);
    f64_val = deposit64(f64_val, 0, 52, f64_frac);
    return make_float64(f64_val);
}
12413
12414
12415
12416
12417
/* 8-bit reciprocal-square-root estimate: the input in [128, 512)
 * represents a fraction in [0.25, 1.0); the returned estimate lies in
 * [256, 512) (UnsignedRSqrtEstimate in the ARM ARM).
 */
static int do_recip_sqrt_estimate(int a)
{
    int b = 512;
    int estimate;

    assert(128 <= a && a < 512);
    /* Map the input onto the mid-point of its bucket, doubled. */
    if (a < 256) {
        a = 2 * a + 1;
    } else {
        /* (a | 1) * 2 == ((a >> 1) << 1 + 1) * 2: force odd, then double. */
        a = (a | 1) * 2;
    }
    /* Find the largest b with a * (b + 1)^2 < 2^28. */
    while (a * (b + 1) * (b + 1) < (1 << 28)) {
        b++;
    }
    estimate = (b + 1) / 2;
    assert(256 <= estimate && estimate < 512);

    return estimate;
}
12438
12439
/* Common wrapper around do_recip_sqrt_estimate() for the float rsqrte
 * helpers.  Normalizes subnormal inputs, selects 7 or 8 fraction bits
 * according to the exponent's parity, halves the offset exponent, and
 * returns the 8-bit estimate positioned at float64 fraction bits [51:44].
 */
static uint64_t recip_sqrt_estimate(int *exp , int exp_off, uint64_t frac)
{
    int estimate;
    uint32_t scaled;

    /* Normalize a subnormal fraction, tracking the shift in *exp. */
    if (*exp == 0) {
        while (extract64(frac, 51, 1) == 0) {
            frac = frac << 1;
            *exp -= 1;
        }
        frac = extract64(frac, 0, 51) << 1;
    }

    if (*exp & 1) {
        /* Odd exponent: scaled value is in [0.25, 0.5); use 7 frac bits. */
        scaled = deposit32(1 << 7, 0, 7, extract64(frac, 45, 7));
    } else {
        /* Even exponent: scaled value is in [0.5, 1.0); use 8 frac bits. */
        scaled = deposit32(1 << 8, 0, 8, extract64(frac, 44, 8));
    }
    estimate = do_recip_sqrt_estimate(scaled);

    /* The square root halves the exponent. */
    *exp = (exp_off - *exp) / 2;
    return extract64(estimate, 0, 8) << 44;
}
12465
/* FRSQRTE / VRSQRTE: reciprocal square root estimate, half precision.
 * Special cases per the ARM pseudocode: NaNs propagate (signaling NaNs
 * raise InvalidOp and are quietened, default NaN in DN mode), +/-0 ->
 * +/-inf with DivideByZero, negative inputs raise InvalidOp and return
 * the default NaN, +inf -> +0.
 */
uint32_t HELPER(rsqrte_f16)(uint32_t input, void *fpstp)
{
    float_status *s = fpstp;
    float16 f16 = float16_squash_input_denormal(input, s);
    uint16_t val = float16_val(f16);
    bool f16_sign = float16_is_neg(f16);
    int f16_exp = extract32(val, 10, 5);
    uint16_t f16_frac = extract32(val, 0, 10);
    uint64_t f64_frac;

    if (float16_is_any_nan(f16)) {
        float16 nan = f16;
        if (float16_is_signaling_nan(f16, s)) {
            float_raise(float_flag_invalid, s);
            nan = float16_silence_nan(f16, s);
        }
        if (s->default_nan_mode) {
            nan = float16_default_nan(s);
        }
        return nan;
    } else if (float16_is_zero(f16)) {
        float_raise(float_flag_divbyzero, s);
        return float16_set_sign(float16_infinity, f16_sign);
    } else if (f16_sign) {
        float_raise(float_flag_invalid, s);
        return float16_default_nan(s);
    } else if (float16_is_infinity(f16)) {
        return float16_zero;
    }

    /* Promote the fraction to float64 position and run the estimate. */
    f64_frac = ((uint64_t) f16_frac) << (52 - 10);

    f64_frac = recip_sqrt_estimate(&f16_exp, 44, f64_frac);

    /* result = sign : result_exp<4:0> : estimate<7:2> : Zeros(2) */
    val = deposit32(0, 15, 1, f16_sign);
    val = deposit32(val, 10, 5, f16_exp);
    val = deposit32(val, 2, 8, extract64(f64_frac, 52 - 8, 8));
    return make_float16(val);
}

/* As rsqrte_f16, for single precision. */
float32 HELPER(rsqrte_f32)(float32 input, void *fpstp)
{
    float_status *s = fpstp;
    float32 f32 = float32_squash_input_denormal(input, s);
    uint32_t val = float32_val(f32);
    uint32_t f32_sign = float32_is_neg(f32);
    int f32_exp = extract32(val, 23, 8);
    uint32_t f32_frac = extract32(val, 0, 23);
    uint64_t f64_frac;

    if (float32_is_any_nan(f32)) {
        float32 nan = f32;
        if (float32_is_signaling_nan(f32, s)) {
            float_raise(float_flag_invalid, s);
            nan = float32_silence_nan(f32, s);
        }
        if (s->default_nan_mode) {
            nan = float32_default_nan(s);
        }
        return nan;
    } else if (float32_is_zero(f32)) {
        float_raise(float_flag_divbyzero, s);
        return float32_set_sign(float32_infinity, float32_is_neg(f32));
    } else if (float32_is_neg(f32)) {
        float_raise(float_flag_invalid, s);
        return float32_default_nan(s);
    } else if (float32_is_infinity(f32)) {
        return float32_zero;
    }

    /* Promote the fraction to float64 position (52 - 23 = 29 bits). */
    f64_frac = ((uint64_t) f32_frac) << 29;

    f64_frac = recip_sqrt_estimate(&f32_exp, 380, f64_frac);

    /* result = sign : result_exp<7:0> : estimate<7:0> : Zeros(15) */
    val = deposit32(0, 31, 1, f32_sign);
    val = deposit32(val, 23, 8, f32_exp);
    val = deposit32(val, 15, 8, extract64(f64_frac, 52 - 8, 8));
    return make_float32(val);
}
12553
12554float64 HELPER(rsqrte_f64)(float64 input, void *fpstp)
12555{
12556 float_status *s = fpstp;
12557 float64 f64 = float64_squash_input_denormal(input, s);
12558 uint64_t val = float64_val(f64);
12559 bool f64_sign = float64_is_neg(f64);
12560 int f64_exp = extract64(val, 52, 11);
12561 uint64_t f64_frac = extract64(val, 0, 52);
12562
12563 if (float64_is_any_nan(f64)) {
12564 float64 nan = f64;
12565 if (float64_is_signaling_nan(f64, s)) {
12566 float_raise(float_flag_invalid, s);
12567 nan = float64_silence_nan(f64, s);
12568 }
12569 if (s->default_nan_mode) {
12570 nan = float64_default_nan(s);
12571 }
12572 return nan;
12573 } else if (float64_is_zero(f64)) {
12574 float_raise(float_flag_divbyzero, s);
12575 return float64_set_sign(float64_infinity, float64_is_neg(f64));
12576 } else if (float64_is_neg(f64)) {
12577 float_raise(float_flag_invalid, s);
12578 return float64_default_nan(s);
12579 } else if (float64_is_infinity(f64)) {
12580 return float64_zero;
12581 }
12582
12583 f64_frac = recip_sqrt_estimate(&f64_exp, 3068, f64_frac);
12584
12585
12586 val = deposit64(0, 61, 1, f64_sign);
12587 val = deposit64(val, 52, 11, f64_exp);
12588 val = deposit64(val, 44, 8, extract64(f64_frac, 52 - 8, 8));
12589 return make_float64(val);
12590}
12591
/* URECPE: unsigned integer reciprocal estimate.  The input is treated
 * as a 0.32 fixed-point fraction; inputs below 0.5 (top bit clear)
 * saturate to all-ones.  fpstp is unused but kept for a uniform helper
 * signature.
 */
uint32_t HELPER(recpe_u32)(uint32_t a, void *fpstp)
{
    int input, estimate;

    if ((a & 0x80000000) == 0) {
        return 0xffffffff;
    }

    /* Estimate from the 9 bits below the (known-set) top bit. */
    input = extract32(a, 23, 9);
    estimate = recip_estimate(input);

    return deposit32(0, (32 - 9), 9, estimate);
}

/* URSQRTE: unsigned integer reciprocal square root estimate.  Inputs
 * below 0.25 (top two bits clear) saturate to all-ones.
 */
uint32_t HELPER(rsqrte_u32)(uint32_t a, void *fpstp)
{
    int estimate;

    if ((a & 0xc0000000) == 0) {
        return 0xffffffff;
    }

    estimate = do_recip_sqrt_estimate(extract32(a, 23, 9));

    return deposit32(0, 23, 9, estimate);
}
12619
12620
/* VFPv4 fused multiply-accumulate: a * b + c with a single rounding. */
float32 VFP_HELPER(muladd, s)(float32 a, float32 b, float32 c, void *fpstp)
{
    float_status *fpst = fpstp;
    return float32_muladd(a, b, c, 0, fpst);
}

float64 VFP_HELPER(muladd, d)(float64 a, float64 b, float64 c, void *fpstp)
{
    float_status *fpst = fpstp;
    return float64_muladd(a, b, c, 0, fpst);
}

/* Round to integral, keeping any inexact exception the rounding raises. */
float32 HELPER(rints_exact)(float32 x, void *fp_status)
{
    return float32_round_to_int(x, fp_status);
}

float64 HELPER(rintd_exact)(float64 x, void *fp_status)
{
    return float64_round_to_int(x, fp_status);
}

/* Round to integral, suppressing the inexact exception unless it was
 * already pending before the operation.
 */
float32 HELPER(rints)(float32 x, void *fp_status)
{
    int old_flags = get_float_exception_flags(fp_status), new_flags;
    float32 ret;

    ret = float32_round_to_int(x, fp_status);

    /* Suppress any inexact exception the rounding may have raised. */
    if (!(old_flags & float_flag_inexact)) {
        new_flags = get_float_exception_flags(fp_status);
        set_float_exception_flags(new_flags & ~float_flag_inexact, fp_status);
    }

    return ret;
}
12659
12660float64 HELPER(rintd)(float64 x, void *fp_status)
12661{
12662 int old_flags = get_float_exception_flags(fp_status), new_flags;
12663 float64 ret;
12664
12665 ret = float64_round_to_int(x, fp_status);
12666
12667 new_flags = get_float_exception_flags(fp_status);
12668
12669
12670 if (!(old_flags & float_flag_inexact)) {
12671 new_flags = get_float_exception_flags(fp_status);
12672 set_float_exception_flags(new_flags & ~float_flag_inexact, fp_status);
12673 }
12674
12675 return ret;
12676}
12677
12678
/* Convert an ARM FPROUNDING rounding-mode encoding to the softfloat
 * float_round_* constant.
 */
int arm_rmode_to_sf(int rmode)
{
    switch (rmode) {
    case FPROUNDING_TIEAWAY:
        rmode = float_round_ties_away;
        break;
    case FPROUNDING_ODD:
        /* Round-to-odd is not supported by softfloat; log it and fall
         * through to treat it as round-to-nearest-even.
         */
        qemu_log_mask(LOG_UNIMP, "arm: unimplemented rounding mode: %d\n",
                      rmode);
        /* fall through for now */
    case FPROUNDING_TIEEVEN:
    default:
        rmode = float_round_nearest_even;
        break;
    case FPROUNDING_POSINF:
        rmode = float_round_up;
        break;
    case FPROUNDING_NEGINF:
        rmode = float_round_down;
        break;
    case FPROUNDING_ZERO:
        rmode = float_round_to_zero;
        break;
    }
    return rmode;
}
12706
12707
12708
12709
12710
/* CRC32 instruction helper: fold the low 'bytes' bytes of val (stored
 * little-endian) into the accumulator.  zlib's crc32() pre- and
 * post-inverts its accumulator, so invert on the way in and out to get
 * the raw polynomial division the ARM instruction specifies.
 */
uint32_t HELPER(crc32)(uint32_t acc, uint32_t val, uint32_t bytes)
{
    uint8_t buf[4];

    stl_le_p(buf, val);

    return crc32(acc ^ 0xffffffff, buf, bytes) ^ 0xffffffff;
}

/* CRC32C (Castagnoli) helper: qemu's crc32c() only post-inverts, so
 * only the output needs the correcting inversion here.
 */
uint32_t HELPER(crc32c)(uint32_t acc, uint32_t val, uint32_t bytes)
{
    uint8_t buf[4];

    stl_le_p(buf, val);

    return crc32c(acc, buf, bytes) ^ 0xffffffff;
}
12730
12731
12732
12733
/* Return the exception level to which FP/SIMD accesses from cur_el are
 * trapped, or 0 if no trap applies.  Considers CPACR_EL1.FPEN,
 * CPTR_EL2.TFP and CPTR_EL3.TFP in that order.
 */
int fp_exception_el(CPUARMState *env, int cur_el)
{
#ifndef CONFIG_USER_ONLY
    int fpen;

    /* CPACR and the CPTR registers do not exist before v6, so FP is
     * always accessible there.
     */
    if (!arm_feature(env, ARM_FEATURE_V6)) {
        return 0;
    }

    /* CPACR_EL1.FPEN (bits [21:20]) controls traps to EL1/PL1:
     *   0, 2 : trap EL0 and EL1/PL1 accesses
     *   1    : trap only EL0 accesses
     *   3    : trap no accesses
     */
    fpen = extract32(env->cp15.cpacr_el1, 20, 2);
    switch (fpen) {
    case 0:
    case 2:
        if (cur_el == 0 || cur_el == 1) {
            /* Secure state with AArch32 EL3 routes the trap to EL3. */
            if (arm_is_secure(env) && !arm_el_is_aa64(env, 3)) {
                return 3;
            }
            return 1;
        }
        if (cur_el == 3 && !is_a64(env)) {
            /* Secure PL1 running at Monitor level traps to EL3. */
            return 3;
        }
        break;
    case 1:
        if (cur_el == 0) {
            return 1;
        }
        break;
    case 3:
        break;
    }

    /* CPTR_EL2.TFP (bit 10): trap non-secure EL0-EL2 FP accesses to
     * EL2.  Zero bits mean "don't trap", so no feature guard is needed
     * before reading the register.
     */
    if (cur_el <= 2 && extract32(env->cp15.cptr_el[2], 10, 1)
        && !arm_is_secure_below_el3(env)) {
        /* Trap FP ops at EL2: NS-EL1 or NS-EL0 to EL2 */
        return 2;
    }

    /* CPTR_EL3.TFP (bit 10): trap all remaining FP accesses to EL3. */
    if (extract32(env->cp15.cptr_el[3], 10, 1)) {
        /* Trap all FP ops to EL3 */
        return 3;
    }
#endif
    return 0;
}
12795
/* Compute the PC, cs_base and TB flags used to look up or generate a
 * translation block for the current CPU state.
 */
void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
                          target_ulong *cs_base, uint32_t *pflags)
{
    ARMMMUIdx mmu_idx = core_to_arm_mmu_idx(env, cpu_mmu_index(env, false));
    int current_el = arm_current_el(env);
    int fp_el = fp_exception_el(env, current_el);
    uint32_t flags;

    if (is_a64(env)) {
        ARMCPU *cpu = arm_env_get_cpu(env);

        *pc = env->pc;
        flags = ARM_TBFLAG_AARCH64_STATE_MASK;
        /* Top-byte-ignore status for the two halves of the address space. */
        flags |= (arm_regime_tbi0(env, mmu_idx) << ARM_TBFLAG_TBI0_SHIFT);
        flags |= (arm_regime_tbi1(env, mmu_idx) << ARM_TBFLAG_TBI1_SHIFT);

        if (cpu_isar_feature(aa64_sve, cpu)) {
            int sve_el = sve_exception_el(env, current_el);
            uint32_t zcr_len;

            /* If SVE is disabled but FP is enabled, the effective
             * vector length is 0.
             */
            if (sve_el != 0 && fp_el == 0) {
                zcr_len = 0;
            } else {
                zcr_len = sve_zcr_len_for_el(env, current_el);
            }
            flags |= sve_el << ARM_TBFLAG_SVEEXC_EL_SHIFT;
            flags |= zcr_len << ARM_TBFLAG_ZCR_LEN_SHIFT;
        }
    } else {
        *pc = env->regs[15];
        flags = (env->thumb << ARM_TBFLAG_THUMB_SHIFT)
            | (env->vfp.vec_len << ARM_TBFLAG_VECLEN_SHIFT)
            | (env->vfp.vec_stride << ARM_TBFLAG_VECSTRIDE_SHIFT)
            | (env->condexec_bits << ARM_TBFLAG_CONDEXEC_SHIFT)
            | (arm_sctlr_b(env) << ARM_TBFLAG_SCTLR_B_SHIFT);
        if (!(access_secure_reg(env))) {
            flags |= ARM_TBFLAG_NS_MASK;
        }
        if (env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30)
            || arm_el_is_aa64(env, 1)) {
            flags |= ARM_TBFLAG_VFPEN_MASK;
        }
        flags |= (extract32(env->cp15.c15_cpar, 0, 2)
                  << ARM_TBFLAG_XSCALE_CPAR_SHIFT);
    }

    flags |= (arm_to_core_mmu_idx(mmu_idx) << ARM_TBFLAG_MMUIDX_SHIFT);

    /* The SS_ACTIVE and PSTATE_SS TB flags correspond to the software
     * single-step state machine:
     *   SS_ACTIVE  PSTATE.SS  State
     *      0           x      Inactive (SS TB flag is always 0)
     *      1           0      Active, pending
     *      1           1      Active, not pending
     */
    if (arm_singlestep_active(env)) {
        flags |= ARM_TBFLAG_SS_ACTIVE_MASK;
        if (is_a64(env)) {
            if (env->pstate & PSTATE_SS) {
                flags |= ARM_TBFLAG_PSTATE_SS_MASK;
            }
        } else {
            if (env->uncached_cpsr & PSTATE_SS) {
                flags |= ARM_TBFLAG_PSTATE_SS_MASK;
            }
        }
    }
    if (arm_cpu_data_is_big_endian(env)) {
        flags |= ARM_TBFLAG_BE_DATA_MASK;
    }
    flags |= fp_el << ARM_TBFLAG_FPEXC_EL_SHIFT;

    if (arm_v7m_is_handler_mode(env)) {
        flags |= ARM_TBFLAG_HANDLER_MASK;
    }

    /* v8M always applies stack-limit checks unless CCR.STKOFHFNMIGN
     * suppresses them because execution priority is negative.
     */
    if (arm_feature(env, ARM_FEATURE_V8) &&
        arm_feature(env, ARM_FEATURE_M) &&
        !((mmu_idx  & ARM_MMU_IDX_M_NEGPRI) &&
          (env->v7m.ccr[env->v7m.secure] & R_V7M_CCR_STKOFHFNMIGN_MASK))) {
        flags |= ARM_TBFLAG_STACKCHECK_MASK;
    }

    *pflags = flags;
    *cs_base = 0;
}
12889
12890#ifdef TARGET_AARCH64
12891
12892
12893
12894
12895
12896
12897
12898
12899
12900
12901
12902
12903
12904
12905void aarch64_sve_narrow_vq(CPUARMState *env, unsigned vq)
12906{
12907 int i, j;
12908 uint64_t pmask;
12909
12910 assert(vq >= 1 && vq <= ARM_MAX_VQ);
12911 assert(vq <= arm_env_get_cpu(env)->sve_max_vq);
12912
12913
12914 for (i = 0; i < 32; i++) {
12915 memset(&env->vfp.zregs[i].d[2 * vq], 0, 16 * (ARM_MAX_VQ - vq));
12916 }
12917
12918
12919 pmask = 0;
12920 if (vq & 3) {
12921 pmask = ~(-1ULL << (16 * (vq & 3)));
12922 }
12923 for (j = vq / 4; j < ARM_MAX_VQ / 4; j++) {
12924 for (i = 0; i < 17; ++i) {
12925 env->vfp.pregs[i].p[j] &= pmask;
12926 }
12927 pmask = 0;
12928 }
12929}
12930
12931
12932
12933
12934void aarch64_sve_change_el(CPUARMState *env, int old_el,
12935 int new_el, bool el0_a64)
12936{
12937 ARMCPU *cpu = arm_env_get_cpu(env);
12938 int old_len, new_len;
12939 bool old_a64, new_a64;
12940
12941
12942 if (!cpu_isar_feature(aa64_sve, cpu)) {
12943 return;
12944 }
12945
12946
12947 if (fp_exception_el(env, old_el) || fp_exception_el(env, new_el)) {
12948 return;
12949 }
12950
12951
12952
12953
12954
12955
12956
12957
12958
12959
12960
12961
12962
12963 old_a64 = old_el ? arm_el_is_aa64(env, old_el) : el0_a64;
12964 old_len = (old_a64 && !sve_exception_el(env, old_el)
12965 ? sve_zcr_len_for_el(env, old_el) : 0);
12966 new_a64 = new_el ? arm_el_is_aa64(env, new_el) : el0_a64;
12967 new_len = (new_a64 && !sve_exception_el(env, new_el)
12968 ? sve_zcr_len_for_el(env, new_el) : 0);
12969
12970
12971 if (new_len < old_len) {
12972 aarch64_sve_narrow_vq(env, new_len + 1);
12973 }
12974}
12975#endif
12976