1#include "qemu/osdep.h"
2#include "target/arm/idau.h"
3#include "trace.h"
4#include "cpu.h"
5#include "internals.h"
6#include "exec/gdbstub.h"
7#include "exec/helper-proto.h"
8#include "qemu/host-utils.h"
9#include "sysemu/arch_init.h"
10#include "sysemu/sysemu.h"
11#include "qemu/bitops.h"
12#include "qemu/crc32c.h"
13#include "exec/exec-all.h"
14#include "exec/cpu_ldst.h"
15#include "arm_ldst.h"
16#include <zlib.h>
17#include "exec/semihost.h"
18#include "sysemu/kvm.h"
19#include "fpu/softfloat.h"
20#include "qemu/range.h"
21
22#define ARM_CPU_FREQ 1000000000
23
#ifndef CONFIG_USER_ONLY

/* Cacheability/shareability attributes returned by a translation
 * table walk; attrs is in MAIR format.
 */
typedef struct ARMCacheAttrs {
    unsigned int attrs:8; /* memory attributes, MAIR encoding */
    unsigned int shareability:2; /* shareability field from the descriptor */
} ARMCacheAttrs;

/* Translate a virtual address to a physical address; returns true on
 * a fault (details in *fi), false on success.
 */
static bool get_phys_addr(CPUARMState *env, target_ulong address,
                          MMUAccessType access_type, ARMMMUIdx mmu_idx,
                          hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot,
                          target_ulong *page_size,
                          ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs);

/* LPAE (long descriptor) format page table walk. */
static bool get_phys_addr_lpae(CPUARMState *env, target_ulong address,
                               MMUAccessType access_type, ARMMMUIdx mmu_idx,
                               hwaddr *phys_ptr, MemTxAttrs *txattrs, int *prot,
                               target_ulong *page_size_ptr,
                               ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs);


/* Security attribution unit lookup result for v8M. */
typedef struct V8M_SAttributes {
    bool subpage; /* true if attrs are not valid for the whole page */
    bool ns;
    bool nsc;
    uint8_t sregion;
    bool srvalid;
    uint8_t iregion;
    bool irvalid;
} V8M_SAttributes;

static void v8m_security_lookup(CPUARMState *env, uint32_t address,
                                MMUAccessType access_type, ARMMMUIdx mmu_idx,
                                V8M_SAttributes *sattrs);
#endif
58
59static int vfp_gdb_get_reg(CPUARMState *env, uint8_t *buf, int reg)
60{
61 int nregs;
62
63
64 nregs = arm_feature(env, ARM_FEATURE_VFP3) ? 32 : 16;
65 if (reg < nregs) {
66 stq_le_p(buf, *aa32_vfp_dreg(env, reg));
67 return 8;
68 }
69 if (arm_feature(env, ARM_FEATURE_NEON)) {
70
71 nregs += 16;
72 if (reg < nregs) {
73 uint64_t *q = aa32_vfp_qreg(env, reg - 32);
74 stq_le_p(buf, q[0]);
75 stq_le_p(buf + 8, q[1]);
76 return 16;
77 }
78 }
79 switch (reg - nregs) {
80 case 0: stl_p(buf, env->vfp.xregs[ARM_VFP_FPSID]); return 4;
81 case 1: stl_p(buf, env->vfp.xregs[ARM_VFP_FPSCR]); return 4;
82 case 2: stl_p(buf, env->vfp.xregs[ARM_VFP_FPEXC]); return 4;
83 }
84 return 0;
85}
86
87static int vfp_gdb_set_reg(CPUARMState *env, uint8_t *buf, int reg)
88{
89 int nregs;
90
91 nregs = arm_feature(env, ARM_FEATURE_VFP3) ? 32 : 16;
92 if (reg < nregs) {
93 *aa32_vfp_dreg(env, reg) = ldq_le_p(buf);
94 return 8;
95 }
96 if (arm_feature(env, ARM_FEATURE_NEON)) {
97 nregs += 16;
98 if (reg < nregs) {
99 uint64_t *q = aa32_vfp_qreg(env, reg - 32);
100 q[0] = ldq_le_p(buf);
101 q[1] = ldq_le_p(buf + 8);
102 return 16;
103 }
104 }
105 switch (reg - nregs) {
106 case 0: env->vfp.xregs[ARM_VFP_FPSID] = ldl_p(buf); return 4;
107 case 1: env->vfp.xregs[ARM_VFP_FPSCR] = ldl_p(buf); return 4;
108 case 2: env->vfp.xregs[ARM_VFP_FPEXC] = ldl_p(buf) & (1 << 30); return 4;
109 }
110 return 0;
111}
112
113static int aarch64_fpu_gdb_get_reg(CPUARMState *env, uint8_t *buf, int reg)
114{
115 switch (reg) {
116 case 0 ... 31:
117
118 {
119 uint64_t *q = aa64_vfp_qreg(env, reg);
120 stq_le_p(buf, q[0]);
121 stq_le_p(buf + 8, q[1]);
122 return 16;
123 }
124 case 32:
125
126 stl_p(buf, vfp_get_fpsr(env));
127 return 4;
128 case 33:
129
130 stl_p(buf, vfp_get_fpcr(env));
131 return 4;
132 default:
133 return 0;
134 }
135}
136
137static int aarch64_fpu_gdb_set_reg(CPUARMState *env, uint8_t *buf, int reg)
138{
139 switch (reg) {
140 case 0 ... 31:
141
142 {
143 uint64_t *q = aa64_vfp_qreg(env, reg);
144 q[0] = ldq_le_p(buf);
145 q[1] = ldq_le_p(buf + 8);
146 return 16;
147 }
148 case 32:
149
150 vfp_set_fpsr(env, ldl_p(buf));
151 return 4;
152 case 33:
153
154 vfp_set_fpcr(env, ldl_p(buf));
155 return 4;
156 default:
157 return 0;
158 }
159}
160
161static uint64_t raw_read(CPUARMState *env, const ARMCPRegInfo *ri)
162{
163 assert(ri->fieldoffset);
164 if (cpreg_field_is_64bit(ri)) {
165 return CPREG_FIELD64(env, ri);
166 } else {
167 return CPREG_FIELD32(env, ri);
168 }
169}
170
171static void raw_write(CPUARMState *env, const ARMCPRegInfo *ri,
172 uint64_t value)
173{
174 assert(ri->fieldoffset);
175 if (cpreg_field_is_64bit(ri)) {
176 CPREG_FIELD64(env, ri) = value;
177 } else {
178 CPREG_FIELD32(env, ri) = value;
179 }
180}
181
182static void *raw_ptr(CPUARMState *env, const ARMCPRegInfo *ri)
183{
184 return (char *)env + ri->fieldoffset;
185}
186
187uint64_t read_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri)
188{
189
190 if (ri->type & ARM_CP_CONST) {
191 return ri->resetvalue;
192 } else if (ri->raw_readfn) {
193 return ri->raw_readfn(env, ri);
194 } else if (ri->readfn) {
195 return ri->readfn(env, ri);
196 } else {
197 return raw_read(env, ri);
198 }
199}
200
201static void write_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri,
202 uint64_t v)
203{
204
205
206
207
208
209 if (ri->type & ARM_CP_CONST) {
210 return;
211 } else if (ri->raw_writefn) {
212 ri->raw_writefn(env, ri, v);
213 } else if (ri->writefn) {
214 ri->writefn(env, ri, v);
215 } else {
216 raw_write(env, ri, v);
217 }
218}
219
220static int arm_gdb_get_sysreg(CPUARMState *env, uint8_t *buf, int reg)
221{
222 ARMCPU *cpu = arm_env_get_cpu(env);
223 const ARMCPRegInfo *ri;
224 uint32_t key;
225
226 key = cpu->dyn_xml.cpregs_keys[reg];
227 ri = get_arm_cp_reginfo(cpu->cp_regs, key);
228 if (ri) {
229 if (cpreg_field_is_64bit(ri)) {
230 return gdb_get_reg64(buf, (uint64_t)read_raw_cp_reg(env, ri));
231 } else {
232 return gdb_get_reg32(buf, (uint32_t)read_raw_cp_reg(env, ri));
233 }
234 }
235 return 0;
236}
237
/* gdbstub writes to system registers are not supported; ignore them. */
static int arm_gdb_set_sysreg(CPUARMState *env, uint8_t *buf, int reg)
{
    return 0;
}
242
243static bool raw_accessors_invalid(const ARMCPRegInfo *ri)
244{
245
246
247
248
249
250
251
252
253
254
255
256 if ((ri->type & ARM_CP_CONST) ||
257 ri->fieldoffset ||
258 ((ri->raw_writefn || ri->writefn) && (ri->raw_readfn || ri->readfn))) {
259 return false;
260 }
261 return true;
262}
263
264bool write_cpustate_to_list(ARMCPU *cpu)
265{
266
267 int i;
268 bool ok = true;
269
270 for (i = 0; i < cpu->cpreg_array_len; i++) {
271 uint32_t regidx = kvm_to_cpreg_id(cpu->cpreg_indexes[i]);
272 const ARMCPRegInfo *ri;
273
274 ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
275 if (!ri) {
276 ok = false;
277 continue;
278 }
279 if (ri->type & ARM_CP_NO_RAW) {
280 continue;
281 }
282 cpu->cpreg_values[i] = read_raw_cp_reg(&cpu->env, ri);
283 }
284 return ok;
285}
286
287bool write_list_to_cpustate(ARMCPU *cpu)
288{
289 int i;
290 bool ok = true;
291
292 for (i = 0; i < cpu->cpreg_array_len; i++) {
293 uint32_t regidx = kvm_to_cpreg_id(cpu->cpreg_indexes[i]);
294 uint64_t v = cpu->cpreg_values[i];
295 const ARMCPRegInfo *ri;
296
297 ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
298 if (!ri) {
299 ok = false;
300 continue;
301 }
302 if (ri->type & ARM_CP_NO_RAW) {
303 continue;
304 }
305
306
307
308
309 write_raw_cp_reg(&cpu->env, ri, v);
310 if (read_raw_cp_reg(&cpu->env, ri) != v) {
311 ok = false;
312 }
313 }
314 return ok;
315}
316
317static void add_cpreg_to_list(gpointer key, gpointer opaque)
318{
319 ARMCPU *cpu = opaque;
320 uint64_t regidx;
321 const ARMCPRegInfo *ri;
322
323 regidx = *(uint32_t *)key;
324 ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
325
326 if (!(ri->type & (ARM_CP_NO_RAW|ARM_CP_ALIAS))) {
327 cpu->cpreg_indexes[cpu->cpreg_array_len] = cpreg_to_kvm_id(regidx);
328
329 cpu->cpreg_array_len++;
330 }
331}
332
333static void count_cpreg(gpointer key, gpointer opaque)
334{
335 ARMCPU *cpu = opaque;
336 uint64_t regidx;
337 const ARMCPRegInfo *ri;
338
339 regidx = *(uint32_t *)key;
340 ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
341
342 if (!(ri->type & (ARM_CP_NO_RAW|ARM_CP_ALIAS))) {
343 cpu->cpreg_array_len++;
344 }
345}
346
347static gint cpreg_key_compare(gconstpointer a, gconstpointer b)
348{
349 uint64_t aidx = cpreg_to_kvm_id(*(uint32_t *)a);
350 uint64_t bidx = cpreg_to_kvm_id(*(uint32_t *)b);
351
352 if (aidx > bidx) {
353 return 1;
354 }
355 if (aidx < bidx) {
356 return -1;
357 }
358 return 0;
359}
360
361void init_cpreg_list(ARMCPU *cpu)
362{
363
364
365
366 GList *keys;
367 int arraylen;
368
369 keys = g_hash_table_get_keys(cpu->cp_regs);
370 keys = g_list_sort(keys, cpreg_key_compare);
371
372 cpu->cpreg_array_len = 0;
373
374 g_list_foreach(keys, count_cpreg, cpu);
375
376 arraylen = cpu->cpreg_array_len;
377 cpu->cpreg_indexes = g_new(uint64_t, arraylen);
378 cpu->cpreg_values = g_new(uint64_t, arraylen);
379 cpu->cpreg_vmstate_indexes = g_new(uint64_t, arraylen);
380 cpu->cpreg_vmstate_values = g_new(uint64_t, arraylen);
381 cpu->cpreg_vmstate_array_len = cpu->cpreg_array_len;
382 cpu->cpreg_array_len = 0;
383
384 g_list_foreach(keys, add_cpreg_to_list, cpu);
385
386 assert(cpu->cpreg_array_len == arraylen);
387
388 g_list_free(keys);
389}
390
391
392
393
394
395
396
397
398static CPAccessResult access_el3_aa32ns(CPUARMState *env,
399 const ARMCPRegInfo *ri,
400 bool isread)
401{
402 bool secure = arm_is_secure_below_el3(env);
403
404 assert(!arm_el_is_aa64(env, 3));
405 if (secure) {
406 return CP_ACCESS_TRAP_UNCATEGORIZED;
407 }
408 return CP_ACCESS_OK;
409}
410
411static CPAccessResult access_el3_aa32ns_aa64any(CPUARMState *env,
412 const ARMCPRegInfo *ri,
413 bool isread)
414{
415 if (!arm_el_is_aa64(env, 3)) {
416 return access_el3_aa32ns(env, ri, isread);
417 }
418 return CP_ACCESS_OK;
419}
420
421
422
423
424
425
426static CPAccessResult access_trap_aa32s_el1(CPUARMState *env,
427 const ARMCPRegInfo *ri,
428 bool isread)
429{
430 if (arm_current_el(env) == 3) {
431 return CP_ACCESS_OK;
432 }
433 if (arm_is_secure_below_el3(env)) {
434 return CP_ACCESS_TRAP_EL3;
435 }
436
437 return CP_ACCESS_TRAP_UNCATEGORIZED;
438}
439
440
441
442
443static CPAccessResult access_tdosa(CPUARMState *env, const ARMCPRegInfo *ri,
444 bool isread)
445{
446 int el = arm_current_el(env);
447
448 if (el < 2 && (env->cp15.mdcr_el2 & MDCR_TDOSA)
449 && !arm_is_secure_below_el3(env)) {
450 return CP_ACCESS_TRAP_EL2;
451 }
452 if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDOSA)) {
453 return CP_ACCESS_TRAP_EL3;
454 }
455 return CP_ACCESS_OK;
456}
457
458
459
460
461static CPAccessResult access_tdra(CPUARMState *env, const ARMCPRegInfo *ri,
462 bool isread)
463{
464 int el = arm_current_el(env);
465
466 if (el < 2 && (env->cp15.mdcr_el2 & MDCR_TDRA)
467 && !arm_is_secure_below_el3(env)) {
468 return CP_ACCESS_TRAP_EL2;
469 }
470 if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDA)) {
471 return CP_ACCESS_TRAP_EL3;
472 }
473 return CP_ACCESS_OK;
474}
475
476
477
478
479static CPAccessResult access_tda(CPUARMState *env, const ARMCPRegInfo *ri,
480 bool isread)
481{
482 int el = arm_current_el(env);
483
484 if (el < 2 && (env->cp15.mdcr_el2 & MDCR_TDA)
485 && !arm_is_secure_below_el3(env)) {
486 return CP_ACCESS_TRAP_EL2;
487 }
488 if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDA)) {
489 return CP_ACCESS_TRAP_EL3;
490 }
491 return CP_ACCESS_OK;
492}
493
494
495
496
497static CPAccessResult access_tpm(CPUARMState *env, const ARMCPRegInfo *ri,
498 bool isread)
499{
500 int el = arm_current_el(env);
501
502 if (el < 2 && (env->cp15.mdcr_el2 & MDCR_TPM)
503 && !arm_is_secure_below_el3(env)) {
504 return CP_ACCESS_TRAP_EL2;
505 }
506 if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TPM)) {
507 return CP_ACCESS_TRAP_EL3;
508 }
509 return CP_ACCESS_OK;
510}
511
512static void dacr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
513{
514 ARMCPU *cpu = arm_env_get_cpu(env);
515
516 raw_write(env, ri, value);
517 tlb_flush(CPU(cpu));
518}
519
520static void fcse_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
521{
522 ARMCPU *cpu = arm_env_get_cpu(env);
523
524 if (raw_read(env, ri) != value) {
525
526
527
528 tlb_flush(CPU(cpu));
529 raw_write(env, ri, value);
530 }
531}
532
533static void contextidr_write(CPUARMState *env, const ARMCPRegInfo *ri,
534 uint64_t value)
535{
536 ARMCPU *cpu = arm_env_get_cpu(env);
537
538 if (raw_read(env, ri) != value && !arm_feature(env, ARM_FEATURE_PMSA)
539 && !extended_addresses_enabled(env)) {
540
541
542
543
544 tlb_flush(CPU(cpu));
545 }
546 raw_write(env, ri, value);
547}
548
549static void tlbiall_write(CPUARMState *env, const ARMCPRegInfo *ri,
550 uint64_t value)
551{
552
553 ARMCPU *cpu = arm_env_get_cpu(env);
554
555 tlb_flush(CPU(cpu));
556}
557
558static void tlbimva_write(CPUARMState *env, const ARMCPRegInfo *ri,
559 uint64_t value)
560{
561
562 ARMCPU *cpu = arm_env_get_cpu(env);
563
564 tlb_flush_page(CPU(cpu), value & TARGET_PAGE_MASK);
565}
566
567static void tlbiasid_write(CPUARMState *env, const ARMCPRegInfo *ri,
568 uint64_t value)
569{
570
571 ARMCPU *cpu = arm_env_get_cpu(env);
572
573 tlb_flush(CPU(cpu));
574}
575
576static void tlbimvaa_write(CPUARMState *env, const ARMCPRegInfo *ri,
577 uint64_t value)
578{
579
580 ARMCPU *cpu = arm_env_get_cpu(env);
581
582 tlb_flush_page(CPU(cpu), value & TARGET_PAGE_MASK);
583}
584
585
586static void tlbiall_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
587 uint64_t value)
588{
589 CPUState *cs = ENV_GET_CPU(env);
590
591 tlb_flush_all_cpus_synced(cs);
592}
593
594static void tlbiasid_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
595 uint64_t value)
596{
597 CPUState *cs = ENV_GET_CPU(env);
598
599 tlb_flush_all_cpus_synced(cs);
600}
601
602static void tlbimva_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
603 uint64_t value)
604{
605 CPUState *cs = ENV_GET_CPU(env);
606
607 tlb_flush_page_all_cpus_synced(cs, value & TARGET_PAGE_MASK);
608}
609
610static void tlbimvaa_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
611 uint64_t value)
612{
613 CPUState *cs = ENV_GET_CPU(env);
614
615 tlb_flush_page_all_cpus_synced(cs, value & TARGET_PAGE_MASK);
616}
617
618static void tlbiall_nsnh_write(CPUARMState *env, const ARMCPRegInfo *ri,
619 uint64_t value)
620{
621 CPUState *cs = ENV_GET_CPU(env);
622
623 tlb_flush_by_mmuidx(cs,
624 ARMMMUIdxBit_S12NSE1 |
625 ARMMMUIdxBit_S12NSE0 |
626 ARMMMUIdxBit_S2NS);
627}
628
629static void tlbiall_nsnh_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
630 uint64_t value)
631{
632 CPUState *cs = ENV_GET_CPU(env);
633
634 tlb_flush_by_mmuidx_all_cpus_synced(cs,
635 ARMMMUIdxBit_S12NSE1 |
636 ARMMMUIdxBit_S12NSE0 |
637 ARMMMUIdxBit_S2NS);
638}
639
640static void tlbiipas2_write(CPUARMState *env, const ARMCPRegInfo *ri,
641 uint64_t value)
642{
643
644
645
646
647
648
649 CPUState *cs = ENV_GET_CPU(env);
650 uint64_t pageaddr;
651
652 if (!arm_feature(env, ARM_FEATURE_EL2) || !(env->cp15.scr_el3 & SCR_NS)) {
653 return;
654 }
655
656 pageaddr = sextract64(value << 12, 0, 40);
657
658 tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_S2NS);
659}
660
661static void tlbiipas2_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
662 uint64_t value)
663{
664 CPUState *cs = ENV_GET_CPU(env);
665 uint64_t pageaddr;
666
667 if (!arm_feature(env, ARM_FEATURE_EL2) || !(env->cp15.scr_el3 & SCR_NS)) {
668 return;
669 }
670
671 pageaddr = sextract64(value << 12, 0, 40);
672
673 tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
674 ARMMMUIdxBit_S2NS);
675}
676
677static void tlbiall_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri,
678 uint64_t value)
679{
680 CPUState *cs = ENV_GET_CPU(env);
681
682 tlb_flush_by_mmuidx(cs, ARMMMUIdxBit_S1E2);
683}
684
685static void tlbiall_hyp_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
686 uint64_t value)
687{
688 CPUState *cs = ENV_GET_CPU(env);
689
690 tlb_flush_by_mmuidx_all_cpus_synced(cs, ARMMMUIdxBit_S1E2);
691}
692
693static void tlbimva_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri,
694 uint64_t value)
695{
696 CPUState *cs = ENV_GET_CPU(env);
697 uint64_t pageaddr = value & ~MAKE_64BIT_MASK(0, 12);
698
699 tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_S1E2);
700}
701
702static void tlbimva_hyp_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
703 uint64_t value)
704{
705 CPUState *cs = ENV_GET_CPU(env);
706 uint64_t pageaddr = value & ~MAKE_64BIT_MASK(0, 12);
707
708 tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
709 ARMMMUIdxBit_S1E2);
710}
711
static const ARMCPRegInfo cp_reginfo[] = {
    /* Define the secure and non-secure FCSE identifier CP registers
     * separately because there is no secure bank in V8 (no _EL3).  This allows
     * the secure register to be properly reset and migrated. There is also no
     * v8 EL1 version of the register so the non-secure instance stands alone.
     */
    { .name = "FCSEIDR",
      .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 0,
      .access = PL1_RW, .secure = ARM_CP_SECSTATE_NS,
      .fieldoffset = offsetof(CPUARMState, cp15.fcseidr_ns),
      .resetvalue = 0, .writefn = fcse_write, .raw_writefn = raw_write, },
    { .name = "FCSEIDR_S",
      .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 0,
      .access = PL1_RW, .secure = ARM_CP_SECSTATE_S,
      .fieldoffset = offsetof(CPUARMState, cp15.fcseidr_s),
      .resetvalue = 0, .writefn = fcse_write, .raw_writefn = raw_write, },
    /* Define the secure and non-secure context identifier CP registers
     * separately because there is no secure bank in V8 (no _EL3).  This allows
     * the secure register to be properly reset and migrated.  In the
     * non-secure case, the 32-bit register will have reset and migration
     * disabled during registration as it is handled by the 64-bit instance.
     */
    { .name = "CONTEXTIDR_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 1,
      .access = PL1_RW, .secure = ARM_CP_SECSTATE_NS,
      .fieldoffset = offsetof(CPUARMState, cp15.contextidr_el[1]),
      .resetvalue = 0, .writefn = contextidr_write, .raw_writefn = raw_write, },
    { .name = "CONTEXTIDR_S", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 1,
      .access = PL1_RW, .secure = ARM_CP_SECSTATE_S,
      .fieldoffset = offsetof(CPUARMState, cp15.contextidr_s),
      .resetvalue = 0, .writefn = contextidr_write, .raw_writefn = raw_write, },
    REGINFO_SENTINEL
};
746
static const ARMCPRegInfo not_v8_cp_reginfo[] = {
    /* NB: Some of these registers exist in v8 but with more precise
     * definitions that don't use CP_ANY wildcards (mostly in v8_cp_reginfo[]).
     */
    /* MMU Domain access control / MPU write buffer control */
    { .name = "DACR",
      .cp = 15, .opc1 = CP_ANY, .crn = 3, .crm = CP_ANY, .opc2 = CP_ANY,
      .access = PL1_RW, .resetvalue = 0,
      .writefn = dacr_write, .raw_writefn = raw_write,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dacr_s),
                             offsetoflow32(CPUARMState, cp15.dacr_ns) } },
    /* TLB lockdown registers: implemented as NOPs since we never emulate
     * a real TLB lockdown facility.  NOTE(review): crm values 0/1/4/8 are
     * wildcarded on opc1/opc2; confirm this matches the intended impdef
     * encodings for the cores that use this list.
     */
    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 0,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 1,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 4,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 8,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
    /* Cache maintenance ops (crn=7); some of this space may be overridden
     * later with more specific definitions.
     */
    { .name = "CACHEMAINT", .cp = 15, .crn = 7, .crm = CP_ANY,
      .opc1 = 0, .opc2 = CP_ANY, .access = PL1_W,
      .type = ARM_CP_NOP | ARM_CP_OVERRIDE },
    REGINFO_SENTINEL
};
775
static const ARMCPRegInfo not_v6_cp_reginfo[] = {
    /* Not all pre-v6 cores implemented this WFI, so this is slightly
     * over-broad.
     */
    { .name = "WFI_v5", .cp = 15, .crn = 7, .crm = 8, .opc1 = 0, .opc2 = 2,
      .access = PL1_W, .type = ARM_CP_WFI },
    REGINFO_SENTINEL
};
784
static const ARMCPRegInfo not_v7_cp_reginfo[] = {
    /* Standard v6 WFI (also used in some pre-v6 cores); not in v7 (which
     * is UNPREDICTABLE; we choose to NOP as most implementations do).
     */
    { .name = "WFI_v6", .cp = 15, .crn = 7, .crm = 0, .opc1 = 0, .opc2 = 4,
      .access = PL1_W, .type = ARM_CP_WFI },
    /* L1 cache lockdown.  Modelled as simple read/write state with no
     * side effects; actual lockdown behaviour is not emulated.
     */
    { .name = "DLOCKDOWN", .cp = 15, .crn = 9, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_data),
      .resetvalue = 0 },
    { .name = "ILOCKDOWN", .cp = 15, .crn = 9, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_insn),
      .resetvalue = 0 },
    /* IMPDEF identification space (crn=0, opc1=1): reads-as-zero. */
    { .name = "DUMMY", .cp = 15, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = CP_ANY,
      .access = PL1_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
      .resetvalue = 0 },
    /* We don't implement pre-v7 debug, but many CPUs had at least a
     * DBGDIDR; implement it as constant zero (RAZ) so the debug
     * architecture appears absent.
     */
    { .name = "DBGDIDR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL0_R, .type = ARM_CP_CONST, .resetvalue = 0 },
    /* Pre-v7 TLB invalidate ops: crm/opc1 are wildcarded; opc2 selects
     * the operation (all / by MVA / by ASID / by MVA all-ASID).
     */
    { .name = "TLBIALL", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 0, .access = PL1_W, .writefn = tlbiall_write,
      .type = ARM_CP_NO_RAW },
    { .name = "TLBIMVA", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 1, .access = PL1_W, .writefn = tlbimva_write,
      .type = ARM_CP_NO_RAW },
    { .name = "TLBIASID", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 2, .access = PL1_W, .writefn = tlbiasid_write,
      .type = ARM_CP_NO_RAW },
    { .name = "TLBIMVAA", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 3, .access = PL1_W, .writefn = tlbimvaa_write,
      .type = ARM_CP_NO_RAW },
    /* Memory attribute remap registers: treated as NOPs here. */
    { .name = "PRRR", .cp = 15, .crn = 10, .crm = 2,
      .opc1 = 0, .opc2 = 0, .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "NMRR", .cp = 15, .crn = 10, .crm = 2,
      .opc1 = 0, .opc2 = 1, .access = PL1_RW, .type = ARM_CP_NOP },
    REGINFO_SENTINEL
};
833
834static void cpacr_write(CPUARMState *env, const ARMCPRegInfo *ri,
835 uint64_t value)
836{
837 uint32_t mask = 0;
838
839
840 if (!arm_feature(env, ARM_FEATURE_V8)) {
841
842
843
844
845 if (arm_feature(env, ARM_FEATURE_VFP)) {
846
847 mask |= (1 << 31) | (1 << 30) | (0xf << 20);
848
849 if (!arm_feature(env, ARM_FEATURE_NEON)) {
850
851 value |= (1 << 31);
852 }
853
854
855
856
857 if (!arm_feature(env, ARM_FEATURE_NEON) ||
858 !arm_feature(env, ARM_FEATURE_VFP3)) {
859
860 value |= (1 << 30);
861 }
862 }
863 value &= mask;
864 }
865 env->cp15.cpacr_el1 = value;
866}
867
static void cpacr_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* Call cpacr_write() so that we reset with the correct RAO bits set
     * for our CPU features.
     */
    cpacr_write(env, ri, 0);
}
875
876static CPAccessResult cpacr_access(CPUARMState *env, const ARMCPRegInfo *ri,
877 bool isread)
878{
879 if (arm_feature(env, ARM_FEATURE_V8)) {
880
881 if (arm_current_el(env) == 1 &&
882 (env->cp15.cptr_el[2] & CPTR_TCPAC) && !arm_is_secure(env)) {
883 return CP_ACCESS_TRAP_EL2;
884
885 } else if (arm_current_el(env) < 3 &&
886 (env->cp15.cptr_el[3] & CPTR_TCPAC)) {
887 return CP_ACCESS_TRAP_EL3;
888 }
889 }
890
891 return CP_ACCESS_OK;
892}
893
894static CPAccessResult cptr_access(CPUARMState *env, const ARMCPRegInfo *ri,
895 bool isread)
896{
897
898 if (arm_current_el(env) == 2 && (env->cp15.cptr_el[3] & CPTR_TCPAC)) {
899 return CP_ACCESS_TRAP_EL3;
900 }
901
902 return CP_ACCESS_OK;
903}
904
static const ARMCPRegInfo v6_cp_reginfo[] = {
    /* prefetch by MVA in v6, NOP in v7 */
    { .name = "MVA_prefetch",
      .cp = 15, .crn = 7, .crm = 13, .opc1 = 0, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NOP },
    /* We need to break the TB after ISB to execute self-modifying code
     * correctly and also to take any pending interrupts immediately.
     * So use arm_cp_write_ignore() function instead of ARM_CP_NOP flag.
     */
    { .name = "ISB", .cp = 15, .crn = 7, .crm = 5, .opc1 = 0, .opc2 = 4,
      .access = PL0_W, .type = ARM_CP_NO_RAW, .writefn = arm_cp_write_ignore },
    { .name = "DSB", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 4,
      .access = PL0_W, .type = ARM_CP_NOP },
    { .name = "DMB", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 5,
      .access = PL0_W, .type = ARM_CP_NOP },
    { .name = "IFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ifar_s),
                             offsetof(CPUARMState, cp15.ifar_ns) },
      .resetvalue = 0, },
    /* Watchpoint Fault Address Register : should actually only be present
     * for 1136, 1176, 11MPCore.
     */
    { .name = "WFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0, },
    { .name = "CPACR", .state = ARM_CP_STATE_BOTH, .opc0 = 3,
      .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 2, .accessfn = cpacr_access,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.cpacr_el1),
      .resetfn = cpacr_reset, .writefn = cpacr_write },
    REGINFO_SENTINEL
};
936
/* Definitions for the PMU registers: fields of PMCR. */
#define PMCRN_MASK  0xf800      /* PMCR.N: number of event counters */
#define PMCRN_SHIFT 11
#define PMCRD   0x8             /* PMCR.D: clock divider (count every 64 cycles) */
#define PMCRC   0x4             /* PMCR.C: cycle counter reset */
#define PMCRE   0x1             /* PMCR.E: enable */
943
944static inline uint32_t pmu_num_counters(CPUARMState *env)
945{
946 return (env->cp15.c9_pmcr & PMCRN_MASK) >> PMCRN_SHIFT;
947}
948
949
950static inline uint64_t pmu_counter_mask(CPUARMState *env)
951{
952 return (1 << 31) | ((1 << pmu_num_counters(env)) - 1);
953}
954
955static CPAccessResult pmreg_access(CPUARMState *env, const ARMCPRegInfo *ri,
956 bool isread)
957{
958
959
960
961
962 int el = arm_current_el(env);
963
964 if (el == 0 && !(env->cp15.c9_pmuserenr & 1)) {
965 return CP_ACCESS_TRAP;
966 }
967 if (el < 2 && (env->cp15.mdcr_el2 & MDCR_TPM)
968 && !arm_is_secure_below_el3(env)) {
969 return CP_ACCESS_TRAP_EL2;
970 }
971 if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TPM)) {
972 return CP_ACCESS_TRAP_EL3;
973 }
974
975 return CP_ACCESS_OK;
976}
977
978static CPAccessResult pmreg_access_xevcntr(CPUARMState *env,
979 const ARMCPRegInfo *ri,
980 bool isread)
981{
982
983 if (arm_feature(env, ARM_FEATURE_V8)
984 && arm_current_el(env) == 0
985 && (env->cp15.c9_pmuserenr & (1 << 3)) != 0
986 && isread) {
987 return CP_ACCESS_OK;
988 }
989
990 return pmreg_access(env, ri, isread);
991}
992
993static CPAccessResult pmreg_access_swinc(CPUARMState *env,
994 const ARMCPRegInfo *ri,
995 bool isread)
996{
997
998 if (arm_feature(env, ARM_FEATURE_V8)
999 && arm_current_el(env) == 0
1000 && (env->cp15.c9_pmuserenr & (1 << 1)) != 0
1001 && !isread) {
1002 return CP_ACCESS_OK;
1003 }
1004
1005 return pmreg_access(env, ri, isread);
1006}
1007
1008#ifndef CONFIG_USER_ONLY
1009
1010static CPAccessResult pmreg_access_selr(CPUARMState *env,
1011 const ARMCPRegInfo *ri,
1012 bool isread)
1013{
1014
1015 if (arm_feature(env, ARM_FEATURE_V8)
1016 && arm_current_el(env) == 0
1017 && (env->cp15.c9_pmuserenr & (1 << 3)) != 0) {
1018 return CP_ACCESS_OK;
1019 }
1020
1021 return pmreg_access(env, ri, isread);
1022}
1023
1024static CPAccessResult pmreg_access_ccntr(CPUARMState *env,
1025 const ARMCPRegInfo *ri,
1026 bool isread)
1027{
1028
1029 if (arm_feature(env, ARM_FEATURE_V8)
1030 && arm_current_el(env) == 0
1031 && (env->cp15.c9_pmuserenr & (1 << 2)) != 0
1032 && isread) {
1033 return CP_ACCESS_OK;
1034 }
1035
1036 return pmreg_access(env, ri, isread);
1037}
1038
1039static inline bool arm_ccnt_enabled(CPUARMState *env)
1040{
1041
1042
1043 if (!(env->cp15.c9_pmcr & PMCRE) || !(env->cp15.c9_pmcnten & (1 << 31))) {
1044 return false;
1045 }
1046
1047 return true;
1048}
1049
void pmccntr_sync(CPUARMState *env)
{
    uint64_t temp_ticks;

    /* Convert current virtual time to cycle ticks at the fixed
     * emulated CPU frequency.
     */
    temp_ticks = muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
                          ARM_CPU_FREQ, NANOSECONDS_PER_SECOND);

    if (env->cp15.c9_pmcr & PMCRD) {
        /* Increment once every 64 processor clock cycles */
        temp_ticks /= 64;
    }

    if (arm_ccnt_enabled(env)) {
        /* While enabled, c15_ccnt holds (clock ticks - counter value).
         * This toggles the field between that delta form and the raw
         * counter value, so callers must invoke pmccntr_sync() in
         * pairs around any change to the counting parameters.
         */
        env->cp15.c15_ccnt = temp_ticks - env->cp15.c15_ccnt;
    }
}
1066
static void pmcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                       uint64_t value)
{
    /* First sync converts c15_ccnt to a raw counter value so that the
     * divider/enable changes below take effect consistently.
     */
    pmccntr_sync(env);

    if (value & PMCRC) {
        /* The counter has been reset */
        env->cp15.c15_ccnt = 0;
    }

    /* only the DP, X, D and E bits are writable */
    env->cp15.c9_pmcr &= ~0x39;
    env->cp15.c9_pmcr |= (value & 0x39);

    /* Second sync re-establishes the delta form under the new settings. */
    pmccntr_sync(env);
}
1083
static uint64_t pmccntr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    uint64_t total_ticks;

    if (!arm_ccnt_enabled(env)) {
        /* Counter is disabled: c15_ccnt holds the frozen value. */
        return env->cp15.c15_ccnt;
    }

    total_ticks = muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
                           ARM_CPU_FREQ, NANOSECONDS_PER_SECOND);

    if (env->cp15.c9_pmcr & PMCRD) {
        /* Increment once every 64 processor clock cycles */
        total_ticks /= 64;
    }
    /* While enabled, c15_ccnt holds (ticks at enable - counter origin),
     * so the current counter value is elapsed ticks minus that base.
     */
    return total_ticks - env->cp15.c15_ccnt;
}
1102
static void pmselr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    /* The value of PMSELR.SEL affects the behavior of PMXEVTYPER and
     * PMXEVCNTR. We allow the full 5-bit field [0..31] to be written
     * here; the SEL value is checked when PMXEVTYPER and PMXEVCNTR
     * are actually accessed.
     */
    env->cp15.c9_pmselr = value & 0x1f;
}
1113
static void pmccntr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    uint64_t total_ticks;

    if (!arm_ccnt_enabled(env)) {
        /* Counter is disabled: store the value directly. */
        env->cp15.c15_ccnt = value;
        return;
    }

    total_ticks = muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
                           ARM_CPU_FREQ, NANOSECONDS_PER_SECOND);

    if (env->cp15.c9_pmcr & PMCRD) {
        /* Increment once every 64 processor clock cycles */
        total_ticks /= 64;
    }
    /* While enabled, store the origin (ticks - value) so a later read
     * returns "value" plus the ticks elapsed since this write.
     */
    env->cp15.c15_ccnt = total_ticks - value;
}
1134
1135static void pmccntr_write32(CPUARMState *env, const ARMCPRegInfo *ri,
1136 uint64_t value)
1137{
1138 uint64_t cur_val = pmccntr_read(env, NULL);
1139
1140 pmccntr_write(env, ri, deposit64(cur_val, 0, 32, value));
1141}
1142
#else /* CONFIG_USER_ONLY */

/* User-mode emulation has no cycle counter state to sync. */
void pmccntr_sync(CPUARMState *env)
{
}

#endif
1150
static void pmccfiltr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    /* Sync before and after the update so the counter correctly spans
     * the filter change; only bits [31:26] are kept.
     */
    pmccntr_sync(env);
    env->cp15.pmccfiltr_el0 = value & 0xfc000000;
    pmccntr_sync(env);
}
1158
1159static void pmcntenset_write(CPUARMState *env, const ARMCPRegInfo *ri,
1160 uint64_t value)
1161{
1162 value &= pmu_counter_mask(env);
1163 env->cp15.c9_pmcnten |= value;
1164}
1165
1166static void pmcntenclr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1167 uint64_t value)
1168{
1169 value &= pmu_counter_mask(env);
1170 env->cp15.c9_pmcnten &= ~value;
1171}
1172
static void pmovsr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    /* Write-one-to-clear the overflow status bits. */
    env->cp15.c9_pmovsr &= ~value;
}
1178
1179static void pmxevtyper_write(CPUARMState *env, const ARMCPRegInfo *ri,
1180 uint64_t value)
1181{
1182
1183
1184
1185
1186 if (env->cp15.c9_pmselr == 0x1f) {
1187 pmccfiltr_write(env, ri, value);
1188 }
1189}
1190
1191static uint64_t pmxevtyper_read(CPUARMState *env, const ARMCPRegInfo *ri)
1192{
1193
1194
1195
1196 if (env->cp15.c9_pmselr == 0x1f) {
1197 return env->cp15.pmccfiltr_el0;
1198 } else {
1199 return 0;
1200 }
1201}
1202
1203static void pmuserenr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1204 uint64_t value)
1205{
1206 if (arm_feature(env, ARM_FEATURE_V8)) {
1207 env->cp15.c9_pmuserenr = value & 0xf;
1208 } else {
1209 env->cp15.c9_pmuserenr = value & 1;
1210 }
1211}
1212
1213static void pmintenset_write(CPUARMState *env, const ARMCPRegInfo *ri,
1214 uint64_t value)
1215{
1216
1217 value &= pmu_counter_mask(env);
1218 env->cp15.c9_pminten |= value;
1219}
1220
1221static void pmintenclr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1222 uint64_t value)
1223{
1224 value &= pmu_counter_mask(env);
1225 env->cp15.c9_pminten &= ~value;
1226}
1227
static void vbar_write(CPUARMState *env, const ARMCPRegInfo *ri,
                       uint64_t value)
{
    /* Vector base address: clear only bits [4:0], giving 32-byte
     * alignment.  NOTE(review): the AArch64 view has bits [10:0] RES0,
     * but masking just the bottom 5 keeps the AArch32 behaviour for
     * the shared backing field -- confirm against the architecture
     * manual before tightening.
     */
    raw_write(env, ri, value & ~0x1FULL);
}
1239
static void scr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    /* We only mask off bits that are RES0 both for AArch64 and AArch32.
     * For bits that vary between AArch32/64, code needs to check the
     * current execution mode before directly using the feature bit.
     */
    uint32_t valid_mask = SCR_AARCH64_MASK | SCR_AARCH32_MASK;

    if (!arm_feature(env, ARM_FEATURE_EL2)) {
        valid_mask &= ~SCR_HCE;

        /* On ARMv7, SMD (or SCD as it is called in v7) is only
         * supported if EL2 exists. The bit is UNK/SBZP when
         * EL2 is unavailable. In QEMU ARMv7, we force it to always zero
         * when EL2 is unavailable.
         * On ARMv8, this bit is always available.
         */
        if (arm_feature(env, ARM_FEATURE_V7) &&
            !arm_feature(env, ARM_FEATURE_V8)) {
            valid_mask &= ~SCR_SMD;
        }
    }

    /* Clear all-context RES0 bits.  */
    value &= valid_mask;
    raw_write(env, ri, value);
}
1267
1268static uint64_t ccsidr_read(CPUARMState *env, const ARMCPRegInfo *ri)
1269{
1270 ARMCPU *cpu = arm_env_get_cpu(env);
1271
1272
1273
1274
1275 uint32_t index = A32_BANKED_REG_GET(env, csselr,
1276 ri->secure & ARM_CP_SECSTATE_S);
1277
1278 return cpu->ccsidr[index];
1279}
1280
static void csselr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    /* Only the low 4 bits of CSSELR are kept. */
    raw_write(env, ri, value & 0xf);
}
1286
1287static uint64_t isr_read(CPUARMState *env, const ARMCPRegInfo *ri)
1288{
1289 CPUState *cs = ENV_GET_CPU(env);
1290 uint64_t ret = 0;
1291
1292 if (cs->interrupt_request & CPU_INTERRUPT_HARD) {
1293 ret |= CPSR_I;
1294 }
1295 if (cs->interrupt_request & CPU_INTERRUPT_FIQ) {
1296 ret |= CPSR_F;
1297 }
1298
1299 return ret;
1300}
1301
static const ARMCPRegInfo v7_cp_reginfo[] = {
    /* the old v6 WFI, UNPREDICTABLE in v7 but we choose to NOP */
    { .name = "NOP", .cp = 15, .crn = 7, .crm = 0, .opc1 = 0, .opc2 = 4,
      .access = PL1_W, .type = ARM_CP_NOP },
    /*
     * Performance monitors are implementation defined in v7, but the
     * registers below follow the ARM recommended encodings.  Access from
     * PL0 is mediated by the pmreg_access* / access_tpm access functions.
     * NOTE(review): these entries suggest QEMU models only a subset of
     * the PMU (notably the cycle counter); event counters appear as
     * NOPs or constants below -- confirm against the handlers.
     */
    /* Counter enable set: shares c9_pmcnten state with the AA64 view */
    { .name = "PMCNTENSET", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 1,
      .access = PL0_RW, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcnten),
      .writefn = pmcntenset_write,
      .accessfn = pmreg_access,
      .raw_writefn = raw_write },
    { .name = "PMCNTENSET_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 1,
      .access = PL0_RW, .accessfn = pmreg_access,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcnten), .resetvalue = 0,
      .writefn = pmcntenset_write, .raw_writefn = raw_write },
    /* Counter enable clear: writes clear bits in the same state */
    { .name = "PMCNTENCLR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 2,
      .access = PL0_RW,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcnten),
      .accessfn = pmreg_access,
      .writefn = pmcntenclr_write,
      .type = ARM_CP_ALIAS },
    { .name = "PMCNTENCLR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 2,
      .access = PL0_RW, .accessfn = pmreg_access,
      .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcnten),
      .writefn = pmcntenclr_write },
    /* Overflow status: c9_pmovsr, shared between AA32 and AA64 views */
    { .name = "PMOVSR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 3,
      .access = PL0_RW,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmovsr),
      .accessfn = pmreg_access,
      .writefn = pmovsr_write,
      .raw_writefn = raw_write },
    { .name = "PMOVSCLR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 3,
      .access = PL0_RW, .accessfn = pmreg_access,
      .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmovsr),
      .writefn = pmovsr_write,
      .raw_writefn = raw_write },
    /* Software increment: a NOP here since no event counters are modelled */
    { .name = "PMSWINC", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 4,
      .access = PL0_W, .accessfn = pmreg_access_swinc, .type = ARM_CP_NOP },
#ifndef CONFIG_USER_ONLY
    /* Event counter selection register */
    { .name = "PMSELR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 5,
      .access = PL0_RW, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmselr),
      .accessfn = pmreg_access_selr, .writefn = pmselr_write,
      .raw_writefn = raw_write},
    { .name = "PMSELR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 5,
      .access = PL0_RW, .accessfn = pmreg_access_selr,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmselr),
      .writefn = pmselr_write, .raw_writefn = raw_write, },
    /* Cycle counter: backed by the virtual clock, hence ARM_CP_IO */
    { .name = "PMCCNTR", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 0,
      .access = PL0_RW, .resetvalue = 0, .type = ARM_CP_ALIAS | ARM_CP_IO,
      .readfn = pmccntr_read, .writefn = pmccntr_write32,
      .accessfn = pmreg_access_ccntr },
    { .name = "PMCCNTR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 0,
      .access = PL0_RW, .accessfn = pmreg_access_ccntr,
      .type = ARM_CP_IO,
      .readfn = pmccntr_read, .writefn = pmccntr_write, },
#endif
    /* Cycle counter filter control */
    { .name = "PMCCFILTR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 15, .opc2 = 7,
      .writefn = pmccfiltr_write,
      .access = PL0_RW, .accessfn = pmreg_access,
      .type = ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.pmccfiltr_el0),
      .resetvalue = 0, },
    /* Event type select for the counter chosen by PMSELR */
    { .name = "PMXEVTYPER", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 1,
      .access = PL0_RW, .type = ARM_CP_NO_RAW, .accessfn = pmreg_access,
      .writefn = pmxevtyper_write, .readfn = pmxevtyper_read },
    { .name = "PMXEVTYPER_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 1,
      .access = PL0_RW, .type = ARM_CP_NO_RAW, .accessfn = pmreg_access,
      .writefn = pmxevtyper_write, .readfn = pmxevtyper_read },
    /* Selected event counter: RAZ/WI since no event counters are modelled */
    { .name = "PMXEVCNTR", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 2,
      .access = PL0_RW, .type = ARM_CP_CONST, .resetvalue = 0,
      .accessfn = pmreg_access_xevcntr },
    /* User enable: read-only from PL0, read/write from PL1 */
    { .name = "PMUSERENR", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 0,
      .access = PL0_R | PL1_RW, .accessfn = access_tpm,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmuserenr),
      .resetvalue = 0,
      .writefn = pmuserenr_write, .raw_writefn = raw_write },
    { .name = "PMUSERENR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 14, .opc2 = 0,
      .access = PL0_R | PL1_RW, .accessfn = access_tpm, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmuserenr),
      .resetvalue = 0,
      .writefn = pmuserenr_write, .raw_writefn = raw_write },
    /* Interrupt enable set/clear: PL1 only */
    { .name = "PMINTENSET", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_tpm,
      .type = ARM_CP_ALIAS | ARM_CP_IO,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pminten),
      .resetvalue = 0,
      .writefn = pmintenset_write, .raw_writefn = raw_write },
    { .name = "PMINTENSET_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_tpm,
      .type = ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
      .writefn = pmintenset_write, .raw_writefn = raw_write,
      .resetvalue = 0x0 },
    { .name = "PMINTENCLR", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW, .accessfn = access_tpm, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
      .writefn = pmintenclr_write, },
    { .name = "PMINTENCLR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 2,
      .access = PL1_RW, .accessfn = access_tpm, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
      .writefn = pmintenclr_write },
    /* Cache size ID: read via ccsidr_read, indexed by the CSSELR write below */
    { .name = "CCSIDR", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = 0,
      .access = PL1_R, .readfn = ccsidr_read, .type = ARM_CP_NO_RAW },
    { .name = "CSSELR", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 2, .opc2 = 0,
      .access = PL1_RW, .writefn = csselr_write, .resetvalue = 0,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.csselr_s),
                             offsetof(CPUARMState, cp15.csselr_ns) } },
    /*
     * Auxiliary ID register: this actually has an IMPDEF value but for now
     * just RAZ for all cores.
     */
    { .name = "AIDR", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 1, .crn = 0, .crm = 0, .opc2 = 7,
      .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
    /*
     * Auxiliary fault status registers: these are IMPDEF, and we choose
     * to RAZ/WI for all cores.
     */
    { .name = "AFSR0_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 1, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "AFSR1_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 1, .opc2 = 1,
      .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    /*
     * MAIR can just read-as-written because we don't implement caches
     * and so don't need to care about memory attributes.
     */
    { .name = "MAIR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 0,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.mair_el[1]),
      .resetvalue = 0 },
    { .name = "MAIR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 10, .crm = 2, .opc2 = 0,
      .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.mair_el[3]),
      .resetvalue = 0 },
    /*
     * For non-long-descriptor page tables these are PRRR and NMRR; the
     * 32-bit MAIR0/MAIR1 views alias the banked halves of the 64-bit
     * state, so reset is handled by the AA64 definition above
     * (arm_cp_reset_ignore here).
     */
    { .name = "MAIR0", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 0, .access = PL1_RW,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.mair0_s),
                             offsetof(CPUARMState, cp15.mair0_ns) },
      .resetfn = arm_cp_reset_ignore },
    { .name = "MAIR1", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 1, .access = PL1_RW,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.mair1_s),
                             offsetof(CPUARMState, cp15.mair1_ns) },
      .resetfn = arm_cp_reset_ignore },
    { .name = "ISR_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 1, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL1_R, .readfn = isr_read },
    /* 32 bit ITLB invalidates */
    { .name = "ITLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiall_write },
    { .name = "ITLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_write },
    { .name = "ITLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 2,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiasid_write },
    /* 32 bit DTLB invalidates */
    { .name = "DTLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiall_write },
    { .name = "DTLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_write },
    { .name = "DTLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 2,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiasid_write },
    /* 32 bit TLB invalidates */
    { .name = "TLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiall_write },
    { .name = "TLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_write },
    { .name = "TLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 2,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiasid_write },
    { .name = "TLBIMVAA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 3,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimvaa_write },
    REGINFO_SENTINEL
};
1507
static const ARMCPRegInfo v7mp_cp_reginfo[] = {
    /* 32 bit TLB invalidates, Inner Shareable variants (v7 MP extensions) */
    { .name = "TLBIALLIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiall_is_write },
    { .name = "TLBIMVAIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_is_write },
    { .name = "TLBIASIDIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 2,
      .type = ARM_CP_NO_RAW, .access = PL1_W,
      .writefn = tlbiasid_is_write },
    { .name = "TLBIMVAAIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 3,
      .type = ARM_CP_NO_RAW, .access = PL1_W,
      .writefn = tlbimvaa_is_write },
    REGINFO_SENTINEL
};
1522
1523static void teecr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1524 uint64_t value)
1525{
1526 value &= 1;
1527 env->teecr = value;
1528}
1529
1530static CPAccessResult teehbr_access(CPUARMState *env, const ARMCPRegInfo *ri,
1531 bool isread)
1532{
1533 if (arm_current_el(env) == 0 && (env->teecr & 1)) {
1534 return CP_ACCESS_TRAP;
1535 }
1536 return CP_ACCESS_OK;
1537}
1538
/* ThumbEE (Thumb-2 Execution Environment) registers */
static const ARMCPRegInfo t2ee_cp_reginfo[] = {
    { .name = "TEECR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 6, .opc2 = 0,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, teecr),
      .resetvalue = 0,
      .writefn = teecr_write },
    /* TEEHBR is PL0 accessible unless TEECR.XED gates it (teehbr_access) */
    { .name = "TEEHBR", .cp = 14, .crn = 1, .crm = 0, .opc1 = 6, .opc2 = 0,
      .access = PL0_RW, .fieldoffset = offsetof(CPUARMState, teehbr),
      .accessfn = teehbr_access, .resetvalue = 0 },
    REGINFO_SENTINEL
};
1549
/* Thread ID registers (v6K and later) */
static const ARMCPRegInfo v6k_cp_reginfo[] = {
    /* User read/write thread ID register */
    { .name = "TPIDR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .opc2 = 2, .crn = 13, .crm = 0,
      .access = PL0_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[0]), .resetvalue = 0 },
    /* AA32 view aliases the banked low halves; reset handled by AA64 entry */
    { .name = "TPIDRURW", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL0_RW,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidrurw_s),
                             offsetoflow32(CPUARMState, cp15.tpidrurw_ns) },
      .resetfn = arm_cp_reset_ignore },
    /* User read-only (privileged write) thread ID register */
    { .name = "TPIDRRO_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .opc2 = 3, .crn = 13, .crm = 0,
      .access = PL0_R|PL1_W,
      .fieldoffset = offsetof(CPUARMState, cp15.tpidrro_el[0]),
      .resetvalue = 0},
    { .name = "TPIDRURO", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 3,
      .access = PL0_R|PL1_W,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidruro_s),
                             offsetoflow32(CPUARMState, cp15.tpidruro_ns) },
      .resetfn = arm_cp_reset_ignore },
    /* Privileged-only thread ID register */
    { .name = "TPIDR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .opc2 = 4, .crn = 13, .crm = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[1]), .resetvalue = 0 },
    { .name = "TPIDRPRW", .opc1 = 0, .cp = 15, .crn = 13, .crm = 0, .opc2 = 4,
      .access = PL1_RW,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidrprw_s),
                             offsetoflow32(CPUARMState, cp15.tpidrprw_ns) },
      .resetvalue = 0 },
    REGINFO_SENTINEL
};
1581
1582#ifndef CONFIG_USER_ONLY
1583
/*
 * Access control for CNTFRQ: reads from EL0 are gated by CNTKCTL,
 * and writes are only permitted from the highest implemented EL.
 */
static CPAccessResult gt_cntfrq_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                       bool isread)
{
    int el = arm_current_el(env);

    switch (el) {
    case 0:
        /* EL0 access denied unless one of CNTKCTL bits [1:0] is set */
        if (!extract32(env->cp15.c14_cntkctl, 0, 2)) {
            return CP_ACCESS_TRAP;
        }
        break;
    case 1:
        if (!isread && ri->state == ARM_CP_STATE_AA32 &&
            arm_is_secure_below_el3(env)) {
            /* AArch32 Secure EL1 writes are UNDEF (not trapped to EL3) */
            return CP_ACCESS_TRAP_UNCATEGORIZED;
        }
        break;
    case 2:
    case 3:
        break;
    }

    /* CNTFRQ is writable only from the highest implemented EL */
    if (!isread && el < arm_highest_el(env)) {
        return CP_ACCESS_TRAP_UNCATEGORIZED;
    }

    return CP_ACCESS_OK;
}
1616
/*
 * Common access check for the physical/virtual counter registers
 * (CNTPCT/CNTVCT), where @timeridx selects which counter.
 */
static CPAccessResult gt_counter_access(CPUARMState *env, int timeridx,
                                        bool isread)
{
    unsigned int cur_el = arm_current_el(env);
    bool secure = arm_is_secure(env);

    /* EL0 access is gated by the corresponding CNTKCTL enable bit */
    if (cur_el == 0 &&
        !extract32(env->cp15.c14_cntkctl, timeridx, 1)) {
        return CP_ACCESS_TRAP;
    }

    /* Non-secure EL0/EL1 physical counter access traps to EL2 when
     * CNTHCTL_EL2 bit 0 is clear.
     */
    if (arm_feature(env, ARM_FEATURE_EL2) &&
        timeridx == GTIMER_PHYS && !secure && cur_el < 2 &&
        !extract32(env->cp15.cnthctl_el2, 0, 1)) {
        return CP_ACCESS_TRAP_EL2;
    }
    return CP_ACCESS_OK;
}
1636
/*
 * Common access check for the per-timer registers (CTL/CVAL/TVAL),
 * where @timeridx selects the timer.
 */
static CPAccessResult gt_timer_access(CPUARMState *env, int timeridx,
                                      bool isread)
{
    unsigned int cur_el = arm_current_el(env);
    bool secure = arm_is_secure(env);

    /*
     * EL0 access is gated by CNTKCTL bit (9 - timeridx): the timer
     * enable bits sit at the opposite end of the register from the
     * counter enable bits used in gt_counter_access().
     */
    if (cur_el == 0 &&
        !extract32(env->cp15.c14_cntkctl, 9 - timeridx, 1)) {
        return CP_ACCESS_TRAP;
    }

    /* Non-secure EL0/EL1 physical timer access traps to EL2 when
     * CNTHCTL_EL2 bit 1 is clear.
     */
    if (arm_feature(env, ARM_FEATURE_EL2) &&
        timeridx == GTIMER_PHYS && !secure && cur_el < 2 &&
        !extract32(env->cp15.cnthctl_el2, 1, 1)) {
        return CP_ACCESS_TRAP_EL2;
    }
    return CP_ACCESS_OK;
}
1658
/* Trampolines binding the generic counter/timer access checks to a timer. */

/* CNTPCT: physical counter */
static CPAccessResult gt_pct_access(CPUARMState *env,
                                    const ARMCPRegInfo *ri,
                                    bool isread)
{
    return gt_counter_access(env, GTIMER_PHYS, isread);
}

/* CNTVCT: virtual counter */
static CPAccessResult gt_vct_access(CPUARMState *env,
                                    const ARMCPRegInfo *ri,
                                    bool isread)
{
    return gt_counter_access(env, GTIMER_VIRT, isread);
}

/* CNTP_*: physical timer registers */
static CPAccessResult gt_ptimer_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                       bool isread)
{
    return gt_timer_access(env, GTIMER_PHYS, isread);
}

/* CNTV_*: virtual timer registers */
static CPAccessResult gt_vtimer_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                       bool isread)
{
    return gt_timer_access(env, GTIMER_VIRT, isread);
}
1684
/*
 * Access control for the secure physical timer (CNTPS_*): usable at
 * EL3, and at Secure EL1 only when SCR_EL3.ST permits; all other
 * levels trap.
 */
static CPAccessResult gt_stimer_access(CPUARMState *env,
                                       const ARMCPRegInfo *ri,
                                       bool isread)
{
    switch (arm_current_el(env)) {
    case 1:
        if (!arm_is_secure(env)) {
            /* Non-secure EL1: the register does not exist; trap */
            return CP_ACCESS_TRAP;
        }
        /* Secure EL1 access requires SCR_EL3.ST, else trap to EL3 */
        if (!(env->cp15.scr_el3 & SCR_ST)) {
            return CP_ACCESS_TRAP_EL3;
        }
        return CP_ACCESS_OK;
    case 0:
    case 2:
        return CP_ACCESS_TRAP;
    case 3:
        return CP_ACCESS_OK;
    default:
        g_assert_not_reached();
    }
}
1711
1712static uint64_t gt_get_countervalue(CPUARMState *env)
1713{
1714 return qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) / GTIMER_SCALE;
1715}
1716
/*
 * Recompute the ISTATUS bit, the output IRQ line, and the QEMU timer
 * deadline for timer @timeridx after any change to its state.
 */
static void gt_recalc_timer(ARMCPU *cpu, int timeridx)
{
    ARMGenericTimer *gt = &cpu->env.cp15.c14_timer[timeridx];

    if (gt->ctl & 1) {
        /*
         * Timer enabled: calculate and set current ISTATUS, irq, and
         * reset the QEMU timer to fire when ISTATUS next changes.
         * Only the virtual timer has a nonzero offset (CNTVOFF_EL2).
         */
        uint64_t offset = timeridx == GTIMER_VIRT ?
                                      cpu->env.cp15.cntvoff_el2 : 0;
        uint64_t count = gt_get_countervalue(&cpu->env);
        /* ISTATUS is set when (counter - offset) >= compare value */
        int istatus = count - offset >= gt->cval;
        uint64_t nexttick;
        int irqstate;

        gt->ctl = deposit32(gt->ctl, 2, 1, istatus);

        /* IRQ asserted when ISTATUS set and IMASK (bit 1) clear */
        irqstate = (istatus && !(gt->ctl & 2));
        qemu_set_irq(cpu->gt_timer_outputs[timeridx], irqstate);

        if (istatus) {
            /*
             * Already fired; next transition is when the count wraps.
             * NOTE(review): UINT64_MAX stands in for "never"; it gets
             * clamped below anyway.
             */
            nexttick = UINT64_MAX;
        } else {
            /* Next transition is when we hit cval (in counter ticks) */
            nexttick = gt->cval + offset;
        }
        /*
         * The desired next expiry may be beyond the signed 64-bit range
         * of a QEMUTimer deadline in nanoseconds; clamp so timer_mod is
         * not handed an out-of-range value.
         */
        if (nexttick > INT64_MAX / GTIMER_SCALE) {
            nexttick = INT64_MAX / GTIMER_SCALE;
        }
        timer_mod(cpu->gt_timer[timeridx], nexttick);
        trace_arm_gt_recalc(timeridx, irqstate, nexttick);
    } else {
        /* Timer disabled: clear ISTATUS, deassert IRQ, cancel the timer */
        gt->ctl &= ~4;
        qemu_set_irq(cpu->gt_timer_outputs[timeridx], 0);
        timer_del(cpu->gt_timer[timeridx]);
        trace_arm_gt_recalc_disabled(timeridx);
    }
}
1763
1764static void gt_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri,
1765 int timeridx)
1766{
1767 ARMCPU *cpu = arm_env_get_cpu(env);
1768
1769 timer_del(cpu->gt_timer[timeridx]);
1770}
1771
1772static uint64_t gt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri)
1773{
1774 return gt_get_countervalue(env);
1775}
1776
1777static uint64_t gt_virt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri)
1778{
1779 return gt_get_countervalue(env) - env->cp15.cntvoff_el2;
1780}
1781
/*
 * Write the compare value (CVAL) for timer @timeridx, then reschedule
 * the backing QEMU timer and IRQ state to match.
 */
static void gt_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          int timeridx,
                          uint64_t value)
{
    trace_arm_gt_cval_write(timeridx, value);
    env->cp15.c14_timer[timeridx].cval = value;
    gt_recalc_timer(arm_env_get_cpu(env), timeridx);
}
1790
1791static uint64_t gt_tval_read(CPUARMState *env, const ARMCPRegInfo *ri,
1792 int timeridx)
1793{
1794 uint64_t offset = timeridx == GTIMER_VIRT ? env->cp15.cntvoff_el2 : 0;
1795
1796 return (uint32_t)(env->cp15.c14_timer[timeridx].cval -
1797 (gt_get_countervalue(env) - offset));
1798}
1799
/*
 * Write the TVAL view of timer @timeridx: TVAL is a signed 32-bit
 * downcounter, so the write is converted into the equivalent absolute
 * compare value CVAL = current count + sign-extended TVAL.
 */
static void gt_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          int timeridx,
                          uint64_t value)
{
    /* Only the virtual timer counts relative to CNTVOFF_EL2 */
    uint64_t offset = timeridx == GTIMER_VIRT ? env->cp15.cntvoff_el2 : 0;

    trace_arm_gt_tval_write(timeridx, value);
    env->cp15.c14_timer[timeridx].cval = gt_get_countervalue(env) - offset +
                                         sextract64(value, 0, 32);
    gt_recalc_timer(arm_env_get_cpu(env), timeridx);
}
1811
/*
 * Write the control register (CTL) of timer @timeridx.  Only ENABLE
 * (bit 0) and IMASK (bit 1) are writable; ISTATUS (bit 2) is managed
 * by gt_recalc_timer().
 */
static void gt_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         int timeridx,
                         uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    uint32_t oldval = env->cp15.c14_timer[timeridx].ctl;

    trace_arm_gt_ctl_write(timeridx, value);
    env->cp15.c14_timer[timeridx].ctl = deposit64(oldval, 0, 2, value);
    if ((oldval ^ value) & 1) {
        /* Enable toggled: full recalculation of state and deadline */
        gt_recalc_timer(cpu, timeridx);
    } else if ((oldval ^ value) & 2) {
        /*
         * Only IMASK changed: no need to recalculate the deadline,
         * just update the interrupt line from the existing ISTATUS.
         */
        int irqstate = (oldval & 4) && !(value & 2);

        trace_arm_gt_imask_toggle(timeridx, irqstate);
        qemu_set_irq(cpu->gt_timer_outputs[timeridx], irqstate);
    }
}
1834
/* Trampolines binding the generic timer handlers to GTIMER_PHYS. */
static void gt_phys_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    gt_timer_reset(env, ri, GTIMER_PHYS);
}

static void gt_phys_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    gt_cval_write(env, ri, GTIMER_PHYS, value);
}

static uint64_t gt_phys_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return gt_tval_read(env, ri, GTIMER_PHYS);
}

static void gt_phys_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    gt_tval_write(env, ri, GTIMER_PHYS, value);
}

static void gt_phys_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    gt_ctl_write(env, ri, GTIMER_PHYS, value);
}
1862
/* Trampolines binding the generic timer handlers to GTIMER_VIRT. */
static void gt_virt_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    gt_timer_reset(env, ri, GTIMER_VIRT);
}

static void gt_virt_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    gt_cval_write(env, ri, GTIMER_VIRT, value);
}

static uint64_t gt_virt_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return gt_tval_read(env, ri, GTIMER_VIRT);
}

static void gt_virt_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    gt_tval_write(env, ri, GTIMER_VIRT, value);
}

static void gt_virt_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    gt_ctl_write(env, ri, GTIMER_VIRT, value);
}
1890
/*
 * Write CNTVOFF_EL2: shifting the virtual counter offset changes when
 * the virtual timer fires, so reschedule it after the raw write.
 */
static void gt_cntvoff_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);

    trace_arm_gt_cntvoff_write(value);
    raw_write(env, ri, value);
    gt_recalc_timer(cpu, GTIMER_VIRT);
}

/* Trampolines binding the generic timer handlers to GTIMER_HYP. */
static void gt_hyp_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    gt_timer_reset(env, ri, GTIMER_HYP);
}

static void gt_hyp_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    gt_cval_write(env, ri, GTIMER_HYP, value);
}

static uint64_t gt_hyp_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return gt_tval_read(env, ri, GTIMER_HYP);
}

static void gt_hyp_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    gt_tval_write(env, ri, GTIMER_HYP, value);
}

static void gt_hyp_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    gt_ctl_write(env, ri, GTIMER_HYP, value);
}
1928
/* Trampolines binding the generic timer handlers to GTIMER_SEC. */
static void gt_sec_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    gt_timer_reset(env, ri, GTIMER_SEC);
}

static void gt_sec_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    gt_cval_write(env, ri, GTIMER_SEC, value);
}

static uint64_t gt_sec_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return gt_tval_read(env, ri, GTIMER_SEC);
}

static void gt_sec_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    gt_tval_write(env, ri, GTIMER_SEC, value);
}

static void gt_sec_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    gt_ctl_write(env, ri, GTIMER_SEC, value);
}
1956
/*
 * QEMUTimer expiry callbacks: when a timer deadline fires, recompute
 * that timer's ISTATUS/IRQ state and its next deadline.
 */
void arm_gt_ptimer_cb(void *opaque)
{
    ARMCPU *cpu = opaque;

    gt_recalc_timer(cpu, GTIMER_PHYS);
}

void arm_gt_vtimer_cb(void *opaque)
{
    ARMCPU *cpu = opaque;

    gt_recalc_timer(cpu, GTIMER_VIRT);
}

void arm_gt_htimer_cb(void *opaque)
{
    ARMCPU *cpu = opaque;

    gt_recalc_timer(cpu, GTIMER_HYP);
}

void arm_gt_stimer_cb(void *opaque)
{
    ARMCPU *cpu = opaque;

    gt_recalc_timer(cpu, GTIMER_SEC);
}
1984
static const ARMCPRegInfo generic_timer_cp_reginfo[] = {
    /*
     * CNTFRQ reads-as-written for the benefit of software; writing it
     * does not change the emulated timer frequency.  The reset value
     * matches the fixed frequency the timers actually run at
     * (1ns per GTIMER_SCALE tick of QEMU_CLOCK_VIRTUAL).
     */
    { .name = "CNTFRQ", .cp = 15, .crn = 14, .crm = 0, .opc1 = 0, .opc2 = 0,
      .type = ARM_CP_ALIAS,
      .access = PL1_RW | PL0_R, .accessfn = gt_cntfrq_access,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c14_cntfrq),
    },
    { .name = "CNTFRQ_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 0,
      .access = PL1_RW | PL0_R, .accessfn = gt_cntfrq_access,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_cntfrq),
      .resetvalue = (1000 * 1000 * 1000) / GTIMER_SCALE,
    },
    /* Per-EL1 kernel control: gates EL0 access to counters and timers */
    { .name = "CNTKCTL", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 14, .crm = 1, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_cntkctl),
      .resetvalue = 0,
    },
    /* Per-timer control registers; the secure-banked AA32 views map to
     * GTIMER_SEC, the non-secure ones to GTIMER_PHYS.
     */
    { .name = "CNTP_CTL", .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 1,
      .secure = ARM_CP_SECSTATE_NS,
      .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL1_RW | PL0_R,
      .accessfn = gt_ptimer_access,
      .fieldoffset = offsetoflow32(CPUARMState,
                                   cp15.c14_timer[GTIMER_PHYS].ctl),
      .writefn = gt_phys_ctl_write, .raw_writefn = raw_write,
    },
    { .name = "CNTP_CTL_S",
      .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 1,
      .secure = ARM_CP_SECSTATE_S,
      .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL1_RW | PL0_R,
      .accessfn = gt_ptimer_access,
      .fieldoffset = offsetoflow32(CPUARMState,
                                   cp15.c14_timer[GTIMER_SEC].ctl),
      .writefn = gt_sec_ctl_write, .raw_writefn = raw_write,
    },
    { .name = "CNTP_CTL_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 1,
      .type = ARM_CP_IO, .access = PL1_RW | PL0_R,
      .accessfn = gt_ptimer_access,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].ctl),
      .resetvalue = 0,
      .writefn = gt_phys_ctl_write, .raw_writefn = raw_write,
    },
    { .name = "CNTV_CTL", .cp = 15, .crn = 14, .crm = 3, .opc1 = 0, .opc2 = 1,
      .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL1_RW | PL0_R,
      .accessfn = gt_vtimer_access,
      .fieldoffset = offsetoflow32(CPUARMState,
                                   cp15.c14_timer[GTIMER_VIRT].ctl),
      .writefn = gt_virt_ctl_write, .raw_writefn = raw_write,
    },
    { .name = "CNTV_CTL_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 1,
      .type = ARM_CP_IO, .access = PL1_RW | PL0_R,
      .accessfn = gt_vtimer_access,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].ctl),
      .resetvalue = 0,
      .writefn = gt_virt_ctl_write, .raw_writefn = raw_write,
    },
    /* TimerValue views: a 32-bit downcounting view of the underlying state */
    { .name = "CNTP_TVAL", .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 0,
      .secure = ARM_CP_SECSTATE_NS,
      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW | PL0_R,
      .accessfn = gt_ptimer_access,
      .readfn = gt_phys_tval_read, .writefn = gt_phys_tval_write,
    },
    { .name = "CNTP_TVAL_S",
      .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 0,
      .secure = ARM_CP_SECSTATE_S,
      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW | PL0_R,
      .accessfn = gt_ptimer_access,
      .readfn = gt_sec_tval_read, .writefn = gt_sec_tval_write,
    },
    { .name = "CNTP_TVAL_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 0,
      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW | PL0_R,
      .accessfn = gt_ptimer_access, .resetfn = gt_phys_timer_reset,
      .readfn = gt_phys_tval_read, .writefn = gt_phys_tval_write,
    },
    { .name = "CNTV_TVAL", .cp = 15, .crn = 14, .crm = 3, .opc1 = 0, .opc2 = 0,
      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW | PL0_R,
      .accessfn = gt_vtimer_access,
      .readfn = gt_virt_tval_read, .writefn = gt_virt_tval_write,
    },
    { .name = "CNTV_TVAL_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 0,
      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW | PL0_R,
      .accessfn = gt_vtimer_access, .resetfn = gt_virt_timer_reset,
      .readfn = gt_virt_tval_read, .writefn = gt_virt_tval_write,
    },
    /* The counter itself */
    { .name = "CNTPCT", .cp = 15, .crm = 14, .opc1 = 0,
      .access = PL0_R, .type = ARM_CP_64BIT | ARM_CP_NO_RAW | ARM_CP_IO,
      .accessfn = gt_pct_access,
      .readfn = gt_cnt_read, .resetfn = arm_cp_reset_ignore,
    },
    { .name = "CNTPCT_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 1,
      .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO,
      .accessfn = gt_pct_access, .readfn = gt_cnt_read,
    },
    { .name = "CNTVCT", .cp = 15, .crm = 14, .opc1 = 1,
      .access = PL0_R, .type = ARM_CP_64BIT | ARM_CP_NO_RAW | ARM_CP_IO,
      .accessfn = gt_vct_access,
      .readfn = gt_virt_cnt_read, .resetfn = arm_cp_reset_ignore,
    },
    { .name = "CNTVCT_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 2,
      .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO,
      .accessfn = gt_vct_access, .readfn = gt_virt_cnt_read,
    },
    /* Comparison value, indicating when the timer goes off */
    { .name = "CNTP_CVAL", .cp = 15, .crm = 14, .opc1 = 2,
      .secure = ARM_CP_SECSTATE_NS,
      .access = PL1_RW | PL0_R,
      .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].cval),
      .accessfn = gt_ptimer_access,
      .writefn = gt_phys_cval_write, .raw_writefn = raw_write,
    },
    { .name = "CNTP_CVAL_S", .cp = 15, .crm = 14, .opc1 = 2,
      .secure = ARM_CP_SECSTATE_S,
      .access = PL1_RW | PL0_R,
      .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].cval),
      .accessfn = gt_ptimer_access,
      .writefn = gt_sec_cval_write, .raw_writefn = raw_write,
    },
    { .name = "CNTP_CVAL_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 2,
      .access = PL1_RW | PL0_R,
      .type = ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].cval),
      .resetvalue = 0, .accessfn = gt_ptimer_access,
      .writefn = gt_phys_cval_write, .raw_writefn = raw_write,
    },
    { .name = "CNTV_CVAL", .cp = 15, .crm = 14, .opc1 = 3,
      .access = PL1_RW | PL0_R,
      .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].cval),
      .accessfn = gt_vtimer_access,
      .writefn = gt_virt_cval_write, .raw_writefn = raw_write,
    },
    { .name = "CNTV_CVAL_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 2,
      .access = PL1_RW | PL0_R,
      .type = ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].cval),
      .resetvalue = 0, .accessfn = gt_vtimer_access,
      .writefn = gt_virt_cval_write, .raw_writefn = raw_write,
    },
    /*
     * Secure physical timer (CNTPS_*): AArch64-only encodings, with
     * access restricted by gt_stimer_access.
     */
    { .name = "CNTPS_TVAL_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 0,
      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW,
      .accessfn = gt_stimer_access,
      .readfn = gt_sec_tval_read,
      .writefn = gt_sec_tval_write,
      .resetfn = gt_sec_timer_reset,
    },
    { .name = "CNTPS_CTL_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 1,
      .type = ARM_CP_IO, .access = PL1_RW,
      .accessfn = gt_stimer_access,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].ctl),
      .resetvalue = 0,
      .writefn = gt_sec_ctl_write, .raw_writefn = raw_write,
    },
    { .name = "CNTPS_CVAL_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 2,
      .type = ARM_CP_IO, .access = PL1_RW,
      .accessfn = gt_stimer_access,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].cval),
      .writefn = gt_sec_cval_write, .raw_writefn = raw_write,
    },
    REGINFO_SENTINEL
};
2169
2170#else
2171
2172
2173
2174
2175
2176static uint64_t gt_virt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri)
2177{
2178
2179
2180
2181
2182 return cpu_get_clock() / GTIMER_SCALE;
2183}
2184
/*
 * User-mode emulation exposes only the read-only counter frequency and
 * virtual counter registers needed by userspace software.
 */
static const ARMCPRegInfo generic_timer_cp_reginfo[] = {
    { .name = "CNTFRQ_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 0,
      .type = ARM_CP_CONST, .access = PL0_R ,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_cntfrq),
      .resetvalue = NANOSECONDS_PER_SECOND / GTIMER_SCALE,
    },
    { .name = "CNTVCT_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 2,
      .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO,
      .readfn = gt_virt_cnt_read,
    },
    REGINFO_SENTINEL
};
2199
2200#endif
2201
2202static void par_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
2203{
2204 if (arm_feature(env, ARM_FEATURE_LPAE)) {
2205 raw_write(env, ri, value);
2206 } else if (arm_feature(env, ARM_FEATURE_V7)) {
2207 raw_write(env, ri, value & 0xfffff6ff);
2208 } else {
2209 raw_write(env, ri, value & 0xfffff1ff);
2210 }
2211}
2212
2213#ifndef CONFIG_USER_ONLY
2214
2215
2216static CPAccessResult ats_access(CPUARMState *env, const ARMCPRegInfo *ri,
2217 bool isread)
2218{
2219 if (ri->opc2 & 4) {
2220
2221
2222
2223
2224
2225 if (arm_current_el(env) == 1) {
2226 if (arm_is_secure_below_el3(env)) {
2227 return CP_ACCESS_TRAP_UNCATEGORIZED_EL3;
2228 }
2229 return CP_ACCESS_TRAP_UNCATEGORIZED;
2230 }
2231 }
2232 return CP_ACCESS_OK;
2233}
2234
/*
 * Perform an address-translation operation for the given VA @value in
 * translation regime @mmu_idx, and return the resulting PAR value
 * (64-bit LPAE format or 32-bit short format as appropriate).
 */
static uint64_t do_ats_write(CPUARMState *env, uint64_t value,
                             MMUAccessType access_type, ARMMMUIdx mmu_idx)
{
    hwaddr phys_addr;
    target_ulong page_size;
    int prot;
    bool ret;           /* true on translation fault */
    uint64_t par64;
    bool format64 = false;  /* whether to build a 64-bit (LPAE) PAR */
    MemTxAttrs attrs = {};
    ARMMMUFaultInfo fi = {};
    ARMCacheAttrs cacheattrs = {};

    ret = get_phys_addr(env, value, access_type, mmu_idx, &phys_addr, &attrs,
                        &prot, &page_size, &fi, &cacheattrs);

    if (is_a64(env)) {
        /* AArch64 always uses the 64-bit PAR format */
        format64 = true;
    } else if (arm_feature(env, ARM_FEATURE_LPAE)) {
        /*
         * AArch32 with LPAE: the PAR format follows the format of the
         * translation regime being queried, with extra cases when EL2
         * is implemented:
         *  - for a stage 1+2 regime, a 64-bit PAR is also used when
         *    HCR_EL2.VM is set (stage 2 enabled);
         *  - any translation performed from EL2 uses the 64-bit format.
         */
        format64 = arm_s1_regime_using_lpae_format(env, mmu_idx);

        if (arm_feature(env, ARM_FEATURE_EL2)) {
            if (mmu_idx == ARMMMUIdx_S12NSE0 || mmu_idx == ARMMMUIdx_S12NSE1) {
                format64 |= env->cp15.hcr_el2 & HCR_VM;
            } else {
                format64 |= arm_current_el(env) == 2;
            }
        }
    }

    if (format64) {
        /* Create a 64-bit PAR */
        par64 = (1 << 11); /* LPAE bit always set */
        if (!ret) {
            par64 |= phys_addr & ~0xfffULL;
            if (!attrs.secure) {
                par64 |= (1 << 9); /* NS */
            }
            par64 |= (uint64_t)cacheattrs.attrs << 56; /* ATTR */
            par64 |= cacheattrs.shareability << 7; /* SH */
        } else {
            uint32_t fsr = arm_fi_to_lfsc(&fi);

            par64 |= 1; /* F: fault */
            par64 |= (fsr & 0x3f) << 1; /* FS: long-format fault status */
            /*
             * The stage-2 walk/fault fields stay zero here; NOTE(review):
             * this appears to assume no stage 2 fault can be reported --
             * confirm against the get_phys_addr() implementation.
             */
        }
    } else {
        /*
         * Create a 32-bit (short-descriptor format) PAR.
         * fsr is a DFSR/IFSR value for the short descriptor format.
         */
        if (!ret) {
            /* On V7, supersection (16MB) translations report only PA[31:24] */
            if (page_size == (1 << 24)
                && arm_feature(env, ARM_FEATURE_V7)) {
                par64 = (phys_addr & 0xff000000) | (1 << 1); /* SS */
            } else {
                par64 = phys_addr & 0xfffff000;
            }
            if (!attrs.secure) {
                par64 |= (1 << 9); /* NS */
            }
        } else {
            uint32_t fsr = arm_fi_to_sfsc(&fi);

            /* Pack the short-format fault status bits plus F (bit 0) */
            par64 = ((fsr & (1 << 10)) >> 5) | ((fsr & (1 << 12)) >> 6) |
                    ((fsr & 0xf) << 1) | 1;
        }
    }
    return par64;
}
2322
static void ats_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    /*
     * AArch32 ATS* ops: opc2 bit 0 selects a read vs write probe,
     * opc2 bits [2:1] select the translation regime.  The result is
     * written to the banked PAR for the current security state.
     */
    MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD;
    uint64_t par64;
    ARMMMUIdx mmu_idx;
    int el = arm_current_el(env);
    bool secure = arm_is_secure_below_el3(env);

    switch (ri->opc2 & 6) {
    case 0:
        /* stage 1 current state PL1: ATS1CPR, ATS1CPW */
        switch (el) {
        case 3:
            mmu_idx = ARMMMUIdx_S1E3;
            break;
        case 2:
            mmu_idx = ARMMMUIdx_S1NSE1;
            break;
        case 1:
            mmu_idx = secure ? ARMMMUIdx_S1SE1 : ARMMMUIdx_S1NSE1;
            break;
        default:
            g_assert_not_reached();
        }
        break;
    case 2:
        /* stage 1 current state PL0: ATS1CUR, ATS1CUW */
        switch (el) {
        case 3:
            mmu_idx = ARMMMUIdx_S1SE0;
            break;
        case 2:
            mmu_idx = ARMMMUIdx_S1NSE0;
            break;
        case 1:
            mmu_idx = secure ? ARMMMUIdx_S1SE0 : ARMMMUIdx_S1NSE0;
            break;
        default:
            g_assert_not_reached();
        }
        break;
    case 4:
        /* stage 1+2 NonSecure PL1: ATS12NSOPR, ATS12NSOPW */
        mmu_idx = ARMMMUIdx_S12NSE1;
        break;
    case 6:
        /* stage 1+2 NonSecure PL0: ATS12NSOUR, ATS12NSOUW */
        mmu_idx = ARMMMUIdx_S12NSE0;
        break;
    default:
        g_assert_not_reached();
    }

    par64 = do_ats_write(env, value, access_type, mmu_idx);

    A32_BANKED_CURRENT_REG_SET(env, par, par64);
}
2380
2381static void ats1h_write(CPUARMState *env, const ARMCPRegInfo *ri,
2382 uint64_t value)
2383{
2384 MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD;
2385 uint64_t par64;
2386
2387 par64 = do_ats_write(env, value, access_type, ARMMMUIdx_S2NS);
2388
2389 A32_BANKED_CURRENT_REG_SET(env, par, par64);
2390}
2391
2392static CPAccessResult at_s1e2_access(CPUARMState *env, const ARMCPRegInfo *ri,
2393 bool isread)
2394{
2395 if (arm_current_el(env) == 3 && !(env->cp15.scr_el3 & SCR_NS)) {
2396 return CP_ACCESS_TRAP;
2397 }
2398 return CP_ACCESS_OK;
2399}
2400
static void ats_write64(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    /*
     * AArch64 AT ops: opc2 bits [2:1] select stage/EL0-vs-EL1, and for
     * the EL1 case opc1 selects which EL's regime (0 = EL1, 4 = EL2,
     * 6 = EL3).  The result lands in PAR_EL1.
     */
    MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD;
    ARMMMUIdx mmu_idx;
    int secure = arm_is_secure_below_el3(env);

    switch (ri->opc2 & 6) {
    case 0:
        switch (ri->opc1) {
        case 0: /* AT S1E1R, AT S1E1W */
            mmu_idx = secure ? ARMMMUIdx_S1SE1 : ARMMMUIdx_S1NSE1;
            break;
        case 4: /* AT S1E2R, AT S1E2W */
            mmu_idx = ARMMMUIdx_S1E2;
            break;
        case 6: /* AT S1E3R, AT S1E3W */
            mmu_idx = ARMMMUIdx_S1E3;
            break;
        default:
            g_assert_not_reached();
        }
        break;
    case 2: /* AT S1E0R, AT S1E0W */
        mmu_idx = secure ? ARMMMUIdx_S1SE0 : ARMMMUIdx_S1NSE0;
        break;
    case 4: /* AT S12E1R, AT S12E1W */
        mmu_idx = secure ? ARMMMUIdx_S1SE1 : ARMMMUIdx_S12NSE1;
        break;
    case 6: /* AT S12E0R, AT S12E0W */
        mmu_idx = secure ? ARMMMUIdx_S1SE0 : ARMMMUIdx_S12NSE0;
        break;
    default:
        g_assert_not_reached();
    }

    env->cp15.par_el[1] = do_ats_write(env, value, access_type, mmu_idx);
}
2439#endif
2440
static const ARMCPRegInfo vapa_cp_reginfo[] = {
    /* Banked PAR (Physical Address Register) plus the ATS operations. */
    { .name = "PAR", .cp = 15, .crn = 7, .crm = 4, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .resetvalue = 0,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.par_s),
                             offsetoflow32(CPUARMState, cp15.par_ns) },
      .writefn = par_write },
#ifndef CONFIG_USER_ONLY
    /* opc2 = CP_ANY underdecodes; safe because the entry is NO_RAW. */
    { .name = "ATS", .cp = 15, .crn = 7, .crm = 8, .opc1 = 0, .opc2 = CP_ANY,
      .access = PL1_W, .accessfn = ats_access,
      .writefn = ats_write, .type = ARM_CP_NO_RAW },
#endif
    REGINFO_SENTINEL
};
2455
2456
static uint32_t simple_mpu_ap_bits(uint32_t val)
{
    /*
     * Pack the low two bits of each of the eight nibbles of 'val' into
     * eight consecutive two-bit fields (extended -> simple MPU
     * access-permission encoding).
     */
    uint32_t packed = 0;
    int field;

    for (field = 0; field < 8; field++) {
        packed |= ((val >> (field * 4)) & 3) << (field * 2);
    }
    return packed;
}
2470
2471
static uint32_t extended_mpu_ap_bits(uint32_t val)
{
    /*
     * Spread the eight two-bit fields in the low half of 'val' into the
     * low two bits of eight nibbles (simple -> extended MPU
     * access-permission encoding); the inverse of simple_mpu_ap_bits().
     */
    uint32_t spread = 0;
    int field;

    for (field = 0; field < 8; field++) {
        spread |= ((val >> (field * 2)) & 3) << (field * 4);
    }
    return spread;
}
2485
2486static void pmsav5_data_ap_write(CPUARMState *env, const ARMCPRegInfo *ri,
2487 uint64_t value)
2488{
2489 env->cp15.pmsav5_data_ap = extended_mpu_ap_bits(value);
2490}
2491
2492static uint64_t pmsav5_data_ap_read(CPUARMState *env, const ARMCPRegInfo *ri)
2493{
2494 return simple_mpu_ap_bits(env->cp15.pmsav5_data_ap);
2495}
2496
2497static void pmsav5_insn_ap_write(CPUARMState *env, const ARMCPRegInfo *ri,
2498 uint64_t value)
2499{
2500 env->cp15.pmsav5_insn_ap = extended_mpu_ap_bits(value);
2501}
2502
2503static uint64_t pmsav5_insn_ap_read(CPUARMState *env, const ARMCPRegInfo *ri)
2504{
2505 return simple_mpu_ap_bits(env->cp15.pmsav5_insn_ap);
2506}
2507
2508static uint64_t pmsav7_read(CPUARMState *env, const ARMCPRegInfo *ri)
2509{
2510 uint32_t *u32p = *(uint32_t **)raw_ptr(env, ri);
2511
2512 if (!u32p) {
2513 return 0;
2514 }
2515
2516 u32p += env->pmsav7.rnr[M_REG_NS];
2517 return *u32p;
2518}
2519
2520static void pmsav7_write(CPUARMState *env, const ARMCPRegInfo *ri,
2521 uint64_t value)
2522{
2523 ARMCPU *cpu = arm_env_get_cpu(env);
2524 uint32_t *u32p = *(uint32_t **)raw_ptr(env, ri);
2525
2526 if (!u32p) {
2527 return;
2528 }
2529
2530 u32p += env->pmsav7.rnr[M_REG_NS];
2531 tlb_flush(CPU(cpu));
2532 *u32p = value;
2533}
2534
2535static void pmsav7_rgnr_write(CPUARMState *env, const ARMCPRegInfo *ri,
2536 uint64_t value)
2537{
2538 ARMCPU *cpu = arm_env_get_cpu(env);
2539 uint32_t nrgs = cpu->pmsav7_dregion;
2540
2541 if (value >= nrgs) {
2542 qemu_log_mask(LOG_GUEST_ERROR,
2543 "PMSAv7 RGNR write >= # supported regions, %" PRIu32
2544 " > %" PRIu32 "\n", (uint32_t)value, nrgs);
2545 return;
2546 }
2547
2548 raw_write(env, ri, value);
2549}
2550
static const ARMCPRegInfo pmsav7_cp_reginfo[] = {
    /*
     * PMSAv7 MPU region registers.  DRBAR/DRSR/DRACR index into
     * pointer-valued state arrays via the current RGNR, hence the
     * NO_RAW type and custom read/write fns.  Reset is ignored here
     * (arm_cp_reset_ignore); presumably the state is reset elsewhere
     * -- TODO(review): confirm where.
     */
    { .name = "DRBAR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_NO_RAW,
      .fieldoffset = offsetof(CPUARMState, pmsav7.drbar),
      .readfn = pmsav7_read, .writefn = pmsav7_write,
      .resetfn = arm_cp_reset_ignore },
    { .name = "DRSR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 2,
      .access = PL1_RW, .type = ARM_CP_NO_RAW,
      .fieldoffset = offsetof(CPUARMState, pmsav7.drsr),
      .readfn = pmsav7_read, .writefn = pmsav7_write,
      .resetfn = arm_cp_reset_ignore },
    { .name = "DRACR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 4,
      .access = PL1_RW, .type = ARM_CP_NO_RAW,
      .fieldoffset = offsetof(CPUARMState, pmsav7.dracr),
      .readfn = pmsav7_read, .writefn = pmsav7_write,
      .resetfn = arm_cp_reset_ignore },
    /* Region number register: bounds-checked by pmsav7_rgnr_write. */
    { .name = "RGNR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 2, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, pmsav7.rnr[M_REG_NS]),
      .writefn = pmsav7_rgnr_write,
      .resetfn = arm_cp_reset_ignore },
    REGINFO_SENTINEL
};
2578
static const ARMCPRegInfo pmsav5_cp_reginfo[] = {
    /*
     * PMSAv5 MPU registers.  The AP registers have both a "simple" view
     * (converted on access) and an "extended" view of the same state,
     * hence the ALIAS type on the simple-form entries.
     */
    { .name = "DATA_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_data_ap),
      .readfn = pmsav5_data_ap_read, .writefn = pmsav5_data_ap_write, },
    { .name = "INSN_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_insn_ap),
      .readfn = pmsav5_insn_ap_read, .writefn = pmsav5_insn_ap_write, },
    { .name = "DATA_EXT_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_data_ap),
      .resetvalue = 0, },
    { .name = "INSN_EXT_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 3,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_insn_ap),
      .resetvalue = 0, },
    { .name = "DCACHE_CFG", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c2_data), .resetvalue = 0, },
    { .name = "ICACHE_CFG", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c2_insn), .resetvalue = 0, },
    /* ARM946-style protection region base/size registers. */
    { .name = "946_PRBS0", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[0]) },
    { .name = "946_PRBS1", .cp = 15, .crn = 6, .crm = 1, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[1]) },
    { .name = "946_PRBS2", .cp = 15, .crn = 6, .crm = 2, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[2]) },
    { .name = "946_PRBS3", .cp = 15, .crn = 6, .crm = 3, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[3]) },
    { .name = "946_PRBS4", .cp = 15, .crn = 6, .crm = 4, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[4]) },
    { .name = "946_PRBS5", .cp = 15, .crn = 6, .crm = 5, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[5]) },
    { .name = "946_PRBS6", .cp = 15, .crn = 6, .crm = 6, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[6]) },
    { .name = "946_PRBS7", .cp = 15, .crn = 6, .crm = 7, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[7]) },
    REGINFO_SENTINEL
};
2629
static void vmsa_ttbcr_raw_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    /*
     * Raw TTBCR/TCR write: mask off bits that are not writable for
     * this configuration, then refresh the cached translation-table
     * masks derived from the N field.
     */
    TCR *tcr = raw_ptr(env, ri);
    int maskshift = extract32(value, 0, 3);

    if (!arm_feature(env, ARM_FEATURE_V8)) {
        if (arm_feature(env, ARM_FEATURE_LPAE) && (value & TTBCR_EAE)) {
            /*
             * Pre-v8 with EAE set: clear bits [21:19], [15:14]
             * and [6:3].
             */
            value &= ~((7 << 19) | (3 << 14) | (0xf << 3));
        } else if (arm_feature(env, ARM_FEATURE_EL3)) {
            /*
             * With Security extensions (but EAE clear) only the
             * PD0/PD1 and N fields are writable.
             */
            value &= TTBCR_PD1 | TTBCR_PD0 | TTBCR_N;
        } else {
            /* Otherwise only N is writable. */
            value &= TTBCR_N;
        }
    }

    /*
     * mask/base_mask are recomputed unconditionally, but they are only
     * consulted for short-descriptor (EAE == 0) table walks; the
     * long-descriptor walker reads the raw TCR fields directly.
     */
    tcr->raw_tcr = value;
    tcr->mask = ~(((uint32_t)0xffffffffu) >> maskshift);
    tcr->base_mask = ~((uint32_t)0x3fffu >> maskshift);
}
2662
2663static void vmsa_ttbcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
2664 uint64_t value)
2665{
2666 ARMCPU *cpu = arm_env_get_cpu(env);
2667
2668 if (arm_feature(env, ARM_FEATURE_LPAE)) {
2669
2670
2671
2672 tlb_flush(CPU(cpu));
2673 }
2674 vmsa_ttbcr_raw_write(env, ri, value);
2675}
2676
2677static void vmsa_ttbcr_reset(CPUARMState *env, const ARMCPRegInfo *ri)
2678{
2679 TCR *tcr = raw_ptr(env, ri);
2680
2681
2682
2683
2684 tcr->raw_tcr = 0;
2685 tcr->mask = 0;
2686 tcr->base_mask = 0xffffc000u;
2687}
2688
2689static void vmsa_tcr_el1_write(CPUARMState *env, const ARMCPRegInfo *ri,
2690 uint64_t value)
2691{
2692 ARMCPU *cpu = arm_env_get_cpu(env);
2693 TCR *tcr = raw_ptr(env, ri);
2694
2695
2696 tlb_flush(CPU(cpu));
2697 tcr->raw_tcr = value;
2698}
2699
2700static void vmsa_ttbr_write(CPUARMState *env, const ARMCPRegInfo *ri,
2701 uint64_t value)
2702{
2703
2704
2705
2706 if (cpreg_field_is_64bit(ri)) {
2707 ARMCPU *cpu = arm_env_get_cpu(env);
2708
2709 tlb_flush(CPU(cpu));
2710 }
2711 raw_write(env, ri, value);
2712}
2713
2714static void vttbr_write(CPUARMState *env, const ARMCPRegInfo *ri,
2715 uint64_t value)
2716{
2717 ARMCPU *cpu = arm_env_get_cpu(env);
2718 CPUState *cs = CPU(cpu);
2719
2720
2721 if (raw_read(env, ri) != value) {
2722 tlb_flush_by_mmuidx(cs,
2723 ARMMMUIdxBit_S12NSE1 |
2724 ARMMMUIdxBit_S12NSE0 |
2725 ARMMMUIdxBit_S2NS);
2726 raw_write(env, ri, value);
2727 }
2728}
2729
static const ARMCPRegInfo vmsa_pmsa_cp_reginfo[] = {
    /* Fault status/address registers shared by VMSA and PMSA cores. */
    { .name = "DFSR", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_ALIAS,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dfsr_s),
                             offsetoflow32(CPUARMState, cp15.dfsr_ns) }, },
    { .name = "IFSR", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .resetvalue = 0,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.ifsr_s),
                             offsetoflow32(CPUARMState, cp15.ifsr_ns) } },
    { .name = "DFAR", .cp = 15, .opc1 = 0, .crn = 6, .crm = 0, .opc2 = 0,
      .access = PL1_RW, .resetvalue = 0,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.dfar_s),
                             offsetof(CPUARMState, cp15.dfar_ns) } },
    { .name = "FAR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.far_el[1]),
      .resetvalue = 0, },
    REGINFO_SENTINEL
};
2749
static const ARMCPRegInfo vmsa_cp_reginfo[] = {
    /* VMSA translation-table and syndrome registers. */
    { .name = "ESR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .crn = 5, .crm = 2, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.esr_el[1]), .resetvalue = 0, },
    { .name = "TTBR0_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 0, .opc2 = 0,
      .access = PL1_RW, .writefn = vmsa_ttbr_write, .resetvalue = 0,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr0_s),
                             offsetof(CPUARMState, cp15.ttbr0_ns) } },
    { .name = "TTBR1_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 0, .opc2 = 1,
      .access = PL1_RW, .writefn = vmsa_ttbr_write, .resetvalue = 0,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr1_s),
                             offsetof(CPUARMState, cp15.ttbr1_ns) } },
    { .name = "TCR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW, .writefn = vmsa_tcr_el1_write,
      .resetfn = vmsa_ttbcr_reset, .raw_writefn = raw_write,
      .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[1]) },
    /* 32-bit TTBCR is an alias onto the low half of the banked TCR. */
    { .name = "TTBCR", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW, .type = ARM_CP_ALIAS, .writefn = vmsa_ttbcr_write,
      .raw_writefn = vmsa_ttbcr_raw_write,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tcr_el[3]),
                             offsetoflow32(CPUARMState, cp15.tcr_el[1])} },
    REGINFO_SENTINEL
};
2777
2778static void omap_ticonfig_write(CPUARMState *env, const ARMCPRegInfo *ri,
2779 uint64_t value)
2780{
2781 env->cp15.c15_ticonfig = value & 0xe7;
2782
2783 env->cp15.c0_cpuid = (value & (1 << 5)) ?
2784 ARM_CPUID_TI915T : ARM_CPUID_TI925T;
2785}
2786
2787static void omap_threadid_write(CPUARMState *env, const ARMCPRegInfo *ri,
2788 uint64_t value)
2789{
2790 env->cp15.c15_threadid = value & 0xffff;
2791}
2792
2793static void omap_wfi_write(CPUARMState *env, const ARMCPRegInfo *ri,
2794 uint64_t value)
2795{
2796
2797 cpu_interrupt(CPU(arm_env_get_cpu(env)), CPU_INTERRUPT_HALT);
2798}
2799
2800static void omap_cachemaint_write(CPUARMState *env, const ARMCPRegInfo *ri,
2801 uint64_t value)
2802{
2803
2804
2805
2806 env->cp15.c15_i_max = 0x000;
2807 env->cp15.c15_i_min = 0xff0;
2808}
2809
static const ARMCPRegInfo omap_cp_reginfo[] = {
    /* OMAP/TI925T-specific cp15 registers. */
    { .name = "DFSR", .cp = 15, .crn = 5, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_OVERRIDE,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.esr_el[1]),
      .resetvalue = 0, },
    { .name = "", .cp = 15, .crn = 15, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "TICONFIG", .cp = 15, .crn = 15, .crm = 1, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c15_ticonfig), .resetvalue = 0,
      .writefn = omap_ticonfig_write },
    { .name = "IMAX", .cp = 15, .crn = 15, .crm = 2, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c15_i_max), .resetvalue = 0, },
    { .name = "IMIN", .cp = 15, .crn = 15, .crm = 3, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .resetvalue = 0xff0,
      .fieldoffset = offsetof(CPUARMState, cp15.c15_i_min) },
    { .name = "THREADID", .cp = 15, .crn = 15, .crm = 4, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c15_threadid), .resetvalue = 0,
      .writefn = omap_threadid_write },
    /* Reads as zero; writes halt the CPU (see omap_wfi_write). */
    { .name = "TI925T_STATUS", .cp = 15, .crn = 15,
      .crm = 8, .opc1 = 0, .opc2 = 0, .access = PL1_RW,
      .type = ARM_CP_NO_RAW,
      .readfn = arm_cp_read_zero, .writefn = omap_wfi_write, },
    /*
     * Cache maintenance ops are modelled only by resetting the
     * dirty-line index registers; this entry deliberately overrides
     * the standard crn==7 cache operations.
     */
    { .name = "OMAP_CACHEMAINT", .cp = 15, .crn = 7, .crm = CP_ANY,
      .opc1 = 0, .opc2 = CP_ANY, .access = PL1_W,
      .type = ARM_CP_OVERRIDE | ARM_CP_NO_RAW,
      .writefn = omap_cachemaint_write },
    { .name = "C9", .cp = 15, .crn = 9,
      .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW,
      .type = ARM_CP_CONST | ARM_CP_OVERRIDE, .resetvalue = 0 },
    REGINFO_SENTINEL
};
2849
2850static void xscale_cpar_write(CPUARMState *env, const ARMCPRegInfo *ri,
2851 uint64_t value)
2852{
2853 env->cp15.c15_cpar = value & 0x3fff;
2854}
2855
static const ARMCPRegInfo xscale_cp_reginfo[] = {
    { .name = "XSCALE_CPAR",
      .cp = 15, .crn = 15, .crm = 1, .opc1 = 0, .opc2 = 0, .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c15_cpar), .resetvalue = 0,
      .writefn = xscale_cpar_write, },
    { .name = "XSCALE_AUXCR",
      .cp = 15, .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 1, .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c1_xscaleauxcr),
      .resetvalue = 0, },
    /*
     * XScale cache lockdown/unlock operations: no cache is modelled,
     * so these are NOPs.
     */
    { .name = "XSCALE_LOCK_ICACHE_LINE",
      .cp = 15, .opc1 = 0, .crn = 9, .crm = 1, .opc2 = 0,
      .access = PL1_W, .type = ARM_CP_NOP },
    { .name = "XSCALE_UNLOCK_ICACHE",
      .cp = 15, .opc1 = 0, .crn = 9, .crm = 1, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NOP },
    { .name = "XSCALE_DCACHE_LOCK",
      .cp = 15, .opc1 = 0, .crn = 9, .crm = 2, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "XSCALE_UNLOCK_DCACHE",
      .cp = 15, .opc1 = 0, .crn = 9, .crm = 2, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NOP },
    REGINFO_SENTINEL
};
2882
static const ARMCPRegInfo dummy_c15_cp_reginfo[] = {
    /*
     * Catch-all for the implementation-defined cp15 crn==15 space:
     * everything not otherwise registered reads as zero and ignores
     * writes (CONST + NO_RAW), overriding any default entries.
     */
    { .name = "C15_IMPDEF", .cp = 15, .crn = 15,
      .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY,
      .access = PL1_RW,
      .type = ARM_CP_CONST | ARM_CP_NO_RAW | ARM_CP_OVERRIDE,
      .resetvalue = 0 },
    REGINFO_SENTINEL
};
2896
static const ARMCPRegInfo cache_dirty_status_cp_reginfo[] = {
    /* Cache Dirty Status Register: always reads as zero (cache clean). */
    { .name = "CDSR", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 6,
      .access = PL1_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
      .resetvalue = 0 },
    REGINFO_SENTINEL
};
2904
static const ARMCPRegInfo cache_block_ops_cp_reginfo[] = {
    /* Block-transfer status register: reads as zero. */
    { .name = "BXSR", .cp = 15, .crn = 7, .crm = 12, .opc1 = 0, .opc2 = 4,
      .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
      .resetvalue = 0 },
    /* Block cache operations: all modelled as NOPs (64-bit MCRR forms). */
    { .name = "IICR", .cp = 15, .crm = 5, .opc1 = 0,
      .access = PL1_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
    { .name = "IDCR", .cp = 15, .crm = 6, .opc1 = 0,
      .access = PL1_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
    { .name = "CDCR", .cp = 15, .crm = 12, .opc1 = 0,
      .access = PL0_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
    { .name = "PIR", .cp = 15, .crm = 12, .opc1 = 1,
      .access = PL0_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
    { .name = "PDR", .cp = 15, .crm = 12, .opc1 = 2,
      .access = PL0_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
    { .name = "CIDCR", .cp = 15, .crm = 14, .opc1 = 0,
      .access = PL1_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
    REGINFO_SENTINEL
};
2925
static const ARMCPRegInfo cache_test_clean_cp_reginfo[] = {
    /*
     * Cache test-and-clean operations: always report bit 30 set,
     * i.e. the cache is already clean, so guest loops terminate
     * immediately.
     */
    { .name = "TC_DCACHE", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 3,
      .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
      .resetvalue = (1 << 30) },
    { .name = "TCI_DCACHE", .cp = 15, .crn = 7, .crm = 14, .opc1 = 0, .opc2 = 3,
      .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
      .resetvalue = (1 << 30) },
    REGINFO_SENTINEL
};
2938
static const ARMCPRegInfo strongarm_cp_reginfo[] = {
    /* StrongARM read-buffer registers: read-as-zero, write-ignored. */
    { .name = "C9_READBUFFER", .cp = 15, .crn = 9,
      .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY,
      .access = PL1_RW, .resetvalue = 0,
      .type = ARM_CP_CONST | ARM_CP_OVERRIDE | ARM_CP_NO_RAW },
    REGINFO_SENTINEL
};
2947
2948static uint64_t midr_read(CPUARMState *env, const ARMCPRegInfo *ri)
2949{
2950 ARMCPU *cpu = arm_env_get_cpu(env);
2951 unsigned int cur_el = arm_current_el(env);
2952 bool secure = arm_is_secure(env);
2953
2954 if (arm_feature(&cpu->env, ARM_FEATURE_EL2) && !secure && cur_el == 1) {
2955 return env->cp15.vpidr_el2;
2956 }
2957 return raw_read(env, ri);
2958}
2959
2960static uint64_t mpidr_read_val(CPUARMState *env)
2961{
2962 ARMCPU *cpu = ARM_CPU(arm_env_get_cpu(env));
2963 uint64_t mpidr = cpu->mp_affinity;
2964
2965 if (arm_feature(env, ARM_FEATURE_V7MP)) {
2966 mpidr |= (1U << 31);
2967
2968
2969
2970
2971 if (cpu->mp_is_up) {
2972 mpidr |= (1u << 30);
2973 }
2974 }
2975 return mpidr;
2976}
2977
2978static uint64_t mpidr_read(CPUARMState *env, const ARMCPRegInfo *ri)
2979{
2980 unsigned int cur_el = arm_current_el(env);
2981 bool secure = arm_is_secure(env);
2982
2983 if (arm_feature(env, ARM_FEATURE_EL2) && !secure && cur_el == 1) {
2984 return env->cp15.vmpidr_el2;
2985 }
2986 return mpidr_read_val(env);
2987}
2988
static const ARMCPRegInfo mpidr_cp_reginfo[] = {
    /* Multiprocessor Affinity Register; readfn handles virtualization. */
    { .name = "MPIDR", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 5,
      .access = PL1_R, .readfn = mpidr_read, .type = ARM_CP_NO_RAW },
    REGINFO_SENTINEL
};
2995
static const ARMCPRegInfo lpae_cp_reginfo[] = {
    /* AMAIR0/1 are read-as-zero constants here. */
    { .name = "AMAIR0", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .crn = 10, .crm = 3, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    /* AMAIR1 is mapped to AMAIR_EL1[63:32] on AArch64. */
    { .name = "AMAIR1", .cp = 15, .crn = 10, .crm = 3, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    /* 64-bit (MCRR/MRRC) views of PAR and the TTBRs, banked by NS. */
    { .name = "PAR", .cp = 15, .crm = 7, .opc1 = 0,
      .access = PL1_RW, .type = ARM_CP_64BIT, .resetvalue = 0,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.par_s),
                             offsetof(CPUARMState, cp15.par_ns)} },
    { .name = "TTBR0", .cp = 15, .crm = 2, .opc1 = 0,
      .access = PL1_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr0_s),
                             offsetof(CPUARMState, cp15.ttbr0_ns) },
      .writefn = vmsa_ttbr_write, },
    { .name = "TTBR1", .cp = 15, .crm = 2, .opc1 = 1,
      .access = PL1_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr1_s),
                             offsetof(CPUARMState, cp15.ttbr1_ns) },
      .writefn = vmsa_ttbr_write, },
    REGINFO_SENTINEL
};
3022
3023static uint64_t aa64_fpcr_read(CPUARMState *env, const ARMCPRegInfo *ri)
3024{
3025 return vfp_get_fpcr(env);
3026}
3027
3028static void aa64_fpcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
3029 uint64_t value)
3030{
3031 vfp_set_fpcr(env, value);
3032}
3033
3034static uint64_t aa64_fpsr_read(CPUARMState *env, const ARMCPRegInfo *ri)
3035{
3036 return vfp_get_fpsr(env);
3037}
3038
3039static void aa64_fpsr_write(CPUARMState *env, const ARMCPRegInfo *ri,
3040 uint64_t value)
3041{
3042 vfp_set_fpsr(env, value);
3043}
3044
3045static CPAccessResult aa64_daif_access(CPUARMState *env, const ARMCPRegInfo *ri,
3046 bool isread)
3047{
3048 if (arm_current_el(env) == 0 && !(env->cp15.sctlr_el[1] & SCTLR_UMA)) {
3049 return CP_ACCESS_TRAP;
3050 }
3051 return CP_ACCESS_OK;
3052}
3053
3054static void aa64_daif_write(CPUARMState *env, const ARMCPRegInfo *ri,
3055 uint64_t value)
3056{
3057 env->daif = value & PSTATE_DAIF;
3058}
3059
3060static CPAccessResult aa64_cacheop_access(CPUARMState *env,
3061 const ARMCPRegInfo *ri,
3062 bool isread)
3063{
3064
3065
3066
3067 if (arm_current_el(env) == 0 && !(env->cp15.sctlr_el[1] & SCTLR_UCI)) {
3068 return CP_ACCESS_TRAP;
3069 }
3070 return CP_ACCESS_OK;
3071}
3072
3073
3074
3075
3076
3077static void tlbi_aa64_vmalle1_write(CPUARMState *env, const ARMCPRegInfo *ri,
3078 uint64_t value)
3079{
3080 CPUState *cs = ENV_GET_CPU(env);
3081
3082 if (arm_is_secure_below_el3(env)) {
3083 tlb_flush_by_mmuidx(cs,
3084 ARMMMUIdxBit_S1SE1 |
3085 ARMMMUIdxBit_S1SE0);
3086 } else {
3087 tlb_flush_by_mmuidx(cs,
3088 ARMMMUIdxBit_S12NSE1 |
3089 ARMMMUIdxBit_S12NSE0);
3090 }
3091}
3092
3093static void tlbi_aa64_vmalle1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
3094 uint64_t value)
3095{
3096 CPUState *cs = ENV_GET_CPU(env);
3097 bool sec = arm_is_secure_below_el3(env);
3098
3099 if (sec) {
3100 tlb_flush_by_mmuidx_all_cpus_synced(cs,
3101 ARMMMUIdxBit_S1SE1 |
3102 ARMMMUIdxBit_S1SE0);
3103 } else {
3104 tlb_flush_by_mmuidx_all_cpus_synced(cs,
3105 ARMMMUIdxBit_S12NSE1 |
3106 ARMMMUIdxBit_S12NSE0);
3107 }
3108}
3109
3110static void tlbi_aa64_alle1_write(CPUARMState *env, const ARMCPRegInfo *ri,
3111 uint64_t value)
3112{
3113
3114
3115
3116
3117 ARMCPU *cpu = arm_env_get_cpu(env);
3118 CPUState *cs = CPU(cpu);
3119
3120 if (arm_is_secure_below_el3(env)) {
3121 tlb_flush_by_mmuidx(cs,
3122 ARMMMUIdxBit_S1SE1 |
3123 ARMMMUIdxBit_S1SE0);
3124 } else {
3125 if (arm_feature(env, ARM_FEATURE_EL2)) {
3126 tlb_flush_by_mmuidx(cs,
3127 ARMMMUIdxBit_S12NSE1 |
3128 ARMMMUIdxBit_S12NSE0 |
3129 ARMMMUIdxBit_S2NS);
3130 } else {
3131 tlb_flush_by_mmuidx(cs,
3132 ARMMMUIdxBit_S12NSE1 |
3133 ARMMMUIdxBit_S12NSE0);
3134 }
3135 }
3136}
3137
3138static void tlbi_aa64_alle2_write(CPUARMState *env, const ARMCPRegInfo *ri,
3139 uint64_t value)
3140{
3141 ARMCPU *cpu = arm_env_get_cpu(env);
3142 CPUState *cs = CPU(cpu);
3143
3144 tlb_flush_by_mmuidx(cs, ARMMMUIdxBit_S1E2);
3145}
3146
3147static void tlbi_aa64_alle3_write(CPUARMState *env, const ARMCPRegInfo *ri,
3148 uint64_t value)
3149{
3150 ARMCPU *cpu = arm_env_get_cpu(env);
3151 CPUState *cs = CPU(cpu);
3152
3153 tlb_flush_by_mmuidx(cs, ARMMMUIdxBit_S1E3);
3154}
3155
3156static void tlbi_aa64_alle1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
3157 uint64_t value)
3158{
3159
3160
3161
3162
3163 CPUState *cs = ENV_GET_CPU(env);
3164 bool sec = arm_is_secure_below_el3(env);
3165 bool has_el2 = arm_feature(env, ARM_FEATURE_EL2);
3166
3167 if (sec) {
3168 tlb_flush_by_mmuidx_all_cpus_synced(cs,
3169 ARMMMUIdxBit_S1SE1 |
3170 ARMMMUIdxBit_S1SE0);
3171 } else if (has_el2) {
3172 tlb_flush_by_mmuidx_all_cpus_synced(cs,
3173 ARMMMUIdxBit_S12NSE1 |
3174 ARMMMUIdxBit_S12NSE0 |
3175 ARMMMUIdxBit_S2NS);
3176 } else {
3177 tlb_flush_by_mmuidx_all_cpus_synced(cs,
3178 ARMMMUIdxBit_S12NSE1 |
3179 ARMMMUIdxBit_S12NSE0);
3180 }
3181}
3182
3183static void tlbi_aa64_alle2is_write(CPUARMState *env, const ARMCPRegInfo *ri,
3184 uint64_t value)
3185{
3186 CPUState *cs = ENV_GET_CPU(env);
3187
3188 tlb_flush_by_mmuidx_all_cpus_synced(cs, ARMMMUIdxBit_S1E2);
3189}
3190
3191static void tlbi_aa64_alle3is_write(CPUARMState *env, const ARMCPRegInfo *ri,
3192 uint64_t value)
3193{
3194 CPUState *cs = ENV_GET_CPU(env);
3195
3196 tlb_flush_by_mmuidx_all_cpus_synced(cs, ARMMMUIdxBit_S1E3);
3197}
3198
3199static void tlbi_aa64_vae1_write(CPUARMState *env, const ARMCPRegInfo *ri,
3200 uint64_t value)
3201{
3202
3203
3204
3205
3206
3207 ARMCPU *cpu = arm_env_get_cpu(env);
3208 CPUState *cs = CPU(cpu);
3209 uint64_t pageaddr = sextract64(value << 12, 0, 56);
3210
3211 if (arm_is_secure_below_el3(env)) {
3212 tlb_flush_page_by_mmuidx(cs, pageaddr,
3213 ARMMMUIdxBit_S1SE1 |
3214 ARMMMUIdxBit_S1SE0);
3215 } else {
3216 tlb_flush_page_by_mmuidx(cs, pageaddr,
3217 ARMMMUIdxBit_S12NSE1 |
3218 ARMMMUIdxBit_S12NSE0);
3219 }
3220}
3221
3222static void tlbi_aa64_vae2_write(CPUARMState *env, const ARMCPRegInfo *ri,
3223 uint64_t value)
3224{
3225
3226
3227
3228
3229 ARMCPU *cpu = arm_env_get_cpu(env);
3230 CPUState *cs = CPU(cpu);
3231 uint64_t pageaddr = sextract64(value << 12, 0, 56);
3232
3233 tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_S1E2);
3234}
3235
3236static void tlbi_aa64_vae3_write(CPUARMState *env, const ARMCPRegInfo *ri,
3237 uint64_t value)
3238{
3239
3240
3241
3242
3243 ARMCPU *cpu = arm_env_get_cpu(env);
3244 CPUState *cs = CPU(cpu);
3245 uint64_t pageaddr = sextract64(value << 12, 0, 56);
3246
3247 tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_S1E3);
3248}
3249
3250static void tlbi_aa64_vae1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
3251 uint64_t value)
3252{
3253 ARMCPU *cpu = arm_env_get_cpu(env);
3254 CPUState *cs = CPU(cpu);
3255 bool sec = arm_is_secure_below_el3(env);
3256 uint64_t pageaddr = sextract64(value << 12, 0, 56);
3257
3258 if (sec) {
3259 tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
3260 ARMMMUIdxBit_S1SE1 |
3261 ARMMMUIdxBit_S1SE0);
3262 } else {
3263 tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
3264 ARMMMUIdxBit_S12NSE1 |
3265 ARMMMUIdxBit_S12NSE0);
3266 }
3267}
3268
3269static void tlbi_aa64_vae2is_write(CPUARMState *env, const ARMCPRegInfo *ri,
3270 uint64_t value)
3271{
3272 CPUState *cs = ENV_GET_CPU(env);
3273 uint64_t pageaddr = sextract64(value << 12, 0, 56);
3274
3275 tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
3276 ARMMMUIdxBit_S1E2);
3277}
3278
3279static void tlbi_aa64_vae3is_write(CPUARMState *env, const ARMCPRegInfo *ri,
3280 uint64_t value)
3281{
3282 CPUState *cs = ENV_GET_CPU(env);
3283 uint64_t pageaddr = sextract64(value << 12, 0, 56);
3284
3285 tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
3286 ARMMMUIdxBit_S1E3);
3287}
3288
3289static void tlbi_aa64_ipas2e1_write(CPUARMState *env, const ARMCPRegInfo *ri,
3290 uint64_t value)
3291{
3292
3293
3294
3295
3296
3297
3298 ARMCPU *cpu = arm_env_get_cpu(env);
3299 CPUState *cs = CPU(cpu);
3300 uint64_t pageaddr;
3301
3302 if (!arm_feature(env, ARM_FEATURE_EL2) || !(env->cp15.scr_el3 & SCR_NS)) {
3303 return;
3304 }
3305
3306 pageaddr = sextract64(value << 12, 0, 48);
3307
3308 tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_S2NS);
3309}
3310
3311static void tlbi_aa64_ipas2e1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
3312 uint64_t value)
3313{
3314 CPUState *cs = ENV_GET_CPU(env);
3315 uint64_t pageaddr;
3316
3317 if (!arm_feature(env, ARM_FEATURE_EL2) || !(env->cp15.scr_el3 & SCR_NS)) {
3318 return;
3319 }
3320
3321 pageaddr = sextract64(value << 12, 0, 48);
3322
3323 tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
3324 ARMMMUIdxBit_S2NS);
3325}
3326
3327static CPAccessResult aa64_zva_access(CPUARMState *env, const ARMCPRegInfo *ri,
3328 bool isread)
3329{
3330
3331
3332
3333 if (arm_current_el(env) == 0 && !(env->cp15.sctlr_el[1] & SCTLR_DZE)) {
3334 return CP_ACCESS_TRAP;
3335 }
3336 return CP_ACCESS_OK;
3337}
3338
3339static uint64_t aa64_dczid_read(CPUARMState *env, const ARMCPRegInfo *ri)
3340{
3341 ARMCPU *cpu = arm_env_get_cpu(env);
3342 int dzp_bit = 1 << 4;
3343
3344
3345 if (aa64_zva_access(env, NULL, false) == CP_ACCESS_OK) {
3346 dzp_bit = 0;
3347 }
3348 return cpu->dcz_blocksize | dzp_bit;
3349}
3350
3351static CPAccessResult sp_el0_access(CPUARMState *env, const ARMCPRegInfo *ri,
3352 bool isread)
3353{
3354 if (!(env->pstate & PSTATE_SP)) {
3355
3356
3357
3358 return CP_ACCESS_TRAP_UNCATEGORIZED;
3359 }
3360 return CP_ACCESS_OK;
3361}
3362
3363static uint64_t spsel_read(CPUARMState *env, const ARMCPRegInfo *ri)
3364{
3365 return env->pstate & PSTATE_SP;
3366}
3367
/* Write SPSel: delegate to update_spsel(), which updates PSTATE.SP and
 * switches the active stack pointer register as needed.
 */
static void spsel_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t val)
{
    update_spsel(env, val);
}
3372
3373static void sctlr_write(CPUARMState *env, const ARMCPRegInfo *ri,
3374 uint64_t value)
3375{
3376 ARMCPU *cpu = arm_env_get_cpu(env);
3377
3378 if (raw_read(env, ri) == value) {
3379
3380
3381
3382 return;
3383 }
3384
3385 if (arm_feature(env, ARM_FEATURE_PMSA) && !cpu->has_mpu) {
3386
3387 value &= ~SCTLR_M;
3388 }
3389
3390 raw_write(env, ri, value);
3391
3392
3393 tlb_flush(CPU(cpu));
3394}
3395
3396static CPAccessResult fpexc32_access(CPUARMState *env, const ARMCPRegInfo *ri,
3397 bool isread)
3398{
3399 if ((env->cp15.cptr_el[2] & CPTR_TFP) && arm_current_el(env) == 2) {
3400 return CP_ACCESS_TRAP_FP_EL2;
3401 }
3402 if (env->cp15.cptr_el[3] & CPTR_TFP) {
3403 return CP_ACCESS_TRAP_FP_EL3;
3404 }
3405 return CP_ACCESS_OK;
3406}
3407
/* Write SDCR, the AArch32 view of MDCR_EL3: only the architecturally
 * defined bits (SDCR_VALID_MASK) are writable; the rest are RES0.
 */
static void sdcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                       uint64_t value)
{
    env->cp15.mdcr_el3 = value & SDCR_VALID_MASK;
}
3413
static const ARMCPRegInfo v8_cp_reginfo[] = {
    /* Minimal set of EL0-visible registers. This will need to be expanded
     * significantly for system emulation of AArch64 CPUs.
     */
    { .name = "NZCV", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .opc2 = 0, .crn = 4, .crm = 2,
      .access = PL0_RW, .type = ARM_CP_NZCV },
    { .name = "DAIF", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 4, .crm = 2,
      .type = ARM_CP_NO_RAW,
      .access = PL0_RW, .accessfn = aa64_daif_access,
      .fieldoffset = offsetof(CPUARMState, daif),
      .writefn = aa64_daif_write, .resetfn = arm_cp_reset_ignore },
    { .name = "FPCR", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .opc2 = 0, .crn = 4, .crm = 4,
      .access = PL0_RW, .type = ARM_CP_FPU | ARM_CP_SUPPRESS_TB_END,
      .readfn = aa64_fpcr_read, .writefn = aa64_fpcr_write },
    { .name = "FPSR", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 4, .crm = 4,
      .access = PL0_RW, .type = ARM_CP_FPU | ARM_CP_SUPPRESS_TB_END,
      .readfn = aa64_fpsr_read, .writefn = aa64_fpsr_write },
    { .name = "DCZID_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .opc2 = 7, .crn = 0, .crm = 0,
      .access = PL0_R, .type = ARM_CP_NO_RAW,
      .readfn = aa64_dczid_read },
    { .name = "DC_ZVA", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 4, .opc2 = 1,
      .access = PL0_W, .type = ARM_CP_DC_ZVA,
#ifndef CONFIG_USER_ONLY
      /* Avoid overhead of an access check that always passes in user-mode */
      .accessfn = aa64_zva_access,
#endif
    },
    { .name = "CURRENTEL", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .opc2 = 2, .crn = 4, .crm = 2,
      .access = PL1_R, .type = ARM_CP_CURRENTEL },
    /* Cache ops: all NOPs since we don't emulate caches */
    { .name = "IC_IALLUIS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 0,
      .access = PL1_W, .type = ARM_CP_NOP },
    { .name = "IC_IALLU", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 0,
      .access = PL1_W, .type = ARM_CP_NOP },
    { .name = "IC_IVAU", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 5, .opc2 = 1,
      .access = PL0_W, .type = ARM_CP_NOP,
      .accessfn = aa64_cacheop_access },
    { .name = "DC_IVAC", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NOP },
    { .name = "DC_ISW", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 2,
      .access = PL1_W, .type = ARM_CP_NOP },
    { .name = "DC_CVAC", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 10, .opc2 = 1,
      .access = PL0_W, .type = ARM_CP_NOP,
      .accessfn = aa64_cacheop_access },
    { .name = "DC_CSW", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 2,
      .access = PL1_W, .type = ARM_CP_NOP },
    { .name = "DC_CVAU", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 11, .opc2 = 1,
      .access = PL0_W, .type = ARM_CP_NOP,
      .accessfn = aa64_cacheop_access },
    { .name = "DC_CIVAC", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 14, .opc2 = 1,
      .access = PL0_W, .type = ARM_CP_NOP,
      .accessfn = aa64_cacheop_access },
    { .name = "DC_CISW", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 2,
      .access = PL1_W, .type = ARM_CP_NOP },
    /* TLBI operations */
    { .name = "TLBI_VMALLE1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 0,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vmalle1is_write },
    { .name = "TLBI_VAE1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae1is_write },
    { .name = "TLBI_ASIDE1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 2,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vmalle1is_write },
    { .name = "TLBI_VAAE1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 3,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae1is_write },
    { .name = "TLBI_VALE1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 5,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae1is_write },
    { .name = "TLBI_VAALE1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 7,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae1is_write },
    { .name = "TLBI_VMALLE1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 0,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vmalle1_write },
    { .name = "TLBI_VAE1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae1_write },
    { .name = "TLBI_ASIDE1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 2,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vmalle1_write },
    { .name = "TLBI_VAAE1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 3,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae1_write },
    { .name = "TLBI_VALE1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 5,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae1_write },
    { .name = "TLBI_VAALE1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 7,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae1_write },
    { .name = "TLBI_IPAS2E1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 1,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_ipas2e1is_write },
    { .name = "TLBI_IPAS2LE1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 5,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_ipas2e1is_write },
    { .name = "TLBI_ALLE1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 4,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_alle1is_write },
    { .name = "TLBI_VMALLS12E1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 6,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_alle1is_write },
    { .name = "TLBI_IPAS2E1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 1,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_ipas2e1_write },
    { .name = "TLBI_IPAS2LE1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 5,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_ipas2e1_write },
    { .name = "TLBI_ALLE1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 4,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_alle1_write },
    /* NOTE(review): the non-IS VMALLS12E1 reuses the Inner Shareable
     * (broadcast) writefn; over-invalidation is architecturally permitted,
     * so this is presumably deliberate -- confirm before "fixing".
     */
    { .name = "TLBI_VMALLS12E1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 6,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_alle1is_write },
#ifndef CONFIG_USER_ONLY
    /* 64 bit address translation operations */
    { .name = "AT_S1E1R", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 0,
      .access = PL1_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
    { .name = "AT_S1E1W", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
    { .name = "AT_S1E0R", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 2,
      .access = PL1_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
    { .name = "AT_S1E0W", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 3,
      .access = PL1_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
    { .name = "AT_S12E1R", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 4,
      .access = PL2_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
    { .name = "AT_S12E1W", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 5,
      .access = PL2_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
    { .name = "AT_S12E0R", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 6,
      .access = PL2_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
    { .name = "AT_S12E0W", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 7,
      .access = PL2_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
    /* AT S1E2* are elsewhere as they UNDEF from EL3 if EL2 is not present */
    { .name = "AT_S1E3R", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 7, .crm = 8, .opc2 = 0,
      .access = PL3_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
    { .name = "AT_S1E3W", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 7, .crm = 8, .opc2 = 1,
      .access = PL3_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
    { .name = "PAR_EL1", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 0, .crn = 7, .crm = 4, .opc2 = 0,
      .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.par_el[1]),
      .writefn = par_write },
#endif
    /* TLB invalidate last level of translation table walk */
    { .name = "TLBIMVALIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 5,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_is_write },
    { .name = "TLBIMVAALIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 7,
      .type = ARM_CP_NO_RAW, .access = PL1_W,
      .writefn = tlbimvaa_is_write },
    { .name = "TLBIMVAL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 5,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_write },
    { .name = "TLBIMVAAL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 7,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimvaa_write },
    { .name = "TLBIMVALH", .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 5,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbimva_hyp_write },
    { .name = "TLBIMVALHIS",
      .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 5,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbimva_hyp_is_write },
    { .name = "TLBIIPAS2",
      .cp = 15, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbiipas2_write },
    { .name = "TLBIIPAS2IS",
      .cp = 15, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbiipas2_is_write },
    { .name = "TLBIIPAS2L",
      .cp = 15, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 5,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbiipas2_write },
    { .name = "TLBIIPAS2LIS",
      .cp = 15, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 5,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbiipas2_is_write },
    /* 32 bit cache operations */
    { .name = "ICIALLUIS", .cp = 15, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 0,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "BPIALLUIS", .cp = 15, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 6,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "ICIALLU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 0,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "ICIMVAU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 1,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "BPIALL", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 6,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "BPIMVA", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 7,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "DCIMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 1,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "DCISW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 2,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "DCCMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 1,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "DCCSW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 2,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "DCCMVAU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 11, .opc2 = 1,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "DCCIMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 1,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "DCCISW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 2,
      .type = ARM_CP_NOP, .access = PL1_W },
    /* MMU Domain access control / MPU write buffer control */
    { .name = "DACR", .cp = 15, .opc1 = 0, .crn = 3, .crm = 0, .opc2 = 0,
      .access = PL1_RW, .resetvalue = 0,
      .writefn = dacr_write, .raw_writefn = raw_write,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dacr_s),
                             offsetoflow32(CPUARMState, cp15.dacr_ns) } },
    { .name = "ELR_EL1", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 0, .opc2 = 1,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, elr_el[1]) },
    { .name = "SPSR_EL1", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 0, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_SVC]) },
    /* We rely on the access checks not allowing the guest to write to the
     * state field when SPSel indicates that it's being used as the stack
     * pointer.
     */
    { .name = "SP_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 1, .opc2 = 0,
      .access = PL1_RW, .accessfn = sp_el0_access,
      .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, sp_el[0]) },
    { .name = "SP_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 1, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, sp_el[1]) },
    { .name = "SPSel", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 2, .opc2 = 0,
      .type = ARM_CP_NO_RAW,
      .access = PL1_RW, .readfn = spsel_read, .writefn = spsel_write },
    { .name = "FPEXC32_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 3, .opc2 = 0,
      .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, vfp.xregs[ARM_VFP_FPEXC]),
      .access = PL2_RW, .accessfn = fpexc32_access },
    { .name = "DACR32_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 3, .crm = 0, .opc2 = 0,
      .access = PL2_RW, .resetvalue = 0,
      .writefn = dacr_write, .raw_writefn = raw_write,
      .fieldoffset = offsetof(CPUARMState, cp15.dacr32_el2) },
    { .name = "IFSR32_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 0, .opc2 = 1,
      .access = PL2_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.ifsr32_el2) },
    { .name = "SPSR_IRQ", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 0,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_IRQ]) },
    { .name = "SPSR_ABT", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 1,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_ABT]) },
    { .name = "SPSR_UND", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 2,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_UND]) },
    { .name = "SPSR_FIQ", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 3,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_FIQ]) },
    { .name = "MDCR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 3, .opc2 = 1,
      .resetvalue = 0,
      .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.mdcr_el3) },
    { .name = "SDCR", .type = ARM_CP_ALIAS,
      .cp = 15, .opc1 = 0, .crn = 1, .crm = 3, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_trap_aa32s_el1,
      .writefn = sdcr_write,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.mdcr_el3) },
    REGINFO_SENTINEL
};
3744
3745
3746static const ARMCPRegInfo el3_no_el2_cp_reginfo[] = {
3747 { .name = "VBAR_EL2", .state = ARM_CP_STATE_AA64,
3748 .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 0,
3749 .access = PL2_RW,
3750 .readfn = arm_cp_read_zero, .writefn = arm_cp_write_ignore },
3751 { .name = "HCR_EL2", .state = ARM_CP_STATE_AA64,
3752 .type = ARM_CP_NO_RAW,
3753 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0,
3754 .access = PL2_RW,
3755 .readfn = arm_cp_read_zero, .writefn = arm_cp_write_ignore },
3756 { .name = "CPTR_EL2", .state = ARM_CP_STATE_BOTH,
3757 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 2,
3758 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
3759 { .name = "MAIR_EL2", .state = ARM_CP_STATE_BOTH,
3760 .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 0,
3761 .access = PL2_RW, .type = ARM_CP_CONST,
3762 .resetvalue = 0 },
3763 { .name = "HMAIR1", .state = ARM_CP_STATE_AA32,
3764 .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 1,
3765 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
3766 { .name = "AMAIR_EL2", .state = ARM_CP_STATE_BOTH,
3767 .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 0,
3768 .access = PL2_RW, .type = ARM_CP_CONST,
3769 .resetvalue = 0 },
3770 { .name = "HMAIR1", .state = ARM_CP_STATE_AA32,
3771 .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 1,
3772 .access = PL2_RW, .type = ARM_CP_CONST,
3773 .resetvalue = 0 },
3774 { .name = "AFSR0_EL2", .state = ARM_CP_STATE_BOTH,
3775 .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 0,
3776 .access = PL2_RW, .type = ARM_CP_CONST,
3777 .resetvalue = 0 },
3778 { .name = "AFSR1_EL2", .state = ARM_CP_STATE_BOTH,
3779 .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 1,
3780 .access = PL2_RW, .type = ARM_CP_CONST,
3781 .resetvalue = 0 },
3782 { .name = "TCR_EL2", .state = ARM_CP_STATE_BOTH,
3783 .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 2,
3784 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
3785 { .name = "VTCR_EL2", .state = ARM_CP_STATE_BOTH,
3786 .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 2,
3787 .access = PL2_RW, .accessfn = access_el3_aa32ns_aa64any,
3788 .type = ARM_CP_CONST, .resetvalue = 0 },
3789 { .name = "VTTBR", .state = ARM_CP_STATE_AA32,
3790 .cp = 15, .opc1 = 6, .crm = 2,
3791 .access = PL2_RW, .accessfn = access_el3_aa32ns,
3792 .type = ARM_CP_CONST | ARM_CP_64BIT, .resetvalue = 0 },
3793 { .name = "VTTBR_EL2", .state = ARM_CP_STATE_AA64,
3794 .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 0,
3795 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
3796 { .name = "SCTLR_EL2", .state = ARM_CP_STATE_BOTH,
3797 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 0,
3798 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
3799 { .name = "TPIDR_EL2", .state = ARM_CP_STATE_BOTH,
3800 .opc0 = 3, .opc1 = 4, .crn = 13, .crm = 0, .opc2 = 2,
3801 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
3802 { .name = "TTBR0_EL2", .state = ARM_CP_STATE_AA64,
3803 .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 0,
3804 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
3805 { .name = "HTTBR", .cp = 15, .opc1 = 4, .crm = 2,
3806 .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_CONST,
3807 .resetvalue = 0 },
3808 { .name = "CNTHCTL_EL2", .state = ARM_CP_STATE_BOTH,
3809 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 1, .opc2 = 0,
3810 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
3811 { .name = "CNTVOFF_EL2", .state = ARM_CP_STATE_AA64,
3812 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 0, .opc2 = 3,
3813 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
3814 { .name = "CNTVOFF", .cp = 15, .opc1 = 4, .crm = 14,
3815 .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_CONST,
3816 .resetvalue = 0 },
3817 { .name = "CNTHP_CVAL_EL2", .state = ARM_CP_STATE_AA64,
3818 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 2,
3819 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
3820 { .name = "CNTHP_CVAL", .cp = 15, .opc1 = 6, .crm = 14,
3821 .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_CONST,
3822 .resetvalue = 0 },
3823 { .name = "CNTHP_TVAL_EL2", .state = ARM_CP_STATE_BOTH,
3824 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 0,
3825 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
3826 { .name = "CNTHP_CTL_EL2", .state = ARM_CP_STATE_BOTH,
3827 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 1,
3828 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
3829 { .name = "MDCR_EL2", .state = ARM_CP_STATE_BOTH,
3830 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 1,
3831 .access = PL2_RW, .accessfn = access_tda,
3832 .type = ARM_CP_CONST, .resetvalue = 0 },
3833 { .name = "HPFAR_EL2", .state = ARM_CP_STATE_BOTH,
3834 .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 4,
3835 .access = PL2_RW, .accessfn = access_el3_aa32ns_aa64any,
3836 .type = ARM_CP_CONST, .resetvalue = 0 },
3837 { .name = "HSTR_EL2", .state = ARM_CP_STATE_BOTH,
3838 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 3,
3839 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
3840 REGINFO_SENTINEL
3841};
3842
/* Write HCR_EL2: mask out bits that are RES0 in this configuration and
 * flush the TLB if any MMU-affecting bits change.
 */
static void hcr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    uint64_t valid_mask = HCR_MASK;

    if (arm_feature(env, ARM_FEATURE_EL3)) {
        valid_mask &= ~HCR_HCD;
    } else if (cpu->psci_conduit != QEMU_PSCI_CONDUIT_SMC) {
        /* Architecturally HCR.TSC is RES0 if EL3 is not implemented.
         * However, if we're using the SMC PSCI conduit then QEMU is
         * effectively acting like EL3 firmware and so the guest at
         * EL2 should retain the ability to prevent EL1 from being
         * able to make SMC calls into the ersatz firmware, so in
         * that case HCR.TSC should be read/write.
         */
        valid_mask &= ~HCR_TSC;
    }

    /* Clear RES0 bits.  */
    value &= valid_mask;

    /* These bits change the MMU setup:
     * HCR_VM enables stage 2 translation
     * HCR_PTW forbids certain page-table setups
     * HCR_DC disables stage1 and enables stage2 translation
     * so if any of them changes we must flush the TLB.
     */
    if ((raw_read(env, ri) ^ value) & (HCR_VM | HCR_PTW | HCR_DC)) {
        tlb_flush(CPU(cpu));
    }
    raw_write(env, ri, value);
}
3874
3875static const ARMCPRegInfo el2_cp_reginfo[] = {
3876 { .name = "HCR_EL2", .state = ARM_CP_STATE_AA64,
3877 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0,
3878 .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.hcr_el2),
3879 .writefn = hcr_write },
3880 { .name = "ELR_EL2", .state = ARM_CP_STATE_AA64,
3881 .type = ARM_CP_ALIAS,
3882 .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 0, .opc2 = 1,
3883 .access = PL2_RW,
3884 .fieldoffset = offsetof(CPUARMState, elr_el[2]) },
3885 { .name = "ESR_EL2", .state = ARM_CP_STATE_AA64,
3886 .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 2, .opc2 = 0,
3887 .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.esr_el[2]) },
3888 { .name = "FAR_EL2", .state = ARM_CP_STATE_AA64,
3889 .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 0,
3890 .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.far_el[2]) },
3891 { .name = "SPSR_EL2", .state = ARM_CP_STATE_AA64,
3892 .type = ARM_CP_ALIAS,
3893 .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 0, .opc2 = 0,
3894 .access = PL2_RW,
3895 .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_HYP]) },
3896 { .name = "VBAR_EL2", .state = ARM_CP_STATE_AA64,
3897 .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 0,
3898 .access = PL2_RW, .writefn = vbar_write,
3899 .fieldoffset = offsetof(CPUARMState, cp15.vbar_el[2]),
3900 .resetvalue = 0 },
3901 { .name = "SP_EL2", .state = ARM_CP_STATE_AA64,
3902 .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 1, .opc2 = 0,
3903 .access = PL3_RW, .type = ARM_CP_ALIAS,
3904 .fieldoffset = offsetof(CPUARMState, sp_el[2]) },
3905 { .name = "CPTR_EL2", .state = ARM_CP_STATE_BOTH,
3906 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 2,
3907 .access = PL2_RW, .accessfn = cptr_access, .resetvalue = 0,
3908 .fieldoffset = offsetof(CPUARMState, cp15.cptr_el[2]) },
3909 { .name = "MAIR_EL2", .state = ARM_CP_STATE_BOTH,
3910 .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 0,
3911 .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.mair_el[2]),
3912 .resetvalue = 0 },
3913 { .name = "HMAIR1", .state = ARM_CP_STATE_AA32,
3914 .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 1,
3915 .access = PL2_RW, .type = ARM_CP_ALIAS,
3916 .fieldoffset = offsetofhigh32(CPUARMState, cp15.mair_el[2]) },
3917 { .name = "AMAIR_EL2", .state = ARM_CP_STATE_BOTH,
3918 .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 0,
3919 .access = PL2_RW, .type = ARM_CP_CONST,
3920 .resetvalue = 0 },
3921
3922 { .name = "HMAIR1", .state = ARM_CP_STATE_AA32,
3923 .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 1,
3924 .access = PL2_RW, .type = ARM_CP_CONST,
3925 .resetvalue = 0 },
3926 { .name = "AFSR0_EL2", .state = ARM_CP_STATE_BOTH,
3927 .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 0,
3928 .access = PL2_RW, .type = ARM_CP_CONST,
3929 .resetvalue = 0 },
3930 { .name = "AFSR1_EL2", .state = ARM_CP_STATE_BOTH,
3931 .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 1,
3932 .access = PL2_RW, .type = ARM_CP_CONST,
3933 .resetvalue = 0 },
3934 { .name = "TCR_EL2", .state = ARM_CP_STATE_BOTH,
3935 .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 2,
3936 .access = PL2_RW,
3937
3938
3939
3940 .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[2]) },
3941 { .name = "VTCR", .state = ARM_CP_STATE_AA32,
3942 .cp = 15, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 2,
3943 .type = ARM_CP_ALIAS,
3944 .access = PL2_RW, .accessfn = access_el3_aa32ns,
3945 .fieldoffset = offsetof(CPUARMState, cp15.vtcr_el2) },
3946 { .name = "VTCR_EL2", .state = ARM_CP_STATE_AA64,
3947 .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 2,
3948 .access = PL2_RW,
3949
3950
3951
3952 .fieldoffset = offsetof(CPUARMState, cp15.vtcr_el2) },
3953 { .name = "VTTBR", .state = ARM_CP_STATE_AA32,
3954 .cp = 15, .opc1 = 6, .crm = 2,
3955 .type = ARM_CP_64BIT | ARM_CP_ALIAS,
3956 .access = PL2_RW, .accessfn = access_el3_aa32ns,
3957 .fieldoffset = offsetof(CPUARMState, cp15.vttbr_el2),
3958 .writefn = vttbr_write },
3959 { .name = "VTTBR_EL2", .state = ARM_CP_STATE_AA64,
3960 .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 0,
3961 .access = PL2_RW, .writefn = vttbr_write,
3962 .fieldoffset = offsetof(CPUARMState, cp15.vttbr_el2) },
3963 { .name = "SCTLR_EL2", .state = ARM_CP_STATE_BOTH,
3964 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 0,
3965 .access = PL2_RW, .raw_writefn = raw_write, .writefn = sctlr_write,
3966 .fieldoffset = offsetof(CPUARMState, cp15.sctlr_el[2]) },
3967 { .name = "TPIDR_EL2", .state = ARM_CP_STATE_BOTH,
3968 .opc0 = 3, .opc1 = 4, .crn = 13, .crm = 0, .opc2 = 2,
3969 .access = PL2_RW, .resetvalue = 0,
3970 .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[2]) },
3971 { .name = "TTBR0_EL2", .state = ARM_CP_STATE_AA64,
3972 .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 0,
3973 .access = PL2_RW, .resetvalue = 0,
3974 .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[2]) },
3975 { .name = "HTTBR", .cp = 15, .opc1 = 4, .crm = 2,
3976 .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS,
3977 .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[2]) },
3978 { .name = "TLBIALLNSNH",
3979 .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 4,
3980 .type = ARM_CP_NO_RAW, .access = PL2_W,
3981 .writefn = tlbiall_nsnh_write },
3982 { .name = "TLBIALLNSNHIS",
3983 .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 4,
3984 .type = ARM_CP_NO_RAW, .access = PL2_W,
3985 .writefn = tlbiall_nsnh_is_write },
3986 { .name = "TLBIALLH", .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 0,
3987 .type = ARM_CP_NO_RAW, .access = PL2_W,
3988 .writefn = tlbiall_hyp_write },
3989 { .name = "TLBIALLHIS", .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 0,
3990 .type = ARM_CP_NO_RAW, .access = PL2_W,
3991 .writefn = tlbiall_hyp_is_write },
3992 { .name = "TLBIMVAH", .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 1,
3993 .type = ARM_CP_NO_RAW, .access = PL2_W,
3994 .writefn = tlbimva_hyp_write },
3995 { .name = "TLBIMVAHIS", .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 1,
3996 .type = ARM_CP_NO_RAW, .access = PL2_W,
3997 .writefn = tlbimva_hyp_is_write },
3998 { .name = "TLBI_ALLE2", .state = ARM_CP_STATE_AA64,
3999 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 0,
4000 .type = ARM_CP_NO_RAW, .access = PL2_W,
4001 .writefn = tlbi_aa64_alle2_write },
4002 { .name = "TLBI_VAE2", .state = ARM_CP_STATE_AA64,
4003 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 1,
4004 .type = ARM_CP_NO_RAW, .access = PL2_W,
4005 .writefn = tlbi_aa64_vae2_write },
4006 { .name = "TLBI_VALE2", .state = ARM_CP_STATE_AA64,
4007 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 5,
4008 .access = PL2_W, .type = ARM_CP_NO_RAW,
4009 .writefn = tlbi_aa64_vae2_write },
4010 { .name = "TLBI_ALLE2IS", .state = ARM_CP_STATE_AA64,
4011 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 0,
4012 .access = PL2_W, .type = ARM_CP_NO_RAW,
4013 .writefn = tlbi_aa64_alle2is_write },
4014 { .name = "TLBI_VAE2IS", .state = ARM_CP_STATE_AA64,
4015 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 1,
4016 .type = ARM_CP_NO_RAW, .access = PL2_W,
4017 .writefn = tlbi_aa64_vae2is_write },
4018 { .name = "TLBI_VALE2IS", .state = ARM_CP_STATE_AA64,
4019 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 5,
4020 .access = PL2_W, .type = ARM_CP_NO_RAW,
4021 .writefn = tlbi_aa64_vae2is_write },
4022#ifndef CONFIG_USER_ONLY
4023
4024
4025
4026
4027 { .name = "AT_S1E2R", .state = ARM_CP_STATE_AA64,
4028 .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 0,
4029 .access = PL2_W, .accessfn = at_s1e2_access,
4030 .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
4031 { .name = "AT_S1E2W", .state = ARM_CP_STATE_AA64,
4032 .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 1,
4033 .access = PL2_W, .accessfn = at_s1e2_access,
4034 .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
4035
4036
4037
4038
4039
4040 { .name = "ATS1HR", .cp = 15, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 0,
4041 .access = PL2_W,
4042 .writefn = ats1h_write, .type = ARM_CP_NO_RAW },
4043 { .name = "ATS1HW", .cp = 15, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 1,
4044 .access = PL2_W,
4045 .writefn = ats1h_write, .type = ARM_CP_NO_RAW },
4046 { .name = "CNTHCTL_EL2", .state = ARM_CP_STATE_BOTH,
4047 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 1, .opc2 = 0,
4048
4049
4050
4051
4052 .access = PL2_RW, .resetvalue = 3,
4053 .fieldoffset = offsetof(CPUARMState, cp15.cnthctl_el2) },
4054 { .name = "CNTVOFF_EL2", .state = ARM_CP_STATE_AA64,
4055 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 0, .opc2 = 3,
4056 .access = PL2_RW, .type = ARM_CP_IO, .resetvalue = 0,
4057 .writefn = gt_cntvoff_write,
4058 .fieldoffset = offsetof(CPUARMState, cp15.cntvoff_el2) },
4059 { .name = "CNTVOFF", .cp = 15, .opc1 = 4, .crm = 14,
4060 .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS | ARM_CP_IO,
4061 .writefn = gt_cntvoff_write,
4062 .fieldoffset = offsetof(CPUARMState, cp15.cntvoff_el2) },
4063 { .name = "CNTHP_CVAL_EL2", .state = ARM_CP_STATE_AA64,
4064 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 2,
4065 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYP].cval),
4066 .type = ARM_CP_IO, .access = PL2_RW,
4067 .writefn = gt_hyp_cval_write, .raw_writefn = raw_write },
4068 { .name = "CNTHP_CVAL", .cp = 15, .opc1 = 6, .crm = 14,
4069 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYP].cval),
4070 .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_IO,
4071 .writefn = gt_hyp_cval_write, .raw_writefn = raw_write },
4072 { .name = "CNTHP_TVAL_EL2", .state = ARM_CP_STATE_BOTH,
4073 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 0,
4074 .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL2_RW,
4075 .resetfn = gt_hyp_timer_reset,
4076 .readfn = gt_hyp_tval_read, .writefn = gt_hyp_tval_write },
4077 { .name = "CNTHP_CTL_EL2", .state = ARM_CP_STATE_BOTH,
4078 .type = ARM_CP_IO,
4079 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 1,
4080 .access = PL2_RW,
4081 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYP].ctl),
4082 .resetvalue = 0,
4083 .writefn = gt_hyp_ctl_write, .raw_writefn = raw_write },
4084#endif
4085
4086
4087
4088
4089
4090 { .name = "MDCR_EL2", .state = ARM_CP_STATE_BOTH,
4091 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 1,
4092 .access = PL2_RW, .resetvalue = 0,
4093 .fieldoffset = offsetof(CPUARMState, cp15.mdcr_el2), },
4094 { .name = "HPFAR", .state = ARM_CP_STATE_AA32,
4095 .cp = 15, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 4,
4096 .access = PL2_RW, .accessfn = access_el3_aa32ns,
4097 .fieldoffset = offsetof(CPUARMState, cp15.hpfar_el2) },
4098 { .name = "HPFAR_EL2", .state = ARM_CP_STATE_AA64,
4099 .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 4,
4100 .access = PL2_RW,
4101 .fieldoffset = offsetof(CPUARMState, cp15.hpfar_el2) },
4102 { .name = "HSTR_EL2", .state = ARM_CP_STATE_BOTH,
4103 .cp = 15, .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 3,
4104 .access = PL2_RW,
4105 .fieldoffset = offsetof(CPUARMState, cp15.hstr_el2) },
4106 REGINFO_SENTINEL
4107};
4108
4109static CPAccessResult nsacr_access(CPUARMState *env, const ARMCPRegInfo *ri,
4110 bool isread)
4111{
4112
4113
4114
4115 if (arm_current_el(env) == 3) {
4116 return CP_ACCESS_OK;
4117 }
4118 if (arm_is_secure_below_el3(env)) {
4119 return CP_ACCESS_TRAP_EL3;
4120 }
4121
4122 if (isread) {
4123 return CP_ACCESS_OK;
4124 }
4125 return CP_ACCESS_TRAP_UNCATEGORIZED;
4126}
4127
/* System registers that exist only when EL3 is implemented. AArch32
 * banked-Secure aliases (SCR, SDER, MVBAR) map onto the same state as
 * their AArch64 counterparts.
 */
static const ARMCPRegInfo el3_cp_reginfo[] = {
    { .name = "SCR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 0,
      .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.scr_el3),
      .resetvalue = 0, .writefn = scr_write },
    /* AArch32 SCR is an alias of the low 32 bits of SCR_EL3 */
    { .name = "SCR", .type = ARM_CP_ALIAS,
      .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_trap_aa32s_el1,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.scr_el3),
      .writefn = scr_write },
    { .name = "SDER32_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 1,
      .access = PL3_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.sder) },
    { .name = "SDER",
      .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 1,
      .access = PL3_RW, .resetvalue = 0,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.sder) },
    { .name = "MVBAR", .cp = 15, .opc1 = 0, .crn = 12, .crm = 0, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_trap_aa32s_el1,
      .writefn = vbar_write, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.mvbar) },
    { .name = "TTBR0_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 2, .crm = 0, .opc2 = 0,
      .access = PL3_RW, .writefn = vmsa_ttbr_write, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[3]) },
    { .name = "TCR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 2, .crm = 0, .opc2 = 2,
      .access = PL3_RW,
      /* No .writefn needed: unlike the EL1 TCR a write here cannot
       * require a TLB flush for an ASID change. Reset and raw (migration)
       * writes are routed through the shared TTBCR handlers.
       */
      .resetfn = vmsa_ttbcr_reset, .raw_writefn = vmsa_ttbcr_raw_write,
      .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[3]) },
    { .name = "ELR_EL3", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 0, .opc2 = 1,
      .access = PL3_RW,
      .fieldoffset = offsetof(CPUARMState, elr_el[3]) },
    { .name = "ESR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 2, .opc2 = 0,
      .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.esr_el[3]) },
    { .name = "FAR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 6, .crm = 0, .opc2 = 0,
      .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.far_el[3]) },
    /* SPSR_EL3 aliases the monitor-mode banked SPSR */
    { .name = "SPSR_EL3", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 0, .opc2 = 0,
      .access = PL3_RW,
      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_MON]) },
    { .name = "VBAR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 12, .crm = 0, .opc2 = 0,
      .access = PL3_RW, .writefn = vbar_write,
      .fieldoffset = offsetof(CPUARMState, cp15.vbar_el[3]),
      .resetvalue = 0 },
    { .name = "CPTR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 2,
      .access = PL3_RW, .accessfn = cptr_access, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.cptr_el[3]) },
    { .name = "TPIDR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 13, .crm = 0, .opc2 = 2,
      .access = PL3_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[3]) },
    /* AMAIR/AFSR: modelled as constant zero (no IMPDEF content) */
    { .name = "AMAIR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 10, .crm = 3, .opc2 = 0,
      .access = PL3_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "AFSR0_EL3", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 1, .opc2 = 0,
      .access = PL3_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "AFSR1_EL3", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 1, .opc2 = 1,
      .access = PL3_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    /* EL3 TLB maintenance operations */
    { .name = "TLBI_ALLE3IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 3, .opc2 = 0,
      .access = PL3_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_alle3is_write },
    { .name = "TLBI_VAE3IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 3, .opc2 = 1,
      .access = PL3_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae3is_write },
    { .name = "TLBI_VALE3IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 3, .opc2 = 5,
      .access = PL3_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae3is_write },
    { .name = "TLBI_ALLE3", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 0,
      .access = PL3_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_alle3_write },
    { .name = "TLBI_VAE3", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 1,
      .access = PL3_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae3_write },
    { .name = "TLBI_VALE3", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 5,
      .access = PL3_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae3_write },
    REGINFO_SENTINEL
};
4231
4232static CPAccessResult ctr_el0_access(CPUARMState *env, const ARMCPRegInfo *ri,
4233 bool isread)
4234{
4235
4236
4237
4238 if (arm_current_el(env) == 0 && !(env->cp15.sctlr_el[1] & SCTLR_UCT)) {
4239 return CP_ACCESS_TRAP;
4240 }
4241 return CP_ACCESS_OK;
4242}
4243
4244static void oslar_write(CPUARMState *env, const ARMCPRegInfo *ri,
4245 uint64_t value)
4246{
4247
4248
4249
4250 int oslock;
4251
4252 if (ri->state == ARM_CP_STATE_AA32) {
4253 oslock = (value == 0xC5ACCE55);
4254 } else {
4255 oslock = value & 1;
4256 }
4257
4258 env->cp15.oslsr_el1 = deposit32(env->cp15.oslsr_el1, 1, 1, oslock);
4259}
4260
static const ARMCPRegInfo debug_cp_reginfo[] = {
    /* DBGDRAR, DBGDSAR: always RAZ since we don't implement memory
     * mapped debug components. The AArch64 version of DBGDRAR is named
     * MDRAR_EL1 and is accessible from PL1 only, but is equally RAZ
     * here.
     */
    { .name = "DBGDRAR", .cp = 14, .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL0_R, .accessfn = access_tdra,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "MDRAR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 0,
      .access = PL1_R, .accessfn = access_tdra,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "DBGDSAR", .cp = 14, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL0_R, .accessfn = access_tdra,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    /* Monitor debug system control register; the 32-bit alias is DBGDSCRext. */
    { .name = "MDSCR_EL1", .state = ARM_CP_STATE_BOTH,
      .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 2,
      .access = PL1_RW, .accessfn = access_tda,
      .fieldoffset = offsetof(CPUARMState, cp15.mdscr_el1),
      .resetvalue = 0 },
    /* MDCCSR_EL0 (aka DBGDSCRint): modelled here as a read-only mirror
     * of MDSCR_EL1 (hence ARM_CP_ALIAS with the same fieldoffset).
     */
    { .name = "MDCCSR_EL0", .state = ARM_CP_STATE_BOTH,
      .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 0,
      .type = ARM_CP_ALIAS,
      .access = PL1_R, .accessfn = access_tda,
      .fieldoffset = offsetof(CPUARMState, cp15.mdscr_el1), },
    /* OS Lock Access Register: write-only; oslar_write updates OSLSR.OSLK */
    { .name = "OSLAR_EL1", .state = ARM_CP_STATE_BOTH,
      .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 4,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .accessfn = access_tdosa,
      .writefn = oslar_write },
    { .name = "OSLSR_EL1", .state = ARM_CP_STATE_BOTH,
      .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 4,
      .access = PL1_R, .resetvalue = 10,
      .accessfn = access_tdosa,
      .fieldoffset = offsetof(CPUARMState, cp15.oslsr_el1) },
    /* Dummy OSDLR_EL1: accepted but ignored (ARM_CP_NOP) */
    { .name = "OSDLR_EL1", .state = ARM_CP_STATE_BOTH,
      .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 3, .opc2 = 4,
      .access = PL1_RW, .accessfn = access_tdosa,
      .type = ARM_CP_NOP },
    /* Dummy DBGVCR: writes ignored since vector catch debug events
     * are not implemented.
     */
    { .name = "DBGVCR",
      .cp = 14, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_tda,
      .type = ARM_CP_NOP },
    /* Dummy DBGVCR32_EL2 (the AArch64 view a 64-bit hypervisor uses to
     * save/restore a 32-bit guest's DBGVCR); also a NOP here.
     */
    { .name = "DBGVCR32_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 2, .opc1 = 4, .crn = 0, .crm = 7, .opc2 = 0,
      .access = PL2_RW, .accessfn = access_tda,
      .type = ARM_CP_NOP },
    /* Dummy MDCCINT_EL1: the Debug Communications Channel is not
     * implemented, but guests may still access the register. The
     * 32-bit alias is DBGDCCINT.
     */
    { .name = "MDCCINT_EL1", .state = ARM_CP_STATE_BOTH,
      .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_tda,
      .type = ARM_CP_NOP },
    REGINFO_SENTINEL
};
4331
static const ARMCPRegInfo debug_lpae_cp_reginfo[] = {
    /* 64 bit access versions of the (dummy) debug registers */
    { .name = "DBGDRAR", .cp = 14, .crm = 1, .opc1 = 0,
      .access = PL0_R, .type = ARM_CP_CONST|ARM_CP_64BIT, .resetvalue = 0 },
    { .name = "DBGDSAR", .cp = 14, .crm = 2, .opc1 = 0,
      .access = PL0_R, .type = ARM_CP_CONST|ARM_CP_64BIT, .resetvalue = 0 },
    REGINFO_SENTINEL
};
4340
4341
4342
4343
/* Return the exception level to which SVE-disabled exceptions should
 * be taken, or 0 if SVE is enabled for the current exception level.
 * (For user-only builds SVE is always enabled.)
 */
static int sve_exception_el(CPUARMState *env)
{
#ifndef CONFIG_USER_ONLY
    unsigned current_el = arm_current_el(env);

    /* CPACR_EL1.ZEN (bits [17:16]) controls traps of SVE accesses
     * to EL1:
     *   0b00, 0b10 : trap EL0 and EL1 accesses
     *   0b01       : trap only EL0 accesses
     *   0b11       : trap no accesses
     */
    switch (extract32(env->cp15.cpacr_el1, 16, 2)) {
    default:
        if (current_el <= 1) {
            /* Trap to "PL1", which for Secure AArch32 means EL3 */
            if (arm_is_secure(env) && !arm_el_is_aa64(env, 3)) {
                return 3;
            }
            return 1;
        }
        break;
    case 1:
        if (current_el == 0) {
            return 1;
        }
        break;
    case 3:
        break;
    }

    /* Same check for CPACR_EL1.FPEN (bits [21:20]), since an SVE access
     * also traps if FP accesses are trapped.
     */
    switch (extract32(env->cp15.cpacr_el1, 20, 2)) {
    default:
        if (current_el <= 1) {
            if (arm_is_secure(env) && !arm_el_is_aa64(env, 3)) {
                return 3;
            }
            return 1;
        }
        break;
    case 1:
        if (current_el == 0) {
            return 1;
        }
        break;
    case 3:
        break;
    }

    /* CPTR_EL2: either TZ or TFP set traps SVE accesses to EL2 */
    if (current_el <= 2
        && (env->cp15.cptr_el[2] & (CPTR_TFP | CPTR_TZ))
        && !arm_is_secure_below_el3(env)) {
        return 2;
    }

    /* CPTR_EL3: EZ clear or TFP set traps SVE accesses to EL3 */
    if (!(env->cp15.cptr_el[3] & CPTR_EZ)
        || (env->cp15.cptr_el[3] & CPTR_TFP)) {
        return 3;
    }
#endif
    return 0;
}
4407
static void zcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                      uint64_t value)
{
    /* Only the LEN field (bits [3:0]) is writable; all other bits RAZ/WI */
    raw_write(env, ri, value & 0xf);
}
4414
/* SVE vector-length control registers. ZCR_EL1/EL2/EL3 are backed by
 * vfp.zcr_el[] and share zcr_write, which clamps writes to the LEN
 * field. zcr_no_el2_reginfo is registered in place of ZCR_EL2 when
 * EL2 is not present: it reads as zero and ignores writes.
 */
static const ARMCPRegInfo zcr_el1_reginfo = {
    .name = "ZCR_EL1", .state = ARM_CP_STATE_AA64,
    .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 2, .opc2 = 0,
    .access = PL1_RW, .type = ARM_CP_SVE,
    .fieldoffset = offsetof(CPUARMState, vfp.zcr_el[1]),
    .writefn = zcr_write, .raw_writefn = raw_write
};

static const ARMCPRegInfo zcr_el2_reginfo = {
    .name = "ZCR_EL2", .state = ARM_CP_STATE_AA64,
    .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 2, .opc2 = 0,
    .access = PL2_RW, .type = ARM_CP_SVE,
    .fieldoffset = offsetof(CPUARMState, vfp.zcr_el[2]),
    .writefn = zcr_write, .raw_writefn = raw_write
};

static const ARMCPRegInfo zcr_no_el2_reginfo = {
    .name = "ZCR_EL2", .state = ARM_CP_STATE_AA64,
    .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 2, .opc2 = 0,
    .access = PL2_RW, .type = ARM_CP_SVE,
    .readfn = arm_cp_read_zero, .writefn = arm_cp_write_ignore
};

static const ARMCPRegInfo zcr_el3_reginfo = {
    .name = "ZCR_EL3", .state = ARM_CP_STATE_AA64,
    .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 2, .opc2 = 0,
    .access = PL3_RW, .type = ARM_CP_SVE,
    .fieldoffset = offsetof(CPUARMState, vfp.zcr_el[3]),
    .writefn = zcr_write, .raw_writefn = raw_write
};
4445
/* (Re)build the QEMU watchpoint matching state for DBGWVR/DBGWCR pair n.
 * Any existing QEMU watchpoint for this slot is removed first; if the
 * registers describe a valid enabled watchpoint a new one is inserted
 * covering the computed address range.
 */
void hw_watchpoint_update(ARMCPU *cpu, int n)
{
    CPUARMState *env = &cpu->env;
    vaddr len = 0;
    vaddr wvr = env->cp15.dbgwvr[n];
    uint64_t wcr = env->cp15.dbgwcr[n];
    int mask;
    int flags = BP_CPU | BP_STOP_BEFORE_ACCESS;

    if (env->cpu_watchpoint[n]) {
        cpu_watchpoint_remove_by_ref(CPU(cpu), env->cpu_watchpoint[n]);
        env->cpu_watchpoint[n] = NULL;
    }

    if (!extract64(wcr, 0, 1)) {
        /* E bit clear : watchpoint disabled */
        return;
    }

    /* LSC field selects load/store/both; 0b00 is reserved and is
     * treated as if the watchpoint were disabled.
     */
    switch (extract64(wcr, 3, 2)) {
    case 0:
        /* LSC 00 is reserved and must behave as if the wp is disabled */
        return;
    case 1:
        flags |= BP_MEM_READ;
        break;
    case 2:
        flags |= BP_MEM_WRITE;
        break;
    case 3:
        flags |= BP_MEM_ACCESS;
        break;
    }

    /* Attempts to use both MASK and BAS fields simultaneously are
     * CONSTRAINED UNPREDICTABLE; we opt to ignore BAS in that case,
     * thus generating a watchpoint for every byte in the masked region.
     */
    mask = extract64(wcr, 24, 4);
    if (mask == 1 || mask == 2) {
        /* Reserved values of MASK; we must act as if the mask value was
         * some non-reserved value, or as if the watchpoint were disabled.
         * We choose the latter.
         */
        return;
    } else if (mask) {
        /* Watchpoint covers an aligned area of 2^MASK bytes */
        len = 1ULL << mask;
        /* If masked bits in WVR are not zero it's CONSTRAINED
         * UNPREDICTABLE whether the watchpoint fires when the unmasked
         * bits match; we opt to generate the exceptions (by clearing
         * the masked-off address bits).
         */
        wvr &= ~(len - 1);
    } else {
        /* Watchpoint covers bytes defined by the byte address select bits */
        int bas = extract64(wcr, 5, 8);
        int basstart;

        if (bas == 0) {
            /* This must act as if the watchpoint is disabled */
            return;
        }

        if (extract64(wvr, 2, 1)) {
            /* Deprecated case of an only 4-aligned address: BAS[7:4]
             * are ignored, and BAS[3:0] define which bytes to watch.
             */
            bas &= 0xf;
        }

        /* The BAS bits are supposed to describe a contiguous range of
         * bytes. We watch the contiguous run of 1s starting at the
         * lowest set bit and ignore any higher set bits after it
         * (non-contiguous BAS values are CONSTRAINED UNPREDICTABLE).
         */
        basstart = ctz32(bas);
        len = cto32(bas >> basstart);
        wvr += basstart;
    }

    cpu_watchpoint_insert(CPU(cpu), wvr, len, flags,
                          &env->cpu_watchpoint[n]);
}
4528
4529void hw_watchpoint_update_all(ARMCPU *cpu)
4530{
4531 int i;
4532 CPUARMState *env = &cpu->env;
4533
4534
4535
4536
4537 cpu_watchpoint_remove_all(CPU(cpu), BP_CPU);
4538 memset(env->cpu_watchpoint, 0, sizeof(env->cpu_watchpoint));
4539
4540 for (i = 0; i < ARRAY_SIZE(cpu->env.cpu_watchpoint); i++) {
4541 hw_watchpoint_update(cpu, i);
4542 }
4543}
4544
4545static void dbgwvr_write(CPUARMState *env, const ARMCPRegInfo *ri,
4546 uint64_t value)
4547{
4548 ARMCPU *cpu = arm_env_get_cpu(env);
4549 int i = ri->crm;
4550
4551
4552
4553
4554
4555 value = sextract64(value, 0, 49) & ~3ULL;
4556
4557 raw_write(env, ri, value);
4558 hw_watchpoint_update(cpu, i);
4559}
4560
4561static void dbgwcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
4562 uint64_t value)
4563{
4564 ARMCPU *cpu = arm_env_get_cpu(env);
4565 int i = ri->crm;
4566
4567 raw_write(env, ri, value);
4568 hw_watchpoint_update(cpu, i);
4569}
4570
/* (Re)build the QEMU breakpoint state for DBGBVR/DBGBCR pair n. Any
 * existing QEMU breakpoint for the slot is removed first; a new one is
 * inserted only for the breakpoint types we can model.
 */
void hw_breakpoint_update(ARMCPU *cpu, int n)
{
    CPUARMState *env = &cpu->env;
    uint64_t bvr = env->cp15.dbgbvr[n];
    uint64_t bcr = env->cp15.dbgbcr[n];
    vaddr addr;
    int bt;
    int flags = BP_CPU;

    if (env->cpu_breakpoint[n]) {
        cpu_breakpoint_remove_by_ref(CPU(cpu), env->cpu_breakpoint[n]);
        env->cpu_breakpoint[n] = NULL;
    }

    if (!extract64(bcr, 0, 1)) {
        /* E bit clear : breakpoint disabled */
        return;
    }

    /* Breakpoint type field */
    bt = extract64(bcr, 20, 4);

    switch (bt) {
    case 4: /* unlinked address mismatch */
    case 5: /* linked address mismatch */
        qemu_log_mask(LOG_UNIMP,
                      "arm: address mismatch breakpoint types not implemented\n");
        return;
    case 0: /* unlinked address match */
    case 1: /* linked address match */
    {
        /* The address is taken from BVR with bits [1:0] RES0 and bits
         * [63:49] a sign extension of bit [48]. The BAS field selects
         * which halfwords of the addressed word the breakpoint applies
         * to; QEMU breakpoints have no halfword granularity, so:
         *   BAS == 0      : never matches, insert nothing
         *   BAS == 0b1100 : modelled as a breakpoint at BVR + 2
         *   anything else : breakpoint at the word address itself
         */
        int bas = extract64(bcr, 5, 4);
        addr = sextract64(bvr, 0, 49) & ~3ULL;
        if (bas == 0) {
            return;
        }
        if (bas == 0xc) {
            addr += 2;
        }
        break;
    }
    case 2: /* unlinked context ID match */
    case 8: /* unlinked VMID match */
    case 10: /* unlinked context ID and VMID match */
        qemu_log_mask(LOG_UNIMP,
                      "arm: unlinked context breakpoint types not implemented\n");
        return;
    case 9: /* linked VMID match */
    case 11: /* linked context ID and VMID match */
    case 3: /* linked context ID match */
    default:
        /* Linked context matches must generate no events on their own
         * (they only affect the bp/wp that links to them, handled when
         * that bp/wp is updated). Reserved values also generate no
         * events.
         */
        return;
    }

    cpu_breakpoint_insert(CPU(cpu), addr, flags, &env->cpu_breakpoint[n]);
}
4646
4647void hw_breakpoint_update_all(ARMCPU *cpu)
4648{
4649 int i;
4650 CPUARMState *env = &cpu->env;
4651
4652
4653
4654
4655 cpu_breakpoint_remove_all(CPU(cpu), BP_CPU);
4656 memset(env->cpu_breakpoint, 0, sizeof(env->cpu_breakpoint));
4657
4658 for (i = 0; i < ARRAY_SIZE(cpu->env.cpu_breakpoint); i++) {
4659 hw_breakpoint_update(cpu, i);
4660 }
4661}
4662
4663static void dbgbvr_write(CPUARMState *env, const ARMCPRegInfo *ri,
4664 uint64_t value)
4665{
4666 ARMCPU *cpu = arm_env_get_cpu(env);
4667 int i = ri->crm;
4668
4669 raw_write(env, ri, value);
4670 hw_breakpoint_update(cpu, i);
4671}
4672
4673static void dbgbcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
4674 uint64_t value)
4675{
4676 ARMCPU *cpu = arm_env_get_cpu(env);
4677 int i = ri->crm;
4678
4679
4680
4681
4682 value = deposit64(value, 6, 1, extract64(value, 5, 1));
4683 value = deposit64(value, 8, 1, extract64(value, 7, 1));
4684
4685 raw_write(env, ri, value);
4686 hw_breakpoint_update(cpu, i);
4687}
4688
static void define_debug_regs(ARMCPU *cpu)
{
    /* Define v7/v8 architectural debug registers: DBGDIDR, the common
     * debug register set, and per-slot DBGBVR/DBGBCR and DBGWVR/DBGWCR
     * pairs sized from this CPU's DBGDIDR value.
     */
    int i;
    int wrps, brps, ctx_cmps;
    ARMCPRegInfo dbgdidr = {
        .name = "DBGDIDR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 0,
        .access = PL0_R, .accessfn = access_tda,
        .type = ARM_CP_CONST, .resetvalue = cpu->dbgdidr,
    };

    /* Note that these DBGDIDR fields hold "number of Xs minus 1" */
    brps = extract32(cpu->dbgdidr, 24, 4);
    wrps = extract32(cpu->dbgdidr, 28, 4);
    ctx_cmps = extract32(cpu->dbgdidr, 20, 4);

    assert(ctx_cmps <= brps);

    /* DBGDIDR and ID_AA64DFR0_EL1 both describe the number of
     * breakpoints/watchpoints/context comparators; if the CPU has both
     * views, check that they agree.
     */
    if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
        assert(extract32(cpu->id_aa64dfr0, 12, 4) == brps);
        assert(extract32(cpu->id_aa64dfr0, 20, 4) == wrps);
        assert(extract32(cpu->id_aa64dfr0, 28, 4) == ctx_cmps);
    }

    define_one_arm_cp_reg(cpu, &dbgdidr);
    define_arm_cp_regs(cpu, debug_cp_reginfo);

    if (arm_feature(&cpu->env, ARM_FEATURE_LPAE)) {
        define_arm_cp_regs(cpu, debug_lpae_cp_reginfo);
    }

    /* brps holds count-minus-1, hence "+ 1" for the slot count */
    for (i = 0; i < brps + 1; i++) {
        ARMCPRegInfo dbgregs[] = {
            { .name = "DBGBVR", .state = ARM_CP_STATE_BOTH,
              .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 4,
              .access = PL1_RW, .accessfn = access_tda,
              .fieldoffset = offsetof(CPUARMState, cp15.dbgbvr[i]),
              .writefn = dbgbvr_write, .raw_writefn = raw_write
            },
            { .name = "DBGBCR", .state = ARM_CP_STATE_BOTH,
              .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 5,
              .access = PL1_RW, .accessfn = access_tda,
              .fieldoffset = offsetof(CPUARMState, cp15.dbgbcr[i]),
              .writefn = dbgbcr_write, .raw_writefn = raw_write
            },
            REGINFO_SENTINEL
        };
        define_arm_cp_regs(cpu, dbgregs);
    }

    for (i = 0; i < wrps + 1; i++) {
        ARMCPRegInfo dbgregs[] = {
            { .name = "DBGWVR", .state = ARM_CP_STATE_BOTH,
              .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 6,
              .access = PL1_RW, .accessfn = access_tda,
              .fieldoffset = offsetof(CPUARMState, cp15.dbgwvr[i]),
              .writefn = dbgwvr_write, .raw_writefn = raw_write
            },
            { .name = "DBGWCR", .state = ARM_CP_STATE_BOTH,
              .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 7,
              .access = PL1_RW, .accessfn = access_tda,
              .fieldoffset = offsetof(CPUARMState, cp15.dbgwcr[i]),
              .writefn = dbgwcr_write, .raw_writefn = raw_write
            },
            REGINFO_SENTINEL
        };
        define_arm_cp_regs(cpu, dbgregs);
    }
}
4764
4765
4766
4767
4768
4769
4770static uint64_t id_pfr1_read(CPUARMState *env, const ARMCPRegInfo *ri)
4771{
4772 ARMCPU *cpu = arm_env_get_cpu(env);
4773 uint64_t pfr1 = cpu->id_pfr1;
4774
4775 if (env->gicv3state) {
4776 pfr1 |= 1 << 28;
4777 }
4778 return pfr1;
4779}
4780
4781static uint64_t id_aa64pfr0_read(CPUARMState *env, const ARMCPRegInfo *ri)
4782{
4783 ARMCPU *cpu = arm_env_get_cpu(env);
4784 uint64_t pfr0 = cpu->id_aa64pfr0;
4785
4786 if (env->gicv3state) {
4787 pfr0 |= 1 << 24;
4788 }
4789 return pfr0;
4790}
4791
4792void register_cp_regs_for_features(ARMCPU *cpu)
4793{
4794
4795 CPUARMState *env = &cpu->env;
4796 if (arm_feature(env, ARM_FEATURE_M)) {
4797
4798 return;
4799 }
4800
4801 define_arm_cp_regs(cpu, cp_reginfo);
4802 if (!arm_feature(env, ARM_FEATURE_V8)) {
4803
4804
4805
4806 define_arm_cp_regs(cpu, not_v8_cp_reginfo);
4807 }
4808
4809 if (arm_feature(env, ARM_FEATURE_V6)) {
4810
4811 ARMCPRegInfo v6_idregs[] = {
4812 { .name = "ID_PFR0", .state = ARM_CP_STATE_BOTH,
4813 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 0,
4814 .access = PL1_R, .type = ARM_CP_CONST,
4815 .resetvalue = cpu->id_pfr0 },
4816
4817
4818
4819 { .name = "ID_PFR1", .state = ARM_CP_STATE_BOTH,
4820 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 1,
4821 .access = PL1_R, .type = ARM_CP_NO_RAW,
4822 .readfn = id_pfr1_read,
4823 .writefn = arm_cp_write_ignore },
4824 { .name = "ID_DFR0", .state = ARM_CP_STATE_BOTH,
4825 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 2,
4826 .access = PL1_R, .type = ARM_CP_CONST,
4827 .resetvalue = cpu->id_dfr0 },
4828 { .name = "ID_AFR0", .state = ARM_CP_STATE_BOTH,
4829 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 3,
4830 .access = PL1_R, .type = ARM_CP_CONST,
4831 .resetvalue = cpu->id_afr0 },
4832 { .name = "ID_MMFR0", .state = ARM_CP_STATE_BOTH,
4833 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 4,
4834 .access = PL1_R, .type = ARM_CP_CONST,
4835 .resetvalue = cpu->id_mmfr0 },
4836 { .name = "ID_MMFR1", .state = ARM_CP_STATE_BOTH,
4837 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 5,
4838 .access = PL1_R, .type = ARM_CP_CONST,
4839 .resetvalue = cpu->id_mmfr1 },
4840 { .name = "ID_MMFR2", .state = ARM_CP_STATE_BOTH,
4841 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 6,
4842 .access = PL1_R, .type = ARM_CP_CONST,
4843 .resetvalue = cpu->id_mmfr2 },
4844 { .name = "ID_MMFR3", .state = ARM_CP_STATE_BOTH,
4845 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 7,
4846 .access = PL1_R, .type = ARM_CP_CONST,
4847 .resetvalue = cpu->id_mmfr3 },
4848 { .name = "ID_ISAR0", .state = ARM_CP_STATE_BOTH,
4849 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 0,
4850 .access = PL1_R, .type = ARM_CP_CONST,
4851 .resetvalue = cpu->id_isar0 },
4852 { .name = "ID_ISAR1", .state = ARM_CP_STATE_BOTH,
4853 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 1,
4854 .access = PL1_R, .type = ARM_CP_CONST,
4855 .resetvalue = cpu->id_isar1 },
4856 { .name = "ID_ISAR2", .state = ARM_CP_STATE_BOTH,
4857 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 2,
4858 .access = PL1_R, .type = ARM_CP_CONST,
4859 .resetvalue = cpu->id_isar2 },
4860 { .name = "ID_ISAR3", .state = ARM_CP_STATE_BOTH,
4861 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 3,
4862 .access = PL1_R, .type = ARM_CP_CONST,
4863 .resetvalue = cpu->id_isar3 },
4864 { .name = "ID_ISAR4", .state = ARM_CP_STATE_BOTH,
4865 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 4,
4866 .access = PL1_R, .type = ARM_CP_CONST,
4867 .resetvalue = cpu->id_isar4 },
4868 { .name = "ID_ISAR5", .state = ARM_CP_STATE_BOTH,
4869 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 5,
4870 .access = PL1_R, .type = ARM_CP_CONST,
4871 .resetvalue = cpu->id_isar5 },
4872 { .name = "ID_MMFR4", .state = ARM_CP_STATE_BOTH,
4873 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 6,
4874 .access = PL1_R, .type = ARM_CP_CONST,
4875 .resetvalue = cpu->id_mmfr4 },
4876 { .name = "ID_ISAR6", .state = ARM_CP_STATE_BOTH,
4877 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 7,
4878 .access = PL1_R, .type = ARM_CP_CONST,
4879 .resetvalue = cpu->id_isar6 },
4880 REGINFO_SENTINEL
4881 };
4882 define_arm_cp_regs(cpu, v6_idregs);
4883 define_arm_cp_regs(cpu, v6_cp_reginfo);
4884 } else {
4885 define_arm_cp_regs(cpu, not_v6_cp_reginfo);
4886 }
4887 if (arm_feature(env, ARM_FEATURE_V6K)) {
4888 define_arm_cp_regs(cpu, v6k_cp_reginfo);
4889 }
4890 if (arm_feature(env, ARM_FEATURE_V7MP) &&
4891 !arm_feature(env, ARM_FEATURE_PMSA)) {
4892 define_arm_cp_regs(cpu, v7mp_cp_reginfo);
4893 }
4894 if (arm_feature(env, ARM_FEATURE_V7)) {
4895
4896
4897
4898
4899#ifndef CONFIG_USER_ONLY
4900 ARMCPRegInfo pmcr = {
4901 .name = "PMCR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 0,
4902 .access = PL0_RW,
4903 .type = ARM_CP_IO | ARM_CP_ALIAS,
4904 .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcr),
4905 .accessfn = pmreg_access, .writefn = pmcr_write,
4906 .raw_writefn = raw_write,
4907 };
4908 ARMCPRegInfo pmcr64 = {
4909 .name = "PMCR_EL0", .state = ARM_CP_STATE_AA64,
4910 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 0,
4911 .access = PL0_RW, .accessfn = pmreg_access,
4912 .type = ARM_CP_IO,
4913 .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcr),
4914 .resetvalue = cpu->midr & 0xff000000,
4915 .writefn = pmcr_write, .raw_writefn = raw_write,
4916 };
4917 define_one_arm_cp_reg(cpu, &pmcr);
4918 define_one_arm_cp_reg(cpu, &pmcr64);
4919#endif
4920 ARMCPRegInfo clidr = {
4921 .name = "CLIDR", .state = ARM_CP_STATE_BOTH,
4922 .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = 1,
4923 .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = cpu->clidr
4924 };
4925 define_one_arm_cp_reg(cpu, &clidr);
4926 define_arm_cp_regs(cpu, v7_cp_reginfo);
4927 define_debug_regs(cpu);
4928 } else {
4929 define_arm_cp_regs(cpu, not_v7_cp_reginfo);
4930 }
4931 if (arm_feature(env, ARM_FEATURE_V8)) {
4932
4933
4934
4935
4936
4937 ARMCPRegInfo v8_idregs[] = {
4938
4939
4940
4941
4942 { .name = "ID_AA64PFR0_EL1", .state = ARM_CP_STATE_AA64,
4943 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 0,
4944 .access = PL1_R, .type = ARM_CP_NO_RAW,
4945 .readfn = id_aa64pfr0_read,
4946 .writefn = arm_cp_write_ignore },
4947 { .name = "ID_AA64PFR1_EL1", .state = ARM_CP_STATE_AA64,
4948 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 1,
4949 .access = PL1_R, .type = ARM_CP_CONST,
4950 .resetvalue = cpu->id_aa64pfr1},
4951 { .name = "ID_AA64PFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
4952 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 2,
4953 .access = PL1_R, .type = ARM_CP_CONST,
4954 .resetvalue = 0 },
4955 { .name = "ID_AA64PFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
4956 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 3,
4957 .access = PL1_R, .type = ARM_CP_CONST,
4958 .resetvalue = 0 },
4959 { .name = "ID_AA64PFR4_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
4960 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 4,
4961 .access = PL1_R, .type = ARM_CP_CONST,
4962 .resetvalue = 0 },
4963 { .name = "ID_AA64PFR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
4964 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 5,
4965 .access = PL1_R, .type = ARM_CP_CONST,
4966 .resetvalue = 0 },
4967 { .name = "ID_AA64PFR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
4968 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 6,
4969 .access = PL1_R, .type = ARM_CP_CONST,
4970 .resetvalue = 0 },
4971 { .name = "ID_AA64PFR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
4972 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 7,
4973 .access = PL1_R, .type = ARM_CP_CONST,
4974 .resetvalue = 0 },
4975 { .name = "ID_AA64DFR0_EL1", .state = ARM_CP_STATE_AA64,
4976 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 0,
4977 .access = PL1_R, .type = ARM_CP_CONST,
4978 .resetvalue = cpu->id_aa64dfr0 },
4979 { .name = "ID_AA64DFR1_EL1", .state = ARM_CP_STATE_AA64,
4980 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 1,
4981 .access = PL1_R, .type = ARM_CP_CONST,
4982 .resetvalue = cpu->id_aa64dfr1 },
4983 { .name = "ID_AA64DFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
4984 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 2,
4985 .access = PL1_R, .type = ARM_CP_CONST,
4986 .resetvalue = 0 },
4987 { .name = "ID_AA64DFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
4988 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 3,
4989 .access = PL1_R, .type = ARM_CP_CONST,
4990 .resetvalue = 0 },
4991 { .name = "ID_AA64AFR0_EL1", .state = ARM_CP_STATE_AA64,
4992 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 4,
4993 .access = PL1_R, .type = ARM_CP_CONST,
4994 .resetvalue = cpu->id_aa64afr0 },
4995 { .name = "ID_AA64AFR1_EL1", .state = ARM_CP_STATE_AA64,
4996 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 5,
4997 .access = PL1_R, .type = ARM_CP_CONST,
4998 .resetvalue = cpu->id_aa64afr1 },
4999 { .name = "ID_AA64AFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
5000 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 6,
5001 .access = PL1_R, .type = ARM_CP_CONST,
5002 .resetvalue = 0 },
5003 { .name = "ID_AA64AFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
5004 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 7,
5005 .access = PL1_R, .type = ARM_CP_CONST,
5006 .resetvalue = 0 },
5007 { .name = "ID_AA64ISAR0_EL1", .state = ARM_CP_STATE_AA64,
5008 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 0,
5009 .access = PL1_R, .type = ARM_CP_CONST,
5010 .resetvalue = cpu->id_aa64isar0 },
5011 { .name = "ID_AA64ISAR1_EL1", .state = ARM_CP_STATE_AA64,
5012 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 1,
5013 .access = PL1_R, .type = ARM_CP_CONST,
5014 .resetvalue = cpu->id_aa64isar1 },
5015 { .name = "ID_AA64ISAR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
5016 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 2,
5017 .access = PL1_R, .type = ARM_CP_CONST,
5018 .resetvalue = 0 },
5019 { .name = "ID_AA64ISAR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
5020 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 3,
5021 .access = PL1_R, .type = ARM_CP_CONST,
5022 .resetvalue = 0 },
5023 { .name = "ID_AA64ISAR4_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
5024 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 4,
5025 .access = PL1_R, .type = ARM_CP_CONST,
5026 .resetvalue = 0 },
5027 { .name = "ID_AA64ISAR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
5028 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 5,
5029 .access = PL1_R, .type = ARM_CP_CONST,
5030 .resetvalue = 0 },
5031 { .name = "ID_AA64ISAR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
5032 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 6,
5033 .access = PL1_R, .type = ARM_CP_CONST,
5034 .resetvalue = 0 },
5035 { .name = "ID_AA64ISAR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
5036 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 7,
5037 .access = PL1_R, .type = ARM_CP_CONST,
5038 .resetvalue = 0 },
5039 { .name = "ID_AA64MMFR0_EL1", .state = ARM_CP_STATE_AA64,
5040 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 0,
5041 .access = PL1_R, .type = ARM_CP_CONST,
5042 .resetvalue = cpu->id_aa64mmfr0 },
5043 { .name = "ID_AA64MMFR1_EL1", .state = ARM_CP_STATE_AA64,
5044 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 1,
5045 .access = PL1_R, .type = ARM_CP_CONST,
5046 .resetvalue = cpu->id_aa64mmfr1 },
5047 { .name = "ID_AA64MMFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
5048 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 2,
5049 .access = PL1_R, .type = ARM_CP_CONST,
5050 .resetvalue = 0 },
5051 { .name = "ID_AA64MMFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
5052 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 3,
5053 .access = PL1_R, .type = ARM_CP_CONST,
5054 .resetvalue = 0 },
5055 { .name = "ID_AA64MMFR4_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
5056 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 4,
5057 .access = PL1_R, .type = ARM_CP_CONST,
5058 .resetvalue = 0 },
5059 { .name = "ID_AA64MMFR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
5060 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 5,
5061 .access = PL1_R, .type = ARM_CP_CONST,
5062 .resetvalue = 0 },
5063 { .name = "ID_AA64MMFR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
5064 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 6,
5065 .access = PL1_R, .type = ARM_CP_CONST,
5066 .resetvalue = 0 },
5067 { .name = "ID_AA64MMFR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
5068 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 7,
5069 .access = PL1_R, .type = ARM_CP_CONST,
5070 .resetvalue = 0 },
5071 { .name = "MVFR0_EL1", .state = ARM_CP_STATE_AA64,
5072 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 0,
5073 .access = PL1_R, .type = ARM_CP_CONST,
5074 .resetvalue = cpu->mvfr0 },
5075 { .name = "MVFR1_EL1", .state = ARM_CP_STATE_AA64,
5076 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 1,
5077 .access = PL1_R, .type = ARM_CP_CONST,
5078 .resetvalue = cpu->mvfr1 },
5079 { .name = "MVFR2_EL1", .state = ARM_CP_STATE_AA64,
5080 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 2,
5081 .access = PL1_R, .type = ARM_CP_CONST,
5082 .resetvalue = cpu->mvfr2 },
5083 { .name = "MVFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
5084 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 3,
5085 .access = PL1_R, .type = ARM_CP_CONST,
5086 .resetvalue = 0 },
5087 { .name = "MVFR4_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
5088 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 4,
5089 .access = PL1_R, .type = ARM_CP_CONST,
5090 .resetvalue = 0 },
5091 { .name = "MVFR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
5092 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 5,
5093 .access = PL1_R, .type = ARM_CP_CONST,
5094 .resetvalue = 0 },
5095 { .name = "MVFR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
5096 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 6,
5097 .access = PL1_R, .type = ARM_CP_CONST,
5098 .resetvalue = 0 },
5099 { .name = "MVFR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
5100 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 7,
5101 .access = PL1_R, .type = ARM_CP_CONST,
5102 .resetvalue = 0 },
5103 { .name = "PMCEID0", .state = ARM_CP_STATE_AA32,
5104 .cp = 15, .opc1 = 0, .crn = 9, .crm = 12, .opc2 = 6,
5105 .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
5106 .resetvalue = cpu->pmceid0 },
5107 { .name = "PMCEID0_EL0", .state = ARM_CP_STATE_AA64,
5108 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 6,
5109 .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
5110 .resetvalue = cpu->pmceid0 },
5111 { .name = "PMCEID1", .state = ARM_CP_STATE_AA32,
5112 .cp = 15, .opc1 = 0, .crn = 9, .crm = 12, .opc2 = 7,
5113 .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
5114 .resetvalue = cpu->pmceid1 },
5115 { .name = "PMCEID1_EL0", .state = ARM_CP_STATE_AA64,
5116 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 7,
5117 .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
5118 .resetvalue = cpu->pmceid1 },
5119 REGINFO_SENTINEL
5120 };
5121
5122 if (!arm_feature(env, ARM_FEATURE_EL3) &&
5123 !arm_feature(env, ARM_FEATURE_EL2)) {
5124 ARMCPRegInfo rvbar = {
5125 .name = "RVBAR_EL1", .state = ARM_CP_STATE_AA64,
5126 .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 0, .opc2 = 1,
5127 .type = ARM_CP_CONST, .access = PL1_R, .resetvalue = cpu->rvbar
5128 };
5129 define_one_arm_cp_reg(cpu, &rvbar);
5130 }
5131 define_arm_cp_regs(cpu, v8_idregs);
5132 define_arm_cp_regs(cpu, v8_cp_reginfo);
5133 }
5134 if (arm_feature(env, ARM_FEATURE_EL2)) {
5135 uint64_t vmpidr_def = mpidr_read_val(env);
5136 ARMCPRegInfo vpidr_regs[] = {
5137 { .name = "VPIDR", .state = ARM_CP_STATE_AA32,
5138 .cp = 15, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 0,
5139 .access = PL2_RW, .accessfn = access_el3_aa32ns,
5140 .resetvalue = cpu->midr, .type = ARM_CP_ALIAS,
5141 .fieldoffset = offsetoflow32(CPUARMState, cp15.vpidr_el2) },
5142 { .name = "VPIDR_EL2", .state = ARM_CP_STATE_AA64,
5143 .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 0,
5144 .access = PL2_RW, .resetvalue = cpu->midr,
5145 .fieldoffset = offsetof(CPUARMState, cp15.vpidr_el2) },
5146 { .name = "VMPIDR", .state = ARM_CP_STATE_AA32,
5147 .cp = 15, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 5,
5148 .access = PL2_RW, .accessfn = access_el3_aa32ns,
5149 .resetvalue = vmpidr_def, .type = ARM_CP_ALIAS,
5150 .fieldoffset = offsetoflow32(CPUARMState, cp15.vmpidr_el2) },
5151 { .name = "VMPIDR_EL2", .state = ARM_CP_STATE_AA64,
5152 .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 5,
5153 .access = PL2_RW,
5154 .resetvalue = vmpidr_def,
5155 .fieldoffset = offsetof(CPUARMState, cp15.vmpidr_el2) },
5156 REGINFO_SENTINEL
5157 };
5158 define_arm_cp_regs(cpu, vpidr_regs);
5159 define_arm_cp_regs(cpu, el2_cp_reginfo);
5160
5161 if (!arm_feature(env, ARM_FEATURE_EL3)) {
5162 ARMCPRegInfo rvbar = {
5163 .name = "RVBAR_EL2", .state = ARM_CP_STATE_AA64,
5164 .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 1,
5165 .type = ARM_CP_CONST, .access = PL2_R, .resetvalue = cpu->rvbar
5166 };
5167 define_one_arm_cp_reg(cpu, &rvbar);
5168 }
5169 } else {
5170
5171
5172
5173 if (arm_feature(env, ARM_FEATURE_EL3)) {
5174
5175
5176
5177 ARMCPRegInfo vpidr_regs[] = {
5178 { .name = "VPIDR_EL2", .state = ARM_CP_STATE_BOTH,
5179 .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 0,
5180 .access = PL2_RW, .accessfn = access_el3_aa32ns_aa64any,
5181 .type = ARM_CP_CONST, .resetvalue = cpu->midr,
5182 .fieldoffset = offsetof(CPUARMState, cp15.vpidr_el2) },
5183 { .name = "VMPIDR_EL2", .state = ARM_CP_STATE_BOTH,
5184 .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 5,
5185 .access = PL2_RW, .accessfn = access_el3_aa32ns_aa64any,
5186 .type = ARM_CP_NO_RAW,
5187 .writefn = arm_cp_write_ignore, .readfn = mpidr_read },
5188 REGINFO_SENTINEL
5189 };
5190 define_arm_cp_regs(cpu, vpidr_regs);
5191 define_arm_cp_regs(cpu, el3_no_el2_cp_reginfo);
5192 }
5193 }
5194 if (arm_feature(env, ARM_FEATURE_EL3)) {
5195 define_arm_cp_regs(cpu, el3_cp_reginfo);
5196 ARMCPRegInfo el3_regs[] = {
5197 { .name = "RVBAR_EL3", .state = ARM_CP_STATE_AA64,
5198 .opc0 = 3, .opc1 = 6, .crn = 12, .crm = 0, .opc2 = 1,
5199 .type = ARM_CP_CONST, .access = PL3_R, .resetvalue = cpu->rvbar },
5200 { .name = "SCTLR_EL3", .state = ARM_CP_STATE_AA64,
5201 .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 0, .opc2 = 0,
5202 .access = PL3_RW,
5203 .raw_writefn = raw_write, .writefn = sctlr_write,
5204 .fieldoffset = offsetof(CPUARMState, cp15.sctlr_el[3]),
5205 .resetvalue = cpu->reset_sctlr },
5206 REGINFO_SENTINEL
5207 };
5208
5209 define_arm_cp_regs(cpu, el3_regs);
5210 }
5211
5212
5213
5214
5215
5216
5217
5218
5219 if (arm_feature(env, ARM_FEATURE_EL3)) {
5220 if (arm_feature(env, ARM_FEATURE_AARCH64)) {
5221 ARMCPRegInfo nsacr = {
5222 .name = "NSACR", .type = ARM_CP_CONST,
5223 .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 2,
5224 .access = PL1_RW, .accessfn = nsacr_access,
5225 .resetvalue = 0xc00
5226 };
5227 define_one_arm_cp_reg(cpu, &nsacr);
5228 } else {
5229 ARMCPRegInfo nsacr = {
5230 .name = "NSACR",
5231 .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 2,
5232 .access = PL3_RW | PL1_R,
5233 .resetvalue = 0,
5234 .fieldoffset = offsetof(CPUARMState, cp15.nsacr)
5235 };
5236 define_one_arm_cp_reg(cpu, &nsacr);
5237 }
5238 } else {
5239 if (arm_feature(env, ARM_FEATURE_V8)) {
5240 ARMCPRegInfo nsacr = {
5241 .name = "NSACR", .type = ARM_CP_CONST,
5242 .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 2,
5243 .access = PL1_R,
5244 .resetvalue = 0xc00
5245 };
5246 define_one_arm_cp_reg(cpu, &nsacr);
5247 }
5248 }
5249
5250 if (arm_feature(env, ARM_FEATURE_PMSA)) {
5251 if (arm_feature(env, ARM_FEATURE_V6)) {
5252
5253 assert(arm_feature(env, ARM_FEATURE_V7));
5254 define_arm_cp_regs(cpu, vmsa_pmsa_cp_reginfo);
5255 define_arm_cp_regs(cpu, pmsav7_cp_reginfo);
5256 } else {
5257 define_arm_cp_regs(cpu, pmsav5_cp_reginfo);
5258 }
5259 } else {
5260 define_arm_cp_regs(cpu, vmsa_pmsa_cp_reginfo);
5261 define_arm_cp_regs(cpu, vmsa_cp_reginfo);
5262 }
5263 if (arm_feature(env, ARM_FEATURE_THUMB2EE)) {
5264 define_arm_cp_regs(cpu, t2ee_cp_reginfo);
5265 }
5266 if (arm_feature(env, ARM_FEATURE_GENERIC_TIMER)) {
5267 define_arm_cp_regs(cpu, generic_timer_cp_reginfo);
5268 }
5269 if (arm_feature(env, ARM_FEATURE_VAPA)) {
5270 define_arm_cp_regs(cpu, vapa_cp_reginfo);
5271 }
5272 if (arm_feature(env, ARM_FEATURE_CACHE_TEST_CLEAN)) {
5273 define_arm_cp_regs(cpu, cache_test_clean_cp_reginfo);
5274 }
5275 if (arm_feature(env, ARM_FEATURE_CACHE_DIRTY_REG)) {
5276 define_arm_cp_regs(cpu, cache_dirty_status_cp_reginfo);
5277 }
5278 if (arm_feature(env, ARM_FEATURE_CACHE_BLOCK_OPS)) {
5279 define_arm_cp_regs(cpu, cache_block_ops_cp_reginfo);
5280 }
5281 if (arm_feature(env, ARM_FEATURE_OMAPCP)) {
5282 define_arm_cp_regs(cpu, omap_cp_reginfo);
5283 }
5284 if (arm_feature(env, ARM_FEATURE_STRONGARM)) {
5285 define_arm_cp_regs(cpu, strongarm_cp_reginfo);
5286 }
5287 if (arm_feature(env, ARM_FEATURE_XSCALE)) {
5288 define_arm_cp_regs(cpu, xscale_cp_reginfo);
5289 }
5290 if (arm_feature(env, ARM_FEATURE_DUMMY_C15_REGS)) {
5291 define_arm_cp_regs(cpu, dummy_c15_cp_reginfo);
5292 }
5293 if (arm_feature(env, ARM_FEATURE_LPAE)) {
5294 define_arm_cp_regs(cpu, lpae_cp_reginfo);
5295 }
5296
5297
5298
5299
5300 {
5301 ARMCPRegInfo id_pre_v8_midr_cp_reginfo[] = {
5302
5303
5304
5305
5306
5307
5308
5309
5310
5311 { .name = "MIDR",
5312 .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = CP_ANY,
5313 .access = PL1_R, .resetvalue = cpu->midr,
5314 .writefn = arm_cp_write_ignore, .raw_writefn = raw_write,
5315 .readfn = midr_read,
5316 .fieldoffset = offsetof(CPUARMState, cp15.c0_cpuid),
5317 .type = ARM_CP_OVERRIDE },
5318
5319 { .name = "DUMMY",
5320 .cp = 15, .crn = 0, .crm = 3, .opc1 = 0, .opc2 = CP_ANY,
5321 .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
5322 { .name = "DUMMY",
5323 .cp = 15, .crn = 0, .crm = 4, .opc1 = 0, .opc2 = CP_ANY,
5324 .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
5325 { .name = "DUMMY",
5326 .cp = 15, .crn = 0, .crm = 5, .opc1 = 0, .opc2 = CP_ANY,
5327 .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
5328 { .name = "DUMMY",
5329 .cp = 15, .crn = 0, .crm = 6, .opc1 = 0, .opc2 = CP_ANY,
5330 .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
5331 { .name = "DUMMY",
5332 .cp = 15, .crn = 0, .crm = 7, .opc1 = 0, .opc2 = CP_ANY,
5333 .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
5334 REGINFO_SENTINEL
5335 };
5336 ARMCPRegInfo id_v8_midr_cp_reginfo[] = {
5337 { .name = "MIDR_EL1", .state = ARM_CP_STATE_BOTH,
5338 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 0, .opc2 = 0,
5339 .access = PL1_R, .type = ARM_CP_NO_RAW, .resetvalue = cpu->midr,
5340 .fieldoffset = offsetof(CPUARMState, cp15.c0_cpuid),
5341 .readfn = midr_read },
5342
5343 { .name = "MIDR", .type = ARM_CP_ALIAS | ARM_CP_CONST,
5344 .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 4,
5345 .access = PL1_R, .resetvalue = cpu->midr },
5346 { .name = "MIDR", .type = ARM_CP_ALIAS | ARM_CP_CONST,
5347 .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 7,
5348 .access = PL1_R, .resetvalue = cpu->midr },
5349 { .name = "REVIDR_EL1", .state = ARM_CP_STATE_BOTH,
5350 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 0, .opc2 = 6,
5351 .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = cpu->revidr },
5352 REGINFO_SENTINEL
5353 };
5354 ARMCPRegInfo id_cp_reginfo[] = {
5355
5356 { .name = "CTR",
5357 .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 1,
5358 .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = cpu->ctr },
5359 { .name = "CTR_EL0", .state = ARM_CP_STATE_AA64,
5360 .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 0, .crm = 0,
5361 .access = PL0_R, .accessfn = ctr_el0_access,
5362 .type = ARM_CP_CONST, .resetvalue = cpu->ctr },
5363
5364 { .name = "TCMTR",
5365 .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 2,
5366 .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
5367 REGINFO_SENTINEL
5368 };
5369
5370 ARMCPRegInfo id_tlbtr_reginfo = {
5371 .name = "TLBTR",
5372 .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 3,
5373 .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0,
5374 };
5375
5376 ARMCPRegInfo id_mpuir_reginfo = {
5377 .name = "MPUIR",
5378 .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 4,
5379 .access = PL1_R, .type = ARM_CP_CONST,
5380 .resetvalue = cpu->pmsav7_dregion << 8
5381 };
5382 ARMCPRegInfo crn0_wi_reginfo = {
5383 .name = "CRN0_WI", .cp = 15, .crn = 0, .crm = CP_ANY,
5384 .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_W,
5385 .type = ARM_CP_NOP | ARM_CP_OVERRIDE
5386 };
5387 if (arm_feature(env, ARM_FEATURE_OMAPCP) ||
5388 arm_feature(env, ARM_FEATURE_STRONGARM)) {
5389 ARMCPRegInfo *r;
5390
5391
5392
5393
5394
5395 define_one_arm_cp_reg(cpu, &crn0_wi_reginfo);
5396 for (r = id_pre_v8_midr_cp_reginfo;
5397 r->type != ARM_CP_SENTINEL; r++) {
5398 r->access = PL1_RW;
5399 }
5400 for (r = id_cp_reginfo; r->type != ARM_CP_SENTINEL; r++) {
5401 r->access = PL1_RW;
5402 }
5403 id_mpuir_reginfo.access = PL1_RW;
5404 id_tlbtr_reginfo.access = PL1_RW;
5405 }
5406 if (arm_feature(env, ARM_FEATURE_V8)) {
5407 define_arm_cp_regs(cpu, id_v8_midr_cp_reginfo);
5408 } else {
5409 define_arm_cp_regs(cpu, id_pre_v8_midr_cp_reginfo);
5410 }
5411 define_arm_cp_regs(cpu, id_cp_reginfo);
5412 if (!arm_feature(env, ARM_FEATURE_PMSA)) {
5413 define_one_arm_cp_reg(cpu, &id_tlbtr_reginfo);
5414 } else if (arm_feature(env, ARM_FEATURE_V7)) {
5415 define_one_arm_cp_reg(cpu, &id_mpuir_reginfo);
5416 }
5417 }
5418
5419 if (arm_feature(env, ARM_FEATURE_MPIDR)) {
5420 define_arm_cp_regs(cpu, mpidr_cp_reginfo);
5421 }
5422
5423 if (arm_feature(env, ARM_FEATURE_AUXCR)) {
5424 ARMCPRegInfo auxcr_reginfo[] = {
5425 { .name = "ACTLR_EL1", .state = ARM_CP_STATE_BOTH,
5426 .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 1,
5427 .access = PL1_RW, .type = ARM_CP_CONST,
5428 .resetvalue = cpu->reset_auxcr },
5429 { .name = "ACTLR_EL2", .state = ARM_CP_STATE_BOTH,
5430 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 1,
5431 .access = PL2_RW, .type = ARM_CP_CONST,
5432 .resetvalue = 0 },
5433 { .name = "ACTLR_EL3", .state = ARM_CP_STATE_AA64,
5434 .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 0, .opc2 = 1,
5435 .access = PL3_RW, .type = ARM_CP_CONST,
5436 .resetvalue = 0 },
5437 REGINFO_SENTINEL
5438 };
5439 define_arm_cp_regs(cpu, auxcr_reginfo);
5440 }
5441
5442 if (arm_feature(env, ARM_FEATURE_CBAR)) {
5443 if (arm_feature(env, ARM_FEATURE_AARCH64)) {
5444
5445 uint32_t cbar32 = (extract64(cpu->reset_cbar, 18, 14) << 18)
5446 | extract64(cpu->reset_cbar, 32, 12);
5447 ARMCPRegInfo cbar_reginfo[] = {
5448 { .name = "CBAR",
5449 .type = ARM_CP_CONST,
5450 .cp = 15, .crn = 15, .crm = 0, .opc1 = 4, .opc2 = 0,
5451 .access = PL1_R, .resetvalue = cpu->reset_cbar },
5452 { .name = "CBAR_EL1", .state = ARM_CP_STATE_AA64,
5453 .type = ARM_CP_CONST,
5454 .opc0 = 3, .opc1 = 1, .crn = 15, .crm = 3, .opc2 = 0,
5455 .access = PL1_R, .resetvalue = cbar32 },
5456 REGINFO_SENTINEL
5457 };
5458
5459 assert(arm_feature(env, ARM_FEATURE_CBAR_RO));
5460 define_arm_cp_regs(cpu, cbar_reginfo);
5461 } else {
5462 ARMCPRegInfo cbar = {
5463 .name = "CBAR",
5464 .cp = 15, .crn = 15, .crm = 0, .opc1 = 4, .opc2 = 0,
5465 .access = PL1_R|PL3_W, .resetvalue = cpu->reset_cbar,
5466 .fieldoffset = offsetof(CPUARMState,
5467 cp15.c15_config_base_address)
5468 };
5469 if (arm_feature(env, ARM_FEATURE_CBAR_RO)) {
5470 cbar.access = PL1_R;
5471 cbar.fieldoffset = 0;
5472 cbar.type = ARM_CP_CONST;
5473 }
5474 define_one_arm_cp_reg(cpu, &cbar);
5475 }
5476 }
5477
5478 if (arm_feature(env, ARM_FEATURE_VBAR)) {
5479 ARMCPRegInfo vbar_cp_reginfo[] = {
5480 { .name = "VBAR", .state = ARM_CP_STATE_BOTH,
5481 .opc0 = 3, .crn = 12, .crm = 0, .opc1 = 0, .opc2 = 0,
5482 .access = PL1_RW, .writefn = vbar_write,
5483 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.vbar_s),
5484 offsetof(CPUARMState, cp15.vbar_ns) },
5485 .resetvalue = 0 },
5486 REGINFO_SENTINEL
5487 };
5488 define_arm_cp_regs(cpu, vbar_cp_reginfo);
5489 }
5490
5491
5492 {
5493 ARMCPRegInfo sctlr = {
5494 .name = "SCTLR", .state = ARM_CP_STATE_BOTH,
5495 .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 0,
5496 .access = PL1_RW,
5497 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.sctlr_s),
5498 offsetof(CPUARMState, cp15.sctlr_ns) },
5499 .writefn = sctlr_write, .resetvalue = cpu->reset_sctlr,
5500 .raw_writefn = raw_write,
5501 };
5502 if (arm_feature(env, ARM_FEATURE_XSCALE)) {
5503
5504
5505
5506
5507 sctlr.type |= ARM_CP_SUPPRESS_TB_END;
5508 }
5509 define_one_arm_cp_reg(cpu, &sctlr);
5510 }
5511
5512 if (arm_feature(env, ARM_FEATURE_SVE)) {
5513 define_one_arm_cp_reg(cpu, &zcr_el1_reginfo);
5514 if (arm_feature(env, ARM_FEATURE_EL2)) {
5515 define_one_arm_cp_reg(cpu, &zcr_el2_reginfo);
5516 } else {
5517 define_one_arm_cp_reg(cpu, &zcr_no_el2_reginfo);
5518 }
5519 if (arm_feature(env, ARM_FEATURE_EL3)) {
5520 define_one_arm_cp_reg(cpu, &zcr_el3_reginfo);
5521 }
5522 }
5523}
5524
5525void arm_cpu_register_gdb_regs_for_features(ARMCPU *cpu)
5526{
5527 CPUState *cs = CPU(cpu);
5528 CPUARMState *env = &cpu->env;
5529
5530 if (arm_feature(env, ARM_FEATURE_AARCH64)) {
5531 gdb_register_coprocessor(cs, aarch64_fpu_gdb_get_reg,
5532 aarch64_fpu_gdb_set_reg,
5533 34, "aarch64-fpu.xml", 0);
5534 } else if (arm_feature(env, ARM_FEATURE_NEON)) {
5535 gdb_register_coprocessor(cs, vfp_gdb_get_reg, vfp_gdb_set_reg,
5536 51, "arm-neon.xml", 0);
5537 } else if (arm_feature(env, ARM_FEATURE_VFP3)) {
5538 gdb_register_coprocessor(cs, vfp_gdb_get_reg, vfp_gdb_set_reg,
5539 35, "arm-vfp3.xml", 0);
5540 } else if (arm_feature(env, ARM_FEATURE_VFP)) {
5541 gdb_register_coprocessor(cs, vfp_gdb_get_reg, vfp_gdb_set_reg,
5542 19, "arm-vfp.xml", 0);
5543 }
5544 gdb_register_coprocessor(cs, arm_gdb_get_sysreg, arm_gdb_set_sysreg,
5545 arm_gen_dynamic_xml(cs),
5546 "system-registers.xml", 0);
5547}
5548
5549
5550static gint arm_cpu_list_compare(gconstpointer a, gconstpointer b)
5551{
5552 ObjectClass *class_a = (ObjectClass *)a;
5553 ObjectClass *class_b = (ObjectClass *)b;
5554 const char *name_a, *name_b;
5555
5556 name_a = object_class_get_name(class_a);
5557 name_b = object_class_get_name(class_b);
5558 if (strcmp(name_a, "any-" TYPE_ARM_CPU) == 0) {
5559 return 1;
5560 } else if (strcmp(name_b, "any-" TYPE_ARM_CPU) == 0) {
5561 return -1;
5562 } else {
5563 return strcmp(name_a, name_b);
5564 }
5565}
5566
5567static void arm_cpu_list_entry(gpointer data, gpointer user_data)
5568{
5569 ObjectClass *oc = data;
5570 CPUListState *s = user_data;
5571 const char *typename;
5572 char *name;
5573
5574 typename = object_class_get_name(oc);
5575 name = g_strndup(typename, strlen(typename) - strlen("-" TYPE_ARM_CPU));
5576 (*s->cpu_fprintf)(s->file, " %s\n",
5577 name);
5578 g_free(name);
5579}
5580
5581void arm_cpu_list(FILE *f, fprintf_function cpu_fprintf)
5582{
5583 CPUListState s = {
5584 .file = f,
5585 .cpu_fprintf = cpu_fprintf,
5586 };
5587 GSList *list;
5588
5589 list = object_class_get_list(TYPE_ARM_CPU, false);
5590 list = g_slist_sort(list, arm_cpu_list_compare);
5591 (*cpu_fprintf)(f, "Available CPUs:\n");
5592 g_slist_foreach(list, arm_cpu_list_entry, &s);
5593 g_slist_free(list);
5594}
5595
5596static void arm_cpu_add_definition(gpointer data, gpointer user_data)
5597{
5598 ObjectClass *oc = data;
5599 CpuDefinitionInfoList **cpu_list = user_data;
5600 CpuDefinitionInfoList *entry;
5601 CpuDefinitionInfo *info;
5602 const char *typename;
5603
5604 typename = object_class_get_name(oc);
5605 info = g_malloc0(sizeof(*info));
5606 info->name = g_strndup(typename,
5607 strlen(typename) - strlen("-" TYPE_ARM_CPU));
5608 info->q_typename = g_strdup(typename);
5609
5610 entry = g_malloc0(sizeof(*entry));
5611 entry->value = info;
5612 entry->next = *cpu_list;
5613 *cpu_list = entry;
5614}
5615
5616CpuDefinitionInfoList *arch_query_cpu_definitions(Error **errp)
5617{
5618 CpuDefinitionInfoList *cpu_list = NULL;
5619 GSList *list;
5620
5621 list = object_class_get_list(TYPE_ARM_CPU, false);
5622 g_slist_foreach(list, arm_cpu_add_definition, &cpu_list);
5623 g_slist_free(list);
5624
5625 return cpu_list;
5626}
5627
/* Add a single, fully-specified register definition (one concrete
 * crm/opc1/opc2 tuple, one state, one security state) to cpu->cp_regs,
 * keyed by the encoded register number. The ARMCPRegInfo is duplicated
 * so the caller's template can describe several registers.
 */
static void add_cpreg_to_hashtable(ARMCPU *cpu, const ARMCPRegInfo *r,
                                   void *opaque, int state, int secstate,
                                   int crm, int opc1, int opc2,
                                   const char *name)
{
    /* Key and reginfo copy are owned by the hash table after insertion. */
    uint32_t *key = g_new(uint32_t, 1);
    ARMCPRegInfo *r2 = g_memdup(r, sizeof(ARMCPRegInfo));
    int is64 = (r->type & ARM_CP_64BIT) ? 1 : 0;
    int ns = (secstate & ARM_CP_SECSTATE_NS) ? 1 : 0;

    r2->name = g_strdup(name);

    /* Record the concrete security state this copy represents. */
    r2->secure = secstate;

    if (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1]) {
        /* Banked register: select the field offset for this security
         * state (index 1 = non-secure, 0 = secure).
         */
        r2->fieldoffset = r->bank_fieldoffsets[ns];
    }

    if (state == ARM_CP_STATE_AA32) {
        if (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1]) {
            /* Mark as an alias the banked copies whose raw state is
             * visible through another entry: the NS half of a BOTH-state
             * register (raw state lives in the AArch64 view), and on v8
             * the secure half.
             */
            if ((r->state == ARM_CP_STATE_BOTH && ns) ||
                (arm_feature(&cpu->env, ARM_FEATURE_V8) && !ns)) {
                r2->type |= ARM_CP_ALIAS;
            }
        } else if ((secstate != r->secure) && !ns) {
            /* Unbanked register defined for a single security state:
             * the copy for the other (secure) state is only an alias.
             */
            r2->type |= ARM_CP_ALIAS;
        }

        if (r->state == ARM_CP_STATE_BOTH) {
            /* BOTH-state templates may leave cp unset; AArch32 system
             * registers live in coprocessor 15.
             */
            if (r2->cp == 0) {
                r2->cp = 15;
            }

#ifdef HOST_WORDS_BIGENDIAN
            /* The AArch32 view is the low 32 bits of the 64-bit field. */
            if (r2->fieldoffset) {
                r2->fieldoffset += sizeof(uint32_t);
            }
#endif
        }
    }
    if (state == ARM_CP_STATE_AA64) {
        /* AArch64 system registers all use the sysreg "coprocessor"
         * number when encoding the hash key.
         */
        if (r->cp == 0 || r->state == ARM_CP_STATE_BOTH) {
            r2->cp = CP_REG_ARM64_SYSREG_CP;
        }
        *key = ENCODE_AA64_CP_REG(r2->cp, r2->crn, crm,
                                  r2->opc0, opc1, opc2);
    } else {
        *key = ENCODE_CP_REG(r2->cp, is64, ns, r2->crn, crm, opc1, opc2);
    }
    if (opaque) {
        r2->opaque = opaque;
    }

    /* Overwrite the template's possibly-wildcard fields with the
     * concrete values this copy stands for.
     */
    r2->state = state;

    r2->crm = crm;
    r2->opc1 = opc1;
    r2->opc2 = opc2;

    /* Special registers have no underlying raw state to migrate. */
    if ((r->type & ARM_CP_SPECIAL)) {
        r2->type |= ARM_CP_NO_RAW;
    }
    /* Wildcard expansions beyond the first concrete value are aliases
     * of the same state and are hidden from gdb.
     */
    if (((r->crm == CP_ANY) && crm != 0) ||
        ((r->opc1 == CP_ANY) && opc1 != 0) ||
        ((r->opc2 == CP_ANY) && opc2 != 0)) {
        r2->type |= ARM_CP_ALIAS | ARM_CP_NO_GDB;
    }

    /* Anything claiming raw accessibility must actually be raw-accessible. */
    if (!(r2->type & ARM_CP_NO_RAW)) {
        assert(!raw_accessors_invalid(r2));
    }

    /* Redefining an existing register is a bug unless one side
     * explicitly opted in via ARM_CP_OVERRIDE.
     */
    if (!(r->type & ARM_CP_OVERRIDE)) {
        ARMCPRegInfo *oldreg;
        oldreg = g_hash_table_lookup(cpu->cp_regs, key);
        if (oldreg && !(oldreg->type & ARM_CP_OVERRIDE)) {
            fprintf(stderr, "Register redefined: cp=%d %d bit "
                    "crn=%d crm=%d opc1=%d opc2=%d, "
                    "was %s, now %s\n", r2->cp, 32 + 32 * is64,
                    r2->crn, r2->crm, r2->opc1, r2->opc2,
                    oldreg->name, r2->name);
            g_assert_not_reached();
        }
    }
    g_hash_table_insert(cpu->cp_regs, key, r2);
}
5761
5762
/* Define a coprocessor/system register from a (possibly wildcarded)
 * ARMCPRegInfo template. CP_ANY in crm/opc1/opc2 expands to every legal
 * value; a BOTH-state template produces both an AArch32 and an AArch64
 * entry; an AArch32 entry without an explicit security state produces a
 * secure ("<name>_S") and a non-secure copy. Each concrete combination
 * is handed to add_cpreg_to_hashtable().
 */
void define_one_arm_cp_reg_with_opaque(ARMCPU *cpu,
                                       const ARMCPRegInfo *r, void *opaque)
{
    int crm, opc1, opc2, state;
    int crmmin = (r->crm == CP_ANY) ? 0 : r->crm;
    int crmmax = (r->crm == CP_ANY) ? 15 : r->crm;
    int opc1min = (r->opc1 == CP_ANY) ? 0 : r->opc1;
    int opc1max = (r->opc1 == CP_ANY) ? 7 : r->opc1;
    int opc2min = (r->opc2 == CP_ANY) ? 0 : r->opc2;
    int opc2max = (r->opc2 == CP_ANY) ? 7 : r->opc2;

    /* 64-bit (MRRC/MCRR-style) registers encode no opc2 or crn. */
    assert(!((r->type & ARM_CP_64BIT) && (r->opc2 || r->crn)));
    /* opc0 is an AArch64-only field. */
    assert((r->state != ARM_CP_STATE_AA32) || (r->opc0 == 0));
    /* ARM_CP_64BIT describes an AArch32 encoding only. */
    assert((r->state != ARM_CP_STATE_AA64) || !(r->type & ARM_CP_64BIT));

    /* For AArch64-visible registers, check that the declared access
     * rights do not exceed what the opc1 value permits.
     */
    if (r->state != ARM_CP_STATE_AA32) {
        int mask = 0;
        switch (r->opc1) {
        case 0: case 1: case 2:
            /* min_EL EL1 */
            mask = PL1_RW;
            break;
        case 3:
            /* min_EL EL0 */
            mask = PL0_RW;
            break;
        case 4:
            /* min_EL EL2 */
            mask = PL2_RW;
            break;
        case 5:
            /* unallocated encoding */
            assert(false);
            break;
        case 6:
            /* min_EL EL3 */
            mask = PL3_RW;
            break;
        case 7:
            /* min_EL EL1 */
            mask = PL1_RW;
            break;
        default:
            /* broken reginfo with out-of-range opc1 */
            assert(false);
            break;
        }
        /* assert our permissions are not too lax (broader than we declared) */
        assert((r->access & ~mask) == 0);
    }

    /* Any readable/writable non-special, non-const register must have
     * some way to access its state: a field offset, banked offsets,
     * or an explicit read/write function.
     */
    if (!(r->type & (ARM_CP_SPECIAL|ARM_CP_CONST))) {
        if (r->access & PL3_R) {
            assert((r->fieldoffset ||
                   (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1])) ||
                   r->readfn);
        }
        if (r->access & PL3_W) {
            assert((r->fieldoffset ||
                   (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1])) ||
                   r->writefn);
        }
    }

    assert(cptype_valid(r->type));
    for (crm = crmmin; crm <= crmmax; crm++) {
        for (opc1 = opc1min; opc1 <= opc1max; opc1++) {
            for (opc2 = opc2min; opc2 <= opc2max; opc2++) {
                for (state = ARM_CP_STATE_AA32;
                     state <= ARM_CP_STATE_AA64; state++) {
                    if (r->state != state && r->state != ARM_CP_STATE_BOTH) {
                        continue;
                    }
                    if (state == ARM_CP_STATE_AA32) {
                        /* AArch32 registers carry a security state.
                         * If the template pins one, register just that;
                         * otherwise register a secure copy (suffixed
                         * "_S") and a non-secure copy.
                         */
                        char *name;

                        switch (r->secure) {
                        case ARM_CP_SECSTATE_S:
                        case ARM_CP_SECSTATE_NS:
                            add_cpreg_to_hashtable(cpu, r, opaque, state,
                                                   r->secure, crm, opc1, opc2,
                                                   r->name);
                            break;
                        default:
                            name = g_strdup_printf("%s_S", r->name);
                            add_cpreg_to_hashtable(cpu, r, opaque, state,
                                                   ARM_CP_SECSTATE_S,
                                                   crm, opc1, opc2, name);
                            g_free(name);
                            add_cpreg_to_hashtable(cpu, r, opaque, state,
                                                   ARM_CP_SECSTATE_NS,
                                                   crm, opc1, opc2, r->name);
                            break;
                        }
                    } else {
                        /* AArch64 registers are always non-secure from
                         * this code's point of view.
                         */
                        add_cpreg_to_hashtable(cpu, r, opaque, state,
                                               ARM_CP_SECSTATE_NS,
                                               crm, opc1, opc2, r->name);
                    }
                }
            }
        }
    }
}
5905
5906void define_arm_cp_regs_with_opaque(ARMCPU *cpu,
5907 const ARMCPRegInfo *regs, void *opaque)
5908{
5909
5910 const ARMCPRegInfo *r;
5911 for (r = regs; r->type != ARM_CP_SENTINEL; r++) {
5912 define_one_arm_cp_reg_with_opaque(cpu, r, opaque);
5913 }
5914}
5915
5916const ARMCPRegInfo *get_arm_cp_reginfo(GHashTable *cpregs, uint32_t encoded_cp)
5917{
5918 return g_hash_table_lookup(cpregs, &encoded_cp);
5919}
5920
void arm_cp_write_ignore(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    /* Helper coprocessor write function for write-ignore registers. */
}
5926
uint64_t arm_cp_read_zero(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* Helper coprocessor read function for read-as-zero registers. */
    return 0;
}
5932
void arm_cp_reset_ignore(CPUARMState *env, const ARMCPRegInfo *opaque)
{
    /* Helper coprocessor reset function for registers with no reset action. */
}
5937
/* Return non-zero if the requested AArch32 CPSR mode change is illegal
 * for the given write type (instruction, exception return, gdb, raw).
 */
static int bad_mode_switch(CPUARMState *env, int mode, CPSRWriteType write_type)
{
    /* An MSR-style instruction may never switch into or out of Hyp
     * mode; only exception entry/return can do that.
     */
    if (write_type == CPSRWriteByInstr &&
        ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_HYP ||
         mode == ARM_CPU_MODE_HYP)) {
        return 1;
    }

    switch (mode) {
    case ARM_CPU_MODE_USR:
        return 0;
    case ARM_CPU_MODE_SYS:
    case ARM_CPU_MODE_SVC:
    case ARM_CPU_MODE_ABT:
    case ARM_CPU_MODE_UND:
    case ARM_CPU_MODE_IRQ:
    case ARM_CPU_MODE_FIQ:
        /* NOTE(review): with HCR.TGE set, an instruction write that
         * would leave Monitor mode for a non-secure PL1 mode is
         * rejected -- confirm against the v8 ARM ARM CPSR.M rules.
         */
        if (write_type == CPSRWriteByInstr &&
            (env->cp15.hcr_el2 & HCR_TGE) &&
            (env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_MON &&
            !arm_is_secure_below_el3(env)) {
            return 1;
        }
        return 0;
    case ARM_CPU_MODE_HYP:
        /* Hyp mode requires EL2, being at EL2 or above, and non-secure. */
        return !arm_feature(env, ARM_FEATURE_EL2)
            || arm_current_el(env) < 2 || arm_is_secure(env);
    case ARM_CPU_MODE_MON:
        /* Monitor mode is only reachable when already at EL3. */
        return arm_current_el(env) < 3;
    default:
        /* Any other mode value is a reserved encoding. */
        return 1;
    }
}
5983
5984uint32_t cpsr_read(CPUARMState *env)
5985{
5986 int ZF;
5987 ZF = (env->ZF == 0);
5988 return env->uncached_cpsr | (env->NF & 0x80000000) | (ZF << 30) |
5989 (env->CF << 29) | ((env->VF & 0x80000000) >> 3) | (env->QF << 27)
5990 | (env->thumb << 5) | ((env->condexec_bits & 3) << 25)
5991 | ((env->condexec_bits & 0xfc) << 8)
5992 | (env->GE << 16) | (env->daif & CPSR_AIF);
5993}
5994
/* Write 'val' to the CPSR under 'mask'. Cached flag/IT/GE fields are
 * updated individually; DAIF and mode changes get extra legality
 * checking unless write_type is CPSRWriteRaw.
 */
void cpsr_write(CPUARMState *env, uint32_t val, uint32_t mask,
                CPSRWriteType write_type)
{
    uint32_t changed_daif;

    if (mask & CPSR_NZCV) {
        /* ZF is stored inverted: zero means the Z flag is set. */
        env->ZF = (~val) & CPSR_Z;
        env->NF = val;
        env->CF = (val >> 29) & 1;
        env->VF = (val << 3) & 0x80000000;
    }
    if (mask & CPSR_Q)
        env->QF = ((val & CPSR_Q) != 0);
    if (mask & CPSR_T)
        env->thumb = ((val & CPSR_T) != 0);
    if (mask & CPSR_IT_0_1) {
        env->condexec_bits &= ~3;
        env->condexec_bits |= (val >> 25) & 3;
    }
    if (mask & CPSR_IT_2_7) {
        env->condexec_bits &= 3;
        env->condexec_bits |= (val >> 8) & 0xfc;
    }
    if (mask & CPSR_GE) {
        env->GE = (val >> 16) & 0xf;
    }

    /* On pre-v8 CPUs with EL3 but no EL2, non-secure writes to the
     * A and F bits are subject to SCR.AW/SCR.FW and SCTLR.NMFI
     * restrictions; drop the offending bits from the mask.
     */
    if (write_type != CPSRWriteRaw && !arm_feature(env, ARM_FEATURE_V8) &&
        arm_feature(env, ARM_FEATURE_EL3) &&
        !arm_feature(env, ARM_FEATURE_EL2) &&
        !arm_is_secure(env)) {
        /* Only bits actually being toggled need checking. */
        changed_daif = (env->daif ^ val) & mask;

        if (changed_daif & CPSR_A) {
            /* SCR.AW clear: non-secure world may not change CPSR.A. */
            if (!(env->cp15.scr_el3 & SCR_AW)) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "Ignoring attempt to switch CPSR_A flag from "
                              "non-secure world with SCR.AW bit clear\n");
                mask &= ~CPSR_A;
            }
        }

        if (changed_daif & CPSR_F) {
            /* SCR.FW clear: non-secure world may not change CPSR.F. */
            if (!(env->cp15.scr_el3 & SCR_FW)) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "Ignoring attempt to switch CPSR_F flag from "
                              "non-secure world with SCR.FW bit clear\n");
                mask &= ~CPSR_F;
            }

            /* With non-maskable FIQ support enabled (SCTLR.NMFI),
             * software may not mask FIQs by setting CPSR.F.
             */
            if ((A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_NMFI) &&
                (val & CPSR_F)) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "Ignoring attempt to enable CPSR_F flag "
                              "(non-maskable FIQ [NMFI] support enabled)\n");
                mask &= ~CPSR_F;
            }
        }
    }

    env->daif &= ~(CPSR_AIF & mask);
    env->daif |= val & CPSR_AIF & mask;

    /* Handle a mode switch if the masked write changes CPSR.M. */
    if (write_type != CPSRWriteRaw &&
        ((env->uncached_cpsr ^ val) & mask & CPSR_M)) {
        if ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_USR) {
            /* Attempts to leave USR mode this way are silently ignored. */
            mask &= ~CPSR_M;
        } else if (bad_mode_switch(env, val & CPSR_M, write_type)) {
            /* Illegal mode change: keep the current mode and, on v8
             * (except for gdb writes), set the Illegal Execution
             * state bit instead.
             */
            mask &= ~CPSR_M;
            if (write_type != CPSRWriteByGDBStub &&
                arm_feature(env, ARM_FEATURE_V8)) {
                mask |= CPSR_IL;
                val |= CPSR_IL;
            }
        } else {
            switch_mode(env, val & CPSR_M);
        }
    }
    mask &= ~CACHED_CPSR_BITS;
    env->uncached_cpsr = (env->uncached_cpsr & ~mask) | (val & mask);
}
6109
6110
6111uint32_t HELPER(sxtb16)(uint32_t x)
6112{
6113 uint32_t res;
6114 res = (uint16_t)(int8_t)x;
6115 res |= (uint32_t)(int8_t)(x >> 16) << 16;
6116 return res;
6117}
6118
6119uint32_t HELPER(uxtb16)(uint32_t x)
6120{
6121 uint32_t res;
6122 res = (uint16_t)(uint8_t)x;
6123 res |= (uint32_t)(uint8_t)(x >> 16) << 16;
6124 return res;
6125}
6126
6127int32_t HELPER(sdiv)(int32_t num, int32_t den)
6128{
6129 if (den == 0)
6130 return 0;
6131 if (num == INT_MIN && den == -1)
6132 return INT_MIN;
6133 return num / den;
6134}
6135
6136uint32_t HELPER(udiv)(uint32_t num, uint32_t den)
6137{
6138 if (den == 0)
6139 return 0;
6140 return num / den;
6141}
6142
uint32_t HELPER(rbit)(uint32_t x)
{
    /* RBIT: reverse the bit order of a 32-bit value */
    return revbit32(x);
}
6147
6148#if defined(CONFIG_USER_ONLY)
6149
6150
/*
 * User-mode emulation stub: v7M system register writes are not
 * supported here, so any attempt is treated as a fatal guest/QEMU bug.
 */
void HELPER(v7m_msr)(CPUARMState *env, uint32_t reg, uint32_t val)
{
    ARMCPU *cpu = arm_env_get_cpu(env);

    cpu_abort(CPU(cpu), "v7m_msr %d\n", reg);
}
6157
/*
 * User-mode emulation stub: v7M system register reads are not
 * supported here, so any attempt is treated as a fatal guest/QEMU bug.
 */
uint32_t HELPER(v7m_mrs)(CPUARMState *env, uint32_t reg)
{
    ARMCPU *cpu = arm_env_get_cpu(env);

    cpu_abort(CPU(cpu), "v7m_mrs %d\n", reg);
    return 0;
}
6165
void HELPER(v7m_bxns)(CPUARMState *env, uint32_t dest)
{
    /* translate.c shouldn't generate calls here in user-only mode */
    g_assert_not_reached();
}
6171
void HELPER(v7m_blxns)(CPUARMState *env, uint32_t dest)
{
    /* translate.c shouldn't generate calls here in user-only mode */
    g_assert_not_reached();
}
6177
uint32_t HELPER(v7m_tt)(CPUARMState *env, uint32_t addr, uint32_t op)
{
    /*
     * User-mode emulation of the v8M TT (test target) instruction.
     * There is no MPU or Security Extension state modelled in
     * user-only mode, so every field of the TT result register is
     * zero for all queries; just return 0.
     * NOTE(review): presumably this matches the architected result for
     * a NonSecure unprivileged query with no MPU present — confirm
     * against the v8M ARM if this path ever matters.
     */
    return 0;
}
6199
/*
 * User-mode emulation: the CPU must stay in USR mode; any attempt
 * to switch to another mode is a fatal error.
 */
void switch_mode(CPUARMState *env, int mode)
{
    ARMCPU *cpu = arm_env_get_cpu(env);

    if (mode != ARM_CPU_MODE_USR) {
        cpu_abort(CPU(cpu), "Tried to switch out of user mode\n");
    }
}
6208
/* User-mode emulation: all exceptions notionally go to EL1 */
uint32_t arm_phys_excp_target_el(CPUState *cs, uint32_t excp_idx,
                                 uint32_t cur_el, bool secure)
{
    return 1;
}
6214
void aarch64_sync_64_to_32(CPUARMState *env)
{
    /* Dummy function; never called in user-only emulation */
    g_assert_not_reached();
}
6219
6220#else
6221
/*
 * Switch the CPU between AArch32 processor modes: save the banked
 * r13/r14/SPSR (and r8-r12 when leaving/entering FIQ) of the old mode
 * and load those of the new mode. The save of the old mode's registers
 * must happen before the load of the new mode's.
 */
void switch_mode(CPUARMState *env, int mode)
{
    int old_mode;
    int i;

    old_mode = env->uncached_cpsr & CPSR_M;
    if (mode == old_mode)
        return;

    /* r8-r12 are banked only for FIQ: swap them via the shadow arrays */
    if (old_mode == ARM_CPU_MODE_FIQ) {
        memcpy (env->fiq_regs, env->regs + 8, 5 * sizeof(uint32_t));
        memcpy (env->regs + 8, env->usr_regs, 5 * sizeof(uint32_t));
    } else if (mode == ARM_CPU_MODE_FIQ) {
        memcpy (env->usr_regs, env->regs + 8, 5 * sizeof(uint32_t));
        memcpy (env->regs + 8, env->fiq_regs, 5 * sizeof(uint32_t));
    }

    /* Save outgoing mode's SP, LR and SPSR into its bank... */
    i = bank_number(old_mode);
    env->banked_r13[i] = env->regs[13];
    env->banked_r14[i] = env->regs[14];
    env->banked_spsr[i] = env->spsr;

    /* ...then load the incoming mode's from its bank */
    i = bank_number(mode);
    env->regs[13] = env->banked_r13[i];
    env->regs[14] = env->banked_r14[i];
    env->spsr = env->banked_spsr[i];
}
6249
6250
6251
6252
6253
6254
6255
6256
6257
6258
6259
6260
6261
6262
6263
6264
6265
6266
6267
6268
6269
6270
6271
6272
6273
6274
6275
6276
6277
6278
6279
6280
6281
6282
6283
6284
6285
6286
/*
 * Lookup table giving the target exception level for physical
 * exceptions. Indexed (see arm_phys_excp_target_el below) as
 *   target_el_table[is64][scr][rw][hcr][secure][cur_el]
 * where is64 = CPU has AArch64, scr = relevant SCR_EL3 routing bit,
 * rw = SCR_EL3.RW (lower ELs are AArch64), hcr = relevant HCR_EL2
 * routing bit (or TGE), secure = current security state, cur_el =
 * current exception level. Entries of -1 are configurations that
 * cannot legally deliver the exception.
 */
static const int8_t target_el_table[2][2][2][2][2][4] = {
    {{{{{ 1,  1,  2, -1 },{ 3, -1, -1,  3 },},
       {{ 2,  2,  2, -1 },{ 3, -1, -1,  3 },},},
      {{{ 1,  1,  2, -1 },{ 3, -1, -1,  3 },},
       {{ 2,  2,  2, -1 },{ 3, -1, -1,  3 },},},},
     {{{{ 3,  3,  3, -1 },{ 3, -1, -1,  3 },},
       {{ 3,  3,  3, -1 },{ 3, -1, -1,  3 },},},
      {{{ 3,  3,  3, -1 },{ 3, -1, -1,  3 },},
       {{ 3,  3,  3, -1 },{ 3, -1, -1,  3 },},},},},
    {{{{{ 1,  1,  2, -1 },{ 1,  1, -1,  1 },},
       {{ 2,  2,  2, -1 },{ 1,  1, -1,  1 },},},
      {{{ 1,  1,  1, -1 },{ 1,  1, -1,  1 },},
       {{ 2,  2,  2, -1 },{ 1,  1, -1,  1 },},},},
     {{{{ 3,  3,  3, -1 },{ 3,  3, -1,  3 },},
       {{ 3,  3,  3, -1 },{ 3,  3, -1,  3 },},},
      {{{ 3,  3,  3, -1 },{ 3,  3, -1,  3 },},
       {{ 3,  3,  3, -1 },{ 3,  3, -1,  3 },},},},},
};
6305
6306
6307
6308
/*
 * Determine the target EL for a physical exception (IRQ/FIQ/abort),
 * given the current EL and security state, by gathering the routing
 * bits from SCR_EL3 and HCR_EL2 and consulting target_el_table.
 */
uint32_t arm_phys_excp_target_el(CPUState *cs, uint32_t excp_idx,
                                 uint32_t cur_el, bool secure)
{
    CPUARMState *env = cs->env_ptr;
    int rw;
    int scr;
    int hcr;
    int target_el;
    /* Is the highest EL AArch64? */
    int is64 = arm_feature(env, ARM_FEATURE_AARCH64);

    if (arm_feature(env, ARM_FEATURE_EL3)) {
        rw = ((env->cp15.scr_el3 & SCR_RW) == SCR_RW);
    } else {
        /*
         * No EL3: either EL2 is the highest EL, or there is no EL2/EL3
         * at all, in which case the rw index doesn't change the lookup
         * result anyway; use the register width of the highest EL.
         */
        rw = is64;
    }

    /* Pick the SCR and HCR routing bits relevant to this exception type */
    switch (excp_idx) {
    case EXCP_IRQ:
        scr = ((env->cp15.scr_el3 & SCR_IRQ) == SCR_IRQ);
        hcr = ((env->cp15.hcr_el2 & HCR_IMO) == HCR_IMO);
        break;
    case EXCP_FIQ:
        scr = ((env->cp15.scr_el3 & SCR_FIQ) == SCR_FIQ);
        hcr = ((env->cp15.hcr_el2 & HCR_FMO) == HCR_FMO);
        break;
    default:
        scr = ((env->cp15.scr_el3 & SCR_EA) == SCR_EA);
        hcr = ((env->cp15.hcr_el2 & HCR_AMO) == HCR_AMO);
        break;
    };

    /* HCR_EL2.TGE routes to EL2 regardless of the per-type bit */
    hcr |= ((env->cp15.hcr_el2 & HCR_TGE) == HCR_TGE);

    /* Perform the table lookup; see target_el_table for index meanings */
    target_el = target_el_table[is64][scr][rw][hcr][secure][cur_el];

    assert(target_el > 0);

    return target_el;
}
6355
/*
 * Store a 32-bit word at @addr for M-profile exception stacking,
 * performing the MPU/SAU check and raising the appropriate derived
 * fault (SecureFault, MemManage or BusFault) if the write fails.
 * Returns true on success, false if a fault was pended.
 * If @ignfault is set, faults are detected but not pended.
 */
static bool v7m_stack_write(ARMCPU *cpu, uint32_t addr, uint32_t value,
                            ARMMMUIdx mmu_idx, bool ignfault)
{
    CPUState *cs = CPU(cpu);
    CPUARMState *env = &cpu->env;
    MemTxAttrs attrs = {};
    MemTxResult txres;
    target_ulong page_size;
    hwaddr physaddr;
    int prot;
    ARMMMUFaultInfo fi;
    bool secure = mmu_idx & ARM_MMU_IDX_M_S;
    int exc;
    bool exc_secure;

    if (get_phys_addr(env, addr, MMU_DATA_STORE, mmu_idx, &physaddr,
                      &attrs, &prot, &page_size, &fi, NULL)) {
        /* MPU/SAU lookup failed */
        if (fi.type == ARMFault_QEMU_SFault) {
            qemu_log_mask(CPU_LOG_INT,
                          "...SecureFault with SFSR.AUVIOL during stacking\n");
            env->v7m.sfsr |= R_V7M_SFSR_AUVIOL_MASK | R_V7M_SFSR_SFARVALID_MASK;
            env->v7m.sfar = addr;
            exc = ARMV7M_EXCP_SECURE;
            exc_secure = false;
        } else {
            qemu_log_mask(CPU_LOG_INT, "...MemManageFault with CFSR.MSTKERR\n");
            env->v7m.cfsr[secure] |= R_V7M_CFSR_MSTKERR_MASK;
            exc = ARMV7M_EXCP_MEM;
            exc_secure = secure;
        }
        goto pend_fault;
    }
    address_space_stl_le(arm_addressspace(cs, attrs), physaddr, value,
                         attrs, &txres);
    if (txres != MEMTX_OK) {
        /* The transaction itself failed: BusFault (always banked to NS) */
        qemu_log_mask(CPU_LOG_INT, "...BusFault with BFSR.STKERR\n");
        env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_STKERR_MASK;
        exc = ARMV7M_EXCP_BUS;
        exc_secure = false;
        goto pend_fault;
    }
    return true;

pend_fault:
    /*
     * Pend the fault as a derived exception (it results from the
     * exception-entry stacking we are doing on behalf of the guest),
     * unless the caller asked for faults to be ignored.
     */
    if (!ignfault) {
        armv7m_nvic_set_pending_derived(env->nvic, exc, exc_secure);
    }
    return false;
}
6417
/*
 * Load a 32-bit word from @addr for M-profile exception unstacking,
 * performing the MPU/SAU check and pending the appropriate fault
 * (SecureFault, MemManage or BusFault) if the read fails.
 * Returns true on success (result stored in *dest), false otherwise.
 */
static bool v7m_stack_read(ARMCPU *cpu, uint32_t *dest, uint32_t addr,
                           ARMMMUIdx mmu_idx)
{
    CPUState *cs = CPU(cpu);
    CPUARMState *env = &cpu->env;
    MemTxAttrs attrs = {};
    MemTxResult txres;
    target_ulong page_size;
    hwaddr physaddr;
    int prot;
    ARMMMUFaultInfo fi;
    bool secure = mmu_idx & ARM_MMU_IDX_M_S;
    int exc;
    bool exc_secure;
    uint32_t value;

    if (get_phys_addr(env, addr, MMU_DATA_LOAD, mmu_idx, &physaddr,
                      &attrs, &prot, &page_size, &fi, NULL)) {
        /* MPU/SAU lookup failed */
        if (fi.type == ARMFault_QEMU_SFault) {
            qemu_log_mask(CPU_LOG_INT,
                          "...SecureFault with SFSR.AUVIOL during unstack\n");
            env->v7m.sfsr |= R_V7M_SFSR_AUVIOL_MASK | R_V7M_SFSR_SFARVALID_MASK;
            env->v7m.sfar = addr;
            exc = ARMV7M_EXCP_SECURE;
            exc_secure = false;
        } else {
            qemu_log_mask(CPU_LOG_INT,
                          "...MemManageFault with CFSR.MUNSTKERR\n");
            env->v7m.cfsr[secure] |= R_V7M_CFSR_MUNSTKERR_MASK;
            exc = ARMV7M_EXCP_MEM;
            exc_secure = secure;
        }
        goto pend_fault;
    }

    value = address_space_ldl(arm_addressspace(cs, attrs), physaddr,
                              attrs, &txres);
    if (txres != MEMTX_OK) {
        /* The transaction itself failed: BusFault (always banked to NS) */
        qemu_log_mask(CPU_LOG_INT, "...BusFault with BFSR.UNSTKERR\n");
        env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_UNSTKERR_MASK;
        exc = ARMV7M_EXCP_BUS;
        exc_secure = false;
        goto pend_fault;
    }

    *dest = value;
    return true;

pend_fault:
    /*
     * Pend the fault. Unlike the stacking (write) case this is not a
     * derived exception: it is pended directly.
     */
    armv7m_nvic_set_pending(env->nvic, exc, exc_secure);
    return false;
}
6478
6479
6480static bool v7m_using_psp(CPUARMState *env)
6481{
6482
6483
6484
6485
6486
6487 return !arm_v7m_is_handler_mode(env) &&
6488 env->v7m.control[env->v7m.secure] & R_V7M_CONTROL_SPSEL_MASK;
6489}
6490
6491
6492
6493
6494
6495
/*
 * Write to CONTROL.SPSEL for the given security state. If that state
 * is the current one and the write changes which SP is in use, swap
 * regs[13] with the banked other_sp so the live SP stays correct.
 * Note: old_is_psp must be sampled before the CONTROL update.
 */
static void write_v7m_control_spsel_for_secstate(CPUARMState *env,
                                                 bool new_spsel,
                                                 bool secstate)
{
    bool old_is_psp = v7m_using_psp(env);

    env->v7m.control[secstate] =
        deposit32(env->v7m.control[secstate],
                  R_V7M_CONTROL_SPSEL_SHIFT,
                  R_V7M_CONTROL_SPSEL_LENGTH, new_spsel);

    if (secstate == env->v7m.secure) {
        bool new_is_psp = v7m_using_psp(env);
        uint32_t tmp;

        if (old_is_psp != new_is_psp) {
            /* Swap the live SP with the banked one */
            tmp = env->v7m.other_sp;
            env->v7m.other_sp = env->regs[13];
            env->regs[13] = tmp;
        }
    }
}
6518
6519
6520
6521
/*
 * Write to CONTROL.SPSEL of the current security state, swapping the
 * live SP with the banked one if the selection changes.
 */
static void write_v7m_control_spsel(CPUARMState *env, bool new_spsel)
{
    write_v7m_control_spsel_for_secstate(env, new_spsel, env->v7m.secure);
}
6526
void write_v7m_exception(CPUARMState *env, uint32_t new_exc)
{
    /*
     * Write a new value to v7m.exception; changing it may move the CPU
     * between Handler and Thread mode, which can change which stack
     * pointer is live, so sample before and after and swap if needed.
     */
    bool new_is_psp, old_is_psp = v7m_using_psp(env);
    uint32_t tmp;

    env->v7m.exception = new_exc;

    new_is_psp = v7m_using_psp(env);

    if (old_is_psp != new_is_psp) {
        tmp = env->v7m.other_sp;
        env->v7m.other_sp = env->regs[13];
        env->regs[13] = tmp;
    }
}
6545
6546
/* Switch M profile security state between NS and S */
static void switch_v7m_security_state(CPUARMState *env, bool new_secstate)
{
    uint32_t new_ss_msp, new_ss_psp;

    if (env->v7m.secure == new_secstate) {
        return;
    }

    /*
     * All the banked state is accessed by looking at env->v7m.secure
     * except other_ss_msp and other_ss_psp, which always hold the SPs
     * of the *non-current* security state. Capture those before the
     * flip, then save the outgoing state's SPs into them.
     */
    new_ss_msp = env->v7m.other_ss_msp;
    new_ss_psp = env->v7m.other_ss_psp;

    if (v7m_using_psp(env)) {
        env->v7m.other_ss_psp = env->regs[13];
        env->v7m.other_ss_msp = env->v7m.other_sp;
    } else {
        env->v7m.other_ss_msp = env->regs[13];
        env->v7m.other_ss_psp = env->v7m.other_sp;
    }

    env->v7m.secure = new_secstate;

    /* Now install the incoming state's SPs as live/banked */
    if (v7m_using_psp(env)) {
        env->regs[13] = new_ss_psp;
        env->v7m.other_sp = new_ss_msp;
    } else {
        env->regs[13] = new_ss_msp;
        env->v7m.other_sp = new_ss_psp;
    }
}
6579
void HELPER(v7m_bxns)(CPUARMState *env, uint32_t dest)
{
    /*
     * Handle v7M BXNS:
     *  - if the address is a magic value, do an exception return
     *    (or function return, with the security extension)
     *  - otherwise branch to the address, switching to NonSecure if
     *    bit 0 of the destination is clear
     */
    uint32_t min_magic;

    if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
        /* Covers FNC_RETURN and EXC_RETURN magic values */
        min_magic = FNC_RETURN_MIN_MAGIC;
    } else {
        /* EXC_RETURN magic values only */
        min_magic = EXC_RETURN_MIN_MAGIC;
    }

    if (dest >= min_magic) {
        /*
         * This is an exception return magic value; put it where the
         * do_v7m_exception_exit() / do_v7m_function_return() code
         * expects to find it (split across regs[15] and thumb) and
         * raise EXCP_EXCEPTION_EXIT. The helper longjmps out, so
         * execution never returns here.
         */
        env->regs[15] = dest & ~1;
        env->thumb = dest & 1;
        HELPER(exception_internal)(env, EXCP_EXCEPTION_EXIT);
        /* notreached */
    }

    /* translate.c should never generate BXNS calls from NonSecure state */
    assert(env->v7m.secure);

    switch_v7m_security_state(env, dest & 1);
    env->thumb = 1;
    env->regs[15] = dest & ~1;
}
6616
void HELPER(v7m_blxns)(CPUARMState *env, uint32_t dest)
{
    /*
     * Handle v7M BLXNS: a call which may transition to NonSecure.
     * If it does, the return address and partial PSR are pushed to the
     * secure stack and LR is set to the FNC_RETURN magic value.
     * At this point regs[15] is the address just after the BLXNS.
     */
    uint32_t nextinst = env->regs[15] | 1;
    uint32_t sp = env->regs[13] - 8;
    uint32_t saved_psr;

    /* translate.c will never generate calls here in user-only mode */
    assert(env->v7m.secure);

    if (dest & 1) {
        /*
         * Target is Secure: this is just an ordinary BLX,
         * except that the low bit doesn't indicate Thumb/not.
         */
        env->regs[14] = nextinst;
        env->thumb = 1;
        env->regs[15] = dest & ~1;
        return;
    }

    /* Target is NonSecure: stack the partial context on the Secure stack */
    if (!QEMU_IS_ALIGNED(sp, 8)) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "BLXNS with misaligned SP is UNPREDICTABLE\n");
    }

    saved_psr = env->v7m.exception;
    if (env->v7m.control[M_REG_S] & R_V7M_CONTROL_SFPA_MASK) {
        saved_psr |= XPSR_SFPA;
    }

    /* Note that these stores can throw exceptions on MPU faults */
    cpu_stl_data(env, sp, nextinst);
    cpu_stl_data(env, sp + 4, saved_psr);

    env->regs[13] = sp;
    env->regs[14] = 0xfeffffff;
    if (arm_v7m_is_handler_mode(env)) {
        /*
         * Mark the exception number in IPSR as invalid (1) so that an
         * attempt to return via the stacked FNC_RETURN can be checked
         * for consistency (see do_v7m_function_return).
         */
        write_v7m_exception(env, 1);
    }
    switch_v7m_security_state(env, 0);
    env->thumb = 1;
    env->regs[15] = dest;
}
6669
6670static uint32_t *get_v7m_sp_ptr(CPUARMState *env, bool secure, bool threadmode,
6671 bool spsel)
6672{
6673
6674
6675
6676
6677
6678
6679
6680
6681
6682
6683
6684
6685
6686
6687
6688 bool want_psp = threadmode && spsel;
6689
6690 if (secure == env->v7m.secure) {
6691 if (want_psp == v7m_using_psp(env)) {
6692 return &env->regs[13];
6693 } else {
6694 return &env->v7m.other_sp;
6695 }
6696 } else {
6697 if (want_psp) {
6698 return &env->v7m.other_ss_psp;
6699 } else {
6700 return &env->v7m.other_ss_msp;
6701 }
6702 }
6703}
6704
/*
 * Read the vector table entry for exception @exc from the vector table
 * of the requested security state. On success stores the entry in
 * *pvec and returns true; on failure pends a (derived) HardFault with
 * HFSR.VECTTBL set and returns false.
 */
static bool arm_v7m_load_vector(ARMCPU *cpu, int exc, bool targets_secure,
                                uint32_t *pvec)
{
    CPUState *cs = CPU(cpu);
    CPUARMState *env = &cpu->env;
    MemTxResult result;
    uint32_t addr = env->v7m.vecbase[targets_secure] + exc * 4;
    uint32_t vector_entry;
    MemTxAttrs attrs = {};
    ARMMMUIdx mmu_idx;
    bool exc_secure;

    mmu_idx = arm_v7m_mmu_idx_for_secstate_and_priv(env, targets_secure, true);

    /*
     * The vector table fetch is done as a privileged access of the
     * target security state; only the SAU/IDAU result is consulted
     * below (no MPU check), so set up the attrs directly.
     */
    attrs.secure = targets_secure;
    attrs.user = false;

    if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
        V8M_SAttributes sattrs = {};

        v8m_security_lookup(env, addr, MMU_DATA_LOAD, mmu_idx, &sattrs);
        if (sattrs.ns) {
            attrs.secure = false;
        } else if (!targets_secure) {
            /* NS access to S memory: fail the vector fetch */
            goto load_fail;
        }
    }

    vector_entry = address_space_ldl(arm_addressspace(cs, attrs), addr,
                                     attrs, &result);
    if (result != MEMTX_OK) {
        goto load_fail;
    }
    *pvec = vector_entry;
    return true;

load_fail:
    /*
     * All vector table fetch fails are reported as HardFault, with
     * HFSR.VECTTBL and .FORCED set. The HardFault targets Secure
     * unless BFHFNMINS has banked it to NonSecure.
     */
    exc_secure = targets_secure ||
        !(cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK);
    env->v7m.hfsr |= R_V7M_HFSR_VECTTBL_MASK | R_V7M_HFSR_FORCED_MASK;
    armv7m_nvic_set_pending_derived(env->nvic, ARMV7M_EXCP_HARD, exc_secure);
    return false;
}
6763
/*
 * v8M security extension: push the callee-saved registers (r4-r11)
 * plus the integrity signature onto the Secure stack as part of
 * exception entry. Returns true if stacking FAILED (note the inverted
 * sense, matching v7m_push_stack()).
 */
static bool v7m_push_callee_stack(ARMCPU *cpu, uint32_t lr, bool dotailchain,
                                  bool ignore_faults)
{
    CPUARMState *env = &cpu->env;
    uint32_t *frame_sp_p;
    uint32_t frameptr;
    ARMMMUIdx mmu_idx;
    bool stacked_ok;

    if (dotailchain) {
        /*
         * Tail-chaining: derive mode/privilege from the EXC_RETURN
         * value rather than the (already partly updated) CPU state.
         */
        bool mode = lr & R_V7M_EXCRET_MODE_MASK;
        bool priv = !(env->v7m.control[M_REG_S] & R_V7M_CONTROL_NPRIV_MASK) ||
            !mode;

        mmu_idx = arm_v7m_mmu_idx_for_secstate_and_priv(env, M_REG_S, priv);
        frame_sp_p = get_v7m_sp_ptr(env, M_REG_S, mode,
                                    lr & R_V7M_EXCRET_SPSEL_MASK);
    } else {
        mmu_idx = core_to_arm_mmu_idx(env, cpu_mmu_index(env, false));
        frame_sp_p = &env->regs[13];
    }

    frameptr = *frame_sp_p - 0x28;

    /*
     * Write as much of the stack frame as we can. A write failure may
     * cause us to pend a derived exception.
     * frameptr + 0x0 holds the integrity signature; +0x4 is reserved.
     */
    stacked_ok =
        v7m_stack_write(cpu, frameptr, 0xfefa125b, mmu_idx, ignore_faults) &&
        v7m_stack_write(cpu, frameptr + 0x8, env->regs[4], mmu_idx,
                        ignore_faults) &&
        v7m_stack_write(cpu, frameptr + 0xc, env->regs[5], mmu_idx,
                        ignore_faults) &&
        v7m_stack_write(cpu, frameptr + 0x10, env->regs[6], mmu_idx,
                        ignore_faults) &&
        v7m_stack_write(cpu, frameptr + 0x14, env->regs[7], mmu_idx,
                        ignore_faults) &&
        v7m_stack_write(cpu, frameptr + 0x18, env->regs[8], mmu_idx,
                        ignore_faults) &&
        v7m_stack_write(cpu, frameptr + 0x1c, env->regs[9], mmu_idx,
                        ignore_faults) &&
        v7m_stack_write(cpu, frameptr + 0x20, env->regs[10], mmu_idx,
                        ignore_faults) &&
        v7m_stack_write(cpu, frameptr + 0x24, env->regs[11], mmu_idx,
                        ignore_faults);

    /* Update SP regardless of whether any of the stack accesses failed */
    *frame_sp_p = frameptr;

    return !stacked_ok;
}
6822
/*
 * Do the "take the exception" parts of M-profile exception entry,
 * but not the pushing of state to the stack. This is called both for
 * fresh exception entry and for tail-chaining. @lr is the incoming
 * EXC_RETURN value, which this function adjusts before installing it
 * in r14.
 */
static void v7m_exception_taken(ARMCPU *cpu, uint32_t lr, bool dotailchain,
                                bool ignore_stackfaults)
{
    CPUARMState *env = &cpu->env;
    uint32_t addr;
    bool targets_secure;
    int exc;
    bool push_failed = false;

    armv7m_nvic_get_pending_irq_info(env->nvic, &exc, &targets_secure);

    if (arm_feature(env, ARM_FEATURE_V8)) {
        if (arm_feature(env, ARM_FEATURE_M_SECURITY) &&
            (lr & R_V7M_EXCRET_S_MASK)) {
            /* Exception is being taken from Secure state */
            if (targets_secure) {
                if (dotailchain && !(lr & R_V7M_EXCRET_ES_MASK)) {
                    /*
                     * Tail-chain from NS handler to S exception:
                     * the callee registers were not saved, so clear
                     * DCRS to note that in the new EXC_RETURN.
                     */
                    lr &= ~R_V7M_EXCRET_DCRS_MASK;
                }
            } else {
                /*
                 * Exception targets NonSecure but was taken from
                 * Secure: push the callee-saved registers so the S
                 * state is not visible to the NS handler (unless
                 * they were already pushed during a tail chain).
                 */
                if (lr & R_V7M_EXCRET_DCRS_MASK &&
                    !(dotailchain && !(lr & R_V7M_EXCRET_ES_MASK))) {
                    push_failed = v7m_push_callee_stack(cpu, lr, dotailchain,
                                                        ignore_stackfaults);
                }
                lr |= R_V7M_EXCRET_DCRS_MASK;
            }
        }

        /* Record the target security state and SPSEL in EXC_RETURN */
        lr &= ~R_V7M_EXCRET_ES_MASK;
        if (targets_secure || !arm_feature(env, ARM_FEATURE_M_SECURITY)) {
            lr |= R_V7M_EXCRET_ES_MASK;
        }
        lr &= ~R_V7M_EXCRET_SPSEL_MASK;
        if (env->v7m.control[targets_secure] & R_V7M_CONTROL_SPSEL_MASK) {
            lr |= R_V7M_EXCRET_SPSEL_MASK;
        }

        /*
         * Clear registers if necessary to prevent Secure register
         * values leaking to a NonSecure handler.
         */
        if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
            if (!targets_secure) {
                int i;

                for (i = 0; i < 13; i++) {
                    /*
                     * r4..r11 are callee-saves: only clear them if
                     * they were stacked (i.e. we came from Secure).
                     */
                    if (i < 4 || i > 11 || (lr & R_V7M_EXCRET_S_MASK)) {
                        env->regs[i] = 0;
                    }
                }
                /* Clear the caller-saved PSR bits too */
                xpsr_write(env, 0, XPSR_NZCV | XPSR_Q | XPSR_GE | XPSR_IT);
            }
        }
    }

    if (push_failed && !ignore_stackfaults) {
        /*
         * Callee-stacking derived-faulted: take the derived exception
         * instead, ignoring any further stacking faults to avoid
         * unbounded recursion.
         */
        v7m_exception_taken(cpu, lr, true, true);
        return;
    }

    if (!arm_v7m_load_vector(cpu, exc, targets_secure, &addr)) {
        /* Vector load failed: HardFault was pended; take it instead */
        v7m_exception_taken(cpu, lr, true, true);
        return;
    }

    /*
     * Now we've done everything that might cause a derived exception,
     * we can go ahead and activate the new exception.
     */
    armv7m_nvic_acknowledge_irq(env->nvic);

    /* Switch to target security state; handlers always use the MSP */
    switch_v7m_security_state(env, targets_secure);
    write_v7m_control_spsel(env, 0);
    arm_clear_exclusive(env);

    env->condexec_bits = 0;
    env->regs[14] = lr;
    env->regs[15] = addr & 0xfffffffe;
    env->thumb = addr & 1;
}
6936
/*
 * Push the caller-saved exception frame (r0-r3, r12, lr, pc, xPSR)
 * onto the current stack for M-profile exception entry.
 * Returns true if stacking FAILED (i.e. a derived exception was
 * pended), false on success.
 */
static bool v7m_push_stack(ARMCPU *cpu)
{
    bool stacked_ok;
    CPUARMState *env = &cpu->env;
    uint32_t xpsr = xpsr_read(env);
    uint32_t frameptr = env->regs[13];
    ARMMMUIdx mmu_idx = core_to_arm_mmu_idx(env, cpu_mmu_index(env, false));

    /* Align stack pointer if the guest wants that, noting it in xPSR */
    if ((frameptr & 4) &&
        (env->v7m.ccr[env->v7m.secure] & R_V7M_CCR_STKALIGN_MASK)) {
        frameptr -= 4;
        xpsr |= XPSR_SPREALIGN;
    }

    frameptr -= 0x20;

    /*
     * Write as much of the frame as we can. Each write may pend a
     * derived exception if it faults.
     */
    stacked_ok =
        v7m_stack_write(cpu, frameptr, env->regs[0], mmu_idx, false) &&
        v7m_stack_write(cpu, frameptr + 4, env->regs[1], mmu_idx, false) &&
        v7m_stack_write(cpu, frameptr + 8, env->regs[2], mmu_idx, false) &&
        v7m_stack_write(cpu, frameptr + 12, env->regs[3], mmu_idx, false) &&
        v7m_stack_write(cpu, frameptr + 16, env->regs[12], mmu_idx, false) &&
        v7m_stack_write(cpu, frameptr + 20, env->regs[14], mmu_idx, false) &&
        v7m_stack_write(cpu, frameptr + 24, env->regs[15], mmu_idx, false) &&
        v7m_stack_write(cpu, frameptr + 28, xpsr, mmu_idx, false);

    /* Update SP regardless of whether any of the stack accesses failed */
    env->regs[13] = frameptr;

    return !stacked_ok;
}
6983
6984static void do_v7m_exception_exit(ARMCPU *cpu)
6985{
6986 CPUARMState *env = &cpu->env;
6987 uint32_t excret;
6988 uint32_t xpsr;
6989 bool ufault = false;
6990 bool sfault = false;
6991 bool return_to_sp_process;
6992 bool return_to_handler;
6993 bool rettobase = false;
6994 bool exc_secure = false;
6995 bool return_to_secure;
6996
6997
6998
6999
7000
7001
7002
7003
7004
7005
7006
7007 if (!arm_v7m_is_handler_mode(env)) {
7008 return;
7009 }
7010
7011
7012
7013
7014
7015
7016
7017
7018 excret = env->regs[15];
7019 if (env->thumb) {
7020 excret |= 1;
7021 }
7022
7023 qemu_log_mask(CPU_LOG_INT, "Exception return: magic PC %" PRIx32
7024 " previous exception %d\n",
7025 excret, env->v7m.exception);
7026
7027 if ((excret & R_V7M_EXCRET_RES1_MASK) != R_V7M_EXCRET_RES1_MASK) {
7028 qemu_log_mask(LOG_GUEST_ERROR, "M profile: zero high bits in exception "
7029 "exit PC value 0x%" PRIx32 " are UNPREDICTABLE\n",
7030 excret);
7031 }
7032
7033 if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
7034
7035
7036
7037 if (!env->v7m.secure &&
7038 ((excret & R_V7M_EXCRET_ES_MASK) ||
7039 !(excret & R_V7M_EXCRET_DCRS_MASK))) {
7040 sfault = 1;
7041
7042 excret &= ~R_V7M_EXCRET_ES_MASK;
7043 }
7044 }
7045
7046 if (env->v7m.exception != ARMV7M_EXCP_NMI) {
7047
7048
7049
7050
7051
7052
7053 if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
7054 exc_secure = excret & R_V7M_EXCRET_ES_MASK;
7055 if (armv7m_nvic_raw_execution_priority(env->nvic) >= 0) {
7056 env->v7m.faultmask[exc_secure] = 0;
7057 }
7058 } else {
7059 env->v7m.faultmask[M_REG_NS] = 0;
7060 }
7061 }
7062
7063 switch (armv7m_nvic_complete_irq(env->nvic, env->v7m.exception,
7064 exc_secure)) {
7065 case -1:
7066
7067 ufault = true;
7068 break;
7069 case 0:
7070
7071 break;
7072 case 1:
7073
7074
7075
7076
7077 rettobase = true;
7078 break;
7079 default:
7080 g_assert_not_reached();
7081 }
7082
7083 return_to_handler = !(excret & R_V7M_EXCRET_MODE_MASK);
7084 return_to_sp_process = excret & R_V7M_EXCRET_SPSEL_MASK;
7085 return_to_secure = arm_feature(env, ARM_FEATURE_M_SECURITY) &&
7086 (excret & R_V7M_EXCRET_S_MASK);
7087
7088 if (arm_feature(env, ARM_FEATURE_V8)) {
7089 if (!arm_feature(env, ARM_FEATURE_M_SECURITY)) {
7090
7091
7092
7093 if ((excret & R_V7M_EXCRET_S_MASK) ||
7094 (excret & R_V7M_EXCRET_ES_MASK) ||
7095 !(excret & R_V7M_EXCRET_DCRS_MASK)) {
7096 ufault = true;
7097 }
7098 }
7099 if (excret & R_V7M_EXCRET_RES0_MASK) {
7100 ufault = true;
7101 }
7102 } else {
7103
7104 switch (excret & 0xf) {
7105 case 1:
7106 break;
7107 case 13:
7108 case 9:
7109
7110
7111
7112 if (!rettobase &&
7113 !(env->v7m.ccr[env->v7m.secure] &
7114 R_V7M_CCR_NONBASETHRDENA_MASK)) {
7115 ufault = true;
7116 }
7117 break;
7118 default:
7119 ufault = true;
7120 }
7121 }
7122
7123 if (sfault) {
7124 env->v7m.sfsr |= R_V7M_SFSR_INVER_MASK;
7125 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
7126 v7m_exception_taken(cpu, excret, true, false);
7127 qemu_log_mask(CPU_LOG_INT, "...taking SecureFault on existing "
7128 "stackframe: failed EXC_RETURN.ES validity check\n");
7129 return;
7130 }
7131
7132 if (ufault) {
7133
7134
7135
7136 env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK;
7137 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
7138 v7m_exception_taken(cpu, excret, true, false);
7139 qemu_log_mask(CPU_LOG_INT, "...taking UsageFault on existing "
7140 "stackframe: failed exception return integrity check\n");
7141 return;
7142 }
7143
7144
7145
7146
7147
7148 write_v7m_control_spsel_for_secstate(env, return_to_sp_process, exc_secure);
7149
7150 switch_v7m_security_state(env, return_to_secure);
7151
7152 {
7153
7154
7155
7156
7157
7158
7159
7160
7161
7162 uint32_t *frame_sp_p = get_v7m_sp_ptr(env,
7163 return_to_secure,
7164 !return_to_handler,
7165 return_to_sp_process);
7166 uint32_t frameptr = *frame_sp_p;
7167 bool pop_ok = true;
7168 ARMMMUIdx mmu_idx;
7169 bool return_to_priv = return_to_handler ||
7170 !(env->v7m.control[return_to_secure] & R_V7M_CONTROL_NPRIV_MASK);
7171
7172 mmu_idx = arm_v7m_mmu_idx_for_secstate_and_priv(env, return_to_secure,
7173 return_to_priv);
7174
7175 if (!QEMU_IS_ALIGNED(frameptr, 8) &&
7176 arm_feature(env, ARM_FEATURE_V8)) {
7177 qemu_log_mask(LOG_GUEST_ERROR,
7178 "M profile exception return with non-8-aligned SP "
7179 "for destination state is UNPREDICTABLE\n");
7180 }
7181
7182
7183 if (return_to_secure &&
7184 ((excret & R_V7M_EXCRET_ES_MASK) == 0 ||
7185 (excret & R_V7M_EXCRET_DCRS_MASK) == 0)) {
7186 uint32_t expected_sig = 0xfefa125b;
7187 uint32_t actual_sig;
7188
7189 pop_ok = v7m_stack_read(cpu, &actual_sig, frameptr, mmu_idx);
7190
7191 if (pop_ok && expected_sig != actual_sig) {
7192
7193 env->v7m.sfsr |= R_V7M_SFSR_INVIS_MASK;
7194 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
7195 v7m_exception_taken(cpu, excret, true, false);
7196 qemu_log_mask(CPU_LOG_INT, "...taking SecureFault on existing "
7197 "stackframe: failed exception return integrity "
7198 "signature check\n");
7199 return;
7200 }
7201
7202 pop_ok = pop_ok &&
7203 v7m_stack_read(cpu, &env->regs[4], frameptr + 0x8, mmu_idx) &&
7204 v7m_stack_read(cpu, &env->regs[4], frameptr + 0x8, mmu_idx) &&
7205 v7m_stack_read(cpu, &env->regs[5], frameptr + 0xc, mmu_idx) &&
7206 v7m_stack_read(cpu, &env->regs[6], frameptr + 0x10, mmu_idx) &&
7207 v7m_stack_read(cpu, &env->regs[7], frameptr + 0x14, mmu_idx) &&
7208 v7m_stack_read(cpu, &env->regs[8], frameptr + 0x18, mmu_idx) &&
7209 v7m_stack_read(cpu, &env->regs[9], frameptr + 0x1c, mmu_idx) &&
7210 v7m_stack_read(cpu, &env->regs[10], frameptr + 0x20, mmu_idx) &&
7211 v7m_stack_read(cpu, &env->regs[11], frameptr + 0x24, mmu_idx);
7212
7213 frameptr += 0x28;
7214 }
7215
7216
7217 pop_ok = pop_ok &&
7218 v7m_stack_read(cpu, &env->regs[0], frameptr, mmu_idx) &&
7219 v7m_stack_read(cpu, &env->regs[1], frameptr + 0x4, mmu_idx) &&
7220 v7m_stack_read(cpu, &env->regs[2], frameptr + 0x8, mmu_idx) &&
7221 v7m_stack_read(cpu, &env->regs[3], frameptr + 0xc, mmu_idx) &&
7222 v7m_stack_read(cpu, &env->regs[12], frameptr + 0x10, mmu_idx) &&
7223 v7m_stack_read(cpu, &env->regs[14], frameptr + 0x14, mmu_idx) &&
7224 v7m_stack_read(cpu, &env->regs[15], frameptr + 0x18, mmu_idx) &&
7225 v7m_stack_read(cpu, &xpsr, frameptr + 0x1c, mmu_idx);
7226
7227 if (!pop_ok) {
7228
7229
7230
7231 v7m_exception_taken(cpu, excret, true, false);
7232 return;
7233 }
7234
7235
7236
7237
7238
7239
7240
7241
7242
7243 if (env->regs[15] & 1) {
7244 env->regs[15] &= ~1U;
7245 if (!arm_feature(env, ARM_FEATURE_V8)) {
7246 qemu_log_mask(LOG_GUEST_ERROR,
7247 "M profile return from interrupt with misaligned "
7248 "PC is UNPREDICTABLE on v7M\n");
7249 }
7250 }
7251
7252 if (arm_feature(env, ARM_FEATURE_V8)) {
7253
7254
7255
7256
7257 bool will_be_handler = (xpsr & XPSR_EXCP) != 0;
7258 if (return_to_handler != will_be_handler) {
7259
7260
7261
7262
7263
7264 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
7265 env->v7m.secure);
7266 env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK;
7267 v7m_exception_taken(cpu, excret, true, false);
7268 qemu_log_mask(CPU_LOG_INT, "...taking UsageFault on existing "
7269 "stackframe: failed exception return integrity "
7270 "check\n");
7271 return;
7272 }
7273 }
7274
7275
7276 frameptr += 0x20;
7277
7278
7279
7280
7281
7282
7283 if (xpsr & XPSR_SPREALIGN) {
7284 frameptr |= 4;
7285 }
7286 *frame_sp_p = frameptr;
7287 }
7288
7289 xpsr_write(env, xpsr, ~XPSR_SPREALIGN);
7290
7291
7292
7293
7294
7295
7296 if (return_to_handler != arm_v7m_is_handler_mode(env)) {
7297
7298
7299
7300 bool ignore_stackfaults;
7301
7302 assert(!arm_feature(env, ARM_FEATURE_V8));
7303 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, false);
7304 env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK;
7305 ignore_stackfaults = v7m_push_stack(cpu);
7306 v7m_exception_taken(cpu, excret, false, ignore_stackfaults);
7307 qemu_log_mask(CPU_LOG_INT, "...taking UsageFault on new stackframe: "
7308 "failed exception return integrity check\n");
7309 return;
7310 }
7311
7312
7313 arm_clear_exclusive(env);
7314 qemu_log_mask(CPU_LOG_INT, "...successful exception return\n");
7315}
7316
/*
 * v8M security extension: handle a return to a FNC_RETURN magic value
 * (as created by BLXNS). Pops the return address and partial PSR that
 * BLXNS stacked, checks IPSR consistency, and switches back to Secure
 * state. Returns true on success, false if an INVPC UsageFault was
 * pended instead.
 */
static bool do_v7m_function_return(ARMCPU *cpu)
{
    CPUARMState *env = &cpu->env;
    uint32_t newpc, newpsr, newpsr_exc;

    qemu_log_mask(CPU_LOG_INT, "...really v7M secure function return\n");

    {
        bool threadmode, spsel;
        TCGMemOpIdx oi;
        ARMMMUIdx mmu_idx;
        uint32_t *frame_sp_p;
        uint32_t frameptr;

        /* Pull the return address and IPSR from the Secure stack */
        threadmode = !arm_v7m_is_handler_mode(env);
        spsel = env->v7m.control[M_REG_S] & R_V7M_CONTROL_SPSEL_MASK;

        frame_sp_p = get_v7m_sp_ptr(env, true, threadmode, spsel);
        frameptr = *frame_sp_p;

        /*
         * These loads may throw an exception (for MPU faults); we want
         * them to be done as Secure privileged loads.
         */
        mmu_idx = arm_v7m_mmu_idx_for_secstate(env, true);
        oi = make_memop_idx(MO_LE, arm_to_core_mmu_idx(mmu_idx));
        newpc = helper_le_ldul_mmu(env, frameptr, oi, 0);
        newpsr = helper_le_ldul_mmu(env, frameptr + 4, oi, 0);

        /* Consistency checks on the new IPSR vs the current one */
        newpsr_exc = newpsr & XPSR_EXCP;
        if (!((env->v7m.exception == 0 && newpsr_exc == 0) ||
              (env->v7m.exception == 1 && newpsr_exc != 0))) {
            /* Pend the fault and tell our caller to take it */
            env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK;
            armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
                                    env->v7m.secure);
            qemu_log_mask(CPU_LOG_INT,
                          "...taking INVPC UsageFault: "
                          "IPSR consistency check failed\n");
            return false;
        }

        *frame_sp_p = frameptr + 8;
    }

    /* This invalidates frame_sp_p */
    switch_v7m_security_state(env, true);
    env->v7m.exception = newpsr_exc;
    env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_SFPA_MASK;
    if (newpsr & XPSR_SFPA) {
        env->v7m.control[M_REG_S] |= R_V7M_CONTROL_SFPA_MASK;
    }
    xpsr_write(env, 0, XPSR_IT);
    env->thumb = newpc & 1;
    env->regs[15] = newpc & ~1;

    qemu_log_mask(CPU_LOG_INT, "...function return successful\n");
    return true;
}
7388
7389static void arm_log_exception(int idx)
7390{
7391 if (qemu_loglevel_mask(CPU_LOG_INT)) {
7392 const char *exc = NULL;
7393 static const char * const excnames[] = {
7394 [EXCP_UDEF] = "Undefined Instruction",
7395 [EXCP_SWI] = "SVC",
7396 [EXCP_PREFETCH_ABORT] = "Prefetch Abort",
7397 [EXCP_DATA_ABORT] = "Data Abort",
7398 [EXCP_IRQ] = "IRQ",
7399 [EXCP_FIQ] = "FIQ",
7400 [EXCP_BKPT] = "Breakpoint",
7401 [EXCP_EXCEPTION_EXIT] = "QEMU v7M exception exit",
7402 [EXCP_KERNEL_TRAP] = "QEMU intercept of kernel commpage",
7403 [EXCP_HVC] = "Hypervisor Call",
7404 [EXCP_HYP_TRAP] = "Hypervisor Trap",
7405 [EXCP_SMC] = "Secure Monitor Call",
7406 [EXCP_VIRQ] = "Virtual IRQ",
7407 [EXCP_VFIQ] = "Virtual FIQ",
7408 [EXCP_SEMIHOST] = "Semihosting call",
7409 [EXCP_NOCP] = "v7M NOCP UsageFault",
7410 [EXCP_INVSTATE] = "v7M INVSTATE UsageFault",
7411 };
7412
7413 if (idx >= 0 && idx < ARRAY_SIZE(excnames)) {
7414 exc = excnames[idx];
7415 }
7416 if (!exc) {
7417 exc = "unknown";
7418 }
7419 qemu_log_mask(CPU_LOG_INT, "Taking exception %d [%s]\n", idx, exc);
7420 }
7421}
7422
/*
 * Load a 16-bit portion of a v7M instruction from @addr for the
 * "is this really an SG instruction?" check, returning true on
 * success. The fetch must come from Secure & NonSecure-callable
 * memory; otherwise (or on an MPU/bus fault) the appropriate fault
 * is pended and false is returned.
 */
static bool v7m_read_half_insn(ARMCPU *cpu, ARMMMUIdx mmu_idx,
                               uint32_t addr, uint16_t *insn)
{
    CPUState *cs = CPU(cpu);
    CPUARMState *env = &cpu->env;
    V8M_SAttributes sattrs = {};
    MemTxAttrs attrs = {};
    ARMMMUFaultInfo fi = {};
    MemTxResult txres;
    target_ulong page_size;
    hwaddr physaddr;
    int prot;

    v8m_security_lookup(env, addr, MMU_INST_FETCH, mmu_idx, &sattrs);
    if (!sattrs.nsc || sattrs.ns) {
        /*
         * This must be the second half of the insn, and it straddles a
         * region boundary with the second half not being S&NSC.
         */
        env->v7m.sfsr |= R_V7M_SFSR_INVEP_MASK;
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
        qemu_log_mask(CPU_LOG_INT,
                      "...really SecureFault with SFSR.INVEP\n");
        return false;
    }
    if (get_phys_addr(env, addr, MMU_INST_FETCH, mmu_idx,
                      &physaddr, &attrs, &prot, &page_size, &fi, NULL)) {
        /* The MPU settings don't allow execution from this address */
        env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_IACCVIOL_MASK;
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_MEM, env->v7m.secure);
        qemu_log_mask(CPU_LOG_INT, "...really MemManage with CFSR.IACCVIOL\n");
        return false;
    }
    *insn = address_space_lduw_le(arm_addressspace(cs, attrs), physaddr,
                                  attrs, &txres);
    if (txres != MEMTX_OK) {
        /* The fetch itself failed on the bus: BusFault */
        env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_IBUSERR_MASK;
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_BUS, false);
        qemu_log_mask(CPU_LOG_INT, "...really BusFault with CFSR.IBUSERR\n");
        return false;
    }
    return true;
}
7476
/*
 * Called when a NonSecure CPU attempts to execute from Secure &
 * NonSecure-callable memory. If the instruction there really is SG
 * (0xe97fe97f), execute it (switching to Secure state); otherwise
 * pend a SecureFault with SFSR.INVEP. Returns true if the SG was
 * executed, false if a fault was pended.
 */
static bool v7m_handle_execute_nsc(ARMCPU *cpu)
{
    CPUARMState *env = &cpu->env;
    ARMMMUIdx mmu_idx;
    uint16_t insn;

    /*
     * We should never get here unless get_phys_addr_pmsav8() caused
     * an exception for NS executing in S&NSC memory.
     */
    assert(!env->v7m.secure);
    assert(arm_feature(env, ARM_FEATURE_M_SECURITY));

    /* We want to do the MPU lookup as Secure; work out what mmu_idx that is */
    mmu_idx = arm_v7m_mmu_idx_for_secstate(env, true);

    if (!v7m_read_half_insn(cpu, mmu_idx, env->regs[15], &insn)) {
        return false;
    }

    if (!env->thumb) {
        goto gen_invep;
    }

    if (insn != 0xe97f) {
        /*
         * Not an SG instruction first half (the first half of SG is
         * 0xe97f); this must be a genuine branch to a non-entry point.
         */
        goto gen_invep;
    }

    if (!v7m_read_half_insn(cpu, mmu_idx, env->regs[15] + 2, &insn)) {
        return false;
    }

    if (insn != 0xe97f) {
        /*
         * Not an SG instruction second half (yes, both halves of the
         * SG insn have the same hex value).
         */
        goto gen_invep;
    }

    /*
     * OK, we have confirmed that we really have an SG instruction.
     * We know we're NS in S memory so don't need to repeat those checks.
     */
    qemu_log_mask(CPU_LOG_INT, "...really an SG instruction at 0x%08" PRIx32
                  ", executing it\n", env->regs[15]);
    env->regs[14] &= ~1;
    switch_v7m_security_state(env, true);
    xpsr_write(env, 0, XPSR_IT);
    env->regs[15] += 4;
    return true;

gen_invep:
    env->v7m.sfsr |= R_V7M_SFSR_INVEP_MASK;
    armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
    qemu_log_mask(CPU_LOG_INT,
                  "...really SecureFault with SFSR.INVEP\n");
    return false;
}
7541
/* Exception entry for M-profile CPUs: record fault status for the
 * exception, pend it on the NVIC, then push the stack frame and compute
 * the EXC_RETURN magic value in LR.
 */
void arm_v7m_cpu_do_interrupt(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    uint32_t lr;
    bool ignore_stackfaults;

    arm_log_exception(cs->exception_index);

    /* For most exceptions we just mark them as pending on the NVIC and
     * let the NVIC prioritization logic sort out when they are taken.
     */
    switch (cs->exception_index) {
    case EXCP_UDEF:
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
        env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_UNDEFINSTR_MASK;
        break;
    case EXCP_NOCP:
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
        env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_NOCP_MASK;
        break;
    case EXCP_INVSTATE:
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
        env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVSTATE_MASK;
        break;
    case EXCP_SWI:
        /* The PC already points to the next instruction.  */
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SVC, env->v7m.secure);
        break;
    case EXCP_PREFETCH_ABORT:
    case EXCP_DATA_ABORT:
        /* M profile has no guest-facing FSR, but env->exception.fsr was
         * filled in by the fault-raising code (including the two "fake"
         * FSR values used to signal v8M security faults).
         */
        switch (env->exception.fsr & 0xf) {
        case M_FAKE_FSR_NSC_EXEC:
            /* Attempt to execute at an address marked Secure &
             * Non-Secure Callable while in NS state: the only valid
             * instruction there is SG, handled below.
             */
            if (v7m_handle_execute_nsc(cpu)) {
                return;
            }
            break;
        case M_FAKE_FSR_SFAULT:
            /* Various flavours of SecureFault for attempts to execute
             * or access data in the wrong security state.
             */
            switch (cs->exception_index) {
            case EXCP_PREFETCH_ABORT:
                if (env->v7m.secure) {
                    env->v7m.sfsr |= R_V7M_SFSR_INVTRAN_MASK;
                    qemu_log_mask(CPU_LOG_INT,
                                  "...really SecureFault with SFSR.INVTRAN\n");
                } else {
                    env->v7m.sfsr |= R_V7M_SFSR_INVEP_MASK;
                    qemu_log_mask(CPU_LOG_INT,
                                  "...really SecureFault with SFSR.INVEP\n");
                }
                break;
            case EXCP_DATA_ABORT:
                /* This must be an NS access to S memory */
                env->v7m.sfsr |= R_V7M_SFSR_AUVIOL_MASK;
                qemu_log_mask(CPU_LOG_INT,
                              "...really SecureFault with SFSR.AUVIOL\n");
                break;
            }
            armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
            break;
        case 0x8: /* External Abort: always a BusFault, taken NS */
            switch (cs->exception_index) {
            case EXCP_PREFETCH_ABORT:
                env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_IBUSERR_MASK;
                qemu_log_mask(CPU_LOG_INT, "...with CFSR.IBUSERR\n");
                break;
            case EXCP_DATA_ABORT:
                env->v7m.cfsr[M_REG_NS] |=
                    (R_V7M_CFSR_PRECISERR_MASK | R_V7M_CFSR_BFARVALID_MASK);
                env->v7m.bfar = env->exception.vaddress;
                qemu_log_mask(CPU_LOG_INT,
                              "...with CFSR.PRECISERR and BFAR 0x%x\n",
                              env->v7m.bfar);
                break;
            }
            armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_BUS, false);
            break;
        default:
            /* All other FSR values are either MPU faults or
             * "can't happen for M profile" cases; treat them all
             * as MemManage faults, banked by security state.
             */
            switch (cs->exception_index) {
            case EXCP_PREFETCH_ABORT:
                env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_IACCVIOL_MASK;
                qemu_log_mask(CPU_LOG_INT, "...with CFSR.IACCVIOL\n");
                break;
            case EXCP_DATA_ABORT:
                env->v7m.cfsr[env->v7m.secure] |=
                    (R_V7M_CFSR_DACCVIOL_MASK | R_V7M_CFSR_MMARVALID_MASK);
                env->v7m.mmfar[env->v7m.secure] = env->exception.vaddress;
                qemu_log_mask(CPU_LOG_INT,
                              "...with CFSR.DACCVIOL and MMFAR 0x%x\n",
                              env->v7m.mmfar[env->v7m.secure]);
                break;
            }
            armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_MEM,
                                    env->v7m.secure);
            break;
        }
        break;
    case EXCP_BKPT:
        /* BKPT 0xab is the semihosting trap on M profile */
        if (semihosting_enabled()) {
            int nr;
            nr = arm_lduw_code(env, env->regs[15], arm_sctlr_b(env)) & 0xff;
            if (nr == 0xab) {
                env->regs[15] += 2;
                qemu_log_mask(CPU_LOG_INT,
                              "...handling as semihosting call 0x%x\n",
                              env->regs[0]);
                env->regs[0] = do_arm_semihosting(env);
                return;
            }
        }
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_DEBUG, false);
        break;
    case EXCP_IRQ:
        /* Nothing to record: the NVIC already knows which IRQ is pending */
        break;
    case EXCP_EXCEPTION_EXIT:
        if (env->regs[15] < EXC_RETURN_MIN_MAGIC) {
            /* Must be v8M security extension function return */
            assert(env->regs[15] >= FNC_RETURN_MIN_MAGIC);
            assert(arm_feature(env, ARM_FEATURE_M_SECURITY));
            if (do_v7m_function_return(cpu)) {
                return;
            }
        } else {
            do_v7m_exception_exit(cpu);
            return;
        }
        break;
    default:
        cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
        return; /* Never happens.  Keep compiler happy.  */
    }

    if (arm_feature(env, ARM_FEATURE_V8)) {
        /* v8M EXC_RETURN: RES1, DCRS and FTYPE always set here; the S bit
         * reflects the security state we were in when the exception was
         * taken. SPSEL is not set here because v8M stores the pre-exception
         * SPSEL in the CONTROL register instead.
         */
        lr = R_V7M_EXCRET_RES1_MASK |
            R_V7M_EXCRET_DCRS_MASK |
            R_V7M_EXCRET_FTYPE_MASK;
        if (env->v7m.secure) {
            lr |= R_V7M_EXCRET_S_MASK;
        }
    } else {
        /* Legacy v7M EXC_RETURN: bits which are RES1 for v7M (including
         * the S and ES bits which only have meaning with the security
         * extension) are all set; SPSEL reflects CONTROL.SPSEL.
         */
        lr = R_V7M_EXCRET_RES1_MASK |
            R_V7M_EXCRET_S_MASK |
            R_V7M_EXCRET_DCRS_MASK |
            R_V7M_EXCRET_FTYPE_MASK |
            R_V7M_EXCRET_ES_MASK;
        if (env->v7m.control[M_REG_NS] & R_V7M_CONTROL_SPSEL_MASK) {
            lr |= R_V7M_EXCRET_SPSEL_MASK;
        }
    }
    if (!arm_v7m_is_handler_mode(env)) {
        lr |= R_V7M_EXCRET_MODE_MASK;
    }

    ignore_stackfaults = v7m_push_stack(cpu);
    v7m_exception_taken(cpu, lr, false, ignore_stackfaults);
    qemu_log_mask(CPU_LOG_INT, "... as %d\n", env->v7m.exception);
}
7726
7727
7728
7729
7730
7731void aarch64_sync_32_to_64(CPUARMState *env)
7732{
7733 int i;
7734 uint32_t mode = env->uncached_cpsr & CPSR_M;
7735
7736
7737 for (i = 0; i < 8; i++) {
7738 env->xregs[i] = env->regs[i];
7739 }
7740
7741
7742
7743
7744 if (mode == ARM_CPU_MODE_FIQ) {
7745 for (i = 8; i < 13; i++) {
7746 env->xregs[i] = env->usr_regs[i - 8];
7747 }
7748 } else {
7749 for (i = 8; i < 13; i++) {
7750 env->xregs[i] = env->regs[i];
7751 }
7752 }
7753
7754
7755
7756
7757
7758 if (mode == ARM_CPU_MODE_USR || mode == ARM_CPU_MODE_SYS) {
7759 env->xregs[13] = env->regs[13];
7760 env->xregs[14] = env->regs[14];
7761 } else {
7762 env->xregs[13] = env->banked_r13[bank_number(ARM_CPU_MODE_USR)];
7763
7764 if (mode == ARM_CPU_MODE_HYP) {
7765 env->xregs[14] = env->regs[14];
7766 } else {
7767 env->xregs[14] = env->banked_r14[bank_number(ARM_CPU_MODE_USR)];
7768 }
7769 }
7770
7771 if (mode == ARM_CPU_MODE_HYP) {
7772 env->xregs[15] = env->regs[13];
7773 } else {
7774 env->xregs[15] = env->banked_r13[bank_number(ARM_CPU_MODE_HYP)];
7775 }
7776
7777 if (mode == ARM_CPU_MODE_IRQ) {
7778 env->xregs[16] = env->regs[14];
7779 env->xregs[17] = env->regs[13];
7780 } else {
7781 env->xregs[16] = env->banked_r14[bank_number(ARM_CPU_MODE_IRQ)];
7782 env->xregs[17] = env->banked_r13[bank_number(ARM_CPU_MODE_IRQ)];
7783 }
7784
7785 if (mode == ARM_CPU_MODE_SVC) {
7786 env->xregs[18] = env->regs[14];
7787 env->xregs[19] = env->regs[13];
7788 } else {
7789 env->xregs[18] = env->banked_r14[bank_number(ARM_CPU_MODE_SVC)];
7790 env->xregs[19] = env->banked_r13[bank_number(ARM_CPU_MODE_SVC)];
7791 }
7792
7793 if (mode == ARM_CPU_MODE_ABT) {
7794 env->xregs[20] = env->regs[14];
7795 env->xregs[21] = env->regs[13];
7796 } else {
7797 env->xregs[20] = env->banked_r14[bank_number(ARM_CPU_MODE_ABT)];
7798 env->xregs[21] = env->banked_r13[bank_number(ARM_CPU_MODE_ABT)];
7799 }
7800
7801 if (mode == ARM_CPU_MODE_UND) {
7802 env->xregs[22] = env->regs[14];
7803 env->xregs[23] = env->regs[13];
7804 } else {
7805 env->xregs[22] = env->banked_r14[bank_number(ARM_CPU_MODE_UND)];
7806 env->xregs[23] = env->banked_r13[bank_number(ARM_CPU_MODE_UND)];
7807 }
7808
7809
7810
7811
7812
7813 if (mode == ARM_CPU_MODE_FIQ) {
7814 for (i = 24; i < 31; i++) {
7815 env->xregs[i] = env->regs[i - 16];
7816 }
7817 } else {
7818 for (i = 24; i < 29; i++) {
7819 env->xregs[i] = env->fiq_regs[i - 24];
7820 }
7821 env->xregs[29] = env->banked_r13[bank_number(ARM_CPU_MODE_FIQ)];
7822 env->xregs[30] = env->banked_r14[bank_number(ARM_CPU_MODE_FIQ)];
7823 }
7824
7825 env->pc = env->regs[15];
7826}
7827
7828
7829
7830
7831
7832void aarch64_sync_64_to_32(CPUARMState *env)
7833{
7834 int i;
7835 uint32_t mode = env->uncached_cpsr & CPSR_M;
7836
7837
7838 for (i = 0; i < 8; i++) {
7839 env->regs[i] = env->xregs[i];
7840 }
7841
7842
7843
7844
7845 if (mode == ARM_CPU_MODE_FIQ) {
7846 for (i = 8; i < 13; i++) {
7847 env->usr_regs[i - 8] = env->xregs[i];
7848 }
7849 } else {
7850 for (i = 8; i < 13; i++) {
7851 env->regs[i] = env->xregs[i];
7852 }
7853 }
7854
7855
7856
7857
7858
7859
7860 if (mode == ARM_CPU_MODE_USR || mode == ARM_CPU_MODE_SYS) {
7861 env->regs[13] = env->xregs[13];
7862 env->regs[14] = env->xregs[14];
7863 } else {
7864 env->banked_r13[bank_number(ARM_CPU_MODE_USR)] = env->xregs[13];
7865
7866
7867
7868
7869 if (mode == ARM_CPU_MODE_HYP) {
7870 env->regs[14] = env->xregs[14];
7871 } else {
7872 env->banked_r14[bank_number(ARM_CPU_MODE_USR)] = env->xregs[14];
7873 }
7874 }
7875
7876 if (mode == ARM_CPU_MODE_HYP) {
7877 env->regs[13] = env->xregs[15];
7878 } else {
7879 env->banked_r13[bank_number(ARM_CPU_MODE_HYP)] = env->xregs[15];
7880 }
7881
7882 if (mode == ARM_CPU_MODE_IRQ) {
7883 env->regs[14] = env->xregs[16];
7884 env->regs[13] = env->xregs[17];
7885 } else {
7886 env->banked_r14[bank_number(ARM_CPU_MODE_IRQ)] = env->xregs[16];
7887 env->banked_r13[bank_number(ARM_CPU_MODE_IRQ)] = env->xregs[17];
7888 }
7889
7890 if (mode == ARM_CPU_MODE_SVC) {
7891 env->regs[14] = env->xregs[18];
7892 env->regs[13] = env->xregs[19];
7893 } else {
7894 env->banked_r14[bank_number(ARM_CPU_MODE_SVC)] = env->xregs[18];
7895 env->banked_r13[bank_number(ARM_CPU_MODE_SVC)] = env->xregs[19];
7896 }
7897
7898 if (mode == ARM_CPU_MODE_ABT) {
7899 env->regs[14] = env->xregs[20];
7900 env->regs[13] = env->xregs[21];
7901 } else {
7902 env->banked_r14[bank_number(ARM_CPU_MODE_ABT)] = env->xregs[20];
7903 env->banked_r13[bank_number(ARM_CPU_MODE_ABT)] = env->xregs[21];
7904 }
7905
7906 if (mode == ARM_CPU_MODE_UND) {
7907 env->regs[14] = env->xregs[22];
7908 env->regs[13] = env->xregs[23];
7909 } else {
7910 env->banked_r14[bank_number(ARM_CPU_MODE_UND)] = env->xregs[22];
7911 env->banked_r13[bank_number(ARM_CPU_MODE_UND)] = env->xregs[23];
7912 }
7913
7914
7915
7916
7917
7918 if (mode == ARM_CPU_MODE_FIQ) {
7919 for (i = 24; i < 31; i++) {
7920 env->regs[i - 16] = env->xregs[i];
7921 }
7922 } else {
7923 for (i = 24; i < 29; i++) {
7924 env->fiq_regs[i - 24] = env->xregs[i];
7925 }
7926 env->banked_r13[bank_number(ARM_CPU_MODE_FIQ)] = env->xregs[29];
7927 env->banked_r14[bank_number(ARM_CPU_MODE_FIQ)] = env->xregs[30];
7928 }
7929
7930 env->regs[15] = env->pc;
7931}
7932
/* Handle exception entry to an AArch32 target EL: pick the new mode,
 * vector address, LR offset and interrupt masks, then perform the mode
 * switch and CPSR update.
 */
static void arm_cpu_do_interrupt_aarch32(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    uint32_t addr;
    uint32_t mask;
    int new_mode;
    uint32_t offset;
    uint32_t moe;

    /* If this is a debug exception we must update the DBGDSCR.MOE bits */
    switch (env->exception.syndrome >> ARM_EL_EC_SHIFT) {
    case EC_BREAKPOINT:
    case EC_BREAKPOINT_SAME_EL:
        moe = 1;
        break;
    case EC_WATCHPOINT:
    case EC_WATCHPOINT_SAME_EL:
        moe = 10;
        break;
    case EC_AA32_BKPT:
        moe = 3;
        break;
    case EC_VECTORCATCH:
        moe = 5;
        break;
    default:
        moe = 0;
        break;
    }

    if (moe) {
        env->cp15.mdscr_el1 = deposit64(env->cp15.mdscr_el1, 2, 4, moe);
    }

    /* TODO: Vectored interrupt controller.  */
    switch (cs->exception_index) {
    case EXCP_UDEF:
        new_mode = ARM_CPU_MODE_UND;
        addr = 0x04;
        mask = CPSR_I;
        if (env->thumb)
            offset = 2;
        else
            offset = 4;
        break;
    case EXCP_SWI:
        new_mode = ARM_CPU_MODE_SVC;
        addr = 0x08;
        mask = CPSR_I;
        /* The PC already points to the next instruction.  */
        offset = 0;
        break;
    case EXCP_BKPT:
        /* Fall through to prefetch abort.  */
    case EXCP_PREFETCH_ABORT:
        A32_BANKED_CURRENT_REG_SET(env, ifsr, env->exception.fsr);
        A32_BANKED_CURRENT_REG_SET(env, ifar, env->exception.vaddress);
        qemu_log_mask(CPU_LOG_INT, "...with IFSR 0x%x IFAR 0x%x\n",
                      env->exception.fsr, (uint32_t)env->exception.vaddress);
        new_mode = ARM_CPU_MODE_ABT;
        addr = 0x0c;
        mask = CPSR_A | CPSR_I;
        offset = 4;
        break;
    case EXCP_DATA_ABORT:
        A32_BANKED_CURRENT_REG_SET(env, dfsr, env->exception.fsr);
        A32_BANKED_CURRENT_REG_SET(env, dfar, env->exception.vaddress);
        qemu_log_mask(CPU_LOG_INT, "...with DFSR 0x%x DFAR 0x%x\n",
                      env->exception.fsr,
                      (uint32_t)env->exception.vaddress);
        new_mode = ARM_CPU_MODE_ABT;
        addr = 0x10;
        mask = CPSR_A | CPSR_I;
        offset = 8;
        break;
    case EXCP_IRQ:
        new_mode = ARM_CPU_MODE_IRQ;
        addr = 0x18;
        /* Disable IRQ and imprecise data aborts.  */
        mask = CPSR_A | CPSR_I;
        offset = 4;
        if (env->cp15.scr_el3 & SCR_IRQ) {
            /* IRQ routed to monitor mode */
            new_mode = ARM_CPU_MODE_MON;
            mask |= CPSR_F;
        }
        break;
    case EXCP_FIQ:
        new_mode = ARM_CPU_MODE_FIQ;
        addr = 0x1c;
        /* Disable FIQ, IRQ and imprecise data aborts.  */
        mask = CPSR_A | CPSR_I | CPSR_F;
        if (env->cp15.scr_el3 & SCR_FIQ) {
            /* FIQ routed to monitor mode */
            new_mode = ARM_CPU_MODE_MON;
        }
        offset = 4;
        break;
    case EXCP_VIRQ:
        new_mode = ARM_CPU_MODE_IRQ;
        addr = 0x18;
        /* Disable IRQ and imprecise data aborts.  */
        mask = CPSR_A | CPSR_I;
        offset = 4;
        break;
    case EXCP_VFIQ:
        new_mode = ARM_CPU_MODE_FIQ;
        addr = 0x1c;
        /* Disable FIQ, IRQ and imprecise data aborts.  */
        mask = CPSR_A | CPSR_I | CPSR_F;
        offset = 4;
        break;
    case EXCP_SMC:
        new_mode = ARM_CPU_MODE_MON;
        addr = 0x08;
        mask = CPSR_A | CPSR_I | CPSR_F;
        offset = 0;
        break;
    default:
        cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
        return; /* Never happens.  Keep compiler happy.  */
    }

    if (new_mode == ARM_CPU_MODE_MON) {
        addr += env->cp15.mvbar;
    } else if (A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_V) {
        /* High vectors. When enabled, base address cannot be remapped. */
        addr += 0xffff0000;
    } else {
        /* ARM v7 architectures provide a vector base address register
         * to remap the interrupt vector table.
         * This register is only followed in non-monitor mode, and is banked.
         * Note: only bits 31:5 are valid.
         */
        addr += A32_BANKED_CURRENT_REG_GET(env, vbar);
    }

    if ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_MON) {
        env->cp15.scr_el3 &= ~SCR_NS;
    }

    switch_mode (env, new_mode);
    /* For exceptions taken to AArch32 we must clear the SS bit in both
     * PSTATE and in the old-state value we save to SPSR_<mode>, so zero
     * it now.
     */
    env->uncached_cpsr &= ~PSTATE_SS;
    env->spsr = cpsr_read(env);
    /* Clear IT bits.  */
    env->condexec_bits = 0;
    /* Switch to the new mode, and to the correct instruction set.  */
    env->uncached_cpsr = (env->uncached_cpsr & ~CPSR_M) | new_mode;
    /* Set new mode endianness from SCTLR.EE */
    env->uncached_cpsr &= ~CPSR_E;
    if (env->cp15.sctlr_el[arm_current_el(env)] & SCTLR_EE) {
        env->uncached_cpsr |= CPSR_E;
    }
    env->daif |= mask;
    /* Enter the new mode in Thumb state if SCTLR.TE is set
     * (only meaningful from v4T onwards).
     */
    if (arm_feature(env, ARM_FEATURE_V4T)) {
        env->thumb = (A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_TE) != 0;
    }
    env->regs[14] = env->regs[15] + offset;
    env->regs[15] = addr;
}
8099
8100
/* Handle exception entry to a target EL which is using AArch64 */
static void arm_cpu_do_interrupt_aarch64(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    unsigned int new_el = env->exception.target_el;
    target_ulong addr = env->cp15.vbar_el[new_el];
    unsigned int new_mode = aarch64_pstate_mode(new_el, true);

    if (arm_current_el(env) < new_el) {
        /* Entry vector offset depends on whether the implemented EL
         * immediately lower than the target level is using AArch32 or
         * AArch64.
         */
        bool is_aa64;

        switch (new_el) {
        case 3:
            is_aa64 = (env->cp15.scr_el3 & SCR_RW) != 0;
            break;
        case 2:
            is_aa64 = (env->cp15.hcr_el2 & HCR_RW) != 0;
            break;
        case 1:
            is_aa64 = is_a64(env);
            break;
        default:
            g_assert_not_reached();
        }

        if (is_aa64) {
            addr += 0x400;
        } else {
            addr += 0x600;
        }
    } else if (pstate_read(env) & PSTATE_SP) {
        /* Same EL, SP_ELx selected: use the SPx vector entries */
        addr += 0x200;
    }

    switch (cs->exception_index) {
    case EXCP_PREFETCH_ABORT:
    case EXCP_DATA_ABORT:
        env->cp15.far_el[new_el] = env->exception.vaddress;
        qemu_log_mask(CPU_LOG_INT, "...with FAR 0x%" PRIx64 "\n",
                      env->cp15.far_el[new_el]);
        /* fall through */
    case EXCP_BKPT:
    case EXCP_UDEF:
    case EXCP_SWI:
    case EXCP_HVC:
    case EXCP_HYP_TRAP:
    case EXCP_SMC:
        env->cp15.esr_el[new_el] = env->exception.syndrome;
        break;
    case EXCP_IRQ:
    case EXCP_VIRQ:
        addr += 0x80;
        break;
    case EXCP_FIQ:
    case EXCP_VFIQ:
        addr += 0x100;
        break;
    case EXCP_SEMIHOST:
        qemu_log_mask(CPU_LOG_INT,
                      "...handling as semihosting call 0x%" PRIx64 "\n",
                      env->xregs[0]);
        env->xregs[0] = do_arm_semihosting(env);
        return;
    default:
        cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
    }

    if (is_a64(env)) {
        env->banked_spsr[aarch64_banked_spsr_index(new_el)] = pstate_read(env);
        aarch64_save_sp(env, arm_current_el(env));
        env->elr_el[new_el] = env->pc;
    } else {
        env->banked_spsr[aarch64_banked_spsr_index(new_el)] = cpsr_read(env);
        env->elr_el[new_el] = env->regs[15];

        aarch64_sync_32_to_64(env);

        env->condexec_bits = 0;
    }
    qemu_log_mask(CPU_LOG_INT, "...with ELR 0x%" PRIx64 "\n",
                  env->elr_el[new_el]);

    pstate_write(env, PSTATE_DAIF | new_mode);
    env->aarch64 = 1;
    aarch64_restore_sp(env, new_el);

    env->pc = addr;

    qemu_log_mask(CPU_LOG_INT, "...to EL%d PC 0x%" PRIx64 " PSTATE 0x%x\n",
                  new_el, env->pc, pstate_read(env));
}
8195
static inline bool check_for_semihosting(CPUState *cs)
{
    /* Check whether this exception is a semihosting call; if so
     * then handle it and return true; otherwise return false.
     */
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;

    if (is_a64(env)) {
        if (cs->exception_index == EXCP_SEMIHOST) {
            /* This is always the 64-bit semihosting exception.
             * The "is this usermode" and "is semihosting enabled"
             * checks have been done at translate time.
             */
            qemu_log_mask(CPU_LOG_INT,
                          "...handling as semihosting call 0x%" PRIx64 "\n",
                          env->xregs[0]);
            env->xregs[0] = do_arm_semihosting(env);
            return true;
        }
        return false;
    } else {
        uint32_t imm;

        /* Only intercept calls from privileged modes, to provide some
         * semblance of security.
         */
        if (cs->exception_index != EXCP_SEMIHOST &&
            (!semihosting_enabled() ||
             ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_USR))) {
            return false;
        }

        switch (cs->exception_index) {
        case EXCP_SEMIHOST:
            /* This is always a semihosting call; the "is this usermode"
             * and "is semihosting enabled" checks have been done at
             * translate time.
             */
            break;
        case EXCP_SWI:
            /* Check for semihosting interrupt: SVC 0xab (Thumb) or
             * SVC 0x123456 (ARM). The PC points past the SVC insn.
             */
            if (env->thumb) {
                imm = arm_lduw_code(env, env->regs[15] - 2, arm_sctlr_b(env))
                    & 0xff;
                if (imm == 0xab) {
                    break;
                }
            } else {
                imm = arm_ldl_code(env, env->regs[15] - 4, arm_sctlr_b(env))
                    & 0xffffff;
                if (imm == 0x123456) {
                    break;
                }
            }
            return false;
        case EXCP_BKPT:
            /* See if this is a semihosting syscall: BKPT 0xab (Thumb);
             * if so, skip the insn.
             */
            if (env->thumb) {
                imm = arm_lduw_code(env, env->regs[15], arm_sctlr_b(env))
                    & 0xff;
                if (imm == 0xab) {
                    env->regs[15] += 2;
                    break;
                }
            }
            return false;
        default:
            return false;
        }

        qemu_log_mask(CPU_LOG_INT,
                      "...handling as semihosting call 0x%x\n",
                      env->regs[0]);
        env->regs[0] = do_arm_semihosting(env);
        return true;
    }
}
8274
8275
8276
8277
8278
8279
/* Handle a CPU exception for A and R profile CPUs.
 * The target EL has already been decided (env->exception.target_el);
 * here we log, handle PSCI/semihosting specially, run the EL-change
 * hooks, and dispatch to the AArch32 or AArch64 entry code.
 */
void arm_cpu_do_interrupt(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    unsigned int new_el = env->exception.target_el;

    /* M profile has its own entry point (arm_v7m_cpu_do_interrupt) */
    assert(!arm_feature(env, ARM_FEATURE_M));

    arm_log_exception(cs->exception_index);
    qemu_log_mask(CPU_LOG_INT, "...from EL%d to EL%d\n", arm_current_el(env),
                  new_el);
    if (qemu_loglevel_mask(CPU_LOG_INT)
        && !excp_is_internal(cs->exception_index)) {
        qemu_log_mask(CPU_LOG_INT, "...with ESR 0x%x/0x%" PRIx32 "\n",
                      env->exception.syndrome >> ARM_EL_EC_SHIFT,
                      env->exception.syndrome);
    }

    if (arm_is_psci_call(cpu, cs->exception_index)) {
        arm_handle_psci_call(cpu);
        qemu_log_mask(CPU_LOG_INT, "...handled as PSCI call\n");
        return;
    }

    /* Semihosting semantics depend on the register width of the code
     * that caused the exception, not the target exception level, so
     * must be handled here.
     */
    if (check_for_semihosting(cs)) {
        return;
    }

    /* Hooks may change global state so BQL should be held, also the
     * BQL needs to be held for any modification of
     * cs->interrupt_request.
     */
    g_assert(qemu_mutex_iothread_locked());

    arm_call_pre_el_change_hook(cpu);

    assert(!excp_is_internal(cs->exception_index));
    if (arm_el_is_aa64(env, new_el)) {
        arm_cpu_do_interrupt_aarch64(cs);
    } else {
        arm_cpu_do_interrupt_aarch32(cs);
    }

    arm_call_el_change_hook(cpu);

    if (!kvm_enabled()) {
        cs->interrupt_request |= CPU_INTERRUPT_EXITTB;
    }
}
8333
8334
/* Return the exception level which controls this address translation
 * regime.
 */
static inline uint32_t regime_el(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_S2NS:
    case ARMMMUIdx_S1E2:
        return 2;
    case ARMMMUIdx_S1E3:
        return 3;
    case ARMMMUIdx_S1SE0:
        /* Secure EL0: controlled from EL1 if EL3 is AArch64, from EL3
         * if EL3 is AArch32.
         */
        return arm_el_is_aa64(env, 3) ? 1 : 3;
    case ARMMMUIdx_S1SE1:
    case ARMMMUIdx_S1NSE0:
    case ARMMMUIdx_S1NSE1:
    case ARMMMUIdx_MPrivNegPri:
    case ARMMMUIdx_MUserNegPri:
    case ARMMMUIdx_MPriv:
    case ARMMMUIdx_MUser:
    case ARMMMUIdx_MSPrivNegPri:
    case ARMMMUIdx_MSUserNegPri:
    case ARMMMUIdx_MSPriv:
    case ARMMMUIdx_MSUser:
        /* EL0/EL1 stage-1 regimes, and all M-profile regimes */
        return 1;
    default:
        g_assert_not_reached();
    }
}
8361
8362
8363static inline uint32_t regime_sctlr(CPUARMState *env, ARMMMUIdx mmu_idx)
8364{
8365 return env->cp15.sctlr_el[regime_el(env, mmu_idx)];
8366}
8367
8368
8369static inline bool regime_translation_disabled(CPUARMState *env,
8370 ARMMMUIdx mmu_idx)
8371{
8372 if (arm_feature(env, ARM_FEATURE_M)) {
8373 switch (env->v7m.mpu_ctrl[regime_is_secure(env, mmu_idx)] &
8374 (R_V7M_MPU_CTRL_ENABLE_MASK | R_V7M_MPU_CTRL_HFNMIENA_MASK)) {
8375 case R_V7M_MPU_CTRL_ENABLE_MASK:
8376
8377 return mmu_idx & ARM_MMU_IDX_M_NEGPRI;
8378 case R_V7M_MPU_CTRL_ENABLE_MASK | R_V7M_MPU_CTRL_HFNMIENA_MASK:
8379
8380 return false;
8381 case 0:
8382 default:
8383
8384
8385
8386 return true;
8387 }
8388 }
8389
8390 if (mmu_idx == ARMMMUIdx_S2NS) {
8391 return (env->cp15.hcr_el2 & HCR_VM) == 0;
8392 }
8393 return (regime_sctlr(env, mmu_idx) & SCTLR_M) == 0;
8394}
8395
8396static inline bool regime_translation_big_endian(CPUARMState *env,
8397 ARMMMUIdx mmu_idx)
8398{
8399 return (regime_sctlr(env, mmu_idx) & SCTLR_EE) != 0;
8400}
8401
8402
8403static inline TCR *regime_tcr(CPUARMState *env, ARMMMUIdx mmu_idx)
8404{
8405 if (mmu_idx == ARMMMUIdx_S2NS) {
8406 return &env->cp15.vtcr_el2;
8407 }
8408 return &env->cp15.tcr_el[regime_el(env, mmu_idx)];
8409}
8410
8411
8412
8413
8414static inline ARMMMUIdx stage_1_mmu_idx(ARMMMUIdx mmu_idx)
8415{
8416 if (mmu_idx == ARMMMUIdx_S12NSE0 || mmu_idx == ARMMMUIdx_S12NSE1) {
8417 mmu_idx += (ARMMMUIdx_S1NSE0 - ARMMMUIdx_S12NSE0);
8418 }
8419 return mmu_idx;
8420}
8421
8422
8423uint32_t arm_regime_tbi0(CPUARMState *env, ARMMMUIdx mmu_idx)
8424{
8425 TCR *tcr;
8426 uint32_t el;
8427
8428
8429
8430
8431 mmu_idx = stage_1_mmu_idx(mmu_idx);
8432
8433 tcr = regime_tcr(env, mmu_idx);
8434 el = regime_el(env, mmu_idx);
8435
8436 if (el > 1) {
8437 return extract64(tcr->raw_tcr, 20, 1);
8438 } else {
8439 return extract64(tcr->raw_tcr, 37, 1);
8440 }
8441}
8442
8443
8444uint32_t arm_regime_tbi1(CPUARMState *env, ARMMMUIdx mmu_idx)
8445{
8446 TCR *tcr;
8447 uint32_t el;
8448
8449
8450
8451
8452 mmu_idx = stage_1_mmu_idx(mmu_idx);
8453
8454 tcr = regime_tcr(env, mmu_idx);
8455 el = regime_el(env, mmu_idx);
8456
8457 if (el > 1) {
8458 return 0;
8459 } else {
8460 return extract64(tcr->raw_tcr, 38, 1);
8461 }
8462}
8463
8464
8465static inline uint64_t regime_ttbr(CPUARMState *env, ARMMMUIdx mmu_idx,
8466 int ttbrn)
8467{
8468 if (mmu_idx == ARMMMUIdx_S2NS) {
8469 return env->cp15.vttbr_el2;
8470 }
8471 if (ttbrn == 0) {
8472 return env->cp15.ttbr0_el[regime_el(env, mmu_idx)];
8473 } else {
8474 return env->cp15.ttbr1_el[regime_el(env, mmu_idx)];
8475 }
8476}
8477
8478
8479static inline bool regime_using_lpae_format(CPUARMState *env,
8480 ARMMMUIdx mmu_idx)
8481{
8482 int el = regime_el(env, mmu_idx);
8483 if (el == 2 || arm_el_is_aa64(env, el)) {
8484 return true;
8485 }
8486 if (arm_feature(env, ARM_FEATURE_LPAE)
8487 && (regime_tcr(env, mmu_idx)->raw_tcr & TTBCR_EAE)) {
8488 return true;
8489 }
8490 return false;
8491}
8492
8493
8494
8495
8496bool arm_s1_regime_using_lpae_format(CPUARMState *env, ARMMMUIdx mmu_idx)
8497{
8498 mmu_idx = stage_1_mmu_idx(mmu_idx);
8499
8500 return regime_using_lpae_format(env, mmu_idx);
8501}
8502
8503static inline bool regime_is_user(CPUARMState *env, ARMMMUIdx mmu_idx)
8504{
8505 switch (mmu_idx) {
8506 case ARMMMUIdx_S1SE0:
8507 case ARMMMUIdx_S1NSE0:
8508 case ARMMMUIdx_MUser:
8509 case ARMMMUIdx_MSUser:
8510 case ARMMMUIdx_MUserNegPri:
8511 case ARMMMUIdx_MSUserNegPri:
8512 return true;
8513 default:
8514 return false;
8515 case ARMMMUIdx_S12NSE0:
8516 case ARMMMUIdx_S12NSE1:
8517 g_assert_not_reached();
8518 }
8519}
8520
8521
8522
8523
8524
8525
8526
8527
8528
/* Translate section/page access permissions to page R/W protection flags
 *
 * @env:         CPUARMState
 * @mmu_idx:     MMU index indicating required translation regime
 * @ap:          The 3-bit access permissions (AP[2:0])
 * @domain_prot: The 2-bit domain access permissions
 */
static inline int ap_to_rw_prot(CPUARMState *env, ARMMMUIdx mmu_idx,
                                int ap, int domain_prot)
{
    bool is_user = regime_is_user(env, mmu_idx);

    if (domain_prot == 3) {
        /* "Manager" domain: accesses are not checked against AP bits */
        return PAGE_READ | PAGE_WRITE;
    }

    switch (ap) {
    case 0:
        if (arm_feature(env, ARM_FEATURE_V7)) {
            return 0;
        }
        /* Pre-v7, AP==0 access depends on the SCTLR S and R bits */
        switch (regime_sctlr(env, mmu_idx) & (SCTLR_S | SCTLR_R)) {
        case SCTLR_S:
            return is_user ? 0 : PAGE_READ;
        case SCTLR_R:
            return PAGE_READ;
        default:
            return 0;
        }
    case 1:
        return is_user ? 0 : PAGE_READ | PAGE_WRITE;
    case 2:
        if (is_user) {
            return PAGE_READ;
        } else {
            return PAGE_READ | PAGE_WRITE;
        }
    case 3:
        return PAGE_READ | PAGE_WRITE;
    case 4: /* Reserved.  */
        return 0;
    case 5:
        return is_user ? 0 : PAGE_READ;
    case 6:
        return PAGE_READ;
    case 7:
        /* Only valid from v6K onwards */
        if (!arm_feature(env, ARM_FEATURE_V6K)) {
            return 0;
        }
        return PAGE_READ;
    default:
        g_assert_not_reached();
    }
}
8576
8577
8578
8579
8580
8581
8582
8583static inline int simple_ap_to_rw_prot_is_user(int ap, bool is_user)
8584{
8585 switch (ap) {
8586 case 0:
8587 return is_user ? 0 : PAGE_READ | PAGE_WRITE;
8588 case 1:
8589 return PAGE_READ | PAGE_WRITE;
8590 case 2:
8591 return is_user ? 0 : PAGE_READ;
8592 case 3:
8593 return PAGE_READ;
8594 default:
8595 g_assert_not_reached();
8596 }
8597}
8598
8599static inline int
8600simple_ap_to_rw_prot(CPUARMState *env, ARMMMUIdx mmu_idx, int ap)
8601{
8602 return simple_ap_to_rw_prot_is_user(ap, regime_is_user(env, mmu_idx));
8603}
8604
8605
8606
8607
8608
8609
8610
8611static int get_S2prot(CPUARMState *env, int s2ap, int xn)
8612{
8613 int prot = 0;
8614
8615 if (s2ap & 1) {
8616 prot |= PAGE_READ;
8617 }
8618 if (s2ap & 2) {
8619 prot |= PAGE_WRITE;
8620 }
8621 if (!xn) {
8622 if (arm_el_is_aa64(env, 2) || prot & PAGE_READ) {
8623 prot |= PAGE_EXEC;
8624 }
8625 }
8626 return prot;
8627}
8628
8629
8630
8631
8632
8633
8634
8635
8636
8637
8638
/* Translate S1 attributes in LPAE format to page R/W protection flags
 *
 * @env:     CPUARMState
 * @mmu_idx: MMU index indicating required translation regime
 * @is_aa64: TRUE if AArch64
 * @ap:      The 2-bit simple AP (AP[2:1])
 * @ns:      NS (non-secure) bit
 * @xn:      XN (execute-never) bit
 * @pxn:     PXN (privileged execute-never) bit
 */
static int get_S1prot(CPUARMState *env, ARMMMUIdx mmu_idx, bool is_aa64,
                      int ap, int ns, int xn, int pxn)
{
    bool is_user = regime_is_user(env, mmu_idx);
    int prot_rw, user_rw;
    bool have_wxn;
    int wxn = 0;

    assert(mmu_idx != ARMMMUIdx_S2NS);

    user_rw = simple_ap_to_rw_prot_is_user(ap, true);
    if (is_user) {
        prot_rw = user_rw;
    } else {
        prot_rw = simple_ap_to_rw_prot_is_user(ap, false);
    }

    if (ns && arm_is_secure(env) && (env->cp15.scr_el3 & SCR_SIF)) {
        /* SCR.SIF means Secure state may not fetch from NS memory:
         * return R/W only, never adding PAGE_EXEC below.
         */
        return prot_rw;
    }

    /* The WXN bit (write implies execute-never) only exists from
     * LPAE onwards.
     */
    have_wxn = arm_feature(env, ARM_FEATURE_LPAE);

    if (have_wxn) {
        wxn = regime_sctlr(env, mmu_idx) & SCTLR_WXN;
    }

    if (is_aa64) {
        switch (regime_el(env, mmu_idx)) {
        case 1:
            /* At EL1, privileged execution is denied if PXN is set or
             * if the page is user-writable.
             */
            if (!is_user) {
                xn = pxn || (user_rw & PAGE_WRITE);
            }
            break;
        case 2:
        case 3:
            break;
        }
    } else if (arm_feature(env, ARM_FEATURE_V7)) {
        switch (regime_el(env, mmu_idx)) {
        case 1:
        case 3:
            if (is_user) {
                xn = xn || !(user_rw & PAGE_READ);
            } else {
                /* Privileged execution additionally denied by PXN, by a
                 * non-readable mapping, or (with SCTLR.UWXN) by a
                 * user-writable one.
                 */
                int uwxn = 0;
                if (have_wxn) {
                    uwxn = regime_sctlr(env, mmu_idx) & SCTLR_UWXN;
                }
                xn = xn || !(prot_rw & PAGE_READ) || pxn ||
                     (uwxn && (user_rw & PAGE_WRITE));
            }
            break;
        case 2:
            break;
        }
    } else {
        /* Pre-v7 has neither XN nor WXN controls */
        xn = wxn = 0;
    }

    if (xn || (wxn && (prot_rw & PAGE_WRITE))) {
        return prot_rw;
    }
    return prot_rw | PAGE_EXEC;
}
8709
/* Compute the level-1 descriptor address for @address into *@table.
 * Returns false if the table walk for the selected half of the address
 * space is disabled (TTBCR.PD0/PD1), true on success.
 */
static bool get_level1_table_address(CPUARMState *env, ARMMMUIdx mmu_idx,
                                     uint32_t *table, uint32_t address)
{
    /* Note that we can only get here for an AArch32 PL1&0 regime */
    TCR *tcr = regime_tcr(env, mmu_idx);

    if (address & tcr->mask) {
        /* Address above the TTBCR.N split: use TTBR1 */
        if (tcr->raw_tcr & TTBCR_PD1) {
            /* Translation table walk disabled for TTBR1 */
            return false;
        }
        *table = regime_ttbr(env, mmu_idx, 1) & 0xffffc000;
    } else {
        if (tcr->raw_tcr & TTBCR_PD0) {
            /* Translation table walk disabled for TTBR0 */
            return false;
        }
        *table = regime_ttbr(env, mmu_idx, 0) & tcr->base_mask;
    }
    /* Index by bits [31:20] of the VA (l1 descriptors are 4 bytes) */
    *table |= (address >> 18) & 0x3ffc;
    return true;
}
8732
8733
/* Translate a stage-1 page table walk address through stage 2 if
 * needed; returns ~0 (and sets the s1ptw fault info) on a stage-2
 * fault during the walk.
 */
static hwaddr S1_ptw_translate(CPUARMState *env, ARMMMUIdx mmu_idx,
                               hwaddr addr, MemTxAttrs txattrs,
                               ARMMMUFaultInfo *fi)
{
    if ((mmu_idx == ARMMMUIdx_S1NSE0 || mmu_idx == ARMMMUIdx_S1NSE1) &&
        !regime_translation_disabled(env, ARMMMUIdx_S2NS)) {
        target_ulong s2size;
        hwaddr s2pa;
        int s2prot;
        int ret;

        ret = get_phys_addr_lpae(env, addr, 0, ARMMMUIdx_S2NS, &s2pa,
                                 &txattrs, &s2prot, &s2size, fi, NULL);
        if (ret) {
            /* Record that the fault happened on a S1 walk via S2 */
            assert(fi->type != ARMFault_None);
            fi->s2addr = addr;
            fi->stage2 = true;
            fi->s1ptw = true;
            return ~0;
        }
        addr = s2pa;
    }
    return addr;
}
8758
8759
8760static uint32_t arm_ldl_ptw(CPUState *cs, hwaddr addr, bool is_secure,
8761 ARMMMUIdx mmu_idx, ARMMMUFaultInfo *fi)
8762{
8763 ARMCPU *cpu = ARM_CPU(cs);
8764 CPUARMState *env = &cpu->env;
8765 MemTxAttrs attrs = {};
8766 MemTxResult result = MEMTX_OK;
8767 AddressSpace *as;
8768 uint32_t data;
8769
8770 attrs.secure = is_secure;
8771 as = arm_addressspace(cs, attrs);
8772 addr = S1_ptw_translate(env, mmu_idx, addr, attrs, fi);
8773 if (fi->s1ptw) {
8774 return 0;
8775 }
8776 if (regime_translation_big_endian(env, mmu_idx)) {
8777 data = address_space_ldl_be(as, addr, attrs, &result);
8778 } else {
8779 data = address_space_ldl_le(as, addr, attrs, &result);
8780 }
8781 if (result == MEMTX_OK) {
8782 return data;
8783 }
8784 fi->type = ARMFault_SyncExternalOnWalk;
8785 fi->ea = arm_extabort_type(result);
8786 return 0;
8787}
8788
8789static uint64_t arm_ldq_ptw(CPUState *cs, hwaddr addr, bool is_secure,
8790 ARMMMUIdx mmu_idx, ARMMMUFaultInfo *fi)
8791{
8792 ARMCPU *cpu = ARM_CPU(cs);
8793 CPUARMState *env = &cpu->env;
8794 MemTxAttrs attrs = {};
8795 MemTxResult result = MEMTX_OK;
8796 AddressSpace *as;
8797 uint64_t data;
8798
8799 attrs.secure = is_secure;
8800 as = arm_addressspace(cs, attrs);
8801 addr = S1_ptw_translate(env, mmu_idx, addr, attrs, fi);
8802 if (fi->s1ptw) {
8803 return 0;
8804 }
8805 if (regime_translation_big_endian(env, mmu_idx)) {
8806 data = address_space_ldq_be(as, addr, attrs, &result);
8807 } else {
8808 data = address_space_ldq_le(as, addr, attrs, &result);
8809 }
8810 if (result == MEMTX_OK) {
8811 return data;
8812 }
8813 fi->type = ARMFault_SyncExternalOnWalk;
8814 fi->ea = arm_extabort_type(result);
8815 return 0;
8816}
8817
/* Translate @address via pre-v6 short-descriptor page tables.
 * Returns true on a fault (filling in *fi), false on success (filling
 * in *phys_ptr, *prot and *page_size).
 */
static bool get_phys_addr_v5(CPUARMState *env, uint32_t address,
                             MMUAccessType access_type, ARMMMUIdx mmu_idx,
                             hwaddr *phys_ptr, int *prot,
                             target_ulong *page_size,
                             ARMMMUFaultInfo *fi)
{
    CPUState *cs = CPU(arm_env_get_cpu(env));
    int level = 1;
    uint32_t table;
    uint32_t desc;
    int type;
    int ap;
    int domain = 0;
    int domain_prot;
    hwaddr phys_addr;
    uint32_t dacr;

    /* Pagetable walk.  */
    /* Lookup l1 descriptor.  */
    if (!get_level1_table_address(env, mmu_idx, &table, address)) {
        /* Section translation fault if page walk is disabled by PD0 or PD1 */
        fi->type = ARMFault_Translation;
        goto do_fault;
    }
    desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx),
                       mmu_idx, fi);
    if (fi->type != ARMFault_None) {
        goto do_fault;
    }
    type = (desc & 3);
    domain = (desc >> 5) & 0x0f;
    if (regime_el(env, mmu_idx) == 1) {
        dacr = env->cp15.dacr_ns;
    } else {
        dacr = env->cp15.dacr_s;
    }
    domain_prot = (dacr >> (domain * 2)) & 3;
    if (type == 0) {
        /* Section translation fault.  */
        fi->type = ARMFault_Translation;
        goto do_fault;
    }
    if (type != 2) {
        /* Any l2 walk faults are reported as level 2 */
        level = 2;
    }
    if (domain_prot == 0 || domain_prot == 2) {
        fi->type = ARMFault_Domain;
        goto do_fault;
    }
    if (type == 2) {
        /* 1Mb section.  */
        phys_addr = (desc & 0xfff00000) | (address & 0x000fffff);
        ap = (desc >> 10) & 3;
        *page_size = 1024 * 1024;
    } else {
        /* Lookup l2 entry.  */
        if (type == 1) {
            /* Coarse pagetable.  */
            table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc);
        } else {
            /* Fine pagetable.  */
            table = (desc & 0xfffff000) | ((address >> 8) & 0xffc);
        }
        desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx),
                           mmu_idx, fi);
        if (fi->type != ARMFault_None) {
            goto do_fault;
        }
        switch (desc & 3) {
        case 0: /* Page translation fault.  */
            fi->type = ARMFault_Translation;
            goto do_fault;
        case 1: /* 64k page.  */
            phys_addr = (desc & 0xffff0000) | (address & 0xffff);
            ap = (desc >> (4 + ((address >> 13) & 6))) & 3;
            *page_size = 0x10000;
            break;
        case 2: /* 4k page.  */
            phys_addr = (desc & 0xfffff000) | (address & 0xfff);
            ap = (desc >> (4 + ((address >> 9) & 6))) & 3;
            *page_size = 0x1000;
            break;
        case 3: /* 1k page, or ARMv6/XScale "extended small (4k) page" */
            if (type == 1) {
                if (arm_feature(env, ARM_FEATURE_XSCALE)
                    || arm_feature(env, ARM_FEATURE_V6)) {
                    phys_addr = (desc & 0xfffff000) | (address & 0xfff);
                    *page_size = 0x1000;
                } else {
                    /* UNPREDICTABLE in ARMv5; we choose to take a
                     * page translation fault.
                     */
                    fi->type = ARMFault_Translation;
                    goto do_fault;
                }
            } else {
                phys_addr = (desc & 0xfffffc00) | (address & 0x3ff);
                *page_size = 0x400;
            }
            ap = (desc >> 4) & 3;
            break;
        default:
            /* Never happens, but compiler isn't smart enough to tell.  */
            abort();
        }
    }
    *prot = ap_to_rw_prot(env, mmu_idx, ap, domain_prot);
    /* No separate XN bit pre-v6: any accessible page is executable */
    *prot |= *prot ? PAGE_EXEC : 0;
    if (!(*prot & (1 << access_type))) {
        /* Access permission fault.  */
        fi->type = ARMFault_Permission;
        goto do_fault;
    }
    *phys_ptr = phys_addr;
    return false;
do_fault:
    fi->domain = domain;
    fi->level = level;
    return true;
}
8939
static bool get_phys_addr_v6(CPUARMState *env, uint32_t address,
                             MMUAccessType access_type, ARMMMUIdx mmu_idx,
                             hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot,
                             target_ulong *page_size, ARMMMUFaultInfo *fi)
{
    CPUState *cs = CPU(arm_env_get_cpu(env));
    int level = 1;
    uint32_t table;
    uint32_t desc;
    uint32_t xn;
    uint32_t pxn = 0;
    int type;
    int ap;
    int domain = 0;
    int domain_prot;
    hwaddr phys_addr;
    uint32_t dacr;
    bool ns;

    /* Pagetable walk for the ARMv6+ short descriptor format
     * (supersections, XN/PXN, NS bit, simplified access model).
     * Returns false on success; returns true and fills in *fi on a fault.
     */
    /* Lookup l1 descriptor. */
    if (!get_level1_table_address(env, mmu_idx, &table, address)) {
        /* Section translation fault if page walk is disabled by PD0 or PD1 */
        fi->type = ARMFault_Translation;
        goto do_fault;
    }
    desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx),
                       mmu_idx, fi);
    if (fi->type != ARMFault_None) {
        goto do_fault;
    }
    type = (desc & 3);
    if (type == 0 || (type == 3 && !arm_feature(env, ARM_FEATURE_PXN))) {
        /* Section translation fault, or attempt to use the encoding
         * which is Reserved on implementations without PXN.
         */
        fi->type = ARMFault_Translation;
        goto do_fault;
    }
    if ((type == 1) || !(desc & (1 << 18))) {
        /* Page or Section (not a supersection): domain field is valid */
        domain = (desc >> 5) & 0x0f;
    }
    if (regime_el(env, mmu_idx) == 1) {
        dacr = env->cp15.dacr_ns;
    } else {
        dacr = env->cp15.dacr_s;
    }
    if (type == 1) {
        /* Page table: any fault below is a second-level fault */
        level = 2;
    }
    domain_prot = (dacr >> (domain * 2)) & 3;
    if (domain_prot == 0 || domain_prot == 2) {
        /* Section or Page domain fault */
        fi->type = ARMFault_Domain;
        goto do_fault;
    }
    if (type != 1) {
        if (desc & (1 << 18)) {
            /* Supersection: 16MB, with extended base address bits */
            phys_addr = (desc & 0xff000000) | (address & 0x00ffffff);
            phys_addr |= (uint64_t)extract32(desc, 20, 4) << 32;
            phys_addr |= (uint64_t)extract32(desc, 5, 4) << 36;
            *page_size = 0x1000000;
        } else {
            /* Section: 1MB */
            phys_addr = (desc & 0xfff00000) | (address & 0x000fffff);
            *page_size = 0x100000;
        }
        ap = ((desc >> 10) & 3) | ((desc >> 13) & 4);
        xn = desc & (1 << 4);
        pxn = desc & 1;
        ns = extract32(desc, 19, 1);
    } else {
        if (arm_feature(env, ARM_FEATURE_PXN)) {
            pxn = (desc >> 2) & 1;
        }
        ns = extract32(desc, 3, 1);
        /* Lookup l2 entry. */
        table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc);
        desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx),
                           mmu_idx, fi);
        if (fi->type != ARMFault_None) {
            goto do_fault;
        }
        ap = ((desc >> 4) & 3) | ((desc >> 7) & 4);
        switch (desc & 3) {
        case 0: /* Page translation fault. */
            fi->type = ARMFault_Translation;
            goto do_fault;
        case 1: /* 64k page. */
            phys_addr = (desc & 0xffff0000) | (address & 0xffff);
            xn = desc & (1 << 15);
            *page_size = 0x10000;
            break;
        case 2: case 3: /* 4k page (bit 0 is the XN bit). */
            phys_addr = (desc & 0xfffff000) | (address & 0xfff);
            xn = desc & 1;
            *page_size = 0x1000;
            break;
        default:
            /* Never happens, but compiler isn't smart enough to tell. */
            abort();
        }
    }
    if (domain_prot == 3) {
        /* Domain "manager": access controls are not checked */
        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
    } else {
        if (pxn && !regime_is_user(env, mmu_idx)) {
            /* Privileged-execute-never makes the page XN for this regime */
            xn = 1;
        }
        if (xn && access_type == MMU_INST_FETCH) {
            fi->type = ARMFault_Permission;
            goto do_fault;
        }

        if (arm_feature(env, ARM_FEATURE_V6K) &&
            (regime_sctlr(env, mmu_idx) & SCTLR_AFE)) {
            /* The simplified model uses AP[0] as an access control bit. */
            if ((ap & 1) == 0) {
                /* Access flag fault. */
                fi->type = ARMFault_AccessFlag;
                goto do_fault;
            }
            *prot = simple_ap_to_rw_prot(env, mmu_idx, ap >> 1);
        } else {
            *prot = ap_to_rw_prot(env, mmu_idx, ap, domain_prot);
        }
        if (*prot && !xn) {
            *prot |= PAGE_EXEC;
        }
        if (!(*prot & (1 << access_type))) {
            /* Access permission fault. */
            fi->type = ARMFault_Permission;
            goto do_fault;
        }
    }
    if (ns) {
        /* The NS bit will (as required by the architecture) have no effect if
         * the CPU doesn't support TZ or this is a non-secure translation
         * regime, because the attribute will already be non-secure.
         */
        attrs->secure = false;
    }
    *phys_ptr = phys_addr;
    return false;
do_fault:
    fi->domain = domain;
    fi->level = level;
    return true;
}
9091
9092
9093
9094
9095
9096
9097
9098
9099
9100
9101
9102
9103static bool check_s2_mmu_setup(ARMCPU *cpu, bool is_aa64, int level,
9104 int inputsize, int stride)
9105{
9106 const int grainsize = stride + 3;
9107 int startsizecheck;
9108
9109
9110 if (level < 0) {
9111 return false;
9112 }
9113
9114 startsizecheck = inputsize - ((3 - level) * stride + grainsize);
9115 if (startsizecheck < 1 || startsizecheck > stride + 4) {
9116 return false;
9117 }
9118
9119 if (is_aa64) {
9120 CPUARMState *env = &cpu->env;
9121 unsigned int pamax = arm_pamax(cpu);
9122
9123 switch (stride) {
9124 case 13:
9125 if (level == 0 || (level == 1 && pamax <= 42)) {
9126 return false;
9127 }
9128 break;
9129 case 11:
9130 if (level == 0 || (level == 1 && pamax <= 40)) {
9131 return false;
9132 }
9133 break;
9134 case 9:
9135 if (level == 0 && pamax <= 42) {
9136 return false;
9137 }
9138 break;
9139 default:
9140 g_assert_not_reached();
9141 }
9142
9143
9144 if (inputsize > pamax &&
9145 (arm_el_is_aa64(env, 1) || inputsize > 40)) {
9146
9147 return false;
9148 }
9149 } else {
9150
9151 assert(stride == 9);
9152
9153 if (level == 0) {
9154 return false;
9155 }
9156 }
9157 return true;
9158}
9159
9160
9161
9162
9163
9164
9165
9166
9167
9168static uint8_t convert_stage2_attrs(CPUARMState *env, uint8_t s2attrs)
9169{
9170 uint8_t hiattr = extract32(s2attrs, 2, 2);
9171 uint8_t loattr = extract32(s2attrs, 0, 2);
9172 uint8_t hihint = 0, lohint = 0;
9173
9174 if (hiattr != 0) {
9175 if ((env->cp15.hcr_el2 & HCR_CD) != 0) {
9176 hiattr = loattr = 1;
9177 } else {
9178 if (hiattr != 1) {
9179 hihint = 3;
9180 }
9181 if (loattr != 1) {
9182 lohint = 3;
9183 }
9184 }
9185 }
9186
9187 return (hiattr << 6) | (hihint << 4) | (loattr << 2) | lohint;
9188}
9189
static bool get_phys_addr_lpae(CPUARMState *env, target_ulong address,
                               MMUAccessType access_type, ARMMMUIdx mmu_idx,
                               hwaddr *phys_ptr, MemTxAttrs *txattrs, int *prot,
                               target_ulong *page_size_ptr,
                               ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    CPUState *cs = CPU(cpu);
    /* Read an LPAE long-descriptor translation table. */
    ARMFaultType fault_type = ARMFault_Translation;
    uint32_t level;
    uint32_t epd = 0;
    int32_t t0sz, t1sz;
    uint32_t tg;
    uint64_t ttbr;
    int ttbr_select;
    hwaddr descaddr, indexmask, indexmask_grainsize;
    uint32_t tableattrs;
    target_ulong page_size;
    uint32_t attrs;
    int32_t stride = 9;
    int32_t addrsize;
    int inputsize;
    int32_t tbi = 0;
    TCR *tcr = regime_tcr(env, mmu_idx);
    int ap, ns, xn, pxn;
    uint32_t el = regime_el(env, mmu_idx);
    bool ttbr1_valid = true;
    uint64_t descaddrmask;
    bool aarch64 = arm_el_is_aa64(env, el);

    /* TODO:
     * This code does not handle the different format TCR for VTCR_EL2.
     * This code also does not support shareability levels.
     * Attribute and permission bit handling should also be checked when adding
     * support for those page table walks.
     */
    if (aarch64) {
        level = 0;
        addrsize = 64;
        /* Determine TBI (top byte ignore) from the relevant TCR field */
        if (el > 1) {
            if (mmu_idx != ARMMMUIdx_S2NS) {
                tbi = extract64(tcr->raw_tcr, 20, 1);
            }
        } else {
            /* EL0/EL1: TBI1 applies if bit 55 of the VA is set, else TBI0 */
            if (extract64(address, 55, 1)) {
                tbi = extract64(tcr->raw_tcr, 38, 1);
            } else {
                tbi = extract64(tcr->raw_tcr, 37, 1);
            }
        }
        tbi *= 8;

        /* If we are in 64-bit EL2 or EL3 then there is no TTBR1, so mark it
         * invalid.
         */
        if (el > 1) {
            ttbr1_valid = false;
        }
    } else {
        level = 1;
        addrsize = 32;
        /* There is no TTBR1 for EL2 */
        if (el == 2) {
            ttbr1_valid = false;
        }
    }

    /* Determine whether this address is in the region controlled by
     * TTBR0 or TTBR1 (or if it is in neither region and should fault).
     * Note that the sizes computed here depend on whether this is an
     * AArch64 translation, an AArch32 stage 1, or an AArch32 stage 2.
     */
    if (aarch64) {
        /* AArch64 translation.  */
        t0sz = extract32(tcr->raw_tcr, 0, 6);
        t0sz = MIN(t0sz, 39);
        t0sz = MAX(t0sz, 16);
    } else if (mmu_idx != ARMMMUIdx_S2NS) {
        /* AArch32 stage 1 translation.  */
        t0sz = extract32(tcr->raw_tcr, 0, 3);
    } else {
        /* AArch32 stage 2 translation.  */
        bool sext = extract32(tcr->raw_tcr, 4, 1);
        bool sign = extract32(tcr->raw_tcr, 3, 1);
        /* Address size is 40-bit for a stage 2 translation,
         * and t0sz can be negative (from -8 to 7),
         * so that the total input size is 48 (40 - (-8)) bits at maximum.
         */
        addrsize = 40;
        t0sz = sextract32(tcr->raw_tcr, 0, 4) + 8;

        /* If the sign-extend bit is not the same as t0sz[3], the result
         * is unpredictable. Flag this as a guest error.
         */
        if (sign != sext) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "AArch32: VTCR.S / VTCR.T0SZ[3] mismatch\n");
        }
    }
    t1sz = extract32(tcr->raw_tcr, 16, 6);
    if (aarch64) {
        t1sz = MIN(t1sz, 39);
        t1sz = MAX(t1sz, 16);
    }
    if (t0sz && !extract64(address, addrsize - t0sz, t0sz - tbi)) {
        /* there is a ttbr0 region and we are in it (high bits all zero) */
        ttbr_select = 0;
    } else if (ttbr1_valid && t1sz &&
               !extract64(~address, addrsize - t1sz, t1sz - tbi)) {
        /* there is a ttbr1 region and we are in it (high bits all one) */
        ttbr_select = 1;
    } else if (!t0sz) {
        /* ttbr0 region is "everything not in the ttbr1 region" */
        ttbr_select = 0;
    } else if (!t1sz && ttbr1_valid) {
        /* ttbr1 region is "everything not in the ttbr0 region" */
        ttbr_select = 1;
    } else {
        /* in the gap between the two regions, this is a Translation fault */
        fault_type = ARMFault_Translation;
        goto do_fault;
    }

    /* Now read the TTBR, EPD bit and granule size for the selected
     * region, deriving the descriptor-table stride from the granule
     * (stride 9 for 4KB pages, 11 for 16KB, 13 for 64KB).
     * Note the TG field encodings differ between TTBR0 and TTBR1.
     */
    if (ttbr_select == 0) {
        ttbr = regime_ttbr(env, mmu_idx, 0);
        if (el < 2) {
            epd = extract32(tcr->raw_tcr, 7, 1);
        }
        inputsize = addrsize - t0sz;

        tg = extract32(tcr->raw_tcr, 14, 2);
        if (tg == 1) { /* 64KB pages */
            stride = 13;
        }
        if (tg == 2) { /* 16KB pages */
            stride = 11;
        }
    } else {
        /* We should only be here if TTBR1 is valid */
        assert(ttbr1_valid);

        ttbr = regime_ttbr(env, mmu_idx, 1);
        epd = extract32(tcr->raw_tcr, 23, 1);
        inputsize = addrsize - t1sz;

        tg = extract32(tcr->raw_tcr, 30, 2);
        if (tg == 3) { /* 64KB pages */
            stride = 13;
        }
        if (tg == 1) { /* 16KB pages */
            stride = 11;
        }
    }

    /* Here we should have set up all the parameters for the translation:
     * inputsize, ttbr, epd, stride, tbi
     */

    if (epd) {
        /* Translation table walk disabled => Translation fault on TLB miss
         * Note: This is always 0 on 64-bit EL2 and EL3.
         */
        goto do_fault;
    }

    if (mmu_idx != ARMMMUIdx_S2NS) {
        /* The starting level depends on the virtual address size (which can
         * be up to 48 bits) and the translation granule size. It indicates
         * the number of strides (stride bits at a time) needed to
         * consume the bits of the virtual address. In the pseudocode this is:
         *   level = 4 - RoundUp((inputsize - grainsize) / stride)
         * where their 'inputsize' is our 'inputsize', 'grainsize' is
         * our 'stride + 3' and 'stride' is our 'stride'. Applying the usual
         * "rounded up m/n is (m+n-1)/n" and simplifying:
         *   = 4 - (inputsize - 4) / stride;
         */
        level = 4 - (inputsize - 4) / stride;
    } else {
        /* For stage 2 translations the starting level is specified by the
         * VTCR_EL2.SL0 field (whose interpretation depends on the page size)
         */
        uint32_t sl0 = extract32(tcr->raw_tcr, 6, 2);
        uint32_t startlevel;
        bool ok;

        if (!aarch64 || stride == 9) {
            /* AArch32 or 4KB pages */
            startlevel = 2 - sl0;
        } else {
            /* 16KB or 64KB pages */
            startlevel = 3 - sl0;
        }

        /* Check that the starting level is valid. */
        ok = check_s2_mmu_setup(cpu, aarch64, startlevel,
                                inputsize, stride);
        if (!ok) {
            fault_type = ARMFault_Translation;
            goto do_fault;
        }
        level = startlevel;
    }

    indexmask_grainsize = (1ULL << (stride + 3)) - 1;
    indexmask = (1ULL << (inputsize - (stride * (4 - level)))) - 1;

    /* Now we can extract the actual base address from the TTBR */
    descaddr = extract64(ttbr, 0, 48);
    descaddr &= ~indexmask;

    /* The address field in the descriptor goes up to bit 39 for ARMv7
     * but up to bit 47 for ARMv8. We use the 40-bit mask for AArch32
     * because the other bits should be all zeroes in that case anyway.
     */
    descaddrmask = ((1ull << (aarch64 ? 48 : 40)) - 1) &
                   ~indexmask_grainsize;

    /* Secure accesses start with the page table in secure memory and
     * can be downgraded to non-secure at any step. Non-secure accesses
     * remain non-secure. We implement this by just ORing in the NSTable/NS
     * bits at each step.
     */
    tableattrs = regime_is_secure(env, mmu_idx) ? 0 : (1 << 4);
    for (;;) {
        uint64_t descriptor;
        bool nstable;

        descaddr |= (address >> (stride * (4 - level))) & indexmask;
        descaddr &= ~7ULL;
        nstable = extract32(tableattrs, 4, 1);
        descriptor = arm_ldq_ptw(cs, descaddr, !nstable, mmu_idx, fi);
        if (fi->type != ARMFault_None) {
            goto do_fault;
        }

        if (!(descriptor & 1) ||
            (!(descriptor & 2) && (level == 3))) {
            /* Invalid, or the Reserved level 3 encoding */
            goto do_fault;
        }
        descaddr = descriptor & descaddrmask;

        if ((descriptor & 2) && (level < 3)) {
            /* Table entry. The top five bits are attributes which may
             * propagate down through lower levels of the table (and
             * which are all arranged so that 0 means "no effect", so
             * we can gather them up and accumulate them by ORing).
             */
            tableattrs |= extract64(descriptor, 59, 5);
            level++;
            indexmask = indexmask_grainsize;
            continue;
        }
        /* Block entry at level 1 or 2, or page entry at level 3.
         * These are basically the same thing, although the number
         * of bits we pull in from the vaddr varies.
         */
        page_size = (1ULL << ((stride * (4 - level)) + 3));
        descaddr |= (address & (page_size - 1));
        /* Extract attributes from the descriptor */
        attrs = extract64(descriptor, 2, 10)
            | (extract64(descriptor, 52, 12) << 10);

        if (mmu_idx == ARMMMUIdx_S2NS) {
            /* Stage 2 table descriptors do not include any attribute fields */
            break;
        }
        /* Merge in attributes from table descriptors */
        attrs |= extract32(tableattrs, 0, 2) << 11; /* XN, PXN */
        attrs |= extract32(tableattrs, 3, 1) << 5;  /* APTable[1] => AP[2] */
        /* The sense of AP[1] vs APTable[0] is reversed, as APTable[0] == 1
         * means "force PL1 access only", which means forcing AP[1] to 0.
         */
        if (extract32(tableattrs, 2, 1)) {
            attrs &= ~(1 << 4);
        }
        attrs |= nstable << 3; /* NS */
        break;
    }
    /* Here descaddr is the final physical address, and attributes
     * are all in attrs.
     */
    fault_type = ARMFault_AccessFlag;
    if ((attrs & (1 << 8)) == 0) {
        /* Access flag */
        goto do_fault;
    }

    ap = extract32(attrs, 4, 2);
    xn = extract32(attrs, 12, 1);

    if (mmu_idx == ARMMMUIdx_S2NS) {
        ns = true;
        *prot = get_S2prot(env, ap, xn);
    } else {
        ns = extract32(attrs, 3, 1);
        pxn = extract32(attrs, 11, 1);
        *prot = get_S1prot(env, mmu_idx, aarch64, ap, ns, xn, pxn);
    }

    fault_type = ARMFault_Permission;
    if (!(*prot & (1 << access_type))) {
        goto do_fault;
    }

    if (ns) {
        /* The NS bit will (as required by the architecture) have no effect if
         * the CPU doesn't support TZ or this is a non-secure translation
         * regime, because the attribute will already be non-secure.
         */
        txattrs->secure = false;
    }

    if (cacheattrs != NULL) {
        if (mmu_idx == ARMMMUIdx_S2NS) {
            cacheattrs->attrs = convert_stage2_attrs(env,
                                                     extract32(attrs, 0, 4));
        } else {
            /* Index into MAIR registers for cache attributes */
            uint8_t attrindx = extract32(attrs, 0, 3);
            uint64_t mair = env->cp15.mair_el[regime_el(env, mmu_idx)];
            assert(attrindx <= 7);
            cacheattrs->attrs = extract64(mair, attrindx * 8, 8);
        }
        cacheattrs->shareability = extract32(attrs, 6, 2);
    }

    *phys_ptr = descaddr;
    *page_size_ptr = page_size;
    return false;

do_fault:
    fi->type = fault_type;
    fi->level = level;
    /* Tag the error as S2 for failed S1 PTW at S2 or ordinary S2 */
    fi->stage2 = fi->s1ptw || (mmu_idx == ARMMMUIdx_S2NS);
    return true;
}
9537
/* Fill in *prot with the default (background) memory-map permissions
 * for @address when no MPU region applies.
 */
static inline void get_phys_addr_pmsav7_default(CPUARMState *env,
                                                ARMMMUIdx mmu_idx,
                                                int32_t address, int *prot)
{
    if (!arm_feature(env, ARM_FEATURE_M)) {
        /* R profile: everything is RW; exec depends on the address range */
        *prot = PAGE_READ | PAGE_WRITE;
        switch (address) {
        case 0xF0000000 ... 0xFFFFFFFF:
            if (regime_sctlr(env, mmu_idx) & SCTLR_V) {
                /* hivecs execing is ok */
                *prot |= PAGE_EXEC;
            }
            break;
        case 0x00000000 ... 0x7FFFFFFF:
            *prot |= PAGE_EXEC;
            break;
        }
    } else {
        /* Default system address map for M profile cores.
         * The architecture specifies which regions are execute-never:
         * peripheral, device and system regions get RW only, the rest RWX.
         */
        switch (address) {
        case 0x00000000 ... 0x1fffffff: /* ROM */
        case 0x20000000 ... 0x3fffffff: /* SRAM */
        case 0x60000000 ... 0x7fffffff: /* RAM */
        case 0x80000000 ... 0x9fffffff: /* RAM */
            *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
            break;
        case 0x40000000 ... 0x5fffffff: /* Peripheral */
        case 0xa0000000 ... 0xbfffffff: /* Device */
        case 0xc0000000 ... 0xdfffffff: /* Device */
        case 0xe0000000 ... 0xffffffff: /* System */
            *prot = PAGE_READ | PAGE_WRITE;
            break;
        default:
            g_assert_not_reached();
        }
    }
}
9578
9579static bool pmsav7_use_background_region(ARMCPU *cpu,
9580 ARMMMUIdx mmu_idx, bool is_user)
9581{
9582
9583
9584
9585 CPUARMState *env = &cpu->env;
9586
9587 if (is_user) {
9588 return false;
9589 }
9590
9591 if (arm_feature(env, ARM_FEATURE_M)) {
9592 return env->v7m.mpu_ctrl[regime_is_secure(env, mmu_idx)]
9593 & R_V7M_MPU_CTRL_PRIVDEFENA_MASK;
9594 } else {
9595 return regime_sctlr(env, mmu_idx) & SCTLR_BR;
9596 }
9597}
9598
9599static inline bool m_is_ppb_region(CPUARMState *env, uint32_t address)
9600{
9601
9602 return arm_feature(env, ARM_FEATURE_M) &&
9603 extract32(address, 20, 12) == 0xe00;
9604}
9605
9606static inline bool m_is_system_region(CPUARMState *env, uint32_t address)
9607{
9608
9609
9610
9611 return arm_feature(env, ARM_FEATURE_M) && extract32(address, 29, 3) == 0x7;
9612}
9613
static bool get_phys_addr_pmsav7(CPUARMState *env, uint32_t address,
                                 MMUAccessType access_type, ARMMMUIdx mmu_idx,
                                 hwaddr *phys_ptr, int *prot,
                                 target_ulong *page_size,
                                 ARMMMUFaultInfo *fi)
{
    /* PMSAv7 MPU lookup: flat mapping (phys == virt), region search for
     * permissions. Returns true on a fault, with *fi filled in.
     */
    ARMCPU *cpu = arm_env_get_cpu(env);
    int n;
    bool is_user = regime_is_user(env, mmu_idx);

    *phys_ptr = address;
    *page_size = TARGET_PAGE_SIZE;
    *prot = 0;

    if (regime_translation_disabled(env, mmu_idx) ||
        m_is_ppb_region(env, address)) {
        /* MPU disabled or M profile PPB access: use default memory map.
         * The other case which uses the default memory map in the
         * v7M ARM ARM pseudocode is exception vector reads from the vector
         * table. In QEMU those accesses are done in arm_v7m_load_vector(),
         * which always does a direct read using address_space_ldl(), rather
         * than going via this function, so we don't need to check that here.
         */
        get_phys_addr_pmsav7_default(env, mmu_idx, address, prot);
    } else { /* MPU enabled: region search, highest-numbered match wins */
        for (n = (int)cpu->pmsav7_dregion - 1; n >= 0; n--) {
            /* region search */
            uint32_t base = env->pmsav7.drbar[n];
            uint32_t rsize = extract32(env->pmsav7.drsr[n], 1, 5);
            uint32_t rmask;
            bool srdis = false;

            if (!(env->pmsav7.drsr[n] & 0x1)) {
                /* Region disabled */
                continue;
            }

            if (!rsize) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "DRSR[%d]: Rsize field cannot be 0\n", n);
                continue;
            }
            rsize++;
            rmask = (1ull << rsize) - 1;

            if (base & rmask) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "DRBAR[%d]: 0x%" PRIx32 " misaligned "
                              "to DRSR region size, mask = 0x%" PRIx32 "\n",
                              n, base, rmask);
                continue;
            }

            if (address < base || address > base + rmask) {
                /* Address not in this region. We must check whether the
                 * region covers addresses in the same page as our address.
                 * In that case we must not report a size that covers the
                 * whole page for a subsequent hit against a different MPU
                 * region or the background region, because it would result
                 * in incorrect TLB hits for subsequent accesses to addresses
                 * that are in this MPU region.
                 */
                if (ranges_overlap(base, rmask,
                                   address & TARGET_PAGE_MASK,
                                   TARGET_PAGE_SIZE)) {
                    *page_size = 1;
                }
                continue;
            }

            /* Region matched */

            if (rsize >= 8) { /* no subregions for regions < 256 bytes */
                int i, snd;
                uint32_t srdis_mask;

                rsize -= 3; /* sub region size (power of 2) */
                snd = ((address - base) >> rsize) & 0x7;
                srdis = extract32(env->pmsav7.drsr[n], snd + 8, 1);

                srdis_mask = srdis ? 0x3 : 0x0;
                for (i = 2; i <= 8 && rsize < TARGET_PAGE_BITS; i *= 2) {
                    /* This will check in groups of 2, 4 and then 8, whether
                     * the subregion bits are consistent. rsize is incremented
                     * back up to give the region size, considering consistent
                     * adjacent subregions as one region. Stop testing if
                     * rsize is already big enough for an entire QEMU page.
                     */
                    int snd_rounded = snd & ~(i - 1);
                    uint32_t srdis_multi = extract32(env->pmsav7.drsr[n],
                                                     snd_rounded + 8, i);
                    if (srdis_mask ^ srdis_multi) {
                        break;
                    }
                    srdis_mask = (srdis_mask << i) | srdis_mask;
                    rsize++;
                }
            }
            if (srdis) {
                /* The matching subregion is disabled: keep searching */
                continue;
            }
            if (rsize < TARGET_PAGE_BITS) {
                *page_size = 1 << rsize;
            }
            break;
        }

        if (n == -1) { /* no hits */
            if (!pmsav7_use_background_region(cpu, mmu_idx, is_user)) {
                /* background fault */
                fi->type = ARMFault_Background;
                return true;
            }
            get_phys_addr_pmsav7_default(env, mmu_idx, address, prot);
        } else { /* a MPU hit! */
            uint32_t ap = extract32(env->pmsav7.dracr[n], 8, 3);
            uint32_t xn = extract32(env->pmsav7.dracr[n], 12, 1);

            if (m_is_system_region(env, address)) {
                /* System space is always execute never */
                xn = 1;
            }

            if (is_user) { /* User mode AP bit decoding */
                switch (ap) {
                case 0:
                case 1:
                case 5:
                    break; /* no access */
                case 3:
                    *prot |= PAGE_WRITE;
                    /* fall through */
                case 2:
                case 6:
                    *prot |= PAGE_READ | PAGE_EXEC;
                    break;
                case 7:
                    /* for v7M, same as 6; for R profile a reserved value */
                    if (arm_feature(env, ARM_FEATURE_M)) {
                        *prot |= PAGE_READ | PAGE_EXEC;
                        break;
                    }
                    /* fall through */
                default:
                    qemu_log_mask(LOG_GUEST_ERROR,
                                  "DRACR[%d]: Bad value for AP bits: 0x%"
                                  PRIx32 "\n", n, ap);
                }
            } else { /* Priv. mode AP bits decoding */
                switch (ap) {
                case 0:
                    break; /* no access */
                case 1:
                case 2:
                case 3:
                    *prot |= PAGE_WRITE;
                    /* fall through */
                case 5:
                case 6:
                    *prot |= PAGE_READ | PAGE_EXEC;
                    break;
                case 7:
                    /* for v7M, same as 6; for R profile a reserved value */
                    if (arm_feature(env, ARM_FEATURE_M)) {
                        *prot |= PAGE_READ | PAGE_EXEC;
                        break;
                    }
                    /* fall through */
                default:
                    qemu_log_mask(LOG_GUEST_ERROR,
                                  "DRACR[%d]: Bad value for AP bits: 0x%"
                                  PRIx32 "\n", n, ap);
                }
            }

            /* execute never */
            if (xn) {
                *prot &= ~PAGE_EXEC;
            }
        }
    }

    fi->type = ARMFault_Permission;
    fi->level = 1;
    /* QEMU cannot install executable TLB entries smaller than a target
     * page, so strip exec permission from sub-page regions.
     */
    if (*page_size < TARGET_PAGE_SIZE && (*prot & PAGE_EXEC)) {
        qemu_log_mask(LOG_UNIMP,
                      "MPU: No support for execution from regions "
                      "smaller than 1K\n");
        *prot &= ~PAGE_EXEC;
    }
    return !(*prot & (1 << access_type));
}
9811
9812static bool v8m_is_sau_exempt(CPUARMState *env,
9813 uint32_t address, MMUAccessType access_type)
9814{
9815
9816
9817
9818 return
9819 (access_type == MMU_INST_FETCH && m_is_system_region(env, address)) ||
9820 (address >= 0xe0000000 && address <= 0xe0002fff) ||
9821 (address >= 0xe000e000 && address <= 0xe000efff) ||
9822 (address >= 0xe002e000 && address <= 0xe002efff) ||
9823 (address >= 0xe0040000 && address <= 0xe0041fff) ||
9824 (address >= 0xe00ff000 && address <= 0xe00fffff);
9825}
9826
static void v8m_security_lookup(CPUARMState *env, uint32_t address,
                                MMUAccessType access_type, ARMMMUIdx mmu_idx,
                                V8M_SAttributes *sattrs)
{
    /* Look up the security attributes for this address, consulting both
     * the IDAU (if present) and the SAU region registers.
     * The caller is expected to have zero-initialized *sattrs.
     */
    ARMCPU *cpu = arm_env_get_cpu(env);
    int r;
    bool idau_exempt = false, idau_ns = true, idau_nsc = true;
    int idau_region = IREGION_NOTVALID;
    uint32_t addr_page_base = address & TARGET_PAGE_MASK;
    uint32_t addr_page_limit = addr_page_base + (TARGET_PAGE_SIZE - 1);

    if (cpu->idau) {
        IDAUInterfaceClass *iic = IDAU_INTERFACE_GET_CLASS(cpu->idau);
        IDAUInterface *ii = IDAU_INTERFACE(cpu->idau);

        iic->check(ii, address, &idau_region, &idau_exempt, &idau_ns,
                   &idau_nsc);
    }

    if (access_type == MMU_INST_FETCH && extract32(address, 28, 4) == 0xf) {
        /* 0xf0000000..0xffffffff is always Secure for insn fetches:
         * leave *sattrs at its zero-initialized (Secure) state.
         */
        return;
    }

    if (idau_exempt || v8m_is_sau_exempt(env, address, access_type)) {
        /* Exempt addresses take the security state of the current regime */
        sattrs->ns = !regime_is_secure(env, mmu_idx);
        return;
    }

    if (idau_region != IREGION_NOTVALID) {
        sattrs->irvalid = true;
        sattrs->iregion = idau_region;
    }

    switch (env->sau.ctrl & 3) {
    case 0: /* SAU.ENABLE == 0, SAU.ALLNS == 0: everything Secure */
        break;
    case 2: /* SAU.ENABLE == 0, SAU.ALLNS == 1: everything NonSecure */
        sattrs->ns = true;
        break;
    default: /* SAU.ENABLE == 1: search the SAU regions */
        for (r = 0; r < cpu->sau_sregion; r++) {
            if (env->sau.rlar[r] & 1) {
                /* Region enabled: base is [31:5] of RBAR with low bits 0,
                 * limit is [31:5] of RLAR with low bits 1.
                 */
                uint32_t base = env->sau.rbar[r] & ~0x1f;
                uint32_t limit = env->sau.rlar[r] | 0x1f;

                if (base <= address && limit >= address) {
                    if (base > addr_page_base || limit < addr_page_limit) {
                        /* Region boundary lies inside this page */
                        sattrs->subpage = true;
                    }
                    if (sattrs->srvalid) {
                        /* If we hit in more than one region then we must
                         * report as Secure, not NS-Callable, with no valid
                         * region number info.
                         */
                        sattrs->ns = false;
                        sattrs->nsc = false;
                        sattrs->sregion = 0;
                        sattrs->srvalid = false;
                        break;
                    } else {
                        if (env->sau.rlar[r] & 2) {
                            sattrs->nsc = true;
                        } else {
                            sattrs->ns = true;
                        }
                        sattrs->srvalid = true;
                        sattrs->sregion = r;
                    }
                } else {
                    /* Address not in this region. We must check whether the
                     * region covers addresses in the same page as our
                     * address. In that case we must not report a size that
                     * covers the whole page for a subsequent hit against a
                     * different region, because it would result in incorrect
                     * TLB hits for subsequent accesses to addresses that are
                     * in this SAU region.
                     */
                    if (limit >= base &&
                        ranges_overlap(base, limit - base + 1,
                                       addr_page_base,
                                       TARGET_PAGE_SIZE)) {
                        sattrs->subpage = true;
                    }
                }
            }
        }

        /* The IDAU overrides the SAU lookup results if it specifies
         * higher security than the SAU does.
         */
        if (!idau_ns) {
            if (sattrs->ns || (!idau_nsc && sattrs->nsc)) {
                sattrs->ns = false;
                sattrs->nsc = idau_nsc;
            }
        }
        break;
    }
}
9932
static bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address,
                              MMUAccessType access_type, ARMMMUIdx mmu_idx,
                              hwaddr *phys_ptr, MemTxAttrs *txattrs,
                              int *prot, bool *is_subpage,
                              ARMMMUFaultInfo *fi, uint32_t *mregion)
{
    /* Perform a v8M MPU lookup (without the SAU check, which the caller
     * must do separately via v8m_security_lookup()).
     * *mregion (if non-NULL) is set to the region number that matched,
     * or -1 if no valid region number applies (MPU off, no hit, or
     * multiple hits). *is_subpage is set true if the matching region
     * does not cover the entire TARGET_PAGE containing the address.
     * Returns true on a fault, with *fi filled in.
     */
    ARMCPU *cpu = arm_env_get_cpu(env);
    bool is_user = regime_is_user(env, mmu_idx);
    uint32_t secure = regime_is_secure(env, mmu_idx);
    int n;
    int matchregion = -1;
    bool hit = false;
    uint32_t addr_page_base = address & TARGET_PAGE_MASK;
    uint32_t addr_page_limit = addr_page_base + (TARGET_PAGE_SIZE - 1);

    *is_subpage = false;
    *phys_ptr = address;
    *prot = 0;
    if (mregion) {
        *mregion = -1;
    }

    /* Unlike the ARM ARM pseudocode, we don't need to check here whether
     * this was an exception vector read from the vector table (which is
     * always done using the default system address map), because those
     * accesses are done directly in arm_v7m_load_vector() rather than
     * going via this function.
     */
    if (regime_translation_disabled(env, mmu_idx)) {
        /* MPU disabled: default map, treated as a hit */
        hit = true;
    } else if (m_is_ppb_region(env, address)) {
        /* PPB accesses always use the default map */
        hit = true;
    } else if (pmsav7_use_background_region(cpu, mmu_idx, is_user)) {
        hit = true;
    } else {
        for (n = (int)cpu->pmsav7_dregion - 1; n >= 0; n--) {
            /* region search.
             * Note that the base address is bits [31:5] from the register
             * with bits [4:0] all zeroes, but the limit address is bits
             * [31:5] from the register with bits [4:0] all ones.
             */
            uint32_t base = env->pmsav8.rbar[secure][n] & ~0x1f;
            uint32_t limit = env->pmsav8.rlar[secure][n] | 0x1f;

            if (!(env->pmsav8.rlar[secure][n] & 0x1)) {
                /* Region disabled */
                continue;
            }

            if (address < base || address > limit) {
                /* Address not in this region. We must check whether the
                 * region covers addresses in the same page as our address.
                 * In that case we must not report a size that covers the
                 * whole page for a subsequent hit against a different MPU
                 * region or the background region, because it would result
                 * in incorrect TLB hits for subsequent accesses to addresses
                 * that are in this MPU region.
                 */
                if (limit >= base &&
                    ranges_overlap(base, limit - base + 1,
                                   addr_page_base,
                                   TARGET_PAGE_SIZE)) {
                    *is_subpage = true;
                }
                continue;
            }

            if (base > addr_page_base || limit < addr_page_limit) {
                /* Region boundary lies inside this page */
                *is_subpage = true;
            }

            if (hit) {
                /* Multiple regions match -- always a failure (unlike
                 * PMSAv7 where highest-numbered-region wins)
                 */
                fi->type = ARMFault_Permission;
                fi->level = 1;
                return true;
            }

            matchregion = n;
            hit = true;
        }
    }

    if (!hit) {
        /* background fault */
        fi->type = ARMFault_Background;
        return true;
    }

    if (matchregion == -1) {
        /* hit using the background region */
        get_phys_addr_pmsav7_default(env, mmu_idx, address, prot);
    } else {
        uint32_t ap = extract32(env->pmsav8.rbar[secure][matchregion], 1, 2);
        uint32_t xn = extract32(env->pmsav8.rbar[secure][matchregion], 0, 1);

        if (m_is_system_region(env, address)) {
            /* System space is always execute never */
            xn = 1;
        }

        *prot = simple_ap_to_rw_prot(env, mmu_idx, ap);
        if (*prot && !xn) {
            *prot |= PAGE_EXEC;
        }
        /* We don't need to look the attribute up in the MAIR0/MAIR1
         * registers because that only tells us about cacheability.
         */
        if (mregion) {
            *mregion = matchregion;
        }
    }

    fi->type = ARMFault_Permission;
    fi->level = 1;
    /* QEMU cannot install executable TLB entries smaller than a target
     * page, so strip exec permission from sub-page regions.
     */
    if (*is_subpage && (*prot & PAGE_EXEC)) {
        qemu_log_mask(LOG_UNIMP,
                      "MPU: No support for execution from regions "
                      "smaller than 1K\n");
        *prot &= ~PAGE_EXEC;
    }
    return !(*prot & (1 << access_type));
}
10073
10074
10075static bool get_phys_addr_pmsav8(CPUARMState *env, uint32_t address,
10076 MMUAccessType access_type, ARMMMUIdx mmu_idx,
10077 hwaddr *phys_ptr, MemTxAttrs *txattrs,
10078 int *prot, target_ulong *page_size,
10079 ARMMMUFaultInfo *fi)
10080{
10081 uint32_t secure = regime_is_secure(env, mmu_idx);
10082 V8M_SAttributes sattrs = {};
10083 bool ret;
10084 bool mpu_is_subpage;
10085
10086 if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
10087 v8m_security_lookup(env, address, access_type, mmu_idx, &sattrs);
10088 if (access_type == MMU_INST_FETCH) {
10089
10090
10091
10092
10093
10094
10095
10096
10097
10098
10099
10100
10101
10102
10103
10104
10105
10106 if (sattrs.ns != !secure) {
10107 if (sattrs.nsc) {
10108 fi->type = ARMFault_QEMU_NSCExec;
10109 } else {
10110 fi->type = ARMFault_QEMU_SFault;
10111 }
10112 *page_size = sattrs.subpage ? 1 : TARGET_PAGE_SIZE;
10113 *phys_ptr = address;
10114 *prot = 0;
10115 return true;
10116 }
10117 } else {
10118
10119
10120
10121
10122 if (sattrs.ns) {
10123 txattrs->secure = false;
10124 } else if (!secure) {
10125
10126
10127
10128
10129
10130
10131
10132
10133
10134 fi->type = ARMFault_QEMU_SFault;
10135 *page_size = sattrs.subpage ? 1 : TARGET_PAGE_SIZE;
10136 *phys_ptr = address;
10137 *prot = 0;
10138 return true;
10139 }
10140 }
10141 }
10142
10143 ret = pmsav8_mpu_lookup(env, address, access_type, mmu_idx, phys_ptr,
10144 txattrs, prot, &mpu_is_subpage, fi, NULL);
10145
10146
10147
10148
10149
10150
10151
10152
10153
10154 if (*prot & PAGE_EXEC) {
10155 sattrs.subpage = false;
10156 }
10157 *page_size = sattrs.subpage || mpu_is_subpage ? 1 : TARGET_PAGE_SIZE;
10158 return ret;
10159}
10160
static bool get_phys_addr_pmsav5(CPUARMState *env, uint32_t address,
                                 MMUAccessType access_type, ARMMMUIdx mmu_idx,
                                 hwaddr *phys_ptr, int *prot,
                                 ARMMMUFaultInfo *fi)
{
    /* Pre-v7 (PMSAv5) MPU lookup: flat mapping, eight regions defined
     * by the c6 region registers, permissions from the c5 AP registers.
     * Returns true on a fault, with *fi filled in.
     */
    int n;
    uint32_t mask;
    uint32_t base;
    bool is_user = regime_is_user(env, mmu_idx);

    if (regime_translation_disabled(env, mmu_idx)) {
        /* MPU disabled. */
        *phys_ptr = address;
        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        return false;
    }

    *phys_ptr = address;
    /* Highest-numbered matching region wins */
    for (n = 7; n >= 0; n--) {
        base = env->cp15.c6_region[n];
        if ((base & 1) == 0) {
            /* Region disabled (bit 0 of the region register is the enable) */
            continue;
        }
        mask = 1 << ((base >> 1) & 0x1f);
        /* Keep this shift separate from the above to avoid an
         * (undefined) << 32.
         */
        mask = (mask << 1) - 1;
        if (((base ^ address) & ~mask) == 0) {
            break;
        }
    }
    if (n < 0) {
        /* No region matched: background fault */
        fi->type = ARMFault_Background;
        return true;
    }

    /* Select the instruction or data AP register, then the nibble
     * holding this region's access permissions.
     */
    if (access_type == MMU_INST_FETCH) {
        mask = env->cp15.pmsav5_insn_ap;
    } else {
        mask = env->cp15.pmsav5_data_ap;
    }
    mask = (mask >> (n * 4)) & 0xf;
    switch (mask) {
    case 0:
        /* No access for anyone */
        fi->type = ARMFault_Permission;
        fi->level = 1;
        return true;
    case 1:
        /* Privileged RW only */
        if (is_user) {
            fi->type = ARMFault_Permission;
            fi->level = 1;
            return true;
        }
        *prot = PAGE_READ | PAGE_WRITE;
        break;
    case 2:
        /* Privileged RW, user RO */
        *prot = PAGE_READ;
        if (!is_user) {
            *prot |= PAGE_WRITE;
        }
        break;
    case 3:
        /* RW for everyone */
        *prot = PAGE_READ | PAGE_WRITE;
        break;
    case 5:
        /* Privileged RO only */
        if (is_user) {
            fi->type = ARMFault_Permission;
            fi->level = 1;
            return true;
        }
        *prot = PAGE_READ;
        break;
    case 6:
        /* RO for everyone */
        *prot = PAGE_READ;
        break;
    default:
        /* Bad permission. */
        fi->type = ARMFault_Permission;
        fi->level = 1;
        return true;
    }
    /* No XN bit in this format: anything readable is executable */
    *prot |= PAGE_EXEC;
    return false;
}
10245
10246
10247
10248
10249
10250
10251
10252
/* Combine one (inner or outer) half of the stage 1 and stage 2 memory
 * attributes, returning the resulting 4-bit attribute nibble.
 */
static uint8_t combine_cacheattr_nibble(uint8_t s1, uint8_t s2)
{
    uint8_t s1_cache = (s1 >> 2) & 3;
    uint8_t s2_cache = (s2 >> 2) & 3;

    if (s1 == 4 || s2 == 4) {
        /* non-cacheable has precedence */
        return 4;
    }
    if (s1_cache == 0 || s1_cache == 2) {
        /* stage 1 write through takes precedence */
        return s1;
    }
    if (s2_cache == 2) {
        /* stage 2 write through takes precedence, but the allocation hint
         * is still taken from stage 1
         */
        return (2 << 2) | (s1 & 3);
    }
    /* write back */
    return s1;
}
10270
10271
10272
10273
10274
10275
10276
10277static ARMCacheAttrs combine_cacheattrs(ARMCacheAttrs s1, ARMCacheAttrs s2)
10278{
10279 uint8_t s1lo = extract32(s1.attrs, 0, 4), s2lo = extract32(s2.attrs, 0, 4);
10280 uint8_t s1hi = extract32(s1.attrs, 4, 4), s2hi = extract32(s2.attrs, 4, 4);
10281 ARMCacheAttrs ret;
10282
10283
10284 if (s1.shareability == 2 || s2.shareability == 2) {
10285
10286 ret.shareability = 2;
10287 } else if (s1.shareability == 3 || s2.shareability == 3) {
10288
10289 ret.shareability = 3;
10290 } else {
10291
10292 ret.shareability = 0;
10293 }
10294
10295
10296 if (s1hi == 0 || s2hi == 0) {
10297
10298 if (s1lo == 0 || s2lo == 0) {
10299
10300 ret.attrs = 0;
10301 } else if (s1lo == 4 || s2lo == 4) {
10302
10303 ret.attrs = 4;
10304 } else if (s1lo == 8 || s2lo == 8) {
10305
10306 ret.attrs = 8;
10307 } else {
10308 ret.attrs = 0xc;
10309 }
10310
10311
10312
10313
10314 ret.shareability = 2;
10315 } else {
10316
10317 ret.attrs = combine_cacheattr_nibble(s1hi, s2hi) << 4
10318 | combine_cacheattr_nibble(s1lo, s2lo);
10319
10320 if (ret.attrs == 0x44) {
10321
10322
10323
10324
10325 ret.shareability = 2;
10326 }
10327 }
10328
10329 return ret;
10330}
10331
10332
10333
10334
10335
10336
10337
10338
10339
10340
10341
10342
10343
10344
10345
10346
10347
10348
10349
10350
10351
10352
10353
10354
10355
10356
10357
10358
/*
 * get_phys_addr: translate a virtual address to a physical address.
 *
 * Performs a translation table walk (VMSA CPUs) or an MPU lookup
 * (PMSA CPUs) for the translation regime identified by @mmu_idx.
 *
 * Returns false (zero) on success, in which case *phys_ptr, *attrs,
 * *prot and *page_size have been filled in.  On failure it returns
 * true and leaves the fault details in *fi.
 * @cacheattrs may be NULL if the caller does not need cacheability
 * information about the mapping.
 */
static bool get_phys_addr(CPUARMState *env, target_ulong address,
                          MMUAccessType access_type, ARMMMUIdx mmu_idx,
                          hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot,
                          target_ulong *page_size,
                          ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs)
{
    if (mmu_idx == ARMMMUIdx_S12NSE0 || mmu_idx == ARMMMUIdx_S12NSE1) {
        /* This is a stage 1 + stage 2 regime: do both translations by
         * calling ourselves recursively for the stage 1 part.
         */
        if (arm_feature(env, ARM_FEATURE_EL2)) {
            hwaddr ipa;
            int s2_prot;
            int ret;
            ARMCacheAttrs cacheattrs2 = {};

            /* Stage 1: virtual address -> intermediate physical address */
            ret = get_phys_addr(env, address, access_type,
                                stage_1_mmu_idx(mmu_idx), &ipa, attrs,
                                prot, page_size, fi, cacheattrs);

            /* If S1 fails or S2 is disabled, return early.  */
            if (ret || regime_translation_disabled(env, ARMMMUIdx_S2NS)) {
                *phys_ptr = ipa;
                return ret;
            }

            /* Stage 2: IPA -> physical address */
            ret = get_phys_addr_lpae(env, ipa, access_type, ARMMMUIdx_S2NS,
                                     phys_ptr, attrs, &s2_prot,
                                     page_size, fi,
                                     cacheattrs != NULL ? &cacheattrs2 : NULL);
            fi->s2addr = ipa;
            /* Combine the S1 and S2 permissions.  */
            *prot &= s2_prot;

            /* Combine the S1 and S2 cache attributes, if needed.  */
            if (!ret && cacheattrs != NULL) {
                *cacheattrs = combine_cacheattrs(*cacheattrs, cacheattrs2);
            }

            return ret;
        } else {
            /* No EL2 present: a "stage 1+2" translation is just the
             * stage 1 part, so fold the index down and fall through.
             */
            mmu_idx = stage_1_mmu_idx(mmu_idx);
        }
    }

    /* Default transaction attributes for the regime; the lookups
     * below may modify *attrs further (e.g. the v8M security
     * attribution done by get_phys_addr_pmsav8()).
     */
    attrs->secure = regime_is_secure(env, mmu_idx);
    attrs->user = regime_is_user(env, mmu_idx);

    /* Fast Context Switch Extension: remap low virtual addresses via
     * the FCSEIDR.  Not applied for stage 2 lookups or on v8 CPUs.
     */
    if (address < 0x02000000 && mmu_idx != ARMMMUIdx_S2NS
        && !arm_feature(env, ARM_FEATURE_V8)) {
        if (regime_el(env, mmu_idx) == 3) {
            address += env->cp15.fcseidr_s;
        } else {
            address += env->cp15.fcseidr_ns;
        }
    }

    if (arm_feature(env, ARM_FEATURE_PMSA)) {
        bool ret;
        *page_size = TARGET_PAGE_SIZE;

        if (arm_feature(env, ARM_FEATURE_V8)) {
            /* PMSAv8 */
            ret = get_phys_addr_pmsav8(env, address, access_type, mmu_idx,
                                       phys_ptr, attrs, prot, page_size, fi);
        } else if (arm_feature(env, ARM_FEATURE_V7)) {
            /* PMSAv7 */
            ret = get_phys_addr_pmsav7(env, address, access_type, mmu_idx,
                                       phys_ptr, prot, page_size, fi);
        } else {
            /* Pre-v7 MPU */
            ret = get_phys_addr_pmsav5(env, address, access_type, mmu_idx,
                                       phys_ptr, prot, fi);
        }
        qemu_log_mask(CPU_LOG_MMU, "PMSA MPU lookup for %s at 0x%08" PRIx32
                      " mmu_idx %u -> %s (prot %c%c%c)\n",
                      access_type == MMU_DATA_LOAD ? "reading" :
                      (access_type == MMU_DATA_STORE ? "writing" : "execute"),
                      (uint32_t)address, mmu_idx,
                      ret ? "Miss" : "Hit",
                      *prot & PAGE_READ ? 'r' : '-',
                      *prot & PAGE_WRITE ? 'w' : '-',
                      *prot & PAGE_EXEC ? 'x' : '-');

        return ret;
    }

    /* Definitely a real MMU, not an MPU.  If translation is disabled,
     * use a flat identity mapping with full access permissions.
     */
    if (regime_translation_disabled(env, mmu_idx)) {
        /* MMU disabled.  */
        *phys_ptr = address;
        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        *page_size = TARGET_PAGE_SIZE;
        return 0;
    }

    if (regime_using_lpae_format(env, mmu_idx)) {
        return get_phys_addr_lpae(env, address, access_type, mmu_idx,
                                  phys_ptr, attrs, prot, page_size,
                                  fi, cacheattrs);
    } else if (regime_sctlr(env, mmu_idx) & SCTLR_XP) {
        /* v6-format short descriptors */
        return get_phys_addr_v6(env, address, access_type, mmu_idx,
                                phys_ptr, attrs, prot, page_size, fi);
    } else {
        /* v5-format (legacy) short descriptors */
        return get_phys_addr_v5(env, address, access_type, mmu_idx,
                                phys_ptr, prot, page_size, fi);
    }
}
10479
10480
10481
10482
10483
/*
 * arm_tlb_fill: handle a softmmu TLB miss for @address.
 *
 * Translates the address and on success installs the mapping into the
 * TLB and returns 0.  On failure it returns the nonzero value from
 * get_phys_addr() and leaves the fault details in *fi for the caller
 * to raise the guest exception.
 */
bool arm_tlb_fill(CPUState *cs, vaddr address,
                  MMUAccessType access_type, int mmu_idx,
                  ARMMMUFaultInfo *fi)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    hwaddr phys_addr;
    target_ulong page_size;
    int prot;
    int ret;
    MemTxAttrs attrs = {};

    ret = get_phys_addr(env, address, access_type,
                        core_to_arm_mmu_idx(env, mmu_idx), &phys_addr,
                        &attrs, &prot, &page_size, fi, NULL);
    if (!ret) {
        /* Map a single [sub]page.  For mappings of at least a full
         * target page we align both addresses down; smaller mappings
         * are passed through unmasked (presumably handled as subpage
         * entries by tlb_set_page_with_attrs() -- confirm).
         */
        if (page_size >= TARGET_PAGE_SIZE) {
            phys_addr &= TARGET_PAGE_MASK;
            address &= TARGET_PAGE_MASK;
        }
        tlb_set_page_with_attrs(cs, address, phys_addr, attrs,
                                prot, mmu_idx, page_size);
        return 0;
    }

    return ret;
}
10516
10517hwaddr arm_cpu_get_phys_page_attrs_debug(CPUState *cs, vaddr addr,
10518 MemTxAttrs *attrs)
10519{
10520 ARMCPU *cpu = ARM_CPU(cs);
10521 CPUARMState *env = &cpu->env;
10522 hwaddr phys_addr;
10523 target_ulong page_size;
10524 int prot;
10525 bool ret;
10526 ARMMMUFaultInfo fi = {};
10527 ARMMMUIdx mmu_idx = core_to_arm_mmu_idx(env, cpu_mmu_index(env, false));
10528
10529 *attrs = (MemTxAttrs) {};
10530
10531 ret = get_phys_addr(env, addr, 0, mmu_idx, &phys_addr,
10532 attrs, &prot, &page_size, &fi, NULL);
10533
10534 if (ret) {
10535 return -1;
10536 }
10537 return phys_addr;
10538}
10539
/*
 * Implement the v7M/v8M MRS (read special register) instruction.
 * @reg is the SYSm field; 0x80+ values are the v8M Security Extension
 * aliases for Non-secure banked registers.  Unknown registers log a
 * guest error and read as zero.
 */
uint32_t HELPER(v7m_mrs)(CPUARMState *env, uint32_t reg)
{
    uint32_t mask;
    unsigned el = arm_current_el(env);

    /* First handle registers which unprivileged code can read.  */
    switch (reg) {
    case 0 ... 7: /* xPSR sub-fields */
        mask = 0;
        if ((reg & 1) && el) {
            mask |= XPSR_EXCP; /* IPSR (unprivileged reads it as zero) */
        }
        if (!(reg & 4)) {
            mask |= XPSR_NZCV | XPSR_Q; /* APSR */
        }
        /* EPSR reads as zero */
        return xpsr_read(env) & mask;
        break;
    case 20: /* CONTROL */
        return env->v7m.control[env->v7m.secure];
    case 0x94: /* CONTROL_NS */
        /* Handled before the EL0 check because unprivileged Secure
         * code may read it; it is RAZ from Non-secure state.
         */
        if (!env->v7m.secure) {
            return 0;
        }
        return env->v7m.control[M_REG_NS];
    }

    if (el == 0) {
        return 0; /* unprivileged reads others as zero */
    }

    if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
        /* NS-banked aliases: all RAZ unless we are in Secure state.  */
        switch (reg) {
        case 0x88: /* MSP_NS */
            if (!env->v7m.secure) {
                return 0;
            }
            return env->v7m.other_ss_msp;
        case 0x89: /* PSP_NS */
            if (!env->v7m.secure) {
                return 0;
            }
            return env->v7m.other_ss_psp;
        case 0x8a: /* MSPLIM_NS */
            if (!env->v7m.secure) {
                return 0;
            }
            return env->v7m.msplim[M_REG_NS];
        case 0x8b: /* PSPLIM_NS */
            if (!env->v7m.secure) {
                return 0;
            }
            return env->v7m.psplim[M_REG_NS];
        case 0x90: /* PRIMASK_NS */
            if (!env->v7m.secure) {
                return 0;
            }
            return env->v7m.primask[M_REG_NS];
        case 0x91: /* BASEPRI_NS */
            if (!env->v7m.secure) {
                return 0;
            }
            return env->v7m.basepri[M_REG_NS];
        case 0x93: /* FAULTMASK_NS */
            if (!env->v7m.secure) {
                return 0;
            }
            return env->v7m.faultmask[M_REG_NS];
        case 0x98: /* SP_NS */
        {
            /* The currently-selected Non-secure SP: MSP in handler
             * mode, otherwise MSP or PSP according to CONTROL_NS.SPSEL.
             */
            bool spsel = env->v7m.control[M_REG_NS] & R_V7M_CONTROL_SPSEL_MASK;

            if (!env->v7m.secure) {
                return 0;
            }
            if (!arm_v7m_is_handler_mode(env) && spsel) {
                return env->v7m.other_ss_psp;
            } else {
                return env->v7m.other_ss_msp;
            }
        }
        default:
            break;
        }
    }

    switch (reg) {
    case 8: /* MSP */
        return v7m_using_psp(env) ? env->v7m.other_sp : env->regs[13];
    case 9: /* PSP */
        return v7m_using_psp(env) ? env->regs[13] : env->v7m.other_sp;
    case 10: /* MSPLIM (v8M only) */
        if (!arm_feature(env, ARM_FEATURE_V8)) {
            goto bad_reg;
        }
        return env->v7m.msplim[env->v7m.secure];
    case 11: /* PSPLIM (v8M only) */
        if (!arm_feature(env, ARM_FEATURE_V8)) {
            goto bad_reg;
        }
        return env->v7m.psplim[env->v7m.secure];
    case 16: /* PRIMASK */
        return env->v7m.primask[env->v7m.secure];
    case 17: /* BASEPRI */
    case 18: /* BASEPRI_MAX (reads the same as BASEPRI) */
        return env->v7m.basepri[env->v7m.secure];
    case 19: /* FAULTMASK */
        return env->v7m.faultmask[env->v7m.secure];
    default:
    bad_reg:
        qemu_log_mask(LOG_GUEST_ERROR, "Attempt to read unknown special"
                      " register %d\n", reg);
        return 0;
    }
}
10662
/*
 * Implement the v7M/v8M MSR (write special register) instruction.
 * @maskreg is a combined argument: bits [7:0] are the SYSm register
 * number and bits [11:8] are the instruction's mask field, which for
 * the xPSR registers selects which sub-fields are written.
 * Writes to unknown registers log a guest error and are ignored.
 */
void HELPER(v7m_msr)(CPUARMState *env, uint32_t maskreg, uint32_t val)
{
    uint32_t mask = extract32(maskreg, 8, 4);
    uint32_t reg = extract32(maskreg, 0, 8);

    if (arm_current_el(env) == 0 && reg > 7) {
        /* only xPSR sub-fields may be written by unprivileged code */
        return;
    }

    if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
        /* NS-banked aliases: all writes ignored unless in Secure state.  */
        switch (reg) {
        case 0x88: /* MSP_NS */
            if (!env->v7m.secure) {
                return;
            }
            env->v7m.other_ss_msp = val;
            return;
        case 0x89: /* PSP_NS */
            if (!env->v7m.secure) {
                return;
            }
            env->v7m.other_ss_psp = val;
            return;
        case 0x8a: /* MSPLIM_NS; stack limits are 8-byte aligned */
            if (!env->v7m.secure) {
                return;
            }
            env->v7m.msplim[M_REG_NS] = val & ~7;
            return;
        case 0x8b: /* PSPLIM_NS */
            if (!env->v7m.secure) {
                return;
            }
            env->v7m.psplim[M_REG_NS] = val & ~7;
            return;
        case 0x90: /* PRIMASK_NS */
            if (!env->v7m.secure) {
                return;
            }
            env->v7m.primask[M_REG_NS] = val & 1;
            return;
        case 0x91: /* BASEPRI_NS */
            if (!env->v7m.secure) {
                return;
            }
            env->v7m.basepri[M_REG_NS] = val & 0xff;
            return;
        case 0x93: /* FAULTMASK_NS */
            if (!env->v7m.secure) {
                return;
            }
            env->v7m.faultmask[M_REG_NS] = val & 1;
            return;
        case 0x94: /* CONTROL_NS */
            if (!env->v7m.secure) {
                return;
            }
            /* Update SPSEL for the NS state via the helper (which
             * deals with the possible stack-pointer switch), then
             * copy the nPRIV bit across directly.
             */
            write_v7m_control_spsel_for_secstate(env,
                                                 val & R_V7M_CONTROL_SPSEL_MASK,
                                                 M_REG_NS);
            env->v7m.control[M_REG_NS] &= ~R_V7M_CONTROL_NPRIV_MASK;
            env->v7m.control[M_REG_NS] |= val & R_V7M_CONTROL_NPRIV_MASK;
            return;
        case 0x98: /* SP_NS */
        {
            /* Write the currently-selected Non-secure SP: MSP in
             * handler mode, otherwise per CONTROL_NS.SPSEL.
             */
            bool spsel = env->v7m.control[M_REG_NS] & R_V7M_CONTROL_SPSEL_MASK;

            if (!env->v7m.secure) {
                return;
            }
            if (!arm_v7m_is_handler_mode(env) && spsel) {
                env->v7m.other_ss_psp = val;
            } else {
                env->v7m.other_ss_msp = val;
            }
            return;
        }
        default:
            break;
        }
    }

    switch (reg) {
    case 0 ... 7: /* xPSR sub-fields */
        /* only APSR is actually writable */
        if (!(reg & 4)) {
            uint32_t apsrmask = 0;

            if (mask & 8) {
                apsrmask |= XPSR_NZCV | XPSR_Q;
            }
            if ((mask & 4) && arm_feature(env, ARM_FEATURE_THUMB_DSP)) {
                apsrmask |= XPSR_GE;
            }
            xpsr_write(env, val, apsrmask);
        }
        break;
    case 8: /* MSP */
        if (v7m_using_psp(env)) {
            env->v7m.other_sp = val;
        } else {
            env->regs[13] = val;
        }
        break;
    case 9: /* PSP */
        if (v7m_using_psp(env)) {
            env->regs[13] = val;
        } else {
            env->v7m.other_sp = val;
        }
        break;
    case 10: /* MSPLIM (v8M only) */
        if (!arm_feature(env, ARM_FEATURE_V8)) {
            goto bad_reg;
        }
        env->v7m.msplim[env->v7m.secure] = val & ~7;
        break;
    case 11: /* PSPLIM (v8M only) */
        if (!arm_feature(env, ARM_FEATURE_V8)) {
            goto bad_reg;
        }
        env->v7m.psplim[env->v7m.secure] = val & ~7;
        break;
    case 16: /* PRIMASK */
        env->v7m.primask[env->v7m.secure] = val & 1;
        break;
    case 17: /* BASEPRI */
        env->v7m.basepri[env->v7m.secure] = val & 0xff;
        break;
    case 18: /* BASEPRI_MAX: only takes effect if it raises the
               * priority, i.e. sets a lower nonzero value than the
               * current BASEPRI, or BASEPRI is currently disabled (0).
               */
        val &= 0xff;
        if (val != 0 && (val < env->v7m.basepri[env->v7m.secure]
                         || env->v7m.basepri[env->v7m.secure] == 0)) {
            env->v7m.basepri[env->v7m.secure] = val;
        }
        break;
    case 20: /* CONTROL */
        /* SPSEL is only updated in thread mode, except on v8M where
         * write_v7m_control_spsel() is called unconditionally
         * (presumably it handles the handler-mode case itself --
         * confirm against its definition).  nPRIV is always written.
         */
        if (arm_feature(env, ARM_FEATURE_V8) ||
            !arm_v7m_is_handler_mode(env)) {
            write_v7m_control_spsel(env, (val & R_V7M_CONTROL_SPSEL_MASK) != 0);
        }
        env->v7m.control[env->v7m.secure] &= ~R_V7M_CONTROL_NPRIV_MASK;
        env->v7m.control[env->v7m.secure] |= val & R_V7M_CONTROL_NPRIV_MASK;
        break;
    default:
    bad_reg:
        qemu_log_mask(LOG_GUEST_ERROR, "Attempt to write unknown special"
                      " register %d\n", reg);
        return;
    }
}
10835
/*
 * Implement the v8M TT (test target) instruction family.
 * @op encodes the variant: bit 0 set forces an unprivileged lookup
 * (TTT/TTAT), bit 1 set queries the alternate security state
 * (TTA/TTAT).  The return value is the TT response word with the
 * MPU region and security attribution fields.
 */
uint32_t HELPER(v7m_tt)(CPUARMState *env, uint32_t addr, uint32_t op)
{
    bool forceunpriv = op & 1;
    bool alt = op & 2;
    V8M_SAttributes sattrs = {};
    uint32_t tt_resp;
    bool r, rw, nsr, nsrw, mrvalid;
    int prot;
    ARMMMUFaultInfo fi = {};
    MemTxAttrs attrs = {};
    hwaddr phys_addr;
    ARMMMUIdx mmu_idx;
    uint32_t mregion;
    bool targetpriv;
    bool targetsec = env->v7m.secure;
    bool is_subpage;

    /* Work out which security state and privilege level the query
     * applies to.
     */
    if (alt) {
        targetsec = !targetsec;
    }

    if (forceunpriv) {
        targetpriv = false;
    } else {
        targetpriv = arm_v7m_is_handler_mode(env) ||
            !(env->v7m.control[targetsec] & R_V7M_CONTROL_NPRIV_MASK);
    }

    /* ...and then which MMU index that corresponds to.  */
    mmu_idx = arm_v7m_mmu_idx_for_secstate_and_priv(env, targetsec, targetpriv);

    /* The MPU region information is only reported when the caller is
     * privileged or used the alternate-state (TTA/TTAT) form;
     * otherwise MRVALID/MREGION/R/RW read as zero.
     */
    if (arm_current_el(env) != 0 || alt) {
        /* The lookup's fault return value is ignored; we only need
         * prot and mregion here.
         */
        pmsav8_mpu_lookup(env, addr, MMU_DATA_LOAD, mmu_idx,
                          &phys_addr, &attrs, &prot, &is_subpage,
                          &fi, &mregion);
        if (mregion == -1) {
            mrvalid = false;
            mregion = 0;
        } else {
            mrvalid = true;
        }
        r = prot & PAGE_READ;
        rw = prot & PAGE_WRITE;
    } else {
        r = false;
        rw = false;
        mrvalid = false;
        mregion = 0;
    }

    /* Security attribution is only reported from Secure state.  */
    if (env->v7m.secure) {
        v8m_security_lookup(env, addr, MMU_DATA_LOAD, mmu_idx, &sattrs);
        nsr = sattrs.ns && r;
        nsrw = sattrs.ns && rw;
    } else {
        sattrs.ns = true;
        nsr = false;
        nsrw = false;
    }

    /* Assemble the TT response word (see the field layout in the v8M
     * architecture's description of the TT instruction).
     */
    tt_resp = (sattrs.iregion << 24) |
        (sattrs.irvalid << 23) |
        ((!sattrs.ns) << 22) |
        (nsrw << 21) |
        (nsr << 20) |
        (rw << 19) |
        (r << 18) |
        (sattrs.srvalid << 17) |
        (mrvalid << 16) |
        (sattrs.sregion << 8) |
        mregion;

    return tt_resp;
}
10923
10924#endif
10925
/*
 * Implement DC ZVA: zero a block of memory.  The block size is
 * (4 << cpu->dcz_blocksize) bytes and the input address is rounded
 * down to a block boundary.
 */
void HELPER(dc_zva)(CPUARMState *env, uint64_t vaddr_in)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    uint64_t blocklen = 4 << cpu->dcz_blocksize;
    uint64_t vaddr = vaddr_in & ~(blocklen - 1);

#ifndef CONFIG_USER_ONLY
    {
        /* Fast path: if every page the block touches is writable RAM
         * already present in the TLB, zero it directly through host
         * pointers.  The block may span up to
         * DIV_ROUND_UP(blocklen, TARGET_PAGE_SIZE) target pages.
         */
        int maxidx = DIV_ROUND_UP(blocklen, TARGET_PAGE_SIZE);
        void *hostaddr[maxidx];
        int try, i;
        unsigned mmu_idx = cpu_mmu_index(env, false);
        TCGMemOpIdx oi = make_memop_idx(MO_UB, mmu_idx);

        for (try = 0; try < 2; try++) {
            /* Look up a host address for every page in the block.  */
            for (i = 0; i < maxidx; i++) {
                hostaddr[i] = tlb_vaddr_to_host(env,
                                                vaddr + TARGET_PAGE_SIZE * i,
                                                1, mmu_idx);
                if (!hostaddr[i]) {
                    break;
                }
            }
            if (i == maxidx) {
                /* All pages resolved: memset them.  The final page may
                 * cover less than a full TARGET_PAGE_SIZE of the block.
                 */
                for (i = 0; i < maxidx - 1; i++) {
                    memset(hostaddr[i], 0, TARGET_PAGE_SIZE);
                }
                memset(hostaddr[i], 0, blocklen - (i * TARGET_PAGE_SIZE));
                return;
            }
            /* Some page was not directly accessible: probe each page
             * with a byte store through the slow path (which can take
             * the guest fault with the correct return address from
             * GETPC()), then retry the host lookups once.
             */
            helper_ret_stb_mmu(env, vaddr_in, 0, oi, GETPC());

            for (i = 0; i < maxidx; i++) {
                uint64_t va = vaddr + TARGET_PAGE_SIZE * i;
                if (va != (vaddr_in & TARGET_PAGE_MASK)) {
                    helper_ret_stb_mmu(env, va, 0, oi, GETPC());
                }
            }
        }

        /* Slow path: if we still could not get host pointers after
         * probing (e.g. part of the block is not simple RAM), fall
         * back to zeroing the block one byte store at a time.
         */
        for (i = 0; i < blocklen; i++) {
            helper_ret_stb_mmu(env, vaddr + i, 0, oi, GETPC());
        }
    }
#else
    /* User-mode: guest memory is directly addressable.  */
    memset(g2h(vaddr), 0, blocklen);
#endif
}
11009
11010
11011
11012
11013
11014
11015
11016
/* Signed saturating 16-bit addition (used by the qadd16 helpers).  */
static inline uint16_t add16_sat(uint16_t a, uint16_t b)
{
    uint16_t sum = a + b;

    /* Overflow iff the operands share a sign but the sum does not.  */
    if (!((a ^ b) & 0x8000) && ((sum ^ a) & 0x8000)) {
        sum = (a & 0x8000) ? 0x8000 : 0x7fff;
    }
    return sum;
}
11030
11031
/* Signed saturating 8-bit addition (used by the qadd8 helpers).  */
static inline uint8_t add8_sat(uint8_t a, uint8_t b)
{
    uint8_t sum = a + b;

    /* Overflow iff the operands share a sign but the sum does not.  */
    if (!((a ^ b) & 0x80) && ((sum ^ a) & 0x80)) {
        sum = (a & 0x80) ? 0x80 : 0x7f;
    }
    return sum;
}
11045
11046
/* Signed saturating 16-bit subtraction (used by the qsub16 helpers).  */
static inline uint16_t sub16_sat(uint16_t a, uint16_t b)
{
    uint16_t diff = a - b;

    /* Overflow iff the operands differ in sign and the result's sign
     * differs from a's.
     */
    if (((a ^ b) & 0x8000) && ((diff ^ a) & 0x8000)) {
        diff = (a & 0x8000) ? 0x8000 : 0x7fff;
    }
    return diff;
}
11060
11061
/* Signed saturating 8-bit subtraction (used by the qsub8 helpers).  */
static inline uint8_t sub8_sat(uint8_t a, uint8_t b)
{
    uint8_t diff = a - b;

    /* Overflow iff the operands differ in sign and the result's sign
     * differs from a's.
     */
    if (((a ^ b) & 0x80) && ((diff ^ a) & 0x80)) {
        diff = (a & 0x80) ? 0x80 : 0x7f;
    }
    return diff;
}
11075
/* Signed saturating parallel arithmetic: instantiate the "q"-prefixed
 * helpers (qadd16, qsub8, ...) from the op_addsub.h template.
 */
#define ADD16(a, b, n) RESULT(add16_sat(a, b), n, 16);
#define SUB16(a, b, n) RESULT(sub16_sat(a, b), n, 16);
#define ADD8(a, b, n) RESULT(add8_sat(a, b), n, 8);
#define SUB8(a, b, n) RESULT(sub8_sat(a, b), n, 8);
#define PFX q

#include "op_addsub.h"
11083
11084
/* Unsigned saturating 16-bit addition (used by the uqadd16 helpers).  */
static inline uint16_t add16_usat(uint16_t a, uint16_t b)
{
    uint16_t sum = a + b;

    /* Unsigned overflow wrapped around iff the sum is below a.  */
    return (sum < a) ? 0xffff : sum;
}
11093
/* Unsigned saturating 16-bit subtraction: clamps to zero on underflow.  */
static inline uint16_t sub16_usat(uint16_t a, uint16_t b)
{
    return (a > b) ? (uint16_t)(a - b) : 0;
}
11101
/* Unsigned saturating 8-bit addition (used by the uqadd8 helpers).  */
static inline uint8_t add8_usat(uint8_t a, uint8_t b)
{
    uint8_t sum = a + b;

    /* Unsigned overflow wrapped around iff the sum is below a.  */
    return (sum < a) ? 0xff : sum;
}
11110
/* Unsigned saturating 8-bit subtraction: clamps to zero on underflow.  */
static inline uint8_t sub8_usat(uint8_t a, uint8_t b)
{
    return (a > b) ? (uint8_t)(a - b) : 0;
}
11118
/* Unsigned saturating parallel arithmetic: instantiate the
 * "uq"-prefixed helpers from the op_addsub.h template.
 */
#define ADD16(a, b, n) RESULT(add16_usat(a, b), n, 16);
#define SUB16(a, b, n) RESULT(sub16_usat(a, b), n, 16);
#define ADD8(a, b, n) RESULT(add8_usat(a, b), n, 8);
#define SUB8(a, b, n) RESULT(sub8_usat(a, b), n, 8);
#define PFX uq

#include "op_addsub.h"
11126
11127
/* Signed modulo (wrapping) parallel arithmetic which also sets the GE
 * flags for the SEL instruction: instantiate the "s"-prefixed helpers.
 */
#define SARITH16(a, b, n, op) do { \
    int32_t sum; \
    sum = (int32_t)(int16_t)(a) op (int32_t)(int16_t)(b); \
    RESULT(sum, n, 16); \
    if (sum >= 0) \
        ge |= 3 << (n * 2); \
    } while(0)

#define SARITH8(a, b, n, op) do { \
    int32_t sum; \
    sum = (int32_t)(int8_t)(a) op (int32_t)(int8_t)(b); \
    RESULT(sum, n, 8); \
    if (sum >= 0) \
        ge |= 1 << n; \
    } while(0)


#define ADD16(a, b, n) SARITH16(a, b, n, +)
#define SUB16(a, b, n) SARITH16(a, b, n, -)
#define ADD8(a, b, n) SARITH8(a, b, n, +)
#define SUB8(a, b, n) SARITH8(a, b, n, -)
#define PFX s
#define ARITH_GE

#include "op_addsub.h"
11153
11154
/* Unsigned modulo (wrapping) parallel arithmetic, setting the GE flags
 * from the carry/borrow out: instantiate the "u"-prefixed helpers.
 */
#define ADD16(a, b, n) do { \
    uint32_t sum; \
    sum = (uint32_t)(uint16_t)(a) + (uint32_t)(uint16_t)(b); \
    RESULT(sum, n, 16); \
    if ((sum >> 16) == 1) \
        ge |= 3 << (n * 2); \
    } while(0)

#define ADD8(a, b, n) do { \
    uint32_t sum; \
    sum = (uint32_t)(uint8_t)(a) + (uint32_t)(uint8_t)(b); \
    RESULT(sum, n, 8); \
    if ((sum >> 8) == 1) \
        ge |= 1 << n; \
    } while(0)

#define SUB16(a, b, n) do { \
    uint32_t sum; \
    sum = (uint32_t)(uint16_t)(a) - (uint32_t)(uint16_t)(b); \
    RESULT(sum, n, 16); \
    if ((sum >> 16) == 0) \
        ge |= 3 << (n * 2); \
    } while(0)

#define SUB8(a, b, n) do { \
    uint32_t sum; \
    sum = (uint32_t)(uint8_t)(a) - (uint32_t)(uint8_t)(b); \
    RESULT(sum, n, 8); \
    if ((sum >> 8) == 0) \
        ge |= 1 << n; \
    } while(0)

#define PFX u
#define ARITH_GE

#include "op_addsub.h"
11191
11192
/* Signed halving parallel arithmetic (result shifted right by one):
 * instantiate the "sh"-prefixed helpers.
 */
#define ADD16(a, b, n) \
  RESULT(((int32_t)(int16_t)(a) + (int32_t)(int16_t)(b)) >> 1, n, 16)
#define SUB16(a, b, n) \
  RESULT(((int32_t)(int16_t)(a) - (int32_t)(int16_t)(b)) >> 1, n, 16)
#define ADD8(a, b, n) \
  RESULT(((int32_t)(int8_t)(a) + (int32_t)(int8_t)(b)) >> 1, n, 8)
#define SUB8(a, b, n) \
  RESULT(((int32_t)(int8_t)(a) - (int32_t)(int8_t)(b)) >> 1, n, 8)
#define PFX sh

#include "op_addsub.h"
11204
11205
/* Unsigned halving parallel arithmetic: instantiate the "uh"-prefixed
 * helpers.
 */
#define ADD16(a, b, n) \
  RESULT(((uint32_t)(uint16_t)(a) + (uint32_t)(uint16_t)(b)) >> 1, n, 16)
#define SUB16(a, b, n) \
  RESULT(((uint32_t)(uint16_t)(a) - (uint32_t)(uint16_t)(b)) >> 1, n, 16)
#define ADD8(a, b, n) \
  RESULT(((uint32_t)(uint8_t)(a) + (uint32_t)(uint8_t)(b)) >> 1, n, 8)
#define SUB8(a, b, n) \
  RESULT(((uint32_t)(uint8_t)(a) - (uint32_t)(uint8_t)(b)) >> 1, n, 8)
#define PFX uh

#include "op_addsub.h"
11217
/* Absolute difference of two unsigned bytes.  */
static inline uint8_t do_usad(uint8_t a, uint8_t b)
{
    return (a < b) ? (uint8_t)(b - a) : (uint8_t)(a - b);
}
11225
11226
11227uint32_t HELPER(usad8)(uint32_t a, uint32_t b)
11228{
11229 uint32_t sum;
11230 sum = do_usad(a, b);
11231 sum += do_usad(a >> 8, b >> 8);
11232 sum += do_usad(a >> 16, b >>16);
11233 sum += do_usad(a >> 24, b >> 24);
11234 return sum;
11235}
11236
11237
11238uint32_t HELPER(sel_flags)(uint32_t flags, uint32_t a, uint32_t b)
11239{
11240 uint32_t mask;
11241
11242 mask = 0;
11243 if (flags & 1)
11244 mask |= 0xff;
11245 if (flags & 2)
11246 mask |= 0xff00;
11247 if (flags & 4)
11248 mask |= 0xff0000;
11249 if (flags & 8)
11250 mask |= 0xff000000;
11251 return (a & mask) | (b & ~mask);
11252}
11253
11254
11255
11256
11257
11258
11259static inline int vfp_exceptbits_from_host(int host_bits)
11260{
11261 int target_bits = 0;
11262
11263 if (host_bits & float_flag_invalid)
11264 target_bits |= 1;
11265 if (host_bits & float_flag_divbyzero)
11266 target_bits |= 2;
11267 if (host_bits & float_flag_overflow)
11268 target_bits |= 4;
11269 if (host_bits & (float_flag_underflow | float_flag_output_denormal))
11270 target_bits |= 8;
11271 if (host_bits & float_flag_inexact)
11272 target_bits |= 0x10;
11273 if (host_bits & float_flag_input_denormal)
11274 target_bits |= 0x80;
11275 return target_bits;
11276}
11277
/* Read the FPSCR: combine the stored value, the live vector
 * length/stride fields, and the accumulated softfloat exception flags
 * from all three float_status structures.
 */
uint32_t HELPER(vfp_get_fpscr)(CPUARMState *env)
{
    int i;
    uint32_t fpscr;

    fpscr = (env->vfp.xregs[ARM_VFP_FPSCR] & 0xffc8ffff)
            | (env->vfp.vec_len << 16)
            | (env->vfp.vec_stride << 20);

    i = get_float_exception_flags(&env->vfp.fp_status);
    i |= get_float_exception_flags(&env->vfp.standard_fp_status);
    /* The fp16 status's input-denormal flag is deliberately excluded
     * (presumably because FZ16 squashing should not report IDC --
     * confirm against the Arm ARM FZ16 description).
     */
    i |= (get_float_exception_flags(&env->vfp.fp_status_f16)
          & ~float_flag_input_denormal);

    fpscr |= vfp_exceptbits_from_host(i);
    return fpscr;
}
11296
/* Non-TCG entry point: plain C wrapper around the FPSCR read helper.  */
uint32_t vfp_get_fpscr(CPUARMState *env)
{
    return HELPER(vfp_get_fpscr)(env);
}
11301
11302
11303static inline int vfp_exceptbits_to_host(int target_bits)
11304{
11305 int host_bits = 0;
11306
11307 if (target_bits & 1)
11308 host_bits |= float_flag_invalid;
11309 if (target_bits & 2)
11310 host_bits |= float_flag_divbyzero;
11311 if (target_bits & 4)
11312 host_bits |= float_flag_overflow;
11313 if (target_bits & 8)
11314 host_bits |= float_flag_underflow;
11315 if (target_bits & 0x10)
11316 host_bits |= float_flag_inexact;
11317 if (target_bits & 0x80)
11318 host_bits |= float_flag_input_denormal;
11319 return host_bits;
11320}
11321
/* Write the FPSCR, propagating the control bits into the softfloat
 * float_status structures as needed.
 */
void HELPER(vfp_set_fpscr)(CPUARMState *env, uint32_t val)
{
    int i;
    uint32_t changed;

    /* Without the FP16 extension, FZ16 cannot be set.  */
    if (!arm_feature(env, ARM_FEATURE_V8_FP16)) {
        val &= ~FPCR_FZ16;
    }

    changed = env->vfp.xregs[ARM_VFP_FPSCR];
    env->vfp.xregs[ARM_VFP_FPSCR] = (val & 0xffc8ffff);
    env->vfp.vec_len = (val >> 16) & 7;
    env->vfp.vec_stride = (val >> 20) & 3;

    /* changed now holds the set of bits which differ from the old value */
    changed ^= val;
    if (changed & (3 << 22)) {
        /* Rounding mode (RMode) changed: map the 2-bit field to the
         * softfloat rounding mode and apply it to the single/double
         * and half-precision statuses.
         */
        i = (val >> 22) & 3;
        switch (i) {
        case FPROUNDING_TIEEVEN:
            i = float_round_nearest_even;
            break;
        case FPROUNDING_POSINF:
            i = float_round_up;
            break;
        case FPROUNDING_NEGINF:
            i = float_round_down;
            break;
        case FPROUNDING_ZERO:
            i = float_round_to_zero;
            break;
        }
        set_float_rounding_mode(i, &env->vfp.fp_status);
        set_float_rounding_mode(i, &env->vfp.fp_status_f16);
    }
    if (changed & FPCR_FZ16) {
        /* FZ16 affects only the half-precision status.  */
        bool ftz_enabled = val & FPCR_FZ16;
        set_flush_to_zero(ftz_enabled, &env->vfp.fp_status_f16);
        set_flush_inputs_to_zero(ftz_enabled, &env->vfp.fp_status_f16);
    }
    if (changed & FPCR_FZ) {
        /* FZ affects the single/double precision status.  */
        bool ftz_enabled = val & FPCR_FZ;
        set_flush_to_zero(ftz_enabled, &env->vfp.fp_status);
        set_flush_inputs_to_zero(ftz_enabled, &env->vfp.fp_status);
    }
    if (changed & FPCR_DN) {
        bool dnan_enabled = val & FPCR_DN;
        set_default_nan_mode(dnan_enabled, &env->vfp.fp_status);
        set_default_nan_mode(dnan_enabled, &env->vfp.fp_status_f16);
    }

    /* The exception flags are ORed together from all three statuses
     * when the FPSCR is read, so we only need to keep the written
     * cumulative-flag state in one of them; the other two are cleared.
     */
    i = vfp_exceptbits_to_host(val);
    set_float_exception_flags(i, &env->vfp.fp_status);
    set_float_exception_flags(0, &env->vfp.fp_status_f16);
    set_float_exception_flags(0, &env->vfp.standard_fp_status);
}
11382
/* Non-TCG entry point: plain C wrapper around the FPSCR write helper.  */
void vfp_set_fpscr(CPUARMState *env, uint32_t val)
{
    HELPER(vfp_set_fpscr)(env, val);
}
11387
/* VFP helper naming convention: single-precision routines get an "s"
 * suffix, double-precision a "d" suffix.
 */
#define VFP_HELPER(name, p) HELPER(glue(glue(vfp_,name),p))

/* Instantiate float32/float64 binary-op helpers that simply defer to
 * the corresponding softfloat routine with the supplied float_status.
 */
#define VFP_BINOP(name) \
float32 VFP_HELPER(name, s)(float32 a, float32 b, void *fpstp) \
{ \
    float_status *fpst = fpstp; \
    return float32_ ## name(a, b, fpst); \
} \
float64 VFP_HELPER(name, d)(float64 a, float64 b, void *fpstp) \
{ \
    float_status *fpst = fpstp; \
    return float64_ ## name(a, b, fpst); \
}
VFP_BINOP(add)
VFP_BINOP(sub)
VFP_BINOP(mul)
VFP_BINOP(div)
VFP_BINOP(min)
VFP_BINOP(max)
VFP_BINOP(minnum)
VFP_BINOP(maxnum)
#undef VFP_BINOP
11410
/* Negate, absolute value and square root for single and double
 * precision.  neg/abs only manipulate the sign bit and take no status;
 * sqrt uses (and may set exception flags in) the active fp_status.
 */
float32 VFP_HELPER(neg, s)(float32 a)
{
    return float32_chs(a);
}

float64 VFP_HELPER(neg, d)(float64 a)
{
    return float64_chs(a);
}

float32 VFP_HELPER(abs, s)(float32 a)
{
    return float32_abs(a);
}

float64 VFP_HELPER(abs, d)(float64 a)
{
    return float64_abs(a);
}

float32 VFP_HELPER(sqrt, s)(float32 a, CPUARMState *env)
{
    return float32_sqrt(a, &env->vfp.fp_status);
}

float64 VFP_HELPER(sqrt, d)(float64 a, CPUARMState *env)
{
    return float64_sqrt(a, &env->vfp.fp_status);
}
11440
11441
/* VFP comparisons: set the FPSCR NZCV flags (bits [31:28]) from the
 * softfloat compare result: equal -> 0110, less -> 1000,
 * greater -> 0010, unordered -> 0011.  The "cmpe" variants use
 * signaling comparisons (raise Invalid on quiet NaNs) while plain
 * "cmp" uses quiet comparisons.
 */
#define DO_VFP_cmp(p, type) \
void VFP_HELPER(cmp, p)(type a, type b, CPUARMState *env)  \
{ \
    uint32_t flags; \
    switch(type ## _compare_quiet(a, b, &env->vfp.fp_status)) { \
    case 0: flags = 0x6; break; \
    case -1: flags = 0x8; break; \
    case 1: flags = 0x2; break; \
    default: case 2: flags = 0x3; break; \
    } \
    env->vfp.xregs[ARM_VFP_FPSCR] = (flags << 28) \
        | (env->vfp.xregs[ARM_VFP_FPSCR] & 0x0fffffff); \
} \
void VFP_HELPER(cmpe, p)(type a, type b, CPUARMState *env) \
{ \
    uint32_t flags; \
    switch(type ## _compare(a, b, &env->vfp.fp_status)) { \
    case 0: flags = 0x6; break; \
    case -1: flags = 0x8; break; \
    case 1: flags = 0x2; break; \
    default: case 2: flags = 0x3; break; \
    } \
    env->vfp.xregs[ARM_VFP_FPSCR] = (flags << 28) \
        | (env->vfp.xregs[ARM_VFP_FPSCR] & 0x0fffffff); \
}
DO_VFP_cmp(s, float32)
DO_VFP_cmp(d, float64)
#undef DO_VFP_cmp
11470
11471
11472
/* Integer to float and float to integer conversions.  The
 * float-to-int variants return zero and raise Invalid for NaN inputs.
 * Half-precision values travel in a uint32_t container.
 */
#define CONV_ITOF(name, ftype, fsz, sign)                           \
ftype HELPER(name)(uint32_t x, void *fpstp)                         \
{                                                                   \
    float_status *fpst = fpstp;                                     \
    return sign##int32_to_##float##fsz((sign##int32_t)x, fpst);     \
}

#define CONV_FTOI(name, ftype, fsz, sign, round)                    \
sign##int32_t HELPER(name)(ftype x, void *fpstp)                    \
{                                                                   \
    float_status *fpst = fpstp;                                     \
    if (float##fsz##_is_any_nan(x)) {                               \
        float_raise(float_flag_invalid, fpst);                      \
        return 0;                                                   \
    }                                                               \
    return float##fsz##_to_##sign##int32##round(x, fpst);           \
}

/* Instantiate round-to-current-mode and round-to-zero variants.  */
#define FLOAT_CONVS(name, p, ftype, fsz, sign)            \
    CONV_ITOF(vfp_##name##to##p, ftype, fsz, sign)        \
    CONV_FTOI(vfp_to##name##p, ftype, fsz, sign, )        \
    CONV_FTOI(vfp_to##name##z##p, ftype, fsz, sign, _round_to_zero)

FLOAT_CONVS(si, h, uint32_t, 16, )
FLOAT_CONVS(si, s, float32, 32, )
FLOAT_CONVS(si, d, float64, 64, )
FLOAT_CONVS(ui, h, uint32_t, 16, u)
FLOAT_CONVS(ui, s, float32, 32, u)
FLOAT_CONVS(ui, d, float64, 64, u)

#undef CONV_ITOF
#undef CONV_FTOI
#undef FLOAT_CONVS
11506
11507
/* Conversions between single and double precision, using the active
 * fp_status.
 */
float64 VFP_HELPER(fcvtd, s)(float32 x, CPUARMState *env)
{
    return float32_to_float64(x, &env->vfp.fp_status);
}

float32 VFP_HELPER(fcvts, d)(float64 x, CPUARMState *env)
{
    return float64_to_float32(x, &env->vfp.fp_status);
}
11517
11518
/* Fixed-point <-> floating-point conversions: the fixed-point value is
 * scaled by 2^shift.  Integer-to-float converts then scales down by
 * the shift amount.
 */
#define VFP_CONV_FIX_FLOAT(name, p, fsz, isz, itype) \
float##fsz HELPER(vfp_##name##to##p)(uint##isz##_t  x, uint32_t shift, \
                                     void *fpstp) \
{ \
    float_status *fpst = fpstp; \
    float##fsz tmp; \
    tmp = itype##_to_##float##fsz(x, fpst); \
    return float##fsz##_scalbn(tmp, -(int)shift, fpst); \
}

/* Float-to-fixed scales up by the shift then converts.  NaNs raise
 * Invalid and return zero.  Only the input-denormal flag from the
 * scalbn step is kept: the other flags it could raise (e.g.
 * overflow/inexact) are not correct for the combined
 * scale-and-convert operation.
 */
#define VFP_CONV_FLOAT_FIX_ROUND(name, p, fsz, isz, itype, round) \
uint##isz##_t HELPER(vfp_to##name##p##round)(float##fsz x, \
                                            uint32_t shift, \
                                            void *fpstp) \
{ \
    float_status *fpst = fpstp; \
    int old_exc_flags = get_float_exception_flags(fpst); \
    float##fsz tmp; \
    if (float##fsz##_is_any_nan(x)) { \
        float_raise(float_flag_invalid, fpst); \
        return 0; \
    } \
    tmp = float##fsz##_scalbn(x, shift, fpst); \
    old_exc_flags |= get_float_exception_flags(fpst) \
        & float_flag_input_denormal; \
    set_float_exception_flags(old_exc_flags, fpst); \
    return float##fsz##_to_##itype##round(tmp, fpst); \
}

#define VFP_CONV_FIX(name, p, fsz, isz, itype)                   \
VFP_CONV_FIX_FLOAT(name, p, fsz, isz, itype)                     \
VFP_CONV_FLOAT_FIX_ROUND(name, p, fsz, isz, itype, _round_to_zero) \
VFP_CONV_FLOAT_FIX_ROUND(name, p, fsz, isz, itype, )

/* A64 variants only need the round-to-current-mode form.  */
#define VFP_CONV_FIX_A64(name, p, fsz, isz, itype)               \
VFP_CONV_FIX_FLOAT(name, p, fsz, isz, itype)                     \
VFP_CONV_FLOAT_FIX_ROUND(name, p, fsz, isz, itype, )

VFP_CONV_FIX(sh, d, 64, 64, int16)
VFP_CONV_FIX(sl, d, 64, 64, int32)
VFP_CONV_FIX_A64(sq, d, 64, 64, int64)
VFP_CONV_FIX(uh, d, 64, 64, uint16)
VFP_CONV_FIX(ul, d, 64, 64, uint32)
VFP_CONV_FIX_A64(uq, d, 64, 64, uint64)
VFP_CONV_FIX(sh, s, 32, 32, int16)
VFP_CONV_FIX(sl, s, 32, 32, int32)
VFP_CONV_FIX_A64(sq, s, 32, 64, int64)
VFP_CONV_FIX(uh, s, 32, 32, uint16)
VFP_CONV_FIX(ul, s, 32, 32, uint32)
VFP_CONV_FIX_A64(uq, s, 32, 64, uint64)

#undef VFP_CONV_FIX
#undef VFP_CONV_FIX_FLOAT
#undef VFP_CONV_FLOAT_FIX_ROUND
#undef VFP_CONV_FIX_A64
11579
11580
11581
11582
11583
11584
11585
11586
11587
11588
/* Fixed-point to fp16 post-step: scale the intermediate float64 down
 * by 2^shift and convert to IEEE half precision (ieee=true).
 */
static float16 do_postscale_fp16(float64 f, int shift, float_status *fpst)
{
    return float64_to_float16(float64_scalbn(f, -shift, fpst), true, fpst);
}
11593
/* Fixed-point integer (32/64-bit, signed/unsigned) to fp16
 * conversions, going via float64 so no intermediate rounding occurs.
 */
uint32_t HELPER(vfp_sltoh)(uint32_t x, uint32_t shift, void *fpst)
{
    return do_postscale_fp16(int32_to_float64(x, fpst), shift, fpst);
}

uint32_t HELPER(vfp_ultoh)(uint32_t x, uint32_t shift, void *fpst)
{
    return do_postscale_fp16(uint32_to_float64(x, fpst), shift, fpst);
}

uint32_t HELPER(vfp_sqtoh)(uint64_t x, uint32_t shift, void *fpst)
{
    return do_postscale_fp16(int64_to_float64(x, fpst), shift, fpst);
}

uint32_t HELPER(vfp_uqtoh)(uint64_t x, uint32_t shift, void *fpst)
{
    return do_postscale_fp16(uint64_to_float64(x, fpst), shift, fpst);
}
11613
/* fp16 to fixed-point pre-step: convert to float64 and scale up by
 * 2^shift.  NaNs raise Invalid and convert to zero.  Only the
 * input-denormal flag from the conversion/scaling is kept, matching
 * the larger-format VFP_CONV_FLOAT_FIX_ROUND behaviour.
 */
static float64 do_prescale_fp16(float16 f, int shift, float_status *fpst)
{
    if (unlikely(float16_is_any_nan(f))) {
        float_raise(float_flag_invalid, fpst);
        return 0;
    } else {
        int old_exc_flags = get_float_exception_flags(fpst);
        float64 ret;

        ret = float16_to_float64(f, true, fpst);
        ret = float64_scalbn(ret, shift, fpst);
        old_exc_flags |= get_float_exception_flags(fpst)
            & float_flag_input_denormal;
        set_float_exception_flags(old_exc_flags, fpst);

        return ret;
    }
}
11632
/* fp16 to fixed-point integer (16/32/64-bit, signed/unsigned)
 * conversions, going via float64.
 */
uint32_t HELPER(vfp_toshh)(uint32_t x, uint32_t shift, void *fpst)
{
    return float64_to_int16(do_prescale_fp16(x, shift, fpst), fpst);
}

uint32_t HELPER(vfp_touhh)(uint32_t x, uint32_t shift, void *fpst)
{
    return float64_to_uint16(do_prescale_fp16(x, shift, fpst), fpst);
}

uint32_t HELPER(vfp_toslh)(uint32_t x, uint32_t shift, void *fpst)
{
    return float64_to_int32(do_prescale_fp16(x, shift, fpst), fpst);
}

uint32_t HELPER(vfp_toulh)(uint32_t x, uint32_t shift, void *fpst)
{
    return float64_to_uint32(do_prescale_fp16(x, shift, fpst), fpst);
}

uint64_t HELPER(vfp_tosqh)(uint32_t x, uint32_t shift, void *fpst)
{
    return float64_to_int64(do_prescale_fp16(x, shift, fpst), fpst);
}

uint64_t HELPER(vfp_touqh)(uint32_t x, uint32_t shift, void *fpst)
{
    return float64_to_uint64(do_prescale_fp16(x, shift, fpst), fpst);
}
11662
11663
11664
11665
/* Set the rounding mode of the given float_status and return the
 * previous mode, so the caller can restore it afterwards.
 */
uint32_t HELPER(set_rmode)(uint32_t rmode, void *fpstp)
{
    float_status *fp_status = fpstp;

    uint32_t prev_rmode = get_float_rounding_mode(fp_status);
    set_float_rounding_mode(rmode, fp_status);

    return prev_rmode;
}
11675
11676
11677
11678
11679
11680
11681
11682
/* As set_rmode, but operating on the Neon "standard FP" status
 * structure, returning the previous mode for later restoration.
 */
uint32_t HELPER(set_neon_rmode)(uint32_t rmode, CPUARMState *env)
{
    float_status *fp_status = &env->vfp.standard_fp_status;

    uint32_t prev_rmode = get_float_rounding_mode(fp_status);
    set_float_rounding_mode(rmode, fp_status);

    return prev_rmode;
}
11692
11693
11694float32 HELPER(vfp_fcvt_f16_to_f32)(uint32_t a, void *fpstp, uint32_t ahp_mode)
11695{
11696
11697
11698
11699 float_status *fpst = fpstp;
11700 flag save = get_flush_inputs_to_zero(fpst);
11701 set_flush_inputs_to_zero(false, fpst);
11702 float32 r = float16_to_float32(a, !ahp_mode, fpst);
11703 set_flush_inputs_to_zero(save, fpst);
11704 return r;
11705}
11706
11707uint32_t HELPER(vfp_fcvt_f32_to_f16)(float32 a, void *fpstp, uint32_t ahp_mode)
11708{
11709
11710
11711
11712 float_status *fpst = fpstp;
11713 flag save = get_flush_to_zero(fpst);
11714 set_flush_to_zero(false, fpst);
11715 float16 r = float32_to_float16(a, !ahp_mode, fpst);
11716 set_flush_to_zero(save, fpst);
11717 return r;
11718}
11719
11720float64 HELPER(vfp_fcvt_f16_to_f64)(uint32_t a, void *fpstp, uint32_t ahp_mode)
11721{
11722
11723
11724
11725 float_status *fpst = fpstp;
11726 flag save = get_flush_inputs_to_zero(fpst);
11727 set_flush_inputs_to_zero(false, fpst);
11728 float64 r = float16_to_float64(a, !ahp_mode, fpst);
11729 set_flush_inputs_to_zero(save, fpst);
11730 return r;
11731}
11732
11733uint32_t HELPER(vfp_fcvt_f64_to_f16)(float64 a, void *fpstp, uint32_t ahp_mode)
11734{
11735
11736
11737
11738 float_status *fpst = fpstp;
11739 flag save = get_flush_to_zero(fpst);
11740 set_flush_to_zero(false, fpst);
11741 float16 r = float64_to_float16(a, !ahp_mode, fpst);
11742 set_flush_to_zero(save, fpst);
11743 return r;
11744}
11745
11746#define float32_two make_float32(0x40000000)
11747#define float32_three make_float32(0x40400000)
11748#define float32_one_point_five make_float32(0x3fc00000)
11749
11750float32 HELPER(recps_f32)(float32 a, float32 b, CPUARMState *env)
11751{
11752 float_status *s = &env->vfp.standard_fp_status;
11753 if ((float32_is_infinity(a) && float32_is_zero_or_denormal(b)) ||
11754 (float32_is_infinity(b) && float32_is_zero_or_denormal(a))) {
11755 if (!(float32_is_zero(a) || float32_is_zero(b))) {
11756 float_raise(float_flag_input_denormal, s);
11757 }
11758 return float32_two;
11759 }
11760 return float32_sub(float32_two, float32_mul(a, b, s), s);
11761}
11762
11763float32 HELPER(rsqrts_f32)(float32 a, float32 b, CPUARMState *env)
11764{
11765 float_status *s = &env->vfp.standard_fp_status;
11766 float32 product;
11767 if ((float32_is_infinity(a) && float32_is_zero_or_denormal(b)) ||
11768 (float32_is_infinity(b) && float32_is_zero_or_denormal(a))) {
11769 if (!(float32_is_zero(a) || float32_is_zero(b))) {
11770 float_raise(float_flag_input_denormal, s);
11771 }
11772 return float32_one_point_five;
11773 }
11774 product = float32_mul(a, b, s);
11775 return float32_div(float32_sub(float32_three, product, s), float32_two, s);
11776}
11777
11778
11779
11780
11781
11782#define float64_256 make_float64(0x4070000000000000LL)
11783#define float64_512 make_float64(0x4080000000000000LL)
11784#define float16_maxnorm make_float16(0x7bff)
11785#define float32_maxnorm make_float32(0x7f7fffff)
11786#define float64_maxnorm make_float64(0x7fefffffffffffffLL)
11787
11788
11789
11790
11791
11792
11793
11794
11795
11796
11797
11798
11799
11800
/* Integer 8-bit reciprocal estimate: for an input interpreted as a
 * 9-bit fixed-point value in [0.5, 1.0) (i.e. 256 <= input < 512),
 * return the rounded reciprocal, again as a value in [256, 512).
 */
static int recip_estimate(int input)
{
    assert(256 <= input && input < 512);
    /* Evaluate 1 / (input + 0.5 ulp) scaled by 2^19, then round to
     * nearest by adding one before the final halving shift.
     */
    int doubled_plus_one = 2 * input + 1;
    int quotient = (1 << 19) / doubled_plus_one;
    int rounded = (quotient + 1) >> 1;

    assert(256 <= rounded && rounded < 512);
    return rounded;
}
11811
11812
11813
11814
11815
11816
11817
11818
11819
11820
11821static uint64_t call_recip_estimate(int *exp, int exp_off, uint64_t frac)
11822{
11823 uint32_t scaled, estimate;
11824 uint64_t result_frac;
11825 int result_exp;
11826
11827
11828 if (*exp == 0) {
11829 if (extract64(frac, 51, 1) == 0) {
11830 *exp = -1;
11831 frac <<= 2;
11832 } else {
11833 frac <<= 1;
11834 }
11835 }
11836
11837
11838 scaled = deposit32(1 << 8, 0, 8, extract64(frac, 44, 8));
11839 estimate = recip_estimate(scaled);
11840
11841 result_exp = exp_off - *exp;
11842 result_frac = deposit64(0, 44, 8, estimate);
11843 if (result_exp == 0) {
11844 result_frac = deposit64(result_frac >> 1, 51, 1, 1);
11845 } else if (result_exp == -1) {
11846 result_frac = deposit64(result_frac >> 2, 50, 2, 1);
11847 result_exp = 0;
11848 }
11849
11850 *exp = result_exp;
11851
11852 return result_frac;
11853}
11854
/* Should an overflowed result with the given sign round to infinity
 * (true) or be clamped to the largest finite value (false), under the
 * current rounding mode?
 */
static bool round_to_inf(float_status *fpst, bool sign_bit)
{
    switch (fpst->float_rounding_mode) {
    case float_round_nearest_even:
        /* Round to nearest: overflow always produces infinity */
        return true;
    case float_round_up:
        /* Round towards +inf: only positive overflows reach infinity */
        return !sign_bit;
    case float_round_down:
        /* Round towards -inf: only negative overflows reach infinity */
        return sign_bit;
    case float_round_to_zero:
        /* Round towards zero never overflows to infinity */
        return false;
    }

    /* Other softfloat rounding modes are never set by this target */
    g_assert_not_reached();
}
11870
/* Reciprocal estimate (FRECPE) for half precision. */
uint32_t HELPER(recpe_f16)(uint32_t input, void *fpstp)
{
    float_status *fpst = fpstp;
    float16 f16 = float16_squash_input_denormal(input, fpst);
    uint32_t f16_val = float16_val(f16);
    uint32_t f16_sign = float16_is_neg(f16);
    int f16_exp = extract32(f16_val, 10, 5);
    uint32_t f16_frac = extract32(f16_val, 0, 10);
    uint64_t f64_frac;

    if (float16_is_any_nan(f16)) {
        float16 nan = f16;
        /* Signaling NaNs raise Invalid and are quietened first */
        if (float16_is_signaling_nan(f16, fpst)) {
            float_raise(float_flag_invalid, fpst);
            nan = float16_silence_nan(f16, fpst);
        }
        if (fpst->default_nan_mode) {
            nan = float16_default_nan(fpst);
        }
        return nan;
    } else if (float16_is_infinity(f16)) {
        /* 1/inf == signed zero */
        return float16_set_sign(float16_zero, float16_is_neg(f16));
    } else if (float16_is_zero(f16)) {
        /* 1/0 == signed infinity, raising divide-by-zero */
        float_raise(float_flag_divbyzero, fpst);
        return float16_set_sign(float16_infinity, float16_is_neg(f16));
    } else if (float16_abs(f16) < (1 << 8)) {
        /* Magnitude so small (raw bits below 2^8) that the reciprocal
         * overflows: return inf or maxnorm per the rounding mode.
         */
        float_raise(float_flag_overflow | float_flag_inexact, fpst);
        if (round_to_inf(fpst, f16_sign)) {
            return float16_set_sign(float16_infinity, f16_sign);
        } else {
            return float16_set_sign(float16_maxnorm, f16_sign);
        }
    } else if (f16_exp >= 29 && fpst->flush_to_zero) {
        /* Large input with flush-to-zero: result flushed to zero */
        float_raise(float_flag_underflow, fpst);
        return float16_set_sign(float16_zero, float16_is_neg(f16));
    }

    /* Exponent offset 29 == 2 * 15 (the f16 bias) - 1 */
    f64_frac = call_recip_estimate(&f16_exp, 29,
                                   ((uint64_t) f16_frac) << (52 - 10));

    /* result = sign : result_exp<4:0> : fraction<9:0> */
    f16_val = deposit32(0, 15, 1, f16_sign);
    f16_val = deposit32(f16_val, 10, 5, f16_exp);
    f16_val = deposit32(f16_val, 0, 10, extract64(f64_frac, 52 - 10, 10));
    return make_float16(f16_val);
}
11918
/* Reciprocal estimate (FRECPE / VRECPE) for single precision. */
float32 HELPER(recpe_f32)(float32 input, void *fpstp)
{
    float_status *fpst = fpstp;
    float32 f32 = float32_squash_input_denormal(input, fpst);
    uint32_t f32_val = float32_val(f32);
    bool f32_sign = float32_is_neg(f32);
    int f32_exp = extract32(f32_val, 23, 8);
    uint32_t f32_frac = extract32(f32_val, 0, 23);
    uint64_t f64_frac;

    if (float32_is_any_nan(f32)) {
        float32 nan = f32;
        /* Signaling NaNs raise Invalid and are quietened first */
        if (float32_is_signaling_nan(f32, fpst)) {
            float_raise(float_flag_invalid, fpst);
            nan = float32_silence_nan(f32, fpst);
        }
        if (fpst->default_nan_mode) {
            nan = float32_default_nan(fpst);
        }
        return nan;
    } else if (float32_is_infinity(f32)) {
        /* 1/inf == signed zero */
        return float32_set_sign(float32_zero, float32_is_neg(f32));
    } else if (float32_is_zero(f32)) {
        /* 1/0 == signed infinity, raising divide-by-zero */
        float_raise(float_flag_divbyzero, fpst);
        return float32_set_sign(float32_infinity, float32_is_neg(f32));
    } else if (float32_abs(f32) < (1ULL << 21)) {
        /* Magnitude so small (raw bits below 2^21) that the reciprocal
         * overflows: return inf or maxnorm per the rounding mode.
         */
        float_raise(float_flag_overflow | float_flag_inexact, fpst);
        if (round_to_inf(fpst, f32_sign)) {
            return float32_set_sign(float32_infinity, f32_sign);
        } else {
            return float32_set_sign(float32_maxnorm, f32_sign);
        }
    } else if (f32_exp >= 253 && fpst->flush_to_zero) {
        /* Large input with flush-to-zero: result flushed to zero */
        float_raise(float_flag_underflow, fpst);
        return float32_set_sign(float32_zero, float32_is_neg(f32));
    }

    /* Exponent offset 253 == 2 * 127 (the f32 bias) - 1 */
    f64_frac = call_recip_estimate(&f32_exp, 253,
                                   ((uint64_t) f32_frac) << (52 - 23));

    /* result = sign : result_exp<7:0> : fraction<22:0> */
    f32_val = deposit32(0, 31, 1, f32_sign);
    f32_val = deposit32(f32_val, 23, 8, f32_exp);
    f32_val = deposit32(f32_val, 0, 23, extract64(f64_frac, 52 - 23, 23));
    return make_float32(f32_val);
}
11966
/* Reciprocal estimate (FRECPE) for double precision. */
float64 HELPER(recpe_f64)(float64 input, void *fpstp)
{
    float_status *fpst = fpstp;
    float64 f64 = float64_squash_input_denormal(input, fpst);
    uint64_t f64_val = float64_val(f64);
    bool f64_sign = float64_is_neg(f64);
    int f64_exp = extract64(f64_val, 52, 11);
    uint64_t f64_frac = extract64(f64_val, 0, 52);

    /* Deal with any special cases */
    if (float64_is_any_nan(f64)) {
        float64 nan = f64;
        /* Signaling NaNs raise Invalid and are quietened first */
        if (float64_is_signaling_nan(f64, fpst)) {
            float_raise(float_flag_invalid, fpst);
            nan = float64_silence_nan(f64, fpst);
        }
        if (fpst->default_nan_mode) {
            nan = float64_default_nan(fpst);
        }
        return nan;
    } else if (float64_is_infinity(f64)) {
        /* 1/inf == signed zero */
        return float64_set_sign(float64_zero, float64_is_neg(f64));
    } else if (float64_is_zero(f64)) {
        /* 1/0 == signed infinity, raising divide-by-zero */
        float_raise(float_flag_divbyzero, fpst);
        return float64_set_sign(float64_infinity, float64_is_neg(f64));
    } else if ((f64_val & ~(1ULL << 63)) < (1ULL << 50)) {
        /* Magnitude so small (raw bits below 2^50, sign masked off)
         * that the reciprocal overflows: inf or maxnorm per rounding.
         */
        float_raise(float_flag_overflow | float_flag_inexact, fpst);
        if (round_to_inf(fpst, f64_sign)) {
            return float64_set_sign(float64_infinity, f64_sign);
        } else {
            return float64_set_sign(float64_maxnorm, f64_sign);
        }
    } else if (f64_exp >= 2045 && fpst->flush_to_zero) {
        /* Large input with flush-to-zero: result flushed to zero */
        float_raise(float_flag_underflow, fpst);
        return float64_set_sign(float64_zero, float64_is_neg(f64));
    }

    /* Exponent offset 2045 == 2 * 1023 (the f64 bias) - 1 */
    f64_frac = call_recip_estimate(&f64_exp, 2045, f64_frac);

    /* result = sign : result_exp<10:0> : fraction<51:0> */
    f64_val = deposit64(0, 63, 1, f64_sign);
    f64_val = deposit64(f64_val, 52, 11, f64_exp);
    f64_val = deposit64(f64_val, 0, 52, f64_frac);
    return make_float64(f64_val);
}
12013
12014
12015
12016
12017
/* Integer 8-bit reciprocal-square-root estimate: the input encodes a
 * fixed-point value in [0.25, 1.0) (128 <= a < 512); the result is the
 * estimated 1/sqrt, as a value in [256, 512).
 */
static int do_recip_sqrt_estimate(int a)
{
    int b = 512;
    int estimate;

    assert(128 <= a && a < 512);
    if (a < 256) {
        /* Input in [0.25, 0.5): take the odd midpoint 2a + 1 */
        a = 2 * a + 1;
    } else {
        /* Input in [0.5, 1.0): round down to even, then 2(a + 1) */
        a = ((a >> 1) << 1) + 1;
        a = 2 * a;
    }
    /* Advance b while a * (b + 1)^2 still fits under 2^28, i.e. find
     * the largest b whose square times a stays below the target.
     */
    while (a * (b + 1) * (b + 1) < (1 << 28)) {
        b += 1;
    }
    estimate = (b + 1) / 2;

    assert(256 <= estimate && estimate < 512);
    return estimate;
}
12038
12039
12040static uint64_t recip_sqrt_estimate(int *exp , int exp_off, uint64_t frac)
12041{
12042 int estimate;
12043 uint32_t scaled;
12044
12045 if (*exp == 0) {
12046 while (extract64(frac, 51, 1) == 0) {
12047 frac = frac << 1;
12048 *exp -= 1;
12049 }
12050 frac = extract64(frac, 0, 51) << 1;
12051 }
12052
12053 if (*exp & 1) {
12054
12055 scaled = deposit32(1 << 7, 0, 7, extract64(frac, 45, 7));
12056 } else {
12057
12058 scaled = deposit32(1 << 8, 0, 8, extract64(frac, 44, 8));
12059 }
12060 estimate = do_recip_sqrt_estimate(scaled);
12061
12062 *exp = (exp_off - *exp) / 2;
12063 return extract64(estimate, 0, 8) << 44;
12064}
12065
/* Reciprocal square-root estimate (FRSQRTE) for half precision. */
uint32_t HELPER(rsqrte_f16)(uint32_t input, void *fpstp)
{
    float_status *s = fpstp;
    float16 f16 = float16_squash_input_denormal(input, s);
    uint16_t val = float16_val(f16);
    bool f16_sign = float16_is_neg(f16);
    int f16_exp = extract32(val, 10, 5);
    uint16_t f16_frac = extract32(val, 0, 10);
    uint64_t f64_frac;

    if (float16_is_any_nan(f16)) {
        float16 nan = f16;
        /* Signaling NaNs raise Invalid and are quietened first */
        if (float16_is_signaling_nan(f16, s)) {
            float_raise(float_flag_invalid, s);
            nan = float16_silence_nan(f16, s);
        }
        if (s->default_nan_mode) {
            nan = float16_default_nan(s);
        }
        return nan;
    } else if (float16_is_zero(f16)) {
        /* 1/sqrt(0) == signed infinity, raising divide-by-zero */
        float_raise(float_flag_divbyzero, s);
        return float16_set_sign(float16_infinity, f16_sign);
    } else if (f16_sign) {
        /* sqrt of a negative number: Invalid, default NaN */
        float_raise(float_flag_invalid, s);
        return float16_default_nan(s);
    } else if (float16_is_infinity(f16)) {
        /* 1/sqrt(inf) == +0 */
        return float16_zero;
    }

    /* Scale and normalize to the double-precision fraction layout so
     * the shared estimate helper can be used; 44 == 3 * 15 - 1 is the
     * f16 exponent offset for the rsqrt.
     */
    f64_frac = ((uint64_t) f16_frac) << (52 - 10);

    f64_frac = recip_sqrt_estimate(&f16_exp, 44, f64_frac);

    /* result = sign : result_exp<4:0> : estimate<7:0> : Zeros(2) */
    val = deposit32(0, 15, 1, f16_sign);
    val = deposit32(val, 10, 5, f16_exp);
    val = deposit32(val, 2, 8, extract64(f64_frac, 52 - 8, 8));
    return make_float16(val);
}
12109
/* Reciprocal square-root estimate (FRSQRTE / VRSQRTE) for single
 * precision.
 */
float32 HELPER(rsqrte_f32)(float32 input, void *fpstp)
{
    float_status *s = fpstp;
    float32 f32 = float32_squash_input_denormal(input, s);
    uint32_t val = float32_val(f32);
    uint32_t f32_sign = float32_is_neg(f32);
    int f32_exp = extract32(val, 23, 8);
    uint32_t f32_frac = extract32(val, 0, 23);
    uint64_t f64_frac;

    if (float32_is_any_nan(f32)) {
        float32 nan = f32;
        /* Signaling NaNs raise Invalid and are quietened first */
        if (float32_is_signaling_nan(f32, s)) {
            float_raise(float_flag_invalid, s);
            nan = float32_silence_nan(f32, s);
        }
        if (s->default_nan_mode) {
            nan = float32_default_nan(s);
        }
        return nan;
    } else if (float32_is_zero(f32)) {
        /* 1/sqrt(0) == signed infinity, raising divide-by-zero */
        float_raise(float_flag_divbyzero, s);
        return float32_set_sign(float32_infinity, float32_is_neg(f32));
    } else if (float32_is_neg(f32)) {
        /* sqrt of a negative number: Invalid, default NaN */
        float_raise(float_flag_invalid, s);
        return float32_default_nan(s);
    } else if (float32_is_infinity(f32)) {
        /* 1/sqrt(inf) == +0 */
        return float32_zero;
    }

    /* Scale and normalize to the double-precision fraction layout so
     * the shared estimate helper can be used; 380 == 3 * 127 - 1 is
     * the f32 exponent offset for the rsqrt.
     */
    f64_frac = ((uint64_t) f32_frac) << 29;

    f64_frac = recip_sqrt_estimate(&f32_exp, 380, f64_frac);

    /* result = sign : result_exp<7:0> : estimate<7:0> : Zeros(15) */
    val = deposit32(0, 31, 1, f32_sign);
    val = deposit32(val, 23, 8, f32_exp);
    val = deposit32(val, 15, 8, extract64(f64_frac, 52 - 8, 8));
    return make_float32(val);
}
12153
12154float64 HELPER(rsqrte_f64)(float64 input, void *fpstp)
12155{
12156 float_status *s = fpstp;
12157 float64 f64 = float64_squash_input_denormal(input, s);
12158 uint64_t val = float64_val(f64);
12159 bool f64_sign = float64_is_neg(f64);
12160 int f64_exp = extract64(val, 52, 11);
12161 uint64_t f64_frac = extract64(val, 0, 52);
12162
12163 if (float64_is_any_nan(f64)) {
12164 float64 nan = f64;
12165 if (float64_is_signaling_nan(f64, s)) {
12166 float_raise(float_flag_invalid, s);
12167 nan = float64_silence_nan(f64, s);
12168 }
12169 if (s->default_nan_mode) {
12170 nan = float64_default_nan(s);
12171 }
12172 return nan;
12173 } else if (float64_is_zero(f64)) {
12174 float_raise(float_flag_divbyzero, s);
12175 return float64_set_sign(float64_infinity, float64_is_neg(f64));
12176 } else if (float64_is_neg(f64)) {
12177 float_raise(float_flag_invalid, s);
12178 return float64_default_nan(s);
12179 } else if (float64_is_infinity(f64)) {
12180 return float64_zero;
12181 }
12182
12183 f64_frac = recip_sqrt_estimate(&f64_exp, 3068, f64_frac);
12184
12185
12186 val = deposit64(0, 61, 1, f64_sign);
12187 val = deposit64(val, 52, 11, f64_exp);
12188 val = deposit64(val, 44, 8, extract64(f64_frac, 52 - 8, 8));
12189 return make_float64(val);
12190}
12191
12192uint32_t HELPER(recpe_u32)(uint32_t a, void *fpstp)
12193{
12194
12195 int input, estimate;
12196
12197 if ((a & 0x80000000) == 0) {
12198 return 0xffffffff;
12199 }
12200
12201 input = extract32(a, 23, 9);
12202 estimate = recip_estimate(input);
12203
12204 return deposit32(0, (32 - 9), 9, estimate);
12205}
12206
12207uint32_t HELPER(rsqrte_u32)(uint32_t a, void *fpstp)
12208{
12209 int estimate;
12210
12211 if ((a & 0xc0000000) == 0) {
12212 return 0xffffffff;
12213 }
12214
12215 estimate = do_recip_sqrt_estimate(extract32(a, 23, 9));
12216
12217 return deposit32(0, 23, 9, estimate);
12218}
12219
12220
12221float32 VFP_HELPER(muladd, s)(float32 a, float32 b, float32 c, void *fpstp)
12222{
12223 float_status *fpst = fpstp;
12224 return float32_muladd(a, b, c, 0, fpst);
12225}
12226
12227float64 VFP_HELPER(muladd, d)(float64 a, float64 b, float64 c, void *fpstp)
12228{
12229 float_status *fpst = fpstp;
12230 return float64_muladd(a, b, c, 0, fpst);
12231}
12232
12233
/* Round to integral, propagating Inexact if rounding occurred
 * (ARM FRINTX flavour).
 */
float32 HELPER(rints_exact)(float32 x, void *fp_status)
{
    return float32_round_to_int(x, fp_status);
}
12238
/* Round to integral, propagating Inexact if rounding occurred
 * (ARM FRINTX flavour), double precision.
 */
float64 HELPER(rintd_exact)(float64 x, void *fp_status)
{
    return float64_round_to_int(x, fp_status);
}
12243
12244float32 HELPER(rints)(float32 x, void *fp_status)
12245{
12246 int old_flags = get_float_exception_flags(fp_status), new_flags;
12247 float32 ret;
12248
12249 ret = float32_round_to_int(x, fp_status);
12250
12251
12252 if (!(old_flags & float_flag_inexact)) {
12253 new_flags = get_float_exception_flags(fp_status);
12254 set_float_exception_flags(new_flags & ~float_flag_inexact, fp_status);
12255 }
12256
12257 return ret;
12258}
12259
12260float64 HELPER(rintd)(float64 x, void *fp_status)
12261{
12262 int old_flags = get_float_exception_flags(fp_status), new_flags;
12263 float64 ret;
12264
12265 ret = float64_round_to_int(x, fp_status);
12266
12267 new_flags = get_float_exception_flags(fp_status);
12268
12269
12270 if (!(old_flags & float_flag_inexact)) {
12271 new_flags = get_float_exception_flags(fp_status);
12272 set_float_exception_flags(new_flags & ~float_flag_inexact, fp_status);
12273 }
12274
12275 return ret;
12276}
12277
12278
/* Convert an ARM FPROUNDING_* constant into the softfloat
 * float_round_* rounding mode.
 */
int arm_rmode_to_sf(int rmode)
{
    switch (rmode) {
    case FPROUNDING_TIEAWAY:
        rmode = float_round_ties_away;
        break;
    case FPROUNDING_ODD:
        /* Round-to-odd is not implemented; log and degrade */
        qemu_log_mask(LOG_UNIMP, "arm: unimplemented rounding mode: %d\n",
                      rmode);
        /* fall through: treat as round-to-nearest-even */
    case FPROUNDING_TIEEVEN:
    default:
        rmode = float_round_nearest_even;
        break;
    case FPROUNDING_POSINF:
        rmode = float_round_up;
        break;
    case FPROUNDING_NEGINF:
        rmode = float_round_down;
        break;
    case FPROUNDING_ZERO:
        rmode = float_round_to_zero;
        break;
    }
    return rmode;
}
12305
12306
12307
12308
12309
12310uint32_t HELPER(crc32)(uint32_t acc, uint32_t val, uint32_t bytes)
12311{
12312 uint8_t buf[4];
12313
12314 stl_le_p(buf, val);
12315
12316
12317 return crc32(acc ^ 0xffffffff, buf, bytes) ^ 0xffffffff;
12318}
12319
12320uint32_t HELPER(crc32c)(uint32_t acc, uint32_t val, uint32_t bytes)
12321{
12322 uint8_t buf[4];
12323
12324 stl_le_p(buf, val);
12325
12326
12327 return crc32c(acc, buf, bytes) ^ 0xffffffff;
12328}
12329
12330
12331
12332
/* Return the exception level to which FP-disabled exceptions should
 * be routed, or 0 if FP access is currently enabled.  The checks are
 * ordered by trap priority: CPACR (EL1), then CPTR_EL2, then CPTR_EL3.
 */
static inline int fp_exception_el(CPUARMState *env)
{
#ifndef CONFIG_USER_ONLY
    int fpen;
    int cur_el = arm_current_el(env);

    /* CPACR and its FP-access controls only exist from ARMv6 on;
     * earlier CPUs never trap FP this way.
     */
    if (!arm_feature(env, ARM_FEATURE_V6)) {
        return 0;
    }

    /* CPACR_EL1 bits [21:20] are the FPEN field controlling FP/SIMD
     * access from EL0 and EL1.
     */
    fpen = extract32(env->cp15.cpacr_el1, 20, 2);
    switch (fpen) {
    case 0:
    case 2:
        /* FPEN 0b00/0b10: trap accesses from both EL0 and EL1 */
        if (cur_el == 0 || cur_el == 1) {
            /* In Secure AArch32, the trap is taken to Monitor mode */
            if (arm_is_secure(env) && !arm_el_is_aa64(env, 3)) {
                return 3;
            }
            return 1;
        }
        if (cur_el == 3 && !is_a64(env)) {
            /* Secure PL1 running at EL3 in AArch32 */
            return 3;
        }
        break;
    case 1:
        /* FPEN 0b01: trap only EL0 accesses */
        if (cur_el == 0) {
            return 1;
        }
        break;
    case 3:
        /* FPEN 0b11: no CPACR trap */
        break;
    }

    /* CPTR_EL2 bit 10 (TFP) traps Non-secure EL0-EL2 FP accesses
     * to EL2.
     */
    if (cur_el <= 2 && extract32(env->cp15.cptr_el[2], 10, 1)
        && !arm_is_secure_below_el3(env)) {

        return 2;
    }

    /* CPTR_EL3 bit 10 (TFP) traps all remaining FP accesses to EL3 */
    if (extract32(env->cp15.cptr_el[3], 10, 1)) {

        return 3;
    }
#endif
    return 0;
}
12395
/* Fill in *pc, *cs_base and *pflags with the CPU state that selects
 * which translation block may be executed: PC plus the packed
 * ARM_TBFLAG_* bits describing mode, MMU index, FP/SVE state etc.
 */
void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
                          target_ulong *cs_base, uint32_t *pflags)
{
    ARMMMUIdx mmu_idx = core_to_arm_mmu_idx(env, cpu_mmu_index(env, false));
    int fp_el = fp_exception_el(env);
    uint32_t flags;

    if (is_a64(env)) {
        *pc = env->pc;
        flags = ARM_TBFLAG_AARCH64_STATE_MASK;
        /* Top-byte-ignore controls for tagged addresses */
        flags |= (arm_regime_tbi0(env, mmu_idx) << ARM_TBFLAG_TBI0_SHIFT);
        flags |= (arm_regime_tbi1(env, mmu_idx) << ARM_TBFLAG_TBI1_SHIFT);

        if (arm_feature(env, ARM_FEATURE_SVE)) {
            int sve_el = sve_exception_el(env);
            uint32_t zcr_len;

            /* If SVE accesses trap but FP does not, the effective
             * vector length is 0 for the purpose of translation.
             */
            if (sve_el != 0 && fp_el == 0) {
                zcr_len = 0;
            } else {
                int current_el = arm_current_el(env);
                ARMCPU *cpu = arm_env_get_cpu(env);

                /* Effective length is the implementation maximum
                 * further constrained by each ZCR_ELx.LEN applicable
                 * at the current EL.
                 */
                zcr_len = cpu->sve_max_vq - 1;
                if (current_el <= 1) {
                    zcr_len = MIN(zcr_len, 0xf & (uint32_t)env->vfp.zcr_el[1]);
                }
                if (current_el < 2 && arm_feature(env, ARM_FEATURE_EL2)) {
                    zcr_len = MIN(zcr_len, 0xf & (uint32_t)env->vfp.zcr_el[2]);
                }
                if (current_el < 3 && arm_feature(env, ARM_FEATURE_EL3)) {
                    zcr_len = MIN(zcr_len, 0xf & (uint32_t)env->vfp.zcr_el[3]);
                }
            }
            flags |= sve_el << ARM_TBFLAG_SVEEXC_EL_SHIFT;
            flags |= zcr_len << ARM_TBFLAG_ZCR_LEN_SHIFT;
        }
    } else {
        *pc = env->regs[15];
        flags = (env->thumb << ARM_TBFLAG_THUMB_SHIFT)
            | (env->vfp.vec_len << ARM_TBFLAG_VECLEN_SHIFT)
            | (env->vfp.vec_stride << ARM_TBFLAG_VECSTRIDE_SHIFT)
            | (env->condexec_bits << ARM_TBFLAG_CONDEXEC_SHIFT)
            | (arm_sctlr_b(env) << ARM_TBFLAG_SCTLR_B_SHIFT);
        if (!(access_secure_reg(env))) {
            flags |= ARM_TBFLAG_NS_MASK;
        }
        /* VFP is enabled via FPEXC.EN, or unconditionally if EL1 is
         * AArch64 (FPEXC does not then apply).
         */
        if (env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30)
            || arm_el_is_aa64(env, 1)) {
            flags |= ARM_TBFLAG_VFPEN_MASK;
        }
        flags |= (extract32(env->cp15.c15_cpar, 0, 2)
                  << ARM_TBFLAG_XSCALE_CPAR_SHIFT);
    }

    flags |= (arm_to_core_mmu_idx(mmu_idx) << ARM_TBFLAG_MMUIDX_SHIFT);

    /* Single-step state: SS_ACTIVE says MDSCR_EL1.SS is set; the
     * PSTATE_SS flag additionally records whether the *next* insn
     * should execute before the step exception is taken.
     */
    if (arm_singlestep_active(env)) {
        flags |= ARM_TBFLAG_SS_ACTIVE_MASK;
        if (is_a64(env)) {
            if (env->pstate & PSTATE_SS) {
                flags |= ARM_TBFLAG_PSTATE_SS_MASK;
            }
        } else {
            if (env->uncached_cpsr & PSTATE_SS) {
                flags |= ARM_TBFLAG_PSTATE_SS_MASK;
            }
        }
    }
    if (arm_cpu_data_is_big_endian(env)) {
        flags |= ARM_TBFLAG_BE_DATA_MASK;
    }
    flags |= fp_el << ARM_TBFLAG_FPEXC_EL_SHIFT;

    /* M-profile Handler mode */
    if (arm_v7m_is_handler_mode(env)) {
        flags |= ARM_TBFLAG_HANDLER_MASK;
    }

    *pflags = flags;
    *cs_base = 0;
}
12488