/*
 * ARM TLB (Translation lookaside buffer) helpers.
 */
#include "qemu/osdep.h"
#include "cpu.h"
#include "internals.h"
#include "exec/exec-all.h"

static inline uint32_t merge_syn_data_abort(uint32_t template_syn,
                                            unsigned int target_el,
                                            bool same_el, bool ea,
                                            bool s1ptw, bool is_write,
                                            int fsc)
{
    uint32_t syn;

    /*
     * ISV is only reported for data aborts routed to EL2, and never
     * for a stage-1 page table walk faulting on stage 2 (s1ptw).
     *
     * Furthermore, ISV is only valid for certain kinds of load/store,
     * so if the template syndrome does not have ISV set, leave it clear.
     */
    if (!(template_syn & ARM_EL_ISV) || target_el != 2 || s1ptw) {
        syn = syn_data_abort_no_iss(same_el, 0,
                                    ea, 0, s1ptw, is_write, fsc);
    } else {
        /*
         * The SAS, SSE, SRT, SF and AR arguments are passed as zero here;
         * those fields come from the template syndrome when it is merged
         * in below.
         */
        syn = syn_data_abort_with_iss(same_el,
                                      0, 0, 0, 0, 0,
                                      ea, 0, s1ptw, is_write, fsc,
                                      true);
        /* Merge the runtime syndrome with the template syndrome. */
        syn |= template_syn;
    }
    return syn;
}

static void QEMU_NORETURN arm_deliver_fault(ARMCPU *cpu, vaddr addr,
                                            MMUAccessType access_type,
                                            int mmu_idx, ARMMMUFaultInfo *fi)
{
    CPUARMState *env = &cpu->env;
    int target_el;
    bool same_el;
    uint32_t syn, exc, fsr, fsc;
    ARMMMUIdx arm_mmu_idx = core_to_arm_mmu_idx(env, mmu_idx);

    target_el = exception_target_el(env);
    if (fi->stage2) {
        target_el = 2;
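        /*
         * Stage-2 faults are reported to EL2.  HPFAR_EL2 holds the faulting
         * IPA, with address bit [12] placed at register bit [4].
         */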
        env->cp15.hpfar_el2 = extract64(fi->s2addr, 12, 47) << 4;
        if (arm_is_secure_below_el3(env) && fi->s1ns) {
            env->cp15.hpfar_el2 |= HPFAR_NS;
        }
    }
    same_el = (arm_current_el(env) == target_el);

    if (target_el == 2 || arm_el_is_aa64(env, target_el) ||
        arm_s1_regime_using_lpae_format(env, arm_mmu_idx)) {
        /*
         * LPAE format fault status register: the bottom 6 bits are the
         * status code, in the same form as needed for the syndrome.
         */
        fsr = arm_fi_to_lfsc(fi);
        fsc = extract32(fsr, 0, 6);
    } else {
        fsr = arm_fi_to_sfsc(fi);
        /*
         * Short format FSR: this fault will never actually be reported
         * to an EL that uses a syndrome register, so use a (currently)
         * reserved FSR code in case the constructed syndrome does leak
         * into the guest somehow.
         */
        fsc = 0x3f;
    }

    if (access_type == MMU_INST_FETCH) {
        syn = syn_insn_abort(same_el, fi->ea, fi->s1ptw, fsc);
        exc = EXCP_PREFETCH_ABORT;
    } else {
        syn = merge_syn_data_abort(env->exception.syndrome, target_el,
                                   same_el, fi->ea, fi->s1ptw,
                                   access_type == MMU_DATA_STORE,
                                   fsc);
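        /* DFSR bit 11 is WnR: set it for write accesses on ARMv6 and later. */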
        if (access_type == MMU_DATA_STORE
            && arm_feature(env, ARM_FEATURE_V6)) {
            fsr |= (1 << 11);
        }
        exc = EXCP_DATA_ABORT;
    }

    env->exception.vaddress = addr;
    env->exception.fsr = fsr;
    raise_exception(env, exc, syn, target_el);
}

/* Raise a data fault alignment exception for the specified virtual address */
void arm_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr,
                                 MMUAccessType access_type,
                                 int mmu_idx, uintptr_t retaddr)
{
    ARMCPU *cpu = ARM_CPU(cs);
    ARMMMUFaultInfo fi = {};

    /* We have a real CPU fault: recover the guest execution state. */
    cpu_restore_state(cs, retaddr, true);

    fi.type = ARMFault_Alignment;
    arm_deliver_fault(cpu, vaddr, access_type, mmu_idx, &fi);
}

#if !defined(CONFIG_USER_ONLY)

/*
 * arm_cpu_do_transaction_failed: handle a memory system error response
 * (e.g. "no device/memory present at address") by raising a synchronous
 * external abort exception.
 */
void arm_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
                                   vaddr addr, unsigned size,
                                   MMUAccessType access_type,
                                   int mmu_idx, MemTxAttrs attrs,
                                   MemTxResult response, uintptr_t retaddr)
{
    ARMCPU *cpu = ARM_CPU(cs);
    ARMMMUFaultInfo fi = {};

    /* We have a real CPU fault: recover the guest execution state. */
    cpu_restore_state(cs, retaddr, true);

    fi.ea = arm_extabort_type(response);
    fi.type = ARMFault_SyncExternal;
    arm_deliver_fault(cpu, addr, access_type, mmu_idx, &fi);
}

bool arm_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
                      MMUAccessType access_type, int mmu_idx,
                      bool probe, uintptr_t retaddr)
{
    ARMCPU *cpu = ARM_CPU(cs);
    ARMMMUFaultInfo fi = {};
    hwaddr phys_addr;
    target_ulong page_size;
    int prot, ret;
    MemTxAttrs attrs = {};
    ARMCacheAttrs cacheattrs = {};

    /*
     * Walk the page table and (if the mapping exists) add the page to
     * the TLB.  On success, return true.  Otherwise, if probing, return
     * false.  Otherwise populate fi with the fault information and
     * raise a long-jump exception to the CPU loop via arm_deliver_fault().
     */
    ret = get_phys_addr(&cpu->env, address, access_type,
                        core_to_arm_mmu_idx(&cpu->env, mmu_idx),
                        &phys_addr, &attrs, &prot, &page_size,
                        &fi, &cacheattrs);
    if (likely(!ret)) {
        /*
         * Map a single [sub]page.  Regions smaller than our declared
         * target page size are handled specially, so for those we
         * pass in the exact addresses.
         */
        if (page_size >= TARGET_PAGE_SIZE) {
            phys_addr &= TARGET_PAGE_MASK;
            address &= TARGET_PAGE_MASK;
        }

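        /* Attr 0xf0 is the Tagged Normal memory attribute used by MTE. */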
        if (cpu_isar_feature(aa64_mte, cpu) && cacheattrs.attrs == 0xf0) {
            arm_tlb_mte_tagged(&attrs) = true;
        }

        tlb_set_page_with_attrs(cs, address, phys_addr, attrs,
                                prot, mmu_idx, page_size);
        return true;
    } else if (probe) {
        return false;
    } else {
        /* We have a real CPU fault: recover the guest execution state. */
        cpu_restore_state(cs, retaddr, true);
        arm_deliver_fault(cpu, address, access_type, mmu_idx, &fi);
    }
}
#else
void arm_cpu_record_sigsegv(CPUState *cs, vaddr addr,
                            MMUAccessType access_type,
                            bool maperr, uintptr_t ra)
{
    ARMMMUFaultInfo fi = {
        .type = maperr ? ARMFault_Translation : ARMFault_Permission,
        .level = 3,
    };
    ARMCPU *cpu = ARM_CPU(cs);

    /*
     * We report both ESR and FAR to signal handlers.
     * For now, it is easiest to deliver the fault normally.
     */
    cpu_restore_state(cs, ra, true);
    arm_deliver_fault(cpu, addr, access_type, MMU_USER_IDX, &fi);
}

void arm_cpu_record_sigbus(CPUState *cs, vaddr addr,
                           MMUAccessType access_type, uintptr_t ra)
{
    arm_cpu_do_unaligned_access(cs, addr, access_type, MMU_USER_IDX, ra);
}
#endif