1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21#include "qemu/osdep.h"
22#include "cpu.h"
23#include "exec/exec-all.h"
24#include "qemu/host-utils.h"
25#include "exec/log.h"
26
27#if defined(CONFIG_USER_ONLY)
28
/*
 * User-mode variant of interrupt delivery: there is no real exception
 * handling, so just clear the pending exception, drop any exclusive
 * reservation, and save the pc into r14 (the interrupt link register).
 */
void mb_cpu_do_interrupt(CPUState *cs)
{
    MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
    CPUMBState *env = &cpu->env;

    cs->exception_index = -1;            /* no exception pending any more */
    env->res_addr = RES_ADDR_NONE;       /* lose any lwx/swx reservation */
    env->regs[14] = env->pc;             /* r14 <- return address */
}
38
/*
 * User-mode TLB fill: user emulation has no MMU, so any fault that
 * reaches here is fatal for the guest.  Raise a sentinel exception
 * (0xaa) and longjmp back to the cpu loop; this never returns.
 */
bool mb_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
                     MMUAccessType access_type, int mmu_idx,
                     bool probe, uintptr_t retaddr)
{
    cs->exception_index = 0xaa;
    cpu_loop_exit_restore(cs, retaddr);  /* noreturn */
}
46
47#else
48
49static bool mb_cpu_access_is_secure(MicroBlazeCPU *cpu,
50 MMUAccessType access_type)
51{
52 if (access_type == MMU_INST_FETCH) {
53 return !cpu->ns_axi_ip;
54 } else {
55 return !cpu->ns_axi_dp;
56 }
57}
58
/*
 * Fill the QEMU softmmu TLB for @address, or raise an MMU exception.
 * Returns true if a mapping was installed; returns false only when
 * @probe is set and the translation misses.  On a real fault this
 * does not return: it longjmps back via cpu_loop_exit_restore().
 */
bool mb_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
                     MMUAccessType access_type, int mmu_idx,
                     bool probe, uintptr_t retaddr)
{
    MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
    CPUMBState *env = &cpu->env;
    MicroBlazeMMULookup lu;
    unsigned int hit;
    int prot;
    MemTxAttrs attrs = {};

    attrs.secure = mb_cpu_access_is_secure(cpu, access_type);

    if (mmu_idx == MMU_NOMMU_IDX) {
        /* MMU disabled or not present: identity-map the page with
           full permissions. */
        address &= TARGET_PAGE_MASK;
        prot = PAGE_BITS;
        tlb_set_page_with_attrs(cs, address, address, attrs, prot, mmu_idx,
                                TARGET_PAGE_SIZE);
        return true;
    }

    hit = mmu_translate(cpu, &lu, address, access_type, mmu_idx);
    if (likely(hit)) {
        uint32_t vaddr = address & TARGET_PAGE_MASK;
        /* Preserve the offset of the page within the looked-up region. */
        uint32_t paddr = lu.paddr + vaddr - lu.vaddr;

        qemu_log_mask(CPU_LOG_MMU, "MMU map mmu=%d v=%x p=%x prot=%x\n",
                      mmu_idx, vaddr, paddr, lu.prot);
        tlb_set_page_with_attrs(cs, vaddr, paddr, attrs, lu.prot, mmu_idx,
                                TARGET_PAGE_SIZE);
        return true;
    }

    /* Translation failed.  A probe access just reports the miss
       without altering CPU state. */
    if (probe) {
        return false;
    }

    qemu_log_mask(CPU_LOG_MMU, "mmu=%d miss v=%" VADDR_PRIx "\n",
                  mmu_idx, address);

    /* Record the faulting address and an ESR cause code.  The values
       16-19 look like MicroBlaze ESR EC codes (data/insn storage vs
       data/insn TLB miss) and bit 10 the store qualifier —
       NOTE(review): confirm against the MicroBlaze reference guide. */
    env->ear = address;
    switch (lu.err) {
    case ERR_PROT:
        env->esr = access_type == MMU_INST_FETCH ? 17 : 16;
        env->esr |= (access_type == MMU_DATA_STORE) << 10;
        break;
    case ERR_MISS:
        env->esr = access_type == MMU_INST_FETCH ? 19 : 18;
        env->esr |= (access_type == MMU_DATA_STORE) << 10;
        break;
    default:
        abort();
    }

    /* A fault while already handling an MMU fault is unrecoverable. */
    if (cs->exception_index == EXCP_MMU) {
        cpu_abort(cs, "recursive faults\n");
    }

    /* Raise the MMU exception and unwind to the cpu loop (noreturn). */
    cs->exception_index = EXCP_MMU;
    cpu_loop_exit_restore(cs, retaddr);
}
123
/*
 * Deliver the exception or interrupt pending in cs->exception_index:
 * save the return address in the architectural link register for that
 * event, update MSR/ESR/BTR, and redirect pc to the handler vector.
 */
void mb_cpu_do_interrupt(CPUState *cs)
{
    MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
    CPUMBState *env = &cpu->env;
    uint32_t t, msr = mb_cpu_read_msr(env);
    bool set_esr;

    /* D_FLAG (delay slot) and IMM_FLAG (imm prefix) are never both set. */
    assert((env->iflags & (D_FLAG | IMM_FLAG)) != (D_FLAG | IMM_FLAG));
    /* BIMM_FLAG is only meaningful together with D_FLAG. */
    assert((env->iflags & (D_FLAG | BIMM_FLAG)) != BIMM_FLAG);
    /* No rti/rte/rtb delay-slot state may be pending here. */
    assert(!(env->iflags & (DRTI_FLAG | DRTE_FLAG | DRTB_FLAG)));

    switch (cs->exception_index) {
    case EXCP_HW_EXCP:
        /* Guests may raise HW exceptions only if the core config
           (PVR0) advertises exception support. */
        if (!(cpu->cfg.pvr_regs[0] & PVR0_USE_EXC_MASK)) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "Exception raised on system without exceptions!\n");
            return;
        }

        qemu_log_mask(CPU_LOG_INT,
                      "INT: HWE at pc=%08x msr=%08x iflags=%x\n",
                      env->pc, msr, env->iflags);

        /* Mirror delay-slot state into ESR[D]; if faulting in a delay
           slot, latch the branch target in BTR for the handler. */
        set_esr = true;
        env->esr &= ~D_FLAG;
        if (env->iflags & D_FLAG) {
            env->esr |= D_FLAG;
            env->btr = env->btarget;
        }

        /* r17 <- return address; vector to base + 0x20. */
        msr |= MSR_EIP;
        env->regs[17] = env->pc + 4;
        env->pc = cpu->cfg.base_vectors + 0x20;
        break;

    case EXCP_MMU:
        qemu_log_mask(CPU_LOG_INT,
                      "INT: MMU at pc=%08x msr=%08x "
                      "ear=%" PRIx64 " iflags=%x\n",
                      env->pc, msr, env->ear, env->iflags);

        set_esr = true;
        env->esr &= ~D_FLAG;
        if (env->iflags & D_FLAG) {
            env->esr |= D_FLAG;
            env->btr = env->btarget;
            /* Fault in a delay slot: return to the branch insn itself,
               which is 8 bytes back if the branch had an imm prefix
               (BIMM_FLAG), else 4. */
            env->regs[17] = env->pc - (env->iflags & BIMM_FLAG ? 8 : 4);
        } else if (env->iflags & IMM_FLAG) {
            /* Re-execute starting at the imm prefix insn. */
            env->regs[17] = env->pc - 4;
        } else {
            env->regs[17] = env->pc;
        }

        /* Vector to base + 0x20 (shared with HW exceptions). */
        msr |= MSR_EIP;
        env->pc = cpu->cfg.base_vectors + 0x20;
        break;

    case EXCP_IRQ:
        /* mb_cpu_exec_interrupt() only raises EXCP_IRQ when these
           conditions hold; anything else is a translator bug. */
        assert(!(msr & (MSR_EIP | MSR_BIP)));
        assert(msr & MSR_IE);
        assert(!(env->iflags & (D_FLAG | IMM_FLAG)));

        qemu_log_mask(CPU_LOG_INT,
                      "INT: DEV at pc=%08x msr=%08x iflags=%x\n",
                      env->pc, msr, env->iflags);
        set_esr = false;

        /* Disable further interrupts; r14 <- return address;
           vector to base + 0x10. */
        msr &= ~MSR_IE;
        env->regs[14] = env->pc;
        env->pc = cpu->cfg.base_vectors + 0x10;
        break;

    case EXCP_HW_BREAK:
        assert(!(env->iflags & (D_FLAG | IMM_FLAG)));

        qemu_log_mask(CPU_LOG_INT,
                      "INT: BRK at pc=%08x msr=%08x iflags=%x\n",
                      env->pc, msr, env->iflags);
        set_esr = false;

        /* Enter break mode; r16 <- return address;
           vector to base + 0x18. */
        msr |= MSR_BIP;
        env->regs[16] = env->pc;
        env->pc = cpu->cfg.base_vectors + 0x18;
        break;

    default:
        cpu_abort(cs, "unhandled exception type=%d\n", cs->exception_index);
        /* not reached */
    }

    /* Save VM/UM into their shadow bits (VMS/UMS sit one bit to the
       left) and clear VM/UM so the handler runs in privileged,
       non-virtual mode. */
    t = (msr & (MSR_VM | MSR_UM)) << 1;
    msr &= ~(MSR_VMS | MSR_UMS | MSR_VM | MSR_UM);
    msr |= t;
    mb_cpu_write_msr(env, msr);

    /* Drop any exclusive reservation and clear translator flags. */
    env->res_addr = RES_ADDR_NONE;
    env->iflags = 0;

    if (!set_esr) {
        qemu_log_mask(CPU_LOG_INT,
                      "         to pc=%08x msr=%08x\n", env->pc, msr);
    } else if (env->esr & D_FLAG) {
        qemu_log_mask(CPU_LOG_INT,
                      "         to pc=%08x msr=%08x esr=%04x btr=%08x\n",
                      env->pc, msr, env->esr, env->btr);
    } else {
        qemu_log_mask(CPU_LOG_INT,
                      "         to pc=%08x msr=%08x esr=%04x\n",
                      env->pc, msr, env->esr);
    }
}
247
248hwaddr mb_cpu_get_phys_page_attrs_debug(CPUState *cs, vaddr addr,
249 MemTxAttrs *attrs)
250{
251 MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
252 CPUMBState *env = &cpu->env;
253 target_ulong vaddr, paddr = 0;
254 MicroBlazeMMULookup lu;
255 int mmu_idx = cpu_mmu_index(env, false);
256 unsigned int hit;
257
258
259 *attrs = (MemTxAttrs) {};
260 attrs->secure = mb_cpu_access_is_secure(cpu, MMU_DATA_LOAD);
261
262 if (mmu_idx != MMU_NOMMU_IDX) {
263 hit = mmu_translate(cpu, &lu, addr, 0, 0);
264 if (hit) {
265 vaddr = addr & TARGET_PAGE_MASK;
266 paddr = lu.paddr + vaddr - lu.vaddr;
267 } else
268 paddr = 0;
269 } else
270 paddr = addr & TARGET_PAGE_MASK;
271
272 return paddr;
273}
274#endif
275
276bool mb_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
277{
278 MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
279 CPUMBState *env = &cpu->env;
280
281 if ((interrupt_request & CPU_INTERRUPT_HARD)
282 && (env->msr & MSR_IE)
283 && !(env->msr & (MSR_EIP | MSR_BIP))
284 && !(env->iflags & (D_FLAG | IMM_FLAG))) {
285 cs->exception_index = EXCP_IRQ;
286 mb_cpu_do_interrupt(cs);
287 return true;
288 }
289 return false;
290}
291
/*
 * Raise a hardware exception for an unaligned data access.  Recovers
 * the guest pc/iflags for the faulting insn, builds an ESR describing
 * the access, and longjmps to the cpu loop (does not return).
 */
void mb_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
                                MMUAccessType access_type,
                                int mmu_idx, uintptr_t retaddr)
{
    MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
    uint32_t esr, iflags;

    /* Restore env->pc/iflags to the state of the faulting insn. */
    cpu_restore_state(cs, retaddr, true);
    iflags = cpu->env.iflags;

    qemu_log_mask(CPU_LOG_INT,
                  "Unaligned access addr=" TARGET_FMT_lx " pc=%x iflags=%x\n",
                  (target_ulong)addr, cpu->env.pc, iflags);

    /* The translator records the access details (size, store flag,
       dest register) in the ESS bits of iflags; fold them into ESR. */
    esr = ESR_EC_UNALIGNED_DATA;
    if (likely(iflags & ESR_ESS_FLAG)) {
        esr |= iflags & ESR_ESS_MASK;
    } else {
        qemu_log_mask(LOG_UNIMP, "Unaligned access without ESR_ESS_FLAG\n");
    }

    cpu->env.ear = addr;
    cpu->env.esr = esr;
    cs->exception_index = EXCP_HW_EXCP;
    cpu_loop_exit(cs);  /* noreturn */
}
319