1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20#include "qemu/osdep.h"
21#include "cpu.h"
22#include "exec/exec-all.h"
23#include "exec/helper-proto.h"
24#include "qom/cpu.h"
25
26#ifdef CONFIG_USER_ONLY
27int hppa_cpu_handle_mmu_fault(CPUState *cs, vaddr address,
28 int size, int rw, int mmu_idx)
29{
30 HPPACPU *cpu = HPPA_CPU(cs);
31
32
33
34 cs->exception_index = EXCP_DMP;
35 cpu->env.cr[CR_IOR] = address;
36 return 1;
37}
38#else
39static hppa_tlb_entry *hppa_find_tlb(CPUHPPAState *env, vaddr addr)
40{
41 int i;
42
43 for (i = 0; i < ARRAY_SIZE(env->tlb); ++i) {
44 hppa_tlb_entry *ent = &env->tlb[i];
45 if (ent->va_b <= addr && addr <= ent->va_e) {
46 return ent;
47 }
48 }
49 return NULL;
50}
51
52static void hppa_flush_tlb_ent(CPUHPPAState *env, hppa_tlb_entry *ent)
53{
54 CPUState *cs = CPU(hppa_env_get_cpu(env));
55 unsigned i, n = 1 << (2 * ent->page_size);
56 uint64_t addr = ent->va_b;
57
58 for (i = 0; i < n; ++i, addr += TARGET_PAGE_SIZE) {
59
60 tlb_flush_page_by_mmuidx(cs, addr, 0xf);
61 }
62
63 memset(ent, 0, sizeof(*ent));
64 ent->va_b = -1;
65}
66
67static hppa_tlb_entry *hppa_alloc_tlb_ent(CPUHPPAState *env)
68{
69 hppa_tlb_entry *ent;
70 uint32_t i = env->tlb_last;
71
72 env->tlb_last = (i == ARRAY_SIZE(env->tlb) - 1 ? 0 : i + 1);
73 ent = &env->tlb[i];
74
75 hppa_flush_tlb_ent(env, ent);
76 return ent;
77}
78
/* Translate virtual address ADDR for an access of TYPE (a PAGE_* mask,
   or 0 for a non-architectural probe) at privilege MMU_IDX.  On return,
   *PPHYS is the physical address and *PPROT the permitted protections.
   Returns -1 on success, otherwise the EXCP_* fault to raise.  */
int hppa_get_physical_address(CPUHPPAState *env, vaddr addr, int mmu_idx,
                              int type, hwaddr *pphys, int *pprot)
{
    hwaddr phys;
    int prot, r_prot, w_prot, x_prot;
    hppa_tlb_entry *ent;
    int ret = -1;

    /* Virtual translation disabled.  Direct map virtual to physical.  */
    if (mmu_idx == MMU_PHYS_IDX) {
        phys = addr;
        prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        goto egress;
    }

    /* Find a valid tlb entry that matches the virtual address.  */
    ent = hppa_find_tlb(env, addr);
    if (ent == NULL || !ent->entry_valid) {
        phys = 0;
        prot = 0;
        /* NOTE(review): reports a *data* TLB miss even for an
           instruction fetch — confirm this is intentional.  */
        ret = EXCP_DTLB_MISS;
        goto egress;
    }

    /* We now know the physical address.  */
    phys = ent->pa + (addr & ~TARGET_PAGE_MASK);

    /* Map the access-rights field of the TLB entry to QEMU protection:
       read/write require privilege <= the respective PL field, execute
       requires privilege within [ar_pl2, ar_pl1].  */
    r_prot = (mmu_idx <= ent->ar_pl1) * PAGE_READ;
    w_prot = (mmu_idx <= ent->ar_pl2) * PAGE_WRITE;
    x_prot = (ent->ar_pl2 <= mmu_idx && mmu_idx <= ent->ar_pl1) * PAGE_EXEC;
    switch (ent->ar_type) {
    case 0: /* read-only */
        prot = r_prot;
        break;
    case 1: /* read/write */
        prot = r_prot | w_prot;
        break;
    case 2: /* read/execute */
        prot = r_prot | x_prot;
        break;
    case 3: /* read/write/execute */
        prot = r_prot | w_prot | x_prot;
        break;
    default: /* gateway page: execute only */
        prot = x_prot;
        break;
    }

    /* TYPE == 0 indicates a non-architectural access from within QEMU
       (e.g. debug or LPA probe): report the translation but bypass the
       access, D, B and T checks below.  */
    if (type == 0) {
        goto egress;
    }

    if (unlikely(!(prot & type))) {
        /* Access not permitted -- Inst/Data Memory Protection Fault.  */
        ret = (type & PAGE_EXEC ? EXCP_IMP :
               prot & PAGE_READ ? EXCP_DMP : EXCP_DMAR);
        goto egress;
    }

    /* In reverse priority order, check conditions that raise faults.
       As we go, strip the PROT bits covering the condition checked, so
       the reduced protections stored in the QEMU TLB force a re-walk of
       this architectural entry on the next such access.  */
    if (unlikely(!ent->d)) {
        if (type & PAGE_WRITE) {
            /* D bit clear -- TLB Dirty Bit Fault.  */
            ret = EXCP_TLB_DIRTY;
        }
        prot &= PAGE_READ | PAGE_EXEC;
    }
    if (unlikely(ent->b)) {
        if (type & PAGE_WRITE) {
            /* B bit set -- Data Memory Break Fault.  */
            ret = EXCP_DMB;
        }
        prot &= PAGE_READ | PAGE_EXEC;
    }
    if (unlikely(ent->t)) {
        if (!(type & PAGE_EXEC)) {
            /* T bit set -- Page Reference Fault.  */
            ret = EXCP_PAGE_REF;
        }
        prot &= PAGE_EXEC;
    }

 egress:
    *pphys = phys;
    *pprot = prot;
    return ret;
}
176
177hwaddr hppa_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
178{
179 HPPACPU *cpu = HPPA_CPU(cs);
180 hwaddr phys;
181 int prot, excp;
182
183
184
185
186 if (!(cpu->env.psw & PSW_D)) {
187 return addr;
188 }
189
190 excp = hppa_get_physical_address(&cpu->env, addr, MMU_KERNEL_IDX, 0,
191 &phys, &prot);
192
193
194
195
196 return excp == EXCP_DTLB_MISS ? -1 : phys;
197}
198
199void tlb_fill(CPUState *cs, target_ulong addr, int size,
200 MMUAccessType type, int mmu_idx, uintptr_t retaddr)
201{
202 HPPACPU *cpu = HPPA_CPU(cs);
203 int prot, excp, a_prot;
204 hwaddr phys;
205
206 switch (type) {
207 case MMU_INST_FETCH:
208 a_prot = PAGE_EXEC;
209 break;
210 case MMU_DATA_STORE:
211 a_prot = PAGE_WRITE;
212 break;
213 default:
214 a_prot = PAGE_READ;
215 break;
216 }
217
218 excp = hppa_get_physical_address(&cpu->env, addr, mmu_idx,
219 a_prot, &phys, &prot);
220 if (unlikely(excp >= 0)) {
221
222 cs->exception_index = excp;
223 if (cpu->env.psw & PSW_Q) {
224
225 cpu->env.cr[CR_IOR] = addr;
226 cpu->env.cr[CR_ISR] = addr >> 32;
227 }
228 cpu_loop_exit_restore(cs, retaddr);
229 }
230
231
232 tlb_set_page(cs, addr & TARGET_PAGE_MASK, phys & TARGET_PAGE_MASK,
233 prot, mmu_idx, TARGET_PAGE_SIZE);
234}
235
236
237void HELPER(itlba)(CPUHPPAState *env, target_ulong addr, target_ureg reg)
238{
239 hppa_tlb_entry *empty = NULL;
240 int i;
241
242
243 for (i = 0; i < ARRAY_SIZE(env->tlb); ++i) {
244 hppa_tlb_entry *ent = &env->tlb[i];
245 if (!ent->entry_valid) {
246 empty = ent;
247 } else if (ent->va_b <= addr && addr <= ent->va_e) {
248 hppa_flush_tlb_ent(env, ent);
249 empty = ent;
250 }
251 }
252
253
254 if (empty == NULL) {
255 empty = hppa_alloc_tlb_ent(env);
256 }
257
258
259 empty->va_b = addr & TARGET_PAGE_MASK;
260 empty->va_e = empty->va_b + TARGET_PAGE_SIZE - 1;
261 empty->pa = extract32(reg, 5, 20) << TARGET_PAGE_BITS;
262}
263
264
265void HELPER(itlbp)(CPUHPPAState *env, target_ulong addr, target_ureg reg)
266{
267 hppa_tlb_entry *ent = hppa_find_tlb(env, addr);
268
269 if (unlikely(ent == NULL || ent->entry_valid)) {
270 qemu_log_mask(LOG_GUEST_ERROR, "ITLBP not following ITLBA\n");
271 return;
272 }
273
274 ent->access_id = extract32(reg, 1, 18);
275 ent->u = extract32(reg, 19, 1);
276 ent->ar_pl2 = extract32(reg, 20, 2);
277 ent->ar_pl1 = extract32(reg, 22, 2);
278 ent->ar_type = extract32(reg, 24, 3);
279 ent->b = extract32(reg, 27, 1);
280 ent->d = extract32(reg, 28, 1);
281 ent->t = extract32(reg, 29, 1);
282 ent->entry_valid = 1;
283}
284
285
286
287static void ptlb_work(CPUState *cpu, run_on_cpu_data data)
288{
289 CPUHPPAState *env = cpu->env_ptr;
290 target_ulong addr = (target_ulong) data.target_ptr;
291 hppa_tlb_entry *ent = hppa_find_tlb(env, addr);
292
293 if (ent && ent->entry_valid) {
294 hppa_flush_tlb_ent(env, ent);
295 }
296}
297
298void HELPER(ptlb)(CPUHPPAState *env, target_ulong addr)
299{
300 CPUState *src = CPU(hppa_env_get_cpu(env));
301 CPUState *cpu;
302 run_on_cpu_data data = RUN_ON_CPU_TARGET_PTR(addr);
303
304 CPU_FOREACH(cpu) {
305 if (cpu != src) {
306 async_run_on_cpu(cpu, ptlb_work, data);
307 }
308 }
309 async_safe_run_on_cpu(src, ptlb_work, data);
310}
311
312
313
314void HELPER(ptlbe)(CPUHPPAState *env)
315{
316 CPUState *src = CPU(hppa_env_get_cpu(env));
317
318 memset(env->tlb, 0, sizeof(env->tlb));
319 tlb_flush_by_mmuidx(src, 0xf);
320}
321
322target_ureg HELPER(lpa)(CPUHPPAState *env, target_ulong addr)
323{
324 hwaddr phys;
325 int prot, excp;
326
327 excp = hppa_get_physical_address(env, addr, MMU_KERNEL_IDX, 0,
328 &phys, &prot);
329 if (excp >= 0) {
330 if (env->psw & PSW_Q) {
331
332 env->cr[CR_IOR] = addr;
333 env->cr[CR_ISR] = addr >> 32;
334 }
335 if (excp == EXCP_DTLB_MISS) {
336 excp = EXCP_NA_DTLB_MISS;
337 }
338 hppa_dynamic_excp(env, excp, GETPC());
339 }
340 return phys;
341}
342
343
344int hppa_artype_for_page(CPUHPPAState *env, target_ulong vaddr)
345{
346 hppa_tlb_entry *ent = hppa_find_tlb(env, vaddr);
347 return ent ? ent->ar_type : -1;
348}
349#endif
350