/*
 * HPPA memory access (MMU / TLB) helper routines.
 *
 * NOTE(review): the original file header (license/copyright block) was
 * lost in extraction, leaving only bare line numbers here; restore the
 * upstream header before committing.
 */
20#include "qemu/osdep.h"
21#include "cpu.h"
22#include "exec/exec-all.h"
23#include "exec/helper-proto.h"
24#include "qom/cpu.h"
25#include "trace.h"
26
27#ifdef CONFIG_USER_ONLY
28int hppa_cpu_handle_mmu_fault(CPUState *cs, vaddr address,
29 int size, int rw, int mmu_idx)
30{
31 HPPACPU *cpu = HPPA_CPU(cs);
32
33
34
35 cs->exception_index = EXCP_DMP;
36 cpu->env.cr[CR_IOR] = address;
37 return 1;
38}
39#else
40static hppa_tlb_entry *hppa_find_tlb(CPUHPPAState *env, vaddr addr)
41{
42 int i;
43
44 for (i = 0; i < ARRAY_SIZE(env->tlb); ++i) {
45 hppa_tlb_entry *ent = &env->tlb[i];
46 if (ent->va_b <= addr && addr <= ent->va_e) {
47 trace_hppa_tlb_find_entry(env, ent + i, ent->entry_valid,
48 ent->va_b, ent->va_e, ent->pa);
49 return ent;
50 }
51 }
52 trace_hppa_tlb_find_entry_not_found(env, addr);
53 return NULL;
54}
55
56static void hppa_flush_tlb_ent(CPUHPPAState *env, hppa_tlb_entry *ent)
57{
58 CPUState *cs = CPU(hppa_env_get_cpu(env));
59 unsigned i, n = 1 << (2 * ent->page_size);
60 uint64_t addr = ent->va_b;
61
62 trace_hppa_tlb_flush_ent(env, ent, ent->va_b, ent->va_e, ent->pa);
63
64 for (i = 0; i < n; ++i, addr += TARGET_PAGE_SIZE) {
65
66 tlb_flush_page_by_mmuidx(cs, addr, 0xf);
67 }
68
69 memset(ent, 0, sizeof(*ent));
70 ent->va_b = -1;
71}
72
73static hppa_tlb_entry *hppa_alloc_tlb_ent(CPUHPPAState *env)
74{
75 hppa_tlb_entry *ent;
76 uint32_t i = env->tlb_last;
77
78 env->tlb_last = (i == ARRAY_SIZE(env->tlb) - 1 ? 0 : i + 1);
79 ent = &env->tlb[i];
80
81 hppa_flush_tlb_ent(env, ent);
82 return ent;
83}
84
/*
 * Translate virtual ADDR into a physical address and QEMU protection bits.
 *
 * @type is the PAGE_* bit describing the access being performed, or 0 for
 * a translation-only probe that bypasses access checks.
 * On success, returns -1 and fills *pphys/*pprot.  On failure, returns the
 * EXCP_* number the access should raise (*pphys/*pprot are still written).
 */
int hppa_get_physical_address(CPUHPPAState *env, vaddr addr, int mmu_idx,
                              int type, hwaddr *pphys, int *pprot)
{
    hwaddr phys;
    int prot, r_prot, w_prot, x_prot;
    hppa_tlb_entry *ent;
    int ret = -1;

    /* Physical-address MMU index: no translation, full access.  */
    if (mmu_idx == MMU_PHYS_IDX) {
        phys = addr;
        prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        goto egress;
    }

    /* Find a valid TLB entry covering the virtual address.  */
    ent = hppa_find_tlb(env, addr);
    if (ent == NULL || !ent->entry_valid) {
        phys = 0;
        prot = 0;
        ret = (type == PAGE_EXEC) ? EXCP_ITLB_MISS : EXCP_DTLB_MISS;
        goto egress;
    }

    /* Entry found: combine the page frame with the page offset.  */
    phys = ent->pa + (addr & ~TARGET_PAGE_MASK);

    /* Map the entry's access-rights fields to QEMU protection bits.
       Lower mmu_idx values are more privileged; ar_pl1/ar_pl2 bound the
       privilege levels allowed for read/write, and execution requires
       mmu_idx to fall within [ar_pl2, ar_pl1].  */
    r_prot = (mmu_idx <= ent->ar_pl1) * PAGE_READ;
    w_prot = (mmu_idx <= ent->ar_pl2) * PAGE_WRITE;
    x_prot = (ent->ar_pl2 <= mmu_idx && mmu_idx <= ent->ar_pl1) * PAGE_EXEC;
    switch (ent->ar_type) {
    case 0: /* read-only */
        prot = r_prot;
        break;
    case 1: /* read/write */
        prot = r_prot | w_prot;
        break;
    case 2: /* read/execute */
        prot = r_prot | x_prot;
        break;
    case 3: /* read/write/execute */
        prot = r_prot | w_prot | x_prot;
        break;
    default: /* execute only (ar_type 4..7) */
        prot = x_prot;
        break;
    }

    /* Protection-ID check, enabled by PSW P.  access_id == 0 means the
       check is skipped for this page.  */
    if ((env->psw & PSW_P) && ent->access_id) {
        /* NOTE(review): the comparison value sets bit 0, i.e. a PID
           register matches only with its low (write-disable) bit set;
           a match then strips write permission and faults on writes.
           Confirm against the PA-RISC protection-ID definition.  */
        int match = ent->access_id * 2 + 1;

        if (match == env->cr[CR_PID1] || match == env->cr[CR_PID2] ||
            match == env->cr[CR_PID3] || match == env->cr[CR_PID4]) {
            prot &= PAGE_READ | PAGE_EXEC;
            if (type == PAGE_WRITE) {
                ret = EXCP_DMPI;
                goto egress;
            }
        }
    }

    /* type == 0 is a non-architectural probe from within QEMU
       (e.g. debug/lpa paths): return the translation without
       enforcing access permissions.  */
    if (type == 0) {
        goto egress;
    }

    if (unlikely(!(prot & type))) {
        /* Access denied: instruction fetch -> EXCP_IMP; data access ->
           EXCP_DMP if the page was at least readable, else EXCP_DMAR.  */
        ret = (type & PAGE_EXEC ? EXCP_IMP :
               prot & PAGE_READ ? EXCP_DMP : EXCP_DMAR);
        goto egress;
    }

    /* Per-entry status bits that can fault without invalidating the
       translation.  Note the checks run in this order, so a later
       condition overwrites ret; prot is narrowed so QEMU re-enters
       this path on the affected access kind.  */
    if (unlikely(!ent->d)) {
        if (type & PAGE_WRITE) {
            /* D (dirty) bit clear -- TLB dirty bit fault on writes.  */
            ret = EXCP_TLB_DIRTY;
        }
        prot &= PAGE_READ | PAGE_EXEC;
    }
    if (unlikely(ent->b)) {
        if (type & PAGE_WRITE) {
            /* B bit set -- data memory break fault on writes.  */
            ret = EXCP_DMB;
        }
        prot &= PAGE_READ | PAGE_EXEC;
    }
    if (unlikely(ent->t)) {
        if (!(type & PAGE_EXEC)) {
            /* T bit set -- page reference fault on data accesses.  */
            ret = EXCP_PAGE_REF;
        }
        prot &= PAGE_EXEC;
    }

 egress:
    *pphys = phys;
    *pprot = prot;
    trace_hppa_tlb_get_physical_address(env, ret, prot, addr, phys);
    return ret;
}
194
195hwaddr hppa_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
196{
197 HPPACPU *cpu = HPPA_CPU(cs);
198 hwaddr phys;
199 int prot, excp;
200
201
202
203
204 if (!(cpu->env.psw & PSW_D)) {
205 return addr;
206 }
207
208 excp = hppa_get_physical_address(&cpu->env, addr, MMU_KERNEL_IDX, 0,
209 &phys, &prot);
210
211
212
213
214 return excp == EXCP_DTLB_MISS ? -1 : phys;
215}
216
217void tlb_fill(CPUState *cs, target_ulong addr, int size,
218 MMUAccessType type, int mmu_idx, uintptr_t retaddr)
219{
220 HPPACPU *cpu = HPPA_CPU(cs);
221 CPUHPPAState *env = &cpu->env;
222 int prot, excp, a_prot;
223 hwaddr phys;
224
225 switch (type) {
226 case MMU_INST_FETCH:
227 a_prot = PAGE_EXEC;
228 break;
229 case MMU_DATA_STORE:
230 a_prot = PAGE_WRITE;
231 break;
232 default:
233 a_prot = PAGE_READ;
234 break;
235 }
236
237 excp = hppa_get_physical_address(env, addr, mmu_idx,
238 a_prot, &phys, &prot);
239 if (unlikely(excp >= 0)) {
240 trace_hppa_tlb_fill_excp(env, addr, size, type, mmu_idx);
241
242 cs->exception_index = excp;
243 if (cpu->env.psw & PSW_Q) {
244
245 cpu->env.cr[CR_IOR] = addr;
246 cpu->env.cr[CR_ISR] = addr >> 32;
247 }
248 cpu_loop_exit_restore(cs, retaddr);
249 }
250
251 trace_hppa_tlb_fill_success(env, addr & TARGET_PAGE_MASK,
252 phys & TARGET_PAGE_MASK, size, type, mmu_idx);
253
254 tlb_set_page(cs, addr & TARGET_PAGE_MASK, phys & TARGET_PAGE_MASK,
255 prot, mmu_idx, TARGET_PAGE_SIZE);
256}
257
258
259void HELPER(itlba)(CPUHPPAState *env, target_ulong addr, target_ureg reg)
260{
261 hppa_tlb_entry *empty = NULL;
262 int i;
263
264
265 for (i = 0; i < ARRAY_SIZE(env->tlb); ++i) {
266 hppa_tlb_entry *ent = &env->tlb[i];
267 if (ent->va_b <= addr && addr <= ent->va_e) {
268 if (ent->entry_valid) {
269 hppa_flush_tlb_ent(env, ent);
270 }
271 if (!empty) {
272 empty = ent;
273 }
274 }
275 }
276
277
278 if (empty == NULL) {
279 empty = hppa_alloc_tlb_ent(env);
280 }
281
282
283 empty->va_b = addr & TARGET_PAGE_MASK;
284 empty->va_e = empty->va_b + TARGET_PAGE_SIZE - 1;
285 empty->pa = extract32(reg, 5, 20) << TARGET_PAGE_BITS;
286 trace_hppa_tlb_itlba(env, empty, empty->va_b, empty->va_e, empty->pa);
287}
288
289
290void HELPER(itlbp)(CPUHPPAState *env, target_ulong addr, target_ureg reg)
291{
292 hppa_tlb_entry *ent = hppa_find_tlb(env, addr);
293
294 if (unlikely(ent == NULL)) {
295 qemu_log_mask(LOG_GUEST_ERROR, "ITLBP not following ITLBA\n");
296 return;
297 }
298
299 ent->access_id = extract32(reg, 1, 18);
300 ent->u = extract32(reg, 19, 1);
301 ent->ar_pl2 = extract32(reg, 20, 2);
302 ent->ar_pl1 = extract32(reg, 22, 2);
303 ent->ar_type = extract32(reg, 24, 3);
304 ent->b = extract32(reg, 27, 1);
305 ent->d = extract32(reg, 28, 1);
306 ent->t = extract32(reg, 29, 1);
307 ent->entry_valid = 1;
308 trace_hppa_tlb_itlbp(env, ent, ent->access_id, ent->u, ent->ar_pl2,
309 ent->ar_pl1, ent->ar_type, ent->b, ent->d, ent->t);
310}
311
312
313
314static void ptlb_work(CPUState *cpu, run_on_cpu_data data)
315{
316 CPUHPPAState *env = cpu->env_ptr;
317 target_ulong addr = (target_ulong) data.target_ptr;
318 hppa_tlb_entry *ent = hppa_find_tlb(env, addr);
319
320 if (ent && ent->entry_valid) {
321 hppa_flush_tlb_ent(env, ent);
322 }
323}
324
325void HELPER(ptlb)(CPUHPPAState *env, target_ulong addr)
326{
327 CPUState *src = CPU(hppa_env_get_cpu(env));
328 CPUState *cpu;
329 trace_hppa_tlb_ptlb(env);
330 run_on_cpu_data data = RUN_ON_CPU_TARGET_PTR(addr);
331
332 CPU_FOREACH(cpu) {
333 if (cpu != src) {
334 async_run_on_cpu(cpu, ptlb_work, data);
335 }
336 }
337 async_safe_run_on_cpu(src, ptlb_work, data);
338}
339
340
341
/* PTLBE helper: purge the entire software TLB on this CPU and drop all
   cached QEMU translations across the four MMU indexes (mask 0xf).  */
void HELPER(ptlbe)(CPUHPPAState *env)
{
    CPUState *src = CPU(hppa_env_get_cpu(env));
    trace_hppa_tlb_ptlbe(env);
    /* NOTE(review): zeroing leaves va_b == 0 rather than the -1 sentinel
       used by hppa_flush_tlb_ent; such entries can match a lookup of
       address 0 but carry entry_valid == 0 — confirm this is intended.  */
    memset(env->tlb, 0, sizeof(env->tlb));
    tlb_flush_by_mmuidx(src, 0xf);
}
349
350void cpu_hppa_change_prot_id(CPUHPPAState *env)
351{
352 if (env->psw & PSW_P) {
353 CPUState *src = CPU(hppa_env_get_cpu(env));
354 tlb_flush_by_mmuidx(src, 0xf);
355 }
356}
357
/* TCG helper wrapper: forwards to cpu_hppa_change_prot_id.  */
void HELPER(change_prot_id)(CPUHPPAState *env)
{
    cpu_hppa_change_prot_id(env);
}
362
/* LPA helper: translate ADDR (kernel-mode, no access check) and return
   the physical address; on failure, report the exception via
   hppa_dynamic_excp with the caller's return address for unwinding.  */
target_ureg HELPER(lpa)(CPUHPPAState *env, target_ulong addr)
{
    hwaddr phys;
    int prot, excp;

    excp = hppa_get_physical_address(env, addr, MMU_KERNEL_IDX, 0,
                                     &phys, &prot);
    if (excp >= 0) {
        if (env->psw & PSW_Q) {
            /* Latch the faulting address into CR_IOR/CR_ISR (low/high
               halves) while the PSW Q bit permits updating them.  */
            env->cr[CR_IOR] = addr;
            env->cr[CR_ISR] = addr >> 32;
        }
        /* A plain DTLB miss is reported as the non-access variant here.  */
        if (excp == EXCP_DTLB_MISS) {
            excp = EXCP_NA_DTLB_MISS;
        }
        trace_hppa_tlb_lpa_failed(env, addr);
        hppa_dynamic_excp(env, excp, GETPC());
    }
    trace_hppa_tlb_lpa_success(env, addr, phys);
    return phys;
}
385
386
387int hppa_artype_for_page(CPUHPPAState *env, target_ulong vaddr)
388{
389 hppa_tlb_entry *ent = hppa_find_tlb(env, vaddr);
390 return ent ? ent->ar_type : -1;
391}
392#endif
393