1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20#include "qemu/osdep.h"
21#include "cpu.h"
22#include "exec/exec-all.h"
23#include "exec/helper-proto.h"
24#include "qom/cpu.h"
25
26#ifdef CONFIG_USER_ONLY
27int hppa_cpu_handle_mmu_fault(CPUState *cs, vaddr address,
28 int size, int rw, int mmu_idx)
29{
30 HPPACPU *cpu = HPPA_CPU(cs);
31
32
33
34 cs->exception_index = EXCP_DMP;
35 cpu->env.cr[CR_IOR] = address;
36 return 1;
37}
38#else
39static hppa_tlb_entry *hppa_find_tlb(CPUHPPAState *env, vaddr addr)
40{
41 int i;
42
43 for (i = 0; i < ARRAY_SIZE(env->tlb); ++i) {
44 hppa_tlb_entry *ent = &env->tlb[i];
45 if (ent->va_b <= addr && addr <= ent->va_e) {
46 return ent;
47 }
48 }
49 return NULL;
50}
51
52static void hppa_flush_tlb_ent(CPUHPPAState *env, hppa_tlb_entry *ent)
53{
54 CPUState *cs = CPU(hppa_env_get_cpu(env));
55 unsigned i, n = 1 << (2 * ent->page_size);
56 uint64_t addr = ent->va_b;
57
58 for (i = 0; i < n; ++i, addr += TARGET_PAGE_SIZE) {
59
60 tlb_flush_page_by_mmuidx(cs, addr, 0xf);
61 }
62
63 memset(ent, 0, sizeof(*ent));
64 ent->va_b = -1;
65}
66
67static hppa_tlb_entry *hppa_alloc_tlb_ent(CPUHPPAState *env)
68{
69 hppa_tlb_entry *ent;
70 uint32_t i = env->tlb_last;
71
72 env->tlb_last = (i == ARRAY_SIZE(env->tlb) - 1 ? 0 : i + 1);
73 ent = &env->tlb[i];
74
75 hppa_flush_tlb_ent(env, ent);
76 return ent;
77}
78
/*
 * Translate virtual address ADDR for an access of type TYPE (a PAGE_*
 * mask, or 0 for a non-access probe from within QEMU) at privilege
 * level MMU_IDX.  The physical address and the resulting page
 * protections are stored through PPHYS/PPROT in all cases; the return
 * value is -1 on success or an EXCP_* trap number to be raised.
 */
int hppa_get_physical_address(CPUHPPAState *env, vaddr addr, int mmu_idx,
                              int type, hwaddr *pphys, int *pprot)
{
    hwaddr phys;
    int prot, r_prot, w_prot, x_prot;
    hppa_tlb_entry *ent;
    int ret = -1;

    /* Virtual translation disabled.  Direct map virtual to physical.  */
    if (mmu_idx == MMU_PHYS_IDX) {
        phys = addr;
        prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        goto egress;
    }

    /* Find a valid tlb entry that matches the virtual address.  */
    ent = hppa_find_tlb(env, addr);
    if (ent == NULL || !ent->entry_valid) {
        phys = 0;
        prot = 0;
        /* ??? Unconditionally reports a data TLB miss even when TYPE is
           PAGE_EXEC -- confirm whether an ITLB miss should be raised.  */
        ret = EXCP_DTLB_MISS;
        goto egress;
    }

    /* We now know the physical address: entry base plus page offset.  */
    phys = ent->pa + (addr & ~TARGET_PAGE_MASK);

    /* Map the TLB access-rights privilege levels to QEMU protections.
       Lower mmu_idx is more privileged: readable/writable only at or
       above the entry's required level; executable only within the
       [ar_pl2, ar_pl1] promotion window.  */
    r_prot = (mmu_idx <= ent->ar_pl1) * PAGE_READ;
    w_prot = (mmu_idx <= ent->ar_pl2) * PAGE_WRITE;
    x_prot = (ent->ar_pl2 <= mmu_idx && mmu_idx <= ent->ar_pl1) * PAGE_EXEC;
    switch (ent->ar_type) {
    case 0: /* read-only: data page */
        prot = r_prot;
        break;
    case 1: /* read/write: dynamic data page */
        prot = r_prot | w_prot;
        break;
    case 2: /* read/execute: normal code page */
        prot = r_prot | x_prot;
        break;
    case 3: /* read/write/execute: dynamic code page */
        prot = r_prot | w_prot | x_prot;
        break;
    default: /* execute only: gateway page types */
        prot = x_prot;
        break;
    }

    /* NOTE(review): protection-id checking (PSW_P / ent->access_id) is
       not implemented here.  */

    /* TYPE == 0 indicates a non-architectural access from within QEMU
       (e.g. debug or LPA translation) -- bypass all the fault checks.  */
    if (type == 0) {
        goto egress;
    }

    if (unlikely(!(prot & type))) {
        /* Access not permitted: instruction or data memory protection.  */
        ret = (type & PAGE_EXEC ? EXCP_IMP : EXCP_DMP);
        goto egress;
    }

    /* In reverse priority order, check conditions that raise faults.
       As we go, strip the PROT bits covering the condition checked, so
       that the softmmu TLB entry installed from PROT forces this
       function to run again on the next such access.  */
    if (unlikely(!ent->d)) {
        if (type & PAGE_WRITE) {
            /* The entry is not dirty -- TLB Dirty Bit Fault.  */
            ret = EXCP_TLB_DIRTY;
        }
        prot &= PAGE_READ | PAGE_EXEC;
    }
    if (unlikely(ent->b)) {
        if (type & PAGE_WRITE) {
            /* Entry marked for breakpointing -- Data Memory Break Fault.  */
            ret = EXCP_DMB;
        }
        prot &= PAGE_READ | PAGE_EXEC;
    }
    if (unlikely(ent->t)) {
        if (!(type & PAGE_EXEC)) {
            /* Entry marked for reference tracking -- Page Reference Fault.  */
            ret = EXCP_PAGE_REF;
        }
        prot &= PAGE_EXEC;
    }

 egress:
    *pphys = phys;
    *pprot = prot;
    return ret;
}
175
176hwaddr hppa_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
177{
178 HPPACPU *cpu = HPPA_CPU(cs);
179 hwaddr phys;
180 int prot, excp;
181
182
183
184
185 if (!(cpu->env.psw & PSW_D)) {
186 return addr;
187 }
188
189 excp = hppa_get_physical_address(&cpu->env, addr, MMU_KERNEL_IDX, 0,
190 &phys, &prot);
191
192
193
194
195 return excp == EXCP_DTLB_MISS ? -1 : phys;
196}
197
198void tlb_fill(CPUState *cs, target_ulong addr, int size,
199 MMUAccessType type, int mmu_idx, uintptr_t retaddr)
200{
201 HPPACPU *cpu = HPPA_CPU(cs);
202 int prot, excp, a_prot;
203 hwaddr phys;
204
205 switch (type) {
206 case MMU_INST_FETCH:
207 a_prot = PAGE_EXEC;
208 break;
209 case MMU_DATA_STORE:
210 a_prot = PAGE_WRITE;
211 break;
212 default:
213 a_prot = PAGE_READ;
214 break;
215 }
216
217 excp = hppa_get_physical_address(&cpu->env, addr, mmu_idx,
218 a_prot, &phys, &prot);
219 if (unlikely(excp >= 0)) {
220
221 cs->exception_index = excp;
222 if (cpu->env.psw & PSW_Q) {
223
224 cpu->env.cr[CR_IOR] = addr;
225 cpu->env.cr[CR_ISR] = addr >> 32;
226 }
227 cpu_loop_exit_restore(cs, retaddr);
228 }
229
230
231 tlb_set_page(cs, addr & TARGET_PAGE_MASK, phys & TARGET_PAGE_MASK,
232 prot, mmu_idx, TARGET_PAGE_SIZE);
233}
234
235
236void HELPER(itlba)(CPUHPPAState *env, target_ulong addr, target_ureg reg)
237{
238 hppa_tlb_entry *empty = NULL;
239 int i;
240
241
242 for (i = 0; i < ARRAY_SIZE(env->tlb); ++i) {
243 hppa_tlb_entry *ent = &env->tlb[i];
244 if (!ent->entry_valid) {
245 empty = ent;
246 } else if (ent->va_b <= addr && addr <= ent->va_e) {
247 hppa_flush_tlb_ent(env, ent);
248 empty = ent;
249 }
250 }
251
252
253 if (empty == NULL) {
254 empty = hppa_alloc_tlb_ent(env);
255 }
256
257
258 empty->va_b = addr & TARGET_PAGE_MASK;
259 empty->va_e = empty->va_b + TARGET_PAGE_SIZE - 1;
260 empty->pa = extract32(reg, 5, 20) << TARGET_PAGE_BITS;
261}
262
263
/*
 * Insert TLB Protection.  Completes the entry started by ITLBA: unpack
 * the protection/attribute word REG into the partially-built (still
 * invalid) entry covering ADDR, then mark it valid.
 */
void HELPER(itlbp)(CPUHPPAState *env, target_ulong addr, target_ureg reg)
{
    hppa_tlb_entry *ent = hppa_find_tlb(env, addr);

    /* A matching entry must exist and must still be awaiting ITLBP;
       anything else means the guest skipped the ITLBA step.  */
    if (unlikely(ent == NULL || ent->entry_valid)) {
        qemu_log_mask(LOG_GUEST_ERROR, "ITLBP not following ITLBA\n");
        return;
    }

    /* Unpack the fields of the protection word.  The entry only becomes
       visible to translation once entry_valid is set, last.  */
    ent->access_id = extract32(reg, 1, 18);
    ent->u = extract32(reg, 19, 1);
    ent->ar_pl2 = extract32(reg, 20, 2);
    ent->ar_pl1 = extract32(reg, 22, 2);
    ent->ar_type = extract32(reg, 24, 3);
    ent->b = extract32(reg, 27, 1);
    ent->d = extract32(reg, 28, 1);
    ent->t = extract32(reg, 29, 1);
    ent->entry_valid = 1;
}
283
284
285
286static void ptlb_work(CPUState *cpu, run_on_cpu_data data)
287{
288 CPUHPPAState *env = cpu->env_ptr;
289 target_ulong addr = (target_ulong) data.target_ptr;
290 hppa_tlb_entry *ent = hppa_find_tlb(env, addr);
291
292 if (ent && ent->entry_valid) {
293 hppa_flush_tlb_ent(env, ent);
294 }
295}
296
297void HELPER(ptlb)(CPUHPPAState *env, target_ulong addr)
298{
299 CPUState *src = CPU(hppa_env_get_cpu(env));
300 CPUState *cpu;
301 run_on_cpu_data data = RUN_ON_CPU_TARGET_PTR(addr);
302
303 CPU_FOREACH(cpu) {
304 if (cpu != src) {
305 async_run_on_cpu(cpu, ptlb_work, data);
306 }
307 }
308 async_safe_run_on_cpu(src, ptlb_work, data);
309}
310
311
312
/*
 * Purge the entire TLB: clear the whole architectural TLB array and
 * flush the softmmu TLB for all four mmu indexes (mask 0xf).
 * NOTE(review): unlike PTLB above, this is not broadcast to other
 * cpus -- confirm whether that is intentional.
 */
void HELPER(ptlbe)(CPUHPPAState *env)
{
    CPUState *src = CPU(hppa_env_get_cpu(env));

    memset(env->tlb, 0, sizeof(env->tlb));
    tlb_flush_by_mmuidx(src, 0xf);
}
320
321target_ureg HELPER(lpa)(CPUHPPAState *env, target_ulong addr)
322{
323 hwaddr phys;
324 int prot, excp;
325
326 excp = hppa_get_physical_address(env, addr, MMU_KERNEL_IDX, 0,
327 &phys, &prot);
328 if (excp >= 0) {
329 if (env->psw & PSW_Q) {
330
331 env->cr[CR_IOR] = addr;
332 env->cr[CR_ISR] = addr >> 32;
333 }
334 if (excp == EXCP_DTLB_MISS) {
335 excp = EXCP_NA_DTLB_MISS;
336 }
337 hppa_dynamic_excp(env, excp, GETPC());
338 }
339 return phys;
340}
341
342
343int hppa_artype_for_page(CPUHPPAState *env, target_ulong vaddr)
344{
345 hppa_tlb_entry *ent = hppa_find_tlb(env, vaddr);
346 return ent ? ent->ar_type : -1;
347}
348#endif
349