1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19#include "qemu/osdep.h"
20#include "hw/core/tcg-cpu-ops.h"
21#include "disas/disas.h"
22#include "exec/exec-all.h"
23#include "tcg/tcg.h"
24#include "qemu/bitops.h"
25#include "exec/cpu_ldst.h"
26#include "exec/translate-all.h"
27#include "exec/helper-proto.h"
28#include "qemu/atomic128.h"
29#include "trace/trace-root.h"
30#include "tcg/tcg-ldst.h"
31#include "internal.h"
32
/*
 * Thread-local return address of the memory helper currently performing
 * a direct host access.  Set via set_helper_retaddr() (see cpu_mmu_lookup
 * and atomic_mmu_lookup) so a SIGSEGV/SIGBUS handler can attribute the
 * fault; the values 0 and 1 are special-cased in adjust_signal_pc().
 */
__thread uintptr_t helper_retaddr;

39
/*
 * Classify a memory fault taken in a host signal handler and rewrite
 * *pc to the address the unwinder should restore state from.
 *
 * Returns MMU_INST_FETCH when the fault occurred while reading guest
 * memory for translation, otherwise MMU_DATA_STORE/MMU_DATA_LOAD
 * according to @is_write.
 */
MMUAccessType adjust_signal_pc(uintptr_t *pc, bool is_write)
{
    switch (helper_retaddr) {
    default:
        /*
         * Fault during a direct host access made by a helper:
         * helper_retaddr holds the helper's return address inside the
         * TB (recorded by cpu_mmu_lookup/atomic_mmu_lookup), so unwind
         * from there rather than from the faulting host insn.
         */
        *pc = helper_retaddr;
        break;

    case 0:
        /*
         * No helper access in flight: the fault happened directly in
         * generated code.  Bias the host pc by GETPC_ADJ so it matches
         * the call-return-address convention the restore machinery
         * expects (NOTE(review): presumably mirrors GETPC() — confirm).
         */
        *pc += GETPC_ADJ;
        break;

    case 1:
        /*
         * Fault while fetching guest code for translation: the
         * cpu_ld*_code helpers set helper_retaddr to 1.  The mmap lock
         * is assumed held across translation, so drop it before
         * unwinding; there is no meaningful host pc to report.
         */
        mmap_unlock();
        *pc = 0;
        return MMU_INST_FETCH;
    }

    return is_write ? MMU_DATA_STORE : MMU_DATA_LOAD;
}
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112bool handle_sigsegv_accerr_write(CPUState *cpu, sigset_t *old_set,
113 uintptr_t host_pc, abi_ptr guest_addr)
114{
115 switch (page_unprotect(guest_addr, host_pc)) {
116 case 0:
117
118
119
120
121 return false;
122 case 1:
123
124
125
126
127 return true;
128 case 2:
129
130
131
132
133 sigprocmask(SIG_SETMASK, old_set, NULL);
134 cpu_loop_exit_noexc(cpu);
135
136 default:
137 g_assert_not_reached();
138 }
139}
140
141static int probe_access_internal(CPUArchState *env, target_ulong addr,
142 int fault_size, MMUAccessType access_type,
143 bool nonfault, uintptr_t ra)
144{
145 int acc_flag;
146 bool maperr;
147
148 switch (access_type) {
149 case MMU_DATA_STORE:
150 acc_flag = PAGE_WRITE_ORG;
151 break;
152 case MMU_DATA_LOAD:
153 acc_flag = PAGE_READ;
154 break;
155 case MMU_INST_FETCH:
156 acc_flag = PAGE_EXEC;
157 break;
158 default:
159 g_assert_not_reached();
160 }
161
162 if (guest_addr_valid_untagged(addr)) {
163 int page_flags = page_get_flags(addr);
164 if (page_flags & acc_flag) {
165 return 0;
166 }
167 maperr = !(page_flags & PAGE_VALID);
168 } else {
169 maperr = true;
170 }
171
172 if (nonfault) {
173 return TLB_INVALID_MASK;
174 }
175
176 cpu_loop_exit_sigsegv(env_cpu(env), addr, access_type, maperr, ra);
177}
178
179int probe_access_flags(CPUArchState *env, target_ulong addr,
180 MMUAccessType access_type, int mmu_idx,
181 bool nonfault, void **phost, uintptr_t ra)
182{
183 int flags;
184
185 flags = probe_access_internal(env, addr, 0, access_type, nonfault, ra);
186 *phost = flags ? NULL : g2h(env_cpu(env), addr);
187 return flags;
188}
189
190void *probe_access(CPUArchState *env, target_ulong addr, int size,
191 MMUAccessType access_type, int mmu_idx, uintptr_t ra)
192{
193 int flags;
194
195 g_assert(-(addr | TARGET_PAGE_MASK) >= size);
196 flags = probe_access_internal(env, addr, size, access_type, false, ra);
197 g_assert(flags == 0);
198
199 return size ? g2h(env_cpu(env), addr) : NULL;
200}
201
202
203
204
205
206
207
208
209
210
211static void validate_memop(MemOpIdx oi, MemOp expected)
212{
213#ifdef CONFIG_DEBUG_TCG
214 MemOp have = get_memop(oi) & (MO_SIZE | MO_BSWAP);
215 assert(have == expected);
216#endif
217}
218
/* Raise SIGBUS for an unaligned guest load at @addr; does not return. */
void helper_unaligned_ld(CPUArchState *env, target_ulong addr)
{
    cpu_loop_exit_sigbus(env_cpu(env), addr, MMU_DATA_LOAD, GETPC());
}
223
/* Raise SIGBUS for an unaligned guest store at @addr; does not return. */
void helper_unaligned_st(CPUArchState *env, target_ulong addr)
{
    cpu_loop_exit_sigbus(env_cpu(env), addr, MMU_DATA_STORE, GETPC());
}
228
229static void *cpu_mmu_lookup(CPUArchState *env, target_ulong addr,
230 MemOpIdx oi, uintptr_t ra, MMUAccessType type)
231{
232 MemOp mop = get_memop(oi);
233 int a_bits = get_alignment_bits(mop);
234 void *ret;
235
236
237 if (unlikely(addr & ((1 << a_bits) - 1))) {
238 cpu_loop_exit_sigbus(env_cpu(env), addr, type, ra);
239 }
240
241 ret = g2h(env_cpu(env), addr);
242 set_helper_retaddr(ra);
243 return ret;
244}
245
246uint8_t cpu_ldb_mmu(CPUArchState *env, abi_ptr addr,
247 MemOpIdx oi, uintptr_t ra)
248{
249 void *haddr;
250 uint8_t ret;
251
252 validate_memop(oi, MO_UB);
253 trace_guest_ld_before_exec(env_cpu(env), addr, oi);
254 haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
255 ret = ldub_p(haddr);
256 clear_helper_retaddr();
257 qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
258 return ret;
259}
260
261uint16_t cpu_ldw_be_mmu(CPUArchState *env, abi_ptr addr,
262 MemOpIdx oi, uintptr_t ra)
263{
264 void *haddr;
265 uint16_t ret;
266
267 validate_memop(oi, MO_BEUW);
268 trace_guest_ld_before_exec(env_cpu(env), addr, oi);
269 haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
270 ret = lduw_be_p(haddr);
271 clear_helper_retaddr();
272 qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
273 return ret;
274}
275
276uint32_t cpu_ldl_be_mmu(CPUArchState *env, abi_ptr addr,
277 MemOpIdx oi, uintptr_t ra)
278{
279 void *haddr;
280 uint32_t ret;
281
282 validate_memop(oi, MO_BEUL);
283 trace_guest_ld_before_exec(env_cpu(env), addr, oi);
284 haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
285 ret = ldl_be_p(haddr);
286 clear_helper_retaddr();
287 qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
288 return ret;
289}
290
291uint64_t cpu_ldq_be_mmu(CPUArchState *env, abi_ptr addr,
292 MemOpIdx oi, uintptr_t ra)
293{
294 void *haddr;
295 uint64_t ret;
296
297 validate_memop(oi, MO_BEQ);
298 trace_guest_ld_before_exec(env_cpu(env), addr, oi);
299 haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
300 ret = ldq_be_p(haddr);
301 clear_helper_retaddr();
302 qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
303 return ret;
304}
305
306uint16_t cpu_ldw_le_mmu(CPUArchState *env, abi_ptr addr,
307 MemOpIdx oi, uintptr_t ra)
308{
309 void *haddr;
310 uint16_t ret;
311
312 validate_memop(oi, MO_LEUW);
313 trace_guest_ld_before_exec(env_cpu(env), addr, oi);
314 haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
315 ret = lduw_le_p(haddr);
316 clear_helper_retaddr();
317 qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
318 return ret;
319}
320
321uint32_t cpu_ldl_le_mmu(CPUArchState *env, abi_ptr addr,
322 MemOpIdx oi, uintptr_t ra)
323{
324 void *haddr;
325 uint32_t ret;
326
327 validate_memop(oi, MO_LEUL);
328 trace_guest_ld_before_exec(env_cpu(env), addr, oi);
329 haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
330 ret = ldl_le_p(haddr);
331 clear_helper_retaddr();
332 qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
333 return ret;
334}
335
336uint64_t cpu_ldq_le_mmu(CPUArchState *env, abi_ptr addr,
337 MemOpIdx oi, uintptr_t ra)
338{
339 void *haddr;
340 uint64_t ret;
341
342 validate_memop(oi, MO_LEQ);
343 trace_guest_ld_before_exec(env_cpu(env), addr, oi);
344 haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
345 ret = ldq_le_p(haddr);
346 clear_helper_retaddr();
347 qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
348 return ret;
349}
350
351void cpu_stb_mmu(CPUArchState *env, abi_ptr addr, uint8_t val,
352 MemOpIdx oi, uintptr_t ra)
353{
354 void *haddr;
355
356 validate_memop(oi, MO_UB);
357 trace_guest_st_before_exec(env_cpu(env), addr, oi);
358 haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
359 stb_p(haddr, val);
360 clear_helper_retaddr();
361 qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
362}
363
364void cpu_stw_be_mmu(CPUArchState *env, abi_ptr addr, uint16_t val,
365 MemOpIdx oi, uintptr_t ra)
366{
367 void *haddr;
368
369 validate_memop(oi, MO_BEUW);
370 trace_guest_st_before_exec(env_cpu(env), addr, oi);
371 haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
372 stw_be_p(haddr, val);
373 clear_helper_retaddr();
374 qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
375}
376
377void cpu_stl_be_mmu(CPUArchState *env, abi_ptr addr, uint32_t val,
378 MemOpIdx oi, uintptr_t ra)
379{
380 void *haddr;
381
382 validate_memop(oi, MO_BEUL);
383 trace_guest_st_before_exec(env_cpu(env), addr, oi);
384 haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
385 stl_be_p(haddr, val);
386 clear_helper_retaddr();
387 qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
388}
389
390void cpu_stq_be_mmu(CPUArchState *env, abi_ptr addr, uint64_t val,
391 MemOpIdx oi, uintptr_t ra)
392{
393 void *haddr;
394
395 validate_memop(oi, MO_BEQ);
396 trace_guest_st_before_exec(env_cpu(env), addr, oi);
397 haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
398 stq_be_p(haddr, val);
399 clear_helper_retaddr();
400 qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
401}
402
403void cpu_stw_le_mmu(CPUArchState *env, abi_ptr addr, uint16_t val,
404 MemOpIdx oi, uintptr_t ra)
405{
406 void *haddr;
407
408 validate_memop(oi, MO_LEUW);
409 trace_guest_st_before_exec(env_cpu(env), addr, oi);
410 haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
411 stw_le_p(haddr, val);
412 clear_helper_retaddr();
413 qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
414}
415
416void cpu_stl_le_mmu(CPUArchState *env, abi_ptr addr, uint32_t val,
417 MemOpIdx oi, uintptr_t ra)
418{
419 void *haddr;
420
421 validate_memop(oi, MO_LEUL);
422 trace_guest_st_before_exec(env_cpu(env), addr, oi);
423 haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
424 stl_le_p(haddr, val);
425 clear_helper_retaddr();
426 qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
427}
428
429void cpu_stq_le_mmu(CPUArchState *env, abi_ptr addr, uint64_t val,
430 MemOpIdx oi, uintptr_t ra)
431{
432 void *haddr;
433
434 validate_memop(oi, MO_LEQ);
435 trace_guest_st_before_exec(env_cpu(env), addr, oi);
436 haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
437 stq_le_p(haddr, val);
438 clear_helper_retaddr();
439 qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
440}
441
442uint32_t cpu_ldub_code(CPUArchState *env, abi_ptr ptr)
443{
444 uint32_t ret;
445
446 set_helper_retaddr(1);
447 ret = ldub_p(g2h_untagged(ptr));
448 clear_helper_retaddr();
449 return ret;
450}
451
452uint32_t cpu_lduw_code(CPUArchState *env, abi_ptr ptr)
453{
454 uint32_t ret;
455
456 set_helper_retaddr(1);
457 ret = lduw_p(g2h_untagged(ptr));
458 clear_helper_retaddr();
459 return ret;
460}
461
462uint32_t cpu_ldl_code(CPUArchState *env, abi_ptr ptr)
463{
464 uint32_t ret;
465
466 set_helper_retaddr(1);
467 ret = ldl_p(g2h_untagged(ptr));
468 clear_helper_retaddr();
469 return ret;
470}
471
472uint64_t cpu_ldq_code(CPUArchState *env, abi_ptr ptr)
473{
474 uint64_t ret;
475
476 set_helper_retaddr(1);
477 ret = ldq_p(g2h_untagged(ptr));
478 clear_helper_retaddr();
479 return ret;
480}
481
482#include "ldst_common.c.inc"
483
484
485
486
487
488
489static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
490 MemOpIdx oi, int size, int prot,
491 uintptr_t retaddr)
492{
493 MemOp mop = get_memop(oi);
494 int a_bits = get_alignment_bits(mop);
495 void *ret;
496
497
498 if (unlikely(addr & ((1 << a_bits) - 1))) {
499 MMUAccessType t = prot == PAGE_READ ? MMU_DATA_LOAD : MMU_DATA_STORE;
500 cpu_loop_exit_sigbus(env_cpu(env), addr, t, retaddr);
501 }
502
503
504 if (unlikely(addr & (size - 1))) {
505 cpu_loop_exit_atomic(env_cpu(env), retaddr);
506 }
507
508 ret = g2h(env_cpu(env), addr);
509 set_helper_retaddr(retaddr);
510 return ret;
511}
512
513#include "atomic_common.c.inc"
514
515
516
517
518
519
520#define ATOMIC_NAME(X) \
521 glue(glue(glue(cpu_atomic_ ## X, SUFFIX), END), _mmu)
522#define ATOMIC_MMU_CLEANUP do { clear_helper_retaddr(); } while (0)
523#define ATOMIC_MMU_IDX MMU_USER_IDX
524
525#define DATA_SIZE 1
526#include "atomic_template.h"
527
528#define DATA_SIZE 2
529#include "atomic_template.h"
530
531#define DATA_SIZE 4
532#include "atomic_template.h"
533
534#ifdef CONFIG_ATOMIC64
535#define DATA_SIZE 8
536#include "atomic_template.h"
537#endif
538
539#if HAVE_ATOMIC128 || HAVE_CMPXCHG128
540#define DATA_SIZE 16
541#include "atomic_template.h"
542#endif
543