1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19#include "qemu/osdep.h"
20#include "hw/core/tcg-cpu-ops.h"
21#include "disas/disas.h"
22#include "exec/exec-all.h"
23#include "tcg/tcg.h"
24#include "qemu/bitops.h"
25#include "exec/cpu_ldst.h"
26#include "exec/translate-all.h"
27#include "exec/helper-proto.h"
28#include "qemu/atomic128.h"
29#include "trace/trace-root.h"
30#include "tcg/tcg-ldst.h"
31#include "internal.h"
32
/*
 * Per-thread bookkeeping consulted by the signal-handler path
 * (see adjust_signal_pc):
 *   0    - no helper active; fault came straight from generated code
 *   1    - inside cpu_ld*_code (instruction fetch during translation)
 *   else - host return address of the active load/store helper,
 *          as set by set_helper_retaddr() in cpu_mmu_lookup() /
 *          atomic_mmu_lookup()
 */
__thread uintptr_t helper_retaddr;
34
35
36
37
38
39
/*
 * Adjust the host pc recovered from a signal frame so that it can be
 * attributed to guest code, and classify the faulting access.
 *
 * @pc: in/out - host pc at the fault, rewritten in place.
 * @is_write: true if the host fault was a write.
 *
 * Returns the MMUAccessType of the faulting access.  Behaviour keys off
 * the thread-local helper_retaddr sentinel (see its definition above).
 */
MMUAccessType adjust_signal_pc(uintptr_t *pc, bool is_write)
{
    switch (helper_retaddr) {
    default:
        /*
         * A load/store helper was active: helper_retaddr holds its host
         * return address into generated code, which is the pc we want
         * to unwind from.
         */
        *pc = helper_retaddr;
        break;

    case 0:
        /*
         * Fault directly from generated code: the signal pc points at
         * the faulting host insn.  Apply the same adjustment used for
         * helper return addresses so the later pc->guest-insn lookup
         * behaves uniformly.
         * NOTE(review): GETPC_ADJ's exact value is host-specific.
         */
        *pc += GETPC_ADJ;
        break;

    case 1:
        /*
         * Fault inside cpu_ld*_code (they set helper_retaddr to 1),
         * i.e. while fetching guest instructions.  Drop the mmap lock
         * (presumably held across translation -- confirm in the
         * translator) and report an instruction fetch with no usable pc.
         */
        mmap_unlock();
        *pc = 0;
        return MMU_INST_FETCH;
    }

    return is_write ? MMU_DATA_STORE : MMU_DATA_LOAD;
}
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
/*
 * Handle a SIGSEGV caused by a write to a page that QEMU itself may
 * have write-protected, by asking page_unprotect() to fix it up.
 *
 * Returns false when the fault is genuine and must be delivered to the
 * guest, true when the write can simply be retried.  May not return at
 * all: on page_unprotect() == 2 it restores the pre-signal mask and
 * restarts the cpu loop.
 *
 * NOTE(review): the 0/1/2 meanings follow page_unprotect()'s contract
 * (0 = not our protection, 1 = fixed up, 2 = fixed up but current
 * translation invalidated) -- confirm against its definition.
 */
bool handle_sigsegv_accerr_write(CPUState *cpu, sigset_t *old_set,
                                 uintptr_t host_pc, abi_ptr guest_addr)
{
    switch (page_unprotect(guest_addr, host_pc)) {
    case 0:
        /* Nothing we can fix: propagate the fault. */
        return false;
    case 1:
        /* Protection removed; the faulting write can be retried. */
        return true;
    case 2:
        /*
         * Fixed up, but we cannot simply return into the (now stale)
         * translated code: restore the signal mask and re-enter the
         * cpu loop.  cpu_loop_exit_noexc() does not return.
         */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        cpu_loop_exit_noexc(cpu);

    default:
        g_assert_not_reached();
    }
}
140
141static int probe_access_internal(CPUArchState *env, target_ulong addr,
142 int fault_size, MMUAccessType access_type,
143 bool nonfault, uintptr_t ra)
144{
145 int acc_flag;
146 bool maperr;
147
148 switch (access_type) {
149 case MMU_DATA_STORE:
150 acc_flag = PAGE_WRITE_ORG;
151 break;
152 case MMU_DATA_LOAD:
153 acc_flag = PAGE_READ;
154 break;
155 case MMU_INST_FETCH:
156 acc_flag = PAGE_EXEC;
157 break;
158 default:
159 g_assert_not_reached();
160 }
161
162 if (guest_addr_valid_untagged(addr)) {
163 int page_flags = page_get_flags(addr);
164 if (page_flags & acc_flag) {
165 return 0;
166 }
167 maperr = !(page_flags & PAGE_VALID);
168 } else {
169 maperr = true;
170 }
171
172 if (nonfault) {
173 return TLB_INVALID_MASK;
174 }
175
176 cpu_loop_exit_sigsegv(env_cpu(env), addr, access_type, maperr, ra);
177}
178
179int probe_access_flags(CPUArchState *env, target_ulong addr,
180 MMUAccessType access_type, int mmu_idx,
181 bool nonfault, void **phost, uintptr_t ra)
182{
183 int flags;
184
185 flags = probe_access_internal(env, addr, 0, access_type, nonfault, ra);
186 *phost = flags ? NULL : g2h(env_cpu(env), addr);
187 return flags;
188}
189
190void *probe_access(CPUArchState *env, target_ulong addr, int size,
191 MMUAccessType access_type, int mmu_idx, uintptr_t ra)
192{
193 int flags;
194
195 g_assert(-(addr | TARGET_PAGE_MASK) >= size);
196 flags = probe_access_internal(env, addr, size, access_type, false, ra);
197 g_assert(flags == 0);
198
199 return size ? g2h(env_cpu(env), addr) : NULL;
200}
201
202
203
204
205
206
207
208
209
210
211static void validate_memop(MemOpIdx oi, MemOp expected)
212{
213#ifdef CONFIG_DEBUG_TCG
214 MemOp have = get_memop(oi) & (MO_SIZE | MO_BSWAP);
215 assert(have == expected);
216#endif
217}
218
/*
 * Raise SIGBUS for an unaligned guest load at @addr.  GETPC() must be
 * evaluated here, directly in the helper, to capture the return
 * address into generated code.
 */
void helper_unaligned_ld(CPUArchState *env, target_ulong addr)
{
    cpu_loop_exit_sigbus(env_cpu(env), addr, MMU_DATA_LOAD, GETPC());
}
223
/*
 * Raise SIGBUS for an unaligned guest store at @addr.  GETPC() must be
 * evaluated here, directly in the helper, to capture the return
 * address into generated code.
 */
void helper_unaligned_st(CPUArchState *env, target_ulong addr)
{
    cpu_loop_exit_sigbus(env_cpu(env), addr, MMU_DATA_STORE, GETPC());
}
228
229static void *cpu_mmu_lookup(CPUArchState *env, target_ulong addr,
230 MemOpIdx oi, uintptr_t ra, MMUAccessType type)
231{
232 MemOp mop = get_memop(oi);
233 int a_bits = get_alignment_bits(mop);
234 void *ret;
235
236
237 if (unlikely(addr & ((1 << a_bits) - 1))) {
238 cpu_loop_exit_sigbus(env_cpu(env), addr, type, ra);
239 }
240
241 ret = g2h(env_cpu(env), addr);
242 set_helper_retaddr(ra);
243 return ret;
244}
245
246uint8_t cpu_ldb_mmu(CPUArchState *env, abi_ptr addr,
247 MemOpIdx oi, uintptr_t ra)
248{
249 void *haddr;
250 uint8_t ret;
251
252 validate_memop(oi, MO_UB);
253 haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
254 ret = ldub_p(haddr);
255 clear_helper_retaddr();
256 qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
257 return ret;
258}
259
260uint16_t cpu_ldw_be_mmu(CPUArchState *env, abi_ptr addr,
261 MemOpIdx oi, uintptr_t ra)
262{
263 void *haddr;
264 uint16_t ret;
265
266 validate_memop(oi, MO_BEUW);
267 haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
268 ret = lduw_be_p(haddr);
269 clear_helper_retaddr();
270 qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
271 return ret;
272}
273
274uint32_t cpu_ldl_be_mmu(CPUArchState *env, abi_ptr addr,
275 MemOpIdx oi, uintptr_t ra)
276{
277 void *haddr;
278 uint32_t ret;
279
280 validate_memop(oi, MO_BEUL);
281 haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
282 ret = ldl_be_p(haddr);
283 clear_helper_retaddr();
284 qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
285 return ret;
286}
287
288uint64_t cpu_ldq_be_mmu(CPUArchState *env, abi_ptr addr,
289 MemOpIdx oi, uintptr_t ra)
290{
291 void *haddr;
292 uint64_t ret;
293
294 validate_memop(oi, MO_BEUQ);
295 haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
296 ret = ldq_be_p(haddr);
297 clear_helper_retaddr();
298 qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
299 return ret;
300}
301
302uint16_t cpu_ldw_le_mmu(CPUArchState *env, abi_ptr addr,
303 MemOpIdx oi, uintptr_t ra)
304{
305 void *haddr;
306 uint16_t ret;
307
308 validate_memop(oi, MO_LEUW);
309 haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
310 ret = lduw_le_p(haddr);
311 clear_helper_retaddr();
312 qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
313 return ret;
314}
315
316uint32_t cpu_ldl_le_mmu(CPUArchState *env, abi_ptr addr,
317 MemOpIdx oi, uintptr_t ra)
318{
319 void *haddr;
320 uint32_t ret;
321
322 validate_memop(oi, MO_LEUL);
323 haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
324 ret = ldl_le_p(haddr);
325 clear_helper_retaddr();
326 qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
327 return ret;
328}
329
330uint64_t cpu_ldq_le_mmu(CPUArchState *env, abi_ptr addr,
331 MemOpIdx oi, uintptr_t ra)
332{
333 void *haddr;
334 uint64_t ret;
335
336 validate_memop(oi, MO_LEUQ);
337 haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
338 ret = ldq_le_p(haddr);
339 clear_helper_retaddr();
340 qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
341 return ret;
342}
343
344void cpu_stb_mmu(CPUArchState *env, abi_ptr addr, uint8_t val,
345 MemOpIdx oi, uintptr_t ra)
346{
347 void *haddr;
348
349 validate_memop(oi, MO_UB);
350 haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
351 stb_p(haddr, val);
352 clear_helper_retaddr();
353 qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
354}
355
356void cpu_stw_be_mmu(CPUArchState *env, abi_ptr addr, uint16_t val,
357 MemOpIdx oi, uintptr_t ra)
358{
359 void *haddr;
360
361 validate_memop(oi, MO_BEUW);
362 haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
363 stw_be_p(haddr, val);
364 clear_helper_retaddr();
365 qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
366}
367
368void cpu_stl_be_mmu(CPUArchState *env, abi_ptr addr, uint32_t val,
369 MemOpIdx oi, uintptr_t ra)
370{
371 void *haddr;
372
373 validate_memop(oi, MO_BEUL);
374 haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
375 stl_be_p(haddr, val);
376 clear_helper_retaddr();
377 qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
378}
379
380void cpu_stq_be_mmu(CPUArchState *env, abi_ptr addr, uint64_t val,
381 MemOpIdx oi, uintptr_t ra)
382{
383 void *haddr;
384
385 validate_memop(oi, MO_BEUQ);
386 haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
387 stq_be_p(haddr, val);
388 clear_helper_retaddr();
389 qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
390}
391
392void cpu_stw_le_mmu(CPUArchState *env, abi_ptr addr, uint16_t val,
393 MemOpIdx oi, uintptr_t ra)
394{
395 void *haddr;
396
397 validate_memop(oi, MO_LEUW);
398 haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
399 stw_le_p(haddr, val);
400 clear_helper_retaddr();
401 qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
402}
403
404void cpu_stl_le_mmu(CPUArchState *env, abi_ptr addr, uint32_t val,
405 MemOpIdx oi, uintptr_t ra)
406{
407 void *haddr;
408
409 validate_memop(oi, MO_LEUL);
410 haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
411 stl_le_p(haddr, val);
412 clear_helper_retaddr();
413 qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
414}
415
416void cpu_stq_le_mmu(CPUArchState *env, abi_ptr addr, uint64_t val,
417 MemOpIdx oi, uintptr_t ra)
418{
419 void *haddr;
420
421 validate_memop(oi, MO_LEUQ);
422 haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
423 stq_le_p(haddr, val);
424 clear_helper_retaddr();
425 qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
426}
427
428uint32_t cpu_ldub_code(CPUArchState *env, abi_ptr ptr)
429{
430 uint32_t ret;
431
432 set_helper_retaddr(1);
433 ret = ldub_p(g2h_untagged(ptr));
434 clear_helper_retaddr();
435 return ret;
436}
437
438uint32_t cpu_lduw_code(CPUArchState *env, abi_ptr ptr)
439{
440 uint32_t ret;
441
442 set_helper_retaddr(1);
443 ret = lduw_p(g2h_untagged(ptr));
444 clear_helper_retaddr();
445 return ret;
446}
447
448uint32_t cpu_ldl_code(CPUArchState *env, abi_ptr ptr)
449{
450 uint32_t ret;
451
452 set_helper_retaddr(1);
453 ret = ldl_p(g2h_untagged(ptr));
454 clear_helper_retaddr();
455 return ret;
456}
457
458uint64_t cpu_ldq_code(CPUArchState *env, abi_ptr ptr)
459{
460 uint64_t ret;
461
462 set_helper_retaddr(1);
463 ret = ldq_p(g2h_untagged(ptr));
464 clear_helper_retaddr();
465 return ret;
466}
467
468#include "ldst_common.c.inc"
469
470
471
472
473
474
475static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
476 MemOpIdx oi, int size, int prot,
477 uintptr_t retaddr)
478{
479 MemOp mop = get_memop(oi);
480 int a_bits = get_alignment_bits(mop);
481 void *ret;
482
483
484 if (unlikely(addr & ((1 << a_bits) - 1))) {
485 MMUAccessType t = prot == PAGE_READ ? MMU_DATA_LOAD : MMU_DATA_STORE;
486 cpu_loop_exit_sigbus(env_cpu(env), addr, t, retaddr);
487 }
488
489
490 if (unlikely(addr & (size - 1))) {
491 cpu_loop_exit_atomic(env_cpu(env), retaddr);
492 }
493
494 ret = g2h(env_cpu(env), addr);
495 set_helper_retaddr(retaddr);
496 return ret;
497}
498
499#include "atomic_common.c.inc"
500
501
502
503
504
505
/*
 * Glue for atomic_template.h: generated entry points are named
 * cpu_atomic_<op><SUFFIX><END>_mmu, and each drops helper_retaddr on
 * exit (it was armed by atomic_mmu_lookup above).
 */
#define ATOMIC_NAME(X) \
    glue(glue(glue(cpu_atomic_ ## X, SUFFIX), END), _mmu)
#define ATOMIC_MMU_CLEANUP do { clear_helper_retaddr(); } while (0)

/* Instantiate the atomic helpers once per operand size. */
#define DATA_SIZE 1
#include "atomic_template.h"

#define DATA_SIZE 2
#include "atomic_template.h"

#define DATA_SIZE 4
#include "atomic_template.h"

#ifdef CONFIG_ATOMIC64
#define DATA_SIZE 8
#include "atomic_template.h"
#endif

/* 128-bit helpers only when the host provides the primitives. */
#if HAVE_ATOMIC128 || HAVE_CMPXCHG128
#define DATA_SIZE 16
#include "atomic_template.h"
#endif
528