/*
 *  Common CPU TLB handling
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "exec/memory.h"
#include "exec/address-spaces.h"
#include "exec/cpu_ldst.h"
#include "exec/cputlb.h"
#include "exec/memory-internal.h"
#include "exec/ram_addr.h"
#include "tcg/tcg.h"
#include "qemu/error-report.h"
#include "exec/log.h"
#include "exec/helper-proto.h"
#include "qemu/atomic.h"
#include "qemu/etrace.h"

/* DEBUG defines, enable DEBUG_TLB_LOG to log to the CPU_LOG_MMU target */
/* #define DEBUG_TLB */
/* #define DEBUG_TLB_LOG */

#ifdef DEBUG_TLB
# define DEBUG_TLB_GATE 1
# ifdef DEBUG_TLB_LOG
#  define DEBUG_TLB_LOG_GATE 1
# else
#  define DEBUG_TLB_LOG_GATE 0
# endif
#else
# define DEBUG_TLB_GATE 0
# define DEBUG_TLB_LOG_GATE 0
#endif

#define tlb_debug(fmt, ...) do { \
    if (DEBUG_TLB_LOG_GATE) { \
        qemu_log_mask(CPU_LOG_MMU, "%s: " fmt, __func__, \
                      ## __VA_ARGS__); \
    } else if (DEBUG_TLB_GATE) { \
        fprintf(stderr, "%s: " fmt, __func__, ## __VA_ARGS__); \
    } \
} while (0)

/* statistics */
int tlb_flush_count;
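/* NOTE:
 * If flush_global is true (the usual case), flush all tlb entries.
 * If flush_global is false, flush (at least) all tlb entries not
 * tagged global.
 *
 * Since QEMU doesn't currently implement a global/not-global flag
 * for tlb entries, at the moment tlb_flush() will also flush all
 * tlb entries in the flush_global == false case.  This is OK because
 * CPU architectures generally permit an implementation to drop
 * entries from the TLB at any time, so flushing more entries than
 * required is only an efficiency issue, not a correctness issue.
 */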
void tlb_flush(CPUState *cpu, int flush_global)
{
    CPUArchState *env = cpu->env_ptr;

    tlb_debug("(%d)\n", flush_global);

    memset(env->tlb_table, -1, sizeof(env->tlb_table));
    memset(env->tlb_v_table, -1, sizeof(env->tlb_v_table));
    memset(cpu->tb_jmp_cache, 0, sizeof(cpu->tb_jmp_cache));

    env->vtlb_index = 0;
    env->tlb_flush_addr = -1;
    env->tlb_flush_mask = 0;
    tlb_flush_count++;
}

static inline void v_tlb_flush_by_mmuidx(CPUState *cpu, va_list argp)
{
    CPUArchState *env = cpu->env_ptr;

    tlb_debug("start\n");

    for (;;) {
        int mmu_idx = va_arg(argp, int);

        if (mmu_idx < 0) {
            break;
        }

        tlb_debug("%d\n", mmu_idx);

        memset(env->tlb_table[mmu_idx], -1, sizeof(env->tlb_table[0]));
        memset(env->tlb_v_table[mmu_idx], -1, sizeof(env->tlb_v_table[0]));
    }

    memset(cpu->tb_jmp_cache, 0, sizeof(cpu->tb_jmp_cache));
}

void tlb_flush_by_mmuidx(CPUState *cpu, ...)
{
    va_list argp;
    va_start(argp, cpu);
    v_tlb_flush_by_mmuidx(cpu, argp);
    va_end(argp);
}
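
/*
 * The list of mmu indexes passed to tlb_flush_by_mmuidx() must be
 * terminated with a negative value, e.g. (using arbitrary
 * illustrative indexes 0 and 1):
 *
 *     tlb_flush_by_mmuidx(cpu, 0, 1, -1);
 */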

static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
{
    if (addr == (tlb_entry->addr_read &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_write &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_code &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        memset(tlb_entry, -1, sizeof(*tlb_entry));
    }
}

void tlb_flush_page(CPUState *cpu, target_ulong addr)
{
    CPUArchState *env = cpu->env_ptr;
    int i;
    int mmu_idx;

    tlb_debug("page :" TARGET_FMT_lx "\n", addr);

    /* Check if we need to flush due to large pages.  */
    if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
        tlb_debug("forcing full flush ("
                  TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
                  env->tlb_flush_addr, env->tlb_flush_mask);

        tlb_flush(cpu, 1);
        return;
    }

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);
    }

    /* check whether there are entries that need to be flushed in the vtlb */
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        int k;
        for (k = 0; k < CPU_VTLB_SIZE; k++) {
            tlb_flush_entry(&env->tlb_v_table[mmu_idx][k], addr);
        }
    }

    tb_flush_jmp_cache(cpu, addr);
}

void tlb_flush_page_by_mmuidx(CPUState *cpu, target_ulong addr, ...)
{
    CPUArchState *env = cpu->env_ptr;
    int i, k;
    va_list argp;

    va_start(argp, addr);

    tlb_debug("addr "TARGET_FMT_lx"\n", addr);

    /* Check if we need to flush due to large pages.  */
    if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
        tlb_debug("forced full flush ("
                  TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
                  env->tlb_flush_addr, env->tlb_flush_mask);

        v_tlb_flush_by_mmuidx(cpu, argp);
        va_end(argp);
        return;
    }

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);

    for (;;) {
        int mmu_idx = va_arg(argp, int);

        if (mmu_idx < 0) {
            break;
        }

        tlb_debug("idx %d\n", mmu_idx);

        tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);

        /* check whether there are vtlb entries that need to be flushed */
        for (k = 0; k < CPU_VTLB_SIZE; k++) {
            tlb_flush_entry(&env->tlb_v_table[mmu_idx][k], addr);
        }
    }
    va_end(argp);

    tb_flush_jmp_cache(cpu, addr);
}
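/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */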
void tlb_protect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_test_and_clear_dirty(ram_addr, TARGET_PAGE_SIZE,
                                             DIRTY_MEMORY_CODE);
}
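/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */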
void tlb_unprotect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_set_dirty_flag(ram_addr, DIRTY_MEMORY_CODE);
}

static bool tlb_is_dirty_ram(CPUTLBEntry *tlbe)
{
    return (tlbe->addr_write & (TLB_INVALID_MASK|TLB_MMIO|TLB_NOTDIRTY)) == 0;
}

void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry, uintptr_t start,
                           uintptr_t length)
{
    uintptr_t addr;

    if (tlb_is_dirty_ram(tlb_entry)) {
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
        if ((addr - start) < length) {
            tlb_entry->addr_write |= TLB_NOTDIRTY;
        }
    }
}

static inline ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
{
    ram_addr_t ram_addr;

    ram_addr = qemu_ram_addr_from_host(ptr);
    if (ram_addr == RAM_ADDR_INVALID) {
        fprintf(stderr, "Bad ram pointer %p\n", ptr);
        abort();
    }
    return ram_addr;
}

void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length)
{
    CPUArchState *env;

    int mmu_idx;

    env = cpu->env_ptr;
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        unsigned int i;

        for (i = 0; i < CPU_TLB_SIZE; i++) {
            tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
                                  start1, length);
        }

        for (i = 0; i < CPU_VTLB_SIZE; i++) {
            tlb_reset_dirty_range(&env->tlb_v_table[mmu_idx][i],
                                  start1, length);
        }
    }
}

static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
{
    if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY)) {
        tlb_entry->addr_write = vaddr;
    }
}
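/* update the TLB corresponding to virtual page vaddr
   so that it is no longer dirty */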
void tlb_set_dirty(CPUState *cpu, target_ulong vaddr)
{
    CPUArchState *env = cpu->env_ptr;
    int i;
    int mmu_idx;

    vaddr &= TARGET_PAGE_MASK;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
    }

    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        int k;
        for (k = 0; k < CPU_VTLB_SIZE; k++) {
            tlb_set_dirty1(&env->tlb_v_table[mmu_idx][k], vaddr);
        }
    }
}
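/* Our TLB does not support large pages, so remember the area covered by
   large pages and trigger a full TLB flush if these are invalidated.  */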
static void tlb_add_large_page(CPUArchState *env, target_ulong vaddr,
                               target_ulong size)
{
    target_ulong mask = ~(size - 1);

    if (env->tlb_flush_addr == (target_ulong)-1) {
        env->tlb_flush_addr = vaddr & mask;
        env->tlb_flush_mask = mask;
        return;
    }
    /* Extend the existing region to include the new page.
       This is a compromise between unnecessary flushes and the cost
       of maintaining a full variable size TLB.  */
    mask &= env->tlb_flush_mask;
    while (((env->tlb_flush_addr ^ vaddr) & mask) != 0) {
        mask <<= 1;
    }
    env->tlb_flush_addr &= mask;
    env->tlb_flush_mask = mask;
}
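/* Add a new TLB entry.  At most one entry for a given virtual address
 * is permitted.  Only a single TARGET_PAGE_SIZE region is mapped, the
 * supplied size is only used by tlb_flush_page.
 *
 * Called from TCG-generated code, which is under an RCU read-side
 * critical section.
 */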
void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
                             hwaddr paddr, MemTxAttrs attrs, int prot,
                             int mmu_idx, target_ulong size)
{
    CPUArchState *env = cpu->env_ptr;
    MemoryRegionSection *section;
    unsigned int index;
    target_ulong address;
    target_ulong code_address;
    uintptr_t addend;
    CPUTLBEntry *te;
    hwaddr iotlb, xlat, sz;
    unsigned vidx = env->vtlb_index++ % CPU_VTLB_SIZE;
    int asidx = cpu_asidx_from_attrs(cpu, attrs);

    CPUIOTLBEntry *attr = &env->memattr[attrs.secure];

    assert(size >= TARGET_PAGE_SIZE);
    if (size != TARGET_PAGE_SIZE) {
        tlb_add_large_page(env, vaddr, size);
    }

    sz = size;
    section = address_space_translate_for_iotlb(cpu, asidx, paddr, &xlat, &sz,
                                                &prot, &attr->attrs);
    assert(sz >= TARGET_PAGE_SIZE);

    tlb_debug("vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx
              " prot=%x idx=%d sec=%d.%d\n",
              vaddr, paddr, prot, mmu_idx, attr->attrs.secure, attrs.secure);

    address = vaddr;
    if (!memory_region_is_ram(section->mr) &&
        !memory_region_is_romd(section->mr)) {
        /* IO memory case */
        address |= TLB_MMIO;
        addend = 0;
    } else {
        /* TLB_MMIO for rom/romd handled below */
        addend = (uintptr_t)memory_region_get_ram_ptr(section->mr) + xlat;
    }

    code_address = address;
    iotlb = memory_region_section_get_iotlb(cpu, section, vaddr, paddr, xlat,
                                            prot, &address);

    index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    te = &env->tlb_table[mmu_idx][index];

    /* do not discard the TLB entry in te; evict it into the victim TLB */
    env->tlb_v_table[mmu_idx][vidx] = *te;
    env->iotlb_v[mmu_idx][vidx] = env->iotlb[mmu_idx][index];

    /* refill the tlb */
    env->iotlb[mmu_idx][index].addr = iotlb - vaddr;
    env->iotlb[mmu_idx][index].attrs = attr->attrs;
    te->addend = addend - vaddr;
    if (prot & PAGE_READ) {
        te->addr_read = address;
    } else {
        te->addr_read = -1;
    }

    if (prot & PAGE_EXEC) {
        te->addr_code = code_address;
    } else {
        te->addr_code = -1;
    }
    if (prot & PAGE_WRITE) {
        if ((memory_region_is_ram(section->mr) && section->readonly)
            || memory_region_is_romd(section->mr)) {
            /* Write access calls the I/O callback.  */
            te->addr_write = address | TLB_MMIO;
        } else if (memory_region_is_ram(section->mr)
                   && cpu_physical_memory_is_clean(
                       memory_region_get_ram_addr(section->mr) + xlat)) {
            te->addr_write = address | TLB_NOTDIRTY;
        } else {
            te->addr_write = address;
        }
    } else {
        te->addr_write = -1;
    }
}
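/* Add a new TLB entry, but without specifying the memory
 * transaction attributes to be used.
 */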
void tlb_set_page(CPUState *cpu, target_ulong vaddr,
                  hwaddr paddr, int prot,
                  int mmu_idx, target_ulong size)
{
    tlb_set_page_with_attrs(cpu, vaddr, paddr, MEMTXATTRS_UNSPECIFIED,
                            prot, mmu_idx, size);
}

static void report_bad_exec(CPUState *cpu, target_ulong addr)
{
    /* Accidentally executing outside RAM or ROM is quite common for
     * several user-error situations, so report it in a way that makes
     * it clear that this isn't a QEMU bug and provide suggestions about
     * what a user could do to fix things.
     */
    error_report("Trying to execute code outside RAM or ROM at 0x"
                 TARGET_FMT_lx, addr);
    error_printf("This usually means one of the following happened:\n\n"
                 "(1) You told QEMU to execute a kernel for the wrong machine "
                 "type, and it crashed on startup (eg trying to run a "
                 "raspberry pi kernel on a versatilepb QEMU machine)\n"
                 "(2) You didn't give QEMU a kernel or BIOS filename at all, "
                 "and QEMU executed a ROM full of no-op instructions until "
                 "it fell off the end\n"
                 "(3) Your guest kernel has a bug and crashed by jumping "
                 "off into nowhere\n\n"
                 "This is almost always one of the first two, so check your "
                 "command line and that you are using the right type of kernel "
                 "for this machine.\n"
                 "If you think option (3) is likely then you can try debugging "
                 "your guest with the -d debug options; in particular "
                 "-d guest_errors will cause the log to include a dump of the "
                 "guest register state at this point.\n\n"
                 "Execution cannot continue; stopping here.\n\n");

    /* Report also to the logs, with more detail including register dump */
    qemu_log_mask(LOG_GUEST_ERROR, "qemu: fatal: Trying to execute code "
                  "outside RAM or ROM at 0x" TARGET_FMT_lx "\n", addr);
    log_cpu_state_mask(LOG_GUEST_ERROR, cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
}
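/* NOTE: this function can trigger an exception */
/* NOTE2: the returned address is not exactly the physical address: it
 * is actually a ram_addr_t (in system mode; the user mode emulation
 * version of this function returns a guest virtual address).
 */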
tb_page_addr_t get_page_addr_code(CPUArchState *env1, target_ulong addr)
{
    int mmu_idx, page_index, pd;
    void *p;
    MemoryRegion *mr;
    CPUState *cpu = ENV_GET_CPU(env1);
    CPUIOTLBEntry *iotlbentry;

    page_index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    mmu_idx = cpu_mmu_index(env1, true);
    if (unlikely(env1->tlb_table[mmu_idx][page_index].addr_code !=
                 (addr & TARGET_PAGE_MASK))) {
        cpu_ldub_code(env1, addr);
    }
    iotlbentry = &env1->iotlb[mmu_idx][page_index];
    pd = iotlbentry->addr & ~TARGET_PAGE_MASK;
    mr = iotlb_to_region(cpu, pd, iotlbentry->attrs);
    if (memory_region_is_unassigned(mr)) {
        CPUClass *cc = CPU_GET_CLASS(cpu);

        if (cc->do_unassigned_access) {
            cc->do_unassigned_access(cpu, addr, false, true, 0, 4);
        } else {
            report_bad_exec(cpu, addr);
            exit(1);
        }
    }
    p = (void *)((uintptr_t)addr + env1->tlb_table[mmu_idx][page_index].addend);
    return qemu_ram_addr_from_host_nofail(p);
}

static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
                         target_ulong addr, uintptr_t retaddr, int size)
{
    CPUState *cpu = ENV_GET_CPU(env);
    hwaddr physaddr = iotlbentry->addr;
    MemoryRegion *mr = iotlb_to_region(cpu, physaddr, iotlbentry->attrs);
    uint64_t val;

    physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
    cpu->mem_io_pc = retaddr;
    if (mr != &io_mem_rom && mr != &io_mem_notdirty && !cpu->can_do_io) {
        cpu_io_recompile(cpu, retaddr);
    }

    cpu->mem_io_vaddr = addr;

    /* If the region is behind an IOMMU, route the access through the
     * full address-space dispatch so that it gets translated.
     */
    if (mr->iommu_ops) {
        address_space_rw(cpu->as, physaddr, iotlbentry->attrs, (void *) &val,
                         size, false);
    } else {
        memory_region_dispatch_read(mr, physaddr, &val, size,
                                    iotlbentry->attrs);
    }

    if (qemu_etrace_mask(ETRACE_F_MEM)) {
        etrace_mem_access(&qemu_etracer, 0, 0,
                          addr, size, MEM_READ, val);
    }

    return val;
}

static void io_writex(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
                      uint64_t val, target_ulong addr,
                      uintptr_t retaddr, int size)
{
    CPUState *cpu = ENV_GET_CPU(env);
    hwaddr physaddr = iotlbentry->addr;
    MemoryRegion *mr = iotlb_to_region(cpu, physaddr, iotlbentry->attrs);

    physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
    if (mr != &io_mem_rom && mr != &io_mem_notdirty && !cpu->can_do_io) {
        cpu_io_recompile(cpu, retaddr);
    }

    cpu->mem_io_vaddr = addr;
    cpu->mem_io_pc = retaddr;

    /* If the region is behind an IOMMU, route the access through the
     * full address-space dispatch so that it gets translated.
     */
    if (mr->iommu_ops) {
        address_space_rw(cpu->as, physaddr, iotlbentry->attrs, (void *) &val,
                         size, true);
    } else {
        memory_region_dispatch_write(mr, physaddr, val, size,
                                     iotlbentry->attrs);
    }

    if (qemu_etrace_mask(ETRACE_F_MEM)) {
        etrace_mem_access(&qemu_etracer, 0, 0,
                          addr, size, MEM_WRITE, val);
    }
}
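/* Return true if ADDR is present in the victim tlb, and has been copied
   back to the main tlb.  */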
static bool victim_tlb_hit(CPUArchState *env, size_t mmu_idx, size_t index,
                           size_t elt_ofs, target_ulong page)
{
    size_t vidx;
    for (vidx = 0; vidx < CPU_VTLB_SIZE; ++vidx) {
        CPUTLBEntry *vtlb = &env->tlb_v_table[mmu_idx][vidx];
        target_ulong cmp = *(target_ulong *)((uintptr_t)vtlb + elt_ofs);

        if (cmp == page) {
            /* Found entry in victim tlb, swap tlb and iotlb.  */
            CPUTLBEntry tmptlb, *tlb = &env->tlb_table[mmu_idx][index];
            CPUIOTLBEntry tmpio, *io = &env->iotlb[mmu_idx][index];
            CPUIOTLBEntry *vio = &env->iotlb_v[mmu_idx][vidx];

            tmptlb = *tlb; *tlb = *vtlb; *vtlb = tmptlb;
            tmpio = *io; *io = *vio; *vio = tmpio;
            return true;
        }
    }
    return false;
}

/* Macro to call the above, with local variables from the use context.  */
#define VICTIM_TLB_HIT(TY, ADDR) \
    victim_tlb_hit(env, mmu_idx, index, offsetof(CPUTLBEntry, TY), \
                   (ADDR) & TARGET_PAGE_MASK)
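/* Probe for whether the specified guest write access is permitted.
 * If it is not permitted then an exception will be taken in the same
 * way as if this were a real write access (and we will not return).
 * Otherwise the function will return, and there will be a valid
 * entry in the TLB for this access.
 */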
void probe_write(CPUArchState *env, target_ulong addr, int mmu_idx,
                 uintptr_t retaddr)
{
    int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    target_ulong tlb_addr = env->tlb_table[mmu_idx][index].addr_write;

    if ((addr & TARGET_PAGE_MASK)
        != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        /* TLB entry is for a different page */
        if (!VICTIM_TLB_HIT(addr_write, addr)) {
            tlb_fill(ENV_GET_CPU(env), addr, MMU_DATA_STORE, mmu_idx, retaddr);
        }
    }
}
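/* Probe for a read-modify-write atomic operation.  Returns the host
 * address of the operand on success; otherwise exits the cpu loop
 * via cpu_loop_exit_atomic() so the operation can be performed with
 * the world stopped.
 */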
static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
                               TCGMemOpIdx oi, uintptr_t retaddr)
{
    size_t mmu_idx = get_mmuidx(oi);
    size_t index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    CPUTLBEntry *tlbe = &env->tlb_table[mmu_idx][index];
    target_ulong tlb_addr = tlbe->addr_write;
    TCGMemOp mop = get_memop(oi);
    int a_bits = get_alignment_bits(mop);
    int s_bits = mop & MO_SIZE;

    /* Adjust the given return address.  */
    retaddr -= GETPC_ADJ;

    /* Enforce guest required alignment.  */
    if (unlikely(a_bits > 0 && (addr & ((1 << a_bits) - 1)))) {
        /* ??? Maybe indicate atomic op to cpu_unaligned_access */
        cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
                             mmu_idx, retaddr);
    }

    /* Enforce qemu required alignment.  */
    if (unlikely(addr & ((1 << s_bits) - 1))) {
        /* We get here if guest alignment was not requested,
           or was not enforced by cpu_unaligned_access above.
           We might widen the access and emulate, but for now
           mark an exception and exit the cpu loop.  */
        goto stop_the_world;
    }

    /* Check TLB entry and enforce page permissions.  */
    if ((addr & TARGET_PAGE_MASK)
        != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if (!VICTIM_TLB_HIT(addr_write, addr)) {
            tlb_fill(ENV_GET_CPU(env), addr, MMU_DATA_STORE, mmu_idx, retaddr);
        }
        tlb_addr = tlbe->addr_write;
    }

    /* Notice an IO access or a notdirty page.  */
    if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
        /* There's really nothing that can be done to
           support this apart from stop-the-world.  */
        goto stop_the_world;
    }

    /* Let the guest notice RMW on a write-only page.  */
    if (unlikely(tlbe->addr_read != tlb_addr)) {
        tlb_fill(ENV_GET_CPU(env), addr, MMU_DATA_LOAD, mmu_idx, retaddr);
        /* Since we don't support reads and writes to different addresses,
           and we do have the proper page loaded for write, this shouldn't
           ever return.  But just in case, handle via stop-the-world.  */
        goto stop_the_world;
    }

    return (void *)((uintptr_t)addr + tlbe->addend);

 stop_the_world:
    cpu_loop_exit_atomic(ENV_GET_CPU(env), retaddr);
}

#ifdef TARGET_WORDS_BIGENDIAN
# define TGT_BE(X) (X)
# define TGT_LE(X) BSWAP(X)
#else
# define TGT_BE(X) BSWAP(X)
# define TGT_LE(X) (X)
#endif

#define MMUSUFFIX _mmu

#define DATA_SIZE 1
#include "softmmu_template.h"

#define DATA_SIZE 2
#include "softmmu_template.h"

#define DATA_SIZE 4
#include "softmmu_template.h"

#define DATA_SIZE 8
#include "softmmu_template.h"
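/* First set of helpers allows passing in of OI and RETADDR.  This makes
   them callable from other helpers.  */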
#define EXTRA_ARGS , TCGMemOpIdx oi, uintptr_t retaddr
#define ATOMIC_NAME(X) \
    HELPER(glue(glue(glue(atomic_ ## X, SUFFIX), END), _mmu))
#define ATOMIC_MMU_LOOKUP atomic_mmu_lookup(env, addr, oi, retaddr)

#define DATA_SIZE 1
#include "atomic_template.h"

#define DATA_SIZE 2
#include "atomic_template.h"

#define DATA_SIZE 4
#include "atomic_template.h"

#ifdef CONFIG_ATOMIC64
#define DATA_SIZE 8
#include "atomic_template.h"
#endif

#ifdef CONFIG_ATOMIC128
#define DATA_SIZE 16
#include "atomic_template.h"
#endif
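/* Second set of helpers are directly callable from TCG as helpers.  */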
#undef EXTRA_ARGS
#undef ATOMIC_NAME
#undef ATOMIC_MMU_LOOKUP
#define EXTRA_ARGS , TCGMemOpIdx oi
#define ATOMIC_NAME(X) HELPER(glue(glue(atomic_ ## X, SUFFIX), END))
#define ATOMIC_MMU_LOOKUP atomic_mmu_lookup(env, addr, oi, GETPC())

#define DATA_SIZE 1
#include "atomic_template.h"

#define DATA_SIZE 2
#include "atomic_template.h"

#define DATA_SIZE 4
#include "atomic_template.h"

#ifdef CONFIG_ATOMIC64
#define DATA_SIZE 8
#include "atomic_template.h"
#endif
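/* Code access functions.  */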
#undef MMUSUFFIX
#define MMUSUFFIX _cmmu
#undef GETPC
#define GETPC() ((uintptr_t)0)
#define SOFTMMU_CODE_ACCESS

#define DATA_SIZE 1
#include "softmmu_template.h"

#define DATA_SIZE 2
#include "softmmu_template.h"

#define DATA_SIZE 4
#include "softmmu_template.h"

#define DATA_SIZE 8
#include "softmmu_template.h"