/*
 *  Common CPU TLB handling
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/main-loop.h"
#include "cpu.h"
#include "hw/core/tcg-cpu-ops.h"
#include "exec/exec-all.h"
#include "exec/memory.h"
#include "exec/address-spaces.h"
#include "exec/cpu_ldst.h"
#include "exec/cputlb.h"
#include "exec/tb-hash.h"
#include "exec/memory-internal.h"
#include "exec/ram_addr.h"
#include "tcg/tcg.h"
#include "qemu/error-report.h"
#include "exec/log.h"
#include "exec/helper-proto.h"
#include "qemu/atomic.h"
#include "qemu/atomic128.h"
#include "exec/translate-all.h"
#include "trace/trace-root.h"
#include "trace/mem.h"
#include "internal.h"
#ifdef CONFIG_PLUGIN
#include "qemu/plugin-memory.h"
#endif

/* DEBUG defines, enable DEBUG_TLB_LOG to log to the CPU_LOG_MMU bitmap */
/* #define DEBUG_TLB */
/* #define DEBUG_TLB_LOG */

#ifdef DEBUG_TLB
# define DEBUG_TLB_GATE 1
# ifdef DEBUG_TLB_LOG
#  define DEBUG_TLB_LOG_GATE 1
# else
#  define DEBUG_TLB_LOG_GATE 0
# endif
#else
# define DEBUG_TLB_GATE 0
# define DEBUG_TLB_LOG_GATE 0
#endif

#define tlb_debug(fmt, ...) do { \
    if (DEBUG_TLB_LOG_GATE) { \
        qemu_log_mask(CPU_LOG_MMU, "%s: " fmt, __func__, \
                      ## __VA_ARGS__); \
    } else if (DEBUG_TLB_GATE) { \
        fprintf(stderr, "%s: " fmt, __func__, ## __VA_ARGS__); \
    } \
} while (0)

#define assert_cpu_is_self(cpu) do { \
    if (DEBUG_TLB_GATE) { \
        g_assert(!(cpu)->created || qemu_cpu_is_self(cpu)); \
    } \
} while (0)

/* run_on_cpu_data.target_ptr should always be big enough for a
 * target_ulong even on 32 bit builds
 */
QEMU_BUILD_BUG_ON(sizeof(target_ulong) > sizeof(run_on_cpu_data));

/* We currently can't handle more than 16 bits in the MMUIDX bitmask.
 */
QEMU_BUILD_BUG_ON(NB_MMU_MODES > 16);
#define ALL_MMUIDX_BITS ((1 << NB_MMU_MODES) - 1)

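/*
 * The fast TLB mask stores (n_entries - 1) << CPU_TLB_ENTRY_BITS, so the
 * two helpers below recover the entry count and the byte size of the
 * table directly from it.
 */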
static inline size_t tlb_n_entries(CPUTLBDescFast *fast)
{
    return (fast->mask >> CPU_TLB_ENTRY_BITS) + 1;
}

static inline size_t sizeof_tlb(CPUTLBDescFast *fast)
{
    return fast->mask + (1 << CPU_TLB_ENTRY_BITS);
}

static void tlb_window_reset(CPUTLBDesc *desc, int64_t ns,
                             size_t max_entries)
{
    desc->window_begin_ns = ns;
    desc->window_max_entries = max_entries;
}

static void tb_jmp_cache_clear_page(CPUState *cpu, target_ulong page_addr)
{
    unsigned int i, i0 = tb_jmp_cache_hash_page(page_addr);

    for (i = 0; i < TB_JMP_PAGE_SIZE; i++) {
        qatomic_set(&cpu->tb_jmp_cache[i0 + i], NULL);
    }
}

static void tb_flush_jmp_cache(CPUState *cpu, target_ulong addr)
{
    /* Discard jump cache entries for any tb which might potentially
       overlap the flushed page.  */
    tb_jmp_cache_clear_page(cpu, addr - TARGET_PAGE_SIZE);
    tb_jmp_cache_clear_page(cpu, addr);
}

/**
 * tlb_mmu_resize_locked() - perform TLB resize bookkeeping; resize if necessary
 * @desc: The CPUTLBDesc portion of the TLB
 * @fast: The CPUTLBDescFast portion of the same TLB
 *
 * Called with tlb_lock held.
 *
 * We only resize the TLB on a flush; rehashing or flushing it at any other
 * time would add an unwanted cost to the hot path.  The heuristic is based
 * on the maximum number of entries in use within a 100 ms window: if more
 * than 70% of the current size was used we double the size (up to
 * CPU_TLB_DYN_MAX_BITS); if less than 30% was used and the window has
 * expired we shrink towards the observed working set (down to
 * CPU_TLB_DYN_MIN_BITS).  Keeping the use rate in the 30-70% band trades
 * host memory usage against the cost of guest TLB misses, which are
 * expensive.
 */
static void tlb_mmu_resize_locked(CPUTLBDesc *desc, CPUTLBDescFast *fast,
                                  int64_t now)
{
    size_t old_size = tlb_n_entries(fast);
    size_t rate;
    size_t new_size = old_size;
    int64_t window_len_ms = 100;
    int64_t window_len_ns = window_len_ms * 1000 * 1000;
    bool window_expired = now > desc->window_begin_ns + window_len_ns;

    if (desc->n_used_entries > desc->window_max_entries) {
        desc->window_max_entries = desc->n_used_entries;
    }
    rate = desc->window_max_entries * 100 / old_size;

    if (rate > 70) {
        new_size = MIN(old_size << 1, 1 << CPU_TLB_DYN_MAX_BITS);
    } else if (rate < 30 && window_expired) {
        size_t ceil = pow2ceil(desc->window_max_entries);
        size_t expected_rate = desc->window_max_entries * 100 / ceil;

        /*
         * Avoid undersizing when the max number of entries seen is just below
         * a pow2. For instance, if max_entries == 1025, the expected use rate
         * would be 1025/2048==50%. However, if max_entries == 1023, we'd have
         * 1023/1024==99.9% use rate, so we'd likely end up doubling the size
         * later. Thus, make sure that the expected use rate remains below 70%.
         * (and since we double the size, that means the lowest allocation we
         * will end up with has an expected rate of 35%, which is still in the
         * 30-70% range where we consider that the size is appropriate.)
         */
        if (expected_rate > 70) {
            ceil *= 2;
        }
        new_size = MAX(ceil, 1 << CPU_TLB_DYN_MIN_BITS);
    }

    if (new_size == old_size) {
        if (window_expired) {
            tlb_window_reset(desc, now, desc->n_used_entries);
        }
        return;
    }

    g_free(fast->table);
    g_free(desc->iotlb);

    tlb_window_reset(desc, now, 0);
    /* desc->n_used_entries is cleared by the caller */
    fast->mask = (new_size - 1) << CPU_TLB_ENTRY_BITS;
    fast->table = g_try_new(CPUTLBEntry, new_size);
    desc->iotlb = g_try_new(CPUIOTLBEntry, new_size);

    /*
     * If the allocations fail, try smaller sizes. We just freed some
     * memory, so going back to half of new_size has a good chance of working.
     * Increased memory pressure elsewhere in the system might cause the
     * allocations to fail though, so we progressively reduce the allocation
     * size, aborting if we cannot even allocate the smallest TLB we support.
     */
    while (fast->table == NULL || desc->iotlb == NULL) {
        if (new_size == (1 << CPU_TLB_DYN_MIN_BITS)) {
            error_report("%s: %s", __func__, strerror(errno));
            abort();
        }
        new_size = MAX(new_size >> 1, 1 << CPU_TLB_DYN_MIN_BITS);
        fast->mask = (new_size - 1) << CPU_TLB_ENTRY_BITS;

        g_free(fast->table);
        g_free(desc->iotlb);
        fast->table = g_try_new(CPUTLBEntry, new_size);
        desc->iotlb = g_try_new(CPUIOTLBEntry, new_size);
    }
}

static void tlb_mmu_flush_locked(CPUTLBDesc *desc, CPUTLBDescFast *fast)
{
    desc->n_used_entries = 0;
    desc->large_page_addr = -1;
    desc->large_page_mask = -1;
    desc->vindex = 0;
    memset(fast->table, -1, sizeof_tlb(fast));
    memset(desc->vtable, -1, sizeof(desc->vtable));
}

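/* Called with tlb_lock held: resize the given mmu_idx's TLB, then wipe it. */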
static void tlb_flush_one_mmuidx_locked(CPUArchState *env, int mmu_idx,
                                        int64_t now)
{
    CPUTLBDesc *desc = &env_tlb(env)->d[mmu_idx];
    CPUTLBDescFast *fast = &env_tlb(env)->f[mmu_idx];

    tlb_mmu_resize_locked(desc, fast, now);
    tlb_mmu_flush_locked(desc, fast);
}

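/*
 * Allocate a TLB at its default size and mark it flushed; subsequent
 * flushes let tlb_mmu_resize_locked() grow or shrink it.
 */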
static void tlb_mmu_init(CPUTLBDesc *desc, CPUTLBDescFast *fast, int64_t now)
{
    size_t n_entries = 1 << CPU_TLB_DYN_DEFAULT_BITS;

    tlb_window_reset(desc, now, 0);
    desc->n_used_entries = 0;
    fast->mask = (n_entries - 1) << CPU_TLB_ENTRY_BITS;
    fast->table = g_new(CPUTLBEntry, n_entries);
    desc->iotlb = g_new(CPUIOTLBEntry, n_entries);
    tlb_mmu_flush_locked(desc, fast);
}

static inline void tlb_n_used_entries_inc(CPUArchState *env, uintptr_t mmu_idx)
{
    env_tlb(env)->d[mmu_idx].n_used_entries++;
}

static inline void tlb_n_used_entries_dec(CPUArchState *env, uintptr_t mmu_idx)
{
    env_tlb(env)->d[mmu_idx].n_used_entries--;
}

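/*
 * Initialize the per-vCPU TLB state: the lock, the dirty-mode bitmask,
 * and one dynamically sized TLB per MMU mode.
 */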
void tlb_init(CPUState *cpu)
{
    CPUArchState *env = cpu->env_ptr;
    int64_t now = get_clock_realtime();
    int i;

    qemu_spin_init(&env_tlb(env)->c.lock);

    /* All tlbs are initialized flushed. */
    env_tlb(env)->c.dirty = 0;

    for (i = 0; i < NB_MMU_MODES; i++) {
        tlb_mmu_init(&env_tlb(env)->d[i], &env_tlb(env)->f[i], now);
    }
}

void tlb_destroy(CPUState *cpu)
{
    CPUArchState *env = cpu->env_ptr;
    int i;

    qemu_spin_destroy(&env_tlb(env)->c.lock);
    for (i = 0; i < NB_MMU_MODES; i++) {
        CPUTLBDesc *desc = &env_tlb(env)->d[i];
        CPUTLBDescFast *fast = &env_tlb(env)->f[i];

        g_free(fast->table);
        g_free(desc->iotlb);
    }
}

/* flush_all_helper: run fn across all cpus
 *
 * If the wait flag is set then the src cpu's helper will be queued as
 * "safe" work and the loop exited creating a synchronisation point
 * where all queued work will be finished before execution starts
 * again.
 */
static void flush_all_helper(CPUState *src, run_on_cpu_func fn,
                             run_on_cpu_data d)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (cpu != src) {
            async_run_on_cpu(cpu, fn, d);
        }
    }
}

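/* Sum the flush statistics across all vCPUs (used by the "info jit" dump).  */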
void tlb_flush_counts(size_t *pfull, size_t *ppart, size_t *pelide)
{
    CPUState *cpu;
    size_t full = 0, part = 0, elide = 0;

    CPU_FOREACH(cpu) {
        CPUArchState *env = cpu->env_ptr;

        full += qatomic_read(&env_tlb(env)->c.full_flush_count);
        part += qatomic_read(&env_tlb(env)->c.part_flush_count);
        elide += qatomic_read(&env_tlb(env)->c.elide_flush_count);
    }
    *pfull = full;
    *ppart = part;
    *pelide = elide;
}

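/*
 * Flush the TLBs of the MMU modes in @data's bitmask on @cpu.  Only modes
 * marked dirty (i.e. used since the last flush) are actually flushed;
 * requests for already-clean modes are counted as elided.
 */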
static void tlb_flush_by_mmuidx_async_work(CPUState *cpu, run_on_cpu_data data)
{
    CPUArchState *env = cpu->env_ptr;
    uint16_t asked = data.host_int;
    uint16_t all_dirty, work, to_clean;
    int64_t now = get_clock_realtime();

    assert_cpu_is_self(cpu);

    tlb_debug("mmu_idx:0x%04" PRIx16 "\n", asked);

    qemu_spin_lock(&env_tlb(env)->c.lock);

    all_dirty = env_tlb(env)->c.dirty;
    to_clean = asked & all_dirty;
    all_dirty &= ~to_clean;
    env_tlb(env)->c.dirty = all_dirty;

    for (work = to_clean; work != 0; work &= work - 1) {
        int mmu_idx = ctz32(work);
        tlb_flush_one_mmuidx_locked(env, mmu_idx, now);
    }

    qemu_spin_unlock(&env_tlb(env)->c.lock);

    cpu_tb_jmp_cache_clear(cpu);

    if (to_clean == ALL_MMUIDX_BITS) {
        qatomic_set(&env_tlb(env)->c.full_flush_count,
                    env_tlb(env)->c.full_flush_count + 1);
    } else {
        qatomic_set(&env_tlb(env)->c.part_flush_count,
                    env_tlb(env)->c.part_flush_count + ctpop16(to_clean));
        if (to_clean != asked) {
            qatomic_set(&env_tlb(env)->c.elide_flush_count,
                        env_tlb(env)->c.elide_flush_count +
                        ctpop16(asked & ~to_clean));
        }
    }
}

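/*
 * Flush @cpu's TLBs for the MMU modes in @idxmap, always on the vCPU's
 * own thread: directly if we are already on it, otherwise via queued
 * async work.
 */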
void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap)
{
    tlb_debug("mmu_idx: 0x%" PRIx16 "\n", idxmap);

    if (cpu->created && !qemu_cpu_is_self(cpu)) {
        async_run_on_cpu(cpu, tlb_flush_by_mmuidx_async_work,
                         RUN_ON_CPU_HOST_INT(idxmap));
    } else {
        tlb_flush_by_mmuidx_async_work(cpu, RUN_ON_CPU_HOST_INT(idxmap));
    }
}

void tlb_flush(CPUState *cpu)
{
    tlb_flush_by_mmuidx(cpu, ALL_MMUIDX_BITS);
}

void tlb_flush_by_mmuidx_all_cpus(CPUState *src_cpu, uint16_t idxmap)
{
    const run_on_cpu_func fn = tlb_flush_by_mmuidx_async_work;

    tlb_debug("mmu_idx: 0x%"PRIx16"\n", idxmap);

    flush_all_helper(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
    fn(src_cpu, RUN_ON_CPU_HOST_INT(idxmap));
}

void tlb_flush_all_cpus(CPUState *src_cpu)
{
    tlb_flush_by_mmuidx_all_cpus(src_cpu, ALL_MMUIDX_BITS);
}

void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *src_cpu, uint16_t idxmap)
{
    const run_on_cpu_func fn = tlb_flush_by_mmuidx_async_work;

    tlb_debug("mmu_idx: 0x%"PRIx16"\n", idxmap);

    flush_all_helper(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
    async_safe_run_on_cpu(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
}

void tlb_flush_all_cpus_synced(CPUState *src_cpu)
{
    tlb_flush_by_mmuidx_all_cpus_synced(src_cpu, ALL_MMUIDX_BITS);
}

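/*
 * Return true if @page (pre-masked by @mask) matches any of the three
 * comparators (read, write, code) of @tlb_entry, i.e. the entry covers
 * the page under at least one protection.
 */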
static bool tlb_hit_page_mask_anyprot(CPUTLBEntry *tlb_entry,
                                      target_ulong page, target_ulong mask)
{
    page &= mask;
    mask &= TARGET_PAGE_MASK | TLB_INVALID_MASK;

    return (page == (tlb_entry->addr_read & mask) ||
            page == (tlb_addr_write(tlb_entry) & mask) ||
            page == (tlb_entry->addr_code & mask));
}

static inline bool tlb_hit_page_anyprot(CPUTLBEntry *tlb_entry,
                                        target_ulong page)
{
    return tlb_hit_page_mask_anyprot(tlb_entry, page, -1);
}

/**
 * tlb_entry_is_empty - return true if the entry is not in use
 * @te: pointer to CPUTLBEntry
 */
static inline bool tlb_entry_is_empty(const CPUTLBEntry *te)
{
    return te->addr_read == -1 && te->addr_write == -1 && te->addr_code == -1;
}

/* Called with tlb_c.lock held */
static bool tlb_flush_entry_mask_locked(CPUTLBEntry *tlb_entry,
                                        target_ulong page,
                                        target_ulong mask)
{
    if (tlb_hit_page_mask_anyprot(tlb_entry, page, mask)) {
        memset(tlb_entry, -1, sizeof(*tlb_entry));
        return true;
    }
    return false;
}

static inline bool tlb_flush_entry_locked(CPUTLBEntry *tlb_entry,
                                          target_ulong page)
{
    return tlb_flush_entry_mask_locked(tlb_entry, page, -1);
}

/* Called with tlb_c.lock held */
static void tlb_flush_vtlb_page_mask_locked(CPUArchState *env, int mmu_idx,
                                            target_ulong page,
                                            target_ulong mask)
{
    CPUTLBDesc *d = &env_tlb(env)->d[mmu_idx];
    int k;

    assert_cpu_is_self(env_cpu(env));
    for (k = 0; k < CPU_VTLB_SIZE; k++) {
        if (tlb_flush_entry_mask_locked(&d->vtable[k], page, mask)) {
            tlb_n_used_entries_dec(env, mmu_idx);
        }
    }
}

static inline void tlb_flush_vtlb_page_locked(CPUArchState *env, int mmu_idx,
                                              target_ulong page)
{
    tlb_flush_vtlb_page_mask_locked(env, mmu_idx, page, -1);
}

static void tlb_flush_page_locked(CPUArchState *env, int midx,
                                  target_ulong page)
{
    target_ulong lp_addr = env_tlb(env)->d[midx].large_page_addr;
    target_ulong lp_mask = env_tlb(env)->d[midx].large_page_mask;

    /* Check if we need to flush due to large pages.  */
    if ((page & lp_mask) == lp_addr) {
        tlb_debug("forcing full flush midx %d ("
                  TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
                  midx, lp_addr, lp_mask);
        tlb_flush_one_mmuidx_locked(env, midx, get_clock_realtime());
    } else {
        if (tlb_flush_entry_locked(tlb_entry(env, midx, page), page)) {
            tlb_n_used_entries_dec(env, midx);
        }
        tlb_flush_vtlb_page_locked(env, midx, page);
    }
}

/**
 * tlb_flush_page_by_mmuidx_async_0:
 * @cpu: cpu on which to flush
 * @addr: page of virtual address to flush
 * @idxmap: set of mmu_idx to flush
 *
 * Helper for tlb_flush_page_by_mmuidx and friends, flush one page
 * at @addr from the tlbs indicated by @idxmap from @cpu.
 */
static void tlb_flush_page_by_mmuidx_async_0(CPUState *cpu,
                                             target_ulong addr,
                                             uint16_t idxmap)
{
    CPUArchState *env = cpu->env_ptr;
    int mmu_idx;

    assert_cpu_is_self(cpu);

    tlb_debug("page addr:" TARGET_FMT_lx " mmu_map:0x%x\n", addr, idxmap);

    qemu_spin_lock(&env_tlb(env)->c.lock);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        if ((idxmap >> mmu_idx) & 1) {
            tlb_flush_page_locked(env, mmu_idx, addr);
        }
    }
    qemu_spin_unlock(&env_tlb(env)->c.lock);

    tb_flush_jmp_cache(cpu, addr);
}

/**
 * tlb_flush_page_by_mmuidx_async_1:
 * @cpu: cpu on which to flush
 * @data: encoded addr + idxmap
 *
 * Helper for tlb_flush_page_by_mmuidx and friends, called through
 * async_run_on_cpu.  The idxmap parameter is encoded in the page
 * offset of the target_ptr field.  This limits the set of mmu_idx
 * that can be passed via this method.
 */
static void tlb_flush_page_by_mmuidx_async_1(CPUState *cpu,
                                             run_on_cpu_data data)
{
    target_ulong addr_and_idxmap = (target_ulong) data.target_ptr;
    target_ulong addr = addr_and_idxmap & TARGET_PAGE_MASK;
    uint16_t idxmap = addr_and_idxmap & ~TARGET_PAGE_MASK;

    tlb_flush_page_by_mmuidx_async_0(cpu, addr, idxmap);
}

typedef struct {
    target_ulong addr;
    uint16_t idxmap;
} TLBFlushPageByMMUIdxData;

/**
 * tlb_flush_page_by_mmuidx_async_2:
 * @cpu: cpu on which to flush
 * @data: allocated addr + idxmap
 *
 * Helper for tlb_flush_page_by_mmuidx and friends, called through
 * async_run_on_cpu.  The addr+idxmap parameters are stored in a
 * TLBFlushPageByMMUIdxData structure that has been allocated
 * specifically for this helper.  Free the structure when done.
 */
static void tlb_flush_page_by_mmuidx_async_2(CPUState *cpu,
                                             run_on_cpu_data data)
{
    TLBFlushPageByMMUIdxData *d = data.host_ptr;

    tlb_flush_page_by_mmuidx_async_0(cpu, d->addr, d->idxmap);
    g_free(d);
}

void tlb_flush_page_by_mmuidx(CPUState *cpu, target_ulong addr, uint16_t idxmap)
{
    tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%" PRIx16 "\n", addr, idxmap);

    /* This should already be page aligned */
    addr &= TARGET_PAGE_MASK;

    if (qemu_cpu_is_self(cpu)) {
        tlb_flush_page_by_mmuidx_async_0(cpu, addr, idxmap);
    } else if (idxmap < TARGET_PAGE_SIZE) {
        /*
         * Most targets have only a few mmu_idx.  In the case where
         * we can stuff idxmap into the low TARGET_PAGE_BITS, avoid
         * allocating memory for this operation.
         */
        async_run_on_cpu(cpu, tlb_flush_page_by_mmuidx_async_1,
                         RUN_ON_CPU_TARGET_PTR(addr | idxmap));
    } else {
        TLBFlushPageByMMUIdxData *d = g_new(TLBFlushPageByMMUIdxData, 1);

        /* Otherwise allocate a structure, freed by the worker.  */
        d->addr = addr;
        d->idxmap = idxmap;
        async_run_on_cpu(cpu, tlb_flush_page_by_mmuidx_async_2,
                         RUN_ON_CPU_HOST_PTR(d));
    }
}

void tlb_flush_page(CPUState *cpu, target_ulong addr)
{
    tlb_flush_page_by_mmuidx(cpu, addr, ALL_MMUIDX_BITS);
}

void tlb_flush_page_by_mmuidx_all_cpus(CPUState *src_cpu, target_ulong addr,
                                       uint16_t idxmap)
{
    tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%"PRIx16"\n", addr, idxmap);

    /* This should already be page aligned */
    addr &= TARGET_PAGE_MASK;

    /*
     * Allocate memory to hold addr+idxmap only when needed.
     * See tlb_flush_page_by_mmuidx for details.
     */
    if (idxmap < TARGET_PAGE_SIZE) {
        flush_all_helper(src_cpu, tlb_flush_page_by_mmuidx_async_1,
                         RUN_ON_CPU_TARGET_PTR(addr | idxmap));
    } else {
        CPUState *dst_cpu;

        /* Allocate a separate data block for each destination cpu.  */
        CPU_FOREACH(dst_cpu) {
            if (dst_cpu != src_cpu) {
                TLBFlushPageByMMUIdxData *d
                    = g_new(TLBFlushPageByMMUIdxData, 1);

                d->addr = addr;
                d->idxmap = idxmap;
                async_run_on_cpu(dst_cpu, tlb_flush_page_by_mmuidx_async_2,
                                 RUN_ON_CPU_HOST_PTR(d));
            }
        }
    }

    tlb_flush_page_by_mmuidx_async_0(src_cpu, addr, idxmap);
}

void tlb_flush_page_all_cpus(CPUState *src, target_ulong addr)
{
    tlb_flush_page_by_mmuidx_all_cpus(src, addr, ALL_MMUIDX_BITS);
}

void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
                                              target_ulong addr,
                                              uint16_t idxmap)
{
    tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%"PRIx16"\n", addr, idxmap);

    /* This should already be page aligned */
    addr &= TARGET_PAGE_MASK;

    /*
     * Allocate memory to hold addr+idxmap only when needed.
     * See tlb_flush_page_by_mmuidx for details.
     */
    if (idxmap < TARGET_PAGE_SIZE) {
        flush_all_helper(src_cpu, tlb_flush_page_by_mmuidx_async_1,
                         RUN_ON_CPU_TARGET_PTR(addr | idxmap));
        async_safe_run_on_cpu(src_cpu, tlb_flush_page_by_mmuidx_async_1,
                              RUN_ON_CPU_TARGET_PTR(addr | idxmap));
    } else {
        CPUState *dst_cpu;
        TLBFlushPageByMMUIdxData *d;

        /* Allocate a separate data block for each destination cpu.  */
        CPU_FOREACH(dst_cpu) {
            if (dst_cpu != src_cpu) {
                d = g_new(TLBFlushPageByMMUIdxData, 1);
                d->addr = addr;
                d->idxmap = idxmap;
                async_run_on_cpu(dst_cpu, tlb_flush_page_by_mmuidx_async_2,
                                 RUN_ON_CPU_HOST_PTR(d));
            }
        }

        d = g_new(TLBFlushPageByMMUIdxData, 1);
        d->addr = addr;
        d->idxmap = idxmap;
        async_safe_run_on_cpu(src_cpu, tlb_flush_page_by_mmuidx_async_2,
                              RUN_ON_CPU_HOST_PTR(d));
    }
}

void tlb_flush_page_all_cpus_synced(CPUState *src, target_ulong addr)
{
    tlb_flush_page_by_mmuidx_all_cpus_synced(src, addr, ALL_MMUIDX_BITS);
}

static void tlb_flush_page_bits_locked(CPUArchState *env, int midx,
                                       target_ulong page, unsigned bits)
{
    CPUTLBDesc *d = &env_tlb(env)->d[midx];
    CPUTLBDescFast *f = &env_tlb(env)->f[midx];
    target_ulong mask = MAKE_64BIT_MASK(0, bits);

    /*
     * If @bits is smaller than the tlb size, there may be multiple entries
     * within the TLB; otherwise all addresses that match under @mask hit
     * the same TLB entry.
     *
     * TODO: Perhaps allow bits to be a few bits less than the size.
     * For now, just flush the entire TLB.
     */
    if (mask < f->mask) {
        tlb_debug("forcing full flush midx %d ("
                  TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
                  midx, page, mask);
        tlb_flush_one_mmuidx_locked(env, midx, get_clock_realtime());
        return;
    }

    /* Check if we need to flush due to large pages.  */
    if ((page & d->large_page_mask) == d->large_page_addr) {
        tlb_debug("forcing full flush midx %d ("
                  TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
                  midx, d->large_page_addr, d->large_page_mask);
        tlb_flush_one_mmuidx_locked(env, midx, get_clock_realtime());
        return;
    }

    if (tlb_flush_entry_mask_locked(tlb_entry(env, midx, page), page, mask)) {
        tlb_n_used_entries_dec(env, midx);
    }
    tlb_flush_vtlb_page_mask_locked(env, midx, page, mask);
}

typedef struct {
    target_ulong addr;
    uint16_t idxmap;
    uint16_t bits;
} TLBFlushPageBitsByMMUIdxData;

static void
tlb_flush_page_bits_by_mmuidx_async_0(CPUState *cpu,
                                      TLBFlushPageBitsByMMUIdxData d)
{
    CPUArchState *env = cpu->env_ptr;
    int mmu_idx;

    assert_cpu_is_self(cpu);

    tlb_debug("page addr:" TARGET_FMT_lx "/%u mmu_map:0x%x\n",
              d.addr, d.bits, d.idxmap);

    qemu_spin_lock(&env_tlb(env)->c.lock);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        if ((d.idxmap >> mmu_idx) & 1) {
            tlb_flush_page_bits_locked(env, mmu_idx, d.addr, d.bits);
        }
    }
    qemu_spin_unlock(&env_tlb(env)->c.lock);

    tb_flush_jmp_cache(cpu, d.addr);
}

static bool encode_pbm_to_runon(run_on_cpu_data *out,
                                TLBFlushPageBitsByMMUIdxData d)
{
    /* We need 6 bits to hold @bits up to 64. */
    if (d.idxmap <= MAKE_64BIT_MASK(0, TARGET_PAGE_BITS - 6)) {
        *out = RUN_ON_CPU_TARGET_PTR(d.addr | (d.idxmap << 6) | d.bits);
        return true;
    }
    return false;
}

static TLBFlushPageBitsByMMUIdxData
decode_runon_to_pbm(run_on_cpu_data data)
{
    target_ulong addr_map_bits = (target_ulong) data.target_ptr;
    return (TLBFlushPageBitsByMMUIdxData){
        .addr = addr_map_bits & TARGET_PAGE_MASK,
        .idxmap = (addr_map_bits & ~TARGET_PAGE_MASK) >> 6,
        .bits = addr_map_bits & 0x3f
    };
}
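
/*
 * Worked example of the encoding above, assuming TARGET_PAGE_BITS == 12:
 * flushing page 0xabcd1000 with idxmap 0x3 and bits 48 is encoded as
 *   0xabcd1000 | (0x3 << 6) | 48 == 0xabcd10f0,
 * and decode_runon_to_pbm() recovers addr 0xabcd1000, idxmap 0x3 and
 * bits 48 from the page offset.
 */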

static void tlb_flush_page_bits_by_mmuidx_async_1(CPUState *cpu,
                                                  run_on_cpu_data runon)
{
    tlb_flush_page_bits_by_mmuidx_async_0(cpu, decode_runon_to_pbm(runon));
}

static void tlb_flush_page_bits_by_mmuidx_async_2(CPUState *cpu,
                                                  run_on_cpu_data data)
{
    TLBFlushPageBitsByMMUIdxData *d = data.host_ptr;

    tlb_flush_page_bits_by_mmuidx_async_0(cpu, *d);
    g_free(d);
}

void tlb_flush_page_bits_by_mmuidx(CPUState *cpu, target_ulong addr,
                                   uint16_t idxmap, unsigned bits)
{
    TLBFlushPageBitsByMMUIdxData d;
    run_on_cpu_data runon;

    /* If all bits are significant, this devolves to tlb_flush_page. */
    if (bits >= TARGET_LONG_BITS) {
        tlb_flush_page_by_mmuidx(cpu, addr, idxmap);
        return;
    }
    /* If no page bits are significant, this devolves to tlb_flush. */
    if (bits < TARGET_PAGE_BITS) {
        tlb_flush_by_mmuidx(cpu, idxmap);
        return;
    }

    /* This should already be page aligned */
    d.addr = addr & TARGET_PAGE_MASK;
    d.idxmap = idxmap;
    d.bits = bits;

    if (qemu_cpu_is_self(cpu)) {
        tlb_flush_page_bits_by_mmuidx_async_0(cpu, d);
    } else if (encode_pbm_to_runon(&runon, d)) {
        async_run_on_cpu(cpu, tlb_flush_page_bits_by_mmuidx_async_1, runon);
    } else {
        TLBFlushPageBitsByMMUIdxData *p
            = g_new(TLBFlushPageBitsByMMUIdxData, 1);

        /* Otherwise allocate a structure, freed by the worker.  */
        *p = d;
        async_run_on_cpu(cpu, tlb_flush_page_bits_by_mmuidx_async_2,
                         RUN_ON_CPU_HOST_PTR(p));
    }
}

void tlb_flush_page_bits_by_mmuidx_all_cpus(CPUState *src_cpu,
                                            target_ulong addr,
                                            uint16_t idxmap,
                                            unsigned bits)
{
    TLBFlushPageBitsByMMUIdxData d;
    run_on_cpu_data runon;

    /* If all bits are significant, this devolves to tlb_flush_page. */
    if (bits >= TARGET_LONG_BITS) {
        tlb_flush_page_by_mmuidx_all_cpus(src_cpu, addr, idxmap);
        return;
    }
    /* If no page bits are significant, this devolves to tlb_flush. */
    if (bits < TARGET_PAGE_BITS) {
        tlb_flush_by_mmuidx_all_cpus(src_cpu, idxmap);
        return;
    }

    /* This should already be page aligned */
    d.addr = addr & TARGET_PAGE_MASK;
    d.idxmap = idxmap;
    d.bits = bits;

    if (encode_pbm_to_runon(&runon, d)) {
        flush_all_helper(src_cpu, tlb_flush_page_bits_by_mmuidx_async_1, runon);
    } else {
        CPUState *dst_cpu;
        TLBFlushPageBitsByMMUIdxData *p;

        /* Allocate a separate data block for each destination cpu.  */
        CPU_FOREACH(dst_cpu) {
            if (dst_cpu != src_cpu) {
                p = g_new(TLBFlushPageBitsByMMUIdxData, 1);
                *p = d;
                async_run_on_cpu(dst_cpu,
                                 tlb_flush_page_bits_by_mmuidx_async_2,
                                 RUN_ON_CPU_HOST_PTR(p));
            }
        }
    }

    tlb_flush_page_bits_by_mmuidx_async_0(src_cpu, d);
}

void tlb_flush_page_bits_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
                                                   target_ulong addr,
                                                   uint16_t idxmap,
                                                   unsigned bits)
{
    TLBFlushPageBitsByMMUIdxData d;
    run_on_cpu_data runon;

    /* If all bits are significant, this devolves to tlb_flush_page. */
    if (bits >= TARGET_LONG_BITS) {
        tlb_flush_page_by_mmuidx_all_cpus_synced(src_cpu, addr, idxmap);
        return;
    }
    /* If no page bits are significant, this devolves to tlb_flush. */
    if (bits < TARGET_PAGE_BITS) {
        tlb_flush_by_mmuidx_all_cpus_synced(src_cpu, idxmap);
        return;
    }

    /* This should already be page aligned */
    d.addr = addr & TARGET_PAGE_MASK;
    d.idxmap = idxmap;
    d.bits = bits;

    if (encode_pbm_to_runon(&runon, d)) {
        flush_all_helper(src_cpu, tlb_flush_page_bits_by_mmuidx_async_1, runon);
        async_safe_run_on_cpu(src_cpu, tlb_flush_page_bits_by_mmuidx_async_1,
                              runon);
    } else {
        CPUState *dst_cpu;
        TLBFlushPageBitsByMMUIdxData *p;

        /* Allocate a separate data block for each destination cpu.  */
        CPU_FOREACH(dst_cpu) {
            if (dst_cpu != src_cpu) {
                p = g_new(TLBFlushPageBitsByMMUIdxData, 1);
                *p = d;
                async_run_on_cpu(dst_cpu, tlb_flush_page_bits_by_mmuidx_async_2,
                                 RUN_ON_CPU_HOST_PTR(p));
            }
        }

        p = g_new(TLBFlushPageBitsByMMUIdxData, 1);
        *p = d;
        async_safe_run_on_cpu(src_cpu, tlb_flush_page_bits_by_mmuidx_async_2,
                              RUN_ON_CPU_HOST_PTR(p));
    }
}

/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
void tlb_protect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_test_and_clear_dirty(ram_addr, TARGET_PAGE_SIZE,
                                             DIRTY_MEMORY_CODE);
}

/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
void tlb_unprotect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_set_dirty_flag(ram_addr, DIRTY_MEMORY_CODE);
}

/*
 * Dirty write flag handling
 *
 * When the TCG code writes to a location it looks up the address in
 * the TLB and uses that data to compute the final address. If any of
 * the lower bits of the address are set then the slow path is forced.
 * There are a number of reasons to do this but for normal RAM the
 * most usual is detecting writes to code regions which may invalidate
 * generated code.
 *
 * Other vCPUs might be reading their TLBs during guest execution, so we update
 * te->addr_write with qatomic_set. We don't need to worry about this for
 * oversized guests as MTTCG is disabled for them.
 *
 * Called with tlb_c.lock held.
 */
static void tlb_reset_dirty_range_locked(CPUTLBEntry *tlb_entry,
                                         uintptr_t start, uintptr_t length)
{
    uintptr_t addr = tlb_entry->addr_write;

    if ((addr & (TLB_INVALID_MASK | TLB_MMIO |
                 TLB_DISCARD_WRITE | TLB_NOTDIRTY)) == 0) {
        addr &= TARGET_PAGE_MASK;
        addr += tlb_entry->addend;
        if ((addr - start) < length) {
#if TCG_OVERSIZED_GUEST
            tlb_entry->addr_write |= TLB_NOTDIRTY;
#else
            qatomic_set(&tlb_entry->addr_write,
                        tlb_entry->addr_write | TLB_NOTDIRTY);
#endif
        }
    }
}

/*
 * Called with tlb_c.lock held.
 * Called only from the vCPU context, i.e. the TLB's owner thread.
 */
static inline void copy_tlb_helper_locked(CPUTLBEntry *d, const CPUTLBEntry *s)
{
    *d = *s;
}

/* This is a cross vCPU call (i.e. another vCPU resetting the flags of
 * the target vCPU).
 * We must take tlb_c.lock to avoid racing with another vCPU update. The only
 * thing actually updated is the target TLB entry ->addr_write flags.
 */
void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length)
{
    CPUArchState *env;
    int mmu_idx;

    env = cpu->env_ptr;
    qemu_spin_lock(&env_tlb(env)->c.lock);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        unsigned int i;
        unsigned int n = tlb_n_entries(&env_tlb(env)->f[mmu_idx]);

        for (i = 0; i < n; i++) {
            tlb_reset_dirty_range_locked(&env_tlb(env)->f[mmu_idx].table[i],
                                         start1, length);
        }

        for (i = 0; i < CPU_VTLB_SIZE; i++) {
            tlb_reset_dirty_range_locked(&env_tlb(env)->d[mmu_idx].vtable[i],
                                         start1, length);
        }
    }
    qemu_spin_unlock(&env_tlb(env)->c.lock);
}

/* Called with tlb_c.lock held */
static inline void tlb_set_dirty1_locked(CPUTLBEntry *tlb_entry,
                                         target_ulong vaddr)
{
    if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY)) {
        tlb_entry->addr_write = vaddr;
    }
}

/* update the TLB corresponding to virtual page vaddr
   so that it is no longer dirty */
void tlb_set_dirty(CPUState *cpu, target_ulong vaddr)
{
    CPUArchState *env = cpu->env_ptr;
    int mmu_idx;

    assert_cpu_is_self(cpu);

    vaddr &= TARGET_PAGE_MASK;
    qemu_spin_lock(&env_tlb(env)->c.lock);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        tlb_set_dirty1_locked(tlb_entry(env, mmu_idx, vaddr), vaddr);
    }

    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        int k;
        for (k = 0; k < CPU_VTLB_SIZE; k++) {
            tlb_set_dirty1_locked(&env_tlb(env)->d[mmu_idx].vtable[k], vaddr);
        }
    }
    qemu_spin_unlock(&env_tlb(env)->c.lock);
}

/* Our TLB does not support large pages, so remember the area covered by
   large pages and trigger a full TLB flush if these are invalidated.  */
static void tlb_add_large_page(CPUArchState *env, int mmu_idx,
                               target_ulong vaddr, target_ulong size)
{
    target_ulong lp_addr = env_tlb(env)->d[mmu_idx].large_page_addr;
    target_ulong lp_mask = ~(size - 1);

    if (lp_addr == (target_ulong)-1) {
        /* No previous large page.  */
        lp_addr = vaddr;
    } else {
        /* Extend the existing region to include the new page.
           This is a compromise between unnecessary flushes and
           the cost of maintaining a full variable size TLB.  */
        lp_mask &= env_tlb(env)->d[mmu_idx].large_page_mask;
        while (((lp_addr ^ vaddr) & lp_mask) != 0) {
            lp_mask <<= 1;
        }
    }
    env_tlb(env)->d[mmu_idx].large_page_addr = lp_addr & lp_mask;
    env_tlb(env)->d[mmu_idx].large_page_mask = lp_mask;
}

/* Add a new TLB entry. At most one entry for a given virtual address
 * is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the
 * supplied size is only used by tlb_flush_page.
 *
 * Called from TCG-generated code, which is under an RCU read-side
 * critical section.
 */
void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
                             hwaddr paddr, MemTxAttrs attrs, int prot,
                             int mmu_idx, target_ulong size)
{
    CPUArchState *env = cpu->env_ptr;
    CPUTLB *tlb = env_tlb(env);
    CPUTLBDesc *desc = &tlb->d[mmu_idx];
    MemoryRegionSection *section;
    unsigned int index;
    target_ulong address;
    target_ulong write_address;
    uintptr_t addend;
    CPUTLBEntry *te, tn;
    hwaddr iotlb, xlat, sz, paddr_page;
    target_ulong vaddr_page;
    int asidx = cpu_asidx_from_attrs(cpu, attrs);
    int wp_flags;
    bool is_ram, is_romd;

    assert_cpu_is_self(cpu);

    if (size <= TARGET_PAGE_SIZE) {
        sz = TARGET_PAGE_SIZE;
    } else {
        tlb_add_large_page(env, mmu_idx, vaddr, size);
        sz = size;
    }
    vaddr_page = vaddr & TARGET_PAGE_MASK;
    paddr_page = paddr & TARGET_PAGE_MASK;

    section = address_space_translate_for_iotlb(cpu, asidx, paddr_page,
                                                &xlat, &sz, attrs, &prot);
    assert(sz >= TARGET_PAGE_SIZE);

    tlb_debug("vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx
              " prot=%x idx=%d\n",
              vaddr, paddr, prot, mmu_idx);

    address = vaddr_page;
    if (size < TARGET_PAGE_SIZE) {
        /* Repeat the MMU check and TLB fill on every access.  */
        address |= TLB_INVALID_MASK;
    }
    if (attrs.byte_swap) {
        address |= TLB_BSWAP;
    }

    is_ram = memory_region_is_ram(section->mr);
    is_romd = memory_region_is_romd(section->mr);

    if (is_ram || is_romd) {
        /* RAM and ROMD both have associated host memory. */
        addend = (uintptr_t)memory_region_get_ram_ptr(section->mr) + xlat;
    } else {
        /* I/O does not; force the host address to NULL. */
        addend = 0;
    }

    write_address = address;
    if (is_ram) {
        iotlb = memory_region_get_ram_addr(section->mr) + xlat;
        /*
         * Computing is_clean is expensive; avoid all that unless
         * the page is actually writable.
         */
        if (prot & PAGE_WRITE) {
            if (section->readonly) {
                write_address |= TLB_DISCARD_WRITE;
            } else if (cpu_physical_memory_is_clean(iotlb)) {
                write_address |= TLB_NOTDIRTY;
            }
        }
    } else {
        /* I/O or ROMD */
        iotlb = memory_region_section_get_iotlb(cpu, section) + xlat;
        /*
         * Writes to romd devices must go through MMIO to enable write.
         * Reads to romd devices go through the ram_ptr found above,
         * but of course reads to I/O must go through MMIO.
         */
        write_address |= TLB_MMIO;
        if (!is_romd) {
            address = write_address;
        }
    }

    wp_flags = cpu_watchpoint_address_matches(cpu, vaddr_page,
                                              TARGET_PAGE_SIZE);

    index = tlb_index(env, mmu_idx, vaddr_page);
    te = tlb_entry(env, mmu_idx, vaddr_page);

    /*
     * Hold the TLB lock for the rest of the function. We could acquire/release
     * the lock several times in the function, but it is faster to amortize the
     * acquisition cost by acquiring it just once. Note that this leads to
     * a longer critical section, but this is not a concern since the TLB lock
     * is unlikely to be contended.
     */
    qemu_spin_lock(&tlb->c.lock);

    /* Note that the tlb is no longer clean.  */
    tlb->c.dirty |= 1 << mmu_idx;

    /* Make sure there's no cached translation for the new page.  */
    tlb_flush_vtlb_page_locked(env, mmu_idx, vaddr_page);

    /*
     * Only evict the old entry to the victim tlb if it's for a
     * different page; otherwise just overwrite the stale data.
     */
    if (!tlb_hit_page_anyprot(te, vaddr_page) && !tlb_entry_is_empty(te)) {
        unsigned vidx = desc->vindex++ % CPU_VTLB_SIZE;
        CPUTLBEntry *tv = &desc->vtable[vidx];

        /* Evict the old entry into the victim tlb.  */
        copy_tlb_helper_locked(tv, te);
        desc->viotlb[vidx] = desc->iotlb[index];
        tlb_n_used_entries_dec(env, mmu_idx);
    }

    /* refill the tlb */
    /*
     * At this point iotlb contains a physical section number in the lower
     * TARGET_PAGE_BITS, and either
     *  + the ram_addr_t of the page base of the target RAM (RAM)
     *  + the offset within section->mr of the page base (I/O, ROMD)
     * We subtract the vaddr_page (which is page aligned and thus won't
     * disturb the low bits) to give an offset which can be added to the
     * (non-page-aligned) vaddr of the eventual memory access to get
     * the MemoryRegion offset for the access. Note that the vaddr we
     * subtract here is that of the page base, and not the same as the
     * vaddr we add back in io_readx()/io_writex()/get_page_addr_code().
     */
    desc->iotlb[index].addr = iotlb - vaddr_page;
    desc->iotlb[index].attrs = attrs;

    /* Now calculate the new entry */
    tn.addend = addend - vaddr_page;
    if (prot & PAGE_READ) {
        tn.addr_read = address;
        if (wp_flags & BP_MEM_READ) {
            tn.addr_read |= TLB_WATCHPOINT;
        }
    } else {
        tn.addr_read = -1;
    }

    if (prot & PAGE_EXEC) {
        tn.addr_code = address;
    } else {
        tn.addr_code = -1;
    }

    tn.addr_write = -1;
    if (prot & PAGE_WRITE) {
        tn.addr_write = write_address;
        if (prot & PAGE_WRITE_INV) {
            tn.addr_write |= TLB_INVALID_MASK;
        }
        if (wp_flags & BP_MEM_WRITE) {
            tn.addr_write |= TLB_WATCHPOINT;
        }
    }

    copy_tlb_helper_locked(te, &tn);
    tlb_n_used_entries_inc(env, mmu_idx);
    qemu_spin_unlock(&tlb->c.lock);
}

/* Add a new TLB entry, but without specifying the memory
 * transaction attributes to be used.
 */
void tlb_set_page(CPUState *cpu, target_ulong vaddr,
                  hwaddr paddr, int prot,
                  int mmu_idx, target_ulong size)
{
    tlb_set_page_with_attrs(cpu, vaddr, paddr, MEMTXATTRS_UNSPECIFIED,
                            prot, mmu_idx, size);
}

static inline ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
{
    ram_addr_t ram_addr;

    ram_addr = qemu_ram_addr_from_host(ptr);
    if (ram_addr == RAM_ADDR_INVALID) {
        error_report("Bad ram pointer %p", ptr);
        abort();
    }
    return ram_addr;
}

/*
 * Note: tlb_fill() can trigger a resize of the TLB. This means that all of the
 * caller's prior references to the TLB table (e.g. CPUTLBEntry pointers) must
 * be discarded and looked up again (e.g. via tlb_entry()).
 */
static void tlb_fill(CPUState *cpu, target_ulong addr, int size,
                     MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);
    bool ok;

    /*
     * This is not a probe, so only valid return is success; failure
     * should result in exception + longjmp to the cpu loop.
     */
    ok = cc->tcg_ops->tlb_fill(cpu, addr, size,
                               access_type, mmu_idx, false, retaddr);
    assert(ok);
}

static inline void cpu_unaligned_access(CPUState *cpu, vaddr addr,
                                        MMUAccessType access_type,
                                        int mmu_idx, uintptr_t retaddr)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

    cc->tcg_ops->do_unaligned_access(cpu, addr, access_type, mmu_idx, retaddr);
}

static inline void cpu_transaction_failed(CPUState *cpu, hwaddr physaddr,
                                          vaddr addr, unsigned size,
                                          MMUAccessType access_type,
                                          int mmu_idx, MemTxAttrs attrs,
                                          MemTxResult response,
                                          uintptr_t retaddr)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

    if (!cpu->ignore_memory_transaction_failures &&
        cc->tcg_ops->do_transaction_failed) {
        cc->tcg_ops->do_transaction_failed(cpu, physaddr, addr, size,
                                           access_type, mmu_idx, attrs,
                                           response, retaddr);
    }
}

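/*
 * Perform an MMIO load on behalf of a guest access that cannot use the
 * fast path, dispatching to the device model with the iothread lock held.
 */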
static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
                         int mmu_idx, target_ulong addr, uintptr_t retaddr,
                         MMUAccessType access_type, MemOp op)
{
    CPUState *cpu = env_cpu(env);
    hwaddr mr_offset;
    MemoryRegionSection *section;
    MemoryRegion *mr;
    uint64_t val;
    bool locked = false;
    MemTxResult r;

    section = iotlb_to_section(cpu, iotlbentry->addr, iotlbentry->attrs);
    mr = section->mr;
    mr_offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr;
    cpu->mem_io_pc = retaddr;
    if (!cpu->can_do_io) {
        cpu_io_recompile(cpu, retaddr);
    }

    if (!qemu_mutex_iothread_locked()) {
        qemu_mutex_lock_iothread();
        locked = true;
    }
    r = memory_region_dispatch_read(mr, mr_offset, &val, op, iotlbentry->attrs);
    if (r != MEMTX_OK) {
        hwaddr physaddr = mr_offset +
            section->offset_within_address_space -
            section->offset_within_region;

        cpu_transaction_failed(cpu, physaddr, addr, memop_size(op), access_type,
                               mmu_idx, iotlbentry->attrs, r, retaddr);
    }
    if (locked) {
        qemu_mutex_unlock_iothread();
    }

    return val;
}

/*
 * Save a potentially trashed IOTLB entry for later lookup by plugin.
 * This is read by tlb_plugin_lookup if the iotlb entry doesn't match
 * because of the side effect of io_writex changing memory layout.
 */
static void save_iotlb_data(CPUState *cs, hwaddr addr,
                            MemoryRegionSection *section, hwaddr mr_offset)
{
#ifdef CONFIG_PLUGIN
    SavedIOTLB *saved = &cs->saved_iotlb;
    saved->addr = addr;
    saved->section = section;
    saved->mr_offset = mr_offset;
#endif
}

static void io_writex(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
                      int mmu_idx, uint64_t val, target_ulong addr,
                      uintptr_t retaddr, MemOp op)
{
    CPUState *cpu = env_cpu(env);
    hwaddr mr_offset;
    MemoryRegionSection *section;
    MemoryRegion *mr;
    bool locked = false;
    MemTxResult r;

    section = iotlb_to_section(cpu, iotlbentry->addr, iotlbentry->attrs);
    mr = section->mr;
    mr_offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr;
    if (!cpu->can_do_io) {
        cpu_io_recompile(cpu, retaddr);
    }
    cpu->mem_io_pc = retaddr;

    /*
     * The memory_region_dispatch may trigger a flush/resize
     * so for plugins we save the iotlb_data just in case.
     */
    save_iotlb_data(cpu, iotlbentry->addr, section, mr_offset);

    if (!qemu_mutex_iothread_locked()) {
        qemu_mutex_lock_iothread();
        locked = true;
    }
    r = memory_region_dispatch_write(mr, mr_offset, val, op, iotlbentry->attrs);
    if (r != MEMTX_OK) {
        hwaddr physaddr = mr_offset +
            section->offset_within_address_space -
            section->offset_within_region;

        cpu_transaction_failed(cpu, physaddr, addr, memop_size(op),
                               MMU_DATA_STORE, mmu_idx, iotlbentry->attrs, r,
                               retaddr);
    }
    if (locked) {
        qemu_mutex_unlock_iothread();
    }
}

static inline target_ulong tlb_read_ofs(CPUTLBEntry *entry, size_t ofs)
{
#if TCG_OVERSIZED_GUEST
    return *(target_ulong *)((uintptr_t)entry + ofs);
#else
    /* ofs might correspond to .addr_write, so use qatomic_read */
    return qatomic_read((target_ulong *)((uintptr_t)entry + ofs));
#endif
}

/* Return true if ADDR is present in the victim tlb, and has been copied
   back to the main tlb.  */
static bool victim_tlb_hit(CPUArchState *env, size_t mmu_idx, size_t index,
                           size_t elt_ofs, target_ulong page)
{
    size_t vidx;

    assert_cpu_is_self(env_cpu(env));
    for (vidx = 0; vidx < CPU_VTLB_SIZE; ++vidx) {
        CPUTLBEntry *vtlb = &env_tlb(env)->d[mmu_idx].vtable[vidx];
        target_ulong cmp;

        /* elt_ofs might correspond to .addr_write, so use qatomic_read */
#if TCG_OVERSIZED_GUEST
        cmp = *(target_ulong *)((uintptr_t)vtlb + elt_ofs);
#else
        cmp = qatomic_read((target_ulong *)((uintptr_t)vtlb + elt_ofs));
#endif

        if (cmp == page) {
            /* Found entry in victim tlb, swap tlb and iotlb.  */
            CPUTLBEntry tmptlb, *tlb = &env_tlb(env)->f[mmu_idx].table[index];

            qemu_spin_lock(&env_tlb(env)->c.lock);
            copy_tlb_helper_locked(&tmptlb, tlb);
            copy_tlb_helper_locked(tlb, vtlb);
            copy_tlb_helper_locked(vtlb, &tmptlb);
            qemu_spin_unlock(&env_tlb(env)->c.lock);

            CPUIOTLBEntry tmpio, *io = &env_tlb(env)->d[mmu_idx].iotlb[index];
            CPUIOTLBEntry *vio = &env_tlb(env)->d[mmu_idx].viotlb[vidx];
            tmpio = *io; *io = *vio; *vio = tmpio;
            return true;
        }
    }
    return false;
}

/* Macro to call the above, with local variables from the use context.  */
#define VICTIM_TLB_HIT(TY, ADDR) \
    victim_tlb_hit(env, mmu_idx, index, offsetof(CPUTLBEntry, TY), \
                   (ADDR) & TARGET_PAGE_MASK)

/*
 * Return a ram_addr_t for the virtual address for execution.
 *
 * Return -1 if we can't translate and execute from an entire page
 * of RAM.  This will force us to execute by loading and translating
 * one insn at a time, without caching.
 *
 * NOTE: This function will trigger an exception if the page is
 * not executable.
 */
tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, target_ulong addr,
                                        void **hostp)
{
    uintptr_t mmu_idx = cpu_mmu_index(env, true);
    uintptr_t index = tlb_index(env, mmu_idx, addr);
    CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
    void *p;

    if (unlikely(!tlb_hit(entry->addr_code, addr))) {
        if (!VICTIM_TLB_HIT(addr_code, addr)) {
            tlb_fill(env_cpu(env), addr, 0, MMU_INST_FETCH, mmu_idx, 0);
            index = tlb_index(env, mmu_idx, addr);
            entry = tlb_entry(env, mmu_idx, addr);

            if (unlikely(entry->addr_code & TLB_INVALID_MASK)) {
                /*
                 * The MMU protection covers a smaller range than a target
                 * page, so we must redo the MMU check for every insn.
                 */
                return -1;
            }
        }
        assert(tlb_hit(entry->addr_code, addr));
    }

    if (unlikely(entry->addr_code & TLB_MMIO)) {
        /* The region is not backed by RAM.  */
        if (hostp) {
            *hostp = NULL;
        }
        return -1;
    }

    p = (void *)((uintptr_t)addr + entry->addend);
    if (hostp) {
        *hostp = p;
    }
    return qemu_ram_addr_from_host_nofail(p);
}

tb_page_addr_t get_page_addr_code(CPUArchState *env, target_ulong addr)
{
    return get_page_addr_code_hostp(env, addr, NULL);
}

static void notdirty_write(CPUState *cpu, vaddr mem_vaddr, unsigned size,
                           CPUIOTLBEntry *iotlbentry, uintptr_t retaddr)
{
    ram_addr_t ram_addr = mem_vaddr + iotlbentry->addr;

    trace_memory_notdirty_write_access(mem_vaddr, ram_addr, size);

    if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
        struct page_collection *pages
            = page_collection_lock(ram_addr, ram_addr + size);
        tb_invalidate_phys_page_fast(pages, ram_addr, size, retaddr);
        page_collection_unlock(pages);
    }

    /*
     * Set both VGA and migration bits for simplicity and to remove
     * the notdirty callback faster.
     */
    cpu_physical_memory_set_dirty_range(ram_addr, size, DIRTY_CLIENTS_NOCODE);

    /* We remove the notdirty callback only if the code has been flushed. */
    if (!cpu_physical_memory_is_clean(ram_addr)) {
        trace_memory_notdirty_set_dirty(mem_vaddr);
        tlb_set_dirty(cpu, mem_vaddr);
    }
}

static int probe_access_internal(CPUArchState *env, target_ulong addr,
                                 int fault_size, MMUAccessType access_type,
                                 int mmu_idx, bool nonfault,
                                 void **phost, uintptr_t retaddr)
{
    uintptr_t index = tlb_index(env, mmu_idx, addr);
    CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
    target_ulong tlb_addr, page_addr;
    size_t elt_ofs;
    int flags;

    switch (access_type) {
    case MMU_DATA_LOAD:
        elt_ofs = offsetof(CPUTLBEntry, addr_read);
        break;
    case MMU_DATA_STORE:
        elt_ofs = offsetof(CPUTLBEntry, addr_write);
        break;
    case MMU_INST_FETCH:
        elt_ofs = offsetof(CPUTLBEntry, addr_code);
        break;
    default:
        g_assert_not_reached();
    }
    tlb_addr = tlb_read_ofs(entry, elt_ofs);

    page_addr = addr & TARGET_PAGE_MASK;
    if (!tlb_hit_page(tlb_addr, page_addr)) {
        if (!victim_tlb_hit(env, mmu_idx, index, elt_ofs, page_addr)) {
            CPUState *cs = env_cpu(env);
            CPUClass *cc = CPU_GET_CLASS(cs);

            if (!cc->tcg_ops->tlb_fill(cs, addr, fault_size, access_type,
                                       mmu_idx, nonfault, retaddr)) {
                /* Non-faulting page table read failed.  */
                *phost = NULL;
                return TLB_INVALID_MASK;
            }

            /* TLB resize via tlb_fill may have moved the entry.  */
            entry = tlb_entry(env, mmu_idx, addr);
        }
        tlb_addr = tlb_read_ofs(entry, elt_ofs);
    }
    flags = tlb_addr & TLB_FLAGS_MASK;

    /* Fold all "mmio-like" bits into TLB_MMIO.  This is not RAM.  */
    if (unlikely(flags & ~(TLB_WATCHPOINT | TLB_NOTDIRTY))) {
        *phost = NULL;
        return TLB_MMIO;
    }

    /* Everything else is RAM. */
    *phost = (void *)((uintptr_t)addr + entry->addend);
    return flags;
}

int probe_access_flags(CPUArchState *env, target_ulong addr,
                       MMUAccessType access_type, int mmu_idx,
                       bool nonfault, void **phost, uintptr_t retaddr)
{
    int flags;

    flags = probe_access_internal(env, addr, 0, access_type, mmu_idx,
                                  nonfault, phost, retaddr);

    /* Handle clean RAM pages.  */
    if (unlikely(flags & TLB_NOTDIRTY)) {
        uintptr_t index = tlb_index(env, mmu_idx, addr);
        CPUIOTLBEntry *iotlbentry = &env_tlb(env)->d[mmu_idx].iotlb[index];

        notdirty_write(env_cpu(env), addr, 1, iotlbentry, retaddr);
        flags &= ~TLB_NOTDIRTY;
    }

    return flags;
}

void *probe_access(CPUArchState *env, target_ulong addr, int size,
                   MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
{
    void *host;
    int flags;

    g_assert(-(addr | TARGET_PAGE_MASK) >= size);

    flags = probe_access_internal(env, addr, size, access_type, mmu_idx,
                                  false, &host, retaddr);

    /* Per the interface, size == 0 merely faults the access. */
    if (size == 0) {
        return NULL;
    }

    if (unlikely(flags & (TLB_NOTDIRTY | TLB_WATCHPOINT))) {
        uintptr_t index = tlb_index(env, mmu_idx, addr);
        CPUIOTLBEntry *iotlbentry = &env_tlb(env)->d[mmu_idx].iotlb[index];

        /* Check the watchpoint.  */
        if (flags & TLB_WATCHPOINT) {
            int wp_access = (access_type == MMU_DATA_STORE
                             ? BP_MEM_WRITE : BP_MEM_READ);
            cpu_check_watchpoint(env_cpu(env), addr, size,
                                 iotlbentry->attrs, wp_access, retaddr);
        }

        /* Handle clean RAM pages.  */
        if (flags & TLB_NOTDIRTY) {
            notdirty_write(env_cpu(env), addr, 1, iotlbentry, retaddr);
        }
    }

    return host;
}

void *tlb_vaddr_to_host(CPUArchState *env, abi_ptr addr,
                        MMUAccessType access_type, int mmu_idx)
{
    void *host;
    int flags;

    flags = probe_access_internal(env, addr, 0, access_type,
                                  mmu_idx, true, &host, 0);

    /* No combination of flags are expected by the caller. */
    return flags ? NULL : host;
}

#ifdef CONFIG_PLUGIN
/*
 * Perform a TLB lookup and populate the qemu_plugin_hwaddr structure.
 * This should be a hot path as we will have just looked this up
 * in the softmmu lookup code (or helper). We don't handle re-fills or
 * checking the victim table. This is purely informational.
 *
 * This almost never fails as the memory access being instrumented
 * should have just filled the TLB. The one corner case is io_writex
 * which can cause TLB flushes and potential resizing of the TLBs
 * losing the information we need. In those cases we need to recover
 * data from a copy of the iotlbentry. As long as this always occurs
 * from the same thread (which a mem callback will be) this is safe.
 */
bool tlb_plugin_lookup(CPUState *cpu, target_ulong addr, int mmu_idx,
                       bool is_store, struct qemu_plugin_hwaddr *data)
{
    CPUArchState *env = cpu->env_ptr;
    CPUTLBEntry *tlbe = tlb_entry(env, mmu_idx, addr);
    uintptr_t index = tlb_index(env, mmu_idx, addr);
    target_ulong tlb_addr = is_store ? tlb_addr_write(tlbe) : tlbe->addr_read;

    if (likely(tlb_hit(tlb_addr, addr))) {
        /* We must have an iotlb entry for MMIO */
        if (tlb_addr & TLB_MMIO) {
            CPUIOTLBEntry *iotlbentry;
            iotlbentry = &env_tlb(env)->d[mmu_idx].iotlb[index];
            data->is_io = true;
            data->v.io.section =
                iotlb_to_section(cpu, iotlbentry->addr, iotlbentry->attrs);
            data->v.io.offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr;
        } else {
            data->is_io = false;
            data->v.ram.hostaddr = addr + tlbe->addend;
        }
        return true;
    } else {
        SavedIOTLB *saved = &cpu->saved_iotlb;
        data->is_io = true;
        data->v.io.section = saved->section;
        data->v.io.offset = saved->mr_offset;
        return true;
    }
}

#endif

/* Probe for a read-modify-write atomic operation.  Do not allow unaligned
 * operations, or io operations to proceed.  Return the host address.  */
static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
                               TCGMemOpIdx oi, uintptr_t retaddr)
{
    size_t mmu_idx = get_mmuidx(oi);
    uintptr_t index = tlb_index(env, mmu_idx, addr);
    CPUTLBEntry *tlbe = tlb_entry(env, mmu_idx, addr);
    target_ulong tlb_addr = tlb_addr_write(tlbe);
    MemOp mop = get_memop(oi);
    int a_bits = get_alignment_bits(mop);
    int s_bits = mop & MO_SIZE;
    void *hostaddr;

    /* Adjust the given return address.  */
    retaddr -= GETPC_ADJ;

    /* Enforce guest required alignment.  */
    if (unlikely(a_bits > 0 && (addr & ((1 << a_bits) - 1)))) {
        /* ??? Maybe indicate atomic op to cpu_unaligned_access */
        cpu_unaligned_access(env_cpu(env), addr, MMU_DATA_STORE,
                             mmu_idx, retaddr);
    }

    /* Enforce qemu required alignment.  */
    if (unlikely(addr & ((1 << s_bits) - 1))) {
        /* We get here if guest alignment was not requested,
           or was not enforced by cpu_unaligned_access above.
           We might widen the access and emulate, but for now
           mark an exception and exit the cpu loop.  */
        goto stop_the_world;
    }

    /* Check TLB entry and enforce page permissions.  */
    if (!tlb_hit(tlb_addr, addr)) {
        if (!VICTIM_TLB_HIT(addr_write, addr)) {
            tlb_fill(env_cpu(env), addr, 1 << s_bits, MMU_DATA_STORE,
                     mmu_idx, retaddr);
            index = tlb_index(env, mmu_idx, addr);
            tlbe = tlb_entry(env, mmu_idx, addr);
        }
        tlb_addr = tlb_addr_write(tlbe) & ~TLB_INVALID_MASK;
    }

    /* Notice an IO access or a needs-MMU-lookup access */
    if (unlikely(tlb_addr & TLB_MMIO)) {
        /* There's really nothing that can be done to
           support this apart from stop-the-world.  */
        goto stop_the_world;
    }

    /* Let the guest notice RMW on a write-only page.  */
    if (unlikely(tlbe->addr_read != (tlb_addr & ~TLB_NOTDIRTY))) {
        tlb_fill(env_cpu(env), addr, 1 << s_bits, MMU_DATA_LOAD,
                 mmu_idx, retaddr);
        /* Since we don't support reads and writes to different addresses,
           and we do have the proper page loaded for write, this shouldn't
           ever return.  But just in case, handle via stop-the-world.  */
        goto stop_the_world;
    }

    hostaddr = (void *)((uintptr_t)addr + tlbe->addend);

    if (unlikely(tlb_addr & TLB_NOTDIRTY)) {
        notdirty_write(env_cpu(env), addr, 1 << s_bits,
                       &env_tlb(env)->d[mmu_idx].iotlb[index], retaddr);
    }

    return hostaddr;

 stop_the_world:
    cpu_loop_exit_atomic(env_cpu(env), retaddr);
}

/*
 * Load Helpers
 *
 * We support two different access types. SOFTMMU_CODE_ACCESS is
 * specifically for reading instructions from system memory. It is
 * called by the translation loop and in some helpers where the code
 * is disassembled. It shouldn't be called directly by guest code.
 */

typedef uint64_t FullLoadHelper(CPUArchState *env, target_ulong addr,
                                TCGMemOpIdx oi, uintptr_t retaddr);

static inline uint64_t QEMU_ALWAYS_INLINE
load_memop(const void *haddr, MemOp op)
{
    switch (op) {
    case MO_UB:
        return ldub_p(haddr);
    case MO_BEUW:
        return lduw_be_p(haddr);
    case MO_LEUW:
        return lduw_le_p(haddr);
    case MO_BEUL:
        return (uint32_t)ldl_be_p(haddr);
    case MO_LEUL:
        return (uint32_t)ldl_le_p(haddr);
    case MO_BEQ:
        return ldq_be_p(haddr);
    case MO_LEQ:
        return ldq_le_p(haddr);
    default:
        qemu_build_not_reached();
    }
}

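/*
 * The generic slow path for a load: resolve the TLB entry, refilling via
 * tlb_fill() on a miss, then dispatch to MMIO, watchpoint, page-spanning
 * or plain host-memory handling as the TLB flag bits dictate.
 */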
static inline uint64_t QEMU_ALWAYS_INLINE
load_helper(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi,
            uintptr_t retaddr, MemOp op, bool code_read,
            FullLoadHelper *full_load)
{
    uintptr_t mmu_idx = get_mmuidx(oi);
    uintptr_t index = tlb_index(env, mmu_idx, addr);
    CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
    target_ulong tlb_addr = code_read ? entry->addr_code : entry->addr_read;
    const size_t tlb_off = code_read ?
        offsetof(CPUTLBEntry, addr_code) : offsetof(CPUTLBEntry, addr_read);
    const MMUAccessType access_type =
        code_read ? MMU_INST_FETCH : MMU_DATA_LOAD;
    unsigned a_bits = get_alignment_bits(get_memop(oi));
    void *haddr;
    uint64_t res;
    size_t size = memop_size(op);

    /* Handle CPU specific unaligned behaviour */
    if (addr & ((1 << a_bits) - 1)) {
        cpu_unaligned_access(env_cpu(env), addr, access_type,
                             mmu_idx, retaddr);
    }

    /* If the TLB entry is for a different page, reload and try again.  */
    if (!tlb_hit(tlb_addr, addr)) {
        if (!victim_tlb_hit(env, mmu_idx, index, tlb_off,
                            addr & TARGET_PAGE_MASK)) {
            tlb_fill(env_cpu(env), addr, size,
                     access_type, mmu_idx, retaddr);
            index = tlb_index(env, mmu_idx, addr);
            entry = tlb_entry(env, mmu_idx, addr);
        }
        tlb_addr = code_read ? entry->addr_code : entry->addr_read;
        tlb_addr &= ~TLB_INVALID_MASK;
    }

    /* Handle anything that isn't just a straight memory access.  */
    if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
        CPUIOTLBEntry *iotlbentry;
        bool need_swap;

        /* For anything that is unaligned, recurse through full_load.  */
        if ((addr & (size - 1)) != 0) {
            goto do_unaligned_access;
        }

        iotlbentry = &env_tlb(env)->d[mmu_idx].iotlb[index];

        /* Handle watchpoints.  */
        if (unlikely(tlb_addr & TLB_WATCHPOINT)) {
            /* On watchpoint hit, this will longjmp out.  */
            cpu_check_watchpoint(env_cpu(env), addr, size,
                                 iotlbentry->attrs, BP_MEM_READ, retaddr);
        }

        need_swap = size > 1 && (tlb_addr & TLB_BSWAP);

        /* Handle I/O access.  */
        if (likely(tlb_addr & TLB_MMIO)) {
            return io_readx(env, iotlbentry, mmu_idx, addr, retaddr,
                            access_type, op ^ (need_swap * MO_BSWAP));
        }

        haddr = (void *)((uintptr_t)addr + entry->addend);

        /*
         * Keep these two load_memop separate to ensure that the compiler
         * is able to fold the entire function to a single instruction.
         * There is a build-time assert inside to remind you of this.  ;-)
         */
        if (unlikely(need_swap)) {
            return load_memop(haddr, op ^ MO_BSWAP);
        }
        return load_memop(haddr, op);
    }

    /* Handle slow unaligned access (it spans two pages or IO).  */
    if (size > 1
        && unlikely((addr & ~TARGET_PAGE_MASK) + size - 1
                    >= TARGET_PAGE_SIZE)) {
        target_ulong addr1, addr2;
        uint64_t r1, r2;
        unsigned shift;
    do_unaligned_access:
        addr1 = addr & ~((target_ulong)size - 1);
        addr2 = addr1 + size;
        r1 = full_load(env, addr1, oi, retaddr);
        r2 = full_load(env, addr2, oi, retaddr);
        shift = (addr & (size - 1)) * 8;

        if (memop_big_endian(op)) {
            /* Big-endian combine.  */
            res = (r1 << shift) | (r2 >> ((size * 8) - shift));
        } else {
            /* Little-endian combine.  */
            res = (r1 >> shift) | (r2 << ((size * 8) - shift));
        }
        return res & MAKE_64BIT_MASK(0, size * 8);
    }

    haddr = (void *)((uintptr_t)addr + entry->addend);
    return load_memop(haddr, op);
}

/*
 * For the benefit of TCG generated code, we want to avoid the
 * complication of ABI-specific return type promotion and always
 * return a value extended to the register size of the host. This is
 * tcg_target_long, except in the case of a 32-bit host and 64-bit
 * data, and for that we always have uint64_t.
 *
 * We don't bother with this widened value for SOFTMMU_CODE_ACCESS.
 */

static uint64_t full_ldub_mmu(CPUArchState *env, target_ulong addr,
                              TCGMemOpIdx oi, uintptr_t retaddr)
{
    return load_helper(env, addr, oi, retaddr, MO_UB, false, full_ldub_mmu);
}

tcg_target_ulong helper_ret_ldub_mmu(CPUArchState *env, target_ulong addr,
                                     TCGMemOpIdx oi, uintptr_t retaddr)
{
    return full_ldub_mmu(env, addr, oi, retaddr);
}

static uint64_t full_le_lduw_mmu(CPUArchState *env, target_ulong addr,
                                 TCGMemOpIdx oi, uintptr_t retaddr)
{
    return load_helper(env, addr, oi, retaddr, MO_LEUW, false,
                       full_le_lduw_mmu);
}

tcg_target_ulong helper_le_lduw_mmu(CPUArchState *env, target_ulong addr,
                                    TCGMemOpIdx oi, uintptr_t retaddr)
{
    return full_le_lduw_mmu(env, addr, oi, retaddr);
}

static uint64_t full_be_lduw_mmu(CPUArchState *env, target_ulong addr,
                                 TCGMemOpIdx oi, uintptr_t retaddr)
{
    return load_helper(env, addr, oi, retaddr, MO_BEUW, false,
                       full_be_lduw_mmu);
}

tcg_target_ulong helper_be_lduw_mmu(CPUArchState *env, target_ulong addr,
                                    TCGMemOpIdx oi, uintptr_t retaddr)
{
    return full_be_lduw_mmu(env, addr, oi, retaddr);
}

static uint64_t full_le_ldul_mmu(CPUArchState *env, target_ulong addr,
                                 TCGMemOpIdx oi, uintptr_t retaddr)
{
    return load_helper(env, addr, oi, retaddr, MO_LEUL, false,
                       full_le_ldul_mmu);
}

tcg_target_ulong helper_le_ldul_mmu(CPUArchState *env, target_ulong addr,
                                    TCGMemOpIdx oi, uintptr_t retaddr)
{
    return full_le_ldul_mmu(env, addr, oi, retaddr);
}

static uint64_t full_be_ldul_mmu(CPUArchState *env, target_ulong addr,
                                 TCGMemOpIdx oi, uintptr_t retaddr)
{
    return load_helper(env, addr, oi, retaddr, MO_BEUL, false,
                       full_be_ldul_mmu);
}

tcg_target_ulong helper_be_ldul_mmu(CPUArchState *env, target_ulong addr,
                                    TCGMemOpIdx oi, uintptr_t retaddr)
{
    return full_be_ldul_mmu(env, addr, oi, retaddr);
}

uint64_t helper_le_ldq_mmu(CPUArchState *env, target_ulong addr,
                           TCGMemOpIdx oi, uintptr_t retaddr)
{
    return load_helper(env, addr, oi, retaddr, MO_LEQ, false,
                       helper_le_ldq_mmu);
}

uint64_t helper_be_ldq_mmu(CPUArchState *env, target_ulong addr,
                           TCGMemOpIdx oi, uintptr_t retaddr)
{
    return load_helper(env, addr, oi, retaddr, MO_BEQ, false,
                       helper_be_ldq_mmu);
}

/*
 * Provide signed versions of the load routines as well.  We can of course
 * avoid this for 64-bit data, or for 32-bit data on 32-bit host.
 */

tcg_target_ulong helper_ret_ldsb_mmu(CPUArchState *env, target_ulong addr,
                                     TCGMemOpIdx oi, uintptr_t retaddr)
{
    return (int8_t)helper_ret_ldub_mmu(env, addr, oi, retaddr);
}

tcg_target_ulong helper_le_ldsw_mmu(CPUArchState *env, target_ulong addr,
                                    TCGMemOpIdx oi, uintptr_t retaddr)
{
    return (int16_t)helper_le_lduw_mmu(env, addr, oi, retaddr);
}

tcg_target_ulong helper_be_ldsw_mmu(CPUArchState *env, target_ulong addr,
                                    TCGMemOpIdx oi, uintptr_t retaddr)
{
    return (int16_t)helper_be_lduw_mmu(env, addr, oi, retaddr);
}

tcg_target_ulong helper_le_ldsl_mmu(CPUArchState *env, target_ulong addr,
                                    TCGMemOpIdx oi, uintptr_t retaddr)
{
    return (int32_t)helper_le_ldul_mmu(env, addr, oi, retaddr);
}

tcg_target_ulong helper_be_ldsl_mmu(CPUArchState *env, target_ulong addr,
                                    TCGMemOpIdx oi, uintptr_t retaddr)
{
    return (int32_t)helper_be_ldul_mmu(env, addr, oi, retaddr);
}

/*
 * Load helpers for cpu_ldst.h.
 */

static inline uint64_t cpu_load_helper(CPUArchState *env, abi_ptr addr,
                                       int mmu_idx, uintptr_t retaddr,
                                       MemOp op, FullLoadHelper *full_load)
{
    uint16_t meminfo;
    TCGMemOpIdx oi;
    uint64_t ret;

    meminfo = trace_mem_get_info(op, mmu_idx, false);
    trace_guest_mem_before_exec(env_cpu(env), addr, meminfo);

    op &= ~MO_SIGN;
    oi = make_memop_idx(op, mmu_idx);
    ret = full_load(env, addr, oi, retaddr);

    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, meminfo);

    return ret;
}

uint32_t cpu_ldub_mmuidx_ra(CPUArchState *env, abi_ptr addr,
                            int mmu_idx, uintptr_t ra)
{
    return cpu_load_helper(env, addr, mmu_idx, ra, MO_UB, full_ldub_mmu);
}

int cpu_ldsb_mmuidx_ra(CPUArchState *env, abi_ptr addr,
                       int mmu_idx, uintptr_t ra)
{
    return (int8_t)cpu_load_helper(env, addr, mmu_idx, ra, MO_SB,
                                   full_ldub_mmu);
}

uint32_t cpu_lduw_be_mmuidx_ra(CPUArchState *env, abi_ptr addr,
                               int mmu_idx, uintptr_t ra)
{
    return cpu_load_helper(env, addr, mmu_idx, ra, MO_BEUW, full_be_lduw_mmu);
}

int cpu_ldsw_be_mmuidx_ra(CPUArchState *env, abi_ptr addr,
                          int mmu_idx, uintptr_t ra)
{
    return (int16_t)cpu_load_helper(env, addr, mmu_idx, ra, MO_BESW,
                                    full_be_lduw_mmu);
}

uint32_t cpu_ldl_be_mmuidx_ra(CPUArchState *env, abi_ptr addr,
                              int mmu_idx, uintptr_t ra)
{
    return cpu_load_helper(env, addr, mmu_idx, ra, MO_BEUL, full_be_ldul_mmu);
}

uint64_t cpu_ldq_be_mmuidx_ra(CPUArchState *env, abi_ptr addr,
                              int mmu_idx, uintptr_t ra)
{
    return cpu_load_helper(env, addr, mmu_idx, ra, MO_BEQ, helper_be_ldq_mmu);
}

uint32_t cpu_lduw_le_mmuidx_ra(CPUArchState *env, abi_ptr addr,
                               int mmu_idx, uintptr_t ra)
{
    return cpu_load_helper(env, addr, mmu_idx, ra, MO_LEUW, full_le_lduw_mmu);
}

int cpu_ldsw_le_mmuidx_ra(CPUArchState *env, abi_ptr addr,
                          int mmu_idx, uintptr_t ra)
{
    return (int16_t)cpu_load_helper(env, addr, mmu_idx, ra, MO_LESW,
                                    full_le_lduw_mmu);
}

uint32_t cpu_ldl_le_mmuidx_ra(CPUArchState *env, abi_ptr addr,
                              int mmu_idx, uintptr_t ra)
{
    return cpu_load_helper(env, addr, mmu_idx, ra, MO_LEUL, full_le_ldul_mmu);
}

uint64_t cpu_ldq_le_mmuidx_ra(CPUArchState *env, abi_ptr addr,
                              int mmu_idx, uintptr_t ra)
{
    return cpu_load_helper(env, addr, mmu_idx, ra, MO_LEQ, helper_le_ldq_mmu);
}

uint32_t cpu_ldub_data_ra(CPUArchState *env, target_ulong ptr,
                          uintptr_t retaddr)
{
    return cpu_ldub_mmuidx_ra(env, ptr, cpu_mmu_index(env, false), retaddr);
}

int cpu_ldsb_data_ra(CPUArchState *env, target_ulong ptr, uintptr_t retaddr)
{
    return cpu_ldsb_mmuidx_ra(env, ptr, cpu_mmu_index(env, false), retaddr);
}

uint32_t cpu_lduw_be_data_ra(CPUArchState *env, target_ulong ptr,
                             uintptr_t retaddr)
{
    return cpu_lduw_be_mmuidx_ra(env, ptr, cpu_mmu_index(env, false), retaddr);
}

int cpu_ldsw_be_data_ra(CPUArchState *env, target_ulong ptr, uintptr_t retaddr)
{
    return cpu_ldsw_be_mmuidx_ra(env, ptr, cpu_mmu_index(env, false), retaddr);
}

uint32_t cpu_ldl_be_data_ra(CPUArchState *env, target_ulong ptr,
                            uintptr_t retaddr)
{
    return cpu_ldl_be_mmuidx_ra(env, ptr, cpu_mmu_index(env, false), retaddr);
}

uint64_t cpu_ldq_be_data_ra(CPUArchState *env, target_ulong ptr,
                            uintptr_t retaddr)
{
    return cpu_ldq_be_mmuidx_ra(env, ptr, cpu_mmu_index(env, false), retaddr);
}

uint32_t cpu_lduw_le_data_ra(CPUArchState *env, target_ulong ptr,
                             uintptr_t retaddr)
{
    return cpu_lduw_le_mmuidx_ra(env, ptr, cpu_mmu_index(env, false), retaddr);
}

int cpu_ldsw_le_data_ra(CPUArchState *env, target_ulong ptr, uintptr_t retaddr)
{
    return cpu_ldsw_le_mmuidx_ra(env, ptr, cpu_mmu_index(env, false), retaddr);
}

uint32_t cpu_ldl_le_data_ra(CPUArchState *env, target_ulong ptr,
                            uintptr_t retaddr)
{
    return cpu_ldl_le_mmuidx_ra(env, ptr, cpu_mmu_index(env, false), retaddr);
}

uint64_t cpu_ldq_le_data_ra(CPUArchState *env, target_ulong ptr,
                            uintptr_t retaddr)
{
    return cpu_ldq_le_mmuidx_ra(env, ptr, cpu_mmu_index(env, false), retaddr);
}

uint32_t cpu_ldub_data(CPUArchState *env, target_ulong ptr)
{
    return cpu_ldub_data_ra(env, ptr, 0);
}

int cpu_ldsb_data(CPUArchState *env, target_ulong ptr)
{
    return cpu_ldsb_data_ra(env, ptr, 0);
}

uint32_t cpu_lduw_be_data(CPUArchState *env, target_ulong ptr)
{
    return cpu_lduw_be_data_ra(env, ptr, 0);
}

int cpu_ldsw_be_data(CPUArchState *env, target_ulong ptr)
{
    return cpu_ldsw_be_data_ra(env, ptr, 0);
}

uint32_t cpu_ldl_be_data(CPUArchState *env, target_ulong ptr)
{
    return cpu_ldl_be_data_ra(env, ptr, 0);
}

uint64_t cpu_ldq_be_data(CPUArchState *env, target_ulong ptr)
{
    return cpu_ldq_be_data_ra(env, ptr, 0);
}

uint32_t cpu_lduw_le_data(CPUArchState *env, target_ulong ptr)
{
    return cpu_lduw_le_data_ra(env, ptr, 0);
}

int cpu_ldsw_le_data(CPUArchState *env, target_ulong ptr)
{
    return cpu_ldsw_le_data_ra(env, ptr, 0);
}

uint32_t cpu_ldl_le_data(CPUArchState *env, target_ulong ptr)
{
    return cpu_ldl_le_data_ra(env, ptr, 0);
}

uint64_t cpu_ldq_le_data(CPUArchState *env, target_ulong ptr)
{
    return cpu_ldq_le_data_ra(env, ptr, 0);
}
2281
2282
2283
2284
2285
/*
 * Store VAL to the host address HADDR, using the access size and
 * endianness given by OP.
 */
static inline void QEMU_ALWAYS_INLINE
store_memop(void *haddr, uint64_t val, MemOp op)
{
    switch (op) {
    case MO_UB:
        stb_p(haddr, val);
        break;
    case MO_BEUW:
        stw_be_p(haddr, val);
        break;
    case MO_LEUW:
        stw_le_p(haddr, val);
        break;
    case MO_BEUL:
        stl_be_p(haddr, val);
        break;
    case MO_LEUL:
        stl_le_p(haddr, val);
        break;
    case MO_BEQ:
        stq_be_p(haddr, val);
        break;
    case MO_LEQ:
        stq_le_p(haddr, val);
        break;
    default:
        qemu_build_not_reached();
    }
}
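
/*
 * A worked example of the dispatch above: store_memop(haddr, 0x11223344,
 * MO_LEUL) resolves to stl_le_p() and writes the bytes 44 33 22 11 at
 * haddr + 0 .. haddr + 3, while MO_BEUL writes 11 22 33 44, regardless
 * of the host's native byte order.
 */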

static void __attribute__((noinline))
store_helper_unaligned(CPUArchState *env, target_ulong addr, uint64_t val,
                       uintptr_t retaddr, size_t size, uintptr_t mmu_idx,
                       bool big_endian)
{
    const size_t tlb_off = offsetof(CPUTLBEntry, addr_write);
    uintptr_t index, index2;
    CPUTLBEntry *entry, *entry2;
    target_ulong page2, tlb_addr, tlb_addr2;
    TCGMemOpIdx oi;
    size_t size2;
    int i;

    /*
     * Ensure the second page is in the TLB.  Note that the first page
     * is already guaranteed to be filled, and that the second page
     * cannot evict the first.
     */
    page2 = (addr + size) & TARGET_PAGE_MASK;
    size2 = (addr + size) & ~TARGET_PAGE_MASK;
    index2 = tlb_index(env, mmu_idx, page2);
    entry2 = tlb_entry(env, mmu_idx, page2);

    tlb_addr2 = tlb_addr_write(entry2);
    if (!tlb_hit_page(tlb_addr2, page2)) {
        if (!victim_tlb_hit(env, mmu_idx, index2, tlb_off, page2)) {
            tlb_fill(env_cpu(env), page2, size2, MMU_DATA_STORE,
                     mmu_idx, retaddr);
            index2 = tlb_index(env, mmu_idx, page2);
            entry2 = tlb_entry(env, mmu_idx, page2);
        }
        tlb_addr2 = tlb_addr_write(entry2);
    }

    index = tlb_index(env, mmu_idx, addr);
    entry = tlb_entry(env, mmu_idx, addr);
    tlb_addr = tlb_addr_write(entry);

    /*
     * Handle watchpoints.  Since this may trap, all of the
     * checks must happen before any store.
     */
    if (unlikely(tlb_addr & TLB_WATCHPOINT)) {
        cpu_check_watchpoint(env_cpu(env), addr, size - size2,
                             env_tlb(env)->d[mmu_idx].iotlb[index].attrs,
                             BP_MEM_WRITE, retaddr);
    }
    if (unlikely(tlb_addr2 & TLB_WATCHPOINT)) {
        cpu_check_watchpoint(env_cpu(env), page2, size2,
                             env_tlb(env)->d[mmu_idx].iotlb[index2].attrs,
                             BP_MEM_WRITE, retaddr);
    }

    /*
     * XXX: not efficient, but simple.
     * This loop must go in the forward direction to avoid issues
     * with self-modifying code in Windows 64-bit.
     */
    oi = make_memop_idx(MO_UB, mmu_idx);
    if (big_endian) {
        for (i = 0; i < size; ++i) {
            /* Big-endian extract.  */
            uint8_t val8 = val >> (((size - 1) * 8) - (i * 8));
            helper_ret_stb_mmu(env, addr + i, val8, oi, retaddr);
        }
    } else {
        for (i = 0; i < size; ++i) {
            /* Little-endian extract.  */
            uint8_t val8 = val >> (i * 8);
            helper_ret_stb_mmu(env, addr + i, val8, oi, retaddr);
        }
    }
}
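
/*
 * To make the extraction above concrete: for size == 4, val == 0x11223344
 * and big_endian, the shift amounts are 24, 16, 8, 0, so the bytes
 * 11 22 33 44 land at addr + 0 .. addr + 3; the little-endian loop uses
 * shifts 0, 8, 16, 24 and stores 44 33 22 11 instead.
 */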

static inline void QEMU_ALWAYS_INLINE
store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
             TCGMemOpIdx oi, uintptr_t retaddr, MemOp op)
{
    uintptr_t mmu_idx = get_mmuidx(oi);
    uintptr_t index = tlb_index(env, mmu_idx, addr);
    CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
    target_ulong tlb_addr = tlb_addr_write(entry);
    const size_t tlb_off = offsetof(CPUTLBEntry, addr_write);
    unsigned a_bits = get_alignment_bits(get_memop(oi));
    void *haddr;
    size_t size = memop_size(op);

    /* Handle CPU specific unaligned behaviour */
    if (addr & ((1 << a_bits) - 1)) {
        cpu_unaligned_access(env_cpu(env), addr, MMU_DATA_STORE,
                             mmu_idx, retaddr);
    }

    /* If the TLB entry is for a different page, reload and try again.  */
    if (!tlb_hit(tlb_addr, addr)) {
        if (!victim_tlb_hit(env, mmu_idx, index, tlb_off,
                            addr & TARGET_PAGE_MASK)) {
            tlb_fill(env_cpu(env), addr, size, MMU_DATA_STORE,
                     mmu_idx, retaddr);
            index = tlb_index(env, mmu_idx, addr);
            entry = tlb_entry(env, mmu_idx, addr);
        }
        tlb_addr = tlb_addr_write(entry) & ~TLB_INVALID_MASK;
    }

    /* Handle anything that isn't just a straight memory access.  */
    if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
        CPUIOTLBEntry *iotlbentry;
        bool need_swap;

        /* For anything that is unaligned, recurse through byte stores.  */
        if ((addr & (size - 1)) != 0) {
            goto do_unaligned_access;
        }

        iotlbentry = &env_tlb(env)->d[mmu_idx].iotlb[index];

        /* Handle watchpoints.  */
        if (unlikely(tlb_addr & TLB_WATCHPOINT)) {
            /* On watchpoint hit, this will longjmp out.  */
            cpu_check_watchpoint(env_cpu(env), addr, size,
                                 iotlbentry->attrs, BP_MEM_WRITE, retaddr);
        }

        need_swap = size > 1 && (tlb_addr & TLB_BSWAP);

        /* Handle I/O access.  */
        if (tlb_addr & TLB_MMIO) {
            io_writex(env, iotlbentry, mmu_idx, val, addr, retaddr,
                      op ^ (need_swap * MO_BSWAP));
            return;
        }

        /* Ignore writes to ROM.  */
        if (unlikely(tlb_addr & TLB_DISCARD_WRITE)) {
            return;
        }

        /* Handle clean RAM pages: mark them dirty before writing.  */
        if (tlb_addr & TLB_NOTDIRTY) {
            notdirty_write(env_cpu(env), addr, size, iotlbentry, retaddr);
        }

        haddr = (void *)((uintptr_t)addr + entry->addend);

        /*
         * Keep these two store_memop calls separate to ensure that
         * the compiler is able to fold each to a single host store.
         */
        if (unlikely(need_swap)) {
            store_memop(haddr, val, op ^ MO_BSWAP);
        } else {
            store_memop(haddr, val, op);
        }
        return;
    }

    /* Handle slow unaligned access (it spans two pages).  */
    if (size > 1
        && unlikely((addr & ~TARGET_PAGE_MASK) + size - 1
                    >= TARGET_PAGE_SIZE)) {
    do_unaligned_access:
        store_helper_unaligned(env, addr, val, retaddr, size,
                               mmu_idx, memop_big_endian(op));
        return;
    }

    haddr = (void *)((uintptr_t)addr + entry->addend);
    store_memop(haddr, val, op);
}

void __attribute__((noinline))
helper_ret_stb_mmu(CPUArchState *env, target_ulong addr, uint8_t val,
                   TCGMemOpIdx oi, uintptr_t retaddr)
{
    store_helper(env, addr, val, oi, retaddr, MO_UB);
}

void helper_le_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
                       TCGMemOpIdx oi, uintptr_t retaddr)
{
    store_helper(env, addr, val, oi, retaddr, MO_LEUW);
}

void helper_be_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
                       TCGMemOpIdx oi, uintptr_t retaddr)
{
    store_helper(env, addr, val, oi, retaddr, MO_BEUW);
}

void helper_le_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
                       TCGMemOpIdx oi, uintptr_t retaddr)
{
    store_helper(env, addr, val, oi, retaddr, MO_LEUL);
}

void helper_be_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
                       TCGMemOpIdx oi, uintptr_t retaddr)
{
    store_helper(env, addr, val, oi, retaddr, MO_BEUL);
}

void helper_le_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
                       TCGMemOpIdx oi, uintptr_t retaddr)
{
    store_helper(env, addr, val, oi, retaddr, MO_LEQ);
}

void helper_be_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
                       TCGMemOpIdx oi, uintptr_t retaddr)
{
    store_helper(env, addr, val, oi, retaddr, MO_BEQ);
}

/*
 * Store Helpers for cpu_ldst.h
 */
static inline void QEMU_ALWAYS_INLINE
cpu_store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
                 int mmu_idx, uintptr_t retaddr, MemOp op)
{
    TCGMemOpIdx oi;
    uint16_t meminfo;

    meminfo = trace_mem_get_info(op, mmu_idx, true);
    trace_guest_mem_before_exec(env_cpu(env), addr, meminfo);

    oi = make_memop_idx(op, mmu_idx);
    store_helper(env, addr, val, oi, retaddr, op);

    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, meminfo);
}

void cpu_stb_mmuidx_ra(CPUArchState *env, target_ulong addr, uint32_t val,
                       int mmu_idx, uintptr_t retaddr)
{
    cpu_store_helper(env, addr, val, mmu_idx, retaddr, MO_UB);
}

void cpu_stw_be_mmuidx_ra(CPUArchState *env, target_ulong addr, uint32_t val,
                          int mmu_idx, uintptr_t retaddr)
{
    cpu_store_helper(env, addr, val, mmu_idx, retaddr, MO_BEUW);
}

void cpu_stl_be_mmuidx_ra(CPUArchState *env, target_ulong addr, uint32_t val,
                          int mmu_idx, uintptr_t retaddr)
{
    cpu_store_helper(env, addr, val, mmu_idx, retaddr, MO_BEUL);
}

void cpu_stq_be_mmuidx_ra(CPUArchState *env, target_ulong addr, uint64_t val,
                          int mmu_idx, uintptr_t retaddr)
{
    cpu_store_helper(env, addr, val, mmu_idx, retaddr, MO_BEQ);
}

void cpu_stw_le_mmuidx_ra(CPUArchState *env, target_ulong addr, uint32_t val,
                          int mmu_idx, uintptr_t retaddr)
{
    cpu_store_helper(env, addr, val, mmu_idx, retaddr, MO_LEUW);
}

void cpu_stl_le_mmuidx_ra(CPUArchState *env, target_ulong addr, uint32_t val,
                          int mmu_idx, uintptr_t retaddr)
{
    cpu_store_helper(env, addr, val, mmu_idx, retaddr, MO_LEUL);
}

void cpu_stq_le_mmuidx_ra(CPUArchState *env, target_ulong addr, uint64_t val,
                          int mmu_idx, uintptr_t retaddr)
{
    cpu_store_helper(env, addr, val, mmu_idx, retaddr, MO_LEQ);
}

void cpu_stb_data_ra(CPUArchState *env, target_ulong ptr,
                     uint32_t val, uintptr_t retaddr)
{
    cpu_stb_mmuidx_ra(env, ptr, val, cpu_mmu_index(env, false), retaddr);
}

void cpu_stw_be_data_ra(CPUArchState *env, target_ulong ptr,
                        uint32_t val, uintptr_t retaddr)
{
    cpu_stw_be_mmuidx_ra(env, ptr, val, cpu_mmu_index(env, false), retaddr);
}

void cpu_stl_be_data_ra(CPUArchState *env, target_ulong ptr,
                        uint32_t val, uintptr_t retaddr)
{
    cpu_stl_be_mmuidx_ra(env, ptr, val, cpu_mmu_index(env, false), retaddr);
}

void cpu_stq_be_data_ra(CPUArchState *env, target_ulong ptr,
                        uint64_t val, uintptr_t retaddr)
{
    cpu_stq_be_mmuidx_ra(env, ptr, val, cpu_mmu_index(env, false), retaddr);
}

void cpu_stw_le_data_ra(CPUArchState *env, target_ulong ptr,
                        uint32_t val, uintptr_t retaddr)
{
    cpu_stw_le_mmuidx_ra(env, ptr, val, cpu_mmu_index(env, false), retaddr);
}

void cpu_stl_le_data_ra(CPUArchState *env, target_ulong ptr,
                        uint32_t val, uintptr_t retaddr)
{
    cpu_stl_le_mmuidx_ra(env, ptr, val, cpu_mmu_index(env, false), retaddr);
}

void cpu_stq_le_data_ra(CPUArchState *env, target_ulong ptr,
                        uint64_t val, uintptr_t retaddr)
{
    cpu_stq_le_mmuidx_ra(env, ptr, val, cpu_mmu_index(env, false), retaddr);
}

void cpu_stb_data(CPUArchState *env, target_ulong ptr, uint32_t val)
{
    cpu_stb_data_ra(env, ptr, val, 0);
}

void cpu_stw_be_data(CPUArchState *env, target_ulong ptr, uint32_t val)
{
    cpu_stw_be_data_ra(env, ptr, val, 0);
}

void cpu_stl_be_data(CPUArchState *env, target_ulong ptr, uint32_t val)
{
    cpu_stl_be_data_ra(env, ptr, val, 0);
}

void cpu_stq_be_data(CPUArchState *env, target_ulong ptr, uint64_t val)
{
    cpu_stq_be_data_ra(env, ptr, val, 0);
}

void cpu_stw_le_data(CPUArchState *env, target_ulong ptr, uint32_t val)
{
    cpu_stw_le_data_ra(env, ptr, val, 0);
}

void cpu_stl_le_data(CPUArchState *env, target_ulong ptr, uint32_t val)
{
    cpu_stl_le_data_ra(env, ptr, val, 0);
}

void cpu_stq_le_data(CPUArchState *env, target_ulong ptr, uint64_t val)
{
    cpu_stq_le_data_ra(env, ptr, val, 0);
}
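
/*
 * Example use (a hedged sketch, mirroring the load example above):
 * a target helper writing a little-endian word back to guest memory,
 * again passing GETPC() so a fault unwinds to the generated code.
 * The helper name and arguments are hypothetical.
 *
 *   void helper_write_status(CPUArchState *env, target_ulong addr,
 *                            uint32_t status)
 *   {
 *       cpu_stl_le_data_ra(env, addr, status, GETPC());
 *   }
 */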

/*
 * First set of functions allows passing in OI and RETADDR.
 * This makes them callable from other helpers.
 */

#define EXTRA_ARGS , TCGMemOpIdx oi, uintptr_t retaddr
#define ATOMIC_NAME(X) \
    HELPER(glue(glue(glue(atomic_ ## X, SUFFIX), END), _mmu))
#define ATOMIC_MMU_DECLS
#define ATOMIC_MMU_LOOKUP atomic_mmu_lookup(env, addr, oi, retaddr)
#define ATOMIC_MMU_CLEANUP
#define ATOMIC_MMU_IDX get_mmuidx(oi)

#include "atomic_common.c.inc"

#define DATA_SIZE 1
#include "atomic_template.h"

#define DATA_SIZE 2
#include "atomic_template.h"

#define DATA_SIZE 4
#include "atomic_template.h"

#ifdef CONFIG_ATOMIC64
#define DATA_SIZE 8
#include "atomic_template.h"
#endif

#if HAVE_CMPXCHG128 || HAVE_ATOMIC128
#define DATA_SIZE 16
#include "atomic_template.h"
#endif
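
/*
 * A sketch of what the expansions above generate (per the ATOMIC_NAME
 * pattern; the exact set of operations comes from atomic_template.h):
 * for DATA_SIZE 4 this first group emits helpers along the lines of
 *
 *   uint32_t helper_atomic_cmpxchgl_le_mmu(CPUArchState *env,
 *                                          target_ulong addr,
 *                                          uint32_t cmpv, uint32_t newv,
 *                                          TCGMemOpIdx oi,
 *                                          uintptr_t retaddr);
 *
 * together with _be variants, xchg, and fetch-and-op forms.
 */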

/* Second set of functions is directly callable from TCG as helpers.  */

#undef EXTRA_ARGS
#undef ATOMIC_NAME
#undef ATOMIC_MMU_LOOKUP
#define EXTRA_ARGS , TCGMemOpIdx oi
#define ATOMIC_NAME(X) HELPER(glue(glue(atomic_ ## X, SUFFIX), END))
#define ATOMIC_MMU_LOOKUP atomic_mmu_lookup(env, addr, oi, GETPC())

#define DATA_SIZE 1
#include "atomic_template.h"

#define DATA_SIZE 2
#include "atomic_template.h"

#define DATA_SIZE 4
#include "atomic_template.h"

#ifdef CONFIG_ATOMIC64
#define DATA_SIZE 8
#include "atomic_template.h"
#endif
#undef ATOMIC_MMU_IDX

/* Code access functions.  */

static uint64_t full_ldub_code(CPUArchState *env, target_ulong addr,
                               TCGMemOpIdx oi, uintptr_t retaddr)
{
    return load_helper(env, addr, oi, retaddr, MO_8, true, full_ldub_code);
}

uint32_t cpu_ldub_code(CPUArchState *env, abi_ptr addr)
{
    TCGMemOpIdx oi = make_memop_idx(MO_UB, cpu_mmu_index(env, true));
    return full_ldub_code(env, addr, oi, 0);
}

static uint64_t full_lduw_code(CPUArchState *env, target_ulong addr,
                               TCGMemOpIdx oi, uintptr_t retaddr)
{
    return load_helper(env, addr, oi, retaddr, MO_TEUW, true, full_lduw_code);
}

uint32_t cpu_lduw_code(CPUArchState *env, abi_ptr addr)
{
    TCGMemOpIdx oi = make_memop_idx(MO_TEUW, cpu_mmu_index(env, true));
    return full_lduw_code(env, addr, oi, 0);
}

static uint64_t full_ldl_code(CPUArchState *env, target_ulong addr,
                              TCGMemOpIdx oi, uintptr_t retaddr)
{
    return load_helper(env, addr, oi, retaddr, MO_TEUL, true, full_ldl_code);
}

uint32_t cpu_ldl_code(CPUArchState *env, abi_ptr addr)
{
    TCGMemOpIdx oi = make_memop_idx(MO_TEUL, cpu_mmu_index(env, true));
    return full_ldl_code(env, addr, oi, 0);
}

static uint64_t full_ldq_code(CPUArchState *env, target_ulong addr,
                              TCGMemOpIdx oi, uintptr_t retaddr)
{
    return load_helper(env, addr, oi, retaddr, MO_TEQ, true, full_ldq_code);
}

uint64_t cpu_ldq_code(CPUArchState *env, abi_ptr addr)
{
    TCGMemOpIdx oi = make_memop_idx(MO_TEQ, cpu_mmu_index(env, true));
    return full_ldq_code(env, addr, oi, 0);
}

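/*
 * Typical use of the code-access loads (a hedged sketch): a target's
 * translator fetches the next instruction word via the code MMU index,
 * e.g.
 *
 *   uint32_t insn = cpu_ldl_code(env, ctx->base.pc_next);
 *
 * where "ctx" stands for the target's DisasContext.
 */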