/*
 *  Common CPU TLB handling
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/main-loop.h"
#include "hw/core/tcg-cpu-ops.h"
#include "exec/exec-all.h"
#include "exec/memory.h"
#include "exec/cpu_ldst.h"
#include "exec/cputlb.h"
#include "exec/memory-internal.h"
#include "exec/ram_addr.h"
#include "tcg/tcg.h"
#include "qemu/error-report.h"
#include "exec/log.h"
#include "exec/helper-proto.h"
#include "qemu/atomic.h"
#include "qemu/atomic128.h"
#include "exec/translate-all.h"
#include "trace/trace-root.h"
#include "trace/mem.h"
#include "tb-hash.h"
#include "internal.h"
#ifdef CONFIG_PLUGIN
#include "qemu/plugin-memory.h"
#endif

/* DEBUG defines, enable DEBUG_TLB_LOG to log to the CPU_LOG_MMU target */
/* #define DEBUG_TLB */
/* #define DEBUG_TLB_LOG */

#ifdef DEBUG_TLB
# define DEBUG_TLB_GATE 1
# ifdef DEBUG_TLB_LOG
#  define DEBUG_TLB_LOG_GATE 1
# else
#  define DEBUG_TLB_LOG_GATE 0
# endif
#else
# define DEBUG_TLB_GATE 0
# define DEBUG_TLB_LOG_GATE 0
#endif

#define tlb_debug(fmt, ...) do { \
    if (DEBUG_TLB_LOG_GATE) { \
        qemu_log_mask(CPU_LOG_MMU, "%s: " fmt, __func__, \
                      ## __VA_ARGS__); \
    } else if (DEBUG_TLB_GATE) { \
        fprintf(stderr, "%s: " fmt, __func__, ## __VA_ARGS__); \
    } \
} while (0)

#define assert_cpu_is_self(cpu) do {                              \
        if (DEBUG_TLB_GATE) {                                     \
            g_assert(!(cpu)->created || qemu_cpu_is_self(cpu));   \
        }                                                         \
    } while (0)

/* run_on_cpu_data.target_ptr should always be big enough for a
 * target_ulong even on 32 bit builds */
QEMU_BUILD_BUG_ON(sizeof(target_ulong) > sizeof(run_on_cpu_data));

/* We currently can't handle more than 16 bits in the MMUIDX bitmask.
 */
QEMU_BUILD_BUG_ON(NB_MMU_MODES > 16);
#define ALL_MMUIDX_BITS ((1 << NB_MMU_MODES) - 1)

static inline size_t tlb_n_entries(CPUTLBDescFast *fast)
{
    return (fast->mask >> CPU_TLB_ENTRY_BITS) + 1;
}

static inline size_t sizeof_tlb(CPUTLBDescFast *fast)
{
    return fast->mask + (1 << CPU_TLB_ENTRY_BITS);
}

static void tlb_window_reset(CPUTLBDesc *desc, int64_t ns,
                             size_t max_entries)
{
    desc->window_begin_ns = ns;
    desc->window_max_entries = max_entries;
}

static void tb_jmp_cache_clear_page(CPUState *cpu, target_ulong page_addr)
{
    unsigned int i, i0 = tb_jmp_cache_hash_page(page_addr);

    for (i = 0; i < TB_JMP_PAGE_SIZE; i++) {
        qatomic_set(&cpu->tb_jmp_cache[i0 + i], NULL);
    }
}

static void tb_flush_jmp_cache(CPUState *cpu, target_ulong addr)
{
    /* Discard jump cache entries for any tb which might potentially
       overlap the flushed page.  */
    tb_jmp_cache_clear_page(cpu, addr - TARGET_PAGE_SIZE);
    tb_jmp_cache_clear_page(cpu, addr);
}
117
/**
 * tlb_mmu_resize_locked() - perform TLB resize bookkeeping; resize if necessary
 * @desc: The CPUTLBDesc portion of the TLB
 * @fast: The CPUTLBDescFast portion of the same TLB
 *
 * Called with tlb_lock held.
 *
 * We have two main constraints when resizing a TLB: (1) we only resize it
 * on a TLB flush (otherwise we'd have to take a perf hit by either rehashing
 * the array or unnecessarily flushing it), which means we do not control how
 * frequently the resizing can occur; (2) we don't have access to the guest's
 * future scheduling decisions, and therefore have to decide the magnitude of
 * the resize based on past observations.
 *
 * In general, a memory-hungry process can benefit greatly from an appropriately
 * sized TLB, since a guest TLB miss is very expensive. This doesn't mean that
 * we just have to make the TLB as large as possible; larger TLBs take longer
 * to be flushed, and the memory that the TLB uses will displace other data
 * from the CPU caches.
 *
 * To achieve near-optimal performance for all kinds of workloads, we:
 *
 * 1. Aggressively increase the size of the TLB when the use rate of the
 * TLB being flushed is high, since it is likely that in the near future this
 * memory-hungry process will execute again, and its memory hungriness will
 * probably be similar.
 *
 * 2. Slowly reduce the size of the TLB as the use rate declines over a
 * reasonably large time window. The rationale is that if in such a time window
 * we have not observed a high TLB use rate, it is likely that we won't observe
 * it in the near future. In that case, once a time window expires we downsize
 * the TLB to match the maximum use rate observed in the window.
 *
 * 3. Try to keep the maximum use rate in a time window in the 30-70% range,
 * since in that range performance is likely near-optimal. Recall that the TLB
 * is direct mapped, so we want the use rate to be low (or at least not too
 * high), since otherwise we are likely to have a significant amount of
 * conflict misses.
 */
158static void tlb_mmu_resize_locked(CPUTLBDesc *desc, CPUTLBDescFast *fast,
159 int64_t now)
160{
161 size_t old_size = tlb_n_entries(fast);
162 size_t rate;
163 size_t new_size = old_size;
164 int64_t window_len_ms = 100;
165 int64_t window_len_ns = window_len_ms * 1000 * 1000;
166 bool window_expired = now > desc->window_begin_ns + window_len_ns;
167
168 if (desc->n_used_entries > desc->window_max_entries) {
169 desc->window_max_entries = desc->n_used_entries;
170 }
171 rate = desc->window_max_entries * 100 / old_size;
172
173 if (rate > 70) {
174 new_size = MIN(old_size << 1, 1 << CPU_TLB_DYN_MAX_BITS);
175 } else if (rate < 30 && window_expired) {
176 size_t ceil = pow2ceil(desc->window_max_entries);
177 size_t expected_rate = desc->window_max_entries * 100 / ceil;

        /*
         * Avoid undersizing when the maximum number of entries seen is just
         * below a power of two.  In that case the expected use rate against
         * the next smaller power of two would again exceed 70%, so grow by
         * one more step to keep the post-resize use rate within the 30-70%
         * target range.
         */
189 if (expected_rate > 70) {
190 ceil *= 2;
191 }
192 new_size = MAX(ceil, 1 << CPU_TLB_DYN_MIN_BITS);
193 }
194
195 if (new_size == old_size) {
196 if (window_expired) {
197 tlb_window_reset(desc, now, desc->n_used_entries);
198 }
199 return;
200 }
201
202 g_free(fast->table);
203 g_free(desc->iotlb);
204
205 tlb_window_reset(desc, now, 0);
206
207 fast->mask = (new_size - 1) << CPU_TLB_ENTRY_BITS;
208 fast->table = g_try_new(CPUTLBEntry, new_size);
209 desc->iotlb = g_try_new(CPUIOTLBEntry, new_size);

    /*
     * If the allocations fail, try smaller sizes. We just freed some
     * memory, so going back to half of new_size has a good chance of working.
     * Increased memory pressure elsewhere in the system might cause the
     * allocations to fail though, so we progressively reduce the allocation
     * size, aborting if we cannot even allocate the smallest TLB we support.
     */
218 while (fast->table == NULL || desc->iotlb == NULL) {
219 if (new_size == (1 << CPU_TLB_DYN_MIN_BITS)) {
220 error_report("%s: %s", __func__, strerror(errno));
221 abort();
222 }
223 new_size = MAX(new_size >> 1, 1 << CPU_TLB_DYN_MIN_BITS);
224 fast->mask = (new_size - 1) << CPU_TLB_ENTRY_BITS;
225
226 g_free(fast->table);
227 g_free(desc->iotlb);
228 fast->table = g_try_new(CPUTLBEntry, new_size);
229 desc->iotlb = g_try_new(CPUIOTLBEntry, new_size);
230 }
231}
232
233static void tlb_mmu_flush_locked(CPUTLBDesc *desc, CPUTLBDescFast *fast)
234{
235 desc->n_used_entries = 0;
236 desc->large_page_addr = -1;
237 desc->large_page_mask = -1;
238 desc->vindex = 0;
239 memset(fast->table, -1, sizeof_tlb(fast));
240 memset(desc->vtable, -1, sizeof(desc->vtable));
241}
242
243static void tlb_flush_one_mmuidx_locked(CPUArchState *env, int mmu_idx,
244 int64_t now)
245{
246 CPUTLBDesc *desc = &env_tlb(env)->d[mmu_idx];
247 CPUTLBDescFast *fast = &env_tlb(env)->f[mmu_idx];
248
249 tlb_mmu_resize_locked(desc, fast, now);
250 tlb_mmu_flush_locked(desc, fast);
251}
252
253static void tlb_mmu_init(CPUTLBDesc *desc, CPUTLBDescFast *fast, int64_t now)
254{
255 size_t n_entries = 1 << CPU_TLB_DYN_DEFAULT_BITS;
256
257 tlb_window_reset(desc, now, 0);
258 desc->n_used_entries = 0;
259 fast->mask = (n_entries - 1) << CPU_TLB_ENTRY_BITS;
260 fast->table = g_new(CPUTLBEntry, n_entries);
261 desc->iotlb = g_new(CPUIOTLBEntry, n_entries);
262 tlb_mmu_flush_locked(desc, fast);
263}
264
265static inline void tlb_n_used_entries_inc(CPUArchState *env, uintptr_t mmu_idx)
266{
267 env_tlb(env)->d[mmu_idx].n_used_entries++;
268}
269
270static inline void tlb_n_used_entries_dec(CPUArchState *env, uintptr_t mmu_idx)
271{
272 env_tlb(env)->d[mmu_idx].n_used_entries--;
273}
274
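/* Initialize the per-vCPU TLB: the lock and one desc/table pair per MMU index. */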
275void tlb_init(CPUState *cpu)
276{
277 CPUArchState *env = cpu->env_ptr;
278 int64_t now = get_clock_realtime();
279 int i;
280
281 qemu_spin_init(&env_tlb(env)->c.lock);

    /* All tlbs are initialized flushed. */
284 env_tlb(env)->c.dirty = 0;
285
286 for (i = 0; i < NB_MMU_MODES; i++) {
287 tlb_mmu_init(&env_tlb(env)->d[i], &env_tlb(env)->f[i], now);
288 }
289}
290
291void tlb_destroy(CPUState *cpu)
292{
293 CPUArchState *env = cpu->env_ptr;
294 int i;
295
296 qemu_spin_destroy(&env_tlb(env)->c.lock);
297 for (i = 0; i < NB_MMU_MODES; i++) {
298 CPUTLBDesc *desc = &env_tlb(env)->d[i];
299 CPUTLBDescFast *fast = &env_tlb(env)->f[i];
300
301 g_free(fast->table);
302 g_free(desc->iotlb);
303 }
304}

/*
 * flush_all_helper: queue @fn as asynchronous work on every vCPU except
 * @src.  Used to broadcast TLB flush operations; the caller handles the
 * source vCPU itself, either synchronously or as "safe" work.
 */
313static void flush_all_helper(CPUState *src, run_on_cpu_func fn,
314 run_on_cpu_data d)
315{
316 CPUState *cpu;
317
318 CPU_FOREACH(cpu) {
319 if (cpu != src) {
320 async_run_on_cpu(cpu, fn, d);
321 }
322 }
323}
324
325void tlb_flush_counts(size_t *pfull, size_t *ppart, size_t *pelide)
326{
327 CPUState *cpu;
328 size_t full = 0, part = 0, elide = 0;
329
330 CPU_FOREACH(cpu) {
331 CPUArchState *env = cpu->env_ptr;
332
333 full += qatomic_read(&env_tlb(env)->c.full_flush_count);
334 part += qatomic_read(&env_tlb(env)->c.part_flush_count);
335 elide += qatomic_read(&env_tlb(env)->c.elide_flush_count);
336 }
337 *pfull = full;
338 *ppart = part;
339 *pelide = elide;
340}
341
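/*
 * Flush the MMU indexes named in data->host_int on the current vCPU.
 * Only indexes that are actually dirty are flushed; requested-but-clean
 * indexes are recorded as elided flushes in the statistics.
 */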
342static void tlb_flush_by_mmuidx_async_work(CPUState *cpu, run_on_cpu_data data)
343{
344 CPUArchState *env = cpu->env_ptr;
345 uint16_t asked = data.host_int;
346 uint16_t all_dirty, work, to_clean;
347 int64_t now = get_clock_realtime();
348
349 assert_cpu_is_self(cpu);
350
351 tlb_debug("mmu_idx:0x%04" PRIx16 "\n", asked);
352
353 qemu_spin_lock(&env_tlb(env)->c.lock);
354
355 all_dirty = env_tlb(env)->c.dirty;
356 to_clean = asked & all_dirty;
357 all_dirty &= ~to_clean;
358 env_tlb(env)->c.dirty = all_dirty;
359
360 for (work = to_clean; work != 0; work &= work - 1) {
361 int mmu_idx = ctz32(work);
362 tlb_flush_one_mmuidx_locked(env, mmu_idx, now);
363 }
364
365 qemu_spin_unlock(&env_tlb(env)->c.lock);
366
367 cpu_tb_jmp_cache_clear(cpu);
368
369 if (to_clean == ALL_MMUIDX_BITS) {
370 qatomic_set(&env_tlb(env)->c.full_flush_count,
371 env_tlb(env)->c.full_flush_count + 1);
372 } else {
373 qatomic_set(&env_tlb(env)->c.part_flush_count,
374 env_tlb(env)->c.part_flush_count + ctpop16(to_clean));
375 if (to_clean != asked) {
376 qatomic_set(&env_tlb(env)->c.elide_flush_count,
377 env_tlb(env)->c.elide_flush_count +
378 ctpop16(asked & ~to_clean));
379 }
380 }
381}
382
383void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap)
384{
385 tlb_debug("mmu_idx: 0x%" PRIx16 "\n", idxmap);
386
387 if (cpu->created && !qemu_cpu_is_self(cpu)) {
388 async_run_on_cpu(cpu, tlb_flush_by_mmuidx_async_work,
389 RUN_ON_CPU_HOST_INT(idxmap));
390 } else {
391 tlb_flush_by_mmuidx_async_work(cpu, RUN_ON_CPU_HOST_INT(idxmap));
392 }
393}
394
395void tlb_flush(CPUState *cpu)
396{
397 tlb_flush_by_mmuidx(cpu, ALL_MMUIDX_BITS);
398}
399
400void tlb_flush_by_mmuidx_all_cpus(CPUState *src_cpu, uint16_t idxmap)
401{
402 const run_on_cpu_func fn = tlb_flush_by_mmuidx_async_work;
403
404 tlb_debug("mmu_idx: 0x%"PRIx16"\n", idxmap);
405
406 flush_all_helper(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
407 fn(src_cpu, RUN_ON_CPU_HOST_INT(idxmap));
408}
409
410void tlb_flush_all_cpus(CPUState *src_cpu)
411{
412 tlb_flush_by_mmuidx_all_cpus(src_cpu, ALL_MMUIDX_BITS);
413}
414
415void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *src_cpu, uint16_t idxmap)
416{
417 const run_on_cpu_func fn = tlb_flush_by_mmuidx_async_work;
418
419 tlb_debug("mmu_idx: 0x%"PRIx16"\n", idxmap);
420
421 flush_all_helper(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
422 async_safe_run_on_cpu(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
423}
424
425void tlb_flush_all_cpus_synced(CPUState *src_cpu)
426{
427 tlb_flush_by_mmuidx_all_cpus_synced(src_cpu, ALL_MMUIDX_BITS);
428}
429
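/* Return true if @page matches any of the entry's addresses under @mask,
   regardless of the access protection of the entry. */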
430static bool tlb_hit_page_mask_anyprot(CPUTLBEntry *tlb_entry,
431 target_ulong page, target_ulong mask)
432{
433 page &= mask;
434 mask &= TARGET_PAGE_MASK | TLB_INVALID_MASK;
435
436 return (page == (tlb_entry->addr_read & mask) ||
437 page == (tlb_addr_write(tlb_entry) & mask) ||
438 page == (tlb_entry->addr_code & mask));
439}
440
441static inline bool tlb_hit_page_anyprot(CPUTLBEntry *tlb_entry,
442 target_ulong page)
443{
444 return tlb_hit_page_mask_anyprot(tlb_entry, page, -1);
445}

/**
 * tlb_entry_is_empty - return true if the entry is not in use
 * @te: pointer to CPUTLBEntry
 */
451static inline bool tlb_entry_is_empty(const CPUTLBEntry *te)
452{
453 return te->addr_read == -1 && te->addr_write == -1 && te->addr_code == -1;
454}
455
/* Called with tlb_c.lock held */
457static bool tlb_flush_entry_mask_locked(CPUTLBEntry *tlb_entry,
458 target_ulong page,
459 target_ulong mask)
460{
461 if (tlb_hit_page_mask_anyprot(tlb_entry, page, mask)) {
462 memset(tlb_entry, -1, sizeof(*tlb_entry));
463 return true;
464 }
465 return false;
466}
467
468static inline bool tlb_flush_entry_locked(CPUTLBEntry *tlb_entry,
469 target_ulong page)
470{
471 return tlb_flush_entry_mask_locked(tlb_entry, page, -1);
472}
473
/* Called with tlb_c.lock held */
475static void tlb_flush_vtlb_page_mask_locked(CPUArchState *env, int mmu_idx,
476 target_ulong page,
477 target_ulong mask)
478{
479 CPUTLBDesc *d = &env_tlb(env)->d[mmu_idx];
480 int k;
481
482 assert_cpu_is_self(env_cpu(env));
483 for (k = 0; k < CPU_VTLB_SIZE; k++) {
484 if (tlb_flush_entry_mask_locked(&d->vtable[k], page, mask)) {
485 tlb_n_used_entries_dec(env, mmu_idx);
486 }
487 }
488}
489
490static inline void tlb_flush_vtlb_page_locked(CPUArchState *env, int mmu_idx,
491 target_ulong page)
492{
493 tlb_flush_vtlb_page_mask_locked(env, mmu_idx, page, -1);
494}
495
496static void tlb_flush_page_locked(CPUArchState *env, int midx,
497 target_ulong page)
498{
499 target_ulong lp_addr = env_tlb(env)->d[midx].large_page_addr;
500 target_ulong lp_mask = env_tlb(env)->d[midx].large_page_mask;

    /* Check if we need to flush due to large pages.  */
503 if ((page & lp_mask) == lp_addr) {
504 tlb_debug("forcing full flush midx %d ("
505 TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
506 midx, lp_addr, lp_mask);
507 tlb_flush_one_mmuidx_locked(env, midx, get_clock_realtime());
508 } else {
509 if (tlb_flush_entry_locked(tlb_entry(env, midx, page), page)) {
510 tlb_n_used_entries_dec(env, midx);
511 }
512 tlb_flush_vtlb_page_locked(env, midx, page);
513 }
514}

/**
 * tlb_flush_page_by_mmuidx_async_0:
 * @cpu: cpu on which to flush
 * @addr: page of virtual address to flush
 * @idxmap: set of mmu_idx to flush
 *
 * Helper for tlb_flush_page_by_mmuidx and friends, flush one page
 * at @addr from the tlbs indicated by @idxmap from @cpu.
 */
525static void tlb_flush_page_by_mmuidx_async_0(CPUState *cpu,
526 target_ulong addr,
527 uint16_t idxmap)
528{
529 CPUArchState *env = cpu->env_ptr;
530 int mmu_idx;
531
532 assert_cpu_is_self(cpu);
533
534 tlb_debug("page addr:" TARGET_FMT_lx " mmu_map:0x%x\n", addr, idxmap);
535
536 qemu_spin_lock(&env_tlb(env)->c.lock);
537 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
538 if ((idxmap >> mmu_idx) & 1) {
539 tlb_flush_page_locked(env, mmu_idx, addr);
540 }
541 }
542 qemu_spin_unlock(&env_tlb(env)->c.lock);
543
544 tb_flush_jmp_cache(cpu, addr);
545}

/**
 * tlb_flush_page_by_mmuidx_async_1:
 * @cpu: cpu on which to flush
 * @data: encoded addr + idxmap
 *
 * Helper for tlb_flush_page_by_mmuidx and friends, called through
 * async_run_on_cpu.  The idxmap parameter is encoded in the page
 * offset of the target_ptr field.  This limits the set of mmu_idx
 * that can be passed via this method.
 */
557static void tlb_flush_page_by_mmuidx_async_1(CPUState *cpu,
558 run_on_cpu_data data)
559{
560 target_ulong addr_and_idxmap = (target_ulong) data.target_ptr;
561 target_ulong addr = addr_and_idxmap & TARGET_PAGE_MASK;
562 uint16_t idxmap = addr_and_idxmap & ~TARGET_PAGE_MASK;
563
564 tlb_flush_page_by_mmuidx_async_0(cpu, addr, idxmap);
565}
566
567typedef struct {
568 target_ulong addr;
569 uint16_t idxmap;
570} TLBFlushPageByMMUIdxData;

/**
 * tlb_flush_page_by_mmuidx_async_2:
 * @cpu: cpu on which to flush
 * @data: allocated addr + idxmap
 *
 * Helper for tlb_flush_page_by_mmuidx and friends, called through
 * async_run_on_cpu.  The addr+idxmap parameters are stored in a
 * TLBFlushPageByMMUIdxData structure that has been allocated
 * specifically for this helper.  Free the structure when done.
 */
582static void tlb_flush_page_by_mmuidx_async_2(CPUState *cpu,
583 run_on_cpu_data data)
584{
585 TLBFlushPageByMMUIdxData *d = data.host_ptr;
586
587 tlb_flush_page_by_mmuidx_async_0(cpu, d->addr, d->idxmap);
588 g_free(d);
589}
590
591void tlb_flush_page_by_mmuidx(CPUState *cpu, target_ulong addr, uint16_t idxmap)
592{
593 tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%" PRIx16 "\n", addr, idxmap);

    /* This should already be page aligned */
596 addr &= TARGET_PAGE_MASK;
597
598 if (qemu_cpu_is_self(cpu)) {
599 tlb_flush_page_by_mmuidx_async_0(cpu, addr, idxmap);
600 } else if (idxmap < TARGET_PAGE_SIZE) {
        /*
         * Most targets have only a few mmu_idx.  In the case where
         * we can stuff idxmap into the low TARGET_PAGE_BITS, avoid
         * allocating memory for this operation.
         */
606 async_run_on_cpu(cpu, tlb_flush_page_by_mmuidx_async_1,
607 RUN_ON_CPU_TARGET_PTR(addr | idxmap));
608 } else {
609 TLBFlushPageByMMUIdxData *d = g_new(TLBFlushPageByMMUIdxData, 1);

        /* Otherwise allocate a structure, freed by the worker.  */
612 d->addr = addr;
613 d->idxmap = idxmap;
614 async_run_on_cpu(cpu, tlb_flush_page_by_mmuidx_async_2,
615 RUN_ON_CPU_HOST_PTR(d));
616 }
617}
618
619void tlb_flush_page(CPUState *cpu, target_ulong addr)
620{
621 tlb_flush_page_by_mmuidx(cpu, addr, ALL_MMUIDX_BITS);
622}
623
624void tlb_flush_page_by_mmuidx_all_cpus(CPUState *src_cpu, target_ulong addr,
625 uint16_t idxmap)
626{
627 tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%"PRIx16"\n", addr, idxmap);

    /* This should already be page aligned */
630 addr &= TARGET_PAGE_MASK;
631
    /*
     * Allocate memory to hold addr+idxmap only when needed.
     * See tlb_flush_page_by_mmuidx for details.
     */
636 if (idxmap < TARGET_PAGE_SIZE) {
637 flush_all_helper(src_cpu, tlb_flush_page_by_mmuidx_async_1,
638 RUN_ON_CPU_TARGET_PTR(addr | idxmap));
639 } else {
640 CPUState *dst_cpu;

        /* Allocate a separate data block for each destination cpu.  */
643 CPU_FOREACH(dst_cpu) {
644 if (dst_cpu != src_cpu) {
645 TLBFlushPageByMMUIdxData *d
646 = g_new(TLBFlushPageByMMUIdxData, 1);
647
648 d->addr = addr;
649 d->idxmap = idxmap;
650 async_run_on_cpu(dst_cpu, tlb_flush_page_by_mmuidx_async_2,
651 RUN_ON_CPU_HOST_PTR(d));
652 }
653 }
654 }
655
656 tlb_flush_page_by_mmuidx_async_0(src_cpu, addr, idxmap);
657}
658
659void tlb_flush_page_all_cpus(CPUState *src, target_ulong addr)
660{
661 tlb_flush_page_by_mmuidx_all_cpus(src, addr, ALL_MMUIDX_BITS);
662}
663
664void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
665 target_ulong addr,
666 uint16_t idxmap)
667{
668 tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%"PRIx16"\n", addr, idxmap);

    /* This should already be page aligned */
671 addr &= TARGET_PAGE_MASK;

    /*
     * Allocate memory to hold addr+idxmap only when needed.
     * See tlb_flush_page_by_mmuidx for details.
     */
677 if (idxmap < TARGET_PAGE_SIZE) {
678 flush_all_helper(src_cpu, tlb_flush_page_by_mmuidx_async_1,
679 RUN_ON_CPU_TARGET_PTR(addr | idxmap));
680 async_safe_run_on_cpu(src_cpu, tlb_flush_page_by_mmuidx_async_1,
681 RUN_ON_CPU_TARGET_PTR(addr | idxmap));
682 } else {
683 CPUState *dst_cpu;
684 TLBFlushPageByMMUIdxData *d;

        /* Allocate a separate data block for each destination cpu.  */
687 CPU_FOREACH(dst_cpu) {
688 if (dst_cpu != src_cpu) {
689 d = g_new(TLBFlushPageByMMUIdxData, 1);
690 d->addr = addr;
691 d->idxmap = idxmap;
692 async_run_on_cpu(dst_cpu, tlb_flush_page_by_mmuidx_async_2,
693 RUN_ON_CPU_HOST_PTR(d));
694 }
695 }
696
697 d = g_new(TLBFlushPageByMMUIdxData, 1);
698 d->addr = addr;
699 d->idxmap = idxmap;
700 async_safe_run_on_cpu(src_cpu, tlb_flush_page_by_mmuidx_async_2,
701 RUN_ON_CPU_HOST_PTR(d));
702 }
703}
704
705void tlb_flush_page_all_cpus_synced(CPUState *src, target_ulong addr)
706{
707 tlb_flush_page_by_mmuidx_all_cpus_synced(src, addr, ALL_MMUIDX_BITS);
708}
709
710static void tlb_flush_range_locked(CPUArchState *env, int midx,
711 target_ulong addr, target_ulong len,
712 unsigned bits)
713{
714 CPUTLBDesc *d = &env_tlb(env)->d[midx];
715 CPUTLBDescFast *f = &env_tlb(env)->f[midx];
716 target_ulong mask = MAKE_64BIT_MASK(0, bits);

    /*
     * If @bits is smaller than the tlb size, there may be multiple entries
     * within the TLB; otherwise all addresses that match under @mask hit
     * the same TLB entry.
     * TODO: Perhaps allow bits to be a few bits less than the size.
     * For now, just flush the entire TLB.
     *
     * If @len is larger than the tlb size, then it will take longer to
     * test all of the entries in the TLB than it will to flush it all.
     */
728 if (mask < f->mask || len > f->mask) {
729 tlb_debug("forcing full flush midx %d ("
730 TARGET_FMT_lx "/" TARGET_FMT_lx "+" TARGET_FMT_lx ")\n",
731 midx, addr, mask, len);
732 tlb_flush_one_mmuidx_locked(env, midx, get_clock_realtime());
733 return;
734 }

    /*
     * Check if we need to flush due to large pages.
     * Because large_page_mask contains all 1's from the msb,
     * we only need to test the end of the range.
     */
741 if (((addr + len - 1) & d->large_page_mask) == d->large_page_addr) {
742 tlb_debug("forcing full flush midx %d ("
743 TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
744 midx, d->large_page_addr, d->large_page_mask);
745 tlb_flush_one_mmuidx_locked(env, midx, get_clock_realtime());
746 return;
747 }
748
749 for (target_ulong i = 0; i < len; i += TARGET_PAGE_SIZE) {
750 target_ulong page = addr + i;
751 CPUTLBEntry *entry = tlb_entry(env, midx, page);
752
753 if (tlb_flush_entry_mask_locked(entry, page, mask)) {
754 tlb_n_used_entries_dec(env, midx);
755 }
756 tlb_flush_vtlb_page_mask_locked(env, midx, page, mask);
757 }
758}
759
760typedef struct {
761 target_ulong addr;
762 target_ulong len;
763 uint16_t idxmap;
764 uint16_t bits;
765} TLBFlushRangeData;
766
767static void tlb_flush_range_by_mmuidx_async_0(CPUState *cpu,
768 TLBFlushRangeData d)
769{
770 CPUArchState *env = cpu->env_ptr;
771 int mmu_idx;
772
773 assert_cpu_is_self(cpu);
774
775 tlb_debug("range:" TARGET_FMT_lx "/%u+" TARGET_FMT_lx " mmu_map:0x%x\n",
776 d.addr, d.bits, d.len, d.idxmap);
777
778 qemu_spin_lock(&env_tlb(env)->c.lock);
779 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
780 if ((d.idxmap >> mmu_idx) & 1) {
781 tlb_flush_range_locked(env, mmu_idx, d.addr, d.len, d.bits);
782 }
783 }
784 qemu_spin_unlock(&env_tlb(env)->c.lock);
785
786 for (target_ulong i = 0; i < d.len; i += TARGET_PAGE_SIZE) {
787 tb_flush_jmp_cache(cpu, d.addr + i);
788 }
789}
790
791static void tlb_flush_range_by_mmuidx_async_1(CPUState *cpu,
792 run_on_cpu_data data)
793{
794 TLBFlushRangeData *d = data.host_ptr;
795 tlb_flush_range_by_mmuidx_async_0(cpu, *d);
796 g_free(d);
797}
798
799void tlb_flush_range_by_mmuidx(CPUState *cpu, target_ulong addr,
800 target_ulong len, uint16_t idxmap,
801 unsigned bits)
802{
803 TLBFlushRangeData d;

    /*
     * If all bits are significant, and len is small,
     * this devolves to tlb_flush_page.
     */
809 if (bits >= TARGET_LONG_BITS && len <= TARGET_PAGE_SIZE) {
810 tlb_flush_page_by_mmuidx(cpu, addr, idxmap);
811 return;
812 }
    /* If no page bits are significant, this devolves to tlb_flush. */
814 if (bits < TARGET_PAGE_BITS) {
815 tlb_flush_by_mmuidx(cpu, idxmap);
816 return;
817 }

    /* This should already be page aligned */
820 d.addr = addr & TARGET_PAGE_MASK;
821 d.len = len;
822 d.idxmap = idxmap;
823 d.bits = bits;
824
825 if (qemu_cpu_is_self(cpu)) {
826 tlb_flush_range_by_mmuidx_async_0(cpu, d);
827 } else {
        /* Otherwise allocate a structure, freed by the worker.  */
829 TLBFlushRangeData *p = g_memdup(&d, sizeof(d));
830 async_run_on_cpu(cpu, tlb_flush_range_by_mmuidx_async_1,
831 RUN_ON_CPU_HOST_PTR(p));
832 }
833}
834
835void tlb_flush_page_bits_by_mmuidx(CPUState *cpu, target_ulong addr,
836 uint16_t idxmap, unsigned bits)
837{
838 tlb_flush_range_by_mmuidx(cpu, addr, TARGET_PAGE_SIZE, idxmap, bits);
839}
840
841void tlb_flush_range_by_mmuidx_all_cpus(CPUState *src_cpu,
842 target_ulong addr, target_ulong len,
843 uint16_t idxmap, unsigned bits)
844{
845 TLBFlushRangeData d;
846 CPUState *dst_cpu;

    /*
     * If all bits are significant, and len is small,
     * this devolves to tlb_flush_page.
     */
852 if (bits >= TARGET_LONG_BITS && len <= TARGET_PAGE_SIZE) {
853 tlb_flush_page_by_mmuidx_all_cpus(src_cpu, addr, idxmap);
854 return;
855 }
    /* If no page bits are significant, this devolves to tlb_flush. */
857 if (bits < TARGET_PAGE_BITS) {
858 tlb_flush_by_mmuidx_all_cpus(src_cpu, idxmap);
859 return;
860 }

    /* This should already be page aligned */
863 d.addr = addr & TARGET_PAGE_MASK;
864 d.len = len;
865 d.idxmap = idxmap;
866 d.bits = bits;

    /* Allocate a separate data block for each destination cpu.  */
869 CPU_FOREACH(dst_cpu) {
870 if (dst_cpu != src_cpu) {
871 TLBFlushRangeData *p = g_memdup(&d, sizeof(d));
872 async_run_on_cpu(dst_cpu,
873 tlb_flush_range_by_mmuidx_async_1,
874 RUN_ON_CPU_HOST_PTR(p));
875 }
876 }
877
878 tlb_flush_range_by_mmuidx_async_0(src_cpu, d);
879}
880
881void tlb_flush_page_bits_by_mmuidx_all_cpus(CPUState *src_cpu,
882 target_ulong addr,
883 uint16_t idxmap, unsigned bits)
884{
885 tlb_flush_range_by_mmuidx_all_cpus(src_cpu, addr, TARGET_PAGE_SIZE,
886 idxmap, bits);
887}
888
889void tlb_flush_range_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
890 target_ulong addr,
891 target_ulong len,
892 uint16_t idxmap,
893 unsigned bits)
894{
895 TLBFlushRangeData d, *p;
896 CPUState *dst_cpu;

    /*
     * If all bits are significant, and len is small,
     * this devolves to tlb_flush_page.
     */
902 if (bits >= TARGET_LONG_BITS && len <= TARGET_PAGE_SIZE) {
903 tlb_flush_page_by_mmuidx_all_cpus_synced(src_cpu, addr, idxmap);
904 return;
905 }
    /* If no page bits are significant, this devolves to tlb_flush. */
907 if (bits < TARGET_PAGE_BITS) {
908 tlb_flush_by_mmuidx_all_cpus_synced(src_cpu, idxmap);
909 return;
910 }

    /* This should already be page aligned */
913 d.addr = addr & TARGET_PAGE_MASK;
914 d.len = len;
915 d.idxmap = idxmap;
916 d.bits = bits;

    /* Allocate a separate data block for each destination cpu.  */
919 CPU_FOREACH(dst_cpu) {
920 if (dst_cpu != src_cpu) {
921 p = g_memdup(&d, sizeof(d));
922 async_run_on_cpu(dst_cpu, tlb_flush_range_by_mmuidx_async_1,
923 RUN_ON_CPU_HOST_PTR(p));
924 }
925 }
926
927 p = g_memdup(&d, sizeof(d));
928 async_safe_run_on_cpu(src_cpu, tlb_flush_range_by_mmuidx_async_1,
929 RUN_ON_CPU_HOST_PTR(p));
930}
931
932void tlb_flush_page_bits_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
933 target_ulong addr,
934 uint16_t idxmap,
935 unsigned bits)
936{
937 tlb_flush_range_by_mmuidx_all_cpus_synced(src_cpu, addr, TARGET_PAGE_SIZE,
938 idxmap, bits);
939}

/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
943void tlb_protect_code(ram_addr_t ram_addr)
944{
945 cpu_physical_memory_test_and_clear_dirty(ram_addr, TARGET_PAGE_SIZE,
946 DIRTY_MEMORY_CODE);
947}

/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
951void tlb_unprotect_code(ram_addr_t ram_addr)
952{
953 cpu_physical_memory_set_dirty_flag(ram_addr, DIRTY_MEMORY_CODE);
954}

/*
 * Dirty write flag handling
 *
 * TLB entries that map clean RAM (RAM for which dirty tracking is still
 * required, e.g. because it may contain translated code) carry TLB_NOTDIRTY
 * in addr_write so that stores take the slow path through notdirty_write().
 *
 * tlb_reset_dirty_range_locked() re-arms that slow path for every TLB entry
 * whose backing host address falls within [start, start + length).  Called
 * with tlb_c.lock held.  Because the owning vCPU may concurrently read
 * addr_write from generated code, the flag is set with an atomic operation,
 * except when TCG_OVERSIZED_GUEST precludes a 64-bit atomic and a plain
 * store is used instead.
 */
973static void tlb_reset_dirty_range_locked(CPUTLBEntry *tlb_entry,
974 uintptr_t start, uintptr_t length)
975{
976 uintptr_t addr = tlb_entry->addr_write;
977
978 if ((addr & (TLB_INVALID_MASK | TLB_MMIO |
979 TLB_DISCARD_WRITE | TLB_NOTDIRTY)) == 0) {
980 addr &= TARGET_PAGE_MASK;
981 addr += tlb_entry->addend;
982 if ((addr - start) < length) {
983#if TCG_OVERSIZED_GUEST
984 tlb_entry->addr_write |= TLB_NOTDIRTY;
985#else
986 qatomic_set(&tlb_entry->addr_write,
987 tlb_entry->addr_write | TLB_NOTDIRTY);
988#endif
989 }
990 }
991}

/*
 * Called with tlb_c.lock held.
 * Called only from the vCPU context, i.e. the TLB's owner thread.
 */
997static inline void copy_tlb_helper_locked(CPUTLBEntry *d, const CPUTLBEntry *s)
998{
999 *d = *s;
1000}

/* This is a cross vCPU call (i.e. another vCPU resetting the flags of
 * the target vCPU).
 * We must take tlb_c.lock to avoid racing with another vCPU update. The only
 * thing actually updated is the target TLB entry ->addr_write flags.
 */
1007void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length)
1008{
1009 CPUArchState *env;
1010
1011 int mmu_idx;
1012
1013 env = cpu->env_ptr;
1014 qemu_spin_lock(&env_tlb(env)->c.lock);
1015 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
1016 unsigned int i;
1017 unsigned int n = tlb_n_entries(&env_tlb(env)->f[mmu_idx]);
1018
1019 for (i = 0; i < n; i++) {
1020 tlb_reset_dirty_range_locked(&env_tlb(env)->f[mmu_idx].table[i],
1021 start1, length);
1022 }
1023
1024 for (i = 0; i < CPU_VTLB_SIZE; i++) {
1025 tlb_reset_dirty_range_locked(&env_tlb(env)->d[mmu_idx].vtable[i],
1026 start1, length);
1027 }
1028 }
1029 qemu_spin_unlock(&env_tlb(env)->c.lock);
1030}

/* Called with tlb_c.lock held */
1033static inline void tlb_set_dirty1_locked(CPUTLBEntry *tlb_entry,
1034 target_ulong vaddr)
1035{
1036 if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY)) {
1037 tlb_entry->addr_write = vaddr;
1038 }
1039}

/* update the TLB corresponding to virtual page vaddr
   so that it is no longer dirty */
1043void tlb_set_dirty(CPUState *cpu, target_ulong vaddr)
1044{
1045 CPUArchState *env = cpu->env_ptr;
1046 int mmu_idx;
1047
1048 assert_cpu_is_self(cpu);
1049
1050 vaddr &= TARGET_PAGE_MASK;
1051 qemu_spin_lock(&env_tlb(env)->c.lock);
1052 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
1053 tlb_set_dirty1_locked(tlb_entry(env, mmu_idx, vaddr), vaddr);
1054 }
1055
1056 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
1057 int k;
1058 for (k = 0; k < CPU_VTLB_SIZE; k++) {
1059 tlb_set_dirty1_locked(&env_tlb(env)->d[mmu_idx].vtable[k], vaddr);
1060 }
1061 }
1062 qemu_spin_unlock(&env_tlb(env)->c.lock);
1063}

/* Our TLB does not support large pages, so remember the area covered by
   large pages and trigger a full TLB flush if these are invalidated.  */
1067static void tlb_add_large_page(CPUArchState *env, int mmu_idx,
1068 target_ulong vaddr, target_ulong size)
1069{
1070 target_ulong lp_addr = env_tlb(env)->d[mmu_idx].large_page_addr;
1071 target_ulong lp_mask = ~(size - 1);
1072
1073 if (lp_addr == (target_ulong)-1) {
        /* No previous large page.  */
1075 lp_addr = vaddr;
1076 } else {
        /* Extend the existing region to include the new page.
           This is a compromise between unnecessary flushes and
           the cost of maintaining a full variable size TLB.  */
1080 lp_mask &= env_tlb(env)->d[mmu_idx].large_page_mask;
1081 while (((lp_addr ^ vaddr) & lp_mask) != 0) {
1082 lp_mask <<= 1;
1083 }
1084 }
1085 env_tlb(env)->d[mmu_idx].large_page_addr = lp_addr & lp_mask;
1086 env_tlb(env)->d[mmu_idx].large_page_mask = lp_mask;
1087}

/* Add a new TLB entry. At most one entry for a given virtual address
 * is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the
 * supplied size is only used by tlb_flush_page.
 *
 * Called from TCG-generated code, which is under an RCU read-side
 * critical section.
 */
1096void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
1097 hwaddr paddr, MemTxAttrs attrs, int prot,
1098 int mmu_idx, target_ulong size)
1099{
1100 CPUArchState *env = cpu->env_ptr;
1101 CPUTLB *tlb = env_tlb(env);
1102 CPUTLBDesc *desc = &tlb->d[mmu_idx];
1103 MemoryRegionSection *section;
1104 unsigned int index;
1105 target_ulong address;
1106 target_ulong write_address;
1107 uintptr_t addend;
1108 CPUTLBEntry *te, tn;
1109 hwaddr iotlb, xlat, sz, paddr_page;
1110 target_ulong vaddr_page;
1111 int asidx = cpu_asidx_from_attrs(cpu, attrs);
1112 int wp_flags;
1113 bool is_ram, is_romd;
1114
1115 assert_cpu_is_self(cpu);
1116
1117 if (size <= TARGET_PAGE_SIZE) {
1118 sz = TARGET_PAGE_SIZE;
1119 } else {
1120 tlb_add_large_page(env, mmu_idx, vaddr, size);
1121 sz = size;
1122 }
1123 vaddr_page = vaddr & TARGET_PAGE_MASK;
1124 paddr_page = paddr & TARGET_PAGE_MASK;
1125
1126 section = address_space_translate_for_iotlb(cpu, asidx, paddr_page,
1127 &xlat, &sz, attrs, &prot);
1128 assert(sz >= TARGET_PAGE_SIZE);
1129
1130 tlb_debug("vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx
1131 " prot=%x idx=%d\n",
1132 vaddr, paddr, prot, mmu_idx);
1133
1134 address = vaddr_page;
1135 if (size < TARGET_PAGE_SIZE) {
        /* Repeat the MMU check and TLB fill on every access.  */
1137 address |= TLB_INVALID_MASK;
1138 }
1139 if (attrs.byte_swap) {
1140 address |= TLB_BSWAP;
1141 }
1142
1143 is_ram = memory_region_is_ram(section->mr);
1144 is_romd = memory_region_is_romd(section->mr);
1145
1146 if (is_ram || is_romd) {
        /* RAM and ROMD regions have an associated host address.  */
1148 addend = (uintptr_t)memory_region_get_ram_ptr(section->mr) + xlat;
1149 } else {
        /* I/O does not; force the host address to NULL.  */
1151 addend = 0;
1152 }
1153
1154 write_address = address;
1155 if (is_ram) {
1156 iotlb = memory_region_get_ram_addr(section->mr) + xlat;
        /*
         * Computing is_clean is expensive; avoid all that unless
         * the page is actually writable.
         */
1161 if (prot & PAGE_WRITE) {
1162 if (section->readonly) {
1163 write_address |= TLB_DISCARD_WRITE;
1164 } else if (cpu_physical_memory_is_clean(iotlb)) {
1165 write_address |= TLB_NOTDIRTY;
1166 }
1167 }
1168 } else {
        /* I/O or ROMD */
1170 iotlb = memory_region_section_get_iotlb(cpu, section) + xlat;
        /*
         * Writes to romd devices must go through MMIO to enable write.
         * Reads to romd devices go through the ram_ptr found above,
         * but of course reads to I/O must go through MMIO.
         */
1176 write_address |= TLB_MMIO;
1177 if (!is_romd) {
1178 address = write_address;
1179 }
1180 }
1181
1182 wp_flags = cpu_watchpoint_address_matches(cpu, vaddr_page,
1183 TARGET_PAGE_SIZE);
1184
1185 index = tlb_index(env, mmu_idx, vaddr_page);
1186 te = tlb_entry(env, mmu_idx, vaddr_page);

    /*
     * Hold the TLB lock for the rest of the function. We could acquire/release
     * the lock several times in the function, but it is faster to amortize the
     * acquisition cost by acquiring it just once. Note that this leads to
     * a longer critical section, but this is not a concern since the TLB lock
     * is very rarely contended.
     */
1195 qemu_spin_lock(&tlb->c.lock);

    /* Note that the tlb is no longer clean.  */
1198 tlb->c.dirty |= 1 << mmu_idx;

    /* Make sure there's no cached translation for the new page.  */
1201 tlb_flush_vtlb_page_locked(env, mmu_idx, vaddr_page);

    /*
     * Only evict the old entry to the victim tlb if it's for a
     * different page; otherwise just overwrite the stale data.
     */
1207 if (!tlb_hit_page_anyprot(te, vaddr_page) && !tlb_entry_is_empty(te)) {
1208 unsigned vidx = desc->vindex++ % CPU_VTLB_SIZE;
1209 CPUTLBEntry *tv = &desc->vtable[vidx];

        /* Evict the old entry into the victim tlb.  */
1212 copy_tlb_helper_locked(tv, te);
1213 desc->viotlb[vidx] = desc->iotlb[index];
1214 tlb_n_used_entries_dec(env, mmu_idx);
1215 }

    /* refill the tlb */
    /*
     * At this point iotlb contains a physical section number in the lower
     * TARGET_PAGE_BITS, and either
     *  + the ram_addr_t of the page base of the target RAM (RAM)
     *  + the offset within section->mr of the page base (I/O, ROMD)
     * We subtract the vaddr_page (which is page aligned and thus won't
     * disturb the low bits) to give an offset which can be added to the
     * (non-page-aligned) vaddr of the eventual memory access to get
     * the MemoryRegion offset for the access. Note that the vaddr we
     * subtract here is that of the page base, and not the same as the
     * vaddr we add back in io_readx()/io_writex()/get_page_addr_code().
     */
1230 desc->iotlb[index].addr = iotlb - vaddr_page;
1231 desc->iotlb[index].attrs = attrs;

    /* Now calculate the new entry */
1234 tn.addend = addend - vaddr_page;
1235 if (prot & PAGE_READ) {
1236 tn.addr_read = address;
1237 if (wp_flags & BP_MEM_READ) {
1238 tn.addr_read |= TLB_WATCHPOINT;
1239 }
1240 } else {
1241 tn.addr_read = -1;
1242 }
1243
1244 if (prot & PAGE_EXEC) {
1245 tn.addr_code = address;
1246 } else {
1247 tn.addr_code = -1;
1248 }
1249
1250 tn.addr_write = -1;
1251 if (prot & PAGE_WRITE) {
1252 tn.addr_write = write_address;
1253 if (prot & PAGE_WRITE_INV) {
1254 tn.addr_write |= TLB_INVALID_MASK;
1255 }
1256 if (wp_flags & BP_MEM_WRITE) {
1257 tn.addr_write |= TLB_WATCHPOINT;
1258 }
1259 }
1260
1261 copy_tlb_helper_locked(te, &tn);
1262 tlb_n_used_entries_inc(env, mmu_idx);
1263 qemu_spin_unlock(&tlb->c.lock);
1264}

/* Add a new TLB entry, but without specifying the memory
 * transaction attributes to be used.
 */
1269void tlb_set_page(CPUState *cpu, target_ulong vaddr,
1270 hwaddr paddr, int prot,
1271 int mmu_idx, target_ulong size)
1272{
1273 tlb_set_page_with_attrs(cpu, vaddr, paddr, MEMTXATTRS_UNSPECIFIED,
1274 prot, mmu_idx, size);
1275}
1276
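/* Translate a host pointer back to a ram_addr_t, aborting on failure. */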
1277static inline ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
1278{
1279 ram_addr_t ram_addr;
1280
1281 ram_addr = qemu_ram_addr_from_host(ptr);
1282 if (ram_addr == RAM_ADDR_INVALID) {
1283 error_report("Bad ram pointer %p", ptr);
1284 abort();
1285 }
1286 return ram_addr;
1287}

/*
 * Note: tlb_fill() can trigger a resize of the TLB. This means that all of the
 * caller's prior references to the TLB table (e.g. CPUTLBEntry pointers) must
 * be discarded and looked up again (e.g. via tlb_entry()).
 */
1294static void tlb_fill(CPUState *cpu, target_ulong addr, int size,
1295 MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
1296{
1297 CPUClass *cc = CPU_GET_CLASS(cpu);
1298 bool ok;

    /*
     * This is not a probe, so only valid return is success; failure
     * should result in exception + longjmp to the cpu loop.
     */
1304 ok = cc->tcg_ops->tlb_fill(cpu, addr, size,
1305 access_type, mmu_idx, false, retaddr);
1306 assert(ok);
1307}
1308
1309static inline void cpu_unaligned_access(CPUState *cpu, vaddr addr,
1310 MMUAccessType access_type,
1311 int mmu_idx, uintptr_t retaddr)
1312{
1313 CPUClass *cc = CPU_GET_CLASS(cpu);
1314
1315 cc->tcg_ops->do_unaligned_access(cpu, addr, access_type, mmu_idx, retaddr);
1316}
1317
1318static inline void cpu_transaction_failed(CPUState *cpu, hwaddr physaddr,
1319 vaddr addr, unsigned size,
1320 MMUAccessType access_type,
1321 int mmu_idx, MemTxAttrs attrs,
1322 MemTxResult response,
1323 uintptr_t retaddr)
1324{
1325 CPUClass *cc = CPU_GET_CLASS(cpu);
1326
1327 if (!cpu->ignore_memory_transaction_failures &&
1328 cc->tcg_ops->do_transaction_failed) {
1329 cc->tcg_ops->do_transaction_failed(cpu, physaddr, addr, size,
1330 access_type, mmu_idx, attrs,
1331 response, retaddr);
1332 }
1333}
1334
1335static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
1336 int mmu_idx, target_ulong addr, uintptr_t retaddr,
1337 MMUAccessType access_type, MemOp op)
1338{
1339 CPUState *cpu = env_cpu(env);
1340 hwaddr mr_offset;
1341 MemoryRegionSection *section;
1342 MemoryRegion *mr;
1343 uint64_t val;
1344 bool locked = false;
1345 MemTxResult r;
1346
1347 section = iotlb_to_section(cpu, iotlbentry->addr, iotlbentry->attrs);
1348 mr = section->mr;
1349 mr_offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr;
1350 cpu->mem_io_pc = retaddr;
1351 if (!cpu->can_do_io) {
1352 cpu_io_recompile(cpu, retaddr);
1353 }
1354
1355 if (!qemu_mutex_iothread_locked()) {
1356 qemu_mutex_lock_iothread();
1357 locked = true;
1358 }
1359 r = memory_region_dispatch_read(mr, mr_offset, &val, op, iotlbentry->attrs);
1360 if (r != MEMTX_OK) {
1361 hwaddr physaddr = mr_offset +
1362 section->offset_within_address_space -
1363 section->offset_within_region;
1364
1365 cpu_transaction_failed(cpu, physaddr, addr, memop_size(op), access_type,
1366 mmu_idx, iotlbentry->attrs, r, retaddr);
1367 }
1368 if (locked) {
1369 qemu_mutex_unlock_iothread();
1370 }
1371
1372 return val;
1373}

/*
 * Save a potentially trashed IOTLB entry for later lookup by plugin.
 * This is read by tlb_plugin_lookup if the iotlb entry doesn't match
 * because of the side effect of io_writex changing memory layout.
 */
1380static void save_iotlb_data(CPUState *cs, hwaddr addr,
1381 MemoryRegionSection *section, hwaddr mr_offset)
1382{
1383#ifdef CONFIG_PLUGIN
1384 SavedIOTLB *saved = &cs->saved_iotlb;
1385 saved->addr = addr;
1386 saved->section = section;
1387 saved->mr_offset = mr_offset;
1388#endif
1389}
1390
1391static void io_writex(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
1392 int mmu_idx, uint64_t val, target_ulong addr,
1393 uintptr_t retaddr, MemOp op)
1394{
1395 CPUState *cpu = env_cpu(env);
1396 hwaddr mr_offset;
1397 MemoryRegionSection *section;
1398 MemoryRegion *mr;
1399 bool locked = false;
1400 MemTxResult r;
1401
1402 section = iotlb_to_section(cpu, iotlbentry->addr, iotlbentry->attrs);
1403 mr = section->mr;
1404 mr_offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr;
1405 if (!cpu->can_do_io) {
1406 cpu_io_recompile(cpu, retaddr);
1407 }
1408 cpu->mem_io_pc = retaddr;

    /*
     * The memory_region_dispatch may trigger a flush/resize
     * so for plugins we save the iotlb_data just in case.
     */
1414 save_iotlb_data(cpu, iotlbentry->addr, section, mr_offset);
1415
1416 if (!qemu_mutex_iothread_locked()) {
1417 qemu_mutex_lock_iothread();
1418 locked = true;
1419 }
1420 r = memory_region_dispatch_write(mr, mr_offset, val, op, iotlbentry->attrs);
1421 if (r != MEMTX_OK) {
1422 hwaddr physaddr = mr_offset +
1423 section->offset_within_address_space -
1424 section->offset_within_region;
1425
1426 cpu_transaction_failed(cpu, physaddr, addr, memop_size(op),
1427 MMU_DATA_STORE, mmu_idx, iotlbentry->attrs, r,
1428 retaddr);
1429 }
1430 if (locked) {
1431 qemu_mutex_unlock_iothread();
1432 }
1433}
1434
1435static inline target_ulong tlb_read_ofs(CPUTLBEntry *entry, size_t ofs)
1436{
1437#if TCG_OVERSIZED_GUEST
1438 return *(target_ulong *)((uintptr_t)entry + ofs);
1439#else
    /* ofs might correspond to .addr_write, so use qatomic_read */
1441 return qatomic_read((target_ulong *)((uintptr_t)entry + ofs));
1442#endif
1443}

/* Return true if ADDR is present in the victim tlb, and has been copied
   back to the main tlb.  */
1447static bool victim_tlb_hit(CPUArchState *env, size_t mmu_idx, size_t index,
1448 size_t elt_ofs, target_ulong page)
1449{
1450 size_t vidx;
1451
1452 assert_cpu_is_self(env_cpu(env));
1453 for (vidx = 0; vidx < CPU_VTLB_SIZE; ++vidx) {
1454 CPUTLBEntry *vtlb = &env_tlb(env)->d[mmu_idx].vtable[vidx];
1455 target_ulong cmp;

        /* elt_ofs might correspond to .addr_write, so use qatomic_read */
1458#if TCG_OVERSIZED_GUEST
1459 cmp = *(target_ulong *)((uintptr_t)vtlb + elt_ofs);
1460#else
1461 cmp = qatomic_read((target_ulong *)((uintptr_t)vtlb + elt_ofs));
1462#endif
1463
1464 if (cmp == page) {
            /* Found entry in victim tlb, swap tlb and iotlb.  */
1466 CPUTLBEntry tmptlb, *tlb = &env_tlb(env)->f[mmu_idx].table[index];
1467
1468 qemu_spin_lock(&env_tlb(env)->c.lock);
1469 copy_tlb_helper_locked(&tmptlb, tlb);
1470 copy_tlb_helper_locked(tlb, vtlb);
1471 copy_tlb_helper_locked(vtlb, &tmptlb);
1472 qemu_spin_unlock(&env_tlb(env)->c.lock);
1473
1474 CPUIOTLBEntry tmpio, *io = &env_tlb(env)->d[mmu_idx].iotlb[index];
1475 CPUIOTLBEntry *vio = &env_tlb(env)->d[mmu_idx].viotlb[vidx];
1476 tmpio = *io; *io = *vio; *vio = tmpio;
1477 return true;
1478 }
1479 }
1480 return false;
1481}

/* Macro to call the above, with local variables from the use context.  */
1484#define VICTIM_TLB_HIT(TY, ADDR) \
1485 victim_tlb_hit(env, mmu_idx, index, offsetof(CPUTLBEntry, TY), \
1486 (ADDR) & TARGET_PAGE_MASK)

/*
 * Return a ram_addr_t for the virtual address for execution.
 *
 * Return -1 if we can't translate and execute from an entire page
 * of RAM.  This will force us to execute by loading and translating
 * one insn at a time, without caching.
 *
 * NOTE: This function will trigger an exception if the page is
 * not executable.
 */
1498tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, target_ulong addr,
1499 void **hostp)
1500{
1501 uintptr_t mmu_idx = cpu_mmu_index(env, true);
1502 uintptr_t index = tlb_index(env, mmu_idx, addr);
1503 CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
1504 void *p;
1505
1506 if (unlikely(!tlb_hit(entry->addr_code, addr))) {
1507 if (!VICTIM_TLB_HIT(addr_code, addr)) {
1508 tlb_fill(env_cpu(env), addr, 0, MMU_INST_FETCH, mmu_idx, 0);
1509 index = tlb_index(env, mmu_idx, addr);
1510 entry = tlb_entry(env, mmu_idx, addr);
1511
1512 if (unlikely(entry->addr_code & TLB_INVALID_MASK)) {
            /*
             * The MMU protection covers a smaller range than a target
             * page, so we must redo the MMU check for every insn.
             */
1517 return -1;
1518 }
1519 }
1520 assert(tlb_hit(entry->addr_code, addr));
1521 }
1522
1523 if (unlikely(entry->addr_code & TLB_MMIO)) {
        /* The region is not backed by RAM.  */
1525 if (hostp) {
1526 *hostp = NULL;
1527 }
1528 return -1;
1529 }
1530
1531 p = (void *)((uintptr_t)addr + entry->addend);
1532 if (hostp) {
1533 *hostp = p;
1534 }
1535 return qemu_ram_addr_from_host_nofail(p);
1536}
1537
1538tb_page_addr_t get_page_addr_code(CPUArchState *env, target_ulong addr)
1539{
1540 return get_page_addr_code_hostp(env, addr, NULL);
1541}
1542
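/*
 * Slow-path handler for stores to pages tagged TLB_NOTDIRTY: invalidate any
 * translated code on the page, mark the page dirty, and once dirty tracking
 * is no longer needed switch the TLB entry back to fast writes.
 */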
1543static void notdirty_write(CPUState *cpu, vaddr mem_vaddr, unsigned size,
1544 CPUIOTLBEntry *iotlbentry, uintptr_t retaddr)
1545{
1546 ram_addr_t ram_addr = mem_vaddr + iotlbentry->addr;
1547
1548 trace_memory_notdirty_write_access(mem_vaddr, ram_addr, size);
1549
1550 if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
1551 struct page_collection *pages
1552 = page_collection_lock(ram_addr, ram_addr + size);
1553 tb_invalidate_phys_page_fast(pages, ram_addr, size, retaddr);
1554 page_collection_unlock(pages);
1555 }
1556
    /*
     * Set both VGA and migration bits for simplicity and to remove
     * the notdirty callback faster.
     */
1561 cpu_physical_memory_set_dirty_range(ram_addr, size, DIRTY_CLIENTS_NOCODE);
1562
    /* We remove the notdirty callback only if the code has been flushed. */
1564 if (!cpu_physical_memory_is_clean(ram_addr)) {
1565 trace_memory_notdirty_set_dirty(mem_vaddr);
1566 tlb_set_dirty(cpu, mem_vaddr);
1567 }
1568}
1569
1570static int probe_access_internal(CPUArchState *env, target_ulong addr,
1571 int fault_size, MMUAccessType access_type,
1572 int mmu_idx, bool nonfault,
1573 void **phost, uintptr_t retaddr)
1574{
1575 uintptr_t index = tlb_index(env, mmu_idx, addr);
1576 CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
1577 target_ulong tlb_addr, page_addr;
1578 size_t elt_ofs;
1579 int flags;
1580
1581 switch (access_type) {
1582 case MMU_DATA_LOAD:
1583 elt_ofs = offsetof(CPUTLBEntry, addr_read);
1584 break;
1585 case MMU_DATA_STORE:
1586 elt_ofs = offsetof(CPUTLBEntry, addr_write);
1587 break;
1588 case MMU_INST_FETCH:
1589 elt_ofs = offsetof(CPUTLBEntry, addr_code);
1590 break;
1591 default:
1592 g_assert_not_reached();
1593 }
1594 tlb_addr = tlb_read_ofs(entry, elt_ofs);
1595
1596 page_addr = addr & TARGET_PAGE_MASK;
1597 if (!tlb_hit_page(tlb_addr, page_addr)) {
1598 if (!victim_tlb_hit(env, mmu_idx, index, elt_ofs, page_addr)) {
1599 CPUState *cs = env_cpu(env);
1600 CPUClass *cc = CPU_GET_CLASS(cs);
1601
1602 if (!cc->tcg_ops->tlb_fill(cs, addr, fault_size, access_type,
1603 mmu_idx, nonfault, retaddr)) {
                /* Non-faulting page table read failed.  */
1605 *phost = NULL;
1606 return TLB_INVALID_MASK;
1607 }

            /* TLB resize via tlb_fill may have moved the entry.  */
1610 entry = tlb_entry(env, mmu_idx, addr);
1611 }
1612 tlb_addr = tlb_read_ofs(entry, elt_ofs);
1613 }
1614 flags = tlb_addr & TLB_FLAGS_MASK;

    /* Fold all "mmio-like" bits into TLB_MMIO.  This is not RAM.  */
1617 if (unlikely(flags & ~(TLB_WATCHPOINT | TLB_NOTDIRTY))) {
1618 *phost = NULL;
1619 return TLB_MMIO;
1620 }

    /* Everything else is RAM. */
1623 *phost = (void *)((uintptr_t)addr + entry->addend);
1624 return flags;
1625}
1626
1627int probe_access_flags(CPUArchState *env, target_ulong addr,
1628 MMUAccessType access_type, int mmu_idx,
1629 bool nonfault, void **phost, uintptr_t retaddr)
1630{
1631 int flags;
1632
1633 flags = probe_access_internal(env, addr, 0, access_type, mmu_idx,
1634 nonfault, phost, retaddr);

    /* Handle clean RAM pages.  */
1637 if (unlikely(flags & TLB_NOTDIRTY)) {
1638 uintptr_t index = tlb_index(env, mmu_idx, addr);
1639 CPUIOTLBEntry *iotlbentry = &env_tlb(env)->d[mmu_idx].iotlb[index];
1640
1641 notdirty_write(env_cpu(env), addr, 1, iotlbentry, retaddr);
1642 flags &= ~TLB_NOTDIRTY;
1643 }
1644
1645 return flags;
1646}
1647
1648void *probe_access(CPUArchState *env, target_ulong addr, int size,
1649 MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
1650{
1651 void *host;
1652 int flags;
1653
1654 g_assert(-(addr | TARGET_PAGE_MASK) >= size);
1655
1656 flags = probe_access_internal(env, addr, size, access_type, mmu_idx,
1657 false, &host, retaddr);

    /* Per the interface, size == 0 merely faults the access. */
1660 if (size == 0) {
1661 return NULL;
1662 }
1663
1664 if (unlikely(flags & (TLB_NOTDIRTY | TLB_WATCHPOINT))) {
1665 uintptr_t index = tlb_index(env, mmu_idx, addr);
1666 CPUIOTLBEntry *iotlbentry = &env_tlb(env)->d[mmu_idx].iotlb[index];

        /* Handle watchpoints.  */
1669 if (flags & TLB_WATCHPOINT) {
1670 int wp_access = (access_type == MMU_DATA_STORE
1671 ? BP_MEM_WRITE : BP_MEM_READ);
1672 cpu_check_watchpoint(env_cpu(env), addr, size,
1673 iotlbentry->attrs, wp_access, retaddr);
1674 }

        /* Handle clean RAM pages.  */
1677 if (flags & TLB_NOTDIRTY) {
1678 notdirty_write(env_cpu(env), addr, 1, iotlbentry, retaddr);
1679 }
1680 }
1681
1682 return host;
1683}
1684
1685void *tlb_vaddr_to_host(CPUArchState *env, abi_ptr addr,
1686 MMUAccessType access_type, int mmu_idx)
1687{
1688 void *host;
1689 int flags;
1690
1691 flags = probe_access_internal(env, addr, 0, access_type,
1692 mmu_idx, true, &host, 0);

    /* No combination of flags are expected by the caller. */
1695 return flags ? NULL : host;
1696}
1697
1698#ifdef CONFIG_PLUGIN
/*
 * Perform a TLB lookup and populate the qemu_plugin_hwaddr structure.
 * This should be a hot path as we will have just looked this path up
 * in the softmmu lookup code (or helper). We don't handle re-fills or
 * checking the victim table. This is purely informational.
 *
 * This almost never fails as the memory access being instrumented
 * should have just filled the TLB. The one corner case is io_writex
 * which can cause a TLB flush and potential resizing of the TLBs,
 * losing the information we need. In those cases we need to recover
 * data from a copy of the iotlbentry. As long as this always occurs
 * from the same thread (which a mem callback will be) this is safe.
 */
1713bool tlb_plugin_lookup(CPUState *cpu, target_ulong addr, int mmu_idx,
1714 bool is_store, struct qemu_plugin_hwaddr *data)
1715{
1716 CPUArchState *env = cpu->env_ptr;
1717 CPUTLBEntry *tlbe = tlb_entry(env, mmu_idx, addr);
1718 uintptr_t index = tlb_index(env, mmu_idx, addr);
1719 target_ulong tlb_addr = is_store ? tlb_addr_write(tlbe) : tlbe->addr_read;
1720
1721 if (likely(tlb_hit(tlb_addr, addr))) {
        /* We must have an iotlb entry for MMIO */
1723 if (tlb_addr & TLB_MMIO) {
1724 CPUIOTLBEntry *iotlbentry;
1725 iotlbentry = &env_tlb(env)->d[mmu_idx].iotlb[index];
1726 data->is_io = true;
1727 data->v.io.section = iotlb_to_section(cpu, iotlbentry->addr, iotlbentry->attrs);
1728 data->v.io.offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr;
1729 } else {
1730 data->is_io = false;
1731 data->v.ram.hostaddr = (void *)((uintptr_t)addr + tlbe->addend);
1732 }
1733 return true;
1734 } else {
1735 SavedIOTLB *saved = &cpu->saved_iotlb;
1736 data->is_io = true;
1737 data->v.io.section = saved->section;
1738 data->v.io.offset = saved->mr_offset;
1739 return true;
1740 }
1741}
1742
1743#endif

/*
 * Probe for an atomic operation.  Do not allow unaligned operations,
 * or io operations to proceed.  Return the host address.
 *
 * @prot may be PAGE_READ, PAGE_WRITE, or PAGE_READ|PAGE_WRITE.
 */
1751static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
1752 TCGMemOpIdx oi, int size, int prot,
1753 uintptr_t retaddr)
1754{
1755 size_t mmu_idx = get_mmuidx(oi);
1756 MemOp mop = get_memop(oi);
1757 int a_bits = get_alignment_bits(mop);
1758 uintptr_t index;
1759 CPUTLBEntry *tlbe;
1760 target_ulong tlb_addr;
1761 void *hostaddr;
1762
1763
1764 retaddr -= GETPC_ADJ;

    /* Enforce guest required alignment.  */
1767 if (unlikely(a_bits > 0 && (addr & ((1 << a_bits) - 1)))) {
        /* ??? Maybe indicate atomic op to cpu_unaligned_access */
1769 cpu_unaligned_access(env_cpu(env), addr, MMU_DATA_STORE,
1770 mmu_idx, retaddr);
1771 }

    /* Enforce qemu required alignment.  */
1774 if (unlikely(addr & (size - 1))) {
        /* We get here if guest alignment was not requested,
           or was not enforced by cpu_unaligned_access above.
           We might widen the access and emulate, but for now
           mark an exception and exit the cpu loop.  */
1779 goto stop_the_world;
1780 }
1781
1782 index = tlb_index(env, mmu_idx, addr);
1783 tlbe = tlb_entry(env, mmu_idx, addr);
1784
1785
1786 if (prot & PAGE_WRITE) {
1787 tlb_addr = tlb_addr_write(tlbe);
1788 if (!tlb_hit(tlb_addr, addr)) {
1789 if (!VICTIM_TLB_HIT(addr_write, addr)) {
1790 tlb_fill(env_cpu(env), addr, size,
1791 MMU_DATA_STORE, mmu_idx, retaddr);
1792 index = tlb_index(env, mmu_idx, addr);
1793 tlbe = tlb_entry(env, mmu_idx, addr);
1794 }
1795 tlb_addr = tlb_addr_write(tlbe) & ~TLB_INVALID_MASK;
1796 }

        /* Let the guest notice RMW on a write-only page.  */
1799 if ((prot & PAGE_READ) &&
1800 unlikely(tlbe->addr_read != (tlb_addr & ~TLB_NOTDIRTY))) {
1801 tlb_fill(env_cpu(env), addr, size,
1802 MMU_DATA_LOAD, mmu_idx, retaddr);
            /*
             * Since we don't support reads and writes to different addresses,
             * and we do have the proper page loaded for write, this shouldn't
             * ever return.  But just in case, handle via stop-the-world.
             */
1808 goto stop_the_world;
1809 }
1810 } else {
1811 tlb_addr = tlbe->addr_read;
1812 if (!tlb_hit(tlb_addr, addr)) {
1813 if (!VICTIM_TLB_HIT(addr_write, addr)) {
1814 tlb_fill(env_cpu(env), addr, size,
1815 MMU_DATA_LOAD, mmu_idx, retaddr);
1816 index = tlb_index(env, mmu_idx, addr);
1817 tlbe = tlb_entry(env, mmu_idx, addr);
1818 }
1819 tlb_addr = tlbe->addr_read & ~TLB_INVALID_MASK;
1820 }
1821 }

    /* Notice an IO access or a needs-MMU-lookup access */
1824 if (unlikely(tlb_addr & TLB_MMIO)) {
        /* There's really nothing that can be done to
           support this apart from stop-the-world.  */
1827 goto stop_the_world;
1828 }
1829
1830 hostaddr = (void *)((uintptr_t)addr + tlbe->addend);
1831
1832 if (unlikely(tlb_addr & TLB_NOTDIRTY)) {
1833 notdirty_write(env_cpu(env), addr, size,
1834 &env_tlb(env)->d[mmu_idx].iotlb[index], retaddr);
1835 }
1836
1837 return hostaddr;
1838
1839 stop_the_world:
1840 cpu_loop_exit_atomic(env_cpu(env), retaddr);
1841}
1842
/*
 * Load Helpers
 *
 * We support two different access types. SOFTMMU_CODE_ACCESS is
 * specifically for reading instructions from system memory. It is
 * called by the translation loop and in some helpers where the code
 * is accessed by the translation loop after the memory helper (e.g.
 * load).
 */
1852typedef uint64_t FullLoadHelper(CPUArchState *env, target_ulong addr,
1853 TCGMemOpIdx oi, uintptr_t retaddr);
1854
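/* Dispatch on MemOp and perform the corresponding host load. */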
1855static inline uint64_t QEMU_ALWAYS_INLINE
1856load_memop(const void *haddr, MemOp op)
1857{
1858 switch (op) {
1859 case MO_UB:
1860 return ldub_p(haddr);
1861 case MO_BEUW:
1862 return lduw_be_p(haddr);
1863 case MO_LEUW:
1864 return lduw_le_p(haddr);
1865 case MO_BEUL:
1866 return (uint32_t)ldl_be_p(haddr);
1867 case MO_LEUL:
1868 return (uint32_t)ldl_le_p(haddr);
1869 case MO_BEQ:
1870 return ldq_be_p(haddr);
1871 case MO_LEQ:
1872 return ldq_le_p(haddr);
1873 default:
1874 qemu_build_not_reached();
1875 }
1876}
1877
1878static inline uint64_t QEMU_ALWAYS_INLINE
1879load_helper(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi,
1880 uintptr_t retaddr, MemOp op, bool code_read,
1881 FullLoadHelper *full_load)
1882{
1883 uintptr_t mmu_idx = get_mmuidx(oi);
1884 uintptr_t index = tlb_index(env, mmu_idx, addr);
1885 CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
1886 target_ulong tlb_addr = code_read ? entry->addr_code : entry->addr_read;
1887 const size_t tlb_off = code_read ?
1888 offsetof(CPUTLBEntry, addr_code) : offsetof(CPUTLBEntry, addr_read);
1889 const MMUAccessType access_type =
1890 code_read ? MMU_INST_FETCH : MMU_DATA_LOAD;
1891 unsigned a_bits = get_alignment_bits(get_memop(oi));
1892 void *haddr;
1893 uint64_t res;
1894 size_t size = memop_size(op);

    /* Handle CPU specific unaligned behaviour */
1897 if (addr & ((1 << a_bits) - 1)) {
1898 cpu_unaligned_access(env_cpu(env), addr, access_type,
1899 mmu_idx, retaddr);
1900 }

    /* If the TLB entry is for a different page, reload and try again.  */
1903 if (!tlb_hit(tlb_addr, addr)) {
1904 if (!victim_tlb_hit(env, mmu_idx, index, tlb_off,
1905 addr & TARGET_PAGE_MASK)) {
1906 tlb_fill(env_cpu(env), addr, size,
1907 access_type, mmu_idx, retaddr);
1908 index = tlb_index(env, mmu_idx, addr);
1909 entry = tlb_entry(env, mmu_idx, addr);
1910 }
1911 tlb_addr = code_read ? entry->addr_code : entry->addr_read;
1912 tlb_addr &= ~TLB_INVALID_MASK;
1913 }

    /* Handle anything that isn't just a straight memory access.  */
1916 if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
1917 CPUIOTLBEntry *iotlbentry;
1918 bool need_swap;

        /* For anything that is unaligned, recurse through full_load.  */
1921 if ((addr & (size - 1)) != 0) {
1922 goto do_unaligned_access;
1923 }
1924
1925 iotlbentry = &env_tlb(env)->d[mmu_idx].iotlb[index];

        /* Handle watchpoints.  */
1928 if (unlikely(tlb_addr & TLB_WATCHPOINT)) {
            /* On watchpoint hit, this will longjmp out.  */
1930 cpu_check_watchpoint(env_cpu(env), addr, size,
1931 iotlbentry->attrs, BP_MEM_READ, retaddr);
1932 }
1933
1934 need_swap = size > 1 && (tlb_addr & TLB_BSWAP);

        /* Handle I/O access.  */
1937 if (likely(tlb_addr & TLB_MMIO)) {
1938 return io_readx(env, iotlbentry, mmu_idx, addr, retaddr,
1939 access_type, op ^ (need_swap * MO_BSWAP));
1940 }
1941
1942 haddr = (void *)((uintptr_t)addr + entry->addend);

        /*
         * Keep these two load_memop separate to ensure that the compiler
         * is able to fold the entire function to a single instruction.
         * There is a build-time assert inside to remind you of this.  ;-)
         */
1949 if (unlikely(need_swap)) {
1950 return load_memop(haddr, op ^ MO_BSWAP);
1951 }
1952 return load_memop(haddr, op);
1953 }

    /* Handle slow unaligned access (it spans two pages or IO).  */
1956 if (size > 1
1957 && unlikely((addr & ~TARGET_PAGE_MASK) + size - 1
1958 >= TARGET_PAGE_SIZE)) {
1959 target_ulong addr1, addr2;
1960 uint64_t r1, r2;
1961 unsigned shift;
1962 do_unaligned_access:
1963 addr1 = addr & ~((target_ulong)size - 1);
1964 addr2 = addr1 + size;
1965 r1 = full_load(env, addr1, oi, retaddr);
1966 r2 = full_load(env, addr2, oi, retaddr);
1967 shift = (addr & (size - 1)) * 8;
1968
1969 if (memop_big_endian(op)) {
            /* Big-endian combine.  */
1971 res = (r1 << shift) | (r2 >> ((size * 8) - shift));
1972 } else {
            /* Little-endian combine.  */
1974 res = (r1 >> shift) | (r2 << ((size * 8) - shift));
1975 }
1976 return res & MAKE_64BIT_MASK(0, size * 8);
1977 }
1978
1979 haddr = (void *)((uintptr_t)addr + entry->addend);
1980 return load_memop(haddr, op);
1981}
1982
/*
 * For the benefit of TCG generated code, we want to avoid the
 * complication of ABI-specific return type promotion and always
 * return a value extended to the register size of the host. This is
 * tcg_target_long, except in the case of a 32-bit host and 64-bit
 * data, and for that we always have uint64_t.
 *
 * We don't bother with this widened value for SOFTMMU_CODE_ACCESS.
 */

1993static uint64_t full_ldub_mmu(CPUArchState *env, target_ulong addr,
1994 TCGMemOpIdx oi, uintptr_t retaddr)
1995{
1996 return load_helper(env, addr, oi, retaddr, MO_UB, false, full_ldub_mmu);
1997}
1998
1999tcg_target_ulong helper_ret_ldub_mmu(CPUArchState *env, target_ulong addr,
2000 TCGMemOpIdx oi, uintptr_t retaddr)
2001{
2002 return full_ldub_mmu(env, addr, oi, retaddr);
2003}
2004
2005static uint64_t full_le_lduw_mmu(CPUArchState *env, target_ulong addr,
2006 TCGMemOpIdx oi, uintptr_t retaddr)
2007{
2008 return load_helper(env, addr, oi, retaddr, MO_LEUW, false,
2009 full_le_lduw_mmu);
2010}
2011
2012tcg_target_ulong helper_le_lduw_mmu(CPUArchState *env, target_ulong addr,
2013 TCGMemOpIdx oi, uintptr_t retaddr)
2014{
2015 return full_le_lduw_mmu(env, addr, oi, retaddr);
2016}
2017
2018static uint64_t full_be_lduw_mmu(CPUArchState *env, target_ulong addr,
2019 TCGMemOpIdx oi, uintptr_t retaddr)
2020{
2021 return load_helper(env, addr, oi, retaddr, MO_BEUW, false,
2022 full_be_lduw_mmu);
2023}
2024
2025tcg_target_ulong helper_be_lduw_mmu(CPUArchState *env, target_ulong addr,
2026 TCGMemOpIdx oi, uintptr_t retaddr)
2027{
2028 return full_be_lduw_mmu(env, addr, oi, retaddr);
2029}
2030
2031static uint64_t full_le_ldul_mmu(CPUArchState *env, target_ulong addr,
2032 TCGMemOpIdx oi, uintptr_t retaddr)
2033{
2034 return load_helper(env, addr, oi, retaddr, MO_LEUL, false,
2035 full_le_ldul_mmu);
2036}
2037
2038tcg_target_ulong helper_le_ldul_mmu(CPUArchState *env, target_ulong addr,
2039 TCGMemOpIdx oi, uintptr_t retaddr)
2040{
2041 return full_le_ldul_mmu(env, addr, oi, retaddr);
2042}
2043
2044static uint64_t full_be_ldul_mmu(CPUArchState *env, target_ulong addr,
2045 TCGMemOpIdx oi, uintptr_t retaddr)
2046{
2047 return load_helper(env, addr, oi, retaddr, MO_BEUL, false,
2048 full_be_ldul_mmu);
2049}
2050
2051tcg_target_ulong helper_be_ldul_mmu(CPUArchState *env, target_ulong addr,
2052 TCGMemOpIdx oi, uintptr_t retaddr)
2053{
2054 return full_be_ldul_mmu(env, addr, oi, retaddr);
2055}
2056
2057uint64_t helper_le_ldq_mmu(CPUArchState *env, target_ulong addr,
2058 TCGMemOpIdx oi, uintptr_t retaddr)
2059{
2060 return load_helper(env, addr, oi, retaddr, MO_LEQ, false,
2061 helper_le_ldq_mmu);
2062}
2063
2064uint64_t helper_be_ldq_mmu(CPUArchState *env, target_ulong addr,
2065 TCGMemOpIdx oi, uintptr_t retaddr)
2066{
2067 return load_helper(env, addr, oi, retaddr, MO_BEQ, false,
2068 helper_be_ldq_mmu);
2069}
2070
/*
 * Provide signed versions of the load routines as well.  We can of course
 * avoid this for 64-bit data, or for 32-bit data on 32-bit host.
 */

2077tcg_target_ulong helper_ret_ldsb_mmu(CPUArchState *env, target_ulong addr,
2078 TCGMemOpIdx oi, uintptr_t retaddr)
2079{
2080 return (int8_t)helper_ret_ldub_mmu(env, addr, oi, retaddr);
2081}
2082
2083tcg_target_ulong helper_le_ldsw_mmu(CPUArchState *env, target_ulong addr,
2084 TCGMemOpIdx oi, uintptr_t retaddr)
2085{
2086 return (int16_t)helper_le_lduw_mmu(env, addr, oi, retaddr);
2087}
2088
2089tcg_target_ulong helper_be_ldsw_mmu(CPUArchState *env, target_ulong addr,
2090 TCGMemOpIdx oi, uintptr_t retaddr)
2091{
2092 return (int16_t)helper_be_lduw_mmu(env, addr, oi, retaddr);
2093}
2094
2095tcg_target_ulong helper_le_ldsl_mmu(CPUArchState *env, target_ulong addr,
2096 TCGMemOpIdx oi, uintptr_t retaddr)
2097{
2098 return (int32_t)helper_le_ldul_mmu(env, addr, oi, retaddr);
2099}
2100
2101tcg_target_ulong helper_be_ldsl_mmu(CPUArchState *env, target_ulong addr,
2102 TCGMemOpIdx oi, uintptr_t retaddr)
2103{
2104 return (int32_t)helper_be_ldul_mmu(env, addr, oi, retaddr);
2105}
2106
/*
 * Load helpers for cpu_ldst.h.
 */

2111static inline uint64_t cpu_load_helper(CPUArchState *env, abi_ptr addr,
2112 int mmu_idx, uintptr_t retaddr,
2113 MemOp op, FullLoadHelper *full_load)
2114{
2115 uint16_t meminfo;
2116 TCGMemOpIdx oi;
2117 uint64_t ret;
2118
2119 meminfo = trace_mem_get_info(op, mmu_idx, false);
2120 trace_guest_mem_before_exec(env_cpu(env), addr, meminfo);
2121
2122 op &= ~MO_SIGN;
2123 oi = make_memop_idx(op, mmu_idx);
2124 ret = full_load(env, addr, oi, retaddr);
2125
2126 qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, meminfo);
2127
2128 return ret;
2129}
2130
2131uint32_t cpu_ldub_mmuidx_ra(CPUArchState *env, abi_ptr addr,
2132 int mmu_idx, uintptr_t ra)
2133{
2134 return cpu_load_helper(env, addr, mmu_idx, ra, MO_UB, full_ldub_mmu);
2135}
2136
2137int cpu_ldsb_mmuidx_ra(CPUArchState *env, abi_ptr addr,
2138 int mmu_idx, uintptr_t ra)
2139{
2140 return (int8_t)cpu_load_helper(env, addr, mmu_idx, ra, MO_SB,
2141 full_ldub_mmu);
2142}
2143
2144uint32_t cpu_lduw_be_mmuidx_ra(CPUArchState *env, abi_ptr addr,
2145 int mmu_idx, uintptr_t ra)
2146{
2147 return cpu_load_helper(env, addr, mmu_idx, ra, MO_BEUW, full_be_lduw_mmu);
2148}
2149
2150int cpu_ldsw_be_mmuidx_ra(CPUArchState *env, abi_ptr addr,
2151 int mmu_idx, uintptr_t ra)
2152{
2153 return (int16_t)cpu_load_helper(env, addr, mmu_idx, ra, MO_BESW,
2154 full_be_lduw_mmu);
2155}
2156
2157uint32_t cpu_ldl_be_mmuidx_ra(CPUArchState *env, abi_ptr addr,
2158 int mmu_idx, uintptr_t ra)
2159{
2160 return cpu_load_helper(env, addr, mmu_idx, ra, MO_BEUL, full_be_ldul_mmu);
2161}
2162
2163uint64_t cpu_ldq_be_mmuidx_ra(CPUArchState *env, abi_ptr addr,
2164 int mmu_idx, uintptr_t ra)
2165{
2166 return cpu_load_helper(env, addr, mmu_idx, ra, MO_BEQ, helper_be_ldq_mmu);
2167}
2168
2169uint32_t cpu_lduw_le_mmuidx_ra(CPUArchState *env, abi_ptr addr,
2170 int mmu_idx, uintptr_t ra)
2171{
2172 return cpu_load_helper(env, addr, mmu_idx, ra, MO_LEUW, full_le_lduw_mmu);
2173}
2174
2175int cpu_ldsw_le_mmuidx_ra(CPUArchState *env, abi_ptr addr,
2176 int mmu_idx, uintptr_t ra)
2177{
2178 return (int16_t)cpu_load_helper(env, addr, mmu_idx, ra, MO_LESW,
2179 full_le_lduw_mmu);
2180}
2181
2182uint32_t cpu_ldl_le_mmuidx_ra(CPUArchState *env, abi_ptr addr,
2183 int mmu_idx, uintptr_t ra)
2184{
2185 return cpu_load_helper(env, addr, mmu_idx, ra, MO_LEUL, full_le_ldul_mmu);
2186}
2187
2188uint64_t cpu_ldq_le_mmuidx_ra(CPUArchState *env, abi_ptr addr,
2189 int mmu_idx, uintptr_t ra)
2190{
2191 return cpu_load_helper(env, addr, mmu_idx, ra, MO_LEQ, helper_le_ldq_mmu);
2192}
2193
2194uint32_t cpu_ldub_data_ra(CPUArchState *env, target_ulong ptr,
2195 uintptr_t retaddr)
2196{
2197 return cpu_ldub_mmuidx_ra(env, ptr, cpu_mmu_index(env, false), retaddr);
2198}
2199
2200int cpu_ldsb_data_ra(CPUArchState *env, target_ulong ptr, uintptr_t retaddr)
2201{
2202 return cpu_ldsb_mmuidx_ra(env, ptr, cpu_mmu_index(env, false), retaddr);
2203}
2204
2205uint32_t cpu_lduw_be_data_ra(CPUArchState *env, target_ulong ptr,
2206 uintptr_t retaddr)
2207{
2208 return cpu_lduw_be_mmuidx_ra(env, ptr, cpu_mmu_index(env, false), retaddr);
2209}
2210
2211int cpu_ldsw_be_data_ra(CPUArchState *env, target_ulong ptr, uintptr_t retaddr)
2212{
2213 return cpu_ldsw_be_mmuidx_ra(env, ptr, cpu_mmu_index(env, false), retaddr);
2214}
2215
2216uint32_t cpu_ldl_be_data_ra(CPUArchState *env, target_ulong ptr,
2217 uintptr_t retaddr)
2218{
2219 return cpu_ldl_be_mmuidx_ra(env, ptr, cpu_mmu_index(env, false), retaddr);
2220}
2221
2222uint64_t cpu_ldq_be_data_ra(CPUArchState *env, target_ulong ptr,
2223 uintptr_t retaddr)
2224{
2225 return cpu_ldq_be_mmuidx_ra(env, ptr, cpu_mmu_index(env, false), retaddr);
2226}
2227
2228uint32_t cpu_lduw_le_data_ra(CPUArchState *env, target_ulong ptr,
2229 uintptr_t retaddr)
2230{
2231 return cpu_lduw_le_mmuidx_ra(env, ptr, cpu_mmu_index(env, false), retaddr);
2232}
2233
2234int cpu_ldsw_le_data_ra(CPUArchState *env, target_ulong ptr, uintptr_t retaddr)
2235{
2236 return cpu_ldsw_le_mmuidx_ra(env, ptr, cpu_mmu_index(env, false), retaddr);
2237}
2238
2239uint32_t cpu_ldl_le_data_ra(CPUArchState *env, target_ulong ptr,
2240 uintptr_t retaddr)
2241{
2242 return cpu_ldl_le_mmuidx_ra(env, ptr, cpu_mmu_index(env, false), retaddr);
2243}
2244
2245uint64_t cpu_ldq_le_data_ra(CPUArchState *env, target_ulong ptr,
2246 uintptr_t retaddr)
2247{
2248 return cpu_ldq_le_mmuidx_ra(env, ptr, cpu_mmu_index(env, false), retaddr);
2249}
2250
2251uint32_t cpu_ldub_data(CPUArchState *env, target_ulong ptr)
2252{
2253 return cpu_ldub_data_ra(env, ptr, 0);
2254}
2255
2256int cpu_ldsb_data(CPUArchState *env, target_ulong ptr)
2257{
2258 return cpu_ldsb_data_ra(env, ptr, 0);
2259}
2260
2261uint32_t cpu_lduw_be_data(CPUArchState *env, target_ulong ptr)
2262{
2263 return cpu_lduw_be_data_ra(env, ptr, 0);
2264}
2265
2266int cpu_ldsw_be_data(CPUArchState *env, target_ulong ptr)
2267{
2268 return cpu_ldsw_be_data_ra(env, ptr, 0);
2269}
2270
2271uint32_t cpu_ldl_be_data(CPUArchState *env, target_ulong ptr)
2272{
2273 return cpu_ldl_be_data_ra(env, ptr, 0);
2274}
2275
2276uint64_t cpu_ldq_be_data(CPUArchState *env, target_ulong ptr)
2277{
2278 return cpu_ldq_be_data_ra(env, ptr, 0);
2279}
2280
2281uint32_t cpu_lduw_le_data(CPUArchState *env, target_ulong ptr)
2282{
2283 return cpu_lduw_le_data_ra(env, ptr, 0);
2284}
2285
2286int cpu_ldsw_le_data(CPUArchState *env, target_ulong ptr)
2287{
2288 return cpu_ldsw_le_data_ra(env, ptr, 0);
2289}
2290
2291uint32_t cpu_ldl_le_data(CPUArchState *env, target_ulong ptr)
2292{
2293 return cpu_ldl_le_data_ra(env, ptr, 0);
2294}
2295
2296uint64_t cpu_ldq_le_data(CPUArchState *env, target_ulong ptr)
2297{
2298 return cpu_ldq_le_data_ra(env, ptr, 0);
2299}
2300
/*
 * Store Helpers
 */

2305static inline void QEMU_ALWAYS_INLINE
2306store_memop(void *haddr, uint64_t val, MemOp op)
2307{
2308 switch (op) {
2309 case MO_UB:
2310 stb_p(haddr, val);
2311 break;
2312 case MO_BEUW:
2313 stw_be_p(haddr, val);
2314 break;
2315 case MO_LEUW:
2316 stw_le_p(haddr, val);
2317 break;
2318 case MO_BEUL:
2319 stl_be_p(haddr, val);
2320 break;
2321 case MO_LEUL:
2322 stl_le_p(haddr, val);
2323 break;
2324 case MO_BEQ:
2325 stq_be_p(haddr, val);
2326 break;
2327 case MO_LEQ:
2328 stq_le_p(haddr, val);
2329 break;
2330 default:
2331 qemu_build_not_reached();
2332 }
2333}

static void __attribute__((noinline))
store_helper_unaligned(CPUArchState *env, target_ulong addr, uint64_t val,
                       uintptr_t retaddr, size_t size, uintptr_t mmu_idx,
                       bool big_endian)
{
    const size_t tlb_off = offsetof(CPUTLBEntry, addr_write);
    uintptr_t index, index2;
    CPUTLBEntry *entry, *entry2;
    target_ulong page2, tlb_addr, tlb_addr2;
    TCGMemOpIdx oi;
    size_t size2;
    int i;

    /*
     * Ensure the second page is in the TLB.  Note that the first page
     * is already guaranteed to be filled, and that the second page
     * cannot evict the first.
     */
    page2 = (addr + size) & TARGET_PAGE_MASK;
    size2 = (addr + size) & ~TARGET_PAGE_MASK;
    index2 = tlb_index(env, mmu_idx, page2);
    entry2 = tlb_entry(env, mmu_idx, page2);

    tlb_addr2 = tlb_addr_write(entry2);
    if (!tlb_hit_page(tlb_addr2, page2)) {
        if (!victim_tlb_hit(env, mmu_idx, index2, tlb_off, page2)) {
            tlb_fill(env_cpu(env), page2, size2, MMU_DATA_STORE,
                     mmu_idx, retaddr);
            index2 = tlb_index(env, mmu_idx, page2);
            entry2 = tlb_entry(env, mmu_idx, page2);
        }
        tlb_addr2 = tlb_addr_write(entry2);
    }

    index = tlb_index(env, mmu_idx, addr);
    entry = tlb_entry(env, mmu_idx, addr);
    tlb_addr = tlb_addr_write(entry);

    /*
     * Handle watchpoints on both pages.  Since this may trap, all of
     * the prior accesses are complete.
     */
    if (unlikely(tlb_addr & TLB_WATCHPOINT)) {
        cpu_check_watchpoint(env_cpu(env), addr, size - size2,
                             env_tlb(env)->d[mmu_idx].iotlb[index].attrs,
                             BP_MEM_WRITE, retaddr);
    }
    if (unlikely(tlb_addr2 & TLB_WATCHPOINT)) {
        cpu_check_watchpoint(env_cpu(env), page2, size2,
                             env_tlb(env)->d[mmu_idx].iotlb[index2].attrs,
                             BP_MEM_WRITE, retaddr);
    }

    /*
     * Not efficient, but simple: the store is split into byte stores.
     * This loop must go in the forward direction to avoid issues with
     * self-modifying code in Windows 64-bit.
     */
    oi = make_memop_idx(MO_UB, mmu_idx);
    if (big_endian) {
        for (i = 0; i < size; ++i) {
            /* Big-endian extract.  */
            uint8_t val8 = val >> (((size - 1) * 8) - (i * 8));
            helper_ret_stb_mmu(env, addr + i, val8, oi, retaddr);
        }
    } else {
        for (i = 0; i < size; ++i) {
            /* Little-endian extract.  */
            uint8_t val8 = val >> (i * 8);
            helper_ret_stb_mmu(env, addr + i, val8, oi, retaddr);
        }
    }
}
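
/*
 * Worked example (editor's addition): with 4KiB pages, a 4-byte
 * little-endian store of 0x11223344 at addr 0xffe crosses into the next
 * page.  Here size2 = (0xffe + 4) & ~TARGET_PAGE_MASK = 2, so the loop
 * above stores 0x44 at 0xffe and 0x33 at 0xfff on the first page, then
 * 0x22 at 0x1000 and 0x11 at 0x1001 on the second, each byte going back
 * through helper_ret_stb_mmu so that per-byte TLB, watchpoint and MMIO
 * handling still applies.
 */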

static inline void QEMU_ALWAYS_INLINE
store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
             TCGMemOpIdx oi, uintptr_t retaddr, MemOp op)
{
    uintptr_t mmu_idx = get_mmuidx(oi);
    uintptr_t index = tlb_index(env, mmu_idx, addr);
    CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
    target_ulong tlb_addr = tlb_addr_write(entry);
    const size_t tlb_off = offsetof(CPUTLBEntry, addr_write);
    unsigned a_bits = get_alignment_bits(get_memop(oi));
    void *haddr;
    size_t size = memop_size(op);

    /* Handle CPU specific unaligned behaviour.  */
    if (addr & ((1 << a_bits) - 1)) {
        cpu_unaligned_access(env_cpu(env), addr, MMU_DATA_STORE,
                             mmu_idx, retaddr);
    }

    /* If the TLB entry is for a different page, reload and try again.  */
    if (!tlb_hit(tlb_addr, addr)) {
        if (!victim_tlb_hit(env, mmu_idx, index, tlb_off,
                            addr & TARGET_PAGE_MASK)) {
            tlb_fill(env_cpu(env), addr, size, MMU_DATA_STORE,
                     mmu_idx, retaddr);
            index = tlb_index(env, mmu_idx, addr);
            entry = tlb_entry(env, mmu_idx, addr);
        }
        tlb_addr = tlb_addr_write(entry) & ~TLB_INVALID_MASK;
    }

    /* Handle anything that isn't just a straight memory access.  */
    if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
        CPUIOTLBEntry *iotlbentry;
        bool need_swap;

        /* For anything that is unaligned, recurse through byte stores.  */
        if ((addr & (size - 1)) != 0) {
            goto do_unaligned_access;
        }

        iotlbentry = &env_tlb(env)->d[mmu_idx].iotlb[index];

        /* Handle watchpoints.  */
        if (unlikely(tlb_addr & TLB_WATCHPOINT)) {
            /* On watchpoint hit, this will longjmp out.  */
            cpu_check_watchpoint(env_cpu(env), addr, size,
                                 iotlbentry->attrs, BP_MEM_WRITE, retaddr);
        }

        need_swap = size > 1 && (tlb_addr & TLB_BSWAP);

        /* Handle I/O access.  */
        if (tlb_addr & TLB_MMIO) {
            io_writex(env, iotlbentry, mmu_idx, val, addr, retaddr,
                      op ^ (need_swap * MO_BSWAP));
            return;
        }

        /* Ignore writes to ROM.  */
        if (unlikely(tlb_addr & TLB_DISCARD_WRITE)) {
            return;
        }

        /* Handle clean RAM pages.  */
        if (tlb_addr & TLB_NOTDIRTY) {
            notdirty_write(env_cpu(env), addr, size, iotlbentry, retaddr);
        }

        haddr = (void *)((uintptr_t)addr + entry->addend);

        /*
         * Keep these two store_memop calls separate to ensure that the
         * compiler is able to fold the entire function to a single
         * instance.
         */
        if (unlikely(need_swap)) {
            store_memop(haddr, val, op ^ MO_BSWAP);
        } else {
            store_memop(haddr, val, op);
        }
        return;
    }

    /* Handle slow unaligned access (it spans two pages).  */
    if (size > 1
        && unlikely((addr & ~TARGET_PAGE_MASK) + size - 1
                    >= TARGET_PAGE_SIZE)) {
    do_unaligned_access:
        store_helper_unaligned(env, addr, val, retaddr, size,
                               mmu_idx, memop_big_endian(op));
        return;
    }

    haddr = (void *)((uintptr_t)addr + entry->addend);
    store_memop(haddr, val, op);
}

void __attribute__((noinline))
helper_ret_stb_mmu(CPUArchState *env, target_ulong addr, uint8_t val,
                   TCGMemOpIdx oi, uintptr_t retaddr)
{
    store_helper(env, addr, val, oi, retaddr, MO_UB);
}

void helper_le_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
                       TCGMemOpIdx oi, uintptr_t retaddr)
{
    store_helper(env, addr, val, oi, retaddr, MO_LEUW);
}

void helper_be_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
                       TCGMemOpIdx oi, uintptr_t retaddr)
{
    store_helper(env, addr, val, oi, retaddr, MO_BEUW);
}

void helper_le_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
                       TCGMemOpIdx oi, uintptr_t retaddr)
{
    store_helper(env, addr, val, oi, retaddr, MO_LEUL);
}

void helper_be_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
                       TCGMemOpIdx oi, uintptr_t retaddr)
{
    store_helper(env, addr, val, oi, retaddr, MO_BEUL);
}

void helper_le_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
                       TCGMemOpIdx oi, uintptr_t retaddr)
{
    store_helper(env, addr, val, oi, retaddr, MO_LEQ);
}

void helper_be_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
                       TCGMemOpIdx oi, uintptr_t retaddr)
{
    store_helper(env, addr, val, oi, retaddr, MO_BEQ);
}
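
/*
 * Illustrative note (editor's addition): the TCGMemOpIdx argument packs
 * the MemOp (size, endianness, alignment) together with the mmu index,
 * e.g.
 *
 *     TCGMemOpIdx oi = make_memop_idx(MO_LEUL, mmu_idx);
 *     assert(get_memop(oi) == MO_LEUL && get_mmuidx(oi) == mmu_idx);
 *
 * TCG-generated code builds this constant at translate time and passes
 * it, together with the return address used for unwinding, to the
 * helpers above.
 */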

/*
 * Store Helpers for cpu_ldst.h
 */

static inline void QEMU_ALWAYS_INLINE
cpu_store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
                 int mmu_idx, uintptr_t retaddr, MemOp op)
{
    TCGMemOpIdx oi;
    uint16_t meminfo;

    meminfo = trace_mem_get_info(op, mmu_idx, true);
    trace_guest_mem_before_exec(env_cpu(env), addr, meminfo);

    oi = make_memop_idx(op, mmu_idx);
    store_helper(env, addr, val, oi, retaddr, op);

    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, meminfo);
}

void cpu_stb_mmuidx_ra(CPUArchState *env, target_ulong addr, uint32_t val,
                       int mmu_idx, uintptr_t retaddr)
{
    cpu_store_helper(env, addr, val, mmu_idx, retaddr, MO_UB);
}

void cpu_stw_be_mmuidx_ra(CPUArchState *env, target_ulong addr, uint32_t val,
                          int mmu_idx, uintptr_t retaddr)
{
    cpu_store_helper(env, addr, val, mmu_idx, retaddr, MO_BEUW);
}

void cpu_stl_be_mmuidx_ra(CPUArchState *env, target_ulong addr, uint32_t val,
                          int mmu_idx, uintptr_t retaddr)
{
    cpu_store_helper(env, addr, val, mmu_idx, retaddr, MO_BEUL);
}

void cpu_stq_be_mmuidx_ra(CPUArchState *env, target_ulong addr, uint64_t val,
                          int mmu_idx, uintptr_t retaddr)
{
    cpu_store_helper(env, addr, val, mmu_idx, retaddr, MO_BEQ);
}

void cpu_stw_le_mmuidx_ra(CPUArchState *env, target_ulong addr, uint32_t val,
                          int mmu_idx, uintptr_t retaddr)
{
    cpu_store_helper(env, addr, val, mmu_idx, retaddr, MO_LEUW);
}

void cpu_stl_le_mmuidx_ra(CPUArchState *env, target_ulong addr, uint32_t val,
                          int mmu_idx, uintptr_t retaddr)
{
    cpu_store_helper(env, addr, val, mmu_idx, retaddr, MO_LEUL);
}

void cpu_stq_le_mmuidx_ra(CPUArchState *env, target_ulong addr, uint64_t val,
                          int mmu_idx, uintptr_t retaddr)
{
    cpu_store_helper(env, addr, val, mmu_idx, retaddr, MO_LEQ);
}

void cpu_stb_data_ra(CPUArchState *env, target_ulong ptr,
                     uint32_t val, uintptr_t retaddr)
{
    cpu_stb_mmuidx_ra(env, ptr, val, cpu_mmu_index(env, false), retaddr);
}

void cpu_stw_be_data_ra(CPUArchState *env, target_ulong ptr,
                        uint32_t val, uintptr_t retaddr)
{
    cpu_stw_be_mmuidx_ra(env, ptr, val, cpu_mmu_index(env, false), retaddr);
}

void cpu_stl_be_data_ra(CPUArchState *env, target_ulong ptr,
                        uint32_t val, uintptr_t retaddr)
{
    cpu_stl_be_mmuidx_ra(env, ptr, val, cpu_mmu_index(env, false), retaddr);
}

void cpu_stq_be_data_ra(CPUArchState *env, target_ulong ptr,
                        uint64_t val, uintptr_t retaddr)
{
    cpu_stq_be_mmuidx_ra(env, ptr, val, cpu_mmu_index(env, false), retaddr);
}

void cpu_stw_le_data_ra(CPUArchState *env, target_ulong ptr,
                        uint32_t val, uintptr_t retaddr)
{
    cpu_stw_le_mmuidx_ra(env, ptr, val, cpu_mmu_index(env, false), retaddr);
}

void cpu_stl_le_data_ra(CPUArchState *env, target_ulong ptr,
                        uint32_t val, uintptr_t retaddr)
{
    cpu_stl_le_mmuidx_ra(env, ptr, val, cpu_mmu_index(env, false), retaddr);
}

void cpu_stq_le_data_ra(CPUArchState *env, target_ulong ptr,
                        uint64_t val, uintptr_t retaddr)
{
    cpu_stq_le_mmuidx_ra(env, ptr, val, cpu_mmu_index(env, false), retaddr);
}

void cpu_stb_data(CPUArchState *env, target_ulong ptr, uint32_t val)
{
    cpu_stb_data_ra(env, ptr, val, 0);
}

void cpu_stw_be_data(CPUArchState *env, target_ulong ptr, uint32_t val)
{
    cpu_stw_be_data_ra(env, ptr, val, 0);
}

void cpu_stl_be_data(CPUArchState *env, target_ulong ptr, uint32_t val)
{
    cpu_stl_be_data_ra(env, ptr, val, 0);
}

void cpu_stq_be_data(CPUArchState *env, target_ulong ptr, uint64_t val)
{
    cpu_stq_be_data_ra(env, ptr, val, 0);
}

void cpu_stw_le_data(CPUArchState *env, target_ulong ptr, uint32_t val)
{
    cpu_stw_le_data_ra(env, ptr, val, 0);
}

void cpu_stl_le_data(CPUArchState *env, target_ulong ptr, uint32_t val)
{
    cpu_stl_le_data_ra(env, ptr, val, 0);
}

void cpu_stq_le_data(CPUArchState *env, target_ulong ptr, uint64_t val)
{
    cpu_stq_le_data_ra(env, ptr, val, 0);
}
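
/*
 * Illustrative sketch (editor's addition): the _data wrappers compose
 * into simple guest-memory operations.  "example_guest_copy" is a
 * hypothetical helper, not part of this file; it copies byte by byte so
 * that every access takes the full softmmu path above.
 */
static inline void example_guest_copy(CPUArchState *env, target_ulong dst,
                                      target_ulong src, target_ulong len)
{
    target_ulong i;

    for (i = 0; i < len; i++) {
        cpu_stb_data(env, dst + i, cpu_ldub_data(env, src + i));
    }
}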

/*
 * First set of functions passes in OI and RETADDR.
 * This makes them callable from other helpers.
 */

#define ATOMIC_NAME(X) \
    glue(glue(glue(cpu_atomic_ ## X, SUFFIX), END), _mmu)

#define ATOMIC_MMU_CLEANUP
#define ATOMIC_MMU_IDX   get_mmuidx(oi)

#include "atomic_common.c.inc"

#define DATA_SIZE 1
#include "atomic_template.h"

#define DATA_SIZE 2
#include "atomic_template.h"

#define DATA_SIZE 4
#include "atomic_template.h"

#ifdef CONFIG_ATOMIC64
#define DATA_SIZE 8
#include "atomic_template.h"
#endif

#if HAVE_CMPXCHG128 || HAVE_ATOMIC128
#define DATA_SIZE 16
#include "atomic_template.h"
#endif
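
/*
 * Illustrative note (editor's addition): with ATOMIC_NAME as defined
 * above, each DATA_SIZE expansion of atomic_template.h is expected to
 * produce helpers named cpu_atomic_<op><size><end>_mmu, for example
 * cpu_atomic_cmpxchgl_le_mmu for a 4-byte little-endian compare-and-swap.
 * Like the load/store helpers in this file, they receive the TCGMemOpIdx
 * and the return address, so target helpers can call them directly.  The
 * exact set of operations comes from atomic_template.h and is not
 * reproduced here.
 */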

/* Code access functions.  */

static uint64_t full_ldub_code(CPUArchState *env, target_ulong addr,
                               TCGMemOpIdx oi, uintptr_t retaddr)
{
    return load_helper(env, addr, oi, retaddr, MO_8, true, full_ldub_code);
}

uint32_t cpu_ldub_code(CPUArchState *env, abi_ptr addr)
{
    TCGMemOpIdx oi = make_memop_idx(MO_UB, cpu_mmu_index(env, true));
    return full_ldub_code(env, addr, oi, 0);
}

static uint64_t full_lduw_code(CPUArchState *env, target_ulong addr,
                               TCGMemOpIdx oi, uintptr_t retaddr)
{
    return load_helper(env, addr, oi, retaddr, MO_TEUW, true, full_lduw_code);
}

uint32_t cpu_lduw_code(CPUArchState *env, abi_ptr addr)
{
    TCGMemOpIdx oi = make_memop_idx(MO_TEUW, cpu_mmu_index(env, true));
    return full_lduw_code(env, addr, oi, 0);
}

static uint64_t full_ldl_code(CPUArchState *env, target_ulong addr,
                              TCGMemOpIdx oi, uintptr_t retaddr)
{
    return load_helper(env, addr, oi, retaddr, MO_TEUL, true, full_ldl_code);
}

uint32_t cpu_ldl_code(CPUArchState *env, abi_ptr addr)
{
    TCGMemOpIdx oi = make_memop_idx(MO_TEUL, cpu_mmu_index(env, true));
    return full_ldl_code(env, addr, oi, 0);
}

static uint64_t full_ldq_code(CPUArchState *env, target_ulong addr,
                              TCGMemOpIdx oi, uintptr_t retaddr)
{
    return load_helper(env, addr, oi, retaddr, MO_TEQ, true, full_ldq_code);
}

uint64_t cpu_ldq_code(CPUArchState *env, abi_ptr addr)
{
    TCGMemOpIdx oi = make_memop_idx(MO_TEQ, cpu_mmu_index(env, true));
    return full_ldq_code(env, addr, oi, 0);
}
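
/*
 * Illustrative note (editor's addition): a target translator typically
 * fetches instruction words through these code-access wrappers, e.g.
 *
 *     uint32_t insn = cpu_ldl_code(env, pc);
 *
 * which, as above, uses the instruction-fetch mmu index
 * (cpu_mmu_index(env, true)) rather than the data index used by the
 * cpu_ld*_data helpers earlier in this file.
 */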