20#include "qemu/osdep.h"
21#include "qemu-common.h"
22#include "qapi/error.h"
23
24#include "qemu/cutils.h"
25#include "cpu.h"
26#include "exec/exec-all.h"
27#include "exec/target_page.h"
28#include "tcg/tcg.h"
29#include "hw/qdev-core.h"
30#include "hw/qdev-properties.h"
31#if !defined(CONFIG_USER_ONLY)
32#include "hw/boards.h"
33#include "hw/xen/xen.h"
34#endif
35#include "sysemu/kvm.h"
36#include "sysemu/sysemu.h"
37#include "sysemu/tcg.h"
38#include "sysemu/qtest.h"
39#include "qemu/timer.h"
40#include "qemu/config-file.h"
41#include "qemu/error-report.h"
42#include "qemu/qemu-print.h"
43#if defined(CONFIG_USER_ONLY)
44#include "qemu.h"
45#else
46#include "exec/memory.h"
47#include "exec/ioport.h"
48#include "sysemu/dma.h"
49#include "sysemu/hostmem.h"
50#include "sysemu/hw_accel.h"
51#include "exec/address-spaces.h"
52#include "sysemu/xen-mapcache.h"
53#include "trace-root.h"
54
55#ifdef CONFIG_FALLOCATE_PUNCH_HOLE
56#include <linux/falloc.h>
57#endif
58
59#endif
60#include "qemu/rcu_queue.h"
61#include "qemu/main-loop.h"
62#include "translate-all.h"
63#include "sysemu/replay.h"
64
65#include "exec/memory-internal.h"
66#include "exec/ram_addr.h"
67#include "exec/log.h"
68
69#include "qemu/pmem.h"
70
71#include "migration/vmstate.h"
72
73#include "qemu/range.h"
74#ifndef _WIN32
75#include "qemu/mmap-alloc.h"
76#endif
77
78#include "monitor/monitor.h"
79
80#ifdef CONFIG_LIBDAXCTL
81#include <daxctl/libdaxctl.h>
82#endif
83
84
85
86#if !defined(CONFIG_USER_ONLY)
87
88
89
90RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };
91
92static MemoryRegion *system_memory;
93static MemoryRegion *system_io;
94
95AddressSpace address_space_io;
96AddressSpace address_space_memory;
97
98static MemoryRegion io_mem_unassigned;
99#endif
100
101uintptr_t qemu_host_page_size;
102intptr_t qemu_host_page_mask;
103
104#if !defined(CONFIG_USER_ONLY)
105
106
107
108int use_icount;
109
110typedef struct PhysPageEntry PhysPageEntry;
111
112struct PhysPageEntry {
113
114 uint32_t skip : 6;
115
116 uint32_t ptr : 26;
117};
118
119#define PHYS_MAP_NODE_NIL (((uint32_t)~0) >> 6)
120
121
122#define ADDR_SPACE_BITS 64
123
124#define P_L2_BITS 9
125#define P_L2_SIZE (1 << P_L2_BITS)
126
127#define P_L2_LEVELS (((ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / P_L2_BITS) + 1)
128
129typedef PhysPageEntry Node[P_L2_SIZE];
130
131typedef struct PhysPageMap {
132 struct rcu_head rcu;
133
134 unsigned sections_nb;
135 unsigned sections_nb_alloc;
136 unsigned nodes_nb;
137 unsigned nodes_nb_alloc;
138 Node *nodes;
139 MemoryRegionSection *sections;
140} PhysPageMap;
141
142struct AddressSpaceDispatch {
143 MemoryRegionSection *mru_section;
144
145
146
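    /* This is a multi-level map on the physical address space.
     * The bottom level has pointers to MemoryRegionSections.
     */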
147 PhysPageEntry phys_map;
148 PhysPageMap map;
149};
150
151#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
152typedef struct subpage_t {
153 MemoryRegion iomem;
154 FlatView *fv;
155 hwaddr base;
156 uint16_t sub_section[];
157} subpage_t;
158
159#define PHYS_SECTION_UNASSIGNED 0
160
161static void io_mem_init(void);
162static void memory_map_init(void);
163static void tcg_log_global_after_sync(MemoryListener *listener);
164static void tcg_commit(MemoryListener *listener);
165
166
167
168
169
170
171
172
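/*
 * CPUAddressSpace: all the information a CPU needs about an AddressSpace
 * @cpu: the CPU whose AddressSpace this is
 * @as: the AddressSpace itself
 * @memory_dispatch: its dispatch pointer (cached, RCU protected)
 * @tcg_as_listener: listener for tracking changes to the AddressSpace
 */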
173struct CPUAddressSpace {
174 CPUState *cpu;
175 AddressSpace *as;
176 struct AddressSpaceDispatch *memory_dispatch;
177 MemoryListener tcg_as_listener;
178};
179
180struct DirtyBitmapSnapshot {
181 ram_addr_t start;
182 ram_addr_t end;
183 unsigned long dirty[];
184};
185
186#endif
187
188#if !defined(CONFIG_USER_ONLY)
189
190static void phys_map_node_reserve(PhysPageMap *map, unsigned nodes)
191{
192 static unsigned alloc_hint = 16;
193 if (map->nodes_nb + nodes > map->nodes_nb_alloc) {
194 map->nodes_nb_alloc = MAX(alloc_hint, map->nodes_nb + nodes);
195 map->nodes = g_renew(Node, map->nodes, map->nodes_nb_alloc);
196 alloc_hint = map->nodes_nb_alloc;
197 }
198}
199
200static uint32_t phys_map_node_alloc(PhysPageMap *map, bool leaf)
201{
202 unsigned i;
203 uint32_t ret;
204 PhysPageEntry e;
205 PhysPageEntry *p;
206
207 ret = map->nodes_nb++;
208 p = map->nodes[ret];
209 assert(ret != PHYS_MAP_NODE_NIL);
210 assert(ret != map->nodes_nb_alloc);
211
212 e.skip = leaf ? 0 : 1;
213 e.ptr = leaf ? PHYS_SECTION_UNASSIGNED : PHYS_MAP_NODE_NIL;
214 for (i = 0; i < P_L2_SIZE; ++i) {
215 memcpy(&p[i], &e, sizeof(e));
216 }
217 return ret;
218}
219
220static void phys_page_set_level(PhysPageMap *map, PhysPageEntry *lp,
221 hwaddr *index, uint64_t *nb, uint16_t leaf,
222 int level)
223{
224 PhysPageEntry *p;
225 hwaddr step = (hwaddr)1 << (level * P_L2_BITS);
226
227 if (lp->skip && lp->ptr == PHYS_MAP_NODE_NIL) {
228 lp->ptr = phys_map_node_alloc(map, level == 0);
229 }
230 p = map->nodes[lp->ptr];
231 lp = &p[(*index >> (level * P_L2_BITS)) & (P_L2_SIZE - 1)];
232
233 while (*nb && lp < &p[P_L2_SIZE]) {
234 if ((*index & (step - 1)) == 0 && *nb >= step) {
235 lp->skip = 0;
236 lp->ptr = leaf;
237 *index += step;
238 *nb -= step;
239 } else {
240 phys_page_set_level(map, lp, index, nb, leaf, level - 1);
241 }
242 ++lp;
243 }
244}
245
246static void phys_page_set(AddressSpaceDispatch *d,
247 hwaddr index, uint64_t nb,
248 uint16_t leaf)
249{
250
251 phys_map_node_reserve(&d->map, 3 * P_L2_LEVELS);
252
253 phys_page_set_level(&d->map, &d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
254}
255
256
257
258
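/* Compact a non-leaf page entry: if it has exactly one valid child, fold
 * the child into the parent by increasing the parent's skip count.
 */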
259static void phys_page_compact(PhysPageEntry *lp, Node *nodes)
260{
261 unsigned valid_ptr = P_L2_SIZE;
262 int valid = 0;
263 PhysPageEntry *p;
264 int i;
265
266 if (lp->ptr == PHYS_MAP_NODE_NIL) {
267 return;
268 }
269
270 p = nodes[lp->ptr];
271 for (i = 0; i < P_L2_SIZE; i++) {
272 if (p[i].ptr == PHYS_MAP_NODE_NIL) {
273 continue;
274 }
275
276 valid_ptr = i;
277 valid++;
278 if (p[i].skip) {
279 phys_page_compact(&p[i], nodes);
280 }
281 }
282
283
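    /* We can only compress if there's only one child. */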
284 if (valid != 1) {
285 return;
286 }
287
288 assert(valid_ptr < P_L2_SIZE);
289
290
291 if (P_L2_LEVELS >= (1 << 6) &&
292 lp->skip + p[valid_ptr].skip >= (1 << 6)) {
293 return;
294 }
295
296 lp->ptr = p[valid_ptr].ptr;
297 if (!p[valid_ptr].skip) {
298
299
300
301
302
303
304 lp->skip = 0;
305 } else {
306 lp->skip += p[valid_ptr].skip;
307 }
308}
309
310void address_space_dispatch_compact(AddressSpaceDispatch *d)
311{
312 if (d->phys_map.skip) {
313 phys_page_compact(&d->phys_map, d->map.nodes);
314 }
315}
316
317static inline bool section_covers_addr(const MemoryRegionSection *section,
318 hwaddr addr)
319{
320
321
322
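    /* Memory topology clips a memory region to [0, 2^64); size.hi > 0 means
     * the section must cover the entire address space.
     */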
323 return int128_gethi(section->size) ||
324 range_covers_byte(section->offset_within_address_space,
325 int128_getlo(section->size), addr);
326}
327
328static MemoryRegionSection *phys_page_find(AddressSpaceDispatch *d, hwaddr addr)
329{
330 PhysPageEntry lp = d->phys_map, *p;
331 Node *nodes = d->map.nodes;
332 MemoryRegionSection *sections = d->map.sections;
333 hwaddr index = addr >> TARGET_PAGE_BITS;
334 int i;
335
336 for (i = P_L2_LEVELS; lp.skip && (i -= lp.skip) >= 0;) {
337 if (lp.ptr == PHYS_MAP_NODE_NIL) {
            return &sections[PHYS_SECTION_UNASSIGNED];
339 }
340 p = nodes[lp.ptr];
341 lp = p[(index >> (i * P_L2_BITS)) & (P_L2_SIZE - 1)];
342 }
343
    if (section_covers_addr(&sections[lp.ptr], addr)) {
        return &sections[lp.ptr];
    } else {
        return &sections[PHYS_SECTION_UNASSIGNED];
348 }
349}
350
351
352static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
353 hwaddr addr,
354 bool resolve_subpage)
355{
356 MemoryRegionSection *section = atomic_read(&d->mru_section);
357 subpage_t *subpage;
358
359 if (!section || section == &d->map.sections[PHYS_SECTION_UNASSIGNED] ||
360 !section_covers_addr(section, addr)) {
361 section = phys_page_find(d, addr);
362 atomic_set(&d->mru_section, section);
363 }
364 if (resolve_subpage && section->mr->subpage) {
365 subpage = container_of(section->mr, subpage_t, iomem);
366 section = &d->map.sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
367 }
368 return section;
369}
370
371
372static MemoryRegionSection *
373address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *xlat,
374 hwaddr *plen, bool resolve_subpage)
375{
376 MemoryRegionSection *section;
377 MemoryRegion *mr;
378 Int128 diff;
379
380 section = address_space_lookup_region(d, addr, resolve_subpage);
381
382 addr -= section->offset_within_address_space;
383
384
385 *xlat = addr + section->offset_within_region;
386
387 mr = section->mr;
388
389
390
391
392
393
394
395
396
397
398
399
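    /* MMIO registers can be expected to perform full-width accesses based only
     * on their address, without considering adjacent registers that could
     * decode to completely different MemoryRegions.  When such registers
     * exist (e.g. I/O ports 0xcf8 and 0xcf9 on most PC chipsets), MMIO
     * regions overlap wildly.  For this reason we cannot clamp the accesses
     * here.
     *
     * If the length is small (as is the case for address_space_ldl/stl),
     * everything works fine.  If the incoming length is large, however,
     * the caller really has to do the clamping through memory_access_size.
     */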
400 if (memory_region_is_ram(mr)) {
401 diff = int128_sub(section->size, int128_make64(addr));
402 *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
403 }
404 return section;
405}
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
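/*
 * address_space_translate_iommu - translate an address through an IOMMU
 * memory region and then via a direct mapping.
 *
 * Called from RCU critical section.  Loops until the translated address
 * is no longer an IOMMU region; on a permission failure it returns a
 * section pointing at io_mem_unassigned.
 */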
429static MemoryRegionSection address_space_translate_iommu(IOMMUMemoryRegion *iommu_mr,
430 hwaddr *xlat,
431 hwaddr *plen_out,
432 hwaddr *page_mask_out,
433 bool is_write,
434 bool is_mmio,
435 AddressSpace **target_as,
436 MemTxAttrs attrs)
437{
438 MemoryRegionSection *section;
439 hwaddr page_mask = (hwaddr)-1;
440
441 do {
442 hwaddr addr = *xlat;
443 IOMMUMemoryRegionClass *imrc = memory_region_get_iommu_class_nocheck(iommu_mr);
444 int iommu_idx = 0;
445 IOMMUTLBEntry iotlb;
446
447 if (imrc->attrs_to_index) {
448 iommu_idx = imrc->attrs_to_index(iommu_mr, attrs);
449 }
450
451 iotlb = imrc->translate(iommu_mr, addr, is_write ?
452 IOMMU_WO : IOMMU_RO, iommu_idx);
453
454 if (!(iotlb.perm & (1 << is_write))) {
455 goto unassigned;
456 }
457
458 addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
459 | (addr & iotlb.addr_mask));
460 page_mask &= iotlb.addr_mask;
461 *plen_out = MIN(*plen_out, (addr | iotlb.addr_mask) - addr + 1);
462 *target_as = iotlb.target_as;
463
464 section = address_space_translate_internal(
465 address_space_to_dispatch(iotlb.target_as), addr, xlat,
466 plen_out, is_mmio);
467
468 iommu_mr = memory_region_get_iommu(section->mr);
469 } while (unlikely(iommu_mr));
470
471 if (page_mask_out) {
472 *page_mask_out = page_mask;
473 }
474 return *section;
475
476unassigned:
477 return (MemoryRegionSection) { .mr = &io_mem_unassigned };
478}
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
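/*
 * flatview_do_translate - translate an address in FlatView
 *
 * Called from RCU critical section.  Translates @addr within @fv, filling
 * in *@xlat and *@plen_out, and forwards to address_space_translate_iommu
 * when the target turns out to be an IOMMU region.
 */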
500static MemoryRegionSection flatview_do_translate(FlatView *fv,
501 hwaddr addr,
502 hwaddr *xlat,
503 hwaddr *plen_out,
504 hwaddr *page_mask_out,
505 bool is_write,
506 bool is_mmio,
507 AddressSpace **target_as,
508 MemTxAttrs attrs)
509{
510 MemoryRegionSection *section;
511 IOMMUMemoryRegion *iommu_mr;
512 hwaddr plen = (hwaddr)(-1);
513
514 if (!plen_out) {
515 plen_out = &plen;
516 }
517
518 section = address_space_translate_internal(
519 flatview_to_dispatch(fv), addr, xlat,
520 plen_out, is_mmio);
521
522 iommu_mr = memory_region_get_iommu(section->mr);
523 if (unlikely(iommu_mr)) {
524 return address_space_translate_iommu(iommu_mr, xlat,
525 plen_out, page_mask_out,
526 is_write, is_mmio,
527 target_as, attrs);
528 }
529 if (page_mask_out) {
530
531 *page_mask_out = ~TARGET_PAGE_MASK;
532 }
533
534 return *section;
535}
536
537
538IOMMUTLBEntry address_space_get_iotlb_entry(AddressSpace *as, hwaddr addr,
539 bool is_write, MemTxAttrs attrs)
540{
541 MemoryRegionSection section;
542 hwaddr xlat, page_mask;
543
544
545
546
547
548 section = flatview_do_translate(address_space_to_flatview(as), addr, &xlat,
549 NULL, &page_mask, is_write, false, &as,
550 attrs);
551
552
553 if (section.mr == &io_mem_unassigned) {
554 goto iotlb_fail;
555 }
556
557
558 xlat += section.offset_within_address_space -
559 section.offset_within_region;
560
561 return (IOMMUTLBEntry) {
562 .target_as = as,
563 .iova = addr & ~page_mask,
564 .translated_addr = xlat & ~page_mask,
565 .addr_mask = page_mask,
566
567 .perm = IOMMU_RW,
568 };
569
570iotlb_fail:
571 return (IOMMUTLBEntry) {0};
572}
573
574
575MemoryRegion *flatview_translate(FlatView *fv, hwaddr addr, hwaddr *xlat,
576 hwaddr *plen, bool is_write,
577 MemTxAttrs attrs)
578{
579 MemoryRegion *mr;
580 MemoryRegionSection section;
581 AddressSpace *as = NULL;
582
583
584 section = flatview_do_translate(fv, addr, xlat, plen, NULL,
585 is_write, true, &as, attrs);
586 mr = section.mr;
587
588 if (xen_enabled() && memory_access_is_direct(mr, is_write)) {
589 hwaddr page = ((addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE) - addr;
590 *plen = MIN(page, *plen);
591 }
592
593 return mr;
594}
595
596typedef struct TCGIOMMUNotifier {
597 IOMMUNotifier n;
598 MemoryRegion *mr;
599 CPUState *cpu;
600 int iommu_idx;
601 bool active;
602} TCGIOMMUNotifier;
603
604static void tcg_iommu_unmap_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb)
605{
606 TCGIOMMUNotifier *notifier = container_of(n, TCGIOMMUNotifier, n);
607
608 if (!notifier->active) {
609 return;
610 }
611 tlb_flush(notifier->cpu);
612 notifier->active = false;
613
614
615
616
617
618}
619
620static void tcg_register_iommu_notifier(CPUState *cpu,
621 IOMMUMemoryRegion *iommu_mr,
622 int iommu_idx)
623{
624
625
626
627
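    /* Make sure this CPU has a notifier registered for this IOMMU MR and
     * iommu_idx, so that its TLB can be flushed when the IOMMU reports
     * that cached mappings have changed.
     */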
628 MemoryRegion *mr = MEMORY_REGION(iommu_mr);
629 TCGIOMMUNotifier *notifier;
630 Error *err = NULL;
631 int i, ret;
632
633 for (i = 0; i < cpu->iommu_notifiers->len; i++) {
634 notifier = g_array_index(cpu->iommu_notifiers, TCGIOMMUNotifier *, i);
635 if (notifier->mr == mr && notifier->iommu_idx == iommu_idx) {
636 break;
637 }
638 }
639 if (i == cpu->iommu_notifiers->len) {
640
641 cpu->iommu_notifiers = g_array_set_size(cpu->iommu_notifiers, i + 1);
642 notifier = g_new0(TCGIOMMUNotifier, 1);
643 g_array_index(cpu->iommu_notifiers, TCGIOMMUNotifier *, i) = notifier;
644
645 notifier->mr = mr;
646 notifier->iommu_idx = iommu_idx;
647 notifier->cpu = cpu;
648
649
650
651
652
653
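        /* Register interest in the whole IOMMU address space rather than
         * just the parts we have touched, on the assumption that IOMMU
         * reconfiguration will be rare.
         */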
        iommu_notifier_init(&notifier->n,
655 tcg_iommu_unmap_notify,
656 IOMMU_NOTIFIER_UNMAP,
657 0,
658 HWADDR_MAX,
659 iommu_idx);
        ret = memory_region_register_iommu_notifier(notifier->mr, &notifier->n,
661 &err);
662 if (ret) {
663 error_report_err(err);
664 exit(1);
665 }
666 }
667
668 if (!notifier->active) {
669 notifier->active = true;
670 }
671}
672
673static void tcg_iommu_free_notifier_list(CPUState *cpu)
674{
675
676 int i;
677 TCGIOMMUNotifier *notifier;
678
679 for (i = 0; i < cpu->iommu_notifiers->len; i++) {
680 notifier = g_array_index(cpu->iommu_notifiers, TCGIOMMUNotifier *, i);
        memory_region_unregister_iommu_notifier(notifier->mr, &notifier->n);
682 g_free(notifier);
683 }
684 g_array_free(cpu->iommu_notifiers, true);
685}
686
687
688MemoryRegionSection *
689address_space_translate_for_iotlb(CPUState *cpu, int asidx, hwaddr addr,
690 hwaddr *xlat, hwaddr *plen,
691 MemTxAttrs attrs, int *prot)
692{
693 MemoryRegionSection *section;
694 IOMMUMemoryRegion *iommu_mr;
695 IOMMUMemoryRegionClass *imrc;
696 IOMMUTLBEntry iotlb;
697 int iommu_idx;
698 AddressSpaceDispatch *d = atomic_rcu_read(&cpu->cpu_ases[asidx].memory_dispatch);
699
700 for (;;) {
701 section = address_space_translate_internal(d, addr, &addr, plen, false);
702
703 iommu_mr = memory_region_get_iommu(section->mr);
704 if (!iommu_mr) {
705 break;
706 }
707
708 imrc = memory_region_get_iommu_class_nocheck(iommu_mr);
709
710 iommu_idx = imrc->attrs_to_index(iommu_mr, attrs);
711 tcg_register_iommu_notifier(cpu, iommu_mr, iommu_idx);
712
713
714
715 iotlb = imrc->translate(iommu_mr, addr, IOMMU_NONE, iommu_idx);
716 addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
717 | (addr & iotlb.addr_mask));
718
719
720
721
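        /* Strip from *prot any permission the IOMMU did not grant; if
         * nothing is left, give up and return the unassigned section.
         */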
722 if (!(iotlb.perm & IOMMU_RO)) {
723 *prot &= ~(PAGE_READ | PAGE_EXEC);
724 }
725 if (!(iotlb.perm & IOMMU_WO)) {
726 *prot &= ~PAGE_WRITE;
727 }
728
729 if (!*prot) {
730 goto translate_fail;
731 }
732
733 d = flatview_to_dispatch(address_space_to_flatview(iotlb.target_as));
734 }
735
736 assert(!memory_region_is_iommu(section->mr));
737 *xlat = addr;
738 return section;
739
740translate_fail:
741 return &d->map.sections[PHYS_SECTION_UNASSIGNED];
742}
743#endif
744
745#if !defined(CONFIG_USER_ONLY)
746
747static int cpu_common_post_load(void *opaque, int version_id)
748{
749 CPUState *cpu = opaque;
750
751
752
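    /* 0x01 was CPU_INTERRUPT_EXIT.  This line can be removed when the
     * version_id is increased.
     */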
753 cpu->interrupt_request &= ~0x01;
754 tlb_flush(cpu);
755
756
757
758
759
760
761 tb_flush(cpu);
762
763 return 0;
764}
765
766static int cpu_common_pre_load(void *opaque)
767{
768 CPUState *cpu = opaque;
769
770 cpu->exception_index = -1;
771
772 return 0;
773}
774
775static bool cpu_common_exception_index_needed(void *opaque)
776{
777 CPUState *cpu = opaque;
778
779 return tcg_enabled() && cpu->exception_index != -1;
780}
781
782static const VMStateDescription vmstate_cpu_common_exception_index = {
783 .name = "cpu_common/exception_index",
784 .version_id = 1,
785 .minimum_version_id = 1,
786 .needed = cpu_common_exception_index_needed,
787 .fields = (VMStateField[]) {
788 VMSTATE_INT32(exception_index, CPUState),
789 VMSTATE_END_OF_LIST()
790 }
791};
792
793static bool cpu_common_crash_occurred_needed(void *opaque)
794{
795 CPUState *cpu = opaque;
796
797 return cpu->crash_occurred;
798}
799
800static const VMStateDescription vmstate_cpu_common_crash_occurred = {
801 .name = "cpu_common/crash_occurred",
802 .version_id = 1,
803 .minimum_version_id = 1,
804 .needed = cpu_common_crash_occurred_needed,
805 .fields = (VMStateField[]) {
806 VMSTATE_BOOL(crash_occurred, CPUState),
807 VMSTATE_END_OF_LIST()
808 }
809};
810
811const VMStateDescription vmstate_cpu_common = {
812 .name = "cpu_common",
813 .version_id = 1,
814 .minimum_version_id = 1,
815 .pre_load = cpu_common_pre_load,
816 .post_load = cpu_common_post_load,
817 .fields = (VMStateField[]) {
818 VMSTATE_UINT32(halted, CPUState),
819 VMSTATE_UINT32(interrupt_request, CPUState),
820 VMSTATE_END_OF_LIST()
821 },
822 .subsections = (const VMStateDescription*[]) {
823 &vmstate_cpu_common_exception_index,
824 &vmstate_cpu_common_crash_occurred,
825 NULL
826 }
827};
828
829void cpu_address_space_init(CPUState *cpu, int asidx,
830 const char *prefix, MemoryRegion *mr)
831{
832 CPUAddressSpace *newas;
833 AddressSpace *as = g_new0(AddressSpace, 1);
834 char *as_name;
835
836 assert(mr);
837 as_name = g_strdup_printf("%s-%d", prefix, cpu->cpu_index);
838 address_space_init(as, mr, as_name);
839 g_free(as_name);
840
841
842 assert(asidx < cpu->num_ases);
843
844 if (asidx == 0) {
845
846 cpu->as = as;
847 }
848
849
850 assert(asidx == 0 || !kvm_enabled());
851
852 if (!cpu->cpu_ases) {
853 cpu->cpu_ases = g_new0(CPUAddressSpace, cpu->num_ases);
854 }
855
856 newas = &cpu->cpu_ases[asidx];
857 newas->cpu = cpu;
858 newas->as = as;
859 if (tcg_enabled()) {
860 newas->tcg_as_listener.log_global_after_sync = tcg_log_global_after_sync;
861 newas->tcg_as_listener.commit = tcg_commit;
862 memory_listener_register(&newas->tcg_as_listener, as);
863 }
864}
865
866AddressSpace *cpu_get_address_space(CPUState *cpu, int asidx)
867{
868
869 return cpu->cpu_ases[asidx].as;
870}
871#endif
872
873void cpu_exec_unrealizefn(CPUState *cpu)
874{
875 CPUClass *cc = CPU_GET_CLASS(cpu);
876
877 tlb_destroy(cpu);
878 cpu_list_remove(cpu);
879
880 if (cc->vmsd != NULL) {
881 vmstate_unregister(NULL, cc->vmsd, cpu);
882 }
883 if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
884 vmstate_unregister(NULL, &vmstate_cpu_common, cpu);
885 }
886#ifndef CONFIG_USER_ONLY
887 tcg_iommu_free_notifier_list(cpu);
888#endif
889}
890
891Property cpu_common_props[] = {
892#ifndef CONFIG_USER_ONLY
893
894
895
896
897
898
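    /* Create a memory property for softmmu CPU objects, so users can
     * wire up their memory.  The default if no link is set up is to use
     * the system address space.
     */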
899 DEFINE_PROP_LINK("memory", CPUState, memory, TYPE_MEMORY_REGION,
900 MemoryRegion *),
901#endif
902 DEFINE_PROP_END_OF_LIST(),
903};
904
905void cpu_exec_initfn(CPUState *cpu)
906{
907 cpu->as = NULL;
908 cpu->num_ases = 0;
909
910#ifndef CONFIG_USER_ONLY
911 cpu->thread_id = qemu_get_thread_id();
912 cpu->memory = system_memory;
913 object_ref(OBJECT(cpu->memory));
914#endif
915}
916
917void cpu_exec_realizefn(CPUState *cpu, Error **errp)
918{
919 CPUClass *cc = CPU_GET_CLASS(cpu);
920 static bool tcg_target_initialized;
921
922 cpu_list_add(cpu);
923
924 if (tcg_enabled() && !tcg_target_initialized) {
925 tcg_target_initialized = true;
926 cc->tcg_initialize();
927 }
928 tlb_init(cpu);
929
930 qemu_plugin_vcpu_init_hook(cpu);
931
932#ifdef CONFIG_USER_ONLY
933 assert(cc->vmsd == NULL);
934#else
935 if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
936 vmstate_register(NULL, cpu->cpu_index, &vmstate_cpu_common, cpu);
937 }
938 if (cc->vmsd != NULL) {
939 vmstate_register(NULL, cpu->cpu_index, cc->vmsd, cpu);
940 }
941
942 cpu->iommu_notifiers = g_array_new(false, true, sizeof(TCGIOMMUNotifier *));
943#endif
944}
945
946const char *parse_cpu_option(const char *cpu_option)
947{
948 ObjectClass *oc;
949 CPUClass *cc;
950 gchar **model_pieces;
951 const char *cpu_type;
952
953 model_pieces = g_strsplit(cpu_option, ",", 2);
954 if (!model_pieces[0]) {
955 error_report("-cpu option cannot be empty");
956 exit(1);
957 }
958
959 oc = cpu_class_by_name(CPU_RESOLVING_TYPE, model_pieces[0]);
960 if (oc == NULL) {
961 error_report("unable to find CPU model '%s'", model_pieces[0]);
962 g_strfreev(model_pieces);
963 exit(EXIT_FAILURE);
964 }
965
966 cpu_type = object_class_get_name(oc);
967 cc = CPU_CLASS(oc);
968 cc->parse_features(cpu_type, model_pieces[1], &error_fatal);
969 g_strfreev(model_pieces);
970 return cpu_type;
971}
972
973#if defined(CONFIG_USER_ONLY)
974void tb_invalidate_phys_addr(target_ulong addr)
975{
976 mmap_lock();
977 tb_invalidate_phys_page_range(addr, addr + 1);
978 mmap_unlock();
979}
980
981static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
982{
983 tb_invalidate_phys_addr(pc);
984}
985#else
986void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr, MemTxAttrs attrs)
987{
988 ram_addr_t ram_addr;
989 MemoryRegion *mr;
990 hwaddr l = 1;
991
992 if (!tcg_enabled()) {
993 return;
994 }
995
996 RCU_READ_LOCK_GUARD();
997 mr = address_space_translate(as, addr, &addr, &l, false, attrs);
998 if (!(memory_region_is_ram(mr)
999 || memory_region_is_romd(mr))) {
1000 return;
1001 }
1002 ram_addr = memory_region_get_ram_addr(mr) + addr;
1003 tb_invalidate_phys_page_range(ram_addr, ram_addr + 1);
1004}
1005
1006static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
1007{
1008
1009
1010
1011
1012
1013
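    /* There may not be a virtual-to-physical translation for the pc right
     * now, but there may be cached TBs for it.  Flushing the whole TB
     * cache forces re-translation; heavyweight, but we are debugging
     * anyway.
     */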
1014 tb_flush(cpu);
1015}
1016#endif
1017
1018#ifndef CONFIG_USER_ONLY
1019
1020int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
1021 int flags, CPUWatchpoint **watchpoint)
1022{
1023 CPUWatchpoint *wp;
1024 vaddr in_page;
1025
1026
1027 if (len == 0 || (addr + len - 1) < addr) {
1028 error_report("tried to set invalid watchpoint at %"
1029 VADDR_PRIx ", len=%" VADDR_PRIu, addr, len);
1030 return -EINVAL;
1031 }
1032 wp = g_malloc(sizeof(*wp));
1033
1034 wp->vaddr = addr;
1035 wp->len = len;
1036 wp->flags = flags;
1037
1038
1039 if (flags & BP_GDB) {
1040 QTAILQ_INSERT_HEAD(&cpu->watchpoints, wp, entry);
1041 } else {
1042 QTAILQ_INSERT_TAIL(&cpu->watchpoints, wp, entry);
1043 }
1044
1045 in_page = -(addr | TARGET_PAGE_MASK);
1046 if (len <= in_page) {
1047 tlb_flush_page(cpu, addr);
1048 } else {
1049 tlb_flush(cpu);
1050 }
1051
1052 if (watchpoint)
1053 *watchpoint = wp;
1054 return 0;
1055}
1056
1057
1058int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
1059 int flags)
1060{
1061 CPUWatchpoint *wp;
1062
1063 QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
1064 if (addr == wp->vaddr && len == wp->len
1065 && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
1066 cpu_watchpoint_remove_by_ref(cpu, wp);
1067 return 0;
1068 }
1069 }
1070 return -ENOENT;
1071}
1072
1073
1074void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
1075{
1076 QTAILQ_REMOVE(&cpu->watchpoints, watchpoint, entry);
1077
1078 tlb_flush_page(cpu, watchpoint->vaddr);
1079
1080 g_free(watchpoint);
1081}
1082
1083
1084void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
1085{
1086 CPUWatchpoint *wp, *next;
1087
1088 QTAILQ_FOREACH_SAFE(wp, &cpu->watchpoints, entry, next) {
1089 if (wp->flags & mask) {
1090 cpu_watchpoint_remove_by_ref(cpu, wp);
1091 }
1092 }
1093}
1094
1095
1096
1097
1098
1099
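/* Return true if the watchpoint's address range overlaps, partially or
 * completely, with the address range of the access.
 */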
1100static inline bool watchpoint_address_matches(CPUWatchpoint *wp,
1101 vaddr addr, vaddr len)
1102{
1103
1104
1105
1106
1107
1108 vaddr wpend = wp->vaddr + wp->len - 1;
1109 vaddr addrend = addr + len - 1;
1110
1111 return !(addr > wpend || wp->vaddr > addrend);
1112}
1113
1114
1115int cpu_watchpoint_address_matches(CPUState *cpu, vaddr addr, vaddr len)
1116{
1117 CPUWatchpoint *wp;
1118 int ret = 0;
1119
1120 QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
1121 if (watchpoint_address_matches(wp, addr, len)) {
1122 ret |= wp->flags;
1123 }
1124 }
1125 return ret;
1126}
1127#endif
1128
1129
1130int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags,
1131 CPUBreakpoint **breakpoint)
1132{
1133 CPUBreakpoint *bp;
1134
1135 bp = g_malloc(sizeof(*bp));
1136
1137 bp->pc = pc;
1138 bp->flags = flags;
1139
1140
1141 if (flags & BP_GDB) {
1142 QTAILQ_INSERT_HEAD(&cpu->breakpoints, bp, entry);
1143 } else {
1144 QTAILQ_INSERT_TAIL(&cpu->breakpoints, bp, entry);
1145 }
1146
1147 breakpoint_invalidate(cpu, pc);
1148
1149 if (breakpoint) {
1150 *breakpoint = bp;
1151 }
1152 return 0;
1153}
1154
1155
1156int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags)
1157{
1158 CPUBreakpoint *bp;
1159
1160 QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
1161 if (bp->pc == pc && bp->flags == flags) {
1162 cpu_breakpoint_remove_by_ref(cpu, bp);
1163 return 0;
1164 }
1165 }
1166 return -ENOENT;
1167}
1168
1169
1170void cpu_breakpoint_remove_by_ref(CPUState *cpu, CPUBreakpoint *breakpoint)
1171{
1172 QTAILQ_REMOVE(&cpu->breakpoints, breakpoint, entry);
1173
1174 breakpoint_invalidate(cpu, breakpoint->pc);
1175
1176 g_free(breakpoint);
1177}
1178
1179
1180void cpu_breakpoint_remove_all(CPUState *cpu, int mask)
1181{
1182 CPUBreakpoint *bp, *next;
1183
1184 QTAILQ_FOREACH_SAFE(bp, &cpu->breakpoints, entry, next) {
1185 if (bp->flags & mask) {
1186 cpu_breakpoint_remove_by_ref(cpu, bp);
1187 }
1188 }
1189}
1190
1191
1192
1193void cpu_single_step(CPUState *cpu, int enabled)
1194{
1195 if (cpu->singlestep_enabled != enabled) {
1196 cpu->singlestep_enabled = enabled;
1197 if (kvm_enabled()) {
1198 kvm_update_guest_debug(cpu, 0);
1199 } else {
1200
1201
1202 tb_flush(cpu);
1203 }
1204 }
1205}
1206
1207void cpu_abort(CPUState *cpu, const char *fmt, ...)
1208{
1209 va_list ap;
1210 va_list ap2;
1211
1212 va_start(ap, fmt);
1213 va_copy(ap2, ap);
1214 fprintf(stderr, "qemu: fatal: ");
1215 vfprintf(stderr, fmt, ap);
1216 fprintf(stderr, "\n");
1217 cpu_dump_state(cpu, stderr, CPU_DUMP_FPU | CPU_DUMP_CCOP);
1218 if (qemu_log_separate()) {
1219 FILE *logfile = qemu_log_lock();
1220 qemu_log("qemu: fatal: ");
1221 qemu_log_vprintf(fmt, ap2);
1222 qemu_log("\n");
1223 log_cpu_state(cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
1224 qemu_log_flush();
1225 qemu_log_unlock(logfile);
1226 qemu_log_close();
1227 }
1228 va_end(ap2);
1229 va_end(ap);
1230 replay_finish();
1231#if defined(CONFIG_USER_ONLY)
1232 {
1233 struct sigaction act;
1234 sigfillset(&act.sa_mask);
1235 act.sa_handler = SIG_DFL;
1236 act.sa_flags = 0;
1237 sigaction(SIGABRT, &act, NULL);
1238 }
1239#endif
1240 abort();
1241}
1242
1243#if !defined(CONFIG_USER_ONLY)
1244
1245static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
1246{
1247 RAMBlock *block;
1248
1249 block = atomic_rcu_read(&ram_list.mru_block);
1250 if (block && addr - block->offset < block->max_length) {
1251 return block;
1252 }
1253 RAMBLOCK_FOREACH(block) {
1254 if (addr - block->offset < block->max_length) {
1255 goto found;
1256 }
1257 }
1258
1259 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
1260 abort();
1261
1262found:
1263
1264
1265
1266
1267
1268
1269
1270
1271
1272
1273
1274
1275
1276
1277
1278
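    /* It is safe to write mru_block outside the iothread lock: the block
     * was already published when it was placed into the list, and readers
     * under rcu_read_lock() see either the old or the new value.  Writing
     * it here is just a cache optimization.
     */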
1279 ram_list.mru_block = block;
1280 return block;
1281}
1282
1283static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t length)
1284{
1285 CPUState *cpu;
1286 ram_addr_t start1;
1287 RAMBlock *block;
1288 ram_addr_t end;
1289
1290 assert(tcg_enabled());
1291 end = TARGET_PAGE_ALIGN(start + length);
1292 start &= TARGET_PAGE_MASK;
1293
1294 RCU_READ_LOCK_GUARD();
1295 block = qemu_get_ram_block(start);
1296 assert(block == qemu_get_ram_block(end - 1));
1297 start1 = (uintptr_t)ramblock_ptr(block, start - block->offset);
1298 CPU_FOREACH(cpu) {
1299 tlb_reset_dirty(cpu, start1, length);
1300 }
1301}
1302
1303
1304bool cpu_physical_memory_test_and_clear_dirty(ram_addr_t start,
1305 ram_addr_t length,
1306 unsigned client)
1307{
1308 DirtyMemoryBlocks *blocks;
1309 unsigned long end, page, start_page;
1310 bool dirty = false;
1311 RAMBlock *ramblock;
1312 uint64_t mr_offset, mr_size;
1313
1314 if (length == 0) {
1315 return false;
1316 }
1317
1318 end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
1319 start_page = start >> TARGET_PAGE_BITS;
1320 page = start_page;
1321
1322 WITH_RCU_READ_LOCK_GUARD() {
1323 blocks = atomic_rcu_read(&ram_list.dirty_memory[client]);
1324 ramblock = qemu_get_ram_block(start);
1325
1326 assert(start >= ramblock->offset &&
1327 start + length <= ramblock->offset + ramblock->used_length);
1328
1329 while (page < end) {
1330 unsigned long idx = page / DIRTY_MEMORY_BLOCK_SIZE;
1331 unsigned long offset = page % DIRTY_MEMORY_BLOCK_SIZE;
1332 unsigned long num = MIN(end - page,
1333 DIRTY_MEMORY_BLOCK_SIZE - offset);
1334
1335 dirty |= bitmap_test_and_clear_atomic(blocks->blocks[idx],
1336 offset, num);
1337 page += num;
1338 }
1339
1340 mr_offset = (ram_addr_t)(start_page << TARGET_PAGE_BITS) - ramblock->offset;
1341 mr_size = (end - start_page) << TARGET_PAGE_BITS;
1342 memory_region_clear_dirty_bitmap(ramblock->mr, mr_offset, mr_size);
1343 }
1344
1345 if (dirty && tcg_enabled()) {
1346 tlb_reset_dirty_range_all(start, length);
1347 }
1348
1349 return dirty;
1350}
1351
1352DirtyBitmapSnapshot *cpu_physical_memory_snapshot_and_clear_dirty
1353 (MemoryRegion *mr, hwaddr offset, hwaddr length, unsigned client)
1354{
1355 DirtyMemoryBlocks *blocks;
1356 ram_addr_t start = memory_region_get_ram_addr(mr) + offset;
1357 unsigned long align = 1UL << (TARGET_PAGE_BITS + BITS_PER_LEVEL);
1358 ram_addr_t first = QEMU_ALIGN_DOWN(start, align);
1359 ram_addr_t last = QEMU_ALIGN_UP(start + length, align);
1360 DirtyBitmapSnapshot *snap;
1361 unsigned long page, end, dest;
1362
1363 snap = g_malloc0(sizeof(*snap) +
1364 ((last - first) >> (TARGET_PAGE_BITS + 3)));
1365 snap->start = first;
1366 snap->end = last;
1367
1368 page = first >> TARGET_PAGE_BITS;
1369 end = last >> TARGET_PAGE_BITS;
1370 dest = 0;
1371
1372 WITH_RCU_READ_LOCK_GUARD() {
1373 blocks = atomic_rcu_read(&ram_list.dirty_memory[client]);
1374
1375 while (page < end) {
1376 unsigned long idx = page / DIRTY_MEMORY_BLOCK_SIZE;
1377 unsigned long offset = page % DIRTY_MEMORY_BLOCK_SIZE;
1378 unsigned long num = MIN(end - page,
1379 DIRTY_MEMORY_BLOCK_SIZE - offset);
1380
1381 assert(QEMU_IS_ALIGNED(offset, (1 << BITS_PER_LEVEL)));
1382 assert(QEMU_IS_ALIGNED(num, (1 << BITS_PER_LEVEL)));
1383 offset >>= BITS_PER_LEVEL;
1384
1385 bitmap_copy_and_clear_atomic(snap->dirty + dest,
1386 blocks->blocks[idx] + offset,
1387 num);
1388 page += num;
1389 dest += num >> BITS_PER_LEVEL;
1390 }
1391 }
1392
1393 if (tcg_enabled()) {
1394 tlb_reset_dirty_range_all(start, length);
1395 }
1396
1397 memory_region_clear_dirty_bitmap(mr, offset, length);
1398
1399 return snap;
1400}
1401
1402bool cpu_physical_memory_snapshot_get_dirty(DirtyBitmapSnapshot *snap,
1403 ram_addr_t start,
1404 ram_addr_t length)
1405{
1406 unsigned long page, end;
1407
1408 assert(start >= snap->start);
1409 assert(start + length <= snap->end);
1410
1411 end = TARGET_PAGE_ALIGN(start + length - snap->start) >> TARGET_PAGE_BITS;
1412 page = (start - snap->start) >> TARGET_PAGE_BITS;
1413
1414 while (page < end) {
1415 if (test_bit(page, snap->dirty)) {
1416 return true;
1417 }
1418 page++;
1419 }
1420 return false;
1421}
1422
1423
1424hwaddr memory_region_section_get_iotlb(CPUState *cpu,
1425 MemoryRegionSection *section)
1426{
1427 AddressSpaceDispatch *d = flatview_to_dispatch(section->fv);
1428 return section - d->map.sections;
1429}
1430#endif
1431
1432#if !defined(CONFIG_USER_ONLY)
1433
1434static int subpage_register(subpage_t *mmio, uint32_t start, uint32_t end,
1435 uint16_t section);
1436static subpage_t *subpage_init(FlatView *fv, hwaddr base);
1437
1438static void *(*phys_mem_alloc)(size_t size, uint64_t *align, bool shared) =
1439 qemu_anon_ram_alloc;
1440
1441
1442
1443
1444
1445
1446void phys_mem_set_alloc(void *(*alloc)(size_t, uint64_t *align, bool shared))
1447{
1448 phys_mem_alloc = alloc;
1449}
1450
1451static uint16_t phys_section_add(PhysPageMap *map,
1452 MemoryRegionSection *section)
1453{
1454
1455
1456
1457
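    /* The physical section number is ORed with a page-aligned pointer to
     * produce the iotlb entries.  Thus it should never overflow into the
     * page-aligned value.
     */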
1458 assert(map->sections_nb < TARGET_PAGE_SIZE);
1459
1460 if (map->sections_nb == map->sections_nb_alloc) {
1461 map->sections_nb_alloc = MAX(map->sections_nb_alloc * 2, 16);
1462 map->sections = g_renew(MemoryRegionSection, map->sections,
1463 map->sections_nb_alloc);
1464 }
1465 map->sections[map->sections_nb] = *section;
1466 memory_region_ref(section->mr);
1467 return map->sections_nb++;
1468}
1469
1470static void phys_section_destroy(MemoryRegion *mr)
1471{
1472 bool have_sub_page = mr->subpage;
1473
1474 memory_region_unref(mr);
1475
1476 if (have_sub_page) {
1477 subpage_t *subpage = container_of(mr, subpage_t, iomem);
1478 object_unref(OBJECT(&subpage->iomem));
1479 g_free(subpage);
1480 }
1481}
1482
1483static void phys_sections_free(PhysPageMap *map)
1484{
1485 while (map->sections_nb > 0) {
1486 MemoryRegionSection *section = &map->sections[--map->sections_nb];
1487 phys_section_destroy(section->mr);
1488 }
1489 g_free(map->sections);
1490 g_free(map->nodes);
1491}
1492
1493static void register_subpage(FlatView *fv, MemoryRegionSection *section)
1494{
1495 AddressSpaceDispatch *d = flatview_to_dispatch(fv);
1496 subpage_t *subpage;
1497 hwaddr base = section->offset_within_address_space
1498 & TARGET_PAGE_MASK;
1499 MemoryRegionSection *existing = phys_page_find(d, base);
1500 MemoryRegionSection subsection = {
1501 .offset_within_address_space = base,
1502 .size = int128_make64(TARGET_PAGE_SIZE),
1503 };
1504 hwaddr start, end;
1505
1506 assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);
1507
1508 if (!(existing->mr->subpage)) {
1509 subpage = subpage_init(fv, base);
1510 subsection.fv = fv;
1511 subsection.mr = &subpage->iomem;
1512 phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
1513 phys_section_add(&d->map, &subsection));
1514 } else {
1515 subpage = container_of(existing->mr, subpage_t, iomem);
1516 }
1517 start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
1518 end = start + int128_get64(section->size) - 1;
1519 subpage_register(subpage, start, end,
1520 phys_section_add(&d->map, section));
1521}
1522
1523
1524static void register_multipage(FlatView *fv,
1525 MemoryRegionSection *section)
1526{
1527 AddressSpaceDispatch *d = flatview_to_dispatch(fv);
1528 hwaddr start_addr = section->offset_within_address_space;
1529 uint16_t section_index = phys_section_add(&d->map, section);
1530 uint64_t num_pages = int128_get64(int128_rshift(section->size,
1531 TARGET_PAGE_BITS));
1532
1533 assert(num_pages);
1534 phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index);
1535}
1536
1537
1538
1539
1540
1541
1542
1543
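/*
 * The range in *section may look like this:
 *
 *      |s|PPPPPPP|s|
 *
 * where s stands for subpage and P for page: the leading and trailing
 * partial pages are registered as subpages, and the whole pages in
 * between as a multipage entry.
 */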
1544void flatview_add_to_dispatch(FlatView *fv, MemoryRegionSection *section)
1545{
1546 MemoryRegionSection remain = *section;
1547 Int128 page_size = int128_make64(TARGET_PAGE_SIZE);
1548
1549
1550 if (remain.offset_within_address_space & ~TARGET_PAGE_MASK) {
1551 uint64_t left = TARGET_PAGE_ALIGN(remain.offset_within_address_space)
1552 - remain.offset_within_address_space;
1553
1554 MemoryRegionSection now = remain;
1555 now.size = int128_min(int128_make64(left), now.size);
1556 register_subpage(fv, &now);
1557 if (int128_eq(remain.size, now.size)) {
1558 return;
1559 }
1560 remain.size = int128_sub(remain.size, now.size);
1561 remain.offset_within_address_space += int128_get64(now.size);
1562 remain.offset_within_region += int128_get64(now.size);
1563 }
1564
1565
1566 if (int128_ge(remain.size, page_size)) {
1567 MemoryRegionSection now = remain;
1568 now.size = int128_and(now.size, int128_neg(page_size));
1569 register_multipage(fv, &now);
1570 if (int128_eq(remain.size, now.size)) {
1571 return;
1572 }
1573 remain.size = int128_sub(remain.size, now.size);
1574 remain.offset_within_address_space += int128_get64(now.size);
1575 remain.offset_within_region += int128_get64(now.size);
1576 }
1577
1578
1579 register_subpage(fv, &remain);
1580}
1581
1582void qemu_flush_coalesced_mmio_buffer(void)
1583{
1584 if (kvm_enabled())
1585 kvm_flush_coalesced_mmio_buffer();
1586}
1587
1588void qemu_mutex_lock_ramlist(void)
1589{
1590 qemu_mutex_lock(&ram_list.mutex);
1591}
1592
1593void qemu_mutex_unlock_ramlist(void)
1594{
1595 qemu_mutex_unlock(&ram_list.mutex);
1596}
1597
1598void ram_block_dump(Monitor *mon)
1599{
1600 RAMBlock *block;
1601 char *psize;
1602
1603 RCU_READ_LOCK_GUARD();
1604 monitor_printf(mon, "%24s %8s %18s %18s %18s\n",
1605 "Block Name", "PSize", "Offset", "Used", "Total");
1606 RAMBLOCK_FOREACH(block) {
1607 psize = size_to_str(block->page_size);
1608 monitor_printf(mon, "%24s %8s 0x%016" PRIx64 " 0x%016" PRIx64
1609 " 0x%016" PRIx64 "\n", block->idstr, psize,
1610 (uint64_t)block->offset,
1611 (uint64_t)block->used_length,
1612 (uint64_t)block->max_length);
1613 g_free(psize);
1614 }
1615}
1616
1617#ifdef __linux__
1618
1619
1620
1621
1622
1623
1624static int find_min_backend_pagesize(Object *obj, void *opaque)
1625{
1626 long *hpsize_min = opaque;
1627
1628 if (object_dynamic_cast(obj, TYPE_MEMORY_BACKEND)) {
1629 HostMemoryBackend *backend = MEMORY_BACKEND(obj);
1630 long hpsize = host_memory_backend_pagesize(backend);
1631
1632 if (host_memory_backend_is_mapped(backend) && (hpsize < *hpsize_min)) {
1633 *hpsize_min = hpsize;
1634 }
1635 }
1636
1637 return 0;
1638}
1639
1640static int find_max_backend_pagesize(Object *obj, void *opaque)
1641{
1642 long *hpsize_max = opaque;
1643
1644 if (object_dynamic_cast(obj, TYPE_MEMORY_BACKEND)) {
1645 HostMemoryBackend *backend = MEMORY_BACKEND(obj);
1646 long hpsize = host_memory_backend_pagesize(backend);
1647
1648 if (host_memory_backend_is_mapped(backend) && (hpsize > *hpsize_max)) {
1649 *hpsize_max = hpsize;
1650 }
1651 }
1652
1653 return 0;
1654}
1655
1656
1657
1658
1659
1660long qemu_minrampagesize(void)
1661{
1662 long hpsize = LONG_MAX;
1663 Object *memdev_root = object_resolve_path("/objects", NULL);
1664
1665 object_child_foreach(memdev_root, find_min_backend_pagesize, &hpsize);
1666 return hpsize;
1667}
1668
1669long qemu_maxrampagesize(void)
1670{
1671 long pagesize = 0;
1672 Object *memdev_root = object_resolve_path("/objects", NULL);
1673
1674 object_child_foreach(memdev_root, find_max_backend_pagesize, &pagesize);
1675 return pagesize;
1676}
1677#else
1678long qemu_minrampagesize(void)
1679{
1680 return qemu_real_host_page_size;
1681}
1682long qemu_maxrampagesize(void)
1683{
1684 return qemu_real_host_page_size;
1685}
1686#endif
1687
1688#ifdef CONFIG_POSIX
1689static int64_t get_file_size(int fd)
1690{
1691 int64_t size;
1692#if defined(__linux__)
1693 struct stat st;
1694
1695 if (fstat(fd, &st) < 0) {
1696 return -errno;
1697 }
1698
1699
1700 if (S_ISCHR(st.st_mode)) {
1701 g_autofree char *subsystem_path = NULL;
1702 g_autofree char *subsystem = NULL;
1703
1704 subsystem_path = g_strdup_printf("/sys/dev/char/%d:%d/subsystem",
1705 major(st.st_rdev), minor(st.st_rdev));
1706 subsystem = g_file_read_link(subsystem_path, NULL);
1707
1708 if (subsystem && g_str_has_suffix(subsystem, "/dax")) {
1709 g_autofree char *size_path = NULL;
1710 g_autofree char *size_str = NULL;
1711
1712 size_path = g_strdup_printf("/sys/dev/char/%d:%d/size",
1713 major(st.st_rdev), minor(st.st_rdev));
1714
1715 if (g_file_get_contents(size_path, &size_str, NULL, NULL)) {
1716 return g_ascii_strtoll(size_str, NULL, 0);
1717 }
1718 }
1719 }
1720#endif
1721
1722
1723 size = lseek(fd, 0, SEEK_END);
1724 if (size < 0) {
1725 return -errno;
1726 }
1727 return size;
1728}
1729
1730static int64_t get_file_align(int fd)
1731{
1732 int64_t align = -1;
1733#if defined(__linux__) && defined(CONFIG_LIBDAXCTL)
1734 struct stat st;
1735
1736 if (fstat(fd, &st) < 0) {
1737 return -errno;
1738 }
1739
1740
1741 if (S_ISCHR(st.st_mode)) {
1742 g_autofree char *path = NULL;
1743 g_autofree char *rpath = NULL;
1744 struct daxctl_ctx *ctx;
1745 struct daxctl_region *region;
1746 int rc = 0;
1747
1748 path = g_strdup_printf("/sys/dev/char/%d:%d",
1749 major(st.st_rdev), minor(st.st_rdev));
1750 rpath = realpath(path, NULL);
1751
1752 rc = daxctl_new(&ctx);
1753 if (rc) {
1754 return -1;
1755 }
1756
1757 daxctl_region_foreach(ctx, region) {
1758 if (strstr(rpath, daxctl_region_get_path(region))) {
1759 align = daxctl_region_get_align(region);
1760 break;
1761 }
1762 }
1763 daxctl_unref(ctx);
1764 }
1765#endif
1766
1767 return align;
1768}
1769
1770static int file_ram_open(const char *path,
1771 const char *region_name,
1772 bool *created,
1773 Error **errp)
1774{
1775 char *filename;
1776 char *sanitized_name;
1777 char *c;
1778 int fd = -1;
1779
1780 *created = false;
1781 for (;;) {
1782 fd = open(path, O_RDWR);
1783 if (fd >= 0) {
1784
1785 break;
1786 }
1787 if (errno == ENOENT) {
1788
1789 fd = open(path, O_RDWR | O_CREAT | O_EXCL, 0644);
1790 if (fd >= 0) {
1791 *created = true;
1792 break;
1793 }
1794 } else if (errno == EISDIR) {
1795
1796
1797 sanitized_name = g_strdup(region_name);
1798 for (c = sanitized_name; *c != '\0'; c++) {
1799 if (*c == '/') {
1800 *c = '_';
1801 }
1802 }
1803
1804 filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
1805 sanitized_name);
1806 g_free(sanitized_name);
1807
1808 fd = mkstemp(filename);
1809 if (fd >= 0) {
1810 unlink(filename);
1811 g_free(filename);
1812 break;
1813 }
1814 g_free(filename);
1815 }
1816 if (errno != EEXIST && errno != EINTR) {
1817 error_setg_errno(errp, errno,
1818 "can't open backing store %s for guest RAM",
1819 path);
1820 return -1;
1821 }
1822
1823
1824
1825
1826 }
1827
1828 return fd;
1829}
1830
1831static void *file_ram_alloc(RAMBlock *block,
1832 ram_addr_t memory,
1833 int fd,
1834 bool truncate,
1835 Error **errp)
1836{
1837 void *area;
1838
1839 block->page_size = qemu_fd_getpagesize(fd);
1840 if (block->mr->align % block->page_size) {
1841 error_setg(errp, "alignment 0x%" PRIx64
1842 " must be multiples of page size 0x%zx",
1843 block->mr->align, block->page_size);
1844 return NULL;
1845 } else if (block->mr->align && !is_power_of_2(block->mr->align)) {
1846 error_setg(errp, "alignment 0x%" PRIx64
1847 " must be a power of two", block->mr->align);
1848 return NULL;
1849 }
1850 block->mr->align = MAX(block->page_size, block->mr->align);
1851#if defined(__s390x__)
1852 if (kvm_enabled()) {
1853 block->mr->align = MAX(block->mr->align, QEMU_VMALLOC_ALIGN);
1854 }
1855#endif
1856
1857 if (memory < block->page_size) {
1858 error_setg(errp, "memory size 0x" RAM_ADDR_FMT " must be equal to "
1859 "or larger than page size 0x%zx",
1860 memory, block->page_size);
1861 return NULL;
1862 }
1863
1864 memory = ROUND_UP(memory, block->page_size);
1865
1866
1867
1868
1869
1870
1871
1872
1873
1874
1875
1876
1877
1878
1879
1880 if (truncate && ftruncate(fd, memory)) {
1881 perror("ftruncate");
1882 }
1883
1884 area = qemu_ram_mmap(fd, memory, block->mr->align,
1885 block->flags & RAM_SHARED, block->flags & RAM_PMEM);
1886 if (area == MAP_FAILED) {
1887 error_setg_errno(errp, errno,
1888 "unable to map backing store for guest RAM");
1889 return NULL;
1890 }
1891
1892 block->fd = fd;
1893 return area;
1894}
1895#endif
1896
1897
1898
1899
1900
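/* Find a gap in the ram_addr_t space big enough for @size, picking the
 * smallest gap that fits.  Called with the ramlist lock held.
 */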
1901static ram_addr_t find_ram_offset(ram_addr_t size)
1902{
1903 RAMBlock *block, *next_block;
1904 ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;
1905
1906 assert(size != 0);
1907
1908 if (QLIST_EMPTY_RCU(&ram_list.blocks)) {
1909 return 0;
1910 }
1911
1912 RAMBLOCK_FOREACH(block) {
1913 ram_addr_t candidate, next = RAM_ADDR_MAX;
1914
1915
1916
1917
1918 candidate = block->offset + block->max_length;
1919 candidate = ROUND_UP(candidate, BITS_PER_LONG << TARGET_PAGE_BITS);
1920
1921
1922
1923
1924 RAMBLOCK_FOREACH(next_block) {
1925 if (next_block->offset >= candidate) {
1926 next = MIN(next, next_block->offset);
1927 }
1928 }
1929
1930
1931
1932
1933
1934 if (next - candidate >= size && next - candidate < mingap) {
1935 offset = candidate;
1936 mingap = next - candidate;
1937 }
1938
1939 trace_find_ram_offset_loop(size, candidate, offset, next, mingap);
1940 }
1941
1942 if (offset == RAM_ADDR_MAX) {
1943 fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
1944 (uint64_t)size);
1945 abort();
1946 }
1947
1948 trace_find_ram_offset(size, offset);
1949
1950 return offset;
1951}
1952
1953static unsigned long last_ram_page(void)
1954{
1955 RAMBlock *block;
1956 ram_addr_t last = 0;
1957
1958 RCU_READ_LOCK_GUARD();
1959 RAMBLOCK_FOREACH(block) {
1960 last = MAX(last, block->offset + block->max_length);
1961 }
1962 return last >> TARGET_PAGE_BITS;
1963}
1964
1965static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
1966{
1967 int ret;
1968
1969
1970 if (!machine_dump_guest_core(current_machine)) {
1971 ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
1972 if (ret) {
1973 perror("qemu_madvise");
1974 fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
1975 "but dump_guest_core=off specified\n");
1976 }
1977 }
1978}
1979
1980const char *qemu_ram_get_idstr(RAMBlock *rb)
1981{
1982 return rb->idstr;
1983}
1984
1985void *qemu_ram_get_host_addr(RAMBlock *rb)
1986{
1987 return rb->host;
1988}
1989
1990ram_addr_t qemu_ram_get_offset(RAMBlock *rb)
1991{
1992 return rb->offset;
1993}
1994
1995ram_addr_t qemu_ram_get_used_length(RAMBlock *rb)
1996{
1997 return rb->used_length;
1998}
1999
2000bool qemu_ram_is_shared(RAMBlock *rb)
2001{
2002 return rb->flags & RAM_SHARED;
2003}
2004
2005
2006bool qemu_ram_is_uf_zeroable(RAMBlock *rb)
2007{
2008 return rb->flags & RAM_UF_ZEROPAGE;
2009}
2010
2011void qemu_ram_set_uf_zeroable(RAMBlock *rb)
2012{
2013 rb->flags |= RAM_UF_ZEROPAGE;
2014}
2015
2016bool qemu_ram_is_migratable(RAMBlock *rb)
2017{
2018 return rb->flags & RAM_MIGRATABLE;
2019}
2020
2021void qemu_ram_set_migratable(RAMBlock *rb)
2022{
2023 rb->flags |= RAM_MIGRATABLE;
2024}
2025
2026void qemu_ram_unset_migratable(RAMBlock *rb)
2027{
2028 rb->flags &= ~RAM_MIGRATABLE;
2029}
2030
2031
2032void qemu_ram_set_idstr(RAMBlock *new_block, const char *name, DeviceState *dev)
2033{
2034 RAMBlock *block;
2035
2036 assert(new_block);
2037 assert(!new_block->idstr[0]);
2038
2039 if (dev) {
2040 char *id = qdev_get_dev_path(dev);
2041 if (id) {
2042 snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
2043 g_free(id);
2044 }
2045 }
2046 pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
2047
2048 RCU_READ_LOCK_GUARD();
2049 RAMBLOCK_FOREACH(block) {
2050 if (block != new_block &&
2051 !strcmp(block->idstr, new_block->idstr)) {
2052 fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
2053 new_block->idstr);
2054 abort();
2055 }
2056 }
2057}
2058
2059
2060void qemu_ram_unset_idstr(RAMBlock *block)
2061{
2062
2063
2064
2065
2066 if (block) {
2067 memset(block->idstr, 0, sizeof(block->idstr));
2068 }
2069}
2070
2071size_t qemu_ram_pagesize(RAMBlock *rb)
2072{
2073 return rb->page_size;
2074}
2075
2076
2077size_t qemu_ram_pagesize_largest(void)
2078{
2079 RAMBlock *block;
2080 size_t largest = 0;
2081
2082 RAMBLOCK_FOREACH(block) {
2083 largest = MAX(largest, qemu_ram_pagesize(block));
2084 }
2085
2086 return largest;
2087}
2088
2089static int memory_try_enable_merging(void *addr, size_t len)
2090{
2091 if (!machine_mem_merge(current_machine)) {
2092
2093 return 0;
2094 }
2095
2096 return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
2097}
2098
2099
2100
2101
2102
2103
2104
2105
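/* Resize an existing RAMBlock.  Only legal before the guest might have
 * detected the memory size, e.g. on incoming migration or right after
 * reset.  Returns 0 on success, negative errno on failure.
 */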
2106int qemu_ram_resize(RAMBlock *block, ram_addr_t newsize, Error **errp)
2107{
2108 const ram_addr_t unaligned_size = newsize;
2109
2110 assert(block);
2111
2112 newsize = HOST_PAGE_ALIGN(newsize);
2113
2114 if (block->used_length == newsize) {
2115
2116
2117
2118
2119 if (unaligned_size != memory_region_size(block->mr)) {
2120 memory_region_set_size(block->mr, unaligned_size);
2121 if (block->resized) {
2122 block->resized(block->idstr, unaligned_size, block->host);
2123 }
2124 }
2125 return 0;
2126 }
2127
2128 if (!(block->flags & RAM_RESIZEABLE)) {
2129 error_setg_errno(errp, EINVAL,
2130 "Length mismatch: %s: 0x" RAM_ADDR_FMT
2131 " in != 0x" RAM_ADDR_FMT, block->idstr,
2132 newsize, block->used_length);
2133 return -EINVAL;
2134 }
2135
2136 if (block->max_length < newsize) {
2137 error_setg_errno(errp, EINVAL,
2138 "Length too large: %s: 0x" RAM_ADDR_FMT
2139 " > 0x" RAM_ADDR_FMT, block->idstr,
2140 newsize, block->max_length);
2141 return -EINVAL;
2142 }
2143
2144 cpu_physical_memory_clear_dirty_range(block->offset, block->used_length);
2145 block->used_length = newsize;
2146 cpu_physical_memory_set_dirty_range(block->offset, block->used_length,
2147 DIRTY_CLIENTS_ALL);
2148 memory_region_set_size(block->mr, unaligned_size);
2149 if (block->resized) {
2150 block->resized(block->idstr, unaligned_size, block->host);
2151 }
2152 return 0;
2153}
2154
2155
2156
2157
2158
2159
2160
2161void qemu_ram_msync(RAMBlock *block, ram_addr_t start, ram_addr_t length)
2162{
2163
2164 g_assert((start + length) <= block->used_length);
2165
2166#ifdef CONFIG_LIBPMEM
2167
2168 if (ramblock_is_pmem(block)) {
2169 void *addr = ramblock_ptr(block, start);
2170 pmem_persist(addr, length);
2171 return;
2172 }
2173#endif
2174 if (block->fd >= 0) {
2175
2176
2177
2178
2179
2180 void *addr = ramblock_ptr(block, start);
2181 if (qemu_msync(addr, length, block->fd)) {
2182 warn_report("%s: failed to sync memory range: start: "
2183 RAM_ADDR_FMT " length: " RAM_ADDR_FMT,
2184 __func__, start, length);
2185 }
2186 }
2187}
2188
2189
2190static void dirty_memory_extend(ram_addr_t old_ram_size,
2191 ram_addr_t new_ram_size)
2192{
2193 ram_addr_t old_num_blocks = DIV_ROUND_UP(old_ram_size,
2194 DIRTY_MEMORY_BLOCK_SIZE);
2195 ram_addr_t new_num_blocks = DIV_ROUND_UP(new_ram_size,
2196 DIRTY_MEMORY_BLOCK_SIZE);
2197 int i;
2198
2199
2200 if (new_num_blocks <= old_num_blocks) {
2201 return;
2202 }
2203
2204 for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
2205 DirtyMemoryBlocks *old_blocks;
2206 DirtyMemoryBlocks *new_blocks;
2207 int j;
2208
2209 old_blocks = atomic_rcu_read(&ram_list.dirty_memory[i]);
2210 new_blocks = g_malloc(sizeof(*new_blocks) +
2211 sizeof(new_blocks->blocks[0]) * new_num_blocks);
2212
2213 if (old_num_blocks) {
2214 memcpy(new_blocks->blocks, old_blocks->blocks,
2215 old_num_blocks * sizeof(old_blocks->blocks[0]));
2216 }
2217
2218 for (j = old_num_blocks; j < new_num_blocks; j++) {
2219 new_blocks->blocks[j] = bitmap_new(DIRTY_MEMORY_BLOCK_SIZE);
2220 }
2221
2222 atomic_rcu_set(&ram_list.dirty_memory[i], new_blocks);
2223
2224 if (old_blocks) {
2225 g_free_rcu(old_blocks, rcu);
2226 }
2227 }
2228}
2229
2230static void ram_block_add(RAMBlock *new_block, Error **errp, bool shared)
2231{
2232 RAMBlock *block;
2233 RAMBlock *last_block = NULL;
2234 ram_addr_t old_ram_size, new_ram_size;
2235 Error *err = NULL;
2236
2237 old_ram_size = last_ram_page();
2238
2239 qemu_mutex_lock_ramlist();
2240 new_block->offset = find_ram_offset(new_block->max_length);
2241
2242 if (!new_block->host) {
2243 if (xen_enabled()) {
2244 xen_ram_alloc(new_block->offset, new_block->max_length,
2245 new_block->mr, &err);
2246 if (err) {
2247 error_propagate(errp, err);
2248 qemu_mutex_unlock_ramlist();
2249 return;
2250 }
2251 } else {
2252 new_block->host = phys_mem_alloc(new_block->max_length,
2253 &new_block->mr->align, shared);
2254 if (!new_block->host) {
2255 error_setg_errno(errp, errno,
2256 "cannot set up guest memory '%s'",
2257 memory_region_name(new_block->mr));
2258 qemu_mutex_unlock_ramlist();
2259 return;
2260 }
2261 memory_try_enable_merging(new_block->host, new_block->max_length);
2262 }
2263 }
2264
2265 new_ram_size = MAX(old_ram_size,
2266 (new_block->offset + new_block->max_length) >> TARGET_PAGE_BITS);
2267 if (new_ram_size > old_ram_size) {
2268 dirty_memory_extend(old_ram_size, new_ram_size);
2269 }
2270
2271
2272
2273
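    /* Keep the list sorted from biggest to smallest block.  Unlike QTAILQ,
     * QLIST (which has an RCU-friendly variant) does not have insertion at
     * tail, so save the last element in last_block.
     */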
2274 RAMBLOCK_FOREACH(block) {
2275 last_block = block;
2276 if (block->max_length < new_block->max_length) {
2277 break;
2278 }
2279 }
2280 if (block) {
2281 QLIST_INSERT_BEFORE_RCU(block, new_block, next);
2282 } else if (last_block) {
2283 QLIST_INSERT_AFTER_RCU(last_block, new_block, next);
2284 } else {
2285 QLIST_INSERT_HEAD_RCU(&ram_list.blocks, new_block, next);
2286 }
2287 ram_list.mru_block = NULL;
2288
2289
2290 smp_wmb();
2291 ram_list.version++;
2292 qemu_mutex_unlock_ramlist();
2293
2294 cpu_physical_memory_set_dirty_range(new_block->offset,
2295 new_block->used_length,
2296 DIRTY_CLIENTS_ALL);
2297
2298 if (new_block->host) {
2299 qemu_ram_setup_dump(new_block->host, new_block->max_length);
2300 qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_HUGEPAGE);
2301
2302
2303
2304
2305
2306 if (!qtest_enabled()) {
2307 qemu_madvise(new_block->host, new_block->max_length,
2308 QEMU_MADV_DONTFORK);
2309 }
2310 ram_block_notify_add(new_block->host, new_block->max_length);
2311 }
2312}
2313
2314#ifdef CONFIG_POSIX
2315RAMBlock *qemu_ram_alloc_from_fd(ram_addr_t size, MemoryRegion *mr,
2316 uint32_t ram_flags, int fd,
2317 Error **errp)
2318{
2319 RAMBlock *new_block;
2320 Error *local_err = NULL;
2321 int64_t file_size, file_align;
2322
2323
2324 assert((ram_flags & ~(RAM_SHARED | RAM_PMEM)) == 0);
2325
2326 if (xen_enabled()) {
2327 error_setg(errp, "-mem-path not supported with Xen");
2328 return NULL;
2329 }
2330
2331 if (kvm_enabled() && !kvm_has_sync_mmu()) {
2332 error_setg(errp,
2333 "host lacks kvm mmu notifiers, -mem-path unsupported");
2334 return NULL;
2335 }
2336
2337 if (phys_mem_alloc != qemu_anon_ram_alloc) {
2338
2339
2340
2341
2342
2343 error_setg(errp,
2344 "-mem-path not supported with this accelerator");
2345 return NULL;
2346 }
2347
2348 size = HOST_PAGE_ALIGN(size);
2349 file_size = get_file_size(fd);
2350 if (file_size > 0 && file_size < size) {
2351 error_setg(errp, "backing store size 0x%" PRIx64
2352 " does not match 'size' option 0x" RAM_ADDR_FMT,
2353 file_size, size);
2354 return NULL;
2355 }
2356
2357 file_align = get_file_align(fd);
2358 if (file_align > 0 && mr && file_align > mr->align) {
2359 error_setg(errp, "backing store align 0x%" PRIx64
2360 " is larger than 'align' option 0x%" PRIx64,
2361 file_align, mr->align);
2362 return NULL;
2363 }
2364
2365 new_block = g_malloc0(sizeof(*new_block));
2366 new_block->mr = mr;
2367 new_block->used_length = size;
2368 new_block->max_length = size;
2369 new_block->flags = ram_flags;
2370 new_block->host = file_ram_alloc(new_block, size, fd, !file_size, errp);
2371 if (!new_block->host) {
2372 g_free(new_block);
2373 return NULL;
2374 }
2375
2376 ram_block_add(new_block, &local_err, ram_flags & RAM_SHARED);
2377 if (local_err) {
2378 g_free(new_block);
2379 error_propagate(errp, local_err);
2380 return NULL;
2381 }
2382 return new_block;
2383
2384}
2385
2386
2387RAMBlock *qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr,
2388 uint32_t ram_flags, const char *mem_path,
2389 Error **errp)
2390{
2391 int fd;
2392 bool created;
2393 RAMBlock *block;
2394
2395 fd = file_ram_open(mem_path, memory_region_name(mr), &created, errp);
2396 if (fd < 0) {
2397 return NULL;
2398 }
2399
2400 block = qemu_ram_alloc_from_fd(size, mr, ram_flags, fd, errp);
2401 if (!block) {
2402 if (created) {
2403 unlink(mem_path);
2404 }
2405 close(fd);
2406 return NULL;
2407 }
2408
2409 return block;
2410}
2411#endif
2412
2413static
2414RAMBlock *qemu_ram_alloc_internal(ram_addr_t size, ram_addr_t max_size,
2415 void (*resized)(const char*,
2416 uint64_t length,
2417 void *host),
2418 void *host, bool resizeable, bool share,
2419 MemoryRegion *mr, Error **errp)
2420{
2421 RAMBlock *new_block;
2422 Error *local_err = NULL;
2423
2424 size = HOST_PAGE_ALIGN(size);
2425 max_size = HOST_PAGE_ALIGN(max_size);
2426 new_block = g_malloc0(sizeof(*new_block));
2427 new_block->mr = mr;
2428 new_block->resized = resized;
2429 new_block->used_length = size;
2430 new_block->max_length = max_size;
2431 assert(max_size >= size);
2432 new_block->fd = -1;
2433 new_block->page_size = qemu_real_host_page_size;
2434 new_block->host = host;
2435 if (host) {
2436 new_block->flags |= RAM_PREALLOC;
2437 }
2438 if (resizeable) {
2439 new_block->flags |= RAM_RESIZEABLE;
2440 }
2441 ram_block_add(new_block, &local_err, share);
2442 if (local_err) {
2443 g_free(new_block);
2444 error_propagate(errp, local_err);
2445 return NULL;
2446 }
2447 return new_block;
2448}
2449
2450RAMBlock *qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
2451 MemoryRegion *mr, Error **errp)
2452{
2453 return qemu_ram_alloc_internal(size, size, NULL, host, false,
2454 false, mr, errp);
2455}
2456
2457RAMBlock *qemu_ram_alloc(ram_addr_t size, bool share,
2458 MemoryRegion *mr, Error **errp)
2459{
2460 return qemu_ram_alloc_internal(size, size, NULL, NULL, false,
2461 share, mr, errp);
2462}
2463
2464RAMBlock *qemu_ram_alloc_resizeable(ram_addr_t size, ram_addr_t maxsz,
2465 void (*resized)(const char*,
2466 uint64_t length,
2467 void *host),
2468 MemoryRegion *mr, Error **errp)
2469{
2470 return qemu_ram_alloc_internal(size, maxsz, resized, NULL, true,
2471 false, mr, errp);
2472}
2473
2474static void reclaim_ramblock(RAMBlock *block)
2475{
2476 if (block->flags & RAM_PREALLOC) {
2477 ;
2478 } else if (xen_enabled()) {
2479 xen_invalidate_map_cache_entry(block->host);
2480#ifndef _WIN32
2481 } else if (block->fd >= 0) {
2482 qemu_ram_munmap(block->fd, block->host, block->max_length);
2483 close(block->fd);
2484#endif
2485 } else {
2486 qemu_anon_ram_free(block->host, block->max_length);
2487 }
2488 g_free(block);
2489}
2490
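/*
 * Unlink @block from the RAM list; the host memory is released from an RCU
 * callback once all readers have finished with the block.
 */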
2491void qemu_ram_free(RAMBlock *block)
2492{
2493 if (!block) {
2494 return;
2495 }
2496
2497 if (block->host) {
2498 ram_block_notify_remove(block->host, block->max_length);
2499 }
2500
2501 qemu_mutex_lock_ramlist();
2502 QLIST_REMOVE_RCU(block, next);
2503 ram_list.mru_block = NULL;
2504
2505 smp_wmb();
2506 ram_list.version++;
2507 call_rcu(block, reclaim_ramblock, rcu);
2508 qemu_mutex_unlock_ramlist();
2509}
2510
2511#ifndef _WIN32
2512void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
2513{
2514 RAMBlock *block;
2515 ram_addr_t offset;
2516 int flags;
2517 void *area, *vaddr;
2518
2519 RAMBLOCK_FOREACH(block) {
2520 offset = addr - block->offset;
2521 if (offset < block->max_length) {
2522 vaddr = ramblock_ptr(block, offset);
2523 if (block->flags & RAM_PREALLOC) {
2524 ;
2525 } else if (xen_enabled()) {
2526 abort();
2527 } else {
2528 flags = MAP_FIXED;
2529 if (block->fd >= 0) {
2530 flags |= (block->flags & RAM_SHARED ?
2531 MAP_SHARED : MAP_PRIVATE);
2532 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
2533 flags, block->fd, offset);
2534 } else {
                    /*
                     * Anonymous memory: the remap must match the original
                     * allocation.  Accelerators that override phys_mem_alloc
                     * never reach this path, hence the assertion below.
                     */
2540 assert(phys_mem_alloc == qemu_anon_ram_alloc);
2541
2542 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
2543 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
2544 flags, -1, 0);
2545 }
2546 if (area != vaddr) {
2547 error_report("Could not remap addr: "
2548 RAM_ADDR_FMT "@" RAM_ADDR_FMT "",
2549 length, addr);
2550 exit(1);
2551 }
2552 memory_try_enable_merging(vaddr, length);
2553 qemu_ram_setup_dump(vaddr, length);
2554 }
2555 }
2556 }
2557}
2558#endif
2559
/*
 * Return a host pointer to RAM allocated with qemu_ram_alloc().
 *
 * Intended for callers that already know which block they are touching and
 * keep it alive (e.g. by holding the RCU read lock); for general-purpose
 * guest memory access use address_space_rw() or address_space_map() instead.
 */
2567void *qemu_map_ram_ptr(RAMBlock *ram_block, ram_addr_t addr)
2568{
2569 RAMBlock *block = ram_block;
2570
2571 if (block == NULL) {
2572 block = qemu_get_ram_block(addr);
2573 addr -= block->offset;
2574 }
2575
2576 if (xen_enabled() && block->host == NULL) {
        /*
         * Under Xen, blocks are mapped on demand through the mapcache rather
         * than up front.  For the block at offset 0, map only the page that
         * was asked for instead of the whole block; other blocks are mapped
         * completely and the mapping is cached in block->host.
         */
2581 if (block->offset == 0) {
2582 return xen_map_cache(addr, 0, 0, false);
2583 }
2584
2585 block->host = xen_map_cache(block->offset, block->max_length, 1, false);
2586 }
2587 return ramblock_ptr(block, addr);
2588}
2589
/*
 * Like qemu_map_ram_ptr(), but takes a size and clamps *size to what is
 * actually available in the block starting at addr.
 */
2595static void *qemu_ram_ptr_length(RAMBlock *ram_block, ram_addr_t addr,
2596 hwaddr *size, bool lock)
2597{
2598 RAMBlock *block = ram_block;
2599 if (*size == 0) {
2600 return NULL;
2601 }
2602
2603 if (block == NULL) {
2604 block = qemu_get_ram_block(addr);
2605 addr -= block->offset;
2606 }
2607 *size = MIN(*size, block->max_length - addr);
2608
2609 if (xen_enabled() && block->host == NULL) {
        /* Same lazy Xen mapcache handling as in qemu_map_ram_ptr() above. */
2614 if (block->offset == 0) {
2615 return xen_map_cache(addr, *size, lock, lock);
2616 }
2617
2618 block->host = xen_map_cache(block->offset, block->max_length, 1, lock);
2619 }
2620
2621 return ramblock_ptr(block, addr);
2622}
2623
2624
2625ram_addr_t qemu_ram_block_host_offset(RAMBlock *rb, void *host)
2626{
2627 ram_addr_t res = (uint8_t *)host - (uint8_t *)rb->host;
2628 assert((uintptr_t)host >= (uintptr_t)rb->host);
2629 assert(res < rb->max_length);
2630
2631 return res;
2632}
2633
/*
 * Translate a host pointer back into the RAMBlock that contains it and the
 * offset of the pointer within that block.
 *
 * ptr:          host pointer to look up
 * round_offset: if true, round the resulting offset down to a target page
 *               boundary
 * offset:       set to the offset within the returned block
 *
 * Returns the RAMBlock, or NULL if the pointer does not fall inside any
 * block's host mapping.
 */
2651RAMBlock *qemu_ram_block_from_host(void *ptr, bool round_offset,
2652 ram_addr_t *offset)
2653{
2654 RAMBlock *block;
2655 uint8_t *host = ptr;
2656
2657 if (xen_enabled()) {
2658 ram_addr_t ram_addr;
2659 RCU_READ_LOCK_GUARD();
2660 ram_addr = xen_ram_addr_from_mapcache(ptr);
2661 block = qemu_get_ram_block(ram_addr);
2662 if (block) {
2663 *offset = ram_addr - block->offset;
2664 }
2665 return block;
2666 }
2667
2668 RCU_READ_LOCK_GUARD();
2669 block = atomic_rcu_read(&ram_list.mru_block);
2670 if (block && block->host && host - block->host < block->max_length) {
2671 goto found;
2672 }
2673
2674 RAMBLOCK_FOREACH(block) {
2675
2676 if (block->host == NULL) {
2677 continue;
2678 }
2679 if (host - block->host < block->max_length) {
2680 goto found;
2681 }
2682 }
2683
2684 return NULL;
2685
2686found:
2687 *offset = (host - block->host);
2688 if (round_offset) {
2689 *offset &= TARGET_PAGE_MASK;
2690 }
2691 return block;
2692}
2693
/*
 * Find the RAMBlock whose idstr matches @name; returns NULL if there is no
 * such block.
 */
2701RAMBlock *qemu_ram_block_by_name(const char *name)
2702{
2703 RAMBlock *block;
2704
2705 RAMBLOCK_FOREACH(block) {
2706 if (!strcmp(name, block->idstr)) {
2707 return block;
2708 }
2709 }
2710
2711 return NULL;
2712}
2713
2714
2715
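/*
 * Translate a host pointer into a ram_addr_t; returns RAM_ADDR_INVALID if
 * the pointer does not point into guest RAM.
 */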
2716ram_addr_t qemu_ram_addr_from_host(void *ptr)
2717{
2718 RAMBlock *block;
2719 ram_addr_t offset;
2720
2721 block = qemu_ram_block_from_host(ptr, false, &offset);
2722 if (!block) {
2723 return RAM_ADDR_INVALID;
2724 }
2725
2726 return block->offset + offset;
2727}
2728
2729
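/*
 * Check whether a guest access of @len bytes at @addr matches a watchpoint
 * with the given access @flags and, if so, deliver the debug exception.
 * TCG only (see the assertion below).
 */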
2730void cpu_check_watchpoint(CPUState *cpu, vaddr addr, vaddr len,
2731 MemTxAttrs attrs, int flags, uintptr_t ra)
2732{
2733 CPUClass *cc = CPU_GET_CLASS(cpu);
2734 CPUWatchpoint *wp;
2735
2736 assert(tcg_enabled());
2737 if (cpu->watchpoint_hit) {
        /*
         * A watchpoint already hit and we re-entered the check after the TB
         * was rebuilt; raise the debug interrupt so that it is taken once
         * the current instruction has finished.
         */
2743 qemu_mutex_lock_iothread();
2744 cpu_interrupt(cpu, CPU_INTERRUPT_DEBUG);
2745 qemu_mutex_unlock_iothread();
2746 return;
2747 }
2748
2749 addr = cc->adjust_watchpoint_address(cpu, addr, len);
2750 QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
2751 if (watchpoint_address_matches(wp, addr, len)
2752 && (wp->flags & flags)) {
2753 if (flags == BP_MEM_READ) {
2754 wp->flags |= BP_WATCHPOINT_HIT_READ;
2755 } else {
2756 wp->flags |= BP_WATCHPOINT_HIT_WRITE;
2757 }
2758 wp->hitaddr = MAX(addr, wp->vaddr);
2759 wp->hitattrs = attrs;
2760 if (!cpu->watchpoint_hit) {
2761 if (wp->flags & BP_CPU &&
2762 !cc->debug_check_watchpoint(cpu, wp)) {
2763 wp->flags &= ~BP_WATCHPOINT_HIT;
2764 continue;
2765 }
2766 cpu->watchpoint_hit = wp;
2767
2768 mmap_lock();
2769 tb_check_watchpoint(cpu, ra);
2770 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
2771 cpu->exception_index = EXCP_DEBUG;
2772 mmap_unlock();
2773 cpu_loop_exit_restore(cpu, ra);
2774 } else {
2775
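                    /* Force execution of exactly one insn in the next TB. */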
2776 cpu->cflags_next_tb = 1 | curr_cflags();
2777 mmap_unlock();
2778 if (ra) {
2779 cpu_restore_state(cpu, ra, true);
2780 }
2781 cpu_loop_exit_noexc(cpu);
2782 }
2783 }
2784 } else {
2785 wp->flags &= ~BP_WATCHPOINT_HIT;
2786 }
2787 }
2788}
2789
2790static MemTxResult flatview_read(FlatView *fv, hwaddr addr,
2791 MemTxAttrs attrs, void *buf, hwaddr len);
2792static MemTxResult flatview_write(FlatView *fv, hwaddr addr, MemTxAttrs attrs,
2793 const void *buf, hwaddr len);
2794static bool flatview_access_valid(FlatView *fv, hwaddr addr, hwaddr len,
2795 bool is_write, MemTxAttrs attrs);
2796
2797static MemTxResult subpage_read(void *opaque, hwaddr addr, uint64_t *data,
2798 unsigned len, MemTxAttrs attrs)
2799{
2800 subpage_t *subpage = opaque;
2801 uint8_t buf[8];
2802 MemTxResult res;
2803
2804#if defined(DEBUG_SUBPAGE)
2805 printf("%s: subpage %p len %u addr " TARGET_FMT_plx "\n", __func__,
2806 subpage, len, addr);
2807#endif
2808 res = flatview_read(subpage->fv, addr + subpage->base, attrs, buf, len);
2809 if (res) {
2810 return res;
2811 }
2812 *data = ldn_p(buf, len);
2813 return MEMTX_OK;
2814}
2815
2816static MemTxResult subpage_write(void *opaque, hwaddr addr,
2817 uint64_t value, unsigned len, MemTxAttrs attrs)
2818{
2819 subpage_t *subpage = opaque;
2820 uint8_t buf[8];
2821
2822#if defined(DEBUG_SUBPAGE)
2823 printf("%s: subpage %p len %u addr " TARGET_FMT_plx
2824 " value %"PRIx64"\n",
2825 __func__, subpage, len, addr, value);
2826#endif
2827 stn_p(buf, len, value);
2828 return flatview_write(subpage->fv, addr + subpage->base, attrs, buf, len);
2829}
2830
2831static bool subpage_accepts(void *opaque, hwaddr addr,
2832 unsigned len, bool is_write,
2833 MemTxAttrs attrs)
2834{
2835 subpage_t *subpage = opaque;
2836#if defined(DEBUG_SUBPAGE)
2837 printf("%s: subpage %p %c len %u addr " TARGET_FMT_plx "\n",
2838 __func__, subpage, is_write ? 'w' : 'r', len, addr);
2839#endif
2840
2841 return flatview_access_valid(subpage->fv, addr + subpage->base,
2842 len, is_write, attrs);
2843}
2844
2845static const MemoryRegionOps subpage_ops = {
2846 .read_with_attrs = subpage_read,
2847 .write_with_attrs = subpage_write,
2848 .impl.min_access_size = 1,
2849 .impl.max_access_size = 8,
2850 .valid.min_access_size = 1,
2851 .valid.max_access_size = 8,
2852 .valid.accepts = subpage_accepts,
2853 .endianness = DEVICE_NATIVE_ENDIAN,
2854};
2855
2856static int subpage_register(subpage_t *mmio, uint32_t start, uint32_t end,
2857 uint16_t section)
2858{
2859 int idx, eidx;
2860
2861 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
2862 return -1;
2863 idx = SUBPAGE_IDX(start);
2864 eidx = SUBPAGE_IDX(end);
2865#if defined(DEBUG_SUBPAGE)
2866 printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
2867 __func__, mmio, start, end, idx, eidx, section);
2868#endif
2869 for (; idx <= eidx; idx++) {
2870 mmio->sub_section[idx] = section;
2871 }
2872
2873 return 0;
2874}
2875
2876static subpage_t *subpage_init(FlatView *fv, hwaddr base)
2877{
2878 subpage_t *mmio;
2879
2880
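    /* g_malloc0() leaves every sub_section entry as PHYS_SECTION_UNASSIGNED. */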
2881 mmio = g_malloc0(sizeof(subpage_t) + TARGET_PAGE_SIZE * sizeof(uint16_t));
2882 mmio->fv = fv;
2883 mmio->base = base;
2884 memory_region_init_io(&mmio->iomem, NULL, &subpage_ops, mmio,
2885 NULL, TARGET_PAGE_SIZE);
2886 mmio->iomem.subpage = true;
2887#if defined(DEBUG_SUBPAGE)
2888 printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
2889 mmio, base, TARGET_PAGE_SIZE);
2890#endif
2891
2892 return mmio;
2893}
2894
2895static uint16_t dummy_section(PhysPageMap *map, FlatView *fv, MemoryRegion *mr)
2896{
2897 assert(fv);
2898 MemoryRegionSection section = {
2899 .fv = fv,
2900 .mr = mr,
2901 .offset_within_address_space = 0,
2902 .offset_within_region = 0,
2903 .size = int128_2_64(),
2904 };
2905
    return phys_section_add(map, &section);
2907}
2908
2909MemoryRegionSection *iotlb_to_section(CPUState *cpu,
2910 hwaddr index, MemTxAttrs attrs)
2911{
2912 int asidx = cpu_asidx_from_attrs(cpu, attrs);
2913 CPUAddressSpace *cpuas = &cpu->cpu_ases[asidx];
2914 AddressSpaceDispatch *d = atomic_rcu_read(&cpuas->memory_dispatch);
2915 MemoryRegionSection *sections = d->map.sections;
2916
    return &sections[index & ~TARGET_PAGE_MASK];
2918}
2919
2920static void io_mem_init(void)
2921{
2922 memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL,
2923 NULL, UINT64_MAX);
2924}
2925
2926AddressSpaceDispatch *address_space_dispatch_new(FlatView *fv)
2927{
2928 AddressSpaceDispatch *d = g_new0(AddressSpaceDispatch, 1);
2929 uint16_t n;
2930
2931 n = dummy_section(&d->map, fv, &io_mem_unassigned);
2932 assert(n == PHYS_SECTION_UNASSIGNED);
2933
2934 d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .skip = 1 };
2935
2936 return d;
2937}
2938
2939void address_space_dispatch_free(AddressSpaceDispatch *d)
2940{
2941 phys_sections_free(&d->map);
2942 g_free(d);
2943}
2944
2945static void do_nothing(CPUState *cpu, run_on_cpu_data d)
2946{
2947}
2948
2949static void tcg_log_global_after_sync(MemoryListener *listener)
2950{
2951 CPUAddressSpace *cpuas;
2952
    /*
     * log_global_after_sync runs once the dirty bitmap has been
     * synchronized.  A vCPU may still be inside a translation block that
     * started before the sync and will perform further writes before it
     * reaches a safe point, so queue a no-op with run_on_cpu() to make the
     * vCPU drain its current TB before the caller consumes the bitmap.
     */
2970 if (replay_mode == REPLAY_MODE_NONE) {
        /*
         * Skipped under record/replay: there run_on_cpu() can only complete
         * at a replay event boundary, which callers of this listener (e.g.
         * display updates) cannot guarantee, and waiting would deadlock.
         */
2978 cpuas = container_of(listener, CPUAddressSpace, tcg_as_listener);
2979 run_on_cpu(cpuas->cpu, do_nothing, RUN_ON_CPU_NULL);
2980 }
2981}
2982
2983static void tcg_commit(MemoryListener *listener)
2984{
2985 CPUAddressSpace *cpuas;
2986 AddressSpaceDispatch *d;
2987
2988 assert(tcg_enabled());
2989
2990
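    /* Each vCPU caches ram_addr_t values in its TLB, so it is flushed below. */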
2991 cpuas = container_of(listener, CPUAddressSpace, tcg_as_listener);
2992 cpu_reloading_memory_map();
2993
    /*
     * Reload the dispatch pointer now: cpu_reloading_memory_map() may have
     * ended the RCU critical section that protected the old value.
     */
2997 d = address_space_to_dispatch(cpuas->as);
2998 atomic_rcu_set(&cpuas->memory_dispatch, d);
2999 tlb_flush(cpuas->cpu);
3000}
3001
3002static void memory_map_init(void)
3003{
3004 system_memory = g_malloc(sizeof(*system_memory));
3005
3006 memory_region_init(system_memory, NULL, "system", UINT64_MAX);
3007 address_space_init(&address_space_memory, system_memory, "memory");
3008
3009 system_io = g_malloc(sizeof(*system_io));
3010 memory_region_init_io(system_io, NULL, &unassigned_io_ops, NULL, "io",
3011 65536);
3012 address_space_init(&address_space_io, system_io, "I/O");
3013}
3014
3015MemoryRegion *get_system_memory(void)
3016{
3017 return system_memory;
3018}
3019
3020MemoryRegion *get_system_io(void)
3021{
3022 return system_io;
3023}
3024
3025#endif
3026
3027
3028#if defined(CONFIG_USER_ONLY)
3029int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
3030 void *ptr, target_ulong len, bool is_write)
3031{
3032 int flags;
3033 target_ulong l, page;
3034 void * p;
3035 uint8_t *buf = ptr;
3036
3037 while (len > 0) {
3038 page = addr & TARGET_PAGE_MASK;
3039 l = (page + TARGET_PAGE_SIZE) - addr;
3040 if (l > len)
3041 l = len;
3042 flags = page_get_flags(page);
3043 if (!(flags & PAGE_VALID))
3044 return -1;
3045 if (is_write) {
3046 if (!(flags & PAGE_WRITE))
3047 return -1;
3048
3049 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
3050 return -1;
3051 memcpy(p, buf, l);
3052 unlock_user(p, addr, l);
3053 } else {
3054 if (!(flags & PAGE_READ))
3055 return -1;
3056
3057 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
3058 return -1;
3059 memcpy(buf, p, l);
3060 unlock_user(p, addr, 0);
3061 }
3062 len -= l;
3063 buf += l;
3064 addr += l;
3065 }
3066 return 0;
3067}
3068
3069#else
3070
3071static void invalidate_and_set_dirty(MemoryRegion *mr, hwaddr addr,
3072 hwaddr length)
3073{
3074 uint8_t dirty_log_mask = memory_region_get_dirty_log_mask(mr);
3075 addr += memory_region_get_ram_addr(mr);
3076
    /*
     * No early return when dirty_log_mask is (or becomes) zero:
     * cpu_physical_memory_set_dirty_range() still needs to run for its side
     * effects (e.g. notifying Xen of modified memory).
     */
3081 if (dirty_log_mask) {
3082 dirty_log_mask =
3083 cpu_physical_memory_range_includes_clean(addr, length, dirty_log_mask);
3084 }
3085 if (dirty_log_mask & (1 << DIRTY_MEMORY_CODE)) {
3086 assert(tcg_enabled());
3087 tb_invalidate_phys_range(addr, addr + length);
3088 dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
3089 }
3090 cpu_physical_memory_set_dirty_range(addr, length, dirty_log_mask);
3091}
3092
3093void memory_region_flush_rom_device(MemoryRegion *mr, hwaddr addr, hwaddr size)
3094{
    /*
     * In principle this would work for other memory region types as well,
     * but ROM devices are the only case that needs it; everything else
     * should go through the address_space_read()/write() APIs, hence the
     * assertion.
     */
3101 assert(memory_region_is_romd(mr));
3102
3103 invalidate_and_set_dirty(mr, addr, size);
3104}
3105
3106static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr)
3107{
3108 unsigned access_size_max = mr->ops->valid.max_access_size;
3109
3110
3111
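    /* Regions without a declared limit are assumed to support 1-4 byte accesses. */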
3112 if (access_size_max == 0) {
3113 access_size_max = 4;
3114 }
3115
3116
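    /* Bound the access size by the alignment of the address (lowest set bit). */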
3117 if (!mr->ops->impl.unaligned) {
3118 unsigned align_size_max = addr & -addr;
3119 if (align_size_max != 0 && align_size_max < access_size_max) {
3120 access_size_max = align_size_max;
3121 }
3122 }
3123
3124
3125 if (l > access_size_max) {
3126 l = access_size_max;
3127 }
3128 l = pow2floor(l);
3129
3130 return l;
3131}
3132
3133static bool prepare_mmio_access(MemoryRegion *mr)
3134{
3135 bool unlocked = !qemu_mutex_iothread_locked();
3136 bool release_lock = false;
3137
3138 if (unlocked && mr->global_locking) {
3139 qemu_mutex_lock_iothread();
3140 unlocked = false;
3141 release_lock = true;
3142 }
3143 if (mr->flush_coalesced_mmio) {
3144 if (unlocked) {
3145 qemu_mutex_lock_iothread();
3146 }
3147 qemu_flush_coalesced_mmio_buffer();
3148 if (unlocked) {
3149 qemu_mutex_unlock_iothread();
3150 }
3151 }
3152
3153 return release_lock;
3154}
3155
3156
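/*
 * Called with the FlatView protected by the caller (RCU read lock or a
 * FlatView reference); @mr, @addr1 and @l come from a prior translation of
 * @addr.
 */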
3157static MemTxResult flatview_write_continue(FlatView *fv, hwaddr addr,
3158 MemTxAttrs attrs,
3159 const void *ptr,
3160 hwaddr len, hwaddr addr1,
3161 hwaddr l, MemoryRegion *mr)
3162{
3163 uint8_t *ram_ptr;
3164 uint64_t val;
3165 MemTxResult result = MEMTX_OK;
3166 bool release_lock = false;
3167 const uint8_t *buf = ptr;
3168
3169 for (;;) {
3170 if (!memory_access_is_direct(mr, true)) {
3171 release_lock |= prepare_mmio_access(mr);
3172 l = memory_access_size(mr, l, addr1);
3173
3174
3175 val = ldn_he_p(buf, l);
3176 result |= memory_region_dispatch_write(mr, addr1, val,
3177 size_memop(l), attrs);
3178 } else {
3179
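            /* RAM case: copy directly into guest memory and mark it dirty. */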
3180 ram_ptr = qemu_ram_ptr_length(mr->ram_block, addr1, &l, false);
3181 memcpy(ram_ptr, buf, l);
3182 invalidate_and_set_dirty(mr, addr1, l);
3183 }
3184
3185 if (release_lock) {
3186 qemu_mutex_unlock_iothread();
3187 release_lock = false;
3188 }
3189
3190 len -= l;
3191 buf += l;
3192 addr += l;
3193
3194 if (!len) {
3195 break;
3196 }
3197
3198 l = len;
3199 mr = flatview_translate(fv, addr, &addr1, &l, true, attrs);
3200 }
3201
3202 return result;
3203}
3204
3205
3206static MemTxResult flatview_write(FlatView *fv, hwaddr addr, MemTxAttrs attrs,
3207 const void *buf, hwaddr len)
3208{
3209 hwaddr l;
3210 hwaddr addr1;
3211 MemoryRegion *mr;
3212 MemTxResult result = MEMTX_OK;
3213
3214 l = len;
3215 mr = flatview_translate(fv, addr, &addr1, &l, true, attrs);
3216 result = flatview_write_continue(fv, addr, attrs, buf, len,
3217 addr1, l, mr);
3218
3219 return result;
3220}
3221
3222
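/* Counterpart of flatview_write_continue(); same calling conventions. */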
3223MemTxResult flatview_read_continue(FlatView *fv, hwaddr addr,
3224 MemTxAttrs attrs, void *ptr,
3225 hwaddr len, hwaddr addr1, hwaddr l,
3226 MemoryRegion *mr)
3227{
3228 uint8_t *ram_ptr;
3229 uint64_t val;
3230 MemTxResult result = MEMTX_OK;
3231 bool release_lock = false;
3232 uint8_t *buf = ptr;
3233
3234 for (;;) {
3235 if (!memory_access_is_direct(mr, false)) {
3236
3237 release_lock |= prepare_mmio_access(mr);
3238 l = memory_access_size(mr, l, addr1);
3239 result |= memory_region_dispatch_read(mr, addr1, &val,
3240 size_memop(l), attrs);
3241 stn_he_p(buf, l, val);
3242 } else {
3243
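            /* RAM case: copy directly out of guest memory. */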
3244 ram_ptr = qemu_ram_ptr_length(mr->ram_block, addr1, &l, false);
3245 memcpy(buf, ram_ptr, l);
3246 }
3247
3248 if (release_lock) {
3249 qemu_mutex_unlock_iothread();
3250 release_lock = false;
3251 }
3252
3253 len -= l;
3254 buf += l;
3255 addr += l;
3256
3257 if (!len) {
3258 break;
3259 }
3260
3261 l = len;
3262 mr = flatview_translate(fv, addr, &addr1, &l, false, attrs);
3263 }
3264
3265 return result;
3266}
3267
3268
3269static MemTxResult flatview_read(FlatView *fv, hwaddr addr,
3270 MemTxAttrs attrs, void *buf, hwaddr len)
3271{
3272 hwaddr l;
3273 hwaddr addr1;
3274 MemoryRegion *mr;
3275
3276 l = len;
3277 mr = flatview_translate(fv, addr, &addr1, &l, false, attrs);
3278 return flatview_read_continue(fv, addr, attrs, buf, len,
3279 addr1, l, mr);
3280}
3281
3282MemTxResult address_space_read_full(AddressSpace *as, hwaddr addr,
3283 MemTxAttrs attrs, void *buf, hwaddr len)
3284{
3285 MemTxResult result = MEMTX_OK;
3286 FlatView *fv;
3287
3288 if (len > 0) {
3289 RCU_READ_LOCK_GUARD();
3290 fv = address_space_to_flatview(as);
3291 result = flatview_read(fv, addr, attrs, buf, len);
3292 }
3293
3294 return result;
3295}
3296
3297MemTxResult address_space_write(AddressSpace *as, hwaddr addr,
3298 MemTxAttrs attrs,
3299 const void *buf, hwaddr len)
3300{
3301 MemTxResult result = MEMTX_OK;
3302 FlatView *fv;
3303
3304 if (len > 0) {
3305 RCU_READ_LOCK_GUARD();
3306 fv = address_space_to_flatview(as);
3307 result = flatview_write(fv, addr, attrs, buf, len);
3308 }
3309
3310 return result;
3311}
3312
3313MemTxResult address_space_rw(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
3314 void *buf, hwaddr len, bool is_write)
3315{
3316 if (is_write) {
3317 return address_space_write(as, addr, attrs, buf, len);
3318 } else {
3319 return address_space_read_full(as, addr, attrs, buf, len);
3320 }
3321}
3322
3323void cpu_physical_memory_rw(hwaddr addr, void *buf,
3324 hwaddr len, bool is_write)
3325{
3326 address_space_rw(&address_space_memory, addr, MEMTXATTRS_UNSPECIFIED,
3327 buf, len, is_write);
3328}
3329
3330enum write_rom_type {
3331 WRITE_DATA,
3332 FLUSH_CACHE,
3333};
3334
3335static inline MemTxResult address_space_write_rom_internal(AddressSpace *as,
3336 hwaddr addr,
3337 MemTxAttrs attrs,
3338 const void *ptr,
3339 hwaddr len,
3340 enum write_rom_type type)
3341{
3342 hwaddr l;
3343 uint8_t *ram_ptr;
3344 hwaddr addr1;
3345 MemoryRegion *mr;
3346 const uint8_t *buf = ptr;
3347
3348 RCU_READ_LOCK_GUARD();
3349 while (len > 0) {
3350 l = len;
3351 mr = address_space_translate(as, addr, &addr1, &l, true, attrs);
3352
3353 if (!(memory_region_is_ram(mr) ||
3354 memory_region_is_romd(mr))) {
3355 l = memory_access_size(mr, l, addr1);
3356 } else {
3357
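            /* RAM or ROM device: write (or flush icache) through the host pointer. */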
3358 ram_ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
3359 switch (type) {
3360 case WRITE_DATA:
3361 memcpy(ram_ptr, buf, l);
3362 invalidate_and_set_dirty(mr, addr1, l);
3363 break;
3364 case FLUSH_CACHE:
3365 flush_icache_range((uintptr_t)ram_ptr, (uintptr_t)ram_ptr + l);
3366 break;
3367 }
3368 }
3369 len -= l;
3370 buf += l;
3371 addr += l;
3372 }
3373 return MEMTX_OK;
3374}
3375
3376
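/*
 * Write @len bytes at @addr, also allowing writes into ROM and ROM-device
 * regions; used e.g. for loading guest images and for debugger writes.
 */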
3377MemTxResult address_space_write_rom(AddressSpace *as, hwaddr addr,
3378 MemTxAttrs attrs,
3379 const void *buf, hwaddr len)
3380{
3381 return address_space_write_rom_internal(as, addr, attrs,
3382 buf, len, WRITE_DATA);
3383}
3384
3385void cpu_flush_icache_range(hwaddr start, hwaddr len)
3386{
    /*
     * Mirror the effect of an icache flush performed inside the guest.  TCG
     * keeps its translation cache coherent with guest memory writes, so
     * there is nothing to do; hardware accelerators need the host
     * instruction cache flushed instead.
     */
3393 if (tcg_enabled()) {
3394 return;
3395 }
3396
3397 address_space_write_rom_internal(&address_space_memory,
3398 start, MEMTXATTRS_UNSPECIFIED,
3399 NULL, len, FLUSH_CACHE);
3400}
3401
3402typedef struct {
3403 MemoryRegion *mr;
3404 void *buffer;
3405 hwaddr addr;
3406 hwaddr len;
3407 bool in_use;
3408} BounceBuffer;
3409
3410static BounceBuffer bounce;
3411
3412typedef struct MapClient {
3413 QEMUBH *bh;
3414 QLIST_ENTRY(MapClient) link;
3415} MapClient;
3416
3417QemuMutex map_client_list_lock;
3418static QLIST_HEAD(, MapClient) map_client_list
3419 = QLIST_HEAD_INITIALIZER(map_client_list);
3420
3421static void cpu_unregister_map_client_do(MapClient *client)
3422{
3423 QLIST_REMOVE(client, link);
3424 g_free(client);
3425}
3426
3427static void cpu_notify_map_clients_locked(void)
3428{
3429 MapClient *client;
3430
3431 while (!QLIST_EMPTY(&map_client_list)) {
3432 client = QLIST_FIRST(&map_client_list);
3433 qemu_bh_schedule(client->bh);
3434 cpu_unregister_map_client_do(client);
3435 }
3436}
3437
3438void cpu_register_map_client(QEMUBH *bh)
3439{
3440 MapClient *client = g_malloc(sizeof(*client));
3441
3442 qemu_mutex_lock(&map_client_list_lock);
3443 client->bh = bh;
3444 QLIST_INSERT_HEAD(&map_client_list, client, link);
3445 if (!atomic_read(&bounce.in_use)) {
3446 cpu_notify_map_clients_locked();
3447 }
3448 qemu_mutex_unlock(&map_client_list_lock);
3449}
3450
3451void cpu_exec_init_all(void)
3452{
3453 qemu_mutex_init(&ram_list.mutex);
3454
    /*
     * The structures set up below depend on the final target page size, so
     * freeze it first; no further changes are possible after this point.
     */
3461 finalize_target_page_bits();
3462 io_mem_init();
3463 memory_map_init();
3464 qemu_mutex_init(&map_client_list_lock);
3465}
3466
3467void cpu_unregister_map_client(QEMUBH *bh)
3468{
3469 MapClient *client;
3470
3471 qemu_mutex_lock(&map_client_list_lock);
3472 QLIST_FOREACH(client, &map_client_list, link) {
3473 if (client->bh == bh) {
3474 cpu_unregister_map_client_do(client);
3475 break;
3476 }
3477 }
3478 qemu_mutex_unlock(&map_client_list_lock);
3479}
3480
3481static void cpu_notify_map_clients(void)
3482{
3483 qemu_mutex_lock(&map_client_list_lock);
3484 cpu_notify_map_clients_locked();
3485 qemu_mutex_unlock(&map_client_list_lock);
3486}
3487
3488static bool flatview_access_valid(FlatView *fv, hwaddr addr, hwaddr len,
3489 bool is_write, MemTxAttrs attrs)
3490{
3491 MemoryRegion *mr;
3492 hwaddr l, xlat;
3493
3494 while (len > 0) {
3495 l = len;
3496 mr = flatview_translate(fv, addr, &xlat, &l, is_write, attrs);
3497 if (!memory_access_is_direct(mr, is_write)) {
3498 l = memory_access_size(mr, l, addr);
3499 if (!memory_region_access_valid(mr, xlat, l, is_write, attrs)) {
3500 return false;
3501 }
3502 }
3503
3504 len -= l;
3505 addr += l;
3506 }
3507 return true;
3508}
3509
3510bool address_space_access_valid(AddressSpace *as, hwaddr addr,
3511 hwaddr len, bool is_write,
3512 MemTxAttrs attrs)
3513{
3514 FlatView *fv;
3515 bool result;
3516
3517 RCU_READ_LOCK_GUARD();
3518 fv = address_space_to_flatview(as);
3519 result = flatview_access_valid(fv, addr, len, is_write, attrs);
3520 return result;
3521}
3522
3523static hwaddr
3524flatview_extend_translation(FlatView *fv, hwaddr addr,
3525 hwaddr target_len,
3526 MemoryRegion *mr, hwaddr base, hwaddr len,
3527 bool is_write, MemTxAttrs attrs)
3528{
3529 hwaddr done = 0;
3530 hwaddr xlat;
3531 MemoryRegion *this_mr;
3532
3533 for (;;) {
3534 target_len -= len;
3535 addr += len;
3536 done += len;
3537 if (target_len == 0) {
3538 return done;
3539 }
3540
3541 len = target_len;
3542 this_mr = flatview_translate(fv, addr, &xlat,
3543 &len, is_write, attrs);
3544 if (this_mr != mr || xlat != base + done) {
3545 return done;
3546 }
3547 }
3548}
3549
/*
 * Map a guest physical range into host-accessible memory.  May map less than
 * requested; the mapped length is returned in *plen.  MMIO regions are
 * handled through a single global bounce buffer, so the call returns NULL
 * when that buffer is already in use; cpu_register_map_client() can be used
 * to learn when a retry is likely to succeed.
 */
3557void *address_space_map(AddressSpace *as,
3558 hwaddr addr,
3559 hwaddr *plen,
3560 bool is_write,
3561 MemTxAttrs attrs)
3562{
3563 hwaddr len = *plen;
3564 hwaddr l, xlat;
3565 MemoryRegion *mr;
3566 void *ptr;
3567 FlatView *fv;
3568
3569 if (len == 0) {
3570 return NULL;
3571 }
3572
3573 l = len;
3574 RCU_READ_LOCK_GUARD();
3575 fv = address_space_to_flatview(as);
3576 mr = flatview_translate(fv, addr, &xlat, &l, is_write, attrs);
3577
3578 if (!memory_access_is_direct(mr, is_write)) {
3579 if (atomic_xchg(&bounce.in_use, true)) {
3580 *plen = 0;
3581 return NULL;
3582 }
3583
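        /* Bounce-buffer at most one target page at a time. */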
3584 l = MIN(l, TARGET_PAGE_SIZE);
3585 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, l);
3586 bounce.addr = addr;
3587 bounce.len = l;
3588
3589 memory_region_ref(mr);
3590 bounce.mr = mr;
3591 if (!is_write) {
3592 flatview_read(fv, addr, MEMTXATTRS_UNSPECIFIED,
3593 bounce.buffer, l);
3594 }
3595
3596 *plen = l;
3597 return bounce.buffer;
3598 }
3599
3600
3601 memory_region_ref(mr);
3602 *plen = flatview_extend_translation(fv, addr, len, mr, xlat,
3603 l, is_write, attrs);
3604 ptr = qemu_ram_ptr_length(mr->ram_block, xlat, plen, true);
3605
3606 return ptr;
3607}
3608
/*
 * Unmap a region mapped with address_space_map().  @access_len is the amount
 * of data that was actually read or written; for write mappings the dirty
 * bitmap is updated (and bounce-buffered data is written back) here.
 */
3613void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
3614 bool is_write, hwaddr access_len)
3615{
3616 if (buffer != bounce.buffer) {
3617 MemoryRegion *mr;
3618 ram_addr_t addr1;
3619
3620 mr = memory_region_from_host(buffer, &addr1);
3621 assert(mr != NULL);
3622 if (is_write) {
3623 invalidate_and_set_dirty(mr, addr1, access_len);
3624 }
3625 if (xen_enabled()) {
3626 xen_invalidate_map_cache_entry(buffer);
3627 }
3628 memory_region_unref(mr);
3629 return;
3630 }
3631 if (is_write) {
3632 address_space_write(as, bounce.addr, MEMTXATTRS_UNSPECIFIED,
3633 bounce.buffer, access_len);
3634 }
3635 qemu_vfree(bounce.buffer);
3636 bounce.buffer = NULL;
3637 memory_region_unref(bounce.mr);
3638 atomic_mb_set(&bounce.in_use, false);
3639 cpu_notify_map_clients();
3640}
3641
3642void *cpu_physical_memory_map(hwaddr addr,
3643 hwaddr *plen,
3644 bool is_write)
3645{
3646 return address_space_map(&address_space_memory, addr, plen, is_write,
3647 MEMTXATTRS_UNSPECIFIED);
3648}
3649
3650void cpu_physical_memory_unmap(void *buffer, hwaddr len,
3651 bool is_write, hwaddr access_len)
3652{
3653 return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
3654}
3655
3656#define ARG1_DECL AddressSpace *as
3657#define ARG1 as
3658#define SUFFIX
3659#define TRANSLATE(...) address_space_translate(as, __VA_ARGS__)
3660#define RCU_READ_LOCK(...) rcu_read_lock()
3661#define RCU_READ_UNLOCK(...) rcu_read_unlock()
3662#include "memory_ldst.inc.c"
3663
3664int64_t address_space_cache_init(MemoryRegionCache *cache,
3665 AddressSpace *as,
3666 hwaddr addr,
3667 hwaddr len,
3668 bool is_write)
3669{
3670 AddressSpaceDispatch *d;
3671 hwaddr l;
3672 MemoryRegion *mr;
3673
3674 assert(len > 0);
3675
3676 l = len;
3677 cache->fv = address_space_get_flatview(as);
3678 d = flatview_to_dispatch(cache->fv);
3679 cache->mrs = *address_space_translate_internal(d, addr, &cache->xlat, &l, true);
3680
3681 mr = cache->mrs.mr;
3682 memory_region_ref(mr);
3683 if (memory_access_is_direct(mr, is_write)) {
        /*
         * Only the length of the direct mapping matters here, so unspecified
         * memory attributes are good enough for the translation.
         */
3688 l = flatview_extend_translation(cache->fv, addr, len, mr,
3689 cache->xlat, l, is_write,
3690 MEMTXATTRS_UNSPECIFIED);
3691 cache->ptr = qemu_ram_ptr_length(mr->ram_block, cache->xlat, &l, true);
3692 } else {
3693 cache->ptr = NULL;
3694 }
3695
3696 cache->len = l;
3697 cache->is_write = is_write;
3698 return l;
3699}
3700
3701void address_space_cache_invalidate(MemoryRegionCache *cache,
3702 hwaddr addr,
3703 hwaddr access_len)
3704{
3705 assert(cache->is_write);
3706 if (likely(cache->ptr)) {
3707 invalidate_and_set_dirty(cache->mrs.mr, addr + cache->xlat, access_len);
3708 }
3709}
3710
3711void address_space_cache_destroy(MemoryRegionCache *cache)
3712{
3713 if (!cache->mrs.mr) {
3714 return;
3715 }
3716
3717 if (xen_enabled()) {
3718 xen_invalidate_map_cache_entry(cache->ptr);
3719 }
3720 memory_region_unref(cache->mrs.mr);
3721 flatview_unref(cache->fv);
3722 cache->mrs.mr = NULL;
3723 cache->fv = NULL;
3724}
3725
/*
 * Like address_space_translate(), but restricted to the range covered by a
 * MemoryRegionCache.  Only used when the cached region is not directly
 * accessible RAM (cache->ptr == NULL).
 */
3731static inline MemoryRegion *address_space_translate_cached(
3732 MemoryRegionCache *cache, hwaddr addr, hwaddr *xlat,
3733 hwaddr *plen, bool is_write, MemTxAttrs attrs)
3734{
3735 MemoryRegionSection section;
3736 MemoryRegion *mr;
3737 IOMMUMemoryRegion *iommu_mr;
3738 AddressSpace *target_as;
3739
3740 assert(!cache->ptr);
3741 *xlat = addr + cache->xlat;
3742
3743 mr = cache->mrs.mr;
3744 iommu_mr = memory_region_get_iommu(mr);
3745 if (!iommu_mr) {
3746
3747 return mr;
3748 }
3749
3750 section = address_space_translate_iommu(iommu_mr, xlat, plen,
3751 NULL, is_write, true,
3752 &target_as, attrs);
3753 return section.mr;
3754}
3755
3756
3757
3758
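/* Slow path of address_space_read_cached() for caches that are not RAM-backed. */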
3759MemTxResult
3760address_space_read_cached_slow(MemoryRegionCache *cache, hwaddr addr,
3761 void *buf, hwaddr len)
3762{
3763 hwaddr addr1, l;
3764 MemoryRegion *mr;
3765
3766 l = len;
3767 mr = address_space_translate_cached(cache, addr, &addr1, &l, false,
3768 MEMTXATTRS_UNSPECIFIED);
3769 return flatview_read_continue(cache->fv,
3770 addr, MEMTXATTRS_UNSPECIFIED, buf, len,
3771 addr1, l, mr);
3772}
3773
3774
3775
3776
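/* Slow path of address_space_write_cached() for caches that are not RAM-backed. */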
3777MemTxResult
3778address_space_write_cached_slow(MemoryRegionCache *cache, hwaddr addr,
3779 const void *buf, hwaddr len)
3780{
3781 hwaddr addr1, l;
3782 MemoryRegion *mr;
3783
3784 l = len;
3785 mr = address_space_translate_cached(cache, addr, &addr1, &l, true,
3786 MEMTXATTRS_UNSPECIFIED);
3787 return flatview_write_continue(cache->fv,
3788 addr, MEMTXATTRS_UNSPECIFIED, buf, len,
3789 addr1, l, mr);
3790}
3791
3792#define ARG1_DECL MemoryRegionCache *cache
3793#define ARG1 cache
3794#define SUFFIX _cached_slow
3795#define TRANSLATE(...) address_space_translate_cached(cache, __VA_ARGS__)
3796#define RCU_READ_LOCK() ((void)0)
3797#define RCU_READ_UNLOCK() ((void)0)
3798#include "memory_ldst.inc.c"
3799
3800
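/*
 * Virtual memory access for the debugger: translates through the CPU's debug
 * page tables and, for writes, may also write into ROM.
 */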
3801int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
3802 void *ptr, target_ulong len, bool is_write)
3803{
3804 hwaddr phys_addr;
3805 target_ulong l, page;
3806 uint8_t *buf = ptr;
3807
3808 cpu_synchronize_state(cpu);
3809 while (len > 0) {
3810 int asidx;
3811 MemTxAttrs attrs;
3812 MemTxResult res;
3813
3814 page = addr & TARGET_PAGE_MASK;
3815 phys_addr = cpu_get_phys_page_attrs_debug(cpu, page, &attrs);
3816 asidx = cpu_asidx_from_attrs(cpu, attrs);
3817
3818 if (phys_addr == -1)
3819 return -1;
3820 l = (page + TARGET_PAGE_SIZE) - addr;
3821 if (l > len)
3822 l = len;
3823 phys_addr += (addr & ~TARGET_PAGE_MASK);
3824 if (is_write) {
3825 res = address_space_write_rom(cpu->cpu_ases[asidx].as, phys_addr,
3826 attrs, buf, l);
3827 } else {
3828 res = address_space_read(cpu->cpu_ases[asidx].as, phys_addr,
3829 attrs, buf, l);
3830 }
3831 if (res != MEMTX_OK) {
3832 return -1;
3833 }
3834 len -= l;
3835 buf += l;
3836 addr += l;
3837 }
3838 return 0;
3839}
3840
/*
 * Target page geometry accessors, kept out of line so that
 * target-independent code (e.g. migration bitmap handling) can use them.
 */
3845size_t qemu_target_page_size(void)
3846{
3847 return TARGET_PAGE_SIZE;
3848}
3849
3850int qemu_target_page_bits(void)
3851{
3852 return TARGET_PAGE_BITS;
3853}
3854
3855int qemu_target_page_bits_min(void)
3856{
3857 return TARGET_PAGE_BITS_MIN;
3858}
3859#endif
3860
3861bool target_words_bigendian(void)
3862{
3863#if defined(TARGET_WORDS_BIGENDIAN)
3864 return true;
3865#else
3866 return false;
3867#endif
3868}
3869
3870#ifndef CONFIG_USER_ONLY
3871bool cpu_physical_memory_is_io(hwaddr phys_addr)
3872{
    MemoryRegion *mr;
3874 hwaddr l = 1;
3875 bool res;
3876
3877 RCU_READ_LOCK_GUARD();
3878 mr = address_space_translate(&address_space_memory,
3879 phys_addr, &phys_addr, &l, false,
3880 MEMTXATTRS_UNSPECIFIED);
3881
3882 res = !(memory_region_is_ram(mr) || memory_region_is_romd(mr));
3883 return res;
3884}
3885
3886int qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
3887{
3888 RAMBlock *block;
3889 int ret = 0;
3890
3891 RCU_READ_LOCK_GUARD();
3892 RAMBLOCK_FOREACH(block) {
3893 ret = func(block, opaque);
3894 if (ret) {
3895 break;
3896 }
3897 }
3898 return ret;
3899}
3900
/*
 * Discard the pages in [start, start + length) of @rb so that they read back
 * as zero and can fault again (as required e.g. by postcopy).  @start and
 * @length must be aligned to the block's page size.  Returns 0 on success or
 * a negative errno on failure.
 */
3909int ram_block_discard_range(RAMBlock *rb, uint64_t start, size_t length)
3910{
3911 int ret = -1;
3912
3913 uint8_t *host_startaddr = rb->host + start;
3914
3915 if (!QEMU_PTR_IS_ALIGNED(host_startaddr, rb->page_size)) {
3916 error_report("ram_block_discard_range: Unaligned start address: %p",
3917 host_startaddr);
3918 goto err;
3919 }
3920
3921 if ((start + length) <= rb->used_length) {
3922 bool need_madvise, need_fallocate;
3923 if (!QEMU_IS_ALIGNED(length, rb->page_size)) {
3924 error_report("ram_block_discard_range: Unaligned length: %zx",
3925 length);
3926 goto err;
3927 }
3928
3929 errno = ENOTSUP;
3930
        /*
         * Which mechanism to use depends on the backing memory:
         * madvise(DONTNEED) only works for host-page-sized blocks (it fails
         * on hugepages), while fallocate(PUNCH_HOLE) handles file-backed
         * memory, including hugetlbfs and shmem.
         */
3935 need_madvise = (rb->page_size == qemu_host_page_size);
3936 need_fallocate = rb->fd != -1;
3937 if (need_fallocate) {
            /*
             * Punch a hole in the backing file: subsequent reads of the
             * range return zeroes, and on hugetlbfs the pages are unmapped
             * so that a later access faults again.
             */
3942#ifdef CONFIG_FALLOCATE_PUNCH_HOLE
3943 ret = fallocate(rb->fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
3944 start, length);
3945 if (ret) {
3946 ret = -errno;
3947 error_report("ram_block_discard_range: Failed to fallocate "
3948 "%s:%" PRIx64 " +%zx (%d)",
3949 rb->idstr, start, length, ret);
3950 goto err;
3951 }
3952#else
3953 ret = -ENOSYS;
            error_report("ram_block_discard_range: fallocate not available/file "
3955 "%s:%" PRIx64 " +%zx (%d)",
3956 rb->idstr, start, length, ret);
3957 goto err;
3958#endif
3959 }
3960 if (need_madvise) {
            /*
             * MADV_DONTNEED drops the local mapping: anonymous memory reads
             * back as zero afterwards, and shared mappings fall back to the
             * (just punched) file contents.
             */
3966#if defined(CONFIG_MADVISE)
3967 ret = madvise(host_startaddr, length, MADV_DONTNEED);
3968 if (ret) {
3969 ret = -errno;
3970 error_report("ram_block_discard_range: Failed to discard range "
3971 "%s:%" PRIx64 " +%zx (%d)",
3972 rb->idstr, start, length, ret);
3973 goto err;
3974 }
3975#else
3976 ret = -ENOSYS;
            error_report("ram_block_discard_range: MADVISE not available "
3978 "%s:%" PRIx64 " +%zx (%d)",
3979 rb->idstr, start, length, ret);
3980 goto err;
3981#endif
3982 }
3983 trace_ram_block_discard_range(rb->idstr, host_startaddr, length,
3984 need_madvise, need_fallocate, ret);
3985 } else {
3986 error_report("ram_block_discard_range: Overrun block '%s' (%" PRIu64
3987 "/%zx/" RAM_ADDR_FMT")",
3988 rb->idstr, start, length, rb->used_length);
3989 }
3990
3991err:
3992 return ret;
3993}
3994
3995bool ramblock_is_pmem(RAMBlock *rb)
3996{
3997 return rb->flags & RAM_PMEM;
3998}
3999
4000#endif
4001
4002void page_size_init(void)
4003{
4004
4005
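    /* qemu_host_page_size must be at least as large as the target page size. */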
4006 if (qemu_host_page_size == 0) {
4007 qemu_host_page_size = qemu_real_host_page_size;
4008 }
4009 if (qemu_host_page_size < TARGET_PAGE_SIZE) {
4010 qemu_host_page_size = TARGET_PAGE_SIZE;
4011 }
4012 qemu_host_page_mask = -(intptr_t)qemu_host_page_size;
4013}
4014
4015#if !defined(CONFIG_USER_ONLY)
4016
4017static void mtree_print_phys_entries(int start, int end, int skip, int ptr)
4018{
4019 if (start == end - 1) {
4020 qemu_printf("\t%3d ", start);
4021 } else {
4022 qemu_printf("\t%3d..%-3d ", start, end - 1);
4023 }
4024 qemu_printf(" skip=%d ", skip);
4025 if (ptr == PHYS_MAP_NODE_NIL) {
4026 qemu_printf(" ptr=NIL");
4027 } else if (!skip) {
4028 qemu_printf(" ptr=#%d", ptr);
4029 } else {
4030 qemu_printf(" ptr=[%d]", ptr);
4031 }
4032 qemu_printf("\n");
4033}
4034
4035#define MR_SIZE(size) (int128_nz(size) ? (hwaddr)int128_get64( \
4036 int128_sub((size), int128_one())) : 0)
4037
4038void mtree_print_dispatch(AddressSpaceDispatch *d, MemoryRegion *root)
4039{
4040 int i;
4041
4042 qemu_printf(" Dispatch\n");
4043 qemu_printf(" Physical sections\n");
4044
4045 for (i = 0; i < d->map.sections_nb; ++i) {
4046 MemoryRegionSection *s = d->map.sections + i;
4047 const char *names[] = { " [unassigned]", " [not dirty]",
4048 " [ROM]", " [watch]" };
4049
4050 qemu_printf(" #%d @" TARGET_FMT_plx ".." TARGET_FMT_plx
4051 " %s%s%s%s%s",
4052 i,
4053 s->offset_within_address_space,
4054 s->offset_within_address_space + MR_SIZE(s->mr->size),
4055 s->mr->name ? s->mr->name : "(noname)",
4056 i < ARRAY_SIZE(names) ? names[i] : "",
4057 s->mr == root ? " [ROOT]" : "",
4058 s == d->mru_section ? " [MRU]" : "",
4059 s->mr->is_iommu ? " [iommu]" : "");
4060
4061 if (s->mr->alias) {
4062 qemu_printf(" alias=%s", s->mr->alias->name ?
4063 s->mr->alias->name : "noname");
4064 }
4065 qemu_printf("\n");
4066 }
4067
4068 qemu_printf(" Nodes (%d bits per level, %d levels) ptr=[%d] skip=%d\n",
4069 P_L2_BITS, P_L2_LEVELS, d->phys_map.ptr, d->phys_map.skip);
4070 for (i = 0; i < d->map.nodes_nb; ++i) {
4071 int j, jprev;
4072 PhysPageEntry prev;
4073 Node *n = d->map.nodes + i;
4074
4075 qemu_printf(" [%d]\n", i);
4076
4077 for (j = 0, jprev = 0, prev = *n[0]; j < ARRAY_SIZE(*n); ++j) {
4078 PhysPageEntry *pe = *n + j;
4079
4080 if (pe->ptr == prev.ptr && pe->skip == prev.skip) {
4081 continue;
4082 }
4083
4084 mtree_print_phys_entries(jprev, j, prev.skip, prev.ptr);
4085
4086 jprev = j;
4087 prev = *pe;
4088 }
4089
4090 if (jprev != ARRAY_SIZE(*n)) {
4091 mtree_print_phys_entries(jprev, j, prev.skip, prev.ptr);
4092 }
4093 }
4094}
4095
/*
 * Reference-count style flag: positive means discarding RAM is disabled,
 * negative means discarding is required and cannot be disabled.
 */
4100static int ram_block_discard_disabled;
4101
4102int ram_block_discard_disable(bool state)
4103{
4104 int old;
4105
4106 if (!state) {
4107 atomic_dec(&ram_block_discard_disabled);
4108 return 0;
4109 }
4110
4111 do {
4112 old = atomic_read(&ram_block_discard_disabled);
4113 if (old < 0) {
4114 return -EBUSY;
4115 }
4116 } while (atomic_cmpxchg(&ram_block_discard_disabled, old, old + 1) != old);
4117 return 0;
4118}
4119
4120int ram_block_discard_require(bool state)
4121{
4122 int old;
4123
4124 if (!state) {
4125 atomic_inc(&ram_block_discard_disabled);
4126 return 0;
4127 }
4128
4129 do {
4130 old = atomic_read(&ram_block_discard_disabled);
4131 if (old > 0) {
4132 return -EBUSY;
4133 }
4134 } while (atomic_cmpxchg(&ram_block_discard_disabled, old, old - 1) != old);
4135 return 0;
4136}
4137
4138bool ram_block_discard_is_disabled(void)
4139{
4140 return atomic_read(&ram_block_discard_disabled) > 0;
4141}
4142
4143bool ram_block_discard_is_required(void)
4144{
4145 return atomic_read(&ram_block_discard_disabled) < 0;
4146}
4147
4148#endif
4149