19#include "qemu/osdep.h"
20#include "qapi/error.h"
#ifndef _WIN32
#include <sys/mman.h>
#endif
23
24#include "qemu/cutils.h"
25#include "cpu.h"
26#include "exec/exec-all.h"
27#include "exec/target_page.h"
28#include "tcg.h"
29#include "hw/qdev-core.h"
30#include "hw/qdev-properties.h"
31#if !defined(CONFIG_USER_ONLY)
32#include "hw/boards.h"
33#include "hw/xen/xen.h"
34#endif
35#include "sysemu/kvm.h"
36#include "sysemu/sysemu.h"
37#include "qemu/timer.h"
38#include "qemu/config-file.h"
39#include "qemu/error-report.h"
40#if defined(CONFIG_USER_ONLY)
41#include "qemu.h"
42#else
43#include "hw/hw.h"
44#include "exec/memory.h"
45#include "exec/ioport.h"
46#include "sysemu/dma.h"
47#include "sysemu/numa.h"
48#include "sysemu/hw_accel.h"
49#include "exec/address-spaces.h"
50#include "sysemu/xen-mapcache.h"
51#include "trace-root.h"
52
53#ifdef CONFIG_FALLOCATE_PUNCH_HOLE
54#include <fcntl.h>
55#include <linux/falloc.h>
56#endif
57
58#endif
59#include "exec/cpu-all.h"
60#include "qemu/rcu_queue.h"
61#include "qemu/main-loop.h"
62#include "translate-all.h"
63#include "sysemu/replay.h"
64
65#include "exec/memory-internal.h"
66#include "exec/ram_addr.h"
67#include "exec/log.h"
68
69#include "migration/vmstate.h"
70
71#include "qemu/range.h"
72#ifndef _WIN32
73#include "qemu/mmap-alloc.h"
74#endif
75
76#include "monitor/monitor.h"
77
78
79
80#if !defined(CONFIG_USER_ONLY)
81
82
83
84RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };
85
86static MemoryRegion *system_memory;
87static MemoryRegion *system_io;
88
89AddressSpace address_space_io;
90AddressSpace address_space_memory;
91
92MemoryRegion io_mem_rom, io_mem_notdirty;
93static MemoryRegion io_mem_unassigned;
94
/* RAM is pre-allocated and passed into qemu_ram_alloc_from_ptr */
#define RAM_PREALLOC (1 << 0)

/* RAM is mmap-ed with MAP_SHARED */
#define RAM_SHARED (1 << 1)

/* Only a portion of RAM (used_length) is actually used, and migrated.
 * This used_length size can change across reboots.
 */
#define RAM_RESIZEABLE (1 << 2)
105
106#endif
107
108#ifdef TARGET_PAGE_BITS_VARY
109int target_page_bits;
110bool target_page_bits_decided;
111#endif
112
113struct CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus);
114
115
116__thread CPUState *current_cpu;
117
118
119
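/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */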
120int use_icount;
121
122uintptr_t qemu_host_page_size;
123intptr_t qemu_host_page_mask;
124uintptr_t qemu_real_host_page_size;
125intptr_t qemu_real_host_page_mask;
126
127bool set_preferred_target_page_bits(int bits)
128{
    /* The target page size is the lowest common denominator for all
     * the CPUs in the system, so we can only make it smaller, never
     * larger.  And we can't make it smaller once we've committed to
     * a particular size.
     */
134#ifdef TARGET_PAGE_BITS_VARY
135 assert(bits >= TARGET_PAGE_BITS_MIN);
136 if (target_page_bits == 0 || target_page_bits > bits) {
137 if (target_page_bits_decided) {
138 return false;
139 }
140 target_page_bits = bits;
141 }
142#endif
143 return true;
144}
145
146#if !defined(CONFIG_USER_ONLY)
147
148static void finalize_target_page_bits(void)
149{
150#ifdef TARGET_PAGE_BITS_VARY
151 if (target_page_bits == 0) {
152 target_page_bits = TARGET_PAGE_BITS_MIN;
153 }
154 target_page_bits_decided = true;
155#endif
156}
157
158typedef struct PhysPageEntry PhysPageEntry;
159
struct PhysPageEntry {
    /* How many bits skip to next level (in units of L2_SIZE). 0 for a leaf. */
    uint32_t skip : 6;
    /* index into phys_sections (!skip) or phys_map_nodes (skip) */
    uint32_t ptr : 26;
};
166
167#define PHYS_MAP_NODE_NIL (((uint32_t)~0) >> 6)
168
169
170#define ADDR_SPACE_BITS 64
171
172#define P_L2_BITS 9
173#define P_L2_SIZE (1 << P_L2_BITS)
174
175#define P_L2_LEVELS (((ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / P_L2_BITS) + 1)
176
177typedef PhysPageEntry Node[P_L2_SIZE];
178
179typedef struct PhysPageMap {
180 struct rcu_head rcu;
181
182 unsigned sections_nb;
183 unsigned sections_nb_alloc;
184 unsigned nodes_nb;
185 unsigned nodes_nb_alloc;
186 Node *nodes;
187 MemoryRegionSection *sections;
188} PhysPageMap;
189
190struct AddressSpaceDispatch {
191 MemoryRegionSection *mru_section;
192
193
194
195 PhysPageEntry phys_map;
196 PhysPageMap map;
197};
198
199#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
200typedef struct subpage_t {
201 MemoryRegion iomem;
202 FlatView *fv;
203 hwaddr base;
204 uint16_t sub_section[];
205} subpage_t;
206
207#define PHYS_SECTION_UNASSIGNED 0
208#define PHYS_SECTION_NOTDIRTY 1
209#define PHYS_SECTION_ROM 2
210#define PHYS_SECTION_WATCH 3
211
212static void io_mem_init(void);
213static void memory_map_init(void);
214static void tcg_commit(MemoryListener *listener);
215
216static MemoryRegion io_mem_watch;
217
218
219
220
221
222
223
224
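/**
 * CPUAddressSpace: all the information a CPU needs about an AddressSpace
 * @cpu: the CPU whose AddressSpace this is
 * @as: the AddressSpace itself
 * @memory_dispatch: its dispatch pointer (cached, RCU protected)
 * @tcg_as_listener: listener for tracking changes to the AddressSpace
 */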
225struct CPUAddressSpace {
226 CPUState *cpu;
227 AddressSpace *as;
228 struct AddressSpaceDispatch *memory_dispatch;
229 MemoryListener tcg_as_listener;
230};
231
232struct DirtyBitmapSnapshot {
233 ram_addr_t start;
234 ram_addr_t end;
235 unsigned long dirty[];
236};
237
238#endif
239
240#if !defined(CONFIG_USER_ONLY)
241
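/* Make sure the physical page map can hold @nodes more nodes, growing
 * the node array as needed.  The static alloc_hint carries the previous
 * allocation size over so later maps start out large enough.
 */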
242static void phys_map_node_reserve(PhysPageMap *map, unsigned nodes)
243{
244 static unsigned alloc_hint = 16;
245 if (map->nodes_nb + nodes > map->nodes_nb_alloc) {
246 map->nodes_nb_alloc = MAX(map->nodes_nb_alloc, alloc_hint);
247 map->nodes_nb_alloc = MAX(map->nodes_nb_alloc, map->nodes_nb + nodes);
248 map->nodes = g_renew(Node, map->nodes, map->nodes_nb_alloc);
249 alloc_hint = map->nodes_nb_alloc;
250 }
251}
252
253static uint32_t phys_map_node_alloc(PhysPageMap *map, bool leaf)
254{
255 unsigned i;
256 uint32_t ret;
257 PhysPageEntry e;
258 PhysPageEntry *p;
259
260 ret = map->nodes_nb++;
261 p = map->nodes[ret];
262 assert(ret != PHYS_MAP_NODE_NIL);
263 assert(ret != map->nodes_nb_alloc);
264
265 e.skip = leaf ? 0 : 1;
266 e.ptr = leaf ? PHYS_SECTION_UNASSIGNED : PHYS_MAP_NODE_NIL;
267 for (i = 0; i < P_L2_SIZE; ++i) {
268 memcpy(&p[i], &e, sizeof(e));
269 }
270 return ret;
271}
272
273static void phys_page_set_level(PhysPageMap *map, PhysPageEntry *lp,
274 hwaddr *index, hwaddr *nb, uint16_t leaf,
275 int level)
276{
277 PhysPageEntry *p;
278 hwaddr step = (hwaddr)1 << (level * P_L2_BITS);
279
280 if (lp->skip && lp->ptr == PHYS_MAP_NODE_NIL) {
281 lp->ptr = phys_map_node_alloc(map, level == 0);
282 }
283 p = map->nodes[lp->ptr];
284 lp = &p[(*index >> (level * P_L2_BITS)) & (P_L2_SIZE - 1)];
285
286 while (*nb && lp < &p[P_L2_SIZE]) {
287 if ((*index & (step - 1)) == 0 && *nb >= step) {
288 lp->skip = 0;
289 lp->ptr = leaf;
290 *index += step;
291 *nb -= step;
292 } else {
293 phys_page_set_level(map, lp, index, nb, leaf, level - 1);
294 }
295 ++lp;
296 }
297}
298
299static void phys_page_set(AddressSpaceDispatch *d,
300 hwaddr index, hwaddr nb,
301 uint16_t leaf)
302{
303
304 phys_map_node_reserve(&d->map, 3 * P_L2_LEVELS);
305
306 phys_page_set_level(&d->map, &d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
307}
308
309
310
311
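/* Compact a non leaf page entry.  Simply detect that the entry has a
 * single child, and update our entry so we can skip it and go directly
 * to the destination.
 */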
312static void phys_page_compact(PhysPageEntry *lp, Node *nodes)
313{
314 unsigned valid_ptr = P_L2_SIZE;
315 int valid = 0;
316 PhysPageEntry *p;
317 int i;
318
319 if (lp->ptr == PHYS_MAP_NODE_NIL) {
320 return;
321 }
322
323 p = nodes[lp->ptr];
324 for (i = 0; i < P_L2_SIZE; i++) {
325 if (p[i].ptr == PHYS_MAP_NODE_NIL) {
326 continue;
327 }
328
329 valid_ptr = i;
330 valid++;
331 if (p[i].skip) {
332 phys_page_compact(&p[i], nodes);
333 }
334 }

    /* We can only compress if there's only one child.  */
337 if (valid != 1) {
338 return;
339 }
340
341 assert(valid_ptr < P_L2_SIZE);

    /* Don't compress if it won't fit in the # of bits we have.  */
344 if (lp->skip + p[valid_ptr].skip >= (1 << 3)) {
345 return;
346 }
347
348 lp->ptr = p[valid_ptr].ptr;
349 if (!p[valid_ptr].skip) {
        /* If our only child is a leaf, make this a leaf.
         *
         * By design, we should have made this node a leaf to begin with
         * so we should never reach here.  But since it's so simple to
         * handle this, let's do it just in case we change this rule.
         */
356 lp->skip = 0;
357 } else {
358 lp->skip += p[valid_ptr].skip;
359 }
360}
361
362void address_space_dispatch_compact(AddressSpaceDispatch *d)
363{
364 if (d->phys_map.skip) {
365 phys_page_compact(&d->phys_map, d->map.nodes);
366 }
367}
368
369static inline bool section_covers_addr(const MemoryRegionSection *section,
370 hwaddr addr)
371{
    /* Memory topology clips a memory region to [0, 2^64); size.hi > 0 means
     * the section must cover the entire address space.
     */
375 return int128_gethi(section->size) ||
376 range_covers_byte(section->offset_within_address_space,
377 int128_getlo(section->size), addr);
378}
379
380static MemoryRegionSection *phys_page_find(AddressSpaceDispatch *d, hwaddr addr)
381{
382 PhysPageEntry lp = d->phys_map, *p;
383 Node *nodes = d->map.nodes;
384 MemoryRegionSection *sections = d->map.sections;
385 hwaddr index = addr >> TARGET_PAGE_BITS;
386 int i;
387
    for (i = P_L2_LEVELS; lp.skip && (i -= lp.skip) >= 0;) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            return &sections[PHYS_SECTION_UNASSIGNED];
        }
        p = nodes[lp.ptr];
        lp = p[(index >> (i * P_L2_BITS)) & (P_L2_SIZE - 1)];
    }

    if (section_covers_addr(&sections[lp.ptr], addr)) {
        return &sections[lp.ptr];
    } else {
        return &sections[PHYS_SECTION_UNASSIGNED];
    }
401}
402
403bool memory_region_is_unassigned(MemoryRegion *mr)
404{
405 return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
406 && mr != &io_mem_watch;
407}
408
409
410static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
411 hwaddr addr,
412 bool resolve_subpage)
413{
414 MemoryRegionSection *section = atomic_read(&d->mru_section);
415 subpage_t *subpage;
416 bool update;
417
418 if (section && section != &d->map.sections[PHYS_SECTION_UNASSIGNED] &&
419 section_covers_addr(section, addr)) {
420 update = false;
421 } else {
422 section = phys_page_find(d, addr);
423 update = true;
424 }
425 if (resolve_subpage && section->mr->subpage) {
426 subpage = container_of(section->mr, subpage_t, iomem);
427 section = &d->map.sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
428 }
429 if (update) {
430 atomic_set(&d->mru_section, section);
431 }
432 return section;
433}
434
435
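/* Called from RCU critical section */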
436static MemoryRegionSection *
437address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *xlat,
438 hwaddr *plen, bool resolve_subpage)
439{
440 MemoryRegionSection *section;
441 MemoryRegion *mr;
442 Int128 diff;
443
444 section = address_space_lookup_region(d, addr, resolve_subpage);
445
446 addr -= section->offset_within_address_space;
447
448
449 *xlat = addr + section->offset_within_region;
450
451 mr = section->mr;

    /* MMIO registers can be expected to perform full-width accesses based only
     * on their address, without considering adjacent registers that could
     * decode to completely different MemoryRegions.  When such registers
     * exist (e.g. I/O ports 0xcf8 and 0xcf9 on most PC chipsets), MMIO
     * regions overlap wildly.  For this reason we cannot clamp the accesses
     * here.
     *
     * If the length is small (as is the case for address_space_ldl/stl),
     * everything works fine.  If the incoming length is large, however,
     * the caller really has to do the clamping through memory_access_is_direct.
     */
464 if (memory_region_is_ram(mr)) {
465 diff = int128_sub(section->size, int128_make64(addr));
466 *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
467 }
468 return section;
469}
470
/**
 * flatview_do_translate - translate an address in FlatView
 *
 * @fv: the flat view that we are translating on
 * @addr: the address to be translated in above address space
 * @xlat: the translated address offset within memory region. It
 *        cannot be @NULL.
 * @plen_out: valid read/write length of the translated address. It
 *            can be @NULL when we don't care about it.
 * @page_mask_out: page mask for the translated address. This
 *                 should only be meaningful for IOMMU translated
 *                 addresses, since there may be huge pages that this bit
 *                 is set for specific mappings.
 * @is_write: whether the translation operation is for write
 * @is_mmio: whether this can be MMIO, set true if it can
 * @target_as: the address space targeted by the IOMMU
 *
 * This function is called from RCU critical section
 */
489static MemoryRegionSection flatview_do_translate(FlatView *fv,
490 hwaddr addr,
491 hwaddr *xlat,
492 hwaddr *plen_out,
493 hwaddr *page_mask_out,
494 bool is_write,
495 bool is_mmio,
496 AddressSpace **target_as)
497{
498 IOMMUTLBEntry iotlb;
499 MemoryRegionSection *section;
500 IOMMUMemoryRegion *iommu_mr;
501 IOMMUMemoryRegionClass *imrc;
502 hwaddr page_mask = (hwaddr)(-1);
503 hwaddr plen = (hwaddr)(-1);
504
505 if (plen_out) {
506 plen = *plen_out;
507 }
508
509 for (;;) {
510 section = address_space_translate_internal(
511 flatview_to_dispatch(fv), addr, &addr,
512 &plen, is_mmio);
513
514 iommu_mr = memory_region_get_iommu(section->mr);
515 if (!iommu_mr) {
516 break;
517 }
518 imrc = memory_region_get_iommu_class_nocheck(iommu_mr);
519
520 iotlb = imrc->translate(iommu_mr, addr, is_write ?
521 IOMMU_WO : IOMMU_RO);
522 addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
523 | (addr & iotlb.addr_mask));
524 page_mask &= iotlb.addr_mask;
525 plen = MIN(plen, (addr | iotlb.addr_mask) - addr + 1);
526 if (!(iotlb.perm & (1 << is_write))) {
527 goto translate_fail;
528 }
529
530 fv = address_space_to_flatview(iotlb.target_as);
531 *target_as = iotlb.target_as;
532 }
533
534 *xlat = addr;
535
536 if (page_mask == (hwaddr)(-1)) {
        /* Not behind an IOMMU, use default page size. */
538 page_mask = ~TARGET_PAGE_MASK;
539 }
540
541 if (page_mask_out) {
542 *page_mask_out = page_mask;
543 }
544
545 if (plen_out) {
546 *plen_out = plen;
547 }
548
549 return *section;
550
551translate_fail:
552 return (MemoryRegionSection) { .mr = &io_mem_unassigned };
553}
554
555
556IOMMUTLBEntry address_space_get_iotlb_entry(AddressSpace *as, hwaddr addr,
557 bool is_write)
558{
559 MemoryRegionSection section;
560 hwaddr xlat, page_mask;
561
    /* This can never be MMIO, and we don't really care about plen,
     * but page mask.
     */
566 section = flatview_do_translate(address_space_to_flatview(as), addr, &xlat,
567 NULL, &page_mask, is_write, false, &as);
568
569
570 if (section.mr == &io_mem_unassigned) {
571 goto iotlb_fail;
572 }
573
    /* Convert memory region offset into address space offset */
575 xlat += section.offset_within_address_space -
576 section.offset_within_region;
577
578 return (IOMMUTLBEntry) {
579 .target_as = as,
580 .iova = addr & ~page_mask,
581 .translated_addr = xlat & ~page_mask,
582 .addr_mask = page_mask,
583
584 .perm = IOMMU_RW,
585 };
586
587iotlb_fail:
588 return (IOMMUTLBEntry) {0};
589}
590
591
592MemoryRegion *flatview_translate(FlatView *fv, hwaddr addr, hwaddr *xlat,
593 hwaddr *plen, bool is_write)
594{
595 MemoryRegion *mr;
596 MemoryRegionSection section;
597 AddressSpace *as = NULL;
598
599
600 section = flatview_do_translate(fv, addr, xlat, plen, NULL,
601 is_write, true, &as);
602 mr = section.mr;
603
604 if (xen_enabled() && memory_access_is_direct(mr, is_write)) {
605 hwaddr page = ((addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE) - addr;
606 *plen = MIN(page, *plen);
607 }
608
609 return mr;
610}
611
612
613MemoryRegionSection *
614address_space_translate_for_iotlb(CPUState *cpu, int asidx, hwaddr addr,
615 hwaddr *xlat, hwaddr *plen)
616{
617 MemoryRegionSection *section;
618 AddressSpaceDispatch *d = atomic_rcu_read(&cpu->cpu_ases[asidx].memory_dispatch);
619
620 section = address_space_translate_internal(d, addr, xlat, plen, false);
621
622 assert(!memory_region_is_iommu(section->mr));
623 return section;
624}
625#endif
626
627#if !defined(CONFIG_USER_ONLY)
628
629static int cpu_common_post_load(void *opaque, int version_id)
630{
631 CPUState *cpu = opaque;
632
    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
635 cpu->interrupt_request &= ~0x01;
636 tlb_flush(cpu);
637
638 return 0;
639}
640
641static int cpu_common_pre_load(void *opaque)
642{
643 CPUState *cpu = opaque;
644
645 cpu->exception_index = -1;
646
647 return 0;
648}
649
650static bool cpu_common_exception_index_needed(void *opaque)
651{
652 CPUState *cpu = opaque;
653
654 return tcg_enabled() && cpu->exception_index != -1;
655}
656
657static const VMStateDescription vmstate_cpu_common_exception_index = {
658 .name = "cpu_common/exception_index",
659 .version_id = 1,
660 .minimum_version_id = 1,
661 .needed = cpu_common_exception_index_needed,
662 .fields = (VMStateField[]) {
663 VMSTATE_INT32(exception_index, CPUState),
664 VMSTATE_END_OF_LIST()
665 }
666};
667
668static bool cpu_common_crash_occurred_needed(void *opaque)
669{
670 CPUState *cpu = opaque;
671
672 return cpu->crash_occurred;
673}
674
675static const VMStateDescription vmstate_cpu_common_crash_occurred = {
676 .name = "cpu_common/crash_occurred",
677 .version_id = 1,
678 .minimum_version_id = 1,
679 .needed = cpu_common_crash_occurred_needed,
680 .fields = (VMStateField[]) {
681 VMSTATE_BOOL(crash_occurred, CPUState),
682 VMSTATE_END_OF_LIST()
683 }
684};
685
686const VMStateDescription vmstate_cpu_common = {
687 .name = "cpu_common",
688 .version_id = 1,
689 .minimum_version_id = 1,
690 .pre_load = cpu_common_pre_load,
691 .post_load = cpu_common_post_load,
692 .fields = (VMStateField[]) {
693 VMSTATE_UINT32(halted, CPUState),
694 VMSTATE_UINT32(interrupt_request, CPUState),
695 VMSTATE_END_OF_LIST()
696 },
697 .subsections = (const VMStateDescription*[]) {
698 &vmstate_cpu_common_exception_index,
699 &vmstate_cpu_common_crash_occurred,
700 NULL
701 }
702};
703
704#endif
705
706CPUState *qemu_get_cpu(int index)
707{
708 CPUState *cpu;
709
710 CPU_FOREACH(cpu) {
711 if (cpu->cpu_index == index) {
712 return cpu;
713 }
714 }
715
716 return NULL;
717}
718
719#if !defined(CONFIG_USER_ONLY)
720void cpu_address_space_init(CPUState *cpu, AddressSpace *as, int asidx)
721{
722 CPUAddressSpace *newas;
723
724
725 assert(asidx < cpu->num_ases);
726
727 if (asidx == 0) {
        /* address space 0 gets the convenience alias */
729 cpu->as = as;
730 }
731
    /* KVM cannot currently support multiple address spaces. */
733 assert(asidx == 0 || !kvm_enabled());
734
735 if (!cpu->cpu_ases) {
736 cpu->cpu_ases = g_new0(CPUAddressSpace, cpu->num_ases);
737 }
738
739 newas = &cpu->cpu_ases[asidx];
740 newas->cpu = cpu;
741 newas->as = as;
742 if (tcg_enabled()) {
743 newas->tcg_as_listener.commit = tcg_commit;
744 memory_listener_register(&newas->tcg_as_listener, as);
745 }
746}
747
748AddressSpace *cpu_get_address_space(CPUState *cpu, int asidx)
749{
750
751 return cpu->cpu_ases[asidx].as;
752}
753#endif
754
755void cpu_exec_unrealizefn(CPUState *cpu)
756{
757 CPUClass *cc = CPU_GET_CLASS(cpu);
758
759 cpu_list_remove(cpu);
760
761 if (cc->vmsd != NULL) {
762 vmstate_unregister(NULL, cc->vmsd, cpu);
763 }
764 if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
765 vmstate_unregister(NULL, &vmstate_cpu_common, cpu);
766 }
767}
768
769Property cpu_common_props[] = {
770#ifndef CONFIG_USER_ONLY
    /* Create a memory property for softmmu CPU object,
     * so users can wire up its memory. (This can't go in qom/cpu.c
     * because that file is compiled only once for both user-mode
     * and system builds.) The default if no link is set up is to use
     * the system address space.
     */
777 DEFINE_PROP_LINK("memory", CPUState, memory, TYPE_MEMORY_REGION,
778 MemoryRegion *),
779#endif
780 DEFINE_PROP_END_OF_LIST(),
781};
782
783void cpu_exec_initfn(CPUState *cpu)
784{
785 cpu->as = NULL;
786 cpu->num_ases = 0;
787
788#ifndef CONFIG_USER_ONLY
789 cpu->thread_id = qemu_get_thread_id();
790 cpu->memory = system_memory;
791 object_ref(OBJECT(cpu->memory));
792#endif
793}
794
795void cpu_exec_realizefn(CPUState *cpu, Error **errp)
796{
797 CPUClass *cc ATTRIBUTE_UNUSED = CPU_GET_CLASS(cpu);
798
799 cpu_list_add(cpu);
800
801#ifndef CONFIG_USER_ONLY
802 if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
803 vmstate_register(NULL, cpu->cpu_index, &vmstate_cpu_common, cpu);
804 }
805 if (cc->vmsd != NULL) {
806 vmstate_register(NULL, cpu->cpu_index, cc->vmsd, cpu);
807 }
808#endif
809}
810
811#if defined(CONFIG_USER_ONLY)
812static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
813{
814 mmap_lock();
815 tb_lock();
816 tb_invalidate_phys_page_range(pc, pc + 1, 0);
817 tb_unlock();
818 mmap_unlock();
819}
820#else
821static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
822{
823 MemTxAttrs attrs;
824 hwaddr phys = cpu_get_phys_page_attrs_debug(cpu, pc, &attrs);
825 int asidx = cpu_asidx_from_attrs(cpu, attrs);
826 if (phys != -1) {
827
828 tb_invalidate_phys_addr(cpu->cpu_ases[asidx].as,
829 phys | (pc & ~TARGET_PAGE_MASK));
830 }
831}
832#endif
833
834#if defined(CONFIG_USER_ONLY)
835void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
836
837{
838}
839
840int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
841 int flags)
842{
843 return -ENOSYS;
844}
845
846void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
847{
848}
849
850int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
851 int flags, CPUWatchpoint **watchpoint)
852{
853 return -ENOSYS;
854}
855#else
856
857int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
858 int flags, CPUWatchpoint **watchpoint)
859{
860 CPUWatchpoint *wp;
861
    /* forbid ranges which are empty or run off the end of the address space */
863 if (len == 0 || (addr + len - 1) < addr) {
864 error_report("tried to set invalid watchpoint at %"
865 VADDR_PRIx ", len=%" VADDR_PRIu, addr, len);
866 return -EINVAL;
867 }
868 wp = g_malloc(sizeof(*wp));
869
870 wp->vaddr = addr;
871 wp->len = len;
872 wp->flags = flags;
873
874
875 if (flags & BP_GDB) {
876 QTAILQ_INSERT_HEAD(&cpu->watchpoints, wp, entry);
877 } else {
878 QTAILQ_INSERT_TAIL(&cpu->watchpoints, wp, entry);
879 }
880
881 tlb_flush_page(cpu, addr);
882
883 if (watchpoint)
884 *watchpoint = wp;
885 return 0;
886}
887
888
889int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
890 int flags)
891{
892 CPUWatchpoint *wp;
893
894 QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
895 if (addr == wp->vaddr && len == wp->len
896 && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
897 cpu_watchpoint_remove_by_ref(cpu, wp);
898 return 0;
899 }
900 }
901 return -ENOENT;
902}
903
904
905void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
906{
907 QTAILQ_REMOVE(&cpu->watchpoints, watchpoint, entry);
908
909 tlb_flush_page(cpu, watchpoint->vaddr);
910
911 g_free(watchpoint);
912}
913
914
915void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
916{
917 CPUWatchpoint *wp, *next;
918
919 QTAILQ_FOREACH_SAFE(wp, &cpu->watchpoints, entry, next) {
920 if (wp->flags & mask) {
921 cpu_watchpoint_remove_by_ref(cpu, wp);
922 }
923 }
924}
925
926
927
928
929
930
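/* Return true if this watchpoint address matches the specified
 * access (ie the address range covered by the watchpoint overlaps
 * partially or completely with the address range covered by the
 * access).
 */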
931static inline bool cpu_watchpoint_address_matches(CPUWatchpoint *wp,
932 vaddr addr,
933 vaddr len)
934{
    /* We know the lengths are non-zero, but a little caution is
     * required to avoid errors in the case where the range ends
     * exactly at the top of the address space and so addr + len
     * wraps round to zero.
     */
940 vaddr wpend = wp->vaddr + wp->len - 1;
941 vaddr addrend = addr + len - 1;
942
943 return !(addr > wpend || wp->vaddr > addrend);
944}
945
946#endif
947
948
949int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags,
950 CPUBreakpoint **breakpoint)
951{
952 CPUBreakpoint *bp;
953
954 bp = g_malloc(sizeof(*bp));
955
956 bp->pc = pc;
957 bp->flags = flags;
958
959
960 if (flags & BP_GDB) {
961 QTAILQ_INSERT_HEAD(&cpu->breakpoints, bp, entry);
962 } else {
963 QTAILQ_INSERT_TAIL(&cpu->breakpoints, bp, entry);
964 }
965
966 breakpoint_invalidate(cpu, pc);
967
968 if (breakpoint) {
969 *breakpoint = bp;
970 }
971 return 0;
972}
973
974
975int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags)
976{
977 CPUBreakpoint *bp;
978
979 QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
980 if (bp->pc == pc && bp->flags == flags) {
981 cpu_breakpoint_remove_by_ref(cpu, bp);
982 return 0;
983 }
984 }
985 return -ENOENT;
986}
987
988
989void cpu_breakpoint_remove_by_ref(CPUState *cpu, CPUBreakpoint *breakpoint)
990{
991 QTAILQ_REMOVE(&cpu->breakpoints, breakpoint, entry);
992
993 breakpoint_invalidate(cpu, breakpoint->pc);
994
995 g_free(breakpoint);
996}
997
998
999void cpu_breakpoint_remove_all(CPUState *cpu, int mask)
1000{
1001 CPUBreakpoint *bp, *next;
1002
1003 QTAILQ_FOREACH_SAFE(bp, &cpu->breakpoints, entry, next) {
1004 if (bp->flags & mask) {
1005 cpu_breakpoint_remove_by_ref(cpu, bp);
1006 }
1007 }
1008}
1009
1010
1011
1012void cpu_single_step(CPUState *cpu, int enabled)
1013{
1014 if (cpu->singlestep_enabled != enabled) {
1015 cpu->singlestep_enabled = enabled;
1016 if (kvm_enabled()) {
1017 kvm_update_guest_debug(cpu, 0);
1018 } else {
1019
1020
1021 tb_flush(cpu);
1022 }
1023 }
1024}
1025
1026void cpu_abort(CPUState *cpu, const char *fmt, ...)
1027{
1028 va_list ap;
1029 va_list ap2;
1030
1031 va_start(ap, fmt);
1032 va_copy(ap2, ap);
1033 fprintf(stderr, "qemu: fatal: ");
1034 vfprintf(stderr, fmt, ap);
1035 fprintf(stderr, "\n");
1036 cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
1037 if (qemu_log_separate()) {
1038 qemu_log_lock();
1039 qemu_log("qemu: fatal: ");
1040 qemu_log_vprintf(fmt, ap2);
1041 qemu_log("\n");
1042 log_cpu_state(cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
1043 qemu_log_flush();
1044 qemu_log_unlock();
1045 qemu_log_close();
1046 }
1047 va_end(ap2);
1048 va_end(ap);
1049 replay_finish();
1050#if defined(CONFIG_USER_ONLY)
1051 {
1052 struct sigaction act;
1053 sigfillset(&act.sa_mask);
1054 act.sa_handler = SIG_DFL;
1055 sigaction(SIGABRT, &act, NULL);
1056 }
1057#endif
1058 abort();
1059}
1060
1061#if !defined(CONFIG_USER_ONLY)
1062
1063static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
1064{
1065 RAMBlock *block;
1066
1067 block = atomic_rcu_read(&ram_list.mru_block);
1068 if (block && addr - block->offset < block->max_length) {
1069 return block;
1070 }
1071 RAMBLOCK_FOREACH(block) {
1072 if (addr - block->offset < block->max_length) {
1073 goto found;
1074 }
1075 }
1076
1077 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
1078 abort();
1079
1080found:
    /* It is safe to write mru_block outside the iothread lock.  The
     * block was already published when it was placed into the list, so
     * atomic_rcu_set is not needed here: this only makes an extra copy
     * of the pointer to speed up the next lookup.
     */
1097 ram_list.mru_block = block;
1098 return block;
1099}
1100
1101static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t length)
1102{
1103 CPUState *cpu;
1104 ram_addr_t start1;
1105 RAMBlock *block;
1106 ram_addr_t end;
1107
1108 end = TARGET_PAGE_ALIGN(start + length);
1109 start &= TARGET_PAGE_MASK;
1110
1111 rcu_read_lock();
1112 block = qemu_get_ram_block(start);
1113 assert(block == qemu_get_ram_block(end - 1));
1114 start1 = (uintptr_t)ramblock_ptr(block, start - block->offset);
1115 CPU_FOREACH(cpu) {
1116 tlb_reset_dirty(cpu, start1, length);
1117 }
1118 rcu_read_unlock();
1119}
1120
1121
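/* Note: start and end must be within the same ram block.  */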
1122bool cpu_physical_memory_test_and_clear_dirty(ram_addr_t start,
1123 ram_addr_t length,
1124 unsigned client)
1125{
1126 DirtyMemoryBlocks *blocks;
1127 unsigned long end, page;
1128 bool dirty = false;
1129
1130 if (length == 0) {
1131 return false;
1132 }
1133
1134 end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
1135 page = start >> TARGET_PAGE_BITS;
1136
1137 rcu_read_lock();
1138
1139 blocks = atomic_rcu_read(&ram_list.dirty_memory[client]);
1140
1141 while (page < end) {
1142 unsigned long idx = page / DIRTY_MEMORY_BLOCK_SIZE;
1143 unsigned long offset = page % DIRTY_MEMORY_BLOCK_SIZE;
1144 unsigned long num = MIN(end - page, DIRTY_MEMORY_BLOCK_SIZE - offset);
1145
1146 dirty |= bitmap_test_and_clear_atomic(blocks->blocks[idx],
1147 offset, num);
1148 page += num;
1149 }
1150
1151 rcu_read_unlock();
1152
1153 if (dirty && tcg_enabled()) {
1154 tlb_reset_dirty_range_all(start, length);
1155 }
1156
1157 return dirty;
1158}
1159
1160DirtyBitmapSnapshot *cpu_physical_memory_snapshot_and_clear_dirty
1161 (ram_addr_t start, ram_addr_t length, unsigned client)
1162{
1163 DirtyMemoryBlocks *blocks;
1164 unsigned long align = 1UL << (TARGET_PAGE_BITS + BITS_PER_LEVEL);
1165 ram_addr_t first = QEMU_ALIGN_DOWN(start, align);
1166 ram_addr_t last = QEMU_ALIGN_UP(start + length, align);
1167 DirtyBitmapSnapshot *snap;
1168 unsigned long page, end, dest;
1169
1170 snap = g_malloc0(sizeof(*snap) +
1171 ((last - first) >> (TARGET_PAGE_BITS + 3)));
1172 snap->start = first;
1173 snap->end = last;
1174
1175 page = first >> TARGET_PAGE_BITS;
1176 end = last >> TARGET_PAGE_BITS;
1177 dest = 0;
1178
1179 rcu_read_lock();
1180
1181 blocks = atomic_rcu_read(&ram_list.dirty_memory[client]);
1182
1183 while (page < end) {
1184 unsigned long idx = page / DIRTY_MEMORY_BLOCK_SIZE;
1185 unsigned long offset = page % DIRTY_MEMORY_BLOCK_SIZE;
1186 unsigned long num = MIN(end - page, DIRTY_MEMORY_BLOCK_SIZE - offset);
1187
1188 assert(QEMU_IS_ALIGNED(offset, (1 << BITS_PER_LEVEL)));
1189 assert(QEMU_IS_ALIGNED(num, (1 << BITS_PER_LEVEL)));
1190 offset >>= BITS_PER_LEVEL;
1191
1192 bitmap_copy_and_clear_atomic(snap->dirty + dest,
1193 blocks->blocks[idx] + offset,
1194 num);
1195 page += num;
1196 dest += num >> BITS_PER_LEVEL;
1197 }
1198
1199 rcu_read_unlock();
1200
1201 if (tcg_enabled()) {
1202 tlb_reset_dirty_range_all(start, length);
1203 }
1204
1205 return snap;
1206}
1207
1208bool cpu_physical_memory_snapshot_get_dirty(DirtyBitmapSnapshot *snap,
1209 ram_addr_t start,
1210 ram_addr_t length)
1211{
1212 unsigned long page, end;
1213
1214 assert(start >= snap->start);
1215 assert(start + length <= snap->end);
1216
1217 end = TARGET_PAGE_ALIGN(start + length - snap->start) >> TARGET_PAGE_BITS;
1218 page = (start - snap->start) >> TARGET_PAGE_BITS;
1219
1220 while (page < end) {
1221 if (test_bit(page, snap->dirty)) {
1222 return true;
1223 }
1224 page++;
1225 }
1226 return false;
1227}
1228
1229
1230hwaddr memory_region_section_get_iotlb(CPUState *cpu,
1231 MemoryRegionSection *section,
1232 target_ulong vaddr,
1233 hwaddr paddr, hwaddr xlat,
1234 int prot,
1235 target_ulong *address)
1236{
1237 hwaddr iotlb;
1238 CPUWatchpoint *wp;
1239
1240 if (memory_region_is_ram(section->mr)) {
1241
1242 iotlb = memory_region_get_ram_addr(section->mr) + xlat;
1243 if (!section->readonly) {
1244 iotlb |= PHYS_SECTION_NOTDIRTY;
1245 } else {
1246 iotlb |= PHYS_SECTION_ROM;
1247 }
1248 } else {
1249 AddressSpaceDispatch *d;
1250
1251 d = flatview_to_dispatch(section->fv);
1252 iotlb = section - d->map.sections;
1253 iotlb += xlat;
1254 }

    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
1258 QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
1259 if (cpu_watchpoint_address_matches(wp, vaddr, TARGET_PAGE_SIZE)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
1261 if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
1262 iotlb = PHYS_SECTION_WATCH + paddr;
1263 *address |= TLB_MMIO;
1264 break;
1265 }
1266 }
1267 }
1268
1269 return iotlb;
1270}
1271#endif
1272
1273#if !defined(CONFIG_USER_ONLY)
1274
1275static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
1276 uint16_t section);
1277static subpage_t *subpage_init(FlatView *fv, hwaddr base);
1278
1279static void *(*phys_mem_alloc)(size_t size, uint64_t *align) =
1280 qemu_anon_ram_alloc;
1281
1282
1283
1284
1285
1286
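/*
 * Set a custom physical guest memory allocator.
 * Accelerators with unusual needs may need this.  Hopefully, we can
 * get rid of it eventually.
 */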
1287void phys_mem_set_alloc(void *(*alloc)(size_t, uint64_t *align))
1288{
1289 phys_mem_alloc = alloc;
1290}
1291
1292static uint16_t phys_section_add(PhysPageMap *map,
1293 MemoryRegionSection *section)
1294{
    /* The physical section number is ORed with a page-aligned
     * pointer to produce the iotlb entries.  Thus it should
     * never overflow into the page-aligned value.
     */
1299 assert(map->sections_nb < TARGET_PAGE_SIZE);
1300
1301 if (map->sections_nb == map->sections_nb_alloc) {
1302 map->sections_nb_alloc = MAX(map->sections_nb_alloc * 2, 16);
1303 map->sections = g_renew(MemoryRegionSection, map->sections,
1304 map->sections_nb_alloc);
1305 }
1306 map->sections[map->sections_nb] = *section;
1307 memory_region_ref(section->mr);
1308 return map->sections_nb++;
1309}
1310
1311static void phys_section_destroy(MemoryRegion *mr)
1312{
1313 bool have_sub_page = mr->subpage;
1314
1315 memory_region_unref(mr);
1316
1317 if (have_sub_page) {
1318 subpage_t *subpage = container_of(mr, subpage_t, iomem);
1319 object_unref(OBJECT(&subpage->iomem));
1320 g_free(subpage);
1321 }
1322}
1323
1324static void phys_sections_free(PhysPageMap *map)
1325{
1326 while (map->sections_nb > 0) {
1327 MemoryRegionSection *section = &map->sections[--map->sections_nb];
1328 phys_section_destroy(section->mr);
1329 }
1330 g_free(map->sections);
1331 g_free(map->nodes);
1332}
1333
1334static void register_subpage(FlatView *fv, MemoryRegionSection *section)
1335{
1336 AddressSpaceDispatch *d = flatview_to_dispatch(fv);
1337 subpage_t *subpage;
1338 hwaddr base = section->offset_within_address_space
1339 & TARGET_PAGE_MASK;
1340 MemoryRegionSection *existing = phys_page_find(d, base);
1341 MemoryRegionSection subsection = {
1342 .offset_within_address_space = base,
1343 .size = int128_make64(TARGET_PAGE_SIZE),
1344 };
1345 hwaddr start, end;
1346
1347 assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);
1348
1349 if (!(existing->mr->subpage)) {
1350 subpage = subpage_init(fv, base);
1351 subsection.fv = fv;
1352 subsection.mr = &subpage->iomem;
1353 phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
1354 phys_section_add(&d->map, &subsection));
1355 } else {
1356 subpage = container_of(existing->mr, subpage_t, iomem);
1357 }
1358 start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
1359 end = start + int128_get64(section->size) - 1;
1360 subpage_register(subpage, start, end,
1361 phys_section_add(&d->map, section));
1362}
1363
1364
1365static void register_multipage(FlatView *fv,
1366 MemoryRegionSection *section)
1367{
1368 AddressSpaceDispatch *d = flatview_to_dispatch(fv);
1369 hwaddr start_addr = section->offset_within_address_space;
1370 uint16_t section_index = phys_section_add(&d->map, section);
1371 uint64_t num_pages = int128_get64(int128_rshift(section->size,
1372 TARGET_PAGE_BITS));
1373
1374 assert(num_pages);
1375 phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index);
1376}
1377
1378void flatview_add_to_dispatch(FlatView *fv, MemoryRegionSection *section)
1379{
1380 MemoryRegionSection now = *section, remain = *section;
1381 Int128 page_size = int128_make64(TARGET_PAGE_SIZE);
1382
1383 if (now.offset_within_address_space & ~TARGET_PAGE_MASK) {
1384 uint64_t left = TARGET_PAGE_ALIGN(now.offset_within_address_space)
1385 - now.offset_within_address_space;
1386
1387 now.size = int128_min(int128_make64(left), now.size);
1388 register_subpage(fv, &now);
1389 } else {
1390 now.size = int128_zero();
1391 }
1392 while (int128_ne(remain.size, now.size)) {
1393 remain.size = int128_sub(remain.size, now.size);
1394 remain.offset_within_address_space += int128_get64(now.size);
1395 remain.offset_within_region += int128_get64(now.size);
1396 now = remain;
1397 if (int128_lt(remain.size, page_size)) {
1398 register_subpage(fv, &now);
1399 } else if (remain.offset_within_address_space & ~TARGET_PAGE_MASK) {
1400 now.size = page_size;
1401 register_subpage(fv, &now);
1402 } else {
1403 now.size = int128_and(now.size, int128_neg(page_size));
1404 register_multipage(fv, &now);
1405 }
1406 }
1407}
1408
1409void qemu_flush_coalesced_mmio_buffer(void)
1410{
1411 if (kvm_enabled())
1412 kvm_flush_coalesced_mmio_buffer();
1413}
1414
1415void qemu_mutex_lock_ramlist(void)
1416{
1417 qemu_mutex_lock(&ram_list.mutex);
1418}
1419
1420void qemu_mutex_unlock_ramlist(void)
1421{
1422 qemu_mutex_unlock(&ram_list.mutex);
1423}
1424
1425void ram_block_dump(Monitor *mon)
1426{
1427 RAMBlock *block;
1428 char *psize;
1429
1430 rcu_read_lock();
1431 monitor_printf(mon, "%24s %8s %18s %18s %18s\n",
1432 "Block Name", "PSize", "Offset", "Used", "Total");
1433 RAMBLOCK_FOREACH(block) {
1434 psize = size_to_str(block->page_size);
1435 monitor_printf(mon, "%24s %8s 0x%016" PRIx64 " 0x%016" PRIx64
1436 " 0x%016" PRIx64 "\n", block->idstr, psize,
1437 (uint64_t)block->offset,
1438 (uint64_t)block->used_length,
1439 (uint64_t)block->max_length);
1440 g_free(psize);
1441 }
1442 rcu_read_unlock();
1443}
1444
1445#ifdef __linux__
1446
1447
1448
1449
1450
1451
1452static int find_max_supported_pagesize(Object *obj, void *opaque)
1453{
1454 char *mem_path;
1455 long *hpsize_min = opaque;
1456
1457 if (object_dynamic_cast(obj, TYPE_MEMORY_BACKEND)) {
1458 mem_path = object_property_get_str(obj, "mem-path", NULL);
1459 if (mem_path) {
1460 long hpsize = qemu_mempath_getpagesize(mem_path);
1461 if (hpsize < *hpsize_min) {
1462 *hpsize_min = hpsize;
1463 }
1464 } else {
1465 *hpsize_min = getpagesize();
1466 }
1467 }
1468
1469 return 0;
1470}
1471
1472long qemu_getrampagesize(void)
1473{
1474 long hpsize = LONG_MAX;
1475 long mainrampagesize;
1476 Object *memdev_root;
1477
1478 if (mem_path) {
1479 mainrampagesize = qemu_mempath_getpagesize(mem_path);
1480 } else {
1481 mainrampagesize = getpagesize();
1482 }
1483
1484
1485
1486
1487
1488
1489
1490
1491
1492
1493
1494 memdev_root = object_resolve_path("/objects", NULL);
1495 if (memdev_root) {
1496 object_child_foreach(memdev_root, find_max_supported_pagesize, &hpsize);
1497 }
1498 if (hpsize == LONG_MAX) {
1499
1500 return mainrampagesize;
1501 }
1502
1503
1504
1505
1506
1507 if (hpsize > mainrampagesize &&
1508 (nb_numa_nodes == 0 || numa_info[0].node_memdev == NULL)) {
1509 static bool warned;
1510 if (!warned) {
1511 error_report("Huge page support disabled (n/a for main memory).");
1512 warned = true;
1513 }
1514 return mainrampagesize;
1515 }
1516
1517 return hpsize;
1518}
1519#else
1520long qemu_getrampagesize(void)
1521{
1522 return getpagesize();
1523}
1524#endif
1525
1526#ifdef __linux__
1527static int64_t get_file_size(int fd)
1528{
1529 int64_t size = lseek(fd, 0, SEEK_END);
1530 if (size < 0) {
1531 return -errno;
1532 }
1533 return size;
1534}
1535
1536static int file_ram_open(const char *path,
1537 const char *region_name,
1538 bool *created,
1539 Error **errp)
1540{
1541 char *filename;
1542 char *sanitized_name;
1543 char *c;
1544 int fd = -1;
1545
1546 *created = false;
1547 for (;;) {
1548 fd = open(path, O_RDWR);
1549 if (fd >= 0) {
1550
1551 break;
1552 }
1553 if (errno == ENOENT) {
1554
1555 fd = open(path, O_RDWR | O_CREAT | O_EXCL, 0644);
1556 if (fd >= 0) {
1557 *created = true;
1558 break;
1559 }
1560 } else if (errno == EISDIR) {
1561
1562
1563 sanitized_name = g_strdup(region_name);
1564 for (c = sanitized_name; *c != '\0'; c++) {
1565 if (*c == '/') {
1566 *c = '_';
1567 }
1568 }
1569
1570 filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
1571 sanitized_name);
1572 g_free(sanitized_name);
1573
1574 fd = mkstemp(filename);
1575 if (fd >= 0) {
1576 unlink(filename);
1577 g_free(filename);
1578 break;
1579 }
1580 g_free(filename);
1581 }
1582 if (errno != EEXIST && errno != EINTR) {
1583 error_setg_errno(errp, errno,
1584 "can't open backing store %s for guest RAM",
1585 path);
1586 return -1;
1587 }
1588
1589
1590
1591
1592 }
1593
1594 return fd;
1595}
1596
1597static void *file_ram_alloc(RAMBlock *block,
1598 ram_addr_t memory,
1599 int fd,
1600 bool truncate,
1601 Error **errp)
1602{
1603 void *area;
1604
1605 block->page_size = qemu_fd_getpagesize(fd);
1606 block->mr->align = block->page_size;
1607#if defined(__s390x__)
1608 if (kvm_enabled()) {
1609 block->mr->align = MAX(block->mr->align, QEMU_VMALLOC_ALIGN);
1610 }
1611#endif
1612
1613 if (memory < block->page_size) {
1614 error_setg(errp, "memory size 0x" RAM_ADDR_FMT " must be equal to "
1615 "or larger than page size 0x%zx",
1616 memory, block->page_size);
1617 return NULL;
1618 }
1619
1620 memory = ROUND_UP(memory, block->page_size);
1621
1622
1623
1624
1625
1626
1627
1628
1629
1630
1631
1632
1633
1634
1635
1636 if (truncate && ftruncate(fd, memory)) {
1637 perror("ftruncate");
1638 }
1639
1640 area = qemu_ram_mmap(fd, memory, block->mr->align,
1641 block->flags & RAM_SHARED);
1642 if (area == MAP_FAILED) {
1643 error_setg_errno(errp, errno,
1644 "unable to map backing store for guest RAM");
1645 return NULL;
1646 }
1647
1648 if (mem_prealloc) {
1649 os_mem_prealloc(fd, area, memory, smp_cpus, errp);
1650 if (errp && *errp) {
1651 qemu_ram_munmap(area, memory);
1652 return NULL;
1653 }
1654 }
1655
1656 block->fd = fd;
1657 return area;
1658}
1659#endif
1660
1661
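/* Allocate space within the ram_addr_t space that governs the
 * dirty bitmaps.
 * Called with the ramlist lock held.
 */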
1662static ram_addr_t find_ram_offset(ram_addr_t size)
1663{
1664 RAMBlock *block, *next_block;
1665 ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;
1666
1667 assert(size != 0);
1668
1669 if (QLIST_EMPTY_RCU(&ram_list.blocks)) {
1670 return 0;
1671 }
1672
1673 RAMBLOCK_FOREACH(block) {
1674 ram_addr_t end, next = RAM_ADDR_MAX;
1675
1676 end = block->offset + block->max_length;
1677
1678 RAMBLOCK_FOREACH(next_block) {
1679 if (next_block->offset >= end) {
1680 next = MIN(next, next_block->offset);
1681 }
1682 }
1683 if (next - end >= size && next - end < mingap) {
1684 offset = end;
1685 mingap = next - end;
1686 }
1687 }
1688
1689 if (offset == RAM_ADDR_MAX) {
1690 fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
1691 (uint64_t)size);
1692 abort();
1693 }
1694
1695 return offset;
1696}
1697
1698unsigned long last_ram_page(void)
1699{
1700 RAMBlock *block;
1701 ram_addr_t last = 0;
1702
1703 rcu_read_lock();
1704 RAMBLOCK_FOREACH(block) {
1705 last = MAX(last, block->offset + block->max_length);
1706 }
1707 rcu_read_unlock();
1708 return last >> TARGET_PAGE_BITS;
1709}
1710
1711static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
1712{
1713 int ret;
1714
1715
1716 if (!machine_dump_guest_core(current_machine)) {
1717 ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
1718 if (ret) {
1719 perror("qemu_madvise");
1720 fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
1721 "but dump_guest_core=off specified\n");
1722 }
1723 }
1724}
1725
1726const char *qemu_ram_get_idstr(RAMBlock *rb)
1727{
1728 return rb->idstr;
1729}
1730
1731bool qemu_ram_is_shared(RAMBlock *rb)
1732{
1733 return rb->flags & RAM_SHARED;
1734}
1735
1736
1737void qemu_ram_set_idstr(RAMBlock *new_block, const char *name, DeviceState *dev)
1738{
1739 RAMBlock *block;
1740
1741 assert(new_block);
1742 assert(!new_block->idstr[0]);
1743
1744 if (dev) {
1745 char *id = qdev_get_dev_path(dev);
1746 if (id) {
1747 snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
1748 g_free(id);
1749 }
1750 }
1751 pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
1752
1753 rcu_read_lock();
1754 RAMBLOCK_FOREACH(block) {
1755 if (block != new_block &&
1756 !strcmp(block->idstr, new_block->idstr)) {
1757 fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
1758 new_block->idstr);
1759 abort();
1760 }
1761 }
1762 rcu_read_unlock();
1763}
1764
1765
1766void qemu_ram_unset_idstr(RAMBlock *block)
1767{
1768
1769
1770
1771
1772 if (block) {
1773 memset(block->idstr, 0, sizeof(block->idstr));
1774 }
1775}
1776
1777size_t qemu_ram_pagesize(RAMBlock *rb)
1778{
1779 return rb->page_size;
1780}
1781
1782
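/* Returns the largest size of page in use */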
1783size_t qemu_ram_pagesize_largest(void)
1784{
1785 RAMBlock *block;
1786 size_t largest = 0;
1787
1788 RAMBLOCK_FOREACH(block) {
1789 largest = MAX(largest, qemu_ram_pagesize(block));
1790 }
1791
1792 return largest;
1793}
1794
1795static int memory_try_enable_merging(void *addr, size_t len)
1796{
1797 if (!machine_mem_merge(current_machine)) {
1798
1799 return 0;
1800 }
1801
1802 return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
1803}
1804
1805
1806
1807
1808
1809
1810
1811
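/* Only legal before guest might have detected the memory size: e.g. on
 * incoming migration, or right after reset.
 *
 * @block: block to be resized
 * @newsize: new size (in bytes)
 * @errp: pointer to Error*, to store an error if it happens.
 */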
1812int qemu_ram_resize(RAMBlock *block, ram_addr_t newsize, Error **errp)
1813{
1814 assert(block);
1815
1816 newsize = HOST_PAGE_ALIGN(newsize);
1817
1818 if (block->used_length == newsize) {
1819 return 0;
1820 }
1821
1822 if (!(block->flags & RAM_RESIZEABLE)) {
1823 error_setg_errno(errp, EINVAL,
1824 "Length mismatch: %s: 0x" RAM_ADDR_FMT
1825 " in != 0x" RAM_ADDR_FMT, block->idstr,
1826 newsize, block->used_length);
1827 return -EINVAL;
1828 }
1829
1830 if (block->max_length < newsize) {
1831 error_setg_errno(errp, EINVAL,
1832 "Length too large: %s: 0x" RAM_ADDR_FMT
1833 " > 0x" RAM_ADDR_FMT, block->idstr,
1834 newsize, block->max_length);
1835 return -EINVAL;
1836 }
1837
1838 cpu_physical_memory_clear_dirty_range(block->offset, block->used_length);
1839 block->used_length = newsize;
1840 cpu_physical_memory_set_dirty_range(block->offset, block->used_length,
1841 DIRTY_CLIENTS_ALL);
1842 memory_region_set_size(block->mr, newsize);
1843 if (block->resized) {
1844 block->resized(block->idstr, newsize, block->host);
1845 }
1846 return 0;
1847}
1848
1849
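/* Called with ram_list.mutex held */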
1850static void dirty_memory_extend(ram_addr_t old_ram_size,
1851 ram_addr_t new_ram_size)
1852{
1853 ram_addr_t old_num_blocks = DIV_ROUND_UP(old_ram_size,
1854 DIRTY_MEMORY_BLOCK_SIZE);
1855 ram_addr_t new_num_blocks = DIV_ROUND_UP(new_ram_size,
1856 DIRTY_MEMORY_BLOCK_SIZE);
1857 int i;
1858
1859
1860 if (new_num_blocks <= old_num_blocks) {
1861 return;
1862 }
1863
1864 for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
1865 DirtyMemoryBlocks *old_blocks;
1866 DirtyMemoryBlocks *new_blocks;
1867 int j;
1868
1869 old_blocks = atomic_rcu_read(&ram_list.dirty_memory[i]);
1870 new_blocks = g_malloc(sizeof(*new_blocks) +
1871 sizeof(new_blocks->blocks[0]) * new_num_blocks);
1872
1873 if (old_num_blocks) {
1874 memcpy(new_blocks->blocks, old_blocks->blocks,
1875 old_num_blocks * sizeof(old_blocks->blocks[0]));
1876 }
1877
1878 for (j = old_num_blocks; j < new_num_blocks; j++) {
1879 new_blocks->blocks[j] = bitmap_new(DIRTY_MEMORY_BLOCK_SIZE);
1880 }
1881
1882 atomic_rcu_set(&ram_list.dirty_memory[i], new_blocks);
1883
1884 if (old_blocks) {
1885 g_free_rcu(old_blocks, rcu);
1886 }
1887 }
1888}
1889
1890static void ram_block_add(RAMBlock *new_block, Error **errp)
1891{
1892 RAMBlock *block;
1893 RAMBlock *last_block = NULL;
1894 ram_addr_t old_ram_size, new_ram_size;
1895 Error *err = NULL;
1896
1897 old_ram_size = last_ram_page();
1898
1899 qemu_mutex_lock_ramlist();
1900 new_block->offset = find_ram_offset(new_block->max_length);
1901
1902 if (!new_block->host) {
1903 if (xen_enabled()) {
1904 xen_ram_alloc(new_block->offset, new_block->max_length,
1905 new_block->mr, &err);
1906 if (err) {
1907 error_propagate(errp, err);
1908 qemu_mutex_unlock_ramlist();
1909 return;
1910 }
1911 } else {
1912 new_block->host = phys_mem_alloc(new_block->max_length,
1913 &new_block->mr->align);
1914 if (!new_block->host) {
1915 error_setg_errno(errp, errno,
1916 "cannot set up guest memory '%s'",
1917 memory_region_name(new_block->mr));
1918 qemu_mutex_unlock_ramlist();
1919 return;
1920 }
1921 memory_try_enable_merging(new_block->host, new_block->max_length);
1922 }
1923 }
1924
1925 new_ram_size = MAX(old_ram_size,
1926 (new_block->offset + new_block->max_length) >> TARGET_PAGE_BITS);
1927 if (new_ram_size > old_ram_size) {
1928 dirty_memory_extend(old_ram_size, new_ram_size);
1929 }
1930
    /* Keep the list sorted from biggest to smallest block.  Unlike QTAILQ,
     * QLIST (which has an RCU-friendly variant) does not have insertion at
     * tail, so save the last element in last_block.
     */
1934 RAMBLOCK_FOREACH(block) {
1935 last_block = block;
1936 if (block->max_length < new_block->max_length) {
1937 break;
1938 }
1939 }
1940 if (block) {
1941 QLIST_INSERT_BEFORE_RCU(block, new_block, next);
1942 } else if (last_block) {
1943 QLIST_INSERT_AFTER_RCU(last_block, new_block, next);
1944 } else {
1945 QLIST_INSERT_HEAD_RCU(&ram_list.blocks, new_block, next);
1946 }
1947 ram_list.mru_block = NULL;
1948
1949
1950 smp_wmb();
1951 ram_list.version++;
1952 qemu_mutex_unlock_ramlist();
1953
1954 cpu_physical_memory_set_dirty_range(new_block->offset,
1955 new_block->used_length,
1956 DIRTY_CLIENTS_ALL);
1957
1958 if (new_block->host) {
1959 qemu_ram_setup_dump(new_block->host, new_block->max_length);
1960 qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_HUGEPAGE);
1961
1962 qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_DONTFORK);
1963 ram_block_notify_add(new_block->host, new_block->max_length);
1964 }
1965}
1966
1967#ifdef __linux__
1968RAMBlock *qemu_ram_alloc_from_fd(ram_addr_t size, MemoryRegion *mr,
1969 bool share, int fd,
1970 Error **errp)
1971{
1972 RAMBlock *new_block;
1973 Error *local_err = NULL;
1974 int64_t file_size;
1975
1976 if (xen_enabled()) {
1977 error_setg(errp, "-mem-path not supported with Xen");
1978 return NULL;
1979 }
1980
1981 if (kvm_enabled() && !kvm_has_sync_mmu()) {
1982 error_setg(errp,
1983 "host lacks kvm mmu notifiers, -mem-path unsupported");
1984 return NULL;
1985 }
1986
1987 if (phys_mem_alloc != qemu_anon_ram_alloc) {
1988
1989
1990
1991
1992
1993 error_setg(errp,
1994 "-mem-path not supported with this accelerator");
1995 return NULL;
1996 }
1997
1998 size = HOST_PAGE_ALIGN(size);
1999 file_size = get_file_size(fd);
2000 if (file_size > 0 && file_size < size) {
2001 error_setg(errp, "backing store %s size 0x%" PRIx64
2002 " does not match 'size' option 0x" RAM_ADDR_FMT,
2003 mem_path, file_size, size);
2004 return NULL;
2005 }
2006
2007 new_block = g_malloc0(sizeof(*new_block));
2008 new_block->mr = mr;
2009 new_block->used_length = size;
2010 new_block->max_length = size;
2011 new_block->flags = share ? RAM_SHARED : 0;
2012 new_block->host = file_ram_alloc(new_block, size, fd, !file_size, errp);
2013 if (!new_block->host) {
2014 g_free(new_block);
2015 return NULL;
2016 }
2017
2018 ram_block_add(new_block, &local_err);
2019 if (local_err) {
2020 g_free(new_block);
2021 error_propagate(errp, local_err);
2022 return NULL;
2023 }
2024 return new_block;
2025
2026}
2027
2028
2029RAMBlock *qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr,
2030 bool share, const char *mem_path,
2031 Error **errp)
2032{
2033 int fd;
2034 bool created;
2035 RAMBlock *block;
2036
2037 fd = file_ram_open(mem_path, memory_region_name(mr), &created, errp);
2038 if (fd < 0) {
2039 return NULL;
2040 }
2041
2042 block = qemu_ram_alloc_from_fd(size, mr, share, fd, errp);
2043 if (!block) {
2044 if (created) {
2045 unlink(mem_path);
2046 }
2047 close(fd);
2048 return NULL;
2049 }
2050
2051 return block;
2052}
2053#endif
2054
2055static
2056RAMBlock *qemu_ram_alloc_internal(ram_addr_t size, ram_addr_t max_size,
2057 void (*resized)(const char*,
2058 uint64_t length,
2059 void *host),
2060 void *host, bool resizeable,
2061 MemoryRegion *mr, Error **errp)
2062{
2063 RAMBlock *new_block;
2064 Error *local_err = NULL;
2065
2066 size = HOST_PAGE_ALIGN(size);
2067 max_size = HOST_PAGE_ALIGN(max_size);
2068 new_block = g_malloc0(sizeof(*new_block));
2069 new_block->mr = mr;
2070 new_block->resized = resized;
2071 new_block->used_length = size;
2072 new_block->max_length = max_size;
2073 assert(max_size >= size);
2074 new_block->fd = -1;
2075 new_block->page_size = getpagesize();
2076 new_block->host = host;
2077 if (host) {
2078 new_block->flags |= RAM_PREALLOC;
2079 }
2080 if (resizeable) {
2081 new_block->flags |= RAM_RESIZEABLE;
2082 }
2083 ram_block_add(new_block, &local_err);
2084 if (local_err) {
2085 g_free(new_block);
2086 error_propagate(errp, local_err);
2087 return NULL;
2088 }
2089 return new_block;
2090}
2091
2092RAMBlock *qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
2093 MemoryRegion *mr, Error **errp)
2094{
2095 return qemu_ram_alloc_internal(size, size, NULL, host, false, mr, errp);
2096}
2097
2098RAMBlock *qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr, Error **errp)
2099{
2100 return qemu_ram_alloc_internal(size, size, NULL, NULL, false, mr, errp);
2101}
2102
2103RAMBlock *qemu_ram_alloc_resizeable(ram_addr_t size, ram_addr_t maxsz,
2104 void (*resized)(const char*,
2105 uint64_t length,
2106 void *host),
2107 MemoryRegion *mr, Error **errp)
2108{
2109 return qemu_ram_alloc_internal(size, maxsz, resized, NULL, true, mr, errp);
2110}
2111
2112static void reclaim_ramblock(RAMBlock *block)
2113{
2114 if (block->flags & RAM_PREALLOC) {
2115 ;
2116 } else if (xen_enabled()) {
2117 xen_invalidate_map_cache_entry(block->host);
2118#ifndef _WIN32
2119 } else if (block->fd >= 0) {
2120 qemu_ram_munmap(block->host, block->max_length);
2121 close(block->fd);
2122#endif
2123 } else {
2124 qemu_anon_ram_free(block->host, block->max_length);
2125 }
2126 g_free(block);
2127}
2128
2129void qemu_ram_free(RAMBlock *block)
2130{
2131 if (!block) {
2132 return;
2133 }
2134
2135 if (block->host) {
2136 ram_block_notify_remove(block->host, block->max_length);
2137 }
2138
2139 qemu_mutex_lock_ramlist();
2140 QLIST_REMOVE_RCU(block, next);
2141 ram_list.mru_block = NULL;
2142
2143 smp_wmb();
2144 ram_list.version++;
2145 call_rcu(block, reclaim_ramblock, rcu);
2146 qemu_mutex_unlock_ramlist();
2147}
2148
2149#ifndef _WIN32
2150void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
2151{
2152 RAMBlock *block;
2153 ram_addr_t offset;
2154 int flags;
2155 void *area, *vaddr;
2156
2157 RAMBLOCK_FOREACH(block) {
2158 offset = addr - block->offset;
2159 if (offset < block->max_length) {
2160 vaddr = ramblock_ptr(block, offset);
2161 if (block->flags & RAM_PREALLOC) {
2162 ;
2163 } else if (xen_enabled()) {
2164 abort();
2165 } else {
2166 flags = MAP_FIXED;
2167 if (block->fd >= 0) {
2168 flags |= (block->flags & RAM_SHARED ?
2169 MAP_SHARED : MAP_PRIVATE);
2170 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
2171 flags, block->fd, offset);
2172 } else {
2173
2174
2175
2176
2177
2178 assert(phys_mem_alloc == qemu_anon_ram_alloc);
2179
2180 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
2181 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
2182 flags, -1, 0);
2183 }
2184 if (area != vaddr) {
2185 fprintf(stderr, "Could not remap addr: "
2186 RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
2187 length, addr);
2188 exit(1);
2189 }
2190 memory_try_enable_merging(vaddr, length);
2191 qemu_ram_setup_dump(vaddr, length);
2192 }
2193 }
2194 }
2195}
2196#endif
2197
2198
2199
2200
2201
2202
2203
2204
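/* Return a host pointer to ram allocated with qemu_ram_alloc.
 * This should not be used for general purpose DMA.  Use address_space_map
 * or address_space_rw instead. For local memory (e.g. video ram) that the
 * device owns, use memory_region_get_ram_ptr.
 *
 * Called within RCU critical section.
 */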
2205void *qemu_map_ram_ptr(RAMBlock *ram_block, ram_addr_t addr)
2206{
2207 RAMBlock *block = ram_block;
2208
2209 if (block == NULL) {
2210 block = qemu_get_ram_block(addr);
2211 addr -= block->offset;
2212 }
2213
2214 if (xen_enabled() && block->host == NULL) {
2215
2216
2217
2218
2219 if (block->offset == 0) {
2220 return xen_map_cache(addr, 0, 0, false);
2221 }
2222
2223 block->host = xen_map_cache(block->offset, block->max_length, 1, false);
2224 }
2225 return ramblock_ptr(block, addr);
2226}
2227
2228
2229
2230
2231
2232
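/* Return a host pointer to guest's ram.  Similar to qemu_map_ram_ptr
 * but takes a size argument.
 *
 * Called within RCU critical section.
 */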
2233static void *qemu_ram_ptr_length(RAMBlock *ram_block, ram_addr_t addr,
2234 hwaddr *size, bool lock)
2235{
2236 RAMBlock *block = ram_block;
2237 if (*size == 0) {
2238 return NULL;
2239 }
2240
2241 if (block == NULL) {
2242 block = qemu_get_ram_block(addr);
2243 addr -= block->offset;
2244 }
2245 *size = MIN(*size, block->max_length - addr);
2246
2247 if (xen_enabled() && block->host == NULL) {
2248
2249
2250
2251
2252 if (block->offset == 0) {
2253 return xen_map_cache(addr, *size, lock, lock);
2254 }
2255
2256 block->host = xen_map_cache(block->offset, block->max_length, 1, lock);
2257 }
2258
2259 return ramblock_ptr(block, addr);
2260}
2261
2262
2263
2264
2265
2266
2267
2268
2269
2270
2271
2272
2273
2274
2275
2276
2277
2278
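/*
 * Translates a host ptr back to a RAMBlock and an offset in that RAMBlock.
 *
 * ptr: Host pointer to look up
 * round_offset: If true round the result offset down to a page boundary
 * *offset: set to result offset within the RAMBlock
 *
 * Returns: RAMBlock (or NULL if not found)
 *
 * By the time this function returns, the returned pointer is not protected
 * by RCU anymore.  If the caller is not within an RCU critical section and
 * does not hold the iothread lock, it must have other means of protecting
 * the pointer, such as a reference to the region that includes the incoming
 * ram_addr_t.
 */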
2279RAMBlock *qemu_ram_block_from_host(void *ptr, bool round_offset,
2280 ram_addr_t *offset)
2281{
2282 RAMBlock *block;
2283 uint8_t *host = ptr;
2284
2285 if (xen_enabled()) {
2286 ram_addr_t ram_addr;
2287 rcu_read_lock();
2288 ram_addr = xen_ram_addr_from_mapcache(ptr);
2289 block = qemu_get_ram_block(ram_addr);
2290 if (block) {
2291 *offset = ram_addr - block->offset;
2292 }
2293 rcu_read_unlock();
2294 return block;
2295 }
2296
2297 rcu_read_lock();
2298 block = atomic_rcu_read(&ram_list.mru_block);
2299 if (block && block->host && host - block->host < block->max_length) {
2300 goto found;
2301 }
2302
2303 RAMBLOCK_FOREACH(block) {
2304
2305 if (block->host == NULL) {
2306 continue;
2307 }
2308 if (host - block->host < block->max_length) {
2309 goto found;
2310 }
2311 }
2312
2313 rcu_read_unlock();
2314 return NULL;
2315
2316found:
2317 *offset = (host - block->host);
2318 if (round_offset) {
2319 *offset &= TARGET_PAGE_MASK;
2320 }
2321 rcu_read_unlock();
2322 return block;
2323}
2324
2325
2326
2327
2328
2329
2330
2331
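/*
 * Finds the named RAMBlock
 *
 * name: The name of RAMBlock to find
 *
 * Returns: RAMBlock (or NULL if not found)
 */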
2332RAMBlock *qemu_ram_block_by_name(const char *name)
2333{
2334 RAMBlock *block;
2335
2336 RAMBLOCK_FOREACH(block) {
2337 if (!strcmp(name, block->idstr)) {
2338 return block;
2339 }
2340 }
2341
2342 return NULL;
2343}
2344
2345
2346
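/* Some of the softmmu routines need to translate from a host pointer
   (typically a TLB entry) back to a ram offset.  */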
2347ram_addr_t qemu_ram_addr_from_host(void *ptr)
2348{
2349 RAMBlock *block;
2350 ram_addr_t offset;
2351
2352 block = qemu_ram_block_from_host(ptr, false, &offset);
2353 if (!block) {
2354 return RAM_ADDR_INVALID;
2355 }
2356
2357 return block->offset + offset;
2358}
2359
2360
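/* Called within RCU critical section.  */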
2361static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
2362 uint64_t val, unsigned size)
2363{
2364 bool locked = false;
2365
2366 assert(tcg_enabled());
2367 if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
2368 locked = true;
2369 tb_lock();
2370 tb_invalidate_phys_page_fast(ram_addr, size);
2371 }
2372 switch (size) {
2373 case 1:
2374 stb_p(qemu_map_ram_ptr(NULL, ram_addr), val);
2375 break;
2376 case 2:
2377 stw_p(qemu_map_ram_ptr(NULL, ram_addr), val);
2378 break;
2379 case 4:
2380 stl_p(qemu_map_ram_ptr(NULL, ram_addr), val);
2381 break;
2382 default:
2383 abort();
2384 }
2385
2386 if (locked) {
2387 tb_unlock();
2388 }
2389
2390
2391
2392
2393 cpu_physical_memory_set_dirty_range(ram_addr, size,
2394 DIRTY_CLIENTS_NOCODE);
2395
2396
2397 if (!cpu_physical_memory_is_clean(ram_addr)) {
2398 tlb_set_dirty(current_cpu, current_cpu->mem_io_vaddr);
2399 }
2400}
2401
2402static bool notdirty_mem_accepts(void *opaque, hwaddr addr,
2403 unsigned size, bool is_write)
2404{
2405 return is_write;
2406}
2407
2408static const MemoryRegionOps notdirty_mem_ops = {
2409 .write = notdirty_mem_write,
2410 .valid.accepts = notdirty_mem_accepts,
2411 .endianness = DEVICE_NATIVE_ENDIAN,
2412};
2413
2414
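/* Generate a debug exception if a watchpoint has been hit.  */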
2415static void check_watchpoint(int offset, int len, MemTxAttrs attrs, int flags)
2416{
2417 CPUState *cpu = current_cpu;
2418 CPUClass *cc = CPU_GET_CLASS(cpu);
2419 CPUArchState *env = cpu->env_ptr;
2420 target_ulong pc, cs_base;
2421 target_ulong vaddr;
2422 CPUWatchpoint *wp;
2423 uint32_t cpu_flags;
2424
2425 assert(tcg_enabled());
2426 if (cpu->watchpoint_hit) {
        /* We re-entered the check after replacing the TB.  Now raise
         * the debug interrupt so that it will trigger after the
         * current instruction.
         */
2430 cpu_interrupt(cpu, CPU_INTERRUPT_DEBUG);
2431 return;
2432 }
2433 vaddr = (cpu->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
2434 vaddr = cc->adjust_watchpoint_address(cpu, vaddr, len);
2435 QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
2436 if (cpu_watchpoint_address_matches(wp, vaddr, len)
2437 && (wp->flags & flags)) {
2438 if (flags == BP_MEM_READ) {
2439 wp->flags |= BP_WATCHPOINT_HIT_READ;
2440 } else {
2441 wp->flags |= BP_WATCHPOINT_HIT_WRITE;
2442 }
2443 wp->hitaddr = vaddr;
2444 wp->hitattrs = attrs;
2445 if (!cpu->watchpoint_hit) {
2446 if (wp->flags & BP_CPU &&
2447 !cc->debug_check_watchpoint(cpu, wp)) {
2448 wp->flags &= ~BP_WATCHPOINT_HIT;
2449 continue;
2450 }
2451 cpu->watchpoint_hit = wp;
2452
2453
2454
2455
2456
2457 tb_lock();
2458 tb_check_watchpoint(cpu);
2459 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
2460 cpu->exception_index = EXCP_DEBUG;
2461 cpu_loop_exit(cpu);
2462 } else {
2463 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
2464 tb_gen_code(cpu, pc, cs_base, cpu_flags, 1);
2465 cpu_loop_exit_noexc(cpu);
2466 }
2467 }
2468 } else {
2469 wp->flags &= ~BP_WATCHPOINT_HIT;
2470 }
2471 }
2472}
2473
2474
2475
2476
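/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   phys routines.  */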
2477static MemTxResult watch_mem_read(void *opaque, hwaddr addr, uint64_t *pdata,
2478 unsigned size, MemTxAttrs attrs)
2479{
2480 MemTxResult res;
2481 uint64_t data;
2482 int asidx = cpu_asidx_from_attrs(current_cpu, attrs);
2483 AddressSpace *as = current_cpu->cpu_ases[asidx].as;
2484
2485 check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_READ);
2486 switch (size) {
2487 case 1:
2488 data = address_space_ldub(as, addr, attrs, &res);
2489 break;
2490 case 2:
2491 data = address_space_lduw(as, addr, attrs, &res);
2492 break;
2493 case 4:
2494 data = address_space_ldl(as, addr, attrs, &res);
2495 break;
2496 default: abort();
2497 }
2498 *pdata = data;
2499 return res;
2500}
2501
2502static MemTxResult watch_mem_write(void *opaque, hwaddr addr,
2503 uint64_t val, unsigned size,
2504 MemTxAttrs attrs)
2505{
2506 MemTxResult res;
2507 int asidx = cpu_asidx_from_attrs(current_cpu, attrs);
2508 AddressSpace *as = current_cpu->cpu_ases[asidx].as;
2509
2510 check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_WRITE);
2511 switch (size) {
2512 case 1:
2513 address_space_stb(as, addr, val, attrs, &res);
2514 break;
2515 case 2:
2516 address_space_stw(as, addr, val, attrs, &res);
2517 break;
2518 case 4:
2519 address_space_stl(as, addr, val, attrs, &res);
2520 break;
2521 default: abort();
2522 }
2523 return res;
2524}
2525
2526static const MemoryRegionOps watch_mem_ops = {
2527 .read_with_attrs = watch_mem_read,
2528 .write_with_attrs = watch_mem_write,
2529 .endianness = DEVICE_NATIVE_ENDIAN,
2530};
2531
2532static MemTxResult flatview_write(FlatView *fv, hwaddr addr, MemTxAttrs attrs,
2533 const uint8_t *buf, int len);
2534static bool flatview_access_valid(FlatView *fv, hwaddr addr, int len,
2535 bool is_write);
2536
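/* Sub-page accesses are forwarded to the owning FlatView at
 * subpage->base + addr, so the normal dispatch code resolves the
 * MemoryRegion that actually backs this part of the page.
 */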
2537static MemTxResult subpage_read(void *opaque, hwaddr addr, uint64_t *data,
2538 unsigned len, MemTxAttrs attrs)
2539{
2540 subpage_t *subpage = opaque;
2541 uint8_t buf[8];
2542 MemTxResult res;
2543
2544#if defined(DEBUG_SUBPAGE)
2545 printf("%s: subpage %p len %u addr " TARGET_FMT_plx "\n", __func__,
2546 subpage, len, addr);
2547#endif
2548 res = flatview_read(subpage->fv, addr + subpage->base, attrs, buf, len);
2549 if (res) {
2550 return res;
2551 }
2552 switch (len) {
2553 case 1:
2554 *data = ldub_p(buf);
2555 return MEMTX_OK;
2556 case 2:
2557 *data = lduw_p(buf);
2558 return MEMTX_OK;
2559 case 4:
2560 *data = ldl_p(buf);
2561 return MEMTX_OK;
2562 case 8:
2563 *data = ldq_p(buf);
2564 return MEMTX_OK;
2565 default:
2566 abort();
2567 }
2568}
2569
2570static MemTxResult subpage_write(void *opaque, hwaddr addr,
2571 uint64_t value, unsigned len, MemTxAttrs attrs)
2572{
2573 subpage_t *subpage = opaque;
2574 uint8_t buf[8];
2575
2576#if defined(DEBUG_SUBPAGE)
2577 printf("%s: subpage %p len %u addr " TARGET_FMT_plx
2578 " value %"PRIx64"\n",
2579 __func__, subpage, len, addr, value);
2580#endif
2581 switch (len) {
2582 case 1:
2583 stb_p(buf, value);
2584 break;
2585 case 2:
2586 stw_p(buf, value);
2587 break;
2588 case 4:
2589 stl_p(buf, value);
2590 break;
2591 case 8:
2592 stq_p(buf, value);
2593 break;
2594 default:
2595 abort();
2596 }
2597 return flatview_write(subpage->fv, addr + subpage->base, attrs, buf, len);
2598}
2599
2600static bool subpage_accepts(void *opaque, hwaddr addr,
2601 unsigned len, bool is_write)
2602{
2603 subpage_t *subpage = opaque;
2604#if defined(DEBUG_SUBPAGE)
2605 printf("%s: subpage %p %c len %u addr " TARGET_FMT_plx "\n",
2606 __func__, subpage, is_write ? 'w' : 'r', len, addr);
2607#endif
2608
2609 return flatview_access_valid(subpage->fv, addr + subpage->base,
2610 len, is_write);
2611}
2612
2613static const MemoryRegionOps subpage_ops = {
2614 .read_with_attrs = subpage_read,
2615 .write_with_attrs = subpage_write,
2616 .impl.min_access_size = 1,
2617 .impl.max_access_size = 8,
2618 .valid.min_access_size = 1,
2619 .valid.max_access_size = 8,
2620 .valid.accepts = subpage_accepts,
2621 .endianness = DEVICE_NATIVE_ENDIAN,
2622};
2623
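/* Point every sub-page slot in [start, end] at the given section index. */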
2624static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2625 uint16_t section)
2626{
2627 int idx, eidx;
2628
    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE) {
        return -1;
    }
2631 idx = SUBPAGE_IDX(start);
2632 eidx = SUBPAGE_IDX(end);
2633#if defined(DEBUG_SUBPAGE)
2634 printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
2635 __func__, mmio, start, end, idx, eidx, section);
2636#endif
2637 for (; idx <= eidx; idx++) {
2638 mmio->sub_section[idx] = section;
2639 }
2640
2641 return 0;
2642}
2643
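/* Allocate a subpage covering one target page of the FlatView, with every
 * offset initially routed to the unassigned section.
 */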
2644static subpage_t *subpage_init(FlatView *fv, hwaddr base)
2645{
2646 subpage_t *mmio;
2647
2648 mmio = g_malloc0(sizeof(subpage_t) + TARGET_PAGE_SIZE * sizeof(uint16_t));
2649 mmio->fv = fv;
2650 mmio->base = base;
2651 memory_region_init_io(&mmio->iomem, NULL, &subpage_ops, mmio,
2652 NULL, TARGET_PAGE_SIZE);
2653 mmio->iomem.subpage = true;
2654#if defined(DEBUG_SUBPAGE)
2655 printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
2656 mmio, base, TARGET_PAGE_SIZE);
2657#endif
2658 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, PHYS_SECTION_UNASSIGNED);
2659
2660 return mmio;
2661}
2662
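/* Add a section spanning the whole 64-bit address space for one of the
 * special I/O memory regions and return its section index.
 */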
2663static uint16_t dummy_section(PhysPageMap *map, FlatView *fv, MemoryRegion *mr)
2664{
2665 assert(fv);
2666 MemoryRegionSection section = {
2667 .fv = fv,
2668 .mr = mr,
2669 .offset_within_address_space = 0,
2670 .offset_within_region = 0,
2671 .size = int128_2_64(),
2672 };
2673
    return phys_section_add(map, &section);
2675}
2676
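/* Map an IOTLB entry back to its MemoryRegion: the bits of the index below
 * TARGET_PAGE_BITS select a section in the CPU's current dispatch table.
 */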
2677MemoryRegion *iotlb_to_region(CPUState *cpu, hwaddr index, MemTxAttrs attrs)
2678{
2679 int asidx = cpu_asidx_from_attrs(cpu, attrs);
2680 CPUAddressSpace *cpuas = &cpu->cpu_ases[asidx];
2681 AddressSpaceDispatch *d = atomic_rcu_read(&cpuas->memory_dispatch);
2682 MemoryRegionSection *sections = d->map.sections;
2683
2684 return sections[index & ~TARGET_PAGE_MASK].mr;
2685}
2686
2687static void io_mem_init(void)
2688{
2689 memory_region_init_io(&io_mem_rom, NULL, &unassigned_mem_ops, NULL, NULL, UINT64_MAX);
2690 memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL,
2691 NULL, UINT64_MAX);
2692
    /* io_mem_notdirty calls tb_invalidate_phys_page_fast,
     * which can be called without the iothread mutex.
     */
    memory_region_init_io(&io_mem_notdirty, NULL, &notdirty_mem_ops, NULL,
2697 NULL, UINT64_MAX);
2698 memory_region_clear_global_locking(&io_mem_notdirty);
2699
2700 memory_region_init_io(&io_mem_watch, NULL, &watch_mem_ops, NULL,
2701 NULL, UINT64_MAX);
2702}
2703
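/* The dummy sections must be registered in this exact order so that their
 * indices match the PHYS_SECTION_* constants asserted below.
 */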
2704AddressSpaceDispatch *address_space_dispatch_new(FlatView *fv)
2705{
2706 AddressSpaceDispatch *d = g_new0(AddressSpaceDispatch, 1);
2707 uint16_t n;
2708
2709 n = dummy_section(&d->map, fv, &io_mem_unassigned);
2710 assert(n == PHYS_SECTION_UNASSIGNED);
2711 n = dummy_section(&d->map, fv, &io_mem_notdirty);
2712 assert(n == PHYS_SECTION_NOTDIRTY);
2713 n = dummy_section(&d->map, fv, &io_mem_rom);
2714 assert(n == PHYS_SECTION_ROM);
2715 n = dummy_section(&d->map, fv, &io_mem_watch);
2716 assert(n == PHYS_SECTION_WATCH);
2717
2718 d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .skip = 1 };
2719
2720 return d;
2721}
2722
2723void address_space_dispatch_free(AddressSpaceDispatch *d)
2724{
2725 phys_sections_free(&d->map);
2726 g_free(d);
2727}
2728
2729static void tcg_commit(MemoryListener *listener)
2730{
2731 CPUAddressSpace *cpuas;
2732 AddressSpaceDispatch *d;
2733
    /* Since each CPU stores ram addresses in its TLB cache, we must
     * reset the modified entries.
     */
2736 cpuas = container_of(listener, CPUAddressSpace, tcg_as_listener);
2737 cpu_reloading_memory_map();
2738
    /* The CPU and TLB are protected by the iothread lock.
     * We reload the dispatch pointer now because cpu_reloading_memory_map()
     * may have split the RCU critical section.
     */
2742 d = address_space_to_dispatch(cpuas->as);
2743 atomic_rcu_set(&cpuas->memory_dispatch, d);
2744 tlb_flush(cpuas->cpu);
2745}
2746
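/* Create the root "system" memory and "io" regions and the two global
 * address spaces built on top of them.
 */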
2747static void memory_map_init(void)
2748{
2749 system_memory = g_malloc(sizeof(*system_memory));
2750
2751 memory_region_init(system_memory, NULL, "system", UINT64_MAX);
2752 address_space_init(&address_space_memory, system_memory, "memory");
2753
2754 system_io = g_malloc(sizeof(*system_io));
2755 memory_region_init_io(system_io, NULL, &unassigned_io_ops, NULL, "io",
2756 65536);
2757 address_space_init(&address_space_io, system_io, "I/O");
2758}
2759
2760MemoryRegion *get_system_memory(void)
2761{
2762 return system_memory;
2763}
2764
2765MemoryRegion *get_system_io(void)
2766{
2767 return system_io;
2768}
2769
2770#endif
2771
/* Physical memory access (slow version, mainly for debug). */
2773#if defined(CONFIG_USER_ONLY)
2774int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
2775 uint8_t *buf, int len, int is_write)
2776{
2777 int l, flags;
2778 target_ulong page;
2779 void * p;
2780
2781 while (len > 0) {
2782 page = addr & TARGET_PAGE_MASK;
2783 l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len) {
            l = len;
        }
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID)) {
            return -1;
        }
        if (is_write) {
            if (!(flags & PAGE_WRITE)) {
                return -1;
            }
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0))) {
                return -1;
            }
            memcpy(p, buf, l);
            unlock_user(p, addr, l);
        } else {
            if (!(flags & PAGE_READ)) {
                return -1;
            }
            if (!(p = lock_user(VERIFY_READ, addr, l, 1))) {
                return -1;
            }
            memcpy(buf, p, l);
            unlock_user(p, addr, 0);
2805 }
2806 len -= l;
2807 buf += l;
2808 addr += l;
2809 }
2810 return 0;
2811}
2812
2813#else
2814
2815static void invalidate_and_set_dirty(MemoryRegion *mr, hwaddr addr,
2816 hwaddr length)
2817{
2818 uint8_t dirty_log_mask = memory_region_get_dirty_log_mask(mr);
2819 addr += memory_region_get_ram_addr(mr);
2820
    /* No early return if dirty_log_mask is or becomes 0, because
     * cpu_physical_memory_set_dirty_range will still call
     * xen_modified_memory.
     */
2825 if (dirty_log_mask) {
2826 dirty_log_mask =
2827 cpu_physical_memory_range_includes_clean(addr, length, dirty_log_mask);
2828 }
2829 if (dirty_log_mask & (1 << DIRTY_MEMORY_CODE)) {
2830 assert(tcg_enabled());
2831 tb_lock();
2832 tb_invalidate_phys_range(addr, addr + length);
2833 tb_unlock();
2834 dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
2835 }
2836 cpu_physical_memory_set_dirty_range(addr, length, dirty_log_mask);
2837}
2838
2839static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr)
2840{
2841 unsigned access_size_max = mr->ops->valid.max_access_size;
2842
    /* Regions are assumed to support 1-4 byte accesses unless
     * otherwise specified.
     */
2845 if (access_size_max == 0) {
2846 access_size_max = 4;
2847 }
2848
    /* Bound the maximum access by the alignment of the address.  */
2850 if (!mr->ops->impl.unaligned) {
2851 unsigned align_size_max = addr & -addr;
2852 if (align_size_max != 0 && align_size_max < access_size_max) {
2853 access_size_max = align_size_max;
2854 }
2855 }
2856
    /* Don't attempt accesses larger than the maximum.  */
2858 if (l > access_size_max) {
2859 l = access_size_max;
2860 }
2861 l = pow2floor(l);
2862
2863 return l;
2864}
2865
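/* Take the iothread lock before an MMIO access if the region requires it,
 * and flush pending coalesced MMIO for regions that request that.  Returns
 * true if the caller must drop the lock again afterwards.
 */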
2866static bool prepare_mmio_access(MemoryRegion *mr)
2867{
2868 bool unlocked = !qemu_mutex_iothread_locked();
2869 bool release_lock = false;
2870
2871 if (unlocked && mr->global_locking) {
2872 qemu_mutex_lock_iothread();
2873 unlocked = false;
2874 release_lock = true;
2875 }
2876 if (mr->flush_coalesced_mmio) {
2877 if (unlocked) {
2878 qemu_mutex_lock_iothread();
2879 }
2880 qemu_flush_coalesced_mmio_buffer();
2881 if (unlocked) {
2882 qemu_mutex_unlock_iothread();
2883 }
2884 }
2885
2886 return release_lock;
2887}
2888
/* Called within RCU critical section.  */
2890static MemTxResult flatview_write_continue(FlatView *fv, hwaddr addr,
2891 MemTxAttrs attrs,
2892 const uint8_t *buf,
2893 int len, hwaddr addr1,
2894 hwaddr l, MemoryRegion *mr)
2895{
2896 uint8_t *ptr;
2897 uint64_t val;
2898 MemTxResult result = MEMTX_OK;
2899 bool release_lock = false;
2900
2901 for (;;) {
2902 if (!memory_access_is_direct(mr, true)) {
2903 release_lock |= prepare_mmio_access(mr);
2904 l = memory_access_size(mr, l, addr1);
2905
2906
2907 switch (l) {
2908 case 8:
2909
2910 val = ldq_p(buf);
2911 result |= memory_region_dispatch_write(mr, addr1, val, 8,
2912 attrs);
2913 break;
2914 case 4:
2915
2916 val = (uint32_t)ldl_p(buf);
2917 result |= memory_region_dispatch_write(mr, addr1, val, 4,
2918 attrs);
2919 break;
2920 case 2:
2921
2922 val = lduw_p(buf);
2923 result |= memory_region_dispatch_write(mr, addr1, val, 2,
2924 attrs);
2925 break;
2926 case 1:
2927
2928 val = ldub_p(buf);
2929 result |= memory_region_dispatch_write(mr, addr1, val, 1,
2930 attrs);
2931 break;
2932 default:
2933 abort();
2934 }
2935 } else {
2936
2937 ptr = qemu_ram_ptr_length(mr->ram_block, addr1, &l, false);
2938 memcpy(ptr, buf, l);
2939 invalidate_and_set_dirty(mr, addr1, l);
2940 }
2941
2942 if (release_lock) {
2943 qemu_mutex_unlock_iothread();
2944 release_lock = false;
2945 }
2946
2947 len -= l;
2948 buf += l;
2949 addr += l;
2950
2951 if (!len) {
2952 break;
2953 }
2954
2955 l = len;
2956 mr = flatview_translate(fv, addr, &addr1, &l, true);
2957 }
2958
2959 return result;
2960}
2961
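/* Translate the start of the range, then hand off to
 * flatview_write_continue(), which loops over the remaining sections inside
 * an RCU critical section.
 */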
2962static MemTxResult flatview_write(FlatView *fv, hwaddr addr, MemTxAttrs attrs,
2963 const uint8_t *buf, int len)
2964{
2965 hwaddr l;
2966 hwaddr addr1;
2967 MemoryRegion *mr;
2968 MemTxResult result = MEMTX_OK;
2969
2970 if (len > 0) {
2971 rcu_read_lock();
2972 l = len;
2973 mr = flatview_translate(fv, addr, &addr1, &l, true);
2974 result = flatview_write_continue(fv, addr, attrs, buf, len,
2975 addr1, l, mr);
2976 rcu_read_unlock();
2977 }
2978
2979 return result;
2980}
2981
2982MemTxResult address_space_write(AddressSpace *as, hwaddr addr,
2983 MemTxAttrs attrs,
2984 const uint8_t *buf, int len)
2985{
2986 return flatview_write(address_space_to_flatview(as), addr, attrs, buf, len);
2987}
2988
/* Called within RCU critical section.  */
2990MemTxResult flatview_read_continue(FlatView *fv, hwaddr addr,
2991 MemTxAttrs attrs, uint8_t *buf,
2992 int len, hwaddr addr1, hwaddr l,
2993 MemoryRegion *mr)
2994{
2995 uint8_t *ptr;
2996 uint64_t val;
2997 MemTxResult result = MEMTX_OK;
2998 bool release_lock = false;
2999
3000 for (;;) {
3001 if (!memory_access_is_direct(mr, false)) {
3002
3003 release_lock |= prepare_mmio_access(mr);
3004 l = memory_access_size(mr, l, addr1);
3005 switch (l) {
3006 case 8:
3007
3008 result |= memory_region_dispatch_read(mr, addr1, &val, 8,
3009 attrs);
3010 stq_p(buf, val);
3011 break;
3012 case 4:
3013
3014 result |= memory_region_dispatch_read(mr, addr1, &val, 4,
3015 attrs);
3016 stl_p(buf, val);
3017 break;
3018 case 2:
3019
3020 result |= memory_region_dispatch_read(mr, addr1, &val, 2,
3021 attrs);
3022 stw_p(buf, val);
3023 break;
3024 case 1:
3025
3026 result |= memory_region_dispatch_read(mr, addr1, &val, 1,
3027 attrs);
3028 stb_p(buf, val);
3029 break;
3030 default:
3031 abort();
3032 }
3033 } else {
3034
3035 ptr = qemu_ram_ptr_length(mr->ram_block, addr1, &l, false);
3036 memcpy(buf, ptr, l);
3037 }
3038
3039 if (release_lock) {
3040 qemu_mutex_unlock_iothread();
3041 release_lock = false;
3042 }
3043
3044 len -= l;
3045 buf += l;
3046 addr += l;
3047
3048 if (!len) {
3049 break;
3050 }
3051
3052 l = len;
3053 mr = flatview_translate(fv, addr, &addr1, &l, false);
3054 }
3055
3056 return result;
3057}
3058
3059MemTxResult flatview_read_full(FlatView *fv, hwaddr addr,
3060 MemTxAttrs attrs, uint8_t *buf, int len)
3061{
3062 hwaddr l;
3063 hwaddr addr1;
3064 MemoryRegion *mr;
3065 MemTxResult result = MEMTX_OK;
3066
3067 if (len > 0) {
3068 rcu_read_lock();
3069 l = len;
3070 mr = flatview_translate(fv, addr, &addr1, &l, false);
3071 result = flatview_read_continue(fv, addr, attrs, buf, len,
3072 addr1, l, mr);
3073 rcu_read_unlock();
3074 }
3075
3076 return result;
3077}
3078
3079static MemTxResult flatview_rw(FlatView *fv, hwaddr addr, MemTxAttrs attrs,
3080 uint8_t *buf, int len, bool is_write)
3081{
3082 if (is_write) {
3083 return flatview_write(fv, addr, attrs, (uint8_t *)buf, len);
3084 } else {
3085 return flatview_read(fv, addr, attrs, (uint8_t *)buf, len);
3086 }
3087}
3088
3089MemTxResult address_space_rw(AddressSpace *as, hwaddr addr,
3090 MemTxAttrs attrs, uint8_t *buf,
3091 int len, bool is_write)
3092{
3093 return flatview_rw(address_space_to_flatview(as),
3094 addr, attrs, buf, len, is_write);
3095}
3096
3097void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
3098 int len, int is_write)
3099{
3100 address_space_rw(&address_space_memory, addr, MEMTXATTRS_UNSPECIFIED,
3101 buf, len, is_write);
3102}
3103
3104enum write_rom_type {
3105 WRITE_DATA,
3106 FLUSH_CACHE,
3107};
3108
3109static inline void cpu_physical_memory_write_rom_internal(AddressSpace *as,
3110 hwaddr addr, const uint8_t *buf, int len, enum write_rom_type type)
3111{
3112 hwaddr l;
3113 uint8_t *ptr;
3114 hwaddr addr1;
3115 MemoryRegion *mr;
3116
3117 rcu_read_lock();
3118 while (len > 0) {
3119 l = len;
3120 mr = address_space_translate(as, addr, &addr1, &l, true);
3121
3122 if (!(memory_region_is_ram(mr) ||
3123 memory_region_is_romd(mr))) {
3124 l = memory_access_size(mr, l, addr1);
3125 } else {
3126
3127 ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
3128 switch (type) {
3129 case WRITE_DATA:
3130 memcpy(ptr, buf, l);
3131 invalidate_and_set_dirty(mr, addr1, l);
3132 break;
3133 case FLUSH_CACHE:
3134 flush_icache_range((uintptr_t)ptr, (uintptr_t)ptr + l);
3135 break;
3136 }
3137 }
3138 len -= l;
3139 buf += l;
3140 addr += l;
3141 }
3142 rcu_read_unlock();
3143}
3144
/* Used for ROM loading: can write to both RAM and ROM. */
3146void cpu_physical_memory_write_rom(AddressSpace *as, hwaddr addr,
3147 const uint8_t *buf, int len)
3148{
3149 cpu_physical_memory_write_rom_internal(as, addr, buf, len, WRITE_DATA);
3150}
3151
3152void cpu_flush_icache_range(hwaddr start, int len)
3153{
    /*
     * This function should do the same thing as an icache flush that was
     * triggered from within the guest.  For TCG we are always cache coherent,
     * so there is nothing to do.  For KVM / Xen we need to flush the host's
     * instruction cache at least.
     */
3160 if (tcg_enabled()) {
3161 return;
3162 }
3163
3164 cpu_physical_memory_write_rom_internal(&address_space_memory,
3165 start, NULL, len, FLUSH_CACHE);
3166}
3167
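/* A single global bounce buffer is used by address_space_map() when the
 * target cannot be mapped directly (e.g. MMIO): data is staged in a
 * temporary allocation and, for writes, flushed back by
 * address_space_unmap().
 */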
3168typedef struct {
3169 MemoryRegion *mr;
3170 void *buffer;
3171 hwaddr addr;
3172 hwaddr len;
3173 bool in_use;
3174} BounceBuffer;
3175
3176static BounceBuffer bounce;
3177
3178typedef struct MapClient {
3179 QEMUBH *bh;
3180 QLIST_ENTRY(MapClient) link;
3181} MapClient;
3182
3183QemuMutex map_client_list_lock;
3184static QLIST_HEAD(map_client_list, MapClient) map_client_list
3185 = QLIST_HEAD_INITIALIZER(map_client_list);
3186
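/* Called with map_client_list_lock held. */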
3187static void cpu_unregister_map_client_do(MapClient *client)
3188{
3189 QLIST_REMOVE(client, link);
3190 g_free(client);
3191}
3192
3193static void cpu_notify_map_clients_locked(void)
3194{
3195 MapClient *client;
3196
3197 while (!QLIST_EMPTY(&map_client_list)) {
3198 client = QLIST_FIRST(&map_client_list);
3199 qemu_bh_schedule(client->bh);
3200 cpu_unregister_map_client_do(client);
3201 }
3202}
3203
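/* Register a bottom half to be scheduled once the bounce buffer becomes
 * available again; if it is free right now, all waiters are scheduled
 * immediately.
 */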
3204void cpu_register_map_client(QEMUBH *bh)
3205{
3206 MapClient *client = g_malloc(sizeof(*client));
3207
3208 qemu_mutex_lock(&map_client_list_lock);
3209 client->bh = bh;
3210 QLIST_INSERT_HEAD(&map_client_list, client, link);
3211 if (!atomic_read(&bounce.in_use)) {
3212 cpu_notify_map_clients_locked();
3213 }
3214 qemu_mutex_unlock(&map_client_list_lock);
3215}
3216
3217void cpu_exec_init_all(void)
3218{
3219 qemu_mutex_init(&ram_list.mutex);
3220
    /* The data structures we set up here depend on knowing the page size,
     * so no more changes can be made after this point.
     * In an ideal world, nothing we did before we had finished the
     * machine setup would care about the target page size, and we could
     * do this much later, rather than requiring board models to state
     * up front what their requirements are.
     */
3227 finalize_target_page_bits();
3228 io_mem_init();
3229 memory_map_init();
3230 qemu_mutex_init(&map_client_list_lock);
3231}
3232
3233void cpu_unregister_map_client(QEMUBH *bh)
3234{
3235 MapClient *client;
3236
3237 qemu_mutex_lock(&map_client_list_lock);
3238 QLIST_FOREACH(client, &map_client_list, link) {
3239 if (client->bh == bh) {
3240 cpu_unregister_map_client_do(client);
3241 break;
3242 }
3243 }
3244 qemu_mutex_unlock(&map_client_list_lock);
3245}
3246
3247static void cpu_notify_map_clients(void)
3248{
3249 qemu_mutex_lock(&map_client_list_lock);
3250 cpu_notify_map_clients_locked();
3251 qemu_mutex_unlock(&map_client_list_lock);
3252}
3253
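/* Check that the whole range can be accessed: direct (RAM) accesses are
 * always valid, while MMIO accesses are checked against the region's valid
 * constraints at the size the access would actually use.
 */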
3254static bool flatview_access_valid(FlatView *fv, hwaddr addr, int len,
3255 bool is_write)
3256{
3257 MemoryRegion *mr;
3258 hwaddr l, xlat;
3259
3260 rcu_read_lock();
3261 while (len > 0) {
3262 l = len;
3263 mr = flatview_translate(fv, addr, &xlat, &l, is_write);
3264 if (!memory_access_is_direct(mr, is_write)) {
3265 l = memory_access_size(mr, l, addr);
3266 if (!memory_region_access_valid(mr, xlat, l, is_write)) {
3267 rcu_read_unlock();
3268 return false;
3269 }
3270 }
3271
3272 len -= l;
3273 addr += l;
3274 }
3275 rcu_read_unlock();
3276 return true;
3277}
3278
3279bool address_space_access_valid(AddressSpace *as, hwaddr addr,
3280 int len, bool is_write)
3281{
3282 return flatview_access_valid(address_space_to_flatview(as),
3283 addr, len, is_write);
3284}
3285
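/* Extend an initial translation for as long as further translations keep
 * landing contiguously in the same MemoryRegion; returns how many bytes of
 * target_len could be covered that way.
 */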
3286static hwaddr
3287flatview_extend_translation(FlatView *fv, hwaddr addr,
3288 hwaddr target_len,
3289 MemoryRegion *mr, hwaddr base, hwaddr len,
3290 bool is_write)
3291{
3292 hwaddr done = 0;
3293 hwaddr xlat;
3294 MemoryRegion *this_mr;
3295
3296 for (;;) {
3297 target_len -= len;
3298 addr += len;
3299 done += len;
3300 if (target_len == 0) {
3301 return done;
3302 }
3303
3304 len = target_len;
3305 this_mr = flatview_translate(fv, addr, &xlat,
3306 &len, is_write);
3307 if (this_mr != mr || xlat != base + done) {
3308 return done;
3309 }
3310 }
3311}
3312
/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 */
3320void *address_space_map(AddressSpace *as,
3321 hwaddr addr,
3322 hwaddr *plen,
3323 bool is_write)
3324{
3325 hwaddr len = *plen;
3326 hwaddr l, xlat;
3327 MemoryRegion *mr;
3328 void *ptr;
3329 FlatView *fv = address_space_to_flatview(as);
3330
3331 if (len == 0) {
3332 return NULL;
3333 }
3334
3335 l = len;
3336 rcu_read_lock();
3337 mr = flatview_translate(fv, addr, &xlat, &l, is_write);
3338
3339 if (!memory_access_is_direct(mr, is_write)) {
3340 if (atomic_xchg(&bounce.in_use, true)) {
3341 rcu_read_unlock();
3342 return NULL;
3343 }
        /* Avoid unbounded allocations */
3345 l = MIN(l, TARGET_PAGE_SIZE);
3346 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, l);
3347 bounce.addr = addr;
3348 bounce.len = l;
3349
3350 memory_region_ref(mr);
3351 bounce.mr = mr;
3352 if (!is_write) {
3353 flatview_read(fv, addr, MEMTXATTRS_UNSPECIFIED,
3354 bounce.buffer, l);
3355 }
3356
3357 rcu_read_unlock();
3358 *plen = l;
3359 return bounce.buffer;
3360 }
3361
3362
3363 memory_region_ref(mr);
3364 *plen = flatview_extend_translation(fv, addr, len, mr, xlat,
3365 l, is_write);
3366 ptr = qemu_ram_ptr_length(mr->ram_block, xlat, plen, true);
3367 rcu_read_unlock();
3368
3369 return ptr;
3370}
3371
/* Unmaps a memory region previously mapped by address_space_map().
 * Will also mark the memory as dirty if is_write == 1.  access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
3376void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
3377 int is_write, hwaddr access_len)
3378{
3379 if (buffer != bounce.buffer) {
3380 MemoryRegion *mr;
3381 ram_addr_t addr1;
3382
3383 mr = memory_region_from_host(buffer, &addr1);
3384 assert(mr != NULL);
3385 if (is_write) {
3386 invalidate_and_set_dirty(mr, addr1, access_len);
3387 }
3388 if (xen_enabled()) {
3389 xen_invalidate_map_cache_entry(buffer);
3390 }
3391 memory_region_unref(mr);
3392 return;
3393 }
3394 if (is_write) {
3395 address_space_write(as, bounce.addr, MEMTXATTRS_UNSPECIFIED,
3396 bounce.buffer, access_len);
3397 }
3398 qemu_vfree(bounce.buffer);
3399 bounce.buffer = NULL;
3400 memory_region_unref(bounce.mr);
3401 atomic_mb_set(&bounce.in_use, false);
3402 cpu_notify_map_clients();
3403}
3404
3405void *cpu_physical_memory_map(hwaddr addr,
3406 hwaddr *plen,
3407 int is_write)
3408{
3409 return address_space_map(&address_space_memory, addr, plen, is_write);
3410}
3411
3412void cpu_physical_memory_unmap(void *buffer, hwaddr len,
3413 int is_write, hwaddr access_len)
3414{
    address_space_unmap(&address_space_memory, buffer, len, is_write,
                        access_len);
3416}
3417
3418#define ARG1_DECL AddressSpace *as
3419#define ARG1 as
3420#define SUFFIX
3421#define TRANSLATE(...) address_space_translate(as, __VA_ARGS__)
3422#define IS_DIRECT(mr, is_write) memory_access_is_direct(mr, is_write)
3423#define MAP_RAM(mr, ofs) qemu_map_ram_ptr((mr)->ram_block, ofs)
3424#define INVALIDATE(mr, ofs, len) invalidate_and_set_dirty(mr, ofs, len)
3425#define RCU_READ_LOCK(...) rcu_read_lock()
3426#define RCU_READ_UNLOCK(...) rcu_read_unlock()
3427#include "memory_ldst.inc.c"
3428
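/* Trivial MemoryRegionCache implementation: only the address space, offset
 * and length are recorded, and the _cached accessors below re-translate on
 * every access.
 */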
3429int64_t address_space_cache_init(MemoryRegionCache *cache,
3430 AddressSpace *as,
3431 hwaddr addr,
3432 hwaddr len,
3433 bool is_write)
3434{
3435 cache->len = len;
3436 cache->as = as;
3437 cache->xlat = addr;
3438 return len;
3439}
3440
3441void address_space_cache_invalidate(MemoryRegionCache *cache,
3442 hwaddr addr,
3443 hwaddr access_len)
3444{
3445}
3446
3447void address_space_cache_destroy(MemoryRegionCache *cache)
3448{
3449 cache->as = NULL;
3450}
3451
3452#define ARG1_DECL MemoryRegionCache *cache
3453#define ARG1 cache
3454#define SUFFIX _cached
3455#define TRANSLATE(addr, ...) \
3456 address_space_translate(cache->as, cache->xlat + (addr), __VA_ARGS__)
3457#define IS_DIRECT(mr, is_write) true
3458#define MAP_RAM(mr, ofs) qemu_map_ram_ptr((mr)->ram_block, ofs)
3459#define INVALIDATE(mr, ofs, len) invalidate_and_set_dirty(mr, ofs, len)
3460#define RCU_READ_LOCK() rcu_read_lock()
3461#define RCU_READ_UNLOCK() rcu_read_unlock()
3462#include "memory_ldst.inc.c"
3463
/* Virtual memory access for debug (includes writing to ROM). */
3465int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
3466 uint8_t *buf, int len, int is_write)
3467{
3468 int l;
3469 hwaddr phys_addr;
3470 target_ulong page;
3471
3472 cpu_synchronize_state(cpu);
3473 while (len > 0) {
3474 int asidx;
3475 MemTxAttrs attrs;
3476
3477 page = addr & TARGET_PAGE_MASK;
3478 phys_addr = cpu_get_phys_page_attrs_debug(cpu, page, &attrs);
3479 asidx = cpu_asidx_from_attrs(cpu, attrs);
3480
        if (phys_addr == -1) {
            return -1;
        }
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len) {
            l = len;
        }
3486 phys_addr += (addr & ~TARGET_PAGE_MASK);
3487 if (is_write) {
3488 cpu_physical_memory_write_rom(cpu->cpu_ases[asidx].as,
3489 phys_addr, buf, l);
3490 } else {
3491 address_space_rw(cpu->cpu_ases[asidx].as, phys_addr,
3492 MEMTXATTRS_UNSPECIFIED,
3493 buf, l, 0);
3494 }
3495 len -= l;
3496 buf += l;
3497 addr += l;
3498 }
3499 return 0;
3500}
3501
/*
 * Accessors that expose the target page geometry as plain functions, so that
 * target-independent code can query it without the TARGET_PAGE_* macros.
 */
3506size_t qemu_target_page_size(void)
3507{
3508 return TARGET_PAGE_SIZE;
3509}
3510
3511int qemu_target_page_bits(void)
3512{
3513 return TARGET_PAGE_BITS;
3514}
3515
3516int qemu_target_page_bits_min(void)
3517{
3518 return TARGET_PAGE_BITS_MIN;
3519}
3520#endif
3521
/* Report whether the target is compiled as big-endian. */
3526bool target_words_bigendian(void);
3527bool target_words_bigendian(void)
3528{
3529#if defined(TARGET_WORDS_BIGENDIAN)
3530 return true;
3531#else
3532 return false;
3533#endif
3534}
3535
3536#ifndef CONFIG_USER_ONLY
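/* Return true if the given physical address is backed by MMIO rather than
 * RAM or ROM-device memory.
 */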
3537bool cpu_physical_memory_is_io(hwaddr phys_addr)
3538{
    MemoryRegion *mr;
3540 hwaddr l = 1;
3541 bool res;
3542
3543 rcu_read_lock();
3544 mr = address_space_translate(&address_space_memory,
3545 phys_addr, &phys_addr, &l, false);
3546
3547 res = !(memory_region_is_ram(mr) || memory_region_is_romd(mr));
3548 rcu_read_unlock();
3549 return res;
3550}
3551
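/* Walk all RAM blocks under RCU, invoking func for each; iteration stops
 * early when a callback returns a non-zero value, which is passed back to
 * the caller.
 */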
3552int qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
3553{
3554 RAMBlock *block;
3555 int ret = 0;
3556
3557 rcu_read_lock();
3558 RAMBLOCK_FOREACH(block) {
3559 ret = func(block->idstr, block->host, block->offset,
3560 block->used_length, opaque);
3561 if (ret) {
3562 break;
3563 }
3564 }
3565 rcu_read_unlock();
3566 return ret;
3567}
3568
/*
 * Discard the host pages backing the range [start, start + length) of the
 * RAM block, releasing the memory back to the host: madvise(MADV_DONTNEED)
 * for blocks using the host page size, fallocate(FALLOC_FL_PUNCH_HOLE) for
 * blocks backed by larger (huge) pages.
 * Returns 0 on success, negative on failure.
 */
3577int ram_block_discard_range(RAMBlock *rb, uint64_t start, size_t length)
3578{
3579 int ret = -1;
3580
3581 uint8_t *host_startaddr = rb->host + start;
3582
3583 if ((uintptr_t)host_startaddr & (rb->page_size - 1)) {
3584 error_report("ram_block_discard_range: Unaligned start address: %p",
3585 host_startaddr);
3586 goto err;
3587 }
3588
3589 if ((start + length) <= rb->used_length) {
3590 uint8_t *host_endaddr = host_startaddr + length;
3591 if ((uintptr_t)host_endaddr & (rb->page_size - 1)) {
3592 error_report("ram_block_discard_range: Unaligned end address: %p",
3593 host_endaddr);
3594 goto err;
3595 }
3596
        errno = ENOTSUP; /* in case no discard mechanism is compiled in */
3598
3599 if (rb->page_size == qemu_host_page_size) {
3600#if defined(CONFIG_MADVISE)
            /* Note: we need the MADV_DONTNEED behaviour of definitely
             * freeing the page.
             */
3604 ret = madvise(host_startaddr, length, MADV_DONTNEED);
3605#endif
3606 } else {
            /* Huge page case: MADV_DONTNEED is not usable here, but the
             * equivalent effect can be had by punching a hole in the
             * backing file with FALLOC_FL_PUNCH_HOLE.
             */
3611#ifdef CONFIG_FALLOCATE_PUNCH_HOLE
3612 ret = fallocate(rb->fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
3613 start, length);
3614#endif
3615 }
3616 if (ret) {
3617 ret = -errno;
3618 error_report("ram_block_discard_range: Failed to discard range "
3619 "%s:%" PRIx64 " +%zx (%d)",
3620 rb->idstr, start, length, ret);
3621 }
3622 } else {
3623 error_report("ram_block_discard_range: Overrun block '%s' (%" PRIu64
3624 "/%zx/" RAM_ADDR_FMT")",
3625 rb->idstr, start, length, rb->used_length);
3626 }
3627
3628err:
3629 return ret;
3630}
3631
3632#endif
3633
3634void page_size_init(void)
3635{
    /* NOTE: we can always suppose that qemu_host_page_size >=
     * TARGET_PAGE_SIZE */
3638 qemu_real_host_page_size = getpagesize();
3639 qemu_real_host_page_mask = -(intptr_t)qemu_real_host_page_size;
3640 if (qemu_host_page_size == 0) {
3641 qemu_host_page_size = qemu_real_host_page_size;
3642 }
3643 if (qemu_host_page_size < TARGET_PAGE_SIZE) {
3644 qemu_host_page_size = TARGET_PAGE_SIZE;
3645 }
3646 qemu_host_page_mask = -(intptr_t)qemu_host_page_size;
3647}
3648