16#include "qemu/osdep.h"
17#include "qemu/log.h"
18#include "qapi/error.h"
19#include "cpu.h"
20#include "exec/memory.h"
21#include "exec/address-spaces.h"
22#include "qapi/visitor.h"
23#include "qemu/bitops.h"
24#include "qemu/error-report.h"
25#include "qemu/main-loop.h"
26#include "qemu/qemu-print.h"
27#include "qom/object.h"
28#include "trace-root.h"
29
30#include "exec/memory-internal.h"
31#include "exec/ram_addr.h"
32#include "sysemu/kvm.h"
33#include "sysemu/runstate.h"
34#include "sysemu/tcg.h"
35#include "sysemu/accel.h"
36#include "hw/boards.h"
37#include "migration/vmstate.h"
38
39#include "hw/fdt_generic_util.h"
40#include "hw/qdev-core.h"
41
42
43
44static unsigned memory_region_transaction_depth;
45static bool memory_region_update_pending;
46static bool ioeventfd_update_pending;
47bool global_dirty_log;
48
49static QTAILQ_HEAD(, MemoryListener) memory_listeners
50 = QTAILQ_HEAD_INITIALIZER(memory_listeners);
51
52static QTAILQ_HEAD(, AddressSpace) address_spaces
53 = QTAILQ_HEAD_INITIALIZER(address_spaces);
54
55static GHashTable *flat_views;
56
57typedef struct AddrRange AddrRange;
58
59static void memory_region_update_container_subregions(MemoryRegion *subregion);
60static void memory_region_readd_subregion(MemoryRegion *mr);
61
62
63
64
65
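/*
 * AddrRange describes a [start, start + size) span of the address space.
 * Int128 arithmetic is used so that ranges reaching the top of the 64-bit
 * space do not overflow.
 */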
66struct AddrRange {
67 Int128 start;
68 Int128 size;
69};
70
71static AddrRange addrrange_make(Int128 start, Int128 size)
72{
73 return (AddrRange) { start, size };
74}
75
76static bool addrrange_equal(AddrRange r1, AddrRange r2)
77{
78 return int128_eq(r1.start, r2.start) && int128_eq(r1.size, r2.size);
79}
80
81static Int128 addrrange_end(AddrRange r)
82{
83 return int128_add(r.start, r.size);
84}
85
86static AddrRange addrrange_shift(AddrRange range, Int128 delta)
87{
88 int128_addto(&range.start, delta);
89 return range;
90}
91
92static bool addrrange_contains(AddrRange range, Int128 addr)
93{
94 return int128_ge(addr, range.start)
95 && int128_lt(addr, addrrange_end(range));
96}
97
98static bool addrrange_intersects(AddrRange r1, AddrRange r2)
99{
100 return addrrange_contains(r1, r2.start)
101 || addrrange_contains(r2, r1.start);
102}
103
104static AddrRange addrrange_intersection(AddrRange r1, AddrRange r2)
105{
106 Int128 start = int128_max(r1.start, r2.start);
107 Int128 end = int128_min(addrrange_end(r1), addrrange_end(r2));
108 return addrrange_make(start, int128_sub(end, start));
109}
110
111enum ListenerDirection { Forward, Reverse };
112
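/*
 * Invoke a callback on every registered MemoryListener, walking the global
 * list either forwards or in reverse as requested by _direction.
 */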
113#define MEMORY_LISTENER_CALL_GLOBAL(_callback, _direction, _args...) \
114 do { \
115 MemoryListener *_listener; \
116 \
117 switch (_direction) { \
118 case Forward: \
119 QTAILQ_FOREACH(_listener, &memory_listeners, link) { \
120 if (_listener->_callback) { \
121 _listener->_callback(_listener, ##_args); \
122 } \
123 } \
124 break; \
125 case Reverse: \
126 QTAILQ_FOREACH_REVERSE(_listener, &memory_listeners, link) { \
127 if (_listener->_callback) { \
128 _listener->_callback(_listener, ##_args); \
129 } \
130 } \
131 break; \
132 default: \
133 abort(); \
134 } \
135 } while (0)
136
137#define MEMORY_LISTENER_CALL(_as, _callback, _direction, _section, _args...) \
138 do { \
139 MemoryListener *_listener; \
140 \
141 switch (_direction) { \
142 case Forward: \
143 QTAILQ_FOREACH(_listener, &(_as)->listeners, link_as) { \
144 if (_listener->_callback) { \
145 _listener->_callback(_listener, _section, ##_args); \
146 } \
147 } \
148 break; \
149 case Reverse: \
150 QTAILQ_FOREACH_REVERSE(_listener, &(_as)->listeners, link_as) { \
151 if (_listener->_callback) { \
152 _listener->_callback(_listener, _section, ##_args); \
153 } \
154 } \
155 break; \
156 default: \
157 abort(); \
158 } \
159 } while (0)
160
161
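/*
 * Build a MemoryRegionSection for a FlatRange and hand it to the listeners
 * of the owning address space.
 */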
162#define MEMORY_LISTENER_UPDATE_REGION(fr, as, dir, callback, _args...) \
163 do { \
164 MemoryRegionSection mrs = section_from_flat_range(fr, \
165 address_space_to_flatview(as)); \
166 MEMORY_LISTENER_CALL(as, callback, dir, &mrs, ##_args); \
167 } while(0)
168
169struct CoalescedMemoryRange {
170 AddrRange addr;
171 QTAILQ_ENTRY(CoalescedMemoryRange) link;
172};
173
174struct MemoryRegionIoeventfd {
175 AddrRange addr;
176 bool match_data;
177 uint64_t data;
178 EventNotifier *e;
179};
180
181static bool memory_region_ioeventfd_before(MemoryRegionIoeventfd *a,
182 MemoryRegionIoeventfd *b)
183{
184 if (int128_lt(a->addr.start, b->addr.start)) {
185 return true;
186 } else if (int128_gt(a->addr.start, b->addr.start)) {
187 return false;
188 } else if (int128_lt(a->addr.size, b->addr.size)) {
189 return true;
190 } else if (int128_gt(a->addr.size, b->addr.size)) {
191 return false;
192 } else if (a->match_data < b->match_data) {
193 return true;
194 } else if (a->match_data > b->match_data) {
195 return false;
196 } else if (a->match_data) {
197 if (a->data < b->data) {
198 return true;
199 } else if (a->data > b->data) {
200 return false;
201 }
202 }
203 if (a->e < b->e) {
204 return true;
205 } else if (a->e > b->e) {
206 return false;
207 }
208 return false;
209}
210
211static bool memory_region_ioeventfd_equal(MemoryRegionIoeventfd *a,
212 MemoryRegionIoeventfd *b)
213{
214 return !memory_region_ioeventfd_before(a, b)
215 && !memory_region_ioeventfd_before(b, a);
216}
217
218
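/*
 * FlatRange is a slice of a memory region as seen in a FlatView; addr is
 * absolute within the address space.
 */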
219struct FlatRange {
220 MemoryRegion *mr;
221 hwaddr offset_in_region;
222 AddrRange addr;
223 uint8_t dirty_log_mask;
224 bool romd_mode;
225 bool readonly;
226 bool nonvolatile;
227};
228
229#define FOR_EACH_FLAT_RANGE(var, view) \
230 for (var = (view)->ranges; var < (view)->ranges + (view)->nr; ++var)
231
232static inline MemoryRegionSection
233section_from_flat_range(FlatRange *fr, FlatView *fv)
234{
235 return (MemoryRegionSection) {
236 .mr = fr->mr,
237 .fv = fv,
238 .offset_within_region = fr->offset_in_region,
239 .size = fr->addr.size,
240 .offset_within_address_space = int128_get64(fr->addr.start),
241 .readonly = fr->readonly,
242 .nonvolatile = fr->nonvolatile,
243 };
244}
245
246static bool flatrange_equal(FlatRange *a, FlatRange *b)
247{
248 return a->mr == b->mr
249 && addrrange_equal(a->addr, b->addr)
250 && a->offset_in_region == b->offset_in_region
251 && a->romd_mode == b->romd_mode
252 && a->readonly == b->readonly
253 && a->nonvolatile == b->nonvolatile;
254}
255
256static FlatView *flatview_new(MemoryRegion *mr_root)
257{
258 FlatView *view;
259
260 view = g_new0(FlatView, 1);
261 view->ref = 1;
262 view->root = mr_root;
263 memory_region_ref(mr_root);
264 trace_flatview_new(view, mr_root);
265
266 return view;
267}
268
269
270
271
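/*
 * Insert a range at the given position, growing the array as needed.
 * The caller is responsible for keeping the view sorted.
 */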
272static void flatview_insert(FlatView *view, unsigned pos, FlatRange *range)
273{
274 if (view->nr == view->nr_allocated) {
275 view->nr_allocated = MAX(2 * view->nr, 10);
276 view->ranges = g_realloc(view->ranges,
277 view->nr_allocated * sizeof(*view->ranges));
278 }
279 memmove(view->ranges + pos + 1, view->ranges + pos,
280 (view->nr - pos) * sizeof(FlatRange));
281 view->ranges[pos] = *range;
282 memory_region_ref(range->mr);
283 ++view->nr;
284}
285
286static void flatview_destroy(FlatView *view)
287{
288 int i;
289
290 trace_flatview_destroy(view, view->root);
291 if (view->dispatch) {
292 address_space_dispatch_free(view->dispatch);
293 }
294 for (i = 0; i < view->nr; i++) {
295 memory_region_unref(view->ranges[i].mr);
296 }
297 g_free(view->ranges);
298 memory_region_unref(view->root);
299 g_free(view);
300}
301
302static bool flatview_ref(FlatView *view)
303{
304 return atomic_fetch_inc_nonzero(&view->ref) > 0;
305}
306
307void flatview_unref(FlatView *view)
308{
309 if (atomic_fetch_dec(&view->ref) == 1) {
310 trace_flatview_destroy_rcu(view, view->root);
311 assert(view->root);
312 call_rcu(view, flatview_destroy, rcu);
313 }
314}
315
316static bool can_merge(FlatRange *r1, FlatRange *r2)
317{
318 return int128_eq(addrrange_end(r1->addr), r2->addr.start)
319 && r1->mr == r2->mr
320 && int128_eq(int128_add(int128_make64(r1->offset_in_region),
321 r1->addr.size),
322 int128_make64(r2->offset_in_region))
323 && r1->dirty_log_mask == r2->dirty_log_mask
324 && r1->romd_mode == r2->romd_mode
325 && r1->readonly == r2->readonly
326 && r1->nonvolatile == r2->nonvolatile;
327}
328
329
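/* Merge adjacent ranges that map contiguous pieces of the same region. */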
330static void flatview_simplify(FlatView *view)
331{
332 unsigned i, j, k;
333
334 i = 0;
335 while (i < view->nr) {
336 j = i + 1;
337 while (j < view->nr
338 && can_merge(&view->ranges[j-1], &view->ranges[j])) {
339 int128_addto(&view->ranges[i].addr.size, view->ranges[j].addr.size);
340 ++j;
341 }
342 ++i;
343 for (k = i; k < j; k++) {
344 memory_region_unref(view->ranges[k].mr);
345 }
346 memmove(&view->ranges[i], &view->ranges[j],
347 (view->nr - j) * sizeof(view->ranges[j]));
348 view->nr -= j - i;
349 }
350}
351
352static bool memory_region_big_endian(MemoryRegion *mr)
353{
354#ifdef TARGET_WORDS_BIGENDIAN
355 return mr->ops->endianness != DEVICE_LITTLE_ENDIAN;
356#else
357 return mr->ops->endianness == DEVICE_BIG_ENDIAN;
358#endif
359}
360
361static void adjust_endianness(MemoryRegion *mr, uint64_t *data, MemOp op)
362{
363 if ((op & MO_BSWAP) != devend_memop(mr->ops->endianness)) {
364 switch (op & MO_SIZE) {
365 case MO_8:
366 break;
367 case MO_16:
368 *data = bswap16(*data);
369 break;
370 case MO_32:
371 *data = bswap32(*data);
372 break;
373 case MO_64:
374 *data = bswap64(*data);
375 break;
376 default:
377 g_assert_not_reached();
378 }
379 }
380}
381
382static inline void memory_region_shift_read_access(uint64_t *value,
383 signed shift,
384 uint64_t mask,
385 uint64_t tmp)
386{
387 if (shift >= 0) {
388 *value |= (tmp & mask) << shift;
389 } else {
390 *value |= (tmp & mask) >> -shift;
391 }
392}
393
394static inline uint64_t memory_region_shift_write_access(uint64_t *value,
395 signed shift,
396 uint64_t mask)
397{
398 uint64_t tmp;
399
400 if (shift >= 0) {
401 tmp = (*value >> shift) & mask;
402 } else {
403 tmp = (*value << -shift) & mask;
404 }
405
406 return tmp;
407}
408
409static hwaddr memory_region_to_absolute_addr(MemoryRegion *mr, hwaddr offset)
410{
411 MemoryRegion *root;
412 hwaddr abs_addr = offset;
413
414 abs_addr += mr->addr;
415 for (root = mr; root->container; ) {
416 root = root->container;
417 abs_addr += root->addr;
418 }
419
420 return abs_addr;
421}
422
423static int get_cpu_index(void)
424{
425 if (current_cpu) {
426 return current_cpu->cpu_index;
427 }
428 return -1;
429}
430
431
432static MemTxResult memory_region_read_accessor_attr(MemoryRegion *mr,
433 hwaddr addr,
434 uint64_t *value,
435 unsigned size,
436 signed shift,
437 uint64_t mask,
438 MemTxAttrs attrs)
439{
440 MemoryTransaction tr = {{0}};
441 MemTxResult ret;
442
443 if (mr->flush_coalesced_mmio) {
444 qemu_flush_coalesced_mmio_buffer();
445 }
446
447 tr.opaque = mr->opaque;
448 tr.addr = addr;
449 tr.size = size;
450 tr.attr = attrs;
451 ret = mr->ops->access(&tr);
452 *value |= (tr.data.u64 & mask) << shift;
453
454 return ret;
455}
456
457static MemTxResult memory_region_read_accessor(MemoryRegion *mr,
458 hwaddr addr,
459 uint64_t *value,
460 unsigned size,
461 signed shift,
462 uint64_t mask,
463 MemTxAttrs attrs)
464{
465 uint64_t tmp;
466
467 tmp = mr->ops->read(mr->opaque, addr, size);
468 if (mr->subpage) {
469 trace_memory_region_subpage_read(get_cpu_index(), mr, addr, tmp, size);
470 } else if (trace_event_get_state_backends(TRACE_MEMORY_REGION_OPS_READ)) {
471 hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
472 trace_memory_region_ops_read(get_cpu_index(), mr, abs_addr, tmp, size);
473 }
474 memory_region_shift_read_access(value, shift, mask, tmp);
475 return MEMTX_OK;
476}
477
478static MemTxResult memory_region_read_with_attrs_accessor(MemoryRegion *mr,
479 hwaddr addr,
480 uint64_t *value,
481 unsigned size,
482 signed shift,
483 uint64_t mask,
484 MemTxAttrs attrs)
485{
486 uint64_t tmp = 0;
487 MemTxResult r;
488
489 r = mr->ops->read_with_attrs(mr->opaque, addr, &tmp, size, attrs);
490 if (mr->subpage) {
491 trace_memory_region_subpage_read(get_cpu_index(), mr, addr, tmp, size);
492 } else if (trace_event_get_state_backends(TRACE_MEMORY_REGION_OPS_READ)) {
493 hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
494 trace_memory_region_ops_read(get_cpu_index(), mr, abs_addr, tmp, size);
495 }
496 memory_region_shift_read_access(value, shift, mask, tmp);
497 return r;
498}
499
500
501static MemTxResult memory_region_write_accessor_attr(MemoryRegion *mr,
502 hwaddr addr,
503 uint64_t *value,
504 unsigned size,
505 signed shift,
506 uint64_t mask,
507 MemTxAttrs attrs)
508{
509 MemoryTransaction tr = {{0}};
510
511 if (mr->flush_coalesced_mmio) {
512 qemu_flush_coalesced_mmio_buffer();
513 }
514
515 tr.opaque = mr->opaque;
516 tr.rw = true;
517 tr.addr = addr;
518 tr.size = size;
519 tr.attr = attrs;
520 tr.data.u64 = (*value >> shift) & mask;
521 trace_memory_region_ops_write(get_cpu_index(), mr, tr.addr, tr.data.u64, tr.size);
522 return mr->ops->access(&tr);
523}
524
525static MemTxResult memory_region_write_accessor(MemoryRegion *mr,
526 hwaddr addr,
527 uint64_t *value,
528 unsigned size,
529 signed shift,
530 uint64_t mask,
531 MemTxAttrs attrs)
532{
533 uint64_t tmp = memory_region_shift_write_access(value, shift, mask);
534
535 if (mr->subpage) {
536 trace_memory_region_subpage_write(get_cpu_index(), mr, addr, tmp, size);
537 } else if (trace_event_get_state_backends(TRACE_MEMORY_REGION_OPS_WRITE)) {
538 hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
539 trace_memory_region_ops_write(get_cpu_index(), mr, abs_addr, tmp, size);
540 }
541 mr->ops->write(mr->opaque, addr, tmp, size);
542 return MEMTX_OK;
543}
544
545static MemTxResult memory_region_write_with_attrs_accessor(MemoryRegion *mr,
546 hwaddr addr,
547 uint64_t *value,
548 unsigned size,
549 signed shift,
550 uint64_t mask,
551 MemTxAttrs attrs)
552{
553 uint64_t tmp = memory_region_shift_write_access(value, shift, mask);
554
555 if (mr->subpage) {
556 trace_memory_region_subpage_write(get_cpu_index(), mr, addr, tmp, size);
557 } else if (trace_event_get_state_backends(TRACE_MEMORY_REGION_OPS_WRITE)) {
558 hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
559 trace_memory_region_ops_write(get_cpu_index(), mr, abs_addr, tmp, size);
560 }
561 return mr->ops->write_with_attrs(mr->opaque, addr, tmp, size, attrs);
562}
563
564
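/* Return true if the region's owner is a device currently held in reset. */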
565static bool memory_owner_is_in_reset(MemoryRegion *mr)
566{
567 if (object_dynamic_cast(mr->owner, TYPE_DEVICE)) {
568 return DEVICE(mr->owner)->reset_level;
569 }
570 return false;
571}
572
573static MemTxResult access_with_adjusted_size(hwaddr addr,
574 uint64_t *value,
575 unsigned size,
576 unsigned access_size_min,
577 unsigned access_size_max,
578 MemTxResult (*access_fn)
579 (MemoryRegion *mr,
580 hwaddr addr,
581 uint64_t *value,
582 unsigned size,
583 signed shift,
584 uint64_t mask,
585 MemTxAttrs attrs),
586 MemoryRegion *mr,
587 MemTxAttrs attrs)
588{
589 uint64_t access_mask;
590 unsigned access_size;
591 unsigned i;
592 MemTxResult r = MEMTX_OK;
593
594 if (!access_size_min) {
595 access_size_min = 1;
596 }
597 if (!access_size_max) {
598 access_size_max = 4;
599 }
600
601
602
603
604
605
606
607 if (memory_owner_is_in_reset(mr)) {
608 qemu_log_mask(LOG_GUEST_ERROR, "%s: Accessing 0x%" HWADDR_PRIx" when "
609 "held in reset.\n",
610 object_get_canonical_path(OBJECT(mr->owner)), addr);
611 }
612
613
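    /*
     * Split the access into chunks the device implementation accepts and
     * shift each partial result into place according to the region's
     * endianness.
     */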
614 access_size = MAX(MIN(size, access_size_max), access_size_min);
615 access_mask = MAKE_64BIT_MASK(0, access_size * 8);
616 if (memory_region_big_endian(mr)) {
617 for (i = 0; i < size; i += access_size) {
618 r |= access_fn(mr, addr + i, value, access_size,
619 (size - access_size - i) * 8, access_mask, attrs);
620 }
621 } else {
622 for (i = 0; i < size; i += access_size) {
623 r |= access_fn(mr, addr + i, value, access_size, i * 8,
624 access_mask, attrs);
625 }
626 }
627 return r;
628}
629
630static AddressSpace *memory_region_to_address_space(MemoryRegion *mr)
631{
632 AddressSpace *as;
633
634 while (mr->container) {
635 mr = mr->container;
636 }
637 QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
638 if (mr == as->root) {
639 return as;
640 }
641 }
642 return NULL;
643}
644
645
646
647
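/*
 * Render a memory region into the flat view.  Ranges already present in
 * @view take precedence over the ranges being added.
 */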
648static void render_memory_region(FlatView *view,
649 MemoryRegion *mr,
650 Int128 base,
651 AddrRange clip,
652 bool readonly,
653 bool nonvolatile)
654{
655 MemoryRegion *subregion;
656 unsigned i;
657 hwaddr offset_in_region;
658 Int128 remain;
659 Int128 now;
660 FlatRange fr;
661 AddrRange tmp;
662
663 if (!mr->enabled) {
664 return;
665 }
666
667 int128_addto(&base, int128_make64(mr->addr));
668 readonly |= mr->readonly;
669 nonvolatile |= mr->nonvolatile;
670
671 tmp = addrrange_make(base, mr->size);
672
673 if (!addrrange_intersects(tmp, clip)) {
674 return;
675 }
676
677 clip = addrrange_intersection(tmp, clip);
678
679 if (mr->alias) {
680 int128_subfrom(&base, int128_make64(mr->alias->addr));
681 int128_subfrom(&base, int128_make64(mr->alias_offset));
682 render_memory_region(view, mr->alias, base, clip,
683 readonly, nonvolatile);
684 return;
685 }
686
687
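    /* Render subregions in priority order. */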
688 QTAILQ_FOREACH(subregion, &mr->subregions, subregions_link) {
689 render_memory_region(view, subregion, base, clip,
690 readonly, nonvolatile);
691 }
692
693 if (!mr->terminates) {
694 return;
695 }
696
697 offset_in_region = int128_get64(int128_sub(clip.start, base));
698 base = clip.start;
699 remain = clip.size;
700
701 fr.mr = mr;
702 fr.dirty_log_mask = memory_region_get_dirty_log_mask(mr);
703 fr.romd_mode = mr->romd_mode;
704 fr.readonly = readonly;
705 fr.nonvolatile = nonvolatile;
706
707
708 for (i = 0; i < view->nr && int128_nz(remain); ++i) {
709 if (int128_ge(base, addrrange_end(view->ranges[i].addr))) {
710 continue;
711 }
712 if (int128_lt(base, view->ranges[i].addr.start)) {
713 now = int128_min(remain,
714 int128_sub(view->ranges[i].addr.start, base));
715 fr.offset_in_region = offset_in_region;
716 fr.addr = addrrange_make(base, now);
717 flatview_insert(view, i, &fr);
718 ++i;
719 int128_addto(&base, now);
720 offset_in_region += int128_get64(now);
721 int128_subfrom(&remain, now);
722 }
723 now = int128_sub(int128_min(int128_add(base, remain),
724 addrrange_end(view->ranges[i].addr)),
725 base);
726 int128_addto(&base, now);
727 offset_in_region += int128_get64(now);
728 int128_subfrom(&remain, now);
729 }
730 if (int128_nz(remain)) {
731 fr.offset_in_region = offset_in_region;
732 fr.addr = addrrange_make(base, remain);
733 flatview_insert(view, i, &fr);
734 }
735}
736
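/*
 * Follow aliases and single-child pass-through containers to find the region
 * that actually determines the flat view; returns NULL if nothing enabled
 * would be rendered.
 */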
737static MemoryRegion *memory_region_get_flatview_root(MemoryRegion *mr)
738{
739 while (mr->enabled) {
740 if (mr->alias) {
741 if (!mr->alias_offset && int128_ge(mr->size, mr->alias->size)) {
742
743
744
745 mr = mr->alias;
746 continue;
747 }
748 } else if (!mr->terminates) {
749 unsigned int found = 0;
750 MemoryRegion *child, *next = NULL;
751 QTAILQ_FOREACH(child, &mr->subregions, subregions_link) {
752 if (child->enabled) {
753 if (++found > 1) {
754 next = NULL;
755 break;
756 }
757 if (!child->addr && int128_ge(mr->size, child->size)) {
758
759
760
761
762 next = child;
763 }
764 }
765 }
766 if (found == 0) {
767 return NULL;
768 }
769 if (next) {
770 mr = next;
771 continue;
772 }
773 }
774
775 return mr;
776 }
777
778 return NULL;
779}
780
781
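/* Render a memory topology into a list of disjoint absolute ranges. */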
782static FlatView *generate_memory_topology(MemoryRegion *mr)
783{
784 int i;
785 FlatView *view;
786
787 view = flatview_new(mr);
788
789 if (mr) {
790 render_memory_region(view, mr, int128_zero(),
791 addrrange_make(int128_zero(), int128_2_64()),
792 false, false);
793 }
794 flatview_simplify(view);
795
796 view->dispatch = address_space_dispatch_new(view);
797 for (i = 0; i < view->nr; i++) {
798 MemoryRegionSection mrs =
799 section_from_flat_range(&view->ranges[i], view);
800 flatview_add_to_dispatch(view, &mrs);
801 }
802 address_space_dispatch_compact(view->dispatch);
803 g_hash_table_replace(flat_views, mr, view);
804
805 return view;
806}
807
808static void address_space_add_del_ioeventfds(AddressSpace *as,
809 MemoryRegionIoeventfd *fds_new,
810 unsigned fds_new_nb,
811 MemoryRegionIoeventfd *fds_old,
812 unsigned fds_old_nb)
813{
814 unsigned iold, inew;
815 MemoryRegionIoeventfd *fd;
816 MemoryRegionSection section;
817
818
819
820
821
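    /*
     * Walk the sorted old and new ioeventfd lists in lockstep, deleting
     * entries only present in the old list and adding entries only present
     * in the new one.
     */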
822 iold = inew = 0;
823 while (iold < fds_old_nb || inew < fds_new_nb) {
824 if (iold < fds_old_nb
825 && (inew == fds_new_nb
826 || memory_region_ioeventfd_before(&fds_old[iold],
827 &fds_new[inew]))) {
828 fd = &fds_old[iold];
829 section = (MemoryRegionSection) {
830 .fv = address_space_to_flatview(as),
831 .offset_within_address_space = int128_get64(fd->addr.start),
832 .size = fd->addr.size,
833 };
            MEMORY_LISTENER_CALL(as, eventfd_del, Forward, &section,
                                 fd->match_data, fd->data, fd->e);
836 ++iold;
837 } else if (inew < fds_new_nb
838 && (iold == fds_old_nb
839 || memory_region_ioeventfd_before(&fds_new[inew],
840 &fds_old[iold]))) {
841 fd = &fds_new[inew];
842 section = (MemoryRegionSection) {
843 .fv = address_space_to_flatview(as),
844 .offset_within_address_space = int128_get64(fd->addr.start),
845 .size = fd->addr.size,
846 };
            MEMORY_LISTENER_CALL(as, eventfd_add, Reverse, &section,
                                 fd->match_data, fd->data, fd->e);
849 ++inew;
850 } else {
851 ++iold;
852 ++inew;
853 }
854 }
855}
856
857FlatView *address_space_get_flatview(AddressSpace *as)
858{
859 FlatView *view;
860
861 RCU_READ_LOCK_GUARD();
862 do {
863 view = address_space_to_flatview(as);
864
865
866
867 } while (!flatview_ref(view));
868 return view;
869}
870
871static void address_space_update_ioeventfds(AddressSpace *as)
872{
873 FlatView *view;
874 FlatRange *fr;
875 unsigned ioeventfd_nb = 0;
876 unsigned ioeventfd_max;
877 MemoryRegionIoeventfd *ioeventfds;
878 AddrRange tmp;
879 unsigned i;
880
881
882
883
884
885
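    /*
     * Start from the previous count, rounded up slightly, so that the common
     * case of an unchanged ioeventfd set avoids reallocations.
     */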
886 ioeventfd_max = QEMU_ALIGN_UP(as->ioeventfd_nb, 4);
887 ioeventfds = g_new(MemoryRegionIoeventfd, ioeventfd_max);
888
889 view = address_space_get_flatview(as);
890 FOR_EACH_FLAT_RANGE(fr, view) {
891 for (i = 0; i < fr->mr->ioeventfd_nb; ++i) {
892 tmp = addrrange_shift(fr->mr->ioeventfds[i].addr,
893 int128_sub(fr->addr.start,
894 int128_make64(fr->offset_in_region)));
895 if (addrrange_intersects(fr->addr, tmp)) {
896 ++ioeventfd_nb;
897 if (ioeventfd_nb > ioeventfd_max) {
898 ioeventfd_max = MAX(ioeventfd_max * 2, 4);
899 ioeventfds = g_realloc(ioeventfds,
900 ioeventfd_max * sizeof(*ioeventfds));
901 }
902 ioeventfds[ioeventfd_nb-1] = fr->mr->ioeventfds[i];
903 ioeventfds[ioeventfd_nb-1].addr = tmp;
904 }
905 }
906 }
907
908 address_space_add_del_ioeventfds(as, ioeventfds, ioeventfd_nb,
909 as->ioeventfds, as->ioeventfd_nb);
910
911 g_free(as->ioeventfds);
912 as->ioeventfds = ioeventfds;
913 as->ioeventfd_nb = ioeventfd_nb;
914 flatview_unref(view);
915}
916
917
918
919
920
921
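/*
 * Translate a coalesced range into the address space of @fr and, if the two
 * intersect, notify the listeners of @as about the overlap.
 */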
922static void flat_range_coalesced_io_notify(FlatRange *fr, AddressSpace *as,
923 CoalescedMemoryRange *cmr, bool add)
924{
925 AddrRange tmp;
926
927 tmp = addrrange_shift(cmr->addr,
928 int128_sub(fr->addr.start,
929 int128_make64(fr->offset_in_region)));
930 if (!addrrange_intersects(tmp, fr->addr)) {
931 return;
932 }
933 tmp = addrrange_intersection(tmp, fr->addr);
934
935 if (add) {
936 MEMORY_LISTENER_UPDATE_REGION(fr, as, Forward, coalesced_io_add,
937 int128_get64(tmp.start),
938 int128_get64(tmp.size));
939 } else {
940 MEMORY_LISTENER_UPDATE_REGION(fr, as, Reverse, coalesced_io_del,
941 int128_get64(tmp.start),
942 int128_get64(tmp.size));
943 }
944}
945
946static void flat_range_coalesced_io_del(FlatRange *fr, AddressSpace *as)
947{
948 CoalescedMemoryRange *cmr;
949
950 QTAILQ_FOREACH(cmr, &fr->mr->coalesced, link) {
951 flat_range_coalesced_io_notify(fr, as, cmr, false);
952 }
953}
954
955static void flat_range_coalesced_io_add(FlatRange *fr, AddressSpace *as)
956{
957 MemoryRegion *mr = fr->mr;
958 CoalescedMemoryRange *cmr;
959
960 if (QTAILQ_EMPTY(&mr->coalesced)) {
961 return;
962 }
963
964 QTAILQ_FOREACH(cmr, &mr->coalesced, link) {
965 flat_range_coalesced_io_notify(fr, as, cmr, true);
966 }
967}
968
969static void address_space_update_topology_pass(AddressSpace *as,
970 const FlatView *old_view,
971 const FlatView *new_view,
972 bool adding)
973{
974 unsigned iold, inew;
975 FlatRange *frold, *frnew;
976
977
978
979
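    /*
     * Walk both views in address order; ranges present only in the old view
     * are deleted, ranges present only in the new view are added, and
     * unchanged ranges have their dirty logging state reconciled.
     */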
980 iold = inew = 0;
981 while (iold < old_view->nr || inew < new_view->nr) {
982 if (iold < old_view->nr) {
983 frold = &old_view->ranges[iold];
984 } else {
985 frold = NULL;
986 }
987 if (inew < new_view->nr) {
988 frnew = &new_view->ranges[inew];
989 } else {
990 frnew = NULL;
991 }
992
993 if (frold
994 && (!frnew
995 || int128_lt(frold->addr.start, frnew->addr.start)
996 || (int128_eq(frold->addr.start, frnew->addr.start)
997 && !flatrange_equal(frold, frnew)))) {
998
999
1000 if (!adding) {
1001 flat_range_coalesced_io_del(frold, as);
1002 MEMORY_LISTENER_UPDATE_REGION(frold, as, Reverse, region_del);
1003 }
1004
1005 ++iold;
1006 } else if (frold && frnew && flatrange_equal(frold, frnew)) {
1007
1008
1009 if (adding) {
1010 MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, region_nop);
1011 if (frnew->dirty_log_mask & ~frold->dirty_log_mask) {
1012 MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, log_start,
1013 frold->dirty_log_mask,
1014 frnew->dirty_log_mask);
1015 }
1016 if (frold->dirty_log_mask & ~frnew->dirty_log_mask) {
1017 MEMORY_LISTENER_UPDATE_REGION(frnew, as, Reverse, log_stop,
1018 frold->dirty_log_mask,
1019 frnew->dirty_log_mask);
1020 }
1021 }
1022
1023 ++iold;
1024 ++inew;
1025 } else {
1026
1027
1028 if (adding) {
1029 MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, region_add);
1030 flat_range_coalesced_io_add(frnew, as);
1031 }
1032
1033 ++inew;
1034 }
1035 }
1036}
1037
1038static void flatviews_init(void)
1039{
1040 static FlatView *empty_view;
1041
1042 if (flat_views) {
1043 return;
1044 }
1045
1046 flat_views = g_hash_table_new_full(g_direct_hash, g_direct_equal, NULL,
1047 (GDestroyNotify) flatview_unref);
1048 if (!empty_view) {
1049 empty_view = generate_memory_topology(NULL);
1050
1051 flatview_ref(empty_view);
1052 } else {
1053 g_hash_table_replace(flat_views, NULL, empty_view);
1054 flatview_ref(empty_view);
1055 }
1056}
1057
1058static void flatviews_reset(void)
1059{
1060 AddressSpace *as;
1061
1062 if (flat_views) {
1063 g_hash_table_unref(flat_views);
1064 flat_views = NULL;
1065 }
1066 flatviews_init();
1067
1068
1069 QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
1070 MemoryRegion *physmr = memory_region_get_flatview_root(as->root);
1071
1072 if (g_hash_table_lookup(flat_views, physmr)) {
1073 continue;
1074 }
1075
1076 generate_memory_topology(physmr);
1077 }
1078}
1079
1080static void address_space_set_flatview(AddressSpace *as)
1081{
1082 FlatView *old_view = address_space_to_flatview(as);
1083 MemoryRegion *physmr = memory_region_get_flatview_root(as->root);
1084 FlatView *new_view = g_hash_table_lookup(flat_views, physmr);
1085
1086 assert(new_view);
1087
1088 if (old_view == new_view) {
1089 return;
1090 }
1091
1092 if (old_view) {
1093 flatview_ref(old_view);
1094 }
1095
1096 flatview_ref(new_view);
1097
1098 if (!QTAILQ_EMPTY(&as->listeners)) {
1099 FlatView tmpview = { .nr = 0 }, *old_view2 = old_view;
1100
1101 if (!old_view2) {
1102 old_view2 = &tmpview;
1103 }
1104 address_space_update_topology_pass(as, old_view2, new_view, false);
1105 address_space_update_topology_pass(as, old_view2, new_view, true);
1106 }
1107
1108
1109 atomic_rcu_set(&as->current_map, new_view);
1110 if (old_view) {
1111 flatview_unref(old_view);
1112 }
1113
1114
1115
1116
1117
1118
1119
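    /*
     * The old FlatRanges still hold references to their MemoryRegions at
     * this point, so listeners normally do not need to ref/unref the regions
     * they were handed; drop the address space's own reference last.
     */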
1120 if (old_view) {
1121 flatview_unref(old_view);
1122 }
1123}
1124
1125static void address_space_update_topology(AddressSpace *as)
1126{
1127 MemoryRegion *physmr = memory_region_get_flatview_root(as->root);
1128
1129 flatviews_init();
1130 if (!g_hash_table_lookup(flat_views, physmr)) {
1131 generate_memory_topology(physmr);
1132 }
1133 address_space_set_flatview(as);
1134}
1135
1136void memory_region_transaction_begin(void)
1137{
1138 qemu_flush_coalesced_mmio_buffer();
1139 ++memory_region_transaction_depth;
1140}
1141
1142void memory_region_transaction_commit(void)
1143{
1144 AddressSpace *as;
1145
1146 assert(memory_region_transaction_depth);
1147 assert(qemu_mutex_iothread_locked());
1148
1149 --memory_region_transaction_depth;
1150 if (!memory_region_transaction_depth) {
1151 if (memory_region_update_pending) {
1152 flatviews_reset();
1153
1154 MEMORY_LISTENER_CALL_GLOBAL(begin, Forward);
1155
1156 QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
1157 address_space_set_flatview(as);
1158 address_space_update_ioeventfds(as);
1159 }
1160 memory_region_update_pending = false;
1161 ioeventfd_update_pending = false;
1162 MEMORY_LISTENER_CALL_GLOBAL(commit, Forward);
1163 } else if (ioeventfd_update_pending) {
1164 QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
1165 address_space_update_ioeventfds(as);
1166 }
1167 ioeventfd_update_pending = false;
1168 }
1169 }
1170}
1171
1172static void memory_region_destructor_none(MemoryRegion *mr)
1173{
1174}
1175
1176static void memory_region_destructor_ram(MemoryRegion *mr)
1177{
1178 qemu_ram_free(mr->ram_block);
1179}
1180
1181static bool memory_region_need_escape(char c)
1182{
1183 return c == '/' || c == '[' || c == '\\' || c == ']';
1184}
1185
1186static char *memory_region_escape_name(const char *name)
1187{
1188 const char *p;
1189 char *escaped, *q;
1190 uint8_t c;
1191 size_t bytes = 0;
1192
1193 for (p = name; *p; p++) {
1194 bytes += memory_region_need_escape(*p) ? 4 : 1;
1195 }
1196 if (bytes == p - name) {
1197 return g_memdup(name, bytes + 1);
1198 }
1199
1200 escaped = g_malloc(bytes + 1);
1201 for (p = name, q = escaped; *p; p++) {
1202 c = *p;
1203 if (unlikely(memory_region_need_escape(c))) {
1204 *q++ = '\\';
1205 *q++ = 'x';
1206 *q++ = "0123456789abcdef"[c >> 4];
1207 c = "0123456789abcdef"[c & 15];
1208 }
1209 *q++ = c;
1210 }
1211 *q = 0;
1212 return escaped;
1213}
1214
1215static void memory_region_do_init(MemoryRegion *mr,
1216 Object *owner,
1217 const char *name,
1218 uint64_t size)
1219{
1220 mr->size = int128_make64(size);
1221 if (size == UINT64_MAX) {
1222 mr->size = int128_2_64();
1223 }
1224 mr->name = g_strdup(name);
1225 mr->owner = owner;
1226 mr->ram_block = NULL;
1227
1228 if (name) {
1229 char *escaped_name = memory_region_escape_name(name);
1230 char *name_array = g_strdup_printf("%s[*]", escaped_name);
1231
1232 if (!owner) {
1233 owner = container_get(qdev_get_machine(), "/unattached");
1234 }
1235
1236 object_property_add_child(owner, name_array, OBJECT(mr));
1237 object_unref(OBJECT(mr));
1238 g_free(name_array);
1239 g_free(escaped_name);
1240 }
1241}
1242
1243void memory_region_init(MemoryRegion *mr,
1244 Object *owner,
1245 const char *name,
1246 uint64_t size)
1247{
1248 object_initialize(mr, sizeof(*mr), TYPE_MEMORY_REGION);
1249 memory_region_do_init(mr, owner, name, size);
1250}
1251
1252static void memory_region_get_addr(Object *obj, Visitor *v, const char *name,
1253 void *opaque, Error **errp)
1254{
1255 MemoryRegion *mr = MEMORY_REGION(obj);
1256 uint64_t value = mr->addr;
1257
1258 visit_type_uint64(v, name, &value, errp);
1259}
1260
1261static void memory_region_set_addr(Object *obj, Visitor *v, const char *name,
1262 void *opaque, Error **errp)
1263{
1264 MemoryRegion *mr = MEMORY_REGION(obj);
1265 Error *local_err = NULL;
1266 uint64_t value;
1267
1268 visit_type_uint64(v, name, &value, &local_err);
1269 if (local_err) {
1270 error_propagate(errp, local_err);
1271 return;
1272 }
1273
1274 memory_region_set_address(mr, value);
1275}
1276
1277static void memory_region_set_container(Object *obj, Visitor *v, const char *name,
1278 void *opaque, Error **errp)
1279{
1280 MemoryRegion *mr = MEMORY_REGION(obj);
1281 Error *local_err = NULL;
1282 MemoryRegion *old_container = mr->container;
1283 MemoryRegion *new_container = NULL;
1284 char *path = NULL;
1285
1286 visit_type_str(v, name, &path, &local_err);
1287
1288 if (!local_err && strcmp(path, "") != 0) {
1289 new_container = MEMORY_REGION(object_resolve_link(obj, name, path,
1290 &local_err));
        while (new_container && new_container->alias) {
1292 new_container = new_container->alias;
1293 }
1294 }
1295
1296 if (local_err) {
1297 error_propagate(errp, local_err);
1298 return;
1299 }
1300
1301 object_ref(OBJECT(new_container));
1302
1303 memory_region_transaction_begin();
1304 memory_region_ref(mr);
1305 if (old_container) {
1306 memory_region_del_subregion(old_container, mr);
1307 }
1308 mr->container = new_container;
1309 if (new_container) {
1310 memory_region_update_container_subregions(mr);
1311 }
1312 memory_region_unref(mr);
1313 memory_region_transaction_commit();
1314
1315 object_unref(OBJECT(old_container));
1316}
1317
1318static void memory_region_get_container(Object *obj, Visitor *v,
1319 const char *name, void *opaque,
1320 Error **errp)
1321{
1322 MemoryRegion *mr = MEMORY_REGION(obj);
1323 char *path = (char *)"";
1324
1325 if (mr->container) {
1326 path = object_get_canonical_path(OBJECT(mr->container));
1327 }
1328 visit_type_str(v, name, &path, errp);
1329 if (mr->container) {
1330 g_free(path);
1331 }
1332}
1333
1334static Object *memory_region_resolve_container(Object *obj, void *opaque,
1335 const char *part)
1336{
1337 MemoryRegion *mr = MEMORY_REGION(obj);
1338
1339 return OBJECT(mr->container);
1340}
1341
1342static void memory_region_set_alias(const Object *obj, const char *name,
1343 Object *val, Error **errp)
1344{
1345 MemoryRegion *mr = MEMORY_REGION(obj);
1346 MemoryRegion *subregion, *next;
1347
1348
1349
1350 assert (!mr->alias);
1351
1352
1353
1354
1355
1356 memory_region_transaction_begin();
1357 QTAILQ_FOREACH_SAFE(subregion, &mr->subregions, subregions_link, next) {
1358 object_property_set_link(OBJECT(subregion), "container", OBJECT(val), errp);
1359 }
1360 memory_region_ref(mr);
1361 mr->alias = MEMORY_REGION(val);
1362 memory_region_unref(mr);
1363 memory_region_transaction_commit();
1364
1365}
1366
1367static void memory_region_get_priority(Object *obj, Visitor *v,
1368 const char *name, void *opaque,
1369 Error **errp)
1370{
1371 MemoryRegion *mr = MEMORY_REGION(obj);
1372 int32_t value = mr->priority;
1373
1374 visit_type_int32(v, name, &value, errp);
1375}
1376
1377static void memory_region_set_priority(Object *obj, Visitor *v, const char *name,
1378 void *opaque, Error **errp)
1379{
1380 MemoryRegion *mr = MEMORY_REGION(obj);
1381 Error *local_err = NULL;
1382 int32_t value;
1383
    visit_type_uint32(v, name, (uint32_t *)&value, &local_err);
1385 if (local_err) {
1386 error_propagate(errp, local_err);
1387 return;
1388 }
1389
1390 if (mr->priority != value) {
1391 mr->priority = value;
1392 memory_region_readd_subregion(mr);
1393 }
1394}
1395
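/*
 * The "ram" property selects the backing for the region: 0 means no RAM
 * block, 1 means anonymously allocated RAM, 2 means file-backed RAM (see the
 * "filename" property), and 3 is used for RAM built around an externally
 * supplied pointer.
 */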
1396static void memory_region_do_set_ram(MemoryRegion *mr)
1397{
1398 char *c, *filename, *sanitized_name;
1399
1400 if (mr->addr) {
1401 qemu_ram_free(mr->ram_block);
1402 }
1403 if (int128_eq(mr->size, int128_make64(0))) {
1404 return;
1405 }
1406 switch (mr->ram) {
1407 case(0):
1408 mr->ram_block = NULL;
1409 break;
1410 case(1):
1411 mr->ram_block = qemu_ram_alloc(int128_get64(mr->size), true, mr, &error_abort);
1412 break;
1413 case(2):
1414 if (mr->filename) {
1415 filename = g_strdup_printf("%s%s%s",
1416 machine_path ? machine_path : "",
1417 machine_path ? G_DIR_SEPARATOR_S : "",
1418 mr->filename);
1419 } else {
1420 sanitized_name = g_strdup(object_get_canonical_path(OBJECT(mr)));
1421
1422 for (c = sanitized_name; *c != '\0'; c++) {
1423 if (*c == '/') {
1424 *c = '_';
1425 }
1426 }
1427 filename = g_strdup_printf("%s" G_DIR_SEPARATOR_S "qemu-memory-%s",
1428 machine_path ? machine_path : ".",
1429 sanitized_name);
1430 g_free(sanitized_name);
1431 }
1432 mr->ram_block = qemu_ram_alloc_from_file(int128_get64(mr->size), mr,
1433 RAM_SHARED, filename, &error_abort);
1434 g_free(filename);
1435 break;
1436 default:
1437 abort();
1438 }
1439}
1440
1441static void memory_region_set_ram(Object *obj, Visitor *v, const char *name,
1442 void *opaque, Error **errp)
1443{
1444 MemoryRegion *mr = MEMORY_REGION(obj);
1445 Error *local_err = NULL;
1446 uint8_t value;
1447
    visit_type_uint8(v, name, &value, &local_err);
1449 if (local_err) {
1450 error_propagate(errp, local_err);
1451 return;
1452 }
1453
1454 mr->dirty_log_mask |= tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
1455
1456
1457 if (mr->ram == value) {
1458 return;
1459 }
1460
1461 mr->ram = value;
1462 mr->terminates = !!value;
1463
1464 if (int128_eq(int128_2_64(), mr->size)) {
1465 return;
1466 }
1467
1468 memory_region_do_set_ram(mr);
1469}
1470
1471static void memory_region_get_size(Object *obj, Visitor *v, const char *name,
1472 void *opaque, Error **errp)
1473{
1474 MemoryRegion *mr = MEMORY_REGION(obj);
1475 uint64_t value = memory_region_size(mr);
1476
1477 visit_type_uint64(v, name, &value, errp);
1478}
1479
1480static void memory_region_set_object_size(Object *obj, Visitor *v, const char *name,
1481 void *opaque, Error **errp)
1482{
1483 MemoryRegion *mr = MEMORY_REGION(obj);
1484 Error *local_err = NULL;
1485 uint64_t size;
1486
    visit_type_uint64(v, name, &size, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    memory_region_set_size(mr, size);
1490}
1491
1492static void memory_region_get_filename(Object *obj, Visitor *v,
1493 const char *name,
1494 void *opaque, Error **errp)
1495{
1496 MemoryRegion *mr = MEMORY_REGION(obj);
1497 char *filename = mr->filename;
1498
1499 visit_type_str(v, name, &filename, errp);
1500}
1501
1502static void memory_region_set_filename(Object *obj, Visitor *v,
1503 const char *name,
1504 void *opaque, Error **errp)
1505{
1506 MemoryRegion *mr = MEMORY_REGION(obj);
1507 Error *local_err = NULL;
1508 char *filename;
1509
    visit_type_str(v, name, &filename, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }
    mr->filename = filename;
1512}
1513
1514static void memory_region_initfn(Object *obj)
1515{
1516 MemoryRegion *mr = MEMORY_REGION(obj);
1517 ObjectProperty *op;
1518
1519 mr->ops = &unassigned_mem_ops;
1520 mr->enabled = true;
1521 mr->romd_mode = true;
1522 mr->global_locking = true;
1523 mr->destructor = memory_region_destructor_none;
1524
1525
1526
1527 mr->size = int128_2_64();
1528 QTAILQ_INIT(&mr->subregions);
1529 QTAILQ_INIT(&mr->coalesced);
1530
1531 op = object_property_add(OBJECT(mr), "container",
1532 "link<" TYPE_MEMORY_REGION ">",
1533 memory_region_get_container,
1534 memory_region_set_container,
1535 NULL, NULL);
1536 op->resolve = memory_region_resolve_container;
1537
1538 object_property_add_link(OBJECT(mr), "alias", TYPE_MEMORY_REGION,
1539 (Object **)&mr->alias,
1540 memory_region_set_alias,
1541 0);
1542 object_property_add(OBJECT(mr), "addr", "uint64",
1543 memory_region_get_addr,
1544 memory_region_set_addr,
1545 NULL, NULL);
1546 object_property_add(OBJECT(mr), "priority", "uint32",
1547 memory_region_get_priority,
1548 memory_region_set_priority,
1549 NULL, NULL);
1550 object_property_add(OBJECT(mr), "ram", "uint8",
1551 NULL,
1552 memory_region_set_ram,
1553 NULL, NULL);
1554 object_property_add(OBJECT(mr), "filename", "string",
1555 memory_region_get_filename,
1556 memory_region_set_filename,
1557 NULL, NULL);
1558 object_property_add(OBJECT(mr), "size", "uint64",
1559 memory_region_get_size,
1560 memory_region_set_object_size,
1561 NULL, NULL);
1562}
1563
1564static void iommu_memory_region_initfn(Object *obj)
1565{
1566 MemoryRegion *mr = MEMORY_REGION(obj);
1567
1568 mr->is_iommu = true;
1569}
1570
1571static uint64_t unassigned_mem_read(void *opaque, hwaddr addr,
1572 unsigned size)
1573{
1574#ifdef DEBUG_UNASSIGNED
1575 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
1576#endif
1577 return 0;
1578}
1579
1580static void unassigned_mem_write(void *opaque, hwaddr addr,
1581 uint64_t val, unsigned size)
1582{
1583#ifdef DEBUG_UNASSIGNED
1584 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%"PRIx64"\n", addr, val);
1585#endif
1586}
1587
1588static bool unassigned_mem_accepts(void *opaque, hwaddr addr,
1589 unsigned size, bool is_write,
1590 MemTxAttrs attrs)
1591{
1592 return false;
1593}
1594
1595const MemoryRegionOps unassigned_mem_ops = {
1596 .valid.accepts = unassigned_mem_accepts,
1597 .endianness = DEVICE_NATIVE_ENDIAN,
1598};
1599
1600static uint64_t memory_region_ram_device_read(void *opaque,
1601 hwaddr addr, unsigned size)
1602{
1603 MemoryRegion *mr = opaque;
1604 uint64_t data = (uint64_t)~0;
1605
1606 switch (size) {
1607 case 1:
1608 data = *(uint8_t *)(mr->ram_block->host + addr);
1609 break;
1610 case 2:
1611 data = *(uint16_t *)(mr->ram_block->host + addr);
1612 break;
1613 case 4:
1614 data = *(uint32_t *)(mr->ram_block->host + addr);
1615 break;
1616 case 8:
1617 data = *(uint64_t *)(mr->ram_block->host + addr);
1618 break;
1619 }
1620
1621 trace_memory_region_ram_device_read(get_cpu_index(), mr, addr, data, size);
1622
1623 return data;
1624}
1625
1626static void memory_region_ram_device_write(void *opaque, hwaddr addr,
1627 uint64_t data, unsigned size)
1628{
1629 MemoryRegion *mr = opaque;
1630
1631 trace_memory_region_ram_device_write(get_cpu_index(), mr, addr, data, size);
1632
1633 switch (size) {
1634 case 1:
1635 *(uint8_t *)(mr->ram_block->host + addr) = (uint8_t)data;
1636 break;
1637 case 2:
1638 *(uint16_t *)(mr->ram_block->host + addr) = (uint16_t)data;
1639 break;
1640 case 4:
1641 *(uint32_t *)(mr->ram_block->host + addr) = (uint32_t)data;
1642 break;
1643 case 8:
1644 *(uint64_t *)(mr->ram_block->host + addr) = data;
1645 break;
1646 }
1647}
1648
1649static const MemoryRegionOps ram_device_mem_ops = {
1650 .read = memory_region_ram_device_read,
1651 .write = memory_region_ram_device_write,
1652 .endianness = DEVICE_HOST_ENDIAN,
1653 .valid = {
1654 .min_access_size = 1,
1655 .max_access_size = 8,
1656 .unaligned = true,
1657 },
1658 .impl = {
1659 .min_access_size = 1,
1660 .max_access_size = 8,
1661 .unaligned = true,
1662 },
1663};
1664
1665bool memory_region_access_valid(MemoryRegion *mr,
1666 hwaddr addr,
1667 unsigned size,
1668 bool is_write,
1669 MemTxAttrs attrs)
1670{
1671 if (mr->ops->valid.accepts
1672 && !mr->ops->valid.accepts(mr->opaque, addr, size, is_write, attrs)) {
1673 return false;
1674 }
1675
1676 if (!mr->ops->valid.unaligned && (addr & (size - 1))) {
1677 return false;
1678 }
1679
1680
1681 if (!mr->ops->valid.max_access_size) {
1682 return true;
1683 }
1684
1685 if (size > mr->ops->valid.max_access_size
1686 || size < mr->ops->valid.min_access_size) {
1687 return false;
1688 }
1689 return true;
1690}
1691
1692static MemTxResult memory_region_dispatch_read1(MemoryRegion *mr,
1693 hwaddr addr,
1694 uint64_t *pval,
1695 unsigned size,
1696 MemTxAttrs attrs)
1697{
1698 *pval = 0;
1699
1700 if (mr->ops->access) {
1701 return access_with_adjusted_size(addr, pval, size,
1702 mr->ops->impl.min_access_size,
1703 mr->ops->impl.max_access_size,
1704 memory_region_read_accessor_attr,
1705 mr, attrs);
1706 } else if (mr->ops->read) {
1707 return access_with_adjusted_size(addr, pval, size,
1708 mr->ops->impl.min_access_size,
1709 mr->ops->impl.max_access_size,
1710 memory_region_read_accessor,
1711 mr, attrs);
1712 } else {
1713 return access_with_adjusted_size(addr, pval, size,
1714 mr->ops->impl.min_access_size,
1715 mr->ops->impl.max_access_size,
1716 memory_region_read_with_attrs_accessor,
1717 mr, attrs);
1718 }
1719}
1720
1721MemTxResult memory_region_dispatch_read(MemoryRegion *mr,
1722 hwaddr addr,
1723 uint64_t *pval,
1724 MemOp op,
1725 MemTxAttrs attrs)
1726{
1727 unsigned size = memop_size(op);
1728 MemTxResult r;
1729
1730 if (!memory_region_access_valid(mr, addr, size, false, attrs)) {
1731 *pval = unassigned_mem_read(mr, addr, size);
1732 return MEMTX_DECODE_ERROR;
1733 }
1734
1735 r = memory_region_dispatch_read1(mr, addr, pval, size, attrs);
1736 adjust_endianness(mr, pval, op);
1737 return r;
1738}
1739
1740
1741static bool memory_region_dispatch_write_eventfds(MemoryRegion *mr,
1742 hwaddr addr,
1743 uint64_t data,
1744 unsigned size,
1745 MemTxAttrs attrs)
1746{
1747 MemoryRegionIoeventfd ioeventfd = {
1748 .addr = addrrange_make(int128_make64(addr), int128_make64(size)),
1749 .data = data,
1750 };
1751 unsigned i;
1752
1753 for (i = 0; i < mr->ioeventfd_nb; i++) {
1754 ioeventfd.match_data = mr->ioeventfds[i].match_data;
1755 ioeventfd.e = mr->ioeventfds[i].e;
1756
1757 if (memory_region_ioeventfd_equal(&ioeventfd, &mr->ioeventfds[i])) {
1758 event_notifier_set(ioeventfd.e);
1759 return true;
1760 }
1761 }
1762
1763 return false;
1764}
1765
1766MemTxResult memory_region_dispatch_write(MemoryRegion *mr,
1767 hwaddr addr,
1768 uint64_t data,
1769 MemOp op,
1770 MemTxAttrs attrs)
1771{
1772 unsigned size = memop_size(op);
1773
1774 if (!memory_region_access_valid(mr, addr, size, true, attrs)) {
1775 unassigned_mem_write(mr, addr, data, size);
1776 return MEMTX_DECODE_ERROR;
1777 }
1778
1779 adjust_endianness(mr, &data, op);
1780
1781 if ((!kvm_eventfds_enabled()) &&
1782 memory_region_dispatch_write_eventfds(mr, addr, data, size, attrs)) {
1783 return MEMTX_OK;
1784 }
1785
1786 if (mr->ops->access) {
1787 return access_with_adjusted_size(addr, &data, size,
1788 mr->ops->impl.min_access_size,
1789 mr->ops->impl.max_access_size,
1790 memory_region_write_accessor_attr,
1791 mr, attrs);
1792 } else if (mr->ops->write) {
1793 return access_with_adjusted_size(addr, &data, size,
1794 mr->ops->impl.min_access_size,
1795 mr->ops->impl.max_access_size,
1796 memory_region_write_accessor, mr,
1797 attrs);
1798 } else {
1799 return
1800 access_with_adjusted_size(addr, &data, size,
1801 mr->ops->impl.min_access_size,
1802 mr->ops->impl.max_access_size,
1803 memory_region_write_with_attrs_accessor,
1804 mr, attrs);
1805 }
1806}
1807
1808void memory_region_init_io(MemoryRegion *mr,
1809 Object *owner,
1810 const MemoryRegionOps *ops,
1811 void *opaque,
1812 const char *name,
1813 uint64_t size)
1814{
1815 memory_region_init(mr, owner, name, size);
1816 mr->ops = ops ? ops : &unassigned_mem_ops;
1817 mr->opaque = opaque;
1818 mr->terminates = true;
1819}
1820
1821void memory_region_init_ram_nomigrate(MemoryRegion *mr,
1822 Object *owner,
1823 const char *name,
1824 uint64_t size,
1825 Error **errp)
1826{
1827 memory_region_init_ram_shared_nomigrate(mr, owner, name, size, false, errp);
1828}
1829
1830void memory_region_init_ram_shared_nomigrate(MemoryRegion *mr,
1831 Object *owner,
1832 const char *name,
1833 uint64_t size,
1834 bool share,
1835 Error **errp)
1836{
1837 Error *err = NULL;
1838 memory_region_init(mr, owner, name, size);
1839 mr->ram = 1;
1840 mr->terminates = true;
1841 mr->destructor = memory_region_destructor_ram;
1842 mr->ram_block = qemu_ram_alloc(size, share, mr, &err);
1843 mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
1844 if (err) {
1845 mr->size = int128_zero();
1846 object_unparent(OBJECT(mr));
1847 error_propagate(errp, err);
1848 }
1849}
1850
1851void memory_region_init_resizeable_ram(MemoryRegion *mr,
1852 Object *owner,
1853 const char *name,
1854 uint64_t size,
1855 uint64_t max_size,
1856 void (*resized)(const char*,
1857 uint64_t length,
1858 void *host),
1859 Error **errp)
1860{
1861 Error *err = NULL;
1862 memory_region_init(mr, owner, name, size);
1863 mr->ram = true;
1864 mr->terminates = true;
1865 mr->destructor = memory_region_destructor_ram;
1866 mr->ram_block = qemu_ram_alloc_resizeable(size, max_size, resized,
1867 mr, &err);
1868 mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
1869 if (err) {
1870 mr->size = int128_zero();
1871 object_unparent(OBJECT(mr));
1872 error_propagate(errp, err);
1873 }
1874}
1875
1876#ifdef CONFIG_POSIX
1877void memory_region_init_ram_from_file(MemoryRegion *mr,
1878 struct Object *owner,
1879 const char *name,
1880 uint64_t size,
1881 uint64_t align,
1882 uint32_t ram_flags,
1883 const char *path,
1884 Error **errp)
1885{
1886 Error *err = NULL;
1887 memory_region_init(mr, owner, name, size);
1888 mr->ram = 2;
1889 mr->terminates = true;
1890 mr->destructor = memory_region_destructor_ram;
1891 mr->align = align;
1892 mr->ram_block = qemu_ram_alloc_from_file(size, mr, ram_flags, path, &err);
1893 mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
1894 if (err) {
1895 mr->size = int128_zero();
1896 object_unparent(OBJECT(mr));
1897 error_propagate(errp, err);
1898 }
1899}
1900
1901void memory_region_init_ram_from_fd(MemoryRegion *mr,
1902 struct Object *owner,
1903 const char *name,
1904 uint64_t size,
1905 bool share,
1906 int fd,
1907 Error **errp)
1908{
1909 Error *err = NULL;
1910 memory_region_init(mr, owner, name, size);
1911 mr->ram = true;
1912 mr->terminates = true;
1913 mr->destructor = memory_region_destructor_ram;
1914 mr->ram_block = qemu_ram_alloc_from_fd(size, mr,
1915 share ? RAM_SHARED : 0,
1916 fd, &err);
1917 mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
1918 if (err) {
1919 mr->size = int128_zero();
1920 object_unparent(OBJECT(mr));
1921 error_propagate(errp, err);
1922 }
1923}
1924#endif
1925
1926void memory_region_init_ram_ptr(MemoryRegion *mr,
1927 Object *owner,
1928 const char *name,
1929 uint64_t size,
1930 void *ptr)
1931{
1932 memory_region_init(mr, owner, name, size);
1933 mr->ram = 3;
1934 mr->terminates = true;
1935 mr->destructor = memory_region_destructor_ram;
1936 mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
1937
1938
1939 assert(ptr != NULL);
1940 mr->ram_block = qemu_ram_alloc_from_ptr(size, ptr, mr, &error_fatal);
1941}
1942
1943void memory_region_init_ram_device_ptr(MemoryRegion *mr,
1944 Object *owner,
1945 const char *name,
1946 uint64_t size,
1947 void *ptr)
1948{
1949 memory_region_init(mr, owner, name, size);
1950 mr->ram = true;
1951 mr->terminates = true;
1952 mr->ram_device = true;
1953 mr->ops = &ram_device_mem_ops;
1954 mr->opaque = mr;
1955 mr->destructor = memory_region_destructor_ram;
1956 mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
1957
1958 assert(ptr != NULL);
1959 mr->ram_block = qemu_ram_alloc_from_ptr(size, ptr, mr, &error_fatal);
1960}
1961
1962void memory_region_init_alias(MemoryRegion *mr,
1963 Object *owner,
1964 const char *name,
1965 MemoryRegion *orig,
1966 hwaddr offset,
1967 uint64_t size)
1968{
1969 memory_region_init(mr, owner, name, size);
1970 mr->alias = orig;
1971 mr->alias_offset = offset;
1972}
1973
1974void memory_region_init_rom_nomigrate(MemoryRegion *mr,
1975 struct Object *owner,
1976 const char *name,
1977 uint64_t size,
1978 Error **errp)
1979{
1980 memory_region_init_ram_shared_nomigrate(mr, owner, name, size, false, errp);
1981 mr->readonly = true;
1982}
1983
1984void memory_region_init_rom_device_nomigrate(MemoryRegion *mr,
1985 Object *owner,
1986 const MemoryRegionOps *ops,
1987 void *opaque,
1988 const char *name,
1989 uint64_t size,
1990 Error **errp)
1991{
1992 Error *err = NULL;
1993 assert(ops);
1994 memory_region_init(mr, owner, name, size);
1995 mr->ops = ops;
1996 mr->opaque = opaque;
1997 mr->terminates = true;
1998 mr->rom_device = true;
1999 mr->destructor = memory_region_destructor_ram;
2000 mr->ram_block = qemu_ram_alloc(size, false, mr, &err);
2001 if (err) {
2002 mr->size = int128_zero();
2003 object_unparent(OBJECT(mr));
2004 error_propagate(errp, err);
2005 }
2006}
2007
2008void memory_region_init_iommu(void *_iommu_mr,
2009 size_t instance_size,
2010 const char *mrtypename,
2011 Object *owner,
2012 const char *name,
2013 uint64_t size)
2014{
2015 struct IOMMUMemoryRegion *iommu_mr;
2016 struct MemoryRegion *mr;
2017
2018 object_initialize(_iommu_mr, instance_size, mrtypename);
2019 mr = MEMORY_REGION(_iommu_mr);
2020 memory_region_do_init(mr, owner, name, size);
2021 iommu_mr = IOMMU_MEMORY_REGION(mr);
2022 mr->terminates = true;
2023 QLIST_INIT(&iommu_mr->iommu_notify);
2024 iommu_mr->iommu_notify_flags = IOMMU_NOTIFIER_NONE;
2025}
2026
2027static void memory_region_finalize(Object *obj)
2028{
2029 MemoryRegion *mr = MEMORY_REGION(obj);
2030
2031 assert(!mr->container);
2032
2033
2034
2035
2036
2037
2038
2039 mr->enabled = false;
2040 memory_region_transaction_begin();
2041 while (!QTAILQ_EMPTY(&mr->subregions)) {
2042 MemoryRegion *subregion = QTAILQ_FIRST(&mr->subregions);
2043 memory_region_del_subregion(mr, subregion);
2044 }
2045 memory_region_transaction_commit();
2046
2047 mr->destructor(mr);
2048 memory_region_clear_coalescing(mr);
2049 g_free((char *)mr->name);
2050 g_free(mr->ioeventfds);
2051}
2052
2053Object *memory_region_owner(MemoryRegion *mr)
2054{
2055 Object *obj = OBJECT(mr);
2056 return obj->parent;
2057}
2058
2059void memory_region_ref(MemoryRegion *mr)
2060{
2061
2062
2063
2064
2065
2066
2067
2068
2069
2070
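    /*
     * MMIO callbacks usually touch data owned by the region's owner, so keep
     * the owner (and therefore the region, which is its child) alive while
     * the region is in use.  Regions without an owner are expected to stay
     * around for the lifetime of the machine and are not reference counted.
     */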
2071 if (mr && mr->owner) {
2072 object_ref(mr->owner);
2073 }
2074}
2075
2076void memory_region_unref(MemoryRegion *mr)
2077{
2078 if (mr && mr->owner) {
2079 object_unref(mr->owner);
2080 }
2081}
2082
2083uint64_t memory_region_size(MemoryRegion *mr)
2084{
2085 if (int128_eq(mr->size, int128_2_64())) {
2086 return UINT64_MAX;
2087 }
2088 return int128_get64(mr->size);
2089}
2090
2091const char *memory_region_name(const MemoryRegion *mr)
2092{
2093 if (!mr->name) {
2094 ((MemoryRegion *)mr)->name =
2095 g_strdup(object_get_canonical_path_component(OBJECT(mr)));
2096 }
2097 return mr->name;
2098}
2099
2100bool memory_region_is_ram_device(MemoryRegion *mr)
2101{
2102 return mr->ram_device;
2103}
2104
2105uint8_t memory_region_get_dirty_log_mask(MemoryRegion *mr)
2106{
2107 uint8_t mask = mr->dirty_log_mask;
2108 if (global_dirty_log && mr->ram_block) {
2109 mask |= (1 << DIRTY_MEMORY_MIGRATION);
2110 }
2111 return mask;
2112}
2113
2114bool memory_region_is_logging(MemoryRegion *mr, uint8_t client)
2115{
2116 return memory_region_get_dirty_log_mask(mr) & (1 << client);
2117}
2118
2119static int memory_region_update_iommu_notify_flags(IOMMUMemoryRegion *iommu_mr,
2120 Error **errp)
2121{
2122 IOMMUNotifierFlag flags = IOMMU_NOTIFIER_NONE;
2123 IOMMUNotifier *iommu_notifier;
2124 IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);
2125 int ret = 0;
2126
2127 IOMMU_NOTIFIER_FOREACH(iommu_notifier, iommu_mr) {
2128 flags |= iommu_notifier->notifier_flags;
2129 }
2130
2131 if (flags != iommu_mr->iommu_notify_flags && imrc->notify_flag_changed) {
2132 ret = imrc->notify_flag_changed(iommu_mr,
2133 iommu_mr->iommu_notify_flags,
2134 flags, errp);
2135 }
2136
2137 if (!ret) {
2138 iommu_mr->iommu_notify_flags = flags;
2139 }
2140 return ret;
2141}
2142
2143int memory_region_register_iommu_notifier(MemoryRegion *mr,
2144 IOMMUNotifier *n, Error **errp)
2145{
2146 IOMMUMemoryRegion *iommu_mr;
2147 int ret;
2148
2149 if (mr->alias) {
2150 return memory_region_register_iommu_notifier(mr->alias, n, errp);
2151 }
2152
2153
2154 iommu_mr = IOMMU_MEMORY_REGION(mr);
2155 assert(n->notifier_flags != IOMMU_NOTIFIER_NONE);
2156 assert(n->start <= n->end);
2157 assert(n->iommu_idx >= 0 &&
2158 n->iommu_idx < memory_region_iommu_num_indexes(iommu_mr));
2159
2160 QLIST_INSERT_HEAD(&iommu_mr->iommu_notify, n, node);
2161 ret = memory_region_update_iommu_notify_flags(iommu_mr, errp);
2162 if (ret) {
2163 QLIST_REMOVE(n, node);
2164 }
2165 return ret;
2166}
2167
2168uint64_t memory_region_iommu_get_min_page_size(IOMMUMemoryRegion *iommu_mr)
2169{
2170 IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);
2171
2172 if (imrc->get_min_page_size) {
2173 return imrc->get_min_page_size(iommu_mr);
2174 }
2175 return TARGET_PAGE_SIZE;
2176}
2177
2178void memory_region_iommu_replay(IOMMUMemoryRegion *iommu_mr, IOMMUNotifier *n)
2179{
2180 MemoryRegion *mr = MEMORY_REGION(iommu_mr);
2181 IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);
2182 hwaddr addr, granularity;
2183 IOMMUTLBEntry iotlb;
2184
2185
2186 if (imrc->replay) {
2187 imrc->replay(iommu_mr, n);
2188 return;
2189 }
2190
2191 granularity = memory_region_iommu_get_min_page_size(iommu_mr);
2192
2193 for (addr = 0; addr < memory_region_size(mr); addr += granularity) {
2194 iotlb = imrc->translate(iommu_mr, addr, IOMMU_NONE, n->iommu_idx);
2195 if (iotlb.perm != IOMMU_NONE) {
2196 n->notify(n, &iotlb);
2197 }
2198
2199
2200
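        /*
         * Stop if addr + granularity wraps around, which would otherwise
         * turn this into an infinite loop.
         */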
2201 if ((addr + granularity) < addr) {
2202 break;
2203 }
2204 }
2205}
2206
2207void memory_region_unregister_iommu_notifier(MemoryRegion *mr,
2208 IOMMUNotifier *n)
2209{
2210 IOMMUMemoryRegion *iommu_mr;
2211
2212 if (mr->alias) {
2213 memory_region_unregister_iommu_notifier(mr->alias, n);
2214 return;
2215 }
2216 QLIST_REMOVE(n, node);
2217 iommu_mr = IOMMU_MEMORY_REGION(mr);
2218 memory_region_update_iommu_notify_flags(iommu_mr, NULL);
2219}
2220
2221void memory_region_notify_one(IOMMUNotifier *notifier,
2222 IOMMUTLBEntry *entry)
2223{
2224 IOMMUNotifierFlag request_flags;
2225 hwaddr entry_end = entry->iova + entry->addr_mask;
2226
2227
2228
2229
2230
2231 if (notifier->start > entry_end || notifier->end < entry->iova) {
2232 return;
2233 }
2234
2235 assert(entry->iova >= notifier->start && entry_end <= notifier->end);
2236
2237 if (entry->perm & IOMMU_RW) {
2238 request_flags = IOMMU_NOTIFIER_MAP;
2239 } else {
2240 request_flags = IOMMU_NOTIFIER_UNMAP;
2241 }
2242
2243 if (notifier->notifier_flags & request_flags) {
2244 notifier->notify(notifier, entry);
2245 }
2246}
2247
2248void memory_region_notify_iommu(IOMMUMemoryRegion *iommu_mr,
2249 int iommu_idx,
2250 IOMMUTLBEntry entry)
2251{
2252 IOMMUNotifier *iommu_notifier;
2253
2254 assert(memory_region_is_iommu(MEMORY_REGION(iommu_mr)));
2255
2256 IOMMU_NOTIFIER_FOREACH(iommu_notifier, iommu_mr) {
2257 if (iommu_notifier->iommu_idx == iommu_idx) {
2258 memory_region_notify_one(iommu_notifier, &entry);
2259 }
2260 }
2261}
2262
2263int memory_region_iommu_get_attr(IOMMUMemoryRegion *iommu_mr,
2264 enum IOMMUMemoryRegionAttr attr,
2265 void *data)
2266{
2267 IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);
2268
2269 if (!imrc->get_attr) {
2270 return -EINVAL;
2271 }
2272
2273 return imrc->get_attr(iommu_mr, attr, data);
2274}
2275
2276int memory_region_iommu_attrs_to_index(IOMMUMemoryRegion *iommu_mr,
2277 MemTxAttrs attrs)
2278{
2279 IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);
2280
2281 if (!imrc->attrs_to_index) {
2282 return 0;
2283 }
2284
2285 return imrc->attrs_to_index(iommu_mr, attrs);
2286}
2287
2288int memory_region_iommu_num_indexes(IOMMUMemoryRegion *iommu_mr)
2289{
2290 IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);
2291
2292 if (!imrc->num_indexes) {
2293 return 1;
2294 }
2295
2296 return imrc->num_indexes(iommu_mr);
2297}
2298
2299void memory_region_set_log(MemoryRegion *mr, bool log, unsigned client)
2300{
2301 uint8_t mask = 1 << client;
2302 uint8_t old_logging;
2303
2304 assert(client == DIRTY_MEMORY_VGA);
2305 old_logging = mr->vga_logging_count;
2306 mr->vga_logging_count += log ? 1 : -1;
2307 if (!!old_logging == !!mr->vga_logging_count) {
2308 return;
2309 }
2310
2311 memory_region_transaction_begin();
2312 mr->dirty_log_mask = (mr->dirty_log_mask & ~mask) | (log * mask);
2313 memory_region_update_pending |= mr->enabled;
2314 memory_region_transaction_commit();
2315}
2316
2317void memory_region_set_dirty(MemoryRegion *mr, hwaddr addr,
2318 hwaddr size)
2319{
2320 assert(mr->ram_block);
2321 cpu_physical_memory_set_dirty_range(memory_region_get_ram_addr(mr) + addr,
2322 size,
2323 memory_region_get_dirty_log_mask(mr));
2324}
2325
2326static void memory_region_sync_dirty_bitmap(MemoryRegion *mr)
2327{
2328 MemoryListener *listener;
2329 AddressSpace *as;
2330 FlatView *view;
2331 FlatRange *fr;
2332
2333
2334
2335
2336
2337
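    /*
     * Walk every log_sync listener and hand it each dirty-tracked flat
     * range; if the same address space has several such listeners, its
     * FlatView is visited once per listener.
     */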
2338 QTAILQ_FOREACH(listener, &memory_listeners, link) {
2339 if (!listener->log_sync) {
2340 continue;
2341 }
2342 as = listener->address_space;
2343 view = address_space_get_flatview(as);
2344 FOR_EACH_FLAT_RANGE(fr, view) {
2345 if (fr->dirty_log_mask && (!mr || fr->mr == mr)) {
2346 MemoryRegionSection mrs = section_from_flat_range(fr, view);
2347 listener->log_sync(listener, &mrs);
2348 }
2349 }
2350 flatview_unref(view);
2351 }
2352}
2353
2354void memory_region_clear_dirty_bitmap(MemoryRegion *mr, hwaddr start,
2355 hwaddr len)
2356{
2357 MemoryRegionSection mrs;
2358 MemoryListener *listener;
2359 AddressSpace *as;
2360 FlatView *view;
2361 FlatRange *fr;
2362 hwaddr sec_start, sec_end, sec_size;
2363
2364 QTAILQ_FOREACH(listener, &memory_listeners, link) {
2365 if (!listener->log_clear) {
2366 continue;
2367 }
2368 as = listener->address_space;
2369 view = address_space_get_flatview(as);
2370 FOR_EACH_FLAT_RANGE(fr, view) {
2371 if (!fr->dirty_log_mask || fr->mr != mr) {
2372
2373
2374
2375
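                /*
                 * Clearing only makes sense for dirty-tracked ranges that
                 * belong to this region; skip everything else.
                 */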
2376 continue;
2377 }
2378
2379 mrs = section_from_flat_range(fr, view);
2380
2381 sec_start = MAX(mrs.offset_within_region, start);
2382 sec_end = mrs.offset_within_region + int128_get64(mrs.size);
2383 sec_end = MIN(sec_end, start + len);
2384
2385 if (sec_start >= sec_end) {
2386
2387
2388
2389
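                /* The requested range does not overlap this section. */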
2390 continue;
2391 }
2392
2393
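            /* Shrink the section to the part that actually overlaps. */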
2394 mrs.offset_within_address_space +=
2395 sec_start - mrs.offset_within_region;
2396 mrs.offset_within_region = sec_start;
2397 sec_size = sec_end - sec_start;
2398 mrs.size = int128_make64(sec_size);
2399 listener->log_clear(listener, &mrs);
2400 }
2401 flatview_unref(view);
2402 }
2403}
2404
2405DirtyBitmapSnapshot *memory_region_snapshot_and_clear_dirty(MemoryRegion *mr,
2406 hwaddr addr,
2407 hwaddr size,
2408 unsigned client)
2409{
2410 DirtyBitmapSnapshot *snapshot;
2411 assert(mr->ram_block);
2412 memory_region_sync_dirty_bitmap(mr);
2413 snapshot = cpu_physical_memory_snapshot_and_clear_dirty(mr, addr, size, client);
2414 memory_global_after_dirty_log_sync();
2415 return snapshot;
2416}
2417
2418bool memory_region_snapshot_get_dirty(MemoryRegion *mr, DirtyBitmapSnapshot *snap,
2419 hwaddr addr, hwaddr size)
2420{
2421 assert(mr->ram_block);
2422 return cpu_physical_memory_snapshot_get_dirty(snap,
2423 memory_region_get_ram_addr(mr) + addr, size);
2424}
2425
2426void memory_region_set_readonly(MemoryRegion *mr, bool readonly)
2427{
2428 if (mr->readonly != readonly) {
2429 memory_region_transaction_begin();
2430 mr->readonly = readonly;
2431 memory_region_update_pending |= mr->enabled;
2432 memory_region_transaction_commit();
2433 }
2434}
2435
2436void memory_region_set_nonvolatile(MemoryRegion *mr, bool nonvolatile)
2437{
2438 if (mr->nonvolatile != nonvolatile) {
2439 memory_region_transaction_begin();
2440 mr->nonvolatile = nonvolatile;
2441 memory_region_update_pending |= mr->enabled;
2442 memory_region_transaction_commit();
2443 }
2444}
2445
2446void memory_region_rom_device_set_romd(MemoryRegion *mr, bool romd_mode)
2447{
2448 if (mr->romd_mode != romd_mode) {
2449 memory_region_transaction_begin();
2450 mr->romd_mode = romd_mode;
2451 memory_region_update_pending |= mr->enabled;
2452 memory_region_transaction_commit();
2453 }
2454}
2455
2456void memory_region_reset_dirty(MemoryRegion *mr, hwaddr addr,
2457 hwaddr size, unsigned client)
2458{
2459 assert(mr->ram_block);
2460 cpu_physical_memory_test_and_clear_dirty(
2461 memory_region_get_ram_addr(mr) + addr, size, client);
2462}
2463
2464int memory_region_get_fd(MemoryRegion *mr)
2465{
2466 int fd;
2467
2468 RCU_READ_LOCK_GUARD();
2469 while (mr->alias) {
2470 mr = mr->alias;
2471 }
2472 fd = mr->ram_block->fd;
2473
2474 return fd;
2475}
2476
2477void *memory_region_get_ram_ptr(MemoryRegion *mr)
2478{
2479 void *ptr;
2480 uint64_t offset = 0;
2481
2482 RCU_READ_LOCK_GUARD();
2483 while (mr->alias) {
2484 offset += mr->alias_offset;
2485 mr = mr->alias;
2486 }
2487 assert(mr->ram_block);
2488 ptr = qemu_map_ram_ptr(mr->ram_block, offset);
2489
2490 return ptr;
2491}
2492
2493MemoryRegion *memory_region_from_host(void *ptr, ram_addr_t *offset)
2494{
2495 RAMBlock *block;
2496
2497 block = qemu_ram_block_from_host(ptr, false, offset);
2498 if (!block) {
2499 return NULL;
2500 }
2501
2502 return block->mr;
2503}
2504
2505ram_addr_t memory_region_get_ram_addr(MemoryRegion *mr)
2506{
2507 return mr->ram_block ? mr->ram_block->offset : RAM_ADDR_INVALID;
2508}
2509
2510void memory_region_ram_resize(MemoryRegion *mr, ram_addr_t newsize, Error **errp)
2511{
2512 assert(mr->ram_block);
2513
2514 qemu_ram_resize(mr->ram_block, newsize, errp);
2515}
2516
2517void memory_region_msync(MemoryRegion *mr, hwaddr addr, hwaddr size)
2518{
2519 if (mr->ram_block) {
2520 qemu_ram_msync(mr->ram_block, addr, size);
2521 }
2522}
2523
2524void memory_region_writeback(MemoryRegion *mr, hwaddr addr, hwaddr size)
2525{
2526
2527
2528
2529
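    /* Only bother syncing regions that are being dirty-tracked. */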
2530 if (mr->dirty_log_mask) {
2531 memory_region_msync(mr, addr, size);
2532 }
2533}
2534
2535
2536
2537
2538
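/*
 * Notify every address space that currently maps @mr about one coalesced
 * MMIO range being added or removed.
 */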
2539static void memory_region_update_coalesced_range(MemoryRegion *mr,
2540 CoalescedMemoryRange *cmr,
2541 bool add)
2542{
2543 AddressSpace *as;
2544 FlatView *view;
2545 FlatRange *fr;
2546
2547 QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
2548 view = address_space_get_flatview(as);
2549 FOR_EACH_FLAT_RANGE(fr, view) {
2550 if (fr->mr == mr) {
2551 flat_range_coalesced_io_notify(fr, as, cmr, add);
2552 }
2553 }
2554 flatview_unref(view);
2555 }
2556}
2557
2558void memory_region_set_coalescing(MemoryRegion *mr)
2559{
2560 memory_region_clear_coalescing(mr);
2561 memory_region_add_coalescing(mr, 0, int128_get64(mr->size));
2562}
2563
2564void memory_region_add_coalescing(MemoryRegion *mr,
2565 hwaddr offset,
2566 uint64_t size)
2567{
2568 CoalescedMemoryRange *cmr = g_malloc(sizeof(*cmr));
2569
2570 cmr->addr = addrrange_make(int128_make64(offset), int128_make64(size));
2571 QTAILQ_INSERT_TAIL(&mr->coalesced, cmr, link);
2572 memory_region_update_coalesced_range(mr, cmr, true);
2573 memory_region_set_flush_coalesced(mr);
2574}
2575
2576void memory_region_clear_coalescing(MemoryRegion *mr)
2577{
2578 CoalescedMemoryRange *cmr;
2579
2580 if (QTAILQ_EMPTY(&mr->coalesced)) {
2581 return;
2582 }
2583
2584 qemu_flush_coalesced_mmio_buffer();
2585 mr->flush_coalesced_mmio = false;
2586
2587 while (!QTAILQ_EMPTY(&mr->coalesced)) {
2588 cmr = QTAILQ_FIRST(&mr->coalesced);
2589 QTAILQ_REMOVE(&mr->coalesced, cmr, link);
2590 memory_region_update_coalesced_range(mr, cmr, false);
2591 g_free(cmr);
2592 }
2593}
2594
2595void memory_region_set_flush_coalesced(MemoryRegion *mr)
2596{
2597 mr->flush_coalesced_mmio = true;
2598}
2599
2600void memory_region_clear_flush_coalesced(MemoryRegion *mr)
2601{
2602 qemu_flush_coalesced_mmio_buffer();
2603 if (QTAILQ_EMPTY(&mr->coalesced)) {
2604 mr->flush_coalesced_mmio = false;
2605 }
2606}
2607
2608void memory_region_clear_global_locking(MemoryRegion *mr)
2609{
2610 mr->global_locking = false;
2611}
2612
2613static bool userspace_eventfd_warning;
2614
2615void memory_region_add_eventfd(MemoryRegion *mr,
2616 hwaddr addr,
2617 unsigned size,
2618 bool match_data,
2619 uint64_t data,
2620 EventNotifier *e)
2621{
2622 MemoryRegionIoeventfd mrfd = {
2623 .addr.start = int128_make64(addr),
2624 .addr.size = int128_make64(size),
2625 .match_data = match_data,
2626 .data = data,
2627 .e = e,
2628 };
2629 unsigned i;
2630
2631 if (kvm_enabled() && (!(kvm_eventfds_enabled() ||
2632 userspace_eventfd_warning))) {
2633 userspace_eventfd_warning = true;
2634 error_report("Using eventfd without MMIO binding in KVM. "
2635 "Suboptimal performance expected");
2636 }
2637
2638 if (size) {
2639 adjust_endianness(mr, &mrfd.data, size_memop(size) | MO_TE);
2640 }
2641 memory_region_transaction_begin();
2642 for (i = 0; i < mr->ioeventfd_nb; ++i) {
2643 if (memory_region_ioeventfd_before(&mrfd, &mr->ioeventfds[i])) {
2644 break;
2645 }
2646 }
2647 ++mr->ioeventfd_nb;
2648 mr->ioeventfds = g_realloc(mr->ioeventfds,
2649 sizeof(*mr->ioeventfds) * mr->ioeventfd_nb);
2650 memmove(&mr->ioeventfds[i+1], &mr->ioeventfds[i],
2651 sizeof(*mr->ioeventfds) * (mr->ioeventfd_nb-1 - i));
2652 mr->ioeventfds[i] = mrfd;
2653 ioeventfd_update_pending |= mr->enabled;
2654 memory_region_transaction_commit();
2655}
2656
2657void memory_region_del_eventfd(MemoryRegion *mr,
2658 hwaddr addr,
2659 unsigned size,
2660 bool match_data,
2661 uint64_t data,
2662 EventNotifier *e)
2663{
2664 MemoryRegionIoeventfd mrfd = {
2665 .addr.start = int128_make64(addr),
2666 .addr.size = int128_make64(size),
2667 .match_data = match_data,
2668 .data = data,
2669 .e = e,
2670 };
2671 unsigned i;
2672
2673 if (size) {
2674 adjust_endianness(mr, &mrfd.data, size_memop(size) | MO_TE);
2675 }
2676 memory_region_transaction_begin();
2677 for (i = 0; i < mr->ioeventfd_nb; ++i) {
2678 if (memory_region_ioeventfd_equal(&mrfd, &mr->ioeventfds[i])) {
2679 break;
2680 }
2681 }
2682 assert(i != mr->ioeventfd_nb);
2683 memmove(&mr->ioeventfds[i], &mr->ioeventfds[i+1],
2684 sizeof(*mr->ioeventfds) * (mr->ioeventfd_nb - (i+1)));
2685 --mr->ioeventfd_nb;
2686 mr->ioeventfds = g_realloc(mr->ioeventfds,
2687 sizeof(*mr->ioeventfds)*mr->ioeventfd_nb + 1);
2688 ioeventfd_update_pending |= mr->enabled;
2689 memory_region_transaction_commit();
2690}
2691
2692static void memory_region_update_container_subregions(MemoryRegion *subregion)
2693{
2694 MemoryRegion *mr = subregion->container;
2695 MemoryRegion *other;
2696
2697 memory_region_transaction_begin();
2698
2699 memory_region_ref(subregion);
2700 QTAILQ_FOREACH(other, &mr->subregions, subregions_link) {
2701 if (subregion->priority >= other->priority) {
2702 QTAILQ_INSERT_BEFORE(other, subregion, subregions_link);
2703 goto done;
2704 }
2705 }
2706 QTAILQ_INSERT_TAIL(&mr->subregions, subregion, subregions_link);
2707done:
2708 memory_region_update_pending |= mr->enabled && subregion->enabled;
2709 memory_region_transaction_commit();
2710}
2711
2712static void memory_region_add_subregion_common(MemoryRegion *mr,
2713 hwaddr offset,
2714 MemoryRegion *subregion)
2715{
2716 assert(!subregion->container);
2717 subregion->container = mr;
2718 subregion->addr = offset;
2719 memory_region_update_container_subregions(subregion);
2720}
2721
2722void memory_region_add_subregion(MemoryRegion *mr,
2723 hwaddr offset,
2724 MemoryRegion *subregion)
2725{
2726 subregion->priority = 0;
2727 memory_region_add_subregion_common(mr, offset, subregion);
2728}
2729
2730void memory_region_add_subregion_overlap(MemoryRegion *mr,
2731 hwaddr offset,
2732 MemoryRegion *subregion,
2733 int priority)
2734{
2735 subregion->priority = priority;
2736 memory_region_add_subregion_common(mr, offset, subregion);
2737}
2738
2739void memory_region_del_subregion(MemoryRegion *mr,
2740 MemoryRegion *subregion)
2741{
2742 memory_region_transaction_begin();
2743 assert(subregion->container == mr);
2744 subregion->container = NULL;
2745 QTAILQ_REMOVE(&mr->subregions, subregion, subregions_link);
2746 memory_region_unref(subregion);
2747 memory_region_update_pending |= mr->enabled && subregion->enabled;
2748 memory_region_transaction_commit();
2749}
2750
2751void memory_region_set_enabled(MemoryRegion *mr, bool enabled)
2752{
2753 if (enabled == mr->enabled) {
2754 return;
2755 }
2756 memory_region_transaction_begin();
2757 mr->enabled = enabled;
2758 memory_region_update_pending = true;
2759 memory_region_transaction_commit();
2760}
2761
2762void memory_region_set_size(MemoryRegion *mr, uint64_t size)
2763{
2764 Int128 s = int128_make64(size);
2765
2766 if (size == UINT64_MAX) {
2767 s = int128_2_64();
2768 }
2769 if (int128_eq(s, mr->size)) {
2770 return;
2771 }
2772 memory_region_transaction_begin();
2773 mr->size = s;
2774 if (mr->ram) {
2775 memory_region_do_set_ram(mr);
2776 }
2777 memory_region_update_pending = true;
2778 memory_region_transaction_commit();
2779}
2780
2781static void memory_region_readd_subregion(MemoryRegion *mr)
2782{
2783 MemoryRegion *container = mr->container;
2784
2785 if (container) {
2786 memory_region_transaction_begin();
2787 memory_region_ref(mr);
2788 memory_region_del_subregion(container, mr);
2789 mr->container = container;
2790 memory_region_update_container_subregions(mr);
2791 memory_region_unref(mr);
2792 memory_region_transaction_commit();
2793 }
2794}
2795
2796void memory_region_set_address(MemoryRegion *mr, hwaddr addr)
2797{
2798 if (addr != mr->addr) {
2799 mr->addr = addr;
2800 memory_region_readd_subregion(mr);
2801 }
2802}
2803
2804void memory_region_set_alias_offset(MemoryRegion *mr, hwaddr offset)
2805{
2806 assert(mr->alias);
2807
2808 if (offset == mr->alias_offset) {
2809 return;
2810 }
2811
2812 memory_region_transaction_begin();
2813 mr->alias_offset = offset;
2814 memory_region_update_pending |= mr->enabled;
2815 memory_region_transaction_commit();
2816}
2817
2818uint64_t memory_region_get_alignment(const MemoryRegion *mr)
2819{
2820 return mr->align;
2821}
2822
2823static int cmp_flatrange_addr(const void *addr_, const void *fr_)
2824{
2825 const AddrRange *addr = addr_;
2826 const FlatRange *fr = fr_;
2827
2828 if (int128_le(addrrange_end(*addr), fr->addr.start)) {
2829 return -1;
2830 } else if (int128_ge(addr->start, addrrange_end(fr->addr))) {
2831 return 1;
2832 }
2833 return 0;
2834}
2835
2836static FlatRange *flatview_lookup(FlatView *view, AddrRange addr)
2837{
2838 return bsearch(&addr, view->ranges, view->nr,
2839 sizeof(FlatRange), cmp_flatrange_addr);
2840}
2841
2842bool memory_region_is_mapped(MemoryRegion *mr)
2843{
2844 return mr->container ? true : false;
2845}
2846
2847
2848
2849
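/*
 * Same as memory_region_find(), but without taking a reference on the
 * returned region; must be called from an RCU critical section.
 */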
2850static MemoryRegionSection memory_region_find_rcu(MemoryRegion *mr,
2851 hwaddr addr, uint64_t size)
2852{
2853 MemoryRegionSection ret = { .mr = NULL };
2854 MemoryRegion *root;
2855 AddressSpace *as;
2856 AddrRange range;
2857 FlatView *view;
2858 FlatRange *fr;
2859
2860 addr += mr->addr;
2861 for (root = mr; root->container; ) {
2862 root = root->container;
2863 addr += root->addr;
2864 }
2865
2866 as = memory_region_to_address_space(root);
2867 if (!as) {
2868 return ret;
2869 }
2870 range = addrrange_make(int128_make64(addr), int128_make64(size));
2871
2872 view = address_space_to_flatview(as);
2873 fr = flatview_lookup(view, range);
2874 if (!fr) {
2875 return ret;
2876 }
2877
2878 while (fr > view->ranges && addrrange_intersects(fr[-1].addr, range)) {
2879 --fr;
2880 }
2881
2882 ret.mr = fr->mr;
2883 ret.fv = view;
2884 range = addrrange_intersection(range, fr->addr);
2885 ret.offset_within_region = fr->offset_in_region;
2886 ret.offset_within_region += int128_get64(int128_sub(range.start,
2887 fr->addr.start));
2888 ret.size = range.size;
2889 ret.offset_within_address_space = int128_get64(range.start);
2890 ret.readonly = fr->readonly;
2891 ret.nonvolatile = fr->nonvolatile;
2892 return ret;
2893}
2894
2895MemoryRegionSection memory_region_find(MemoryRegion *mr,
2896 hwaddr addr, uint64_t size)
2897{
2898 MemoryRegionSection ret;
2899 RCU_READ_LOCK_GUARD();
2900 ret = memory_region_find_rcu(mr, addr, size);
2901 if (ret.mr) {
2902 memory_region_ref(ret.mr);
2903 }
2904 return ret;
2905}
2906
2907bool memory_region_present(MemoryRegion *container, hwaddr addr)
2908{
2909 MemoryRegion *mr;
2910
2911 RCU_READ_LOCK_GUARD();
2912 mr = memory_region_find_rcu(container, addr, 1).mr;
2913 return mr && mr != container;
2914}
2915
2916void memory_global_dirty_log_sync(void)
2917{
2918 memory_region_sync_dirty_bitmap(NULL);
2919}
2920
2921void memory_global_after_dirty_log_sync(void)
2922{
2923 MEMORY_LISTENER_CALL_GLOBAL(log_global_after_sync, Forward);
2924}
2925
2926static VMChangeStateEntry *vmstate_change;
2927
2928void memory_global_dirty_log_start(void)
2929{
2930 if (vmstate_change) {
2931 qemu_del_vm_change_state_handler(vmstate_change);
2932 vmstate_change = NULL;
2933 }
2934
2935 global_dirty_log = true;
2936
2937 MEMORY_LISTENER_CALL_GLOBAL(log_global_start, Forward);
2938
2939
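    /* Refresh the flat views so they pick up the new global dirty-log state. */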
2940 memory_region_transaction_begin();
2941 memory_region_update_pending = true;
2942 memory_region_transaction_commit();
2943}
2944
2945static void memory_global_dirty_log_do_stop(void)
2946{
2947 global_dirty_log = false;
2948
2949
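    /* Refresh the flat views before telling listeners that logging stopped. */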
2950 memory_region_transaction_begin();
2951 memory_region_update_pending = true;
2952 memory_region_transaction_commit();
2953
2954 MEMORY_LISTENER_CALL_GLOBAL(log_global_stop, Reverse);
2955}
2956
2957static void memory_vm_change_state_handler(void *opaque, int running,
2958 RunState state)
2959{
2960 if (running) {
2961 memory_global_dirty_log_do_stop();
2962
2963 if (vmstate_change) {
2964 qemu_del_vm_change_state_handler(vmstate_change);
2965 vmstate_change = NULL;
2966 }
2967 }
2968}
2969
2970void memory_global_dirty_log_stop(void)
2971{
2972 if (!runstate_is_running()) {
2973 if (vmstate_change) {
2974 return;
2975 }
2976 vmstate_change = qemu_add_vm_change_state_handler(
2977 memory_vm_change_state_handler, NULL);
2978 return;
2979 }
2980
2981 memory_global_dirty_log_do_stop();
2982}
2983
2984static void listener_add_address_space(MemoryListener *listener,
2985 AddressSpace *as)
2986{
2987 FlatView *view;
2988 FlatRange *fr;
2989
2990 if (listener->begin) {
2991 listener->begin(listener);
2992 }
2993 if (global_dirty_log) {
2994 if (listener->log_global_start) {
2995 listener->log_global_start(listener);
2996 }
2997 }
2998
2999 view = address_space_get_flatview(as);
3000 FOR_EACH_FLAT_RANGE(fr, view) {
3001 MemoryRegionSection section = section_from_flat_range(fr, view);
3002
3003 if (listener->region_add) {
3004            listener->region_add(listener, &section);
3005 }
3006 if (fr->dirty_log_mask && listener->log_start) {
3007            listener->log_start(listener, &section, 0, fr->dirty_log_mask);
3008 }
3009 }
3010 if (listener->commit) {
3011 listener->commit(listener);
3012 }
3013 flatview_unref(view);
3014}
3015
3016static void listener_del_address_space(MemoryListener *listener,
3017 AddressSpace *as)
3018{
3019 FlatView *view;
3020 FlatRange *fr;
3021
3022 if (listener->begin) {
3023 listener->begin(listener);
3024 }
3025 view = address_space_get_flatview(as);
3026 FOR_EACH_FLAT_RANGE(fr, view) {
3027 MemoryRegionSection section = section_from_flat_range(fr, view);
3028
3029 if (fr->dirty_log_mask && listener->log_stop) {
3030            listener->log_stop(listener, &section, fr->dirty_log_mask, 0);
3031 }
3032 if (listener->region_del) {
3033            listener->region_del(listener, &section);
3034 }
3035 }
3036 if (listener->commit) {
3037 listener->commit(listener);
3038 }
3039 flatview_unref(view);
3040}
3041
3042void memory_listener_register(MemoryListener *listener, AddressSpace *as)
3043{
3044 MemoryListener *other = NULL;
3045
3046 listener->address_space = as;
3047 if (QTAILQ_EMPTY(&memory_listeners)
3048 || listener->priority >= QTAILQ_LAST(&memory_listeners)->priority) {
3049 QTAILQ_INSERT_TAIL(&memory_listeners, listener, link);
3050 } else {
3051 QTAILQ_FOREACH(other, &memory_listeners, link) {
3052 if (listener->priority < other->priority) {
3053 break;
3054 }
3055 }
3056 QTAILQ_INSERT_BEFORE(other, listener, link);
3057 }
3058
3059 if (QTAILQ_EMPTY(&as->listeners)
3060 || listener->priority >= QTAILQ_LAST(&as->listeners)->priority) {
3061 QTAILQ_INSERT_TAIL(&as->listeners, listener, link_as);
3062 } else {
3063 QTAILQ_FOREACH(other, &as->listeners, link_as) {
3064 if (listener->priority < other->priority) {
3065 break;
3066 }
3067 }
3068 QTAILQ_INSERT_BEFORE(other, listener, link_as);
3069 }
3070
3071 listener_add_address_space(listener, as);
3072}
3073
3074void memory_listener_unregister(MemoryListener *listener)
3075{
3076 if (!listener->address_space) {
3077 return;
3078 }
3079
3080 listener_del_address_space(listener, listener->address_space);
3081 QTAILQ_REMOVE(&memory_listeners, listener, link);
3082 QTAILQ_REMOVE(&listener->address_space->listeners, listener, link_as);
3083 listener->address_space = NULL;
3084}
3085
3086void address_space_remove_listeners(AddressSpace *as)
3087{
3088 while (!QTAILQ_EMPTY(&as->listeners)) {
3089 memory_listener_unregister(QTAILQ_FIRST(&as->listeners));
3090 }
3091}
3092
3093void address_space_init(AddressSpace *as, MemoryRegion *root, const char *name)
3094{
3095 memory_region_ref(root);
3096 as->root = root;
3097 as->current_map = NULL;
3098 as->ioeventfd_nb = 0;
3099 as->ioeventfds = NULL;
3100 QTAILQ_INIT(&as->listeners);
3101 QTAILQ_INSERT_TAIL(&address_spaces, as, address_spaces_link);
3102
3103
3104
3105
3106
3107
3108
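    /*
     * Fall back to the root region's canonical QOM path when the caller
     * does not supply a name.
     */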
3109    as->name = name ? g_strdup(name) : object_get_canonical_path(OBJECT(root));
3110 address_space_update_topology(as);
3111 address_space_update_ioeventfds(as);
3112}
3113
3114static void do_address_space_destroy(AddressSpace *as)
3115{
3116 assert(QTAILQ_EMPTY(&as->listeners));
3117
3118 flatview_unref(as->current_map);
3119 g_free(as->name);
3120 g_free(as->ioeventfds);
3121 memory_region_unref(as->root);
3122}
3123
3124
3125AddressSpace *address_space_init_shareable(MemoryRegion *root, const char *name)
3126{
3127 AddressSpace *as;
3128
3129 as = g_malloc0(sizeof *as);
3130 address_space_init(as, root, name);
3131 return as;
3132}
3133
3134void address_space_destroy(AddressSpace *as)
3135{
3136 MemoryRegion *root = as->root;
3137
3138
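    /* Unmap everything so listeners see the address space drain away. */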
3139 memory_region_transaction_begin();
3140 as->root = NULL;
3141 memory_region_transaction_commit();
3142 QTAILQ_REMOVE(&address_spaces, as, address_spaces_link);
3143
3144
3145
3146
3147
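    /*
     * Keep the root referenced until the RCU grace period has elapsed;
     * readers may still be walking the old FlatView.
     */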
3148 as->root = root;
3149 call_rcu(as, do_address_space_destroy, rcu);
3150}
3151
3152static const char *memory_region_type(MemoryRegion *mr)
3153{
3154 if (mr->alias) {
3155 return memory_region_type(mr->alias);
3156 }
3157 if (memory_region_is_ram_device(mr)) {
3158 return "ramd";
3159 } else if (memory_region_is_romd(mr)) {
3160 return "romd";
3161 } else if (memory_region_is_rom(mr)) {
3162 return "rom";
3163 } else if (memory_region_is_ram(mr)) {
3164 return "ram";
3165 } else {
3166 return "i/o";
3167 }
3168}
3169
3170typedef struct MemoryRegionList MemoryRegionList;
3171
3172struct MemoryRegionList {
3173 const MemoryRegion *mr;
3174 QTAILQ_ENTRY(MemoryRegionList) mrqueue;
3175};
3176
3177typedef QTAILQ_HEAD(, MemoryRegionList) MemoryRegionListHead;
3178
3179#define MR_SIZE(size) (int128_nz(size) ? (hwaddr)int128_get64( \
3180 int128_sub((size), int128_one())) : 0)
3181#define MTREE_INDENT " "
3182
3183static void mtree_expand_owner(const char *label, Object *obj)
3184{
3185 DeviceState *dev = (DeviceState *) object_dynamic_cast(obj, TYPE_DEVICE);
3186
3187 qemu_printf(" %s:{%s", label, dev ? "dev" : "obj");
3188 if (dev && dev->id) {
3189 qemu_printf(" id=%s", dev->id);
3190 } else {
3191 char *canonical_path = object_get_canonical_path(obj);
3192 if (canonical_path) {
3193 qemu_printf(" path=%s", canonical_path);
3194 g_free(canonical_path);
3195 } else {
3196 qemu_printf(" type=%s", object_get_typename(obj));
3197 }
3198 }
3199 qemu_printf("}");
3200}
3201
3202static void mtree_print_mr_owner(const MemoryRegion *mr)
3203{
3204 Object *owner = mr->owner;
3205 Object *parent = memory_region_owner((MemoryRegion *)mr);
3206
3207 if (!owner && !parent) {
3208 qemu_printf(" orphan");
3209 return;
3210 }
3211 if (owner) {
3212 mtree_expand_owner("owner", owner);
3213 }
3214 if (parent && parent != owner) {
3215 mtree_expand_owner("parent", parent);
3216 }
3217}
3218
3219static void mtree_print_mr(const MemoryRegion *mr, unsigned int level,
3220 hwaddr base,
3221 MemoryRegionListHead *alias_print_queue,
3222 bool owner, bool display_disabled)
3223{
3224 MemoryRegionList *new_ml, *ml, *next_ml;
3225 MemoryRegionListHead submr_print_queue;
3226 const MemoryRegion *submr;
3227 unsigned int i;
3228 hwaddr cur_start, cur_end;
3229
3230 if (!mr) {
3231 return;
3232 }
3233
3234 cur_start = base + mr->addr;
3235 cur_end = cur_start + MR_SIZE(mr->size);
3236
3237
3238
3239
3240
3241
3242 if (cur_start < base || cur_end < cur_start) {
3243 qemu_printf("[DETECTED OVERFLOW!] ");
3244 }
3245
3246 if (mr->alias) {
3247 MemoryRegionList *ml;
3248 bool found = false;
3249
3250
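        /* Check whether the alias target is already queued for printing. */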
3251 QTAILQ_FOREACH(ml, alias_print_queue, mrqueue) {
3252 if (ml->mr == mr->alias) {
3253 found = true;
3254 }
3255 }
3256
3257 if (!found) {
3258 ml = g_new(MemoryRegionList, 1);
3259 ml->mr = mr->alias;
3260 QTAILQ_INSERT_TAIL(alias_print_queue, ml, mrqueue);
3261 }
3262 if (mr->enabled || display_disabled) {
3263 for (i = 0; i < level; i++) {
3264 qemu_printf(MTREE_INDENT);
3265 }
3266 qemu_printf(TARGET_FMT_plx "-" TARGET_FMT_plx
3267 " (prio %d, %s%s): alias %s @%s " TARGET_FMT_plx
3268 "-" TARGET_FMT_plx "%s",
3269 cur_start, cur_end,
3270 mr->priority,
3271 mr->nonvolatile ? "nv-" : "",
3272 memory_region_type((MemoryRegion *)mr),
3273 memory_region_name(mr),
3274 memory_region_name(mr->alias),
3275 mr->alias_offset,
3276 mr->alias_offset + MR_SIZE(mr->size),
3277 mr->enabled ? "" : " [disabled]");
3278 if (owner) {
3279 mtree_print_mr_owner(mr);
3280 }
3281 qemu_printf("\n");
3282 }
3283 } else {
3284 if (mr->enabled || display_disabled) {
3285 for (i = 0; i < level; i++) {
3286 qemu_printf(MTREE_INDENT);
3287 }
3288 qemu_printf(TARGET_FMT_plx "-" TARGET_FMT_plx
3289 " (prio %d, %s%s): %s%s",
3290 cur_start, cur_end,
3291 mr->priority,
3292 mr->nonvolatile ? "nv-" : "",
3293 memory_region_type((MemoryRegion *)mr),
3294 memory_region_name(mr),
3295 mr->enabled ? "" : " [disabled]");
3296 if (owner) {
3297 mtree_print_mr_owner(mr);
3298 }
3299 qemu_printf("\n");
3300 }
3301 }
3302
3303 QTAILQ_INIT(&submr_print_queue);
3304
3305 QTAILQ_FOREACH(submr, &mr->subregions, subregions_link) {
3306 new_ml = g_new(MemoryRegionList, 1);
3307 new_ml->mr = submr;
3308 QTAILQ_FOREACH(ml, &submr_print_queue, mrqueue) {
3309 if (new_ml->mr->addr < ml->mr->addr ||
3310 (new_ml->mr->addr == ml->mr->addr &&
3311 new_ml->mr->priority > ml->mr->priority)) {
3312 QTAILQ_INSERT_BEFORE(ml, new_ml, mrqueue);
3313 new_ml = NULL;
3314 break;
3315 }
3316 }
3317 if (new_ml) {
3318 QTAILQ_INSERT_TAIL(&submr_print_queue, new_ml, mrqueue);
3319 }
3320 }
3321
3322 QTAILQ_FOREACH(ml, &submr_print_queue, mrqueue) {
3323 mtree_print_mr(ml->mr, level + 1, cur_start,
3324 alias_print_queue, owner, display_disabled);
3325 }
3326
3327 QTAILQ_FOREACH_SAFE(ml, &submr_print_queue, mrqueue, next_ml) {
3328 g_free(ml);
3329 }
3330}
3331
3332struct FlatViewInfo {
3333 int counter;
3334 bool dispatch_tree;
3335 bool owner;
3336 AccelClass *ac;
3337};
3338
3339static void mtree_print_flatview(gpointer key, gpointer value,
3340 gpointer user_data)
3341{
3342 FlatView *view = key;
3343 GArray *fv_address_spaces = value;
3344 struct FlatViewInfo *fvi = user_data;
3345 FlatRange *range = &view->ranges[0];
3346 MemoryRegion *mr;
3347 int n = view->nr;
3348 int i;
3349 AddressSpace *as;
3350
3351 qemu_printf("FlatView #%d\n", fvi->counter);
3352 ++fvi->counter;
3353
3354 for (i = 0; i < fv_address_spaces->len; ++i) {
3355 as = g_array_index(fv_address_spaces, AddressSpace*, i);
3356 qemu_printf(" AS \"%s\", root: %s",
3357 as->name, memory_region_name(as->root));
3358 if (as->root->alias) {
3359 qemu_printf(", alias %s", memory_region_name(as->root->alias));
3360 }
3361 qemu_printf("\n");
3362 }
3363
3364 qemu_printf(" Root memory region: %s\n",
3365 view->root ? memory_region_name(view->root) : "(none)");
3366
3367 if (n <= 0) {
3368 qemu_printf(MTREE_INDENT "No rendered FlatView\n\n");
3369 return;
3370 }
3371
3372 while (n--) {
3373 mr = range->mr;
3374 if (range->offset_in_region) {
3375 qemu_printf(MTREE_INDENT TARGET_FMT_plx "-" TARGET_FMT_plx
3376 " (prio %d, %s%s): %s @" TARGET_FMT_plx,
3377 int128_get64(range->addr.start),
3378 int128_get64(range->addr.start)
3379 + MR_SIZE(range->addr.size),
3380 mr->priority,
3381 range->nonvolatile ? "nv-" : "",
3382 range->readonly ? "rom" : memory_region_type(mr),
3383 memory_region_name(mr),
3384 range->offset_in_region);
3385 } else {
3386 qemu_printf(MTREE_INDENT TARGET_FMT_plx "-" TARGET_FMT_plx
3387 " (prio %d, %s%s): %s",
3388 int128_get64(range->addr.start),
3389 int128_get64(range->addr.start)
3390 + MR_SIZE(range->addr.size),
3391 mr->priority,
3392 range->nonvolatile ? "nv-" : "",
3393 range->readonly ? "rom" : memory_region_type(mr),
3394 memory_region_name(mr));
3395 }
3396 if (fvi->owner) {
3397 mtree_print_mr_owner(mr);
3398 }
3399
3400 if (fvi->ac) {
3401 for (i = 0; i < fv_address_spaces->len; ++i) {
3402 as = g_array_index(fv_address_spaces, AddressSpace*, i);
3403 if (fvi->ac->has_memory(current_machine, as,
3404 int128_get64(range->addr.start),
3405 MR_SIZE(range->addr.size) + 1)) {
3406 qemu_printf(" %s", fvi->ac->name);
3407 }
3408 }
3409 }
3410 qemu_printf("\n");
3411 range++;
3412 }
3413
3414#if !defined(CONFIG_USER_ONLY)
3415 if (fvi->dispatch_tree && view->root) {
3416 mtree_print_dispatch(view->dispatch, view->root);
3417 }
3418#endif
3419
3420 qemu_printf("\n");
3421}
3422
3423static gboolean mtree_info_flatview_free(gpointer key, gpointer value,
3424 gpointer user_data)
3425{
3426 FlatView *view = key;
3427 GArray *fv_address_spaces = value;
3428
3429 g_array_unref(fv_address_spaces);
3430 flatview_unref(view);
3431
3432 return true;
3433}
3434
3435void mtree_info(bool flatview, bool dispatch_tree, bool owner, bool disabled)
3436{
3437 MemoryRegionListHead ml_head;
3438 MemoryRegionList *ml, *ml2;
3439 AddressSpace *as;
3440
3441 if (flatview) {
3442 FlatView *view;
3443 struct FlatViewInfo fvi = {
3444 .counter = 0,
3445 .dispatch_tree = dispatch_tree,
3446 .owner = owner,
3447 };
3448 GArray *fv_address_spaces;
3449 GHashTable *views = g_hash_table_new(g_direct_hash, g_direct_equal);
3450 AccelClass *ac = ACCEL_GET_CLASS(current_accel());
3451
3452 if (ac->has_memory) {
3453 fvi.ac = ac;
3454 }
3455
3456
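        /* Gather every unique FlatView together with the ASes that use it. */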
3457 QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
3458 view = address_space_get_flatview(as);
3459
3460 fv_address_spaces = g_hash_table_lookup(views, view);
3461 if (!fv_address_spaces) {
3462 fv_address_spaces = g_array_new(false, false, sizeof(as));
3463 g_hash_table_insert(views, view, fv_address_spaces);
3464 }
3465
3466 g_array_append_val(fv_address_spaces, as);
3467 }
3468
3469
3470 g_hash_table_foreach(views, mtree_print_flatview, &fvi);
3471
3472
3473 g_hash_table_foreach_remove(views, mtree_info_flatview_free, 0);
3474 g_hash_table_unref(views);
3475
3476 return;
3477 }
3478
3479 QTAILQ_INIT(&ml_head);
3480
3481 QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
3482 qemu_printf("address-space: %s\n", as->name);
3483 mtree_print_mr(as->root, 1, 0, &ml_head, owner, disabled);
3484 qemu_printf("\n");
3485 }
3486
3487
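    /* Print the regions that were queued up as alias targets above. */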
3488 QTAILQ_FOREACH(ml, &ml_head, mrqueue) {
3489 qemu_printf("memory-region: %s\n", memory_region_name(ml->mr));
3490 mtree_print_mr(ml->mr, 1, 0, &ml_head, owner, disabled);
3491 qemu_printf("\n");
3492 }
3493
3494 QTAILQ_FOREACH_SAFE(ml, &ml_head, mrqueue, ml2) {
3495 g_free(ml);
3496 }
3497}
3498
3499static bool memory_region_parse_reg(FDTGenericMMap *obj,
3500 FDTGenericRegPropInfo reg, Error **errp)
3501{
3502 MemoryRegion *mr = MEMORY_REGION(obj);
3503 uint64_t base_addr = ~0ull;
3504 uint64_t total_size = 0;
3505 uint64_t max_addr = 0;
3506 int i;
3507
3508 if (!reg.n) {
3509 return false;
3510 }
3511
3512 for (i = 0; i < reg.n; ++i) {
3513 base_addr = MIN(base_addr, reg.a[i]);
3514 max_addr = MAX(max_addr, reg.a[i] + reg.s[i]);
3515 total_size += reg.s[i];
3516 if (reg.p[i] != reg.p[0]) {
3517            error_setg(errp, "FDT generic memory parser does not support "
3518                       "mixed priorities");
3519 return false;
3520 }
3521 }
3522
3523    if (total_size != max_addr - base_addr) {
3524        error_setg(errp, "FDT generic memory parser does not "
3525                   "support discontiguous or overlapping memory regions");
3526        return false;
3527    }
3528
3529
3530
3531
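    /*
     * Map the merged reg window onto this memory region's QOM properties,
     * optionally reparenting it under the first parent given in the FDT.
     */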
3532 if (reg.parents[0]) {
3533 object_property_set_link(OBJECT(mr), "container", reg.parents[0], &error_abort);
3534 }
3535 object_property_set_int(OBJECT(mr), "size", total_size, &error_abort);
3536 object_property_set_int(OBJECT(mr), "addr", base_addr, &error_abort);
3537 object_property_set_int(OBJECT(mr), "priority", reg.p[0], &error_abort);
3538 return false;
3539}
3540
3541static void memory_region_class_init(ObjectClass *oc, void *data)
3542{
3543 FDTGenericMMapClass *fmc = FDT_GENERIC_MMAP_CLASS(oc);
3544
3545 fmc->parse_reg = memory_region_parse_reg;
3546}
3547
3548void memory_region_init_ram(MemoryRegion *mr,
3549 struct Object *owner,
3550 const char *name,
3551 uint64_t size,
3552 Error **errp)
3553{
3554 DeviceState *owner_dev;
3555 Error *err = NULL;
3556
3557 memory_region_init_ram_nomigrate(mr, owner, name, size, &err);
3558 if (err) {
3559 error_propagate(errp, err);
3560 return;
3561 }
3562
3563
3564
3565
3566
3567
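    /*
     * DEVICE() asserts if the owner is not a DeviceState; the owner is only
     * needed here to derive a unique name for RAM migration.
     */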
3568 owner_dev = DEVICE(owner);
3569 vmstate_register_ram(mr, owner_dev);
3570}
3571
3572void memory_region_init_rom(MemoryRegion *mr,
3573 struct Object *owner,
3574 const char *name,
3575 uint64_t size,
3576 Error **errp)
3577{
3578 DeviceState *owner_dev;
3579 Error *err = NULL;
3580
3581 memory_region_init_rom_nomigrate(mr, owner, name, size, &err);
3582 if (err) {
3583 error_propagate(errp, err);
3584 return;
3585 }
3586
3587
3588
3589
3590
3591
3592 owner_dev = DEVICE(owner);
3593 vmstate_register_ram(mr, owner_dev);
3594}
3595
3596void memory_region_init_rom_device(MemoryRegion *mr,
3597 struct Object *owner,
3598 const MemoryRegionOps *ops,
3599 void *opaque,
3600 const char *name,
3601 uint64_t size,
3602 Error **errp)
3603{
3604 DeviceState *owner_dev;
3605 Error *err = NULL;
3606
3607 memory_region_init_rom_device_nomigrate(mr, owner, ops, opaque,
3608 name, size, &err);
3609 if (err) {
3610 error_propagate(errp, err);
3611 return;
3612 }
3613
3614
3615
3616
3617
3618
3619 owner_dev = DEVICE(owner);
3620 vmstate_register_ram(mr, owner_dev);
3621}
3622
3623static const TypeInfo memory_region_info = {
3624 .parent = TYPE_OBJECT,
3625 .name = TYPE_MEMORY_REGION,
3626 .class_size = sizeof(MemoryRegionClass),
3627 .instance_size = sizeof(MemoryRegion),
3628 .instance_init = memory_region_initfn,
3629 .instance_finalize = memory_region_finalize,
3630 .class_init = memory_region_class_init,
3631 .interfaces = (InterfaceInfo[]) {
3632 { TYPE_FDT_GENERIC_MMAP },
3633 { },
3634 },
3635};
3636
3637static bool memory_transaction_attr_get_secure(Object *obj, Error **errp)
3638{
3639 MemTxAttrs *mattr = MEMORY_TRANSACTION_ATTR(obj);
3640 return mattr->secure;
3641}
3642
3643static void memory_transaction_attr_set_secure(Object *obj, bool value,
3644 Error **errp)
3645{
3646 MemTxAttrs *mattr = MEMORY_TRANSACTION_ATTR(obj);
3647 mattr->secure = value;
3648}
3649
3650static void mattr_get_requester_id(Object *obj, Visitor *v, const char *name,
3651 void *opaque, Error **errp)
3652{
3653 MemTxAttrs *mattr = MEMORY_TRANSACTION_ATTR(obj);
3654 uint16_t value = mattr->requester_id;
3655
3656 visit_type_uint16(v, name, &value, errp);
3657}
3658
3659
3660static void mattr_set_requester_id(Object *obj, Visitor *v, const char *name,
3661 void *opaque, Error **errp)
3662{
3663 MemTxAttrs *mattr = MEMORY_TRANSACTION_ATTR(obj);
3664 Error *local_err = NULL;
3665 uint16_t value;
3666
3667    visit_type_uint16(v, name, &value, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }
3668    mattr->requester_id = value;
3669}
3670
3671static void mattr_set_master_id(Object *obj, Visitor *v, const char *name,
3672 void *opaque, Error **errp)
3673{
3674 gchar *path = object_get_canonical_path(obj);
3675
3676 qemu_log("WARNING: %s: The %s property will be deprecated.\n", path, name);
3677 g_free(path);
3678 mattr_set_requester_id(obj, v, name, opaque, errp);
3679}
3680
3681
3682static void memory_transaction_attr_initfn(Object *obj)
3683{
3684 MemTxAttrs *mattr = MEMORY_TRANSACTION_ATTR(obj);
3685
3686 object_property_add_bool(OBJECT(mattr), "secure",
3687 memory_transaction_attr_get_secure,
3688 memory_transaction_attr_set_secure);
3689 object_property_add(OBJECT(mattr), "requester-id", "uint16",
3690 mattr_get_requester_id,
3691 mattr_set_requester_id,
3692 NULL, NULL);
3693
3694 object_property_add(OBJECT(mattr), "master-id", "uint16",
3695 mattr_get_requester_id,
3696 mattr_set_master_id,
3697 NULL, NULL);
3698}
3699
3700static const TypeInfo memory_transaction_attr_info = {
3701 .parent = TYPE_OBJECT,
3702 .name = TYPE_MEMORY_TRANSACTION_ATTR,
3703 .instance_size = sizeof(MemTxAttrs),
3704 .instance_init = memory_transaction_attr_initfn,
3705 .interfaces = (InterfaceInfo[]) {
3706 { TYPE_FDT_GENERIC_MMAP },
3707 { },
3708 },
3709};
3710
3711static const TypeInfo iommu_memory_region_info = {
3712 .parent = TYPE_MEMORY_REGION,
3713 .name = TYPE_IOMMU_MEMORY_REGION,
3714 .class_size = sizeof(IOMMUMemoryRegionClass),
3715 .instance_size = sizeof(IOMMUMemoryRegion),
3716 .instance_init = iommu_memory_region_initfn,
3717 .abstract = true,
3718};
3719
3720static void memory_register_types(void)
3721{
3722 type_register_static(&memory_region_info);
3723 type_register_static(&memory_transaction_attr_info);
3724 type_register_static(&iommu_memory_region_info);
3725}
3726
3727type_init(memory_register_types)
3728