/*
 * Physical memory management
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates
 *
 * Authors:
 *  Avi Kivity <avi@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */
#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu-common.h"
#include "cpu.h"
#include "exec/memory.h"
#include "exec/address-spaces.h"
#include "exec/ioport.h"
#include "qapi/visitor.h"
#include "qemu/bitops.h"
#include "qemu/error-report.h"
#include "qom/object.h"
#include "trace-root.h"

#include "exec/memory-internal.h"
#include "exec/ram_addr.h"
#include "sysemu/kvm.h"
#include "sysemu/sysemu.h"
#include "hw/misc/mmio_interface.h"
#include "hw/qdev-properties.h"
#include "migration/vmstate.h"

#include "hw/fdt_generic_util.h"

static unsigned memory_region_transaction_depth;
static bool memory_region_update_pending;
static bool ioeventfd_update_pending;
static bool global_dirty_log = false;

static QTAILQ_HEAD(memory_listeners, MemoryListener) memory_listeners
    = QTAILQ_HEAD_INITIALIZER(memory_listeners);

static QTAILQ_HEAD(, AddressSpace) address_spaces
    = QTAILQ_HEAD_INITIALIZER(address_spaces);

static GHashTable *flat_views;

typedef struct AddrRange AddrRange;

static void memory_region_update_container_subregions(MemoryRegion *subregion);
static void memory_region_readd_subregion(MemoryRegion *mr);

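/*
 * Note that signed integers are needed for negative offsetting in aliases
 * (large MemoryRegion::alias_offset).
 */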
struct AddrRange {
    Int128 start;
    Int128 size;
};

static AddrRange addrrange_make(Int128 start, Int128 size)
{
    return (AddrRange) { start, size };
}

static bool addrrange_equal(AddrRange r1, AddrRange r2)
{
    return int128_eq(r1.start, r2.start) && int128_eq(r1.size, r2.size);
}

static Int128 addrrange_end(AddrRange r)
{
    return int128_add(r.start, r.size);
}

static AddrRange addrrange_shift(AddrRange range, Int128 delta)
{
    int128_addto(&range.start, delta);
    return range;
}

static bool addrrange_contains(AddrRange range, Int128 addr)
{
    return int128_ge(addr, range.start)
        && int128_lt(addr, addrrange_end(range));
}

static bool addrrange_intersects(AddrRange r1, AddrRange r2)
{
    return addrrange_contains(r1, r2.start)
        || addrrange_contains(r2, r1.start);
}

static AddrRange addrrange_intersection(AddrRange r1, AddrRange r2)
{
    Int128 start = int128_max(r1.start, r2.start);
    Int128 end = int128_min(addrrange_end(r1), addrrange_end(r2));
    return addrrange_make(start, int128_sub(end, start));
}

enum ListenerDirection { Forward, Reverse };

#define MEMORY_LISTENER_CALL_GLOBAL(_callback, _direction, _args...)    \
    do {                                                                \
        MemoryListener *_listener;                                      \
                                                                        \
        switch (_direction) {                                           \
        case Forward:                                                   \
            QTAILQ_FOREACH(_listener, &memory_listeners, link) {        \
                if (_listener->_callback) {                             \
                    _listener->_callback(_listener, ##_args);           \
                }                                                       \
            }                                                           \
            break;                                                      \
        case Reverse:                                                   \
            QTAILQ_FOREACH_REVERSE(_listener, &memory_listeners,        \
                                   memory_listeners, link) {            \
                if (_listener->_callback) {                             \
                    _listener->_callback(_listener, ##_args);           \
                }                                                       \
            }                                                           \
            break;                                                      \
        default:                                                        \
            abort();                                                    \
        }                                                               \
    } while (0)

#define MEMORY_LISTENER_CALL(_as, _callback, _direction, _section, _args...) \
    do {                                                                \
        MemoryListener *_listener;                                      \
        struct memory_listeners_as *list = &(_as)->listeners;           \
                                                                        \
        switch (_direction) {                                           \
        case Forward:                                                   \
            QTAILQ_FOREACH(_listener, list, link_as) {                  \
                if (_listener->_callback) {                             \
                    _listener->_callback(_listener, _section, ##_args); \
                }                                                       \
            }                                                           \
            break;                                                      \
        case Reverse:                                                   \
            QTAILQ_FOREACH_REVERSE(_listener, list, memory_listeners_as, \
                                   link_as) {                           \
                if (_listener->_callback) {                             \
                    _listener->_callback(_listener, _section, ##_args); \
                }                                                       \
            }                                                           \
            break;                                                      \
        default:                                                        \
            abort();                                                    \
        }                                                               \
    } while (0)

#define MEMORY_LISTENER_UPDATE_REGION(fr, as, dir, callback, _args...)  \
    do {                                                                \
        MemoryRegionSection mrs = section_from_flat_range(fr,           \
                address_space_to_flatview(as));                         \
        MEMORY_LISTENER_CALL(as, callback, dir, &mrs, ##_args);         \
    } while (0)

struct CoalescedMemoryRange {
    AddrRange addr;
    QTAILQ_ENTRY(CoalescedMemoryRange) link;
};

struct MemoryRegionIoeventfd {
    AddrRange addr;
    bool match_data;
    uint64_t data;
    EventNotifier *e;
};

static bool memory_region_ioeventfd_before(MemoryRegionIoeventfd a,
                                           MemoryRegionIoeventfd b)
{
    if (int128_lt(a.addr.start, b.addr.start)) {
        return true;
    } else if (int128_gt(a.addr.start, b.addr.start)) {
        return false;
    } else if (int128_lt(a.addr.size, b.addr.size)) {
        return true;
    } else if (int128_gt(a.addr.size, b.addr.size)) {
        return false;
    } else if (a.match_data < b.match_data) {
        return true;
    } else if (a.match_data > b.match_data) {
        return false;
    } else if (a.match_data) {
        if (a.data < b.data) {
            return true;
        } else if (a.data > b.data) {
            return false;
        }
    }
    if (a.e < b.e) {
        return true;
    } else if (a.e > b.e) {
        return false;
    }
    return false;
}

static bool memory_region_ioeventfd_equal(MemoryRegionIoeventfd a,
                                          MemoryRegionIoeventfd b)
{
    return !memory_region_ioeventfd_before(a, b)
        && !memory_region_ioeventfd_before(b, a);
}

typedef struct FlatRange FlatRange;

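/* Range of memory in the global map.  Addresses are absolute. */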
struct FlatRange {
    MemoryRegion *mr;
    hwaddr offset_in_region;
    AddrRange addr;
    uint8_t dirty_log_mask;
    bool romd_mode;
    bool readonly;
};

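/* Flattened global view of current active memory hierarchy.  Kept in sorted
 * order.
 */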
struct FlatView {
    struct rcu_head rcu;
    unsigned ref;
    FlatRange *ranges;
    unsigned nr;
    unsigned nr_allocated;
    struct AddressSpaceDispatch *dispatch;
    MemoryRegion *root;
};

typedef struct AddressSpaceOps AddressSpaceOps;

#define FOR_EACH_FLAT_RANGE(var, view)          \
    for (var = (view)->ranges; var < (view)->ranges + (view)->nr; ++var)

static inline MemoryRegionSection
section_from_flat_range(FlatRange *fr, FlatView *fv)
{
    return (MemoryRegionSection) {
        .mr = fr->mr,
        .fv = fv,
        .offset_within_region = fr->offset_in_region,
        .size = fr->addr.size,
        .offset_within_address_space = int128_get64(fr->addr.start),
        .readonly = fr->readonly,
    };
}

static bool flatrange_equal(FlatRange *a, FlatRange *b)
{
    return a->mr == b->mr
        && addrrange_equal(a->addr, b->addr)
        && a->offset_in_region == b->offset_in_region
        && a->romd_mode == b->romd_mode
        && a->readonly == b->readonly;
}

static FlatView *flatview_new(MemoryRegion *mr_root)
{
    FlatView *view;

    view = g_new0(FlatView, 1);
    view->ref = 1;
    view->root = mr_root;
    memory_region_ref(mr_root);
    trace_flatview_new(view, mr_root);

    return view;
}

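/* Insert a range into a given position.  Caller is responsible for
 * maintaining sorting order.
 */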
static void flatview_insert(FlatView *view, unsigned pos, FlatRange *range)
{
    if (view->nr == view->nr_allocated) {
        view->nr_allocated = MAX(2 * view->nr, 10);
        view->ranges = g_realloc(view->ranges,
                                 view->nr_allocated * sizeof(*view->ranges));
    }
    memmove(view->ranges + pos + 1, view->ranges + pos,
            (view->nr - pos) * sizeof(FlatRange));
    view->ranges[pos] = *range;
    memory_region_ref(range->mr);
    ++view->nr;
}

static void flatview_destroy(FlatView *view)
{
    int i;

    trace_flatview_destroy(view, view->root);
    if (view->dispatch) {
        address_space_dispatch_free(view->dispatch);
    }
    for (i = 0; i < view->nr; i++) {
        memory_region_unref(view->ranges[i].mr);
    }
    g_free(view->ranges);
    memory_region_unref(view->root);
    g_free(view);
}

static bool flatview_ref(FlatView *view)
{
    return atomic_fetch_inc_nonzero(&view->ref) > 0;
}

static void flatview_unref(FlatView *view)
{
    if (atomic_fetch_dec(&view->ref) == 1) {
        trace_flatview_destroy_rcu(view, view->root);
        assert(view->root);
        call_rcu(view, flatview_destroy, rcu);
    }
}

FlatView *address_space_to_flatview(AddressSpace *as)
{
    return atomic_rcu_read(&as->current_map);
}

AddressSpaceDispatch *flatview_to_dispatch(FlatView *fv)
{
    return fv->dispatch;
}

AddressSpaceDispatch *address_space_to_dispatch(AddressSpace *as)
{
    return flatview_to_dispatch(address_space_to_flatview(as));
}

static bool can_merge(FlatRange *r1, FlatRange *r2)
{
    return int128_eq(addrrange_end(r1->addr), r2->addr.start)
        && r1->mr == r2->mr
        && int128_eq(int128_add(int128_make64(r1->offset_in_region),
                                r1->addr.size),
                     int128_make64(r2->offset_in_region))
        && r1->dirty_log_mask == r2->dirty_log_mask
        && r1->romd_mode == r2->romd_mode
        && r1->readonly == r2->readonly;
}

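/* Attempt to simplify a view by merging adjacent ranges */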
static void flatview_simplify(FlatView *view)
{
    unsigned i, j;

    i = 0;
    while (i < view->nr) {
        j = i + 1;
        while (j < view->nr
               && can_merge(&view->ranges[j-1], &view->ranges[j])) {
            int128_addto(&view->ranges[i].addr.size, view->ranges[j].addr.size);
            ++j;
        }
        ++i;
        memmove(&view->ranges[i], &view->ranges[j],
                (view->nr - j) * sizeof(view->ranges[j]));
        view->nr -= j - i;
    }
}

static bool memory_region_big_endian(MemoryRegion *mr)
{
#ifdef TARGET_WORDS_BIGENDIAN
    return mr->ops->endianness != DEVICE_LITTLE_ENDIAN;
#else
    return mr->ops->endianness == DEVICE_BIG_ENDIAN;
#endif
}

static bool memory_region_wrong_endianness(MemoryRegion *mr)
{
#ifdef TARGET_WORDS_BIGENDIAN
    return mr->ops->endianness == DEVICE_LITTLE_ENDIAN;
#else
    return mr->ops->endianness == DEVICE_BIG_ENDIAN;
#endif
}

static void adjust_endianness(MemoryRegion *mr, uint64_t *data, unsigned size)
{
    if (memory_region_wrong_endianness(mr)) {
        switch (size) {
        case 1:
            break;
        case 2:
            *data = bswap16(*data);
            break;
        case 4:
            *data = bswap32(*data);
            break;
        case 8:
            *data = bswap64(*data);
            break;
        default:
            abort();
        }
    }
}

static hwaddr memory_region_to_absolute_addr(MemoryRegion *mr, hwaddr offset)
{
    MemoryRegion *root;
    hwaddr abs_addr = offset;

    abs_addr += mr->addr;
    for (root = mr; root->container; ) {
        root = root->container;
        abs_addr += root->addr;
    }

    return abs_addr;
}

static int get_cpu_index(void)
{
    if (current_cpu) {
        return current_cpu->cpu_index;
    }
    return -1;
}

static MemTxResult memory_region_oldmmio_read_accessor(MemoryRegion *mr,
                                                       hwaddr addr,
                                                       uint64_t *value,
                                                       unsigned size,
                                                       unsigned shift,
                                                       uint64_t mask,
                                                       MemTxAttrs attrs)
{
    uint64_t tmp;

    tmp = mr->ops->old_mmio.read[ctz32(size)](mr->opaque, addr);
    if (mr->subpage) {
        trace_memory_region_subpage_read(get_cpu_index(), mr, addr, tmp, size);
    } else if (mr == &io_mem_notdirty) {
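        /* Accesses to code which has previously been translated into a TB
         * show up in the MMIO path, as accesses to the io_mem_notdirty
         * MemoryRegion. */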
        trace_memory_region_tb_read(get_cpu_index(), addr, tmp, size);
    } else if (TRACE_MEMORY_REGION_OPS_READ_ENABLED) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_read(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    *value |= (tmp & mask) << shift;
    return MEMTX_OK;
}

static MemTxResult memory_region_read_accessor_attr(MemoryRegion *mr,
                                                    hwaddr addr,
                                                    uint64_t *value,
                                                    unsigned size,
                                                    unsigned shift,
                                                    uint64_t mask,
                                                    MemTxAttrs attrs)
{
    MemoryTransaction tr = {{0}};

    if (mr->flush_coalesced_mmio) {
        qemu_flush_coalesced_mmio_buffer();
    }

    tr.opaque = mr->opaque;
    tr.addr = addr;
    tr.size = size;
    tr.attr = attrs;
    mr->ops->access(&tr);
    *value |= (tr.data.u64 & mask) << shift;

    return MEMTX_OK;
}

static MemTxResult memory_region_read_accessor(MemoryRegion *mr,
                                               hwaddr addr,
                                               uint64_t *value,
                                               unsigned size,
                                               unsigned shift,
                                               uint64_t mask,
                                               MemTxAttrs attrs)
{
    uint64_t tmp;

    tmp = mr->ops->read(mr->opaque, addr, size);
    if (mr->subpage) {
        trace_memory_region_subpage_read(get_cpu_index(), mr, addr, tmp, size);
    } else if (mr == &io_mem_notdirty) {
        trace_memory_region_tb_read(get_cpu_index(), addr, tmp, size);
    } else if (TRACE_MEMORY_REGION_OPS_READ_ENABLED) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_read(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    *value |= (tmp & mask) << shift;
    return MEMTX_OK;
}

static MemTxResult memory_region_read_with_attrs_accessor(MemoryRegion *mr,
                                                          hwaddr addr,
                                                          uint64_t *value,
                                                          unsigned size,
                                                          unsigned shift,
                                                          uint64_t mask,
                                                          MemTxAttrs attrs)
{
    uint64_t tmp = 0;
    MemTxResult r;

    r = mr->ops->read_with_attrs(mr->opaque, addr, &tmp, size, attrs);
    if (mr->subpage) {
        trace_memory_region_subpage_read(get_cpu_index(), mr, addr, tmp, size);
    } else if (mr == &io_mem_notdirty) {
        trace_memory_region_tb_read(get_cpu_index(), addr, tmp, size);
    } else if (TRACE_MEMORY_REGION_OPS_READ_ENABLED) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_read(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    *value |= (tmp & mask) << shift;
    return r;
}

static MemTxResult memory_region_oldmmio_write_accessor(MemoryRegion *mr,
                                                        hwaddr addr,
                                                        uint64_t *value,
                                                        unsigned size,
                                                        unsigned shift,
                                                        uint64_t mask,
                                                        MemTxAttrs attrs)
{
    uint64_t tmp;

    tmp = (*value >> shift) & mask;
    if (mr->subpage) {
        trace_memory_region_subpage_write(get_cpu_index(), mr, addr, tmp, size);
    } else if (mr == &io_mem_notdirty) {
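        /* Accesses to code which has previously been translated into a TB
         * show up in the MMIO path, as accesses to the io_mem_notdirty
         * MemoryRegion. */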
        trace_memory_region_tb_write(get_cpu_index(), addr, tmp, size);
    } else if (TRACE_MEMORY_REGION_OPS_WRITE_ENABLED) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_write(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    mr->ops->old_mmio.write[ctz32(size)](mr->opaque, addr, tmp);
    return MEMTX_OK;
}

static MemTxResult memory_region_write_accessor_attr(MemoryRegion *mr,
                                                     hwaddr addr,
                                                     uint64_t *value,
                                                     unsigned size,
                                                     unsigned shift,
                                                     uint64_t mask,
                                                     MemTxAttrs attrs)
{
    MemoryTransaction tr = {{0}};

    if (mr->flush_coalesced_mmio) {
        qemu_flush_coalesced_mmio_buffer();
    }

    tr.opaque = mr->opaque;
    tr.rw = true;
    tr.addr = addr;
    tr.size = size;
    tr.attr = attrs;
    tr.data.u64 = (*value >> shift) & mask;
    trace_memory_region_ops_write(get_cpu_index(), mr, tr.addr, tr.data.u64,
                                  tr.size);
    mr->ops->access(&tr);

    return MEMTX_OK;
}

static MemTxResult memory_region_write_accessor(MemoryRegion *mr,
                                                hwaddr addr,
                                                uint64_t *value,
                                                unsigned size,
                                                unsigned shift,
                                                uint64_t mask,
                                                MemTxAttrs attrs)
{
    uint64_t tmp;

    tmp = (*value >> shift) & mask;
    if (mr->subpage) {
        trace_memory_region_subpage_write(get_cpu_index(), mr, addr, tmp, size);
    } else if (mr == &io_mem_notdirty) {
        trace_memory_region_tb_write(get_cpu_index(), addr, tmp, size);
    } else if (TRACE_MEMORY_REGION_OPS_WRITE_ENABLED) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_write(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    mr->ops->write(mr->opaque, addr, tmp, size);
    return MEMTX_OK;
}

static MemTxResult memory_region_write_with_attrs_accessor(MemoryRegion *mr,
                                                           hwaddr addr,
                                                           uint64_t *value,
                                                           unsigned size,
                                                           unsigned shift,
                                                           uint64_t mask,
                                                           MemTxAttrs attrs)
{
    uint64_t tmp;

    tmp = (*value >> shift) & mask;
    if (mr->subpage) {
        trace_memory_region_subpage_write(get_cpu_index(), mr, addr, tmp, size);
    } else if (mr == &io_mem_notdirty) {
        trace_memory_region_tb_write(get_cpu_index(), addr, tmp, size);
    } else if (TRACE_MEMORY_REGION_OPS_WRITE_ENABLED) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_write(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    return mr->ops->write_with_attrs(mr->opaque, addr, tmp, size, attrs);
}

static MemTxResult access_with_adjusted_size(hwaddr addr,
                                             uint64_t *value,
                                             unsigned size,
                                             unsigned access_size_min,
                                             unsigned access_size_max,
                                             MemTxResult (*access_fn)
                                                         (MemoryRegion *mr,
                                                          hwaddr addr,
                                                          uint64_t *value,
                                                          unsigned size,
                                                          unsigned shift,
                                                          uint64_t mask,
                                                          MemTxAttrs attrs),
                                             MemoryRegion *mr,
                                             MemTxAttrs attrs)
{
    uint64_t access_mask;
    unsigned access_size;
    unsigned i;
    MemTxResult r = MEMTX_OK;

    if (!access_size_min) {
        access_size_min = 1;
    }
    if (!access_size_max) {
        access_size_max = 4;
    }

    access_size = MAX(MIN(size, access_size_max), access_size_min);
    access_mask = -1ULL >> (64 - access_size * 8);
    if (memory_region_big_endian(mr)) {
        for (i = 0; i < size; i += access_size) {
            r |= access_fn(mr, addr + i, value, access_size,
                           (size - access_size - i) * 8, access_mask, attrs);
        }
    } else {
        for (i = 0; i < size; i += access_size) {
            r |= access_fn(mr, addr + i, value, access_size, i * 8,
                           access_mask, attrs);
        }
    }
    return r;
}

static AddressSpace *memory_region_to_address_space(MemoryRegion *mr)
{
    AddressSpace *as;

    while (mr->container) {
        mr = mr->container;
    }
    QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
        if (mr == as->root) {
            return as;
        }
    }
    return NULL;
}

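/* Render a memory region into the global view.  Ranges in @view obscure
 * ranges in @mr.
 */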
static void render_memory_region(FlatView *view,
                                 MemoryRegion *mr,
                                 Int128 base,
                                 AddrRange clip,
                                 bool readonly)
{
    MemoryRegion *subregion;
    unsigned i;
    hwaddr offset_in_region;
    Int128 remain;
    Int128 now;
    FlatRange fr;
    AddrRange tmp;

    if (!mr->enabled) {
        return;
    }

    int128_addto(&base, int128_make64(mr->addr));
    readonly |= mr->readonly;

    tmp = addrrange_make(base, mr->size);

    if (!addrrange_intersects(tmp, clip)) {
        return;
    }

    clip = addrrange_intersection(tmp, clip);

    if (mr->alias) {
        int128_subfrom(&base, int128_make64(mr->alias->addr));
        int128_subfrom(&base, int128_make64(mr->alias_offset));
        render_memory_region(view, mr->alias, base, clip, readonly);
        return;
    }

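    /* Render subregions in priority order. */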
    QTAILQ_FOREACH(subregion, &mr->subregions, subregions_link) {
        render_memory_region(view, subregion, base, clip, readonly);
    }

    if (!mr->terminates) {
        return;
    }

    offset_in_region = int128_get64(int128_sub(clip.start, base));
    base = clip.start;
    remain = clip.size;

    fr.mr = mr;
    fr.dirty_log_mask = memory_region_get_dirty_log_mask(mr);
    fr.romd_mode = mr->romd_mode;
    fr.readonly = readonly;

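    /* Render the region itself into any gaps left by the current view. */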
    for (i = 0; i < view->nr && int128_nz(remain); ++i) {
        if (int128_ge(base, addrrange_end(view->ranges[i].addr))) {
            continue;
        }
        if (int128_lt(base, view->ranges[i].addr.start)) {
            now = int128_min(remain,
                             int128_sub(view->ranges[i].addr.start, base));
            fr.offset_in_region = offset_in_region;
            fr.addr = addrrange_make(base, now);
            flatview_insert(view, i, &fr);
            ++i;
            int128_addto(&base, now);
            offset_in_region += int128_get64(now);
            int128_subfrom(&remain, now);
        }
        now = int128_sub(int128_min(int128_add(base, remain),
                                    addrrange_end(view->ranges[i].addr)),
                         base);
        int128_addto(&base, now);
        offset_in_region += int128_get64(now);
        int128_subfrom(&remain, now);
    }
    if (int128_nz(remain)) {
        fr.offset_in_region = offset_in_region;
        fr.addr = addrrange_make(base, remain);
        flatview_insert(view, i, &fr);
    }
}

static MemoryRegion *memory_region_get_flatview_root(MemoryRegion *mr)
{
    while (mr->enabled) {
        if (mr->alias) {
            if (!mr->alias_offset && int128_ge(mr->size, mr->alias->size)) {
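                /* The alias is included in its entirety.  Use it as
                 * the "real" root, so that we can share more FlatViews.
                 */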
                mr = mr->alias;
                continue;
            }
        } else if (!mr->terminates) {
            unsigned int found = 0;
            MemoryRegion *child, *next = NULL;
            QTAILQ_FOREACH(child, &mr->subregions, subregions_link) {
                if (child->enabled) {
                    if (++found > 1) {
                        next = NULL;
                        break;
                    }
                    if (!child->addr && int128_ge(mr->size, child->size)) {
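                        /* A child is included in its entirety.  If it's the
                         * only enabled one, use it in the hope of finding an
                         * alias down the way.  This will also let us share
                         * FlatViews.
                         */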
                        next = child;
                    }
                }
            }
            if (found == 0) {
                return NULL;
            }
            if (next) {
                mr = next;
                continue;
            }
        }

        return mr;
    }

    return NULL;
}

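/* Render a memory topology into a list of disjoint absolute ranges. */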
static FlatView *generate_memory_topology(MemoryRegion *mr)
{
    int i;
    FlatView *view;

    view = flatview_new(mr);

    if (mr) {
        render_memory_region(view, mr, int128_zero(),
                             addrrange_make(int128_zero(), int128_2_64()),
                             false);
    }
    flatview_simplify(view);

    view->dispatch = address_space_dispatch_new(view);
    for (i = 0; i < view->nr; i++) {
        MemoryRegionSection mrs =
            section_from_flat_range(&view->ranges[i], view);
        flatview_add_to_dispatch(view, &mrs);
    }
    address_space_dispatch_compact(view->dispatch);
    g_hash_table_replace(flat_views, mr, view);

    return view;
}

static void address_space_add_del_ioeventfds(AddressSpace *as,
                                             MemoryRegionIoeventfd *fds_new,
                                             unsigned fds_new_nb,
                                             MemoryRegionIoeventfd *fds_old,
                                             unsigned fds_old_nb)
{
    unsigned iold, inew;
    MemoryRegionIoeventfd *fd;
    MemoryRegionSection section;

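    /* Generate a symmetric difference of the old and new fd sets, adding
     * and deleting as necessary.
     */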
    iold = inew = 0;
    while (iold < fds_old_nb || inew < fds_new_nb) {
        if (iold < fds_old_nb
            && (inew == fds_new_nb
                || memory_region_ioeventfd_before(fds_old[iold],
                                                  fds_new[inew]))) {
            fd = &fds_old[iold];
            section = (MemoryRegionSection) {
                .fv = address_space_to_flatview(as),
                .offset_within_address_space = int128_get64(fd->addr.start),
                .size = fd->addr.size,
            };
            MEMORY_LISTENER_CALL(as, eventfd_del, Forward, &section,
                                 fd->match_data, fd->data, fd->e);
            ++iold;
        } else if (inew < fds_new_nb
                   && (iold == fds_old_nb
                       || memory_region_ioeventfd_before(fds_new[inew],
                                                         fds_old[iold]))) {
            fd = &fds_new[inew];
            section = (MemoryRegionSection) {
                .fv = address_space_to_flatview(as),
                .offset_within_address_space = int128_get64(fd->addr.start),
                .size = fd->addr.size,
            };
            MEMORY_LISTENER_CALL(as, eventfd_add, Reverse, &section,
                                 fd->match_data, fd->data, fd->e);
            ++inew;
        } else {
            ++iold;
            ++inew;
        }
    }
}

static FlatView *address_space_get_flatview(AddressSpace *as)
{
    FlatView *view;

    rcu_read_lock();
    do {
        view = address_space_to_flatview(as);
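        /* If somebody has replaced as->current_map concurrently,
         * flatview_ref returns false.
         */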
    } while (!flatview_ref(view));
    rcu_read_unlock();
    return view;
}

static void address_space_update_ioeventfds(AddressSpace *as)
{
    FlatView *view;
    FlatRange *fr;
    unsigned ioeventfd_nb = 0;
    MemoryRegionIoeventfd *ioeventfds = NULL;
    AddrRange tmp;
    unsigned i;

    view = address_space_get_flatview(as);
    FOR_EACH_FLAT_RANGE(fr, view) {
        for (i = 0; i < fr->mr->ioeventfd_nb; ++i) {
            tmp = addrrange_shift(fr->mr->ioeventfds[i].addr,
                                  int128_sub(fr->addr.start,
                                             int128_make64(fr->offset_in_region)));
            if (addrrange_intersects(fr->addr, tmp)) {
                ++ioeventfd_nb;
                ioeventfds = g_realloc(ioeventfds,
                                       ioeventfd_nb * sizeof(*ioeventfds));
                ioeventfds[ioeventfd_nb-1] = fr->mr->ioeventfds[i];
                ioeventfds[ioeventfd_nb-1].addr = tmp;
            }
        }
    }

    address_space_add_del_ioeventfds(as, ioeventfds, ioeventfd_nb,
                                     as->ioeventfds, as->ioeventfd_nb);

    g_free(as->ioeventfds);
    as->ioeventfds = ioeventfds;
    as->ioeventfd_nb = ioeventfd_nb;
    flatview_unref(view);
}

static void address_space_update_topology_pass(AddressSpace *as,
                                               const FlatView *old_view,
                                               const FlatView *new_view,
                                               bool adding)
{
    unsigned iold, inew;
    FlatRange *frold, *frnew;

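    /* Generate a symmetric difference of the old and new memory maps.
     * Kill ranges in the old map, and instantiate ranges in the new map.
     */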
    iold = inew = 0;
    while (iold < old_view->nr || inew < new_view->nr) {
        if (iold < old_view->nr) {
            frold = &old_view->ranges[iold];
        } else {
            frold = NULL;
        }
        if (inew < new_view->nr) {
            frnew = &new_view->ranges[inew];
        } else {
            frnew = NULL;
        }

        if (frold
            && (!frnew
                || int128_lt(frold->addr.start, frnew->addr.start)
                || (int128_eq(frold->addr.start, frnew->addr.start)
                    && !flatrange_equal(frold, frnew)))) {
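            /* In old but not in new, or in both but attributes changed. */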
            if (!adding) {
                MEMORY_LISTENER_UPDATE_REGION(frold, as, Reverse, region_del);
            }

            ++iold;
        } else if (frold && frnew && flatrange_equal(frold, frnew)) {
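            /* In both and unchanged (except logging may have changed) */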
            if (adding) {
                MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, region_nop);
                if (frnew->dirty_log_mask & ~frold->dirty_log_mask) {
                    MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, log_start,
                                                  frold->dirty_log_mask,
                                                  frnew->dirty_log_mask);
                }
                if (frold->dirty_log_mask & ~frnew->dirty_log_mask) {
                    MEMORY_LISTENER_UPDATE_REGION(frnew, as, Reverse, log_stop,
                                                  frold->dirty_log_mask,
                                                  frnew->dirty_log_mask);
                }
            }

            ++iold;
            ++inew;
        } else {
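            /* In new */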
            if (adding) {
                MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, region_add);
            }

            ++inew;
        }
    }
}

static void flatviews_init(void)
{
    static FlatView *empty_view;

    if (flat_views) {
        return;
    }

    flat_views = g_hash_table_new_full(g_direct_hash, g_direct_equal, NULL,
                                       (GDestroyNotify) flatview_unref);
    if (!empty_view) {
        empty_view = generate_memory_topology(NULL);
        flatview_ref(empty_view);
    } else {
        g_hash_table_replace(flat_views, NULL, empty_view);
        flatview_ref(empty_view);
    }
}

static void flatviews_reset(void)
{
    AddressSpace *as;

    if (flat_views) {
        g_hash_table_unref(flat_views);
        flat_views = NULL;
    }
    flatviews_init();

    QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
        MemoryRegion *physmr = memory_region_get_flatview_root(as->root);

        if (g_hash_table_lookup(flat_views, physmr)) {
            continue;
        }

        generate_memory_topology(physmr);
    }
}

static void address_space_set_flatview(AddressSpace *as)
{
    FlatView *old_view = address_space_to_flatview(as);
    MemoryRegion *physmr = memory_region_get_flatview_root(as->root);
    FlatView *new_view = g_hash_table_lookup(flat_views, physmr);

    assert(new_view);

    if (old_view == new_view) {
        return;
    }

    if (old_view) {
        flatview_ref(old_view);
    }

    flatview_ref(new_view);

    if (!QTAILQ_EMPTY(&as->listeners)) {
        FlatView tmpview = { .nr = 0 }, *old_view2 = old_view;

        if (!old_view2) {
            old_view2 = &tmpview;
        }
        address_space_update_topology_pass(as, old_view2, new_view, false);
        address_space_update_topology_pass(as, old_view2, new_view, true);
    }

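    /* Writes are protected by the BQL.  */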
    atomic_rcu_set(&as->current_map, new_view);
    if (old_view) {
        flatview_unref(old_view);
    }

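    /* Note that all the old MemoryRegions are still alive up to this
     * point.  This relieves most MemoryListeners from the need to
     * ref/unref the MemoryRegions they get---unless they use them
     * outside the iothread mutex, in which case precise reference
     * counting is necessary.
     */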
    if (old_view) {
        flatview_unref(old_view);
    }
}

static void address_space_update_topology(AddressSpace *as)
{
    MemoryRegion *physmr = memory_region_get_flatview_root(as->root);

    flatviews_init();
    if (!g_hash_table_lookup(flat_views, physmr)) {
        generate_memory_topology(physmr);
    }
    address_space_set_flatview(as);
}

void memory_region_transaction_begin(void)
{
    qemu_flush_coalesced_mmio_buffer();
    ++memory_region_transaction_depth;
}

void memory_region_transaction_commit(void)
{
    AddressSpace *as;

    assert(memory_region_transaction_depth);
    assert(qemu_mutex_iothread_locked());

    --memory_region_transaction_depth;
    if (!memory_region_transaction_depth) {
        if (memory_region_update_pending) {
            flatviews_reset();

            MEMORY_LISTENER_CALL_GLOBAL(begin, Forward);

            QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
                address_space_set_flatview(as);
                address_space_update_ioeventfds(as);
            }
            memory_region_update_pending = false;
            ioeventfd_update_pending = false;
            MEMORY_LISTENER_CALL_GLOBAL(commit, Forward);
        } else if (ioeventfd_update_pending) {
            QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
                address_space_update_ioeventfds(as);
            }
            ioeventfd_update_pending = false;
        }
    }
}

static void memory_region_destructor_none(MemoryRegion *mr)
{
}

static void memory_region_destructor_ram(MemoryRegion *mr)
{
    qemu_ram_free(mr->ram_block);
}

static bool memory_region_need_escape(char c)
{
    return c == '/' || c == '[' || c == '\\' || c == ']';
}

static char *memory_region_escape_name(const char *name)
{
    const char *p;
    char *escaped, *q;
    uint8_t c;
    size_t bytes = 0;

    for (p = name; *p; p++) {
        bytes += memory_region_need_escape(*p) ? 4 : 1;
    }
    if (bytes == p - name) {
        return g_memdup(name, bytes + 1);
    }

    escaped = g_malloc(bytes + 1);
    for (p = name, q = escaped; *p; p++) {
        c = *p;
        if (unlikely(memory_region_need_escape(c))) {
            *q++ = '\\';
            *q++ = 'x';
            *q++ = "0123456789abcdef"[c >> 4];
            c = "0123456789abcdef"[c & 15];
        }
        *q++ = c;
    }
    *q = 0;
    return escaped;
}

static void memory_region_do_init(MemoryRegion *mr,
                                  Object *owner,
                                  const char *name,
                                  uint64_t size)
{
    mr->size = int128_make64(size);
    if (size == UINT64_MAX) {
        mr->size = int128_2_64();
    }
    mr->name = g_strdup(name);
    mr->owner = owner;
    mr->ram_block = NULL;

    if (name) {
        char *escaped_name = memory_region_escape_name(name);
        char *name_array = g_strdup_printf("%s[*]", escaped_name);

        if (!owner) {
            owner = container_get(qdev_get_machine(), "/unattached");
        }

        object_property_add_child(owner, name_array, OBJECT(mr), &error_abort);
        object_unref(OBJECT(mr));
        g_free(name_array);
        g_free(escaped_name);
    }
}

void memory_region_init(MemoryRegion *mr,
                        Object *owner,
                        const char *name,
                        uint64_t size)
{
    object_initialize(mr, sizeof(*mr), TYPE_MEMORY_REGION);
    memory_region_do_init(mr, owner, name, size);
}

static void memory_region_get_addr(Object *obj, Visitor *v, const char *name,
                                   void *opaque, Error **errp)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    uint64_t value = mr->addr;

    visit_type_uint64(v, name, &value, errp);
}

static void memory_region_set_addr(Object *obj, Visitor *v, const char *name,
                                   void *opaque, Error **errp)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    Error *local_err = NULL;
    uint64_t value;

    visit_type_uint64(v, name, &value, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    memory_region_set_address(mr, value);
}

static void memory_region_set_container(Object *obj, Visitor *v,
                                        const char *name,
                                        void *opaque, Error **errp)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    Error *local_err = NULL;
    MemoryRegion *old_container = mr->container;
    MemoryRegion *new_container = NULL;
    char *path = NULL;

    visit_type_str(v, name, &path, &local_err);

    if (!local_err && strcmp(path, "") != 0) {
        new_container = MEMORY_REGION(object_resolve_link(obj, name, path,
                                                          &local_err));
        while (new_container && new_container->alias) {
            new_container = new_container->alias;
        }
    }

    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    object_ref(OBJECT(new_container));

    memory_region_transaction_begin();
    memory_region_ref(mr);
    if (old_container) {
        memory_region_del_subregion(old_container, mr);
    }
    mr->container = new_container;
    if (new_container) {
        memory_region_update_container_subregions(mr);
    }
    memory_region_unref(mr);
    memory_region_transaction_commit();

    object_unref(OBJECT(old_container));
}

static void memory_region_get_container(Object *obj, Visitor *v,
                                        const char *name, void *opaque,
                                        Error **errp)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    gchar *path = (gchar *)"";

    if (mr->container) {
        path = object_get_canonical_path(OBJECT(mr->container));
    }
    visit_type_str(v, name, &path, errp);
    if (mr->container) {
        g_free(path);
    }
}

static Object *memory_region_resolve_container(Object *obj, void *opaque,
                                               const char *part)
{
    MemoryRegion *mr = MEMORY_REGION(obj);

    return OBJECT(mr->container);
}

static void memory_region_set_alias(const Object *obj, const char *name,
                                    Object *val, Error **errp)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    MemoryRegion *subregion, *next;

    assert(!mr->alias);

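    /* Re-parent any existing subregions onto the alias target by
     * re-pointing their "container" link properties at @val.
     */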
    memory_region_transaction_begin();
    QTAILQ_FOREACH_SAFE(subregion, &mr->subregions, subregions_link, next) {
        object_property_set_link(OBJECT(subregion), OBJECT(val),
                                 "container", errp);
    }
    memory_region_ref(mr);
    mr->alias = MEMORY_REGION(val);
    memory_region_unref(mr);
    memory_region_transaction_commit();
}

static void memory_region_get_priority(Object *obj, Visitor *v,
                                       const char *name, void *opaque,
                                       Error **errp)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    int32_t value = mr->priority;

    visit_type_int32(v, name, &value, errp);
}

static bool memory_region_get_may_overlap(Object *obj, Error **errp)
{
    MemoryRegion *mr = MEMORY_REGION(obj);

    return mr->may_overlap;
}

static void memory_region_set_priority(Object *obj, Visitor *v,
                                       const char *name,
                                       void *opaque, Error **errp)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    Error *local_err = NULL;
    int32_t value;

    visit_type_uint32(v, name, (uint32_t *)&value, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    if (mr->priority != value) {
        mr->priority = value;
        memory_region_readd_subregion(mr);
    }
}

static void memory_region_do_set_ram(MemoryRegion *mr)
{
    char *c, *filename, *sanitized_name;

    if (mr->ram_block) {
        qemu_ram_free(mr->ram_block);
    }
    if (int128_eq(mr->size, int128_make64(0))) {
        return;
    }
    switch (mr->ram) {
    case 0:
        mr->ram_block = NULL;
        break;
    case 1:
        mr->ram_block = qemu_ram_alloc(int128_get64(mr->size), mr,
                                       &error_abort);
        break;
    case 2:
        if (mr->filename) {
            filename = g_strdup_printf("%s%s%s",
                                       machine_path ? machine_path : "",
                                       machine_path ? G_DIR_SEPARATOR_S : "",
                                       mr->filename);
        } else {
            sanitized_name = g_strdup(object_get_canonical_path(OBJECT(mr)));
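            /* Replace '/' so the canonical path is usable as a file name. */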
            for (c = sanitized_name; *c != '\0'; c++) {
                if (*c == '/') {
                    *c = '_';
                }
            }
            filename = g_strdup_printf("%s" G_DIR_SEPARATOR_S "qemu-memory-%s",
                                       machine_path ? machine_path : ".",
                                       sanitized_name);
            g_free(sanitized_name);
        }
        mr->ram_block = qemu_ram_alloc_from_file(int128_get64(mr->size), mr,
                                                 true, filename, &error_abort);
        g_free(filename);
        break;
    default:
        abort();
    }
}

static void memory_region_set_ram(Object *obj, Visitor *v, const char *name,
                                  void *opaque, Error **errp)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    Error *local_err = NULL;
    uint8_t value;

    visit_type_uint8(v, name, &value, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    mr->dirty_log_mask |= tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;

    if (mr->ram == value) {
        return;
    }

    mr->ram = value;
    mr->terminates = !!value;

    if (int128_eq(int128_2_64(), mr->size)) {
        return;
    }

    memory_region_do_set_ram(mr);
}

static void memory_region_get_size(Object *obj, Visitor *v, const char *name,
                                   void *opaque, Error **errp)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    uint64_t value = memory_region_size(mr);

    visit_type_uint64(v, name, &value, errp);
}

static void memory_region_set_object_size(Object *obj, Visitor *v,
                                          const char *name,
                                          void *opaque, Error **errp)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    Error *local_err = NULL;
    uint64_t size;

    visit_type_uint64(v, name, &size, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    memory_region_set_size(mr, size);
}

static void memory_region_get_filename(Object *obj, Visitor *v,
                                       const char *name,
                                       void *opaque, Error **errp)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    char *filename = mr->filename;

    visit_type_str(v, name, &filename, errp);
}

static void memory_region_set_filename(Object *obj, Visitor *v,
                                       const char *name,
                                       void *opaque, Error **errp)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    Error *local_err = NULL;
    char *filename;

    visit_type_str(v, name, &filename, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    mr->filename = filename;
}

static void memory_region_initfn(Object *obj)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    ObjectProperty *op;

    mr->ops = &unassigned_mem_ops;
    mr->enabled = true;
    mr->romd_mode = true;
    mr->global_locking = true;
    mr->destructor = memory_region_destructor_none;
    mr->size = int128_2_64();
    QTAILQ_INIT(&mr->subregions);
    QTAILQ_INIT(&mr->coalesced);

    op = object_property_add(OBJECT(mr), "container",
                             "link<" TYPE_MEMORY_REGION ">",
                             memory_region_get_container,
                             memory_region_set_container,
                             NULL, NULL, &error_abort);
    op->resolve = memory_region_resolve_container;

    object_property_add_link(OBJECT(mr), "alias", TYPE_MEMORY_REGION,
                             (Object **)&mr->alias,
                             memory_region_set_alias,
                             0,
                             &error_abort);
    object_property_add(OBJECT(mr), "addr", "uint64",
                        memory_region_get_addr,
                        memory_region_set_addr,
                        NULL, NULL, &error_abort);
    object_property_add(OBJECT(mr), "priority", "uint32",
                        memory_region_get_priority,
                        memory_region_set_priority,
                        NULL, NULL, &error_abort);
    object_property_add(OBJECT(mr), "ram", "uint8",
                        NULL,
                        memory_region_set_ram,
                        NULL, NULL, &error_abort);
    object_property_add(OBJECT(mr), "filename", "string",
                        memory_region_get_filename,
                        memory_region_set_filename,
                        NULL, NULL, &error_abort);
    object_property_add_bool(OBJECT(mr), "may-overlap",
                             memory_region_get_may_overlap,
                             NULL,
                             &error_abort);
    object_property_add(OBJECT(mr), "size", "uint64",
                        memory_region_get_size,
                        memory_region_set_object_size,
                        NULL, NULL, &error_abort);
}

static void iommu_memory_region_initfn(Object *obj)
{
    MemoryRegion *mr = MEMORY_REGION(obj);

    mr->is_iommu = true;
}

static uint64_t unassigned_mem_read(void *opaque, hwaddr addr,
                                    unsigned size)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
    if (current_cpu != NULL) {
        cpu_unassigned_access(current_cpu, addr, false, false, 0, size);
    }
    return 0;
}

static void unassigned_mem_write(void *opaque, hwaddr addr,
                                 uint64_t val, unsigned size)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%"PRIx64"\n",
           addr, val);
#endif
    if (current_cpu != NULL) {
        cpu_unassigned_access(current_cpu, addr, true, false, 0, size);
    }
}

static bool unassigned_mem_accepts(void *opaque, hwaddr addr,
                                   unsigned size, bool is_write)
{
    return false;
}

const MemoryRegionOps unassigned_mem_ops = {
    .valid.accepts = unassigned_mem_accepts,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static uint64_t memory_region_ram_device_read(void *opaque,
                                              hwaddr addr, unsigned size)
{
    MemoryRegion *mr = opaque;
    uint64_t data = (uint64_t)~0;

    switch (size) {
    case 1:
        data = *(uint8_t *)(mr->ram_block->host + addr);
        break;
    case 2:
        data = *(uint16_t *)(mr->ram_block->host + addr);
        break;
    case 4:
        data = *(uint32_t *)(mr->ram_block->host + addr);
        break;
    case 8:
        data = *(uint64_t *)(mr->ram_block->host + addr);
        break;
    }

    trace_memory_region_ram_device_read(get_cpu_index(), mr, addr, data, size);

    return data;
}

static void memory_region_ram_device_write(void *opaque, hwaddr addr,
                                           uint64_t data, unsigned size)
{
    MemoryRegion *mr = opaque;

    trace_memory_region_ram_device_write(get_cpu_index(), mr, addr, data, size);

    switch (size) {
    case 1:
        *(uint8_t *)(mr->ram_block->host + addr) = (uint8_t)data;
        break;
    case 2:
        *(uint16_t *)(mr->ram_block->host + addr) = (uint16_t)data;
        break;
    case 4:
        *(uint32_t *)(mr->ram_block->host + addr) = (uint32_t)data;
        break;
    case 8:
        *(uint64_t *)(mr->ram_block->host + addr) = data;
        break;
    }
}

static const MemoryRegionOps ram_device_mem_ops = {
    .read = memory_region_ram_device_read,
    .write = memory_region_ram_device_write,
    .endianness = DEVICE_HOST_ENDIAN,
    .valid = {
        .min_access_size = 1,
        .max_access_size = 8,
        .unaligned = true,
    },
    .impl = {
        .min_access_size = 1,
        .max_access_size = 8,
        .unaligned = true,
    },
};

bool memory_region_access_valid(MemoryRegion *mr,
                                hwaddr addr,
                                unsigned size,
                                bool is_write)
{
    int access_size_min, access_size_max;
    int access_size, i;

    if (!mr->ops->valid.unaligned && (addr & (size - 1))) {
        return false;
    }

    if (!mr->ops->valid.accepts) {
        return true;
    }

    access_size_min = mr->ops->valid.min_access_size;
    if (!mr->ops->valid.min_access_size) {
        access_size_min = 1;
    }

    access_size_max = mr->ops->valid.max_access_size;
    if (!mr->ops->valid.max_access_size) {
        access_size_max = 4;
    }

    access_size = MAX(MIN(size, access_size_max), access_size_min);
    for (i = 0; i < size; i += access_size) {
        if (!mr->ops->valid.accepts(mr->opaque, addr + i, access_size,
                                    is_write)) {
            return false;
        }
    }

    return true;
}

static MemTxResult memory_region_dispatch_read1(MemoryRegion *mr,
                                                hwaddr addr,
                                                uint64_t *pval,
                                                unsigned size,
                                                MemTxAttrs attrs)
{
    *pval = 0;

    if (mr->ops->access) {
        return access_with_adjusted_size(addr, pval, size,
                                         mr->ops->impl.min_access_size,
                                         mr->ops->impl.max_access_size,
                                         memory_region_read_accessor_attr,
                                         mr, attrs);
    } else if (mr->ops->read) {
        return access_with_adjusted_size(addr, pval, size,
                                         mr->ops->impl.min_access_size,
                                         mr->ops->impl.max_access_size,
                                         memory_region_read_accessor,
                                         mr, attrs);
    } else if (mr->ops->read_with_attrs) {
        return access_with_adjusted_size(addr, pval, size,
                                         mr->ops->impl.min_access_size,
                                         mr->ops->impl.max_access_size,
                                         memory_region_read_with_attrs_accessor,
                                         mr, attrs);
    } else {
        return access_with_adjusted_size(addr, pval, size, 1, 4,
                                         memory_region_oldmmio_read_accessor,
                                         mr, attrs);
    }
}

MemTxResult memory_region_dispatch_read(MemoryRegion *mr,
                                        hwaddr addr,
                                        uint64_t *pval,
                                        unsigned size,
                                        MemTxAttrs attrs)
{
    MemTxResult r;

    if (!memory_region_access_valid(mr, addr, size, false)) {
        *pval = unassigned_mem_read(mr, addr, size);
        return MEMTX_DECODE_ERROR;
    }

    r = memory_region_dispatch_read1(mr, addr, pval, size, attrs);
    adjust_endianness(mr, pval, size);
    return r;
}

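/* Return true if an eventfd was signalled */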
static bool memory_region_dispatch_write_eventfds(MemoryRegion *mr,
                                                  hwaddr addr,
                                                  uint64_t data,
                                                  unsigned size,
                                                  MemTxAttrs attrs)
{
    MemoryRegionIoeventfd ioeventfd = {
        .addr = addrrange_make(int128_make64(addr), int128_make64(size)),
        .data = data,
    };
    unsigned i;

    for (i = 0; i < mr->ioeventfd_nb; i++) {
        ioeventfd.match_data = mr->ioeventfds[i].match_data;
        ioeventfd.e = mr->ioeventfds[i].e;

        if (memory_region_ioeventfd_equal(ioeventfd, mr->ioeventfds[i])) {
            event_notifier_set(ioeventfd.e);
            return true;
        }
    }

    return false;
}

MemTxResult memory_region_dispatch_write(MemoryRegion *mr,
                                         hwaddr addr,
                                         uint64_t data,
                                         unsigned size,
                                         MemTxAttrs attrs)
{
    if (!memory_region_access_valid(mr, addr, size, true)) {
        unassigned_mem_write(mr, addr, data, size);
        return MEMTX_DECODE_ERROR;
    }

    adjust_endianness(mr, &data, size);

    if ((!kvm_eventfds_enabled()) &&
        memory_region_dispatch_write_eventfds(mr, addr, data, size, attrs)) {
        return MEMTX_OK;
    }

    if (mr->ops->access) {
        return access_with_adjusted_size(addr, &data, size,
                                         mr->ops->impl.min_access_size,
                                         mr->ops->impl.max_access_size,
                                         memory_region_write_accessor_attr,
                                         mr, attrs);
    } else if (mr->ops->write) {
        return access_with_adjusted_size(addr, &data, size,
                                         mr->ops->impl.min_access_size,
                                         mr->ops->impl.max_access_size,
                                         memory_region_write_accessor, mr,
                                         attrs);
    } else if (mr->ops->write_with_attrs) {
        return
            access_with_adjusted_size(addr, &data, size,
                                      mr->ops->impl.min_access_size,
                                      mr->ops->impl.max_access_size,
                                      memory_region_write_with_attrs_accessor,
                                      mr, attrs);
    } else {
        return access_with_adjusted_size(addr, &data, size, 1, 4,
                                         memory_region_oldmmio_write_accessor,
                                         mr, attrs);
    }
}

void memory_region_init_io(MemoryRegion *mr,
                           Object *owner,
                           const MemoryRegionOps *ops,
                           void *opaque,
                           const char *name,
                           uint64_t size)
{
    memory_region_init(mr, owner, name, size);
    mr->ops = ops ? ops : &unassigned_mem_ops;
    mr->opaque = opaque;
    mr->terminates = true;
}

void memory_region_init_ram_nomigrate(MemoryRegion *mr,
                                      Object *owner,
                                      const char *name,
                                      uint64_t size,
                                      Error **errp)
{
    memory_region_init(mr, owner, name, size);
    mr->ram = 1;
    mr->terminates = true;
    mr->destructor = memory_region_destructor_ram;
    mr->ram_block = qemu_ram_alloc(size, mr, errp);
    mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
}

void memory_region_init_resizeable_ram(MemoryRegion *mr,
                                       Object *owner,
                                       const char *name,
                                       uint64_t size,
                                       uint64_t max_size,
                                       void (*resized)(const char*,
                                                       uint64_t length,
                                                       void *host),
                                       Error **errp)
{
    memory_region_init(mr, owner, name, size);
    mr->ram = true;
    mr->terminates = true;
    mr->destructor = memory_region_destructor_ram;
    mr->ram_block = qemu_ram_alloc_resizeable(size, max_size, resized,
                                              mr, errp);
    mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
}

#ifdef __linux__
void memory_region_init_ram_from_file(MemoryRegion *mr,
                                      struct Object *owner,
                                      const char *name,
                                      uint64_t size,
                                      bool share,
                                      const char *path,
                                      Error **errp)
{
    memory_region_init(mr, owner, name, size);
    mr->ram = 2;
    mr->terminates = true;
    mr->destructor = memory_region_destructor_ram;
    mr->ram_block = qemu_ram_alloc_from_file(size, mr, share, path, errp);
    mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
}

void memory_region_init_ram_from_fd(MemoryRegion *mr,
                                    struct Object *owner,
                                    const char *name,
                                    uint64_t size,
                                    bool share,
                                    int fd,
                                    Error **errp)
{
    memory_region_init(mr, owner, name, size);
    mr->ram = true;
    mr->terminates = true;
    mr->destructor = memory_region_destructor_ram;
    mr->ram_block = qemu_ram_alloc_from_fd(size, mr, share, fd, errp);
    mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
}
#endif

void memory_region_init_ram_ptr(MemoryRegion *mr,
                                Object *owner,
                                const char *name,
                                uint64_t size,
                                void *ptr)
{
    memory_region_init(mr, owner, name, size);
    mr->ram = 3;
    mr->terminates = true;
    mr->destructor = memory_region_destructor_ram;
    mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;

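    /* qemu_ram_alloc_from_ptr cannot fail with ptr != NULL.  */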
    assert(ptr != NULL);
    mr->ram_block = qemu_ram_alloc_from_ptr(size, ptr, mr, &error_fatal);
}

void memory_region_init_ram_device_ptr(MemoryRegion *mr,
                                       Object *owner,
                                       const char *name,
                                       uint64_t size,
                                       void *ptr)
{
    memory_region_init_ram_ptr(mr, owner, name, size, ptr);
    mr->ram_device = true;
    mr->ops = &ram_device_mem_ops;
    mr->opaque = mr;
}

void memory_region_init_alias(MemoryRegion *mr,
                              Object *owner,
                              const char *name,
                              MemoryRegion *orig,
                              hwaddr offset,
                              uint64_t size)
{
    memory_region_init(mr, owner, name, size);
    mr->alias = orig;
    mr->alias_offset = offset;
}

void memory_region_init_rom_nomigrate(MemoryRegion *mr,
                                      struct Object *owner,
                                      const char *name,
                                      uint64_t size,
                                      Error **errp)
{
    memory_region_init(mr, owner, name, size);
    mr->ram = true;
    mr->readonly = true;
    mr->terminates = true;
    mr->destructor = memory_region_destructor_ram;
    mr->ram_block = qemu_ram_alloc(size, mr, errp);
    mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
}

void memory_region_init_rom_device_nomigrate(MemoryRegion *mr,
                                             Object *owner,
                                             const MemoryRegionOps *ops,
                                             void *opaque,
                                             const char *name,
                                             uint64_t size,
                                             Error **errp)
{
    assert(ops);
    memory_region_init(mr, owner, name, size);
    mr->ops = ops;
    mr->opaque = opaque;
    mr->terminates = true;
    mr->rom_device = true;
    mr->destructor = memory_region_destructor_ram;
    mr->ram_block = qemu_ram_alloc(size, mr, errp);
}

void memory_region_init_iommu(void *_iommu_mr,
                              size_t instance_size,
                              const char *mrtypename,
                              Object *owner,
                              const char *name,
                              uint64_t size)
{
    struct IOMMUMemoryRegion *iommu_mr;
    struct MemoryRegion *mr;

    object_initialize(_iommu_mr, instance_size, mrtypename);
    mr = MEMORY_REGION(_iommu_mr);
    memory_region_do_init(mr, owner, name, size);
    iommu_mr = IOMMU_MEMORY_REGION(mr);
    mr->terminates = true;
    QLIST_INIT(&iommu_mr->iommu_notify);
    iommu_mr->iommu_notify_flags = IOMMU_NOTIFIER_NONE;
}

static void memory_region_finalize(Object *obj)
{
    MemoryRegion *mr = MEMORY_REGION(obj);

    assert(!mr->container);

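    /* We know the region is not visible in any address space (it
     * does not have a container and cannot be a root either because
     * it has no references, so we can blindly clear mr->enabled.
     * memory_region_set_enabled instead could trigger a transaction
     * and cause an infinite loop.
     */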
    mr->enabled = false;
    memory_region_transaction_begin();
    while (!QTAILQ_EMPTY(&mr->subregions)) {
        MemoryRegion *subregion = QTAILQ_FIRST(&mr->subregions);
        memory_region_del_subregion(mr, subregion);
    }
    memory_region_transaction_commit();

    mr->destructor(mr);
    memory_region_clear_coalescing(mr);
    g_free((char *)mr->name);
    g_free(mr->ioeventfds);
}

Object *memory_region_owner(MemoryRegion *mr)
{
    Object *obj = OBJECT(mr);
    return obj->parent;
}

void memory_region_ref(MemoryRegion *mr)
{
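    /* MMIO callbacks most likely will access data that belongs
     * to the owner, hence the need to ref/unref the owner whenever
     * the memory region is in use.
     *
     * The memory region is a child of its owner.  As long as the
     * owner doesn't call unparent itself on the memory region,
     * ref-ing the owner will also keep the memory region alive.
     * Memory regions without an owner are supposed to never go away;
     * we do not ref/unref them because it slows down DMA sensibly.
     */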
    if (mr && mr->owner) {
        object_ref(mr->owner);
    }
}

void memory_region_unref(MemoryRegion *mr)
{
    if (mr && mr->owner) {
        object_unref(mr->owner);
    }
}

uint64_t memory_region_size(MemoryRegion *mr)
{
    if (int128_eq(mr->size, int128_2_64())) {
        return UINT64_MAX;
    }
    return int128_get64(mr->size);
}

const char *memory_region_name(const MemoryRegion *mr)
{
    if (!mr->name) {
        ((MemoryRegion *)mr)->name =
            object_get_canonical_path_component(OBJECT(mr));
    }
    return mr->name;
}

bool memory_region_is_ram_device(MemoryRegion *mr)
{
    return mr->ram_device;
}

uint8_t memory_region_get_dirty_log_mask(MemoryRegion *mr)
{
    uint8_t mask = mr->dirty_log_mask;
    if (global_dirty_log && mr->ram_block) {
        mask |= (1 << DIRTY_MEMORY_MIGRATION);
    }
    return mask;
}

bool memory_region_is_logging(MemoryRegion *mr, uint8_t client)
{
    return memory_region_get_dirty_log_mask(mr) & (1 << client);
}

static void memory_region_update_iommu_notify_flags(IOMMUMemoryRegion *iommu_mr)
{
    IOMMUNotifierFlag flags = IOMMU_NOTIFIER_NONE;
    IOMMUNotifier *iommu_notifier;
    IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);

    IOMMU_NOTIFIER_FOREACH(iommu_notifier, iommu_mr) {
        flags |= iommu_notifier->notifier_flags;
    }

    if (flags != iommu_mr->iommu_notify_flags && imrc->notify_flag_changed) {
        imrc->notify_flag_changed(iommu_mr,
                                  iommu_mr->iommu_notify_flags,
                                  flags);
    }

    iommu_mr->iommu_notify_flags = flags;
}

void memory_region_register_iommu_notifier(MemoryRegion *mr,
                                           IOMMUNotifier *n)
{
    IOMMUMemoryRegion *iommu_mr;

    if (mr->alias) {
        memory_region_register_iommu_notifier(mr->alias, n);
        return;
    }

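    /* We need to register for at least one bitfield */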
    iommu_mr = IOMMU_MEMORY_REGION(mr);
    assert(n->notifier_flags != IOMMU_NOTIFIER_NONE);
    assert(n->start <= n->end);
    QLIST_INSERT_HEAD(&iommu_mr->iommu_notify, n, node);
    memory_region_update_iommu_notify_flags(iommu_mr);
}

uint64_t memory_region_iommu_get_min_page_size(IOMMUMemoryRegion *iommu_mr)
{
    IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);

    if (imrc->get_min_page_size) {
        return imrc->get_min_page_size(iommu_mr);
    }
    return TARGET_PAGE_SIZE;
}

void memory_region_iommu_replay(IOMMUMemoryRegion *iommu_mr, IOMMUNotifier *n)
{
    MemoryRegion *mr = MEMORY_REGION(iommu_mr);
    IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);
    hwaddr addr, granularity;
    IOMMUTLBEntry iotlb;

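    /* If the IOMMU has its own replay callback, override */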
    if (imrc->replay) {
        imrc->replay(iommu_mr, n);
        return;
    }

    granularity = memory_region_iommu_get_min_page_size(iommu_mr);

    for (addr = 0; addr < memory_region_size(mr); addr += granularity) {
        iotlb = imrc->translate(iommu_mr, addr, IOMMU_NONE);
        if (iotlb.perm != IOMMU_NONE) {
            n->notify(n, &iotlb);
        }

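        /* if (2^64 - MR size) < granularity, it's possible to get an
         * infinite loop here.  This should catch such a wraparound */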
        if ((addr + granularity) < addr) {
            break;
        }
    }
}

void memory_region_iommu_replay_all(IOMMUMemoryRegion *iommu_mr)
{
    IOMMUNotifier *notifier;

    IOMMU_NOTIFIER_FOREACH(notifier, iommu_mr) {
        memory_region_iommu_replay(iommu_mr, notifier);
    }
}

void memory_region_unregister_iommu_notifier(MemoryRegion *mr,
                                             IOMMUNotifier *n)
{
    IOMMUMemoryRegion *iommu_mr;

    if (mr->alias) {
        memory_region_unregister_iommu_notifier(mr->alias, n);
        return;
    }
    QLIST_REMOVE(n, node);
    iommu_mr = IOMMU_MEMORY_REGION(mr);
    memory_region_update_iommu_notify_flags(iommu_mr);
}

void memory_region_notify_one(IOMMUNotifier *notifier,
                              IOMMUTLBEntry *entry)
{
    IOMMUNotifierFlag request_flags;

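    /*
     * Skip the notification if the notification does not overlap
     * with registered range.
     */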
2204 if (notifier->start > entry->iova + entry->addr_mask ||
2205 notifier->end < entry->iova) {
2206 return;
2207 }
2208
2209 if (entry->perm & IOMMU_RW) {
2210 request_flags = IOMMU_NOTIFIER_MAP;
2211 } else {
2212 request_flags = IOMMU_NOTIFIER_UNMAP;
2213 }
2214
2215 if (notifier->notifier_flags & request_flags) {
2216 notifier->notify(notifier, entry);
2217 }
2218}
2219
2220void memory_region_notify_iommu(IOMMUMemoryRegion *iommu_mr,
2221 IOMMUTLBEntry entry)
2222{
2223 IOMMUNotifier *iommu_notifier;
2224
2225 assert(memory_region_is_iommu(MEMORY_REGION(iommu_mr)));
2226
2227 IOMMU_NOTIFIER_FOREACH(iommu_notifier, iommu_mr) {
2228 memory_region_notify_one(iommu_notifier, &entry);
2229 }
2230}
2231
2232void memory_region_set_log(MemoryRegion *mr, bool log, unsigned client)
2233{
2234 uint8_t mask = 1 << client;
2235 uint8_t old_logging;
2236
2237 assert(client == DIRTY_MEMORY_VGA);
2238 old_logging = mr->vga_logging_count;
2239 mr->vga_logging_count += log ? 1 : -1;
2240 if (!!old_logging == !!mr->vga_logging_count) {
2241 return;
2242 }
2243
2244 memory_region_transaction_begin();
2245 mr->dirty_log_mask = (mr->dirty_log_mask & ~mask) | (log * mask);
2246 memory_region_update_pending |= mr->enabled;
2247 memory_region_transaction_commit();
2248}
2249
2250bool memory_region_get_dirty(MemoryRegion *mr, hwaddr addr,
2251 hwaddr size, unsigned client)
2252{
2253 assert(mr->ram_block);
2254 return cpu_physical_memory_get_dirty(memory_region_get_ram_addr(mr) + addr,
2255 size, client);
2256}
2257
2258void memory_region_set_dirty(MemoryRegion *mr, hwaddr addr,
2259 hwaddr size)
2260{
2261 assert(mr->ram_block);
2262 cpu_physical_memory_set_dirty_range(memory_region_get_ram_addr(mr) + addr,
2263 size,
2264 memory_region_get_dirty_log_mask(mr));
2265}
2266
2267bool memory_region_test_and_clear_dirty(MemoryRegion *mr, hwaddr addr,
2268 hwaddr size, unsigned client)
2269{
2270 assert(mr->ram_block);
2271 return cpu_physical_memory_test_and_clear_dirty(
2272 memory_region_get_ram_addr(mr) + addr, size, client);
2273}
2274
2275DirtyBitmapSnapshot *memory_region_snapshot_and_clear_dirty(MemoryRegion *mr,
2276 hwaddr addr,
2277 hwaddr size,
2278 unsigned client)
2279{
2280 assert(mr->ram_block);
2281 return cpu_physical_memory_snapshot_and_clear_dirty(
2282 memory_region_get_ram_addr(mr) + addr, size, client);
2283}
2284
2285bool memory_region_snapshot_get_dirty(MemoryRegion *mr, DirtyBitmapSnapshot *snap,
2286 hwaddr addr, hwaddr size)
2287{
2288 assert(mr->ram_block);
2289 return cpu_physical_memory_snapshot_get_dirty(snap,
2290 memory_region_get_ram_addr(mr) + addr, size);
2291}
2292
2293void memory_region_sync_dirty_bitmap(MemoryRegion *mr)
2294{
2295 MemoryListener *listener;
2296 AddressSpace *as;
2297 FlatView *view;
2298 FlatRange *fr;
2299
    /* If the same address space has multiple log_sync listeners, we
     * visit that address space's FlatView multiple times.  But because
     * log_sync listeners are rare, it's still cheaper than walking each
     * address space once.
     */
2305 QTAILQ_FOREACH(listener, &memory_listeners, link) {
2306 if (!listener->log_sync) {
2307 continue;
2308 }
2309 as = listener->address_space;
2310 view = address_space_get_flatview(as);
2311 FOR_EACH_FLAT_RANGE(fr, view) {
2312 if (fr->mr == mr) {
2313 MemoryRegionSection mrs = section_from_flat_range(fr, view);
2314 listener->log_sync(listener, &mrs);
2315 }
2316 }
2317 flatview_unref(view);
2318 }
2319}
2320
2321void memory_region_set_readonly(MemoryRegion *mr, bool readonly)
2322{
2323 if (mr->readonly != readonly) {
2324 memory_region_transaction_begin();
2325 mr->readonly = readonly;
2326 memory_region_update_pending |= mr->enabled;
2327 memory_region_transaction_commit();
2328 }
2329}
2330
2331void memory_region_rom_device_set_romd(MemoryRegion *mr, bool romd_mode)
2332{
2333 if (mr->romd_mode != romd_mode) {
2334 memory_region_transaction_begin();
2335 mr->romd_mode = romd_mode;
2336 memory_region_update_pending |= mr->enabled;
2337 memory_region_transaction_commit();
2338 }
2339}
2340
2341void memory_region_reset_dirty(MemoryRegion *mr, hwaddr addr,
2342 hwaddr size, unsigned client)
2343{
2344 assert(mr->ram_block);
2345 cpu_physical_memory_test_and_clear_dirty(
2346 memory_region_get_ram_addr(mr) + addr, size, client);
2347}
2348
2349int memory_region_get_fd(MemoryRegion *mr)
2350{
2351 int fd;
2352
2353 rcu_read_lock();
2354 while (mr->alias) {
2355 mr = mr->alias;
2356 }
2357 fd = mr->ram_block->fd;
2358 rcu_read_unlock();
2359
2360 return fd;
2361}
2362
2363void *memory_region_get_ram_ptr(MemoryRegion *mr)
2364{
2365 void *ptr;
2366 uint64_t offset = 0;
2367
2368 rcu_read_lock();
2369 while (mr->alias) {
2370 offset += mr->alias_offset;
2371 mr = mr->alias;
2372 }
2373 assert(mr->ram_block);
2374 ptr = qemu_map_ram_ptr(mr->ram_block, offset);
2375 rcu_read_unlock();
2376
2377 return ptr;
2378}
2379
2380MemoryRegion *memory_region_from_host(void *ptr, ram_addr_t *offset)
2381{
2382 RAMBlock *block;
2383
2384 block = qemu_ram_block_from_host(ptr, false, offset);
2385 if (!block) {
2386 return NULL;
2387 }
2388
2389 return block->mr;
2390}
2391
2392ram_addr_t memory_region_get_ram_addr(MemoryRegion *mr)
2393{
2394 return mr->ram_block ? mr->ram_block->offset : RAM_ADDR_INVALID;
2395}
2396
2397void memory_region_ram_resize(MemoryRegion *mr, ram_addr_t newsize, Error **errp)
2398{
2399 assert(mr->ram_block);
2400
2401 qemu_ram_resize(mr->ram_block, newsize, errp);
2402}
2403
2404static void memory_region_update_coalesced_range_as(MemoryRegion *mr, AddressSpace *as)
2405{
2406 FlatView *view;
2407 FlatRange *fr;
2408 CoalescedMemoryRange *cmr;
2409 AddrRange tmp;
2410 MemoryRegionSection section;
2411
2412 view = address_space_get_flatview(as);
2413 FOR_EACH_FLAT_RANGE(fr, view) {
2414 if (fr->mr == mr) {
2415 section = (MemoryRegionSection) {
2416 .fv = view,
2417 .offset_within_address_space = int128_get64(fr->addr.start),
2418 .size = fr->addr.size,
2419 };
2420
            MEMORY_LISTENER_CALL(as, coalesced_mmio_del, Reverse, &section,
2422 int128_get64(fr->addr.start),
2423 int128_get64(fr->addr.size));
2424 QTAILQ_FOREACH(cmr, &mr->coalesced, link) {
2425 tmp = addrrange_shift(cmr->addr,
2426 int128_sub(fr->addr.start,
2427 int128_make64(fr->offset_in_region)));
2428 if (!addrrange_intersects(tmp, fr->addr)) {
2429 continue;
2430 }
2431 tmp = addrrange_intersection(tmp, fr->addr);
                MEMORY_LISTENER_CALL(as, coalesced_mmio_add, Forward, &section,
2433 int128_get64(tmp.start),
2434 int128_get64(tmp.size));
2435 }
2436 }
2437 }
2438 flatview_unref(view);
2439}
2440
2441static void memory_region_update_coalesced_range(MemoryRegion *mr)
2442{
2443 AddressSpace *as;
2444
2445 QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
2446 memory_region_update_coalesced_range_as(mr, as);
2447 }
2448}
2449
2450void memory_region_set_coalescing(MemoryRegion *mr)
2451{
2452 memory_region_clear_coalescing(mr);
2453 memory_region_add_coalescing(mr, 0, int128_get64(mr->size));
2454}
2455
2456void memory_region_add_coalescing(MemoryRegion *mr,
2457 hwaddr offset,
2458 uint64_t size)
2459{
2460 CoalescedMemoryRange *cmr = g_malloc(sizeof(*cmr));
2461
2462 cmr->addr = addrrange_make(int128_make64(offset), int128_make64(size));
2463 QTAILQ_INSERT_TAIL(&mr->coalesced, cmr, link);
2464 memory_region_update_coalesced_range(mr);
2465 memory_region_set_flush_coalesced(mr);
2466}
2467
2468void memory_region_clear_coalescing(MemoryRegion *mr)
2469{
2470 CoalescedMemoryRange *cmr;
2471 bool updated = false;
2472
2473 qemu_flush_coalesced_mmio_buffer();
2474 mr->flush_coalesced_mmio = false;
2475
2476 while (!QTAILQ_EMPTY(&mr->coalesced)) {
2477 cmr = QTAILQ_FIRST(&mr->coalesced);
2478 QTAILQ_REMOVE(&mr->coalesced, cmr, link);
2479 g_free(cmr);
2480 updated = true;
2481 }
2482
2483 if (updated) {
2484 memory_region_update_coalesced_range(mr);
2485 }
2486}
2487
2488void memory_region_set_flush_coalesced(MemoryRegion *mr)
2489{
2490 mr->flush_coalesced_mmio = true;
2491}
2492
2493void memory_region_clear_flush_coalesced(MemoryRegion *mr)
2494{
2495 qemu_flush_coalesced_mmio_buffer();
2496 if (QTAILQ_EMPTY(&mr->coalesced)) {
2497 mr->flush_coalesced_mmio = false;
2498 }
2499}
2500
2501void memory_region_set_global_locking(MemoryRegion *mr)
2502{
2503 mr->global_locking = true;
2504}
2505
2506void memory_region_clear_global_locking(MemoryRegion *mr)
2507{
2508 mr->global_locking = false;
2509}
2510
2511static bool userspace_eventfd_warning;
2512
2513void memory_region_add_eventfd(MemoryRegion *mr,
2514 hwaddr addr,
2515 unsigned size,
2516 bool match_data,
2517 uint64_t data,
2518 EventNotifier *e)
2519{
2520 MemoryRegionIoeventfd mrfd = {
2521 .addr.start = int128_make64(addr),
2522 .addr.size = int128_make64(size),
2523 .match_data = match_data,
2524 .data = data,
2525 .e = e,
2526 };
2527 unsigned i;
2528
2529 if (kvm_enabled() && (!(kvm_eventfds_enabled() ||
2530 userspace_eventfd_warning))) {
2531 userspace_eventfd_warning = true;
2532 error_report("Using eventfd without MMIO binding in KVM. "
2533 "Suboptimal performance expected");
2534 }
2535
2536 if (size) {
2537 adjust_endianness(mr, &mrfd.data, size);
2538 }
2539 memory_region_transaction_begin();
2540 for (i = 0; i < mr->ioeventfd_nb; ++i) {
2541 if (memory_region_ioeventfd_before(mrfd, mr->ioeventfds[i])) {
2542 break;
2543 }
2544 }
2545 ++mr->ioeventfd_nb;
2546 mr->ioeventfds = g_realloc(mr->ioeventfds,
2547 sizeof(*mr->ioeventfds) * mr->ioeventfd_nb);
2548 memmove(&mr->ioeventfds[i+1], &mr->ioeventfds[i],
2549 sizeof(*mr->ioeventfds) * (mr->ioeventfd_nb-1 - i));
2550 mr->ioeventfds[i] = mrfd;
2551 ioeventfd_update_pending |= mr->enabled;
2552 memory_region_transaction_commit();
2553}
2554
2555void memory_region_del_eventfd(MemoryRegion *mr,
2556 hwaddr addr,
2557 unsigned size,
2558 bool match_data,
2559 uint64_t data,
2560 EventNotifier *e)
2561{
2562 MemoryRegionIoeventfd mrfd = {
2563 .addr.start = int128_make64(addr),
2564 .addr.size = int128_make64(size),
2565 .match_data = match_data,
2566 .data = data,
2567 .e = e,
2568 };
2569 unsigned i;
2570
2571 if (size) {
2572 adjust_endianness(mr, &mrfd.data, size);
2573 }
2574 memory_region_transaction_begin();
2575 for (i = 0; i < mr->ioeventfd_nb; ++i) {
2576 if (memory_region_ioeventfd_equal(mrfd, mr->ioeventfds[i])) {
2577 break;
2578 }
2579 }
2580 assert(i != mr->ioeventfd_nb);
2581 memmove(&mr->ioeventfds[i], &mr->ioeventfds[i+1],
2582 sizeof(*mr->ioeventfds) * (mr->ioeventfd_nb - (i+1)));
2583 --mr->ioeventfd_nb;
2584 mr->ioeventfds = g_realloc(mr->ioeventfds,
2585 sizeof(*mr->ioeventfds)*mr->ioeventfd_nb + 1);
2586 ioeventfd_update_pending |= mr->enabled;
2587 memory_region_transaction_commit();
2588}
2589
2590static void memory_region_update_container_subregions(MemoryRegion *subregion)
2591{
2592 MemoryRegion *mr = subregion->container;
2593 MemoryRegion *other;
2594
2595 memory_region_transaction_begin();
2596
2597 memory_region_ref(subregion);
2598 QTAILQ_FOREACH(other, &mr->subregions, subregions_link) {
2599 if (subregion->priority >= other->priority) {
2600 QTAILQ_INSERT_BEFORE(other, subregion, subregions_link);
2601 goto done;
2602 }
2603 }
2604 QTAILQ_INSERT_TAIL(&mr->subregions, subregion, subregions_link);
2605done:
2606 memory_region_update_pending |= mr->enabled && subregion->enabled;
2607 memory_region_transaction_commit();
2608}
2609
2610static void memory_region_add_subregion_common(MemoryRegion *mr,
2611 hwaddr offset,
2612 MemoryRegion *subregion)
2613{
2614 assert(!subregion->container);
2615 subregion->container = mr;
2616 subregion->addr = offset;
2617 memory_region_update_container_subregions(subregion);
2618}
2619
2620void memory_region_add_subregion(MemoryRegion *mr,
2621 hwaddr offset,
2622 MemoryRegion *subregion)
2623{
2624 subregion->priority = 0;
2625 memory_region_add_subregion_common(mr, offset, subregion);
2626}
2627
2628void memory_region_add_subregion_overlap(MemoryRegion *mr,
2629 hwaddr offset,
2630 MemoryRegion *subregion,
2631 int priority)
2632{
2633 subregion->priority = priority;
2634 memory_region_add_subregion_common(mr, offset, subregion);
2635}
2636
2637void memory_region_del_subregion(MemoryRegion *mr,
2638 MemoryRegion *subregion)
2639{
2640 memory_region_transaction_begin();
2641 assert(subregion->container == mr);
2642 subregion->container = NULL;
2643 QTAILQ_REMOVE(&mr->subregions, subregion, subregions_link);
2644 memory_region_unref(subregion);
2645 memory_region_update_pending |= mr->enabled && subregion->enabled;
2646 memory_region_transaction_commit();
2647}
2648
2649void memory_region_set_enabled(MemoryRegion *mr, bool enabled)
2650{
2651 if (enabled == mr->enabled) {
2652 return;
2653 }
2654 memory_region_transaction_begin();
2655 mr->enabled = enabled;
2656 memory_region_update_pending = true;
2657 memory_region_transaction_commit();
2658}
2659
2660void memory_region_set_size(MemoryRegion *mr, uint64_t size)
2661{
2662 Int128 s = int128_make64(size);
2663
2664 if (size == UINT64_MAX) {
2665 s = int128_2_64();
2666 }
2667 if (int128_eq(s, mr->size)) {
2668 return;
2669 }
2670 memory_region_transaction_begin();
2671 mr->size = s;
2672 if (mr->ram) {
2673 memory_region_do_set_ram(mr);
2674 }
2675 memory_region_update_pending = true;
2676 memory_region_transaction_commit();
2677}
2678
2679static void memory_region_readd_subregion(MemoryRegion *mr)
2680{
2681 MemoryRegion *container = mr->container;
2682
2683 if (container) {
2684 memory_region_transaction_begin();
2685 memory_region_ref(mr);
2686 memory_region_del_subregion(container, mr);
2687 mr->container = container;
2688 memory_region_update_container_subregions(mr);
2689 memory_region_unref(mr);
2690 memory_region_transaction_commit();
2691 }
2692}
2693
2694void memory_region_set_address(MemoryRegion *mr, hwaddr addr)
2695{
2696 if (addr != mr->addr) {
2697 mr->addr = addr;
2698 memory_region_readd_subregion(mr);
2699 }
2700}
2701
2702void memory_region_set_alias_offset(MemoryRegion *mr, hwaddr offset)
2703{
2704 assert(mr->alias);
2705
2706 if (offset == mr->alias_offset) {
2707 return;
2708 }
2709
2710 memory_region_transaction_begin();
2711 mr->alias_offset = offset;
2712 memory_region_update_pending |= mr->enabled;
2713 memory_region_transaction_commit();
2714}
2715
2716uint64_t memory_region_get_alignment(const MemoryRegion *mr)
2717{
2718 return mr->align;
2719}
2720
2721static int cmp_flatrange_addr(const void *addr_, const void *fr_)
2722{
2723 const AddrRange *addr = addr_;
2724 const FlatRange *fr = fr_;
2725
2726 if (int128_le(addrrange_end(*addr), fr->addr.start)) {
2727 return -1;
2728 } else if (int128_ge(addr->start, addrrange_end(fr->addr))) {
2729 return 1;
2730 }
2731 return 0;
2732}
2733
2734static FlatRange *flatview_lookup(FlatView *view, AddrRange addr)
2735{
2736 return bsearch(&addr, view->ranges, view->nr,
2737 sizeof(FlatRange), cmp_flatrange_addr);
2738}
2739
2740bool memory_region_is_mapped(MemoryRegion *mr)
2741{
2742 return mr->container ? true : false;
2743}
2744
/* Same as memory_region_find, but it does not add a reference to the
 * returned region.  It must be called from an RCU critical section.
 */
2748static MemoryRegionSection memory_region_find_rcu(MemoryRegion *mr,
2749 hwaddr addr, uint64_t size)
2750{
2751 MemoryRegionSection ret = { .mr = NULL };
2752 MemoryRegion *root;
2753 AddressSpace *as;
2754 AddrRange range;
2755 FlatView *view;
2756 FlatRange *fr;
2757
2758 addr += mr->addr;
2759 for (root = mr; root->container; ) {
2760 root = root->container;
2761 addr += root->addr;
2762 }
2763
2764 as = memory_region_to_address_space(root);
2765 if (!as) {
2766 return ret;
2767 }
2768 range = addrrange_make(int128_make64(addr), int128_make64(size));
2769
2770 view = address_space_to_flatview(as);
2771 fr = flatview_lookup(view, range);
2772 if (!fr) {
2773 return ret;
2774 }
2775
2776 while (fr > view->ranges && addrrange_intersects(fr[-1].addr, range)) {
2777 --fr;
2778 }
2779
2780 ret.mr = fr->mr;
2781 ret.fv = view;
2782 range = addrrange_intersection(range, fr->addr);
2783 ret.offset_within_region = fr->offset_in_region;
2784 ret.offset_within_region += int128_get64(int128_sub(range.start,
2785 fr->addr.start));
2786 ret.size = range.size;
2787 ret.offset_within_address_space = int128_get64(range.start);
2788 ret.readonly = fr->readonly;
2789 return ret;
2790}
2791
2792MemoryRegionSection memory_region_find(MemoryRegion *mr,
2793 hwaddr addr, uint64_t size)
2794{
2795 MemoryRegionSection ret;
2796 rcu_read_lock();
2797 ret = memory_region_find_rcu(mr, addr, size);
2798 if (ret.mr) {
2799 memory_region_ref(ret.mr);
2800 }
2801 rcu_read_unlock();
2802 return ret;
2803}
2804
2805bool memory_region_present(MemoryRegion *container, hwaddr addr)
2806{
2807 MemoryRegion *mr;
2808
2809 rcu_read_lock();
2810 mr = memory_region_find_rcu(container, addr, 1).mr;
2811 rcu_read_unlock();
2812 return mr && mr != container;
2813}
2814
2815void memory_global_dirty_log_sync(void)
2816{
2817 MemoryListener *listener;
2818 AddressSpace *as;
2819 FlatView *view;
2820 FlatRange *fr;
2821
2822 QTAILQ_FOREACH(listener, &memory_listeners, link) {
2823 if (!listener->log_sync) {
2824 continue;
2825 }
2826 as = listener->address_space;
2827 view = address_space_get_flatview(as);
2828 FOR_EACH_FLAT_RANGE(fr, view) {
2829 if (fr->dirty_log_mask) {
2830 MemoryRegionSection mrs = section_from_flat_range(fr, view);
2831
2832 listener->log_sync(listener, &mrs);
2833 }
2834 }
2835 flatview_unref(view);
2836 }
2837}
2838
2839static VMChangeStateEntry *vmstate_change;
2840
2841void memory_global_dirty_log_start(void)
2842{
2843 if (vmstate_change) {
2844 qemu_del_vm_change_state_handler(vmstate_change);
2845 vmstate_change = NULL;
2846 }
2847
2848 global_dirty_log = true;
2849
2850 MEMORY_LISTENER_CALL_GLOBAL(log_global_start, Forward);

    /* Refresh DIRTY_MEMORY_MIGRATION bit.  */
2853 memory_region_transaction_begin();
2854 memory_region_update_pending = true;
2855 memory_region_transaction_commit();
2856}
2857
2858static void memory_global_dirty_log_do_stop(void)
2859{
2860 global_dirty_log = false;

    /* Refresh DIRTY_MEMORY_MIGRATION bit.  */
2863 memory_region_transaction_begin();
2864 memory_region_update_pending = true;
2865 memory_region_transaction_commit();
2866
2867 MEMORY_LISTENER_CALL_GLOBAL(log_global_stop, Reverse);
2868}
2869
2870static void memory_vm_change_state_handler(void *opaque, int running,
2871 RunState state)
2872{
2873 if (running) {
2874 memory_global_dirty_log_do_stop();
2875
2876 if (vmstate_change) {
2877 qemu_del_vm_change_state_handler(vmstate_change);
2878 vmstate_change = NULL;
2879 }
2880 }
2881}
2882
2883void memory_global_dirty_log_stop(void)
2884{
2885 if (!runstate_is_running()) {
2886 if (vmstate_change) {
2887 return;
2888 }
2889 vmstate_change = qemu_add_vm_change_state_handler(
2890 memory_vm_change_state_handler, NULL);
2891 return;
2892 }
2893
2894 memory_global_dirty_log_do_stop();
2895}
2896
2897static void listener_add_address_space(MemoryListener *listener,
2898 AddressSpace *as)
2899{
2900 FlatView *view;
2901 FlatRange *fr;
2902
2903 if (listener->begin) {
2904 listener->begin(listener);
2905 }
2906 if (global_dirty_log) {
2907 if (listener->log_global_start) {
2908 listener->log_global_start(listener);
2909 }
2910 }
2911
2912 view = address_space_get_flatview(as);
2913 FOR_EACH_FLAT_RANGE(fr, view) {
2914 MemoryRegionSection section = section_from_flat_range(fr, view);
2915
2916 if (listener->region_add) {
            listener->region_add(listener, &section);
2918 }
2919 if (fr->dirty_log_mask && listener->log_start) {
            listener->log_start(listener, &section, 0, fr->dirty_log_mask);
2921 }
2922 }
2923 if (listener->commit) {
2924 listener->commit(listener);
2925 }
2926 flatview_unref(view);
2927}
2928
2929void memory_listener_register(MemoryListener *listener, AddressSpace *as)
2930{
2931 MemoryListener *other = NULL;
2932
2933 listener->address_space = as;
2934 if (QTAILQ_EMPTY(&memory_listeners)
2935 || listener->priority >= QTAILQ_LAST(&memory_listeners,
2936 memory_listeners)->priority) {
2937 QTAILQ_INSERT_TAIL(&memory_listeners, listener, link);
2938 } else {
2939 QTAILQ_FOREACH(other, &memory_listeners, link) {
2940 if (listener->priority < other->priority) {
2941 break;
2942 }
2943 }
2944 QTAILQ_INSERT_BEFORE(other, listener, link);
2945 }
2946
2947 if (QTAILQ_EMPTY(&as->listeners)
2948 || listener->priority >= QTAILQ_LAST(&as->listeners,
2949 memory_listeners)->priority) {
2950 QTAILQ_INSERT_TAIL(&as->listeners, listener, link_as);
2951 } else {
2952 QTAILQ_FOREACH(other, &as->listeners, link_as) {
2953 if (listener->priority < other->priority) {
2954 break;
2955 }
2956 }
2957 QTAILQ_INSERT_BEFORE(other, listener, link_as);
2958 }
2959
2960 listener_add_address_space(listener, as);
2961}
2962
2963void memory_listener_unregister(MemoryListener *listener)
2964{
2965 if (!listener->address_space) {
2966 return;
2967 }
2968
2969 QTAILQ_REMOVE(&memory_listeners, listener, link);
2970 QTAILQ_REMOVE(&listener->address_space->listeners, listener, link_as);
2971 listener->address_space = NULL;
2972}
2973
2974bool memory_region_request_mmio_ptr(MemoryRegion *mr, hwaddr addr)
2975{
2976 void *host;
2977 unsigned size = 0;
2978 unsigned offset = 0;
2979 Object *new_interface;
2980
2981 if (!mr || !mr->ops->request_ptr) {
2982 return false;
2983 }
2984
    /*
     * Avoid re-entrancy: creating the mmio_interface device below
     * changes the memory map, so wrap the whole sequence in a
     * transaction and commit it atomically once the new subregion
     * has been realized.
     */
2990 memory_region_transaction_begin();
2991
2992 host = mr->ops->request_ptr(mr->opaque, addr - mr->addr, &size, &offset);
2993
2994 if (!host || !size) {
2995 memory_region_transaction_commit();
2996 return false;
2997 }
2998
2999 new_interface = object_new("mmio_interface");
3000 qdev_prop_set_uint64(DEVICE(new_interface), "start", offset);
3001 qdev_prop_set_uint64(DEVICE(new_interface), "end", offset + size - 1);
3002 qdev_prop_set_bit(DEVICE(new_interface), "ro", true);
3003 qdev_prop_set_ptr(DEVICE(new_interface), "host_ptr", host);
3004 qdev_prop_set_ptr(DEVICE(new_interface), "subregion", mr);
3005 object_property_set_bool(OBJECT(new_interface), true, "realized", NULL);
3006
3007 memory_region_transaction_commit();
3008 return true;
3009}
3010
3011typedef struct MMIOPtrInvalidate {
3012 MemoryRegion *mr;
3013 hwaddr offset;
3014 unsigned size;
3015 int busy;
3016 int allocated;
3017} MMIOPtrInvalidate;
3018
3019#define MAX_MMIO_INVALIDATE 10
3020static MMIOPtrInvalidate mmio_ptr_invalidate_list[MAX_MMIO_INVALIDATE];
3021
3022static void memory_region_do_invalidate_mmio_ptr(CPUState *cpu,
3023 run_on_cpu_data data)
3024{
3025 MMIOPtrInvalidate *invalidate_data = (MMIOPtrInvalidate *)data.host_ptr;
3026 MemoryRegion *mr = invalidate_data->mr;
3027 hwaddr offset = invalidate_data->offset;
3028 unsigned size = invalidate_data->size;
3029 MemoryRegionSection section = memory_region_find(mr, offset, size);
3030
3031 qemu_mutex_lock_iothread();
3032
    /* Reset dirty so this doesn't happen later. */
3034 cpu_physical_memory_test_and_clear_dirty(offset, size, 1);
3035
3036 if (section.mr != mr) {
        /* memory_region_find adds a ref on section.mr */
3038 memory_region_unref(section.mr);
3039 if (MMIO_INTERFACE(section.mr->owner)) {
            /* We found the interface, just drop it. */
3041 object_property_set_bool(section.mr->owner, false, "realized",
3042 NULL);
3043 object_unref(section.mr->owner);
3044 object_unparent(section.mr->owner);
3045 }
3046 }
3047
3048 qemu_mutex_unlock_iothread();
3049
3050 if (invalidate_data->allocated) {
3051 g_free(invalidate_data);
3052 } else {
3053 invalidate_data->busy = 0;
3054 }
3055}
3056
3057void memory_region_invalidate_mmio_ptr(MemoryRegion *mr, hwaddr offset,
3058 unsigned size)
3059{
3060 size_t i;
3061 MMIOPtrInvalidate *invalidate_data = NULL;
3062
3063 for (i = 0; i < MAX_MMIO_INVALIDATE; i++) {
3064 if (atomic_cmpxchg(&(mmio_ptr_invalidate_list[i].busy), 0, 1) == 0) {
3065 invalidate_data = &mmio_ptr_invalidate_list[i];
3066 break;
3067 }
3068 }
3069
3070 if (!invalidate_data) {
3071 invalidate_data = g_malloc0(sizeof(MMIOPtrInvalidate));
3072 invalidate_data->allocated = 1;
3073 }
3074
3075 invalidate_data->mr = mr;
3076 invalidate_data->offset = offset;
3077 invalidate_data->size = size;
3078
3079 async_safe_run_on_cpu(first_cpu, memory_region_do_invalidate_mmio_ptr,
3080 RUN_ON_CPU_HOST_PTR(invalidate_data));
3081}
3082
3083void address_space_init(AddressSpace *as, MemoryRegion *root, const char *name)
3084{
3085 memory_region_ref(root);
3086 as->root = root;
3087 as->current_map = NULL;
3088 as->ioeventfd_nb = 0;
3089 as->ioeventfds = NULL;
3090 QTAILQ_INIT(&as->listeners);
3091 QTAILQ_INSERT_TAIL(&address_spaces, as, address_spaces_link);
3092
    /*
     * If the caller did not supply a name, fall back to the canonical
     * QOM path of the root memory region.
     */
3099 as->name = g_strdup(name ? name : object_get_canonical_path(OBJECT(root)));
3100 address_space_update_topology(as);
3101 address_space_update_ioeventfds(as);
3102}
3103
3104static void do_address_space_destroy(AddressSpace *as)
3105{
3106 assert(QTAILQ_EMPTY(&as->listeners));
3107
3108 flatview_unref(as->current_map);
3109 g_free(as->name);
3110 g_free(as->ioeventfds);
3111 memory_region_unref(as->root);
3112}
3113
3114
3115AddressSpace *address_space_init_shareable(MemoryRegion *root, const char *name)
3116{
3117 AddressSpace *as;
3118
3119 as = g_malloc0(sizeof *as);
3120 address_space_init(as, root, name);
3121 return as;
3122}
3123
3124void address_space_destroy(AddressSpace *as)
3125{
3126 MemoryRegion *root = as->root;
3127
    /* Flush out anything from MemoryListeners listening in on this */
3129 memory_region_transaction_begin();
3130 as->root = NULL;
3131 memory_region_transaction_commit();
3132 QTAILQ_REMOVE(&address_spaces, as, address_spaces_link);
3133
    /* At this point, as->dispatch and as->current_map are dummy
     * entries that the guest should never use.  Wait for the old
     * values to expire before freeing the data.
     */
3138 as->root = root;
3139 call_rcu(as, do_address_space_destroy, rcu);
3140}
3141
3142static const char *memory_region_type(MemoryRegion *mr)
3143{
3144 if (memory_region_is_ram_device(mr)) {
3145 return "ramd";
3146 } else if (memory_region_is_romd(mr)) {
3147 return "romd";
3148 } else if (memory_region_is_rom(mr)) {
3149 return "rom";
3150 } else if (memory_region_is_ram(mr)) {
3151 return "ram";
3152 } else {
3153 return "i/o";
3154 }
3155}
3156
3157typedef struct MemoryRegionList MemoryRegionList;
3158
3159struct MemoryRegionList {
3160 const MemoryRegion *mr;
3161 QTAILQ_ENTRY(MemoryRegionList) mrqueue;
3162};
3163
3164typedef QTAILQ_HEAD(mrqueue, MemoryRegionList) MemoryRegionListHead;
3165
3166#define MR_SIZE(size) (int128_nz(size) ? (hwaddr)int128_get64( \
3167 int128_sub((size), int128_one())) : 0)
3168#define MTREE_INDENT " "
3169
3170static void mtree_print_mr(fprintf_function mon_printf, void *f,
3171 const MemoryRegion *mr, unsigned int level,
3172 hwaddr base,
3173 MemoryRegionListHead *alias_print_queue)
3174{
3175 MemoryRegionList *new_ml, *ml, *next_ml;
3176 MemoryRegionListHead submr_print_queue;
3177 const MemoryRegion *submr;
3178 unsigned int i;
3179 hwaddr cur_start, cur_end;
3180
3181 if (!mr) {
3182 return;
3183 }
3184
3185 for (i = 0; i < level; i++) {
3186 mon_printf(f, MTREE_INDENT);
3187 }
3188
3189 cur_start = base + mr->addr;
3190 cur_end = cur_start + MR_SIZE(mr->size);
3191
    /*
     * Try to detect overflow of memory region. This should never
     * happen normally. When an overflow occurs, we print
     * "[DETECTED OVERFLOW!]" in front of the offending region.
     */
3197 if (cur_start < base || cur_end < cur_start) {
3198 mon_printf(f, "[DETECTED OVERFLOW!] ");
3199 }
3200
3201 if (mr->alias) {
3202 MemoryRegionList *ml;
3203 bool found = false;
3204
        /* check if the alias is already in the queue */
3206 QTAILQ_FOREACH(ml, alias_print_queue, mrqueue) {
3207 if (ml->mr == mr->alias) {
3208 found = true;
3209 }
3210 }
3211
3212 if (!found) {
3213 ml = g_new(MemoryRegionList, 1);
3214 ml->mr = mr->alias;
3215 QTAILQ_INSERT_TAIL(alias_print_queue, ml, mrqueue);
3216 }
3217 mon_printf(f, TARGET_FMT_plx "-" TARGET_FMT_plx
3218 " (prio %d, %s): alias %s @%s " TARGET_FMT_plx
3219 "-" TARGET_FMT_plx "%s\n",
3220 cur_start, cur_end,
3221 mr->priority,
3222 memory_region_type((MemoryRegion *)mr),
3223 memory_region_name(mr),
3224 memory_region_name(mr->alias),
3225 mr->alias_offset,
3226 mr->alias_offset + MR_SIZE(mr->size),
3227 mr->enabled ? "" : " [disabled]");
3228 } else {
3229 mon_printf(f,
3230 TARGET_FMT_plx "-" TARGET_FMT_plx " (prio %d, %s): %s%s\n",
3231 cur_start, cur_end,
3232 mr->priority,
3233 memory_region_type((MemoryRegion *)mr),
3234 memory_region_name(mr),
3235 mr->enabled ? "" : " [disabled]");
3236 }
3237
3238 QTAILQ_INIT(&submr_print_queue);
3239
3240 QTAILQ_FOREACH(submr, &mr->subregions, subregions_link) {
3241 new_ml = g_new(MemoryRegionList, 1);
3242 new_ml->mr = submr;
3243 QTAILQ_FOREACH(ml, &submr_print_queue, mrqueue) {
3244 if (new_ml->mr->addr < ml->mr->addr ||
3245 (new_ml->mr->addr == ml->mr->addr &&
3246 new_ml->mr->priority > ml->mr->priority)) {
3247 QTAILQ_INSERT_BEFORE(ml, new_ml, mrqueue);
3248 new_ml = NULL;
3249 break;
3250 }
3251 }
3252 if (new_ml) {
3253 QTAILQ_INSERT_TAIL(&submr_print_queue, new_ml, mrqueue);
3254 }
3255 }
3256
3257 QTAILQ_FOREACH(ml, &submr_print_queue, mrqueue) {
3258 mtree_print_mr(mon_printf, f, ml->mr, level + 1, cur_start,
3259 alias_print_queue);
3260 }
3261
3262 QTAILQ_FOREACH_SAFE(ml, &submr_print_queue, mrqueue, next_ml) {
3263 g_free(ml);
3264 }
3265}
3266
3267struct FlatViewInfo {
3268 fprintf_function mon_printf;
3269 void *f;
3270 int counter;
3271 bool dispatch_tree;
3272};
3273
3274static void mtree_print_flatview(gpointer key, gpointer value,
3275 gpointer user_data)
3276{
3277 FlatView *view = key;
3278 GArray *fv_address_spaces = value;
3279 struct FlatViewInfo *fvi = user_data;
3280 fprintf_function p = fvi->mon_printf;
3281 void *f = fvi->f;
3282 FlatRange *range = &view->ranges[0];
3283 MemoryRegion *mr;
3284 int n = view->nr;
3285 int i;
3286 AddressSpace *as;
3287
3288 p(f, "FlatView #%d\n", fvi->counter);
3289 ++fvi->counter;
3290
3291 for (i = 0; i < fv_address_spaces->len; ++i) {
3292 as = g_array_index(fv_address_spaces, AddressSpace*, i);
3293 p(f, " AS \"%s\", root: %s", as->name, memory_region_name(as->root));
3294 if (as->root->alias) {
3295 p(f, ", alias %s", memory_region_name(as->root->alias));
3296 }
3297 p(f, "\n");
3298 }
3299
3300 p(f, " Root memory region: %s\n",
3301 view->root ? memory_region_name(view->root) : "(none)");
3302
3303 if (n <= 0) {
3304 p(f, MTREE_INDENT "No rendered FlatView\n\n");
3305 return;
3306 }
3307
3308 while (n--) {
3309 mr = range->mr;
3310 if (range->offset_in_region) {
3311 p(f, MTREE_INDENT TARGET_FMT_plx "-"
3312 TARGET_FMT_plx " (prio %d, %s): %s @" TARGET_FMT_plx "\n",
3313 int128_get64(range->addr.start),
3314 int128_get64(range->addr.start) + MR_SIZE(range->addr.size),
3315 mr->priority,
3316 range->readonly ? "rom" : memory_region_type(mr),
3317 memory_region_name(mr),
3318 range->offset_in_region);
3319 } else {
3320 p(f, MTREE_INDENT TARGET_FMT_plx "-"
3321 TARGET_FMT_plx " (prio %d, %s): %s\n",
3322 int128_get64(range->addr.start),
3323 int128_get64(range->addr.start) + MR_SIZE(range->addr.size),
3324 mr->priority,
3325 range->readonly ? "rom" : memory_region_type(mr),
3326 memory_region_name(mr));
3327 }
3328 range++;
3329 }
3330
3331#if !defined(CONFIG_USER_ONLY)
3332 if (fvi->dispatch_tree && view->root) {
3333 mtree_print_dispatch(p, f, view->dispatch, view->root);
3334 }
3335#endif
3336
3337 p(f, "\n");
3338}
3339
3340static gboolean mtree_info_flatview_free(gpointer key, gpointer value,
3341 gpointer user_data)
3342{
3343 FlatView *view = key;
3344 GArray *fv_address_spaces = value;
3345
3346 g_array_unref(fv_address_spaces);
3347 flatview_unref(view);
3348
3349 return true;
3350}
3351
3352void mtree_info(fprintf_function mon_printf, void *f, bool flatview,
3353 bool dispatch_tree)
3354{
3355 MemoryRegionListHead ml_head;
3356 MemoryRegionList *ml, *ml2;
3357 AddressSpace *as;
3358
3359 if (flatview) {
3360 FlatView *view;
3361 struct FlatViewInfo fvi = {
3362 .mon_printf = mon_printf,
3363 .f = f,
3364 .counter = 0,
3365 .dispatch_tree = dispatch_tree
3366 };
3367 GArray *fv_address_spaces;
3368 GHashTable *views = g_hash_table_new(g_direct_hash, g_direct_equal);
3369
        /* Gather all FVs in one table */
3371 QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
3372 view = address_space_get_flatview(as);
3373
3374 fv_address_spaces = g_hash_table_lookup(views, view);
3375 if (!fv_address_spaces) {
3376 fv_address_spaces = g_array_new(false, false, sizeof(as));
3377 g_hash_table_insert(views, view, fv_address_spaces);
3378 }
3379
3380 g_array_append_val(fv_address_spaces, as);
3381 }
3382
        /* Print */
3384 g_hash_table_foreach(views, mtree_print_flatview, &fvi);
3385
        /* Free */
3387 g_hash_table_foreach_remove(views, mtree_info_flatview_free, 0);
3388 g_hash_table_unref(views);
3389
3390 return;
3391 }
3392
3393 QTAILQ_INIT(&ml_head);
3394
3395 QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
3396 mon_printf(f, "address-space: %s\n", as->name);
3397 mtree_print_mr(mon_printf, f, as->root, 1, 0, &ml_head);
3398 mon_printf(f, "\n");
3399 }
3400
    /* print aliased regions */
3402 QTAILQ_FOREACH(ml, &ml_head, mrqueue) {
3403 mon_printf(f, "memory-region: %s\n", memory_region_name(ml->mr));
3404 mtree_print_mr(mon_printf, f, ml->mr, 1, 0, &ml_head);
3405 mon_printf(f, "\n");
3406 }
3407
3408 QTAILQ_FOREACH_SAFE(ml, &ml_head, mrqueue, ml2) {
3409 g_free(ml);
3410 }
3411}
3412
3413static bool memory_region_parse_reg(FDTGenericMMap *obj,
3414 FDTGenericRegPropInfo reg, Error **errp)
3415{
3416 MemoryRegion *mr = MEMORY_REGION(obj);
3417 uint64_t base_addr = ~0ull;
3418 uint64_t total_size = 0;
3419 uint64_t max_addr = 0;
3420 int i;
3421
3422 if (!reg.n) {
3423 return false;
3424 }
3425
3426 for (i = 0; i < reg.n; ++i) {
3427 base_addr = MIN(base_addr, reg.a[i]);
3428 max_addr = MAX(max_addr, reg.a[i] + reg.s[i]);
3429 total_size += reg.s[i];
3430 if (reg.p[i] != reg.p[0]) {
            error_setg(errp, "FDT generic memory parser does not support "
                       "mixed priorities");
3433 return false;
3434 }
3435 }
3436
    if (total_size != max_addr - base_addr) {
        error_setg(errp, "FDT generic memory parser does not support "
                   "discontiguous or overlapping memory regions");
        return false;
    }
3442
    /*
     * Attach to the first parent (if any) as the container, then
     * program the merged window's size, base address and priority.
     */
3446 if (reg.parents[0]) {
3447 object_property_set_link(OBJECT(mr), reg.parents[0], "container",
3448 &error_abort);
3449 }
3450 object_property_set_int(OBJECT(mr), total_size, "size", &error_abort);
3451 object_property_set_int(OBJECT(mr), base_addr, "addr", &error_abort);
3452 object_property_set_int(OBJECT(mr), reg.p[0], "priority", &error_abort);
3453 return false;
3454}
3455
3456static void memory_region_class_init(ObjectClass *oc, void *data)
3457{
3458 FDTGenericMMapClass *fmc = FDT_GENERIC_MMAP_CLASS(oc);
3459
3460 fmc->parse_reg = memory_region_parse_reg;
3461}
3462
3463void memory_region_init_ram(MemoryRegion *mr,
3464 struct Object *owner,
3465 const char *name,
3466 uint64_t size,
3467 Error **errp)
3468{
3469 DeviceState *owner_dev;
3470 Error *err = NULL;
3471
3472 memory_region_init_ram_nomigrate(mr, owner, name, size, &err);
3473 if (err) {
3474 error_propagate(errp, err);
3475 return;
3476 }
3477
    /* This will assert if owner is neither NULL nor a DeviceState.
     * We only want the owner here for the purposes of defining a
     * unique name for migration.  TODO: Ideally we should implement
     * a naming scheme for Objects which are not DeviceStates, in
     * which case we can relax this restriction.
     */
3483 owner_dev = DEVICE(owner);
3484 vmstate_register_ram(mr, owner_dev);
3485}
3486
3487void memory_region_init_rom(MemoryRegion *mr,
3488 struct Object *owner,
3489 const char *name,
3490 uint64_t size,
3491 Error **errp)
3492{
3493 DeviceState *owner_dev;
3494 Error *err = NULL;
3495
3496 memory_region_init_rom_nomigrate(mr, owner, name, size, &err);
3497 if (err) {
3498 error_propagate(errp, err);
3499 return;
3500 }
3501
    /* This will assert if owner is neither NULL nor a DeviceState.
     * We only want the owner here for the purposes of defining a
     * unique name for migration.  TODO: Ideally we should implement
     * a naming scheme for Objects which are not DeviceStates, in
     * which case we can relax this restriction.
     */
3507 owner_dev = DEVICE(owner);
3508 vmstate_register_ram(mr, owner_dev);
3509}
3510
3511void memory_region_init_rom_device(MemoryRegion *mr,
3512 struct Object *owner,
3513 const MemoryRegionOps *ops,
3514 void *opaque,
3515 const char *name,
3516 uint64_t size,
3517 Error **errp)
3518{
3519 DeviceState *owner_dev;
3520 Error *err = NULL;
3521
3522 memory_region_init_rom_device_nomigrate(mr, owner, ops, opaque,
3523 name, size, &err);
3524 if (err) {
3525 error_propagate(errp, err);
3526 return;
3527 }
3528
    /* This will assert if owner is neither NULL nor a DeviceState.
     * We only want the owner here for the purposes of defining a
     * unique name for migration.  TODO: Ideally we should implement
     * a naming scheme for Objects which are not DeviceStates, in
     * which case we can relax this restriction.
     */
3534 owner_dev = DEVICE(owner);
3535 vmstate_register_ram(mr, owner_dev);
3536}
3537
3538static const TypeInfo memory_region_info = {
3539 .parent = TYPE_OBJECT,
3540 .name = TYPE_MEMORY_REGION,
3541 .instance_size = sizeof(MemoryRegion),
3542 .instance_init = memory_region_initfn,
3543 .instance_finalize = memory_region_finalize,
3544 .class_init = memory_region_class_init,
3545 .interfaces = (InterfaceInfo[]) {
3546 { TYPE_FDT_GENERIC_MMAP },
3547 { },
3548 },
3549};
3550
3551static bool memory_transaction_attr_get_secure(Object *obj, Error **errp)
3552{
3553 MemTxAttrs *mattr = MEMORY_TRANSACTION_ATTR(obj);
3554 return mattr->secure;
3555}
3556
3557static void memory_transaction_attr_set_secure(Object *obj, bool value,
3558 Error **errp)
3559{
3560 MemTxAttrs *mattr = MEMORY_TRANSACTION_ATTR(obj);
3561 mattr->secure = value;
3562}
3563
3564static void mattr_get_requester_id(Object *obj, Visitor *v, const char *name,
3565 void *opaque, Error **errp)
3566{
3567 MemTxAttrs *mattr = MEMORY_TRANSACTION_ATTR(obj);
3568 uint16_t value = mattr->requester_id;
3569
3570 visit_type_uint16(v, name, &value, errp);
3571}
3572
3573
static void mattr_set_requester_id(Object *obj, Visitor *v, const char *name,
                                   void *opaque, Error **errp)
{
    MemTxAttrs *mattr = MEMORY_TRANSACTION_ATTR(obj);
    Error *local_err = NULL;
    uint16_t value;

    visit_type_uint16(v, name, &value, &local_err);
    if (local_err) {
        /* Don't store an uninitialized value if the visit failed */
        error_propagate(errp, local_err);
        return;
    }
    mattr->requester_id = value;
}
3584
3585static void mattr_set_master_id(Object *obj, Visitor *v, const char *name,
3586 void *opaque, Error **errp)
3587{
3588 gchar *path = object_get_canonical_path(obj);
3589
3590 qemu_log("WARNING: %s: The %s property will be deprecated.\n", path, name);
3591 g_free(path);
3592 mattr_set_requester_id(obj, v, name, opaque, errp);
3593}
3594
3595
3596static void memory_transaction_attr_initfn(Object *obj)
3597{
3598 MemTxAttrs *mattr = MEMORY_TRANSACTION_ATTR(obj);
3599
3600 object_property_add_bool(OBJECT(mattr), "secure",
3601 memory_transaction_attr_get_secure,
3602 memory_transaction_attr_set_secure,
3603 NULL);
3604 object_property_add(OBJECT(mattr), "requester-id", "uint16",
3605 mattr_get_requester_id,
3606 mattr_set_requester_id,
3607 NULL, NULL, &error_abort);
3608
3609 object_property_add(OBJECT(mattr), "master-id", "uint16",
3610 mattr_get_requester_id,
3611 mattr_set_master_id,
3612 NULL, NULL, &error_abort);
3613}
3614
3615static const TypeInfo memory_transaction_attr_info = {
3616 .parent = TYPE_OBJECT,
3617 .name = TYPE_MEMORY_TRANSACTION_ATTR,
3618 .instance_size = sizeof(MemTxAttrs),
3619 .instance_init = memory_transaction_attr_initfn,
3620 .interfaces = (InterfaceInfo[]) {
3621 { TYPE_FDT_GENERIC_MMAP },
3622 { },
3623 },
3624};
3625
3626static const TypeInfo iommu_memory_region_info = {
3627 .parent = TYPE_MEMORY_REGION,
3628 .name = TYPE_IOMMU_MEMORY_REGION,
3629 .class_size = sizeof(IOMMUMemoryRegionClass),
3630 .instance_size = sizeof(IOMMUMemoryRegion),
3631 .instance_init = iommu_memory_region_initfn,
3632 .abstract = true,
3633};
3634
3635static void memory_register_types(void)
3636{
3637 type_register_static(&memory_region_info);
3638 type_register_static(&memory_transaction_attr_info);
3639 type_register_static(&iommu_memory_region_info);
3640}
3641
3642type_init(memory_register_types)
3643