qemu/memory.c
<<
>>
Prefs
   1/*
   2 * Physical memory management
   3 *
   4 * Copyright 2011 Red Hat, Inc. and/or its affiliates
   5 *
   6 * Authors:
   7 *  Avi Kivity <avi@redhat.com>
   8 *
   9 * This work is licensed under the terms of the GNU GPL, version 2.  See
  10 * the COPYING file in the top-level directory.
  11 *
  12 * Contributions after 2012-01-13 are licensed under the terms of the
  13 * GNU GPL, version 2 or (at your option) any later version.
  14 */
  15
  16#include "qemu/osdep.h"
  17#include "qapi/error.h"
  18#include "qemu-common.h"
  19#include "cpu.h"
  20#include "exec/memory.h"
  21#include "exec/address-spaces.h"
  22#include "exec/ioport.h"
  23#include "qapi/visitor.h"
  24#include "qemu/bitops.h"
  25#include "qemu/error-report.h"
  26#include "qom/object.h"
  27#include "trace-root.h"
  28
  29#include "exec/memory-internal.h"
  30#include "exec/ram_addr.h"
  31#include "sysemu/kvm.h"
  32#include "sysemu/sysemu.h"
  33#include "hw/misc/mmio_interface.h"
  34#include "hw/qdev-properties.h"
  35#include "migration/vmstate.h"
  36
  37#include "hw/fdt_generic_util.h"
  38
  39//#define DEBUG_UNASSIGNED
  40
  41static unsigned memory_region_transaction_depth;
  42static bool memory_region_update_pending;
  43static bool ioeventfd_update_pending;
  44static bool global_dirty_log = false;
  45
  46static QTAILQ_HEAD(memory_listeners, MemoryListener) memory_listeners
  47    = QTAILQ_HEAD_INITIALIZER(memory_listeners);
  48
  49static QTAILQ_HEAD(, AddressSpace) address_spaces
  50    = QTAILQ_HEAD_INITIALIZER(address_spaces);
  51
  52static GHashTable *flat_views;
  53
  54typedef struct AddrRange AddrRange;
  55
  56static void memory_region_update_container_subregions(MemoryRegion *subregion);
  57static void memory_region_readd_subregion(MemoryRegion *mr);
  58
  59/*
  60 * Note that signed integers are needed for negative offsetting in aliases
  61 * (large MemoryRegion::alias_offset).
  62 */
  63struct AddrRange {
  64    Int128 start;
  65    Int128 size;
  66};
  67
  68static AddrRange addrrange_make(Int128 start, Int128 size)
  69{
  70    return (AddrRange) { start, size };
  71}
  72
  73static bool addrrange_equal(AddrRange r1, AddrRange r2)
  74{
  75    return int128_eq(r1.start, r2.start) && int128_eq(r1.size, r2.size);
  76}
  77
  78static Int128 addrrange_end(AddrRange r)
  79{
  80    return int128_add(r.start, r.size);
  81}
  82
  83static AddrRange addrrange_shift(AddrRange range, Int128 delta)
  84{
  85    int128_addto(&range.start, delta);
  86    return range;
  87}
  88
  89static bool addrrange_contains(AddrRange range, Int128 addr)
  90{
  91    return int128_ge(addr, range.start)
  92        && int128_lt(addr, addrrange_end(range));
  93}
  94
  95static bool addrrange_intersects(AddrRange r1, AddrRange r2)
  96{
  97    return addrrange_contains(r1, r2.start)
  98        || addrrange_contains(r2, r1.start);
  99}
 100
 101static AddrRange addrrange_intersection(AddrRange r1, AddrRange r2)
 102{
 103    Int128 start = int128_max(r1.start, r2.start);
 104    Int128 end = int128_min(addrrange_end(r1), addrrange_end(r2));
 105    return addrrange_make(start, int128_sub(end, start));
 106}
 107
 108enum ListenerDirection { Forward, Reverse };
 109
 110#define MEMORY_LISTENER_CALL_GLOBAL(_callback, _direction, _args...)    \
 111    do {                                                                \
 112        MemoryListener *_listener;                                      \
 113                                                                        \
 114        switch (_direction) {                                           \
 115        case Forward:                                                   \
 116            QTAILQ_FOREACH(_listener, &memory_listeners, link) {        \
 117                if (_listener->_callback) {                             \
 118                    _listener->_callback(_listener, ##_args);           \
 119                }                                                       \
 120            }                                                           \
 121            break;                                                      \
 122        case Reverse:                                                   \
 123            QTAILQ_FOREACH_REVERSE(_listener, &memory_listeners,        \
 124                                   memory_listeners, link) {            \
 125                if (_listener->_callback) {                             \
 126                    _listener->_callback(_listener, ##_args);           \
 127                }                                                       \
 128            }                                                           \
 129            break;                                                      \
 130        default:                                                        \
 131            abort();                                                    \
 132        }                                                               \
 133    } while (0)
 134
 135#define MEMORY_LISTENER_CALL(_as, _callback, _direction, _section, _args...) \
 136    do {                                                                \
 137        MemoryListener *_listener;                                      \
 138        struct memory_listeners_as *list = &(_as)->listeners;           \
 139                                                                        \
 140        switch (_direction) {                                           \
 141        case Forward:                                                   \
 142            QTAILQ_FOREACH(_listener, list, link_as) {                  \
 143                if (_listener->_callback) {                             \
 144                    _listener->_callback(_listener, _section, ##_args); \
 145                }                                                       \
 146            }                                                           \
 147            break;                                                      \
 148        case Reverse:                                                   \
 149            QTAILQ_FOREACH_REVERSE(_listener, list, memory_listeners_as, \
 150                                   link_as) {                           \
 151                if (_listener->_callback) {                             \
 152                    _listener->_callback(_listener, _section, ##_args); \
 153                }                                                       \
 154            }                                                           \
 155            break;                                                      \
 156        default:                                                        \
 157            abort();                                                    \
 158        }                                                               \
 159    } while (0)
 160
 161/* No need to ref/unref .mr, the FlatRange keeps it alive.  */
 162#define MEMORY_LISTENER_UPDATE_REGION(fr, as, dir, callback, _args...)  \
 163    do {                                                                \
 164        MemoryRegionSection mrs = section_from_flat_range(fr,           \
 165                address_space_to_flatview(as));                         \
 166        MEMORY_LISTENER_CALL(as, callback, dir, &mrs, ##_args);         \
 167    } while(0)
 168
 169struct CoalescedMemoryRange {
 170    AddrRange addr;
 171    QTAILQ_ENTRY(CoalescedMemoryRange) link;
 172};
 173
 174struct MemoryRegionIoeventfd {
 175    AddrRange addr;
 176    bool match_data;
 177    uint64_t data;
 178    EventNotifier *e;
 179};
 180
 181static bool memory_region_ioeventfd_before(MemoryRegionIoeventfd a,
 182                                           MemoryRegionIoeventfd b)
 183{
 184    if (int128_lt(a.addr.start, b.addr.start)) {
 185        return true;
 186    } else if (int128_gt(a.addr.start, b.addr.start)) {
 187        return false;
 188    } else if (int128_lt(a.addr.size, b.addr.size)) {
 189        return true;
 190    } else if (int128_gt(a.addr.size, b.addr.size)) {
 191        return false;
 192    } else if (a.match_data < b.match_data) {
 193        return true;
 194    } else  if (a.match_data > b.match_data) {
 195        return false;
 196    } else if (a.match_data) {
 197        if (a.data < b.data) {
 198            return true;
 199        } else if (a.data > b.data) {
 200            return false;
 201        }
 202    }
 203    if (a.e < b.e) {
 204        return true;
 205    } else if (a.e > b.e) {
 206        return false;
 207    }
 208    return false;
 209}
 210
 211static bool memory_region_ioeventfd_equal(MemoryRegionIoeventfd a,
 212                                          MemoryRegionIoeventfd b)
 213{
 214    return !memory_region_ioeventfd_before(a, b)
 215        && !memory_region_ioeventfd_before(b, a);
 216}
 217
 218typedef struct FlatRange FlatRange;
 219
 220/* Range of memory in the global map.  Addresses are absolute. */
 221struct FlatRange {
 222    MemoryRegion *mr;
 223    hwaddr offset_in_region;
 224    AddrRange addr;
 225    uint8_t dirty_log_mask;
 226    bool romd_mode;
 227    bool readonly;
 228};
 229
 230/* Flattened global view of current active memory hierarchy.  Kept in sorted
 231 * order.
 232 */
 233struct FlatView {
 234    struct rcu_head rcu;
 235    unsigned ref;
 236    FlatRange *ranges;
 237    unsigned nr;
 238    unsigned nr_allocated;
 239    struct AddressSpaceDispatch *dispatch;
 240    MemoryRegion *root;
 241};
 242
 243typedef struct AddressSpaceOps AddressSpaceOps;
 244
 245#define FOR_EACH_FLAT_RANGE(var, view)          \
 246    for (var = (view)->ranges; var < (view)->ranges + (view)->nr; ++var)
 247
 248static inline MemoryRegionSection
 249section_from_flat_range(FlatRange *fr, FlatView *fv)
 250{
 251    return (MemoryRegionSection) {
 252        .mr = fr->mr,
 253        .fv = fv,
 254        .offset_within_region = fr->offset_in_region,
 255        .size = fr->addr.size,
 256        .offset_within_address_space = int128_get64(fr->addr.start),
 257        .readonly = fr->readonly,
 258    };
 259}
 260
 261static bool flatrange_equal(FlatRange *a, FlatRange *b)
 262{
 263    return a->mr == b->mr
 264        && addrrange_equal(a->addr, b->addr)
 265        && a->offset_in_region == b->offset_in_region
 266        && a->romd_mode == b->romd_mode
 267        && a->readonly == b->readonly;
 268}
 269
 270static FlatView *flatview_new(MemoryRegion *mr_root)
 271{
 272    FlatView *view;
 273
 274    view = g_new0(FlatView, 1);
 275    view->ref = 1;
 276    view->root = mr_root;
 277    memory_region_ref(mr_root);
 278    trace_flatview_new(view, mr_root);
 279
 280    return view;
 281}
 282
 283/* Insert a range into a given position.  Caller is responsible for maintaining
 284 * sorting order.
 285 */
 286static void flatview_insert(FlatView *view, unsigned pos, FlatRange *range)
 287{
 288    if (view->nr == view->nr_allocated) {
 289        view->nr_allocated = MAX(2 * view->nr, 10);
 290        view->ranges = g_realloc(view->ranges,
 291                                    view->nr_allocated * sizeof(*view->ranges));
 292    }
 293    memmove(view->ranges + pos + 1, view->ranges + pos,
 294            (view->nr - pos) * sizeof(FlatRange));
 295    view->ranges[pos] = *range;
 296    memory_region_ref(range->mr);
 297    ++view->nr;
 298}
 299
 300static void flatview_destroy(FlatView *view)
 301{
 302    int i;
 303
 304    trace_flatview_destroy(view, view->root);
 305    if (view->dispatch) {
 306        address_space_dispatch_free(view->dispatch);
 307    }
 308    for (i = 0; i < view->nr; i++) {
 309        memory_region_unref(view->ranges[i].mr);
 310    }
 311    g_free(view->ranges);
 312    memory_region_unref(view->root);
 313    g_free(view);
 314}
 315
 316static bool flatview_ref(FlatView *view)
 317{
 318    return atomic_fetch_inc_nonzero(&view->ref) > 0;
 319}
 320
 321static void flatview_unref(FlatView *view)
 322{
 323    if (atomic_fetch_dec(&view->ref) == 1) {
 324        trace_flatview_destroy_rcu(view, view->root);
 325        assert(view->root);
 326        call_rcu(view, flatview_destroy, rcu);
 327    }
 328}
 329
 330FlatView *address_space_to_flatview(AddressSpace *as)
 331{
 332    return atomic_rcu_read(&as->current_map);
 333}
 334
 335AddressSpaceDispatch *flatview_to_dispatch(FlatView *fv)
 336{
 337    return fv->dispatch;
 338}
 339
 340AddressSpaceDispatch *address_space_to_dispatch(AddressSpace *as)
 341{
 342    return flatview_to_dispatch(address_space_to_flatview(as));
 343}
 344
 345static bool can_merge(FlatRange *r1, FlatRange *r2)
 346{
 347    return int128_eq(addrrange_end(r1->addr), r2->addr.start)
 348        && r1->mr == r2->mr
 349        && int128_eq(int128_add(int128_make64(r1->offset_in_region),
 350                                r1->addr.size),
 351                     int128_make64(r2->offset_in_region))
 352        && r1->dirty_log_mask == r2->dirty_log_mask
 353        && r1->romd_mode == r2->romd_mode
 354        && r1->readonly == r2->readonly;
 355}
 356
 357/* Attempt to simplify a view by merging adjacent ranges */
 358static void flatview_simplify(FlatView *view)
 359{
 360    unsigned i, j;
 361
 362    i = 0;
 363    while (i < view->nr) {
 364        j = i + 1;
 365        while (j < view->nr
 366               && can_merge(&view->ranges[j-1], &view->ranges[j])) {
 367            int128_addto(&view->ranges[i].addr.size, view->ranges[j].addr.size);
 368            ++j;
 369        }
 370        ++i;
 371        memmove(&view->ranges[i], &view->ranges[j],
 372                (view->nr - j) * sizeof(view->ranges[j]));
 373        view->nr -= j - i;
 374    }
 375}
 376
 377static bool memory_region_big_endian(MemoryRegion *mr)
 378{
 379#ifdef TARGET_WORDS_BIGENDIAN
 380    return mr->ops->endianness != DEVICE_LITTLE_ENDIAN;
 381#else
 382    return mr->ops->endianness == DEVICE_BIG_ENDIAN;
 383#endif
 384}
 385
 386static bool memory_region_wrong_endianness(MemoryRegion *mr)
 387{
 388#ifdef TARGET_WORDS_BIGENDIAN
 389    return mr->ops->endianness == DEVICE_LITTLE_ENDIAN;
 390#else
 391    return mr->ops->endianness == DEVICE_BIG_ENDIAN;
 392#endif
 393}
 394
 395static void adjust_endianness(MemoryRegion *mr, uint64_t *data, unsigned size)
 396{
 397    if (memory_region_wrong_endianness(mr)) {
 398        switch (size) {
 399        case 1:
 400            break;
 401        case 2:
 402            *data = bswap16(*data);
 403            break;
 404        case 4:
 405            *data = bswap32(*data);
 406            break;
 407        case 8:
 408            *data = bswap64(*data);
 409            break;
 410        default:
 411            abort();
 412        }
 413    }
 414}
 415
 416static hwaddr memory_region_to_absolute_addr(MemoryRegion *mr, hwaddr offset)
 417{
 418    MemoryRegion *root;
 419    hwaddr abs_addr = offset;
 420
 421    abs_addr += mr->addr;
 422    for (root = mr; root->container; ) {
 423        root = root->container;
 424        abs_addr += root->addr;
 425    }
 426
 427    return abs_addr;
 428}
 429
 430static int get_cpu_index(void)
 431{
 432    if (current_cpu) {
 433        return current_cpu->cpu_index;
 434    }
 435    return -1;
 436}
 437
 438static MemTxResult memory_region_oldmmio_read_accessor(MemoryRegion *mr,
 439                                                       hwaddr addr,
 440                                                       uint64_t *value,
 441                                                       unsigned size,
 442                                                       unsigned shift,
 443                                                       uint64_t mask,
 444                                                       MemTxAttrs attrs)
 445{
 446    uint64_t tmp;
 447
 448    tmp = mr->ops->old_mmio.read[ctz32(size)](mr->opaque, addr);
 449    if (mr->subpage) {
 450        trace_memory_region_subpage_read(get_cpu_index(), mr, addr, tmp, size);
 451    } else if (mr == &io_mem_notdirty) {
 452        /* Accesses to code which has previously been translated into a TB show
 453         * up in the MMIO path, as accesses to the io_mem_notdirty
 454         * MemoryRegion. */
 455        trace_memory_region_tb_read(get_cpu_index(), addr, tmp, size);
 456    } else if (TRACE_MEMORY_REGION_OPS_READ_ENABLED) {
 457        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
 458        trace_memory_region_ops_read(get_cpu_index(), mr, abs_addr, tmp, size);
 459    }
 460    *value |= (tmp & mask) << shift;
 461    return MEMTX_OK;
 462}
 463
 464/* FIXME: Remove */
 465static MemTxResult memory_region_read_accessor_attr(MemoryRegion *mr,
 466                                                    hwaddr addr,
 467                                                    uint64_t *value,
 468                                                    unsigned size,
 469                                                    unsigned shift,
 470                                                    uint64_t mask,
 471                                                    MemTxAttrs attrs)
 472{
 473    MemoryTransaction tr = {{0}};
 474
 475    if (mr->flush_coalesced_mmio) {
 476        qemu_flush_coalesced_mmio_buffer();
 477    }
 478
 479    tr.opaque = mr->opaque;
 480    tr.addr = addr;
 481    tr.size = size;
 482    tr.attr = attrs;
 483    mr->ops->access(&tr);
 484    *value |= (tr.data.u64 & mask) << shift;
 485
 486    return MEMTX_OK;
 487}
 488
 489static MemTxResult  memory_region_read_accessor(MemoryRegion *mr,
 490                                                hwaddr addr,
 491                                                uint64_t *value,
 492                                                unsigned size,
 493                                                unsigned shift,
 494                                                uint64_t mask,
 495                                                MemTxAttrs attrs)
 496{
 497    uint64_t tmp;
 498
 499    tmp = mr->ops->read(mr->opaque, addr, size);
 500    if (mr->subpage) {
 501        trace_memory_region_subpage_read(get_cpu_index(), mr, addr, tmp, size);
 502    } else if (mr == &io_mem_notdirty) {
 503        /* Accesses to code which has previously been translated into a TB show
 504         * up in the MMIO path, as accesses to the io_mem_notdirty
 505         * MemoryRegion. */
 506        trace_memory_region_tb_read(get_cpu_index(), addr, tmp, size);
 507    } else if (TRACE_MEMORY_REGION_OPS_READ_ENABLED) {
 508        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
 509        trace_memory_region_ops_read(get_cpu_index(), mr, abs_addr, tmp, size);
 510    }
 511    *value |= (tmp & mask) << shift;
 512    return MEMTX_OK;
 513}
 514
 515static MemTxResult memory_region_read_with_attrs_accessor(MemoryRegion *mr,
 516                                                          hwaddr addr,
 517                                                          uint64_t *value,
 518                                                          unsigned size,
 519                                                          unsigned shift,
 520                                                          uint64_t mask,
 521                                                          MemTxAttrs attrs)
 522{
 523    uint64_t tmp = 0;
 524    MemTxResult r;
 525
 526    r = mr->ops->read_with_attrs(mr->opaque, addr, &tmp, size, attrs);
 527    if (mr->subpage) {
 528        trace_memory_region_subpage_read(get_cpu_index(), mr, addr, tmp, size);
 529    } else if (mr == &io_mem_notdirty) {
 530        /* Accesses to code which has previously been translated into a TB show
 531         * up in the MMIO path, as accesses to the io_mem_notdirty
 532         * MemoryRegion. */
 533        trace_memory_region_tb_read(get_cpu_index(), addr, tmp, size);
 534    } else if (TRACE_MEMORY_REGION_OPS_READ_ENABLED) {
 535        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
 536        trace_memory_region_ops_read(get_cpu_index(), mr, abs_addr, tmp, size);
 537    }
 538    *value |= (tmp & mask) << shift;
 539    return r;
 540}
 541
 542static MemTxResult memory_region_oldmmio_write_accessor(MemoryRegion *mr,
 543                                                        hwaddr addr,
 544                                                        uint64_t *value,
 545                                                        unsigned size,
 546                                                        unsigned shift,
 547                                                        uint64_t mask,
 548                                                        MemTxAttrs attrs)
 549{
 550    uint64_t tmp;
 551
 552    tmp = (*value >> shift) & mask;
 553    if (mr->subpage) {
 554        trace_memory_region_subpage_write(get_cpu_index(), mr, addr, tmp, size);
 555    } else if (mr == &io_mem_notdirty) {
 556        /* Accesses to code which has previously been translated into a TB show
 557         * up in the MMIO path, as accesses to the io_mem_notdirty
 558         * MemoryRegion. */
 559        trace_memory_region_tb_write(get_cpu_index(), addr, tmp, size);
 560    } else if (TRACE_MEMORY_REGION_OPS_WRITE_ENABLED) {
 561        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
 562        trace_memory_region_ops_write(get_cpu_index(), mr, abs_addr, tmp, size);
 563    }
 564    mr->ops->old_mmio.write[ctz32(size)](mr->opaque, addr, tmp);
 565    return MEMTX_OK;
 566}
 567
 568/* FIXME: Remove */
 569static MemTxResult memory_region_write_accessor_attr(MemoryRegion *mr,
 570                                                     hwaddr addr,
 571                                                     uint64_t *value,
 572                                                     unsigned size,
 573                                                     unsigned shift,
 574                                                     uint64_t mask,
 575                                                     MemTxAttrs attrs)
 576{
 577    MemoryTransaction tr = {{0}};
 578
 579    if (mr->flush_coalesced_mmio) {
 580        qemu_flush_coalesced_mmio_buffer();
 581    }
 582
 583    tr.opaque = mr->opaque;
 584    tr.rw = true;
 585    tr.addr = addr;
 586    tr.size = size;
 587    tr.attr = attrs;
 588    tr.data.u64 = (*value >> shift) & mask;
 589    trace_memory_region_ops_write(get_cpu_index(), mr, tr.addr, tr.data.u64, tr.size);
 590    mr->ops->access(&tr);
 591
 592    return MEMTX_OK;
 593}
 594
 595static MemTxResult memory_region_write_accessor(MemoryRegion *mr,
 596                                                hwaddr addr,
 597                                                uint64_t *value,
 598                                                unsigned size,
 599                                                unsigned shift,
 600                                                uint64_t mask,
 601                                                MemTxAttrs attrs)
 602{
 603    uint64_t tmp;
 604
 605    tmp = (*value >> shift) & mask;
 606    if (mr->subpage) {
 607        trace_memory_region_subpage_write(get_cpu_index(), mr, addr, tmp, size);
 608    } else if (mr == &io_mem_notdirty) {
 609        /* Accesses to code which has previously been translated into a TB show
 610         * up in the MMIO path, as accesses to the io_mem_notdirty
 611         * MemoryRegion. */
 612        trace_memory_region_tb_write(get_cpu_index(), addr, tmp, size);
 613    } else if (TRACE_MEMORY_REGION_OPS_WRITE_ENABLED) {
 614        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
 615        trace_memory_region_ops_write(get_cpu_index(), mr, abs_addr, tmp, size);
 616    }
 617    mr->ops->write(mr->opaque, addr, tmp, size);
 618    return MEMTX_OK;
 619}
 620
 621static MemTxResult memory_region_write_with_attrs_accessor(MemoryRegion *mr,
 622                                                           hwaddr addr,
 623                                                           uint64_t *value,
 624                                                           unsigned size,
 625                                                           unsigned shift,
 626                                                           uint64_t mask,
 627                                                           MemTxAttrs attrs)
 628{
 629    uint64_t tmp;
 630
 631    tmp = (*value >> shift) & mask;
 632    if (mr->subpage) {
 633        trace_memory_region_subpage_write(get_cpu_index(), mr, addr, tmp, size);
 634    } else if (mr == &io_mem_notdirty) {
 635        /* Accesses to code which has previously been translated into a TB show
 636         * up in the MMIO path, as accesses to the io_mem_notdirty
 637         * MemoryRegion. */
 638        trace_memory_region_tb_write(get_cpu_index(), addr, tmp, size);
 639    } else if (TRACE_MEMORY_REGION_OPS_WRITE_ENABLED) {
 640        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
 641        trace_memory_region_ops_write(get_cpu_index(), mr, abs_addr, tmp, size);
 642    }
 643    return mr->ops->write_with_attrs(mr->opaque, addr, tmp, size, attrs);
 644}
 645
 646static MemTxResult access_with_adjusted_size(hwaddr addr,
 647                                      uint64_t *value,
 648                                      unsigned size,
 649                                      unsigned access_size_min,
 650                                      unsigned access_size_max,
 651                                      MemTxResult (*access_fn)
 652                                                  (MemoryRegion *mr,
 653                                                   hwaddr addr,
 654                                                   uint64_t *value,
 655                                                   unsigned size,
 656                                                   unsigned shift,
 657                                                   uint64_t mask,
 658                                                   MemTxAttrs attrs),
 659                                      MemoryRegion *mr,
 660                                      MemTxAttrs attrs)
 661{
 662    uint64_t access_mask;
 663    unsigned access_size;
 664    unsigned i;
 665    MemTxResult r = MEMTX_OK;
 666
 667    if (!access_size_min) {
 668        access_size_min = 1;
 669    }
 670    if (!access_size_max) {
 671        access_size_max = 4;
 672    }
 673
 674    /* FIXME: support unaligned access? */
 675    access_size = MAX(MIN(size, access_size_max), access_size_min);
 676    access_mask = -1ULL >> (64 - access_size * 8);
 677    if (memory_region_big_endian(mr)) {
 678        for (i = 0; i < size; i += access_size) {
 679            r |= access_fn(mr, addr + i, value, access_size,
 680                        (size - access_size - i) * 8, access_mask, attrs);
 681        }
 682    } else {
 683        for (i = 0; i < size; i += access_size) {
 684            r |= access_fn(mr, addr + i, value, access_size, i * 8,
 685                        access_mask, attrs);
 686        }
 687    }
 688    return r;
 689}
 690
 691static AddressSpace *memory_region_to_address_space(MemoryRegion *mr)
 692{
 693    AddressSpace *as;
 694
 695    while (mr->container) {
 696        mr = mr->container;
 697    }
 698    QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
 699        if (mr == as->root) {
 700            return as;
 701        }
 702    }
 703    return NULL;
 704}
 705
 706/* Render a memory region into the global view.  Ranges in @view obscure
 707 * ranges in @mr.
 708 */
 709static void render_memory_region(FlatView *view,
 710                                 MemoryRegion *mr,
 711                                 Int128 base,
 712                                 AddrRange clip,
 713                                 bool readonly)
 714{
 715    MemoryRegion *subregion;
 716    unsigned i;
 717    hwaddr offset_in_region;
 718    Int128 remain;
 719    Int128 now;
 720    FlatRange fr;
 721    AddrRange tmp;
 722
 723    if (!mr->enabled) {
 724        return;
 725    }
 726
 727    int128_addto(&base, int128_make64(mr->addr));
 728    readonly |= mr->readonly;
 729
 730    tmp = addrrange_make(base, mr->size);
 731
 732    if (!addrrange_intersects(tmp, clip)) {
 733        return;
 734    }
 735
 736    clip = addrrange_intersection(tmp, clip);
 737
 738    if (mr->alias) {
 739        int128_subfrom(&base, int128_make64(mr->alias->addr));
 740        int128_subfrom(&base, int128_make64(mr->alias_offset));
 741        render_memory_region(view, mr->alias, base, clip, readonly);
 742        return;
 743    }
 744
 745    /* Render subregions in priority order. */
 746    QTAILQ_FOREACH(subregion, &mr->subregions, subregions_link) {
 747        render_memory_region(view, subregion, base, clip, readonly);
 748    }
 749
 750    if (!mr->terminates) {
 751        return;
 752    }
 753
 754    offset_in_region = int128_get64(int128_sub(clip.start, base));
 755    base = clip.start;
 756    remain = clip.size;
 757
 758    fr.mr = mr;
 759    fr.dirty_log_mask = memory_region_get_dirty_log_mask(mr);
 760    fr.romd_mode = mr->romd_mode;
 761    fr.readonly = readonly;
 762
 763    /* Render the region itself into any gaps left by the current view. */
 764    for (i = 0; i < view->nr && int128_nz(remain); ++i) {
 765        if (int128_ge(base, addrrange_end(view->ranges[i].addr))) {
 766            continue;
 767        }
 768        if (int128_lt(base, view->ranges[i].addr.start)) {
 769            now = int128_min(remain,
 770                             int128_sub(view->ranges[i].addr.start, base));
 771            fr.offset_in_region = offset_in_region;
 772            fr.addr = addrrange_make(base, now);
 773            flatview_insert(view, i, &fr);
 774            ++i;
 775            int128_addto(&base, now);
 776            offset_in_region += int128_get64(now);
 777            int128_subfrom(&remain, now);
 778        }
 779        now = int128_sub(int128_min(int128_add(base, remain),
 780                                    addrrange_end(view->ranges[i].addr)),
 781                         base);
 782        int128_addto(&base, now);
 783        offset_in_region += int128_get64(now);
 784        int128_subfrom(&remain, now);
 785    }
 786    if (int128_nz(remain)) {
 787        fr.offset_in_region = offset_in_region;
 788        fr.addr = addrrange_make(base, remain);
 789        flatview_insert(view, i, &fr);
 790    }
 791}
 792
 793static MemoryRegion *memory_region_get_flatview_root(MemoryRegion *mr)
 794{
 795    while (mr->enabled) {
 796        if (mr->alias) {
 797            if (!mr->alias_offset && int128_ge(mr->size, mr->alias->size)) {
 798                /* The alias is included in its entirety.  Use it as
 799                 * the "real" root, so that we can share more FlatViews.
 800                 */
 801                mr = mr->alias;
 802                continue;
 803            }
 804        } else if (!mr->terminates) {
 805            unsigned int found = 0;
 806            MemoryRegion *child, *next = NULL;
 807            QTAILQ_FOREACH(child, &mr->subregions, subregions_link) {
 808                if (child->enabled) {
 809                    if (++found > 1) {
 810                        next = NULL;
 811                        break;
 812                    }
 813                    if (!child->addr && int128_ge(mr->size, child->size)) {
 814                        /* A child is included in its entirety.  If it's the only
 815                         * enabled one, use it in the hope of finding an alias down the
 816                         * way. This will also let us share FlatViews.
 817                         */
 818                        next = child;
 819                    }
 820                }
 821            }
 822            if (found == 0) {
 823                return NULL;
 824            }
 825            if (next) {
 826                mr = next;
 827                continue;
 828            }
 829        }
 830
 831        return mr;
 832    }
 833
 834    return NULL;
 835}
 836
 837/* Render a memory topology into a list of disjoint absolute ranges. */
 838static FlatView *generate_memory_topology(MemoryRegion *mr)
 839{
 840    int i;
 841    FlatView *view;
 842
 843    view = flatview_new(mr);
 844
 845    if (mr) {
 846        render_memory_region(view, mr, int128_zero(),
 847                             addrrange_make(int128_zero(), int128_2_64()), false);
 848    }
 849    flatview_simplify(view);
 850
 851    view->dispatch = address_space_dispatch_new(view);
 852    for (i = 0; i < view->nr; i++) {
 853        MemoryRegionSection mrs =
 854            section_from_flat_range(&view->ranges[i], view);
 855        flatview_add_to_dispatch(view, &mrs);
 856    }
 857    address_space_dispatch_compact(view->dispatch);
 858    g_hash_table_replace(flat_views, mr, view);
 859
 860    return view;
 861}
 862
 863static void address_space_add_del_ioeventfds(AddressSpace *as,
 864                                             MemoryRegionIoeventfd *fds_new,
 865                                             unsigned fds_new_nb,
 866                                             MemoryRegionIoeventfd *fds_old,
 867                                             unsigned fds_old_nb)
 868{
 869    unsigned iold, inew;
 870    MemoryRegionIoeventfd *fd;
 871    MemoryRegionSection section;
 872
 873    /* Generate a symmetric difference of the old and new fd sets, adding
 874     * and deleting as necessary.
 875     */
 876
 877    iold = inew = 0;
 878    while (iold < fds_old_nb || inew < fds_new_nb) {
 879        if (iold < fds_old_nb
 880            && (inew == fds_new_nb
 881                || memory_region_ioeventfd_before(fds_old[iold],
 882                                                  fds_new[inew]))) {
 883            fd = &fds_old[iold];
 884            section = (MemoryRegionSection) {
 885                .fv = address_space_to_flatview(as),
 886                .offset_within_address_space = int128_get64(fd->addr.start),
 887                .size = fd->addr.size,
 888            };
 889            MEMORY_LISTENER_CALL(as, eventfd_del, Forward, &section,
 890                                 fd->match_data, fd->data, fd->e);
 891            ++iold;
 892        } else if (inew < fds_new_nb
 893                   && (iold == fds_old_nb
 894                       || memory_region_ioeventfd_before(fds_new[inew],
 895                                                         fds_old[iold]))) {
 896            fd = &fds_new[inew];
 897            section = (MemoryRegionSection) {
 898                .fv = address_space_to_flatview(as),
 899                .offset_within_address_space = int128_get64(fd->addr.start),
 900                .size = fd->addr.size,
 901            };
 902            MEMORY_LISTENER_CALL(as, eventfd_add, Reverse, &section,
 903                                 fd->match_data, fd->data, fd->e);
 904            ++inew;
 905        } else {
 906            ++iold;
 907            ++inew;
 908        }
 909    }
 910}
 911
 912static FlatView *address_space_get_flatview(AddressSpace *as)
 913{
 914    FlatView *view;
 915
 916    rcu_read_lock();
 917    do {
 918        view = address_space_to_flatview(as);
 919        /* If somebody has replaced as->current_map concurrently,
 920         * flatview_ref returns false.
 921         */
 922    } while (!flatview_ref(view));
 923    rcu_read_unlock();
 924    return view;
 925}
 926
 927static void address_space_update_ioeventfds(AddressSpace *as)
 928{
 929    FlatView *view;
 930    FlatRange *fr;
 931    unsigned ioeventfd_nb = 0;
 932    MemoryRegionIoeventfd *ioeventfds = NULL;
 933    AddrRange tmp;
 934    unsigned i;
 935
 936    view = address_space_get_flatview(as);
 937    FOR_EACH_FLAT_RANGE(fr, view) {
 938        for (i = 0; i < fr->mr->ioeventfd_nb; ++i) {
 939            tmp = addrrange_shift(fr->mr->ioeventfds[i].addr,
 940                                  int128_sub(fr->addr.start,
 941                                             int128_make64(fr->offset_in_region)));
 942            if (addrrange_intersects(fr->addr, tmp)) {
 943                ++ioeventfd_nb;
 944                ioeventfds = g_realloc(ioeventfds,
 945                                          ioeventfd_nb * sizeof(*ioeventfds));
 946                ioeventfds[ioeventfd_nb-1] = fr->mr->ioeventfds[i];
 947                ioeventfds[ioeventfd_nb-1].addr = tmp;
 948            }
 949        }
 950    }
 951
 952    address_space_add_del_ioeventfds(as, ioeventfds, ioeventfd_nb,
 953                                     as->ioeventfds, as->ioeventfd_nb);
 954
 955    g_free(as->ioeventfds);
 956    as->ioeventfds = ioeventfds;
 957    as->ioeventfd_nb = ioeventfd_nb;
 958    flatview_unref(view);
 959}
 960
 961static void address_space_update_topology_pass(AddressSpace *as,
 962                                               const FlatView *old_view,
 963                                               const FlatView *new_view,
 964                                               bool adding)
 965{
 966    unsigned iold, inew;
 967    FlatRange *frold, *frnew;
 968
 969    /* Generate a symmetric difference of the old and new memory maps.
 970     * Kill ranges in the old map, and instantiate ranges in the new map.
 971     */
 972    iold = inew = 0;
 973    while (iold < old_view->nr || inew < new_view->nr) {
 974        if (iold < old_view->nr) {
 975            frold = &old_view->ranges[iold];
 976        } else {
 977            frold = NULL;
 978        }
 979        if (inew < new_view->nr) {
 980            frnew = &new_view->ranges[inew];
 981        } else {
 982            frnew = NULL;
 983        }
 984
 985        if (frold
 986            && (!frnew
 987                || int128_lt(frold->addr.start, frnew->addr.start)
 988                || (int128_eq(frold->addr.start, frnew->addr.start)
 989                    && !flatrange_equal(frold, frnew)))) {
 990            /* In old but not in new, or in both but attributes changed. */
 991
 992            if (!adding) {
 993                MEMORY_LISTENER_UPDATE_REGION(frold, as, Reverse, region_del);
 994            }
 995
 996            ++iold;
 997        } else if (frold && frnew && flatrange_equal(frold, frnew)) {
 998            /* In both and unchanged (except logging may have changed) */
 999
1000            if (adding) {
1001                MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, region_nop);
1002                if (frnew->dirty_log_mask & ~frold->dirty_log_mask) {
1003                    MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, log_start,
1004                                                  frold->dirty_log_mask,
1005                                                  frnew->dirty_log_mask);
1006                }
1007                if (frold->dirty_log_mask & ~frnew->dirty_log_mask) {
1008                    MEMORY_LISTENER_UPDATE_REGION(frnew, as, Reverse, log_stop,
1009                                                  frold->dirty_log_mask,
1010                                                  frnew->dirty_log_mask);
1011                }
1012            }
1013
1014            ++iold;
1015            ++inew;
1016        } else {
1017            /* In new */
1018
1019            if (adding) {
1020                MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, region_add);
1021            }
1022
1023            ++inew;
1024        }
1025    }
1026}
1027
1028static void flatviews_init(void)
1029{
1030    static FlatView *empty_view;
1031
1032    if (flat_views) {
1033        return;
1034    }
1035
1036    flat_views = g_hash_table_new_full(g_direct_hash, g_direct_equal, NULL,
1037                                       (GDestroyNotify) flatview_unref);
1038    if (!empty_view) {
1039        empty_view = generate_memory_topology(NULL);
1040        /* We keep it alive forever in the global variable.  */
1041        flatview_ref(empty_view);
1042    } else {
1043        g_hash_table_replace(flat_views, NULL, empty_view);
1044        flatview_ref(empty_view);
1045    }
1046}
1047
1048static void flatviews_reset(void)
1049{
1050    AddressSpace *as;
1051
1052    if (flat_views) {
1053        g_hash_table_unref(flat_views);
1054        flat_views = NULL;
1055    }
1056    flatviews_init();
1057
1058    /* Render unique FVs */
1059    QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
1060        MemoryRegion *physmr = memory_region_get_flatview_root(as->root);
1061
1062        if (g_hash_table_lookup(flat_views, physmr)) {
1063            continue;
1064        }
1065
1066        generate_memory_topology(physmr);
1067    }
1068}
1069
1070static void address_space_set_flatview(AddressSpace *as)
1071{
1072    FlatView *old_view = address_space_to_flatview(as);
1073    MemoryRegion *physmr = memory_region_get_flatview_root(as->root);
1074    FlatView *new_view = g_hash_table_lookup(flat_views, physmr);
1075
1076    assert(new_view);
1077
1078    if (old_view == new_view) {
1079        return;
1080    }
1081
1082    if (old_view) {
1083        flatview_ref(old_view);
1084    }
1085
1086    flatview_ref(new_view);
1087
1088    if (!QTAILQ_EMPTY(&as->listeners)) {
1089        FlatView tmpview = { .nr = 0 }, *old_view2 = old_view;
1090
1091        if (!old_view2) {
1092            old_view2 = &tmpview;
1093        }
1094        address_space_update_topology_pass(as, old_view2, new_view, false);
1095        address_space_update_topology_pass(as, old_view2, new_view, true);
1096    }
1097
1098    /* Writes are protected by the BQL.  */
1099    atomic_rcu_set(&as->current_map, new_view);
1100    if (old_view) {
1101        flatview_unref(old_view);
1102    }
1103
1104    /* Note that all the old MemoryRegions are still alive up to this
1105     * point.  This relieves most MemoryListeners from the need to
1106     * ref/unref the MemoryRegions they get---unless they use them
1107     * outside the iothread mutex, in which case precise reference
1108     * counting is necessary.
1109     */
1110    if (old_view) {
1111        flatview_unref(old_view);
1112    }
1113}
1114
1115static void address_space_update_topology(AddressSpace *as)
1116{
1117    MemoryRegion *physmr = memory_region_get_flatview_root(as->root);
1118
1119    flatviews_init();
1120    if (!g_hash_table_lookup(flat_views, physmr)) {
1121        generate_memory_topology(physmr);
1122    }
1123    address_space_set_flatview(as);
1124}
1125
1126void memory_region_transaction_begin(void)
1127{
1128    qemu_flush_coalesced_mmio_buffer();
1129    ++memory_region_transaction_depth;
1130}
1131
1132void memory_region_transaction_commit(void)
1133{
1134    AddressSpace *as;
1135
1136    assert(memory_region_transaction_depth);
1137    assert(qemu_mutex_iothread_locked());
1138
1139    --memory_region_transaction_depth;
1140    if (!memory_region_transaction_depth) {
1141        if (memory_region_update_pending) {
1142            flatviews_reset();
1143
1144            MEMORY_LISTENER_CALL_GLOBAL(begin, Forward);
1145
1146            QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
1147                address_space_set_flatview(as);
1148                address_space_update_ioeventfds(as);
1149            }
1150            memory_region_update_pending = false;
1151            ioeventfd_update_pending = false;
1152            MEMORY_LISTENER_CALL_GLOBAL(commit, Forward);
1153        } else if (ioeventfd_update_pending) {
1154            QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
1155                address_space_update_ioeventfds(as);
1156            }
1157            ioeventfd_update_pending = false;
1158        }
1159   }
1160}
1161
1162static void memory_region_destructor_none(MemoryRegion *mr)
1163{
1164}
1165
1166static void memory_region_destructor_ram(MemoryRegion *mr)
1167{
1168    qemu_ram_free(mr->ram_block);
1169}
1170
1171static bool memory_region_need_escape(char c)
1172{
1173    return c == '/' || c == '[' || c == '\\' || c == ']';
1174}
1175
1176static char *memory_region_escape_name(const char *name)
1177{
1178    const char *p;
1179    char *escaped, *q;
1180    uint8_t c;
1181    size_t bytes = 0;
1182
1183    for (p = name; *p; p++) {
1184        bytes += memory_region_need_escape(*p) ? 4 : 1;
1185    }
1186    if (bytes == p - name) {
1187       return g_memdup(name, bytes + 1);
1188    }
1189
1190    escaped = g_malloc(bytes + 1);
1191    for (p = name, q = escaped; *p; p++) {
1192        c = *p;
1193        if (unlikely(memory_region_need_escape(c))) {
1194            *q++ = '\\';
1195            *q++ = 'x';
1196            *q++ = "0123456789abcdef"[c >> 4];
1197            c = "0123456789abcdef"[c & 15];
1198        }
1199        *q++ = c;
1200    }
1201    *q = 0;
1202    return escaped;
1203}
1204
1205static void memory_region_do_init(MemoryRegion *mr,
1206                                  Object *owner,
1207                                  const char *name,
1208                                  uint64_t size)
1209{
1210    mr->size = int128_make64(size);
1211    if (size == UINT64_MAX) {
1212        mr->size = int128_2_64();
1213    }
1214    mr->name = g_strdup(name);
1215    mr->owner = owner;
1216    mr->ram_block = NULL;
1217
1218    if (name) {
1219        char *escaped_name = memory_region_escape_name(name);
1220        char *name_array = g_strdup_printf("%s[*]", escaped_name);
1221
1222        if (!owner) {
1223            owner = container_get(qdev_get_machine(), "/unattached");
1224        }
1225
1226        object_property_add_child(owner, name_array, OBJECT(mr), &error_abort);
1227        object_unref(OBJECT(mr));
1228        g_free(name_array);
1229        g_free(escaped_name);
1230    }
1231}
1232
1233void memory_region_init(MemoryRegion *mr,
1234                        Object *owner,
1235                        const char *name,
1236                        uint64_t size)
1237{
1238    object_initialize(mr, sizeof(*mr), TYPE_MEMORY_REGION);
1239    memory_region_do_init(mr, owner, name, size);
1240}
1241
1242static void memory_region_get_addr(Object *obj, Visitor *v, const char *name,
1243                                   void *opaque, Error **errp)
1244{
1245    MemoryRegion *mr = MEMORY_REGION(obj);
1246    uint64_t value = mr->addr;
1247
1248    visit_type_uint64(v, name, &value, errp);
1249}
1250
1251static void memory_region_set_addr(Object *obj, Visitor *v, const char *name,
1252                                   void *opaque, Error **errp)
1253{
1254    MemoryRegion *mr = MEMORY_REGION(obj);
1255    Error *local_err = NULL;
1256    uint64_t value;
1257
1258    visit_type_uint64(v, name, &value, &local_err);
1259    if (local_err) {
1260        error_propagate(errp, local_err);
1261        return;
1262    }
1263
1264    memory_region_set_address(mr, value);
1265}
1266
1267static void memory_region_set_container(Object *obj, Visitor *v, const char *name,
1268                                        void *opaque, Error **errp)
1269{
1270    MemoryRegion *mr = MEMORY_REGION(obj);
1271    Error *local_err = NULL;
1272    MemoryRegion *old_container = mr->container;
1273    MemoryRegion *new_container = NULL;
1274    char *path = NULL;
1275
1276    visit_type_str(v, name, &path, &local_err);
1277
1278    if (!local_err && strcmp(path, "") != 0) {
1279        new_container = MEMORY_REGION(object_resolve_link(obj, name, path,
1280                                      &local_err));
1281        while (new_container->alias) {
1282            new_container = new_container->alias;
1283        }
1284    }
1285
1286    if (local_err) {
1287        error_propagate(errp, local_err);
1288        return;
1289    }
1290
1291    object_ref(OBJECT(new_container));
1292
1293    memory_region_transaction_begin();
1294    memory_region_ref(mr);
1295    if (old_container) {
1296        memory_region_del_subregion(old_container, mr);
1297    }
1298    mr->container = new_container;
1299    if (new_container) {
1300        memory_region_update_container_subregions(mr);
1301    }
1302    memory_region_unref(mr);
1303    memory_region_transaction_commit();
1304
1305    object_unref(OBJECT(old_container));
1306}
1307
1308static void memory_region_get_container(Object *obj, Visitor *v,
1309                                        const char *name, void *opaque,
1310                                        Error **errp)
1311{
1312    MemoryRegion *mr = MEMORY_REGION(obj);
1313    gchar *path = (gchar *)"";
1314
1315    if (mr->container) {
1316        path = object_get_canonical_path(OBJECT(mr->container));
1317    }
1318    visit_type_str(v, name, &path, errp);
1319    if (mr->container) {
1320        g_free(path);
1321    }
1322}
1323
1324static Object *memory_region_resolve_container(Object *obj, void *opaque,
1325                                               const char *part)
1326{
1327    MemoryRegion *mr = MEMORY_REGION(obj);
1328
1329    return OBJECT(mr->container);
1330}
1331
1332static void memory_region_set_alias(const Object *obj, const char *name,
1333                                    Object *val, Error **errp)
1334{
1335    MemoryRegion *mr = MEMORY_REGION(obj);
1336    MemoryRegion *subregion, *next;
1337
1338    /* Be conservative and only allow one shotting for the mo */
1339    /* FIXME: Use a softer error than assert */
1340    assert (!mr->alias);
1341
1342    /* FIXME: check we don't already have subregions and
1343     * anything else that might be mutex with aliasing
1344     */
1345
1346    memory_region_transaction_begin();
1347    QTAILQ_FOREACH_SAFE(subregion, &mr->subregions, subregions_link, next) {
1348        object_property_set_link(OBJECT(subregion), OBJECT(val),
1349                                 "container", errp);
1350    }
1351    memory_region_ref(mr);
1352    mr->alias = MEMORY_REGION(val);
1353    memory_region_unref(mr);
1354    memory_region_transaction_commit();
1355    /* FIXME: add cleanup destructors etc etc */
1356}
1357
1358static void memory_region_get_priority(Object *obj, Visitor *v,
1359                                       const char *name, void *opaque,
1360                                       Error **errp)
1361{
1362    MemoryRegion *mr = MEMORY_REGION(obj);
1363    int32_t value = mr->priority;
1364
1365    visit_type_int32(v, name, &value, errp);
1366}
1367
1368static bool memory_region_get_may_overlap(Object *obj, Error **errp)
1369{
1370    MemoryRegion *mr = MEMORY_REGION(obj);
1371
1372    return mr->may_overlap;
1373}
1374
1375static void memory_region_set_priority(Object *obj, Visitor *v, const char *name,
1376                                       void *opaque, Error **errp)
1377{
1378    MemoryRegion *mr = MEMORY_REGION(obj);
1379    Error *local_err = NULL;
1380    int32_t value;
1381
1382    visit_type_uint32(v, name, (uint32_t *)&value, &error_abort);
1383    if (local_err) {
1384        error_propagate(errp, local_err);
1385        return;
1386    }
1387
1388    if (mr->priority != value) {
1389        mr->priority = value;
1390        memory_region_readd_subregion(mr);
1391    }
1392}
1393
1394static void memory_region_do_set_ram(MemoryRegion *mr)
1395{
1396    char *c, *filename, *sanitized_name;
1397
1398    if (mr->addr) {
1399        qemu_ram_free(mr->ram_block);
1400    }
1401    if (int128_eq(mr->size, int128_make64(0))) {
1402        return;
1403    }
1404    switch (mr->ram) {
1405    case(0):
1406        mr->ram_block = NULL;
1407        break;
1408    case(1):
1409        mr->ram_block = qemu_ram_alloc(int128_get64(mr->size), mr, &error_abort);
1410        break;
1411    case(2):
1412        sanitized_name = g_strdup(object_get_canonical_path(OBJECT(mr)));
1413
1414        for (c = sanitized_name; *c != '\0'; c++) {
1415            if (*c == '/')
1416                *c = '_';
1417        }
1418        filename = g_strdup_printf("%s" G_DIR_SEPARATOR_S "qemu-memory-%s",
1419                                   machine_path ? machine_path : ".",
1420                                   sanitized_name);
1421        g_free(sanitized_name);
1422        mr->ram_block = qemu_ram_alloc_from_file(int128_get64(mr->size), mr,
1423                                                 true, filename, &error_abort);
1424        g_free(filename);
1425        break;
1426    default:
1427        abort();
1428    }
1429}
1430
1431static void memory_region_set_ram(Object *obj, Visitor *v, const char *name,
1432                                  void *opaque, Error **errp)
1433{
1434    MemoryRegion *mr = MEMORY_REGION(obj);
1435    Error *local_err = NULL;
1436    uint8_t value;
1437
1438    visit_type_uint8(v, name, &value, &error_abort);
1439    if (local_err) {
1440        error_propagate(errp, local_err);
1441        return;
1442    }
1443
1444    /* FIXME: Sanitize error handling */
1445    /* FIXME: Probably need all that transactions stuff */
1446    if (mr->ram == value) {
1447        return;
1448    }
1449
1450    mr->ram = value;
1451    mr->terminates = !!value; /*FIXME: Wrong */
1452
1453    if (int128_eq(int128_2_64(), mr->size)) {
1454        return;
1455    }
1456
1457    memory_region_do_set_ram(mr);
1458}
1459
1460static void memory_region_get_size(Object *obj, Visitor *v, const char *name,
1461                                   void *opaque, Error **errp)
1462{
1463    MemoryRegion *mr = MEMORY_REGION(obj);
1464    uint64_t value = memory_region_size(mr);
1465
1466    visit_type_uint64(v, name, &value, errp);
1467}
1468
1469static void memory_region_set_object_size(Object *obj, Visitor *v, const char *name,
1470                                          void *opaque, Error **errp)
1471{
1472    MemoryRegion *mr = MEMORY_REGION(obj);
1473    Error *local_err = NULL;
1474    uint64_t size;
1475
1476    visit_type_uint64(v, name, &size, &local_err);
1477
1478    memory_region_set_size(mr, size);
1479}
1480
1481static void memory_region_initfn(Object *obj)
1482{
1483    MemoryRegion *mr = MEMORY_REGION(obj);
1484    ObjectProperty *op;
1485
1486    mr->ops = &unassigned_mem_ops;
1487    mr->enabled = true;
1488    mr->romd_mode = true;
1489    mr->global_locking = true;
1490    mr->destructor = memory_region_destructor_none;
1491    /* Xilinx: We need this as the default to allow the amba memory regions
1492     * to be created correctly.
1493     */
1494    mr->size = int128_2_64();
1495    QTAILQ_INIT(&mr->subregions);
1496    QTAILQ_INIT(&mr->coalesced);
1497
1498    op = object_property_add(OBJECT(mr), "container",
1499                             "link<" TYPE_MEMORY_REGION ">",
1500                             memory_region_get_container,
1501                             memory_region_set_container,
1502                             NULL, NULL, &error_abort);
1503    op->resolve = memory_region_resolve_container;
1504
1505    object_property_add_link(OBJECT(mr), "alias", TYPE_MEMORY_REGION,
1506                             (Object **)&mr->alias,
1507                             memory_region_set_alias,
1508                             0,
1509                             &error_abort);
1510    object_property_add(OBJECT(mr), "addr", "uint64",
1511                        memory_region_get_addr,
1512                        memory_region_set_addr,
1513                        NULL, NULL, &error_abort);
1514    object_property_add(OBJECT(mr), "priority", "uint32",
1515                        memory_region_get_priority,
1516                        memory_region_set_priority,
1517                        NULL, NULL, &error_abort);
1518    object_property_add(OBJECT(mr), "ram", "uint8",
1519                        NULL, /* FIXME: Add getter */
1520                        memory_region_set_ram,
1521                        NULL, NULL, &error_abort);
1522    object_property_add_bool(OBJECT(mr), "may-overlap",
1523                        memory_region_get_may_overlap,
1524                        NULL, /* memory_region_set_may_overlap */
1525                        &error_abort);
1526    object_property_add(OBJECT(mr), "size", "uint64",
1527                        memory_region_get_size,
1528                        memory_region_set_object_size,
1529                        NULL, NULL, &error_abort);
1530}
1531
1532static void iommu_memory_region_initfn(Object *obj)
1533{
1534    MemoryRegion *mr = MEMORY_REGION(obj);
1535
1536    mr->is_iommu = true;
1537}
1538
1539static uint64_t unassigned_mem_read(void *opaque, hwaddr addr,
1540                                    unsigned size)
1541{
1542#ifdef DEBUG_UNASSIGNED
1543    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
1544#endif
1545    if (current_cpu != NULL) {
1546        cpu_unassigned_access(current_cpu, addr, false, false, 0, size);
1547    }
1548    return 0;
1549}
1550
1551static void unassigned_mem_write(void *opaque, hwaddr addr,
1552                                 uint64_t val, unsigned size)
1553{
1554#ifdef DEBUG_UNASSIGNED
1555    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%"PRIx64"\n", addr, val);
1556#endif
1557    if (current_cpu != NULL) {
1558        cpu_unassigned_access(current_cpu, addr, true, false, 0, size);
1559    }
1560}
1561
1562static bool unassigned_mem_accepts(void *opaque, hwaddr addr,
1563                                   unsigned size, bool is_write)
1564{
1565    return false;
1566}
1567
1568const MemoryRegionOps unassigned_mem_ops = {
1569    .valid.accepts = unassigned_mem_accepts,
1570    .endianness = DEVICE_NATIVE_ENDIAN,
1571};
1572
1573static uint64_t memory_region_ram_device_read(void *opaque,
1574                                              hwaddr addr, unsigned size)
1575{
1576    MemoryRegion *mr = opaque;
1577    uint64_t data = (uint64_t)~0;
1578
1579    switch (size) {
1580    case 1:
1581        data = *(uint8_t *)(mr->ram_block->host + addr);
1582        break;
1583    case 2:
1584        data = *(uint16_t *)(mr->ram_block->host + addr);
1585        break;
1586    case 4:
1587        data = *(uint32_t *)(mr->ram_block->host + addr);
1588        break;
1589    case 8:
1590        data = *(uint64_t *)(mr->ram_block->host + addr);
1591        break;
1592    }
1593
1594    trace_memory_region_ram_device_read(get_cpu_index(), mr, addr, data, size);
1595
1596    return data;
1597}
1598
1599static void memory_region_ram_device_write(void *opaque, hwaddr addr,
1600                                           uint64_t data, unsigned size)
1601{
1602    MemoryRegion *mr = opaque;
1603
1604    trace_memory_region_ram_device_write(get_cpu_index(), mr, addr, data, size);
1605
1606    switch (size) {
1607    case 1:
1608        *(uint8_t *)(mr->ram_block->host + addr) = (uint8_t)data;
1609        break;
1610    case 2:
1611        *(uint16_t *)(mr->ram_block->host + addr) = (uint16_t)data;
1612        break;
1613    case 4:
1614        *(uint32_t *)(mr->ram_block->host + addr) = (uint32_t)data;
1615        break;
1616    case 8:
1617        *(uint64_t *)(mr->ram_block->host + addr) = data;
1618        break;
1619    }
1620}
1621
1622static const MemoryRegionOps ram_device_mem_ops = {
1623    .read = memory_region_ram_device_read,
1624    .write = memory_region_ram_device_write,
1625    .endianness = DEVICE_HOST_ENDIAN,
1626    .valid = {
1627        .min_access_size = 1,
1628        .max_access_size = 8,
1629        .unaligned = true,
1630    },
1631    .impl = {
1632        .min_access_size = 1,
1633        .max_access_size = 8,
1634        .unaligned = true,
1635    },
1636};
1637
1638bool memory_region_access_valid(MemoryRegion *mr,
1639                                hwaddr addr,
1640                                unsigned size,
1641                                bool is_write)
1642{
1643    int access_size_min, access_size_max;
1644    int access_size, i;
1645
1646    if (!mr->ops->valid.unaligned && (addr & (size - 1))) {
1647        return false;
1648    }
1649
1650    if (!mr->ops->valid.accepts) {
1651        return true;
1652    }
1653
1654    access_size_min = mr->ops->valid.min_access_size;
1655    if (!mr->ops->valid.min_access_size) {
1656        access_size_min = 1;
1657    }
1658
1659    access_size_max = mr->ops->valid.max_access_size;
1660    if (!mr->ops->valid.max_access_size) {
1661        access_size_max = 4;
1662    }
1663
1664    access_size = MAX(MIN(size, access_size_max), access_size_min);
1665    for (i = 0; i < size; i += access_size) {
1666        if (!mr->ops->valid.accepts(mr->opaque, addr + i, access_size,
1667                                    is_write)) {
1668            return false;
1669        }
1670    }
1671
1672    return true;
1673}
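
/*
 * Illustrative MemoryRegionOps for the checks above (a sketch; the "foo"
 * callbacks are hypothetical and not defined in this file).  With
 * .valid.unaligned = false the alignment test at the top of
 * memory_region_access_valid() rejects unaligned accesses outright, and the
 * .valid.accepts hook is then asked about each chunk, sized between
 * .valid.min_access_size and .valid.max_access_size:
 *
 *     static bool foo_accepts(void *opaque, hwaddr addr,
 *                             unsigned size, bool is_write)
 *     {
 *         return size == 4;
 *     }
 *
 *     static const MemoryRegionOps foo_ops = {
 *         .read = foo_read,
 *         .write = foo_write,
 *         .endianness = DEVICE_NATIVE_ENDIAN,
 *         .valid = {
 *             .min_access_size = 4,
 *             .max_access_size = 4,
 *             .unaligned = false,
 *             .accepts = foo_accepts,
 *         },
 *     };
 *
 * Note that without an .accepts callback this function only enforces
 * alignment; the min/max sizes are used to split the access, not to reject it.
 */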
1674
1675static MemTxResult memory_region_dispatch_read1(MemoryRegion *mr,
1676                                                hwaddr addr,
1677                                                uint64_t *pval,
1678                                                unsigned size,
1679                                                MemTxAttrs attrs)
1680{
1681    *pval = 0;
1682
1683    if (mr->ops->access) {
1684        return access_with_adjusted_size(addr, pval, size,
1685                                         mr->ops->impl.min_access_size,
1686                                         mr->ops->impl.max_access_size,
1687                                         memory_region_read_accessor_attr,
1688                                         mr, attrs);
1689    } else if (mr->ops->read) {
1690        return access_with_adjusted_size(addr, pval, size,
1691                                         mr->ops->impl.min_access_size,
1692                                         mr->ops->impl.max_access_size,
1693                                         memory_region_read_accessor,
1694                                         mr, attrs);
1695    } else if (mr->ops->read_with_attrs) {
1696        return access_with_adjusted_size(addr, pval, size,
1697                                         mr->ops->impl.min_access_size,
1698                                         mr->ops->impl.max_access_size,
1699                                         memory_region_read_with_attrs_accessor,
1700                                         mr, attrs);
1701    } else {
1702        return access_with_adjusted_size(addr, pval, size, 1, 4,
1703                                         memory_region_oldmmio_read_accessor,
1704                                         mr, attrs);
1705    }
1706}
1707
1708MemTxResult memory_region_dispatch_read(MemoryRegion *mr,
1709                                        hwaddr addr,
1710                                        uint64_t *pval,
1711                                        unsigned size,
1712                                        MemTxAttrs attrs)
1713{
1714    MemTxResult r;
1715
1716    if (!memory_region_access_valid(mr, addr, size, false)) {
1717        *pval = unassigned_mem_read(mr, addr, size);
1718        return MEMTX_DECODE_ERROR;
1719    }
1720
1721    r = memory_region_dispatch_read1(mr, addr, pval, size, attrs);
1722    adjust_endianness(mr, pval, size);
1723    return r;
1724}
1725
1726/* Return true if an eventfd was signalled */
1727static bool memory_region_dispatch_write_eventfds(MemoryRegion *mr,
1728                                                    hwaddr addr,
1729                                                    uint64_t data,
1730                                                    unsigned size,
1731                                                    MemTxAttrs attrs)
1732{
1733    MemoryRegionIoeventfd ioeventfd = {
1734        .addr = addrrange_make(int128_make64(addr), int128_make64(size)),
1735        .data = data,
1736    };
1737    unsigned i;
1738
1739    for (i = 0; i < mr->ioeventfd_nb; i++) {
1740        ioeventfd.match_data = mr->ioeventfds[i].match_data;
1741        ioeventfd.e = mr->ioeventfds[i].e;
1742
1743        if (memory_region_ioeventfd_equal(ioeventfd, mr->ioeventfds[i])) {
1744            event_notifier_set(ioeventfd.e);
1745            return true;
1746        }
1747    }
1748
1749    return false;
1750}
1751
1752MemTxResult memory_region_dispatch_write(MemoryRegion *mr,
1753                                         hwaddr addr,
1754                                         uint64_t data,
1755                                         unsigned size,
1756                                         MemTxAttrs attrs)
1757{
1758    if (!memory_region_access_valid(mr, addr, size, true)) {
1759        unassigned_mem_write(mr, addr, data, size);
1760        return MEMTX_DECODE_ERROR;
1761    }
1762
1763    adjust_endianness(mr, &data, size);
1764
1765    if ((!kvm_eventfds_enabled()) &&
1766        memory_region_dispatch_write_eventfds(mr, addr, data, size, attrs)) {
1767        return MEMTX_OK;
1768    }
1769
1770    if (mr->ops->access) {
1771        return access_with_adjusted_size(addr, &data, size,
1772                                         mr->ops->impl.min_access_size,
1773                                         mr->ops->impl.max_access_size,
1774                                         memory_region_write_accessor_attr,
1775                                         mr, attrs);
1776    } else if (mr->ops->write) {
1777        return access_with_adjusted_size(addr, &data, size,
1778                                         mr->ops->impl.min_access_size,
1779                                         mr->ops->impl.max_access_size,
1780                                         memory_region_write_accessor, mr,
1781                                         attrs);
1782    } else if (mr->ops->write_with_attrs) {
1783        return
1784            access_with_adjusted_size(addr, &data, size,
1785                                      mr->ops->impl.min_access_size,
1786                                      mr->ops->impl.max_access_size,
1787                                      memory_region_write_with_attrs_accessor,
1788                                      mr, attrs);
1789    } else {
1790        return access_with_adjusted_size(addr, &data, size, 1, 4,
1791                                         memory_region_oldmmio_write_accessor,
1792                                         mr, attrs);
1793    }
1794}
1795
1796void memory_region_init_io(MemoryRegion *mr,
1797                           Object *owner,
1798                           const MemoryRegionOps *ops,
1799                           void *opaque,
1800                           const char *name,
1801                           uint64_t size)
1802{
1803    memory_region_init(mr, owner, name, size);
1804    mr->ops = ops ? ops : &unassigned_mem_ops;
1805    mr->opaque = opaque;
1806    mr->terminates = true;
1807}
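
/*
 * Illustrative call site for memory_region_init_io() (a sketch; the "foo"
 * device state and sysbus wiring are assumptions, not part of this file):
 *
 *     memory_region_init_io(&s->iomem, OBJECT(dev), &foo_ops, s,
 *                           "foo-regs", 0x1000);
 *     sysbus_init_mmio(SYS_BUS_DEVICE(dev), &s->iomem);
 *
 * The region is not guest-visible until something maps it, e.g. with
 * memory_region_add_subregion() or sysbus_mmio_map().
 */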
1808
1809void memory_region_init_ram_nomigrate(MemoryRegion *mr,
1810                                      Object *owner,
1811                                      const char *name,
1812                                      uint64_t size,
1813                                      Error **errp)
1814{
1815    memory_region_init(mr, owner, name, size);
1816    mr->ram = 1;
1817    mr->terminates = true;
1818    mr->destructor = memory_region_destructor_ram;
1819    mr->ram_block = qemu_ram_alloc(size, mr, errp);
1820    mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
1821}
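
/*
 * Illustrative board-code usage (a sketch; "sysmem" and "ram" are
 * assumptions):
 *
 *     memory_region_init_ram_nomigrate(ram, NULL, "board.ram", ram_size,
 *                                      &error_fatal);
 *     vmstate_register_ram_global(ram);
 *     memory_region_add_subregion(sysmem, 0, ram);
 *
 * The _nomigrate variant allocates the RAMBlock but does not register it for
 * migration; callers that need migration register it themselves as above.
 */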
1822
1823void memory_region_init_resizeable_ram(MemoryRegion *mr,
1824                                       Object *owner,
1825                                       const char *name,
1826                                       uint64_t size,
1827                                       uint64_t max_size,
1828                                       void (*resized)(const char*,
1829                                                       uint64_t length,
1830                                                       void *host),
1831                                       Error **errp)
1832{
1833    memory_region_init(mr, owner, name, size);
1834    mr->ram = true;
1835    mr->terminates = true;
1836    mr->destructor = memory_region_destructor_ram;
1837    mr->ram_block = qemu_ram_alloc_resizeable(size, max_size, resized,
1838                                              mr, errp);
1839    mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
1840}
1841
1842#ifdef __linux__
1843void memory_region_init_ram_from_file(MemoryRegion *mr,
1844                                      struct Object *owner,
1845                                      const char *name,
1846                                      uint64_t size,
1847                                      bool share,
1848                                      const char *path,
1849                                      Error **errp)
1850{
1851    memory_region_init(mr, owner, name, size);
1852    mr->ram = 2;
1853    mr->terminates = true;
1854    mr->destructor = memory_region_destructor_ram;
1855    mr->ram_block = qemu_ram_alloc_from_file(size, mr, share, path, errp);
1856    mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
1857}
1858
1859void memory_region_init_ram_from_fd(MemoryRegion *mr,
1860                                    struct Object *owner,
1861                                    const char *name,
1862                                    uint64_t size,
1863                                    bool share,
1864                                    int fd,
1865                                    Error **errp)
1866{
1867    memory_region_init(mr, owner, name, size);
1868    mr->ram = true;
1869    mr->terminates = true;
1870    mr->destructor = memory_region_destructor_ram;
1871    mr->ram_block = qemu_ram_alloc_from_fd(size, mr, share, fd, errp);
1872    mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
1873}
1874#endif
1875
1876void memory_region_init_ram_ptr(MemoryRegion *mr,
1877                                Object *owner,
1878                                const char *name,
1879                                uint64_t size,
1880                                void *ptr)
1881{
1882    memory_region_init(mr, owner, name, size);
1883    mr->ram = 3;
1884    mr->terminates = true;
1885    mr->destructor = memory_region_destructor_ram;
1886    mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
1887
1888    /* qemu_ram_alloc_from_ptr cannot fail with ptr != NULL.  */
1889    assert(ptr != NULL);
1890    mr->ram_block = qemu_ram_alloc_from_ptr(size, ptr, mr, &error_fatal);
1891}
1892
1893void memory_region_init_ram_device_ptr(MemoryRegion *mr,
1894                                       Object *owner,
1895                                       const char *name,
1896                                       uint64_t size,
1897                                       void *ptr)
1898{
1899    memory_region_init_ram_ptr(mr, owner, name, size, ptr);
1900    mr->ram_device = true;
1901    mr->ops = &ram_device_mem_ops;
1902    mr->opaque = mr;
1903}
1904
1905void memory_region_init_alias(MemoryRegion *mr,
1906                              Object *owner,
1907                              const char *name,
1908                              MemoryRegion *orig,
1909                              hwaddr offset,
1910                              uint64_t size)
1911{
1912    memory_region_init(mr, owner, name, size);
1913    mr->alias = orig;
1914    mr->alias_offset = offset;
1915}
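
/*
 * Illustrative aliasing (a sketch with made-up names and addresses): expose
 * the first 1 MiB of "ram" a second time at 0xe0000000:
 *
 *     memory_region_init_alias(alias, NULL, "ram-low-alias",
 *                              ram, 0, 0x100000);
 *     memory_region_add_subregion(sysmem, 0xe0000000, alias);
 *
 * Accesses to the alias are redirected into "ram" starting at alias_offset;
 * the alias itself owns no memory.
 */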
1916
1917void memory_region_init_rom_nomigrate(MemoryRegion *mr,
1918                                      struct Object *owner,
1919                                      const char *name,
1920                                      uint64_t size,
1921                                      Error **errp)
1922{
1923    memory_region_init(mr, owner, name, size);
1924    mr->ram = true;
1925    mr->readonly = true;
1926    mr->terminates = true;
1927    mr->destructor = memory_region_destructor_ram;
1928    mr->ram_block = qemu_ram_alloc(size, mr, errp);
1929    mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
1930}
1931
1932void memory_region_init_rom_device_nomigrate(MemoryRegion *mr,
1933                                             Object *owner,
1934                                             const MemoryRegionOps *ops,
1935                                             void *opaque,
1936                                             const char *name,
1937                                             uint64_t size,
1938                                             Error **errp)
1939{
1940    assert(ops);
1941    memory_region_init(mr, owner, name, size);
1942    mr->ops = ops;
1943    mr->opaque = opaque;
1944    mr->terminates = true;
1945    mr->rom_device = true;
1946    mr->destructor = memory_region_destructor_ram;
1947    mr->ram_block = qemu_ram_alloc(size, mr, errp);
1948}
1949
1950void memory_region_init_iommu(void *_iommu_mr,
1951                              size_t instance_size,
1952                              const char *mrtypename,
1953                              Object *owner,
1954                              const char *name,
1955                              uint64_t size)
1956{
1957    struct IOMMUMemoryRegion *iommu_mr;
1958    struct MemoryRegion *mr;
1959
1960    object_initialize(_iommu_mr, instance_size, mrtypename);
1961    mr = MEMORY_REGION(_iommu_mr);
1962    memory_region_do_init(mr, owner, name, size);
1963    iommu_mr = IOMMU_MEMORY_REGION(mr);
1964    mr->terminates = true;  /* then re-forwards */
1965    QLIST_INIT(&iommu_mr->iommu_notify);
1966    iommu_mr->iommu_notify_flags = IOMMU_NOTIFIER_NONE;
1967}
1968
1969static void memory_region_finalize(Object *obj)
1970{
1971    MemoryRegion *mr = MEMORY_REGION(obj);
1972
1973    assert(!mr->container);
1974
1975    /* We know the region is not visible in any address space (it
1976     * does not have a container and cannot be a root either, because
1977     * it has no references), so we can blindly clear mr->enabled.
1978     * Using memory_region_set_enabled instead could trigger a
1979     * transaction and cause an infinite loop.
1980     */
1981    mr->enabled = false;
1982    memory_region_transaction_begin();
1983    while (!QTAILQ_EMPTY(&mr->subregions)) {
1984        MemoryRegion *subregion = QTAILQ_FIRST(&mr->subregions);
1985        memory_region_del_subregion(mr, subregion);
1986    }
1987    memory_region_transaction_commit();
1988
1989    mr->destructor(mr);
1990    memory_region_clear_coalescing(mr);
1991    g_free((char *)mr->name);
1992    g_free(mr->ioeventfds);
1993}
1994
1995Object *memory_region_owner(MemoryRegion *mr)
1996{
1997    Object *obj = OBJECT(mr);
1998    return obj->parent;
1999}
2000
2001void memory_region_ref(MemoryRegion *mr)
2002{
2003    /* MMIO callbacks most likely will access data that belongs
2004     * to the owner, hence the need to ref/unref the owner whenever
2005     * the memory region is in use.
2006     *
2007     * The memory region is a child of its owner.  As long as the
2008     * owner doesn't call unparent itself on the memory region,
2009     * ref-ing the owner will also keep the memory region alive.
2010     * Memory regions without an owner are supposed to never go away;
2011     * we do not ref/unref them because doing so slows down DMA noticeably.
2012     */
2013    if (mr && mr->owner) {
2014        object_ref(mr->owner);
2015    }
2016}
2017
2018void memory_region_unref(MemoryRegion *mr)
2019{
2020    if (mr && mr->owner) {
2021        object_unref(mr->owner);
2022    }
2023}
2024
2025uint64_t memory_region_size(MemoryRegion *mr)
2026{
2027    if (int128_eq(mr->size, int128_2_64())) {
2028        return UINT64_MAX;
2029    }
2030    return int128_get64(mr->size);
2031}
2032
2033const char *memory_region_name(const MemoryRegion *mr)
2034{
2035    if (!mr->name) {
2036        ((MemoryRegion *)mr)->name =
2037            object_get_canonical_path_component(OBJECT(mr));
2038    }
2039    return mr->name;
2040}
2041
2042bool memory_region_is_ram_device(MemoryRegion *mr)
2043{
2044    return mr->ram_device;
2045}
2046
2047uint8_t memory_region_get_dirty_log_mask(MemoryRegion *mr)
2048{
2049    uint8_t mask = mr->dirty_log_mask;
2050    if (global_dirty_log && mr->ram_block) {
2051        mask |= (1 << DIRTY_MEMORY_MIGRATION);
2052    }
2053    return mask;
2054}
2055
2056bool memory_region_is_logging(MemoryRegion *mr, uint8_t client)
2057{
2058    return memory_region_get_dirty_log_mask(mr) & (1 << client);
2059}
2060
2061static void memory_region_update_iommu_notify_flags(IOMMUMemoryRegion *iommu_mr)
2062{
2063    IOMMUNotifierFlag flags = IOMMU_NOTIFIER_NONE;
2064    IOMMUNotifier *iommu_notifier;
2065    IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);
2066
2067    IOMMU_NOTIFIER_FOREACH(iommu_notifier, iommu_mr) {
2068        flags |= iommu_notifier->notifier_flags;
2069    }
2070
2071    if (flags != iommu_mr->iommu_notify_flags && imrc->notify_flag_changed) {
2072        imrc->notify_flag_changed(iommu_mr,
2073                                  iommu_mr->iommu_notify_flags,
2074                                  flags);
2075    }
2076
2077    iommu_mr->iommu_notify_flags = flags;
2078}
2079
2080void memory_region_register_iommu_notifier(MemoryRegion *mr,
2081                                           IOMMUNotifier *n)
2082{
2083    IOMMUMemoryRegion *iommu_mr;
2084
2085    if (mr->alias) {
2086        memory_region_register_iommu_notifier(mr->alias, n);
2087        return;
2088    }
2089
2090    /* We need to register for at least one bitfield */
2091    iommu_mr = IOMMU_MEMORY_REGION(mr);
2092    assert(n->notifier_flags != IOMMU_NOTIFIER_NONE);
2093    assert(n->start <= n->end);
2094    QLIST_INSERT_HEAD(&iommu_mr->iommu_notify, n, node);
2095    memory_region_update_iommu_notify_flags(iommu_mr);
2096}
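
/*
 * Illustrative registration (a sketch; the callback name is hypothetical and
 * this assumes the iommu_notifier_init() helper from exec/memory.h):
 *
 *     IOMMUNotifier n;
 *
 *     iommu_notifier_init(&n, foo_iommu_map_notify, IOMMU_NOTIFIER_ALL,
 *                         0, HWADDR_MAX);
 *     memory_region_register_iommu_notifier(MEMORY_REGION(iommu_mr), &n);
 *
 * The flags and [start, end] range set up here are exactly what
 * memory_region_notify_one() checks before invoking the callback.
 */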
2097
2098uint64_t memory_region_iommu_get_min_page_size(IOMMUMemoryRegion *iommu_mr)
2099{
2100    IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);
2101
2102    if (imrc->get_min_page_size) {
2103        return imrc->get_min_page_size(iommu_mr);
2104    }
2105    return TARGET_PAGE_SIZE;
2106}
2107
2108void memory_region_iommu_replay(IOMMUMemoryRegion *iommu_mr, IOMMUNotifier *n)
2109{
2110    MemoryRegion *mr = MEMORY_REGION(iommu_mr);
2111    IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);
2112    hwaddr addr, granularity;
2113    IOMMUTLBEntry iotlb;
2114
2115    /* If the IOMMU has its own replay callback, override */
2116    if (imrc->replay) {
2117        imrc->replay(iommu_mr, n);
2118        return;
2119    }
2120
2121    granularity = memory_region_iommu_get_min_page_size(iommu_mr);
2122
2123    for (addr = 0; addr < memory_region_size(mr); addr += granularity) {
2124        iotlb = imrc->translate(iommu_mr, addr, IOMMU_NONE);
2125        if (iotlb.perm != IOMMU_NONE) {
2126            n->notify(n, &iotlb);
2127        }
2128
2129        /* If (2^64 - MR size) < granularity, addr + granularity can wrap
2130         * around and loop forever.  This catches such a wraparound.  */
2131        if ((addr + granularity) < addr) {
2132            break;
2133        }
2134    }
2135}
2136
2137void memory_region_iommu_replay_all(IOMMUMemoryRegion *iommu_mr)
2138{
2139    IOMMUNotifier *notifier;
2140
2141    IOMMU_NOTIFIER_FOREACH(notifier, iommu_mr) {
2142        memory_region_iommu_replay(iommu_mr, notifier);
2143    }
2144}
2145
2146void memory_region_unregister_iommu_notifier(MemoryRegion *mr,
2147                                             IOMMUNotifier *n)
2148{
2149    IOMMUMemoryRegion *iommu_mr;
2150
2151    if (mr->alias) {
2152        memory_region_unregister_iommu_notifier(mr->alias, n);
2153        return;
2154    }
2155    QLIST_REMOVE(n, node);
2156    iommu_mr = IOMMU_MEMORY_REGION(mr);
2157    memory_region_update_iommu_notify_flags(iommu_mr);
2158}
2159
2160void memory_region_notify_one(IOMMUNotifier *notifier,
2161                              IOMMUTLBEntry *entry)
2162{
2163    IOMMUNotifierFlag request_flags;
2164
2165    /*
2166     * Skip the notification if the notified range does not overlap
2167     * with the registered range.
2168     */
2169    if (notifier->start > entry->iova + entry->addr_mask ||
2170        notifier->end < entry->iova) {
2171        return;
2172    }
2173
2174    if (entry->perm & IOMMU_RW) {
2175        request_flags = IOMMU_NOTIFIER_MAP;
2176    } else {
2177        request_flags = IOMMU_NOTIFIER_UNMAP;
2178    }
2179
2180    if (notifier->notifier_flags & request_flags) {
2181        notifier->notify(notifier, entry);
2182    }
2183}
2184
2185void memory_region_notify_iommu(IOMMUMemoryRegion *iommu_mr,
2186                                IOMMUTLBEntry entry)
2187{
2188    IOMMUNotifier *iommu_notifier;
2189
2190    assert(memory_region_is_iommu(MEMORY_REGION(iommu_mr)));
2191
2192    IOMMU_NOTIFIER_FOREACH(iommu_notifier, iommu_mr) {
2193        memory_region_notify_one(iommu_notifier, &entry);
2194    }
2195}
2196
2197void memory_region_set_log(MemoryRegion *mr, bool log, unsigned client)
2198{
2199    uint8_t mask = 1 << client;
2200    uint8_t old_logging;
2201
2202    assert(client == DIRTY_MEMORY_VGA);
2203    old_logging = mr->vga_logging_count;
2204    mr->vga_logging_count += log ? 1 : -1;
2205    if (!!old_logging == !!mr->vga_logging_count) {
2206        return;
2207    }
2208
2209    memory_region_transaction_begin();
2210    mr->dirty_log_mask = (mr->dirty_log_mask & ~mask) | (log * mask);
2211    memory_region_update_pending |= mr->enabled;
2212    memory_region_transaction_commit();
2213}
2214
2215bool memory_region_get_dirty(MemoryRegion *mr, hwaddr addr,
2216                             hwaddr size, unsigned client)
2217{
2218    assert(mr->ram_block);
2219    return cpu_physical_memory_get_dirty(memory_region_get_ram_addr(mr) + addr,
2220                                         size, client);
2221}
2222
2223void memory_region_set_dirty(MemoryRegion *mr, hwaddr addr,
2224                             hwaddr size)
2225{
2226    assert(mr->ram_block);
2227    cpu_physical_memory_set_dirty_range(memory_region_get_ram_addr(mr) + addr,
2228                                        size,
2229                                        memory_region_get_dirty_log_mask(mr));
2230}
2231
2232bool memory_region_test_and_clear_dirty(MemoryRegion *mr, hwaddr addr,
2233                                        hwaddr size, unsigned client)
2234{
2235    assert(mr->ram_block);
2236    return cpu_physical_memory_test_and_clear_dirty(
2237                memory_region_get_ram_addr(mr) + addr, size, client);
2238}
2239
2240DirtyBitmapSnapshot *memory_region_snapshot_and_clear_dirty(MemoryRegion *mr,
2241                                                            hwaddr addr,
2242                                                            hwaddr size,
2243                                                            unsigned client)
2244{
2245    assert(mr->ram_block);
2246    return cpu_physical_memory_snapshot_and_clear_dirty(
2247                memory_region_get_ram_addr(mr) + addr, size, client);
2248}
2249
2250bool memory_region_snapshot_get_dirty(MemoryRegion *mr, DirtyBitmapSnapshot *snap,
2251                                      hwaddr addr, hwaddr size)
2252{
2253    assert(mr->ram_block);
2254    return cpu_physical_memory_snapshot_get_dirty(snap,
2255                memory_region_get_ram_addr(mr) + addr, size);
2256}
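
/*
 * Illustrative dirty-tracking loop, display-device style (a sketch; the vram
 * region, geometry and redraw step are assumptions):
 *
 *     memory_region_set_log(vram, true, DIRTY_MEMORY_VGA);
 *     ...
 *     DirtyBitmapSnapshot *snap =
 *         memory_region_snapshot_and_clear_dirty(vram, 0, vram_size,
 *                                                DIRTY_MEMORY_VGA);
 *     for (y = 0; y < height; y++) {
 *         if (memory_region_snapshot_get_dirty(vram, snap, y * stride,
 *                                              stride)) {
 *             redraw_line(y);
 *         }
 *     }
 *     g_free(snap);
 */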
2257
2258void memory_region_sync_dirty_bitmap(MemoryRegion *mr)
2259{
2260    MemoryListener *listener;
2261    AddressSpace *as;
2262    FlatView *view;
2263    FlatRange *fr;
2264
2265    /* If the same address space has multiple log_sync listeners, we
2266     * visit that address space's FlatView multiple times.  But because
2267     * log_sync listeners are rare, it's still cheaper than walking each
2268     * address space once.
2269     */
2270    QTAILQ_FOREACH(listener, &memory_listeners, link) {
2271        if (!listener->log_sync) {
2272            continue;
2273        }
2274        as = listener->address_space;
2275        view = address_space_get_flatview(as);
2276        FOR_EACH_FLAT_RANGE(fr, view) {
2277            if (fr->mr == mr) {
2278                MemoryRegionSection mrs = section_from_flat_range(fr, view);
2279                listener->log_sync(listener, &mrs);
2280            }
2281        }
2282        flatview_unref(view);
2283    }
2284}
2285
2286void memory_region_set_readonly(MemoryRegion *mr, bool readonly)
2287{
2288    if (mr->readonly != readonly) {
2289        memory_region_transaction_begin();
2290        mr->readonly = readonly;
2291        memory_region_update_pending |= mr->enabled;
2292        memory_region_transaction_commit();
2293    }
2294}
2295
2296void memory_region_rom_device_set_romd(MemoryRegion *mr, bool romd_mode)
2297{
2298    if (mr->romd_mode != romd_mode) {
2299        memory_region_transaction_begin();
2300        mr->romd_mode = romd_mode;
2301        memory_region_update_pending |= mr->enabled;
2302        memory_region_transaction_commit();
2303    }
2304}
2305
2306void memory_region_reset_dirty(MemoryRegion *mr, hwaddr addr,
2307                               hwaddr size, unsigned client)
2308{
2309    assert(mr->ram_block);
2310    cpu_physical_memory_test_and_clear_dirty(
2311        memory_region_get_ram_addr(mr) + addr, size, client);
2312}
2313
2314int memory_region_get_fd(MemoryRegion *mr)
2315{
2316    int fd;
2317
2318    rcu_read_lock();
2319    while (mr->alias) {
2320        mr = mr->alias;
2321    }
2322    fd = mr->ram_block->fd;
2323    rcu_read_unlock();
2324
2325    return fd;
2326}
2327
2328void *memory_region_get_ram_ptr(MemoryRegion *mr)
2329{
2330    void *ptr;
2331    uint64_t offset = 0;
2332
2333    rcu_read_lock();
2334    while (mr->alias) {
2335        offset += mr->alias_offset;
2336        mr = mr->alias;
2337    }
2338    assert(mr->ram_block);
2339    ptr = qemu_map_ram_ptr(mr->ram_block, offset);
2340    rcu_read_unlock();
2341
2342    return ptr;
2343}
2344
2345MemoryRegion *memory_region_from_host(void *ptr, ram_addr_t *offset)
2346{
2347    RAMBlock *block;
2348
2349    block = qemu_ram_block_from_host(ptr, false, offset);
2350    if (!block) {
2351        return NULL;
2352    }
2353
2354    return block->mr;
2355}
2356
2357ram_addr_t memory_region_get_ram_addr(MemoryRegion *mr)
2358{
2359    return mr->ram_block ? mr->ram_block->offset : RAM_ADDR_INVALID;
2360}
2361
2362void memory_region_ram_resize(MemoryRegion *mr, ram_addr_t newsize, Error **errp)
2363{
2364    assert(mr->ram_block);
2365
2366    qemu_ram_resize(mr->ram_block, newsize, errp);
2367}
2368
2369static void memory_region_update_coalesced_range_as(MemoryRegion *mr, AddressSpace *as)
2370{
2371    FlatView *view;
2372    FlatRange *fr;
2373    CoalescedMemoryRange *cmr;
2374    AddrRange tmp;
2375    MemoryRegionSection section;
2376
2377    view = address_space_get_flatview(as);
2378    FOR_EACH_FLAT_RANGE(fr, view) {
2379        if (fr->mr == mr) {
2380            section = (MemoryRegionSection) {
2381                .fv = view,
2382                .offset_within_address_space = int128_get64(fr->addr.start),
2383                .size = fr->addr.size,
2384            };
2385
2386            MEMORY_LISTENER_CALL(as, coalesced_mmio_del, Reverse, &section,
2387                                 int128_get64(fr->addr.start),
2388                                 int128_get64(fr->addr.size));
2389            QTAILQ_FOREACH(cmr, &mr->coalesced, link) {
2390                tmp = addrrange_shift(cmr->addr,
2391                                      int128_sub(fr->addr.start,
2392                                                 int128_make64(fr->offset_in_region)));
2393                if (!addrrange_intersects(tmp, fr->addr)) {
2394                    continue;
2395                }
2396                tmp = addrrange_intersection(tmp, fr->addr);
2397                MEMORY_LISTENER_CALL(as, coalesced_mmio_add, Forward, &section,
2398                                     int128_get64(tmp.start),
2399                                     int128_get64(tmp.size));
2400            }
2401        }
2402    }
2403    flatview_unref(view);
2404}
2405
2406static void memory_region_update_coalesced_range(MemoryRegion *mr)
2407{
2408    AddressSpace *as;
2409
2410    QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
2411        memory_region_update_coalesced_range_as(mr, as);
2412    }
2413}
2414
2415void memory_region_set_coalescing(MemoryRegion *mr)
2416{
2417    memory_region_clear_coalescing(mr);
2418    memory_region_add_coalescing(mr, 0, int128_get64(mr->size));
2419}
2420
2421void memory_region_add_coalescing(MemoryRegion *mr,
2422                                  hwaddr offset,
2423                                  uint64_t size)
2424{
2425    CoalescedMemoryRange *cmr = g_malloc(sizeof(*cmr));
2426
2427    cmr->addr = addrrange_make(int128_make64(offset), int128_make64(size));
2428    QTAILQ_INSERT_TAIL(&mr->coalesced, cmr, link);
2429    memory_region_update_coalesced_range(mr);
2430    memory_region_set_flush_coalesced(mr);
2431}
2432
2433void memory_region_clear_coalescing(MemoryRegion *mr)
2434{
2435    CoalescedMemoryRange *cmr;
2436    bool updated = false;
2437
2438    qemu_flush_coalesced_mmio_buffer();
2439    mr->flush_coalesced_mmio = false;
2440
2441    while (!QTAILQ_EMPTY(&mr->coalesced)) {
2442        cmr = QTAILQ_FIRST(&mr->coalesced);
2443        QTAILQ_REMOVE(&mr->coalesced, cmr, link);
2444        g_free(cmr);
2445        updated = true;
2446    }
2447
2448    if (updated) {
2449        memory_region_update_coalesced_range(mr);
2450    }
2451}
2452
2453void memory_region_set_flush_coalesced(MemoryRegion *mr)
2454{
2455    mr->flush_coalesced_mmio = true;
2456}
2457
2458void memory_region_clear_flush_coalesced(MemoryRegion *mr)
2459{
2460    qemu_flush_coalesced_mmio_buffer();
2461    if (QTAILQ_EMPTY(&mr->coalesced)) {
2462        mr->flush_coalesced_mmio = false;
2463    }
2464}
2465
2466void memory_region_set_global_locking(MemoryRegion *mr)
2467{
2468    mr->global_locking = true;
2469}
2470
2471void memory_region_clear_global_locking(MemoryRegion *mr)
2472{
2473    mr->global_locking = false;
2474}
2475
2476static bool userspace_eventfd_warning;
2477
2478void memory_region_add_eventfd(MemoryRegion *mr,
2479                               hwaddr addr,
2480                               unsigned size,
2481                               bool match_data,
2482                               uint64_t data,
2483                               EventNotifier *e)
2484{
2485    MemoryRegionIoeventfd mrfd = {
2486        .addr.start = int128_make64(addr),
2487        .addr.size = int128_make64(size),
2488        .match_data = match_data,
2489        .data = data,
2490        .e = e,
2491    };
2492    unsigned i;
2493
2494    if (kvm_enabled() && (!(kvm_eventfds_enabled() ||
2495                            userspace_eventfd_warning))) {
2496        userspace_eventfd_warning = true;
2497        error_report("Using eventfd without MMIO binding in KVM. "
2498                     "Suboptimal performance expected");
2499    }
2500
2501    if (size) {
2502        adjust_endianness(mr, &mrfd.data, size);
2503    }
2504    memory_region_transaction_begin();
2505    for (i = 0; i < mr->ioeventfd_nb; ++i) {
2506        if (memory_region_ioeventfd_before(mrfd, mr->ioeventfds[i])) {
2507            break;
2508        }
2509    }
2510    ++mr->ioeventfd_nb;
2511    mr->ioeventfds = g_realloc(mr->ioeventfds,
2512                                  sizeof(*mr->ioeventfds) * mr->ioeventfd_nb);
2513    memmove(&mr->ioeventfds[i+1], &mr->ioeventfds[i],
2514            sizeof(*mr->ioeventfds) * (mr->ioeventfd_nb-1 - i));
2515    mr->ioeventfds[i] = mrfd;
2516    ioeventfd_update_pending |= mr->enabled;
2517    memory_region_transaction_commit();
2518}
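
/*
 * Illustrative ioeventfd registration, virtio-style queue notification (a
 * sketch; the proxy fields and queue index are assumptions):
 *
 *     event_notifier_init(&notifier, 0);
 *     memory_region_add_eventfd(&proxy->notify_mr, notify_off, 2,
 *                               true, vq_index, &notifier);
 *
 * While registered, a matching 2-byte guest write of vq_index at notify_off
 * signals the notifier without entering the region's write callback (or is
 * handled in-kernel when KVM ioeventfds are available); see
 * memory_region_dispatch_write_eventfds().
 */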
2519
2520void memory_region_del_eventfd(MemoryRegion *mr,
2521                               hwaddr addr,
2522                               unsigned size,
2523                               bool match_data,
2524                               uint64_t data,
2525                               EventNotifier *e)
2526{
2527    MemoryRegionIoeventfd mrfd = {
2528        .addr.start = int128_make64(addr),
2529        .addr.size = int128_make64(size),
2530        .match_data = match_data,
2531        .data = data,
2532        .e = e,
2533    };
2534    unsigned i;
2535
2536    if (size) {
2537        adjust_endianness(mr, &mrfd.data, size);
2538    }
2539    memory_region_transaction_begin();
2540    for (i = 0; i < mr->ioeventfd_nb; ++i) {
2541        if (memory_region_ioeventfd_equal(mrfd, mr->ioeventfds[i])) {
2542            break;
2543        }
2544    }
2545    assert(i != mr->ioeventfd_nb);
2546    memmove(&mr->ioeventfds[i], &mr->ioeventfds[i+1],
2547            sizeof(*mr->ioeventfds) * (mr->ioeventfd_nb - (i+1)));
2548    --mr->ioeventfd_nb;
2549    mr->ioeventfds = g_realloc(mr->ioeventfds,
2550                                  sizeof(*mr->ioeventfds)*mr->ioeventfd_nb + 1);
2551    ioeventfd_update_pending |= mr->enabled;
2552    memory_region_transaction_commit();
2553}
2554
2555static void memory_region_update_container_subregions(MemoryRegion *subregion)
2556{
2557    MemoryRegion *mr = subregion->container;
2558    MemoryRegion *other;
2559
2560    memory_region_transaction_begin();
2561
2562    memory_region_ref(subregion);
2563    QTAILQ_FOREACH(other, &mr->subregions, subregions_link) {
2564        if (subregion->priority >= other->priority) {
2565            QTAILQ_INSERT_BEFORE(other, subregion, subregions_link);
2566            goto done;
2567        }
2568    }
2569    QTAILQ_INSERT_TAIL(&mr->subregions, subregion, subregions_link);
2570done:
2571    memory_region_update_pending |= mr->enabled && subregion->enabled;
2572    memory_region_transaction_commit();
2573}
2574
2575static void memory_region_add_subregion_common(MemoryRegion *mr,
2576                                               hwaddr offset,
2577                                               MemoryRegion *subregion)
2578{
2579    assert(!subregion->container);
2580    subregion->container = mr;
2581    subregion->addr = offset;
2582    memory_region_update_container_subregions(subregion);
2583}
2584
2585void memory_region_add_subregion(MemoryRegion *mr,
2586                                 hwaddr offset,
2587                                 MemoryRegion *subregion)
2588{
2589    subregion->priority = 0;
2590    memory_region_add_subregion_common(mr, offset, subregion);
2591}
2592
2593void memory_region_add_subregion_overlap(MemoryRegion *mr,
2594                                         hwaddr offset,
2595                                         MemoryRegion *subregion,
2596                                         int priority)
2597{
2598    subregion->priority = priority;
2599    memory_region_add_subregion_common(mr, offset, subregion);
2600}
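
/*
 * Illustrative layering (a sketch; "sysmem", "ram" and "rom" are made up):
 * a small ROM window shadows part of RAM because it has higher priority:
 *
 *     memory_region_add_subregion(sysmem, 0, ram);
 *     memory_region_add_subregion_overlap(sysmem, 0xfffc0000, rom, 1);
 *
 * Where the two overlap, the flattened view resolves to "rom" (priority 1
 * beats the default 0); the rest of "ram" stays visible.
 */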
2601
2602void memory_region_del_subregion(MemoryRegion *mr,
2603                                 MemoryRegion *subregion)
2604{
2605    memory_region_transaction_begin();
2606    assert(subregion->container == mr);
2607    subregion->container = NULL;
2608    QTAILQ_REMOVE(&mr->subregions, subregion, subregions_link);
2609    memory_region_unref(subregion);
2610    memory_region_update_pending |= mr->enabled && subregion->enabled;
2611    memory_region_transaction_commit();
2612}
2613
2614void memory_region_set_enabled(MemoryRegion *mr, bool enabled)
2615{
2616    if (enabled == mr->enabled) {
2617        return;
2618    }
2619    memory_region_transaction_begin();
2620    mr->enabled = enabled;
2621    memory_region_update_pending = true;
2622    memory_region_transaction_commit();
2623}
2624
2625void memory_region_set_size(MemoryRegion *mr, uint64_t size)
2626{
2627    Int128 s = int128_make64(size);
2628
2629    if (size == UINT64_MAX) {
2630        s = int128_2_64();
2631    }
2632    if (int128_eq(s, mr->size)) {
2633        return;
2634    }
2635    memory_region_transaction_begin();
2636    mr->size = s;
2637    if (mr->ram) {
2638        memory_region_do_set_ram(mr);
2639    }
2640    memory_region_update_pending = true;
2641    memory_region_transaction_commit();
2642}
2643
2644static void memory_region_readd_subregion(MemoryRegion *mr)
2645{
2646    MemoryRegion *container = mr->container;
2647
2648    if (container) {
2649        memory_region_transaction_begin();
2650        memory_region_ref(mr);
2651        memory_region_del_subregion(container, mr);
2652        mr->container = container;
2653        memory_region_update_container_subregions(mr);
2654        memory_region_unref(mr);
2655        memory_region_transaction_commit();
2656    }
2657}
2658
2659void memory_region_set_address(MemoryRegion *mr, hwaddr addr)
2660{
2661    if (addr != mr->addr) {
2662        mr->addr = addr;
2663        memory_region_readd_subregion(mr);
2664    }
2665}
2666
2667void memory_region_set_alias_offset(MemoryRegion *mr, hwaddr offset)
2668{
2669    assert(mr->alias);
2670
2671    if (offset == mr->alias_offset) {
2672        return;
2673    }
2674
2675    memory_region_transaction_begin();
2676    mr->alias_offset = offset;
2677    memory_region_update_pending |= mr->enabled;
2678    memory_region_transaction_commit();
2679}
2680
2681uint64_t memory_region_get_alignment(const MemoryRegion *mr)
2682{
2683    return mr->align;
2684}
2685
2686static int cmp_flatrange_addr(const void *addr_, const void *fr_)
2687{
2688    const AddrRange *addr = addr_;
2689    const FlatRange *fr = fr_;
2690
2691    if (int128_le(addrrange_end(*addr), fr->addr.start)) {
2692        return -1;
2693    } else if (int128_ge(addr->start, addrrange_end(fr->addr))) {
2694        return 1;
2695    }
2696    return 0;
2697}
2698
2699static FlatRange *flatview_lookup(FlatView *view, AddrRange addr)
2700{
2701    return bsearch(&addr, view->ranges, view->nr,
2702                   sizeof(FlatRange), cmp_flatrange_addr);
2703}
2704
2705bool memory_region_is_mapped(MemoryRegion *mr)
2706{
2707    return mr->container ? true : false;
2708}
2709
2710/* Same as memory_region_find, but it does not add a reference to the
2711 * returned region.  It must be called from an RCU critical section.
2712 */
2713static MemoryRegionSection memory_region_find_rcu(MemoryRegion *mr,
2714                                                  hwaddr addr, uint64_t size)
2715{
2716    MemoryRegionSection ret = { .mr = NULL };
2717    MemoryRegion *root;
2718    AddressSpace *as;
2719    AddrRange range;
2720    FlatView *view;
2721    FlatRange *fr;
2722
2723    addr += mr->addr;
2724    for (root = mr; root->container; ) {
2725        root = root->container;
2726        addr += root->addr;
2727    }
2728
2729    as = memory_region_to_address_space(root);
2730    if (!as) {
2731        return ret;
2732    }
2733    range = addrrange_make(int128_make64(addr), int128_make64(size));
2734
2735    view = address_space_to_flatview(as);
2736    fr = flatview_lookup(view, range);
2737    if (!fr) {
2738        return ret;
2739    }
2740
2741    while (fr > view->ranges && addrrange_intersects(fr[-1].addr, range)) {
2742        --fr;
2743    }
2744
2745    ret.mr = fr->mr;
2746    ret.fv = view;
2747    range = addrrange_intersection(range, fr->addr);
2748    ret.offset_within_region = fr->offset_in_region;
2749    ret.offset_within_region += int128_get64(int128_sub(range.start,
2750                                                        fr->addr.start));
2751    ret.size = range.size;
2752    ret.offset_within_address_space = int128_get64(range.start);
2753    ret.readonly = fr->readonly;
2754    return ret;
2755}
2756
2757MemoryRegionSection memory_region_find(MemoryRegion *mr,
2758                                       hwaddr addr, uint64_t size)
2759{
2760    MemoryRegionSection ret;
2761    rcu_read_lock();
2762    ret = memory_region_find_rcu(mr, addr, size);
2763    if (ret.mr) {
2764        memory_region_ref(ret.mr);
2765    }
2766    rcu_read_unlock();
2767    return ret;
2768}
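
/*
 * Illustrative lookup (a sketch; "sysmem" and "gpa" are assumptions): find
 * what is mapped at a guest physical address and drop the reference that
 * memory_region_find() took on the result:
 *
 *     MemoryRegionSection sec = memory_region_find(sysmem, gpa, 4);
 *     if (sec.mr) {
 *         ... use sec.offset_within_region, sec.size ...
 *         memory_region_unref(sec.mr);
 *     }
 */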
2769
2770bool memory_region_present(MemoryRegion *container, hwaddr addr)
2771{
2772    MemoryRegion *mr;
2773
2774    rcu_read_lock();
2775    mr = memory_region_find_rcu(container, addr, 1).mr;
2776    rcu_read_unlock();
2777    return mr && mr != container;
2778}
2779
2780void memory_global_dirty_log_sync(void)
2781{
2782    MemoryListener *listener;
2783    AddressSpace *as;
2784    FlatView *view;
2785    FlatRange *fr;
2786
2787    QTAILQ_FOREACH(listener, &memory_listeners, link) {
2788        if (!listener->log_sync) {
2789            continue;
2790        }
2791        as = listener->address_space;
2792        view = address_space_get_flatview(as);
2793        FOR_EACH_FLAT_RANGE(fr, view) {
2794            if (fr->dirty_log_mask) {
2795                MemoryRegionSection mrs = section_from_flat_range(fr, view);
2796
2797                listener->log_sync(listener, &mrs);
2798            }
2799        }
2800        flatview_unref(view);
2801    }
2802}
2803
2804static VMChangeStateEntry *vmstate_change;
2805
2806void memory_global_dirty_log_start(void)
2807{
2808    if (vmstate_change) {
2809        qemu_del_vm_change_state_handler(vmstate_change);
2810        vmstate_change = NULL;
2811    }
2812
2813    global_dirty_log = true;
2814
2815    MEMORY_LISTENER_CALL_GLOBAL(log_global_start, Forward);
2816
2817    /* Refresh DIRTY_LOG_MIGRATION bit.  */
2818    memory_region_transaction_begin();
2819    memory_region_update_pending = true;
2820    memory_region_transaction_commit();
2821}
2822
2823static void memory_global_dirty_log_do_stop(void)
2824{
2825    global_dirty_log = false;
2826
2827    /* Refresh DIRTY_LOG_MIGRATION bit.  */
2828    memory_region_transaction_begin();
2829    memory_region_update_pending = true;
2830    memory_region_transaction_commit();
2831
2832    MEMORY_LISTENER_CALL_GLOBAL(log_global_stop, Reverse);
2833}
2834
2835static void memory_vm_change_state_handler(void *opaque, int running,
2836                                           RunState state)
2837{
2838    if (running) {
2839        memory_global_dirty_log_do_stop();
2840
2841        if (vmstate_change) {
2842            qemu_del_vm_change_state_handler(vmstate_change);
2843            vmstate_change = NULL;
2844        }
2845    }
2846}
2847
2848void memory_global_dirty_log_stop(void)
2849{
2850    if (!runstate_is_running()) {
2851        if (vmstate_change) {
2852            return;
2853        }
2854        vmstate_change = qemu_add_vm_change_state_handler(
2855                                memory_vm_change_state_handler, NULL);
2856        return;
2857    }
2858
2859    memory_global_dirty_log_do_stop();
2860}
2861
2862static void listener_add_address_space(MemoryListener *listener,
2863                                       AddressSpace *as)
2864{
2865    FlatView *view;
2866    FlatRange *fr;
2867
2868    if (listener->begin) {
2869        listener->begin(listener);
2870    }
2871    if (global_dirty_log) {
2872        if (listener->log_global_start) {
2873            listener->log_global_start(listener);
2874        }
2875    }
2876
2877    view = address_space_get_flatview(as);
2878    FOR_EACH_FLAT_RANGE(fr, view) {
2879        MemoryRegionSection section = section_from_flat_range(fr, view);
2880
2881        if (listener->region_add) {
2882            listener->region_add(listener, &section);
2883        }
2884        if (fr->dirty_log_mask && listener->log_start) {
2885            listener->log_start(listener, &section, 0, fr->dirty_log_mask);
2886        }
2887    }
2888    if (listener->commit) {
2889        listener->commit(listener);
2890    }
2891    flatview_unref(view);
2892}
2893
2894void memory_listener_register(MemoryListener *listener, AddressSpace *as)
2895{
2896    MemoryListener *other = NULL;
2897
2898    listener->address_space = as;
2899    if (QTAILQ_EMPTY(&memory_listeners)
2900        || listener->priority >= QTAILQ_LAST(&memory_listeners,
2901                                             memory_listeners)->priority) {
2902        QTAILQ_INSERT_TAIL(&memory_listeners, listener, link);
2903    } else {
2904        QTAILQ_FOREACH(other, &memory_listeners, link) {
2905            if (listener->priority < other->priority) {
2906                break;
2907            }
2908        }
2909        QTAILQ_INSERT_BEFORE(other, listener, link);
2910    }
2911
2912    if (QTAILQ_EMPTY(&as->listeners)
2913        || listener->priority >= QTAILQ_LAST(&as->listeners,
2914                                             memory_listeners)->priority) {
2915        QTAILQ_INSERT_TAIL(&as->listeners, listener, link_as);
2916    } else {
2917        QTAILQ_FOREACH(other, &as->listeners, link_as) {
2918            if (listener->priority < other->priority) {
2919                break;
2920            }
2921        }
2922        QTAILQ_INSERT_BEFORE(other, listener, link_as);
2923    }
2924
2925    listener_add_address_space(listener, as);
2926}
2927
2928void memory_listener_unregister(MemoryListener *listener)
2929{
2930    if (!listener->address_space) {
2931        return;
2932    }
2933
2934    QTAILQ_REMOVE(&memory_listeners, listener, link);
2935    QTAILQ_REMOVE(&listener->address_space->listeners, listener, link_as);
2936    listener->address_space = NULL;
2937}
2938
2939bool memory_region_request_mmio_ptr(MemoryRegion *mr, hwaddr addr)
2940{
2941    void *host;
2942    unsigned size = 0;
2943    unsigned offset = 0;
2944    Object *new_interface;
2945
2946    if (!mr || !mr->ops->request_ptr) {
2947        return false;
2948    }
2949
2950    /*
2951     * Avoid an update if the request_ptr callback calls
2952     * memory_region_invalidate_mmio_ptr, which seems likely when we use
2953     * a cache.
2954     */
2955    memory_region_transaction_begin();
2956
2957    host = mr->ops->request_ptr(mr->opaque, addr - mr->addr, &size, &offset);
2958
2959    if (!host || !size) {
2960        memory_region_transaction_commit();
2961        return false;
2962    }
2963
2964    new_interface = object_new("mmio_interface");
2965    qdev_prop_set_uint64(DEVICE(new_interface), "start", offset);
2966    qdev_prop_set_uint64(DEVICE(new_interface), "end", offset + size - 1);
2967    qdev_prop_set_bit(DEVICE(new_interface), "ro", true);
2968    qdev_prop_set_ptr(DEVICE(new_interface), "host_ptr", host);
2969    qdev_prop_set_ptr(DEVICE(new_interface), "subregion", mr);
2970    object_property_set_bool(OBJECT(new_interface), true, "realized", NULL);
2971
2972    memory_region_transaction_commit();
2973    return true;
2974}
2975
2976typedef struct MMIOPtrInvalidate {
2977    MemoryRegion *mr;
2978    hwaddr offset;
2979    unsigned size;
2980    int busy;
2981    int allocated;
2982} MMIOPtrInvalidate;
2983
2984#define MAX_MMIO_INVALIDATE 10
2985static MMIOPtrInvalidate mmio_ptr_invalidate_list[MAX_MMIO_INVALIDATE];
2986
2987static void memory_region_do_invalidate_mmio_ptr(CPUState *cpu,
2988                                                 run_on_cpu_data data)
2989{
2990    MMIOPtrInvalidate *invalidate_data = (MMIOPtrInvalidate *)data.host_ptr;
2991    MemoryRegion *mr = invalidate_data->mr;
2992    hwaddr offset = invalidate_data->offset;
2993    unsigned size = invalidate_data->size;
2994    MemoryRegionSection section = memory_region_find(mr, offset, size);
2995
2996    qemu_mutex_lock_iothread();
2997
2998    /* Reset dirty so this doesn't happen later. */
2999    cpu_physical_memory_test_and_clear_dirty(offset, size, 1);
3000
3001    if (section.mr != mr) {
3002        /* memory_region_find adds a ref on section.mr */
3003        memory_region_unref(section.mr);
3004        if (MMIO_INTERFACE(section.mr->owner)) {
3005            /* We found the interface; just drop it. */
3006            object_property_set_bool(section.mr->owner, false, "realized",
3007                                     NULL);
3008            object_unref(section.mr->owner);
3009            object_unparent(section.mr->owner);
3010        }
3011    }
3012
3013    qemu_mutex_unlock_iothread();
3014
3015    if (invalidate_data->allocated) {
3016        g_free(invalidate_data);
3017    } else {
3018        invalidate_data->busy = 0;
3019    }
3020}
3021
3022void memory_region_invalidate_mmio_ptr(MemoryRegion *mr, hwaddr offset,
3023                                       unsigned size)
3024{
3025    size_t i;
3026    MMIOPtrInvalidate *invalidate_data = NULL;
3027
3028    for (i = 0; i < MAX_MMIO_INVALIDATE; i++) {
3029        if (atomic_cmpxchg(&(mmio_ptr_invalidate_list[i].busy), 0, 1) == 0) {
3030            invalidate_data = &mmio_ptr_invalidate_list[i];
3031            break;
3032        }
3033    }
3034
3035    if (!invalidate_data) {
3036        invalidate_data = g_malloc0(sizeof(MMIOPtrInvalidate));
3037        invalidate_data->allocated = 1;
3038    }
3039
3040    invalidate_data->mr = mr;
3041    invalidate_data->offset = offset;
3042    invalidate_data->size = size;
3043
3044    async_safe_run_on_cpu(first_cpu, memory_region_do_invalidate_mmio_ptr,
3045                          RUN_ON_CPU_HOST_PTR(invalidate_data));
3046}
3047
3048void address_space_init(AddressSpace *as, MemoryRegion *root, const char *name)
3049{
3050    memory_region_ref(root);
3051    as->root = root;
3052    as->current_map = NULL;
3053    as->ioeventfd_nb = 0;
3054    as->ioeventfds = NULL;
3055    QTAILQ_INIT(&as->listeners);
3056    QTAILQ_INSERT_TAIL(&address_spaces, as, address_spaces_link);
3057
3058    /* XILINX
3059     *
3060     * Use the root MemoryRegion's name as the name if nothing was specified.
3061     * Since we use device-trees to create the machine, we always
3062     * have sensible names for the root MR.
3063     */
3064    as->name = g_strdup(name ? name : object_get_canonical_path(OBJECT(root)));
3065    address_space_update_topology(as);
3066    address_space_update_ioeventfds(as);
3067}
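
/*
 * Illustrative usage (a sketch; the "foo" DMA view and fields are
 * assumptions): give a device its own address space built on an existing
 * root region so it can use the address_space_* API:
 *
 *     memory_region_init(&s->dma_root, OBJECT(dev), "foo-dma-root",
 *                        UINT64_MAX);
 *     memory_region_add_subregion(&s->dma_root, 0, get_system_memory());
 *     address_space_init(&s->dma_as, &s->dma_root, "foo-dma");
 *
 * Passing name == NULL is allowed here; this (Xilinx) variant then derives
 * the name from the root region's canonical path.
 */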
3068
3069static void do_address_space_destroy(AddressSpace *as)
3070{
3071    assert(QTAILQ_EMPTY(&as->listeners));
3072
3073    flatview_unref(as->current_map);
3074    g_free(as->name);
3075    g_free(as->ioeventfds);
3076    memory_region_unref(as->root);
3077}
3078
3079/* REMOVE THIS */
3080AddressSpace *address_space_init_shareable(MemoryRegion *root, const char *name)
3081{
3082    AddressSpace *as;
3083
3084    as = g_malloc0(sizeof *as);
3085    address_space_init(as, root, name);
3086    return as;
3087}
3088
3089void address_space_destroy(AddressSpace *as)
3090{
3091    MemoryRegion *root = as->root;
3092
3093    /* Flush out anything from MemoryListeners listening in on this */
3094    memory_region_transaction_begin();
3095    as->root = NULL;
3096    memory_region_transaction_commit();
3097    QTAILQ_REMOVE(&address_spaces, as, address_spaces_link);
3098
3099    /* At this point, as->dispatch and as->current_map are dummy
3100     * entries that the guest should never use.  Wait for the old
3101     * values to expire before freeing the data.
3102     */
3103    as->root = root;
3104    call_rcu(as, do_address_space_destroy, rcu);
3105}
3106
3107static const char *memory_region_type(MemoryRegion *mr)
3108{
3109    if (memory_region_is_ram_device(mr)) {
3110        return "ramd";
3111    } else if (memory_region_is_romd(mr)) {
3112        return "romd";
3113    } else if (memory_region_is_rom(mr)) {
3114        return "rom";
3115    } else if (memory_region_is_ram(mr)) {
3116        return "ram";
3117    } else {
3118        return "i/o";
3119    }
3120}
3121
3122typedef struct MemoryRegionList MemoryRegionList;
3123
3124struct MemoryRegionList {
3125    const MemoryRegion *mr;
3126    QTAILQ_ENTRY(MemoryRegionList) mrqueue;
3127};
3128
3129typedef QTAILQ_HEAD(mrqueue, MemoryRegionList) MemoryRegionListHead;
3130
3131#define MR_SIZE(size) (int128_nz(size) ? (hwaddr)int128_get64( \
3132                           int128_sub((size), int128_one())) : 0)
3133#define MTREE_INDENT "  "
3134
3135static void mtree_print_mr(fprintf_function mon_printf, void *f,
3136                           const MemoryRegion *mr, unsigned int level,
3137                           hwaddr base,
3138                           MemoryRegionListHead *alias_print_queue)
3139{
3140    MemoryRegionList *new_ml, *ml, *next_ml;
3141    MemoryRegionListHead submr_print_queue;
3142    const MemoryRegion *submr;
3143    unsigned int i;
3144    hwaddr cur_start, cur_end;
3145
3146    if (!mr) {
3147        return;
3148    }
3149
3150    for (i = 0; i < level; i++) {
3151        mon_printf(f, MTREE_INDENT);
3152    }
3153
3154    cur_start = base + mr->addr;
3155    cur_end = cur_start + MR_SIZE(mr->size);
3156
3157    /*
3158     * Try to detect overflow of the memory region.  This should never
3159     * happen normally; when it does, print a marker to warn whoever is
3160     * looking at the output.
3161     */
3162    if (cur_start < base || cur_end < cur_start) {
3163        mon_printf(f, "[DETECTED OVERFLOW!] ");
3164    }
3165
3166    if (mr->alias) {
3167        MemoryRegionList *ml;
3168        bool found = false;
3169
3170        /* check if the alias is already in the queue */
3171        QTAILQ_FOREACH(ml, alias_print_queue, mrqueue) {
3172            if (ml->mr == mr->alias) {
3173                found = true;
3174            }
3175        }
3176
3177        if (!found) {
3178            ml = g_new(MemoryRegionList, 1);
3179            ml->mr = mr->alias;
3180            QTAILQ_INSERT_TAIL(alias_print_queue, ml, mrqueue);
3181        }
3182        mon_printf(f, TARGET_FMT_plx "-" TARGET_FMT_plx
3183                   " (prio %d, %s): alias %s @%s " TARGET_FMT_plx
3184                   "-" TARGET_FMT_plx "%s\n",
3185                   cur_start, cur_end,
3186                   mr->priority,
3187                   memory_region_type((MemoryRegion *)mr),
3188                   memory_region_name(mr),
3189                   memory_region_name(mr->alias),
3190                   mr->alias_offset,
3191                   mr->alias_offset + MR_SIZE(mr->size),
3192                   mr->enabled ? "" : " [disabled]");
3193    } else {
3194        mon_printf(f,
3195                   TARGET_FMT_plx "-" TARGET_FMT_plx " (prio %d, %s): %s%s\n",
3196                   cur_start, cur_end,
3197                   mr->priority,
3198                   memory_region_type((MemoryRegion *)mr),
3199                   memory_region_name(mr),
3200                   mr->enabled ? "" : " [disabled]");
3201    }
3202
3203    QTAILQ_INIT(&submr_print_queue);
3204
3205    QTAILQ_FOREACH(submr, &mr->subregions, subregions_link) {
3206        new_ml = g_new(MemoryRegionList, 1);
3207        new_ml->mr = submr;
3208        QTAILQ_FOREACH(ml, &submr_print_queue, mrqueue) {
3209            if (new_ml->mr->addr < ml->mr->addr ||
3210                (new_ml->mr->addr == ml->mr->addr &&
3211                 new_ml->mr->priority > ml->mr->priority)) {
3212                QTAILQ_INSERT_BEFORE(ml, new_ml, mrqueue);
3213                new_ml = NULL;
3214                break;
3215            }
3216        }
3217        if (new_ml) {
3218            QTAILQ_INSERT_TAIL(&submr_print_queue, new_ml, mrqueue);
3219        }
3220    }
3221
3222    QTAILQ_FOREACH(ml, &submr_print_queue, mrqueue) {
3223        mtree_print_mr(mon_printf, f, ml->mr, level + 1, cur_start,
3224                       alias_print_queue);
3225    }
3226
3227    QTAILQ_FOREACH_SAFE(ml, &submr_print_queue, mrqueue, next_ml) {
3228        g_free(ml);
3229    }
3230}
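
/*
 * Indicative shape of the tree dump produced above (addresses, priorities
 * and names are examples only):
 *
 *   address-space: memory
 *     0000000000000000-ffffffffffffffff (prio 0, i/o): system
 *       0000000000000000-0000000007ffffff (prio 0, ram): ram
 *       00000000fd000000-00000000fd000fff (prio 1, i/o): serial
 */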
3231
3232struct FlatViewInfo {
3233    fprintf_function mon_printf;
3234    void *f;
3235    int counter;
3236    bool dispatch_tree;
3237};
3238
3239static void mtree_print_flatview(gpointer key, gpointer value,
3240                                 gpointer user_data)
3241{
3242    FlatView *view = key;
3243    GArray *fv_address_spaces = value;
3244    struct FlatViewInfo *fvi = user_data;
3245    fprintf_function p = fvi->mon_printf;
3246    void *f = fvi->f;
3247    FlatRange *range = &view->ranges[0];
3248    MemoryRegion *mr;
3249    int n = view->nr;
3250    int i;
3251    AddressSpace *as;
3252
3253    p(f, "FlatView #%d\n", fvi->counter);
3254    ++fvi->counter;
3255
3256    for (i = 0; i < fv_address_spaces->len; ++i) {
3257        as = g_array_index(fv_address_spaces, AddressSpace*, i);
3258        p(f, " AS \"%s\", root: %s", as->name, memory_region_name(as->root));
3259        if (as->root->alias) {
3260            p(f, ", alias %s", memory_region_name(as->root->alias));
3261        }
3262        p(f, "\n");
3263    }
3264
3265    p(f, " Root memory region: %s\n",
3266      view->root ? memory_region_name(view->root) : "(none)");
3267
3268    if (n <= 0) {
3269        p(f, MTREE_INDENT "No rendered FlatView\n\n");
3270        return;
3271    }
3272
3273    while (n--) {
3274        mr = range->mr;
3275        if (range->offset_in_region) {
3276            p(f, MTREE_INDENT TARGET_FMT_plx "-"
3277              TARGET_FMT_plx " (prio %d, %s): %s @" TARGET_FMT_plx "\n",
3278              int128_get64(range->addr.start),
3279              int128_get64(range->addr.start) + MR_SIZE(range->addr.size),
3280              mr->priority,
3281              range->readonly ? "rom" : memory_region_type(mr),
3282              memory_region_name(mr),
3283              range->offset_in_region);
3284        } else {
3285            p(f, MTREE_INDENT TARGET_FMT_plx "-"
3286              TARGET_FMT_plx " (prio %d, %s): %s\n",
3287              int128_get64(range->addr.start),
3288              int128_get64(range->addr.start) + MR_SIZE(range->addr.size),
3289              mr->priority,
3290              range->readonly ? "rom" : memory_region_type(mr),
3291              memory_region_name(mr));
3292        }
3293        range++;
3294    }
3295
3296#if !defined(CONFIG_USER_ONLY)
3297    if (fvi->dispatch_tree && view->root) {
3298        mtree_print_dispatch(p, f, view->dispatch, view->root);
3299    }
3300#endif
3301
3302    p(f, "\n");
3303}
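
/*
 * Indicative shape of one flat view dump produced above (values are
 * examples only):
 *
 *   FlatView #0
 *    AS "memory", root: system
 *    Root memory region: system
 *     0000000000000000-0000000007ffffff (prio 0, ram): ram
 *     00000000fd000000-00000000fd000fff (prio 1, i/o): serial
 */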
3304
3305static gboolean mtree_info_flatview_free(gpointer key, gpointer value,
3306                                      gpointer user_data)
3307{
3308    FlatView *view = key;
3309    GArray *fv_address_spaces = value;
3310
3311    g_array_unref(fv_address_spaces);
3312    flatview_unref(view);
3313
3314    return true;
3315}
3316
3317void mtree_info(fprintf_function mon_printf, void *f, bool flatview,
3318                bool dispatch_tree)
3319{
3320    MemoryRegionListHead ml_head;
3321    MemoryRegionList *ml, *ml2;
3322    AddressSpace *as;
3323
3324    if (flatview) {
3325        FlatView *view;
3326        struct FlatViewInfo fvi = {
3327            .mon_printf = mon_printf,
3328            .f = f,
3329            .counter = 0,
3330            .dispatch_tree = dispatch_tree
3331        };
3332        GArray *fv_address_spaces;
3333        GHashTable *views = g_hash_table_new(g_direct_hash, g_direct_equal);
3334
3335        /* Gather all FVs in one table */
3336        QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
3337            view = address_space_get_flatview(as);
3338
3339            fv_address_spaces = g_hash_table_lookup(views, view);
3340            if (!fv_address_spaces) {
3341                fv_address_spaces = g_array_new(false, false, sizeof(as));
3342                g_hash_table_insert(views, view, fv_address_spaces);
3343            }
3344
3345            g_array_append_val(fv_address_spaces, as);
3346        }
3347
3348        /* Print */
3349        g_hash_table_foreach(views, mtree_print_flatview, &fvi);
3350
3351        /* Free */
3352        g_hash_table_foreach_remove(views, mtree_info_flatview_free, 0);
3353        g_hash_table_unref(views);
3354
3355        return;
3356    }
3357
3358    QTAILQ_INIT(&ml_head);
3359
3360    QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
3361        mon_printf(f, "address-space: %s\n", as->name);
3362        mtree_print_mr(mon_printf, f, as->root, 1, 0, &ml_head);
3363        mon_printf(f, "\n");
3364    }
3365
3366    /* print aliased regions */
3367    QTAILQ_FOREACH(ml, &ml_head, mrqueue) {
3368        mon_printf(f, "memory-region: %s\n", memory_region_name(ml->mr));
3369        mtree_print_mr(mon_printf, f, ml->mr, 1, 0, &ml_head);
3370        mon_printf(f, "\n");
3371    }
3372
3373    QTAILQ_FOREACH_SAFE(ml, &ml_head, mrqueue, ml2) {
3374        g_free(ml);
3375    }
3376}
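
/*
 * Sketch of how a monitor command can drive this (roughly mirroring the
 * "info mtree" handler, which lives in monitor.c, not here):
 *
 *   bool flatview = qdict_get_try_bool(qdict, "flatview", false);
 *   bool dispatch_tree = qdict_get_try_bool(qdict, "dispatch_tree", false);
 *
 *   mtree_info((fprintf_function)monitor_printf, mon, flatview,
 *              dispatch_tree);
 */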
3377
3378static bool memory_region_parse_reg(FDTGenericMMap *obj,
3379                                    FDTGenericRegPropInfo reg, Error **errp)
3380{
3381    MemoryRegion *mr = MEMORY_REGION(obj);
3382    uint64_t base_addr = ~0ull;
3383    uint64_t total_size = 0;
3384    uint64_t max_addr = 0;
3385    int i;
3386
3387    if (!reg.n) {
3388        return false;
3389    }
3390
3391    for (i = 0; i < reg.n; ++i) {
3392        base_addr = MIN(base_addr, reg.a[i]);
3393        max_addr = MAX(max_addr, reg.a[i] + reg.s[i]);
3394        total_size += reg.s[i];
3395        if (reg.p[i] != reg.p[0]) {
3396            error_setg(errp, "FDT generic memory parser does not support"
3397                       " mixed priorities");
3398            return false;
3399        }
3400    }
3401
3402    if (total_size != max_addr - base_addr) {
3403        error_setg(errp, "FDT generic memory parser does not "
3404                   "support discontiguous or overlapping memory regions");
3405        return false;
3406    }
3407
3408    /* FIXME: the parent should not be optional, but reg-extended support
3409     * must be implemented in the kernel before this can be handled properly.
3410     */
3411    if (reg.parents[0]) {
3412        object_property_set_link(OBJECT(mr), reg.parents[0], "container",
3413                                 &error_abort);
3414    }
3415    object_property_set_int(OBJECT(mr), total_size, "size", &error_abort);
3416    object_property_set_int(OBJECT(mr), base_addr, "addr", &error_abort);
3417    object_property_set_int(OBJECT(mr), reg.p[0], "priority", &error_abort);
3418    return false;
3419}
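
/*
 * Sketch of the device-tree input this parser consumes (a hypothetical node;
 * the cell layout depends on #address-cells/#size-cells):
 *
 *   memory@40000000 {
 *       reg = <0x40000000 0x10000000>;
 *   };
 *
 * The loop above collapses all reg entries into one placement on the first
 * parent: addr = 0x40000000, size = 0x10000000, with a single shared
 * priority taken from reg.p[0].
 */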
3420
3421static void memory_region_class_init(ObjectClass *oc, void *data)
3422{
3423    FDTGenericMMapClass *fmc = FDT_GENERIC_MMAP_CLASS(oc);
3424
3425    fmc->parse_reg = memory_region_parse_reg;
3426}
3427
3428void memory_region_init_ram(MemoryRegion *mr,
3429                            struct Object *owner,
3430                            const char *name,
3431                            uint64_t size,
3432                            Error **errp)
3433{
3434    DeviceState *owner_dev;
3435    Error *err = NULL;
3436
3437    memory_region_init_ram_nomigrate(mr, owner, name, size, &err);
3438    if (err) {
3439        error_propagate(errp, err);
3440        return;
3441    }
3442    /* This will assert if owner is neither NULL nor a DeviceState.
3443     * We only want the owner here for the purposes of defining a
3444     * unique name for migration. TODO: Ideally we should implement
3445     * a naming scheme for Objects which are not DeviceStates, in
3446     * which case we can relax this restriction.
3447     */
3448    owner_dev = DEVICE(owner);
3449    vmstate_register_ram(mr, owner_dev);
3450}
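
/*
 * Minimal usage sketch (the device "dev", the field s->ram and the 64 MiB
 * size are illustrative assumptions):
 *
 *   memory_region_init_ram(&s->ram, OBJECT(dev), "board.ram",
 *                          64 * 1024 * 1024, &error_fatal);
 *   memory_region_add_subregion(get_system_memory(), 0, &s->ram);
 *
 * The owner must be NULL or a DeviceState so that vmstate_register_ram()
 * can derive a stable name for migration.
 */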
3451
3452void memory_region_init_rom(MemoryRegion *mr,
3453                            struct Object *owner,
3454                            const char *name,
3455                            uint64_t size,
3456                            Error **errp)
3457{
3458    DeviceState *owner_dev;
3459    Error *err = NULL;
3460
3461    memory_region_init_rom_nomigrate(mr, owner, name, size, &err);
3462    if (err) {
3463        error_propagate(errp, err);
3464        return;
3465    }
3466    /* This will assert if owner is neither NULL nor a DeviceState.
3467     * We only want the owner here for the purposes of defining a
3468     * unique name for migration. TODO: Ideally we should implement
3469     * a naming scheme for Objects which are not DeviceStates, in
3470     * which case we can relax this restriction.
3471     */
3472    owner_dev = DEVICE(owner);
3473    vmstate_register_ram(mr, owner_dev);
3474}
3475
3476void memory_region_init_rom_device(MemoryRegion *mr,
3477                                   struct Object *owner,
3478                                   const MemoryRegionOps *ops,
3479                                   void *opaque,
3480                                   const char *name,
3481                                   uint64_t size,
3482                                   Error **errp)
3483{
3484    DeviceState *owner_dev;
3485    Error *err = NULL;
3486
3487    memory_region_init_rom_device_nomigrate(mr, owner, ops, opaque,
3488                                            name, size, &err);
3489    if (err) {
3490        error_propagate(errp, err);
3491        return;
3492    }
3493    /* This will assert if owner is neither NULL nor a DeviceState.
3494     * We only want the owner here for the purposes of defining a
3495     * unique name for migration. TODO: Ideally we should implement
3496     * a naming scheme for Objects which are not DeviceStates, in
3497     * which case we can relax this restriction.
3498     */
3499    owner_dev = DEVICE(owner);
3500    vmstate_register_ram(mr, owner_dev);
3501}
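
/*
 * Minimal usage sketch for a flash-like device (pflash_ops, s->mem and the
 * size are illustrative assumptions, not defined in this file):
 *
 *   memory_region_init_rom_device(&s->mem, OBJECT(dev), &pflash_ops, s,
 *                                 "flash.rom", 16 * 1024 * 1024,
 *                                 &error_fatal);
 *
 * Reads are serviced from the backing RAM; writes are dispatched through
 * pflash_ops until the caller switches modes with
 * memory_region_rom_device_set_romd().
 */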
3502
3503static const TypeInfo memory_region_info = {
3504    .parent             = TYPE_OBJECT,
3505    .name               = TYPE_MEMORY_REGION,
3506    .instance_size      = sizeof(MemoryRegion),
3507    .instance_init      = memory_region_initfn,
3508    .instance_finalize  = memory_region_finalize,
3509    .class_init         = memory_region_class_init,
3510    .interfaces         = (InterfaceInfo[]) {
3511        { TYPE_FDT_GENERIC_MMAP },
3512        { },
3513    },
3514};
3515
3516static bool memory_transaction_attr_get_secure(Object *obj, Error **errp)
3517{
3518    MemTxAttrs *mattr = MEMORY_TRANSACTION_ATTR(obj);
3519    return mattr->secure;
3520}
3521
3522static void memory_transaction_attr_set_secure(Object *obj, bool value,
3523                                               Error **errp)
3524{
3525    MemTxAttrs *mattr = MEMORY_TRANSACTION_ATTR(obj);
3526    mattr->secure = value;
3527}
3528
3529static void mattr_get_master_id(Object *obj, Visitor *v, const char *name,
3530                                void *opaque, Error **errp)
3531{
3532    MemTxAttrs *mattr = MEMORY_TRANSACTION_ATTR(obj);
3533    uint64_t value = mattr->master_id;
3534
3535    visit_type_uint64(v, name, &value, errp);
3536}
3537
3538
3539static void mattr_set_master_id(Object *obj, Visitor *v, const char *name,
3540                                void *opaque, Error **errp)
3541{
3542    MemTxAttrs *mattr = MEMORY_TRANSACTION_ATTR(obj);
3543    Error *local_err = NULL;
3544    uint64_t value;
3545
3546    visit_type_uint64(v, name, &value, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }
3547    mattr->master_id = value;
3548}
3549
3550
3551static void memory_transaction_attr_initfn(Object *obj)
3552{
3553    MemTxAttrs *mattr = MEMORY_TRANSACTION_ATTR(obj);
3554
3555    object_property_add_bool(OBJECT(mattr), "secure",
3556                        memory_transaction_attr_get_secure,
3557                        memory_transaction_attr_set_secure,
3558                        NULL);
3559    object_property_add(OBJECT(mattr), "master-id", "uint64",
3560                        mattr_get_master_id,
3561                        mattr_set_master_id,
3562                        NULL, NULL, &error_abort);
3563}
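
/*
 * Sketch of configuring these QOM properties from board code (the attr
 * object and the values are assumptions for illustration):
 *
 *   Object *attr = object_new(TYPE_MEMORY_TRANSACTION_ATTR);
 *   object_property_set_bool(attr, true, "secure", &error_abort);
 *   object_property_set_int(attr, 0x10, "master-id", &error_abort);
 */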
3564
3565static const TypeInfo memory_transaction_attr_info = {
3566    .parent             = TYPE_OBJECT,
3567    .name               = TYPE_MEMORY_TRANSACTION_ATTR,
3568    .instance_size      = sizeof(MemTxAttrs),
3569    .instance_init      = memory_transaction_attr_initfn,
3570    .interfaces         = (InterfaceInfo[]) {
3571        { TYPE_FDT_GENERIC_MMAP },
3572        { },
3573    },
3574};
3575
3576static const TypeInfo iommu_memory_region_info = {
3577    .parent             = TYPE_MEMORY_REGION,
3578    .name               = TYPE_IOMMU_MEMORY_REGION,
3579    .class_size         = sizeof(IOMMUMemoryRegionClass),
3580    .instance_size      = sizeof(IOMMUMemoryRegion),
3581    .instance_init      = iommu_memory_region_initfn,
3582    .abstract           = true,
3583};
3584
3585static void memory_register_types(void)
3586{
3587    type_register_static(&memory_region_info);
3588    type_register_static(&memory_transaction_attr_info);
3589    type_register_static(&iommu_memory_region_info);
3590}
3591
3592type_init(memory_register_types)
3593