/*
 * Physical memory management
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates
 *
 * Authors:
 *  Avi Kivity <avi@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "exec/memory.h"
#include "exec/address-spaces.h"
#include "exec/ioport.h"
#include "qapi/visitor.h"
#include "qemu/bitops.h"
#include "qemu/error-report.h"
#include "qom/object.h"
#include "trace.h"

#include "exec/memory-internal.h"
#include "exec/ram_addr.h"
#include "sysemu/kvm.h"
#include "sysemu/sysemu.h"

#include "hw/fdt_generic_util.h"

//#define DEBUG_UNASSIGNED

#define RAM_ADDR_INVALID (~(ram_addr_t)0)

static unsigned memory_region_transaction_depth;
static bool memory_region_update_pending;
static bool ioeventfd_update_pending;
static bool global_dirty_log = false;

static QTAILQ_HEAD(memory_listeners, MemoryListener) memory_listeners
    = QTAILQ_HEAD_INITIALIZER(memory_listeners);

static QTAILQ_HEAD(, AddressSpace) address_spaces
    = QTAILQ_HEAD_INITIALIZER(address_spaces);

typedef struct AddrRange AddrRange;

/*
 * Note that signed integers are needed for negative offsetting in aliases
 * (large MemoryRegion::alias_offset).
 */
struct AddrRange {
    Int128 start;
    Int128 size;
};

static AddrRange addrrange_make(Int128 start, Int128 size)
{
    return (AddrRange) { start, size };
}

static bool addrrange_equal(AddrRange r1, AddrRange r2)
{
    return int128_eq(r1.start, r2.start) && int128_eq(r1.size, r2.size);
}

static Int128 addrrange_end(AddrRange r)
{
    return int128_add(r.start, r.size);
}

static AddrRange addrrange_shift(AddrRange range, Int128 delta)
{
    int128_addto(&range.start, delta);
    return range;
}

static bool addrrange_contains(AddrRange range, Int128 addr)
{
    return int128_ge(addr, range.start)
        && int128_lt(addr, addrrange_end(range));
}

static bool addrrange_intersects(AddrRange r1, AddrRange r2)
{
    return addrrange_contains(r1, r2.start)
        || addrrange_contains(r2, r1.start);
}

static AddrRange addrrange_intersection(AddrRange r1, AddrRange r2)
{
    Int128 start = int128_max(r1.start, r2.start);
    Int128 end = int128_min(addrrange_end(r1), addrrange_end(r2));
    return addrrange_make(start, int128_sub(end, start));
}
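
/*
 * Worked example (illustrative): for r1 = [0x1000, +0x1000) and
 * r2 = [0x1800, +0x1000), addrrange_intersects() is true because r2.start
 * lies inside r1, and addrrange_intersection() yields [0x1800, +0x800):
 * start = max(0x1000, 0x1800) = 0x1800, end = min(0x2000, 0x2800) = 0x2000.
 */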

enum ListenerDirection { Forward, Reverse };

static bool memory_listener_match(MemoryListener *listener,
                                  MemoryRegionSection *section)
{
    return !listener->address_space_filter
        || listener->address_space_filter == section->address_space;
}

#define MEMORY_LISTENER_CALL_GLOBAL(_callback, _direction, _args...)    \
    do {                                                                \
        MemoryListener *_listener;                                      \
                                                                        \
        switch (_direction) {                                           \
        case Forward:                                                   \
            QTAILQ_FOREACH(_listener, &memory_listeners, link) {        \
                if (_listener->_callback) {                             \
                    _listener->_callback(_listener, ##_args);           \
                }                                                       \
            }                                                           \
            break;                                                      \
        case Reverse:                                                   \
            QTAILQ_FOREACH_REVERSE(_listener, &memory_listeners,        \
                                   memory_listeners, link) {            \
                if (_listener->_callback) {                             \
                    _listener->_callback(_listener, ##_args);           \
                }                                                       \
            }                                                           \
            break;                                                      \
        default:                                                        \
            abort();                                                    \
        }                                                               \
    } while (0)

#define MEMORY_LISTENER_CALL(_callback, _direction, _section, _args...) \
    do {                                                                \
        MemoryListener *_listener;                                      \
                                                                        \
        switch (_direction) {                                           \
        case Forward:                                                   \
            QTAILQ_FOREACH(_listener, &memory_listeners, link) {        \
                if (_listener->_callback                                \
                    && memory_listener_match(_listener, _section)) {    \
                    _listener->_callback(_listener, _section, ##_args); \
                }                                                       \
            }                                                           \
            break;                                                      \
        case Reverse:                                                   \
            QTAILQ_FOREACH_REVERSE(_listener, &memory_listeners,        \
                                   memory_listeners, link) {            \
                if (_listener->_callback                                \
                    && memory_listener_match(_listener, _section)) {    \
                    _listener->_callback(_listener, _section, ##_args); \
                }                                                       \
            }                                                           \
            break;                                                      \
        default:                                                        \
            abort();                                                    \
        }                                                               \
    } while (0)

/* No need to ref/unref .mr, the FlatRange keeps it alive.  */
#define MEMORY_LISTENER_UPDATE_REGION(fr, as, dir, callback, _args...)  \
    MEMORY_LISTENER_CALL(callback, dir, (&(MemoryRegionSection) {       \
        .mr = (fr)->mr,                                                 \
        .address_space = (as),                                          \
        .offset_within_region = (fr)->offset_in_region,                 \
        .size = (fr)->addr.size,                                        \
        .offset_within_address_space = int128_get64((fr)->addr.start),  \
        .readonly = (fr)->readonly,                                     \
    }), ##_args)

struct CoalescedMemoryRange {
    AddrRange addr;
    QTAILQ_ENTRY(CoalescedMemoryRange) link;
};

struct MemoryRegionIoeventfd {
    AddrRange addr;
    bool match_data;
    uint64_t data;
    EventNotifier *e;
};

static bool memory_region_ioeventfd_before(MemoryRegionIoeventfd a,
                                           MemoryRegionIoeventfd b)
{
    if (int128_lt(a.addr.start, b.addr.start)) {
        return true;
    } else if (int128_gt(a.addr.start, b.addr.start)) {
        return false;
    } else if (int128_lt(a.addr.size, b.addr.size)) {
        return true;
    } else if (int128_gt(a.addr.size, b.addr.size)) {
        return false;
    } else if (a.match_data < b.match_data) {
        return true;
    } else if (a.match_data > b.match_data) {
        return false;
    } else if (a.match_data) {
        if (a.data < b.data) {
            return true;
        } else if (a.data > b.data) {
            return false;
        }
    }
    if (a.e < b.e) {
        return true;
    } else if (a.e > b.e) {
        return false;
    }
    return false;
}
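
/*
 * The comparator above defines a total order over ioeventfds, sorting
 * lexicographically by (addr.start, addr.size, match_data, data, e).
 * address_space_add_del_ioeventfds() below relies on this order to walk
 * the old and new sets in step, and "equal" is defined as neither element
 * sorting before the other.
 */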

static bool memory_region_ioeventfd_equal(MemoryRegionIoeventfd a,
                                          MemoryRegionIoeventfd b)
{
    return !memory_region_ioeventfd_before(a, b)
        && !memory_region_ioeventfd_before(b, a);
}

typedef struct FlatRange FlatRange;
typedef struct FlatView FlatView;

/* Range of memory in the global map.  Addresses are absolute. */
struct FlatRange {
    MemoryRegion *mr;
    hwaddr offset_in_region;
    AddrRange addr;
    uint8_t dirty_log_mask;
    bool romd_mode;
    bool readonly;
};

/* Flattened global view of current active memory hierarchy.  Kept in sorted
 * order.
 */
struct FlatView {
    struct rcu_head rcu;
    unsigned ref;
    FlatRange *ranges;
    unsigned nr;
    unsigned nr_allocated;
};

typedef struct AddressSpaceOps AddressSpaceOps;

#define FOR_EACH_FLAT_RANGE(var, view)          \
    for (var = (view)->ranges; var < (view)->ranges + (view)->nr; ++var)

static bool flatrange_equal(FlatRange *a, FlatRange *b)
{
    return a->mr == b->mr
        && addrrange_equal(a->addr, b->addr)
        && a->offset_in_region == b->offset_in_region
        && a->readonly == b->readonly;
}

static void flatview_init(FlatView *view)
{
    view->ref = 1;
    view->ranges = NULL;
    view->nr = 0;
    view->nr_allocated = 0;
}

/* Insert a range into a given position.  Caller is responsible for maintaining
 * sorting order.
 */
static void flatview_insert(FlatView *view, unsigned pos, FlatRange *range)
{
    if (view->nr == view->nr_allocated) {
        view->nr_allocated = MAX(2 * view->nr, 10);
        view->ranges = g_realloc(view->ranges,
                                 view->nr_allocated * sizeof(*view->ranges));
    }
    memmove(view->ranges + pos + 1, view->ranges + pos,
            (view->nr - pos) * sizeof(FlatRange));
    view->ranges[pos] = *range;
    memory_region_ref(range->mr);
    ++view->nr;
}

static void flatview_destroy(FlatView *view)
{
    int i;

    for (i = 0; i < view->nr; i++) {
        memory_region_unref(view->ranges[i].mr);
    }
    g_free(view->ranges);
    g_free(view);
}

static void flatview_ref(FlatView *view)
{
    atomic_inc(&view->ref);
}

static void flatview_unref(FlatView *view)
{
    if (atomic_fetch_dec(&view->ref) == 1) {
        flatview_destroy(view);
    }
}

static bool can_merge(FlatRange *r1, FlatRange *r2)
{
    return int128_eq(addrrange_end(r1->addr), r2->addr.start)
        && r1->mr == r2->mr
        && int128_eq(int128_add(int128_make64(r1->offset_in_region),
                                r1->addr.size),
                     int128_make64(r2->offset_in_region))
        && r1->dirty_log_mask == r2->dirty_log_mask
        && r1->readonly == r2->readonly;
}

/* Attempt to simplify a view by merging adjacent ranges */
static void flatview_simplify(FlatView *view)
{
    unsigned i, j;

    i = 0;
    while (i < view->nr) {
        j = i + 1;
        while (j < view->nr
               && can_merge(&view->ranges[j-1], &view->ranges[j])) {
            int128_addto(&view->ranges[i].addr.size, view->ranges[j].addr.size);
            ++j;
        }
        ++i;
        memmove(&view->ranges[i], &view->ranges[j],
                (view->nr - j) * sizeof(view->ranges[j]));
        view->nr -= j - i;
    }
}
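
/*
 * Example (illustrative): two FlatRanges over the same RAM MemoryRegion,
 * [0x0, +0x1000) at region offset 0x0 and [0x1000, +0x1000) at region
 * offset 0x1000, with identical dirty_log_mask and readonly flags, satisfy
 * can_merge(), so flatview_simplify() collapses them into a single
 * [0x0, +0x2000) range.
 */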

static bool memory_region_big_endian(MemoryRegion *mr)
{
#ifdef TARGET_WORDS_BIGENDIAN
    return mr->ops->endianness != DEVICE_LITTLE_ENDIAN;
#else
    return mr->ops->endianness == DEVICE_BIG_ENDIAN;
#endif
}

static bool memory_region_wrong_endianness(MemoryRegion *mr)
{
#ifdef TARGET_WORDS_BIGENDIAN
    return mr->ops->endianness == DEVICE_LITTLE_ENDIAN;
#else
    return mr->ops->endianness == DEVICE_BIG_ENDIAN;
#endif
}

static void adjust_endianness(MemoryRegion *mr, uint64_t *data, unsigned size)
{
    if (memory_region_wrong_endianness(mr)) {
        switch (size) {
        case 1:
            break;
        case 2:
            *data = bswap16(*data);
            break;
        case 4:
            *data = bswap32(*data);
            break;
        case 8:
            *data = bswap64(*data);
            break;
        default:
            abort();
        }
    }
}

static hwaddr memory_region_to_absolute_addr(MemoryRegion *mr, hwaddr offset)
{
    MemoryRegion *root;
    hwaddr abs_addr = offset;

    abs_addr += mr->addr;
    for (root = mr; root->container; ) {
        root = root->container;
        abs_addr += root->addr;
    }

    return abs_addr;
}

static int get_cpu_index(void)
{
    if (current_cpu) {
        return current_cpu->cpu_index;
    }
    return -1;
}

static MemTxResult memory_region_oldmmio_read_accessor(MemoryRegion *mr,
                                                       hwaddr addr,
                                                       uint64_t *value,
                                                       unsigned size,
                                                       unsigned shift,
                                                       uint64_t mask,
                                                       MemTxAttrs attrs)
{
    uint64_t tmp;

    tmp = mr->ops->old_mmio.read[ctz32(size)](mr->opaque, addr);
    if (mr->subpage) {
        trace_memory_region_subpage_read(get_cpu_index(), mr, addr, tmp, size);
    } else if (mr == &io_mem_notdirty) {
        /* Accesses to code which has previously been translated into a TB show
         * up in the MMIO path, as accesses to the io_mem_notdirty
         * MemoryRegion. */
        trace_memory_region_tb_read(get_cpu_index(), addr, tmp, size);
    } else if (TRACE_MEMORY_REGION_OPS_READ_ENABLED) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_read(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    *value |= (tmp & mask) << shift;
    return MEMTX_OK;
}

/* FIXME: Remove */
static MemTxResult memory_region_read_accessor_attr(MemoryRegion *mr,
                                                    hwaddr addr,
                                                    uint64_t *value,
                                                    unsigned size,
                                                    unsigned shift,
                                                    uint64_t mask,
                                                    MemTxAttrs attrs)
{
    MemoryTransaction tr = {{0}};

    if (mr->flush_coalesced_mmio) {
        qemu_flush_coalesced_mmio_buffer();
    }

    tr.opaque = mr->opaque;
    tr.addr = addr;
    tr.size = size;
    tr.attr = attrs;
    mr->ops->access(&tr);
    *value |= (tr.data.u64 & mask) << shift;

    return MEMTX_OK;
}
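
/*
 * Note: the MemoryTransaction bundle and the ops->access() callback used
 * above are extensions in this tree; upstream QEMU's MemoryRegionOps only
 * offers the read/write and read_with_attrs/write_with_attrs families that
 * follow.  The bundle packs (opaque, addr, size, attrs, rw, data) into one
 * struct so a single device callback can serve both directions.
 */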

static MemTxResult memory_region_read_accessor(MemoryRegion *mr,
                                               hwaddr addr,
                                               uint64_t *value,
                                               unsigned size,
                                               unsigned shift,
                                               uint64_t mask,
                                               MemTxAttrs attrs)
{
    uint64_t tmp;

    tmp = mr->ops->read(mr->opaque, addr, size);
    if (mr->subpage) {
        trace_memory_region_subpage_read(get_cpu_index(), mr, addr, tmp, size);
    } else if (mr == &io_mem_notdirty) {
        /* Accesses to code which has previously been translated into a TB show
         * up in the MMIO path, as accesses to the io_mem_notdirty
         * MemoryRegion. */
        trace_memory_region_tb_read(get_cpu_index(), addr, tmp, size);
    } else if (TRACE_MEMORY_REGION_OPS_READ_ENABLED) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_read(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    *value |= (tmp & mask) << shift;
    return MEMTX_OK;
}

static MemTxResult memory_region_read_with_attrs_accessor(MemoryRegion *mr,
                                                          hwaddr addr,
                                                          uint64_t *value,
                                                          unsigned size,
                                                          unsigned shift,
                                                          uint64_t mask,
                                                          MemTxAttrs attrs)
{
    uint64_t tmp = 0;
    MemTxResult r;

    r = mr->ops->read_with_attrs(mr->opaque, addr, &tmp, size, attrs);
    if (mr->subpage) {
        trace_memory_region_subpage_read(get_cpu_index(), mr, addr, tmp, size);
    } else if (mr == &io_mem_notdirty) {
        /* Accesses to code which has previously been translated into a TB show
         * up in the MMIO path, as accesses to the io_mem_notdirty
         * MemoryRegion. */
        trace_memory_region_tb_read(get_cpu_index(), addr, tmp, size);
    } else if (TRACE_MEMORY_REGION_OPS_READ_ENABLED) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_read(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    *value |= (tmp & mask) << shift;
    return r;
}

static MemTxResult memory_region_oldmmio_write_accessor(MemoryRegion *mr,
                                                        hwaddr addr,
                                                        uint64_t *value,
                                                        unsigned size,
                                                        unsigned shift,
                                                        uint64_t mask,
                                                        MemTxAttrs attrs)
{
    uint64_t tmp;

    tmp = (*value >> shift) & mask;
    if (mr->subpage) {
        trace_memory_region_subpage_write(get_cpu_index(), mr, addr, tmp, size);
    } else if (mr == &io_mem_notdirty) {
        /* Accesses to code which has previously been translated into a TB show
         * up in the MMIO path, as accesses to the io_mem_notdirty
         * MemoryRegion. */
        trace_memory_region_tb_write(get_cpu_index(), addr, tmp, size);
    } else if (TRACE_MEMORY_REGION_OPS_WRITE_ENABLED) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_write(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    mr->ops->old_mmio.write[ctz32(size)](mr->opaque, addr, tmp);
    return MEMTX_OK;
}

/* FIXME: Remove */
static MemTxResult memory_region_write_accessor_attr(MemoryRegion *mr,
                                                     hwaddr addr,
                                                     uint64_t *value,
                                                     unsigned size,
                                                     unsigned shift,
                                                     uint64_t mask,
                                                     MemTxAttrs attrs)
{
    MemoryTransaction tr = {{0}};

    if (mr->flush_coalesced_mmio) {
        qemu_flush_coalesced_mmio_buffer();
    }

    tr.opaque = mr->opaque;
    tr.rw = true;
    tr.addr = addr;
    tr.size = size;
    tr.attr = attrs;
    tr.data.u64 = (*value >> shift) & mask;
    trace_memory_region_ops_write(get_cpu_index(), mr, tr.addr, tr.data.u64,
                                  tr.size);
    mr->ops->access(&tr);

    return MEMTX_OK;
}

static MemTxResult memory_region_write_accessor(MemoryRegion *mr,
                                                hwaddr addr,
                                                uint64_t *value,
                                                unsigned size,
                                                unsigned shift,
                                                uint64_t mask,
                                                MemTxAttrs attrs)
{
    uint64_t tmp;

    tmp = (*value >> shift) & mask;
    if (mr->subpage) {
        trace_memory_region_subpage_write(get_cpu_index(), mr, addr, tmp, size);
    } else if (mr == &io_mem_notdirty) {
        /* Accesses to code which has previously been translated into a TB show
         * up in the MMIO path, as accesses to the io_mem_notdirty
         * MemoryRegion. */
        trace_memory_region_tb_write(get_cpu_index(), addr, tmp, size);
    } else if (TRACE_MEMORY_REGION_OPS_WRITE_ENABLED) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_write(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    mr->ops->write(mr->opaque, addr, tmp, size);
    return MEMTX_OK;
}

static MemTxResult memory_region_write_with_attrs_accessor(MemoryRegion *mr,
                                                           hwaddr addr,
                                                           uint64_t *value,
                                                           unsigned size,
                                                           unsigned shift,
                                                           uint64_t mask,
                                                           MemTxAttrs attrs)
{
    uint64_t tmp;

    tmp = (*value >> shift) & mask;
    if (mr->subpage) {
        trace_memory_region_subpage_write(get_cpu_index(), mr, addr, tmp, size);
    } else if (mr == &io_mem_notdirty) {
        /* Accesses to code which has previously been translated into a TB show
         * up in the MMIO path, as accesses to the io_mem_notdirty
         * MemoryRegion. */
        trace_memory_region_tb_write(get_cpu_index(), addr, tmp, size);
    } else if (TRACE_MEMORY_REGION_OPS_WRITE_ENABLED) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_write(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    return mr->ops->write_with_attrs(mr->opaque, addr, tmp, size, attrs);
}

static MemTxResult access_with_adjusted_size(hwaddr addr,
                                      uint64_t *value,
                                      unsigned size,
                                      unsigned access_size_min,
                                      unsigned access_size_max,
                                      MemTxResult (*access)(MemoryRegion *mr,
                                                            hwaddr addr,
                                                            uint64_t *value,
                                                            unsigned size,
                                                            unsigned shift,
                                                            uint64_t mask,
                                                            MemTxAttrs attrs),
                                      MemoryRegion *mr,
                                      MemTxAttrs attrs)
{
    uint64_t access_mask;
    unsigned access_size;
    unsigned i;
    MemTxResult r = MEMTX_OK;

    if (!access_size_min) {
        access_size_min = 1;
    }
    if (!access_size_max) {
        access_size_max = 4;
    }

    /* FIXME: support unaligned access? */
    access_size = MAX(MIN(size, access_size_max), access_size_min);
    access_mask = -1ULL >> (64 - access_size * 8);
    if (memory_region_big_endian(mr)) {
        for (i = 0; i < size; i += access_size) {
            r |= access(mr, addr + i, value, access_size,
                        (size - access_size - i) * 8, access_mask, attrs);
        }
    } else {
        for (i = 0; i < size; i += access_size) {
            r |= access(mr, addr + i, value, access_size, i * 8,
                        access_mask, attrs);
        }
    }
    return r;
}
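
/*
 * Worked example (illustrative): a 4-byte read from a device whose
 * impl.max_access_size is 2 is split into two 2-byte accesses with
 * access_mask 0xffff.  The pieces normally land at bit shifts 0 and 16
 * (i * 8); when memory_region_big_endian(mr) holds, the shift for byte
 * offset i is (size - access_size - i) * 8 instead, so the first piece
 * fills the high half of *value.
 */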

static AddressSpace *memory_region_to_address_space(MemoryRegion *mr)
{
    AddressSpace *as;

    while (mr->container) {
        mr = mr->container;
    }
    QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
        if (mr == as->root) {
            return as;
        }
    }
    return NULL;
}

/* Render a memory region into the global view.  Ranges in @view obscure
 * ranges in @mr.
 */
static void render_memory_region(FlatView *view,
                                 MemoryRegion *mr,
                                 Int128 base,
                                 AddrRange clip,
                                 bool readonly)
{
    MemoryRegion *subregion;
    unsigned i;
    hwaddr offset_in_region;
    Int128 remain;
    Int128 now;
    FlatRange fr;
    AddrRange tmp;

    if (!mr->enabled) {
        return;
    }

    int128_addto(&base, int128_make64(mr->addr));
    readonly |= mr->readonly;

    tmp = addrrange_make(base, mr->size);

    if (!addrrange_intersects(tmp, clip)) {
        return;
    }

    clip = addrrange_intersection(tmp, clip);

    if (mr->alias) {
        int128_subfrom(&base, int128_make64(mr->alias->addr));
        int128_subfrom(&base, int128_make64(mr->alias_offset));
        render_memory_region(view, mr->alias, base, clip, readonly);
        return;
    }

    /* Render subregions in priority order. */
    QTAILQ_FOREACH(subregion, &mr->subregions, subregions_link) {
        render_memory_region(view, subregion, base, clip, readonly);
    }

    if (!mr->terminates) {
        return;
    }

    offset_in_region = int128_get64(int128_sub(clip.start, base));
    base = clip.start;
    remain = clip.size;

    fr.mr = mr;
    fr.dirty_log_mask = memory_region_get_dirty_log_mask(mr);
    fr.romd_mode = mr->romd_mode;
    fr.readonly = readonly;

    /* Render the region itself into any gaps left by the current view. */
    for (i = 0; i < view->nr && int128_nz(remain); ++i) {
        if (int128_ge(base, addrrange_end(view->ranges[i].addr))) {
            continue;
        }
        if (int128_lt(base, view->ranges[i].addr.start)) {
            now = int128_min(remain,
                             int128_sub(view->ranges[i].addr.start, base));
            fr.offset_in_region = offset_in_region;
            fr.addr = addrrange_make(base, now);
            flatview_insert(view, i, &fr);
            ++i;
            int128_addto(&base, now);
            offset_in_region += int128_get64(now);
            int128_subfrom(&remain, now);
        }
        now = int128_sub(int128_min(int128_add(base, remain),
                                    addrrange_end(view->ranges[i].addr)),
                         base);
        int128_addto(&base, now);
        offset_in_region += int128_get64(now);
        int128_subfrom(&remain, now);
    }
    if (int128_nz(remain)) {
        fr.offset_in_region = offset_in_region;
        fr.addr = addrrange_make(base, remain);
        flatview_insert(view, i, &fr);
    }
}
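
/*
 * Example (illustrative): two enabled subregions mapped at the same guest
 * address, one with priority 1 and one with priority 0, are visited in
 * priority order here, so the priority-1 region claims the overlap and the
 * priority-0 region only fills whatever gaps remain; rendering never
 * displaces ranges already present in the view.
 */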

/* Render a memory topology into a list of disjoint absolute ranges. */
static FlatView *generate_memory_topology(MemoryRegion *mr)
{
    FlatView *view;

    view = g_new(FlatView, 1);
    flatview_init(view);

    if (mr) {
        render_memory_region(view, mr, int128_zero(),
                             addrrange_make(int128_zero(), int128_2_64()), false);
    }
    flatview_simplify(view);

    return view;
}

static void address_space_add_del_ioeventfds(AddressSpace *as,
                                             MemoryRegionIoeventfd *fds_new,
                                             unsigned fds_new_nb,
                                             MemoryRegionIoeventfd *fds_old,
                                             unsigned fds_old_nb)
{
    unsigned iold, inew;
    MemoryRegionIoeventfd *fd;
    MemoryRegionSection section;

    /* Generate a symmetric difference of the old and new fd sets, adding
     * and deleting as necessary.
     */

    iold = inew = 0;
    while (iold < fds_old_nb || inew < fds_new_nb) {
        if (iold < fds_old_nb
            && (inew == fds_new_nb
                || memory_region_ioeventfd_before(fds_old[iold],
                                                  fds_new[inew]))) {
            fd = &fds_old[iold];
            section = (MemoryRegionSection) {
                .address_space = as,
                .offset_within_address_space = int128_get64(fd->addr.start),
                .size = fd->addr.size,
            };
            MEMORY_LISTENER_CALL(eventfd_del, Forward, &section,
                                 fd->match_data, fd->data, fd->e);
            ++iold;
        } else if (inew < fds_new_nb
                   && (iold == fds_old_nb
                       || memory_region_ioeventfd_before(fds_new[inew],
                                                         fds_old[iold]))) {
            fd = &fds_new[inew];
            section = (MemoryRegionSection) {
                .address_space = as,
                .offset_within_address_space = int128_get64(fd->addr.start),
                .size = fd->addr.size,
            };
            MEMORY_LISTENER_CALL(eventfd_add, Reverse, &section,
                                 fd->match_data, fd->data, fd->e);
            ++inew;
        } else {
            ++iold;
            ++inew;
        }
    }
}

static FlatView *address_space_get_flatview(AddressSpace *as)
{
    FlatView *view;

    rcu_read_lock();
    view = atomic_rcu_read(&as->current_map);
    flatview_ref(view);
    rcu_read_unlock();
    return view;
}

static void address_space_update_ioeventfds(AddressSpace *as)
{
    FlatView *view;
    FlatRange *fr;
    unsigned ioeventfd_nb = 0;
    MemoryRegionIoeventfd *ioeventfds = NULL;
    AddrRange tmp;
    unsigned i;

    view = address_space_get_flatview(as);
    FOR_EACH_FLAT_RANGE(fr, view) {
        for (i = 0; i < fr->mr->ioeventfd_nb; ++i) {
            tmp = addrrange_shift(fr->mr->ioeventfds[i].addr,
                                  int128_sub(fr->addr.start,
                                             int128_make64(fr->offset_in_region)));
            if (addrrange_intersects(fr->addr, tmp)) {
                ++ioeventfd_nb;
                ioeventfds = g_realloc(ioeventfds,
                                       ioeventfd_nb * sizeof(*ioeventfds));
                ioeventfds[ioeventfd_nb-1] = fr->mr->ioeventfds[i];
                ioeventfds[ioeventfd_nb-1].addr = tmp;
            }
        }
    }

    address_space_add_del_ioeventfds(as, ioeventfds, ioeventfd_nb,
                                     as->ioeventfds, as->ioeventfd_nb);

    g_free(as->ioeventfds);
    as->ioeventfds = ioeventfds;
    as->ioeventfd_nb = ioeventfd_nb;
    flatview_unref(view);
}

static void address_space_update_topology_pass(AddressSpace *as,
                                               const FlatView *old_view,
                                               const FlatView *new_view,
                                               bool adding)
{
    unsigned iold, inew;
    FlatRange *frold, *frnew;

    /* Generate a symmetric difference of the old and new memory maps.
     * Kill ranges in the old map, and instantiate ranges in the new map.
     */
    iold = inew = 0;
    while (iold < old_view->nr || inew < new_view->nr) {
        if (iold < old_view->nr) {
            frold = &old_view->ranges[iold];
        } else {
            frold = NULL;
        }
        if (inew < new_view->nr) {
            frnew = &new_view->ranges[inew];
        } else {
            frnew = NULL;
        }

        if (frold
            && (!frnew
                || int128_lt(frold->addr.start, frnew->addr.start)
                || (int128_eq(frold->addr.start, frnew->addr.start)
                    && !flatrange_equal(frold, frnew)))) {
            /* In old but not in new, or in both but attributes changed. */

            if (!adding) {
                MEMORY_LISTENER_UPDATE_REGION(frold, as, Reverse, region_del);
            }

            ++iold;
        } else if (frold && frnew && flatrange_equal(frold, frnew)) {
            /* In both and unchanged (except logging may have changed) */

            if (adding) {
                MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, region_nop);
                if (frnew->dirty_log_mask & ~frold->dirty_log_mask) {
                    MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, log_start,
                                                  frold->dirty_log_mask,
                                                  frnew->dirty_log_mask);
                }
                if (frold->dirty_log_mask & ~frnew->dirty_log_mask) {
                    MEMORY_LISTENER_UPDATE_REGION(frnew, as, Reverse, log_stop,
                                                  frold->dirty_log_mask,
                                                  frnew->dirty_log_mask);
                }
            }

            ++iold;
            ++inew;
        } else {
            /* In new */

            if (adding) {
                MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, region_add);
            }

            ++inew;
        }
    }
}

static void address_space_update_topology(AddressSpace *as)
{
    FlatView *old_view = address_space_get_flatview(as);
    FlatView *new_view = generate_memory_topology(as->root);

    address_space_update_topology_pass(as, old_view, new_view, false);
    address_space_update_topology_pass(as, old_view, new_view, true);

    /* Writes are protected by the BQL.  */
    atomic_rcu_set(&as->current_map, new_view);
    call_rcu(old_view, flatview_unref, rcu);

    /* Note that all the old MemoryRegions are still alive up to this
     * point.  This relieves most MemoryListeners from the need to
     * ref/unref the MemoryRegions they get---unless they use them
     * outside the iothread mutex, in which case precise reference
     * counting is necessary.
     */
    flatview_unref(old_view);

    address_space_update_ioeventfds(as);
}

void memory_region_transaction_begin(void)
{
    qemu_flush_coalesced_mmio_buffer();
    ++memory_region_transaction_depth;
}

static void memory_region_clear_pending(void)
{
    memory_region_update_pending = false;
    ioeventfd_update_pending = false;
}

void memory_region_transaction_commit(void)
{
    AddressSpace *as;

    assert(memory_region_transaction_depth);
    --memory_region_transaction_depth;
    if (!memory_region_transaction_depth) {
        if (memory_region_update_pending) {
            MEMORY_LISTENER_CALL_GLOBAL(begin, Forward);

            QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
                address_space_update_topology(as);
            }

            MEMORY_LISTENER_CALL_GLOBAL(commit, Forward);
        } else if (ioeventfd_update_pending) {
            QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
                address_space_update_ioeventfds(as);
            }
        }
        memory_region_clear_pending();
    }
}
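
/*
 * Typical usage (illustrative): batch several layout changes so listeners
 * see a single topology update.
 *
 *     memory_region_transaction_begin();
 *     memory_region_set_address(mr, new_base);
 *     memory_region_set_enabled(mr, true);
 *     memory_region_transaction_commit();
 *
 * Begin/commit pairs may nest; the flat views are only rebuilt when the
 * outermost commit brings the depth back to zero.
 */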

static void memory_region_destructor_none(MemoryRegion *mr)
{
}

static void memory_region_destructor_ram(MemoryRegion *mr)
{
    qemu_ram_free(mr->ram_block);
}

static void memory_region_destructor_rom_device(MemoryRegion *mr)
{
    qemu_ram_free(mr->ram_block);
}

static bool memory_region_need_escape(char c)
{
    return c == '/' || c == '[' || c == '\\' || c == ']';
}

static char *memory_region_escape_name(const char *name)
{
    const char *p;
    char *escaped, *q;
    uint8_t c;
    size_t bytes = 0;

    for (p = name; *p; p++) {
        bytes += memory_region_need_escape(*p) ? 4 : 1;
    }
    if (bytes == p - name) {
        return g_memdup(name, bytes + 1);
    }

    escaped = g_malloc(bytes + 1);
    for (p = name, q = escaped; *p; p++) {
        c = *p;
        if (unlikely(memory_region_need_escape(c))) {
            *q++ = '\\';
            *q++ = 'x';
            *q++ = "0123456789abcdef"[c >> 4];
            c = "0123456789abcdef"[c & 15];
        }
        *q++ = c;
    }
    *q = 0;
    return escaped;
}
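
/*
 * Example (illustrative): "pci[0]" contains the reserved characters '[' and
 * ']', so it is escaped to "pci\x5b0\x5d" before being used as a QOM child
 * property name.
 */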

void memory_region_init(MemoryRegion *mr,
                        Object *owner,
                        const char *name,
                        uint64_t size)
{
    object_initialize(mr, sizeof(*mr), TYPE_MEMORY_REGION);
    mr->size = int128_make64(size);
    if (size == UINT64_MAX) {
        mr->size = int128_2_64();
    }
    mr->name = g_strdup(name);
    mr->owner = owner;
    mr->ram_block = NULL;

    if (name) {
        char *escaped_name = memory_region_escape_name(name);
        char *name_array = g_strdup_printf("%s[*]", escaped_name);

        if (!owner) {
            owner = container_get(qdev_get_machine(), "/unattached");
        }

        object_property_add_child(owner, name_array, OBJECT(mr), &error_abort);
        object_unref(OBJECT(mr));
        g_free(name_array);
        g_free(escaped_name);
    }
}
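
/*
 * Typical usage (illustrative), assuming "dev" is some Object * owner:
 *
 *     MemoryRegion *mr = g_new0(MemoryRegion, 1);
 *     memory_region_init(mr, OBJECT(dev), "regs", 0x1000);
 *
 * The region becomes a QOM child of its owner (or of /machine/unattached
 * when no owner is given), auto-numbered "regs[0]", "regs[1]", ... by the
 * "[*]" suffix.
 */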

static void memory_region_update_container_subregions(MemoryRegion *subregion)
{
    MemoryRegion *mr = subregion->container;
    MemoryRegion *other;

    memory_region_transaction_begin();

    memory_region_ref(subregion);
    QTAILQ_FOREACH(other, &mr->subregions, subregions_link) {
        if (subregion->priority >= other->priority) {
            QTAILQ_INSERT_BEFORE(other, subregion, subregions_link);
            goto done;
        }
    }
    QTAILQ_INSERT_TAIL(&mr->subregions, subregion, subregions_link);
done:
    memory_region_update_pending |= mr->enabled && subregion->enabled;
    memory_region_transaction_commit();
}

static void memory_region_get_addr(Object *obj, Visitor *v, const char *name,
                                   void *opaque, Error **errp)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    uint64_t value = mr->addr;

    visit_type_uint64(v, name, &value, errp);
}

static void memory_region_set_addr(Object *obj, Visitor *v, const char *name,
                                   void *opaque, Error **errp)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    Error *local_err = NULL;
    uint64_t value;

    visit_type_uint64(v, name, &value, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    memory_region_set_address(mr, value);
}

static void memory_region_set_container(Object *obj, Visitor *v, const char *name,
                                        void *opaque, Error **errp)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    Error *local_err = NULL;
    MemoryRegion *old_container = mr->container;
    MemoryRegion *new_container = NULL;
    char *path = NULL;

    visit_type_str(v, name, &path, &local_err);

    if (!local_err && strcmp(path, "") != 0) {
        new_container = MEMORY_REGION(object_resolve_link(obj, name, path,
                                      &local_err));
        while (new_container && new_container->alias) {
            new_container = new_container->alias;
        }
    }

    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    object_ref(OBJECT(new_container));

    memory_region_transaction_begin();
    memory_region_ref(mr);
    if (old_container) {
        memory_region_del_subregion(old_container, mr);
    }
    mr->container = new_container;
    if (new_container) {
        memory_region_update_container_subregions(mr);
    }
    memory_region_unref(mr);
    memory_region_transaction_commit();

    object_unref(OBJECT(old_container));
}

static void memory_region_get_container(Object *obj, Visitor *v,
                                        const char *name, void *opaque,
                                        Error **errp)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    gchar *path = (gchar *)"";

    if (mr->container) {
        path = object_get_canonical_path(OBJECT(mr->container));
    }
    visit_type_str(v, name, &path, errp);
    if (mr->container) {
        g_free(path);
    }
}

static void memory_region_readd_subregion(MemoryRegion *mr)
{
    MemoryRegion *container = mr->container;

    if (container) {
        memory_region_transaction_begin();
        memory_region_ref(mr);
        memory_region_del_subregion(container, mr);
        mr->container = container;
        memory_region_update_container_subregions(mr);
        memory_region_unref(mr);
        memory_region_transaction_commit();
    }
}

static Object *memory_region_resolve_container(Object *obj, void *opaque,
                                               const char *part)
{
    MemoryRegion *mr = MEMORY_REGION(obj);

    return OBJECT(mr->container);
}
static void memory_region_set_alias(Object *obj, const char *name,
                                    Object *target, Error **errp)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    MemoryRegion *subregion, *next;

    /* Be conservative and only allow the alias to be set once for now. */
    /* FIXME: Use a softer error than assert */
    assert(!mr->alias);

    /* FIXME: check we don't already have subregions and
     * anything else that might be mutually exclusive with aliasing
     */

    memory_region_transaction_begin();
    QTAILQ_FOREACH_SAFE(subregion, &mr->subregions, subregions_link, next) {
        object_property_set_link(OBJECT(subregion), OBJECT(target),
                                 "container", errp);
    }
    memory_region_ref(mr);
    mr->alias = MEMORY_REGION(target);
    memory_region_unref(mr);
    memory_region_transaction_commit();
    /* FIXME: add cleanup destructors etc. */
}

static void memory_region_get_priority(Object *obj, Visitor *v,
                                       const char *name, void *opaque,
                                       Error **errp)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    int32_t value = mr->priority;

    visit_type_int32(v, name, &value, errp);
}

static bool memory_region_get_may_overlap(Object *obj, Error **errp)
{
    MemoryRegion *mr = MEMORY_REGION(obj);

    return mr->may_overlap;
}

static void memory_region_set_priority(Object *obj, Visitor *v, const char *name,
                                       void *opaque, Error **errp)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    Error *local_err = NULL;
    int32_t value;

    visit_type_uint32(v, name, (uint32_t *)&value, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    if (mr->priority != value) {
        mr->priority = value;
        memory_region_readd_subregion(mr);
    }
}

static void memory_region_do_set_ram(MemoryRegion *mr)
{
    char *c, *filename, *sanitized_name;

    if (mr->addr) {
        qemu_ram_free(mr->ram_block);
    }
    if (int128_eq(mr->size, int128_make64(0))) {
        return;
    }
    switch (mr->ram) {
    case 0:
        mr->ram_block = NULL;
        break;
    case 1:
        mr->ram_block = qemu_ram_alloc(int128_get64(mr->size), mr, &error_abort);
        break;
    case 2:
        sanitized_name = g_strdup(object_get_canonical_path(OBJECT(mr)));

        for (c = sanitized_name; *c != '\0'; c++) {
            if (*c == '/') {
                *c = '_';
            }
        }
        filename = g_strdup_printf("%s" G_DIR_SEPARATOR_S "qemu-memory-%s",
                                   machine_path ? machine_path : ".",
                                   sanitized_name);
        g_free(sanitized_name);
        mr->ram_block = qemu_ram_alloc_from_file(int128_get64(mr->size), mr,
                                                 true, filename, &error_abort);
        g_free(filename);
        break;
    default:
        abort();
    }
}

static void memory_region_set_ram(Object *obj, Visitor *v, const char *name,
                                  void *opaque, Error **errp)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    Error *local_err = NULL;
    uint8_t value;

    visit_type_uint8(v, name, &value, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    /* FIXME: Sanitize error handling */
    /* FIXME: Probably need all that transactions stuff */
    if (mr->ram == value) {
        return;
    }

    mr->ram = value;
    mr->terminates = !!value; /* FIXME: Wrong */

    if (int128_eq(int128_2_64(), mr->size)) {
        return;
    }

    memory_region_do_set_ram(mr);
}

static void memory_region_get_size(Object *obj, Visitor *v, const char *name,
                                   void *opaque, Error **errp)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    uint64_t value = memory_region_size(mr);

    visit_type_uint64(v, name, &value, errp);
}

static void memory_region_set_object_size(Object *obj, Visitor *v, const char *name,
                                          void *opaque, Error **errp)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    Error *local_err = NULL;
    uint64_t size;

    visit_type_uint64(v, name, &size, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    memory_region_set_size(mr, size);
}

static void memory_region_initfn(Object *obj)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    ObjectProperty *op;

    mr->ops = &unassigned_mem_ops;
    mr->enabled = true;
    mr->romd_mode = true;
    mr->global_locking = true;
    mr->destructor = memory_region_destructor_none;
    mr->size = int128_2_64();
    QTAILQ_INIT(&mr->subregions);
    QTAILQ_INIT(&mr->coalesced);

    op = object_property_add(OBJECT(mr), "container",
                             "link<" TYPE_MEMORY_REGION ">",
                             memory_region_get_container,
                             memory_region_set_container,
                             NULL, NULL, &error_abort);
    op->resolve = memory_region_resolve_container;

    object_property_add_link(OBJECT(mr), "alias", TYPE_MEMORY_REGION,
                             (Object **)&mr->alias,
                             memory_region_set_alias,
                             0,
                             &error_abort);
    object_property_add(OBJECT(mr), "addr", "uint64",
                        memory_region_get_addr,
                        memory_region_set_addr,
                        NULL, NULL, &error_abort);
    object_property_add(OBJECT(mr), "priority", "uint32",
                        memory_region_get_priority,
                        memory_region_set_priority,
                        NULL, NULL, &error_abort);
    object_property_add(OBJECT(mr), "ram", "uint8",
                        NULL, /* FIXME: Add getter */
                        memory_region_set_ram,
                        NULL, NULL, &error_abort);
    object_property_add_bool(OBJECT(mr), "may-overlap",
                             memory_region_get_may_overlap,
                             NULL, /* memory_region_set_may_overlap */
                             &error_abort);
    object_property_add(OBJECT(mr), "size", "uint64",
                        memory_region_get_size,
                        memory_region_set_object_size,
                        NULL, NULL, &error_abort);
}
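
/*
 * The properties registered above make region layout scriptable through
 * QOM.  For example (illustrative, using this tree's property API):
 *
 *     object_property_set_int(OBJECT(mr), 0x10000000, "addr", &error_abort);
 *
 * routes through memory_region_set_addr() and ultimately
 * memory_region_set_address().
 */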

static uint64_t unassigned_mem_read(void *opaque, hwaddr addr,
                                    unsigned size)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
    if (current_cpu != NULL) {
        cpu_unassigned_access(current_cpu, addr, false, false, 0, size);
    }
    return 0;
}

static void unassigned_mem_write(void *opaque, hwaddr addr,
                                 uint64_t val, unsigned size)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%"PRIx64"\n", addr, val);
#endif
    if (current_cpu != NULL) {
        cpu_unassigned_access(current_cpu, addr, true, false, 0, size);
    }
}

static bool unassigned_mem_accepts(void *opaque, hwaddr addr,
                                   unsigned size, bool is_write)
{
    return false;
}

const MemoryRegionOps unassigned_mem_ops = {
    .valid.accepts = unassigned_mem_accepts,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

bool memory_region_access_valid(MemoryRegion *mr,
                                hwaddr addr,
                                unsigned size,
                                bool is_write,
                                MemTxAttrs attrs)
{
    int access_size_min, access_size_max;
    int access_size, i;

    if (!mr->ops->valid.unaligned && (addr & (size - 1))) {
        return false;
    }

    if (!mr->ops->valid.accepts && !mr->ops->valid.accepts_tr) {
        return true;
    }

    access_size_min = mr->ops->valid.min_access_size;
    if (!mr->ops->valid.min_access_size) {
        access_size_min = 1;
    }

    access_size_max = mr->ops->valid.max_access_size;
    if (!mr->ops->valid.max_access_size) {
        access_size_max = 4;
    }

    access_size = MAX(MIN(size, access_size_max), access_size_min);
    for (i = 0; i < size; i += access_size) {
        if (mr->ops->valid.accepts_tr) {
            MemoryTransaction tr = { {0} };
            tr.opaque = mr->opaque;
            tr.addr = addr + i;
            tr.size = access_size;
            tr.rw = is_write;
            tr.attr = attrs;

            if (!mr->ops->valid.accepts_tr(&tr)) {
                return false;
            }
        } else {
            if (!mr->ops->valid.accepts(mr->opaque, addr + i, access_size,
                                        is_write)) {
                return false;
            }
        }
    }

    return true;
}
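
/*
 * Example (illustrative): a device declaring .valid.min_access_size = 4,
 * .valid.max_access_size = 4 and .valid.unaligned = false rejects a 2-byte
 * access at offset 1 up front (misaligned), while an aligned 2-byte access
 * is widened to a single 4-byte accepts() probe before being dispatched.
 */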

static MemTxResult memory_region_dispatch_read1(MemoryRegion *mr,
                                                hwaddr addr,
                                                uint64_t *pval,
                                                unsigned size,
                                                MemTxAttrs attrs)
{
    *pval = 0;

    if (mr->ops->access) {
        return access_with_adjusted_size(addr, pval, size,
                                         mr->ops->impl.min_access_size,
                                         mr->ops->impl.max_access_size,
                                         memory_region_read_accessor_attr,
                                         mr, attrs);
    } else if (mr->ops->read) {
        return access_with_adjusted_size(addr, pval, size,
                                         mr->ops->impl.min_access_size,
                                         mr->ops->impl.max_access_size,
                                         memory_region_read_accessor,
                                         mr, attrs);
    } else if (mr->ops->read_with_attrs) {
        return access_with_adjusted_size(addr, pval, size,
                                         mr->ops->impl.min_access_size,
                                         mr->ops->impl.max_access_size,
                                         memory_region_read_with_attrs_accessor,
                                         mr, attrs);
    } else {
        return access_with_adjusted_size(addr, pval, size, 1, 4,
                                         memory_region_oldmmio_read_accessor,
                                         mr, attrs);
    }
}

MemTxResult memory_region_dispatch_read(MemoryRegion *mr,
                                        hwaddr addr,
                                        uint64_t *pval,
                                        unsigned size,
                                        MemTxAttrs attrs)
{
    MemTxResult r;

    if (!memory_region_access_valid(mr, addr, size, false, attrs)) {
        *pval = unassigned_mem_read(mr, addr, size);
        return MEMTX_DECODE_ERROR;
    }

    r = memory_region_dispatch_read1(mr, addr, pval, size, attrs);
    adjust_endianness(mr, pval, size);
    return r;
}

/* Return true if an eventfd was signalled */
static bool memory_region_dispatch_write_eventfds(MemoryRegion *mr,
                                                  hwaddr addr,
                                                  uint64_t data,
                                                  unsigned size,
                                                  MemTxAttrs attrs)
{
    MemoryRegionIoeventfd ioeventfd = {
        .addr = addrrange_make(int128_make64(addr), int128_make64(size)),
        .data = data,
    };
    unsigned i;

    for (i = 0; i < mr->ioeventfd_nb; i++) {
        ioeventfd.match_data = mr->ioeventfds[i].match_data;
        ioeventfd.e = mr->ioeventfds[i].e;

        if (memory_region_ioeventfd_equal(ioeventfd, mr->ioeventfds[i])) {
            event_notifier_set(ioeventfd.e);
            return true;
        }
    }

    return false;
}
1550
1551MemTxResult memory_region_dispatch_write(MemoryRegion *mr,
1552                                         hwaddr addr,
1553                                         uint64_t data,
1554                                         unsigned size,
1555                                         MemTxAttrs attrs)
1556{
1557    if (!memory_region_access_valid(mr, addr, size, true, attrs)) {
1558        unassigned_mem_write(mr, addr, data, size);
1559        return MEMTX_DECODE_ERROR;
1560    }
1561
1562    adjust_endianness(mr, &data, size);
1563
1564    if ((!kvm_eventfds_enabled()) &&
1565        memory_region_dispatch_write_eventfds(mr, addr, data, size, attrs)) {
1566        return MEMTX_OK;
1567    }
1568
1569    if (mr->ops->access) {
1570        return access_with_adjusted_size(addr, &data, size,
1571                                         mr->ops->impl.min_access_size,
1572                                         mr->ops->impl.max_access_size,
1573                                         memory_region_write_accessor_attr,
1574                                         mr, attrs);
1575    } else if (mr->ops->write) {
1576        return access_with_adjusted_size(addr, &data, size,
1577                                         mr->ops->impl.min_access_size,
1578                                         mr->ops->impl.max_access_size,
1579                                         memory_region_write_accessor, mr,
1580                                         attrs);
1581    } else if (mr->ops->write_with_attrs) {
1582        return
1583            access_with_adjusted_size(addr, &data, size,
1584                                      mr->ops->impl.min_access_size,
1585                                      mr->ops->impl.max_access_size,
1586                                      memory_region_write_with_attrs_accessor,
1587                                      mr, attrs);
1588    } else {
1589        return access_with_adjusted_size(addr, &data, size, 1, 4,
1590                                         memory_region_oldmmio_write_accessor,
1591                                         mr, attrs);
1592    }
1593}
1594
1595void memory_region_init_io(MemoryRegion *mr,
1596                           Object *owner,
1597                           const MemoryRegionOps *ops,
1598                           void *opaque,
1599                           const char *name,
1600                           uint64_t size)
1601{
1602    memory_region_init(mr, owner, name, size);
1603    mr->ops = ops ? ops : &unassigned_mem_ops;
1604    mr->opaque = opaque;
1605    mr->terminates = true;
1606}
1607
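/*
 * Typical usage, as a hedged sketch: a device supplies read/write
 * callbacks through a MemoryRegionOps.  "s", "dummy_*" and the 0x100
 * size below are hypothetical, not part of this file:
 *
 *   static uint64_t dummy_read(void *opaque, hwaddr addr, unsigned size)
 *   {
 *       return 0;                        // all registers read as zero
 *   }
 *
 *   static void dummy_write(void *opaque, hwaddr addr,
 *                           uint64_t data, unsigned size)
 *   {
 *       // writes are ignored in this sketch
 *   }
 *
 *   static const MemoryRegionOps dummy_ops = {
 *       .read = dummy_read,
 *       .write = dummy_write,
 *       .endianness = DEVICE_NATIVE_ENDIAN,
 *   };
 *
 *   memory_region_init_io(&s->iomem, OBJECT(s), &dummy_ops, s,
 *                         "dummy-mmio", 0x100);
 */
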
1608void memory_region_init_ram(MemoryRegion *mr,
1609                            Object *owner,
1610                            const char *name,
1611                            uint64_t size,
1612                            Error **errp)
1613{
1614    memory_region_init(mr, owner, name, size);
1615    mr->ram = true;
1616    mr->terminates = true;
1617    mr->destructor = memory_region_destructor_ram;
1618    mr->ram_block = qemu_ram_alloc(size, mr, errp);
1619    mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
1620}
1621
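/*
 * A minimal sketch of allocating guest RAM and mapping it into the
 * system address space; the 64 MiB size and base address 0 are
 * arbitrary assumptions:
 *
 *   memory_region_init_ram(&s->ram, NULL, "my-board.ram",
 *                          64 * 1024 * 1024, &error_fatal);
 *   memory_region_add_subregion(get_system_memory(), 0, &s->ram);
 */
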
1622void memory_region_init_resizeable_ram(MemoryRegion *mr,
1623                                       Object *owner,
1624                                       const char *name,
1625                                       uint64_t size,
1626                                       uint64_t max_size,
1627                                       void (*resized)(const char*,
1628                                                       uint64_t length,
1629                                                       void *host),
1630                                       Error **errp)
1631{
1632    memory_region_init(mr, owner, name, size);
1633    mr->ram = true;
1634    mr->terminates = true;
1635    mr->destructor = memory_region_destructor_ram;
1636    mr->ram_block = qemu_ram_alloc_resizeable(size, max_size, resized,
1637                                              mr, errp);
1638    mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
1639}
1640
1641#ifdef __linux__
1642void memory_region_init_ram_from_file(MemoryRegion *mr,
1643                                      struct Object *owner,
1644                                      const char *name,
1645                                      uint64_t size,
1646                                      bool share,
1647                                      const char *path,
1648                                      Error **errp)
1649{
1650    memory_region_init(mr, owner, name, size);
1651    mr->ram = true;
1652    mr->terminates = true;
1653    mr->destructor = memory_region_destructor_ram;
1654    mr->ram_block = qemu_ram_alloc_from_file(size, mr, share, path, errp);
1655    mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
1656}
1657#endif
1658
1659void memory_region_init_ram_ptr(MemoryRegion *mr,
1660                                Object *owner,
1661                                const char *name,
1662                                uint64_t size,
1663                                void *ptr)
1664{
1665    memory_region_init(mr, owner, name, size);
1666    mr->ram = true;
1667    mr->terminates = true;
1668    mr->destructor = memory_region_destructor_ram;
1669    mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
1670
1671    /* qemu_ram_alloc_from_ptr cannot fail with ptr != NULL.  */
1672    assert(ptr != NULL);
1673    mr->ram_block = qemu_ram_alloc_from_ptr(size, ptr, mr, &error_fatal);
1674}
1675
1676void memory_region_set_skip_dump(MemoryRegion *mr)
1677{
1678    mr->skip_dump = true;
1679}
1680
1681void memory_region_init_alias(MemoryRegion *mr,
1682                              Object *owner,
1683                              const char *name,
1684                              MemoryRegion *orig,
1685                              hwaddr offset,
1686                              uint64_t size)
1687{
1688    memory_region_init(mr, owner, name, size);
1689    mr->alias = orig;
1690    mr->alias_offset = offset;
1691}
1692
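/*
 * Aliases let the same backing region appear at several guest-physical
 * addresses.  A hedged sketch that mirrors the second half of a 64 KiB
 * RAM region at 0x20000 (sizes and addresses are assumptions):
 *
 *   memory_region_init_alias(&s->alias, OBJECT(s), "ram-hi-alias",
 *                            &s->ram, 0x8000, 0x8000);
 *   memory_region_add_subregion(get_system_memory(), 0x20000, &s->alias);
 */
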
1693void memory_region_init_rom_device(MemoryRegion *mr,
1694                                   Object *owner,
1695                                   const MemoryRegionOps *ops,
1696                                   void *opaque,
1697                                   const char *name,
1698                                   uint64_t size,
1699                                   Error **errp)
1700{
1701    memory_region_init(mr, owner, name, size);
1702    mr->ops = ops;
1703    mr->opaque = opaque;
1704    mr->terminates = true;
1705    mr->rom_device = true;
1706    mr->destructor = memory_region_destructor_rom_device;
1707    mr->ram_block = qemu_ram_alloc(size, mr, errp);
1708}
1709
1710void memory_region_init_iommu(MemoryRegion *mr,
1711                              Object *owner,
1712                              const MemoryRegionIOMMUOps *ops,
1713                              const char *name,
1714                              uint64_t size)
1715{
1716    memory_region_init(mr, owner, name, size);
1717    mr->iommu_ops = ops;
1718    mr->terminates = true;  /* then re-forwards */
1719    notifier_list_init(&mr->iommu_notify);
1720}
1721
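/*
 * The MemoryRegionIOMMUOps hook supplies the translate callback used by
 * memory_region_iommu_replay() below.  A hedged identity-mapping sketch;
 * "example_translate" and the 4 KiB page size are assumptions:
 *
 *   static IOMMUTLBEntry example_translate(MemoryRegion *iommu,
 *                                          hwaddr addr, bool is_write)
 *   {
 *       return (IOMMUTLBEntry) {
 *           .target_as = &address_space_memory,
 *           .iova = addr & ~0xfffULL,            // page-align the input
 *           .translated_addr = addr & ~0xfffULL, // identity mapping
 *           .addr_mask = 0xfff,                  // 4 KiB pages
 *           .perm = IOMMU_RW,
 *       };
 *   }
 *
 *   static const MemoryRegionIOMMUOps example_iommu_ops = {
 *       .translate = example_translate,
 *   };
 */
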
1722static void memory_region_finalize(Object *obj)
1723{
1724    MemoryRegion *mr = MEMORY_REGION(obj);
1725
1726    assert(!mr->container);
1727
1728    /* We know the region is not visible in any address space (it
1729     * does not have a container and cannot be a root either because
1730     * it has no references), so we can blindly clear mr->enabled.
1731     * memory_region_set_enabled instead could trigger a transaction
1732     * and cause an infinite loop.
1733     */
1734    mr->enabled = false;
1735    memory_region_transaction_begin();
1736    while (!QTAILQ_EMPTY(&mr->subregions)) {
1737        MemoryRegion *subregion = QTAILQ_FIRST(&mr->subregions);
1738        memory_region_del_subregion(mr, subregion);
1739    }
1740    memory_region_transaction_commit();
1741
1742    mr->destructor(mr);
1743    memory_region_clear_coalescing(mr);
1744    g_free((char *)mr->name);
1745    g_free(mr->ioeventfds);
1746}
1747
1748Object *memory_region_owner(MemoryRegion *mr)
1749{
1750    Object *obj = OBJECT(mr);
1751    return obj->parent;
1752}
1753
1754void memory_region_ref(MemoryRegion *mr)
1755{
1756    /* MMIO callbacks most likely will access data that belongs
1757     * to the owner, hence the need to ref/unref the owner whenever
1758     * the memory region is in use.
1759     *
1760     * The memory region is a child of its owner.  As long as the
1761     * owner doesn't call unparent itself on the memory region,
1762     * ref-ing the owner will also keep the memory region alive.
1763     * Memory regions without an owner are supposed to never go away;
1764     * we do not ref/unref them because it would noticeably slow down DMA.
1765     */
1766    if (mr && mr->owner) {
1767        object_ref(mr->owner);
1768    }
1769}
1770
1771void memory_region_unref(MemoryRegion *mr)
1772{
1773    if (mr && mr->owner) {
1774        object_unref(mr->owner);
1775    }
1776}
1777
1778uint64_t memory_region_size(MemoryRegion *mr)
1779{
1780    if (int128_eq(mr->size, int128_2_64())) {
1781        return UINT64_MAX;
1782    }
1783    return int128_get64(mr->size);
1784}
1785
1786const char *memory_region_name(const MemoryRegion *mr)
1787{
1788    if (!mr->name) {
1789        ((MemoryRegion *)mr)->name =
1790            object_get_canonical_path_component(OBJECT(mr));
1791    }
1792    return mr->name;
1793}
1794
1795bool memory_region_is_skip_dump(MemoryRegion *mr)
1796{
1797    return mr->skip_dump;
1798}
1799
1800uint8_t memory_region_get_dirty_log_mask(MemoryRegion *mr)
1801{
1802    uint8_t mask = mr->dirty_log_mask;
1803    if (global_dirty_log) {
1804        mask |= (1 << DIRTY_MEMORY_MIGRATION);
1805    }
1806    return mask;
1807}
1808
1809bool memory_region_is_logging(MemoryRegion *mr, uint8_t client)
1810{
1811    return memory_region_get_dirty_log_mask(mr) & (1 << client);
1812}
1813
1814void memory_region_register_iommu_notifier(MemoryRegion *mr, Notifier *n)
1815{
1816    notifier_list_add(&mr->iommu_notify, n);
1817}
1818
1819void memory_region_iommu_replay(MemoryRegion *mr, Notifier *n,
1820                                hwaddr granularity, bool is_write)
1821{
1822    hwaddr addr;
1823    IOMMUTLBEntry iotlb;
1824
1825    for (addr = 0; addr < memory_region_size(mr); addr += granularity) {
1826        iotlb = mr->iommu_ops->translate(mr, addr, is_write);
1827        if (iotlb.perm != IOMMU_NONE) {
1828            n->notify(n, &iotlb);
1829        }
1830
1831        /* If (2^64 - MR size) < granularity, addr + granularity can wrap
1832         * around to a lower address and loop forever; catch that here. */
1833        if ((addr + granularity) < addr) {
1834            break;
1835        }
1836    }
1837}
1838
1839void memory_region_unregister_iommu_notifier(Notifier *n)
1840{
1841    notifier_remove(n);
1842}
1843
1844void memory_region_notify_iommu(MemoryRegion *mr,
1845                                IOMMUTLBEntry entry)
1846{
1847    assert(memory_region_is_iommu(mr));
1848    notifier_list_notify(&mr->iommu_notify, &entry);
1849}
1850
1851void memory_region_set_log(MemoryRegion *mr, bool log, unsigned client)
1852{
1853    uint8_t mask = 1 << client;
1854    uint8_t old_logging;
1855
1856    assert(client == DIRTY_MEMORY_VGA);
1857    old_logging = mr->vga_logging_count;
1858    mr->vga_logging_count += log ? 1 : -1;
1859    if (!!old_logging == !!mr->vga_logging_count) {
1860        return;
1861    }
1862
1863    memory_region_transaction_begin();
1864    mr->dirty_log_mask = (mr->dirty_log_mask & ~mask) | (log * mask);
1865    memory_region_update_pending |= mr->enabled;
1866    memory_region_transaction_commit();
1867}
1868
1869bool memory_region_get_dirty(MemoryRegion *mr, hwaddr addr,
1870                             hwaddr size, unsigned client)
1871{
1872    assert(mr->ram_block);
1873    return cpu_physical_memory_get_dirty(memory_region_get_ram_addr(mr) + addr,
1874                                         size, client);
1875}
1876
1877void memory_region_set_dirty(MemoryRegion *mr, hwaddr addr,
1878                             hwaddr size)
1879{
1880    assert(mr->ram_block);
1881    cpu_physical_memory_set_dirty_range(memory_region_get_ram_addr(mr) + addr,
1882                                        size,
1883                                        memory_region_get_dirty_log_mask(mr));
1884}
1885
1886bool memory_region_test_and_clear_dirty(MemoryRegion *mr, hwaddr addr,
1887                                        hwaddr size, unsigned client)
1888{
1889    assert(mr->ram_block);
1890    return cpu_physical_memory_test_and_clear_dirty(
1891                memory_region_get_ram_addr(mr) + addr, size, client);
1892}
1893
1894
1895void memory_region_sync_dirty_bitmap(MemoryRegion *mr)
1896{
1897    AddressSpace *as;
1898    FlatRange *fr;
1899
1900    QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
1901        FlatView *view = address_space_get_flatview(as);
1902        FOR_EACH_FLAT_RANGE(fr, view) {
1903            if (fr->mr == mr) {
1904                MEMORY_LISTENER_UPDATE_REGION(fr, as, Forward, log_sync);
1905            }
1906        }
1907        flatview_unref(view);
1908    }
1909}
1910
1911void memory_region_set_readonly(MemoryRegion *mr, bool readonly)
1912{
1913    if (mr->readonly != readonly) {
1914        memory_region_transaction_begin();
1915        mr->readonly = readonly;
1916        memory_region_update_pending |= mr->enabled;
1917        memory_region_transaction_commit();
1918    }
1919}
1920
1921void memory_region_rom_device_set_romd(MemoryRegion *mr, bool romd_mode)
1922{
1923    if (mr->romd_mode != romd_mode) {
1924        memory_region_transaction_begin();
1925        mr->romd_mode = romd_mode;
1926        memory_region_update_pending |= mr->enabled;
1927        memory_region_transaction_commit();
1928    }
1929}
1930
1931void memory_region_reset_dirty(MemoryRegion *mr, hwaddr addr,
1932                               hwaddr size, unsigned client)
1933{
1934    assert(mr->ram_block);
1935    cpu_physical_memory_test_and_clear_dirty(
1936        memory_region_get_ram_addr(mr) + addr, size, client);
1937}
1938
1939int memory_region_get_fd(MemoryRegion *mr)
1940{
1941    if (mr->alias) {
1942        return memory_region_get_fd(mr->alias);
1943    }
1944
1945    assert(mr->ram_block);
1946
1947    return qemu_get_ram_fd(memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK);
1948}
1949
1950void *memory_region_get_ram_ptr(MemoryRegion *mr)
1951{
1952    void *ptr;
1953    uint64_t offset = 0;
1954
1955    rcu_read_lock();
1956    while (mr->alias) {
1957        offset += mr->alias_offset;
1958        mr = mr->alias;
1959    }
1960    assert(mr->ram_block);
1961    ptr = qemu_get_ram_ptr(mr->ram_block,
1962                           memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK);
1963    rcu_read_unlock();
1964
1965    return ptr + offset;
1966}
1967
1968ram_addr_t memory_region_get_ram_addr(MemoryRegion *mr)
1969{
1970    return mr->ram_block ? mr->ram_block->offset : RAM_ADDR_INVALID;
1971}
1972
1973void memory_region_ram_resize(MemoryRegion *mr, ram_addr_t newsize, Error **errp)
1974{
1975    assert(mr->ram_block);
1976
1977    qemu_ram_resize(memory_region_get_ram_addr(mr), newsize, errp);
1978}
1979
1980static void memory_region_update_coalesced_range_as(MemoryRegion *mr, AddressSpace *as)
1981{
1982    FlatView *view;
1983    FlatRange *fr;
1984    CoalescedMemoryRange *cmr;
1985    AddrRange tmp;
1986    MemoryRegionSection section;
1987
1988    view = address_space_get_flatview(as);
1989    FOR_EACH_FLAT_RANGE(fr, view) {
1990        if (fr->mr == mr) {
1991            section = (MemoryRegionSection) {
1992                .address_space = as,
1993                .offset_within_address_space = int128_get64(fr->addr.start),
1994                .size = fr->addr.size,
1995            };
1996
1997            MEMORY_LISTENER_CALL(coalesced_mmio_del, Reverse, &section,
1998                                 int128_get64(fr->addr.start),
1999                                 int128_get64(fr->addr.size));
2000            QTAILQ_FOREACH(cmr, &mr->coalesced, link) {
2001                tmp = addrrange_shift(cmr->addr,
2002                                      int128_sub(fr->addr.start,
2003                                                 int128_make64(fr->offset_in_region)));
2004                if (!addrrange_intersects(tmp, fr->addr)) {
2005                    continue;
2006                }
2007                tmp = addrrange_intersection(tmp, fr->addr);
2008                MEMORY_LISTENER_CALL(coalesced_mmio_add, Forward, &section,
2009                                     int128_get64(tmp.start),
2010                                     int128_get64(tmp.size));
2011            }
2012        }
2013    }
2014    flatview_unref(view);
2015}
2016
2017static void memory_region_update_coalesced_range(MemoryRegion *mr)
2018{
2019    AddressSpace *as;
2020
2021    QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
2022        memory_region_update_coalesced_range_as(mr, as);
2023    }
2024}
2025
2026void memory_region_set_coalescing(MemoryRegion *mr)
2027{
2028    memory_region_clear_coalescing(mr);
2029    memory_region_add_coalescing(mr, 0, int128_get64(mr->size));
2030}
2031
2032void memory_region_add_coalescing(MemoryRegion *mr,
2033                                  hwaddr offset,
2034                                  uint64_t size)
2035{
2036    CoalescedMemoryRange *cmr = g_malloc(sizeof(*cmr));
2037
2038    cmr->addr = addrrange_make(int128_make64(offset), int128_make64(size));
2039    QTAILQ_INSERT_TAIL(&mr->coalesced, cmr, link);
2040    memory_region_update_coalesced_range(mr);
2041    memory_region_set_flush_coalesced(mr);
2042}
2043
2044void memory_region_clear_coalescing(MemoryRegion *mr)
2045{
2046    CoalescedMemoryRange *cmr;
2047    bool updated = false;
2048
2049    qemu_flush_coalesced_mmio_buffer();
2050    mr->flush_coalesced_mmio = false;
2051
2052    while (!QTAILQ_EMPTY(&mr->coalesced)) {
2053        cmr = QTAILQ_FIRST(&mr->coalesced);
2054        QTAILQ_REMOVE(&mr->coalesced, cmr, link);
2055        g_free(cmr);
2056        updated = true;
2057    }
2058
2059    if (updated) {
2060        memory_region_update_coalesced_range(mr);
2061    }
2062}
2063
2064void memory_region_set_flush_coalesced(MemoryRegion *mr)
2065{
2066    mr->flush_coalesced_mmio = true;
2067}
2068
2069void memory_region_clear_flush_coalesced(MemoryRegion *mr)
2070{
2071    qemu_flush_coalesced_mmio_buffer();
2072    if (QTAILQ_EMPTY(&mr->coalesced)) {
2073        mr->flush_coalesced_mmio = false;
2074    }
2075}
2076
2077void memory_region_set_global_locking(MemoryRegion *mr)
2078{
2079    mr->global_locking = true;
2080}
2081
2082void memory_region_clear_global_locking(MemoryRegion *mr)
2083{
2084    mr->global_locking = false;
2085}
2086
2087static bool userspace_eventfd_warning;
2088
2089void memory_region_add_eventfd(MemoryRegion *mr,
2090                               hwaddr addr,
2091                               unsigned size,
2092                               bool match_data,
2093                               uint64_t data,
2094                               EventNotifier *e)
2095{
2096    MemoryRegionIoeventfd mrfd = {
2097        .addr.start = int128_make64(addr),
2098        .addr.size = int128_make64(size),
2099        .match_data = match_data,
2100        .data = data,
2101        .e = e,
2102    };
2103    unsigned i;
2104
2105    if (kvm_enabled() && (!(kvm_eventfds_enabled() ||
2106                            userspace_eventfd_warning))) {
2107        userspace_eventfd_warning = true;
2108        error_report("Using eventfd without MMIO binding in KVM. "
2109                     "Suboptimal performance expected");
2110    }
2111
2112    if (size) {
2113        adjust_endianness(mr, &mrfd.data, size);
2114    }
2115    memory_region_transaction_begin();
2116    for (i = 0; i < mr->ioeventfd_nb; ++i) {
2117        if (memory_region_ioeventfd_before(mrfd, mr->ioeventfds[i])) {
2118            break;
2119        }
2120    }
2121    ++mr->ioeventfd_nb;
2122    mr->ioeventfds = g_realloc(mr->ioeventfds,
2123                                  sizeof(*mr->ioeventfds) * mr->ioeventfd_nb);
2124    memmove(&mr->ioeventfds[i+1], &mr->ioeventfds[i],
2125            sizeof(*mr->ioeventfds) * (mr->ioeventfd_nb-1 - i));
2126    mr->ioeventfds[i] = mrfd;
2127    ioeventfd_update_pending |= mr->enabled;
2128    memory_region_transaction_commit();
2129}
2130
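/*
 * A hedged sketch of wiring a doorbell register to an EventNotifier so
 * that a guest write of 1 to offset 0x40 signals the notifier without a
 * full MMIO dispatch (the offset, width and value are assumptions, and
 * "s->doorbell" assumes qemu/event_notifier.h is available):
 *
 *   event_notifier_init(&s->doorbell, 0);
 *   memory_region_add_eventfd(&s->iomem, 0x40, 4,
 *                             true,          // match_data
 *                             1,             // value that must be written
 *                             &s->doorbell);
 */
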
2131void memory_region_del_eventfd(MemoryRegion *mr,
2132                               hwaddr addr,
2133                               unsigned size,
2134                               bool match_data,
2135                               uint64_t data,
2136                               EventNotifier *e)
2137{
2138    MemoryRegionIoeventfd mrfd = {
2139        .addr.start = int128_make64(addr),
2140        .addr.size = int128_make64(size),
2141        .match_data = match_data,
2142        .data = data,
2143        .e = e,
2144    };
2145    unsigned i;
2146
2147    if (size) {
2148        adjust_endianness(mr, &mrfd.data, size);
2149    }
2150    memory_region_transaction_begin();
2151    for (i = 0; i < mr->ioeventfd_nb; ++i) {
2152        if (memory_region_ioeventfd_equal(mrfd, mr->ioeventfds[i])) {
2153            break;
2154        }
2155    }
2156    assert(i != mr->ioeventfd_nb);
2157    memmove(&mr->ioeventfds[i], &mr->ioeventfds[i+1],
2158            sizeof(*mr->ioeventfds) * (mr->ioeventfd_nb - (i+1)));
2159    --mr->ioeventfd_nb;
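    /* The "+ 1" below appears to be deliberate: it keeps the allocation
     * non-empty when ioeventfd_nb drops to zero, so mr->ioeventfds stays
     * a valid pointer for later g_realloc()/memmove() calls. */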
2160    mr->ioeventfds = g_realloc(mr->ioeventfds,
2161                                  sizeof(*mr->ioeventfds)*mr->ioeventfd_nb + 1);
2162    ioeventfd_update_pending |= mr->enabled;
2163    memory_region_transaction_commit();
2164}
2165
2166static void memory_region_add_subregion_common(MemoryRegion *mr,
2167                                               hwaddr offset,
2168                                               MemoryRegion *subregion)
2169{
2170    assert(!subregion->container);
2171    subregion->container = mr;
2172    subregion->addr = offset;
2173    memory_region_update_container_subregions(subregion);
2174}
2175
2176void memory_region_add_subregion(MemoryRegion *mr,
2177                                 hwaddr offset,
2178                                 MemoryRegion *subregion)
2179{
2180    subregion->priority = 0;
2181    memory_region_add_subregion_common(mr, offset, subregion);
2182}
2183
2184void memory_region_add_subregion_overlap(MemoryRegion *mr,
2185                                         hwaddr offset,
2186                                         MemoryRegion *subregion,
2187                                         int priority)
2188{
2189    subregion->priority = priority;
2190    memory_region_add_subregion_common(mr, offset, subregion);
2191}
2192
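/*
 * Overlapping subregions are resolved by priority; the higher value wins
 * where the regions intersect.  A hedged sketch (names and addresses are
 * assumptions):
 *
 *   memory_region_add_subregion_overlap(sysmem, 0x0, &s->ram, 0);
 *   memory_region_add_subregion_overlap(sysmem, 0x0, &s->rom, 1);
 *   // While both are enabled, guest accesses at 0x0 hit &s->rom.
 */
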
2193void memory_region_del_subregion(MemoryRegion *mr,
2194                                 MemoryRegion *subregion)
2195{
2196    memory_region_transaction_begin();
2197    assert(subregion->container == mr);
2198    subregion->container = NULL;
2199    QTAILQ_REMOVE(&mr->subregions, subregion, subregions_link);
2200    memory_region_unref(subregion);
2201    memory_region_update_pending |= mr->enabled && subregion->enabled;
2202    memory_region_transaction_commit();
2203}
2204
2205void memory_region_set_enabled(MemoryRegion *mr, bool enabled)
2206{
2207    if (enabled == mr->enabled) {
2208        return;
2209    }
2210    memory_region_transaction_begin();
2211    mr->enabled = enabled;
2212    memory_region_update_pending = true;
2213    memory_region_transaction_commit();
2214}
2215
2216void memory_region_set_size(MemoryRegion *mr, uint64_t size)
2217{
2218    Int128 s = int128_make64(size);
2219
2220    if (size == UINT64_MAX) {
2221        s = int128_2_64();
2222    }
2223    if (int128_eq(s, mr->size)) {
2224        return;
2225    }
2226    memory_region_transaction_begin();
2227    mr->size = s;
2228    if (mr->ram) {
2229        memory_region_do_set_ram(mr);
2230    }
2231    memory_region_update_pending = true;
2232    memory_region_transaction_commit();
2233}
2234
2235void memory_region_set_address(MemoryRegion *mr, hwaddr addr)
2236{
2237    if (addr != mr->addr) {
2238        mr->addr = addr;
2239        memory_region_readd_subregion(mr);
2240    }
2241}
2242
2243void memory_region_set_alias_offset(MemoryRegion *mr, hwaddr offset)
2244{
2245    assert(mr->alias);
2246
2247    if (offset == mr->alias_offset) {
2248        return;
2249    }
2250
2251    memory_region_transaction_begin();
2252    mr->alias_offset = offset;
2253    memory_region_update_pending |= mr->enabled;
2254    memory_region_transaction_commit();
2255}
2256
2257uint64_t memory_region_get_alignment(const MemoryRegion *mr)
2258{
2259    return mr->align;
2260}
2261
2262static int cmp_flatrange_addr(const void *addr_, const void *fr_)
2263{
2264    const AddrRange *addr = addr_;
2265    const FlatRange *fr = fr_;
2266
2267    if (int128_le(addrrange_end(*addr), fr->addr.start)) {
2268        return -1;
2269    } else if (int128_ge(addr->start, addrrange_end(fr->addr))) {
2270        return 1;
2271    }
2272    return 0;
2273}
2274
2275static FlatRange *flatview_lookup(FlatView *view, AddrRange addr)
2276{
2277    return bsearch(&addr, view->ranges, view->nr,
2278                   sizeof(FlatRange), cmp_flatrange_addr);
2279}
2280
2281bool memory_region_is_mapped(MemoryRegion *mr)
2282{
2283    return mr->container ? true : false;
2284}
2285
2286/* Same as memory_region_find, but it does not add a reference to the
2287 * returned region.  It must be called from an RCU critical section.
2288 */
2289static MemoryRegionSection memory_region_find_rcu(MemoryRegion *mr,
2290                                                  hwaddr addr, uint64_t size)
2291{
2292    MemoryRegionSection ret = { .mr = NULL };
2293    MemoryRegion *root;
2294    AddressSpace *as;
2295    AddrRange range;
2296    FlatView *view;
2297    FlatRange *fr;
2298
2299    addr += mr->addr;
2300    for (root = mr; root->container; ) {
2301        root = root->container;
2302        addr += root->addr;
2303    }
2304
2305    as = memory_region_to_address_space(root);
2306    if (!as) {
2307        return ret;
2308    }
2309    range = addrrange_make(int128_make64(addr), int128_make64(size));
2310
2311    view = atomic_rcu_read(&as->current_map);
2312    fr = flatview_lookup(view, range);
2313    if (!fr) {
2314        return ret;
2315    }
2316
2317    while (fr > view->ranges && addrrange_intersects(fr[-1].addr, range)) {
2318        --fr;
2319    }
2320
2321    ret.mr = fr->mr;
2322    ret.address_space = as;
2323    range = addrrange_intersection(range, fr->addr);
2324    ret.offset_within_region = fr->offset_in_region;
2325    ret.offset_within_region += int128_get64(int128_sub(range.start,
2326                                                        fr->addr.start));
2327    ret.size = range.size;
2328    ret.offset_within_address_space = int128_get64(range.start);
2329    ret.readonly = fr->readonly;
2330    return ret;
2331}
2332
2333MemoryRegionSection memory_region_find(MemoryRegion *mr,
2334                                       hwaddr addr, uint64_t size)
2335{
2336    MemoryRegionSection ret;
2337    rcu_read_lock();
2338    ret = memory_region_find_rcu(mr, addr, size);
2339    if (ret.mr) {
2340        memory_region_ref(ret.mr);
2341    }
2342    rcu_read_unlock();
2343    return ret;
2344}
2345
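/*
 * Callers of memory_region_find() own a reference on the returned
 * region and must drop it when done.  A hedged sketch:
 *
 *   MemoryRegionSection sec = memory_region_find(sysmem, 0x1000, 4);
 *   if (sec.mr) {
 *       // ... use sec.offset_within_region, sec.size, ...
 *       memory_region_unref(sec.mr);
 *   }
 */
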
2346bool memory_region_present(MemoryRegion *container, hwaddr addr)
2347{
2348    MemoryRegion *mr;
2349
2350    rcu_read_lock();
2351    mr = memory_region_find_rcu(container, addr, 1).mr;
2352    rcu_read_unlock();
2353    return mr && mr != container;
2354}
2355
2356void address_space_sync_dirty_bitmap(AddressSpace *as)
2357{
2358    FlatView *view;
2359    FlatRange *fr;
2360
2361    view = address_space_get_flatview(as);
2362    FOR_EACH_FLAT_RANGE(fr, view) {
2363        MEMORY_LISTENER_UPDATE_REGION(fr, as, Forward, log_sync);
2364    }
2365    flatview_unref(view);
2366}
2367
2368void memory_global_dirty_log_start(void)
2369{
2370    global_dirty_log = true;
2371
2372    MEMORY_LISTENER_CALL_GLOBAL(log_global_start, Forward);
2373
2374    /* Refresh the DIRTY_MEMORY_MIGRATION bit.  */
2375    memory_region_transaction_begin();
2376    memory_region_update_pending = true;
2377    memory_region_transaction_commit();
2378}
2379
2380void memory_global_dirty_log_stop(void)
2381{
2382    global_dirty_log = false;
2383
2384    /* Refresh the DIRTY_MEMORY_MIGRATION bit.  */
2385    memory_region_transaction_begin();
2386    memory_region_update_pending = true;
2387    memory_region_transaction_commit();
2388
2389    MEMORY_LISTENER_CALL_GLOBAL(log_global_stop, Reverse);
2390}
2391
2392static void listener_add_address_space(MemoryListener *listener,
2393                                       AddressSpace *as)
2394{
2395    FlatView *view;
2396    FlatRange *fr;
2397
2398    if (listener->address_space_filter
2399        && listener->address_space_filter != as) {
2400        return;
2401    }
2402
2403    if (listener->begin) {
2404        listener->begin(listener);
2405    }
2406    if (global_dirty_log) {
2407        if (listener->log_global_start) {
2408            listener->log_global_start(listener);
2409        }
2410    }
2411
2412    view = address_space_get_flatview(as);
2413    FOR_EACH_FLAT_RANGE(fr, view) {
2414        MemoryRegionSection section = {
2415            .mr = fr->mr,
2416            .address_space = as,
2417            .offset_within_region = fr->offset_in_region,
2418            .size = fr->addr.size,
2419            .offset_within_address_space = int128_get64(fr->addr.start),
2420            .readonly = fr->readonly,
2421        };
2422        if (fr->dirty_log_mask && listener->log_start) {
2423            listener->log_start(listener, &section, 0, fr->dirty_log_mask);
2424        }
2425        if (listener->region_add) {
2426            listener->region_add(listener, &section);
2427        }
2428    }
2429    if (listener->commit) {
2430        listener->commit(listener);
2431    }
2432    flatview_unref(view);
2433}
2434
2435void memory_listener_register(MemoryListener *listener, AddressSpace *filter)
2436{
2437    MemoryListener *other = NULL;
2438    AddressSpace *as;
2439
2440    listener->address_space_filter = filter;
2441    if (QTAILQ_EMPTY(&memory_listeners)
2442        || listener->priority >= QTAILQ_LAST(&memory_listeners,
2443                                             memory_listeners)->priority) {
2444        QTAILQ_INSERT_TAIL(&memory_listeners, listener, link);
2445    } else {
2446        QTAILQ_FOREACH(other, &memory_listeners, link) {
2447            if (listener->priority < other->priority) {
2448                break;
2449            }
2450        }
2451        QTAILQ_INSERT_BEFORE(other, listener, link);
2452    }
2453
2454    QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
2455        listener_add_address_space(listener, as);
2456    }
2457}
2458
2459void memory_listener_unregister(MemoryListener *listener)
2460{
2461    QTAILQ_REMOVE(&memory_listeners, listener, link);
2462}
2463
2464void address_space_init(AddressSpace *as, MemoryRegion *root, const char *name)
2465{
2466    memory_region_ref(root);
2467    memory_region_transaction_begin();
2468    as->ref_count = 1;
2469    as->root = root;
2470    as->malloced = false;
2471    as->current_map = g_new(FlatView, 1);
2472    flatview_init(as->current_map);
2473    as->ioeventfd_nb = 0;
2474    as->ioeventfds = NULL;
2475    QTAILQ_INSERT_TAIL(&address_spaces, as, address_spaces_link);
2476    as->name = g_strdup(name ? name : "anonymous");
2477    address_space_init_dispatch(as);
2478    memory_region_update_pending |= root->enabled;
2479    memory_region_transaction_commit();
2480}
2481
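/*
 * A hedged sketch of creating a private address space for a DMA master
 * and reading through it ("s->dma_as", "s->dma_root" and the address
 * are assumptions, not part of this file):
 *
 *   uint8_t buf[4];
 *
 *   address_space_init(&s->dma_as, &s->dma_root, "dma-master");
 *   address_space_read(&s->dma_as, 0x1000, MEMTXATTRS_UNSPECIFIED,
 *                      buf, sizeof(buf));
 */
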
2482static void do_address_space_destroy(AddressSpace *as)
2483{
2484    MemoryListener *listener;
2485    bool do_free = as->malloced;
2486
2487    address_space_destroy_dispatch(as);
2488
2489    QTAILQ_FOREACH(listener, &memory_listeners, link) {
2490        assert(listener->address_space_filter != as);
2491    }
2492
2493    flatview_unref(as->current_map);
2494    g_free(as->name);
2495    g_free(as->ioeventfds);
2496    memory_region_unref(as->root);
2497    if (do_free) {
2498        g_free(as);
2499    }
2500}
2501
2502AddressSpace *address_space_init_shareable(MemoryRegion *root, const char *name)
2503{
2504    /* XILINX:
2505     *
2506     * Use the root MemoryRegion's name as name for shareable ASs.
2507     * Since we use device-trees to create the machine, we always
2508     * have sensible names for the root MR.
2509     */
2510    char *mr_name = object_get_canonical_path(OBJECT(root));
2511    AddressSpace *as;
2512
2513    QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
2514        if (root == as->root && as->malloced) {
2515            as->ref_count++;
2516            g_free(mr_name);
2517            return as;
2518        }
2519    }
2520
2521    as = g_malloc0(sizeof *as);
2522    address_space_init(as, root, mr_name);
2523    g_free(mr_name);    /* address_space_init() strdups the name */
2524    as->malloced = true;
2525    return as;
2524}
2525
2526void address_space_destroy(AddressSpace *as)
2527{
2528    MemoryRegion *root = as->root;
2529
2530    as->ref_count--;
2531    if (as->ref_count) {
2532        return;
2533    }
2534    /* Flush out anything from MemoryListeners listening in on this */
2535    memory_region_transaction_begin();
2536    as->root = NULL;
2537    memory_region_transaction_commit();
2538    QTAILQ_REMOVE(&address_spaces, as, address_spaces_link);
2539    address_space_unregister(as);
2540
2541    /* At this point, as->dispatch and as->current_map are dummy
2542     * entries that the guest should never use.  Wait for the old
2543     * values to expire before freeing the data.
2544     */
2545    as->root = root;
2546    call_rcu(as, do_address_space_destroy, rcu);
2547}
2548
2549typedef struct MemoryRegionList MemoryRegionList;
2550
2551struct MemoryRegionList {
2552    const MemoryRegion *mr;
2553    QTAILQ_ENTRY(MemoryRegionList) queue;
2554};
2555
2556typedef QTAILQ_HEAD(queue, MemoryRegionList) MemoryRegionListHead;
2557
2558static void mtree_print_mr(fprintf_function mon_printf, void *f,
2559                           const MemoryRegion *mr, unsigned int level,
2560                           hwaddr base,
2561                           MemoryRegionListHead *alias_print_queue)
2562{
2563    MemoryRegionList *new_ml, *ml, *next_ml;
2564    MemoryRegionListHead submr_print_queue;
2565    const MemoryRegion *submr;
2566    unsigned int i;
2567
2568    if (!mr) {
2569        return;
2570    }
2571
2572    for (i = 0; i < level; i++) {
2573        mon_printf(f, "  ");
2574    }
2575
2576    if (mr->alias) {
2577        MemoryRegionList *ml;
2578        bool found = false;
2579
2580        /* check if the alias is already in the queue */
2581        QTAILQ_FOREACH(ml, alias_print_queue, queue) {
2582            if (ml->mr == mr->alias) {
2583                found = true;
2584            }
2585        }
2586
2587        if (!found) {
2588            ml = g_new(MemoryRegionList, 1);
2589            ml->mr = mr->alias;
2590            QTAILQ_INSERT_TAIL(alias_print_queue, ml, queue);
2591        }
2592        mon_printf(f, TARGET_FMT_plx "-" TARGET_FMT_plx
2593                   " (prio %d, %c%c): alias %s @%s " TARGET_FMT_plx
2594                   "-" TARGET_FMT_plx "%s\n",
2595                   base + mr->addr,
2596                   base + mr->addr
2597                   + (int128_nz(mr->size) ?
2598                      (hwaddr)int128_get64(int128_sub(mr->size,
2599                                                      int128_one())) : 0),
2600                   mr->priority,
2601                   mr->romd_mode ? 'R' : '-',
2602                   !mr->readonly && !(mr->rom_device && mr->romd_mode) ? 'W'
2603                                                                       : '-',
2604                   memory_region_name(mr),
2605                   memory_region_name(mr->alias),
2606                   mr->alias_offset,
2607                   mr->alias_offset
2608                   + (int128_nz(mr->size) ?
2609                      (hwaddr)int128_get64(int128_sub(mr->size,
2610                                                      int128_one())) : 0),
2611                   mr->enabled ? "" : " [disabled]");
2612    } else {
2613        mon_printf(f,
2614                   TARGET_FMT_plx "-" TARGET_FMT_plx " (prio %d, %c%c): %s%s\n",
2615                   base + mr->addr,
2616                   base + mr->addr
2617                   + (int128_nz(mr->size) ?
2618                      (hwaddr)int128_get64(int128_sub(mr->size,
2619                                                      int128_one())) : 0),
2620                   mr->priority,
2621                   mr->romd_mode ? 'R' : '-',
2622                   !mr->readonly && !(mr->rom_device && mr->romd_mode) ? 'W'
2623                                                                       : '-',
2624                   memory_region_name(mr),
2625                   mr->enabled ? "" : " [disabled]");
2626    }
2627
2628    QTAILQ_INIT(&submr_print_queue);
2629
2630    QTAILQ_FOREACH(submr, &mr->subregions, subregions_link) {
2631        new_ml = g_new(MemoryRegionList, 1);
2632        new_ml->mr = submr;
2633        QTAILQ_FOREACH(ml, &submr_print_queue, queue) {
2634            if (new_ml->mr->addr < ml->mr->addr ||
2635                (new_ml->mr->addr == ml->mr->addr &&
2636                 new_ml->mr->priority > ml->mr->priority)) {
2637                QTAILQ_INSERT_BEFORE(ml, new_ml, queue);
2638                new_ml = NULL;
2639                break;
2640            }
2641        }
2642        if (new_ml) {
2643            QTAILQ_INSERT_TAIL(&submr_print_queue, new_ml, queue);
2644        }
2645    }
2646
2647    QTAILQ_FOREACH(ml, &submr_print_queue, queue) {
2648        mtree_print_mr(mon_printf, f, ml->mr, level + 1, base + mr->addr,
2649                       alias_print_queue);
2650    }
2651
2652    QTAILQ_FOREACH_SAFE(ml, &submr_print_queue, queue, next_ml) {
2653        g_free(ml);
2654    }
2655}
2656
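/* Dump every address space's memory-region tree; this is what the
 * monitor's "info mtree" command prints. */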
2657void mtree_info(fprintf_function mon_printf, void *f)
2658{
2659    MemoryRegionListHead ml_head;
2660    MemoryRegionList *ml, *ml2;
2661    AddressSpace *as;
2662
2663    QTAILQ_INIT(&ml_head);
2664
2665    QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
2666        mon_printf(f, "address-space: %s\n", as->name);
2667        mtree_print_mr(mon_printf, f, as->root, 1, 0, &ml_head);
2668        mon_printf(f, "\n");
2669    }
2670
2671    /* print aliased regions */
2672    QTAILQ_FOREACH(ml, &ml_head, queue) {
2673        mon_printf(f, "memory-region: %s\n", memory_region_name(ml->mr));
2674        mtree_print_mr(mon_printf, f, ml->mr, 1, 0, &ml_head);
2675        mon_printf(f, "\n");
2676    }
2677
2678    QTAILQ_FOREACH_SAFE(ml, &ml_head, queue, ml2) {
2679        g_free(ml);
2680    }
2681}
2682
2683static bool memory_region_parse_reg(FDTGenericMMap *obj,
2684                                    FDTGenericRegPropInfo reg, Error **errp)
2685{
2686    MemoryRegion *mr = MEMORY_REGION(obj);
2687    uint64_t base_addr = ~0ull;
2688    uint64_t total_size = 0;
2689    uint64_t max_addr = 0;
2690    int i;
2691
2692    if (!reg.n) {
2693        return false;
2694    }
2695
2696    for (i = 0; i < reg.n; ++i) {
2697        base_addr = MIN(base_addr, reg.a[i]);
2698        max_addr = MAX(max_addr, reg.a[i] + reg.s[i]);
2699        total_size += reg.s[i];
2700        if (reg.p[i] != reg.p[0]) {
2701            error_setg(errp, "FDT generic memory parser does not support "
2702                       "mixed priorities");
2703            return false;
2704        }
2705    }
2706
2707    if (total_size != max_addr - base_addr) {
2708        error_setg(errp, "FDT generic memory parser does not "
2709                   "support discontiguous or overlapping memory regions");
2710        return false;
2711    }
2712
2713    /* FIXME: parent should not be optional but we need to implement
2714     * reg-extended in the kernel before we can do things properly
2715     */
2716    if (reg.parents[0]) {
2717        object_property_set_link(OBJECT(mr), reg.parents[0], "container",
2718                                 &error_abort);
2719    }
2720    object_property_set_int(OBJECT(mr), total_size, "size", &error_abort);
2721    object_property_set_int(OBJECT(mr), base_addr, "addr", &error_abort);
2722    object_property_set_int(OBJECT(mr), reg.p[0], "priority", &error_abort);
2723    return false;
2724}
2725
2726static void memory_region_class_init(ObjectClass *oc, void *data)
2727{
2728    FDTGenericMMapClass *fmc = FDT_GENERIC_MMAP_CLASS(oc);
2729
2730    fmc->parse_reg = memory_region_parse_reg;
2731}
2732
2733static const TypeInfo memory_region_info = {
2734    .parent             = TYPE_OBJECT,
2735    .name               = TYPE_MEMORY_REGION,
2736    .instance_size      = sizeof(MemoryRegion),
2737    .instance_init      = memory_region_initfn,
2738    .instance_finalize  = memory_region_finalize,
2739    .class_init         = memory_region_class_init,
2740    .interfaces         = (InterfaceInfo[]) {
2741        { TYPE_FDT_GENERIC_MMAP },
2742        { },
2743    },
2744};
2745
2746static bool memory_transaction_attr_get_secure(Object *obj, Error **errp)
2747{
2748    MemTxAttrs *mattr = MEMORY_TRANSACTION_ATTR(obj);
2749    return mattr->secure;
2750}
2751
2752static void memory_transaction_attr_set_secure(Object *obj, bool value,
2753                                               Error **errp)
2754{
2755    MemTxAttrs *mattr = MEMORY_TRANSACTION_ATTR(obj);
2756    mattr->secure = value;
2757}
2758
2759static void mattr_get_master_id(Object *obj, Visitor *v, const char *name,
2760                                void *opaque, Error **errp)
2761{
2762    MemTxAttrs *mattr = MEMORY_TRANSACTION_ATTR(obj);
2763    uint64_t value = mattr->master_id;
2764
2765    visit_type_uint64(v, name, &value, errp);
2766}
2767
2768
2769static void mattr_set_master_id(Object *obj, Visitor *v, const char *name,
2770                                void *opaque, Error **errp)
2771{
2772    MemTxAttrs *mattr = MEMORY_TRANSACTION_ATTR(obj);
2773    Error *local_err = NULL;
2774    uint64_t value;
2775
2776    visit_type_uint64(v, name, &value, &local_err);
2777    if (local_err) {
2778        error_propagate(errp, local_err);
2779        return;
2780    }
2781    mattr->master_id = value;
2782}
2779
2780
2781static void memory_transaction_attr_initfn(Object *obj)
2782{
2783    MemTxAttrs *mattr = MEMORY_TRANSACTION_ATTR(obj);
2784
2785    object_property_add_bool(OBJECT(mattr), "secure",
2786                        memory_transaction_attr_get_secure,
2787                        memory_transaction_attr_set_secure,
2788                        NULL);
2789    object_property_add(OBJECT(mattr), "master-id", "uint64",
2790                        mattr_get_master_id,
2791                        mattr_set_master_id,
2792                        NULL, NULL, &error_abort);
2793}
2794
2795static const TypeInfo memory_transaction_attr_info = {
2796    .parent             = TYPE_OBJECT,
2797    .name               = TYPE_MEMORY_TRANSACTION_ATTR,
2798    .instance_size      = sizeof(MemTxAttrs),
2799    .instance_init      = memory_transaction_attr_initfn,
2800    .interfaces         = (InterfaceInfo[]) {
2801        { TYPE_FDT_GENERIC_MMAP },
2802        { },
2803    },
2804};
2805
2806static void memory_register_types(void)
2807{
2808    type_register_static(&memory_region_info);
2809    type_register_static(&memory_transaction_attr_info);
2810}
2811
2812type_init(memory_register_types)
2813