/* qemu/include/exec/memory.h */
/*
 * Physical memory management API
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates
 *
 * Authors:
 *  Avi Kivity <avi@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#ifndef MEMORY_H
#define MEMORY_H

#ifndef CONFIG_USER_ONLY

#define DIRTY_MEMORY_VGA       0
#define DIRTY_MEMORY_CODE      1
#define DIRTY_MEMORY_MIGRATION 2
#define DIRTY_MEMORY_NUM       3        /* num of dirty bits */

#include "exec/cpu-common.h"
#ifndef CONFIG_USER_ONLY
#include "exec/hwaddr.h"
#endif
#include "exec/memattrs.h"
#include "qemu/queue.h"
#include "qemu/int128.h"
#include "qemu/notify.h"
#include "qom/object.h"
#include "qemu/rcu.h"

extern const char *machine_path;
#define RAM_ADDR_INVALID (~(ram_addr_t)0)

#define MAX_PHYS_ADDR_SPACE_BITS 62
#define MAX_PHYS_ADDR            (((hwaddr)1 << MAX_PHYS_ADDR_SPACE_BITS) - 1)

#define TYPE_MEMORY_REGION "qemu:memory-region"
#define MEMORY_REGION(obj) \
        OBJECT_CHECK(MemoryRegion, (obj), TYPE_MEMORY_REGION)

#define TYPE_MEMORY_TRANSACTION_ATTR "qemu:memory-transaction-attr"
#define MEMORY_TRANSACTION_ATTR(obj) \
        OBJECT_CHECK(MemTxAttrs, (obj), TYPE_MEMORY_TRANSACTION_ATTR)

  49typedef struct MemoryRegionOps MemoryRegionOps;
  50typedef struct MemoryRegionMmio MemoryRegionMmio;
  51
  52typedef struct MemoryTransaction
  53{
  54    union {
  55        /*
  56         * Data is passed by values up to 64bit sizes. Beyond
  57         * that, a pointer is passed in p8.
  58         *
  59         * Note that p8 has no alignment restrictions.
  60         */
  61        uint8_t *p8;
  62        uint64_t u64;
  63        uint32_t u32;
  64        uint16_t u16;
  65        uint8_t  u8;
  66    } data;
  67    bool rw;
  68    hwaddr addr;
  69    unsigned int size;
  70    MemTxAttrs attr;
  71    void *opaque;
  72} MemoryTransaction;
  73
  74struct MemoryRegionMmio {
  75    CPUReadMemoryFunc *read[3];
  76    CPUWriteMemoryFunc *write[3];
  77};
  78
  79typedef struct IOMMUTLBEntry IOMMUTLBEntry;
  80
  81/* See address_space_translate: bit 0 is read, bit 1 is write.  */
  82typedef enum {
  83    IOMMU_NONE = 0,
  84    IOMMU_RO   = 1,
  85    IOMMU_WO   = 2,
  86    IOMMU_RW   = 3,
  87} IOMMUAccessFlags;
  88
  89struct IOMMUTLBEntry {
  90    AddressSpace    *target_as;
  91    hwaddr           iova;
  92    hwaddr           translated_addr;
  93    hwaddr           addr_mask;  /* 0xfff = 4k translation */
  94    IOMMUAccessFlags perm;
  95};
  96
  97/*
  98 * Bitmap for different IOMMUNotifier capabilities. Each notifier can
  99 * register with one or multiple IOMMU Notifier capability bit(s).
 100 */
 101typedef enum {
 102    IOMMU_NOTIFIER_NONE = 0,
 103    /* Notify cache invalidations */
 104    IOMMU_NOTIFIER_UNMAP = 0x1,
 105    /* Notify entry changes (newly created entries) */
 106    IOMMU_NOTIFIER_MAP = 0x2,
 107} IOMMUNotifierFlag;
 108
 109#define IOMMU_NOTIFIER_ALL (IOMMU_NOTIFIER_MAP | IOMMU_NOTIFIER_UNMAP)
 110
 111struct IOMMUNotifier {
 112    void (*notify)(struct IOMMUNotifier *notifier, IOMMUTLBEntry *data);
 113    IOMMUNotifierFlag notifier_flags;
 114    QLIST_ENTRY(IOMMUNotifier) node;
 115};
 116typedef struct IOMMUNotifier IOMMUNotifier;
 117
 118/* New-style MMIO accessors can indicate that the transaction failed.
 119 * A zero (MEMTX_OK) response means success; anything else is a failure
 120 * of some kind. The memory subsystem will bitwise-OR together results
 121 * if it is synthesizing an operation from multiple smaller accesses.
 122 */
 123#define MEMTX_OK 0
 124#define MEMTX_ERROR             (1U << 0) /* device returned an error */
 125#define MEMTX_DECODE_ERROR      (1U << 1) /* nothing at that address */
 126typedef uint32_t MemTxResult;
 127
 128/*
 129 * Memory region callbacks
 130 */
 131struct MemoryRegionOps {
 132    /* FIXME: Remove */
 133    void (*access)(MemoryTransaction *tr);
 134
 135    /* Read from the memory region. @addr is relative to @mr; @size is
 136     * in bytes. */
 137    uint64_t (*read)(void *opaque,
 138                     hwaddr addr,
 139                     unsigned size);
 140    /* Write to the memory region. @addr is relative to @mr; @size is
 141     * in bytes. */
 142    void (*write)(void *opaque,
 143                  hwaddr addr,
 144                  uint64_t data,
 145                  unsigned size);
 146
 147    MemTxResult (*read_with_attrs)(void *opaque,
 148                                   hwaddr addr,
 149                                   uint64_t *data,
 150                                   unsigned size,
 151                                   MemTxAttrs attrs);
 152    MemTxResult (*write_with_attrs)(void *opaque,
 153                                    hwaddr addr,
 154                                    uint64_t data,
 155                                    unsigned size,
 156                                    MemTxAttrs attrs);
 157
 158    enum device_endian endianness;
 159    /* Guest-visible constraints: */
 160    struct {
 161        /* If nonzero, specify bounds on access sizes beyond which a machine
 162         * check is thrown.
 163         */
 164        unsigned min_access_size;
 165        unsigned max_access_size;
 166        /* If true, unaligned accesses are supported.  Otherwise unaligned
 167         * accesses throw machine checks.
 168         */
 169         bool unaligned;
 170        /*
 171         * If present, and returns #false, the transaction is not accepted
 172         * by the device (and results in machine dependent behaviour such
 173         * as a machine check exception).
 174         */
 175        bool (*accepts)(void *opaque, hwaddr addr,
 176                        unsigned size, bool is_write);
 177    } valid;
 178    /* Internal implementation constraints: */
 179    struct {
 180        /* If nonzero, specifies the minimum size implemented.  Smaller sizes
 181         * will be rounded upwards and a partial result will be returned.
 182         */
 183        unsigned min_access_size;
 184        /* If nonzero, specifies the maximum size implemented.  Larger sizes
 185         * will be done as a series of accesses with smaller sizes.
 186         */
 187        unsigned max_access_size;
 188        /* If true, unaligned accesses are supported.  Otherwise all accesses
 189         * are converted to (possibly multiple) naturally aligned accesses.
 190         */
 191        bool unaligned;
 192    } impl;
 193
 194    /* If .read and .write are not present, old_mmio may be used for
 195     * backwards compatibility with old mmio registration
 196     */
 197    const MemoryRegionMmio old_mmio;
 198};
 199
 200typedef struct MemoryRegionIOMMUOps MemoryRegionIOMMUOps;
 201
 202struct MemoryRegionIOMMUOps {
 203    /* Return a TLB entry that contains a given address. */
 204    IOMMUTLBEntry (*translate)(MemoryRegion *iommu, hwaddr addr, bool is_write);
 205    IOMMUTLBEntry (*translate_attr)(MemoryRegion *iommu, hwaddr addr,
 206                                    bool is_write, MemTxAttrs *attr);
 207    /* Returns minimum supported page size */
 208    uint64_t (*get_min_page_size)(MemoryRegion *iommu);
 209    /* Called when IOMMU Notifier flag changed */
 210    void (*notify_flag_changed)(MemoryRegion *iommu,
 211                                IOMMUNotifierFlag old_flags,
 212                                IOMMUNotifierFlag new_flags);
 213};
 214
 215typedef struct CoalescedMemoryRange CoalescedMemoryRange;
 216typedef struct MemoryRegionIoeventfd MemoryRegionIoeventfd;
 217
 218struct MemoryRegion {
 219    Object parent_obj;
 220
 221    /* All fields are private - violators will be prosecuted */
 222
 223    /* The following fields should fit in a cache line */
 224    bool romd_mode;
 225    uint8_t ram;
 226    bool subpage;
 227    bool readonly; /* For RAM regions */
 228    bool rom_device;
 229    bool flush_coalesced_mmio;
 230    bool global_locking;
 231    uint8_t dirty_log_mask;
 232    RAMBlock *ram_block;
 233    Object *owner;
 234    const MemoryRegionIOMMUOps *iommu_ops;
 235
 236    const MemoryRegionOps *ops;
 237    void *opaque;
 238    MemoryRegion *container;
 239    Int128 size;
 240    hwaddr addr;
 241    void (*destructor)(MemoryRegion *mr);
 242    uint64_t align;
 243    bool terminates;
 244    bool ram_device;
 245    bool enabled;
 246    bool warning_printed; /* For reservations */
 247    uint8_t vga_logging_count;
 248    MemoryRegion *alias;
 249    hwaddr alias_offset;
 250    int32_t priority;
 251    bool may_overlap;
 252    QTAILQ_HEAD(subregions, MemoryRegion) subregions;
 253    QTAILQ_ENTRY(MemoryRegion) subregions_link;
 254    QTAILQ_HEAD(coalesced_ranges, CoalescedMemoryRange) coalesced;
 255    const char *name;
 256    unsigned ioeventfd_nb;
 257    MemoryRegionIoeventfd *ioeventfds;
 258    QLIST_HEAD(, IOMMUNotifier) iommu_notify;
 259    IOMMUNotifierFlag iommu_notify_flags;
 260};
 261
 262/**
 263 * MemoryListener: callbacks structure for updates to the physical memory map
 264 *
 265 * Allows a component to adjust to changes in the guest-visible memory map.
 266 * Use with memory_listener_register() and memory_listener_unregister().
 267 */
 268struct MemoryListener {
 269    void (*begin)(MemoryListener *listener);
 270    void (*commit)(MemoryListener *listener);
 271    void (*region_add)(MemoryListener *listener, MemoryRegionSection *section);
 272    void (*region_del)(MemoryListener *listener, MemoryRegionSection *section);
 273    void (*region_nop)(MemoryListener *listener, MemoryRegionSection *section);
 274    void (*log_start)(MemoryListener *listener, MemoryRegionSection *section,
 275                      int old, int new);
 276    void (*log_stop)(MemoryListener *listener, MemoryRegionSection *section,
 277                     int old, int new);
 278    void (*log_sync)(MemoryListener *listener, MemoryRegionSection *section);
 279    void (*log_global_start)(MemoryListener *listener);
 280    void (*log_global_stop)(MemoryListener *listener);
 281    void (*eventfd_add)(MemoryListener *listener, MemoryRegionSection *section,
 282                        bool match_data, uint64_t data, EventNotifier *e);
 283    void (*eventfd_del)(MemoryListener *listener, MemoryRegionSection *section,
 284                        bool match_data, uint64_t data, EventNotifier *e);
 285    void (*coalesced_mmio_add)(MemoryListener *listener, MemoryRegionSection *section,
 286                               hwaddr addr, hwaddr len);
 287    void (*coalesced_mmio_del)(MemoryListener *listener, MemoryRegionSection *section,
 288                               hwaddr addr, hwaddr len);
 289    /* Lower = earlier (during add), later (during del) */
 290    unsigned priority;
 291    AddressSpace *address_space;
 292    QTAILQ_ENTRY(MemoryListener) link;
 293    QTAILQ_ENTRY(MemoryListener) link_as;
 294};
 295
 296/**
 297 * AddressSpace: describes a mapping of addresses to #MemoryRegion objects
 298 */
 299struct AddressSpace {
 300    /* All fields are private. */
 301    struct rcu_head rcu;
 302    char *name;
 303    MemoryRegion *root;
 304    int ref_count;
 305    bool malloced;
 306
 307    /* Accessed via RCU.  */
 308    struct FlatView *current_map;
 309
 310    int ioeventfd_nb;
 311    struct MemoryRegionIoeventfd *ioeventfds;
 312    struct AddressSpaceDispatch *dispatch;
 313    struct AddressSpaceDispatch *next_dispatch;
 314    MemoryListener dispatch_listener;
 315    QTAILQ_HEAD(memory_listeners_as, MemoryListener) listeners;
 316    QTAILQ_ENTRY(AddressSpace) address_spaces_link;
 317};
 318
 319/**
 320 * MemoryRegionSection: describes a fragment of a #MemoryRegion
 321 *
 322 * @mr: the region, or %NULL if empty
 323 * @address_space: the address space the region is mapped in
 324 * @offset_within_region: the beginning of the section, relative to @mr's start
 325 * @size: the size of the section; will not exceed @mr's boundaries
 326 * @offset_within_address_space: the address of the first byte of the section
 327 *     relative to the region's address space
 328 * @readonly: writes to this section are ignored
 329 */
 330struct MemoryRegionSection {
 331    MemoryRegion *mr;
 332    AddressSpace *address_space;
 333    hwaddr offset_within_region;
 334    Int128 size;
 335    hwaddr offset_within_address_space;
 336    bool readonly;
 337};
 338
 339/**
 340 * memory_region_init: Initialize a memory region
 341 *
 342 * The region typically acts as a container for other memory regions.  Use
 343 * memory_region_add_subregion() to add subregions.
 344 *
 345 * @mr: the #MemoryRegion to be initialized
 346 * @owner: the object that tracks the region's reference count
 347 * @name: used for debugging; not visible to the user or ABI
 348 * @size: size of the region; any subregions beyond this size will be clipped
 349 */
 350void memory_region_init(MemoryRegion *mr,
 351                        struct Object *owner,
 352                        const char *name,
 353                        uint64_t size);
 354
 355/**
 356 * memory_region_ref: Add 1 to a memory region's reference count
 357 *
 358 * Whenever memory regions are accessed outside the BQL, they need to be
 359 * preserved against hot-unplug.  MemoryRegions actually do not have their
 360 * own reference count; they piggyback on a QOM object, their "owner".
 361 * This function adds a reference to the owner.
 362 *
 363 * All MemoryRegions must have an owner if they can disappear, even if the
 364 * device they belong to operates exclusively under the BQL.  This is because
 365 * the region could be returned at any time by memory_region_find, and this
 366 * is usually under guest control.
 367 *
 368 * @mr: the #MemoryRegion
 369 */
 370void memory_region_ref(MemoryRegion *mr);
 371
 372/**
 373 * memory_region_unref: Remove 1 to a memory region's reference count
 374 *
 375 * Whenever memory regions are accessed outside the BQL, they need to be
 376 * preserved against hot-unplug.  MemoryRegions actually do not have their
 377 * own reference count; they piggyback on a QOM object, their "owner".
 378 * This function removes a reference to the owner and possibly destroys it.
 379 *
 380 * @mr: the #MemoryRegion
 381 */
 382void memory_region_unref(MemoryRegion *mr);
 383
 384/**
 385 * memory_region_init_io: Initialize an I/O memory region.
 386 *
 387 * Accesses into the region will cause the callbacks in @ops to be called.
 388 * if @size is nonzero, subregions will be clipped to @size.
 389 *
 390 * @mr: the #MemoryRegion to be initialized.
 391 * @owner: the object that tracks the region's reference count
 392 * @ops: a structure containing read and write callbacks to be used when
 393 *       I/O is performed on the region.
 394 * @opaque: passed to the read and write callbacks of the @ops structure.
 395 * @name: used for debugging; not visible to the user or ABI
 396 * @size: size of the region.
 397 */
 398void memory_region_init_io(MemoryRegion *mr,
 399                           struct Object *owner,
 400                           const MemoryRegionOps *ops,
 401                           void *opaque,
 402                           const char *name,
 403                           uint64_t size);
 404
 405/**
 406 * memory_region_init_ram:  Initialize RAM memory region.  Accesses into the
 407 *                          region will modify memory directly.
 408 *
 409 * @mr: the #MemoryRegion to be initialized.
 410 * @owner: the object that tracks the region's reference count
 411 * @name: the name of the region.
 412 * @size: size of the region.
 413 * @errp: pointer to Error*, to store an error if it happens.
 414 */
 415void memory_region_init_ram(MemoryRegion *mr,
 416                            struct Object *owner,
 417                            const char *name,
 418                            uint64_t size,
 419                            Error **errp);
 420
 421/**
 422 * memory_region_init_resizeable_ram:  Initialize memory region with resizeable
 423 *                                     RAM.  Accesses into the region will
 424 *                                     modify memory directly.  Only an initial
 425 *                                     portion of this RAM is actually used.
 426 *                                     The used size can change across reboots.
 427 *
 428 * @mr: the #MemoryRegion to be initialized.
 429 * @owner: the object that tracks the region's reference count
 430 * @name: the name of the region.
 431 * @size: used size of the region.
 432 * @max_size: max size of the region.
 433 * @resized: callback to notify owner about used size change.
 434 * @errp: pointer to Error*, to store an error if it happens.
 435 */
 436void memory_region_init_resizeable_ram(MemoryRegion *mr,
 437                                       struct Object *owner,
 438                                       const char *name,
 439                                       uint64_t size,
 440                                       uint64_t max_size,
 441                                       void (*resized)(const char*,
 442                                                       uint64_t length,
 443                                                       void *host),
 444                                       Error **errp);
 445#ifdef __linux__
 446/**
 447 * memory_region_init_ram_from_file:  Initialize RAM memory region with a
 448 *                                    mmap-ed backend.
 449 *
 450 * @mr: the #MemoryRegion to be initialized.
 451 * @owner: the object that tracks the region's reference count
 452 * @name: the name of the region.
 453 * @size: size of the region.
 454 * @share: %true if memory must be mmaped with the MAP_SHARED flag
 455 * @path: the path in which to allocate the RAM.
 456 * @errp: pointer to Error*, to store an error if it happens.
 457 */
 458void memory_region_init_ram_from_file(MemoryRegion *mr,
 459                                      struct Object *owner,
 460                                      const char *name,
 461                                      uint64_t size,
 462                                      bool share,
 463                                      const char *path,
 464                                      Error **errp);
 465#endif
 466
 467/**
 468 * memory_region_init_ram_ptr:  Initialize RAM memory region from a
 469 *                              user-provided pointer.  Accesses into the
 470 *                              region will modify memory directly.
 471 *
 472 * @mr: the #MemoryRegion to be initialized.
 473 * @owner: the object that tracks the region's reference count
 474 * @name: the name of the region.
 475 * @size: size of the region.
 476 * @ptr: memory to be mapped; must contain at least @size bytes.
 477 */
 478void memory_region_init_ram_ptr(MemoryRegion *mr,
 479                                struct Object *owner,
 480                                const char *name,
 481                                uint64_t size,
 482                                void *ptr);
 483
 484/**
 485 * memory_region_init_ram_device_ptr:  Initialize RAM device memory region from
 486 *                                     a user-provided pointer.
 487 *
 488 * A RAM device represents a mapping to a physical device, such as to a PCI
 489 * MMIO BAR of an vfio-pci assigned device.  The memory region may be mapped
 490 * into the VM address space and access to the region will modify memory
 491 * directly.  However, the memory region should not be included in a memory
 492 * dump (device may not be enabled/mapped at the time of the dump), and
 493 * operations incompatible with manipulating MMIO should be avoided.  Replaces
 494 * skip_dump flag.
 495 *
 496 * @mr: the #MemoryRegion to be initialized.
 497 * @owner: the object that tracks the region's reference count
 498 * @name: the name of the region.
 499 * @size: size of the region.
 500 * @ptr: memory to be mapped; must contain at least @size bytes.
 501 */
 502void memory_region_init_ram_device_ptr(MemoryRegion *mr,
 503                                       struct Object *owner,
 504                                       const char *name,
 505                                       uint64_t size,
 506                                       void *ptr);
 507
 508/**
 509 * memory_region_init_alias: Initialize a memory region that aliases all or a
 510 *                           part of another memory region.
 511 *
 512 * @mr: the #MemoryRegion to be initialized.
 513 * @owner: the object that tracks the region's reference count
 514 * @name: used for debugging; not visible to the user or ABI
 515 * @orig: the region to be referenced; @mr will be equivalent to
 516 *        @orig between @offset and @offset + @size - 1.
 517 * @offset: start of the section in @orig to be referenced.
 518 * @size: size of the region.
 519 */
 520void memory_region_init_alias(MemoryRegion *mr,
 521                              struct Object *owner,
 522                              const char *name,
 523                              MemoryRegion *orig,
 524                              hwaddr offset,
 525                              uint64_t size);
 526
 527/**
 528 * memory_region_init_rom: Initialize a ROM memory region.
 529 *
 530 * This has the same effect as calling memory_region_init_ram()
 531 * and then marking the resulting region read-only with
 532 * memory_region_set_readonly().
 533 *
 534 * @mr: the #MemoryRegion to be initialized.
 535 * @owner: the object that tracks the region's reference count
 536 * @name: the name of the region.
 537 * @size: size of the region.
 538 * @errp: pointer to Error*, to store an error if it happens.
 539 */
 540void memory_region_init_rom(MemoryRegion *mr,
 541                            struct Object *owner,
 542                            const char *name,
 543                            uint64_t size,
 544                            Error **errp);
 545
 546/**
 547 * memory_region_init_rom_device:  Initialize a ROM memory region.  Writes are
 548 *                                 handled via callbacks.
 549 *
 550 * @mr: the #MemoryRegion to be initialized.
 551 * @owner: the object that tracks the region's reference count
 552 * @ops: callbacks for write access handling (must not be NULL).
 553 * @name: the name of the region.
 554 * @size: size of the region.
 555 * @errp: pointer to Error*, to store an error if it happens.
 556 */
 557void memory_region_init_rom_device(MemoryRegion *mr,
 558                                   struct Object *owner,
 559                                   const MemoryRegionOps *ops,
 560                                   void *opaque,
 561                                   const char *name,
 562                                   uint64_t size,
 563                                   Error **errp);
 564
 565/**
 566 * memory_region_init_reservation: Initialize a memory region that reserves
 567 *                                 I/O space.
 568 *
 569 * A reservation region primariy serves debugging purposes.  It claims I/O
 570 * space that is not supposed to be handled by QEMU itself.  Any access via
 571 * the memory API will cause an abort().
 572 * This function is deprecated. Use memory_region_init_io() with NULL
 573 * callbacks instead.
 574 *
 575 * @mr: the #MemoryRegion to be initialized
 576 * @owner: the object that tracks the region's reference count
 577 * @name: used for debugging; not visible to the user or ABI
 578 * @size: size of the region.
 579 */
 580static inline void memory_region_init_reservation(MemoryRegion *mr,
 581                                    Object *owner,
 582                                    const char *name,
 583                                    uint64_t size)
 584{
 585    memory_region_init_io(mr, owner, NULL, mr, name, size);
 586}
 587
 588/**
 589 * memory_region_init_iommu: Initialize a memory region that translates
 590 * addresses
 591 *
 592 * An IOMMU region translates addresses and forwards accesses to a target
 593 * memory region.
 594 *
 595 * @mr: the #MemoryRegion to be initialized
 596 * @owner: the object that tracks the region's reference count
 597 * @ops: a function that translates addresses into the @target region
 598 * @name: used for debugging; not visible to the user or ABI
 599 * @size: size of the region.
 600 */
 601void memory_region_init_iommu(MemoryRegion *mr,
 602                              struct Object *owner,
 603                              const MemoryRegionIOMMUOps *ops,
 604                              const char *name,
 605                              uint64_t size);
 606
 607/**
 608 * memory_region_owner: get a memory region's owner.
 609 *
 610 * @mr: the memory region being queried.
 611 */
 612struct Object *memory_region_owner(MemoryRegion *mr);
 613
 614/**
 615 * memory_region_size: get a memory region's size.
 616 *
 617 * @mr: the memory region being queried.
 618 */
 619uint64_t memory_region_size(MemoryRegion *mr);
 620
 621/**
 622 * memory_region_is_ram: check whether a memory region is random access
 623 *
 624 * Returns %true is a memory region is random access.
 625 *
 626 * @mr: the memory region being queried
 627 */
 628static inline bool memory_region_is_ram(MemoryRegion *mr)
 629{
 630    return mr->ram;
 631}
 632
 633/**
 634 * memory_region_is_ram_device: check whether a memory region is a ram device
 635 *
 636 * Returns %true is a memory region is a device backed ram region
 637 *
 638 * @mr: the memory region being queried
 639 */
 640bool memory_region_is_ram_device(MemoryRegion *mr);
 641
 642/**
 643 * memory_region_is_romd: check whether a memory region is in ROMD mode
 644 *
 645 * Returns %true if a memory region is a ROM device and currently set to allow
 646 * direct reads.
 647 *
 648 * @mr: the memory region being queried
 649 */
 650static inline bool memory_region_is_romd(MemoryRegion *mr)
 651{
 652    return mr->rom_device && mr->romd_mode;
 653}
 654
 655/**
 656 * memory_region_is_iommu: check whether a memory region is an iommu
 657 *
 658 * Returns %true is a memory region is an iommu.
 659 *
 660 * @mr: the memory region being queried
 661 */
 662static inline bool memory_region_is_iommu(MemoryRegion *mr)
 663{
 664    return mr->iommu_ops;
 665}
 666
 667
 668/**
 669 * memory_region_iommu_get_min_page_size: get minimum supported page size
 670 * for an iommu
 671 *
 672 * Returns minimum supported page size for an iommu.
 673 *
 674 * @mr: the memory region being queried
 675 */
 676uint64_t memory_region_iommu_get_min_page_size(MemoryRegion *mr);
 677
 678/**
 679 * memory_region_notify_iommu: notify a change in an IOMMU translation entry.
 680 *
 681 * The notification type will be decided by entry.perm bits:
 682 *
 683 * - For UNMAP (cache invalidation) notifies: set entry.perm to IOMMU_NONE.
 684 * - For MAP (newly added entry) notifies: set entry.perm to the
 685 *   permission of the page (which is definitely !IOMMU_NONE).
 686 *
 687 * Note: for any IOMMU implementation, an in-place mapping change
 688 * should be notified with an UNMAP followed by a MAP.
 689 *
 690 * @mr: the memory region that was changed
 691 * @entry: the new entry in the IOMMU translation table.  The entry
 692 *         replaces all old entries for the same virtual I/O address range.
 693 *         Deleted entries have .@perm == 0.
 694 */
 695void memory_region_notify_iommu(MemoryRegion *mr,
 696                                IOMMUTLBEntry entry);
 697
 698/**
 699 * memory_region_register_iommu_notifier: register a notifier for changes to
 700 * IOMMU translation entries.
 701 *
 702 * @mr: the memory region to observe
 703 * @n: the IOMMUNotifier to be added; the notify callback receives a
 704 *     pointer to an #IOMMUTLBEntry as the opaque value; the pointer
 705 *     ceases to be valid on exit from the notifier.
 706 */
 707void memory_region_register_iommu_notifier(MemoryRegion *mr,
 708                                           IOMMUNotifier *n);
 709
 710/**
 711 * memory_region_iommu_replay: replay existing IOMMU translations to
 712 * a notifier with the minimum page granularity returned by
 713 * mr->iommu_ops->get_page_size().
 714 *
 715 * @mr: the memory region to observe
 716 * @n: the notifier to which to replay iommu mappings
 717 * @is_write: Whether to treat the replay as a translate "write"
 718 *     through the iommu
 719 */
 720void memory_region_iommu_replay(MemoryRegion *mr, IOMMUNotifier *n,
 721                                bool is_write);
 722
 723/**
 724 * memory_region_unregister_iommu_notifier: unregister a notifier for
 725 * changes to IOMMU translation entries.
 726 *
 727 * @mr: the memory region which was observed and for which notity_stopped()
 728 *      needs to be called
 729 * @n: the notifier to be removed.
 730 */
 731void memory_region_unregister_iommu_notifier(MemoryRegion *mr,
 732                                             IOMMUNotifier *n);
 733
 734/**
 735 * memory_region_name: get a memory region's name
 736 *
 737 * Returns the string that was used to initialize the memory region.
 738 *
 739 * @mr: the memory region being queried
 740 */
 741const char *memory_region_name(const MemoryRegion *mr);
 742
 743/**
 744 * memory_region_is_logging: return whether a memory region is logging writes
 745 *
 746 * Returns %true if the memory region is logging writes for the given client
 747 *
 748 * @mr: the memory region being queried
 749 * @client: the client being queried
 750 */
 751bool memory_region_is_logging(MemoryRegion *mr, uint8_t client);
 752
 753/**
 754 * memory_region_get_dirty_log_mask: return the clients for which a
 755 * memory region is logging writes.
 756 *
 757 * Returns a bitmap of clients, in which the DIRTY_MEMORY_* constants
 758 * are the bit indices.
 759 *
 760 * @mr: the memory region being queried
 761 */
 762uint8_t memory_region_get_dirty_log_mask(MemoryRegion *mr);
 763
 764/**
 765 * memory_region_is_rom: check whether a memory region is ROM
 766 *
 767 * Returns %true is a memory region is read-only memory.
 768 *
 769 * @mr: the memory region being queried
 770 */
 771static inline bool memory_region_is_rom(MemoryRegion *mr)
 772{
 773    return mr->ram && mr->readonly;
 774}
 775
 776
 777/**
 778 * memory_region_get_fd: Get a file descriptor backing a RAM memory region.
 779 *
 780 * Returns a file descriptor backing a file-based RAM memory region,
 781 * or -1 if the region is not a file-based RAM memory region.
 782 *
 783 * @mr: the RAM or alias memory region being queried.
 784 */
 785int memory_region_get_fd(MemoryRegion *mr);
 786
 787/**
 788 * memory_region_set_fd: Mark a RAM memory region as backed by a
 789 * file descriptor.
 790 *
 791 * This function is typically used after memory_region_init_ram_ptr().
 792 *
 793 * @mr: the memory region being queried.
 794 * @fd: the file descriptor that backs @mr.
 795 */
 796void memory_region_set_fd(MemoryRegion *mr, int fd);
 797
 798/**
 799 * memory_region_from_host: Convert a pointer into a RAM memory region
 800 * and an offset within it.
 801 *
 802 * Given a host pointer inside a RAM memory region (created with
 803 * memory_region_init_ram() or memory_region_init_ram_ptr()), return
 804 * the MemoryRegion and the offset within it.
 805 *
 806 * Use with care; by the time this function returns, the returned pointer is
 807 * not protected by RCU anymore.  If the caller is not within an RCU critical
 808 * section and does not hold the iothread lock, it must have other means of
 809 * protecting the pointer, such as a reference to the region that includes
 810 * the incoming ram_addr_t.
 811 *
 812 * @mr: the memory region being queried.
 813 */
 814MemoryRegion *memory_region_from_host(void *ptr, ram_addr_t *offset);
 815
 816/**
 817 * memory_region_get_ram_ptr: Get a pointer into a RAM memory region.
 818 *
 819 * Returns a host pointer to a RAM memory region (created with
 820 * memory_region_init_ram() or memory_region_init_ram_ptr()).
 821 *
 822 * Use with care; by the time this function returns, the returned pointer is
 823 * not protected by RCU anymore.  If the caller is not within an RCU critical
 824 * section and does not hold the iothread lock, it must have other means of
 825 * protecting the pointer, such as a reference to the region that includes
 826 * the incoming ram_addr_t.
 827 *
 828 * @mr: the memory region being queried.
 829 */
 830void *memory_region_get_ram_ptr(MemoryRegion *mr);
 831
 832/* memory_region_ram_resize: Resize a RAM region.
 833 *
 834 * Only legal before guest might have detected the memory size: e.g. on
 835 * incoming migration, or right after reset.
 836 *
 837 * @mr: a memory region created with @memory_region_init_resizeable_ram.
 * @newsize: the new size of the region
 839 * @errp: pointer to Error*, to store an error if it happens.
 840 */
 841void memory_region_ram_resize(MemoryRegion *mr, ram_addr_t newsize,
 842                              Error **errp);
 843
 844/**
 845 * memory_region_set_log: Turn dirty logging on or off for a region.
 846 *
 847 * Turns dirty logging on or off for a specified client (display, migration).
 848 * Only meaningful for RAM regions.
 849 *
 850 * @mr: the memory region being updated.
 851 * @log: whether dirty logging is to be enabled or disabled.
 852 * @client: the user of the logging information; %DIRTY_MEMORY_VGA only.
 853 */
 854void memory_region_set_log(MemoryRegion *mr, bool log, unsigned client);
 855
 856/**
 857 * memory_region_get_dirty: Check whether a range of bytes is dirty
 858 *                          for a specified client.
 859 *
 860 * Checks whether a range of bytes has been written to since the last
 861 * call to memory_region_reset_dirty() with the same @client.  Dirty logging
 862 * must be enabled.
 863 *
 864 * @mr: the memory region being queried.
 865 * @addr: the address (relative to the start of the region) being queried.
 866 * @size: the size of the range being queried.
 867 * @client: the user of the logging information; %DIRTY_MEMORY_MIGRATION or
 868 *          %DIRTY_MEMORY_VGA.
 869 */
 870bool memory_region_get_dirty(MemoryRegion *mr, hwaddr addr,
 871                             hwaddr size, unsigned client);
 872
 873/**
 874 * memory_region_set_dirty: Mark a range of bytes as dirty in a memory region.
 875 *
 876 * Marks a range of bytes as dirty, after it has been dirtied outside
 877 * guest code.
 878 *
 879 * @mr: the memory region being dirtied.
 880 * @addr: the address (relative to the start of the region) being dirtied.
 881 * @size: size of the range being dirtied.
 882 */
 883void memory_region_set_dirty(MemoryRegion *mr, hwaddr addr,
 884                             hwaddr size);
 885
 886/**
 887 * memory_region_test_and_clear_dirty: Check whether a range of bytes is dirty
 888 *                                     for a specified client. It clears them.
 889 *
 890 * Checks whether a range of bytes has been written to since the last
 891 * call to memory_region_reset_dirty() with the same @client.  Dirty logging
 892 * must be enabled.
 893 *
 894 * @mr: the memory region being queried.
 895 * @addr: the address (relative to the start of the region) being queried.
 896 * @size: the size of the range being queried.
 897 * @client: the user of the logging information; %DIRTY_MEMORY_MIGRATION or
 898 *          %DIRTY_MEMORY_VGA.
 899 */
 900bool memory_region_test_and_clear_dirty(MemoryRegion *mr, hwaddr addr,
 901                                        hwaddr size, unsigned client);
 902/**
 903 * memory_region_sync_dirty_bitmap: Synchronize a region's dirty bitmap with
 904 *                                  any external TLBs (e.g. kvm)
 905 *
 906 * Flushes dirty information from accelerators such as kvm and vhost-net
 907 * and makes it available to users of the memory API.
 908 *
 909 * @mr: the region being flushed.
 910 */
 911void memory_region_sync_dirty_bitmap(MemoryRegion *mr);
 912
 913/**
 914 * memory_region_reset_dirty: Mark a range of pages as clean, for a specified
 915 *                            client.
 916 *
 917 * Marks a range of pages as no longer dirty.
 918 *
 919 * @mr: the region being updated.
 920 * @addr: the start of the subrange being cleaned.
 921 * @size: the size of the subrange being cleaned.
 922 * @client: the user of the logging information; %DIRTY_MEMORY_MIGRATION or
 923 *          %DIRTY_MEMORY_VGA.
 924 */
 925void memory_region_reset_dirty(MemoryRegion *mr, hwaddr addr,
 926                               hwaddr size, unsigned client);
 927
 928/**
 929 * memory_region_set_readonly: Turn a memory region read-only (or read-write)
 930 *
 931 * Allows a memory region to be marked as read-only (turning it into a ROM).
 * Only useful on RAM regions.
 933 *
 934 * @mr: the region being updated.
 * @readonly: whether the region is to be ROM or RAM.
 936 */
 937void memory_region_set_readonly(MemoryRegion *mr, bool readonly);
 938
 939/**
 940 * memory_region_rom_device_set_romd: enable/disable ROMD mode
 941 *
 * Allows a ROM device (initialized with memory_region_init_rom_device()) to
 943 * set to ROMD mode (default) or MMIO mode.  When it is in ROMD mode, the
 944 * device is mapped to guest memory and satisfies read access directly.
 945 * When in MMIO mode, reads are forwarded to the #MemoryRegion.read function.
 946 * Writes are always handled by the #MemoryRegion.write function.
 947 *
 948 * @mr: the memory region to be updated
 949 * @romd_mode: %true to put the region into ROMD mode
 950 */
 951void memory_region_rom_device_set_romd(MemoryRegion *mr, bool romd_mode);
 952
 953/**
 954 * memory_region_set_coalescing: Enable memory coalescing for the region.
 955 *
 * Enables writes to the region to be queued for later processing. MMIO ->write
 957 * callbacks may be delayed until a non-coalesced MMIO is issued.
 958 * Only useful for IO regions.  Roughly similar to write-combining hardware.
 959 *
 960 * @mr: the memory region to be write coalesced
 961 */
 962void memory_region_set_coalescing(MemoryRegion *mr);
 963
 964/**
 965 * memory_region_add_coalescing: Enable memory coalescing for a sub-range of
 966 *                               a region.
 967 *
 968 * Like memory_region_set_coalescing(), but works on a sub-range of a region.
 * Multiple calls can be issued to coalesce disjoint ranges.
 970 *
 971 * @mr: the memory region to be updated.
 972 * @offset: the start of the range within the region to be coalesced.
 973 * @size: the size of the subrange to be coalesced.
 974 */
 975void memory_region_add_coalescing(MemoryRegion *mr,
 976                                  hwaddr offset,
 977                                  uint64_t size);
 978
 979/**
 980 * memory_region_clear_coalescing: Disable MMIO coalescing for the region.
 981 *
 982 * Disables any coalescing caused by memory_region_set_coalescing() or
 * memory_region_add_coalescing().  Roughly equivalent to uncacheable memory
 984 * hardware.
 985 *
 986 * @mr: the memory region to be updated.
 987 */
 988void memory_region_clear_coalescing(MemoryRegion *mr);
 989
 990/**
 991 * memory_region_set_flush_coalesced: Enforce memory coalescing flush before
 992 *                                    accesses.
 993 *
 * Ensure that pending coalesced MMIO requests are flushed before the memory
 995 * region is accessed. This property is automatically enabled for all regions
 996 * passed to memory_region_set_coalescing() and memory_region_add_coalescing().
 997 *
 998 * @mr: the memory region to be updated.
 999 */
1000void memory_region_set_flush_coalesced(MemoryRegion *mr);
1001
1002/**
1003 * memory_region_clear_flush_coalesced: Disable memory coalescing flush before
1004 *                                      accesses.
1005 *
1006 * Clear the automatic coalesced MMIO flushing enabled via
1007 * memory_region_set_flush_coalesced. Note that this service has no effect on
1008 * memory regions that have MMIO coalescing enabled for themselves. For them,
1009 * automatic flushing will stop once coalescing is disabled.
1010 *
1011 * @mr: the memory region to be updated.
1012 */
1013void memory_region_clear_flush_coalesced(MemoryRegion *mr);
1014
1015/**
1016 * memory_region_set_global_locking: Declares the access processing requires
1017 *                                   QEMU's global lock.
1018 *
1019 * When this is invoked, accesses to the memory region will be processed while
1020 * holding the global lock of QEMU. This is the default behavior of memory
1021 * regions.
1022 *
1023 * @mr: the memory region to be updated.
1024 */
1025void memory_region_set_global_locking(MemoryRegion *mr);
1026
1027/**
1028 * memory_region_clear_global_locking: Declares that access processing does
1029 *                                     not depend on the QEMU global lock.
1030 *
1031 * By clearing this property, accesses to the memory region will be processed
 * outside of QEMU's global lock (unless the lock is held when issuing the
1033 * access request). In this case, the device model implementing the access
1034 * handlers is responsible for synchronization of concurrency.
1035 *
1036 * @mr: the memory region to be updated.
1037 */
1038void memory_region_clear_global_locking(MemoryRegion *mr);
1039
1040/**
1041 * memory_region_add_eventfd: Request an eventfd to be triggered when a word
1042 *                            is written to a location.
1043 *
1044 * Marks a word in an IO region (initialized with memory_region_init_io())
1045 * as a trigger for an eventfd event.  The I/O callback will not be called.
1046 * The caller must be prepared to handle failure (that is, take the required
1047 * action if the callback _is_ called).
1048 *
1049 * @mr: the memory region being updated.
1050 * @addr: the address within @mr that is to be monitored
1051 * @size: the size of the access to trigger the eventfd
1052 * @match_data: whether to match against @data, instead of just @addr
1053 * @data: the data to match against the guest write
 * @e: the #EventNotifier to be triggered when @addr, @size, and @data all match.
1055 **/
1056void memory_region_add_eventfd(MemoryRegion *mr,
1057                               hwaddr addr,
1058                               unsigned size,
1059                               bool match_data,
1060                               uint64_t data,
1061                               EventNotifier *e);
1062
1063/**
1064 * memory_region_del_eventfd: Cancel an eventfd.
1065 *
1066 * Cancels an eventfd trigger requested by a previous
1067 * memory_region_add_eventfd() call.
1068 *
1069 * @mr: the memory region being updated.
1070 * @addr: the address within @mr that is to be monitored
1071 * @size: the size of the access to trigger the eventfd
1072 * @match_data: whether to match against @data, instead of just @addr
1073 * @data: the data to match against the guest write
 * @e: the #EventNotifier to be triggered when @addr, @size, and @data all match.
1075 */
1076void memory_region_del_eventfd(MemoryRegion *mr,
1077                               hwaddr addr,
1078                               unsigned size,
1079                               bool match_data,
1080                               uint64_t data,
1081                               EventNotifier *e);
1082
1083/**
1084 * memory_region_add_subregion: Add a subregion to a container.
1085 *
1086 * Adds a subregion at @offset.  The subregion may not overlap with other
1087 * subregions (except for those explicitly marked as overlapping).  A region
1088 * may only be added once as a subregion (unless removed with
1089 * memory_region_del_subregion()); use memory_region_init_alias() if you
1090 * want a region to be a subregion in multiple locations.
1091 *
1092 * @mr: the region to contain the new subregion; must be a container
1093 *      initialized with memory_region_init().
1094 * @offset: the offset relative to @mr where @subregion is added.
1095 * @subregion: the subregion to be added.
1096 */
1097void memory_region_add_subregion(MemoryRegion *mr,
1098                                 hwaddr offset,
1099                                 MemoryRegion *subregion);
1100/**
1101 * memory_region_add_subregion_overlap: Add a subregion to a container
1102 *                                      with overlap.
1103 *
1104 * Adds a subregion at @offset.  The subregion may overlap with other
1105 * subregions.  Conflicts are resolved by having a higher @priority hide a
1106 * lower @priority. Subregions without priority are taken as @priority 0.
1107 * A region may only be added once as a subregion (unless removed with
1108 * memory_region_del_subregion()); use memory_region_init_alias() if you
1109 * want a region to be a subregion in multiple locations.
1110 *
1111 * @mr: the region to contain the new subregion; must be a container
1112 *      initialized with memory_region_init().
1113 * @offset: the offset relative to @mr where @subregion is added.
1114 * @subregion: the subregion to be added.
1115 * @priority: used for resolving overlaps; highest priority wins.
1116 */
1117void memory_region_add_subregion_overlap(MemoryRegion *mr,
1118                                         hwaddr offset,
1119                                         MemoryRegion *subregion,
1120                                         int priority);
1121
1122/**
1123 * memory_region_get_ram_addr: Get the ram address associated with a memory
1124 *                             region
1125 */
1126ram_addr_t memory_region_get_ram_addr(MemoryRegion *mr);
1127
1128uint64_t memory_region_get_alignment(const MemoryRegion *mr);
1129/**
1130 * memory_region_del_subregion: Remove a subregion.
1131 *
1132 * Removes a subregion from its container.
1133 *
1134 * @mr: the container to be updated.
1135 * @subregion: the region being removed; must be a current subregion of @mr.
1136 */
1137void memory_region_del_subregion(MemoryRegion *mr,
1138                                 MemoryRegion *subregion);
1139
1140/*
1141 * memory_region_set_enabled: dynamically enable or disable a region
1142 *
1143 * Enables or disables a memory region.  A disabled memory region
1144 * ignores all accesses to itself and its subregions.  It does not
1145 * obscure sibling subregions with lower priority - it simply behaves as
1146 * if it was removed from the hierarchy.
1147 *
1148 * Regions default to being enabled.
1149 *
1150 * @mr: the region to be updated
1151 * @enabled: whether to enable or disable the region
1152 */
1153void memory_region_set_enabled(MemoryRegion *mr, bool enabled);
1154
1155/*
1156 * memory_region_set_address: dynamically update the address of a region
1157 *
1158 * Dynamically updates the address of a region, relative to its container.
 * May be used on regions that are currently part of a memory hierarchy.
1160 *
1161 * @mr: the region to be updated
1162 * @addr: new address, relative to container region
1163 */
1164void memory_region_set_address(MemoryRegion *mr, hwaddr addr);
1165
1166/*
1167 * memory_region_set_size: dynamically update the size of a region.
1168 *
1169 * Dynamically updates the size of a region.
1170 *
1171 * @mr: the region to be updated
1172 * @size: used size of the region.
1173 */
1174void memory_region_set_size(MemoryRegion *mr, uint64_t size);
1175
1176/*
1177 * memory_region_set_alias_offset: dynamically update a memory alias's offset
1178 *
1179 * Dynamically updates the offset into the target region that an alias points
1180 * to, as if the fourth argument to memory_region_init_alias() has changed.
1181 *
1182 * @mr: the #MemoryRegion to be updated; should be an alias.
1183 * @offset: the new offset into the target memory region
1184 */
1185void memory_region_set_alias_offset(MemoryRegion *mr,
1186                                    hwaddr offset);
1187
1188/**
1189 * memory_region_present: checks if an address relative to a @container
1190 * translates into #MemoryRegion within @container
1191 *
1192 * Answer whether a #MemoryRegion within @container covers the address
1193 * @addr.
1194 *
1195 * @container: a #MemoryRegion within which @addr is a relative address
1196 * @addr: the area within @container to be searched
1197 */
1198bool memory_region_present(MemoryRegion *container, hwaddr addr);
1199
1200/**
1201 * memory_region_is_mapped: returns true if #MemoryRegion is mapped
1202 * into any address space.
1203 *
1204 * @mr: a #MemoryRegion which should be checked if it's mapped
1205 */
1206bool memory_region_is_mapped(MemoryRegion *mr);
1207
1208/**
1209 * memory_region_find: translate an address/size relative to a
1210 * MemoryRegion into a #MemoryRegionSection.
1211 *
1212 * Locates the first #MemoryRegion within @mr that overlaps the range
1213 * given by @addr and @size.
1214 *
1215 * Returns a #MemoryRegionSection that describes a contiguous overlap.
1216 * It will have the following characteristics:
1217 *    .@size = 0 iff no overlap was found
1218 *    .@mr is non-%NULL iff an overlap was found
1219 *
1220 * Remember that in the return value the @offset_within_region is
1221 * relative to the returned region (in the .@mr field), not to the
1222 * @mr argument.
1223 *
1224 * Similarly, the .@offset_within_address_space is relative to the
1225 * address space that contains both regions, the passed and the
1226 * returned one.  However, in the special case where the @mr argument
1227 * has no container (and thus is the root of the address space), the
1228 * following will hold:
1229 *    .@offset_within_address_space >= @addr
1230 *    .@offset_within_address_space + .@size <= @addr + @size
1231 *
1232 * @mr: a MemoryRegion within which @addr is a relative address
1233 * @addr: start of the area within @as to be searched
1234 * @size: size of the area to be searched
1235 */
1236MemoryRegionSection memory_region_find(MemoryRegion *mr,
1237                                       hwaddr addr, uint64_t size);
1238
1239/**
1240 * memory_global_dirty_log_sync: synchronize the dirty log for all memory
1241 *
1242 * Synchronizes the dirty page log for all address spaces.
1243 */
1244void memory_global_dirty_log_sync(void);
1245
1246/**
1247 * memory_region_transaction_begin: Start a transaction.
1248 *
1249 * During a transaction, changes will be accumulated and made visible
1250 * only when the transaction ends (is committed).
1251 */
1252void memory_region_transaction_begin(void);
1253
1254/**
1255 * memory_region_transaction_commit: Commit a transaction and make changes
1256 *                                   visible to the guest.
1257 */
1258void memory_region_transaction_commit(void);
1259
1260/**
1261 * memory_listener_register: register callbacks to be called when memory
1262 *                           sections are mapped or unmapped into an address
1263 *                           space
1264 *
1265 * @listener: an object containing the callbacks to be called
1266 * @filter: if non-%NULL, only regions in this address space will be observed
1267 */
1268void memory_listener_register(MemoryListener *listener, AddressSpace *filter);
1269
1270/**
1271 * memory_listener_unregister: undo the effect of memory_listener_register()
1272 *
1273 * @listener: an object containing the callbacks to be removed
1274 */
1275void memory_listener_unregister(MemoryListener *listener);
1276
1277/**
1278 * memory_global_dirty_log_start: begin dirty logging for all regions
1279 */
1280void memory_global_dirty_log_start(void);
1281
1282/**
1283 * memory_global_dirty_log_stop: end dirty logging for all regions
1284 */
1285void memory_global_dirty_log_stop(void);
1286
1287void mtree_info(fprintf_function mon_printf, void *f);
1288
1289/**
1290 * memory_region_dispatch_read: perform a read directly to the specified
1291 * MemoryRegion.
1292 *
1293 * @mr: #MemoryRegion to access
1294 * @addr: address within that region
1295 * @pval: pointer to uint64_t which the data is written to
1296 * @size: size of the access in bytes
1297 * @attrs: memory transaction attributes to use for the access
1298 */
1299MemTxResult memory_region_dispatch_read(MemoryRegion *mr,
1300                                        hwaddr addr,
1301                                        uint64_t *pval,
1302                                        unsigned size,
1303                                        MemTxAttrs attrs);
1304/**
1305 * memory_region_dispatch_write: perform a write directly to the specified
1306 * MemoryRegion.
1307 *
1308 * @mr: #MemoryRegion to access
1309 * @addr: address within that region
1310 * @data: data to write
1311 * @size: size of the access in bytes
1312 * @attrs: memory transaction attributes to use for the access
1313 */
1314MemTxResult memory_region_dispatch_write(MemoryRegion *mr,
1315                                         hwaddr addr,
1316                                         uint64_t data,
1317                                         unsigned size,
1318                                         MemTxAttrs attrs);
1319
1320/**
1321 * address_space_init: initializes an address space
1322 *
1323 * @as: an uninitialized #AddressSpace
1324 * @root: a #MemoryRegion that routes addresses for the address space
1325 * @name: an address space name.  The name is only used for debugging
1326 *        output.
1327 */
1328void address_space_init(AddressSpace *as, MemoryRegion *root, const char *name);
1329
1330/**
1331 * address_space_init_shareable: return an address space for a memory region,
1332 *                               creating it if it does not already exist
1333 *
1334 * @root: a #MemoryRegion that routes addresses for the address space
1335 * @name: an address space name.  The name is only used for debugging
1336 *        output.
1337 *
1338 * This function will return a pointer to an existing AddressSpace
1339 * which was initialized with the specified MemoryRegion, or it will
1340 * create and initialize one if it does not already exist. The ASes
1341 * are reference-counted, so the memory will be freed automatically
1342 * when the AddressSpace is destroyed via address_space_destroy.
1343 */
1344AddressSpace *address_space_init_shareable(MemoryRegion *root,
1345                                           const char *name);
1346
1347/**
1348 * address_space_destroy: destroy an address space
1349 *
1350 * Releases all resources associated with an address space.  After an address space
1351 * is destroyed, its root memory region (given by address_space_init()) may be destroyed
1352 * as well.
1353 *
1354 * @as: address space to be destroyed
1355 */
1356void address_space_destroy(AddressSpace *as);
1357
1358/**
1359 * address_space_rw: read from or write to an address space.
1360 *
1361 * Return a MemTxResult indicating whether the operation succeeded
1362 * or failed (eg unassigned memory, device rejected the transaction,
1363 * IOMMU fault).
1364 *
1365 * @as: #AddressSpace to be accessed
1366 * @addr: address within that address space
1367 * @attrs: memory transaction attributes
1368 * @buf: buffer with the data transferred
1369 * @is_write: indicates the transfer direction
1370 */
1371MemTxResult address_space_rw(AddressSpace *as, hwaddr addr,
1372                             MemTxAttrs attrs, uint8_t *buf,
1373                             int len, bool is_write);
1374
1375/**
1376 * address_space_write: write to address space.
1377 *
1378 * Return a MemTxResult indicating whether the operation succeeded
1379 * or failed (eg unassigned memory, device rejected the transaction,
1380 * IOMMU fault).
1381 *
1382 * @as: #AddressSpace to be accessed
1383 * @addr: address within that address space
1384 * @attrs: memory transaction attributes
1385 * @buf: buffer with the data transferred
1386 */
1387MemTxResult address_space_write(AddressSpace *as, hwaddr addr,
1388                                MemTxAttrs attrs,
1389                                const uint8_t *buf, int len);
1390
1391/* address_space_ld*: load from an address space
1392 * address_space_st*: store to an address space
1393 *
1394 * These functions perform a load or store of the byte, word,
1395 * longword or quad to the specified address within the AddressSpace.
1396 * The _le suffixed functions treat the data as little endian;
1397 * _be indicates big endian; no suffix indicates "same endianness
1398 * as guest CPU".
1399 *
1400 * The "guest CPU endianness" accessors are deprecated for use outside
1401 * target-* code; devices should be CPU-agnostic and use either the LE
1402 * or the BE accessors.
1403 *
1404 * @as #AddressSpace to be accessed
1405 * @addr: address within that address space
1406 * @val: data value, for stores
1407 * @attrs: memory transaction attributes
1408 * @result: location to write the success/failure of the transaction;
1409 *   if NULL, this information is discarded
1410 */
1411uint32_t address_space_ldub(AddressSpace *as, hwaddr addr,
1412                            MemTxAttrs attrs, MemTxResult *result);
1413uint32_t address_space_lduw_le(AddressSpace *as, hwaddr addr,
1414                            MemTxAttrs attrs, MemTxResult *result);
1415uint32_t address_space_lduw_be(AddressSpace *as, hwaddr addr,
1416                            MemTxAttrs attrs, MemTxResult *result);
1417uint32_t address_space_ldl_le(AddressSpace *as, hwaddr addr,
1418                            MemTxAttrs attrs, MemTxResult *result);
1419uint32_t address_space_ldl_be(AddressSpace *as, hwaddr addr,
1420                            MemTxAttrs attrs, MemTxResult *result);
1421uint64_t address_space_ldq_le(AddressSpace *as, hwaddr addr,
1422                            MemTxAttrs attrs, MemTxResult *result);
1423uint64_t address_space_ldq_be(AddressSpace *as, hwaddr addr,
1424                            MemTxAttrs attrs, MemTxResult *result);
1425void address_space_stb(AddressSpace *as, hwaddr addr, uint32_t val,
1426                            MemTxAttrs attrs, MemTxResult *result);
1427void address_space_stw_le(AddressSpace *as, hwaddr addr, uint32_t val,
1428                            MemTxAttrs attrs, MemTxResult *result);
1429void address_space_stw_be(AddressSpace *as, hwaddr addr, uint32_t val,
1430                            MemTxAttrs attrs, MemTxResult *result);
1431void address_space_stl_le(AddressSpace *as, hwaddr addr, uint32_t val,
1432                            MemTxAttrs attrs, MemTxResult *result);
1433void address_space_stl_be(AddressSpace *as, hwaddr addr, uint32_t val,
1434                            MemTxAttrs attrs, MemTxResult *result);
1435void address_space_stq_le(AddressSpace *as, hwaddr addr, uint64_t val,
1436                            MemTxAttrs attrs, MemTxResult *result);
1437void address_space_stq_be(AddressSpace *as, hwaddr addr, uint64_t val,
1438                            MemTxAttrs attrs, MemTxResult *result);
1439
1440/* address_space_translate: translate an address range into an address space
1441 * into a MemoryRegion and an address range into that section.  Should be
1442 * called from an RCU critical section, to avoid that the last reference
1443 * to the returned region disappears after address_space_translate returns.
1444 *
1445 * @as: #AddressSpace to be accessed
1446 * @addr: address within that address space
1447 * @xlat: pointer to address within the returned memory region section's
1448 * #MemoryRegion.
1449 * @len: pointer to length
1450 * @is_write: indicates the transfer direction
1451 */
1452MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
1453                                      hwaddr *xlat, hwaddr *len,
1454                                      bool is_write);
1455
1456MemoryRegion *address_space_translate_attr(AddressSpace *as, hwaddr addr,
1457                                      hwaddr *xlat, hwaddr *plen,
1458                                      bool is_write, MemTxAttrs *attr);
1459
1460/* address_space_access_valid: check for validity of accessing an address
1461 * space range
1462 *
1463 * Check whether memory is assigned to the given address space range, and
1464 * access is permitted by any IOMMU regions that are active for the address
1465 * space.
1466 *
1467 * For now, addr and len should be aligned to a page size.  This limitation
1468 * will be lifted in the future.
1469 *
1470 * @as: #AddressSpace to be accessed
1471 * @addr: address within that address space
1472 * @len: length of the area to be checked
1473 * @is_write: indicates the transfer direction
1474 */
1475bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write);
1476
1477/* address_space_map: map a physical memory region into a host virtual address
1478 *
1479 * May map a subset of the requested range, given by and returned in @plen.
1480 * May return %NULL if resources needed to perform the mapping are exhausted.
1481 * Use only for reads OR writes - not for read-modify-write operations.
1482 * Use cpu_register_map_client() to know when retrying the map operation is
1483 * likely to succeed.
1484 *
1485 * @as: #AddressSpace to be accessed
1486 * @addr: address within that address space
1487 * @plen: pointer to length of buffer; updated on return
1488 * @is_write: indicates the transfer direction
1489 */
1490void *address_space_map(AddressSpace *as, hwaddr addr,
1491                        hwaddr *plen, bool is_write);
1492
1493/* address_space_unmap: Unmaps a memory region previously mapped by address_space_map()
1494 *
1495 * Will also mark the memory as dirty if @is_write == %true.  @access_len gives
1496 * the amount of memory that was actually read or written by the caller.
1497 *
1498 * @as: #AddressSpace used
1499 * @addr: address within that address space
1500 * @len: buffer length as returned by address_space_map()
1501 * @access_len: amount of data actually transferred
1502 * @is_write: indicates the transfer direction
1503 */
1504void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
1505                         int is_write, hwaddr access_len);
1506
1507
1508/* Internal functions, part of the implementation of address_space_read.  */
1509MemTxResult address_space_read_continue(AddressSpace *as, hwaddr addr,
1510                                        MemTxAttrs attrs, uint8_t *buf,
1511                                        int len, hwaddr addr1, hwaddr l,
1512                                        MemoryRegion *mr);
1513MemTxResult address_space_read_full(AddressSpace *as, hwaddr addr,
1514                                    MemTxAttrs attrs, uint8_t *buf, int len);
1515void *qemu_map_ram_ptr(RAMBlock *ram_block, ram_addr_t addr);
1516
1517static inline bool memory_access_is_direct(MemoryRegion *mr, bool is_write)
1518{
1519    if (is_write) {
1520        return memory_region_is_ram(mr) &&
1521               !mr->readonly && !memory_region_is_ram_device(mr);
1522    } else {
1523        return (memory_region_is_ram(mr) && !memory_region_is_ram_device(mr)) ||
1524               memory_region_is_romd(mr);
1525    }
1526}
1527
1528/**
1529 * address_space_read: read from an address space.
1530 *
1531 * Return a MemTxResult indicating whether the operation succeeded
1532 * or failed (eg unassigned memory, device rejected the transaction,
1533 * IOMMU fault).
1534 *
1535 * @as: #AddressSpace to be accessed
1536 * @addr: address within that address space
1537 * @attrs: memory transaction attributes
1538 * @buf: buffer with the data transferred
1539 */
1540static inline __attribute__((__always_inline__))
1541MemTxResult address_space_read(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
1542                               uint8_t *buf, int len)
1543{
1544    MemTxResult result = MEMTX_OK;
1545    hwaddr l, addr1;
1546    void *ptr;
1547    MemoryRegion *mr;
1548
1549    if (__builtin_constant_p(len)) {
1550        if (len) {
1551            rcu_read_lock();
1552            l = len;
1553            mr = address_space_translate_attr(as, addr, &addr1, &l, false,
1554                                              &attrs);
1555            if (len == l && memory_access_is_direct(mr, false)) {
1556                ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
1557                memcpy(buf, ptr, len);
1558            } else {
1559                result = address_space_read_continue(as, addr, attrs, buf, len,
1560                                                     addr1, l, mr);
1561            }
1562            rcu_read_unlock();
1563        }
1564    } else {
1565        result = address_space_read_full(as, addr, attrs, buf, len);
1566    }
1567    return result;
1568}
1569
1570#endif
1571
1572#endif
1573