qemu/include/exec/memory.h
<<
>>
Prefs
   1/*
   2 * Physical memory management API
   3 *
   4 * Copyright 2011 Red Hat, Inc. and/or its affiliates
   5 *
   6 * Authors:
   7 *  Avi Kivity <avi@redhat.com>
   8 *
   9 * This work is licensed under the terms of the GNU GPL, version 2.  See
  10 * the COPYING file in the top-level directory.
  11 *
  12 */
  13
  14#ifndef MEMORY_H
  15#define MEMORY_H
  16
  17#ifndef CONFIG_USER_ONLY
  18
  19#include "exec/cpu-common.h"
  20#include "exec/hwaddr.h"
  21#include "exec/memattrs.h"
  22#include "exec/memop.h"
  23#include "exec/ramlist.h"
  24#include "qemu/bswap.h"
  25#include "qemu/queue.h"
  26#include "qemu/int128.h"
  27#include "qemu/notify.h"
  28#include "qom/object.h"
  29#include "qemu/rcu.h"
  30
  31#define RAM_ADDR_INVALID (~(ram_addr_t)0)
  32
  33#define MAX_PHYS_ADDR_SPACE_BITS 62
  34#define MAX_PHYS_ADDR            (((hwaddr)1 << MAX_PHYS_ADDR_SPACE_BITS) - 1)
  35
  36#define TYPE_MEMORY_REGION "qemu:memory-region"
  37DECLARE_INSTANCE_CHECKER(MemoryRegion, MEMORY_REGION,
  38                         TYPE_MEMORY_REGION)
  39
  40#define TYPE_IOMMU_MEMORY_REGION "qemu:iommu-memory-region"
  41typedef struct IOMMUMemoryRegionClass IOMMUMemoryRegionClass;
  42DECLARE_OBJ_CHECKERS(IOMMUMemoryRegion, IOMMUMemoryRegionClass,
  43                     IOMMU_MEMORY_REGION, TYPE_IOMMU_MEMORY_REGION)
  44
  45#ifdef CONFIG_FUZZ
  46void fuzz_dma_read_cb(size_t addr,
  47                      size_t len,
  48                      MemoryRegion *mr,
  49                      bool is_write);
  50#else
  51static inline void fuzz_dma_read_cb(size_t addr,
  52                                    size_t len,
  53                                    MemoryRegion *mr,
  54                                    bool is_write)
  55{
  56    /* Do Nothing */
  57}
  58#endif
  59
  60extern bool global_dirty_log;
  61
  62typedef struct MemoryRegionOps MemoryRegionOps;
  63
  64struct ReservedRegion {
  65    hwaddr low;
  66    hwaddr high;
  67    unsigned type;
  68};
  69
  70typedef struct IOMMUTLBEntry IOMMUTLBEntry;
  71
  72/* See address_space_translate: bit 0 is read, bit 1 is write.  */
  73typedef enum {
  74    IOMMU_NONE = 0,
  75    IOMMU_RO   = 1,
  76    IOMMU_WO   = 2,
  77    IOMMU_RW   = 3,
  78} IOMMUAccessFlags;
  79
  80#define IOMMU_ACCESS_FLAG(r, w) (((r) ? IOMMU_RO : 0) | ((w) ? IOMMU_WO : 0))
  81
  82struct IOMMUTLBEntry {
  83    AddressSpace    *target_as;
  84    hwaddr           iova;
  85    hwaddr           translated_addr;
  86    hwaddr           addr_mask;  /* 0xfff = 4k translation */
  87    IOMMUAccessFlags perm;
  88};
  89
  90/*
  91 * Bitmap for different IOMMUNotifier capabilities. Each notifier can
  92 * register with one or multiple IOMMU Notifier capability bit(s).
  93 */
  94typedef enum {
  95    IOMMU_NOTIFIER_NONE = 0,
  96    /* Notify cache invalidations */
  97    IOMMU_NOTIFIER_UNMAP = 0x1,
  98    /* Notify entry changes (newly created entries) */
  99    IOMMU_NOTIFIER_MAP = 0x2,
 100} IOMMUNotifierFlag;
 101
 102#define IOMMU_NOTIFIER_ALL (IOMMU_NOTIFIER_MAP | IOMMU_NOTIFIER_UNMAP)
 103
 104struct IOMMUNotifier;
 105typedef void (*IOMMUNotify)(struct IOMMUNotifier *notifier,
 106                            IOMMUTLBEntry *data);
 107
 108struct IOMMUNotifier {
 109    IOMMUNotify notify;
 110    IOMMUNotifierFlag notifier_flags;
 111    /* Notify for address space range start <= addr <= end */
 112    hwaddr start;
 113    hwaddr end;
 114    int iommu_idx;
 115    QLIST_ENTRY(IOMMUNotifier) node;
 116};
 117typedef struct IOMMUNotifier IOMMUNotifier;
 118
 119/* RAM is pre-allocated and passed into qemu_ram_alloc_from_ptr */
 120#define RAM_PREALLOC   (1 << 0)
 121
 122/* RAM is mmap-ed with MAP_SHARED */
 123#define RAM_SHARED     (1 << 1)
 124
 125/* Only a portion of RAM (used_length) is actually used, and migrated.
 126 * This used_length size can change across reboots.
 127 */
 128#define RAM_RESIZEABLE (1 << 2)
 129
 130/* UFFDIO_ZEROPAGE is available on this RAMBlock to atomically
 131 * zero the page and wake waiting processes.
 132 * (Set during postcopy)
 133 */
 134#define RAM_UF_ZEROPAGE (1 << 3)
 135
 136/* RAM can be migrated */
 137#define RAM_MIGRATABLE (1 << 4)
 138
 139/* RAM is a persistent kind memory */
 140#define RAM_PMEM (1 << 5)
 141
 142static inline void iommu_notifier_init(IOMMUNotifier *n, IOMMUNotify fn,
 143                                       IOMMUNotifierFlag flags,
 144                                       hwaddr start, hwaddr end,
 145                                       int iommu_idx)
 146{
 147    n->notify = fn;
 148    n->notifier_flags = flags;
 149    n->start = start;
 150    n->end = end;
 151    n->iommu_idx = iommu_idx;
 152}
 153
 154/*
 155 * Memory region callbacks
 156 */
 157struct MemoryRegionOps {
 158    /* Read from the memory region. @addr is relative to @mr; @size is
 159     * in bytes. */
 160    uint64_t (*read)(void *opaque,
 161                     hwaddr addr,
 162                     unsigned size);
 163    /* Write to the memory region. @addr is relative to @mr; @size is
 164     * in bytes. */
 165    void (*write)(void *opaque,
 166                  hwaddr addr,
 167                  uint64_t data,
 168                  unsigned size);
 169
 170    MemTxResult (*read_with_attrs)(void *opaque,
 171                                   hwaddr addr,
 172                                   uint64_t *data,
 173                                   unsigned size,
 174                                   MemTxAttrs attrs);
 175    MemTxResult (*write_with_attrs)(void *opaque,
 176                                    hwaddr addr,
 177                                    uint64_t data,
 178                                    unsigned size,
 179                                    MemTxAttrs attrs);
 180
 181    enum device_endian endianness;
 182    /* Guest-visible constraints: */
 183    struct {
 184        /* If nonzero, specify bounds on access sizes beyond which a machine
 185         * check is thrown.
 186         */
 187        unsigned min_access_size;
 188        unsigned max_access_size;
 189        /* If true, unaligned accesses are supported.  Otherwise unaligned
 190         * accesses throw machine checks.
 191         */
 192         bool unaligned;
 193        /*
 194         * If present, and returns #false, the transaction is not accepted
 195         * by the device (and results in machine dependent behaviour such
 196         * as a machine check exception).
 197         */
 198        bool (*accepts)(void *opaque, hwaddr addr,
 199                        unsigned size, bool is_write,
 200                        MemTxAttrs attrs);
 201    } valid;
 202    /* Internal implementation constraints: */
 203    struct {
 204        /* If nonzero, specifies the minimum size implemented.  Smaller sizes
 205         * will be rounded upwards and a partial result will be returned.
 206         */
 207        unsigned min_access_size;
 208        /* If nonzero, specifies the maximum size implemented.  Larger sizes
 209         * will be done as a series of accesses with smaller sizes.
 210         */
 211        unsigned max_access_size;
 212        /* If true, unaligned accesses are supported.  Otherwise all accesses
 213         * are converted to (possibly multiple) naturally aligned accesses.
 214         */
 215        bool unaligned;
 216    } impl;
 217};
 218
 219typedef struct MemoryRegionClass {
 220    /* private */
 221    ObjectClass parent_class;
 222} MemoryRegionClass;
 223
 224
 225enum IOMMUMemoryRegionAttr {
 226    IOMMU_ATTR_SPAPR_TCE_FD
 227};
 228
 229/*
 230 * IOMMUMemoryRegionClass:
 231 *
 232 * All IOMMU implementations need to subclass TYPE_IOMMU_MEMORY_REGION
 233 * and provide an implementation of at least the @translate method here
 234 * to handle requests to the memory region. Other methods are optional.
 235 *
 236 * The IOMMU implementation must use the IOMMU notifier infrastructure
 237 * to report whenever mappings are changed, by calling
 238 * memory_region_notify_iommu() (or, if necessary, by calling
 239 * memory_region_notify_one() for each registered notifier).
 240 *
 241 * Conceptually an IOMMU provides a mapping from input address
 242 * to an output TLB entry. If the IOMMU is aware of memory transaction
 243 * attributes and the output TLB entry depends on the transaction
 244 * attributes, we represent this using IOMMU indexes. Each index
 245 * selects a particular translation table that the IOMMU has:
 246 *
 247 *   @attrs_to_index returns the IOMMU index for a set of transaction attributes
 248 *
 249 *   @translate takes an input address and an IOMMU index
 250 *
 251 * and the mapping returned can only depend on the input address and the
 252 * IOMMU index.
 253 *
 254 * Most IOMMUs don't care about the transaction attributes and support
 255 * only a single IOMMU index. A more complex IOMMU might have one index
 256 * for secure transactions and one for non-secure transactions.
 257 */
 258struct IOMMUMemoryRegionClass {
 259    /* private: */
 260    MemoryRegionClass parent_class;
 261
 262    /* public: */
 263    /**
 264     * @translate:
 265     *
 266     * Return a TLB entry that contains a given address.
 267     *
 268     * The IOMMUAccessFlags indicated via @flag are optional and may
 269     * be specified as IOMMU_NONE to indicate that the caller needs
 270     * the full translation information for both reads and writes. If
 271     * the access flags are specified then the IOMMU implementation
 272     * may use this as an optimization, to stop doing a page table
 273     * walk as soon as it knows that the requested permissions are not
 274     * allowed. If IOMMU_NONE is passed then the IOMMU must do the
 275     * full page table walk and report the permissions in the returned
 276     * IOMMUTLBEntry. (Note that this implies that an IOMMU may not
 277     * return different mappings for reads and writes.)
 278     *
 279     * The returned information remains valid while the caller is
 280     * holding the big QEMU lock or is inside an RCU critical section;
 281     * if the caller wishes to cache the mapping beyond that it must
 282     * register an IOMMU notifier so it can invalidate its cached
 283     * information when the IOMMU mapping changes.
 284     *
 285     * @iommu: the IOMMUMemoryRegion
 286     *
 287     * @hwaddr: address to be translated within the memory region
 288     *
 289     * @flag: requested access permission
 290     *
 291     * @iommu_idx: IOMMU index for the translation
 292     */
 293    IOMMUTLBEntry (*translate)(IOMMUMemoryRegion *iommu, hwaddr addr,
 294                               IOMMUAccessFlags flag, int iommu_idx);
 295    /**
 296     * @get_min_page_size:
 297     *
 298     * Returns minimum supported page size in bytes.
 299     *
 300     * If this method is not provided then the minimum is assumed to
 301     * be TARGET_PAGE_SIZE.
 302     *
 303     * @iommu: the IOMMUMemoryRegion
 304     */
 305    uint64_t (*get_min_page_size)(IOMMUMemoryRegion *iommu);
 306    /**
 307     * @notify_flag_changed:
 308     *
 309     * Called when IOMMU Notifier flag changes (ie when the set of
 310     * events which IOMMU users are requesting notification for changes).
 311     * Optional method -- need not be provided if the IOMMU does not
 312     * need to know exactly which events must be notified.
 313     *
 314     * @iommu: the IOMMUMemoryRegion
 315     *
 316     * @old_flags: events which previously needed to be notified
 317     *
 318     * @new_flags: events which now need to be notified
 319     *
 320     * Returns 0 on success, or a negative errno; in particular
 321     * returns -EINVAL if the new flag bitmap is not supported by the
 322     * IOMMU memory region. In case of failure, the error object
 323     * must be created
 324     */
 325    int (*notify_flag_changed)(IOMMUMemoryRegion *iommu,
 326                               IOMMUNotifierFlag old_flags,
 327                               IOMMUNotifierFlag new_flags,
 328                               Error **errp);
 329    /**
 330     * @replay:
 331     *
 332     * Called to handle memory_region_iommu_replay().
 333     *
 334     * The default implementation of memory_region_iommu_replay() is to
 335     * call the IOMMU translate method for every page in the address space
 336     * with flag == IOMMU_NONE and then call the notifier if translate
 337     * returns a valid mapping. If this method is implemented then it
 338     * overrides the default behaviour, and must provide the full semantics
 339     * of memory_region_iommu_replay(), by calling @notifier for every
 340     * translation present in the IOMMU.
 341     *
 342     * Optional method -- an IOMMU only needs to provide this method
 343     * if the default is inefficient or produces undesirable side effects.
 344     *
 345     * Note: this is not related to record-and-replay functionality.
 346     */
 347    void (*replay)(IOMMUMemoryRegion *iommu, IOMMUNotifier *notifier);
 348
 349    /**
 350     * @get_attr:
 351     *
 352     * Get IOMMU misc attributes. This is an optional method that
 353     * can be used to allow users of the IOMMU to get implementation-specific
 354     * information. The IOMMU implements this method to handle calls
 355     * by IOMMU users to memory_region_iommu_get_attr() by filling in
 356     * the arbitrary data pointer for any IOMMUMemoryRegionAttr values that
 357     * the IOMMU supports. If the method is unimplemented then
 358     * memory_region_iommu_get_attr() will always return -EINVAL.
 359     *
 360     * @iommu: the IOMMUMemoryRegion
 361     *
 362     * @attr: attribute being queried
 363     *
 364     * @data: memory to fill in with the attribute data
 365     *
 366     * Returns 0 on success, or a negative errno; in particular
 367     * returns -EINVAL for unrecognized or unimplemented attribute types.
 368     */
 369    int (*get_attr)(IOMMUMemoryRegion *iommu, enum IOMMUMemoryRegionAttr attr,
 370                    void *data);
 371
 372    /**
 373     * @attrs_to_index:
 374     *
 375     * Return the IOMMU index to use for a given set of transaction attributes.
 376     *
 377     * Optional method: if an IOMMU only supports a single IOMMU index then
 378     * the default implementation of memory_region_iommu_attrs_to_index()
 379     * will return 0.
 380     *
 381     * The indexes supported by an IOMMU must be contiguous, starting at 0.
 382     *
 383     * @iommu: the IOMMUMemoryRegion
 384     * @attrs: memory transaction attributes
 385     */
 386    int (*attrs_to_index)(IOMMUMemoryRegion *iommu, MemTxAttrs attrs);
 387
 388    /**
 389     * @num_indexes:
 390     *
 391     * Return the number of IOMMU indexes this IOMMU supports.
 392     *
 393     * Optional method: if this method is not provided, then
 394     * memory_region_iommu_num_indexes() will return 1, indicating that
 395     * only a single IOMMU index is supported.
 396     *
 397     * @iommu: the IOMMUMemoryRegion
 398     */
 399    int (*num_indexes)(IOMMUMemoryRegion *iommu);
 400
 401    /**
 402     * @iommu_set_page_size_mask:
 403     *
 404     * Restrict the page size mask that can be supported with a given IOMMU
 405     * memory region. Used for example to propagate host physical IOMMU page
 406     * size mask limitations to the virtual IOMMU.
 407     *
 408     * Optional method: if this method is not provided, then the default global
 409     * page mask is used.
 410     *
 411     * @iommu: the IOMMUMemoryRegion
 412     *
 413     * @page_size_mask: a bitmask of supported page sizes. At least one bit,
 414     * representing the smallest page size, must be set. Additional set bits
 415     * represent supported block sizes. For example a host physical IOMMU that
 416     * uses page tables with a page size of 4kB, and supports 2MB and 4GB
 417     * blocks, will set mask 0x40201000. A granule of 4kB with indiscriminate
 418     * block sizes is specified with mask 0xfffffffffffff000.
 419     *
 420     * Returns 0 on success, or a negative error. In case of failure, the error
 421     * object must be created.
 422     */
 423     int (*iommu_set_page_size_mask)(IOMMUMemoryRegion *iommu,
 424                                     uint64_t page_size_mask,
 425                                     Error **errp);
 426};
 427
 428typedef struct CoalescedMemoryRange CoalescedMemoryRange;
 429typedef struct MemoryRegionIoeventfd MemoryRegionIoeventfd;
 430
 431/** MemoryRegion:
 432 *
 433 * A struct representing a memory region.
 434 */
 435struct MemoryRegion {
 436    Object parent_obj;
 437
 438    /* private: */
 439
 440    /* The following fields should fit in a cache line */
 441    bool romd_mode;
 442    bool ram;
 443    bool subpage;
 444    bool readonly; /* For RAM regions */
 445    bool nonvolatile;
 446    bool rom_device;
 447    bool flush_coalesced_mmio;
 448    uint8_t dirty_log_mask;
 449    bool is_iommu;
 450    RAMBlock *ram_block;
 451    Object *owner;
 452
 453    const MemoryRegionOps *ops;
 454    void *opaque;
 455    MemoryRegion *container;
 456    Int128 size;
 457    hwaddr addr;
 458    void (*destructor)(MemoryRegion *mr);
 459    uint64_t align;
 460    bool terminates;
 461    bool ram_device;
 462    bool enabled;
 463    bool warning_printed; /* For reservations */
 464    uint8_t vga_logging_count;
 465    MemoryRegion *alias;
 466    hwaddr alias_offset;
 467    int32_t priority;
 468    QTAILQ_HEAD(, MemoryRegion) subregions;
 469    QTAILQ_ENTRY(MemoryRegion) subregions_link;
 470    QTAILQ_HEAD(, CoalescedMemoryRange) coalesced;
 471    const char *name;
 472    unsigned ioeventfd_nb;
 473    MemoryRegionIoeventfd *ioeventfds;
 474};
 475
 476struct IOMMUMemoryRegion {
 477    MemoryRegion parent_obj;
 478
 479    QLIST_HEAD(, IOMMUNotifier) iommu_notify;
 480    IOMMUNotifierFlag iommu_notify_flags;
 481};
 482
 483#define IOMMU_NOTIFIER_FOREACH(n, mr) \
 484    QLIST_FOREACH((n), &(mr)->iommu_notify, node)
 485
 486/**
 487 * struct MemoryListener: callbacks structure for updates to the physical memory map
 488 *
 489 * Allows a component to adjust to changes in the guest-visible memory map.
 490 * Use with memory_listener_register() and memory_listener_unregister().
 491 */
 492struct MemoryListener {
 493    /**
 494     * @begin:
 495     *
 496     * Called at the beginning of an address space update transaction.
 497     * Followed by calls to #MemoryListener.region_add(),
 498     * #MemoryListener.region_del(), #MemoryListener.region_nop(),
 499     * #MemoryListener.log_start() and #MemoryListener.log_stop() in
 500     * increasing address order.
 501     *
 502     * @listener: The #MemoryListener.
 503     */
 504    void (*begin)(MemoryListener *listener);
 505
 506    /**
 507     * @commit:
 508     *
 509     * Called at the end of an address space update transaction,
 510     * after the last call to #MemoryListener.region_add(),
 511     * #MemoryListener.region_del() or #MemoryListener.region_nop(),
 512     * #MemoryListener.log_start() and #MemoryListener.log_stop().
 513     *
 514     * @listener: The #MemoryListener.
 515     */
 516    void (*commit)(MemoryListener *listener);
 517
 518    /**
 519     * @region_add:
 520     *
 521     * Called during an address space update transaction,
 522     * for a section of the address space that is new in this address space
 523     * space since the last transaction.
 524     *
 525     * @listener: The #MemoryListener.
 526     * @section: The new #MemoryRegionSection.
 527     */
 528    void (*region_add)(MemoryListener *listener, MemoryRegionSection *section);
 529
 530    /**
 531     * @region_del:
 532     *
 533     * Called during an address space update transaction,
 534     * for a section of the address space that has disappeared in the address
 535     * space since the last transaction.
 536     *
 537     * @listener: The #MemoryListener.
 538     * @section: The old #MemoryRegionSection.
 539     */
 540    void (*region_del)(MemoryListener *listener, MemoryRegionSection *section);
 541
 542    /**
 543     * @region_nop:
 544     *
 545     * Called during an address space update transaction,
 546     * for a section of the address space that is in the same place in the address
 547     * space as in the last transaction.
 548     *
 549     * @listener: The #MemoryListener.
 550     * @section: The #MemoryRegionSection.
 551     */
 552    void (*region_nop)(MemoryListener *listener, MemoryRegionSection *section);
 553
 554    /**
 555     * @log_start:
 556     *
 557     * Called during an address space update transaction, after
 558     * one of #MemoryListener.region_add(),#MemoryListener.region_del() or
 559     * #MemoryListener.region_nop(), if dirty memory logging clients have
 560     * become active since the last transaction.
 561     *
 562     * @listener: The #MemoryListener.
 563     * @section: The #MemoryRegionSection.
 564     * @old: A bitmap of dirty memory logging clients that were active in
 565     * the previous transaction.
 566     * @new: A bitmap of dirty memory logging clients that are active in
 567     * the current transaction.
 568     */
 569    void (*log_start)(MemoryListener *listener, MemoryRegionSection *section,
 570                      int old, int new);
 571
 572    /**
 573     * @log_stop:
 574     *
 575     * Called during an address space update transaction, after
 576     * one of #MemoryListener.region_add(), #MemoryListener.region_del() or
 577     * #MemoryListener.region_nop() and possibly after
 578     * #MemoryListener.log_start(), if dirty memory logging clients have
 579     * become inactive since the last transaction.
 580     *
 581     * @listener: The #MemoryListener.
 582     * @section: The #MemoryRegionSection.
 583     * @old: A bitmap of dirty memory logging clients that were active in
 584     * the previous transaction.
 585     * @new: A bitmap of dirty memory logging clients that are active in
 586     * the current transaction.
 587     */
 588    void (*log_stop)(MemoryListener *listener, MemoryRegionSection *section,
 589                     int old, int new);
 590
 591    /**
 592     * @log_sync:
 593     *
 594     * Called by memory_region_snapshot_and_clear_dirty() and
 595     * memory_global_dirty_log_sync(), before accessing QEMU's "official"
 596     * copy of the dirty memory bitmap for a #MemoryRegionSection.
 597     *
 598     * @listener: The #MemoryListener.
 599     * @section: The #MemoryRegionSection.
 600     */
 601    void (*log_sync)(MemoryListener *listener, MemoryRegionSection *section);
 602
 603    /**
 604     * @log_clear:
 605     *
 606     * Called before reading the dirty memory bitmap for a
 607     * #MemoryRegionSection.
 608     *
 609     * @listener: The #MemoryListener.
 610     * @section: The #MemoryRegionSection.
 611     */
 612    void (*log_clear)(MemoryListener *listener, MemoryRegionSection *section);
 613
 614    /**
 615     * @log_global_start:
 616     *
 617     * Called by memory_global_dirty_log_start(), which
 618     * enables the %DIRTY_LOG_MIGRATION client on all memory regions in
 619     * the address space.  #MemoryListener.log_global_start() is also
 620     * called when a #MemoryListener is added, if global dirty logging is
 621     * active at that time.
 622     *
 623     * @listener: The #MemoryListener.
 624     */
 625    void (*log_global_start)(MemoryListener *listener);
 626
 627    /**
 628     * @log_global_stop:
 629     *
 630     * Called by memory_global_dirty_log_stop(), which
 631     * disables the %DIRTY_LOG_MIGRATION client on all memory regions in
 632     * the address space.
 633     *
 634     * @listener: The #MemoryListener.
 635     */
 636    void (*log_global_stop)(MemoryListener *listener);
 637
 638    /**
 639     * @log_global_after_sync:
 640     *
 641     * Called after reading the dirty memory bitmap
 642     * for any #MemoryRegionSection.
 643     *
 644     * @listener: The #MemoryListener.
 645     */
 646    void (*log_global_after_sync)(MemoryListener *listener);
 647
 648    /**
 649     * @eventfd_add:
 650     *
 651     * Called during an address space update transaction,
 652     * for a section of the address space that has had a new ioeventfd
 653     * registration since the last transaction.
 654     *
 655     * @listener: The #MemoryListener.
 656     * @section: The new #MemoryRegionSection.
 657     * @match_data: The @match_data parameter for the new ioeventfd.
 658     * @data: The @data parameter for the new ioeventfd.
 659     * @e: The #EventNotifier parameter for the new ioeventfd.
 660     */
 661    void (*eventfd_add)(MemoryListener *listener, MemoryRegionSection *section,
 662                        bool match_data, uint64_t data, EventNotifier *e);
 663
 664    /**
 665     * @eventfd_del:
 666     *
 667     * Called during an address space update transaction,
 668     * for a section of the address space that has dropped an ioeventfd
 669     * registration since the last transaction.
 670     *
 671     * @listener: The #MemoryListener.
 672     * @section: The new #MemoryRegionSection.
 673     * @match_data: The @match_data parameter for the dropped ioeventfd.
 674     * @data: The @data parameter for the dropped ioeventfd.
 675     * @e: The #EventNotifier parameter for the dropped ioeventfd.
 676     */
 677    void (*eventfd_del)(MemoryListener *listener, MemoryRegionSection *section,
 678                        bool match_data, uint64_t data, EventNotifier *e);
 679
 680    /**
 681     * @coalesced_io_add:
 682     *
 683     * Called during an address space update transaction,
 684     * for a section of the address space that has had a new coalesced
 685     * MMIO range registration since the last transaction.
 686     *
 687     * @listener: The #MemoryListener.
 688     * @section: The new #MemoryRegionSection.
 689     * @addr: The starting address for the coalesced MMIO range.
 690     * @len: The length of the coalesced MMIO range.
 691     */
 692    void (*coalesced_io_add)(MemoryListener *listener, MemoryRegionSection *section,
 693                               hwaddr addr, hwaddr len);
 694
 695    /**
 696     * @coalesced_io_del:
 697     *
 698     * Called during an address space update transaction,
 699     * for a section of the address space that has dropped a coalesced
 700     * MMIO range since the last transaction.
 701     *
 702     * @listener: The #MemoryListener.
 703     * @section: The new #MemoryRegionSection.
 704     * @addr: The starting address for the coalesced MMIO range.
 705     * @len: The length of the coalesced MMIO range.
 706     */
 707    void (*coalesced_io_del)(MemoryListener *listener, MemoryRegionSection *section,
 708                               hwaddr addr, hwaddr len);
 709    /**
 710     * @priority:
 711     *
 712     * Govern the order in which memory listeners are invoked. Lower priorities
 713     * are invoked earlier for "add" or "start" callbacks, and later for "delete"
 714     * or "stop" callbacks.
 715     */
 716    unsigned priority;
 717
 718    /* private: */
 719    AddressSpace *address_space;
 720    QTAILQ_ENTRY(MemoryListener) link;
 721    QTAILQ_ENTRY(MemoryListener) link_as;
 722};
 723
 724/**
 725 * struct AddressSpace: describes a mapping of addresses to #MemoryRegion objects
 726 */
 727struct AddressSpace {
 728    /* private: */
 729    struct rcu_head rcu;
 730    char *name;
 731    MemoryRegion *root;
 732
 733    /* Accessed via RCU.  */
 734    struct FlatView *current_map;
 735
 736    int ioeventfd_nb;
 737    struct MemoryRegionIoeventfd *ioeventfds;
 738    QTAILQ_HEAD(, MemoryListener) listeners;
 739    QTAILQ_ENTRY(AddressSpace) address_spaces_link;
 740};
 741
 742typedef struct AddressSpaceDispatch AddressSpaceDispatch;
 743typedef struct FlatRange FlatRange;
 744
 745/* Flattened global view of current active memory hierarchy.  Kept in sorted
 746 * order.
 747 */
 748struct FlatView {
 749    struct rcu_head rcu;
 750    unsigned ref;
 751    FlatRange *ranges;
 752    unsigned nr;
 753    unsigned nr_allocated;
 754    struct AddressSpaceDispatch *dispatch;
 755    MemoryRegion *root;
 756};
 757
 758static inline FlatView *address_space_to_flatview(AddressSpace *as)
 759{
 760    return qatomic_rcu_read(&as->current_map);
 761}
 762
 763typedef int (*flatview_cb)(Int128 start,
 764                           Int128 len,
 765                           const MemoryRegion*, void*);
 766
 767void flatview_for_each_range(FlatView *fv, flatview_cb cb , void *opaque);
 768
 769/**
 770 * struct MemoryRegionSection: describes a fragment of a #MemoryRegion
 771 *
 772 * @mr: the region, or %NULL if empty
 773 * @fv: the flat view of the address space the region is mapped in
 774 * @offset_within_region: the beginning of the section, relative to @mr's start
 775 * @size: the size of the section; will not exceed @mr's boundaries
 776 * @offset_within_address_space: the address of the first byte of the section
 777 *     relative to the region's address space
 778 * @readonly: writes to this section are ignored
 779 * @nonvolatile: this section is non-volatile
 780 */
 781struct MemoryRegionSection {
 782    Int128 size;
 783    MemoryRegion *mr;
 784    FlatView *fv;
 785    hwaddr offset_within_region;
 786    hwaddr offset_within_address_space;
 787    bool readonly;
 788    bool nonvolatile;
 789};
 790
 791static inline bool MemoryRegionSection_eq(MemoryRegionSection *a,
 792                                          MemoryRegionSection *b)
 793{
 794    return a->mr == b->mr &&
 795           a->fv == b->fv &&
 796           a->offset_within_region == b->offset_within_region &&
 797           a->offset_within_address_space == b->offset_within_address_space &&
 798           int128_eq(a->size, b->size) &&
 799           a->readonly == b->readonly &&
 800           a->nonvolatile == b->nonvolatile;
 801}
 802
 803/**
 804 * memory_region_init: Initialize a memory region
 805 *
 806 * The region typically acts as a container for other memory regions.  Use
 807 * memory_region_add_subregion() to add subregions.
 808 *
 809 * @mr: the #MemoryRegion to be initialized
 810 * @owner: the object that tracks the region's reference count
 811 * @name: used for debugging; not visible to the user or ABI
 812 * @size: size of the region; any subregions beyond this size will be clipped
 813 */
 814void memory_region_init(MemoryRegion *mr,
 815                        struct Object *owner,
 816                        const char *name,
 817                        uint64_t size);
 818
 819/**
 820 * memory_region_ref: Add 1 to a memory region's reference count
 821 *
 822 * Whenever memory regions are accessed outside the BQL, they need to be
 823 * preserved against hot-unplug.  MemoryRegions actually do not have their
 824 * own reference count; they piggyback on a QOM object, their "owner".
 825 * This function adds a reference to the owner.
 826 *
 827 * All MemoryRegions must have an owner if they can disappear, even if the
 828 * device they belong to operates exclusively under the BQL.  This is because
 829 * the region could be returned at any time by memory_region_find, and this
 830 * is usually under guest control.
 831 *
 832 * @mr: the #MemoryRegion
 833 */
 834void memory_region_ref(MemoryRegion *mr);
 835
 836/**
 837 * memory_region_unref: Remove 1 to a memory region's reference count
 838 *
 839 * Whenever memory regions are accessed outside the BQL, they need to be
 840 * preserved against hot-unplug.  MemoryRegions actually do not have their
 841 * own reference count; they piggyback on a QOM object, their "owner".
 842 * This function removes a reference to the owner and possibly destroys it.
 843 *
 844 * @mr: the #MemoryRegion
 845 */
 846void memory_region_unref(MemoryRegion *mr);
 847
 848/**
 849 * memory_region_init_io: Initialize an I/O memory region.
 850 *
 851 * Accesses into the region will cause the callbacks in @ops to be called.
 852 * if @size is nonzero, subregions will be clipped to @size.
 853 *
 854 * @mr: the #MemoryRegion to be initialized.
 855 * @owner: the object that tracks the region's reference count
 856 * @ops: a structure containing read and write callbacks to be used when
 857 *       I/O is performed on the region.
 858 * @opaque: passed to the read and write callbacks of the @ops structure.
 859 * @name: used for debugging; not visible to the user or ABI
 860 * @size: size of the region.
 861 */
 862void memory_region_init_io(MemoryRegion *mr,
 863                           struct Object *owner,
 864                           const MemoryRegionOps *ops,
 865                           void *opaque,
 866                           const char *name,
 867                           uint64_t size);
 868
 869/**
 870 * memory_region_init_ram_nomigrate:  Initialize RAM memory region.  Accesses
 871 *                                    into the region will modify memory
 872 *                                    directly.
 873 *
 874 * @mr: the #MemoryRegion to be initialized.
 875 * @owner: the object that tracks the region's reference count
 876 * @name: Region name, becomes part of RAMBlock name used in migration stream
 877 *        must be unique within any device
 878 * @size: size of the region.
 879 * @errp: pointer to Error*, to store an error if it happens.
 880 *
 881 * Note that this function does not do anything to cause the data in the
 882 * RAM memory region to be migrated; that is the responsibility of the caller.
 883 */
 884void memory_region_init_ram_nomigrate(MemoryRegion *mr,
 885                                      struct Object *owner,
 886                                      const char *name,
 887                                      uint64_t size,
 888                                      Error **errp);
 889
 890/**
 891 * memory_region_init_ram_shared_nomigrate:  Initialize RAM memory region.
 892 *                                           Accesses into the region will
 893 *                                           modify memory directly.
 894 *
 895 * @mr: the #MemoryRegion to be initialized.
 896 * @owner: the object that tracks the region's reference count
 897 * @name: Region name, becomes part of RAMBlock name used in migration stream
 898 *        must be unique within any device
 899 * @size: size of the region.
 900 * @share: allow remapping RAM to different addresses
 901 * @errp: pointer to Error*, to store an error if it happens.
 902 *
 903 * Note that this function is similar to memory_region_init_ram_nomigrate.
 904 * The only difference is part of the RAM region can be remapped.
 905 */
 906void memory_region_init_ram_shared_nomigrate(MemoryRegion *mr,
 907                                             struct Object *owner,
 908                                             const char *name,
 909                                             uint64_t size,
 910                                             bool share,
 911                                             Error **errp);
 912
 913/**
 914 * memory_region_init_resizeable_ram:  Initialize memory region with resizeable
 915 *                                     RAM.  Accesses into the region will
 916 *                                     modify memory directly.  Only an initial
 917 *                                     portion of this RAM is actually used.
 918 *                                     The used size can change across reboots.
 919 *
 920 * @mr: the #MemoryRegion to be initialized.
 921 * @owner: the object that tracks the region's reference count
 922 * @name: Region name, becomes part of RAMBlock name used in migration stream
 923 *        must be unique within any device
 924 * @size: used size of the region.
 925 * @max_size: max size of the region.
 926 * @resized: callback to notify owner about used size change.
 927 * @errp: pointer to Error*, to store an error if it happens.
 928 *
 929 * Note that this function does not do anything to cause the data in the
 930 * RAM memory region to be migrated; that is the responsibility of the caller.
 931 */
 932void memory_region_init_resizeable_ram(MemoryRegion *mr,
 933                                       struct Object *owner,
 934                                       const char *name,
 935                                       uint64_t size,
 936                                       uint64_t max_size,
 937                                       void (*resized)(const char*,
 938                                                       uint64_t length,
 939                                                       void *host),
 940                                       Error **errp);
 941#ifdef CONFIG_POSIX
 942
 943/**
 944 * memory_region_init_ram_from_file:  Initialize RAM memory region with a
 945 *                                    mmap-ed backend.
 946 *
 947 * @mr: the #MemoryRegion to be initialized.
 948 * @owner: the object that tracks the region's reference count
 949 * @name: Region name, becomes part of RAMBlock name used in migration stream
 950 *        must be unique within any device
 951 * @size: size of the region.
 952 * @align: alignment of the region base address; if 0, the default alignment
 953 *         (getpagesize()) will be used.
 954 * @ram_flags: Memory region features:
 955 *             - RAM_SHARED: memory must be mmaped with the MAP_SHARED flag
 956 *             - RAM_PMEM: the memory is persistent memory
 957 *             Other bits are ignored now.
 958 * @path: the path in which to allocate the RAM.
 959 * @errp: pointer to Error*, to store an error if it happens.
 960 *
 961 * Note that this function does not do anything to cause the data in the
 962 * RAM memory region to be migrated; that is the responsibility of the caller.
 963 */
 964void memory_region_init_ram_from_file(MemoryRegion *mr,
 965                                      struct Object *owner,
 966                                      const char *name,
 967                                      uint64_t size,
 968                                      uint64_t align,
 969                                      uint32_t ram_flags,
 970                                      const char *path,
 971                                      Error **errp);
 972
 973/**
 974 * memory_region_init_ram_from_fd:  Initialize RAM memory region with a
 975 *                                  mmap-ed backend.
 976 *
 977 * @mr: the #MemoryRegion to be initialized.
 978 * @owner: the object that tracks the region's reference count
 979 * @name: the name of the region.
 980 * @size: size of the region.
 981 * @share: %true if memory must be mmaped with the MAP_SHARED flag
 982 * @fd: the fd to mmap.
 983 * @errp: pointer to Error*, to store an error if it happens.
 984 *
 985 * Note that this function does not do anything to cause the data in the
 986 * RAM memory region to be migrated; that is the responsibility of the caller.
 987 */
 988void memory_region_init_ram_from_fd(MemoryRegion *mr,
 989                                    struct Object *owner,
 990                                    const char *name,
 991                                    uint64_t size,
 992                                    bool share,
 993                                    int fd,
 994                                    Error **errp);
 995#endif
 996
 997/**
 998 * memory_region_init_ram_ptr:  Initialize RAM memory region from a
 999 *                              user-provided pointer.  Accesses into the
1000 *                              region will modify memory directly.
1001 *
1002 * @mr: the #MemoryRegion to be initialized.
1003 * @owner: the object that tracks the region's reference count
1004 * @name: Region name, becomes part of RAMBlock name used in migration stream
1005 *        must be unique within any device
1006 * @size: size of the region.
1007 * @ptr: memory to be mapped; must contain at least @size bytes.
1008 *
1009 * Note that this function does not do anything to cause the data in the
1010 * RAM memory region to be migrated; that is the responsibility of the caller.
1011 */
1012void memory_region_init_ram_ptr(MemoryRegion *mr,
1013                                struct Object *owner,
1014                                const char *name,
1015                                uint64_t size,
1016                                void *ptr);
1017
1018/**
1019 * memory_region_init_ram_device_ptr:  Initialize RAM device memory region from
1020 *                                     a user-provided pointer.
1021 *
1022 * A RAM device represents a mapping to a physical device, such as to a PCI
1023 * MMIO BAR of an vfio-pci assigned device.  The memory region may be mapped
1024 * into the VM address space and access to the region will modify memory
1025 * directly.  However, the memory region should not be included in a memory
1026 * dump (device may not be enabled/mapped at the time of the dump), and
1027 * operations incompatible with manipulating MMIO should be avoided.  Replaces
1028 * skip_dump flag.
1029 *
1030 * @mr: the #MemoryRegion to be initialized.
1031 * @owner: the object that tracks the region's reference count
1032 * @name: the name of the region.
1033 * @size: size of the region.
1034 * @ptr: memory to be mapped; must contain at least @size bytes.
1035 *
1036 * Note that this function does not do anything to cause the data in the
1037 * RAM memory region to be migrated; that is the responsibility of the caller.
1038 * (For RAM device memory regions, migrating the contents rarely makes sense.)
1039 */
1040void memory_region_init_ram_device_ptr(MemoryRegion *mr,
1041                                       struct Object *owner,
1042                                       const char *name,
1043                                       uint64_t size,
1044                                       void *ptr);
1045
1046/**
1047 * memory_region_init_alias: Initialize a memory region that aliases all or a
1048 *                           part of another memory region.
1049 *
1050 * @mr: the #MemoryRegion to be initialized.
1051 * @owner: the object that tracks the region's reference count
1052 * @name: used for debugging; not visible to the user or ABI
1053 * @orig: the region to be referenced; @mr will be equivalent to
1054 *        @orig between @offset and @offset + @size - 1.
1055 * @offset: start of the section in @orig to be referenced.
1056 * @size: size of the region.
1057 */
1058void memory_region_init_alias(MemoryRegion *mr,
1059                              struct Object *owner,
1060                              const char *name,
1061                              MemoryRegion *orig,
1062                              hwaddr offset,
1063                              uint64_t size);
1064
1065/**
1066 * memory_region_init_rom_nomigrate: Initialize a ROM memory region.
1067 *
1068 * This has the same effect as calling memory_region_init_ram_nomigrate()
1069 * and then marking the resulting region read-only with
1070 * memory_region_set_readonly().
1071 *
1072 * Note that this function does not do anything to cause the data in the
1073 * RAM side of the memory region to be migrated; that is the responsibility
1074 * of the caller.
1075 *
1076 * @mr: the #MemoryRegion to be initialized.
1077 * @owner: the object that tracks the region's reference count
1078 * @name: Region name, becomes part of RAMBlock name used in migration stream
1079 *        must be unique within any device
1080 * @size: size of the region.
1081 * @errp: pointer to Error*, to store an error if it happens.
1082 */
1083void memory_region_init_rom_nomigrate(MemoryRegion *mr,
1084                                      struct Object *owner,
1085                                      const char *name,
1086                                      uint64_t size,
1087                                      Error **errp);
1088
1089/**
1090 * memory_region_init_rom_device_nomigrate:  Initialize a ROM memory region.
1091 *                                 Writes are handled via callbacks.
1092 *
1093 * Note that this function does not do anything to cause the data in the
1094 * RAM side of the memory region to be migrated; that is the responsibility
1095 * of the caller.
1096 *
1097 * @mr: the #MemoryRegion to be initialized.
1098 * @owner: the object that tracks the region's reference count
1099 * @ops: callbacks for write access handling (must not be NULL).
1100 * @opaque: passed to the read and write callbacks of the @ops structure.
1101 * @name: Region name, becomes part of RAMBlock name used in migration stream
1102 *        must be unique within any device
1103 * @size: size of the region.
1104 * @errp: pointer to Error*, to store an error if it happens.
1105 */
1106void memory_region_init_rom_device_nomigrate(MemoryRegion *mr,
1107                                             struct Object *owner,
1108                                             const MemoryRegionOps *ops,
1109                                             void *opaque,
1110                                             const char *name,
1111                                             uint64_t size,
1112                                             Error **errp);
1113
1114/**
1115 * memory_region_init_iommu: Initialize a memory region of a custom type
1116 * that translates addresses
1117 *
1118 * An IOMMU region translates addresses and forwards accesses to a target
1119 * memory region.
1120 *
1121 * The IOMMU implementation must define a subclass of TYPE_IOMMU_MEMORY_REGION.
1122 * @_iommu_mr should be a pointer to enough memory for an instance of
1123 * that subclass, @instance_size is the size of that subclass, and
1124 * @mrtypename is its name. This function will initialize @_iommu_mr as an
1125 * instance of the subclass, and its methods will then be called to handle
1126 * accesses to the memory region. See the documentation of
1127 * #IOMMUMemoryRegionClass for further details.
1128 *
1129 * @_iommu_mr: the #IOMMUMemoryRegion to be initialized
1130 * @instance_size: the IOMMUMemoryRegion subclass instance size
1131 * @mrtypename: the type name of the #IOMMUMemoryRegion
1132 * @owner: the object that tracks the region's reference count
1133 * @name: used for debugging; not visible to the user or ABI
1134 * @size: size of the region.
1135 */
1136void memory_region_init_iommu(void *_iommu_mr,
1137                              size_t instance_size,
1138                              const char *mrtypename,
1139                              Object *owner,
1140                              const char *name,
1141                              uint64_t size);
1142
1143/**
1144 * memory_region_init_ram - Initialize RAM memory region.  Accesses into the
1145 *                          region will modify memory directly.
1146 *
1147 * @mr: the #MemoryRegion to be initialized
1148 * @owner: the object that tracks the region's reference count (must be
1149 *         TYPE_DEVICE or a subclass of TYPE_DEVICE, or NULL)
1150 * @name: name of the memory region
1151 * @size: size of the region in bytes
1152 * @errp: pointer to Error*, to store an error if it happens.
1153 *
1154 * This function allocates RAM for a board model or device, and
1155 * arranges for it to be migrated (by calling vmstate_register_ram()
1156 * if @owner is a DeviceState, or vmstate_register_ram_global() if
1157 * @owner is NULL).
1158 *
1159 * TODO: Currently we restrict @owner to being either NULL (for
1160 * global RAM regions with no owner) or devices, so that we can
1161 * give the RAM block a unique name for migration purposes.
1162 * We should lift this restriction and allow arbitrary Objects.
1163 * If you pass a non-NULL non-device @owner then we will assert.
1164 */
1165void memory_region_init_ram(MemoryRegion *mr,
1166                            struct Object *owner,
1167                            const char *name,
1168                            uint64_t size,
1169                            Error **errp);
1170
1171/**
1172 * memory_region_init_rom: Initialize a ROM memory region.
1173 *
1174 * This has the same effect as calling memory_region_init_ram()
1175 * and then marking the resulting region read-only with
1176 * memory_region_set_readonly(). This includes arranging for the
1177 * contents to be migrated.
1178 *
1179 * TODO: Currently we restrict @owner to being either NULL (for
1180 * global RAM regions with no owner) or devices, so that we can
1181 * give the RAM block a unique name for migration purposes.
1182 * We should lift this restriction and allow arbitrary Objects.
1183 * If you pass a non-NULL non-device @owner then we will assert.
1184 *
1185 * @mr: the #MemoryRegion to be initialized.
1186 * @owner: the object that tracks the region's reference count
1187 * @name: Region name, becomes part of RAMBlock name used in migration stream
1188 *        must be unique within any device
1189 * @size: size of the region.
1190 * @errp: pointer to Error*, to store an error if it happens.
1191 */
1192void memory_region_init_rom(MemoryRegion *mr,
1193                            struct Object *owner,
1194                            const char *name,
1195                            uint64_t size,
1196                            Error **errp);
1197
1198/**
1199 * memory_region_init_rom_device:  Initialize a ROM memory region.
1200 *                                 Writes are handled via callbacks.
1201 *
1202 * This function initializes a memory region backed by RAM for reads
1203 * and callbacks for writes, and arranges for the RAM backing to
1204 * be migrated (by calling vmstate_register_ram()
1205 * if @owner is a DeviceState, or vmstate_register_ram_global() if
1206 * @owner is NULL).
1207 *
1208 * TODO: Currently we restrict @owner to being either NULL (for
1209 * global RAM regions with no owner) or devices, so that we can
1210 * give the RAM block a unique name for migration purposes.
1211 * We should lift this restriction and allow arbitrary Objects.
1212 * If you pass a non-NULL non-device @owner then we will assert.
1213 *
1214 * @mr: the #MemoryRegion to be initialized.
1215 * @owner: the object that tracks the region's reference count
1216 * @ops: callbacks for write access handling (must not be NULL).
1217 * @opaque: passed to the read and write callbacks of the @ops structure.
1218 * @name: Region name, becomes part of RAMBlock name used in migration stream
1219 *        must be unique within any device
1220 * @size: size of the region.
1221 * @errp: pointer to Error*, to store an error if it happens.
1222 */
1223void memory_region_init_rom_device(MemoryRegion *mr,
1224                                   struct Object *owner,
1225                                   const MemoryRegionOps *ops,
1226                                   void *opaque,
1227                                   const char *name,
1228                                   uint64_t size,
1229                                   Error **errp);
1230
1231
1232/**
1233 * memory_region_owner: get a memory region's owner.
1234 *
1235 * @mr: the memory region being queried.
1236 */
1237struct Object *memory_region_owner(MemoryRegion *mr);
1238
1239/**
1240 * memory_region_size: get a memory region's size.
1241 *
1242 * @mr: the memory region being queried.
1243 */
1244uint64_t memory_region_size(MemoryRegion *mr);
1245
1246/**
1247 * memory_region_is_ram: check whether a memory region is random access
1248 *
1249 * Returns %true if a memory region is random access.
1250 *
1251 * @mr: the memory region being queried
1252 */
1253static inline bool memory_region_is_ram(MemoryRegion *mr)
1254{
1255    return mr->ram;
1256}
1257
1258/**
1259 * memory_region_is_ram_device: check whether a memory region is a ram device
1260 *
1261 * Returns %true if a memory region is a device backed ram region
1262 *
1263 * @mr: the memory region being queried
1264 */
1265bool memory_region_is_ram_device(MemoryRegion *mr);
1266
1267/**
1268 * memory_region_is_romd: check whether a memory region is in ROMD mode
1269 *
1270 * Returns %true if a memory region is a ROM device and currently set to allow
1271 * direct reads.
1272 *
1273 * @mr: the memory region being queried
1274 */
1275static inline bool memory_region_is_romd(MemoryRegion *mr)
1276{
1277    return mr->rom_device && mr->romd_mode;
1278}
1279
1280/**
1281 * memory_region_get_iommu: check whether a memory region is an iommu
1282 *
1283 * Returns pointer to IOMMUMemoryRegion if a memory region is an iommu,
1284 * otherwise NULL.
1285 *
1286 * @mr: the memory region being queried
1287 */
1288static inline IOMMUMemoryRegion *memory_region_get_iommu(MemoryRegion *mr)
1289{
1290    if (mr->alias) {
1291        return memory_region_get_iommu(mr->alias);
1292    }
1293    if (mr->is_iommu) {
1294        return (IOMMUMemoryRegion *) mr;
1295    }
1296    return NULL;
1297}
1298
1299/**
1300 * memory_region_get_iommu_class_nocheck: returns iommu memory region class
1301 *   if an iommu or NULL if not
1302 *
1303 * Returns pointer to IOMMUMemoryRegionClass if a memory region is an iommu,
1304 * otherwise NULL. This is fast path avoiding QOM checking, use with caution.
1305 *
1306 * @iommu_mr: the memory region being queried
1307 */
1308static inline IOMMUMemoryRegionClass *memory_region_get_iommu_class_nocheck(
1309        IOMMUMemoryRegion *iommu_mr)
1310{
1311    return (IOMMUMemoryRegionClass *) (((Object *)iommu_mr)->class);
1312}
1313
1314#define memory_region_is_iommu(mr) (memory_region_get_iommu(mr) != NULL)
1315
1316/**
1317 * memory_region_iommu_get_min_page_size: get minimum supported page size
1318 * for an iommu
1319 *
1320 * Returns minimum supported page size for an iommu.
1321 *
1322 * @iommu_mr: the memory region being queried
1323 */
1324uint64_t memory_region_iommu_get_min_page_size(IOMMUMemoryRegion *iommu_mr);
1325
1326/**
1327 * memory_region_notify_iommu: notify a change in an IOMMU translation entry.
1328 *
1329 * The notification type will be decided by entry.perm bits:
1330 *
1331 * - For UNMAP (cache invalidation) notifies: set entry.perm to IOMMU_NONE.
1332 * - For MAP (newly added entry) notifies: set entry.perm to the
1333 *   permission of the page (which is definitely !IOMMU_NONE).
1334 *
1335 * Note: for any IOMMU implementation, an in-place mapping change
1336 * should be notified with an UNMAP followed by a MAP.
1337 *
1338 * @iommu_mr: the memory region that was changed
1339 * @iommu_idx: the IOMMU index for the translation table which has changed
1340 * @entry: the new entry in the IOMMU translation table.  The entry
1341 *         replaces all old entries for the same virtual I/O address range.
1342 *         Deleted entries have .@perm == 0.
1343 */
1344void memory_region_notify_iommu(IOMMUMemoryRegion *iommu_mr,
1345                                int iommu_idx,
1346                                IOMMUTLBEntry entry);
1347
1348/**
1349 * memory_region_notify_one: notify a change in an IOMMU translation
1350 *                           entry to a single notifier
1351 *
1352 * This works just like memory_region_notify_iommu(), but it only
1353 * notifies a specific notifier, not all of them.
1354 *
1355 * @notifier: the notifier to be notified
1356 * @entry: the new entry in the IOMMU translation table.  The entry
1357 *         replaces all old entries for the same virtual I/O address range.
1358 *         Deleted entries have .@perm == 0.
1359 */
1360void memory_region_notify_one(IOMMUNotifier *notifier,
1361                              IOMMUTLBEntry *entry);
1362
1363/**
1364 * memory_region_register_iommu_notifier: register a notifier for changes to
1365 * IOMMU translation entries.
1366 *
1367 * Returns 0 on success, or a negative errno otherwise. In particular,
1368 * -EINVAL indicates that at least one of the attributes of the notifier
1369 * is not supported (flag/range) by the IOMMU memory region. In case of error
1370 * the error object must be created.
1371 *
1372 * @mr: the memory region to observe
1373 * @n: the IOMMUNotifier to be added; the notify callback receives a
1374 *     pointer to an #IOMMUTLBEntry as the opaque value; the pointer
1375 *     ceases to be valid on exit from the notifier.
1376 * @errp: pointer to Error*, to store an error if it happens.
1377 */
1378int memory_region_register_iommu_notifier(MemoryRegion *mr,
1379                                          IOMMUNotifier *n, Error **errp);
1380
1381/**
1382 * memory_region_iommu_replay: replay existing IOMMU translations to
1383 * a notifier with the minimum page granularity returned by
1384 * mr->iommu_ops->get_page_size().
1385 *
1386 * Note: this is not related to record-and-replay functionality.
1387 *
1388 * @iommu_mr: the memory region to observe
1389 * @n: the notifier to which to replay iommu mappings
1390 */
1391void memory_region_iommu_replay(IOMMUMemoryRegion *iommu_mr, IOMMUNotifier *n);
1392
1393/**
1394 * memory_region_unregister_iommu_notifier: unregister a notifier for
1395 * changes to IOMMU translation entries.
1396 *
1397 * @mr: the memory region which was observed and for which notity_stopped()
1398 *      needs to be called
1399 * @n: the notifier to be removed.
1400 */
1401void memory_region_unregister_iommu_notifier(MemoryRegion *mr,
1402                                             IOMMUNotifier *n);
1403
1404/**
1405 * memory_region_iommu_get_attr: return an IOMMU attr if get_attr() is
1406 * defined on the IOMMU.
1407 *
1408 * Returns 0 on success, or a negative errno otherwise. In particular,
1409 * -EINVAL indicates that the IOMMU does not support the requested
1410 * attribute.
1411 *
1412 * @iommu_mr: the memory region
1413 * @attr: the requested attribute
1414 * @data: a pointer to the requested attribute data
1415 */
1416int memory_region_iommu_get_attr(IOMMUMemoryRegion *iommu_mr,
1417                                 enum IOMMUMemoryRegionAttr attr,
1418                                 void *data);
1419
1420/**
1421 * memory_region_iommu_attrs_to_index: return the IOMMU index to
1422 * use for translations with the given memory transaction attributes.
1423 *
1424 * @iommu_mr: the memory region
1425 * @attrs: the memory transaction attributes
1426 */
1427int memory_region_iommu_attrs_to_index(IOMMUMemoryRegion *iommu_mr,
1428                                       MemTxAttrs attrs);
1429
1430/**
1431 * memory_region_iommu_num_indexes: return the total number of IOMMU
1432 * indexes that this IOMMU supports.
1433 *
1434 * @iommu_mr: the memory region
1435 */
1436int memory_region_iommu_num_indexes(IOMMUMemoryRegion *iommu_mr);
1437
1438/**
1439 * memory_region_iommu_set_page_size_mask: set the supported page
1440 * sizes for a given IOMMU memory region
1441 *
1442 * @iommu_mr: IOMMU memory region
1443 * @page_size_mask: supported page size mask
1444 * @errp: pointer to Error*, to store an error if it happens.
1445 */
1446int memory_region_iommu_set_page_size_mask(IOMMUMemoryRegion *iommu_mr,
1447                                           uint64_t page_size_mask,
1448                                           Error **errp);
1449
1450/**
1451 * memory_region_name: get a memory region's name
1452 *
1453 * Returns the string that was used to initialize the memory region.
1454 *
1455 * @mr: the memory region being queried
1456 */
1457const char *memory_region_name(const MemoryRegion *mr);
1458
1459/**
1460 * memory_region_is_logging: return whether a memory region is logging writes
1461 *
1462 * Returns %true if the memory region is logging writes for the given client
1463 *
1464 * @mr: the memory region being queried
1465 * @client: the client being queried
1466 */
1467bool memory_region_is_logging(MemoryRegion *mr, uint8_t client);
1468
1469/**
1470 * memory_region_get_dirty_log_mask: return the clients for which a
1471 * memory region is logging writes.
1472 *
1473 * Returns a bitmap of clients, in which the DIRTY_MEMORY_* constants
1474 * are the bit indices.
1475 *
1476 * @mr: the memory region being queried
1477 */
1478uint8_t memory_region_get_dirty_log_mask(MemoryRegion *mr);
1479
1480/**
1481 * memory_region_is_rom: check whether a memory region is ROM
1482 *
1483 * Returns %true if a memory region is read-only memory.
1484 *
1485 * @mr: the memory region being queried
1486 */
1487static inline bool memory_region_is_rom(MemoryRegion *mr)
1488{
1489    return mr->ram && mr->readonly;
1490}
1491
1492/**
1493 * memory_region_is_nonvolatile: check whether a memory region is non-volatile
1494 *
1495 * Returns %true is a memory region is non-volatile memory.
1496 *
1497 * @mr: the memory region being queried
1498 */
1499static inline bool memory_region_is_nonvolatile(MemoryRegion *mr)
1500{
1501    return mr->nonvolatile;
1502}
1503
1504/**
1505 * memory_region_get_fd: Get a file descriptor backing a RAM memory region.
1506 *
1507 * Returns a file descriptor backing a file-based RAM memory region,
1508 * or -1 if the region is not a file-based RAM memory region.
1509 *
1510 * @mr: the RAM or alias memory region being queried.
1511 */
1512int memory_region_get_fd(MemoryRegion *mr);
1513
1514/**
1515 * memory_region_from_host: Convert a pointer into a RAM memory region
1516 * and an offset within it.
1517 *
1518 * Given a host pointer inside a RAM memory region (created with
1519 * memory_region_init_ram() or memory_region_init_ram_ptr()), return
1520 * the MemoryRegion and the offset within it.
1521 *
1522 * Use with care; by the time this function returns, the returned pointer is
1523 * not protected by RCU anymore.  If the caller is not within an RCU critical
1524 * section and does not hold the iothread lock, it must have other means of
1525 * protecting the pointer, such as a reference to the region that includes
1526 * the incoming ram_addr_t.
1527 *
1528 * @ptr: the host pointer to be converted
1529 * @offset: the offset within memory region
1530 */
1531MemoryRegion *memory_region_from_host(void *ptr, ram_addr_t *offset);
1532
1533/**
1534 * memory_region_get_ram_ptr: Get a pointer into a RAM memory region.
1535 *
1536 * Returns a host pointer to a RAM memory region (created with
1537 * memory_region_init_ram() or memory_region_init_ram_ptr()).
1538 *
1539 * Use with care; by the time this function returns, the returned pointer is
1540 * not protected by RCU anymore.  If the caller is not within an RCU critical
1541 * section and does not hold the iothread lock, it must have other means of
1542 * protecting the pointer, such as a reference to the region that includes
1543 * the incoming ram_addr_t.
1544 *
1545 * @mr: the memory region being queried.
1546 */
1547void *memory_region_get_ram_ptr(MemoryRegion *mr);
1548
1549/* memory_region_ram_resize: Resize a RAM region.
1550 *
1551 * Only legal before guest might have detected the memory size: e.g. on
1552 * incoming migration, or right after reset.
1553 *
1554 * @mr: a memory region created with @memory_region_init_resizeable_ram.
1555 * @newsize: the new size the region
1556 * @errp: pointer to Error*, to store an error if it happens.
1557 */
1558void memory_region_ram_resize(MemoryRegion *mr, ram_addr_t newsize,
1559                              Error **errp);
1560
1561/**
1562 * memory_region_msync: Synchronize selected address range of
1563 * a memory mapped region
1564 *
1565 * @mr: the memory region to be msync
1566 * @addr: the initial address of the range to be sync
1567 * @size: the size of the range to be sync
1568 */
1569void memory_region_msync(MemoryRegion *mr, hwaddr addr, hwaddr size);
1570
1571/**
1572 * memory_region_writeback: Trigger cache writeback for
1573 * selected address range
1574 *
1575 * @mr: the memory region to be updated
1576 * @addr: the initial address of the range to be written back
1577 * @size: the size of the range to be written back
1578 */
1579void memory_region_writeback(MemoryRegion *mr, hwaddr addr, hwaddr size);
1580
1581/**
1582 * memory_region_set_log: Turn dirty logging on or off for a region.
1583 *
1584 * Turns dirty logging on or off for a specified client (display, migration).
1585 * Only meaningful for RAM regions.
1586 *
1587 * @mr: the memory region being updated.
1588 * @log: whether dirty logging is to be enabled or disabled.
1589 * @client: the user of the logging information; %DIRTY_MEMORY_VGA only.
1590 */
1591void memory_region_set_log(MemoryRegion *mr, bool log, unsigned client);
1592
1593/**
1594 * memory_region_set_dirty: Mark a range of bytes as dirty in a memory region.
1595 *
1596 * Marks a range of bytes as dirty, after it has been dirtied outside
1597 * guest code.
1598 *
1599 * @mr: the memory region being dirtied.
1600 * @addr: the address (relative to the start of the region) being dirtied.
1601 * @size: size of the range being dirtied.
1602 */
1603void memory_region_set_dirty(MemoryRegion *mr, hwaddr addr,
1604                             hwaddr size);
1605
1606/**
1607 * memory_region_clear_dirty_bitmap - clear dirty bitmap for memory range
1608 *
1609 * This function is called when the caller wants to clear the remote
1610 * dirty bitmap of a memory range within the memory region.  This can
1611 * be used by e.g. KVM to manually clear dirty log when
1612 * KVM_CAP_MANUAL_DIRTY_LOG_PROTECT is declared support by the host
1613 * kernel.
1614 *
1615 * @mr:     the memory region to clear the dirty log upon
1616 * @start:  start address offset within the memory region
1617 * @len:    length of the memory region to clear dirty bitmap
1618 */
1619void memory_region_clear_dirty_bitmap(MemoryRegion *mr, hwaddr start,
1620                                      hwaddr len);
1621
1622/**
1623 * memory_region_snapshot_and_clear_dirty: Get a snapshot of the dirty
1624 *                                         bitmap and clear it.
1625 *
1626 * Creates a snapshot of the dirty bitmap, clears the dirty bitmap and
1627 * returns the snapshot.  The snapshot can then be used to query dirty
1628 * status, using memory_region_snapshot_get_dirty.  Snapshotting allows
1629 * querying the same page multiple times, which is especially useful for
1630 * display updates where the scanlines often are not page aligned.
1631 *
1632 * The dirty bitmap region which gets copyed into the snapshot (and
1633 * cleared afterwards) can be larger than requested.  The boundaries
1634 * are rounded up/down so complete bitmap longs (covering 64 pages on
1635 * 64bit hosts) can be copied over into the bitmap snapshot.  Which
1636 * isn't a problem for display updates as the extra pages are outside
1637 * the visible area, and in case the visible area changes a full
1638 * display redraw is due anyway.  Should other use cases for this
1639 * function emerge we might have to revisit this implementation
1640 * detail.
1641 *
1642 * Use g_free to release DirtyBitmapSnapshot.
1643 *
1644 * @mr: the memory region being queried.
1645 * @addr: the address (relative to the start of the region) being queried.
1646 * @size: the size of the range being queried.
1647 * @client: the user of the logging information; typically %DIRTY_MEMORY_VGA.
1648 */
1649DirtyBitmapSnapshot *memory_region_snapshot_and_clear_dirty(MemoryRegion *mr,
1650                                                            hwaddr addr,
1651                                                            hwaddr size,
1652                                                            unsigned client);
1653
1654/**
1655 * memory_region_snapshot_get_dirty: Check whether a range of bytes is dirty
1656 *                                   in the specified dirty bitmap snapshot.
1657 *
1658 * @mr: the memory region being queried.
1659 * @snap: the dirty bitmap snapshot
1660 * @addr: the address (relative to the start of the region) being queried.
1661 * @size: the size of the range being queried.
1662 */
1663bool memory_region_snapshot_get_dirty(MemoryRegion *mr,
1664                                      DirtyBitmapSnapshot *snap,
1665                                      hwaddr addr, hwaddr size);
1666
1667/**
1668 * memory_region_reset_dirty: Mark a range of pages as clean, for a specified
1669 *                            client.
1670 *
1671 * Marks a range of pages as no longer dirty.
1672 *
1673 * @mr: the region being updated.
1674 * @addr: the start of the subrange being cleaned.
1675 * @size: the size of the subrange being cleaned.
1676 * @client: the user of the logging information; %DIRTY_MEMORY_MIGRATION or
1677 *          %DIRTY_MEMORY_VGA.
1678 */
1679void memory_region_reset_dirty(MemoryRegion *mr, hwaddr addr,
1680                               hwaddr size, unsigned client);
1681
1682/**
1683 * memory_region_flush_rom_device: Mark a range of pages dirty and invalidate
1684 *                                 TBs (for self-modifying code).
1685 *
1686 * The MemoryRegionOps->write() callback of a ROM device must use this function
1687 * to mark byte ranges that have been modified internally, such as by directly
1688 * accessing the memory returned by memory_region_get_ram_ptr().
1689 *
1690 * This function marks the range dirty and invalidates TBs so that TCG can
1691 * detect self-modifying code.
1692 *
1693 * @mr: the region being flushed.
1694 * @addr: the start, relative to the start of the region, of the range being
1695 *        flushed.
1696 * @size: the size, in bytes, of the range being flushed.
1697 */
1698void memory_region_flush_rom_device(MemoryRegion *mr, hwaddr addr, hwaddr size);
1699
1700/**
1701 * memory_region_set_readonly: Turn a memory region read-only (or read-write)
1702 *
1703 * Allows a memory region to be marked as read-only (turning it into a ROM).
1704 * only useful on RAM regions.
1705 *
1706 * @mr: the region being updated.
1707 * @readonly: whether rhe region is to be ROM or RAM.
1708 */
1709void memory_region_set_readonly(MemoryRegion *mr, bool readonly);
1710
1711/**
1712 * memory_region_set_nonvolatile: Turn a memory region non-volatile
1713 *
1714 * Allows a memory region to be marked as non-volatile.
1715 * only useful on RAM regions.
1716 *
1717 * @mr: the region being updated.
1718 * @nonvolatile: whether rhe region is to be non-volatile.
1719 */
1720void memory_region_set_nonvolatile(MemoryRegion *mr, bool nonvolatile);
1721
1722/**
1723 * memory_region_rom_device_set_romd: enable/disable ROMD mode
1724 *
1725 * Allows a ROM device (initialized with memory_region_init_rom_device() to
1726 * set to ROMD mode (default) or MMIO mode.  When it is in ROMD mode, the
1727 * device is mapped to guest memory and satisfies read access directly.
1728 * When in MMIO mode, reads are forwarded to the #MemoryRegion.read function.
1729 * Writes are always handled by the #MemoryRegion.write function.
1730 *
1731 * @mr: the memory region to be updated
1732 * @romd_mode: %true to put the region into ROMD mode
1733 */
1734void memory_region_rom_device_set_romd(MemoryRegion *mr, bool romd_mode);
1735
1736/**
1737 * memory_region_set_coalescing: Enable memory coalescing for the region.
1738 *
1739 * Enabled writes to a region to be queued for later processing. MMIO ->write
1740 * callbacks may be delayed until a non-coalesced MMIO is issued.
1741 * Only useful for IO regions.  Roughly similar to write-combining hardware.
1742 *
1743 * @mr: the memory region to be write coalesced
1744 */
1745void memory_region_set_coalescing(MemoryRegion *mr);
1746
1747/**
1748 * memory_region_add_coalescing: Enable memory coalescing for a sub-range of
1749 *                               a region.
1750 *
1751 * Like memory_region_set_coalescing(), but works on a sub-range of a region.
1752 * Multiple calls can be issued coalesced disjoint ranges.
1753 *
1754 * @mr: the memory region to be updated.
1755 * @offset: the start of the range within the region to be coalesced.
1756 * @size: the size of the subrange to be coalesced.
1757 */
1758void memory_region_add_coalescing(MemoryRegion *mr,
1759                                  hwaddr offset,
1760                                  uint64_t size);
1761
1762/**
1763 * memory_region_clear_coalescing: Disable MMIO coalescing for the region.
1764 *
1765 * Disables any coalescing caused by memory_region_set_coalescing() or
1766 * memory_region_add_coalescing().  Roughly equivalent to uncacheble memory
1767 * hardware.
1768 *
1769 * @mr: the memory region to be updated.
1770 */
1771void memory_region_clear_coalescing(MemoryRegion *mr);
1772
1773/**
1774 * memory_region_set_flush_coalesced: Enforce memory coalescing flush before
1775 *                                    accesses.
1776 *
1777 * Ensure that pending coalesced MMIO request are flushed before the memory
1778 * region is accessed. This property is automatically enabled for all regions
1779 * passed to memory_region_set_coalescing() and memory_region_add_coalescing().
1780 *
1781 * @mr: the memory region to be updated.
1782 */
1783void memory_region_set_flush_coalesced(MemoryRegion *mr);
1784
1785/**
1786 * memory_region_clear_flush_coalesced: Disable memory coalescing flush before
1787 *                                      accesses.
1788 *
1789 * Clear the automatic coalesced MMIO flushing enabled via
1790 * memory_region_set_flush_coalesced. Note that this service has no effect on
1791 * memory regions that have MMIO coalescing enabled for themselves. For them,
1792 * automatic flushing will stop once coalescing is disabled.
1793 *
1794 * @mr: the memory region to be updated.
1795 */
1796void memory_region_clear_flush_coalesced(MemoryRegion *mr);
1797
1798/**
1799 * memory_region_add_eventfd: Request an eventfd to be triggered when a word
1800 *                            is written to a location.
1801 *
1802 * Marks a word in an IO region (initialized with memory_region_init_io())
1803 * as a trigger for an eventfd event.  The I/O callback will not be called.
1804 * The caller must be prepared to handle failure (that is, take the required
1805 * action if the callback _is_ called).
1806 *
1807 * @mr: the memory region being updated.
1808 * @addr: the address within @mr that is to be monitored
1809 * @size: the size of the access to trigger the eventfd
1810 * @match_data: whether to match against @data, instead of just @addr
1811 * @data: the data to match against the guest write
1812 * @e: event notifier to be triggered when @addr, @size, and @data all match.
1813 **/
1814void memory_region_add_eventfd(MemoryRegion *mr,
1815                               hwaddr addr,
1816                               unsigned size,
1817                               bool match_data,
1818                               uint64_t data,
1819                               EventNotifier *e);
1820
1821/**
1822 * memory_region_del_eventfd: Cancel an eventfd.
1823 *
1824 * Cancels an eventfd trigger requested by a previous
1825 * memory_region_add_eventfd() call.
1826 *
1827 * @mr: the memory region being updated.
1828 * @addr: the address within @mr that is to be monitored
1829 * @size: the size of the access to trigger the eventfd
1830 * @match_data: whether to match against @data, instead of just @addr
1831 * @data: the data to match against the guest write
1832 * @e: event notifier to be triggered when @addr, @size, and @data all match.
1833 */
1834void memory_region_del_eventfd(MemoryRegion *mr,
1835                               hwaddr addr,
1836                               unsigned size,
1837                               bool match_data,
1838                               uint64_t data,
1839                               EventNotifier *e);
1840
1841/**
1842 * memory_region_add_subregion: Add a subregion to a container.
1843 *
1844 * Adds a subregion at @offset.  The subregion may not overlap with other
1845 * subregions (except for those explicitly marked as overlapping).  A region
1846 * may only be added once as a subregion (unless removed with
1847 * memory_region_del_subregion()); use memory_region_init_alias() if you
1848 * want a region to be a subregion in multiple locations.
1849 *
1850 * @mr: the region to contain the new subregion; must be a container
1851 *      initialized with memory_region_init().
1852 * @offset: the offset relative to @mr where @subregion is added.
1853 * @subregion: the subregion to be added.
1854 */
1855void memory_region_add_subregion(MemoryRegion *mr,
1856                                 hwaddr offset,
1857                                 MemoryRegion *subregion);
1858/**
1859 * memory_region_add_subregion_overlap: Add a subregion to a container
1860 *                                      with overlap.
1861 *
1862 * Adds a subregion at @offset.  The subregion may overlap with other
1863 * subregions.  Conflicts are resolved by having a higher @priority hide a
1864 * lower @priority. Subregions without priority are taken as @priority 0.
1865 * A region may only be added once as a subregion (unless removed with
1866 * memory_region_del_subregion()); use memory_region_init_alias() if you
1867 * want a region to be a subregion in multiple locations.
1868 *
1869 * @mr: the region to contain the new subregion; must be a container
1870 *      initialized with memory_region_init().
1871 * @offset: the offset relative to @mr where @subregion is added.
1872 * @subregion: the subregion to be added.
1873 * @priority: used for resolving overlaps; highest priority wins.
1874 */
1875void memory_region_add_subregion_overlap(MemoryRegion *mr,
1876                                         hwaddr offset,
1877                                         MemoryRegion *subregion,
1878                                         int priority);
1879
1880/**
1881 * memory_region_get_ram_addr: Get the ram address associated with a memory
1882 *                             region
1883 *
1884 * @mr: the region to be queried
1885 */
1886ram_addr_t memory_region_get_ram_addr(MemoryRegion *mr);
1887
1888uint64_t memory_region_get_alignment(const MemoryRegion *mr);
1889/**
1890 * memory_region_del_subregion: Remove a subregion.
1891 *
1892 * Removes a subregion from its container.
1893 *
1894 * @mr: the container to be updated.
1895 * @subregion: the region being removed; must be a current subregion of @mr.
1896 */
1897void memory_region_del_subregion(MemoryRegion *mr,
1898                                 MemoryRegion *subregion);
1899
1900/*
1901 * memory_region_set_enabled: dynamically enable or disable a region
1902 *
1903 * Enables or disables a memory region.  A disabled memory region
1904 * ignores all accesses to itself and its subregions.  It does not
1905 * obscure sibling subregions with lower priority - it simply behaves as
1906 * if it was removed from the hierarchy.
1907 *
1908 * Regions default to being enabled.
1909 *
1910 * @mr: the region to be updated
1911 * @enabled: whether to enable or disable the region
1912 */
1913void memory_region_set_enabled(MemoryRegion *mr, bool enabled);
1914
1915/*
1916 * memory_region_set_address: dynamically update the address of a region
1917 *
1918 * Dynamically updates the address of a region, relative to its container.
1919 * May be used on regions are currently part of a memory hierarchy.
1920 *
1921 * @mr: the region to be updated
1922 * @addr: new address, relative to container region
1923 */
1924void memory_region_set_address(MemoryRegion *mr, hwaddr addr);
1925
1926/*
1927 * memory_region_set_size: dynamically update the size of a region.
1928 *
1929 * Dynamically updates the size of a region.
1930 *
1931 * @mr: the region to be updated
1932 * @size: used size of the region.
1933 */
1934void memory_region_set_size(MemoryRegion *mr, uint64_t size);
1935
1936/*
1937 * memory_region_set_alias_offset: dynamically update a memory alias's offset
1938 *
1939 * Dynamically updates the offset into the target region that an alias points
1940 * to, as if the fourth argument to memory_region_init_alias() has changed.
1941 *
1942 * @mr: the #MemoryRegion to be updated; should be an alias.
1943 * @offset: the new offset into the target memory region
1944 */
1945void memory_region_set_alias_offset(MemoryRegion *mr,
1946                                    hwaddr offset);
1947
1948/**
1949 * memory_region_present: checks if an address relative to a @container
1950 * translates into #MemoryRegion within @container
1951 *
1952 * Answer whether a #MemoryRegion within @container covers the address
1953 * @addr.
1954 *
1955 * @container: a #MemoryRegion within which @addr is a relative address
1956 * @addr: the area within @container to be searched
1957 */
1958bool memory_region_present(MemoryRegion *container, hwaddr addr);
1959
1960/**
1961 * memory_region_is_mapped: returns true if #MemoryRegion is mapped
1962 * into any address space.
1963 *
1964 * @mr: a #MemoryRegion which should be checked if it's mapped
1965 */
1966bool memory_region_is_mapped(MemoryRegion *mr);
1967
1968/**
1969 * memory_region_find: translate an address/size relative to a
1970 * MemoryRegion into a #MemoryRegionSection.
1971 *
1972 * Locates the first #MemoryRegion within @mr that overlaps the range
1973 * given by @addr and @size.
1974 *
1975 * Returns a #MemoryRegionSection that describes a contiguous overlap.
1976 * It will have the following characteristics:
1977 * - @size = 0 iff no overlap was found
1978 * - @mr is non-%NULL iff an overlap was found
1979 *
1980 * Remember that in the return value the @offset_within_region is
1981 * relative to the returned region (in the .@mr field), not to the
1982 * @mr argument.
1983 *
1984 * Similarly, the .@offset_within_address_space is relative to the
1985 * address space that contains both regions, the passed and the
1986 * returned one.  However, in the special case where the @mr argument
1987 * has no container (and thus is the root of the address space), the
1988 * following will hold:
1989 * - @offset_within_address_space >= @addr
1990 * - @offset_within_address_space + .@size <= @addr + @size
1991 *
1992 * @mr: a MemoryRegion within which @addr is a relative address
1993 * @addr: start of the area within @as to be searched
1994 * @size: size of the area to be searched
1995 */
1996MemoryRegionSection memory_region_find(MemoryRegion *mr,
1997                                       hwaddr addr, uint64_t size);
1998
1999/**
2000 * memory_global_dirty_log_sync: synchronize the dirty log for all memory
2001 *
2002 * Synchronizes the dirty page log for all address spaces.
2003 */
2004void memory_global_dirty_log_sync(void);
2005
2006/**
2007 * memory_global_dirty_log_sync: synchronize the dirty log for all memory
2008 *
2009 * Synchronizes the vCPUs with a thread that is reading the dirty bitmap.
2010 * This function must be called after the dirty log bitmap is cleared, and
2011 * before dirty guest memory pages are read.  If you are using
2012 * #DirtyBitmapSnapshot, memory_region_snapshot_and_clear_dirty() takes
2013 * care of doing this.
2014 */
2015void memory_global_after_dirty_log_sync(void);
2016
2017/**
2018 * memory_region_transaction_begin: Start a transaction.
2019 *
2020 * During a transaction, changes will be accumulated and made visible
2021 * only when the transaction ends (is committed).
2022 */
2023void memory_region_transaction_begin(void);
2024
2025/**
2026 * memory_region_transaction_commit: Commit a transaction and make changes
2027 *                                   visible to the guest.
2028 */
2029void memory_region_transaction_commit(void);
2030
2031/**
2032 * memory_listener_register: register callbacks to be called when memory
2033 *                           sections are mapped or unmapped into an address
2034 *                           space
2035 *
2036 * @listener: an object containing the callbacks to be called
2037 * @filter: if non-%NULL, only regions in this address space will be observed
2038 */
2039void memory_listener_register(MemoryListener *listener, AddressSpace *filter);
2040
2041/**
2042 * memory_listener_unregister: undo the effect of memory_listener_register()
2043 *
2044 * @listener: an object containing the callbacks to be removed
2045 */
2046void memory_listener_unregister(MemoryListener *listener);
2047
2048/**
2049 * memory_global_dirty_log_start: begin dirty logging for all regions
2050 */
2051void memory_global_dirty_log_start(void);
2052
2053/**
2054 * memory_global_dirty_log_stop: end dirty logging for all regions
2055 */
2056void memory_global_dirty_log_stop(void);
2057
2058void mtree_info(bool flatview, bool dispatch_tree, bool owner, bool disabled);
2059
2060/**
2061 * memory_region_dispatch_read: perform a read directly to the specified
2062 * MemoryRegion.
2063 *
2064 * @mr: #MemoryRegion to access
2065 * @addr: address within that region
2066 * @pval: pointer to uint64_t which the data is written to
2067 * @op: size, sign, and endianness of the memory operation
2068 * @attrs: memory transaction attributes to use for the access
2069 */
2070MemTxResult memory_region_dispatch_read(MemoryRegion *mr,
2071                                        hwaddr addr,
2072                                        uint64_t *pval,
2073                                        MemOp op,
2074                                        MemTxAttrs attrs);
2075/**
2076 * memory_region_dispatch_write: perform a write directly to the specified
2077 * MemoryRegion.
2078 *
2079 * @mr: #MemoryRegion to access
2080 * @addr: address within that region
2081 * @data: data to write
2082 * @op: size, sign, and endianness of the memory operation
2083 * @attrs: memory transaction attributes to use for the access
2084 */
2085MemTxResult memory_region_dispatch_write(MemoryRegion *mr,
2086                                         hwaddr addr,
2087                                         uint64_t data,
2088                                         MemOp op,
2089                                         MemTxAttrs attrs);
2090
2091/**
2092 * address_space_init: initializes an address space
2093 *
2094 * @as: an uninitialized #AddressSpace
2095 * @root: a #MemoryRegion that routes addresses for the address space
2096 * @name: an address space name.  The name is only used for debugging
2097 *        output.
2098 */
2099void address_space_init(AddressSpace *as, MemoryRegion *root, const char *name);
2100
2101/**
2102 * address_space_destroy: destroy an address space
2103 *
2104 * Releases all resources associated with an address space.  After an address space
2105 * is destroyed, its root memory region (given by address_space_init()) may be destroyed
2106 * as well.
2107 *
2108 * @as: address space to be destroyed
2109 */
2110void address_space_destroy(AddressSpace *as);
2111
2112/**
2113 * address_space_remove_listeners: unregister all listeners of an address space
2114 *
2115 * Removes all callbacks previously registered with memory_listener_register()
2116 * for @as.
2117 *
2118 * @as: an initialized #AddressSpace
2119 */
2120void address_space_remove_listeners(AddressSpace *as);
2121
2122/**
2123 * address_space_rw: read from or write to an address space.
2124 *
2125 * Return a MemTxResult indicating whether the operation succeeded
2126 * or failed (eg unassigned memory, device rejected the transaction,
2127 * IOMMU fault).
2128 *
2129 * @as: #AddressSpace to be accessed
2130 * @addr: address within that address space
2131 * @attrs: memory transaction attributes
2132 * @buf: buffer with the data transferred
2133 * @len: the number of bytes to read or write
2134 * @is_write: indicates the transfer direction
2135 */
2136MemTxResult address_space_rw(AddressSpace *as, hwaddr addr,
2137                             MemTxAttrs attrs, void *buf,
2138                             hwaddr len, bool is_write);
2139
2140/**
2141 * address_space_write: write to address space.
2142 *
2143 * Return a MemTxResult indicating whether the operation succeeded
2144 * or failed (eg unassigned memory, device rejected the transaction,
2145 * IOMMU fault).
2146 *
2147 * @as: #AddressSpace to be accessed
2148 * @addr: address within that address space
2149 * @attrs: memory transaction attributes
2150 * @buf: buffer with the data transferred
2151 * @len: the number of bytes to write
2152 */
2153MemTxResult address_space_write(AddressSpace *as, hwaddr addr,
2154                                MemTxAttrs attrs,
2155                                const void *buf, hwaddr len);
2156
2157/**
2158 * address_space_write_rom: write to address space, including ROM.
2159 *
2160 * This function writes to the specified address space, but will
2161 * write data to both ROM and RAM. This is used for non-guest
2162 * writes like writes from the gdb debug stub or initial loading
2163 * of ROM contents.
2164 *
2165 * Note that portions of the write which attempt to write data to
2166 * a device will be silently ignored -- only real RAM and ROM will
2167 * be written to.
2168 *
2169 * Return a MemTxResult indicating whether the operation succeeded
2170 * or failed (eg unassigned memory, device rejected the transaction,
2171 * IOMMU fault).
2172 *
2173 * @as: #AddressSpace to be accessed
2174 * @addr: address within that address space
2175 * @attrs: memory transaction attributes
2176 * @buf: buffer with the data transferred
2177 * @len: the number of bytes to write
2178 */
2179MemTxResult address_space_write_rom(AddressSpace *as, hwaddr addr,
2180                                    MemTxAttrs attrs,
2181                                    const void *buf, hwaddr len);
2182
2183/* address_space_ld*: load from an address space
2184 * address_space_st*: store to an address space
2185 *
2186 * These functions perform a load or store of the byte, word,
2187 * longword or quad to the specified address within the AddressSpace.
2188 * The _le suffixed functions treat the data as little endian;
2189 * _be indicates big endian; no suffix indicates "same endianness
2190 * as guest CPU".
2191 *
2192 * The "guest CPU endianness" accessors are deprecated for use outside
2193 * target-* code; devices should be CPU-agnostic and use either the LE
2194 * or the BE accessors.
2195 *
2196 * @as #AddressSpace to be accessed
2197 * @addr: address within that address space
2198 * @val: data value, for stores
2199 * @attrs: memory transaction attributes
2200 * @result: location to write the success/failure of the transaction;
2201 *   if NULL, this information is discarded
2202 */
2203
2204#define SUFFIX
2205#define ARG1         as
2206#define ARG1_DECL    AddressSpace *as
2207#include "exec/memory_ldst.h.inc"
2208
2209#define SUFFIX
2210#define ARG1         as
2211#define ARG1_DECL    AddressSpace *as
2212#include "exec/memory_ldst_phys.h.inc"
2213
2214struct MemoryRegionCache {
2215    void *ptr;
2216    hwaddr xlat;
2217    hwaddr len;
2218    FlatView *fv;
2219    MemoryRegionSection mrs;
2220    bool is_write;
2221};
2222
2223#define MEMORY_REGION_CACHE_INVALID ((MemoryRegionCache) { .mrs.mr = NULL })
2224
2225
2226/* address_space_ld*_cached: load from a cached #MemoryRegion
2227 * address_space_st*_cached: store into a cached #MemoryRegion
2228 *
2229 * These functions perform a load or store of the byte, word,
2230 * longword or quad to the specified address.  The address is
2231 * a physical address in the AddressSpace, but it must lie within
2232 * a #MemoryRegion that was mapped with address_space_cache_init.
2233 *
2234 * The _le suffixed functions treat the data as little endian;
2235 * _be indicates big endian; no suffix indicates "same endianness
2236 * as guest CPU".
2237 *
2238 * The "guest CPU endianness" accessors are deprecated for use outside
2239 * target-* code; devices should be CPU-agnostic and use either the LE
2240 * or the BE accessors.
2241 *
2242 * @cache: previously initialized #MemoryRegionCache to be accessed
2243 * @addr: address within the address space
2244 * @val: data value, for stores
2245 * @attrs: memory transaction attributes
2246 * @result: location to write the success/failure of the transaction;
2247 *   if NULL, this information is discarded
2248 */
2249
2250#define SUFFIX       _cached_slow
2251#define ARG1         cache
2252#define ARG1_DECL    MemoryRegionCache *cache
2253#include "exec/memory_ldst.h.inc"
2254
2255/* Inline fast path for direct RAM access.  */
2256static inline uint8_t address_space_ldub_cached(MemoryRegionCache *cache,
2257    hwaddr addr, MemTxAttrs attrs, MemTxResult *result)
2258{
2259    assert(addr < cache->len);
2260    if (likely(cache->ptr)) {
2261        return ldub_p(cache->ptr + addr);
2262    } else {
2263        return address_space_ldub_cached_slow(cache, addr, attrs, result);
2264    }
2265}
2266
2267static inline void address_space_stb_cached(MemoryRegionCache *cache,
2268    hwaddr addr, uint32_t val, MemTxAttrs attrs, MemTxResult *result)
2269{
2270    assert(addr < cache->len);
2271    if (likely(cache->ptr)) {
2272        stb_p(cache->ptr + addr, val);
2273    } else {
2274        address_space_stb_cached_slow(cache, addr, val, attrs, result);
2275    }
2276}
2277
2278#define ENDIANNESS   _le
2279#include "exec/memory_ldst_cached.h.inc"
2280
2281#define ENDIANNESS   _be
2282#include "exec/memory_ldst_cached.h.inc"
2283
2284#define SUFFIX       _cached
2285#define ARG1         cache
2286#define ARG1_DECL    MemoryRegionCache *cache
2287#include "exec/memory_ldst_phys.h.inc"
2288
2289/* address_space_cache_init: prepare for repeated access to a physical
2290 * memory region
2291 *
2292 * @cache: #MemoryRegionCache to be filled
2293 * @as: #AddressSpace to be accessed
2294 * @addr: address within that address space
2295 * @len: length of buffer
2296 * @is_write: indicates the transfer direction
2297 *
2298 * Will only work with RAM, and may map a subset of the requested range by
2299 * returning a value that is less than @len.  On failure, return a negative
2300 * errno value.
2301 *
2302 * Because it only works with RAM, this function can be used for
2303 * read-modify-write operations.  In this case, is_write should be %true.
2304 *
2305 * Note that addresses passed to the address_space_*_cached functions
2306 * are relative to @addr.
2307 */
2308int64_t address_space_cache_init(MemoryRegionCache *cache,
2309                                 AddressSpace *as,
2310                                 hwaddr addr,
2311                                 hwaddr len,
2312                                 bool is_write);
2313
2314/**
2315 * address_space_cache_invalidate: complete a write to a #MemoryRegionCache
2316 *
2317 * @cache: The #MemoryRegionCache to operate on.
2318 * @addr: The first physical address that was written, relative to the
2319 * address that was passed to @address_space_cache_init.
2320 * @access_len: The number of bytes that were written starting at @addr.
2321 */
2322void address_space_cache_invalidate(MemoryRegionCache *cache,
2323                                    hwaddr addr,
2324                                    hwaddr access_len);
2325
2326/**
2327 * address_space_cache_destroy: free a #MemoryRegionCache
2328 *
2329 * @cache: The #MemoryRegionCache whose memory should be released.
2330 */
2331void address_space_cache_destroy(MemoryRegionCache *cache);
2332
2333/* address_space_get_iotlb_entry: translate an address into an IOTLB
2334 * entry. Should be called from an RCU critical section.
2335 */
2336IOMMUTLBEntry address_space_get_iotlb_entry(AddressSpace *as, hwaddr addr,
2337                                            bool is_write, MemTxAttrs attrs);
2338
2339/* address_space_translate: translate an address range into an address space
2340 * into a MemoryRegion and an address range into that section.  Should be
2341 * called from an RCU critical section, to avoid that the last reference
2342 * to the returned region disappears after address_space_translate returns.
2343 *
2344 * @fv: #FlatView to be accessed
2345 * @addr: address within that address space
2346 * @xlat: pointer to address within the returned memory region section's
2347 * #MemoryRegion.
2348 * @len: pointer to length
2349 * @is_write: indicates the transfer direction
2350 * @attrs: memory attributes
2351 */
2352MemoryRegion *flatview_translate(FlatView *fv,
2353                                 hwaddr addr, hwaddr *xlat,
2354                                 hwaddr *len, bool is_write,
2355                                 MemTxAttrs attrs);
2356
2357static inline MemoryRegion *address_space_translate(AddressSpace *as,
2358                                                    hwaddr addr, hwaddr *xlat,
2359                                                    hwaddr *len, bool is_write,
2360                                                    MemTxAttrs attrs)
2361{
2362    return flatview_translate(address_space_to_flatview(as),
2363                              addr, xlat, len, is_write, attrs);
2364}
2365
2366/* address_space_access_valid: check for validity of accessing an address
2367 * space range
2368 *
2369 * Check whether memory is assigned to the given address space range, and
2370 * access is permitted by any IOMMU regions that are active for the address
2371 * space.
2372 *
2373 * For now, addr and len should be aligned to a page size.  This limitation
2374 * will be lifted in the future.
2375 *
2376 * @as: #AddressSpace to be accessed
2377 * @addr: address within that address space
2378 * @len: length of the area to be checked
2379 * @is_write: indicates the transfer direction
2380 * @attrs: memory attributes
2381 */
2382bool address_space_access_valid(AddressSpace *as, hwaddr addr, hwaddr len,
2383                                bool is_write, MemTxAttrs attrs);
2384
2385/* address_space_map: map a physical memory region into a host virtual address
2386 *
2387 * May map a subset of the requested range, given by and returned in @plen.
2388 * May return %NULL and set *@plen to zero(0), if resources needed to perform
2389 * the mapping are exhausted.
2390 * Use only for reads OR writes - not for read-modify-write operations.
2391 * Use cpu_register_map_client() to know when retrying the map operation is
2392 * likely to succeed.
2393 *
2394 * @as: #AddressSpace to be accessed
2395 * @addr: address within that address space
2396 * @plen: pointer to length of buffer; updated on return
2397 * @is_write: indicates the transfer direction
2398 * @attrs: memory attributes
2399 */
2400void *address_space_map(AddressSpace *as, hwaddr addr,
2401                        hwaddr *plen, bool is_write, MemTxAttrs attrs);
2402
2403/* address_space_unmap: Unmaps a memory region previously mapped by address_space_map()
2404 *
2405 * Will also mark the memory as dirty if @is_write == %true.  @access_len gives
2406 * the amount of memory that was actually read or written by the caller.
2407 *
2408 * @as: #AddressSpace used
2409 * @buffer: host pointer as returned by address_space_map()
2410 * @len: buffer length as returned by address_space_map()
2411 * @access_len: amount of data actually transferred
2412 * @is_write: indicates the transfer direction
2413 */
2414void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
2415                         bool is_write, hwaddr access_len);
2416
2417
2418/* Internal functions, part of the implementation of address_space_read.  */
2419MemTxResult address_space_read_full(AddressSpace *as, hwaddr addr,
2420                                    MemTxAttrs attrs, void *buf, hwaddr len);
2421MemTxResult flatview_read_continue(FlatView *fv, hwaddr addr,
2422                                   MemTxAttrs attrs, void *buf,
2423                                   hwaddr len, hwaddr addr1, hwaddr l,
2424                                   MemoryRegion *mr);
2425void *qemu_map_ram_ptr(RAMBlock *ram_block, ram_addr_t addr);
2426
2427/* Internal functions, part of the implementation of address_space_read_cached
2428 * and address_space_write_cached.  */
2429MemTxResult address_space_read_cached_slow(MemoryRegionCache *cache,
2430                                           hwaddr addr, void *buf, hwaddr len);
2431MemTxResult address_space_write_cached_slow(MemoryRegionCache *cache,
2432                                            hwaddr addr, const void *buf,
2433                                            hwaddr len);
2434
2435static inline bool memory_access_is_direct(MemoryRegion *mr, bool is_write)
2436{
2437    if (is_write) {
2438        return memory_region_is_ram(mr) && !mr->readonly &&
2439               !mr->rom_device && !memory_region_is_ram_device(mr);
2440    } else {
2441        return (memory_region_is_ram(mr) && !memory_region_is_ram_device(mr)) ||
2442               memory_region_is_romd(mr);
2443    }
2444}
2445
2446/**
2447 * address_space_read: read from an address space.
2448 *
2449 * Return a MemTxResult indicating whether the operation succeeded
2450 * or failed (eg unassigned memory, device rejected the transaction,
2451 * IOMMU fault).  Called within RCU critical section.
2452 *
2453 * @as: #AddressSpace to be accessed
2454 * @addr: address within that address space
2455 * @attrs: memory transaction attributes
2456 * @buf: buffer with the data transferred
2457 * @len: length of the data transferred
2458 */
2459static inline __attribute__((__always_inline__))
2460MemTxResult address_space_read(AddressSpace *as, hwaddr addr,
2461                               MemTxAttrs attrs, void *buf,
2462                               hwaddr len)
2463{
2464    MemTxResult result = MEMTX_OK;
2465    hwaddr l, addr1;
2466    void *ptr;
2467    MemoryRegion *mr;
2468    FlatView *fv;
2469
2470    if (__builtin_constant_p(len)) {
2471        if (len) {
2472            RCU_READ_LOCK_GUARD();
2473            fv = address_space_to_flatview(as);
2474            l = len;
2475            mr = flatview_translate(fv, addr, &addr1, &l, false, attrs);
2476            if (len == l && memory_access_is_direct(mr, false)) {
2477                ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
2478                memcpy(buf, ptr, len);
2479            } else {
2480                result = flatview_read_continue(fv, addr, attrs, buf, len,
2481                                                addr1, l, mr);
2482            }
2483        }
2484    } else {
2485        result = address_space_read_full(as, addr, attrs, buf, len);
2486    }
2487    return result;
2488}
2489
2490/**
2491 * address_space_read_cached: read from a cached RAM region
2492 *
2493 * @cache: Cached region to be addressed
2494 * @addr: address relative to the base of the RAM region
2495 * @buf: buffer with the data transferred
2496 * @len: length of the data transferred
2497 */
2498static inline MemTxResult
2499address_space_read_cached(MemoryRegionCache *cache, hwaddr addr,
2500                          void *buf, hwaddr len)
2501{
2502    assert(addr < cache->len && len <= cache->len - addr);
2503    fuzz_dma_read_cb(cache->xlat + addr, len, cache->mrs.mr, false);
2504    if (likely(cache->ptr)) {
2505        memcpy(buf, cache->ptr + addr, len);
2506        return MEMTX_OK;
2507    } else {
2508        return address_space_read_cached_slow(cache, addr, buf, len);
2509    }
2510}
2511
2512/**
2513 * address_space_write_cached: write to a cached RAM region
2514 *
2515 * @cache: Cached region to be addressed
2516 * @addr: address relative to the base of the RAM region
2517 * @buf: buffer with the data transferred
2518 * @len: length of the data transferred
2519 */
2520static inline MemTxResult
2521address_space_write_cached(MemoryRegionCache *cache, hwaddr addr,
2522                           const void *buf, hwaddr len)
2523{
2524    assert(addr < cache->len && len <= cache->len - addr);
2525    if (likely(cache->ptr)) {
2526        memcpy(cache->ptr + addr, buf, len);
2527        return MEMTX_OK;
2528    } else {
2529        return address_space_write_cached_slow(cache, addr, buf, len);
2530    }
2531}
2532
2533#ifdef NEED_CPU_H
2534/* enum device_endian to MemOp.  */
2535static inline MemOp devend_memop(enum device_endian end)
2536{
2537    QEMU_BUILD_BUG_ON(DEVICE_HOST_ENDIAN != DEVICE_LITTLE_ENDIAN &&
2538                      DEVICE_HOST_ENDIAN != DEVICE_BIG_ENDIAN);
2539
2540#if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
2541    /* Swap if non-host endianness or native (target) endianness */
2542    return (end == DEVICE_HOST_ENDIAN) ? 0 : MO_BSWAP;
2543#else
2544    const int non_host_endianness =
2545        DEVICE_LITTLE_ENDIAN ^ DEVICE_BIG_ENDIAN ^ DEVICE_HOST_ENDIAN;
2546
2547    /* In this case, native (target) endianness needs no swap.  */
2548    return (end == non_host_endianness) ? MO_BSWAP : 0;
2549#endif
2550}
2551#endif
2552
2553/*
2554 * Inhibit technologies that require discarding of pages in RAM blocks, e.g.,
2555 * to manage the actual amount of memory consumed by the VM (then, the memory
2556 * provided by RAM blocks might be bigger than the desired memory consumption).
2557 * This *must* be set if:
2558 * - Discarding parts of a RAM blocks does not result in the change being
2559 *   reflected in the VM and the pages getting freed.
2560 * - All memory in RAM blocks is pinned or duplicated, invaldiating any previous
2561 *   discards blindly.
2562 * - Discarding parts of a RAM blocks will result in integrity issues (e.g.,
2563 *   encrypted VMs).
2564 * Technologies that only temporarily pin the current working set of a
2565 * driver are fine, because we don't expect such pages to be discarded
2566 * (esp. based on guest action like balloon inflation).
2567 *
2568 * This is *not* to be used to protect from concurrent discards (esp.,
2569 * postcopy).
2570 *
2571 * Returns 0 if successful. Returns -EBUSY if a technology that relies on
2572 * discards to work reliably is active.
2573 */
2574int ram_block_discard_disable(bool state);
2575
2576/*
2577 * Inhibit technologies that disable discarding of pages in RAM blocks.
2578 *
2579 * Returns 0 if successful. Returns -EBUSY if discards are already set to
2580 * broken.
2581 */
2582int ram_block_discard_require(bool state);
2583
2584/*
2585 * Test if discarding of memory in ram blocks is disabled.
2586 */
2587bool ram_block_discard_is_disabled(void);
2588
2589/*
2590 * Test if discarding of memory in ram blocks is required to work reliably.
2591 */
2592bool ram_block_discard_is_required(void);
2593
2594#endif
2595
2596#endif
2597