qemu/include/exec/memory.h
/*
 * Physical memory management API
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates
 *
 * Authors:
 *  Avi Kivity <avi@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#ifndef MEMORY_H
#define MEMORY_H

#ifndef CONFIG_USER_ONLY

#include "exec/cpu-common.h"
#include "exec/hwaddr.h"
#include "exec/memattrs.h"
#include "exec/ramlist.h"
#include "qemu/queue.h"
#include "qemu/int128.h"
#include "qemu/notify.h"
#include "qom/object.h"
#include "qemu/rcu.h"
#include "hw/qdev-core.h"

#define RAM_ADDR_INVALID (~(ram_addr_t)0)

#define MAX_PHYS_ADDR_SPACE_BITS 62
#define MAX_PHYS_ADDR            (((hwaddr)1 << MAX_PHYS_ADDR_SPACE_BITS) - 1)

#define TYPE_MEMORY_REGION "qemu:memory-region"
#define MEMORY_REGION(obj) \
        OBJECT_CHECK(MemoryRegion, (obj), TYPE_MEMORY_REGION)

#define TYPE_IOMMU_MEMORY_REGION "qemu:iommu-memory-region"
#define IOMMU_MEMORY_REGION(obj) \
        OBJECT_CHECK(IOMMUMemoryRegion, (obj), TYPE_IOMMU_MEMORY_REGION)
#define IOMMU_MEMORY_REGION_CLASS(klass) \
        OBJECT_CLASS_CHECK(IOMMUMemoryRegionClass, (klass), \
                         TYPE_IOMMU_MEMORY_REGION)
#define IOMMU_MEMORY_REGION_GET_CLASS(obj) \
        OBJECT_GET_CLASS(IOMMUMemoryRegionClass, (obj), \
                         TYPE_IOMMU_MEMORY_REGION)

typedef struct MemoryRegionOps MemoryRegionOps;
typedef struct MemoryRegionMmio MemoryRegionMmio;

struct MemoryRegionMmio {
    CPUReadMemoryFunc *read[3];
    CPUWriteMemoryFunc *write[3];
};

typedef struct IOMMUTLBEntry IOMMUTLBEntry;

/* See address_space_translate: bit 0 is read, bit 1 is write.  */
typedef enum {
    IOMMU_NONE = 0,
    IOMMU_RO   = 1,
    IOMMU_WO   = 2,
    IOMMU_RW   = 3,
} IOMMUAccessFlags;

#define IOMMU_ACCESS_FLAG(r, w) (((r) ? IOMMU_RO : 0) | ((w) ? IOMMU_WO : 0))

struct IOMMUTLBEntry {
    AddressSpace    *target_as;
    hwaddr           iova;
    hwaddr           translated_addr;
    hwaddr           addr_mask;  /* 0xfff = 4k translation */
    IOMMUAccessFlags perm;
};
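
/*
 * Illustrative sketch (not part of the upstream header): how a translate
 * implementation might fill in an IOMMUTLBEntry describing a 4 KiB
 * read-write mapping.  The concrete addresses are hypothetical.
 *
 *     IOMMUTLBEntry entry = {
 *         .target_as       = &address_space_memory,
 *         .iova            = 0x10000000,  -- input address, 4 KiB aligned
 *         .translated_addr = 0x40000000,  -- output address
 *         .addr_mask       = 0xfff,       -- 4 KiB page: low 12 bits
 *         .perm            = IOMMU_ACCESS_FLAG(true, true),
 *     };
 */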

/*
 * Bitmap for different IOMMUNotifier capabilities. Each notifier can
 * register with one or multiple IOMMU Notifier capability bit(s).
 */
typedef enum {
    IOMMU_NOTIFIER_NONE = 0,
    /* Notify cache invalidations */
    IOMMU_NOTIFIER_UNMAP = 0x1,
    /* Notify entry changes (newly created entries) */
    IOMMU_NOTIFIER_MAP = 0x2,
} IOMMUNotifierFlag;

#define IOMMU_NOTIFIER_ALL (IOMMU_NOTIFIER_MAP | IOMMU_NOTIFIER_UNMAP)

struct IOMMUNotifier;
typedef void (*IOMMUNotify)(struct IOMMUNotifier *notifier,
                            IOMMUTLBEntry *data);

struct IOMMUNotifier {
    IOMMUNotify notify;
    IOMMUNotifierFlag notifier_flags;
    /* Notify for address space range start <= addr <= end */
    hwaddr start;
    hwaddr end;
    int iommu_idx;
    QLIST_ENTRY(IOMMUNotifier) node;
};
typedef struct IOMMUNotifier IOMMUNotifier;

static inline void iommu_notifier_init(IOMMUNotifier *n, IOMMUNotify fn,
                                       IOMMUNotifierFlag flags,
                                       hwaddr start, hwaddr end,
                                       int iommu_idx)
{
    n->notify = fn;
    n->notifier_flags = flags;
    n->start = start;
    n->end = end;
    n->iommu_idx = iommu_idx;
}
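
/*
 * Illustrative sketch (not part of the upstream header): a typical user
 * initializes a notifier for the whole address range and registers it with
 * memory_region_register_iommu_notifier(), declared further below.  The
 * VDevState type and the vdev_* names are hypothetical.
 *
 *     static void vdev_iommu_unmap(IOMMUNotifier *n, IOMMUTLBEntry *entry)
 *     {
 *         VDevState *vdev = container_of(n, VDevState, n);
 *         vdev_invalidate_range(vdev, entry->iova,    -- hypothetical helper
 *                               entry->addr_mask + 1);
 *     }
 *
 *     iommu_notifier_init(&vdev->n, vdev_iommu_unmap, IOMMU_NOTIFIER_UNMAP,
 *                         0, HWADDR_MAX, 0);
 *     memory_region_register_iommu_notifier(MEMORY_REGION(iommu_mr), &vdev->n);
 */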

/*
 * Memory region callbacks
 */
struct MemoryRegionOps {
    /* Read from the memory region. @addr is relative to @mr; @size is
     * in bytes. */
    uint64_t (*read)(void *opaque,
                     hwaddr addr,
                     unsigned size);
    /* Write to the memory region. @addr is relative to @mr; @size is
     * in bytes. */
    void (*write)(void *opaque,
                  hwaddr addr,
                  uint64_t data,
                  unsigned size);

    MemTxResult (*read_with_attrs)(void *opaque,
                                   hwaddr addr,
                                   uint64_t *data,
                                   unsigned size,
                                   MemTxAttrs attrs);
    MemTxResult (*write_with_attrs)(void *opaque,
                                    hwaddr addr,
                                    uint64_t data,
                                    unsigned size,
                                    MemTxAttrs attrs);
    /* Instruction execution pre-callback:
     * @addr is the address of the access relative to the @mr.
     * @size is the size of the area returned by the callback.
     * @offset is the location of the pointer inside @mr.
     *
     * Returns a pointer to a location which contains guest code.
     */
    void *(*request_ptr)(void *opaque, hwaddr addr, unsigned *size,
                         unsigned *offset);

    enum device_endian endianness;
    /* Guest-visible constraints: */
    struct {
        /* If nonzero, specify bounds on access sizes beyond which a machine
         * check is thrown.
         */
        unsigned min_access_size;
        unsigned max_access_size;
        /* If true, unaligned accesses are supported.  Otherwise unaligned
         * accesses throw machine checks.
         */
        bool unaligned;
        /*
         * If present, and returns #false, the transaction is not accepted
         * by the device (and results in machine dependent behaviour such
         * as a machine check exception).
         */
        bool (*accepts)(void *opaque, hwaddr addr,
                        unsigned size, bool is_write,
                        MemTxAttrs attrs);
    } valid;
    /* Internal implementation constraints: */
    struct {
        /* If nonzero, specifies the minimum size implemented.  Smaller sizes
         * will be rounded upwards and a partial result will be returned.
         */
        unsigned min_access_size;
        /* If nonzero, specifies the maximum size implemented.  Larger sizes
         * will be done as a series of accesses with smaller sizes.
         */
        unsigned max_access_size;
        /* If true, unaligned accesses are supported.  Otherwise all accesses
         * are converted to (possibly multiple) naturally aligned accesses.
         */
        bool unaligned;
    } impl;

    /* If .read and .write are not present, old_mmio may be used for
     * backwards compatibility with old mmio registration
     */
    const MemoryRegionMmio old_mmio;
};
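
/*
 * Illustrative sketch (not part of the upstream header): a minimal ops
 * table for a device with a single 32-bit little-endian register.  The
 * MyDevState type and the mydev_* names are hypothetical.
 *
 *     static uint64_t mydev_read(void *opaque, hwaddr addr, unsigned size)
 *     {
 *         MyDevState *s = opaque;
 *         return s->reg;
 *     }
 *
 *     static void mydev_write(void *opaque, hwaddr addr,
 *                             uint64_t data, unsigned size)
 *     {
 *         MyDevState *s = opaque;
 *         s->reg = data;
 *     }
 *
 *     static const MemoryRegionOps mydev_ops = {
 *         .read = mydev_read,
 *         .write = mydev_write,
 *         .endianness = DEVICE_LITTLE_ENDIAN,
 *         .valid.min_access_size = 4,  -- reject sub-word guest accesses
 *         .valid.max_access_size = 4,
 *     };
 */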

enum IOMMUMemoryRegionAttr {
    IOMMU_ATTR_SPAPR_TCE_FD
};

/**
 * IOMMUMemoryRegionClass:
 *
 * All IOMMU implementations need to subclass TYPE_IOMMU_MEMORY_REGION
 * and provide an implementation of at least the @translate method here
 * to handle requests to the memory region. Other methods are optional.
 *
 * The IOMMU implementation must use the IOMMU notifier infrastructure
 * to report whenever mappings are changed, by calling
 * memory_region_notify_iommu() (or, if necessary, by calling
 * memory_region_notify_one() for each registered notifier).
 *
 * Conceptually an IOMMU provides a mapping from input address
 * to an output TLB entry. If the IOMMU is aware of memory transaction
 * attributes and the output TLB entry depends on the transaction
 * attributes, we represent this using IOMMU indexes. Each index
 * selects a particular translation table that the IOMMU has:
 *   @attrs_to_index returns the IOMMU index for a set of transaction attributes
 *   @translate takes an input address and an IOMMU index
 * and the mapping returned can only depend on the input address and the
 * IOMMU index.
 *
 * Most IOMMUs don't care about the transaction attributes and support
 * only a single IOMMU index. A more complex IOMMU might have one index
 * for secure transactions and one for non-secure transactions.
 */
typedef struct IOMMUMemoryRegionClass {
    /* private */
    struct DeviceClass parent_class;

    /*
     * Return a TLB entry that contains a given address.
     *
     * The IOMMUAccessFlags indicated via @flag are optional and may
     * be specified as IOMMU_NONE to indicate that the caller needs
     * the full translation information for both reads and writes. If
     * the access flags are specified then the IOMMU implementation
     * may use this as an optimization, to stop doing a page table
     * walk as soon as it knows that the requested permissions are not
     * allowed. If IOMMU_NONE is passed then the IOMMU must do the
     * full page table walk and report the permissions in the returned
     * IOMMUTLBEntry. (Note that this implies that an IOMMU may not
     * return different mappings for reads and writes.)
     *
     * The returned information remains valid while the caller is
     * holding the big QEMU lock or is inside an RCU critical section;
     * if the caller wishes to cache the mapping beyond that it must
     * register an IOMMU notifier so it can invalidate its cached
     * information when the IOMMU mapping changes.
     *
     * @iommu: the IOMMUMemoryRegion
     * @addr: address to be translated within the memory region
     * @flag: requested access permissions
     * @iommu_idx: IOMMU index for the translation
     */
    IOMMUTLBEntry (*translate)(IOMMUMemoryRegion *iommu, hwaddr addr,
                               IOMMUAccessFlags flag, int iommu_idx);
    /* Returns minimum supported page size in bytes.
     * If this method is not provided then the minimum is assumed to
     * be TARGET_PAGE_SIZE.
     *
     * @iommu: the IOMMUMemoryRegion
     */
    uint64_t (*get_min_page_size)(IOMMUMemoryRegion *iommu);
    /* Called when IOMMU Notifier flag changes (i.e. when the set of
     * events which IOMMU users are requesting notification for changes).
     * Optional method -- need not be provided if the IOMMU does not
     * need to know exactly which events must be notified.
     *
     * @iommu: the IOMMUMemoryRegion
     * @old_flags: events which previously needed to be notified
     * @new_flags: events which now need to be notified
     */
    void (*notify_flag_changed)(IOMMUMemoryRegion *iommu,
                                IOMMUNotifierFlag old_flags,
                                IOMMUNotifierFlag new_flags);
    /* Called to handle memory_region_iommu_replay().
     *
     * The default implementation of memory_region_iommu_replay() is to
     * call the IOMMU translate method for every page in the address space
     * with flag == IOMMU_NONE and then call the notifier if translate
     * returns a valid mapping. If this method is implemented then it
     * overrides the default behaviour, and must provide the full semantics
     * of memory_region_iommu_replay(), by calling @notifier for every
     * translation present in the IOMMU.
     *
     * Optional method -- an IOMMU only needs to provide this method
     * if the default is inefficient or produces undesirable side effects.
     *
     * Note: this is not related to record-and-replay functionality.
     */
    void (*replay)(IOMMUMemoryRegion *iommu, IOMMUNotifier *notifier);

    /* Get IOMMU misc attributes. This is an optional method that
     * can be used to allow users of the IOMMU to get implementation-specific
     * information. The IOMMU implements this method to handle calls
     * by IOMMU users to memory_region_iommu_get_attr() by filling in
     * the arbitrary data pointer for any IOMMUMemoryRegionAttr values that
     * the IOMMU supports. If the method is unimplemented then
     * memory_region_iommu_get_attr() will always return -EINVAL.
     *
     * @iommu: the IOMMUMemoryRegion
     * @attr: attribute being queried
     * @data: memory to fill in with the attribute data
     *
     * Returns 0 on success, or a negative errno; in particular
     * returns -EINVAL for unrecognized or unimplemented attribute types.
     */
    int (*get_attr)(IOMMUMemoryRegion *iommu, enum IOMMUMemoryRegionAttr attr,
                    void *data);

    /* Return the IOMMU index to use for a given set of transaction attributes.
     *
     * Optional method: if an IOMMU only supports a single IOMMU index then
     * the default implementation of memory_region_iommu_attrs_to_index()
     * will return 0.
     *
     * The indexes supported by an IOMMU must be contiguous, starting at 0.
     *
     * @iommu: the IOMMUMemoryRegion
     * @attrs: memory transaction attributes
     */
    int (*attrs_to_index)(IOMMUMemoryRegion *iommu, MemTxAttrs attrs);

    /* Return the number of IOMMU indexes this IOMMU supports.
     *
     * Optional method: if this method is not provided, then
     * memory_region_iommu_num_indexes() will return 1, indicating that
     * only a single IOMMU index is supported.
     *
     * @iommu: the IOMMUMemoryRegion
     */
    int (*num_indexes)(IOMMUMemoryRegion *iommu);
} IOMMUMemoryRegionClass;
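
/*
 * Illustrative sketch (not part of the upstream header): the shape of a
 * minimal @translate implementation for an IOMMU with a single index and
 * an identity mapping.  my_iommu_translate is a hypothetical name.
 *
 *     static IOMMUTLBEntry my_iommu_translate(IOMMUMemoryRegion *iommu,
 *                                             hwaddr addr,
 *                                             IOMMUAccessFlags flag,
 *                                             int iommu_idx)
 *     {
 *         IOMMUTLBEntry entry = {
 *             .target_as = &address_space_memory,
 *             .iova = addr & ~(hwaddr)0xfff,
 *             .translated_addr = addr & ~(hwaddr)0xfff,
 *             .addr_mask = 0xfff,
 *             .perm = IOMMU_RW,  -- full permissions, as required for
 *                                   IOMMU_NONE queries
 *         };
 *         return entry;
 *     }
 */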

typedef struct CoalescedMemoryRange CoalescedMemoryRange;
typedef struct MemoryRegionIoeventfd MemoryRegionIoeventfd;

struct MemoryRegion {
    Object parent_obj;

    /* All fields are private - violators will be prosecuted */

    /* The following fields should fit in a cache line */
    bool romd_mode;
    bool ram;
    bool subpage;
    bool readonly; /* For RAM regions */
    bool rom_device;
    bool flush_coalesced_mmio;
    bool global_locking;
    uint8_t dirty_log_mask;
    bool is_iommu;
    RAMBlock *ram_block;
    Object *owner;

    const MemoryRegionOps *ops;
    void *opaque;
    MemoryRegion *container;
    Int128 size;
    hwaddr addr;
    void (*destructor)(MemoryRegion *mr);
    uint64_t align;
    bool terminates;
    bool ram_device;
    bool enabled;
    bool warning_printed; /* For reservations */
    uint8_t vga_logging_count;
    MemoryRegion *alias;
    hwaddr alias_offset;
    int32_t priority;
    QTAILQ_HEAD(subregions, MemoryRegion) subregions;
    QTAILQ_ENTRY(MemoryRegion) subregions_link;
    QTAILQ_HEAD(coalesced_ranges, CoalescedMemoryRange) coalesced;
    const char *name;
    unsigned ioeventfd_nb;
    MemoryRegionIoeventfd *ioeventfds;
};

struct IOMMUMemoryRegion {
    MemoryRegion parent_obj;

    QLIST_HEAD(, IOMMUNotifier) iommu_notify;
    IOMMUNotifierFlag iommu_notify_flags;
};

#define IOMMU_NOTIFIER_FOREACH(n, mr) \
    QLIST_FOREACH((n), &(mr)->iommu_notify, node)

/**
 * MemoryListener: callbacks structure for updates to the physical memory map
 *
 * Allows a component to adjust to changes in the guest-visible memory map.
 * Use with memory_listener_register() and memory_listener_unregister().
 */
struct MemoryListener {
    void (*begin)(MemoryListener *listener);
    void (*commit)(MemoryListener *listener);
    void (*region_add)(MemoryListener *listener, MemoryRegionSection *section);
    void (*region_del)(MemoryListener *listener, MemoryRegionSection *section);
    void (*region_nop)(MemoryListener *listener, MemoryRegionSection *section);
    void (*log_start)(MemoryListener *listener, MemoryRegionSection *section,
                      int old, int new);
    void (*log_stop)(MemoryListener *listener, MemoryRegionSection *section,
                     int old, int new);
    void (*log_sync)(MemoryListener *listener, MemoryRegionSection *section);
    void (*log_global_start)(MemoryListener *listener);
    void (*log_global_stop)(MemoryListener *listener);
    void (*eventfd_add)(MemoryListener *listener, MemoryRegionSection *section,
                        bool match_data, uint64_t data, EventNotifier *e);
    void (*eventfd_del)(MemoryListener *listener, MemoryRegionSection *section,
                        bool match_data, uint64_t data, EventNotifier *e);
    void (*coalesced_mmio_add)(MemoryListener *listener, MemoryRegionSection *section,
                               hwaddr addr, hwaddr len);
    void (*coalesced_mmio_del)(MemoryListener *listener, MemoryRegionSection *section,
                               hwaddr addr, hwaddr len);
    /* Lower = earlier (during add), later (during del) */
    unsigned priority;
    AddressSpace *address_space;
    QTAILQ_ENTRY(MemoryListener) link;
    QTAILQ_ENTRY(MemoryListener) link_as;
};
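
/*
 * Illustrative sketch (not part of the upstream header): a component that
 * wants to track the guest-visible memory map fills in only the callbacks
 * it needs and registers the listener.  my_region_add and my_track_range
 * are hypothetical names.
 *
 *     static void my_region_add(MemoryListener *listener,
 *                               MemoryRegionSection *section)
 *     {
 *         hwaddr start = section->offset_within_address_space;
 *         hwaddr size = int128_get64(section->size);
 *         my_track_range(start, size);  -- hypothetical helper
 *     }
 *
 *     static MemoryListener my_listener = {
 *         .region_add = my_region_add,
 *         .priority = 10,
 *     };
 *
 *     memory_listener_register(&my_listener, &address_space_memory);
 */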

/**
 * AddressSpace: describes a mapping of addresses to #MemoryRegion objects
 */
struct AddressSpace {
    /* All fields are private. */
    struct rcu_head rcu;
    char *name;
    MemoryRegion *root;

    /* Accessed via RCU.  */
    struct FlatView *current_map;

    int ioeventfd_nb;
    struct MemoryRegionIoeventfd *ioeventfds;
    QTAILQ_HEAD(memory_listeners_as, MemoryListener) listeners;
    QTAILQ_ENTRY(AddressSpace) address_spaces_link;
};

typedef struct AddressSpaceDispatch AddressSpaceDispatch;
typedef struct FlatRange FlatRange;

/* Flattened global view of current active memory hierarchy.  Kept in sorted
 * order.
 */
struct FlatView {
    struct rcu_head rcu;
    unsigned ref;
    FlatRange *ranges;
    unsigned nr;
    unsigned nr_allocated;
    struct AddressSpaceDispatch *dispatch;
    MemoryRegion *root;
};

static inline FlatView *address_space_to_flatview(AddressSpace *as)
{
    return atomic_rcu_read(&as->current_map);
}
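
/*
 * Illustrative sketch (not part of the upstream header): because
 * @current_map is accessed via RCU, callers must hold an RCU read lock
 * around any use of the returned FlatView:
 *
 *     rcu_read_lock();
 *     FlatView *fv = address_space_to_flatview(as);
 *     ... look up ranges in fv ...
 *     rcu_read_unlock();
 */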


/**
 * MemoryRegionSection: describes a fragment of a #MemoryRegion
 *
 * @mr: the region, or %NULL if empty
 * @fv: the flat view of the address space the region is mapped in
 * @offset_within_region: the beginning of the section, relative to @mr's start
 * @size: the size of the section; will not exceed @mr's boundaries
 * @offset_within_address_space: the address of the first byte of the section
 *     relative to the region's address space
 * @readonly: writes to this section are ignored
 */
struct MemoryRegionSection {
    MemoryRegion *mr;
    FlatView *fv;
    hwaddr offset_within_region;
    Int128 size;
    hwaddr offset_within_address_space;
    bool readonly;
};

/**
 * memory_region_init: Initialize a memory region
 *
 * The region typically acts as a container for other memory regions.  Use
 * memory_region_add_subregion() to add subregions.
 *
 * @mr: the #MemoryRegion to be initialized
 * @owner: the object that tracks the region's reference count
 * @name: used for debugging; not visible to the user or ABI
 * @size: size of the region; any subregions beyond this size will be clipped
 */
void memory_region_init(MemoryRegion *mr,
                        struct Object *owner,
                        const char *name,
                        uint64_t size);
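
/*
 * Illustrative sketch (not part of the upstream header): creating a
 * container and mapping a subregion into it with
 * memory_region_add_subregion(), mentioned above but declared elsewhere
 * in this header.  The names, offsets and sizes are hypothetical.
 *
 *     MemoryRegion *container = g_new0(MemoryRegion, 1);
 *     memory_region_init(container, OBJECT(dev), "mydev-mmio", 0x10000);
 *     memory_region_add_subregion(container, 0x1000, &s->reg_region);
 */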

/**
 * memory_region_ref: Add 1 to a memory region's reference count
 *
 * Whenever memory regions are accessed outside the BQL, they need to be
 * preserved against hot-unplug.  MemoryRegions actually do not have their
 * own reference count; they piggyback on a QOM object, their "owner".
 * This function adds a reference to the owner.
 *
 * All MemoryRegions must have an owner if they can disappear, even if the
 * device they belong to operates exclusively under the BQL.  This is because
 * the region could be returned at any time by memory_region_find, and this
 * is usually under guest control.
 *
 * @mr: the #MemoryRegion
 */
void memory_region_ref(MemoryRegion *mr);

/**
 * memory_region_unref: Subtract 1 from a memory region's reference count
 *
 * Whenever memory regions are accessed outside the BQL, they need to be
 * preserved against hot-unplug.  MemoryRegions actually do not have their
 * own reference count; they piggyback on a QOM object, their "owner".
 * This function removes a reference to the owner and possibly destroys it.
 *
 * @mr: the #MemoryRegion
 */
void memory_region_unref(MemoryRegion *mr);

/**
 * memory_region_init_io: Initialize an I/O memory region.
 *
 * Accesses into the region will cause the callbacks in @ops to be called.
 * If @size is nonzero, subregions will be clipped to @size.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @ops: a structure containing read and write callbacks to be used when
 *       I/O is performed on the region.
 * @opaque: passed to the read and write callbacks of the @ops structure.
 * @name: used for debugging; not visible to the user or ABI
 * @size: size of the region.
 */
void memory_region_init_io(MemoryRegion *mr,
                           struct Object *owner,
                           const MemoryRegionOps *ops,
                           void *opaque,
                           const char *name,
                           uint64_t size);
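
/*
 * Illustrative sketch (not part of the upstream header): wiring up the
 * hypothetical mydev_ops table from the MemoryRegionOps sketch above;
 * the region name and the 4 KiB size are also hypothetical.
 *
 *     memory_region_init_io(&s->iomem, OBJECT(s), &mydev_ops, s,
 *                           "mydev-regs", 0x1000);
 */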

/**
 * memory_region_init_ram_nomigrate:  Initialize RAM memory region.  Accesses
 *                                    into the region will modify memory
 *                                    directly.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: Region name, becomes part of RAMBlock name used in migration stream;
 *        must be unique within any device
 * @size: size of the region.
 * @errp: pointer to Error*, to store an error if it happens.
 *
 * Note that this function does not do anything to cause the data in the
 * RAM memory region to be migrated; that is the responsibility of the caller.
 */
void memory_region_init_ram_nomigrate(MemoryRegion *mr,
                                      struct Object *owner,
                                      const char *name,
                                      uint64_t size,
                                      Error **errp);

/**
 * memory_region_init_ram_shared_nomigrate:  Initialize RAM memory region.
 *                                           Accesses into the region will
 *                                           modify memory directly.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: Region name, becomes part of RAMBlock name used in migration stream;
 *        must be unique within any device
 * @size: size of the region.
 * @share: allow remapping RAM to different addresses
 * @errp: pointer to Error*, to store an error if it happens.
 *
 * Note that this function is similar to memory_region_init_ram_nomigrate.
 * The only difference is that part of the RAM region can be remapped.
 */
void memory_region_init_ram_shared_nomigrate(MemoryRegion *mr,
                                             struct Object *owner,
                                             const char *name,
                                             uint64_t size,
                                             bool share,
                                             Error **errp);

/**
 * memory_region_init_resizeable_ram:  Initialize memory region with resizeable
 *                                     RAM.  Accesses into the region will
 *                                     modify memory directly.  Only an initial
 *                                     portion of this RAM is actually used.
 *                                     The used size can change across reboots.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: Region name, becomes part of RAMBlock name used in migration stream;
 *        must be unique within any device
 * @size: used size of the region.
 * @max_size: max size of the region.
 * @resized: callback to notify owner about used size change.
 * @errp: pointer to Error*, to store an error if it happens.
 *
 * Note that this function does not do anything to cause the data in the
 * RAM memory region to be migrated; that is the responsibility of the caller.
 */
void memory_region_init_resizeable_ram(MemoryRegion *mr,
                                       struct Object *owner,
                                       const char *name,
                                       uint64_t size,
                                       uint64_t max_size,
                                       void (*resized)(const char*,
                                                       uint64_t length,
                                                       void *host),
                                       Error **errp);
#ifdef __linux__
/**
 * memory_region_init_ram_from_file:  Initialize RAM memory region with a
 *                                    mmap-ed backend.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: Region name, becomes part of RAMBlock name used in migration stream;
 *        must be unique within any device
 * @size: size of the region.
 * @align: alignment of the region base address; if 0, the default alignment
 *         (getpagesize()) will be used.
 * @share: %true if memory must be mmaped with the MAP_SHARED flag
 * @path: the path in which to allocate the RAM.
 * @errp: pointer to Error*, to store an error if it happens.
 *
 * Note that this function does not do anything to cause the data in the
 * RAM memory region to be migrated; that is the responsibility of the caller.
 */
void memory_region_init_ram_from_file(MemoryRegion *mr,
                                      struct Object *owner,
                                      const char *name,
                                      uint64_t size,
                                      uint64_t align,
                                      bool share,
                                      const char *path,
                                      Error **errp);

/**
 * memory_region_init_ram_from_fd:  Initialize RAM memory region with a
 *                                  mmap-ed backend.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: the name of the region.
 * @size: size of the region.
 * @share: %true if memory must be mmaped with the MAP_SHARED flag
 * @fd: the fd to mmap.
 * @errp: pointer to Error*, to store an error if it happens.
 *
 * Note that this function does not do anything to cause the data in the
 * RAM memory region to be migrated; that is the responsibility of the caller.
 */
void memory_region_init_ram_from_fd(MemoryRegion *mr,
                                    struct Object *owner,
                                    const char *name,
                                    uint64_t size,
                                    bool share,
                                    int fd,
                                    Error **errp);
#endif

/**
 * memory_region_init_ram_ptr:  Initialize RAM memory region from a
 *                              user-provided pointer.  Accesses into the
 *                              region will modify memory directly.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: Region name, becomes part of RAMBlock name used in migration stream;
 *        must be unique within any device
 * @size: size of the region.
 * @ptr: memory to be mapped; must contain at least @size bytes.
 *
 * Note that this function does not do anything to cause the data in the
 * RAM memory region to be migrated; that is the responsibility of the caller.
 */
void memory_region_init_ram_ptr(MemoryRegion *mr,
                                struct Object *owner,
                                const char *name,
                                uint64_t size,
                                void *ptr);

/**
 * memory_region_init_ram_device_ptr:  Initialize RAM device memory region from
 *                                     a user-provided pointer.
 *
 * A RAM device represents a mapping to a physical device, such as to a PCI
 * MMIO BAR of a vfio-pci assigned device.  The memory region may be mapped
 * into the VM address space and access to the region will modify memory
 * directly.  However, the memory region should not be included in a memory
 * dump (device may not be enabled/mapped at the time of the dump), and
 * operations incompatible with manipulating MMIO should be avoided.  Replaces
 * skip_dump flag.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: the name of the region.
 * @size: size of the region.
 * @ptr: memory to be mapped; must contain at least @size bytes.
 *
 * Note that this function does not do anything to cause the data in the
 * RAM memory region to be migrated; that is the responsibility of the caller.
 * (For RAM device memory regions, migrating the contents rarely makes sense.)
 */
void memory_region_init_ram_device_ptr(MemoryRegion *mr,
                                       struct Object *owner,
                                       const char *name,
                                       uint64_t size,
                                       void *ptr);

/**
 * memory_region_init_alias: Initialize a memory region that aliases all or a
 *                           part of another memory region.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: used for debugging; not visible to the user or ABI
 * @orig: the region to be referenced; @mr will be equivalent to
 *        @orig between @offset and @offset + @size - 1.
 * @offset: start of the section in @orig to be referenced.
 * @size: size of the region.
 */
void memory_region_init_alias(MemoryRegion *mr,
                              struct Object *owner,
                              const char *name,
                              MemoryRegion *orig,
                              hwaddr offset,
                              uint64_t size);
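
/*
 * Illustrative sketch (not part of the upstream header): mirroring the
 * low 1 MiB of a RAM region at a second guest-physical address, in the
 * way some PC-like boards do.  The names and addresses are hypothetical.
 *
 *     memory_region_init_alias(&s->ram_alias, OBJECT(s), "ram-low-alias",
 *                              ram_mr, 0, 0x100000);
 *     memory_region_add_subregion(sysmem, 0xfff00000, &s->ram_alias);
 */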

/**
 * memory_region_init_rom_nomigrate: Initialize a ROM memory region.
 *
 * This has the same effect as calling memory_region_init_ram_nomigrate()
 * and then marking the resulting region read-only with
 * memory_region_set_readonly().
 *
 * Note that this function does not do anything to cause the data in the
 * RAM side of the memory region to be migrated; that is the responsibility
 * of the caller.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: Region name, becomes part of RAMBlock name used in migration stream;
 *        must be unique within any device
 * @size: size of the region.
 * @errp: pointer to Error*, to store an error if it happens.
 */
void memory_region_init_rom_nomigrate(MemoryRegion *mr,
                                      struct Object *owner,
                                      const char *name,
                                      uint64_t size,
                                      Error **errp);

/**
 * memory_region_init_rom_device_nomigrate:  Initialize a ROM memory region.
 *                                 Writes are handled via callbacks.
 *
 * Note that this function does not do anything to cause the data in the
 * RAM side of the memory region to be migrated; that is the responsibility
 * of the caller.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @ops: callbacks for write access handling (must not be NULL).
 * @opaque: passed to the read and write callbacks of the @ops structure.
 * @name: Region name, becomes part of RAMBlock name used in migration stream;
 *        must be unique within any device
 * @size: size of the region.
 * @errp: pointer to Error*, to store an error if it happens.
 */
void memory_region_init_rom_device_nomigrate(MemoryRegion *mr,
                                             struct Object *owner,
                                             const MemoryRegionOps *ops,
                                             void *opaque,
                                             const char *name,
                                             uint64_t size,
                                             Error **errp);

/**
 * memory_region_init_iommu: Initialize a memory region of a custom type
 * that translates addresses
 *
 * An IOMMU region translates addresses and forwards accesses to a target
 * memory region.
 *
 * The IOMMU implementation must define a subclass of TYPE_IOMMU_MEMORY_REGION.
 * @_iommu_mr should be a pointer to enough memory for an instance of
 * that subclass, @instance_size is the size of that subclass, and
 * @mrtypename is its name. This function will initialize @_iommu_mr as an
 * instance of the subclass, and its methods will then be called to handle
 * accesses to the memory region. See the documentation of
 * #IOMMUMemoryRegionClass for further details.
 *
 * @_iommu_mr: the #IOMMUMemoryRegion to be initialized
 * @instance_size: the IOMMUMemoryRegion subclass instance size
 * @mrtypename: the type name of the #IOMMUMemoryRegion
 * @owner: the object that tracks the region's reference count
 * @name: used for debugging; not visible to the user or ABI
 * @size: size of the region.
 */
void memory_region_init_iommu(void *_iommu_mr,
                              size_t instance_size,
                              const char *mrtypename,
                              Object *owner,
                              const char *name,
                              uint64_t size);

/**
 * memory_region_init_ram - Initialize RAM memory region.  Accesses into the
 *                          region will modify memory directly.
 *
 * @mr: the #MemoryRegion to be initialized
 * @owner: the object that tracks the region's reference count (must be
 *         TYPE_DEVICE or a subclass of TYPE_DEVICE, or NULL)
 * @name: name of the memory region
 * @size: size of the region in bytes
 * @errp: pointer to Error*, to store an error if it happens.
 *
 * This function allocates RAM for a board model or device, and
 * arranges for it to be migrated (by calling vmstate_register_ram()
 * if @owner is a DeviceState, or vmstate_register_ram_global() if
 * @owner is NULL).
 *
 * TODO: Currently we restrict @owner to being either NULL (for
 * global RAM regions with no owner) or devices, so that we can
 * give the RAM block a unique name for migration purposes.
 * We should lift this restriction and allow arbitrary Objects.
 * If you pass a non-NULL non-device @owner then we will assert.
 */
void memory_region_init_ram(MemoryRegion *mr,
                            struct Object *owner,
                            const char *name,
                            uint64_t size,
                            Error **errp);
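
/*
 * Illustrative sketch (not part of the upstream header): allocating and
 * mapping migratable board RAM.  sysmem and the size are hypothetical;
 * MiB and error_fatal come from other QEMU headers.
 *
 *     MemoryRegion *ram = g_new0(MemoryRegion, 1);
 *     memory_region_init_ram(ram, NULL, "board.ram", 128 * MiB, &error_fatal);
 *     memory_region_add_subregion(sysmem, 0, ram);
 */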

/**
 * memory_region_init_rom: Initialize a ROM memory region.
 *
 * This has the same effect as calling memory_region_init_ram()
 * and then marking the resulting region read-only with
 * memory_region_set_readonly(). This includes arranging for the
 * contents to be migrated.
 *
 * TODO: Currently we restrict @owner to being either NULL (for
 * global RAM regions with no owner) or devices, so that we can
 * give the RAM block a unique name for migration purposes.
 * We should lift this restriction and allow arbitrary Objects.
 * If you pass a non-NULL non-device @owner then we will assert.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: Region name, becomes part of RAMBlock name used in migration stream;
 *        must be unique within any device
 * @size: size of the region.
 * @errp: pointer to Error*, to store an error if it happens.
 */
void memory_region_init_rom(MemoryRegion *mr,
                            struct Object *owner,
                            const char *name,
                            uint64_t size,
                            Error **errp);

/**
 * memory_region_init_rom_device:  Initialize a ROM memory region.
 *                                 Writes are handled via callbacks.
 *
 * This function initializes a memory region backed by RAM for reads
 * and callbacks for writes, and arranges for the RAM backing to
 * be migrated (by calling vmstate_register_ram()
 * if @owner is a DeviceState, or vmstate_register_ram_global() if
 * @owner is NULL).
 *
 * TODO: Currently we restrict @owner to being either NULL (for
 * global RAM regions with no owner) or devices, so that we can
 * give the RAM block a unique name for migration purposes.
 * We should lift this restriction and allow arbitrary Objects.
 * If you pass a non-NULL non-device @owner then we will assert.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @ops: callbacks for write access handling (must not be NULL).
 * @opaque: passed to the read and write callbacks of the @ops structure.
 * @name: Region name, becomes part of RAMBlock name used in migration stream;
 *        must be unique within any device
 * @size: size of the region.
 * @errp: pointer to Error*, to store an error if it happens.
 */
void memory_region_init_rom_device(MemoryRegion *mr,
                                   struct Object *owner,
                                   const MemoryRegionOps *ops,
                                   void *opaque,
                                   const char *name,
                                   uint64_t size,
                                   Error **errp);


/**
 * memory_region_owner: get a memory region's owner.
 *
 * @mr: the memory region being queried.
 */
struct Object *memory_region_owner(MemoryRegion *mr);

/**
 * memory_region_size: get a memory region's size.
 *
 * @mr: the memory region being queried.
 */
uint64_t memory_region_size(MemoryRegion *mr);

/**
 * memory_region_is_ram: check whether a memory region is random access
 *
 * Returns %true if a memory region is random access.
 *
 * @mr: the memory region being queried
 */
static inline bool memory_region_is_ram(MemoryRegion *mr)
{
    return mr->ram;
}

/**
 * memory_region_is_ram_device: check whether a memory region is a ram device
 *
 * Returns %true if a memory region is a device-backed RAM region
 *
 * @mr: the memory region being queried
 */
bool memory_region_is_ram_device(MemoryRegion *mr);

/**
 * memory_region_is_romd: check whether a memory region is in ROMD mode
 *
 * Returns %true if a memory region is a ROM device and currently set to allow
 * direct reads.
 *
 * @mr: the memory region being queried
 */
static inline bool memory_region_is_romd(MemoryRegion *mr)
{
    return mr->rom_device && mr->romd_mode;
}

/**
 * memory_region_get_iommu: check whether a memory region is an iommu
 *
 * Returns pointer to IOMMUMemoryRegion if a memory region is an iommu,
 * otherwise NULL.
 *
 * @mr: the memory region being queried
 */
static inline IOMMUMemoryRegion *memory_region_get_iommu(MemoryRegion *mr)
{
    if (mr->alias) {
        return memory_region_get_iommu(mr->alias);
    }
    if (mr->is_iommu) {
        return (IOMMUMemoryRegion *) mr;
    }
    return NULL;
}

/**
 * memory_region_get_iommu_class_nocheck: get the class of an iommu
 *   memory region without QOM checking
 *
 * Returns a pointer to the IOMMUMemoryRegionClass of @iommu_mr, which
 * must already be known to be an iommu.  This is a fast path avoiding
 * QOM checking, use with caution.
 *
 * @iommu_mr: the memory region being queried
 */
static inline IOMMUMemoryRegionClass *memory_region_get_iommu_class_nocheck(
        IOMMUMemoryRegion *iommu_mr)
{
    return (IOMMUMemoryRegionClass *) (((Object *)iommu_mr)->class);
}

#define memory_region_is_iommu(mr) (memory_region_get_iommu(mr) != NULL)

/**
 * memory_region_iommu_get_min_page_size: get minimum supported page size
 * for an iommu
 *
 * Returns minimum supported page size for an iommu.
 *
 * @iommu_mr: the memory region being queried
 */
uint64_t memory_region_iommu_get_min_page_size(IOMMUMemoryRegion *iommu_mr);

/**
 * memory_region_notify_iommu: notify a change in an IOMMU translation entry.
 *
 * The notification type will be decided by entry.perm bits:
 *
 * - For UNMAP (cache invalidation) notifies: set entry.perm to IOMMU_NONE.
 * - For MAP (newly added entry) notifies: set entry.perm to the
 *   permission of the page (which is definitely !IOMMU_NONE).
 *
 * Note: for any IOMMU implementation, an in-place mapping change
 * should be notified with an UNMAP followed by a MAP.
 *
 * @iommu_mr: the memory region that was changed
 * @iommu_idx: the IOMMU index for the translation table which has changed
 * @entry: the new entry in the IOMMU translation table.  The entry
 *         replaces all old entries for the same virtual I/O address range.
 *         Deleted entries have @perm == 0.
 */
void memory_region_notify_iommu(IOMMUMemoryRegion *iommu_mr,
                                int iommu_idx,
                                IOMMUTLBEntry entry);
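
/*
 * Illustrative sketch (not part of the upstream header): per the note
 * above, an in-place mapping change is reported as an UNMAP followed by
 * a MAP.  The iova and new_paddr values are hypothetical.
 *
 *     IOMMUTLBEntry entry = {
 *         .target_as = &address_space_memory,
 *         .iova = iova,
 *         .translated_addr = 0,
 *         .addr_mask = 0xfff,
 *         .perm = IOMMU_NONE,     -- UNMAP notification
 *     };
 *     memory_region_notify_iommu(iommu_mr, 0, entry);
 *
 *     entry.translated_addr = new_paddr;
 *     entry.perm = IOMMU_RW;      -- MAP notification
 *     memory_region_notify_iommu(iommu_mr, 0, entry);
 */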

/**
 * memory_region_notify_one: notify a change in an IOMMU translation
 *                           entry to a single notifier
 *
 * This works just like memory_region_notify_iommu(), but it only
 * notifies a specific notifier, not all of them.
 *
 * @notifier: the notifier to be notified
 * @entry: the new entry in the IOMMU translation table.  The entry
 *         replaces all old entries for the same virtual I/O address range.
 *         Deleted entries have @perm == 0.
 */
void memory_region_notify_one(IOMMUNotifier *notifier,
                              IOMMUTLBEntry *entry);

/**
 * memory_region_register_iommu_notifier: register a notifier for changes to
 * IOMMU translation entries.
 *
 * @mr: the memory region to observe
 * @n: the IOMMUNotifier to be added; the notify callback receives a
 *     pointer to an #IOMMUTLBEntry as the opaque value; the pointer
 *     ceases to be valid on exit from the notifier.
 */
void memory_region_register_iommu_notifier(MemoryRegion *mr,
                                           IOMMUNotifier *n);

/**
 * memory_region_iommu_replay: replay existing IOMMU translations to
 * a notifier with the minimum page granularity returned by the IOMMU's
 * get_min_page_size() method.
 *
 * Note: this is not related to record-and-replay functionality.
 *
 * @iommu_mr: the memory region to observe
 * @n: the notifier to which to replay iommu mappings
 */
void memory_region_iommu_replay(IOMMUMemoryRegion *iommu_mr, IOMMUNotifier *n);

/**
 * memory_region_iommu_replay_all: replay existing IOMMU translations
 * to all the notifiers registered.
 *
 * Note: this is not related to record-and-replay functionality.
 *
 * @iommu_mr: the memory region to observe
 */
void memory_region_iommu_replay_all(IOMMUMemoryRegion *iommu_mr);

/**
 * memory_region_unregister_iommu_notifier: unregister a notifier for
 * changes to IOMMU translation entries.
 *
 * @mr: the memory region which was being observed
 * @n: the notifier to be removed.
 */
void memory_region_unregister_iommu_notifier(MemoryRegion *mr,
                                             IOMMUNotifier *n);

/**
 * memory_region_iommu_get_attr: return an IOMMU attr if get_attr() is
 * defined on the IOMMU.
 *
 * Returns 0 on success, or a negative errno otherwise. In particular,
 * -EINVAL indicates that the IOMMU does not support the requested
 * attribute.
 *
 * @iommu_mr: the memory region
 * @attr: the requested attribute
 * @data: a pointer to the requested attribute data
 */
int memory_region_iommu_get_attr(IOMMUMemoryRegion *iommu_mr,
                                 enum IOMMUMemoryRegionAttr attr,
                                 void *data);

/**
 * memory_region_iommu_attrs_to_index: return the IOMMU index to
 * use for translations with the given memory transaction attributes.
 *
 * @iommu_mr: the memory region
 * @attrs: the memory transaction attributes
 */
int memory_region_iommu_attrs_to_index(IOMMUMemoryRegion *iommu_mr,
                                       MemTxAttrs attrs);

/**
 * memory_region_iommu_num_indexes: return the total number of IOMMU
 * indexes that this IOMMU supports.
 *
 * @iommu_mr: the memory region
 */
int memory_region_iommu_num_indexes(IOMMUMemoryRegion *iommu_mr);

/**
 * memory_region_name: get a memory region's name
 *
 * Returns the string that was used to initialize the memory region.
 *
 * @mr: the memory region being queried
 */
const char *memory_region_name(const MemoryRegion *mr);

/**
 * memory_region_is_logging: return whether a memory region is logging writes
 *
 * Returns %true if the memory region is logging writes for the given client
 *
 * @mr: the memory region being queried
 * @client: the client being queried
 */
bool memory_region_is_logging(MemoryRegion *mr, uint8_t client);

/**
 * memory_region_get_dirty_log_mask: return the clients for which a
 * memory region is logging writes.
 *
 * Returns a bitmap of clients, in which the DIRTY_MEMORY_* constants
 * are the bit indices.
 *
 * @mr: the memory region being queried
 */
uint8_t memory_region_get_dirty_log_mask(MemoryRegion *mr);

/**
 * memory_region_is_rom: check whether a memory region is ROM
 *
 * Returns %true if a memory region is read-only memory.
 *
 * @mr: the memory region being queried
 */
static inline bool memory_region_is_rom(MemoryRegion *mr)
{
    return mr->ram && mr->readonly;
}


/**
 * memory_region_get_fd: Get a file descriptor backing a RAM memory region.
 *
 * Returns a file descriptor backing a file-based RAM memory region,
 * or -1 if the region is not a file-based RAM memory region.
 *
 * @mr: the RAM or alias memory region being queried.
 */
int memory_region_get_fd(MemoryRegion *mr);

/**
 * memory_region_from_host: Convert a pointer into a RAM memory region
 * and an offset within it.
 *
 * Given a host pointer inside a RAM memory region (created with
 * memory_region_init_ram() or memory_region_init_ram_ptr()), return
 * the MemoryRegion and the offset within it.
 *
 * Use with care; by the time this function returns, the returned pointer is
 * not protected by RCU anymore.  If the caller is not within an RCU critical
 * section and does not hold the iothread lock, it must have other means of
 * protecting the pointer, such as a reference to the region that includes
 * the incoming ram_addr_t.
 *
 * @ptr: the host pointer to be converted
 * @offset: the offset within memory region
 */
MemoryRegion *memory_region_from_host(void *ptr, ram_addr_t *offset);

/**
 * memory_region_get_ram_ptr: Get a pointer into a RAM memory region.
 *
 * Returns a host pointer to a RAM memory region (created with
 * memory_region_init_ram() or memory_region_init_ram_ptr()).
 *
 * Use with care; by the time this function returns, the returned pointer is
 * not protected by RCU anymore.  If the caller is not within an RCU critical
 * section and does not hold the iothread lock, it must have other means of
 * protecting the pointer, such as a reference to the region that includes
 * the incoming ram_addr_t.
 *
 * @mr: the memory region being queried.
 */
void *memory_region_get_ram_ptr(MemoryRegion *mr);

/* memory_region_ram_resize: Resize a RAM region.
 *
 * Only legal before guest might have detected the memory size: e.g. on
 * incoming migration, or right after reset.
 *
 * @mr: a memory region created with @memory_region_init_resizeable_ram.
 * @newsize: the new size of the region
 * @errp: pointer to Error*, to store an error if it happens.
 */
void memory_region_ram_resize(MemoryRegion *mr, ram_addr_t newsize,
                              Error **errp);

/**
 * memory_region_set_log: Turn dirty logging on or off for a region.
 *
 * Turns dirty logging on or off for a specified client (display, migration).
 * Only meaningful for RAM regions.
 *
 * @mr: the memory region being updated.
 * @log: whether dirty logging is to be enabled or disabled.
 * @client: the user of the logging information; %DIRTY_MEMORY_VGA only.
 */
void memory_region_set_log(MemoryRegion *mr, bool log, unsigned client);

/**
 * memory_region_get_dirty: Check whether a range of bytes is dirty
 *                          for a specified client.
 *
 * Checks whether a range of bytes has been written to since the last
 * call to memory_region_reset_dirty() with the same @client.  Dirty logging
 * must be enabled.
 *
 * @mr: the memory region being queried.
 * @addr: the address (relative to the start of the region) being queried.
 * @size: the size of the range being queried.
 * @client: the user of the logging information; %DIRTY_MEMORY_MIGRATION or
 *          %DIRTY_MEMORY_VGA.
 */
bool memory_region_get_dirty(MemoryRegion *mr, hwaddr addr,
                             hwaddr size, unsigned client);

/**
 * memory_region_set_dirty: Mark a range of bytes as dirty in a memory region.
 *
 * Marks a range of bytes as dirty, after it has been dirtied outside
 * guest code.
 *
 * @mr: the memory region being dirtied.
 * @addr: the address (relative to the start of the region) being dirtied.
 * @size: size of the range being dirtied.
 */
void memory_region_set_dirty(MemoryRegion *mr, hwaddr addr,
                             hwaddr size);

/**
 * memory_region_snapshot_and_clear_dirty: Get a snapshot of the dirty
 *                                         bitmap and clear it.
 *
 * Creates a snapshot of the dirty bitmap, clears the dirty bitmap and
 * returns the snapshot.  The snapshot can then be used to query dirty
 * status, using memory_region_snapshot_get_dirty.  Snapshotting allows
 * querying the same page multiple times, which is especially useful for
 * display updates where the scanlines often are not page aligned.
 *
 * The dirty bitmap region which gets copied into the snapshot (and
 * cleared afterwards) can be larger than requested.  The boundaries
 * are rounded up/down so complete bitmap longs (covering 64 pages on
 * 64-bit hosts) can be copied over into the bitmap snapshot.  This
 * isn't a problem for display updates as the extra pages are outside
 * the visible area, and in case the visible area changes a full
 * display redraw is due anyway.  Should other use cases for this
 * function emerge we might have to revisit this implementation
 * detail.
 *
 * Use g_free to release DirtyBitmapSnapshot.
 *
 * @mr: the memory region being queried.
 * @addr: the address (relative to the start of the region) being queried.
 * @size: the size of the range being queried.
 * @client: the user of the logging information; typically %DIRTY_MEMORY_VGA.
 */
DirtyBitmapSnapshot *memory_region_snapshot_and_clear_dirty(MemoryRegion *mr,
                                                            hwaddr addr,
                                                            hwaddr size,
                                                            unsigned client);
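
/*
 * Illustrative sketch (not part of the upstream header): a display update
 * loop querying one snapshot several times, per the comment above.  vram,
 * stride, height and redraw_scanline are hypothetical names.
 *
 *     DirtyBitmapSnapshot *snap;
 *     int y;
 *
 *     snap = memory_region_snapshot_and_clear_dirty(vram, 0, vram_size,
 *                                                   DIRTY_MEMORY_VGA);
 *     for (y = 0; y < height; y++) {
 *         if (memory_region_snapshot_get_dirty(vram, snap,
 *                                              y * stride, stride)) {
 *             redraw_scanline(y);  -- hypothetical helper
 *         }
 *     }
 *     g_free(snap);
 */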
1291
1292/**
1293 * memory_region_snapshot_get_dirty: Check whether a range of bytes is dirty
1294 *                                   in the specified dirty bitmap snapshot.
1295 *
1296 * @mr: the memory region being queried.
1297 * @snap: the dirty bitmap snapshot
1298 * @addr: the address (relative to the start of the region) being queried.
1299 * @size: the size of the range being queried.
1300 */
1301bool memory_region_snapshot_get_dirty(MemoryRegion *mr,
1302                                      DirtyBitmapSnapshot *snap,
1303                                      hwaddr addr, hwaddr size);
1304
1305/**
1306 * memory_region_reset_dirty: Mark a range of pages as clean, for a specified
1307 *                            client.
1308 *
1309 * Marks a range of pages as no longer dirty.
1310 *
1311 * @mr: the region being updated.
1312 * @addr: the start of the subrange being cleaned.
1313 * @size: the size of the subrange being cleaned.
1314 * @client: the user of the logging information; %DIRTY_MEMORY_MIGRATION or
1315 *          %DIRTY_MEMORY_VGA.
1316 */
1317void memory_region_reset_dirty(MemoryRegion *mr, hwaddr addr,
1318                               hwaddr size, unsigned client);
1319
1320/**
1321 * memory_region_set_readonly: Turn a memory region read-only (or read-write)
1322 *
1323 * Allows a memory region to be marked as read-only (turning it into a ROM).
1324 * only useful on RAM regions.
1325 *
1326 * @mr: the region being updated.
1327 * @readonly: whether rhe region is to be ROM or RAM.
1328 */
1329void memory_region_set_readonly(MemoryRegion *mr, bool readonly);
1330
1331/**
1332 * memory_region_rom_device_set_romd: enable/disable ROMD mode
1333 *
1334 * Allows a ROM device (initialized with memory_region_init_rom_device())
1335 * to be set to ROMD mode (default) or MMIO mode.  When in ROMD mode, the
1336 * device is mapped into guest memory and satisfies read accesses directly.
1337 * When in MMIO mode, reads are forwarded to the #MemoryRegion.read function.
1338 * Writes are always handled by the #MemoryRegion.write function.
1339 *
1340 * @mr: the memory region to be updated
1341 * @romd_mode: %true to put the region into ROMD mode
1342 */
1343void memory_region_rom_device_set_romd(MemoryRegion *mr, bool romd_mode);
1344
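/*
 * Example: a sketch of the usual flash pattern; the "fl" device and
 * its pflash_ops are hypothetical.  The region starts out in ROMD
 * mode; programming mode switches to MMIO so reads reach the
 * callbacks, and finishing restores direct ROM-style reads:
 *
 *     memory_region_init_rom_device(&fl->mem, OBJECT(fl), &pflash_ops,
 *                                   fl, "flash", size, &error_fatal);
 *     ...
 *     memory_region_rom_device_set_romd(&fl->mem, false);  (program mode)
 *     ...
 *     memory_region_rom_device_set_romd(&fl->mem, true);   (read mode)
 */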
1345/**
1346 * memory_region_set_coalescing: Enable memory coalescing for the region.
1347 *
1348 * Enables writes to a region to be queued for later processing. MMIO ->write
1349 * callbacks may be delayed until a non-coalesced MMIO is issued.
1350 * Only useful for IO regions.  Roughly similar to write-combining hardware.
1351 *
1352 * @mr: the memory region to be write coalesced
1353 */
1354void memory_region_set_coalescing(MemoryRegion *mr);
1355
1356/**
1357 * memory_region_add_coalescing: Enable memory coalescing for a sub-range of
1358 *                               a region.
1359 *
1360 * Like memory_region_set_coalescing(), but works on a sub-range of a region.
1361 * Multiple calls can be issued to coalesce disjoint ranges.
1362 *
1363 * @mr: the memory region to be updated.
1364 * @offset: the start of the range within the region to be coalesced.
1365 * @size: the size of the subrange to be coalesced.
1366 */
1367void memory_region_add_coalescing(MemoryRegion *mr,
1368                                  hwaddr offset,
1369                                  uint64_t size);
1370
1371/**
1372 * memory_region_clear_coalescing: Disable MMIO coalescing for the region.
1373 *
1374 * Disables any coalescing caused by memory_region_set_coalescing() or
1375 * memory_region_add_coalescing().  Roughly equivalent to uncacheable memory
1376 * hardware.
1377 *
1378 * @mr: the memory region to be updated.
1379 */
1380void memory_region_clear_coalescing(MemoryRegion *mr);
1381
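/*
 * Example: an illustrative sketch (device and offsets hypothetical).
 * A NIC model might coalesce writes to a ring-register window only,
 * and drop coalescing again when prompt ->write callbacks are needed:
 *
 *     memory_region_init_io(&s->mmio, OBJECT(s), &nic_ops, s,
 *                           "nic-mmio", 0x20000);
 *     memory_region_add_coalescing(&s->mmio, RING_REGS_BASE, 0x1000);
 *     ...
 *     memory_region_clear_coalescing(&s->mmio);
 */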
1382/**
1383 * memory_region_set_flush_coalesced: Enforce memory coalescing flush before
1384 *                                    accesses.
1385 *
1386 * Ensure that pending coalesced MMIO requests are flushed before the memory
1387 * region is accessed. This property is automatically enabled for all regions
1388 * passed to memory_region_set_coalescing() and memory_region_add_coalescing().
1389 *
1390 * @mr: the memory region to be updated.
1391 */
1392void memory_region_set_flush_coalesced(MemoryRegion *mr);
1393
1394/**
1395 * memory_region_clear_flush_coalesced: Disable memory coalescing flush before
1396 *                                      accesses.
1397 *
1398 * Clear the automatic coalesced MMIO flushing enabled via
1399 * memory_region_set_flush_coalesced. Note that this service has no effect on
1400 * memory regions that have MMIO coalescing enabled for themselves. For them,
1401 * automatic flushing will stop once coalescing is disabled.
1402 *
1403 * @mr: the memory region to be updated.
1404 */
1405void memory_region_clear_flush_coalesced(MemoryRegion *mr);
1406
1407/**
1408 * memory_region_clear_global_locking: Declares that access processing does
1409 *                                     not depend on the QEMU global lock.
1410 *
1411 * By clearing this property, accesses to the memory region will be processed
1412 * outside of QEMU's global lock (unless the lock is held when issuing the
1413 * access request). In this case, the device model implementing the access
1414 * handlers is responsible for synchronizing concurrent accesses.
1415 *
1416 * @mr: the memory region to be updated.
1417 */
1418void memory_region_clear_global_locking(MemoryRegion *mr);
1419
1420/**
1421 * memory_region_add_eventfd: Request an eventfd to be triggered when a word
1422 *                            is written to a location.
1423 *
1424 * Marks a word in an IO region (initialized with memory_region_init_io())
1425 * as a trigger for an eventfd event.  The I/O callback will not be called.
1426 * The caller must be prepared to handle failure (that is, take the required
1427 * action if the callback _is_ called).
1428 *
1429 * @mr: the memory region being updated.
1430 * @addr: the address within @mr that is to be monitored
1431 * @size: the size of the access to trigger the eventfd
1432 * @match_data: whether to match against @data, instead of just @addr
1433 * @data: the data to match against the guest write
1434 * @e: event notifier to be triggered when @addr, @size, and @data all match.
1435 */
1436void memory_region_add_eventfd(MemoryRegion *mr,
1437                               hwaddr addr,
1438                               unsigned size,
1439                               bool match_data,
1440                               uint64_t data,
1441                               EventNotifier *e);
1442
1443/**
1444 * memory_region_del_eventfd: Cancel an eventfd.
1445 *
1446 * Cancels an eventfd trigger requested by a previous
1447 * memory_region_add_eventfd() call.
1448 *
1449 * @mr: the memory region being updated.
1450 * @addr: the address within @mr that is to be monitored
1451 * @size: the size of the access to trigger the eventfd
1452 * @match_data: whether to match against @data, instead of just @addr
1453 * @data: the data to match against the guest write
1454 * @e: event notifier to be triggered when @addr, @size, and @data all match.
1455 */
1456void memory_region_del_eventfd(MemoryRegion *mr,
1457                               hwaddr addr,
1458                               unsigned size,
1459                               bool match_data,
1460                               uint64_t data,
1461                               EventNotifier *e);
1462
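/*
 * Example: an ioeventfd-style sketch; the notify offset, queue index
 * and "s->bar" region are hypothetical.  A 2-byte guest write of
 * queue_index to the notify register kicks the eventfd without ever
 * entering the ->write callback; the same five values cancel it:
 *
 *     EventNotifier *n = &s->queue_notifier;
 *
 *     event_notifier_init(n, 0);
 *     memory_region_add_eventfd(&s->bar, QUEUE_NOTIFY_OFFSET, 2,
 *                               true, queue_index, n);
 *     ...
 *     memory_region_del_eventfd(&s->bar, QUEUE_NOTIFY_OFFSET, 2,
 *                               true, queue_index, n);
 */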
1463/**
1464 * memory_region_add_subregion: Add a subregion to a container.
1465 *
1466 * Adds a subregion at @offset.  The subregion may not overlap with other
1467 * subregions (except for those explicitly marked as overlapping).  A region
1468 * may only be added once as a subregion (unless removed with
1469 * memory_region_del_subregion()); use memory_region_init_alias() if you
1470 * want a region to be a subregion in multiple locations.
1471 *
1472 * @mr: the region to contain the new subregion; must be a container
1473 *      initialized with memory_region_init().
1474 * @offset: the offset relative to @mr where @subregion is added.
1475 * @subregion: the subregion to be added.
1476 */
1477void memory_region_add_subregion(MemoryRegion *mr,
1478                                 hwaddr offset,
1479                                 MemoryRegion *subregion);
1480/**
1481 * memory_region_add_subregion_overlap: Add a subregion to a container
1482 *                                      with overlap.
1483 *
1484 * Adds a subregion at @offset.  The subregion may overlap with other
1485 * subregions.  Conflicts are resolved by having a higher @priority hide a
1486 * lower @priority. Subregions without priority are taken as @priority 0.
1487 * A region may only be added once as a subregion (unless removed with
1488 * memory_region_del_subregion()); use memory_region_init_alias() if you
1489 * want a region to be a subregion in multiple locations.
1490 *
1491 * @mr: the region to contain the new subregion; must be a container
1492 *      initialized with memory_region_init().
1493 * @offset: the offset relative to @mr where @subregion is added.
1494 * @subregion: the subregion to be added.
1495 * @priority: used for resolving overlaps; highest priority wins.
1496 */
1497void memory_region_add_subregion_overlap(MemoryRegion *mr,
1498                                         hwaddr offset,
1499                                         MemoryRegion *subregion,
1500                                         int priority);
1501
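/*
 * Example: an illustrative container layout (names made up).  RAM
 * covers the whole container at the default priority 0; a VGA window
 * is layered on top with priority 1, so accesses starting at 0xa0000
 * (up to the window's size) hit the window and everything else falls
 * through to RAM:
 *
 *     memory_region_init(&s->container, OBJECT(s), "lowmem", 0x100000);
 *     memory_region_add_subregion(&s->container, 0, &s->ram);
 *     memory_region_add_subregion_overlap(&s->container, 0xa0000,
 *                                         &s->vga_window, 1);
 */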
1502/**
1503 * memory_region_get_ram_addr: Get the ram address associated with a memory
1504 *                             region
1505 */
1506ram_addr_t memory_region_get_ram_addr(MemoryRegion *mr);
1507
1508uint64_t memory_region_get_alignment(const MemoryRegion *mr);
1509/**
1510 * memory_region_del_subregion: Remove a subregion.
1511 *
1512 * Removes a subregion from its container.
1513 *
1514 * @mr: the container to be updated.
1515 * @subregion: the region being removed; must be a current subregion of @mr.
1516 */
1517void memory_region_del_subregion(MemoryRegion *mr,
1518                                 MemoryRegion *subregion);
1519
1520/*
1521 * memory_region_set_enabled: dynamically enable or disable a region
1522 *
1523 * Enables or disables a memory region.  A disabled memory region
1524 * ignores all accesses to itself and its subregions.  It does not
1525 * obscure sibling subregions with lower priority - it simply behaves as
1526 * if it was removed from the hierarchy.
1527 *
1528 * Regions default to being enabled.
1529 *
1530 * @mr: the region to be updated
1531 * @enabled: whether to enable or disable the region
1532 */
1533void memory_region_set_enabled(MemoryRegion *mr, bool enabled);
1534
1535/*
1536 * memory_region_set_address: dynamically update the address of a region
1537 *
1538 * Dynamically updates the address of a region, relative to its container.
1539 * May be used on regions that are currently part of a memory hierarchy.
1540 *
1541 * @mr: the region to be updated
1542 * @addr: new address, relative to container region
1543 */
1544void memory_region_set_address(MemoryRegion *mr, hwaddr addr);
1545
1546/*
1547 * memory_region_set_size: dynamically update the size of a region.
1548 *
1549 * Dynamically updates the size of a region.
1550 *
1551 * @mr: the region to be updated
1552 * @size: the new size of the region.
1553 */
1554void memory_region_set_size(MemoryRegion *mr, uint64_t size);
1555
1556/*
1557 * memory_region_set_alias_offset: dynamically update a memory alias's offset
1558 *
1559 * Dynamically updates the offset into the target region that an alias points
1560 * to, as if the fourth argument to memory_region_init_alias() had changed.
1561 *
1562 * @mr: the #MemoryRegion to be updated; should be an alias.
1563 * @offset: the new offset into the target memory region
1564 */
1565void memory_region_set_alias_offset(MemoryRegion *mr,
1566                                    hwaddr offset);
1567
1568/**
1569 * memory_region_present: checks if an address relative to a @container
1570 * translates into a #MemoryRegion within @container
1571 *
1572 * Answer whether a #MemoryRegion within @container covers the address
1573 * @addr.
1574 *
1575 * @container: a #MemoryRegion within which @addr is a relative address
1576 * @addr: the area within @container to be searched
1577 */
1578bool memory_region_present(MemoryRegion *container, hwaddr addr);
1579
1580/**
1581 * memory_region_is_mapped: returns true if #MemoryRegion is mapped
1582 * into any address space.
1583 *
1584 * @mr: a #MemoryRegion which should be checked if it's mapped
1585 */
1586bool memory_region_is_mapped(MemoryRegion *mr);
1587
1588/**
1589 * memory_region_find: translate an address/size relative to a
1590 * MemoryRegion into a #MemoryRegionSection.
1591 *
1592 * Locates the first #MemoryRegion within @mr that overlaps the range
1593 * given by @addr and @size.
1594 *
1595 * Returns a #MemoryRegionSection that describes a contiguous overlap.
1596 * It will have the following characteristics:
1597 *    .@size = 0 iff no overlap was found
1598 *    .@mr is non-%NULL iff an overlap was found
1599 *
1600 * Remember that in the return value the @offset_within_region is
1601 * relative to the returned region (in the .@mr field), not to the
1602 * @mr argument.
1603 *
1604 * Similarly, the .@offset_within_address_space is relative to the
1605 * address space that contains both regions, the passed and the
1606 * returned one.  However, in the special case where the @mr argument
1607 * has no container (and thus is the root of the address space), the
1608 * following will hold:
1609 *    .@offset_within_address_space >= @addr
1610 *    .@offset_within_address_space + .@size <= @addr + @size
1611 *
1612 * @mr: a MemoryRegion within which @addr is a relative address
1613 * @addr: start of the area within @mr to be searched
1614 * @size: size of the area to be searched
1615 */
1616MemoryRegionSection memory_region_find(MemoryRegion *mr,
1617                                       hwaddr addr, uint64_t size);
1618
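/*
 * Example: a short lookup sketch.  In this version of the API,
 * memory_region_find() takes a reference on the region it returns,
 * so drop it with memory_region_unref() when done:
 *
 *     MemoryRegionSection sec;
 *
 *     sec = memory_region_find(get_system_memory(), addr, 4);
 *     if (sec.mr) {
 *         ... use sec.offset_within_region, int128_get64(sec.size) ...
 *         memory_region_unref(sec.mr);
 *     }
 */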
1619/**
1620 * memory_global_dirty_log_sync: synchronize the dirty log for all memory
1621 *
1622 * Synchronizes the dirty page log for all address spaces.
1623 */
1624void memory_global_dirty_log_sync(void);
1625
1626/**
1627 * memory_region_transaction_begin: Start a transaction.
1628 *
1629 * During a transaction, changes will be accumulated and made visible
1630 * only when the transaction ends (is committed).
1631 */
1632void memory_region_transaction_begin(void);
1633
1634/**
1635 * memory_region_transaction_commit: Commit a transaction and make changes
1636 *                                   visible to the guest.
1637 */
1638void memory_region_transaction_commit(void);
1639
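/*
 * Example: the usual batching pattern, here moving a hypothetical BAR
 * region.  All updates between begin and commit trigger a single
 * rebuild of the flat views instead of one per call:
 *
 *     memory_region_transaction_begin();
 *     memory_region_set_enabled(&s->bar_mr, false);
 *     memory_region_set_address(&s->bar_mr, new_base);
 *     memory_region_set_enabled(&s->bar_mr, true);
 *     memory_region_transaction_commit();
 */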
1640/**
1641 * memory_listener_register: register callbacks to be called when memory
1642 *                           sections are mapped or unmapped into an address
1643 *                           space
1644 *
1645 * @listener: an object containing the callbacks to be called
1646 * @filter: if non-%NULL, only regions in this address space will be observed
1647 */
1648void memory_listener_register(MemoryListener *listener, AddressSpace *filter);
1649
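/*
 * Example: a minimal listener sketch (callback body elided).  Passing
 * &address_space_memory as @filter restricts callbacks to that address
 * space; %NULL would observe every address space:
 *
 *     static void my_region_add(MemoryListener *listener,
 *                               MemoryRegionSection *section)
 *     {
 *         ...
 *     }
 *
 *     static MemoryListener my_listener = {
 *         .region_add = my_region_add,
 *         .priority = 10,
 *     };
 *
 *     memory_listener_register(&my_listener, &address_space_memory);
 */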
1650/**
1651 * memory_listener_unregister: undo the effect of memory_listener_register()
1652 *
1653 * @listener: an object containing the callbacks to be removed
1654 */
1655void memory_listener_unregister(MemoryListener *listener);
1656
1657/**
1658 * memory_global_dirty_log_start: begin dirty logging for all regions
1659 */
1660void memory_global_dirty_log_start(void);
1661
1662/**
1663 * memory_global_dirty_log_stop: end dirty logging for all regions
1664 */
1665void memory_global_dirty_log_stop(void);
1666
1667void mtree_info(fprintf_function mon_printf, void *f, bool flatview,
1668                bool dispatch_tree, bool owner);
1669
1670/**
1671 * memory_region_request_mmio_ptr: request a pointer to an MMIO
1672 * MemoryRegion.  If it is possible, map a RAM MemoryRegion with this pointer.
1673 * When the device wants to invalidate the pointer, it will call
1674 * memory_region_invalidate_mmio_ptr().
1675 *
1676 * @mr: #MemoryRegion to check
1677 * @addr: address within that region
1678 *
1679 * Returns true on success, false otherwise.
1680 */
1681bool memory_region_request_mmio_ptr(MemoryRegion *mr, hwaddr addr);
1682
1683/**
1684 * memory_region_invalidate_mmio_ptr: invalidate the pointer to an MMIO
1685 * region previously requested.
1686 * As a consequence, anything that wants to execute from this area will
1687 * need to request the pointer again.
1688 *
1689 * @mr: #MemoryRegion associated to the pointer.
1690 * @offset: offset within the memory region
1691 * @size: size of that area.
1692 */
1693void memory_region_invalidate_mmio_ptr(MemoryRegion *mr, hwaddr offset,
1694                                       unsigned size);
1695
1696/**
1697 * memory_region_dispatch_read: perform a read directly to the specified
1698 * MemoryRegion.
1699 *
1700 * @mr: #MemoryRegion to access
1701 * @addr: address within that region
1702 * @pval: pointer to uint64_t which the data is written to
1703 * @size: size of the access in bytes
1704 * @attrs: memory transaction attributes to use for the access
1705 */
1706MemTxResult memory_region_dispatch_read(MemoryRegion *mr,
1707                                        hwaddr addr,
1708                                        uint64_t *pval,
1709                                        unsigned size,
1710                                        MemTxAttrs attrs);
1711/**
1712 * memory_region_dispatch_write: perform a write directly to the specified
1713 * MemoryRegion.
1714 *
1715 * @mr: #MemoryRegion to access
1716 * @addr: address within that region
1717 * @data: data to write
1718 * @size: size of the access in bytes
1719 * @attrs: memory transaction attributes to use for the access
1720 */
1721MemTxResult memory_region_dispatch_write(MemoryRegion *mr,
1722                                         hwaddr addr,
1723                                         uint64_t data,
1724                                         unsigned size,
1725                                         MemTxAttrs attrs);
1726
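/*
 * Example: a hedged sketch for callers that already hold a translated
 * #MemoryRegion and want to bypass the address-space lookup; "mr" and
 * "offset" are assumed to come from an earlier translate step:
 *
 *     uint64_t val;
 *     MemTxResult r;
 *
 *     r = memory_region_dispatch_read(mr, offset, &val, 4,
 *                                     MEMTXATTRS_UNSPECIFIED);
 *     if (r == MEMTX_OK) {
 *         r = memory_region_dispatch_write(mr, offset, val | 1, 4,
 *                                          MEMTXATTRS_UNSPECIFIED);
 *     }
 */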
1727/**
1728 * address_space_init: initializes an address space
1729 *
1730 * @as: an uninitialized #AddressSpace
1731 * @root: a #MemoryRegion that routes addresses for the address space
1732 * @name: an address space name.  The name is only used for debugging
1733 *        output.
1734 */
1735void address_space_init(AddressSpace *as, MemoryRegion *root, const char *name);
1736
1737/**
1738 * address_space_destroy: destroy an address space
1739 *
1740 * Releases all resources associated with an address space.  After an address space
1741 * is destroyed, its root memory region (given by address_space_init()) may be destroyed
1742 * as well.
1743 *
1744 * @as: address space to be destroyed
1745 */
1746void address_space_destroy(AddressSpace *as);
1747
1748/**
1749 * address_space_rw: read from or write to an address space.
1750 *
1751 * Return a MemTxResult indicating whether the operation succeeded
1752 * or failed (eg unassigned memory, device rejected the transaction,
1753 * IOMMU fault).
1754 *
1755 * @as: #AddressSpace to be accessed
1756 * @addr: address within that address space
1757 * @attrs: memory transaction attributes
1758 * @buf: buffer with the data transferred
1759 * @len: the number of bytes to read or write
1760 * @is_write: indicates the transfer direction
1761 */
1762MemTxResult address_space_rw(AddressSpace *as, hwaddr addr,
1763                             MemTxAttrs attrs, uint8_t *buf,
1764                             int len, bool is_write);
1765
1766/**
1767 * address_space_write: write to address space.
1768 *
1769 * Return a MemTxResult indicating whether the operation succeeded
1770 * or failed (eg unassigned memory, device rejected the transaction,
1771 * IOMMU fault).
1772 *
1773 * @as: #AddressSpace to be accessed
1774 * @addr: address within that address space
1775 * @attrs: memory transaction attributes
1776 * @buf: buffer with the data transferred
1777 * @len: the number of bytes to write
1778 */
1779MemTxResult address_space_write(AddressSpace *as, hwaddr addr,
1780                                MemTxAttrs attrs,
1781                                const uint8_t *buf, int len);
1782
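/*
 * Example: an illustrative DMA-style store ("dma_addr" and the error
 * handling are hypothetical); MEMTXATTRS_UNSPECIFIED is the stock
 * attribute value for callers with no special requirements:
 *
 *     uint32_t v = cpu_to_le32(status);
 *     MemTxResult r;
 *
 *     r = address_space_write(&address_space_memory, dma_addr,
 *                             MEMTXATTRS_UNSPECIFIED,
 *                             (const uint8_t *)&v, sizeof(v));
 *     if (r != MEMTX_OK) {
 *         ... report a DMA error ...
 *     }
 */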
1783/* address_space_ld*: load from an address space
1784 * address_space_st*: store to an address space
1785 *
1786 * These functions perform a load or store of the byte, word,
1787 * longword or quad to the specified address within the AddressSpace.
1788 * The _le suffixed functions treat the data as little endian;
1789 * _be indicates big endian; no suffix indicates "same endianness
1790 * as guest CPU".
1791 *
1792 * The "guest CPU endianness" accessors are deprecated for use outside
1793 * target-* code; devices should be CPU-agnostic and use either the LE
1794 * or the BE accessors.
1795 *
1796 * @as: #AddressSpace to be accessed
1797 * @addr: address within that address space
1798 * @val: data value, for stores
1799 * @attrs: memory transaction attributes
1800 * @result: location to write the success/failure of the transaction;
1801 *   if NULL, this information is discarded
1802 */
1803
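/*
 * Example: a sketch of the accessors generated below; the descriptor
 * layout and DESC_DONE flag are hypothetical.  Passing %NULL for
 * @result discards the transaction status:
 *
 *     uint32_t flags;
 *
 *     flags = address_space_ldl_le(&address_space_memory, desc_addr,
 *                                  MEMTXATTRS_UNSPECIFIED, NULL);
 *     address_space_stl_le(&address_space_memory, desc_addr + 4,
 *                          flags | DESC_DONE,
 *                          MEMTXATTRS_UNSPECIFIED, NULL);
 */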
1804#define SUFFIX
1805#define ARG1         as
1806#define ARG1_DECL    AddressSpace *as
1807#include "exec/memory_ldst.inc.h"
1808
1809#define SUFFIX
1810#define ARG1         as
1811#define ARG1_DECL    AddressSpace *as
1812#include "exec/memory_ldst_phys.inc.h"
1813
1814struct MemoryRegionCache {
1815    void *ptr;
1816    hwaddr xlat;
1817    hwaddr len;
1818    FlatView *fv;
1819    MemoryRegionSection mrs;
1820    bool is_write;
1821};
1822
1823#define MEMORY_REGION_CACHE_INVALID ((MemoryRegionCache) { .mrs.mr = NULL })
1824
1825
1826/* address_space_ld*_cached: load from a cached #MemoryRegion
1827 * address_space_st*_cached: store into a cached #MemoryRegion
1828 *
1829 * These functions perform a load or store of the byte, word,
1830 * longword or quad to the specified address.  The address is
1831 * a physical address in the AddressSpace, but it must lie within
1832 * a #MemoryRegion that was mapped with address_space_cache_init.
1833 *
1834 * The _le suffixed functions treat the data as little endian;
1835 * _be indicates big endian; no suffix indicates "same endianness
1836 * as guest CPU".
1837 *
1838 * The "guest CPU endianness" accessors are deprecated for use outside
1839 * target-* code; devices should be CPU-agnostic and use either the LE
1840 * or the BE accessors.
1841 *
1842 * @cache: previously initialized #MemoryRegionCache to be accessed
1843 * @addr: address within the address space
1844 * @val: data value, for stores
1845 * @attrs: memory transaction attributes
1846 * @result: location to write the success/failure of the transaction;
1847 *   if NULL, this information is discarded
1848 */
1849
1850#define SUFFIX       _cached_slow
1851#define ARG1         cache
1852#define ARG1_DECL    MemoryRegionCache *cache
1853#include "exec/memory_ldst.inc.h"
1854
1855/* Inline fast path for direct RAM access.  */
1856static inline uint8_t address_space_ldub_cached(MemoryRegionCache *cache,
1857    hwaddr addr, MemTxAttrs attrs, MemTxResult *result)
1858{
1859    assert(addr < cache->len);
1860    if (likely(cache->ptr)) {
1861        return ldub_p(cache->ptr + addr);
1862    } else {
1863        return address_space_ldub_cached_slow(cache, addr, attrs, result);
1864    }
1865}
1866
1867static inline void address_space_stb_cached(MemoryRegionCache *cache,
1868    hwaddr addr, uint32_t val, MemTxAttrs attrs, MemTxResult *result)
1869{
1870    assert(addr < cache->len);
1871    if (likely(cache->ptr)) {
1872        stb_p(cache->ptr + addr, val);
1873    } else {
1874        address_space_stb_cached_slow(cache, addr, val, attrs, result);
1875    }
1876}
1877
1878#define ENDIANNESS   _le
1879#include "exec/memory_ldst_cached.inc.h"
1880
1881#define ENDIANNESS   _be
1882#include "exec/memory_ldst_cached.inc.h"
1883
1884#define SUFFIX       _cached
1885#define ARG1         cache
1886#define ARG1_DECL    MemoryRegionCache *cache
1887#include "exec/memory_ldst_phys.inc.h"
1888
1889/* address_space_cache_init: prepare for repeated access to a physical
1890 * memory region
1891 *
1892 * @cache: #MemoryRegionCache to be filled
1893 * @as: #AddressSpace to be accessed
1894 * @addr: address within that address space
1895 * @len: length of buffer
1896 * @is_write: indicates the transfer direction
1897 *
1898 * Will only work with RAM, and may map a subset of the requested range by
1899 * returning a value that is less than @len.  On failure, returns a negative
1900 * errno value.
1901 *
1902 * Because it only works with RAM, this function can be used for
1903 * read-modify-write operations.  In this case, is_write should be %true.
1904 *
1905 * Note that addresses passed to the address_space_*_cached functions
1906 * are relative to @addr.
1907 */
1908int64_t address_space_cache_init(MemoryRegionCache *cache,
1909                                 AddressSpace *as,
1910                                 hwaddr addr,
1911                                 hwaddr len,
1912                                 bool is_write);
1913
1914/**
1915 * address_space_cache_invalidate: complete a write to a #MemoryRegionCache
1916 *
1917 * @cache: The #MemoryRegionCache to operate on.
1918 * @addr: The first physical address that was written, relative to the
1919 * address that was passed to @address_space_cache_init.
1920 * @access_len: The number of bytes that were written starting at @addr.
1921 */
1922void address_space_cache_invalidate(MemoryRegionCache *cache,
1923                                    hwaddr addr,
1924                                    hwaddr access_len);
1925
1926/**
1927 * address_space_cache_destroy: free a #MemoryRegionCache
1928 *
1929 * @cache: The #MemoryRegionCache whose memory should be released.
1930 */
1931void address_space_cache_destroy(MemoryRegionCache *cache);
1932
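/*
 * Example: a sketch of the full cache lifecycle; the descriptor layout
 * and DESC_USED flag are hypothetical.  Note that the cached accessors
 * take offsets relative to the @addr passed to the init call:
 *
 *     MemoryRegionCache cache = MEMORY_REGION_CACHE_INVALID;
 *     int64_t mapped;
 *     uint16_t flags;
 *
 *     mapped = address_space_cache_init(&cache, as, desc_base,
 *                                       desc_size, true);
 *     if (mapped < desc_size) {
 *         ... fall back to uncached accesses ...
 *     }
 *     flags = address_space_lduw_le_cached(&cache, 2,
 *                                          MEMTXATTRS_UNSPECIFIED, NULL);
 *     address_space_stw_le_cached(&cache, 2, flags | DESC_USED,
 *                                 MEMTXATTRS_UNSPECIFIED, NULL);
 *     address_space_cache_invalidate(&cache, 2, sizeof(flags));
 *     address_space_cache_destroy(&cache);
 */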
1933/* address_space_get_iotlb_entry: translate an address into an IOTLB
1934 * entry. Should be called from an RCU critical section.
1935 */
1936IOMMUTLBEntry address_space_get_iotlb_entry(AddressSpace *as, hwaddr addr,
1937                                            bool is_write, MemTxAttrs attrs);
1938
1939/* address_space_translate: translate an address range within an address
1940 * space into a MemoryRegion and a range within that region.  Should be
1941 * called from an RCU critical section, so that the last reference to the
1942 * returned region does not disappear after address_space_translate returns.
1943 *
1944 * @fv: #FlatView to be accessed
1945 * @addr: address within that address space
1946 * @xlat: pointer to address within the returned memory region section's
1947 * #MemoryRegion.
1948 * @len: pointer to length
1949 * @is_write: indicates the transfer direction
1950 * @attrs: memory attributes
1951 */
1952MemoryRegion *flatview_translate(FlatView *fv,
1953                                 hwaddr addr, hwaddr *xlat,
1954                                 hwaddr *len, bool is_write,
1955                                 MemTxAttrs attrs);
1956
1957static inline MemoryRegion *address_space_translate(AddressSpace *as,
1958                                                    hwaddr addr, hwaddr *xlat,
1959                                                    hwaddr *len, bool is_write,
1960                                                    MemTxAttrs attrs)
1961{
1962    return flatview_translate(address_space_to_flatview(as),
1963                              addr, xlat, len, is_write, attrs);
1964}
1965
1966/* address_space_access_valid: check for validity of accessing an address
1967 * space range
1968 *
1969 * Check whether memory is assigned to the given address space range, and
1970 * access is permitted by any IOMMU regions that are active for the address
1971 * space.
1972 *
1973 * For now, addr and len should be aligned to a page size.  This limitation
1974 * will be lifted in the future.
1975 *
1976 * @as: #AddressSpace to be accessed
1977 * @addr: address within that address space
1978 * @len: length of the area to be checked
1979 * @is_write: indicates the transfer direction
1980 * @attrs: memory attributes
1981 */
1982bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len,
1983                                bool is_write, MemTxAttrs attrs);
1984
1985/* address_space_map: map a physical memory region into a host virtual address
1986 *
1987 * May map a subset of the requested range, given by and returned in @plen.
1988 * May return %NULL if resources needed to perform the mapping are exhausted.
1989 * Use only for reads OR writes - not for read-modify-write operations.
1990 * Use cpu_register_map_client() to know when retrying the map operation is
1991 * likely to succeed.
1992 *
1993 * @as: #AddressSpace to be accessed
1994 * @addr: address within that address space
1995 * @plen: pointer to length of buffer; updated on return
1996 * @is_write: indicates the transfer direction
1997 * @attrs: memory attributes
1998 */
1999void *address_space_map(AddressSpace *as, hwaddr addr,
2000                        hwaddr *plen, bool is_write, MemTxAttrs attrs);
2001
2002/* address_space_unmap: Unmaps a memory region previously mapped by address_space_map()
2003 *
2004 * Will also mark the memory as dirty if @is_write == %true.  @access_len gives
2005 * the amount of memory that was actually read or written by the caller.
2006 *
2007 * @as: #AddressSpace used
2008 * @buffer: host pointer as returned by address_space_map()
2009 * @len: buffer length as returned by address_space_map()
2010 * @access_len: amount of data actually transferred
2011 * @is_write: indicates the transfer direction
2012 */
2013void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
2014                         int is_write, hwaddr access_len);
2015
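/*
 * Example: a sketch of the map/unmap pattern for a device that wants
 * direct access to guest memory ("addr" and "size" are hypothetical):
 *
 *     hwaddr plen = size;
 *     void *p;
 *
 *     p = address_space_map(as, addr, &plen, true,
 *                           MEMTXATTRS_UNSPECIFIED);
 *     if (!p) {
 *         ... register a map client and retry later ...
 *     }
 *     ... fill up to plen bytes at p (may be less than size) ...
 *     address_space_unmap(as, p, plen, true, plen);
 */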
2016
2017/* Internal functions, part of the implementation of address_space_read.  */
2018MemTxResult address_space_read_full(AddressSpace *as, hwaddr addr,
2019                                    MemTxAttrs attrs, uint8_t *buf, int len);
2020MemTxResult flatview_read_continue(FlatView *fv, hwaddr addr,
2021                                   MemTxAttrs attrs, uint8_t *buf,
2022                                   int len, hwaddr addr1, hwaddr l,
2023                                   MemoryRegion *mr);
2024void *qemu_map_ram_ptr(RAMBlock *ram_block, ram_addr_t addr);
2025
2026/* Internal functions, part of the implementation of address_space_read_cached
2027 * and address_space_write_cached.  */
2028void address_space_read_cached_slow(MemoryRegionCache *cache,
2029                                    hwaddr addr, void *buf, int len);
2030void address_space_write_cached_slow(MemoryRegionCache *cache,
2031                                     hwaddr addr, const void *buf, int len);
2032
2033static inline bool memory_access_is_direct(MemoryRegion *mr, bool is_write)
2034{
2035    if (is_write) {
2036        return memory_region_is_ram(mr) &&
2037               !mr->readonly && !memory_region_is_ram_device(mr);
2038    } else {
2039        return (memory_region_is_ram(mr) && !memory_region_is_ram_device(mr)) ||
2040               memory_region_is_romd(mr);
2041    }
2042}
2043
2044/**
2045 * address_space_read: read from an address space.
2046 *
2047 * Return a MemTxResult indicating whether the operation succeeded
2048 * or failed (eg unassigned memory, device rejected the transaction,
2049 * IOMMU fault).  Called within RCU critical section.
2050 *
2051 * @as: #AddressSpace to be accessed
2052 * @addr: address within that address space
2053 * @attrs: memory transaction attributes
2054 * @buf: buffer with the data transferred
2055 */
2056static inline __attribute__((__always_inline__))
2057MemTxResult address_space_read(AddressSpace *as, hwaddr addr,
2058                               MemTxAttrs attrs, uint8_t *buf,
2059                               int len)
2060{
2061    MemTxResult result = MEMTX_OK;
2062    hwaddr l, addr1;
2063    void *ptr;
2064    MemoryRegion *mr;
2065    FlatView *fv;
2066
2067    if (__builtin_constant_p(len)) {
2068        if (len) {
2069            rcu_read_lock();
2070            fv = address_space_to_flatview(as);
2071            l = len;
2072            mr = flatview_translate(fv, addr, &addr1, &l, false, attrs);
2073            if (len == l && memory_access_is_direct(mr, false)) {
2074                ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
2075                memcpy(buf, ptr, len);
2076            } else {
2077                result = flatview_read_continue(fv, addr, attrs, buf, len,
2078                                                addr1, l, mr);
2079            }
2080            rcu_read_unlock();
2081        }
2082    } else {
2083        result = address_space_read_full(as, addr, attrs, buf, len);
2084    }
2085    return result;
2086}
2087
2088/**
2089 * address_space_read_cached: read from a cached RAM region
2090 *
2091 * @cache: Cached region to be addressed
2092 * @addr: address relative to the base of the RAM region
2093 * @buf: buffer with the data transferred
2094 * @len: length of the data transferred
2095 */
2096static inline void
2097address_space_read_cached(MemoryRegionCache *cache, hwaddr addr,
2098                          void *buf, int len)
2099{
2100    assert(addr < cache->len && len <= cache->len - addr);
2101    if (likely(cache->ptr)) {
2102        memcpy(buf, cache->ptr + addr, len);
2103    } else {
2104        address_space_read_cached_slow(cache, addr, buf, len);
2105    }
2106}
2107
2108/**
2109 * address_space_write_cached: write to a cached RAM region
2110 *
2111 * @cache: Cached region to be addressed
2112 * @addr: address relative to the base of the RAM region
2113 * @buf: buffer with the data transferred
2114 * @len: length of the data transferred
2115 */
2116static inline void
2117address_space_write_cached(MemoryRegionCache *cache, hwaddr addr,
2118                           void *buf, int len)
2119{
2120    assert(addr < cache->len && len <= cache->len - addr);
2121    if (likely(cache->ptr)) {
2122        memcpy(cache->ptr + addr, buf, len);
2123    } else {
2124        address_space_write_cached_slow(cache, addr, buf, len);
2125    }
2126}
2127
2128#endif
2129
2130#endif
2131