/*
 * Physical memory management API
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates
 *
 * Authors:
 *  Avi Kivity <avi@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#ifndef MEMORY_H
#define MEMORY_H

#ifndef CONFIG_USER_ONLY

#include "exec/cpu-common.h"
#include "exec/hwaddr.h"
#include "exec/memattrs.h"
#include "exec/ramlist.h"
#include "qemu/queue.h"
#include "qemu/int128.h"
#include "qemu/notify.h"
#include "qom/object.h"
#include "qemu/rcu.h"
#include "hw/qdev-core.h"

#define RAM_ADDR_INVALID (~(ram_addr_t)0)

#define MAX_PHYS_ADDR_SPACE_BITS 62
#define MAX_PHYS_ADDR            (((hwaddr)1 << MAX_PHYS_ADDR_SPACE_BITS) - 1)

#define TYPE_MEMORY_REGION "qemu:memory-region"
#define MEMORY_REGION(obj) \
        OBJECT_CHECK(MemoryRegion, (obj), TYPE_MEMORY_REGION)

#define TYPE_IOMMU_MEMORY_REGION "qemu:iommu-memory-region"
#define IOMMU_MEMORY_REGION(obj) \
        OBJECT_CHECK(IOMMUMemoryRegion, (obj), TYPE_IOMMU_MEMORY_REGION)
#define IOMMU_MEMORY_REGION_CLASS(klass) \
        OBJECT_CLASS_CHECK(IOMMUMemoryRegionClass, (klass), \
                           TYPE_IOMMU_MEMORY_REGION)
#define IOMMU_MEMORY_REGION_GET_CLASS(obj) \
        OBJECT_GET_CLASS(IOMMUMemoryRegionClass, (obj), \
                         TYPE_IOMMU_MEMORY_REGION)

extern bool global_dirty_log;

typedef struct MemoryRegionOps MemoryRegionOps;
typedef struct MemoryRegionMmio MemoryRegionMmio;

struct MemoryRegionMmio {
    CPUReadMemoryFunc *read[3];
    CPUWriteMemoryFunc *write[3];
};

typedef struct IOMMUTLBEntry IOMMUTLBEntry;

/* See address_space_translate: bit 0 is read, bit 1 is write.  */
typedef enum {
    IOMMU_NONE = 0,
    IOMMU_RO   = 1,
    IOMMU_WO   = 2,
    IOMMU_RW   = 3,
} IOMMUAccessFlags;

#define IOMMU_ACCESS_FLAG(r, w) (((r) ? IOMMU_RO : 0) | ((w) ? IOMMU_WO : 0))

struct IOMMUTLBEntry {
    AddressSpace    *target_as;
    hwaddr           iova;
    hwaddr           translated_addr;
    hwaddr           addr_mask;  /* 0xfff = 4k translation */
    IOMMUAccessFlags perm;
};

/*
 * Bitmap for different IOMMUNotifier capabilities. Each notifier can
 * register with one or multiple IOMMU Notifier capability bit(s).
 */
typedef enum {
    IOMMU_NOTIFIER_NONE = 0,
    /* Notify cache invalidations */
    IOMMU_NOTIFIER_UNMAP = 0x1,
    /* Notify entry changes (newly created entries) */
    IOMMU_NOTIFIER_MAP = 0x2,
} IOMMUNotifierFlag;

#define IOMMU_NOTIFIER_ALL (IOMMU_NOTIFIER_MAP | IOMMU_NOTIFIER_UNMAP)

struct IOMMUNotifier;
typedef void (*IOMMUNotify)(struct IOMMUNotifier *notifier,
                            IOMMUTLBEntry *data);

struct IOMMUNotifier {
    IOMMUNotify notify;
    IOMMUNotifierFlag notifier_flags;
    /* Notify for address space range start <= addr <= end */
    hwaddr start;
    hwaddr end;
    int iommu_idx;
    QLIST_ENTRY(IOMMUNotifier) node;
};
typedef struct IOMMUNotifier IOMMUNotifier;
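
/*
 * Example (an illustrative sketch, not part of the API): how an IOMMU
 * implementation might fill in an IOMMUTLBEntry for a 4K read-write
 * translation.  The variables "iova" and "pa" and the use of
 * address_space_memory are hypothetical.
 *
 *   IOMMUTLBEntry entry = {
 *       .target_as = &address_space_memory,
 *       .iova = iova & ~(hwaddr)0xfff,
 *       .translated_addr = pa & ~(hwaddr)0xfff,
 *       .addr_mask = 0xfff,
 *       .perm = IOMMU_ACCESS_FLAG(true, true),
 *   };
 *
 * .addr_mask covers the low bits of the page, so 0xfff describes a 4K
 * translation; IOMMU_ACCESS_FLAG(true, true) evaluates to IOMMU_RW.
 */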

/* RAM is pre-allocated and passed into qemu_ram_alloc_from_ptr */
#define RAM_PREALLOC   (1 << 0)

/* RAM is mmap-ed with MAP_SHARED */
#define RAM_SHARED     (1 << 1)

/* Only a portion of RAM (used_length) is actually used, and migrated.
 * This used_length size can change across reboots.
 */
#define RAM_RESIZEABLE (1 << 2)

/* UFFDIO_ZEROPAGE is available on this RAMBlock to atomically
 * zero the page and wake waiting processes.
 * (Set during postcopy)
 */
#define RAM_UF_ZEROPAGE (1 << 3)

/* RAM can be migrated */
#define RAM_MIGRATABLE (1 << 4)

/* RAM is a persistent kind of memory */
#define RAM_PMEM (1 << 5)

static inline void iommu_notifier_init(IOMMUNotifier *n, IOMMUNotify fn,
                                       IOMMUNotifierFlag flags,
                                       hwaddr start, hwaddr end,
                                       int iommu_idx)
{
    n->notify = fn;
    n->notifier_flags = flags;
    n->start = start;
    n->end = end;
    n->iommu_idx = iommu_idx;
}

/*
 * Memory region callbacks
 */
struct MemoryRegionOps {
    /* Read from the memory region. @addr is relative to @mr; @size is
     * in bytes. */
    uint64_t (*read)(void *opaque,
                     hwaddr addr,
                     unsigned size);
    /* Write to the memory region. @addr is relative to @mr; @size is
     * in bytes. */
    void (*write)(void *opaque,
                  hwaddr addr,
                  uint64_t data,
                  unsigned size);

    MemTxResult (*read_with_attrs)(void *opaque,
                                   hwaddr addr,
                                   uint64_t *data,
                                   unsigned size,
                                   MemTxAttrs attrs);
    MemTxResult (*write_with_attrs)(void *opaque,
                                    hwaddr addr,
                                    uint64_t data,
                                    unsigned size,
                                    MemTxAttrs attrs);

    enum device_endian endianness;
    /* Guest-visible constraints: */
    struct {
        /* If nonzero, specify bounds on access sizes beyond which a machine
         * check is thrown.
         */
        unsigned min_access_size;
        unsigned max_access_size;
        /* If true, unaligned accesses are supported.  Otherwise unaligned
         * accesses throw machine checks.
         */
        bool unaligned;
        /*
         * If present, and returns #false, the transaction is not accepted
         * by the device (and results in machine dependent behaviour such
         * as a machine check exception).
         */
        bool (*accepts)(void *opaque, hwaddr addr,
                        unsigned size, bool is_write,
                        MemTxAttrs attrs);
    } valid;
    /* Internal implementation constraints: */
    struct {
        /* If nonzero, specifies the minimum size implemented.  Smaller sizes
         * will be rounded upwards and a partial result will be returned.
         */
        unsigned min_access_size;
        /* If nonzero, specifies the maximum size implemented.  Larger sizes
         * will be done as a series of accesses with smaller sizes.
         */
        unsigned max_access_size;
        /* If true, unaligned accesses are supported.  Otherwise all accesses
         * are converted to (possibly multiple) naturally aligned accesses.
         */
        bool unaligned;
    } impl;
};

enum IOMMUMemoryRegionAttr {
    IOMMU_ATTR_SPAPR_TCE_FD
};
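
/*
 * Example (a minimal sketch; the "mydev" device, its state and register
 * file are hypothetical): a MemoryRegionOps for a device that only
 * implements aligned 4-byte accesses.  Out-of-spec accesses are rejected
 * or split according to .valid and .impl.
 *
 *   static uint64_t mydev_read(void *opaque, hwaddr addr, unsigned size)
 *   {
 *       MyDevState *s = opaque;
 *
 *       return s->regs[addr >> 2];
 *   }
 *
 *   static void mydev_write(void *opaque, hwaddr addr, uint64_t data,
 *                           unsigned size)
 *   {
 *       MyDevState *s = opaque;
 *
 *       s->regs[addr >> 2] = data;
 *   }
 *
 *   static const MemoryRegionOps mydev_ops = {
 *       .read = mydev_read,
 *       .write = mydev_write,
 *       .endianness = DEVICE_NATIVE_ENDIAN,
 *       .valid = {
 *           .min_access_size = 4,
 *           .max_access_size = 4,
 *       },
 *       .impl = {
 *           .min_access_size = 4,
 *           .max_access_size = 4,
 *       },
 *   };
 */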

/**
 * IOMMUMemoryRegionClass:
 *
 * All IOMMU implementations need to subclass TYPE_IOMMU_MEMORY_REGION
 * and provide an implementation of at least the @translate method here
 * to handle requests to the memory region. Other methods are optional.
 *
 * The IOMMU implementation must use the IOMMU notifier infrastructure
 * to report whenever mappings are changed, by calling
 * memory_region_notify_iommu() (or, if necessary, by calling
 * memory_region_notify_one() for each registered notifier).
 *
 * Conceptually an IOMMU provides a mapping from input address
 * to an output TLB entry. If the IOMMU is aware of memory transaction
 * attributes and the output TLB entry depends on the transaction
 * attributes, we represent this using IOMMU indexes. Each index
 * selects a particular translation table that the IOMMU has:
 *   @attrs_to_index returns the IOMMU index for a set of transaction attributes
 *   @translate takes an input address and an IOMMU index
 * and the mapping returned can only depend on the input address and the
 * IOMMU index.
 *
 * Most IOMMUs don't care about the transaction attributes and support
 * only a single IOMMU index. A more complex IOMMU might have one index
 * for secure transactions and one for non-secure transactions.
 */
typedef struct IOMMUMemoryRegionClass {
    /* private */
    struct DeviceClass parent_class;

    /*
     * Return a TLB entry that contains a given address.
     *
     * The IOMMUAccessFlags indicated via @flag are optional and may
     * be specified as IOMMU_NONE to indicate that the caller needs
     * the full translation information for both reads and writes. If
     * the access flags are specified then the IOMMU implementation
     * may use this as an optimization, to stop doing a page table
     * walk as soon as it knows that the requested permissions are not
     * allowed. If IOMMU_NONE is passed then the IOMMU must do the
     * full page table walk and report the permissions in the returned
     * IOMMUTLBEntry. (Note that this implies that an IOMMU may not
     * return different mappings for reads and writes.)
     *
     * The returned information remains valid while the caller is
     * holding the big QEMU lock or is inside an RCU critical section;
     * if the caller wishes to cache the mapping beyond that it must
     * register an IOMMU notifier so it can invalidate its cached
     * information when the IOMMU mapping changes.
     *
     * @iommu: the IOMMUMemoryRegion
     * @addr: address to be translated within the memory region
     * @flag: requested access permissions
     * @iommu_idx: IOMMU index for the translation
     */
    IOMMUTLBEntry (*translate)(IOMMUMemoryRegion *iommu, hwaddr addr,
                               IOMMUAccessFlags flag, int iommu_idx);
    /* Returns minimum supported page size in bytes.
     * If this method is not provided then the minimum is assumed to
     * be TARGET_PAGE_SIZE.
     *
     * @iommu: the IOMMUMemoryRegion
     */
    uint64_t (*get_min_page_size)(IOMMUMemoryRegion *iommu);
    /* Called when IOMMU Notifier flag changes (i.e. when the set of
     * events which IOMMU users are requesting notification for changes).
     * Optional method -- need not be provided if the IOMMU does not
     * need to know exactly which events must be notified.
     *
     * @iommu: the IOMMUMemoryRegion
     * @old_flags: events which previously needed to be notified
     * @new_flags: events which now need to be notified
     */
    void (*notify_flag_changed)(IOMMUMemoryRegion *iommu,
                                IOMMUNotifierFlag old_flags,
                                IOMMUNotifierFlag new_flags);
    /* Called to handle memory_region_iommu_replay().
     *
     * The default implementation of memory_region_iommu_replay() is to
     * call the IOMMU translate method for every page in the address space
     * with flag == IOMMU_NONE and then call the notifier if translate
     * returns a valid mapping. If this method is implemented then it
     * overrides the default behaviour, and must provide the full semantics
     * of memory_region_iommu_replay(), by calling @notifier for every
     * translation present in the IOMMU.
     *
     * Optional method -- an IOMMU only needs to provide this method
     * if the default is inefficient or produces undesirable side effects.
     *
     * Note: this is not related to record-and-replay functionality.
     */
    void (*replay)(IOMMUMemoryRegion *iommu, IOMMUNotifier *notifier);

    /* Get IOMMU misc attributes. This is an optional method that
     * can be used to allow users of the IOMMU to get implementation-specific
     * information. The IOMMU implements this method to handle calls
     * by IOMMU users to memory_region_iommu_get_attr() by filling in
     * the arbitrary data pointer for any IOMMUMemoryRegionAttr values that
     * the IOMMU supports. If the method is unimplemented then
     * memory_region_iommu_get_attr() will always return -EINVAL.
     *
     * @iommu: the IOMMUMemoryRegion
     * @attr: attribute being queried
     * @data: memory to fill in with the attribute data
     *
     * Returns 0 on success, or a negative errno; in particular
     * returns -EINVAL for unrecognized or unimplemented attribute types.
     */
    int (*get_attr)(IOMMUMemoryRegion *iommu, enum IOMMUMemoryRegionAttr attr,
                    void *data);

    /* Return the IOMMU index to use for a given set of transaction attributes.
     *
     * Optional method: if an IOMMU only supports a single IOMMU index then
     * the default implementation of memory_region_iommu_attrs_to_index()
     * will return 0.
     *
     * The indexes supported by an IOMMU must be contiguous, starting at 0.
     *
     * @iommu: the IOMMUMemoryRegion
     * @attrs: memory transaction attributes
     */
    int (*attrs_to_index)(IOMMUMemoryRegion *iommu, MemTxAttrs attrs);

    /* Return the number of IOMMU indexes this IOMMU supports.
     *
     * Optional method: if this method is not provided, then
     * memory_region_iommu_num_indexes() will return 1, indicating that
     * only a single IOMMU index is supported.
     *
     * @iommu: the IOMMUMemoryRegion
     */
    int (*num_indexes)(IOMMUMemoryRegion *iommu);
} IOMMUMemoryRegionClass;

typedef struct CoalescedMemoryRange CoalescedMemoryRange;
typedef struct MemoryRegionIoeventfd MemoryRegionIoeventfd;

struct MemoryRegion {
    Object parent_obj;

    /* All fields are private - violators will be prosecuted */

    /* The following fields should fit in a cache line */
    bool romd_mode;
    bool ram;
    bool subpage;
    bool readonly; /* For RAM regions */
    bool nonvolatile;
    bool rom_device;
    bool flush_coalesced_mmio;
    bool global_locking;
    uint8_t dirty_log_mask;
    bool is_iommu;
    RAMBlock *ram_block;
    Object *owner;

    const MemoryRegionOps *ops;
    void *opaque;
    MemoryRegion *container;
    Int128 size;
    hwaddr addr;
    void (*destructor)(MemoryRegion *mr);
    uint64_t align;
    bool terminates;
    bool ram_device;
    bool enabled;
    bool warning_printed; /* For reservations */
    uint8_t vga_logging_count;
    MemoryRegion *alias;
    hwaddr alias_offset;
    int32_t priority;
    QTAILQ_HEAD(, MemoryRegion) subregions;
    QTAILQ_ENTRY(MemoryRegion) subregions_link;
    QTAILQ_HEAD(, CoalescedMemoryRange) coalesced;
    const char *name;
    unsigned ioeventfd_nb;
    MemoryRegionIoeventfd *ioeventfds;
};

struct IOMMUMemoryRegion {
    MemoryRegion parent_obj;

    QLIST_HEAD(, IOMMUNotifier) iommu_notify;
    IOMMUNotifierFlag iommu_notify_flags;
};

#define IOMMU_NOTIFIER_FOREACH(n, mr) \
    QLIST_FOREACH((n), &(mr)->iommu_notify, node)
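
/*
 * Example (a sketch under assumptions; "my_iommu" and its translation
 * table walker are hypothetical): the shape of a translate
 * implementation for an IOMMU that supports only a single IOMMU index.
 *
 *   static IOMMUTLBEntry my_iommu_translate(IOMMUMemoryRegion *iommu,
 *                                           hwaddr addr,
 *                                           IOMMUAccessFlags flag,
 *                                           int iommu_idx)
 *   {
 *       IOMMUTLBEntry entry = {
 *           .target_as = &address_space_memory,
 *           .iova = addr & ~(hwaddr)0xfff,
 *           .translated_addr = 0,
 *           .addr_mask = 0xfff,
 *           .perm = IOMMU_NONE,
 *       };
 *
 *       ...walk the translation tables; on success fill in
 *          entry.translated_addr and entry.perm...
 *       return entry;
 *   }
 *
 * A failed translation is reported by leaving .perm as IOMMU_NONE.
 */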

/**
 * MemoryListener: callbacks structure for updates to the physical memory map
 *
 * Allows a component to adjust to changes in the guest-visible memory map.
 * Use with memory_listener_register() and memory_listener_unregister().
 */
struct MemoryListener {
    void (*begin)(MemoryListener *listener);
    void (*commit)(MemoryListener *listener);
    void (*region_add)(MemoryListener *listener, MemoryRegionSection *section);
    void (*region_del)(MemoryListener *listener, MemoryRegionSection *section);
    void (*region_nop)(MemoryListener *listener, MemoryRegionSection *section);
    void (*log_start)(MemoryListener *listener, MemoryRegionSection *section,
                      int old, int new);
    void (*log_stop)(MemoryListener *listener, MemoryRegionSection *section,
                     int old, int new);
    void (*log_sync)(MemoryListener *listener, MemoryRegionSection *section);
    void (*log_clear)(MemoryListener *listener, MemoryRegionSection *section);
    void (*log_global_start)(MemoryListener *listener);
    void (*log_global_stop)(MemoryListener *listener);
    void (*eventfd_add)(MemoryListener *listener, MemoryRegionSection *section,
                        bool match_data, uint64_t data, EventNotifier *e);
    void (*eventfd_del)(MemoryListener *listener, MemoryRegionSection *section,
                        bool match_data, uint64_t data, EventNotifier *e);
    void (*coalesced_io_add)(MemoryListener *listener, MemoryRegionSection *section,
                             hwaddr addr, hwaddr len);
    void (*coalesced_io_del)(MemoryListener *listener, MemoryRegionSection *section,
                             hwaddr addr, hwaddr len);
    /* Lower = earlier (during add), later (during del) */
    unsigned priority;
    AddressSpace *address_space;
    QTAILQ_ENTRY(MemoryListener) link;
    QTAILQ_ENTRY(MemoryListener) link_as;
};

/**
 * AddressSpace: describes a mapping of addresses to #MemoryRegion objects
 */
struct AddressSpace {
    /* All fields are private. */
    struct rcu_head rcu;
    char *name;
    MemoryRegion *root;

    /* Accessed via RCU.  */
    struct FlatView *current_map;

    int ioeventfd_nb;
    struct MemoryRegionIoeventfd *ioeventfds;
    QTAILQ_HEAD(, MemoryListener) listeners;
    QTAILQ_ENTRY(AddressSpace) address_spaces_link;
};

typedef struct AddressSpaceDispatch AddressSpaceDispatch;
typedef struct FlatRange FlatRange;
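
/*
 * Example (a minimal sketch; the callback and listener names are
 * hypothetical): a MemoryListener that is told about every section of
 * the flattened view of an address space.
 *
 *   static void my_region_add(MemoryListener *listener,
 *                             MemoryRegionSection *section)
 *   {
 *       ...record or map the new section...
 *   }
 *
 *   static MemoryListener my_listener = {
 *       .region_add = my_region_add,
 *       .priority = 10,
 *   };
 *
 * and then, e.g. during device or accelerator setup:
 *
 *   memory_listener_register(&my_listener, &address_space_memory);
 *
 * Registration replays the current memory map, so region_add is called
 * for all existing sections before any further updates arrive.
 */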

/* Flattened global view of current active memory hierarchy.  Kept in sorted
 * order.
 */
struct FlatView {
    struct rcu_head rcu;
    unsigned ref;
    FlatRange *ranges;
    unsigned nr;
    unsigned nr_allocated;
    struct AddressSpaceDispatch *dispatch;
    MemoryRegion *root;
};

static inline FlatView *address_space_to_flatview(AddressSpace *as)
{
    return atomic_rcu_read(&as->current_map);
}


/**
 * MemoryRegionSection: describes a fragment of a #MemoryRegion
 *
 * @mr: the region, or %NULL if empty
 * @fv: the flat view of the address space the region is mapped in
 * @offset_within_region: the beginning of the section, relative to @mr's start
 * @size: the size of the section; will not exceed @mr's boundaries
 * @offset_within_address_space: the address of the first byte of the section
 *     relative to the region's address space
 * @readonly: writes to this section are ignored
 * @nonvolatile: this section is non-volatile
 */
struct MemoryRegionSection {
    Int128 size;
    MemoryRegion *mr;
    FlatView *fv;
    hwaddr offset_within_region;
    hwaddr offset_within_address_space;
    bool readonly;
    bool nonvolatile;
};

static inline bool MemoryRegionSection_eq(MemoryRegionSection *a,
                                          MemoryRegionSection *b)
{
    return a->mr == b->mr &&
           a->fv == b->fv &&
           a->offset_within_region == b->offset_within_region &&
           a->offset_within_address_space == b->offset_within_address_space &&
           int128_eq(a->size, b->size) &&
           a->readonly == b->readonly &&
           a->nonvolatile == b->nonvolatile;
}

/**
 * memory_region_init: Initialize a memory region
 *
 * The region typically acts as a container for other memory regions.  Use
 * memory_region_add_subregion() to add subregions.
 *
 * @mr: the #MemoryRegion to be initialized
 * @owner: the object that tracks the region's reference count
 * @name: used for debugging; not visible to the user or ABI
 * @size: size of the region; any subregions beyond this size will be clipped
 */
void memory_region_init(MemoryRegion *mr,
                        struct Object *owner,
                        const char *name,
                        uint64_t size);

/**
 * memory_region_ref: Add 1 to a memory region's reference count
 *
 * Whenever memory regions are accessed outside the BQL, they need to be
 * preserved against hot-unplug.  MemoryRegions actually do not have their
 * own reference count; they piggyback on a QOM object, their "owner".
 * This function adds a reference to the owner.
 *
 * All MemoryRegions must have an owner if they can disappear, even if the
 * device they belong to operates exclusively under the BQL.  This is because
 * the region could be returned at any time by memory_region_find, and this
 * is usually under guest control.
 *
 * @mr: the #MemoryRegion
 */
void memory_region_ref(MemoryRegion *mr);

/**
 * memory_region_unref: Remove 1 from a memory region's reference count
 *
 * Whenever memory regions are accessed outside the BQL, they need to be
 * preserved against hot-unplug.  MemoryRegions actually do not have their
 * own reference count; they piggyback on a QOM object, their "owner".
 * This function removes a reference to the owner and possibly destroys it.
 *
 * @mr: the #MemoryRegion
 */
void memory_region_unref(MemoryRegion *mr);
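
/*
 * Example (illustrative): the usual pattern for touching a region
 * outside the BQL is to pin its owner around the access.
 *
 *   memory_region_ref(mr);
 *   ...access the region without the BQL held...
 *   memory_region_unref(mr);
 *
 * The reference actually lands on the owner object, so the device (and
 * the region embedded in it) cannot disappear while it is in use.
 */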

/**
 * memory_region_init_io: Initialize an I/O memory region.
 *
 * Accesses into the region will cause the callbacks in @ops to be called.
 * If @size is nonzero, subregions will be clipped to @size.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @ops: a structure containing read and write callbacks to be used when
 *       I/O is performed on the region.
 * @opaque: passed to the read and write callbacks of the @ops structure.
 * @name: used for debugging; not visible to the user or ABI
 * @size: size of the region.
 */
void memory_region_init_io(MemoryRegion *mr,
                           struct Object *owner,
                           const MemoryRegionOps *ops,
                           void *opaque,
                           const char *name,
                           uint64_t size);

/**
 * memory_region_init_ram_nomigrate:  Initialize RAM memory region.  Accesses
 *                                    into the region will modify memory
 *                                    directly.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: Region name, becomes part of RAMBlock name used in migration stream
 *        must be unique within any device
 * @size: size of the region.
 * @errp: pointer to Error*, to store an error if it happens.
 *
 * Note that this function does not do anything to cause the data in the
 * RAM memory region to be migrated; that is the responsibility of the caller.
 */
void memory_region_init_ram_nomigrate(MemoryRegion *mr,
                                      struct Object *owner,
                                      const char *name,
                                      uint64_t size,
                                      Error **errp);

/**
 * memory_region_init_ram_shared_nomigrate:  Initialize RAM memory region.
 *                                           Accesses into the region will
 *                                           modify memory directly.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: Region name, becomes part of RAMBlock name used in migration stream
 *        must be unique within any device
 * @size: size of the region.
 * @share: allow remapping RAM to different addresses
 * @errp: pointer to Error*, to store an error if it happens.
 *
 * Note that this function is similar to memory_region_init_ram_nomigrate.
 * The only difference is that part of the RAM region can be remapped.
 */
void memory_region_init_ram_shared_nomigrate(MemoryRegion *mr,
                                             struct Object *owner,
                                             const char *name,
                                             uint64_t size,
                                             bool share,
                                             Error **errp);
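
/*
 * Example (a sketch; "s", its fields and the hypothetical mydev_ops
 * from the earlier sketch are assumptions): creating a 4KiB MMIO
 * region for a device instance.
 *
 *   memory_region_init_io(&s->iomem, OBJECT(s), &mydev_ops, s,
 *                         "mydev-regs", 0x1000);
 *
 * The region can then be exposed by the device's bus code, e.g. with
 * sysbus_init_mmio() for a sysbus device, or placed directly into a
 * container with memory_region_add_subregion().
 */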

/**
 * memory_region_init_resizeable_ram:  Initialize memory region with resizeable
 *                                     RAM.  Accesses into the region will
 *                                     modify memory directly.  Only an initial
 *                                     portion of this RAM is actually used.
 *                                     The used size can change across reboots.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: Region name, becomes part of RAMBlock name used in migration stream
 *        must be unique within any device
 * @size: used size of the region.
 * @max_size: max size of the region.
 * @resized: callback to notify owner about used size change.
 * @errp: pointer to Error*, to store an error if it happens.
 *
 * Note that this function does not do anything to cause the data in the
 * RAM memory region to be migrated; that is the responsibility of the caller.
 */
void memory_region_init_resizeable_ram(MemoryRegion *mr,
                                       struct Object *owner,
                                       const char *name,
                                       uint64_t size,
                                       uint64_t max_size,
                                       void (*resized)(const char*,
                                                       uint64_t length,
                                                       void *host),
                                       Error **errp);
#ifdef CONFIG_POSIX

/**
 * memory_region_init_ram_from_file:  Initialize RAM memory region with a
 *                                    mmap-ed backend.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: Region name, becomes part of RAMBlock name used in migration stream
 *        must be unique within any device
 * @size: size of the region.
 * @align: alignment of the region base address; if 0, the default alignment
 *         (getpagesize()) will be used.
 * @ram_flags: Memory region features:
 *             - RAM_SHARED: memory must be mmaped with the MAP_SHARED flag
 *             - RAM_PMEM: the memory is persistent memory
 *             Other bits are ignored for now.
 * @path: the path in which to allocate the RAM.
 * @errp: pointer to Error*, to store an error if it happens.
 *
 * Note that this function does not do anything to cause the data in the
 * RAM memory region to be migrated; that is the responsibility of the caller.
 */
void memory_region_init_ram_from_file(MemoryRegion *mr,
                                      struct Object *owner,
                                      const char *name,
                                      uint64_t size,
                                      uint64_t align,
                                      uint32_t ram_flags,
                                      const char *path,
                                      Error **errp);

/**
 * memory_region_init_ram_from_fd:  Initialize RAM memory region with a
 *                                  mmap-ed backend.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: the name of the region.
 * @size: size of the region.
 * @share: %true if memory must be mmaped with the MAP_SHARED flag
 * @fd: the fd to mmap.
 * @errp: pointer to Error*, to store an error if it happens.
 *
 * Note that this function does not do anything to cause the data in the
 * RAM memory region to be migrated; that is the responsibility of the caller.
 */
void memory_region_init_ram_from_fd(MemoryRegion *mr,
                                    struct Object *owner,
                                    const char *name,
                                    uint64_t size,
                                    bool share,
                                    int fd,
                                    Error **errp);
#endif

/**
 * memory_region_init_ram_ptr:  Initialize RAM memory region from a
 *                              user-provided pointer.  Accesses into the
 *                              region will modify memory directly.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: Region name, becomes part of RAMBlock name used in migration stream
 *        must be unique within any device
 * @size: size of the region.
 * @ptr: memory to be mapped; must contain at least @size bytes.
 *
 * Note that this function does not do anything to cause the data in the
 * RAM memory region to be migrated; that is the responsibility of the caller.
 */
void memory_region_init_ram_ptr(MemoryRegion *mr,
                                struct Object *owner,
                                const char *name,
                                uint64_t size,
                                void *ptr);
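
/*
 * Example (a sketch; the path, size and owner are hypothetical):
 * backing a RAM region with a file on a DAX/pmem filesystem, shared
 * with other processes.
 *
 *   Error *err = NULL;
 *   memory_region_init_ram_from_file(mr, OBJECT(dev), "nvdimm", size,
 *                                    0, RAM_SHARED | RAM_PMEM,
 *                                    "/mnt/pmem0/guest.img", &err);
 *
 * Passing 0 for @align selects the default (getpagesize()) alignment.
 * RAM_PMEM marks the memory as persistent memory and may additionally
 * require libpmem support in the host build.
 */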

/**
 * memory_region_init_ram_device_ptr:  Initialize RAM device memory region from
 *                                     a user-provided pointer.
 *
 * A RAM device represents a mapping to a physical device, such as a PCI
 * MMIO BAR of a vfio-pci assigned device.  The memory region may be mapped
 * into the VM address space and access to the region will modify memory
 * directly.  However, the memory region should not be included in a memory
 * dump (device may not be enabled/mapped at the time of the dump), and
 * operations incompatible with manipulating MMIO should be avoided.  This
 * replaces the old skip_dump flag.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: the name of the region.
 * @size: size of the region.
 * @ptr: memory to be mapped; must contain at least @size bytes.
 *
 * Note that this function does not do anything to cause the data in the
 * RAM memory region to be migrated; that is the responsibility of the caller.
 * (For RAM device memory regions, migrating the contents rarely makes sense.)
 */
void memory_region_init_ram_device_ptr(MemoryRegion *mr,
                                       struct Object *owner,
                                       const char *name,
                                       uint64_t size,
                                       void *ptr);

/**
 * memory_region_init_alias: Initialize a memory region that aliases all or a
 *                           part of another memory region.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: used for debugging; not visible to the user or ABI
 * @orig: the region to be referenced; @mr will be equivalent to
 *        @orig between @offset and @offset + @size - 1.
 * @offset: start of the section in @orig to be referenced.
 * @size: size of the region.
 */
void memory_region_init_alias(MemoryRegion *mr,
                              struct Object *owner,
                              const char *name,
                              MemoryRegion *orig,
                              hwaddr offset,
                              uint64_t size);

/**
 * memory_region_init_rom_nomigrate: Initialize a ROM memory region.
 *
 * This has the same effect as calling memory_region_init_ram_nomigrate()
 * and then marking the resulting region read-only with
 * memory_region_set_readonly().
 *
 * Note that this function does not do anything to cause the data in the
 * RAM side of the memory region to be migrated; that is the responsibility
 * of the caller.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: Region name, becomes part of RAMBlock name used in migration stream
 *        must be unique within any device
 * @size: size of the region.
 * @errp: pointer to Error*, to store an error if it happens.
 */
void memory_region_init_rom_nomigrate(MemoryRegion *mr,
                                      struct Object *owner,
                                      const char *name,
                                      uint64_t size,
                                      Error **errp);
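
/*
 * Example (illustrative; the region names and addresses are
 * hypothetical): exposing the first 1MiB of a RAM region a second time
 * at address 0, via an alias.
 *
 *   memory_region_init_alias(&s->ram_lo, OBJECT(s), "ram-below-1m",
 *                            &s->ram, 0, 0x100000);
 *   memory_region_add_subregion(get_system_memory(), 0, &s->ram_lo);
 *
 * Aliases are the supported way to map one region at several guest
 * addresses, since a region may be added as a subregion only once.
 */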

/**
 * memory_region_init_rom_device_nomigrate:  Initialize a ROM memory region.
 *                                           Writes are handled via callbacks.
 *
 * Note that this function does not do anything to cause the data in the
 * RAM side of the memory region to be migrated; that is the responsibility
 * of the caller.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @ops: callbacks for write access handling (must not be NULL).
 * @opaque: passed to the read and write callbacks of the @ops structure.
 * @name: Region name, becomes part of RAMBlock name used in migration stream
 *        must be unique within any device
 * @size: size of the region.
 * @errp: pointer to Error*, to store an error if it happens.
 */
void memory_region_init_rom_device_nomigrate(MemoryRegion *mr,
                                             struct Object *owner,
                                             const MemoryRegionOps *ops,
                                             void *opaque,
                                             const char *name,
                                             uint64_t size,
                                             Error **errp);

/**
 * memory_region_init_iommu: Initialize a memory region of a custom type
 *                           that translates addresses
 *
 * An IOMMU region translates addresses and forwards accesses to a target
 * memory region.
 *
 * The IOMMU implementation must define a subclass of TYPE_IOMMU_MEMORY_REGION.
 * @_iommu_mr should be a pointer to enough memory for an instance of
 * that subclass, @instance_size is the size of that subclass, and
 * @mrtypename is its name. This function will initialize @_iommu_mr as an
 * instance of the subclass, and its methods will then be called to handle
 * accesses to the memory region. See the documentation of
 * #IOMMUMemoryRegionClass for further details.
 *
 * @_iommu_mr: the #IOMMUMemoryRegion to be initialized
 * @instance_size: the IOMMUMemoryRegion subclass instance size
 * @mrtypename: the type name of the #IOMMUMemoryRegion
 * @owner: the object that tracks the region's reference count
 * @name: used for debugging; not visible to the user or ABI
 * @size: size of the region.
 */
void memory_region_init_iommu(void *_iommu_mr,
                              size_t instance_size,
                              const char *mrtypename,
                              Object *owner,
                              const char *name,
                              uint64_t size);

/**
 * memory_region_init_ram - Initialize RAM memory region.  Accesses into the
 *                          region will modify memory directly.
 *
 * @mr: the #MemoryRegion to be initialized
 * @owner: the object that tracks the region's reference count (must be
 *         TYPE_DEVICE or a subclass of TYPE_DEVICE, or NULL)
 * @name: name of the memory region
 * @size: size of the region in bytes
 * @errp: pointer to Error*, to store an error if it happens.
 *
 * This function allocates RAM for a board model or device, and
 * arranges for it to be migrated (by calling vmstate_register_ram()
 * if @owner is a DeviceState, or vmstate_register_ram_global() if
 * @owner is NULL).
 *
 * TODO: Currently we restrict @owner to being either NULL (for
 * global RAM regions with no owner) or devices, so that we can
 * give the RAM block a unique name for migration purposes.
 * We should lift this restriction and allow arbitrary Objects.
 * If you pass a non-NULL non-device @owner then we will assert.
 */
void memory_region_init_ram(MemoryRegion *mr,
                            struct Object *owner,
                            const char *name,
                            uint64_t size,
                            Error **errp);
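
/*
 * Example (a sketch; the device type and size are hypothetical):
 * allocating migratable RAM from a device realize method.
 *
 *   static void mydev_realize(DeviceState *dev, Error **errp)
 *   {
 *       MyDevState *s = MYDEV(dev);
 *
 *       memory_region_init_ram(&s->ram, OBJECT(dev), "mydev.ram",
 *                              s->ram_size, errp);
 *   }
 *
 * Because @owner is a device, the RAM block gets a unique name for the
 * migration stream and the contents are registered for migration on
 * the caller's behalf.
 */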

/**
 * memory_region_init_rom: Initialize a ROM memory region.
 *
 * This has the same effect as calling memory_region_init_ram()
 * and then marking the resulting region read-only with
 * memory_region_set_readonly(). This includes arranging for the
 * contents to be migrated.
 *
 * TODO: Currently we restrict @owner to being either NULL (for
 * global RAM regions with no owner) or devices, so that we can
 * give the RAM block a unique name for migration purposes.
 * We should lift this restriction and allow arbitrary Objects.
 * If you pass a non-NULL non-device @owner then we will assert.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: Region name, becomes part of RAMBlock name used in migration stream
 *        must be unique within any device
 * @size: size of the region.
 * @errp: pointer to Error*, to store an error if it happens.
 */
void memory_region_init_rom(MemoryRegion *mr,
                            struct Object *owner,
                            const char *name,
                            uint64_t size,
                            Error **errp);

/**
 * memory_region_init_rom_device:  Initialize a ROM memory region.
 *                                 Writes are handled via callbacks.
 *
 * This function initializes a memory region backed by RAM for reads
 * and callbacks for writes, and arranges for the RAM backing to
 * be migrated (by calling vmstate_register_ram()
 * if @owner is a DeviceState, or vmstate_register_ram_global() if
 * @owner is NULL).
 *
 * TODO: Currently we restrict @owner to being either NULL (for
 * global RAM regions with no owner) or devices, so that we can
 * give the RAM block a unique name for migration purposes.
 * We should lift this restriction and allow arbitrary Objects.
 * If you pass a non-NULL non-device @owner then we will assert.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @ops: callbacks for write access handling (must not be NULL).
 * @opaque: passed to the read and write callbacks of the @ops structure.
 * @name: Region name, becomes part of RAMBlock name used in migration stream
 *        must be unique within any device
 * @size: size of the region.
 * @errp: pointer to Error*, to store an error if it happens.
 */
void memory_region_init_rom_device(MemoryRegion *mr,
                                   struct Object *owner,
                                   const MemoryRegionOps *ops,
                                   void *opaque,
                                   const char *name,
                                   uint64_t size,
                                   Error **errp);


/**
 * memory_region_owner: get a memory region's owner.
 *
 * @mr: the memory region being queried.
 */
struct Object *memory_region_owner(MemoryRegion *mr);

/**
 * memory_region_size: get a memory region's size.
 *
 * @mr: the memory region being queried.
 */
uint64_t memory_region_size(MemoryRegion *mr);

/**
 * memory_region_is_ram: check whether a memory region is random access
 *
 * Returns %true if a memory region is random access.
 *
 * @mr: the memory region being queried
 */
static inline bool memory_region_is_ram(MemoryRegion *mr)
{
    return mr->ram;
}

/**
 * memory_region_is_ram_device: check whether a memory region is a ram device
 *
 * Returns %true if a memory region is a device backed ram region
 *
 * @mr: the memory region being queried
 */
bool memory_region_is_ram_device(MemoryRegion *mr);

/**
 * memory_region_is_romd: check whether a memory region is in ROMD mode
 *
 * Returns %true if a memory region is a ROM device and currently set to allow
 * direct reads.
 *
 * @mr: the memory region being queried
 */
static inline bool memory_region_is_romd(MemoryRegion *mr)
{
    return mr->rom_device && mr->romd_mode;
}

/**
 * memory_region_get_iommu: check whether a memory region is an iommu
 *
 * Returns pointer to IOMMUMemoryRegion if a memory region is an iommu,
 * otherwise NULL.
 *
 * @mr: the memory region being queried
 */
static inline IOMMUMemoryRegion *memory_region_get_iommu(MemoryRegion *mr)
{
    if (mr->alias) {
        return memory_region_get_iommu(mr->alias);
    }
    if (mr->is_iommu) {
        return (IOMMUMemoryRegion *) mr;
    }
    return NULL;
}

/**
 * memory_region_get_iommu_class_nocheck: returns iommu memory region class
 *   if an iommu or NULL if not
 *
 * Returns pointer to IOMMUMemoryRegionClass if a memory region is an iommu,
 * otherwise NULL. This is a fast path avoiding QOM checking, use with caution.
 *
 * @iommu_mr: the memory region being queried
 */
static inline IOMMUMemoryRegionClass *memory_region_get_iommu_class_nocheck(
    IOMMUMemoryRegion *iommu_mr)
{
    return (IOMMUMemoryRegionClass *) (((Object *)iommu_mr)->class);
}

#define memory_region_is_iommu(mr) (memory_region_get_iommu(mr) != NULL)

/**
 * memory_region_iommu_get_min_page_size: get minimum supported page size
 *   for an iommu
 *
 * Returns minimum supported page size for an iommu.
 *
 * @iommu_mr: the memory region being queried
 */
uint64_t memory_region_iommu_get_min_page_size(IOMMUMemoryRegion *iommu_mr);

/**
 * memory_region_notify_iommu: notify a change in an IOMMU translation entry.
 *
 * The notification type will be decided by entry.perm bits:
 *
 * - For UNMAP (cache invalidation) notifies: set entry.perm to IOMMU_NONE.
 * - For MAP (newly added entry) notifies: set entry.perm to the
 *   permission of the page (which is definitely !IOMMU_NONE).
 *
 * Note: for any IOMMU implementation, an in-place mapping change
 * should be notified with an UNMAP followed by a MAP.
 *
 * @iommu_mr: the memory region that was changed
 * @iommu_idx: the IOMMU index for the translation table which has changed
 * @entry: the new entry in the IOMMU translation table.  The entry
 *         replaces all old entries for the same virtual I/O address range.
 *         Deleted entries have .@perm == 0.
 */
void memory_region_notify_iommu(IOMMUMemoryRegion *iommu_mr,
                                int iommu_idx,
                                IOMMUTLBEntry entry);

/**
 * memory_region_notify_one: notify a change in an IOMMU translation
 *                           entry to a single notifier
 *
 * This works just like memory_region_notify_iommu(), but it only
 * notifies a specific notifier, not all of them.
 *
 * @notifier: the notifier to be notified
 * @entry: the new entry in the IOMMU translation table.  The entry
 *         replaces all old entries for the same virtual I/O address range.
 *         Deleted entries have .@perm == 0.
 */
void memory_region_notify_one(IOMMUNotifier *notifier,
                              IOMMUTLBEntry *entry);

/**
 * memory_region_register_iommu_notifier: register a notifier for changes to
 * IOMMU translation entries.
 *
 * @mr: the memory region to observe
 * @n: the IOMMUNotifier to be added; the notify callback receives a
 *     pointer to an #IOMMUTLBEntry as the opaque value; the pointer
 *     ceases to be valid on exit from the notifier.
 */
void memory_region_register_iommu_notifier(MemoryRegion *mr,
                                           IOMMUNotifier *n);
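
/*
 * Example (illustrative; "vfio_notify" and the 48-bit range are
 * hypothetical): registering a notifier for both MAP and UNMAP events,
 * and how the IOMMU side reports an in-place mapping change.
 *
 *   IOMMUNotifier n;
 *
 *   iommu_notifier_init(&n, vfio_notify, IOMMU_NOTIFIER_ALL,
 *                       0, (1ULL << 48) - 1, 0);
 *   memory_region_register_iommu_notifier(MEMORY_REGION(iommu_mr), &n);
 *
 * On the IOMMU side, replacing a live mapping is reported as an UNMAP
 * followed by a MAP, with entry.iova and entry.addr_mask describing
 * the page in both calls:
 *
 *   entry.perm = IOMMU_NONE;
 *   memory_region_notify_iommu(iommu_mr, 0, entry);
 *   entry.translated_addr = new_pa;
 *   entry.perm = IOMMU_RW;
 *   memory_region_notify_iommu(iommu_mr, 0, entry);
 */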

/**
 * memory_region_iommu_replay: replay existing IOMMU translations to
 * a notifier with the minimum page granularity returned by
 * the IOMMU's get_min_page_size() method.
 *
 * Note: this is not related to record-and-replay functionality.
 *
 * @iommu_mr: the memory region to observe
 * @n: the notifier to which to replay iommu mappings
 */
void memory_region_iommu_replay(IOMMUMemoryRegion *iommu_mr, IOMMUNotifier *n);

/**
 * memory_region_iommu_replay_all: replay existing IOMMU translations
 * to all the notifiers registered.
 *
 * Note: this is not related to record-and-replay functionality.
 *
 * @iommu_mr: the memory region to observe
 */
void memory_region_iommu_replay_all(IOMMUMemoryRegion *iommu_mr);

/**
 * memory_region_unregister_iommu_notifier: unregister a notifier for
 * changes to IOMMU translation entries.
 *
 * @mr: the memory region which was being observed
 * @n: the notifier to be removed.
 */
void memory_region_unregister_iommu_notifier(MemoryRegion *mr,
                                             IOMMUNotifier *n);

/**
 * memory_region_iommu_get_attr: return an IOMMU attr if get_attr() is
 * defined on the IOMMU.
 *
 * Returns 0 on success, or a negative errno otherwise. In particular,
 * -EINVAL indicates that the IOMMU does not support the requested
 * attribute.
 *
 * @iommu_mr: the memory region
 * @attr: the requested attribute
 * @data: a pointer to the requested attribute data
 */
int memory_region_iommu_get_attr(IOMMUMemoryRegion *iommu_mr,
                                 enum IOMMUMemoryRegionAttr attr,
                                 void *data);

/**
 * memory_region_iommu_attrs_to_index: return the IOMMU index to
 * use for translations with the given memory transaction attributes.
 *
 * @iommu_mr: the memory region
 * @attrs: the memory transaction attributes
 */
int memory_region_iommu_attrs_to_index(IOMMUMemoryRegion *iommu_mr,
                                       MemTxAttrs attrs);

/**
 * memory_region_iommu_num_indexes: return the total number of IOMMU
 * indexes that this IOMMU supports.
 *
 * @iommu_mr: the memory region
 */
int memory_region_iommu_num_indexes(IOMMUMemoryRegion *iommu_mr);
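
/*
 * Example (illustrative): picking the IOMMU index for a transaction and
 * querying the only attribute currently defined, which the sPAPR TCE
 * IOMMU uses to hand a TCE table file descriptor to its users.
 *
 *   IOMMUMemoryRegionClass *imrc =
 *       memory_region_get_iommu_class_nocheck(iommu_mr);
 *   int idx = memory_region_iommu_attrs_to_index(iommu_mr, attrs);
 *   IOMMUTLBEntry entry = imrc->translate(iommu_mr, addr,
 *                                         IOMMU_NONE, idx);
 *
 *   int fd;
 *   if (memory_region_iommu_get_attr(iommu_mr, IOMMU_ATTR_SPAPR_TCE_FD,
 *                                    &fd) == 0) {
 *       ...use the file descriptor...
 *   }
 *
 * IOMMUs that do not implement get_attr simply return -EINVAL here.
 */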

/**
 * memory_region_name: get a memory region's name
 *
 * Returns the string that was used to initialize the memory region.
 *
 * @mr: the memory region being queried
 */
const char *memory_region_name(const MemoryRegion *mr);

/**
 * memory_region_is_logging: return whether a memory region is logging writes
 *
 * Returns %true if the memory region is logging writes for the given client
 *
 * @mr: the memory region being queried
 * @client: the client being queried
 */
bool memory_region_is_logging(MemoryRegion *mr, uint8_t client);

/**
 * memory_region_get_dirty_log_mask: return the clients for which a
 * memory region is logging writes.
 *
 * Returns a bitmap of clients, in which the DIRTY_MEMORY_* constants
 * are the bit indices.
 *
 * @mr: the memory region being queried
 */
uint8_t memory_region_get_dirty_log_mask(MemoryRegion *mr);

/**
 * memory_region_is_rom: check whether a memory region is ROM
 *
 * Returns %true if a memory region is read-only memory.
 *
 * @mr: the memory region being queried
 */
static inline bool memory_region_is_rom(MemoryRegion *mr)
{
    return mr->ram && mr->readonly;
}

/**
 * memory_region_is_nonvolatile: check whether a memory region is non-volatile
 *
 * Returns %true if a memory region is non-volatile memory.
 *
 * @mr: the memory region being queried
 */
static inline bool memory_region_is_nonvolatile(MemoryRegion *mr)
{
    return mr->nonvolatile;
}

/**
 * memory_region_get_fd: Get a file descriptor backing a RAM memory region.
 *
 * Returns a file descriptor backing a file-based RAM memory region,
 * or -1 if the region is not a file-based RAM memory region.
 *
 * @mr: the RAM or alias memory region being queried.
 */
int memory_region_get_fd(MemoryRegion *mr);

/**
 * memory_region_from_host: Convert a pointer into a RAM memory region
 * and an offset within it.
 *
 * Given a host pointer inside a RAM memory region (created with
 * memory_region_init_ram() or memory_region_init_ram_ptr()), return
 * the MemoryRegion and the offset within it.
 *
 * Use with care; by the time this function returns, the returned pointer is
 * not protected by RCU anymore.  If the caller is not within an RCU critical
 * section and does not hold the iothread lock, it must have other means of
 * protecting the pointer, such as a reference to the region that includes
 * the incoming ram_addr_t.
 *
 * @ptr: the host pointer to be converted
 * @offset: the offset within memory region
 */
MemoryRegion *memory_region_from_host(void *ptr, ram_addr_t *offset);

/**
 * memory_region_get_ram_ptr: Get a pointer into a RAM memory region.
 *
 * Returns a host pointer to a RAM memory region (created with
 * memory_region_init_ram() or memory_region_init_ram_ptr()).
 *
 * Use with care; by the time this function returns, the returned pointer is
 * not protected by RCU anymore.  If the caller is not within an RCU critical
 * section and does not hold the iothread lock, it must have other means of
 * protecting the pointer, such as a reference to the region that includes
 * the incoming ram_addr_t.
 *
 * @mr: the memory region being queried.
 */
void *memory_region_get_ram_ptr(MemoryRegion *mr);

/* memory_region_ram_resize: Resize a RAM region.
 *
 * Resizing is only legal before the guest may have detected the memory size:
 * e.g. on incoming migration, or right after reset.
 *
 * @mr: a memory region created with @memory_region_init_resizeable_ram.
 * @newsize: the new size of the region
 * @errp: pointer to Error*, to store an error if it happens.
 */
void memory_region_ram_resize(MemoryRegion *mr, ram_addr_t newsize,
                              Error **errp);

/**
 * memory_region_set_log: Turn dirty logging on or off for a region.
 *
 * Turns dirty logging on or off for a specified client (display, migration).
 * Only meaningful for RAM regions.
 *
 * @mr: the memory region being updated.
 * @log: whether dirty logging is to be enabled or disabled.
 * @client: the user of the logging information; %DIRTY_MEMORY_VGA only.
 */
void memory_region_set_log(MemoryRegion *mr, bool log, unsigned client);

/**
 * memory_region_set_dirty: Mark a range of bytes as dirty in a memory region.
 *
 * Marks a range of bytes as dirty, after it has been dirtied outside
 * guest code.
 *
 * @mr: the memory region being dirtied.
 * @addr: the address (relative to the start of the region) being dirtied.
 * @size: size of the range being dirtied.
 */
void memory_region_set_dirty(MemoryRegion *mr, hwaddr addr,
                             hwaddr size);

/**
 * memory_region_clear_dirty_bitmap - clear dirty bitmap for memory range
 *
 * This function is called when the caller wants to clear the remote
 * dirty bitmap of a memory range within the memory region.  This can
 * be used by e.g. KVM to manually clear dirty log when
 * KVM_CAP_MANUAL_DIRTY_LOG_PROTECT is declared as supported by the host
 * kernel.
 *
 * @mr:     the memory region to clear the dirty log upon
 * @start:  start address offset within the memory region
 * @len:    length of the memory region to clear dirty bitmap
 */
void memory_region_clear_dirty_bitmap(MemoryRegion *mr, hwaddr start,
                                      hwaddr len);

/**
 * memory_region_snapshot_and_clear_dirty: Get a snapshot of the dirty
 *                                         bitmap and clear it.
 *
 * Creates a snapshot of the dirty bitmap, clears the dirty bitmap and
 * returns the snapshot.  The snapshot can then be used to query dirty
 * status, using memory_region_snapshot_get_dirty.  Snapshotting allows
 * querying the same page multiple times, which is especially useful for
 * display updates where the scanlines often are not page aligned.
 *
 * The dirty bitmap region which gets copied into the snapshot (and
 * cleared afterwards) can be larger than requested.  The boundaries
 * are rounded up/down so complete bitmap longs (covering 64 pages on
 * 64-bit hosts) can be copied over into the bitmap snapshot.  This
 * isn't a problem for display updates as the extra pages are outside
 * the visible area, and in case the visible area changes a full
 * display redraw is due anyway.  Should other use cases for this
 * function emerge we might have to revisit this implementation
 * detail.
 *
 * Use g_free to release DirtyBitmapSnapshot.
 *
 * @mr: the memory region being queried.
 * @addr: the address (relative to the start of the region) being queried.
 * @size: the size of the range being queried.
 * @client: the user of the logging information; typically %DIRTY_MEMORY_VGA.
 */
DirtyBitmapSnapshot *memory_region_snapshot_and_clear_dirty(MemoryRegion *mr,
                                                            hwaddr addr,
                                                            hwaddr size,
                                                            unsigned client);

/**
 * memory_region_snapshot_get_dirty: Check whether a range of bytes is dirty
 *                                   in the specified dirty bitmap snapshot.
 *
 * @mr: the memory region being queried.
 * @snap: the dirty bitmap snapshot
 * @addr: the address (relative to the start of the region) being queried.
 * @size: the size of the range being queried.
 */
bool memory_region_snapshot_get_dirty(MemoryRegion *mr,
                                      DirtyBitmapSnapshot *snap,
                                      hwaddr addr, hwaddr size);

/**
 * memory_region_reset_dirty: Mark a range of pages as clean, for a specified
 *                            client.
 *
 * Marks a range of pages as no longer dirty.
 *
 * @mr: the region being updated.
 * @addr: the start of the subrange being cleaned.
 * @size: the size of the subrange being cleaned.
 * @client: the user of the logging information; %DIRTY_MEMORY_MIGRATION or
 *          %DIRTY_MEMORY_VGA.
 */
void memory_region_reset_dirty(MemoryRegion *mr, hwaddr addr,
                               hwaddr size, unsigned client);
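
/*
 * Example (a sketch of a display-update loop; "vram" and the scanline
 * geometry are hypothetical):
 *
 *   memory_region_set_log(vram, true, DIRTY_MEMORY_VGA);
 *   ...
 *   DirtyBitmapSnapshot *snap;
 *   int y;
 *
 *   snap = memory_region_snapshot_and_clear_dirty(vram, 0, vram_size,
 *                                                 DIRTY_MEMORY_VGA);
 *   for (y = 0; y < height; y++) {
 *       if (memory_region_snapshot_get_dirty(vram, snap,
 *                                            y * stride, stride)) {
 *           ...redraw scanline y...
 *       }
 *   }
 *   g_free(snap);
 */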

/**
 * memory_region_flush_rom_device: Mark a range of pages dirty and invalidate
 *                                 TBs (for self-modifying code).
 *
 * The MemoryRegionOps->write() callback of a ROM device must use this function
 * to mark byte ranges that have been modified internally, such as by directly
 * accessing the memory returned by memory_region_get_ram_ptr().
 *
 * This function marks the range dirty and invalidates TBs so that TCG can
 * detect self-modifying code.
 *
 * @mr: the region being flushed.
 * @addr: the start, relative to the start of the region, of the range being
 *        flushed.
 * @size: the size, in bytes, of the range being flushed.
 */
void memory_region_flush_rom_device(MemoryRegion *mr, hwaddr addr, hwaddr size);

/**
 * memory_region_set_readonly: Turn a memory region read-only (or read-write)
 *
 * Allows a memory region to be marked as read-only (turning it into a ROM).
 * Only useful on RAM regions.
 *
 * @mr: the region being updated.
 * @readonly: whether the region is to be ROM or RAM.
 */
void memory_region_set_readonly(MemoryRegion *mr, bool readonly);

/**
 * memory_region_set_nonvolatile: Turn a memory region non-volatile
 *
 * Allows a memory region to be marked as non-volatile.
 * Only useful on RAM regions.
 *
 * @mr: the region being updated.
 * @nonvolatile: whether the region is to be non-volatile.
 */
void memory_region_set_nonvolatile(MemoryRegion *mr, bool nonvolatile);

/**
 * memory_region_rom_device_set_romd: enable/disable ROMD mode
 *
 * Allows a ROM device (initialized with memory_region_init_rom_device()) to
 * be set to ROMD mode (default) or MMIO mode.  When it is in ROMD mode, the
 * device is mapped to guest memory and satisfies read accesses directly.
 * When in MMIO mode, reads are forwarded to the #MemoryRegion.read function.
 * Writes are always handled by the #MemoryRegion.write function.
 *
 * @mr: the memory region to be updated
 * @romd_mode: %true to put the region into ROMD mode
 */
void memory_region_rom_device_set_romd(MemoryRegion *mr, bool romd_mode);

/**
 * memory_region_set_coalescing: Enable memory coalescing for the region.
 *
 * Enables writes to the region to be queued for later processing. MMIO ->write
 * callbacks may be delayed until a non-coalesced MMIO is issued.
 * Only useful for IO regions.  Roughly similar to write-combining hardware.
 *
 * @mr: the memory region to be write coalesced
 */
void memory_region_set_coalescing(MemoryRegion *mr);

/**
 * memory_region_add_coalescing: Enable memory coalescing for a sub-range of
 *                               a region.
 *
 * Like memory_region_set_coalescing(), but works on a sub-range of a region.
 * Multiple calls can be issued to coalesce disjoint ranges.
 *
 * @mr: the memory region to be updated.
 * @offset: the start of the range within the region to be coalesced.
 * @size: the size of the subrange to be coalesced.
 */
void memory_region_add_coalescing(MemoryRegion *mr,
                                  hwaddr offset,
                                  uint64_t size);

/**
 * memory_region_clear_coalescing: Disable MMIO coalescing for the region.
 *
 * Disables any coalescing caused by memory_region_set_coalescing() or
 * memory_region_add_coalescing().  Roughly equivalent to uncacheable memory
 * hardware.
 *
 * @mr: the memory region to be updated.
 */
void memory_region_clear_coalescing(MemoryRegion *mr);

/**
 * memory_region_set_flush_coalesced: Enforce memory coalescing flush before
 *                                    accesses.
 *
 * Ensure that pending coalesced MMIO requests are flushed before the memory
 * region is accessed. This property is automatically enabled for all regions
 * passed to memory_region_set_coalescing() and memory_region_add_coalescing().
 *
 * @mr: the memory region to be updated.
 */
void memory_region_set_flush_coalesced(MemoryRegion *mr);

/**
 * memory_region_clear_flush_coalesced: Disable memory coalescing flush before
 *                                      accesses.
 *
 * Clear the automatic coalesced MMIO flushing enabled via
 * memory_region_set_flush_coalesced. Note that this service has no effect on
 * memory regions that have MMIO coalescing enabled for themselves. For them,
 * automatic flushing will stop once coalescing is disabled.
 *
 * @mr: the memory region to be updated.
 */
void memory_region_clear_flush_coalesced(MemoryRegion *mr);

/**
 * memory_region_clear_global_locking: Declares that access processing does
 *                                     not depend on the QEMU global lock.
 *
 * By clearing this property, accesses to the memory region will be processed
 * outside of QEMU's global lock (unless the lock is held when issuing the
 * access request). In this case, the device model implementing the access
 * handlers is responsible for synchronization of concurrency.
 *
 * @mr: the memory region to be updated.
 */
void memory_region_clear_global_locking(MemoryRegion *mr);

/**
 * memory_region_add_eventfd: Request an eventfd to be triggered when a word
 *                            is written to a location.
 *
 * Marks a word in an IO region (initialized with memory_region_init_io())
 * as a trigger for an eventfd event.  The I/O callback will not be called.
 * The caller must be prepared to handle failure (that is, take the required
 * action if the callback _is_ called).
 *
 * @mr: the memory region being updated.
 * @addr: the address within @mr that is to be monitored
 * @size: the size of the access to trigger the eventfd
 * @match_data: whether to match against @data, instead of just @addr
 * @data: the data to match against the guest write
 * @e: event notifier to be triggered when @addr, @size, and @data all match.
 **/
void memory_region_add_eventfd(MemoryRegion *mr,
                               hwaddr addr,
                               unsigned size,
                               bool match_data,
                               uint64_t data,
                               EventNotifier *e);

/**
 * memory_region_del_eventfd: Cancel an eventfd.
 *
 * Cancels an eventfd trigger requested by a previous
 * memory_region_add_eventfd() call.
 *
 * @mr: the memory region being updated.
 * @addr: the address within @mr that is to be monitored
 * @size: the size of the access to trigger the eventfd
 * @match_data: whether to match against @data, instead of just @addr
 * @data: the data to match against the guest write
 * @e: event notifier to be triggered when @addr, @size, and @data all match.
 */
void memory_region_del_eventfd(MemoryRegion *mr,
                               hwaddr addr,
                               unsigned size,
                               bool match_data,
                               uint64_t data,
                               EventNotifier *e);
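
/*
 * Example (a sketch; the doorbell layout is hypothetical): wiring a
 * 4-byte doorbell register at offset 0x40 to an EventNotifier, so that
 * a guest write of the value 1 signals the notifier instead of
 * invoking the MMIO write callback.
 *
 *   EventNotifier e;
 *
 *   event_notifier_init(&e, 0);
 *   memory_region_add_eventfd(&s->iomem, 0x40, 4, true, 1, &e);
 *
 * The matching memory_region_del_eventfd() call must use exactly the
 * same @addr, @size, @match_data, @data and @e values.
 */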
 * The subregion may not overlap with other subregions (except for those
 * explicitly marked as overlapping). A region may only be added once as a
 * subregion (unless removed with memory_region_del_subregion()); use
 * memory_region_init_alias() if you want a region to be a subregion in
 * multiple locations.
 *
 * @mr: the region to contain the new subregion; must be a container
 *      initialized with memory_region_init().
 * @offset: the offset relative to @mr where @subregion is added.
 * @subregion: the subregion to be added.
 */
void memory_region_add_subregion(MemoryRegion *mr,
                                 hwaddr offset,
                                 MemoryRegion *subregion);
/**
 * memory_region_add_subregion_overlap: Add a subregion to a container
 *                                      with overlap.
 *
 * Adds a subregion at @offset. The subregion may overlap with other
 * subregions. Conflicts are resolved by having a higher @priority hide a
 * lower @priority. Subregions without priority are taken as @priority 0.
 * A region may only be added once as a subregion (unless removed with
 * memory_region_del_subregion()); use memory_region_init_alias() if you
 * want a region to be a subregion in multiple locations.
 *
 * @mr: the region to contain the new subregion; must be a container
 *      initialized with memory_region_init().
 * @offset: the offset relative to @mr where @subregion is added.
 * @subregion: the subregion to be added.
 * @priority: used for resolving overlaps; highest priority wins.
 */
void memory_region_add_subregion_overlap(MemoryRegion *mr,
                                         hwaddr offset,
                                         MemoryRegion *subregion,
                                         int priority);

/**
 * memory_region_get_ram_addr: Get the ram address associated with a memory
 *                             region
 */
ram_addr_t memory_region_get_ram_addr(MemoryRegion *mr);

uint64_t memory_region_get_alignment(const MemoryRegion *mr);
/**
 * memory_region_del_subregion: Remove a subregion.
 *
 * Removes a subregion from its container.
 *
 * @mr: the container to be updated.
 * @subregion: the region being removed; must be a current subregion of @mr.
 */
void memory_region_del_subregion(MemoryRegion *mr,
                                 MemoryRegion *subregion);

/*
 * memory_region_set_enabled: dynamically enable or disable a region
 *
 * Enables or disables a memory region. A disabled memory region
 * ignores all accesses to itself and its subregions. It does not
 * obscure sibling subregions with lower priority - it simply behaves as
 * if it was removed from the hierarchy.
 *
 * Regions default to being enabled.
 *
 * @mr: the region to be updated
 * @enabled: whether to enable or disable the region
 */
void memory_region_set_enabled(MemoryRegion *mr, bool enabled);

/*
 * memory_region_set_address: dynamically update the address of a region
 *
 * Dynamically updates the address of a region, relative to its container.
 * May be used on regions that are currently part of a memory hierarchy.
 *
 * @mr: the region to be updated
 * @addr: new address, relative to container region
 */
void memory_region_set_address(MemoryRegion *mr, hwaddr addr);

/*
 * memory_region_set_size: dynamically update the size of a region.
 *
 * Dynamically updates the size of a region.
 *
 * @mr: the region to be updated
 * @size: the new size of the region.
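 *
 * For example (sketch; "s->window" is a hypothetical region), a device
 * model might shrink the guest-visible window of its region:
 *
 *     memory_region_set_size(&s->window, 0x1000);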
 */
void memory_region_set_size(MemoryRegion *mr, uint64_t size);

/*
 * memory_region_set_alias_offset: dynamically update a memory alias's offset
 *
 * Dynamically updates the offset into the target region that an alias points
 * to, as if the fourth argument to memory_region_init_alias() had changed.
 *
 * @mr: the #MemoryRegion to be updated; should be an alias.
 * @offset: the new offset into the target memory region
 */
void memory_region_set_alias_offset(MemoryRegion *mr,
                                    hwaddr offset);

/**
 * memory_region_present: checks if an address relative to a @container
 *                        translates into a #MemoryRegion within @container
 *
 * Answer whether a #MemoryRegion within @container covers the address
 * @addr.
 *
 * @container: a #MemoryRegion within which @addr is a relative address
 * @addr: the area within @container to be searched
 */
bool memory_region_present(MemoryRegion *container, hwaddr addr);

/**
 * memory_region_is_mapped: returns true if #MemoryRegion is mapped
 *                          into any address space.
 *
 * @mr: a #MemoryRegion which should be checked if it's mapped
 */
bool memory_region_is_mapped(MemoryRegion *mr);

/**
 * memory_region_find: translate an address/size relative to a
 *                     MemoryRegion into a #MemoryRegionSection.
 *
 * Locates the first #MemoryRegion within @mr that overlaps the range
 * given by @addr and @size.
 *
 * Returns a #MemoryRegionSection that describes a contiguous overlap.
 * It will have the following characteristics:
 * .@size = 0 iff no overlap was found
 * .@mr is non-%NULL iff an overlap was found
 *
 * Remember that in the return value the @offset_within_region is
 * relative to the returned region (in the .@mr field), not to the
 * @mr argument.
 *
 * Similarly, the .@offset_within_address_space is relative to the
 * address space that contains both regions, the passed and the
 * returned one. However, in the special case where the @mr argument
 * has no container (and thus is the root of the address space), the
 * following will hold:
 * .@offset_within_address_space >= @addr
 * .@offset_within_address_space + .@size <= @addr + @size
 *
 * @mr: a MemoryRegion within which @addr is a relative address
 * @addr: start of the area within @mr to be searched
 * @size: size of the area to be searched
 */
MemoryRegionSection memory_region_find(MemoryRegion *mr,
                                       hwaddr addr, uint64_t size);

/**
 * memory_global_dirty_log_sync: synchronize the dirty log for all memory
 *
 * Synchronizes the dirty page log for all address spaces.
 */
void memory_global_dirty_log_sync(void);

/**
 * memory_region_transaction_begin: Start a transaction.
 *
 * During a transaction, changes will be accumulated and made visible
 * only when the transaction ends (is committed).
 */
void memory_region_transaction_begin(void);

/**
 * memory_region_transaction_commit: Commit a transaction and make changes
 *                                   visible to the guest.
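 *
 * A minimal sketch of pairing begin/commit so that several layout
 * changes become guest-visible atomically ("mr_a" and "mr_b" are
 * hypothetical regions):
 *
 *     memory_region_transaction_begin();
 *     memory_region_set_enabled(mr_a, false);
 *     memory_region_set_address(mr_b, 0xfe000000);
 *     memory_region_transaction_commit();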
 */
void memory_region_transaction_commit(void);

/**
 * memory_listener_register: register callbacks to be called when memory
 *                           sections are mapped or unmapped into an address
 *                           space
 *
 * @listener: an object containing the callbacks to be called
 * @filter: if non-%NULL, only regions in this address space will be observed
 */
void memory_listener_register(MemoryListener *listener, AddressSpace *filter);

/**
 * memory_listener_unregister: undo the effect of memory_listener_register()
 *
 * @listener: an object containing the callbacks to be removed
 */
void memory_listener_unregister(MemoryListener *listener);

/**
 * memory_global_dirty_log_start: begin dirty logging for all regions
 */
void memory_global_dirty_log_start(void);

/**
 * memory_global_dirty_log_stop: end dirty logging for all regions
 */
void memory_global_dirty_log_stop(void);

void mtree_info(bool flatview, bool dispatch_tree, bool owner);

/**
 * memory_region_dispatch_read: perform a read directly to the specified
 *                              MemoryRegion.
 *
 * @mr: #MemoryRegion to access
 * @addr: address within that region
 * @pval: pointer to uint64_t which the data is written to
 * @size: size of the access in bytes
 * @attrs: memory transaction attributes to use for the access
 */
MemTxResult memory_region_dispatch_read(MemoryRegion *mr,
                                        hwaddr addr,
                                        uint64_t *pval,
                                        unsigned size,
                                        MemTxAttrs attrs);
/**
 * memory_region_dispatch_write: perform a write directly to the specified
 *                               MemoryRegion.
 *
 * @mr: #MemoryRegion to access
 * @addr: address within that region
 * @data: data to write
 * @size: size of the access in bytes
 * @attrs: memory transaction attributes to use for the access
 */
MemTxResult memory_region_dispatch_write(MemoryRegion *mr,
                                         hwaddr addr,
                                         uint64_t data,
                                         unsigned size,
                                         MemTxAttrs attrs);

/**
 * address_space_init: initializes an address space
 *
 * @as: an uninitialized #AddressSpace
 * @root: a #MemoryRegion that routes addresses for the address space
 * @name: an address space name. The name is only used for debugging
 *        output.
 */
void address_space_init(AddressSpace *as, MemoryRegion *root, const char *name);

/**
 * address_space_destroy: destroy an address space
 *
 * Releases all resources associated with an address space. After an address
 * space is destroyed, its root memory region (given by address_space_init())
 * may be destroyed as well.
 *
 * @as: address space to be destroyed
 */
void address_space_destroy(AddressSpace *as);

/**
 * address_space_remove_listeners: unregister all listeners of an address space
 *
 * Removes all callbacks previously registered with memory_listener_register()
 * for @as.
 *
 * @as: an initialized #AddressSpace
 */
void address_space_remove_listeners(AddressSpace *as);

/**
 * address_space_rw: read from or write to an address space.
 *
 * Return a MemTxResult indicating whether the operation succeeded
 * or failed (e.g. unassigned memory, device rejected the transaction,
 * IOMMU fault).
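 *
 * For instance, a sketch of a 4-byte DMA read into a local buffer
 * ("dma_addr" is a placeholder; address_space_memory is the system
 * memory address space declared in "exec/address-spaces.h"):
 *
 *     uint8_t buf[4];
 *     MemTxResult r = address_space_rw(&address_space_memory, dma_addr,
 *                                      MEMTXATTRS_UNSPECIFIED, buf,
 *                                      sizeof(buf), false);
 *     if (r != MEMTX_OK) {
 *         ... handle the failed transaction ...
 *     }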
 *
 * @as: #AddressSpace to be accessed
 * @addr: address within that address space
 * @attrs: memory transaction attributes
 * @buf: buffer with the data transferred
 * @len: the number of bytes to read or write
 * @is_write: indicates the transfer direction
 */
MemTxResult address_space_rw(AddressSpace *as, hwaddr addr,
                             MemTxAttrs attrs, uint8_t *buf,
                             hwaddr len, bool is_write);

/**
 * address_space_write: write to address space.
 *
 * Return a MemTxResult indicating whether the operation succeeded
 * or failed (e.g. unassigned memory, device rejected the transaction,
 * IOMMU fault).
 *
 * @as: #AddressSpace to be accessed
 * @addr: address within that address space
 * @attrs: memory transaction attributes
 * @buf: buffer with the data transferred
 * @len: the number of bytes to write
 */
MemTxResult address_space_write(AddressSpace *as, hwaddr addr,
                                MemTxAttrs attrs,
                                const uint8_t *buf, hwaddr len);

/**
 * address_space_write_rom: write to address space, including ROM.
 *
 * This function writes to the specified address space, but will
 * write data to both ROM and RAM. This is used for non-guest
 * writes like writes from the gdb debug stub or initial loading
 * of ROM contents.
 *
 * Note that portions of the write which attempt to write data to
 * a device will be silently ignored -- only real RAM and ROM will
 * be written to.
 *
 * Return a MemTxResult indicating whether the operation succeeded
 * or failed (e.g. unassigned memory, device rejected the transaction,
 * IOMMU fault).
 *
 * @as: #AddressSpace to be accessed
 * @addr: address within that address space
 * @attrs: memory transaction attributes
 * @buf: buffer with the data transferred
 * @len: the number of bytes to write
 */
MemTxResult address_space_write_rom(AddressSpace *as, hwaddr addr,
                                    MemTxAttrs attrs,
                                    const uint8_t *buf, hwaddr len);

/* address_space_ld*: load from an address space
 * address_space_st*: store to an address space
 *
 * These functions perform a load or store of the byte, word,
 * longword or quad to the specified address within the AddressSpace.
 * The _le suffixed functions treat the data as little endian;
 * _be indicates big endian; no suffix indicates "same endianness
 * as guest CPU".
 *
 * The "guest CPU endianness" accessors are deprecated for use outside
 * target-* code; devices should be CPU-agnostic and use either the LE
 * or the BE accessors.
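 *
 * For example (sketch), loading a little-endian 32-bit value and
 * checking the transaction result:
 *
 *     MemTxResult res;
 *     uint32_t v = address_space_ldl_le(as, addr,
 *                                       MEMTXATTRS_UNSPECIFIED, &res);
 *     if (res != MEMTX_OK) {
 *         ... the load did not complete successfully ...
 *     }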
 *
 * @as: #AddressSpace to be accessed
 * @addr: address within that address space
 * @val: data value, for stores
 * @attrs: memory transaction attributes
 * @result: location to write the success/failure of the transaction;
 *          if NULL, this information is discarded
 */

#define SUFFIX
#define ARG1         as
#define ARG1_DECL    AddressSpace *as
#include "exec/memory_ldst.inc.h"

#define SUFFIX
#define ARG1         as
#define ARG1_DECL    AddressSpace *as
#include "exec/memory_ldst_phys.inc.h"

struct MemoryRegionCache {
    void *ptr;
    hwaddr xlat;
    hwaddr len;
    FlatView *fv;
    MemoryRegionSection mrs;
    bool is_write;
};

#define MEMORY_REGION_CACHE_INVALID ((MemoryRegionCache) { .mrs.mr = NULL })


/* address_space_ld*_cached: load from a cached #MemoryRegion
 * address_space_st*_cached: store into a cached #MemoryRegion
 *
 * These functions perform a load or store of the byte, word,
 * longword or quad to the specified address. The address is
 * a physical address in the AddressSpace, but it must lie within
 * a #MemoryRegion that was mapped with address_space_cache_init.
 *
 * The _le suffixed functions treat the data as little endian;
 * _be indicates big endian; no suffix indicates "same endianness
 * as guest CPU".
 *
 * The "guest CPU endianness" accessors are deprecated for use outside
 * target-* code; devices should be CPU-agnostic and use either the LE
 * or the BE accessors.
 *
 * @cache: previously initialized #MemoryRegionCache to be accessed
 * @addr: address within the address space
 * @val: data value, for stores
 * @attrs: memory transaction attributes
 * @result: location to write the success/failure of the transaction;
 *          if NULL, this information is discarded
 */

#define SUFFIX       _cached_slow
#define ARG1         cache
#define ARG1_DECL    MemoryRegionCache *cache
#include "exec/memory_ldst.inc.h"

/* Inline fast path for direct RAM access.
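 *
 * A typical cache-init/access/destroy sequence, as an illustrative
 * sketch ("desc_addr" and "desc_len" are placeholders):
 *
 *     MemoryRegionCache cache = MEMORY_REGION_CACHE_INVALID;
 *     if (address_space_cache_init(&cache, as, desc_addr, desc_len,
 *                                  false) >= 0) {
 *         uint8_t flags = address_space_ldub_cached(&cache, 0,
 *                                                   MEMTXATTRS_UNSPECIFIED,
 *                                                   NULL);
 *         ...
 *         address_space_cache_destroy(&cache);
 *     }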
 */
static inline uint8_t address_space_ldub_cached(MemoryRegionCache *cache,
    hwaddr addr, MemTxAttrs attrs, MemTxResult *result)
{
    assert(addr < cache->len);
    if (likely(cache->ptr)) {
        /* Direct access: the cached region maps to host RAM. */
        return ldub_p(cache->ptr + addr);
    } else {
        /* Fall back to the full MMIO dispatch path. */
        return address_space_ldub_cached_slow(cache, addr, attrs, result);
    }
}

static inline void address_space_stb_cached(MemoryRegionCache *cache,
    hwaddr addr, uint32_t val, MemTxAttrs attrs, MemTxResult *result)
{
    assert(addr < cache->len);
    if (likely(cache->ptr)) {
        stb_p(cache->ptr + addr, val);
    } else {
        address_space_stb_cached_slow(cache, addr, val, attrs, result);
    }
}

#define ENDIANNESS   _le
#include "exec/memory_ldst_cached.inc.h"

#define ENDIANNESS   _be
#include "exec/memory_ldst_cached.inc.h"

#define SUFFIX       _cached
#define ARG1         cache
#define ARG1_DECL    MemoryRegionCache *cache
#include "exec/memory_ldst_phys.inc.h"

/* address_space_cache_init: prepare for repeated access to a physical
 * memory region
 *
 * @cache: #MemoryRegionCache to be filled
 * @as: #AddressSpace to be accessed
 * @addr: address within that address space
 * @len: length of buffer
 * @is_write: indicates the transfer direction
 *
 * Will only work with RAM, and may map a subset of the requested range by
 * returning a value that is less than @len. On failure, returns a negative
 * errno value.
 *
 * Because it only works with RAM, this function can be used for
 * read-modify-write operations. In this case, @is_write should be %true.
 *
 * Note that addresses passed to the address_space_*_cached functions
 * are relative to @addr.
 */
int64_t address_space_cache_init(MemoryRegionCache *cache,
                                 AddressSpace *as,
                                 hwaddr addr,
                                 hwaddr len,
                                 bool is_write);

/**
 * address_space_cache_invalidate: complete a write to a #MemoryRegionCache
 *
 * @cache: The #MemoryRegionCache to operate on.
 * @addr: The first physical address that was written, relative to the
 *        address that was passed to @address_space_cache_init.
 * @access_len: The number of bytes that were written starting at @addr.
 */
void address_space_cache_invalidate(MemoryRegionCache *cache,
                                    hwaddr addr,
                                    hwaddr access_len);

/**
 * address_space_cache_destroy: free a #MemoryRegionCache
 *
 * @cache: The #MemoryRegionCache whose memory should be released.
 */
void address_space_cache_destroy(MemoryRegionCache *cache);

/* address_space_get_iotlb_entry: translate an address into an IOTLB
 * entry. Should be called from an RCU critical section.
 */
IOMMUTLBEntry address_space_get_iotlb_entry(AddressSpace *as, hwaddr addr,
                                            bool is_write, MemTxAttrs attrs);

/* address_space_translate: translate an address range in an address space
 * into a MemoryRegion and an address range within that region. Should be
 * called from an RCU critical section, so that the returned region does not
 * lose its last reference after address_space_translate returns.
 *
 * @fv: #FlatView to be accessed
 * @addr: address within that address space
 * @xlat: pointer to address within the returned memory region section's
 *        #MemoryRegion.
 * @len: pointer to length
 * @is_write: indicates the transfer direction
 * @attrs: memory attributes
 */
MemoryRegion *flatview_translate(FlatView *fv,
                                 hwaddr addr, hwaddr *xlat,
                                 hwaddr *len, bool is_write,
                                 MemTxAttrs attrs);

static inline MemoryRegion *address_space_translate(AddressSpace *as,
                                                    hwaddr addr, hwaddr *xlat,
                                                    hwaddr *len, bool is_write,
                                                    MemTxAttrs attrs)
{
    return flatview_translate(address_space_to_flatview(as),
                              addr, xlat, len, is_write, attrs);
}

/* address_space_access_valid: check for validity of accessing an address
 * space range
 *
 * Check whether memory is assigned to the given address space range, and
 * access is permitted by any IOMMU regions that are active for the address
 * space.
 *
 * For now, addr and len should be aligned to a page size. This limitation
 * will be lifted in the future.
 *
 * @as: #AddressSpace to be accessed
 * @addr: address within that address space
 * @len: length of the area to be checked
 * @is_write: indicates the transfer direction
 * @attrs: memory attributes
 */
bool address_space_access_valid(AddressSpace *as, hwaddr addr, hwaddr len,
                                bool is_write, MemTxAttrs attrs);

/* address_space_map: map a physical memory region into a host virtual address
 *
 * May map a subset of the requested range, given by and returned in @plen.
 * May return %NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 *
 * @as: #AddressSpace to be accessed
 * @addr: address within that address space
 * @plen: pointer to length of buffer; updated on return
 * @is_write: indicates the transfer direction
 * @attrs: memory attributes
 */
void *address_space_map(AddressSpace *as, hwaddr addr,
                        hwaddr *plen, bool is_write, MemTxAttrs attrs);

/* address_space_unmap: Unmaps a memory region previously mapped by
 * address_space_map()
 *
 * Will also mark the memory as dirty if @is_write == %true. @access_len gives
 * the amount of memory that was actually read or written by the caller.
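 *
 * A minimal map/modify/unmap sketch ("dma_addr" and "dma_len" are
 * placeholders):
 *
 *     hwaddr plen = dma_len;
 *     void *p = address_space_map(as, dma_addr, &plen, true,
 *                                 MEMTXATTRS_UNSPECIFIED);
 *     if (p) {
 *         memset(p, 0, plen);
 *         address_space_unmap(as, p, plen, true, plen);
 *     }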
 *
 * @as: #AddressSpace used
 * @buffer: host pointer as returned by address_space_map()
 * @len: buffer length as returned by address_space_map()
 * @access_len: amount of data actually transferred
 * @is_write: indicates the transfer direction
 */
void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
                         int is_write, hwaddr access_len);


/* Internal functions, part of the implementation of address_space_read. */
MemTxResult address_space_read_full(AddressSpace *as, hwaddr addr,
                                    MemTxAttrs attrs, uint8_t *buf, hwaddr len);
MemTxResult flatview_read_continue(FlatView *fv, hwaddr addr,
                                   MemTxAttrs attrs, uint8_t *buf,
                                   hwaddr len, hwaddr addr1, hwaddr l,
                                   MemoryRegion *mr);
void *qemu_map_ram_ptr(RAMBlock *ram_block, ram_addr_t addr);

/* Internal functions, part of the implementation of address_space_read_cached
 * and address_space_write_cached. */
void address_space_read_cached_slow(MemoryRegionCache *cache,
                                    hwaddr addr, void *buf, hwaddr len);
void address_space_write_cached_slow(MemoryRegionCache *cache,
                                     hwaddr addr, const void *buf, hwaddr len);

static inline bool memory_access_is_direct(MemoryRegion *mr, bool is_write)
{
    if (is_write) {
        /* Writes can go straight to host memory only for plain,
         * writable RAM (not ROM, not RAM devices with side effects). */
        return memory_region_is_ram(mr) &&
               !mr->readonly && !memory_region_is_ram_device(mr);
    } else {
        /* Reads are also direct for ROM devices in ROMD mode. */
        return (memory_region_is_ram(mr) && !memory_region_is_ram_device(mr)) ||
               memory_region_is_romd(mr);
    }
}

/**
 * address_space_read: read from an address space.
 *
 * Return a MemTxResult indicating whether the operation succeeded
 * or failed (e.g. unassigned memory, device rejected the transaction,
 * IOMMU fault). Called within RCU critical section.
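 *
 * For example (sketch):
 *
 *     uint8_t buf[8];
 *     if (address_space_read(as, addr, MEMTXATTRS_UNSPECIFIED, buf,
 *                            sizeof(buf)) != MEMTX_OK) {
 *         ... handle the failed transaction ...
 *     }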
 *
 * @as: #AddressSpace to be accessed
 * @addr: address within that address space
 * @attrs: memory transaction attributes
 * @buf: buffer with the data transferred
 * @len: the number of bytes to read
 */
static inline __attribute__((__always_inline__))
MemTxResult address_space_read(AddressSpace *as, hwaddr addr,
                               MemTxAttrs attrs, uint8_t *buf,
                               hwaddr len)
{
    MemTxResult result = MEMTX_OK;
    hwaddr l, addr1;
    void *ptr;
    MemoryRegion *mr;
    FlatView *fv;

    if (__builtin_constant_p(len)) {
        if (len) {
            rcu_read_lock();
            fv = address_space_to_flatview(as);
            l = len;
            mr = flatview_translate(fv, addr, &addr1, &l, false, attrs);
            if (len == l && memory_access_is_direct(mr, false)) {
                /* Fast path: copy straight from host RAM. */
                ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
                memcpy(buf, ptr, len);
            } else {
                /* Slow path: MMIO dispatch, possibly in several pieces. */
                result = flatview_read_continue(fv, addr, attrs, buf, len,
                                                addr1, l, mr);
            }
            rcu_read_unlock();
        }
    } else {
        result = address_space_read_full(as, addr, attrs, buf, len);
    }
    return result;
}

/**
 * address_space_read_cached: read from a cached RAM region
 *
 * @cache: Cached region to be addressed
 * @addr: address relative to the base of the RAM region
 * @buf: buffer with the data transferred
 * @len: length of the data transferred
 */
static inline void
address_space_read_cached(MemoryRegionCache *cache, hwaddr addr,
                          void *buf, hwaddr len)
{
    assert(addr < cache->len && len <= cache->len - addr);
    if (likely(cache->ptr)) {
        memcpy(buf, cache->ptr + addr, len);
    } else {
        address_space_read_cached_slow(cache, addr, buf, len);
    }
}

/**
 * address_space_write_cached: write to a cached RAM region
 *
 * @cache: Cached region to be addressed
 * @addr: address relative to the base of the RAM region
 * @buf: buffer with the data transferred
 * @len: length of the data transferred
 */
static inline void
address_space_write_cached(MemoryRegionCache *cache, hwaddr addr,
                           void *buf, hwaddr len)
{
    assert(addr < cache->len && len <= cache->len - addr);
    if (likely(cache->ptr)) {
        memcpy(cache->ptr + addr, buf, len);
    } else {
        address_space_write_cached_slow(cache, addr, buf, len);
    }
}

#endif /* !CONFIG_USER_ONLY */

#endif /* MEMORY_H */