/*
 * Physical memory management API
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates
 *
 * Authors:
 *  Avi Kivity <avi@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#ifndef MEMORY_H
#define MEMORY_H

#ifndef CONFIG_USER_ONLY

#include <stdint.h>
#include <stdbool.h>
#include "qemu-common.h"
#include "cpu-common.h"
#include "targphys.h"
#include "qemu-queue.h"
#include "iorange.h"
#include "ioport.h"
#include "int128.h"

typedef struct MemoryRegionOps MemoryRegionOps;
typedef struct MemoryRegion MemoryRegion;
typedef struct MemoryRegionPortio MemoryRegionPortio;
typedef struct MemoryRegionMmio MemoryRegionMmio;

/* Must match *_DIRTY_FLAGS in cpu-all.h.  To be replaced with dynamic
 * registration.
 */
#define DIRTY_MEMORY_VGA       0
#define DIRTY_MEMORY_CODE      1
#define DIRTY_MEMORY_MIGRATION 3

struct MemoryRegionMmio {
    CPUReadMemoryFunc *read[3];
    CPUWriteMemoryFunc *write[3];
};

/*
 * Memory region callbacks
 */
struct MemoryRegionOps {
    /* Read from the memory region. @addr is relative to @mr; @size is
     * in bytes. */
    uint64_t (*read)(void *opaque,
                     target_phys_addr_t addr,
                     unsigned size);
    /* Write to the memory region. @addr is relative to @mr; @size is
     * in bytes. */
    void (*write)(void *opaque,
                  target_phys_addr_t addr,
                  uint64_t data,
                  unsigned size);

    enum device_endian endianness;
    /* Guest-visible constraints: */
    struct {
        /* If nonzero, specify bounds on access sizes beyond which a machine
         * check is thrown.
         */
        unsigned min_access_size;
        unsigned max_access_size;
        /* If true, unaligned accesses are supported.  Otherwise unaligned
         * accesses throw machine checks.
         */
        bool unaligned;
    } valid;
    /* Internal implementation constraints: */
    struct {
        /* If nonzero, specifies the minimum size implemented.  Smaller sizes
         * will be rounded upwards and a partial result will be returned.
         */
        unsigned min_access_size;
        /* If nonzero, specifies the maximum size implemented.  Larger sizes
         * will be done as a series of accesses with smaller sizes.
         */
        unsigned max_access_size;
        /* If true, unaligned accesses are supported.  Otherwise all accesses
         * are converted to (possibly multiple) naturally aligned accesses.
         */
        bool unaligned;
    } impl;

    /* If .read and .write are not present, old_portio may be used for
     * backwards compatibility with old portio registration
     */
    const MemoryRegionPortio *old_portio;
    /* If .read and .write are not present, old_mmio may be used for
     * backwards compatibility with old mmio registration
     */
    const MemoryRegionMmio old_mmio;
};
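
/*
 * Example (illustrative sketch, not part of this API): a MemoryRegionOps
 * table for a hypothetical device with a single 32-bit status register at
 * offset 0.  All "example_*" names are invented for illustration only.
 */
static inline uint64_t example_mmio_read(void *opaque, target_phys_addr_t addr,
                                         unsigned size)
{
    uint32_t *status = opaque;   /* the pointer given to memory_region_init_io() */

    return addr == 0 ? *status : 0;
}

static inline void example_mmio_write(void *opaque, target_phys_addr_t addr,
                                      uint64_t data, unsigned size)
{
    uint32_t *status = opaque;

    if (addr == 0) {
        *status = (uint32_t)data;
    }
}

static const MemoryRegionOps example_mmio_ops = {
    .read = example_mmio_read,
    .write = example_mmio_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
    .valid = {
        .min_access_size = 4,    /* reject accesses narrower or wider than 32 bits */
        .max_access_size = 4,
    },
};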

typedef struct CoalescedMemoryRange CoalescedMemoryRange;
typedef struct MemoryRegionIoeventfd MemoryRegionIoeventfd;

struct MemoryRegion {
    /* All fields are private - violators will be prosecuted */
    const MemoryRegionOps *ops;
    void *opaque;
    MemoryRegion *parent;
    Int128 size;
    target_phys_addr_t addr;
    target_phys_addr_t offset;
    bool backend_registered;
    void (*destructor)(MemoryRegion *mr);
    ram_addr_t ram_addr;
    IORange iorange;
    bool terminates;
    bool readable;
    bool readonly; /* For RAM regions */
    MemoryRegion *alias;
    target_phys_addr_t alias_offset;
    unsigned priority;
    bool may_overlap;
    QTAILQ_HEAD(subregions, MemoryRegion) subregions;
    QTAILQ_ENTRY(MemoryRegion) subregions_link;
    QTAILQ_HEAD(coalesced_ranges, CoalescedMemoryRange) coalesced;
    const char *name;
    uint8_t dirty_log_mask;
    unsigned ioeventfd_nb;
    MemoryRegionIoeventfd *ioeventfds;
};

struct MemoryRegionPortio {
    uint32_t offset;
    uint32_t len;
    unsigned size;
    IOPortReadFunc *read;
    IOPortWriteFunc *write;
};

#define PORTIO_END_OF_LIST() { }

/**
 * memory_region_init: Initialize a memory region
 *
 * The region typically acts as a container for other memory regions.  Use
 * memory_region_add_subregion() to add subregions.
 *
 * @mr: the #MemoryRegion to be initialized
 * @name: used for debugging; not visible to the user or ABI
 * @size: size of the region; any subregions beyond this size will be clipped
 */
void memory_region_init(MemoryRegion *mr,
                        const char *name,
                        uint64_t size);
/**
 * memory_region_init_io: Initialize an I/O memory region.
 *
 * Accesses into the region will cause the callbacks in @ops to be called.
 * If @size is nonzero, subregions will be clipped to @size.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @ops: a structure containing read and write callbacks to be used when
 *       I/O is performed on the region.
 * @opaque: passed to the read and write callbacks of the @ops structure.
 * @name: used for debugging; not visible to the user or ABI
 * @size: size of the region.
 */
void memory_region_init_io(MemoryRegion *mr,
                           const MemoryRegionOps *ops,
                           void *opaque,
                           const char *name,
                           uint64_t size);

/**
 * memory_region_init_ram:  Initialize RAM memory region.  Accesses into the
 *                          region will modify memory directly.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @dev: a device associated with the region; may be %NULL.
 * @name: the name of the region; the pair (@dev, @name) must be globally
 *        unique.  The name is part of the save/restore ABI and so cannot be
 *        changed.
 * @size: size of the region.
 */
void memory_region_init_ram(MemoryRegion *mr,
                            DeviceState *dev, /* FIXME: layering violation */
                            const char *name,
                            uint64_t size);
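
/*
 * Example (illustrative sketch): creating the regions of the hypothetical
 * device above.  "ExampleDeviceState" and the sizes are invented; mapping the
 * regions into an address space is done separately with
 * memory_region_add_subregion() (declared further down).
 */
typedef struct ExampleDeviceState {
    uint32_t status;
    MemoryRegion mmio;
    MemoryRegion ram;
} ExampleDeviceState;

static inline void example_device_init_regions(ExampleDeviceState *s)
{
    /* 4 KB of registers handled by the callbacks in example_mmio_ops */
    memory_region_init_io(&s->mmio, &example_mmio_ops, &s->status,
                          "example.mmio", 0x1000);
    /* 1 MB of ordinary RAM, accessed by the guest without callbacks */
    memory_region_init_ram(&s->ram, NULL, "example.ram", 0x100000);
}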

/**
 * memory_region_init_ram_ptr:  Initialize RAM memory region from a
 *                              user-provided pointer.  Accesses into the
 *                              region will modify memory directly.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @dev: a device associated with the region; may be %NULL.
 * @name: the name of the region; the pair (@dev, @name) must be globally
 *        unique.  The name is part of the save/restore ABI and so cannot be
 *        changed.
 * @size: size of the region.
 * @ptr: memory to be mapped; must contain at least @size bytes.
 */
void memory_region_init_ram_ptr(MemoryRegion *mr,
                                DeviceState *dev, /* FIXME: layering violation */
                                const char *name,
                                uint64_t size,
                                void *ptr);

/**
 * memory_region_init_alias: Initialize a memory region that aliases all or a
 *                           part of another memory region.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @name: used for debugging; not visible to the user or ABI
 * @orig: the region to be referenced; @mr will be equivalent to
 *        @orig between @offset and @offset + @size - 1.
 * @offset: start of the section in @orig to be referenced.
 * @size: size of the region.
 */
void memory_region_init_alias(MemoryRegion *mr,
                              const char *name,
                              MemoryRegion *orig,
                              target_phys_addr_t offset,
                              uint64_t size);

/**
 * memory_region_init_rom_device:  Initialize a ROM memory region.  Writes are
 *                                 handled via callbacks.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @ops: callbacks for write access handling.
 * @opaque: passed to the read and write callbacks of the @ops structure.
 * @dev: a device associated with the region; may be %NULL.
 * @name: the name of the region; the pair (@dev, @name) must be globally
 *        unique.  The name is part of the save/restore ABI and so cannot be
 *        changed.
 * @size: size of the region.
 */
void memory_region_init_rom_device(MemoryRegion *mr,
                                   const MemoryRegionOps *ops,
                                   void *opaque,
                                   DeviceState *dev, /* FIXME: layering violation */
                                   const char *name,
                                   uint64_t size);

/**
 * memory_region_destroy: Destroy a memory region and reclaim all resources.
 *
 * @mr: the region to be destroyed.  May not currently be a subregion
 *      (see memory_region_add_subregion()) or referenced in an alias
 *      (see memory_region_init_alias()).
 */
void memory_region_destroy(MemoryRegion *mr);

/**
 * memory_region_size: get a memory region's size.
 *
 * @mr: the memory region being queried.
 */
uint64_t memory_region_size(MemoryRegion *mr);

/**
 * memory_region_get_ram_ptr: Get a pointer into a RAM memory region.
 *
 * Returns a host pointer to a RAM memory region (created with
 * memory_region_init_ram() or memory_region_init_ram_ptr()).  Use with
 * care.
 *
 * @mr: the memory region being queried.
 */
void *memory_region_get_ram_ptr(MemoryRegion *mr);
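
/*
 * Example (illustrative sketch): an alias exposing only the first 64 KB of
 * the RAM region created above, plus direct host access to the RAM contents
 * via memory_region_get_ram_ptr().  The names and sizes are hypothetical.
 */
static inline void example_make_lowmem_alias(ExampleDeviceState *s,
                                             MemoryRegion *lowmem_alias)
{
    /* @lowmem_alias behaves exactly like bytes 0..0xffff of s->ram and can
     * be mapped at a second guest address independently of s->ram.
     */
    memory_region_init_alias(lowmem_alias, "example.lowmem", &s->ram,
                             0, 0x10000);

    /* Host code may also touch the RAM directly; the guest sees the change. */
    memset(memory_region_get_ram_ptr(&s->ram), 0, 0x10000);
}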

/**
 * memory_region_set_offset: Sets an offset to be added to MemoryRegionOps
 *                           callbacks.
 *
 * This function is deprecated and should not be used in new code.
 */
void memory_region_set_offset(MemoryRegion *mr, target_phys_addr_t offset);

/**
 * memory_region_set_log: Turn dirty logging on or off for a region.
 *
 * Turns dirty logging on or off for a specified client (display, migration).
 * Only meaningful for RAM regions.
 *
 * @mr: the memory region being updated.
 * @log: whether dirty logging is to be enabled or disabled.
 * @client: the user of the logging information; %DIRTY_MEMORY_MIGRATION or
 *          %DIRTY_MEMORY_VGA.
 */
void memory_region_set_log(MemoryRegion *mr, bool log, unsigned client);

/**
 * memory_region_get_dirty: Check whether a page is dirty for a specified
 *                          client.
 *
 * Checks whether a page has been written to since the last
 * call to memory_region_reset_dirty() with the same @client.  Dirty logging
 * must be enabled.
 *
 * @mr: the memory region being queried.
 * @addr: the address (relative to the start of the region) being queried.
 * @client: the user of the logging information; %DIRTY_MEMORY_MIGRATION or
 *          %DIRTY_MEMORY_VGA.
 */
bool memory_region_get_dirty(MemoryRegion *mr, target_phys_addr_t addr,
                             unsigned client);

/**
 * memory_region_set_dirty: Mark a page as dirty in a memory region.
 *
 * Marks a page as dirty, after it has been dirtied outside guest code.
 *
 * @mr: the memory region being dirtied.
 * @addr: the address (relative to the start of the region) being dirtied.
 */
void memory_region_set_dirty(MemoryRegion *mr, target_phys_addr_t addr);

/**
 * memory_region_sync_dirty_bitmap: Synchronize a region's dirty bitmap with
 *                                  any external TLBs (e.g. kvm)
 *
 * Flushes dirty information from accelerators such as kvm and vhost-net
 * and makes it available to users of the memory API.
 *
 * @mr: the region being flushed.
 */
void memory_region_sync_dirty_bitmap(MemoryRegion *mr);

/**
 * memory_region_reset_dirty: Mark a range of pages as clean, for a specified
 *                            client.
 *
 * Marks a range of pages as no longer dirty.
 *
 * @mr: the region being updated.
 * @addr: the start of the subrange being cleaned.
 * @size: the size of the subrange being cleaned.
 * @client: the user of the logging information; %DIRTY_MEMORY_MIGRATION or
 *          %DIRTY_MEMORY_VGA.
 */
void memory_region_reset_dirty(MemoryRegion *mr, target_phys_addr_t addr,
                               target_phys_addr_t size, unsigned client);
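
/*
 * Example (illustrative sketch): how a display device might consume dirty
 * logging for a video RAM region.  memory_region_set_log() would normally be
 * called once at initialization; it is shown inline here for brevity.  The
 * page offset and size arguments are hypothetical.
 */
static inline void example_display_refresh(MemoryRegion *vram,
                                           target_phys_addr_t page_offset,
                                           target_phys_addr_t page_size)
{
    memory_region_set_log(vram, true, DIRTY_MEMORY_VGA);

    /* Pull the latest dirty information out of accelerators such as kvm. */
    memory_region_sync_dirty_bitmap(vram);

    if (memory_region_get_dirty(vram, page_offset, DIRTY_MEMORY_VGA)) {
        /* ... redraw the part of the screen backed by this page ... */
        memory_region_reset_dirty(vram, page_offset, page_size,
                                  DIRTY_MEMORY_VGA);
    }
}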

/**
 * memory_region_set_readonly: Turn a memory region read-only (or read-write)
 *
 * Allows a memory region to be marked as read-only (turning it into a ROM).
 * Only useful on RAM regions.
 *
 * @mr: the region being updated.
 * @readonly: whether the region is to be ROM or RAM.
 */
void memory_region_set_readonly(MemoryRegion *mr, bool readonly);

/**
 * memory_region_rom_device_set_readable: enable/disable ROM readability
 *
 * Allows a ROM device (initialized with memory_region_init_rom_device()) to
 * be marked as readable (default) or not readable.  When it is readable,
 * the device is mapped to guest memory.  When not readable, reads are
 * forwarded to the #MemoryRegion.read function.
 *
 * @mr: the memory region to be updated
 * @readable: whether reads are satisfied directly (%true) or via callbacks
 *            (%false)
 */
void memory_region_rom_device_set_readable(MemoryRegion *mr, bool readable);

/**
 * memory_region_set_coalescing: Enable memory coalescing for the region.
 *
 * Enables writes to the region to be queued for later processing.  MMIO ->write
 * callbacks may be delayed until a non-coalesced MMIO is issued.
 * Only useful for IO regions.  Roughly similar to write-combining hardware.
 *
 * @mr: the memory region to be write coalesced
 */
void memory_region_set_coalescing(MemoryRegion *mr);

/**
 * memory_region_add_coalescing: Enable memory coalescing for a sub-range of
 *                               a region.
 *
 * Like memory_region_set_coalescing(), but works on a sub-range of a region.
 * Multiple calls can be issued to coalesce disjoint ranges.
 *
 * @mr: the memory region to be updated.
 * @offset: the start of the range within the region to be coalesced.
 * @size: the size of the subrange to be coalesced.
 */
void memory_region_add_coalescing(MemoryRegion *mr,
                                  target_phys_addr_t offset,
                                  uint64_t size);

/**
 * memory_region_clear_coalescing: Disable MMIO coalescing for the region.
 *
 * Disables any coalescing caused by memory_region_set_coalescing() or
 * memory_region_add_coalescing().  Roughly equivalent to uncacheable memory
 * hardware.
 *
 * @mr: the memory region to be updated.
 */
void memory_region_clear_coalescing(MemoryRegion *mr);

/**
 * memory_region_add_eventfd: Request an eventfd to be triggered when a word
 *                            is written to a location.
 *
 * Marks a word in an IO region (initialized with memory_region_init_io())
 * as a trigger for an eventfd event.  The I/O callback will not be called.
 * The caller must be prepared to handle failure (that is, take the required
 * action if the callback _is_ called).
 *
 * @mr: the memory region being updated.
 * @addr: the address within @mr that is to be monitored
 * @size: the size of the access to trigger the eventfd
 * @match_data: whether to match against @data, instead of just @addr
 * @data: the data to match against the guest write
 * @fd: the eventfd to be triggered when @addr, @size, and @data all match.
 **/
void memory_region_add_eventfd(MemoryRegion *mr,
                               target_phys_addr_t addr,
                               unsigned size,
                               bool match_data,
                               uint64_t data,
                               int fd);

/**
 * memory_region_del_eventfd: Cancel an eventfd.
 *
 * Cancels an eventfd trigger requested by a previous
 * memory_region_add_eventfd() call.
 *
 * @mr: the memory region being updated.
 * @addr: the address within @mr that is to be monitored
 * @size: the size of the access to trigger the eventfd
 * @match_data: whether to match against @data, instead of just @addr
 * @data: the data to match against the guest write
 * @fd: the eventfd to be triggered when @addr, @size, and @data all match.
 */
void memory_region_del_eventfd(MemoryRegion *mr,
                               target_phys_addr_t addr,
                               unsigned size,
                               bool match_data,
                               uint64_t data,
                               int fd);
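
/*
 * Example (illustrative sketch): binding a "doorbell" register to an eventfd
 * so that a guest write is handled by whoever polls the file descriptor
 * instead of by the region's write callback.  The offset, width, value and
 * the origin of @fd (e.g. eventfd(2)) are hypothetical.
 */
static inline void example_wire_doorbell(ExampleDeviceState *s, int fd)
{
    /* Trigger @fd on any 2-byte guest write of the value 0 to offset 0x10. */
    memory_region_add_eventfd(&s->mmio, 0x10, 2, true, 0, fd);
}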
/**
 * memory_region_add_subregion: Add a sub-region to a container.
 *
 * Adds a sub-region at @offset.  The sub-region may not overlap with other
 * subregions (except for those explicitly marked as overlapping).  A region
 * may only be added once as a subregion (unless removed with
 * memory_region_del_subregion()); use memory_region_init_alias() if you
 * want a region to be a subregion in multiple locations.
 *
 * @mr: the region to contain the new subregion; must be a container
 *      initialized with memory_region_init().
 * @offset: the offset relative to @mr where @subregion is added.
 * @subregion: the subregion to be added.
 */
void memory_region_add_subregion(MemoryRegion *mr,
                                 target_phys_addr_t offset,
                                 MemoryRegion *subregion);
/**
 * memory_region_add_subregion_overlap: Add a sub-region to a container,
 *                                      with overlap.
 *
 * Adds a sub-region at @offset.  The sub-region may overlap with other
 * subregions.  Conflicts are resolved by having a higher @priority hide a
 * lower @priority.  Subregions without priority are taken as @priority 0.
 * A region may only be added once as a subregion (unless removed with
 * memory_region_del_subregion()); use memory_region_init_alias() if you
 * want a region to be a subregion in multiple locations.
 *
 * @mr: the region to contain the new subregion; must be a container
 *      initialized with memory_region_init().
 * @offset: the offset relative to @mr where @subregion is added.
 * @subregion: the subregion to be added.
 * @priority: used for resolving overlaps; highest priority wins.
 */
void memory_region_add_subregion_overlap(MemoryRegion *mr,
                                         target_phys_addr_t offset,
                                         MemoryRegion *subregion,
                                         unsigned priority);
/**
 * memory_region_del_subregion: Remove a subregion.
 *
 * Removes a subregion from its container.
 *
 * @mr: the container to be updated.
 * @subregion: the region being removed; must be a current subregion of @mr.
 */
void memory_region_del_subregion(MemoryRegion *mr,
                                 MemoryRegion *subregion);

/* Start a transaction; changes will be accumulated and made visible only
 * when the transaction ends.
 */
void memory_region_transaction_begin(void);
/* Commit a transaction and make changes visible to the guest.
 */
void memory_region_transaction_commit(void);

void mtree_info(fprintf_function mon_printf, void *f);

#endif

#endif
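
/*
 * Example (illustrative sketch): wiring the hypothetical device above into a
 * board.  The container, addresses, sizes and priorities are invented; a real
 * board would use its own system memory region.
 *
 *   MemoryRegion sysmem, rom_shadow;
 *   ExampleDeviceState dev;
 *
 *   memory_region_init(&sysmem, "system", 0x100000000ULL);
 *   example_device_init_regions(&dev);
 *
 *   memory_region_transaction_begin();
 *   memory_region_add_subregion(&sysmem, 0x00000000, &dev.ram);
 *   memory_region_add_subregion(&sysmem, 0x10000000, &dev.mmio);
 *   memory_region_init_alias(&rom_shadow, "rom-shadow", &dev.ram, 0, 0x10000);
 *   memory_region_add_subregion_overlap(&sysmem, 0x000f0000, &rom_shadow, 1);
 *   memory_region_transaction_commit();
 *
 * The overlapping alias at priority 1 hides the underlying RAM pages mapped
 * at the same guest addresses, and all changes become visible to the guest
 * atomically when the transaction commits.
 */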