qemu/softmmu/physmem.c
   1/*
   2 * RAM allocation and memory access
   3 *
   4 *  Copyright (c) 2003 Fabrice Bellard
   5 *
   6 * This library is free software; you can redistribute it and/or
   7 * modify it under the terms of the GNU Lesser General Public
   8 * License as published by the Free Software Foundation; either
   9 * version 2.1 of the License, or (at your option) any later version.
  10 *
  11 * This library is distributed in the hope that it will be useful,
  12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  14 * Lesser General Public License for more details.
  15 *
  16 * You should have received a copy of the GNU Lesser General Public
  17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
  18 */
  19
  20#include "qemu/osdep.h"
  21#include "qemu-common.h"
  22#include "qapi/error.h"
  23
  24#include "qemu/cutils.h"
  25#include "qemu/cacheflush.h"
  26#include "cpu.h"
  27
  28#ifdef CONFIG_TCG
  29#include "hw/core/tcg-cpu-ops.h"
  30#endif /* CONFIG_TCG */
  31
  32#include "exec/exec-all.h"
  33#include "exec/target_page.h"
  34#include "hw/qdev-core.h"
  35#include "hw/qdev-properties.h"
  36#include "hw/boards.h"
  37#include "hw/xen/xen.h"
  38#include "sysemu/kvm.h"
  39#include "sysemu/sysemu.h"
  40#include "sysemu/tcg.h"
  41#include "sysemu/qtest.h"
  42#include "qemu/timer.h"
  43#include "qemu/config-file.h"
  44#include "qemu/error-report.h"
  45#include "qemu/qemu-print.h"
  46#include "exec/memory.h"
  47#include "exec/ioport.h"
  48#include "sysemu/dma.h"
  49#include "sysemu/hostmem.h"
  50#include "sysemu/hw_accel.h"
  51#include "exec/address-spaces.h"
  52#include "sysemu/xen-mapcache.h"
  53#include "trace/trace-root.h"
  54
  55#ifdef CONFIG_FALLOCATE_PUNCH_HOLE
  56#include <linux/falloc.h>
  57#endif
  58
  59#include "qemu/rcu_queue.h"
  60#include "qemu/main-loop.h"
  61#include "exec/translate-all.h"
  62#include "sysemu/replay.h"
  63
  64#include "exec/memory-internal.h"
  65#include "exec/ram_addr.h"
  66#include "exec/log.h"
  67
  68#include "qemu/pmem.h"
  69
  70#include "migration/vmstate.h"
  71
  72#include "qemu/range.h"
  73#ifndef _WIN32
  74#include "qemu/mmap-alloc.h"
  75#endif
  76
  77#include "monitor/monitor.h"
  78
  79#ifdef CONFIG_LIBDAXCTL
  80#include <daxctl/libdaxctl.h>
  81#endif
  82
  83//#define DEBUG_SUBPAGE
  84
  85/* ram_list is read under rcu_read_lock()/rcu_read_unlock().  Writes
  86 * are protected by the ramlist lock.
  87 */
  88RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };
  89
  90static MemoryRegion *system_memory;
  91static MemoryRegion *system_io;
  92
  93AddressSpace address_space_io;
  94AddressSpace address_space_memory;
  95
  96static MemoryRegion io_mem_unassigned;
  97
  98typedef struct PhysPageEntry PhysPageEntry;
  99
 100struct PhysPageEntry {
 101    /* How many levels to skip to the next node (P_L2_BITS bits each). 0 for a leaf. */
 102    uint32_t skip : 6;
 103    /* index into phys_sections (!skip) or phys_map_nodes (skip) */
 104    uint32_t ptr : 26;
 105};
 106
 107#define PHYS_MAP_NODE_NIL (((uint32_t)~0) >> 6)
 108
 109/* Size of the L2 (and L3, etc) page tables.  */
 110#define ADDR_SPACE_BITS 64
 111
 112#define P_L2_BITS 9
 113#define P_L2_SIZE (1 << P_L2_BITS)
 114
 115#define P_L2_LEVELS (((ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / P_L2_BITS) + 1)
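
/*
 * Worked example (illustrative, assuming a 4 KiB target page, i.e.
 * TARGET_PAGE_BITS == 12): P_L2_LEVELS = ((64 - 12 - 1) / 9) + 1 = 6,
 * so a lookup walks at most six 512-entry nodes, each level resolving
 * another P_L2_BITS == 9 bits of the page index.
 */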
 116
 117typedef PhysPageEntry Node[P_L2_SIZE];
 118
 119typedef struct PhysPageMap {
 120    struct rcu_head rcu;
 121
 122    unsigned sections_nb;
 123    unsigned sections_nb_alloc;
 124    unsigned nodes_nb;
 125    unsigned nodes_nb_alloc;
 126    Node *nodes;
 127    MemoryRegionSection *sections;
 128} PhysPageMap;
 129
 130struct AddressSpaceDispatch {
 131    MemoryRegionSection *mru_section;
 132    /* This is a multi-level map on the physical address space.
 133     * The bottom level has pointers to MemoryRegionSections.
 134     */
 135    PhysPageEntry phys_map;
 136    PhysPageMap map;
 137};
 138
 139#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
 140typedef struct subpage_t {
 141    MemoryRegion iomem;
 142    FlatView *fv;
 143    hwaddr base;
 144    uint16_t sub_section[];
 145} subpage_t;
 146
 147#define PHYS_SECTION_UNASSIGNED 0
 148
 149static void io_mem_init(void);
 150static void memory_map_init(void);
 151static void tcg_log_global_after_sync(MemoryListener *listener);
 152static void tcg_commit(MemoryListener *listener);
 153
 154/**
 155 * CPUAddressSpace: all the information a CPU needs about an AddressSpace
 156 * @cpu: the CPU whose AddressSpace this is
 157 * @as: the AddressSpace itself
 158 * @memory_dispatch: its dispatch pointer (cached, RCU protected)
 159 * @tcg_as_listener: listener for tracking changes to the AddressSpace
 160 */
 161struct CPUAddressSpace {
 162    CPUState *cpu;
 163    AddressSpace *as;
 164    struct AddressSpaceDispatch *memory_dispatch;
 165    MemoryListener tcg_as_listener;
 166};
 167
 168struct DirtyBitmapSnapshot {
 169    ram_addr_t start;
 170    ram_addr_t end;
 171    unsigned long dirty[];
 172};
 173
 174static void phys_map_node_reserve(PhysPageMap *map, unsigned nodes)
 175{
 176    static unsigned alloc_hint = 16;
 177    if (map->nodes_nb + nodes > map->nodes_nb_alloc) {
 178        map->nodes_nb_alloc = MAX(alloc_hint, map->nodes_nb + nodes);
 179        map->nodes = g_renew(Node, map->nodes, map->nodes_nb_alloc);
 180        alloc_hint = map->nodes_nb_alloc;
 181    }
 182}
 183
 184static uint32_t phys_map_node_alloc(PhysPageMap *map, bool leaf)
 185{
 186    unsigned i;
 187    uint32_t ret;
 188    PhysPageEntry e;
 189    PhysPageEntry *p;
 190
 191    ret = map->nodes_nb++;
 192    p = map->nodes[ret];
 193    assert(ret != PHYS_MAP_NODE_NIL);
 194    assert(ret != map->nodes_nb_alloc);
 195
 196    e.skip = leaf ? 0 : 1;
 197    e.ptr = leaf ? PHYS_SECTION_UNASSIGNED : PHYS_MAP_NODE_NIL;
 198    for (i = 0; i < P_L2_SIZE; ++i) {
 199        memcpy(&p[i], &e, sizeof(e));
 200    }
 201    return ret;
 202}
 203
 204static void phys_page_set_level(PhysPageMap *map, PhysPageEntry *lp,
 205                                hwaddr *index, uint64_t *nb, uint16_t leaf,
 206                                int level)
 207{
 208    PhysPageEntry *p;
 209    hwaddr step = (hwaddr)1 << (level * P_L2_BITS);
 210
 211    if (lp->skip && lp->ptr == PHYS_MAP_NODE_NIL) {
 212        lp->ptr = phys_map_node_alloc(map, level == 0);
 213    }
 214    p = map->nodes[lp->ptr];
 215    lp = &p[(*index >> (level * P_L2_BITS)) & (P_L2_SIZE - 1)];
 216
 217    while (*nb && lp < &p[P_L2_SIZE]) {
 218        if ((*index & (step - 1)) == 0 && *nb >= step) {
 219            lp->skip = 0;
 220            lp->ptr = leaf;
 221            *index += step;
 222            *nb -= step;
 223        } else {
 224            phys_page_set_level(map, lp, index, nb, leaf, level - 1);
 225        }
 226        ++lp;
 227    }
 228}
 229
 230static void phys_page_set(AddressSpaceDispatch *d,
 231                          hwaddr index, uint64_t nb,
 232                          uint16_t leaf)
 233{
 234    /* Wildly overreserve - it doesn't matter much. */
 235    phys_map_node_reserve(&d->map, 3 * P_L2_LEVELS);
 236
 237    phys_page_set_level(&d->map, &d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
 238}
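
/*
 * Illustrative example (hypothetical values): registering a 2 MiB RAM
 * section at guest physical address 0x40000000 with 4 KiB target pages
 * comes down to phys_page_set(d, 0x40000000 >> 12, 0x200, section_idx),
 * i.e. page index 0x40000 and 0x200 leaf entries pointing at the same
 * section.
 */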
 239
 240/* Compact a non-leaf page entry.  If the entry has a single child, update
 241 * our entry so we can skip that child and go directly to the destination.
 242 */
 243static void phys_page_compact(PhysPageEntry *lp, Node *nodes)
 244{
 245    unsigned valid_ptr = P_L2_SIZE;
 246    int valid = 0;
 247    PhysPageEntry *p;
 248    int i;
 249
 250    if (lp->ptr == PHYS_MAP_NODE_NIL) {
 251        return;
 252    }
 253
 254    p = nodes[lp->ptr];
 255    for (i = 0; i < P_L2_SIZE; i++) {
 256        if (p[i].ptr == PHYS_MAP_NODE_NIL) {
 257            continue;
 258        }
 259
 260        valid_ptr = i;
 261        valid++;
 262        if (p[i].skip) {
 263            phys_page_compact(&p[i], nodes);
 264        }
 265    }
 266
 267    /* We can only compress if there's only one child. */
 268    if (valid != 1) {
 269        return;
 270    }
 271
 272    assert(valid_ptr < P_L2_SIZE);
 273
 274    /* Don't compress if it won't fit in the # of bits we have. */
 275    if (P_L2_LEVELS >= (1 << 6) &&
 276        lp->skip + p[valid_ptr].skip >= (1 << 6)) {
 277        return;
 278    }
 279
 280    lp->ptr = p[valid_ptr].ptr;
 281    if (!p[valid_ptr].skip) {
 282        /* If our only child is a leaf, make this a leaf. */
 283        /* By design, we should have made this node a leaf to begin with so we
 284         * should never reach here.
 285         * But since it's so simple to handle this, let's do it just in case we
 286         * change this rule.
 287         */
 288        lp->skip = 0;
 289    } else {
 290        lp->skip += p[valid_ptr].skip;
 291    }
 292}
 293
 294void address_space_dispatch_compact(AddressSpaceDispatch *d)
 295{
 296    if (d->phys_map.skip) {
 297        phys_page_compact(&d->phys_map, d->map.nodes);
 298    }
 299}
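
/*
 * Illustrative note: when only a small part of the 64-bit space is
 * populated, each intermediate node on that path has a single valid
 * child, so its skip count is folded into the parent entry above and
 * phys_page_find() below can subtract several levels per iteration
 * instead of dereferencing one node per level.
 */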
 300
 301static inline bool section_covers_addr(const MemoryRegionSection *section,
 302                                       hwaddr addr)
 303{
 304    /* Memory topology clips a memory region to [0, 2^64); size.hi > 0 means
 305     * the section must cover the entire address space.
 306     */
 307    return int128_gethi(section->size) ||
 308           range_covers_byte(section->offset_within_address_space,
 309                             int128_getlo(section->size), addr);
 310}
 311
 312static MemoryRegionSection *phys_page_find(AddressSpaceDispatch *d, hwaddr addr)
 313{
 314    PhysPageEntry lp = d->phys_map, *p;
 315    Node *nodes = d->map.nodes;
 316    MemoryRegionSection *sections = d->map.sections;
 317    hwaddr index = addr >> TARGET_PAGE_BITS;
 318    int i;
 319
 320    for (i = P_L2_LEVELS; lp.skip && (i -= lp.skip) >= 0;) {
 321        if (lp.ptr == PHYS_MAP_NODE_NIL) {
 322            return &sections[PHYS_SECTION_UNASSIGNED];
 323        }
 324        p = nodes[lp.ptr];
 325        lp = p[(index >> (i * P_L2_BITS)) & (P_L2_SIZE - 1)];
 326    }
 327
 328    if (section_covers_addr(&sections[lp.ptr], addr)) {
 329        return &sections[lp.ptr];
 330    } else {
 331        return &sections[PHYS_SECTION_UNASSIGNED];
 332    }
 333}
 334
 335/* Called from RCU critical section */
 336static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
 337                                                        hwaddr addr,
 338                                                        bool resolve_subpage)
 339{
 340    MemoryRegionSection *section = qatomic_read(&d->mru_section);
 341    subpage_t *subpage;
 342
 343    if (!section || section == &d->map.sections[PHYS_SECTION_UNASSIGNED] ||
 344        !section_covers_addr(section, addr)) {
 345        section = phys_page_find(d, addr);
 346        qatomic_set(&d->mru_section, section);
 347    }
 348    if (resolve_subpage && section->mr->subpage) {
 349        subpage = container_of(section->mr, subpage_t, iomem);
 350        section = &d->map.sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
 351    }
 352    return section;
 353}
 354
 355/* Called from RCU critical section */
 356static MemoryRegionSection *
 357address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *xlat,
 358                                 hwaddr *plen, bool resolve_subpage)
 359{
 360    MemoryRegionSection *section;
 361    MemoryRegion *mr;
 362    Int128 diff;
 363
 364    section = address_space_lookup_region(d, addr, resolve_subpage);
 365    /* Compute offset within MemoryRegionSection */
 366    addr -= section->offset_within_address_space;
 367
 368    /* Compute offset within MemoryRegion */
 369    *xlat = addr + section->offset_within_region;
 370
 371    mr = section->mr;
 372
 373    /* MMIO registers can be expected to perform full-width accesses based only
 374     * on their address, without considering adjacent registers that could
 375     * decode to completely different MemoryRegions.  When such registers
 376     * exist (e.g. I/O ports 0xcf8 and 0xcf9 on most PC chipsets), MMIO
 377     * regions overlap wildly.  For this reason we cannot clamp the accesses
 378     * here.
 379     *
 380     * If the length is small (as is the case for address_space_ldl/stl),
 381     * everything works fine.  If the incoming length is large, however,
 382     * the caller really has to do the clamping through memory_access_size.
 383     */
 384    if (memory_region_is_ram(mr)) {
 385        diff = int128_sub(section->size, int128_make64(addr));
 386        *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
 387    }
 388    return section;
 389}
 390
 391/**
 392 * address_space_translate_iommu - translate an address through an IOMMU
 393 * memory region and then through the target address space.
 394 *
 395 * @iommu_mr: the IOMMU memory region that we start the translation from
 396 * @addr: the address to be translated through the MMU
 397 * @xlat: the translated address offset within the destination memory region.
 398 *        It cannot be %NULL.
 399 * @plen_out: valid read/write length of the translated address. It
 400 *            cannot be %NULL.
 401 * @page_mask_out: page mask for the translated address. This is
 402 *            only meaningful for IOMMU-translated addresses, since
 403 *            the IOMMU may map huge pages, which the mask reflects.
 404 *            It can be %NULL if we don't care about it.
 405 * @is_write: whether the translation operation is for write
 406 * @is_mmio: whether this can be MMIO, set true if it can
 407 * @target_as: the address space targeted by the IOMMU
 408 * @attrs: transaction attributes
 409 *
 410 * This function is called from RCU critical section.  It is the common
 411 * part of flatview_do_translate and address_space_translate_cached.
 412 */
 413static MemoryRegionSection address_space_translate_iommu(IOMMUMemoryRegion *iommu_mr,
 414                                                         hwaddr *xlat,
 415                                                         hwaddr *plen_out,
 416                                                         hwaddr *page_mask_out,
 417                                                         bool is_write,
 418                                                         bool is_mmio,
 419                                                         AddressSpace **target_as,
 420                                                         MemTxAttrs attrs)
 421{
 422    MemoryRegionSection *section;
 423    hwaddr page_mask = (hwaddr)-1;
 424
 425    do {
 426        hwaddr addr = *xlat;
 427        IOMMUMemoryRegionClass *imrc = memory_region_get_iommu_class_nocheck(iommu_mr);
 428        int iommu_idx = 0;
 429        IOMMUTLBEntry iotlb;
 430
 431        if (imrc->attrs_to_index) {
 432            iommu_idx = imrc->attrs_to_index(iommu_mr, attrs);
 433        }
 434
 435        iotlb = imrc->translate(iommu_mr, addr, is_write ?
 436                                IOMMU_WO : IOMMU_RO, iommu_idx);
 437
 438        if (!(iotlb.perm & (1 << is_write))) {
 439            goto unassigned;
 440        }
 441
 442        addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
 443                | (addr & iotlb.addr_mask));
 444        page_mask &= iotlb.addr_mask;
 445        *plen_out = MIN(*plen_out, (addr | iotlb.addr_mask) - addr + 1);
 446        *target_as = iotlb.target_as;
 447
 448        section = address_space_translate_internal(
 449                address_space_to_dispatch(iotlb.target_as), addr, xlat,
 450                plen_out, is_mmio);
 451
 452        iommu_mr = memory_region_get_iommu(section->mr);
 453    } while (unlikely(iommu_mr));
 454
 455    if (page_mask_out) {
 456        *page_mask_out = page_mask;
 457    }
 458    return *section;
 459
 460unassigned:
 461    return (MemoryRegionSection) { .mr = &io_mem_unassigned };
 462}
 463
 464/**
 465 * flatview_do_translate - translate an address in FlatView
 466 *
 467 * @fv: the flat view that we want to translate on
 468 * @addr: the address to be translated in above address space
 469 * @xlat: the translated address offset within memory region. It
 470 *        cannot be %NULL.
 471 * @plen_out: valid read/write length of the translated address. It
 472 *            can be %NULL when we don't care about it.
 473 * @page_mask_out: page mask for the translated address. This is
 474 *            only meaningful for IOMMU-translated addresses, since
 475 *            the IOMMU may map huge pages, which the mask reflects.
 476 *            It can be %NULL if we don't care about it.
 477 * @is_write: whether the translation operation is for write
 478 * @is_mmio: whether this can be MMIO, set true if it can
 479 * @target_as: the address space targeted by the IOMMU
 480 * @attrs: memory transaction attributes
 481 *
 482 * This function is called from RCU critical section
 483 */
 484static MemoryRegionSection flatview_do_translate(FlatView *fv,
 485                                                 hwaddr addr,
 486                                                 hwaddr *xlat,
 487                                                 hwaddr *plen_out,
 488                                                 hwaddr *page_mask_out,
 489                                                 bool is_write,
 490                                                 bool is_mmio,
 491                                                 AddressSpace **target_as,
 492                                                 MemTxAttrs attrs)
 493{
 494    MemoryRegionSection *section;
 495    IOMMUMemoryRegion *iommu_mr;
 496    hwaddr plen = (hwaddr)(-1);
 497
 498    if (!plen_out) {
 499        plen_out = &plen;
 500    }
 501
 502    section = address_space_translate_internal(
 503            flatview_to_dispatch(fv), addr, xlat,
 504            plen_out, is_mmio);
 505
 506    iommu_mr = memory_region_get_iommu(section->mr);
 507    if (unlikely(iommu_mr)) {
 508        return address_space_translate_iommu(iommu_mr, xlat,
 509                                             plen_out, page_mask_out,
 510                                             is_write, is_mmio,
 511                                             target_as, attrs);
 512    }
 513    if (page_mask_out) {
 514        /* Not behind an IOMMU, use default page size. */
 515        *page_mask_out = ~TARGET_PAGE_MASK;
 516    }
 517
 518    return *section;
 519}
 520
 521/* Called from RCU critical section */
 522IOMMUTLBEntry address_space_get_iotlb_entry(AddressSpace *as, hwaddr addr,
 523                                            bool is_write, MemTxAttrs attrs)
 524{
 525    MemoryRegionSection section;
 526    hwaddr xlat, page_mask;
 527
 528    /*
 529     * This can never be MMIO; we don't care about plen here,
 530     * only about the page mask.
 531     */
 532    section = flatview_do_translate(address_space_to_flatview(as), addr, &xlat,
 533                                    NULL, &page_mask, is_write, false, &as,
 534                                    attrs);
 535
 536    /* Illegal translation */
 537    if (section.mr == &io_mem_unassigned) {
 538        goto iotlb_fail;
 539    }
 540
 541    /* Convert memory region offset into address space offset */
 542    xlat += section.offset_within_address_space -
 543        section.offset_within_region;
 544
 545    return (IOMMUTLBEntry) {
 546        .target_as = as,
 547        .iova = addr & ~page_mask,
 548        .translated_addr = xlat & ~page_mask,
 549        .addr_mask = page_mask,
 550        /* IOTLBs are for DMA, and DMA is only allowed on RAM. */
 551        .perm = IOMMU_RW,
 552    };
 553
 554iotlb_fail:
 555    return (IOMMUTLBEntry) {0};
 556}
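
/*
 * Usage sketch (illustrative, not compiled; helper name and IOVA are
 * hypothetical): resolve the IOMMU mapping covering a guest IOVA, the
 * way a vhost-style caller might.  The call must run inside an RCU
 * critical section, hence the guard.
 */
#if 0
static bool example_lookup_iova(AddressSpace *as, hwaddr iova)
{
    IOMMUTLBEntry entry;

    RCU_READ_LOCK_GUARD();
    entry = address_space_get_iotlb_entry(as, iova, true,
                                          MEMTXATTRS_UNSPECIFIED);
    if (entry.perm == IOMMU_NONE) {
        return false;   /* no valid mapping for this IOVA */
    }
    /* entry.translated_addr | (iova & entry.addr_mask) is the target */
    return true;
}
#endif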
 557
 558/* Called from RCU critical section */
 559MemoryRegion *flatview_translate(FlatView *fv, hwaddr addr, hwaddr *xlat,
 560                                 hwaddr *plen, bool is_write,
 561                                 MemTxAttrs attrs)
 562{
 563    MemoryRegion *mr;
 564    MemoryRegionSection section;
 565    AddressSpace *as = NULL;
 566
 567    /* This can be MMIO, so set the MMIO bit. */
 568    section = flatview_do_translate(fv, addr, xlat, plen, NULL,
 569                                    is_write, true, &as, attrs);
 570    mr = section.mr;
 571
 572    if (xen_enabled() && memory_access_is_direct(mr, is_write)) {
 573        hwaddr page = ((addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE) - addr;
 574        *plen = MIN(page, *plen);
 575    }
 576
 577    return mr;
 578}
 579
 580typedef struct TCGIOMMUNotifier {
 581    IOMMUNotifier n;
 582    MemoryRegion *mr;
 583    CPUState *cpu;
 584    int iommu_idx;
 585    bool active;
 586} TCGIOMMUNotifier;
 587
 588static void tcg_iommu_unmap_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb)
 589{
 590    TCGIOMMUNotifier *notifier = container_of(n, TCGIOMMUNotifier, n);
 591
 592    if (!notifier->active) {
 593        return;
 594    }
 595    tlb_flush(notifier->cpu);
 596    notifier->active = false;
 597    /* We leave the notifier struct on the list to avoid reallocating it later.
 598     * Generally the number of IOMMUs a CPU deals with will be small.
 599     * In any case we can't unregister the iommu notifier from a notify
 600     * callback.
 601     */
 602}
 603
 604static void tcg_register_iommu_notifier(CPUState *cpu,
 605                                        IOMMUMemoryRegion *iommu_mr,
 606                                        int iommu_idx)
 607{
 608    /* Make sure this CPU has an IOMMU notifier registered for this
 609     * IOMMU/IOMMU index combination, so that we can flush its TLB
 610     * when the IOMMU tells us the mappings we've cached have changed.
 611     */
 612    MemoryRegion *mr = MEMORY_REGION(iommu_mr);
 613    TCGIOMMUNotifier *notifier = NULL;
 614    int i;
 615
 616    for (i = 0; i < cpu->iommu_notifiers->len; i++) {
 617        notifier = g_array_index(cpu->iommu_notifiers, TCGIOMMUNotifier *, i);
 618        if (notifier->mr == mr && notifier->iommu_idx == iommu_idx) {
 619            break;
 620        }
 621    }
 622    if (i == cpu->iommu_notifiers->len) {
 623        /* Not found, add a new entry at the end of the array */
 624        cpu->iommu_notifiers = g_array_set_size(cpu->iommu_notifiers, i + 1);
 625        notifier = g_new0(TCGIOMMUNotifier, 1);
 626        g_array_index(cpu->iommu_notifiers, TCGIOMMUNotifier *, i) = notifier;
 627
 628        notifier->mr = mr;
 629        notifier->iommu_idx = iommu_idx;
 630        notifier->cpu = cpu;
 631        /* Rather than trying to register interest in the specific part
 632         * of the iommu's address space that we've accessed and then
 633         * expand it later as subsequent accesses touch more of it, we
 634         * just register interest in the whole thing, on the assumption
 635         * that iommu reconfiguration will be rare.
 636         */
 637        iommu_notifier_init(&notifier->n,
 638                            tcg_iommu_unmap_notify,
 639                            IOMMU_NOTIFIER_UNMAP,
 640                            0,
 641                            HWADDR_MAX,
 642                            iommu_idx);
 643        memory_region_register_iommu_notifier(notifier->mr, &notifier->n,
 644                                              &error_fatal);
 645    }
 646
 647    if (!notifier->active) {
 648        notifier->active = true;
 649    }
 650}
 651
 652void tcg_iommu_free_notifier_list(CPUState *cpu)
 653{
 654    /* Destroy the CPU's notifier list */
 655    int i;
 656    TCGIOMMUNotifier *notifier;
 657
 658    for (i = 0; i < cpu->iommu_notifiers->len; i++) {
 659        notifier = g_array_index(cpu->iommu_notifiers, TCGIOMMUNotifier *, i);
 660        memory_region_unregister_iommu_notifier(notifier->mr, &notifier->n);
 661        g_free(notifier);
 662    }
 663    g_array_free(cpu->iommu_notifiers, true);
 664}
 665
 666void tcg_iommu_init_notifier_list(CPUState *cpu)
 667{
 668    cpu->iommu_notifiers = g_array_new(false, true, sizeof(TCGIOMMUNotifier *));
 669}
 670
 671/* Called from RCU critical section */
 672MemoryRegionSection *
 673address_space_translate_for_iotlb(CPUState *cpu, int asidx, hwaddr addr,
 674                                  hwaddr *xlat, hwaddr *plen,
 675                                  MemTxAttrs attrs, int *prot)
 676{
 677    MemoryRegionSection *section;
 678    IOMMUMemoryRegion *iommu_mr;
 679    IOMMUMemoryRegionClass *imrc;
 680    IOMMUTLBEntry iotlb;
 681    int iommu_idx;
 682    AddressSpaceDispatch *d =
 683        qatomic_rcu_read(&cpu->cpu_ases[asidx].memory_dispatch);
 684
 685    for (;;) {
 686        section = address_space_translate_internal(d, addr, &addr, plen, false);
 687
 688        iommu_mr = memory_region_get_iommu(section->mr);
 689        if (!iommu_mr) {
 690            break;
 691        }
 692
 693        imrc = memory_region_get_iommu_class_nocheck(iommu_mr);
 694
 695        iommu_idx = imrc->attrs_to_index(iommu_mr, attrs);
 696        tcg_register_iommu_notifier(cpu, iommu_mr, iommu_idx);
 697        /* We need all the permissions, so pass IOMMU_NONE so the IOMMU
 698         * doesn't short-cut its translation table walk.
 699         */
 700        iotlb = imrc->translate(iommu_mr, addr, IOMMU_NONE, iommu_idx);
 701        addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
 702                | (addr & iotlb.addr_mask));
 703        /* Update the caller's prot bits to remove permissions the IOMMU
 704         * is giving us a failure response for. If we get down to no
 705         * permissions left at all we can give up now.
 706         */
 707        if (!(iotlb.perm & IOMMU_RO)) {
 708            *prot &= ~(PAGE_READ | PAGE_EXEC);
 709        }
 710        if (!(iotlb.perm & IOMMU_WO)) {
 711            *prot &= ~PAGE_WRITE;
 712        }
 713
 714        if (!*prot) {
 715            goto translate_fail;
 716        }
 717
 718        d = flatview_to_dispatch(address_space_to_flatview(iotlb.target_as));
 719    }
 720
 721    assert(!memory_region_is_iommu(section->mr));
 722    *xlat = addr;
 723    return section;
 724
 725translate_fail:
 726    return &d->map.sections[PHYS_SECTION_UNASSIGNED];
 727}
 728
 729void cpu_address_space_init(CPUState *cpu, int asidx,
 730                            const char *prefix, MemoryRegion *mr)
 731{
 732    CPUAddressSpace *newas;
 733    AddressSpace *as = g_new0(AddressSpace, 1);
 734    char *as_name;
 735
 736    assert(mr);
 737    as_name = g_strdup_printf("%s-%d", prefix, cpu->cpu_index);
 738    address_space_init(as, mr, as_name);
 739    g_free(as_name);
 740
 741    /* Target code should have set num_ases before calling us */
 742    assert(asidx < cpu->num_ases);
 743
 744    if (asidx == 0) {
 745        /* address space 0 gets the convenience alias */
 746        cpu->as = as;
 747    }
 748
 749    /* KVM cannot currently support multiple address spaces. */
 750    assert(asidx == 0 || !kvm_enabled());
 751
 752    if (!cpu->cpu_ases) {
 753        cpu->cpu_ases = g_new0(CPUAddressSpace, cpu->num_ases);
 754    }
 755
 756    newas = &cpu->cpu_ases[asidx];
 757    newas->cpu = cpu;
 758    newas->as = as;
 759    if (tcg_enabled()) {
 760        newas->tcg_as_listener.log_global_after_sync = tcg_log_global_after_sync;
 761        newas->tcg_as_listener.commit = tcg_commit;
 762        memory_listener_register(&newas->tcg_as_listener, as);
 763    }
 764}
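
/*
 * Minimal usage sketch (illustrative, not compiled; names are
 * hypothetical): a target modelling two views of memory sets num_ases
 * before realize and registers one AddressSpace per index.  Index 0
 * also becomes the cpu->as convenience alias; indexes above 0 are only
 * valid without KVM, as the assert above enforces.
 */
#if 0
static void example_init_cpu_ases(CPUState *cs, MemoryRegion *normal_mem,
                                  MemoryRegion *secure_mem)
{
    cs->num_ases = 2;
    cpu_address_space_init(cs, 0, "cpu-memory", normal_mem);
    cpu_address_space_init(cs, 1, "cpu-secure-memory", secure_mem);
}
#endif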
 765
 766AddressSpace *cpu_get_address_space(CPUState *cpu, int asidx)
 767{
 768    /* Return the AddressSpace corresponding to the specified index */
 769    return cpu->cpu_ases[asidx].as;
 770}
 771
 772/* Add a watchpoint.  */
 773int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
 774                          int flags, CPUWatchpoint **watchpoint)
 775{
 776    CPUWatchpoint *wp;
 777    vaddr in_page;
 778
 779    /* forbid ranges which are empty or run off the end of the address space */
 780    if (len == 0 || (addr + len - 1) < addr) {
 781        error_report("tried to set invalid watchpoint at %"
 782                     VADDR_PRIx ", len=%" VADDR_PRIu, addr, len);
 783        return -EINVAL;
 784    }
 785    wp = g_malloc(sizeof(*wp));
 786
 787    wp->vaddr = addr;
 788    wp->len = len;
 789    wp->flags = flags;
 790
 791    /* keep all GDB-injected watchpoints in front */
 792    if (flags & BP_GDB) {
 793        QTAILQ_INSERT_HEAD(&cpu->watchpoints, wp, entry);
 794    } else {
 795        QTAILQ_INSERT_TAIL(&cpu->watchpoints, wp, entry);
 796    }
 797
 798    in_page = -(addr | TARGET_PAGE_MASK);
 799    if (len <= in_page) {
 800        tlb_flush_page(cpu, addr);
 801    } else {
 802        tlb_flush(cpu);
 803    }
 804
 805    if (watchpoint)
 806        *watchpoint = wp;
 807    return 0;
 808}
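
/*
 * Usage sketch (illustrative, not compiled; helper name and length are
 * hypothetical): watch four bytes of guest virtual address space for
 * writes and keep the reference so the watchpoint can be removed later.
 */
#if 0
static int example_watch_writes(CPUState *cpu, vaddr guest_va)
{
    CPUWatchpoint *wp = NULL;
    int ret = cpu_watchpoint_insert(cpu, guest_va, 4,
                                    BP_MEM_WRITE | BP_STOP_BEFORE_ACCESS,
                                    &wp);

    if (ret == 0) {
        /* ... and once the caller is done with it ... */
        cpu_watchpoint_remove_by_ref(cpu, wp);
    }
    return ret;
}
#endif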
 809
 810/* Remove a specific watchpoint.  */
 811int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
 812                          int flags)
 813{
 814    CPUWatchpoint *wp;
 815
 816    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
 817        if (addr == wp->vaddr && len == wp->len
 818                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
 819            cpu_watchpoint_remove_by_ref(cpu, wp);
 820            return 0;
 821        }
 822    }
 823    return -ENOENT;
 824}
 825
 826/* Remove a specific watchpoint by reference.  */
 827void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
 828{
 829    QTAILQ_REMOVE(&cpu->watchpoints, watchpoint, entry);
 830
 831    tlb_flush_page(cpu, watchpoint->vaddr);
 832
 833    g_free(watchpoint);
 834}
 835
 836/* Remove all matching watchpoints.  */
 837void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
 838{
 839    CPUWatchpoint *wp, *next;
 840
 841    QTAILQ_FOREACH_SAFE(wp, &cpu->watchpoints, entry, next) {
 842        if (wp->flags & mask) {
 843            cpu_watchpoint_remove_by_ref(cpu, wp);
 844        }
 845    }
 846}
 847
 848#ifdef CONFIG_TCG
 849/* Return true if this watchpoint address matches the specified
 850 * access (i.e. the address range covered by the watchpoint overlaps
 851 * partially or completely with the address range covered by the
 852 * access).
 853 */
 854static inline bool watchpoint_address_matches(CPUWatchpoint *wp,
 855                                              vaddr addr, vaddr len)
 856{
 857    /* We know the lengths are non-zero, but a little caution is
 858     * required to avoid errors in the case where the range ends
 859     * exactly at the top of the address space and so addr + len
 860     * wraps round to zero.
 861     */
 862    vaddr wpend = wp->vaddr + wp->len - 1;
 863    vaddr addrend = addr + len - 1;
 864
 865    return !(addr > wpend || wp->vaddr > addrend);
 866}
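
/*
 * Concrete example (illustrative): a 4-byte watchpoint at 0x1000 covers
 * [0x1000, 0x1003], so an 8-byte access at 0x0ffc (covering
 * [0x0ffc, 0x1003]) matches, while a 4-byte access at 0x1004 does not.
 */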
 867
 868/* Return the flags of all watchpoints that overlap the addr/len range.  */
 869int cpu_watchpoint_address_matches(CPUState *cpu, vaddr addr, vaddr len)
 870{
 871    CPUWatchpoint *wp;
 872    int ret = 0;
 873
 874    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
 875        if (watchpoint_address_matches(wp, addr, len)) {
 876            ret |= wp->flags;
 877        }
 878    }
 879    return ret;
 880}
 881
 882/* Generate a debug exception if a watchpoint has been hit.  */
 883void cpu_check_watchpoint(CPUState *cpu, vaddr addr, vaddr len,
 884                          MemTxAttrs attrs, int flags, uintptr_t ra)
 885{
 886    CPUClass *cc = CPU_GET_CLASS(cpu);
 887    CPUWatchpoint *wp;
 888
 889    assert(tcg_enabled());
 890    if (cpu->watchpoint_hit) {
 891        /*
 892         * We re-entered the check after replacing the TB.
 893         * Now raise the debug interrupt so that it will
 894         * trigger after the current instruction.
 895         */
 896        qemu_mutex_lock_iothread();
 897        cpu_interrupt(cpu, CPU_INTERRUPT_DEBUG);
 898        qemu_mutex_unlock_iothread();
 899        return;
 900    }
 901
 902    if (cc->tcg_ops->adjust_watchpoint_address) {
 903        /* this is currently used only by ARM BE32 */
 904        addr = cc->tcg_ops->adjust_watchpoint_address(cpu, addr, len);
 905    }
 906    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
 907        if (watchpoint_address_matches(wp, addr, len)
 908            && (wp->flags & flags)) {
 909            if (replay_running_debug()) {
 910                /*
 911                 * Don't process the watchpoints when we are
 912                 * in a reverse debugging operation.
 913                 */
 914                replay_breakpoint();
 915                return;
 916            }
 917            if (flags == BP_MEM_READ) {
 918                wp->flags |= BP_WATCHPOINT_HIT_READ;
 919            } else {
 920                wp->flags |= BP_WATCHPOINT_HIT_WRITE;
 921            }
 922            wp->hitaddr = MAX(addr, wp->vaddr);
 923            wp->hitattrs = attrs;
 924            if (!cpu->watchpoint_hit) {
 925                if (wp->flags & BP_CPU && cc->tcg_ops->debug_check_watchpoint &&
 926                    !cc->tcg_ops->debug_check_watchpoint(cpu, wp)) {
 927                    wp->flags &= ~BP_WATCHPOINT_HIT;
 928                    continue;
 929                }
 930                cpu->watchpoint_hit = wp;
 931
 932                mmap_lock();
 933                tb_check_watchpoint(cpu, ra);
 934                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
 935                    cpu->exception_index = EXCP_DEBUG;
 936                    mmap_unlock();
 937                    cpu_loop_exit_restore(cpu, ra);
 938                } else {
 939                    /* Force execution of one insn next time.  */
 940                    cpu->cflags_next_tb = 1 | curr_cflags(cpu);
 941                    mmap_unlock();
 942                    if (ra) {
 943                        cpu_restore_state(cpu, ra, true);
 944                    }
 945                    cpu_loop_exit_noexc(cpu);
 946                }
 947            }
 948        } else {
 949            wp->flags &= ~BP_WATCHPOINT_HIT;
 950        }
 951    }
 952}
 953
 954#endif /* CONFIG_TCG */
 955
 956/* Called from RCU critical section */
 957static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
 958{
 959    RAMBlock *block;
 960
 961    block = qatomic_rcu_read(&ram_list.mru_block);
 962    if (block && addr - block->offset < block->max_length) {
 963        return block;
 964    }
 965    RAMBLOCK_FOREACH(block) {
 966        if (addr - block->offset < block->max_length) {
 967            goto found;
 968        }
 969    }
 970
 971    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
 972    abort();
 973
 974found:
 975    /* It is safe to write mru_block outside the iothread lock.  This
 976     * is what happens:
 977     *
 978     *     mru_block = xxx
 979     *     rcu_read_unlock()
 980     *                                        xxx removed from list
 981     *                  rcu_read_lock()
 982     *                  read mru_block
 983     *                                        mru_block = NULL;
 984     *                                        call_rcu(reclaim_ramblock, xxx);
 985     *                  rcu_read_unlock()
 986     *
 987     * qatomic_rcu_set is not needed here.  The block was already published
 988     * when it was placed into the list.  Here we're just making an extra
 989     * copy of the pointer.
 990     */
 991    ram_list.mru_block = block;
 992    return block;
 993}
 994
 995static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t length)
 996{
 997    CPUState *cpu;
 998    ram_addr_t start1;
 999    RAMBlock *block;
1000    ram_addr_t end;
1001
1002    assert(tcg_enabled());
1003    end = TARGET_PAGE_ALIGN(start + length);
1004    start &= TARGET_PAGE_MASK;
1005
1006    RCU_READ_LOCK_GUARD();
1007    block = qemu_get_ram_block(start);
1008    assert(block == qemu_get_ram_block(end - 1));
1009    start1 = (uintptr_t)ramblock_ptr(block, start - block->offset);
1010    CPU_FOREACH(cpu) {
1011        tlb_reset_dirty(cpu, start1, length);
1012    }
1013}
1014
1015/* Note: start and end must be within the same ram block.  */
1016bool cpu_physical_memory_test_and_clear_dirty(ram_addr_t start,
1017                                              ram_addr_t length,
1018                                              unsigned client)
1019{
1020    DirtyMemoryBlocks *blocks;
1021    unsigned long end, page, start_page;
1022    bool dirty = false;
1023    RAMBlock *ramblock;
1024    uint64_t mr_offset, mr_size;
1025
1026    if (length == 0) {
1027        return false;
1028    }
1029
1030    end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
1031    start_page = start >> TARGET_PAGE_BITS;
1032    page = start_page;
1033
1034    WITH_RCU_READ_LOCK_GUARD() {
1035        blocks = qatomic_rcu_read(&ram_list.dirty_memory[client]);
1036        ramblock = qemu_get_ram_block(start);
1037        /* Range sanity check on the ramblock */
1038        assert(start >= ramblock->offset &&
1039               start + length <= ramblock->offset + ramblock->used_length);
1040
1041        while (page < end) {
1042            unsigned long idx = page / DIRTY_MEMORY_BLOCK_SIZE;
1043            unsigned long offset = page % DIRTY_MEMORY_BLOCK_SIZE;
1044            unsigned long num = MIN(end - page,
1045                                    DIRTY_MEMORY_BLOCK_SIZE - offset);
1046
1047            dirty |= bitmap_test_and_clear_atomic(blocks->blocks[idx],
1048                                                  offset, num);
1049            page += num;
1050        }
1051
1052        mr_offset = (ram_addr_t)(start_page << TARGET_PAGE_BITS) - ramblock->offset;
1053        mr_size = (end - start_page) << TARGET_PAGE_BITS;
1054        memory_region_clear_dirty_bitmap(ramblock->mr, mr_offset, mr_size);
1055    }
1056
1057    if (dirty && tcg_enabled()) {
1058        tlb_reset_dirty_range_all(start, length);
1059    }
1060
1061    return dirty;
1062}
1063
1064DirtyBitmapSnapshot *cpu_physical_memory_snapshot_and_clear_dirty
1065    (MemoryRegion *mr, hwaddr offset, hwaddr length, unsigned client)
1066{
1067    DirtyMemoryBlocks *blocks;
1068    ram_addr_t start = memory_region_get_ram_addr(mr) + offset;
1069    unsigned long align = 1UL << (TARGET_PAGE_BITS + BITS_PER_LEVEL);
1070    ram_addr_t first = QEMU_ALIGN_DOWN(start, align);
1071    ram_addr_t last  = QEMU_ALIGN_UP(start + length, align);
1072    DirtyBitmapSnapshot *snap;
1073    unsigned long page, end, dest;
1074
1075    snap = g_malloc0(sizeof(*snap) +
1076                     ((last - first) >> (TARGET_PAGE_BITS + 3)));
1077    snap->start = first;
1078    snap->end   = last;
1079
1080    page = first >> TARGET_PAGE_BITS;
1081    end  = last  >> TARGET_PAGE_BITS;
1082    dest = 0;
1083
1084    WITH_RCU_READ_LOCK_GUARD() {
1085        blocks = qatomic_rcu_read(&ram_list.dirty_memory[client]);
1086
1087        while (page < end) {
1088            unsigned long idx = page / DIRTY_MEMORY_BLOCK_SIZE;
1089            unsigned long offset = page % DIRTY_MEMORY_BLOCK_SIZE;
1090            unsigned long num = MIN(end - page,
1091                                    DIRTY_MEMORY_BLOCK_SIZE - offset);
1092
1093            assert(QEMU_IS_ALIGNED(offset, (1 << BITS_PER_LEVEL)));
1094            assert(QEMU_IS_ALIGNED(num,    (1 << BITS_PER_LEVEL)));
1095            offset >>= BITS_PER_LEVEL;
1096
1097            bitmap_copy_and_clear_atomic(snap->dirty + dest,
1098                                         blocks->blocks[idx] + offset,
1099                                         num);
1100            page += num;
1101            dest += num >> BITS_PER_LEVEL;
1102        }
1103    }
1104
1105    if (tcg_enabled()) {
1106        tlb_reset_dirty_range_all(start, length);
1107    }
1108
1109    memory_region_clear_dirty_bitmap(mr, offset, length);
1110
1111    return snap;
1112}
1113
1114bool cpu_physical_memory_snapshot_get_dirty(DirtyBitmapSnapshot *snap,
1115                                            ram_addr_t start,
1116                                            ram_addr_t length)
1117{
1118    unsigned long page, end;
1119
1120    assert(start >= snap->start);
1121    assert(start + length <= snap->end);
1122
1123    end = TARGET_PAGE_ALIGN(start + length - snap->start) >> TARGET_PAGE_BITS;
1124    page = (start - snap->start) >> TARGET_PAGE_BITS;
1125
1126    while (page < end) {
1127        if (test_bit(page, snap->dirty)) {
1128            return true;
1129        }
1130        page++;
1131    }
1132    return false;
1133}
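
/*
 * Usage sketch (illustrative, not compiled; helper name and offset are
 * hypothetical): snapshot the VGA dirty state of one page of a
 * RAM-backed MemoryRegion and test whether it was written.
 */
#if 0
static bool example_page_was_written(MemoryRegion *mr, hwaddr page_off)
{
    DirtyBitmapSnapshot *snap;
    bool dirty;

    snap = cpu_physical_memory_snapshot_and_clear_dirty(mr, page_off,
                                                        TARGET_PAGE_SIZE,
                                                        DIRTY_MEMORY_VGA);
    dirty = cpu_physical_memory_snapshot_get_dirty(snap,
                memory_region_get_ram_addr(mr) + page_off,
                TARGET_PAGE_SIZE);
    g_free(snap);
    return dirty;
}
#endif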
1134
1135/* Called from RCU critical section */
1136hwaddr memory_region_section_get_iotlb(CPUState *cpu,
1137                                       MemoryRegionSection *section)
1138{
1139    AddressSpaceDispatch *d = flatview_to_dispatch(section->fv);
1140    return section - d->map.sections;
1141}
1142
1143static int subpage_register(subpage_t *mmio, uint32_t start, uint32_t end,
1144                            uint16_t section);
1145static subpage_t *subpage_init(FlatView *fv, hwaddr base);
1146
1147static uint16_t phys_section_add(PhysPageMap *map,
1148                                 MemoryRegionSection *section)
1149{
1150    /* The physical section number is ORed with a page-aligned
1151     * pointer to produce the iotlb entries.  Thus it should
1152     * never overflow into the page-aligned value.
1153     */
1154    assert(map->sections_nb < TARGET_PAGE_SIZE);
1155
1156    if (map->sections_nb == map->sections_nb_alloc) {
1157        map->sections_nb_alloc = MAX(map->sections_nb_alloc * 2, 16);
1158        map->sections = g_renew(MemoryRegionSection, map->sections,
1159                                map->sections_nb_alloc);
1160    }
1161    map->sections[map->sections_nb] = *section;
1162    memory_region_ref(section->mr);
1163    return map->sections_nb++;
1164}
1165
1166static void phys_section_destroy(MemoryRegion *mr)
1167{
1168    bool have_sub_page = mr->subpage;
1169
1170    memory_region_unref(mr);
1171
1172    if (have_sub_page) {
1173        subpage_t *subpage = container_of(mr, subpage_t, iomem);
1174        object_unref(OBJECT(&subpage->iomem));
1175        g_free(subpage);
1176    }
1177}
1178
1179static void phys_sections_free(PhysPageMap *map)
1180{
1181    while (map->sections_nb > 0) {
1182        MemoryRegionSection *section = &map->sections[--map->sections_nb];
1183        phys_section_destroy(section->mr);
1184    }
1185    g_free(map->sections);
1186    g_free(map->nodes);
1187}
1188
1189static void register_subpage(FlatView *fv, MemoryRegionSection *section)
1190{
1191    AddressSpaceDispatch *d = flatview_to_dispatch(fv);
1192    subpage_t *subpage;
1193    hwaddr base = section->offset_within_address_space
1194        & TARGET_PAGE_MASK;
1195    MemoryRegionSection *existing = phys_page_find(d, base);
1196    MemoryRegionSection subsection = {
1197        .offset_within_address_space = base,
1198        .size = int128_make64(TARGET_PAGE_SIZE),
1199    };
1200    hwaddr start, end;
1201
1202    assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);
1203
1204    if (!(existing->mr->subpage)) {
1205        subpage = subpage_init(fv, base);
1206        subsection.fv = fv;
1207        subsection.mr = &subpage->iomem;
1208        phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
1209                      phys_section_add(&d->map, &subsection));
1210    } else {
1211        subpage = container_of(existing->mr, subpage_t, iomem);
1212    }
1213    start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
1214    end = start + int128_get64(section->size) - 1;
1215    subpage_register(subpage, start, end,
1216                     phys_section_add(&d->map, section));
1217}
1218
1219
1220static void register_multipage(FlatView *fv,
1221                               MemoryRegionSection *section)
1222{
1223    AddressSpaceDispatch *d = flatview_to_dispatch(fv);
1224    hwaddr start_addr = section->offset_within_address_space;
1225    uint16_t section_index = phys_section_add(&d->map, section);
1226    uint64_t num_pages = int128_get64(int128_rshift(section->size,
1227                                                    TARGET_PAGE_BITS));
1228
1229    assert(num_pages);
1230    phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index);
1231}
1232
1233/*
1234 * The range in *section* may look like this:
1235 *
1236 *      |s|PPPPPPP|s|
1237 *
1238 * where s stands for subpage and P for page.
1239 */
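/*
 * Worked example (illustrative, 4 KiB pages): a section starting at
 * address 0x1800 with size 0x3000 is registered as a head subpage
 * covering [0x1800, 0x1fff], two whole pages covering [0x2000, 0x3fff],
 * and a tail subpage covering [0x4000, 0x47ff].
 */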
1240void flatview_add_to_dispatch(FlatView *fv, MemoryRegionSection *section)
1241{
1242    MemoryRegionSection remain = *section;
1243    Int128 page_size = int128_make64(TARGET_PAGE_SIZE);
1244
1245    /* register first subpage */
1246    if (remain.offset_within_address_space & ~TARGET_PAGE_MASK) {
1247        uint64_t left = TARGET_PAGE_ALIGN(remain.offset_within_address_space)
1248                        - remain.offset_within_address_space;
1249
1250        MemoryRegionSection now = remain;
1251        now.size = int128_min(int128_make64(left), now.size);
1252        register_subpage(fv, &now);
1253        if (int128_eq(remain.size, now.size)) {
1254            return;
1255        }
1256        remain.size = int128_sub(remain.size, now.size);
1257        remain.offset_within_address_space += int128_get64(now.size);
1258        remain.offset_within_region += int128_get64(now.size);
1259    }
1260
1261    /* register whole pages */
1262    if (int128_ge(remain.size, page_size)) {
1263        MemoryRegionSection now = remain;
1264        now.size = int128_and(now.size, int128_neg(page_size));
1265        register_multipage(fv, &now);
1266        if (int128_eq(remain.size, now.size)) {
1267            return;
1268        }
1269        remain.size = int128_sub(remain.size, now.size);
1270        remain.offset_within_address_space += int128_get64(now.size);
1271        remain.offset_within_region += int128_get64(now.size);
1272    }
1273
1274    /* register last subpage */
1275    register_subpage(fv, &remain);
1276}
1277
1278void qemu_flush_coalesced_mmio_buffer(void)
1279{
1280    if (kvm_enabled())
1281        kvm_flush_coalesced_mmio_buffer();
1282}
1283
1284void qemu_mutex_lock_ramlist(void)
1285{
1286    qemu_mutex_lock(&ram_list.mutex);
1287}
1288
1289void qemu_mutex_unlock_ramlist(void)
1290{
1291    qemu_mutex_unlock(&ram_list.mutex);
1292}
1293
1294void ram_block_dump(Monitor *mon)
1295{
1296    RAMBlock *block;
1297    char *psize;
1298
1299    RCU_READ_LOCK_GUARD();
1300    monitor_printf(mon, "%24s %8s  %18s %18s %18s\n",
1301                   "Block Name", "PSize", "Offset", "Used", "Total");
1302    RAMBLOCK_FOREACH(block) {
1303        psize = size_to_str(block->page_size);
1304        monitor_printf(mon, "%24s %8s  0x%016" PRIx64 " 0x%016" PRIx64
1305                       " 0x%016" PRIx64 "\n", block->idstr, psize,
1306                       (uint64_t)block->offset,
1307                       (uint64_t)block->used_length,
1308                       (uint64_t)block->max_length);
1309        g_free(psize);
1310    }
1311}
1312
1313#ifdef __linux__
1314/*
1315 * FIXME TOCTTOU: this iterates over memory backends' mem-path, which
1316 * may or may not name the same files / on the same filesystem now as
1317 * when we actually open and map them.  Iterate over the file
1318 * descriptors instead, and use qemu_fd_getpagesize().
1319 */
1320static int find_min_backend_pagesize(Object *obj, void *opaque)
1321{
1322    long *hpsize_min = opaque;
1323
1324    if (object_dynamic_cast(obj, TYPE_MEMORY_BACKEND)) {
1325        HostMemoryBackend *backend = MEMORY_BACKEND(obj);
1326        long hpsize = host_memory_backend_pagesize(backend);
1327
1328        if (host_memory_backend_is_mapped(backend) && (hpsize < *hpsize_min)) {
1329            *hpsize_min = hpsize;
1330        }
1331    }
1332
1333    return 0;
1334}
1335
1336static int find_max_backend_pagesize(Object *obj, void *opaque)
1337{
1338    long *hpsize_max = opaque;
1339
1340    if (object_dynamic_cast(obj, TYPE_MEMORY_BACKEND)) {
1341        HostMemoryBackend *backend = MEMORY_BACKEND(obj);
1342        long hpsize = host_memory_backend_pagesize(backend);
1343
1344        if (host_memory_backend_is_mapped(backend) && (hpsize > *hpsize_max)) {
1345            *hpsize_max = hpsize;
1346        }
1347    }
1348
1349    return 0;
1350}
1351
1352/*
1353 * TODO: We assume right now that all mapped host memory backends are
1354 * used as RAM, however some might be used for different purposes.
1355 */
1356long qemu_minrampagesize(void)
1357{
1358    long hpsize = LONG_MAX;
1359    Object *memdev_root = object_resolve_path("/objects", NULL);
1360
1361    object_child_foreach(memdev_root, find_min_backend_pagesize, &hpsize);
1362    return hpsize;
1363}
1364
1365long qemu_maxrampagesize(void)
1366{
1367    long pagesize = 0;
1368    Object *memdev_root = object_resolve_path("/objects", NULL);
1369
1370    object_child_foreach(memdev_root, find_max_backend_pagesize, &pagesize);
1371    return pagesize;
1372}
1373#else
1374long qemu_minrampagesize(void)
1375{
1376    return qemu_real_host_page_size;
1377}
1378long qemu_maxrampagesize(void)
1379{
1380    return qemu_real_host_page_size;
1381}
1382#endif
1383
1384#ifdef CONFIG_POSIX
1385static int64_t get_file_size(int fd)
1386{
1387    int64_t size;
1388#if defined(__linux__)
1389    struct stat st;
1390
1391    if (fstat(fd, &st) < 0) {
1392        return -errno;
1393    }
1394
1395    /* Special handling for devdax character devices */
1396    if (S_ISCHR(st.st_mode)) {
1397        g_autofree char *subsystem_path = NULL;
1398        g_autofree char *subsystem = NULL;
1399
1400        subsystem_path = g_strdup_printf("/sys/dev/char/%d:%d/subsystem",
1401                                         major(st.st_rdev), minor(st.st_rdev));
1402        subsystem = g_file_read_link(subsystem_path, NULL);
1403
1404        if (subsystem && g_str_has_suffix(subsystem, "/dax")) {
1405            g_autofree char *size_path = NULL;
1406            g_autofree char *size_str = NULL;
1407
1408            size_path = g_strdup_printf("/sys/dev/char/%d:%d/size",
1409                                    major(st.st_rdev), minor(st.st_rdev));
1410
1411            if (g_file_get_contents(size_path, &size_str, NULL, NULL)) {
1412                return g_ascii_strtoll(size_str, NULL, 0);
1413            }
1414        }
1415    }
1416#endif /* defined(__linux__) */
1417
1418    /* st.st_size may be zero for special files yet lseek(2) works */
1419    size = lseek(fd, 0, SEEK_END);
1420    if (size < 0) {
1421        return -errno;
1422    }
1423    return size;
1424}
1425
1426static int64_t get_file_align(int fd)
1427{
1428    int64_t align = -1;
1429#if defined(__linux__) && defined(CONFIG_LIBDAXCTL)
1430    struct stat st;
1431
1432    if (fstat(fd, &st) < 0) {
1433        return -errno;
1434    }
1435
1436    /* Special handling for devdax character devices */
1437    if (S_ISCHR(st.st_mode)) {
1438        g_autofree char *path = NULL;
1439        g_autofree char *rpath = NULL;
1440        struct daxctl_ctx *ctx;
1441        struct daxctl_region *region;
1442        int rc = 0;
1443
1444        path = g_strdup_printf("/sys/dev/char/%d:%d",
1445                    major(st.st_rdev), minor(st.st_rdev));
1446        rpath = realpath(path, NULL);
1447
1448        rc = daxctl_new(&ctx);
1449        if (rc) {
1450            return -1;
1451        }
1452
1453        daxctl_region_foreach(ctx, region) {
1454            if (strstr(rpath, daxctl_region_get_path(region))) {
1455                align = daxctl_region_get_align(region);
1456                break;
1457            }
1458        }
1459        daxctl_unref(ctx);
1460    }
1461#endif /* defined(__linux__) && defined(CONFIG_LIBDAXCTL) */
1462
1463    return align;
1464}
1465
1466static int file_ram_open(const char *path,
1467                         const char *region_name,
1468                         bool readonly,
1469                         bool *created,
1470                         Error **errp)
1471{
1472    char *filename;
1473    char *sanitized_name;
1474    char *c;
1475    int fd = -1;
1476
1477    *created = false;
1478    for (;;) {
1479        fd = open(path, readonly ? O_RDONLY : O_RDWR);
1480        if (fd >= 0) {
1481            /* @path names an existing file, use it */
1482            break;
1483        }
1484        if (errno == ENOENT) {
1485            /* @path names a file that doesn't exist, create it */
1486            fd = open(path, O_RDWR | O_CREAT | O_EXCL, 0644);
1487            if (fd >= 0) {
1488                *created = true;
1489                break;
1490            }
1491        } else if (errno == EISDIR) {
1492            /* @path names a directory, create a file there */
1493            /* Make name safe to use with mkstemp by replacing '/' with '_'. */
1494            sanitized_name = g_strdup(region_name);
1495            for (c = sanitized_name; *c != '\0'; c++) {
1496                if (*c == '/') {
1497                    *c = '_';
1498                }
1499            }
1500
1501            filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
1502                                       sanitized_name);
1503            g_free(sanitized_name);
1504
1505            fd = mkstemp(filename);
1506            if (fd >= 0) {
1507                unlink(filename);
1508                g_free(filename);
1509                break;
1510            }
1511            g_free(filename);
1512        }
1513        if (errno != EEXIST && errno != EINTR) {
1514            error_setg_errno(errp, errno,
1515                             "can't open backing store %s for guest RAM",
1516                             path);
1517            return -1;
1518        }
1519        /*
1520         * Try again on EINTR and EEXIST.  The latter happens when
1521         * something else creates the file between our two open().
1522         */
1523    }
1524
1525    return fd;
1526}
1527
1528static void *file_ram_alloc(RAMBlock *block,
1529                            ram_addr_t memory,
1530                            int fd,
1531                            bool readonly,
1532                            bool truncate,
1533                            off_t offset,
1534                            Error **errp)
1535{
1536    void *area;
1537
1538    block->page_size = qemu_fd_getpagesize(fd);
1539    if (block->mr->align % block->page_size) {
1540        error_setg(errp, "alignment 0x%" PRIx64
1541                   " must be a multiple of page size 0x%zx",
1542                   block->mr->align, block->page_size);
1543        return NULL;
1544    } else if (block->mr->align && !is_power_of_2(block->mr->align)) {
1545        error_setg(errp, "alignment 0x%" PRIx64
1546                   " must be a power of two", block->mr->align);
1547        return NULL;
1548    }
1549    block->mr->align = MAX(block->page_size, block->mr->align);
1550#if defined(__s390x__)
1551    if (kvm_enabled()) {
1552        block->mr->align = MAX(block->mr->align, QEMU_VMALLOC_ALIGN);
1553    }
1554#endif
1555
1556    if (memory < block->page_size) {
1557        error_setg(errp, "memory size 0x" RAM_ADDR_FMT " must be equal to "
1558                   "or larger than page size 0x%zx",
1559                   memory, block->page_size);
1560        return NULL;
1561    }
1562
1563    memory = ROUND_UP(memory, block->page_size);
1564
1565    /*
1566     * ftruncate is not supported by hugetlbfs in older
1567     * hosts, so don't bother bailing out on errors.
1568     * If anything goes wrong with it under other filesystems,
1569     * mmap will fail.
1570     *
1571     * Do not truncate the non-empty backend file to avoid corrupting
1572     * the existing data in the file. Disabling shrinking is not
1573     * enough. For example, the current vNVDIMM implementation stores
1574     * the guest NVDIMM labels at the end of the backend file. If the
1575     * backend file is later extended, QEMU will not be able to find
1576     * those labels. Therefore, extending the non-empty backend file
1577     * is disabled as well.
1578     */
1579    if (truncate && ftruncate(fd, memory)) {
1580        perror("ftruncate");
1581    }
1582
1583    area = qemu_ram_mmap(fd, memory, block->mr->align, readonly,
1584                         block->flags & RAM_SHARED, block->flags & RAM_PMEM,
1585                         offset);
1586    if (area == MAP_FAILED) {
1587        error_setg_errno(errp, errno,
1588                         "unable to map backing store for guest RAM");
1589        return NULL;
1590    }
1591
1592    block->fd = fd;
1593    return area;
1594}
1595#endif
1596
1597/* Allocate space within the ram_addr_t space that governs the
1598 * dirty bitmaps.
1599 * Called with the ramlist lock held.
1600 */
1601static ram_addr_t find_ram_offset(ram_addr_t size)
1602{
1603    RAMBlock *block, *next_block;
1604    ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;
1605
1606    assert(size != 0); /* it would otherwise hand out the same offset multiple times */
1607
1608    if (QLIST_EMPTY_RCU(&ram_list.blocks)) {
1609        return 0;
1610    }
1611
1612    RAMBLOCK_FOREACH(block) {
1613        ram_addr_t candidate, next = RAM_ADDR_MAX;
1614
1615        /* Align blocks to start on a 'long' in the bitmap
1616         * which makes the bitmap sync'ing take the fast path.
1617         */
1618        candidate = block->offset + block->max_length;
1619        candidate = ROUND_UP(candidate, BITS_PER_LONG << TARGET_PAGE_BITS);
1620
1621        /* Search for the closest following block
1622         * and find the gap.
1623         */
1624        RAMBLOCK_FOREACH(next_block) {
1625            if (next_block->offset >= candidate) {
1626                next = MIN(next, next_block->offset);
1627            }
1628        }
1629
1630        /* If it fits, remember our place and the size of the gap,
1631         * but keep going in case there is a smaller gap to fill
1632         * later, which helps avoid fragmentation.
1633         */
1634        if (next - candidate >= size && next - candidate < mingap) {
1635            offset = candidate;
1636            mingap = next - candidate;
1637        }
1638
1639        trace_find_ram_offset_loop(size, candidate, offset, next, mingap);
1640    }
1641
1642    if (offset == RAM_ADDR_MAX) {
1643        fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
1644                (uint64_t)size);
1645        abort();
1646    }
1647
1648    trace_find_ram_offset(size, offset);
1649
1650    return offset;
1651}
1652
1653static unsigned long last_ram_page(void)
1654{
1655    RAMBlock *block;
1656    ram_addr_t last = 0;
1657
1658    RCU_READ_LOCK_GUARD();
1659    RAMBLOCK_FOREACH(block) {
1660        last = MAX(last, block->offset + block->max_length);
1661    }
1662    return last >> TARGET_PAGE_BITS;
1663}
1664
1665static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
1666{
1667    int ret;
1668
1669    /* Use MADV_DONTDUMP if the user doesn't want guest memory in the core dump */
1670    if (!machine_dump_guest_core(current_machine)) {
1671        ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
1672        if (ret) {
1673            perror("qemu_madvise");
1674            fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
1675                            "but dump_guest_core=off was specified\n");
1676        }
1677    }
1678}
1679
1680const char *qemu_ram_get_idstr(RAMBlock *rb)
1681{
1682    return rb->idstr;
1683}
1684
1685void *qemu_ram_get_host_addr(RAMBlock *rb)
1686{
1687    return rb->host;
1688}
1689
1690ram_addr_t qemu_ram_get_offset(RAMBlock *rb)
1691{
1692    return rb->offset;
1693}
1694
1695ram_addr_t qemu_ram_get_used_length(RAMBlock *rb)
1696{
1697    return rb->used_length;
1698}
1699
1700bool qemu_ram_is_shared(RAMBlock *rb)
1701{
1702    return rb->flags & RAM_SHARED;
1703}
1704
1705/* Note: Only set at the start of postcopy */
1706bool qemu_ram_is_uf_zeroable(RAMBlock *rb)
1707{
1708    return rb->flags & RAM_UF_ZEROPAGE;
1709}
1710
1711void qemu_ram_set_uf_zeroable(RAMBlock *rb)
1712{
1713    rb->flags |= RAM_UF_ZEROPAGE;
1714}
1715
1716bool qemu_ram_is_migratable(RAMBlock *rb)
1717{
1718    return rb->flags & RAM_MIGRATABLE;
1719}
1720
1721void qemu_ram_set_migratable(RAMBlock *rb)
1722{
1723    rb->flags |= RAM_MIGRATABLE;
1724}
1725
1726void qemu_ram_unset_migratable(RAMBlock *rb)
1727{
1728    rb->flags &= ~RAM_MIGRATABLE;
1729}
1730
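/*
 * Illustrative sketch only (nothing in this file calls it): how a caller
 * that already holds a RAMBlock pointer, e.g. from a RAMBLOCK_FOREACH()
 * walk or qemu_ram_block_by_name(), might use the accessors above.  The
 * helper name is hypothetical.
 */
static void G_GNUC_UNUSED example_log_ramblock(RAMBlock *rb)
{
    qemu_log("block '%s': offset 0x" RAM_ADDR_FMT ", used 0x" RAM_ADDR_FMT
             ", %s, %s\n",
             qemu_ram_get_idstr(rb),
             qemu_ram_get_offset(rb),
             qemu_ram_get_used_length(rb),
             qemu_ram_is_shared(rb) ? "shared" : "private",
             qemu_ram_is_migratable(rb) ? "migratable" : "not migratable");
}
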
1731/* Called with iothread lock held.  */
1732void qemu_ram_set_idstr(RAMBlock *new_block, const char *name, DeviceState *dev)
1733{
1734    RAMBlock *block;
1735
1736    assert(new_block);
1737    assert(!new_block->idstr[0]);
1738
1739    if (dev) {
1740        char *id = qdev_get_dev_path(dev);
1741        if (id) {
1742            snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
1743            g_free(id);
1744        }
1745    }
1746    pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
1747
1748    RCU_READ_LOCK_GUARD();
1749    RAMBLOCK_FOREACH(block) {
1750        if (block != new_block &&
1751            !strcmp(block->idstr, new_block->idstr)) {
1752            fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
1753                    new_block->idstr);
1754            abort();
1755        }
1756    }
1757}
1758
1759/* Called with iothread lock held.  */
1760void qemu_ram_unset_idstr(RAMBlock *block)
1761{
1762    /* FIXME: arch_init.c assumes that this is not called throughout
1763     * migration.  Ignore the problem since hot-unplug during migration
1764     * does not work anyway.
1765     */
1766    if (block) {
1767        memset(block->idstr, 0, sizeof(block->idstr));
1768    }
1769}
1770
1771size_t qemu_ram_pagesize(RAMBlock *rb)
1772{
1773    return rb->page_size;
1774}
1775
1776/* Returns the largest size of page in use */
1777size_t qemu_ram_pagesize_largest(void)
1778{
1779    RAMBlock *block;
1780    size_t largest = 0;
1781
1782    RAMBLOCK_FOREACH(block) {
1783        largest = MAX(largest, qemu_ram_pagesize(block));
1784    }
1785
1786    return largest;
1787}
1788
1789static int memory_try_enable_merging(void *addr, size_t len)
1790{
1791    if (!machine_mem_merge(current_machine)) {
1792        /* disabled by the user */
1793        return 0;
1794    }
1795
1796    return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
1797}
1798
1799/* Only legal before the guest might have detected the memory size: e.g. on
1800 * incoming migration, or right after reset.
1801 *
1802 * As the memory core doesn't know how the memory is accessed, it is up to
1803 * the resize callback to update device state and/or add assertions to
1804 * detect misuse, if necessary.
1805 */
1806int qemu_ram_resize(RAMBlock *block, ram_addr_t newsize, Error **errp)
1807{
1808    const ram_addr_t unaligned_size = newsize;
1809
1810    assert(block);
1811
1812    newsize = HOST_PAGE_ALIGN(newsize);
1813
1814    if (block->used_length == newsize) {
1815        /*
1816         * We don't have to resize the ram block (which only knows aligned
1817         * sizes), however, we have to notify if the unaligned size changed.
1818         */
1819        if (unaligned_size != memory_region_size(block->mr)) {
1820            memory_region_set_size(block->mr, unaligned_size);
1821            if (block->resized) {
1822                block->resized(block->idstr, unaligned_size, block->host);
1823            }
1824        }
1825        return 0;
1826    }
1827
1828    if (!(block->flags & RAM_RESIZEABLE)) {
1829        error_setg_errno(errp, EINVAL,
1830                         "Size mismatch: %s: 0x" RAM_ADDR_FMT
1831                         " != 0x" RAM_ADDR_FMT, block->idstr,
1832                         newsize, block->used_length);
1833        return -EINVAL;
1834    }
1835
1836    if (block->max_length < newsize) {
1837        error_setg_errno(errp, EINVAL,
1838                         "Size too large: %s: 0x" RAM_ADDR_FMT
1839                         " > 0x" RAM_ADDR_FMT, block->idstr,
1840                         newsize, block->max_length);
1841        return -EINVAL;
1842    }
1843
1844    cpu_physical_memory_clear_dirty_range(block->offset, block->used_length);
1845    block->used_length = newsize;
1846    cpu_physical_memory_set_dirty_range(block->offset, block->used_length,
1847                                        DIRTY_CLIENTS_ALL);
1848    memory_region_set_size(block->mr, unaligned_size);
1849    if (block->resized) {
1850        block->resized(block->idstr, unaligned_size, block->host);
1851    }
1852    return 0;
1853}
1854
1855/*
1856 * Trigger a sync of the given ram block for the range [start, start + length]
1857 * with the backing store, if one is available.
1858 * Otherwise this is a no-op.
1859 * Note: this is supposed to be a synchronous operation.
1860 */
1861void qemu_ram_msync(RAMBlock *block, ram_addr_t start, ram_addr_t length)
1862{
1863    /* The requested range should fit within the block range */
1864    g_assert((start + length) <= block->used_length);
1865
1866#ifdef CONFIG_LIBPMEM
1867    /* The lack of support for pmem should not block the sync */
1868    if (ramblock_is_pmem(block)) {
1869        void *addr = ramblock_ptr(block, start);
1870        pmem_persist(addr, length);
1871        return;
1872    }
1873#endif
1874    if (block->fd >= 0) {
1875        /**
1876         * If there is no support for PMEM, or the memory has not been
1877         * specified as persistent (or is not persistent memory), fall back
1878         * to msync.  Less optimal, but it still achieves the same goal.
1879         */
1880        void *addr = ramblock_ptr(block, start);
1881        if (qemu_msync(addr, length, block->fd)) {
1882            warn_report("%s: failed to sync memory range: start: "
1883                    RAM_ADDR_FMT " length: " RAM_ADDR_FMT,
1884                    __func__, start, length);
1885        }
1886    }
1887}
1888
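/*
 * Minimal usage sketch (the helper itself is hypothetical): write through
 * the block's host mapping and then make the update durable.  Assumes "rb"
 * is a block the caller looked up and that start + len stays within its
 * used_length; for anonymous RAM the sync is a no-op, per the checks in
 * qemu_ram_msync() above.
 */
static void G_GNUC_UNUSED example_write_then_sync(RAMBlock *rb,
                                                  ram_addr_t start,
                                                  const void *data, size_t len)
{
    memcpy((uint8_t *)qemu_ram_get_host_addr(rb) + start, data, len);
    qemu_ram_msync(rb, start, len);
}
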
1889/* Called with ram_list.mutex held */
1890static void dirty_memory_extend(ram_addr_t old_ram_size,
1891                                ram_addr_t new_ram_size)
1892{
1893    ram_addr_t old_num_blocks = DIV_ROUND_UP(old_ram_size,
1894                                             DIRTY_MEMORY_BLOCK_SIZE);
1895    ram_addr_t new_num_blocks = DIV_ROUND_UP(new_ram_size,
1896                                             DIRTY_MEMORY_BLOCK_SIZE);
1897    int i;
1898
1899    /* Only need to extend if block count increased */
1900    if (new_num_blocks <= old_num_blocks) {
1901        return;
1902    }
1903
1904    for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
1905        DirtyMemoryBlocks *old_blocks;
1906        DirtyMemoryBlocks *new_blocks;
1907        int j;
1908
1909        old_blocks = qatomic_rcu_read(&ram_list.dirty_memory[i]);
1910        new_blocks = g_malloc(sizeof(*new_blocks) +
1911                              sizeof(new_blocks->blocks[0]) * new_num_blocks);
1912
1913        if (old_num_blocks) {
1914            memcpy(new_blocks->blocks, old_blocks->blocks,
1915                   old_num_blocks * sizeof(old_blocks->blocks[0]));
1916        }
1917
1918        for (j = old_num_blocks; j < new_num_blocks; j++) {
1919            new_blocks->blocks[j] = bitmap_new(DIRTY_MEMORY_BLOCK_SIZE);
1920        }
1921
1922        qatomic_rcu_set(&ram_list.dirty_memory[i], new_blocks);
1923
1924        if (old_blocks) {
1925            g_free_rcu(old_blocks, rcu);
1926        }
1927    }
1928}
1929
1930static void ram_block_add(RAMBlock *new_block, Error **errp, bool shared)
1931{
1932    RAMBlock *block;
1933    RAMBlock *last_block = NULL;
1934    ram_addr_t old_ram_size, new_ram_size;
1935    Error *err = NULL;
1936
1937    old_ram_size = last_ram_page();
1938
1939    qemu_mutex_lock_ramlist();
1940    new_block->offset = find_ram_offset(new_block->max_length);
1941
1942    if (!new_block->host) {
1943        if (xen_enabled()) {
1944            xen_ram_alloc(new_block->offset, new_block->max_length,
1945                          new_block->mr, &err);
1946            if (err) {
1947                error_propagate(errp, err);
1948                qemu_mutex_unlock_ramlist();
1949                return;
1950            }
1951        } else {
1952            new_block->host = qemu_anon_ram_alloc(new_block->max_length,
1953                                                  &new_block->mr->align,
1954                                                  shared);
1955            if (!new_block->host) {
1956                error_setg_errno(errp, errno,
1957                                 "cannot set up guest memory '%s'",
1958                                 memory_region_name(new_block->mr));
1959                qemu_mutex_unlock_ramlist();
1960                return;
1961            }
1962            memory_try_enable_merging(new_block->host, new_block->max_length);
1963        }
1964    }
1965
1966    new_ram_size = MAX(old_ram_size,
1967              (new_block->offset + new_block->max_length) >> TARGET_PAGE_BITS);
1968    if (new_ram_size > old_ram_size) {
1969        dirty_memory_extend(old_ram_size, new_ram_size);
1970    }
1971    /* Keep the list sorted from biggest to smallest block.  Unlike QTAILQ,
1972     * QLIST (which has an RCU-friendly variant) does not have insertion at
1973     * tail, so save the last element in last_block.
1974     */
1975    RAMBLOCK_FOREACH(block) {
1976        last_block = block;
1977        if (block->max_length < new_block->max_length) {
1978            break;
1979        }
1980    }
1981    if (block) {
1982        QLIST_INSERT_BEFORE_RCU(block, new_block, next);
1983    } else if (last_block) {
1984        QLIST_INSERT_AFTER_RCU(last_block, new_block, next);
1985    } else { /* list is empty */
1986        QLIST_INSERT_HEAD_RCU(&ram_list.blocks, new_block, next);
1987    }
1988    ram_list.mru_block = NULL;
1989
1990    /* Write list before version */
1991    smp_wmb();
1992    ram_list.version++;
1993    qemu_mutex_unlock_ramlist();
1994
1995    cpu_physical_memory_set_dirty_range(new_block->offset,
1996                                        new_block->used_length,
1997                                        DIRTY_CLIENTS_ALL);
1998
1999    if (new_block->host) {
2000        qemu_ram_setup_dump(new_block->host, new_block->max_length);
2001        qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_HUGEPAGE);
2002        /*
2003         * MADV_DONTFORK is also needed by KVM in the absence of a synchronous
2004         * MMU.  Configure it unless the machine is a qtest server, in which
2005         * case KVM is not used and it may be forked (e.g. for fuzzing purposes).
2006         */
2007        if (!qtest_enabled()) {
2008            qemu_madvise(new_block->host, new_block->max_length,
2009                         QEMU_MADV_DONTFORK);
2010        }
2011        ram_block_notify_add(new_block->host, new_block->max_length);
2012    }
2013}
2014
2015#ifdef CONFIG_POSIX
2016RAMBlock *qemu_ram_alloc_from_fd(ram_addr_t size, MemoryRegion *mr,
2017                                 uint32_t ram_flags, int fd, off_t offset,
2018                                 bool readonly, Error **errp)
2019{
2020    RAMBlock *new_block;
2021    Error *local_err = NULL;
2022    int64_t file_size, file_align;
2023
2024    /* Only these ram flags are supported for now. */
2025    assert((ram_flags & ~(RAM_SHARED | RAM_PMEM)) == 0);
2026
2027    if (xen_enabled()) {
2028        error_setg(errp, "-mem-path not supported with Xen");
2029        return NULL;
2030    }
2031
2032    if (kvm_enabled() && !kvm_has_sync_mmu()) {
2033        error_setg(errp,
2034                   "host lacks kvm mmu notifiers, -mem-path unsupported");
2035        return NULL;
2036    }
2037
2038    size = HOST_PAGE_ALIGN(size);
2039    file_size = get_file_size(fd);
2040    if (file_size > 0 && file_size < size) {
2041        error_setg(errp, "backing store size 0x%" PRIx64
2042                   " does not match 'size' option 0x" RAM_ADDR_FMT,
2043                   file_size, size);
2044        return NULL;
2045    }
2046
2047    file_align = get_file_align(fd);
2048    if (file_align > 0 && mr && file_align > mr->align) {
2049        error_setg(errp, "backing store align 0x%" PRIx64
2050                   " is larger than 'align' option 0x%" PRIx64,
2051                   file_align, mr->align);
2052        return NULL;
2053    }
2054
2055    new_block = g_malloc0(sizeof(*new_block));
2056    new_block->mr = mr;
2057    new_block->used_length = size;
2058    new_block->max_length = size;
2059    new_block->flags = ram_flags;
2060    new_block->host = file_ram_alloc(new_block, size, fd, readonly,
2061                                     !file_size, offset, errp);
2062    if (!new_block->host) {
2063        g_free(new_block);
2064        return NULL;
2065    }
2066
2067    ram_block_add(new_block, &local_err, ram_flags & RAM_SHARED);
2068    if (local_err) {
2069        g_free(new_block);
2070        error_propagate(errp, local_err);
2071        return NULL;
2072    }
2073    return new_block;
2074
2075}
2076
2077
2078RAMBlock *qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr,
2079                                   uint32_t ram_flags, const char *mem_path,
2080                                   bool readonly, Error **errp)
2081{
2082    int fd;
2083    bool created;
2084    RAMBlock *block;
2085
2086    fd = file_ram_open(mem_path, memory_region_name(mr), readonly, &created,
2087                       errp);
2088    if (fd < 0) {
2089        return NULL;
2090    }
2091
2092    block = qemu_ram_alloc_from_fd(size, mr, ram_flags, fd, 0, readonly, errp);
2093    if (!block) {
2094        if (created) {
2095            unlink(mem_path);
2096        }
2097        close(fd);
2098        return NULL;
2099    }
2100
2101    return block;
2102}
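
/*
 * Sketch of the usual call pattern (normally performed by the
 * memory_region_init_ram_from_file() helper in softmmu/memory.c): the
 * caller owns an already-initialized MemoryRegion and attaches the
 * returned block to it.  "mr", "path" and "size" here are assumptions
 * made purely for illustration.
 */
static void G_GNUC_UNUSED example_file_backed_ram(MemoryRegion *mr,
                                                  const char *path,
                                                  ram_addr_t size,
                                                  Error **errp)
{
    RAMBlock *rb = qemu_ram_alloc_from_file(size, mr, RAM_SHARED, path,
                                            false, errp);
    if (rb) {
        mr->ram_block = rb;
    }
}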
2103#endif
2104
2105static
2106RAMBlock *qemu_ram_alloc_internal(ram_addr_t size, ram_addr_t max_size,
2107                                  void (*resized)(const char*,
2108                                                  uint64_t length,
2109                                                  void *host),
2110                                  void *host, bool resizeable, bool share,
2111                                  MemoryRegion *mr, Error **errp)
2112{
2113    RAMBlock *new_block;
2114    Error *local_err = NULL;
2115
2116    size = HOST_PAGE_ALIGN(size);
2117    max_size = HOST_PAGE_ALIGN(max_size);
2118    new_block = g_malloc0(sizeof(*new_block));
2119    new_block->mr = mr;
2120    new_block->resized = resized;
2121    new_block->used_length = size;
2122    new_block->max_length = max_size;
2123    assert(max_size >= size);
2124    new_block->fd = -1;
2125    new_block->page_size = qemu_real_host_page_size;
2126    new_block->host = host;
2127    if (host) {
2128        new_block->flags |= RAM_PREALLOC;
2129    }
2130    if (resizeable) {
2131        new_block->flags |= RAM_RESIZEABLE;
2132    }
2133    ram_block_add(new_block, &local_err, share);
2134    if (local_err) {
2135        g_free(new_block);
2136        error_propagate(errp, local_err);
2137        return NULL;
2138    }
2139    return new_block;
2140}
2141
2142RAMBlock *qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
2143                                   MemoryRegion *mr, Error **errp)
2144{
2145    return qemu_ram_alloc_internal(size, size, NULL, host, false,
2146                                   false, mr, errp);
2147}
2148
2149RAMBlock *qemu_ram_alloc(ram_addr_t size, bool share,
2150                         MemoryRegion *mr, Error **errp)
2151{
2152    return qemu_ram_alloc_internal(size, size, NULL, NULL, false,
2153                                   share, mr, errp);
2154}
2155
2156RAMBlock *qemu_ram_alloc_resizeable(ram_addr_t size, ram_addr_t maxsz,
2157                                     void (*resized)(const char*,
2158                                                     uint64_t length,
2159                                                     void *host),
2160                                     MemoryRegion *mr, Error **errp)
2161{
2162    return qemu_ram_alloc_internal(size, maxsz, resized, NULL, true,
2163                                   false, mr, errp);
2164}
2165
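/*
 * Sketch of the resizeable variant with a do-nothing resize callback; the
 * helper names and the 16 MiB / 64 MiB sizes are illustrative assumptions.
 * A later qemu_ram_resize() up to max_length invokes the callback.
 */
static void G_GNUC_UNUSED example_resized_cb(const char *idstr,
                                             uint64_t new_length, void *host)
{
    /* A real device would revalidate its view of the block here. */
}

static RAMBlock *G_GNUC_UNUSED example_alloc_resizeable(MemoryRegion *mr,
                                                        Error **errp)
{
    return qemu_ram_alloc_resizeable(16 * 1024 * 1024, 64 * 1024 * 1024,
                                     example_resized_cb, mr, errp);
}
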
2166static void reclaim_ramblock(RAMBlock *block)
2167{
2168    if (block->flags & RAM_PREALLOC) {
2169        ;
2170    } else if (xen_enabled()) {
2171        xen_invalidate_map_cache_entry(block->host);
2172#ifndef _WIN32
2173    } else if (block->fd >= 0) {
2174        qemu_ram_munmap(block->fd, block->host, block->max_length);
2175        close(block->fd);
2176#endif
2177    } else {
2178        qemu_anon_ram_free(block->host, block->max_length);
2179    }
2180    g_free(block);
2181}
2182
2183void qemu_ram_free(RAMBlock *block)
2184{
2185    if (!block) {
2186        return;
2187    }
2188
2189    if (block->host) {
2190        ram_block_notify_remove(block->host, block->max_length);
2191    }
2192
2193    qemu_mutex_lock_ramlist();
2194    QLIST_REMOVE_RCU(block, next);
2195    ram_list.mru_block = NULL;
2196    /* Write list before version */
2197    smp_wmb();
2198    ram_list.version++;
2199    call_rcu(block, reclaim_ramblock, rcu);
2200    qemu_mutex_unlock_ramlist();
2201}
2202
2203#ifndef _WIN32
2204void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
2205{
2206    RAMBlock *block;
2207    ram_addr_t offset;
2208    int flags;
2209    void *area, *vaddr;
2210
2211    RAMBLOCK_FOREACH(block) {
2212        offset = addr - block->offset;
2213        if (offset < block->max_length) {
2214            vaddr = ramblock_ptr(block, offset);
2215            if (block->flags & RAM_PREALLOC) {
2216                ;
2217            } else if (xen_enabled()) {
2218                abort();
2219            } else {
2220                flags = MAP_FIXED;
2221                if (block->fd >= 0) {
2222                    flags |= (block->flags & RAM_SHARED ?
2223                              MAP_SHARED : MAP_PRIVATE);
2224                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
2225                                flags, block->fd, offset);
2226                } else {
2227                    flags |= MAP_PRIVATE | MAP_ANONYMOUS;
2228                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
2229                                flags, -1, 0);
2230                }
2231                if (area != vaddr) {
2232                    error_report("Could not remap addr: "
2233                                 RAM_ADDR_FMT "@" RAM_ADDR_FMT "",
2234                                 length, addr);
2235                    exit(1);
2236                }
2237                memory_try_enable_merging(vaddr, length);
2238                qemu_ram_setup_dump(vaddr, length);
2239            }
2240        }
2241    }
2242}
2243#endif /* !_WIN32 */
2244
2245/* Return a host pointer to ram allocated with qemu_ram_alloc.
2246 * This should not be used for general purpose DMA.  Use address_space_map
2247 * or address_space_rw instead. For local memory (e.g. video ram) that the
2248 * device owns, use memory_region_get_ram_ptr.
2249 *
2250 * Called within RCU critical section.
2251 */
2252void *qemu_map_ram_ptr(RAMBlock *ram_block, ram_addr_t addr)
2253{
2254    RAMBlock *block = ram_block;
2255
2256    if (block == NULL) {
2257        block = qemu_get_ram_block(addr);
2258        addr -= block->offset;
2259    }
2260
2261    if (xen_enabled() && block->host == NULL) {
2262        /* We need to check if the requested address is in the RAM
2263         * because we don't want to map the entire memory in QEMU.
2264         * In that case just map until the end of the page.
2265         */
2266        if (block->offset == 0) {
2267            return xen_map_cache(addr, 0, 0, false);
2268        }
2269
2270        block->host = xen_map_cache(block->offset, block->max_length, 1, false);
2271    }
2272    return ramblock_ptr(block, addr);
2273}
2274
2275/* Return a host pointer to guest's ram. Similar to qemu_map_ram_ptr
2276 * but takes a size argument.
2277 *
2278 * Called within RCU critical section.
2279 */
2280static void *qemu_ram_ptr_length(RAMBlock *ram_block, ram_addr_t addr,
2281                                 hwaddr *size, bool lock)
2282{
2283    RAMBlock *block = ram_block;
2284    if (*size == 0) {
2285        return NULL;
2286    }
2287
2288    if (block == NULL) {
2289        block = qemu_get_ram_block(addr);
2290        addr -= block->offset;
2291    }
2292    *size = MIN(*size, block->max_length - addr);
2293
2294    if (xen_enabled() && block->host == NULL) {
2295        /* We need to check if the requested address is in the RAM
2296         * because we don't want to map the entire memory in QEMU.
2297         * In that case just map the requested area.
2298         */
2299        if (block->offset == 0) {
2300            return xen_map_cache(addr, *size, lock, lock);
2301        }
2302
2303        block->host = xen_map_cache(block->offset, block->max_length, 1, lock);
2304    }
2305
2306    return ramblock_ptr(block, addr);
2307}
2308
2309/* Return the offset of a host pointer within a RAMBlock */
2310ram_addr_t qemu_ram_block_host_offset(RAMBlock *rb, void *host)
2311{
2312    ram_addr_t res = (uint8_t *)host - (uint8_t *)rb->host;
2313    assert((uintptr_t)host >= (uintptr_t)rb->host);
2314    assert(res < rb->max_length);
2315
2316    return res;
2317}
2318
2319/*
2320 * Translates a host ptr back to a RAMBlock, a ram_addr and an offset
2321 * in that RAMBlock.
2322 *
2323 * ptr: Host pointer to look up
2324 * round_offset: If true round the result offset down to a page boundary
2325 * *ram_addr: set to result ram_addr
2326 * *offset: set to result offset within the RAMBlock
2327 *
2328 * Returns: RAMBlock (or NULL if not found)
2329 *
2330 * By the time this function returns, the returned pointer is not protected
2331 * by RCU anymore.  If the caller is not within an RCU critical section and
2332 * does not hold the iothread lock, it must have other means of protecting the
2333 * pointer, such as a reference to the region that includes the incoming
2334 * ram_addr_t.
2335 */
2336RAMBlock *qemu_ram_block_from_host(void *ptr, bool round_offset,
2337                                   ram_addr_t *offset)
2338{
2339    RAMBlock *block;
2340    uint8_t *host = ptr;
2341
2342    if (xen_enabled()) {
2343        ram_addr_t ram_addr;
2344        RCU_READ_LOCK_GUARD();
2345        ram_addr = xen_ram_addr_from_mapcache(ptr);
2346        block = qemu_get_ram_block(ram_addr);
2347        if (block) {
2348            *offset = ram_addr - block->offset;
2349        }
2350        return block;
2351    }
2352
2353    RCU_READ_LOCK_GUARD();
2354    block = qatomic_rcu_read(&ram_list.mru_block);
2355    if (block && block->host && host - block->host < block->max_length) {
2356        goto found;
2357    }
2358
2359    RAMBLOCK_FOREACH(block) {
2360        /* This can happen when the block is not mapped. */
2361        if (block->host == NULL) {
2362            continue;
2363        }
2364        if (host - block->host < block->max_length) {
2365            goto found;
2366        }
2367    }
2368
2369    return NULL;
2370
2371found:
2372    *offset = (host - block->host);
2373    if (round_offset) {
2374        *offset &= TARGET_PAGE_MASK;
2375    }
2376    return block;
2377}
2378
2379/*
2380 * Finds the named RAMBlock
2381 *
2382 * name: The name of RAMBlock to find
2383 *
2384 * Returns: RAMBlock (or NULL if not found)
2385 */
2386RAMBlock *qemu_ram_block_by_name(const char *name)
2387{
2388    RAMBlock *block;
2389
2390    RAMBLOCK_FOREACH(block) {
2391        if (!strcmp(name, block->idstr)) {
2392            return block;
2393        }
2394    }
2395
2396    return NULL;
2397}
2398
2399/* Some of the softmmu routines need to translate from a host pointer
2400   (typically a TLB entry) back to a ram offset.  */
2401ram_addr_t qemu_ram_addr_from_host(void *ptr)
2402{
2403    RAMBlock *block;
2404    ram_addr_t offset;
2405
2406    block = qemu_ram_block_from_host(ptr, false, &offset);
2407    if (!block) {
2408        return RAM_ADDR_INVALID;
2409    }
2410
2411    return block->offset + offset;
2412}
2413
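/*
 * Usage sketch (hypothetical helper): translate a pointer into guest RAM
 * back to its ram_addr_t, e.g. to mark the range dirty.  Pointers that do
 * not belong to any RAMBlock yield RAM_ADDR_INVALID.
 */
static void G_GNUC_UNUSED example_mark_host_ptr_dirty(void *ptr, size_t len)
{
    ram_addr_t ram_addr = qemu_ram_addr_from_host(ptr);

    if (ram_addr == RAM_ADDR_INVALID) {
        return; /* not guest RAM, e.g. an MMIO or host-only mapping */
    }
    cpu_physical_memory_set_dirty_range(ram_addr, len, DIRTY_CLIENTS_ALL);
}
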
2414static MemTxResult flatview_read(FlatView *fv, hwaddr addr,
2415                                 MemTxAttrs attrs, void *buf, hwaddr len);
2416static MemTxResult flatview_write(FlatView *fv, hwaddr addr, MemTxAttrs attrs,
2417                                  const void *buf, hwaddr len);
2418static bool flatview_access_valid(FlatView *fv, hwaddr addr, hwaddr len,
2419                                  bool is_write, MemTxAttrs attrs);
2420
2421static MemTxResult subpage_read(void *opaque, hwaddr addr, uint64_t *data,
2422                                unsigned len, MemTxAttrs attrs)
2423{
2424    subpage_t *subpage = opaque;
2425    uint8_t buf[8];
2426    MemTxResult res;
2427
2428#if defined(DEBUG_SUBPAGE)
2429    printf("%s: subpage %p len %u addr " TARGET_FMT_plx "\n", __func__,
2430           subpage, len, addr);
2431#endif
2432    res = flatview_read(subpage->fv, addr + subpage->base, attrs, buf, len);
2433    if (res) {
2434        return res;
2435    }
2436    *data = ldn_p(buf, len);
2437    return MEMTX_OK;
2438}
2439
2440static MemTxResult subpage_write(void *opaque, hwaddr addr,
2441                                 uint64_t value, unsigned len, MemTxAttrs attrs)
2442{
2443    subpage_t *subpage = opaque;
2444    uint8_t buf[8];
2445
2446#if defined(DEBUG_SUBPAGE)
2447    printf("%s: subpage %p len %u addr " TARGET_FMT_plx
2448           " value %"PRIx64"\n",
2449           __func__, subpage, len, addr, value);
2450#endif
2451    stn_p(buf, len, value);
2452    return flatview_write(subpage->fv, addr + subpage->base, attrs, buf, len);
2453}
2454
2455static bool subpage_accepts(void *opaque, hwaddr addr,
2456                            unsigned len, bool is_write,
2457                            MemTxAttrs attrs)
2458{
2459    subpage_t *subpage = opaque;
2460#if defined(DEBUG_SUBPAGE)
2461    printf("%s: subpage %p %c len %u addr " TARGET_FMT_plx "\n",
2462           __func__, subpage, is_write ? 'w' : 'r', len, addr);
2463#endif
2464
2465    return flatview_access_valid(subpage->fv, addr + subpage->base,
2466                                 len, is_write, attrs);
2467}
2468
2469static const MemoryRegionOps subpage_ops = {
2470    .read_with_attrs = subpage_read,
2471    .write_with_attrs = subpage_write,
2472    .impl.min_access_size = 1,
2473    .impl.max_access_size = 8,
2474    .valid.min_access_size = 1,
2475    .valid.max_access_size = 8,
2476    .valid.accepts = subpage_accepts,
2477    .endianness = DEVICE_NATIVE_ENDIAN,
2478};
2479
2480static int subpage_register(subpage_t *mmio, uint32_t start, uint32_t end,
2481                            uint16_t section)
2482{
2483    int idx, eidx;
2484
2485    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
2486        return -1;
2487    idx = SUBPAGE_IDX(start);
2488    eidx = SUBPAGE_IDX(end);
2489#if defined(DEBUG_SUBPAGE)
2490    printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
2491           __func__, mmio, start, end, idx, eidx, section);
2492#endif
2493    for (; idx <= eidx; idx++) {
2494        mmio->sub_section[idx] = section;
2495    }
2496
2497    return 0;
2498}
2499
2500static subpage_t *subpage_init(FlatView *fv, hwaddr base)
2501{
2502    subpage_t *mmio;
2503
2504    /* mmio->sub_section is set to PHYS_SECTION_UNASSIGNED with g_malloc0 */
2505    mmio = g_malloc0(sizeof(subpage_t) + TARGET_PAGE_SIZE * sizeof(uint16_t));
2506    mmio->fv = fv;
2507    mmio->base = base;
2508    memory_region_init_io(&mmio->iomem, NULL, &subpage_ops, mmio,
2509                          NULL, TARGET_PAGE_SIZE);
2510    mmio->iomem.subpage = true;
2511#if defined(DEBUG_SUBPAGE)
2512    printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
2513           mmio, base, TARGET_PAGE_SIZE);
2514#endif
2515
2516    return mmio;
2517}
2518
2519static uint16_t dummy_section(PhysPageMap *map, FlatView *fv, MemoryRegion *mr)
2520{
2521    assert(fv);
2522    MemoryRegionSection section = {
2523        .fv = fv,
2524        .mr = mr,
2525        .offset_within_address_space = 0,
2526        .offset_within_region = 0,
2527        .size = int128_2_64(),
2528    };
2529
2530    return phys_section_add(map, &section);
2531}
2532
2533MemoryRegionSection *iotlb_to_section(CPUState *cpu,
2534                                      hwaddr index, MemTxAttrs attrs)
2535{
2536    int asidx = cpu_asidx_from_attrs(cpu, attrs);
2537    CPUAddressSpace *cpuas = &cpu->cpu_ases[asidx];
2538    AddressSpaceDispatch *d = qatomic_rcu_read(&cpuas->memory_dispatch);
2539    MemoryRegionSection *sections = d->map.sections;
2540
2541    return &sections[index & ~TARGET_PAGE_MASK];
2542}
2543
2544static void io_mem_init(void)
2545{
2546    memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL,
2547                          NULL, UINT64_MAX);
2548}
2549
2550AddressSpaceDispatch *address_space_dispatch_new(FlatView *fv)
2551{
2552    AddressSpaceDispatch *d = g_new0(AddressSpaceDispatch, 1);
2553    uint16_t n;
2554
2555    n = dummy_section(&d->map, fv, &io_mem_unassigned);
2556    assert(n == PHYS_SECTION_UNASSIGNED);
2557
2558    d->phys_map  = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .skip = 1 };
2559
2560    return d;
2561}
2562
2563void address_space_dispatch_free(AddressSpaceDispatch *d)
2564{
2565    phys_sections_free(&d->map);
2566    g_free(d);
2567}
2568
2569static void do_nothing(CPUState *cpu, run_on_cpu_data d)
2570{
2571}
2572
2573static void tcg_log_global_after_sync(MemoryListener *listener)
2574{
2575    CPUAddressSpace *cpuas;
2576
2577    /* Wait for the CPU to end the current TB.  This avoids the following
2578     * incorrect race:
2579     *
2580     *      vCPU                         migration
2581     *      ----------------------       -------------------------
2582     *      TLB check -> slow path
2583     *        notdirty_mem_write
2584     *          write to RAM
2585     *          mark dirty
2586     *                                   clear dirty flag
2587     *      TLB check -> fast path
2588     *                                   read memory
2589     *        write to RAM
2590     *
2591     * by pushing the migration thread's memory read after the vCPU thread has
2592     * written the memory.
2593     */
2594    if (replay_mode == REPLAY_MODE_NONE) {
2595        /*
2596         * VGA can make calls to this function while updating the screen.
2597         * In record/replay mode this causes a deadlock, because
2598         * run_on_cpu waits for the rr mutex.  No races are possible in
2599         * record/replay mode anyway, so run_on_cpu is only needed when
2600         * record/replay is not enabled.
2601         */
2602        cpuas = container_of(listener, CPUAddressSpace, tcg_as_listener);
2603        run_on_cpu(cpuas->cpu, do_nothing, RUN_ON_CPU_NULL);
2604    }
2605}
2606
2607static void tcg_commit(MemoryListener *listener)
2608{
2609    CPUAddressSpace *cpuas;
2610    AddressSpaceDispatch *d;
2611
2612    assert(tcg_enabled());
2613    /* since each CPU stores ram addresses in its TLB cache, we must
2614       reset the modified entries */
2615    cpuas = container_of(listener, CPUAddressSpace, tcg_as_listener);
2616    cpu_reloading_memory_map();
2617    /* The CPU and TLB are protected by the iothread lock.
2618     * We reload the dispatch pointer now because cpu_reloading_memory_map()
2619     * may have split the RCU critical section.
2620     */
2621    d = address_space_to_dispatch(cpuas->as);
2622    qatomic_rcu_set(&cpuas->memory_dispatch, d);
2623    tlb_flush(cpuas->cpu);
2624}
2625
2626static void memory_map_init(void)
2627{
2628    system_memory = g_malloc(sizeof(*system_memory));
2629
2630    memory_region_init(system_memory, NULL, "system", UINT64_MAX);
2631    address_space_init(&address_space_memory, system_memory, "memory");
2632
2633    system_io = g_malloc(sizeof(*system_io));
2634    memory_region_init_io(system_io, NULL, &unassigned_io_ops, NULL, "io",
2635                          65536);
2636    address_space_init(&address_space_io, system_io, "I/O");
2637}
2638
2639MemoryRegion *get_system_memory(void)
2640{
2641    return system_memory;
2642}
2643
2644MemoryRegion *get_system_io(void)
2645{
2646    return system_io;
2647}
2648
2649static void invalidate_and_set_dirty(MemoryRegion *mr, hwaddr addr,
2650                                     hwaddr length)
2651{
2652    uint8_t dirty_log_mask = memory_region_get_dirty_log_mask(mr);
2653    addr += memory_region_get_ram_addr(mr);
2654
2655    /* No early return if dirty_log_mask is or becomes 0, because
2656     * cpu_physical_memory_set_dirty_range will still call
2657     * xen_modified_memory.
2658     */
2659    if (dirty_log_mask) {
2660        dirty_log_mask =
2661            cpu_physical_memory_range_includes_clean(addr, length, dirty_log_mask);
2662    }
2663    if (dirty_log_mask & (1 << DIRTY_MEMORY_CODE)) {
2664        assert(tcg_enabled());
2665        tb_invalidate_phys_range(addr, addr + length);
2666        dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
2667    }
2668    cpu_physical_memory_set_dirty_range(addr, length, dirty_log_mask);
2669}
2670
2671void memory_region_flush_rom_device(MemoryRegion *mr, hwaddr addr, hwaddr size)
2672{
2673    /*
2674     * In principle this function would work on other memory region types too,
2675     * but the ROM device use case is the only one where this operation is
2676     * necessary.  Other memory regions should use the
2677     * address_space_read/write() APIs.
2678     */
2679    assert(memory_region_is_romd(mr));
2680
2681    invalidate_and_set_dirty(mr, addr, size);
2682}
2683
2684static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr)
2685{
2686    unsigned access_size_max = mr->ops->valid.max_access_size;
2687
2688    /* Regions are assumed to support 1-4 byte accesses unless
2689       otherwise specified.  */
2690    if (access_size_max == 0) {
2691        access_size_max = 4;
2692    }
2693
2694    /* Bound the maximum access by the alignment of the address.  */
2695    if (!mr->ops->impl.unaligned) {
2696        unsigned align_size_max = addr & -addr;
2697        if (align_size_max != 0 && align_size_max < access_size_max) {
2698            access_size_max = align_size_max;
2699        }
2700    }
2701
2702    /* Don't attempt accesses larger than the maximum.  */
2703    if (l > access_size_max) {
2704        l = access_size_max;
2705    }
2706    l = pow2floor(l);
2707
2708    return l;
2709}
2710
2711static bool prepare_mmio_access(MemoryRegion *mr)
2712{
2713    bool release_lock = false;
2714
2715    if (!qemu_mutex_iothread_locked()) {
2716        qemu_mutex_lock_iothread();
2717        release_lock = true;
2718    }
2719    if (mr->flush_coalesced_mmio) {
2720        qemu_flush_coalesced_mmio_buffer();
2721    }
2722
2723    return release_lock;
2724}
2725
2726/* Called within RCU critical section.  */
2727static MemTxResult flatview_write_continue(FlatView *fv, hwaddr addr,
2728                                           MemTxAttrs attrs,
2729                                           const void *ptr,
2730                                           hwaddr len, hwaddr addr1,
2731                                           hwaddr l, MemoryRegion *mr)
2732{
2733    uint8_t *ram_ptr;
2734    uint64_t val;
2735    MemTxResult result = MEMTX_OK;
2736    bool release_lock = false;
2737    const uint8_t *buf = ptr;
2738
2739    for (;;) {
2740        if (!memory_access_is_direct(mr, true)) {
2741            release_lock |= prepare_mmio_access(mr);
2742            l = memory_access_size(mr, l, addr1);
2743            /* XXX: could force current_cpu to NULL to avoid
2744               potential bugs */
2745            val = ldn_he_p(buf, l);
2746            result |= memory_region_dispatch_write(mr, addr1, val,
2747                                                   size_memop(l), attrs);
2748        } else {
2749            /* RAM case */
2750            ram_ptr = qemu_ram_ptr_length(mr->ram_block, addr1, &l, false);
2751            memcpy(ram_ptr, buf, l);
2752            invalidate_and_set_dirty(mr, addr1, l);
2753        }
2754
2755        if (release_lock) {
2756            qemu_mutex_unlock_iothread();
2757            release_lock = false;
2758        }
2759
2760        len -= l;
2761        buf += l;
2762        addr += l;
2763
2764        if (!len) {
2765            break;
2766        }
2767
2768        l = len;
2769        mr = flatview_translate(fv, addr, &addr1, &l, true, attrs);
2770    }
2771
2772    return result;
2773}
2774
2775/* Called from RCU critical section.  */
2776static MemTxResult flatview_write(FlatView *fv, hwaddr addr, MemTxAttrs attrs,
2777                                  const void *buf, hwaddr len)
2778{
2779    hwaddr l;
2780    hwaddr addr1;
2781    MemoryRegion *mr;
2782    MemTxResult result = MEMTX_OK;
2783
2784    l = len;
2785    mr = flatview_translate(fv, addr, &addr1, &l, true, attrs);
2786    result = flatview_write_continue(fv, addr, attrs, buf, len,
2787                                     addr1, l, mr);
2788
2789    return result;
2790}
2791
2792/* Called within RCU critical section.  */
2793MemTxResult flatview_read_continue(FlatView *fv, hwaddr addr,
2794                                   MemTxAttrs attrs, void *ptr,
2795                                   hwaddr len, hwaddr addr1, hwaddr l,
2796                                   MemoryRegion *mr)
2797{
2798    uint8_t *ram_ptr;
2799    uint64_t val;
2800    MemTxResult result = MEMTX_OK;
2801    bool release_lock = false;
2802    uint8_t *buf = ptr;
2803
2804    fuzz_dma_read_cb(addr, len, mr);
2805    for (;;) {
2806        if (!memory_access_is_direct(mr, false)) {
2807            /* I/O case */
2808            release_lock |= prepare_mmio_access(mr);
2809            l = memory_access_size(mr, l, addr1);
2810            result |= memory_region_dispatch_read(mr, addr1, &val,
2811                                                  size_memop(l), attrs);
2812            stn_he_p(buf, l, val);
2813        } else {
2814            /* RAM case */
2815            ram_ptr = qemu_ram_ptr_length(mr->ram_block, addr1, &l, false);
2816            memcpy(buf, ram_ptr, l);
2817        }
2818
2819        if (release_lock) {
2820            qemu_mutex_unlock_iothread();
2821            release_lock = false;
2822        }
2823
2824        len -= l;
2825        buf += l;
2826        addr += l;
2827
2828        if (!len) {
2829            break;
2830        }
2831
2832        l = len;
2833        mr = flatview_translate(fv, addr, &addr1, &l, false, attrs);
2834    }
2835
2836    return result;
2837}
2838
2839/* Called from RCU critical section.  */
2840static MemTxResult flatview_read(FlatView *fv, hwaddr addr,
2841                                 MemTxAttrs attrs, void *buf, hwaddr len)
2842{
2843    hwaddr l;
2844    hwaddr addr1;
2845    MemoryRegion *mr;
2846
2847    l = len;
2848    mr = flatview_translate(fv, addr, &addr1, &l, false, attrs);
2849    return flatview_read_continue(fv, addr, attrs, buf, len,
2850                                  addr1, l, mr);
2851}
2852
2853MemTxResult address_space_read_full(AddressSpace *as, hwaddr addr,
2854                                    MemTxAttrs attrs, void *buf, hwaddr len)
2855{
2856    MemTxResult result = MEMTX_OK;
2857    FlatView *fv;
2858
2859    if (len > 0) {
2860        RCU_READ_LOCK_GUARD();
2861        fv = address_space_to_flatview(as);
2862        result = flatview_read(fv, addr, attrs, buf, len);
2863    }
2864
2865    return result;
2866}
2867
2868MemTxResult address_space_write(AddressSpace *as, hwaddr addr,
2869                                MemTxAttrs attrs,
2870                                const void *buf, hwaddr len)
2871{
2872    MemTxResult result = MEMTX_OK;
2873    FlatView *fv;
2874
2875    if (len > 0) {
2876        RCU_READ_LOCK_GUARD();
2877        fv = address_space_to_flatview(as);
2878        result = flatview_write(fv, addr, attrs, buf, len);
2879    }
2880
2881    return result;
2882}
2883
2884MemTxResult address_space_rw(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
2885                             void *buf, hwaddr len, bool is_write)
2886{
2887    if (is_write) {
2888        return address_space_write(as, addr, attrs, buf, len);
2889    } else {
2890        return address_space_read_full(as, addr, attrs, buf, len);
2891    }
2892}
2893
2894void cpu_physical_memory_rw(hwaddr addr, void *buf,
2895                            hwaddr len, bool is_write)
2896{
2897    address_space_rw(&address_space_memory, addr, MEMTXATTRS_UNSPECIFIED,
2898                     buf, len, is_write);
2899}
2900
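/*
 * Device-model style sketch of a guest-physical write followed by a read
 * back using the APIs above; "gpa" is an assumption.  Real device code
 * usually goes through the dma_memory_*() wrappers in sysemu/dma.h, which
 * end up here.
 */
static MemTxResult G_GNUC_UNUSED example_guest_phys_roundtrip(hwaddr gpa)
{
    uint32_t val = 0x12345678;
    MemTxResult res;

    res = address_space_write(&address_space_memory, gpa,
                              MEMTXATTRS_UNSPECIFIED, &val, sizeof(val));
    if (res != MEMTX_OK) {
        return res;
    }
    return address_space_read(&address_space_memory, gpa,
                              MEMTXATTRS_UNSPECIFIED, &val, sizeof(val));
}
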
2901enum write_rom_type {
2902    WRITE_DATA,
2903    FLUSH_CACHE,
2904};
2905
2906static inline MemTxResult address_space_write_rom_internal(AddressSpace *as,
2907                                                           hwaddr addr,
2908                                                           MemTxAttrs attrs,
2909                                                           const void *ptr,
2910                                                           hwaddr len,
2911                                                           enum write_rom_type type)
2912{
2913    hwaddr l;
2914    uint8_t *ram_ptr;
2915    hwaddr addr1;
2916    MemoryRegion *mr;
2917    const uint8_t *buf = ptr;
2918
2919    RCU_READ_LOCK_GUARD();
2920    while (len > 0) {
2921        l = len;
2922        mr = address_space_translate(as, addr, &addr1, &l, true, attrs);
2923
2924        if (!(memory_region_is_ram(mr) ||
2925              memory_region_is_romd(mr))) {
2926            l = memory_access_size(mr, l, addr1);
2927        } else {
2928            /* ROM/RAM case */
2929            ram_ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
2930            switch (type) {
2931            case WRITE_DATA:
2932                memcpy(ram_ptr, buf, l);
2933                invalidate_and_set_dirty(mr, addr1, l);
2934                break;
2935            case FLUSH_CACHE:
2936                flush_idcache_range((uintptr_t)ram_ptr, (uintptr_t)ram_ptr, l);
2937                break;
2938            }
2939        }
2940        len -= l;
2941        buf += l;
2942        addr += l;
2943    }
2944    return MEMTX_OK;
2945}
2946
2947/* Used for ROM loading: can write to RAM and ROM */
2948MemTxResult address_space_write_rom(AddressSpace *as, hwaddr addr,
2949                                    MemTxAttrs attrs,
2950                                    const void *buf, hwaddr len)
2951{
2952    return address_space_write_rom_internal(as, addr, attrs,
2953                                            buf, len, WRITE_DATA);
2954}
2955
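/*
 * Loader-style sketch for the helper above; "blob", "size" and "load_addr"
 * are assumptions.  Unlike address_space_write(), this path also lets the
 * caller fill ROM and ROM-device regions.
 */
static MemTxResult G_GNUC_UNUSED example_load_firmware_blob(const void *blob,
                                                            hwaddr size,
                                                            hwaddr load_addr)
{
    return address_space_write_rom(&address_space_memory, load_addr,
                                   MEMTXATTRS_UNSPECIFIED, blob, size);
}
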
2956void cpu_flush_icache_range(hwaddr start, hwaddr len)
2957{
2958    /*
2959     * This function should do the same thing as an icache flush that was
2960     * triggered from within the guest. For TCG we are always cache coherent,
2961     * so there is no need to flush anything. For KVM / Xen we need to flush
2962     * the host's instruction cache at least.
2963     */
2964    if (tcg_enabled()) {
2965        return;
2966    }
2967
2968    address_space_write_rom_internal(&address_space_memory,
2969                                     start, MEMTXATTRS_UNSPECIFIED,
2970                                     NULL, len, FLUSH_CACHE);
2971}
2972
2973typedef struct {
2974    MemoryRegion *mr;
2975    void *buffer;
2976    hwaddr addr;
2977    hwaddr len;
2978    bool in_use;
2979} BounceBuffer;
2980
2981static BounceBuffer bounce;
2982
2983typedef struct MapClient {
2984    QEMUBH *bh;
2985    QLIST_ENTRY(MapClient) link;
2986} MapClient;
2987
2988QemuMutex map_client_list_lock;
2989static QLIST_HEAD(, MapClient) map_client_list
2990    = QLIST_HEAD_INITIALIZER(map_client_list);
2991
2992static void cpu_unregister_map_client_do(MapClient *client)
2993{
2994    QLIST_REMOVE(client, link);
2995    g_free(client);
2996}
2997
2998static void cpu_notify_map_clients_locked(void)
2999{
3000    MapClient *client;
3001
3002    while (!QLIST_EMPTY(&map_client_list)) {
3003        client = QLIST_FIRST(&map_client_list);
3004        qemu_bh_schedule(client->bh);
3005        cpu_unregister_map_client_do(client);
3006    }
3007}
3008
3009void cpu_register_map_client(QEMUBH *bh)
3010{
3011    MapClient *client = g_malloc(sizeof(*client));
3012
3013    qemu_mutex_lock(&map_client_list_lock);
3014    client->bh = bh;
3015    QLIST_INSERT_HEAD(&map_client_list, client, link);
3016    if (!qatomic_read(&bounce.in_use)) {
3017        cpu_notify_map_clients_locked();
3018    }
3019    qemu_mutex_unlock(&map_client_list_lock);
3020}
3021
3022void cpu_exec_init_all(void)
3023{
3024    qemu_mutex_init(&ram_list.mutex);
3025    /* The data structures we set up here depend on knowing the page size,
3026     * so no more changes can be made after this point.
3027     * In an ideal world, nothing we did before we had finished the
3028     * machine setup would care about the target page size, and we could
3029     * do this much later, rather than requiring board models to state
3030     * up front what their requirements are.
3031     */
3032    finalize_target_page_bits();
3033    io_mem_init();
3034    memory_map_init();
3035    qemu_mutex_init(&map_client_list_lock);
3036}
3037
3038void cpu_unregister_map_client(QEMUBH *bh)
3039{
3040    MapClient *client;
3041
3042    qemu_mutex_lock(&map_client_list_lock);
3043    QLIST_FOREACH(client, &map_client_list, link) {
3044        if (client->bh == bh) {
3045            cpu_unregister_map_client_do(client);
3046            break;
3047        }
3048    }
3049    qemu_mutex_unlock(&map_client_list_lock);
3050}
3051
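/*
 * Sketch of the retry protocol for address_space_map(): if the map fails
 * because the single bounce buffer is in use, queue a bottom half and have
 * it re-attempt the transfer once the buffer is released.  Both helper
 * names are hypothetical.
 */
static void G_GNUC_UNUSED example_retry_transfer_bh(void *opaque)
{
    /* Re-issue the address_space_map() call that previously returned NULL. */
}

static void G_GNUC_UNUSED example_wait_for_bounce_buffer(void *opaque)
{
    cpu_register_map_client(qemu_bh_new(example_retry_transfer_bh, opaque));
}
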
3052static void cpu_notify_map_clients(void)
3053{
3054    qemu_mutex_lock(&map_client_list_lock);
3055    cpu_notify_map_clients_locked();
3056    qemu_mutex_unlock(&map_client_list_lock);
3057}
3058
3059static bool flatview_access_valid(FlatView *fv, hwaddr addr, hwaddr len,
3060                                  bool is_write, MemTxAttrs attrs)
3061{
3062    MemoryRegion *mr;
3063    hwaddr l, xlat;
3064
3065    while (len > 0) {
3066        l = len;
3067        mr = flatview_translate(fv, addr, &xlat, &l, is_write, attrs);
3068        if (!memory_access_is_direct(mr, is_write)) {
3069            l = memory_access_size(mr, l, addr);
3070            if (!memory_region_access_valid(mr, xlat, l, is_write, attrs)) {
3071                return false;
3072            }
3073        }
3074
3075        len -= l;
3076        addr += l;
3077    }
3078    return true;
3079}
3080
3081bool address_space_access_valid(AddressSpace *as, hwaddr addr,
3082                                hwaddr len, bool is_write,
3083                                MemTxAttrs attrs)
3084{
3085    FlatView *fv;
3086    bool result;
3087
3088    RCU_READ_LOCK_GUARD();
3089    fv = address_space_to_flatview(as);
3090    result = flatview_access_valid(fv, addr, len, is_write, attrs);
3091    return result;
3092}
3093
3094static hwaddr
3095flatview_extend_translation(FlatView *fv, hwaddr addr,
3096                            hwaddr target_len,
3097                            MemoryRegion *mr, hwaddr base, hwaddr len,
3098                            bool is_write, MemTxAttrs attrs)
3099{
3100    hwaddr done = 0;
3101    hwaddr xlat;
3102    MemoryRegion *this_mr;
3103
3104    for (;;) {
3105        target_len -= len;
3106        addr += len;
3107        done += len;
3108        if (target_len == 0) {
3109            return done;
3110        }
3111
3112        len = target_len;
3113        this_mr = flatview_translate(fv, addr, &xlat,
3114                                     &len, is_write, attrs);
3115        if (this_mr != mr || xlat != base + done) {
3116            return done;
3117        }
3118    }
3119}
3120
3121/* Map a physical memory region into a host virtual address.
3122 * May map a subset of the requested range, given by and returned in *plen.
3123 * May return NULL if resources needed to perform the mapping are exhausted.
3124 * Use only for reads OR writes - not for read-modify-write operations.
3125 * Use cpu_register_map_client() to know when retrying the map operation is
3126 * likely to succeed.
3127 */
3128void *address_space_map(AddressSpace *as,
3129                        hwaddr addr,
3130                        hwaddr *plen,
3131                        bool is_write,
3132                        MemTxAttrs attrs)
3133{
3134    hwaddr len = *plen;
3135    hwaddr l, xlat;
3136    MemoryRegion *mr;
3137    void *ptr;
3138    FlatView *fv;
3139
3140    if (len == 0) {
3141        return NULL;
3142    }
3143
3144    l = len;
3145    RCU_READ_LOCK_GUARD();
3146    fv = address_space_to_flatview(as);
3147    mr = flatview_translate(fv, addr, &xlat, &l, is_write, attrs);
3148
3149    if (!memory_access_is_direct(mr, is_write)) {
3150        if (qatomic_xchg(&bounce.in_use, true)) {
3151            *plen = 0;
3152            return NULL;
3153        }
3154        /* Avoid unbounded allocations */
3155        l = MIN(l, TARGET_PAGE_SIZE);
3156        bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, l);
3157        bounce.addr = addr;
3158        bounce.len = l;
3159
3160        memory_region_ref(mr);
3161        bounce.mr = mr;
3162        if (!is_write) {
3163            flatview_read(fv, addr, MEMTXATTRS_UNSPECIFIED,
3164                               bounce.buffer, l);
3165        }
3166
3167        *plen = l;
3168        return bounce.buffer;
3169    }
3170
3171
3172    memory_region_ref(mr);
3173    *plen = flatview_extend_translation(fv, addr, len, mr, xlat,
3174                                        l, is_write, attrs);
3175    fuzz_dma_read_cb(addr, *plen, mr);
3176    ptr = qemu_ram_ptr_length(mr->ram_block, xlat, plen, true);
3177
3178    return ptr;
3179}
3180
3181/* Unmaps a memory region previously mapped by address_space_map().
3182 * Will also mark the memory as dirty if is_write is true.  access_len gives
3183 * the amount of memory that was actually read or written by the caller.
3184 */
3185void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
3186                         bool is_write, hwaddr access_len)
3187{
3188    if (buffer != bounce.buffer) {
3189        MemoryRegion *mr;
3190        ram_addr_t addr1;
3191
3192        mr = memory_region_from_host(buffer, &addr1);
3193        assert(mr != NULL);
3194        if (is_write) {
3195            invalidate_and_set_dirty(mr, addr1, access_len);
3196        }
3197        if (xen_enabled()) {
3198            xen_invalidate_map_cache_entry(buffer);
3199        }
3200        memory_region_unref(mr);
3201        return;
3202    }
3203    if (is_write) {
3204        address_space_write(as, bounce.addr, MEMTXATTRS_UNSPECIFIED,
3205                            bounce.buffer, access_len);
3206    }
3207    qemu_vfree(bounce.buffer);
3208    bounce.buffer = NULL;
3209    memory_region_unref(bounce.mr);
3210    qatomic_mb_set(&bounce.in_use, false);
3211    cpu_notify_map_clients();
3212}
3213
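/*
 * Sketch of the map/unmap pattern documented above, for a write to guest
 * memory; "addr" and "len" are assumptions.  A NULL return means the
 * target is not direct RAM and the bounce buffer is busy, in which case a
 * real caller would register a map client and retry later.
 */
static bool G_GNUC_UNUSED example_zero_guest_range(hwaddr addr, hwaddr len)
{
    hwaddr plen = len;
    void *host = address_space_map(&address_space_memory, addr, &plen,
                                   true, MEMTXATTRS_UNSPECIFIED);

    if (!host) {
        return false;
    }
    memset(host, 0, plen);  /* plen may be smaller than the requested len */
    address_space_unmap(&address_space_memory, host, plen, true, plen);
    return plen == len;
}
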
3214void *cpu_physical_memory_map(hwaddr addr,
3215                              hwaddr *plen,
3216                              bool is_write)
3217{
3218    return address_space_map(&address_space_memory, addr, plen, is_write,
3219                             MEMTXATTRS_UNSPECIFIED);
3220}
3221
3222void cpu_physical_memory_unmap(void *buffer, hwaddr len,
3223                               bool is_write, hwaddr access_len)
3224{
3225    return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
3226}
3227
3228#define ARG1_DECL                AddressSpace *as
3229#define ARG1                     as
3230#define SUFFIX
3231#define TRANSLATE(...)           address_space_translate(as, __VA_ARGS__)
3232#define RCU_READ_LOCK(...)       rcu_read_lock()
3233#define RCU_READ_UNLOCK(...)     rcu_read_unlock()
3234#include "memory_ldst.c.inc"
3235
3236int64_t address_space_cache_init(MemoryRegionCache *cache,
3237                                 AddressSpace *as,
3238                                 hwaddr addr,
3239                                 hwaddr len,
3240                                 bool is_write)
3241{
3242    AddressSpaceDispatch *d;
3243    hwaddr l;
3244    MemoryRegion *mr;
3245    Int128 diff;
3246
3247    assert(len > 0);
3248
3249    l = len;
3250    cache->fv = address_space_get_flatview(as);
3251    d = flatview_to_dispatch(cache->fv);
3252    cache->mrs = *address_space_translate_internal(d, addr, &cache->xlat, &l, true);
3253
3254    /*
3255     * cache->xlat is now relative to cache->mrs.mr, not to the section itself.
3256     * Take that into account to compute how many bytes are there between
3257     * cache->xlat and the end of the section.
3258     */
3259    diff = int128_sub(cache->mrs.size,
3260                      int128_make64(cache->xlat - cache->mrs.offset_within_region));
3261    l = int128_get64(int128_min(diff, int128_make64(l)));
3262
3263    mr = cache->mrs.mr;
3264    memory_region_ref(mr);
3265    if (memory_access_is_direct(mr, is_write)) {
3266        /* We don't care about the memory attributes here as we're only
3267         * doing this if we found actual RAM, which behaves the same
3268         * regardless of attributes; so UNSPECIFIED is fine.
3269         */
3270        l = flatview_extend_translation(cache->fv, addr, len, mr,
3271                                        cache->xlat, l, is_write,
3272                                        MEMTXATTRS_UNSPECIFIED);
3273        cache->ptr = qemu_ram_ptr_length(mr->ram_block, cache->xlat, &l, true);
3274    } else {
3275        cache->ptr = NULL;
3276    }
3277
3278    cache->len = l;
3279    cache->is_write = is_write;
3280    return l;
3281}
3282
3283void address_space_cache_invalidate(MemoryRegionCache *cache,
3284                                    hwaddr addr,
3285                                    hwaddr access_len)
3286{
3287    assert(cache->is_write);
3288    if (likely(cache->ptr)) {
3289        invalidate_and_set_dirty(cache->mrs.mr, addr + cache->xlat, access_len);
3290    }
3291}
3292
3293void address_space_cache_destroy(MemoryRegionCache *cache)
3294{
3295    if (!cache->mrs.mr) {
3296        return;
3297    }
3298
3299    if (xen_enabled()) {
3300        xen_invalidate_map_cache_entry(cache->ptr);
3301    }
3302    memory_region_unref(cache->mrs.mr);
3303    flatview_unref(cache->fv);
3304    cache->mrs.mr = NULL;
3305    cache->fv = NULL;
3306}
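
    /*
     * Lifecycle sketch for the MemoryRegionCache API above (illustrative;
     * "as", "base" and "desc" are hypothetical).  The cache amortises the
     * translation cost when a bounded piece of guest memory, e.g. a ring
     * descriptor, is accessed repeatedly:
     *
     *     MemoryRegionCache cache = MEMORY_REGION_CACHE_INVALID;
     *     int64_t n = address_space_cache_init(&cache, as, base,
     *                                          sizeof(desc), true);
     *     if (n == sizeof(desc)) {
     *         address_space_read_cached(&cache, 0, &desc, sizeof(desc));
     *         // ... modify desc ...
     *         address_space_write_cached(&cache, 0, &desc, sizeof(desc));
     *         address_space_cache_invalidate(&cache, 0, sizeof(desc));
     *     }
     *     address_space_cache_destroy(&cache);
     */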
3307
3308/* Called from RCU critical section.  This function has the same
3309 * semantics as address_space_translate, but it only works on a
3310 * predefined range of a MemoryRegion that was mapped with
3311 * address_space_cache_init.
3312 */
3313static inline MemoryRegion *address_space_translate_cached(
3314    MemoryRegionCache *cache, hwaddr addr, hwaddr *xlat,
3315    hwaddr *plen, bool is_write, MemTxAttrs attrs)
3316{
3317    MemoryRegionSection section;
3318    MemoryRegion *mr;
3319    IOMMUMemoryRegion *iommu_mr;
3320    AddressSpace *target_as;
3321
3322    assert(!cache->ptr);
3323    *xlat = addr + cache->xlat;
3324
3325    mr = cache->mrs.mr;
3326    iommu_mr = memory_region_get_iommu(mr);
3327    if (!iommu_mr) {
3328        /* MMIO region.  */
3329        return mr;
3330    }
3331
3332    section = address_space_translate_iommu(iommu_mr, xlat, plen,
3333                                            NULL, is_write, true,
3334                                            &target_as, attrs);
3335    return section.mr;
3336}
3337
3338/* Called from RCU critical section. address_space_read_cached uses this
3339 * out-of-line function when the target is an MMIO or IOMMU region.
3340 */
3341MemTxResult
3342address_space_read_cached_slow(MemoryRegionCache *cache, hwaddr addr,
3343                                   void *buf, hwaddr len)
3344{
3345    hwaddr addr1, l;
3346    MemoryRegion *mr;
3347
3348    l = len;
3349    mr = address_space_translate_cached(cache, addr, &addr1, &l, false,
3350                                        MEMTXATTRS_UNSPECIFIED);
3351    return flatview_read_continue(cache->fv,
3352                                  addr, MEMTXATTRS_UNSPECIFIED, buf, len,
3353                                  addr1, l, mr);
3354}
3355
3356/* Called from RCU critical section. address_space_write_cached uses this
3357 * out-of-line function when the target is an MMIO or IOMMU region.
3358 */
3359MemTxResult
3360address_space_write_cached_slow(MemoryRegionCache *cache, hwaddr addr,
3361                                    const void *buf, hwaddr len)
3362{
3363    hwaddr addr1, l;
3364    MemoryRegion *mr;
3365
3366    l = len;
3367    mr = address_space_translate_cached(cache, addr, &addr1, &l, true,
3368                                        MEMTXATTRS_UNSPECIFIED);
3369    return flatview_write_continue(cache->fv,
3370                                   addr, MEMTXATTRS_UNSPECIFIED, buf, len,
3371                                   addr1, l, mr);
3372}
3373
3374#define ARG1_DECL                MemoryRegionCache *cache
3375#define ARG1                     cache
3376#define SUFFIX                   _cached_slow
3377#define TRANSLATE(...)           address_space_translate_cached(cache, __VA_ARGS__)
3378#define RCU_READ_LOCK()          ((void)0)
3379#define RCU_READ_UNLOCK()        ((void)0)
3380#include "memory_ldst.c.inc"
3381
3382/* virtual memory access for debug (includes writing to ROM) */
3383int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
3384                        void *ptr, target_ulong len, bool is_write)
3385{
3386    hwaddr phys_addr;
3387    target_ulong l, page;
3388    uint8_t *buf = ptr;
3389
3390    cpu_synchronize_state(cpu);
3391    while (len > 0) {
3392        int asidx;
3393        MemTxAttrs attrs;
3394        MemTxResult res;
3395
3396        page = addr & TARGET_PAGE_MASK;
3397        phys_addr = cpu_get_phys_page_attrs_debug(cpu, page, &attrs);
3398        asidx = cpu_asidx_from_attrs(cpu, attrs);
3399        /* if no physical page mapped, return an error */
3400        if (phys_addr == -1)
3401            return -1;
3402        l = (page + TARGET_PAGE_SIZE) - addr;
3403        if (l > len)
3404            l = len;
3405        phys_addr += (addr & ~TARGET_PAGE_MASK);
3406        if (is_write) {
3407            res = address_space_write_rom(cpu->cpu_ases[asidx].as, phys_addr,
3408                                          attrs, buf, l);
3409        } else {
3410            res = address_space_read(cpu->cpu_ases[asidx].as, phys_addr,
3411                                     attrs, buf, l);
3412        }
3413        if (res != MEMTX_OK) {
3414            return -1;
3415        }
3416        len -= l;
3417        buf += l;
3418        addr += l;
3419    }
3420    return 0;
3421}
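
    /*
     * Example (sketch): cpu_memory_rw_debug() is what gdbstub and monitor
     * commands use to peek at or poke guest virtual memory.  "cpu" and
     * "vaddr" are assumed valid; a negative return means the page was not
     * mapped or the access failed:
     *
     *     uint32_t word;
     *     if (cpu_memory_rw_debug(cpu, vaddr, &word, sizeof(word), false) < 0) {
     *         // handle the unmapped/faulting address
     *     }
     */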
3422
3423/*
3424 * Allows code that needs to deal with migration bitmaps etc to still be built
3425 * target independent.
3426 */
3427size_t qemu_target_page_size(void)
3428{
3429    return TARGET_PAGE_SIZE;
3430}
3431
3432int qemu_target_page_bits(void)
3433{
3434    return TARGET_PAGE_BITS;
3435}
3436
3437int qemu_target_page_bits_min(void)
3438{
3439    return TARGET_PAGE_BITS_MIN;
3440}
3441
3442bool cpu_physical_memory_is_io(hwaddr phys_addr)
3443{
3444    MemoryRegion *mr;
3445    hwaddr l = 1;
3446    bool res;
3447
3448    RCU_READ_LOCK_GUARD();
3449    mr = address_space_translate(&address_space_memory,
3450                                 phys_addr, &phys_addr, &l, false,
3451                                 MEMTXATTRS_UNSPECIFIED);
3452
3453    res = !(memory_region_is_ram(mr) || memory_region_is_romd(mr));
3454    return res;
3455}
3456
3457int qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
3458{
3459    RAMBlock *block;
3460    int ret = 0;
3461
3462    RCU_READ_LOCK_GUARD();
3463    RAMBLOCK_FOREACH(block) {
3464        ret = func(block, opaque);
3465        if (ret) {
3466            break;
3467        }
3468    }
3469    return ret;
3470}
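
    /*
     * Example iterator for qemu_ram_foreach_block() (illustrative;
     * "add_used_length" is a hypothetical callback).  Returning non-zero
     * from the callback stops the walk and is passed back to the caller:
     *
     *     static int add_used_length(RAMBlock *rb, void *opaque)
     *     {
     *         *(uint64_t *)opaque += qemu_ram_get_used_length(rb);
     *         return 0;
     *     }
     *
     *     uint64_t total = 0;
     *     qemu_ram_foreach_block(add_used_length, &total);
     */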
3471
3472/*
3473 * Unmap pages of memory from start to start+length such that
3474 * they a) read as 0, b) trigger whatever fault mechanism
3475 * the OS provides for postcopy.
3476 * The pages must be unmapped by the end of the function.
3477 * Returns: 0 on success, non-zero on failure
3478 *
3479 */
3480int ram_block_discard_range(RAMBlock *rb, uint64_t start, size_t length)
3481{
3482    int ret = -1;
3483
3484    uint8_t *host_startaddr = rb->host + start;
3485
3486    if (!QEMU_PTR_IS_ALIGNED(host_startaddr, rb->page_size)) {
3487        error_report("ram_block_discard_range: Unaligned start address: %p",
3488                     host_startaddr);
3489        goto err;
3490    }
3491
3492    if ((start + length) <= rb->used_length) {
3493        bool need_madvise, need_fallocate;
3494        if (!QEMU_IS_ALIGNED(length, rb->page_size)) {
3495            error_report("ram_block_discard_range: Unaligned length: %zx",
3496                         length);
3497            goto err;
3498        }
3499
3500        errno = ENOTSUP; /* If we are missing MADVISE etc */
3501
3502        /* The logic here is messy:
3503         *    madvise DONTNEED fails for hugepages,
3504         *    fallocate works on hugepages and shmem.
3505         */
3506        need_madvise = (rb->page_size == qemu_host_page_size);
3507        need_fallocate = rb->fd != -1;
3508        if (need_fallocate) {
3509            /* For a file, this causes the area of the file to be zero'd
3510             * if read, and for hugetlbfs also causes it to be unmapped
3511             * so a userfault will trigger.
3512             */
3513#ifdef CONFIG_FALLOCATE_PUNCH_HOLE
3514            ret = fallocate(rb->fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
3515                            start, length);
3516            if (ret) {
3517                ret = -errno;
3518                error_report("ram_block_discard_range: Failed to fallocate "
3519                             "%s:%" PRIx64 " +%zx (%d)",
3520                             rb->idstr, start, length, ret);
3521                goto err;
3522            }
3523#else
3524            ret = -ENOSYS;
3525            error_report("ram_block_discard_range: fallocate not available/file"
3526                         "%s:%" PRIx64 " +%zx (%d)",
3527                         rb->idstr, start, length, ret);
3528            goto err;
3529#endif
3530        }
3531        if (need_madvise) {
3532            /* For normal RAM this causes it to be unmapped,
3533             * for shared memory it causes the local mapping to disappear
3534             * and to fall back on the file contents (which we just
3535             * fallocate'd away).
3536             */
3537#if defined(CONFIG_MADVISE)
3538            ret = madvise(host_startaddr, length, MADV_DONTNEED);
3539            if (ret) {
3540                ret = -errno;
3541                error_report("ram_block_discard_range: Failed to discard range "
3542                             "%s:%" PRIx64 " +%zx (%d)",
3543                             rb->idstr, start, length, ret);
3544                goto err;
3545            }
3546#else
3547            ret = -ENOSYS;
3548            error_report("ram_block_discard_range: MADVISE not available"
3549                         "%s:%" PRIx64 " +%zx (%d)",
3550                         rb->idstr, start, length, ret);
3551            goto err;
3552#endif
3553        }
3554        trace_ram_block_discard_range(rb->idstr, host_startaddr, length,
3555                                      need_madvise, need_fallocate, ret);
3556    } else {
3557        error_report("ram_block_discard_range: Overrun block '%s' (%" PRIu64
3558                     "/%zx/" RAM_ADDR_FMT")",
3559                     rb->idstr, start, length, rb->used_length);
3560    }
3561
3562err:
3563    return ret;
3564}
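
    /*
     * Usage sketch (illustrative; "offset" is a hypothetical guest-RAM
     * offset inside "rb").  Callers such as postcopy discard whole host
     * pages, so the range is aligned to the block's page size first:
     *
     *     size_t pagesize = qemu_ram_pagesize(rb);
     *     uint64_t start = QEMU_ALIGN_DOWN(offset, pagesize);
     *     if (ram_block_discard_range(rb, start, pagesize)) {
     *         // discard failed; the previous contents remain mapped
     *     }
     */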
3565
3566bool ramblock_is_pmem(RAMBlock *rb)
3567{
3568    return rb->flags & RAM_PMEM;
3569}
3570
3571static void mtree_print_phys_entries(int start, int end, int skip, int ptr)
3572{
3573    if (start == end - 1) {
3574        qemu_printf("\t%3d      ", start);
3575    } else {
3576        qemu_printf("\t%3d..%-3d ", start, end - 1);
3577    }
3578    qemu_printf(" skip=%d ", skip);
3579    if (ptr == PHYS_MAP_NODE_NIL) {
3580        qemu_printf(" ptr=NIL");
3581    } else if (!skip) {
3582        qemu_printf(" ptr=#%d", ptr);
3583    } else {
3584        qemu_printf(" ptr=[%d]", ptr);
3585    }
3586    qemu_printf("\n");
3587}
3588
3589#define MR_SIZE(size) (int128_nz(size) ? (hwaddr)int128_get64( \
3590                           int128_sub((size), int128_one())) : 0)
3591
3592void mtree_print_dispatch(AddressSpaceDispatch *d, MemoryRegion *root)
3593{
3594    int i;
3595
3596    qemu_printf("  Dispatch\n");
3597    qemu_printf("    Physical sections\n");
3598
3599    for (i = 0; i < d->map.sections_nb; ++i) {
3600        MemoryRegionSection *s = d->map.sections + i;
3601        const char *names[] = { " [unassigned]", " [not dirty]",
3602                                " [ROM]", " [watch]" };
3603
3604        qemu_printf("      #%d @" TARGET_FMT_plx ".." TARGET_FMT_plx
3605                    " %s%s%s%s%s",
3606            i,
3607            s->offset_within_address_space,
3608            s->offset_within_address_space + MR_SIZE(s->mr->size),
3609            s->mr->name ? s->mr->name : "(noname)",
3610            i < ARRAY_SIZE(names) ? names[i] : "",
3611            s->mr == root ? " [ROOT]" : "",
3612            s == d->mru_section ? " [MRU]" : "",
3613            s->mr->is_iommu ? " [iommu]" : "");
3614
3615        if (s->mr->alias) {
3616            qemu_printf(" alias=%s", s->mr->alias->name ?
3617                    s->mr->alias->name : "noname");
3618        }
3619        qemu_printf("\n");
3620    }
3621
3622    qemu_printf("    Nodes (%d bits per level, %d levels) ptr=[%d] skip=%d\n",
3623               P_L2_BITS, P_L2_LEVELS, d->phys_map.ptr, d->phys_map.skip);
3624    for (i = 0; i < d->map.nodes_nb; ++i) {
3625        int j, jprev;
3626        PhysPageEntry prev;
3627        Node *n = d->map.nodes + i;
3628
3629        qemu_printf("      [%d]\n", i);
3630
3631        for (j = 0, jprev = 0, prev = *n[0]; j < ARRAY_SIZE(*n); ++j) {
3632            PhysPageEntry *pe = *n + j;
3633
3634            if (pe->ptr == prev.ptr && pe->skip == prev.skip) {
3635                continue;
3636            }
3637
3638            mtree_print_phys_entries(jprev, j, prev.skip, prev.ptr);
3639
3640            jprev = j;
3641            prev = *pe;
3642        }
3643
3644        if (jprev != ARRAY_SIZE(*n)) {
3645            mtree_print_phys_entries(jprev, j, prev.skip, prev.ptr);
3646        }
3647    }
3648}
3649
3650/*
3651 * If positive, discarding RAM is disabled. If negative, discarding RAM is
3652 * required to work and cannot be disabled.
3653 */
3654static int ram_block_discard_disabled;
3655
3656int ram_block_discard_disable(bool state)
3657{
3658    int old;
3659
3660    if (!state) {
3661        qatomic_dec(&ram_block_discard_disabled);
3662        return 0;
3663    }
3664
3665    do {
3666        old = qatomic_read(&ram_block_discard_disabled);
3667        if (old < 0) {
3668            return -EBUSY;
3669        }
3670    } while (qatomic_cmpxchg(&ram_block_discard_disabled,
3671                             old, old + 1) != old);
3672    return 0;
3673}
3674
3675int ram_block_discard_require(bool state)
3676{
3677    int old;
3678
3679    if (!state) {
3680        qatomic_inc(&ram_block_discard_disabled);
3681        return 0;
3682    }
3683
3684    do {
3685        old = qatomic_read(&ram_block_discard_disabled);
3686        if (old > 0) {
3687            return -EBUSY;
3688        }
3689    } while (qatomic_cmpxchg(&ram_block_discard_disabled,
3690                             old, old - 1) != old);
3691    return 0;
3692}
3693
3694bool ram_block_discard_is_disabled(void)
3695{
3696    return qatomic_read(&ram_block_discard_disabled) > 0;
3697}
3698
3699bool ram_block_discard_is_required(void)
3700{
3701    return qatomic_read(&ram_block_discard_disabled) < 0;
3702}
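
    /*
     * Coordination sketch (illustrative; the realize/unrealize split and
     * "errp" stand for a hypothetical device's lifecycle).  Code that cannot
     * tolerate discards, e.g. because it pins guest RAM, takes the "disable"
     * side; code that fundamentally relies on discarding (such as virtio-mem)
     * takes the "require" side, and the two are mutually exclusive:
     *
     *     // realize: fails with -EBUSY if discards are already required
     *     if (ram_block_discard_disable(true)) {
     *         error_setg(errp, "guest RAM discarding is in use");
     *         return;
     *     }
     *     // ... device lifetime ...
     *     // unrealize: drop the inhibitor again
     *     ram_block_discard_disable(false);
     */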
3703