qemu/numa.c
/*
 * NUMA parameter parsing routines
 *
 * Copyright (c) 2014 Fujitsu Ltd.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "sysemu/numa.h"
#include "exec/cpu-common.h" /* for RAM_ADDR_FMT */
#include "exec/ramlist.h"
#include "qemu/bitmap.h"
#include "qom/cpu.h"
#include "qemu/error-report.h"
#include "qapi-visit.h"
#include "qapi/opts-visitor.h"
#include "hw/boards.h"
#include "sysemu/hostmem.h"
#include "qmp-commands.h"
#include "hw/mem/pc-dimm.h"
#include "qemu/option.h"
#include "qemu/config-file.h"

QemuOptsList qemu_numa_opts = {
    .name = "numa",
    .implied_opt_name = "type",
    .head = QTAILQ_HEAD_INITIALIZER(qemu_numa_opts.head),
    .desc = { { 0 } } /* validated with OptsVisitor */
};

static int have_memdevs = -1;
static int max_numa_nodeid; /* Highest specified NUMA node ID, plus one.
                             * For all nodes, nodeid < max_numa_nodeid
                             */
int nb_numa_nodes;
bool have_numa_distance;
NodeInfo numa_info[MAX_NODES];

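/*
 * Record that the guest RAM range [addr, addr + size) belongs to NUMA
 * @node, by adding it to the node's address range list.
 */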
void numa_set_mem_node_id(ram_addr_t addr, uint64_t size, uint32_t node)
{
    struct numa_addr_range *range;

    /*
     * Memory-less nodes can come here with 0 size, in which case
     * there is nothing to do.
     */
    if (!size) {
        return;
    }

    range = g_malloc0(sizeof(*range));
    range->mem_start = addr;
    range->mem_end = addr + size - 1;
    QLIST_INSERT_HEAD(&numa_info[node].addr, range, entry);
}

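/*
 * Drop the range [addr, addr + size) from NUMA @node's address range
 * list; only an exactly matching entry is removed.
 */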
void numa_unset_mem_node_id(ram_addr_t addr, uint64_t size, uint32_t node)
{
    struct numa_addr_range *range, *next;

    QLIST_FOREACH_SAFE(range, &numa_info[node].addr, entry, next) {
        if (addr == range->mem_start && (addr + size - 1) == range->mem_end) {
            QLIST_REMOVE(range, entry);
            g_free(range);
            return;
        }
    }
}

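/*
 * Walk the nodes in nodeid order, assuming their memory is laid out
 * back to back from address 0, and record each node's address range.
 */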
static void numa_set_mem_ranges(void)
{
    int i;
    ram_addr_t mem_start = 0;

    /*
     * Deduce the start address of each node and use it to store
     * the address range info in the numa_info address range list.
     */
    for (i = 0; i < nb_numa_nodes; i++) {
        numa_set_mem_node_id(mem_start, numa_info[i].node_mem, i);
        mem_start += numa_info[i].node_mem;
    }
}

/*
 * Check if @addr falls under NUMA @node.
 */
static bool numa_addr_belongs_to_node(ram_addr_t addr, uint32_t node)
{
    struct numa_addr_range *range;

    QLIST_FOREACH(range, &numa_info[node].addr, entry) {
        if (addr >= range->mem_start && addr <= range->mem_end) {
            return true;
        }
    }
    return false;
}

/*
 * Given an address, return the index of the NUMA node to which the
 * address belongs.
 */
uint32_t numa_get_node(ram_addr_t addr, Error **errp)
{
    uint32_t i;

    /* For non-NUMA configurations, check if the addr falls under node 0 */
    if (!nb_numa_nodes) {
        if (numa_addr_belongs_to_node(addr, 0)) {
            return 0;
        }
    }

    for (i = 0; i < nb_numa_nodes; i++) {
        if (numa_addr_belongs_to_node(addr, i)) {
            return i;
        }
    }

    error_setg(errp, "Address 0x" RAM_ADDR_FMT " doesn't belong to any "
                "NUMA node", addr);
    return -1;
}

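/*
 * Handle one "-numa node,..." option (e.g.
 * -numa node,nodeid=0,cpus=0-3,mem=2G): validate the node ID, bind the
 * listed CPU indexes to the node, and record its memory size, taken
 * either from a legacy mem= value or from a memdev= host memory backend.
 */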
static void parse_numa_node(MachineState *ms, NumaNodeOptions *node,
                            QemuOpts *opts, Error **errp)
{
    uint16_t nodenr;
    uint16List *cpus = NULL;
    MachineClass *mc = MACHINE_GET_CLASS(ms);

    if (node->has_nodeid) {
        nodenr = node->nodeid;
    } else {
        nodenr = nb_numa_nodes;
    }

    if (nodenr >= MAX_NODES) {
        error_setg(errp, "Max number of NUMA nodes reached: %"
                   PRIu16 "", nodenr);
        return;
    }

    if (numa_info[nodenr].present) {
        error_setg(errp, "Duplicate NUMA nodeid: %" PRIu16, nodenr);
        return;
    }

    if (!mc->cpu_index_to_instance_props) {
        error_report("NUMA is not supported by this machine-type");
        exit(1);
    }
    for (cpus = node->cpus; cpus; cpus = cpus->next) {
        CpuInstanceProperties props;
        if (cpus->value >= max_cpus) {
            error_setg(errp,
                       "CPU index (%" PRIu16 ")"
                       " should be smaller than maxcpus (%d)",
                       cpus->value, max_cpus);
            return;
        }
        props = mc->cpu_index_to_instance_props(ms, cpus->value);
        props.node_id = nodenr;
        props.has_node_id = true;
        machine_set_cpu_numa_node(ms, &props, &error_fatal);
    }

    if (node->has_mem && node->has_memdev) {
        error_setg(errp, "cannot specify both mem= and memdev=");
        return;
    }

    if (have_memdevs == -1) {
        have_memdevs = node->has_memdev;
    }
    if (node->has_memdev != have_memdevs) {
        error_setg(errp, "memdev option must be specified for either "
                   "all or no nodes");
        return;
    }

    if (node->has_mem) {
        uint64_t mem_size = node->mem;
        const char *mem_str = qemu_opt_get(opts, "mem");
        /* Fix up legacy suffix-less format: a bare number means MiB */
        if (g_ascii_isdigit(mem_str[strlen(mem_str) - 1])) {
            mem_size <<= 20;
        }
        numa_info[nodenr].node_mem = mem_size;
    }
    if (node->has_memdev) {
        Object *o;
        o = object_resolve_path_type(node->memdev, TYPE_MEMORY_BACKEND, NULL);
        if (!o) {
            error_setg(errp, "memdev=%s is ambiguous", node->memdev);
            return;
        }

        object_ref(o);
        numa_info[nodenr].node_mem = object_property_get_uint(o, "size", NULL);
        numa_info[nodenr].node_memdev = MEMORY_BACKEND(o);
    }
    numa_info[nodenr].present = true;
    max_numa_nodeid = MAX(max_numa_nodeid, nodenr + 1);
}

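/*
 * Handle one "-numa dist,src=S,dst=D,val=V" option: validate both node
 * IDs and the distance value, then store it in the distance matrix.
 */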
static void parse_numa_distance(NumaDistOptions *dist, Error **errp)
{
    uint16_t src = dist->src;
    uint16_t dst = dist->dst;
    uint8_t val = dist->val;

    if (src >= MAX_NODES || dst >= MAX_NODES) {
        error_setg(errp,
                   "Invalid node %d, maximum node ID is %d",
                   MAX(src, dst), MAX_NODES - 1);
        return;
    }

    if (!numa_info[src].present || !numa_info[dst].present) {
        error_setg(errp, "Source/Destination NUMA node is missing. "
                   "Please use '-numa node' option to declare it first.");
        return;
    }

    if (val < NUMA_DISTANCE_MIN) {
        error_setg(errp, "NUMA distance (%" PRIu8 ") is invalid, "
                   "it must be at least %d.",
                   val, NUMA_DISTANCE_MIN);
        return;
    }

    if (src == dst && val != NUMA_DISTANCE_MIN) {
        error_setg(errp, "Local distance of node %d should be %d.",
                   src, NUMA_DISTANCE_MIN);
        return;
    }

    numa_info[src].distance[dst] = val;
    have_numa_distance = true;
}

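/*
 * qemu_opts_foreach() callback: convert one -numa option into a QAPI
 * NumaOptions object and dispatch on its type (node, dist or cpu).
 * Returns 0 on success, -1 after reporting an error.
 */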
static int parse_numa(void *opaque, QemuOpts *opts, Error **errp)
{
    NumaOptions *object = NULL;
    MachineState *ms = opaque;
    Error *err = NULL;

    {
        Visitor *v = opts_visitor_new(opts);
        visit_type_NumaOptions(v, NULL, &object, &err);
        visit_free(v);
    }

    if (err) {
        goto end;
    }

    switch (object->type) {
    case NUMA_OPTIONS_TYPE_NODE:
        parse_numa_node(ms, &object->u.node, opts, &err);
        if (err) {
            goto end;
        }
        nb_numa_nodes++;
        break;
    case NUMA_OPTIONS_TYPE_DIST:
        parse_numa_distance(&object->u.dist, &err);
        if (err) {
            goto end;
        }
        break;
    case NUMA_OPTIONS_TYPE_CPU:
        if (!object->u.cpu.has_node_id) {
            error_setg(&err, "Missing mandatory node-id property");
            goto end;
        }
        if (!numa_info[object->u.cpu.node_id].present) {
            error_setg(&err, "Invalid node-id=%" PRId64 ", NUMA node must be "
                "defined with -numa node,nodeid=ID before it's used with "
                "-numa cpu,node-id=ID", object->u.cpu.node_id);
            goto end;
        }

        machine_set_cpu_numa_node(ms, qapi_NumaCpuOptions_base(&object->u.cpu),
                                  &err);
        break;
    default:
        abort();
    }

end:
    qapi_free_NumaOptions(object);
    if (err) {
        error_report_err(err);
        return -1;
    }

    return 0;
}

/* If all node pair distances are symmetric, then only distances
 * in one direction are enough. If there is even one asymmetric
 * pair, though, then all distances must be provided. The
 * distance from a node to itself is always NUMA_DISTANCE_MIN,
 * so providing it is never necessary.
 */
static void validate_numa_distance(void)
{
    int src, dst;
    bool is_asymmetrical = false;

    for (src = 0; src < nb_numa_nodes; src++) {
        for (dst = src; dst < nb_numa_nodes; dst++) {
            if (numa_info[src].distance[dst] == 0 &&
                numa_info[dst].distance[src] == 0) {
                if (src != dst) {
                    error_report("The distance between node %d and %d is "
                                 "missing; at least one distance value "
                                 "between each pair of nodes should be "
                                 "provided.", src, dst);
                    exit(EXIT_FAILURE);
                }
            }

            if (numa_info[src].distance[dst] != 0 &&
                numa_info[dst].distance[src] != 0 &&
                numa_info[src].distance[dst] !=
                numa_info[dst].distance[src]) {
                is_asymmetrical = true;
            }
        }
    }

    if (is_asymmetrical) {
        for (src = 0; src < nb_numa_nodes; src++) {
            for (dst = 0; dst < nb_numa_nodes; dst++) {
                if (src != dst && numa_info[src].distance[dst] == 0) {
                    error_report("At least one asymmetrical pair of "
                            "distances is given; please provide distances "
                            "for both directions of all node pairs.");
                    exit(EXIT_FAILURE);
                }
            }
        }
    }
}

static void complete_init_numa_distance(void)
{
    int src, dst;

    /* Fix up the NUMA distance table using the symmetric policy: an
     * asymmetric table must already be complete (this is verified by
     * validate_numa_distance() above), so any distance still missing
     * here is either a local distance or the mirror of a value that
     * was provided for the opposite direction.
     */
    for (src = 0; src < nb_numa_nodes; src++) {
        for (dst = 0; dst < nb_numa_nodes; dst++) {
            if (numa_info[src].distance[dst] == 0) {
                if (src == dst) {
                    numa_info[src].distance[dst] = NUMA_DISTANCE_MIN;
                } else {
                    numa_info[src].distance[dst] = numa_info[dst].distance[src];
                }
            }
        }
    }
}

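/*
 * Legacy auto-assignment: give every node but the last an equal share
 * of RAM rounded down to the machine's NUMA alignment, and give all of
 * the remainder to the last node (presumably kept so that older machine
 * types preserve their historical memory layout).
 */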
void numa_legacy_auto_assign_ram(MachineClass *mc, NodeInfo *nodes,
                                 int nb_nodes, ram_addr_t size)
{
    int i;
    uint64_t usedmem = 0;

    /* Align each node according to the alignment
     * requirements of the machine class
     */

    for (i = 0; i < nb_nodes - 1; i++) {
        nodes[i].node_mem = (size / nb_nodes) &
                            ~((1 << mc->numa_mem_align_shift) - 1);
        usedmem += nodes[i].node_mem;
    }
    nodes[i].node_mem = size - usedmem;
}

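/*
 * Default auto-assignment: distribute RAM equally across nodes, but
 * carry each node's rounding remainder over to the next node so the
 * shares stay as even as possible; the last node absorbs what is left.
 */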
void numa_default_auto_assign_ram(MachineClass *mc, NodeInfo *nodes,
                                  int nb_nodes, ram_addr_t size)
{
    int i;
    uint64_t usedmem = 0, node_mem;
    uint64_t granularity = size / nb_nodes;
    uint64_t propagate = 0;

    for (i = 0; i < nb_nodes - 1; i++) {
        node_mem = (granularity + propagate) &
                   ~((1 << mc->numa_mem_align_shift) - 1);
        propagate = granularity + propagate - node_mem;
        nodes[i].node_mem = node_mem;
        usedmem += node_mem;
    }
    nodes[i].node_mem = size - usedmem;
}

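/*
 * Parse and validate all -numa options for @ms: reject sparse node IDs,
 * auto-assign node memory when no node specified a size, check that the
 * node sizes add up to the RAM size, record per-node address ranges and,
 * if any distances were given, validate and complete the distance table.
 */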
void parse_numa_opts(MachineState *ms)
{
    int i;
    MachineClass *mc = MACHINE_GET_CLASS(ms);

    if (qemu_opts_foreach(qemu_find_opts("numa"), parse_numa, ms, NULL)) {
        exit(1);
    }

    assert(max_numa_nodeid <= MAX_NODES);

    /* No support for sparse NUMA node IDs yet: */
    for (i = max_numa_nodeid - 1; i >= 0; i--) {
        /* Report large node IDs first, to make mistakes easier to spot */
        if (!numa_info[i].present) {
            error_report("numa: Node ID missing: %d", i);
            exit(1);
        }
    }

    /* This must always be true if all nodes are present: */
    assert(nb_numa_nodes == max_numa_nodeid);

    if (nb_numa_nodes > 0) {
        uint64_t numa_total;

        if (nb_numa_nodes > MAX_NODES) {
            nb_numa_nodes = MAX_NODES;
        }

        /* If no memory size is given for any node, assume the default case
         * and distribute the available memory equally across all nodes
         */
        for (i = 0; i < nb_numa_nodes; i++) {
            if (numa_info[i].node_mem != 0) {
                break;
            }
        }
        if (i == nb_numa_nodes) {
            assert(mc->numa_auto_assign_ram);
            mc->numa_auto_assign_ram(mc, numa_info, nb_numa_nodes, ram_size);
        }

        numa_total = 0;
        for (i = 0; i < nb_numa_nodes; i++) {
            numa_total += numa_info[i].node_mem;
        }
        if (numa_total != ram_size) {
            error_report("total memory for NUMA nodes (0x%" PRIx64 ")"
                         " should equal RAM size (0x" RAM_ADDR_FMT ")",
                         numa_total, ram_size);
            exit(1);
        }

        for (i = 0; i < nb_numa_nodes; i++) {
            QLIST_INIT(&numa_info[i].addr);
        }

        numa_set_mem_ranges();

        /* QEMU needs at least all unique node pair distances to build
         * the whole NUMA distance table. QEMU treats the distance table
         * as symmetric by default, i.e. distance A->B == distance B->A.
         * Thus, QEMU is able to complete the distance table
         * initialization even though only distance A->B is provided and
         * distance B->A is not. QEMU knows the distance of a node to
         * itself is always NUMA_DISTANCE_MIN (10), so A->A distances may
         * be omitted. When the distances of two nodes of a pair differ,
         * i.e. distance A->B != distance B->A, the distance table is
         * asymmetric. In this case, the distances for both directions
         * of all node pairs are required.
         */
        if (have_numa_distance) {
            /* Validate enough NUMA distance information was provided. */
            validate_numa_distance();

            /* Validation succeeded, now fill in any missing distances. */
            complete_init_numa_distance();
        }
    } else {
        numa_set_mem_node_id(0, ram_size, 0);
    }
}

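/*
 * CPU pre-plug handler: check that a hot-plugged CPU's node-id property
 * matches the -numa configuration for its @slot, filling it in when the
 * caller left it unset.
 */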
void numa_cpu_pre_plug(const CPUArchId *slot, DeviceState *dev, Error **errp)
{
    int node_id = object_property_get_int(OBJECT(dev), "node-id", &error_abort);

    if (node_id == CPU_UNSET_NUMA_NODE_ID) {
        /* Due to a bug in libvirt, it doesn't pass node-id from props on
         * device_add as expected, so we have to fix it up here */
        if (slot->props.has_node_id) {
            object_property_set_int(OBJECT(dev), slot->props.node_id,
                                    "node-id", errp);
        }
    } else if (node_id != slot->props.node_id) {
        error_setg(errp, "node-id=%d must match numa node specified "
                   "with -numa option", node_id);
    }
}

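/*
 * Allocate guest RAM as one region, backed by -mem-path when given
 * (falling back to anonymous RAM on failure unless -mem-prealloc makes
 * the file backing mandatory).
 */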
static void allocate_system_memory_nonnuma(MemoryRegion *mr, Object *owner,
                                           const char *name,
                                           uint64_t ram_size)
{
    if (mem_path) {
#ifdef __linux__
        Error *err = NULL;
        memory_region_init_ram_from_file(mr, owner, name, ram_size, false,
                                         mem_path, &err);
        if (err) {
            error_report_err(err);
            if (mem_prealloc) {
                exit(1);
            }

            /* Legacy behavior: if allocation failed, fall back to
             * regular RAM allocation.
             */
            memory_region_init_ram_nomigrate(mr, owner, name, ram_size, &error_fatal);
        }
#else
        fprintf(stderr, "-mem-path not supported on this host\n");
        exit(1);
#endif
    } else {
        memory_region_init_ram_nomigrate(mr, owner, name, ram_size, &error_fatal);
    }
    vmstate_register_ram_global(mr);
}

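/*
 * Set up the system memory region: without memdev-backed NUMA nodes it
 * is a single plain allocation; otherwise @mr becomes a container and
 * each node's host memory backend is mapped into it back to back.
 */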
void memory_region_allocate_system_memory(MemoryRegion *mr, Object *owner,
                                          const char *name,
                                          uint64_t ram_size)
{
    uint64_t addr = 0;
    int i;

    if (nb_numa_nodes == 0 || !have_memdevs) {
        allocate_system_memory_nonnuma(mr, owner, name, ram_size);
        return;
    }

    memory_region_init(mr, owner, name, ram_size);
    for (i = 0; i < MAX_NODES; i++) {
        uint64_t size = numa_info[i].node_mem;
        HostMemoryBackend *backend = numa_info[i].node_memdev;
        if (!backend) {
            continue;
        }
        MemoryRegion *seg = host_memory_backend_get_memory(backend,
                                                           &error_fatal);

        if (memory_region_is_mapped(seg)) {
            char *path = object_get_canonical_path_component(OBJECT(backend));
            error_report("memory backend %s is used multiple times. Each "
                         "-numa option must use a different memdev value.",
                         path);
            exit(1);
        }

        host_memory_backend_set_mapped(backend, true);
        memory_region_add_subregion(mr, addr, seg);
        vmstate_register_ram_global(seg);
        addr += size;
    }
}

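/* Add the size of each hot-plugged DIMM to its node's entry in @node_mem. */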
static void numa_stat_memory_devices(uint64_t node_mem[])
{
    MemoryDeviceInfoList *info_list = NULL;
    MemoryDeviceInfoList **prev = &info_list;
    MemoryDeviceInfoList *info;

    qmp_pc_dimm_device_list(qdev_get_machine(), &prev);
    for (info = info_list; info; info = info->next) {
        MemoryDeviceInfo *value = info->value;

        if (value) {
            switch (value->type) {
            case MEMORY_DEVICE_INFO_KIND_DIMM:
                node_mem[value->u.dimm.data->node] += value->u.dimm.data->size;
                break;
            default:
                break;
            }
        }
    }
    qapi_free_MemoryDeviceInfoList(info_list);
}

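/*
 * Fill @node_mem[] with each node's total memory: the configured
 * node_mem plus any hot-plugged memory devices.
 */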
void query_numa_node_mem(uint64_t node_mem[])
{
    int i;

    if (nb_numa_nodes <= 0) {
        return;
    }

    numa_stat_memory_devices(node_mem);
    for (i = 0; i < nb_numa_nodes; i++) {
        node_mem[i] += numa_info[i].node_mem;
    }
}

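/*
 * object_child_foreach() callback: if @obj is a host memory backend,
 * prepend its properties to the MemdevList handed in via @opaque.
 */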
static int query_memdev(Object *obj, void *opaque)
{
    MemdevList **list = opaque;
    MemdevList *m = NULL;

    if (object_dynamic_cast(obj, TYPE_MEMORY_BACKEND)) {
        m = g_malloc0(sizeof(*m));

        m->value = g_malloc0(sizeof(*m->value));

        m->value->id = object_property_get_str(obj, "id", NULL);
        m->value->has_id = !!m->value->id;

        m->value->size = object_property_get_uint(obj, "size",
                                                  &error_abort);
        m->value->merge = object_property_get_bool(obj, "merge",
                                                   &error_abort);
        m->value->dump = object_property_get_bool(obj, "dump",
                                                  &error_abort);
        m->value->prealloc = object_property_get_bool(obj,
                                                      "prealloc",
                                                      &error_abort);
        m->value->policy = object_property_get_enum(obj,
                                                    "policy",
                                                    "HostMemPolicy",
                                                    &error_abort);
        object_property_get_uint16List(obj, "host-nodes",
                                       &m->value->host_nodes,
                                       &error_abort);

        m->next = *list;
        *list = m;
    }

    return 0;
}

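/* QMP handler for query-memdev: list all host memory backend objects. */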
MemdevList *qmp_query_memdev(Error **errp)
{
    Object *obj = object_get_objects_root();
    MemdevList *list = NULL;

    object_child_foreach(obj, query_memdev, &list);
    return list;
}

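/*
 * RAM block notifiers: registered RAMBlockNotifier callbacks are invoked
 * whenever a RAM block is added to or removed from ram_list.
 */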
void ram_block_notifier_add(RAMBlockNotifier *n)
{
    QLIST_INSERT_HEAD(&ram_list.ramblock_notifiers, n, next);
}

void ram_block_notifier_remove(RAMBlockNotifier *n)
{
    QLIST_REMOVE(n, next);
}

void ram_block_notify_add(void *host, size_t size)
{
    RAMBlockNotifier *notifier;

    QLIST_FOREACH(notifier, &ram_list.ramblock_notifiers, next) {
        notifier->ram_block_added(notifier, host, size);
    }
}

void ram_block_notify_remove(void *host, size_t size)
{
    RAMBlockNotifier *notifier;

    QLIST_FOREACH(notifier, &ram_list.ramblock_notifiers, next) {
        notifier->ram_block_removed(notifier, host, size);
    }
}