qemu/numa.c
   1/*
   2 * NUMA parameter parsing routines
   3 *
   4 * Copyright (c) 2014 Fujitsu Ltd.
   5 *
   6 * Permission is hereby granted, free of charge, to any person obtaining a copy
   7 * of this software and associated documentation files (the "Software"), to deal
   8 * in the Software without restriction, including without limitation the rights
   9 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
  10 * copies of the Software, and to permit persons to whom the Software is
  11 * furnished to do so, subject to the following conditions:
  12 *
  13 * The above copyright notice and this permission notice shall be included in
  14 * all copies or substantial portions of the Software.
  15 *
  16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
  19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
  21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
  22 * THE SOFTWARE.
  23 */
  24
  25#include "qemu/osdep.h"
  26#include "sysemu/numa.h"
  27#include "exec/cpu-common.h"
  28#include "exec/ramlist.h"
  29#include "qemu/bitmap.h"
  30#include "qom/cpu.h"
  31#include "qemu/error-report.h"
  32#include "include/exec/cpu-common.h" /* for RAM_ADDR_FMT */
  33#include "qapi-visit.h"
  34#include "qapi/opts-visitor.h"
  35#include "hw/boards.h"
  36#include "sysemu/hostmem.h"
  37#include "qmp-commands.h"
  38#include "hw/mem/pc-dimm.h"
  39#include "qemu/option.h"
  40#include "qemu/config-file.h"
  41#include "qemu/cutils.h"
  42
/*
 * Registration record for the "-numa" option group.  No suboption
 * descriptors are listed here; the options are validated against the
 * QAPI NumaOptions schema through an OptsVisitor instead.
 */
QemuOptsList qemu_numa_opts = {
    .name = "numa",
    .implied_opt_name = "type",
    .head = QTAILQ_HEAD_INITIALIZER(qemu_numa_opts.head),
    .desc = { { 0 } } /* validated with OptsVisitor */
};
  49
/* -1: undecided; otherwise 0/1 once the first "-numa node" is parsed.
 * All nodes must agree on whether memdev= is used (checked in
 * parse_numa_node). */
static int have_memdevs = -1;
static int max_numa_nodeid; /* Highest specified NUMA node ID, plus one.
                             * For all nodes, nodeid < max_numa_nodeid
                             */
int nb_numa_nodes;              /* number of NUMA nodes declared so far */
bool have_numa_distance;        /* set once any "-numa dist" was parsed */
NodeInfo numa_info[MAX_NODES];
  57
  58void numa_set_mem_node_id(ram_addr_t addr, uint64_t size, uint32_t node)
  59{
  60    struct numa_addr_range *range;
  61
  62    /*
  63     * Memory-less nodes can come here with 0 size in which case,
  64     * there is nothing to do.
  65     */
  66    if (!size) {
  67        return;
  68    }
  69
  70    range = g_malloc0(sizeof(*range));
  71    range->mem_start = addr;
  72    range->mem_end = addr + size - 1;
  73    QLIST_INSERT_HEAD(&numa_info[node].addr, range, entry);
  74}
  75
  76void numa_unset_mem_node_id(ram_addr_t addr, uint64_t size, uint32_t node)
  77{
  78    struct numa_addr_range *range, *next;
  79
  80    QLIST_FOREACH_SAFE(range, &numa_info[node].addr, entry, next) {
  81        if (addr == range->mem_start && (addr + size - 1) == range->mem_end) {
  82            QLIST_REMOVE(range, entry);
  83            g_free(range);
  84            return;
  85        }
  86    }
  87}
  88
  89static void numa_set_mem_ranges(void)
  90{
  91    int i;
  92    ram_addr_t mem_start = 0;
  93
  94    /*
  95     * Deduce start address of each node and use it to store
  96     * the address range info in numa_info address range list
  97     */
  98    for (i = 0; i < nb_numa_nodes; i++) {
  99        numa_set_mem_node_id(mem_start, numa_info[i].node_mem, i);
 100        mem_start += numa_info[i].node_mem;
 101    }
 102}
 103
 104/*
 105 * Check if @addr falls under NUMA @node.
 106 */
 107static bool numa_addr_belongs_to_node(ram_addr_t addr, uint32_t node)
 108{
 109    struct numa_addr_range *range;
 110
 111    QLIST_FOREACH(range, &numa_info[node].addr, entry) {
 112        if (addr >= range->mem_start && addr <= range->mem_end) {
 113            return true;
 114        }
 115    }
 116    return false;
 117}
 118
 119/*
 120 * Given an address, return the index of the NUMA node to which the
 121 * address belongs to.
 122 */
 123uint32_t numa_get_node(ram_addr_t addr, Error **errp)
 124{
 125    uint32_t i;
 126
 127    /* For non NUMA configurations, check if the addr falls under node 0 */
 128    if (!nb_numa_nodes) {
 129        if (numa_addr_belongs_to_node(addr, 0)) {
 130            return 0;
 131        }
 132    }
 133
 134    for (i = 0; i < nb_numa_nodes; i++) {
 135        if (numa_addr_belongs_to_node(addr, i)) {
 136            return i;
 137        }
 138    }
 139
 140    error_setg(errp, "Address 0x" RAM_ADDR_FMT " doesn't belong to any "
 141                "NUMA node", addr);
 142    return -1;
 143}
 144
/*
 * Apply one "-numa node,..." option: register the node ID, bind the
 * listed CPUs to it, and record its memory size or backing memdev.
 *
 * On failure an error is stored in @errp; global state is updated only
 * up to the failing check.  Note that parse_numa_opts() calls this with
 * errp == NULL for the implicitly-added node, so errors must not be
 * mandatory for callers to consume.
 */
static void parse_numa_node(MachineState *ms, NumaNodeOptions *node,
                            Error **errp)
{
    uint16_t nodenr;
    uint16List *cpus = NULL;
    MachineClass *mc = MACHINE_GET_CLASS(ms);

    /* Without an explicit nodeid, assign the next sequential ID. */
    if (node->has_nodeid) {
        nodenr = node->nodeid;
    } else {
        nodenr = nb_numa_nodes;
    }

    if (nodenr >= MAX_NODES) {
        error_setg(errp, "Max number of NUMA nodes reached: %"
                   PRIu16 "", nodenr);
        return;
    }

    if (numa_info[nodenr].present) {
        error_setg(errp, "Duplicate NUMA nodeid: %" PRIu16, nodenr);
        return;
    }

    /* NOTE(review): this path bypasses @errp and exits directly; callers
     * passing NULL errp (see parse_numa_opts) rely on exit-on-error here. */
    if (!mc->cpu_index_to_instance_props) {
        error_report("NUMA is not supported by this machine-type");
        exit(1);
    }
    for (cpus = node->cpus; cpus; cpus = cpus->next) {
        CpuInstanceProperties props;
        if (cpus->value >= max_cpus) {
            error_setg(errp,
                       "CPU index (%" PRIu16 ")"
                       " should be smaller than maxcpus (%d)",
                       cpus->value, max_cpus);
            return;
        }
        /* Map the flat CPU index to topology properties, then pin the
         * CPU to this node; failure here is fatal by design. */
        props = mc->cpu_index_to_instance_props(ms, cpus->value);
        props.node_id = nodenr;
        props.has_node_id = true;
        machine_set_cpu_numa_node(ms, &props, &error_fatal);
    }

    if (node->has_mem && node->has_memdev) {
        error_setg(errp, "cannot specify both mem= and memdev=");
        return;
    }

    /* The first node parsed decides whether memdev= is in use; all
     * later nodes must be consistent with that choice. */
    if (have_memdevs == -1) {
        have_memdevs = node->has_memdev;
    }
    if (node->has_memdev != have_memdevs) {
        error_setg(errp, "memdev option must be specified for either "
                   "all or no nodes");
        return;
    }

    if (node->has_mem) {
        numa_info[nodenr].node_mem = node->mem;
    }
    if (node->has_memdev) {
        Object *o;
        /* NOTE(review): object_resolve_path_type() also returns NULL when
         * the path matches nothing, so "ambiguous" can be misleading. */
        o = object_resolve_path_type(node->memdev, TYPE_MEMORY_BACKEND, NULL);
        if (!o) {
            error_setg(errp, "memdev=%s is ambiguous", node->memdev);
            return;
        }

        /* Keep a reference for the stored node_memdev pointer. */
        object_ref(o);
        numa_info[nodenr].node_mem = object_property_get_uint(o, "size", NULL);
        numa_info[nodenr].node_memdev = MEMORY_BACKEND(o);
    }
    /* Commit: the node is now visible to later validation passes. */
    numa_info[nodenr].present = true;
    max_numa_nodeid = MAX(max_numa_nodeid, nodenr + 1);
    nb_numa_nodes++;
}
 221
 222static void parse_numa_distance(NumaDistOptions *dist, Error **errp)
 223{
 224    uint16_t src = dist->src;
 225    uint16_t dst = dist->dst;
 226    uint8_t val = dist->val;
 227
 228    if (src >= MAX_NODES || dst >= MAX_NODES) {
 229        error_setg(errp,
 230                   "Invalid node %d, max possible could be %d",
 231                   MAX(src, dst), MAX_NODES);
 232        return;
 233    }
 234
 235    if (!numa_info[src].present || !numa_info[dst].present) {
 236        error_setg(errp, "Source/Destination NUMA node is missing. "
 237                   "Please use '-numa node' option to declare it first.");
 238        return;
 239    }
 240
 241    if (val < NUMA_DISTANCE_MIN) {
 242        error_setg(errp, "NUMA distance (%" PRIu8 ") is invalid, "
 243                   "it shouldn't be less than %d.",
 244                   val, NUMA_DISTANCE_MIN);
 245        return;
 246    }
 247
 248    if (src == dst && val != NUMA_DISTANCE_MIN) {
 249        error_setg(errp, "Local distance of node %d should be %d.",
 250                   src, NUMA_DISTANCE_MIN);
 251        return;
 252    }
 253
 254    numa_info[src].distance[dst] = val;
 255    have_numa_distance = true;
 256}
 257
/*
 * qemu_opts_foreach() callback: parse a single "-numa ..." option.
 *
 * Converts @opts into a NumaOptions QAPI object and dispatches on its
 * type (node, dist, or cpu).  Returns 0 on success, -1 after printing
 * the error; the @errp parameter required by the callback signature is
 * not used for reporting.
 */
static int parse_numa(void *opaque, QemuOpts *opts, Error **errp)
{
    NumaOptions *object = NULL;
    MachineState *ms = opaque;
    Error *err = NULL;

    {
        Visitor *v = opts_visitor_new(opts);
        visit_type_NumaOptions(v, NULL, &object, &err);
        visit_free(v);
    }

    if (err) {
        goto end;
    }

    /* Fix up legacy suffix-less format */
    if ((object->type == NUMA_OPTIONS_TYPE_NODE) && object->u.node.has_mem) {
        /* Re-parse the raw "mem" string with MiB as the default unit;
         * the visitor above already accepted it, so the result of this
         * second parse is deliberately unchecked. */
        const char *mem_str = qemu_opt_get(opts, "mem");
        qemu_strtosz_MiB(mem_str, NULL, &object->u.node.mem);
    }

    switch (object->type) {
    case NUMA_OPTIONS_TYPE_NODE:
        parse_numa_node(ms, &object->u.node, &err);
        if (err) {
            goto end;
        }
        break;
    case NUMA_OPTIONS_TYPE_DIST:
        parse_numa_distance(&object->u.dist, &err);
        if (err) {
            goto end;
        }
        break;
    case NUMA_OPTIONS_TYPE_CPU:
        /* "-numa cpu" needs node-id, and the node must already exist. */
        if (!object->u.cpu.has_node_id) {
            error_setg(&err, "Missing mandatory node-id property");
            goto end;
        }
        if (!numa_info[object->u.cpu.node_id].present) {
            error_setg(&err, "Invalid node-id=%" PRId64 ", NUMA node must be "
                "defined with -numa node,nodeid=ID before it's used with "
                "-numa cpu,node-id=ID", object->u.cpu.node_id);
            goto end;
        }

        machine_set_cpu_numa_node(ms, qapi_NumaCpuOptions_base(&object->u.cpu),
                                  &err);
        break;
    default:
        /* The visitor guarantees a valid type; anything else is a bug. */
        abort();
    }

end:
    /* Single cleanup path: free the QAPI object, then report. */
    qapi_free_NumaOptions(object);
    if (err) {
        error_report_err(err);
        return -1;
    }

    return 0;
}
 321
 322/* If all node pair distances are symmetric, then only distances
 323 * in one direction are enough. If there is even one asymmetric
 324 * pair, though, then all distances must be provided. The
 325 * distance from a node to itself is always NUMA_DISTANCE_MIN,
 326 * so providing it is never necessary.
 327 */
 328static void validate_numa_distance(void)
 329{
 330    int src, dst;
 331    bool is_asymmetrical = false;
 332
 333    for (src = 0; src < nb_numa_nodes; src++) {
 334        for (dst = src; dst < nb_numa_nodes; dst++) {
 335            if (numa_info[src].distance[dst] == 0 &&
 336                numa_info[dst].distance[src] == 0) {
 337                if (src != dst) {
 338                    error_report("The distance between node %d and %d is "
 339                                 "missing, at least one distance value "
 340                                 "between each nodes should be provided.",
 341                                 src, dst);
 342                    exit(EXIT_FAILURE);
 343                }
 344            }
 345
 346            if (numa_info[src].distance[dst] != 0 &&
 347                numa_info[dst].distance[src] != 0 &&
 348                numa_info[src].distance[dst] !=
 349                numa_info[dst].distance[src]) {
 350                is_asymmetrical = true;
 351            }
 352        }
 353    }
 354
 355    if (is_asymmetrical) {
 356        for (src = 0; src < nb_numa_nodes; src++) {
 357            for (dst = 0; dst < nb_numa_nodes; dst++) {
 358                if (src != dst && numa_info[src].distance[dst] == 0) {
 359                    error_report("At least one asymmetrical pair of "
 360                            "distances is given, please provide distances "
 361                            "for both directions of all node pairs.");
 362                    exit(EXIT_FAILURE);
 363                }
 364            }
 365        }
 366    }
 367}
 368
 369static void complete_init_numa_distance(void)
 370{
 371    int src, dst;
 372
 373    /* Fixup NUMA distance by symmetric policy because if it is an
 374     * asymmetric distance table, it should be a complete table and
 375     * there would not be any missing distance except local node, which
 376     * is verified by validate_numa_distance above.
 377     */
 378    for (src = 0; src < nb_numa_nodes; src++) {
 379        for (dst = 0; dst < nb_numa_nodes; dst++) {
 380            if (numa_info[src].distance[dst] == 0) {
 381                if (src == dst) {
 382                    numa_info[src].distance[dst] = NUMA_DISTANCE_MIN;
 383                } else {
 384                    numa_info[src].distance[dst] = numa_info[dst].distance[src];
 385                }
 386            }
 387        }
 388    }
 389}
 390
 391void numa_legacy_auto_assign_ram(MachineClass *mc, NodeInfo *nodes,
 392                                 int nb_nodes, ram_addr_t size)
 393{
 394    int i;
 395    uint64_t usedmem = 0;
 396
 397    /* Align each node according to the alignment
 398     * requirements of the machine class
 399     */
 400
 401    for (i = 0; i < nb_nodes - 1; i++) {
 402        nodes[i].node_mem = (size / nb_nodes) &
 403                            ~((1 << mc->numa_mem_align_shift) - 1);
 404        usedmem += nodes[i].node_mem;
 405    }
 406    nodes[i].node_mem = size - usedmem;
 407}
 408
 409void numa_default_auto_assign_ram(MachineClass *mc, NodeInfo *nodes,
 410                                  int nb_nodes, ram_addr_t size)
 411{
 412    int i;
 413    uint64_t usedmem = 0, node_mem;
 414    uint64_t granularity = size / nb_nodes;
 415    uint64_t propagate = 0;
 416
 417    for (i = 0; i < nb_nodes - 1; i++) {
 418        node_mem = (granularity + propagate) &
 419                   ~((1 << mc->numa_mem_align_shift) - 1);
 420        propagate = granularity + propagate - node_mem;
 421        nodes[i].node_mem = node_mem;
 422        usedmem += node_mem;
 423    }
 424    nodes[i].node_mem = size - usedmem;
 425}
 426
/*
 * Parse and validate all "-numa" options for @ms, then finalize global
 * NUMA state: node memory assignment, address ranges and the distance
 * table.  Any validation failure terminates QEMU.
 */
void parse_numa_opts(MachineState *ms)
{
    int i;
    MachineClass *mc = MACHINE_GET_CLASS(ms);

    if (qemu_opts_foreach(qemu_find_opts("numa"), parse_numa, ms, NULL)) {
        exit(1);
    }

    /*
     * If memory hotplug is enabled (slots > 0) but without '-numa'
     * options explicitly on CLI, guestes will break.
     *
     *   Windows: won't enable memory hotplug without SRAT table at all
     *
     *   Linux: if QEMU is started with initial memory all below 4Gb
     *   and no SRAT table present, guest kernel will use nommu DMA ops,
     *   which breaks 32bit hw drivers when memory is hotplugged and
     *   guest tries to use it with that drivers.
     *
     * Enable NUMA implicitly by adding a new NUMA node automatically.
     */
    if (ms->ram_slots > 0 && nb_numa_nodes == 0 &&
        mc->auto_enable_numa_with_memhp) {
            /* errp == NULL: parse_numa_node exits on fatal problems. */
            NumaNodeOptions node = { };
            parse_numa_node(ms, &node, NULL);
    }

    assert(max_numa_nodeid <= MAX_NODES);

    /* No support for sparse NUMA node IDs yet: */
    for (i = max_numa_nodeid - 1; i >= 0; i--) {
        /* Report large node IDs first, to make mistakes easier to spot */
        if (!numa_info[i].present) {
            error_report("numa: Node ID missing: %d", i);
            exit(1);
        }
    }

    /* This must be always true if all nodes are present: */
    assert(nb_numa_nodes == max_numa_nodeid);

    if (nb_numa_nodes > 0) {
        uint64_t numa_total;

        /* NOTE(review): unreachable after the asserts above
         * (nb_numa_nodes == max_numa_nodeid <= MAX_NODES); kept as a
         * defensive clamp. */
        if (nb_numa_nodes > MAX_NODES) {
            nb_numa_nodes = MAX_NODES;
        }

        /* If no memory size is given for any node, assume the default case
         * and distribute the available memory equally across all nodes
         */
        for (i = 0; i < nb_numa_nodes; i++) {
            if (numa_info[i].node_mem != 0) {
                break;
            }
        }
        if (i == nb_numa_nodes) {
            assert(mc->numa_auto_assign_ram);
            mc->numa_auto_assign_ram(mc, numa_info, nb_numa_nodes, ram_size);
        }

        /* The per-node sizes must add up exactly to the machine RAM. */
        numa_total = 0;
        for (i = 0; i < nb_numa_nodes; i++) {
            numa_total += numa_info[i].node_mem;
        }
        if (numa_total != ram_size) {
            error_report("total memory for NUMA nodes (0x%" PRIx64 ")"
                         " should equal RAM size (0x" RAM_ADDR_FMT ")",
                         numa_total, ram_size);
            exit(1);
        }

        for (i = 0; i < nb_numa_nodes; i++) {
            QLIST_INIT(&numa_info[i].addr);
        }

        numa_set_mem_ranges();

        /* QEMU needs at least all unique node pair distances to build
         * the whole NUMA distance table. QEMU treats the distance table
         * as symmetric by default, i.e. distance A->B == distance B->A.
         * Thus, QEMU is able to complete the distance table
         * initialization even though only distance A->B is provided and
         * distance B->A is not. QEMU knows the distance of a node to
         * itself is always 10, so A->A distances may be omitted. When
         * the distances of two nodes of a pair differ, i.e. distance
         * A->B != distance B->A, then that means the distance table is
         * asymmetric. In this case, the distances for both directions
         * of all node pairs are required.
         */
        if (have_numa_distance) {
            /* Validate enough NUMA distance information was provided. */
            validate_numa_distance();

            /* Validation succeeded, now fill in any missing distances. */
            complete_init_numa_distance();
        }
    } else {
        /* No NUMA: record all RAM as a single range under node 0. */
        numa_set_mem_node_id(0, ram_size, 0);
    }
}
 529
 530void numa_cpu_pre_plug(const CPUArchId *slot, DeviceState *dev, Error **errp)
 531{
 532    int node_id = object_property_get_int(OBJECT(dev), "node-id", &error_abort);
 533
 534    if (node_id == CPU_UNSET_NUMA_NODE_ID) {
 535        /* due to bug in libvirt, it doesn't pass node-id from props on
 536         * device_add as expected, so we have to fix it up here */
 537        if (slot->props.has_node_id) {
 538            object_property_set_int(OBJECT(dev), slot->props.node_id,
 539                                    "node-id", errp);
 540        }
 541    } else if (node_id != slot->props.node_id) {
 542        error_setg(errp, "node-id=%d must match numa node specified "
 543                   "with -numa option", node_id);
 544    }
 545}
 546
/*
 * Allocate @ram_size bytes of guest RAM into @mr without NUMA placement.
 *
 * With -mem-path (Linux only), try to back the region with a file from
 * that path; on failure, exit if -mem-prealloc was requested, otherwise
 * fall back to an anonymous RAM allocation (legacy behavior).  The
 * region is registered for migration in all cases.
 */
static void allocate_system_memory_nonnuma(MemoryRegion *mr, Object *owner,
                                           const char *name,
                                           uint64_t ram_size)
{
    if (mem_path) {
#ifdef __linux__
        Error *err = NULL;
        memory_region_init_ram_from_file(mr, owner, name, ram_size, false,
                                         mem_path, &err);
        if (err) {
            error_report_err(err);
            /* Preallocation was explicitly requested: failing to get
             * file-backed memory is fatal. */
            if (mem_prealloc) {
                exit(1);
            }

            /* Legacy behavior: if allocation failed, fall back to
             * regular RAM allocation.
             */
            memory_region_init_ram_nomigrate(mr, owner, name, ram_size, &error_fatal);
        }
#else
        fprintf(stderr, "-mem-path not supported on this host\n");
        exit(1);
#endif
    } else {
        memory_region_init_ram_nomigrate(mr, owner, name, ram_size, &error_fatal);
    }
    vmstate_register_ram_global(mr);
}
 576
 577void memory_region_allocate_system_memory(MemoryRegion *mr, Object *owner,
 578                                          const char *name,
 579                                          uint64_t ram_size)
 580{
 581    uint64_t addr = 0;
 582    int i;
 583
 584    if (nb_numa_nodes == 0 || !have_memdevs) {
 585        allocate_system_memory_nonnuma(mr, owner, name, ram_size);
 586        return;
 587    }
 588
 589    memory_region_init(mr, owner, name, ram_size);
 590    for (i = 0; i < nb_numa_nodes; i++) {
 591        uint64_t size = numa_info[i].node_mem;
 592        HostMemoryBackend *backend = numa_info[i].node_memdev;
 593        if (!backend) {
 594            continue;
 595        }
 596        MemoryRegion *seg = host_memory_backend_get_memory(backend,
 597                                                           &error_fatal);
 598
 599        if (memory_region_is_mapped(seg)) {
 600            char *path = object_get_canonical_path_component(OBJECT(backend));
 601            error_report("memory backend %s is used multiple times. Each "
 602                         "-numa option must use a different memdev value.",
 603                         path);
 604            exit(1);
 605        }
 606
 607        host_memory_backend_set_mapped(backend, true);
 608        memory_region_add_subregion(mr, addr, seg);
 609        vmstate_register_ram_global(seg);
 610        addr += size;
 611    }
 612}
 613
 614static void numa_stat_memory_devices(NumaNodeMem node_mem[])
 615{
 616    MemoryDeviceInfoList *info_list = NULL;
 617    MemoryDeviceInfoList **prev = &info_list;
 618    MemoryDeviceInfoList *info;
 619    PCDIMMDeviceInfo     *pcdimm_info;
 620
 621    qmp_pc_dimm_device_list(qdev_get_machine(), &prev);
 622    for (info = info_list; info; info = info->next) {
 623        MemoryDeviceInfo *value = info->value;
 624
 625        if (value) {
 626            switch (value->type) {
 627            case MEMORY_DEVICE_INFO_KIND_DIMM: {
 628                pcdimm_info = value->u.dimm.data;
 629                node_mem[pcdimm_info->node].node_mem += pcdimm_info->size;
 630                if (pcdimm_info->hotpluggable && pcdimm_info->hotplugged) {
 631                    node_mem[pcdimm_info->node].node_plugged_mem +=
 632                        pcdimm_info->size;
 633                }
 634                break;
 635            }
 636
 637            default:
 638                break;
 639            }
 640        }
 641    }
 642    qapi_free_MemoryDeviceInfoList(info_list);
 643}
 644
 645void query_numa_node_mem(NumaNodeMem node_mem[])
 646{
 647    int i;
 648
 649    if (nb_numa_nodes <= 0) {
 650        return;
 651    }
 652
 653    numa_stat_memory_devices(node_mem);
 654    for (i = 0; i < nb_numa_nodes; i++) {
 655        node_mem[i].node_mem += numa_info[i].node_mem;
 656    }
 657}
 658
 659static int query_memdev(Object *obj, void *opaque)
 660{
 661    MemdevList **list = opaque;
 662    MemdevList *m = NULL;
 663
 664    if (object_dynamic_cast(obj, TYPE_MEMORY_BACKEND)) {
 665        m = g_malloc0(sizeof(*m));
 666
 667        m->value = g_malloc0(sizeof(*m->value));
 668
 669        m->value->id = object_property_get_str(obj, "id", NULL);
 670        m->value->has_id = !!m->value->id;
 671
 672        m->value->size = object_property_get_uint(obj, "size",
 673                                                  &error_abort);
 674        m->value->merge = object_property_get_bool(obj, "merge",
 675                                                   &error_abort);
 676        m->value->dump = object_property_get_bool(obj, "dump",
 677                                                  &error_abort);
 678        m->value->prealloc = object_property_get_bool(obj,
 679                                                      "prealloc",
 680                                                      &error_abort);
 681        m->value->policy = object_property_get_enum(obj,
 682                                                    "policy",
 683                                                    "HostMemPolicy",
 684                                                    &error_abort);
 685        object_property_get_uint16List(obj, "host-nodes",
 686                                       &m->value->host_nodes,
 687                                       &error_abort);
 688
 689        m->next = *list;
 690        *list = m;
 691    }
 692
 693    return 0;
 694}
 695
 696MemdevList *qmp_query_memdev(Error **errp)
 697{
 698    Object *obj = object_get_objects_root();
 699    MemdevList *list = NULL;
 700
 701    object_child_foreach(obj, query_memdev, &list);
 702    return list;
 703}
 704
/* Register @n to be called back when RAM blocks are added or removed. */
void ram_block_notifier_add(RAMBlockNotifier *n)
{
    QLIST_INSERT_HEAD(&ram_list.ramblock_notifiers, n, next);
}
 709
/* Unregister a notifier previously added with ram_block_notifier_add(). */
void ram_block_notifier_remove(RAMBlockNotifier *n)
{
    QLIST_REMOVE(n, next);
}
 714
 715void ram_block_notify_add(void *host, size_t size)
 716{
 717    RAMBlockNotifier *notifier;
 718
 719    QLIST_FOREACH(notifier, &ram_list.ramblock_notifiers, next) {
 720        notifier->ram_block_added(notifier, host, size);
 721    }
 722}
 723
 724void ram_block_notify_remove(void *host, size_t size)
 725{
 726    RAMBlockNotifier *notifier;
 727
 728    QLIST_FOREACH(notifier, &ram_list.ramblock_notifiers, next) {
 729        notifier->ram_block_removed(notifier, host, size);
 730    }
 731}
 732