qemu/hw/arm/virt-acpi-build.c
/* Support for generating ACPI tables and passing them to Guests
 *
 * ARM virt ACPI generation
 *
 * Copyright (C) 2008-2010  Kevin O'Connor <kevin@koconnor.net>
 * Copyright (C) 2006 Fabrice Bellard
 * Copyright (C) 2013 Red Hat Inc
 *
 * Author: Michael S. Tsirkin <mst@redhat.com>
 *
 * Copyright (c) 2015 HUAWEI TECHNOLOGIES CO.,LTD.
 *
 * Author: Shannon Zhao <zhaoshenglong@huawei.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu/bitmap.h"
#include "trace.h"
#include "hw/core/cpu.h"
#include "target/arm/cpu.h"
#include "hw/acpi/acpi-defs.h"
#include "hw/acpi/acpi.h"
#include "hw/nvram/fw_cfg.h"
#include "hw/acpi/bios-linker-loader.h"
#include "hw/acpi/aml-build.h"
#include "hw/acpi/utils.h"
#include "hw/acpi/pci.h"
#include "hw/acpi/memory_hotplug.h"
#include "hw/acpi/generic_event_device.h"
#include "hw/pci/pcie_host.h"
#include "hw/pci/pci.h"
#include "hw/arm/virt.h"
#include "sysemu/numa.h"
#include "sysemu/reset.h"
#include "kvm_arm.h"
#include "migration/vmstate.h"

#define ARM_SPI_BASE 32

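/*
 * Add one ACPI0007 processor Device per CPU to the DSDT. The _UID of
 * each entry is the CPU index, matching the uid of the corresponding
 * MADT GICC entry emitted in build_madt() below.
 */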
static void acpi_dsdt_add_cpus(Aml *scope, int smp_cpus)
{
    uint16_t i;

    for (i = 0; i < smp_cpus; i++) {
        Aml *dev = aml_device("C%.03X", i);
        aml_append(dev, aml_name_decl("_HID", aml_string("ACPI0007")));
        aml_append(dev, aml_name_decl("_UID", aml_int(i)));
        aml_append(scope, dev);
    }
}

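/*
 * Describe the PL011 UART (ARMH0011) with its MMIO window and SPI; the
 * same device is advertised as the console by the SPCR table built in
 * build_spcr().
 */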
static void acpi_dsdt_add_uart(Aml *scope, const MemMapEntry *uart_memmap,
                                           uint32_t uart_irq)
{
    Aml *dev = aml_device("COM0");
    aml_append(dev, aml_name_decl("_HID", aml_string("ARMH0011")));
    aml_append(dev, aml_name_decl("_UID", aml_int(0)));

    Aml *crs = aml_resource_template();
    aml_append(crs, aml_memory32_fixed(uart_memmap->base,
                                       uart_memmap->size, AML_READ_WRITE));
    aml_append(crs,
               aml_interrupt(AML_CONSUMER, AML_LEVEL, AML_ACTIVE_HIGH,
                             AML_EXCLUSIVE, &uart_irq, 1));
    aml_append(dev, aml_name_decl("_CRS", crs));

    /* The _ADR entry is used to link this device to the UART described
     * in the SPCR table, i.e. SPCR.base_address.address == _ADR.
     */
    aml_append(dev, aml_name_decl("_ADR", aml_int(uart_memmap->base)));

    aml_append(scope, dev);
}

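/* Expose the fw_cfg MMIO interface (QEMU0002) so guest software can locate it. */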
static void acpi_dsdt_add_fw_cfg(Aml *scope, const MemMapEntry *fw_cfg_memmap)
{
    Aml *dev = aml_device("FWCF");
    aml_append(dev, aml_name_decl("_HID", aml_string("QEMU0002")));
    /* device present, functioning, decoding, not shown in UI */
    aml_append(dev, aml_name_decl("_STA", aml_int(0xB)));
    aml_append(dev, aml_name_decl("_CCA", aml_int(1)));

    Aml *crs = aml_resource_template();
    aml_append(crs, aml_memory32_fixed(fw_cfg_memmap->base,
                                       fw_cfg_memmap->size, AML_READ_WRITE));
    aml_append(dev, aml_name_decl("_CRS", crs));
    aml_append(scope, dev);
}

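/*
 * The flash memory map entry covers two flash devices; split it evenly
 * into two LNRO0015 devices, FLS0 and FLS1.
 */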
static void acpi_dsdt_add_flash(Aml *scope, const MemMapEntry *flash_memmap)
{
    Aml *dev, *crs;
    hwaddr base = flash_memmap->base;
    hwaddr size = flash_memmap->size / 2;

    dev = aml_device("FLS0");
    aml_append(dev, aml_name_decl("_HID", aml_string("LNRO0015")));
    aml_append(dev, aml_name_decl("_UID", aml_int(0)));

    crs = aml_resource_template();
    aml_append(crs, aml_memory32_fixed(base, size, AML_READ_WRITE));
    aml_append(dev, aml_name_decl("_CRS", crs));
    aml_append(scope, dev);

    dev = aml_device("FLS1");
    aml_append(dev, aml_name_decl("_HID", aml_string("LNRO0015")));
    aml_append(dev, aml_name_decl("_UID", aml_int(1)));
    crs = aml_resource_template();
    aml_append(crs, aml_memory32_fixed(base + size, size, AML_READ_WRITE));
    aml_append(dev, aml_name_decl("_CRS", crs));
    aml_append(scope, dev);
}

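/*
 * Emit one LNRO0005 device per virtio-mmio transport. The transports sit
 * back to back from the VIRT_MMIO base, each taking the next SPI in sequence.
 */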
static void acpi_dsdt_add_virtio(Aml *scope,
                                 const MemMapEntry *virtio_mmio_memmap,
                                 uint32_t mmio_irq, int num)
{
    hwaddr base = virtio_mmio_memmap->base;
    hwaddr size = virtio_mmio_memmap->size;
    int i;

    for (i = 0; i < num; i++) {
        uint32_t irq = mmio_irq + i;
        Aml *dev = aml_device("VR%02u", i);
        aml_append(dev, aml_name_decl("_HID", aml_string("LNRO0005")));
        aml_append(dev, aml_name_decl("_UID", aml_int(i)));
        aml_append(dev, aml_name_decl("_CCA", aml_int(1)));

        Aml *crs = aml_resource_template();
        aml_append(crs, aml_memory32_fixed(base, size, AML_READ_WRITE));
        aml_append(crs,
                   aml_interrupt(AML_CONSUMER, AML_LEVEL, AML_ACTIVE_HIGH,
                                 AML_EXCLUSIVE, &irq, 1));
        aml_append(dev, aml_name_decl("_CRS", crs));
        aml_append(scope, dev);
        base += size;
    }
}

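/*
 * PCIe host bridge (PNP0A08/PNP0A03): interrupt routing through GSI link
 * devices, the _CRS resources (bus numbers, 32-bit MMIO, I/O and optionally
 * the high MMIO window), plus the _OSC and _DSM methods required by the
 * PCI Firmware Specification.
 */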
static void acpi_dsdt_add_pci(Aml *scope, const MemMapEntry *memmap,
                              uint32_t irq, bool use_highmem, bool highmem_ecam)
{
    int ecam_id = VIRT_ECAM_ID(highmem_ecam);
    Aml *method, *crs, *ifctx, *UUID, *ifctx1, *elsectx, *buf;
    int i, bus_no;
    hwaddr base_mmio = memmap[VIRT_PCIE_MMIO].base;
    hwaddr size_mmio = memmap[VIRT_PCIE_MMIO].size;
    hwaddr base_pio = memmap[VIRT_PCIE_PIO].base;
    hwaddr size_pio = memmap[VIRT_PCIE_PIO].size;
    hwaddr base_ecam = memmap[ecam_id].base;
    hwaddr size_ecam = memmap[ecam_id].size;
    int nr_pcie_buses = size_ecam / PCIE_MMCFG_SIZE_MIN;

    Aml *dev = aml_device("%s", "PCI0");
    aml_append(dev, aml_name_decl("_HID", aml_string("PNP0A08")));
    aml_append(dev, aml_name_decl("_CID", aml_string("PNP0A03")));
    aml_append(dev, aml_name_decl("_SEG", aml_int(0)));
    aml_append(dev, aml_name_decl("_BBN", aml_int(0)));
    aml_append(dev, aml_name_decl("_ADR", aml_int(0)));
    aml_append(dev, aml_name_decl("_UID", aml_string("PCI0")));
    aml_append(dev, aml_name_decl("_STR", aml_unicode("PCIe 0 Device")));
    aml_append(dev, aml_name_decl("_CCA", aml_int(1)));

    /* Declare the PCI Routing Table. */
    Aml *rt_pkg = aml_varpackage(nr_pcie_buses * PCI_NUM_PINS);
    for (bus_no = 0; bus_no < nr_pcie_buses; bus_no++) {
        for (i = 0; i < PCI_NUM_PINS; i++) {
            int gsi = (i + bus_no) % PCI_NUM_PINS;
            Aml *pkg = aml_package(4);
            aml_append(pkg, aml_int((bus_no << 16) | 0xFFFF));
            aml_append(pkg, aml_int(i));
            aml_append(pkg, aml_name("GSI%d", gsi));
            aml_append(pkg, aml_int(0));
            aml_append(rt_pkg, pkg);
        }
    }
    aml_append(dev, aml_name_decl("_PRT", rt_pkg));

    /* Create GSI link device */
    for (i = 0; i < PCI_NUM_PINS; i++) {
        uint32_t irqs = irq + i;
        Aml *dev_gsi = aml_device("GSI%d", i);
        aml_append(dev_gsi, aml_name_decl("_HID", aml_string("PNP0C0F")));
        aml_append(dev_gsi, aml_name_decl("_UID", aml_int(0)));
        crs = aml_resource_template();
        aml_append(crs,
                   aml_interrupt(AML_CONSUMER, AML_LEVEL, AML_ACTIVE_HIGH,
                                 AML_EXCLUSIVE, &irqs, 1));
        aml_append(dev_gsi, aml_name_decl("_PRS", crs));
        crs = aml_resource_template();
        aml_append(crs,
                   aml_interrupt(AML_CONSUMER, AML_LEVEL, AML_ACTIVE_HIGH,
                                 AML_EXCLUSIVE, &irqs, 1));
        aml_append(dev_gsi, aml_name_decl("_CRS", crs));
        method = aml_method("_SRS", 1, AML_NOTSERIALIZED);
        aml_append(dev_gsi, method);
        aml_append(dev, dev_gsi);
    }

    method = aml_method("_CBA", 0, AML_NOTSERIALIZED);
    aml_append(method, aml_return(aml_int(base_ecam)));
    aml_append(dev, method);

    method = aml_method("_CRS", 0, AML_NOTSERIALIZED);
    Aml *rbuf = aml_resource_template();
    aml_append(rbuf,
        aml_word_bus_number(AML_MIN_FIXED, AML_MAX_FIXED, AML_POS_DECODE,
                            0x0000, 0x0000, nr_pcie_buses - 1, 0x0000,
                            nr_pcie_buses));
    aml_append(rbuf,
        aml_dword_memory(AML_POS_DECODE, AML_MIN_FIXED, AML_MAX_FIXED,
                         AML_NON_CACHEABLE, AML_READ_WRITE, 0x0000, base_mmio,
                         base_mmio + size_mmio - 1, 0x0000, size_mmio));
    aml_append(rbuf,
        aml_dword_io(AML_MIN_FIXED, AML_MAX_FIXED, AML_POS_DECODE,
                     AML_ENTIRE_RANGE, 0x0000, 0x0000, size_pio - 1, base_pio,
                     size_pio));

    if (use_highmem) {
        hwaddr base_mmio_high = memmap[VIRT_HIGH_PCIE_MMIO].base;
        hwaddr size_mmio_high = memmap[VIRT_HIGH_PCIE_MMIO].size;

        aml_append(rbuf,
            aml_qword_memory(AML_POS_DECODE, AML_MIN_FIXED, AML_MAX_FIXED,
                             AML_NON_CACHEABLE, AML_READ_WRITE, 0x0000,
                             base_mmio_high,
                             base_mmio_high + size_mmio_high - 1, 0x0000,
                             size_mmio_high));
    }

    aml_append(method, aml_name_decl("RBUF", rbuf));
    aml_append(method, aml_return(rbuf));
    aml_append(dev, method);

    /* Declare an _OSC (OS Control Handoff) method */
    aml_append(dev, aml_name_decl("SUPP", aml_int(0)));
    aml_append(dev, aml_name_decl("CTRL", aml_int(0)));
    method = aml_method("_OSC", 4, AML_NOTSERIALIZED);
    aml_append(method,
        aml_create_dword_field(aml_arg(3), aml_int(0), "CDW1"));

    /* PCI Firmware Specification 3.0
     * 4.5.1. _OSC Interface for PCI Host Bridge Devices
     * The _OSC interface for a PCI/PCI-X/PCI Express hierarchy is
     * identified by the Universal Unique IDentifier (UUID)
     * 33DB4D5B-1FF7-401C-9657-7441C03DD766
     */
    UUID = aml_touuid("33DB4D5B-1FF7-401C-9657-7441C03DD766");
    ifctx = aml_if(aml_equal(aml_arg(0), UUID));
    aml_append(ifctx,
        aml_create_dword_field(aml_arg(3), aml_int(4), "CDW2"));
    aml_append(ifctx,
        aml_create_dword_field(aml_arg(3), aml_int(8), "CDW3"));
    aml_append(ifctx, aml_store(aml_name("CDW2"), aml_name("SUPP")));
    aml_append(ifctx, aml_store(aml_name("CDW3"), aml_name("CTRL")));
    aml_append(ifctx, aml_store(aml_and(aml_name("CTRL"), aml_int(0x1D), NULL),
                                aml_name("CTRL")));

    ifctx1 = aml_if(aml_lnot(aml_equal(aml_arg(1), aml_int(0x1))));
    aml_append(ifctx1, aml_store(aml_or(aml_name("CDW1"), aml_int(0x08), NULL),
                                 aml_name("CDW1")));
    aml_append(ifctx, ifctx1);

    ifctx1 = aml_if(aml_lnot(aml_equal(aml_name("CDW3"), aml_name("CTRL"))));
    aml_append(ifctx1, aml_store(aml_or(aml_name("CDW1"), aml_int(0x10), NULL),
                                 aml_name("CDW1")));
    aml_append(ifctx, ifctx1);

    aml_append(ifctx, aml_store(aml_name("CTRL"), aml_name("CDW3")));
    aml_append(ifctx, aml_return(aml_arg(3)));
    aml_append(method, ifctx);

    elsectx = aml_else();
    aml_append(elsectx, aml_store(aml_or(aml_name("CDW1"), aml_int(4), NULL),
                                  aml_name("CDW1")));
    aml_append(elsectx, aml_return(aml_arg(3)));
    aml_append(method, elsectx);
    aml_append(dev, method);

    method = aml_method("_DSM", 4, AML_NOTSERIALIZED);

    /* PCI Firmware Specification 3.0
     * 4.6.1. _DSM for PCI Express Slot Information
     * The UUID in _DSM in this context is
     * {E5C937D0-3553-4D7A-9117-EA4D19C3434D}
     */
    UUID = aml_touuid("E5C937D0-3553-4D7A-9117-EA4D19C3434D");
    ifctx = aml_if(aml_equal(aml_arg(0), UUID));
    ifctx1 = aml_if(aml_equal(aml_arg(2), aml_int(0)));
    uint8_t byte_list[1] = {1};
    buf = aml_buffer(1, byte_list);
    aml_append(ifctx1, aml_return(buf));
    aml_append(ifctx, ifctx1);
    aml_append(method, ifctx);

    byte_list[0] = 0;
    buf = aml_buffer(1, byte_list);
    aml_append(method, aml_return(buf));
    aml_append(dev, method);

    Aml *dev_rp0 = aml_device("%s", "RP0");
    aml_append(dev_rp0, aml_name_decl("_ADR", aml_int(0)));
    aml_append(dev, dev_rp0);

    Aml *dev_res0 = aml_device("%s", "RES0");
    aml_append(dev_res0, aml_name_decl("_HID", aml_string("PNP0C02")));
    crs = aml_resource_template();
    aml_append(crs,
        aml_qword_memory(AML_POS_DECODE, AML_MIN_FIXED, AML_MAX_FIXED,
                         AML_NON_CACHEABLE, AML_READ_WRITE, 0x0000, base_ecam,
                         base_ecam + size_ecam - 1, 0x0000, size_ecam));
    aml_append(dev_res0, aml_name_decl("_CRS", crs));
    aml_append(dev, dev_res0);
    aml_append(scope, dev);
}

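/*
 * GPIO controller (ARMH0061); pin 3 is wired to the power button and is
 * handled by the _E03 event method below.
 */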
static void acpi_dsdt_add_gpio(Aml *scope, const MemMapEntry *gpio_memmap,
                                           uint32_t gpio_irq)
{
    Aml *dev = aml_device("GPO0");
    aml_append(dev, aml_name_decl("_HID", aml_string("ARMH0061")));
    aml_append(dev, aml_name_decl("_ADR", aml_int(0)));
    aml_append(dev, aml_name_decl("_UID", aml_int(0)));

    Aml *crs = aml_resource_template();
    aml_append(crs, aml_memory32_fixed(gpio_memmap->base, gpio_memmap->size,
                                       AML_READ_WRITE));
    aml_append(crs, aml_interrupt(AML_CONSUMER, AML_LEVEL, AML_ACTIVE_HIGH,
                                  AML_EXCLUSIVE, &gpio_irq, 1));
    aml_append(dev, aml_name_decl("_CRS", crs));

    Aml *aei = aml_resource_template();
    /* Pin 3 for power button */
    const uint32_t pin_list[1] = {3};
    aml_append(aei, aml_gpio_int(AML_CONSUMER, AML_EDGE, AML_ACTIVE_HIGH,
                                 AML_EXCLUSIVE, AML_PULL_UP, 0, pin_list, 1,
                                 "GPO0", NULL, 0));
    aml_append(dev, aml_name_decl("_AEI", aei));

    /* _E03 is the event handler method for the power button (GPIO pin 3) */
    Aml *method = aml_method("_E03", 0, AML_NOTSERIALIZED);
    aml_append(method, aml_notify(aml_name(ACPI_POWER_BUTTON_DEVICE),
                                  aml_int(0x80)));
    aml_append(dev, method);
    aml_append(scope, dev);
}

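/* ACPI power button device (PNP0C0C), notified from the GPIO _E03 handler. */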
static void acpi_dsdt_add_power_button(Aml *scope)
{
    Aml *dev = aml_device(ACPI_POWER_BUTTON_DEVICE);
    aml_append(dev, aml_name_decl("_HID", aml_string("PNP0C0C")));
    aml_append(dev, aml_name_decl("_ADR", aml_int(0)));
    aml_append(dev, aml_name_decl("_UID", aml_int(0)));
    aml_append(scope, dev);
}

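/*
 * IORT: describes how the PCIe root complex maps to the GIC ITS and,
 * when an SMMUv3 is present, routes requester IDs through the SMMU, so
 * the OS can set up MSIs and DMA translation for PCIe devices.
 */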
static void
build_iort(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms)
{
    int nb_nodes, iort_start = table_data->len;
    AcpiIortIdMapping *idmap;
    AcpiIortItsGroup *its;
    AcpiIortTable *iort;
    AcpiIortSmmu3 *smmu;
    size_t node_size, iort_node_offset, iort_length, smmu_offset = 0;
    AcpiIortRC *rc;

    iort = acpi_data_push(table_data, sizeof(*iort));

    if (vms->iommu == VIRT_IOMMU_SMMUV3) {
        nb_nodes = 3; /* RC, ITS, SMMUv3 */
    } else {
        nb_nodes = 2; /* RC, ITS */
    }

    iort_length = sizeof(*iort);
    iort->node_count = cpu_to_le32(nb_nodes);
    /*
     * Use a copy in case table_data->data moves during acpi_data_push
     * operations.
     */
    iort_node_offset = sizeof(*iort);
    iort->node_offset = cpu_to_le32(iort_node_offset);

    /* ITS group node */
    node_size = sizeof(*its) + sizeof(uint32_t);
    iort_length += node_size;
    its = acpi_data_push(table_data, node_size);

    its->type = ACPI_IORT_NODE_ITS_GROUP;
    its->length = cpu_to_le16(node_size);
    its->its_count = cpu_to_le32(1);
    its->identifiers[0] = 0; /* MADT translation_id */

    if (vms->iommu == VIRT_IOMMU_SMMUV3) {
        int irq = vms->irqmap[VIRT_SMMU] + ARM_SPI_BASE;

        /* SMMUv3 node */
        smmu_offset = iort_node_offset + node_size;
        node_size = sizeof(*smmu) + sizeof(*idmap);
        iort_length += node_size;
        smmu = acpi_data_push(table_data, node_size);

        smmu->type = ACPI_IORT_NODE_SMMU_V3;
        smmu->length = cpu_to_le16(node_size);
        smmu->mapping_count = cpu_to_le32(1);
        smmu->mapping_offset = cpu_to_le32(sizeof(*smmu));
        smmu->base_address = cpu_to_le64(vms->memmap[VIRT_SMMU].base);
        smmu->flags = cpu_to_le32(ACPI_IORT_SMMU_V3_COHACC_OVERRIDE);
        smmu->event_gsiv = cpu_to_le32(irq);
        smmu->pri_gsiv = cpu_to_le32(irq + 1);
        smmu->gerr_gsiv = cpu_to_le32(irq + 2);
        smmu->sync_gsiv = cpu_to_le32(irq + 3);

        /* Identity RID mapping covering the whole input RID range */
        idmap = &smmu->id_mapping_array[0];
        idmap->input_base = 0;
        idmap->id_count = cpu_to_le32(0xFFFF);
        idmap->output_base = 0;
        /* output IORT node is the ITS group node (the first node) */
        idmap->output_reference = cpu_to_le32(iort_node_offset);
    }

    /* Root Complex Node */
    node_size = sizeof(*rc) + sizeof(*idmap);
    iort_length += node_size;
    rc = acpi_data_push(table_data, node_size);

    rc->type = ACPI_IORT_NODE_PCI_ROOT_COMPLEX;
    rc->length = cpu_to_le16(node_size);
    rc->mapping_count = cpu_to_le32(1);
    rc->mapping_offset = cpu_to_le32(sizeof(*rc));

    /* fully coherent device */
    rc->memory_properties.cache_coherency = cpu_to_le32(1);
    rc->memory_properties.memory_flags = 0x3; /* CCA = CPM = DCAS = 1 */
    rc->pci_segment_number = 0; /* MCFG pci_segment */

    /* Identity RID mapping covering the whole input RID range */
    idmap = &rc->id_mapping_array[0];
    idmap->input_base = 0;
    idmap->id_count = cpu_to_le32(0xFFFF);
    idmap->output_base = 0;

    if (vms->iommu == VIRT_IOMMU_SMMUV3) {
        /* output IORT node is the smmuv3 node */
        idmap->output_reference = cpu_to_le32(smmu_offset);
    } else {
        /* output IORT node is the ITS group node (the first node) */
        idmap->output_reference = cpu_to_le32(iort_node_offset);
    }

    /*
     * Update the pointer address in case table_data->data moves during above
     * acpi_data_push operations.
     */
    iort = (AcpiIortTable *)(table_data->data + iort_start);
    iort->length = cpu_to_le32(iort_length);

    build_header(linker, table_data, (void *)(table_data->data + iort_start),
                 "IORT", table_data->len - iort_start, 0, NULL, NULL);
}

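/*
 * SPCR: points the OS serial console at the PL011 UART, using the same
 * base address and interrupt as the DSDT COM0 device.
 */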
static void
build_spcr(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms)
{
    AcpiSerialPortConsoleRedirection *spcr;
    const MemMapEntry *uart_memmap = &vms->memmap[VIRT_UART];
    int irq = vms->irqmap[VIRT_UART] + ARM_SPI_BASE;
    int spcr_start = table_data->len;

    spcr = acpi_data_push(table_data, sizeof(*spcr));

    spcr->interface_type = 0x3;    /* ARM PL011 UART */

    spcr->base_address.space_id = AML_SYSTEM_MEMORY;
    spcr->base_address.bit_width = 8;
    spcr->base_address.bit_offset = 0;
    spcr->base_address.access_width = 1;
    spcr->base_address.address = cpu_to_le64(uart_memmap->base);

    spcr->interrupt_types = (1 << 3); /* Bit[3] ARMH GIC interrupt */
    spcr->gsi = cpu_to_le32(irq);  /* Global System Interrupt */

    spcr->baud = 3;                /* Baud Rate: 3 = 9600 */
    spcr->parity = 0;              /* No Parity */
    spcr->stopbits = 1;            /* 1 Stop bit */
    spcr->flowctrl = (1 << 1);     /* Bit[1] = RTS/CTS hardware flow control */
    spcr->term_type = 0;           /* Terminal Type: 0 = VT100 */

    spcr->pci_device_id = 0xffff;  /* PCI Device ID: not a PCI device */
    spcr->pci_vendor_id = 0xffff;  /* PCI Vendor ID: not a PCI device */

    build_header(linker, table_data, (void *)(table_data->data + spcr_start),
                 "SPCR", table_data->len - spcr_start, 2, NULL, NULL);
}

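/*
 * SRAT: assign every possible CPU (GICC affinity) and every memory range
 * to its NUMA node; a hotpluggable entry covers the device memory region
 * when one is configured.
 */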
static void
build_srat(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms)
{
    AcpiSystemResourceAffinityTable *srat;
    AcpiSratProcessorGiccAffinity *core;
    AcpiSratMemoryAffinity *numamem;
    int i, srat_start;
    uint64_t mem_base;
    MachineClass *mc = MACHINE_GET_CLASS(vms);
    MachineState *ms = MACHINE(vms);
    const CPUArchIdList *cpu_list = mc->possible_cpu_arch_ids(ms);

    srat_start = table_data->len;
    srat = acpi_data_push(table_data, sizeof(*srat));
    srat->reserved1 = cpu_to_le32(1);

    for (i = 0; i < cpu_list->len; ++i) {
        core = acpi_data_push(table_data, sizeof(*core));
        core->type = ACPI_SRAT_PROCESSOR_GICC;
        core->length = sizeof(*core);
        core->proximity = cpu_to_le32(cpu_list->cpus[i].props.node_id);
        core->acpi_processor_uid = cpu_to_le32(i);
        core->flags = cpu_to_le32(1);
    }

    mem_base = vms->memmap[VIRT_MEM].base;
    for (i = 0; i < ms->numa_state->num_nodes; ++i) {
        if (ms->numa_state->nodes[i].node_mem > 0) {
            numamem = acpi_data_push(table_data, sizeof(*numamem));
            build_srat_memory(numamem, mem_base,
                              ms->numa_state->nodes[i].node_mem, i,
                              MEM_AFFINITY_ENABLED);
            mem_base += ms->numa_state->nodes[i].node_mem;
        }
    }

    if (ms->device_memory) {
        numamem = acpi_data_push(table_data, sizeof *numamem);
        build_srat_memory(numamem, ms->device_memory->base,
                          memory_region_size(&ms->device_memory->mr),
                          ms->numa_state->num_nodes - 1,
                          MEM_AFFINITY_HOTPLUGGABLE | MEM_AFFINITY_ENABLED);
    }

    build_header(linker, table_data, (void *)(table_data->data + srat_start),
                 "SRAT", table_data->len - srat_start, 3, NULL, NULL);
}

/* GTDT */
static void
build_gtdt(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms)
{
    VirtMachineClass *vmc = VIRT_MACHINE_GET_CLASS(vms);
    int gtdt_start = table_data->len;
    AcpiGenericTimerTable *gtdt;
    uint32_t irqflags;

    if (vmc->claim_edge_triggered_timers) {
        irqflags = ACPI_GTDT_INTERRUPT_MODE_EDGE;
    } else {
        irqflags = ACPI_GTDT_INTERRUPT_MODE_LEVEL;
    }

    gtdt = acpi_data_push(table_data, sizeof *gtdt);
    /* The interrupt values are the same as in the device tree, plus 16 */
    gtdt->secure_el1_interrupt = cpu_to_le32(ARCH_TIMER_S_EL1_IRQ + 16);
    gtdt->secure_el1_flags = cpu_to_le32(irqflags);

    gtdt->non_secure_el1_interrupt = cpu_to_le32(ARCH_TIMER_NS_EL1_IRQ + 16);
    gtdt->non_secure_el1_flags = cpu_to_le32(irqflags |
                                             ACPI_GTDT_CAP_ALWAYS_ON);

    gtdt->virtual_timer_interrupt = cpu_to_le32(ARCH_TIMER_VIRT_IRQ + 16);
    gtdt->virtual_timer_flags = cpu_to_le32(irqflags);

    gtdt->non_secure_el2_interrupt = cpu_to_le32(ARCH_TIMER_NS_EL2_IRQ + 16);
    gtdt->non_secure_el2_flags = cpu_to_le32(irqflags);

    build_header(linker, table_data,
                 (void *)(table_data->data + gtdt_start), "GTDT",
                 table_data->len - gtdt_start, 2, NULL, NULL);
}

/* MADT */
static void
build_madt(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms)
{
    VirtMachineClass *vmc = VIRT_MACHINE_GET_CLASS(vms);
    int madt_start = table_data->len;
    const MemMapEntry *memmap = vms->memmap;
    const int *irqmap = vms->irqmap;
    AcpiMultipleApicTable *madt;
    AcpiMadtGenericDistributor *gicd;
    AcpiMadtGenericMsiFrame *gic_msi;
    int i;

    madt = acpi_data_push(table_data, sizeof *madt);

    gicd = acpi_data_push(table_data, sizeof *gicd);
    gicd->type = ACPI_APIC_GENERIC_DISTRIBUTOR;
    gicd->length = sizeof(*gicd);
    gicd->base_address = cpu_to_le64(memmap[VIRT_GIC_DIST].base);
    gicd->version = vms->gic_version;

    for (i = 0; i < vms->smp_cpus; i++) {
        AcpiMadtGenericCpuInterface *gicc = acpi_data_push(table_data,
                                                           sizeof(*gicc));
        ARMCPU *armcpu = ARM_CPU(qemu_get_cpu(i));

        gicc->type = ACPI_APIC_GENERIC_CPU_INTERFACE;
        gicc->length = sizeof(*gicc);
        if (vms->gic_version == 2) {
            gicc->base_address = cpu_to_le64(memmap[VIRT_GIC_CPU].base);
            gicc->gich_base_address = cpu_to_le64(memmap[VIRT_GIC_HYP].base);
            gicc->gicv_base_address = cpu_to_le64(memmap[VIRT_GIC_VCPU].base);
        }
        gicc->cpu_interface_number = cpu_to_le32(i);
        gicc->arm_mpidr = cpu_to_le64(armcpu->mp_affinity);
        gicc->uid = cpu_to_le32(i);
        gicc->flags = cpu_to_le32(ACPI_MADT_GICC_ENABLED);

        if (arm_feature(&armcpu->env, ARM_FEATURE_PMU)) {
            gicc->performance_interrupt = cpu_to_le32(PPI(VIRTUAL_PMU_IRQ));
        }
        if (vms->virt) {
            gicc->vgic_interrupt = cpu_to_le32(PPI(ARCH_GIC_MAINT_IRQ));
        }
    }

    if (vms->gic_version == 3) {
        AcpiMadtGenericTranslator *gic_its;
        int nb_redist_regions = virt_gicv3_redist_region_count(vms);
        AcpiMadtGenericRedistributor *gicr = acpi_data_push(table_data,
                                                            sizeof *gicr);

        gicr->type = ACPI_APIC_GENERIC_REDISTRIBUTOR;
        gicr->length = sizeof(*gicr);
        gicr->base_address = cpu_to_le64(memmap[VIRT_GIC_REDIST].base);
        gicr->range_length = cpu_to_le32(memmap[VIRT_GIC_REDIST].size);

        if (nb_redist_regions == 2) {
            gicr = acpi_data_push(table_data, sizeof(*gicr));
            gicr->type = ACPI_APIC_GENERIC_REDISTRIBUTOR;
            gicr->length = sizeof(*gicr);
            gicr->base_address =
                cpu_to_le64(memmap[VIRT_HIGH_GIC_REDIST2].base);
            gicr->range_length =
                cpu_to_le32(memmap[VIRT_HIGH_GIC_REDIST2].size);
        }

        if (its_class_name() && !vmc->no_its) {
            gic_its = acpi_data_push(table_data, sizeof *gic_its);
            gic_its->type = ACPI_APIC_GENERIC_TRANSLATOR;
            gic_its->length = sizeof(*gic_its);
            gic_its->translation_id = 0;
            gic_its->base_address = cpu_to_le64(memmap[VIRT_GIC_ITS].base);
        }
    } else {
        gic_msi = acpi_data_push(table_data, sizeof *gic_msi);
        gic_msi->type = ACPI_APIC_GENERIC_MSI_FRAME;
        gic_msi->length = sizeof(*gic_msi);
        gic_msi->gic_msi_frame_id = 0;
        gic_msi->base_address = cpu_to_le64(memmap[VIRT_GIC_V2M].base);
        gic_msi->flags = cpu_to_le32(1);
        gic_msi->spi_count = cpu_to_le16(NUM_GICV2M_SPIS);
        gic_msi->spi_base = cpu_to_le16(irqmap[VIRT_GIC_V2M] + ARM_SPI_BASE);
    }

    build_header(linker, table_data,
                 (void *)(table_data->data + madt_start), "APIC",
                 table_data->len - madt_start, 3, NULL, NULL);
}

/* FADT */
static void build_fadt_rev5(GArray *table_data, BIOSLinker *linker,
                            VirtMachineState *vms, unsigned dsdt_tbl_offset)
{
    /* ACPI v5.1 */
    AcpiFadtData fadt = {
        .rev = 5,
        .minor_ver = 1,
        .flags = 1 << ACPI_FADT_F_HW_REDUCED_ACPI,
        .xdsdt_tbl_offset = &dsdt_tbl_offset,
    };

    switch (vms->psci_conduit) {
    case QEMU_PSCI_CONDUIT_DISABLED:
        fadt.arm_boot_arch = 0;
        break;
    case QEMU_PSCI_CONDUIT_HVC:
        fadt.arm_boot_arch = ACPI_FADT_ARM_PSCI_COMPLIANT |
                             ACPI_FADT_ARM_PSCI_USE_HVC;
        break;
    case QEMU_PSCI_CONDUIT_SMC:
        fadt.arm_boot_arch = ACPI_FADT_ARM_PSCI_COMPLIANT;
        break;
    default:
        g_assert_not_reached();
    }

    build_fadt(table_data, linker, &fadt, NULL, NULL);
}

/* DSDT */
static void
build_dsdt(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms)
{
    Aml *scope, *dsdt;
    MachineState *ms = MACHINE(vms);
    const MemMapEntry *memmap = vms->memmap;
    const int *irqmap = vms->irqmap;

    dsdt = init_aml_allocator();
    /* Reserve space for header */
    acpi_data_push(dsdt->buf, sizeof(AcpiTableHeader));

    /* When booting the VM with UEFI, UEFI takes ownership of the RTC hardware.
     * While UEFI can use libfdt to disable the RTC device node in the DTB that
     * it passes to the OS, it cannot modify AML. Therefore, we won't generate
     * the RTC ACPI device at all when using UEFI.
     */
    scope = aml_scope("\\_SB");
    acpi_dsdt_add_cpus(scope, vms->smp_cpus);
    acpi_dsdt_add_uart(scope, &memmap[VIRT_UART],
                       (irqmap[VIRT_UART] + ARM_SPI_BASE));
    acpi_dsdt_add_flash(scope, &memmap[VIRT_FLASH]);
    acpi_dsdt_add_fw_cfg(scope, &memmap[VIRT_FW_CFG]);
    acpi_dsdt_add_virtio(scope, &memmap[VIRT_MMIO],
                    (irqmap[VIRT_MMIO] + ARM_SPI_BASE), NUM_VIRTIO_TRANSPORTS);
    acpi_dsdt_add_pci(scope, memmap, (irqmap[VIRT_PCIE] + ARM_SPI_BASE),
                      vms->highmem, vms->highmem_ecam);
    if (vms->acpi_dev) {
        build_ged_aml(scope, "\\_SB."GED_DEVICE,
                      HOTPLUG_HANDLER(vms->acpi_dev),
                      irqmap[VIRT_ACPI_GED] + ARM_SPI_BASE, AML_SYSTEM_MEMORY,
                      memmap[VIRT_ACPI_GED].base);
    } else {
        acpi_dsdt_add_gpio(scope, &memmap[VIRT_GPIO],
                           (irqmap[VIRT_GPIO] + ARM_SPI_BASE));
    }

    if (vms->acpi_dev) {
        uint32_t event = object_property_get_uint(OBJECT(vms->acpi_dev),
                                                  "ged-event", &error_abort);

        if (event & ACPI_GED_MEM_HOTPLUG_EVT) {
            build_memory_hotplug_aml(scope, ms->ram_slots, "\\_SB", NULL,
                                     AML_SYSTEM_MEMORY,
                                     memmap[VIRT_PCDIMM_ACPI].base);
        }
    }

    acpi_dsdt_add_power_button(scope);

    aml_append(dsdt, scope);

    /* copy AML table into ACPI tables blob and patch header there */
    g_array_append_vals(table_data, dsdt->buf->data, dsdt->buf->len);
    build_header(linker, table_data,
        (void *)(table_data->data + table_data->len - dsdt->buf->len),
        "DSDT", dsdt->buf->len, 2, NULL, NULL);
    free_aml_allocator();
}

typedef
struct AcpiBuildState {
    /* Copy of table in RAM (for patching). */
    MemoryRegion *table_mr;
    MemoryRegion *rsdp_mr;
    MemoryRegion *linker_mr;
    /* Is table patched? */
    bool patched;
} AcpiBuildState;

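/*
 * Build the complete set of tables into tables->table_data: DSDT first,
 * then FADT, MADT, GTDT, MCFG, SPCR, optionally SRAT/SLIT and IORT, and
 * finally the XSDT and RSDP used to locate everything.
 */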
static
void virt_acpi_build(VirtMachineState *vms, AcpiBuildTables *tables)
{
    VirtMachineClass *vmc = VIRT_MACHINE_GET_CLASS(vms);
    GArray *table_offsets;
    unsigned dsdt, xsdt;
    GArray *tables_blob = tables->table_data;
    MachineState *ms = MACHINE(vms);

    table_offsets = g_array_new(false, true /* clear */,
                                        sizeof(uint32_t));

    bios_linker_loader_alloc(tables->linker,
                             ACPI_BUILD_TABLE_FILE, tables_blob,
                             64, false /* high memory */);

    /* DSDT is pointed to by FADT */
    dsdt = tables_blob->len;
    build_dsdt(tables_blob, tables->linker, vms);

    /* FADT MADT GTDT MCFG SPCR pointed to by XSDT */
    acpi_add_table(table_offsets, tables_blob);
    build_fadt_rev5(tables_blob, tables->linker, vms, dsdt);

    acpi_add_table(table_offsets, tables_blob);
    build_madt(tables_blob, tables->linker, vms);

    acpi_add_table(table_offsets, tables_blob);
    build_gtdt(tables_blob, tables->linker, vms);

    acpi_add_table(table_offsets, tables_blob);
    {
        AcpiMcfgInfo mcfg = {
           .base = vms->memmap[VIRT_ECAM_ID(vms->highmem_ecam)].base,
           .size = vms->memmap[VIRT_ECAM_ID(vms->highmem_ecam)].size,
        };
        build_mcfg(tables_blob, tables->linker, &mcfg);
    }

    acpi_add_table(table_offsets, tables_blob);
    build_spcr(tables_blob, tables->linker, vms);

    if (ms->numa_state->num_nodes > 0) {
        acpi_add_table(table_offsets, tables_blob);
        build_srat(tables_blob, tables->linker, vms);
        if (ms->numa_state->have_numa_distance) {
            acpi_add_table(table_offsets, tables_blob);
            build_slit(tables_blob, tables->linker, ms);
        }
    }

    if (its_class_name() && !vmc->no_its) {
        acpi_add_table(table_offsets, tables_blob);
        build_iort(tables_blob, tables->linker, vms);
    }

    /* XSDT is pointed to by RSDP */
    xsdt = tables_blob->len;
    build_xsdt(tables_blob, tables->linker, table_offsets, NULL, NULL);

    /* RSDP is in FSEG memory, so allocate it separately */
    {
        AcpiRsdpData rsdp_data = {
            .revision = 2,
            .oem_id = ACPI_BUILD_APPNAME6,
            .xsdt_tbl_offset = &xsdt,
            .rsdt_tbl_offset = NULL,
        };
        build_rsdp(tables->rsdp, tables->linker, &rsdp_data);
    }

    /* Cleanup memory that's no longer used. */
    g_array_free(table_offsets, true);
}

static void acpi_ram_update(MemoryRegion *mr, GArray *data)
{
    uint32_t size = acpi_data_len(data);

    /* Make sure RAM size is correct - in case it got changed
     * e.g. by migration */
    memory_region_ram_resize(mr, size, &error_abort);

    memcpy(memory_region_get_ram_ptr(mr), data->data, size);
    memory_region_set_dirty(mr, 0, size);
}

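/*
 * Update callback for the ACPI ROM blobs: rebuild the tables and patch
 * the copies in guest RAM. The 'patched' flag ensures this runs at most
 * once per reset cycle (virt_acpi_build_reset() clears it again).
 */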
static void virt_acpi_build_update(void *build_opaque)
{
    AcpiBuildState *build_state = build_opaque;
    AcpiBuildTables tables;

    /* No state to update or already patched? Nothing to do. */
    if (!build_state || build_state->patched) {
        return;
    }
    build_state->patched = true;

    acpi_build_tables_init(&tables);

    virt_acpi_build(VIRT_MACHINE(qdev_get_machine()), &tables);

    acpi_ram_update(build_state->table_mr, tables.table_data);
    acpi_ram_update(build_state->rsdp_mr, tables.rsdp);
    acpi_ram_update(build_state->linker_mr, tables.linker->cmd_blob);

    acpi_build_tables_cleanup(&tables, true);
}

static void virt_acpi_build_reset(void *build_opaque)
{
    AcpiBuildState *build_state = build_opaque;
    build_state->patched = false;
}

static const VMStateDescription vmstate_virt_acpi_build = {
    .name = "virt_acpi_build",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_BOOL(patched, AcpiBuildState),
        VMSTATE_END_OF_LIST()
    },
};

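/*
 * Build the ACPI tables and expose them to the guest as fw_cfg ROM blobs
 * (tables, linker script, RSDP, TPM log), then register the reset handler
 * and vmstate needed to keep the in-RAM copies consistent across reset
 * and migration.
 */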
void virt_acpi_setup(VirtMachineState *vms)
{
    AcpiBuildTables tables;
    AcpiBuildState *build_state;

    if (!vms->fw_cfg) {
        trace_virt_acpi_setup();
        return;
    }

    if (!acpi_enabled) {
        trace_virt_acpi_setup();
        return;
    }

    build_state = g_malloc0(sizeof *build_state);

    acpi_build_tables_init(&tables);
    virt_acpi_build(vms, &tables);

    /* Now expose it all to Guest */
    build_state->table_mr = acpi_add_rom_blob(virt_acpi_build_update,
                                              build_state, tables.table_data,
                                              ACPI_BUILD_TABLE_FILE,
                                              ACPI_BUILD_TABLE_MAX_SIZE);
    assert(build_state->table_mr != NULL);

    build_state->linker_mr =
        acpi_add_rom_blob(virt_acpi_build_update, build_state,
                          tables.linker->cmd_blob, "etc/table-loader", 0);

    fw_cfg_add_file(vms->fw_cfg, ACPI_BUILD_TPMLOG_FILE, tables.tcpalog->data,
                    acpi_data_len(tables.tcpalog));

    build_state->rsdp_mr = acpi_add_rom_blob(virt_acpi_build_update,
                                             build_state, tables.rsdp,
                                             ACPI_BUILD_RSDP_FILE, 0);

    qemu_register_reset(virt_acpi_build_reset, build_state);
    virt_acpi_build_reset(build_state);
    vmstate_register(NULL, 0, &vmstate_virt_acpi_build, build_state);

    /* Cleanup tables but don't free the memory: we track it
     * in build_state.
     */
    acpi_build_tables_cleanup(&tables, false);
}