qemu/hw/arm/virt-acpi-build.c
/* Support for generating ACPI tables and passing them to Guests
 *
 * ARM virt ACPI generation
 *
 * Copyright (C) 2008-2010  Kevin O'Connor <kevin@koconnor.net>
 * Copyright (C) 2006 Fabrice Bellard
 * Copyright (C) 2013 Red Hat Inc
 *
 * Author: Michael S. Tsirkin <mst@redhat.com>
 *
 * Copyright (c) 2015 HUAWEI TECHNOLOGIES CO.,LTD.
 *
 * Author: Shannon Zhao <zhaoshenglong@huawei.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.

 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.

 * You should have received a copy of the GNU General Public License along
 * with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu-common.h"
#include "qemu/bitmap.h"
#include "trace.h"
#include "qom/cpu.h"
#include "target/arm/cpu.h"
#include "hw/acpi/acpi-defs.h"
#include "hw/acpi/acpi.h"
#include "hw/nvram/fw_cfg.h"
#include "hw/acpi/bios-linker-loader.h"
#include "hw/loader.h"
#include "hw/hw.h"
#include "hw/acpi/aml-build.h"
#include "hw/pci/pcie_host.h"
#include "hw/pci/pci.h"
#include "hw/arm/virt.h"
#include "sysemu/numa.h"
#include "kvm_arm.h"

#define ARM_SPI_BASE 32
#define ACPI_POWER_BUTTON_DEVICE "PWRB"

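/*
 * DSDT: add one Device(Cxxx) per vCPU, using _HID ACPI0007 and a _UID
 * that matches the processor UID used in the MADT GICC entries.
 */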
static void acpi_dsdt_add_cpus(Aml *scope, int smp_cpus)
{
    uint16_t i;

    for (i = 0; i < smp_cpus; i++) {
        Aml *dev = aml_device("C%.03X", i);
        aml_append(dev, aml_name_decl("_HID", aml_string("ACPI0007")));
        aml_append(dev, aml_name_decl("_UID", aml_int(i)));
        aml_append(scope, dev);
    }
}

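/*
 * DSDT: describe the PL011 UART (_HID ARMH0011) with its MMIO window and
 * SPI; the _ADR entry ties it to the console described by the SPCR table.
 */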
static void acpi_dsdt_add_uart(Aml *scope, const MemMapEntry *uart_memmap,
                                           uint32_t uart_irq)
{
    Aml *dev = aml_device("COM0");
    aml_append(dev, aml_name_decl("_HID", aml_string("ARMH0011")));
    aml_append(dev, aml_name_decl("_UID", aml_int(0)));

    Aml *crs = aml_resource_template();
    aml_append(crs, aml_memory32_fixed(uart_memmap->base,
                                       uart_memmap->size, AML_READ_WRITE));
    aml_append(crs,
               aml_interrupt(AML_CONSUMER, AML_LEVEL, AML_ACTIVE_HIGH,
                             AML_EXCLUSIVE, &uart_irq, 1));
    aml_append(dev, aml_name_decl("_CRS", crs));

    /* The _ADR entry is used to link this device to the UART described
     * in the SPCR table, i.e. SPCR.base_address.address == _ADR.
     */
    aml_append(dev, aml_name_decl("_ADR", aml_int(uart_memmap->base)));

    aml_append(scope, dev);
}

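/* DSDT: expose the MMIO fw_cfg interface as _HID QEMU0002. */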
static void acpi_dsdt_add_fw_cfg(Aml *scope, const MemMapEntry *fw_cfg_memmap)
{
    Aml *dev = aml_device("FWCF");
    aml_append(dev, aml_name_decl("_HID", aml_string("QEMU0002")));
    /* device present, functioning, decoding, not shown in UI */
    aml_append(dev, aml_name_decl("_STA", aml_int(0xB)));
    aml_append(dev, aml_name_decl("_CCA", aml_int(1)));

    Aml *crs = aml_resource_template();
    aml_append(crs, aml_memory32_fixed(fw_cfg_memmap->base,
                                       fw_cfg_memmap->size, AML_READ_WRITE));
    aml_append(dev, aml_name_decl("_CRS", crs));
    aml_append(scope, dev);
}

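/*
 * DSDT: describe the two flash banks (_HID LNRO0015), each covering half
 * of the flash memory-map region.
 */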
static void acpi_dsdt_add_flash(Aml *scope, const MemMapEntry *flash_memmap)
{
    Aml *dev, *crs;
    hwaddr base = flash_memmap->base;
    hwaddr size = flash_memmap->size / 2;

    dev = aml_device("FLS0");
    aml_append(dev, aml_name_decl("_HID", aml_string("LNRO0015")));
    aml_append(dev, aml_name_decl("_UID", aml_int(0)));

    crs = aml_resource_template();
    aml_append(crs, aml_memory32_fixed(base, size, AML_READ_WRITE));
    aml_append(dev, aml_name_decl("_CRS", crs));
    aml_append(scope, dev);

    dev = aml_device("FLS1");
    aml_append(dev, aml_name_decl("_HID", aml_string("LNRO0015")));
    aml_append(dev, aml_name_decl("_UID", aml_int(1)));
    crs = aml_resource_template();
    aml_append(crs, aml_memory32_fixed(base + size, size, AML_READ_WRITE));
    aml_append(dev, aml_name_decl("_CRS", crs));
    aml_append(scope, dev);
}

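/*
 * DSDT: add one virtio-mmio transport (_HID LNRO0005) per slot, each with
 * its own MMIO window and SPI.
 */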
static void acpi_dsdt_add_virtio(Aml *scope,
                                 const MemMapEntry *virtio_mmio_memmap,
                                 uint32_t mmio_irq, int num)
{
    hwaddr base = virtio_mmio_memmap->base;
    hwaddr size = virtio_mmio_memmap->size;
    int i;

    for (i = 0; i < num; i++) {
        uint32_t irq = mmio_irq + i;
        Aml *dev = aml_device("VR%02u", i);
        aml_append(dev, aml_name_decl("_HID", aml_string("LNRO0005")));
        aml_append(dev, aml_name_decl("_UID", aml_int(i)));
        aml_append(dev, aml_name_decl("_CCA", aml_int(1)));

        Aml *crs = aml_resource_template();
        aml_append(crs, aml_memory32_fixed(base, size, AML_READ_WRITE));
        aml_append(crs,
                   aml_interrupt(AML_CONSUMER, AML_LEVEL, AML_ACTIVE_HIGH,
                                 AML_EXCLUSIVE, &irq, 1));
        aml_append(dev, aml_name_decl("_CRS", crs));
        aml_append(scope, dev);
        base += size;
    }
}

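/*
 * DSDT: describe the PCIe host bridge (PNP0A08/PNP0A03): _PRT interrupt
 * routing through GSI link devices, the ECAM/MMIO/IO windows in _CRS, and
 * the _OSC/_DSM methods expected by the PCI Firmware Specification.
 */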
static void acpi_dsdt_add_pci(Aml *scope, const MemMapEntry *memmap,
                              uint32_t irq, bool use_highmem, bool highmem_ecam)
{
    int ecam_id = VIRT_ECAM_ID(highmem_ecam);
    Aml *method, *crs, *ifctx, *UUID, *ifctx1, *elsectx, *buf;
    int i, bus_no;
    hwaddr base_mmio = memmap[VIRT_PCIE_MMIO].base;
    hwaddr size_mmio = memmap[VIRT_PCIE_MMIO].size;
    hwaddr base_pio = memmap[VIRT_PCIE_PIO].base;
    hwaddr size_pio = memmap[VIRT_PCIE_PIO].size;
    hwaddr base_ecam = memmap[ecam_id].base;
    hwaddr size_ecam = memmap[ecam_id].size;
    int nr_pcie_buses = size_ecam / PCIE_MMCFG_SIZE_MIN;

    Aml *dev = aml_device("%s", "PCI0");
    aml_append(dev, aml_name_decl("_HID", aml_string("PNP0A08")));
    aml_append(dev, aml_name_decl("_CID", aml_string("PNP0A03")));
    aml_append(dev, aml_name_decl("_SEG", aml_int(0)));
    aml_append(dev, aml_name_decl("_BBN", aml_int(0)));
    aml_append(dev, aml_name_decl("_ADR", aml_int(0)));
    aml_append(dev, aml_name_decl("_UID", aml_string("PCI0")));
    aml_append(dev, aml_name_decl("_STR", aml_unicode("PCIe 0 Device")));
    aml_append(dev, aml_name_decl("_CCA", aml_int(1)));

    /* Declare the PCI Routing Table. */
    Aml *rt_pkg = aml_varpackage(nr_pcie_buses * PCI_NUM_PINS);
    for (bus_no = 0; bus_no < nr_pcie_buses; bus_no++) {
        for (i = 0; i < PCI_NUM_PINS; i++) {
            int gsi = (i + bus_no) % PCI_NUM_PINS;
            Aml *pkg = aml_package(4);
            aml_append(pkg, aml_int((bus_no << 16) | 0xFFFF));
            aml_append(pkg, aml_int(i));
            aml_append(pkg, aml_name("GSI%d", gsi));
            aml_append(pkg, aml_int(0));
            aml_append(rt_pkg, pkg);
        }
    }
    aml_append(dev, aml_name_decl("_PRT", rt_pkg));

    /* Create GSI link device */
    for (i = 0; i < PCI_NUM_PINS; i++) {
        uint32_t irqs =  irq + i;
        Aml *dev_gsi = aml_device("GSI%d", i);
        aml_append(dev_gsi, aml_name_decl("_HID", aml_string("PNP0C0F")));
        aml_append(dev_gsi, aml_name_decl("_UID", aml_int(0)));
        crs = aml_resource_template();
        aml_append(crs,
                   aml_interrupt(AML_CONSUMER, AML_LEVEL, AML_ACTIVE_HIGH,
                                 AML_EXCLUSIVE, &irqs, 1));
        aml_append(dev_gsi, aml_name_decl("_PRS", crs));
        crs = aml_resource_template();
        aml_append(crs,
                   aml_interrupt(AML_CONSUMER, AML_LEVEL, AML_ACTIVE_HIGH,
                                 AML_EXCLUSIVE, &irqs, 1));
        aml_append(dev_gsi, aml_name_decl("_CRS", crs));
        method = aml_method("_SRS", 1, AML_NOTSERIALIZED);
        aml_append(dev_gsi, method);
        aml_append(dev, dev_gsi);
    }

    method = aml_method("_CBA", 0, AML_NOTSERIALIZED);
    aml_append(method, aml_return(aml_int(base_ecam)));
    aml_append(dev, method);

    method = aml_method("_CRS", 0, AML_NOTSERIALIZED);
    Aml *rbuf = aml_resource_template();
    aml_append(rbuf,
        aml_word_bus_number(AML_MIN_FIXED, AML_MAX_FIXED, AML_POS_DECODE,
                            0x0000, 0x0000, nr_pcie_buses - 1, 0x0000,
                            nr_pcie_buses));
    aml_append(rbuf,
        aml_dword_memory(AML_POS_DECODE, AML_MIN_FIXED, AML_MAX_FIXED,
                         AML_NON_CACHEABLE, AML_READ_WRITE, 0x0000, base_mmio,
                         base_mmio + size_mmio - 1, 0x0000, size_mmio));
    aml_append(rbuf,
        aml_dword_io(AML_MIN_FIXED, AML_MAX_FIXED, AML_POS_DECODE,
                     AML_ENTIRE_RANGE, 0x0000, 0x0000, size_pio - 1, base_pio,
                     size_pio));

    if (use_highmem) {
        hwaddr base_mmio_high = memmap[VIRT_PCIE_MMIO_HIGH].base;
        hwaddr size_mmio_high = memmap[VIRT_PCIE_MMIO_HIGH].size;

        aml_append(rbuf,
            aml_qword_memory(AML_POS_DECODE, AML_MIN_FIXED, AML_MAX_FIXED,
                             AML_NON_CACHEABLE, AML_READ_WRITE, 0x0000,
                             base_mmio_high,
                             base_mmio_high + size_mmio_high - 1, 0x0000,
                             size_mmio_high));
    }

    aml_append(method, aml_name_decl("RBUF", rbuf));
    aml_append(method, aml_return(rbuf));
    aml_append(dev, method);

    /* Declare an _OSC (OS Control Handoff) method */
    aml_append(dev, aml_name_decl("SUPP", aml_int(0)));
    aml_append(dev, aml_name_decl("CTRL", aml_int(0)));
    method = aml_method("_OSC", 4, AML_NOTSERIALIZED);
    aml_append(method,
        aml_create_dword_field(aml_arg(3), aml_int(0), "CDW1"));

    /* PCI Firmware Specification 3.0
     * 4.5.1. _OSC Interface for PCI Host Bridge Devices
     * The _OSC interface for a PCI/PCI-X/PCI Express hierarchy is
     * identified by the Universal Unique IDentifier (UUID)
     * 33DB4D5B-1FF7-401C-9657-7441C03DD766
     */
    UUID = aml_touuid("33DB4D5B-1FF7-401C-9657-7441C03DD766");
    ifctx = aml_if(aml_equal(aml_arg(0), UUID));
    aml_append(ifctx,
        aml_create_dword_field(aml_arg(3), aml_int(4), "CDW2"));
    aml_append(ifctx,
        aml_create_dword_field(aml_arg(3), aml_int(8), "CDW3"));
    aml_append(ifctx, aml_store(aml_name("CDW2"), aml_name("SUPP")));
    aml_append(ifctx, aml_store(aml_name("CDW3"), aml_name("CTRL")));
    aml_append(ifctx, aml_store(aml_and(aml_name("CTRL"), aml_int(0x1D), NULL),
                                aml_name("CTRL")));

    ifctx1 = aml_if(aml_lnot(aml_equal(aml_arg(1), aml_int(0x1))));
    aml_append(ifctx1, aml_store(aml_or(aml_name("CDW1"), aml_int(0x08), NULL),
                                 aml_name("CDW1")));
    aml_append(ifctx, ifctx1);

    ifctx1 = aml_if(aml_lnot(aml_equal(aml_name("CDW3"), aml_name("CTRL"))));
    aml_append(ifctx1, aml_store(aml_or(aml_name("CDW1"), aml_int(0x10), NULL),
                                 aml_name("CDW1")));
    aml_append(ifctx, ifctx1);

    aml_append(ifctx, aml_store(aml_name("CTRL"), aml_name("CDW3")));
    aml_append(ifctx, aml_return(aml_arg(3)));
    aml_append(method, ifctx);

    elsectx = aml_else();
    aml_append(elsectx, aml_store(aml_or(aml_name("CDW1"), aml_int(4), NULL),
                                  aml_name("CDW1")));
    aml_append(elsectx, aml_return(aml_arg(3)));
    aml_append(method, elsectx);
    aml_append(dev, method);

    method = aml_method("_DSM", 4, AML_NOTSERIALIZED);

    /* PCI Firmware Specification 3.0
     * 4.6.1. _DSM for PCI Express Slot Information
     * The UUID in _DSM in this context is
     * {E5C937D0-3553-4D7A-9117-EA4D19C3434D}
     */
    UUID = aml_touuid("E5C937D0-3553-4D7A-9117-EA4D19C3434D");
    ifctx = aml_if(aml_equal(aml_arg(0), UUID));
    ifctx1 = aml_if(aml_equal(aml_arg(2), aml_int(0)));
    uint8_t byte_list[1] = {1};
    buf = aml_buffer(1, byte_list);
    aml_append(ifctx1, aml_return(buf));
    aml_append(ifctx, ifctx1);
    aml_append(method, ifctx);

    byte_list[0] = 0;
    buf = aml_buffer(1, byte_list);
    aml_append(method, aml_return(buf));
    aml_append(dev, method);

    Aml *dev_rp0 = aml_device("%s", "RP0");
    aml_append(dev_rp0, aml_name_decl("_ADR", aml_int(0)));
    aml_append(dev, dev_rp0);

    Aml *dev_res0 = aml_device("%s", "RES0");
    aml_append(dev_res0, aml_name_decl("_HID", aml_string("PNP0C02")));
    crs = aml_resource_template();
    aml_append(crs,
        aml_qword_memory(AML_POS_DECODE, AML_MIN_FIXED, AML_MAX_FIXED,
                         AML_NON_CACHEABLE, AML_READ_WRITE, 0x0000, base_ecam,
                         base_ecam + size_ecam - 1, 0x0000, size_ecam));
    aml_append(dev_res0, aml_name_decl("_CRS", crs));
    aml_append(dev, dev_res0);
    aml_append(scope, dev);
}

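/*
 * DSDT: describe the GPIO controller (_HID ARMH0061) and wire pin 3 up as
 * an ACPI GPIO-signalled event (_AEI/_E03) that notifies the power button
 * device.
 */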
static void acpi_dsdt_add_gpio(Aml *scope, const MemMapEntry *gpio_memmap,
                                           uint32_t gpio_irq)
{
    Aml *dev = aml_device("GPO0");
    aml_append(dev, aml_name_decl("_HID", aml_string("ARMH0061")));
    aml_append(dev, aml_name_decl("_ADR", aml_int(0)));
    aml_append(dev, aml_name_decl("_UID", aml_int(0)));

    Aml *crs = aml_resource_template();
    aml_append(crs, aml_memory32_fixed(gpio_memmap->base, gpio_memmap->size,
                                       AML_READ_WRITE));
    aml_append(crs, aml_interrupt(AML_CONSUMER, AML_LEVEL, AML_ACTIVE_HIGH,
                                  AML_EXCLUSIVE, &gpio_irq, 1));
    aml_append(dev, aml_name_decl("_CRS", crs));

    Aml *aei = aml_resource_template();
    /* Pin 3 for power button */
    const uint32_t pin_list[1] = {3};
    aml_append(aei, aml_gpio_int(AML_CONSUMER, AML_EDGE, AML_ACTIVE_HIGH,
                                 AML_EXCLUSIVE, AML_PULL_UP, 0, pin_list, 1,
                                 "GPO0", NULL, 0));
    aml_append(dev, aml_name_decl("_AEI", aei));

    /* _E03 is the handler method for the power button event */
    Aml *method = aml_method("_E03", 0, AML_NOTSERIALIZED);
    aml_append(method, aml_notify(aml_name(ACPI_POWER_BUTTON_DEVICE),
                                  aml_int(0x80)));
    aml_append(dev, method);
    aml_append(scope, dev);
}

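/* DSDT: ACPI power button device (PNP0C0C), notified from _E03 above. */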
static void acpi_dsdt_add_power_button(Aml *scope)
{
    Aml *dev = aml_device(ACPI_POWER_BUTTON_DEVICE);
    aml_append(dev, aml_name_decl("_HID", aml_string("PNP0C0C")));
    aml_append(dev, aml_name_decl("_ADR", aml_int(0)));
    aml_append(dev, aml_name_decl("_UID", aml_int(0)));
    aml_append(scope, dev);
}

/* RSDP */
static GArray *
build_rsdp(GArray *rsdp_table, BIOSLinker *linker, unsigned xsdt_tbl_offset)
{
    AcpiRsdpDescriptor *rsdp = acpi_data_push(rsdp_table, sizeof *rsdp);
    unsigned xsdt_pa_size = sizeof(rsdp->xsdt_physical_address);
    unsigned xsdt_pa_offset =
        (char *)&rsdp->xsdt_physical_address - rsdp_table->data;

    bios_linker_loader_alloc(linker, ACPI_BUILD_RSDP_FILE, rsdp_table, 16,
                             true /* fseg memory */);

    memcpy(&rsdp->signature, "RSD PTR ", sizeof(rsdp->signature));
    memcpy(rsdp->oem_id, ACPI_BUILD_APPNAME6, sizeof(rsdp->oem_id));
    rsdp->length = cpu_to_le32(sizeof(*rsdp));
    rsdp->revision = 0x02;

    /* Address to be filled by Guest linker */
    bios_linker_loader_add_pointer(linker,
        ACPI_BUILD_RSDP_FILE, xsdt_pa_offset, xsdt_pa_size,
        ACPI_BUILD_TABLE_FILE, xsdt_tbl_offset);

    /* Checksum to be filled by Guest linker */
    bios_linker_loader_add_checksum(linker, ACPI_BUILD_RSDP_FILE,
        (char *)rsdp - rsdp_table->data, sizeof *rsdp,
        (char *)&rsdp->checksum - rsdp_table->data);

    return rsdp_table;
}

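/*
 * IORT: maps PCIe requester IDs to the GIC ITS group, optionally routing
 * them through an SMMUv3 node when an IOMMU is enabled.
 */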
static void
build_iort(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms)
{
    int nb_nodes, iort_start = table_data->len;
    AcpiIortIdMapping *idmap;
    AcpiIortItsGroup *its;
    AcpiIortTable *iort;
    AcpiIortSmmu3 *smmu;
    size_t node_size, iort_node_offset, iort_length, smmu_offset = 0;
    AcpiIortRC *rc;

    iort = acpi_data_push(table_data, sizeof(*iort));

    if (vms->iommu == VIRT_IOMMU_SMMUV3) {
        nb_nodes = 3; /* RC, ITS, SMMUv3 */
    } else {
        nb_nodes = 2; /* RC, ITS */
    }

    iort_length = sizeof(*iort);
    iort->node_count = cpu_to_le32(nb_nodes);
    /*
     * Use a copy in case table_data->data moves during acpi_data_push
     * operations.
     */
    iort_node_offset = sizeof(*iort);
    iort->node_offset = cpu_to_le32(iort_node_offset);

    /* ITS group node */
    node_size =  sizeof(*its) + sizeof(uint32_t);
    iort_length += node_size;
    its = acpi_data_push(table_data, node_size);

    its->type = ACPI_IORT_NODE_ITS_GROUP;
    its->length = cpu_to_le16(node_size);
    its->its_count = cpu_to_le32(1);
    its->identifiers[0] = 0; /* MADT translation_id */

    if (vms->iommu == VIRT_IOMMU_SMMUV3) {
        int irq =  vms->irqmap[VIRT_SMMU];

        /* SMMUv3 node */
        smmu_offset = iort_node_offset + node_size;
        node_size = sizeof(*smmu) + sizeof(*idmap);
        iort_length += node_size;
        smmu = acpi_data_push(table_data, node_size);

        smmu->type = ACPI_IORT_NODE_SMMU_V3;
        smmu->length = cpu_to_le16(node_size);
        smmu->mapping_count = cpu_to_le32(1);
        smmu->mapping_offset = cpu_to_le32(sizeof(*smmu));
        smmu->base_address = cpu_to_le64(vms->memmap[VIRT_SMMU].base);
        smmu->event_gsiv = cpu_to_le32(irq);
        smmu->pri_gsiv = cpu_to_le32(irq + 1);
        smmu->gerr_gsiv = cpu_to_le32(irq + 2);
        smmu->sync_gsiv = cpu_to_le32(irq + 3);

        /* Identity RID mapping covering the whole input RID range */
        idmap = &smmu->id_mapping_array[0];
        idmap->input_base = 0;
        idmap->id_count = cpu_to_le32(0xFFFF);
        idmap->output_base = 0;
        /* output IORT node is the ITS group node (the first node) */
        idmap->output_reference = cpu_to_le32(iort_node_offset);
    }

    /* Root Complex Node */
    node_size = sizeof(*rc) + sizeof(*idmap);
    iort_length += node_size;
    rc = acpi_data_push(table_data, node_size);

    rc->type = ACPI_IORT_NODE_PCI_ROOT_COMPLEX;
    rc->length = cpu_to_le16(node_size);
    rc->mapping_count = cpu_to_le32(1);
    rc->mapping_offset = cpu_to_le32(sizeof(*rc));

    /* fully coherent device */
    rc->memory_properties.cache_coherency = cpu_to_le32(1);
    rc->memory_properties.memory_flags = 0x3; /* CCA = CPM = DCAS = 1 */
    rc->pci_segment_number = 0; /* MCFG pci_segment */

    /* Identity RID mapping covering the whole input RID range */
    idmap = &rc->id_mapping_array[0];
    idmap->input_base = 0;
    idmap->id_count = cpu_to_le32(0xFFFF);
    idmap->output_base = 0;

    if (vms->iommu == VIRT_IOMMU_SMMUV3) {
        /* output IORT node is the smmuv3 node */
        idmap->output_reference = cpu_to_le32(smmu_offset);
    } else {
        /* output IORT node is the ITS group node (the first node) */
        idmap->output_reference = cpu_to_le32(iort_node_offset);
    }

    /*
     * Update the pointer address in case table_data->data moves during above
     * acpi_data_push operations.
     */
    iort = (AcpiIortTable *)(table_data->data + iort_start);
    iort->length = cpu_to_le32(iort_length);

    build_header(linker, table_data, (void *)(table_data->data + iort_start),
                 "IORT", table_data->len - iort_start, 0, NULL, NULL);
}

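/* SPCR: point the OS serial console at the PL011 UART described above. */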
static void
build_spcr(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms)
{
    AcpiSerialPortConsoleRedirection *spcr;
    const MemMapEntry *uart_memmap = &vms->memmap[VIRT_UART];
    int irq = vms->irqmap[VIRT_UART] + ARM_SPI_BASE;
    int spcr_start = table_data->len;

    spcr = acpi_data_push(table_data, sizeof(*spcr));

    spcr->interface_type = 0x3;    /* ARM PL011 UART */

    spcr->base_address.space_id = AML_SYSTEM_MEMORY;
    spcr->base_address.bit_width = 8;
    spcr->base_address.bit_offset = 0;
    spcr->base_address.access_width = 1;
    spcr->base_address.address = cpu_to_le64(uart_memmap->base);

    spcr->interrupt_types = (1 << 3); /* Bit[3] ARMH GIC interrupt */
    spcr->gsi = cpu_to_le32(irq);  /* Global System Interrupt */

    spcr->baud = 3;                /* Baud Rate: 3 = 9600 */
    spcr->parity = 0;              /* No Parity */
    spcr->stopbits = 1;            /* 1 Stop bit */
    spcr->flowctrl = (1 << 1);     /* Bit[1] = RTS/CTS hardware flow control */
    spcr->term_type = 0;           /* Terminal Type: 0 = VT100 */

    spcr->pci_device_id = 0xffff;  /* PCI Device ID: not a PCI device */
    spcr->pci_vendor_id = 0xffff;  /* PCI Vendor ID: not a PCI device */

    build_header(linker, table_data, (void *)(table_data->data + spcr_start),
                 "SPCR", table_data->len - spcr_start, 2, NULL, NULL);
}

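/*
 * SRAT: one GICC affinity structure per possible CPU and one memory
 * affinity structure per NUMA node that has memory.
 */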
static void
build_srat(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms)
{
    AcpiSystemResourceAffinityTable *srat;
    AcpiSratProcessorGiccAffinity *core;
    AcpiSratMemoryAffinity *numamem;
    int i, srat_start;
    uint64_t mem_base;
    MachineClass *mc = MACHINE_GET_CLASS(vms);
    const CPUArchIdList *cpu_list = mc->possible_cpu_arch_ids(MACHINE(vms));

    srat_start = table_data->len;
    srat = acpi_data_push(table_data, sizeof(*srat));
    srat->reserved1 = cpu_to_le32(1);

    for (i = 0; i < cpu_list->len; ++i) {
        core = acpi_data_push(table_data, sizeof(*core));
        core->type = ACPI_SRAT_PROCESSOR_GICC;
        core->length = sizeof(*core);
        core->proximity = cpu_to_le32(cpu_list->cpus[i].props.node_id);
        core->acpi_processor_uid = cpu_to_le32(i);
        core->flags = cpu_to_le32(1);
    }

    mem_base = vms->memmap[VIRT_MEM].base;
    for (i = 0; i < nb_numa_nodes; ++i) {
        if (numa_info[i].node_mem > 0) {
            numamem = acpi_data_push(table_data, sizeof(*numamem));
            build_srat_memory(numamem, mem_base, numa_info[i].node_mem, i,
                              MEM_AFFINITY_ENABLED);
            mem_base += numa_info[i].node_mem;
        }
    }

    build_header(linker, table_data, (void *)(table_data->data + srat_start),
                 "SRAT", table_data->len - srat_start, 3, NULL, NULL);
}

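/* MCFG: a single ECAM allocation covering PCI segment 0. */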
static void
build_mcfg(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms)
{
    AcpiTableMcfg *mcfg;
    const MemMapEntry *memmap = vms->memmap;
    int ecam_id = VIRT_ECAM_ID(vms->highmem_ecam);
    int len = sizeof(*mcfg) + sizeof(mcfg->allocation[0]);
    int mcfg_start = table_data->len;

    mcfg = acpi_data_push(table_data, len);
    mcfg->allocation[0].address = cpu_to_le64(memmap[ecam_id].base);

    /* Only a single allocation so no need to play with segments */
    mcfg->allocation[0].pci_segment = cpu_to_le16(0);
    mcfg->allocation[0].start_bus_number = 0;
    mcfg->allocation[0].end_bus_number = (memmap[ecam_id].size
                                          / PCIE_MMCFG_SIZE_MIN) - 1;

    build_header(linker, table_data, (void *)(table_data->data + mcfg_start),
                 "MCFG", table_data->len - mcfg_start, 1, NULL, NULL);
}

/* GTDT */
static void
build_gtdt(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms)
{
    VirtMachineClass *vmc = VIRT_MACHINE_GET_CLASS(vms);
    int gtdt_start = table_data->len;
    AcpiGenericTimerTable *gtdt;
    uint32_t irqflags;

    if (vmc->claim_edge_triggered_timers) {
        irqflags = ACPI_GTDT_INTERRUPT_MODE_EDGE;
    } else {
        irqflags = ACPI_GTDT_INTERRUPT_MODE_LEVEL;
    }

    gtdt = acpi_data_push(table_data, sizeof *gtdt);
    /* These are the same timer interrupts as in the device tree, with 16
     * added to turn the PPI numbers into GIC interrupt IDs.
     */
    gtdt->secure_el1_interrupt = cpu_to_le32(ARCH_TIMER_S_EL1_IRQ + 16);
    gtdt->secure_el1_flags = cpu_to_le32(irqflags);

    gtdt->non_secure_el1_interrupt = cpu_to_le32(ARCH_TIMER_NS_EL1_IRQ + 16);
    gtdt->non_secure_el1_flags = cpu_to_le32(irqflags |
                                             ACPI_GTDT_CAP_ALWAYS_ON);

    gtdt->virtual_timer_interrupt = cpu_to_le32(ARCH_TIMER_VIRT_IRQ + 16);
    gtdt->virtual_timer_flags = cpu_to_le32(irqflags);

    gtdt->non_secure_el2_interrupt = cpu_to_le32(ARCH_TIMER_NS_EL2_IRQ + 16);
    gtdt->non_secure_el2_flags = cpu_to_le32(irqflags);

    build_header(linker, table_data,
                 (void *)(table_data->data + gtdt_start), "GTDT",
                 table_data->len - gtdt_start, 2, NULL, NULL);
}

/* MADT */
static void
build_madt(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms)
{
    VirtMachineClass *vmc = VIRT_MACHINE_GET_CLASS(vms);
    int madt_start = table_data->len;
    const MemMapEntry *memmap = vms->memmap;
    const int *irqmap = vms->irqmap;
    AcpiMultipleApicTable *madt;
    AcpiMadtGenericDistributor *gicd;
    AcpiMadtGenericMsiFrame *gic_msi;
    int i;

    madt = acpi_data_push(table_data, sizeof *madt);

    gicd = acpi_data_push(table_data, sizeof *gicd);
    gicd->type = ACPI_APIC_GENERIC_DISTRIBUTOR;
    gicd->length = sizeof(*gicd);
    gicd->base_address = cpu_to_le64(memmap[VIRT_GIC_DIST].base);
    gicd->version = vms->gic_version;

    for (i = 0; i < vms->smp_cpus; i++) {
        AcpiMadtGenericCpuInterface *gicc = acpi_data_push(table_data,
                                                           sizeof(*gicc));
        ARMCPU *armcpu = ARM_CPU(qemu_get_cpu(i));

        gicc->type = ACPI_APIC_GENERIC_CPU_INTERFACE;
        gicc->length = sizeof(*gicc);
        if (vms->gic_version == 2) {
            gicc->base_address = cpu_to_le64(memmap[VIRT_GIC_CPU].base);
            gicc->gich_base_address = cpu_to_le64(memmap[VIRT_GIC_HYP].base);
            gicc->gicv_base_address = cpu_to_le64(memmap[VIRT_GIC_VCPU].base);
        }
        gicc->cpu_interface_number = cpu_to_le32(i);
        gicc->arm_mpidr = cpu_to_le64(armcpu->mp_affinity);
        gicc->uid = cpu_to_le32(i);
        gicc->flags = cpu_to_le32(ACPI_MADT_GICC_ENABLED);

        if (arm_feature(&armcpu->env, ARM_FEATURE_PMU)) {
            gicc->performance_interrupt = cpu_to_le32(PPI(VIRTUAL_PMU_IRQ));
        }
        if (vms->virt) {
            gicc->vgic_interrupt = cpu_to_le32(PPI(ARCH_GIC_MAINT_IRQ));
        }
    }

    if (vms->gic_version == 3) {
        AcpiMadtGenericTranslator *gic_its;
        int nb_redist_regions = virt_gicv3_redist_region_count(vms);
        AcpiMadtGenericRedistributor *gicr = acpi_data_push(table_data,
                                                         sizeof *gicr);

        gicr->type = ACPI_APIC_GENERIC_REDISTRIBUTOR;
        gicr->length = sizeof(*gicr);
        gicr->base_address = cpu_to_le64(memmap[VIRT_GIC_REDIST].base);
        gicr->range_length = cpu_to_le32(memmap[VIRT_GIC_REDIST].size);

        if (nb_redist_regions == 2) {
            gicr = acpi_data_push(table_data, sizeof(*gicr));
            gicr->type = ACPI_APIC_GENERIC_REDISTRIBUTOR;
            gicr->length = sizeof(*gicr);
            gicr->base_address = cpu_to_le64(memmap[VIRT_GIC_REDIST2].base);
            gicr->range_length = cpu_to_le32(memmap[VIRT_GIC_REDIST2].size);
        }

        if (its_class_name() && !vmc->no_its) {
            gic_its = acpi_data_push(table_data, sizeof *gic_its);
            gic_its->type = ACPI_APIC_GENERIC_TRANSLATOR;
            gic_its->length = sizeof(*gic_its);
            gic_its->translation_id = 0;
            gic_its->base_address = cpu_to_le64(memmap[VIRT_GIC_ITS].base);
        }
    } else {
        gic_msi = acpi_data_push(table_data, sizeof *gic_msi);
        gic_msi->type = ACPI_APIC_GENERIC_MSI_FRAME;
        gic_msi->length = sizeof(*gic_msi);
        gic_msi->gic_msi_frame_id = 0;
        gic_msi->base_address = cpu_to_le64(memmap[VIRT_GIC_V2M].base);
        gic_msi->flags = cpu_to_le32(1);
        gic_msi->spi_count = cpu_to_le16(NUM_GICV2M_SPIS);
        gic_msi->spi_base = cpu_to_le16(irqmap[VIRT_GIC_V2M] + ARM_SPI_BASE);
    }

    build_header(linker, table_data,
                 (void *)(table_data->data + madt_start), "APIC",
                 table_data->len - madt_start, 3, NULL, NULL);
}

/* FADT */
static void build_fadt_rev5(GArray *table_data, BIOSLinker *linker,
                            VirtMachineState *vms, unsigned dsdt_tbl_offset)
{
    /* ACPI v5.1 */
    AcpiFadtData fadt = {
        .rev = 5,
        .minor_ver = 1,
        .flags = 1 << ACPI_FADT_F_HW_REDUCED_ACPI,
        .xdsdt_tbl_offset = &dsdt_tbl_offset,
    };

    switch (vms->psci_conduit) {
    case QEMU_PSCI_CONDUIT_DISABLED:
        fadt.arm_boot_arch = 0;
        break;
    case QEMU_PSCI_CONDUIT_HVC:
        fadt.arm_boot_arch = ACPI_FADT_ARM_PSCI_COMPLIANT |
                             ACPI_FADT_ARM_PSCI_USE_HVC;
        break;
    case QEMU_PSCI_CONDUIT_SMC:
        fadt.arm_boot_arch = ACPI_FADT_ARM_PSCI_COMPLIANT;
        break;
    default:
        g_assert_not_reached();
    }

    build_fadt(table_data, linker, &fadt, NULL, NULL);
}

/* DSDT */
static void
build_dsdt(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms)
{
    Aml *scope, *dsdt;
    const MemMapEntry *memmap = vms->memmap;
    const int *irqmap = vms->irqmap;

    dsdt = init_aml_allocator();
    /* Reserve space for header */
    acpi_data_push(dsdt->buf, sizeof(AcpiTableHeader));

    /* When booting the VM with UEFI, UEFI takes ownership of the RTC hardware.
     * While UEFI can use libfdt to disable the RTC device node in the DTB that
     * it passes to the OS, it cannot modify AML. Therefore, we won't generate
     * the RTC ACPI device at all when using UEFI.
     */
    scope = aml_scope("\\_SB");
    acpi_dsdt_add_cpus(scope, vms->smp_cpus);
    acpi_dsdt_add_uart(scope, &memmap[VIRT_UART],
                       (irqmap[VIRT_UART] + ARM_SPI_BASE));
    acpi_dsdt_add_flash(scope, &memmap[VIRT_FLASH]);
    acpi_dsdt_add_fw_cfg(scope, &memmap[VIRT_FW_CFG]);
    acpi_dsdt_add_virtio(scope, &memmap[VIRT_MMIO],
                    (irqmap[VIRT_MMIO] + ARM_SPI_BASE), NUM_VIRTIO_TRANSPORTS);
    acpi_dsdt_add_pci(scope, memmap, (irqmap[VIRT_PCIE] + ARM_SPI_BASE),
                      vms->highmem, vms->highmem_ecam);
    acpi_dsdt_add_gpio(scope, &memmap[VIRT_GPIO],
                       (irqmap[VIRT_GPIO] + ARM_SPI_BASE));
    acpi_dsdt_add_power_button(scope);

    aml_append(dsdt, scope);

    /* copy AML table into ACPI tables blob and patch header there */
    g_array_append_vals(table_data, dsdt->buf->data, dsdt->buf->len);
    build_header(linker, table_data,
        (void *)(table_data->data + table_data->len - dsdt->buf->len),
        "DSDT", dsdt->buf->len, 2, NULL, NULL);
    free_aml_allocator();
}

typedef
struct AcpiBuildState {
    /* Copy of table in RAM (for patching). */
    MemoryRegion *table_mr;
    MemoryRegion *rsdp_mr;
    MemoryRegion *linker_mr;
    /* Is table patched? */
    bool patched;
} AcpiBuildState;

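/*
 * Build the DSDT and the secondary tables into tables->table_data,
 * recording their offsets so the XSDT and RSDP can point at them.
 */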
static
void virt_acpi_build(VirtMachineState *vms, AcpiBuildTables *tables)
{
    VirtMachineClass *vmc = VIRT_MACHINE_GET_CLASS(vms);
    GArray *table_offsets;
    unsigned dsdt, xsdt;
    GArray *tables_blob = tables->table_data;

    table_offsets = g_array_new(false, true /* clear */,
                                        sizeof(uint32_t));

    bios_linker_loader_alloc(tables->linker,
                             ACPI_BUILD_TABLE_FILE, tables_blob,
                             64, false /* high memory */);

    /* DSDT is pointed to by FADT */
    dsdt = tables_blob->len;
    build_dsdt(tables_blob, tables->linker, vms);

    /* FADT MADT GTDT MCFG SPCR pointed to by XSDT */
    acpi_add_table(table_offsets, tables_blob);
    build_fadt_rev5(tables_blob, tables->linker, vms, dsdt);

    acpi_add_table(table_offsets, tables_blob);
    build_madt(tables_blob, tables->linker, vms);

    acpi_add_table(table_offsets, tables_blob);
    build_gtdt(tables_blob, tables->linker, vms);

    acpi_add_table(table_offsets, tables_blob);
    build_mcfg(tables_blob, tables->linker, vms);

    acpi_add_table(table_offsets, tables_blob);
    build_spcr(tables_blob, tables->linker, vms);

    if (nb_numa_nodes > 0) {
        acpi_add_table(table_offsets, tables_blob);
        build_srat(tables_blob, tables->linker, vms);
        if (have_numa_distance) {
            acpi_add_table(table_offsets, tables_blob);
            build_slit(tables_blob, tables->linker);
        }
    }

    if (its_class_name() && !vmc->no_its) {
        acpi_add_table(table_offsets, tables_blob);
        build_iort(tables_blob, tables->linker, vms);
    }

    /* XSDT is pointed to by RSDP */
    xsdt = tables_blob->len;
    build_xsdt(tables_blob, tables->linker, table_offsets, NULL, NULL);

    /* RSDP is in FSEG memory, so allocate it separately */
    build_rsdp(tables->rsdp, tables->linker, xsdt);

    /* Cleanup memory that's no longer used. */
    g_array_free(table_offsets, true);
}

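/* Copy a rebuilt blob back into its guest-visible RAM region. */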
static void acpi_ram_update(MemoryRegion *mr, GArray *data)
{
    uint32_t size = acpi_data_len(data);

    /* Make sure RAM size is correct - in case it got changed
     * e.g. by migration */
    memory_region_ram_resize(mr, size, &error_abort);

    memcpy(memory_region_get_ram_ptr(mr), data->data, size);
    memory_region_set_dirty(mr, 0, size);
}

static void virt_acpi_build_update(void *build_opaque)
{
    AcpiBuildState *build_state = build_opaque;
    AcpiBuildTables tables;

    /* No state to update or already patched? Nothing to do. */
    if (!build_state || build_state->patched) {
        return;
    }
    build_state->patched = true;

    acpi_build_tables_init(&tables);

    virt_acpi_build(VIRT_MACHINE(qdev_get_machine()), &tables);

    acpi_ram_update(build_state->table_mr, tables.table_data);
    acpi_ram_update(build_state->rsdp_mr, tables.rsdp);
    acpi_ram_update(build_state->linker_mr, tables.linker->cmd_blob);

    acpi_build_tables_cleanup(&tables, true);
}

static void virt_acpi_build_reset(void *build_opaque)
{
    AcpiBuildState *build_state = build_opaque;
    build_state->patched = false;
}

static MemoryRegion *acpi_add_rom_blob(AcpiBuildState *build_state,
                                       GArray *blob, const char *name,
                                       uint64_t max_size)
{
    return rom_add_blob(name, blob->data, acpi_data_len(blob), max_size, -1,
                        name, virt_acpi_build_update, build_state, NULL, true);
}

static const VMStateDescription vmstate_virt_acpi_build = {
    .name = "virt_acpi_build",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_BOOL(patched, AcpiBuildState),
        VMSTATE_END_OF_LIST()
    },
};

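/*
 * Top-level entry point: build the tables once, expose them to the guest
 * as ROM blobs / fw_cfg files, and register the reset and vmstate handlers
 * so virt_acpi_build_update can re-patch the copies in guest memory.
 */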
void virt_acpi_setup(VirtMachineState *vms)
{
    AcpiBuildTables tables;
    AcpiBuildState *build_state;

    if (!vms->fw_cfg) {
        trace_virt_acpi_setup();
        return;
    }

    if (!acpi_enabled) {
        trace_virt_acpi_setup();
        return;
    }

    build_state = g_malloc0(sizeof *build_state);

    acpi_build_tables_init(&tables);
    virt_acpi_build(vms, &tables);

    /* Now expose it all to Guest */
    build_state->table_mr = acpi_add_rom_blob(build_state, tables.table_data,
                                               ACPI_BUILD_TABLE_FILE,
                                               ACPI_BUILD_TABLE_MAX_SIZE);
    assert(build_state->table_mr != NULL);

    build_state->linker_mr =
        acpi_add_rom_blob(build_state, tables.linker->cmd_blob,
                          "etc/table-loader", 0);

    fw_cfg_add_file(vms->fw_cfg, ACPI_BUILD_TPMLOG_FILE, tables.tcpalog->data,
                    acpi_data_len(tables.tcpalog));

    build_state->rsdp_mr = acpi_add_rom_blob(build_state, tables.rsdp,
                                              ACPI_BUILD_RSDP_FILE, 0);

    qemu_register_reset(virt_acpi_build_reset, build_state);
    virt_acpi_build_reset(build_state);
    vmstate_register(NULL, 0, &vmstate_virt_acpi_build, build_state);

    /* Cleanup tables but don't free the memory: we track it
     * in build_state.
     */
    acpi_build_tables_cleanup(&tables, false);
}
 966