qemu/hw/arm/virt-acpi-build.c
/* Support for generating ACPI tables and passing them to Guests
 *
 * ARM virt ACPI generation
 *
 * Copyright (C) 2008-2010  Kevin O'Connor <kevin@koconnor.net>
 * Copyright (C) 2006 Fabrice Bellard
 * Copyright (C) 2013 Red Hat Inc
 *
 * Author: Michael S. Tsirkin <mst@redhat.com>
 *
 * Copyright (c) 2015 HUAWEI TECHNOLOGIES CO.,LTD.
 *
 * Author: Shannon Zhao <zhaoshenglong@huawei.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.

 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.

 * You should have received a copy of the GNU General Public License along
 * with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu-common.h"
#include "qemu/bitmap.h"
#include "trace.h"
#include "qom/cpu.h"
#include "target/arm/cpu.h"
#include "hw/acpi/acpi-defs.h"
#include "hw/acpi/acpi.h"
#include "hw/nvram/fw_cfg.h"
#include "hw/acpi/bios-linker-loader.h"
#include "hw/loader.h"
#include "hw/hw.h"
#include "hw/acpi/aml-build.h"
#include "hw/pci/pcie_host.h"
#include "hw/pci/pci.h"
#include "hw/arm/virt.h"
#include "sysemu/numa.h"
#include "kvm_arm.h"

#define ARM_SPI_BASE 32
#define ACPI_POWER_BUTTON_DEVICE "PWRB"

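/*
 * DSDT CPU objects: one Device with _HID "ACPI0007" (Processor Device) per
 * present CPU. The _UID must match the ACPI processor UID used in the MADT
 * GICC entries below so the OS can correlate the two.
 */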
static void acpi_dsdt_add_cpus(Aml *scope, int smp_cpus)
{
    uint16_t i;

    for (i = 0; i < smp_cpus; i++) {
        Aml *dev = aml_device("C%.03X", i);
        aml_append(dev, aml_name_decl("_HID", aml_string("ACPI0007")));
        aml_append(dev, aml_name_decl("_UID", aml_int(i)));
        aml_append(scope, dev);
    }
}

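/*
 * PL011 UART exposed as an ARMH0011 device with one MMIO window and one
 * level-triggered SPI. With the default virt memmap this decompiles to
 * roughly the following ASL (illustrative only):
 *
 *     Device (COM0) {
 *         Name (_HID, "ARMH0011")
 *         Name (_UID, Zero)
 *         Name (_CRS, ResourceTemplate () {
 *             Memory32Fixed (ReadWrite, 0x09000000, 0x00001000)
 *             Interrupt (ResourceConsumer, Level, ActiveHigh, Exclusive) { 33 }
 *         })
 *         Name (_ADR, 0x09000000)
 *     }
 */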
static void acpi_dsdt_add_uart(Aml *scope, const MemMapEntry *uart_memmap,
                                           uint32_t uart_irq)
{
    Aml *dev = aml_device("COM0");
    aml_append(dev, aml_name_decl("_HID", aml_string("ARMH0011")));
    aml_append(dev, aml_name_decl("_UID", aml_int(0)));

    Aml *crs = aml_resource_template();
    aml_append(crs, aml_memory32_fixed(uart_memmap->base,
                                       uart_memmap->size, AML_READ_WRITE));
    aml_append(crs,
               aml_interrupt(AML_CONSUMER, AML_LEVEL, AML_ACTIVE_HIGH,
                             AML_EXCLUSIVE, &uart_irq, 1));
    aml_append(dev, aml_name_decl("_CRS", crs));

    /* The _ADR entry is used to link this device to the UART described
     * in the SPCR table, i.e. SPCR.base_address.address == _ADR.
     */
    aml_append(dev, aml_name_decl("_ADR", aml_int(uart_memmap->base)));

    aml_append(scope, dev);
}

static void acpi_dsdt_add_fw_cfg(Aml *scope, const MemMapEntry *fw_cfg_memmap)
{
    Aml *dev = aml_device("FWCF");
    aml_append(dev, aml_name_decl("_HID", aml_string("QEMU0002")));
    /* device present, functioning, decoding, not shown in UI */
    aml_append(dev, aml_name_decl("_STA", aml_int(0xB)));
    aml_append(dev, aml_name_decl("_CCA", aml_int(1)));

    Aml *crs = aml_resource_template();
    aml_append(crs, aml_memory32_fixed(fw_cfg_memmap->base,
                                       fw_cfg_memmap->size, AML_READ_WRITE));
    aml_append(dev, aml_name_decl("_CRS", crs));
    aml_append(scope, dev);
}

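/*
 * The flash region is split in half and exposed as two LNRO0015 devices
 * (FLS0/FLS1), matching the two flash banks the virt board provides.
 */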
static void acpi_dsdt_add_flash(Aml *scope, const MemMapEntry *flash_memmap)
{
    Aml *dev, *crs;
    hwaddr base = flash_memmap->base;
    hwaddr size = flash_memmap->size / 2;

    dev = aml_device("FLS0");
    aml_append(dev, aml_name_decl("_HID", aml_string("LNRO0015")));
    aml_append(dev, aml_name_decl("_UID", aml_int(0)));

    crs = aml_resource_template();
    aml_append(crs, aml_memory32_fixed(base, size, AML_READ_WRITE));
    aml_append(dev, aml_name_decl("_CRS", crs));
    aml_append(scope, dev);

    dev = aml_device("FLS1");
    aml_append(dev, aml_name_decl("_HID", aml_string("LNRO0015")));
    aml_append(dev, aml_name_decl("_UID", aml_int(1)));
    crs = aml_resource_template();
    aml_append(crs, aml_memory32_fixed(base + size, size, AML_READ_WRITE));
    aml_append(dev, aml_name_decl("_CRS", crs));
    aml_append(scope, dev);
}

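/*
 * virtio-mmio transports: 'num' LNRO0005 devices laid out back to back,
 * each getting one MMIO window of 'size' bytes and one consecutive SPI
 * starting at mmio_irq.
 */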
static void acpi_dsdt_add_virtio(Aml *scope,
                                 const MemMapEntry *virtio_mmio_memmap,
                                 uint32_t mmio_irq, int num)
{
    hwaddr base = virtio_mmio_memmap->base;
    hwaddr size = virtio_mmio_memmap->size;
    int i;

    for (i = 0; i < num; i++) {
        uint32_t irq = mmio_irq + i;
        Aml *dev = aml_device("VR%02u", i);
        aml_append(dev, aml_name_decl("_HID", aml_string("LNRO0005")));
        aml_append(dev, aml_name_decl("_UID", aml_int(i)));
        aml_append(dev, aml_name_decl("_CCA", aml_int(1)));

        Aml *crs = aml_resource_template();
        aml_append(crs, aml_memory32_fixed(base, size, AML_READ_WRITE));
        aml_append(crs,
                   aml_interrupt(AML_CONSUMER, AML_LEVEL, AML_ACTIVE_HIGH,
                                 AML_EXCLUSIVE, &irq, 1));
        aml_append(dev, aml_name_decl("_CRS", crs));
        aml_append(scope, dev);
        base += size;
    }
}

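/*
 * PCIe host bridge (PNP0A08/PNP0A03): declares the interrupt routing table
 * (_PRT) with four GSI link devices, the bus/MMIO/IO windows in _CRS, an
 * _OSC method handing native PCIe features to the OS, and a PNP0C02
 * placeholder that reserves the ECAM window.
 */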
static void acpi_dsdt_add_pci(Aml *scope, const MemMapEntry *memmap,
                              uint32_t irq, bool use_highmem, bool highmem_ecam)
{
    int ecam_id = VIRT_ECAM_ID(highmem_ecam);
    Aml *method, *crs, *ifctx, *UUID, *ifctx1, *elsectx, *buf;
    int i, bus_no;
    hwaddr base_mmio = memmap[VIRT_PCIE_MMIO].base;
    hwaddr size_mmio = memmap[VIRT_PCIE_MMIO].size;
    hwaddr base_pio = memmap[VIRT_PCIE_PIO].base;
    hwaddr size_pio = memmap[VIRT_PCIE_PIO].size;
    hwaddr base_ecam = memmap[ecam_id].base;
    hwaddr size_ecam = memmap[ecam_id].size;
    int nr_pcie_buses = size_ecam / PCIE_MMCFG_SIZE_MIN;

    Aml *dev = aml_device("%s", "PCI0");
    aml_append(dev, aml_name_decl("_HID", aml_string("PNP0A08")));
    aml_append(dev, aml_name_decl("_CID", aml_string("PNP0A03")));
    aml_append(dev, aml_name_decl("_SEG", aml_int(0)));
    aml_append(dev, aml_name_decl("_BBN", aml_int(0)));
    aml_append(dev, aml_name_decl("_ADR", aml_int(0)));
    aml_append(dev, aml_name_decl("_UID", aml_string("PCI0")));
    aml_append(dev, aml_name_decl("_STR", aml_unicode("PCIe 0 Device")));
    aml_append(dev, aml_name_decl("_CCA", aml_int(1)));

    /* Declare the PCI Routing Table. */
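    /*
     * One four-element package per pin per device:
     *   { address, pin, source, source index }
     * The address (bus_no << 16) | 0xFFFF covers all functions of device
     * 'bus_no' on the root bus; 'source' names one of the GSI link devices
     * created below, and source index 0 means "use that link's _CRS".
     */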
    Aml *rt_pkg = aml_varpackage(nr_pcie_buses * PCI_NUM_PINS);
    for (bus_no = 0; bus_no < nr_pcie_buses; bus_no++) {
        for (i = 0; i < PCI_NUM_PINS; i++) {
            int gsi = (i + bus_no) % PCI_NUM_PINS;
            Aml *pkg = aml_package(4);
            aml_append(pkg, aml_int((bus_no << 16) | 0xFFFF));
            aml_append(pkg, aml_int(i));
            aml_append(pkg, aml_name("GSI%d", gsi));
            aml_append(pkg, aml_int(0));
            aml_append(rt_pkg, pkg);
        }
    }
    aml_append(dev, aml_name_decl("_PRT", rt_pkg));

    /* Create GSI link device */
    for (i = 0; i < PCI_NUM_PINS; i++) {
        uint32_t irqs =  irq + i;
        Aml *dev_gsi = aml_device("GSI%d", i);
        aml_append(dev_gsi, aml_name_decl("_HID", aml_string("PNP0C0F")));
        aml_append(dev_gsi, aml_name_decl("_UID", aml_int(0)));
        crs = aml_resource_template();
        aml_append(crs,
                   aml_interrupt(AML_CONSUMER, AML_LEVEL, AML_ACTIVE_HIGH,
                                 AML_EXCLUSIVE, &irqs, 1));
        aml_append(dev_gsi, aml_name_decl("_PRS", crs));
        crs = aml_resource_template();
        aml_append(crs,
                   aml_interrupt(AML_CONSUMER, AML_LEVEL, AML_ACTIVE_HIGH,
                                 AML_EXCLUSIVE, &irqs, 1));
        aml_append(dev_gsi, aml_name_decl("_CRS", crs));
        method = aml_method("_SRS", 1, AML_NOTSERIALIZED);
        aml_append(dev_gsi, method);
        aml_append(dev, dev_gsi);
    }

    method = aml_method("_CBA", 0, AML_NOTSERIALIZED);
    aml_append(method, aml_return(aml_int(base_ecam)));
    aml_append(dev, method);

    method = aml_method("_CRS", 0, AML_NOTSERIALIZED);
    Aml *rbuf = aml_resource_template();
    aml_append(rbuf,
        aml_word_bus_number(AML_MIN_FIXED, AML_MAX_FIXED, AML_POS_DECODE,
                            0x0000, 0x0000, nr_pcie_buses - 1, 0x0000,
                            nr_pcie_buses));
    aml_append(rbuf,
        aml_dword_memory(AML_POS_DECODE, AML_MIN_FIXED, AML_MAX_FIXED,
                         AML_NON_CACHEABLE, AML_READ_WRITE, 0x0000, base_mmio,
                         base_mmio + size_mmio - 1, 0x0000, size_mmio));
    aml_append(rbuf,
        aml_dword_io(AML_MIN_FIXED, AML_MAX_FIXED, AML_POS_DECODE,
                     AML_ENTIRE_RANGE, 0x0000, 0x0000, size_pio - 1, base_pio,
                     size_pio));

    if (use_highmem) {
        hwaddr base_mmio_high = memmap[VIRT_PCIE_MMIO_HIGH].base;
        hwaddr size_mmio_high = memmap[VIRT_PCIE_MMIO_HIGH].size;

        aml_append(rbuf,
            aml_qword_memory(AML_POS_DECODE, AML_MIN_FIXED, AML_MAX_FIXED,
                             AML_NON_CACHEABLE, AML_READ_WRITE, 0x0000,
                             base_mmio_high,
                             base_mmio_high + size_mmio_high - 1, 0x0000,
                             size_mmio_high));
    }

    aml_append(method, aml_name_decl("RBUF", rbuf));
    aml_append(method, aml_return(rbuf));
    aml_append(dev, method);

    /* Declare an _OSC (OS Control Handoff) method */
    aml_append(dev, aml_name_decl("SUPP", aml_int(0)));
    aml_append(dev, aml_name_decl("CTRL", aml_int(0)));
    method = aml_method("_OSC", 4, AML_NOTSERIALIZED);
    aml_append(method,
        aml_create_dword_field(aml_arg(3), aml_int(0), "CDW1"));

    /* PCI Firmware Specification 3.0
     * 4.5.1. _OSC Interface for PCI Host Bridge Devices
     * The _OSC interface for a PCI/PCI-X/PCI Express hierarchy is
     * identified by the Universal Unique IDentifier (UUID)
     * 33DB4D5B-1FF7-401C-9657-7441C03DD766
     */
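    /*
     * Arg3 layout (_OSC argument buffer): CDW1 carries status/error flags,
     * CDW2 the features the OS supports, CDW3 the control bits the OS
     * requests. The 0x1D mask grants native hot-plug, PME, AER and PCIe
     * capability structure control but withholds SHPC hot-plug control.
     */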
    UUID = aml_touuid("33DB4D5B-1FF7-401C-9657-7441C03DD766");
    ifctx = aml_if(aml_equal(aml_arg(0), UUID));
    aml_append(ifctx,
        aml_create_dword_field(aml_arg(3), aml_int(4), "CDW2"));
    aml_append(ifctx,
        aml_create_dword_field(aml_arg(3), aml_int(8), "CDW3"));
    aml_append(ifctx, aml_store(aml_name("CDW2"), aml_name("SUPP")));
    aml_append(ifctx, aml_store(aml_name("CDW3"), aml_name("CTRL")));
    aml_append(ifctx, aml_store(aml_and(aml_name("CTRL"), aml_int(0x1D), NULL),
                                aml_name("CTRL")));

    ifctx1 = aml_if(aml_lnot(aml_equal(aml_arg(1), aml_int(0x1))));
    aml_append(ifctx1, aml_store(aml_or(aml_name("CDW1"), aml_int(0x08), NULL),
                                 aml_name("CDW1")));
    aml_append(ifctx, ifctx1);

    ifctx1 = aml_if(aml_lnot(aml_equal(aml_name("CDW3"), aml_name("CTRL"))));
    aml_append(ifctx1, aml_store(aml_or(aml_name("CDW1"), aml_int(0x10), NULL),
                                 aml_name("CDW1")));
    aml_append(ifctx, ifctx1);

    aml_append(ifctx, aml_store(aml_name("CTRL"), aml_name("CDW3")));
    aml_append(ifctx, aml_return(aml_arg(3)));
    aml_append(method, ifctx);

    elsectx = aml_else();
    aml_append(elsectx, aml_store(aml_or(aml_name("CDW1"), aml_int(4), NULL),
                                  aml_name("CDW1")));
    aml_append(elsectx, aml_return(aml_arg(3)));
    aml_append(method, elsectx);
    aml_append(dev, method);

    method = aml_method("_DSM", 4, AML_NOTSERIALIZED);

    /* PCI Firmware Specification 3.0
     * 4.6.1. _DSM for PCI Express Slot Information
     * The UUID in _DSM in this context is
     * {E5C937D0-3553-4D7A-9117-EA4D19C3434D}
     */
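    /*
     * Only the query function (arg2 == 0) is handled: it returns a one-byte
     * buffer; every other function returns a zero buffer.
     */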
    UUID = aml_touuid("E5C937D0-3553-4D7A-9117-EA4D19C3434D");
    ifctx = aml_if(aml_equal(aml_arg(0), UUID));
    ifctx1 = aml_if(aml_equal(aml_arg(2), aml_int(0)));
    uint8_t byte_list[1] = {1};
    buf = aml_buffer(1, byte_list);
    aml_append(ifctx1, aml_return(buf));
    aml_append(ifctx, ifctx1);
    aml_append(method, ifctx);

    byte_list[0] = 0;
    buf = aml_buffer(1, byte_list);
    aml_append(method, aml_return(buf));
    aml_append(dev, method);

    Aml *dev_rp0 = aml_device("%s", "RP0");
    aml_append(dev_rp0, aml_name_decl("_ADR", aml_int(0)));
    aml_append(dev, dev_rp0);

    Aml *dev_res0 = aml_device("%s", "RES0");
    aml_append(dev_res0, aml_name_decl("_HID", aml_string("PNP0C02")));
    crs = aml_resource_template();
    aml_append(crs,
        aml_qword_memory(AML_POS_DECODE, AML_MIN_FIXED, AML_MAX_FIXED,
                         AML_NON_CACHEABLE, AML_READ_WRITE, 0x0000, base_ecam,
                         base_ecam + size_ecam - 1, 0x0000, size_ecam));
    aml_append(dev_res0, aml_name_decl("_CRS", crs));
    aml_append(dev, dev_res0);
    aml_append(scope, dev);
}

static void acpi_dsdt_add_gpio(Aml *scope, const MemMapEntry *gpio_memmap,
                                           uint32_t gpio_irq)
{
    Aml *dev = aml_device("GPO0");
    aml_append(dev, aml_name_decl("_HID", aml_string("ARMH0061")));
    aml_append(dev, aml_name_decl("_ADR", aml_int(0)));
    aml_append(dev, aml_name_decl("_UID", aml_int(0)));

    Aml *crs = aml_resource_template();
    aml_append(crs, aml_memory32_fixed(gpio_memmap->base, gpio_memmap->size,
                                       AML_READ_WRITE));
    aml_append(crs, aml_interrupt(AML_CONSUMER, AML_LEVEL, AML_ACTIVE_HIGH,
                                  AML_EXCLUSIVE, &gpio_irq, 1));
    aml_append(dev, aml_name_decl("_CRS", crs));

    Aml *aei = aml_resource_template();
    /* Pin 3 for power button */
    const uint32_t pin_list[1] = {3};
    aml_append(aei, aml_gpio_int(AML_CONSUMER, AML_EDGE, AML_ACTIVE_HIGH,
                                 AML_EXCLUSIVE, AML_PULL_UP, 0, pin_list, 1,
                                 "GPO0", NULL, 0));
    aml_append(dev, aml_name_decl("_AEI", aei));

    /* _E03 is the event-handler method for GPIO pin 3 (the power button) */
    Aml *method = aml_method("_E03", 0, AML_NOTSERIALIZED);
    aml_append(method, aml_notify(aml_name(ACPI_POWER_BUTTON_DEVICE),
                                  aml_int(0x80)));
    aml_append(dev, method);
    aml_append(scope, dev);
}

static void acpi_dsdt_add_power_button(Aml *scope)
{
    Aml *dev = aml_device(ACPI_POWER_BUTTON_DEVICE);
    aml_append(dev, aml_name_decl("_HID", aml_string("PNP0C0C")));
    aml_append(dev, aml_name_decl("_ADR", aml_int(0)));
    aml_append(dev, aml_name_decl("_UID", aml_int(0)));
    aml_append(scope, dev);
}

/* RSDP */
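/*
 * Build an ACPI 2.0+ (revision 2) RSDP in its own blob. The XSDT address
 * and the checksum cannot be known at build time, so BIOS linker loader
 * commands (ADD_POINTER and ADD_CHECKSUM) are emitted for the guest
 * firmware to patch them once the tables are loaded.
 */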
static GArray *
build_rsdp(GArray *rsdp_table, BIOSLinker *linker, unsigned xsdt_tbl_offset)
{
    AcpiRsdpDescriptor *rsdp = acpi_data_push(rsdp_table, sizeof *rsdp);
    unsigned xsdt_pa_size = sizeof(rsdp->xsdt_physical_address);
    unsigned xsdt_pa_offset =
        (char *)&rsdp->xsdt_physical_address - rsdp_table->data;

    bios_linker_loader_alloc(linker, ACPI_BUILD_RSDP_FILE, rsdp_table, 16,
                             true /* fseg memory */);

    memcpy(&rsdp->signature, "RSD PTR ", sizeof(rsdp->signature));
    memcpy(rsdp->oem_id, ACPI_BUILD_APPNAME6, sizeof(rsdp->oem_id));
    rsdp->length = cpu_to_le32(sizeof(*rsdp));
    rsdp->revision = 0x02;

    /* Address to be filled by Guest linker */
    bios_linker_loader_add_pointer(linker,
        ACPI_BUILD_RSDP_FILE, xsdt_pa_offset, xsdt_pa_size,
        ACPI_BUILD_TABLE_FILE, xsdt_tbl_offset);

    /* Checksum to be filled by Guest linker */
    bios_linker_loader_add_checksum(linker, ACPI_BUILD_RSDP_FILE,
        (char *)rsdp - rsdp_table->data, sizeof *rsdp,
        (char *)&rsdp->checksum - rsdp_table->data);

    return rsdp_table;
}

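/*
 * IORT: describes how PCIe requester IDs reach the GIC ITS for MSIs.
 * The table always contains an ITS group node and a root complex node;
 * when an SMMUv3 is instantiated it is inserted in between, so the root
 * complex maps RIDs to the SMMU and the SMMU maps them on to the ITS.
 */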
static void
build_iort(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms)
{
    int nb_nodes, iort_start = table_data->len;
    AcpiIortIdMapping *idmap;
    AcpiIortItsGroup *its;
    AcpiIortTable *iort;
    AcpiIortSmmu3 *smmu;
    size_t node_size, iort_node_offset, iort_length, smmu_offset = 0;
    AcpiIortRC *rc;

    iort = acpi_data_push(table_data, sizeof(*iort));

    if (vms->iommu == VIRT_IOMMU_SMMUV3) {
        nb_nodes = 3; /* RC, ITS, SMMUv3 */
    } else {
        nb_nodes = 2; /* RC, ITS */
    }

    iort_length = sizeof(*iort);
    iort->node_count = cpu_to_le32(nb_nodes);
    /*
     * Use a copy in case table_data->data moves during acpi_data_push
     * operations.
     */
    iort_node_offset = sizeof(*iort);
    iort->node_offset = cpu_to_le32(iort_node_offset);

    /* ITS group node */
    node_size =  sizeof(*its) + sizeof(uint32_t);
    iort_length += node_size;
    its = acpi_data_push(table_data, node_size);

    its->type = ACPI_IORT_NODE_ITS_GROUP;
    its->length = cpu_to_le16(node_size);
    its->its_count = cpu_to_le32(1);
    its->identifiers[0] = 0; /* MADT translation_id */

    if (vms->iommu == VIRT_IOMMU_SMMUV3) {
        int irq =  vms->irqmap[VIRT_SMMU];

        /* SMMUv3 node */
        smmu_offset = iort_node_offset + node_size;
        node_size = sizeof(*smmu) + sizeof(*idmap);
        iort_length += node_size;
        smmu = acpi_data_push(table_data, node_size);

        smmu->type = ACPI_IORT_NODE_SMMU_V3;
        smmu->length = cpu_to_le16(node_size);
        smmu->mapping_count = cpu_to_le32(1);
        smmu->mapping_offset = cpu_to_le32(sizeof(*smmu));
        smmu->base_address = cpu_to_le64(vms->memmap[VIRT_SMMU].base);
        smmu->event_gsiv = cpu_to_le32(irq);
        smmu->pri_gsiv = cpu_to_le32(irq + 1);
        smmu->gerr_gsiv = cpu_to_le32(irq + 2);
        smmu->sync_gsiv = cpu_to_le32(irq + 3);

        /* Identity RID mapping covering the whole input RID range */
        idmap = &smmu->id_mapping_array[0];
        idmap->input_base = 0;
        idmap->id_count = cpu_to_le32(0xFFFF);
        idmap->output_base = 0;
        /* output IORT node is the ITS group node (the first node) */
        idmap->output_reference = cpu_to_le32(iort_node_offset);
    }

    /* Root Complex Node */
    node_size = sizeof(*rc) + sizeof(*idmap);
    iort_length += node_size;
    rc = acpi_data_push(table_data, node_size);

    rc->type = ACPI_IORT_NODE_PCI_ROOT_COMPLEX;
    rc->length = cpu_to_le16(node_size);
    rc->mapping_count = cpu_to_le32(1);
    rc->mapping_offset = cpu_to_le32(sizeof(*rc));

    /* fully coherent device */
    rc->memory_properties.cache_coherency = cpu_to_le32(1);
    rc->memory_properties.memory_flags = 0x3; /* CCA = CPM = DCAS = 1 */
    rc->pci_segment_number = 0; /* MCFG pci_segment */

    /* Identity RID mapping covering the whole input RID range */
    idmap = &rc->id_mapping_array[0];
    idmap->input_base = 0;
    idmap->id_count = cpu_to_le32(0xFFFF);
    idmap->output_base = 0;

    if (vms->iommu == VIRT_IOMMU_SMMUV3) {
        /* output IORT node is the smmuv3 node */
        idmap->output_reference = cpu_to_le32(smmu_offset);
    } else {
        /* output IORT node is the ITS group node (the first node) */
        idmap->output_reference = cpu_to_le32(iort_node_offset);
    }

    /*
     * Update the pointer address in case table_data->data moves during above
     * acpi_data_push operations.
     */
    iort = (AcpiIortTable *)(table_data->data + iort_start);
    iort->length = cpu_to_le32(iort_length);

    build_header(linker, table_data, (void *)(table_data->data + iort_start),
                 "IORT", table_data->len - iort_start, 0, NULL, NULL);
}

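/*
 * SPCR: points the OS at the PL011 serial console. The base address must
 * match the _ADR of the COM0 device in the DSDT, and the GSI is the UART
 * SPI.
 */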
static void
build_spcr(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms)
{
    AcpiSerialPortConsoleRedirection *spcr;
    const MemMapEntry *uart_memmap = &vms->memmap[VIRT_UART];
    int irq = vms->irqmap[VIRT_UART] + ARM_SPI_BASE;
    int spcr_start = table_data->len;

    spcr = acpi_data_push(table_data, sizeof(*spcr));

    spcr->interface_type = 0x3;    /* ARM PL011 UART */

    spcr->base_address.space_id = AML_SYSTEM_MEMORY;
    spcr->base_address.bit_width = 8;
    spcr->base_address.bit_offset = 0;
    spcr->base_address.access_width = 1;
    spcr->base_address.address = cpu_to_le64(uart_memmap->base);

    spcr->interrupt_types = (1 << 3); /* Bit[3] ARMH GIC interrupt */
    spcr->gsi = cpu_to_le32(irq);  /* Global System Interrupt */

    spcr->baud = 3;                /* Baud Rate: 3 = 9600 */
    spcr->parity = 0;              /* No Parity */
    spcr->stopbits = 1;            /* 1 Stop bit */
    spcr->flowctrl = (1 << 1);     /* Bit[1] = RTS/CTS hardware flow control */
    spcr->term_type = 0;           /* Terminal Type: 0 = VT100 */

    spcr->pci_device_id = 0xffff;  /* PCI Device ID: not a PCI device */
    spcr->pci_vendor_id = 0xffff;  /* PCI Vendor ID: not a PCI device */

    build_header(linker, table_data, (void *)(table_data->data + spcr_start),
                 "SPCR", table_data->len - spcr_start, 2, NULL, NULL);
}

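/*
 * SRAT: one GICC affinity structure per possible CPU and one memory
 * affinity structure per NUMA node, assigning each to its proximity
 * domain (node id).
 */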
static void
build_srat(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms)
{
    AcpiSystemResourceAffinityTable *srat;
    AcpiSratProcessorGiccAffinity *core;
    AcpiSratMemoryAffinity *numamem;
    int i, srat_start;
    uint64_t mem_base;
    MachineClass *mc = MACHINE_GET_CLASS(vms);
    const CPUArchIdList *cpu_list = mc->possible_cpu_arch_ids(MACHINE(vms));

    srat_start = table_data->len;
    srat = acpi_data_push(table_data, sizeof(*srat));
    srat->reserved1 = cpu_to_le32(1);

    for (i = 0; i < cpu_list->len; ++i) {
        core = acpi_data_push(table_data, sizeof(*core));
        core->type = ACPI_SRAT_PROCESSOR_GICC;
        core->length = sizeof(*core);
        core->proximity = cpu_to_le32(cpu_list->cpus[i].props.node_id);
        core->acpi_processor_uid = cpu_to_le32(i);
        core->flags = cpu_to_le32(1);
    }

    mem_base = vms->memmap[VIRT_MEM].base;
    for (i = 0; i < nb_numa_nodes; ++i) {
        numamem = acpi_data_push(table_data, sizeof(*numamem));
        build_srat_memory(numamem, mem_base, numa_info[i].node_mem, i,
                          MEM_AFFINITY_ENABLED);
        mem_base += numa_info[i].node_mem;
    }

    build_header(linker, table_data, (void *)(table_data->data + srat_start),
                 "SRAT", table_data->len - srat_start, 3, NULL, NULL);
}

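/*
 * MCFG: advertises the PCIe ECAM window. A single allocation covers
 * segment 0; the end bus number follows from the window size at
 * PCIE_MMCFG_SIZE_MIN (1 MiB) of config space per bus.
 */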
static void
build_mcfg(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms)
{
    AcpiTableMcfg *mcfg;
    const MemMapEntry *memmap = vms->memmap;
    int ecam_id = VIRT_ECAM_ID(vms->highmem_ecam);
    int len = sizeof(*mcfg) + sizeof(mcfg->allocation[0]);
    int mcfg_start = table_data->len;

    mcfg = acpi_data_push(table_data, len);
    mcfg->allocation[0].address = cpu_to_le64(memmap[ecam_id].base);

    /* Only a single allocation so no need to play with segments */
    mcfg->allocation[0].pci_segment = cpu_to_le16(0);
    mcfg->allocation[0].start_bus_number = 0;
    mcfg->allocation[0].end_bus_number = (memmap[ecam_id].size
                                          / PCIE_MMCFG_SIZE_MIN) - 1;

    build_header(linker, table_data, (void *)(table_data->data + mcfg_start),
                 "MCFG", table_data->len - mcfg_start, 1, NULL, NULL);
}

/* GTDT */
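/*
 * The GTDT carries the GSIVs and trigger flags of the four architected
 * timer PPIs (secure EL1, non-secure EL1, virtual, non-secure EL2).
 */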
static void
build_gtdt(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms)
{
    VirtMachineClass *vmc = VIRT_MACHINE_GET_CLASS(vms);
    int gtdt_start = table_data->len;
    AcpiGenericTimerTable *gtdt;
    uint32_t irqflags;

    if (vmc->claim_edge_triggered_timers) {
        irqflags = ACPI_GTDT_INTERRUPT_MODE_EDGE;
    } else {
        irqflags = ACPI_GTDT_INTERRUPT_MODE_LEVEL;
    }

    gtdt = acpi_data_push(table_data, sizeof *gtdt);
    /* The interrupt values are the device tree PPI numbers plus 16, since
     * PPI INTIDs (GSIVs) start at 16.
     */
    gtdt->secure_el1_interrupt = cpu_to_le32(ARCH_TIMER_S_EL1_IRQ + 16);
    gtdt->secure_el1_flags = cpu_to_le32(irqflags);

    gtdt->non_secure_el1_interrupt = cpu_to_le32(ARCH_TIMER_NS_EL1_IRQ + 16);
    gtdt->non_secure_el1_flags = cpu_to_le32(irqflags |
                                             ACPI_GTDT_CAP_ALWAYS_ON);

    gtdt->virtual_timer_interrupt = cpu_to_le32(ARCH_TIMER_VIRT_IRQ + 16);
    gtdt->virtual_timer_flags = cpu_to_le32(irqflags);

    gtdt->non_secure_el2_interrupt = cpu_to_le32(ARCH_TIMER_NS_EL2_IRQ + 16);
    gtdt->non_secure_el2_flags = cpu_to_le32(irqflags);

    build_header(linker, table_data,
                 (void *)(table_data->data + gtdt_start), "GTDT",
                 table_data->len - gtdt_start, 2, NULL, NULL);
}

/* MADT */
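/*
 * The MADT lists the GIC distributor, one GICC per CPU, and then either
 * the GICv3 redistributor region(s) plus an optional ITS, or the GICv2
 * v2m MSI frame.
 */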
static void
build_madt(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms)
{
    VirtMachineClass *vmc = VIRT_MACHINE_GET_CLASS(vms);
    int madt_start = table_data->len;
    const MemMapEntry *memmap = vms->memmap;
    const int *irqmap = vms->irqmap;
    AcpiMultipleApicTable *madt;
    AcpiMadtGenericDistributor *gicd;
    AcpiMadtGenericMsiFrame *gic_msi;
    int i;

    madt = acpi_data_push(table_data, sizeof *madt);

    gicd = acpi_data_push(table_data, sizeof *gicd);
    gicd->type = ACPI_APIC_GENERIC_DISTRIBUTOR;
    gicd->length = sizeof(*gicd);
    gicd->base_address = cpu_to_le64(memmap[VIRT_GIC_DIST].base);
    gicd->version = vms->gic_version;

    for (i = 0; i < vms->smp_cpus; i++) {
        AcpiMadtGenericCpuInterface *gicc = acpi_data_push(table_data,
                                                           sizeof(*gicc));
        ARMCPU *armcpu = ARM_CPU(qemu_get_cpu(i));

        gicc->type = ACPI_APIC_GENERIC_CPU_INTERFACE;
        gicc->length = sizeof(*gicc);
        if (vms->gic_version == 2) {
            gicc->base_address = cpu_to_le64(memmap[VIRT_GIC_CPU].base);
        }
        gicc->cpu_interface_number = cpu_to_le32(i);
        gicc->arm_mpidr = cpu_to_le64(armcpu->mp_affinity);
        gicc->uid = cpu_to_le32(i);
        gicc->flags = cpu_to_le32(ACPI_MADT_GICC_ENABLED);

        if (arm_feature(&armcpu->env, ARM_FEATURE_PMU)) {
            gicc->performance_interrupt = cpu_to_le32(PPI(VIRTUAL_PMU_IRQ));
        }
        if (vms->virt && vms->gic_version == 3) {
            gicc->vgic_interrupt = cpu_to_le32(PPI(ARCH_GICV3_MAINT_IRQ));
        }
    }

    if (vms->gic_version == 3) {
        AcpiMadtGenericTranslator *gic_its;
        int nb_redist_regions = virt_gicv3_redist_region_count(vms);
        AcpiMadtGenericRedistributor *gicr = acpi_data_push(table_data,
                                                         sizeof *gicr);

        gicr->type = ACPI_APIC_GENERIC_REDISTRIBUTOR;
        gicr->length = sizeof(*gicr);
        gicr->base_address = cpu_to_le64(memmap[VIRT_GIC_REDIST].base);
        gicr->range_length = cpu_to_le32(memmap[VIRT_GIC_REDIST].size);

        if (nb_redist_regions == 2) {
            gicr = acpi_data_push(table_data, sizeof(*gicr));
            gicr->type = ACPI_APIC_GENERIC_REDISTRIBUTOR;
            gicr->length = sizeof(*gicr);
            gicr->base_address = cpu_to_le64(memmap[VIRT_GIC_REDIST2].base);
            gicr->range_length = cpu_to_le32(memmap[VIRT_GIC_REDIST2].size);
        }

        if (its_class_name() && !vmc->no_its) {
            gic_its = acpi_data_push(table_data, sizeof *gic_its);
            gic_its->type = ACPI_APIC_GENERIC_TRANSLATOR;
            gic_its->length = sizeof(*gic_its);
            gic_its->translation_id = 0;
            gic_its->base_address = cpu_to_le64(memmap[VIRT_GIC_ITS].base);
        }
    } else {
        gic_msi = acpi_data_push(table_data, sizeof *gic_msi);
        gic_msi->type = ACPI_APIC_GENERIC_MSI_FRAME;
        gic_msi->length = sizeof(*gic_msi);
        gic_msi->gic_msi_frame_id = 0;
        gic_msi->base_address = cpu_to_le64(memmap[VIRT_GIC_V2M].base);
        gic_msi->flags = cpu_to_le32(1);
        gic_msi->spi_count = cpu_to_le16(NUM_GICV2M_SPIS);
        gic_msi->spi_base = cpu_to_le16(irqmap[VIRT_GIC_V2M] + ARM_SPI_BASE);
    }

    build_header(linker, table_data,
                 (void *)(table_data->data + madt_start), "APIC",
                 table_data->len - madt_start, 3, NULL, NULL);
}

/* FADT */
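/*
 * Hardware-reduced FADT (ACPI 5.1): no legacy fixed-hardware blocks; the
 * ARM boot-architecture flags advertise whether PSCI is available and
 * which conduit (HVC or SMC) to use.
 */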
static void build_fadt_rev5(GArray *table_data, BIOSLinker *linker,
                            VirtMachineState *vms, unsigned dsdt_tbl_offset)
{
    /* ACPI v5.1 */
    AcpiFadtData fadt = {
        .rev = 5,
        .minor_ver = 1,
        .flags = 1 << ACPI_FADT_F_HW_REDUCED_ACPI,
        .xdsdt_tbl_offset = &dsdt_tbl_offset,
    };

    switch (vms->psci_conduit) {
    case QEMU_PSCI_CONDUIT_DISABLED:
        fadt.arm_boot_arch = 0;
        break;
    case QEMU_PSCI_CONDUIT_HVC:
        fadt.arm_boot_arch = ACPI_FADT_ARM_PSCI_COMPLIANT |
                             ACPI_FADT_ARM_PSCI_USE_HVC;
        break;
    case QEMU_PSCI_CONDUIT_SMC:
        fadt.arm_boot_arch = ACPI_FADT_ARM_PSCI_COMPLIANT;
        break;
    default:
        g_assert_not_reached();
    }

    build_fadt(table_data, linker, &fadt, NULL, NULL);
}

/* DSDT */
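/*
 * The DSDT is assembled from the helpers above: everything lives under
 * \_SB and mirrors the devices the virt board actually instantiates.
 */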
static void
build_dsdt(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms)
{
    Aml *scope, *dsdt;
    const MemMapEntry *memmap = vms->memmap;
    const int *irqmap = vms->irqmap;

    dsdt = init_aml_allocator();
    /* Reserve space for header */
    acpi_data_push(dsdt->buf, sizeof(AcpiTableHeader));

    /* When booting the VM with UEFI, UEFI takes ownership of the RTC hardware.
     * While UEFI can use libfdt to disable the RTC device node in the DTB that
     * it passes to the OS, it cannot modify AML. Therefore, we won't generate
     * the RTC ACPI device at all when using UEFI.
     */
    scope = aml_scope("\\_SB");
    acpi_dsdt_add_cpus(scope, vms->smp_cpus);
    acpi_dsdt_add_uart(scope, &memmap[VIRT_UART],
                       (irqmap[VIRT_UART] + ARM_SPI_BASE));
    acpi_dsdt_add_flash(scope, &memmap[VIRT_FLASH]);
    acpi_dsdt_add_fw_cfg(scope, &memmap[VIRT_FW_CFG]);
    acpi_dsdt_add_virtio(scope, &memmap[VIRT_MMIO],
                    (irqmap[VIRT_MMIO] + ARM_SPI_BASE), NUM_VIRTIO_TRANSPORTS);
    acpi_dsdt_add_pci(scope, memmap, (irqmap[VIRT_PCIE] + ARM_SPI_BASE),
                      vms->highmem, vms->highmem_ecam);
    acpi_dsdt_add_gpio(scope, &memmap[VIRT_GPIO],
                       (irqmap[VIRT_GPIO] + ARM_SPI_BASE));
    acpi_dsdt_add_power_button(scope);

    aml_append(dsdt, scope);

    /* copy AML table into ACPI tables blob and patch header there */
    g_array_append_vals(table_data, dsdt->buf->data, dsdt->buf->len);
    build_header(linker, table_data,
        (void *)(table_data->data + table_data->len - dsdt->buf->len),
        "DSDT", dsdt->buf->len, 2, NULL, NULL);
    free_aml_allocator();
}

typedef
struct AcpiBuildState {
    /* Copy of table in RAM (for patching). */
    MemoryRegion *table_mr;
    MemoryRegion *rsdp_mr;
    MemoryRegion *linker_mr;
    /* Is table patched? */
    bool patched;
} AcpiBuildState;

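/*
 * Build all tables into 'tables': the DSDT first (so the FADT can point at
 * it), then FADT/MADT/GTDT/MCFG/SPCR and the optional SRAT/SLIT and IORT,
 * and finally the XSDT referencing them and the RSDP referencing the XSDT.
 */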
static
void virt_acpi_build(VirtMachineState *vms, AcpiBuildTables *tables)
{
    VirtMachineClass *vmc = VIRT_MACHINE_GET_CLASS(vms);
    GArray *table_offsets;
    unsigned dsdt, xsdt;
    GArray *tables_blob = tables->table_data;

    table_offsets = g_array_new(false, true /* clear */,
                                        sizeof(uint32_t));

    bios_linker_loader_alloc(tables->linker,
                             ACPI_BUILD_TABLE_FILE, tables_blob,
                             64, false /* high memory */);

    /* DSDT is pointed to by FADT */
    dsdt = tables_blob->len;
    build_dsdt(tables_blob, tables->linker, vms);

    /* FADT MADT GTDT MCFG SPCR pointed to by XSDT */
    acpi_add_table(table_offsets, tables_blob);
    build_fadt_rev5(tables_blob, tables->linker, vms, dsdt);

    acpi_add_table(table_offsets, tables_blob);
    build_madt(tables_blob, tables->linker, vms);

    acpi_add_table(table_offsets, tables_blob);
    build_gtdt(tables_blob, tables->linker, vms);

    acpi_add_table(table_offsets, tables_blob);
    build_mcfg(tables_blob, tables->linker, vms);

    acpi_add_table(table_offsets, tables_blob);
    build_spcr(tables_blob, tables->linker, vms);

    if (nb_numa_nodes > 0) {
        acpi_add_table(table_offsets, tables_blob);
        build_srat(tables_blob, tables->linker, vms);
        if (have_numa_distance) {
            acpi_add_table(table_offsets, tables_blob);
            build_slit(tables_blob, tables->linker);
        }
    }

    if (its_class_name() && !vmc->no_its) {
        acpi_add_table(table_offsets, tables_blob);
        build_iort(tables_blob, tables->linker, vms);
    }

    /* XSDT is pointed to by RSDP */
    xsdt = tables_blob->len;
    build_xsdt(tables_blob, tables->linker, table_offsets, NULL, NULL);

    /* RSDP is in FSEG memory, so allocate it separately */
    build_rsdp(tables->rsdp, tables->linker, xsdt);

    /* Cleanup memory that's no longer used. */
    g_array_free(table_offsets, true);
}

static void acpi_ram_update(MemoryRegion *mr, GArray *data)
{
    uint32_t size = acpi_data_len(data);

    /* Make sure RAM size is correct - in case it got changed
     * e.g. by migration */
    memory_region_ram_resize(mr, size, &error_abort);

    memcpy(memory_region_get_ram_ptr(mr), data->data, size);
    memory_region_set_dirty(mr, 0, size);
}

static void virt_acpi_build_update(void *build_opaque)
{
    AcpiBuildState *build_state = build_opaque;
    AcpiBuildTables tables;

    /* No state to update or already patched? Nothing to do. */
    if (!build_state || build_state->patched) {
        return;
    }
    build_state->patched = true;

    acpi_build_tables_init(&tables);

    virt_acpi_build(VIRT_MACHINE(qdev_get_machine()), &tables);

    acpi_ram_update(build_state->table_mr, tables.table_data);
    acpi_ram_update(build_state->rsdp_mr, tables.rsdp);
    acpi_ram_update(build_state->linker_mr, tables.linker->cmd_blob);

    acpi_build_tables_cleanup(&tables, true);
}

static void virt_acpi_build_reset(void *build_opaque)
{
    AcpiBuildState *build_state = build_opaque;
    build_state->patched = false;
}

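/*
 * Expose a table blob to the guest as a ROM blob backed by an fw_cfg file;
 * the virt_acpi_build_update callback lets the tables be rebuilt and
 * re-patched when needed, at most once per reset (see
 * virt_acpi_build_reset).
 */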
static MemoryRegion *acpi_add_rom_blob(AcpiBuildState *build_state,
                                       GArray *blob, const char *name,
                                       uint64_t max_size)
{
    return rom_add_blob(name, blob->data, acpi_data_len(blob), max_size, -1,
                        name, virt_acpi_build_update, build_state, NULL, true);
}

static const VMStateDescription vmstate_virt_acpi_build = {
    .name = "virt_acpi_build",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_BOOL(patched, AcpiBuildState),
        VMSTATE_END_OF_LIST()
    },
};

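/*
 * Machine-level entry point: bail out if there is no fw_cfg or ACPI is
 * disabled, otherwise build the tables, expose them to the guest (tables
 * blob, linker script, RSDP and the TPM log file), and wire up reset
 * handling and migration state.
 */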
void virt_acpi_setup(VirtMachineState *vms)
{
    AcpiBuildTables tables;
    AcpiBuildState *build_state;

    if (!vms->fw_cfg) {
        trace_virt_acpi_setup();
        return;
    }

    if (!acpi_enabled) {
        trace_virt_acpi_setup();
        return;
    }

    build_state = g_malloc0(sizeof *build_state);

    acpi_build_tables_init(&tables);
    virt_acpi_build(vms, &tables);

    /* Now expose it all to Guest */
    build_state->table_mr = acpi_add_rom_blob(build_state, tables.table_data,
                                               ACPI_BUILD_TABLE_FILE,
                                               ACPI_BUILD_TABLE_MAX_SIZE);
    assert(build_state->table_mr != NULL);

    build_state->linker_mr =
        acpi_add_rom_blob(build_state, tables.linker->cmd_blob,
                          "etc/table-loader", 0);

    fw_cfg_add_file(vms->fw_cfg, ACPI_BUILD_TPMLOG_FILE, tables.tcpalog->data,
                    acpi_data_len(tables.tcpalog));

    build_state->rsdp_mr = acpi_add_rom_blob(build_state, tables.rsdp,
                                              ACPI_BUILD_RSDP_FILE, 0);

    qemu_register_reset(virt_acpi_build_reset, build_state);
    virt_acpi_build_reset(build_state);
    vmstate_register(NULL, 0, &vmstate_virt_acpi_build, build_state);

    /* Cleanup tables but don't free the memory: we track it
     * in build_state.
     */
    acpi_build_tables_cleanup(&tables, false);
}