qemu/hw/arm/virt-acpi-build.c
/* Support for generating ACPI tables and passing them to Guests
 *
 * ARM virt ACPI generation
 *
 * Copyright (C) 2008-2010  Kevin O'Connor <kevin@koconnor.net>
 * Copyright (C) 2006 Fabrice Bellard
 * Copyright (C) 2013 Red Hat Inc
 *
 * Author: Michael S. Tsirkin <mst@redhat.com>
 *
 * Copyright (c) 2015 HUAWEI TECHNOLOGIES CO.,LTD.
 *
 * Author: Shannon Zhao <zhaoshenglong@huawei.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.

 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.

 * You should have received a copy of the GNU General Public License along
 * with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu/bitmap.h"
#include "trace.h"
#include "qom/cpu.h"
#include "target/arm/cpu.h"
#include "hw/acpi/acpi-defs.h"
#include "hw/acpi/acpi.h"
#include "hw/nvram/fw_cfg.h"
#include "hw/acpi/bios-linker-loader.h"
#include "hw/hw.h"
#include "hw/acpi/aml-build.h"
#include "hw/acpi/utils.h"
#include "hw/acpi/pci.h"
#include "hw/pci/pcie_host.h"
#include "hw/pci/pci.h"
#include "hw/arm/virt.h"
#include "sysemu/numa.h"
#include "kvm_arm.h"

#define ARM_SPI_BASE 32
#define ACPI_POWER_BUTTON_DEVICE "PWRB"

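/*
 * Add one processor device object (_HID ACPI0007) per CPU, using the CPU
 * index as its _UID.
 */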
static void acpi_dsdt_add_cpus(Aml *scope, int smp_cpus)
{
    uint16_t i;

    for (i = 0; i < smp_cpus; i++) {
        Aml *dev = aml_device("C%.03X", i);
        aml_append(dev, aml_name_decl("_HID", aml_string("ACPI0007")));
        aml_append(dev, aml_name_decl("_UID", aml_int(i)));
        aml_append(scope, dev);
    }
}

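/*
 * Describe the PL011 UART (_HID ARMH0011): its MMIO window, its interrupt,
 * and an _ADR that ties it to the SPCR table entry.
 */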
static void acpi_dsdt_add_uart(Aml *scope, const MemMapEntry *uart_memmap,
                                           uint32_t uart_irq)
{
    Aml *dev = aml_device("COM0");
    aml_append(dev, aml_name_decl("_HID", aml_string("ARMH0011")));
    aml_append(dev, aml_name_decl("_UID", aml_int(0)));

    Aml *crs = aml_resource_template();
    aml_append(crs, aml_memory32_fixed(uart_memmap->base,
                                       uart_memmap->size, AML_READ_WRITE));
    aml_append(crs,
               aml_interrupt(AML_CONSUMER, AML_LEVEL, AML_ACTIVE_HIGH,
                             AML_EXCLUSIVE, &uart_irq, 1));
    aml_append(dev, aml_name_decl("_CRS", crs));

    /* The _ADR entry is used to link this device to the UART described
     * in the SPCR table, i.e. SPCR.base_address.address == _ADR.
     */
    aml_append(dev, aml_name_decl("_ADR", aml_int(uart_memmap->base)));

    aml_append(scope, dev);
}

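/* Describe the fw_cfg MMIO interface (_HID QEMU0002) */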
static void acpi_dsdt_add_fw_cfg(Aml *scope, const MemMapEntry *fw_cfg_memmap)
{
    Aml *dev = aml_device("FWCF");
    aml_append(dev, aml_name_decl("_HID", aml_string("QEMU0002")));
    /* device present, functioning, decoding, not shown in UI */
    aml_append(dev, aml_name_decl("_STA", aml_int(0xB)));
    aml_append(dev, aml_name_decl("_CCA", aml_int(1)));

    Aml *crs = aml_resource_template();
    aml_append(crs, aml_memory32_fixed(fw_cfg_memmap->base,
                                       fw_cfg_memmap->size, AML_READ_WRITE));
    aml_append(dev, aml_name_decl("_CRS", crs));
    aml_append(scope, dev);
}

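/* Expose the two flash banks, each covering half of the VIRT_FLASH region */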
static void acpi_dsdt_add_flash(Aml *scope, const MemMapEntry *flash_memmap)
{
    Aml *dev, *crs;
    hwaddr base = flash_memmap->base;
    hwaddr size = flash_memmap->size / 2;

    dev = aml_device("FLS0");
    aml_append(dev, aml_name_decl("_HID", aml_string("LNRO0015")));
    aml_append(dev, aml_name_decl("_UID", aml_int(0)));

    crs = aml_resource_template();
    aml_append(crs, aml_memory32_fixed(base, size, AML_READ_WRITE));
    aml_append(dev, aml_name_decl("_CRS", crs));
    aml_append(scope, dev);

    dev = aml_device("FLS1");
    aml_append(dev, aml_name_decl("_HID", aml_string("LNRO0015")));
    aml_append(dev, aml_name_decl("_UID", aml_int(1)));
    crs = aml_resource_template();
    aml_append(crs, aml_memory32_fixed(base + size, size, AML_READ_WRITE));
    aml_append(dev, aml_name_decl("_CRS", crs));
    aml_append(scope, dev);
}

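/*
 * Add one LNRO0005 device per virtio-mmio transport; the transports occupy
 * consecutive MMIO windows and consecutive SPIs.
 */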
static void acpi_dsdt_add_virtio(Aml *scope,
                                 const MemMapEntry *virtio_mmio_memmap,
                                 uint32_t mmio_irq, int num)
{
    hwaddr base = virtio_mmio_memmap->base;
    hwaddr size = virtio_mmio_memmap->size;
    int i;

    for (i = 0; i < num; i++) {
        uint32_t irq = mmio_irq + i;
        Aml *dev = aml_device("VR%02u", i);
        aml_append(dev, aml_name_decl("_HID", aml_string("LNRO0005")));
        aml_append(dev, aml_name_decl("_UID", aml_int(i)));
        aml_append(dev, aml_name_decl("_CCA", aml_int(1)));

        Aml *crs = aml_resource_template();
        aml_append(crs, aml_memory32_fixed(base, size, AML_READ_WRITE));
        aml_append(crs,
                   aml_interrupt(AML_CONSUMER, AML_LEVEL, AML_ACTIVE_HIGH,
                                 AML_EXCLUSIVE, &irq, 1));
        aml_append(dev, aml_name_decl("_CRS", crs));
        aml_append(scope, dev);
        base += size;
    }
}

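/*
 * Describe the PCIe host bridge (_HID PNP0A08): interrupt routing via _PRT
 * and the GSI link devices, the bus/MMIO/IO windows in _CRS, the _OSC and
 * _DSM methods, and a PNP0C02 device that reserves the ECAM window.
 */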
static void acpi_dsdt_add_pci(Aml *scope, const MemMapEntry *memmap,
                              uint32_t irq, bool use_highmem, bool highmem_ecam)
{
    int ecam_id = VIRT_ECAM_ID(highmem_ecam);
    Aml *method, *crs, *ifctx, *UUID, *ifctx1, *elsectx, *buf;
    int i, bus_no;
    hwaddr base_mmio = memmap[VIRT_PCIE_MMIO].base;
    hwaddr size_mmio = memmap[VIRT_PCIE_MMIO].size;
    hwaddr base_pio = memmap[VIRT_PCIE_PIO].base;
    hwaddr size_pio = memmap[VIRT_PCIE_PIO].size;
    hwaddr base_ecam = memmap[ecam_id].base;
    hwaddr size_ecam = memmap[ecam_id].size;
    int nr_pcie_buses = size_ecam / PCIE_MMCFG_SIZE_MIN;

    Aml *dev = aml_device("%s", "PCI0");
    aml_append(dev, aml_name_decl("_HID", aml_string("PNP0A08")));
    aml_append(dev, aml_name_decl("_CID", aml_string("PNP0A03")));
    aml_append(dev, aml_name_decl("_SEG", aml_int(0)));
    aml_append(dev, aml_name_decl("_BBN", aml_int(0)));
    aml_append(dev, aml_name_decl("_ADR", aml_int(0)));
    aml_append(dev, aml_name_decl("_UID", aml_string("PCI0")));
    aml_append(dev, aml_name_decl("_STR", aml_unicode("PCIe 0 Device")));
    aml_append(dev, aml_name_decl("_CCA", aml_int(1)));

    /* Declare the PCI Routing Table. */
    Aml *rt_pkg = aml_varpackage(nr_pcie_buses * PCI_NUM_PINS);
    for (bus_no = 0; bus_no < nr_pcie_buses; bus_no++) {
        for (i = 0; i < PCI_NUM_PINS; i++) {
            int gsi = (i + bus_no) % PCI_NUM_PINS;
            Aml *pkg = aml_package(4);
            aml_append(pkg, aml_int((bus_no << 16) | 0xFFFF));
            aml_append(pkg, aml_int(i));
            aml_append(pkg, aml_name("GSI%d", gsi));
            aml_append(pkg, aml_int(0));
            aml_append(rt_pkg, pkg);
        }
    }
    aml_append(dev, aml_name_decl("_PRT", rt_pkg));

    /* Create GSI link device */
    for (i = 0; i < PCI_NUM_PINS; i++) {
        uint32_t irqs =  irq + i;
        Aml *dev_gsi = aml_device("GSI%d", i);
        aml_append(dev_gsi, aml_name_decl("_HID", aml_string("PNP0C0F")));
        aml_append(dev_gsi, aml_name_decl("_UID", aml_int(0)));
        crs = aml_resource_template();
        aml_append(crs,
                   aml_interrupt(AML_CONSUMER, AML_LEVEL, AML_ACTIVE_HIGH,
                                 AML_EXCLUSIVE, &irqs, 1));
        aml_append(dev_gsi, aml_name_decl("_PRS", crs));
        crs = aml_resource_template();
        aml_append(crs,
                   aml_interrupt(AML_CONSUMER, AML_LEVEL, AML_ACTIVE_HIGH,
                                 AML_EXCLUSIVE, &irqs, 1));
        aml_append(dev_gsi, aml_name_decl("_CRS", crs));
        method = aml_method("_SRS", 1, AML_NOTSERIALIZED);
        aml_append(dev_gsi, method);
        aml_append(dev, dev_gsi);
    }

    method = aml_method("_CBA", 0, AML_NOTSERIALIZED);
    aml_append(method, aml_return(aml_int(base_ecam)));
    aml_append(dev, method);

    method = aml_method("_CRS", 0, AML_NOTSERIALIZED);
    Aml *rbuf = aml_resource_template();
    aml_append(rbuf,
        aml_word_bus_number(AML_MIN_FIXED, AML_MAX_FIXED, AML_POS_DECODE,
                            0x0000, 0x0000, nr_pcie_buses - 1, 0x0000,
                            nr_pcie_buses));
    aml_append(rbuf,
        aml_dword_memory(AML_POS_DECODE, AML_MIN_FIXED, AML_MAX_FIXED,
                         AML_NON_CACHEABLE, AML_READ_WRITE, 0x0000, base_mmio,
                         base_mmio + size_mmio - 1, 0x0000, size_mmio));
    aml_append(rbuf,
        aml_dword_io(AML_MIN_FIXED, AML_MAX_FIXED, AML_POS_DECODE,
                     AML_ENTIRE_RANGE, 0x0000, 0x0000, size_pio - 1, base_pio,
                     size_pio));

    if (use_highmem) {
        hwaddr base_mmio_high = memmap[VIRT_HIGH_PCIE_MMIO].base;
        hwaddr size_mmio_high = memmap[VIRT_HIGH_PCIE_MMIO].size;

        aml_append(rbuf,
            aml_qword_memory(AML_POS_DECODE, AML_MIN_FIXED, AML_MAX_FIXED,
                             AML_NON_CACHEABLE, AML_READ_WRITE, 0x0000,
                             base_mmio_high,
                             base_mmio_high + size_mmio_high - 1, 0x0000,
                             size_mmio_high));
    }

    aml_append(method, aml_name_decl("RBUF", rbuf));
    aml_append(method, aml_return(rbuf));
    aml_append(dev, method);

    /* Declare an _OSC (OS Control Handoff) method */
    aml_append(dev, aml_name_decl("SUPP", aml_int(0)));
    aml_append(dev, aml_name_decl("CTRL", aml_int(0)));
    method = aml_method("_OSC", 4, AML_NOTSERIALIZED);
    aml_append(method,
        aml_create_dword_field(aml_arg(3), aml_int(0), "CDW1"));

    /* PCI Firmware Specification 3.0
     * 4.5.1. _OSC Interface for PCI Host Bridge Devices
     * The _OSC interface for a PCI/PCI-X/PCI Express hierarchy is
     * identified by the Universal Unique IDentifier (UUID)
     * 33DB4D5B-1FF7-401C-9657-7441C03DD766
     */
    UUID = aml_touuid("33DB4D5B-1FF7-401C-9657-7441C03DD766");
    ifctx = aml_if(aml_equal(aml_arg(0), UUID));
    aml_append(ifctx,
        aml_create_dword_field(aml_arg(3), aml_int(4), "CDW2"));
    aml_append(ifctx,
        aml_create_dword_field(aml_arg(3), aml_int(8), "CDW3"));
    aml_append(ifctx, aml_store(aml_name("CDW2"), aml_name("SUPP")));
    aml_append(ifctx, aml_store(aml_name("CDW3"), aml_name("CTRL")));
    aml_append(ifctx, aml_store(aml_and(aml_name("CTRL"), aml_int(0x1D), NULL),
                                aml_name("CTRL")));

    ifctx1 = aml_if(aml_lnot(aml_equal(aml_arg(1), aml_int(0x1))));
    aml_append(ifctx1, aml_store(aml_or(aml_name("CDW1"), aml_int(0x08), NULL),
                                 aml_name("CDW1")));
    aml_append(ifctx, ifctx1);

    ifctx1 = aml_if(aml_lnot(aml_equal(aml_name("CDW3"), aml_name("CTRL"))));
    aml_append(ifctx1, aml_store(aml_or(aml_name("CDW1"), aml_int(0x10), NULL),
                                 aml_name("CDW1")));
    aml_append(ifctx, ifctx1);

    aml_append(ifctx, aml_store(aml_name("CTRL"), aml_name("CDW3")));
    aml_append(ifctx, aml_return(aml_arg(3)));
    aml_append(method, ifctx);

    elsectx = aml_else();
    aml_append(elsectx, aml_store(aml_or(aml_name("CDW1"), aml_int(4), NULL),
                                  aml_name("CDW1")));
    aml_append(elsectx, aml_return(aml_arg(3)));
    aml_append(method, elsectx);
    aml_append(dev, method);

    method = aml_method("_DSM", 4, AML_NOTSERIALIZED);

    /* PCI Firmware Specification 3.0
     * 4.6.1. _DSM for PCI Express Slot Information
     * The UUID in _DSM in this context is
     * {E5C937D0-3553-4D7A-9117-EA4D19C3434D}
     */
    UUID = aml_touuid("E5C937D0-3553-4D7A-9117-EA4D19C3434D");
    ifctx = aml_if(aml_equal(aml_arg(0), UUID));
    ifctx1 = aml_if(aml_equal(aml_arg(2), aml_int(0)));
    uint8_t byte_list[1] = {1};
    buf = aml_buffer(1, byte_list);
    aml_append(ifctx1, aml_return(buf));
    aml_append(ifctx, ifctx1);
    aml_append(method, ifctx);

    byte_list[0] = 0;
    buf = aml_buffer(1, byte_list);
    aml_append(method, aml_return(buf));
    aml_append(dev, method);

    Aml *dev_rp0 = aml_device("%s", "RP0");
    aml_append(dev_rp0, aml_name_decl("_ADR", aml_int(0)));
    aml_append(dev, dev_rp0);

    Aml *dev_res0 = aml_device("%s", "RES0");
    aml_append(dev_res0, aml_name_decl("_HID", aml_string("PNP0C02")));
    crs = aml_resource_template();
    aml_append(crs,
        aml_qword_memory(AML_POS_DECODE, AML_MIN_FIXED, AML_MAX_FIXED,
                         AML_NON_CACHEABLE, AML_READ_WRITE, 0x0000, base_ecam,
                         base_ecam + size_ecam - 1, 0x0000, size_ecam));
    aml_append(dev_res0, aml_name_decl("_CRS", crs));
    aml_append(dev, dev_res0);
    aml_append(scope, dev);
}

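/*
 * Describe the PL061 GPIO controller (_HID ARMH0061); GPIO pin 3 is exposed
 * as an ACPI event source for the power button.
 */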
static void acpi_dsdt_add_gpio(Aml *scope, const MemMapEntry *gpio_memmap,
                                           uint32_t gpio_irq)
{
    Aml *dev = aml_device("GPO0");
    aml_append(dev, aml_name_decl("_HID", aml_string("ARMH0061")));
    aml_append(dev, aml_name_decl("_ADR", aml_int(0)));
    aml_append(dev, aml_name_decl("_UID", aml_int(0)));

    Aml *crs = aml_resource_template();
    aml_append(crs, aml_memory32_fixed(gpio_memmap->base, gpio_memmap->size,
                                       AML_READ_WRITE));
    aml_append(crs, aml_interrupt(AML_CONSUMER, AML_LEVEL, AML_ACTIVE_HIGH,
                                  AML_EXCLUSIVE, &gpio_irq, 1));
    aml_append(dev, aml_name_decl("_CRS", crs));

    Aml *aei = aml_resource_template();
    /* Pin 3 for power button */
    const uint32_t pin_list[1] = {3};
    aml_append(aei, aml_gpio_int(AML_CONSUMER, AML_EDGE, AML_ACTIVE_HIGH,
                                 AML_EXCLUSIVE, AML_PULL_UP, 0, pin_list, 1,
                                 "GPO0", NULL, 0));
    aml_append(dev, aml_name_decl("_AEI", aei));

    /* _E03 is the handler for the power button */
    Aml *method = aml_method("_E03", 0, AML_NOTSERIALIZED);
    aml_append(method, aml_notify(aml_name(ACPI_POWER_BUTTON_DEVICE),
                                  aml_int(0x80)));
    aml_append(dev, method);
    aml_append(scope, dev);
}

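/*
 * Generic power button device (_HID PNP0C0C); the GPIO _E03 handler above
 * notifies it when the button fires.
 */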
static void acpi_dsdt_add_power_button(Aml *scope)
{
    Aml *dev = aml_device(ACPI_POWER_BUTTON_DEVICE);
    aml_append(dev, aml_name_decl("_HID", aml_string("PNP0C0C")));
    aml_append(dev, aml_name_decl("_ADR", aml_int(0)));
    aml_append(dev, aml_name_decl("_UID", aml_int(0)));
    aml_append(scope, dev);
}

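/*
 * IORT: describes the ITS group, the optional SMMUv3, and the PCIe root
 * complex, with identity RID mappings between them.
 */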
static void
build_iort(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms)
{
    int nb_nodes, iort_start = table_data->len;
    AcpiIortIdMapping *idmap;
    AcpiIortItsGroup *its;
    AcpiIortTable *iort;
    AcpiIortSmmu3 *smmu;
    size_t node_size, iort_node_offset, iort_length, smmu_offset = 0;
    AcpiIortRC *rc;

    iort = acpi_data_push(table_data, sizeof(*iort));

    if (vms->iommu == VIRT_IOMMU_SMMUV3) {
        nb_nodes = 3; /* RC, ITS, SMMUv3 */
    } else {
        nb_nodes = 2; /* RC, ITS */
    }

    iort_length = sizeof(*iort);
    iort->node_count = cpu_to_le32(nb_nodes);
    /*
     * Use a copy in case table_data->data moves during acpi_data_push
     * operations.
     */
    iort_node_offset = sizeof(*iort);
    iort->node_offset = cpu_to_le32(iort_node_offset);

    /* ITS group node */
    node_size =  sizeof(*its) + sizeof(uint32_t);
    iort_length += node_size;
    its = acpi_data_push(table_data, node_size);

    its->type = ACPI_IORT_NODE_ITS_GROUP;
    its->length = cpu_to_le16(node_size);
    its->its_count = cpu_to_le32(1);
    its->identifiers[0] = 0; /* MADT translation_id */

    if (vms->iommu == VIRT_IOMMU_SMMUV3) {
        int irq =  vms->irqmap[VIRT_SMMU] + ARM_SPI_BASE;

        /* SMMUv3 node */
        smmu_offset = iort_node_offset + node_size;
        node_size = sizeof(*smmu) + sizeof(*idmap);
        iort_length += node_size;
        smmu = acpi_data_push(table_data, node_size);

        smmu->type = ACPI_IORT_NODE_SMMU_V3;
        smmu->length = cpu_to_le16(node_size);
        smmu->mapping_count = cpu_to_le32(1);
        smmu->mapping_offset = cpu_to_le32(sizeof(*smmu));
        smmu->base_address = cpu_to_le64(vms->memmap[VIRT_SMMU].base);
        smmu->flags = cpu_to_le32(ACPI_IORT_SMMU_V3_COHACC_OVERRIDE);
        smmu->event_gsiv = cpu_to_le32(irq);
        smmu->pri_gsiv = cpu_to_le32(irq + 1);
        smmu->gerr_gsiv = cpu_to_le32(irq + 2);
        smmu->sync_gsiv = cpu_to_le32(irq + 3);

        /* Identity RID mapping covering the whole input RID range */
        idmap = &smmu->id_mapping_array[0];
        idmap->input_base = 0;
        idmap->id_count = cpu_to_le32(0xFFFF);
        idmap->output_base = 0;
        /* output IORT node is the ITS group node (the first node) */
        idmap->output_reference = cpu_to_le32(iort_node_offset);
    }

    /* Root Complex Node */
    node_size = sizeof(*rc) + sizeof(*idmap);
    iort_length += node_size;
    rc = acpi_data_push(table_data, node_size);

    rc->type = ACPI_IORT_NODE_PCI_ROOT_COMPLEX;
    rc->length = cpu_to_le16(node_size);
    rc->mapping_count = cpu_to_le32(1);
    rc->mapping_offset = cpu_to_le32(sizeof(*rc));

    /* fully coherent device */
    rc->memory_properties.cache_coherency = cpu_to_le32(1);
    rc->memory_properties.memory_flags = 0x3; /* CCA = CPM = DCAS = 1 */
    rc->pci_segment_number = 0; /* MCFG pci_segment */

    /* Identity RID mapping covering the whole input RID range */
    idmap = &rc->id_mapping_array[0];
    idmap->input_base = 0;
    idmap->id_count = cpu_to_le32(0xFFFF);
    idmap->output_base = 0;

    if (vms->iommu == VIRT_IOMMU_SMMUV3) {
        /* output IORT node is the smmuv3 node */
        idmap->output_reference = cpu_to_le32(smmu_offset);
    } else {
        /* output IORT node is the ITS group node (the first node) */
        idmap->output_reference = cpu_to_le32(iort_node_offset);
    }

    /*
     * Update the pointer address in case table_data->data moves during above
     * acpi_data_push operations.
     */
    iort = (AcpiIortTable *)(table_data->data + iort_start);
    iort->length = cpu_to_le32(iort_length);

    build_header(linker, table_data, (void *)(table_data->data + iort_start),
                 "IORT", table_data->len - iort_start, 0, NULL, NULL);
}

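/* SPCR: serial port console redirection, pointing at the PL011 UART */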
static void
build_spcr(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms)
{
    AcpiSerialPortConsoleRedirection *spcr;
    const MemMapEntry *uart_memmap = &vms->memmap[VIRT_UART];
    int irq = vms->irqmap[VIRT_UART] + ARM_SPI_BASE;
    int spcr_start = table_data->len;

    spcr = acpi_data_push(table_data, sizeof(*spcr));

    spcr->interface_type = 0x3;    /* ARM PL011 UART */

    spcr->base_address.space_id = AML_SYSTEM_MEMORY;
    spcr->base_address.bit_width = 8;
    spcr->base_address.bit_offset = 0;
    spcr->base_address.access_width = 1;
    spcr->base_address.address = cpu_to_le64(uart_memmap->base);

    spcr->interrupt_types = (1 << 3); /* Bit[3] ARMH GIC interrupt */
    spcr->gsi = cpu_to_le32(irq);  /* Global System Interrupt */

    spcr->baud = 3;                /* Baud Rate: 3 = 9600 */
    spcr->parity = 0;              /* No Parity */
    spcr->stopbits = 1;            /* 1 Stop bit */
    spcr->flowctrl = (1 << 1);     /* Bit[1] = RTS/CTS hardware flow control */
    spcr->term_type = 0;           /* Terminal Type: 0 = VT100 */

    spcr->pci_device_id = 0xffff;  /* PCI Device ID: not a PCI device */
    spcr->pci_vendor_id = 0xffff;  /* PCI Vendor ID: not a PCI device */

    build_header(linker, table_data, (void *)(table_data->data + spcr_start),
                 "SPCR", table_data->len - spcr_start, 2, NULL, NULL);
}

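/* SRAT: CPU and memory affinity derived from the NUMA configuration */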
static void
build_srat(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms)
{
    AcpiSystemResourceAffinityTable *srat;
    AcpiSratProcessorGiccAffinity *core;
    AcpiSratMemoryAffinity *numamem;
    int i, srat_start;
    uint64_t mem_base;
    MachineClass *mc = MACHINE_GET_CLASS(vms);
    const CPUArchIdList *cpu_list = mc->possible_cpu_arch_ids(MACHINE(vms));

    srat_start = table_data->len;
    srat = acpi_data_push(table_data, sizeof(*srat));
    srat->reserved1 = cpu_to_le32(1);

    for (i = 0; i < cpu_list->len; ++i) {
        core = acpi_data_push(table_data, sizeof(*core));
        core->type = ACPI_SRAT_PROCESSOR_GICC;
        core->length = sizeof(*core);
        core->proximity = cpu_to_le32(cpu_list->cpus[i].props.node_id);
        core->acpi_processor_uid = cpu_to_le32(i);
        core->flags = cpu_to_le32(1);
    }

    mem_base = vms->memmap[VIRT_MEM].base;
    for (i = 0; i < nb_numa_nodes; ++i) {
        if (numa_info[i].node_mem > 0) {
            numamem = acpi_data_push(table_data, sizeof(*numamem));
            build_srat_memory(numamem, mem_base, numa_info[i].node_mem, i,
                              MEM_AFFINITY_ENABLED);
            mem_base += numa_info[i].node_mem;
        }
    }

    build_header(linker, table_data, (void *)(table_data->data + srat_start),
                 "SRAT", table_data->len - srat_start, 3, NULL, NULL);
}

/* GTDT */
static void
build_gtdt(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms)
{
    VirtMachineClass *vmc = VIRT_MACHINE_GET_CLASS(vms);
    int gtdt_start = table_data->len;
    AcpiGenericTimerTable *gtdt;
    uint32_t irqflags;

    if (vmc->claim_edge_triggered_timers) {
        irqflags = ACPI_GTDT_INTERRUPT_MODE_EDGE;
    } else {
        irqflags = ACPI_GTDT_INTERRUPT_MODE_LEVEL;
    }

    gtdt = acpi_data_push(table_data, sizeof *gtdt);
    /* The interrupt values are the device tree values plus 16 */
    gtdt->secure_el1_interrupt = cpu_to_le32(ARCH_TIMER_S_EL1_IRQ + 16);
    gtdt->secure_el1_flags = cpu_to_le32(irqflags);

    gtdt->non_secure_el1_interrupt = cpu_to_le32(ARCH_TIMER_NS_EL1_IRQ + 16);
    gtdt->non_secure_el1_flags = cpu_to_le32(irqflags |
                                             ACPI_GTDT_CAP_ALWAYS_ON);

    gtdt->virtual_timer_interrupt = cpu_to_le32(ARCH_TIMER_VIRT_IRQ + 16);
    gtdt->virtual_timer_flags = cpu_to_le32(irqflags);

    gtdt->non_secure_el2_interrupt = cpu_to_le32(ARCH_TIMER_NS_EL2_IRQ + 16);
    gtdt->non_secure_el2_flags = cpu_to_le32(irqflags);

    build_header(linker, table_data,
                 (void *)(table_data->data + gtdt_start), "GTDT",
                 table_data->len - gtdt_start, 2, NULL, NULL);
}

/* MADT */
static void
build_madt(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms)
{
    VirtMachineClass *vmc = VIRT_MACHINE_GET_CLASS(vms);
    int madt_start = table_data->len;
    const MemMapEntry *memmap = vms->memmap;
    const int *irqmap = vms->irqmap;
    AcpiMultipleApicTable *madt;
    AcpiMadtGenericDistributor *gicd;
    AcpiMadtGenericMsiFrame *gic_msi;
    int i;

    madt = acpi_data_push(table_data, sizeof *madt);

    gicd = acpi_data_push(table_data, sizeof *gicd);
    gicd->type = ACPI_APIC_GENERIC_DISTRIBUTOR;
    gicd->length = sizeof(*gicd);
    gicd->base_address = cpu_to_le64(memmap[VIRT_GIC_DIST].base);
    gicd->version = vms->gic_version;

    for (i = 0; i < vms->smp_cpus; i++) {
        AcpiMadtGenericCpuInterface *gicc = acpi_data_push(table_data,
                                                           sizeof(*gicc));
        ARMCPU *armcpu = ARM_CPU(qemu_get_cpu(i));

        gicc->type = ACPI_APIC_GENERIC_CPU_INTERFACE;
        gicc->length = sizeof(*gicc);
        if (vms->gic_version == 2) {
            gicc->base_address = cpu_to_le64(memmap[VIRT_GIC_CPU].base);
            gicc->gich_base_address = cpu_to_le64(memmap[VIRT_GIC_HYP].base);
            gicc->gicv_base_address = cpu_to_le64(memmap[VIRT_GIC_VCPU].base);
        }
        gicc->cpu_interface_number = cpu_to_le32(i);
        gicc->arm_mpidr = cpu_to_le64(armcpu->mp_affinity);
        gicc->uid = cpu_to_le32(i);
        gicc->flags = cpu_to_le32(ACPI_MADT_GICC_ENABLED);

        if (arm_feature(&armcpu->env, ARM_FEATURE_PMU)) {
            gicc->performance_interrupt = cpu_to_le32(PPI(VIRTUAL_PMU_IRQ));
        }
        if (vms->virt) {
            gicc->vgic_interrupt = cpu_to_le32(PPI(ARCH_GIC_MAINT_IRQ));
        }
    }

    if (vms->gic_version == 3) {
        AcpiMadtGenericTranslator *gic_its;
        int nb_redist_regions = virt_gicv3_redist_region_count(vms);
        AcpiMadtGenericRedistributor *gicr = acpi_data_push(table_data,
                                                         sizeof *gicr);

        gicr->type = ACPI_APIC_GENERIC_REDISTRIBUTOR;
        gicr->length = sizeof(*gicr);
        gicr->base_address = cpu_to_le64(memmap[VIRT_GIC_REDIST].base);
        gicr->range_length = cpu_to_le32(memmap[VIRT_GIC_REDIST].size);

        if (nb_redist_regions == 2) {
            gicr = acpi_data_push(table_data, sizeof(*gicr));
            gicr->type = ACPI_APIC_GENERIC_REDISTRIBUTOR;
            gicr->length = sizeof(*gicr);
            gicr->base_address =
                cpu_to_le64(memmap[VIRT_HIGH_GIC_REDIST2].base);
            gicr->range_length =
                cpu_to_le32(memmap[VIRT_HIGH_GIC_REDIST2].size);
        }

        if (its_class_name() && !vmc->no_its) {
            gic_its = acpi_data_push(table_data, sizeof *gic_its);
            gic_its->type = ACPI_APIC_GENERIC_TRANSLATOR;
            gic_its->length = sizeof(*gic_its);
            gic_its->translation_id = 0;
            gic_its->base_address = cpu_to_le64(memmap[VIRT_GIC_ITS].base);
        }
    } else {
        gic_msi = acpi_data_push(table_data, sizeof *gic_msi);
        gic_msi->type = ACPI_APIC_GENERIC_MSI_FRAME;
        gic_msi->length = sizeof(*gic_msi);
        gic_msi->gic_msi_frame_id = 0;
        gic_msi->base_address = cpu_to_le64(memmap[VIRT_GIC_V2M].base);
        gic_msi->flags = cpu_to_le32(1);
        gic_msi->spi_count = cpu_to_le16(NUM_GICV2M_SPIS);
        gic_msi->spi_base = cpu_to_le16(irqmap[VIRT_GIC_V2M] + ARM_SPI_BASE);
    }

    build_header(linker, table_data,
                 (void *)(table_data->data + madt_start), "APIC",
                 table_data->len - madt_start, 3, NULL, NULL);
}

/* FADT */
static void build_fadt_rev5(GArray *table_data, BIOSLinker *linker,
                            VirtMachineState *vms, unsigned dsdt_tbl_offset)
{
    /* ACPI v5.1 */
    AcpiFadtData fadt = {
        .rev = 5,
        .minor_ver = 1,
        .flags = 1 << ACPI_FADT_F_HW_REDUCED_ACPI,
        .xdsdt_tbl_offset = &dsdt_tbl_offset,
    };

    switch (vms->psci_conduit) {
    case QEMU_PSCI_CONDUIT_DISABLED:
        fadt.arm_boot_arch = 0;
        break;
    case QEMU_PSCI_CONDUIT_HVC:
        fadt.arm_boot_arch = ACPI_FADT_ARM_PSCI_COMPLIANT |
                             ACPI_FADT_ARM_PSCI_USE_HVC;
        break;
    case QEMU_PSCI_CONDUIT_SMC:
        fadt.arm_boot_arch = ACPI_FADT_ARM_PSCI_COMPLIANT;
        break;
    default:
        g_assert_not_reached();
    }

    build_fadt(table_data, linker, &fadt, NULL, NULL);
}

/* DSDT */
static void
build_dsdt(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms)
{
    Aml *scope, *dsdt;
    const MemMapEntry *memmap = vms->memmap;
    const int *irqmap = vms->irqmap;

    dsdt = init_aml_allocator();
    /* Reserve space for header */
    acpi_data_push(dsdt->buf, sizeof(AcpiTableHeader));

    /* When booting the VM with UEFI, UEFI takes ownership of the RTC hardware.
     * While UEFI can use libfdt to disable the RTC device node in the DTB that
     * it passes to the OS, it cannot modify AML. Therefore, we won't generate
     * the RTC ACPI device at all when using UEFI.
     */
    scope = aml_scope("\\_SB");
    acpi_dsdt_add_cpus(scope, vms->smp_cpus);
    acpi_dsdt_add_uart(scope, &memmap[VIRT_UART],
                       (irqmap[VIRT_UART] + ARM_SPI_BASE));
    acpi_dsdt_add_flash(scope, &memmap[VIRT_FLASH]);
    acpi_dsdt_add_fw_cfg(scope, &memmap[VIRT_FW_CFG]);
    acpi_dsdt_add_virtio(scope, &memmap[VIRT_MMIO],
                    (irqmap[VIRT_MMIO] + ARM_SPI_BASE), NUM_VIRTIO_TRANSPORTS);
    acpi_dsdt_add_pci(scope, memmap, (irqmap[VIRT_PCIE] + ARM_SPI_BASE),
                      vms->highmem, vms->highmem_ecam);
    acpi_dsdt_add_gpio(scope, &memmap[VIRT_GPIO],
                       (irqmap[VIRT_GPIO] + ARM_SPI_BASE));
    acpi_dsdt_add_power_button(scope);

    aml_append(dsdt, scope);

    /* copy AML table into ACPI tables blob and patch header there */
    g_array_append_vals(table_data, dsdt->buf->data, dsdt->buf->len);
    build_header(linker, table_data,
        (void *)(table_data->data + table_data->len - dsdt->buf->len),
        "DSDT", dsdt->buf->len, 2, NULL, NULL);
    free_aml_allocator();
}

typedef
struct AcpiBuildState {
    /* Copy of table in RAM (for patching). */
    MemoryRegion *table_mr;
    MemoryRegion *rsdp_mr;
    MemoryRegion *linker_mr;
    /* Is table patched? */
    bool patched;
} AcpiBuildState;

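/*
 * Build all tables into the blobs: DSDT, FADT, MADT, GTDT, MCFG, SPCR,
 * SRAT/SLIT when NUMA is configured, IORT when an ITS is present, then the
 * XSDT and the RSDP.
 */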
static
void virt_acpi_build(VirtMachineState *vms, AcpiBuildTables *tables)
{
    VirtMachineClass *vmc = VIRT_MACHINE_GET_CLASS(vms);
    GArray *table_offsets;
    unsigned dsdt, xsdt;
    GArray *tables_blob = tables->table_data;

    table_offsets = g_array_new(false, true /* clear */,
                                        sizeof(uint32_t));

    bios_linker_loader_alloc(tables->linker,
                             ACPI_BUILD_TABLE_FILE, tables_blob,
                             64, false /* high memory */);

    /* DSDT is pointed to by FADT */
    dsdt = tables_blob->len;
    build_dsdt(tables_blob, tables->linker, vms);

    /* FADT MADT GTDT MCFG SPCR pointed to by XSDT */
    acpi_add_table(table_offsets, tables_blob);
    build_fadt_rev5(tables_blob, tables->linker, vms, dsdt);

    acpi_add_table(table_offsets, tables_blob);
    build_madt(tables_blob, tables->linker, vms);

    acpi_add_table(table_offsets, tables_blob);
    build_gtdt(tables_blob, tables->linker, vms);

    acpi_add_table(table_offsets, tables_blob);
    {
        AcpiMcfgInfo mcfg = {
           .base = vms->memmap[VIRT_ECAM_ID(vms->highmem_ecam)].base,
           .size = vms->memmap[VIRT_ECAM_ID(vms->highmem_ecam)].size,
        };
        build_mcfg(tables_blob, tables->linker, &mcfg);
    }

    acpi_add_table(table_offsets, tables_blob);
    build_spcr(tables_blob, tables->linker, vms);

    if (nb_numa_nodes > 0) {
        acpi_add_table(table_offsets, tables_blob);
        build_srat(tables_blob, tables->linker, vms);
        if (have_numa_distance) {
            acpi_add_table(table_offsets, tables_blob);
            build_slit(tables_blob, tables->linker);
        }
    }

    if (its_class_name() && !vmc->no_its) {
        acpi_add_table(table_offsets, tables_blob);
        build_iort(tables_blob, tables->linker, vms);
    }

    /* XSDT is pointed to by RSDP */
    xsdt = tables_blob->len;
    build_xsdt(tables_blob, tables->linker, table_offsets, NULL, NULL);

    /* RSDP is in FSEG memory, so allocate it separately */
    {
        AcpiRsdpData rsdp_data = {
            .revision = 2,
            .oem_id = ACPI_BUILD_APPNAME6,
            .xsdt_tbl_offset = &xsdt,
            .rsdt_tbl_offset = NULL,
        };
        build_rsdp(tables->rsdp, tables->linker, &rsdp_data);
    }

    /* Cleanup memory that's no longer used. */
    g_array_free(table_offsets, true);
}

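/*
 * Copy a freshly rebuilt blob back into its guest RAM region and mark the
 * region dirty.
 */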
static void acpi_ram_update(MemoryRegion *mr, GArray *data)
{
    uint32_t size = acpi_data_len(data);

    /* Make sure RAM size is correct - in case it got changed
     * e.g. by migration */
    memory_region_ram_resize(mr, size, &error_abort);

    memcpy(memory_region_get_ram_ptr(mr), data->data, size);
    memory_region_set_dirty(mr, 0, size);
}

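/*
 * ROM blob update callback: rebuild the tables at most once per reset and
 * patch the in-RAM copies the guest reads.
 */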
static void virt_acpi_build_update(void *build_opaque)
{
    AcpiBuildState *build_state = build_opaque;
    AcpiBuildTables tables;

    /* No state to update or already patched? Nothing to do. */
    if (!build_state || build_state->patched) {
        return;
    }
    build_state->patched = true;

    acpi_build_tables_init(&tables);

    virt_acpi_build(VIRT_MACHINE(qdev_get_machine()), &tables);

    acpi_ram_update(build_state->table_mr, tables.table_data);
    acpi_ram_update(build_state->rsdp_mr, tables.rsdp);
    acpi_ram_update(build_state->linker_mr, tables.linker->cmd_blob);

    acpi_build_tables_cleanup(&tables, true);
}

static void virt_acpi_build_reset(void *build_opaque)
{
    AcpiBuildState *build_state = build_opaque;
    build_state->patched = false;
}

static const VMStateDescription vmstate_virt_acpi_build = {
    .name = "virt_acpi_build",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_BOOL(patched, AcpiBuildState),
        VMSTATE_END_OF_LIST()
    },
};

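/*
 * Entry point for the virt board: build the tables, expose them to the
 * guest as fw_cfg ROM blobs, and wire up reset and migration handling.
 */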
void virt_acpi_setup(VirtMachineState *vms)
{
    AcpiBuildTables tables;
    AcpiBuildState *build_state;

    if (!vms->fw_cfg) {
        trace_virt_acpi_setup();
        return;
    }

    if (!acpi_enabled) {
        trace_virt_acpi_setup();
        return;
    }

    build_state = g_malloc0(sizeof *build_state);

    acpi_build_tables_init(&tables);
    virt_acpi_build(vms, &tables);

    /* Now expose it all to Guest */
    build_state->table_mr = acpi_add_rom_blob(virt_acpi_build_update,
                                              build_state, tables.table_data,
                                              ACPI_BUILD_TABLE_FILE,
                                              ACPI_BUILD_TABLE_MAX_SIZE);
    assert(build_state->table_mr != NULL);

    build_state->linker_mr =
        acpi_add_rom_blob(virt_acpi_build_update, build_state,
                          tables.linker->cmd_blob, "etc/table-loader", 0);

    fw_cfg_add_file(vms->fw_cfg, ACPI_BUILD_TPMLOG_FILE, tables.tcpalog->data,
                    acpi_data_len(tables.tcpalog));

    build_state->rsdp_mr = acpi_add_rom_blob(virt_acpi_build_update,
                                             build_state, tables.rsdp,
                                             ACPI_BUILD_RSDP_FILE, 0);

    qemu_register_reset(virt_acpi_build_reset, build_state);
    virt_acpi_build_reset(build_state);
    vmstate_register(NULL, 0, &vmstate_virt_acpi_build, build_state);

    /* Cleanup tables but don't free the memory: we track it
     * in build_state.
     */
    acpi_build_tables_cleanup(&tables, false);
}