# mach-virt - VirtIO guest (serial console)
# =========================================================
#
# Usage:
#
#   $ qemu-system-aarch64 \
#     -nodefaults \
#     -readconfig mach-virt-serial.cfg \
#     -display none -serial mon:stdio \
#     -cpu host
#
# You will probably need to tweak the lines marked as
# CHANGE ME before being able to use this configuration!
#
# The guest will have a selection of VirtIO devices
# tailored towards optimal performance with modern guests,
# and will be accessed through the serial console.
#
# ---------------------------------------------------------
#
# Using -nodefaults is required to have full control over
# the virtual hardware: when it's specified, QEMU will
# populate the board with only the builtin peripherals,
# such as the PL011 UART, plus a PCI Express Root Bus; the
# user will then have to explicitly add further devices.
#
# The PCI Express Root Bus shows up in the guest as:
#
#   00:00.0 Host bridge
#
# This configuration file adds a number of other useful
# devices, more specifically:
#
#   00:1c.* PCI bridge (PCI Express Root Ports)
#   01:00.0 SCSI storage controller
#   02:00.0 Ethernet controller
#
# More information about these devices is available below.
#
# We use '-display none' to prevent QEMU from creating a
# graphical display window, which would serve no use in
# this specific configuration, and '-serial mon:stdio' to
# multiplex the guest's serial console and the QEMU monitor
# to the host's stdio; use 'Ctrl+A h' to learn how to
# switch between the two and more.
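#
# For instance, once the guest is running, 'Ctrl+A c'
# switches between the serial console and the QEMU monitor,
# and 'Ctrl+A x' terminates QEMU.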


# Machine options
# =========================================================
#
# We use the virt machine type and enable KVM acceleration
# for better performance.
#
# Using less than 1 GiB of memory is probably not going to
# yield good performance in the guest, and might even lead
# to obscure boot issues in some cases.
#
# Unfortunately, there is no way to configure the CPU model
# in this file, so it will have to be provided on the
# command line, but we can configure the guest to use the
# same GIC version as the host.
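#
# Note that gic-version = "host" only makes sense together
# with KVM acceleration; if you drop accel = "kvm" (e.g. to
# run under TCG) you would also need to pick an explicit
# GIC version instead, for example:
#
#   gic-version = "3"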

[machine]
  type = "virt"
  accel = "kvm"
  gic-version = "host"

[memory]
  size = "1024"


# Firmware configuration
# =========================================================
#
# There are two parts to the firmware: a read-only image
# containing the executable code, which is shared between
# guests, and a read/write variable store that is owned
# by one specific guest, exclusively, and is used to
# record information such as the UEFI boot order.
#
# For any new guest, its permanent, private variable store
# should initially be copied from the template file
# provided along with the firmware binary.
#
# Depending on the OS distribution you're using on the
# host, the name of the package containing the firmware
# binary and variable store template, as well as the paths
# to the files themselves, will be different. For example:
#
# Fedora
#   edk2-aarch64                                      (pkg)
#   /usr/share/edk2/aarch64/QEMU_EFI-pflash.raw       (bin)
#   /usr/share/edk2/aarch64/vars-template-pflash.raw  (var)
#
# RHEL
#   AAVMF                                             (pkg)
#   /usr/share/AAVMF/AAVMF_CODE.fd                    (bin)
#   /usr/share/AAVMF/AAVMF_VARS.fd                    (var)
#
# Debian/Ubuntu
#   qemu-efi                                          (pkg)
#   /usr/share/AAVMF/AAVMF_CODE.fd                    (bin)
#   /usr/share/AAVMF/AAVMF_VARS.fd                    (var)
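#
# For example, assuming the Debian/Ubuntu paths listed
# above, the per-guest variable store referenced further
# down in this file could be created with:
#
#   $ cp /usr/share/AAVMF/AAVMF_VARS.fd guest_VARS.fd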

[drive "uefi-binary"]
  file = "/usr/share/AAVMF/AAVMF_CODE.fd"       # CHANGE ME
  format = "raw"
  if = "pflash"
  unit = "0"
  readonly = "on"

[drive "uefi-varstore"]
  file = "guest_VARS.fd"                        # CHANGE ME
  format = "raw"
  if = "pflash"
  unit = "1"


# PCI bridge (PCI Express Root Ports)
# =========================================================
#
# We create eight PCI Express Root Ports, and we plug them
# all into separate functions of the same slot. Some of
# them will be used by devices, the rest will remain
# available for hotplug.
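#
# As a rough sketch, assuming an extra image file
# 'extra.qcow2' exists on the host, a disk could later be
# hotplugged into one of the unused Root Ports from the
# QEMU monitor:
#
#   (qemu) drive_add 0 if=none,file=extra.qcow2,format=qcow2,id=extra
#   (qemu) device_add virtio-blk-pci,drive=extra,bus=pcie.3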

[device "pcie.1"]
  driver = "pcie-root-port"
  bus = "pcie.0"
  addr = "1c.0"
  port = "1"
  chassis = "1"
  multifunction = "on"

[device "pcie.2"]
  driver = "pcie-root-port"
  bus = "pcie.0"
  addr = "1c.1"
  port = "2"
  chassis = "2"

[device "pcie.3"]
  driver = "pcie-root-port"
  bus = "pcie.0"
  addr = "1c.2"
  port = "3"
  chassis = "3"

[device "pcie.4"]
  driver = "pcie-root-port"
  bus = "pcie.0"
  addr = "1c.3"
  port = "4"
  chassis = "4"

[device "pcie.5"]
  driver = "pcie-root-port"
  bus = "pcie.0"
  addr = "1c.4"
  port = "5"
  chassis = "5"

[device "pcie.6"]
  driver = "pcie-root-port"
  bus = "pcie.0"
  addr = "1c.5"
  port = "6"
  chassis = "6"

[device "pcie.7"]
  driver = "pcie-root-port"
  bus = "pcie.0"
  addr = "1c.6"
  port = "7"
  chassis = "7"

[device "pcie.8"]
  driver = "pcie-root-port"
  bus = "pcie.0"
  addr = "1c.7"
  port = "8"
  chassis = "8"


# SCSI storage controller (and storage)
# =========================================================
#
# We use virtio-scsi here so that we can (hot)plug a large
# number of disks without running into issues; a SCSI disk,
# backed by a qcow2 disk image on the host's filesystem, is
# attached to it.
#
# We also create an optical disk, mostly for installation
# purposes: once the guest OS has been successfully
# installed, the guest will no longer boot from optical
# media. If you don't want, or no longer want, to have an
# optical disk in the guest you can safely comment out
# all relevant sections below.
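#
# The qcow2 disk image itself can be created beforehand
# with qemu-img; the 20G size below is only an example,
# pick whatever suits your guest:
#
#   $ qemu-img create -f qcow2 guest.qcow2 20G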

[device "scsi"]
  driver = "virtio-scsi-pci"
  bus = "pcie.1"
  addr = "00.0"

[device "scsi-disk"]
  driver = "scsi-hd"
  bus = "scsi.0"
  drive = "disk"
  bootindex = "1"

[drive "disk"]
  file = "guest.qcow2"                          # CHANGE ME
  format = "qcow2"
  if = "none"

[device "scsi-optical-disk"]
  driver = "scsi-cd"
  bus = "scsi.0"
  drive = "optical-disk"
  bootindex = "2"

[drive "optical-disk"]
  file = "install.iso"                          # CHANGE ME
  format = "raw"
  if = "none"
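
# The installation media can also be swapped or removed at
# runtime from the QEMU monitor, for example:
#
#   (qemu) change optical-disk other.iso raw
#   (qemu) eject optical-disk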


# Ethernet controller
# =========================================================
#
# We use virtio-net for improved performance over emulated
# hardware; on the host side, we take advantage of user
# networking so that the QEMU process doesn't require any
# additional privileges.
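#
# Note that user networking does not accept incoming
# connections by default; if you need to reach the guest
# from the host (e.g. over SSH), a port forward can be
# added to the netdev below, along the lines of:
#
#   [netdev "hostnet"]
#     type = "user"
#     hostfwd = "tcp::2222-:22"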

[netdev "hostnet"]
  type = "user"

[device "net"]
  driver = "virtio-net-pci"
  netdev = "hostnet"
  bus = "pcie.2"
  addr = "00.0"