linux/drivers/xen/Kconfig
# SPDX-License-Identifier: GPL-2.0-only
menu "Xen driver support"
        depends on XEN

config XEN_BALLOON
        bool "Xen memory balloon driver"
        default y
        help
          The balloon driver allows the Xen domain to request more memory from
          the system to expand the domain's memory allocation, or alternatively
          return unneeded memory to the system.
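
          As an illustrative check, the balloon's current target can be
          read from the same sysfs node referenced by the options below
          (value in KiB):

            cat /sys/devices/system/xen_memory/xen_memory0/target_kb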

config XEN_BALLOON_MEMORY_HOTPLUG
        bool "Memory hotplug support for Xen balloon driver"
        depends on XEN_BALLOON && MEMORY_HOTPLUG
        default y
        help
          Memory hotplug support for the Xen balloon driver allows expanding
          the memory available to the system beyond the limit declared at
          system startup. It is very useful for critical systems which
          require long uptimes without rebooting.

          It is also very useful for non-PV domains to obtain unpopulated
          physical memory ranges to use in order to map foreign memory or
          grants.

          Memory can be hotplugged in the following steps (a complete
          worked example follows the list):

            1) target domain: ensure that the memory auto-online policy is
               in effect by checking the
               /sys/devices/system/memory/auto_online_blocks file (should
               be 'online'),

            2) control domain: xl mem-max <target-domain> <maxmem>
               where <maxmem> is >= the requested memory size,

            3) control domain: xl mem-set <target-domain> <memory>
               where <memory> is the requested memory size; alternatively,
               memory can be added by writing a proper value to
               /sys/devices/system/xen_memory/xen_memory0/target or
               /sys/devices/system/xen_memory/xen_memory0/target_kb on the
               target domain.
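
          For example, to grow a guest to 8 GiB (the domain name "guest1"
          is illustrative; explicit unit suffixes avoid ambiguity):

            # target domain: enable auto-onlining of hotplugged memory
            echo online > /sys/devices/system/memory/auto_online_blocks

            # control domain: raise the limit, then set the new size
            xl mem-max guest1 8g
            xl mem-set guest1 8g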

          Alternatively, if memory auto-onlining was not enabled at step 1,
          the newly added memory can be manually onlined in the target
          domain by doing the following:

                for i in /sys/devices/system/memory/memory*/state; do \
                  [ "$(cat "$i")" = offline ] && echo online > "$i"; done

          or by adding the following line to the udev rules:

          SUBSYSTEM=="memory", ACTION=="add", RUN+="/bin/sh -c '[ -f /sys$devpath/state ] && echo online > /sys$devpath/state'"

config XEN_MEMORY_HOTPLUG_LIMIT
        int "Hotplugged memory limit (in GiB) for a PV guest"
        default 512
        depends on XEN_HAVE_PVMMU
        depends on MEMORY_HOTPLUG
        help
          Maximum amount of memory (in GiB) that a PV guest can be
          expanded to when using memory hotplug.

          A PV guest can have more memory than this limit if it is
          started with a larger maximum.

          This value is used to allocate enough space in internal
          tables needed for physical memory administration.
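
          For example, to reserve room for hotplugging up to 1 TiB, set
          the following in the kernel configuration:

            CONFIG_XEN_MEMORY_HOTPLUG_LIMIT=1024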

config XEN_SCRUB_PAGES_DEFAULT
        bool "Scrub pages before returning them to system by default"
        depends on XEN_BALLOON
        default y
        help
          Scrub pages before returning them to the system for reuse by
          other domains.  This makes sure that any confidential data
          is not accidentally visible to other domains.  It is more
          secure, but slightly less efficient. This can be controlled
          with the xen_scrub_pages=0 boot parameter and with
          /sys/devices/system/xen_memory/xen_memory0/scrub_pages.
          This option only sets the default value.
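
          For example, scrubbing can be disabled at runtime through the
          sysfs node named above:

            echo 0 > /sys/devices/system/xen_memory/xen_memory0/scrub_pages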

          If in doubt, say yes.

config XEN_DEV_EVTCHN
        tristate "Xen /dev/xen/evtchn device"
        default y
        help
          The evtchn driver allows a userspace process to trigger event
          channels and to receive notification of an event channel
          firing.
          If in doubt, say yes.

config XEN_BACKEND
        bool "Backend driver support"
        default XEN_DOM0
        help
          Support for backend device drivers that provide I/O services
          to other virtual machines.

config XENFS
        tristate "Xen filesystem"
        select XEN_PRIVCMD
        default y
        help
          The xen filesystem provides a way for domains to share
          information with each other and with the hypervisor.
          For example, by reading and writing the "xenbus" file, guests
          may pass arbitrary information to the initial domain.
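
          As an illustration, xenfs is conventionally mounted at
          /proc/xen:

            mount -t xenfs xenfs /proc/xen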

          If in doubt, say yes.

config XEN_COMPAT_XENFS
        bool "Create compatibility mount point /proc/xen"
        depends on XENFS
        default y
        help
          The old xenstore userspace tools expect to find "xenbus"
          under /proc/xen, but "xenbus" is now found at the root of the
          xenfs filesystem.  Selecting this causes the kernel to create
          the compatibility mount point /proc/xen if it is running on
          a Xen platform.
          If in doubt, say yes.

config XEN_SYS_HYPERVISOR
        bool "Create xen entries under /sys/hypervisor"
        depends on SYSFS
        select SYS_HYPERVISOR
        default y
        help
          Create entries under /sys/hypervisor describing the Xen
          hypervisor environment.  When running native or in another
          virtual environment, /sys/hypervisor will still be present,
          but will have no Xen contents.
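
          As an illustrative check from inside a Xen guest:

            cat /sys/hypervisor/type    # prints "xen"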

config XEN_XENBUS_FRONTEND
        tristate

config XEN_GNTDEV
        tristate "userspace grant access device driver"
        depends on XEN
        default m
        select MMU_NOTIFIER
        help
          Allows userspace processes to map grant pages shared by other
          domains, via the /dev/xen/gntdev device node.

config XEN_GNTDEV_DMABUF
        bool "Add support for dma-buf grant access device driver extension"
        depends on XEN_GNTDEV && XEN_GRANT_DMA_ALLOC
        select DMA_SHARED_BUFFER
        help
          Allows userspace processes and kernel modules to use the
          Xen-backed dma-buf implementation. With this extension, grant
          references to the pages of an imported dma-buf can be exported
          for use by another domain, and grant references coming from a
          foreign domain can be converted into a local dma-buf for local
          export.

config XEN_GRANT_DEV_ALLOC
        tristate "User-space grant reference allocator driver"
        depends on XEN
        default m
        help
          Allows userspace processes to create pages with access granted
          to other domains. This can be used to implement frontend drivers
          or as part of an inter-domain shared memory channel.

config XEN_GRANT_DMA_ALLOC
        bool "Allow allocating DMA capable buffers with grant reference module"
        depends on XEN && HAS_DMA
        help
          Extends the grant table module API to allow allocating DMA
          capable buffers and mapping foreign grant references on top of
          them. The resulting buffer is similar to one allocated by the
          balloon driver in that a proper memory reservation is made
          ({increase|decrease}_reservation) and VA mappings are updated
          if needed.
          This is useful for sharing foreign buffers with HW drivers which
          cannot work with scattered buffers provided by the balloon
          driver, but require DMA-able memory instead.

config SWIOTLB_XEN
        def_bool y
        depends on XEN_PV || ARM || ARM64
        select DMA_OPS
        select SWIOTLB

config XEN_PCIDEV_BACKEND
        tristate "Xen PCI-device backend driver"
        depends on PCI && X86 && XEN
        depends on XEN_BACKEND
        default m
        help
          The PCI device backend driver allows the kernel to export
          arbitrary PCI devices to other guests. If you select this to be
          a module, you will need to make sure no other driver has bound
          to the device(s) you want to make visible to other guests.

          The parameter "passthrough" allows you to specify how you want
          the PCI devices to appear in the guest. You can choose the
          default (0), where the PCI topology starts at 00:00.0, or (1)
          for passthrough, if you want the PCI device topology to appear
          the same as in the host.

          The "hide" parameter (only applicable if the backend driver is
          compiled into the kernel) allows you to bind the PCI devices to
          this module instead of their default device drivers. The
          argument is the list of PCI BDFs:
          xen-pciback.hide=(03:00.0)(04:00.0)
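
          For example, to load the backend as a module in passthrough
          mode (using the "passthrough" parameter described above):

            modprobe xen-pciback passthrough=1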

          If in doubt, say m.

config XEN_PVCALLS_FRONTEND
        tristate "Xen PV Calls frontend driver"
        depends on INET && XEN
        select XEN_XENBUS_FRONTEND
        help
          Experimental frontend for the Xen PV Calls protocol
          (https://xenbits.xen.org/docs/unstable/misc/pvcalls.html). It
          sends a small set of POSIX calls to the backend, which
          implements them.

config XEN_PVCALLS_BACKEND
        tristate "Xen PV Calls backend driver"
        depends on INET && XEN && XEN_BACKEND
        help
          Experimental backend for the Xen PV Calls protocol
          (https://xenbits.xen.org/docs/unstable/misc/pvcalls.html). It
          allows PV Calls frontends to send POSIX calls to the backend,
          which implements them.

          If in doubt, say n.

config XEN_SCSI_BACKEND
        tristate "Xen SCSI backend driver"
        depends on XEN && XEN_BACKEND && TARGET_CORE
        help
          The SCSI backend driver allows the kernel to export its SCSI
          devices to other guests via a high-performance shared-memory
          interface. It is only needed for systems running as Xen driver
          domains (e.g. Dom0) and only if guests need generic access to
          SCSI devices.

config XEN_PRIVCMD
        tristate
        depends on XEN
        default m

config XEN_ACPI_PROCESSOR
        tristate "Xen ACPI processor"
        depends on XEN && XEN_PV_DOM0 && X86 && ACPI_PROCESSOR && CPU_FREQ
        default m
        help
          This ACPI processor driver uploads Power Management information
          to the Xen hypervisor.

          To do that, the driver parses the Power Management data and
          uploads said information to the Xen hypervisor. The Xen
          hypervisor can then select the proper Cx and Px states. The
          driver also registers itself as the SMM so that other drivers
          (such as the ACPI cpufreq scaling driver) will not load.

          To compile this driver as a module, choose M here: the module
          will be called xen_acpi_processor. If you do not know what to
          choose, select M here. If the CPUFREQ drivers are built in,
          select Y here.

config XEN_MCE_LOG
        bool "Xen platform mcelog"
        depends on XEN_PV_DOM0 && X86_MCE
        help
          Allow the kernel to fetch MCE errors from the Xen platform and
          to convert them into the Linux mcelog format for mcelog tools.

config XEN_HAVE_PVMMU
        bool

config XEN_EFI
        def_bool y
        depends on (ARM || ARM64 || X86_64) && EFI

config XEN_AUTO_XLATE
        def_bool y
        depends on ARM || ARM64 || XEN_PVHVM
        help
          Support for auto-translated physmap guests.

config XEN_ACPI
        def_bool y
        depends on X86 && ACPI

config XEN_SYMS
        bool "Xen symbols"
        depends on X86 && XEN_DOM0 && XENFS
        default y if KALLSYMS
        help
          Exports hypervisor symbols (along with their types and
          addresses) via the /proc/xen/xensyms file, similar to
          /proc/kallsyms.
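
          As an illustrative check (requires xenfs mounted at /proc/xen):

            head /proc/xen/xensyms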

config XEN_HAVE_VPMU
        bool

config XEN_FRONT_PGDIR_SHBUF
        tristate

config XEN_UNPOPULATED_ALLOC
        bool "Use unpopulated memory ranges for guest mappings"
        depends on X86 && ZONE_DEVICE
        default XEN_BACKEND || XEN_GNTDEV || XEN_DOM0
        help
          Use unpopulated memory ranges in order to create mappings for
          guest memory regions, including grant maps and foreign pages.
          This avoids having to balloon out RAM regions in order to
          obtain physical memory space to create such mappings.

endmenu