qemu/hw/vfio/spapr.c
/*
 * DMA memory preregistration
 *
 * Authors:
 *  Alexey Kardashevskiy <aik@ozlabs.ru>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include <sys/ioctl.h>
#include <linux/vfio.h>

#include "hw/vfio/vfio-common.h"
#include "hw/hw.h"
#include "exec/ram_addr.h"
#include "qemu/error-report.h"
#include "trace.h"

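/*
 * Preregistration only makes sense for real guest RAM: IOMMU regions must
 * never reach this listener, and RAM device regions (e.g. device MMIO mapped
 * into the guest) are not backed by normal host pages, so they are skipped
 * as well.
 */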
static bool vfio_prereg_listener_skipped_section(MemoryRegionSection *section)
{
    if (memory_region_is_iommu(section->mr)) {
        hw_error("Cannot possibly preregister IOMMU memory");
    }

    return !memory_region_is_ram(section->mr) ||
            memory_region_is_ram_device(section->mr);
}

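/*
 * Translate a guest physical address within this section into the QEMU
 * userspace virtual address backing it; this is the address the
 * register/unregister ioctls below operate on.
 */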
static void *vfio_prereg_gpa_to_vaddr(MemoryRegionSection *section, hwaddr gpa)
{
    return memory_region_get_ram_ptr(section->mr) +
        section->offset_within_region +
        (gpa - section->offset_within_address_space);
}

static void vfio_prereg_listener_region_add(MemoryListener *listener,
                                            MemoryRegionSection *section)
{
    VFIOContainer *container = container_of(listener, VFIOContainer,
                                            prereg_listener);
    const hwaddr gpa = section->offset_within_address_space;
    hwaddr end;
    int ret;
    hwaddr page_mask = qemu_real_host_page_mask;
    struct vfio_iommu_spapr_register_memory reg = {
        .argsz = sizeof(reg),
        .flags = 0,
    };

    if (vfio_prereg_listener_skipped_section(section)) {
        trace_vfio_prereg_listener_region_add_skip(
                section->offset_within_address_space,
                section->offset_within_address_space +
                int128_get64(int128_sub(section->size, int128_one())));
        return;
    }

    if (unlikely((section->offset_within_address_space & ~page_mask) ||
                 (section->offset_within_region & ~page_mask) ||
                 (int128_get64(section->size) & ~page_mask))) {
        error_report("%s received unaligned region", __func__);
        return;
    }

    end = section->offset_within_address_space + int128_get64(section->size);
    if (gpa >= end) {
        return;
    }

    memory_region_ref(section->mr);

    reg.vaddr = (uintptr_t) vfio_prereg_gpa_to_vaddr(section, gpa);
    reg.size = end - gpa;

    ret = ioctl(container->fd, VFIO_IOMMU_SPAPR_REGISTER_MEMORY, &reg);
    trace_vfio_prereg_register(reg.vaddr, reg.size, ret ? -errno : 0);
    if (ret) {
        /*
         * On the initfn path, store the first error in the container so we
         * can fail gracefully.  At runtime, there's not much we can do other
         * than throw a hardware error.
         */
        if (!container->initialized) {
            if (!container->error) {
                container->error = ret;
            }
        } else {
            hw_error("vfio: Memory registering failed, unable to continue");
        }
    }
}

static void vfio_prereg_listener_region_del(MemoryListener *listener,
                                            MemoryRegionSection *section)
{
    VFIOContainer *container = container_of(listener, VFIOContainer,
                                            prereg_listener);
    const hwaddr gpa = section->offset_within_address_space;
    hwaddr end;
    int ret;
    hwaddr page_mask = qemu_real_host_page_mask;
    struct vfio_iommu_spapr_register_memory reg = {
        .argsz = sizeof(reg),
        .flags = 0,
    };

    if (vfio_prereg_listener_skipped_section(section)) {
        trace_vfio_prereg_listener_region_del_skip(
                section->offset_within_address_space,
                section->offset_within_address_space +
                int128_get64(int128_sub(section->size, int128_one())));
        return;
    }

    if (unlikely((section->offset_within_address_space & ~page_mask) ||
                 (section->offset_within_region & ~page_mask) ||
                 (int128_get64(section->size) & ~page_mask))) {
        error_report("%s received unaligned region", __func__);
        return;
    }

    end = section->offset_within_address_space + int128_get64(section->size);
    if (gpa >= end) {
        return;
    }

    reg.vaddr = (uintptr_t) vfio_prereg_gpa_to_vaddr(section, gpa);
    reg.size = end - gpa;

    ret = ioctl(container->fd, VFIO_IOMMU_SPAPR_UNREGISTER_MEMORY, &reg);
    trace_vfio_prereg_unregister(reg.vaddr, reg.size, ret ? -errno : 0);
}

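/*
 * Memory listener used to preregister/unregister guest RAM with the sPAPR
 * TCE v2 IOMMU.  The container setup code copies this template and registers
 * it against the system memory address space, roughly like the sketch below
 * (the exact call site lives in hw/vfio/common.c):
 *
 *     container->prereg_listener = vfio_prereg_listener;
 *     memory_listener_register(&container->prereg_listener,
 *                              &address_space_memory);
 */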
const MemoryListener vfio_prereg_listener = {
    .region_add = vfio_prereg_listener_region_add,
    .region_del = vfio_prereg_listener_region_del,
};

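/*
 * Create a DMA window for @section via the sPAPR TCE v2 IOMMU and return the
 * IOMMU page size actually chosen for it in @pgsize.
 */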
int vfio_spapr_create_window(VFIOContainer *container,
                             MemoryRegionSection *section,
                             hwaddr *pgsize)
{
    int ret = 0;
    IOMMUMemoryRegion *iommu_mr = IOMMU_MEMORY_REGION(section->mr);
    uint64_t pagesize = memory_region_iommu_get_min_page_size(iommu_mr);
    unsigned entries, bits_total, bits_per_level, max_levels;
    struct vfio_iommu_spapr_tce_create create = { .argsz = sizeof(create) };
    long rampagesize = qemu_minrampagesize();

    /*
     * The host might not support the guest supported IOMMU page size,
     * so we will use smaller physical IOMMU pages to back them.
     */
    if (pagesize > rampagesize) {
        pagesize = rampagesize;
    }
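    /*
     * Pick the largest host-supported IOMMU page size that does not exceed
     * the (possibly capped) guest page size.  Illustrative numbers: with
     * pagesize = 64KiB and container->pgsizes = 0x11000 (4KiB | 64KiB),
     * (pagesize | (pagesize - 1)) = 0x1ffff, the AND yields 0x11000 and the
     * highest remaining bit gives 64KiB.
     */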
    pagesize = 1ULL << (63 - clz64(container->pgsizes &
                                   (pagesize | (pagesize - 1))));
    if (!pagesize) {
        error_report("Host doesn't support page size 0x%"PRIx64
                     ", the supported mask is 0x%lx",
                     memory_region_iommu_get_min_page_size(iommu_mr),
                     container->pgsizes);
        return -EINVAL;
    }

    /*
     * FIXME: For VFIO iommu types which have KVM acceleration to
     * avoid bouncing all map/unmaps through qemu this way, this
     * would be the right place to wire that up (tell the KVM
     * device emulation the VFIO iommu handles to use).
     */
    create.window_size = int128_get64(section->size);
    create.page_shift = ctz64(pagesize);
    /*
     * The SPAPR host supports multilevel TCE tables.  We try to guess an
     * optimal number of levels and, if that fails (for example due to host
     * memory fragmentation), we increase the number of levels.  The DMA
     * address structure is:
     * rrrrrrrr rxxxxxxx xxxxxxxx xxxxxxxx  xxxxxxxx xxxxxxxx xxxxxxxx iiiiiiii
     * where:
     *   r = reserved (bits >= 55 are reserved in the existing hardware)
     *   i = IOMMU page offset (64K in this example)
     *   x = bits to index a TCE which can be split into equal chunks to index
     *       within each level.
     * The aim is to split "x" into the smallest possible number of levels.
     */
    entries = create.window_size >> create.page_shift;
    /* bits_total is the number of "x" bits needed */
    bits_total = ctz64(entries * sizeof(uint64_t));
    /*
     * bits_per_level is a safe guess of how much we can allocate per level:
     * 8 is the current minimum for CONFIG_FORCE_MAX_ZONEORDER and MAX_ORDER
     * is usually bigger than that.
     * Below we look at getpagesize() as TCEs are allocated from system pages.
     */
    bits_per_level = ctz64(getpagesize()) + 8;
    create.levels = bits_total / bits_per_level;
    if (bits_total % bits_per_level) {
        ++create.levels;
    }
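    /*
     * Worked example (illustrative numbers): a 1TiB (2^40 byte) window with
     * 64KiB IOMMU pages needs 2^24 TCEs, i.e. a 2^27 byte table, so
     * bits_total = 27.  With 4KiB system pages, bits_per_level = 12 + 8 = 20,
     * giving create.levels = 2 as the first guess.
     */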
    max_levels = (64 - create.page_shift) / ctz64(getpagesize());
    for ( ; create.levels <= max_levels; ++create.levels) {
        ret = ioctl(container->fd, VFIO_IOMMU_SPAPR_TCE_CREATE, &create);
        if (!ret) {
            break;
        }
    }
    if (ret) {
        error_report("Failed to create a window, ret = %d (%m)", ret);
        return -errno;
    }

    if (create.start_addr != section->offset_within_address_space) {
        vfio_spapr_remove_window(container, create.start_addr);

        error_report("Host doesn't support DMA window at %"HWADDR_PRIx
                     ", must be %"PRIx64,
                     section->offset_within_address_space,
                     (uint64_t)create.start_addr);
        return -EINVAL;
    }
    trace_vfio_spapr_create_window(create.page_shift,
                                   create.levels,
                                   create.window_size,
                                   create.start_addr);
    *pgsize = pagesize;

    return 0;
}

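/*
 * Tear down a previously created DMA window starting at
 * @offset_within_address_space via the sPAPR TCE remove ioctl.
 */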
int vfio_spapr_remove_window(VFIOContainer *container,
                             hwaddr offset_within_address_space)
{
    struct vfio_iommu_spapr_tce_remove remove = {
        .argsz = sizeof(remove),
        .start_addr = offset_within_address_space,
    };
    int ret;

    ret = ioctl(container->fd, VFIO_IOMMU_SPAPR_TCE_REMOVE, &remove);
    if (ret) {
        error_report("Failed to remove window at %"PRIx64,
                     (uint64_t)remove.start_addr);
        return -errno;
    }

    trace_vfio_spapr_remove_window(offset_within_address_space);

    return 0;
}