/* linux/drivers/vfio/pci/vfio_pci_igd.c */
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * VFIO PCI Intel Graphics support
   4 *
   5 * Copyright (C) 2016 Red Hat, Inc.  All rights reserved.
   6 *      Author: Alex Williamson <alex.williamson@redhat.com>
   7 *
   8 * Register a device specific region through which to provide read-only
   9 * access to the Intel IGD opregion.  The register defining the opregion
  10 * address is also virtualized to prevent user modification.
  11 */
  12
  13#include <linux/io.h>
  14#include <linux/pci.h>
  15#include <linux/uaccess.h>
  16#include <linux/vfio.h>
  17
  18#include <linux/vfio_pci_core.h>
  19
  20#define OPREGION_SIGNATURE      "IntelGraphicsMem"
  21#define OPREGION_SIZE           (8 * 1024)
  22#define OPREGION_PCI_ADDR       0xfc
  23
  24#define OPREGION_RVDA           0x3ba
  25#define OPREGION_RVDS           0x3c2
  26#define OPREGION_VERSION        0x16
  27
  28static ssize_t vfio_pci_igd_rw(struct vfio_pci_core_device *vdev,
  29                               char __user *buf, size_t count, loff_t *ppos,
  30                               bool iswrite)
  31{
  32        unsigned int i = VFIO_PCI_OFFSET_TO_INDEX(*ppos) - VFIO_PCI_NUM_REGIONS;
  33        void *base = vdev->region[i].data;
  34        loff_t pos = *ppos & VFIO_PCI_OFFSET_MASK;
  35
  36        if (pos >= vdev->region[i].size || iswrite)
  37                return -EINVAL;
  38
  39        count = min(count, (size_t)(vdev->region[i].size - pos));
  40
  41        if (copy_to_user(buf, base + pos, count))
  42                return -EFAULT;
  43
  44        *ppos += count;
  45
  46        return count;
  47}
  48
/*
 * Region release callback: tear down the memremap() mapping of the
 * OpRegion that was stored in region->data at registration time.
 */
static void vfio_pci_igd_release(struct vfio_pci_core_device *vdev,
                                 struct vfio_pci_region *region)
{
        memunmap(region->data);
}
  54
/* Ops for the read-only OpRegion region; data is the memremap()'d base. */
static const struct vfio_pci_regops vfio_pci_igd_regops = {
        .rw             = vfio_pci_igd_rw,
        .release        = vfio_pci_igd_release,
};
  59
/*
 * Locate the IGD OpRegion through the config space dword at 0xfc,
 * validate its signature and size, map it (plus any extended VBT that
 * immediately follows it), and register it as a read-only
 * device-specific region.  The 0xfc register is then virtualized so the
 * user sees the host value but cannot redirect it.
 *
 * Returns 0 on success, -errno on failure.
 */
static int vfio_pci_igd_opregion_init(struct vfio_pci_core_device *vdev)
{
	__le32 *dwordp = (__le32 *)(vdev->vconfig + OPREGION_PCI_ADDR);
	u32 addr, size;
	void *base;
	int ret;
	u16 version;

	ret = pci_read_config_dword(vdev->pdev, OPREGION_PCI_ADDR, &addr);
	if (ret)
		return ret;

	/* All-zeros or all-ones: register unimplemented or uninitialized. */
	if (!addr || !(~addr))
		return -ENODEV;

	/* Map the architected minimum first; real size is read below. */
	base = memremap(addr, OPREGION_SIZE, MEMREMAP_WB);
	if (!base)
		return -ENOMEM;

	/* The OpRegion starts with a fixed 16-byte ASCII signature. */
	if (memcmp(base, OPREGION_SIGNATURE, 16)) {
		memunmap(base);
		return -EINVAL;
	}

	/* Size field (in KB units) immediately follows the signature. */
	size = le32_to_cpu(*(__le32 *)(base + 16));
	if (!size) {
		memunmap(base);
		return -EINVAL;
	}

	size *= 1024; /* In KB */

	/*
	 * Support OpRegion v2.1+.
	 *
	 * When the VBT data exceeds 6KB and cannot fit within mailbox #4,
	 * an Extended VBT region adjacent to the OpRegion holds the VBT
	 * data instead.  The OpRegion members RVDA (Relative Address of
	 * VBT Data from OpRegion Base) and RVDS (Raw VBT Data Size) give
	 * its location and size.  RVDA/RVDS are not defined before
	 * OpRegion 2.0.
	 *
	 * OpRegion 2.1+: RVDA is an unsigned offset relative to the
	 * OpRegion base, and we require it to point exactly to the end of
	 * the OpRegion; otherwise exposing the span to userspace would
	 * grant read access to whatever lies between the OpRegion and the
	 * VBT, which is not safe.  RVDS is a size in bytes.
	 *
	 * OpRegion 2.0: RVDA is a physical (host) address.  An HPA cannot
	 * be used directly in a guest and should not be reachable by the
	 * end user anyway, so 2.0 with an extended VBT is not supported.
	 */
	version = le16_to_cpu(*(__le16 *)(base + OPREGION_VERSION));
	if (version >= 0x0200) {
		u64 rvda;
		u32 rvds;

		rvda = le64_to_cpu(*(__le64 *)(base + OPREGION_RVDA));
		rvds = le32_to_cpu(*(__le32 *)(base + OPREGION_RVDS));
		/* rvda && rvds: an extended VBT region is present. */
		if (rvda && rvds) {
			/* no support for opregion v2.0 with physical VBT address */
			if (version == 0x0200) {
				memunmap(base);
				pci_err(vdev->pdev,
					"IGD assignment does not support opregion v2.0 with an extended VBT region\n");
				return -EINVAL;
			}

			/* v2.1+: VBT must directly follow the OpRegion. */
			if (rvda != size) {
				memunmap(base);
				pci_err(vdev->pdev,
					"Extended VBT does not follow opregion on version 0x%04x\n",
					version);
				return -EINVAL;
			}

			/* region size for opregion v2.0+: opregion and VBT size. */
			size += rvds;
		}
	}

	/* Remap at the true size if it differs from the initial 8KB map. */
	if (size != OPREGION_SIZE) {
		memunmap(base);
		base = memremap(addr, size, MEMREMAP_WB);
		if (!base)
			return -ENOMEM;
	}

	ret = vfio_pci_register_dev_region(vdev,
		PCI_VENDOR_ID_INTEL | VFIO_REGION_TYPE_PCI_VENDOR_TYPE,
		VFIO_REGION_SUBTYPE_INTEL_IGD_OPREGION,
		&vfio_pci_igd_regops, size, VFIO_REGION_INFO_FLAG_READ, base);
	if (ret) {
		memunmap(base);
		return ret;
	}

	/* Fill vconfig with the hw value and virtualize register */
	*dwordp = cpu_to_le32(addr);
	/*
	 * Mark all four bytes of the register as virtualized so user
	 * accesses are served from vconfig rather than hardware.
	 */
	memset(vdev->pci_config_map + OPREGION_PCI_ADDR,
	       PCI_CAP_ID_INVALID_VIRT, 4);

	return ret;
}
 163
/*
 * Read handler for the host/LPC bridge config space mirror regions.
 * region->data holds the pci_dev whose config space is exposed
 * (read-only; writes are rejected).
 *
 * The request is split into naturally aligned accesses: an optional
 * leading byte to reach word alignment, an optional leading word to
 * reach dword alignment, a dword body, then trailing word/byte loops
 * for the remainder.  Throughout, "size" counts bytes still to read and
 * "count - size" is the offset already filled in the user buffer.
 *
 * Multi-byte values are stored little-endian (cpu_to_le*) so userspace
 * sees config space in its architected little-endian layout regardless
 * of host endianness.
 *
 * Returns the number of bytes copied, or -errno on failure.
 */
static ssize_t vfio_pci_igd_cfg_rw(struct vfio_pci_core_device *vdev,
                                   char __user *buf, size_t count, loff_t *ppos,
                                   bool iswrite)
{
        unsigned int i = VFIO_PCI_OFFSET_TO_INDEX(*ppos) - VFIO_PCI_NUM_REGIONS;
        struct pci_dev *pdev = vdev->region[i].data;
        loff_t pos = *ppos & VFIO_PCI_OFFSET_MASK;
        size_t size;
        int ret;

        if (pos >= vdev->region[i].size || iswrite)
                return -EINVAL;

        /* Clamp to the region; size tracks the bytes left to read. */
        size = count = min(count, (size_t)(vdev->region[i].size - pos));

        /* Leading byte to reach word alignment. */
        if ((pos & 1) && size) {
                u8 val;

                ret = pci_user_read_config_byte(pdev, pos, &val);
                if (ret)
                        return ret;

                if (copy_to_user(buf + count - size, &val, 1))
                        return -EFAULT;

                pos++;
                size--;
        }

        /*
         * Leading word to reach dword alignment.  A word-aligned read of
         * exactly 2 bytes skips this and is handled by the trailing word
         * loop below.
         */
        if ((pos & 3) && size > 2) {
                u16 val;

                ret = pci_user_read_config_word(pdev, pos, &val);
                if (ret)
                        return ret;

                val = cpu_to_le16(val);
                if (copy_to_user(buf + count - size, &val, 2))
                        return -EFAULT;

                pos += 2;
                size -= 2;
        }

        /* Dword-aligned body. */
        while (size > 3) {
                u32 val;

                ret = pci_user_read_config_dword(pdev, pos, &val);
                if (ret)
                        return ret;

                val = cpu_to_le32(val);
                if (copy_to_user(buf + count - size, &val, 4))
                        return -EFAULT;

                pos += 4;
                size -= 4;
        }

        /* Trailing word(s). */
        while (size >= 2) {
                u16 val;

                ret = pci_user_read_config_word(pdev, pos, &val);
                if (ret)
                        return ret;

                val = cpu_to_le16(val);
                if (copy_to_user(buf + count - size, &val, 2))
                        return -EFAULT;

                pos += 2;
                size -= 2;
        }

        /* Trailing byte. */
        while (size) {
                u8 val;

                ret = pci_user_read_config_byte(pdev, pos, &val);
                if (ret)
                        return ret;

                if (copy_to_user(buf + count - size, &val, 1))
                        return -EFAULT;

                pos++;
                size--;
        }

        *ppos += count;

        return count;
}
 256
 257static void vfio_pci_igd_cfg_release(struct vfio_pci_core_device *vdev,
 258                                     struct vfio_pci_region *region)
 259{
 260        struct pci_dev *pdev = region->data;
 261
 262        pci_dev_put(pdev);
 263}
 264
/* Ops for the bridge config mirror regions; data is the bridge pci_dev. */
static const struct vfio_pci_regops vfio_pci_igd_cfg_regops = {
        .rw             = vfio_pci_igd_cfg_rw,
        .release        = vfio_pci_igd_cfg_release,
};
 269
 270static int vfio_pci_igd_cfg_init(struct vfio_pci_core_device *vdev)
 271{
 272        struct pci_dev *host_bridge, *lpc_bridge;
 273        int ret;
 274
 275        host_bridge = pci_get_domain_bus_and_slot(0, 0, PCI_DEVFN(0, 0));
 276        if (!host_bridge)
 277                return -ENODEV;
 278
 279        if (host_bridge->vendor != PCI_VENDOR_ID_INTEL ||
 280            host_bridge->class != (PCI_CLASS_BRIDGE_HOST << 8)) {
 281                pci_dev_put(host_bridge);
 282                return -EINVAL;
 283        }
 284
 285        ret = vfio_pci_register_dev_region(vdev,
 286                PCI_VENDOR_ID_INTEL | VFIO_REGION_TYPE_PCI_VENDOR_TYPE,
 287                VFIO_REGION_SUBTYPE_INTEL_IGD_HOST_CFG,
 288                &vfio_pci_igd_cfg_regops, host_bridge->cfg_size,
 289                VFIO_REGION_INFO_FLAG_READ, host_bridge);
 290        if (ret) {
 291                pci_dev_put(host_bridge);
 292                return ret;
 293        }
 294
 295        lpc_bridge = pci_get_domain_bus_and_slot(0, 0, PCI_DEVFN(0x1f, 0));
 296        if (!lpc_bridge)
 297                return -ENODEV;
 298
 299        if (lpc_bridge->vendor != PCI_VENDOR_ID_INTEL ||
 300            lpc_bridge->class != (PCI_CLASS_BRIDGE_ISA << 8)) {
 301                pci_dev_put(lpc_bridge);
 302                return -EINVAL;
 303        }
 304
 305        ret = vfio_pci_register_dev_region(vdev,
 306                PCI_VENDOR_ID_INTEL | VFIO_REGION_TYPE_PCI_VENDOR_TYPE,
 307                VFIO_REGION_SUBTYPE_INTEL_IGD_LPC_CFG,
 308                &vfio_pci_igd_cfg_regops, lpc_bridge->cfg_size,
 309                VFIO_REGION_INFO_FLAG_READ, lpc_bridge);
 310        if (ret) {
 311                pci_dev_put(lpc_bridge);
 312                return ret;
 313        }
 314
 315        return 0;
 316}
 317
 318int vfio_pci_igd_init(struct vfio_pci_core_device *vdev)
 319{
 320        int ret;
 321
 322        ret = vfio_pci_igd_opregion_init(vdev);
 323        if (ret)
 324                return ret;
 325
 326        ret = vfio_pci_igd_cfg_init(vdev);
 327        if (ret)
 328                return ret;
 329
 330        return 0;
 331}
 332