linux/virt/kvm/arm/vgic/vgic-kvm-device.c
/*
 * VGIC: KVM DEVICE API
 *
 * Copyright (C) 2015 ARM Ltd.
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
#include <linux/kvm_host.h>
#include <kvm/arm_vgic.h>
#include <linux/uaccess.h>
#include <asm/kvm_mmu.h>
#include "vgic.h"

/* common helpers */

int vgic_check_ioaddr(struct kvm *kvm, phys_addr_t *ioaddr,
                      phys_addr_t addr, phys_addr_t alignment)
{
        if (addr & ~KVM_PHYS_MASK)
                return -E2BIG;

        if (!IS_ALIGNED(addr, alignment))
                return -EINVAL;

        if (!IS_VGIC_ADDR_UNDEF(*ioaddr))
                return -EEXIST;

        return 0;
}

/**
 * kvm_vgic_addr - set or get vgic VM base addresses
 * @kvm:   pointer to the vm struct
 * @type:  the VGIC addr type, one of KVM_VGIC_V[23]_ADDR_TYPE_XXX
 * @addr:  pointer to address value
 * @write: if true set the address in the VM address space, if false read the
 *          address
 *
 * Set or get the vgic base addresses for the distributor and the virtual CPU
 * interface in the VM physical address space.  These addresses are properties
 * of the emulated core/SoC and therefore user space initially knows this
 * information.
 * Check them for sanity (alignment, double assignment). We can't check for
 * overlapping regions in case of a virtual GICv3 here, since we don't know
 * the number of VCPUs yet, so we defer this check to map_resources().
 */
int kvm_vgic_addr(struct kvm *kvm, unsigned long type, u64 *addr, bool write)
{
        int r = 0;
        struct vgic_dist *vgic = &kvm->arch.vgic;
        int type_needed;
        phys_addr_t *addr_ptr, alignment;

        mutex_lock(&kvm->lock);
        switch (type) {
        case KVM_VGIC_V2_ADDR_TYPE_DIST:
                type_needed = KVM_DEV_TYPE_ARM_VGIC_V2;
                addr_ptr = &vgic->vgic_dist_base;
                alignment = SZ_4K;
                break;
        case KVM_VGIC_V2_ADDR_TYPE_CPU:
                type_needed = KVM_DEV_TYPE_ARM_VGIC_V2;
                addr_ptr = &vgic->vgic_cpu_base;
                alignment = SZ_4K;
                break;
        case KVM_VGIC_V3_ADDR_TYPE_DIST:
                type_needed = KVM_DEV_TYPE_ARM_VGIC_V3;
                addr_ptr = &vgic->vgic_dist_base;
                alignment = SZ_64K;
                break;
        case KVM_VGIC_V3_ADDR_TYPE_REDIST:
                type_needed = KVM_DEV_TYPE_ARM_VGIC_V3;
                addr_ptr = &vgic->vgic_redist_base;
                alignment = SZ_64K;
                break;
        default:
                r = -ENODEV;
                goto out;
        }

        if (vgic->vgic_model != type_needed) {
                r = -ENODEV;
                goto out;
        }

        if (write) {
                r = vgic_check_ioaddr(kvm, addr_ptr, *addr, alignment);
                if (!r)
                        *addr_ptr = *addr;
        } else {
                *addr = *addr_ptr;
        }

out:
        mutex_unlock(&kvm->lock);
        return r;
}
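
/*
 * Illustrative userspace sketch (not part of this kernel file): setting the
 * GICv2 distributor and CPU interface bases through the path above, using the
 * KVM_SET_DEVICE_ATTR ioctl and struct kvm_device_attr from <linux/kvm.h>.
 * The guest physical addresses are arbitrary example values.
 *
 *      int set_v2_bases(int vgic_fd)
 *      {
 *              __u64 dist = 0x08000000;        // example address, 4K aligned
 *              __u64 cpu  = 0x08010000;        // example address, 4K aligned
 *              struct kvm_device_attr attr = {
 *                      .group = KVM_DEV_ARM_VGIC_GRP_ADDR,
 *                      .attr  = KVM_VGIC_V2_ADDR_TYPE_DIST,
 *                      .addr  = (__u64)(unsigned long)&dist,
 *              };
 *
 *              if (ioctl(vgic_fd, KVM_SET_DEVICE_ATTR, &attr))
 *                      return -errno;
 *
 *              attr.attr = KVM_VGIC_V2_ADDR_TYPE_CPU;
 *              attr.addr = (__u64)(unsigned long)&cpu;
 *              return ioctl(vgic_fd, KVM_SET_DEVICE_ATTR, &attr) ? -errno : 0;
 *      }
 *
 * Each base can only be written once; a second write to the same base fails
 * with -EEXIST via vgic_check_ioaddr() above.
 */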

static int vgic_set_common_attr(struct kvm_device *dev,
                                struct kvm_device_attr *attr)
{
        int r;

        switch (attr->group) {
        case KVM_DEV_ARM_VGIC_GRP_ADDR: {
                u64 __user *uaddr = (u64 __user *)(long)attr->addr;
                u64 addr;
                unsigned long type = (unsigned long)attr->attr;

                if (copy_from_user(&addr, uaddr, sizeof(addr)))
                        return -EFAULT;

                r = kvm_vgic_addr(dev->kvm, type, &addr, true);
                return (r == -ENODEV) ? -ENXIO : r;
        }
        case KVM_DEV_ARM_VGIC_GRP_NR_IRQS: {
                u32 __user *uaddr = (u32 __user *)(long)attr->addr;
                u32 val;
                int ret = 0;

                if (get_user(val, uaddr))
                        return -EFAULT;

                /*
                 * We require:
                 * - at least 32 SPIs on top of the 16 SGIs and 16 PPIs
                 * - at most 1024 interrupts
                 * - a multiple of 32 interrupts
                 */
                if (val < (VGIC_NR_PRIVATE_IRQS + 32) ||
                    val > VGIC_MAX_RESERVED ||
                    (val & 31))
                        return -EINVAL;

                mutex_lock(&dev->kvm->lock);

                if (vgic_ready(dev->kvm) || dev->kvm->arch.vgic.nr_spis)
                        ret = -EBUSY;
                else
                        dev->kvm->arch.vgic.nr_spis =
                                val - VGIC_NR_PRIVATE_IRQS;

                mutex_unlock(&dev->kvm->lock);

                return ret;
        }
        case KVM_DEV_ARM_VGIC_GRP_CTRL: {
                switch (attr->attr) {
                case KVM_DEV_ARM_VGIC_CTRL_INIT:
                        mutex_lock(&dev->kvm->lock);
                        r = vgic_init(dev->kvm);
                        mutex_unlock(&dev->kvm->lock);
                        return r;
                }
                break;
        }
        }

        return -ENXIO;
}
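
/*
 * Illustrative userspace sketch (not part of this kernel file): the two other
 * common groups handled above.  KVM_DEV_ARM_VGIC_GRP_NR_IRQS takes a u32 with
 * the total number of interrupts (here 160 = 32 private + 128 SPIs, an
 * arbitrary example value), and KVM_DEV_ARM_VGIC_GRP_CTRL with
 * KVM_DEV_ARM_VGIC_CTRL_INIT triggers vgic_init(); both are ordinarily issued
 * before any VCPU runs.
 *
 *      __u32 nr_irqs = 160;
 *      struct kvm_device_attr attr = {
 *              .group = KVM_DEV_ARM_VGIC_GRP_NR_IRQS,
 *              .addr  = (__u64)(unsigned long)&nr_irqs,
 *      };
 *
 *      ioctl(vgic_fd, KVM_SET_DEVICE_ATTR, &attr); // -EBUSY once the vgic is ready
 *
 *      attr = (struct kvm_device_attr) {
 *              .group = KVM_DEV_ARM_VGIC_GRP_CTRL,
 *              .attr  = KVM_DEV_ARM_VGIC_CTRL_INIT,
 *      };
 *      ioctl(vgic_fd, KVM_SET_DEVICE_ATTR, &attr);
 */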

static int vgic_get_common_attr(struct kvm_device *dev,
                                struct kvm_device_attr *attr)
{
        int r = -ENXIO;

        switch (attr->group) {
        case KVM_DEV_ARM_VGIC_GRP_ADDR: {
                u64 __user *uaddr = (u64 __user *)(long)attr->addr;
                u64 addr;
                unsigned long type = (unsigned long)attr->attr;

                r = kvm_vgic_addr(dev->kvm, type, &addr, false);
                if (r)
                        return (r == -ENODEV) ? -ENXIO : r;

                if (copy_to_user(uaddr, &addr, sizeof(addr)))
                        return -EFAULT;
                break;
        }
        case KVM_DEV_ARM_VGIC_GRP_NR_IRQS: {
                u32 __user *uaddr = (u32 __user *)(long)attr->addr;

                r = put_user(dev->kvm->arch.vgic.nr_spis +
                             VGIC_NR_PRIVATE_IRQS, uaddr);
                break;
        }
        }

        return r;
}

static int vgic_create(struct kvm_device *dev, u32 type)
{
        return kvm_vgic_create(dev->kvm, type);
}

static void vgic_destroy(struct kvm_device *dev)
{
        kfree(dev);
}

int kvm_register_vgic_device(unsigned long type)
{
        int ret = -ENODEV;

        switch (type) {
        case KVM_DEV_TYPE_ARM_VGIC_V2:
                ret = kvm_register_device_ops(&kvm_arm_vgic_v2_ops,
                                              KVM_DEV_TYPE_ARM_VGIC_V2);
                break;
        case KVM_DEV_TYPE_ARM_VGIC_V3:
                ret = kvm_register_device_ops(&kvm_arm_vgic_v3_ops,
                                              KVM_DEV_TYPE_ARM_VGIC_V3);

#ifdef CONFIG_KVM_ARM_VGIC_V3_ITS
                if (ret)
                        break;
                ret = kvm_vgic_register_its_device();
#endif
                break;
        }

        return ret;
}
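
/*
 * Illustrative userspace sketch (not part of this kernel file): once the ops
 * above are registered, a VGIC instance is created with KVM_CREATE_DEVICE on
 * the VM file descriptor, and the returned fd is the one the set/get/has_attr
 * handlers in this file operate on.
 *
 *      struct kvm_create_device cd = {
 *              .type = KVM_DEV_TYPE_ARM_VGIC_V3,       // or KVM_DEV_TYPE_ARM_VGIC_V2
 *      };
 *
 *      if (ioctl(vm_fd, KVM_CREATE_DEVICE, &cd))
 *              err(1, "KVM_CREATE_DEVICE");
 *      int vgic_fd = cd.fd;
 *
 * Passing KVM_CREATE_DEVICE_TEST in cd.flags only probes for support without
 * instantiating the device.
 */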

struct vgic_reg_attr {
        struct kvm_vcpu *vcpu;
        gpa_t addr;
};

static int parse_vgic_v2_attr(struct kvm_device *dev,
                              struct kvm_device_attr *attr,
                              struct vgic_reg_attr *reg_attr)
{
        int cpuid;

        cpuid = (attr->attr & KVM_DEV_ARM_VGIC_CPUID_MASK) >>
                 KVM_DEV_ARM_VGIC_CPUID_SHIFT;

        if (cpuid >= atomic_read(&dev->kvm->online_vcpus))
                return -EINVAL;

        reg_attr->vcpu = kvm_get_vcpu(dev->kvm, cpuid);
        reg_attr->addr = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK;

        return 0;
}
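
/*
 * Illustrative note (not part of this kernel file): for the GICv2 register
 * groups, userspace encodes the target VCPU and the register offset in
 * attr->attr exactly as decoded above, e.g.:
 *
 *      #define VGIC_V2_REG_ATTR(cpu, offset)                           \
 *              ((((__u64)(cpu) << KVM_DEV_ARM_VGIC_CPUID_SHIFT) &      \
 *                KVM_DEV_ARM_VGIC_CPUID_MASK) |                        \
 *               ((offset) & KVM_DEV_ARM_VGIC_OFFSET_MASK))
 *
 * VGIC_V2_REG_ATTR is a name made up for this sketch; the shift/mask macros
 * are the UAPI constants used by parse_vgic_v2_attr().
 */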

/* unlocks vcpus from @vcpu_lock_idx and smaller */
static void unlock_vcpus(struct kvm *kvm, int vcpu_lock_idx)
{
        struct kvm_vcpu *tmp_vcpu;

        for (; vcpu_lock_idx >= 0; vcpu_lock_idx--) {
                tmp_vcpu = kvm_get_vcpu(kvm, vcpu_lock_idx);
                mutex_unlock(&tmp_vcpu->mutex);
        }
}

static void unlock_all_vcpus(struct kvm *kvm)
{
        unlock_vcpus(kvm, atomic_read(&kvm->online_vcpus) - 1);
}

/* Returns true if all vcpus were locked, false otherwise */
static bool lock_all_vcpus(struct kvm *kvm)
{
        struct kvm_vcpu *tmp_vcpu;
        int c;

        /*
         * Any time a vcpu is run, vcpu_load is called which tries to grab the
         * vcpu->mutex.  By grabbing the vcpu->mutex of all VCPUs we ensure
         * that no other VCPUs are run and fiddle with the vgic state while we
         * access it.
         */
        kvm_for_each_vcpu(c, tmp_vcpu, kvm) {
                if (!mutex_trylock(&tmp_vcpu->mutex)) {
                        unlock_vcpus(kvm, c - 1);
                        return false;
                }
        }

        return true;
}

/**
 * vgic_attr_regs_access_v2 - allows user space to access VGIC v2 state
 *
 * @dev:      kvm device handle
 * @attr:     kvm device attribute
 * @reg:      address the value is read from or written to
 * @is_write: true if userspace is writing a register
 */
static int vgic_attr_regs_access_v2(struct kvm_device *dev,
                                    struct kvm_device_attr *attr,
                                    u32 *reg, bool is_write)
{
        struct vgic_reg_attr reg_attr;
        gpa_t addr;
        struct kvm_vcpu *vcpu;
        int ret;

        ret = parse_vgic_v2_attr(dev, attr, &reg_attr);
        if (ret)
                return ret;

        vcpu = reg_attr.vcpu;
        addr = reg_attr.addr;

        mutex_lock(&dev->kvm->lock);

        ret = vgic_init(dev->kvm);
        if (ret)
                goto out;

        if (!lock_all_vcpus(dev->kvm)) {
                ret = -EBUSY;
                goto out;
        }

        switch (attr->group) {
        case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
                ret = vgic_v2_cpuif_uaccess(vcpu, is_write, addr, reg);
                break;
        case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
                ret = vgic_v2_dist_uaccess(vcpu, is_write, addr, reg);
                break;
        default:
                ret = -EINVAL;
                break;
        }

        unlock_all_vcpus(dev->kvm);
out:
        mutex_unlock(&dev->kvm->lock);
        return ret;
}
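
/*
 * Illustrative userspace sketch (not part of this kernel file): reading the
 * GICv2 distributor control register (offset 0, GICD_CTLR) for VCPU 0 through
 * the access path above.  VGIC_V2_REG_ATTR is the example macro sketched
 * after parse_vgic_v2_attr(); the data is always a u32 pointed to by .addr.
 *
 *      __u32 val;
 *      struct kvm_device_attr attr = {
 *              .group = KVM_DEV_ARM_VGIC_GRP_DIST_REGS,
 *              .attr  = VGIC_V2_REG_ATTR(0, 0x0),      // VCPU 0, GICD_CTLR
 *              .addr  = (__u64)(unsigned long)&val,
 *      };
 *
 *      ioctl(vgic_fd, KVM_GET_DEVICE_ATTR, &attr);
 *
 * KVM_SET_DEVICE_ATTR with the same attr writes the register instead; both
 * directions initialize the VGIC via vgic_init() if needed and fail with
 * -EBUSY when any VCPU is currently running (lock_all_vcpus()).
 */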

static int vgic_v2_set_attr(struct kvm_device *dev,
                            struct kvm_device_attr *attr)
{
        int ret;

        ret = vgic_set_common_attr(dev, attr);
        if (ret != -ENXIO)
                return ret;

        switch (attr->group) {
        case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
        case KVM_DEV_ARM_VGIC_GRP_CPU_REGS: {
                u32 __user *uaddr = (u32 __user *)(long)attr->addr;
                u32 reg;

                if (get_user(reg, uaddr))
                        return -EFAULT;

                return vgic_attr_regs_access_v2(dev, attr, &reg, true);
        }
        }

        return -ENXIO;
}

static int vgic_v2_get_attr(struct kvm_device *dev,
                            struct kvm_device_attr *attr)
{
        int ret;

        ret = vgic_get_common_attr(dev, attr);
        if (ret != -ENXIO)
                return ret;

        switch (attr->group) {
        case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
        case KVM_DEV_ARM_VGIC_GRP_CPU_REGS: {
                u32 __user *uaddr = (u32 __user *)(long)attr->addr;
                u32 reg = 0;

                ret = vgic_attr_regs_access_v2(dev, attr, &reg, false);
                if (ret)
                        return ret;
                return put_user(reg, uaddr);
        }
        }

        return -ENXIO;
}

static int vgic_v2_has_attr(struct kvm_device *dev,
                            struct kvm_device_attr *attr)
{
        switch (attr->group) {
        case KVM_DEV_ARM_VGIC_GRP_ADDR:
                switch (attr->attr) {
                case KVM_VGIC_V2_ADDR_TYPE_DIST:
                case KVM_VGIC_V2_ADDR_TYPE_CPU:
                        return 0;
                }
                break;
        case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
        case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
                return vgic_v2_has_attr_regs(dev, attr);
        case KVM_DEV_ARM_VGIC_GRP_NR_IRQS:
                return 0;
        case KVM_DEV_ARM_VGIC_GRP_CTRL:
                switch (attr->attr) {
                case KVM_DEV_ARM_VGIC_CTRL_INIT:
                        return 0;
                }
        }
        return -ENXIO;
}
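
/*
 * Illustrative userspace sketch (not part of this kernel file): the has_attr
 * op above backs KVM_HAS_DEVICE_ATTR, which lets userspace probe support
 * without side effects:
 *
 *      struct kvm_device_attr attr = {
 *              .group = KVM_DEV_ARM_VGIC_GRP_CTRL,
 *              .attr  = KVM_DEV_ARM_VGIC_CTRL_INIT,
 *      };
 *
 *      bool has_init = !ioctl(vgic_fd, KVM_HAS_DEVICE_ATTR, &attr);
 */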

struct kvm_device_ops kvm_arm_vgic_v2_ops = {
        .name = "kvm-arm-vgic-v2",
        .create = vgic_create,
        .destroy = vgic_destroy,
        .set_attr = vgic_v2_set_attr,
        .get_attr = vgic_v2_get_attr,
        .has_attr = vgic_v2_has_attr,
};

static int vgic_v3_set_attr(struct kvm_device *dev,
                            struct kvm_device_attr *attr)
{
        return vgic_set_common_attr(dev, attr);
}

static int vgic_v3_get_attr(struct kvm_device *dev,
                            struct kvm_device_attr *attr)
{
        return vgic_get_common_attr(dev, attr);
}

static int vgic_v3_has_attr(struct kvm_device *dev,
                            struct kvm_device_attr *attr)
{
        switch (attr->group) {
        case KVM_DEV_ARM_VGIC_GRP_ADDR:
                switch (attr->attr) {
                case KVM_VGIC_V3_ADDR_TYPE_DIST:
                case KVM_VGIC_V3_ADDR_TYPE_REDIST:
                        return 0;
                }
                break;
        case KVM_DEV_ARM_VGIC_GRP_NR_IRQS:
                return 0;
        case KVM_DEV_ARM_VGIC_GRP_CTRL:
                switch (attr->attr) {
                case KVM_DEV_ARM_VGIC_CTRL_INIT:
                        return 0;
                }
        }
        return -ENXIO;
}

struct kvm_device_ops kvm_arm_vgic_v3_ops = {
        .name = "kvm-arm-vgic-v3",
        .create = vgic_create,
        .destroy = vgic_destroy,
        .set_attr = vgic_v3_set_attr,
        .get_attr = vgic_v3_get_attr,
        .has_attr = vgic_v3_has_attr,
};
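
/*
 * Illustrative userspace sketch (not part of this kernel file): a typical
 * GICv3 bring-up using the handlers in this file, after the VCPUs have been
 * created.  The base addresses and interrupt count are arbitrary example
 * values; create_vgic(), set_addr(), set_u32() and set_ctrl() are made-up
 * wrappers around the KVM_CREATE_DEVICE and KVM_SET_DEVICE_ATTR calls
 * sketched earlier in this file.
 *
 *      int vgic_fd = create_vgic(vm_fd, KVM_DEV_TYPE_ARM_VGIC_V3);
 *
 *      set_addr(vgic_fd, KVM_VGIC_V3_ADDR_TYPE_DIST,   0x08000000);    // 64K aligned
 *      set_addr(vgic_fd, KVM_VGIC_V3_ADDR_TYPE_REDIST, 0x080a0000);    // 64K aligned
 *      set_u32(vgic_fd, KVM_DEV_ARM_VGIC_GRP_NR_IRQS, 160);
 *      set_ctrl(vgic_fd, KVM_DEV_ARM_VGIC_CTRL_INIT);
 *
 * The redistributor region must be able to hold all VCPUs; as noted in
 * kvm_vgic_addr(), that overlap check is deferred to map_resources().
 */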