linux/virt/kvm/arm/vgic/vgic-mmio-v2.c
/*
 * VGICv2 MMIO handling functions
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/irqchip/arm-gic.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <kvm/iodev.h>
#include <kvm/arm_vgic.h>

#include "vgic.h"
#include "vgic-mmio.h"

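/*
 * Handles guest reads of the "miscellaneous" distributor registers:
 * GICD_CTLR, GICD_TYPER and GICD_IIDR. GICD_TYPER encodes the supported
 * interrupt lines as ((nr_irqs / 32) - 1) in bits [4:0] and the number
 * of vCPUs minus one in bits [7:5]; e.g. a VM with 4 vCPUs and 96 SPIs
 * reads back 0x63.
 */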
static unsigned long vgic_mmio_read_v2_misc(struct kvm_vcpu *vcpu,
                                            gpa_t addr, unsigned int len)
{
        u32 value;

        switch (addr & 0x0c) {
        case GIC_DIST_CTRL:
                value = vcpu->kvm->arch.vgic.enabled ? GICD_ENABLE : 0;
                break;
        case GIC_DIST_CTR:
                value = vcpu->kvm->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS;
                value = (value >> 5) - 1;
                value |= (atomic_read(&vcpu->kvm->online_vcpus) - 1) << 5;
                break;
        case GIC_DIST_IIDR:
                value = (PRODUCT_ID_KVM << 24) | (IMPLEMENTER_ARM << 0);
                break;
        default:
                return 0;
        }

        return value;
}

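/*
 * Handles guest writes to the same register range. Only GICD_CTLR is
 * writable; enabling the distributor kicks all vCPUs so that interrupts
 * which were already pending get forwarded. GICD_TYPER and GICD_IIDR
 * are read-only, so those writes are silently ignored.
 */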
static void vgic_mmio_write_v2_misc(struct kvm_vcpu *vcpu,
                                    gpa_t addr, unsigned int len,
                                    unsigned long val)
{
        struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
        bool was_enabled = dist->enabled;

        switch (addr & 0x0c) {
        case GIC_DIST_CTRL:
                dist->enabled = val & GICD_ENABLE;
                if (!was_enabled && dist->enabled)
                        vgic_kick_vcpus(vcpu->kvm);
                break;
        case GIC_DIST_CTR:
        case GIC_DIST_IIDR:
                /* Nothing to do */
                return;
        }
}

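/*
 * Emulates a guest write to GICD_SGIR: bits [25:24] select the target
 * list filter (use the given list, all-but-self, or self only), bits
 * [23:16] carry the CPU target list and bits [3:0] the SGI number.
 * For each targeted vCPU the SGI is latched pending and the writing
 * vCPU is recorded in the per-SGI source bitmap, since GICv2 tracks
 * SGIs per source CPU.
 */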
static void vgic_mmio_write_sgir(struct kvm_vcpu *source_vcpu,
                                 gpa_t addr, unsigned int len,
                                 unsigned long val)
{
        int nr_vcpus = atomic_read(&source_vcpu->kvm->online_vcpus);
        int intid = val & 0xf;
        int targets = (val >> 16) & 0xff;
        int mode = (val >> 24) & 0x03;
        int c;
        struct kvm_vcpu *vcpu;
        unsigned long flags;

        switch (mode) {
        case 0x0:               /* as specified by targets */
                break;
        case 0x1:
                targets = (1U << nr_vcpus) - 1;                 /* all, ... */
                targets &= ~(1U << source_vcpu->vcpu_id);       /* but self */
                break;
        case 0x2:               /* this very vCPU only */
                targets = (1U << source_vcpu->vcpu_id);
                break;
        case 0x3:               /* reserved */
                return;
        }

        kvm_for_each_vcpu(c, vcpu, source_vcpu->kvm) {
                struct vgic_irq *irq;

                if (!(targets & (1U << c)))
                        continue;

                irq = vgic_get_irq(source_vcpu->kvm, vcpu, intid);

                spin_lock_irqsave(&irq->irq_lock, flags);
                irq->pending_latch = true;
                irq->source |= 1U << source_vcpu->vcpu_id;

                vgic_queue_irq_unlock(source_vcpu->kvm, irq, flags);
                vgic_put_irq(source_vcpu->kvm, irq);
        }
}

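/* Reads GICD_ITARGETSR: one byte per interrupt, holding its CPU target mask. */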
static unsigned long vgic_mmio_read_target(struct kvm_vcpu *vcpu,
                                           gpa_t addr, unsigned int len)
{
        u32 intid = VGIC_ADDR_TO_INTID(addr, 8);
        int i;
        u64 val = 0;

        for (i = 0; i < len; i++) {
                struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

                val |= (u64)irq->targets << (i * 8);

                vgic_put_irq(vcpu->kvm, irq);
        }

        return val;
}

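/*
 * Writes to GICD_ITARGETSR. The first eight registers (covering the
 * SGIs and PPIs) are read-only. Written bytes are masked against the
 * set of online vCPUs, and the interrupt is routed to the lowest set
 * bit of the resulting mask (or vCPU 0 if the mask becomes empty).
 */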
static void vgic_mmio_write_target(struct kvm_vcpu *vcpu,
                                   gpa_t addr, unsigned int len,
                                   unsigned long val)
{
        u32 intid = VGIC_ADDR_TO_INTID(addr, 8);
        u8 cpu_mask = GENMASK(atomic_read(&vcpu->kvm->online_vcpus) - 1, 0);
        int i;
        unsigned long flags;

        /* GICD_ITARGETSR[0-7] are read-only */
        if (intid < VGIC_NR_PRIVATE_IRQS)
                return;

        for (i = 0; i < len; i++) {
                struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, NULL, intid + i);
                int target;

                spin_lock_irqsave(&irq->irq_lock, flags);

                irq->targets = (val >> (i * 8)) & cpu_mask;
                target = irq->targets ? __ffs(irq->targets) : 0;
                irq->target_vcpu = kvm_get_vcpu(vcpu->kvm, target);

                spin_unlock_irqrestore(&irq->irq_lock, flags);
                vgic_put_irq(vcpu->kvm, irq);
        }
}

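/*
 * Reads GICD_CPENDSGIR/GICD_SPENDSGIR: one byte per SGI, each bit
 * naming a source CPU with a pending SGI of that number.
 */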
static unsigned long vgic_mmio_read_sgipend(struct kvm_vcpu *vcpu,
                                            gpa_t addr, unsigned int len)
{
        u32 intid = addr & 0x0f;
        int i;
        u64 val = 0;

        for (i = 0; i < len; i++) {
                struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

                val |= (u64)irq->source << (i * 8);

                vgic_put_irq(vcpu->kvm, irq);
        }
        return val;
}

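/*
 * Writes to GICD_CPENDSGIR clear the written source bits; once no
 * source remains, the SGI stops being pending.
 */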
static void vgic_mmio_write_sgipendc(struct kvm_vcpu *vcpu,
                                     gpa_t addr, unsigned int len,
                                     unsigned long val)
{
        u32 intid = addr & 0x0f;
        int i;
        unsigned long flags;

        for (i = 0; i < len; i++) {
                struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

                spin_lock_irqsave(&irq->irq_lock, flags);

                irq->source &= ~((val >> (i * 8)) & 0xff);
                if (!irq->source)
                        irq->pending_latch = false;

                spin_unlock_irqrestore(&irq->irq_lock, flags);
                vgic_put_irq(vcpu->kvm, irq);
        }
}

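/*
 * Writes to GICD_SPENDSGIR set the written source bits and queue the
 * SGI on the target vCPU if it is now pending.
 */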
static void vgic_mmio_write_sgipends(struct kvm_vcpu *vcpu,
                                     gpa_t addr, unsigned int len,
                                     unsigned long val)
{
        u32 intid = addr & 0x0f;
        int i;
        unsigned long flags;

        for (i = 0; i < len; i++) {
                struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

                spin_lock_irqsave(&irq->irq_lock, flags);

                irq->source |= (val >> (i * 8)) & 0xff;

                if (irq->source) {
                        irq->pending_latch = true;
                        vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
                } else {
                        spin_unlock_irqrestore(&irq->irq_lock, flags);
                }
                vgic_put_irq(vcpu->kvm, irq);
        }
}

#define GICC_ARCH_VERSION_V2    0x2

/* These are for userland accesses only, there is no guest-facing emulation. */
static unsigned long vgic_mmio_read_vcpuif(struct kvm_vcpu *vcpu,
                                           gpa_t addr, unsigned int len)
{
        struct vgic_vmcr vmcr;
        u32 val;

        vgic_get_vmcr(vcpu, &vmcr);

        switch (addr & 0xff) {
        case GIC_CPU_CTRL:
                val = vmcr.grpen0 << GIC_CPU_CTRL_EnableGrp0_SHIFT;
                val |= vmcr.grpen1 << GIC_CPU_CTRL_EnableGrp1_SHIFT;
                val |= vmcr.ackctl << GIC_CPU_CTRL_AckCtl_SHIFT;
                val |= vmcr.fiqen << GIC_CPU_CTRL_FIQEn_SHIFT;
                val |= vmcr.cbpr << GIC_CPU_CTRL_CBPR_SHIFT;
                val |= vmcr.eoim << GIC_CPU_CTRL_EOImodeNS_SHIFT;

                break;
        case GIC_CPU_PRIMASK:
                /*
                 * Our KVM_DEV_TYPE_ARM_VGIC_V2 device ABI exports the
                 * PMR field as GICH_VMCR.VMPriMask rather than
                 * GICC_PMR.Priority, so we expose the upper five bits of
                 * the priority mask to userspace using the lower bits of
                 * the unsigned long; e.g. a mask of 0xf8 reads back as 0x1f.
                 */
                val = (vmcr.pmr & GICV_PMR_PRIORITY_MASK) >>
                        GICV_PMR_PRIORITY_SHIFT;
                break;
        case GIC_CPU_BINPOINT:
                val = vmcr.bpr;
                break;
        case GIC_CPU_ALIAS_BINPOINT:
                val = vmcr.abpr;
                break;
        case GIC_CPU_IDENT:
                val = ((PRODUCT_ID_KVM << 20) |
                       (GICC_ARCH_VERSION_V2 << 16) |
                       IMPLEMENTER_ARM);
                break;
        default:
                return 0;
        }

        return val;
}

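/*
 * Userspace writes to the CPU interface registers: decode the GICC_*
 * layout into the shadow struct vgic_vmcr and push it back with
 * vgic_set_vmcr().
 */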
static void vgic_mmio_write_vcpuif(struct kvm_vcpu *vcpu,
                                   gpa_t addr, unsigned int len,
                                   unsigned long val)
{
        struct vgic_vmcr vmcr;

        vgic_get_vmcr(vcpu, &vmcr);

        switch (addr & 0xff) {
        case GIC_CPU_CTRL:
                vmcr.grpen0 = !!(val & GIC_CPU_CTRL_EnableGrp0);
                vmcr.grpen1 = !!(val & GIC_CPU_CTRL_EnableGrp1);
                vmcr.ackctl = !!(val & GIC_CPU_CTRL_AckCtl);
                vmcr.fiqen = !!(val & GIC_CPU_CTRL_FIQEn);
                vmcr.cbpr = !!(val & GIC_CPU_CTRL_CBPR);
                vmcr.eoim = !!(val & GIC_CPU_CTRL_EOImodeNS);

                break;
        case GIC_CPU_PRIMASK:
                /*
                 * Our KVM_DEV_TYPE_ARM_VGIC_V2 device ABI exports the
                 * PMR field as GICH_VMCR.VMPriMask rather than
                 * GICC_PMR.Priority, so userspace provides the upper
                 * five bits of the priority mask in the lower bits of
                 * the unsigned long.
                 */
                vmcr.pmr = (val << GICV_PMR_PRIORITY_SHIFT) &
                        GICV_PMR_PRIORITY_MASK;
                break;
        case GIC_CPU_BINPOINT:
                vmcr.bpr = val;
                break;
        case GIC_CPU_ALIAS_BINPOINT:
                vmcr.abpr = val;
                break;
        }

        vgic_set_vmcr(vcpu, &vmcr);
}

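/*
 * Userspace access to the active priority registers (GICC_APRn). A
 * GICv2 host has a single GICH_APR, so only APR0 is backed; a GICv3
 * host keeps the active priorities of a GICv2 guest in ICH_AP1Rn.
 */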
static unsigned long vgic_mmio_read_apr(struct kvm_vcpu *vcpu,
                                        gpa_t addr, unsigned int len)
{
        int n; /* which APRn is this */

        n = (addr >> 2) & 0x3;

        if (kvm_vgic_global_state.type == VGIC_V2) {
                /* GICv2 hardware systems support max. 32 groups */
                if (n != 0)
                        return 0;
                return vcpu->arch.vgic_cpu.vgic_v2.vgic_apr;
        } else {
                struct vgic_v3_cpu_if *vgicv3 = &vcpu->arch.vgic_cpu.vgic_v3;

                if (n > vgic_v3_max_apr_idx(vcpu))
                        return 0;
                /* GICv3 only uses ICH_AP1Rn for memory mapped (GICv2) guests */
                return vgicv3->vgic_ap1r[n];
        }
}

static void vgic_mmio_write_apr(struct kvm_vcpu *vcpu,
                                gpa_t addr, unsigned int len,
                                unsigned long val)
{
        int n; /* which APRn is this */

        n = (addr >> 2) & 0x3;

        if (kvm_vgic_global_state.type == VGIC_V2) {
                /* GICv2 hardware systems support max. 32 groups */
                if (n != 0)
                        return;
                vcpu->arch.vgic_cpu.vgic_v2.vgic_apr = val;
        } else {
                struct vgic_v3_cpu_if *vgicv3 = &vcpu->arch.vgic_cpu.vgic_v3;

                if (n > vgic_v3_max_apr_idx(vcpu))
                        return;
                /* GICv3 only uses ICH_AP1Rn for memory mapped (GICv2) guests */
                vgicv3->vgic_ap1r[n] = val;
        }
}

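/*
 * Register maps for the distributor and (further below) the CPU
 * interface: each entry names the register offset, the guest-facing
 * read/write handlers, optional userspace-specific handlers, the width
 * (bits per interrupt or total length) and the permitted access sizes.
 */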
static const struct vgic_register_region vgic_v2_dist_registers[] = {
        REGISTER_DESC_WITH_LENGTH(GIC_DIST_CTRL,
                vgic_mmio_read_v2_misc, vgic_mmio_write_v2_misc, 12,
                VGIC_ACCESS_32bit),
        REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_IGROUP,
                vgic_mmio_read_rao, vgic_mmio_write_wi, NULL, NULL, 1,
                VGIC_ACCESS_32bit),
        REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_ENABLE_SET,
                vgic_mmio_read_enable, vgic_mmio_write_senable, NULL, NULL, 1,
                VGIC_ACCESS_32bit),
        REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_ENABLE_CLEAR,
                vgic_mmio_read_enable, vgic_mmio_write_cenable, NULL, NULL, 1,
                VGIC_ACCESS_32bit),
        REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_PENDING_SET,
                vgic_mmio_read_pending, vgic_mmio_write_spending, NULL, NULL, 1,
                VGIC_ACCESS_32bit),
        REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_PENDING_CLEAR,
                vgic_mmio_read_pending, vgic_mmio_write_cpending, NULL, NULL, 1,
                VGIC_ACCESS_32bit),
        REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_ACTIVE_SET,
                vgic_mmio_read_active, vgic_mmio_write_sactive,
                NULL, vgic_mmio_uaccess_write_sactive, 1,
                VGIC_ACCESS_32bit),
        REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_ACTIVE_CLEAR,
                vgic_mmio_read_active, vgic_mmio_write_cactive,
                NULL, vgic_mmio_uaccess_write_cactive, 1,
                VGIC_ACCESS_32bit),
        REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_PRI,
                vgic_mmio_read_priority, vgic_mmio_write_priority, NULL, NULL,
                8, VGIC_ACCESS_32bit | VGIC_ACCESS_8bit),
        REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_TARGET,
                vgic_mmio_read_target, vgic_mmio_write_target, NULL, NULL, 8,
                VGIC_ACCESS_32bit | VGIC_ACCESS_8bit),
        REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_CONFIG,
                vgic_mmio_read_config, vgic_mmio_write_config, NULL, NULL, 2,
                VGIC_ACCESS_32bit),
        REGISTER_DESC_WITH_LENGTH(GIC_DIST_SOFTINT,
                vgic_mmio_read_raz, vgic_mmio_write_sgir, 4,
                VGIC_ACCESS_32bit),
        REGISTER_DESC_WITH_LENGTH(GIC_DIST_SGI_PENDING_CLEAR,
                vgic_mmio_read_sgipend, vgic_mmio_write_sgipendc, 16,
                VGIC_ACCESS_32bit | VGIC_ACCESS_8bit),
        REGISTER_DESC_WITH_LENGTH(GIC_DIST_SGI_PENDING_SET,
                vgic_mmio_read_sgipend, vgic_mmio_write_sgipends, 16,
                VGIC_ACCESS_32bit | VGIC_ACCESS_8bit),
};

static const struct vgic_register_region vgic_v2_cpu_registers[] = {
        REGISTER_DESC_WITH_LENGTH(GIC_CPU_CTRL,
                vgic_mmio_read_vcpuif, vgic_mmio_write_vcpuif, 4,
                VGIC_ACCESS_32bit),
        REGISTER_DESC_WITH_LENGTH(GIC_CPU_PRIMASK,
                vgic_mmio_read_vcpuif, vgic_mmio_write_vcpuif, 4,
                VGIC_ACCESS_32bit),
        REGISTER_DESC_WITH_LENGTH(GIC_CPU_BINPOINT,
                vgic_mmio_read_vcpuif, vgic_mmio_write_vcpuif, 4,
                VGIC_ACCESS_32bit),
        REGISTER_DESC_WITH_LENGTH(GIC_CPU_ALIAS_BINPOINT,
                vgic_mmio_read_vcpuif, vgic_mmio_write_vcpuif, 4,
                VGIC_ACCESS_32bit),
        REGISTER_DESC_WITH_LENGTH(GIC_CPU_ACTIVEPRIO,
                vgic_mmio_read_apr, vgic_mmio_write_apr, 16,
                VGIC_ACCESS_32bit),
        REGISTER_DESC_WITH_LENGTH(GIC_CPU_IDENT,
                vgic_mmio_read_vcpuif, vgic_mmio_write_vcpuif, 4,
                VGIC_ACCESS_32bit),
};

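/*
 * Wires up the distributor as a KVM MMIO device and returns the size
 * of the region it claims (one 4K page for a GICv2 distributor).
 */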
unsigned int vgic_v2_init_dist_iodev(struct vgic_io_device *dev)
{
        dev->regions = vgic_v2_dist_registers;
        dev->nr_regions = ARRAY_SIZE(vgic_v2_dist_registers);

        kvm_iodevice_init(&dev->dev, &kvm_io_gic_ops);

        return SZ_4K;
}

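/*
 * Checks whether a userspace device attribute refers to an implemented
 * register: parse the attribute into a (vcpu, offset) pair, pick the
 * matching register table and look the offset up as an aligned 32-bit
 * access.
 */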
int vgic_v2_has_attr_regs(struct kvm_device *dev, struct kvm_device_attr *attr)
{
        const struct vgic_register_region *region;
        struct vgic_io_device iodev;
        struct vgic_reg_attr reg_attr;
        struct kvm_vcpu *vcpu;
        gpa_t addr;
        int ret;

        ret = vgic_v2_parse_attr(dev, attr, &reg_attr);
        if (ret)
                return ret;

        vcpu = reg_attr.vcpu;
        addr = reg_attr.addr;

        switch (attr->group) {
        case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
                iodev.regions = vgic_v2_dist_registers;
                iodev.nr_regions = ARRAY_SIZE(vgic_v2_dist_registers);
                iodev.base_addr = 0;
                break;
        case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
                iodev.regions = vgic_v2_cpu_registers;
                iodev.nr_regions = ARRAY_SIZE(vgic_v2_cpu_registers);
                iodev.base_addr = 0;
                break;
        default:
                return -ENXIO;
        }

        /* We only support aligned 32-bit accesses. */
        if (addr & 3)
                return -ENXIO;

        region = vgic_get_mmio_region(vcpu, &iodev, addr, sizeof(u32));
        if (!region)
                return -ENXIO;

        return 0;
}

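/* Userspace save/restore of a CPU interface register via the table above. */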
int vgic_v2_cpuif_uaccess(struct kvm_vcpu *vcpu, bool is_write,
                          int offset, u32 *val)
{
        struct vgic_io_device dev = {
                .regions = vgic_v2_cpu_registers,
                .nr_regions = ARRAY_SIZE(vgic_v2_cpu_registers),
                .iodev_type = IODEV_CPUIF,
        };

        return vgic_uaccess(vcpu, &dev, is_write, offset, val);
}

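/* Userspace save/restore of a distributor register via the table above. */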
int vgic_v2_dist_uaccess(struct kvm_vcpu *vcpu, bool is_write,
                         int offset, u32 *val)
{
        struct vgic_io_device dev = {
                .regions = vgic_v2_dist_registers,
                .nr_regions = ARRAY_SIZE(vgic_v2_dist_registers),
                .iodev_type = IODEV_DIST,
        };

        return vgic_uaccess(vcpu, &dev, is_write, offset, val);
}