linux/arch/arm64/kvm/vgic/vgic-mmio-v2.c
/*
 * VGICv2 MMIO handling functions
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/irqchip/arm-gic.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/nospec.h>

#include <kvm/iodev.h>
#include <kvm/arm_vgic.h>

#include "vgic.h"
#include "vgic-mmio.h"

/*
 * The Revision field in the IIDR has the following meanings:
 *
 * Revision 1: Report GICv2 interrupts as group 0 instead of group 1
 * Revision 2: Interrupt groups are guest-configurable and signaled using
 *             their configured groups.
 */

static unsigned long vgic_mmio_read_v2_misc(struct kvm_vcpu *vcpu,
                                            gpa_t addr, unsigned int len)
{
        struct vgic_dist *vgic = &vcpu->kvm->arch.vgic;
        u32 value;

        switch (addr & 0x0c) {
        case GIC_DIST_CTRL:
                value = vgic->enabled ? GICD_ENABLE : 0;
                break;
        case GIC_DIST_CTR:
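                /*
                 * GICD_TYPER: bits [4:0] hold ITLinesNumber, i.e.
                 * (number of implemented interrupts / 32) - 1; bits
                 * [7:5] hold the number of CPU interfaces minus one.
                 */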
                value = vgic->nr_spis + VGIC_NR_PRIVATE_IRQS;
                value = (value >> 5) - 1;
                value |= (atomic_read(&vcpu->kvm->online_vcpus) - 1) << 5;
                break;
        case GIC_DIST_IIDR:
                value = (PRODUCT_ID_KVM << GICD_IIDR_PRODUCT_ID_SHIFT) |
                        (vgic->implementation_rev << GICD_IIDR_REVISION_SHIFT) |
                        (IMPLEMENTER_ARM << GICD_IIDR_IMPLEMENTER_SHIFT);
                break;
        default:
                return 0;
        }

        return value;
}

static void vgic_mmio_write_v2_misc(struct kvm_vcpu *vcpu,
                                    gpa_t addr, unsigned int len,
                                    unsigned long val)
{
        struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
        bool was_enabled = dist->enabled;

        switch (addr & 0x0c) {
        case GIC_DIST_CTRL:
                dist->enabled = val & GICD_ENABLE;
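                /*
                 * On the 0->1 transition, kick all vCPUs so they pick
                 * up any interrupts that became deliverable now that
                 * the distributor is enabled.
                 */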
                if (!was_enabled && dist->enabled)
                        vgic_kick_vcpus(vcpu->kvm);
                break;
        case GIC_DIST_CTR:
        case GIC_DIST_IIDR:
                /* Nothing to do */
                return;
        }
}

static int vgic_mmio_uaccess_write_v2_misc(struct kvm_vcpu *vcpu,
                                           gpa_t addr, unsigned int len,
                                           unsigned long val)
{
        switch (addr & 0x0c) {
        case GIC_DIST_IIDR:
                if (val != vgic_mmio_read_v2_misc(vcpu, addr, len))
                        return -EINVAL;

                /*
                 * If we observe a write to GICD_IIDR we know that userspace
                 * has been updated and has had a chance to cope with older
                 * kernels (VGICv2 IIDR.Revision == 0) incorrectly reporting
                 * interrupts as group 1, and therefore we now allow groups to
                 * be user writable.  Doing this by default would break
                 * migration from old kernels to new kernels with legacy
                 * userspace.
                 */
                vcpu->kvm->arch.vgic.v2_groups_user_writable = true;
                return 0;
        }

        vgic_mmio_write_v2_misc(vcpu, addr, len, val);
        return 0;
}

static int vgic_mmio_uaccess_write_v2_group(struct kvm_vcpu *vcpu,
                                            gpa_t addr, unsigned int len,
                                            unsigned long val)
{
        if (vcpu->kvm->arch.vgic.v2_groups_user_writable)
                vgic_mmio_write_group(vcpu, addr, len, val);

        return 0;
}

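/*
 * A write to GICD_SGIR generates SGIs. Register layout (GICv2):
 * bits [3:0] are the SGI interrupt ID, bits [23:16] the CPU target
 * list, and bits [25:24] the target list filter (0: use the list,
 * 1: all but self, 2: self only, 3: reserved).
 */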
static void vgic_mmio_write_sgir(struct kvm_vcpu *source_vcpu,
                                 gpa_t addr, unsigned int len,
                                 unsigned long val)
{
        int nr_vcpus = atomic_read(&source_vcpu->kvm->online_vcpus);
        int intid = val & 0xf;
        int targets = (val >> 16) & 0xff;
        int mode = (val >> 24) & 0x03;
        int c;
        struct kvm_vcpu *vcpu;
        unsigned long flags;

        switch (mode) {
        case 0x0:               /* as specified by targets */
                break;
        case 0x1:
                targets = (1U << nr_vcpus) - 1;                 /* all, ... */
                targets &= ~(1U << source_vcpu->vcpu_id);       /* but self */
                break;
        case 0x2:               /* this very vCPU only */
                targets = (1U << source_vcpu->vcpu_id);
                break;
        case 0x3:               /* reserved */
                return;
        }

        kvm_for_each_vcpu(c, vcpu, source_vcpu->kvm) {
                struct vgic_irq *irq;

                if (!(targets & (1U << c)))
                        continue;

                irq = vgic_get_irq(source_vcpu->kvm, vcpu, intid);

                raw_spin_lock_irqsave(&irq->irq_lock, flags);
                irq->pending_latch = true;
                irq->source |= 1U << source_vcpu->vcpu_id;

                vgic_queue_irq_unlock(source_vcpu->kvm, irq, flags);
                vgic_put_irq(source_vcpu->kvm, irq);
        }
}

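/*
 * GICD_ITARGETSR holds one byte per interrupt; each byte is a bitmap
 * of the CPU interfaces the interrupt is routed to. Registers 0-7
 * cover the private SGIs/PPIs and are read-only.
 */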
static unsigned long vgic_mmio_read_target(struct kvm_vcpu *vcpu,
                                           gpa_t addr, unsigned int len)
{
        u32 intid = VGIC_ADDR_TO_INTID(addr, 8);
        int i;
        u64 val = 0;

        for (i = 0; i < len; i++) {
                struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

                val |= (u64)irq->targets << (i * 8);

                vgic_put_irq(vcpu->kvm, irq);
        }

        return val;
}

static void vgic_mmio_write_target(struct kvm_vcpu *vcpu,
                                   gpa_t addr, unsigned int len,
                                   unsigned long val)
{
        u32 intid = VGIC_ADDR_TO_INTID(addr, 8);
        u8 cpu_mask = GENMASK(atomic_read(&vcpu->kvm->online_vcpus) - 1, 0);
        int i;
        unsigned long flags;

        /* GICD_ITARGETSR[0-7] are read-only */
        if (intid < VGIC_NR_PRIVATE_IRQS)
                return;

        for (i = 0; i < len; i++) {
                struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, NULL, intid + i);
                int target;

                raw_spin_lock_irqsave(&irq->irq_lock, flags);

                irq->targets = (val >> (i * 8)) & cpu_mask;
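                /* Route the interrupt to the lowest-numbered CPU in the mask. */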
                target = irq->targets ? __ffs(irq->targets) : 0;
                irq->target_vcpu = kvm_get_vcpu(vcpu->kvm, target);

                raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
                vgic_put_irq(vcpu->kvm, irq);
        }
}

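/*
 * GICD_CPENDSGIR/GICD_SPENDSGIR expose one byte per SGI; each byte is
 * a bitmap of the source CPUs that have that SGI pending for this
 * vCPU. An SGI only remains pending while its source mask is non-zero.
 */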
static unsigned long vgic_mmio_read_sgipend(struct kvm_vcpu *vcpu,
                                            gpa_t addr, unsigned int len)
{
        u32 intid = addr & 0x0f;
        int i;
        u64 val = 0;

        for (i = 0; i < len; i++) {
                struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

                val |= (u64)irq->source << (i * 8);

                vgic_put_irq(vcpu->kvm, irq);
        }
        return val;
}

static void vgic_mmio_write_sgipendc(struct kvm_vcpu *vcpu,
                                     gpa_t addr, unsigned int len,
                                     unsigned long val)
{
        u32 intid = addr & 0x0f;
        int i;
        unsigned long flags;

        for (i = 0; i < len; i++) {
                struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

                raw_spin_lock_irqsave(&irq->irq_lock, flags);

                irq->source &= ~((val >> (i * 8)) & 0xff);
                if (!irq->source)
                        irq->pending_latch = false;

                raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
                vgic_put_irq(vcpu->kvm, irq);
        }
}

static void vgic_mmio_write_sgipends(struct kvm_vcpu *vcpu,
                                     gpa_t addr, unsigned int len,
                                     unsigned long val)
{
        u32 intid = addr & 0x0f;
        int i;
        unsigned long flags;

        for (i = 0; i < len; i++) {
                struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

                raw_spin_lock_irqsave(&irq->irq_lock, flags);

                irq->source |= (val >> (i * 8)) & 0xff;

                if (irq->source) {
                        irq->pending_latch = true;
                        vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
                } else {
                        raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
                }
                vgic_put_irq(vcpu->kvm, irq);
        }
}

#define GICC_ARCH_VERSION_V2    0x2

/* These are for userland accesses only, there is no guest-facing emulation. */
static unsigned long vgic_mmio_read_vcpuif(struct kvm_vcpu *vcpu,
                                           gpa_t addr, unsigned int len)
{
        struct vgic_vmcr vmcr;
        u32 val;

        vgic_get_vmcr(vcpu, &vmcr);

        switch (addr & 0xff) {
        case GIC_CPU_CTRL:
                val = vmcr.grpen0 << GIC_CPU_CTRL_EnableGrp0_SHIFT;
                val |= vmcr.grpen1 << GIC_CPU_CTRL_EnableGrp1_SHIFT;
                val |= vmcr.ackctl << GIC_CPU_CTRL_AckCtl_SHIFT;
                val |= vmcr.fiqen << GIC_CPU_CTRL_FIQEn_SHIFT;
                val |= vmcr.cbpr << GIC_CPU_CTRL_CBPR_SHIFT;
                val |= vmcr.eoim << GIC_CPU_CTRL_EOImodeNS_SHIFT;

                break;
        case GIC_CPU_PRIMASK:
                /*
                 * Our KVM_DEV_TYPE_ARM_VGIC_V2 device ABI exports the
                 * PMR field as GICH_VMCR.VMPriMask rather than
                 * GICC_PMR.Priority, so we expose the upper five bits of
                 * priority mask to userspace using the lower bits in the
                 * unsigned long.
                 */
                val = (vmcr.pmr & GICV_PMR_PRIORITY_MASK) >>
                        GICV_PMR_PRIORITY_SHIFT;
                break;
        case GIC_CPU_BINPOINT:
                val = vmcr.bpr;
                break;
        case GIC_CPU_ALIAS_BINPOINT:
                val = vmcr.abpr;
                break;
        case GIC_CPU_IDENT:
                val = ((PRODUCT_ID_KVM << 20) |
                       (GICC_ARCH_VERSION_V2 << 16) |
                       IMPLEMENTER_ARM);
                break;
        default:
                return 0;
        }

        return val;
}

static void vgic_mmio_write_vcpuif(struct kvm_vcpu *vcpu,
                                   gpa_t addr, unsigned int len,
                                   unsigned long val)
{
        struct vgic_vmcr vmcr;

        vgic_get_vmcr(vcpu, &vmcr);

        switch (addr & 0xff) {
        case GIC_CPU_CTRL:
                vmcr.grpen0 = !!(val & GIC_CPU_CTRL_EnableGrp0);
                vmcr.grpen1 = !!(val & GIC_CPU_CTRL_EnableGrp1);
                vmcr.ackctl = !!(val & GIC_CPU_CTRL_AckCtl);
                vmcr.fiqen = !!(val & GIC_CPU_CTRL_FIQEn);
                vmcr.cbpr = !!(val & GIC_CPU_CTRL_CBPR);
                vmcr.eoim = !!(val & GIC_CPU_CTRL_EOImodeNS);

                break;
        case GIC_CPU_PRIMASK:
                /*
                 * Our KVM_DEV_TYPE_ARM_VGIC_V2 device ABI exports the
                 * PMR field as GICH_VMCR.VMPriMask rather than
                 * GICC_PMR.Priority, so we expose the upper five bits of
                 * priority mask to userspace using the lower bits in the
                 * unsigned long.
                 */
                vmcr.pmr = (val << GICV_PMR_PRIORITY_SHIFT) &
                        GICV_PMR_PRIORITY_MASK;
                break;
        case GIC_CPU_BINPOINT:
                vmcr.bpr = val;
                break;
        case GIC_CPU_ALIAS_BINPOINT:
                vmcr.abpr = val;
                break;
        }

        vgic_set_vmcr(vcpu, &vmcr);
}

static unsigned long vgic_mmio_read_apr(struct kvm_vcpu *vcpu,
                                        gpa_t addr, unsigned int len)
{
        int n; /* which APRn is this */

        n = (addr >> 2) & 0x3;

        if (kvm_vgic_global_state.type == VGIC_V2) {
                /* GICv2 hardware systems support max. 32 groups */
                if (n != 0)
                        return 0;
                return vcpu->arch.vgic_cpu.vgic_v2.vgic_apr;
        } else {
                struct vgic_v3_cpu_if *vgicv3 = &vcpu->arch.vgic_cpu.vgic_v3;

                if (n > vgic_v3_max_apr_idx(vcpu))
                        return 0;

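                /* Sanitize the index under speculation (Spectre v1). */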
                n = array_index_nospec(n, 4);

                /* GICv3 only uses ICH_AP1Rn for memory mapped (GICv2) guests */
                return vgicv3->vgic_ap1r[n];
        }
}

static void vgic_mmio_write_apr(struct kvm_vcpu *vcpu,
                                gpa_t addr, unsigned int len,
                                unsigned long val)
{
        int n; /* which APRn is this */

        n = (addr >> 2) & 0x3;

        if (kvm_vgic_global_state.type == VGIC_V2) {
                /* GICv2 hardware systems support max. 32 groups */
                if (n != 0)
                        return;
                vcpu->arch.vgic_cpu.vgic_v2.vgic_apr = val;
        } else {
                struct vgic_v3_cpu_if *vgicv3 = &vcpu->arch.vgic_cpu.vgic_v3;

                if (n > vgic_v3_max_apr_idx(vcpu))
                        return;

                n = array_index_nospec(n, 4);

                /* GICv3 only uses ICH_AP1Rn for memory mapped (GICv2) guests */
                vgicv3->vgic_ap1r[n] = val;
        }
}

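/*
 * Each entry below describes one MMIO register region: handlers for
 * guest reads and writes, optional handlers for userspace accesses,
 * and either a fixed byte length or a bits-per-interrupt density.
 */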
static const struct vgic_register_region vgic_v2_dist_registers[] = {
        REGISTER_DESC_WITH_LENGTH_UACCESS(GIC_DIST_CTRL,
                vgic_mmio_read_v2_misc, vgic_mmio_write_v2_misc,
                NULL, vgic_mmio_uaccess_write_v2_misc,
                12, VGIC_ACCESS_32bit),
        REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_IGROUP,
                vgic_mmio_read_group, vgic_mmio_write_group,
                NULL, vgic_mmio_uaccess_write_v2_group, 1,
                VGIC_ACCESS_32bit),
        REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_ENABLE_SET,
                vgic_mmio_read_enable, vgic_mmio_write_senable,
                NULL, vgic_uaccess_write_senable, 1,
                VGIC_ACCESS_32bit),
        REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_ENABLE_CLEAR,
                vgic_mmio_read_enable, vgic_mmio_write_cenable,
                NULL, vgic_uaccess_write_cenable, 1,
                VGIC_ACCESS_32bit),
        REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_PENDING_SET,
                vgic_mmio_read_pending, vgic_mmio_write_spending,
                NULL, vgic_uaccess_write_spending, 1,
                VGIC_ACCESS_32bit),
        REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_PENDING_CLEAR,
                vgic_mmio_read_pending, vgic_mmio_write_cpending,
                NULL, vgic_uaccess_write_cpending, 1,
                VGIC_ACCESS_32bit),
        REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_ACTIVE_SET,
                vgic_mmio_read_active, vgic_mmio_write_sactive,
                vgic_uaccess_read_active, vgic_mmio_uaccess_write_sactive, 1,
                VGIC_ACCESS_32bit),
        REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_ACTIVE_CLEAR,
                vgic_mmio_read_active, vgic_mmio_write_cactive,
                vgic_uaccess_read_active, vgic_mmio_uaccess_write_cactive, 1,
                VGIC_ACCESS_32bit),
        REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_PRI,
                vgic_mmio_read_priority, vgic_mmio_write_priority, NULL, NULL,
                8, VGIC_ACCESS_32bit | VGIC_ACCESS_8bit),
        REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_TARGET,
                vgic_mmio_read_target, vgic_mmio_write_target, NULL, NULL, 8,
                VGIC_ACCESS_32bit | VGIC_ACCESS_8bit),
        REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_CONFIG,
                vgic_mmio_read_config, vgic_mmio_write_config, NULL, NULL, 2,
                VGIC_ACCESS_32bit),
        REGISTER_DESC_WITH_LENGTH(GIC_DIST_SOFTINT,
                vgic_mmio_read_raz, vgic_mmio_write_sgir, 4,
                VGIC_ACCESS_32bit),
        REGISTER_DESC_WITH_LENGTH(GIC_DIST_SGI_PENDING_CLEAR,
                vgic_mmio_read_sgipend, vgic_mmio_write_sgipendc, 16,
                VGIC_ACCESS_32bit | VGIC_ACCESS_8bit),
        REGISTER_DESC_WITH_LENGTH(GIC_DIST_SGI_PENDING_SET,
                vgic_mmio_read_sgipend, vgic_mmio_write_sgipends, 16,
                VGIC_ACCESS_32bit | VGIC_ACCESS_8bit),
};

static const struct vgic_register_region vgic_v2_cpu_registers[] = {
        REGISTER_DESC_WITH_LENGTH(GIC_CPU_CTRL,
                vgic_mmio_read_vcpuif, vgic_mmio_write_vcpuif, 4,
                VGIC_ACCESS_32bit),
        REGISTER_DESC_WITH_LENGTH(GIC_CPU_PRIMASK,
                vgic_mmio_read_vcpuif, vgic_mmio_write_vcpuif, 4,
                VGIC_ACCESS_32bit),
        REGISTER_DESC_WITH_LENGTH(GIC_CPU_BINPOINT,
                vgic_mmio_read_vcpuif, vgic_mmio_write_vcpuif, 4,
                VGIC_ACCESS_32bit),
        REGISTER_DESC_WITH_LENGTH(GIC_CPU_ALIAS_BINPOINT,
                vgic_mmio_read_vcpuif, vgic_mmio_write_vcpuif, 4,
                VGIC_ACCESS_32bit),
        REGISTER_DESC_WITH_LENGTH(GIC_CPU_ACTIVEPRIO,
                vgic_mmio_read_apr, vgic_mmio_write_apr, 16,
                VGIC_ACCESS_32bit),
        REGISTER_DESC_WITH_LENGTH(GIC_CPU_IDENT,
                vgic_mmio_read_vcpuif, vgic_mmio_write_vcpuif, 4,
                VGIC_ACCESS_32bit),
};

unsigned int vgic_v2_init_dist_iodev(struct vgic_io_device *dev)
{
        dev->regions = vgic_v2_dist_registers;
        dev->nr_regions = ARRAY_SIZE(vgic_v2_dist_registers);

        kvm_iodevice_init(&dev->dev, &kvm_io_gic_ops);

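        /* The GICv2 distributor occupies a 4KiB MMIO region. */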
        return SZ_4K;
}

int vgic_v2_has_attr_regs(struct kvm_device *dev, struct kvm_device_attr *attr)
{
        const struct vgic_register_region *region;
        struct vgic_io_device iodev;
        struct vgic_reg_attr reg_attr;
        struct kvm_vcpu *vcpu;
        gpa_t addr;
        int ret;

        ret = vgic_v2_parse_attr(dev, attr, &reg_attr);
        if (ret)
                return ret;

        vcpu = reg_attr.vcpu;
        addr = reg_attr.addr;

        switch (attr->group) {
        case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
                iodev.regions = vgic_v2_dist_registers;
                iodev.nr_regions = ARRAY_SIZE(vgic_v2_dist_registers);
                iodev.base_addr = 0;
                break;
        case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
                iodev.regions = vgic_v2_cpu_registers;
                iodev.nr_regions = ARRAY_SIZE(vgic_v2_cpu_registers);
                iodev.base_addr = 0;
                break;
        default:
                return -ENXIO;
        }

        /* We only support aligned 32-bit accesses. */
        if (addr & 3)
                return -ENXIO;

        region = vgic_get_mmio_region(vcpu, &iodev, addr, sizeof(u32));
        if (!region)
                return -ENXIO;

        return 0;
}

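/*
 * The helpers below service userspace get/set of individual registers
 * via the KVM device attribute interface, wrapping the register
 * tables above in transient I/O devices.
 */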
int vgic_v2_cpuif_uaccess(struct kvm_vcpu *vcpu, bool is_write,
                          int offset, u32 *val)
{
        struct vgic_io_device dev = {
                .regions = vgic_v2_cpu_registers,
                .nr_regions = ARRAY_SIZE(vgic_v2_cpu_registers),
                .iodev_type = IODEV_CPUIF,
        };

        return vgic_uaccess(vcpu, &dev, is_write, offset, val);
}

int vgic_v2_dist_uaccess(struct kvm_vcpu *vcpu, bool is_write,
                         int offset, u32 *val)
{
        struct vgic_io_device dev = {
                .regions = vgic_v2_dist_registers,
                .nr_regions = ARRAY_SIZE(vgic_v2_dist_registers),
                .iodev_type = IODEV_DIST,
        };

        return vgic_uaccess(vcpu, &dev, is_write, offset, val);
}