linux/virt/kvm/arm/vgic/vgic-mmio-v3.c
/*
 * VGICv3 MMIO handling functions
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/irqchip/arm-gic-v3.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <kvm/iodev.h>
#include <kvm/arm_vgic.h>

#include <asm/kvm_emulate.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_mmu.h>

#include "vgic.h"
#include "vgic-mmio.h"

/* extract @num bytes at byte offset @offset from @data */
unsigned long extract_bytes(u64 data, unsigned int offset,
                            unsigned int num)
{
        return (data >> (offset * 8)) & GENMASK_ULL(num * 8 - 1, 0);
}
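
/*
 * Worked example (hypothetical value): a 2-byte access at byte offset 4
 * picks bits [47:32] out of the 64-bit register:
 *
 *      extract_bytes(0xaabbccdd11223344ULL, 4, 2)
 *              == (0xaabbccdd11223344ULL >> 32) & GENMASK_ULL(15, 0)
 *              == 0xccdd
 */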

/* allows updates of any half of a 64-bit register (or the whole thing) */
u64 update_64bit_reg(u64 reg, unsigned int offset, unsigned int len,
                     unsigned long val)
{
        int lower = (offset & 4) * 8;
        int upper = lower + 8 * len - 1;

        reg &= ~GENMASK_ULL(upper, lower);
        val &= GENMASK_ULL(len * 8 - 1, 0);

        return reg | ((u64)val << lower);
}
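
/*
 * Worked example (hypothetical values): folding a 32-bit guest write into
 * the upper word of a 64-bit register, as done for GICR_PROPBASER and
 * GICR_PENDBASER below:
 *
 *      u64 reg = 0x0000000012345678ULL;
 *      reg = update_64bit_reg(reg, 4, 4, 0xdeadbeef);
 *      // lower = (4 & 4) * 8 = 32, upper = 63, so the upper word is
 *      // replaced and the lower word preserved: 0xdeadbeef12345678ULL
 */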

bool vgic_has_its(struct kvm *kvm)
{
        struct vgic_dist *dist = &kvm->arch.vgic;

        if (dist->vgic_model != KVM_DEV_TYPE_ARM_VGIC_V3)
                return false;

        return dist->has_its;
}

bool vgic_supports_direct_msis(struct kvm *kvm)
{
        return kvm_vgic_global_state.has_gicv4 && vgic_has_its(kvm);
}

static unsigned long vgic_mmio_read_v3_misc(struct kvm_vcpu *vcpu,
                                            gpa_t addr, unsigned int len)
{
        u32 value = 0;

        switch (addr & 0x0c) {
        case GICD_CTLR:
                if (vcpu->kvm->arch.vgic.enabled)
                        value |= GICD_CTLR_ENABLE_SS_G1;
                value |= GICD_CTLR_ARE_NS | GICD_CTLR_DS;
                break;
        case GICD_TYPER:
                value = vcpu->kvm->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS;
                value = (value >> 5) - 1;
                if (vgic_has_its(vcpu->kvm)) {
                        value |= (INTERRUPT_ID_BITS_ITS - 1) << 19;
                        value |= GICD_TYPER_LPIS;
                } else {
                        value |= (INTERRUPT_ID_BITS_SPIS - 1) << 19;
                }
                break;
        case GICD_IIDR:
                value = (PRODUCT_ID_KVM << 24) | (IMPLEMENTER_ARM << 0);
                break;
        default:
                return 0;
        }

        return value;
}
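
/*
 * Worked example for the GICD_TYPER.ITLinesNumber encoding above, assuming
 * a guest configured with 224 SPIs (a hypothetical value):
 *
 *      value = 224 + VGIC_NR_PRIVATE_IRQS;     // 256 implemented INTIDs
 *      value = (value >> 5) - 1;               // ITLinesNumber = 7
 *
 * ITLinesNumber = 7 advertises 32 * (7 + 1) = 256 interrupt IDs, i.e. SPIs
 * up to INTID 255, which is how the GICv3 architecture encodes this field.
 */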

static void vgic_mmio_write_v3_misc(struct kvm_vcpu *vcpu,
                                    gpa_t addr, unsigned int len,
                                    unsigned long val)
{
        struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
        bool was_enabled = dist->enabled;

        switch (addr & 0x0c) {
        case GICD_CTLR:
                dist->enabled = val & GICD_CTLR_ENABLE_SS_G1;

                if (!was_enabled && dist->enabled)
                        vgic_kick_vcpus(vcpu->kvm);
                break;
        case GICD_TYPER:
        case GICD_IIDR:
                return;
        }
}

static unsigned long vgic_mmio_read_irouter(struct kvm_vcpu *vcpu,
                                            gpa_t addr, unsigned int len)
{
        int intid = VGIC_ADDR_TO_INTID(addr, 64);
        struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, NULL, intid);
        unsigned long ret = 0;

        if (!irq)
                return 0;

        /* The upper word is RAZ for us. */
        if (!(addr & 4))
                ret = extract_bytes(READ_ONCE(irq->mpidr), addr & 7, len);

        vgic_put_irq(vcpu->kvm, irq);
        return ret;
}

static void vgic_mmio_write_irouter(struct kvm_vcpu *vcpu,
                                    gpa_t addr, unsigned int len,
                                    unsigned long val)
{
        int intid = VGIC_ADDR_TO_INTID(addr, 64);
        struct vgic_irq *irq;
        unsigned long flags;

        /* The upper word is WI for us since we don't implement Aff3. */
        if (addr & 4)
                return;

        irq = vgic_get_irq(vcpu->kvm, NULL, intid);

        if (!irq)
                return;

        spin_lock_irqsave(&irq->irq_lock, flags);

        /* We only care about and preserve Aff0, Aff1 and Aff2. */
        irq->mpidr = val & GENMASK(23, 0);
        irq->target_vcpu = kvm_mpidr_to_vcpu(vcpu->kvm, irq->mpidr);

        spin_unlock_irqrestore(&irq->irq_lock, flags);
        vgic_put_irq(vcpu->kvm, irq);
}

static unsigned long vgic_mmio_read_v3r_ctlr(struct kvm_vcpu *vcpu,
                                             gpa_t addr, unsigned int len)
{
        struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;

        return vgic_cpu->lpis_enabled ? GICR_CTLR_ENABLE_LPIS : 0;
}

static void vgic_mmio_write_v3r_ctlr(struct kvm_vcpu *vcpu,
                                     gpa_t addr, unsigned int len,
                                     unsigned long val)
{
        struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
        bool was_enabled = vgic_cpu->lpis_enabled;

        if (!vgic_has_its(vcpu->kvm))
                return;

        vgic_cpu->lpis_enabled = val & GICR_CTLR_ENABLE_LPIS;

        if (!was_enabled && vgic_cpu->lpis_enabled)
                vgic_enable_lpis(vcpu);
}

static unsigned long vgic_mmio_read_v3r_typer(struct kvm_vcpu *vcpu,
                                              gpa_t addr, unsigned int len)
{
        unsigned long mpidr = kvm_vcpu_get_mpidr_aff(vcpu);
        int target_vcpu_id = vcpu->vcpu_id;
        u64 value;

        value = (u64)(mpidr & GENMASK(23, 0)) << 32;
        value |= ((target_vcpu_id & 0xffff) << 8);
        if (target_vcpu_id == atomic_read(&vcpu->kvm->online_vcpus) - 1)
                value |= GICR_TYPER_LAST;
        if (vgic_has_its(vcpu->kvm))
                value |= GICR_TYPER_PLPIS;

        return extract_bytes(value, addr & 7, len);
}

static unsigned long vgic_mmio_read_v3r_iidr(struct kvm_vcpu *vcpu,
                                             gpa_t addr, unsigned int len)
{
        return (PRODUCT_ID_KVM << 24) | (IMPLEMENTER_ARM << 0);
}

static unsigned long vgic_mmio_read_v3_idregs(struct kvm_vcpu *vcpu,
                                              gpa_t addr, unsigned int len)
{
        switch (addr & 0xffff) {
        case GICD_PIDR2:
                /* report a GICv3 compliant implementation */
                return 0x3b;
        }

        return 0;
}

static unsigned long vgic_v3_uaccess_read_pending(struct kvm_vcpu *vcpu,
                                                  gpa_t addr, unsigned int len)
{
        u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
        u32 value = 0;
        int i;

        /*
         * The pending state of an interrupt is latched in the pending_latch
         * variable.  Userspace saves and restores the pending state and the
         * line_level separately.
         * Refer to Documentation/virtual/kvm/devices/arm-vgic-v3.txt
         * for the handling of ISPENDR and ICPENDR.
         */
        for (i = 0; i < len * 8; i++) {
                struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

                if (irq->pending_latch)
                        value |= (1U << i);

                vgic_put_irq(vcpu->kvm, irq);
        }

        return value;
}

static void vgic_v3_uaccess_write_pending(struct kvm_vcpu *vcpu,
                                          gpa_t addr, unsigned int len,
                                          unsigned long val)
{
        u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
        int i;
        unsigned long flags;

        for (i = 0; i < len * 8; i++) {
                struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

                spin_lock_irqsave(&irq->irq_lock, flags);
                if (test_bit(i, &val)) {
                        /*
                         * pending_latch is set irrespective of the interrupt
                         * type (level or edge), so that userspace does not
                         * have to restore the interrupt configuration before
                         * the pending state.
                         */
                        irq->pending_latch = true;
                        vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
                } else {
                        irq->pending_latch = false;
                        spin_unlock_irqrestore(&irq->irq_lock, flags);
                }

                vgic_put_irq(vcpu->kvm, irq);
        }
}

/* We want to avoid outer shareable. */
u64 vgic_sanitise_shareability(u64 field)
{
        switch (field) {
        case GIC_BASER_OuterShareable:
                return GIC_BASER_InnerShareable;
        default:
                return field;
        }
}

/* Avoid any inner non-cacheable mapping. */
u64 vgic_sanitise_inner_cacheability(u64 field)
{
        switch (field) {
        case GIC_BASER_CACHE_nCnB:
        case GIC_BASER_CACHE_nC:
                return GIC_BASER_CACHE_RaWb;
        default:
                return field;
        }
}

/* Non-cacheable or same-as-inner are OK. */
u64 vgic_sanitise_outer_cacheability(u64 field)
{
        switch (field) {
        case GIC_BASER_CACHE_SameAsInner:
        case GIC_BASER_CACHE_nC:
                return field;
        default:
                return GIC_BASER_CACHE_nC;
        }
}

u64 vgic_sanitise_field(u64 reg, u64 field_mask, int field_shift,
                        u64 (*sanitise_fn)(u64))
{
        u64 field = (reg & field_mask) >> field_shift;

        field = sanitise_fn(field) << field_shift;
        return (reg & ~field_mask) | field;
}
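
/*
 * Example: in the PENDBASER/PROPBASER sanitisers below, a guest that
 * programs GIC_BASER_OuterShareable into the shareability field gets
 * GIC_BASER_InnerShareable written back instead, because
 * vgic_sanitise_field() extracts the field, runs it through
 * vgic_sanitise_shareability() and reinserts the result at the same
 * position.
 */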

#define PROPBASER_RES0_MASK                                             \
        (GENMASK_ULL(63, 59) | GENMASK_ULL(55, 52) | GENMASK_ULL(6, 5))
#define PENDBASER_RES0_MASK                                             \
        (BIT_ULL(63) | GENMASK_ULL(61, 59) | GENMASK_ULL(55, 52) |      \
         GENMASK_ULL(15, 12) | GENMASK_ULL(6, 0))

static u64 vgic_sanitise_pendbaser(u64 reg)
{
        reg = vgic_sanitise_field(reg, GICR_PENDBASER_SHAREABILITY_MASK,
                                  GICR_PENDBASER_SHAREABILITY_SHIFT,
                                  vgic_sanitise_shareability);
        reg = vgic_sanitise_field(reg, GICR_PENDBASER_INNER_CACHEABILITY_MASK,
                                  GICR_PENDBASER_INNER_CACHEABILITY_SHIFT,
                                  vgic_sanitise_inner_cacheability);
        reg = vgic_sanitise_field(reg, GICR_PENDBASER_OUTER_CACHEABILITY_MASK,
                                  GICR_PENDBASER_OUTER_CACHEABILITY_SHIFT,
                                  vgic_sanitise_outer_cacheability);

        reg &= ~PENDBASER_RES0_MASK;
        reg &= ~GENMASK_ULL(51, 48);

        return reg;
}

static u64 vgic_sanitise_propbaser(u64 reg)
{
        reg = vgic_sanitise_field(reg, GICR_PROPBASER_SHAREABILITY_MASK,
                                  GICR_PROPBASER_SHAREABILITY_SHIFT,
                                  vgic_sanitise_shareability);
        reg = vgic_sanitise_field(reg, GICR_PROPBASER_INNER_CACHEABILITY_MASK,
                                  GICR_PROPBASER_INNER_CACHEABILITY_SHIFT,
                                  vgic_sanitise_inner_cacheability);
        reg = vgic_sanitise_field(reg, GICR_PROPBASER_OUTER_CACHEABILITY_MASK,
                                  GICR_PROPBASER_OUTER_CACHEABILITY_SHIFT,
                                  vgic_sanitise_outer_cacheability);

        reg &= ~PROPBASER_RES0_MASK;
        reg &= ~GENMASK_ULL(51, 48);
        return reg;
}

static unsigned long vgic_mmio_read_propbase(struct kvm_vcpu *vcpu,
                                             gpa_t addr, unsigned int len)
{
        struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

        return extract_bytes(dist->propbaser, addr & 7, len);
}

static void vgic_mmio_write_propbase(struct kvm_vcpu *vcpu,
                                     gpa_t addr, unsigned int len,
                                     unsigned long val)
{
        struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
        struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
        u64 old_propbaser, propbaser;

        /* Storing a value with LPIs already enabled is undefined */
        if (vgic_cpu->lpis_enabled)
                return;

        do {
                old_propbaser = READ_ONCE(dist->propbaser);
                propbaser = old_propbaser;
                propbaser = update_64bit_reg(propbaser, addr & 4, len, val);
                propbaser = vgic_sanitise_propbaser(propbaser);
        } while (cmpxchg64(&dist->propbaser, old_propbaser,
                           propbaser) != old_propbaser);
}

static unsigned long vgic_mmio_read_pendbase(struct kvm_vcpu *vcpu,
                                             gpa_t addr, unsigned int len)
{
        struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;

        return extract_bytes(vgic_cpu->pendbaser, addr & 7, len);
}

static void vgic_mmio_write_pendbase(struct kvm_vcpu *vcpu,
                                     gpa_t addr, unsigned int len,
                                     unsigned long val)
{
        struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
        u64 old_pendbaser, pendbaser;

        /* Storing a value with LPIs already enabled is undefined */
        if (vgic_cpu->lpis_enabled)
                return;

        do {
                old_pendbaser = READ_ONCE(vgic_cpu->pendbaser);
                pendbaser = old_pendbaser;
                pendbaser = update_64bit_reg(pendbaser, addr & 4, len, val);
                pendbaser = vgic_sanitise_pendbaser(pendbaser);
        } while (cmpxchg64(&vgic_cpu->pendbaser, old_pendbaser,
                           pendbaser) != old_pendbaser);
}

/*
 * The GICv3 per-IRQ registers are split to control PPIs and SGIs in the
 * redistributors, while SPIs are covered by registers in the distributor
 * block. Trying to set private IRQs in this block gets ignored.
 * We take some special care here to fix the calculation of the register
 * offset.
 */
#define REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(off, rd, wr, ur, uw, bpi, acc) \
        {                                                               \
                .reg_offset = off,                                      \
                .bits_per_irq = bpi,                                    \
                .len = (bpi * VGIC_NR_PRIVATE_IRQS) / 8,                \
                .access_flags = acc,                                    \
                .read = vgic_mmio_read_raz,                             \
                .write = vgic_mmio_write_wi,                            \
        }, {                                                            \
                .reg_offset = off + (bpi * VGIC_NR_PRIVATE_IRQS) / 8,   \
                .bits_per_irq = bpi,                                    \
                .len = (bpi * (1024 - VGIC_NR_PRIVATE_IRQS)) / 8,       \
                .access_flags = acc,                                    \
                .read = rd,                                             \
                .write = wr,                                            \
                .uaccess_read = ur,                                     \
                .uaccess_write = uw,                                    \
        }
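
/*
 * Example expansion: with bpi = 1, as used for GICD_ISENABLER below, the
 * macro emits two regions. The first covers (1 * 32) / 8 = 4 bytes for the
 * private INTIDs 0-31 and is RAZ/WI, since SGIs and PPIs are handled in the
 * redistributor frames. The second region starts 4 bytes further in and
 * covers (1 * (1024 - 32)) / 8 = 124 bytes for the SPIs, wired to the real
 * read/write handlers.
 */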

static const struct vgic_register_region vgic_v3_dist_registers[] = {
        REGISTER_DESC_WITH_LENGTH(GICD_CTLR,
                vgic_mmio_read_v3_misc, vgic_mmio_write_v3_misc, 16,
                VGIC_ACCESS_32bit),
        REGISTER_DESC_WITH_LENGTH(GICD_STATUSR,
                vgic_mmio_read_rao, vgic_mmio_write_wi, 4,
                VGIC_ACCESS_32bit),
        REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_IGROUPR,
                vgic_mmio_read_rao, vgic_mmio_write_wi, NULL, NULL, 1,
                VGIC_ACCESS_32bit),
        REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_ISENABLER,
                vgic_mmio_read_enable, vgic_mmio_write_senable, NULL, NULL, 1,
                VGIC_ACCESS_32bit),
        REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_ICENABLER,
                vgic_mmio_read_enable, vgic_mmio_write_cenable, NULL, NULL, 1,
                VGIC_ACCESS_32bit),
        REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_ISPENDR,
                vgic_mmio_read_pending, vgic_mmio_write_spending,
                vgic_v3_uaccess_read_pending, vgic_v3_uaccess_write_pending, 1,
                VGIC_ACCESS_32bit),
        REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_ICPENDR,
                vgic_mmio_read_pending, vgic_mmio_write_cpending,
                vgic_mmio_read_raz, vgic_mmio_write_wi, 1,
                VGIC_ACCESS_32bit),
        REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_ISACTIVER,
                vgic_mmio_read_active, vgic_mmio_write_sactive,
                NULL, vgic_mmio_uaccess_write_sactive, 1,
                VGIC_ACCESS_32bit),
        REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_ICACTIVER,
                vgic_mmio_read_active, vgic_mmio_write_cactive,
                NULL, vgic_mmio_uaccess_write_cactive,
                1, VGIC_ACCESS_32bit),
        REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_IPRIORITYR,
                vgic_mmio_read_priority, vgic_mmio_write_priority, NULL, NULL,
                8, VGIC_ACCESS_32bit | VGIC_ACCESS_8bit),
        REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_ITARGETSR,
                vgic_mmio_read_raz, vgic_mmio_write_wi, NULL, NULL, 8,
                VGIC_ACCESS_32bit | VGIC_ACCESS_8bit),
        REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_ICFGR,
                vgic_mmio_read_config, vgic_mmio_write_config, NULL, NULL, 2,
                VGIC_ACCESS_32bit),
        REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_IGRPMODR,
                vgic_mmio_read_raz, vgic_mmio_write_wi, NULL, NULL, 1,
                VGIC_ACCESS_32bit),
        REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_IROUTER,
                vgic_mmio_read_irouter, vgic_mmio_write_irouter, NULL, NULL, 64,
                VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
        REGISTER_DESC_WITH_LENGTH(GICD_IDREGS,
                vgic_mmio_read_v3_idregs, vgic_mmio_write_wi, 48,
                VGIC_ACCESS_32bit),
};

static const struct vgic_register_region vgic_v3_rdbase_registers[] = {
        REGISTER_DESC_WITH_LENGTH(GICR_CTLR,
                vgic_mmio_read_v3r_ctlr, vgic_mmio_write_v3r_ctlr, 4,
                VGIC_ACCESS_32bit),
        REGISTER_DESC_WITH_LENGTH(GICR_STATUSR,
                vgic_mmio_read_raz, vgic_mmio_write_wi, 4,
                VGIC_ACCESS_32bit),
        REGISTER_DESC_WITH_LENGTH(GICR_IIDR,
                vgic_mmio_read_v3r_iidr, vgic_mmio_write_wi, 4,
                VGIC_ACCESS_32bit),
        REGISTER_DESC_WITH_LENGTH(GICR_TYPER,
                vgic_mmio_read_v3r_typer, vgic_mmio_write_wi, 8,
                VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
        REGISTER_DESC_WITH_LENGTH(GICR_WAKER,
                vgic_mmio_read_raz, vgic_mmio_write_wi, 4,
                VGIC_ACCESS_32bit),
        REGISTER_DESC_WITH_LENGTH(GICR_PROPBASER,
                vgic_mmio_read_propbase, vgic_mmio_write_propbase, 8,
                VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
        REGISTER_DESC_WITH_LENGTH(GICR_PENDBASER,
                vgic_mmio_read_pendbase, vgic_mmio_write_pendbase, 8,
                VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
        REGISTER_DESC_WITH_LENGTH(GICR_IDREGS,
                vgic_mmio_read_v3_idregs, vgic_mmio_write_wi, 48,
                VGIC_ACCESS_32bit),
};

static const struct vgic_register_region vgic_v3_sgibase_registers[] = {
        REGISTER_DESC_WITH_LENGTH(GICR_IGROUPR0,
                vgic_mmio_read_rao, vgic_mmio_write_wi, 4,
                VGIC_ACCESS_32bit),
        REGISTER_DESC_WITH_LENGTH(GICR_ISENABLER0,
                vgic_mmio_read_enable, vgic_mmio_write_senable, 4,
                VGIC_ACCESS_32bit),
        REGISTER_DESC_WITH_LENGTH(GICR_ICENABLER0,
                vgic_mmio_read_enable, vgic_mmio_write_cenable, 4,
                VGIC_ACCESS_32bit),
        REGISTER_DESC_WITH_LENGTH_UACCESS(GICR_ISPENDR0,
                vgic_mmio_read_pending, vgic_mmio_write_spending,
                vgic_v3_uaccess_read_pending, vgic_v3_uaccess_write_pending, 4,
                VGIC_ACCESS_32bit),
        REGISTER_DESC_WITH_LENGTH_UACCESS(GICR_ICPENDR0,
                vgic_mmio_read_pending, vgic_mmio_write_cpending,
                vgic_mmio_read_raz, vgic_mmio_write_wi, 4,
                VGIC_ACCESS_32bit),
        REGISTER_DESC_WITH_LENGTH_UACCESS(GICR_ISACTIVER0,
                vgic_mmio_read_active, vgic_mmio_write_sactive,
                NULL, vgic_mmio_uaccess_write_sactive,
                4, VGIC_ACCESS_32bit),
        REGISTER_DESC_WITH_LENGTH_UACCESS(GICR_ICACTIVER0,
                vgic_mmio_read_active, vgic_mmio_write_cactive,
                NULL, vgic_mmio_uaccess_write_cactive,
                4, VGIC_ACCESS_32bit),
        REGISTER_DESC_WITH_LENGTH(GICR_IPRIORITYR0,
                vgic_mmio_read_priority, vgic_mmio_write_priority, 32,
                VGIC_ACCESS_32bit | VGIC_ACCESS_8bit),
        REGISTER_DESC_WITH_LENGTH(GICR_ICFGR0,
                vgic_mmio_read_config, vgic_mmio_write_config, 8,
                VGIC_ACCESS_32bit),
        REGISTER_DESC_WITH_LENGTH(GICR_IGRPMODR0,
                vgic_mmio_read_raz, vgic_mmio_write_wi, 4,
                VGIC_ACCESS_32bit),
        REGISTER_DESC_WITH_LENGTH(GICR_NSACR,
                vgic_mmio_read_raz, vgic_mmio_write_wi, 4,
                VGIC_ACCESS_32bit),
};

unsigned int vgic_v3_init_dist_iodev(struct vgic_io_device *dev)
{
        dev->regions = vgic_v3_dist_registers;
        dev->nr_regions = ARRAY_SIZE(vgic_v3_dist_registers);

        kvm_iodevice_init(&dev->dev, &kvm_io_gic_ops);

        return SZ_64K;
}

/**
 * vgic_register_redist_iodev - register a single redist iodev
 * @vcpu:    The VCPU to which the redistributor belongs
 *
 * Register a KVM iodev for this VCPU's redistributor using the address
 * provided.
 *
 * Return 0 on success, -ERRNO otherwise.
 */
int vgic_register_redist_iodev(struct kvm_vcpu *vcpu)
{
        struct kvm *kvm = vcpu->kvm;
        struct vgic_dist *vgic = &kvm->arch.vgic;
        struct vgic_io_device *rd_dev = &vcpu->arch.vgic_cpu.rd_iodev;
        struct vgic_io_device *sgi_dev = &vcpu->arch.vgic_cpu.sgi_iodev;
        gpa_t rd_base, sgi_base;
        int ret;

        /*
         * We may be creating VCPUs before having set the base address for the
         * redistributor region, in which case we will come back to this
         * function for all VCPUs when the base address is set.  Just return
         * without doing any work for now.
         */
        if (IS_VGIC_ADDR_UNDEF(vgic->vgic_redist_base))
                return 0;

        if (!vgic_v3_check_base(kvm))
                return -EINVAL;

        rd_base = vgic->vgic_redist_base + vgic->vgic_redist_free_offset;
        sgi_base = rd_base + SZ_64K;

        kvm_iodevice_init(&rd_dev->dev, &kvm_io_gic_ops);
        rd_dev->base_addr = rd_base;
        rd_dev->iodev_type = IODEV_REDIST;
        rd_dev->regions = vgic_v3_rdbase_registers;
        rd_dev->nr_regions = ARRAY_SIZE(vgic_v3_rdbase_registers);
        rd_dev->redist_vcpu = vcpu;

        mutex_lock(&kvm->slots_lock);
        ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, rd_base,
                                      SZ_64K, &rd_dev->dev);
        mutex_unlock(&kvm->slots_lock);

        if (ret)
                return ret;

        kvm_iodevice_init(&sgi_dev->dev, &kvm_io_gic_ops);
        sgi_dev->base_addr = sgi_base;
        sgi_dev->iodev_type = IODEV_REDIST;
        sgi_dev->regions = vgic_v3_sgibase_registers;
        sgi_dev->nr_regions = ARRAY_SIZE(vgic_v3_sgibase_registers);
        sgi_dev->redist_vcpu = vcpu;

        mutex_lock(&kvm->slots_lock);
        ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, sgi_base,
                                      SZ_64K, &sgi_dev->dev);
        if (ret) {
                kvm_io_bus_unregister_dev(kvm, KVM_MMIO_BUS,
                                          &rd_dev->dev);
                goto out;
        }

        vgic->vgic_redist_free_offset += 2 * SZ_64K;
out:
        mutex_unlock(&kvm->slots_lock);
        return ret;
}

static void vgic_unregister_redist_iodev(struct kvm_vcpu *vcpu)
{
        struct vgic_io_device *rd_dev = &vcpu->arch.vgic_cpu.rd_iodev;
        struct vgic_io_device *sgi_dev = &vcpu->arch.vgic_cpu.sgi_iodev;

        kvm_io_bus_unregister_dev(vcpu->kvm, KVM_MMIO_BUS, &rd_dev->dev);
        kvm_io_bus_unregister_dev(vcpu->kvm, KVM_MMIO_BUS, &sgi_dev->dev);
}

static int vgic_register_all_redist_iodevs(struct kvm *kvm)
{
        struct kvm_vcpu *vcpu;
        int c, ret = 0;

        kvm_for_each_vcpu(c, vcpu, kvm) {
                ret = vgic_register_redist_iodev(vcpu);
                if (ret)
                        break;
        }

        if (ret) {
                /* The current c failed, so we start with the previous one. */
                mutex_lock(&kvm->slots_lock);
                for (c--; c >= 0; c--) {
                        vcpu = kvm_get_vcpu(kvm, c);
                        vgic_unregister_redist_iodev(vcpu);
                }
                mutex_unlock(&kvm->slots_lock);
        }

        return ret;
}

int vgic_v3_set_redist_base(struct kvm *kvm, u64 addr)
{
        struct vgic_dist *vgic = &kvm->arch.vgic;
        int ret;

        /* vgic_check_ioaddr makes sure we don't do this twice */
        ret = vgic_check_ioaddr(kvm, &vgic->vgic_redist_base, addr, SZ_64K);
        if (ret)
                return ret;

        vgic->vgic_redist_base = addr;
        if (!vgic_v3_check_base(kvm)) {
                vgic->vgic_redist_base = VGIC_ADDR_UNDEF;
                return -EINVAL;
        }

        /*
         * Register iodevs for each existing VCPU.  Adding more VCPUs
         * afterwards will register the iodevs when needed.
         */
        ret = vgic_register_all_redist_iodevs(kvm);
        if (ret)
                return ret;

        return 0;
}

int vgic_v3_has_attr_regs(struct kvm_device *dev, struct kvm_device_attr *attr)
{
        const struct vgic_register_region *region;
        struct vgic_io_device iodev;
        struct vgic_reg_attr reg_attr;
        struct kvm_vcpu *vcpu;
        gpa_t addr;
        int ret;

        ret = vgic_v3_parse_attr(dev, attr, &reg_attr);
        if (ret)
                return ret;

        vcpu = reg_attr.vcpu;
        addr = reg_attr.addr;

        switch (attr->group) {
        case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
                iodev.regions = vgic_v3_dist_registers;
                iodev.nr_regions = ARRAY_SIZE(vgic_v3_dist_registers);
                iodev.base_addr = 0;
                break;
        case KVM_DEV_ARM_VGIC_GRP_REDIST_REGS: {
                iodev.regions = vgic_v3_rdbase_registers;
                iodev.nr_regions = ARRAY_SIZE(vgic_v3_rdbase_registers);
                iodev.base_addr = 0;
                break;
        }
        case KVM_DEV_ARM_VGIC_GRP_CPU_SYSREGS: {
                u64 reg, id;

                id = (attr->attr & KVM_DEV_ARM_VGIC_SYSREG_INSTR_MASK);
                return vgic_v3_has_cpu_sysregs_attr(vcpu, 0, id, &reg);
        }
        default:
                return -ENXIO;
        }

        /* We only support aligned 32-bit accesses. */
        if (addr & 3)
                return -ENXIO;

        region = vgic_get_mmio_region(vcpu, &iodev, addr, sizeof(u32));
        if (!region)
                return -ENXIO;

        return 0;
}

/*
 * Compare a given affinity (level 1-3 and a level 0 mask, from the SGI
 * generation register ICC_SGI1R_EL1) with a given VCPU.
 * If the VCPU's MPIDR matches, return the level0 affinity, otherwise
 * return -1.
 */
static int match_mpidr(u64 sgi_aff, u16 sgi_cpu_mask, struct kvm_vcpu *vcpu)
{
        unsigned long affinity;
        int level0;

        /*
         * Split the current VCPU's MPIDR into affinity level 0 and the
         * rest as this is what we have to compare against.
         */
        affinity = kvm_vcpu_get_mpidr_aff(vcpu);
        level0 = MPIDR_AFFINITY_LEVEL(affinity, 0);
        affinity &= ~MPIDR_LEVEL_MASK;

        /* bail out if the upper three levels don't match */
        if (sgi_aff != affinity)
                return -1;

        /* Is this VCPU's bit set in the mask ? */
        if (!(sgi_cpu_mask & BIT(level0)))
                return -1;

        return level0;
}

/*
 * The ICC_SGI* registers encode the affinity differently from the MPIDR,
 * so provide a wrapper to use the existing defines to isolate a certain
 * affinity level.
 */
#define SGI_AFFINITY_LEVEL(reg, level) \
        ((((reg) & ICC_SGI1R_AFFINITY_## level ##_MASK) \
        >> ICC_SGI1R_AFFINITY_## level ##_SHIFT) << MPIDR_LEVEL_SHIFT(level))
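
/*
 * Worked example, assuming the field layout from
 * include/linux/irqchip/arm-gic-v3.h: SGI_AFFINITY_LEVEL(reg, 1) takes the
 * Aff1 byte of ICC_SGI1R_EL1 (bits [23:16]) and moves it to the Aff1
 * position of an MPIDR-style value (bits [15:8]); level 2 moves bits
 * [39:32] down to [23:16] and level 3 moves bits [55:48] down to [39:32].
 */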

/**
 * vgic_v3_dispatch_sgi - handle SGI requests from VCPUs
 * @vcpu: The VCPU requesting an SGI
 * @reg: The value written into the ICC_SGI1R_EL1 register by that VCPU
 *
 * With GICv3 (and ARE=1) CPUs trigger SGIs by writing to a system register.
 * This will trap in sys_regs.c and call this function.
 * The ICC_SGI1R_EL1 register contains the upper three affinity levels of the
 * target processors as well as a bitmask of 16 Aff0 CPUs.
 * If the interrupt routing mode bit is not set, we iterate over all VCPUs to
 * check for matching ones. If this bit is set, we signal all VCPUs except
 * the calling one.
 */
void vgic_v3_dispatch_sgi(struct kvm_vcpu *vcpu, u64 reg)
{
        struct kvm *kvm = vcpu->kvm;
        struct kvm_vcpu *c_vcpu;
        u16 target_cpus;
        u64 mpidr;
        int sgi, c;
        int vcpu_id = vcpu->vcpu_id;
        bool broadcast;
        unsigned long flags;

        sgi = (reg & ICC_SGI1R_SGI_ID_MASK) >> ICC_SGI1R_SGI_ID_SHIFT;
        broadcast = reg & BIT_ULL(ICC_SGI1R_IRQ_ROUTING_MODE_BIT);
        target_cpus = (reg & ICC_SGI1R_TARGET_LIST_MASK) >> ICC_SGI1R_TARGET_LIST_SHIFT;
        mpidr = SGI_AFFINITY_LEVEL(reg, 3);
        mpidr |= SGI_AFFINITY_LEVEL(reg, 2);
        mpidr |= SGI_AFFINITY_LEVEL(reg, 1);

        /*
         * We iterate over all VCPUs to find the MPIDRs matching the request.
         * If we have handled one CPU, we clear its bit to detect early
         * if we are already finished. This avoids iterating through all
         * VCPUs when most of the time we just signal a single VCPU.
         */
        kvm_for_each_vcpu(c, c_vcpu, kvm) {
                struct vgic_irq *irq;

                /* Exit early if we have dealt with all requested CPUs */
                if (!broadcast && target_cpus == 0)
                        break;

                /* Don't signal the calling VCPU */
                if (broadcast && c == vcpu_id)
                        continue;

                if (!broadcast) {
                        int level0;

                        level0 = match_mpidr(mpidr, target_cpus, c_vcpu);
                        if (level0 == -1)
                                continue;

                        /* remove this matching VCPU from the mask */
                        target_cpus &= ~BIT(level0);
                }

                irq = vgic_get_irq(vcpu->kvm, c_vcpu, sgi);

                spin_lock_irqsave(&irq->irq_lock, flags);
                irq->pending_latch = true;

                vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
                vgic_put_irq(vcpu->kvm, irq);
        }
}
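
/*
 * Illustrative decode with a hypothetical register value:
 *
 *      reg = (5ULL << ICC_SGI1R_SGI_ID_SHIFT) |
 *            (1ULL << ICC_SGI1R_AFFINITY_1_SHIFT) |
 *            BIT(3);
 *
 * requests SGI 5 for the single VCPU whose MPIDR has Aff3.Aff2.Aff1 = 0.0.1
 * and Aff0 = 3 (bit 3 of the target list); the routing mode bit is clear,
 * so the loop above signals exactly that VCPU.
 */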

int vgic_v3_dist_uaccess(struct kvm_vcpu *vcpu, bool is_write,
                         int offset, u32 *val)
{
        struct vgic_io_device dev = {
                .regions = vgic_v3_dist_registers,
                .nr_regions = ARRAY_SIZE(vgic_v3_dist_registers),
        };

        return vgic_uaccess(vcpu, &dev, is_write, offset, val);
}

int vgic_v3_redist_uaccess(struct kvm_vcpu *vcpu, bool is_write,
                           int offset, u32 *val)
{
        struct vgic_io_device rd_dev = {
                .regions = vgic_v3_rdbase_registers,
                .nr_regions = ARRAY_SIZE(vgic_v3_rdbase_registers),
        };

        struct vgic_io_device sgi_dev = {
                .regions = vgic_v3_sgibase_registers,
                .nr_regions = ARRAY_SIZE(vgic_v3_sgibase_registers),
        };

        /* SGI_base is the next 64K frame after RD_base */
        if (offset >= SZ_64K)
                return vgic_uaccess(vcpu, &sgi_dev, is_write, offset - SZ_64K,
                                    val);
        else
                return vgic_uaccess(vcpu, &rd_dev, is_write, offset, val);
}
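
/*
 * Example: a userspace access at offset 0x10100 into the redistributor
 * falls in the SGI frame and is forwarded as offset 0x100, which the
 * vgic_v3_sgibase_registers table resolves to GICR_ISENABLER0; offsets
 * below SZ_64K are looked up in the RD frame table instead.
 */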

int vgic_v3_line_level_info_uaccess(struct kvm_vcpu *vcpu, bool is_write,
                                    u32 intid, u64 *val)
{
        if (intid % 32)
                return -EINVAL;

        if (is_write)
                vgic_write_irq_line_level_info(vcpu, intid, *val);
        else
                *val = vgic_read_irq_line_level_info(vcpu, intid);

        return 0;
}