linux/virt/kvm/arm/vgic/vgic-mmio.c
/*
 * VGIC MMIO handling functions
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/bitops.h>
#include <linux/bsearch.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <kvm/iodev.h>
#include <kvm/arm_arch_timer.h>
#include <kvm/arm_vgic.h>

#include "vgic.h"
#include "vgic-mmio.h"

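/*
 * Generic read/write helpers: Read-As-Zero, Read-As-One and Write-Ignore,
 * used for reserved registers and for registers whose state we do not model.
 */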
unsigned long vgic_mmio_read_raz(struct kvm_vcpu *vcpu,
                                 gpa_t addr, unsigned int len)
{
        return 0;
}

unsigned long vgic_mmio_read_rao(struct kvm_vcpu *vcpu,
                                 gpa_t addr, unsigned int len)
{
        return -1UL;
}

void vgic_mmio_write_wi(struct kvm_vcpu *vcpu, gpa_t addr,
                        unsigned int len, unsigned long val)
{
        /* Ignore */
}

int vgic_mmio_uaccess_write_wi(struct kvm_vcpu *vcpu, gpa_t addr,
                               unsigned int len, unsigned long val)
{
        /* Ignore */
        return 0;
}

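/*
 * GICD_IGROUPR: one bit per interrupt. Reads return the current group of
 * each interrupt in the bank; writes update the group and, since a group
 * change can make an interrupt deliverable, requeue it via
 * vgic_queue_irq_unlock().
 */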
unsigned long vgic_mmio_read_group(struct kvm_vcpu *vcpu,
                                   gpa_t addr, unsigned int len)
{
        u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
        u32 value = 0;
        int i;

        /* Loop over all IRQs affected by this read */
        for (i = 0; i < len * 8; i++) {
                struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

                if (irq->group)
                        value |= BIT(i);

                vgic_put_irq(vcpu->kvm, irq);
        }

        return value;
}

void vgic_mmio_write_group(struct kvm_vcpu *vcpu, gpa_t addr,
                           unsigned int len, unsigned long val)
{
        u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
        int i;
        unsigned long flags;

        for (i = 0; i < len * 8; i++) {
                struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

                raw_spin_lock_irqsave(&irq->irq_lock, flags);
                irq->group = !!(val & BIT(i));
                vgic_queue_irq_unlock(vcpu->kvm, irq, flags);

                vgic_put_irq(vcpu->kvm, irq);
        }
}

/*
 * Read accesses to both GICD_ICENABLER and GICD_ISENABLER return the value
 * of the enabled bit, so there is only one function for both here.
 */
unsigned long vgic_mmio_read_enable(struct kvm_vcpu *vcpu,
                                    gpa_t addr, unsigned int len)
{
        u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
        u32 value = 0;
        int i;

        /* Loop over all IRQs affected by this read */
        for (i = 0; i < len * 8; i++) {
                struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

                if (irq->enabled)
                        value |= (1U << i);

                vgic_put_irq(vcpu->kvm, irq);
        }

        return value;
}

void vgic_mmio_write_senable(struct kvm_vcpu *vcpu,
                             gpa_t addr, unsigned int len,
                             unsigned long val)
{
        u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
        int i;
        unsigned long flags;

        for_each_set_bit(i, &val, len * 8) {
                struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

                raw_spin_lock_irqsave(&irq->irq_lock, flags);
                irq->enabled = true;
                vgic_queue_irq_unlock(vcpu->kvm, irq, flags);

                vgic_put_irq(vcpu->kvm, irq);
        }
}

void vgic_mmio_write_cenable(struct kvm_vcpu *vcpu,
                             gpa_t addr, unsigned int len,
                             unsigned long val)
{
        u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
        int i;
        unsigned long flags;

        for_each_set_bit(i, &val, len * 8) {
                struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

                raw_spin_lock_irqsave(&irq->irq_lock, flags);

                irq->enabled = false;

                raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
                vgic_put_irq(vcpu->kvm, irq);
        }
}

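/*
 * GICD_ISPENDR/ICPENDR reads: one bit per interrupt. The pending state is
 * sampled under the irq_lock since it is derived from both the pending
 * latch and the line level (see irq_is_pending()).
 */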
unsigned long vgic_mmio_read_pending(struct kvm_vcpu *vcpu,
                                     gpa_t addr, unsigned int len)
{
        u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
        u32 value = 0;
        int i;

        /* Loop over all IRQs affected by this read */
        for (i = 0; i < len * 8; i++) {
                struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
                unsigned long flags;

                raw_spin_lock_irqsave(&irq->irq_lock, flags);
                if (irq_is_pending(irq))
                        value |= (1U << i);
                raw_spin_unlock_irqrestore(&irq->irq_lock, flags);

                vgic_put_irq(vcpu->kvm, irq);
        }

        return value;
}

/*
 * This function will return the VCPU that performed the MMIO access and
 * trapped from within the VM, and will return NULL if this is a userspace
 * access.
 *
 * We can disable preemption locally around accessing the per-CPU variable,
 * and use the resolved vcpu pointer after enabling preemption again, because
 * even if the current thread is migrated to another CPU, reading the per-CPU
 * value later will give us the same value as we update the per-CPU variable
 * in the preempt notifier handlers.
 */
static struct kvm_vcpu *vgic_get_mmio_requester_vcpu(void)
{
        struct kvm_vcpu *vcpu;

        preempt_disable();
        vcpu = kvm_arm_get_running_vcpu();
        preempt_enable();
        return vcpu;
}

/* Must be called with irq->irq_lock held */
static void vgic_hw_irq_spending(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
                                 bool is_uaccess)
{
        if (is_uaccess)
                return;

        irq->pending_latch = true;
        vgic_irq_set_phys_active(irq, true);
}

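/*
 * GICD_ISPENDR writes: each set bit latches the corresponding interrupt
 * pending. HW mapped interrupts additionally have their physical active
 * state set for guest accesses, while userspace accesses leave them
 * untouched.
 */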
void vgic_mmio_write_spending(struct kvm_vcpu *vcpu,
                              gpa_t addr, unsigned int len,
                              unsigned long val)
{
        bool is_uaccess = !vgic_get_mmio_requester_vcpu();
        u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
        int i;
        unsigned long flags;

        for_each_set_bit(i, &val, len * 8) {
                struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

                raw_spin_lock_irqsave(&irq->irq_lock, flags);
                if (irq->hw)
                        vgic_hw_irq_spending(vcpu, irq, is_uaccess);
                else
                        irq->pending_latch = true;
                vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
                vgic_put_irq(vcpu->kvm, irq);
        }
}

/* Must be called with irq->irq_lock held */
static void vgic_hw_irq_cpending(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
                                 bool is_uaccess)
{
        if (is_uaccess)
                return;

        irq->pending_latch = false;

        /*
         * We don't want the guest to effectively mask the physical
         * interrupt by doing a write to SPENDR followed by a write to
         * CPENDR for HW interrupts, so we clear the active state on
         * the physical side if the virtual interrupt is not active.
         * This may lead to taking an additional interrupt on the
         * host, but that should not be a problem as the worst that
         * can happen is an additional vgic injection.  We also clear
         * the pending state to maintain proper semantics for edge HW
         * interrupts.
         */
        vgic_irq_set_phys_pending(irq, false);
        if (!irq->active)
                vgic_irq_set_phys_active(irq, false);
}

void vgic_mmio_write_cpending(struct kvm_vcpu *vcpu,
                              gpa_t addr, unsigned int len,
                              unsigned long val)
{
        bool is_uaccess = !vgic_get_mmio_requester_vcpu();
        u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
        int i;
        unsigned long flags;

        for_each_set_bit(i, &val, len * 8) {
                struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

                raw_spin_lock_irqsave(&irq->irq_lock, flags);

                if (irq->hw)
                        vgic_hw_irq_cpending(vcpu, irq, is_uaccess);
                else
                        irq->pending_latch = false;

                raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
                vgic_put_irq(vcpu->kvm, irq);
        }
}

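/* GICD_ISACTIVER/ICACTIVER reads: one bit per interrupt, taken from irq->active. */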
unsigned long vgic_mmio_read_active(struct kvm_vcpu *vcpu,
                                    gpa_t addr, unsigned int len)
{
        u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
        u32 value = 0;
        int i;

        /* Loop over all IRQs affected by this read */
        for (i = 0; i < len * 8; i++) {
                struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

                if (irq->active)
                        value |= (1U << i);

                vgic_put_irq(vcpu->kvm, irq);
        }

        return value;
}

/* Must be called with irq->irq_lock held */
static void vgic_hw_irq_change_active(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
                                      bool active, bool is_uaccess)
{
        if (is_uaccess)
                return;

        irq->active = active;
        vgic_irq_set_phys_active(irq, active);
}

static void vgic_mmio_change_active(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
                                    bool active)
{
        unsigned long flags;
        struct kvm_vcpu *requester_vcpu = vgic_get_mmio_requester_vcpu();

        raw_spin_lock_irqsave(&irq->irq_lock, flags);

        if (irq->hw) {
                vgic_hw_irq_change_active(vcpu, irq, active, !requester_vcpu);
        } else {
                u32 model = vcpu->kvm->arch.vgic.vgic_model;
                u8 active_source;

                irq->active = active;

                /*
                 * The GICv2 architecture indicates that the source CPUID for
                 * an SGI should be provided during an EOI which implies that
                 * the active state is stored somewhere, but at the same time
                 * this state is not architecturally exposed anywhere and we
                 * have no way of knowing the right source.
                 *
                 * This may lead to a VCPU not being able to receive
                 * additional instances of a particular SGI after migration
                 * for a GICv2 VM on some GIC implementations.  Oh well.
                 */
                active_source = (requester_vcpu) ? requester_vcpu->vcpu_id : 0;

                if (model == KVM_DEV_TYPE_ARM_VGIC_V2 &&
                    active && vgic_irq_is_sgi(irq->intid))
                        irq->active_source = active_source;
        }

        if (irq->active)
                vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
        else
                raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
}

/*
 * If we are fiddling with an IRQ's active state, we have to make sure the IRQ
 * is not queued on some running VCPU's LRs, because then the change to the
 * active state can be overwritten when the VCPU's state is synced coming back
 * from the guest.
 *
 * For shared interrupts, we have to stop all the VCPUs because interrupts can
 * be migrated while we don't hold the IRQ locks and we don't want to be
 * chasing moving targets.
 *
 * For private interrupts we don't have to do anything because userspace
 * accesses to the VGIC state already require all VCPUs to be stopped, and
 * only the VCPU itself can modify its private interrupts active state, which
 * guarantees that the VCPU is not running.
 */
static void vgic_change_active_prepare(struct kvm_vcpu *vcpu, u32 intid)
{
        if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3 ||
            intid > VGIC_NR_PRIVATE_IRQS)
                kvm_arm_halt_guest(vcpu->kvm);
}

/* See vgic_change_active_prepare */
static void vgic_change_active_finish(struct kvm_vcpu *vcpu, u32 intid)
{
        if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3 ||
            intid > VGIC_NR_PRIVATE_IRQS)
                kvm_arm_resume_guest(vcpu->kvm);
}

static void __vgic_mmio_write_cactive(struct kvm_vcpu *vcpu,
                                      gpa_t addr, unsigned int len,
                                      unsigned long val)
{
        u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
        int i;

        for_each_set_bit(i, &val, len * 8) {
                struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
                vgic_mmio_change_active(vcpu, irq, false);
                vgic_put_irq(vcpu->kvm, irq);
        }
}

void vgic_mmio_write_cactive(struct kvm_vcpu *vcpu,
                             gpa_t addr, unsigned int len,
                             unsigned long val)
{
        u32 intid = VGIC_ADDR_TO_INTID(addr, 1);

        mutex_lock(&vcpu->kvm->lock);
        vgic_change_active_prepare(vcpu, intid);

        __vgic_mmio_write_cactive(vcpu, addr, len, val);

        vgic_change_active_finish(vcpu, intid);
        mutex_unlock(&vcpu->kvm->lock);
}

int vgic_mmio_uaccess_write_cactive(struct kvm_vcpu *vcpu,
                                     gpa_t addr, unsigned int len,
                                     unsigned long val)
{
        __vgic_mmio_write_cactive(vcpu, addr, len, val);
        return 0;
}

static void __vgic_mmio_write_sactive(struct kvm_vcpu *vcpu,
                                      gpa_t addr, unsigned int len,
                                      unsigned long val)
{
        u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
        int i;

        for_each_set_bit(i, &val, len * 8) {
                struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
                vgic_mmio_change_active(vcpu, irq, true);
                vgic_put_irq(vcpu->kvm, irq);
        }
}

void vgic_mmio_write_sactive(struct kvm_vcpu *vcpu,
                             gpa_t addr, unsigned int len,
                             unsigned long val)
{
        u32 intid = VGIC_ADDR_TO_INTID(addr, 1);

        mutex_lock(&vcpu->kvm->lock);
        vgic_change_active_prepare(vcpu, intid);

        __vgic_mmio_write_sactive(vcpu, addr, len, val);

        vgic_change_active_finish(vcpu, intid);
        mutex_unlock(&vcpu->kvm->lock);
}

int vgic_mmio_uaccess_write_sactive(struct kvm_vcpu *vcpu,
                                     gpa_t addr, unsigned int len,
                                     unsigned long val)
{
        __vgic_mmio_write_sactive(vcpu, addr, len, val);
        return 0;
}

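/*
 * GICD_IPRIORITYR: one byte per interrupt. Reads return the stored priority;
 * the value was already narrowed to the implemented VGIC_PRI_BITS when it
 * was written.
 */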
unsigned long vgic_mmio_read_priority(struct kvm_vcpu *vcpu,
                                      gpa_t addr, unsigned int len)
{
        u32 intid = VGIC_ADDR_TO_INTID(addr, 8);
        int i;
        u64 val = 0;

        for (i = 0; i < len; i++) {
                struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

                val |= (u64)irq->priority << (i * 8);

                vgic_put_irq(vcpu->kvm, irq);
        }

        return val;
}

/*
 * We currently don't handle changing the priority of an interrupt that
 * is already pending on a VCPU. If there is a need for this, we would
 * need to make this VCPU exit and re-evaluate the priorities, potentially
 * leading to this interrupt getting presented now to the guest (if it has
 * been masked by the priority mask before).
 */
void vgic_mmio_write_priority(struct kvm_vcpu *vcpu,
                              gpa_t addr, unsigned int len,
                              unsigned long val)
{
        u32 intid = VGIC_ADDR_TO_INTID(addr, 8);
        int i;
        unsigned long flags;

        for (i = 0; i < len; i++) {
                struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

                raw_spin_lock_irqsave(&irq->irq_lock, flags);
                /* Narrow the priority range to what we actually support */
                irq->priority = (val >> (i * 8)) & GENMASK(7, 8 - VGIC_PRI_BITS);
                raw_spin_unlock_irqrestore(&irq->irq_lock, flags);

                vgic_put_irq(vcpu->kvm, irq);
        }
}

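/*
 * GICD_ICFGR: two bits per interrupt, of which only the upper bit is
 * writable. A field value of 0b10 means edge-triggered, 0b00 means
 * level-sensitive.
 */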
unsigned long vgic_mmio_read_config(struct kvm_vcpu *vcpu,
                                    gpa_t addr, unsigned int len)
{
        u32 intid = VGIC_ADDR_TO_INTID(addr, 2);
        u32 value = 0;
        int i;

        for (i = 0; i < len * 4; i++) {
                struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

                if (irq->config == VGIC_CONFIG_EDGE)
                        value |= (2U << (i * 2));

                vgic_put_irq(vcpu->kvm, irq);
        }

        return value;
}

void vgic_mmio_write_config(struct kvm_vcpu *vcpu,
                            gpa_t addr, unsigned int len,
                            unsigned long val)
{
        u32 intid = VGIC_ADDR_TO_INTID(addr, 2);
        int i;
        unsigned long flags;

        for (i = 0; i < len * 4; i++) {
                struct vgic_irq *irq;

                /*
                 * The configuration cannot be changed for SGIs in general,
                 * for PPIs this is IMPLEMENTATION DEFINED. The arch timer
                 * code relies on PPIs being level triggered, so we also
                 * make them read-only here.
                 */
                if (intid + i < VGIC_NR_PRIVATE_IRQS)
                        continue;

                irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
                raw_spin_lock_irqsave(&irq->irq_lock, flags);

                if (test_bit(i * 2 + 1, &val))
                        irq->config = VGIC_CONFIG_EDGE;
                else
                        irq->config = VGIC_CONFIG_LEVEL;

                raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
                vgic_put_irq(vcpu->kvm, irq);
        }
}

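/*
 * Userspace save/restore of the line level of level-triggered interrupts,
 * 32 interrupts per access. SGIs and out-of-range interrupts are skipped.
 */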
u64 vgic_read_irq_line_level_info(struct kvm_vcpu *vcpu, u32 intid)
{
        int i;
        u64 val = 0;
        int nr_irqs = vcpu->kvm->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS;

        for (i = 0; i < 32; i++) {
                struct vgic_irq *irq;

                if ((intid + i) < VGIC_NR_SGIS || (intid + i) >= nr_irqs)
                        continue;

                irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
                if (irq->config == VGIC_CONFIG_LEVEL && irq->line_level)
                        val |= (1U << i);

                vgic_put_irq(vcpu->kvm, irq);
        }

        return val;
}

void vgic_write_irq_line_level_info(struct kvm_vcpu *vcpu, u32 intid,
                                    const u64 val)
{
        int i;
        int nr_irqs = vcpu->kvm->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS;
        unsigned long flags;

        for (i = 0; i < 32; i++) {
                struct vgic_irq *irq;
                bool new_level;

                if ((intid + i) < VGIC_NR_SGIS || (intid + i) >= nr_irqs)
                        continue;

                irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

                /*
                 * The line level is set irrespective of irq type
                 * (level or edge) so that restoring it does not
                 * depend on the irq configuration having been
                 * restored first.
                 */
                new_level = !!(val & (1U << i));
                raw_spin_lock_irqsave(&irq->irq_lock, flags);
                irq->line_level = new_level;
                if (new_level)
                        vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
                else
                        raw_spin_unlock_irqrestore(&irq->irq_lock, flags);

                vgic_put_irq(vcpu->kvm, irq);
        }
}

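/* bsearch() comparator: find the register region containing 'offset'. */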
static int match_region(const void *key, const void *elt)
{
        const unsigned int offset = (unsigned long)key;
        const struct vgic_register_region *region = elt;

        if (offset < region->reg_offset)
                return -1;

        if (offset >= region->reg_offset + region->len)
                return 1;

        return 0;
}

const struct vgic_register_region *
vgic_find_mmio_region(const struct vgic_register_region *regions,
                      int nr_regions, unsigned int offset)
{
        return bsearch((void *)(uintptr_t)offset, regions, nr_regions,
                       sizeof(regions[0]), match_region);
}

void vgic_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr)
{
        if (kvm_vgic_global_state.type == VGIC_V2)
                vgic_v2_set_vmcr(vcpu, vmcr);
        else
                vgic_v3_set_vmcr(vcpu, vmcr);
}

void vgic_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr)
{
        if (kvm_vgic_global_state.type == VGIC_V2)
                vgic_v2_get_vmcr(vcpu, vmcr);
        else
                vgic_v3_get_vmcr(vcpu, vmcr);
}

/*
 * kvm_mmio_read_buf() returns a value in a format where it can be converted
 * to a byte array and be directly observed as the guest wanted it to appear
 * in memory if it had done the store itself, which is LE for the GIC, as the
 * guest knows the GIC is always LE.
 *
 * We convert this value to the CPU's native format to deal with it as a data
 * value.
 */
unsigned long vgic_data_mmio_bus_to_host(const void *val, unsigned int len)
{
        unsigned long data = kvm_mmio_read_buf(val, len);

        switch (len) {
        case 1:
                return data;
        case 2:
                return le16_to_cpu(data);
        case 4:
                return le32_to_cpu(data);
        default:
                return le64_to_cpu(data);
        }
}

/*
 * kvm_mmio_write_buf() expects a value in a format such that if converted to
 * a byte array it is observed as the guest would see it if it could perform
 * the load directly.  Since the GIC is LE, and the guest knows this, the
 * guest expects a value in little endian format.
 *
 * We convert the data value from the CPU's native format to LE so that the
 * value is returned in the proper format.
 */
void vgic_data_host_to_mmio_bus(void *buf, unsigned int len,
                                unsigned long data)
{
        switch (len) {
        case 1:
                break;
        case 2:
                data = cpu_to_le16(data);
                break;
        case 4:
                data = cpu_to_le32(data);
                break;
        default:
                data = cpu_to_le64(data);
        }

        kvm_mmio_write_buf(buf, len, data);
}

static
struct vgic_io_device *kvm_to_vgic_iodev(const struct kvm_io_device *dev)
{
        return container_of(dev, struct vgic_io_device, dev);
}

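/*
 * Check that an access of the given width and alignment is allowed for this
 * region, and that it does not touch an interrupt beyond the ones allocated
 * for this VM.
 */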
static bool check_region(const struct kvm *kvm,
                         const struct vgic_register_region *region,
                         gpa_t addr, int len)
{
        int flags, nr_irqs = kvm->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS;

        switch (len) {
        case sizeof(u8):
                flags = VGIC_ACCESS_8bit;
                break;
        case sizeof(u32):
                flags = VGIC_ACCESS_32bit;
                break;
        case sizeof(u64):
                flags = VGIC_ACCESS_64bit;
                break;
        default:
                return false;
        }

        if ((region->access_flags & flags) && IS_ALIGNED(addr, len)) {
                if (!region->bits_per_irq)
                        return true;

                /* Do we access a non-allocated IRQ? */
                return VGIC_ADDR_TO_INTID(addr, region->bits_per_irq) < nr_irqs;
        }

        return false;
}

const struct vgic_register_region *
vgic_get_mmio_region(struct kvm_vcpu *vcpu, struct vgic_io_device *iodev,
                     gpa_t addr, int len)
{
        const struct vgic_register_region *region;

        region = vgic_find_mmio_region(iodev->regions, iodev->nr_regions,
                                       addr - iodev->base_addr);
        if (!region || !check_region(vcpu->kvm, region, addr, len))
                return NULL;

        return region;
}

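/*
 * Userspace accesses are always 32 bits wide and prefer a region's dedicated
 * uaccess_read/uaccess_write handlers when it provides them.
 */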
static int vgic_uaccess_read(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
                             gpa_t addr, u32 *val)
{
        struct vgic_io_device *iodev = kvm_to_vgic_iodev(dev);
        const struct vgic_register_region *region;
        struct kvm_vcpu *r_vcpu;

        region = vgic_get_mmio_region(vcpu, iodev, addr, sizeof(u32));
        if (!region) {
                *val = 0;
                return 0;
        }

        r_vcpu = iodev->redist_vcpu ? iodev->redist_vcpu : vcpu;
        if (region->uaccess_read)
                *val = region->uaccess_read(r_vcpu, addr, sizeof(u32));
        else
                *val = region->read(r_vcpu, addr, sizeof(u32));

        return 0;
}

static int vgic_uaccess_write(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
                              gpa_t addr, const u32 *val)
{
        struct vgic_io_device *iodev = kvm_to_vgic_iodev(dev);
        const struct vgic_register_region *region;
        struct kvm_vcpu *r_vcpu;

        region = vgic_get_mmio_region(vcpu, iodev, addr, sizeof(u32));
        if (!region)
                return 0;

        r_vcpu = iodev->redist_vcpu ? iodev->redist_vcpu : vcpu;
        if (region->uaccess_write)
                return region->uaccess_write(r_vcpu, addr, sizeof(u32), *val);

        region->write(r_vcpu, addr, sizeof(u32), *val);
        return 0;
}

/*
 * Userland access to VGIC registers.
 */
int vgic_uaccess(struct kvm_vcpu *vcpu, struct vgic_io_device *dev,
                 bool is_write, int offset, u32 *val)
{
        if (is_write)
                return vgic_uaccess_write(vcpu, &dev->dev, offset, val);
        else
                return vgic_uaccess_read(vcpu, &dev->dev, offset, val);
}

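/*
 * kvm_io_bus callbacks for guest MMIO: look up the register region, dispatch
 * to the right handler for the device type and convert between the
 * (little-endian) bus format and host byte order.
 */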
static int dispatch_mmio_read(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
                              gpa_t addr, int len, void *val)
{
        struct vgic_io_device *iodev = kvm_to_vgic_iodev(dev);
        const struct vgic_register_region *region;
        unsigned long data = 0;

        region = vgic_get_mmio_region(vcpu, iodev, addr, len);
        if (!region) {
                memset(val, 0, len);
                return 0;
        }

        switch (iodev->iodev_type) {
        case IODEV_CPUIF:
                data = region->read(vcpu, addr, len);
                break;
        case IODEV_DIST:
                data = region->read(vcpu, addr, len);
                break;
        case IODEV_REDIST:
                data = region->read(iodev->redist_vcpu, addr, len);
                break;
        case IODEV_ITS:
                data = region->its_read(vcpu->kvm, iodev->its, addr, len);
                break;
        }

        vgic_data_host_to_mmio_bus(val, len, data);
        return 0;
}

static int dispatch_mmio_write(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
                               gpa_t addr, int len, const void *val)
{
        struct vgic_io_device *iodev = kvm_to_vgic_iodev(dev);
        const struct vgic_register_region *region;
        unsigned long data = vgic_data_mmio_bus_to_host(val, len);

        region = vgic_get_mmio_region(vcpu, iodev, addr, len);
        if (!region)
                return 0;

        switch (iodev->iodev_type) {
        case IODEV_CPUIF:
                region->write(vcpu, addr, len, data);
                break;
        case IODEV_DIST:
                region->write(vcpu, addr, len, data);
                break;
        case IODEV_REDIST:
                region->write(iodev->redist_vcpu, addr, len, data);
                break;
        case IODEV_ITS:
                region->its_write(vcpu->kvm, iodev->its, addr, len, data);
                break;
        }

        return 0;
}

struct kvm_io_device_ops kvm_io_gic_ops = {
        .read = dispatch_mmio_read,
        .write = dispatch_mmio_write,
};

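/*
 * Register the distributor frame of the given VGIC model on the KVM MMIO bus
 * so that guest accesses get routed to the handlers above.
 */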
int vgic_register_dist_iodev(struct kvm *kvm, gpa_t dist_base_address,
                             enum vgic_type type)
{
        struct vgic_io_device *io_device = &kvm->arch.vgic.dist_iodev;
        int ret = 0;
        unsigned int len;

        switch (type) {
        case VGIC_V2:
                len = vgic_v2_init_dist_iodev(io_device);
                break;
        case VGIC_V3:
                len = vgic_v3_init_dist_iodev(io_device);
                break;
        default:
                BUG_ON(1);
        }

        io_device->base_addr = dist_base_address;
        io_device->iodev_type = IODEV_DIST;
        io_device->redist_vcpu = NULL;

        mutex_lock(&kvm->slots_lock);
        ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, dist_base_address,
                                      len, &io_device->dev);
        mutex_unlock(&kvm->slots_lock);

        return ret;
}