linux/virt/kvm/arm/vgic/vgic-mmio.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * VGIC MMIO handling functions
 */

#include <linux/bitops.h>
#include <linux/bsearch.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <kvm/iodev.h>
#include <kvm/arm_arch_timer.h>
#include <kvm/arm_vgic.h>

#include "vgic.h"
#include "vgic-mmio.h"

unsigned long vgic_mmio_read_raz(struct kvm_vcpu *vcpu,
                                 gpa_t addr, unsigned int len)
{
        return 0;
}

unsigned long vgic_mmio_read_rao(struct kvm_vcpu *vcpu,
                                 gpa_t addr, unsigned int len)
{
        return -1UL;
}

void vgic_mmio_write_wi(struct kvm_vcpu *vcpu, gpa_t addr,
                        unsigned int len, unsigned long val)
{
        /* Ignore */
}

int vgic_mmio_uaccess_write_wi(struct kvm_vcpu *vcpu, gpa_t addr,
                               unsigned int len, unsigned long val)
{
        /* Ignore */
        return 0;
}
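
/*
 * The RAZ/RAO/WI helpers above are generic handlers that the GICv2/GICv3
 * register tables (vgic-mmio-v2.c, vgic-mmio-v3.c) plug into a
 * struct vgic_register_region. As an illustrative sketch (not a verbatim
 * table entry), a reads-as-zero, write-ignored range would be described
 * roughly as:
 *
 *	{
 *		.reg_offset	= ...,
 *		.len		= 4,
 *		.access_flags	= VGIC_ACCESS_32bit,
 *		.read		= vgic_mmio_read_raz,
 *		.write		= vgic_mmio_write_wi,
 *	},
 */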

unsigned long vgic_mmio_read_group(struct kvm_vcpu *vcpu,
                                   gpa_t addr, unsigned int len)
{
        u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
        u32 value = 0;
        int i;

        /* Loop over all IRQs affected by this read */
        for (i = 0; i < len * 8; i++) {
                struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

                if (irq->group)
                        value |= BIT(i);

                vgic_put_irq(vcpu->kvm, irq);
        }

        return value;
}

void vgic_mmio_write_group(struct kvm_vcpu *vcpu, gpa_t addr,
                           unsigned int len, unsigned long val)
{
        u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
        int i;
        unsigned long flags;

        for (i = 0; i < len * 8; i++) {
                struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

                raw_spin_lock_irqsave(&irq->irq_lock, flags);
                irq->group = !!(val & BIT(i));
                vgic_queue_irq_unlock(vcpu->kvm, irq, flags);

                vgic_put_irq(vcpu->kvm, irq);
        }
}
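
/*
 * A worked example of the intid arithmetic, assuming the usual definition
 * of VGIC_ADDR_TO_INTID() in vgic.h (roughly intid = offset * 8 / bits):
 * for the 1-bit-per-IRQ registers above, a 4-byte access at offset 0x8
 * into the IGROUPR range maps to intid 64 and covers IRQs 64..95, one bit
 * per interrupt.
 */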

/*
 * Read accesses to both GICD_ICENABLER and GICD_ISENABLER return the value
 * of the enabled bit, so there is only one function for both here.
 */
unsigned long vgic_mmio_read_enable(struct kvm_vcpu *vcpu,
                                    gpa_t addr, unsigned int len)
{
        u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
        u32 value = 0;
        int i;

        /* Loop over all IRQs affected by this read */
        for (i = 0; i < len * 8; i++) {
                struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

                if (irq->enabled)
                        value |= (1U << i);

                vgic_put_irq(vcpu->kvm, irq);
        }

        return value;
}

void vgic_mmio_write_senable(struct kvm_vcpu *vcpu,
                             gpa_t addr, unsigned int len,
                             unsigned long val)
{
        u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
        int i;
        unsigned long flags;

        for_each_set_bit(i, &val, len * 8) {
                struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

                raw_spin_lock_irqsave(&irq->irq_lock, flags);
                if (vgic_irq_is_mapped_level(irq)) {
                        bool was_high = irq->line_level;

                        /*
                         * We need to update the state of the interrupt because
                         * the guest might have changed the state of the device
                         * while the interrupt was disabled at the VGIC level.
                         */
                        irq->line_level = vgic_get_phys_line_level(irq);
                        /*
                         * Deactivate the physical interrupt so the GIC will let
                         * us know when it is asserted again.
                         */
                        if (!irq->active && was_high && !irq->line_level)
                                vgic_irq_set_phys_active(irq, false);
                }
                irq->enabled = true;
                vgic_queue_irq_unlock(vcpu->kvm, irq, flags);

                vgic_put_irq(vcpu->kvm, irq);
        }
}

void vgic_mmio_write_cenable(struct kvm_vcpu *vcpu,
                             gpa_t addr, unsigned int len,
                             unsigned long val)
{
        u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
        int i;
        unsigned long flags;

        for_each_set_bit(i, &val, len * 8) {
                struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

                raw_spin_lock_irqsave(&irq->irq_lock, flags);

                irq->enabled = false;

                raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
                vgic_put_irq(vcpu->kvm, irq);
        }
}
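
/*
 * Note the asymmetry with vgic_mmio_write_senable(): enabling an interrupt
 * may require it to be queued to a VCPU (hence vgic_queue_irq_unlock()),
 * while disabling only clears the flag under the lock. If the interrupt is
 * currently on an ap_list it is left there and dropped the next time the
 * list is pruned (see vgic_prune_ap_list() in vgic.c).
 */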

unsigned long vgic_mmio_read_pending(struct kvm_vcpu *vcpu,
                                     gpa_t addr, unsigned int len)
{
        u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
        u32 value = 0;
        int i;

        /* Loop over all IRQs affected by this read */
        for (i = 0; i < len * 8; i++) {
                struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
                unsigned long flags;

                raw_spin_lock_irqsave(&irq->irq_lock, flags);
                if (irq_is_pending(irq))
                        value |= (1U << i);
                raw_spin_unlock_irqrestore(&irq->irq_lock, flags);

                vgic_put_irq(vcpu->kvm, irq);
        }

        return value;
}

/*
 * This function will return the VCPU that performed the MMIO access and
 * trapped from within the VM, and will return NULL if this is a userspace
 * access.
 *
 * We can disable preemption locally around accessing the per-CPU variable,
 * and use the resolved vcpu pointer after enabling preemption again, because
 * even if the current thread is migrated to another CPU, reading the per-CPU
 * value later will give us the same value as we update the per-CPU variable
 * in the preempt notifier handlers.
 */
static struct kvm_vcpu *vgic_get_mmio_requester_vcpu(void)
{
        struct kvm_vcpu *vcpu;

        preempt_disable();
        vcpu = kvm_arm_get_running_vcpu();
        preempt_enable();
        return vcpu;
}
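
/*
 * Callers only use the result to distinguish a guest trap from a userspace
 * (KVM device attribute) access, as in:
 *
 *	bool is_uaccess = !vgic_get_mmio_requester_vcpu();
 *
 * which is exactly how vgic_mmio_write_spending() and
 * vgic_mmio_write_cpending() below use it.
 */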

/* Must be called with irq->irq_lock held */
static void vgic_hw_irq_spending(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
                                 bool is_uaccess)
{
        if (is_uaccess)
                return;

        irq->pending_latch = true;
        vgic_irq_set_phys_active(irq, true);
}

static bool is_vgic_v2_sgi(struct kvm_vcpu *vcpu, struct vgic_irq *irq)
{
        return (vgic_irq_is_sgi(irq->intid) &&
                vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V2);
}
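
/*
 * On a GICv2, SGIs carry per-source pending state and are made pending via
 * GICD_SGIR and the GICD_{S,C}PENDSGIR byte registers, not via the
 * GICD_I{S,C}PENDR bit that the handlers below service. That is why the
 * SGI bits of GICD_ISPENDR0/GICD_ICPENDR0 are treated as write-ignored
 * here for a v2 guest.
 */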

void vgic_mmio_write_spending(struct kvm_vcpu *vcpu,
                              gpa_t addr, unsigned int len,
                              unsigned long val)
{
        bool is_uaccess = !vgic_get_mmio_requester_vcpu();
        u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
        int i;
        unsigned long flags;

        for_each_set_bit(i, &val, len * 8) {
                struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

                /* GICD_ISPENDR0 SGI bits are WI */
                if (is_vgic_v2_sgi(vcpu, irq)) {
                        vgic_put_irq(vcpu->kvm, irq);
                        continue;
                }

                raw_spin_lock_irqsave(&irq->irq_lock, flags);
                if (irq->hw)
                        vgic_hw_irq_spending(vcpu, irq, is_uaccess);
                else
                        irq->pending_latch = true;
                vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
                vgic_put_irq(vcpu->kvm, irq);
        }
}

/* Must be called with irq->irq_lock held */
static void vgic_hw_irq_cpending(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
                                 bool is_uaccess)
{
        if (is_uaccess)
                return;

        irq->pending_latch = false;

        /*
         * We don't want the guest to effectively mask the physical
         * interrupt by doing a write to SPENDR followed by a write to
         * CPENDR for HW interrupts, so we clear the active state on
         * the physical side if the virtual interrupt is not active.
         * This may lead to taking an additional interrupt on the
         * host, but that should not be a problem as the worst that
         * can happen is an additional vgic injection.  We also clear
         * the pending state to maintain proper semantics for edge HW
         * interrupts.
         */
        vgic_irq_set_phys_pending(irq, false);
        if (!irq->active)
                vgic_irq_set_phys_active(irq, false);
}

void vgic_mmio_write_cpending(struct kvm_vcpu *vcpu,
                              gpa_t addr, unsigned int len,
                              unsigned long val)
{
        bool is_uaccess = !vgic_get_mmio_requester_vcpu();
        u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
        int i;
        unsigned long flags;

        for_each_set_bit(i, &val, len * 8) {
                struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

                /* GICD_ICPENDR0 SGI bits are WI */
                if (is_vgic_v2_sgi(vcpu, irq)) {
                        vgic_put_irq(vcpu->kvm, irq);
                        continue;
                }

                raw_spin_lock_irqsave(&irq->irq_lock, flags);

                if (irq->hw)
                        vgic_hw_irq_cpending(vcpu, irq, is_uaccess);
                else
                        irq->pending_latch = false;

                raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
                vgic_put_irq(vcpu->kvm, irq);
        }
}

unsigned long vgic_mmio_read_active(struct kvm_vcpu *vcpu,
                                    gpa_t addr, unsigned int len)
{
        u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
        u32 value = 0;
        int i;

        /* Loop over all IRQs affected by this read */
        for (i = 0; i < len * 8; i++) {
                struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

                if (irq->active)
                        value |= (1U << i);

                vgic_put_irq(vcpu->kvm, irq);
        }

        return value;
}

/* Must be called with irq->irq_lock held */
static void vgic_hw_irq_change_active(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
                                      bool active, bool is_uaccess)
{
        if (is_uaccess)
                return;

        irq->active = active;
        vgic_irq_set_phys_active(irq, active);
}

static void vgic_mmio_change_active(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
                                    bool active)
{
        unsigned long flags;
        struct kvm_vcpu *requester_vcpu = vgic_get_mmio_requester_vcpu();

        raw_spin_lock_irqsave(&irq->irq_lock, flags);

        if (irq->hw) {
                vgic_hw_irq_change_active(vcpu, irq, active, !requester_vcpu);
        } else {
                u32 model = vcpu->kvm->arch.vgic.vgic_model;
                u8 active_source;

                irq->active = active;

                /*
                 * The GICv2 architecture indicates that the source CPUID for
                 * an SGI should be provided during an EOI which implies that
                 * the active state is stored somewhere, but at the same time
                 * this state is not architecturally exposed anywhere and we
                 * have no way of knowing the right source.
                 *
                 * This may lead to a VCPU not being able to receive
                 * additional instances of a particular SGI after migration
                 * for a GICv2 VM on some GIC implementations.  Oh well.
                 */
                active_source = (requester_vcpu) ? requester_vcpu->vcpu_id : 0;

                if (model == KVM_DEV_TYPE_ARM_VGIC_V2 &&
                    active && vgic_irq_is_sgi(irq->intid))
                        irq->active_source = active_source;
        }

        if (irq->active)
                vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
        else
                raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
}

/*
 * If we are fiddling with an IRQ's active state, we have to make sure the IRQ
 * is not queued on some running VCPU's LRs, because then the change to the
 * active state can be overwritten when the VCPU's state is synced coming back
 * from the guest.
 *
 * For shared interrupts, we have to stop all the VCPUs because interrupts can
 * be migrated while we don't hold the IRQ locks and we don't want to be
 * chasing moving targets.
 *
 * For private interrupts we don't have to do anything because userspace
 * accesses to the VGIC state already require all VCPUs to be stopped, and
 * only the VCPU itself can modify its private interrupts active state, which
 * guarantees that the VCPU is not running.
 */
static void vgic_change_active_prepare(struct kvm_vcpu *vcpu, u32 intid)
{
        /* Shared interrupts start at intid == VGIC_NR_PRIVATE_IRQS */
        if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3 ||
            intid >= VGIC_NR_PRIVATE_IRQS)
                kvm_arm_halt_guest(vcpu->kvm);
}

/* See vgic_change_active_prepare */
static void vgic_change_active_finish(struct kvm_vcpu *vcpu, u32 intid)
{
        if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3 ||
            intid >= VGIC_NR_PRIVATE_IRQS)
                kvm_arm_resume_guest(vcpu->kvm);
}

static void __vgic_mmio_write_cactive(struct kvm_vcpu *vcpu,
                                      gpa_t addr, unsigned int len,
                                      unsigned long val)
{
        u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
        int i;

        for_each_set_bit(i, &val, len * 8) {
                struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
                vgic_mmio_change_active(vcpu, irq, false);
                vgic_put_irq(vcpu->kvm, irq);
        }
}

void vgic_mmio_write_cactive(struct kvm_vcpu *vcpu,
                             gpa_t addr, unsigned int len,
                             unsigned long val)
{
        u32 intid = VGIC_ADDR_TO_INTID(addr, 1);

        mutex_lock(&vcpu->kvm->lock);
        vgic_change_active_prepare(vcpu, intid);

        __vgic_mmio_write_cactive(vcpu, addr, len, val);

        vgic_change_active_finish(vcpu, intid);
        mutex_unlock(&vcpu->kvm->lock);
}

int vgic_mmio_uaccess_write_cactive(struct kvm_vcpu *vcpu,
                                    gpa_t addr, unsigned int len,
                                    unsigned long val)
{
        __vgic_mmio_write_cactive(vcpu, addr, len, val);
        return 0;
}

static void __vgic_mmio_write_sactive(struct kvm_vcpu *vcpu,
                                      gpa_t addr, unsigned int len,
                                      unsigned long val)
{
        u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
        int i;

        for_each_set_bit(i, &val, len * 8) {
                struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
                vgic_mmio_change_active(vcpu, irq, true);
                vgic_put_irq(vcpu->kvm, irq);
        }
}

void vgic_mmio_write_sactive(struct kvm_vcpu *vcpu,
                             gpa_t addr, unsigned int len,
                             unsigned long val)
{
        u32 intid = VGIC_ADDR_TO_INTID(addr, 1);

        mutex_lock(&vcpu->kvm->lock);
        vgic_change_active_prepare(vcpu, intid);

        __vgic_mmio_write_sactive(vcpu, addr, len, val);

        vgic_change_active_finish(vcpu, intid);
        mutex_unlock(&vcpu->kvm->lock);
}

int vgic_mmio_uaccess_write_sactive(struct kvm_vcpu *vcpu,
                                    gpa_t addr, unsigned int len,
                                    unsigned long val)
{
        __vgic_mmio_write_sactive(vcpu, addr, len, val);
        return 0;
}

unsigned long vgic_mmio_read_priority(struct kvm_vcpu *vcpu,
                                      gpa_t addr, unsigned int len)
{
        u32 intid = VGIC_ADDR_TO_INTID(addr, 8);
        int i;
        u64 val = 0;

        for (i = 0; i < len; i++) {
                struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

                val |= (u64)irq->priority << (i * 8);

                vgic_put_irq(vcpu->kvm, irq);
        }

        return val;
}
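
/*
 * Priority registers are byte-per-IRQ (8 bits_per_irq), so for example a
 * 32-bit read at offset 0x4 of the IPRIORITYR range yields intid 4 and
 * returns the priorities of IRQs 4..7, packed one byte each with the
 * lowest intid in the least significant byte.
 */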

/*
 * We currently don't handle changing the priority of an interrupt that
 * is already pending on a VCPU. If there is a need for this, we would
 * need to make this VCPU exit and re-evaluate the priorities, potentially
 * leading to this interrupt getting presented now to the guest (if it has
 * been masked by the priority mask before).
 */
void vgic_mmio_write_priority(struct kvm_vcpu *vcpu,
                              gpa_t addr, unsigned int len,
                              unsigned long val)
{
        u32 intid = VGIC_ADDR_TO_INTID(addr, 8);
        int i;
        unsigned long flags;

        for (i = 0; i < len; i++) {
                struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

                raw_spin_lock_irqsave(&irq->irq_lock, flags);
                /* Narrow the priority range to what we actually support */
                irq->priority = (val >> (i * 8)) & GENMASK(7, 8 - VGIC_PRI_BITS);
                raw_spin_unlock_irqrestore(&irq->irq_lock, flags);

                vgic_put_irq(vcpu->kvm, irq);
        }
}
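
/*
 * A worked example of the narrowing above, assuming VGIC_PRI_BITS is 5
 * (its value in vgic.h at the time of writing): the mask is
 * GENMASK(7, 3) == 0xf8, so a guest write of 0xab is stored as 0xa8 and
 * the bottom three priority bits always read back as zero, matching a GIC
 * that implements 32 priority levels.
 */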

unsigned long vgic_mmio_read_config(struct kvm_vcpu *vcpu,
                                    gpa_t addr, unsigned int len)
{
        u32 intid = VGIC_ADDR_TO_INTID(addr, 2);
        u32 value = 0;
        int i;

        for (i = 0; i < len * 4; i++) {
                struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

                if (irq->config == VGIC_CONFIG_EDGE)
                        value |= (2U << (i * 2));

                vgic_put_irq(vcpu->kvm, irq);
        }

        return value;
}
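
/*
 * Each interrupt gets a 2-bit field in the ICFGR registers and only the
 * upper bit carries information: 0b10 means edge-triggered, 0b00 means
 * level-sensitive. That is what the (2U << (i * 2)) above encodes and what
 * test_bit(i * 2 + 1, ...) below decodes.
 */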

void vgic_mmio_write_config(struct kvm_vcpu *vcpu,
                            gpa_t addr, unsigned int len,
                            unsigned long val)
{
        u32 intid = VGIC_ADDR_TO_INTID(addr, 2);
        int i;
        unsigned long flags;

        for (i = 0; i < len * 4; i++) {
                struct vgic_irq *irq;

                /*
                 * The configuration cannot be changed for SGIs in general,
                 * and for PPIs this is IMPLEMENTATION DEFINED. The arch timer
                 * code relies on PPIs being level triggered, so we also
                 * make them read-only here.
                 */
                if (intid + i < VGIC_NR_PRIVATE_IRQS)
                        continue;

                irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
                raw_spin_lock_irqsave(&irq->irq_lock, flags);

                if (test_bit(i * 2 + 1, &val))
                        irq->config = VGIC_CONFIG_EDGE;
                else
                        irq->config = VGIC_CONFIG_LEVEL;

                raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
                vgic_put_irq(vcpu->kvm, irq);
        }
}

u64 vgic_read_irq_line_level_info(struct kvm_vcpu *vcpu, u32 intid)
{
        int i;
        u64 val = 0;
        int nr_irqs = vcpu->kvm->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS;

        for (i = 0; i < 32; i++) {
                struct vgic_irq *irq;

                if ((intid + i) < VGIC_NR_SGIS || (intid + i) >= nr_irqs)
                        continue;

                irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
                if (irq->config == VGIC_CONFIG_LEVEL && irq->line_level)
                        val |= (1U << i);

                vgic_put_irq(vcpu->kvm, irq);
        }

        return val;
}
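
/*
 * This helper and vgic_write_irq_line_level_info() below work on banks of
 * 32 interrupts and back the userspace save/restore of level-sensitive
 * line state (the KVM_DEV_ARM_VGIC_GRP_LEVEL_INFO device attribute group,
 * handled in vgic-kvm-device.c).
 */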

void vgic_write_irq_line_level_info(struct kvm_vcpu *vcpu, u32 intid,
                                    const u64 val)
{
        int i;
        int nr_irqs = vcpu->kvm->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS;
        unsigned long flags;

        for (i = 0; i < 32; i++) {
                struct vgic_irq *irq;
                bool new_level;

                if ((intid + i) < VGIC_NR_SGIS || (intid + i) >= nr_irqs)
                        continue;

                irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

                /*
                 * Line level is set irrespective of irq type (level or
                 * edge) to avoid a dependency on the VM restoring the irq
                 * config before the line level.
                 */
                new_level = !!(val & (1U << i));
                raw_spin_lock_irqsave(&irq->irq_lock, flags);
                irq->line_level = new_level;
                if (new_level)
                        vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
                else
                        raw_spin_unlock_irqrestore(&irq->irq_lock, flags);

                vgic_put_irq(vcpu->kvm, irq);
        }
}

static int match_region(const void *key, const void *elt)
{
        const unsigned int offset = (unsigned long)key;
        const struct vgic_register_region *region = elt;

        if (offset < region->reg_offset)
                return -1;

        if (offset >= region->reg_offset + region->len)
                return 1;

        return 0;
}

const struct vgic_register_region *
vgic_find_mmio_region(const struct vgic_register_region *regions,
                      int nr_regions, unsigned int offset)
{
        return bsearch((void *)(uintptr_t)offset, regions, nr_regions,
                       sizeof(regions[0]), match_region);
}
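
/*
 * Note that bsearch() only works if the regions array is sorted by
 * ascending reg_offset; the register tables in vgic-mmio-v2.c and
 * vgic-mmio-v3.c are expected to be laid out that way. match_region()
 * treats an offset anywhere inside [reg_offset, reg_offset + len) as a
 * hit, so a single region can span several architectural registers.
 */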

void vgic_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr)
{
        if (kvm_vgic_global_state.type == VGIC_V2)
                vgic_v2_set_vmcr(vcpu, vmcr);
        else
                vgic_v3_set_vmcr(vcpu, vmcr);
}

void vgic_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr)
{
        if (kvm_vgic_global_state.type == VGIC_V2)
                vgic_v2_get_vmcr(vcpu, vmcr);
        else
                vgic_v3_get_vmcr(vcpu, vmcr);
}
/*
 * kvm_mmio_read_buf() returns a value in a format such that, converted to
 * a byte array, it is observed exactly as the guest wanted it to appear in
 * memory had it done the store itself. For the GIC that is little endian,
 * as the guest knows the GIC is always LE.
 *
 * We convert this value to the CPU's native format so we can deal with it
 * as a data value.
 */
unsigned long vgic_data_mmio_bus_to_host(const void *val, unsigned int len)
{
        unsigned long data = kvm_mmio_read_buf(val, len);

        switch (len) {
        case 1:
                return data;
        case 2:
                return le16_to_cpu(data);
        case 4:
                return le32_to_cpu(data);
        default:
                return le64_to_cpu(data);
        }
}

/*
 * kvm_mmio_write_buf() expects a value in a format such that, converted to
 * a byte array, it is observed as the guest would see it if it could
 * perform the load directly.  Since the GIC is LE, and the guest knows
 * this, the guest expects a value in little endian format.
 *
 * We convert the data value from the CPU's native format to LE so that the
 * value is returned in the proper format.
 */
void vgic_data_host_to_mmio_bus(void *buf, unsigned int len,
                                unsigned long data)
{
        switch (len) {
        case 1:
                break;
        case 2:
                data = cpu_to_le16(data);
                break;
        case 4:
                data = cpu_to_le32(data);
                break;
        default:
                data = cpu_to_le64(data);
        }

        kvm_mmio_write_buf(buf, len, data);
}
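
/*
 * Worked example: if a handler returns 0x12345678 for a 4-byte read, the
 * guest must observe the bytes 78 56 34 12 (LE order) in its buffer. On a
 * little-endian host cpu_to_le32() is a no-op; on a big-endian host it
 * byte-swaps the value so that kvm_mmio_write_buf() stores that same LE
 * layout.
 */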

static
struct vgic_io_device *kvm_to_vgic_iodev(const struct kvm_io_device *dev)
{
        return container_of(dev, struct vgic_io_device, dev);
}

static bool check_region(const struct kvm *kvm,
                         const struct vgic_register_region *region,
                         gpa_t addr, int len)
{
        int flags, nr_irqs = kvm->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS;

        switch (len) {
        case sizeof(u8):
                flags = VGIC_ACCESS_8bit;
                break;
        case sizeof(u32):
                flags = VGIC_ACCESS_32bit;
                break;
        case sizeof(u64):
                flags = VGIC_ACCESS_64bit;
                break;
        default:
                return false;
        }

        if ((region->access_flags & flags) && IS_ALIGNED(addr, len)) {
                if (!region->bits_per_irq)
                        return true;

                /* Do we access a non-allocated IRQ? */
                return VGIC_ADDR_TO_INTID(addr, region->bits_per_irq) < nr_irqs;
        }

        return false;
}
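
/*
 * check_region() thus rejects, for example, a 2-byte access (only 1, 4 and
 * 8 byte widths have VGIC_ACCESS_* flags), a naturally misaligned access,
 * an access width the region was not registered for, and any per-IRQ
 * access that lands beyond the number of IRQs actually allocated for the
 * VM.
 */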

const struct vgic_register_region *
vgic_get_mmio_region(struct kvm_vcpu *vcpu, struct vgic_io_device *iodev,
                     gpa_t addr, int len)
{
        const struct vgic_register_region *region;

        region = vgic_find_mmio_region(iodev->regions, iodev->nr_regions,
                                       addr - iodev->base_addr);
        if (!region || !check_region(vcpu->kvm, region, addr, len))
                return NULL;

        return region;
}

static int vgic_uaccess_read(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
                             gpa_t addr, u32 *val)
{
        struct vgic_io_device *iodev = kvm_to_vgic_iodev(dev);
        const struct vgic_register_region *region;
        struct kvm_vcpu *r_vcpu;

        region = vgic_get_mmio_region(vcpu, iodev, addr, sizeof(u32));
        if (!region) {
                *val = 0;
                return 0;
        }

        r_vcpu = iodev->redist_vcpu ? iodev->redist_vcpu : vcpu;
        if (region->uaccess_read)
                *val = region->uaccess_read(r_vcpu, addr, sizeof(u32));
        else
                *val = region->read(r_vcpu, addr, sizeof(u32));

        return 0;
}

static int vgic_uaccess_write(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
                              gpa_t addr, const u32 *val)
{
        struct vgic_io_device *iodev = kvm_to_vgic_iodev(dev);
        const struct vgic_register_region *region;
        struct kvm_vcpu *r_vcpu;

        region = vgic_get_mmio_region(vcpu, iodev, addr, sizeof(u32));
        if (!region)
                return 0;

        r_vcpu = iodev->redist_vcpu ? iodev->redist_vcpu : vcpu;
        if (region->uaccess_write)
                return region->uaccess_write(r_vcpu, addr, sizeof(u32), *val);

        region->write(r_vcpu, addr, sizeof(u32), *val);
        return 0;
}

/*
 * Userland access to VGIC registers.
 */
int vgic_uaccess(struct kvm_vcpu *vcpu, struct vgic_io_device *dev,
                 bool is_write, int offset, u32 *val)
{
        if (is_write)
                return vgic_uaccess_write(vcpu, &dev->dev, offset, val);
        else
                return vgic_uaccess_read(vcpu, &dev->dev, offset, val);
}

static int dispatch_mmio_read(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
                              gpa_t addr, int len, void *val)
{
        struct vgic_io_device *iodev = kvm_to_vgic_iodev(dev);
        const struct vgic_register_region *region;
        unsigned long data = 0;

        region = vgic_get_mmio_region(vcpu, iodev, addr, len);
        if (!region) {
                memset(val, 0, len);
                return 0;
        }

        switch (iodev->iodev_type) {
        case IODEV_CPUIF:
                data = region->read(vcpu, addr, len);
                break;
        case IODEV_DIST:
                data = region->read(vcpu, addr, len);
                break;
        case IODEV_REDIST:
                data = region->read(iodev->redist_vcpu, addr, len);
                break;
        case IODEV_ITS:
                data = region->its_read(vcpu->kvm, iodev->its, addr, len);
                break;
        }

        vgic_data_host_to_mmio_bus(val, len, data);
        return 0;
}

static int dispatch_mmio_write(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
                               gpa_t addr, int len, const void *val)
{
        struct vgic_io_device *iodev = kvm_to_vgic_iodev(dev);
        const struct vgic_register_region *region;
        unsigned long data = vgic_data_mmio_bus_to_host(val, len);

        region = vgic_get_mmio_region(vcpu, iodev, addr, len);
        if (!region)
                return 0;

        switch (iodev->iodev_type) {
        case IODEV_CPUIF:
                region->write(vcpu, addr, len, data);
                break;
        case IODEV_DIST:
                region->write(vcpu, addr, len, data);
                break;
        case IODEV_REDIST:
                region->write(iodev->redist_vcpu, addr, len, data);
                break;
        case IODEV_ITS:
                region->its_write(vcpu->kvm, iodev->its, addr, len, data);
                break;
        }

        return 0;
}

struct kvm_io_device_ops kvm_io_gic_ops = {
        .read = dispatch_mmio_read,
        .write = dispatch_mmio_write,
};
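
/*
 * kvm_io_gic_ops is what ties the handlers in this file to the KVM MMIO
 * bus: every vgic_io_device is registered with kvm_io_bus_register_dev()
 * (see vgic_register_dist_iodev() below for the distributor; the
 * redistributor and ITS devices are registered the same way from their
 * respective files), so guest traps arrive via dispatch_mmio_read() and
 * dispatch_mmio_write().
 */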

int vgic_register_dist_iodev(struct kvm *kvm, gpa_t dist_base_address,
                             enum vgic_type type)
{
        struct vgic_io_device *io_device = &kvm->arch.vgic.dist_iodev;
        int ret = 0;
        unsigned int len;

        switch (type) {
        case VGIC_V2:
                len = vgic_v2_init_dist_iodev(io_device);
                break;
        case VGIC_V3:
                len = vgic_v3_init_dist_iodev(io_device);
                break;
        default:
                BUG_ON(1);
        }

        io_device->base_addr = dist_base_address;
        io_device->iodev_type = IODEV_DIST;
        io_device->redist_vcpu = NULL;

        mutex_lock(&kvm->slots_lock);
        ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, dist_base_address,
                                      len, &io_device->dev);
        mutex_unlock(&kvm->slots_lock);

        return ret;
}
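
/*
 * A sketch of the call site (the real one lives in the map_resources paths
 * of vgic-v2.c/vgic-v3.c, run when the VM first enters the guest): once
 * userspace has set the distributor base address, something along the
 * lines of
 *
 *	ret = vgic_register_dist_iodev(kvm, dist->vgic_dist_base, VGIC_V2);
 *	if (ret)
 *		return ret;
 *
 * wires the distributor register range into the guest physical address
 * space.
 */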