linux/virt/kvm/arm/vgic/vgic-mmio.c
/*
 * VGIC MMIO handling functions
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/bitops.h>
#include <linux/bsearch.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <kvm/iodev.h>
#include <kvm/arm_arch_timer.h>
#include <kvm/arm_vgic.h>

#include "vgic.h"
#include "vgic-mmio.h"

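/*
 * Trivial accessors: read-as-zero (RAZ), read-as-ones (RAO) and
 * write-ignored (WI) behaviour for registers that need it.
 */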
unsigned long vgic_mmio_read_raz(struct kvm_vcpu *vcpu,
                                 gpa_t addr, unsigned int len)
{
        return 0;
}

unsigned long vgic_mmio_read_rao(struct kvm_vcpu *vcpu,
                                 gpa_t addr, unsigned int len)
{
        return -1UL;
}

void vgic_mmio_write_wi(struct kvm_vcpu *vcpu, gpa_t addr,
                        unsigned int len, unsigned long val)
{
        /* Ignore */
}

/*
 * Read accesses to both GICD_ICENABLER and GICD_ISENABLER return the value
 * of the enabled bit, so there is only one function for both here.
 */
unsigned long vgic_mmio_read_enable(struct kvm_vcpu *vcpu,
                                    gpa_t addr, unsigned int len)
{
        u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
        u32 value = 0;
        int i;

        /* Loop over all IRQs affected by this read */
        for (i = 0; i < len * 8; i++) {
                struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

                if (irq->enabled)
                        value |= (1U << i);

                vgic_put_irq(vcpu->kvm, irq);
        }

        return value;
}

void vgic_mmio_write_senable(struct kvm_vcpu *vcpu,
                             gpa_t addr, unsigned int len,
                             unsigned long val)
{
        u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
        int i;
        unsigned long flags;

        for_each_set_bit(i, &val, len * 8) {
                struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

                spin_lock_irqsave(&irq->irq_lock, flags);
                irq->enabled = true;
                vgic_queue_irq_unlock(vcpu->kvm, irq, flags);

                vgic_put_irq(vcpu->kvm, irq);
        }
}

void vgic_mmio_write_cenable(struct kvm_vcpu *vcpu,
                             gpa_t addr, unsigned int len,
                             unsigned long val)
{
        u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
        int i;
        unsigned long flags;

        for_each_set_bit(i, &val, len * 8) {
                struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

                spin_lock_irqsave(&irq->irq_lock, flags);

                irq->enabled = false;

                spin_unlock_irqrestore(&irq->irq_lock, flags);
                vgic_put_irq(vcpu->kvm, irq);
        }
}

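/*
 * Unlike the enable bit, an interrupt's pending state may depend on more
 * than the latched pending bit (e.g. the line level of a level-triggered
 * interrupt), so evaluate irq_is_pending() under the irq_lock to get a
 * consistent snapshot.
 */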
unsigned long vgic_mmio_read_pending(struct kvm_vcpu *vcpu,
                                     gpa_t addr, unsigned int len)
{
        u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
        u32 value = 0;
        int i;

        /* Loop over all IRQs affected by this read */
        for (i = 0; i < len * 8; i++) {
                struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
                unsigned long flags;

                spin_lock_irqsave(&irq->irq_lock, flags);
                if (irq_is_pending(irq))
                        value |= (1U << i);
                spin_unlock_irqrestore(&irq->irq_lock, flags);

                vgic_put_irq(vcpu->kvm, irq);
        }

        return value;
}

/*
 * This function will return the VCPU that performed the MMIO access and
 * trapped from within the VM, and will return NULL if this is a userspace
 * access.
 *
 * We can disable preemption locally around accessing the per-CPU variable,
 * and use the resolved vcpu pointer after enabling preemption again, because
 * even if the current thread is migrated to another CPU, reading the per-CPU
 * value later will give us the same value as we update the per-CPU variable
 * in the preempt notifier handlers.
 */
static struct kvm_vcpu *vgic_get_mmio_requester_vcpu(void)
{
        struct kvm_vcpu *vcpu;

        preempt_disable();
        vcpu = kvm_arm_get_running_vcpu();
        preempt_enable();
        return vcpu;
}

/* Must be called with irq->irq_lock held */
static void vgic_hw_irq_spending(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
                                 bool is_uaccess)
{
        if (is_uaccess)
                return;

        irq->pending_latch = true;
        vgic_irq_set_phys_active(irq, true);
}

void vgic_mmio_write_spending(struct kvm_vcpu *vcpu,
                              gpa_t addr, unsigned int len,
                              unsigned long val)
{
        bool is_uaccess = !vgic_get_mmio_requester_vcpu();
        u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
        int i;
        unsigned long flags;

        for_each_set_bit(i, &val, len * 8) {
                struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

                spin_lock_irqsave(&irq->irq_lock, flags);
                if (irq->hw)
                        vgic_hw_irq_spending(vcpu, irq, is_uaccess);
                else
                        irq->pending_latch = true;
                vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
                vgic_put_irq(vcpu->kvm, irq);
        }
}

/* Must be called with irq->irq_lock held */
static void vgic_hw_irq_cpending(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
                                 bool is_uaccess)
{
        if (is_uaccess)
                return;

        irq->pending_latch = false;

        /*
         * We don't want the guest to effectively mask the physical
         * interrupt by doing a write to SPENDR followed by a write to
         * CPENDR for HW interrupts, so we clear the active state on
         * the physical side if the virtual interrupt is not active.
         * This may lead to taking an additional interrupt on the
         * host, but that should not be a problem as the worst that
         * can happen is an additional vgic injection.  We also clear
         * the pending state to maintain proper semantics for edge HW
         * interrupts.
         */
        vgic_irq_set_phys_pending(irq, false);
        if (!irq->active)
                vgic_irq_set_phys_active(irq, false);
}

void vgic_mmio_write_cpending(struct kvm_vcpu *vcpu,
                              gpa_t addr, unsigned int len,
                              unsigned long val)
{
        bool is_uaccess = !vgic_get_mmio_requester_vcpu();
        u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
        int i;
        unsigned long flags;

        for_each_set_bit(i, &val, len * 8) {
                struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

                spin_lock_irqsave(&irq->irq_lock, flags);

                if (irq->hw)
                        vgic_hw_irq_cpending(vcpu, irq, is_uaccess);
                else
                        irq->pending_latch = false;

                spin_unlock_irqrestore(&irq->irq_lock, flags);
                vgic_put_irq(vcpu->kvm, irq);
        }
}

unsigned long vgic_mmio_read_active(struct kvm_vcpu *vcpu,
                                    gpa_t addr, unsigned int len)
{
        u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
        u32 value = 0;
        int i;

        /* Loop over all IRQs affected by this read */
        for (i = 0; i < len * 8; i++) {
                struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

                if (irq->active)
                        value |= (1U << i);

                vgic_put_irq(vcpu->kvm, irq);
        }

        return value;
}

/* Must be called with irq->irq_lock held */
static void vgic_hw_irq_change_active(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
                                      bool active, bool is_uaccess)
{
        if (is_uaccess)
                return;

        irq->active = active;
        vgic_irq_set_phys_active(irq, active);
}

static void vgic_mmio_change_active(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
                                    bool active)
{
        unsigned long flags;
        struct kvm_vcpu *requester_vcpu = vgic_get_mmio_requester_vcpu();

        spin_lock_irqsave(&irq->irq_lock, flags);

        /*
         * If this virtual IRQ was written into a list register, we
         * have to make sure the CPU that runs the VCPU thread has
         * synced back the LR state to the struct vgic_irq.
         *
         * As long as the conditions below are true, we know the VCPU thread
         * may be on its way back from the guest (we kicked the VCPU thread in
         * vgic_change_active_prepare) and still has to sync back this IRQ,
         * so we release and re-acquire the spin_lock to let the other thread
         * sync back the IRQ.
         *
         * When accessing VGIC state from user space, requester_vcpu is
         * NULL, which is fine, because we guarantee that no VCPUs are running
         * when accessing VGIC state from user space so irq->vcpu->cpu is
         * always -1.
         */
        while (irq->vcpu && /* IRQ may have state in an LR somewhere */
               irq->vcpu != requester_vcpu && /* Current thread is not the VCPU thread */
               irq->vcpu->cpu != -1) /* VCPU thread is running */
                cond_resched_lock(&irq->irq_lock);

        if (irq->hw)
                vgic_hw_irq_change_active(vcpu, irq, active, !requester_vcpu);
        else
                irq->active = active;

        if (irq->active)
                vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
        else
                spin_unlock_irqrestore(&irq->irq_lock, flags);
}

/*
 * If we are fiddling with an IRQ's active state, we have to make sure the IRQ
 * is not queued on some running VCPU's LRs, because then the change to the
 * active state can be overwritten when the VCPU's state is synced coming back
 * from the guest.
 *
 * For shared interrupts, we have to stop all the VCPUs because interrupts can
 * be migrated while we don't hold the IRQ locks and we don't want to be
 * chasing moving targets.
 *
 * For private interrupts we don't have to do anything because userspace
 * accesses to the VGIC state already require all VCPUs to be stopped, and
 * only the VCPU itself can modify its private interrupts active state, which
 * guarantees that the VCPU is not running.
 */
static void vgic_change_active_prepare(struct kvm_vcpu *vcpu, u32 intid)
{
        if (intid > VGIC_NR_PRIVATE_IRQS)
                kvm_arm_halt_guest(vcpu->kvm);
}

/* See vgic_change_active_prepare */
static void vgic_change_active_finish(struct kvm_vcpu *vcpu, u32 intid)
{
        if (intid > VGIC_NR_PRIVATE_IRQS)
                kvm_arm_resume_guest(vcpu->kvm);
}

static void __vgic_mmio_write_cactive(struct kvm_vcpu *vcpu,
                                      gpa_t addr, unsigned int len,
                                      unsigned long val)
{
        u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
        int i;

        for_each_set_bit(i, &val, len * 8) {
                struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
                vgic_mmio_change_active(vcpu, irq, false);
                vgic_put_irq(vcpu->kvm, irq);
        }
}

void vgic_mmio_write_cactive(struct kvm_vcpu *vcpu,
                             gpa_t addr, unsigned int len,
                             unsigned long val)
{
        u32 intid = VGIC_ADDR_TO_INTID(addr, 1);

        mutex_lock(&vcpu->kvm->lock);
        vgic_change_active_prepare(vcpu, intid);

        __vgic_mmio_write_cactive(vcpu, addr, len, val);

        vgic_change_active_finish(vcpu, intid);
        mutex_unlock(&vcpu->kvm->lock);
}

void vgic_mmio_uaccess_write_cactive(struct kvm_vcpu *vcpu,
                                     gpa_t addr, unsigned int len,
                                     unsigned long val)
{
        __vgic_mmio_write_cactive(vcpu, addr, len, val);
}

static void __vgic_mmio_write_sactive(struct kvm_vcpu *vcpu,
                                      gpa_t addr, unsigned int len,
                                      unsigned long val)
{
        u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
        int i;

        for_each_set_bit(i, &val, len * 8) {
                struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
                vgic_mmio_change_active(vcpu, irq, true);
                vgic_put_irq(vcpu->kvm, irq);
        }
}

void vgic_mmio_write_sactive(struct kvm_vcpu *vcpu,
                             gpa_t addr, unsigned int len,
                             unsigned long val)
{
        u32 intid = VGIC_ADDR_TO_INTID(addr, 1);

        mutex_lock(&vcpu->kvm->lock);
        vgic_change_active_prepare(vcpu, intid);

        __vgic_mmio_write_sactive(vcpu, addr, len, val);

        vgic_change_active_finish(vcpu, intid);
        mutex_unlock(&vcpu->kvm->lock);
}

void vgic_mmio_uaccess_write_sactive(struct kvm_vcpu *vcpu,
                                     gpa_t addr, unsigned int len,
                                     unsigned long val)
{
        __vgic_mmio_write_sactive(vcpu, addr, len, val);
}

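/*
 * The priority registers hold one byte of priority per interrupt, so an
 * access of 'len' bytes covers 'len' consecutive interrupts.
 */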
unsigned long vgic_mmio_read_priority(struct kvm_vcpu *vcpu,
                                      gpa_t addr, unsigned int len)
{
        u32 intid = VGIC_ADDR_TO_INTID(addr, 8);
        int i;
        u64 val = 0;

        for (i = 0; i < len; i++) {
                struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

                val |= (u64)irq->priority << (i * 8);

                vgic_put_irq(vcpu->kvm, irq);
        }

        return val;
}

/*
 * We currently don't handle changing the priority of an interrupt that
 * is already pending on a VCPU. If there is a need for this, we would
 * need to make this VCPU exit and re-evaluate the priorities, potentially
 * leading to this interrupt getting presented now to the guest (if it has
 * been masked by the priority mask before).
 */
void vgic_mmio_write_priority(struct kvm_vcpu *vcpu,
                              gpa_t addr, unsigned int len,
                              unsigned long val)
{
        u32 intid = VGIC_ADDR_TO_INTID(addr, 8);
        int i;
        unsigned long flags;

        for (i = 0; i < len; i++) {
                struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

                spin_lock_irqsave(&irq->irq_lock, flags);
                /* Narrow the priority range to what we actually support */
                irq->priority = (val >> (i * 8)) & GENMASK(7, 8 - VGIC_PRI_BITS);
                spin_unlock_irqrestore(&irq->irq_lock, flags);

                vgic_put_irq(vcpu->kvm, irq);
        }
}

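/*
 * The configuration registers use two bits per interrupt; bit 1 of each
 * field selects edge-triggered (1) or level-sensitive (0) behaviour, so
 * each byte of the register covers four interrupts.
 */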
unsigned long vgic_mmio_read_config(struct kvm_vcpu *vcpu,
                                    gpa_t addr, unsigned int len)
{
        u32 intid = VGIC_ADDR_TO_INTID(addr, 2);
        u32 value = 0;
        int i;

        for (i = 0; i < len * 4; i++) {
                struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

                if (irq->config == VGIC_CONFIG_EDGE)
                        value |= (2U << (i * 2));

                vgic_put_irq(vcpu->kvm, irq);
        }

        return value;
}

void vgic_mmio_write_config(struct kvm_vcpu *vcpu,
                            gpa_t addr, unsigned int len,
                            unsigned long val)
{
        u32 intid = VGIC_ADDR_TO_INTID(addr, 2);
        int i;
        unsigned long flags;

        for (i = 0; i < len * 4; i++) {
                struct vgic_irq *irq;

                /*
                 * The configuration cannot be changed for SGIs in general,
                 * for PPIs this is IMPLEMENTATION DEFINED. The arch timer
                 * code relies on PPIs being level triggered, so we also
                 * make them read-only here.
                 */
                if (intid + i < VGIC_NR_PRIVATE_IRQS)
                        continue;

                irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
                spin_lock_irqsave(&irq->irq_lock, flags);

                if (test_bit(i * 2 + 1, &val))
                        irq->config = VGIC_CONFIG_EDGE;
                else
                        irq->config = VGIC_CONFIG_LEVEL;

                spin_unlock_irqrestore(&irq->irq_lock, flags);
                vgic_put_irq(vcpu->kvm, irq);
        }
}

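/*
 * Return a bitmap with the line level of the 32 interrupts starting at
 * intid. Only implemented, level-triggered interrupts are reported; SGIs
 * and out-of-range interrupt IDs are skipped.
 */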
u64 vgic_read_irq_line_level_info(struct kvm_vcpu *vcpu, u32 intid)
{
        int i;
        u64 val = 0;
        int nr_irqs = vcpu->kvm->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS;

        for (i = 0; i < 32; i++) {
                struct vgic_irq *irq;

                if ((intid + i) < VGIC_NR_SGIS || (intid + i) >= nr_irqs)
                        continue;

                irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
                if (irq->config == VGIC_CONFIG_LEVEL && irq->line_level)
                        val |= (1U << i);

                vgic_put_irq(vcpu->kvm, irq);
        }

        return val;
}

void vgic_write_irq_line_level_info(struct kvm_vcpu *vcpu, u32 intid,
                                    const u64 val)
{
        int i;
        int nr_irqs = vcpu->kvm->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS;
        unsigned long flags;

        for (i = 0; i < 32; i++) {
                struct vgic_irq *irq;
                bool new_level;

                if ((intid + i) < VGIC_NR_SGIS || (intid + i) >= nr_irqs)
                        continue;

                irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

                /*
                 * The line level is set irrespective of irq type
                 * (level or edge), so that restoring it does not depend
                 * on the VM having restored the irq configuration first.
                 */
                new_level = !!(val & (1U << i));
                spin_lock_irqsave(&irq->irq_lock, flags);
                irq->line_level = new_level;
                if (new_level)
                        vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
                else
                        spin_unlock_irqrestore(&irq->irq_lock, flags);

                vgic_put_irq(vcpu->kvm, irq);
        }
}

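/*
 * bsearch() comparator for the register region tables, which are expected
 * to be sorted by ascending reg_offset.
 */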
static int match_region(const void *key, const void *elt)
{
        const unsigned int offset = (unsigned long)key;
        const struct vgic_register_region *region = elt;

        if (offset < region->reg_offset)
                return -1;

        if (offset >= region->reg_offset + region->len)
                return 1;

        return 0;
}

const struct vgic_register_region *
vgic_find_mmio_region(const struct vgic_register_region *regions,
                      int nr_regions, unsigned int offset)
{
        return bsearch((void *)(uintptr_t)offset, regions, nr_regions,
                       sizeof(regions[0]), match_region);
}

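/* Forward VMCR (CPU interface state) accesses to the GICv2 or GICv3 backend. */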
void vgic_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr)
{
        if (kvm_vgic_global_state.type == VGIC_V2)
                vgic_v2_set_vmcr(vcpu, vmcr);
        else
                vgic_v3_set_vmcr(vcpu, vmcr);
}

void vgic_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr)
{
        if (kvm_vgic_global_state.type == VGIC_V2)
                vgic_v2_get_vmcr(vcpu, vmcr);
        else
                vgic_v3_get_vmcr(vcpu, vmcr);
}

/*
 * kvm_mmio_read_buf() returns a value in a format where it can be converted
 * to a byte array and be directly observed as the guest wanted it to appear
 * in memory if it had done the store itself, which is LE for the GIC, as the
 * guest knows the GIC is always LE.
 *
 * We convert this value to the CPU's native format to deal with it as a data
 * value.
 */
unsigned long vgic_data_mmio_bus_to_host(const void *val, unsigned int len)
{
        unsigned long data = kvm_mmio_read_buf(val, len);

        switch (len) {
        case 1:
                return data;
        case 2:
                return le16_to_cpu(data);
        case 4:
                return le32_to_cpu(data);
        default:
                return le64_to_cpu(data);
        }
}
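
/*
 * For example, assuming a big-endian host: a 32-bit guest store of
 * 0x0000000a appears in memory as the bytes 0a 00 00 00, which
 * kvm_mmio_read_buf() hands back as the native value 0x0a000000; the
 * le32_to_cpu() above then recovers the intended value 0x0000000a. On a
 * little-endian host both representations are already identical.
 */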

/*
 * kvm_mmio_write_buf() expects a value in a format such that if converted to
 * a byte array it is observed as the guest would see it if it could perform
 * the load directly.  Since the GIC is LE, and the guest knows this, the
 * guest expects a value in little endian format.
 *
 * We convert the data value from the CPU's native format to LE so that the
 * value is returned in the proper format.
 */
void vgic_data_host_to_mmio_bus(void *buf, unsigned int len,
                                unsigned long data)
{
        switch (len) {
        case 1:
                break;
        case 2:
                data = cpu_to_le16(data);
                break;
        case 4:
                data = cpu_to_le32(data);
                break;
        default:
                data = cpu_to_le64(data);
        }

        kvm_mmio_write_buf(buf, len, data);
}

static
struct vgic_io_device *kvm_to_vgic_iodev(const struct kvm_io_device *dev)
{
        return container_of(dev, struct vgic_io_device, dev);
}

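/*
 * Check that the access width is one the region supports, that the address
 * is naturally aligned, and that per-IRQ registers do not address an
 * interrupt beyond the number of implemented IRQs.
 */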
static bool check_region(const struct kvm *kvm,
                         const struct vgic_register_region *region,
                         gpa_t addr, int len)
{
        int flags, nr_irqs = kvm->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS;

        switch (len) {
        case sizeof(u8):
                flags = VGIC_ACCESS_8bit;
                break;
        case sizeof(u32):
                flags = VGIC_ACCESS_32bit;
                break;
        case sizeof(u64):
                flags = VGIC_ACCESS_64bit;
                break;
        default:
                return false;
        }

        if ((region->access_flags & flags) && IS_ALIGNED(addr, len)) {
                if (!region->bits_per_irq)
                        return true;

                /* Do we access a non-allocated IRQ? */
                return VGIC_ADDR_TO_INTID(addr, region->bits_per_irq) < nr_irqs;
        }

        return false;
}

const struct vgic_register_region *
vgic_get_mmio_region(struct kvm_vcpu *vcpu, struct vgic_io_device *iodev,
                     gpa_t addr, int len)
{
        const struct vgic_register_region *region;

        region = vgic_find_mmio_region(iodev->regions, iodev->nr_regions,
                                       addr - iodev->base_addr);
        if (!region || !check_region(vcpu->kvm, region, addr, len))
                return NULL;

        return region;
}

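/*
 * Userspace accesses to VGIC registers are always 4 bytes wide and prefer a
 * region's dedicated uaccess handler when one is provided, falling back to
 * the normal MMIO handler otherwise.
 */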
static int vgic_uaccess_read(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
                             gpa_t addr, u32 *val)
{
        struct vgic_io_device *iodev = kvm_to_vgic_iodev(dev);
        const struct vgic_register_region *region;
        struct kvm_vcpu *r_vcpu;

        region = vgic_get_mmio_region(vcpu, iodev, addr, sizeof(u32));
        if (!region) {
                *val = 0;
                return 0;
        }

        r_vcpu = iodev->redist_vcpu ? iodev->redist_vcpu : vcpu;
        if (region->uaccess_read)
                *val = region->uaccess_read(r_vcpu, addr, sizeof(u32));
        else
                *val = region->read(r_vcpu, addr, sizeof(u32));

        return 0;
}

static int vgic_uaccess_write(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
                              gpa_t addr, const u32 *val)
{
        struct vgic_io_device *iodev = kvm_to_vgic_iodev(dev);
        const struct vgic_register_region *region;
        struct kvm_vcpu *r_vcpu;

        region = vgic_get_mmio_region(vcpu, iodev, addr, sizeof(u32));
        if (!region)
                return 0;

        r_vcpu = iodev->redist_vcpu ? iodev->redist_vcpu : vcpu;
        if (region->uaccess_write)
                region->uaccess_write(r_vcpu, addr, sizeof(u32), *val);
        else
                region->write(r_vcpu, addr, sizeof(u32), *val);

        return 0;
}

/*
 * Userland access to VGIC registers.
 */
int vgic_uaccess(struct kvm_vcpu *vcpu, struct vgic_io_device *dev,
                 bool is_write, int offset, u32 *val)
{
        if (is_write)
                return vgic_uaccess_write(vcpu, &dev->dev, offset, val);
        else
                return vgic_uaccess_read(vcpu, &dev->dev, offset, val);
}

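/*
 * kvm_io_bus read/write callbacks (see kvm_io_gic_ops below): find the
 * register region backing the access and forward it to that region's
 * handler. Reads of unhandled offsets return zero, writes to them are
 * ignored.
 */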
static int dispatch_mmio_read(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
                              gpa_t addr, int len, void *val)
{
        struct vgic_io_device *iodev = kvm_to_vgic_iodev(dev);
        const struct vgic_register_region *region;
        unsigned long data = 0;

        region = vgic_get_mmio_region(vcpu, iodev, addr, len);
        if (!region) {
                memset(val, 0, len);
                return 0;
        }

        switch (iodev->iodev_type) {
        case IODEV_CPUIF:
                data = region->read(vcpu, addr, len);
                break;
        case IODEV_DIST:
                data = region->read(vcpu, addr, len);
                break;
        case IODEV_REDIST:
                data = region->read(iodev->redist_vcpu, addr, len);
                break;
        case IODEV_ITS:
                data = region->its_read(vcpu->kvm, iodev->its, addr, len);
                break;
        }

        vgic_data_host_to_mmio_bus(val, len, data);
        return 0;
}

static int dispatch_mmio_write(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
                               gpa_t addr, int len, const void *val)
{
        struct vgic_io_device *iodev = kvm_to_vgic_iodev(dev);
        const struct vgic_register_region *region;
        unsigned long data = vgic_data_mmio_bus_to_host(val, len);

        region = vgic_get_mmio_region(vcpu, iodev, addr, len);
        if (!region)
                return 0;

        switch (iodev->iodev_type) {
        case IODEV_CPUIF:
                region->write(vcpu, addr, len, data);
                break;
        case IODEV_DIST:
                region->write(vcpu, addr, len, data);
                break;
        case IODEV_REDIST:
                region->write(iodev->redist_vcpu, addr, len, data);
                break;
        case IODEV_ITS:
                region->its_write(vcpu->kvm, iodev->its, addr, len, data);
                break;
        }

        return 0;
}

struct kvm_io_device_ops kvm_io_gic_ops = {
        .read = dispatch_mmio_read,
        .write = dispatch_mmio_write,
};

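/*
 * Set up the distributor as an in-kernel MMIO device and register it on the
 * KVM_MMIO_BUS at the given guest physical base address.
 */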
int vgic_register_dist_iodev(struct kvm *kvm, gpa_t dist_base_address,
                             enum vgic_type type)
{
        struct vgic_io_device *io_device = &kvm->arch.vgic.dist_iodev;
        int ret = 0;
        unsigned int len;

        switch (type) {
        case VGIC_V2:
                len = vgic_v2_init_dist_iodev(io_device);
                break;
        case VGIC_V3:
                len = vgic_v3_init_dist_iodev(io_device);
                break;
        default:
                BUG_ON(1);
        }

        io_device->base_addr = dist_base_address;
        io_device->iodev_type = IODEV_DIST;
        io_device->redist_vcpu = NULL;

        mutex_lock(&kvm->slots_lock);
        ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, dist_base_address,
                                      len, &io_device->dev);
        mutex_unlock(&kvm->slots_lock);

        return ret;
}