linux/virt/kvm/arm/vgic/vgic-mmio.c
/*
 * VGIC MMIO handling functions
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/bitops.h>
#include <linux/bsearch.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <kvm/iodev.h>
#include <kvm/arm_arch_timer.h>
#include <kvm/arm_vgic.h>

#include "vgic.h"
#include "vgic-mmio.h"

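/*
 * Trivial handlers: read-as-zero (RAZ), read-as-one (RAO) and write-ignore
 * (WI) accessors used for reserved or unimplemented register ranges.
 */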
unsigned long vgic_mmio_read_raz(struct kvm_vcpu *vcpu,
                                 gpa_t addr, unsigned int len)
{
        return 0;
}

unsigned long vgic_mmio_read_rao(struct kvm_vcpu *vcpu,
                                 gpa_t addr, unsigned int len)
{
        return -1UL;
}

void vgic_mmio_write_wi(struct kvm_vcpu *vcpu, gpa_t addr,
                        unsigned int len, unsigned long val)
{
        /* Ignore */
}

int vgic_mmio_uaccess_write_wi(struct kvm_vcpu *vcpu, gpa_t addr,
                               unsigned int len, unsigned long val)
{
        /* Ignore */
        return 0;
}

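/*
 * GICD_IGROUPR / GICR_IGROUPR0 handlers: report and update the interrupt
 * group of each interrupt covered by the access, one bit per interrupt.
 */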
unsigned long vgic_mmio_read_group(struct kvm_vcpu *vcpu,
                                   gpa_t addr, unsigned int len)
{
        u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
        u32 value = 0;
        int i;

        /* Loop over all IRQs affected by this read */
        for (i = 0; i < len * 8; i++) {
                struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

                if (irq->group)
                        value |= BIT(i);

                vgic_put_irq(vcpu->kvm, irq);
        }

        return value;
}

void vgic_mmio_write_group(struct kvm_vcpu *vcpu, gpa_t addr,
                           unsigned int len, unsigned long val)
{
        u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
        int i;
        unsigned long flags;

        for (i = 0; i < len * 8; i++) {
                struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

                spin_lock_irqsave(&irq->irq_lock, flags);
                irq->group = !!(val & BIT(i));
                vgic_queue_irq_unlock(vcpu->kvm, irq, flags);

                vgic_put_irq(vcpu->kvm, irq);
        }
}

/*
 * Read accesses to both GICD_ICENABLER and GICD_ISENABLER return the value
 * of the enabled bit, so there is only one function for both here.
 */
unsigned long vgic_mmio_read_enable(struct kvm_vcpu *vcpu,
                                    gpa_t addr, unsigned int len)
{
        u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
        u32 value = 0;
        int i;

        /* Loop over all IRQs affected by this read */
        for (i = 0; i < len * 8; i++) {
                struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

                if (irq->enabled)
                        value |= (1U << i);

                vgic_put_irq(vcpu->kvm, irq);
        }

        return value;
}

void vgic_mmio_write_senable(struct kvm_vcpu *vcpu,
                             gpa_t addr, unsigned int len,
                             unsigned long val)
{
        u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
        int i;
        unsigned long flags;

        for_each_set_bit(i, &val, len * 8) {
                struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

                spin_lock_irqsave(&irq->irq_lock, flags);
                irq->enabled = true;
                vgic_queue_irq_unlock(vcpu->kvm, irq, flags);

                vgic_put_irq(vcpu->kvm, irq);
        }
}

void vgic_mmio_write_cenable(struct kvm_vcpu *vcpu,
                             gpa_t addr, unsigned int len,
                             unsigned long val)
{
        u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
        int i;
        unsigned long flags;

        for_each_set_bit(i, &val, len * 8) {
                struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

                spin_lock_irqsave(&irq->irq_lock, flags);

                irq->enabled = false;

                spin_unlock_irqrestore(&irq->irq_lock, flags);
                vgic_put_irq(vcpu->kvm, irq);
        }
}

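/*
 * GICD_ISPENDR and GICD_ICPENDR reads both return the current pending
 * state, taking the per-IRQ lock so that the pending latch and the sampled
 * line level are read consistently.
 */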
unsigned long vgic_mmio_read_pending(struct kvm_vcpu *vcpu,
                                     gpa_t addr, unsigned int len)
{
        u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
        u32 value = 0;
        int i;

        /* Loop over all IRQs affected by this read */
        for (i = 0; i < len * 8; i++) {
                struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
                unsigned long flags;

                spin_lock_irqsave(&irq->irq_lock, flags);
                if (irq_is_pending(irq))
                        value |= (1U << i);
                spin_unlock_irqrestore(&irq->irq_lock, flags);

                vgic_put_irq(vcpu->kvm, irq);
        }

        return value;
}

/*
 * This function will return the VCPU that performed the MMIO access and
 * trapped from within the VM, and will return NULL if this is a userspace
 * access.
 *
 * We can disable preemption locally around accessing the per-CPU variable,
 * and use the resolved vcpu pointer after enabling preemption again, because
 * even if the current thread is migrated to another CPU, reading the per-CPU
 * value later will give us the same value as we update the per-CPU variable
 * in the preempt notifier handlers.
 */
static struct kvm_vcpu *vgic_get_mmio_requester_vcpu(void)
{
        struct kvm_vcpu *vcpu;

        preempt_disable();
        vcpu = kvm_arm_get_running_vcpu();
        preempt_enable();
        return vcpu;
}

/* Must be called with irq->irq_lock held */
static void vgic_hw_irq_spending(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
                                 bool is_uaccess)
{
        if (is_uaccess)
                return;

        irq->pending_latch = true;
        vgic_irq_set_phys_active(irq, true);
}

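/*
 * GICD_ISPENDR write: set the pending latch for each interrupt whose bit is
 * set in the written value. For hardware mapped (irq->hw) interrupts the
 * physical active state is also set, unless the write comes from userspace,
 * which only touches the virtual state.
 */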
void vgic_mmio_write_spending(struct kvm_vcpu *vcpu,
                              gpa_t addr, unsigned int len,
                              unsigned long val)
{
        bool is_uaccess = !vgic_get_mmio_requester_vcpu();
        u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
        int i;
        unsigned long flags;

        for_each_set_bit(i, &val, len * 8) {
                struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

                spin_lock_irqsave(&irq->irq_lock, flags);
                if (irq->hw)
                        vgic_hw_irq_spending(vcpu, irq, is_uaccess);
                else
                        irq->pending_latch = true;
                vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
                vgic_put_irq(vcpu->kvm, irq);
        }
}

/* Must be called with irq->irq_lock held */
static void vgic_hw_irq_cpending(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
                                 bool is_uaccess)
{
        if (is_uaccess)
                return;

        irq->pending_latch = false;

        /*
         * We don't want the guest to effectively mask the physical
         * interrupt by doing a write to SPENDR followed by a write to
         * CPENDR for HW interrupts, so we clear the active state on
         * the physical side if the virtual interrupt is not active.
         * This may lead to taking an additional interrupt on the
         * host, but that should not be a problem as the worst that
         * can happen is an additional vgic injection.  We also clear
         * the pending state to maintain proper semantics for edge HW
         * interrupts.
         */
        vgic_irq_set_phys_pending(irq, false);
        if (!irq->active)
                vgic_irq_set_phys_active(irq, false);
}

void vgic_mmio_write_cpending(struct kvm_vcpu *vcpu,
                              gpa_t addr, unsigned int len,
                              unsigned long val)
{
        bool is_uaccess = !vgic_get_mmio_requester_vcpu();
        u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
        int i;
        unsigned long flags;

        for_each_set_bit(i, &val, len * 8) {
                struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

                spin_lock_irqsave(&irq->irq_lock, flags);

                if (irq->hw)
                        vgic_hw_irq_cpending(vcpu, irq, is_uaccess);
                else
                        irq->pending_latch = false;

                spin_unlock_irqrestore(&irq->irq_lock, flags);
                vgic_put_irq(vcpu->kvm, irq);
        }
}

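/*
 * GICD_ISACTIVER and GICD_ICACTIVER reads both return the current active
 * state, one bit per interrupt.
 */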
unsigned long vgic_mmio_read_active(struct kvm_vcpu *vcpu,
                                    gpa_t addr, unsigned int len)
{
        u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
        u32 value = 0;
        int i;

        /* Loop over all IRQs affected by this read */
        for (i = 0; i < len * 8; i++) {
                struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

                if (irq->active)
                        value |= (1U << i);

                vgic_put_irq(vcpu->kvm, irq);
        }

        return value;
}

/* Must be called with irq->irq_lock held */
static void vgic_hw_irq_change_active(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
                                      bool active, bool is_uaccess)
{
        if (is_uaccess)
                return;

        irq->active = active;
        vgic_irq_set_phys_active(irq, active);
}

static void vgic_mmio_change_active(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
                                    bool active)
{
        unsigned long flags;
        struct kvm_vcpu *requester_vcpu = vgic_get_mmio_requester_vcpu();

        spin_lock_irqsave(&irq->irq_lock, flags);

        /*
         * If this virtual IRQ was written into a list register, we
         * have to make sure the CPU that runs the VCPU thread has
         * synced back the LR state to the struct vgic_irq.
         *
         * As long as the conditions below are true, we know the VCPU thread
         * may be on its way back from the guest (we kicked the VCPU thread in
         * vgic_change_active_prepare) and still has to sync back this IRQ,
         * so we release and re-acquire the spin_lock to let the other thread
         * sync back the IRQ.
         *
         * When accessing VGIC state from user space, requester_vcpu is
         * NULL, which is fine, because we guarantee that no VCPUs are running
         * when accessing VGIC state from user space so irq->vcpu->cpu is
         * always -1.
         */
        while (irq->vcpu && /* IRQ may have state in an LR somewhere */
               irq->vcpu != requester_vcpu && /* Current thread is not the VCPU thread */
               irq->vcpu->cpu != -1) /* VCPU thread is running */
                cond_resched_lock(&irq->irq_lock);

        if (irq->hw) {
                vgic_hw_irq_change_active(vcpu, irq, active, !requester_vcpu);
        } else {
                u32 model = vcpu->kvm->arch.vgic.vgic_model;

                irq->active = active;
                if (model == KVM_DEV_TYPE_ARM_VGIC_V2 &&
                    active && vgic_irq_is_sgi(irq->intid))
                        irq->active_source = requester_vcpu->vcpu_id;
        }

        if (irq->active)
                vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
        else
                spin_unlock_irqrestore(&irq->irq_lock, flags);
}

/*
 * If we are fiddling with an IRQ's active state, we have to make sure the IRQ
 * is not queued on some running VCPU's LRs, because then the change to the
 * active state can be overwritten when the VCPU's state is synced coming back
 * from the guest.
 *
 * For shared interrupts, we have to stop all the VCPUs because interrupts can
 * be migrated while we don't hold the IRQ locks and we don't want to be
 * chasing moving targets.
 *
 * For private interrupts we don't have to do anything because userspace
 * accesses to the VGIC state already require all VCPUs to be stopped, and
 * only the VCPU itself can modify its private interrupts active state, which
 * guarantees that the VCPU is not running.
 */
static void vgic_change_active_prepare(struct kvm_vcpu *vcpu, u32 intid)
{
        if (intid > VGIC_NR_PRIVATE_IRQS)
                kvm_arm_halt_guest(vcpu->kvm);
}

/* See vgic_change_active_prepare */
static void vgic_change_active_finish(struct kvm_vcpu *vcpu, u32 intid)
{
        if (intid > VGIC_NR_PRIVATE_IRQS)
                kvm_arm_resume_guest(vcpu->kvm);
}

static void __vgic_mmio_write_cactive(struct kvm_vcpu *vcpu,
                                      gpa_t addr, unsigned int len,
                                      unsigned long val)
{
        u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
        int i;

        for_each_set_bit(i, &val, len * 8) {
                struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
                vgic_mmio_change_active(vcpu, irq, false);
                vgic_put_irq(vcpu->kvm, irq);
        }
}

void vgic_mmio_write_cactive(struct kvm_vcpu *vcpu,
                             gpa_t addr, unsigned int len,
                             unsigned long val)
{
        u32 intid = VGIC_ADDR_TO_INTID(addr, 1);

        mutex_lock(&vcpu->kvm->lock);
        vgic_change_active_prepare(vcpu, intid);

        __vgic_mmio_write_cactive(vcpu, addr, len, val);

        vgic_change_active_finish(vcpu, intid);
        mutex_unlock(&vcpu->kvm->lock);
}

int vgic_mmio_uaccess_write_cactive(struct kvm_vcpu *vcpu,
                                    gpa_t addr, unsigned int len,
                                    unsigned long val)
{
        __vgic_mmio_write_cactive(vcpu, addr, len, val);
        return 0;
}

static void __vgic_mmio_write_sactive(struct kvm_vcpu *vcpu,
                                      gpa_t addr, unsigned int len,
                                      unsigned long val)
{
        u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
        int i;

        for_each_set_bit(i, &val, len * 8) {
                struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
                vgic_mmio_change_active(vcpu, irq, true);
                vgic_put_irq(vcpu->kvm, irq);
        }
}

void vgic_mmio_write_sactive(struct kvm_vcpu *vcpu,
                             gpa_t addr, unsigned int len,
                             unsigned long val)
{
        u32 intid = VGIC_ADDR_TO_INTID(addr, 1);

        mutex_lock(&vcpu->kvm->lock);
        vgic_change_active_prepare(vcpu, intid);

        __vgic_mmio_write_sactive(vcpu, addr, len, val);

        vgic_change_active_finish(vcpu, intid);
        mutex_unlock(&vcpu->kvm->lock);
}

int vgic_mmio_uaccess_write_sactive(struct kvm_vcpu *vcpu,
                                    gpa_t addr, unsigned int len,
                                    unsigned long val)
{
        __vgic_mmio_write_sactive(vcpu, addr, len, val);
        return 0;
}

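/*
 * GICD_IPRIORITYR read: one priority byte per interrupt, assembled into the
 * access-sized value.
 */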
unsigned long vgic_mmio_read_priority(struct kvm_vcpu *vcpu,
                                      gpa_t addr, unsigned int len)
{
        u32 intid = VGIC_ADDR_TO_INTID(addr, 8);
        int i;
        u64 val = 0;

        for (i = 0; i < len; i++) {
                struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

                val |= (u64)irq->priority << (i * 8);

                vgic_put_irq(vcpu->kvm, irq);
        }

        return val;
}

/*
 * We currently don't handle changing the priority of an interrupt that
 * is already pending on a VCPU. If there is a need for this, we would
 * need to make this VCPU exit and re-evaluate the priorities, potentially
 * leading to this interrupt getting presented now to the guest (if it has
 * been masked by the priority mask before).
 */
void vgic_mmio_write_priority(struct kvm_vcpu *vcpu,
                              gpa_t addr, unsigned int len,
                              unsigned long val)
{
        u32 intid = VGIC_ADDR_TO_INTID(addr, 8);
        int i;
        unsigned long flags;

        for (i = 0; i < len; i++) {
                struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

                spin_lock_irqsave(&irq->irq_lock, flags);
                /* Narrow the priority range to what we actually support */
                irq->priority = (val >> (i * 8)) & GENMASK(7, 8 - VGIC_PRI_BITS);
                spin_unlock_irqrestore(&irq->irq_lock, flags);

                vgic_put_irq(vcpu->kvm, irq);
        }
}

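/*
 * GICD_ICFGR read: two configuration bits per interrupt; bit[1] of each
 * field is set for edge triggered interrupts and clear for level triggered
 * ones.
 */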
unsigned long vgic_mmio_read_config(struct kvm_vcpu *vcpu,
                                    gpa_t addr, unsigned int len)
{
        u32 intid = VGIC_ADDR_TO_INTID(addr, 2);
        u32 value = 0;
        int i;

        for (i = 0; i < len * 4; i++) {
                struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

                if (irq->config == VGIC_CONFIG_EDGE)
                        value |= (2U << (i * 2));

                vgic_put_irq(vcpu->kvm, irq);
        }

        return value;
}

void vgic_mmio_write_config(struct kvm_vcpu *vcpu,
                            gpa_t addr, unsigned int len,
                            unsigned long val)
{
        u32 intid = VGIC_ADDR_TO_INTID(addr, 2);
        int i;
        unsigned long flags;

        for (i = 0; i < len * 4; i++) {
                struct vgic_irq *irq;

                /*
                 * The configuration cannot be changed for SGIs in general;
                 * for PPIs this is IMPLEMENTATION DEFINED. The arch timer
                 * code relies on PPIs being level triggered, so we also
                 * make them read-only here.
                 */
                if (intid + i < VGIC_NR_PRIVATE_IRQS)
                        continue;

                irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
                spin_lock_irqsave(&irq->irq_lock, flags);

                if (test_bit(i * 2 + 1, &val))
                        irq->config = VGIC_CONFIG_EDGE;
                else
                        irq->config = VGIC_CONFIG_LEVEL;

                spin_unlock_irqrestore(&irq->irq_lock, flags);
                vgic_put_irq(vcpu->kvm, irq);
        }
}

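/*
 * Userspace save/restore helpers for the line level of level triggered
 * interrupts: each call covers a block of 32 interrupts starting at intid.
 */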
u64 vgic_read_irq_line_level_info(struct kvm_vcpu *vcpu, u32 intid)
{
        int i;
        u64 val = 0;
        int nr_irqs = vcpu->kvm->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS;

        for (i = 0; i < 32; i++) {
                struct vgic_irq *irq;

                if ((intid + i) < VGIC_NR_SGIS || (intid + i) >= nr_irqs)
                        continue;

                irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
                if (irq->config == VGIC_CONFIG_LEVEL && irq->line_level)
                        val |= (1U << i);

                vgic_put_irq(vcpu->kvm, irq);
        }

        return val;
}

void vgic_write_irq_line_level_info(struct kvm_vcpu *vcpu, u32 intid,
                                    const u64 val)
{
        int i;
        int nr_irqs = vcpu->kvm->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS;
        unsigned long flags;

        for (i = 0; i < 32; i++) {
                struct vgic_irq *irq;
                bool new_level;

                if ((intid + i) < VGIC_NR_SGIS || (intid + i) >= nr_irqs)
                        continue;

                irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

                /*
                 * The line level is set irrespective of irq type
                 * (level or edge) so that restoring it does not depend
                 * on the VM having restored the irq configuration first.
                 */
                new_level = !!(val & (1U << i));
                spin_lock_irqsave(&irq->irq_lock, flags);
                irq->line_level = new_level;
                if (new_level)
                        vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
                else
                        spin_unlock_irqrestore(&irq->irq_lock, flags);

                vgic_put_irq(vcpu->kvm, irq);
        }
}

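/*
 * bsearch() comparator for the sorted register region table: matches when
 * the offset falls inside [reg_offset, reg_offset + len).
 */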
static int match_region(const void *key, const void *elt)
{
        const unsigned int offset = (unsigned long)key;
        const struct vgic_register_region *region = elt;

        if (offset < region->reg_offset)
                return -1;

        if (offset >= region->reg_offset + region->len)
                return 1;

        return 0;
}

const struct vgic_register_region *
vgic_find_mmio_region(const struct vgic_register_region *regions,
                      int nr_regions, unsigned int offset)
{
        return bsearch((void *)(uintptr_t)offset, regions, nr_regions,
                       sizeof(regions[0]), match_region);
}

void vgic_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr)
{
        if (kvm_vgic_global_state.type == VGIC_V2)
                vgic_v2_set_vmcr(vcpu, vmcr);
        else
                vgic_v3_set_vmcr(vcpu, vmcr);
}

void vgic_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr)
{
        if (kvm_vgic_global_state.type == VGIC_V2)
                vgic_v2_get_vmcr(vcpu, vmcr);
        else
                vgic_v3_get_vmcr(vcpu, vmcr);
}

/*
 * kvm_mmio_read_buf() returns a value in a format where it can be converted
 * to a byte array and be directly observed as the guest wanted it to appear
 * in memory if it had done the store itself, which is LE for the GIC, as the
 * guest knows the GIC is always LE.
 *
 * We convert this value to the CPU's native format to deal with it as a data
 * value.
 */
unsigned long vgic_data_mmio_bus_to_host(const void *val, unsigned int len)
{
        unsigned long data = kvm_mmio_read_buf(val, len);

        switch (len) {
        case 1:
                return data;
        case 2:
                return le16_to_cpu(data);
        case 4:
                return le32_to_cpu(data);
        default:
                return le64_to_cpu(data);
        }
}

/*
 * kvm_mmio_write_buf() expects a value in a format such that if converted to
 * a byte array it is observed as the guest would see it if it could perform
 * the load directly.  Since the GIC is LE, and the guest knows this, the
 * guest expects a value in little endian format.
 *
 * We convert the data value from the CPU's native format to LE so that the
 * value is returned in the proper format.
 */
void vgic_data_host_to_mmio_bus(void *buf, unsigned int len,
                                unsigned long data)
{
        switch (len) {
        case 1:
                break;
        case 2:
                data = cpu_to_le16(data);
                break;
        case 4:
                data = cpu_to_le32(data);
                break;
        default:
                data = cpu_to_le64(data);
        }

        kvm_mmio_write_buf(buf, len, data);
}

static
struct vgic_io_device *kvm_to_vgic_iodev(const struct kvm_io_device *dev)
{
        return container_of(dev, struct vgic_io_device, dev);
}

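/*
 * Check that the access width is allowed for this region, that the address
 * is naturally aligned, and that per-IRQ registers are not accessed beyond
 * the number of IRQs allocated for this VM.
 */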
static bool check_region(const struct kvm *kvm,
                         const struct vgic_register_region *region,
                         gpa_t addr, int len)
{
        int flags, nr_irqs = kvm->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS;

        switch (len) {
        case sizeof(u8):
                flags = VGIC_ACCESS_8bit;
                break;
        case sizeof(u32):
                flags = VGIC_ACCESS_32bit;
                break;
        case sizeof(u64):
                flags = VGIC_ACCESS_64bit;
                break;
        default:
                return false;
        }

        if ((region->access_flags & flags) && IS_ALIGNED(addr, len)) {
                if (!region->bits_per_irq)
                        return true;

                /* Do we access a non-allocated IRQ? */
                return VGIC_ADDR_TO_INTID(addr, region->bits_per_irq) < nr_irqs;
        }

        return false;
}

const struct vgic_register_region *
vgic_get_mmio_region(struct kvm_vcpu *vcpu, struct vgic_io_device *iodev,
                     gpa_t addr, int len)
{
        const struct vgic_register_region *region;

        region = vgic_find_mmio_region(iodev->regions, iodev->nr_regions,
                                       addr - iodev->base_addr);
        if (!region || !check_region(vcpu->kvm, region, addr, len))
                return NULL;

        return region;
}

static int vgic_uaccess_read(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
                             gpa_t addr, u32 *val)
{
        struct vgic_io_device *iodev = kvm_to_vgic_iodev(dev);
        const struct vgic_register_region *region;
        struct kvm_vcpu *r_vcpu;

        region = vgic_get_mmio_region(vcpu, iodev, addr, sizeof(u32));
        if (!region) {
                *val = 0;
                return 0;
        }

        r_vcpu = iodev->redist_vcpu ? iodev->redist_vcpu : vcpu;
        if (region->uaccess_read)
                *val = region->uaccess_read(r_vcpu, addr, sizeof(u32));
        else
                *val = region->read(r_vcpu, addr, sizeof(u32));

        return 0;
}

static int vgic_uaccess_write(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
                              gpa_t addr, const u32 *val)
{
        struct vgic_io_device *iodev = kvm_to_vgic_iodev(dev);
        const struct vgic_register_region *region;
        struct kvm_vcpu *r_vcpu;

        region = vgic_get_mmio_region(vcpu, iodev, addr, sizeof(u32));
        if (!region)
                return 0;

        r_vcpu = iodev->redist_vcpu ? iodev->redist_vcpu : vcpu;
        if (region->uaccess_write)
                return region->uaccess_write(r_vcpu, addr, sizeof(u32), *val);

        region->write(r_vcpu, addr, sizeof(u32), *val);
        return 0;
}

/*
 * Userland access to VGIC registers.
 */
int vgic_uaccess(struct kvm_vcpu *vcpu, struct vgic_io_device *dev,
                 bool is_write, int offset, u32 *val)
{
        if (is_write)
                return vgic_uaccess_write(vcpu, &dev->dev, offset, val);
        else
                return vgic_uaccess_read(vcpu, &dev->dev, offset, val);
}

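/*
 * kvm_io_device read/write callbacks: decode the accessed register region,
 * dispatch to the per-region handler for the distributor, redistributor,
 * CPU interface or ITS, and convert between bus (little endian) and host
 * representation.
 */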
static int dispatch_mmio_read(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
                              gpa_t addr, int len, void *val)
{
        struct vgic_io_device *iodev = kvm_to_vgic_iodev(dev);
        const struct vgic_register_region *region;
        unsigned long data = 0;

        region = vgic_get_mmio_region(vcpu, iodev, addr, len);
        if (!region) {
                memset(val, 0, len);
                return 0;
        }

        switch (iodev->iodev_type) {
        case IODEV_CPUIF:
                data = region->read(vcpu, addr, len);
                break;
        case IODEV_DIST:
                data = region->read(vcpu, addr, len);
                break;
        case IODEV_REDIST:
                data = region->read(iodev->redist_vcpu, addr, len);
                break;
        case IODEV_ITS:
                data = region->its_read(vcpu->kvm, iodev->its, addr, len);
                break;
        }

        vgic_data_host_to_mmio_bus(val, len, data);
        return 0;
}

static int dispatch_mmio_write(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
                               gpa_t addr, int len, const void *val)
{
        struct vgic_io_device *iodev = kvm_to_vgic_iodev(dev);
        const struct vgic_register_region *region;
        unsigned long data = vgic_data_mmio_bus_to_host(val, len);

        region = vgic_get_mmio_region(vcpu, iodev, addr, len);
        if (!region)
                return 0;

        switch (iodev->iodev_type) {
        case IODEV_CPUIF:
                region->write(vcpu, addr, len, data);
                break;
        case IODEV_DIST:
                region->write(vcpu, addr, len, data);
                break;
        case IODEV_REDIST:
                region->write(iodev->redist_vcpu, addr, len, data);
                break;
        case IODEV_ITS:
                region->its_write(vcpu->kvm, iodev->its, addr, len, data);
                break;
        }

        return 0;
}

struct kvm_io_device_ops kvm_io_gic_ops = {
        .read = dispatch_mmio_read,
        .write = dispatch_mmio_write,
};

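/*
 * Register the distributor MMIO frame of the given VGIC model on the KVM
 * MMIO bus, so that guest accesses to it are routed to the handlers above.
 */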
int vgic_register_dist_iodev(struct kvm *kvm, gpa_t dist_base_address,
                             enum vgic_type type)
{
        struct vgic_io_device *io_device = &kvm->arch.vgic.dist_iodev;
        int ret = 0;
        unsigned int len;

        switch (type) {
        case VGIC_V2:
                len = vgic_v2_init_dist_iodev(io_device);
                break;
        case VGIC_V3:
                len = vgic_v3_init_dist_iodev(io_device);
                break;
        default:
                BUG_ON(1);
        }

        io_device->base_addr = dist_base_address;
        io_device->iodev_type = IODEV_DIST;
        io_device->redist_vcpu = NULL;

        mutex_lock(&kvm->slots_lock);
        ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, dist_base_address,
                                      len, &io_device->dev);
        mutex_unlock(&kvm->slots_lock);

        return ret;
}