linux/virt/kvm/arm/vgic/vgic.c
   1/*
   2 * Copyright (C) 2015, 2016 ARM Ltd.
   3 *
   4 * This program is free software; you can redistribute it and/or modify
   5 * it under the terms of the GNU General Public License version 2 as
   6 * published by the Free Software Foundation.
   7 *
   8 * This program is distributed in the hope that it will be useful,
   9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  11 * GNU General Public License for more details.
  12 *
  13 * You should have received a copy of the GNU General Public License
  14 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
  15 */
  16
  17#include <linux/interrupt.h>
  18#include <linux/irq.h>
  19#include <linux/kvm.h>
  20#include <linux/kvm_host.h>
  21#include <linux/list_sort.h>
  22#include <linux/nospec.h>
  23
  24#include <asm/kvm_hyp.h>
  25
  26#include "vgic.h"
  27
  28#define CREATE_TRACE_POINTS
  29#include "trace.h"
  30
  31#ifdef CONFIG_DEBUG_SPINLOCK
  32#define DEBUG_SPINLOCK_BUG_ON(p) BUG_ON(p)
  33#else
  34#define DEBUG_SPINLOCK_BUG_ON(p)
  35#endif
  36
  37struct vgic_global kvm_vgic_global_state __ro_after_init = {
  38        .gicv3_cpuif = STATIC_KEY_FALSE_INIT,
  39};
  40
  41/*
  42 * Locking order is always:
  43 * kvm->lock (mutex)
  44 *   its->cmd_lock (mutex)
  45 *     its->its_lock (mutex)
  46 *       vgic_cpu->ap_list_lock         must be taken with IRQs disabled
  47 *         kvm->lpi_list_lock           must be taken with IRQs disabled
  48 *           vgic_irq->irq_lock         must be taken with IRQs disabled
  49 *
  50 * As the ap_list_lock might be taken from the timer interrupt handler,
  51 * we have to disable IRQs before taking this lock and everything lower
  52 * than it.
  53 *
  54 * If you need to take multiple locks, always take the upper lock first,
  55 * then the lower ones, e.g. first take the its_lock, then the irq_lock.
  56 * If you are already holding a lock and need to take a higher one, you
   57 * have to drop the lower ranking lock first and re-acquire it after having
  58 * taken the upper one.
  59 *
  60 * When taking more than one ap_list_lock at the same time, always take the
  61 * lowest numbered VCPU's ap_list_lock first, so:
  62 *   vcpuX->vcpu_id < vcpuY->vcpu_id:
  63 *     spin_lock(vcpuX->arch.vgic_cpu.ap_list_lock);
  64 *     spin_lock(vcpuY->arch.vgic_cpu.ap_list_lock);
  65 *
  66 * Since the VGIC must support injecting virtual interrupts from ISRs, we have
  67 * to use the spin_lock_irqsave/spin_unlock_irqrestore versions of outer
  68 * spinlocks for any lock that may be taken while injecting an interrupt.
  69 */
  70
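/*
 * Illustrative sketch of the dual ap_list_lock rule above: when two
 * VCPUs' ap_lists must be held at once, the lowest vcpu_id is locked
 * first, with IRQs disabled.  The helper name is made up for the
 * example; vgic_prune_ap_list() below does this for real when it
 * migrates an interrupt.
 */
static void example_lock_two_ap_lists(struct kvm_vcpu *x, struct kvm_vcpu *y)
{
        struct kvm_vcpu *vcpuA, *vcpuB;
        unsigned long flags;

        /* Always take the lowest numbered VCPU's ap_list_lock first. */
        if (x->vcpu_id < y->vcpu_id) {
                vcpuA = x;
                vcpuB = y;
        } else {
                vcpuA = y;
                vcpuB = x;
        }

        spin_lock_irqsave(&vcpuA->arch.vgic_cpu.ap_list_lock, flags);
        spin_lock_nested(&vcpuB->arch.vgic_cpu.ap_list_lock,
                         SINGLE_DEPTH_NESTING);

        /* ... move an interrupt from one ap_list to the other ... */

        spin_unlock(&vcpuB->arch.vgic_cpu.ap_list_lock);
        spin_unlock_irqrestore(&vcpuA->arch.vgic_cpu.ap_list_lock, flags);
}
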
  71/*
  72 * Iterate over the VM's list of mapped LPIs to find the one with a
  73 * matching interrupt ID and return a reference to the IRQ structure.
  74 */
  75static struct vgic_irq *vgic_get_lpi(struct kvm *kvm, u32 intid)
  76{
  77        struct vgic_dist *dist = &kvm->arch.vgic;
  78        struct vgic_irq *irq = NULL;
  79        unsigned long flags;
  80
  81        spin_lock_irqsave(&dist->lpi_list_lock, flags);
  82
  83        list_for_each_entry(irq, &dist->lpi_list_head, lpi_list) {
  84                if (irq->intid != intid)
  85                        continue;
  86
  87                /*
  88                 * This increases the refcount, the caller is expected to
  89                 * call vgic_put_irq() later once it's finished with the IRQ.
  90                 */
  91                vgic_get_irq_kref(irq);
  92                goto out_unlock;
  93        }
  94        irq = NULL;
  95
  96out_unlock:
  97        spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
  98
  99        return irq;
 100}
 101
 102/*
 103 * This looks up the virtual interrupt ID to get the corresponding
 104 * struct vgic_irq. It also increases the refcount, so any caller is expected
 105 * to call vgic_put_irq() once it's finished with this IRQ.
 106 */
 107struct vgic_irq *vgic_get_irq(struct kvm *kvm, struct kvm_vcpu *vcpu,
 108                              u32 intid)
 109{
 110        /* SGIs and PPIs */
 111        if (intid <= VGIC_MAX_PRIVATE) {
  112                intid = array_index_nospec(intid, VGIC_MAX_PRIVATE + 1);
 113                return &vcpu->arch.vgic_cpu.private_irqs[intid];
 114        }
 115
 116        /* SPIs */
 117        if (intid <= VGIC_MAX_SPI) {
  118                intid = array_index_nospec(intid, VGIC_MAX_SPI + 1);
 119                return &kvm->arch.vgic.spis[intid - VGIC_NR_PRIVATE_IRQS];
 120        }
 121
 122        /* LPIs */
 123        if (intid >= VGIC_MIN_LPI)
 124                return vgic_get_lpi(kvm, intid);
 125
 126        WARN(1, "Looking up struct vgic_irq for reserved INTID");
 127        return NULL;
 128}
 129
 130/*
 131 * We can't do anything in here, because we lack the kvm pointer to
 132 * lock and remove the item from the lpi_list. So we keep this function
 133 * empty and use the return value of kref_put() to trigger the freeing.
 134 */
 135static void vgic_irq_release(struct kref *ref)
 136{
 137}
 138
 139void vgic_put_irq(struct kvm *kvm, struct vgic_irq *irq)
 140{
 141        struct vgic_dist *dist = &kvm->arch.vgic;
 142        unsigned long flags;
 143
 144        if (irq->intid < VGIC_MIN_LPI)
 145                return;
 146
 147        spin_lock_irqsave(&dist->lpi_list_lock, flags);
 148        if (!kref_put(&irq->refcount, vgic_irq_release)) {
 149                spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
 150                return;
  151        }
 152
 153        list_del(&irq->lpi_list);
 154        dist->lpi_list_count--;
 155        spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
 156
 157        kfree(irq);
 158}
 159
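/*
 * Illustrative sketch of the lookup/put contract described above: a
 * caller of vgic_get_irq() owns a reference (which keeps an LPI alive)
 * until it calls vgic_put_irq().  The helper and the field it samples
 * are arbitrary examples.
 */
static bool example_intid_is_enabled(struct kvm *kvm, struct kvm_vcpu *vcpu,
                                     u32 intid)
{
        struct vgic_irq *irq = vgic_get_irq(kvm, vcpu, intid);
        unsigned long flags;
        bool enabled;

        if (!irq)
                return false;

        spin_lock_irqsave(&irq->irq_lock, flags);
        enabled = irq->enabled;
        spin_unlock_irqrestore(&irq->irq_lock, flags);

        /* Drop the reference taken by vgic_get_irq(). */
        vgic_put_irq(kvm, irq);

        return enabled;
}
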
 160void vgic_irq_set_phys_pending(struct vgic_irq *irq, bool pending)
 161{
 162        WARN_ON(irq_set_irqchip_state(irq->host_irq,
 163                                      IRQCHIP_STATE_PENDING,
 164                                      pending));
 165}
 166
 167bool vgic_get_phys_line_level(struct vgic_irq *irq)
 168{
 169        bool line_level;
 170
 171        BUG_ON(!irq->hw);
 172
 173        if (irq->get_input_level)
 174                return irq->get_input_level(irq->intid);
 175
 176        WARN_ON(irq_get_irqchip_state(irq->host_irq,
 177                                      IRQCHIP_STATE_PENDING,
 178                                      &line_level));
 179        return line_level;
 180}
 181
 182/* Set/Clear the physical active state */
 183void vgic_irq_set_phys_active(struct vgic_irq *irq, bool active)
 184{
 185
 186        BUG_ON(!irq->hw);
 187        WARN_ON(irq_set_irqchip_state(irq->host_irq,
 188                                      IRQCHIP_STATE_ACTIVE,
 189                                      active));
 190}
 191
 192/**
  193 * vgic_target_oracle - compute the target vcpu for an irq
 194 *
 195 * @irq:        The irq to route. Must be already locked.
 196 *
 197 * Based on the current state of the interrupt (enabled, pending,
 198 * active, vcpu and target_vcpu), compute the next vcpu this should be
 199 * given to. Return NULL if this shouldn't be injected at all.
 200 *
 201 * Requires the IRQ lock to be held.
 202 */
 203static struct kvm_vcpu *vgic_target_oracle(struct vgic_irq *irq)
 204{
 205        DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&irq->irq_lock));
 206
 207        /* If the interrupt is active, it must stay on the current vcpu */
 208        if (irq->active)
 209                return irq->vcpu ? : irq->target_vcpu;
 210
 211        /*
 212         * If the IRQ is not active but enabled and pending, we should direct
 213         * it to its configured target VCPU.
 214         * If the distributor is disabled, pending interrupts shouldn't be
 215         * forwarded.
 216         */
 217        if (irq->enabled && irq_is_pending(irq)) {
 218                if (unlikely(irq->target_vcpu &&
 219                             !irq->target_vcpu->kvm->arch.vgic.enabled))
 220                        return NULL;
 221
 222                return irq->target_vcpu;
 223        }
 224
 225        /* If neither active nor pending and enabled, then this IRQ should not
 226         * be queued to any VCPU.
 227         */
 228        return NULL;
 229}
 230
 231/*
 232 * The order of items in the ap_lists defines how we'll pack things in LRs as
 233 * well, the first items in the list being the first things populated in the
 234 * LRs.
 235 *
 236 * A hard rule is that active interrupts can never be pushed out of the LRs
 237 * (and therefore take priority) since we cannot reliably trap on deactivation
 238 * of IRQs and therefore they have to be present in the LRs.
 239 *
 240 * Otherwise things should be sorted by the priority field and the GIC
 241 * hardware support will take care of preemption of priority groups etc.
 242 *
 243 * Return negative if "a" sorts before "b", 0 to preserve order, and positive
 244 * to sort "b" before "a".
 245 */
 246static int vgic_irq_cmp(void *priv, struct list_head *a, struct list_head *b)
 247{
 248        struct vgic_irq *irqa = container_of(a, struct vgic_irq, ap_list);
 249        struct vgic_irq *irqb = container_of(b, struct vgic_irq, ap_list);
 250        bool penda, pendb;
 251        int ret;
 252
 253        spin_lock(&irqa->irq_lock);
 254        spin_lock_nested(&irqb->irq_lock, SINGLE_DEPTH_NESTING);
 255
 256        if (irqa->active || irqb->active) {
 257                ret = (int)irqb->active - (int)irqa->active;
 258                goto out;
 259        }
 260
 261        penda = irqa->enabled && irq_is_pending(irqa);
 262        pendb = irqb->enabled && irq_is_pending(irqb);
 263
 264        if (!penda || !pendb) {
 265                ret = (int)pendb - (int)penda;
 266                goto out;
 267        }
 268
 269        /* Both pending and enabled, sort by priority */
 270        ret = irqa->priority - irqb->priority;
 271out:
 272        spin_unlock(&irqb->irq_lock);
 273        spin_unlock(&irqa->irq_lock);
 274        return ret;
 275}
 276
 277/* Must be called with the ap_list_lock held */
 278static void vgic_sort_ap_list(struct kvm_vcpu *vcpu)
 279{
 280        struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
 281
 282        DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&vgic_cpu->ap_list_lock));
 283
 284        list_sort(NULL, &vgic_cpu->ap_list_head, vgic_irq_cmp);
 285}
 286
 287/*
 288 * Only valid injection if changing level for level-triggered IRQs or for a
 289 * rising edge, and in-kernel connected IRQ lines can only be controlled by
 290 * their owner.
 291 */
 292static bool vgic_validate_injection(struct vgic_irq *irq, bool level, void *owner)
 293{
 294        if (irq->owner != owner)
 295                return false;
 296
 297        switch (irq->config) {
 298        case VGIC_CONFIG_LEVEL:
 299                return irq->line_level != level;
 300        case VGIC_CONFIG_EDGE:
 301                return level;
 302        }
 303
 304        return false;
 305}
 306
 307/*
 308 * Check whether an IRQ needs to (and can) be queued to a VCPU's ap list.
 309 * Do the queuing if necessary, taking the right locks in the right order.
 310 * Returns true when the IRQ was queued, false otherwise.
 311 *
 312 * Needs to be entered with the IRQ lock already held, but will return
 313 * with all locks dropped.
 314 */
 315bool vgic_queue_irq_unlock(struct kvm *kvm, struct vgic_irq *irq,
 316                           unsigned long flags)
 317{
 318        struct kvm_vcpu *vcpu;
 319
 320        DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&irq->irq_lock));
 321
 322retry:
 323        vcpu = vgic_target_oracle(irq);
 324        if (irq->vcpu || !vcpu) {
 325                /*
 326                 * If this IRQ is already on a VCPU's ap_list, then it
 327                 * cannot be moved or modified and there is no more work for
 328                 * us to do.
 329                 *
 330                 * Otherwise, if the irq is not pending and enabled, it does
 331                 * not need to be inserted into an ap_list and there is also
 332                 * no more work for us to do.
 333                 */
 334                spin_unlock_irqrestore(&irq->irq_lock, flags);
 335
 336                /*
 337                 * We have to kick the VCPU here, because we could be
 338                 * queueing an edge-triggered interrupt for which we
 339                 * get no EOI maintenance interrupt. In that case,
 340                 * while the IRQ is already on the VCPU's AP list, the
 341                 * VCPU could have EOI'ed the original interrupt and
 342                 * won't see this one until it exits for some other
 343                 * reason.
 344                 */
 345                if (vcpu) {
 346                        kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
 347                        kvm_vcpu_kick(vcpu);
 348                }
 349                return false;
 350        }
 351
 352        /*
 353         * We must unlock the irq lock to take the ap_list_lock where
 354         * we are going to insert this new pending interrupt.
 355         */
 356        spin_unlock_irqrestore(&irq->irq_lock, flags);
 357
 358        /* someone can do stuff here, which we re-check below */
 359
 360        spin_lock_irqsave(&vcpu->arch.vgic_cpu.ap_list_lock, flags);
 361        spin_lock(&irq->irq_lock);
 362
 363        /*
 364         * Did something change behind our backs?
 365         *
 366         * There are two cases:
 367         * 1) The irq lost its pending state or was disabled behind our
 368         *    backs and/or it was queued to another VCPU's ap_list.
 369         * 2) Someone changed the affinity on this irq behind our
 370         *    backs and we are now holding the wrong ap_list_lock.
 371         *
 372         * In both cases, drop the locks and retry.
 373         */
 374
 375        if (unlikely(irq->vcpu || vcpu != vgic_target_oracle(irq))) {
 376                spin_unlock(&irq->irq_lock);
 377                spin_unlock_irqrestore(&vcpu->arch.vgic_cpu.ap_list_lock, flags);
 378
 379                spin_lock_irqsave(&irq->irq_lock, flags);
 380                goto retry;
 381        }
 382
 383        /*
 384         * Grab a reference to the irq to reflect the fact that it is
 385         * now in the ap_list.
 386         */
 387        vgic_get_irq_kref(irq);
 388        list_add_tail(&irq->ap_list, &vcpu->arch.vgic_cpu.ap_list_head);
 389        irq->vcpu = vcpu;
 390
 391        spin_unlock(&irq->irq_lock);
 392        spin_unlock_irqrestore(&vcpu->arch.vgic_cpu.ap_list_lock, flags);
 393
 394        kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
 395        kvm_vcpu_kick(vcpu);
 396
 397        return true;
 398}
 399
 400/**
 401 * kvm_vgic_inject_irq - Inject an IRQ from a device to the vgic
 402 * @kvm:     The VM structure pointer
 403 * @cpuid:   The CPU for PPIs
 404 * @intid:   The INTID to inject a new state to.
 405 * @level:   Edge-triggered:  true:  to trigger the interrupt
 406 *                            false: to ignore the call
 407 *           Level-sensitive  true:  raise the input signal
  408 *           Level-sensitive: true:  raise the input signal
 409 * @owner:   The opaque pointer to the owner of the IRQ being raised to verify
 410 *           that the caller is allowed to inject this IRQ.  Userspace
 411 *           injections will have owner == NULL.
 412 *
 413 * The VGIC is not concerned with devices being active-LOW or active-HIGH for
 414 * level-sensitive interrupts.  You can think of the level parameter as 1
 415 * being HIGH and 0 being LOW and all devices being active-HIGH.
 416 */
 417int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int intid,
 418                        bool level, void *owner)
 419{
 420        struct kvm_vcpu *vcpu;
 421        struct vgic_irq *irq;
 422        unsigned long flags;
 423        int ret;
 424
 425        trace_vgic_update_irq_pending(cpuid, intid, level);
 426
 427        ret = vgic_lazy_init(kvm);
 428        if (ret)
 429                return ret;
 430
 431        vcpu = kvm_get_vcpu(kvm, cpuid);
 432        if (!vcpu && intid < VGIC_NR_PRIVATE_IRQS)
 433                return -EINVAL;
 434
 435        irq = vgic_get_irq(kvm, vcpu, intid);
 436        if (!irq)
 437                return -EINVAL;
 438
 439        spin_lock_irqsave(&irq->irq_lock, flags);
 440
 441        if (!vgic_validate_injection(irq, level, owner)) {
 442                /* Nothing to see here, move along... */
 443                spin_unlock_irqrestore(&irq->irq_lock, flags);
 444                vgic_put_irq(kvm, irq);
 445                return 0;
 446        }
 447
 448        if (irq->config == VGIC_CONFIG_LEVEL)
 449                irq->line_level = level;
 450        else
 451                irq->pending_latch = true;
 452
 453        vgic_queue_irq_unlock(kvm, irq, flags);
 454        vgic_put_irq(kvm, irq);
 455
 456        return 0;
 457}
 458
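/*
 * Illustrative sketch of driving a level-sensitive SPI from an
 * in-kernel device model.  The device structure, the SPI number (40)
 * and the helper are invented for the example; a real caller passes
 * the owner cookie it registered with kvm_vgic_set_owner(), or NULL
 * for userspace-style injection.
 */
struct example_spi_device {
        struct kvm *kvm;
        void *owner;            /* cookie registered via kvm_vgic_set_owner() */
};

static int example_spi_device_set_line(struct example_spi_device *dev,
                                       bool raised)
{
        /*
         * SPIs live in the per-VM array, so the cpuid argument is only
         * relevant for private interrupts; 0 is fine here.
         */
        return kvm_vgic_inject_irq(dev->kvm, 0, 40, raised, dev->owner);
}
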
 459/* @irq->irq_lock must be held */
 460static int kvm_vgic_map_irq(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
 461                            unsigned int host_irq,
  462                            bool (*get_input_level)(int vintid))
 463{
 464        struct irq_desc *desc;
 465        struct irq_data *data;
 466
 467        /*
 468         * Find the physical IRQ number corresponding to @host_irq
 469         */
 470        desc = irq_to_desc(host_irq);
 471        if (!desc) {
 472                kvm_err("%s: no interrupt descriptor\n", __func__);
 473                return -EINVAL;
 474        }
 475        data = irq_desc_get_irq_data(desc);
 476        while (data->parent_data)
 477                data = data->parent_data;
 478
 479        irq->hw = true;
 480        irq->host_irq = host_irq;
 481        irq->hwintid = data->hwirq;
 482        irq->get_input_level = get_input_level;
 483        return 0;
 484}
 485
 486/* @irq->irq_lock must be held */
 487static inline void kvm_vgic_unmap_irq(struct vgic_irq *irq)
 488{
 489        irq->hw = false;
 490        irq->hwintid = 0;
 491        irq->get_input_level = NULL;
 492}
 493
 494int kvm_vgic_map_phys_irq(struct kvm_vcpu *vcpu, unsigned int host_irq,
  495                          u32 vintid, bool (*get_input_level)(int vintid))
 496{
 497        struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, vintid);
 498        unsigned long flags;
 499        int ret;
 500
 501        BUG_ON(!irq);
 502
 503        spin_lock_irqsave(&irq->irq_lock, flags);
 504        ret = kvm_vgic_map_irq(vcpu, irq, host_irq, get_input_level);
 505        spin_unlock_irqrestore(&irq->irq_lock, flags);
 506        vgic_put_irq(vcpu->kvm, irq);
 507
 508        return ret;
 509}
 510
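/*
 * Illustrative sketch of forwarding a host interrupt to a guest PPI,
 * in the style of the arch timer.  The host_irq, the PPI number and
 * the line-level callback are placeholders; the callback must report
 * the current state of the emulated input line for the given vintid.
 */
static bool example_get_input_level(int vintid)
{
        /* Ask the emulated device whether its output line is high. */
        return false;
}

static int example_forward_to_ppi(struct kvm_vcpu *vcpu, unsigned int host_irq)
{
        const u32 vintid = 27;  /* a guest PPI, e.g. the virtual timer's */

        return kvm_vgic_map_phys_irq(vcpu, host_irq, vintid,
                                     example_get_input_level);
}
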
 511/**
 512 * kvm_vgic_reset_mapped_irq - Reset a mapped IRQ
 513 * @vcpu: The VCPU pointer
 514 * @vintid: The INTID of the interrupt
 515 *
 516 * Reset the active and pending states of a mapped interrupt.  Kernel
 517 * subsystems injecting mapped interrupts should reset their interrupt lines
 518 * when we are doing a reset of the VM.
 519 */
 520void kvm_vgic_reset_mapped_irq(struct kvm_vcpu *vcpu, u32 vintid)
 521{
 522        struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, vintid);
 523        unsigned long flags;
 524
 525        if (!irq->hw)
 526                goto out;
 527
 528        spin_lock_irqsave(&irq->irq_lock, flags);
 529        irq->active = false;
 530        irq->pending_latch = false;
 531        irq->line_level = false;
 532        spin_unlock_irqrestore(&irq->irq_lock, flags);
 533out:
 534        vgic_put_irq(vcpu->kvm, irq);
 535}
 536
 537int kvm_vgic_unmap_phys_irq(struct kvm_vcpu *vcpu, unsigned int vintid)
 538{
 539        struct vgic_irq *irq;
 540        unsigned long flags;
 541
 542        if (!vgic_initialized(vcpu->kvm))
 543                return -EAGAIN;
 544
 545        irq = vgic_get_irq(vcpu->kvm, vcpu, vintid);
 546        BUG_ON(!irq);
 547
 548        spin_lock_irqsave(&irq->irq_lock, flags);
 549        kvm_vgic_unmap_irq(irq);
 550        spin_unlock_irqrestore(&irq->irq_lock, flags);
 551        vgic_put_irq(vcpu->kvm, irq);
 552
 553        return 0;
 554}
 555
 556/**
 557 * kvm_vgic_set_owner - Set the owner of an interrupt for a VM
 558 *
 559 * @vcpu:   Pointer to the VCPU (used for PPIs)
 560 * @intid:  The virtual INTID identifying the interrupt (PPI or SPI)
 561 * @owner:  Opaque pointer to the owner
 562 *
 563 * Returns 0 if intid is not already used by another in-kernel device and the
 564 * owner is set, otherwise returns an error code.
 565 */
 566int kvm_vgic_set_owner(struct kvm_vcpu *vcpu, unsigned int intid, void *owner)
 567{
 568        struct vgic_irq *irq;
 569        unsigned long flags;
 570        int ret = 0;
 571
 572        if (!vgic_initialized(vcpu->kvm))
 573                return -EAGAIN;
 574
 575        /* SGIs and LPIs cannot be wired up to any device */
 576        if (!irq_is_ppi(intid) && !vgic_valid_spi(vcpu->kvm, intid))
 577                return -EINVAL;
 578
 579        irq = vgic_get_irq(vcpu->kvm, vcpu, intid);
 580        spin_lock_irqsave(&irq->irq_lock, flags);
 581        if (irq->owner && irq->owner != owner)
 582                ret = -EEXIST;
 583        else
 584                irq->owner = owner;
 585        spin_unlock_irqrestore(&irq->irq_lock, flags);
 586
 587        return ret;
 588}
 589
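/*
 * Illustrative sketch of claiming a private interrupt for an in-kernel
 * device at VCPU init time.  The PPI number and the helper are made up;
 * whatever pointer is registered here must also be passed as @owner to
 * kvm_vgic_inject_irq() for later injections to be accepted.
 */
static int example_claim_ppi(struct kvm_vcpu *vcpu, void *owner)
{
        const unsigned int ppi = 23;    /* arbitrary PPI for the example */

        /* Fails with -EEXIST if another in-kernel user owns this line. */
        return kvm_vgic_set_owner(vcpu, ppi, owner);
}
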
 590/**
 591 * vgic_prune_ap_list - Remove non-relevant interrupts from the list
 592 *
 593 * @vcpu: The VCPU pointer
 594 *
 595 * Go over the list of "interesting" interrupts, and prune those that we
 596 * won't have to consider in the near future.
 597 */
 598static void vgic_prune_ap_list(struct kvm_vcpu *vcpu)
 599{
 600        struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
 601        struct vgic_irq *irq, *tmp;
 602        unsigned long flags;
 603
 604retry:
 605        spin_lock_irqsave(&vgic_cpu->ap_list_lock, flags);
 606
 607        list_for_each_entry_safe(irq, tmp, &vgic_cpu->ap_list_head, ap_list) {
 608                struct kvm_vcpu *target_vcpu, *vcpuA, *vcpuB;
 609                bool target_vcpu_needs_kick = false;
 610
 611                spin_lock(&irq->irq_lock);
 612
 613                BUG_ON(vcpu != irq->vcpu);
 614
 615                target_vcpu = vgic_target_oracle(irq);
 616
 617                if (!target_vcpu) {
 618                        /*
 619                         * We don't need to process this interrupt any
 620                         * further, move it off the list.
 621                         */
 622                        list_del(&irq->ap_list);
 623                        irq->vcpu = NULL;
 624                        spin_unlock(&irq->irq_lock);
 625
 626                        /*
 627                         * This vgic_put_irq call matches the
 628                         * vgic_get_irq_kref in vgic_queue_irq_unlock,
 629                         * where we added the LPI to the ap_list. As
  630                         * we remove the irq from the list, we
 631                         * also drop the refcount.
 632                         */
 633                        vgic_put_irq(vcpu->kvm, irq);
 634                        continue;
 635                }
 636
 637                if (target_vcpu == vcpu) {
 638                        /* We're on the right CPU */
 639                        spin_unlock(&irq->irq_lock);
 640                        continue;
 641                }
 642
 643                /* This interrupt looks like it has to be migrated. */
 644
 645                spin_unlock(&irq->irq_lock);
 646                spin_unlock_irqrestore(&vgic_cpu->ap_list_lock, flags);
 647
 648                /*
 649                 * Ensure locking order by always locking the smallest
 650                 * ID first.
 651                 */
 652                if (vcpu->vcpu_id < target_vcpu->vcpu_id) {
 653                        vcpuA = vcpu;
 654                        vcpuB = target_vcpu;
 655                } else {
 656                        vcpuA = target_vcpu;
 657                        vcpuB = vcpu;
 658                }
 659
 660                spin_lock_irqsave(&vcpuA->arch.vgic_cpu.ap_list_lock, flags);
 661                spin_lock_nested(&vcpuB->arch.vgic_cpu.ap_list_lock,
 662                                 SINGLE_DEPTH_NESTING);
 663                spin_lock(&irq->irq_lock);
 664
 665                /*
 666                 * If the affinity has been preserved, move the
 667                 * interrupt around. Otherwise, it means things have
 668                 * changed while the interrupt was unlocked, and we
 669                 * need to replay this.
 670                 *
 671                 * In all cases, we cannot trust the list not to have
 672                 * changed, so we restart from the beginning.
 673                 */
 674                if (target_vcpu == vgic_target_oracle(irq)) {
 675                        struct vgic_cpu *new_cpu = &target_vcpu->arch.vgic_cpu;
 676
 677                        list_del(&irq->ap_list);
 678                        irq->vcpu = target_vcpu;
 679                        list_add_tail(&irq->ap_list, &new_cpu->ap_list_head);
 680                        target_vcpu_needs_kick = true;
 681                }
 682
 683                spin_unlock(&irq->irq_lock);
 684                spin_unlock(&vcpuB->arch.vgic_cpu.ap_list_lock);
 685                spin_unlock_irqrestore(&vcpuA->arch.vgic_cpu.ap_list_lock, flags);
 686
 687                if (target_vcpu_needs_kick) {
 688                        kvm_make_request(KVM_REQ_IRQ_PENDING, target_vcpu);
 689                        kvm_vcpu_kick(target_vcpu);
 690                }
 691
 692                goto retry;
 693        }
 694
 695        spin_unlock_irqrestore(&vgic_cpu->ap_list_lock, flags);
 696}
 697
 698static inline void vgic_fold_lr_state(struct kvm_vcpu *vcpu)
 699{
 700        if (kvm_vgic_global_state.type == VGIC_V2)
 701                vgic_v2_fold_lr_state(vcpu);
 702        else
 703                vgic_v3_fold_lr_state(vcpu);
 704}
 705
 706/* Requires the irq_lock to be held. */
 707static inline void vgic_populate_lr(struct kvm_vcpu *vcpu,
 708                                    struct vgic_irq *irq, int lr)
 709{
 710        DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&irq->irq_lock));
 711
 712        if (kvm_vgic_global_state.type == VGIC_V2)
 713                vgic_v2_populate_lr(vcpu, irq, lr);
 714        else
 715                vgic_v3_populate_lr(vcpu, irq, lr);
 716}
 717
 718static inline void vgic_clear_lr(struct kvm_vcpu *vcpu, int lr)
 719{
 720        if (kvm_vgic_global_state.type == VGIC_V2)
 721                vgic_v2_clear_lr(vcpu, lr);
 722        else
 723                vgic_v3_clear_lr(vcpu, lr);
 724}
 725
 726static inline void vgic_set_underflow(struct kvm_vcpu *vcpu)
 727{
 728        if (kvm_vgic_global_state.type == VGIC_V2)
 729                vgic_v2_set_underflow(vcpu);
 730        else
 731                vgic_v3_set_underflow(vcpu);
 732}
 733
 734/* Requires the ap_list_lock to be held. */
 735static int compute_ap_list_depth(struct kvm_vcpu *vcpu,
 736                                 bool *multi_sgi)
 737{
 738        struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
 739        struct vgic_irq *irq;
 740        int count = 0;
 741
 742        *multi_sgi = false;
 743
 744        DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&vgic_cpu->ap_list_lock));
 745
 746        list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) {
 747                int w;
 748
 749                spin_lock(&irq->irq_lock);
 750                /* GICv2 SGIs can count for more than one... */
 751                w = vgic_irq_get_lr_count(irq);
 752                spin_unlock(&irq->irq_lock);
 753
 754                count += w;
 755                *multi_sgi |= (w > 1);
 756        }
 757        return count;
 758}
 759
 760/* Requires the VCPU's ap_list_lock to be held. */
 761static void vgic_flush_lr_state(struct kvm_vcpu *vcpu)
 762{
 763        struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
 764        struct vgic_irq *irq;
 765        int count;
 766        bool multi_sgi;
 767        u8 prio = 0xff;
 768
 769        DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&vgic_cpu->ap_list_lock));
 770
 771        count = compute_ap_list_depth(vcpu, &multi_sgi);
 772        if (count > kvm_vgic_global_state.nr_lr || multi_sgi)
 773                vgic_sort_ap_list(vcpu);
 774
 775        count = 0;
 776
 777        list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) {
 778                spin_lock(&irq->irq_lock);
 779
 780                /*
 781                 * If we have multi-SGIs in the pipeline, we need to
 782                 * guarantee that they are all seen before any IRQ of
 783                 * lower priority. In that case, we need to filter out
 784                 * these interrupts by exiting early. This is easy as
 785                 * the AP list has been sorted already.
 786                 */
 787                if (multi_sgi && irq->priority > prio) {
 788                        spin_unlock(&irq->irq_lock);
 789                        break;
 790                }
 791
 792                if (likely(vgic_target_oracle(irq) == vcpu)) {
 793                        vgic_populate_lr(vcpu, irq, count++);
 794
 795                        if (irq->source)
 796                                prio = irq->priority;
 797                }
 798
 799                spin_unlock(&irq->irq_lock);
 800
 801                if (count == kvm_vgic_global_state.nr_lr) {
 802                        if (!list_is_last(&irq->ap_list,
 803                                          &vgic_cpu->ap_list_head))
 804                                vgic_set_underflow(vcpu);
 805                        break;
 806                }
 807        }
 808
 809        vcpu->arch.vgic_cpu.used_lrs = count;
 810
 811        /* Nuke remaining LRs */
 812        for ( ; count < kvm_vgic_global_state.nr_lr; count++)
 813                vgic_clear_lr(vcpu, count);
 814}
 815
 816static inline bool can_access_vgic_from_kernel(void)
 817{
 818        /*
 819         * GICv2 can always be accessed from the kernel because it is
 820         * memory-mapped, and VHE systems can access GICv3 EL2 system
 821         * registers.
 822         */
 823        return !static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif) || has_vhe();
 824}
 825
 826static inline void vgic_save_state(struct kvm_vcpu *vcpu)
 827{
 828        if (!static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif))
 829                vgic_v2_save_state(vcpu);
 830        else
 831                __vgic_v3_save_state(vcpu);
 832}
 833
 834/* Sync back the hardware VGIC state into our emulation after a guest's run. */
 835void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
 836{
 837        struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
 838
 839        WARN_ON(vgic_v4_sync_hwstate(vcpu));
 840
 841        /* An empty ap_list_head implies used_lrs == 0 */
 842        if (list_empty(&vcpu->arch.vgic_cpu.ap_list_head))
 843                return;
 844
 845        if (can_access_vgic_from_kernel())
 846                vgic_save_state(vcpu);
 847
 848        if (vgic_cpu->used_lrs)
 849                vgic_fold_lr_state(vcpu);
 850        vgic_prune_ap_list(vcpu);
 851}
 852
 853static inline void vgic_restore_state(struct kvm_vcpu *vcpu)
 854{
 855        if (!static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif))
 856                vgic_v2_restore_state(vcpu);
 857        else
 858                __vgic_v3_restore_state(vcpu);
 859}
 860
 861/* Flush our emulation state into the GIC hardware before entering the guest. */
 862void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu)
 863{
 864        WARN_ON(vgic_v4_flush_hwstate(vcpu));
 865
 866        /*
 867         * If there are no virtual interrupts active or pending for this
 868         * VCPU, then there is no work to do and we can bail out without
 869         * taking any lock.  There is a potential race with someone injecting
 870         * interrupts to the VCPU, but it is a benign race as the VCPU will
 871         * either observe the new interrupt before or after doing this check,
 872         * and introducing additional synchronization mechanism doesn't change
 873         * this.
 874         */
 875        if (list_empty(&vcpu->arch.vgic_cpu.ap_list_head))
 876                return;
 877
 878        DEBUG_SPINLOCK_BUG_ON(!irqs_disabled());
 879
 880        spin_lock(&vcpu->arch.vgic_cpu.ap_list_lock);
 881        vgic_flush_lr_state(vcpu);
 882        spin_unlock(&vcpu->arch.vgic_cpu.ap_list_lock);
 883
 884        if (can_access_vgic_from_kernel())
 885                vgic_restore_state(vcpu);
 886}
 887
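/*
 * Simplified sketch of how the arm/arm64 run loop is expected to use
 * the two hooks above: flush the emulated state into the LRs before
 * entering the guest (with interrupts disabled), and sync it back
 * after the exit.  Everything except the two vgic calls is a stand-in
 * for the real code in virt/kvm/arm/arm.c.
 */
static void example_vcpu_run_once(struct kvm_vcpu *vcpu)
{
        local_irq_disable();

        kvm_vgic_flush_hwstate(vcpu);   /* ap_lists -> list registers */

        /* ... enter the guest and run until it exits ... */

        kvm_vgic_sync_hwstate(vcpu);    /* list registers -> ap_lists */

        local_irq_enable();
}
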
 888void kvm_vgic_load(struct kvm_vcpu *vcpu)
 889{
 890        if (unlikely(!vgic_initialized(vcpu->kvm)))
 891                return;
 892
 893        if (kvm_vgic_global_state.type == VGIC_V2)
 894                vgic_v2_load(vcpu);
 895        else
 896                vgic_v3_load(vcpu);
 897}
 898
 899void kvm_vgic_put(struct kvm_vcpu *vcpu)
 900{
 901        if (unlikely(!vgic_initialized(vcpu->kvm)))
 902                return;
 903
 904        if (kvm_vgic_global_state.type == VGIC_V2)
 905                vgic_v2_put(vcpu);
 906        else
 907                vgic_v3_put(vcpu);
 908}
 909
 910int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu)
 911{
 912        struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
 913        struct vgic_irq *irq;
 914        bool pending = false;
 915        unsigned long flags;
 916
 917        if (!vcpu->kvm->arch.vgic.enabled)
 918                return false;
 919
 920        if (vcpu->arch.vgic_cpu.vgic_v3.its_vpe.pending_last)
 921                return true;
 922
 923        spin_lock_irqsave(&vgic_cpu->ap_list_lock, flags);
 924
 925        list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) {
 926                spin_lock(&irq->irq_lock);
 927                pending = irq_is_pending(irq) && irq->enabled;
 928                spin_unlock(&irq->irq_lock);
 929
 930                if (pending)
 931                        break;
 932        }
 933
 934        spin_unlock_irqrestore(&vgic_cpu->ap_list_lock, flags);
 935
 936        return pending;
 937}
 938
 939void vgic_kick_vcpus(struct kvm *kvm)
 940{
 941        struct kvm_vcpu *vcpu;
 942        int c;
 943
 944        /*
 945         * We've injected an interrupt, time to find out who deserves
 946         * a good kick...
 947         */
 948        kvm_for_each_vcpu(c, vcpu, kvm) {
 949                if (kvm_vgic_vcpu_pending_irq(vcpu)) {
 950                        kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
 951                        kvm_vcpu_kick(vcpu);
 952                }
 953        }
 954}
 955
 956bool kvm_vgic_map_is_active(struct kvm_vcpu *vcpu, unsigned int vintid)
 957{
 958        struct vgic_irq *irq;
 959        bool map_is_active;
 960        unsigned long flags;
 961
 962        if (!vgic_initialized(vcpu->kvm))
 963                return false;
 964
 965        irq = vgic_get_irq(vcpu->kvm, vcpu, vintid);
 966        spin_lock_irqsave(&irq->irq_lock, flags);
 967        map_is_active = irq->hw && irq->active;
 968        spin_unlock_irqrestore(&irq->irq_lock, flags);
 969        vgic_put_irq(vcpu->kvm, irq);
 970
 971        return map_is_active;
 972}
 973
 974