linux/virt/kvm/arm/vgic/vgic-its.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * GICv3 ITS emulation
 *
 * Copyright (C) 2015,2016 ARM Ltd.
 * Author: Andre Przywara <andre.przywara@arm.com>
 */

#include <linux/cpu.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/uaccess.h>
#include <linux/list_sort.h>

#include <linux/irqchip/arm-gic-v3.h>

#include <asm/kvm_emulate.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_mmu.h>

#include "vgic.h"
#include "vgic-mmio.h"

static int vgic_its_save_tables_v0(struct vgic_its *its);
static int vgic_its_restore_tables_v0(struct vgic_its *its);
static int vgic_its_commit_v0(struct vgic_its *its);
static int update_lpi_config(struct kvm *kvm, struct vgic_irq *irq,
                             struct kvm_vcpu *filter_vcpu, bool needs_inv);

/*
 * Creates a new (reference to a) struct vgic_irq for a given LPI.
 * If this LPI is already mapped on another ITS, we increase its refcount
 * and return a pointer to the existing structure.
 * If this is a "new" LPI, we allocate and initialize a new struct vgic_irq.
 * This function returns a pointer to the _unlocked_ structure.
 */
static struct vgic_irq *vgic_add_lpi(struct kvm *kvm, u32 intid,
                                     struct kvm_vcpu *vcpu)
{
        struct vgic_dist *dist = &kvm->arch.vgic;
        struct vgic_irq *irq = vgic_get_irq(kvm, NULL, intid), *oldirq;
        unsigned long flags;
        int ret;

        /* In this case there is no put, since we keep the reference. */
        if (irq)
                return irq;

        irq = kzalloc(sizeof(struct vgic_irq), GFP_KERNEL);
        if (!irq)
                return ERR_PTR(-ENOMEM);

        INIT_LIST_HEAD(&irq->lpi_list);
        INIT_LIST_HEAD(&irq->ap_list);
        raw_spin_lock_init(&irq->irq_lock);

        irq->config = VGIC_CONFIG_EDGE;
        kref_init(&irq->refcount);
        irq->intid = intid;
        irq->target_vcpu = vcpu;
        irq->group = 1;

        raw_spin_lock_irqsave(&dist->lpi_list_lock, flags);

        /*
         * There could be a race with another vgic_add_lpi(), so we need to
         * check that we don't add a second list entry with the same LPI.
         */
        list_for_each_entry(oldirq, &dist->lpi_list_head, lpi_list) {
                if (oldirq->intid != intid)
                        continue;
                /* Someone was faster with adding this LPI, let's use that. */
                kfree(irq);
                irq = oldirq;

                /*
                 * This increases the refcount, the caller is expected to
                 * call vgic_put_irq() on the returned pointer once it's
                 * finished with the IRQ.
                 */
                vgic_get_irq_kref(irq);

                goto out_unlock;
        }

        list_add_tail(&irq->lpi_list, &dist->lpi_list_head);
        dist->lpi_list_count++;

out_unlock:
        raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags);

        /*
         * We "cache" the configuration table entries in our struct vgic_irq's.
         * However we only have those structs for mapped IRQs, so we read in
         * the respective config data from memory here upon mapping the LPI.
         */
        ret = update_lpi_config(kvm, irq, NULL, false);
        if (ret)
                return ERR_PTR(ret);

        ret = vgic_v3_lpi_sync_pending_status(kvm, irq);
        if (ret)
                return ERR_PTR(ret);

        return irq;
}

struct its_device {
        struct list_head dev_list;

        /* the head for the list of ITTEs */
        struct list_head itt_head;
        u32 num_eventid_bits;
        gpa_t itt_addr;
        u32 device_id;
};

#define COLLECTION_NOT_MAPPED ((u32)~0)

struct its_collection {
        struct list_head coll_list;

        u32 collection_id;
        u32 target_addr;
};

#define its_is_collection_mapped(coll) ((coll) && \
                                ((coll)->target_addr != COLLECTION_NOT_MAPPED))

struct its_ite {
        struct list_head ite_list;

        struct vgic_irq *irq;
        struct its_collection *collection;
        u32 event_id;
};

struct vgic_translation_cache_entry {
        struct list_head        entry;
        phys_addr_t             db;
        u32                     devid;
        u32                     eventid;
        struct vgic_irq         *irq;
};

/**
 * struct vgic_its_abi - ITS ABI ops and settings
 * @cte_esz: collection table entry size
 * @dte_esz: device table entry size
 * @ite_esz: interrupt translation table entry size
 * @save_tables: save the ITS tables into guest RAM
 * @restore_tables: restore the ITS internal structs from tables
 *  stored in guest RAM
 * @commit: initialize the registers which expose the ABI settings,
 *  especially the entry sizes
 */
struct vgic_its_abi {
        int cte_esz;
        int dte_esz;
        int ite_esz;
        int (*save_tables)(struct vgic_its *its);
        int (*restore_tables)(struct vgic_its *its);
        int (*commit)(struct vgic_its *its);
};

#define ABI_0_ESZ       8
#define ESZ_MAX         ABI_0_ESZ

static const struct vgic_its_abi its_table_abi_versions[] = {
        [0] = {
         .cte_esz = ABI_0_ESZ,
         .dte_esz = ABI_0_ESZ,
         .ite_esz = ABI_0_ESZ,
         .save_tables = vgic_its_save_tables_v0,
         .restore_tables = vgic_its_restore_tables_v0,
         .commit = vgic_its_commit_v0,
        },
};

#define NR_ITS_ABIS     ARRAY_SIZE(its_table_abi_versions)

inline const struct vgic_its_abi *vgic_its_get_abi(struct vgic_its *its)
{
        return &its_table_abi_versions[its->abi_rev];
}

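/*
 * Switches the ITS to the given table ABI revision and commits it by
 * re-initialising the registers which expose the ABI settings.
 */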
static int vgic_its_set_abi(struct vgic_its *its, u32 rev)
{
        const struct vgic_its_abi *abi;

        its->abi_rev = rev;
        abi = vgic_its_get_abi(its);
        return abi->commit(its);
}

/*
 * Finds and returns a device in the device table for an ITS.
 * Must be called with the its_lock mutex held.
 */
static struct its_device *find_its_device(struct vgic_its *its, u32 device_id)
{
        struct its_device *device;

        list_for_each_entry(device, &its->device_list, dev_list)
                if (device_id == device->device_id)
                        return device;

        return NULL;
}

/*
 * Finds and returns an interrupt translation table entry (ITTE) for a given
 * Device ID/Event ID pair on an ITS.
 * Must be called with the its_lock mutex held.
 */
static struct its_ite *find_ite(struct vgic_its *its, u32 device_id,
                                  u32 event_id)
{
        struct its_device *device;
        struct its_ite *ite;

        device = find_its_device(its, device_id);
        if (device == NULL)
                return NULL;

        list_for_each_entry(ite, &device->itt_head, ite_list)
                if (ite->event_id == event_id)
                        return ite;

        return NULL;
}

/* To be used as an iterator, this macro misses the enclosing parentheses */
#define for_each_lpi_its(dev, ite, its) \
        list_for_each_entry(dev, &(its)->device_list, dev_list) \
                list_for_each_entry(ite, &(dev)->itt_head, ite_list)

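/* LPIs start at INTID 8192, above the SGI, PPI and SPI ranges. */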
#define GIC_LPI_OFFSET 8192

#define VITS_TYPER_IDBITS 16
#define VITS_TYPER_DEVBITS 16
#define VITS_DTE_MAX_DEVID_OFFSET       (BIT(14) - 1)
#define VITS_ITE_MAX_EVENTID_OFFSET     (BIT(16) - 1)

/*
 * Finds and returns a collection in the ITS collection table.
 * Must be called with the its_lock mutex held.
 */
static struct its_collection *find_collection(struct vgic_its *its, int coll_id)
{
        struct its_collection *collection;

        list_for_each_entry(collection, &its->collection_list, coll_list) {
                if (coll_id == collection->collection_id)
                        return collection;
        }

        return NULL;
}

#define LPI_PROP_ENABLE_BIT(p)  ((p) & LPI_PROP_ENABLED)
#define LPI_PROP_PRIORITY(p)    ((p) & 0xfc)

/*
 * Reads the configuration data for a given LPI from guest memory and
 * updates the fields in struct vgic_irq.
 * If filter_vcpu is not NULL, applies only if the IRQ is targeting this
 * VCPU. Unconditionally applies if filter_vcpu is NULL.
 */
static int update_lpi_config(struct kvm *kvm, struct vgic_irq *irq,
                             struct kvm_vcpu *filter_vcpu, bool needs_inv)
{
        u64 propbase = GICR_PROPBASER_ADDRESS(kvm->arch.vgic.propbaser);
        u8 prop;
        int ret;
        unsigned long flags;

        ret = kvm_read_guest_lock(kvm, propbase + irq->intid - GIC_LPI_OFFSET,
                                  &prop, 1);

        if (ret)
                return ret;

        raw_spin_lock_irqsave(&irq->irq_lock, flags);

        if (!filter_vcpu || filter_vcpu == irq->target_vcpu) {
                irq->priority = LPI_PROP_PRIORITY(prop);
                irq->enabled = LPI_PROP_ENABLE_BIT(prop);

                if (!irq->hw) {
                        vgic_queue_irq_unlock(kvm, irq, flags);
                        return 0;
                }
        }

        raw_spin_unlock_irqrestore(&irq->irq_lock, flags);

        if (irq->hw)
                return its_prop_update_vlpi(irq->host_irq, prop, needs_inv);

        return 0;
}

/*
 * Create a snapshot of the current LPIs targeting @vcpu, so that we can
 * enumerate those LPIs without holding any lock.
 * Returns their number and puts the kmalloc'ed array into intid_ptr.
 */
int vgic_copy_lpi_list(struct kvm *kvm, struct kvm_vcpu *vcpu, u32 **intid_ptr)
{
        struct vgic_dist *dist = &kvm->arch.vgic;
        struct vgic_irq *irq;
        unsigned long flags;
        u32 *intids;
        int irq_count, i = 0;

        /*
         * There is an obvious race between allocating the array and LPIs
         * being mapped/unmapped. If we ended up here as a result of a
         * command, we're safe (locks are held, preventing another
         * command). If coming from another path (such as enabling LPIs),
         * we must be careful not to overrun the array.
         */
        irq_count = READ_ONCE(dist->lpi_list_count);
        intids = kmalloc_array(irq_count, sizeof(intids[0]), GFP_KERNEL);
        if (!intids)
                return -ENOMEM;

        raw_spin_lock_irqsave(&dist->lpi_list_lock, flags);
        list_for_each_entry(irq, &dist->lpi_list_head, lpi_list) {
                if (i == irq_count)
                        break;
                /* We don't need to "get" the IRQ, as we hold the list lock. */
                if (vcpu && irq->target_vcpu != vcpu)
                        continue;
                intids[i++] = irq->intid;
        }
        raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags);

        *intid_ptr = intids;
        return i;
}

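/*
 * Updates the target VCPU of an LPI. For a hardware-forwarded (GICv4)
 * vLPI, this also remaps the VLPI to the new vPE and keeps the per-vPE
 * vlpi_count in sync.
 */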
static int update_affinity(struct vgic_irq *irq, struct kvm_vcpu *vcpu)
{
        int ret = 0;
        unsigned long flags;

        raw_spin_lock_irqsave(&irq->irq_lock, flags);
        irq->target_vcpu = vcpu;
        raw_spin_unlock_irqrestore(&irq->irq_lock, flags);

        if (irq->hw) {
                struct its_vlpi_map map;

                ret = its_get_vlpi(irq->host_irq, &map);
                if (ret)
                        return ret;

                if (map.vpe)
                        atomic_dec(&map.vpe->vlpi_count);
                map.vpe = &vcpu->arch.vgic_cpu.vgic_v3.its_vpe;
                atomic_inc(&map.vpe->vlpi_count);

                ret = its_map_vlpi(irq->host_irq, &map);
        }

        return ret;
}

/*
 * Promotes the ITS view of the affinity of an ITTE (which redistributor
 * this LPI is targeting) to the VGIC's view, which deals with target VCPUs.
 * Needs to be called whenever either the collection for an LPI has
 * changed or the collection itself got retargeted.
 */
static void update_affinity_ite(struct kvm *kvm, struct its_ite *ite)
{
        struct kvm_vcpu *vcpu;

        if (!its_is_collection_mapped(ite->collection))
                return;

        vcpu = kvm_get_vcpu(kvm, ite->collection->target_addr);
        update_affinity(ite->irq, vcpu);
}

/*
 * Updates the target VCPU for every LPI targeting this collection.
 * Must be called with the its_lock mutex held.
 */
static void update_affinity_collection(struct kvm *kvm, struct vgic_its *its,
                                       struct its_collection *coll)
{
        struct its_device *device;
        struct its_ite *ite;

        for_each_lpi_its(device, ite, its) {
                if (!ite->collection || coll != ite->collection)
                        continue;

                update_affinity_ite(kvm, ite);
        }
}

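/*
 * Derives the number of LPIs from PROPBASER.IDbits, capped at the
 * number of interrupt ID bits this ITS emulation supports.
 */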
static u32 max_lpis_propbaser(u64 propbaser)
{
        int nr_idbits = (propbaser & 0x1f) + 1;

        return 1U << min(nr_idbits, INTERRUPT_ID_BITS_ITS);
}

/*
 * Sync the pending table pending bit of LPIs targeting @vcpu
 * with our own data structures. This relies on the LPI being
 * mapped before.
 */
static int its_sync_lpi_pending_table(struct kvm_vcpu *vcpu)
{
        gpa_t pendbase = GICR_PENDBASER_ADDRESS(vcpu->arch.vgic_cpu.pendbaser);
        struct vgic_irq *irq;
        int last_byte_offset = -1;
        int ret = 0;
        u32 *intids;
        int nr_irqs, i;
        unsigned long flags;
        u8 pendmask;

        nr_irqs = vgic_copy_lpi_list(vcpu->kvm, vcpu, &intids);
        if (nr_irqs < 0)
                return nr_irqs;

        for (i = 0; i < nr_irqs; i++) {
                int byte_offset, bit_nr;

                byte_offset = intids[i] / BITS_PER_BYTE;
                bit_nr = intids[i] % BITS_PER_BYTE;

                /*
                 * For contiguously allocated LPIs chances are we just read
                 * this very same byte in the last iteration. Reuse that.
                 */
                if (byte_offset != last_byte_offset) {
                        ret = kvm_read_guest_lock(vcpu->kvm,
                                                  pendbase + byte_offset,
                                                  &pendmask, 1);
                        if (ret) {
                                kfree(intids);
                                return ret;
                        }
                        last_byte_offset = byte_offset;
                }

                irq = vgic_get_irq(vcpu->kvm, NULL, intids[i]);
                raw_spin_lock_irqsave(&irq->irq_lock, flags);
                irq->pending_latch = pendmask & (1U << bit_nr);
                vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
                vgic_put_irq(vcpu->kvm, irq);
        }

        kfree(intids);

        return ret;
}

static unsigned long vgic_mmio_read_its_typer(struct kvm *kvm,
                                              struct vgic_its *its,
                                              gpa_t addr, unsigned int len)
{
        const struct vgic_its_abi *abi = vgic_its_get_abi(its);
        u64 reg = GITS_TYPER_PLPIS;

        /*
         * We use linear CPU numbers for redistributor addressing,
         * so GITS_TYPER.PTA is 0.
         * Also we force all PROPBASER registers to be the same, so
         * CommonLPIAff is 0 as well.
         * To avoid memory waste in the guest, we keep the number of IDBits and
         * DevBits low - at least for the time being.
         */
        reg |= GIC_ENCODE_SZ(VITS_TYPER_DEVBITS, 5) << GITS_TYPER_DEVBITS_SHIFT;
        reg |= GIC_ENCODE_SZ(VITS_TYPER_IDBITS, 5) << GITS_TYPER_IDBITS_SHIFT;
        reg |= GIC_ENCODE_SZ(abi->ite_esz, 4) << GITS_TYPER_ITT_ENTRY_SIZE_SHIFT;

        return extract_bytes(reg, addr & 7, len);
}

static unsigned long vgic_mmio_read_its_iidr(struct kvm *kvm,
                                             struct vgic_its *its,
                                             gpa_t addr, unsigned int len)
{
        u32 val;

        val = (its->abi_rev << GITS_IIDR_REV_SHIFT) & GITS_IIDR_REV_MASK;
        val |= (PRODUCT_ID_KVM << GITS_IIDR_PRODUCTID_SHIFT) | IMPLEMENTER_ARM;
        return val;
}

static int vgic_mmio_uaccess_write_its_iidr(struct kvm *kvm,
                                            struct vgic_its *its,
                                            gpa_t addr, unsigned int len,
                                            unsigned long val)
{
        u32 rev = GITS_IIDR_REV(val);

        if (rev >= NR_ITS_ABIS)
                return -EINVAL;
        return vgic_its_set_abi(its, rev);
}

static unsigned long vgic_mmio_read_its_idregs(struct kvm *kvm,
                                               struct vgic_its *its,
                                               gpa_t addr, unsigned int len)
{
        switch (addr & 0xffff) {
        case GITS_PIDR0:
                return 0x92;    /* part number, bits[7:0] */
        case GITS_PIDR1:
                return 0xb4;    /* part number, bits[11:8] */
        case GITS_PIDR2:
                return GIC_PIDR2_ARCH_GICv3 | 0x0b;
        case GITS_PIDR4:
                return 0x40;    /* This is a 64K software visible page */
        /* The following are the ID registers for (any) GIC. */
        case GITS_CIDR0:
                return 0x0d;
        case GITS_CIDR1:
                return 0xf0;
        case GITS_CIDR2:
                return 0x05;
        case GITS_CIDR3:
                return 0xb1;
        }

        return 0;
}

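/*
 * Looks up a (doorbell, devid, eventid) triplet in the translation
 * cache. Must be called with the lpi_list_lock held. The cache is kept
 * in LRU order, so a hit gets moved to the head of the list.
 */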
static struct vgic_irq *__vgic_its_check_cache(struct vgic_dist *dist,
                                               phys_addr_t db,
                                               u32 devid, u32 eventid)
{
        struct vgic_translation_cache_entry *cte;

        list_for_each_entry(cte, &dist->lpi_translation_cache, entry) {
                /*
                 * If we hit a NULL entry, there is nothing after this
                 * point.
                 */
                if (!cte->irq)
                        break;

                if (cte->db != db || cte->devid != devid ||
                    cte->eventid != eventid)
                        continue;

                /*
                 * Move this entry to the head, as it is the most
                 * recently used.
                 */
                if (!list_is_first(&cte->entry, &dist->lpi_translation_cache))
                        list_move(&cte->entry, &dist->lpi_translation_cache);

                return cte->irq;
        }

        return NULL;
}

static struct vgic_irq *vgic_its_check_cache(struct kvm *kvm, phys_addr_t db,
                                             u32 devid, u32 eventid)
{
        struct vgic_dist *dist = &kvm->arch.vgic;
        struct vgic_irq *irq;
        unsigned long flags;

        raw_spin_lock_irqsave(&dist->lpi_list_lock, flags);
        irq = __vgic_its_check_cache(dist, db, devid, eventid);
        raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags);

        return irq;
}

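/*
 * Inserts a translation into the cache, reusing the least recently
 * used entry when the cache is full.
 */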
static void vgic_its_cache_translation(struct kvm *kvm, struct vgic_its *its,
                                       u32 devid, u32 eventid,
                                       struct vgic_irq *irq)
{
        struct vgic_dist *dist = &kvm->arch.vgic;
        struct vgic_translation_cache_entry *cte;
        unsigned long flags;
        phys_addr_t db;

        /* Do not cache a directly injected interrupt */
        if (irq->hw)
                return;

        raw_spin_lock_irqsave(&dist->lpi_list_lock, flags);

        if (unlikely(list_empty(&dist->lpi_translation_cache)))
                goto out;

        /*
         * We could have raced with another CPU caching the same
         * translation behind our back, so let's check that it is not
         * already in there.
         */
        db = its->vgic_its_base + GITS_TRANSLATER;
        if (__vgic_its_check_cache(dist, db, devid, eventid))
                goto out;

        /* Always reuse the last entry (LRU policy) */
        cte = list_last_entry(&dist->lpi_translation_cache,
                              typeof(*cte), entry);

        /*
         * Caching the translation implies having an extra reference
         * to the interrupt, so drop the potential reference on what
         * was in the cache, and increment it on the new interrupt.
         */
        if (cte->irq)
                __vgic_put_lpi_locked(kvm, cte->irq);

        vgic_get_irq_kref(irq);

        cte->db         = db;
        cte->devid      = devid;
        cte->eventid    = eventid;
        cte->irq        = irq;

        /* Move the new translation to the head of the list */
        list_move(&cte->entry, &dist->lpi_translation_cache);

out:
        raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
}

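/*
 * Empties the translation cache, dropping the references it held on
 * the cached interrupts.
 */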
void vgic_its_invalidate_cache(struct kvm *kvm)
{
        struct vgic_dist *dist = &kvm->arch.vgic;
        struct vgic_translation_cache_entry *cte;
        unsigned long flags;

        raw_spin_lock_irqsave(&dist->lpi_list_lock, flags);

        list_for_each_entry(cte, &dist->lpi_translation_cache, entry) {
                /*
                 * If we hit a NULL entry, there is nothing after this
                 * point.
                 */
                if (!cte->irq)
                        break;

                __vgic_put_lpi_locked(kvm, cte->irq);
                cte->irq = NULL;
        }

        raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
}

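/*
 * Resolves a (devid, eventid) pair into the vgic_irq behind it and
 * caches the translation. Returns a positive ITS error code if the
 * translation is unmapped, or -EBUSY if the ITS or the target
 * redistributor has LPIs disabled.
 */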
int vgic_its_resolve_lpi(struct kvm *kvm, struct vgic_its *its,
                         u32 devid, u32 eventid, struct vgic_irq **irq)
{
        struct kvm_vcpu *vcpu;
        struct its_ite *ite;

        if (!its->enabled)
                return -EBUSY;

        ite = find_ite(its, devid, eventid);
        if (!ite || !its_is_collection_mapped(ite->collection))
                return E_ITS_INT_UNMAPPED_INTERRUPT;

        vcpu = kvm_get_vcpu(kvm, ite->collection->target_addr);
        if (!vcpu)
                return E_ITS_INT_UNMAPPED_INTERRUPT;

        if (!vcpu->arch.vgic_cpu.lpis_enabled)
                return -EBUSY;

        vgic_its_cache_translation(kvm, its, devid, eventid, ite->irq);

        *irq = ite->irq;
        return 0;
}

struct vgic_its *vgic_msi_to_its(struct kvm *kvm, struct kvm_msi *msi)
{
        u64 address;
        struct kvm_io_device *kvm_io_dev;
        struct vgic_io_device *iodev;

        if (!vgic_has_its(kvm))
                return ERR_PTR(-ENODEV);

        if (!(msi->flags & KVM_MSI_VALID_DEVID))
                return ERR_PTR(-EINVAL);

        address = (u64)msi->address_hi << 32 | msi->address_lo;

        kvm_io_dev = kvm_io_bus_get_dev(kvm, KVM_MMIO_BUS, address);
        if (!kvm_io_dev)
                return ERR_PTR(-EINVAL);

        if (kvm_io_dev->ops != &kvm_io_gic_ops)
                return ERR_PTR(-EINVAL);

        iodev = container_of(kvm_io_dev, struct vgic_io_device, dev);
        if (iodev->iodev_type != IODEV_ITS)
                return ERR_PTR(-EINVAL);

        return iodev->its;
}

/*
 * Find the target VCPU and the LPI number for a given devid/eventid pair
 * and make this IRQ pending, possibly injecting it.
 * Must be called with the its_lock mutex held.
 * Returns 0 on success, a positive error value for any ITS mapping
 * related errors and negative error values for generic errors.
 */
static int vgic_its_trigger_msi(struct kvm *kvm, struct vgic_its *its,
                                u32 devid, u32 eventid)
{
        struct vgic_irq *irq = NULL;
        unsigned long flags;
        int err;

        err = vgic_its_resolve_lpi(kvm, its, devid, eventid, &irq);
        if (err)
                return err;

        if (irq->hw)
                return irq_set_irqchip_state(irq->host_irq,
                                             IRQCHIP_STATE_PENDING, true);

        raw_spin_lock_irqsave(&irq->irq_lock, flags);
        irq->pending_latch = true;
        vgic_queue_irq_unlock(kvm, irq, flags);

        return 0;
}

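/*
 * Fast path for MSI injection: only consults the translation cache,
 * returning a negative value on a miss so that the caller can fall
 * back to the full resolution path.
 */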
int vgic_its_inject_cached_translation(struct kvm *kvm, struct kvm_msi *msi)
{
        struct vgic_irq *irq;
        unsigned long flags;
        phys_addr_t db;

        db = (u64)msi->address_hi << 32 | msi->address_lo;
        irq = vgic_its_check_cache(kvm, db, msi->devid, msi->data);

        if (!irq)
                return -1;

        raw_spin_lock_irqsave(&irq->irq_lock, flags);
        irq->pending_latch = true;
        vgic_queue_irq_unlock(kvm, irq, flags);

        return 0;
}

/*
 * Queries the KVM IO bus framework to get the ITS pointer from the given
 * doorbell address.
 * We then call vgic_its_trigger_msi() with the decoded data.
 * According to the KVM_SIGNAL_MSI API description, we return 1 on success.
 */
int vgic_its_inject_msi(struct kvm *kvm, struct kvm_msi *msi)
{
        struct vgic_its *its;
        int ret;

        if (!vgic_its_inject_cached_translation(kvm, msi))
                return 1;

        its = vgic_msi_to_its(kvm, msi);
        if (IS_ERR(its))
                return PTR_ERR(its);

        mutex_lock(&its->its_lock);
        ret = vgic_its_trigger_msi(kvm, its, msi->devid, msi->data);
        mutex_unlock(&its->its_lock);

        if (ret < 0)
                return ret;

        /*
         * KVM_SIGNAL_MSI demands a return value > 0 for success and 0
         * if the guest has blocked the MSI. So we map any LPI mapping
         * related error to that.
         */
        if (ret)
                return 0;
        else
                return 1;
}

/* Requires the its_lock to be held. */
static void its_free_ite(struct kvm *kvm, struct its_ite *ite)
{
        list_del(&ite->ite_list);

        /* This put matches the get in vgic_add_lpi. */
        if (ite->irq) {
                if (ite->irq->hw)
                        WARN_ON(its_unmap_vlpi(ite->irq->host_irq));

                vgic_put_irq(kvm, ite->irq);
        }

        kfree(ite);
}

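/*
 * Extracts a bit field from one of the four 64-bit words making up an
 * ITS command, converting from the little-endian guest layout.
 */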
static u64 its_cmd_mask_field(u64 *its_cmd, int word, int shift, int size)
{
        return (le64_to_cpu(its_cmd[word]) >> shift) & (BIT_ULL(size) - 1);
}

#define its_cmd_get_command(cmd)        its_cmd_mask_field(cmd, 0,  0,  8)
#define its_cmd_get_deviceid(cmd)       its_cmd_mask_field(cmd, 0, 32, 32)
#define its_cmd_get_size(cmd)           (its_cmd_mask_field(cmd, 1,  0,  5) + 1)
#define its_cmd_get_id(cmd)             its_cmd_mask_field(cmd, 1,  0, 32)
#define its_cmd_get_physical_id(cmd)    its_cmd_mask_field(cmd, 1, 32, 32)
#define its_cmd_get_collection(cmd)     its_cmd_mask_field(cmd, 2,  0, 16)
#define its_cmd_get_ittaddr(cmd)        (its_cmd_mask_field(cmd, 2,  8, 44) << 8)
#define its_cmd_get_target_addr(cmd)    its_cmd_mask_field(cmd, 2, 16, 32)
#define its_cmd_get_validbit(cmd)       its_cmd_mask_field(cmd, 2, 63,  1)

/*
 * The DISCARD command frees an Interrupt Translation Table Entry (ITTE).
 * Must be called with the its_lock mutex held.
 */
static int vgic_its_cmd_handle_discard(struct kvm *kvm, struct vgic_its *its,
                                       u64 *its_cmd)
{
        u32 device_id = its_cmd_get_deviceid(its_cmd);
        u32 event_id = its_cmd_get_id(its_cmd);
        struct its_ite *ite;

        ite = find_ite(its, device_id, event_id);
        if (ite && ite->collection) {
                /*
                 * Though the spec talks about removing the pending state, we
                 * don't bother here since we clear the ITTE anyway and the
                 * pending state is a property of the ITTE struct.
                 */
                vgic_its_invalidate_cache(kvm);

                its_free_ite(kvm, ite);
                return 0;
        }

        return E_ITS_DISCARD_UNMAPPED_INTERRUPT;
}

/*
 * The MOVI command moves an ITTE to a different collection.
 * Must be called with the its_lock mutex held.
 */
static int vgic_its_cmd_handle_movi(struct kvm *kvm, struct vgic_its *its,
                                    u64 *its_cmd)
{
        u32 device_id = its_cmd_get_deviceid(its_cmd);
        u32 event_id = its_cmd_get_id(its_cmd);
        u32 coll_id = its_cmd_get_collection(its_cmd);
        struct kvm_vcpu *vcpu;
        struct its_ite *ite;
        struct its_collection *collection;

        ite = find_ite(its, device_id, event_id);
        if (!ite)
                return E_ITS_MOVI_UNMAPPED_INTERRUPT;

        if (!its_is_collection_mapped(ite->collection))
                return E_ITS_MOVI_UNMAPPED_COLLECTION;

        collection = find_collection(its, coll_id);
        if (!its_is_collection_mapped(collection))
                return E_ITS_MOVI_UNMAPPED_COLLECTION;

        ite->collection = collection;
        vcpu = kvm_get_vcpu(kvm, collection->target_addr);

        vgic_its_invalidate_cache(kvm);

        return update_affinity(ite->irq, vcpu);
}

/*
 * Check whether an ID can be stored into the corresponding guest table.
 * For a direct table this is pretty easy, but gets a bit nasty for
 * indirect tables. We check whether the resulting guest physical address
 * is actually valid (covered by a memslot and guest accessible).
 * For this we have to read the respective first level entry.
 */
static bool vgic_its_check_id(struct vgic_its *its, u64 baser, u32 id,
                              gpa_t *eaddr)
{
        int l1_tbl_size = GITS_BASER_NR_PAGES(baser) * SZ_64K;
        u64 indirect_ptr, type = GITS_BASER_TYPE(baser);
        phys_addr_t base = GITS_BASER_ADDR_48_to_52(baser);
        int esz = GITS_BASER_ENTRY_SIZE(baser);
        int index, idx;
        gfn_t gfn;
        bool ret;

        switch (type) {
        case GITS_BASER_TYPE_DEVICE:
                if (id >= BIT_ULL(VITS_TYPER_DEVBITS))
                        return false;
                break;
        case GITS_BASER_TYPE_COLLECTION:
                /* as GITS_TYPER.CIL == 0, ITS supports 16-bit collection ID */
                if (id >= BIT_ULL(16))
                        return false;
                break;
        default:
                return false;
        }

        if (!(baser & GITS_BASER_INDIRECT)) {
                phys_addr_t addr;

                if (id >= (l1_tbl_size / esz))
                        return false;

                addr = base + id * esz;
                gfn = addr >> PAGE_SHIFT;

                if (eaddr)
                        *eaddr = addr;

                goto out;
        }

        /* calculate and check the index into the 1st level */
        index = id / (SZ_64K / esz);
        if (index >= (l1_tbl_size / sizeof(u64)))
                return false;

        /* Each 1st level entry is represented by a 64-bit value. */
        if (kvm_read_guest_lock(its->dev->kvm,
                           base + index * sizeof(indirect_ptr),
                           &indirect_ptr, sizeof(indirect_ptr)))
                return false;

        indirect_ptr = le64_to_cpu(indirect_ptr);

        /* check the valid bit of the first level entry */
        if (!(indirect_ptr & BIT_ULL(63)))
                return false;

        /* Mask the guest physical address and calculate the frame number. */
        indirect_ptr &= GENMASK_ULL(51, 16);

        /* Find the address of the actual entry */
        index = id % (SZ_64K / esz);
        indirect_ptr += index * esz;
        gfn = indirect_ptr >> PAGE_SHIFT;

        if (eaddr)
                *eaddr = indirect_ptr;

out:
        idx = srcu_read_lock(&its->dev->kvm->srcu);
        ret = kvm_is_visible_gfn(its->dev->kvm, gfn);
        srcu_read_unlock(&its->dev->kvm->srcu, idx);
        return ret;
}

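/* Must be called with the its_lock mutex held */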
static int vgic_its_alloc_collection(struct vgic_its *its,
                                     struct its_collection **colp,
                                     u32 coll_id)
{
        struct its_collection *collection;

        if (!vgic_its_check_id(its, its->baser_coll_table, coll_id, NULL))
                return E_ITS_MAPC_COLLECTION_OOR;

        collection = kzalloc(sizeof(*collection), GFP_KERNEL);
        if (!collection)
                return -ENOMEM;

        collection->collection_id = coll_id;
        collection->target_addr = COLLECTION_NOT_MAPPED;

        list_add_tail(&collection->coll_list, &its->collection_list);
        *colp = collection;

        return 0;
}

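/* Must be called with the its_lock mutex held */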
static void vgic_its_free_collection(struct vgic_its *its, u32 coll_id)
{
        struct its_collection *collection;
        struct its_device *device;
        struct its_ite *ite;

        /*
         * Clearing the mapping for that collection ID removes the
         * entry from the list. If there wasn't any before, we can
         * go home early.
         */
        collection = find_collection(its, coll_id);
        if (!collection)
                return;

        for_each_lpi_its(device, ite, its)
                if (ite->collection &&
                    ite->collection->collection_id == coll_id)
                        ite->collection = NULL;

        list_del(&collection->coll_list);
        kfree(collection);
}

/* Must be called with its_lock mutex held */
static struct its_ite *vgic_its_alloc_ite(struct its_device *device,
                                          struct its_collection *collection,
                                          u32 event_id)
{
        struct its_ite *ite;

        ite = kzalloc(sizeof(*ite), GFP_KERNEL);
        if (!ite)
                return ERR_PTR(-ENOMEM);

        ite->event_id   = event_id;
        ite->collection = collection;

        list_add_tail(&ite->ite_list, &device->itt_head);
        return ite;
}

/*
 * The MAPTI and MAPI commands map LPIs to ITTEs.
 * Must be called with its_lock mutex held.
 */
static int vgic_its_cmd_handle_mapi(struct kvm *kvm, struct vgic_its *its,
                                    u64 *its_cmd)
{
        u32 device_id = its_cmd_get_deviceid(its_cmd);
        u32 event_id = its_cmd_get_id(its_cmd);
        u32 coll_id = its_cmd_get_collection(its_cmd);
        struct its_ite *ite;
        struct kvm_vcpu *vcpu = NULL;
        struct its_device *device;
        struct its_collection *collection, *new_coll = NULL;
        struct vgic_irq *irq;
        int lpi_nr;

        device = find_its_device(its, device_id);
        if (!device)
                return E_ITS_MAPTI_UNMAPPED_DEVICE;

        if (event_id >= BIT_ULL(device->num_eventid_bits))
                return E_ITS_MAPTI_ID_OOR;

        if (its_cmd_get_command(its_cmd) == GITS_CMD_MAPTI)
                lpi_nr = its_cmd_get_physical_id(its_cmd);
        else
                lpi_nr = event_id;
        if (lpi_nr < GIC_LPI_OFFSET ||
            lpi_nr >= max_lpis_propbaser(kvm->arch.vgic.propbaser))
                return E_ITS_MAPTI_PHYSICALID_OOR;

        /* If there is an existing mapping, behavior is UNPREDICTABLE. */
        if (find_ite(its, device_id, event_id))
                return 0;

        collection = find_collection(its, coll_id);
        if (!collection) {
                int ret = vgic_its_alloc_collection(its, &collection, coll_id);
                if (ret)
                        return ret;
                new_coll = collection;
        }

        ite = vgic_its_alloc_ite(device, collection, event_id);
        if (IS_ERR(ite)) {
                if (new_coll)
                        vgic_its_free_collection(its, coll_id);
                return PTR_ERR(ite);
        }

        if (its_is_collection_mapped(collection))
                vcpu = kvm_get_vcpu(kvm, collection->target_addr);

        irq = vgic_add_lpi(kvm, lpi_nr, vcpu);
        if (IS_ERR(irq)) {
                if (new_coll)
                        vgic_its_free_collection(its, coll_id);
                its_free_ite(kvm, ite);
                return PTR_ERR(irq);
        }
        ite->irq = irq;

        return 0;
}

/* Requires the its_lock to be held. */
static void vgic_its_free_device(struct kvm *kvm, struct its_device *device)
{
        struct its_ite *ite, *temp;

        /*
         * The spec says that unmapping a device with still valid
         * ITTEs associated is UNPREDICTABLE. We remove all ITTEs,
         * since we cannot leave the memory unreferenced.
         */
        list_for_each_entry_safe(ite, temp, &device->itt_head, ite_list)
                its_free_ite(kvm, ite);

        vgic_its_invalidate_cache(kvm);

        list_del(&device->dev_list);
        kfree(device);
}

/* its lock must be held */
static void vgic_its_free_device_list(struct kvm *kvm, struct vgic_its *its)
{
        struct its_device *cur, *temp;

        list_for_each_entry_safe(cur, temp, &its->device_list, dev_list)
                vgic_its_free_device(kvm, cur);
}

/* its lock must be held */
static void vgic_its_free_collection_list(struct kvm *kvm, struct vgic_its *its)
{
        struct its_collection *cur, *temp;

        list_for_each_entry_safe(cur, temp, &its->collection_list, coll_list)
                vgic_its_free_collection(its, cur->collection_id);
}

/* Must be called with its_lock mutex held */
static struct its_device *vgic_its_alloc_device(struct vgic_its *its,
                                                u32 device_id, gpa_t itt_addr,
                                                u8 num_eventid_bits)
{
        struct its_device *device;

        device = kzalloc(sizeof(*device), GFP_KERNEL);
        if (!device)
                return ERR_PTR(-ENOMEM);

        device->device_id = device_id;
        device->itt_addr = itt_addr;
        device->num_eventid_bits = num_eventid_bits;
        INIT_LIST_HEAD(&device->itt_head);

        list_add_tail(&device->dev_list, &its->device_list);
        return device;
}

/*
 * MAPD maps or unmaps a device ID to Interrupt Translation Tables (ITTs).
 * Must be called with the its_lock mutex held.
 */
static int vgic_its_cmd_handle_mapd(struct kvm *kvm, struct vgic_its *its,
                                    u64 *its_cmd)
{
        u32 device_id = its_cmd_get_deviceid(its_cmd);
        bool valid = its_cmd_get_validbit(its_cmd);
        u8 num_eventid_bits = its_cmd_get_size(its_cmd);
        gpa_t itt_addr = its_cmd_get_ittaddr(its_cmd);
        struct its_device *device;

        if (!vgic_its_check_id(its, its->baser_device_table, device_id, NULL))
                return E_ITS_MAPD_DEVICE_OOR;

        if (valid && num_eventid_bits > VITS_TYPER_IDBITS)
                return E_ITS_MAPD_ITTSIZE_OOR;

        device = find_its_device(its, device_id);

        /*
         * The spec says that calling MAPD on an already mapped device
         * invalidates all cached data for this device. We implement this
         * by removing the mapping and re-establishing it.
         */
        if (device)
                vgic_its_free_device(kvm, device);

        /*
         * The spec does not say whether unmapping a not-mapped device
         * is an error, so we are done in any case.
         */
        if (!valid)
                return 0;

        device = vgic_its_alloc_device(its, device_id, itt_addr,
                                       num_eventid_bits);

        return PTR_ERR_OR_ZERO(device);
}

/*
 * The MAPC command maps collection IDs to redistributors.
 * Must be called with the its_lock mutex held.
 */
static int vgic_its_cmd_handle_mapc(struct kvm *kvm, struct vgic_its *its,
                                    u64 *its_cmd)
{
        u16 coll_id;
        u32 target_addr;
        struct its_collection *collection;
        bool valid;

        valid = its_cmd_get_validbit(its_cmd);
        coll_id = its_cmd_get_collection(its_cmd);
        target_addr = its_cmd_get_target_addr(its_cmd);

        if (target_addr >= atomic_read(&kvm->online_vcpus))
                return E_ITS_MAPC_PROCNUM_OOR;

        if (!valid) {
                vgic_its_free_collection(its, coll_id);
                vgic_its_invalidate_cache(kvm);
        } else {
                collection = find_collection(its, coll_id);

                if (!collection) {
                        int ret;

                        ret = vgic_its_alloc_collection(its, &collection,
                                                        coll_id);
                        if (ret)
                                return ret;
                        collection->target_addr = target_addr;
                } else {
                        collection->target_addr = target_addr;
                        update_affinity_collection(kvm, its, collection);
                }
        }

        return 0;
}

/*
 * The CLEAR command removes the pending state for a particular LPI.
 * Must be called with the its_lock mutex held.
 */
static int vgic_its_cmd_handle_clear(struct kvm *kvm, struct vgic_its *its,
                                     u64 *its_cmd)
{
        u32 device_id = its_cmd_get_deviceid(its_cmd);
        u32 event_id = its_cmd_get_id(its_cmd);
        struct its_ite *ite;

        ite = find_ite(its, device_id, event_id);
        if (!ite)
                return E_ITS_CLEAR_UNMAPPED_INTERRUPT;

        ite->irq->pending_latch = false;

        if (ite->irq->hw)
                return irq_set_irqchip_state(ite->irq->host_irq,
                                             IRQCHIP_STATE_PENDING, false);

        return 0;
}

/*
 * The INV command syncs the configuration bits from the memory table.
 * Must be called with the its_lock mutex held.
 */
static int vgic_its_cmd_handle_inv(struct kvm *kvm, struct vgic_its *its,
                                   u64 *its_cmd)
{
        u32 device_id = its_cmd_get_deviceid(its_cmd);
        u32 event_id = its_cmd_get_id(its_cmd);
        struct its_ite *ite;

        ite = find_ite(its, device_id, event_id);
        if (!ite)
                return E_ITS_INV_UNMAPPED_INTERRUPT;

        return update_lpi_config(kvm, ite->irq, NULL, true);
}

/*
 * The INVALL command requests flushing of all IRQ data in this collection.
 * Find the VCPU mapped to that collection, then iterate over the VM's list
 * of mapped LPIs and update the configuration for each IRQ which targets
 * the specified vcpu. The configuration will be read from the in-memory
 * configuration table.
 * Must be called with the its_lock mutex held.
 */
static int vgic_its_cmd_handle_invall(struct kvm *kvm, struct vgic_its *its,
                                      u64 *its_cmd)
{
        u32 coll_id = its_cmd_get_collection(its_cmd);
        struct its_collection *collection;
        struct kvm_vcpu *vcpu;
        struct vgic_irq *irq;
        u32 *intids;
        int irq_count, i;

        collection = find_collection(its, coll_id);
        if (!its_is_collection_mapped(collection))
                return E_ITS_INVALL_UNMAPPED_COLLECTION;

        vcpu = kvm_get_vcpu(kvm, collection->target_addr);

        irq_count = vgic_copy_lpi_list(kvm, vcpu, &intids);
        if (irq_count < 0)
                return irq_count;

        for (i = 0; i < irq_count; i++) {
                irq = vgic_get_irq(kvm, NULL, intids[i]);
                if (!irq)
                        continue;
                update_lpi_config(kvm, irq, vcpu, false);
                vgic_put_irq(kvm, irq);
        }

        kfree(intids);

        if (vcpu->arch.vgic_cpu.vgic_v3.its_vpe.its_vm)
                its_invall_vpe(&vcpu->arch.vgic_cpu.vgic_v3.its_vpe);

        return 0;
}

/*
 * The MOVALL command moves the pending state of all IRQs targeting one
 * redistributor to another. We don't hold the pending state in the VCPUs,
 * but in the IRQs instead, so there is really not much to do for us here.
 * However, the spec says that no IRQ must target the old redistributor
 * afterwards, so we make sure that no LPI is using the associated target_vcpu.
 * This command affects all LPIs in the system that target that redistributor.
 */
static int vgic_its_cmd_handle_movall(struct kvm *kvm, struct vgic_its *its,
                                      u64 *its_cmd)
{
        u32 target1_addr = its_cmd_get_target_addr(its_cmd);
        u32 target2_addr = its_cmd_mask_field(its_cmd, 3, 16, 32);
        struct kvm_vcpu *vcpu1, *vcpu2;
        struct vgic_irq *irq;
        u32 *intids;
        int irq_count, i;

        if (target1_addr >= atomic_read(&kvm->online_vcpus) ||
            target2_addr >= atomic_read(&kvm->online_vcpus))
                return E_ITS_MOVALL_PROCNUM_OOR;

        if (target1_addr == target2_addr)
                return 0;

        vcpu1 = kvm_get_vcpu(kvm, target1_addr);
        vcpu2 = kvm_get_vcpu(kvm, target2_addr);

        irq_count = vgic_copy_lpi_list(kvm, vcpu1, &intids);
        if (irq_count < 0)
                return irq_count;

        for (i = 0; i < irq_count; i++) {
                irq = vgic_get_irq(kvm, NULL, intids[i]);

                update_affinity(irq, vcpu2);

                vgic_put_irq(kvm, irq);
        }

        vgic_its_invalidate_cache(kvm);

        kfree(intids);
        return 0;
}

/*
 * The INT command injects the LPI associated with that DevID/EvID pair.
 * Must be called with the its_lock mutex held.
 */
static int vgic_its_cmd_handle_int(struct kvm *kvm, struct vgic_its *its,
                                   u64 *its_cmd)
{
        u32 msi_data = its_cmd_get_id(its_cmd);
        u64 msi_devid = its_cmd_get_deviceid(its_cmd);

        return vgic_its_trigger_msi(kvm, its, msi_devid, msi_data);
}

/*
 * This function is called with the its_cmd lock held, but the ITS data
 * structure lock dropped.
 */
static int vgic_its_handle_command(struct kvm *kvm, struct vgic_its *its,
                                   u64 *its_cmd)
{
        int ret = -ENODEV;

        mutex_lock(&its->its_lock);
        switch (its_cmd_get_command(its_cmd)) {
        case GITS_CMD_MAPD:
                ret = vgic_its_cmd_handle_mapd(kvm, its, its_cmd);
                break;
        case GITS_CMD_MAPC:
                ret = vgic_its_cmd_handle_mapc(kvm, its, its_cmd);
                break;
        case GITS_CMD_MAPI:
                ret = vgic_its_cmd_handle_mapi(kvm, its, its_cmd);
                break;
        case GITS_CMD_MAPTI:
                ret = vgic_its_cmd_handle_mapi(kvm, its, its_cmd);
                break;
        case GITS_CMD_MOVI:
                ret = vgic_its_cmd_handle_movi(kvm, its, its_cmd);
                break;
        case GITS_CMD_DISCARD:
                ret = vgic_its_cmd_handle_discard(kvm, its, its_cmd);
                break;
        case GITS_CMD_CLEAR:
                ret = vgic_its_cmd_handle_clear(kvm, its, its_cmd);
                break;
        case GITS_CMD_MOVALL:
                ret = vgic_its_cmd_handle_movall(kvm, its, its_cmd);
                break;
        case GITS_CMD_INT:
                ret = vgic_its_cmd_handle_int(kvm, its, its_cmd);
                break;
        case GITS_CMD_INV:
                ret = vgic_its_cmd_handle_inv(kvm, its, its_cmd);
                break;
        case GITS_CMD_INVALL:
                ret = vgic_its_cmd_handle_invall(kvm, its, its_cmd);
                break;
        case GITS_CMD_SYNC:
                /* we ignore this command: we are in sync all of the time */
                ret = 0;
                break;
        }
        mutex_unlock(&its->its_lock);

        return ret;
}

static u64 vgic_sanitise_its_baser(u64 reg)
{
        reg = vgic_sanitise_field(reg, GITS_BASER_SHAREABILITY_MASK,
                                  GITS_BASER_SHAREABILITY_SHIFT,
                                  vgic_sanitise_shareability);
        reg = vgic_sanitise_field(reg, GITS_BASER_INNER_CACHEABILITY_MASK,
                                  GITS_BASER_INNER_CACHEABILITY_SHIFT,
                                  vgic_sanitise_inner_cacheability);
        reg = vgic_sanitise_field(reg, GITS_BASER_OUTER_CACHEABILITY_MASK,
                                  GITS_BASER_OUTER_CACHEABILITY_SHIFT,
                                  vgic_sanitise_outer_cacheability);

        /* We support only one (ITS) page size: 64K */
        reg = (reg & ~GITS_BASER_PAGE_SIZE_MASK) | GITS_BASER_PAGE_SIZE_64K;

        return reg;
}

static u64 vgic_sanitise_its_cbaser(u64 reg)
{
        reg = vgic_sanitise_field(reg, GITS_CBASER_SHAREABILITY_MASK,
                                  GITS_CBASER_SHAREABILITY_SHIFT,
                                  vgic_sanitise_shareability);
        reg = vgic_sanitise_field(reg, GITS_CBASER_INNER_CACHEABILITY_MASK,
                                  GITS_CBASER_INNER_CACHEABILITY_SHIFT,
                                  vgic_sanitise_inner_cacheability);
        reg = vgic_sanitise_field(reg, GITS_CBASER_OUTER_CACHEABILITY_MASK,
                                  GITS_CBASER_OUTER_CACHEABILITY_SHIFT,
                                  vgic_sanitise_outer_cacheability);

        /* Sanitise the physical address to be 64k aligned. */
        reg &= ~GENMASK_ULL(15, 12);

        return reg;
}

static unsigned long vgic_mmio_read_its_cbaser(struct kvm *kvm,
                                               struct vgic_its *its,
                                               gpa_t addr, unsigned int len)
{
        return extract_bytes(its->cbaser, addr & 7, len);
}

static void vgic_mmio_write_its_cbaser(struct kvm *kvm, struct vgic_its *its,
                                       gpa_t addr, unsigned int len,
                                       unsigned long val)
{
        /* When GITS_CTLR.Enable is 1, this register is RO. */
        if (its->enabled)
                return;

        mutex_lock(&its->cmd_lock);
        its->cbaser = update_64bit_reg(its->cbaser, addr & 7, len, val);
        its->cbaser = vgic_sanitise_its_cbaser(its->cbaser);
        its->creadr = 0;
        /*
         * CWRITER is architecturally UNKNOWN on reset, but we need to reset
         * it to CREADR to make sure we start with an empty command buffer.
         */
        its->cwriter = its->creadr;
        mutex_unlock(&its->cmd_lock);
}

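/*
 * GITS_CBASER.Size encodes the command queue size as a number of 4K
 * pages minus one; each ITS command is 32 bytes long.
 */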
#define ITS_CMD_BUFFER_SIZE(baser)      ((((baser) & 0xff) + 1) << 12)
#define ITS_CMD_SIZE                    32
#define ITS_CMD_OFFSET(reg)             ((reg) & GENMASK(19, 5))

/* Must be called with the cmd_lock held. */
static void vgic_its_process_commands(struct kvm *kvm, struct vgic_its *its)
{
        gpa_t cbaser;
        u64 cmd_buf[4];

        /* Commands are only processed when the ITS is enabled. */
        if (!its->enabled)
                return;

        cbaser = GITS_CBASER_ADDRESS(its->cbaser);

        while (its->cwriter != its->creadr) {
                int ret = kvm_read_guest_lock(kvm, cbaser + its->creadr,
                                              cmd_buf, ITS_CMD_SIZE);
                /*
                 * If kvm_read_guest_lock() fails, this could be due to the
                 * guest programming a bogus value in CBASER or something
                 * else going wrong from which we cannot easily recover.
                 * According to section 6.3.2 in the GICv3 spec we can just
                 * ignore that command then.
                 */
1535                if (!ret)
1536                        vgic_its_handle_command(kvm, its, cmd_buf);
1537
1538                its->creadr += ITS_CMD_SIZE;
1539                if (its->creadr == ITS_CMD_BUFFER_SIZE(its->cbaser))
1540                        its->creadr = 0;
1541        }
1542}
1543
1544/*
1545 * By writing to CWRITER the guest announces new commands to be processed.
1546 * To avoid any races, we take the ITS cmd_lock, which protects our ring
1547 * buffer variables, so that there is only one user per ITS handling
1548 * commands at any given time.
1549 */
1550static void vgic_mmio_write_its_cwriter(struct kvm *kvm, struct vgic_its *its,
1551                                        gpa_t addr, unsigned int len,
1552                                        unsigned long val)
1553{
1554        u64 reg;
1555
1556        if (!its)
1557                return;
1558
1559        mutex_lock(&its->cmd_lock);
1560
1561        reg = update_64bit_reg(its->cwriter, addr & 7, len, val);
1562        reg = ITS_CMD_OFFSET(reg);
1563        if (reg >= ITS_CMD_BUFFER_SIZE(its->cbaser)) {
1564                mutex_unlock(&its->cmd_lock);
1565                return;
1566        }
1567        its->cwriter = reg;
1568
1569        vgic_its_process_commands(kvm, its);
1570
1571        mutex_unlock(&its->cmd_lock);
1572}
1573
1574static unsigned long vgic_mmio_read_its_cwriter(struct kvm *kvm,
1575                                                struct vgic_its *its,
1576                                                gpa_t addr, unsigned int len)
1577{
1578        return extract_bytes(its->cwriter, addr & 0x7, len);
1579}
1580
1581static unsigned long vgic_mmio_read_its_creadr(struct kvm *kvm,
1582                                               struct vgic_its *its,
1583                                               gpa_t addr, unsigned int len)
1584{
1585        return extract_bytes(its->creadr, addr & 0x7, len);
1586}
1587
1588static int vgic_mmio_uaccess_write_its_creadr(struct kvm *kvm,
1589                                              struct vgic_its *its,
1590                                              gpa_t addr, unsigned int len,
1591                                              unsigned long val)
1592{
1593        u32 cmd_offset;
1594        int ret = 0;
1595
1596        mutex_lock(&its->cmd_lock);
1597
1598        if (its->enabled) {
1599                ret = -EBUSY;
1600                goto out;
1601        }
1602
1603        cmd_offset = ITS_CMD_OFFSET(val);
1604        if (cmd_offset >= ITS_CMD_BUFFER_SIZE(its->cbaser)) {
1605                ret = -EINVAL;
1606                goto out;
1607        }
1608
1609        its->creadr = cmd_offset;
1610out:
1611        mutex_unlock(&its->cmd_lock);
1612        return ret;
1613}
1614
1615#define BASER_INDEX(addr) (((addr) / sizeof(u64)) & 0x7)
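/*
 * The eight GITS_BASER<n> registers are 8 bytes apart, so BASER_INDEX()
 * turns an access offset into the register number n. Only BASER0 (device
 * table) and BASER1 (collection table) are backed here; the remaining
 * registers read as zero and ignore writes, as the handlers below show.
 */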
1616static unsigned long vgic_mmio_read_its_baser(struct kvm *kvm,
1617                                              struct vgic_its *its,
1618                                              gpa_t addr, unsigned int len)
1619{
1620        u64 reg;
1621
1622        switch (BASER_INDEX(addr)) {
1623        case 0:
1624                reg = its->baser_device_table;
1625                break;
1626        case 1:
1627                reg = its->baser_coll_table;
1628                break;
1629        default:
1630                reg = 0;
1631                break;
1632        }
1633
1634        return extract_bytes(reg, addr & 7, len);
1635}
1636
1637#define GITS_BASER_RO_MASK      (GENMASK_ULL(52, 48) | GENMASK_ULL(58, 56))
1638static void vgic_mmio_write_its_baser(struct kvm *kvm,
1639                                      struct vgic_its *its,
1640                                      gpa_t addr, unsigned int len,
1641                                      unsigned long val)
1642{
1643        const struct vgic_its_abi *abi = vgic_its_get_abi(its);
1644        u64 entry_size, table_type;
1645        u64 reg, *regptr, clearbits = 0;
1646
1647        /* When GITS_CTLR.Enable is 1, we ignore write accesses. */
1648        if (its->enabled)
1649                return;
1650
1651        switch (BASER_INDEX(addr)) {
1652        case 0:
1653                regptr = &its->baser_device_table;
1654                entry_size = abi->dte_esz;
1655                table_type = GITS_BASER_TYPE_DEVICE;
1656                break;
1657        case 1:
1658                regptr = &its->baser_coll_table;
1659                entry_size = abi->cte_esz;
1660                table_type = GITS_BASER_TYPE_COLLECTION;
1661                clearbits = GITS_BASER_INDIRECT;
1662                break;
1663        default:
1664                return;
1665        }
1666
1667        reg = update_64bit_reg(*regptr, addr & 7, len, val);
1668        reg &= ~GITS_BASER_RO_MASK;
1669        reg &= ~clearbits;
1670
1671        reg |= (entry_size - 1) << GITS_BASER_ENTRY_SIZE_SHIFT;
1672        reg |= table_type << GITS_BASER_TYPE_SHIFT;
1673        reg = vgic_sanitise_its_baser(reg);
1674
1675        *regptr = reg;
1676
1677        if (!(reg & GITS_BASER_VALID)) {
1678                /* Take the its_lock to prevent a race with a save/restore */
1679                mutex_lock(&its->its_lock);
1680                switch (table_type) {
1681                case GITS_BASER_TYPE_DEVICE:
1682                        vgic_its_free_device_list(kvm, its);
1683                        break;
1684                case GITS_BASER_TYPE_COLLECTION:
1685                        vgic_its_free_collection_list(kvm, its);
1686                        break;
1687                }
1688                mutex_unlock(&its->its_lock);
1689        }
1690}
1691
1692static unsigned long vgic_mmio_read_its_ctlr(struct kvm *kvm,
1693                                             struct vgic_its *its,
1694                                             gpa_t addr, unsigned int len)
1695{
1696        u32 reg = 0;
1697
1698        mutex_lock(&its->cmd_lock);
1699        if (its->creadr == its->cwriter)
1700                reg |= GITS_CTLR_QUIESCENT;
1701        if (its->enabled)
1702                reg |= GITS_CTLR_ENABLE;
1703        mutex_unlock(&its->cmd_lock);
1704
1705        return reg;
1706}
1707
1708static void vgic_mmio_write_its_ctlr(struct kvm *kvm, struct vgic_its *its,
1709                                     gpa_t addr, unsigned int len,
1710                                     unsigned long val)
1711{
1712        mutex_lock(&its->cmd_lock);
1713
1714        /*
1715         * It is UNPREDICTABLE to enable the ITS while CBASER or either of
1716         * the device/collection BASERs is invalid.
1717         */
1718        if (!its->enabled && (val & GITS_CTLR_ENABLE) &&
1719                (!(its->baser_device_table & GITS_BASER_VALID) ||
1720                 !(its->baser_coll_table & GITS_BASER_VALID) ||
1721                 !(its->cbaser & GITS_CBASER_VALID)))
1722                goto out;
1723
1724        its->enabled = !!(val & GITS_CTLR_ENABLE);
1725        if (!its->enabled)
1726                vgic_its_invalidate_cache(kvm);
1727
1728        /*
1729         * Try to process any pending commands. This function bails out early
1730         * if the ITS is disabled or no commands have been queued.
1731         */
1732        vgic_its_process_commands(kvm, its);
1733
1734out:
1735        mutex_unlock(&its->cmd_lock);
1736}
1737
1738#define REGISTER_ITS_DESC(off, rd, wr, length, acc)             \
1739{                                                               \
1740        .reg_offset = off,                                      \
1741        .len = length,                                          \
1742        .access_flags = acc,                                    \
1743        .its_read = rd,                                         \
1744        .its_write = wr,                                        \
1745}
1746
1747#define REGISTER_ITS_DESC_UACCESS(off, rd, wr, uwr, length, acc)\
1748{                                                               \
1749        .reg_offset = off,                                      \
1750        .len = length,                                          \
1751        .access_flags = acc,                                    \
1752        .its_read = rd,                                         \
1753        .its_write = wr,                                        \
1754        .uaccess_its_write = uwr,                               \
1755}
1756
1757static void its_mmio_write_wi(struct kvm *kvm, struct vgic_its *its,
1758                              gpa_t addr, unsigned int len, unsigned long val)
1759{
1760        /* Ignore */
1761}
1762
1763static struct vgic_register_region its_registers[] = {
1764        REGISTER_ITS_DESC(GITS_CTLR,
1765                vgic_mmio_read_its_ctlr, vgic_mmio_write_its_ctlr, 4,
1766                VGIC_ACCESS_32bit),
1767        REGISTER_ITS_DESC_UACCESS(GITS_IIDR,
1768                vgic_mmio_read_its_iidr, its_mmio_write_wi,
1769                vgic_mmio_uaccess_write_its_iidr, 4,
1770                VGIC_ACCESS_32bit),
1771        REGISTER_ITS_DESC(GITS_TYPER,
1772                vgic_mmio_read_its_typer, its_mmio_write_wi, 8,
1773                VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
1774        REGISTER_ITS_DESC(GITS_CBASER,
1775                vgic_mmio_read_its_cbaser, vgic_mmio_write_its_cbaser, 8,
1776                VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
1777        REGISTER_ITS_DESC(GITS_CWRITER,
1778                vgic_mmio_read_its_cwriter, vgic_mmio_write_its_cwriter, 8,
1779                VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
1780        REGISTER_ITS_DESC_UACCESS(GITS_CREADR,
1781                vgic_mmio_read_its_creadr, its_mmio_write_wi,
1782                vgic_mmio_uaccess_write_its_creadr, 8,
1783                VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
1784        REGISTER_ITS_DESC(GITS_BASER,
1785                vgic_mmio_read_its_baser, vgic_mmio_write_its_baser, 0x40,
1786                VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
1787        REGISTER_ITS_DESC(GITS_IDREGS_BASE,
1788                vgic_mmio_read_its_idregs, its_mmio_write_wi, 0x30,
1789                VGIC_ACCESS_32bit),
1790};
1791
1792/* This is called on setting the LPI enable bit in the redistributor. */
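/*
 * GICR_PENDBASER.PTZ set by the guest advertises that the pending table
 * contains only zeroes, so there is nothing to scan in that case.
 */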
1793void vgic_enable_lpis(struct kvm_vcpu *vcpu)
1794{
1795        if (!(vcpu->arch.vgic_cpu.pendbaser & GICR_PENDBASER_PTZ))
1796                its_sync_lpi_pending_table(vcpu);
1797}
1798
1799static int vgic_register_its_iodev(struct kvm *kvm, struct vgic_its *its,
1800                                   u64 addr)
1801{
1802        struct vgic_io_device *iodev = &its->iodev;
1803        int ret;
1804
1805        mutex_lock(&kvm->slots_lock);
1806        if (!IS_VGIC_ADDR_UNDEF(its->vgic_its_base)) {
1807                ret = -EBUSY;
1808                goto out;
1809        }
1810
1811        its->vgic_its_base = addr;
1812        iodev->regions = its_registers;
1813        iodev->nr_regions = ARRAY_SIZE(its_registers);
1814        kvm_iodevice_init(&iodev->dev, &kvm_io_gic_ops);
1815
1816        iodev->base_addr = its->vgic_its_base;
1817        iodev->iodev_type = IODEV_ITS;
1818        iodev->its = its;
1819        ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, iodev->base_addr,
1820                                      KVM_VGIC_V3_ITS_SIZE, &iodev->dev);
1821out:
1822        mutex_unlock(&kvm->slots_lock);
1823
1824        return ret;
1825}
1826
1827/* Default is 16 cached LPIs per vcpu */
1828#define LPI_DEFAULT_PCPU_CACHE_SIZE     16
1829
1830void vgic_lpi_translation_cache_init(struct kvm *kvm)
1831{
1832        struct vgic_dist *dist = &kvm->arch.vgic;
1833        unsigned int sz;
1834        int i;
1835
1836        if (!list_empty(&dist->lpi_translation_cache))
1837                return;
1838
1839        sz = atomic_read(&kvm->online_vcpus) * LPI_DEFAULT_PCPU_CACHE_SIZE;
1840
1841        for (i = 0; i < sz; i++) {
1842                struct vgic_translation_cache_entry *cte;
1843
1844                /* An allocation failure is not fatal */
1845                cte = kzalloc(sizeof(*cte), GFP_KERNEL);
1846                if (WARN_ON(!cte))
1847                        break;
1848
1849                INIT_LIST_HEAD(&cte->entry);
1850                list_add(&cte->entry, &dist->lpi_translation_cache);
1851        }
1852}
1853
1854void vgic_lpi_translation_cache_destroy(struct kvm *kvm)
1855{
1856        struct vgic_dist *dist = &kvm->arch.vgic;
1857        struct vgic_translation_cache_entry *cte, *tmp;
1858
1859        vgic_its_invalidate_cache(kvm);
1860
1861        list_for_each_entry_safe(cte, tmp,
1862                                 &dist->lpi_translation_cache, entry) {
1863                list_del(&cte->entry);
1864                kfree(cte);
1865        }
1866}
1867
1868#define INITIAL_BASER_VALUE                                               \
1869        (GIC_BASER_CACHEABILITY(GITS_BASER, INNER, RaWb)                | \
1870         GIC_BASER_CACHEABILITY(GITS_BASER, OUTER, SameAsInner)         | \
1871         GIC_BASER_SHAREABILITY(GITS_BASER, InnerShareable)             | \
1872         GITS_BASER_PAGE_SIZE_64K)
1873
1874#define INITIAL_PROPBASER_VALUE                                           \
1875        (GIC_BASER_CACHEABILITY(GICR_PROPBASER, INNER, RaWb)            | \
1876         GIC_BASER_CACHEABILITY(GICR_PROPBASER, OUTER, SameAsInner)     | \
1877         GIC_BASER_SHAREABILITY(GICR_PROPBASER, InnerShareable))
1878
1879static int vgic_its_create(struct kvm_device *dev, u32 type)
1880{
1881        struct vgic_its *its;
1882
1883        if (type != KVM_DEV_TYPE_ARM_VGIC_ITS)
1884                return -ENODEV;
1885
1886        its = kzalloc(sizeof(struct vgic_its), GFP_KERNEL);
1887        if (!its)
1888                return -ENOMEM;
1889
1890        if (vgic_initialized(dev->kvm)) {
1891                int ret = vgic_v4_init(dev->kvm);
1892                if (ret < 0) {
1893                        kfree(its);
1894                        return ret;
1895                }
1896
1897                vgic_lpi_translation_cache_init(dev->kvm);
1898        }
1899
1900        mutex_init(&its->its_lock);
1901        mutex_init(&its->cmd_lock);
1902
1903        its->vgic_its_base = VGIC_ADDR_UNDEF;
1904
1905        INIT_LIST_HEAD(&its->device_list);
1906        INIT_LIST_HEAD(&its->collection_list);
1907
1908        dev->kvm->arch.vgic.msis_require_devid = true;
1909        dev->kvm->arch.vgic.has_its = true;
1910        its->enabled = false;
1911        its->dev = dev;
1912
1913        its->baser_device_table = INITIAL_BASER_VALUE                   |
1914                ((u64)GITS_BASER_TYPE_DEVICE << GITS_BASER_TYPE_SHIFT);
1915        its->baser_coll_table = INITIAL_BASER_VALUE |
1916                ((u64)GITS_BASER_TYPE_COLLECTION << GITS_BASER_TYPE_SHIFT);
1917        dev->kvm->arch.vgic.propbaser = INITIAL_PROPBASER_VALUE;
1918
1919        dev->private = its;
1920
1921        return vgic_its_set_abi(its, NR_ITS_ABIS - 1);
1922}
1923
1924static void vgic_its_destroy(struct kvm_device *kvm_dev)
1925{
1926        struct kvm *kvm = kvm_dev->kvm;
1927        struct vgic_its *its = kvm_dev->private;
1928
1929        mutex_lock(&its->its_lock);
1930
1931        vgic_its_free_device_list(kvm, its);
1932        vgic_its_free_collection_list(kvm, its);
1933
1934        mutex_unlock(&its->its_lock);
1935        kfree(its);
1936        kfree(kvm_dev); /* allocated by kvm_ioctl_create_device(), freed by .destroy */
1937}
1938
1939static int vgic_its_has_attr_regs(struct kvm_device *dev,
1940                                  struct kvm_device_attr *attr)
1941{
1942        const struct vgic_register_region *region;
1943        gpa_t offset = attr->attr;
1944        int align;
1945
1946        align = (offset < GITS_TYPER) || (offset >= GITS_PIDR4) ? 0x3 : 0x7;
1947
1948        if (offset & align)
1949                return -EINVAL;
1950
1951        region = vgic_find_mmio_region(its_registers,
1952                                       ARRAY_SIZE(its_registers),
1953                                       offset);
1954        if (!region)
1955                return -ENXIO;
1956
1957        return 0;
1958}
1959
1960static int vgic_its_attr_regs_access(struct kvm_device *dev,
1961                                     struct kvm_device_attr *attr,
1962                                     u64 *reg, bool is_write)
1963{
1964        const struct vgic_register_region *region;
1965        struct vgic_its *its;
1966        gpa_t addr, offset;
1967        unsigned int len;
1968        int align, ret = 0;
1969
1970        its = dev->private;
1971        offset = attr->attr;
1972
1973        /*
1974         * Although the spec supports upper/lower 32-bit accesses to
1975         * 64-bit ITS registers, the userspace ABI requires 64-bit
1976         * accesses to all 64-bit wide registers. We therefore only
1977         * support 32-bit accesses to GITS_CTLR, GITS_IIDR and GITS ID
1978         * registers
1979         * registers.
1980        if ((offset < GITS_TYPER) || (offset >= GITS_PIDR4))
1981                align = 0x3;
1982        else
1983                align = 0x7;
1984
1985        if (offset & align)
1986                return -EINVAL;
1987
1988        mutex_lock(&dev->kvm->lock);
1989
1990        if (IS_VGIC_ADDR_UNDEF(its->vgic_its_base)) {
1991                ret = -ENXIO;
1992                goto out;
1993        }
1994
1995        region = vgic_find_mmio_region(its_registers,
1996                                       ARRAY_SIZE(its_registers),
1997                                       offset);
1998        if (!region) {
1999                ret = -ENXIO;
2000                goto out;
2001        }
2002
2003        if (!lock_all_vcpus(dev->kvm)) {
2004                ret = -EBUSY;
2005                goto out;
2006        }
2007
2008        addr = its->vgic_its_base + offset;
2009
2010        len = region->access_flags & VGIC_ACCESS_64bit ? 8 : 4;
2011
2012        if (is_write) {
2013                if (region->uaccess_its_write)
2014                        ret = region->uaccess_its_write(dev->kvm, its, addr,
2015                                                        len, *reg);
2016                else
2017                        region->its_write(dev->kvm, its, addr, len, *reg);
2018        } else {
2019                *reg = region->its_read(dev->kvm, its, addr, len);
2020        }
2021        unlock_all_vcpus(dev->kvm);
2022out:
2023        mutex_unlock(&dev->kvm->lock);
2024        return ret;
2025}
2026
2027static u32 compute_next_devid_offset(struct list_head *h,
2028                                     struct its_device *dev)
2029{
2030        struct its_device *next;
2031        u32 next_offset;
2032
2033        if (list_is_last(&dev->dev_list, h))
2034                return 0;
2035        next = list_next_entry(dev, dev_list);
2036        next_offset = next->device_id - dev->device_id;
2037
2038        return min_t(u32, next_offset, VITS_DTE_MAX_DEVID_OFFSET);
2039}
2040
2041static u32 compute_next_eventid_offset(struct list_head *h, struct its_ite *ite)
2042{
2043        struct its_ite *next;
2044        u32 next_offset;
2045
2046        if (list_is_last(&ite->ite_list, h))
2047                return 0;
2048        next = list_next_entry(ite, ite_list);
2049        next_offset = next->event_id - ite->event_id;
2050
2051        return min_t(u32, next_offset, VITS_ITE_MAX_EVENTID_OFFSET);
2052}
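
/*
 * For illustration: with device IDs 5 and 12 adjacent in the sorted list,
 * the DTE for device 5 stores a next offset of 7. Offsets saturate at the
 * maximum value encodable in the saved entry (VITS_DTE_MAX_DEVID_OFFSET or
 * VITS_ITE_MAX_EVENTID_OFFSET), which bounds how far ahead a single entry
 * can point.
 */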
2053
2054/**
2055 * entry_fn_t - Callback called on a table entry restore path
2056 * @its: its handle
2057 * @id: id of the entry
2058 * @entry: pointer to the entry
2059 * @opaque: pointer to opaque data
2060 *
2061 * Return: < 0 on error, 0 if last element was identified, id offset to next
2062 * element otherwise
2063 */
2064typedef int (*entry_fn_t)(struct vgic_its *its, u32 id, void *entry,
2065                          void *opaque);
2066
2067/**
2068 * scan_its_table - Scan a contiguous table in guest RAM and apply a
2069 * function to each entry
2070 *
2071 * @its: its handle
2072 * @base: base gpa of the table
2073 * @size: size of the table in bytes
2074 * @esz: entry size in bytes
2075 * @start_id: the ID of the first entry in the table
2076 * (non-zero for second level tables)
2077 * @fn: function to apply on each entry
2078 *
2079 * Return: < 0 on error, 0 if last element was identified, 1 otherwise
2080 * (the last element may not be found on second level tables)
2081 */
2082static int scan_its_table(struct vgic_its *its, gpa_t base, int size, u32 esz,
2083                          int start_id, entry_fn_t fn, void *opaque)
2084{
2085        struct kvm *kvm = its->dev->kvm;
2086        unsigned long len = size;
2087        int id = start_id;
2088        gpa_t gpa = base;
2089        char entry[ESZ_MAX];
2090        int ret;
2091
2092        memset(entry, 0, esz);
2093
2094        while (len > 0) {
2095                int next_offset;
2096                size_t byte_offset;
2097
2098                ret = kvm_read_guest_lock(kvm, gpa, entry, esz);
2099                if (ret)
2100                        return ret;
2101
2102                next_offset = fn(its, id, entry, opaque);
2103                if (next_offset <= 0)
2104                        return next_offset;
2105
2106                byte_offset = next_offset * esz;
2107                id += next_offset;
2108                gpa += byte_offset;
2109                len -= byte_offset;
2110        }
2111        return 1;
2112}
2113
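/*
 * ABI v0 ITE layout produced below, per the KVM ITS migration ABI:
 *
 *   bits:   | 63 ... 48 | 47 ... 16 | 15 ... 0 |
 *   values: |   next    |   pINTID  |   ICID   |
 */
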
2114/**
2115 * vgic_its_save_ite - Save an interrupt translation entry at @gpa
2116 */
2117static int vgic_its_save_ite(struct vgic_its *its, struct its_device *dev,
2118                              struct its_ite *ite, gpa_t gpa, int ite_esz)
2119{
2120        struct kvm *kvm = its->dev->kvm;
2121        u32 next_offset;
2122        u64 val;
2123
2124        next_offset = compute_next_eventid_offset(&dev->itt_head, ite);
2125        val = ((u64)next_offset << KVM_ITS_ITE_NEXT_SHIFT) |
2126               ((u64)ite->irq->intid << KVM_ITS_ITE_PINTID_SHIFT) |
2127                ite->collection->collection_id;
2128        val = cpu_to_le64(val);
2129        return kvm_write_guest_lock(kvm, gpa, &val, ite_esz);
2130}
2131
2132/**
2133 * vgic_its_restore_ite - restore an interrupt translation entry
2134 * @event_id: id used for indexing
2135 * @ptr: pointer to the ITE entry
2136 * @opaque: pointer to the its_device
2137 */
2138static int vgic_its_restore_ite(struct vgic_its *its, u32 event_id,
2139                                void *ptr, void *opaque)
2140{
2141        struct its_device *dev = (struct its_device *)opaque;
2142        struct its_collection *collection;
2143        struct kvm *kvm = its->dev->kvm;
2144        struct kvm_vcpu *vcpu = NULL;
2145        u64 val;
2146        u64 *p = (u64 *)ptr;
2147        struct vgic_irq *irq;
2148        u32 coll_id, lpi_id;
2149        struct its_ite *ite;
2150        u32 offset;
2151
2152        val = *p;
2153
2154        val = le64_to_cpu(val);
2155
2156        coll_id = val & KVM_ITS_ITE_ICID_MASK;
2157        lpi_id = (val & KVM_ITS_ITE_PINTID_MASK) >> KVM_ITS_ITE_PINTID_SHIFT;
2158
2159        if (!lpi_id)
2160                return 1; /* invalid entry, no choice but to scan next entry */
2161
2162        if (lpi_id < VGIC_MIN_LPI)
2163                return -EINVAL;
2164
2165        offset = val >> KVM_ITS_ITE_NEXT_SHIFT;
2166        if (event_id + offset >= BIT_ULL(dev->num_eventid_bits))
2167                return -EINVAL;
2168
2169        collection = find_collection(its, coll_id);
2170        if (!collection)
2171                return -EINVAL;
2172
2173        ite = vgic_its_alloc_ite(dev, collection, event_id);
2174        if (IS_ERR(ite))
2175                return PTR_ERR(ite);
2176
2177        if (its_is_collection_mapped(collection))
2178                vcpu = kvm_get_vcpu(kvm, collection->target_addr);
2179
2180        irq = vgic_add_lpi(kvm, lpi_id, vcpu);
2181        if (IS_ERR(irq))
2182                return PTR_ERR(irq);
2183        ite->irq = irq;
2184
2185        return offset;
2186}
2187
2188static int vgic_its_ite_cmp(void *priv, struct list_head *a,
2189                            struct list_head *b)
2190{
2191        struct its_ite *itea = container_of(a, struct its_ite, ite_list);
2192        struct its_ite *iteb = container_of(b, struct its_ite, ite_list);
2193
2194        if (itea->event_id < iteb->event_id)
2195                return -1;
2196        else
2197                return 1;
2198}
2199
2200static int vgic_its_save_itt(struct vgic_its *its, struct its_device *device)
2201{
2202        const struct vgic_its_abi *abi = vgic_its_get_abi(its);
2203        gpa_t base = device->itt_addr;
2204        struct its_ite *ite;
2205        int ret;
2206        int ite_esz = abi->ite_esz;
2207
2208        list_sort(NULL, &device->itt_head, vgic_its_ite_cmp);
2209
2210        list_for_each_entry(ite, &device->itt_head, ite_list) {
2211                gpa_t gpa = base + ite->event_id * ite_esz;
2212
2213                /*
2214                 * If an LPI carries the HW bit, the interrupt is
2215                 * controlled by GICv4, and we do not
2216                 * have direct access to that state. Let's simply fail
2217                 * the save operation...
2218                 */
2219                if (ite->irq->hw)
2220                        return -EACCES;
2221
2222                ret = vgic_its_save_ite(its, device, ite, gpa, ite_esz);
2223                if (ret)
2224                        return ret;
2225        }
2226        return 0;
2227}
2228
2229/**
2230 * vgic_its_restore_itt - restore the ITT of a device
2231 *
2232 * @its: its handle
2233 * @dev: device handle
2234 *
2235 * Return 0 on success, < 0 on error
2236 */
2237static int vgic_its_restore_itt(struct vgic_its *its, struct its_device *dev)
2238{
2239        const struct vgic_its_abi *abi = vgic_its_get_abi(its);
2240        gpa_t base = dev->itt_addr;
2241        int ret;
2242        int ite_esz = abi->ite_esz;
2243        size_t max_size = BIT_ULL(dev->num_eventid_bits) * ite_esz;
2244
2245        ret = scan_its_table(its, base, max_size, ite_esz, 0,
2246                             vgic_its_restore_ite, dev);
2247
2248        /* scan_its_table returns +1 if all ITEs are invalid */
2249        if (ret > 0)
2250                ret = 0;
2251
2252        return ret;
2253}
2254
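/*
 * ABI v0 DTE layout produced below, per the KVM ITS migration ABI:
 *
 *   bits:   | 63 | 62 ... 49 | 48 ... 5 | 4 ... 0 |
 *   values: | V  |   next    | ITT_addr |  Size   |
 *
 * ITT_addr holds the 256-byte aligned ITT address shifted right by 8;
 * Size encodes the number of EventID bits minus one.
 */
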
2255/**
2256 * vgic_its_save_dte - Save a device table entry at a given GPA
2257 *
2258 * @its: ITS handle
2259 * @dev: ITS device
2260 * @ptr: GPA
2261 */
2262static int vgic_its_save_dte(struct vgic_its *its, struct its_device *dev,
2263                             gpa_t ptr, int dte_esz)
2264{
2265        struct kvm *kvm = its->dev->kvm;
2266        u64 val, itt_addr_field;
2267        u32 next_offset;
2268
2269        itt_addr_field = dev->itt_addr >> 8;
2270        next_offset = compute_next_devid_offset(&its->device_list, dev);
2271        val = (1ULL << KVM_ITS_DTE_VALID_SHIFT |
2272               ((u64)next_offset << KVM_ITS_DTE_NEXT_SHIFT) |
2273               (itt_addr_field << KVM_ITS_DTE_ITTADDR_SHIFT) |
2274                (dev->num_eventid_bits - 1));
2275        val = cpu_to_le64(val);
2276        return kvm_write_guest_lock(kvm, ptr, &val, dte_esz);
2277}
2278
2279/**
2280 * vgic_its_restore_dte - restore a device table entry
2281 *
2282 * @its: its handle
2283 * @id: device id the DTE corresponds to
2284 * @ptr: kernel VA where the 8 byte DTE is located
2285 * @opaque: unused
2286 *
2287 * Return: < 0 on error, 0 if the dte is the last one, id offset to the
2288 * next dte otherwise
2289 */
2290static int vgic_its_restore_dte(struct vgic_its *its, u32 id,
2291                                void *ptr, void *opaque)
2292{
2293        struct its_device *dev;
2294        gpa_t itt_addr;
2295        u8 num_eventid_bits;
2296        u64 entry = *(u64 *)ptr;
2297        bool valid;
2298        u32 offset;
2299        int ret;
2300
2301        entry = le64_to_cpu(entry);
2302
2303        valid = entry >> KVM_ITS_DTE_VALID_SHIFT;
2304        num_eventid_bits = (entry & KVM_ITS_DTE_SIZE_MASK) + 1;
2305        itt_addr = ((entry & KVM_ITS_DTE_ITTADDR_MASK)
2306                        >> KVM_ITS_DTE_ITTADDR_SHIFT) << 8;
2307
2308        if (!valid)
2309                return 1;
2310
2311        /* dte entry is valid */
2312        offset = (entry & KVM_ITS_DTE_NEXT_MASK) >> KVM_ITS_DTE_NEXT_SHIFT;
2313
2314        dev = vgic_its_alloc_device(its, id, itt_addr, num_eventid_bits);
2315        if (IS_ERR(dev))
2316                return PTR_ERR(dev);
2317
2318        ret = vgic_its_restore_itt(its, dev);
2319        if (ret) {
2320                vgic_its_free_device(its->dev->kvm, dev);
2321                return ret;
2322        }
2323
2324        return offset;
2325}
2326
2327static int vgic_its_device_cmp(void *priv, struct list_head *a,
2328                               struct list_head *b)
2329{
2330        struct its_device *deva = container_of(a, struct its_device, dev_list);
2331        struct its_device *devb = container_of(b, struct its_device, dev_list);
2332
2333        if (deva->device_id < devb->device_id)
2334                return -1;
2335        else
2336                return 1;
2337}
2338
2339/**
2340 * vgic_its_save_device_tables - Save the device table and all ITTs
2341 * into guest RAM
2342 *
2343 * L1/L2 handling is hidden by vgic_its_check_id() helper which directly
2344 * returns the GPA of the device entry
2345 */
2346static int vgic_its_save_device_tables(struct vgic_its *its)
2347{
2348        const struct vgic_its_abi *abi = vgic_its_get_abi(its);
2349        u64 baser = its->baser_device_table;
2350        struct its_device *dev;
2351        int dte_esz = abi->dte_esz;
2352
2353        if (!(baser & GITS_BASER_VALID))
2354                return 0;
2355
2356        list_sort(NULL, &its->device_list, vgic_its_device_cmp);
2357
2358        list_for_each_entry(dev, &its->device_list, dev_list) {
2359                int ret;
2360                gpa_t eaddr;
2361
2362                if (!vgic_its_check_id(its, baser,
2363                                       dev->device_id, &eaddr))
2364                        return -EINVAL;
2365
2366                ret = vgic_its_save_itt(its, dev);
2367                if (ret)
2368                        return ret;
2369
2370                ret = vgic_its_save_dte(its, dev, eaddr, dte_esz);
2371                if (ret)
2372                        return ret;
2373        }
2374        return 0;
2375}
2376
2377/**
2378 * handle_l1_dte - callback used for L1 device table entries (2 stage case)
2379 *
2380 * @its: its handle
2381 * @id: index of the entry in the L1 table
2382 * @addr: kernel VA
2383 * @opaque: unused
2384 *
2385 * L1 table entries are scanned one entry at a time.
2386 * Return < 0 if error, 0 if last dte was found when scanning the L2
2387 * table, +1 otherwise (meaning next L1 entry must be scanned)
2388 */
2389static int handle_l1_dte(struct vgic_its *its, u32 id, void *addr,
2390                         void *opaque)
2391{
2392        const struct vgic_its_abi *abi = vgic_its_get_abi(its);
2393        int l2_start_id = id * (SZ_64K / abi->dte_esz);
2394        u64 entry = *(u64 *)addr;
2395        int dte_esz = abi->dte_esz;
2396        gpa_t gpa;
2397        int ret;
2398
2399        entry = le64_to_cpu(entry);
2400
2401        if (!(entry & KVM_ITS_L1E_VALID_MASK))
2402                return 1;
2403
2404        gpa = entry & KVM_ITS_L1E_ADDR_MASK;
2405
2406        ret = scan_its_table(its, gpa, SZ_64K, dte_esz,
2407                             l2_start_id, vgic_its_restore_dte, NULL);
2408
2409        return ret;
2410}
2411
2412/**
2413 * vgic_its_restore_device_tables - Restore the device table and all ITT
2414 * from guest RAM to internal data structs
2415 */
2416static int vgic_its_restore_device_tables(struct vgic_its *its)
2417{
2418        const struct vgic_its_abi *abi = vgic_its_get_abi(its);
2419        u64 baser = its->baser_device_table;
2420        int l1_esz, ret;
2421        int l1_tbl_size = GITS_BASER_NR_PAGES(baser) * SZ_64K;
2422        gpa_t l1_gpa;
2423
2424        if (!(baser & GITS_BASER_VALID))
2425                return 0;
2426
2427        l1_gpa = GITS_BASER_ADDR_48_to_52(baser);
2428
2429        if (baser & GITS_BASER_INDIRECT) {
2430                l1_esz = GITS_LVL1_ENTRY_SIZE;
2431                ret = scan_its_table(its, l1_gpa, l1_tbl_size, l1_esz, 0,
2432                                     handle_l1_dte, NULL);
2433        } else {
2434                l1_esz = abi->dte_esz;
2435                ret = scan_its_table(its, l1_gpa, l1_tbl_size, l1_esz, 0,
2436                                     vgic_its_restore_dte, NULL);
2437        }
2438
2439        /* scan_its_table returns +1 if all entries are invalid */
2440        if (ret > 0)
2441                ret = 0;
2442
2443        return ret;
2444}
2445
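/*
 * ABI v0 CTE layout produced below, per the KVM ITS migration ABI:
 *
 *   bits:   | 63 | 62 ... 52 | 51 ... 16 | 15 ... 0 |
 *   values: | V  |   RES0    |  RDBase   |   ICID   |
 *
 * RDBase is the target vcpu index (collection->target_addr).
 */
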
2446static int vgic_its_save_cte(struct vgic_its *its,
2447                             struct its_collection *collection,
2448                             gpa_t gpa, int esz)
2449{
2450        u64 val;
2451
2452        val = (1ULL << KVM_ITS_CTE_VALID_SHIFT |
2453               ((u64)collection->target_addr << KVM_ITS_CTE_RDBASE_SHIFT) |
2454               collection->collection_id);
2455        val = cpu_to_le64(val);
2456        return kvm_write_guest_lock(its->dev->kvm, gpa, &val, esz);
2457}
2458
2459static int vgic_its_restore_cte(struct vgic_its *its, gpa_t gpa, int esz)
2460{
2461        struct its_collection *collection;
2462        struct kvm *kvm = its->dev->kvm;
2463        u32 target_addr, coll_id;
2464        u64 val;
2465        int ret;
2466
2467        BUG_ON(esz > sizeof(val));
2468        ret = kvm_read_guest_lock(kvm, gpa, &val, esz);
2469        if (ret)
2470                return ret;
2471        val = le64_to_cpu(val);
2472        if (!(val & KVM_ITS_CTE_VALID_MASK))
2473                return 0;
2474
2475        target_addr = (u32)(val >> KVM_ITS_CTE_RDBASE_SHIFT);
2476        coll_id = val & KVM_ITS_CTE_ICID_MASK;
2477
2478        if (target_addr >= atomic_read(&kvm->online_vcpus))
2479                return -EINVAL;
2480
2481        collection = find_collection(its, coll_id);
2482        if (collection)
2483                return -EEXIST;
2484        ret = vgic_its_alloc_collection(its, &collection, coll_id);
2485        if (ret)
2486                return ret;
2487        collection->target_addr = target_addr;
2488        return 1;
2489}
2490
2491/**
2492 * vgic_its_save_collection_table - Save the collection table into
2493 * guest RAM
2494 */
2495static int vgic_its_save_collection_table(struct vgic_its *its)
2496{
2497        const struct vgic_its_abi *abi = vgic_its_get_abi(its);
2498        u64 baser = its->baser_coll_table;
2499        gpa_t gpa = GITS_BASER_ADDR_48_to_52(baser);
2500        struct its_collection *collection;
2501        u64 val;
2502        size_t max_size, filled = 0;
2503        int ret, cte_esz = abi->cte_esz;
2504
2505        if (!(baser & GITS_BASER_VALID))
2506                return 0;
2507
2508        max_size = GITS_BASER_NR_PAGES(baser) * SZ_64K;
2509
2510        list_for_each_entry(collection, &its->collection_list, coll_list) {
2511                ret = vgic_its_save_cte(its, collection, gpa, cte_esz);
2512                if (ret)
2513                        return ret;
2514                gpa += cte_esz;
2515                filled += cte_esz;
2516        }
2517
2518        if (filled == max_size)
2519                return 0;
2520
2521        /*
2522         * The table is not fully filled; add a final dummy element
2523         * with the valid bit unset.
2524         */
2525        val = 0;
2526        BUG_ON(cte_esz > sizeof(val));
2527        ret = kvm_write_guest_lock(its->dev->kvm, gpa, &val, cte_esz);
2528        return ret;
2529}
2530
2531/**
2532 * vgic_its_restore_collection_table - reads the collection table
2533 * in guest memory and restores the ITS internal state. Requires the
2534 * BASER registers to have been restored beforehand.
2535 */
2536static int vgic_its_restore_collection_table(struct vgic_its *its)
2537{
2538        const struct vgic_its_abi *abi = vgic_its_get_abi(its);
2539        u64 baser = its->baser_coll_table;
2540        int cte_esz = abi->cte_esz;
2541        size_t max_size, read = 0;
2542        gpa_t gpa;
2543        int ret;
2544
2545        if (!(baser & GITS_BASER_VALID))
2546                return 0;
2547
2548        gpa = GITS_BASER_ADDR_48_to_52(baser);
2549
2550        max_size = GITS_BASER_NR_PAGES(baser) * SZ_64K;
2551
2552        while (read < max_size) {
2553                ret = vgic_its_restore_cte(its, gpa, cte_esz);
2554                if (ret <= 0)
2555                        break;
2556                gpa += cte_esz;
2557                read += cte_esz;
2558        }
2559
2560        if (ret > 0)
2561                return 0;
2562
2563        return ret;
2564}
2565
2566/**
2567 * vgic_its_save_tables_v0 - Save the ITS tables into guest RAM
2568 * according to v0 ABI
2569 */
2570static int vgic_its_save_tables_v0(struct vgic_its *its)
2571{
2572        int ret;
2573
2574        ret = vgic_its_save_device_tables(its);
2575        if (ret)
2576                return ret;
2577
2578        return vgic_its_save_collection_table(its);
2579}
2580
2581/**
2582 * vgic_its_restore_tables_v0 - Restore the ITS tables from guest RAM
2583 * to internal data structs according to v0 ABI. The collection table is
2584 * restored first, since the ITE restore path looks up collections by ID.
2585 */
2586static int vgic_its_restore_tables_v0(struct vgic_its *its)
2587{
2588        int ret;
2589
2590        ret = vgic_its_restore_collection_table(its);
2591        if (ret)
2592                return ret;
2593
2594        return vgic_its_restore_device_tables(its);
2595}
2596
2597static int vgic_its_commit_v0(struct vgic_its *its)
2598{
2599        const struct vgic_its_abi *abi;
2600
2601        abi = vgic_its_get_abi(its);
2602        its->baser_coll_table &= ~GITS_BASER_ENTRY_SIZE_MASK;
2603        its->baser_device_table &= ~GITS_BASER_ENTRY_SIZE_MASK;
2604
2605        its->baser_coll_table |= (GIC_ENCODE_SZ(abi->cte_esz, 5)
2606                                        << GITS_BASER_ENTRY_SIZE_SHIFT);
2607
2608        its->baser_device_table |= (GIC_ENCODE_SZ(abi->dte_esz, 5)
2609                                        << GITS_BASER_ENTRY_SIZE_SHIFT);
2610        return 0;
2611}
2612
2613static void vgic_its_reset(struct kvm *kvm, struct vgic_its *its)
2614{
2615        /* We need to keep the ABI specific field values */
2616        its->baser_coll_table &= ~GITS_BASER_VALID;
2617        its->baser_device_table &= ~GITS_BASER_VALID;
2618        its->cbaser = 0;
2619        its->creadr = 0;
2620        its->cwriter = 0;
2621        its->enabled = 0;
2622        vgic_its_free_device_list(kvm, its);
2623        vgic_its_free_collection_list(kvm, its);
2624}
2625
2626static int vgic_its_has_attr(struct kvm_device *dev,
2627                             struct kvm_device_attr *attr)
2628{
2629        switch (attr->group) {
2630        case KVM_DEV_ARM_VGIC_GRP_ADDR:
2631                switch (attr->attr) {
2632                case KVM_VGIC_ITS_ADDR_TYPE:
2633                        return 0;
2634                }
2635                break;
2636        case KVM_DEV_ARM_VGIC_GRP_CTRL:
2637                switch (attr->attr) {
2638                case KVM_DEV_ARM_VGIC_CTRL_INIT:
2639                        return 0;
2640                case KVM_DEV_ARM_ITS_CTRL_RESET:
2641                        return 0;
2642                case KVM_DEV_ARM_ITS_SAVE_TABLES:
2643                        return 0;
2644                case KVM_DEV_ARM_ITS_RESTORE_TABLES:
2645                        return 0;
2646                }
2647                break;
2648        case KVM_DEV_ARM_VGIC_GRP_ITS_REGS:
2649                return vgic_its_has_attr_regs(dev, attr);
2650        }
2651        return -ENXIO;
2652}
2653
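/*
 * Lock ordering for the control operations below: kvm->lock first, then
 * its->its_lock, then all vCPU locks; they must be taken in that order
 * wherever they nest.
 */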
2654static int vgic_its_ctrl(struct kvm *kvm, struct vgic_its *its, u64 attr)
2655{
2656        const struct vgic_its_abi *abi = vgic_its_get_abi(its);
2657        int ret = 0;
2658
2659        if (attr == KVM_DEV_ARM_VGIC_CTRL_INIT) /* Nothing to do */
2660                return 0;
2661
2662        mutex_lock(&kvm->lock);
2663        mutex_lock(&its->its_lock);
2664
2665        if (!lock_all_vcpus(kvm)) {
2666                mutex_unlock(&its->its_lock);
2667                mutex_unlock(&kvm->lock);
2668                return -EBUSY;
2669        }
2670
2671        switch (attr) {
2672        case KVM_DEV_ARM_ITS_CTRL_RESET:
2673                vgic_its_reset(kvm, its);
2674                break;
2675        case KVM_DEV_ARM_ITS_SAVE_TABLES:
2676                ret = abi->save_tables(its);
2677                break;
2678        case KVM_DEV_ARM_ITS_RESTORE_TABLES:
2679                ret = abi->restore_tables(its);
2680                break;
2681        }
2682
2683        unlock_all_vcpus(kvm);
2684        mutex_unlock(&its->its_lock);
2685        mutex_unlock(&kvm->lock);
2686        return ret;
2687}
2688
2689static int vgic_its_set_attr(struct kvm_device *dev,
2690                             struct kvm_device_attr *attr)
2691{
2692        struct vgic_its *its = dev->private;
2693        int ret;
2694
2695        switch (attr->group) {
2696        case KVM_DEV_ARM_VGIC_GRP_ADDR: {
2697                u64 __user *uaddr = (u64 __user *)(long)attr->addr;
2698                unsigned long type = (unsigned long)attr->attr;
2699                u64 addr;
2700
2701                if (type != KVM_VGIC_ITS_ADDR_TYPE)
2702                        return -ENODEV;
2703
2704                if (copy_from_user(&addr, uaddr, sizeof(addr)))
2705                        return -EFAULT;
2706
2707                ret = vgic_check_ioaddr(dev->kvm, &its->vgic_its_base,
2708                                        addr, SZ_64K);
2709                if (ret)
2710                        return ret;
2711
2712                return vgic_register_its_iodev(dev->kvm, its, addr);
2713        }
2714        case KVM_DEV_ARM_VGIC_GRP_CTRL:
2715                return vgic_its_ctrl(dev->kvm, its, attr->attr);
2716        case KVM_DEV_ARM_VGIC_GRP_ITS_REGS: {
2717                u64 __user *uaddr = (u64 __user *)(long)attr->addr;
2718                u64 reg;
2719
2720                if (get_user(reg, uaddr))
2721                        return -EFAULT;
2722
2723                return vgic_its_attr_regs_access(dev, attr, &reg, true);
2724        }
2725        }
2726        return -ENXIO;
2727}
2728
2729static int vgic_its_get_attr(struct kvm_device *dev,
2730                             struct kvm_device_attr *attr)
2731{
2732        switch (attr->group) {
2733        case KVM_DEV_ARM_VGIC_GRP_ADDR: {
2734                struct vgic_its *its = dev->private;
2735                u64 addr = its->vgic_its_base;
2736                u64 __user *uaddr = (u64 __user *)(long)attr->addr;
2737                unsigned long type = (unsigned long)attr->attr;
2738
2739                if (type != KVM_VGIC_ITS_ADDR_TYPE)
2740                        return -ENODEV;
2741
2742                if (copy_to_user(uaddr, &addr, sizeof(addr)))
2743                        return -EFAULT;
2744                break;
2745        }
2746        case KVM_DEV_ARM_VGIC_GRP_ITS_REGS: {
2747                u64 __user *uaddr = (u64 __user *)(long)attr->addr;
2748                u64 reg;
2749                int ret;
2750
2751                ret = vgic_its_attr_regs_access(dev, attr, &reg, false);
2752                if (ret)
2753                        return ret;
2754                return put_user(reg, uaddr);
2755        }
2756        default:
2757                return -ENXIO;
2758        }
2759
2760        return 0;
2761}
2762
2763static struct kvm_device_ops kvm_arm_vgic_its_ops = {
2764        .name = "kvm-arm-vgic-its",
2765        .create = vgic_its_create,
2766        .destroy = vgic_its_destroy,
2767        .set_attr = vgic_its_set_attr,
2768        .get_attr = vgic_its_get_attr,
2769        .has_attr = vgic_its_has_attr,
2770};
2771
2772int kvm_vgic_register_its_device(void)
2773{
2774        return kvm_register_device_ops(&kvm_arm_vgic_its_ops,
2775                                       KVM_DEV_TYPE_ARM_VGIC_ITS);
2776}
2777
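/*
 * A minimal userspace sketch (illustrative only, error handling omitted):
 * creating the ITS device and placing it in guest physical memory goes
 * through the generic KVM device API. "vm_fd" and "its_gpa" are assumed
 * to be provided by the caller.
 *
 *	struct kvm_create_device cd = {
 *		.type = KVM_DEV_TYPE_ARM_VGIC_ITS,
 *	};
 *	u64 its_gpa = ...;	// 64K-aligned guest physical address
 *	struct kvm_device_attr attr = {
 *		.group = KVM_DEV_ARM_VGIC_GRP_ADDR,
 *		.attr = KVM_VGIC_ITS_ADDR_TYPE,
 *		.addr = (u64)&its_gpa,
 *	};
 *
 *	ioctl(vm_fd, KVM_CREATE_DEVICE, &cd);	  // reaches vgic_its_create()
 *	ioctl(cd.fd, KVM_SET_DEVICE_ATTR, &attr); // reaches vgic_its_set_attr()
 *
 * Table save/restore for migration uses the same ioctl with group
 * KVM_DEV_ARM_VGIC_GRP_CTRL and attr KVM_DEV_ARM_ITS_SAVE_TABLES /
 * KVM_DEV_ARM_ITS_RESTORE_TABLES.
 */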