linux/arch/arm64/kvm/vgic/vgic-its.c
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * GICv3 ITS emulation
   4 *
   5 * Copyright (C) 2015,2016 ARM Ltd.
   6 * Author: Andre Przywara <andre.przywara@arm.com>
   7 */
   8
   9#include <linux/cpu.h>
  10#include <linux/kvm.h>
  11#include <linux/kvm_host.h>
  12#include <linux/interrupt.h>
  13#include <linux/list.h>
  14#include <linux/uaccess.h>
  15#include <linux/list_sort.h>
  16
  17#include <linux/irqchip/arm-gic-v3.h>
  18
  19#include <asm/kvm_emulate.h>
  20#include <asm/kvm_arm.h>
  21#include <asm/kvm_mmu.h>
  22
  23#include "vgic.h"
  24#include "vgic-mmio.h"
  25
  26static int vgic_its_save_tables_v0(struct vgic_its *its);
  27static int vgic_its_restore_tables_v0(struct vgic_its *its);
  28static int vgic_its_commit_v0(struct vgic_its *its);
  29static int update_lpi_config(struct kvm *kvm, struct vgic_irq *irq,
  30                             struct kvm_vcpu *filter_vcpu, bool needs_inv);
  31
  32/*
  33 * Creates a new (reference to a) struct vgic_irq for a given LPI.
  34 * If this LPI is already mapped on another ITS, we increase its refcount
  35 * and return a pointer to the existing structure.
  36 * If this is a "new" LPI, we allocate and initialize a new struct vgic_irq.
  37 * This function returns a pointer to the _unlocked_ structure.
  38 */
  39static struct vgic_irq *vgic_add_lpi(struct kvm *kvm, u32 intid,
  40                                     struct kvm_vcpu *vcpu)
  41{
  42        struct vgic_dist *dist = &kvm->arch.vgic;
  43        struct vgic_irq *irq = vgic_get_irq(kvm, NULL, intid), *oldirq;
  44        unsigned long flags;
  45        int ret;
  46
  47        /* In this case there is no put, since we keep the reference. */
  48        if (irq)
  49                return irq;
  50
  51        irq = kzalloc(sizeof(struct vgic_irq), GFP_KERNEL);
  52        if (!irq)
  53                return ERR_PTR(-ENOMEM);
  54
  55        INIT_LIST_HEAD(&irq->lpi_list);
  56        INIT_LIST_HEAD(&irq->ap_list);
  57        raw_spin_lock_init(&irq->irq_lock);
  58
  59        irq->config = VGIC_CONFIG_EDGE;
  60        kref_init(&irq->refcount);
  61        irq->intid = intid;
  62        irq->target_vcpu = vcpu;
  63        irq->group = 1;
  64
  65        raw_spin_lock_irqsave(&dist->lpi_list_lock, flags);
  66
  67        /*
  68         * There could be a race with another vgic_add_lpi(), so we need to
  69         * check that we don't add a second list entry with the same LPI.
  70         */
  71        list_for_each_entry(oldirq, &dist->lpi_list_head, lpi_list) {
  72                if (oldirq->intid != intid)
  73                        continue;
  74
   75                /* Someone was faster with adding this LPI, let's use that. */
  76                kfree(irq);
  77                irq = oldirq;
  78
  79                /*
  80                 * This increases the refcount, the caller is expected to
  81                 * call vgic_put_irq() on the returned pointer once it's
  82                 * finished with the IRQ.
  83                 */
  84                vgic_get_irq_kref(irq);
  85
  86                goto out_unlock;
  87        }
  88
  89        list_add_tail(&irq->lpi_list, &dist->lpi_list_head);
  90        dist->lpi_list_count++;
  91
  92out_unlock:
  93        raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
  94
  95        /*
  96         * We "cache" the configuration table entries in our struct vgic_irq's.
  97         * However we only have those structs for mapped IRQs, so we read in
  98         * the respective config data from memory here upon mapping the LPI.
  99         *
 100         * Should any of these fail, behave as if we couldn't create the LPI
 101         * by dropping the refcount and returning the error.
 102         */
 103        ret = update_lpi_config(kvm, irq, NULL, false);
 104        if (ret) {
 105                vgic_put_irq(kvm, irq);
 106                return ERR_PTR(ret);
 107        }
 108
 109        ret = vgic_v3_lpi_sync_pending_status(kvm, irq);
 110        if (ret) {
 111                vgic_put_irq(kvm, irq);
 112                return ERR_PTR(ret);
 113        }
 114
 115        return irq;
 116}
 117
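/*
 * Bookkeeping for a device mapped via MAPD: the guest-programmed ITT
 * address and event ID width, plus the head of the list of ITEs created
 * for this device.
 */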
 118struct its_device {
 119        struct list_head dev_list;
 120
 121        /* the head for the list of ITTEs */
 122        struct list_head itt_head;
 123        u32 num_eventid_bits;
 124        gpa_t itt_addr;
 125        u32 device_id;
 126};
 127
 128#define COLLECTION_NOT_MAPPED ((u32)~0)
 129
 130struct its_collection {
 131        struct list_head coll_list;
 132
 133        u32 collection_id;
 134        u32 target_addr;
 135};
 136
 137#define its_is_collection_mapped(coll) ((coll) && \
 138                                ((coll)->target_addr != COLLECTION_NOT_MAPPED))
 139
 140struct its_ite {
 141        struct list_head ite_list;
 142
 143        struct vgic_irq *irq;
 144        struct its_collection *collection;
 145        u32 event_id;
 146};
 147
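/*
 * An LPI translation cache entry: remembers the (doorbell, devid, eventid)
 * triplet of a previously resolved MSI so that the device/ITE walk in
 * vgic_its_resolve_lpi() can be skipped on the injection fast path.
 * A populated entry holds an extra reference on the vgic_irq it points to.
 */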
 148struct vgic_translation_cache_entry {
 149        struct list_head        entry;
 150        phys_addr_t             db;
 151        u32                     devid;
 152        u32                     eventid;
 153        struct vgic_irq         *irq;
 154};
 155
 156/**
 157 * struct vgic_its_abi - ITS abi ops and settings
 158 * @cte_esz: collection table entry size
 159 * @dte_esz: device table entry size
 160 * @ite_esz: interrupt translation table entry size
  161 * @save_tables: save the ITS tables into guest RAM
 162 * @restore_tables: restore the ITS internal structs from tables
 163 *  stored in guest RAM
 164 * @commit: initialize the registers which expose the ABI settings,
 165 *  especially the entry sizes
 166 */
 167struct vgic_its_abi {
 168        int cte_esz;
 169        int dte_esz;
 170        int ite_esz;
 171        int (*save_tables)(struct vgic_its *its);
 172        int (*restore_tables)(struct vgic_its *its);
 173        int (*commit)(struct vgic_its *its);
 174};
 175
 176#define ABI_0_ESZ       8
 177#define ESZ_MAX         ABI_0_ESZ
 178
 179static const struct vgic_its_abi its_table_abi_versions[] = {
 180        [0] = {
 181         .cte_esz = ABI_0_ESZ,
 182         .dte_esz = ABI_0_ESZ,
 183         .ite_esz = ABI_0_ESZ,
 184         .save_tables = vgic_its_save_tables_v0,
 185         .restore_tables = vgic_its_restore_tables_v0,
 186         .commit = vgic_its_commit_v0,
 187        },
 188};
 189
 190#define NR_ITS_ABIS     ARRAY_SIZE(its_table_abi_versions)
 191
 192inline const struct vgic_its_abi *vgic_its_get_abi(struct vgic_its *its)
 193{
 194        return &its_table_abi_versions[its->abi_rev];
 195}
 196
 197static int vgic_its_set_abi(struct vgic_its *its, u32 rev)
 198{
 199        const struct vgic_its_abi *abi;
 200
 201        its->abi_rev = rev;
 202        abi = vgic_its_get_abi(its);
 203        return abi->commit(its);
 204}
 205
 206/*
 207 * Find and returns a device in the device table for an ITS.
 208 * Must be called with the its_lock mutex held.
 209 */
 210static struct its_device *find_its_device(struct vgic_its *its, u32 device_id)
 211{
 212        struct its_device *device;
 213
 214        list_for_each_entry(device, &its->device_list, dev_list)
 215                if (device_id == device->device_id)
 216                        return device;
 217
 218        return NULL;
 219}
 220
 221/*
  222 * Finds and returns an interrupt translation table entry (ITTE) for a given
 223 * Device ID/Event ID pair on an ITS.
 224 * Must be called with the its_lock mutex held.
 225 */
 226static struct its_ite *find_ite(struct vgic_its *its, u32 device_id,
 227                                  u32 event_id)
 228{
 229        struct its_device *device;
 230        struct its_ite *ite;
 231
 232        device = find_its_device(its, device_id);
 233        if (device == NULL)
 234                return NULL;
 235
 236        list_for_each_entry(ite, &device->itt_head, ite_list)
 237                if (ite->event_id == event_id)
 238                        return ite;
 239
 240        return NULL;
 241}
 242
  243/* To be used as an iterator, this macro omits the enclosing parentheses */
 244#define for_each_lpi_its(dev, ite, its) \
 245        list_for_each_entry(dev, &(its)->device_list, dev_list) \
 246                list_for_each_entry(ite, &(dev)->itt_head, ite_list)
 247
 248#define GIC_LPI_OFFSET 8192
 249
 250#define VITS_TYPER_IDBITS 16
 251#define VITS_TYPER_DEVBITS 16
 252#define VITS_DTE_MAX_DEVID_OFFSET       (BIT(14) - 1)
 253#define VITS_ITE_MAX_EVENTID_OFFSET     (BIT(16) - 1)
 254
 255/*
 256 * Finds and returns a collection in the ITS collection table.
 257 * Must be called with the its_lock mutex held.
 258 */
 259static struct its_collection *find_collection(struct vgic_its *its, int coll_id)
 260{
 261        struct its_collection *collection;
 262
 263        list_for_each_entry(collection, &its->collection_list, coll_list) {
 264                if (coll_id == collection->collection_id)
 265                        return collection;
 266        }
 267
 268        return NULL;
 269}
 270
 271#define LPI_PROP_ENABLE_BIT(p)  ((p) & LPI_PROP_ENABLED)
 272#define LPI_PROP_PRIORITY(p)    ((p) & 0xfc)
 273
 274/*
 275 * Reads the configuration data for a given LPI from guest memory and
 276 * updates the fields in struct vgic_irq.
  277 * If filter_vcpu is not NULL, the update is only applied if the IRQ is
  278 * targeting this VCPU; it is applied unconditionally if filter_vcpu is NULL.
 279 */
 280static int update_lpi_config(struct kvm *kvm, struct vgic_irq *irq,
 281                             struct kvm_vcpu *filter_vcpu, bool needs_inv)
 282{
 283        u64 propbase = GICR_PROPBASER_ADDRESS(kvm->arch.vgic.propbaser);
 284        u8 prop;
 285        int ret;
 286        unsigned long flags;
 287
 288        ret = kvm_read_guest_lock(kvm, propbase + irq->intid - GIC_LPI_OFFSET,
 289                                  &prop, 1);
 290
 291        if (ret)
 292                return ret;
 293
 294        raw_spin_lock_irqsave(&irq->irq_lock, flags);
 295
 296        if (!filter_vcpu || filter_vcpu == irq->target_vcpu) {
 297                irq->priority = LPI_PROP_PRIORITY(prop);
 298                irq->enabled = LPI_PROP_ENABLE_BIT(prop);
 299
 300                if (!irq->hw) {
 301                        vgic_queue_irq_unlock(kvm, irq, flags);
 302                        return 0;
 303                }
 304        }
 305
 306        raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
 307
 308        if (irq->hw)
 309                return its_prop_update_vlpi(irq->host_irq, prop, needs_inv);
 310
 311        return 0;
 312}
 313
 314/*
 315 * Create a snapshot of the current LPIs targeting @vcpu, so that we can
 316 * enumerate those LPIs without holding any lock.
 317 * Returns their number and puts the kmalloc'ed array into intid_ptr.
 318 */
 319int vgic_copy_lpi_list(struct kvm *kvm, struct kvm_vcpu *vcpu, u32 **intid_ptr)
 320{
 321        struct vgic_dist *dist = &kvm->arch.vgic;
 322        struct vgic_irq *irq;
 323        unsigned long flags;
 324        u32 *intids;
 325        int irq_count, i = 0;
 326
 327        /*
 328         * There is an obvious race between allocating the array and LPIs
 329         * being mapped/unmapped. If we ended up here as a result of a
 330         * command, we're safe (locks are held, preventing another
 331         * command). If coming from another path (such as enabling LPIs),
 332         * we must be careful not to overrun the array.
 333         */
 334        irq_count = READ_ONCE(dist->lpi_list_count);
 335        intids = kmalloc_array(irq_count, sizeof(intids[0]), GFP_KERNEL);
 336        if (!intids)
 337                return -ENOMEM;
 338
 339        raw_spin_lock_irqsave(&dist->lpi_list_lock, flags);
 340        list_for_each_entry(irq, &dist->lpi_list_head, lpi_list) {
 341                if (i == irq_count)
 342                        break;
 343                /* We don't need to "get" the IRQ, as we hold the list lock. */
 344                if (vcpu && irq->target_vcpu != vcpu)
 345                        continue;
 346                intids[i++] = irq->intid;
 347        }
 348        raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
 349
 350        *intid_ptr = intids;
 351        return i;
 352}
 353
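/*
 * Changes the target VCPU of an LPI. For a hardware-backed (directly
 * injected) LPI this also moves the vLPI mapping over to the new vcpu's
 * vPE and adjusts the per-vPE vLPI counts accordingly.
 */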
 354static int update_affinity(struct vgic_irq *irq, struct kvm_vcpu *vcpu)
 355{
 356        int ret = 0;
 357        unsigned long flags;
 358
 359        raw_spin_lock_irqsave(&irq->irq_lock, flags);
 360        irq->target_vcpu = vcpu;
 361        raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
 362
 363        if (irq->hw) {
 364                struct its_vlpi_map map;
 365
 366                ret = its_get_vlpi(irq->host_irq, &map);
 367                if (ret)
 368                        return ret;
 369
 370                if (map.vpe)
 371                        atomic_dec(&map.vpe->vlpi_count);
 372                map.vpe = &vcpu->arch.vgic_cpu.vgic_v3.its_vpe;
 373                atomic_inc(&map.vpe->vlpi_count);
 374
 375                ret = its_map_vlpi(irq->host_irq, &map);
 376        }
 377
 378        return ret;
 379}
 380
 381/*
 382 * Promotes the ITS view of affinity of an ITTE (which redistributor this LPI
 383 * is targeting) to the VGIC's view, which deals with target VCPUs.
  384 * Needs to be called whenever either the collection for an LPI has
 385 * changed or the collection itself got retargeted.
 386 */
 387static void update_affinity_ite(struct kvm *kvm, struct its_ite *ite)
 388{
 389        struct kvm_vcpu *vcpu;
 390
 391        if (!its_is_collection_mapped(ite->collection))
 392                return;
 393
 394        vcpu = kvm_get_vcpu(kvm, ite->collection->target_addr);
 395        update_affinity(ite->irq, vcpu);
 396}
 397
 398/*
 399 * Updates the target VCPU for every LPI targeting this collection.
 400 * Must be called with the its_lock mutex held.
 401 */
 402static void update_affinity_collection(struct kvm *kvm, struct vgic_its *its,
 403                                       struct its_collection *coll)
 404{
 405        struct its_device *device;
 406        struct its_ite *ite;
 407
 408        for_each_lpi_its(device, ite, its) {
 409                if (!ite->collection || coll != ite->collection)
 410                        continue;
 411
 412                update_affinity_ite(kvm, ite);
 413        }
 414}
 415
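/*
 * Computes the upper bound on interrupt IDs reachable through
 * GICR_PROPBASER: the IDbits field (bits [4:0]) encodes the number of ID
 * bits minus one, capped at what our ITS emulation supports.
 */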
 416static u32 max_lpis_propbaser(u64 propbaser)
 417{
 418        int nr_idbits = (propbaser & 0x1f) + 1;
 419
 420        return 1U << min(nr_idbits, INTERRUPT_ID_BITS_ITS);
 421}
 422
 423/*
 424 * Sync the pending table pending bit of LPIs targeting @vcpu
  425 * with our own data structures. This relies on the LPIs being
  426 * mapped beforehand.
 427 */
 428static int its_sync_lpi_pending_table(struct kvm_vcpu *vcpu)
 429{
 430        gpa_t pendbase = GICR_PENDBASER_ADDRESS(vcpu->arch.vgic_cpu.pendbaser);
 431        struct vgic_irq *irq;
 432        int last_byte_offset = -1;
 433        int ret = 0;
 434        u32 *intids;
 435        int nr_irqs, i;
 436        unsigned long flags;
 437        u8 pendmask;
 438
 439        nr_irqs = vgic_copy_lpi_list(vcpu->kvm, vcpu, &intids);
 440        if (nr_irqs < 0)
 441                return nr_irqs;
 442
 443        for (i = 0; i < nr_irqs; i++) {
 444                int byte_offset, bit_nr;
 445
 446                byte_offset = intids[i] / BITS_PER_BYTE;
 447                bit_nr = intids[i] % BITS_PER_BYTE;
 448
 449                /*
 450                 * For contiguously allocated LPIs chances are we just read
 451                 * this very same byte in the last iteration. Reuse that.
 452                 */
 453                if (byte_offset != last_byte_offset) {
 454                        ret = kvm_read_guest_lock(vcpu->kvm,
 455                                                  pendbase + byte_offset,
 456                                                  &pendmask, 1);
 457                        if (ret) {
 458                                kfree(intids);
 459                                return ret;
 460                        }
 461                        last_byte_offset = byte_offset;
 462                }
 463
 464                irq = vgic_get_irq(vcpu->kvm, NULL, intids[i]);
 465                raw_spin_lock_irqsave(&irq->irq_lock, flags);
 466                irq->pending_latch = pendmask & (1U << bit_nr);
 467                vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
 468                vgic_put_irq(vcpu->kvm, irq);
 469        }
 470
 471        kfree(intids);
 472
 473        return ret;
 474}
 475
 476static unsigned long vgic_mmio_read_its_typer(struct kvm *kvm,
 477                                              struct vgic_its *its,
 478                                              gpa_t addr, unsigned int len)
 479{
 480        const struct vgic_its_abi *abi = vgic_its_get_abi(its);
 481        u64 reg = GITS_TYPER_PLPIS;
 482
 483        /*
 484         * We use linear CPU numbers for redistributor addressing,
 485         * so GITS_TYPER.PTA is 0.
 486         * Also we force all PROPBASER registers to be the same, so
 487         * CommonLPIAff is 0 as well.
 488         * To avoid memory waste in the guest, we keep the number of IDBits and
  489 * DevBits low - at least for the time being.
 490         */
 491        reg |= GIC_ENCODE_SZ(VITS_TYPER_DEVBITS, 5) << GITS_TYPER_DEVBITS_SHIFT;
 492        reg |= GIC_ENCODE_SZ(VITS_TYPER_IDBITS, 5) << GITS_TYPER_IDBITS_SHIFT;
 493        reg |= GIC_ENCODE_SZ(abi->ite_esz, 4) << GITS_TYPER_ITT_ENTRY_SIZE_SHIFT;
 494
 495        return extract_bytes(reg, addr & 7, len);
 496}
 497
 498static unsigned long vgic_mmio_read_its_iidr(struct kvm *kvm,
 499                                             struct vgic_its *its,
 500                                             gpa_t addr, unsigned int len)
 501{
 502        u32 val;
 503
 504        val = (its->abi_rev << GITS_IIDR_REV_SHIFT) & GITS_IIDR_REV_MASK;
 505        val |= (PRODUCT_ID_KVM << GITS_IIDR_PRODUCTID_SHIFT) | IMPLEMENTER_ARM;
 506        return val;
 507}
 508
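/*
 * Userspace write handler for GITS_IIDR: the revision field selects the
 * ABI revision used for ITS table save/restore, so only revisions we
 * actually implement are accepted.
 */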
 509static int vgic_mmio_uaccess_write_its_iidr(struct kvm *kvm,
 510                                            struct vgic_its *its,
 511                                            gpa_t addr, unsigned int len,
 512                                            unsigned long val)
 513{
 514        u32 rev = GITS_IIDR_REV(val);
 515
 516        if (rev >= NR_ITS_ABIS)
 517                return -EINVAL;
 518        return vgic_its_set_abi(its, rev);
 519}
 520
 521static unsigned long vgic_mmio_read_its_idregs(struct kvm *kvm,
 522                                               struct vgic_its *its,
 523                                               gpa_t addr, unsigned int len)
 524{
 525        switch (addr & 0xffff) {
 526        case GITS_PIDR0:
 527                return 0x92;    /* part number, bits[7:0] */
 528        case GITS_PIDR1:
 529                return 0xb4;    /* part number, bits[11:8] */
 530        case GITS_PIDR2:
 531                return GIC_PIDR2_ARCH_GICv3 | 0x0b;
 532        case GITS_PIDR4:
 533                return 0x40;    /* This is a 64K software visible page */
 534        /* The following are the ID registers for (any) GIC. */
 535        case GITS_CIDR0:
 536                return 0x0d;
 537        case GITS_CIDR1:
 538                return 0xf0;
 539        case GITS_CIDR2:
 540                return 0x05;
 541        case GITS_CIDR3:
 542                return 0xb1;
 543        }
 544
 545        return 0;
 546}
 547
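/*
 * Looks up a cached translation for the given doorbell/devid/eventid
 * triplet. The caller must hold the lpi_list_lock; a hit is moved to the
 * head of the list so the cache approximates LRU ordering.
 */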
 548static struct vgic_irq *__vgic_its_check_cache(struct vgic_dist *dist,
 549                                               phys_addr_t db,
 550                                               u32 devid, u32 eventid)
 551{
 552        struct vgic_translation_cache_entry *cte;
 553
 554        list_for_each_entry(cte, &dist->lpi_translation_cache, entry) {
 555                /*
 556                 * If we hit a NULL entry, there is nothing after this
 557                 * point.
 558                 */
 559                if (!cte->irq)
 560                        break;
 561
 562                if (cte->db != db || cte->devid != devid ||
 563                    cte->eventid != eventid)
 564                        continue;
 565
 566                /*
 567                 * Move this entry to the head, as it is the most
 568                 * recently used.
 569                 */
 570                if (!list_is_first(&cte->entry, &dist->lpi_translation_cache))
 571                        list_move(&cte->entry, &dist->lpi_translation_cache);
 572
 573                return cte->irq;
 574        }
 575
 576        return NULL;
 577}
 578
 579static struct vgic_irq *vgic_its_check_cache(struct kvm *kvm, phys_addr_t db,
 580                                             u32 devid, u32 eventid)
 581{
 582        struct vgic_dist *dist = &kvm->arch.vgic;
 583        struct vgic_irq *irq;
 584        unsigned long flags;
 585
 586        raw_spin_lock_irqsave(&dist->lpi_list_lock, flags);
 587        irq = __vgic_its_check_cache(dist, db, devid, eventid);
 588        raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
 589
 590        return irq;
 591}
 592
 593static void vgic_its_cache_translation(struct kvm *kvm, struct vgic_its *its,
 594                                       u32 devid, u32 eventid,
 595                                       struct vgic_irq *irq)
 596{
 597        struct vgic_dist *dist = &kvm->arch.vgic;
 598        struct vgic_translation_cache_entry *cte;
 599        unsigned long flags;
 600        phys_addr_t db;
 601
 602        /* Do not cache a directly injected interrupt */
 603        if (irq->hw)
 604                return;
 605
 606        raw_spin_lock_irqsave(&dist->lpi_list_lock, flags);
 607
 608        if (unlikely(list_empty(&dist->lpi_translation_cache)))
 609                goto out;
 610
 611        /*
 612         * We could have raced with another CPU caching the same
 613         * translation behind our back, so let's check it is not in
  614         * the cache already.
 615         */
 616        db = its->vgic_its_base + GITS_TRANSLATER;
 617        if (__vgic_its_check_cache(dist, db, devid, eventid))
 618                goto out;
 619
 620        /* Always reuse the last entry (LRU policy) */
 621        cte = list_last_entry(&dist->lpi_translation_cache,
 622                              typeof(*cte), entry);
 623
 624        /*
 625         * Caching the translation implies having an extra reference
 626         * to the interrupt, so drop the potential reference on what
 627         * was in the cache, and increment it on the new interrupt.
 628         */
 629        if (cte->irq)
 630                __vgic_put_lpi_locked(kvm, cte->irq);
 631
 632        vgic_get_irq_kref(irq);
 633
 634        cte->db         = db;
 635        cte->devid      = devid;
 636        cte->eventid    = eventid;
 637        cte->irq        = irq;
 638
 639        /* Move the new translation to the head of the list */
 640        list_move(&cte->entry, &dist->lpi_translation_cache);
 641
 642out:
 643        raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
 644}
 645
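/*
 * Empties the LPI translation cache, dropping the reference each entry
 * held on its interrupt. Used whenever a mapping change could leave stale
 * translations behind (DISCARD, MOVI, MAPD/MAPC unmap, MOVALL).
 */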
 646void vgic_its_invalidate_cache(struct kvm *kvm)
 647{
 648        struct vgic_dist *dist = &kvm->arch.vgic;
 649        struct vgic_translation_cache_entry *cte;
 650        unsigned long flags;
 651
 652        raw_spin_lock_irqsave(&dist->lpi_list_lock, flags);
 653
 654        list_for_each_entry(cte, &dist->lpi_translation_cache, entry) {
 655                /*
 656                 * If we hit a NULL entry, there is nothing after this
 657                 * point.
 658                 */
 659                if (!cte->irq)
 660                        break;
 661
 662                __vgic_put_lpi_locked(kvm, cte->irq);
 663                cte->irq = NULL;
 664        }
 665
 666        raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
 667}
 668
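/*
 * Translates a (devid, eventid) pair into the vgic_irq of the mapped LPI.
 * Returns -EBUSY if the ITS or the target VCPU's LPIs are disabled, an ITS
 * error code if the translation is not mapped, and 0 on success, in which
 * case the translation is also added to the cache.
 */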
 669int vgic_its_resolve_lpi(struct kvm *kvm, struct vgic_its *its,
 670                         u32 devid, u32 eventid, struct vgic_irq **irq)
 671{
 672        struct kvm_vcpu *vcpu;
 673        struct its_ite *ite;
 674
 675        if (!its->enabled)
 676                return -EBUSY;
 677
 678        ite = find_ite(its, devid, eventid);
 679        if (!ite || !its_is_collection_mapped(ite->collection))
 680                return E_ITS_INT_UNMAPPED_INTERRUPT;
 681
 682        vcpu = kvm_get_vcpu(kvm, ite->collection->target_addr);
 683        if (!vcpu)
 684                return E_ITS_INT_UNMAPPED_INTERRUPT;
 685
 686        if (!vcpu->arch.vgic_cpu.lpis_enabled)
 687                return -EBUSY;
 688
 689        vgic_its_cache_translation(kvm, its, devid, eventid, ite->irq);
 690
 691        *irq = ite->irq;
 692        return 0;
 693}
 694
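/*
 * Finds the ITS emulating the doorbell a guest MSI was written to, by
 * looking the address up on the KVM MMIO bus. The MSI must carry a valid
 * device ID for an ITS translation to make sense.
 */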
 695struct vgic_its *vgic_msi_to_its(struct kvm *kvm, struct kvm_msi *msi)
 696{
 697        u64 address;
 698        struct kvm_io_device *kvm_io_dev;
 699        struct vgic_io_device *iodev;
 700
 701        if (!vgic_has_its(kvm))
 702                return ERR_PTR(-ENODEV);
 703
 704        if (!(msi->flags & KVM_MSI_VALID_DEVID))
 705                return ERR_PTR(-EINVAL);
 706
 707        address = (u64)msi->address_hi << 32 | msi->address_lo;
 708
 709        kvm_io_dev = kvm_io_bus_get_dev(kvm, KVM_MMIO_BUS, address);
 710        if (!kvm_io_dev)
 711                return ERR_PTR(-EINVAL);
 712
 713        if (kvm_io_dev->ops != &kvm_io_gic_ops)
 714                return ERR_PTR(-EINVAL);
 715
 716        iodev = container_of(kvm_io_dev, struct vgic_io_device, dev);
 717        if (iodev->iodev_type != IODEV_ITS)
 718                return ERR_PTR(-EINVAL);
 719
 720        return iodev->its;
 721}
 722
 723/*
 724 * Find the target VCPU and the LPI number for a given devid/eventid pair
 725 * and make this IRQ pending, possibly injecting it.
 726 * Must be called with the its_lock mutex held.
 727 * Returns 0 on success, a positive error value for any ITS mapping
 728 * related errors and negative error values for generic errors.
 729 */
 730static int vgic_its_trigger_msi(struct kvm *kvm, struct vgic_its *its,
 731                                u32 devid, u32 eventid)
 732{
 733        struct vgic_irq *irq = NULL;
 734        unsigned long flags;
 735        int err;
 736
 737        err = vgic_its_resolve_lpi(kvm, its, devid, eventid, &irq);
 738        if (err)
 739                return err;
 740
 741        if (irq->hw)
 742                return irq_set_irqchip_state(irq->host_irq,
 743                                             IRQCHIP_STATE_PENDING, true);
 744
 745        raw_spin_lock_irqsave(&irq->irq_lock, flags);
 746        irq->pending_latch = true;
 747        vgic_queue_irq_unlock(kvm, irq, flags);
 748
 749        return 0;
 750}
 751
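/*
 * Fast path for MSI injection: if the translation is already cached, the
 * LPI can be made pending without taking the its_lock mutex. Returns
 * -EWOULDBLOCK on a cache miss so the caller falls back to the slow path.
 */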
 752int vgic_its_inject_cached_translation(struct kvm *kvm, struct kvm_msi *msi)
 753{
 754        struct vgic_irq *irq;
 755        unsigned long flags;
 756        phys_addr_t db;
 757
 758        db = (u64)msi->address_hi << 32 | msi->address_lo;
 759        irq = vgic_its_check_cache(kvm, db, msi->devid, msi->data);
 760        if (!irq)
 761                return -EWOULDBLOCK;
 762
 763        raw_spin_lock_irqsave(&irq->irq_lock, flags);
 764        irq->pending_latch = true;
 765        vgic_queue_irq_unlock(kvm, irq, flags);
 766
 767        return 0;
 768}
 769
 770/*
 771 * Queries the KVM IO bus framework to get the ITS pointer from the given
 772 * doorbell address.
 773 * We then call vgic_its_trigger_msi() with the decoded data.
 774 * According to the KVM_SIGNAL_MSI API description returns 1 on success.
 775 */
 776int vgic_its_inject_msi(struct kvm *kvm, struct kvm_msi *msi)
 777{
 778        struct vgic_its *its;
 779        int ret;
 780
 781        if (!vgic_its_inject_cached_translation(kvm, msi))
 782                return 1;
 783
 784        its = vgic_msi_to_its(kvm, msi);
 785        if (IS_ERR(its))
 786                return PTR_ERR(its);
 787
 788        mutex_lock(&its->its_lock);
 789        ret = vgic_its_trigger_msi(kvm, its, msi->devid, msi->data);
 790        mutex_unlock(&its->its_lock);
 791
 792        if (ret < 0)
 793                return ret;
 794
 795        /*
 796         * KVM_SIGNAL_MSI demands a return value > 0 for success and 0
 797         * if the guest has blocked the MSI. So we map any LPI mapping
 798         * related error to that.
 799         */
 800        if (ret)
 801                return 0;
 802        else
 803                return 1;
 804}
 805
 806/* Requires the its_lock to be held. */
 807static void its_free_ite(struct kvm *kvm, struct its_ite *ite)
 808{
 809        list_del(&ite->ite_list);
 810
 811        /* This put matches the get in vgic_add_lpi. */
 812        if (ite->irq) {
 813                if (ite->irq->hw)
 814                        WARN_ON(its_unmap_vlpi(ite->irq->host_irq));
 815
 816                vgic_put_irq(kvm, ite->irq);
 817        }
 818
 819        kfree(ite);
 820}
 821
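/*
 * ITS commands are four 64-bit little-endian words. its_cmd_mask_field()
 * extracts a field of the given size at the given word/shift, and the
 * macros below name the fields used by the command handlers.
 */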
 822static u64 its_cmd_mask_field(u64 *its_cmd, int word, int shift, int size)
 823{
 824        return (le64_to_cpu(its_cmd[word]) >> shift) & (BIT_ULL(size) - 1);
 825}
 826
 827#define its_cmd_get_command(cmd)        its_cmd_mask_field(cmd, 0,  0,  8)
 828#define its_cmd_get_deviceid(cmd)       its_cmd_mask_field(cmd, 0, 32, 32)
 829#define its_cmd_get_size(cmd)           (its_cmd_mask_field(cmd, 1,  0,  5) + 1)
 830#define its_cmd_get_id(cmd)             its_cmd_mask_field(cmd, 1,  0, 32)
 831#define its_cmd_get_physical_id(cmd)    its_cmd_mask_field(cmd, 1, 32, 32)
 832#define its_cmd_get_collection(cmd)     its_cmd_mask_field(cmd, 2,  0, 16)
 833#define its_cmd_get_ittaddr(cmd)        (its_cmd_mask_field(cmd, 2,  8, 44) << 8)
 834#define its_cmd_get_target_addr(cmd)    its_cmd_mask_field(cmd, 2, 16, 32)
 835#define its_cmd_get_validbit(cmd)       its_cmd_mask_field(cmd, 2, 63,  1)
 836
 837/*
 838 * The DISCARD command frees an Interrupt Translation Table Entry (ITTE).
 839 * Must be called with the its_lock mutex held.
 840 */
 841static int vgic_its_cmd_handle_discard(struct kvm *kvm, struct vgic_its *its,
 842                                       u64 *its_cmd)
 843{
 844        u32 device_id = its_cmd_get_deviceid(its_cmd);
 845        u32 event_id = its_cmd_get_id(its_cmd);
 846        struct its_ite *ite;
 847
 848        ite = find_ite(its, device_id, event_id);
 849        if (ite && its_is_collection_mapped(ite->collection)) {
 850                /*
 851                 * Though the spec talks about removing the pending state, we
 852                 * don't bother here since we clear the ITTE anyway and the
 853                 * pending state is a property of the ITTE struct.
 854                 */
 855                vgic_its_invalidate_cache(kvm);
 856
 857                its_free_ite(kvm, ite);
 858                return 0;
 859        }
 860
 861        return E_ITS_DISCARD_UNMAPPED_INTERRUPT;
 862}
 863
 864/*
 865 * The MOVI command moves an ITTE to a different collection.
 866 * Must be called with the its_lock mutex held.
 867 */
 868static int vgic_its_cmd_handle_movi(struct kvm *kvm, struct vgic_its *its,
 869                                    u64 *its_cmd)
 870{
 871        u32 device_id = its_cmd_get_deviceid(its_cmd);
 872        u32 event_id = its_cmd_get_id(its_cmd);
 873        u32 coll_id = its_cmd_get_collection(its_cmd);
 874        struct kvm_vcpu *vcpu;
 875        struct its_ite *ite;
 876        struct its_collection *collection;
 877
 878        ite = find_ite(its, device_id, event_id);
 879        if (!ite)
 880                return E_ITS_MOVI_UNMAPPED_INTERRUPT;
 881
 882        if (!its_is_collection_mapped(ite->collection))
 883                return E_ITS_MOVI_UNMAPPED_COLLECTION;
 884
 885        collection = find_collection(its, coll_id);
 886        if (!its_is_collection_mapped(collection))
 887                return E_ITS_MOVI_UNMAPPED_COLLECTION;
 888
 889        ite->collection = collection;
 890        vcpu = kvm_get_vcpu(kvm, collection->target_addr);
 891
 892        vgic_its_invalidate_cache(kvm);
 893
 894        return update_affinity(ite->irq, vcpu);
 895}
 896
 897/*
 898 * Check whether an ID can be stored into the corresponding guest table.
 899 * For a direct table this is pretty easy, but gets a bit nasty for
 900 * indirect tables. We check whether the resulting guest physical address
 901 * is actually valid (covered by a memslot and guest accessible).
 902 * For this we have to read the respective first level entry.
 903 */
 904static bool vgic_its_check_id(struct vgic_its *its, u64 baser, u32 id,
 905                              gpa_t *eaddr)
 906{
 907        int l1_tbl_size = GITS_BASER_NR_PAGES(baser) * SZ_64K;
 908        u64 indirect_ptr, type = GITS_BASER_TYPE(baser);
 909        phys_addr_t base = GITS_BASER_ADDR_48_to_52(baser);
 910        int esz = GITS_BASER_ENTRY_SIZE(baser);
 911        int index, idx;
 912        gfn_t gfn;
 913        bool ret;
 914
 915        switch (type) {
 916        case GITS_BASER_TYPE_DEVICE:
 917                if (id >= BIT_ULL(VITS_TYPER_DEVBITS))
 918                        return false;
 919                break;
 920        case GITS_BASER_TYPE_COLLECTION:
 921                /* as GITS_TYPER.CIL == 0, ITS supports 16-bit collection ID */
 922                if (id >= BIT_ULL(16))
 923                        return false;
 924                break;
 925        default:
 926                return false;
 927        }
 928
 929        if (!(baser & GITS_BASER_INDIRECT)) {
 930                phys_addr_t addr;
 931
 932                if (id >= (l1_tbl_size / esz))
 933                        return false;
 934
 935                addr = base + id * esz;
 936                gfn = addr >> PAGE_SHIFT;
 937
 938                if (eaddr)
 939                        *eaddr = addr;
 940
 941                goto out;
 942        }
 943
 944        /* calculate and check the index into the 1st level */
 945        index = id / (SZ_64K / esz);
 946        if (index >= (l1_tbl_size / sizeof(u64)))
 947                return false;
 948
 949        /* Each 1st level entry is represented by a 64-bit value. */
 950        if (kvm_read_guest_lock(its->dev->kvm,
 951                           base + index * sizeof(indirect_ptr),
 952                           &indirect_ptr, sizeof(indirect_ptr)))
 953                return false;
 954
 955        indirect_ptr = le64_to_cpu(indirect_ptr);
 956
 957        /* check the valid bit of the first level entry */
 958        if (!(indirect_ptr & BIT_ULL(63)))
 959                return false;
 960
 961        /* Mask the guest physical address and calculate the frame number. */
 962        indirect_ptr &= GENMASK_ULL(51, 16);
 963
 964        /* Find the address of the actual entry */
 965        index = id % (SZ_64K / esz);
 966        indirect_ptr += index * esz;
 967        gfn = indirect_ptr >> PAGE_SHIFT;
 968
 969        if (eaddr)
 970                *eaddr = indirect_ptr;
 971
 972out:
 973        idx = srcu_read_lock(&its->dev->kvm->srcu);
 974        ret = kvm_is_visible_gfn(its->dev->kvm, gfn);
 975        srcu_read_unlock(&its->dev->kvm->srcu, idx);
 976        return ret;
 977}
 978
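/*
 * Allocates a collection entry for the given ID (initially unmapped),
 * after checking that the ID fits within the guest's collection table.
 */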
 979static int vgic_its_alloc_collection(struct vgic_its *its,
 980                                     struct its_collection **colp,
 981                                     u32 coll_id)
 982{
 983        struct its_collection *collection;
 984
 985        if (!vgic_its_check_id(its, its->baser_coll_table, coll_id, NULL))
 986                return E_ITS_MAPC_COLLECTION_OOR;
 987
 988        collection = kzalloc(sizeof(*collection), GFP_KERNEL);
 989        if (!collection)
 990                return -ENOMEM;
 991
 992        collection->collection_id = coll_id;
 993        collection->target_addr = COLLECTION_NOT_MAPPED;
 994
 995        list_add_tail(&collection->coll_list, &its->collection_list);
 996        *colp = collection;
 997
 998        return 0;
 999}
1000
1001static void vgic_its_free_collection(struct vgic_its *its, u32 coll_id)
1002{
1003        struct its_collection *collection;
1004        struct its_device *device;
1005        struct its_ite *ite;
1006
1007        /*
1008         * Clearing the mapping for that collection ID removes the
1009         * entry from the list. If there wasn't any before, we can
1010         * go home early.
1011         */
1012        collection = find_collection(its, coll_id);
1013        if (!collection)
1014                return;
1015
1016        for_each_lpi_its(device, ite, its)
1017                if (ite->collection &&
1018                    ite->collection->collection_id == coll_id)
1019                        ite->collection = NULL;
1020
1021        list_del(&collection->coll_list);
1022        kfree(collection);
1023}
1024
1025/* Must be called with its_lock mutex held */
1026static struct its_ite *vgic_its_alloc_ite(struct its_device *device,
1027                                          struct its_collection *collection,
1028                                          u32 event_id)
1029{
1030        struct its_ite *ite;
1031
1032        ite = kzalloc(sizeof(*ite), GFP_KERNEL);
1033        if (!ite)
1034                return ERR_PTR(-ENOMEM);
1035
1036        ite->event_id   = event_id;
1037        ite->collection = collection;
1038
1039        list_add_tail(&ite->ite_list, &device->itt_head);
1040        return ite;
1041}
1042
1043/*
1044 * The MAPTI and MAPI commands map LPIs to ITTEs.
1045 * Must be called with its_lock mutex held.
1046 */
1047static int vgic_its_cmd_handle_mapi(struct kvm *kvm, struct vgic_its *its,
1048                                    u64 *its_cmd)
1049{
1050        u32 device_id = its_cmd_get_deviceid(its_cmd);
1051        u32 event_id = its_cmd_get_id(its_cmd);
1052        u32 coll_id = its_cmd_get_collection(its_cmd);
1053        struct its_ite *ite;
1054        struct kvm_vcpu *vcpu = NULL;
1055        struct its_device *device;
1056        struct its_collection *collection, *new_coll = NULL;
1057        struct vgic_irq *irq;
1058        int lpi_nr;
1059
1060        device = find_its_device(its, device_id);
1061        if (!device)
1062                return E_ITS_MAPTI_UNMAPPED_DEVICE;
1063
1064        if (event_id >= BIT_ULL(device->num_eventid_bits))
1065                return E_ITS_MAPTI_ID_OOR;
1066
1067        if (its_cmd_get_command(its_cmd) == GITS_CMD_MAPTI)
1068                lpi_nr = its_cmd_get_physical_id(its_cmd);
1069        else
1070                lpi_nr = event_id;
1071        if (lpi_nr < GIC_LPI_OFFSET ||
1072            lpi_nr >= max_lpis_propbaser(kvm->arch.vgic.propbaser))
1073                return E_ITS_MAPTI_PHYSICALID_OOR;
1074
1075        /* If there is an existing mapping, behavior is UNPREDICTABLE. */
1076        if (find_ite(its, device_id, event_id))
1077                return 0;
1078
1079        collection = find_collection(its, coll_id);
1080        if (!collection) {
1081                int ret = vgic_its_alloc_collection(its, &collection, coll_id);
1082                if (ret)
1083                        return ret;
1084                new_coll = collection;
1085        }
1086
1087        ite = vgic_its_alloc_ite(device, collection, event_id);
1088        if (IS_ERR(ite)) {
1089                if (new_coll)
1090                        vgic_its_free_collection(its, coll_id);
1091                return PTR_ERR(ite);
1092        }
1093
1094        if (its_is_collection_mapped(collection))
1095                vcpu = kvm_get_vcpu(kvm, collection->target_addr);
1096
1097        irq = vgic_add_lpi(kvm, lpi_nr, vcpu);
1098        if (IS_ERR(irq)) {
1099                if (new_coll)
1100                        vgic_its_free_collection(its, coll_id);
1101                its_free_ite(kvm, ite);
1102                return PTR_ERR(irq);
1103        }
1104        ite->irq = irq;
1105
1106        return 0;
1107}
1108
1109/* Requires the its_lock to be held. */
1110static void vgic_its_free_device(struct kvm *kvm, struct its_device *device)
1111{
1112        struct its_ite *ite, *temp;
1113
1114        /*
1115         * The spec says that unmapping a device with still valid
1116         * ITTEs associated is UNPREDICTABLE. We remove all ITTEs,
1117         * since we cannot leave the memory unreferenced.
1118         */
1119        list_for_each_entry_safe(ite, temp, &device->itt_head, ite_list)
1120                its_free_ite(kvm, ite);
1121
1122        vgic_its_invalidate_cache(kvm);
1123
1124        list_del(&device->dev_list);
1125        kfree(device);
1126}
1127
1128/* its lock must be held */
1129static void vgic_its_free_device_list(struct kvm *kvm, struct vgic_its *its)
1130{
1131        struct its_device *cur, *temp;
1132
1133        list_for_each_entry_safe(cur, temp, &its->device_list, dev_list)
1134                vgic_its_free_device(kvm, cur);
1135}
1136
1137/* its lock must be held */
1138static void vgic_its_free_collection_list(struct kvm *kvm, struct vgic_its *its)
1139{
1140        struct its_collection *cur, *temp;
1141
1142        list_for_each_entry_safe(cur, temp, &its->collection_list, coll_list)
1143                vgic_its_free_collection(its, cur->collection_id);
1144}
1145
1146/* Must be called with its_lock mutex held */
1147static struct its_device *vgic_its_alloc_device(struct vgic_its *its,
1148                                                u32 device_id, gpa_t itt_addr,
1149                                                u8 num_eventid_bits)
1150{
1151        struct its_device *device;
1152
1153        device = kzalloc(sizeof(*device), GFP_KERNEL);
1154        if (!device)
1155                return ERR_PTR(-ENOMEM);
1156
1157        device->device_id = device_id;
1158        device->itt_addr = itt_addr;
1159        device->num_eventid_bits = num_eventid_bits;
1160        INIT_LIST_HEAD(&device->itt_head);
1161
1162        list_add_tail(&device->dev_list, &its->device_list);
1163        return device;
1164}
1165
1166/*
1167 * MAPD maps or unmaps a device ID to Interrupt Translation Tables (ITTs).
1168 * Must be called with the its_lock mutex held.
1169 */
1170static int vgic_its_cmd_handle_mapd(struct kvm *kvm, struct vgic_its *its,
1171                                    u64 *its_cmd)
1172{
1173        u32 device_id = its_cmd_get_deviceid(its_cmd);
1174        bool valid = its_cmd_get_validbit(its_cmd);
1175        u8 num_eventid_bits = its_cmd_get_size(its_cmd);
1176        gpa_t itt_addr = its_cmd_get_ittaddr(its_cmd);
1177        struct its_device *device;
1178
1179        if (!vgic_its_check_id(its, its->baser_device_table, device_id, NULL))
1180                return E_ITS_MAPD_DEVICE_OOR;
1181
1182        if (valid && num_eventid_bits > VITS_TYPER_IDBITS)
1183                return E_ITS_MAPD_ITTSIZE_OOR;
1184
1185        device = find_its_device(its, device_id);
1186
1187        /*
1188         * The spec says that calling MAPD on an already mapped device
1189         * invalidates all cached data for this device. We implement this
1190         * by removing the mapping and re-establishing it.
1191         */
1192        if (device)
1193                vgic_its_free_device(kvm, device);
1194
1195        /*
1196         * The spec does not say whether unmapping a not-mapped device
1197         * is an error, so we are done in any case.
1198         */
1199        if (!valid)
1200                return 0;
1201
1202        device = vgic_its_alloc_device(its, device_id, itt_addr,
1203                                       num_eventid_bits);
1204
1205        return PTR_ERR_OR_ZERO(device);
1206}
1207
1208/*
1209 * The MAPC command maps collection IDs to redistributors.
1210 * Must be called with the its_lock mutex held.
1211 */
1212static int vgic_its_cmd_handle_mapc(struct kvm *kvm, struct vgic_its *its,
1213                                    u64 *its_cmd)
1214{
1215        u16 coll_id;
1216        u32 target_addr;
1217        struct its_collection *collection;
1218        bool valid;
1219
1220        valid = its_cmd_get_validbit(its_cmd);
1221        coll_id = its_cmd_get_collection(its_cmd);
1222        target_addr = its_cmd_get_target_addr(its_cmd);
1223
1224        if (target_addr >= atomic_read(&kvm->online_vcpus))
1225                return E_ITS_MAPC_PROCNUM_OOR;
1226
1227        if (!valid) {
1228                vgic_its_free_collection(its, coll_id);
1229                vgic_its_invalidate_cache(kvm);
1230        } else {
1231                collection = find_collection(its, coll_id);
1232
1233                if (!collection) {
1234                        int ret;
1235
1236                        ret = vgic_its_alloc_collection(its, &collection,
1237                                                        coll_id);
1238                        if (ret)
1239                                return ret;
1240                        collection->target_addr = target_addr;
1241                } else {
1242                        collection->target_addr = target_addr;
1243                        update_affinity_collection(kvm, its, collection);
1244                }
1245        }
1246
1247        return 0;
1248}
1249
1250/*
1251 * The CLEAR command removes the pending state for a particular LPI.
1252 * Must be called with the its_lock mutex held.
1253 */
1254static int vgic_its_cmd_handle_clear(struct kvm *kvm, struct vgic_its *its,
1255                                     u64 *its_cmd)
1256{
1257        u32 device_id = its_cmd_get_deviceid(its_cmd);
1258        u32 event_id = its_cmd_get_id(its_cmd);
1259        struct its_ite *ite;
1260
1261
1262        ite = find_ite(its, device_id, event_id);
1263        if (!ite)
1264                return E_ITS_CLEAR_UNMAPPED_INTERRUPT;
1265
1266        ite->irq->pending_latch = false;
1267
1268        if (ite->irq->hw)
1269                return irq_set_irqchip_state(ite->irq->host_irq,
1270                                             IRQCHIP_STATE_PENDING, false);
1271
1272        return 0;
1273}
1274
1275/*
1276 * The INV command syncs the configuration bits from the memory table.
1277 * Must be called with the its_lock mutex held.
1278 */
1279static int vgic_its_cmd_handle_inv(struct kvm *kvm, struct vgic_its *its,
1280                                   u64 *its_cmd)
1281{
1282        u32 device_id = its_cmd_get_deviceid(its_cmd);
1283        u32 event_id = its_cmd_get_id(its_cmd);
1284        struct its_ite *ite;
1285
1286
1287        ite = find_ite(its, device_id, event_id);
1288        if (!ite)
1289                return E_ITS_INV_UNMAPPED_INTERRUPT;
1290
1291        return update_lpi_config(kvm, ite->irq, NULL, true);
1292}
1293
1294/*
1295 * The INVALL command requests flushing of all IRQ data in this collection.
1296 * Find the VCPU mapped to that collection, then iterate over the VM's list
1297 * of mapped LPIs and update the configuration for each IRQ which targets
1298 * the specified vcpu. The configuration will be read from the in-memory
1299 * configuration table.
1300 * Must be called with the its_lock mutex held.
1301 */
1302static int vgic_its_cmd_handle_invall(struct kvm *kvm, struct vgic_its *its,
1303                                      u64 *its_cmd)
1304{
1305        u32 coll_id = its_cmd_get_collection(its_cmd);
1306        struct its_collection *collection;
1307        struct kvm_vcpu *vcpu;
1308        struct vgic_irq *irq;
1309        u32 *intids;
1310        int irq_count, i;
1311
1312        collection = find_collection(its, coll_id);
1313        if (!its_is_collection_mapped(collection))
1314                return E_ITS_INVALL_UNMAPPED_COLLECTION;
1315
1316        vcpu = kvm_get_vcpu(kvm, collection->target_addr);
1317
1318        irq_count = vgic_copy_lpi_list(kvm, vcpu, &intids);
1319        if (irq_count < 0)
1320                return irq_count;
1321
1322        for (i = 0; i < irq_count; i++) {
1323                irq = vgic_get_irq(kvm, NULL, intids[i]);
1324                if (!irq)
1325                        continue;
1326                update_lpi_config(kvm, irq, vcpu, false);
1327                vgic_put_irq(kvm, irq);
1328        }
1329
1330        kfree(intids);
1331
1332        if (vcpu->arch.vgic_cpu.vgic_v3.its_vpe.its_vm)
1333                its_invall_vpe(&vcpu->arch.vgic_cpu.vgic_v3.its_vpe);
1334
1335        return 0;
1336}
1337
1338/*
1339 * The MOVALL command moves the pending state of all IRQs targeting one
1340 * redistributor to another. We don't hold the pending state in the VCPUs,
1341 * but in the IRQs instead, so there is really not much to do for us here.
1342 * However the spec says that no IRQ must target the old redistributor
1343 * afterwards, so we make sure that no LPI is using the associated target_vcpu.
1344 * This command affects all LPIs in the system that target that redistributor.
1345 */
1346static int vgic_its_cmd_handle_movall(struct kvm *kvm, struct vgic_its *its,
1347                                      u64 *its_cmd)
1348{
1349        u32 target1_addr = its_cmd_get_target_addr(its_cmd);
1350        u32 target2_addr = its_cmd_mask_field(its_cmd, 3, 16, 32);
1351        struct kvm_vcpu *vcpu1, *vcpu2;
1352        struct vgic_irq *irq;
1353        u32 *intids;
1354        int irq_count, i;
1355
1356        if (target1_addr >= atomic_read(&kvm->online_vcpus) ||
1357            target2_addr >= atomic_read(&kvm->online_vcpus))
1358                return E_ITS_MOVALL_PROCNUM_OOR;
1359
1360        if (target1_addr == target2_addr)
1361                return 0;
1362
1363        vcpu1 = kvm_get_vcpu(kvm, target1_addr);
1364        vcpu2 = kvm_get_vcpu(kvm, target2_addr);
1365
1366        irq_count = vgic_copy_lpi_list(kvm, vcpu1, &intids);
1367        if (irq_count < 0)
1368                return irq_count;
1369
1370        for (i = 0; i < irq_count; i++) {
1371                irq = vgic_get_irq(kvm, NULL, intids[i]);
1372
1373                update_affinity(irq, vcpu2);
1374
1375                vgic_put_irq(kvm, irq);
1376        }
1377
1378        vgic_its_invalidate_cache(kvm);
1379
1380        kfree(intids);
1381        return 0;
1382}
1383
1384/*
1385 * The INT command injects the LPI associated with that DevID/EvID pair.
1386 * Must be called with the its_lock mutex held.
1387 */
1388static int vgic_its_cmd_handle_int(struct kvm *kvm, struct vgic_its *its,
1389                                   u64 *its_cmd)
1390{
1391        u32 msi_data = its_cmd_get_id(its_cmd);
1392        u64 msi_devid = its_cmd_get_deviceid(its_cmd);
1393
1394        return vgic_its_trigger_msi(kvm, its, msi_devid, msi_data);
1395}
1396
1397/*
1398 * This function is called with the its_cmd lock held, but the ITS data
1399 * structure lock dropped.
1400 */
1401static int vgic_its_handle_command(struct kvm *kvm, struct vgic_its *its,
1402                                   u64 *its_cmd)
1403{
1404        int ret = -ENODEV;
1405
1406        mutex_lock(&its->its_lock);
1407        switch (its_cmd_get_command(its_cmd)) {
1408        case GITS_CMD_MAPD:
1409                ret = vgic_its_cmd_handle_mapd(kvm, its, its_cmd);
1410                break;
1411        case GITS_CMD_MAPC:
1412                ret = vgic_its_cmd_handle_mapc(kvm, its, its_cmd);
1413                break;
1414        case GITS_CMD_MAPI:
1415                ret = vgic_its_cmd_handle_mapi(kvm, its, its_cmd);
1416                break;
1417        case GITS_CMD_MAPTI:
1418                ret = vgic_its_cmd_handle_mapi(kvm, its, its_cmd);
1419                break;
1420        case GITS_CMD_MOVI:
1421                ret = vgic_its_cmd_handle_movi(kvm, its, its_cmd);
1422                break;
1423        case GITS_CMD_DISCARD:
1424                ret = vgic_its_cmd_handle_discard(kvm, its, its_cmd);
1425                break;
1426        case GITS_CMD_CLEAR:
1427                ret = vgic_its_cmd_handle_clear(kvm, its, its_cmd);
1428                break;
1429        case GITS_CMD_MOVALL:
1430                ret = vgic_its_cmd_handle_movall(kvm, its, its_cmd);
1431                break;
1432        case GITS_CMD_INT:
1433                ret = vgic_its_cmd_handle_int(kvm, its, its_cmd);
1434                break;
1435        case GITS_CMD_INV:
1436                ret = vgic_its_cmd_handle_inv(kvm, its, its_cmd);
1437                break;
1438        case GITS_CMD_INVALL:
1439                ret = vgic_its_cmd_handle_invall(kvm, its, its_cmd);
1440                break;
1441        case GITS_CMD_SYNC:
1442                /* we ignore this command: we are in sync all of the time */
1443                ret = 0;
1444                break;
1445        }
1446        mutex_unlock(&its->its_lock);
1447
1448        return ret;
1449}
1450
1451static u64 vgic_sanitise_its_baser(u64 reg)
1452{
1453        reg = vgic_sanitise_field(reg, GITS_BASER_SHAREABILITY_MASK,
1454                                  GITS_BASER_SHAREABILITY_SHIFT,
1455                                  vgic_sanitise_shareability);
1456        reg = vgic_sanitise_field(reg, GITS_BASER_INNER_CACHEABILITY_MASK,
1457                                  GITS_BASER_INNER_CACHEABILITY_SHIFT,
1458                                  vgic_sanitise_inner_cacheability);
1459        reg = vgic_sanitise_field(reg, GITS_BASER_OUTER_CACHEABILITY_MASK,
1460                                  GITS_BASER_OUTER_CACHEABILITY_SHIFT,
1461                                  vgic_sanitise_outer_cacheability);
1462
1463        /* We support only one (ITS) page size: 64K */
1464        reg = (reg & ~GITS_BASER_PAGE_SIZE_MASK) | GITS_BASER_PAGE_SIZE_64K;
1465
1466        return reg;
1467}
1468
1469static u64 vgic_sanitise_its_cbaser(u64 reg)
1470{
1471        reg = vgic_sanitise_field(reg, GITS_CBASER_SHAREABILITY_MASK,
1472                                  GITS_CBASER_SHAREABILITY_SHIFT,
1473                                  vgic_sanitise_shareability);
1474        reg = vgic_sanitise_field(reg, GITS_CBASER_INNER_CACHEABILITY_MASK,
1475                                  GITS_CBASER_INNER_CACHEABILITY_SHIFT,
1476                                  vgic_sanitise_inner_cacheability);
1477        reg = vgic_sanitise_field(reg, GITS_CBASER_OUTER_CACHEABILITY_MASK,
1478                                  GITS_CBASER_OUTER_CACHEABILITY_SHIFT,
1479                                  vgic_sanitise_outer_cacheability);
1480
1481        /* Sanitise the physical address to be 64k aligned. */
1482        reg &= ~GENMASK_ULL(15, 12);
1483
1484        return reg;
1485}
1486
1487static unsigned long vgic_mmio_read_its_cbaser(struct kvm *kvm,
1488                                               struct vgic_its *its,
1489                                               gpa_t addr, unsigned int len)
1490{
1491        return extract_bytes(its->cbaser, addr & 7, len);
1492}
1493
1494static void vgic_mmio_write_its_cbaser(struct kvm *kvm, struct vgic_its *its,
1495                                       gpa_t addr, unsigned int len,
1496                                       unsigned long val)
1497{
1498        /* When GITS_CTLR.Enable is 1, this register is RO. */
1499        if (its->enabled)
1500                return;
1501
1502        mutex_lock(&its->cmd_lock);
1503        its->cbaser = update_64bit_reg(its->cbaser, addr & 7, len, val);
1504        its->cbaser = vgic_sanitise_its_cbaser(its->cbaser);
1505        its->creadr = 0;
1506        /*
1507         * CWRITER is architecturally UNKNOWN on reset, but we need to reset
1508         * it to CREADR to make sure we start with an empty command buffer.
1509         */
1510        its->cwriter = its->creadr;
1511        mutex_unlock(&its->cmd_lock);
1512}
1513
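/*
 * The Size field of GITS_CBASER (bits [7:0]) encodes the number of 4K
 * pages of the command queue minus one; each command is 32 bytes, and
 * CREADR/CWRITER hold 32-byte aligned offsets into that buffer.
 */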
1514#define ITS_CMD_BUFFER_SIZE(baser)      ((((baser) & 0xff) + 1) << 12)
1515#define ITS_CMD_SIZE                    32
1516#define ITS_CMD_OFFSET(reg)             ((reg) & GENMASK(19, 5))
1517
1518/* Must be called with the cmd_lock held. */
1519static void vgic_its_process_commands(struct kvm *kvm, struct vgic_its *its)
1520{
1521        gpa_t cbaser;
1522        u64 cmd_buf[4];
1523
1524        /* Commands are only processed when the ITS is enabled. */
1525        if (!its->enabled)
1526                return;
1527
1528        cbaser = GITS_CBASER_ADDRESS(its->cbaser);
1529
1530        while (its->cwriter != its->creadr) {
1531                int ret = kvm_read_guest_lock(kvm, cbaser + its->creadr,
1532                                              cmd_buf, ITS_CMD_SIZE);
1533                /*
 1534                 * If kvm_read_guest_lock() fails, this could be due to the guest
1535                 * programming a bogus value in CBASER or something else going
1536                 * wrong from which we cannot easily recover.
1537                 * According to section 6.3.2 in the GICv3 spec we can just
1538                 * ignore that command then.
1539                 */
1540                if (!ret)
1541                        vgic_its_handle_command(kvm, its, cmd_buf);
1542
1543                its->creadr += ITS_CMD_SIZE;
1544                if (its->creadr == ITS_CMD_BUFFER_SIZE(its->cbaser))
1545                        its->creadr = 0;
1546        }
1547}
1548
1549/*
1550 * By writing to CWRITER the guest announces new commands to be processed.
1551 * To avoid any races in the first place, we take the ITS cmd_lock, which
1552 * protects our ring buffer variables, so that there is only one user
1553 * per ITS handling commands at a given time.
1554 */
1555static void vgic_mmio_write_its_cwriter(struct kvm *kvm, struct vgic_its *its,
1556                                        gpa_t addr, unsigned int len,
1557                                        unsigned long val)
1558{
1559        u64 reg;
1560
1561        if (!its)
1562                return;
1563
1564        mutex_lock(&its->cmd_lock);
1565
1566        reg = update_64bit_reg(its->cwriter, addr & 7, len, val);
1567        reg = ITS_CMD_OFFSET(reg);
1568        if (reg >= ITS_CMD_BUFFER_SIZE(its->cbaser)) {
1569                mutex_unlock(&its->cmd_lock);
1570                return;
1571        }
1572        its->cwriter = reg;
1573
1574        vgic_its_process_commands(kvm, its);
1575
1576        mutex_unlock(&its->cmd_lock);
1577}
1578
1579static unsigned long vgic_mmio_read_its_cwriter(struct kvm *kvm,
1580                                                struct vgic_its *its,
1581                                                gpa_t addr, unsigned int len)
1582{
1583        return extract_bytes(its->cwriter, addr & 0x7, len);
1584}
1585
1586static unsigned long vgic_mmio_read_its_creadr(struct kvm *kvm,
1587                                               struct vgic_its *its,
1588                                               gpa_t addr, unsigned int len)
1589{
1590        return extract_bytes(its->creadr, addr & 0x7, len);
1591}
1592
1593static int vgic_mmio_uaccess_write_its_creadr(struct kvm *kvm,
1594                                              struct vgic_its *its,
1595                                              gpa_t addr, unsigned int len,
1596                                              unsigned long val)
1597{
1598        u32 cmd_offset;
1599        int ret = 0;
1600
1601        mutex_lock(&its->cmd_lock);
1602
1603        if (its->enabled) {
1604                ret = -EBUSY;
1605                goto out;
1606        }
1607
1608        cmd_offset = ITS_CMD_OFFSET(val);
1609        if (cmd_offset >= ITS_CMD_BUFFER_SIZE(its->cbaser)) {
1610                ret = -EINVAL;
1611                goto out;
1612        }
1613
1614        its->creadr = cmd_offset;
1615out:
1616        mutex_unlock(&its->cmd_lock);
1617        return ret;
1618}
1619
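/*
 * The eight 64-bit GITS_BASERn registers are laid out back to back;
 * BASER_INDEX() turns an MMIO offset within that window into the register
 * index. Only index 0 (device table) and 1 (collection table) are backed,
 * the others read as zero and ignore writes.
 */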
1620#define BASER_INDEX(addr) (((addr) / sizeof(u64)) & 0x7)
1621static unsigned long vgic_mmio_read_its_baser(struct kvm *kvm,
1622                                              struct vgic_its *its,
1623                                              gpa_t addr, unsigned int len)
1624{
1625        u64 reg;
1626
1627        switch (BASER_INDEX(addr)) {
1628        case 0:
1629                reg = its->baser_device_table;
1630                break;
1631        case 1:
1632                reg = its->baser_coll_table;
1633                break;
1634        default:
1635                reg = 0;
1636                break;
1637        }
1638
1639        return extract_bytes(reg, addr & 7, len);
1640}
1641
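/*
 * Handle guest writes to GITS_BASERn: the Type and Entry_Size fields are
 * read-only and regenerated from the current ABI, the collection table may
 * never be indirect, and clearing the Valid bit frees the corresponding
 * shadow device or collection list.
 */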
1642#define GITS_BASER_RO_MASK      (GENMASK_ULL(52, 48) | GENMASK_ULL(58, 56))
1643static void vgic_mmio_write_its_baser(struct kvm *kvm,
1644                                      struct vgic_its *its,
1645                                      gpa_t addr, unsigned int len,
1646                                      unsigned long val)
1647{
1648        const struct vgic_its_abi *abi = vgic_its_get_abi(its);
1649        u64 entry_size, table_type;
1650        u64 reg, *regptr, clearbits = 0;
1651
1652        /* When GITS_CTLR.Enable is 1, we ignore write accesses. */
1653        if (its->enabled)
1654                return;
1655
1656        switch (BASER_INDEX(addr)) {
1657        case 0:
1658                regptr = &its->baser_device_table;
1659                entry_size = abi->dte_esz;
1660                table_type = GITS_BASER_TYPE_DEVICE;
1661                break;
1662        case 1:
1663                regptr = &its->baser_coll_table;
1664                entry_size = abi->cte_esz;
1665                table_type = GITS_BASER_TYPE_COLLECTION;
1666                clearbits = GITS_BASER_INDIRECT;
1667                break;
1668        default:
1669                return;
1670        }
1671
1672        reg = update_64bit_reg(*regptr, addr & 7, len, val);
1673        reg &= ~GITS_BASER_RO_MASK;
1674        reg &= ~clearbits;
1675
1676        reg |= (entry_size - 1) << GITS_BASER_ENTRY_SIZE_SHIFT;
1677        reg |= table_type << GITS_BASER_TYPE_SHIFT;
1678        reg = vgic_sanitise_its_baser(reg);
1679
1680        *regptr = reg;
1681
1682        if (!(reg & GITS_BASER_VALID)) {
1683                /* Take the its_lock to prevent a race with a save/restore */
1684                mutex_lock(&its->its_lock);
1685                switch (table_type) {
1686                case GITS_BASER_TYPE_DEVICE:
1687                        vgic_its_free_device_list(kvm, its);
1688                        break;
1689                case GITS_BASER_TYPE_COLLECTION:
1690                        vgic_its_free_collection_list(kvm, its);
1691                        break;
1692                }
1693                mutex_unlock(&its->its_lock);
1694        }
1695}
1696
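/*
 * GITS_CTLR read: the ITS is reported as quiescent once the command queue
 * has been drained, i.e. CREADR has caught up with CWRITER.
 */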
1697static unsigned long vgic_mmio_read_its_ctlr(struct kvm *kvm,
1698                                             struct vgic_its *its,
1699                                             gpa_t addr, unsigned int len)
1700{
1701        u32 reg = 0;
1702
1703        mutex_lock(&its->cmd_lock);
1704        if (its->creadr == its->cwriter)
1705                reg |= GITS_CTLR_QUIESCENT;
1706        if (its->enabled)
1707                reg |= GITS_CTLR_ENABLE;
1708        mutex_unlock(&its->cmd_lock);
1709
1710        return reg;
1711}
1712
1713static void vgic_mmio_write_its_ctlr(struct kvm *kvm, struct vgic_its *its,
1714                                     gpa_t addr, unsigned int len,
1715                                     unsigned long val)
1716{
1717        mutex_lock(&its->cmd_lock);
1718
1719        /*
1720         * It is UNPREDICTABLE to enable the ITS if any of the CBASER or
1721         * device/collection BASER are invalid
1722         */
1723        if (!its->enabled && (val & GITS_CTLR_ENABLE) &&
1724                (!(its->baser_device_table & GITS_BASER_VALID) ||
1725                 !(its->baser_coll_table & GITS_BASER_VALID) ||
1726                 !(its->cbaser & GITS_CBASER_VALID)))
1727                goto out;
1728
1729        its->enabled = !!(val & GITS_CTLR_ENABLE);
1730        if (!its->enabled)
1731                vgic_its_invalidate_cache(kvm);
1732
1733        /*
1734         * Try to process any pending commands. This function bails out early
1735         * if the ITS is disabled or no commands have been queued.
1736         */
1737        vgic_its_process_commands(kvm, its);
1738
1739out:
1740        mutex_unlock(&its->cmd_lock);
1741}
1742
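/*
 * Helpers to build one entry of the ITS register map: the offset within the
 * ITS control frame, the access width(s) we accept, the guest read/write
 * handlers and, optionally, a separate handler for userspace writes made
 * through the KVM_DEV_ARM_VGIC_GRP_ITS_REGS attribute group.
 */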
1743#define REGISTER_ITS_DESC(off, rd, wr, length, acc)             \
1744{                                                               \
1745        .reg_offset = off,                                      \
1746        .len = length,                                          \
1747        .access_flags = acc,                                    \
1748        .its_read = rd,                                         \
1749        .its_write = wr,                                        \
1750}
1751
1752#define REGISTER_ITS_DESC_UACCESS(off, rd, wr, uwr, length, acc)\
1753{                                                               \
1754        .reg_offset = off,                                      \
1755        .len = length,                                          \
1756        .access_flags = acc,                                    \
1757        .its_read = rd,                                         \
1758        .its_write = wr,                                        \
1759        .uaccess_its_write = uwr,                               \
1760}
1761
1762static void its_mmio_write_wi(struct kvm *kvm, struct vgic_its *its,
1763                              gpa_t addr, unsigned int len, unsigned long val)
1764{
1765        /* Ignore */
1766}
1767
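/*
 * The MMIO register map of the emulated ITS control frame, matched against
 * guest accesses and userspace KVM_DEV_ARM_VGIC_GRP_ITS_REGS accesses.
 */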
1768static struct vgic_register_region its_registers[] = {
1769        REGISTER_ITS_DESC(GITS_CTLR,
1770                vgic_mmio_read_its_ctlr, vgic_mmio_write_its_ctlr, 4,
1771                VGIC_ACCESS_32bit),
1772        REGISTER_ITS_DESC_UACCESS(GITS_IIDR,
1773                vgic_mmio_read_its_iidr, its_mmio_write_wi,
1774                vgic_mmio_uaccess_write_its_iidr, 4,
1775                VGIC_ACCESS_32bit),
1776        REGISTER_ITS_DESC(GITS_TYPER,
1777                vgic_mmio_read_its_typer, its_mmio_write_wi, 8,
1778                VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
1779        REGISTER_ITS_DESC(GITS_CBASER,
1780                vgic_mmio_read_its_cbaser, vgic_mmio_write_its_cbaser, 8,
1781                VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
1782        REGISTER_ITS_DESC(GITS_CWRITER,
1783                vgic_mmio_read_its_cwriter, vgic_mmio_write_its_cwriter, 8,
1784                VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
1785        REGISTER_ITS_DESC_UACCESS(GITS_CREADR,
1786                vgic_mmio_read_its_creadr, its_mmio_write_wi,
1787                vgic_mmio_uaccess_write_its_creadr, 8,
1788                VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
1789        REGISTER_ITS_DESC(GITS_BASER,
1790                vgic_mmio_read_its_baser, vgic_mmio_write_its_baser, 0x40,
1791                VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
1792        REGISTER_ITS_DESC(GITS_IDREGS_BASE,
1793                vgic_mmio_read_its_idregs, its_mmio_write_wi, 0x30,
1794                VGIC_ACCESS_32bit),
1795};
1796
1797/* This is called on setting the LPI enable bit in the redistributor. */
1798void vgic_enable_lpis(struct kvm_vcpu *vcpu)
1799{
1800        if (!(vcpu->arch.vgic_cpu.pendbaser & GICR_PENDBASER_PTZ))
1801                its_sync_lpi_pending_table(vcpu);
1802}
1803
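/*
 * Register the ITS MMIO region on the KVM I/O bus once userspace has told
 * us where it lives; trying to place an already placed ITS fails with -EBUSY.
 */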
1804static int vgic_register_its_iodev(struct kvm *kvm, struct vgic_its *its,
1805                                   u64 addr)
1806{
1807        struct vgic_io_device *iodev = &its->iodev;
1808        int ret;
1809
1810        mutex_lock(&kvm->slots_lock);
1811        if (!IS_VGIC_ADDR_UNDEF(its->vgic_its_base)) {
1812                ret = -EBUSY;
1813                goto out;
1814        }
1815
1816        its->vgic_its_base = addr;
1817        iodev->regions = its_registers;
1818        iodev->nr_regions = ARRAY_SIZE(its_registers);
1819        kvm_iodevice_init(&iodev->dev, &kvm_io_gic_ops);
1820
1821        iodev->base_addr = its->vgic_its_base;
1822        iodev->iodev_type = IODEV_ITS;
1823        iodev->its = its;
1824        ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, iodev->base_addr,
1825                                      KVM_VGIC_V3_ITS_SIZE, &iodev->dev);
1826out:
1827        mutex_unlock(&kvm->slots_lock);
1828
1829        return ret;
1830}
1831
1832/* Default is 16 cached LPIs per vcpu */
1833#define LPI_DEFAULT_PCPU_CACHE_SIZE     16
1834
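/*
 * Pre-allocate the LPI translation cache, sized at LPI_DEFAULT_PCPU_CACHE_SIZE
 * entries per online VCPU. The entries start out empty and an allocation
 * failure merely shrinks the cache, it is not fatal.
 */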
1835void vgic_lpi_translation_cache_init(struct kvm *kvm)
1836{
1837        struct vgic_dist *dist = &kvm->arch.vgic;
1838        unsigned int sz;
1839        int i;
1840
1841        if (!list_empty(&dist->lpi_translation_cache))
1842                return;
1843
1844        sz = atomic_read(&kvm->online_vcpus) * LPI_DEFAULT_PCPU_CACHE_SIZE;
1845
1846        for (i = 0; i < sz; i++) {
1847                struct vgic_translation_cache_entry *cte;
1848
1849                /* An allocation failure is not fatal */
1850                cte = kzalloc(sizeof(*cte), GFP_KERNEL);
1851                if (WARN_ON(!cte))
1852                        break;
1853
1854                INIT_LIST_HEAD(&cte->entry);
1855                list_add(&cte->entry, &dist->lpi_translation_cache);
1856        }
1857}
1858
1859void vgic_lpi_translation_cache_destroy(struct kvm *kvm)
1860{
1861        struct vgic_dist *dist = &kvm->arch.vgic;
1862        struct vgic_translation_cache_entry *cte, *tmp;
1863
1864        vgic_its_invalidate_cache(kvm);
1865
1866        list_for_each_entry_safe(cte, tmp,
1867                                 &dist->lpi_translation_cache, entry) {
1868                list_del(&cte->entry);
1869                kfree(cte);
1870        }
1871}
1872
1873#define INITIAL_BASER_VALUE                                               \
1874        (GIC_BASER_CACHEABILITY(GITS_BASER, INNER, RaWb)                | \
1875         GIC_BASER_CACHEABILITY(GITS_BASER, OUTER, SameAsInner)         | \
1876         GIC_BASER_SHAREABILITY(GITS_BASER, InnerShareable)             | \
1877         GITS_BASER_PAGE_SIZE_64K)
1878
1879#define INITIAL_PROPBASER_VALUE                                           \
1880        (GIC_BASER_CACHEABILITY(GICR_PROPBASER, INNER, RaWb)            | \
1881         GIC_BASER_CACHEABILITY(GICR_PROPBASER, OUTER, SameAsInner)     | \
1882         GIC_BASER_SHAREABILITY(GICR_PROPBASER, InnerShareable))
1883
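/*
 * KVM_CREATE_DEVICE handler for KVM_DEV_TYPE_ARM_VGIC_ITS: allocate a fresh
 * ITS instance with sane BASER/PROPBASER defaults and the newest ABI. The
 * MMIO base address is only assigned later via KVM_DEV_ARM_VGIC_GRP_ADDR.
 */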
1884static int vgic_its_create(struct kvm_device *dev, u32 type)
1885{
1886        struct vgic_its *its;
1887
1888        if (type != KVM_DEV_TYPE_ARM_VGIC_ITS)
1889                return -ENODEV;
1890
1891        its = kzalloc(sizeof(struct vgic_its), GFP_KERNEL);
1892        if (!its)
1893                return -ENOMEM;
1894
1895        if (vgic_initialized(dev->kvm)) {
1896                int ret = vgic_v4_init(dev->kvm);
1897                if (ret < 0) {
1898                        kfree(its);
1899                        return ret;
1900                }
1901
1902                vgic_lpi_translation_cache_init(dev->kvm);
1903        }
1904
1905        mutex_init(&its->its_lock);
1906        mutex_init(&its->cmd_lock);
1907
1908        its->vgic_its_base = VGIC_ADDR_UNDEF;
1909
1910        INIT_LIST_HEAD(&its->device_list);
1911        INIT_LIST_HEAD(&its->collection_list);
1912
1913        dev->kvm->arch.vgic.msis_require_devid = true;
1914        dev->kvm->arch.vgic.has_its = true;
1915        its->enabled = false;
1916        its->dev = dev;
1917
1918        its->baser_device_table = INITIAL_BASER_VALUE                   |
1919                ((u64)GITS_BASER_TYPE_DEVICE << GITS_BASER_TYPE_SHIFT);
1920        its->baser_coll_table = INITIAL_BASER_VALUE |
1921                ((u64)GITS_BASER_TYPE_COLLECTION << GITS_BASER_TYPE_SHIFT);
1922        dev->kvm->arch.vgic.propbaser = INITIAL_PROPBASER_VALUE;
1923
1924        dev->private = its;
1925
1926        return vgic_its_set_abi(its, NR_ITS_ABIS - 1);
1927}
1928
1929static void vgic_its_destroy(struct kvm_device *kvm_dev)
1930{
1931        struct kvm *kvm = kvm_dev->kvm;
1932        struct vgic_its *its = kvm_dev->private;
1933
1934        mutex_lock(&its->its_lock);
1935
1936        vgic_its_free_device_list(kvm, its);
1937        vgic_its_free_collection_list(kvm, its);
1938
1939        mutex_unlock(&its->its_lock);
1940        kfree(its);
1941        kfree(kvm_dev); /* allocated by kvm_ioctl_create_device(), freed here in .destroy */
1942}
1943
1944static int vgic_its_has_attr_regs(struct kvm_device *dev,
1945                                  struct kvm_device_attr *attr)
1946{
1947        const struct vgic_register_region *region;
1948        gpa_t offset = attr->attr;
1949        int align;
1950
1951        align = (offset < GITS_TYPER) || (offset >= GITS_PIDR4) ? 0x3 : 0x7;
1952
1953        if (offset & align)
1954                return -EINVAL;
1955
1956        region = vgic_find_mmio_region(its_registers,
1957                                       ARRAY_SIZE(its_registers),
1958                                       offset);
1959        if (!region)
1960                return -ENXIO;
1961
1962        return 0;
1963}
1964
1965static int vgic_its_attr_regs_access(struct kvm_device *dev,
1966                                     struct kvm_device_attr *attr,
1967                                     u64 *reg, bool is_write)
1968{
1969        const struct vgic_register_region *region;
1970        struct vgic_its *its;
1971        gpa_t addr, offset;
1972        unsigned int len;
1973        int align, ret = 0;
1974
1975        its = dev->private;
1976        offset = attr->attr;
1977
1978        /*
1979         * Although the spec supports upper/lower 32-bit accesses to
1980         * 64-bit ITS registers, the userspace ABI requires 64-bit
1981         * accesses to all 64-bit wide registers. We therefore only
1982         * support 32-bit accesses to GITS_CTLR, GITS_IIDR and GITS ID
1983         * registers
1984         */
1985        if ((offset < GITS_TYPER) || (offset >= GITS_PIDR4))
1986                align = 0x3;
1987        else
1988                align = 0x7;
1989
1990        if (offset & align)
1991                return -EINVAL;
1992
1993        mutex_lock(&dev->kvm->lock);
1994
1995        if (IS_VGIC_ADDR_UNDEF(its->vgic_its_base)) {
1996                ret = -ENXIO;
1997                goto out;
1998        }
1999
2000        region = vgic_find_mmio_region(its_registers,
2001                                       ARRAY_SIZE(its_registers),
2002                                       offset);
2003        if (!region) {
2004                ret = -ENXIO;
2005                goto out;
2006        }
2007
2008        if (!lock_all_vcpus(dev->kvm)) {
2009                ret = -EBUSY;
2010                goto out;
2011        }
2012
2013        addr = its->vgic_its_base + offset;
2014
2015        len = region->access_flags & VGIC_ACCESS_64bit ? 8 : 4;
2016
2017        if (is_write) {
2018                if (region->uaccess_its_write)
2019                        ret = region->uaccess_its_write(dev->kvm, its, addr,
2020                                                        len, *reg);
2021                else
2022                        region->its_write(dev->kvm, its, addr, len, *reg);
2023        } else {
2024                *reg = region->its_read(dev->kvm, its, addr, len);
2025        }
2026        unlock_all_vcpus(dev->kvm);
2027out:
2028        mutex_unlock(&dev->kvm->lock);
2029        return ret;
2030}
2031
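/*
 * Saved DTEs and ITEs encode the ID distance to the next entry of the
 * (sorted) list so that restore can skip unused IDs. The offset is capped
 * to what the field can encode; when the real gap is larger, restore simply
 * walks the following entries one by one until the next valid one is found.
 */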
2032static u32 compute_next_devid_offset(struct list_head *h,
2033                                     struct its_device *dev)
2034{
2035        struct its_device *next;
2036        u32 next_offset;
2037
2038        if (list_is_last(&dev->dev_list, h))
2039                return 0;
2040        next = list_next_entry(dev, dev_list);
2041        next_offset = next->device_id - dev->device_id;
2042
2043        return min_t(u32, next_offset, VITS_DTE_MAX_DEVID_OFFSET);
2044}
2045
2046static u32 compute_next_eventid_offset(struct list_head *h, struct its_ite *ite)
2047{
2048        struct its_ite *next;
2049        u32 next_offset;
2050
2051        if (list_is_last(&ite->ite_list, h))
2052                return 0;
2053        next = list_next_entry(ite, ite_list);
2054        next_offset = next->event_id - ite->event_id;
2055
2056        return min_t(u32, next_offset, VITS_ITE_MAX_EVENTID_OFFSET);
2057}
2058
2059/**
2060 * entry_fn_t - Callback called on a table entry restore path
2061 * @its: its handle
2062 * @id: id of the entry
2063 * @entry: pointer to the entry
2064 * @opaque: pointer to opaque caller data
2065 *
2066 * Return: < 0 on error, 0 if last element was identified, id offset to next
2067 * element otherwise
2068 */
2069typedef int (*entry_fn_t)(struct vgic_its *its, u32 id, void *entry,
2070                          void *opaque);
2071
2072/**
2073 * scan_its_table - Scan a contiguous table in guest RAM and apply a function
2074 * to each entry
2075 *
2076 * @its: its handle
2077 * @base: base gpa of the table
2078 * @size: size of the table in bytes
2079 * @esz: entry size in bytes
2080 * @start_id: the ID of the first entry in the table
2081 * (non-zero for second level tables)
2082 * @fn: function to apply on each entry
2083 *
2084 * Return: < 0 on error, 0 if last element was identified, 1 otherwise
2085 * (the last element may not be found on second level tables)
2086 */
2087static int scan_its_table(struct vgic_its *its, gpa_t base, int size, u32 esz,
2088                          int start_id, entry_fn_t fn, void *opaque)
2089{
2090        struct kvm *kvm = its->dev->kvm;
2091        unsigned long len = size;
2092        int id = start_id;
2093        gpa_t gpa = base;
2094        char entry[ESZ_MAX];
2095        int ret;
2096
2097        memset(entry, 0, esz);
2098
2099        while (len > 0) {
2100                int next_offset;
2101                size_t byte_offset;
2102
2103                ret = kvm_read_guest_lock(kvm, gpa, entry, esz);
2104                if (ret)
2105                        return ret;
2106
2107                next_offset = fn(its, id, entry, opaque);
2108                if (next_offset <= 0)
2109                        return next_offset;
2110
2111                byte_offset = next_offset * esz;
2112                id += next_offset;
2113                gpa += byte_offset;
2114                len -= byte_offset;
2115        }
2116        return 1;
2117}
2118
2119/**
2120 * vgic_its_save_ite - Save an interrupt translation entry at @gpa
 * @its: its handle
 * @dev: ITS device owning the ITE
 * @ite: interrupt translation entry to save
 * @gpa: GPA of the ITE slot in the device's ITT
 * @ite_esz: ITE entry size
2121 */
2122static int vgic_its_save_ite(struct vgic_its *its, struct its_device *dev,
2123                              struct its_ite *ite, gpa_t gpa, int ite_esz)
2124{
2125        struct kvm *kvm = its->dev->kvm;
2126        u32 next_offset;
2127        u64 val;
2128
2129        next_offset = compute_next_eventid_offset(&dev->itt_head, ite);
2130        val = ((u64)next_offset << KVM_ITS_ITE_NEXT_SHIFT) |
2131               ((u64)ite->irq->intid << KVM_ITS_ITE_PINTID_SHIFT) |
2132                ite->collection->collection_id;
2133        val = cpu_to_le64(val);
2134        return kvm_write_guest_lock(kvm, gpa, &val, ite_esz);
2135}
2136
2137/**
2138 * vgic_its_restore_ite - restore an interrupt translation entry
 * @its: its handle
2139 * @event_id: id used for indexing
2140 * @ptr: pointer to the ITE entry
2141 * @opaque: pointer to the its_device
2142 */
2143static int vgic_its_restore_ite(struct vgic_its *its, u32 event_id,
2144                                void *ptr, void *opaque)
2145{
2146        struct its_device *dev = (struct its_device *)opaque;
2147        struct its_collection *collection;
2148        struct kvm *kvm = its->dev->kvm;
2149        struct kvm_vcpu *vcpu = NULL;
2150        u64 val;
2151        u64 *p = (u64 *)ptr;
2152        struct vgic_irq *irq;
2153        u32 coll_id, lpi_id;
2154        struct its_ite *ite;
2155        u32 offset;
2156
2157        val = *p;
2158
2159        val = le64_to_cpu(val);
2160
2161        coll_id = val & KVM_ITS_ITE_ICID_MASK;
2162        lpi_id = (val & KVM_ITS_ITE_PINTID_MASK) >> KVM_ITS_ITE_PINTID_SHIFT;
2163
2164        if (!lpi_id)
2165                return 1; /* invalid entry, no choice but to scan next entry */
2166
2167        if (lpi_id < VGIC_MIN_LPI)
2168                return -EINVAL;
2169
2170        offset = val >> KVM_ITS_ITE_NEXT_SHIFT;
2171        if (event_id + offset >= BIT_ULL(dev->num_eventid_bits))
2172                return -EINVAL;
2173
2174        collection = find_collection(its, coll_id);
2175        if (!collection)
2176                return -EINVAL;
2177
2178        ite = vgic_its_alloc_ite(dev, collection, event_id);
2179        if (IS_ERR(ite))
2180                return PTR_ERR(ite);
2181
2182        if (its_is_collection_mapped(collection))
2183                vcpu = kvm_get_vcpu(kvm, collection->target_addr);
2184
2185        irq = vgic_add_lpi(kvm, lpi_id, vcpu);
2186        if (IS_ERR(irq))
2187                return PTR_ERR(irq);
2188        ite->irq = irq;
2189
2190        return offset;
2191}
2192
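/* list_sort() comparator: order ITEs by ascending event ID before saving. */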
2193static int vgic_its_ite_cmp(void *priv, struct list_head *a,
2194                            struct list_head *b)
2195{
2196        struct its_ite *itea = container_of(a, struct its_ite, ite_list);
2197        struct its_ite *iteb = container_of(b, struct its_ite, ite_list);
2198
2199        if (itea->event_id < iteb->event_id)
2200                return -1;
2201        else
2202                return 1;
2203}
2204
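/*
 * Write the ITT of @device back to guest memory, each ITE at its event ID
 * indexed slot. LPIs that have been forwarded to GICv4 (hw bit set) cannot
 * be saved, as we have no direct access to their state.
 */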
2205static int vgic_its_save_itt(struct vgic_its *its, struct its_device *device)
2206{
2207        const struct vgic_its_abi *abi = vgic_its_get_abi(its);
2208        gpa_t base = device->itt_addr;
2209        struct its_ite *ite;
2210        int ret;
2211        int ite_esz = abi->ite_esz;
2212
2213        list_sort(NULL, &device->itt_head, vgic_its_ite_cmp);
2214
2215        list_for_each_entry(ite, &device->itt_head, ite_list) {
2216                gpa_t gpa = base + ite->event_id * ite_esz;
2217
2218                /*
2219                 * If an LPI carries the HW bit, this means that this
2220                 * interrupt is controlled by GICv4, and we do not
2221                 * have direct access to that state. Let's simply fail
2222                 * the save operation...
2223                 */
2224                if (ite->irq->hw)
2225                        return -EACCES;
2226
2227                ret = vgic_its_save_ite(its, device, ite, gpa, ite_esz);
2228                if (ret)
2229                        return ret;
2230        }
2231        return 0;
2232}
2233
2234/**
2235 * vgic_its_restore_itt - restore the ITT of a device
2236 *
2237 * @its: its handle
2238 * @dev: device handle
2239 *
2240 * Return 0 on success, < 0 on error
2241 */
2242static int vgic_its_restore_itt(struct vgic_its *its, struct its_device *dev)
2243{
2244        const struct vgic_its_abi *abi = vgic_its_get_abi(its);
2245        gpa_t base = dev->itt_addr;
2246        int ret;
2247        int ite_esz = abi->ite_esz;
2248        size_t max_size = BIT_ULL(dev->num_eventid_bits) * ite_esz;
2249
2250        ret = scan_its_table(its, base, max_size, ite_esz, 0,
2251                             vgic_its_restore_ite, dev);
2252
2253        /* scan_its_table returns +1 if all ITEs are invalid */
2254        if (ret > 0)
2255                ret = 0;
2256
2257        return ret;
2258}
2259
2260/**
2261 * vgic_its_save_dte - Save a device table entry at a given GPA
2262 *
2263 * @its: ITS handle
2264 * @dev: ITS device
2265 * @ptr: GPA of the device table entry
 * @dte_esz: device table entry size
2266 */
2267static int vgic_its_save_dte(struct vgic_its *its, struct its_device *dev,
2268                             gpa_t ptr, int dte_esz)
2269{
2270        struct kvm *kvm = its->dev->kvm;
2271        u64 val, itt_addr_field;
2272        u32 next_offset;
2273
2274        itt_addr_field = dev->itt_addr >> 8;
2275        next_offset = compute_next_devid_offset(&its->device_list, dev);
2276        val = (1ULL << KVM_ITS_DTE_VALID_SHIFT |
2277               ((u64)next_offset << KVM_ITS_DTE_NEXT_SHIFT) |
2278               (itt_addr_field << KVM_ITS_DTE_ITTADDR_SHIFT) |
2279                (dev->num_eventid_bits - 1));
2280        val = cpu_to_le64(val);
2281        return kvm_write_guest_lock(kvm, ptr, &val, dte_esz);
2282}
2283
2284/**
2285 * vgic_its_restore_dte - restore a device table entry
2286 *
2287 * @its: its handle
2288 * @id: device id the DTE corresponds to
2289 * @ptr: kernel VA where the 8 byte DTE is located
2290 * @opaque: unused
2291 *
2292 * Return: < 0 on error, 0 if the dte is the last one, id offset to the
2293 * next dte otherwise
2294 */
2295static int vgic_its_restore_dte(struct vgic_its *its, u32 id,
2296                                void *ptr, void *opaque)
2297{
2298        struct its_device *dev;
2299        gpa_t itt_addr;
2300        u8 num_eventid_bits;
2301        u64 entry = *(u64 *)ptr;
2302        bool valid;
2303        u32 offset;
2304        int ret;
2305
2306        entry = le64_to_cpu(entry);
2307
2308        valid = entry >> KVM_ITS_DTE_VALID_SHIFT;
2309        num_eventid_bits = (entry & KVM_ITS_DTE_SIZE_MASK) + 1;
2310        itt_addr = ((entry & KVM_ITS_DTE_ITTADDR_MASK)
2311                        >> KVM_ITS_DTE_ITTADDR_SHIFT) << 8;
2312
2313        if (!valid)
2314                return 1;
2315
2316        /* dte entry is valid */
2317        offset = (entry & KVM_ITS_DTE_NEXT_MASK) >> KVM_ITS_DTE_NEXT_SHIFT;
2318
2319        dev = vgic_its_alloc_device(its, id, itt_addr, num_eventid_bits);
2320        if (IS_ERR(dev))
2321                return PTR_ERR(dev);
2322
2323        ret = vgic_its_restore_itt(its, dev);
2324        if (ret) {
2325                vgic_its_free_device(its->dev->kvm, dev);
2326                return ret;
2327        }
2328
2329        return offset;
2330}
2331
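/* list_sort() comparator: order devices by ascending device ID before saving. */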
2332static int vgic_its_device_cmp(void *priv, struct list_head *a,
2333                               struct list_head *b)
2334{
2335        struct its_device *deva = container_of(a, struct its_device, dev_list);
2336        struct its_device *devb = container_of(b, struct its_device, dev_list);
2337
2338        if (deva->device_id < devb->device_id)
2339                return -1;
2340        else
2341                return 1;
2342}
2343
2344/**
2345 * vgic_its_save_device_tables - Save the device table and all ITTs
2346 * into guest RAM
2347 *
2348 * L1/L2 handling is hidden by vgic_its_check_id() helper which directly
2349 * returns the GPA of the device entry
2350 */
2351static int vgic_its_save_device_tables(struct vgic_its *its)
2352{
2353        const struct vgic_its_abi *abi = vgic_its_get_abi(its);
2354        u64 baser = its->baser_device_table;
2355        struct its_device *dev;
2356        int dte_esz = abi->dte_esz;
2357
2358        if (!(baser & GITS_BASER_VALID))
2359                return 0;
2360
2361        list_sort(NULL, &its->device_list, vgic_its_device_cmp);
2362
2363        list_for_each_entry(dev, &its->device_list, dev_list) {
2364                int ret;
2365                gpa_t eaddr;
2366
2367                if (!vgic_its_check_id(its, baser,
2368                                       dev->device_id, &eaddr))
2369                        return -EINVAL;
2370
2371                ret = vgic_its_save_itt(its, dev);
2372                if (ret)
2373                        return ret;
2374
2375                ret = vgic_its_save_dte(its, dev, eaddr, dte_esz);
2376                if (ret)
2377                        return ret;
2378        }
2379        return 0;
2380}
2381
2382/**
2383 * handle_l1_dte - callback used for L1 device table entries (2 stage case)
2384 *
2385 * @its: its handle
2386 * @id: index of the entry in the L1 table
2387 * @addr: kernel VA
2388 * @opaque: unused
2389 *
2390 * L1 table entries are scanned by steps of 1 entry
2391 * Return < 0 if error, 0 if last dte was found when scanning the L2
2392 * table, +1 otherwise (meaning next L1 entry must be scanned)
2393 */
2394static int handle_l1_dte(struct vgic_its *its, u32 id, void *addr,
2395                         void *opaque)
2396{
2397        const struct vgic_its_abi *abi = vgic_its_get_abi(its);
2398        int l2_start_id = id * (SZ_64K / abi->dte_esz);
2399        u64 entry = *(u64 *)addr;
2400        int dte_esz = abi->dte_esz;
2401        gpa_t gpa;
2402        int ret;
2403
2404        entry = le64_to_cpu(entry);
2405
2406        if (!(entry & KVM_ITS_L1E_VALID_MASK))
2407                return 1;
2408
2409        gpa = entry & KVM_ITS_L1E_ADDR_MASK;
2410
2411        ret = scan_its_table(its, gpa, SZ_64K, dte_esz,
2412                             l2_start_id, vgic_its_restore_dte, NULL);
2413
2414        return ret;
2415}
2416
2417/**
2418 * vgic_its_restore_device_tables - Restore the device table and all ITTs
2419 * from guest RAM to internal data structs
2420 */
2421static int vgic_its_restore_device_tables(struct vgic_its *its)
2422{
2423        const struct vgic_its_abi *abi = vgic_its_get_abi(its);
2424        u64 baser = its->baser_device_table;
2425        int l1_esz, ret;
2426        int l1_tbl_size = GITS_BASER_NR_PAGES(baser) * SZ_64K;
2427        gpa_t l1_gpa;
2428
2429        if (!(baser & GITS_BASER_VALID))
2430                return 0;
2431
2432        l1_gpa = GITS_BASER_ADDR_48_to_52(baser);
2433
2434        if (baser & GITS_BASER_INDIRECT) {
2435                l1_esz = GITS_LVL1_ENTRY_SIZE;
2436                ret = scan_its_table(its, l1_gpa, l1_tbl_size, l1_esz, 0,
2437                                     handle_l1_dte, NULL);
2438        } else {
2439                l1_esz = abi->dte_esz;
2440                ret = scan_its_table(its, l1_gpa, l1_tbl_size, l1_esz, 0,
2441                                     vgic_its_restore_dte, NULL);
2442        }
2443
2444        /* scan_its_table returns +1 if all entries are invalid */
2445        if (ret > 0)
2446                ret = 0;
2447
2448        return ret;
2449}
2450
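/*
 * Collection table entries are saved as a packed 64-bit word holding a
 * valid bit, the target vcpu (rdbase) and the collection ID. An entry with
 * the valid bit clear terminates the table on restore.
 */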
2451static int vgic_its_save_cte(struct vgic_its *its,
2452                             struct its_collection *collection,
2453                             gpa_t gpa, int esz)
2454{
2455        u64 val;
2456
2457        val = (1ULL << KVM_ITS_CTE_VALID_SHIFT |
2458               ((u64)collection->target_addr << KVM_ITS_CTE_RDBASE_SHIFT) |
2459               collection->collection_id);
2460        val = cpu_to_le64(val);
2461        return kvm_write_guest_lock(its->dev->kvm, gpa, &val, esz);
2462}
2463
2464static int vgic_its_restore_cte(struct vgic_its *its, gpa_t gpa, int esz)
2465{
2466        struct its_collection *collection;
2467        struct kvm *kvm = its->dev->kvm;
2468        u32 target_addr, coll_id;
2469        u64 val;
2470        int ret;
2471
2472        BUG_ON(esz > sizeof(val));
2473        ret = kvm_read_guest_lock(kvm, gpa, &val, esz);
2474        if (ret)
2475                return ret;
2476        val = le64_to_cpu(val);
2477        if (!(val & KVM_ITS_CTE_VALID_MASK))
2478                return 0;
2479
2480        target_addr = (u32)(val >> KVM_ITS_CTE_RDBASE_SHIFT);
2481        coll_id = val & KVM_ITS_CTE_ICID_MASK;
2482
2483        if (target_addr != COLLECTION_NOT_MAPPED &&
2484            target_addr >= atomic_read(&kvm->online_vcpus))
2485                return -EINVAL;
2486
2487        collection = find_collection(its, coll_id);
2488        if (collection)
2489                return -EEXIST;
2490        ret = vgic_its_alloc_collection(its, &collection, coll_id);
2491        if (ret)
2492                return ret;
2493        collection->target_addr = target_addr;
2494        return 1;
2495}
2496
2497/**
2498 * vgic_its_save_collection_table - Save the collection table into
2499 * guest RAM
2500 */
2501static int vgic_its_save_collection_table(struct vgic_its *its)
2502{
2503        const struct vgic_its_abi *abi = vgic_its_get_abi(its);
2504        u64 baser = its->baser_coll_table;
2505        gpa_t gpa = GITS_BASER_ADDR_48_to_52(baser);
2506        struct its_collection *collection;
2507        u64 val;
2508        size_t max_size, filled = 0;
2509        int ret, cte_esz = abi->cte_esz;
2510
2511        if (!(baser & GITS_BASER_VALID))
2512                return 0;
2513
2514        max_size = GITS_BASER_NR_PAGES(baser) * SZ_64K;
2515
2516        list_for_each_entry(collection, &its->collection_list, coll_list) {
2517                ret = vgic_its_save_cte(its, collection, gpa, cte_esz);
2518                if (ret)
2519                        return ret;
2520                gpa += cte_esz;
2521                filled += cte_esz;
2522        }
2523
2524        if (filled == max_size)
2525                return 0;
2526
2527        /*
2528         * table is not fully filled, add a last dummy element
2529         * with valid bit unset
2530         */
2531        val = 0;
2532        BUG_ON(cte_esz > sizeof(val));
2533        ret = kvm_write_guest_lock(its->dev->kvm, gpa, &val, cte_esz);
2534        return ret;
2535}
2536
2537/**
2538 * vgic_its_restore_collection_table - reads the collection table
2539 * in guest memory and restores the ITS internal state. Requires the
2540 * BASER registers to be restored before.
2541 */
2542static int vgic_its_restore_collection_table(struct vgic_its *its)
2543{
2544        const struct vgic_its_abi *abi = vgic_its_get_abi(its);
2545        u64 baser = its->baser_coll_table;
2546        int cte_esz = abi->cte_esz;
2547        size_t max_size, read = 0;
2548        gpa_t gpa;
2549        int ret;
2550
2551        if (!(baser & GITS_BASER_VALID))
2552                return 0;
2553
2554        gpa = GITS_BASER_ADDR_48_to_52(baser);
2555
2556        max_size = GITS_BASER_NR_PAGES(baser) * SZ_64K;
2557
2558        while (read < max_size) {
2559                ret = vgic_its_restore_cte(its, gpa, cte_esz);
2560                if (ret <= 0)
2561                        break;
2562                gpa += cte_esz;
2563                read += cte_esz;
2564        }
2565
2566        if (ret > 0)
2567                return 0;
2568
2569        return ret;
2570}
2571
2572/**
2573 * vgic_its_save_tables_v0 - Save the ITS tables into guest RAM
2574 * according to v0 ABI
2575 */
2576static int vgic_its_save_tables_v0(struct vgic_its *its)
2577{
2578        int ret;
2579
2580        ret = vgic_its_save_device_tables(its);
2581        if (ret)
2582                return ret;
2583
2584        return vgic_its_save_collection_table(its);
2585}
2586
2587/**
2588 * vgic_its_restore_tables_v0 - Restore the ITS tables from guest RAM
2589 * to internal data structs according to V0 ABI
2590 *
2591 */
2592static int vgic_its_restore_tables_v0(struct vgic_its *its)
2593{
2594        int ret;
2595
2596        ret = vgic_its_restore_collection_table(its);
2597        if (ret)
2598                return ret;
2599
2600        return vgic_its_restore_device_tables(its);
2601}
2602
2603static int vgic_its_commit_v0(struct vgic_its *its)
2604{
2605        const struct vgic_its_abi *abi;
2606
2607        abi = vgic_its_get_abi(its);
2608        its->baser_coll_table &= ~GITS_BASER_ENTRY_SIZE_MASK;
2609        its->baser_device_table &= ~GITS_BASER_ENTRY_SIZE_MASK;
2610
2611        its->baser_coll_table |= (GIC_ENCODE_SZ(abi->cte_esz, 5)
2612                                        << GITS_BASER_ENTRY_SIZE_SHIFT);
2613
2614        its->baser_device_table |= (GIC_ENCODE_SZ(abi->dte_esz, 5)
2615                                        << GITS_BASER_ENTRY_SIZE_SHIFT);
2616        return 0;
2617}
2618
2619static void vgic_its_reset(struct kvm *kvm, struct vgic_its *its)
2620{
2621        /* We need to keep the ABI specific field values */
2622        its->baser_coll_table &= ~GITS_BASER_VALID;
2623        its->baser_device_table &= ~GITS_BASER_VALID;
2624        its->cbaser = 0;
2625        its->creadr = 0;
2626        its->cwriter = 0;
2627        its->enabled = 0;
2628        vgic_its_free_device_list(kvm, its);
2629        vgic_its_free_collection_list(kvm, its);
2630}
2631
2632static int vgic_its_has_attr(struct kvm_device *dev,
2633                             struct kvm_device_attr *attr)
2634{
2635        switch (attr->group) {
2636        case KVM_DEV_ARM_VGIC_GRP_ADDR:
2637                switch (attr->attr) {
2638                case KVM_VGIC_ITS_ADDR_TYPE:
2639                        return 0;
2640                }
2641                break;
2642        case KVM_DEV_ARM_VGIC_GRP_CTRL:
2643                switch (attr->attr) {
2644                case KVM_DEV_ARM_VGIC_CTRL_INIT:
2645                        return 0;
2646                case KVM_DEV_ARM_ITS_CTRL_RESET:
2647                        return 0;
2648                case KVM_DEV_ARM_ITS_SAVE_TABLES:
2649                        return 0;
2650                case KVM_DEV_ARM_ITS_RESTORE_TABLES:
2651                        return 0;
2652                }
2653                break;
2654        case KVM_DEV_ARM_VGIC_GRP_ITS_REGS:
2655                return vgic_its_has_attr_regs(dev, attr);
2656        }
2657        return -ENXIO;
2658}
2659
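/*
 * Handle the KVM_DEV_ARM_VGIC_GRP_CTRL attributes: RESET, SAVE_TABLES and
 * RESTORE_TABLES all run with the its_lock held and every VCPU locked, so
 * that the ITS state cannot change under our feet.
 */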
2660static int vgic_its_ctrl(struct kvm *kvm, struct vgic_its *its, u64 attr)
2661{
2662        const struct vgic_its_abi *abi = vgic_its_get_abi(its);
2663        int ret = 0;
2664
2665        if (attr == KVM_DEV_ARM_VGIC_CTRL_INIT) /* Nothing to do */
2666                return 0;
2667
2668        mutex_lock(&kvm->lock);
2669        mutex_lock(&its->its_lock);
2670
2671        if (!lock_all_vcpus(kvm)) {
2672                mutex_unlock(&its->its_lock);
2673                mutex_unlock(&kvm->lock);
2674                return -EBUSY;
2675        }
2676
2677        switch (attr) {
2678        case KVM_DEV_ARM_ITS_CTRL_RESET:
2679                vgic_its_reset(kvm, its);
2680                break;
2681        case KVM_DEV_ARM_ITS_SAVE_TABLES:
2682                ret = abi->save_tables(its);
2683                break;
2684        case KVM_DEV_ARM_ITS_RESTORE_TABLES:
2685                ret = abi->restore_tables(its);
2686                break;
2687        }
2688
2689        unlock_all_vcpus(kvm);
2690        mutex_unlock(&its->its_lock);
2691        mutex_unlock(&kvm->lock);
2692        return ret;
2693}
2694
2695static int vgic_its_set_attr(struct kvm_device *dev,
2696                             struct kvm_device_attr *attr)
2697{
2698        struct vgic_its *its = dev->private;
2699        int ret;
2700
2701        switch (attr->group) {
2702        case KVM_DEV_ARM_VGIC_GRP_ADDR: {
2703                u64 __user *uaddr = (u64 __user *)(long)attr->addr;
2704                unsigned long type = (unsigned long)attr->attr;
2705                u64 addr;
2706
2707                if (type != KVM_VGIC_ITS_ADDR_TYPE)
2708                        return -ENODEV;
2709
2710                if (copy_from_user(&addr, uaddr, sizeof(addr)))
2711                        return -EFAULT;
2712
2713                ret = vgic_check_ioaddr(dev->kvm, &its->vgic_its_base,
2714                                        addr, SZ_64K);
2715                if (ret)
2716                        return ret;
2717
2718                return vgic_register_its_iodev(dev->kvm, its, addr);
2719        }
2720        case KVM_DEV_ARM_VGIC_GRP_CTRL:
2721                return vgic_its_ctrl(dev->kvm, its, attr->attr);
2722        case KVM_DEV_ARM_VGIC_GRP_ITS_REGS: {
2723                u64 __user *uaddr = (u64 __user *)(long)attr->addr;
2724                u64 reg;
2725
2726                if (get_user(reg, uaddr))
2727                        return -EFAULT;
2728
2729                return vgic_its_attr_regs_access(dev, attr, &reg, true);
2730        }
2731        }
2732        return -ENXIO;
2733}
2734
2735static int vgic_its_get_attr(struct kvm_device *dev,
2736                             struct kvm_device_attr *attr)
2737{
2738        switch (attr->group) {
2739        case KVM_DEV_ARM_VGIC_GRP_ADDR: {
2740                struct vgic_its *its = dev->private;
2741                u64 addr = its->vgic_its_base;
2742                u64 __user *uaddr = (u64 __user *)(long)attr->addr;
2743                unsigned long type = (unsigned long)attr->attr;
2744
2745                if (type != KVM_VGIC_ITS_ADDR_TYPE)
2746                        return -ENODEV;
2747
2748                if (copy_to_user(uaddr, &addr, sizeof(addr)))
2749                        return -EFAULT;
2750                break;
2751        }
2752        case KVM_DEV_ARM_VGIC_GRP_ITS_REGS: {
2753                u64 __user *uaddr = (u64 __user *)(long)attr->addr;
2754                u64 reg;
2755                int ret;
2756
2757                ret = vgic_its_attr_regs_access(dev, attr, &reg, false);
2758                if (ret)
2759                        return ret;
2760                return put_user(reg, uaddr);
2761        }
2762        default:
2763                return -ENXIO;
2764        }
2765
2766        return 0;
2767}
2768
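/*
 * Device ops exposed to userspace as KVM_DEV_TYPE_ARM_VGIC_ITS. A typical
 * VMM sequence (a sketch, not a complete example) is: KVM_CREATE_DEVICE,
 * then KVM_SET_DEVICE_ATTR(KVM_DEV_ARM_VGIC_GRP_ADDR, KVM_VGIC_ITS_ADDR_TYPE)
 * to place the ITS, and KVM_DEV_ARM_ITS_SAVE_TABLES / _RESTORE_TABLES in the
 * KVM_DEV_ARM_VGIC_GRP_CTRL group around save/restore.
 */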
2769static struct kvm_device_ops kvm_arm_vgic_its_ops = {
2770        .name = "kvm-arm-vgic-its",
2771        .create = vgic_its_create,
2772        .destroy = vgic_its_destroy,
2773        .set_attr = vgic_its_set_attr,
2774        .get_attr = vgic_its_get_attr,
2775        .has_attr = vgic_its_has_attr,
2776};
2777
2778int kvm_vgic_register_its_device(void)
2779{
2780        return kvm_register_device_ops(&kvm_arm_vgic_its_ops,
2781                                       KVM_DEV_TYPE_ARM_VGIC_ITS);
2782}
2783