linux/virt/kvm/arm/vgic/vgic-its.c
/*
 * GICv3 ITS emulation
 *
 * Copyright (C) 2015,2016 ARM Ltd.
 * Author: Andre Przywara <andre.przywara@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/cpu.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/uaccess.h>
#include <linux/list_sort.h>

#include <linux/irqchip/arm-gic-v3.h>

#include <asm/kvm_emulate.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_mmu.h>

#include "vgic.h"
#include "vgic-mmio.h"

static int vgic_its_save_tables_v0(struct vgic_its *its);
static int vgic_its_restore_tables_v0(struct vgic_its *its);
static int vgic_its_commit_v0(struct vgic_its *its);
static int update_lpi_config(struct kvm *kvm, struct vgic_irq *irq,
                             struct kvm_vcpu *filter_vcpu, bool needs_inv);

/*
 * Creates a new (reference to a) struct vgic_irq for a given LPI.
 * If this LPI is already mapped on another ITS, we increase its refcount
 * and return a pointer to the existing structure.
 * If this is a "new" LPI, we allocate and initialize a new struct vgic_irq.
 * This function returns a pointer to the _unlocked_ structure.
 */
static struct vgic_irq *vgic_add_lpi(struct kvm *kvm, u32 intid,
                                     struct kvm_vcpu *vcpu)
{
        struct vgic_dist *dist = &kvm->arch.vgic;
        struct vgic_irq *irq = vgic_get_irq(kvm, NULL, intid), *oldirq;
        unsigned long flags;
        int ret;

        /* In this case there is no put, since we keep the reference. */
        if (irq)
                return irq;

        irq = kzalloc(sizeof(struct vgic_irq), GFP_KERNEL);
        if (!irq)
                return ERR_PTR(-ENOMEM);

        INIT_LIST_HEAD(&irq->lpi_list);
        INIT_LIST_HEAD(&irq->ap_list);
        spin_lock_init(&irq->irq_lock);

        irq->config = VGIC_CONFIG_EDGE;
        kref_init(&irq->refcount);
        irq->intid = intid;
        irq->target_vcpu = vcpu;

        spin_lock_irqsave(&dist->lpi_list_lock, flags);

        /*
         * There could be a race with another vgic_add_lpi(), so we need to
         * check that we don't add a second list entry with the same LPI.
         */
        list_for_each_entry(oldirq, &dist->lpi_list_head, lpi_list) {
                if (oldirq->intid != intid)
                        continue;
                /* Someone was faster with adding this LPI, let's use that. */
                kfree(irq);
                irq = oldirq;

                /*
                 * This increases the refcount, the caller is expected to
                 * call vgic_put_irq() on the returned pointer once it's
                 * finished with the IRQ.
                 */
                vgic_get_irq_kref(irq);

                goto out_unlock;
        }

        list_add_tail(&irq->lpi_list, &dist->lpi_list_head);
        dist->lpi_list_count++;

out_unlock:
        spin_unlock_irqrestore(&dist->lpi_list_lock, flags);

        /*
         * We "cache" the configuration table entries in our struct vgic_irq's.
         * However we only have those structs for mapped IRQs, so we read in
         * the respective config data from memory here upon mapping the LPI.
         */
        ret = update_lpi_config(kvm, irq, NULL, false);
        if (ret)
                return ERR_PTR(ret);

        ret = vgic_v3_lpi_sync_pending_status(kvm, irq);
        if (ret)
                return ERR_PTR(ret);

        return irq;
}

struct its_device {
        struct list_head dev_list;

        /* the head for the list of ITTEs */
        struct list_head itt_head;
        u32 num_eventid_bits;
        gpa_t itt_addr;
        u32 device_id;
};

#define COLLECTION_NOT_MAPPED ((u32)~0)

struct its_collection {
        struct list_head coll_list;

        u32 collection_id;
        u32 target_addr;
};

#define its_is_collection_mapped(coll) ((coll) && \
                                ((coll)->target_addr != COLLECTION_NOT_MAPPED))

struct its_ite {
        struct list_head ite_list;

        struct vgic_irq *irq;
        struct its_collection *collection;
        u32 event_id;
};
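
/*
 * Taken together these structs mirror the guest's ITS tables: a DeviceID
 * selects an its_device, an EventID selects an its_ite within that device,
 * and the ITE's collection names the redistributor (i.e. the VCPU) the
 * mapped LPI is delivered to.
 */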

/**
 * struct vgic_its_abi - ITS ABI ops and settings
 * @cte_esz: collection table entry size
 * @dte_esz: device table entry size
 * @ite_esz: interrupt translation table entry size
 * @save_tables: save the ITS tables into guest RAM
 * @restore_tables: restore the ITS internal structs from tables
 *  stored in guest RAM
 * @commit: initialize the registers which expose the ABI settings,
 *  especially the entry sizes
 */
struct vgic_its_abi {
        int cte_esz;
        int dte_esz;
        int ite_esz;
        int (*save_tables)(struct vgic_its *its);
        int (*restore_tables)(struct vgic_its *its);
        int (*commit)(struct vgic_its *its);
};

static const struct vgic_its_abi its_table_abi_versions[] = {
        [0] = {.cte_esz = 8, .dte_esz = 8, .ite_esz = 8,
         .save_tables = vgic_its_save_tables_v0,
         .restore_tables = vgic_its_restore_tables_v0,
         .commit = vgic_its_commit_v0,
        },
};

#define NR_ITS_ABIS     ARRAY_SIZE(its_table_abi_versions)

inline const struct vgic_its_abi *vgic_its_get_abi(struct vgic_its *its)
{
        return &its_table_abi_versions[its->abi_rev];
}

int vgic_its_set_abi(struct vgic_its *its, int rev)
{
        const struct vgic_its_abi *abi;

        its->abi_rev = rev;
        abi = vgic_its_get_abi(its);
        return abi->commit(its);
}
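
/*
 * The ABI revision in use is reported to userspace in GITS_IIDR.Revision,
 * and userspace can pick another supported revision by writing that
 * register; see vgic_mmio_uaccess_write_its_iidr() below.
 */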

/*
 * Finds and returns a device in the device table for an ITS.
 * Must be called with the its_lock mutex held.
 */
static struct its_device *find_its_device(struct vgic_its *its, u32 device_id)
{
        struct its_device *device;

        list_for_each_entry(device, &its->device_list, dev_list)
                if (device_id == device->device_id)
                        return device;

        return NULL;
}

/*
 * Finds and returns an interrupt translation table entry (ITTE) for a given
 * Device ID/Event ID pair on an ITS.
 * Must be called with the its_lock mutex held.
 */
static struct its_ite *find_ite(struct vgic_its *its, u32 device_id,
                                  u32 event_id)
{
        struct its_device *device;
        struct its_ite *ite;

        device = find_its_device(its, device_id);
        if (device == NULL)
                return NULL;

        list_for_each_entry(ite, &device->itt_head, ite_list)
                if (ite->event_id == event_id)
                        return ite;

        return NULL;
}

/*
 * To be used as an iterator. Note that the macro omits the enclosing
 * parentheses and expands to two nested loops.
 */
#define for_each_lpi_its(dev, ite, its) \
        list_for_each_entry(dev, &(its)->device_list, dev_list) \
                list_for_each_entry(ite, &(dev)->itt_head, ite_list)
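
/*
 * Example usage (handle_ite() is a hypothetical callee; the loop body binds
 * to the innermost loop, so a 'break' there only leaves the per-device ITE
 * list):
 *
 *      for_each_lpi_its(device, ite, its)
 *              handle_ite(ite);
 */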

/*
 * We only implement 48 bits of PA at the moment, although the ITS
 * supports more. Let's be restrictive here.
 */
#define BASER_ADDRESS(x)        ((x) & GENMASK_ULL(47, 16))
#define CBASER_ADDRESS(x)       ((x) & GENMASK_ULL(47, 12))

#define GIC_LPI_OFFSET 8192

#define VITS_TYPER_IDBITS 16
#define VITS_TYPER_DEVBITS 16
#define VITS_DTE_MAX_DEVID_OFFSET       (BIT(14) - 1)
#define VITS_ITE_MAX_EVENTID_OFFSET     (BIT(16) - 1)
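
/*
 * GIC_LPI_OFFSET is the architected first LPI INTID; IDs below 8192 are
 * used for SGIs/PPIs/SPIs and special INTIDs. With 16 bits each for
 * DeviceIDs and EventIDs we advertise at most 65536 devices and 65536
 * events per device in GITS_TYPER.
 */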

/*
 * Finds and returns a collection in the ITS collection table.
 * Must be called with the its_lock mutex held.
 */
static struct its_collection *find_collection(struct vgic_its *its, int coll_id)
{
        struct its_collection *collection;

        list_for_each_entry(collection, &its->collection_list, coll_list) {
                if (coll_id == collection->collection_id)
                        return collection;
        }

        return NULL;
}

#define LPI_PROP_ENABLE_BIT(p)  ((p) & LPI_PROP_ENABLED)
#define LPI_PROP_PRIORITY(p)    ((p) & 0xfc)
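
/*
 * Each LPI has a one-byte entry in the guest's property table: the two
 * masks above extract the priority from bits [7:2] and the enable bit
 * from bit [0] of that byte.
 */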

/*
 * Reads the configuration data for a given LPI from guest memory and
 * updates the fields in struct vgic_irq.
 * If filter_vcpu is not NULL, applies only if the IRQ is targeting this
 * VCPU. Unconditionally applies if filter_vcpu is NULL.
 */
static int update_lpi_config(struct kvm *kvm, struct vgic_irq *irq,
                             struct kvm_vcpu *filter_vcpu, bool needs_inv)
{
        u64 propbase = GICR_PROPBASER_ADDRESS(kvm->arch.vgic.propbaser);
        u8 prop;
        int ret;
        unsigned long flags;

        ret = kvm_read_guest_lock(kvm, propbase + irq->intid - GIC_LPI_OFFSET,
                                  &prop, 1);

        if (ret)
                return ret;

        spin_lock_irqsave(&irq->irq_lock, flags);

        if (!filter_vcpu || filter_vcpu == irq->target_vcpu) {
                irq->priority = LPI_PROP_PRIORITY(prop);
                irq->enabled = LPI_PROP_ENABLE_BIT(prop);

                if (!irq->hw) {
                        vgic_queue_irq_unlock(kvm, irq, flags);
                        return 0;
                }
        }

        spin_unlock_irqrestore(&irq->irq_lock, flags);

        if (irq->hw)
                return its_prop_update_vlpi(irq->host_irq, prop, needs_inv);

        return 0;
}

/*
 * Create a snapshot of the current LPIs targeting @vcpu, so that we can
 * enumerate those LPIs without holding any lock.
 * Returns their number and puts the kmalloc'ed array into intid_ptr.
 */
static int vgic_copy_lpi_list(struct kvm_vcpu *vcpu, u32 **intid_ptr)
{
        struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
        struct vgic_irq *irq;
        unsigned long flags;
        u32 *intids;
        int irq_count, i = 0;

        /*
         * There is an obvious race between allocating the array and LPIs
         * being mapped/unmapped. If we ended up here as a result of a
         * command, we're safe (locks are held, preventing another
         * command). If coming from another path (such as enabling LPIs),
         * we must be careful not to overrun the array.
         */
        irq_count = READ_ONCE(dist->lpi_list_count);
        intids = kmalloc_array(irq_count, sizeof(intids[0]), GFP_KERNEL);
        if (!intids)
                return -ENOMEM;

        spin_lock_irqsave(&dist->lpi_list_lock, flags);
        list_for_each_entry(irq, &dist->lpi_list_head, lpi_list) {
                if (i == irq_count)
                        break;
                /* We don't need to "get" the IRQ, as we hold the list lock. */
                if (irq->target_vcpu != vcpu)
                        continue;
                intids[i++] = irq->intid;
        }
        spin_unlock_irqrestore(&dist->lpi_list_lock, flags);

        *intid_ptr = intids;
        return i;
}

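/*
 * Note that for a hardware-forwarded (GICv4) LPI, changing the target VCPU
 * also requires repointing the host-side VLPI mapping at the new vPE; that
 * is what the irq->hw branch below does.
 */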
static int update_affinity(struct vgic_irq *irq, struct kvm_vcpu *vcpu)
{
        int ret = 0;
        unsigned long flags;

        spin_lock_irqsave(&irq->irq_lock, flags);
        irq->target_vcpu = vcpu;
        spin_unlock_irqrestore(&irq->irq_lock, flags);

        if (irq->hw) {
                struct its_vlpi_map map;

                ret = its_get_vlpi(irq->host_irq, &map);
                if (ret)
                        return ret;

                map.vpe = &vcpu->arch.vgic_cpu.vgic_v3.its_vpe;

                ret = its_map_vlpi(irq->host_irq, &map);
        }

        return ret;
}

/*
 * Promotes the ITS view of affinity of an ITTE (which redistributor this LPI
 * is targeting) to the VGIC's view, which deals with target VCPUs.
 * Needs to be called whenever either the collection for an LPI has
 * changed or the collection itself has been retargeted.
 */
static void update_affinity_ite(struct kvm *kvm, struct its_ite *ite)
{
        struct kvm_vcpu *vcpu;

        if (!its_is_collection_mapped(ite->collection))
                return;

        vcpu = kvm_get_vcpu(kvm, ite->collection->target_addr);
        update_affinity(ite->irq, vcpu);
}

/*
 * Updates the target VCPU for every LPI targeting this collection.
 * Must be called with the its_lock mutex held.
 */
static void update_affinity_collection(struct kvm *kvm, struct vgic_its *its,
                                       struct its_collection *coll)
{
        struct its_device *device;
        struct its_ite *ite;

        for_each_lpi_its(device, ite, its) {
                if (!ite->collection || coll != ite->collection)
                        continue;

                update_affinity_ite(kvm, ite);
        }
}

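/*
 * GICR_PROPBASER.IDbits holds the number of supported INTID bits minus one,
 * so the value computed below counts all interrupt IDs (including the 8192
 * non-LPI ones), capped at what we emulate (INTERRUPT_ID_BITS_ITS).
 */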
static u32 max_lpis_propbaser(u64 propbaser)
{
        int nr_idbits = (propbaser & 0x1f) + 1;

        return 1U << min(nr_idbits, INTERRUPT_ID_BITS_ITS);
}

/*
 * Sync the pending table pending bit of LPIs targeting @vcpu
 * with our own data structures. This relies on the LPIs having
 * been mapped before.
 */
static int its_sync_lpi_pending_table(struct kvm_vcpu *vcpu)
{
        gpa_t pendbase = GICR_PENDBASER_ADDRESS(vcpu->arch.vgic_cpu.pendbaser);
        struct vgic_irq *irq;
        int last_byte_offset = -1;
        int ret = 0;
        u32 *intids;
        int nr_irqs, i;
        unsigned long flags;
        u8 pendmask;

        nr_irqs = vgic_copy_lpi_list(vcpu, &intids);
        if (nr_irqs < 0)
                return nr_irqs;

        for (i = 0; i < nr_irqs; i++) {
                int byte_offset, bit_nr;

                byte_offset = intids[i] / BITS_PER_BYTE;
                bit_nr = intids[i] % BITS_PER_BYTE;

                /*
                 * For contiguously allocated LPIs chances are we just read
                 * this very same byte in the last iteration. Reuse that.
                 */
                if (byte_offset != last_byte_offset) {
                        ret = kvm_read_guest_lock(vcpu->kvm,
                                                  pendbase + byte_offset,
                                                  &pendmask, 1);
                        if (ret) {
                                kfree(intids);
                                return ret;
                        }
                        last_byte_offset = byte_offset;
                }

                irq = vgic_get_irq(vcpu->kvm, NULL, intids[i]);
                spin_lock_irqsave(&irq->irq_lock, flags);
                irq->pending_latch = pendmask & (1U << bit_nr);
                vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
                vgic_put_irq(vcpu->kvm, irq);
        }

        kfree(intids);

        return ret;
}
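
/*
 * Note on the arithmetic above: the pending table holds one bit per INTID,
 * indexed from the base of the table, so the LPI bits start at byte offset
 * GIC_LPI_OFFSET / 8 = 1KB and byte_offset/bit_nr are derived from the raw
 * INTID.
 */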

static unsigned long vgic_mmio_read_its_typer(struct kvm *kvm,
                                              struct vgic_its *its,
                                              gpa_t addr, unsigned int len)
{
        const struct vgic_its_abi *abi = vgic_its_get_abi(its);
        u64 reg = GITS_TYPER_PLPIS;

        /*
         * We use linear CPU numbers for redistributor addressing,
         * so GITS_TYPER.PTA is 0.
         * Also we force all PROPBASER registers to be the same, so
         * CommonLPIAff is 0 as well.
         * To avoid memory waste in the guest, we keep the number of IDBits and
         * DevBits low - at least for the time being.
         */
        reg |= GIC_ENCODE_SZ(VITS_TYPER_DEVBITS, 5) << GITS_TYPER_DEVBITS_SHIFT;
        reg |= GIC_ENCODE_SZ(VITS_TYPER_IDBITS, 5) << GITS_TYPER_IDBITS_SHIFT;
        reg |= GIC_ENCODE_SZ(abi->ite_esz, 4) << GITS_TYPER_ITT_ENTRY_SIZE_SHIFT;

        return extract_bytes(reg, addr & 7, len);
}

static unsigned long vgic_mmio_read_its_iidr(struct kvm *kvm,
                                             struct vgic_its *its,
                                             gpa_t addr, unsigned int len)
{
        u32 val;

        val = (its->abi_rev << GITS_IIDR_REV_SHIFT) & GITS_IIDR_REV_MASK;
        val |= (PRODUCT_ID_KVM << GITS_IIDR_PRODUCTID_SHIFT) | IMPLEMENTER_ARM;
        return val;
}

static int vgic_mmio_uaccess_write_its_iidr(struct kvm *kvm,
                                            struct vgic_its *its,
                                            gpa_t addr, unsigned int len,
                                            unsigned long val)
{
        u32 rev = GITS_IIDR_REV(val);

        if (rev >= NR_ITS_ABIS)
                return -EINVAL;
        return vgic_its_set_abi(its, rev);
}

static unsigned long vgic_mmio_read_its_idregs(struct kvm *kvm,
                                               struct vgic_its *its,
                                               gpa_t addr, unsigned int len)
{
        switch (addr & 0xffff) {
        case GITS_PIDR0:
                return 0x92;    /* part number, bits[7:0] */
        case GITS_PIDR1:
                return 0xb4;    /* part number, bits[11:8] */
        case GITS_PIDR2:
                return GIC_PIDR2_ARCH_GICv3 | 0x0b;
        case GITS_PIDR4:
                return 0x40;    /* This is a 64K software visible page */
        /* The following are the ID registers for (any) GIC. */
        case GITS_CIDR0:
                return 0x0d;
        case GITS_CIDR1:
                return 0xf0;
        case GITS_CIDR2:
                return 0x05;
        case GITS_CIDR3:
                return 0xb1;
        }

        return 0;
}

int vgic_its_resolve_lpi(struct kvm *kvm, struct vgic_its *its,
                         u32 devid, u32 eventid, struct vgic_irq **irq)
{
        struct kvm_vcpu *vcpu;
        struct its_ite *ite;

        if (!its->enabled)
                return -EBUSY;

        ite = find_ite(its, devid, eventid);
        if (!ite || !its_is_collection_mapped(ite->collection))
                return E_ITS_INT_UNMAPPED_INTERRUPT;

        vcpu = kvm_get_vcpu(kvm, ite->collection->target_addr);
        if (!vcpu)
                return E_ITS_INT_UNMAPPED_INTERRUPT;

        if (!vcpu->arch.vgic_cpu.lpis_enabled)
                return -EBUSY;

        *irq = ite->irq;
        return 0;
}

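/*
 * The MSI doorbell address is expected to be the GITS_TRANSLATER register
 * inside one of the guest's ITS frames; looking up the KVM IO device that
 * covers the written address therefore yields the ITS the MSI targets.
 */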
struct vgic_its *vgic_msi_to_its(struct kvm *kvm, struct kvm_msi *msi)
{
        u64 address;
        struct kvm_io_device *kvm_io_dev;
        struct vgic_io_device *iodev;

        if (!vgic_has_its(kvm))
                return ERR_PTR(-ENODEV);

        if (!(msi->flags & KVM_MSI_VALID_DEVID))
                return ERR_PTR(-EINVAL);

        address = (u64)msi->address_hi << 32 | msi->address_lo;

        kvm_io_dev = kvm_io_bus_get_dev(kvm, KVM_MMIO_BUS, address);
        if (!kvm_io_dev)
                return ERR_PTR(-EINVAL);

        if (kvm_io_dev->ops != &kvm_io_gic_ops)
                return ERR_PTR(-EINVAL);

        iodev = container_of(kvm_io_dev, struct vgic_io_device, dev);
        if (iodev->iodev_type != IODEV_ITS)
                return ERR_PTR(-EINVAL);

        return iodev->its;
}

/*
 * Find the target VCPU and the LPI number for a given devid/eventid pair
 * and make this IRQ pending, possibly injecting it.
 * Must be called with the its_lock mutex held.
 * Returns 0 on success, a positive error value for any ITS mapping-related
 * error and negative error values for generic errors.
 */
static int vgic_its_trigger_msi(struct kvm *kvm, struct vgic_its *its,
                                u32 devid, u32 eventid)
{
        struct vgic_irq *irq = NULL;
        unsigned long flags;
        int err;

        err = vgic_its_resolve_lpi(kvm, its, devid, eventid, &irq);
        if (err)
                return err;

        if (irq->hw)
                return irq_set_irqchip_state(irq->host_irq,
                                             IRQCHIP_STATE_PENDING, true);

        spin_lock_irqsave(&irq->irq_lock, flags);
        irq->pending_latch = true;
        vgic_queue_irq_unlock(kvm, irq, flags);

        return 0;
}

/*
 * Queries the KVM IO bus framework to get the ITS pointer from the given
 * doorbell address.
 * We then call vgic_its_trigger_msi() with the decoded data.
 * According to the KVM_SIGNAL_MSI API description, this returns 1 on success.
 */
int vgic_its_inject_msi(struct kvm *kvm, struct kvm_msi *msi)
{
        struct vgic_its *its;
        int ret;

        its = vgic_msi_to_its(kvm, msi);
        if (IS_ERR(its))
                return PTR_ERR(its);

        mutex_lock(&its->its_lock);
        ret = vgic_its_trigger_msi(kvm, its, msi->devid, msi->data);
        mutex_unlock(&its->its_lock);

        if (ret < 0)
                return ret;

        /*
         * KVM_SIGNAL_MSI demands a return value > 0 for success and 0
         * if the guest has blocked the MSI. So we map any LPI mapping-
         * related error to that.
         */
        if (ret)
                return 0;
        else
                return 1;
}

/* Requires the its_lock to be held. */
static void its_free_ite(struct kvm *kvm, struct its_ite *ite)
{
        list_del(&ite->ite_list);

        /* This put matches the get in vgic_add_lpi. */
        if (ite->irq) {
                if (ite->irq->hw)
                        WARN_ON(its_unmap_vlpi(ite->irq->host_irq));

                vgic_put_irq(kvm, ite->irq);
        }

        kfree(ite);
}

static u64 its_cmd_mask_field(u64 *its_cmd, int word, int shift, int size)
{
        return (le64_to_cpu(its_cmd[word]) >> shift) & (BIT_ULL(size) - 1);
}

#define its_cmd_get_command(cmd)        its_cmd_mask_field(cmd, 0,  0,  8)
#define its_cmd_get_deviceid(cmd)       its_cmd_mask_field(cmd, 0, 32, 32)
#define its_cmd_get_size(cmd)           (its_cmd_mask_field(cmd, 1,  0,  5) + 1)
#define its_cmd_get_id(cmd)             its_cmd_mask_field(cmd, 1,  0, 32)
#define its_cmd_get_physical_id(cmd)    its_cmd_mask_field(cmd, 1, 32, 32)
#define its_cmd_get_collection(cmd)     its_cmd_mask_field(cmd, 2,  0, 16)
#define its_cmd_get_ittaddr(cmd)        (its_cmd_mask_field(cmd, 2,  8, 44) << 8)
#define its_cmd_get_target_addr(cmd)    its_cmd_mask_field(cmd, 2, 16, 32)
#define its_cmd_get_validbit(cmd)       its_cmd_mask_field(cmd, 2, 63,  1)
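
/*
 * Each ITS command is 32 bytes long, i.e. four little-endian 64-bit
 * doublewords; the accessors above pick a field by doubleword index, bit
 * offset and width, byte-swapping as needed.
 */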

/*
 * The DISCARD command frees an Interrupt Translation Table Entry (ITTE).
 * Must be called with the its_lock mutex held.
 */
static int vgic_its_cmd_handle_discard(struct kvm *kvm, struct vgic_its *its,
                                       u64 *its_cmd)
{
        u32 device_id = its_cmd_get_deviceid(its_cmd);
        u32 event_id = its_cmd_get_id(its_cmd);
        struct its_ite *ite;

        ite = find_ite(its, device_id, event_id);
        if (ite && ite->collection) {
                /*
                 * Though the spec talks about removing the pending state, we
                 * don't bother here since we clear the ITTE anyway and the
                 * pending state is a property of the ITTE struct.
                 */
                its_free_ite(kvm, ite);
                return 0;
        }

        return E_ITS_DISCARD_UNMAPPED_INTERRUPT;
}

/*
 * The MOVI command moves an ITTE to a different collection.
 * Must be called with the its_lock mutex held.
 */
static int vgic_its_cmd_handle_movi(struct kvm *kvm, struct vgic_its *its,
                                    u64 *its_cmd)
{
        u32 device_id = its_cmd_get_deviceid(its_cmd);
        u32 event_id = its_cmd_get_id(its_cmd);
        u32 coll_id = its_cmd_get_collection(its_cmd);
        struct kvm_vcpu *vcpu;
        struct its_ite *ite;
        struct its_collection *collection;

        ite = find_ite(its, device_id, event_id);
        if (!ite)
                return E_ITS_MOVI_UNMAPPED_INTERRUPT;

        if (!its_is_collection_mapped(ite->collection))
                return E_ITS_MOVI_UNMAPPED_COLLECTION;

        collection = find_collection(its, coll_id);
        if (!its_is_collection_mapped(collection))
                return E_ITS_MOVI_UNMAPPED_COLLECTION;

        ite->collection = collection;
        vcpu = kvm_get_vcpu(kvm, collection->target_addr);

        return update_affinity(ite->irq, vcpu);
}

/*
 * Check whether an ID can be stored into the corresponding guest table.
 * For a direct table this is pretty easy, but gets a bit nasty for
 * indirect tables. We check whether the resulting guest physical address
 * is actually valid (covered by a memslot and guest accessible).
 * For this we have to read the respective first level entry.
 */
static bool vgic_its_check_id(struct vgic_its *its, u64 baser, u32 id,
                              gpa_t *eaddr)
{
        int l1_tbl_size = GITS_BASER_NR_PAGES(baser) * SZ_64K;
        u64 indirect_ptr, type = GITS_BASER_TYPE(baser);
        int esz = GITS_BASER_ENTRY_SIZE(baser);
        int index;
        gfn_t gfn;

        switch (type) {
        case GITS_BASER_TYPE_DEVICE:
                if (id >= BIT_ULL(VITS_TYPER_DEVBITS))
                        return false;
                break;
        case GITS_BASER_TYPE_COLLECTION:
                /* as GITS_TYPER.CIL == 0, ITS supports 16-bit collection ID */
                if (id >= BIT_ULL(16))
                        return false;
                break;
        default:
                return false;
        }

        if (!(baser & GITS_BASER_INDIRECT)) {
                phys_addr_t addr;

                if (id >= (l1_tbl_size / esz))
                        return false;

                addr = BASER_ADDRESS(baser) + id * esz;
                gfn = addr >> PAGE_SHIFT;

                if (eaddr)
                        *eaddr = addr;
                return kvm_is_visible_gfn(its->dev->kvm, gfn);
        }

        /* calculate and check the index into the 1st level */
        index = id / (SZ_64K / esz);
        if (index >= (l1_tbl_size / sizeof(u64)))
                return false;

        /* Each 1st level entry is represented by a 64-bit value. */
        if (kvm_read_guest_lock(its->dev->kvm,
                           BASER_ADDRESS(baser) + index * sizeof(indirect_ptr),
                           &indirect_ptr, sizeof(indirect_ptr)))
                return false;

        indirect_ptr = le64_to_cpu(indirect_ptr);

        /* check the valid bit of the first level entry */
        if (!(indirect_ptr & BIT_ULL(63)))
                return false;

        /*
         * Mask the guest physical address and calculate the frame number.
         * Any address beyond our supported 48 bits of PA will be caught
         * by the actual check in the final step.
         */
        indirect_ptr &= GENMASK_ULL(51, 16);

        /* Find the address of the actual entry */
        index = id % (SZ_64K / esz);
        indirect_ptr += index * esz;
        gfn = indirect_ptr >> PAGE_SHIFT;

        if (eaddr)
                *eaddr = indirect_ptr;
        return kvm_is_visible_gfn(its->dev->kvm, gfn);
}

static int vgic_its_alloc_collection(struct vgic_its *its,
                                     struct its_collection **colp,
                                     u32 coll_id)
{
        struct its_collection *collection;

        if (!vgic_its_check_id(its, its->baser_coll_table, coll_id, NULL))
                return E_ITS_MAPC_COLLECTION_OOR;

        collection = kzalloc(sizeof(*collection), GFP_KERNEL);
        if (!collection)
                return -ENOMEM;

        collection->collection_id = coll_id;
        collection->target_addr = COLLECTION_NOT_MAPPED;

        list_add_tail(&collection->coll_list, &its->collection_list);
        *colp = collection;

        return 0;
}

static void vgic_its_free_collection(struct vgic_its *its, u32 coll_id)
{
        struct its_collection *collection;
        struct its_device *device;
        struct its_ite *ite;

        /*
         * Clearing the mapping for that collection ID removes the
         * entry from the list. If there wasn't any before, we can
         * go home early.
         */
        collection = find_collection(its, coll_id);
        if (!collection)
                return;

        for_each_lpi_its(device, ite, its)
                if (ite->collection &&
                    ite->collection->collection_id == coll_id)
                        ite->collection = NULL;

        list_del(&collection->coll_list);
        kfree(collection);
}

/* Must be called with its_lock mutex held */
static struct its_ite *vgic_its_alloc_ite(struct its_device *device,
                                          struct its_collection *collection,
                                          u32 event_id)
{
        struct its_ite *ite;

        ite = kzalloc(sizeof(*ite), GFP_KERNEL);
        if (!ite)
                return ERR_PTR(-ENOMEM);

        ite->event_id   = event_id;
        ite->collection = collection;

        list_add_tail(&ite->ite_list, &device->itt_head);
        return ite;
}

/*
 * The MAPTI and MAPI commands map LPIs to ITTEs.
 * Must be called with its_lock mutex held.
 */
static int vgic_its_cmd_handle_mapi(struct kvm *kvm, struct vgic_its *its,
                                    u64 *its_cmd)
{
        u32 device_id = its_cmd_get_deviceid(its_cmd);
        u32 event_id = its_cmd_get_id(its_cmd);
        u32 coll_id = its_cmd_get_collection(its_cmd);
        struct its_ite *ite;
        struct kvm_vcpu *vcpu = NULL;
        struct its_device *device;
        struct its_collection *collection, *new_coll = NULL;
        struct vgic_irq *irq;
        int lpi_nr;

        device = find_its_device(its, device_id);
        if (!device)
                return E_ITS_MAPTI_UNMAPPED_DEVICE;

        if (event_id >= BIT_ULL(device->num_eventid_bits))
                return E_ITS_MAPTI_ID_OOR;

        if (its_cmd_get_command(its_cmd) == GITS_CMD_MAPTI)
                lpi_nr = its_cmd_get_physical_id(its_cmd);
        else
                lpi_nr = event_id;
        if (lpi_nr < GIC_LPI_OFFSET ||
            lpi_nr >= max_lpis_propbaser(kvm->arch.vgic.propbaser))
                return E_ITS_MAPTI_PHYSICALID_OOR;

        /* If there is an existing mapping, behavior is UNPREDICTABLE. */
        if (find_ite(its, device_id, event_id))
                return 0;

        collection = find_collection(its, coll_id);
        if (!collection) {
                int ret = vgic_its_alloc_collection(its, &collection, coll_id);
                if (ret)
                        return ret;
                new_coll = collection;
        }

        ite = vgic_its_alloc_ite(device, collection, event_id);
        if (IS_ERR(ite)) {
                if (new_coll)
                        vgic_its_free_collection(its, coll_id);
                return PTR_ERR(ite);
        }

        if (its_is_collection_mapped(collection))
                vcpu = kvm_get_vcpu(kvm, collection->target_addr);

        irq = vgic_add_lpi(kvm, lpi_nr, vcpu);
        if (IS_ERR(irq)) {
                if (new_coll)
                        vgic_its_free_collection(its, coll_id);
                its_free_ite(kvm, ite);
                return PTR_ERR(irq);
        }
        ite->irq = irq;

        return 0;
}

/* Requires the its_lock to be held. */
static void vgic_its_free_device(struct kvm *kvm, struct its_device *device)
{
        struct its_ite *ite, *temp;

        /*
         * The spec says that unmapping a device that still has valid
         * ITTEs associated with it is UNPREDICTABLE. We remove all ITTEs,
         * since we cannot leave the memory unreferenced.
         */
        list_for_each_entry_safe(ite, temp, &device->itt_head, ite_list)
                its_free_ite(kvm, ite);

        list_del(&device->dev_list);
        kfree(device);
}

/* its lock must be held */
static void vgic_its_free_device_list(struct kvm *kvm, struct vgic_its *its)
{
        struct its_device *cur, *temp;

        list_for_each_entry_safe(cur, temp, &its->device_list, dev_list)
                vgic_its_free_device(kvm, cur);
}

/* its lock must be held */
static void vgic_its_free_collection_list(struct kvm *kvm, struct vgic_its *its)
{
        struct its_collection *cur, *temp;

        list_for_each_entry_safe(cur, temp, &its->collection_list, coll_list)
                vgic_its_free_collection(its, cur->collection_id);
}

/* Must be called with its_lock mutex held */
static struct its_device *vgic_its_alloc_device(struct vgic_its *its,
                                                u32 device_id, gpa_t itt_addr,
                                                u8 num_eventid_bits)
{
        struct its_device *device;

        device = kzalloc(sizeof(*device), GFP_KERNEL);
        if (!device)
                return ERR_PTR(-ENOMEM);

        device->device_id = device_id;
        device->itt_addr = itt_addr;
        device->num_eventid_bits = num_eventid_bits;
        INIT_LIST_HEAD(&device->itt_head);

        list_add_tail(&device->dev_list, &its->device_list);
        return device;
}

/*
 * MAPD maps or unmaps a device ID to Interrupt Translation Tables (ITTs).
 * Must be called with the its_lock mutex held.
 */
static int vgic_its_cmd_handle_mapd(struct kvm *kvm, struct vgic_its *its,
                                    u64 *its_cmd)
{
        u32 device_id = its_cmd_get_deviceid(its_cmd);
        bool valid = its_cmd_get_validbit(its_cmd);
        u8 num_eventid_bits = its_cmd_get_size(its_cmd);
        gpa_t itt_addr = its_cmd_get_ittaddr(its_cmd);
        struct its_device *device;

        if (!vgic_its_check_id(its, its->baser_device_table, device_id, NULL))
                return E_ITS_MAPD_DEVICE_OOR;

        if (valid && num_eventid_bits > VITS_TYPER_IDBITS)
                return E_ITS_MAPD_ITTSIZE_OOR;

        device = find_its_device(its, device_id);

        /*
         * The spec says that calling MAPD on an already mapped device
         * invalidates all cached data for this device. We implement this
         * by removing the mapping and re-establishing it.
         */
        if (device)
                vgic_its_free_device(kvm, device);

        /*
         * The spec does not say whether unmapping an unmapped device
         * is an error, so we are done in any case.
         */
        if (!valid)
                return 0;

        device = vgic_its_alloc_device(its, device_id, itt_addr,
                                       num_eventid_bits);

        return PTR_ERR_OR_ZERO(device);
}

/*
 * The MAPC command maps collection IDs to redistributors.
 * Must be called with the its_lock mutex held.
 */
static int vgic_its_cmd_handle_mapc(struct kvm *kvm, struct vgic_its *its,
                                    u64 *its_cmd)
{
        u16 coll_id;
        u32 target_addr;
        struct its_collection *collection;
        bool valid;

        valid = its_cmd_get_validbit(its_cmd);
        coll_id = its_cmd_get_collection(its_cmd);
        target_addr = its_cmd_get_target_addr(its_cmd);

        if (target_addr >= atomic_read(&kvm->online_vcpus))
                return E_ITS_MAPC_PROCNUM_OOR;

        if (!valid) {
                vgic_its_free_collection(its, coll_id);
        } else {
                collection = find_collection(its, coll_id);

                if (!collection) {
                        int ret;

                        ret = vgic_its_alloc_collection(its, &collection,
                                                        coll_id);
                        if (ret)
                                return ret;
                        collection->target_addr = target_addr;
                } else {
                        collection->target_addr = target_addr;
                        update_affinity_collection(kvm, its, collection);
                }
        }

        return 0;
}

/*
 * The CLEAR command removes the pending state for a particular LPI.
 * Must be called with the its_lock mutex held.
 */
static int vgic_its_cmd_handle_clear(struct kvm *kvm, struct vgic_its *its,
                                     u64 *its_cmd)
{
        u32 device_id = its_cmd_get_deviceid(its_cmd);
        u32 event_id = its_cmd_get_id(its_cmd);
        struct its_ite *ite;

        ite = find_ite(its, device_id, event_id);
        if (!ite)
                return E_ITS_CLEAR_UNMAPPED_INTERRUPT;

        ite->irq->pending_latch = false;

        if (ite->irq->hw)
                return irq_set_irqchip_state(ite->irq->host_irq,
                                             IRQCHIP_STATE_PENDING, false);

        return 0;
}

/*
 * The INV command syncs the configuration bits from the memory table.
 * Must be called with the its_lock mutex held.
 */
static int vgic_its_cmd_handle_inv(struct kvm *kvm, struct vgic_its *its,
                                   u64 *its_cmd)
{
        u32 device_id = its_cmd_get_deviceid(its_cmd);
        u32 event_id = its_cmd_get_id(its_cmd);
        struct its_ite *ite;

        ite = find_ite(its, device_id, event_id);
        if (!ite)
                return E_ITS_INV_UNMAPPED_INTERRUPT;

        return update_lpi_config(kvm, ite->irq, NULL, true);
}

/*
 * The INVALL command requests flushing of all IRQ data in this collection.
 * Find the VCPU mapped to that collection, then iterate over the VM's list
 * of mapped LPIs and update the configuration for each IRQ which targets
 * the specified vcpu. The configuration will be read from the in-memory
 * configuration table.
 * Must be called with the its_lock mutex held.
 */
static int vgic_its_cmd_handle_invall(struct kvm *kvm, struct vgic_its *its,
                                      u64 *its_cmd)
{
        u32 coll_id = its_cmd_get_collection(its_cmd);
        struct its_collection *collection;
        struct kvm_vcpu *vcpu;
        struct vgic_irq *irq;
        u32 *intids;
        int irq_count, i;

        collection = find_collection(its, coll_id);
        if (!its_is_collection_mapped(collection))
                return E_ITS_INVALL_UNMAPPED_COLLECTION;

        vcpu = kvm_get_vcpu(kvm, collection->target_addr);

        irq_count = vgic_copy_lpi_list(vcpu, &intids);
        if (irq_count < 0)
                return irq_count;

        for (i = 0; i < irq_count; i++) {
                irq = vgic_get_irq(kvm, NULL, intids[i]);
                if (!irq)
                        continue;
                update_lpi_config(kvm, irq, vcpu, false);
                vgic_put_irq(kvm, irq);
        }

        kfree(intids);

        if (vcpu->arch.vgic_cpu.vgic_v3.its_vpe.its_vm)
                its_invall_vpe(&vcpu->arch.vgic_cpu.vgic_v3.its_vpe);

        return 0;
}

/*
 * The MOVALL command moves the pending state of all IRQs targeting one
 * redistributor to another. We don't hold the pending state in the VCPUs,
 * but in the IRQs instead, so there is really not much to do for us here.
 * However the spec says that no IRQ may target the old redistributor
 * afterwards, so we make sure that no LPI is using the associated target_vcpu.
 * This command affects all LPIs in the system that target that redistributor.
 */
static int vgic_its_cmd_handle_movall(struct kvm *kvm, struct vgic_its *its,
                                      u64 *its_cmd)
{
        u32 target1_addr = its_cmd_get_target_addr(its_cmd);
        u32 target2_addr = its_cmd_mask_field(its_cmd, 3, 16, 32);
        struct kvm_vcpu *vcpu1, *vcpu2;
        struct vgic_irq *irq;
        u32 *intids;
        int irq_count, i;

        if (target1_addr >= atomic_read(&kvm->online_vcpus) ||
            target2_addr >= atomic_read(&kvm->online_vcpus))
                return E_ITS_MOVALL_PROCNUM_OOR;

        if (target1_addr == target2_addr)
                return 0;

        vcpu1 = kvm_get_vcpu(kvm, target1_addr);
        vcpu2 = kvm_get_vcpu(kvm, target2_addr);

        irq_count = vgic_copy_lpi_list(vcpu1, &intids);
        if (irq_count < 0)
                return irq_count;

        for (i = 0; i < irq_count; i++) {
                irq = vgic_get_irq(kvm, NULL, intids[i]);

                update_affinity(irq, vcpu2);

                vgic_put_irq(kvm, irq);
        }

        kfree(intids);
        return 0;
}

/*
 * The INT command injects the LPI associated with that DevID/EvID pair.
 * Must be called with the its_lock mutex held.
 */
static int vgic_its_cmd_handle_int(struct kvm *kvm, struct vgic_its *its,
                                   u64 *its_cmd)
{
        u32 msi_data = its_cmd_get_id(its_cmd);
        u64 msi_devid = its_cmd_get_deviceid(its_cmd);

        return vgic_its_trigger_msi(kvm, its, msi_devid, msi_data);
}

/*
 * This function is called with the its_cmd lock held, but the ITS data
 * structure lock dropped.
 */
static int vgic_its_handle_command(struct kvm *kvm, struct vgic_its *its,
                                   u64 *its_cmd)
{
        int ret = -ENODEV;

        mutex_lock(&its->its_lock);
        switch (its_cmd_get_command(its_cmd)) {
        case GITS_CMD_MAPD:
                ret = vgic_its_cmd_handle_mapd(kvm, its, its_cmd);
                break;
        case GITS_CMD_MAPC:
                ret = vgic_its_cmd_handle_mapc(kvm, its, its_cmd);
                break;
        case GITS_CMD_MAPI:
        case GITS_CMD_MAPTI:
                ret = vgic_its_cmd_handle_mapi(kvm, its, its_cmd);
                break;
        case GITS_CMD_MOVI:
                ret = vgic_its_cmd_handle_movi(kvm, its, its_cmd);
                break;
        case GITS_CMD_DISCARD:
                ret = vgic_its_cmd_handle_discard(kvm, its, its_cmd);
                break;
        case GITS_CMD_CLEAR:
                ret = vgic_its_cmd_handle_clear(kvm, its, its_cmd);
                break;
        case GITS_CMD_MOVALL:
                ret = vgic_its_cmd_handle_movall(kvm, its, its_cmd);
                break;
        case GITS_CMD_INT:
                ret = vgic_its_cmd_handle_int(kvm, its, its_cmd);
                break;
        case GITS_CMD_INV:
                ret = vgic_its_cmd_handle_inv(kvm, its, its_cmd);
                break;
        case GITS_CMD_INVALL:
                ret = vgic_its_cmd_handle_invall(kvm, its, its_cmd);
                break;
        case GITS_CMD_SYNC:
                /* we ignore this command: we are in sync all of the time */
                ret = 0;
                break;
        }
        mutex_unlock(&its->its_lock);

        return ret;
}

static u64 vgic_sanitise_its_baser(u64 reg)
{
        reg = vgic_sanitise_field(reg, GITS_BASER_SHAREABILITY_MASK,
                                  GITS_BASER_SHAREABILITY_SHIFT,
                                  vgic_sanitise_shareability);
        reg = vgic_sanitise_field(reg, GITS_BASER_INNER_CACHEABILITY_MASK,
                                  GITS_BASER_INNER_CACHEABILITY_SHIFT,
                                  vgic_sanitise_inner_cacheability);
        reg = vgic_sanitise_field(reg, GITS_BASER_OUTER_CACHEABILITY_MASK,
                                  GITS_BASER_OUTER_CACHEABILITY_SHIFT,
                                  vgic_sanitise_outer_cacheability);

        /* Bits 15:12 contain bits 51:48 of the PA, which we don't support. */
        reg &= ~GENMASK_ULL(15, 12);

        /* We support only one (ITS) page size: 64K */
        reg = (reg & ~GITS_BASER_PAGE_SIZE_MASK) | GITS_BASER_PAGE_SIZE_64K;

        return reg;
}

static u64 vgic_sanitise_its_cbaser(u64 reg)
{
        reg = vgic_sanitise_field(reg, GITS_CBASER_SHAREABILITY_MASK,
                                  GITS_CBASER_SHAREABILITY_SHIFT,
                                  vgic_sanitise_shareability);
        reg = vgic_sanitise_field(reg, GITS_CBASER_INNER_CACHEABILITY_MASK,
                                  GITS_CBASER_INNER_CACHEABILITY_SHIFT,
                                  vgic_sanitise_inner_cacheability);
        reg = vgic_sanitise_field(reg, GITS_CBASER_OUTER_CACHEABILITY_MASK,
                                  GITS_CBASER_OUTER_CACHEABILITY_SHIFT,
                                  vgic_sanitise_outer_cacheability);

        /*
         * Sanitise the physical address to be 64k aligned.
         * Also limit the physical addresses to 48 bits.
         */
        reg &= ~(GENMASK_ULL(51, 48) | GENMASK_ULL(15, 12));

        return reg;
}

static unsigned long vgic_mmio_read_its_cbaser(struct kvm *kvm,
                                               struct vgic_its *its,
                                               gpa_t addr, unsigned int len)
{
        return extract_bytes(its->cbaser, addr & 7, len);
}

static void vgic_mmio_write_its_cbaser(struct kvm *kvm, struct vgic_its *its,
                                       gpa_t addr, unsigned int len,
                                       unsigned long val)
{
        /* When GITS_CTLR.Enable is 1, this register is RO. */
        if (its->enabled)
                return;

        mutex_lock(&its->cmd_lock);
        its->cbaser = update_64bit_reg(its->cbaser, addr & 7, len, val);
        its->cbaser = vgic_sanitise_its_cbaser(its->cbaser);
        its->creadr = 0;
        /*
         * CWRITER is architecturally UNKNOWN on reset, but we need to reset
         * it to CREADR to make sure we start with an empty command buffer.
         */
        its->cwriter = its->creadr;
        mutex_unlock(&its->cmd_lock);
}

#define ITS_CMD_BUFFER_SIZE(baser)      ((((baser) & 0xff) + 1) << 12)
#define ITS_CMD_SIZE                    32
#define ITS_CMD_OFFSET(reg)             ((reg) & GENMASK(19, 5))
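
/*
 * GITS_CBASER.Size (bits [7:0]) encodes the number of 4K pages allocated to
 * the command queue minus one, hence the "+ 1" and the shift by 12 above.
 * CREADR/CWRITER offsets are multiples of the 32-byte command size within
 * that (at most 1MB) buffer.
 */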

/* Must be called with the cmd_lock held. */
static void vgic_its_process_commands(struct kvm *kvm, struct vgic_its *its)
{
        gpa_t cbaser;
        u64 cmd_buf[4];

        /* Commands are only processed when the ITS is enabled. */
        if (!its->enabled)
                return;

        cbaser = CBASER_ADDRESS(its->cbaser);

        while (its->cwriter != its->creadr) {
                int ret = kvm_read_guest_lock(kvm, cbaser + its->creadr,
                                              cmd_buf, ITS_CMD_SIZE);
                /*
                 * If kvm_read_guest_lock() fails, this could be due to the
                 * guest programming a bogus value in CBASER or something
                 * else going wrong from which we cannot easily recover.
                 * According to section 6.3.2 in the GICv3 spec we can just
                 * ignore that command then.
                 */
                if (!ret)
                        vgic_its_handle_command(kvm, its, cmd_buf);

                its->creadr += ITS_CMD_SIZE;
                if (its->creadr == ITS_CMD_BUFFER_SIZE(its->cbaser))
                        its->creadr = 0;
        }
}

/*
 * By writing to CWRITER the guest announces new commands to be processed.
 * To avoid any races in the first place, we take the its_cmd lock, which
 * protects our ring buffer variables, so that there is only one user
 * per ITS handling commands at a given time.
 */
static void vgic_mmio_write_its_cwriter(struct kvm *kvm, struct vgic_its *its,
                                        gpa_t addr, unsigned int len,
                                        unsigned long val)
{
        u64 reg;

        if (!its)
                return;

        mutex_lock(&its->cmd_lock);

        reg = update_64bit_reg(its->cwriter, addr & 7, len, val);
        reg = ITS_CMD_OFFSET(reg);
        if (reg >= ITS_CMD_BUFFER_SIZE(its->cbaser)) {
                mutex_unlock(&its->cmd_lock);
                return;
        }
        its->cwriter = reg;

        vgic_its_process_commands(kvm, its);

        mutex_unlock(&its->cmd_lock);
}

static unsigned long vgic_mmio_read_its_cwriter(struct kvm *kvm,
                                                struct vgic_its *its,
                                                gpa_t addr, unsigned int len)
{
        return extract_bytes(its->cwriter, addr & 0x7, len);
}

static unsigned long vgic_mmio_read_its_creadr(struct kvm *kvm,
                                               struct vgic_its *its,
                                               gpa_t addr, unsigned int len)
{
        return extract_bytes(its->creadr, addr & 0x7, len);
}

static int vgic_mmio_uaccess_write_its_creadr(struct kvm *kvm,
                                              struct vgic_its *its,
                                              gpa_t addr, unsigned int len,
                                              unsigned long val)
{
        u32 cmd_offset;
        int ret = 0;

        mutex_lock(&its->cmd_lock);

        if (its->enabled) {
                ret = -EBUSY;
                goto out;
        }

        cmd_offset = ITS_CMD_OFFSET(val);
        if (cmd_offset >= ITS_CMD_BUFFER_SIZE(its->cbaser)) {
                ret = -EINVAL;
                goto out;
        }

        its->creadr = cmd_offset;
out:
        mutex_unlock(&its->cmd_lock);
        return ret;
}

#define BASER_INDEX(addr) (((addr) / sizeof(u64)) & 0x7)
static unsigned long vgic_mmio_read_its_baser(struct kvm *kvm,
                                              struct vgic_its *its,
                                              gpa_t addr, unsigned int len)
{
        u64 reg;

        switch (BASER_INDEX(addr)) {
        case 0:
                reg = its->baser_device_table;
                break;
        case 1:
                reg = its->baser_coll_table;
                break;
        default:
                reg = 0;
                break;
        }

        return extract_bytes(reg, addr & 7, len);
}
#define GITS_BASER_RO_MASK      (GENMASK_ULL(52, 48) | GENMASK_ULL(58, 56))
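/*
 * GITS_BASER.Type (bits [58:56]) and .Entry_Size (bits [52:48]) are
 * read-only, so guest writes below have them masked off and replaced with
 * our own values.
 */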
static void vgic_mmio_write_its_baser(struct kvm *kvm,
                                      struct vgic_its *its,
                                      gpa_t addr, unsigned int len,
                                      unsigned long val)
{
        const struct vgic_its_abi *abi = vgic_its_get_abi(its);
        u64 entry_size, table_type;
        u64 reg, *regptr, clearbits = 0;

        /* When GITS_CTLR.Enable is 1, we ignore write accesses. */
        if (its->enabled)
                return;

        switch (BASER_INDEX(addr)) {
        case 0:
                regptr = &its->baser_device_table;
                entry_size = abi->dte_esz;
                table_type = GITS_BASER_TYPE_DEVICE;
                break;
        case 1:
                regptr = &its->baser_coll_table;
                entry_size = abi->cte_esz;
                table_type = GITS_BASER_TYPE_COLLECTION;
                clearbits = GITS_BASER_INDIRECT;
                break;
        default:
                return;
        }

        reg = update_64bit_reg(*regptr, addr & 7, len, val);
        reg &= ~GITS_BASER_RO_MASK;
        reg &= ~clearbits;

        reg |= (entry_size - 1) << GITS_BASER_ENTRY_SIZE_SHIFT;
        reg |= table_type << GITS_BASER_TYPE_SHIFT;
        reg = vgic_sanitise_its_baser(reg);

        *regptr = reg;

        if (!(reg & GITS_BASER_VALID)) {
                /* Take the its_lock to prevent a race with a save/restore */
                mutex_lock(&its->its_lock);
                switch (table_type) {
                case GITS_BASER_TYPE_DEVICE:
                        vgic_its_free_device_list(kvm, its);
                        break;
                case GITS_BASER_TYPE_COLLECTION:
                        vgic_its_free_collection_list(kvm, its);
                        break;
                }
                mutex_unlock(&its->its_lock);
        }
}
1539
1540static unsigned long vgic_mmio_read_its_ctlr(struct kvm *kvm,
1541                                             struct vgic_its *its,
1542                                             gpa_t addr, unsigned int len)
1543{
1544        u32 reg = 0;
1545
1546        mutex_lock(&its->cmd_lock);
1547        if (its->creadr == its->cwriter)
1548                reg |= GITS_CTLR_QUIESCENT;
1549        if (its->enabled)
1550                reg |= GITS_CTLR_ENABLE;
1551        mutex_unlock(&its->cmd_lock);
1552
1553        return reg;
1554}
1555
1556static void vgic_mmio_write_its_ctlr(struct kvm *kvm, struct vgic_its *its,
1557                                     gpa_t addr, unsigned int len,
1558                                     unsigned long val)
1559{
1560        mutex_lock(&its->cmd_lock);
1561
1562        /*
1563         * It is UNPREDICTABLE to enable the ITS if CBASER or either of the
1564         * device/collection BASERs is invalid.
1565         */
1566        if (!its->enabled && (val & GITS_CTLR_ENABLE) &&
1567                (!(its->baser_device_table & GITS_BASER_VALID) ||
1568                 !(its->baser_coll_table & GITS_BASER_VALID) ||
1569                 !(its->cbaser & GITS_CBASER_VALID)))
1570                goto out;
1571
1572        its->enabled = !!(val & GITS_CTLR_ENABLE);
1573
1574        /*
1575         * Try to process any pending commands. This function bails out early
1576         * if the ITS is disabled or no commands have been queued.
1577         */
1578        vgic_its_process_commands(kvm, its);
1579
1580out:
1581        mutex_unlock(&its->cmd_lock);
1582}
1583
1584#define REGISTER_ITS_DESC(off, rd, wr, length, acc)             \
1585{                                                               \
1586        .reg_offset = off,                                      \
1587        .len = length,                                          \
1588        .access_flags = acc,                                    \
1589        .its_read = rd,                                         \
1590        .its_write = wr,                                        \
1591}
1592
1593#define REGISTER_ITS_DESC_UACCESS(off, rd, wr, uwr, length, acc)\
1594{                                                               \
1595        .reg_offset = off,                                      \
1596        .len = length,                                          \
1597        .access_flags = acc,                                    \
1598        .its_read = rd,                                         \
1599        .its_write = wr,                                        \
1600        .uaccess_its_write = uwr,                               \
1601}
1602
1603static void its_mmio_write_wi(struct kvm *kvm, struct vgic_its *its,
1604                              gpa_t addr, unsigned int len, unsigned long val)
1605{
1606        /* Ignore */
1607}
1608
1609static struct vgic_register_region its_registers[] = {
1610        REGISTER_ITS_DESC(GITS_CTLR,
1611                vgic_mmio_read_its_ctlr, vgic_mmio_write_its_ctlr, 4,
1612                VGIC_ACCESS_32bit),
1613        REGISTER_ITS_DESC_UACCESS(GITS_IIDR,
1614                vgic_mmio_read_its_iidr, its_mmio_write_wi,
1615                vgic_mmio_uaccess_write_its_iidr, 4,
1616                VGIC_ACCESS_32bit),
1617        REGISTER_ITS_DESC(GITS_TYPER,
1618                vgic_mmio_read_its_typer, its_mmio_write_wi, 8,
1619                VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
1620        REGISTER_ITS_DESC(GITS_CBASER,
1621                vgic_mmio_read_its_cbaser, vgic_mmio_write_its_cbaser, 8,
1622                VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
1623        REGISTER_ITS_DESC(GITS_CWRITER,
1624                vgic_mmio_read_its_cwriter, vgic_mmio_write_its_cwriter, 8,
1625                VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
1626        REGISTER_ITS_DESC_UACCESS(GITS_CREADR,
1627                vgic_mmio_read_its_creadr, its_mmio_write_wi,
1628                vgic_mmio_uaccess_write_its_creadr, 8,
1629                VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
1630        REGISTER_ITS_DESC(GITS_BASER,
1631                vgic_mmio_read_its_baser, vgic_mmio_write_its_baser, 0x40,
1632                VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
1633        REGISTER_ITS_DESC(GITS_IDREGS_BASE,
1634                vgic_mmio_read_its_idregs, its_mmio_write_wi, 0x30,
1635                VGIC_ACCESS_32bit),
1636};
1637
1638/*
 * This is called on setting the LPI enable bit in the redistributor,
 * at which point we read the guest's pending table back, unless the
 * guest has declared it to be zero (GICR_PENDBASER.PTZ).
 */
1639void vgic_enable_lpis(struct kvm_vcpu *vcpu)
1640{
1641        if (!(vcpu->arch.vgic_cpu.pendbaser & GICR_PENDBASER_PTZ))
1642                its_sync_lpi_pending_table(vcpu);
1643}
1644
1645static int vgic_register_its_iodev(struct kvm *kvm, struct vgic_its *its,
1646                                   u64 addr)
1647{
1648        struct vgic_io_device *iodev = &its->iodev;
1649        int ret;
1650
1651        mutex_lock(&kvm->slots_lock);
1652        if (!IS_VGIC_ADDR_UNDEF(its->vgic_its_base)) {
1653                ret = -EBUSY;
1654                goto out;
1655        }
1656
1657        its->vgic_its_base = addr;
1658        iodev->regions = its_registers;
1659        iodev->nr_regions = ARRAY_SIZE(its_registers);
1660        kvm_iodevice_init(&iodev->dev, &kvm_io_gic_ops);
1661
1662        iodev->base_addr = its->vgic_its_base;
1663        iodev->iodev_type = IODEV_ITS;
1664        iodev->its = its;
1665        ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, iodev->base_addr,
1666                                      KVM_VGIC_V3_ITS_SIZE, &iodev->dev);
1667out:
1668        mutex_unlock(&kvm->slots_lock);
1669
1670        return ret;
1671}
1672
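/*
 * Default attributes advertised for the ITS tables: inner write-back
 * cacheable, inner-shareable, and (for the BASERs) 64K pages.
 */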
1673#define INITIAL_BASER_VALUE                                               \
1674        (GIC_BASER_CACHEABILITY(GITS_BASER, INNER, RaWb)                | \
1675         GIC_BASER_CACHEABILITY(GITS_BASER, OUTER, SameAsInner)         | \
1676         GIC_BASER_SHAREABILITY(GITS_BASER, InnerShareable)             | \
1677         GITS_BASER_PAGE_SIZE_64K)
1678
1679#define INITIAL_PROPBASER_VALUE                                           \
1680        (GIC_BASER_CACHEABILITY(GICR_PROPBASER, INNER, RaWb)            | \
1681         GIC_BASER_CACHEABILITY(GICR_PROPBASER, OUTER, SameAsInner)     | \
1682         GIC_BASER_SHAREABILITY(GICR_PROPBASER, InnerShareable))
1683
1684static int vgic_its_create(struct kvm_device *dev, u32 type)
1685{
1686        struct vgic_its *its;
1687
1688        if (type != KVM_DEV_TYPE_ARM_VGIC_ITS)
1689                return -ENODEV;
1690
1691        its = kzalloc(sizeof(struct vgic_its), GFP_KERNEL);
1692        if (!its)
1693                return -ENOMEM;
1694
1695        if (vgic_initialized(dev->kvm)) {
1696                int ret = vgic_v4_init(dev->kvm);
1697                if (ret < 0) {
1698                        kfree(its);
1699                        return ret;
1700                }
1701        }
1702
1703        mutex_init(&its->its_lock);
1704        mutex_init(&its->cmd_lock);
1705
1706        its->vgic_its_base = VGIC_ADDR_UNDEF;
1707
1708        INIT_LIST_HEAD(&its->device_list);
1709        INIT_LIST_HEAD(&its->collection_list);
1710
1711        dev->kvm->arch.vgic.msis_require_devid = true;
1712        dev->kvm->arch.vgic.has_its = true;
1713        its->enabled = false;
1714        its->dev = dev;
1715
1716        its->baser_device_table = INITIAL_BASER_VALUE                   |
1717                ((u64)GITS_BASER_TYPE_DEVICE << GITS_BASER_TYPE_SHIFT);
1718        its->baser_coll_table = INITIAL_BASER_VALUE |
1719                ((u64)GITS_BASER_TYPE_COLLECTION << GITS_BASER_TYPE_SHIFT);
1720        dev->kvm->arch.vgic.propbaser = INITIAL_PROPBASER_VALUE;
1721
1722        dev->private = its;
1723
1724        return vgic_its_set_abi(its, NR_ITS_ABIS - 1);
1725}
1726
1727static void vgic_its_destroy(struct kvm_device *kvm_dev)
1728{
1729        struct kvm *kvm = kvm_dev->kvm;
1730        struct vgic_its *its = kvm_dev->private;
1731
1732        mutex_lock(&its->its_lock);
1733
1734        vgic_its_free_device_list(kvm, its);
1735        vgic_its_free_collection_list(kvm, its);
1736
1737        mutex_unlock(&its->its_lock);
1738        kfree(its);
1739}
1740
1741int vgic_its_has_attr_regs(struct kvm_device *dev,
1742                           struct kvm_device_attr *attr)
1743{
1744        const struct vgic_register_region *region;
1745        gpa_t offset = attr->attr;
1746        int align;
1747
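        /*
         * Registers below GITS_TYPER and the ID registers (from GITS_PIDR4
         * up) are 32 bits wide; everything in between is 64 bits wide.
         */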
1748        align = (offset < GITS_TYPER) || (offset >= GITS_PIDR4) ? 0x3 : 0x7;
1749
1750        if (offset & align)
1751                return -EINVAL;
1752
1753        region = vgic_find_mmio_region(its_registers,
1754                                       ARRAY_SIZE(its_registers),
1755                                       offset);
1756        if (!region)
1757                return -ENXIO;
1758
1759        return 0;
1760}
1761
1762int vgic_its_attr_regs_access(struct kvm_device *dev,
1763                              struct kvm_device_attr *attr,
1764                              u64 *reg, bool is_write)
1765{
1766        const struct vgic_register_region *region;
1767        struct vgic_its *its;
1768        gpa_t addr, offset;
1769        unsigned int len;
1770        int align, ret = 0;
1771
1772        its = dev->private;
1773        offset = attr->attr;
1774
1775        /*
1776         * Although the spec supports upper/lower 32-bit accesses to
1777         * 64-bit ITS registers, the userspace ABI requires 64-bit
1778         * accesses to all 64-bit wide registers. We therefore only
1779         * support 32-bit accesses to GITS_CTLR, GITS_IIDR and the GITS ID
1780         * registers.
1781         */
1782        if ((offset < GITS_TYPER) || (offset >= GITS_PIDR4))
1783                align = 0x3;
1784        else
1785                align = 0x7;
1786
1787        if (offset & align)
1788                return -EINVAL;
1789
1790        mutex_lock(&dev->kvm->lock);
1791
1792        if (IS_VGIC_ADDR_UNDEF(its->vgic_its_base)) {
1793                ret = -ENXIO;
1794                goto out;
1795        }
1796
1797        region = vgic_find_mmio_region(its_registers,
1798                                       ARRAY_SIZE(its_registers),
1799                                       offset);
1800        if (!region) {
1801                ret = -ENXIO;
1802                goto out;
1803        }
1804
1805        if (!lock_all_vcpus(dev->kvm)) {
1806                ret = -EBUSY;
1807                goto out;
1808        }
1809
1810        addr = its->vgic_its_base + offset;
1811
1812        len = region->access_flags & VGIC_ACCESS_64bit ? 8 : 4;
1813
1814        if (is_write) {
1815                if (region->uaccess_its_write)
1816                        ret = region->uaccess_its_write(dev->kvm, its, addr,
1817                                                        len, *reg);
1818                else
1819                        region->its_write(dev->kvm, its, addr, len, *reg);
1820        } else {
1821                *reg = region->its_read(dev->kvm, its, addr, len);
1822        }
1823        unlock_all_vcpus(dev->kvm);
1824out:
1825        mutex_unlock(&dev->kvm->lock);
1826        return ret;
1827}
1828
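/*
 * Compute the distance from this device ID to the next one in the
 * (sorted) device list, capped to the largest offset a saved DTE can
 * encode in its "next" field.
 */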
1829static u32 compute_next_devid_offset(struct list_head *h,
1830                                     struct its_device *dev)
1831{
1832        struct its_device *next;
1833        u32 next_offset;
1834
1835        if (list_is_last(&dev->dev_list, h))
1836                return 0;
1837        next = list_next_entry(dev, dev_list);
1838        next_offset = next->device_id - dev->device_id;
1839
1840        return min_t(u32, next_offset, VITS_DTE_MAX_DEVID_OFFSET);
1841}
1842
1843static u32 compute_next_eventid_offset(struct list_head *h, struct its_ite *ite)
1844{
1845        struct its_ite *next;
1846        u32 next_offset;
1847
1848        if (list_is_last(&ite->ite_list, h))
1849                return 0;
1850        next = list_next_entry(ite, ite_list);
1851        next_offset = next->event_id - ite->event_id;
1852
1853        return min_t(u32, next_offset, VITS_ITE_MAX_EVENTID_OFFSET);
1854}
1855
1856/**
1857 * entry_fn_t - Callback called on a table entry restore path
1858 * @its: its handle
1859 * @id: id of the entry
1860 * @entry: pointer to the entry
1861 * @opaque: pointer to opaque caller data
1862 *
1863 * Return: < 0 on error, 0 if last element was identified, id offset to next
1864 * element otherwise
1865 */
1866typedef int (*entry_fn_t)(struct vgic_its *its, u32 id, void *entry,
1867                          void *opaque);
1868
1869/**
1870 * scan_its_table - Scan a contiguous table in guest RAM and apply a function
1871 * to each entry
1872 *
1873 * @its: its handle
1874 * @base: base gpa of the table
1875 * @size: size of the table in bytes
1876 * @esz: entry size in bytes
1877 * @start_id: the ID of the first entry in the table
1878 * (non-zero for second-level tables)
1879 * @fn: function to apply on each entry
1880 *
1881 * Return: < 0 on error, 0 if last element was identified, 1 otherwise
1882 * (the last element may not be found on second level tables)
1883 */
1884static int scan_its_table(struct vgic_its *its, gpa_t base, int size, int esz,
1885                          int start_id, entry_fn_t fn, void *opaque)
1886{
1887        struct kvm *kvm = its->dev->kvm;
1888        unsigned long len = size;
1889        int id = start_id;
1890        gpa_t gpa = base;
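        /* With the v0 ABI every table entry is 8 bytes, so this VLA stays small. */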
1891        char entry[esz];
1892        int ret;
1893
1894        memset(entry, 0, esz);
1895
1896        while (len > 0) {
1897                int next_offset;
1898                size_t byte_offset;
1899
1900                ret = kvm_read_guest_lock(kvm, gpa, entry, esz);
1901                if (ret)
1902                        return ret;
1903
1904                next_offset = fn(its, id, entry, opaque);
1905                if (next_offset <= 0)
1906                        return next_offset;
1907
1908                byte_offset = next_offset * esz;
1909                id += next_offset;
1910                gpa += byte_offset;
1911                len -= byte_offset;
1912        }
1913        return 1;
1914}
1915
1916/**
1917 * vgic_its_save_ite - Save an interrupt translation entry at @gpa
 * @its: its handle
 * @dev: ITS device owning the ITT
 * @ite: ITE to save
 * @gpa: GPA of the entry in guest RAM
 * @ite_esz: entry size in bytes
1918 */
1919static int vgic_its_save_ite(struct vgic_its *its, struct its_device *dev,
1920                              struct its_ite *ite, gpa_t gpa, int ite_esz)
1921{
1922        struct kvm *kvm = its->dev->kvm;
1923        u32 next_offset;
1924        u64 val;
1925
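        /*
         * Layout of a saved ITE (v0 ABI):
         * bits[63:48]: offset to the next event ID in the ITT
         * bits[47:16]: physical LPI number (pINTID)
         * bits[15:0]:  interrupt collection ID (ICID)
         */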
1926        next_offset = compute_next_eventid_offset(&dev->itt_head, ite);
1927        val = ((u64)next_offset << KVM_ITS_ITE_NEXT_SHIFT) |
1928               ((u64)ite->irq->intid << KVM_ITS_ITE_PINTID_SHIFT) |
1929                ite->collection->collection_id;
1930        val = cpu_to_le64(val);
1931        return kvm_write_guest(kvm, gpa, &val, ite_esz);
1932}
1933
1934/**
1935 * vgic_its_restore_ite - restore an interrupt translation entry
1936 * @event_id: id used for indexing
1937 * @ptr: pointer to the ITE entry
1938 * @opaque: pointer to the its_device
1939 */
1940static int vgic_its_restore_ite(struct vgic_its *its, u32 event_id,
1941                                void *ptr, void *opaque)
1942{
1943        struct its_device *dev = (struct its_device *)opaque;
1944        struct its_collection *collection;
1945        struct kvm *kvm = its->dev->kvm;
1946        struct kvm_vcpu *vcpu = NULL;
1947        u64 val;
1948        u64 *p = (u64 *)ptr;
1949        struct vgic_irq *irq;
1950        u32 coll_id, lpi_id;
1951        struct its_ite *ite;
1952        u32 offset;
1953
1954        val = le64_to_cpu(*p);
1957
1958        coll_id = val & KVM_ITS_ITE_ICID_MASK;
1959        lpi_id = (val & KVM_ITS_ITE_PINTID_MASK) >> KVM_ITS_ITE_PINTID_SHIFT;
1960
1961        if (!lpi_id)
1962                return 1; /* invalid entry, no choice but to scan next entry */
1963
1964        if (lpi_id < VGIC_MIN_LPI)
1965                return -EINVAL;
1966
1967        offset = val >> KVM_ITS_ITE_NEXT_SHIFT;
1968        if (event_id + offset >= BIT_ULL(dev->num_eventid_bits))
1969                return -EINVAL;
1970
1971        collection = find_collection(its, coll_id);
1972        if (!collection)
1973                return -EINVAL;
1974
1975        ite = vgic_its_alloc_ite(dev, collection, event_id);
1976        if (IS_ERR(ite))
1977                return PTR_ERR(ite);
1978
1979        if (its_is_collection_mapped(collection))
1980                vcpu = kvm_get_vcpu(kvm, collection->target_addr);
1981
1982        irq = vgic_add_lpi(kvm, lpi_id, vcpu);
1983        if (IS_ERR(irq))
1984                return PTR_ERR(irq);
1985        ite->irq = irq;
1986
1987        return offset;
1988}
1989
1990static int vgic_its_ite_cmp(void *priv, struct list_head *a,
1991                            struct list_head *b)
1992{
1993        struct its_ite *itea = container_of(a, struct its_ite, ite_list);
1994        struct its_ite *iteb = container_of(b, struct its_ite, ite_list);
1995
1996        if (itea->event_id < iteb->event_id)
1997                return -1;
1998        else
1999                return 1;
2000}
2001
2002static int vgic_its_save_itt(struct vgic_its *its, struct its_device *device)
2003{
2004        const struct vgic_its_abi *abi = vgic_its_get_abi(its);
2005        gpa_t base = device->itt_addr;
2006        struct its_ite *ite;
2007        int ret;
2008        int ite_esz = abi->ite_esz;
2009
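        /* Sort by event ID so the "next" deltas saved below are positive. */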
2010        list_sort(NULL, &device->itt_head, vgic_its_ite_cmp);
2011
2012        list_for_each_entry(ite, &device->itt_head, ite_list) {
2013                gpa_t gpa = base + ite->event_id * ite_esz;
2014
2015                /*
2016                 * If an LPI carries the HW bit, this means that this
2017                 * interrupt is controlled by GICv4, and we do not
2018                 * have direct access to that state. Let's simply fail
2019                 * the save operation...
2020                 */
2021                if (ite->irq->hw)
2022                        return -EACCES;
2023
2024                ret = vgic_its_save_ite(its, device, ite, gpa, ite_esz);
2025                if (ret)
2026                        return ret;
2027        }
2028        return 0;
2029}
2030
2031/**
2032 * vgic_its_restore_itt - restore the ITT of a device
2033 *
2034 * @its: its handle
2035 * @dev: device handle
2036 *
2037 * Return 0 on success, < 0 on error
2038 */
2039static int vgic_its_restore_itt(struct vgic_its *its, struct its_device *dev)
2040{
2041        const struct vgic_its_abi *abi = vgic_its_get_abi(its);
2042        gpa_t base = dev->itt_addr;
2043        int ret;
2044        int ite_esz = abi->ite_esz;
2045        size_t max_size = BIT_ULL(dev->num_eventid_bits) * ite_esz;
2046
2047        ret = scan_its_table(its, base, max_size, ite_esz, 0,
2048                             vgic_its_restore_ite, dev);
2049
2050        /* scan_its_table returns +1 if all ITEs are invalid */
2051        if (ret > 0)
2052                ret = 0;
2053
2054        return ret;
2055}
2056
2057/**
2058 * vgic_its_save_dte - Save a device table entry at a given GPA
2059 *
2060 * @its: ITS handle
2061 * @dev: ITS device
2062 * @ptr: GPA
2063 */
2064static int vgic_its_save_dte(struct vgic_its *its, struct its_device *dev,
2065                             gpa_t ptr, int dte_esz)
2066{
2067        struct kvm *kvm = its->dev->kvm;
2068        u64 val, itt_addr_field;
2069        u32 next_offset;
2070
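        /*
         * Layout of a saved DTE (v0 ABI):
         * bit[63]:     valid
         * bits[62:49]: offset to the next device ID
         * bits[48:5]:  ITT address, shifted right by 8
         * bits[4:0]:   number of event ID bits, minus one
         */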
2071        itt_addr_field = dev->itt_addr >> 8;
2072        next_offset = compute_next_devid_offset(&its->device_list, dev);
2073        val = (1ULL << KVM_ITS_DTE_VALID_SHIFT |
2074               ((u64)next_offset << KVM_ITS_DTE_NEXT_SHIFT) |
2075               (itt_addr_field << KVM_ITS_DTE_ITTADDR_SHIFT) |
2076                (dev->num_eventid_bits - 1));
2077        val = cpu_to_le64(val);
2078        return kvm_write_guest(kvm, ptr, &val, dte_esz);
2079}
2080
2081/**
2082 * vgic_its_restore_dte - restore a device table entry
2083 *
2084 * @its: its handle
2085 * @id: device id the DTE corresponds to
2086 * @ptr: kernel VA where the 8 byte DTE is located
2087 * @opaque: unused
2088 *
2089 * Return: < 0 on error, 0 if the dte is the last one, id offset to the
2090 * next dte otherwise
2091 */
2092static int vgic_its_restore_dte(struct vgic_its *its, u32 id,
2093                                void *ptr, void *opaque)
2094{
2095        struct its_device *dev;
2096        gpa_t itt_addr;
2097        u8 num_eventid_bits;
2098        u64 entry = *(u64 *)ptr;
2099        bool valid;
2100        u32 offset;
2101        int ret;
2102
2103        entry = le64_to_cpu(entry);
2104
2105        valid = entry >> KVM_ITS_DTE_VALID_SHIFT;
2106        num_eventid_bits = (entry & KVM_ITS_DTE_SIZE_MASK) + 1;
2107        itt_addr = ((entry & KVM_ITS_DTE_ITTADDR_MASK)
2108                        >> KVM_ITS_DTE_ITTADDR_SHIFT) << 8;
2109
2110        if (!valid)
2111                return 1;
2112
2113        /* dte entry is valid */
2114        offset = (entry & KVM_ITS_DTE_NEXT_MASK) >> KVM_ITS_DTE_NEXT_SHIFT;
2115
2116        dev = vgic_its_alloc_device(its, id, itt_addr, num_eventid_bits);
2117        if (IS_ERR(dev))
2118                return PTR_ERR(dev);
2119
2120        ret = vgic_its_restore_itt(its, dev);
2121        if (ret) {
2122                vgic_its_free_device(its->dev->kvm, dev);
2123                return ret;
2124        }
2125
2126        return offset;
2127}
2128
2129static int vgic_its_device_cmp(void *priv, struct list_head *a,
2130                               struct list_head *b)
2131{
2132        struct its_device *deva = container_of(a, struct its_device, dev_list);
2133        struct its_device *devb = container_of(b, struct its_device, dev_list);
2134
2135        if (deva->device_id < devb->device_id)
2136                return -1;
2137        else
2138                return 1;
2139}
2140
2141/**
2142 * vgic_its_save_device_tables - Save the device table and all ITTs
2143 * into guest RAM
2144 *
2145 * L1/L2 handling is hidden by the vgic_its_check_id() helper, which
2146 * directly returns the GPA of the device entry.
2147 */
2148static int vgic_its_save_device_tables(struct vgic_its *its)
2149{
2150        const struct vgic_its_abi *abi = vgic_its_get_abi(its);
2151        u64 baser = its->baser_device_table;
2152        struct its_device *dev;
2153        int dte_esz = abi->dte_esz;
2154
2155        if (!(baser & GITS_BASER_VALID))
2156                return 0;
2157
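        /* Sort by device ID so the "next" deltas saved below are positive. */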
2158        list_sort(NULL, &its->device_list, vgic_its_device_cmp);
2159
2160        list_for_each_entry(dev, &its->device_list, dev_list) {
2161                int ret;
2162                gpa_t eaddr;
2163
2164                if (!vgic_its_check_id(its, baser,
2165                                       dev->device_id, &eaddr))
2166                        return -EINVAL;
2167
2168                ret = vgic_its_save_itt(its, dev);
2169                if (ret)
2170                        return ret;
2171
2172                ret = vgic_its_save_dte(its, dev, eaddr, dte_esz);
2173                if (ret)
2174                        return ret;
2175        }
2176        return 0;
2177}
2178
2179/**
2180 * handle_l1_dte - callback used for L1 device table entries (2-level case)
2181 *
2182 * @its: its handle
2183 * @id: index of the entry in the L1 table
2184 * @addr: kernel VA
2185 * @opaque: unused
2186 *
2187 * L1 table entries are scanned one by one.
2188 * Return: < 0 on error, 0 if the last DTE was found when scanning the L2
2189 * table, +1 otherwise (meaning the next L1 entry must be scanned)
2190 */
2191static int handle_l1_dte(struct vgic_its *its, u32 id, void *addr,
2192                         void *opaque)
2193{
2194        const struct vgic_its_abi *abi = vgic_its_get_abi(its);
2195        int l2_start_id = id * (SZ_64K / abi->dte_esz);
2196        u64 entry = *(u64 *)addr;
2197        int dte_esz = abi->dte_esz;
2198        gpa_t gpa;
2199        int ret;
2200
2201        entry = le64_to_cpu(entry);
2202
2203        if (!(entry & KVM_ITS_L1E_VALID_MASK))
2204                return 1;
2205
2206        gpa = entry & KVM_ITS_L1E_ADDR_MASK;
2207
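        /* Scan the 64K L2 page that this valid L1 entry points to. */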
2208        ret = scan_its_table(its, gpa, SZ_64K, dte_esz,
2209                             l2_start_id, vgic_its_restore_dte, NULL);
2210
2211        return ret;
2212}
2213
2214/**
2215 * vgic_its_restore_device_tables - Restore the device table and all ITTs
2216 * from guest RAM to internal data structs
2217 */
2218static int vgic_its_restore_device_tables(struct vgic_its *its)
2219{
2220        const struct vgic_its_abi *abi = vgic_its_get_abi(its);
2221        u64 baser = its->baser_device_table;
2222        int l1_esz, ret;
2223        int l1_tbl_size = GITS_BASER_NR_PAGES(baser) * SZ_64K;
2224        gpa_t l1_gpa;
2225
2226        if (!(baser & GITS_BASER_VALID))
2227                return 0;
2228
2229        l1_gpa = BASER_ADDRESS(baser);
2230
2231        if (baser & GITS_BASER_INDIRECT) {
2232                l1_esz = GITS_LVL1_ENTRY_SIZE;
2233                ret = scan_its_table(its, l1_gpa, l1_tbl_size, l1_esz, 0,
2234                                     handle_l1_dte, NULL);
2235        } else {
2236                l1_esz = abi->dte_esz;
2237                ret = scan_its_table(its, l1_gpa, l1_tbl_size, l1_esz, 0,
2238                                     vgic_its_restore_dte, NULL);
2239        }
2240
2241        /* scan_its_table returns +1 if all entries are invalid */
2242        if (ret > 0)
2243                ret = 0;
2244
2245        return ret;
2246}
2247
2248static int vgic_its_save_cte(struct vgic_its *its,
2249                             struct its_collection *collection,
2250                             gpa_t gpa, int esz)
2251{
2252        u64 val;
2253
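        /*
         * Layout of a saved CTE (v0 ABI):
         * bit[63]:     valid
         * bits[47:16]: target redistributor (vcpu index)
         * bits[15:0]:  interrupt collection ID (ICID)
         */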
2254        val = (1ULL << KVM_ITS_CTE_VALID_SHIFT |
2255               ((u64)collection->target_addr << KVM_ITS_CTE_RDBASE_SHIFT) |
2256               collection->collection_id);
2257        val = cpu_to_le64(val);
2258        return kvm_write_guest(its->dev->kvm, gpa, &val, esz);
2259}
2260
2261static int vgic_its_restore_cte(struct vgic_its *its, gpa_t gpa, int esz)
2262{
2263        struct its_collection *collection;
2264        struct kvm *kvm = its->dev->kvm;
2265        u32 target_addr, coll_id;
2266        u64 val;
2267        int ret;
2268
2269        BUG_ON(esz > sizeof(val));
2270        ret = kvm_read_guest_lock(kvm, gpa, &val, esz);
2271        if (ret)
2272                return ret;
2273        val = le64_to_cpu(val);
2274        if (!(val & KVM_ITS_CTE_VALID_MASK))
2275                return 0;
2276
2277        target_addr = (u32)(val >> KVM_ITS_CTE_RDBASE_SHIFT);
2278        coll_id = val & KVM_ITS_CTE_ICID_MASK;
2279
2280        if (target_addr >= atomic_read(&kvm->online_vcpus))
2281                return -EINVAL;
2282
2283        collection = find_collection(its, coll_id);
2284        if (collection)
2285                return -EEXIST;
2286        ret = vgic_its_alloc_collection(its, &collection, coll_id);
2287        if (ret)
2288                return ret;
2289        collection->target_addr = target_addr;
2290        return 1;
2291}
2292
2293/**
2294 * vgic_its_save_collection_table - Save the collection table into
2295 * guest RAM
2296 */
2297static int vgic_its_save_collection_table(struct vgic_its *its)
2298{
2299        const struct vgic_its_abi *abi = vgic_its_get_abi(its);
2300        u64 baser = its->baser_coll_table;
2301        gpa_t gpa = BASER_ADDRESS(baser);
2302        struct its_collection *collection;
2303        u64 val;
2304        size_t max_size, filled = 0;
2305        int ret, cte_esz = abi->cte_esz;
2306
2307        if (!(baser & GITS_BASER_VALID))
2308                return 0;
2309
2310        max_size = GITS_BASER_NR_PAGES(baser) * SZ_64K;
2311
2312        list_for_each_entry(collection, &its->collection_list, coll_list) {
2313                ret = vgic_its_save_cte(its, collection, gpa, cte_esz);
2314                if (ret)
2315                        return ret;
2316                gpa += cte_esz;
2317                filled += cte_esz;
2318        }
2319
2320        if (filled == max_size)
2321                return 0;
2322
2323        /*
2324         * The table is not fully filled; add a final dummy element
2325         * with its valid bit unset.
2326         */
2327        val = 0;
2328        BUG_ON(cte_esz > sizeof(val));
2329        ret = kvm_write_guest(its->dev->kvm, gpa, &val, cte_esz);
2330        return ret;
2331}
2332
2333/**
2334 * vgic_its_restore_collection_table - reads the collection table
2335 * in guest memory and restores the ITS internal state. Requires the
2336 * BASER registers to have been restored first.
2337 */
2338static int vgic_its_restore_collection_table(struct vgic_its *its)
2339{
2340        const struct vgic_its_abi *abi = vgic_its_get_abi(its);
2341        u64 baser = its->baser_coll_table;
2342        int cte_esz = abi->cte_esz;
2343        size_t max_size, read = 0;
2344        gpa_t gpa;
2345        int ret;
2346
2347        if (!(baser & GITS_BASER_VALID))
2348                return 0;
2349
2350        gpa = BASER_ADDRESS(baser);
2351
2352        max_size = GITS_BASER_NR_PAGES(baser) * SZ_64K;
2353
2354        while (read < max_size) {
2355                ret = vgic_its_restore_cte(its, gpa, cte_esz);
2356                if (ret <= 0)
2357                        break;
2358                gpa += cte_esz;
2359                read += cte_esz;
2360        }
2361
2362        if (ret > 0)
2363                return 0;
2364
2365        return ret;
2366}
2367
2368/**
2369 * vgic_its_save_tables_v0 - Save the ITS tables into guest RAM
2370 * according to v0 ABI
2371 */
2372static int vgic_its_save_tables_v0(struct vgic_its *its)
2373{
2374        int ret;
2375
2376        ret = vgic_its_save_device_tables(its);
2377        if (ret)
2378                return ret;
2379
2380        return vgic_its_save_collection_table(its);
2381}
2382
2383/**
2384 * vgic_its_restore_tables_v0 - Restore the ITS tables from guest RAM
2385 * to internal data structs according to the v0 ABI
2387 */
2388static int vgic_its_restore_tables_v0(struct vgic_its *its)
2389{
2390        int ret;
2391
2392        ret = vgic_its_restore_collection_table(its);
2393        if (ret)
2394                return ret;
2395
2396        return vgic_its_restore_device_tables(its);
2397}
2398
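/*
 * Patch the ABI's table entry sizes into the (read-only) Entry_Size
 * fields of the device and collection table BASERs.
 */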
2399static int vgic_its_commit_v0(struct vgic_its *its)
2400{
2401        const struct vgic_its_abi *abi;
2402
2403        abi = vgic_its_get_abi(its);
2404        its->baser_coll_table &= ~GITS_BASER_ENTRY_SIZE_MASK;
2405        its->baser_device_table &= ~GITS_BASER_ENTRY_SIZE_MASK;
2406
2407        its->baser_coll_table |= (GIC_ENCODE_SZ(abi->cte_esz, 5)
2408                                        << GITS_BASER_ENTRY_SIZE_SHIFT);
2409
2410        its->baser_device_table |= (GIC_ENCODE_SZ(abi->dte_esz, 5)
2411                                        << GITS_BASER_ENTRY_SIZE_SHIFT);
2412        return 0;
2413}
2414
2415static void vgic_its_reset(struct kvm *kvm, struct vgic_its *its)
2416{
2417        /* We need to keep the ABI-specific field values */
2418        its->baser_coll_table &= ~GITS_BASER_VALID;
2419        its->baser_device_table &= ~GITS_BASER_VALID;
2420        its->cbaser = 0;
2421        its->creadr = 0;
2422        its->cwriter = 0;
2423        its->enabled = 0;
2424        vgic_its_free_device_list(kvm, its);
2425        vgic_its_free_collection_list(kvm, its);
2426}
2427
2428static int vgic_its_has_attr(struct kvm_device *dev,
2429                             struct kvm_device_attr *attr)
2430{
2431        switch (attr->group) {
2432        case KVM_DEV_ARM_VGIC_GRP_ADDR:
2433                switch (attr->attr) {
2434                case KVM_VGIC_ITS_ADDR_TYPE:
2435                        return 0;
2436                }
2437                break;
2438        case KVM_DEV_ARM_VGIC_GRP_CTRL:
2439                switch (attr->attr) {
2440                case KVM_DEV_ARM_VGIC_CTRL_INIT:
2441                        return 0;
2442                case KVM_DEV_ARM_ITS_CTRL_RESET:
2443                        return 0;
2444                case KVM_DEV_ARM_ITS_SAVE_TABLES:
2445                        return 0;
2446                case KVM_DEV_ARM_ITS_RESTORE_TABLES:
2447                        return 0;
2448                }
2449                break;
2450        case KVM_DEV_ARM_VGIC_GRP_ITS_REGS:
2451                return vgic_its_has_attr_regs(dev, attr);
2452        }
2453        return -ENXIO;
2454}
2455
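/*
 * Handle the KVM_DEV_ARM_VGIC_GRP_CTRL group: ITS reset and table
 * save/restore, all performed with every vCPU locked.
 */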
2456static int vgic_its_ctrl(struct kvm *kvm, struct vgic_its *its, u64 attr)
2457{
2458        const struct vgic_its_abi *abi = vgic_its_get_abi(its);
2459        int ret = 0;
2460
2461        if (attr == KVM_DEV_ARM_VGIC_CTRL_INIT) /* Nothing to do */
2462                return 0;
2463
2464        mutex_lock(&kvm->lock);
2465        mutex_lock(&its->its_lock);
2466
2467        if (!lock_all_vcpus(kvm)) {
2468                mutex_unlock(&its->its_lock);
2469                mutex_unlock(&kvm->lock);
2470                return -EBUSY;
2471        }
2472
2473        switch (attr) {
2474        case KVM_DEV_ARM_ITS_CTRL_RESET:
2475                vgic_its_reset(kvm, its);
2476                break;
2477        case KVM_DEV_ARM_ITS_SAVE_TABLES:
2478                ret = abi->save_tables(its);
2479                break;
2480        case KVM_DEV_ARM_ITS_RESTORE_TABLES:
2481                ret = abi->restore_tables(its);
2482                break;
2483        }
2484
2485        unlock_all_vcpus(kvm);
2486        mutex_unlock(&its->its_lock);
2487        mutex_unlock(&kvm->lock);
2488        return ret;
2489}
2490
2491static int vgic_its_set_attr(struct kvm_device *dev,
2492                             struct kvm_device_attr *attr)
2493{
2494        struct vgic_its *its = dev->private;
2495        int ret;
2496
2497        switch (attr->group) {
2498        case KVM_DEV_ARM_VGIC_GRP_ADDR: {
2499                u64 __user *uaddr = (u64 __user *)(long)attr->addr;
2500                unsigned long type = (unsigned long)attr->attr;
2501                u64 addr;
2502
2503                if (type != KVM_VGIC_ITS_ADDR_TYPE)
2504                        return -ENODEV;
2505
2506                if (copy_from_user(&addr, uaddr, sizeof(addr)))
2507                        return -EFAULT;
2508
2509                ret = vgic_check_ioaddr(dev->kvm, &its->vgic_its_base,
2510                                        addr, SZ_64K);
2511                if (ret)
2512                        return ret;
2513
2514                return vgic_register_its_iodev(dev->kvm, its, addr);
2515        }
2516        case KVM_DEV_ARM_VGIC_GRP_CTRL:
2517                return vgic_its_ctrl(dev->kvm, its, attr->attr);
2518        case KVM_DEV_ARM_VGIC_GRP_ITS_REGS: {
2519                u64 __user *uaddr = (u64 __user *)(long)attr->addr;
2520                u64 reg;
2521
2522                if (get_user(reg, uaddr))
2523                        return -EFAULT;
2524
2525                return vgic_its_attr_regs_access(dev, attr, &reg, true);
2526        }
2527        }
2528        return -ENXIO;
2529}
2530
2531static int vgic_its_get_attr(struct kvm_device *dev,
2532                             struct kvm_device_attr *attr)
2533{
2534        switch (attr->group) {
2535        case KVM_DEV_ARM_VGIC_GRP_ADDR: {
2536                struct vgic_its *its = dev->private;
2537                u64 addr = its->vgic_its_base;
2538                u64 __user *uaddr = (u64 __user *)(long)attr->addr;
2539                unsigned long type = (unsigned long)attr->attr;
2540
2541                if (type != KVM_VGIC_ITS_ADDR_TYPE)
2542                        return -ENODEV;
2543
2544                if (copy_to_user(uaddr, &addr, sizeof(addr)))
2545                        return -EFAULT;
2546                break;
2547        }
2548        case KVM_DEV_ARM_VGIC_GRP_ITS_REGS: {
2549                u64 __user *uaddr = (u64 __user *)(long)attr->addr;
2550                u64 reg;
2551                int ret;
2552
2553                ret = vgic_its_attr_regs_access(dev, attr, &reg, false);
2554                if (ret)
2555                        return ret;
2556                return put_user(reg, uaddr);
2557        }
2558        default:
2559                return -ENXIO;
2560        }
2561
2562        return 0;
2563}
2564
2565static struct kvm_device_ops kvm_arm_vgic_its_ops = {
2566        .name = "kvm-arm-vgic-its",
2567        .create = vgic_its_create,
2568        .destroy = vgic_its_destroy,
2569        .set_attr = vgic_its_set_attr,
2570        .get_attr = vgic_its_get_attr,
2571        .has_attr = vgic_its_has_attr,
2572};
2573
2574int kvm_vgic_register_its_device(void)
2575{
2576        return kvm_register_device_ops(&kvm_arm_vgic_its_ops,
2577                                       KVM_DEV_TYPE_ARM_VGIC_ITS);
2578}
2579