linux/arch/arm64/kvm/hyp/vgic-v3-sr.c
<<
>>
Prefs
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * Copyright (C) 2012-2015 - ARM Ltd
   4 * Author: Marc Zyngier <marc.zyngier@arm.com>
   5 */
   6
   7#include <hyp/adjust_pc.h>
   8
   9#include <linux/compiler.h>
  10#include <linux/irqchip/arm-gic-v3.h>
  11#include <linux/kvm_host.h>
  12
  13#include <asm/kvm_emulate.h>
  14#include <asm/kvm_hyp.h>
  15#include <asm/kvm_mmu.h>
  16
  17#define vtr_to_max_lr_idx(v)            ((v) & 0xf)
  18#define vtr_to_nr_pre_bits(v)           ((((u32)(v) >> 26) & 7) + 1)
  19#define vtr_to_nr_apr_regs(v)           (1 << (vtr_to_nr_pre_bits(v) - 5))
  20
  21static u64 __gic_v3_get_lr(unsigned int lr)
  22{
  23        switch (lr & 0xf) {
  24        case 0:
  25                return read_gicreg(ICH_LR0_EL2);
  26        case 1:
  27                return read_gicreg(ICH_LR1_EL2);
  28        case 2:
  29                return read_gicreg(ICH_LR2_EL2);
  30        case 3:
  31                return read_gicreg(ICH_LR3_EL2);
  32        case 4:
  33                return read_gicreg(ICH_LR4_EL2);
  34        case 5:
  35                return read_gicreg(ICH_LR5_EL2);
  36        case 6:
  37                return read_gicreg(ICH_LR6_EL2);
  38        case 7:
  39                return read_gicreg(ICH_LR7_EL2);
  40        case 8:
  41                return read_gicreg(ICH_LR8_EL2);
  42        case 9:
  43                return read_gicreg(ICH_LR9_EL2);
  44        case 10:
  45                return read_gicreg(ICH_LR10_EL2);
  46        case 11:
  47                return read_gicreg(ICH_LR11_EL2);
  48        case 12:
  49                return read_gicreg(ICH_LR12_EL2);
  50        case 13:
  51                return read_gicreg(ICH_LR13_EL2);
  52        case 14:
  53                return read_gicreg(ICH_LR14_EL2);
  54        case 15:
  55                return read_gicreg(ICH_LR15_EL2);
  56        }
  57
  58        unreachable();
  59}
  60
  61static void __gic_v3_set_lr(u64 val, int lr)
  62{
  63        switch (lr & 0xf) {
  64        case 0:
  65                write_gicreg(val, ICH_LR0_EL2);
  66                break;
  67        case 1:
  68                write_gicreg(val, ICH_LR1_EL2);
  69                break;
  70        case 2:
  71                write_gicreg(val, ICH_LR2_EL2);
  72                break;
  73        case 3:
  74                write_gicreg(val, ICH_LR3_EL2);
  75                break;
  76        case 4:
  77                write_gicreg(val, ICH_LR4_EL2);
  78                break;
  79        case 5:
  80                write_gicreg(val, ICH_LR5_EL2);
  81                break;
  82        case 6:
  83                write_gicreg(val, ICH_LR6_EL2);
  84                break;
  85        case 7:
  86                write_gicreg(val, ICH_LR7_EL2);
  87                break;
  88        case 8:
  89                write_gicreg(val, ICH_LR8_EL2);
  90                break;
  91        case 9:
  92                write_gicreg(val, ICH_LR9_EL2);
  93                break;
  94        case 10:
  95                write_gicreg(val, ICH_LR10_EL2);
  96                break;
  97        case 11:
  98                write_gicreg(val, ICH_LR11_EL2);
  99                break;
 100        case 12:
 101                write_gicreg(val, ICH_LR12_EL2);
 102                break;
 103        case 13:
 104                write_gicreg(val, ICH_LR13_EL2);
 105                break;
 106        case 14:
 107                write_gicreg(val, ICH_LR14_EL2);
 108                break;
 109        case 15:
 110                write_gicreg(val, ICH_LR15_EL2);
 111                break;
 112        }
 113}
 114
 115static void __vgic_v3_write_ap0rn(u32 val, int n)
 116{
 117        switch (n) {
 118        case 0:
 119                write_gicreg(val, ICH_AP0R0_EL2);
 120                break;
 121        case 1:
 122                write_gicreg(val, ICH_AP0R1_EL2);
 123                break;
 124        case 2:
 125                write_gicreg(val, ICH_AP0R2_EL2);
 126                break;
 127        case 3:
 128                write_gicreg(val, ICH_AP0R3_EL2);
 129                break;
 130        }
 131}
 132
 133static void __vgic_v3_write_ap1rn(u32 val, int n)
 134{
 135        switch (n) {
 136        case 0:
 137                write_gicreg(val, ICH_AP1R0_EL2);
 138                break;
 139        case 1:
 140                write_gicreg(val, ICH_AP1R1_EL2);
 141                break;
 142        case 2:
 143                write_gicreg(val, ICH_AP1R2_EL2);
 144                break;
 145        case 3:
 146                write_gicreg(val, ICH_AP1R3_EL2);
 147                break;
 148        }
 149}
 150
 151static u32 __vgic_v3_read_ap0rn(int n)
 152{
 153        u32 val;
 154
 155        switch (n) {
 156        case 0:
 157                val = read_gicreg(ICH_AP0R0_EL2);
 158                break;
 159        case 1:
 160                val = read_gicreg(ICH_AP0R1_EL2);
 161                break;
 162        case 2:
 163                val = read_gicreg(ICH_AP0R2_EL2);
 164                break;
 165        case 3:
 166                val = read_gicreg(ICH_AP0R3_EL2);
 167                break;
 168        default:
 169                unreachable();
 170        }
 171
 172        return val;
 173}
 174
 175static u32 __vgic_v3_read_ap1rn(int n)
 176{
 177        u32 val;
 178
 179        switch (n) {
 180        case 0:
 181                val = read_gicreg(ICH_AP1R0_EL2);
 182                break;
 183        case 1:
 184                val = read_gicreg(ICH_AP1R1_EL2);
 185                break;
 186        case 2:
 187                val = read_gicreg(ICH_AP1R2_EL2);
 188                break;
 189        case 3:
 190                val = read_gicreg(ICH_AP1R3_EL2);
 191                break;
 192        default:
 193                unreachable();
 194        }
 195
 196        return val;
 197}
 198
 199void __vgic_v3_save_state(struct vgic_v3_cpu_if *cpu_if)
 200{
 201        u64 used_lrs = cpu_if->used_lrs;
 202
 203        /*
 204         * Make sure stores to the GIC via the memory mapped interface
 205         * are now visible to the system register interface when reading the
 206         * LRs, and when reading back the VMCR on non-VHE systems.
 207         */
 208        if (used_lrs || !has_vhe()) {
 209                if (!cpu_if->vgic_sre) {
 210                        dsb(sy);
 211                        isb();
 212                }
 213        }
 214
 215        if (used_lrs || cpu_if->its_vpe.its_vm) {
 216                int i;
 217                u32 elrsr;
 218
 219                elrsr = read_gicreg(ICH_ELRSR_EL2);
 220
 221                write_gicreg(cpu_if->vgic_hcr & ~ICH_HCR_EN, ICH_HCR_EL2);
 222
 223                for (i = 0; i < used_lrs; i++) {
 224                        if (elrsr & (1 << i))
 225                                cpu_if->vgic_lr[i] &= ~ICH_LR_STATE;
 226                        else
 227                                cpu_if->vgic_lr[i] = __gic_v3_get_lr(i);
 228
 229                        __gic_v3_set_lr(0, i);
 230                }
 231        }
 232}
 233
 234void __vgic_v3_restore_state(struct vgic_v3_cpu_if *cpu_if)
 235{
 236        u64 used_lrs = cpu_if->used_lrs;
 237        int i;
 238
 239        if (used_lrs || cpu_if->its_vpe.its_vm) {
 240                write_gicreg(cpu_if->vgic_hcr, ICH_HCR_EL2);
 241
 242                for (i = 0; i < used_lrs; i++)
 243                        __gic_v3_set_lr(cpu_if->vgic_lr[i], i);
 244        }
 245
 246        /*
 247         * Ensure that writes to the LRs, and on non-VHE systems ensure that
 248         * the write to the VMCR in __vgic_v3_activate_traps(), will have
 249         * reached the (re)distributors. This ensure the guest will read the
 250         * correct values from the memory-mapped interface.
 251         */
 252        if (used_lrs || !has_vhe()) {
 253                if (!cpu_if->vgic_sre) {
 254                        isb();
 255                        dsb(sy);
 256                }
 257        }
 258}
 259
 260void __vgic_v3_activate_traps(struct vgic_v3_cpu_if *cpu_if)
 261{
 262        /*
 263         * VFIQEn is RES1 if ICC_SRE_EL1.SRE is 1. This causes a
 264         * Group0 interrupt (as generated in GICv2 mode) to be
 265         * delivered as a FIQ to the guest, with potentially fatal
 266         * consequences. So we must make sure that ICC_SRE_EL1 has
 267         * been actually programmed with the value we want before
 268         * starting to mess with the rest of the GIC, and VMCR_EL2 in
 269         * particular.  This logic must be called before
 270         * __vgic_v3_restore_state().
 271         */
 272        if (!cpu_if->vgic_sre) {
 273                write_gicreg(0, ICC_SRE_EL1);
 274                isb();
 275                write_gicreg(cpu_if->vgic_vmcr, ICH_VMCR_EL2);
 276
 277
 278                if (has_vhe()) {
 279                        /*
 280                         * Ensure that the write to the VMCR will have reached
 281                         * the (re)distributors. This ensure the guest will
 282                         * read the correct values from the memory-mapped
 283                         * interface.
 284                         */
 285                        isb();
 286                        dsb(sy);
 287                }
 288        }
 289
 290        /*
 291         * Prevent the guest from touching the GIC system registers if
 292         * SRE isn't enabled for GICv3 emulation.
 293         */
 294        write_gicreg(read_gicreg(ICC_SRE_EL2) & ~ICC_SRE_EL2_ENABLE,
 295                     ICC_SRE_EL2);
 296
 297        /*
 298         * If we need to trap system registers, we must write
 299         * ICH_HCR_EL2 anyway, even if no interrupts are being
 300         * injected,
 301         */
 302        if (static_branch_unlikely(&vgic_v3_cpuif_trap) ||
 303            cpu_if->its_vpe.its_vm)
 304                write_gicreg(cpu_if->vgic_hcr, ICH_HCR_EL2);
 305}
 306
 307void __vgic_v3_deactivate_traps(struct vgic_v3_cpu_if *cpu_if)
 308{
 309        u64 val;
 310
 311        if (!cpu_if->vgic_sre) {
 312                cpu_if->vgic_vmcr = read_gicreg(ICH_VMCR_EL2);
 313        }
 314
 315        val = read_gicreg(ICC_SRE_EL2);
 316        write_gicreg(val | ICC_SRE_EL2_ENABLE, ICC_SRE_EL2);
 317
 318        if (!cpu_if->vgic_sre) {
 319                /* Make sure ENABLE is set at EL2 before setting SRE at EL1 */
 320                isb();
 321                write_gicreg(1, ICC_SRE_EL1);
 322        }
 323
 324        /*
 325         * If we were trapping system registers, we enabled the VGIC even if
 326         * no interrupts were being injected, and we disable it again here.
 327         */
 328        if (static_branch_unlikely(&vgic_v3_cpuif_trap) ||
 329            cpu_if->its_vpe.its_vm)
 330                write_gicreg(0, ICH_HCR_EL2);
 331}
 332
 333void __vgic_v3_save_aprs(struct vgic_v3_cpu_if *cpu_if)
 334{
 335        u64 val;
 336        u32 nr_pre_bits;
 337
 338        val = read_gicreg(ICH_VTR_EL2);
 339        nr_pre_bits = vtr_to_nr_pre_bits(val);
 340
 341        switch (nr_pre_bits) {
 342        case 7:
 343                cpu_if->vgic_ap0r[3] = __vgic_v3_read_ap0rn(3);
 344                cpu_if->vgic_ap0r[2] = __vgic_v3_read_ap0rn(2);
 345                fallthrough;
 346        case 6:
 347                cpu_if->vgic_ap0r[1] = __vgic_v3_read_ap0rn(1);
 348                fallthrough;
 349        default:
 350                cpu_if->vgic_ap0r[0] = __vgic_v3_read_ap0rn(0);
 351        }
 352
 353        switch (nr_pre_bits) {
 354        case 7:
 355                cpu_if->vgic_ap1r[3] = __vgic_v3_read_ap1rn(3);
 356                cpu_if->vgic_ap1r[2] = __vgic_v3_read_ap1rn(2);
 357                fallthrough;
 358        case 6:
 359                cpu_if->vgic_ap1r[1] = __vgic_v3_read_ap1rn(1);
 360                fallthrough;
 361        default:
 362                cpu_if->vgic_ap1r[0] = __vgic_v3_read_ap1rn(0);
 363        }
 364}
 365
 366void __vgic_v3_restore_aprs(struct vgic_v3_cpu_if *cpu_if)
 367{
 368        u64 val;
 369        u32 nr_pre_bits;
 370
 371        val = read_gicreg(ICH_VTR_EL2);
 372        nr_pre_bits = vtr_to_nr_pre_bits(val);
 373
 374        switch (nr_pre_bits) {
 375        case 7:
 376                __vgic_v3_write_ap0rn(cpu_if->vgic_ap0r[3], 3);
 377                __vgic_v3_write_ap0rn(cpu_if->vgic_ap0r[2], 2);
 378                fallthrough;
 379        case 6:
 380                __vgic_v3_write_ap0rn(cpu_if->vgic_ap0r[1], 1);
 381                fallthrough;
 382        default:
 383                __vgic_v3_write_ap0rn(cpu_if->vgic_ap0r[0], 0);
 384        }
 385
 386        switch (nr_pre_bits) {
 387        case 7:
 388                __vgic_v3_write_ap1rn(cpu_if->vgic_ap1r[3], 3);
 389                __vgic_v3_write_ap1rn(cpu_if->vgic_ap1r[2], 2);
 390                fallthrough;
 391        case 6:
 392                __vgic_v3_write_ap1rn(cpu_if->vgic_ap1r[1], 1);
 393                fallthrough;
 394        default:
 395                __vgic_v3_write_ap1rn(cpu_if->vgic_ap1r[0], 0);
 396        }
 397}
 398
 399void __vgic_v3_init_lrs(void)
 400{
 401        int max_lr_idx = vtr_to_max_lr_idx(read_gicreg(ICH_VTR_EL2));
 402        int i;
 403
 404        for (i = 0; i <= max_lr_idx; i++)
 405                __gic_v3_set_lr(0, i);
 406}
 407
 408/*
 409 * Return the GIC CPU configuration:
 410 * - [31:0]  ICH_VTR_EL2
 411 * - [62:32] RES0
 412 * - [63]    MMIO (GICv2) capable
 413 */
 414u64 __vgic_v3_get_gic_config(void)
 415{
 416        u64 val, sre = read_gicreg(ICC_SRE_EL1);
 417        unsigned long flags = 0;
 418
 419        /*
 420         * To check whether we have a MMIO-based (GICv2 compatible)
 421         * CPU interface, we need to disable the system register
 422         * view. To do that safely, we have to prevent any interrupt
 423         * from firing (which would be deadly).
 424         *
 425         * Note that this only makes sense on VHE, as interrupts are
 426         * already masked for nVHE as part of the exception entry to
 427         * EL2.
 428         */
 429        if (has_vhe())
 430                flags = local_daif_save();
 431
 432        /*
 433         * Table 11-2 "Permitted ICC_SRE_ELx.SRE settings" indicates
 434         * that to be able to set ICC_SRE_EL1.SRE to 0, all the
 435         * interrupt overrides must be set. You've got to love this.
 436         */
 437        sysreg_clear_set(hcr_el2, 0, HCR_AMO | HCR_FMO | HCR_IMO);
 438        isb();
 439        write_gicreg(0, ICC_SRE_EL1);
 440        isb();
 441
 442        val = read_gicreg(ICC_SRE_EL1);
 443
 444        write_gicreg(sre, ICC_SRE_EL1);
 445        isb();
 446        sysreg_clear_set(hcr_el2, HCR_AMO | HCR_FMO | HCR_IMO, 0);
 447        isb();
 448
 449        if (has_vhe())
 450                local_daif_restore(flags);
 451
 452        val  = (val & ICC_SRE_EL1_SRE) ? 0 : (1ULL << 63);
 453        val |= read_gicreg(ICH_VTR_EL2);
 454
 455        return val;
 456}
 457
 458u64 __vgic_v3_read_vmcr(void)
 459{
 460        return read_gicreg(ICH_VMCR_EL2);
 461}
 462
 463void __vgic_v3_write_vmcr(u32 vmcr)
 464{
 465        write_gicreg(vmcr, ICH_VMCR_EL2);
 466}
 467
 468static int __vgic_v3_bpr_min(void)
 469{
 470        /* See Pseudocode for VPriorityGroup */
 471        return 8 - vtr_to_nr_pre_bits(read_gicreg(ICH_VTR_EL2));
 472}
 473
 474static int __vgic_v3_get_group(struct kvm_vcpu *vcpu)
 475{
 476        u64 esr = kvm_vcpu_get_esr(vcpu);
 477        u8 crm = (esr & ESR_ELx_SYS64_ISS_CRM_MASK) >> ESR_ELx_SYS64_ISS_CRM_SHIFT;
 478
 479        return crm != 8;
 480}
 481
 482#define GICv3_IDLE_PRIORITY     0xff
 483
 484static int __vgic_v3_highest_priority_lr(struct kvm_vcpu *vcpu, u32 vmcr,
 485                                         u64 *lr_val)
 486{
 487        unsigned int used_lrs = vcpu->arch.vgic_cpu.vgic_v3.used_lrs;
 488        u8 priority = GICv3_IDLE_PRIORITY;
 489        int i, lr = -1;
 490
 491        for (i = 0; i < used_lrs; i++) {
 492                u64 val = __gic_v3_get_lr(i);
 493                u8 lr_prio = (val & ICH_LR_PRIORITY_MASK) >> ICH_LR_PRIORITY_SHIFT;
 494
 495                /* Not pending in the state? */
 496                if ((val & ICH_LR_STATE) != ICH_LR_PENDING_BIT)
 497                        continue;
 498
 499                /* Group-0 interrupt, but Group-0 disabled? */
 500                if (!(val & ICH_LR_GROUP) && !(vmcr & ICH_VMCR_ENG0_MASK))
 501                        continue;
 502
 503                /* Group-1 interrupt, but Group-1 disabled? */
 504                if ((val & ICH_LR_GROUP) && !(vmcr & ICH_VMCR_ENG1_MASK))
 505                        continue;
 506
 507                /* Not the highest priority? */
 508                if (lr_prio >= priority)
 509                        continue;
 510
 511                /* This is a candidate */
 512                priority = lr_prio;
 513                *lr_val = val;
 514                lr = i;
 515        }
 516
 517        if (lr == -1)
 518                *lr_val = ICC_IAR1_EL1_SPURIOUS;
 519
 520        return lr;
 521}
 522
 523static int __vgic_v3_find_active_lr(struct kvm_vcpu *vcpu, int intid,
 524                                    u64 *lr_val)
 525{
 526        unsigned int used_lrs = vcpu->arch.vgic_cpu.vgic_v3.used_lrs;
 527        int i;
 528
 529        for (i = 0; i < used_lrs; i++) {
 530                u64 val = __gic_v3_get_lr(i);
 531
 532                if ((val & ICH_LR_VIRTUAL_ID_MASK) == intid &&
 533                    (val & ICH_LR_ACTIVE_BIT)) {
 534                        *lr_val = val;
 535                        return i;
 536                }
 537        }
 538
 539        *lr_val = ICC_IAR1_EL1_SPURIOUS;
 540        return -1;
 541}
 542
 543static int __vgic_v3_get_highest_active_priority(void)
 544{
 545        u8 nr_apr_regs = vtr_to_nr_apr_regs(read_gicreg(ICH_VTR_EL2));
 546        u32 hap = 0;
 547        int i;
 548
 549        for (i = 0; i < nr_apr_regs; i++) {
 550                u32 val;
 551
 552                /*
 553                 * The ICH_AP0Rn_EL2 and ICH_AP1Rn_EL2 registers
 554                 * contain the active priority levels for this VCPU
 555                 * for the maximum number of supported priority
 556                 * levels, and we return the full priority level only
 557                 * if the BPR is programmed to its minimum, otherwise
 558                 * we return a combination of the priority level and
 559                 * subpriority, as determined by the setting of the
 560                 * BPR, but without the full subpriority.
 561                 */
 562                val  = __vgic_v3_read_ap0rn(i);
 563                val |= __vgic_v3_read_ap1rn(i);
 564                if (!val) {
 565                        hap += 32;
 566                        continue;
 567                }
 568
 569                return (hap + __ffs(val)) << __vgic_v3_bpr_min();
 570        }
 571
 572        return GICv3_IDLE_PRIORITY;
 573}
 574
 575static unsigned int __vgic_v3_get_bpr0(u32 vmcr)
 576{
 577        return (vmcr & ICH_VMCR_BPR0_MASK) >> ICH_VMCR_BPR0_SHIFT;
 578}
 579
 580static unsigned int __vgic_v3_get_bpr1(u32 vmcr)
 581{
 582        unsigned int bpr;
 583
 584        if (vmcr & ICH_VMCR_CBPR_MASK) {
 585                bpr = __vgic_v3_get_bpr0(vmcr);
 586                if (bpr < 7)
 587                        bpr++;
 588        } else {
 589                bpr = (vmcr & ICH_VMCR_BPR1_MASK) >> ICH_VMCR_BPR1_SHIFT;
 590        }
 591
 592        return bpr;
 593}
 594
 595/*
 596 * Convert a priority to a preemption level, taking the relevant BPR
 597 * into account by zeroing the sub-priority bits.
 598 */
 599static u8 __vgic_v3_pri_to_pre(u8 pri, u32 vmcr, int grp)
 600{
 601        unsigned int bpr;
 602
 603        if (!grp)
 604                bpr = __vgic_v3_get_bpr0(vmcr) + 1;
 605        else
 606                bpr = __vgic_v3_get_bpr1(vmcr);
 607
 608        return pri & (GENMASK(7, 0) << bpr);
 609}
 610
 611/*
 612 * The priority value is independent of any of the BPR values, so we
 613 * normalize it using the minimal BPR value. This guarantees that no
 614 * matter what the guest does with its BPR, we can always set/get the
 615 * same value of a priority.
 616 */
 617static void __vgic_v3_set_active_priority(u8 pri, u32 vmcr, int grp)
 618{
 619        u8 pre, ap;
 620        u32 val;
 621        int apr;
 622
 623        pre = __vgic_v3_pri_to_pre(pri, vmcr, grp);
 624        ap = pre >> __vgic_v3_bpr_min();
 625        apr = ap / 32;
 626
 627        if (!grp) {
 628                val = __vgic_v3_read_ap0rn(apr);
 629                __vgic_v3_write_ap0rn(val | BIT(ap % 32), apr);
 630        } else {
 631                val = __vgic_v3_read_ap1rn(apr);
 632                __vgic_v3_write_ap1rn(val | BIT(ap % 32), apr);
 633        }
 634}
 635
 636static int __vgic_v3_clear_highest_active_priority(void)
 637{
 638        u8 nr_apr_regs = vtr_to_nr_apr_regs(read_gicreg(ICH_VTR_EL2));
 639        u32 hap = 0;
 640        int i;
 641
 642        for (i = 0; i < nr_apr_regs; i++) {
 643                u32 ap0, ap1;
 644                int c0, c1;
 645
 646                ap0 = __vgic_v3_read_ap0rn(i);
 647                ap1 = __vgic_v3_read_ap1rn(i);
 648                if (!ap0 && !ap1) {
 649                        hap += 32;
 650                        continue;
 651                }
 652
 653                c0 = ap0 ? __ffs(ap0) : 32;
 654                c1 = ap1 ? __ffs(ap1) : 32;
 655
 656                /* Always clear the LSB, which is the highest priority */
 657                if (c0 < c1) {
 658                        ap0 &= ~BIT(c0);
 659                        __vgic_v3_write_ap0rn(ap0, i);
 660                        hap += c0;
 661                } else {
 662                        ap1 &= ~BIT(c1);
 663                        __vgic_v3_write_ap1rn(ap1, i);
 664                        hap += c1;
 665                }
 666
 667                /* Rescale to 8 bits of priority */
 668                return hap << __vgic_v3_bpr_min();
 669        }
 670
 671        return GICv3_IDLE_PRIORITY;
 672}
 673
 674static void __vgic_v3_read_iar(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
 675{
 676        u64 lr_val;
 677        u8 lr_prio, pmr;
 678        int lr, grp;
 679
 680        grp = __vgic_v3_get_group(vcpu);
 681
 682        lr = __vgic_v3_highest_priority_lr(vcpu, vmcr, &lr_val);
 683        if (lr < 0)
 684                goto spurious;
 685
 686        if (grp != !!(lr_val & ICH_LR_GROUP))
 687                goto spurious;
 688
 689        pmr = (vmcr & ICH_VMCR_PMR_MASK) >> ICH_VMCR_PMR_SHIFT;
 690        lr_prio = (lr_val & ICH_LR_PRIORITY_MASK) >> ICH_LR_PRIORITY_SHIFT;
 691        if (pmr <= lr_prio)
 692                goto spurious;
 693
 694        if (__vgic_v3_get_highest_active_priority() <= __vgic_v3_pri_to_pre(lr_prio, vmcr, grp))
 695                goto spurious;
 696
 697        lr_val &= ~ICH_LR_STATE;
 698        lr_val |= ICH_LR_ACTIVE_BIT;
 699        __gic_v3_set_lr(lr_val, lr);
 700        __vgic_v3_set_active_priority(lr_prio, vmcr, grp);
 701        vcpu_set_reg(vcpu, rt, lr_val & ICH_LR_VIRTUAL_ID_MASK);
 702        return;
 703
 704spurious:
 705        vcpu_set_reg(vcpu, rt, ICC_IAR1_EL1_SPURIOUS);
 706}
 707
 708static void __vgic_v3_clear_active_lr(int lr, u64 lr_val)
 709{
 710        lr_val &= ~ICH_LR_ACTIVE_BIT;
 711        if (lr_val & ICH_LR_HW) {
 712                u32 pid;
 713
 714                pid = (lr_val & ICH_LR_PHYS_ID_MASK) >> ICH_LR_PHYS_ID_SHIFT;
 715                gic_write_dir(pid);
 716        }
 717
 718        __gic_v3_set_lr(lr_val, lr);
 719}
 720
 721static void __vgic_v3_bump_eoicount(void)
 722{
 723        u32 hcr;
 724
 725        hcr = read_gicreg(ICH_HCR_EL2);
 726        hcr += 1 << ICH_HCR_EOIcount_SHIFT;
 727        write_gicreg(hcr, ICH_HCR_EL2);
 728}
 729
 730static void __vgic_v3_write_dir(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
 731{
 732        u32 vid = vcpu_get_reg(vcpu, rt);
 733        u64 lr_val;
 734        int lr;
 735
 736        /* EOImode == 0, nothing to be done here */
 737        if (!(vmcr & ICH_VMCR_EOIM_MASK))
 738                return;
 739
 740        /* No deactivate to be performed on an LPI */
 741        if (vid >= VGIC_MIN_LPI)
 742                return;
 743
 744        lr = __vgic_v3_find_active_lr(vcpu, vid, &lr_val);
 745        if (lr == -1) {
 746                __vgic_v3_bump_eoicount();
 747                return;
 748        }
 749
 750        __vgic_v3_clear_active_lr(lr, lr_val);
 751}
 752
 753static void __vgic_v3_write_eoir(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
 754{
 755        u32 vid = vcpu_get_reg(vcpu, rt);
 756        u64 lr_val;
 757        u8 lr_prio, act_prio;
 758        int lr, grp;
 759
 760        grp = __vgic_v3_get_group(vcpu);
 761
 762        /* Drop priority in any case */
 763        act_prio = __vgic_v3_clear_highest_active_priority();
 764
 765        lr = __vgic_v3_find_active_lr(vcpu, vid, &lr_val);
 766        if (lr == -1) {
 767                /* Do not bump EOIcount for LPIs that aren't in the LRs */
 768                if (!(vid >= VGIC_MIN_LPI))
 769                        __vgic_v3_bump_eoicount();
 770                return;
 771        }
 772
 773        /* EOImode == 1 and not an LPI, nothing to be done here */
 774        if ((vmcr & ICH_VMCR_EOIM_MASK) && !(vid >= VGIC_MIN_LPI))
 775                return;
 776
 777        lr_prio = (lr_val & ICH_LR_PRIORITY_MASK) >> ICH_LR_PRIORITY_SHIFT;
 778
 779        /* If priorities or group do not match, the guest has fscked-up. */
 780        if (grp != !!(lr_val & ICH_LR_GROUP) ||
 781            __vgic_v3_pri_to_pre(lr_prio, vmcr, grp) != act_prio)
 782                return;
 783
 784        /* Let's now perform the deactivation */
 785        __vgic_v3_clear_active_lr(lr, lr_val);
 786}
 787
 788static void __vgic_v3_read_igrpen0(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
 789{
 790        vcpu_set_reg(vcpu, rt, !!(vmcr & ICH_VMCR_ENG0_MASK));
 791}
 792
 793static void __vgic_v3_read_igrpen1(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
 794{
 795        vcpu_set_reg(vcpu, rt, !!(vmcr & ICH_VMCR_ENG1_MASK));
 796}
 797
 798static void __vgic_v3_write_igrpen0(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
 799{
 800        u64 val = vcpu_get_reg(vcpu, rt);
 801
 802        if (val & 1)
 803                vmcr |= ICH_VMCR_ENG0_MASK;
 804        else
 805                vmcr &= ~ICH_VMCR_ENG0_MASK;
 806
 807        __vgic_v3_write_vmcr(vmcr);
 808}
 809
 810static void __vgic_v3_write_igrpen1(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
 811{
 812        u64 val = vcpu_get_reg(vcpu, rt);
 813
 814        if (val & 1)
 815                vmcr |= ICH_VMCR_ENG1_MASK;
 816        else
 817                vmcr &= ~ICH_VMCR_ENG1_MASK;
 818
 819        __vgic_v3_write_vmcr(vmcr);
 820}
 821
 822static void __vgic_v3_read_bpr0(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
 823{
 824        vcpu_set_reg(vcpu, rt, __vgic_v3_get_bpr0(vmcr));
 825}
 826
 827static void __vgic_v3_read_bpr1(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
 828{
 829        vcpu_set_reg(vcpu, rt, __vgic_v3_get_bpr1(vmcr));
 830}
 831
 832static void __vgic_v3_write_bpr0(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
 833{
 834        u64 val = vcpu_get_reg(vcpu, rt);
 835        u8 bpr_min = __vgic_v3_bpr_min() - 1;
 836
 837        /* Enforce BPR limiting */
 838        if (val < bpr_min)
 839                val = bpr_min;
 840
 841        val <<= ICH_VMCR_BPR0_SHIFT;
 842        val &= ICH_VMCR_BPR0_MASK;
 843        vmcr &= ~ICH_VMCR_BPR0_MASK;
 844        vmcr |= val;
 845
 846        __vgic_v3_write_vmcr(vmcr);
 847}
 848
 849static void __vgic_v3_write_bpr1(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
 850{
 851        u64 val = vcpu_get_reg(vcpu, rt);
 852        u8 bpr_min = __vgic_v3_bpr_min();
 853
 854        if (vmcr & ICH_VMCR_CBPR_MASK)
 855                return;
 856
 857        /* Enforce BPR limiting */
 858        if (val < bpr_min)
 859                val = bpr_min;
 860
 861        val <<= ICH_VMCR_BPR1_SHIFT;
 862        val &= ICH_VMCR_BPR1_MASK;
 863        vmcr &= ~ICH_VMCR_BPR1_MASK;
 864        vmcr |= val;
 865
 866        __vgic_v3_write_vmcr(vmcr);
 867}
 868
 869static void __vgic_v3_read_apxrn(struct kvm_vcpu *vcpu, int rt, int n)
 870{
 871        u32 val;
 872
 873        if (!__vgic_v3_get_group(vcpu))
 874                val = __vgic_v3_read_ap0rn(n);
 875        else
 876                val = __vgic_v3_read_ap1rn(n);
 877
 878        vcpu_set_reg(vcpu, rt, val);
 879}
 880
 881static void __vgic_v3_write_apxrn(struct kvm_vcpu *vcpu, int rt, int n)
 882{
 883        u32 val = vcpu_get_reg(vcpu, rt);
 884
 885        if (!__vgic_v3_get_group(vcpu))
 886                __vgic_v3_write_ap0rn(val, n);
 887        else
 888                __vgic_v3_write_ap1rn(val, n);
 889}
 890
 891static void __vgic_v3_read_apxr0(struct kvm_vcpu *vcpu,
 892                                            u32 vmcr, int rt)
 893{
 894        __vgic_v3_read_apxrn(vcpu, rt, 0);
 895}
 896
 897static void __vgic_v3_read_apxr1(struct kvm_vcpu *vcpu,
 898                                            u32 vmcr, int rt)
 899{
 900        __vgic_v3_read_apxrn(vcpu, rt, 1);
 901}
 902
 903static void __vgic_v3_read_apxr2(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
 904{
 905        __vgic_v3_read_apxrn(vcpu, rt, 2);
 906}
 907
 908static void __vgic_v3_read_apxr3(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
 909{
 910        __vgic_v3_read_apxrn(vcpu, rt, 3);
 911}
 912
 913static void __vgic_v3_write_apxr0(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
 914{
 915        __vgic_v3_write_apxrn(vcpu, rt, 0);
 916}
 917
 918static void __vgic_v3_write_apxr1(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
 919{
 920        __vgic_v3_write_apxrn(vcpu, rt, 1);
 921}
 922
 923static void __vgic_v3_write_apxr2(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
 924{
 925        __vgic_v3_write_apxrn(vcpu, rt, 2);
 926}
 927
 928static void __vgic_v3_write_apxr3(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
 929{
 930        __vgic_v3_write_apxrn(vcpu, rt, 3);
 931}
 932
 933static void __vgic_v3_read_hppir(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
 934{
 935        u64 lr_val;
 936        int lr, lr_grp, grp;
 937
 938        grp = __vgic_v3_get_group(vcpu);
 939
 940        lr = __vgic_v3_highest_priority_lr(vcpu, vmcr, &lr_val);
 941        if (lr == -1)
 942                goto spurious;
 943
 944        lr_grp = !!(lr_val & ICH_LR_GROUP);
 945        if (lr_grp != grp)
 946                lr_val = ICC_IAR1_EL1_SPURIOUS;
 947
 948spurious:
 949        vcpu_set_reg(vcpu, rt, lr_val & ICH_LR_VIRTUAL_ID_MASK);
 950}
 951
 952static void __vgic_v3_read_pmr(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
 953{
 954        vmcr &= ICH_VMCR_PMR_MASK;
 955        vmcr >>= ICH_VMCR_PMR_SHIFT;
 956        vcpu_set_reg(vcpu, rt, vmcr);
 957}
 958
 959static void __vgic_v3_write_pmr(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
 960{
 961        u32 val = vcpu_get_reg(vcpu, rt);
 962
 963        val <<= ICH_VMCR_PMR_SHIFT;
 964        val &= ICH_VMCR_PMR_MASK;
 965        vmcr &= ~ICH_VMCR_PMR_MASK;
 966        vmcr |= val;
 967
 968        write_gicreg(vmcr, ICH_VMCR_EL2);
 969}
 970
 971static void __vgic_v3_read_rpr(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
 972{
 973        u32 val = __vgic_v3_get_highest_active_priority();
 974        vcpu_set_reg(vcpu, rt, val);
 975}
 976
 977static void __vgic_v3_read_ctlr(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
 978{
 979        u32 vtr, val;
 980
 981        vtr = read_gicreg(ICH_VTR_EL2);
 982        /* PRIbits */
 983        val = ((vtr >> 29) & 7) << ICC_CTLR_EL1_PRI_BITS_SHIFT;
 984        /* IDbits */
 985        val |= ((vtr >> 23) & 7) << ICC_CTLR_EL1_ID_BITS_SHIFT;
 986        /* SEIS */
 987        if (kvm_vgic_global_state.ich_vtr_el2 & ICH_VTR_SEIS_MASK)
 988                val |= BIT(ICC_CTLR_EL1_SEIS_SHIFT);
 989        /* A3V */
 990        val |= ((vtr >> 21) & 1) << ICC_CTLR_EL1_A3V_SHIFT;
 991        /* EOImode */
 992        val |= ((vmcr & ICH_VMCR_EOIM_MASK) >> ICH_VMCR_EOIM_SHIFT) << ICC_CTLR_EL1_EOImode_SHIFT;
 993        /* CBPR */
 994        val |= (vmcr & ICH_VMCR_CBPR_MASK) >> ICH_VMCR_CBPR_SHIFT;
 995
 996        vcpu_set_reg(vcpu, rt, val);
 997}
 998
 999static void __vgic_v3_write_ctlr(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
1000{
1001        u32 val = vcpu_get_reg(vcpu, rt);
1002
1003        if (val & ICC_CTLR_EL1_CBPR_MASK)
1004                vmcr |= ICH_VMCR_CBPR_MASK;
1005        else
1006                vmcr &= ~ICH_VMCR_CBPR_MASK;
1007
1008        if (val & ICC_CTLR_EL1_EOImode_MASK)
1009                vmcr |= ICH_VMCR_EOIM_MASK;
1010        else
1011                vmcr &= ~ICH_VMCR_EOIM_MASK;
1012
1013        write_gicreg(vmcr, ICH_VMCR_EL2);
1014}
1015
1016int __vgic_v3_perform_cpuif_access(struct kvm_vcpu *vcpu)
1017{
1018        int rt;
1019        u64 esr;
1020        u32 vmcr;
1021        void (*fn)(struct kvm_vcpu *, u32, int);
1022        bool is_read;
1023        u32 sysreg;
1024
1025        esr = kvm_vcpu_get_esr(vcpu);
1026        if (vcpu_mode_is_32bit(vcpu)) {
1027                if (!kvm_condition_valid(vcpu)) {
1028                        __kvm_skip_instr(vcpu);
1029                        return 1;
1030                }
1031
1032                sysreg = esr_cp15_to_sysreg(esr);
1033        } else {
1034                sysreg = esr_sys64_to_sysreg(esr);
1035        }
1036
1037        is_read = (esr & ESR_ELx_SYS64_ISS_DIR_MASK) == ESR_ELx_SYS64_ISS_DIR_READ;
1038
1039        switch (sysreg) {
1040        case SYS_ICC_IAR0_EL1:
1041        case SYS_ICC_IAR1_EL1:
1042                if (unlikely(!is_read))
1043                        return 0;
1044                fn = __vgic_v3_read_iar;
1045                break;
1046        case SYS_ICC_EOIR0_EL1:
1047        case SYS_ICC_EOIR1_EL1:
1048                if (unlikely(is_read))
1049                        return 0;
1050                fn = __vgic_v3_write_eoir;
1051                break;
1052        case SYS_ICC_IGRPEN1_EL1:
1053                if (is_read)
1054                        fn = __vgic_v3_read_igrpen1;
1055                else
1056                        fn = __vgic_v3_write_igrpen1;
1057                break;
1058        case SYS_ICC_BPR1_EL1:
1059                if (is_read)
1060                        fn = __vgic_v3_read_bpr1;
1061                else
1062                        fn = __vgic_v3_write_bpr1;
1063                break;
1064        case SYS_ICC_AP0Rn_EL1(0):
1065        case SYS_ICC_AP1Rn_EL1(0):
1066                if (is_read)
1067                        fn = __vgic_v3_read_apxr0;
1068                else
1069                        fn = __vgic_v3_write_apxr0;
1070                break;
1071        case SYS_ICC_AP0Rn_EL1(1):
1072        case SYS_ICC_AP1Rn_EL1(1):
1073                if (is_read)
1074                        fn = __vgic_v3_read_apxr1;
1075                else
1076                        fn = __vgic_v3_write_apxr1;
1077                break;
1078        case SYS_ICC_AP0Rn_EL1(2):
1079        case SYS_ICC_AP1Rn_EL1(2):
1080                if (is_read)
1081                        fn = __vgic_v3_read_apxr2;
1082                else
1083                        fn = __vgic_v3_write_apxr2;
1084                break;
1085        case SYS_ICC_AP0Rn_EL1(3):
1086        case SYS_ICC_AP1Rn_EL1(3):
1087                if (is_read)
1088                        fn = __vgic_v3_read_apxr3;
1089                else
1090                        fn = __vgic_v3_write_apxr3;
1091                break;
1092        case SYS_ICC_HPPIR0_EL1:
1093        case SYS_ICC_HPPIR1_EL1:
1094                if (unlikely(!is_read))
1095                        return 0;
1096                fn = __vgic_v3_read_hppir;
1097                break;
1098        case SYS_ICC_IGRPEN0_EL1:
1099                if (is_read)
1100                        fn = __vgic_v3_read_igrpen0;
1101                else
1102                        fn = __vgic_v3_write_igrpen0;
1103                break;
1104        case SYS_ICC_BPR0_EL1:
1105                if (is_read)
1106                        fn = __vgic_v3_read_bpr0;
1107                else
1108                        fn = __vgic_v3_write_bpr0;
1109                break;
1110        case SYS_ICC_DIR_EL1:
1111                if (unlikely(is_read))
1112                        return 0;
1113                fn = __vgic_v3_write_dir;
1114                break;
1115        case SYS_ICC_RPR_EL1:
1116                if (unlikely(!is_read))
1117                        return 0;
1118                fn = __vgic_v3_read_rpr;
1119                break;
1120        case SYS_ICC_CTLR_EL1:
1121                if (is_read)
1122                        fn = __vgic_v3_read_ctlr;
1123                else
1124                        fn = __vgic_v3_write_ctlr;
1125                break;
1126        case SYS_ICC_PMR_EL1:
1127                if (is_read)
1128                        fn = __vgic_v3_read_pmr;
1129                else
1130                        fn = __vgic_v3_write_pmr;
1131                break;
1132        default:
1133                return 0;
1134        }
1135
1136        vmcr = __vgic_v3_read_vmcr();
1137        rt = kvm_vcpu_sys_get_rt(vcpu);
1138        fn(vcpu, vmcr, rt);
1139
1140        __kvm_skip_instr(vcpu);
1141
1142        return 1;
1143}
1144