linux/virt/kvm/arm/arm.c
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
   4 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
   5 */
   6
   7#include <linux/bug.h>
   8#include <linux/cpu_pm.h>
   9#include <linux/errno.h>
  10#include <linux/err.h>
  11#include <linux/kvm_host.h>
  12#include <linux/list.h>
  13#include <linux/module.h>
  14#include <linux/vmalloc.h>
  15#include <linux/fs.h>
  16#include <linux/mman.h>
  17#include <linux/sched.h>
  18#include <linux/kvm.h>
  19#include <linux/kvm_irqfd.h>
  20#include <linux/irqbypass.h>
  21#include <linux/sched/stat.h>
  22#include <trace/events/kvm.h>
  23
  24#define CREATE_TRACE_POINTS
  25#include "trace.h"
  26
  27#include <linux/uaccess.h>
  28#include <asm/ptrace.h>
  29#include <asm/mman.h>
  30#include <asm/tlbflush.h>
  31#include <asm/cacheflush.h>
  32#include <asm/cpufeature.h>
  33#include <asm/virt.h>
  34#include <asm/kvm_arm.h>
  35#include <asm/kvm_asm.h>
  36#include <asm/kvm_mmu.h>
  37#include <asm/kvm_emulate.h>
  38#include <asm/kvm_coproc.h>
  39#include <asm/sections.h>
  40
  41#include <kvm/arm_hypercalls.h>
  42#include <kvm/arm_pmu.h>
  43#include <kvm/arm_psci.h>
  44
  45#ifdef REQUIRES_VIRT
  46__asm__(".arch_extension        virt");
  47#endif
  48
  49DEFINE_PER_CPU(kvm_host_data_t, kvm_host_data);
  50static DEFINE_PER_CPU(unsigned long, kvm_arm_hyp_stack_page);
  51
  52/* The VMID used in the VTTBR */
  53static atomic64_t kvm_vmid_gen = ATOMIC64_INIT(1);
  54static u32 kvm_next_vmid;
  55static DEFINE_SPINLOCK(kvm_vmid_lock);
  56
  57static bool vgic_present;
  58
  59static DEFINE_PER_CPU(unsigned char, kvm_arm_hardware_enabled);
  60DEFINE_STATIC_KEY_FALSE(userspace_irqchip_in_use);
  61
  62int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
  63{
  64        return kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE;
  65}
  66
  67int kvm_arch_hardware_setup(void *opaque)
  68{
  69        return 0;
  70}
  71
  72int kvm_arch_check_processor_compat(void *opaque)
  73{
  74        return 0;
  75}
  76
  77int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
  78                            struct kvm_enable_cap *cap)
  79{
  80        int r;
  81
  82        if (cap->flags)
  83                return -EINVAL;
  84
  85        switch (cap->cap) {
  86        case KVM_CAP_ARM_NISV_TO_USER:
  87                r = 0;
  88                kvm->arch.return_nisv_io_abort_to_user = true;
  89                break;
  90        default:
  91                r = -EINVAL;
  92                break;
  93        }
  94
  95        return r;
  96}
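
    /*
     * Illustrative userspace usage (a sketch; vm_fd is assumed to be a VM
     * file descriptor obtained from KVM_CREATE_VM):
     *
     *        struct kvm_enable_cap cap = { .cap = KVM_CAP_ARM_NISV_TO_USER };
     *
     *        if (ioctl(vm_fd, KVM_ENABLE_CAP, &cap))
     *                perror("KVM_ENABLE_CAP");
     *
     * With the capability enabled, data aborts without a valid syndrome are
     * reported to userspace as KVM_EXIT_ARM_NISV instead of failing the
     * access inside the kernel.
     */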
  97
  98/**
  99 * kvm_arch_init_vm - initializes a VM data structure
 100 * @kvm:        pointer to the KVM struct
 101 */
 102int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 103{
 104        int ret, cpu;
 105
 106        ret = kvm_arm_setup_stage2(kvm, type);
 107        if (ret)
 108                return ret;
 109
 110        kvm->arch.last_vcpu_ran = alloc_percpu(typeof(*kvm->arch.last_vcpu_ran));
 111        if (!kvm->arch.last_vcpu_ran)
 112                return -ENOMEM;
 113
 114        for_each_possible_cpu(cpu)
 115                *per_cpu_ptr(kvm->arch.last_vcpu_ran, cpu) = -1;
 116
 117        ret = kvm_alloc_stage2_pgd(kvm);
 118        if (ret)
 119                goto out_fail_alloc;
 120
 121        ret = create_hyp_mappings(kvm, kvm + 1, PAGE_HYP);
 122        if (ret)
 123                goto out_free_stage2_pgd;
 124
 125        kvm_vgic_early_init(kvm);
 126
 127        /* Mark the initial VMID generation invalid */
 128        kvm->arch.vmid.vmid_gen = 0;
 129
 130        /* The maximum number of VCPUs is limited by the host's GIC model */
 131        kvm->arch.max_vcpus = vgic_present ?
 132                                kvm_vgic_get_max_vcpus() : KVM_MAX_VCPUS;
 133
 134        return ret;
 135out_free_stage2_pgd:
 136        kvm_free_stage2_pgd(kvm);
 137out_fail_alloc:
 138        free_percpu(kvm->arch.last_vcpu_ran);
 139        kvm->arch.last_vcpu_ran = NULL;
 140        return ret;
 141}
 142
 143int kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu)
 144{
 145        return 0;
 146}
 147
 148vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
 149{
 150        return VM_FAULT_SIGBUS;
 151}
 152
 153
 154/**
 155 * kvm_arch_destroy_vm - destroy the VM data structure
 156 * @kvm:        pointer to the KVM struct
 157 */
 158void kvm_arch_destroy_vm(struct kvm *kvm)
 159{
 160        int i;
 161
 162        kvm_vgic_destroy(kvm);
 163
 164        free_percpu(kvm->arch.last_vcpu_ran);
 165        kvm->arch.last_vcpu_ran = NULL;
 166
 167        for (i = 0; i < KVM_MAX_VCPUS; ++i) {
 168                if (kvm->vcpus[i]) {
 169                        kvm_vcpu_destroy(kvm->vcpus[i]);
 170                        kvm->vcpus[i] = NULL;
 171                }
 172        }
 173        atomic_set(&kvm->online_vcpus, 0);
 174}
 175
 176int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
 177{
 178        int r;
 179        switch (ext) {
 180        case KVM_CAP_IRQCHIP:
 181                r = vgic_present;
 182                break;
 183        case KVM_CAP_IOEVENTFD:
 184        case KVM_CAP_DEVICE_CTRL:
 185        case KVM_CAP_USER_MEMORY:
 186        case KVM_CAP_SYNC_MMU:
 187        case KVM_CAP_DESTROY_MEMORY_REGION_WORKS:
 188        case KVM_CAP_ONE_REG:
 189        case KVM_CAP_ARM_PSCI:
 190        case KVM_CAP_ARM_PSCI_0_2:
 191        case KVM_CAP_READONLY_MEM:
 192        case KVM_CAP_MP_STATE:
 193        case KVM_CAP_IMMEDIATE_EXIT:
 194        case KVM_CAP_VCPU_EVENTS:
 195        case KVM_CAP_ARM_IRQ_LINE_LAYOUT_2:
 196        case KVM_CAP_ARM_NISV_TO_USER:
 197        case KVM_CAP_ARM_INJECT_EXT_DABT:
 198                r = 1;
 199                break;
 200        case KVM_CAP_ARM_SET_DEVICE_ADDR:
 201                r = 1;
 202                break;
 203        case KVM_CAP_NR_VCPUS:
 204                r = num_online_cpus();
 205                break;
 206        case KVM_CAP_MAX_VCPUS:
 207                r = KVM_MAX_VCPUS;
 208                break;
 209        case KVM_CAP_MAX_VCPU_ID:
 210                r = KVM_MAX_VCPU_ID;
 211                break;
 212        case KVM_CAP_MSI_DEVID:
 213                if (!kvm)
 214                        r = -EINVAL;
 215                else
 216                        r = kvm->arch.vgic.msis_require_devid;
 217                break;
 218        case KVM_CAP_ARM_USER_IRQ:
 219                /*
 220                 * 1: EL1_VTIMER, EL1_PTIMER, and PMU.
 221                 * (bump this number if adding more devices)
 222                 */
 223                r = 1;
 224                break;
 225        default:
 226                r = kvm_arch_vm_ioctl_check_extension(kvm, ext);
 227                break;
 228        }
 229        return r;
 230}
 231
 232long kvm_arch_dev_ioctl(struct file *filp,
 233                        unsigned int ioctl, unsigned long arg)
 234{
 235        return -EINVAL;
 236}
 237
 238struct kvm *kvm_arch_alloc_vm(void)
 239{
 240        if (!has_vhe())
 241                return kzalloc(sizeof(struct kvm), GFP_KERNEL);
 242
 243        return vzalloc(sizeof(struct kvm));
 244}
 245
 246void kvm_arch_free_vm(struct kvm *kvm)
 247{
 248        if (!has_vhe())
 249                kfree(kvm);
 250        else
 251                vfree(kvm);
 252}
 253
 254int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id)
 255{
 256        if (irqchip_in_kernel(kvm) && vgic_initialized(kvm))
 257                return -EBUSY;
 258
 259        if (id >= kvm->arch.max_vcpus)
 260                return -EINVAL;
 261
 262        return 0;
 263}
 264
 265int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
 266{
 267        int err;
 268
 269        /* Force users to call KVM_ARM_VCPU_INIT */
 270        vcpu->arch.target = -1;
 271        bitmap_zero(vcpu->arch.features, KVM_VCPU_MAX_FEATURES);
 272
 273        /* Set up the timer */
 274        kvm_timer_vcpu_init(vcpu);
 275
 276        kvm_pmu_vcpu_init(vcpu);
 277
 278        kvm_arm_reset_debug_ptr(vcpu);
 279
 280        kvm_arm_pvtime_vcpu_init(&vcpu->arch);
 281
 282        err = kvm_vgic_vcpu_init(vcpu);
 283        if (err)
 284                return err;
 285
 286        return create_hyp_mappings(vcpu, vcpu + 1, PAGE_HYP);
 287}
 288
 289void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
 290{
 291}
 292
 293void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
 294{
 295        if (vcpu->arch.has_run_once && unlikely(!irqchip_in_kernel(vcpu->kvm)))
 296                static_branch_dec(&userspace_irqchip_in_use);
 297
 298        kvm_mmu_free_memory_caches(vcpu);
 299        kvm_timer_vcpu_terminate(vcpu);
 300        kvm_pmu_vcpu_destroy(vcpu);
 301
 302        kvm_arm_vcpu_destroy(vcpu);
 303}
 304
 305int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
 306{
 307        return kvm_timer_is_pending(vcpu);
 308}
 309
 310void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu)
 311{
 312        /*
 313         * If we're about to block (most likely because we've just hit a
 314         * WFI), we need to sync back the state of the GIC CPU interface
 315         * so that we have the latest PMR and group enables. This ensures
 316         * that kvm_arch_vcpu_runnable has up-to-date data to decide
 317         * whether we have pending interrupts.
 318         *
 319         * For the same reason, we want to tell GICv4 that we need
 320         * doorbells to be signalled, should an interrupt become pending.
 321         */
 322        preempt_disable();
 323        kvm_vgic_vmcr_sync(vcpu);
 324        vgic_v4_put(vcpu, true);
 325        preempt_enable();
 326}
 327
 328void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu)
 329{
 330        preempt_disable();
 331        vgic_v4_load(vcpu);
 332        preempt_enable();
 333}
 334
 335void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 336{
 337        int *last_ran;
 338        kvm_host_data_t *cpu_data;
 339
 340        last_ran = this_cpu_ptr(vcpu->kvm->arch.last_vcpu_ran);
 341        cpu_data = this_cpu_ptr(&kvm_host_data);
 342
 343        /*
 344         * We might get preempted before the vCPU actually runs, but
 345         * over-invalidation doesn't affect correctness.
 346         */
 347        if (*last_ran != vcpu->vcpu_id) {
 348                kvm_call_hyp(__kvm_tlb_flush_local_vmid, vcpu);
 349                *last_ran = vcpu->vcpu_id;
 350        }
 351
 352        vcpu->cpu = cpu;
 353        vcpu->arch.host_cpu_context = &cpu_data->host_ctxt;
 354
 355        kvm_vgic_load(vcpu);
 356        kvm_timer_vcpu_load(vcpu);
 357        kvm_vcpu_load_sysregs(vcpu);
 358        kvm_arch_vcpu_load_fp(vcpu);
 359        kvm_vcpu_pmu_restore_guest(vcpu);
 360        if (kvm_arm_is_pvtime_enabled(&vcpu->arch))
 361                kvm_make_request(KVM_REQ_RECORD_STEAL, vcpu);
 362
 363        if (single_task_running())
 364                vcpu_clear_wfx_traps(vcpu);
 365        else
 366                vcpu_set_wfx_traps(vcpu);
 367
 368        vcpu_ptrauth_setup_lazy(vcpu);
 369}
 370
 371void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
 372{
 373        kvm_arch_vcpu_put_fp(vcpu);
 374        kvm_vcpu_put_sysregs(vcpu);
 375        kvm_timer_vcpu_put(vcpu);
 376        kvm_vgic_put(vcpu);
 377        kvm_vcpu_pmu_restore_host(vcpu);
 378
 379        vcpu->cpu = -1;
 380}
 381
 382static void vcpu_power_off(struct kvm_vcpu *vcpu)
 383{
 384        vcpu->arch.power_off = true;
 385        kvm_make_request(KVM_REQ_SLEEP, vcpu);
 386        kvm_vcpu_kick(vcpu);
 387}
 388
 389int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
 390                                    struct kvm_mp_state *mp_state)
 391{
 392        if (vcpu->arch.power_off)
 393                mp_state->mp_state = KVM_MP_STATE_STOPPED;
 394        else
 395                mp_state->mp_state = KVM_MP_STATE_RUNNABLE;
 396
 397        return 0;
 398}
 399
 400int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
 401                                    struct kvm_mp_state *mp_state)
 402{
 403        int ret = 0;
 404
 405        switch (mp_state->mp_state) {
 406        case KVM_MP_STATE_RUNNABLE:
 407                vcpu->arch.power_off = false;
 408                break;
 409        case KVM_MP_STATE_STOPPED:
 410                vcpu_power_off(vcpu);
 411                break;
 412        default:
 413                ret = -EINVAL;
 414        }
 415
 416        return ret;
 417}
 418
 419/**
 420 * kvm_arch_vcpu_runnable - determine if the vcpu can be scheduled
 421 * @v:          The VCPU pointer
 422 *
 423 * If the guest CPU is not waiting for interrupts or an interrupt line is
 424 * asserted, the CPU is by definition runnable.
 425 */
 426int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
 427{
 428        bool irq_lines = *vcpu_hcr(v) & (HCR_VI | HCR_VF);
 429        return ((irq_lines || kvm_vgic_vcpu_pending_irq(v))
 430                && !v->arch.power_off && !v->arch.pause);
 431}
 432
 433bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
 434{
 435        return vcpu_mode_priv(vcpu);
 436}
 437
 438/* Just ensure a guest exit from a particular CPU */
 439static void exit_vm_noop(void *info)
 440{
 441}
 442
 443void force_vm_exit(const cpumask_t *mask)
 444{
 445        preempt_disable();
 446        smp_call_function_many(mask, exit_vm_noop, NULL, true);
 447        preempt_enable();
 448}
 449
 450/**
 451 * need_new_vmid_gen - check that the VMID is still valid
 452 * @vmid: The VMID to check
 453 *
 454 * return true if there is a new generation of VMIDs being used
 455 *
 456 * The hardware supports a limited set of values with the value zero reserved
 457 * for the host, so we check if an assigned value belongs to a previous
 458 * generation, which requires us to assign a new value. If we're the
 459 * first to use a VMID for the new generation, we must flush necessary caches
 460 * and TLBs on all CPUs.
 461 */
 462static bool need_new_vmid_gen(struct kvm_vmid *vmid)
 463{
 464        u64 current_vmid_gen = atomic64_read(&kvm_vmid_gen);
 465        smp_rmb(); /* Orders read of kvm_vmid_gen and kvm->arch.vmid */
 466        return unlikely(READ_ONCE(vmid->vmid_gen) != current_vmid_gen);
 467}
 468
 469/**
 470 * update_vmid - Update the vmid with a valid VMID for the current generation
 471 *
 472 * @vmid: The stage-2 VMID information struct
 473 */
 474static void update_vmid(struct kvm_vmid *vmid)
 475{
 476        if (!need_new_vmid_gen(vmid))
 477                return;
 478
 479        spin_lock(&kvm_vmid_lock);
 480
 481        /*
 482         * We need to re-check the vmid_gen here to ensure that if another vcpu
 483         * already allocated a valid vmid for this vm, then this vcpu should
 484         * use the same vmid.
 485         */
 486        if (!need_new_vmid_gen(vmid)) {
 487                spin_unlock(&kvm_vmid_lock);
 488                return;
 489        }
 490
 491        /* First user of a new VMID generation? */
 492        if (unlikely(kvm_next_vmid == 0)) {
 493                atomic64_inc(&kvm_vmid_gen);
 494                kvm_next_vmid = 1;
 495
 496                /*
 497                 * On SMP we know no other CPUs can use this CPU's or each
 498                 * other's VMID after force_vm_exit returns since the
 499                 * kvm_vmid_lock blocks them from reentry to the guest.
 500                 */
 501                force_vm_exit(cpu_all_mask);
 502                /*
 503                 * Now broadcast TLB + ICACHE invalidation over the inner
 504                 * shareable domain to make sure all data structures are
 505                 * clean.
 506                 */
 507                kvm_call_hyp(__kvm_flush_vm_context);
 508        }
 509
 510        vmid->vmid = kvm_next_vmid;
 511        kvm_next_vmid++;
 512        kvm_next_vmid &= (1 << kvm_get_vmid_bits()) - 1;
 513
 514        smp_wmb();
 515        WRITE_ONCE(vmid->vmid_gen, atomic64_read(&kvm_vmid_gen));
 516
 517        spin_unlock(&kvm_vmid_lock);
 518}
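
    /*
     * Worked example of the scheme above, assuming 8-bit VMIDs: values
     * 1..255 are handed out in turn (VMID 0 is reserved for the host).
     * Once the 255th value has been allocated, kvm_next_vmid wraps to 0,
     * so the next allocation bumps kvm_vmid_gen, forces every CPU out of
     * guest mode and invalidates TLBs and icaches before numbering starts
     * again at 1. Any vCPU whose cached vmid_gen no longer matches simply
     * takes this slow path again on its next guest entry.
     */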
 519
 520static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu)
 521{
 522        struct kvm *kvm = vcpu->kvm;
 523        int ret = 0;
 524
 525        if (likely(vcpu->arch.has_run_once))
 526                return 0;
 527
 528        if (!kvm_arm_vcpu_is_finalized(vcpu))
 529                return -EPERM;
 530
 531        vcpu->arch.has_run_once = true;
 532
 533        if (likely(irqchip_in_kernel(kvm))) {
 534                /*
 535                 * Map the VGIC hardware resources before running a vcpu the
 536                 * first time on this VM.
 537                 */
 538                if (unlikely(!vgic_ready(kvm))) {
 539                        ret = kvm_vgic_map_resources(kvm);
 540                        if (ret)
 541                                return ret;
 542                }
 543        } else {
 544                /*
 545                 * Tell the rest of the code that there are userspace irqchip
 546                 * VMs in the wild.
 547                 */
 548                static_branch_inc(&userspace_irqchip_in_use);
 549        }
 550
 551        ret = kvm_timer_enable(vcpu);
 552        if (ret)
 553                return ret;
 554
 555        ret = kvm_arm_pmu_v3_enable(vcpu);
 556
 557        return ret;
 558}
 559
 560bool kvm_arch_intc_initialized(struct kvm *kvm)
 561{
 562        return vgic_initialized(kvm);
 563}
 564
 565void kvm_arm_halt_guest(struct kvm *kvm)
 566{
 567        int i;
 568        struct kvm_vcpu *vcpu;
 569
 570        kvm_for_each_vcpu(i, vcpu, kvm)
 571                vcpu->arch.pause = true;
 572        kvm_make_all_cpus_request(kvm, KVM_REQ_SLEEP);
 573}
 574
 575void kvm_arm_resume_guest(struct kvm *kvm)
 576{
 577        int i;
 578        struct kvm_vcpu *vcpu;
 579
 580        kvm_for_each_vcpu(i, vcpu, kvm) {
 581                vcpu->arch.pause = false;
 582                swake_up_one(kvm_arch_vcpu_wq(vcpu));
 583        }
 584}
 585
 586static void vcpu_req_sleep(struct kvm_vcpu *vcpu)
 587{
 588        struct swait_queue_head *wq = kvm_arch_vcpu_wq(vcpu);
 589
 590        swait_event_interruptible_exclusive(*wq, ((!vcpu->arch.power_off) &&
 591                                       (!vcpu->arch.pause)));
 592
 593        if (vcpu->arch.power_off || vcpu->arch.pause) {
 594                /* Awaken to handle a signal, request we sleep again later. */
 595                kvm_make_request(KVM_REQ_SLEEP, vcpu);
 596        }
 597
 598        /*
 599         * Make sure we will observe a potential reset request if we've
 600         * observed a change to the power state. Pairs with the smp_wmb() in
 601         * kvm_psci_vcpu_on().
 602         */
 603        smp_rmb();
 604}
 605
 606static int kvm_vcpu_initialized(struct kvm_vcpu *vcpu)
 607{
 608        return vcpu->arch.target >= 0;
 609}
 610
 611static void check_vcpu_requests(struct kvm_vcpu *vcpu)
 612{
 613        if (kvm_request_pending(vcpu)) {
 614                if (kvm_check_request(KVM_REQ_SLEEP, vcpu))
 615                        vcpu_req_sleep(vcpu);
 616
 617                if (kvm_check_request(KVM_REQ_VCPU_RESET, vcpu))
 618                        kvm_reset_vcpu(vcpu);
 619
 620                /*
 621                 * Clear IRQ_PENDING requests that were made to guarantee
 622                 * that a VCPU sees new virtual interrupts.
 623                 */
 624                kvm_check_request(KVM_REQ_IRQ_PENDING, vcpu);
 625
 626                if (kvm_check_request(KVM_REQ_RECORD_STEAL, vcpu))
 627                        kvm_update_stolen_time(vcpu);
 628
 629                if (kvm_check_request(KVM_REQ_RELOAD_GICv4, vcpu)) {
 630                        /* The distributor enable bits were changed */
 631                        preempt_disable();
 632                        vgic_v4_put(vcpu, false);
 633                        vgic_v4_load(vcpu);
 634                        preempt_enable();
 635                }
 636        }
 637}
 638
 639/**
 640 * kvm_arch_vcpu_ioctl_run - the main VCPU run function to execute guest code
 641 * @vcpu:       The VCPU pointer
 642 * @run:        The kvm_run structure pointer used for userspace state exchange
 643 *
 644 * This function is called through the KVM_RUN ioctl from user space. It
 645 * executes guest code in a loop until the time slice for the process is
 646 * used up or emulation is needed from user space, in which case it returns
 647 * 0 with the kvm_run structure filled in with the data required for the
 648 * requested emulation.
 649 */
 650int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
 651{
 652        int ret;
 653
 654        if (unlikely(!kvm_vcpu_initialized(vcpu)))
 655                return -ENOEXEC;
 656
 657        ret = kvm_vcpu_first_run_init(vcpu);
 658        if (ret)
 659                return ret;
 660
 661        if (run->exit_reason == KVM_EXIT_MMIO) {
 662                ret = kvm_handle_mmio_return(vcpu, vcpu->run);
 663                if (ret)
 664                        return ret;
 665        }
 666
 667        if (run->immediate_exit)
 668                return -EINTR;
 669
 670        vcpu_load(vcpu);
 671
 672        kvm_sigset_activate(vcpu);
 673
 674        ret = 1;
 675        run->exit_reason = KVM_EXIT_UNKNOWN;
 676        while (ret > 0) {
 677                /*
 678                 * Check conditions before entering the guest
 679                 */
 680                cond_resched();
 681
 682                update_vmid(&vcpu->kvm->arch.vmid);
 683
 684                check_vcpu_requests(vcpu);
 685
 686                /*
 687                 * Preparing the interrupts to be injected also
 688                 * involves poking the GIC, which must be done in a
 689                 * non-preemptible context.
 690                 */
 691                preempt_disable();
 692
 693                kvm_pmu_flush_hwstate(vcpu);
 694
 695                local_irq_disable();
 696
 697                kvm_vgic_flush_hwstate(vcpu);
 698
 699                /*
 700                 * Exit if we have a signal pending so that we can deliver the
 701                 * signal to user space.
 702                 */
 703                if (signal_pending(current)) {
 704                        ret = -EINTR;
 705                        run->exit_reason = KVM_EXIT_INTR;
 706                }
 707
 708                /*
 709                 * If we're using a userspace irqchip, then check if we need
 710                 * to tell a userspace irqchip about timer or PMU level
 711                 * changes and if so, exit to userspace (the actual level
 712                 * state gets updated in kvm_timer_update_run and
 713                 * kvm_pmu_update_run below).
 714                 */
 715                if (static_branch_unlikely(&userspace_irqchip_in_use)) {
 716                        if (kvm_timer_should_notify_user(vcpu) ||
 717                            kvm_pmu_should_notify_user(vcpu)) {
 718                                ret = -EINTR;
 719                                run->exit_reason = KVM_EXIT_INTR;
 720                        }
 721                }
 722
 723                /*
 724                 * Ensure we set mode to IN_GUEST_MODE after we disable
 725                 * interrupts and before the final VCPU requests check.
 726                 * See the comment in kvm_vcpu_exiting_guest_mode() and
 727                 * Documentation/virt/kvm/vcpu-requests.rst
 728                 */
 729                smp_store_mb(vcpu->mode, IN_GUEST_MODE);
 730
 731                if (ret <= 0 || need_new_vmid_gen(&vcpu->kvm->arch.vmid) ||
 732                    kvm_request_pending(vcpu)) {
 733                        vcpu->mode = OUTSIDE_GUEST_MODE;
 734                        isb(); /* Ensure work in x_flush_hwstate is committed */
 735                        kvm_pmu_sync_hwstate(vcpu);
 736                        if (static_branch_unlikely(&userspace_irqchip_in_use))
 737                                kvm_timer_sync_hwstate(vcpu);
 738                        kvm_vgic_sync_hwstate(vcpu);
 739                        local_irq_enable();
 740                        preempt_enable();
 741                        continue;
 742                }
 743
 744                kvm_arm_setup_debug(vcpu);
 745
 746                /**************************************************************
 747                 * Enter the guest
 748                 */
 749                trace_kvm_entry(*vcpu_pc(vcpu));
 750                guest_enter_irqoff();
 751
 752                if (has_vhe()) {
 753                        ret = kvm_vcpu_run_vhe(vcpu);
 754                } else {
 755                        ret = kvm_call_hyp_ret(__kvm_vcpu_run_nvhe, vcpu);
 756                }
 757
 758                vcpu->mode = OUTSIDE_GUEST_MODE;
 759                vcpu->stat.exits++;
 760                /*
 761                 * Back from guest
 762                 *************************************************************/
 763
 764                kvm_arm_clear_debug(vcpu);
 765
 766                /*
 767                 * We must sync the PMU state before the vgic state so
 768                 * that the vgic can properly sample the updated state of the
 769                 * interrupt line.
 770                 */
 771                kvm_pmu_sync_hwstate(vcpu);
 772
 773                /*
 774                 * Sync the vgic state before syncing the timer state because
 775                 * the timer code needs to know if the virtual timer
 776                 * interrupts are active.
 777                 */
 778                kvm_vgic_sync_hwstate(vcpu);
 779
 780                /*
 781                 * Sync the timer hardware state before enabling interrupts as
 782                 * we don't want vtimer interrupts to race with syncing the
 783                 * timer virtual interrupt state.
 784                 */
 785                if (static_branch_unlikely(&userspace_irqchip_in_use))
 786                        kvm_timer_sync_hwstate(vcpu);
 787
 788                kvm_arch_vcpu_ctxsync_fp(vcpu);
 789
 790                /*
 791         * We may have taken a host interrupt in HYP mode (i.e.
 792                 * while executing the guest). This interrupt is still
 793                 * pending, as we haven't serviced it yet!
 794                 *
 795                 * We're now back in SVC mode, with interrupts
 796                 * disabled.  Enabling the interrupts now will have
 797                 * the effect of taking the interrupt again, in SVC
 798                 * mode this time.
 799                 */
 800                local_irq_enable();
 801
 802                /*
 803                 * We do local_irq_enable() before calling guest_exit() so
 804                 * that if a timer interrupt hits while running the guest we
 805                 * account that tick as being spent in the guest.  We enable
 806                 * preemption after calling guest_exit() so that if we get
 807                 * preempted we make sure ticks after that are not counted as
 808                 * guest time.
 809                 */
 810                guest_exit();
 811                trace_kvm_exit(ret, kvm_vcpu_trap_get_class(vcpu), *vcpu_pc(vcpu));
 812
 813                /* Exit types that need handling before we can be preempted */
 814                handle_exit_early(vcpu, run, ret);
 815
 816                preempt_enable();
 817
 818                ret = handle_exit(vcpu, run, ret);
 819        }
 820
 821        /* Tell userspace about in-kernel device output levels */
 822        if (unlikely(!irqchip_in_kernel(vcpu->kvm))) {
 823                kvm_timer_update_run(vcpu);
 824                kvm_pmu_update_run(vcpu);
 825        }
 826
 827        kvm_sigset_deactivate(vcpu);
 828
 829        vcpu_put(vcpu);
 830        return ret;
 831}
 832
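    /*
     * vcpu_interrupt_line() below is only reachable with a userspace
     * irqchip (the KVM_ARM_IRQ_TYPE_CPU case in kvm_vm_ioctl_irq_line()
     * rejects in-kernel vgic setups); it latches the virtual IRQ/FIQ
     * pending state directly into the HCR VI/VF bits rather than going
     * through the vgic.
     */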
 833static int vcpu_interrupt_line(struct kvm_vcpu *vcpu, int number, bool level)
 834{
 835        int bit_index;
 836        bool set;
 837        unsigned long *hcr;
 838
 839        if (number == KVM_ARM_IRQ_CPU_IRQ)
 840                bit_index = __ffs(HCR_VI);
 841        else /* KVM_ARM_IRQ_CPU_FIQ */
 842                bit_index = __ffs(HCR_VF);
 843
 844        hcr = vcpu_hcr(vcpu);
 845        if (level)
 846                set = test_and_set_bit(bit_index, hcr);
 847        else
 848                set = test_and_clear_bit(bit_index, hcr);
 849
 850        /*
 851         * If we didn't change anything, no need to wake up or kick other CPUs
 852         */
 853        if (set == level)
 854                return 0;
 855
 856        /*
 857         * The vcpu's virtual IRQ/FIQ state was updated, wake up sleeping VCPUs and
 858         * trigger a world-switch round on the running physical CPU to set the
 859         * virtual IRQ/FIQ fields in the HCR appropriately.
 860         */
 861        kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
 862        kvm_vcpu_kick(vcpu);
 863
 864        return 0;
 865}
 866
 867int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level,
 868                          bool line_status)
 869{
 870        u32 irq = irq_level->irq;
 871        unsigned int irq_type, vcpu_idx, irq_num;
 872        int nrcpus = atomic_read(&kvm->online_vcpus);
 873        struct kvm_vcpu *vcpu = NULL;
 874        bool level = irq_level->level;
 875
 876        irq_type = (irq >> KVM_ARM_IRQ_TYPE_SHIFT) & KVM_ARM_IRQ_TYPE_MASK;
 877        vcpu_idx = (irq >> KVM_ARM_IRQ_VCPU_SHIFT) & KVM_ARM_IRQ_VCPU_MASK;
 878        vcpu_idx += ((irq >> KVM_ARM_IRQ_VCPU2_SHIFT) & KVM_ARM_IRQ_VCPU2_MASK) * (KVM_ARM_IRQ_VCPU_MASK + 1);
 879        irq_num = (irq >> KVM_ARM_IRQ_NUM_SHIFT) & KVM_ARM_IRQ_NUM_MASK;
 880
 881        trace_kvm_irq_line(irq_type, vcpu_idx, irq_num, irq_level->level);
 882
 883        switch (irq_type) {
 884        case KVM_ARM_IRQ_TYPE_CPU:
 885                if (irqchip_in_kernel(kvm))
 886                        return -ENXIO;
 887
 888                if (vcpu_idx >= nrcpus)
 889                        return -EINVAL;
 890
 891                vcpu = kvm_get_vcpu(kvm, vcpu_idx);
 892                if (!vcpu)
 893                        return -EINVAL;
 894
 895                if (irq_num > KVM_ARM_IRQ_CPU_FIQ)
 896                        return -EINVAL;
 897
 898                return vcpu_interrupt_line(vcpu, irq_num, level);
 899        case KVM_ARM_IRQ_TYPE_PPI:
 900                if (!irqchip_in_kernel(kvm))
 901                        return -ENXIO;
 902
 903                if (vcpu_idx >= nrcpus)
 904                        return -EINVAL;
 905
 906                vcpu = kvm_get_vcpu(kvm, vcpu_idx);
 907                if (!vcpu)
 908                        return -EINVAL;
 909
 910                if (irq_num < VGIC_NR_SGIS || irq_num >= VGIC_NR_PRIVATE_IRQS)
 911                        return -EINVAL;
 912
 913                return kvm_vgic_inject_irq(kvm, vcpu->vcpu_id, irq_num, level, NULL);
 914        case KVM_ARM_IRQ_TYPE_SPI:
 915                if (!irqchip_in_kernel(kvm))
 916                        return -ENXIO;
 917
 918                if (irq_num < VGIC_NR_PRIVATE_IRQS)
 919                        return -EINVAL;
 920
 921                return kvm_vgic_inject_irq(kvm, 0, irq_num, level, NULL);
 922        }
 923
 924        return -EINVAL;
 925}
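
    /*
     * Illustrative userspace usage of KVM_IRQ_LINE as handled above (a
     * sketch; vm_fd is assumed): the __u32 irq field packs irq_type, vcpu
     * index and irq number, and with KVM_CAP_ARM_IRQ_LINE_LAYOUT_2 the full
     * vcpu index is vcpu2 * (KVM_ARM_IRQ_VCPU_MASK + 1) + vcpu. Asserting
     * SPI 32 against an in-kernel vgic might look like:
     *
     *        struct kvm_irq_level irq = {
     *                .irq   = (KVM_ARM_IRQ_TYPE_SPI << KVM_ARM_IRQ_TYPE_SHIFT) | 32,
     *                .level = 1,
     *        };
     *
     *        ioctl(vm_fd, KVM_IRQ_LINE, &irq);
     */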
 926
 927static int kvm_vcpu_set_target(struct kvm_vcpu *vcpu,
 928                               const struct kvm_vcpu_init *init)
 929{
 930        unsigned int i, ret;
 931        int phys_target = kvm_target_cpu();
 932
 933        if (init->target != phys_target)
 934                return -EINVAL;
 935
 936        /*
 937         * Secondary and subsequent calls to KVM_ARM_VCPU_INIT must
 938         * use the same target.
 939         */
 940        if (vcpu->arch.target != -1 && vcpu->arch.target != init->target)
 941                return -EINVAL;
 942
 943        /* -ENOENT for unknown features, -EINVAL for invalid combinations. */
 944        for (i = 0; i < sizeof(init->features) * 8; i++) {
 945                bool set = (init->features[i / 32] & (1 << (i % 32)));
 946
 947                if (set && i >= KVM_VCPU_MAX_FEATURES)
 948                        return -ENOENT;
 949
 950                /*
 951                 * Secondary and subsequent calls to KVM_ARM_VCPU_INIT must
 952                 * use the same feature set.
 953                 */
 954                if (vcpu->arch.target != -1 && i < KVM_VCPU_MAX_FEATURES &&
 955                    test_bit(i, vcpu->arch.features) != set)
 956                        return -EINVAL;
 957
 958                if (set)
 959                        set_bit(i, vcpu->arch.features);
 960        }
 961
 962        vcpu->arch.target = phys_target;
 963
 964        /* Now we know what it is, we can reset it. */
 965        ret = kvm_reset_vcpu(vcpu);
 966        if (ret) {
 967                vcpu->arch.target = -1;
 968                bitmap_zero(vcpu->arch.features, KVM_VCPU_MAX_FEATURES);
 969        }
 970
 971        return ret;
 972}
 973
 974static int kvm_arch_vcpu_ioctl_vcpu_init(struct kvm_vcpu *vcpu,
 975                                         struct kvm_vcpu_init *init)
 976{
 977        int ret;
 978
 979        ret = kvm_vcpu_set_target(vcpu, init);
 980        if (ret)
 981                return ret;
 982
 983        /*
 984         * Ensure a rebooted VM will fault in RAM pages and detect if the
 985         * guest MMU is turned off and flush the caches as needed.
 986         */
 987        if (vcpu->arch.has_run_once)
 988                stage2_unmap_vm(vcpu->kvm);
 989
 990        vcpu_reset_hcr(vcpu);
 991
 992        /*
 993         * Handle the "start in power-off" case.
 994         */
 995        if (test_bit(KVM_ARM_VCPU_POWER_OFF, vcpu->arch.features))
 996                vcpu_power_off(vcpu);
 997        else
 998                vcpu->arch.power_off = false;
 999
1000        return 0;
1001}
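
    /*
     * Illustrative userspace flow for KVM_ARM_VCPU_INIT as handled above (a
     * sketch; vm_fd and vcpu_fd are assumed): userspace usually queries the
     * preferred target for the VM and then initializes each vCPU with it,
     * optionally setting feature bits such as KVM_ARM_VCPU_POWER_OFF for
     * secondary vCPUs that should start powered off:
     *
     *        struct kvm_vcpu_init init;
     *
     *        ioctl(vm_fd, KVM_ARM_PREFERRED_TARGET, &init);
     *        init.features[0] |= 1 << KVM_ARM_VCPU_POWER_OFF;
     *        ioctl(vcpu_fd, KVM_ARM_VCPU_INIT, &init);
     */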
1002
1003static int kvm_arm_vcpu_set_attr(struct kvm_vcpu *vcpu,
1004                                 struct kvm_device_attr *attr)
1005{
1006        int ret = -ENXIO;
1007
1008        switch (attr->group) {
1009        default:
1010                ret = kvm_arm_vcpu_arch_set_attr(vcpu, attr);
1011                break;
1012        }
1013
1014        return ret;
1015}
1016
1017static int kvm_arm_vcpu_get_attr(struct kvm_vcpu *vcpu,
1018                                 struct kvm_device_attr *attr)
1019{
1020        int ret = -ENXIO;
1021
1022        switch (attr->group) {
1023        default:
1024                ret = kvm_arm_vcpu_arch_get_attr(vcpu, attr);
1025                break;
1026        }
1027
1028        return ret;
1029}
1030
1031static int kvm_arm_vcpu_has_attr(struct kvm_vcpu *vcpu,
1032                                 struct kvm_device_attr *attr)
1033{
1034        int ret = -ENXIO;
1035
1036        switch (attr->group) {
1037        default:
1038                ret = kvm_arm_vcpu_arch_has_attr(vcpu, attr);
1039                break;
1040        }
1041
1042        return ret;
1043}
1044
1045static int kvm_arm_vcpu_get_events(struct kvm_vcpu *vcpu,
1046                                   struct kvm_vcpu_events *events)
1047{
1048        memset(events, 0, sizeof(*events));
1049
1050        return __kvm_arm_vcpu_get_events(vcpu, events);
1051}
1052
1053static int kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
1054                                   struct kvm_vcpu_events *events)
1055{
1056        int i;
1057
1058        /* check whether the reserved field is zero */
1059        for (i = 0; i < ARRAY_SIZE(events->reserved); i++)
1060                if (events->reserved[i])
1061                        return -EINVAL;
1062
1063        /* check whether the pad field is zero */
1064        for (i = 0; i < ARRAY_SIZE(events->exception.pad); i++)
1065                if (events->exception.pad[i])
1066                        return -EINVAL;
1067
1068        return __kvm_arm_vcpu_set_events(vcpu, events);
1069}
1070
1071long kvm_arch_vcpu_ioctl(struct file *filp,
1072                         unsigned int ioctl, unsigned long arg)
1073{
1074        struct kvm_vcpu *vcpu = filp->private_data;
1075        void __user *argp = (void __user *)arg;
1076        struct kvm_device_attr attr;
1077        long r;
1078
1079        switch (ioctl) {
1080        case KVM_ARM_VCPU_INIT: {
1081                struct kvm_vcpu_init init;
1082
1083                r = -EFAULT;
1084                if (copy_from_user(&init, argp, sizeof(init)))
1085                        break;
1086
1087                r = kvm_arch_vcpu_ioctl_vcpu_init(vcpu, &init);
1088                break;
1089        }
1090        case KVM_SET_ONE_REG:
1091        case KVM_GET_ONE_REG: {
1092                struct kvm_one_reg reg;
1093
1094                r = -ENOEXEC;
1095                if (unlikely(!kvm_vcpu_initialized(vcpu)))
1096                        break;
1097
1098                r = -EFAULT;
1099                if (copy_from_user(&reg, argp, sizeof(reg)))
1100                        break;
1101
1102                if (ioctl == KVM_SET_ONE_REG)
1103                        r = kvm_arm_set_reg(vcpu, &reg);
1104                else
1105                        r = kvm_arm_get_reg(vcpu, &reg);
1106                break;
1107        }
1108        case KVM_GET_REG_LIST: {
1109                struct kvm_reg_list __user *user_list = argp;
1110                struct kvm_reg_list reg_list;
1111                unsigned n;
1112
1113                r = -ENOEXEC;
1114                if (unlikely(!kvm_vcpu_initialized(vcpu)))
1115                        break;
1116
1117                r = -EPERM;
1118                if (!kvm_arm_vcpu_is_finalized(vcpu))
1119                        break;
1120
1121                r = -EFAULT;
1122                if (copy_from_user(&reg_list, user_list, sizeof(reg_list)))
1123                        break;
1124                n = reg_list.n;
1125                reg_list.n = kvm_arm_num_regs(vcpu);
1126                if (copy_to_user(user_list, &reg_list, sizeof(reg_list)))
1127                        break;
1128                r = -E2BIG;
1129                if (n < reg_list.n)
1130                        break;
1131                r = kvm_arm_copy_reg_indices(vcpu, user_list->reg);
1132                break;
1133        }
1134        case KVM_SET_DEVICE_ATTR: {
1135                r = -EFAULT;
1136                if (copy_from_user(&attr, argp, sizeof(attr)))
1137                        break;
1138                r = kvm_arm_vcpu_set_attr(vcpu, &attr);
1139                break;
1140        }
1141        case KVM_GET_DEVICE_ATTR: {
1142                r = -EFAULT;
1143                if (copy_from_user(&attr, argp, sizeof(attr)))
1144                        break;
1145                r = kvm_arm_vcpu_get_attr(vcpu, &attr);
1146                break;
1147        }
1148        case KVM_HAS_DEVICE_ATTR: {
1149                r = -EFAULT;
1150                if (copy_from_user(&attr, argp, sizeof(attr)))
1151                        break;
1152                r = kvm_arm_vcpu_has_attr(vcpu, &attr);
1153                break;
1154        }
1155        case KVM_GET_VCPU_EVENTS: {
1156                struct kvm_vcpu_events events;
1157
1158                if (kvm_arm_vcpu_get_events(vcpu, &events))
1159                        return -EINVAL;
1160
1161                if (copy_to_user(argp, &events, sizeof(events)))
1162                        return -EFAULT;
1163
1164                return 0;
1165        }
1166        case KVM_SET_VCPU_EVENTS: {
1167                struct kvm_vcpu_events events;
1168
1169                if (copy_from_user(&events, argp, sizeof(events)))
1170                        return -EFAULT;
1171
1172                return kvm_arm_vcpu_set_events(vcpu, &events);
1173        }
1174        case KVM_ARM_VCPU_FINALIZE: {
1175                int what;
1176
1177                if (!kvm_vcpu_initialized(vcpu))
1178                        return -ENOEXEC;
1179
1180                if (get_user(what, (const int __user *)argp))
1181                        return -EFAULT;
1182
1183                return kvm_arm_vcpu_finalize(vcpu, what);
1184        }
1185        default:
1186                r = -EINVAL;
1187        }
1188
1189        return r;
1190}
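
    /*
     * Illustrative userspace usage of KVM_GET_REG_LIST as handled above (a
     * sketch; vcpu_fd is assumed): the ioctl is typically issued twice,
     * first with n = 0 so the kernel reports the register count and fails
     * with E2BIG, then with a buffer sized for all indices:
     *
     *        struct kvm_reg_list probe = { .n = 0 }, *list;
     *
     *        ioctl(vcpu_fd, KVM_GET_REG_LIST, &probe);
     *        list = calloc(1, sizeof(*list) + probe.n * sizeof(__u64));
     *        list->n = probe.n;
     *        ioctl(vcpu_fd, KVM_GET_REG_LIST, list);
     */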
1191
1192void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot)
1193{
1194
1195}
1196
1197void kvm_arch_flush_remote_tlbs_memslot(struct kvm *kvm,
1198                                        struct kvm_memory_slot *memslot)
1199{
1200        kvm_flush_remote_tlbs(kvm);
1201}
1202
1203static int kvm_vm_ioctl_set_device_addr(struct kvm *kvm,
1204                                        struct kvm_arm_device_addr *dev_addr)
1205{
1206        unsigned long dev_id, type;
1207
1208        dev_id = (dev_addr->id & KVM_ARM_DEVICE_ID_MASK) >>
1209                KVM_ARM_DEVICE_ID_SHIFT;
1210        type = (dev_addr->id & KVM_ARM_DEVICE_TYPE_MASK) >>
1211                KVM_ARM_DEVICE_TYPE_SHIFT;
1212
1213        switch (dev_id) {
1214        case KVM_ARM_DEVICE_VGIC_V2:
1215                if (!vgic_present)
1216                        return -ENXIO;
1217                return kvm_vgic_addr(kvm, type, &dev_addr->addr, true);
1218        default:
1219                return -ENODEV;
1220        }
1221}
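
    /*
     * Illustrative userspace usage of KVM_ARM_SET_DEVICE_ADDR as handled
     * above (a sketch; vm_fd and the 0x08000000 distributor base are
     * assumptions): placing the GICv2 distributor with this legacy
     * interface might look like:
     *
     *        struct kvm_arm_device_addr dev = {
     *                .id   = (KVM_ARM_DEVICE_VGIC_V2 << KVM_ARM_DEVICE_ID_SHIFT) |
     *                        KVM_VGIC_V2_ADDR_TYPE_DIST,
     *                .addr = 0x08000000,
     *        };
     *
     *        ioctl(vm_fd, KVM_ARM_SET_DEVICE_ADDR, &dev);
     *
     * Newer userspace typically programs these addresses through the
     * KVM_CREATE_DEVICE / device attribute API instead.
     */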
1222
1223long kvm_arch_vm_ioctl(struct file *filp,
1224                       unsigned int ioctl, unsigned long arg)
1225{
1226        struct kvm *kvm = filp->private_data;
1227        void __user *argp = (void __user *)arg;
1228
1229        switch (ioctl) {
1230        case KVM_CREATE_IRQCHIP: {
1231                int ret;
1232                if (!vgic_present)
1233                        return -ENXIO;
1234                mutex_lock(&kvm->lock);
1235                ret = kvm_vgic_create(kvm, KVM_DEV_TYPE_ARM_VGIC_V2);
1236                mutex_unlock(&kvm->lock);
1237                return ret;
1238        }
1239        case KVM_ARM_SET_DEVICE_ADDR: {
1240                struct kvm_arm_device_addr dev_addr;
1241
1242                if (copy_from_user(&dev_addr, argp, sizeof(dev_addr)))
1243                        return -EFAULT;
1244                return kvm_vm_ioctl_set_device_addr(kvm, &dev_addr);
1245        }
1246        case KVM_ARM_PREFERRED_TARGET: {
1247                int err;
1248                struct kvm_vcpu_init init;
1249
1250                err = kvm_vcpu_preferred_target(&init);
1251                if (err)
1252                        return err;
1253
1254                if (copy_to_user(argp, &init, sizeof(init)))
1255                        return -EFAULT;
1256
1257                return 0;
1258        }
1259        default:
1260                return -EINVAL;
1261        }
1262}
1263
1264static void cpu_init_hyp_mode(void)
1265{
1266        phys_addr_t pgd_ptr;
1267        unsigned long hyp_stack_ptr;
1268        unsigned long stack_page;
1269        unsigned long vector_ptr;
1270
1271        /* Switch from the HYP stub to our own HYP init vector */
1272        __hyp_set_vectors(kvm_get_idmap_vector());
1273
1274        pgd_ptr = kvm_mmu_get_httbr();
1275        stack_page = __this_cpu_read(kvm_arm_hyp_stack_page);
1276        hyp_stack_ptr = stack_page + PAGE_SIZE;
1277        vector_ptr = (unsigned long)kvm_get_hyp_vector();
1278
1279        __cpu_init_hyp_mode(pgd_ptr, hyp_stack_ptr, vector_ptr);
1280        __cpu_init_stage2();
1281}
1282
1283static void cpu_hyp_reset(void)
1284{
1285        if (!is_kernel_in_hyp_mode())
1286                __hyp_reset_vectors();
1287}
1288
1289static void cpu_hyp_reinit(void)
1290{
1291        kvm_init_host_cpu_context(&this_cpu_ptr(&kvm_host_data)->host_ctxt);
1292
1293        cpu_hyp_reset();
1294
1295        if (is_kernel_in_hyp_mode())
1296                kvm_timer_init_vhe();
1297        else
1298                cpu_init_hyp_mode();
1299
1300        kvm_arm_init_debug();
1301
1302        if (vgic_present)
1303                kvm_vgic_init_cpu_hardware();
1304}
1305
1306static void _kvm_arch_hardware_enable(void *discard)
1307{
1308        if (!__this_cpu_read(kvm_arm_hardware_enabled)) {
1309                cpu_hyp_reinit();
1310                __this_cpu_write(kvm_arm_hardware_enabled, 1);
1311        }
1312}
1313
1314int kvm_arch_hardware_enable(void)
1315{
1316        _kvm_arch_hardware_enable(NULL);
1317        return 0;
1318}
1319
1320static void _kvm_arch_hardware_disable(void *discard)
1321{
1322        if (__this_cpu_read(kvm_arm_hardware_enabled)) {
1323                cpu_hyp_reset();
1324                __this_cpu_write(kvm_arm_hardware_enabled, 0);
1325        }
1326}
1327
1328void kvm_arch_hardware_disable(void)
1329{
1330        _kvm_arch_hardware_disable(NULL);
1331}
1332
1333#ifdef CONFIG_CPU_PM
1334static int hyp_init_cpu_pm_notifier(struct notifier_block *self,
1335                                    unsigned long cmd,
1336                                    void *v)
1337{
1338        /*
1339         * kvm_arm_hardware_enabled is left with its old value over
1340         * PM_ENTER->PM_EXIT. It is used to indicate PM_EXIT should
1341         * re-enable hyp.
1342         */
1343        switch (cmd) {
1344        case CPU_PM_ENTER:
1345                if (__this_cpu_read(kvm_arm_hardware_enabled))
1346                        /*
1347                         * don't update kvm_arm_hardware_enabled here
1348                         * so that the hardware will be re-enabled
1349                         * when we resume. See below.
1350                         */
1351                        cpu_hyp_reset();
1352
1353                return NOTIFY_OK;
1354        case CPU_PM_ENTER_FAILED:
1355        case CPU_PM_EXIT:
1356                if (__this_cpu_read(kvm_arm_hardware_enabled))
1357                        /* The hardware was enabled before suspend. */
1358                        cpu_hyp_reinit();
1359
1360                return NOTIFY_OK;
1361
1362        default:
1363                return NOTIFY_DONE;
1364        }
1365}
1366
1367static struct notifier_block hyp_init_cpu_pm_nb = {
1368        .notifier_call = hyp_init_cpu_pm_notifier,
1369};
1370
1371static void __init hyp_cpu_pm_init(void)
1372{
1373        cpu_pm_register_notifier(&hyp_init_cpu_pm_nb);
1374}
1375static void __init hyp_cpu_pm_exit(void)
1376{
1377        cpu_pm_unregister_notifier(&hyp_init_cpu_pm_nb);
1378}
1379#else
1380static inline void hyp_cpu_pm_init(void)
1381{
1382}
1383static inline void hyp_cpu_pm_exit(void)
1384{
1385}
1386#endif
1387
1388static int init_common_resources(void)
1389{
1390        kvm_set_ipa_limit();
1391
1392        return 0;
1393}
1394
1395static int init_subsystems(void)
1396{
1397        int err = 0;
1398
1399        /*
1400         * Enable hardware so that subsystem initialisation can access EL2.
1401         */
1402        on_each_cpu(_kvm_arch_hardware_enable, NULL, 1);
1403
1404        /*
1405         * Register CPU lower-power notifier
1406         */
1407        hyp_cpu_pm_init();
1408
1409        /*
1410         * Init HYP view of VGIC
1411         */
1412        err = kvm_vgic_hyp_init();
1413        switch (err) {
1414        case 0:
1415                vgic_present = true;
1416                break;
1417        case -ENODEV:
1418        case -ENXIO:
1419                vgic_present = false;
1420                err = 0;
1421                break;
1422        default:
1423                goto out;
1424        }
1425
1426        /*
1427         * Init HYP architected timer support
1428         */
1429        err = kvm_timer_hyp_init(vgic_present);
1430        if (err)
1431                goto out;
1432
1433        kvm_perf_init();
1434        kvm_coproc_table_init();
1435
1436out:
1437        on_each_cpu(_kvm_arch_hardware_disable, NULL, 1);
1438
1439        return err;
1440}
1441
1442static void teardown_hyp_mode(void)
1443{
1444        int cpu;
1445
1446        free_hyp_pgds();
1447        for_each_possible_cpu(cpu)
1448                free_page(per_cpu(kvm_arm_hyp_stack_page, cpu));
1449}
1450
1451/**
1452 * Allocate and map the page tables, stacks and data needed to run in Hyp mode
1453 */
1454static int init_hyp_mode(void)
1455{
1456        int cpu;
1457        int err = 0;
1458
1459        /*
1460         * Allocate Hyp PGD and setup Hyp identity mapping
1461         */
1462        err = kvm_mmu_init();
1463        if (err)
1464                goto out_err;
1465
1466        /*
1467         * Allocate stack pages for Hypervisor-mode
1468         */
1469        for_each_possible_cpu(cpu) {
1470                unsigned long stack_page;
1471
1472                stack_page = __get_free_page(GFP_KERNEL);
1473                if (!stack_page) {
1474                        err = -ENOMEM;
1475                        goto out_err;
1476                }
1477
1478                per_cpu(kvm_arm_hyp_stack_page, cpu) = stack_page;
1479        }
1480
1481        /*
1482         * Map the Hyp-code called directly from the host
1483         */
1484        err = create_hyp_mappings(kvm_ksym_ref(__hyp_text_start),
1485                                  kvm_ksym_ref(__hyp_text_end), PAGE_HYP_EXEC);
1486        if (err) {
1487                kvm_err("Cannot map world-switch code\n");
1488                goto out_err;
1489        }
1490
1491        err = create_hyp_mappings(kvm_ksym_ref(__start_rodata),
1492                                  kvm_ksym_ref(__end_rodata), PAGE_HYP_RO);
1493        if (err) {
1494                kvm_err("Cannot map rodata section\n");
1495                goto out_err;
1496        }
1497
1498        err = create_hyp_mappings(kvm_ksym_ref(__bss_start),
1499                                  kvm_ksym_ref(__bss_stop), PAGE_HYP_RO);
1500        if (err) {
1501                kvm_err("Cannot map bss section\n");
1502                goto out_err;
1503        }
1504
1505        err = kvm_map_vectors();
1506        if (err) {
1507                kvm_err("Cannot map vectors\n");
1508                goto out_err;
1509        }
1510
1511        /*
1512         * Map the Hyp stack pages
1513         */
1514        for_each_possible_cpu(cpu) {
1515                char *stack_page = (char *)per_cpu(kvm_arm_hyp_stack_page, cpu);
1516                err = create_hyp_mappings(stack_page, stack_page + PAGE_SIZE,
1517                                          PAGE_HYP);
1518
1519                if (err) {
1520                        kvm_err("Cannot map hyp stack\n");
1521                        goto out_err;
1522                }
1523        }
1524
1525        for_each_possible_cpu(cpu) {
1526                kvm_host_data_t *cpu_data;
1527
1528                cpu_data = per_cpu_ptr(&kvm_host_data, cpu);
1529                err = create_hyp_mappings(cpu_data, cpu_data + 1, PAGE_HYP);
1530
1531                if (err) {
1532                        kvm_err("Cannot map host CPU state: %d\n", err);
1533                        goto out_err;
1534                }
1535        }
1536
1537        err = hyp_map_aux_data();
1538        if (err)
1539                kvm_err("Cannot map host auxiliary data: %d\n", err);
1540
1541        return 0;
1542
1543out_err:
1544        teardown_hyp_mode();
1545        kvm_err("error initializing Hyp mode: %d\n", err);
1546        return err;
1547}
1548
1549static void check_kvm_target_cpu(void *ret)
1550{
1551        *(int *)ret = kvm_target_cpu();
1552}
1553
1554struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr)
1555{
1556        struct kvm_vcpu *vcpu;
1557        int i;
1558
1559        mpidr &= MPIDR_HWID_BITMASK;
1560        kvm_for_each_vcpu(i, vcpu, kvm) {
1561                if (mpidr == kvm_vcpu_get_mpidr_aff(vcpu))
1562                        return vcpu;
1563        }
1564        return NULL;
1565}
1566
1567bool kvm_arch_has_irq_bypass(void)
1568{
1569        return true;
1570}
1571
1572int kvm_arch_irq_bypass_add_producer(struct irq_bypass_consumer *cons,
1573                                      struct irq_bypass_producer *prod)
1574{
1575        struct kvm_kernel_irqfd *irqfd =
1576                container_of(cons, struct kvm_kernel_irqfd, consumer);
1577
1578        return kvm_vgic_v4_set_forwarding(irqfd->kvm, prod->irq,
1579                                          &irqfd->irq_entry);
1580}
1581void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *cons,
1582                                      struct irq_bypass_producer *prod)
1583{
1584        struct kvm_kernel_irqfd *irqfd =
1585                container_of(cons, struct kvm_kernel_irqfd, consumer);
1586
1587        kvm_vgic_v4_unset_forwarding(irqfd->kvm, prod->irq,
1588                                     &irqfd->irq_entry);
1589}
1590
1591void kvm_arch_irq_bypass_stop(struct irq_bypass_consumer *cons)
1592{
1593        struct kvm_kernel_irqfd *irqfd =
1594                container_of(cons, struct kvm_kernel_irqfd, consumer);
1595
1596        kvm_arm_halt_guest(irqfd->kvm);
1597}
1598
1599void kvm_arch_irq_bypass_start(struct irq_bypass_consumer *cons)
1600{
1601        struct kvm_kernel_irqfd *irqfd =
1602                container_of(cons, struct kvm_kernel_irqfd, consumer);
1603
1604        kvm_arm_resume_guest(irqfd->kvm);
1605}
1606
1607/**
1608 * Initialize Hyp-mode and memory mappings on all CPUs.
1609 */
1610int kvm_arch_init(void *opaque)
1611{
1612        int err;
1613        int ret, cpu;
1614        bool in_hyp_mode;
1615
1616        if (!is_hyp_mode_available()) {
1617                kvm_info("HYP mode not available\n");
1618                return -ENODEV;
1619        }
1620
1621        in_hyp_mode = is_kernel_in_hyp_mode();
1622
1623        if (!in_hyp_mode && kvm_arch_requires_vhe()) {
1624                kvm_pr_unimpl("CPU unsupported in non-VHE mode, not initializing\n");
1625                return -ENODEV;
1626        }
1627
1628        for_each_online_cpu(cpu) {
1629                smp_call_function_single(cpu, check_kvm_target_cpu, &ret, 1);
1630                if (ret < 0) {
1631                        kvm_err("Error, CPU %d not supported!\n", cpu);
1632                        return -ENODEV;
1633                }
1634        }
1635
1636        err = init_common_resources();
1637        if (err)
1638                return err;
1639
1640        err = kvm_arm_init_sve();
1641        if (err)
1642                return err;
1643
1644        if (!in_hyp_mode) {
1645                err = init_hyp_mode();
1646                if (err)
1647                        goto out_err;
1648        }
1649
1650        err = init_subsystems();
1651        if (err)
1652                goto out_hyp;
1653
1654        if (in_hyp_mode)
1655                kvm_info("VHE mode initialized successfully\n");
1656        else
1657                kvm_info("Hyp mode initialized successfully\n");
1658
1659        return 0;
1660
1661out_hyp:
1662        hyp_cpu_pm_exit();
1663        if (!in_hyp_mode)
1664                teardown_hyp_mode();
1665out_err:
1666        return err;
1667}
1668
1669/* NOP: Compiling as a module not supported */
1670void kvm_arch_exit(void)
1671{
1672        kvm_perf_teardown();
1673}
1674
1675static int arm_init(void)
1676{
1677        int rc = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
1678        return rc;
1679}
1680
1681module_init(arm_init);
1682