linux/virt/kvm/arm/arm.c
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
   4 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
   5 */
   6
   7#include <linux/bug.h>
   8#include <linux/cpu_pm.h>
   9#include <linux/errno.h>
  10#include <linux/err.h>
  11#include <linux/kvm_host.h>
  12#include <linux/list.h>
  13#include <linux/module.h>
  14#include <linux/vmalloc.h>
  15#include <linux/fs.h>
  16#include <linux/mman.h>
  17#include <linux/sched.h>
  18#include <linux/kvm.h>
  19#include <linux/kvm_irqfd.h>
  20#include <linux/irqbypass.h>
  21#include <linux/sched/stat.h>
  22#include <trace/events/kvm.h>
  23
  24#define CREATE_TRACE_POINTS
  25#include "trace.h"
  26
  27#include <linux/uaccess.h>
  28#include <asm/ptrace.h>
  29#include <asm/mman.h>
  30#include <asm/tlbflush.h>
  31#include <asm/cacheflush.h>
  32#include <asm/cpufeature.h>
  33#include <asm/virt.h>
  34#include <asm/kvm_arm.h>
  35#include <asm/kvm_asm.h>
  36#include <asm/kvm_mmu.h>
  37#include <asm/kvm_emulate.h>
  38#include <asm/kvm_coproc.h>
  39#include <asm/sections.h>
  40
  41#include <kvm/arm_hypercalls.h>
  42#include <kvm/arm_pmu.h>
  43#include <kvm/arm_psci.h>
  44
  45#ifdef REQUIRES_VIRT
  46__asm__(".arch_extension        virt");
  47#endif
  48
  49DEFINE_PER_CPU(kvm_host_data_t, kvm_host_data);
  50static DEFINE_PER_CPU(unsigned long, kvm_arm_hyp_stack_page);
  51
  52/* The VMID used in the VTTBR */
  53static atomic64_t kvm_vmid_gen = ATOMIC64_INIT(1);
  54static u32 kvm_next_vmid;
  55static DEFINE_SPINLOCK(kvm_vmid_lock);
  56
  57static bool vgic_present;
  58
  59static DEFINE_PER_CPU(unsigned char, kvm_arm_hardware_enabled);
  60DEFINE_STATIC_KEY_FALSE(userspace_irqchip_in_use);
  61
  62int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
  63{
  64        return kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE;
  65}
  66
  67int kvm_arch_hardware_setup(void)
  68{
  69        return 0;
  70}
  71
  72int kvm_arch_check_processor_compat(void)
  73{
  74        return 0;
  75}
  76
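/*
 * Enable a VM-wide capability from userspace. Only KVM_CAP_ARM_NISV_TO_USER
 * is currently handled here; anything else is rejected with -EINVAL.
 */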
  77int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
  78                            struct kvm_enable_cap *cap)
  79{
  80        int r;
  81
  82        if (cap->flags)
  83                return -EINVAL;
  84
  85        switch (cap->cap) {
  86        case KVM_CAP_ARM_NISV_TO_USER:
  87                r = 0;
  88                kvm->arch.return_nisv_io_abort_to_user = true;
  89                break;
  90        default:
  91                r = -EINVAL;
  92                break;
  93        }
  94
  95        return r;
  96}
  97
  98/**
  99 * kvm_arch_init_vm - initializes a VM data structure
 100 * @kvm:        pointer to the KVM struct
 101 */
 102int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 103{
 104        int ret, cpu;
 105
 106        ret = kvm_arm_setup_stage2(kvm, type);
 107        if (ret)
 108                return ret;
 109
 110        kvm->arch.last_vcpu_ran = alloc_percpu(typeof(*kvm->arch.last_vcpu_ran));
 111        if (!kvm->arch.last_vcpu_ran)
 112                return -ENOMEM;
 113
 114        for_each_possible_cpu(cpu)
 115                *per_cpu_ptr(kvm->arch.last_vcpu_ran, cpu) = -1;
 116
 117        ret = kvm_alloc_stage2_pgd(kvm);
 118        if (ret)
 119                goto out_fail_alloc;
 120
 121        ret = create_hyp_mappings(kvm, kvm + 1, PAGE_HYP);
 122        if (ret)
 123                goto out_free_stage2_pgd;
 124
 125        kvm_vgic_early_init(kvm);
 126
 127        /* Mark the initial VMID generation invalid */
 128        kvm->arch.vmid.vmid_gen = 0;
 129
 130        /* The maximum number of VCPUs is limited by the host's GIC model */
 131        kvm->arch.max_vcpus = vgic_present ?
 132                                kvm_vgic_get_max_vcpus() : KVM_MAX_VCPUS;
 133
 134        return ret;
 135out_free_stage2_pgd:
 136        kvm_free_stage2_pgd(kvm);
 137out_fail_alloc:
 138        free_percpu(kvm->arch.last_vcpu_ran);
 139        kvm->arch.last_vcpu_ran = NULL;
 140        return ret;
 141}
 142
 143int kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu)
 144{
 145        return 0;
 146}
 147
 148vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
 149{
 150        return VM_FAULT_SIGBUS;
 151}
 152
 153
 154/**
 155 * kvm_arch_destroy_vm - destroy the VM data structure
 156 * @kvm:        pointer to the KVM struct
 157 */
 158void kvm_arch_destroy_vm(struct kvm *kvm)
 159{
 160        int i;
 161
 162        kvm_vgic_destroy(kvm);
 163
 164        free_percpu(kvm->arch.last_vcpu_ran);
 165        kvm->arch.last_vcpu_ran = NULL;
 166
 167        for (i = 0; i < KVM_MAX_VCPUS; ++i) {
 168                if (kvm->vcpus[i]) {
 169                        kvm_vcpu_destroy(kvm->vcpus[i]);
 170                        kvm->vcpus[i] = NULL;
 171                }
 172        }
 173        atomic_set(&kvm->online_vcpus, 0);
 174}
 175
 176int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
 177{
 178        int r;
 179        switch (ext) {
 180        case KVM_CAP_IRQCHIP:
 181                r = vgic_present;
 182                break;
 183        case KVM_CAP_IOEVENTFD:
 184        case KVM_CAP_DEVICE_CTRL:
 185        case KVM_CAP_USER_MEMORY:
 186        case KVM_CAP_SYNC_MMU:
 187        case KVM_CAP_DESTROY_MEMORY_REGION_WORKS:
 188        case KVM_CAP_ONE_REG:
 189        case KVM_CAP_ARM_PSCI:
 190        case KVM_CAP_ARM_PSCI_0_2:
 191        case KVM_CAP_READONLY_MEM:
 192        case KVM_CAP_MP_STATE:
 193        case KVM_CAP_IMMEDIATE_EXIT:
 194        case KVM_CAP_VCPU_EVENTS:
 195        case KVM_CAP_ARM_IRQ_LINE_LAYOUT_2:
 196        case KVM_CAP_ARM_NISV_TO_USER:
 197        case KVM_CAP_ARM_INJECT_EXT_DABT:
 198                r = 1;
 199                break;
 200        case KVM_CAP_ARM_SET_DEVICE_ADDR:
 201                r = 1;
 202                break;
 203        case KVM_CAP_NR_VCPUS:
 204                r = num_online_cpus();
 205                break;
 206        case KVM_CAP_MAX_VCPUS:
 207                r = KVM_MAX_VCPUS;
 208                break;
 209        case KVM_CAP_MAX_VCPU_ID:
 210                r = KVM_MAX_VCPU_ID;
 211                break;
 212        case KVM_CAP_MSI_DEVID:
 213                if (!kvm)
 214                        r = -EINVAL;
 215                else
 216                        r = kvm->arch.vgic.msis_require_devid;
 217                break;
 218        case KVM_CAP_ARM_USER_IRQ:
 219                /*
 220                 * 1: EL1_VTIMER, EL1_PTIMER, and PMU.
 221                 * (bump this number if adding more devices)
 222                 */
 223                r = 1;
 224                break;
 225        default:
 226                r = kvm_arch_vm_ioctl_check_extension(kvm, ext);
 227                break;
 228        }
 229        return r;
 230}
 231
 232long kvm_arch_dev_ioctl(struct file *filp,
 233                        unsigned int ioctl, unsigned long arg)
 234{
 235        return -EINVAL;
 236}
 237
 238struct kvm *kvm_arch_alloc_vm(void)
 239{
 240        if (!has_vhe())
 241                return kzalloc(sizeof(struct kvm), GFP_KERNEL);
 242
 243        return vzalloc(sizeof(struct kvm));
 244}
 245
 246void kvm_arch_free_vm(struct kvm *kvm)
 247{
 248        if (!has_vhe())
 249                kfree(kvm);
 250        else
 251                vfree(kvm);
 252}
 253
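/*
 * Called before the vcpu struct is allocated: refuse new vcpus once the
 * in-kernel vgic has been initialized, and reject vcpu ids beyond the limit
 * imposed by the host's GIC model.
 */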
 254int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id)
 255{
 256        if (irqchip_in_kernel(kvm) && vgic_initialized(kvm))
 257                return -EBUSY;
 258
 259        if (id >= kvm->arch.max_vcpus)
 260                return -EINVAL;
 261
 262        return 0;
 263}
 264
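/*
 * Initialise the architectural state of a newly created vcpu: leave the
 * target invalid so userspace must call KVM_ARM_VCPU_INIT, set up the timer,
 * PMU, debug and pvtime state, attach the vcpu to the vgic and map its
 * struct into Hyp.
 */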
 265int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
 266{
 267        int err;
 268
 269        /* Force users to call KVM_ARM_VCPU_INIT */
 270        vcpu->arch.target = -1;
 271        bitmap_zero(vcpu->arch.features, KVM_VCPU_MAX_FEATURES);
 272
 273        /* Set up the timer */
 274        kvm_timer_vcpu_init(vcpu);
 275
 276        kvm_pmu_vcpu_init(vcpu);
 277
 278        kvm_arm_reset_debug_ptr(vcpu);
 279
 280        kvm_arm_pvtime_vcpu_init(&vcpu->arch);
 281
 282        err = kvm_vgic_vcpu_init(vcpu);
 283        if (err)
 284                return err;
 285
 286        return create_hyp_mappings(vcpu, vcpu + 1, PAGE_HYP);
 287}
 288
 289void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
 290{
 291}
 292
 293void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
 294{
 295        if (vcpu->arch.has_run_once && unlikely(!irqchip_in_kernel(vcpu->kvm)))
 296                static_branch_dec(&userspace_irqchip_in_use);
 297
 298        kvm_mmu_free_memory_caches(vcpu);
 299        kvm_timer_vcpu_terminate(vcpu);
 300        kvm_pmu_vcpu_destroy(vcpu);
 301
 302        kvm_arm_vcpu_destroy(vcpu);
 303}
 304
 305int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
 306{
 307        return kvm_timer_is_pending(vcpu);
 308}
 309
 310void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu)
 311{
 312        /*
 313         * If we're about to block (most likely because we've just hit a
 314         * WFI), we need to sync back the state of the GIC CPU interface
 315         * so that we have the latest PMR and group enables. This ensures
 316         * that kvm_arch_vcpu_runnable has up-to-date data to decide
 317         * whether we have pending interrupts.
 318         *
 319         * For the same reason, we want to tell GICv4 that we need
 320         * doorbells to be signalled, should an interrupt become pending.
 321         */
 322        preempt_disable();
 323        kvm_vgic_vmcr_sync(vcpu);
 324        vgic_v4_put(vcpu, true);
 325        preempt_enable();
 326}
 327
 328void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu)
 329{
 330        preempt_disable();
 331        vgic_v4_load(vcpu);
 332        preempt_enable();
 333}
 334
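/*
 * Called when the vcpu is scheduled in on a physical CPU: invalidate the
 * local stage-2 TLB for this VMID if another vcpu of the same VM ran here
 * last, then load the vgic, timer, sysreg, FP and PMU state onto the
 * hardware.
 */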
 335void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 336{
 337        int *last_ran;
 338        kvm_host_data_t *cpu_data;
 339
 340        last_ran = this_cpu_ptr(vcpu->kvm->arch.last_vcpu_ran);
 341        cpu_data = this_cpu_ptr(&kvm_host_data);
 342
 343        /*
 344         * We might get preempted before the vCPU actually runs, but
 345         * over-invalidation doesn't affect correctness.
 346         */
 347        if (*last_ran != vcpu->vcpu_id) {
 348                kvm_call_hyp(__kvm_tlb_flush_local_vmid, vcpu);
 349                *last_ran = vcpu->vcpu_id;
 350        }
 351
 352        vcpu->cpu = cpu;
 353        vcpu->arch.host_cpu_context = &cpu_data->host_ctxt;
 354
 355        kvm_vgic_load(vcpu);
 356        kvm_timer_vcpu_load(vcpu);
 357        kvm_vcpu_load_sysregs(vcpu);
 358        kvm_arch_vcpu_load_fp(vcpu);
 359        kvm_vcpu_pmu_restore_guest(vcpu);
 360        if (kvm_arm_is_pvtime_enabled(&vcpu->arch))
 361                kvm_make_request(KVM_REQ_RECORD_STEAL, vcpu);
 362
 363        if (single_task_running())
 364                vcpu_clear_wfx_traps(vcpu);
 365        else
 366                vcpu_set_wfx_traps(vcpu);
 367
 368        vcpu_ptrauth_setup_lazy(vcpu);
 369}
 370
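/*
 * Called when the vcpu is scheduled out: save the guest's FP, sysreg, timer
 * and vgic state and restore the host's PMU context.
 */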
 371void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
 372{
 373        kvm_arch_vcpu_put_fp(vcpu);
 374        kvm_vcpu_put_sysregs(vcpu);
 375        kvm_timer_vcpu_put(vcpu);
 376        kvm_vgic_put(vcpu);
 377        kvm_vcpu_pmu_restore_host(vcpu);
 378
 379        vcpu->cpu = -1;
 380}
 381
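/* Mark the vcpu as powered off and kick it out of the guest so it sleeps. */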
 382static void vcpu_power_off(struct kvm_vcpu *vcpu)
 383{
 384        vcpu->arch.power_off = true;
 385        kvm_make_request(KVM_REQ_SLEEP, vcpu);
 386        kvm_vcpu_kick(vcpu);
 387}
 388
 389int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
 390                                    struct kvm_mp_state *mp_state)
 391{
 392        if (vcpu->arch.power_off)
 393                mp_state->mp_state = KVM_MP_STATE_STOPPED;
 394        else
 395                mp_state->mp_state = KVM_MP_STATE_RUNNABLE;
 396
 397        return 0;
 398}
 399
 400int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
 401                                    struct kvm_mp_state *mp_state)
 402{
 403        int ret = 0;
 404
 405        switch (mp_state->mp_state) {
 406        case KVM_MP_STATE_RUNNABLE:
 407                vcpu->arch.power_off = false;
 408                break;
 409        case KVM_MP_STATE_STOPPED:
 410                vcpu_power_off(vcpu);
 411                break;
 412        default:
 413                ret = -EINVAL;
 414        }
 415
 416        return ret;
 417}
 418
 419/**
 420 * kvm_arch_vcpu_runnable - determine if the vcpu can be scheduled
 421 * @v:          The VCPU pointer
 422 *
 423 * If the guest CPU is not waiting for interrupts or an interrupt line is
 424 * asserted, the CPU is by definition runnable.
 425 */
 426int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
 427{
 428        bool irq_lines = *vcpu_hcr(v) & (HCR_VI | HCR_VF);
 429        return ((irq_lines || kvm_vgic_vcpu_pending_irq(v))
 430                && !v->arch.power_off && !v->arch.pause);
 431}
 432
 433bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
 434{
 435        return vcpu_mode_priv(vcpu);
 436}
 437
 438/* Just ensure a guest exit from a particular CPU */
 439static void exit_vm_noop(void *info)
 440{
 441}
 442
 443void force_vm_exit(const cpumask_t *mask)
 444{
 445        preempt_disable();
 446        smp_call_function_many(mask, exit_vm_noop, NULL, true);
 447        preempt_enable();
 448}
 449
 450/**
 451 * need_new_vmid_gen - check that the VMID is still valid
 452 * @vmid: The VMID to check
 453 *
 454 * return true if there is a new generation of VMIDs being used
 455 *
 456 * The hardware supports a limited set of values with the value zero reserved
 457 * for the host, so we check if an assigned value belongs to a previous
 458 * generation, which requires us to assign a new value. If we're the
 459 * first to use a VMID for the new generation, we must flush necessary caches
 460 * and TLBs on all CPUs.
 461 */
 462static bool need_new_vmid_gen(struct kvm_vmid *vmid)
 463{
 464        u64 current_vmid_gen = atomic64_read(&kvm_vmid_gen);
 465        smp_rmb(); /* Orders read of kvm_vmid_gen and kvm->arch.vmid */
 466        return unlikely(READ_ONCE(vmid->vmid_gen) != current_vmid_gen);
 467}
 468
 469/**
 470 * update_vmid - Update the vmid with a valid VMID for the current generation
 472 * @vmid: The stage-2 VMID information struct
 473 */
 474static void update_vmid(struct kvm_vmid *vmid)
 475{
 476        if (!need_new_vmid_gen(vmid))
 477                return;
 478
 479        spin_lock(&kvm_vmid_lock);
 480
 481        /*
 482         * We need to re-check the vmid_gen here to ensure that if another vcpu
 483         * already allocated a valid vmid for this vm, then this vcpu should
 484         * use the same vmid.
 485         */
 486        if (!need_new_vmid_gen(vmid)) {
 487                spin_unlock(&kvm_vmid_lock);
 488                return;
 489        }
 490
 491        /* First user of a new VMID generation? */
 492        if (unlikely(kvm_next_vmid == 0)) {
 493                atomic64_inc(&kvm_vmid_gen);
 494                kvm_next_vmid = 1;
 495
 496                /*
 497                 * On SMP we know no other CPUs can use this CPU's or each
 498                 * other's VMID after force_vm_exit returns since the
 499                 * kvm_vmid_lock blocks them from reentry to the guest.
 500                 */
 501                force_vm_exit(cpu_all_mask);
 502                /*
 503                 * Now broadcast TLB + ICACHE invalidation over the inner
 504                 * shareable domain to make sure all data structures are
 505                 * clean.
 506                 */
 507                kvm_call_hyp(__kvm_flush_vm_context);
 508        }
 509
 510        vmid->vmid = kvm_next_vmid;
 511        kvm_next_vmid++;
 512        kvm_next_vmid &= (1 << kvm_get_vmid_bits()) - 1;
 513
 514        smp_wmb();
 515        WRITE_ONCE(vmid->vmid_gen, atomic64_read(&kvm_vmid_gen));
 516
 517        spin_unlock(&kvm_vmid_lock);
 518}
 519
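/*
 * One-time initialisation performed on the first KVM_RUN of a vcpu: map the
 * VGIC resources if an in-kernel irqchip is used (or account for a userspace
 * irqchip otherwise) and enable the timer and PMU for this vcpu.
 */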
 520static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu)
 521{
 522        struct kvm *kvm = vcpu->kvm;
 523        int ret = 0;
 524
 525        if (likely(vcpu->arch.has_run_once))
 526                return 0;
 527
 528        if (!kvm_arm_vcpu_is_finalized(vcpu))
 529                return -EPERM;
 530
 531        vcpu->arch.has_run_once = true;
 532
 533        if (likely(irqchip_in_kernel(kvm))) {
 534                /*
 535                 * Map the VGIC hardware resources before running a vcpu the
 536                 * first time on this VM.
 537                 */
 538                if (unlikely(!vgic_ready(kvm))) {
 539                        ret = kvm_vgic_map_resources(kvm);
 540                        if (ret)
 541                                return ret;
 542                }
 543        } else {
 544                /*
 545                 * Tell the rest of the code that there are userspace irqchip
 546                 * VMs in the wild.
 547                 */
 548                static_branch_inc(&userspace_irqchip_in_use);
 549        }
 550
 551        ret = kvm_timer_enable(vcpu);
 552        if (ret)
 553                return ret;
 554
 555        ret = kvm_arm_pmu_v3_enable(vcpu);
 556
 557        return ret;
 558}
 559
 560bool kvm_arch_intc_initialized(struct kvm *kvm)
 561{
 562        return vgic_initialized(kvm);
 563}
 564
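/*
 * Pause all vcpus of a VM: each vcpu will stop in vcpu_req_sleep() until
 * kvm_arm_resume_guest() clears the pause flag and wakes it up again.
 */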
 565void kvm_arm_halt_guest(struct kvm *kvm)
 566{
 567        int i;
 568        struct kvm_vcpu *vcpu;
 569
 570        kvm_for_each_vcpu(i, vcpu, kvm)
 571                vcpu->arch.pause = true;
 572        kvm_make_all_cpus_request(kvm, KVM_REQ_SLEEP);
 573}
 574
 575void kvm_arm_resume_guest(struct kvm *kvm)
 576{
 577        int i;
 578        struct kvm_vcpu *vcpu;
 579
 580        kvm_for_each_vcpu(i, vcpu, kvm) {
 581                vcpu->arch.pause = false;
 582                swake_up_one(kvm_arch_vcpu_wq(vcpu));
 583        }
 584}
 585
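/*
 * Put the vcpu to sleep on its wait queue until it is neither powered off
 * nor paused; if we wake up while still in that state (typically to handle
 * a signal), request that we sleep again later.
 */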
 586static void vcpu_req_sleep(struct kvm_vcpu *vcpu)
 587{
 588        struct swait_queue_head *wq = kvm_arch_vcpu_wq(vcpu);
 589
 590        swait_event_interruptible_exclusive(*wq, ((!vcpu->arch.power_off) &&
 591                                       (!vcpu->arch.pause)));
 592
 593        if (vcpu->arch.power_off || vcpu->arch.pause) {
 594                /* Woken up to handle a signal; request that we sleep again later. */
 595                kvm_make_request(KVM_REQ_SLEEP, vcpu);
 596        }
 597
 598        /*
 599         * Make sure we will observe a potential reset request if we've
 600         * observed a change to the power state. Pairs with the smp_wmb() in
 601         * kvm_psci_vcpu_on().
 602         */
 603        smp_rmb();
 604}
 605
 606static int kvm_vcpu_initialized(struct kvm_vcpu *vcpu)
 607{
 608        return vcpu->arch.target >= 0;
 609}
 610
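/* Handle any vcpu requests that were posted before entering the guest. */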
 611static void check_vcpu_requests(struct kvm_vcpu *vcpu)
 612{
 613        if (kvm_request_pending(vcpu)) {
 614                if (kvm_check_request(KVM_REQ_SLEEP, vcpu))
 615                        vcpu_req_sleep(vcpu);
 616
 617                if (kvm_check_request(KVM_REQ_VCPU_RESET, vcpu))
 618                        kvm_reset_vcpu(vcpu);
 619
 620                /*
 621                 * Clear IRQ_PENDING requests that were made to guarantee
 622                 * that a VCPU sees new virtual interrupts.
 623                 */
 624                kvm_check_request(KVM_REQ_IRQ_PENDING, vcpu);
 625
 626                if (kvm_check_request(KVM_REQ_RECORD_STEAL, vcpu))
 627                        kvm_update_stolen_time(vcpu);
 628        }
 629}
 630
 631/**
 632 * kvm_arch_vcpu_ioctl_run - the main VCPU run function to execute guest code
 633 * @vcpu:       The VCPU pointer
 634 * @run:        The kvm_run structure pointer used for userspace state exchange
 635 *
 636 * This function is called through the KVM_RUN ioctl from user space. It
 637 * will execute VM code in a loop until the time slice for the process is
 638 * used up or some emulation is needed from user space, in which case the
 639 * function returns 0 with the kvm_run structure filled in with the data
 640 * required for the requested emulation.
 641 */
 642int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
 643{
 644        int ret;
 645
 646        if (unlikely(!kvm_vcpu_initialized(vcpu)))
 647                return -ENOEXEC;
 648
 649        ret = kvm_vcpu_first_run_init(vcpu);
 650        if (ret)
 651                return ret;
 652
 653        if (run->exit_reason == KVM_EXIT_MMIO) {
 654                ret = kvm_handle_mmio_return(vcpu, vcpu->run);
 655                if (ret)
 656                        return ret;
 657        }
 658
 659        if (run->immediate_exit)
 660                return -EINTR;
 661
 662        vcpu_load(vcpu);
 663
 664        kvm_sigset_activate(vcpu);
 665
 666        ret = 1;
 667        run->exit_reason = KVM_EXIT_UNKNOWN;
 668        while (ret > 0) {
 669                /*
 670                 * Check conditions before entering the guest
 671                 */
 672                cond_resched();
 673
 674                update_vmid(&vcpu->kvm->arch.vmid);
 675
 676                check_vcpu_requests(vcpu);
 677
 678                /*
 679                 * Preparing the interrupts to be injected also
 680                 * involves poking the GIC, which must be done in a
 681                 * non-preemptible context.
 682                 */
 683                preempt_disable();
 684
 685                kvm_pmu_flush_hwstate(vcpu);
 686
 687                local_irq_disable();
 688
 689                kvm_vgic_flush_hwstate(vcpu);
 690
 691                /*
 692                 * Exit if we have a signal pending so that we can deliver the
 693                 * signal to user space.
 694                 */
 695                if (signal_pending(current)) {
 696                        ret = -EINTR;
 697                        run->exit_reason = KVM_EXIT_INTR;
 698                }
 699
 700                /*
 701                 * If we're using a userspace irqchip, then check if we need
 702                 * to tell a userspace irqchip about timer or PMU level
 703                 * changes and if so, exit to userspace (the actual level
 704                 * state gets updated in kvm_timer_update_run and
 705                 * kvm_pmu_update_run below).
 706                 */
 707                if (static_branch_unlikely(&userspace_irqchip_in_use)) {
 708                        if (kvm_timer_should_notify_user(vcpu) ||
 709                            kvm_pmu_should_notify_user(vcpu)) {
 710                                ret = -EINTR;
 711                                run->exit_reason = KVM_EXIT_INTR;
 712                        }
 713                }
 714
 715                /*
 716                 * Ensure we set mode to IN_GUEST_MODE after we disable
 717                 * interrupts and before the final VCPU requests check.
 718                 * See the comment in kvm_vcpu_exiting_guest_mode() and
 719                 * Documentation/virt/kvm/vcpu-requests.rst
 720                 */
 721                smp_store_mb(vcpu->mode, IN_GUEST_MODE);
 722
 723                if (ret <= 0 || need_new_vmid_gen(&vcpu->kvm->arch.vmid) ||
 724                    kvm_request_pending(vcpu)) {
 725                        vcpu->mode = OUTSIDE_GUEST_MODE;
 726                        isb(); /* Ensure work in x_flush_hwstate is committed */
 727                        kvm_pmu_sync_hwstate(vcpu);
 728                        if (static_branch_unlikely(&userspace_irqchip_in_use))
 729                                kvm_timer_sync_hwstate(vcpu);
 730                        kvm_vgic_sync_hwstate(vcpu);
 731                        local_irq_enable();
 732                        preempt_enable();
 733                        continue;
 734                }
 735
 736                kvm_arm_setup_debug(vcpu);
 737
 738                /**************************************************************
 739                 * Enter the guest
 740                 */
 741                trace_kvm_entry(*vcpu_pc(vcpu));
 742                guest_enter_irqoff();
 743
 744                if (has_vhe()) {
 745                        ret = kvm_vcpu_run_vhe(vcpu);
 746                } else {
 747                        ret = kvm_call_hyp_ret(__kvm_vcpu_run_nvhe, vcpu);
 748                }
 749
 750                vcpu->mode = OUTSIDE_GUEST_MODE;
 751                vcpu->stat.exits++;
 752                /*
 753                 * Back from guest
 754                 *************************************************************/
 755
 756                kvm_arm_clear_debug(vcpu);
 757
 758                /*
 759                 * We must sync the PMU state before the vgic state so
 760                 * that the vgic can properly sample the updated state of the
 761                 * interrupt line.
 762                 */
 763                kvm_pmu_sync_hwstate(vcpu);
 764
 765                /*
 766                 * Sync the vgic state before syncing the timer state because
 767                 * the timer code needs to know if the virtual timer
 768                 * interrupts are active.
 769                 */
 770                kvm_vgic_sync_hwstate(vcpu);
 771
 772                /*
 773                 * Sync the timer hardware state before enabling interrupts as
 774                 * we don't want vtimer interrupts to race with syncing the
 775                 * timer virtual interrupt state.
 776                 */
 777                if (static_branch_unlikely(&userspace_irqchip_in_use))
 778                        kvm_timer_sync_hwstate(vcpu);
 779
 780                kvm_arch_vcpu_ctxsync_fp(vcpu);
 781
 782                /*
 783                 * We may have taken a host interrupt in HYP mode (i.e.
 784                 * while executing the guest). This interrupt is still
 785                 * pending, as we haven't serviced it yet!
 786                 *
 787                 * We're now back in SVC mode, with interrupts
 788                 * disabled.  Enabling the interrupts now will have
 789                 * the effect of taking the interrupt again, in SVC
 790                 * mode this time.
 791                 */
 792                local_irq_enable();
 793
 794                /*
 795                 * We do local_irq_enable() before calling guest_exit() so
 796                 * that if a timer interrupt hits while running the guest we
 797                 * account that tick as being spent in the guest.  We enable
 798                 * preemption after calling guest_exit() so that if we get
 799                 * preempted, ticks after that point are not counted as
 800                 * guest time.
 801                 */
 802                guest_exit();
 803                trace_kvm_exit(ret, kvm_vcpu_trap_get_class(vcpu), *vcpu_pc(vcpu));
 804
 805                /* Exit types that need handling before we can be preempted */
 806                handle_exit_early(vcpu, run, ret);
 807
 808                preempt_enable();
 809
 810                ret = handle_exit(vcpu, run, ret);
 811        }
 812
 813        /* Tell userspace about in-kernel device output levels */
 814        if (unlikely(!irqchip_in_kernel(vcpu->kvm))) {
 815                kvm_timer_update_run(vcpu);
 816                kvm_pmu_update_run(vcpu);
 817        }
 818
 819        kvm_sigset_deactivate(vcpu);
 820
 821        vcpu_put(vcpu);
 822        return ret;
 823}
 824
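/*
 * Inject or retire a virtual IRQ/FIQ line for a VM without an in-kernel
 * irqchip by setting or clearing HCR_VI/HCR_VF; kick the vcpu only if the
 * line level actually changed.
 */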
 825static int vcpu_interrupt_line(struct kvm_vcpu *vcpu, int number, bool level)
 826{
 827        int bit_index;
 828        bool set;
 829        unsigned long *hcr;
 830
 831        if (number == KVM_ARM_IRQ_CPU_IRQ)
 832                bit_index = __ffs(HCR_VI);
 833        else /* KVM_ARM_IRQ_CPU_FIQ */
 834                bit_index = __ffs(HCR_VF);
 835
 836        hcr = vcpu_hcr(vcpu);
 837        if (level)
 838                set = test_and_set_bit(bit_index, hcr);
 839        else
 840                set = test_and_clear_bit(bit_index, hcr);
 841
 842        /*
 843         * If we didn't change anything, no need to wake up or kick other CPUs
 844         */
 845        if (set == level)
 846                return 0;
 847
 848        /*
 849         * The vcpu irq_lines field was updated, wake up sleeping VCPUs and
 850         * trigger a world-switch round on the running physical CPU to set the
 851         * virtual IRQ/FIQ fields in the HCR appropriately.
 852         */
 853        kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
 854        kvm_vcpu_kick(vcpu);
 855
 856        return 0;
 857}
 858
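/*
 * KVM_IRQ_LINE: decode the packed irq field into its type, vcpu index and
 * interrupt number using the KVM_ARM_IRQ_* layout, then route CPU IRQ/FIQ
 * lines to vcpu_interrupt_line() (userspace irqchip only) and PPIs/SPIs to
 * the in-kernel vgic.
 */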
 859int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level,
 860                          bool line_status)
 861{
 862        u32 irq = irq_level->irq;
 863        unsigned int irq_type, vcpu_idx, irq_num;
 864        int nrcpus = atomic_read(&kvm->online_vcpus);
 865        struct kvm_vcpu *vcpu = NULL;
 866        bool level = irq_level->level;
 867
 868        irq_type = (irq >> KVM_ARM_IRQ_TYPE_SHIFT) & KVM_ARM_IRQ_TYPE_MASK;
 869        vcpu_idx = (irq >> KVM_ARM_IRQ_VCPU_SHIFT) & KVM_ARM_IRQ_VCPU_MASK;
 870        vcpu_idx += ((irq >> KVM_ARM_IRQ_VCPU2_SHIFT) & KVM_ARM_IRQ_VCPU2_MASK) * (KVM_ARM_IRQ_VCPU_MASK + 1);
 871        irq_num = (irq >> KVM_ARM_IRQ_NUM_SHIFT) & KVM_ARM_IRQ_NUM_MASK;
 872
 873        trace_kvm_irq_line(irq_type, vcpu_idx, irq_num, irq_level->level);
 874
 875        switch (irq_type) {
 876        case KVM_ARM_IRQ_TYPE_CPU:
 877                if (irqchip_in_kernel(kvm))
 878                        return -ENXIO;
 879
 880                if (vcpu_idx >= nrcpus)
 881                        return -EINVAL;
 882
 883                vcpu = kvm_get_vcpu(kvm, vcpu_idx);
 884                if (!vcpu)
 885                        return -EINVAL;
 886
 887                if (irq_num > KVM_ARM_IRQ_CPU_FIQ)
 888                        return -EINVAL;
 889
 890                return vcpu_interrupt_line(vcpu, irq_num, level);
 891        case KVM_ARM_IRQ_TYPE_PPI:
 892                if (!irqchip_in_kernel(kvm))
 893                        return -ENXIO;
 894
 895                if (vcpu_idx >= nrcpus)
 896                        return -EINVAL;
 897
 898                vcpu = kvm_get_vcpu(kvm, vcpu_idx);
 899                if (!vcpu)
 900                        return -EINVAL;
 901
 902                if (irq_num < VGIC_NR_SGIS || irq_num >= VGIC_NR_PRIVATE_IRQS)
 903                        return -EINVAL;
 904
 905                return kvm_vgic_inject_irq(kvm, vcpu->vcpu_id, irq_num, level, NULL);
 906        case KVM_ARM_IRQ_TYPE_SPI:
 907                if (!irqchip_in_kernel(kvm))
 908                        return -ENXIO;
 909
 910                if (irq_num < VGIC_NR_PRIVATE_IRQS)
 911                        return -EINVAL;
 912
 913                return kvm_vgic_inject_irq(kvm, 0, irq_num, level, NULL);
 914        }
 915
 916        return -EINVAL;
 917}
 918
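/*
 * Validate the target and feature set requested by KVM_ARM_VCPU_INIT against
 * the host and against any earlier init of this vcpu, record the accepted
 * features and reset the vcpu; on failure the vcpu is left uninitialized.
 */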
 919static int kvm_vcpu_set_target(struct kvm_vcpu *vcpu,
 920                               const struct kvm_vcpu_init *init)
 921{
 922        unsigned int i, ret;
 923        int phys_target = kvm_target_cpu();
 924
 925        if (init->target != phys_target)
 926                return -EINVAL;
 927
 928        /*
 929         * Secondary and subsequent calls to KVM_ARM_VCPU_INIT must
 930         * use the same target.
 931         */
 932        if (vcpu->arch.target != -1 && vcpu->arch.target != init->target)
 933                return -EINVAL;
 934
 935        /* -ENOENT for unknown features, -EINVAL for invalid combinations. */
 936        for (i = 0; i < sizeof(init->features) * 8; i++) {
 937                bool set = (init->features[i / 32] & (1 << (i % 32)));
 938
 939                if (set && i >= KVM_VCPU_MAX_FEATURES)
 940                        return -ENOENT;
 941
 942                /*
 943                 * Secondary and subsequent calls to KVM_ARM_VCPU_INIT must
 944                 * use the same feature set.
 945                 */
 946                if (vcpu->arch.target != -1 && i < KVM_VCPU_MAX_FEATURES &&
 947                    test_bit(i, vcpu->arch.features) != set)
 948                        return -EINVAL;
 949
 950                if (set)
 951                        set_bit(i, vcpu->arch.features);
 952        }
 953
 954        vcpu->arch.target = phys_target;
 955
 956        /* Now we know what it is, we can reset it. */
 957        ret = kvm_reset_vcpu(vcpu);
 958        if (ret) {
 959                vcpu->arch.target = -1;
 960                bitmap_zero(vcpu->arch.features, KVM_VCPU_MAX_FEATURES);
 961        }
 962
 963        return ret;
 964}
 965
 966static int kvm_arch_vcpu_ioctl_vcpu_init(struct kvm_vcpu *vcpu,
 967                                         struct kvm_vcpu_init *init)
 968{
 969        int ret;
 970
 971        ret = kvm_vcpu_set_target(vcpu, init);
 972        if (ret)
 973                return ret;
 974
 975        /*
 976         * Ensure a rebooted VM will fault in RAM pages and detect if the
 977         * guest MMU is turned off and flush the caches as needed.
 978         */
 979        if (vcpu->arch.has_run_once)
 980                stage2_unmap_vm(vcpu->kvm);
 981
 982        vcpu_reset_hcr(vcpu);
 983
 984        /*
 985         * Handle the "start in power-off" case.
 986         */
 987        if (test_bit(KVM_ARM_VCPU_POWER_OFF, vcpu->arch.features))
 988                vcpu_power_off(vcpu);
 989        else
 990                vcpu->arch.power_off = false;
 991
 992        return 0;
 993}
 994
 995static int kvm_arm_vcpu_set_attr(struct kvm_vcpu *vcpu,
 996                                 struct kvm_device_attr *attr)
 997{
 998        int ret = -ENXIO;
 999
1000        switch (attr->group) {
1001        default:
1002                ret = kvm_arm_vcpu_arch_set_attr(vcpu, attr);
1003                break;
1004        }
1005
1006        return ret;
1007}
1008
1009static int kvm_arm_vcpu_get_attr(struct kvm_vcpu *vcpu,
1010                                 struct kvm_device_attr *attr)
1011{
1012        int ret = -ENXIO;
1013
1014        switch (attr->group) {
1015        default:
1016                ret = kvm_arm_vcpu_arch_get_attr(vcpu, attr);
1017                break;
1018        }
1019
1020        return ret;
1021}
1022
1023static int kvm_arm_vcpu_has_attr(struct kvm_vcpu *vcpu,
1024                                 struct kvm_device_attr *attr)
1025{
1026        int ret = -ENXIO;
1027
1028        switch (attr->group) {
1029        default:
1030                ret = kvm_arm_vcpu_arch_has_attr(vcpu, attr);
1031                break;
1032        }
1033
1034        return ret;
1035}
1036
1037static int kvm_arm_vcpu_get_events(struct kvm_vcpu *vcpu,
1038                                   struct kvm_vcpu_events *events)
1039{
1040        memset(events, 0, sizeof(*events));
1041
1042        return __kvm_arm_vcpu_get_events(vcpu, events);
1043}
1044
1045static int kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
1046                                   struct kvm_vcpu_events *events)
1047{
1048        int i;
1049
1050        /* check whether the reserved field is zero */
1051        for (i = 0; i < ARRAY_SIZE(events->reserved); i++)
1052                if (events->reserved[i])
1053                        return -EINVAL;
1054
1055        /* check whether the pad field is zero */
1056        for (i = 0; i < ARRAY_SIZE(events->exception.pad); i++)
1057                if (events->exception.pad[i])
1058                        return -EINVAL;
1059
1060        return __kvm_arm_vcpu_set_events(vcpu, events);
1061}
1062
1063long kvm_arch_vcpu_ioctl(struct file *filp,
1064                         unsigned int ioctl, unsigned long arg)
1065{
1066        struct kvm_vcpu *vcpu = filp->private_data;
1067        void __user *argp = (void __user *)arg;
1068        struct kvm_device_attr attr;
1069        long r;
1070
1071        switch (ioctl) {
1072        case KVM_ARM_VCPU_INIT: {
1073                struct kvm_vcpu_init init;
1074
1075                r = -EFAULT;
1076                if (copy_from_user(&init, argp, sizeof(init)))
1077                        break;
1078
1079                r = kvm_arch_vcpu_ioctl_vcpu_init(vcpu, &init);
1080                break;
1081        }
1082        case KVM_SET_ONE_REG:
1083        case KVM_GET_ONE_REG: {
1084                struct kvm_one_reg reg;
1085
1086                r = -ENOEXEC;
1087                if (unlikely(!kvm_vcpu_initialized(vcpu)))
1088                        break;
1089
1090                r = -EFAULT;
1091                if (copy_from_user(&reg, argp, sizeof(reg)))
1092                        break;
1093
1094                if (ioctl == KVM_SET_ONE_REG)
1095                        r = kvm_arm_set_reg(vcpu, &reg);
1096                else
1097                        r = kvm_arm_get_reg(vcpu, &reg);
1098                break;
1099        }
1100        case KVM_GET_REG_LIST: {
1101                struct kvm_reg_list __user *user_list = argp;
1102                struct kvm_reg_list reg_list;
1103                unsigned n;
1104
1105                r = -ENOEXEC;
1106                if (unlikely(!kvm_vcpu_initialized(vcpu)))
1107                        break;
1108
1109                r = -EPERM;
1110                if (!kvm_arm_vcpu_is_finalized(vcpu))
1111                        break;
1112
1113                r = -EFAULT;
1114                if (copy_from_user(&reg_list, user_list, sizeof(reg_list)))
1115                        break;
1116                n = reg_list.n;
1117                reg_list.n = kvm_arm_num_regs(vcpu);
1118                if (copy_to_user(user_list, &reg_list, sizeof(reg_list)))
1119                        break;
1120                r = -E2BIG;
1121                if (n < reg_list.n)
1122                        break;
1123                r = kvm_arm_copy_reg_indices(vcpu, user_list->reg);
1124                break;
1125        }
1126        case KVM_SET_DEVICE_ATTR: {
1127                r = -EFAULT;
1128                if (copy_from_user(&attr, argp, sizeof(attr)))
1129                        break;
1130                r = kvm_arm_vcpu_set_attr(vcpu, &attr);
1131                break;
1132        }
1133        case KVM_GET_DEVICE_ATTR: {
1134                r = -EFAULT;
1135                if (copy_from_user(&attr, argp, sizeof(attr)))
1136                        break;
1137                r = kvm_arm_vcpu_get_attr(vcpu, &attr);
1138                break;
1139        }
1140        case KVM_HAS_DEVICE_ATTR: {
1141                r = -EFAULT;
1142                if (copy_from_user(&attr, argp, sizeof(attr)))
1143                        break;
1144                r = kvm_arm_vcpu_has_attr(vcpu, &attr);
1145                break;
1146        }
1147        case KVM_GET_VCPU_EVENTS: {
1148                struct kvm_vcpu_events events;
1149
1150                if (kvm_arm_vcpu_get_events(vcpu, &events))
1151                        return -EINVAL;
1152
1153                if (copy_to_user(argp, &events, sizeof(events)))
1154                        return -EFAULT;
1155
1156                return 0;
1157        }
1158        case KVM_SET_VCPU_EVENTS: {
1159                struct kvm_vcpu_events events;
1160
1161                if (copy_from_user(&events, argp, sizeof(events)))
1162                        return -EFAULT;
1163
1164                return kvm_arm_vcpu_set_events(vcpu, &events);
1165        }
1166        case KVM_ARM_VCPU_FINALIZE: {
1167                int what;
1168
1169                if (!kvm_vcpu_initialized(vcpu))
1170                        return -ENOEXEC;
1171
1172                if (get_user(what, (const int __user *)argp))
1173                        return -EFAULT;
1174
1175                return kvm_arm_vcpu_finalize(vcpu, what);
1176        }
1177        default:
1178                r = -EINVAL;
1179        }
1180
1181        return r;
1182}
1183
1184/**
1185 * kvm_vm_ioctl_get_dirty_log - get and clear the log of dirty pages in a slot
1186 * @kvm: kvm instance
1187 * @log: slot id and address to which we copy the log
1188 *
1189 * Steps 1-4 below provide general overview of dirty page logging. See
1190 * kvm_get_dirty_log_protect() function description for additional details.
1191 *
1192 * We call kvm_get_dirty_log_protect() to handle steps 1-3; upon return we
1193 * always flush the TLB (step 4) even if a previous step failed and the dirty
1194 * bitmap may be corrupt. Regardless of the previous outcome, the KVM logging
1195 * API does not preclude a subsequent dirty log read by user space. Flushing
1196 * the TLB ensures writes will be marked dirty for the next log read.
1197 *
1198 *   1. Take a snapshot of the bit and clear it if needed.
1199 *   2. Write protect the corresponding page.
1200 *   3. Copy the snapshot to the userspace.
1201 *   4. Flush TLB's if needed.
1202 */
1203int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
1204{
1205        bool flush = false;
1206        int r;
1207
1208        mutex_lock(&kvm->slots_lock);
1209
1210        r = kvm_get_dirty_log_protect(kvm, log, &flush);
1211
1212        if (flush)
1213                kvm_flush_remote_tlbs(kvm);
1214
1215        mutex_unlock(&kvm->slots_lock);
1216        return r;
1217}
1218
1219int kvm_vm_ioctl_clear_dirty_log(struct kvm *kvm, struct kvm_clear_dirty_log *log)
1220{
1221        bool flush = false;
1222        int r;
1223
1224        mutex_lock(&kvm->slots_lock);
1225
1226        r = kvm_clear_dirty_log_protect(kvm, log, &flush);
1227
1228        if (flush)
1229                kvm_flush_remote_tlbs(kvm);
1230
1231        mutex_unlock(&kvm->slots_lock);
1232        return r;
1233}
1234
1235static int kvm_vm_ioctl_set_device_addr(struct kvm *kvm,
1236                                        struct kvm_arm_device_addr *dev_addr)
1237{
1238        unsigned long dev_id, type;
1239
1240        dev_id = (dev_addr->id & KVM_ARM_DEVICE_ID_MASK) >>
1241                KVM_ARM_DEVICE_ID_SHIFT;
1242        type = (dev_addr->id & KVM_ARM_DEVICE_TYPE_MASK) >>
1243                KVM_ARM_DEVICE_TYPE_SHIFT;
1244
1245        switch (dev_id) {
1246        case KVM_ARM_DEVICE_VGIC_V2:
1247                if (!vgic_present)
1248                        return -ENXIO;
1249                return kvm_vgic_addr(kvm, type, &dev_addr->addr, true);
1250        default:
1251                return -ENODEV;
1252        }
1253}
1254
1255long kvm_arch_vm_ioctl(struct file *filp,
1256                       unsigned int ioctl, unsigned long arg)
1257{
1258        struct kvm *kvm = filp->private_data;
1259        void __user *argp = (void __user *)arg;
1260
1261        switch (ioctl) {
1262        case KVM_CREATE_IRQCHIP: {
1263                int ret;
1264                if (!vgic_present)
1265                        return -ENXIO;
1266                mutex_lock(&kvm->lock);
1267                ret = kvm_vgic_create(kvm, KVM_DEV_TYPE_ARM_VGIC_V2);
1268                mutex_unlock(&kvm->lock);
1269                return ret;
1270        }
1271        case KVM_ARM_SET_DEVICE_ADDR: {
1272                struct kvm_arm_device_addr dev_addr;
1273
1274                if (copy_from_user(&dev_addr, argp, sizeof(dev_addr)))
1275                        return -EFAULT;
1276                return kvm_vm_ioctl_set_device_addr(kvm, &dev_addr);
1277        }
1278        case KVM_ARM_PREFERRED_TARGET: {
1279                int err;
1280                struct kvm_vcpu_init init;
1281
1282                err = kvm_vcpu_preferred_target(&init);
1283                if (err)
1284                        return err;
1285
1286                if (copy_to_user(argp, &init, sizeof(init)))
1287                        return -EFAULT;
1288
1289                return 0;
1290        }
1291        default:
1292                return -EINVAL;
1293        }
1294}
1295
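/*
 * Initialise EL2 on this CPU: switch from the Hyp stub to KVM's init
 * vectors, then install the Hyp page tables, per-cpu stack and exception
 * vectors and set up stage-2 translation.
 */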
1296static void cpu_init_hyp_mode(void)
1297{
1298        phys_addr_t pgd_ptr;
1299        unsigned long hyp_stack_ptr;
1300        unsigned long stack_page;
1301        unsigned long vector_ptr;
1302
1303        /* Switch from the HYP stub to our own HYP init vector */
1304        __hyp_set_vectors(kvm_get_idmap_vector());
1305
1306        pgd_ptr = kvm_mmu_get_httbr();
1307        stack_page = __this_cpu_read(kvm_arm_hyp_stack_page);
1308        hyp_stack_ptr = stack_page + PAGE_SIZE;
1309        vector_ptr = (unsigned long)kvm_get_hyp_vector();
1310
1311        __cpu_init_hyp_mode(pgd_ptr, hyp_stack_ptr, vector_ptr);
1312        __cpu_init_stage2();
1313}
1314
1315static void cpu_hyp_reset(void)
1316{
1317        if (!is_kernel_in_hyp_mode())
1318                __hyp_reset_vectors();
1319}
1320
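/*
 * (Re)initialise this CPU's Hyp state after it has been reset, e.g. when
 * KVM hardware support is enabled or when returning from a low-power state.
 */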
1321static void cpu_hyp_reinit(void)
1322{
1323        kvm_init_host_cpu_context(&this_cpu_ptr(&kvm_host_data)->host_ctxt);
1324
1325        cpu_hyp_reset();
1326
1327        if (is_kernel_in_hyp_mode())
1328                kvm_timer_init_vhe();
1329        else
1330                cpu_init_hyp_mode();
1331
1332        kvm_arm_init_debug();
1333
1334        if (vgic_present)
1335                kvm_vgic_init_cpu_hardware();
1336}
1337
1338static void _kvm_arch_hardware_enable(void *discard)
1339{
1340        if (!__this_cpu_read(kvm_arm_hardware_enabled)) {
1341                cpu_hyp_reinit();
1342                __this_cpu_write(kvm_arm_hardware_enabled, 1);
1343        }
1344}
1345
1346int kvm_arch_hardware_enable(void)
1347{
1348        _kvm_arch_hardware_enable(NULL);
1349        return 0;
1350}
1351
1352static void _kvm_arch_hardware_disable(void *discard)
1353{
1354        if (__this_cpu_read(kvm_arm_hardware_enabled)) {
1355                cpu_hyp_reset();
1356                __this_cpu_write(kvm_arm_hardware_enabled, 0);
1357        }
1358}
1359
1360void kvm_arch_hardware_disable(void)
1361{
1362        _kvm_arch_hardware_disable(NULL);
1363}
1364
1365#ifdef CONFIG_CPU_PM
1366static int hyp_init_cpu_pm_notifier(struct notifier_block *self,
1367                                    unsigned long cmd,
1368                                    void *v)
1369{
1370        /*
1371         * kvm_arm_hardware_enabled is left with its old value over
1372         * PM_ENTER->PM_EXIT. It is used to indicate PM_EXIT should
1373         * re-enable hyp.
1374         */
1375        switch (cmd) {
1376        case CPU_PM_ENTER:
1377                if (__this_cpu_read(kvm_arm_hardware_enabled))
1378                        /*
1379                         * don't update kvm_arm_hardware_enabled here
1380                         * so that the hardware will be re-enabled
1381                         * when we resume. See below.
1382                         */
1383                        cpu_hyp_reset();
1384
1385                return NOTIFY_OK;
1386        case CPU_PM_ENTER_FAILED:
1387        case CPU_PM_EXIT:
1388                if (__this_cpu_read(kvm_arm_hardware_enabled))
1389                        /* The hardware was enabled before suspend. */
1390                        cpu_hyp_reinit();
1391
1392                return NOTIFY_OK;
1393
1394        default:
1395                return NOTIFY_DONE;
1396        }
1397}
1398
1399static struct notifier_block hyp_init_cpu_pm_nb = {
1400        .notifier_call = hyp_init_cpu_pm_notifier,
1401};
1402
1403static void __init hyp_cpu_pm_init(void)
1404{
1405        cpu_pm_register_notifier(&hyp_init_cpu_pm_nb);
1406}
1407static void __init hyp_cpu_pm_exit(void)
1408{
1409        cpu_pm_unregister_notifier(&hyp_init_cpu_pm_nb);
1410}
1411#else
1412static inline void hyp_cpu_pm_init(void)
1413{
1414}
1415static inline void hyp_cpu_pm_exit(void)
1416{
1417}
1418#endif
1419
1420static int init_common_resources(void)
1421{
1422        kvm_set_ipa_limit();
1423
1424        return 0;
1425}
1426
1427static int init_subsystems(void)
1428{
1429        int err = 0;
1430
1431        /*
1432         * Enable hardware so that subsystem initialisation can access EL2.
1433         */
1434        on_each_cpu(_kvm_arch_hardware_enable, NULL, 1);
1435
1436        /*
1437         * Register the CPU low-power (cpu_pm) notifier
1438         */
1439        hyp_cpu_pm_init();
1440
1441        /*
1442         * Init HYP view of VGIC
1443         */
1444        err = kvm_vgic_hyp_init();
1445        switch (err) {
1446        case 0:
1447                vgic_present = true;
1448                break;
1449        case -ENODEV:
1450        case -ENXIO:
1451                vgic_present = false;
1452                err = 0;
1453                break;
1454        default:
1455                goto out;
1456        }
1457
1458        /*
1459         * Init HYP architected timer support
1460         */
1461        err = kvm_timer_hyp_init(vgic_present);
1462        if (err)
1463                goto out;
1464
1465        kvm_perf_init();
1466        kvm_coproc_table_init();
1467
1468out:
1469        on_each_cpu(_kvm_arch_hardware_disable, NULL, 1);
1470
1471        return err;
1472}
1473
1474static void teardown_hyp_mode(void)
1475{
1476        int cpu;
1477
1478        free_hyp_pgds();
1479        for_each_possible_cpu(cpu)
1480                free_page(per_cpu(kvm_arm_hyp_stack_page, cpu));
1481}
1482
1483/**
1484 * init_hyp_mode - Initialise Hyp mode for all possible CPUs
1485 */
1486static int init_hyp_mode(void)
1487{
1488        int cpu;
1489        int err = 0;
1490
1491        /*
1492         * Allocate Hyp PGD and setup Hyp identity mapping
1493         */
1494        err = kvm_mmu_init();
1495        if (err)
1496                goto out_err;
1497
1498        /*
1499         * Allocate stack pages for Hypervisor-mode
1500         */
1501        for_each_possible_cpu(cpu) {
1502                unsigned long stack_page;
1503
1504                stack_page = __get_free_page(GFP_KERNEL);
1505                if (!stack_page) {
1506                        err = -ENOMEM;
1507                        goto out_err;
1508                }
1509
1510                per_cpu(kvm_arm_hyp_stack_page, cpu) = stack_page;
1511        }
1512
1513        /*
1514         * Map the Hyp-code called directly from the host
1515         */
1516        err = create_hyp_mappings(kvm_ksym_ref(__hyp_text_start),
1517                                  kvm_ksym_ref(__hyp_text_end), PAGE_HYP_EXEC);
1518        if (err) {
1519                kvm_err("Cannot map world-switch code\n");
1520                goto out_err;
1521        }
1522
1523        err = create_hyp_mappings(kvm_ksym_ref(__start_rodata),
1524                                  kvm_ksym_ref(__end_rodata), PAGE_HYP_RO);
1525        if (err) {
1526                kvm_err("Cannot map rodata section\n");
1527                goto out_err;
1528        }
1529
1530        err = create_hyp_mappings(kvm_ksym_ref(__bss_start),
1531                                  kvm_ksym_ref(__bss_stop), PAGE_HYP_RO);
1532        if (err) {
1533                kvm_err("Cannot map bss section\n");
1534                goto out_err;
1535        }
1536
1537        err = kvm_map_vectors();
1538        if (err) {
1539                kvm_err("Cannot map vectors\n");
1540                goto out_err;
1541        }
1542
1543        /*
1544         * Map the Hyp stack pages
1545         */
1546        for_each_possible_cpu(cpu) {
1547                char *stack_page = (char *)per_cpu(kvm_arm_hyp_stack_page, cpu);
1548                err = create_hyp_mappings(stack_page, stack_page + PAGE_SIZE,
1549                                          PAGE_HYP);
1550
1551                if (err) {
1552                        kvm_err("Cannot map hyp stack\n");
1553                        goto out_err;
1554                }
1555        }
1556
1557        for_each_possible_cpu(cpu) {
1558                kvm_host_data_t *cpu_data;
1559
1560                cpu_data = per_cpu_ptr(&kvm_host_data, cpu);
1561                err = create_hyp_mappings(cpu_data, cpu_data + 1, PAGE_HYP);
1562
1563                if (err) {
1564                        kvm_err("Cannot map host CPU state: %d\n", err);
1565                        goto out_err;
1566                }
1567        }
1568
1569        err = hyp_map_aux_data();
1570        if (err)
1571                kvm_err("Cannot map host auxiliary data: %d\n", err);
1572
1573        return 0;
1574
1575out_err:
1576        teardown_hyp_mode();
1577        kvm_err("error initializing Hyp mode: %d\n", err);
1578        return err;
1579}
1580
1581static void check_kvm_target_cpu(void *ret)
1582{
1583        *(int *)ret = kvm_target_cpu();
1584}
1585
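/* Find the vcpu whose MPIDR affinity matches @mpidr, or NULL if none does. */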
1586struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr)
1587{
1588        struct kvm_vcpu *vcpu;
1589        int i;
1590
1591        mpidr &= MPIDR_HWID_BITMASK;
1592        kvm_for_each_vcpu(i, vcpu, kvm) {
1593                if (mpidr == kvm_vcpu_get_mpidr_aff(vcpu))
1594                        return vcpu;
1595        }
1596        return NULL;
1597}
1598
1599bool kvm_arch_has_irq_bypass(void)
1600{
1601        return true;
1602}
1603
1604int kvm_arch_irq_bypass_add_producer(struct irq_bypass_consumer *cons,
1605                                      struct irq_bypass_producer *prod)
1606{
1607        struct kvm_kernel_irqfd *irqfd =
1608                container_of(cons, struct kvm_kernel_irqfd, consumer);
1609
1610        return kvm_vgic_v4_set_forwarding(irqfd->kvm, prod->irq,
1611                                          &irqfd->irq_entry);
1612}
1613void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *cons,
1614                                      struct irq_bypass_producer *prod)
1615{
1616        struct kvm_kernel_irqfd *irqfd =
1617                container_of(cons, struct kvm_kernel_irqfd, consumer);
1618
1619        kvm_vgic_v4_unset_forwarding(irqfd->kvm, prod->irq,
1620                                     &irqfd->irq_entry);
1621}
1622
1623void kvm_arch_irq_bypass_stop(struct irq_bypass_consumer *cons)
1624{
1625        struct kvm_kernel_irqfd *irqfd =
1626                container_of(cons, struct kvm_kernel_irqfd, consumer);
1627
1628        kvm_arm_halt_guest(irqfd->kvm);
1629}
1630
1631void kvm_arch_irq_bypass_start(struct irq_bypass_consumer *cons)
1632{
1633        struct kvm_kernel_irqfd *irqfd =
1634                container_of(cons, struct kvm_kernel_irqfd, consumer);
1635
1636        kvm_arm_resume_guest(irqfd->kvm);
1637}
1638
1639/**
1640 * kvm_arch_init - Initialize Hyp-mode and memory mappings on all CPUs.
1641 */
1642int kvm_arch_init(void *opaque)
1643{
1644        int err;
1645        int ret, cpu;
1646        bool in_hyp_mode;
1647
1648        if (!is_hyp_mode_available()) {
1649                kvm_info("HYP mode not available\n");
1650                return -ENODEV;
1651        }
1652
1653        in_hyp_mode = is_kernel_in_hyp_mode();
1654
1655        if (!in_hyp_mode && kvm_arch_requires_vhe()) {
1656                kvm_pr_unimpl("CPU unsupported in non-VHE mode, not initializing\n");
1657                return -ENODEV;
1658        }
1659
1660        for_each_online_cpu(cpu) {
1661                smp_call_function_single(cpu, check_kvm_target_cpu, &ret, 1);
1662                if (ret < 0) {
1663                        kvm_err("Error, CPU %d not supported!\n", cpu);
1664                        return -ENODEV;
1665                }
1666        }
1667
1668        err = init_common_resources();
1669        if (err)
1670                return err;
1671
1672        err = kvm_arm_init_sve();
1673        if (err)
1674                return err;
1675
1676        if (!in_hyp_mode) {
1677                err = init_hyp_mode();
1678                if (err)
1679                        goto out_err;
1680        }
1681
1682        err = init_subsystems();
1683        if (err)
1684                goto out_hyp;
1685
1686        if (in_hyp_mode)
1687                kvm_info("VHE mode initialized successfully\n");
1688        else
1689                kvm_info("Hyp mode initialized successfully\n");
1690
1691        return 0;
1692
1693out_hyp:
1694        hyp_cpu_pm_exit();
1695        if (!in_hyp_mode)
1696                teardown_hyp_mode();
1697out_err:
1698        return err;
1699}
1700
1701/* NOP: Compiling as a module not supported */
1702void kvm_arch_exit(void)
1703{
1704        kvm_perf_teardown();
1705}
1706
1707static int arm_init(void)
1708{
1709        int rc = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
1710        return rc;
1711}
1712
1713module_init(arm_init);
1714