linux/arch/arm/kvm/arm.c
/*
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 */

#include <linux/cpu.h>
#include <linux/cpu_pm.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
#include <linux/mman.h>
#include <linux/sched.h>
#include <linux/kvm.h>
#include <trace/events/kvm.h>

#define CREATE_TRACE_POINTS
#include "trace.h"

#include <asm/uaccess.h>
#include <asm/ptrace.h>
#include <asm/mman.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/virt.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_coproc.h>
#include <asm/kvm_psci.h>

#ifdef REQUIRES_VIRT
__asm__(".arch_extension        virt");
#endif

static DEFINE_PER_CPU(unsigned long, kvm_arm_hyp_stack_page);
static kvm_cpu_context_t __percpu *kvm_host_cpu_state;
static unsigned long hyp_default_vectors;

/* Per-CPU variable containing the currently running vcpu. */
static DEFINE_PER_CPU(struct kvm_vcpu *, kvm_arm_running_vcpu);

/* The VMID used in the VTTBR */
static atomic64_t kvm_vmid_gen = ATOMIC64_INIT(1);
static u8 kvm_next_vmid;
static DEFINE_SPINLOCK(kvm_vmid_lock);
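
/*
 * A VMID tags a VM's stage-2 TLB entries. kvm_vmid_gen is a global
 * generation counter and kvm_next_vmid hands out the 256 hardware VMIDs
 * within the current generation; update_vttbr() below bumps the generation
 * and flushes TLBs whenever the 8-bit VMID space wraps, so a stale VMID is
 * never reused without an intervening flush.
 */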

static bool vgic_present;

static void kvm_arm_set_running_vcpu(struct kvm_vcpu *vcpu)
{
        BUG_ON(preemptible());
        __this_cpu_write(kvm_arm_running_vcpu, vcpu);
}

/**
 * kvm_arm_get_running_vcpu - get the vcpu running on the current CPU.
 * Must be called from non-preemptible context
 */
struct kvm_vcpu *kvm_arm_get_running_vcpu(void)
{
        BUG_ON(preemptible());
        return __this_cpu_read(kvm_arm_running_vcpu);
}

/**
 * kvm_get_running_vcpus - get the per-CPU array of currently running vcpus.
 */
struct kvm_vcpu * __percpu *kvm_get_running_vcpus(void)
{
        return &kvm_arm_running_vcpu;
}

int kvm_arch_hardware_enable(void)
{
        return 0;
}

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
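        /*
         * Only IPI the target vcpu if it is actually running guest code;
         * kvm_vcpu_exiting_guest_mode() atomically moves the vcpu to
         * EXITING_GUEST_MODE and returns the mode it was in before.
         */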
        return kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE;
}

int kvm_arch_hardware_setup(void)
{
        return 0;
}

void kvm_arch_check_processor_compat(void *rtn)
{
        *(int *)rtn = 0;
}


/**
 * kvm_arch_init_vm - initializes a VM data structure
 * @kvm:        pointer to the KVM struct
 */
int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
        int ret = 0;

        if (type)
                return -EINVAL;

        ret = kvm_alloc_stage2_pgd(kvm);
        if (ret)
                goto out_fail_alloc;

        ret = create_hyp_mappings(kvm, kvm + 1);
        if (ret)
                goto out_free_stage2_pgd;

        kvm_timer_init(kvm);

        /* Mark the initial VMID generation invalid */
        kvm->arch.vmid_gen = 0;

        return ret;
out_free_stage2_pgd:
        kvm_free_stage2_pgd(kvm);
out_fail_alloc:
        return ret;
}

int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
        return VM_FAULT_SIGBUS;
}


/**
 * kvm_arch_destroy_vm - destroy the VM data structure
 * @kvm:        pointer to the KVM struct
 */
void kvm_arch_destroy_vm(struct kvm *kvm)
{
        int i;

        kvm_free_stage2_pgd(kvm);

        for (i = 0; i < KVM_MAX_VCPUS; ++i) {
                if (kvm->vcpus[i]) {
                        kvm_arch_vcpu_free(kvm->vcpus[i]);
                        kvm->vcpus[i] = NULL;
                }
        }

        kvm_vgic_destroy(kvm);
}

int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
        int r;
        switch (ext) {
        case KVM_CAP_IRQCHIP:
                r = vgic_present;
                break;
        case KVM_CAP_DEVICE_CTRL:
        case KVM_CAP_USER_MEMORY:
        case KVM_CAP_SYNC_MMU:
        case KVM_CAP_DESTROY_MEMORY_REGION_WORKS:
        case KVM_CAP_ONE_REG:
        case KVM_CAP_ARM_PSCI:
        case KVM_CAP_ARM_PSCI_0_2:
        case KVM_CAP_READONLY_MEM:
                r = 1;
                break;
        case KVM_CAP_COALESCED_MMIO:
                r = KVM_COALESCED_MMIO_PAGE_OFFSET;
                break;
        case KVM_CAP_ARM_SET_DEVICE_ADDR:
                r = 1;
                break;
        case KVM_CAP_NR_VCPUS:
                r = num_online_cpus();
                break;
        case KVM_CAP_MAX_VCPUS:
                r = KVM_MAX_VCPUS;
                break;
        default:
                r = kvm_arch_dev_ioctl_check_extension(ext);
                break;
        }
        return r;
}

long kvm_arch_dev_ioctl(struct file *filp,
                        unsigned int ioctl, unsigned long arg)
{
        return -EINVAL;
}


struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
{
        int err;
        struct kvm_vcpu *vcpu;

        vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
        if (!vcpu) {
                err = -ENOMEM;
                goto out;
        }

        err = kvm_vcpu_init(vcpu, kvm, id);
        if (err)
                goto free_vcpu;

        err = create_hyp_mappings(vcpu, vcpu + 1);
        if (err)
                goto vcpu_uninit;

        return vcpu;
vcpu_uninit:
        kvm_vcpu_uninit(vcpu);
free_vcpu:
        kmem_cache_free(kvm_vcpu_cache, vcpu);
out:
        return ERR_PTR(err);
}

int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
        return 0;
}

void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
{
        kvm_mmu_free_memory_caches(vcpu);
        kvm_timer_vcpu_terminate(vcpu);
        kvm_vgic_vcpu_destroy(vcpu);
        kmem_cache_free(kvm_vcpu_cache, vcpu);
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
        kvm_arch_vcpu_free(vcpu);
}

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
        return 0;
}

int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
        /* Force users to call KVM_ARM_VCPU_INIT */
        vcpu->arch.target = -1;

        /* Set up the timer */
        kvm_timer_vcpu_init(vcpu);

        return 0;
}

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
        vcpu->cpu = cpu;
        vcpu->arch.host_cpu_context = this_cpu_ptr(kvm_host_cpu_state);

        /*
         * Check whether this vcpu requires the cache to be flushed on
         * this physical CPU. This is a consequence of doing dcache
         * operations by set/way on this vcpu. We do it here to be in
         * a non-preemptible section.
         */
        if (cpumask_test_and_clear_cpu(cpu, &vcpu->arch.require_dcache_flush))
                flush_cache_all(); /* We'd really want v7_flush_dcache_all() */

        kvm_arm_set_running_vcpu(vcpu);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
        /*
         * The arch-generic KVM code expects the cpu field of a vcpu to be -1
         * if the vcpu is no longer assigned to a cpu.  This is used for the
         * optimized make_all_cpus_request path.
         */
        vcpu->cpu = -1;

        kvm_arm_set_running_vcpu(NULL);
}

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
                                        struct kvm_guest_debug *dbg)
{
        return -EINVAL;
}


int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state)
{
        return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state)
{
        return -EINVAL;
}

/**
 * kvm_arch_vcpu_runnable - determine if the vcpu can be scheduled
 * @v:          The VCPU pointer
 *
 * If the guest CPU is not waiting for interrupts or an interrupt line is
 * asserted, the CPU is by definition runnable.
 */
int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
{
        return !!v->arch.irq_lines || kvm_vgic_vcpu_pending_irq(v);
}

/* Just ensure a guest exit from a particular CPU */
static void exit_vm_noop(void *info)
{
}

void force_vm_exit(const cpumask_t *mask)
{
        smp_call_function_many(mask, exit_vm_noop, NULL, true);
}

/**
 * need_new_vmid_gen - check that the VMID is still valid
 * @kvm: The VM whose VMID to check
 *
 * Returns true if there is a new generation of VMIDs being used
 *
 * The hardware supports only 256 values with the value zero reserved for the
 * host, so we check if an assigned value belongs to a previous generation,
 * which requires us to assign a new value. If we're the first to use a
 * VMID for the new generation, we must flush necessary caches and TLBs on all
 * CPUs.
 */
static bool need_new_vmid_gen(struct kvm *kvm)
{
        return unlikely(kvm->arch.vmid_gen != atomic64_read(&kvm_vmid_gen));
}

/**
 * update_vttbr - Update the VTTBR with a valid VMID before the guest runs
 * @kvm:        The guest that we are about to run
 *
 * Called from kvm_arch_vcpu_ioctl_run before entering the guest to ensure the
 * VM has a valid VMID, otherwise assigns a new one and flushes corresponding
 * caches and TLBs.
 */
static void update_vttbr(struct kvm *kvm)
{
        phys_addr_t pgd_phys;
        u64 vmid;

        if (!need_new_vmid_gen(kvm))
                return;

        spin_lock(&kvm_vmid_lock);

        /*
         * We need to re-check the vmid_gen here to ensure that if another vcpu
         * already allocated a valid vmid for this vm, then this vcpu should
         * use the same vmid.
         */
        if (!need_new_vmid_gen(kvm)) {
                spin_unlock(&kvm_vmid_lock);
                return;
        }

        /* First user of a new VMID generation? */
        if (unlikely(kvm_next_vmid == 0)) {
                atomic64_inc(&kvm_vmid_gen);
                kvm_next_vmid = 1;

                /*
                 * On SMP we know no other CPUs can use this CPU's or each
                 * other's VMID after force_vm_exit returns since the
                 * kvm_vmid_lock blocks them from reentry to the guest.
                 */
                force_vm_exit(cpu_all_mask);
                /*
                 * Now broadcast TLB + ICACHE invalidation over the inner
                 * shareable domain to make sure all data structures are
                 * clean.
                 */
                kvm_call_hyp(__kvm_flush_vm_context);
        }

        kvm->arch.vmid_gen = atomic64_read(&kvm_vmid_gen);
        kvm->arch.vmid = kvm_next_vmid;
        kvm_next_vmid++;
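        /*
         * kvm_next_vmid is a u8: after VMID 255 it wraps back to zero,
         * which is what triggers the new-generation path above for the
         * next caller.
         */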

        /* update vttbr to be used with the new vmid */
        pgd_phys = virt_to_phys(kvm_get_hwpgd(kvm));
        BUG_ON(pgd_phys & ~VTTBR_BADDR_MASK);
        vmid = ((u64)(kvm->arch.vmid) << VTTBR_VMID_SHIFT) & VTTBR_VMID_MASK;
        kvm->arch.vttbr = pgd_phys | vmid;
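        /*
         * For example, with VTTBR_VMID_SHIFT == 48 (VMID in VTTBR bits
         * [55:48]), vmid 5 and a stage-2 pgd at physical 0x80000000 yield
         * vttbr == 0x0005000080000000.
         */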

        spin_unlock(&kvm_vmid_lock);
}

static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu)
{
        int ret;

        if (likely(vcpu->arch.has_run_once))
                return 0;

        vcpu->arch.has_run_once = true;

        /*
         * Initialize the VGIC before running a vcpu the first time on
         * this VM.
         */
        if (unlikely(!vgic_initialized(vcpu->kvm))) {
                ret = kvm_vgic_init(vcpu->kvm);
                if (ret)
                        return ret;
        }

        return 0;
}

static void vcpu_pause(struct kvm_vcpu *vcpu)
{
        wait_queue_head_t *wq = kvm_arch_vcpu_wq(vcpu);

        wait_event_interruptible(*wq, !vcpu->arch.pause);
}

static int kvm_vcpu_initialized(struct kvm_vcpu *vcpu)
{
        return vcpu->arch.target >= 0;
}

/**
 * kvm_arch_vcpu_ioctl_run - the main VCPU run function to execute guest code
 * @vcpu:       The VCPU pointer
 * @run:        The kvm_run structure pointer used for userspace state exchange
 *
 * This function is called through the KVM_RUN ioctl from user space. It
 * will execute VM code in a loop until the time slice for the process is
 * used up or some emulation is needed from user space, in which case the
 * function will return with return value 0 and with the kvm_run structure
 * filled in with the required data for the requested emulation.
 */
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
        int ret;
        sigset_t sigsaved;

        if (unlikely(!kvm_vcpu_initialized(vcpu)))
                return -ENOEXEC;

        ret = kvm_vcpu_first_run_init(vcpu);
        if (ret)
                return ret;

        if (run->exit_reason == KVM_EXIT_MMIO) {
                ret = kvm_handle_mmio_return(vcpu, vcpu->run);
                if (ret)
                        return ret;
        }

        if (vcpu->sigset_active)
                sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

        ret = 1;
        run->exit_reason = KVM_EXIT_UNKNOWN;
        while (ret > 0) {
                /*
                 * Check conditions before entering the guest
                 */
                cond_resched();

                update_vttbr(vcpu->kvm);

                if (vcpu->arch.pause)
                        vcpu_pause(vcpu);

                kvm_vgic_flush_hwstate(vcpu);
                kvm_timer_flush_hwstate(vcpu);

                local_irq_disable();

                /*
                 * Re-check atomic conditions
                 */
                if (signal_pending(current)) {
                        ret = -EINTR;
                        run->exit_reason = KVM_EXIT_INTR;
                }

                if (ret <= 0 || need_new_vmid_gen(vcpu->kvm)) {
                        local_irq_enable();
                        kvm_timer_sync_hwstate(vcpu);
                        kvm_vgic_sync_hwstate(vcpu);
                        continue;
                }

                /**************************************************************
                 * Enter the guest
                 */
                trace_kvm_entry(*vcpu_pc(vcpu));
                kvm_guest_enter();
                vcpu->mode = IN_GUEST_MODE;

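                /* World-switch into the guest; ret carries the HYP exit code. */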
                ret = kvm_call_hyp(__kvm_vcpu_run, vcpu);

                vcpu->mode = OUTSIDE_GUEST_MODE;
                vcpu->arch.last_pcpu = smp_processor_id();
                kvm_guest_exit();
                trace_kvm_exit(*vcpu_pc(vcpu));
                /*
                 * We may have taken a host interrupt in HYP mode (ie
                 * while executing the guest). This interrupt is still
                 * pending, as we haven't serviced it yet!
                 *
                 * We're now back in SVC mode, with interrupts
                 * disabled.  Enabling the interrupts now will have
                 * the effect of taking the interrupt again, in SVC
                 * mode this time.
                 */
                local_irq_enable();

                /*
                 * Back from guest
                 *************************************************************/

                kvm_timer_sync_hwstate(vcpu);
                kvm_vgic_sync_hwstate(vcpu);

                ret = handle_exit(vcpu, run, ret);
        }

        if (vcpu->sigset_active)
                sigprocmask(SIG_SETMASK, &sigsaved, NULL);
        return ret;
}

static int vcpu_interrupt_line(struct kvm_vcpu *vcpu, int number, bool level)
{
        int bit_index;
        bool set;
        unsigned long *ptr;

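        /*
         * HCR_VI and HCR_VF are single-bit masks in the virtual HCR image;
         * __ffs() turns each mask into a bit number so the line state can be
         * tracked with atomic bitops on the irq_lines word below.
         */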
        if (number == KVM_ARM_IRQ_CPU_IRQ)
                bit_index = __ffs(HCR_VI);
        else /* KVM_ARM_IRQ_CPU_FIQ */
                bit_index = __ffs(HCR_VF);

        ptr = (unsigned long *)&vcpu->arch.irq_lines;
        if (level)
                set = test_and_set_bit(bit_index, ptr);
        else
                set = test_and_clear_bit(bit_index, ptr);

        /*
         * If we didn't change anything, no need to wake up or kick other CPUs
         */
        if (set == level)
                return 0;

        /*
         * The vcpu irq_lines field was updated, wake up sleeping VCPUs and
         * trigger a world-switch round on the running physical CPU to set the
         * virtual IRQ/FIQ fields in the HCR appropriately.
         */
        kvm_vcpu_kick(vcpu);

        return 0;
}

int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level,
                          bool line_status)
{
        u32 irq = irq_level->irq;
        unsigned int irq_type, vcpu_idx, irq_num;
        int nrcpus = atomic_read(&kvm->online_vcpus);
        struct kvm_vcpu *vcpu = NULL;
        bool level = irq_level->level;

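        /*
         * The irq value packs three fields, matching the KVM_ARM_IRQ_*
         * shift/mask constants used below: the type in the top byte, the
         * vcpu index in the next byte and the interrupt number in the low
         * 16 bits.
         */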
        irq_type = (irq >> KVM_ARM_IRQ_TYPE_SHIFT) & KVM_ARM_IRQ_TYPE_MASK;
        vcpu_idx = (irq >> KVM_ARM_IRQ_VCPU_SHIFT) & KVM_ARM_IRQ_VCPU_MASK;
        irq_num = (irq >> KVM_ARM_IRQ_NUM_SHIFT) & KVM_ARM_IRQ_NUM_MASK;

        trace_kvm_irq_line(irq_type, vcpu_idx, irq_num, irq_level->level);

        switch (irq_type) {
        case KVM_ARM_IRQ_TYPE_CPU:
                if (irqchip_in_kernel(kvm))
                        return -ENXIO;

                if (vcpu_idx >= nrcpus)
                        return -EINVAL;

                vcpu = kvm_get_vcpu(kvm, vcpu_idx);
                if (!vcpu)
                        return -EINVAL;

                if (irq_num > KVM_ARM_IRQ_CPU_FIQ)
                        return -EINVAL;

                return vcpu_interrupt_line(vcpu, irq_num, level);
        case KVM_ARM_IRQ_TYPE_PPI:
                if (!irqchip_in_kernel(kvm))
                        return -ENXIO;

                if (vcpu_idx >= nrcpus)
                        return -EINVAL;

                vcpu = kvm_get_vcpu(kvm, vcpu_idx);
                if (!vcpu)
                        return -EINVAL;

                if (irq_num < VGIC_NR_SGIS || irq_num >= VGIC_NR_PRIVATE_IRQS)
                        return -EINVAL;

                return kvm_vgic_inject_irq(kvm, vcpu->vcpu_id, irq_num, level);
        case KVM_ARM_IRQ_TYPE_SPI:
                if (!irqchip_in_kernel(kvm))
                        return -ENXIO;

                if (irq_num < VGIC_NR_PRIVATE_IRQS ||
                    irq_num > KVM_ARM_IRQ_GIC_MAX)
                        return -EINVAL;

                return kvm_vgic_inject_irq(kvm, 0, irq_num, level);
        }

        return -EINVAL;
}

static int kvm_arch_vcpu_ioctl_vcpu_init(struct kvm_vcpu *vcpu,
                                         struct kvm_vcpu_init *init)
{
        int ret;

        ret = kvm_vcpu_set_target(vcpu, init);
        if (ret)
                return ret;

        /*
         * Handle the "start in power-off" case by marking the VCPU as paused.
         */
        if (__test_and_clear_bit(KVM_ARM_VCPU_POWER_OFF, vcpu->arch.features))
                vcpu->arch.pause = true;

        return 0;
}

long kvm_arch_vcpu_ioctl(struct file *filp,
                         unsigned int ioctl, unsigned long arg)
{
        struct kvm_vcpu *vcpu = filp->private_data;
        void __user *argp = (void __user *)arg;

        switch (ioctl) {
        case KVM_ARM_VCPU_INIT: {
                struct kvm_vcpu_init init;

                if (copy_from_user(&init, argp, sizeof(init)))
                        return -EFAULT;

                return kvm_arch_vcpu_ioctl_vcpu_init(vcpu, &init);
        }
        case KVM_SET_ONE_REG:
        case KVM_GET_ONE_REG: {
                struct kvm_one_reg reg;

                if (unlikely(!kvm_vcpu_initialized(vcpu)))
                        return -ENOEXEC;

                if (copy_from_user(&reg, argp, sizeof(reg)))
                        return -EFAULT;
                if (ioctl == KVM_SET_ONE_REG)
                        return kvm_arm_set_reg(vcpu, &reg);
                else
                        return kvm_arm_get_reg(vcpu, &reg);
        }
        case KVM_GET_REG_LIST: {
                struct kvm_reg_list __user *user_list = argp;
                struct kvm_reg_list reg_list;
                unsigned n;

                if (unlikely(!kvm_vcpu_initialized(vcpu)))
                        return -ENOEXEC;

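                /*
                 * Userspace passes the capacity of its array in reg_list.n;
                 * the real count is always copied back, and the indices are
                 * only filled in when the supplied capacity is big enough.
                 */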
                if (copy_from_user(&reg_list, user_list, sizeof(reg_list)))
                        return -EFAULT;
                n = reg_list.n;
                reg_list.n = kvm_arm_num_regs(vcpu);
                if (copy_to_user(user_list, &reg_list, sizeof(reg_list)))
                        return -EFAULT;
                if (n < reg_list.n)
                        return -E2BIG;
                return kvm_arm_copy_reg_indices(vcpu, user_list->reg);
        }
        default:
                return -EINVAL;
        }
}

int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
{
        return -EINVAL;
}

static int kvm_vm_ioctl_set_device_addr(struct kvm *kvm,
                                        struct kvm_arm_device_addr *dev_addr)
{
        unsigned long dev_id, type;

        dev_id = (dev_addr->id & KVM_ARM_DEVICE_ID_MASK) >>
                KVM_ARM_DEVICE_ID_SHIFT;
        type = (dev_addr->id & KVM_ARM_DEVICE_TYPE_MASK) >>
                KVM_ARM_DEVICE_TYPE_SHIFT;

        switch (dev_id) {
        case KVM_ARM_DEVICE_VGIC_V2:
                if (!vgic_present)
                        return -ENXIO;
                return kvm_vgic_addr(kvm, type, &dev_addr->addr, true);
        default:
                return -ENODEV;
        }
}

long kvm_arch_vm_ioctl(struct file *filp,
                       unsigned int ioctl, unsigned long arg)
{
        struct kvm *kvm = filp->private_data;
        void __user *argp = (void __user *)arg;

        switch (ioctl) {
        case KVM_CREATE_IRQCHIP: {
                if (vgic_present)
                        return kvm_vgic_create(kvm);
                else
                        return -ENXIO;
        }
        case KVM_ARM_SET_DEVICE_ADDR: {
                struct kvm_arm_device_addr dev_addr;

                if (copy_from_user(&dev_addr, argp, sizeof(dev_addr)))
                        return -EFAULT;
                return kvm_vm_ioctl_set_device_addr(kvm, &dev_addr);
        }
        case KVM_ARM_PREFERRED_TARGET: {
                int err;
                struct kvm_vcpu_init init;

                err = kvm_vcpu_preferred_target(&init);
                if (err)
                        return err;

                if (copy_to_user(argp, &init, sizeof(init)))
                        return -EFAULT;

                return 0;
        }
        default:
                return -EINVAL;
        }
}

static void cpu_init_hyp_mode(void *dummy)
{
        phys_addr_t boot_pgd_ptr;
        phys_addr_t pgd_ptr;
        unsigned long hyp_stack_ptr;
        unsigned long stack_page;
        unsigned long vector_ptr;

        /* Switch from the HYP stub to our own HYP init vector */
        __hyp_set_vectors(kvm_get_idmap_vector());

        boot_pgd_ptr = kvm_mmu_get_boot_httbr();
        pgd_ptr = kvm_mmu_get_httbr();
        stack_page = __this_cpu_read(kvm_arm_hyp_stack_page);
        hyp_stack_ptr = stack_page + PAGE_SIZE;
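        /* The HYP stack grows downwards; start at the top of the page. */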
        vector_ptr = (unsigned long)__kvm_hyp_vector;

        __cpu_init_hyp_mode(boot_pgd_ptr, pgd_ptr, hyp_stack_ptr, vector_ptr);
}

static int hyp_init_cpu_notify(struct notifier_block *self,
                               unsigned long action, void *cpu)
{
        switch (action) {
        case CPU_STARTING:
        case CPU_STARTING_FROZEN:
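                /*
                 * Seeing the stub vectors means this CPU has lost (or never
                 * had) its HYP state, e.g. across a hotplug cycle, so run
                 * the init sequence again.
                 */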
                if (__hyp_get_vectors() == hyp_default_vectors)
                        cpu_init_hyp_mode(NULL);
                break;
        }

        return NOTIFY_OK;
}

static struct notifier_block hyp_init_cpu_nb = {
        .notifier_call = hyp_init_cpu_notify,
};

#ifdef CONFIG_CPU_PM
static int hyp_init_cpu_pm_notifier(struct notifier_block *self,
                                    unsigned long cmd,
                                    void *v)
{
        if (cmd == CPU_PM_EXIT &&
            __hyp_get_vectors() == hyp_default_vectors) {
                cpu_init_hyp_mode(NULL);
                return NOTIFY_OK;
        }

        return NOTIFY_DONE;
}

static struct notifier_block hyp_init_cpu_pm_nb = {
        .notifier_call = hyp_init_cpu_pm_notifier,
};

static void __init hyp_cpu_pm_init(void)
{
        cpu_pm_register_notifier(&hyp_init_cpu_pm_nb);
}
#else
static inline void hyp_cpu_pm_init(void)
{
}
#endif

/**
 * Initializes Hyp-mode on all online CPUs
 */
static int init_hyp_mode(void)
{
        int cpu;
        int err = 0;

        /*
         * Allocate Hyp PGD and setup Hyp identity mapping
         */
        err = kvm_mmu_init();
        if (err)
                goto out_err;

        /*
         * It is probably enough to obtain the default on one
         * CPU. It's unlikely to be different on the others.
         */
        hyp_default_vectors = __hyp_get_vectors();

        /*
         * Allocate stack pages for Hypervisor-mode
         */
        for_each_possible_cpu(cpu) {
                unsigned long stack_page;

                stack_page = __get_free_page(GFP_KERNEL);
                if (!stack_page) {
                        err = -ENOMEM;
                        goto out_free_stack_pages;
                }

                per_cpu(kvm_arm_hyp_stack_page, cpu) = stack_page;
        }

        /*
         * Map the Hyp-code called directly from the host
         */
        err = create_hyp_mappings(__kvm_hyp_code_start, __kvm_hyp_code_end);
        if (err) {
                kvm_err("Cannot map world-switch code\n");
                goto out_free_mappings;
        }

        /*
         * Map the Hyp stack pages
         */
        for_each_possible_cpu(cpu) {
                char *stack_page = (char *)per_cpu(kvm_arm_hyp_stack_page, cpu);
                err = create_hyp_mappings(stack_page, stack_page + PAGE_SIZE);

                if (err) {
                        kvm_err("Cannot map hyp stack\n");
                        goto out_free_mappings;
                }
        }

        /*
         * Map the host CPU structures
         */
        kvm_host_cpu_state = alloc_percpu(kvm_cpu_context_t);
        if (!kvm_host_cpu_state) {
                err = -ENOMEM;
                kvm_err("Cannot allocate host CPU state\n");
                goto out_free_mappings;
        }

        for_each_possible_cpu(cpu) {
                kvm_cpu_context_t *cpu_ctxt;

                cpu_ctxt = per_cpu_ptr(kvm_host_cpu_state, cpu);
                err = create_hyp_mappings(cpu_ctxt, cpu_ctxt + 1);

                if (err) {
                        kvm_err("Cannot map host CPU state: %d\n", err);
                        goto out_free_context;
                }
        }

        /*
         * Execute the init code on each CPU.
         */
        on_each_cpu(cpu_init_hyp_mode, NULL, 1);

        /*
         * Init HYP view of VGIC
         */
        err = kvm_vgic_hyp_init();
        if (err)
                goto out_free_context;

#ifdef CONFIG_KVM_ARM_VGIC
        vgic_present = true;
#endif

        /*
         * Init HYP architected timer support
         */
        err = kvm_timer_hyp_init();
        if (err)
                goto out_free_mappings;

#ifndef CONFIG_HOTPLUG_CPU
        free_boot_hyp_pgd();
#endif

        kvm_perf_init();

        kvm_info("Hyp mode initialized successfully\n");

        return 0;
out_free_context:
        free_percpu(kvm_host_cpu_state);
out_free_mappings:
        free_hyp_pgds();
out_free_stack_pages:
        for_each_possible_cpu(cpu)
                free_page(per_cpu(kvm_arm_hyp_stack_page, cpu));
out_err:
        kvm_err("error initializing Hyp mode: %d\n", err);
        return err;
}

static void check_kvm_target_cpu(void *ret)
{
        *(int *)ret = kvm_target_cpu();
}

/**
 * Initialize Hyp-mode and memory mappings on all CPUs.
 */
int kvm_arch_init(void *opaque)
{
        int err;
        int ret, cpu;

        if (!is_hyp_mode_available()) {
                kvm_err("HYP mode not available\n");
                return -ENODEV;
        }

        for_each_online_cpu(cpu) {
                smp_call_function_single(cpu, check_kvm_target_cpu, &ret, 1);
                if (ret < 0) {
                        kvm_err("Error, CPU %d not supported!\n", cpu);
                        return -ENODEV;
                }
        }

        cpu_notifier_register_begin();

        err = init_hyp_mode();
        if (err)
                goto out_err;

        err = __register_cpu_notifier(&hyp_init_cpu_nb);
        if (err) {
                kvm_err("Cannot register HYP init CPU notifier (%d)\n", err);
                goto out_err;
        }

        cpu_notifier_register_done();

        hyp_cpu_pm_init();

        kvm_coproc_table_init();
        return 0;
out_err:
        cpu_notifier_register_done();
        return err;
}

/* NOP: Compiling as a module not supported */
void kvm_arch_exit(void)
{
        kvm_perf_teardown();
}

static int arm_init(void)
{
        int rc = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
        return rc;
}

module_init(arm_init);