16#include <kvm/iodev.h>
17
18#include <linux/kvm_host.h>
19#include <linux/kvm.h>
20#include <linux/module.h>
21#include <linux/errno.h>
22#include <linux/percpu.h>
23#include <linux/mm.h>
24#include <linux/miscdevice.h>
25#include <linux/vmalloc.h>
26#include <linux/reboot.h>
27#include <linux/debugfs.h>
28#include <linux/highmem.h>
29#include <linux/file.h>
30#include <linux/syscore_ops.h>
31#include <linux/cpu.h>
32#include <linux/sched/signal.h>
33#include <linux/sched/mm.h>
34#include <linux/sched/stat.h>
35#include <linux/cpumask.h>
36#include <linux/smp.h>
37#include <linux/anon_inodes.h>
38#include <linux/profile.h>
39#include <linux/kvm_para.h>
40#include <linux/pagemap.h>
41#include <linux/mman.h>
42#include <linux/swap.h>
43#include <linux/bitops.h>
44#include <linux/spinlock.h>
45#include <linux/compat.h>
46#include <linux/srcu.h>
47#include <linux/hugetlb.h>
48#include <linux/slab.h>
49#include <linux/sort.h>
50#include <linux/bsearch.h>
51#include <linux/io.h>
52#include <linux/lockdep.h>
53
54#include <asm/processor.h>
55#include <asm/ioctl.h>
56#include <linux/uaccess.h>
57#include <asm/pgtable.h>
58
59#include "coalesced_mmio.h"
60#include "async_pf.h"
61#include "vfio.h"
62
63#define CREATE_TRACE_POINTS
64#include <trace/events/kvm.h>
65
66
67#define ITOA_MAX_LEN 12
68
69MODULE_AUTHOR("Qumranet");
70MODULE_LICENSE("GPL");
71
72
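/* Upper bound, in ns, on how long a halting vCPU busy-polls before it sleeps. */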
73unsigned int halt_poll_ns = KVM_HALT_POLL_NS_DEFAULT;
74module_param(halt_poll_ns, uint, 0644);
75EXPORT_SYMBOL_GPL(halt_poll_ns);
76
77
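/* Multiplier applied to a vCPU's halt_poll_ns when its polling window grows. */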
78unsigned int halt_poll_ns_grow = 2;
79module_param(halt_poll_ns_grow, uint, 0644);
80EXPORT_SYMBOL_GPL(halt_poll_ns_grow);
81
82
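/* Minimum value a vCPU's halt_poll_ns is raised to when it starts growing. */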
83unsigned int halt_poll_ns_grow_start = 10000;
84module_param(halt_poll_ns_grow_start, uint, 0644);
85EXPORT_SYMBOL_GPL(halt_poll_ns_grow_start);
86
87
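/* Divisor applied when shrinking a vCPU's halt_poll_ns; 0 resets it to zero. */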
88unsigned int halt_poll_ns_shrink;
89module_param(halt_poll_ns_shrink, uint, 0644);
90EXPORT_SYMBOL_GPL(halt_poll_ns_shrink);
91
92
93
94
95
96
97
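/*
 * kvm_lock protects vm_list; kvm_count_lock serializes hardware
 * enabling/disabling and kvm_usage_count.
 */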
98DEFINE_MUTEX(kvm_lock);
99static DEFINE_RAW_SPINLOCK(kvm_count_lock);
100LIST_HEAD(vm_list);
101
102static cpumask_var_t cpus_hardware_enabled;
103static int kvm_usage_count;
104static atomic_t hardware_enable_failed;
105
106struct kmem_cache *kvm_vcpu_cache;
107EXPORT_SYMBOL_GPL(kvm_vcpu_cache);
108
109static __read_mostly struct preempt_ops kvm_preempt_ops;
110
111struct dentry *kvm_debugfs_dir;
112EXPORT_SYMBOL_GPL(kvm_debugfs_dir);
113
114static int kvm_debugfs_num_entries;
115static const struct file_operations *stat_fops_per_vm[];
116
117static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl,
118 unsigned long arg);
119#ifdef CONFIG_KVM_COMPAT
120static long kvm_vcpu_compat_ioctl(struct file *file, unsigned int ioctl,
121 unsigned long arg);
122#define KVM_COMPAT(c) .compat_ioctl = (c)
123#else
124static long kvm_no_compat_ioctl(struct file *file, unsigned int ioctl,
125 unsigned long arg) { return -EINVAL; }
126#define KVM_COMPAT(c) .compat_ioctl = kvm_no_compat_ioctl
127#endif
128static int hardware_enable_all(void);
129static void hardware_disable_all(void);
130
131static void kvm_io_bus_destroy(struct kvm_io_bus *bus);
132
133static void mark_page_dirty_in_slot(struct kvm_memory_slot *memslot, gfn_t gfn);
134
135__visible bool kvm_rebooting;
136EXPORT_SYMBOL_GPL(kvm_rebooting);
137
138static bool largepages_enabled = true;
139
140#define KVM_EVENT_CREATE_VM 0
141#define KVM_EVENT_DESTROY_VM 1
142static void kvm_uevent_notify_change(unsigned int type, struct kvm *kvm);
143static unsigned long long kvm_createvm_count;
144static unsigned long long kvm_active_vms;
145
146__weak int kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
147 unsigned long start, unsigned long end, bool blockable)
148{
149 return 0;
150}
151
152bool kvm_is_reserved_pfn(kvm_pfn_t pfn)
153{
154 if (pfn_valid(pfn))
155 return PageReserved(pfn_to_page(pfn));
156
157 return true;
158}
159
160
161
162
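/* Switch to @vcpu's context on the current CPU; paired with vcpu_put(). */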
163void vcpu_load(struct kvm_vcpu *vcpu)
164{
165 int cpu = get_cpu();
166 preempt_notifier_register(&vcpu->preempt_notifier);
167 kvm_arch_vcpu_load(vcpu, cpu);
168 put_cpu();
169}
170EXPORT_SYMBOL_GPL(vcpu_load);
171
172void vcpu_put(struct kvm_vcpu *vcpu)
173{
174 preempt_disable();
175 kvm_arch_vcpu_put(vcpu);
176 preempt_notifier_unregister(&vcpu->preempt_notifier);
177 preempt_enable();
178}
179EXPORT_SYMBOL_GPL(vcpu_put);
180
181
182static bool kvm_request_needs_ipi(struct kvm_vcpu *vcpu, unsigned req)
183{
184 int mode = kvm_vcpu_exiting_guest_mode(vcpu);
185
186
187
188
189
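        /*
         * Requests with KVM_REQUEST_WAIT must interrupt the target unless it
         * has fully left guest mode; all other requests only need an IPI
         * while the vCPU is still in guest mode.
         */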
190 if (req & KVM_REQUEST_WAIT)
191 return mode != OUTSIDE_GUEST_MODE;
192
193
194
195
196 return mode == IN_GUEST_MODE;
197}
198
199static void ack_flush(void *_completed)
200{
201}
202
203static inline bool kvm_kick_many_cpus(const struct cpumask *cpus, bool wait)
204{
205 if (unlikely(!cpus))
206 cpus = cpu_online_mask;
207
208 if (cpumask_empty(cpus))
209 return false;
210
211 smp_call_function_many(cpus, ack_flush, NULL, wait);
212 return true;
213}
214
215bool kvm_make_vcpus_request_mask(struct kvm *kvm, unsigned int req,
216 unsigned long *vcpu_bitmap, cpumask_var_t tmp)
217{
218 int i, cpu, me;
219 struct kvm_vcpu *vcpu;
220 bool called;
221
222 me = get_cpu();
223
224 kvm_for_each_vcpu(i, vcpu, kvm) {
225 if (vcpu_bitmap && !test_bit(i, vcpu_bitmap))
226 continue;
227
228 kvm_make_request(req, vcpu);
229 cpu = vcpu->cpu;
230
231 if (!(req & KVM_REQUEST_NO_WAKEUP) && kvm_vcpu_wake_up(vcpu))
232 continue;
233
234 if (tmp != NULL && cpu != -1 && cpu != me &&
235 kvm_request_needs_ipi(vcpu, req))
236 __cpumask_set_cpu(cpu, tmp);
237 }
238
239 called = kvm_kick_many_cpus(tmp, !!(req & KVM_REQUEST_WAIT));
240 put_cpu();
241
242 return called;
243}
244
245bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req)
246{
247 cpumask_var_t cpus;
248 bool called;
249
250 zalloc_cpumask_var(&cpus, GFP_ATOMIC);
251
252 called = kvm_make_vcpus_request_mask(kvm, req, NULL, cpus);
253
254 free_cpumask_var(cpus);
255 return called;
256}
257
258#ifndef CONFIG_HAVE_KVM_ARCH_TLB_FLUSH_ALL
259void kvm_flush_remote_tlbs(struct kvm *kvm)
260{
261
262
263
264
265 long dirty_count = smp_load_acquire(&kvm->tlbs_dirty);
266
267
268
269
270
271
272
273
274
275
276
277
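        /*
         * Count the flush if the architecture handled it directly, or if the
         * KVM_REQ_TLB_FLUSH request had to be propagated to other CPUs.
         */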
278 if (!kvm_arch_flush_remote_tlb(kvm)
279 || kvm_make_all_cpus_request(kvm, KVM_REQ_TLB_FLUSH))
280 ++kvm->stat.remote_tlb_flush;
281 cmpxchg(&kvm->tlbs_dirty, dirty_count, 0);
282}
283EXPORT_SYMBOL_GPL(kvm_flush_remote_tlbs);
284#endif
285
286void kvm_reload_remote_mmus(struct kvm *kvm)
287{
288 kvm_make_all_cpus_request(kvm, KVM_REQ_MMU_RELOAD);
289}
290
291int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
292{
293 struct page *page;
294 int r;
295
296 mutex_init(&vcpu->mutex);
297 vcpu->cpu = -1;
298 vcpu->kvm = kvm;
299 vcpu->vcpu_id = id;
300 vcpu->pid = NULL;
301 init_swait_queue_head(&vcpu->wq);
302 kvm_async_pf_vcpu_init(vcpu);
303
304 vcpu->pre_pcpu = -1;
305 INIT_LIST_HEAD(&vcpu->blocked_vcpu_list);
306
307 page = alloc_page(GFP_KERNEL | __GFP_ZERO);
308 if (!page) {
309 r = -ENOMEM;
310 goto fail;
311 }
312 vcpu->run = page_address(page);
313
314 kvm_vcpu_set_in_spin_loop(vcpu, false);
315 kvm_vcpu_set_dy_eligible(vcpu, false);
316 vcpu->preempted = false;
317 vcpu->ready = false;
318
319 r = kvm_arch_vcpu_init(vcpu);
320 if (r < 0)
321 goto fail_free_run;
322 return 0;
323
324fail_free_run:
325 free_page((unsigned long)vcpu->run);
326fail:
327 return r;
328}
329EXPORT_SYMBOL_GPL(kvm_vcpu_init);
330
331void kvm_vcpu_uninit(struct kvm_vcpu *vcpu)
332{
333
334
335
336
337
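        /*
         * The vCPU is being torn down and cannot be running, so it is safe
         * to grab vcpu->pid without RCU protection before dropping it.
         */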
338 put_pid(rcu_dereference_protected(vcpu->pid, 1));
339 kvm_arch_vcpu_uninit(vcpu);
340 free_page((unsigned long)vcpu->run);
341}
342EXPORT_SYMBOL_GPL(kvm_vcpu_uninit);
343
344#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
345static inline struct kvm *mmu_notifier_to_kvm(struct mmu_notifier *mn)
346{
347 return container_of(mn, struct kvm, mmu_notifier);
348}
349
350static void kvm_mmu_notifier_change_pte(struct mmu_notifier *mn,
351 struct mm_struct *mm,
352 unsigned long address,
353 pte_t pte)
354{
355 struct kvm *kvm = mmu_notifier_to_kvm(mn);
356 int idx;
357
358 idx = srcu_read_lock(&kvm->srcu);
359 spin_lock(&kvm->mmu_lock);
360 kvm->mmu_notifier_seq++;
361
362 if (kvm_set_spte_hva(kvm, address, pte))
363 kvm_flush_remote_tlbs(kvm);
364
365 spin_unlock(&kvm->mmu_lock);
366 srcu_read_unlock(&kvm->srcu, idx);
367}
368
369static int kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
370 const struct mmu_notifier_range *range)
371{
372 struct kvm *kvm = mmu_notifier_to_kvm(mn);
373 int need_tlb_flush = 0, idx;
374 int ret;
375
376 idx = srcu_read_lock(&kvm->srcu);
377 spin_lock(&kvm->mmu_lock);
378
379
380
381
382
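        /*
         * Raise mmu_notifier_count under mmu_lock so that page faults running
         * concurrently with this invalidation can tell the range is busy.
         */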
383 kvm->mmu_notifier_count++;
384 need_tlb_flush = kvm_unmap_hva_range(kvm, range->start, range->end);
385 need_tlb_flush |= kvm->tlbs_dirty;
386
387 if (need_tlb_flush)
388 kvm_flush_remote_tlbs(kvm);
389
390 spin_unlock(&kvm->mmu_lock);
391
392 ret = kvm_arch_mmu_notifier_invalidate_range(kvm, range->start,
393 range->end,
394 mmu_notifier_range_blockable(range));
395
396 srcu_read_unlock(&kvm->srcu, idx);
397
398 return ret;
399}
400
401static void kvm_mmu_notifier_invalidate_range_end(struct mmu_notifier *mn,
402 const struct mmu_notifier_range *range)
403{
404 struct kvm *kvm = mmu_notifier_to_kvm(mn);
405
406 spin_lock(&kvm->mmu_lock);
407
408
409
410
411
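        /*
         * Bump mmu_notifier_seq so that translations looked up while the
         * invalidation was in progress are recognized as possibly stale.
         */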
412 kvm->mmu_notifier_seq++;
413 smp_wmb();
414
415
416
417
418
419 kvm->mmu_notifier_count--;
420 spin_unlock(&kvm->mmu_lock);
421
422 BUG_ON(kvm->mmu_notifier_count < 0);
423}
424
425static int kvm_mmu_notifier_clear_flush_young(struct mmu_notifier *mn,
426 struct mm_struct *mm,
427 unsigned long start,
428 unsigned long end)
429{
430 struct kvm *kvm = mmu_notifier_to_kvm(mn);
431 int young, idx;
432
433 idx = srcu_read_lock(&kvm->srcu);
434 spin_lock(&kvm->mmu_lock);
435
436 young = kvm_age_hva(kvm, start, end);
437 if (young)
438 kvm_flush_remote_tlbs(kvm);
439
440 spin_unlock(&kvm->mmu_lock);
441 srcu_read_unlock(&kvm->srcu, idx);
442
443 return young;
444}
445
446static int kvm_mmu_notifier_clear_young(struct mmu_notifier *mn,
447 struct mm_struct *mm,
448 unsigned long start,
449 unsigned long end)
450{
451 struct kvm *kvm = mmu_notifier_to_kvm(mn);
452 int young, idx;
453
454 idx = srcu_read_lock(&kvm->srcu);
455 spin_lock(&kvm->mmu_lock);
456
457
458
459
460
461
462
463
464
465
466
467
468
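        /*
         * Unlike clear_flush_young above, this clears the accessed state
         * without flushing remote TLBs, trading a little precision for less
         * overhead.
         */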
469 young = kvm_age_hva(kvm, start, end);
470 spin_unlock(&kvm->mmu_lock);
471 srcu_read_unlock(&kvm->srcu, idx);
472
473 return young;
474}
475
476static int kvm_mmu_notifier_test_young(struct mmu_notifier *mn,
477 struct mm_struct *mm,
478 unsigned long address)
479{
480 struct kvm *kvm = mmu_notifier_to_kvm(mn);
481 int young, idx;
482
483 idx = srcu_read_lock(&kvm->srcu);
484 spin_lock(&kvm->mmu_lock);
485 young = kvm_test_age_hva(kvm, address);
486 spin_unlock(&kvm->mmu_lock);
487 srcu_read_unlock(&kvm->srcu, idx);
488
489 return young;
490}
491
492static void kvm_mmu_notifier_release(struct mmu_notifier *mn,
493 struct mm_struct *mm)
494{
495 struct kvm *kvm = mmu_notifier_to_kvm(mn);
496 int idx;
497
498 idx = srcu_read_lock(&kvm->srcu);
499 kvm_arch_flush_shadow_all(kvm);
500 srcu_read_unlock(&kvm->srcu, idx);
501}
502
503static const struct mmu_notifier_ops kvm_mmu_notifier_ops = {
504 .invalidate_range_start = kvm_mmu_notifier_invalidate_range_start,
505 .invalidate_range_end = kvm_mmu_notifier_invalidate_range_end,
506 .clear_flush_young = kvm_mmu_notifier_clear_flush_young,
507 .clear_young = kvm_mmu_notifier_clear_young,
508 .test_young = kvm_mmu_notifier_test_young,
509 .change_pte = kvm_mmu_notifier_change_pte,
510 .release = kvm_mmu_notifier_release,
511};
512
513static int kvm_init_mmu_notifier(struct kvm *kvm)
514{
515 kvm->mmu_notifier.ops = &kvm_mmu_notifier_ops;
516 return mmu_notifier_register(&kvm->mmu_notifier, current->mm);
517}
518
519#else
520
521static int kvm_init_mmu_notifier(struct kvm *kvm)
522{
523 return 0;
524}
525
526#endif
527
528static struct kvm_memslots *kvm_alloc_memslots(void)
529{
530 int i;
531 struct kvm_memslots *slots;
532
533 slots = kvzalloc(sizeof(struct kvm_memslots), GFP_KERNEL_ACCOUNT);
534 if (!slots)
535 return NULL;
536
537 for (i = 0; i < KVM_MEM_SLOTS_NUM; i++)
538 slots->id_to_index[i] = slots->memslots[i].id = i;
539
540 return slots;
541}
542
543static void kvm_destroy_dirty_bitmap(struct kvm_memory_slot *memslot)
544{
545 if (!memslot->dirty_bitmap)
546 return;
547
548 kvfree(memslot->dirty_bitmap);
549 memslot->dirty_bitmap = NULL;
550}
551
552
553
554
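/* Release resources held by @free that are not shared with @dont. */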
555static void kvm_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
556 struct kvm_memory_slot *dont)
557{
558 if (!dont || free->dirty_bitmap != dont->dirty_bitmap)
559 kvm_destroy_dirty_bitmap(free);
560
561 kvm_arch_free_memslot(kvm, free, dont);
562
563 free->npages = 0;
564}
565
566static void kvm_free_memslots(struct kvm *kvm, struct kvm_memslots *slots)
567{
568 struct kvm_memory_slot *memslot;
569
570 if (!slots)
571 return;
572
573 kvm_for_each_memslot(memslot, slots)
574 kvm_free_memslot(kvm, memslot, NULL);
575
576 kvfree(slots);
577}
578
579static void kvm_destroy_vm_debugfs(struct kvm *kvm)
580{
581 int i;
582
583 if (!kvm->debugfs_dentry)
584 return;
585
586 debugfs_remove_recursive(kvm->debugfs_dentry);
587
588 if (kvm->debugfs_stat_data) {
589 for (i = 0; i < kvm_debugfs_num_entries; i++)
590 kfree(kvm->debugfs_stat_data[i]);
591 kfree(kvm->debugfs_stat_data);
592 }
593}
594
595static int kvm_create_vm_debugfs(struct kvm *kvm, int fd)
596{
597 char dir_name[ITOA_MAX_LEN * 2];
598 struct kvm_stat_data *stat_data;
599 struct kvm_stats_debugfs_item *p;
600
601 if (!debugfs_initialized())
602 return 0;
603
604 snprintf(dir_name, sizeof(dir_name), "%d-%d", task_pid_nr(current), fd);
605 kvm->debugfs_dentry = debugfs_create_dir(dir_name, kvm_debugfs_dir);
606
607 kvm->debugfs_stat_data = kcalloc(kvm_debugfs_num_entries,
608 sizeof(*kvm->debugfs_stat_data),
609 GFP_KERNEL_ACCOUNT);
610 if (!kvm->debugfs_stat_data)
611 return -ENOMEM;
612
613 for (p = debugfs_entries; p->name; p++) {
614 stat_data = kzalloc(sizeof(*stat_data), GFP_KERNEL_ACCOUNT);
615 if (!stat_data)
616 return -ENOMEM;
617
618 stat_data->kvm = kvm;
619 stat_data->offset = p->offset;
620 kvm->debugfs_stat_data[p - debugfs_entries] = stat_data;
621 debugfs_create_file(p->name, 0644, kvm->debugfs_dentry,
622 stat_data, stat_fops_per_vm[p->kind]);
623 }
624 return 0;
625}
626
627static struct kvm *kvm_create_vm(unsigned long type)
628{
629 int r, i;
630 struct kvm *kvm = kvm_arch_alloc_vm();
631
632 if (!kvm)
633 return ERR_PTR(-ENOMEM);
634
635 spin_lock_init(&kvm->mmu_lock);
636 mmgrab(current->mm);
637 kvm->mm = current->mm;
638 kvm_eventfd_init(kvm);
639 mutex_init(&kvm->lock);
640 mutex_init(&kvm->irq_lock);
641 mutex_init(&kvm->slots_lock);
642 refcount_set(&kvm->users_count, 1);
643 INIT_LIST_HEAD(&kvm->devices);
644
645 r = kvm_arch_init_vm(kvm, type);
646 if (r)
647 goto out_err_no_disable;
648
649 r = hardware_enable_all();
650 if (r)
651 goto out_err_no_disable;
652
653#ifdef CONFIG_HAVE_KVM_IRQFD
654 INIT_HLIST_HEAD(&kvm->irq_ack_notifier_list);
655#endif
656
657 BUILD_BUG_ON(KVM_MEM_SLOTS_NUM > SHRT_MAX);
658
659 r = -ENOMEM;
660 for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
661 struct kvm_memslots *slots = kvm_alloc_memslots();
662 if (!slots)
663 goto out_err_no_srcu;
664
665 slots->generation = i;
666 rcu_assign_pointer(kvm->memslots[i], slots);
667 }
668
669 if (init_srcu_struct(&kvm->srcu))
670 goto out_err_no_srcu;
671 if (init_srcu_struct(&kvm->irq_srcu))
672 goto out_err_no_irq_srcu;
673 for (i = 0; i < KVM_NR_BUSES; i++) {
674 rcu_assign_pointer(kvm->buses[i],
675 kzalloc(sizeof(struct kvm_io_bus), GFP_KERNEL_ACCOUNT));
676 if (!kvm->buses[i])
677 goto out_err;
678 }
679
680 r = kvm_init_mmu_notifier(kvm);
681 if (r)
682 goto out_err;
683
684 mutex_lock(&kvm_lock);
685 list_add(&kvm->vm_list, &vm_list);
686 mutex_unlock(&kvm_lock);
687
688 preempt_notifier_inc();
689
690 return kvm;
691
692out_err:
693 cleanup_srcu_struct(&kvm->irq_srcu);
694out_err_no_irq_srcu:
695 cleanup_srcu_struct(&kvm->srcu);
696out_err_no_srcu:
697 hardware_disable_all();
698out_err_no_disable:
699 refcount_set(&kvm->users_count, 0);
700 for (i = 0; i < KVM_NR_BUSES; i++)
701 kfree(kvm_get_bus(kvm, i));
702 for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++)
703 kvm_free_memslots(kvm, __kvm_memslots(kvm, i));
704 kvm_arch_free_vm(kvm);
705 mmdrop(current->mm);
706 return ERR_PTR(r);
707}
708
709static void kvm_destroy_devices(struct kvm *kvm)
710{
711 struct kvm_device *dev, *tmp;
712
713
714
715
716
717
718 list_for_each_entry_safe(dev, tmp, &kvm->devices, vm_node) {
719 list_del(&dev->vm_node);
720 dev->ops->destroy(dev);
721 }
722}
723
724static void kvm_destroy_vm(struct kvm *kvm)
725{
726 int i;
727 struct mm_struct *mm = kvm->mm;
728
729 kvm_uevent_notify_change(KVM_EVENT_DESTROY_VM, kvm);
730 kvm_destroy_vm_debugfs(kvm);
731 kvm_arch_sync_events(kvm);
732 mutex_lock(&kvm_lock);
733 list_del(&kvm->vm_list);
734 mutex_unlock(&kvm_lock);
735 kvm_free_irq_routing(kvm);
736 for (i = 0; i < KVM_NR_BUSES; i++) {
737 struct kvm_io_bus *bus = kvm_get_bus(kvm, i);
738
739 if (bus)
740 kvm_io_bus_destroy(bus);
741 kvm->buses[i] = NULL;
742 }
743 kvm_coalesced_mmio_free(kvm);
744#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
745 mmu_notifier_unregister(&kvm->mmu_notifier, kvm->mm);
746#else
747 kvm_arch_flush_shadow_all(kvm);
748#endif
749 kvm_arch_destroy_vm(kvm);
750 kvm_destroy_devices(kvm);
751 for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++)
752 kvm_free_memslots(kvm, __kvm_memslots(kvm, i));
753 cleanup_srcu_struct(&kvm->irq_srcu);
754 cleanup_srcu_struct(&kvm->srcu);
755 kvm_arch_free_vm(kvm);
756 preempt_notifier_dec();
757 hardware_disable_all();
758 mmdrop(mm);
759}
760
761void kvm_get_kvm(struct kvm *kvm)
762{
763 refcount_inc(&kvm->users_count);
764}
765EXPORT_SYMBOL_GPL(kvm_get_kvm);
766
767void kvm_put_kvm(struct kvm *kvm)
768{
769 if (refcount_dec_and_test(&kvm->users_count))
770 kvm_destroy_vm(kvm);
771}
772EXPORT_SYMBOL_GPL(kvm_put_kvm);
773
774
775static int kvm_vm_release(struct inode *inode, struct file *filp)
776{
777 struct kvm *kvm = filp->private_data;
778
779 kvm_irqfd_release(kvm);
780
781 kvm_put_kvm(kvm);
782 return 0;
783}
784
785
786
787
788
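/*
 * The dirty bitmap is allocated at twice its nominal size; the second half
 * (see kvm_second_dirty_bitmap()) serves as a scratch buffer when the log is
 * harvested.
 */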
789static int kvm_create_dirty_bitmap(struct kvm_memory_slot *memslot)
790{
791 unsigned long dirty_bytes = 2 * kvm_dirty_bitmap_bytes(memslot);
792
793 memslot->dirty_bitmap = kvzalloc(dirty_bytes, GFP_KERNEL_ACCOUNT);
794 if (!memslot->dirty_bitmap)
795 return -ENOMEM;
796
797 return 0;
798}
799
800
801
802
803
804
805
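/*
 * Insert or update @new in @slots while keeping the memslot array sorted by
 * base_gfn in descending order, so lookups can binary-search by GFN.  Only
 * the entries between the slot's old and new positions are shifted.
 */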
806static void update_memslots(struct kvm_memslots *slots,
807 struct kvm_memory_slot *new,
808 enum kvm_mr_change change)
809{
810 int id = new->id;
811 int i = slots->id_to_index[id];
812 struct kvm_memory_slot *mslots = slots->memslots;
813
814 WARN_ON(mslots[i].id != id);
815 switch (change) {
816 case KVM_MR_CREATE:
817 slots->used_slots++;
818 WARN_ON(mslots[i].npages || !new->npages);
819 break;
820 case KVM_MR_DELETE:
821 slots->used_slots--;
822 WARN_ON(new->npages || !mslots[i].npages);
823 break;
824 default:
825 break;
826 }
827
828 while (i < KVM_MEM_SLOTS_NUM - 1 &&
829 new->base_gfn <= mslots[i + 1].base_gfn) {
830 if (!mslots[i + 1].npages)
831 break;
832 mslots[i] = mslots[i + 1];
833 slots->id_to_index[mslots[i].id] = i;
834 i++;
835 }
836
837
838
839
840
841
842
843
844
845
846 if (new->npages) {
847 while (i > 0 &&
848 new->base_gfn >= mslots[i - 1].base_gfn) {
849 mslots[i] = mslots[i - 1];
850 slots->id_to_index[mslots[i].id] = i;
851 i--;
852 }
853 } else
854 WARN_ON_ONCE(i != slots->used_slots);
855
856 mslots[i] = *new;
857 slots->id_to_index[mslots[i].id] = i;
858}
859
860static int check_memory_region_flags(const struct kvm_userspace_memory_region *mem)
861{
862 u32 valid_flags = KVM_MEM_LOG_DIRTY_PAGES;
863
864#ifdef __KVM_HAVE_READONLY_MEM
865 valid_flags |= KVM_MEM_READONLY;
866#endif
867
868 if (mem->flags & ~valid_flags)
869 return -EINVAL;
870
871 return 0;
872}
873
874static struct kvm_memslots *install_new_memslots(struct kvm *kvm,
875 int as_id, struct kvm_memslots *slots)
876{
877 struct kvm_memslots *old_memslots = __kvm_memslots(kvm, as_id);
878 u64 gen = old_memslots->generation;
879
880 WARN_ON(gen & KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS);
881 slots->generation = gen | KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS;
882
883 rcu_assign_pointer(kvm->memslots[as_id], slots);
884 synchronize_srcu_expedited(&kvm->srcu);
885
886
887
888
889
890
891
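        /*
         * All readers have switched to the new memslots, so drop the
         * "update in progress" flag and advance the generation.  Stepping by
         * KVM_ADDRESS_SPACE_NUM keeps generations unique across address
         * spaces without a shared counter.
         */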
892 gen = slots->generation & ~KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS;
893
894
895
896
897
898
899
900
901 gen += KVM_ADDRESS_SPACE_NUM;
902
903 kvm_arch_memslots_updated(kvm, gen);
904
905 slots->generation = gen;
906
907 return old_memslots;
908}
909
910
911
912
913
914
915
916
917
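/*
 * Create, delete, move, or change the flags of a guest memory slot as
 * described by @mem.  The caller must hold kvm->slots_lock.
 */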
918int __kvm_set_memory_region(struct kvm *kvm,
919 const struct kvm_userspace_memory_region *mem)
920{
921 int r;
922 gfn_t base_gfn;
923 unsigned long npages;
924 struct kvm_memory_slot *slot;
925 struct kvm_memory_slot old, new;
926 struct kvm_memslots *slots = NULL, *old_memslots;
927 int as_id, id;
928 enum kvm_mr_change change;
929
930 r = check_memory_region_flags(mem);
931 if (r)
932 goto out;
933
934 r = -EINVAL;
935 as_id = mem->slot >> 16;
936 id = (u16)mem->slot;
937
938
939 if (mem->memory_size & (PAGE_SIZE - 1))
940 goto out;
941 if (mem->guest_phys_addr & (PAGE_SIZE - 1))
942 goto out;
943
944 if ((id < KVM_USER_MEM_SLOTS) &&
945 ((mem->userspace_addr & (PAGE_SIZE - 1)) ||
946 !access_ok((void __user *)(unsigned long)mem->userspace_addr,
947 mem->memory_size)))
948 goto out;
949 if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_MEM_SLOTS_NUM)
950 goto out;
951 if (mem->guest_phys_addr + mem->memory_size < mem->guest_phys_addr)
952 goto out;
953
954 slot = id_to_memslot(__kvm_memslots(kvm, as_id), id);
955 base_gfn = mem->guest_phys_addr >> PAGE_SHIFT;
956 npages = mem->memory_size >> PAGE_SHIFT;
957
958 if (npages > KVM_MEM_MAX_NR_PAGES)
959 goto out;
960
961 new = old = *slot;
962
963 new.id = id;
964 new.base_gfn = base_gfn;
965 new.npages = npages;
966 new.flags = mem->flags;
967
968 if (npages) {
969 if (!old.npages)
970 change = KVM_MR_CREATE;
971 else {
972 if ((mem->userspace_addr != old.userspace_addr) ||
973 (npages != old.npages) ||
974 ((new.flags ^ old.flags) & KVM_MEM_READONLY))
975 goto out;
976
977 if (base_gfn != old.base_gfn)
978 change = KVM_MR_MOVE;
979 else if (new.flags != old.flags)
980 change = KVM_MR_FLAGS_ONLY;
981 else {
982 r = 0;
983 goto out;
984 }
985 }
986 } else {
987 if (!old.npages)
988 goto out;
989
990 change = KVM_MR_DELETE;
991 new.base_gfn = 0;
992 new.flags = 0;
993 }
994
995 if ((change == KVM_MR_CREATE) || (change == KVM_MR_MOVE)) {
996
997 r = -EEXIST;
998 kvm_for_each_memslot(slot, __kvm_memslots(kvm, as_id)) {
999 if (slot->id == id)
1000 continue;
1001 if (!((base_gfn + npages <= slot->base_gfn) ||
1002 (base_gfn >= slot->base_gfn + slot->npages)))
1003 goto out;
1004 }
1005 }
1006
1007
1008 if (!(new.flags & KVM_MEM_LOG_DIRTY_PAGES))
1009 new.dirty_bitmap = NULL;
1010
1011 r = -ENOMEM;
1012 if (change == KVM_MR_CREATE) {
1013 new.userspace_addr = mem->userspace_addr;
1014
1015 if (kvm_arch_create_memslot(kvm, &new, npages))
1016 goto out_free;
1017 }
1018
1019
1020 if ((new.flags & KVM_MEM_LOG_DIRTY_PAGES) && !new.dirty_bitmap) {
1021 if (kvm_create_dirty_bitmap(&new) < 0)
1022 goto out_free;
1023 }
1024
1025 slots = kvzalloc(sizeof(struct kvm_memslots), GFP_KERNEL_ACCOUNT);
1026 if (!slots)
1027 goto out_free;
1028 memcpy(slots, __kvm_memslots(kvm, as_id), sizeof(struct kvm_memslots));
1029
1030 if ((change == KVM_MR_DELETE) || (change == KVM_MR_MOVE)) {
1031 slot = id_to_memslot(slots, id);
1032 slot->flags |= KVM_MEMSLOT_INVALID;
1033
1034 old_memslots = install_new_memslots(kvm, as_id, slots);
1035
1036
1037
1038
1039
1040
1041
1042
1043 kvm_arch_flush_shadow_memslot(kvm, slot);
1044
1045
1046
1047
1048
1049
1050 slots = old_memslots;
1051 }
1052
1053 r = kvm_arch_prepare_memory_region(kvm, &new, mem, change);
1054 if (r)
1055 goto out_slots;
1056
1057
1058 if (change == KVM_MR_DELETE) {
1059 new.dirty_bitmap = NULL;
1060 memset(&new.arch, 0, sizeof(new.arch));
1061 }
1062
1063 update_memslots(slots, &new, change);
1064 old_memslots = install_new_memslots(kvm, as_id, slots);
1065
1066 kvm_arch_commit_memory_region(kvm, mem, &old, &new, change);
1067
1068 kvm_free_memslot(kvm, &old, &new);
1069 kvfree(old_memslots);
1070 return 0;
1071
1072out_slots:
1073 kvfree(slots);
1074out_free:
1075 kvm_free_memslot(kvm, &new, &old);
1076out:
1077 return r;
1078}
1079EXPORT_SYMBOL_GPL(__kvm_set_memory_region);
1080
1081int kvm_set_memory_region(struct kvm *kvm,
1082 const struct kvm_userspace_memory_region *mem)
1083{
1084 int r;
1085
1086 mutex_lock(&kvm->slots_lock);
1087 r = __kvm_set_memory_region(kvm, mem);
1088 mutex_unlock(&kvm->slots_lock);
1089 return r;
1090}
1091EXPORT_SYMBOL_GPL(kvm_set_memory_region);
1092
1093static int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
1094 struct kvm_userspace_memory_region *mem)
1095{
1096 if ((u16)mem->slot >= KVM_USER_MEM_SLOTS)
1097 return -EINVAL;
1098
1099 return kvm_set_memory_region(kvm, mem);
1100}
1101
1102int kvm_get_dirty_log(struct kvm *kvm,
1103 struct kvm_dirty_log *log, int *is_dirty)
1104{
1105 struct kvm_memslots *slots;
1106 struct kvm_memory_slot *memslot;
1107 int i, as_id, id;
1108 unsigned long n;
1109 unsigned long any = 0;
1110
1111 as_id = log->slot >> 16;
1112 id = (u16)log->slot;
1113 if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_USER_MEM_SLOTS)
1114 return -EINVAL;
1115
1116 slots = __kvm_memslots(kvm, as_id);
1117 memslot = id_to_memslot(slots, id);
1118 if (!memslot->dirty_bitmap)
1119 return -ENOENT;
1120
1121 n = kvm_dirty_bitmap_bytes(memslot);
1122
1123 for (i = 0; !any && i < n/sizeof(long); ++i)
1124 any = memslot->dirty_bitmap[i];
1125
1126 if (copy_to_user(log->dirty_bitmap, memslot->dirty_bitmap, n))
1127 return -EFAULT;
1128
1129 if (any)
1130 *is_dirty = 1;
1131 return 0;
1132}
1133EXPORT_SYMBOL_GPL(kvm_get_dirty_log);
1134
1135#ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT
1136
1137
1138
1139
1140
1141
1142
1143
1144
1145
1146
1147
1148
1149
1150
1151
1152
1153
1154
1155
1156
1157
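/*
 * Copy the memslot's dirty bitmap to userspace.  Unless manual dirty-log
 * protection is enabled, the harvested bits are cleared and dirty tracking
 * is re-armed for those pages; *@flush tells the caller whether remote TLBs
 * must be flushed afterwards.
 */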
1158int kvm_get_dirty_log_protect(struct kvm *kvm,
1159 struct kvm_dirty_log *log, bool *flush)
1160{
1161 struct kvm_memslots *slots;
1162 struct kvm_memory_slot *memslot;
1163 int i, as_id, id;
1164 unsigned long n;
1165 unsigned long *dirty_bitmap;
1166 unsigned long *dirty_bitmap_buffer;
1167
1168 as_id = log->slot >> 16;
1169 id = (u16)log->slot;
1170 if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_USER_MEM_SLOTS)
1171 return -EINVAL;
1172
1173 slots = __kvm_memslots(kvm, as_id);
1174 memslot = id_to_memslot(slots, id);
1175
1176 dirty_bitmap = memslot->dirty_bitmap;
1177 if (!dirty_bitmap)
1178 return -ENOENT;
1179
1180 n = kvm_dirty_bitmap_bytes(memslot);
1181 *flush = false;
1182 if (kvm->manual_dirty_log_protect) {
1183
1184
1185
1186
1187
1188
1189
1190
1191 dirty_bitmap_buffer = dirty_bitmap;
1192 } else {
1193 dirty_bitmap_buffer = kvm_second_dirty_bitmap(memslot);
1194 memset(dirty_bitmap_buffer, 0, n);
1195
1196 spin_lock(&kvm->mmu_lock);
1197 for (i = 0; i < n / sizeof(long); i++) {
1198 unsigned long mask;
1199 gfn_t offset;
1200
1201 if (!dirty_bitmap[i])
1202 continue;
1203
1204 *flush = true;
1205 mask = xchg(&dirty_bitmap[i], 0);
1206 dirty_bitmap_buffer[i] = mask;
1207
1208 offset = i * BITS_PER_LONG;
1209 kvm_arch_mmu_enable_log_dirty_pt_masked(kvm, memslot,
1210 offset, mask);
1211 }
1212 spin_unlock(&kvm->mmu_lock);
1213 }
1214
1215 if (copy_to_user(log->dirty_bitmap, dirty_bitmap_buffer, n))
1216 return -EFAULT;
1217 return 0;
1218}
1219EXPORT_SYMBOL_GPL(kvm_get_dirty_log_protect);
1220
1221
1222
1223
1224
1225
1226
1227
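/*
 * Clear the bits userspace passes back in @log and re-arm dirty tracking for
 * the corresponding pages; used with manual dirty-log protection.
 */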
1228int kvm_clear_dirty_log_protect(struct kvm *kvm,
1229 struct kvm_clear_dirty_log *log, bool *flush)
1230{
1231 struct kvm_memslots *slots;
1232 struct kvm_memory_slot *memslot;
1233 int as_id, id;
1234 gfn_t offset;
1235 unsigned long i, n;
1236 unsigned long *dirty_bitmap;
1237 unsigned long *dirty_bitmap_buffer;
1238
1239 as_id = log->slot >> 16;
1240 id = (u16)log->slot;
1241 if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_USER_MEM_SLOTS)
1242 return -EINVAL;
1243
1244 if (log->first_page & 63)
1245 return -EINVAL;
1246
1247 slots = __kvm_memslots(kvm, as_id);
1248 memslot = id_to_memslot(slots, id);
1249
1250 dirty_bitmap = memslot->dirty_bitmap;
1251 if (!dirty_bitmap)
1252 return -ENOENT;
1253
1254 n = ALIGN(log->num_pages, BITS_PER_LONG) / 8;
1255
1256 if (log->first_page > memslot->npages ||
1257 log->num_pages > memslot->npages - log->first_page ||
1258 (log->num_pages < memslot->npages - log->first_page && (log->num_pages & 63)))
1259 return -EINVAL;
1260
1261 *flush = false;
1262 dirty_bitmap_buffer = kvm_second_dirty_bitmap(memslot);
1263 if (copy_from_user(dirty_bitmap_buffer, log->dirty_bitmap, n))
1264 return -EFAULT;
1265
1266 spin_lock(&kvm->mmu_lock);
1267 for (offset = log->first_page, i = offset / BITS_PER_LONG,
1268 n = DIV_ROUND_UP(log->num_pages, BITS_PER_LONG); n--;
1269 i++, offset += BITS_PER_LONG) {
1270 unsigned long mask = *dirty_bitmap_buffer++;
1271 atomic_long_t *p = (atomic_long_t *) &dirty_bitmap[i];
1272 if (!mask)
1273 continue;
1274
1275 mask &= atomic_long_fetch_andnot(mask, p);
1276
1277
1278
1279
1280
1281
1282
1283 if (mask) {
1284 *flush = true;
1285 kvm_arch_mmu_enable_log_dirty_pt_masked(kvm, memslot,
1286 offset, mask);
1287 }
1288 }
1289 spin_unlock(&kvm->mmu_lock);
1290
1291 return 0;
1292}
1293EXPORT_SYMBOL_GPL(kvm_clear_dirty_log_protect);
1294#endif
1295
1296bool kvm_largepages_enabled(void)
1297{
1298 return largepages_enabled;
1299}
1300
1301void kvm_disable_largepages(void)
1302{
1303 largepages_enabled = false;
1304}
1305EXPORT_SYMBOL_GPL(kvm_disable_largepages);
1306
1307struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
1308{
1309 return __gfn_to_memslot(kvm_memslots(kvm), gfn);
1310}
1311EXPORT_SYMBOL_GPL(gfn_to_memslot);
1312
1313struct kvm_memory_slot *kvm_vcpu_gfn_to_memslot(struct kvm_vcpu *vcpu, gfn_t gfn)
1314{
1315 return __gfn_to_memslot(kvm_vcpu_memslots(vcpu), gfn);
1316}
1317
1318bool kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn)
1319{
1320 struct kvm_memory_slot *memslot = gfn_to_memslot(kvm, gfn);
1321
1322 if (!memslot || memslot->id >= KVM_USER_MEM_SLOTS ||
1323 memslot->flags & KVM_MEMSLOT_INVALID)
1324 return false;
1325
1326 return true;
1327}
1328EXPORT_SYMBOL_GPL(kvm_is_visible_gfn);
1329
1330unsigned long kvm_host_page_size(struct kvm *kvm, gfn_t gfn)
1331{
1332 struct vm_area_struct *vma;
1333 unsigned long addr, size;
1334
1335 size = PAGE_SIZE;
1336
1337 addr = gfn_to_hva(kvm, gfn);
1338 if (kvm_is_error_hva(addr))
1339 return PAGE_SIZE;
1340
        down_read(&current->mm->mmap_sem);
1342 vma = find_vma(current->mm, addr);
1343 if (!vma)
1344 goto out;
1345
1346 size = vma_kernel_pagesize(vma);
1347
1348out:
        up_read(&current->mm->mmap_sem);
1350
1351 return size;
1352}
1353
1354static bool memslot_is_readonly(struct kvm_memory_slot *slot)
1355{
1356 return slot->flags & KVM_MEM_READONLY;
1357}
1358
1359static unsigned long __gfn_to_hva_many(struct kvm_memory_slot *slot, gfn_t gfn,
1360 gfn_t *nr_pages, bool write)
1361{
1362 if (!slot || slot->flags & KVM_MEMSLOT_INVALID)
1363 return KVM_HVA_ERR_BAD;
1364
1365 if (memslot_is_readonly(slot) && write)
1366 return KVM_HVA_ERR_RO_BAD;
1367
1368 if (nr_pages)
1369 *nr_pages = slot->npages - (gfn - slot->base_gfn);
1370
1371 return __gfn_to_hva_memslot(slot, gfn);
1372}
1373
1374static unsigned long gfn_to_hva_many(struct kvm_memory_slot *slot, gfn_t gfn,
1375 gfn_t *nr_pages)
1376{
1377 return __gfn_to_hva_many(slot, gfn, nr_pages, true);
1378}
1379
1380unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot,
1381 gfn_t gfn)
1382{
1383 return gfn_to_hva_many(slot, gfn, NULL);
1384}
1385EXPORT_SYMBOL_GPL(gfn_to_hva_memslot);
1386
1387unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn)
1388{
1389 return gfn_to_hva_many(gfn_to_memslot(kvm, gfn), gfn, NULL);
1390}
1391EXPORT_SYMBOL_GPL(gfn_to_hva);
1392
1393unsigned long kvm_vcpu_gfn_to_hva(struct kvm_vcpu *vcpu, gfn_t gfn)
1394{
1395 return gfn_to_hva_many(kvm_vcpu_gfn_to_memslot(vcpu, gfn), gfn, NULL);
1396}
1397EXPORT_SYMBOL_GPL(kvm_vcpu_gfn_to_hva);
1398
1399
1400
1401
1402
1403
1404
1405
1406
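/*
 * Translate @gfn within @slot to a host virtual address.  When @writable is
 * non-NULL it reports whether the slot permits writes.
 */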
1407unsigned long gfn_to_hva_memslot_prot(struct kvm_memory_slot *slot,
1408 gfn_t gfn, bool *writable)
1409{
1410 unsigned long hva = __gfn_to_hva_many(slot, gfn, NULL, false);
1411
1412 if (!kvm_is_error_hva(hva) && writable)
1413 *writable = !memslot_is_readonly(slot);
1414
1415 return hva;
1416}
1417
1418unsigned long gfn_to_hva_prot(struct kvm *kvm, gfn_t gfn, bool *writable)
1419{
1420 struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn);
1421
1422 return gfn_to_hva_memslot_prot(slot, gfn, writable);
1423}
1424
1425unsigned long kvm_vcpu_gfn_to_hva_prot(struct kvm_vcpu *vcpu, gfn_t gfn, bool *writable)
1426{
1427 struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
1428
1429 return gfn_to_hva_memslot_prot(slot, gfn, writable);
1430}
1431
1432static inline int check_user_page_hwpoison(unsigned long addr)
1433{
1434 int rc, flags = FOLL_HWPOISON | FOLL_WRITE;
1435
1436 rc = get_user_pages(addr, 1, flags, NULL, NULL);
1437 return rc == -EHWPOISON;
1438}
1439
1440
1441
1442
1443
1444
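/*
 * Fast path: try to pin a writable page without sleeping.  Returns true on
 * success; this is the only path that may be used from atomic context.
 */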
1445static bool hva_to_pfn_fast(unsigned long addr, bool write_fault,
1446 bool *writable, kvm_pfn_t *pfn)
1447{
1448 struct page *page[1];
1449 int npages;
1450
1451
1452
1453
1454
1455
1456 if (!(write_fault || writable))
1457 return false;
1458
1459 npages = __get_user_pages_fast(addr, 1, 1, page);
1460 if (npages == 1) {
1461 *pfn = page_to_pfn(page[0]);
1462
1463 if (writable)
1464 *writable = true;
1465 return true;
1466 }
1467
1468 return false;
1469}
1470
1471
1472
1473
1474
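/*
 * Slow path: pin the page with get_user_pages_unlocked(), which may sleep.
 * Returns 1 on success or a negative errno.
 */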
1475static int hva_to_pfn_slow(unsigned long addr, bool *async, bool write_fault,
1476 bool *writable, kvm_pfn_t *pfn)
1477{
1478 unsigned int flags = FOLL_HWPOISON;
1479 struct page *page;
1480 int npages = 0;
1481
1482 might_sleep();
1483
1484 if (writable)
1485 *writable = write_fault;
1486
1487 if (write_fault)
1488 flags |= FOLL_WRITE;
1489 if (async)
1490 flags |= FOLL_NOWAIT;
1491
1492 npages = get_user_pages_unlocked(addr, 1, &page, flags);
1493 if (npages != 1)
1494 return npages;
1495
1496
1497 if (unlikely(!write_fault) && writable) {
1498 struct page *wpage;
1499
1500 if (__get_user_pages_fast(addr, 1, 1, &wpage) == 1) {
1501 *writable = true;
1502 put_page(page);
1503 page = wpage;
1504 }
1505 }
1506 *pfn = page_to_pfn(page);
1507 return npages;
1508}
1509
1510static bool vma_is_valid(struct vm_area_struct *vma, bool write_fault)
1511{
1512 if (unlikely(!(vma->vm_flags & VM_READ)))
1513 return false;
1514
1515 if (write_fault && (unlikely(!(vma->vm_flags & VM_WRITE))))
1516 return false;
1517
1518 return true;
1519}
1520
1521static int hva_to_pfn_remapped(struct vm_area_struct *vma,
1522 unsigned long addr, bool *async,
1523 bool write_fault, bool *writable,
1524 kvm_pfn_t *p_pfn)
1525{
1526 unsigned long pfn;
1527 int r;
1528
1529 r = follow_pfn(vma, addr, &pfn);
1530 if (r) {
1531
1532
1533
1534
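                /*
                 * follow_pfn() found no present mapping; fault the page in
                 * with fixup_user_fault() and then retry the lookup.
                 */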
1535 bool unlocked = false;
1536 r = fixup_user_fault(current, current->mm, addr,
1537 (write_fault ? FAULT_FLAG_WRITE : 0),
1538 &unlocked);
1539 if (unlocked)
1540 return -EAGAIN;
1541 if (r)
1542 return r;
1543
1544 r = follow_pfn(vma, addr, &pfn);
1545 if (r)
1546 return r;
1547
1548 }
1549
1550 if (writable)
1551 *writable = true;
1552
1553
1554
1555
1556
1557
1558
1559
1560
1561
1562
1563
1564 kvm_get_pfn(pfn);
1565
1566 *p_pfn = pfn;
1567 return 0;
1568}
1569
1570
1571
1572
1573
1574
1575
1576
1577
1578
1579
1580
1581
1582
1583
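/*
 * Translate a host virtual address into a pfn, pinning the backing page.
 * @atomic: fail rather than sleep if the fast path misses
 * @async: if non-NULL, report pages that would need I/O instead of waiting
 * @write_fault: require a writable mapping
 * @writable: if non-NULL, report whether the returned mapping is writable
 */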
1584static kvm_pfn_t hva_to_pfn(unsigned long addr, bool atomic, bool *async,
1585 bool write_fault, bool *writable)
1586{
1587 struct vm_area_struct *vma;
1588 kvm_pfn_t pfn = 0;
1589 int npages, r;
1590
1591
1592 BUG_ON(atomic && async);
1593
1594 if (hva_to_pfn_fast(addr, write_fault, writable, &pfn))
1595 return pfn;
1596
1597 if (atomic)
1598 return KVM_PFN_ERR_FAULT;
1599
1600 npages = hva_to_pfn_slow(addr, async, write_fault, writable, &pfn);
1601 if (npages == 1)
1602 return pfn;
1603
        down_read(&current->mm->mmap_sem);
1605 if (npages == -EHWPOISON ||
1606 (!async && check_user_page_hwpoison(addr))) {
1607 pfn = KVM_PFN_ERR_HWPOISON;
1608 goto exit;
1609 }
1610
1611retry:
1612 vma = find_vma_intersection(current->mm, addr, addr + 1);
1613
1614 if (vma == NULL)
1615 pfn = KVM_PFN_ERR_FAULT;
1616 else if (vma->vm_flags & (VM_IO | VM_PFNMAP)) {
1617 r = hva_to_pfn_remapped(vma, addr, async, write_fault, writable, &pfn);
1618 if (r == -EAGAIN)
1619 goto retry;
1620 if (r < 0)
1621 pfn = KVM_PFN_ERR_FAULT;
1622 } else {
1623 if (async && vma_is_valid(vma, write_fault))
1624 *async = true;
1625 pfn = KVM_PFN_ERR_FAULT;
1626 }
1627exit:
        up_read(&current->mm->mmap_sem);
1629 return pfn;
1630}
1631
1632kvm_pfn_t __gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn,
1633 bool atomic, bool *async, bool write_fault,
1634 bool *writable)
1635{
1636 unsigned long addr = __gfn_to_hva_many(slot, gfn, NULL, write_fault);
1637
1638 if (addr == KVM_HVA_ERR_RO_BAD) {
1639 if (writable)
1640 *writable = false;
1641 return KVM_PFN_ERR_RO_FAULT;
1642 }
1643
1644 if (kvm_is_error_hva(addr)) {
1645 if (writable)
1646 *writable = false;
1647 return KVM_PFN_NOSLOT;
1648 }
1649
1650
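        /* Never report or create a writable mapping for a read-only memslot. */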
1651 if (writable && memslot_is_readonly(slot)) {
1652 *writable = false;
1653 writable = NULL;
1654 }
1655
1656 return hva_to_pfn(addr, atomic, async, write_fault,
1657 writable);
1658}
1659EXPORT_SYMBOL_GPL(__gfn_to_pfn_memslot);
1660
1661kvm_pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault,
1662 bool *writable)
1663{
1664 return __gfn_to_pfn_memslot(gfn_to_memslot(kvm, gfn), gfn, false, NULL,
1665 write_fault, writable);
1666}
1667EXPORT_SYMBOL_GPL(gfn_to_pfn_prot);
1668
1669kvm_pfn_t gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn)
1670{
1671 return __gfn_to_pfn_memslot(slot, gfn, false, NULL, true, NULL);
1672}
1673EXPORT_SYMBOL_GPL(gfn_to_pfn_memslot);
1674
1675kvm_pfn_t gfn_to_pfn_memslot_atomic(struct kvm_memory_slot *slot, gfn_t gfn)
1676{
1677 return __gfn_to_pfn_memslot(slot, gfn, true, NULL, true, NULL);
1678}
1679EXPORT_SYMBOL_GPL(gfn_to_pfn_memslot_atomic);
1680
1681kvm_pfn_t gfn_to_pfn_atomic(struct kvm *kvm, gfn_t gfn)
1682{
1683 return gfn_to_pfn_memslot_atomic(gfn_to_memslot(kvm, gfn), gfn);
1684}
1685EXPORT_SYMBOL_GPL(gfn_to_pfn_atomic);
1686
1687kvm_pfn_t kvm_vcpu_gfn_to_pfn_atomic(struct kvm_vcpu *vcpu, gfn_t gfn)
1688{
1689 return gfn_to_pfn_memslot_atomic(kvm_vcpu_gfn_to_memslot(vcpu, gfn), gfn);
1690}
1691EXPORT_SYMBOL_GPL(kvm_vcpu_gfn_to_pfn_atomic);
1692
1693kvm_pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn)
1694{
1695 return gfn_to_pfn_memslot(gfn_to_memslot(kvm, gfn), gfn);
1696}
1697EXPORT_SYMBOL_GPL(gfn_to_pfn);
1698
1699kvm_pfn_t kvm_vcpu_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn)
1700{
1701 return gfn_to_pfn_memslot(kvm_vcpu_gfn_to_memslot(vcpu, gfn), gfn);
1702}
1703EXPORT_SYMBOL_GPL(kvm_vcpu_gfn_to_pfn);
1704
1705int gfn_to_page_many_atomic(struct kvm_memory_slot *slot, gfn_t gfn,
1706 struct page **pages, int nr_pages)
1707{
1708 unsigned long addr;
1709 gfn_t entry = 0;
1710
1711 addr = gfn_to_hva_many(slot, gfn, &entry);
1712 if (kvm_is_error_hva(addr))
1713 return -1;
1714
1715 if (entry < nr_pages)
1716 return 0;
1717
1718 return __get_user_pages_fast(addr, nr_pages, 1, pages);
1719}
1720EXPORT_SYMBOL_GPL(gfn_to_page_many_atomic);
1721
1722static struct page *kvm_pfn_to_page(kvm_pfn_t pfn)
1723{
1724 if (is_error_noslot_pfn(pfn))
1725 return KVM_ERR_PTR_BAD_PAGE;
1726
1727 if (kvm_is_reserved_pfn(pfn)) {
1728 WARN_ON(1);
1729 return KVM_ERR_PTR_BAD_PAGE;
1730 }
1731
1732 return pfn_to_page(pfn);
1733}
1734
1735struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
1736{
1737 kvm_pfn_t pfn;
1738
1739 pfn = gfn_to_pfn(kvm, gfn);
1740
1741 return kvm_pfn_to_page(pfn);
1742}
1743EXPORT_SYMBOL_GPL(gfn_to_page);
1744
1745static int __kvm_map_gfn(struct kvm_memory_slot *slot, gfn_t gfn,
1746 struct kvm_host_map *map)
1747{
1748 kvm_pfn_t pfn;
1749 void *hva = NULL;
1750 struct page *page = KVM_UNMAPPED_PAGE;
1751
1752 if (!map)
1753 return -EINVAL;
1754
1755 pfn = gfn_to_pfn_memslot(slot, gfn);
1756 if (is_error_noslot_pfn(pfn))
1757 return -EINVAL;
1758
1759 if (pfn_valid(pfn)) {
1760 page = pfn_to_page(pfn);
1761 hva = kmap(page);
1762#ifdef CONFIG_HAS_IOMEM
1763 } else {
1764 hva = memremap(pfn_to_hpa(pfn), PAGE_SIZE, MEMREMAP_WB);
1765#endif
1766 }
1767
1768 if (!hva)
1769 return -EFAULT;
1770
1771 map->page = page;
1772 map->hva = hva;
1773 map->pfn = pfn;
1774 map->gfn = gfn;
1775
1776 return 0;
1777}
1778
1779int kvm_vcpu_map(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map)
1780{
1781 return __kvm_map_gfn(kvm_vcpu_gfn_to_memslot(vcpu, gfn), gfn, map);
1782}
1783EXPORT_SYMBOL_GPL(kvm_vcpu_map);
1784
1785void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map,
1786 bool dirty)
1787{
1788 if (!map)
1789 return;
1790
1791 if (!map->hva)
1792 return;
1793
1794 if (map->page != KVM_UNMAPPED_PAGE)
1795 kunmap(map->page);
1796#ifdef CONFIG_HAS_IOMEM
1797 else
1798 memunmap(map->hva);
1799#endif
1800
1801 if (dirty) {
1802 kvm_vcpu_mark_page_dirty(vcpu, map->gfn);
1803 kvm_release_pfn_dirty(map->pfn);
1804 } else {
1805 kvm_release_pfn_clean(map->pfn);
1806 }
1807
1808 map->hva = NULL;
1809 map->page = NULL;
1810}
1811EXPORT_SYMBOL_GPL(kvm_vcpu_unmap);
1812
1813struct page *kvm_vcpu_gfn_to_page(struct kvm_vcpu *vcpu, gfn_t gfn)
1814{
1815 kvm_pfn_t pfn;
1816
1817 pfn = kvm_vcpu_gfn_to_pfn(vcpu, gfn);
1818
1819 return kvm_pfn_to_page(pfn);
1820}
1821EXPORT_SYMBOL_GPL(kvm_vcpu_gfn_to_page);
1822
1823void kvm_release_page_clean(struct page *page)
1824{
1825 WARN_ON(is_error_page(page));
1826
1827 kvm_release_pfn_clean(page_to_pfn(page));
1828}
1829EXPORT_SYMBOL_GPL(kvm_release_page_clean);
1830
1831void kvm_release_pfn_clean(kvm_pfn_t pfn)
1832{
1833 if (!is_error_noslot_pfn(pfn) && !kvm_is_reserved_pfn(pfn))
1834 put_page(pfn_to_page(pfn));
1835}
1836EXPORT_SYMBOL_GPL(kvm_release_pfn_clean);
1837
1838void kvm_release_page_dirty(struct page *page)
1839{
1840 WARN_ON(is_error_page(page));
1841
1842 kvm_release_pfn_dirty(page_to_pfn(page));
1843}
1844EXPORT_SYMBOL_GPL(kvm_release_page_dirty);
1845
1846void kvm_release_pfn_dirty(kvm_pfn_t pfn)
1847{
1848 kvm_set_pfn_dirty(pfn);
1849 kvm_release_pfn_clean(pfn);
1850}
1851EXPORT_SYMBOL_GPL(kvm_release_pfn_dirty);
1852
1853void kvm_set_pfn_dirty(kvm_pfn_t pfn)
1854{
1855 if (!kvm_is_reserved_pfn(pfn)) {
1856 struct page *page = pfn_to_page(pfn);
1857
1858 SetPageDirty(page);
1859 }
1860}
1861EXPORT_SYMBOL_GPL(kvm_set_pfn_dirty);
1862
1863void kvm_set_pfn_accessed(kvm_pfn_t pfn)
1864{
1865 if (!kvm_is_reserved_pfn(pfn))
1866 mark_page_accessed(pfn_to_page(pfn));
1867}
1868EXPORT_SYMBOL_GPL(kvm_set_pfn_accessed);
1869
1870void kvm_get_pfn(kvm_pfn_t pfn)
1871{
1872 if (!kvm_is_reserved_pfn(pfn))
1873 get_page(pfn_to_page(pfn));
1874}
1875EXPORT_SYMBOL_GPL(kvm_get_pfn);
1876
1877static int next_segment(unsigned long len, int offset)
1878{
1879 if (len > PAGE_SIZE - offset)
1880 return PAGE_SIZE - offset;
1881 else
1882 return len;
1883}
1884
1885static int __kvm_read_guest_page(struct kvm_memory_slot *slot, gfn_t gfn,
1886 void *data, int offset, int len)
1887{
1888 int r;
1889 unsigned long addr;
1890
1891 addr = gfn_to_hva_memslot_prot(slot, gfn, NULL);
1892 if (kvm_is_error_hva(addr))
1893 return -EFAULT;
1894 r = __copy_from_user(data, (void __user *)addr + offset, len);
1895 if (r)
1896 return -EFAULT;
1897 return 0;
1898}
1899
1900int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
1901 int len)
1902{
1903 struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn);
1904
1905 return __kvm_read_guest_page(slot, gfn, data, offset, len);
1906}
1907EXPORT_SYMBOL_GPL(kvm_read_guest_page);
1908
1909int kvm_vcpu_read_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, void *data,
1910 int offset, int len)
1911{
1912 struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
1913
1914 return __kvm_read_guest_page(slot, gfn, data, offset, len);
1915}
1916EXPORT_SYMBOL_GPL(kvm_vcpu_read_guest_page);
1917
1918int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len)
1919{
1920 gfn_t gfn = gpa >> PAGE_SHIFT;
1921 int seg;
1922 int offset = offset_in_page(gpa);
1923 int ret;
1924
1925 while ((seg = next_segment(len, offset)) != 0) {
1926 ret = kvm_read_guest_page(kvm, gfn, data, offset, seg);
1927 if (ret < 0)
1928 return ret;
1929 offset = 0;
1930 len -= seg;
1931 data += seg;
1932 ++gfn;
1933 }
1934 return 0;
1935}
1936EXPORT_SYMBOL_GPL(kvm_read_guest);
1937
1938int kvm_vcpu_read_guest(struct kvm_vcpu *vcpu, gpa_t gpa, void *data, unsigned long len)
1939{
1940 gfn_t gfn = gpa >> PAGE_SHIFT;
1941 int seg;
1942 int offset = offset_in_page(gpa);
1943 int ret;
1944
1945 while ((seg = next_segment(len, offset)) != 0) {
1946 ret = kvm_vcpu_read_guest_page(vcpu, gfn, data, offset, seg);
1947 if (ret < 0)
1948 return ret;
1949 offset = 0;
1950 len -= seg;
1951 data += seg;
1952 ++gfn;
1953 }
1954 return 0;
1955}
1956EXPORT_SYMBOL_GPL(kvm_vcpu_read_guest);
1957
1958static int __kvm_read_guest_atomic(struct kvm_memory_slot *slot, gfn_t gfn,
1959 void *data, int offset, unsigned long len)
1960{
1961 int r;
1962 unsigned long addr;
1963
1964 addr = gfn_to_hva_memslot_prot(slot, gfn, NULL);
1965 if (kvm_is_error_hva(addr))
1966 return -EFAULT;
1967 pagefault_disable();
1968 r = __copy_from_user_inatomic(data, (void __user *)addr + offset, len);
1969 pagefault_enable();
1970 if (r)
1971 return -EFAULT;
1972 return 0;
1973}
1974
1975int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data,
1976 unsigned long len)
1977{
1978 gfn_t gfn = gpa >> PAGE_SHIFT;
1979 struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn);
1980 int offset = offset_in_page(gpa);
1981
1982 return __kvm_read_guest_atomic(slot, gfn, data, offset, len);
1983}
1984EXPORT_SYMBOL_GPL(kvm_read_guest_atomic);
1985
1986int kvm_vcpu_read_guest_atomic(struct kvm_vcpu *vcpu, gpa_t gpa,
1987 void *data, unsigned long len)
1988{
1989 gfn_t gfn = gpa >> PAGE_SHIFT;
1990 struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
1991 int offset = offset_in_page(gpa);
1992
1993 return __kvm_read_guest_atomic(slot, gfn, data, offset, len);
1994}
1995EXPORT_SYMBOL_GPL(kvm_vcpu_read_guest_atomic);
1996
1997static int __kvm_write_guest_page(struct kvm_memory_slot *memslot, gfn_t gfn,
1998 const void *data, int offset, int len)
1999{
2000 int r;
2001 unsigned long addr;
2002
2003 addr = gfn_to_hva_memslot(memslot, gfn);
2004 if (kvm_is_error_hva(addr))
2005 return -EFAULT;
2006 r = __copy_to_user((void __user *)addr + offset, data, len);
2007 if (r)
2008 return -EFAULT;
2009 mark_page_dirty_in_slot(memslot, gfn);
2010 return 0;
2011}
2012
2013int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn,
2014 const void *data, int offset, int len)
2015{
2016 struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn);
2017
2018 return __kvm_write_guest_page(slot, gfn, data, offset, len);
2019}
2020EXPORT_SYMBOL_GPL(kvm_write_guest_page);
2021
2022int kvm_vcpu_write_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn,
2023 const void *data, int offset, int len)
2024{
2025 struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
2026
2027 return __kvm_write_guest_page(slot, gfn, data, offset, len);
2028}
2029EXPORT_SYMBOL_GPL(kvm_vcpu_write_guest_page);
2030
2031int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
2032 unsigned long len)
2033{
2034 gfn_t gfn = gpa >> PAGE_SHIFT;
2035 int seg;
2036 int offset = offset_in_page(gpa);
2037 int ret;
2038
2039 while ((seg = next_segment(len, offset)) != 0) {
2040 ret = kvm_write_guest_page(kvm, gfn, data, offset, seg);
2041 if (ret < 0)
2042 return ret;
2043 offset = 0;
2044 len -= seg;
2045 data += seg;
2046 ++gfn;
2047 }
2048 return 0;
2049}
2050EXPORT_SYMBOL_GPL(kvm_write_guest);
2051
2052int kvm_vcpu_write_guest(struct kvm_vcpu *vcpu, gpa_t gpa, const void *data,
2053 unsigned long len)
2054{
2055 gfn_t gfn = gpa >> PAGE_SHIFT;
2056 int seg;
2057 int offset = offset_in_page(gpa);
2058 int ret;
2059
2060 while ((seg = next_segment(len, offset)) != 0) {
2061 ret = kvm_vcpu_write_guest_page(vcpu, gfn, data, offset, seg);
2062 if (ret < 0)
2063 return ret;
2064 offset = 0;
2065 len -= seg;
2066 data += seg;
2067 ++gfn;
2068 }
2069 return 0;
2070}
2071EXPORT_SYMBOL_GPL(kvm_vcpu_write_guest);
2072
2073static int __kvm_gfn_to_hva_cache_init(struct kvm_memslots *slots,
2074 struct gfn_to_hva_cache *ghc,
2075 gpa_t gpa, unsigned long len)
2076{
2077 int offset = offset_in_page(gpa);
2078 gfn_t start_gfn = gpa >> PAGE_SHIFT;
2079 gfn_t end_gfn = (gpa + len - 1) >> PAGE_SHIFT;
2080 gfn_t nr_pages_needed = end_gfn - start_gfn + 1;
2081 gfn_t nr_pages_avail;
2082 int r = start_gfn <= end_gfn ? 0 : -EINVAL;
2083
2084 ghc->gpa = gpa;
2085 ghc->generation = slots->generation;
2086 ghc->len = len;
2087 ghc->hva = KVM_HVA_ERR_BAD;
2088
2089
2090
2091
2092
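        /*
         * Validate that every gfn in the range is covered by a memslot.  The
         * cached hva is only usable when the whole range fits in one page;
         * otherwise ghc->memslot is left NULL and callers fall back to the
         * uncached guest-access helpers.
         */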
2093 while (!r && start_gfn <= end_gfn) {
2094 ghc->memslot = __gfn_to_memslot(slots, start_gfn);
2095 ghc->hva = gfn_to_hva_many(ghc->memslot, start_gfn,
2096 &nr_pages_avail);
2097 if (kvm_is_error_hva(ghc->hva))
2098 r = -EFAULT;
2099 start_gfn += nr_pages_avail;
2100 }
2101
2102
2103 if (!r && nr_pages_needed == 1)
2104 ghc->hva += offset;
2105 else
2106 ghc->memslot = NULL;
2107
2108 return r;
2109}
2110
2111int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
2112 gpa_t gpa, unsigned long len)
2113{
2114 struct kvm_memslots *slots = kvm_memslots(kvm);
2115 return __kvm_gfn_to_hva_cache_init(slots, ghc, gpa, len);
2116}
2117EXPORT_SYMBOL_GPL(kvm_gfn_to_hva_cache_init);
2118
2119int kvm_write_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
2120 void *data, unsigned int offset,
2121 unsigned long len)
2122{
2123 struct kvm_memslots *slots = kvm_memslots(kvm);
2124 int r;
2125 gpa_t gpa = ghc->gpa + offset;
2126
2127 BUG_ON(len + offset > ghc->len);
2128
2129 if (slots->generation != ghc->generation)
2130 __kvm_gfn_to_hva_cache_init(slots, ghc, ghc->gpa, ghc->len);
2131
2132 if (unlikely(!ghc->memslot))
2133 return kvm_write_guest(kvm, gpa, data, len);
2134
2135 if (kvm_is_error_hva(ghc->hva))
2136 return -EFAULT;
2137
2138 r = __copy_to_user((void __user *)ghc->hva + offset, data, len);
2139 if (r)
2140 return -EFAULT;
2141 mark_page_dirty_in_slot(ghc->memslot, gpa >> PAGE_SHIFT);
2142
2143 return 0;
2144}
2145EXPORT_SYMBOL_GPL(kvm_write_guest_offset_cached);
2146
2147int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
2148 void *data, unsigned long len)
2149{
2150 return kvm_write_guest_offset_cached(kvm, ghc, data, 0, len);
2151}
2152EXPORT_SYMBOL_GPL(kvm_write_guest_cached);
2153
2154int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
2155 void *data, unsigned long len)
2156{
2157 struct kvm_memslots *slots = kvm_memslots(kvm);
2158 int r;
2159
2160 BUG_ON(len > ghc->len);
2161
2162 if (slots->generation != ghc->generation)
2163 __kvm_gfn_to_hva_cache_init(slots, ghc, ghc->gpa, ghc->len);
2164
2165 if (unlikely(!ghc->memslot))
2166 return kvm_read_guest(kvm, ghc->gpa, data, len);
2167
2168 if (kvm_is_error_hva(ghc->hva))
2169 return -EFAULT;
2170
2171 r = __copy_from_user(data, (void __user *)ghc->hva, len);
2172 if (r)
2173 return -EFAULT;
2174
2175 return 0;
2176}
2177EXPORT_SYMBOL_GPL(kvm_read_guest_cached);
2178
2179int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len)
2180{
2181 const void *zero_page = (const void *) __va(page_to_phys(ZERO_PAGE(0)));
2182
2183 return kvm_write_guest_page(kvm, gfn, zero_page, offset, len);
2184}
2185EXPORT_SYMBOL_GPL(kvm_clear_guest_page);
2186
2187int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len)
2188{
2189 gfn_t gfn = gpa >> PAGE_SHIFT;
2190 int seg;
2191 int offset = offset_in_page(gpa);
2192 int ret;
2193
2194 while ((seg = next_segment(len, offset)) != 0) {
2195 ret = kvm_clear_guest_page(kvm, gfn, offset, seg);
2196 if (ret < 0)
2197 return ret;
2198 offset = 0;
2199 len -= seg;
2200 ++gfn;
2201 }
2202 return 0;
2203}
2204EXPORT_SYMBOL_GPL(kvm_clear_guest);
2205
2206static void mark_page_dirty_in_slot(struct kvm_memory_slot *memslot,
2207 gfn_t gfn)
2208{
2209 if (memslot && memslot->dirty_bitmap) {
2210 unsigned long rel_gfn = gfn - memslot->base_gfn;
2211
2212 set_bit_le(rel_gfn, memslot->dirty_bitmap);
2213 }
2214}
2215
2216void mark_page_dirty(struct kvm *kvm, gfn_t gfn)
2217{
2218 struct kvm_memory_slot *memslot;
2219
2220 memslot = gfn_to_memslot(kvm, gfn);
2221 mark_page_dirty_in_slot(memslot, gfn);
2222}
2223EXPORT_SYMBOL_GPL(mark_page_dirty);
2224
2225void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn)
2226{
2227 struct kvm_memory_slot *memslot;
2228
2229 memslot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
2230 mark_page_dirty_in_slot(memslot, gfn);
2231}
2232EXPORT_SYMBOL_GPL(kvm_vcpu_mark_page_dirty);
2233
2234void kvm_sigset_activate(struct kvm_vcpu *vcpu)
2235{
2236 if (!vcpu->sigset_active)
2237 return;
2238
2239
2240
2241
2242
2243
2244
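        /*
         * Install the vCPU's signal mask for the duration of KVM_RUN, saving
         * the task's own mask in ->real_blocked so kvm_sigset_deactivate()
         * can restore it.
         */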
        sigprocmask(SIG_SETMASK, &vcpu->sigset, &current->real_blocked);
2246}
2247
2248void kvm_sigset_deactivate(struct kvm_vcpu *vcpu)
2249{
2250 if (!vcpu->sigset_active)
2251 return;
2252
        sigprocmask(SIG_SETMASK, &current->real_blocked, NULL);
        sigemptyset(&current->real_blocked);
2255}
2256
2257static void grow_halt_poll_ns(struct kvm_vcpu *vcpu)
2258{
2259 unsigned int old, val, grow, grow_start;
2260
2261 old = val = vcpu->halt_poll_ns;
2262 grow_start = READ_ONCE(halt_poll_ns_grow_start);
2263 grow = READ_ONCE(halt_poll_ns_grow);
2264 if (!grow)
2265 goto out;
2266
2267 val *= grow;
2268 if (val < grow_start)
2269 val = grow_start;
2270
2271 if (val > halt_poll_ns)
2272 val = halt_poll_ns;
2273
2274 vcpu->halt_poll_ns = val;
2275out:
2276 trace_kvm_halt_poll_ns_grow(vcpu->vcpu_id, val, old);
2277}
2278
2279static void shrink_halt_poll_ns(struct kvm_vcpu *vcpu)
2280{
2281 unsigned int old, val, shrink;
2282
2283 old = val = vcpu->halt_poll_ns;
2284 shrink = READ_ONCE(halt_poll_ns_shrink);
2285 if (shrink == 0)
2286 val = 0;
2287 else
2288 val /= shrink;
2289
2290 vcpu->halt_poll_ns = val;
2291 trace_kvm_halt_poll_ns_shrink(vcpu->vcpu_id, val, old);
2292}
2293
2294static int kvm_vcpu_check_block(struct kvm_vcpu *vcpu)
2295{
2296 int ret = -EINTR;
2297 int idx = srcu_read_lock(&vcpu->kvm->srcu);
2298
2299 if (kvm_arch_vcpu_runnable(vcpu)) {
2300 kvm_make_request(KVM_REQ_UNHALT, vcpu);
2301 goto out;
2302 }
2303 if (kvm_cpu_has_pending_timer(vcpu))
2304 goto out;
2305 if (signal_pending(current))
2306 goto out;
2307
2308 ret = 0;
2309out:
2310 srcu_read_unlock(&vcpu->kvm->srcu, idx);
2311 return ret;
2312}
2313
2314
2315
2316
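/*
 * Block the vCPU until it becomes runnable, optionally busy-polling for up
 * to vcpu->halt_poll_ns first so short halts avoid a full sleep/wakeup.
 */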
2317void kvm_vcpu_block(struct kvm_vcpu *vcpu)
2318{
2319 ktime_t start, cur;
2320 DECLARE_SWAITQUEUE(wait);
2321 bool waited = false;
2322 u64 block_ns;
2323
2324 start = cur = ktime_get();
2325 if (vcpu->halt_poll_ns && !kvm_arch_no_poll(vcpu)) {
2326 ktime_t stop = ktime_add_ns(ktime_get(), vcpu->halt_poll_ns);
2327
2328 ++vcpu->stat.halt_attempted_poll;
2329 do {
2330
2331
2332
2333
2334 if (kvm_vcpu_check_block(vcpu) < 0) {
2335 ++vcpu->stat.halt_successful_poll;
2336 if (!vcpu_valid_wakeup(vcpu))
2337 ++vcpu->stat.halt_poll_invalid;
2338 goto out;
2339 }
2340 cur = ktime_get();
2341 } while (single_task_running() && ktime_before(cur, stop));
2342 }
2343
2344 kvm_arch_vcpu_blocking(vcpu);
2345
2346 for (;;) {
2347 prepare_to_swait_exclusive(&vcpu->wq, &wait, TASK_INTERRUPTIBLE);
2348
2349 if (kvm_vcpu_check_block(vcpu) < 0)
2350 break;
2351
2352 waited = true;
2353 schedule();
2354 }
2355
2356 finish_swait(&vcpu->wq, &wait);
2357 cur = ktime_get();
2358
2359 kvm_arch_vcpu_unblocking(vcpu);
2360out:
2361 block_ns = ktime_to_ns(cur) - ktime_to_ns(start);
2362
2363 if (!vcpu_valid_wakeup(vcpu))
2364 shrink_halt_poll_ns(vcpu);
2365 else if (halt_poll_ns) {
2366 if (block_ns <= vcpu->halt_poll_ns)
2367 ;
2368
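                /* The block was longer than the polling cap: shrink. */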
2369 else if (vcpu->halt_poll_ns && block_ns > halt_poll_ns)
2370 shrink_halt_poll_ns(vcpu);
2371
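                /* The halt outlasted the current poll window but stayed under the cap: grow. */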
2372 else if (vcpu->halt_poll_ns < halt_poll_ns &&
2373 block_ns < halt_poll_ns)
2374 grow_halt_poll_ns(vcpu);
2375 } else
2376 vcpu->halt_poll_ns = 0;
2377
2378 trace_kvm_vcpu_wakeup(block_ns, waited, vcpu_valid_wakeup(vcpu));
2379 kvm_arch_vcpu_block_finish(vcpu);
2380}
2381EXPORT_SYMBOL_GPL(kvm_vcpu_block);
2382
2383bool kvm_vcpu_wake_up(struct kvm_vcpu *vcpu)
2384{
2385 struct swait_queue_head *wqp;
2386
2387 wqp = kvm_arch_vcpu_wq(vcpu);
2388 if (swq_has_sleeper(wqp)) {
2389 swake_up_one(wqp);
2390 WRITE_ONCE(vcpu->ready, true);
2391 ++vcpu->stat.halt_wakeup;
2392 return true;
2393 }
2394
2395 return false;
2396}
2397EXPORT_SYMBOL_GPL(kvm_vcpu_wake_up);
2398
2399#ifndef CONFIG_S390
2400
2401
2402
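/*
 * Kick a sleeping vCPU, or a guest vCPU in guest mode, into host kernel mode.
 */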
2403void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
2404{
2405 int me;
2406 int cpu = vcpu->cpu;
2407
2408 if (kvm_vcpu_wake_up(vcpu))
2409 return;
2410
2411 me = get_cpu();
2412 if (cpu != me && (unsigned)cpu < nr_cpu_ids && cpu_online(cpu))
2413 if (kvm_arch_vcpu_should_kick(vcpu))
2414 smp_send_reschedule(cpu);
2415 put_cpu();
2416}
2417EXPORT_SYMBOL_GPL(kvm_vcpu_kick);
2418#endif
2419
2420int kvm_vcpu_yield_to(struct kvm_vcpu *target)
2421{
2422 struct pid *pid;
2423 struct task_struct *task = NULL;
2424 int ret = 0;
2425
2426 rcu_read_lock();
2427 pid = rcu_dereference(target->pid);
2428 if (pid)
2429 task = get_pid_task(pid, PIDTYPE_PID);
2430 rcu_read_unlock();
2431 if (!task)
2432 return ret;
2433 ret = yield_to(task, 1);
2434 put_task_struct(task);
2435
2436 return ret;
2437}
2438EXPORT_SYMBOL_GPL(kvm_vcpu_yield_to);
2439
2440
2441
2442
2443
2444
2445
2446
2447
2448
2449
2450
2451
2452
2453
2454
2455
2456
2457
2458
2459
2460
2461
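/*
 * Helper that decides whether a vCPU is eligible as a directed-yield target.
 * A vCPU is preferred when it has not recently taken a PLE/cpu-relax exit
 * (a likely preempted lock holder, indicated by in_spin_loop), or when it was
 * skipped on the previous pass (dy_eligible is toggled on each check so that
 * it gets a chance next time).  This is a heuristic, so reading another
 * vCPU's spin_loop state without locking is acceptable here.
 */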
2462static bool kvm_vcpu_eligible_for_directed_yield(struct kvm_vcpu *vcpu)
2463{
2464#ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT
2465 bool eligible;
2466
2467 eligible = !vcpu->spin_loop.in_spin_loop ||
2468 vcpu->spin_loop.dy_eligible;
2469
2470 if (vcpu->spin_loop.in_spin_loop)
2471 kvm_vcpu_set_dy_eligible(vcpu, !vcpu->spin_loop.dy_eligible);
2472
2473 return eligible;
2474#else
2475 return true;
2476#endif
2477}
2478
2479
2480
2481
2482
2483
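/*
 * Default (weak) implementation: architectures may override this to treat
 * additional states as runnable for the purpose of directed yield.
 */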
2484bool __weak kvm_arch_dy_runnable(struct kvm_vcpu *vcpu)
2485{
2486 return kvm_arch_vcpu_runnable(vcpu);
2487}
2488
2489static bool vcpu_dy_runnable(struct kvm_vcpu *vcpu)
2490{
2491 if (kvm_arch_dy_runnable(vcpu))
2492 return true;
2493
2494#ifdef CONFIG_KVM_ASYNC_PF
2495 if (!list_empty_careful(&vcpu->async_pf.done))
2496 return true;
2497#endif
2498
2499 return false;
2500}
2501
2502void kvm_vcpu_on_spin(struct kvm_vcpu *me, bool yield_to_kernel_mode)
2503{
2504 struct kvm *kvm = me->kvm;
2505 struct kvm_vcpu *vcpu;
2506 int last_boosted_vcpu = me->kvm->last_boosted_vcpu;
2507 int yielded = 0;
2508 int try = 3;
2509 int pass;
2510 int i;
2511
2512 kvm_vcpu_set_in_spin_loop(me, true);
2513
2514
2515
2516
2517
2518
2519
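	/*
	 * Boost a vCPU that is runnable but not currently running, in the
	 * hope that it holds the lock we are spinning on and will release it.
	 * Round-robin is approximated by starting at the last boosted vCPU.
	 */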
2520 for (pass = 0; pass < 2 && !yielded && try; pass++) {
2521 kvm_for_each_vcpu(i, vcpu, kvm) {
2522 if (!pass && i <= last_boosted_vcpu) {
2523 i = last_boosted_vcpu;
2524 continue;
2525 } else if (pass && i > last_boosted_vcpu)
2526 break;
2527 if (!READ_ONCE(vcpu->ready))
2528 continue;
2529 if (vcpu == me)
2530 continue;
2531 if (swait_active(&vcpu->wq) && !vcpu_dy_runnable(vcpu))
2532 continue;
2533 if (READ_ONCE(vcpu->preempted) && yield_to_kernel_mode &&
2534 !kvm_arch_vcpu_in_kernel(vcpu))
2535 continue;
2536 if (!kvm_vcpu_eligible_for_directed_yield(vcpu))
2537 continue;
2538
2539 yielded = kvm_vcpu_yield_to(vcpu);
2540 if (yielded > 0) {
2541 kvm->last_boosted_vcpu = i;
2542 break;
2543 } else if (yielded < 0) {
2544 try--;
2545 if (!try)
2546 break;
2547 }
2548 }
2549 }
2550 kvm_vcpu_set_in_spin_loop(me, false);
2551
2552
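	/* Ensure this vCPU is not eligible for yield during the next spinloop. */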
2553 kvm_vcpu_set_dy_eligible(me, false);
2554}
2555EXPORT_SYMBOL_GPL(kvm_vcpu_on_spin);
2556
2557static vm_fault_t kvm_vcpu_fault(struct vm_fault *vmf)
2558{
2559 struct kvm_vcpu *vcpu = vmf->vma->vm_file->private_data;
2560 struct page *page;
2561
2562 if (vmf->pgoff == 0)
2563 page = virt_to_page(vcpu->run);
2564#ifdef CONFIG_X86
2565 else if (vmf->pgoff == KVM_PIO_PAGE_OFFSET)
2566 page = virt_to_page(vcpu->arch.pio_data);
2567#endif
2568#ifdef CONFIG_KVM_MMIO
2569 else if (vmf->pgoff == KVM_COALESCED_MMIO_PAGE_OFFSET)
2570 page = virt_to_page(vcpu->kvm->coalesced_mmio_ring);
2571#endif
2572 else
2573 return kvm_arch_vcpu_fault(vcpu, vmf);
2574 get_page(page);
2575 vmf->page = page;
2576 return 0;
2577}
2578
2579static const struct vm_operations_struct kvm_vcpu_vm_ops = {
2580 .fault = kvm_vcpu_fault,
2581};
2582
2583static int kvm_vcpu_mmap(struct file *file, struct vm_area_struct *vma)
2584{
2585 vma->vm_ops = &kvm_vcpu_vm_ops;
2586 return 0;
2587}
2588
2589static int kvm_vcpu_release(struct inode *inode, struct file *filp)
2590{
2591 struct kvm_vcpu *vcpu = filp->private_data;
2592
2593 debugfs_remove_recursive(vcpu->debugfs_dentry);
2594 kvm_put_kvm(vcpu->kvm);
2595 return 0;
2596}
2597
2598static struct file_operations kvm_vcpu_fops = {
2599 .release = kvm_vcpu_release,
2600 .unlocked_ioctl = kvm_vcpu_ioctl,
2601 .mmap = kvm_vcpu_mmap,
2602 .llseek = noop_llseek,
2603 KVM_COMPAT(kvm_vcpu_compat_ioctl),
2604};
2605
2606
2607
2608
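/*
 * Allocate an anonymous inode and file descriptor for the vCPU.
 */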
2609static int create_vcpu_fd(struct kvm_vcpu *vcpu)
2610{
2611 char name[8 + 1 + ITOA_MAX_LEN + 1];
2612
2613 snprintf(name, sizeof(name), "kvm-vcpu:%d", vcpu->vcpu_id);
2614 return anon_inode_getfd(name, &kvm_vcpu_fops, vcpu, O_RDWR | O_CLOEXEC);
2615}
2616
2617static void kvm_create_vcpu_debugfs(struct kvm_vcpu *vcpu)
2618{
2619#ifdef __KVM_HAVE_ARCH_VCPU_DEBUGFS
2620 char dir_name[ITOA_MAX_LEN * 2];
2621
2622 if (!debugfs_initialized())
2623 return;
2624
2625 snprintf(dir_name, sizeof(dir_name), "vcpu%d", vcpu->vcpu_id);
2626 vcpu->debugfs_dentry = debugfs_create_dir(dir_name,
2627 vcpu->kvm->debugfs_dentry);
2628
2629 kvm_arch_create_vcpu_debugfs(vcpu);
2630#endif
2631}
2632
2633
2634
2635
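/*
 * Create a vCPU with the requested id and expose it to userspace via a new fd.
 */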
2636static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id)
2637{
2638 int r;
2639 struct kvm_vcpu *vcpu;
2640
2641 if (id >= KVM_MAX_VCPU_ID)
2642 return -EINVAL;
2643
2644 mutex_lock(&kvm->lock);
2645 if (kvm->created_vcpus == KVM_MAX_VCPUS) {
2646 mutex_unlock(&kvm->lock);
2647 return -EINVAL;
2648 }
2649
2650 kvm->created_vcpus++;
2651 mutex_unlock(&kvm->lock);
2652
2653 vcpu = kvm_arch_vcpu_create(kvm, id);
2654 if (IS_ERR(vcpu)) {
2655 r = PTR_ERR(vcpu);
2656 goto vcpu_decrement;
2657 }
2658
2659 preempt_notifier_init(&vcpu->preempt_notifier, &kvm_preempt_ops);
2660
2661 r = kvm_arch_vcpu_setup(vcpu);
2662 if (r)
2663 goto vcpu_destroy;
2664
2665 kvm_create_vcpu_debugfs(vcpu);
2666
2667 mutex_lock(&kvm->lock);
2668 if (kvm_get_vcpu_by_id(kvm, id)) {
2669 r = -EEXIST;
2670 goto unlock_vcpu_destroy;
2671 }
2672
2673 BUG_ON(kvm->vcpus[atomic_read(&kvm->online_vcpus)]);
2674
2675
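	/* Now it's all set up, let userspace reach it. */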
2676 kvm_get_kvm(kvm);
2677 r = create_vcpu_fd(vcpu);
2678 if (r < 0) {
2679 kvm_put_kvm(kvm);
2680 goto unlock_vcpu_destroy;
2681 }
2682
2683 kvm->vcpus[atomic_read(&kvm->online_vcpus)] = vcpu;
2684
2685
2686
2687
2688
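	/*
	 * Publish kvm->vcpus[] before incrementing online_vcpus; pairs with
	 * the read barrier in kvm_get_vcpu().
	 */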
2689 smp_wmb();
2690 atomic_inc(&kvm->online_vcpus);
2691
2692 mutex_unlock(&kvm->lock);
2693 kvm_arch_vcpu_postcreate(vcpu);
2694 return r;
2695
2696unlock_vcpu_destroy:
2697 mutex_unlock(&kvm->lock);
2698 debugfs_remove_recursive(vcpu->debugfs_dentry);
2699vcpu_destroy:
2700 kvm_arch_vcpu_destroy(vcpu);
2701vcpu_decrement:
2702 mutex_lock(&kvm->lock);
2703 kvm->created_vcpus--;
2704 mutex_unlock(&kvm->lock);
2705 return r;
2706}
2707
2708static int kvm_vcpu_ioctl_set_sigmask(struct kvm_vcpu *vcpu, sigset_t *sigset)
2709{
2710 if (sigset) {
2711 sigdelsetmask(sigset, sigmask(SIGKILL)|sigmask(SIGSTOP));
2712 vcpu->sigset_active = 1;
2713 vcpu->sigset = *sigset;
2714 } else
2715 vcpu->sigset_active = 0;
2716 return 0;
2717}
2718
2719static long kvm_vcpu_ioctl(struct file *filp,
2720 unsigned int ioctl, unsigned long arg)
2721{
2722 struct kvm_vcpu *vcpu = filp->private_data;
2723 void __user *argp = (void __user *)arg;
2724 int r;
2725 struct kvm_fpu *fpu = NULL;
2726 struct kvm_sregs *kvm_sregs = NULL;
2727
2728 if (vcpu->kvm->mm != current->mm)
2729 return -EIO;
2730
2731 if (unlikely(_IOC_TYPE(ioctl) != KVMIO))
2732 return -EINVAL;
2733
2734
2735
2736
2737
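	/*
	 * Some architectures have vcpu ioctls that are asynchronous to vcpu
	 * execution; taking vcpu->mutex here would break them, so give the
	 * arch code a chance to handle the ioctl first.
	 */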
2738 r = kvm_arch_vcpu_async_ioctl(filp, ioctl, arg);
2739 if (r != -ENOIOCTLCMD)
2740 return r;
2741
2742 if (mutex_lock_killable(&vcpu->mutex))
2743 return -EINTR;
2744 switch (ioctl) {
2745 case KVM_RUN: {
2746 struct pid *oldpid;
2747 r = -EINVAL;
2748 if (arg)
2749 goto out;
2750 oldpid = rcu_access_pointer(vcpu->pid);
2751 if (unlikely(oldpid != task_pid(current))) {
2752
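			/* The thread running this vCPU changed. */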
2753 struct pid *newpid;
2754
2755 r = kvm_arch_vcpu_run_pid_change(vcpu);
2756 if (r)
2757 break;
2758
2759 newpid = get_task_pid(current, PIDTYPE_PID);
2760 rcu_assign_pointer(vcpu->pid, newpid);
2761 if (oldpid)
2762 synchronize_rcu();
2763 put_pid(oldpid);
2764 }
2765 r = kvm_arch_vcpu_ioctl_run(vcpu, vcpu->run);
2766 trace_kvm_userspace_exit(vcpu->run->exit_reason, r);
2767 break;
2768 }
2769 case KVM_GET_REGS: {
2770 struct kvm_regs *kvm_regs;
2771
2772 r = -ENOMEM;
2773 kvm_regs = kzalloc(sizeof(struct kvm_regs), GFP_KERNEL_ACCOUNT);
2774 if (!kvm_regs)
2775 goto out;
2776 r = kvm_arch_vcpu_ioctl_get_regs(vcpu, kvm_regs);
2777 if (r)
2778 goto out_free1;
2779 r = -EFAULT;
2780 if (copy_to_user(argp, kvm_regs, sizeof(struct kvm_regs)))
2781 goto out_free1;
2782 r = 0;
2783out_free1:
2784 kfree(kvm_regs);
2785 break;
2786 }
2787 case KVM_SET_REGS: {
2788 struct kvm_regs *kvm_regs;
2789
2790 r = -ENOMEM;
2791 kvm_regs = memdup_user(argp, sizeof(*kvm_regs));
2792 if (IS_ERR(kvm_regs)) {
2793 r = PTR_ERR(kvm_regs);
2794 goto out;
2795 }
2796 r = kvm_arch_vcpu_ioctl_set_regs(vcpu, kvm_regs);
2797 kfree(kvm_regs);
2798 break;
2799 }
2800 case KVM_GET_SREGS: {
2801 kvm_sregs = kzalloc(sizeof(struct kvm_sregs),
2802 GFP_KERNEL_ACCOUNT);
2803 r = -ENOMEM;
2804 if (!kvm_sregs)
2805 goto out;
2806 r = kvm_arch_vcpu_ioctl_get_sregs(vcpu, kvm_sregs);
2807 if (r)
2808 goto out;
2809 r = -EFAULT;
2810 if (copy_to_user(argp, kvm_sregs, sizeof(struct kvm_sregs)))
2811 goto out;
2812 r = 0;
2813 break;
2814 }
2815 case KVM_SET_SREGS: {
2816 kvm_sregs = memdup_user(argp, sizeof(*kvm_sregs));
2817 if (IS_ERR(kvm_sregs)) {
2818 r = PTR_ERR(kvm_sregs);
2819 kvm_sregs = NULL;
2820 goto out;
2821 }
2822 r = kvm_arch_vcpu_ioctl_set_sregs(vcpu, kvm_sregs);
2823 break;
2824 }
2825 case KVM_GET_MP_STATE: {
2826 struct kvm_mp_state mp_state;
2827
2828 r = kvm_arch_vcpu_ioctl_get_mpstate(vcpu, &mp_state);
2829 if (r)
2830 goto out;
2831 r = -EFAULT;
2832 if (copy_to_user(argp, &mp_state, sizeof(mp_state)))
2833 goto out;
2834 r = 0;
2835 break;
2836 }
2837 case KVM_SET_MP_STATE: {
2838 struct kvm_mp_state mp_state;
2839
2840 r = -EFAULT;
2841 if (copy_from_user(&mp_state, argp, sizeof(mp_state)))
2842 goto out;
2843 r = kvm_arch_vcpu_ioctl_set_mpstate(vcpu, &mp_state);
2844 break;
2845 }
2846 case KVM_TRANSLATE: {
2847 struct kvm_translation tr;
2848
2849 r = -EFAULT;
2850 if (copy_from_user(&tr, argp, sizeof(tr)))
2851 goto out;
2852 r = kvm_arch_vcpu_ioctl_translate(vcpu, &tr);
2853 if (r)
2854 goto out;
2855 r = -EFAULT;
2856 if (copy_to_user(argp, &tr, sizeof(tr)))
2857 goto out;
2858 r = 0;
2859 break;
2860 }
2861 case KVM_SET_GUEST_DEBUG: {
2862 struct kvm_guest_debug dbg;
2863
2864 r = -EFAULT;
2865 if (copy_from_user(&dbg, argp, sizeof(dbg)))
2866 goto out;
2867 r = kvm_arch_vcpu_ioctl_set_guest_debug(vcpu, &dbg);
2868 break;
2869 }
2870 case KVM_SET_SIGNAL_MASK: {
2871 struct kvm_signal_mask __user *sigmask_arg = argp;
2872 struct kvm_signal_mask kvm_sigmask;
2873 sigset_t sigset, *p;
2874
2875 p = NULL;
2876 if (argp) {
2877 r = -EFAULT;
2878 if (copy_from_user(&kvm_sigmask, argp,
2879 sizeof(kvm_sigmask)))
2880 goto out;
2881 r = -EINVAL;
2882 if (kvm_sigmask.len != sizeof(sigset))
2883 goto out;
2884 r = -EFAULT;
2885 if (copy_from_user(&sigset, sigmask_arg->sigset,
2886 sizeof(sigset)))
2887 goto out;
2888 p = &sigset;
2889 }
2890 r = kvm_vcpu_ioctl_set_sigmask(vcpu, p);
2891 break;
2892 }
2893 case KVM_GET_FPU: {
2894 fpu = kzalloc(sizeof(struct kvm_fpu), GFP_KERNEL_ACCOUNT);
2895 r = -ENOMEM;
2896 if (!fpu)
2897 goto out;
2898 r = kvm_arch_vcpu_ioctl_get_fpu(vcpu, fpu);
2899 if (r)
2900 goto out;
2901 r = -EFAULT;
2902 if (copy_to_user(argp, fpu, sizeof(struct kvm_fpu)))
2903 goto out;
2904 r = 0;
2905 break;
2906 }
2907 case KVM_SET_FPU: {
2908 fpu = memdup_user(argp, sizeof(*fpu));
2909 if (IS_ERR(fpu)) {
2910 r = PTR_ERR(fpu);
2911 fpu = NULL;
2912 goto out;
2913 }
2914 r = kvm_arch_vcpu_ioctl_set_fpu(vcpu, fpu);
2915 break;
2916 }
2917 default:
2918 r = kvm_arch_vcpu_ioctl(filp, ioctl, arg);
2919 }
2920out:
2921 mutex_unlock(&vcpu->mutex);
2922 kfree(fpu);
2923 kfree(kvm_sregs);
2924 return r;
2925}
2926
2927#ifdef CONFIG_KVM_COMPAT
2928static long kvm_vcpu_compat_ioctl(struct file *filp,
2929 unsigned int ioctl, unsigned long arg)
2930{
2931 struct kvm_vcpu *vcpu = filp->private_data;
2932 void __user *argp = compat_ptr(arg);
2933 int r;
2934
2935 if (vcpu->kvm->mm != current->mm)
2936 return -EIO;
2937
2938 switch (ioctl) {
2939 case KVM_SET_SIGNAL_MASK: {
2940 struct kvm_signal_mask __user *sigmask_arg = argp;
2941 struct kvm_signal_mask kvm_sigmask;
2942 sigset_t sigset;
2943
2944 if (argp) {
2945 r = -EFAULT;
2946 if (copy_from_user(&kvm_sigmask, argp,
2947 sizeof(kvm_sigmask)))
2948 goto out;
2949 r = -EINVAL;
2950 if (kvm_sigmask.len != sizeof(compat_sigset_t))
2951 goto out;
2952 r = -EFAULT;
2953			if (get_compat_sigset(&sigset, (compat_sigset_t __user *)sigmask_arg->sigset))

2954 goto out;
2955 r = kvm_vcpu_ioctl_set_sigmask(vcpu, &sigset);
2956 } else
2957 r = kvm_vcpu_ioctl_set_sigmask(vcpu, NULL);
2958 break;
2959 }
2960 default:
2961 r = kvm_vcpu_ioctl(filp, ioctl, arg);
2962 }
2963
2964out:
2965 return r;
2966}
2967#endif
2968
2969static int kvm_device_mmap(struct file *filp, struct vm_area_struct *vma)
2970{
2971 struct kvm_device *dev = filp->private_data;
2972
2973 if (dev->ops->mmap)
2974 return dev->ops->mmap(dev, vma);
2975
2976 return -ENODEV;
2977}
2978
2979static int kvm_device_ioctl_attr(struct kvm_device *dev,
2980 int (*accessor)(struct kvm_device *dev,
2981 struct kvm_device_attr *attr),
2982 unsigned long arg)
2983{
2984 struct kvm_device_attr attr;
2985
2986 if (!accessor)
2987 return -EPERM;
2988
2989 if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
2990 return -EFAULT;
2991
2992 return accessor(dev, &attr);
2993}
2994
2995static long kvm_device_ioctl(struct file *filp, unsigned int ioctl,
2996 unsigned long arg)
2997{
2998 struct kvm_device *dev = filp->private_data;
2999
3000 if (dev->kvm->mm != current->mm)
3001 return -EIO;
3002
3003 switch (ioctl) {
3004 case KVM_SET_DEVICE_ATTR:
3005 return kvm_device_ioctl_attr(dev, dev->ops->set_attr, arg);
3006 case KVM_GET_DEVICE_ATTR:
3007 return kvm_device_ioctl_attr(dev, dev->ops->get_attr, arg);
3008 case KVM_HAS_DEVICE_ATTR:
3009 return kvm_device_ioctl_attr(dev, dev->ops->has_attr, arg);
3010 default:
3011 if (dev->ops->ioctl)
3012 return dev->ops->ioctl(dev, ioctl, arg);
3013
3014 return -ENOTTY;
3015 }
3016}
3017
3018static int kvm_device_release(struct inode *inode, struct file *filp)
3019{
3020 struct kvm_device *dev = filp->private_data;
3021 struct kvm *kvm = dev->kvm;
3022
3023 if (dev->ops->release) {
3024 mutex_lock(&kvm->lock);
3025 list_del(&dev->vm_node);
3026 dev->ops->release(dev);
3027 mutex_unlock(&kvm->lock);
3028 }
3029
3030 kvm_put_kvm(kvm);
3031 return 0;
3032}
3033
3034static const struct file_operations kvm_device_fops = {
3035 .unlocked_ioctl = kvm_device_ioctl,
3036 .release = kvm_device_release,
3037 KVM_COMPAT(kvm_device_ioctl),
3038 .mmap = kvm_device_mmap,
3039};
3040
3041struct kvm_device *kvm_device_from_filp(struct file *filp)
3042{
3043 if (filp->f_op != &kvm_device_fops)
3044 return NULL;
3045
3046 return filp->private_data;
3047}
3048
3049static struct kvm_device_ops *kvm_device_ops_table[KVM_DEV_TYPE_MAX] = {
3050#ifdef CONFIG_KVM_MPIC
3051 [KVM_DEV_TYPE_FSL_MPIC_20] = &kvm_mpic_ops,
3052 [KVM_DEV_TYPE_FSL_MPIC_42] = &kvm_mpic_ops,
3053#endif
3054};
3055
3056int kvm_register_device_ops(struct kvm_device_ops *ops, u32 type)
3057{
3058 if (type >= ARRAY_SIZE(kvm_device_ops_table))
3059 return -ENOSPC;
3060
3061 if (kvm_device_ops_table[type] != NULL)
3062 return -EEXIST;
3063
3064 kvm_device_ops_table[type] = ops;
3065 return 0;
3066}
3067
3068void kvm_unregister_device_ops(u32 type)
3069{
3070 if (kvm_device_ops_table[type] != NULL)
3071 kvm_device_ops_table[type] = NULL;
3072}
3073
3074static int kvm_ioctl_create_device(struct kvm *kvm,
3075 struct kvm_create_device *cd)
3076{
3077 struct kvm_device_ops *ops = NULL;
3078 struct kvm_device *dev;
3079 bool test = cd->flags & KVM_CREATE_DEVICE_TEST;
3080 int type;
3081 int ret;
3082
3083 if (cd->type >= ARRAY_SIZE(kvm_device_ops_table))
3084 return -ENODEV;
3085
3086 type = array_index_nospec(cd->type, ARRAY_SIZE(kvm_device_ops_table));
3087 ops = kvm_device_ops_table[type];
3088 if (ops == NULL)
3089 return -ENODEV;
3090
3091 if (test)
3092 return 0;
3093
3094 dev = kzalloc(sizeof(*dev), GFP_KERNEL_ACCOUNT);
3095 if (!dev)
3096 return -ENOMEM;
3097
3098 dev->ops = ops;
3099 dev->kvm = kvm;
3100
3101 mutex_lock(&kvm->lock);
3102 ret = ops->create(dev, type);
3103 if (ret < 0) {
3104 mutex_unlock(&kvm->lock);
3105 kfree(dev);
3106 return ret;
3107 }
3108 list_add(&dev->vm_node, &kvm->devices);
3109 mutex_unlock(&kvm->lock);
3110
3111 if (ops->init)
3112 ops->init(dev);
3113
3114 kvm_get_kvm(kvm);
3115 ret = anon_inode_getfd(ops->name, &kvm_device_fops, dev, O_RDWR | O_CLOEXEC);
3116 if (ret < 0) {
3117 kvm_put_kvm(kvm);
3118 mutex_lock(&kvm->lock);
3119 list_del(&dev->vm_node);
3120 mutex_unlock(&kvm->lock);
3121 ops->destroy(dev);
3122 return ret;
3123 }
3124
3125 cd->fd = ret;
3126 return 0;
3127}
3128
3129static long kvm_vm_ioctl_check_extension_generic(struct kvm *kvm, long arg)
3130{
3131 switch (arg) {
3132 case KVM_CAP_USER_MEMORY:
3133 case KVM_CAP_DESTROY_MEMORY_REGION_WORKS:
3134 case KVM_CAP_JOIN_MEMORY_REGIONS_WORKS:
3135 case KVM_CAP_INTERNAL_ERROR_DATA:
3136#ifdef CONFIG_HAVE_KVM_MSI
3137 case KVM_CAP_SIGNAL_MSI:
3138#endif
3139#ifdef CONFIG_HAVE_KVM_IRQFD
3140 case KVM_CAP_IRQFD:
3141 case KVM_CAP_IRQFD_RESAMPLE:
3142#endif
3143 case KVM_CAP_IOEVENTFD_ANY_LENGTH:
3144 case KVM_CAP_CHECK_EXTENSION_VM:
3145 case KVM_CAP_ENABLE_CAP_VM:
3146#ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT
3147 case KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2:
3148#endif
3149 return 1;
3150#ifdef CONFIG_KVM_MMIO
3151 case KVM_CAP_COALESCED_MMIO:
3152 return KVM_COALESCED_MMIO_PAGE_OFFSET;
3153 case KVM_CAP_COALESCED_PIO:
3154 return 1;
3155#endif
3156#ifdef CONFIG_HAVE_KVM_IRQ_ROUTING
3157 case KVM_CAP_IRQ_ROUTING:
3158 return KVM_MAX_IRQ_ROUTES;
3159#endif
3160#if KVM_ADDRESS_SPACE_NUM > 1
3161 case KVM_CAP_MULTI_ADDRESS_SPACE:
3162 return KVM_ADDRESS_SPACE_NUM;
3163#endif
3164 case KVM_CAP_NR_MEMSLOTS:
3165 return KVM_USER_MEM_SLOTS;
3166 default:
3167 break;
3168 }
3169 return kvm_vm_ioctl_check_extension(kvm, arg);
3170}
3171
3172int __attribute__((weak)) kvm_vm_ioctl_enable_cap(struct kvm *kvm,
3173 struct kvm_enable_cap *cap)
3174{
3175 return -EINVAL;
3176}
3177
3178static int kvm_vm_ioctl_enable_cap_generic(struct kvm *kvm,
3179 struct kvm_enable_cap *cap)
3180{
3181 switch (cap->cap) {
3182#ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT
3183 case KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2:
3184 if (cap->flags || (cap->args[0] & ~1))
3185 return -EINVAL;
3186 kvm->manual_dirty_log_protect = cap->args[0];
3187 return 0;
3188#endif
3189 default:
3190 return kvm_vm_ioctl_enable_cap(kvm, cap);
3191 }
3192}
3193
3194static long kvm_vm_ioctl(struct file *filp,
3195 unsigned int ioctl, unsigned long arg)
3196{
3197 struct kvm *kvm = filp->private_data;
3198 void __user *argp = (void __user *)arg;
3199 int r;
3200
3201 if (kvm->mm != current->mm)
3202 return -EIO;
3203 switch (ioctl) {
3204 case KVM_CREATE_VCPU:
3205 r = kvm_vm_ioctl_create_vcpu(kvm, arg);
3206 break;
3207 case KVM_ENABLE_CAP: {
3208 struct kvm_enable_cap cap;
3209
3210 r = -EFAULT;
3211 if (copy_from_user(&cap, argp, sizeof(cap)))
3212 goto out;
3213 r = kvm_vm_ioctl_enable_cap_generic(kvm, &cap);
3214 break;
3215 }
3216 case KVM_SET_USER_MEMORY_REGION: {
3217 struct kvm_userspace_memory_region kvm_userspace_mem;
3218
3219 r = -EFAULT;
3220 if (copy_from_user(&kvm_userspace_mem, argp,
3221 sizeof(kvm_userspace_mem)))
3222 goto out;
3223
3224 r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_userspace_mem);
3225 break;
3226 }
3227 case KVM_GET_DIRTY_LOG: {
3228 struct kvm_dirty_log log;
3229
3230 r = -EFAULT;
3231 if (copy_from_user(&log, argp, sizeof(log)))
3232 goto out;
3233 r = kvm_vm_ioctl_get_dirty_log(kvm, &log);
3234 break;
3235 }
3236#ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT
3237 case KVM_CLEAR_DIRTY_LOG: {
3238 struct kvm_clear_dirty_log log;
3239
3240 r = -EFAULT;
3241 if (copy_from_user(&log, argp, sizeof(log)))
3242 goto out;
3243 r = kvm_vm_ioctl_clear_dirty_log(kvm, &log);
3244 break;
3245 }
3246#endif
3247#ifdef CONFIG_KVM_MMIO
3248 case KVM_REGISTER_COALESCED_MMIO: {
3249 struct kvm_coalesced_mmio_zone zone;
3250
3251 r = -EFAULT;
3252 if (copy_from_user(&zone, argp, sizeof(zone)))
3253 goto out;
3254 r = kvm_vm_ioctl_register_coalesced_mmio(kvm, &zone);
3255 break;
3256 }
3257 case KVM_UNREGISTER_COALESCED_MMIO: {
3258 struct kvm_coalesced_mmio_zone zone;
3259
3260 r = -EFAULT;
3261 if (copy_from_user(&zone, argp, sizeof(zone)))
3262 goto out;
3263 r = kvm_vm_ioctl_unregister_coalesced_mmio(kvm, &zone);
3264 break;
3265 }
3266#endif
3267 case KVM_IRQFD: {
3268 struct kvm_irqfd data;
3269
3270 r = -EFAULT;
3271 if (copy_from_user(&data, argp, sizeof(data)))
3272 goto out;
3273 r = kvm_irqfd(kvm, &data);
3274 break;
3275 }
3276 case KVM_IOEVENTFD: {
3277 struct kvm_ioeventfd data;
3278
3279 r = -EFAULT;
3280 if (copy_from_user(&data, argp, sizeof(data)))
3281 goto out;
3282 r = kvm_ioeventfd(kvm, &data);
3283 break;
3284 }
3285#ifdef CONFIG_HAVE_KVM_MSI
3286 case KVM_SIGNAL_MSI: {
3287 struct kvm_msi msi;
3288
3289 r = -EFAULT;
3290 if (copy_from_user(&msi, argp, sizeof(msi)))
3291 goto out;
3292 r = kvm_send_userspace_msi(kvm, &msi);
3293 break;
3294 }
3295#endif
3296#ifdef __KVM_HAVE_IRQ_LINE
3297 case KVM_IRQ_LINE_STATUS:
3298 case KVM_IRQ_LINE: {
3299 struct kvm_irq_level irq_event;
3300
3301 r = -EFAULT;
3302 if (copy_from_user(&irq_event, argp, sizeof(irq_event)))
3303 goto out;
3304
3305 r = kvm_vm_ioctl_irq_line(kvm, &irq_event,
3306 ioctl == KVM_IRQ_LINE_STATUS);
3307 if (r)
3308 goto out;
3309
3310 r = -EFAULT;
3311 if (ioctl == KVM_IRQ_LINE_STATUS) {
3312 if (copy_to_user(argp, &irq_event, sizeof(irq_event)))
3313 goto out;
3314 }
3315
3316 r = 0;
3317 break;
3318 }
3319#endif
3320#ifdef CONFIG_HAVE_KVM_IRQ_ROUTING
3321 case KVM_SET_GSI_ROUTING: {
3322 struct kvm_irq_routing routing;
3323 struct kvm_irq_routing __user *urouting;
3324 struct kvm_irq_routing_entry *entries = NULL;
3325
3326 r = -EFAULT;
3327 if (copy_from_user(&routing, argp, sizeof(routing)))
3328 goto out;
3329 r = -EINVAL;
3330 if (!kvm_arch_can_set_irq_routing(kvm))
3331 goto out;
3332 if (routing.nr > KVM_MAX_IRQ_ROUTES)
3333 goto out;
3334 if (routing.flags)
3335 goto out;
3336 if (routing.nr) {
3337 r = -ENOMEM;
3338 entries = vmalloc(array_size(sizeof(*entries),
3339 routing.nr));
3340 if (!entries)
3341 goto out;
3342 r = -EFAULT;
3343 urouting = argp;
3344 if (copy_from_user(entries, urouting->entries,
3345 routing.nr * sizeof(*entries)))
3346 goto out_free_irq_routing;
3347 }
3348 r = kvm_set_irq_routing(kvm, entries, routing.nr,
3349 routing.flags);
3350out_free_irq_routing:
3351 vfree(entries);
3352 break;
3353 }
3354#endif
3355 case KVM_CREATE_DEVICE: {
3356 struct kvm_create_device cd;
3357
3358 r = -EFAULT;
3359 if (copy_from_user(&cd, argp, sizeof(cd)))
3360 goto out;
3361
3362 r = kvm_ioctl_create_device(kvm, &cd);
3363 if (r)
3364 goto out;
3365
3366 r = -EFAULT;
3367 if (copy_to_user(argp, &cd, sizeof(cd)))
3368 goto out;
3369
3370 r = 0;
3371 break;
3372 }
3373 case KVM_CHECK_EXTENSION:
3374 r = kvm_vm_ioctl_check_extension_generic(kvm, arg);
3375 break;
3376 default:
3377 r = kvm_arch_vm_ioctl(filp, ioctl, arg);
3378 }
3379out:
3380 return r;
3381}
3382
3383#ifdef CONFIG_KVM_COMPAT
3384struct compat_kvm_dirty_log {
3385 __u32 slot;
3386 __u32 padding1;
3387 union {
3388 compat_uptr_t dirty_bitmap;
3389 __u64 padding2;
3390 };
3391};
3392
3393static long kvm_vm_compat_ioctl(struct file *filp,
3394 unsigned int ioctl, unsigned long arg)
3395{
3396 struct kvm *kvm = filp->private_data;
3397 int r;
3398
3399 if (kvm->mm != current->mm)
3400 return -EIO;
3401 switch (ioctl) {
3402 case KVM_GET_DIRTY_LOG: {
3403 struct compat_kvm_dirty_log compat_log;
3404 struct kvm_dirty_log log;
3405
3406 if (copy_from_user(&compat_log, (void __user *)arg,
3407 sizeof(compat_log)))
3408 return -EFAULT;
3409 log.slot = compat_log.slot;
3410 log.padding1 = compat_log.padding1;
3411 log.padding2 = compat_log.padding2;
3412 log.dirty_bitmap = compat_ptr(compat_log.dirty_bitmap);
3413
3414 r = kvm_vm_ioctl_get_dirty_log(kvm, &log);
3415 break;
3416 }
3417 default:
3418 r = kvm_vm_ioctl(filp, ioctl, arg);
3419 }
3420 return r;
3421}
3422#endif
3423
3424static struct file_operations kvm_vm_fops = {
3425 .release = kvm_vm_release,
3426 .unlocked_ioctl = kvm_vm_ioctl,
3427 .llseek = noop_llseek,
3428 KVM_COMPAT(kvm_vm_compat_ioctl),
3429};
3430
3431static int kvm_dev_ioctl_create_vm(unsigned long type)
3432{
3433 int r;
3434 struct kvm *kvm;
3435 struct file *file;
3436
3437 kvm = kvm_create_vm(type);
3438 if (IS_ERR(kvm))
3439 return PTR_ERR(kvm);
3440#ifdef CONFIG_KVM_MMIO
3441 r = kvm_coalesced_mmio_init(kvm);
3442 if (r < 0)
3443 goto put_kvm;
3444#endif
3445 r = get_unused_fd_flags(O_CLOEXEC);
3446 if (r < 0)
3447 goto put_kvm;
3448
3449 file = anon_inode_getfile("kvm-vm", &kvm_vm_fops, kvm, O_RDWR);
3450 if (IS_ERR(file)) {
3451 put_unused_fd(r);
3452 r = PTR_ERR(file);
3453 goto put_kvm;
3454 }
3455
3456
3457
3458
3459
3460
3461
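	/*
	 * Don't call kvm_put_kvm() from here on: file->f_op is already set,
	 * with ->release() being kvm_vm_release(), so on failure the final
	 * fput(file) drops the reference for us.
	 */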
3462 if (kvm_create_vm_debugfs(kvm, r) < 0) {
3463 put_unused_fd(r);
3464 fput(file);
3465 return -ENOMEM;
3466 }
3467 kvm_uevent_notify_change(KVM_EVENT_CREATE_VM, kvm);
3468
3469 fd_install(r, file);
3470 return r;
3471
3472put_kvm:
3473 kvm_put_kvm(kvm);
3474 return r;
3475}
3476
3477static long kvm_dev_ioctl(struct file *filp,
3478 unsigned int ioctl, unsigned long arg)
3479{
3480 long r = -EINVAL;
3481
3482 switch (ioctl) {
3483 case KVM_GET_API_VERSION:
3484 if (arg)
3485 goto out;
3486 r = KVM_API_VERSION;
3487 break;
3488 case KVM_CREATE_VM:
3489 r = kvm_dev_ioctl_create_vm(arg);
3490 break;
3491 case KVM_CHECK_EXTENSION:
3492 r = kvm_vm_ioctl_check_extension_generic(NULL, arg);
3493 break;
3494 case KVM_GET_VCPU_MMAP_SIZE:
3495 if (arg)
3496 goto out;
3497 r = PAGE_SIZE;
3498#ifdef CONFIG_X86
3499 r += PAGE_SIZE;
3500#endif
3501#ifdef CONFIG_KVM_MMIO
3502 r += PAGE_SIZE;
3503#endif
3504 break;
3505 case KVM_TRACE_ENABLE:
3506 case KVM_TRACE_PAUSE:
3507 case KVM_TRACE_DISABLE:
3508 r = -EOPNOTSUPP;
3509 break;
3510 default:
3511 return kvm_arch_dev_ioctl(filp, ioctl, arg);
3512 }
3513out:
3514 return r;
3515}
3516
3517static struct file_operations kvm_chardev_ops = {
3518 .unlocked_ioctl = kvm_dev_ioctl,
3519 .llseek = noop_llseek,
3520 KVM_COMPAT(kvm_dev_ioctl),
3521};
3522
3523static struct miscdevice kvm_dev = {
3524 KVM_MINOR,
3525 "kvm",
3526 &kvm_chardev_ops,
3527};
3528
3529static void hardware_enable_nolock(void *junk)
3530{
3531 int cpu = raw_smp_processor_id();
3532 int r;
3533
3534 if (cpumask_test_cpu(cpu, cpus_hardware_enabled))
3535 return;
3536
3537 cpumask_set_cpu(cpu, cpus_hardware_enabled);
3538
3539 r = kvm_arch_hardware_enable();
3540
3541 if (r) {
3542 cpumask_clear_cpu(cpu, cpus_hardware_enabled);
3543 atomic_inc(&hardware_enable_failed);
3544 pr_info("kvm: enabling virtualization on CPU%d failed\n", cpu);
3545 }
3546}
3547
3548static int kvm_starting_cpu(unsigned int cpu)
3549{
3550 raw_spin_lock(&kvm_count_lock);
3551 if (kvm_usage_count)
3552 hardware_enable_nolock(NULL);
3553 raw_spin_unlock(&kvm_count_lock);
3554 return 0;
3555}
3556
3557static void hardware_disable_nolock(void *junk)
3558{
3559 int cpu = raw_smp_processor_id();
3560
3561 if (!cpumask_test_cpu(cpu, cpus_hardware_enabled))
3562 return;
3563 cpumask_clear_cpu(cpu, cpus_hardware_enabled);
3564 kvm_arch_hardware_disable();
3565}
3566
3567static int kvm_dying_cpu(unsigned int cpu)
3568{
3569 raw_spin_lock(&kvm_count_lock);
3570 if (kvm_usage_count)
3571 hardware_disable_nolock(NULL);
3572 raw_spin_unlock(&kvm_count_lock);
3573 return 0;
3574}
3575
3576static void hardware_disable_all_nolock(void)
3577{
3578 BUG_ON(!kvm_usage_count);
3579
3580 kvm_usage_count--;
3581 if (!kvm_usage_count)
3582 on_each_cpu(hardware_disable_nolock, NULL, 1);
3583}
3584
3585static void hardware_disable_all(void)
3586{
3587 raw_spin_lock(&kvm_count_lock);
3588 hardware_disable_all_nolock();
3589 raw_spin_unlock(&kvm_count_lock);
3590}
3591
3592static int hardware_enable_all(void)
3593{
3594 int r = 0;
3595
3596 raw_spin_lock(&kvm_count_lock);
3597
3598 kvm_usage_count++;
3599 if (kvm_usage_count == 1) {
3600 atomic_set(&hardware_enable_failed, 0);
3601 on_each_cpu(hardware_enable_nolock, NULL, 1);
3602
3603 if (atomic_read(&hardware_enable_failed)) {
3604 hardware_disable_all_nolock();
3605 r = -EBUSY;
3606 }
3607 }
3608
3609 raw_spin_unlock(&kvm_count_lock);
3610
3611 return r;
3612}
3613
3614static int kvm_reboot(struct notifier_block *notifier, unsigned long val,
3615 void *v)
3616{
3617
3618
3619
3620
3621
3622
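	/*
	 * Disable hardware virtualization on every CPU before reboot; some
	 * BIOSes hang on reboot if a CPU is left in VMX root mode, and Intel
	 * TXT requires VMX to be off on all CPUs at shutdown.
	 */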
3623 pr_info("kvm: exiting hardware virtualization\n");
3624 kvm_rebooting = true;
3625 on_each_cpu(hardware_disable_nolock, NULL, 1);
3626 return NOTIFY_OK;
3627}
3628
3629static struct notifier_block kvm_reboot_notifier = {
3630 .notifier_call = kvm_reboot,
3631 .priority = 0,
3632};
3633
3634static void kvm_io_bus_destroy(struct kvm_io_bus *bus)
3635{
3636 int i;
3637
3638 for (i = 0; i < bus->dev_count; i++) {
3639 struct kvm_io_device *pos = bus->range[i].dev;
3640
3641 kvm_iodevice_destructor(pos);
3642 }
3643 kfree(bus);
3644}
3645
3646static inline int kvm_io_bus_cmp(const struct kvm_io_range *r1,
3647 const struct kvm_io_range *r2)
3648{
3649 gpa_t addr1 = r1->addr;
3650 gpa_t addr2 = r2->addr;
3651
3652 if (addr1 < addr2)
3653 return -1;
3654
3655
3656
3657
3658
3659
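	/*
	 * If r2->len == 0, match the exact address.  If r2->len != 0, treat
	 * any overlapping range as equal; the relative order of overlapping
	 * ranges does not matter because kvm_io_bus_get_first_dev() walks
	 * back to the first match.
	 */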
3660 if (r2->len) {
3661 addr1 += r1->len;
3662 addr2 += r2->len;
3663 }
3664
3665 if (addr1 > addr2)
3666 return 1;
3667
3668 return 0;
3669}
3670
3671static int kvm_io_bus_sort_cmp(const void *p1, const void *p2)
3672{
3673 return kvm_io_bus_cmp(p1, p2);
3674}
3675
3676static int kvm_io_bus_get_first_dev(struct kvm_io_bus *bus,
3677 gpa_t addr, int len)
3678{
3679 struct kvm_io_range *range, key;
3680 int off;
3681
3682 key = (struct kvm_io_range) {
3683 .addr = addr,
3684 .len = len,
3685 };
3686
3687 range = bsearch(&key, bus->range, bus->dev_count,
3688 sizeof(struct kvm_io_range), kvm_io_bus_sort_cmp);
3689 if (range == NULL)
3690 return -ENOENT;
3691
3692 off = range - bus->range;
3693
3694 while (off > 0 && kvm_io_bus_cmp(&key, &bus->range[off-1]) == 0)
3695 off--;
3696
3697 return off;
3698}
3699
3700static int __kvm_io_bus_write(struct kvm_vcpu *vcpu, struct kvm_io_bus *bus,
3701 struct kvm_io_range *range, const void *val)
3702{
3703 int idx;
3704
3705 idx = kvm_io_bus_get_first_dev(bus, range->addr, range->len);
3706 if (idx < 0)
3707 return -EOPNOTSUPP;
3708
3709 while (idx < bus->dev_count &&
3710 kvm_io_bus_cmp(range, &bus->range[idx]) == 0) {
3711 if (!kvm_iodevice_write(vcpu, bus->range[idx].dev, range->addr,
3712 range->len, val))
3713 return idx;
3714 idx++;
3715 }
3716
3717 return -EOPNOTSUPP;
3718}
3719
3720
3721int kvm_io_bus_write(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
3722 int len, const void *val)
3723{
3724 struct kvm_io_bus *bus;
3725 struct kvm_io_range range;
3726 int r;
3727
3728 range = (struct kvm_io_range) {
3729 .addr = addr,
3730 .len = len,
3731 };
3732
3733 bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu);
3734 if (!bus)
3735 return -ENOMEM;
3736 r = __kvm_io_bus_write(vcpu, bus, &range, val);
3737 return r < 0 ? r : 0;
3738}
3739EXPORT_SYMBOL_GPL(kvm_io_bus_write);
3740
3741
3742int kvm_io_bus_write_cookie(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx,
3743 gpa_t addr, int len, const void *val, long cookie)
3744{
3745 struct kvm_io_bus *bus;
3746 struct kvm_io_range range;
3747
3748 range = (struct kvm_io_range) {
3749 .addr = addr,
3750 .len = len,
3751 };
3752
3753 bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu);
3754 if (!bus)
3755 return -ENOMEM;
3756
3757
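	/* First try the device referenced by the cookie. */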
3758 if ((cookie >= 0) && (cookie < bus->dev_count) &&
3759 (kvm_io_bus_cmp(&range, &bus->range[cookie]) == 0))
3760 if (!kvm_iodevice_write(vcpu, bus->range[cookie].dev, addr, len,
3761 val))
3762 return cookie;
3763
3764
3765
3766
3767
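	/*
	 * The cookie did not match (or contained garbage); fall back to a
	 * full search of the bus.
	 */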
3768 return __kvm_io_bus_write(vcpu, bus, &range, val);
3769}
3770
3771static int __kvm_io_bus_read(struct kvm_vcpu *vcpu, struct kvm_io_bus *bus,
3772 struct kvm_io_range *range, void *val)
3773{
3774 int idx;
3775
3776 idx = kvm_io_bus_get_first_dev(bus, range->addr, range->len);
3777 if (idx < 0)
3778 return -EOPNOTSUPP;
3779
3780 while (idx < bus->dev_count &&
3781 kvm_io_bus_cmp(range, &bus->range[idx]) == 0) {
3782 if (!kvm_iodevice_read(vcpu, bus->range[idx].dev, range->addr,
3783 range->len, val))
3784 return idx;
3785 idx++;
3786 }
3787
3788 return -EOPNOTSUPP;
3789}
3790
3791
3792int kvm_io_bus_read(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
3793 int len, void *val)
3794{
3795 struct kvm_io_bus *bus;
3796 struct kvm_io_range range;
3797 int r;
3798
3799 range = (struct kvm_io_range) {
3800 .addr = addr,
3801 .len = len,
3802 };
3803
3804 bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu);
3805 if (!bus)
3806 return -ENOMEM;
3807 r = __kvm_io_bus_read(vcpu, bus, &range, val);
3808 return r < 0 ? r : 0;
3809}
3810
3811
3812int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
3813 int len, struct kvm_io_device *dev)
3814{
3815 int i;
3816 struct kvm_io_bus *new_bus, *bus;
3817 struct kvm_io_range range;
3818
3819 bus = kvm_get_bus(kvm, bus_idx);
3820 if (!bus)
3821 return -ENOMEM;
3822
3823
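	/* Exclude ioeventfds, which are limited only by the maximum fd count. */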
3824 if (bus->dev_count - bus->ioeventfd_count > NR_IOBUS_DEVS - 1)
3825 return -ENOSPC;
3826
3827 new_bus = kmalloc(struct_size(bus, range, bus->dev_count + 1),
3828 GFP_KERNEL_ACCOUNT);
3829 if (!new_bus)
3830 return -ENOMEM;
3831
3832 range = (struct kvm_io_range) {
3833 .addr = addr,
3834 .len = len,
3835 .dev = dev,
3836 };
3837
3838 for (i = 0; i < bus->dev_count; i++)
3839 if (kvm_io_bus_cmp(&bus->range[i], &range) > 0)
3840 break;
3841
3842 memcpy(new_bus, bus, sizeof(*bus) + i * sizeof(struct kvm_io_range));
3843 new_bus->dev_count++;
3844 new_bus->range[i] = range;
3845 memcpy(new_bus->range + i + 1, bus->range + i,
3846 (bus->dev_count - i) * sizeof(struct kvm_io_range));
3847 rcu_assign_pointer(kvm->buses[bus_idx], new_bus);
3848 synchronize_srcu_expedited(&kvm->srcu);
3849 kfree(bus);
3850
3851 return 0;
3852}
3853
3854
3855void kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
3856 struct kvm_io_device *dev)
3857{
3858 int i;
3859 struct kvm_io_bus *new_bus, *bus;
3860
3861 bus = kvm_get_bus(kvm, bus_idx);
3862 if (!bus)
3863 return;
3864
3865 for (i = 0; i < bus->dev_count; i++)
3866 if (bus->range[i].dev == dev) {
3867 break;
3868 }
3869
3870 if (i == bus->dev_count)
3871 return;
3872
3873 new_bus = kmalloc(struct_size(bus, range, bus->dev_count - 1),
3874 GFP_KERNEL_ACCOUNT);
3875 if (!new_bus) {
3876 pr_err("kvm: failed to shrink bus, removing it completely\n");
3877 goto broken;
3878 }
3879
3880 memcpy(new_bus, bus, sizeof(*bus) + i * sizeof(struct kvm_io_range));
3881 new_bus->dev_count--;
3882 memcpy(new_bus->range + i, bus->range + i + 1,
3883 (new_bus->dev_count - i) * sizeof(struct kvm_io_range));
3884
3885broken:
3886 rcu_assign_pointer(kvm->buses[bus_idx], new_bus);
3887 synchronize_srcu_expedited(&kvm->srcu);
3888 kfree(bus);
3889 return;
3890}
3891
3892struct kvm_io_device *kvm_io_bus_get_dev(struct kvm *kvm, enum kvm_bus bus_idx,
3893 gpa_t addr)
3894{
3895 struct kvm_io_bus *bus;
3896 int dev_idx, srcu_idx;
3897 struct kvm_io_device *iodev = NULL;
3898
3899 srcu_idx = srcu_read_lock(&kvm->srcu);
3900
3901 bus = srcu_dereference(kvm->buses[bus_idx], &kvm->srcu);
3902 if (!bus)
3903 goto out_unlock;
3904
3905 dev_idx = kvm_io_bus_get_first_dev(bus, addr, 1);
3906 if (dev_idx < 0)
3907 goto out_unlock;
3908
3909 iodev = bus->range[dev_idx].dev;
3910
3911out_unlock:
3912 srcu_read_unlock(&kvm->srcu, srcu_idx);
3913
3914 return iodev;
3915}
3916EXPORT_SYMBOL_GPL(kvm_io_bus_get_dev);
3917
3918static int kvm_debugfs_open(struct inode *inode, struct file *file,
3919 int (*get)(void *, u64 *), int (*set)(void *, u64),
3920 const char *fmt)
3921{
3922 struct kvm_stat_data *stat_data = (struct kvm_stat_data *)
3923 inode->i_private;
3924
3925
3926
3927
3928
3929
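	/*
	 * The debugfs file holds a reference to the kvm struct; take it only
	 * if the VM is not already being torn down (users_count has not yet
	 * dropped to zero), to avoid racing with removal of the debugfs
	 * directory.
	 */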
3930 if (!refcount_inc_not_zero(&stat_data->kvm->users_count))
3931 return -ENOENT;
3932
3933 if (simple_attr_open(inode, file, get, set, fmt)) {
3934 kvm_put_kvm(stat_data->kvm);
3935 return -ENOMEM;
3936 }
3937
3938 return 0;
3939}
3940
3941static int kvm_debugfs_release(struct inode *inode, struct file *file)
3942{
3943 struct kvm_stat_data *stat_data = (struct kvm_stat_data *)
3944 inode->i_private;
3945
3946 simple_attr_release(inode, file);
3947 kvm_put_kvm(stat_data->kvm);
3948
3949 return 0;
3950}
3951
3952static int vm_stat_get_per_vm(void *data, u64 *val)
3953{
3954 struct kvm_stat_data *stat_data = (struct kvm_stat_data *)data;
3955
3956 *val = *(ulong *)((void *)stat_data->kvm + stat_data->offset);
3957
3958 return 0;
3959}
3960
3961static int vm_stat_clear_per_vm(void *data, u64 val)
3962{
3963 struct kvm_stat_data *stat_data = (struct kvm_stat_data *)data;
3964
3965 if (val)
3966 return -EINVAL;
3967
3968 *(ulong *)((void *)stat_data->kvm + stat_data->offset) = 0;
3969
3970 return 0;
3971}
3972
3973static int vm_stat_get_per_vm_open(struct inode *inode, struct file *file)
3974{
3975 __simple_attr_check_format("%llu\n", 0ull);
3976 return kvm_debugfs_open(inode, file, vm_stat_get_per_vm,
3977 vm_stat_clear_per_vm, "%llu\n");
3978}
3979
3980static const struct file_operations vm_stat_get_per_vm_fops = {
3981 .owner = THIS_MODULE,
3982 .open = vm_stat_get_per_vm_open,
3983 .release = kvm_debugfs_release,
3984 .read = simple_attr_read,
3985 .write = simple_attr_write,
3986 .llseek = no_llseek,
3987};
3988
3989static int vcpu_stat_get_per_vm(void *data, u64 *val)
3990{
3991 int i;
3992 struct kvm_stat_data *stat_data = (struct kvm_stat_data *)data;
3993 struct kvm_vcpu *vcpu;
3994
3995 *val = 0;
3996
3997 kvm_for_each_vcpu(i, vcpu, stat_data->kvm)
3998 *val += *(u64 *)((void *)vcpu + stat_data->offset);
3999
4000 return 0;
4001}
4002
4003static int vcpu_stat_clear_per_vm(void *data, u64 val)
4004{
4005 int i;
4006 struct kvm_stat_data *stat_data = (struct kvm_stat_data *)data;
4007 struct kvm_vcpu *vcpu;
4008
4009 if (val)
4010 return -EINVAL;
4011
4012 kvm_for_each_vcpu(i, vcpu, stat_data->kvm)
4013 *(u64 *)((void *)vcpu + stat_data->offset) = 0;
4014
4015 return 0;
4016}
4017
4018static int vcpu_stat_get_per_vm_open(struct inode *inode, struct file *file)
4019{
4020 __simple_attr_check_format("%llu\n", 0ull);
4021 return kvm_debugfs_open(inode, file, vcpu_stat_get_per_vm,
4022 vcpu_stat_clear_per_vm, "%llu\n");
4023}
4024
4025static const struct file_operations vcpu_stat_get_per_vm_fops = {
4026 .owner = THIS_MODULE,
4027 .open = vcpu_stat_get_per_vm_open,
4028 .release = kvm_debugfs_release,
4029 .read = simple_attr_read,
4030 .write = simple_attr_write,
4031 .llseek = no_llseek,
4032};
4033
4034static const struct file_operations *stat_fops_per_vm[] = {
4035 [KVM_STAT_VCPU] = &vcpu_stat_get_per_vm_fops,
4036 [KVM_STAT_VM] = &vm_stat_get_per_vm_fops,
4037};
4038
4039static int vm_stat_get(void *_offset, u64 *val)
4040{
4041 unsigned offset = (long)_offset;
4042 struct kvm *kvm;
4043 struct kvm_stat_data stat_tmp = {.offset = offset};
4044 u64 tmp_val;
4045
4046 *val = 0;
4047 mutex_lock(&kvm_lock);
4048 list_for_each_entry(kvm, &vm_list, vm_list) {
4049 stat_tmp.kvm = kvm;
4050 vm_stat_get_per_vm((void *)&stat_tmp, &tmp_val);
4051 *val += tmp_val;
4052 }
4053 mutex_unlock(&kvm_lock);
4054 return 0;
4055}
4056
4057static int vm_stat_clear(void *_offset, u64 val)
4058{
4059 unsigned offset = (long)_offset;
4060 struct kvm *kvm;
4061 struct kvm_stat_data stat_tmp = {.offset = offset};
4062
4063 if (val)
4064 return -EINVAL;
4065
4066 mutex_lock(&kvm_lock);
4067 list_for_each_entry(kvm, &vm_list, vm_list) {
4068 stat_tmp.kvm = kvm;
4069 vm_stat_clear_per_vm((void *)&stat_tmp, 0);
4070 }
4071 mutex_unlock(&kvm_lock);
4072
4073 return 0;
4074}
4075
4076DEFINE_SIMPLE_ATTRIBUTE(vm_stat_fops, vm_stat_get, vm_stat_clear, "%llu\n");
4077
4078static int vcpu_stat_get(void *_offset, u64 *val)
4079{
4080 unsigned offset = (long)_offset;
4081 struct kvm *kvm;
4082 struct kvm_stat_data stat_tmp = {.offset = offset};
4083 u64 tmp_val;
4084
4085 *val = 0;
4086 mutex_lock(&kvm_lock);
4087 list_for_each_entry(kvm, &vm_list, vm_list) {
4088 stat_tmp.kvm = kvm;
4089 vcpu_stat_get_per_vm((void *)&stat_tmp, &tmp_val);
4090 *val += tmp_val;
4091 }
4092 mutex_unlock(&kvm_lock);
4093 return 0;
4094}
4095
4096static int vcpu_stat_clear(void *_offset, u64 val)
4097{
4098 unsigned offset = (long)_offset;
4099 struct kvm *kvm;
4100 struct kvm_stat_data stat_tmp = {.offset = offset};
4101
4102 if (val)
4103 return -EINVAL;
4104
4105 mutex_lock(&kvm_lock);
4106 list_for_each_entry(kvm, &vm_list, vm_list) {
4107 stat_tmp.kvm = kvm;
4108 vcpu_stat_clear_per_vm((void *)&stat_tmp, 0);
4109 }
4110 mutex_unlock(&kvm_lock);
4111
4112 return 0;
4113}
4114
4115DEFINE_SIMPLE_ATTRIBUTE(vcpu_stat_fops, vcpu_stat_get, vcpu_stat_clear,
4116 "%llu\n");
4117
4118static const struct file_operations *stat_fops[] = {
4119 [KVM_STAT_VCPU] = &vcpu_stat_fops,
4120 [KVM_STAT_VM] = &vm_stat_fops,
4121};
4122
4123static void kvm_uevent_notify_change(unsigned int type, struct kvm *kvm)
4124{
4125 struct kobj_uevent_env *env;
4126 unsigned long long created, active;
4127
4128 if (!kvm_dev.this_device || !kvm)
4129 return;
4130
4131 mutex_lock(&kvm_lock);
4132 if (type == KVM_EVENT_CREATE_VM) {
4133 kvm_createvm_count++;
4134 kvm_active_vms++;
4135 } else if (type == KVM_EVENT_DESTROY_VM) {
4136 kvm_active_vms--;
4137 }
4138 created = kvm_createvm_count;
4139 active = kvm_active_vms;
4140 mutex_unlock(&kvm_lock);
4141
4142 env = kzalloc(sizeof(*env), GFP_KERNEL_ACCOUNT);
4143 if (!env)
4144 return;
4145
4146 add_uevent_var(env, "CREATED=%llu", created);
4147 add_uevent_var(env, "COUNT=%llu", active);
4148
4149 if (type == KVM_EVENT_CREATE_VM) {
4150 add_uevent_var(env, "EVENT=create");
4151 kvm->userspace_pid = task_pid_nr(current);
4152 } else if (type == KVM_EVENT_DESTROY_VM) {
4153 add_uevent_var(env, "EVENT=destroy");
4154 }
4155 add_uevent_var(env, "PID=%d", kvm->userspace_pid);
4156
4157 if (!IS_ERR_OR_NULL(kvm->debugfs_dentry)) {
4158 char *tmp, *p = kmalloc(PATH_MAX, GFP_KERNEL_ACCOUNT);
4159
4160 if (p) {
4161 tmp = dentry_path_raw(kvm->debugfs_dentry, p, PATH_MAX);
4162 if (!IS_ERR(tmp))
4163 add_uevent_var(env, "STATS_PATH=%s", tmp);
4164 kfree(p);
4165 }
4166 }
4167
4168 env->envp[env->envp_idx++] = NULL;
4169 kobject_uevent_env(&kvm_dev.this_device->kobj, KOBJ_CHANGE, env->envp);
4170 kfree(env);
4171}
4172
4173static void kvm_init_debug(void)
4174{
4175 struct kvm_stats_debugfs_item *p;
4176
4177 kvm_debugfs_dir = debugfs_create_dir("kvm", NULL);
4178
4179 kvm_debugfs_num_entries = 0;
4180 for (p = debugfs_entries; p->name; ++p, kvm_debugfs_num_entries++) {
4181 debugfs_create_file(p->name, 0644, kvm_debugfs_dir,
4182 (void *)(long)p->offset,
4183 stat_fops[p->kind]);
4184 }
4185}
4186
4187static int kvm_suspend(void)
4188{
4189 if (kvm_usage_count)
4190 hardware_disable_nolock(NULL);
4191 return 0;
4192}
4193
4194static void kvm_resume(void)
4195{
4196 if (kvm_usage_count) {
4197#ifdef CONFIG_LOCKDEP
4198 WARN_ON(lockdep_is_held(&kvm_count_lock));
4199#endif
4200 hardware_enable_nolock(NULL);
4201 }
4202}
4203
4204static struct syscore_ops kvm_syscore_ops = {
4205 .suspend = kvm_suspend,
4206 .resume = kvm_resume,
4207};
4208
4209static inline
4210struct kvm_vcpu *preempt_notifier_to_vcpu(struct preempt_notifier *pn)
4211{
4212 return container_of(pn, struct kvm_vcpu, preempt_notifier);
4213}
4214
4215static void kvm_sched_in(struct preempt_notifier *pn, int cpu)
4216{
4217 struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);
4218
4219 WRITE_ONCE(vcpu->preempted, false);
4220 WRITE_ONCE(vcpu->ready, false);
4221
4222 kvm_arch_sched_in(vcpu, cpu);
4223
4224 kvm_arch_vcpu_load(vcpu, cpu);
4225}
4226
4227static void kvm_sched_out(struct preempt_notifier *pn,
4228 struct task_struct *next)
4229{
4230 struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);
4231
4232 if (current->state == TASK_RUNNING) {
4233 WRITE_ONCE(vcpu->preempted, true);
4234 WRITE_ONCE(vcpu->ready, true);
4235 }
4236 kvm_arch_vcpu_put(vcpu);
4237}
4238
4239static void check_processor_compat(void *rtn)
4240{
4241 *(int *)rtn = kvm_arch_check_processor_compat();
4242}
4243
4244int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
4245 struct module *module)
4246{
4247 int r;
4248 int cpu;
4249
4250 r = kvm_arch_init(opaque);
4251 if (r)
4252 goto out_fail;
4253
4254
4255
4256
4257
4258
4259
4260
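	/*
	 * kvm_arch_init() must run before kvm_irqfd_init(): it ensures at
	 * most one implementation registers (e.g. Intel vs. AMD on x86), so
	 * that irqfd state is not set up for a conflicting implementation.
	 */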
4261 r = kvm_irqfd_init();
4262 if (r)
4263 goto out_irqfd;
4264
4265 if (!zalloc_cpumask_var(&cpus_hardware_enabled, GFP_KERNEL)) {
4266 r = -ENOMEM;
4267 goto out_free_0;
4268 }
4269
4270 r = kvm_arch_hardware_setup();
4271 if (r < 0)
4272 goto out_free_0a;
4273
4274 for_each_online_cpu(cpu) {
4275 smp_call_function_single(cpu, check_processor_compat, &r, 1);
4276 if (r < 0)
4277 goto out_free_1;
4278 }
4279
4280 r = cpuhp_setup_state_nocalls(CPUHP_AP_KVM_STARTING, "kvm/cpu:starting",
4281 kvm_starting_cpu, kvm_dying_cpu);
4282 if (r)
4283 goto out_free_2;
4284 register_reboot_notifier(&kvm_reboot_notifier);
4285
4286
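	/* A dedicated kmem cache lets the arch vcpu meet its alignment requirements. */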
4287 if (!vcpu_align)
4288 vcpu_align = __alignof__(struct kvm_vcpu);
4289 kvm_vcpu_cache =
4290 kmem_cache_create_usercopy("kvm_vcpu", vcpu_size, vcpu_align,
4291 SLAB_ACCOUNT,
4292 offsetof(struct kvm_vcpu, arch),
4293 sizeof_field(struct kvm_vcpu, arch),
4294 NULL);
4295 if (!kvm_vcpu_cache) {
4296 r = -ENOMEM;
4297 goto out_free_3;
4298 }
4299
4300 r = kvm_async_pf_init();
4301 if (r)
4302 goto out_free;
4303
4304 kvm_chardev_ops.owner = module;
4305 kvm_vm_fops.owner = module;
4306 kvm_vcpu_fops.owner = module;
4307
4308 r = misc_register(&kvm_dev);
4309 if (r) {
4310 pr_err("kvm: misc device register failed\n");
4311 goto out_unreg;
4312 }
4313
4314 register_syscore_ops(&kvm_syscore_ops);
4315
4316 kvm_preempt_ops.sched_in = kvm_sched_in;
4317 kvm_preempt_ops.sched_out = kvm_sched_out;
4318
4319 kvm_init_debug();
4320
4321 r = kvm_vfio_ops_init();
4322 WARN_ON(r);
4323
4324 return 0;
4325
4326out_unreg:
4327 kvm_async_pf_deinit();
4328out_free:
4329 kmem_cache_destroy(kvm_vcpu_cache);
4330out_free_3:
4331 unregister_reboot_notifier(&kvm_reboot_notifier);
4332 cpuhp_remove_state_nocalls(CPUHP_AP_KVM_STARTING);
4333out_free_2:
4334out_free_1:
4335 kvm_arch_hardware_unsetup();
4336out_free_0a:
4337 free_cpumask_var(cpus_hardware_enabled);
4338out_free_0:
4339 kvm_irqfd_exit();
4340out_irqfd:
4341 kvm_arch_exit();
4342out_fail:
4343 return r;
4344}
4345EXPORT_SYMBOL_GPL(kvm_init);
4346
4347void kvm_exit(void)
4348{
4349 debugfs_remove_recursive(kvm_debugfs_dir);
4350 misc_deregister(&kvm_dev);
4351 kmem_cache_destroy(kvm_vcpu_cache);
4352 kvm_async_pf_deinit();
4353 unregister_syscore_ops(&kvm_syscore_ops);
4354 unregister_reboot_notifier(&kvm_reboot_notifier);
4355 cpuhp_remove_state_nocalls(CPUHP_AP_KVM_STARTING);
4356 on_each_cpu(hardware_disable_nolock, NULL, 1);
4357 kvm_arch_hardware_unsetup();
4358 kvm_arch_exit();
4359 kvm_irqfd_exit();
4360 free_cpumask_var(cpus_hardware_enabled);
4361 kvm_vfio_ops_exit();
4362}
4363EXPORT_SYMBOL_GPL(kvm_exit);
4364