/*
 * kvm asynchronous fault support
 *
 * Copyright 2010 Red Hat, Inc.
 *
 * Author:
 *	Gleb Natapov <gleb@redhat.com>
 */
#include <linux/kvm_host.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/mmu_context.h>
#include <linux/sched/mm.h>

#include "async_pf.h"
#include <trace/events/kvm.h>

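/*
 * Architectures that select CONFIG_KVM_ASYNC_PF_SYNC deliver the "page ready"
 * notification synchronously from the workqueue worker; all others deliver it
 * later from vcpu context via kvm_check_async_pf_completion().  Each helper
 * below compiles to a no-op in whichever configuration does not use it.
 */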
static inline void kvm_async_page_present_sync(struct kvm_vcpu *vcpu,
					       struct kvm_async_pf *work)
{
#ifdef CONFIG_KVM_ASYNC_PF_SYNC
	kvm_arch_async_page_present(vcpu, work);
#endif
}

static inline void kvm_async_page_present_async(struct kvm_vcpu *vcpu,
						struct kvm_async_pf *work)
{
#ifndef CONFIG_KVM_ASYNC_PF_SYNC
	kvm_arch_async_page_present(vcpu, work);
#endif
}

static struct kmem_cache *async_pf_cache;

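/*
 * The slab cache above backs every struct kvm_async_pf allocated in this
 * file; kvm_async_pf_init()/kvm_async_pf_deinit() are invoked from KVM's
 * module init and exit paths.
 */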
int kvm_async_pf_init(void)
{
	async_pf_cache = KMEM_CACHE(kvm_async_pf, 0);

	if (!async_pf_cache)
		return -ENOMEM;

	return 0;
}

void kvm_async_pf_deinit(void)
{
	kmem_cache_destroy(async_pf_cache);
	async_pf_cache = NULL;
}

void kvm_async_pf_vcpu_init(struct kvm_vcpu *vcpu)
{
	INIT_LIST_HEAD(&vcpu->async_pf.done);
	INIT_LIST_HEAD(&vcpu->async_pf.queue);
	spin_lock_init(&vcpu->async_pf.lock);
}

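/*
 * Workqueue handler, run once per queued async page fault: fault the page in
 * on behalf of the guest, move the work item to the vcpu's "done" list and
 * wake the vcpu in case it is blocked waiting for the page.
 */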
static void async_pf_execute(struct work_struct *work)
{
	struct kvm_async_pf *apf =
		container_of(work, struct kvm_async_pf, work);
	struct mm_struct *mm = apf->mm;
	struct kvm_vcpu *vcpu = apf->vcpu;
	unsigned long addr = apf->addr;
	gva_t gva = apf->gva;
	int locked = 1;

	might_sleep();

	/*
	 * This work runs asynchronously to the task which owns mm and may
	 * execute in a different context, so the page must be faulted in
	 * with the "remote" GUP variant.
	 */
	down_read(&mm->mmap_sem);
	get_user_pages_remote(NULL, mm, addr, 1, FOLL_WRITE, NULL, NULL,
			      &locked);
	if (locked)
		up_read(&mm->mmap_sem);

	kvm_async_page_present_sync(vcpu, apf);

	spin_lock(&vcpu->async_pf.lock);
	list_add_tail(&apf->link, &vcpu->async_pf.done);
	apf->vcpu = NULL;
	spin_unlock(&vcpu->async_pf.lock);

	/*
	 * Once on the "done" list the work item may be freed by
	 * kvm_check_async_pf_completion(), so apf must not be dereferenced
	 * past this point.
	 */
	trace_kvm_async_pf_completed(addr, gva);

	/*
	 * This memory barrier pairs with prepare_to_wait's
	 * set_current_state() so the wakeup is not missed.
	 */
	smp_mb();
	if (swait_active(&vcpu->wq))
		swake_up(&vcpu->wq);

	mmput(mm);
	kvm_put_kvm(vcpu->kvm);
}

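/*
 * Cancel or flush every outstanding work item and free all completed ones.
 * Called when async page faults are disabled for the vcpu or the vcpu is
 * being destroyed, so that no work item can outlive the vcpu.
 */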
void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu)
{
	spin_lock(&vcpu->async_pf.lock);

	/* cancel outstanding work queue item */
	while (!list_empty(&vcpu->async_pf.queue)) {
		struct kvm_async_pf *work =
			list_first_entry(&vcpu->async_pf.queue,
					 typeof(*work), queue);
		list_del(&work->queue);

		/*
		 * The worker already moved this item to the "done" list
		 * (it cleared work->vcpu under async_pf.lock), so it will
		 * be freed by the loop below; do nothing here.
		 */
		if (!work->vcpu)
			continue;

		spin_unlock(&vcpu->async_pf.lock);
#ifdef CONFIG_KVM_ASYNC_PF_SYNC
		flush_work(&work->work);
#else
		if (cancel_work_sync(&work->work)) {
			mmput(work->mm);
			kvm_put_kvm(vcpu->kvm); /* == work->vcpu->kvm */
			kmem_cache_free(async_pf_cache, work);
		}
#endif
		spin_lock(&vcpu->async_pf.lock);
	}

	while (!list_empty(&vcpu->async_pf.done)) {
		struct kvm_async_pf *work =
			list_first_entry(&vcpu->async_pf.done,
					 typeof(*work), link);
		list_del(&work->link);
		kmem_cache_free(async_pf_cache, work);
	}
	spin_unlock(&vcpu->async_pf.lock);

	vcpu->async_pf.queued = 0;
}

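/*
 * Drain the "done" list from vcpu context: for each completed fault, let the
 * architecture update its state and, where currently permitted, inject a
 * "page ready" notification into the guest.
 */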
void kvm_check_async_pf_completion(struct kvm_vcpu *vcpu)
{
	struct kvm_async_pf *work;

	while (!list_empty_careful(&vcpu->async_pf.done) &&
	       kvm_arch_can_inject_async_page_present(vcpu)) {
		spin_lock(&vcpu->async_pf.lock);
		work = list_first_entry(&vcpu->async_pf.done, typeof(*work),
					link);
		list_del(&work->link);
		spin_unlock(&vcpu->async_pf.lock);

		kvm_arch_async_page_ready(vcpu, work);
		kvm_async_page_present_async(vcpu, work);

		list_del(&work->queue);
		vcpu->async_pf.queued--;
		kmem_cache_free(async_pf_cache, work);
	}
}

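/*
 * Queue an asynchronous fault for @hva resulting from the guest access to
 * @gva.  Returns 1 if the work was queued, in which case the architecture is
 * told about the not-present page, and 0 if the caller must fall back to a
 * synchronous fault.
 */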
int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, unsigned long hva,
		       struct kvm_arch_async_pf *arch)
{
	struct kvm_async_pf *work;

	if (vcpu->async_pf.queued >= ASYNC_PF_PER_VCPU)
		return 0;

	/* Set up delayed work. */

	/*
	 * Allocate without sleeping: if the allocation fails, the caller
	 * simply takes the slower synchronous fault path instead.
	 */
	work = kmem_cache_zalloc(async_pf_cache, GFP_NOWAIT | __GFP_NOWARN);
	if (!work)
		return 0;

	work->wakeup_all = false;
	work->vcpu = vcpu;
	work->gva = gva;
	work->addr = hva;
	work->arch = *arch;
	work->mm = current->mm;
	mmget(work->mm);
	kvm_get_kvm(work->vcpu->kvm);

	/* An invalid hva should not reach this point; bail out if it does. */
	if (unlikely(kvm_is_error_hva(work->addr)))
		goto retry_sync;

	INIT_WORK(&work->work, async_pf_execute);
	if (!schedule_work(&work->work))
		goto retry_sync;

	list_add_tail(&work->queue, &vcpu->async_pf.queue);
	vcpu->async_pf.queued++;
	kvm_arch_async_page_not_present(vcpu, work);
	return 1;
retry_sync:
	kvm_put_kvm(work->vcpu->kvm);
	mmput(work->mm);
	kmem_cache_free(async_pf_cache, work);
	return 0;
}

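/*
 * Queue a special "wakeup all" item on the "done" list (unless completions
 * are already pending).  When it is delivered, the architecture tells the
 * guest to retry everything that was waiting on asynchronous faults.
 */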
int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu)
{
	struct kvm_async_pf *work;

	if (!list_empty_careful(&vcpu->async_pf.done))
		return 0;

	work = kmem_cache_zalloc(async_pf_cache, GFP_ATOMIC);
	if (!work)
		return -ENOMEM;

	work->wakeup_all = true;
	INIT_LIST_HEAD(&work->queue); /* for list_del to work */

	spin_lock(&vcpu->async_pf.lock);
	list_add_tail(&work->link, &vcpu->async_pf.done);
	spin_unlock(&vcpu->async_pf.lock);

	vcpu->async_pf.queued++;
	return 0;
}