// SPDX-License-Identifier: GPL-2.0
/*
 * KVM Microsoft Hyper-V emulation
 *
 * derived from arch/x86/kvm/x86.c
 */
21#include "x86.h"
22#include "lapic.h"
23#include "ioapic.h"
24#include "cpuid.h"
25#include "hyperv.h"
26#include "xen.h"
27
28#include <linux/cpu.h>
29#include <linux/kvm_host.h>
30#include <linux/highmem.h>
31#include <linux/sched/cputime.h>
32#include <linux/eventfd.h>
33
34#include <asm/apicdef.h>
35#include <trace/events/kvm.h>
36
37#include "trace.h"
38#include "irq.h"
39#include "fpu.h"
40
41
42#define HYPERV_CPUID_SIGNATURE_EAX 0x31237648
43
44#define KVM_HV_MAX_SPARSE_VCPU_SET_BITS DIV_ROUND_UP(KVM_MAX_VCPUS, 64)
45
46static void stimer_mark_pending(struct kvm_vcpu_hv_stimer *stimer,
47 bool vcpu_kick);
48
49static inline u64 synic_read_sint(struct kvm_vcpu_hv_synic *synic, int sint)
50{
51 return atomic64_read(&synic->sint[sint]);
52}
53
54static inline int synic_get_sint_vector(u64 sint_value)
55{
56 if (sint_value & HV_SYNIC_SINT_MASKED)
57 return -1;
58 return sint_value & HV_SYNIC_SINT_VECTOR_MASK;
59}
60
61static bool synic_has_vector_connected(struct kvm_vcpu_hv_synic *synic,
62 int vector)
63{
64 int i;
65
66 for (i = 0; i < ARRAY_SIZE(synic->sint); i++) {
67 if (synic_get_sint_vector(synic_read_sint(synic, i)) == vector)
68 return true;
69 }
70 return false;
71}
72
73static bool synic_has_vector_auto_eoi(struct kvm_vcpu_hv_synic *synic,
74 int vector)
75{
76 int i;
77 u64 sint_value;
78
79 for (i = 0; i < ARRAY_SIZE(synic->sint); i++) {
80 sint_value = synic_read_sint(synic, i);
81 if (synic_get_sint_vector(sint_value) == vector &&
82 sint_value & HV_SYNIC_SINT_AUTO_EOI)
83 return true;
84 }
85 return false;
86}
87
88static void synic_update_vector(struct kvm_vcpu_hv_synic *synic,
89 int vector)
90{
91 struct kvm_vcpu *vcpu = hv_synic_to_vcpu(synic);
92 struct kvm_hv *hv = to_kvm_hv(vcpu->kvm);
93 int auto_eoi_old, auto_eoi_new;
94
95 if (vector < HV_SYNIC_FIRST_VALID_VECTOR)
96 return;
97
98 if (synic_has_vector_connected(synic, vector))
99 __set_bit(vector, synic->vec_bitmap);
100 else
101 __clear_bit(vector, synic->vec_bitmap);
102
103 auto_eoi_old = bitmap_weight(synic->auto_eoi_bitmap, 256);
104
105 if (synic_has_vector_auto_eoi(synic, vector))
106 __set_bit(vector, synic->auto_eoi_bitmap);
107 else
108 __clear_bit(vector, synic->auto_eoi_bitmap);
109
110 auto_eoi_new = bitmap_weight(synic->auto_eoi_bitmap, 256);
111
112 if (!!auto_eoi_old == !!auto_eoi_new)
113 return;
114
115 down_write(&vcpu->kvm->arch.apicv_update_lock);
116
117 if (auto_eoi_new)
118 hv->synic_auto_eoi_used++;
119 else
120 hv->synic_auto_eoi_used--;
121
122 __kvm_request_apicv_update(vcpu->kvm,
123 !hv->synic_auto_eoi_used,
124 APICV_INHIBIT_REASON_HYPERV);
125
126 up_write(&vcpu->kvm->arch.apicv_update_lock);
127}
128
129static int synic_set_sint(struct kvm_vcpu_hv_synic *synic, int sint,
130 u64 data, bool host)
131{
132 int vector, old_vector;
133 bool masked;
134
135 vector = data & HV_SYNIC_SINT_VECTOR_MASK;
136 masked = data & HV_SYNIC_SINT_MASKED;
137
/*
 * Valid vectors are 16-255.  A guest write of a reserved vector
 * (< HV_SYNIC_FIRST_VALID_VECTOR) is only tolerated when the SINT is
 * masked; host-initiated writes are always accepted so saved state can
 * be restored verbatim.
 */
143 if (vector < HV_SYNIC_FIRST_VALID_VECTOR && !host && !masked)
144 return 1;
145
/*
 * The guest may configure multiple SINTs to use the same vector, so
 * maintain a bitmap of vectors handled by the SynIC and a bitmap of
 * vectors with auto-EOI behaviour.  Both are refreshed for the old and
 * the new vector whenever a SINT is reprogrammed.
 */
151 old_vector = synic_read_sint(synic, sint) & HV_SYNIC_SINT_VECTOR_MASK;
152
153 atomic64_set(&synic->sint[sint], data);
154
155 synic_update_vector(synic, old_vector);
156
157 synic_update_vector(synic, vector);
158
/* Load SynIC vectors into EOI exit bitmap */
160 kvm_make_request(KVM_REQ_SCAN_IOAPIC, hv_synic_to_vcpu(synic));
161 return 0;
162}
163
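/*
 * Resolve a Hyper-V VP index to a vcpu.  The fast path assumes
 * vp_index == vcpu_idx (the default); otherwise every vcpu is scanned
 * for a matching VP_INDEX.
 */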
164static struct kvm_vcpu *get_vcpu_by_vpidx(struct kvm *kvm, u32 vpidx)
165{
166 struct kvm_vcpu *vcpu = NULL;
167 unsigned long i;
168
169 if (vpidx >= KVM_MAX_VCPUS)
170 return NULL;
171
172 vcpu = kvm_get_vcpu(kvm, vpidx);
173 if (vcpu && kvm_hv_get_vpindex(vcpu) == vpidx)
174 return vcpu;
175 kvm_for_each_vcpu(i, vcpu, kvm)
176 if (kvm_hv_get_vpindex(vcpu) == vpidx)
177 return vcpu;
178 return NULL;
179}
180
181static struct kvm_vcpu_hv_synic *synic_get(struct kvm *kvm, u32 vpidx)
182{
183 struct kvm_vcpu *vcpu;
184 struct kvm_vcpu_hv_synic *synic;
185
186 vcpu = get_vcpu_by_vpidx(kvm, vpidx);
187 if (!vcpu || !to_hv_vcpu(vcpu))
188 return NULL;
189 synic = to_hv_synic(vcpu);
190 return (synic->active) ? synic : NULL;
191}
192
193static void kvm_hv_notify_acked_sint(struct kvm_vcpu *vcpu, u32 sint)
194{
195 struct kvm *kvm = vcpu->kvm;
196 struct kvm_vcpu_hv_synic *synic = to_hv_synic(vcpu);
197 struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
198 struct kvm_vcpu_hv_stimer *stimer;
199 int gsi, idx;
200
201 trace_kvm_hv_notify_acked_sint(vcpu->vcpu_id, sint);
202
/* Try to deliver pending Hyper-V SynIC timers messages */
204 for (idx = 0; idx < ARRAY_SIZE(hv_vcpu->stimer); idx++) {
205 stimer = &hv_vcpu->stimer[idx];
206 if (stimer->msg_pending && stimer->config.enable &&
207 !stimer->config.direct_mode &&
208 stimer->config.sintx == sint)
209 stimer_mark_pending(stimer, false);
210 }
211
212 idx = srcu_read_lock(&kvm->irq_srcu);
213 gsi = atomic_read(&synic->sint_to_gsi[sint]);
214 if (gsi != -1)
215 kvm_notify_acked_gsi(kvm, gsi);
216 srcu_read_unlock(&kvm->irq_srcu, idx);
217}
218
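/*
 * Forward a SynIC MSR write to userspace: snapshot the current SynIC
 * state into the vcpu's Hyper-V exit descriptor and request a
 * KVM_EXIT_HYPERV_SYNIC exit.
 */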
219static void synic_exit(struct kvm_vcpu_hv_synic *synic, u32 msr)
220{
221 struct kvm_vcpu *vcpu = hv_synic_to_vcpu(synic);
222 struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
223
224 hv_vcpu->exit.type = KVM_EXIT_HYPERV_SYNIC;
225 hv_vcpu->exit.u.synic.msr = msr;
226 hv_vcpu->exit.u.synic.control = synic->control;
227 hv_vcpu->exit.u.synic.evt_page = synic->evt_page;
228 hv_vcpu->exit.u.synic.msg_page = synic->msg_page;
229
230 kvm_make_request(KVM_REQ_HV_EXIT, vcpu);
231}
232
233static int synic_set_msr(struct kvm_vcpu_hv_synic *synic,
234 u32 msr, u64 data, bool host)
235{
236 struct kvm_vcpu *vcpu = hv_synic_to_vcpu(synic);
237 int ret;
238
239 if (!synic->active && !host)
240 return 1;
241
242 trace_kvm_hv_synic_set_msr(vcpu->vcpu_id, msr, data, host);
243
244 ret = 0;
245 switch (msr) {
246 case HV_X64_MSR_SCONTROL:
247 synic->control = data;
248 if (!host)
249 synic_exit(synic, msr);
250 break;
251 case HV_X64_MSR_SVERSION:
252 if (!host) {
253 ret = 1;
254 break;
255 }
256 synic->version = data;
257 break;
258 case HV_X64_MSR_SIEFP:
259 if ((data & HV_SYNIC_SIEFP_ENABLE) && !host &&
260 !synic->dont_zero_synic_pages)
261 if (kvm_clear_guest(vcpu->kvm,
262 data & PAGE_MASK, PAGE_SIZE)) {
263 ret = 1;
264 break;
265 }
266 synic->evt_page = data;
267 if (!host)
268 synic_exit(synic, msr);
269 break;
270 case HV_X64_MSR_SIMP:
271 if ((data & HV_SYNIC_SIMP_ENABLE) && !host &&
272 !synic->dont_zero_synic_pages)
273 if (kvm_clear_guest(vcpu->kvm,
274 data & PAGE_MASK, PAGE_SIZE)) {
275 ret = 1;
276 break;
277 }
278 synic->msg_page = data;
279 if (!host)
280 synic_exit(synic, msr);
281 break;
282 case HV_X64_MSR_EOM: {
283 int i;
284
285 for (i = 0; i < ARRAY_SIZE(synic->sint); i++)
286 kvm_hv_notify_acked_sint(vcpu, i);
287 break;
288 }
289 case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15:
290 ret = synic_set_sint(synic, msr - HV_X64_MSR_SINT0, data, host);
291 break;
292 default:
293 ret = 1;
294 break;
295 }
296 return ret;
297}
298
299static bool kvm_hv_is_syndbg_enabled(struct kvm_vcpu *vcpu)
300{
301 struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
302
303 return hv_vcpu->cpuid_cache.syndbg_cap_eax &
304 HV_X64_SYNDBG_CAP_ALLOW_KERNEL_DEBUGGING;
305}
306
307static int kvm_hv_syndbg_complete_userspace(struct kvm_vcpu *vcpu)
308{
309 struct kvm_hv *hv = to_kvm_hv(vcpu->kvm);
310
311 if (vcpu->run->hyperv.u.syndbg.msr == HV_X64_MSR_SYNDBG_CONTROL)
312 hv->hv_syndbg.control.status =
313 vcpu->run->hyperv.u.syndbg.status;
314 return 1;
315}
316
317static void syndbg_exit(struct kvm_vcpu *vcpu, u32 msr)
318{
319 struct kvm_hv_syndbg *syndbg = to_hv_syndbg(vcpu);
320 struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
321
322 hv_vcpu->exit.type = KVM_EXIT_HYPERV_SYNDBG;
323 hv_vcpu->exit.u.syndbg.msr = msr;
324 hv_vcpu->exit.u.syndbg.control = syndbg->control.control;
325 hv_vcpu->exit.u.syndbg.send_page = syndbg->control.send_page;
326 hv_vcpu->exit.u.syndbg.recv_page = syndbg->control.recv_page;
327 hv_vcpu->exit.u.syndbg.pending_page = syndbg->control.pending_page;
328 vcpu->arch.complete_userspace_io =
329 kvm_hv_syndbg_complete_userspace;
330
331 kvm_make_request(KVM_REQ_HV_EXIT, vcpu);
332}
333
334static int syndbg_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host)
335{
336 struct kvm_hv_syndbg *syndbg = to_hv_syndbg(vcpu);
337
338 if (!kvm_hv_is_syndbg_enabled(vcpu) && !host)
339 return 1;
340
341 trace_kvm_hv_syndbg_set_msr(vcpu->vcpu_id,
342 to_hv_vcpu(vcpu)->vp_index, msr, data);
343 switch (msr) {
344 case HV_X64_MSR_SYNDBG_CONTROL:
345 syndbg->control.control = data;
346 if (!host)
347 syndbg_exit(vcpu, msr);
348 break;
349 case HV_X64_MSR_SYNDBG_STATUS:
350 syndbg->control.status = data;
351 break;
352 case HV_X64_MSR_SYNDBG_SEND_BUFFER:
353 syndbg->control.send_page = data;
354 break;
355 case HV_X64_MSR_SYNDBG_RECV_BUFFER:
356 syndbg->control.recv_page = data;
357 break;
358 case HV_X64_MSR_SYNDBG_PENDING_BUFFER:
359 syndbg->control.pending_page = data;
360 if (!host)
361 syndbg_exit(vcpu, msr);
362 break;
363 case HV_X64_MSR_SYNDBG_OPTIONS:
364 syndbg->options = data;
365 break;
366 default:
367 break;
368 }
369
370 return 0;
371}
372
373static int syndbg_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata, bool host)
374{
375 struct kvm_hv_syndbg *syndbg = to_hv_syndbg(vcpu);
376
377 if (!kvm_hv_is_syndbg_enabled(vcpu) && !host)
378 return 1;
379
380 switch (msr) {
381 case HV_X64_MSR_SYNDBG_CONTROL:
382 *pdata = syndbg->control.control;
383 break;
384 case HV_X64_MSR_SYNDBG_STATUS:
385 *pdata = syndbg->control.status;
386 break;
387 case HV_X64_MSR_SYNDBG_SEND_BUFFER:
388 *pdata = syndbg->control.send_page;
389 break;
390 case HV_X64_MSR_SYNDBG_RECV_BUFFER:
391 *pdata = syndbg->control.recv_page;
392 break;
393 case HV_X64_MSR_SYNDBG_PENDING_BUFFER:
394 *pdata = syndbg->control.pending_page;
395 break;
396 case HV_X64_MSR_SYNDBG_OPTIONS:
397 *pdata = syndbg->options;
398 break;
399 default:
400 break;
401 }
402
403 trace_kvm_hv_syndbg_get_msr(vcpu->vcpu_id, kvm_hv_get_vpindex(vcpu), msr, *pdata);
404
405 return 0;
406}
407
408static int synic_get_msr(struct kvm_vcpu_hv_synic *synic, u32 msr, u64 *pdata,
409 bool host)
410{
411 int ret;
412
413 if (!synic->active && !host)
414 return 1;
415
416 ret = 0;
417 switch (msr) {
418 case HV_X64_MSR_SCONTROL:
419 *pdata = synic->control;
420 break;
421 case HV_X64_MSR_SVERSION:
422 *pdata = synic->version;
423 break;
424 case HV_X64_MSR_SIEFP:
425 *pdata = synic->evt_page;
426 break;
427 case HV_X64_MSR_SIMP:
428 *pdata = synic->msg_page;
429 break;
430 case HV_X64_MSR_EOM:
431 *pdata = 0;
432 break;
433 case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15:
434 *pdata = atomic64_read(&synic->sint[msr - HV_X64_MSR_SINT0]);
435 break;
436 default:
437 ret = 1;
438 break;
439 }
440 return ret;
441}
442
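/*
 * Inject the interrupt configured for the given SINT into this vcpu's
 * local APIC (fixed delivery, SELF shorthand).  Returns a negative
 * error when the SINT is out of range or masked, otherwise the
 * kvm_irq_delivery_to_apic() result.
 */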
443static int synic_set_irq(struct kvm_vcpu_hv_synic *synic, u32 sint)
444{
445 struct kvm_vcpu *vcpu = hv_synic_to_vcpu(synic);
446 struct kvm_lapic_irq irq;
447 int ret, vector;
448
449 if (sint >= ARRAY_SIZE(synic->sint))
450 return -EINVAL;
451
452 vector = synic_get_sint_vector(synic_read_sint(synic, sint));
453 if (vector < 0)
454 return -ENOENT;
455
456 memset(&irq, 0, sizeof(irq));
457 irq.shorthand = APIC_DEST_SELF;
458 irq.dest_mode = APIC_DEST_PHYSICAL;
459 irq.delivery_mode = APIC_DM_FIXED;
460 irq.vector = vector;
461 irq.level = 1;
462
463 ret = kvm_irq_delivery_to_apic(vcpu->kvm, vcpu->arch.apic, &irq, NULL);
464 trace_kvm_hv_synic_set_irq(vcpu->vcpu_id, sint, irq.vector, ret);
465 return ret;
466}
467
468int kvm_hv_synic_set_irq(struct kvm *kvm, u32 vpidx, u32 sint)
469{
470 struct kvm_vcpu_hv_synic *synic;
471
472 synic = synic_get(kvm, vpidx);
473 if (!synic)
474 return -EINVAL;
475
476 return synic_set_irq(synic, sint);
477}
478
479void kvm_hv_synic_send_eoi(struct kvm_vcpu *vcpu, int vector)
480{
481 struct kvm_vcpu_hv_synic *synic = to_hv_synic(vcpu);
482 int i;
483
484 trace_kvm_hv_synic_send_eoi(vcpu->vcpu_id, vector);
485
486 for (i = 0; i < ARRAY_SIZE(synic->sint); i++)
487 if (synic_get_sint_vector(synic_read_sint(synic, i)) == vector)
488 kvm_hv_notify_acked_sint(vcpu, i);
489}
490
491static int kvm_hv_set_sint_gsi(struct kvm *kvm, u32 vpidx, u32 sint, int gsi)
492{
493 struct kvm_vcpu_hv_synic *synic;
494
495 synic = synic_get(kvm, vpidx);
496 if (!synic)
497 return -EINVAL;
498
499 if (sint >= ARRAY_SIZE(synic->sint_to_gsi))
500 return -EINVAL;
501
502 atomic_set(&synic->sint_to_gsi[sint], gsi);
503 return 0;
504}
505
506void kvm_hv_irq_routing_update(struct kvm *kvm)
507{
508 struct kvm_irq_routing_table *irq_rt;
509 struct kvm_kernel_irq_routing_entry *e;
510 u32 gsi;
511
512 irq_rt = srcu_dereference_check(kvm->irq_routing, &kvm->irq_srcu,
513 lockdep_is_held(&kvm->irq_lock));
514
515 for (gsi = 0; gsi < irq_rt->nr_rt_entries; gsi++) {
516 hlist_for_each_entry(e, &irq_rt->map[gsi], link) {
517 if (e->type == KVM_IRQ_ROUTING_HV_SINT)
518 kvm_hv_set_sint_gsi(kvm, e->hv_sint.vcpu,
519 e->hv_sint.sint, gsi);
520 }
521 }
522}
523
524static void synic_init(struct kvm_vcpu_hv_synic *synic)
525{
526 int i;
527
528 memset(synic, 0, sizeof(*synic));
529 synic->version = HV_SYNIC_VERSION_1;
530 for (i = 0; i < ARRAY_SIZE(synic->sint); i++) {
531 atomic64_set(&synic->sint[i], HV_SYNIC_SINT_MASKED);
532 atomic_set(&synic->sint_to_gsi[i], -1);
533 }
534}
535
536static u64 get_time_ref_counter(struct kvm *kvm)
537{
538 struct kvm_hv *hv = to_kvm_hv(kvm);
539 struct kvm_vcpu *vcpu;
540 u64 tsc;
541
/*
 * Fall back to the kvmclock-based counter (in 100ns units) when the
 * guest has not set up the TSC page or it is broken, disabled or
 * currently being updated.
 */
546 if (hv->hv_tsc_page_status != HV_TSC_PAGE_SET)
547 return div_u64(get_kvmclock_ns(kvm), 100);
548
549 vcpu = kvm_get_vcpu(kvm, 0);
550 tsc = kvm_read_l1_tsc(vcpu, rdtsc());
551 return mul_u64_u64_shr(tsc, hv->tsc_ref.tsc_scale, 64)
552 + hv->tsc_ref.tsc_offset;
553}
554
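/*
 * Queue a synthetic timer for processing on its vcpu: set the per-vcpu
 * pending bit, raise KVM_REQ_HV_STIMER and, when requested, kick the
 * vcpu (as done from the hrtimer callback).
 */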
555static void stimer_mark_pending(struct kvm_vcpu_hv_stimer *stimer,
556 bool vcpu_kick)
557{
558 struct kvm_vcpu *vcpu = hv_stimer_to_vcpu(stimer);
559
560 set_bit(stimer->index,
561 to_hv_vcpu(vcpu)->stimer_pending_bitmap);
562 kvm_make_request(KVM_REQ_HV_STIMER, vcpu);
563 if (vcpu_kick)
564 kvm_vcpu_kick(vcpu);
565}
566
567static void stimer_cleanup(struct kvm_vcpu_hv_stimer *stimer)
568{
569 struct kvm_vcpu *vcpu = hv_stimer_to_vcpu(stimer);
570
571 trace_kvm_hv_stimer_cleanup(hv_stimer_to_vcpu(stimer)->vcpu_id,
572 stimer->index);
573
574 hrtimer_cancel(&stimer->timer);
575 clear_bit(stimer->index,
576 to_hv_vcpu(vcpu)->stimer_pending_bitmap);
577 stimer->msg_pending = false;
578 stimer->exp_time = 0;
579}
580
581static enum hrtimer_restart stimer_timer_callback(struct hrtimer *timer)
582{
583 struct kvm_vcpu_hv_stimer *stimer;
584
585 stimer = container_of(timer, struct kvm_vcpu_hv_stimer, timer);
586 trace_kvm_hv_stimer_callback(hv_stimer_to_vcpu(stimer)->vcpu_id,
587 stimer->index);
588 stimer_mark_pending(stimer, true);
589
590 return HRTIMER_NORESTART;
591}
592
/*
 * stimer_start() assumptions:
 * a) stimer->count is not equal to 0
 * b) stimer->config.enable is set
 */
598static int stimer_start(struct kvm_vcpu_hv_stimer *stimer)
599{
600 u64 time_now;
601 ktime_t ktime_now;
602
603 time_now = get_time_ref_counter(hv_stimer_to_vcpu(stimer)->kvm);
604 ktime_now = ktime_get();
605
606 if (stimer->config.periodic) {
607 if (stimer->exp_time) {
608 if (time_now >= stimer->exp_time) {
609 u64 remainder;
610
611 div64_u64_rem(time_now - stimer->exp_time,
612 stimer->count, &remainder);
613 stimer->exp_time =
614 time_now + (stimer->count - remainder);
615 }
616 } else
617 stimer->exp_time = time_now + stimer->count;
618
619 trace_kvm_hv_stimer_start_periodic(
620 hv_stimer_to_vcpu(stimer)->vcpu_id,
621 stimer->index,
622 time_now, stimer->exp_time);
623
624 hrtimer_start(&stimer->timer,
625 ktime_add_ns(ktime_now,
626 100 * (stimer->exp_time - time_now)),
627 HRTIMER_MODE_ABS);
628 return 0;
629 }
630 stimer->exp_time = stimer->count;
631 if (time_now >= stimer->count) {
/*
 * Expire the timer immediately, per the Hypervisor Top-Level
 * Functional Specification (TLFS):
 * "If a one shot is enabled and the specified count is in
 * the past, it will expire immediately."
 */
638 stimer_mark_pending(stimer, false);
639 return 0;
640 }
641
642 trace_kvm_hv_stimer_start_one_shot(hv_stimer_to_vcpu(stimer)->vcpu_id,
643 stimer->index,
644 time_now, stimer->count);
645
646 hrtimer_start(&stimer->timer,
647 ktime_add_ns(ktime_now, 100 * (stimer->count - time_now)),
648 HRTIMER_MODE_ABS);
649 return 0;
650}
651
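/*
 * Handle a write to HV_X64_MSR_STIMERx_CONFIG: stop the timer, check
 * direct mode against the features exposed in CPUID, force-disable a
 * previously enabled message-mode timer whose SINTx is 0, and re-queue
 * the timer for processing if it remains enabled.
 */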
652static int stimer_set_config(struct kvm_vcpu_hv_stimer *stimer, u64 config,
653 bool host)
654{
655 union hv_stimer_config new_config = {.as_uint64 = config},
656 old_config = {.as_uint64 = stimer->config.as_uint64};
657 struct kvm_vcpu *vcpu = hv_stimer_to_vcpu(stimer);
658 struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
659 struct kvm_vcpu_hv_synic *synic = to_hv_synic(vcpu);
660
661 if (!synic->active && !host)
662 return 1;
663
664 if (unlikely(!host && hv_vcpu->enforce_cpuid && new_config.direct_mode &&
665 !(hv_vcpu->cpuid_cache.features_edx &
666 HV_STIMER_DIRECT_MODE_AVAILABLE)))
667 return 1;
668
669 trace_kvm_hv_stimer_set_config(hv_stimer_to_vcpu(stimer)->vcpu_id,
670 stimer->index, config, host);
671
672 stimer_cleanup(stimer);
673 if (old_config.enable &&
674 !new_config.direct_mode && new_config.sintx == 0)
675 new_config.enable = 0;
676 stimer->config.as_uint64 = new_config.as_uint64;
677
678 if (stimer->config.enable)
679 stimer_mark_pending(stimer, false);
680
681 return 0;
682}
683
684static int stimer_set_count(struct kvm_vcpu_hv_stimer *stimer, u64 count,
685 bool host)
686{
687 struct kvm_vcpu *vcpu = hv_stimer_to_vcpu(stimer);
688 struct kvm_vcpu_hv_synic *synic = to_hv_synic(vcpu);
689
690 if (!synic->active && !host)
691 return 1;
692
693 trace_kvm_hv_stimer_set_count(hv_stimer_to_vcpu(stimer)->vcpu_id,
694 stimer->index, count, host);
695
696 stimer_cleanup(stimer);
697 stimer->count = count;
698 if (stimer->count == 0)
699 stimer->config.enable = 0;
700 else if (stimer->config.auto_enable)
701 stimer->config.enable = 1;
702
703 if (stimer->config.enable)
704 stimer_mark_pending(stimer, false);
705
706 return 0;
707}
708
709static int stimer_get_config(struct kvm_vcpu_hv_stimer *stimer, u64 *pconfig)
710{
711 *pconfig = stimer->config.as_uint64;
712 return 0;
713}
714
715static int stimer_get_count(struct kvm_vcpu_hv_stimer *stimer, u64 *pcount)
716{
717 *pcount = stimer->count;
718 return 0;
719}
720
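/*
 * Deliver a message to the guest through the SynIC message page slot
 * for the given SINT and raise the corresponding SINT interrupt.
 */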
721static int synic_deliver_msg(struct kvm_vcpu_hv_synic *synic, u32 sint,
722 struct hv_message *src_msg, bool no_retry)
723{
724 struct kvm_vcpu *vcpu = hv_synic_to_vcpu(synic);
725 int msg_off = offsetof(struct hv_message_page, sint_message[sint]);
726 gfn_t msg_page_gfn;
727 struct hv_message_header hv_hdr;
728 int r;
729
730 if (!(synic->msg_page & HV_SYNIC_SIMP_ENABLE))
731 return -ENOENT;
732
733 msg_page_gfn = synic->msg_page >> PAGE_SHIFT;
734
/*
 * Check whether the previous message has been consumed: a type other
 * than HVMSG_NONE means the guest has not read it yet.  In that case
 * either give up straight away (no_retry) or set the msg_pending flag
 * in the slot and return -EAGAIN so the caller retries delivery after
 * the guest EOIs the SINT.
 */
741 r = kvm_vcpu_read_guest_page(vcpu, msg_page_gfn, &hv_hdr.message_type,
742 msg_off + offsetof(struct hv_message,
743 header.message_type),
744 sizeof(hv_hdr.message_type));
745 if (r < 0)
746 return r;
747
748 if (hv_hdr.message_type != HVMSG_NONE) {
749 if (no_retry)
750 return 0;
751
752 hv_hdr.message_flags.msg_pending = 1;
753 r = kvm_vcpu_write_guest_page(vcpu, msg_page_gfn,
754 &hv_hdr.message_flags,
755 msg_off +
756 offsetof(struct hv_message,
757 header.message_flags),
758 sizeof(hv_hdr.message_flags));
759 if (r < 0)
760 return r;
761 return -EAGAIN;
762 }
763
764 r = kvm_vcpu_write_guest_page(vcpu, msg_page_gfn, src_msg, msg_off,
765 sizeof(src_msg->header) +
766 src_msg->header.payload_size);
767 if (r < 0)
768 return r;
769
770 r = synic_set_irq(synic, sint);
771 if (r < 0)
772 return r;
773 if (r == 0)
774 return -EFAULT;
775 return 0;
776}
777
778static int stimer_send_msg(struct kvm_vcpu_hv_stimer *stimer)
779{
780 struct kvm_vcpu *vcpu = hv_stimer_to_vcpu(stimer);
781 struct hv_message *msg = &stimer->msg;
782 struct hv_timer_message_payload *payload =
783 (struct hv_timer_message_payload *)&msg->u.payload;
784
/*
 * To avoid piling up periodic ticks, don't retry message
 * delivery for them (within "lazy" lost ticks policy).
 */
789 bool no_retry = stimer->config.periodic;
790
791 payload->expiration_time = stimer->exp_time;
792 payload->delivery_time = get_time_ref_counter(vcpu->kvm);
793 return synic_deliver_msg(to_hv_synic(vcpu),
794 stimer->config.sintx, msg,
795 no_retry);
796}
797
798static int stimer_notify_direct(struct kvm_vcpu_hv_stimer *stimer)
799{
800 struct kvm_vcpu *vcpu = hv_stimer_to_vcpu(stimer);
801 struct kvm_lapic_irq irq = {
802 .delivery_mode = APIC_DM_FIXED,
803 .vector = stimer->config.apic_vector
804 };
805
806 if (lapic_in_kernel(vcpu))
807 return !kvm_apic_set_irq(vcpu, &irq, NULL);
808 return 0;
809}
810
811static void stimer_expiration(struct kvm_vcpu_hv_stimer *stimer)
812{
813 int r, direct = stimer->config.direct_mode;
814
815 stimer->msg_pending = true;
816 if (!direct)
817 r = stimer_send_msg(stimer);
818 else
819 r = stimer_notify_direct(stimer);
820 trace_kvm_hv_stimer_expiration(hv_stimer_to_vcpu(stimer)->vcpu_id,
821 stimer->index, direct, r);
822 if (!r) {
823 stimer->msg_pending = false;
824 if (!(stimer->config.periodic))
825 stimer->config.enable = 0;
826 }
827}
828
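/*
 * Called on KVM_REQ_HV_STIMER: walk the pending-timer bitmap, deliver
 * timers that have expired and restart or clean up each one depending
 * on whether it is still enabled and has a non-zero count.
 */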
829void kvm_hv_process_stimers(struct kvm_vcpu *vcpu)
830{
831 struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
832 struct kvm_vcpu_hv_stimer *stimer;
833 u64 time_now, exp_time;
834 int i;
835
836 if (!hv_vcpu)
837 return;
838
839 for (i = 0; i < ARRAY_SIZE(hv_vcpu->stimer); i++)
840 if (test_and_clear_bit(i, hv_vcpu->stimer_pending_bitmap)) {
841 stimer = &hv_vcpu->stimer[i];
842 if (stimer->config.enable) {
843 exp_time = stimer->exp_time;
844
845 if (exp_time) {
846 time_now =
847 get_time_ref_counter(vcpu->kvm);
848 if (time_now >= exp_time)
849 stimer_expiration(stimer);
850 }
851
852 if ((stimer->config.enable) &&
853 stimer->count) {
854 if (!stimer->msg_pending)
855 stimer_start(stimer);
856 } else
857 stimer_cleanup(stimer);
858 }
859 }
860}
861
862void kvm_hv_vcpu_uninit(struct kvm_vcpu *vcpu)
863{
864 struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
865 int i;
866
867 if (!hv_vcpu)
868 return;
869
870 for (i = 0; i < ARRAY_SIZE(hv_vcpu->stimer); i++)
871 stimer_cleanup(&hv_vcpu->stimer[i]);
872
873 kfree(hv_vcpu);
874 vcpu->arch.hyperv = NULL;
875}
876
877bool kvm_hv_assist_page_enabled(struct kvm_vcpu *vcpu)
878{
879 struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
880
881 if (!hv_vcpu)
882 return false;
883
884 if (!(hv_vcpu->hv_vapic & HV_X64_MSR_VP_ASSIST_PAGE_ENABLE))
885 return false;
886 return vcpu->arch.pv_eoi.msr_val & KVM_MSR_ENABLED;
887}
888EXPORT_SYMBOL_GPL(kvm_hv_assist_page_enabled);
889
890bool kvm_hv_get_assist_page(struct kvm_vcpu *vcpu,
891 struct hv_vp_assist_page *assist_page)
892{
893 if (!kvm_hv_assist_page_enabled(vcpu))
894 return false;
895 return !kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.pv_eoi.data,
896 assist_page, sizeof(*assist_page));
897}
898EXPORT_SYMBOL_GPL(kvm_hv_get_assist_page);
899
900static void stimer_prepare_msg(struct kvm_vcpu_hv_stimer *stimer)
901{
902 struct hv_message *msg = &stimer->msg;
903 struct hv_timer_message_payload *payload =
904 (struct hv_timer_message_payload *)&msg->u.payload;
905
906 memset(&msg->header, 0, sizeof(msg->header));
907 msg->header.message_type = HVMSG_TIMER_EXPIRED;
908 msg->header.payload_size = sizeof(*payload);
909
910 payload->timer_index = stimer->index;
911 payload->expiration_time = 0;
912 payload->delivery_time = 0;
913}
914
915static void stimer_init(struct kvm_vcpu_hv_stimer *stimer, int timer_index)
916{
917 memset(stimer, 0, sizeof(*stimer));
918 stimer->index = timer_index;
919 hrtimer_init(&stimer->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
920 stimer->timer.function = stimer_timer_callback;
921 stimer_prepare_msg(stimer);
922}
923
924static int kvm_hv_vcpu_init(struct kvm_vcpu *vcpu)
925{
926 struct kvm_vcpu_hv *hv_vcpu;
927 int i;
928
929 hv_vcpu = kzalloc(sizeof(struct kvm_vcpu_hv), GFP_KERNEL_ACCOUNT);
930 if (!hv_vcpu)
931 return -ENOMEM;
932
933 vcpu->arch.hyperv = hv_vcpu;
934 hv_vcpu->vcpu = vcpu;
935
936 synic_init(&hv_vcpu->synic);
937
938 bitmap_zero(hv_vcpu->stimer_pending_bitmap, HV_SYNIC_STIMER_COUNT);
939 for (i = 0; i < ARRAY_SIZE(hv_vcpu->stimer); i++)
940 stimer_init(&hv_vcpu->stimer[i], i);
941
942 hv_vcpu->vp_index = vcpu->vcpu_idx;
943
944 return 0;
945}
946
947int kvm_hv_activate_synic(struct kvm_vcpu *vcpu, bool dont_zero_synic_pages)
948{
949 struct kvm_vcpu_hv_synic *synic;
950 int r;
951
952 if (!to_hv_vcpu(vcpu)) {
953 r = kvm_hv_vcpu_init(vcpu);
954 if (r)
955 return r;
956 }
957
958 synic = to_hv_synic(vcpu);
959
960 synic->active = true;
961 synic->dont_zero_synic_pages = dont_zero_synic_pages;
962 synic->control = HV_SYNIC_CONTROL_ENABLE;
963 return 0;
964}
965
966static bool kvm_hv_msr_partition_wide(u32 msr)
967{
968 bool r = false;
969
970 switch (msr) {
971 case HV_X64_MSR_GUEST_OS_ID:
972 case HV_X64_MSR_HYPERCALL:
973 case HV_X64_MSR_REFERENCE_TSC:
974 case HV_X64_MSR_TIME_REF_COUNT:
975 case HV_X64_MSR_CRASH_CTL:
976 case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
977 case HV_X64_MSR_RESET:
978 case HV_X64_MSR_REENLIGHTENMENT_CONTROL:
979 case HV_X64_MSR_TSC_EMULATION_CONTROL:
980 case HV_X64_MSR_TSC_EMULATION_STATUS:
981 case HV_X64_MSR_SYNDBG_OPTIONS:
982 case HV_X64_MSR_SYNDBG_CONTROL ... HV_X64_MSR_SYNDBG_PENDING_BUFFER:
983 r = true;
984 break;
985 }
986
987 return r;
988}
989
990static int kvm_hv_msr_get_crash_data(struct kvm *kvm, u32 index, u64 *pdata)
991{
992 struct kvm_hv *hv = to_kvm_hv(kvm);
993 size_t size = ARRAY_SIZE(hv->hv_crash_param);
994
995 if (WARN_ON_ONCE(index >= size))
996 return -EINVAL;
997
998 *pdata = hv->hv_crash_param[array_index_nospec(index, size)];
999 return 0;
1000}
1001
1002static int kvm_hv_msr_get_crash_ctl(struct kvm *kvm, u64 *pdata)
1003{
1004 struct kvm_hv *hv = to_kvm_hv(kvm);
1005
1006 *pdata = hv->hv_crash_ctl;
1007 return 0;
1008}
1009
1010static int kvm_hv_msr_set_crash_ctl(struct kvm *kvm, u64 data)
1011{
1012 struct kvm_hv *hv = to_kvm_hv(kvm);
1013
1014 hv->hv_crash_ctl = data & HV_CRASH_CTL_CRASH_NOTIFY;
1015
1016 return 0;
1017}
1018
1019static int kvm_hv_msr_set_crash_data(struct kvm *kvm, u32 index, u64 data)
1020{
1021 struct kvm_hv *hv = to_kvm_hv(kvm);
1022 size_t size = ARRAY_SIZE(hv->hv_crash_param);
1023
1024 if (WARN_ON_ONCE(index >= size))
1025 return -EINVAL;
1026
1027 hv->hv_crash_param[array_index_nospec(index, size)] = data;
1028 return 0;
1029}
1030
/*
 * The kvmclock and Hyper-V TSC page use similar formulas, and converting
 * between them is possible as long as the parameters are chosen so that
 * both clocks read the same value:
 *
 * kvmclock formula:
 *    nsec = (ticks - tsc_timestamp) * tsc_to_system_mul * 2^(tsc_shift-32)
 *           + system_time
 *
 * Hyper-V formula:
 *    nsec/100 = ticks * scale / 2^64 + offset
 *
 * When tsc_timestamp = system_time = 0, offset is zero in the Hyper-V
 * formula.  By dividing the kvmclock formula by 100 and equating what's
 * left we get:
 *    ticks * scale / 2^64 = ticks * tsc_to_system_mul * 2^(tsc_shift-32) / 100
 *    scale = tsc_to_system_mul * 2^(32+tsc_shift) / 100
 *
 * Now expand the kvmclock formula, divide by 100 and replace
 * tsc_to_system_mul * 2^(tsc_shift-32) / 100 with scale / 2^64:
 *    nsec/100 = ticks * scale / 2^64
 *               - tsc_timestamp * scale / 2^64
 *               + system_time / 100
 *
 * Equating this with the Hyper-V formula, ticks * scale / 2^64 cancels
 * out and what remains is the offset:
 *    offset = system_time / 100 - tsc_timestamp * scale / 2^64
 *
 * These two equivalences are what compute_tsc_page_parameters() below
 * implements.
 */
1066static bool compute_tsc_page_parameters(struct pvclock_vcpu_time_info *hv_clock,
1067 struct ms_hyperv_tsc_page *tsc_ref)
1068{
1069 u64 max_mul;
1070
1071 if (!(hv_clock->flags & PVCLOCK_TSC_STABLE_BIT))
1072 return false;
1073
/*
 * Check whether the scale would overflow 64 bits; if so, bail out and
 * let the guest use the time reference counter MSR instead:
 *    tsc_to_system_mul * 2^(32+tsc_shift) / 100 >= 2^64
 *    <=> tsc_to_system_mul >= 100 * 2^(32-tsc_shift)
 */
1080 max_mul = 100ull << (32 - hv_clock->tsc_shift);
1081 if (hv_clock->tsc_to_system_mul >= max_mul)
1082 return false;
1083
/*
 * Otherwise compute the scale and offset according to the formulas
 * derived above.
 */
1088 tsc_ref->tsc_scale =
1089 mul_u64_u32_div(1ULL << (32 + hv_clock->tsc_shift),
1090 hv_clock->tsc_to_system_mul,
1091 100);
1092
1093 tsc_ref->tsc_offset = hv_clock->system_time;
1094 do_div(tsc_ref->tsc_offset, 100);
1095 tsc_ref->tsc_offset -=
1096 mul_u64_u64_shr(hv_clock->tsc_timestamp, tsc_ref->tsc_scale, 64);
1097 return true;
1098}
1099
/*
 * Don't touch the TSC page values if the guest has opted for TSC
 * emulation after migration.  KVM doesn't fully support reenlightenment
 * notifications and TSC access emulation, and Hyper-V guests expect the
 * TSC page contents to stay constant until they disable the emulation
 * themselves through HV_X64_MSR_TSC_EMULATION_STATUS.  Userspace is
 * expected to preserve the TSC frequency and the guest-visible TSC
 * value across migration in this case.
 */
1109static inline bool tsc_page_update_unsafe(struct kvm_hv *hv)
1110{
1111 return (hv->hv_tsc_page_status != HV_TSC_PAGE_GUEST_CHANGED) &&
1112 hv->hv_tsc_emulation_control;
1113}
1114
1115void kvm_hv_setup_tsc_page(struct kvm *kvm,
1116 struct pvclock_vcpu_time_info *hv_clock)
1117{
1118 struct kvm_hv *hv = to_kvm_hv(kvm);
1119 u32 tsc_seq;
1120 u64 gfn;
1121
1122 BUILD_BUG_ON(sizeof(tsc_seq) != sizeof(hv->tsc_ref.tsc_sequence));
1123 BUILD_BUG_ON(offsetof(struct ms_hyperv_tsc_page, tsc_sequence) != 0);
1124
1125 if (hv->hv_tsc_page_status == HV_TSC_PAGE_BROKEN ||
1126 hv->hv_tsc_page_status == HV_TSC_PAGE_UNSET)
1127 return;
1128
1129 mutex_lock(&hv->hv_lock);
1130 if (!(hv->hv_tsc_page & HV_X64_MSR_TSC_REFERENCE_ENABLE))
1131 goto out_unlock;
1132
1133 gfn = hv->hv_tsc_page >> HV_X64_MSR_TSC_REFERENCE_ADDRESS_SHIFT;

/*
 * Because the TSC parameters only vary when there is a
 * change in the master clock, do not bother with caching.
 */
1138 if (unlikely(kvm_read_guest(kvm, gfn_to_gpa(gfn),
1139 &tsc_seq, sizeof(tsc_seq))))
1140 goto out_err;
1141
1142 if (tsc_seq && tsc_page_update_unsafe(hv)) {
1143 if (kvm_read_guest(kvm, gfn_to_gpa(gfn), &hv->tsc_ref, sizeof(hv->tsc_ref)))
1144 goto out_err;
1145
1146 hv->hv_tsc_page_status = HV_TSC_PAGE_SET;
1147 goto out_unlock;
1148 }
1149
/*
 * While we're computing and writing the parameters, force the
 * guest to use the time reference count MSR.
 */
1154 hv->tsc_ref.tsc_sequence = 0;
1155 if (kvm_write_guest(kvm, gfn_to_gpa(gfn),
1156 &hv->tsc_ref, sizeof(hv->tsc_ref.tsc_sequence)))
1157 goto out_err;
1158
1159 if (!compute_tsc_page_parameters(hv_clock, &hv->tsc_ref))
1160 goto out_err;
1161
/* Ensure sequence is zero before writing the rest of the struct. */
1163 smp_wmb();
1164 if (kvm_write_guest(kvm, gfn_to_gpa(gfn), &hv->tsc_ref, sizeof(hv->tsc_ref)))
1165 goto out_err;
1166
/*
 * Now switch to the TSC page mechanism by writing the sequence.
 */
1170 tsc_seq++;
1171 if (tsc_seq == 0xFFFFFFFF || tsc_seq == 0)
1172 tsc_seq = 1;
1173

/* Write the struct entirely before the non-zero sequence. */
1175 smp_wmb();
1176
1177 hv->tsc_ref.tsc_sequence = tsc_seq;
1178 if (kvm_write_guest(kvm, gfn_to_gpa(gfn),
1179 &hv->tsc_ref, sizeof(hv->tsc_ref.tsc_sequence)))
1180 goto out_err;
1181
1182 hv->hv_tsc_page_status = HV_TSC_PAGE_SET;
1183 goto out_unlock;
1184
1185out_err:
1186 hv->hv_tsc_page_status = HV_TSC_PAGE_BROKEN;
1187out_unlock:
1188 mutex_unlock(&hv->hv_lock);
1189}
1190
1191void kvm_hv_invalidate_tsc_page(struct kvm *kvm)
1192{
1193 struct kvm_hv *hv = to_kvm_hv(kvm);
1194 u64 gfn;
1195 int idx;
1196
1197 if (hv->hv_tsc_page_status == HV_TSC_PAGE_BROKEN ||
1198 hv->hv_tsc_page_status == HV_TSC_PAGE_UNSET ||
1199 tsc_page_update_unsafe(hv))
1200 return;
1201
1202 mutex_lock(&hv->hv_lock);
1203
1204 if (!(hv->hv_tsc_page & HV_X64_MSR_TSC_REFERENCE_ENABLE))
1205 goto out_unlock;
1206
1207
1208 if (hv->hv_tsc_page_status == HV_TSC_PAGE_SET)
1209 hv->hv_tsc_page_status = HV_TSC_PAGE_UPDATING;
1210
1211 gfn = hv->hv_tsc_page >> HV_X64_MSR_TSC_REFERENCE_ADDRESS_SHIFT;
1212
1213 hv->tsc_ref.tsc_sequence = 0;

/*
 * Take kvm->srcu: kvm_write_guest() below dereferences memslots and
 * this function is not necessarily called from vcpu context.
 */
1219 idx = srcu_read_lock(&kvm->srcu);
1220 if (kvm_write_guest(kvm, gfn_to_gpa(gfn),
1221 &hv->tsc_ref, sizeof(hv->tsc_ref.tsc_sequence)))
1222 hv->hv_tsc_page_status = HV_TSC_PAGE_BROKEN;
1223 srcu_read_unlock(&kvm->srcu, idx);
1224
1225out_unlock:
1226 mutex_unlock(&hv->hv_lock);
1227}
1228
1229
1230static bool hv_check_msr_access(struct kvm_vcpu_hv *hv_vcpu, u32 msr)
1231{
1232 if (!hv_vcpu->enforce_cpuid)
1233 return true;
1234
1235 switch (msr) {
1236 case HV_X64_MSR_GUEST_OS_ID:
1237 case HV_X64_MSR_HYPERCALL:
1238 return hv_vcpu->cpuid_cache.features_eax &
1239 HV_MSR_HYPERCALL_AVAILABLE;
1240 case HV_X64_MSR_VP_RUNTIME:
1241 return hv_vcpu->cpuid_cache.features_eax &
1242 HV_MSR_VP_RUNTIME_AVAILABLE;
1243 case HV_X64_MSR_TIME_REF_COUNT:
1244 return hv_vcpu->cpuid_cache.features_eax &
1245 HV_MSR_TIME_REF_COUNT_AVAILABLE;
1246 case HV_X64_MSR_VP_INDEX:
1247 return hv_vcpu->cpuid_cache.features_eax &
1248 HV_MSR_VP_INDEX_AVAILABLE;
1249 case HV_X64_MSR_RESET:
1250 return hv_vcpu->cpuid_cache.features_eax &
1251 HV_MSR_RESET_AVAILABLE;
1252 case HV_X64_MSR_REFERENCE_TSC:
1253 return hv_vcpu->cpuid_cache.features_eax &
1254 HV_MSR_REFERENCE_TSC_AVAILABLE;
1255 case HV_X64_MSR_SCONTROL:
1256 case HV_X64_MSR_SVERSION:
1257 case HV_X64_MSR_SIEFP:
1258 case HV_X64_MSR_SIMP:
1259 case HV_X64_MSR_EOM:
1260 case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15:
1261 return hv_vcpu->cpuid_cache.features_eax &
1262 HV_MSR_SYNIC_AVAILABLE;
1263 case HV_X64_MSR_STIMER0_CONFIG:
1264 case HV_X64_MSR_STIMER1_CONFIG:
1265 case HV_X64_MSR_STIMER2_CONFIG:
1266 case HV_X64_MSR_STIMER3_CONFIG:
1267 case HV_X64_MSR_STIMER0_COUNT:
1268 case HV_X64_MSR_STIMER1_COUNT:
1269 case HV_X64_MSR_STIMER2_COUNT:
1270 case HV_X64_MSR_STIMER3_COUNT:
1271 return hv_vcpu->cpuid_cache.features_eax &
1272 HV_MSR_SYNTIMER_AVAILABLE;
1273 case HV_X64_MSR_EOI:
1274 case HV_X64_MSR_ICR:
1275 case HV_X64_MSR_TPR:
1276 case HV_X64_MSR_VP_ASSIST_PAGE:
1277 return hv_vcpu->cpuid_cache.features_eax &
1278 HV_MSR_APIC_ACCESS_AVAILABLE;
1280 case HV_X64_MSR_TSC_FREQUENCY:
1281 case HV_X64_MSR_APIC_FREQUENCY:
1282 return hv_vcpu->cpuid_cache.features_eax &
1283 HV_ACCESS_FREQUENCY_MSRS;
1284 case HV_X64_MSR_REENLIGHTENMENT_CONTROL:
1285 case HV_X64_MSR_TSC_EMULATION_CONTROL:
1286 case HV_X64_MSR_TSC_EMULATION_STATUS:
1287 return hv_vcpu->cpuid_cache.features_eax &
1288 HV_ACCESS_REENLIGHTENMENT;
1289 case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
1290 case HV_X64_MSR_CRASH_CTL:
1291 return hv_vcpu->cpuid_cache.features_edx &
1292 HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE;
1293 case HV_X64_MSR_SYNDBG_OPTIONS:
1294 case HV_X64_MSR_SYNDBG_CONTROL ... HV_X64_MSR_SYNDBG_PENDING_BUFFER:
1295 return hv_vcpu->cpuid_cache.features_edx &
1296 HV_FEATURE_DEBUG_MSRS_AVAILABLE;
1297 default:
1298 break;
1299 }
1300
1301 return false;
1302}
1303
1304static int kvm_hv_set_msr_pw(struct kvm_vcpu *vcpu, u32 msr, u64 data,
1305 bool host)
1306{
1307 struct kvm *kvm = vcpu->kvm;
1308 struct kvm_hv *hv = to_kvm_hv(kvm);
1309
1310 if (unlikely(!host && !hv_check_msr_access(to_hv_vcpu(vcpu), msr)))
1311 return 1;
1312
1313 switch (msr) {
1314 case HV_X64_MSR_GUEST_OS_ID:
1315 hv->hv_guest_os_id = data;
/* setting guest os id to zero disables hypercall page */
1317 if (!hv->hv_guest_os_id)
1318 hv->hv_hypercall &= ~HV_X64_MSR_HYPERCALL_ENABLE;
1319 break;
1320 case HV_X64_MSR_HYPERCALL: {
1321 u8 instructions[9];
1322 int i = 0;
1323 u64 addr;
1324
/* if guest os id is not set hypercall should remain disabled */
1326 if (!hv->hv_guest_os_id)
1327 break;
1328 if (!(data & HV_X64_MSR_HYPERCALL_ENABLE)) {
1329 hv->hv_hypercall = data;
1330 break;
1331 }

/*
 * If Xen hypercalls are also enabled for this guest, prefix the
 * Hyper-V hypercall page with "or $0x80000000, %eax".  Bit 31 of the
 * call code is reserved-zero in the Hyper-V ABI, so setting it lets
 * KVM tell Hyper-V hypercalls apart from Xen ones at run time.
 */
1339 if (kvm_xen_hypercall_enabled(kvm)) {
/* or $0x80000000, %eax */
1341 instructions[i++] = 0x0d;
1342 instructions[i++] = 0x00;
1343 instructions[i++] = 0x00;
1344 instructions[i++] = 0x00;
1345 instructions[i++] = 0x80;
1346 }
1347
/* vmcall/vmmcall */
1349 static_call(kvm_x86_patch_hypercall)(vcpu, instructions + i);
1350 i += 3;
1351
/* ret */
1353 ((unsigned char *)instructions)[i++] = 0xc3;
1354
1355 addr = data & HV_X64_MSR_HYPERCALL_PAGE_ADDRESS_MASK;
1356 if (kvm_vcpu_write_guest(vcpu, addr, instructions, i))
1357 return 1;
1358 hv->hv_hypercall = data;
1359 break;
1360 }
1361 case HV_X64_MSR_REFERENCE_TSC:
1362 hv->hv_tsc_page = data;
1363 if (hv->hv_tsc_page & HV_X64_MSR_TSC_REFERENCE_ENABLE) {
1364 if (!host)
1365 hv->hv_tsc_page_status = HV_TSC_PAGE_GUEST_CHANGED;
1366 else
1367 hv->hv_tsc_page_status = HV_TSC_PAGE_HOST_CHANGED;
1368 kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu);
1369 } else {
1370 hv->hv_tsc_page_status = HV_TSC_PAGE_UNSET;
1371 }
1372 break;
1373 case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
1374 return kvm_hv_msr_set_crash_data(kvm,
1375 msr - HV_X64_MSR_CRASH_P0,
1376 data);
1377 case HV_X64_MSR_CRASH_CTL:
1378 if (host)
1379 return kvm_hv_msr_set_crash_ctl(kvm, data);
1380
1381 if (data & HV_CRASH_CTL_CRASH_NOTIFY) {
1382 vcpu_debug(vcpu, "hv crash (0x%llx 0x%llx 0x%llx 0x%llx 0x%llx)\n",
1383 hv->hv_crash_param[0],
1384 hv->hv_crash_param[1],
1385 hv->hv_crash_param[2],
1386 hv->hv_crash_param[3],
1387 hv->hv_crash_param[4]);
1388
/* Send notification about crash to user space */
1390 kvm_make_request(KVM_REQ_HV_CRASH, vcpu);
1391 }
1392 break;
1393 case HV_X64_MSR_RESET:
1394 if (data == 1) {
1395 vcpu_debug(vcpu, "hyper-v reset requested\n");
1396 kvm_make_request(KVM_REQ_HV_RESET, vcpu);
1397 }
1398 break;
1399 case HV_X64_MSR_REENLIGHTENMENT_CONTROL:
1400 hv->hv_reenlightenment_control = data;
1401 break;
1402 case HV_X64_MSR_TSC_EMULATION_CONTROL:
1403 hv->hv_tsc_emulation_control = data;
1404 break;
1405 case HV_X64_MSR_TSC_EMULATION_STATUS:
1406 if (data && !host)
1407 return 1;
1408
1409 hv->hv_tsc_emulation_status = data;
1410 break;
1411 case HV_X64_MSR_TIME_REF_COUNT:
/* read-only, but still ignore it if host-initiated */
1413 if (!host)
1414 return 1;
1415 break;
1416 case HV_X64_MSR_SYNDBG_OPTIONS:
1417 case HV_X64_MSR_SYNDBG_CONTROL ... HV_X64_MSR_SYNDBG_PENDING_BUFFER:
1418 return syndbg_set_msr(vcpu, msr, data, host);
1419 default:
1420 vcpu_unimpl(vcpu, "Hyper-V unhandled wrmsr: 0x%x data 0x%llx\n",
1421 msr, data);
1422 return 1;
1423 }
1424 return 0;
1425}
1426
/* Calculate cpu time spent by current task in 100ns units */
1428static u64 current_task_runtime_100ns(void)
1429{
1430 u64 utime, stime;
1431
1432 task_cputime_adjusted(current, &utime, &stime);
1433
1434 return div_u64(utime + stime, 100);
1435}
1436
1437static int kvm_hv_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host)
1438{
1439 struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
1440
1441 if (unlikely(!host && !hv_check_msr_access(hv_vcpu, msr)))
1442 return 1;
1443
1444 switch (msr) {
1445 case HV_X64_MSR_VP_INDEX: {
1446 struct kvm_hv *hv = to_kvm_hv(vcpu->kvm);
1447 u32 new_vp_index = (u32)data;
1448
1449 if (!host || new_vp_index >= KVM_MAX_VCPUS)
1450 return 1;
1451
1452 if (new_vp_index == hv_vcpu->vp_index)
1453 return 0;

/*
 * VP index is initialized to vcpu_idx in kvm_hv_vcpu_init(), so the
 * two initially match.  num_mismatched_vp_indexes counts vCPUs whose
 * vp_index differs from vcpu_idx: bump it when vp_index stops
 * matching vcpu_idx and drop it when the new value matches again.
 */
1461 if (hv_vcpu->vp_index == vcpu->vcpu_idx)
1462 atomic_inc(&hv->num_mismatched_vp_indexes);
1463 else if (new_vp_index == vcpu->vcpu_idx)
1464 atomic_dec(&hv->num_mismatched_vp_indexes);
1465
1466 hv_vcpu->vp_index = new_vp_index;
1467 break;
1468 }
1469 case HV_X64_MSR_VP_ASSIST_PAGE: {
1470 u64 gfn;
1471 unsigned long addr;
1472
1473 if (!(data & HV_X64_MSR_VP_ASSIST_PAGE_ENABLE)) {
1474 hv_vcpu->hv_vapic = data;
1475 if (kvm_lapic_set_pv_eoi(vcpu, 0, 0))
1476 return 1;
1477 break;
1478 }
1479 gfn = data >> HV_X64_MSR_VP_ASSIST_PAGE_ADDRESS_SHIFT;
1480 addr = kvm_vcpu_gfn_to_hva(vcpu, gfn);
1481 if (kvm_is_error_hva(addr))
1482 return 1;
1483
/*
 * Clear the apic_assist portion of struct hv_vp_assist_page only;
 * the rest may hold valuable data that needs to be preserved, e.g.
 * across migration.
 */
1489 if (__put_user(0, (u32 __user *)addr))
1490 return 1;
1491 hv_vcpu->hv_vapic = data;
1492 kvm_vcpu_mark_page_dirty(vcpu, gfn);
1493 if (kvm_lapic_set_pv_eoi(vcpu,
1494 gfn_to_gpa(gfn) | KVM_MSR_ENABLED,
1495 sizeof(struct hv_vp_assist_page)))
1496 return 1;
1497 break;
1498 }
1499 case HV_X64_MSR_EOI:
1500 return kvm_hv_vapic_msr_write(vcpu, APIC_EOI, data);
1501 case HV_X64_MSR_ICR:
1502 return kvm_hv_vapic_msr_write(vcpu, APIC_ICR, data);
1503 case HV_X64_MSR_TPR:
1504 return kvm_hv_vapic_msr_write(vcpu, APIC_TASKPRI, data);
1505 case HV_X64_MSR_VP_RUNTIME:
1506 if (!host)
1507 return 1;
1508 hv_vcpu->runtime_offset = data - current_task_runtime_100ns();
1509 break;
1510 case HV_X64_MSR_SCONTROL:
1511 case HV_X64_MSR_SVERSION:
1512 case HV_X64_MSR_SIEFP:
1513 case HV_X64_MSR_SIMP:
1514 case HV_X64_MSR_EOM:
1515 case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15:
1516 return synic_set_msr(to_hv_synic(vcpu), msr, data, host);
1517 case HV_X64_MSR_STIMER0_CONFIG:
1518 case HV_X64_MSR_STIMER1_CONFIG:
1519 case HV_X64_MSR_STIMER2_CONFIG:
1520 case HV_X64_MSR_STIMER3_CONFIG: {
1521 int timer_index = (msr - HV_X64_MSR_STIMER0_CONFIG)/2;
1522
1523 return stimer_set_config(to_hv_stimer(vcpu, timer_index),
1524 data, host);
1525 }
1526 case HV_X64_MSR_STIMER0_COUNT:
1527 case HV_X64_MSR_STIMER1_COUNT:
1528 case HV_X64_MSR_STIMER2_COUNT:
1529 case HV_X64_MSR_STIMER3_COUNT: {
1530 int timer_index = (msr - HV_X64_MSR_STIMER0_COUNT)/2;
1531
1532 return stimer_set_count(to_hv_stimer(vcpu, timer_index),
1533 data, host);
1534 }
1535 case HV_X64_MSR_TSC_FREQUENCY:
1536 case HV_X64_MSR_APIC_FREQUENCY:
/* read-only, but still ignore it if host-initiated */
1538 if (!host)
1539 return 1;
1540 break;
1541 default:
1542 vcpu_unimpl(vcpu, "Hyper-V unhandled wrmsr: 0x%x data 0x%llx\n",
1543 msr, data);
1544 return 1;
1545 }
1546
1547 return 0;
1548}
1549
1550static int kvm_hv_get_msr_pw(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata,
1551 bool host)
1552{
1553 u64 data = 0;
1554 struct kvm *kvm = vcpu->kvm;
1555 struct kvm_hv *hv = to_kvm_hv(kvm);
1556
1557 if (unlikely(!host && !hv_check_msr_access(to_hv_vcpu(vcpu), msr)))
1558 return 1;
1559
1560 switch (msr) {
1561 case HV_X64_MSR_GUEST_OS_ID:
1562 data = hv->hv_guest_os_id;
1563 break;
1564 case HV_X64_MSR_HYPERCALL:
1565 data = hv->hv_hypercall;
1566 break;
1567 case HV_X64_MSR_TIME_REF_COUNT:
1568 data = get_time_ref_counter(kvm);
1569 break;
1570 case HV_X64_MSR_REFERENCE_TSC:
1571 data = hv->hv_tsc_page;
1572 break;
1573 case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
1574 return kvm_hv_msr_get_crash_data(kvm,
1575 msr - HV_X64_MSR_CRASH_P0,
1576 pdata);
1577 case HV_X64_MSR_CRASH_CTL:
1578 return kvm_hv_msr_get_crash_ctl(kvm, pdata);
1579 case HV_X64_MSR_RESET:
1580 data = 0;
1581 break;
1582 case HV_X64_MSR_REENLIGHTENMENT_CONTROL:
1583 data = hv->hv_reenlightenment_control;
1584 break;
1585 case HV_X64_MSR_TSC_EMULATION_CONTROL:
1586 data = hv->hv_tsc_emulation_control;
1587 break;
1588 case HV_X64_MSR_TSC_EMULATION_STATUS:
1589 data = hv->hv_tsc_emulation_status;
1590 break;
1591 case HV_X64_MSR_SYNDBG_OPTIONS:
1592 case HV_X64_MSR_SYNDBG_CONTROL ... HV_X64_MSR_SYNDBG_PENDING_BUFFER:
1593 return syndbg_get_msr(vcpu, msr, pdata, host);
1594 default:
1595 vcpu_unimpl(vcpu, "Hyper-V unhandled rdmsr: 0x%x\n", msr);
1596 return 1;
1597 }
1598
1599 *pdata = data;
1600 return 0;
1601}
1602
1603static int kvm_hv_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata,
1604 bool host)
1605{
1606 u64 data = 0;
1607 struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
1608
1609 if (unlikely(!host && !hv_check_msr_access(hv_vcpu, msr)))
1610 return 1;
1611
1612 switch (msr) {
1613 case HV_X64_MSR_VP_INDEX:
1614 data = hv_vcpu->vp_index;
1615 break;
1616 case HV_X64_MSR_EOI:
1617 return kvm_hv_vapic_msr_read(vcpu, APIC_EOI, pdata);
1618 case HV_X64_MSR_ICR:
1619 return kvm_hv_vapic_msr_read(vcpu, APIC_ICR, pdata);
1620 case HV_X64_MSR_TPR:
1621 return kvm_hv_vapic_msr_read(vcpu, APIC_TASKPRI, pdata);
1622 case HV_X64_MSR_VP_ASSIST_PAGE:
1623 data = hv_vcpu->hv_vapic;
1624 break;
1625 case HV_X64_MSR_VP_RUNTIME:
1626 data = current_task_runtime_100ns() + hv_vcpu->runtime_offset;
1627 break;
1628 case HV_X64_MSR_SCONTROL:
1629 case HV_X64_MSR_SVERSION:
1630 case HV_X64_MSR_SIEFP:
1631 case HV_X64_MSR_SIMP:
1632 case HV_X64_MSR_EOM:
1633 case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15:
1634 return synic_get_msr(to_hv_synic(vcpu), msr, pdata, host);
1635 case HV_X64_MSR_STIMER0_CONFIG:
1636 case HV_X64_MSR_STIMER1_CONFIG:
1637 case HV_X64_MSR_STIMER2_CONFIG:
1638 case HV_X64_MSR_STIMER3_CONFIG: {
1639 int timer_index = (msr - HV_X64_MSR_STIMER0_CONFIG)/2;
1640
1641 return stimer_get_config(to_hv_stimer(vcpu, timer_index),
1642 pdata);
1643 }
1644 case HV_X64_MSR_STIMER0_COUNT:
1645 case HV_X64_MSR_STIMER1_COUNT:
1646 case HV_X64_MSR_STIMER2_COUNT:
1647 case HV_X64_MSR_STIMER3_COUNT: {
1648 int timer_index = (msr - HV_X64_MSR_STIMER0_COUNT)/2;
1649
1650 return stimer_get_count(to_hv_stimer(vcpu, timer_index),
1651 pdata);
1652 }
1653 case HV_X64_MSR_TSC_FREQUENCY:
1654 data = (u64)vcpu->arch.virtual_tsc_khz * 1000;
1655 break;
1656 case HV_X64_MSR_APIC_FREQUENCY:
1657 data = APIC_BUS_FREQUENCY;
1658 break;
1659 default:
1660 vcpu_unimpl(vcpu, "Hyper-V unhandled rdmsr: 0x%x\n", msr);
1661 return 1;
1662 }
1663 *pdata = data;
1664 return 0;
1665}
1666
1667int kvm_hv_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host)
1668{
1669 struct kvm_hv *hv = to_kvm_hv(vcpu->kvm);
1670
1671 if (!host && !vcpu->arch.hyperv_enabled)
1672 return 1;
1673
1674 if (!to_hv_vcpu(vcpu)) {
1675 if (kvm_hv_vcpu_init(vcpu))
1676 return 1;
1677 }
1678
1679 if (kvm_hv_msr_partition_wide(msr)) {
1680 int r;
1681
1682 mutex_lock(&hv->hv_lock);
1683 r = kvm_hv_set_msr_pw(vcpu, msr, data, host);
1684 mutex_unlock(&hv->hv_lock);
1685 return r;
1686 } else
1687 return kvm_hv_set_msr(vcpu, msr, data, host);
1688}
1689
1690int kvm_hv_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata, bool host)
1691{
1692 struct kvm_hv *hv = to_kvm_hv(vcpu->kvm);
1693
1694 if (!host && !vcpu->arch.hyperv_enabled)
1695 return 1;
1696
1697 if (!to_hv_vcpu(vcpu)) {
1698 if (kvm_hv_vcpu_init(vcpu))
1699 return 1;
1700 }
1701
1702 if (kvm_hv_msr_partition_wide(msr)) {
1703 int r;
1704
1705 mutex_lock(&hv->hv_lock);
1706 r = kvm_hv_get_msr_pw(vcpu, msr, pdata, host);
1707 mutex_unlock(&hv->hv_lock);
1708 return r;
1709 } else
1710 return kvm_hv_get_msr(vcpu, msr, pdata, host);
1711}
1712
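/*
 * Convert a Hyper-V sparse VP set (valid_bank_mask plus 64-bit banks)
 * into a mask of vcpus.  When every vp_index matches its vcpu index,
 * the unpacked banks are returned directly; otherwise vcpu_bitmap is
 * built by looking up each vcpu's VP index.
 */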
1713static __always_inline unsigned long *sparse_set_to_vcpu_mask(
1714 struct kvm *kvm, u64 *sparse_banks, u64 valid_bank_mask,
1715 u64 *vp_bitmap, unsigned long *vcpu_bitmap)
1716{
1717 struct kvm_hv *hv = to_kvm_hv(kvm);
1718 struct kvm_vcpu *vcpu;
1719 int bank, sbank = 0;
1720 unsigned long i;
1721
1722 memset(vp_bitmap, 0,
1723 KVM_HV_MAX_SPARSE_VCPU_SET_BITS * sizeof(*vp_bitmap));
1724 for_each_set_bit(bank, (unsigned long *)&valid_bank_mask,
1725 KVM_HV_MAX_SPARSE_VCPU_SET_BITS)
1726 vp_bitmap[bank] = sparse_banks[sbank++];
1727
1728 if (likely(!atomic_read(&hv->num_mismatched_vp_indexes))) {
/* for all vcpus vp_index == vcpu_idx */
1730 return (unsigned long *)vp_bitmap;
1731 }
1732
1733 bitmap_zero(vcpu_bitmap, KVM_MAX_VCPUS);
1734 kvm_for_each_vcpu(i, vcpu, kvm) {
1735 if (test_bit(kvm_hv_get_vpindex(vcpu), (unsigned long *)vp_bitmap))
1736 __set_bit(i, vcpu_bitmap);
1737 }
1738 return vcpu_bitmap;
1739}
1740
1741struct kvm_hv_hcall {
1742 u64 param;
1743 u64 ingpa;
1744 u64 outgpa;
1745 u16 code;
1746 u16 rep_cnt;
1747 u16 rep_idx;
1748 bool fast;
1749 bool rep;
1750 sse128_t xmm[HV_HYPERCALL_MAX_XMM_REGISTERS];
1751};
1752
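/*
 * Handler for the HvFlushVirtualAddress{Space,List}(Ex) hypercalls:
 * parse the request from registers ("fast" input) or guest memory,
 * derive the target vcpu set and issue KVM_REQ_TLB_FLUSH_GUEST to the
 * affected vcpus (or to all of them).
 */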
1753static u64 kvm_hv_flush_tlb(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc, bool ex)
1754{
1755 int i;
1756 gpa_t gpa;
1757 struct kvm *kvm = vcpu->kvm;
1758 struct hv_tlb_flush_ex flush_ex;
1759 struct hv_tlb_flush flush;
1760 u64 vp_bitmap[KVM_HV_MAX_SPARSE_VCPU_SET_BITS];
1761 DECLARE_BITMAP(vcpu_bitmap, KVM_MAX_VCPUS);
1762 unsigned long *vcpu_mask;
1763 u64 valid_bank_mask;
1764 u64 sparse_banks[64];
1765 int sparse_banks_len;
1766 bool all_cpus;
1767
1768 if (!ex) {
1769 if (hc->fast) {
1770 flush.address_space = hc->ingpa;
1771 flush.flags = hc->outgpa;
1772 flush.processor_mask = sse128_lo(hc->xmm[0]);
1773 } else {
1774 if (unlikely(kvm_read_guest(kvm, hc->ingpa,
1775 &flush, sizeof(flush))))
1776 return HV_STATUS_INVALID_HYPERCALL_INPUT;
1777 }
1778
1779 trace_kvm_hv_flush_tlb(flush.processor_mask,
1780 flush.address_space, flush.flags);
1781
1782 valid_bank_mask = BIT_ULL(0);
1783 sparse_banks[0] = flush.processor_mask;

/*
 * Work around a possible WS2012 bug: it sends hypercalls with
 * processor_mask = 0x0 and HV_FLUSH_ALL_PROCESSORS clear, while also
 * expecting us to flush something and crashing if we don't.  Treat
 * processor_mask == 0 the same as HV_FLUSH_ALL_PROCESSORS.
 */
1792 all_cpus = (flush.flags & HV_FLUSH_ALL_PROCESSORS) ||
1793 flush.processor_mask == 0;
1794 } else {
1795 if (hc->fast) {
1796 flush_ex.address_space = hc->ingpa;
1797 flush_ex.flags = hc->outgpa;
1798 memcpy(&flush_ex.hv_vp_set,
1799 &hc->xmm[0], sizeof(hc->xmm[0]));
1800 } else {
1801 if (unlikely(kvm_read_guest(kvm, hc->ingpa, &flush_ex,
1802 sizeof(flush_ex))))
1803 return HV_STATUS_INVALID_HYPERCALL_INPUT;
1804 }
1805
1806 trace_kvm_hv_flush_tlb_ex(flush_ex.hv_vp_set.valid_bank_mask,
1807 flush_ex.hv_vp_set.format,
1808 flush_ex.address_space,
1809 flush_ex.flags);
1810
1811 valid_bank_mask = flush_ex.hv_vp_set.valid_bank_mask;
1812 all_cpus = flush_ex.hv_vp_set.format !=
1813 HV_GENERIC_SET_SPARSE_4K;
1814
1815 sparse_banks_len = bitmap_weight((unsigned long *)&valid_bank_mask, 64);
1816
1817 if (!sparse_banks_len && !all_cpus)
1818 goto ret_success;
1819
1820 if (!all_cpus) {
1821 if (hc->fast) {
1822 if (sparse_banks_len > HV_HYPERCALL_MAX_XMM_REGISTERS - 1)
1823 return HV_STATUS_INVALID_HYPERCALL_INPUT;
1824 for (i = 0; i < sparse_banks_len; i += 2) {
1825 sparse_banks[i] = sse128_lo(hc->xmm[i / 2 + 1]);
1826 sparse_banks[i + 1] = sse128_hi(hc->xmm[i / 2 + 1]);
1827 }
1828 } else {
1829 gpa = hc->ingpa + offsetof(struct hv_tlb_flush_ex,
1830 hv_vp_set.bank_contents);
1831 if (unlikely(kvm_read_guest(kvm, gpa, sparse_banks,
1832 sparse_banks_len *
1833 sizeof(sparse_banks[0]))))
1834 return HV_STATUS_INVALID_HYPERCALL_INPUT;
1835 }
1836 }
1837 }
1838
/*
 * vcpu->arch.cr3 may not be up-to-date for running vCPUs, so it can't
 * be analyzed here; flush the whole guest TLB regardless of the
 * specified address space.
 */
1843 if (all_cpus) {
1844 kvm_make_all_cpus_request(kvm, KVM_REQ_TLB_FLUSH_GUEST);
1845 } else {
1846 vcpu_mask = sparse_set_to_vcpu_mask(kvm, sparse_banks, valid_bank_mask,
1847 vp_bitmap, vcpu_bitmap);
1848
1849 kvm_make_vcpus_request_mask(kvm, KVM_REQ_TLB_FLUSH_GUEST,
1850 vcpu_mask);
1851 }
1852
1853ret_success:
/* We always do a full TLB flush, so report 'Reps completed' = 'Rep Count'. */
1855 return (u64)HV_STATUS_SUCCESS |
1856 ((u64)hc->rep_cnt << HV_HYPERCALL_REP_COMP_OFFSET);
1857}
1858
1859static void kvm_send_ipi_to_many(struct kvm *kvm, u32 vector,
1860 unsigned long *vcpu_bitmap)
1861{
1862 struct kvm_lapic_irq irq = {
1863 .delivery_mode = APIC_DM_FIXED,
1864 .vector = vector
1865 };
1866 struct kvm_vcpu *vcpu;
1867 unsigned long i;
1868
1869 kvm_for_each_vcpu(i, vcpu, kvm) {
1870 if (vcpu_bitmap && !test_bit(i, vcpu_bitmap))
1871 continue;
1872
/* We fail only when APIC is disabled */
1874 kvm_apic_set_irq(vcpu, &irq, NULL);
1875 }
1876}
1877
1878static u64 kvm_hv_send_ipi(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc, bool ex)
1879{
1880 struct kvm *kvm = vcpu->kvm;
1881 struct hv_send_ipi_ex send_ipi_ex;
1882 struct hv_send_ipi send_ipi;
1883 u64 vp_bitmap[KVM_HV_MAX_SPARSE_VCPU_SET_BITS];
1884 DECLARE_BITMAP(vcpu_bitmap, KVM_MAX_VCPUS);
1885 unsigned long *vcpu_mask;
1886 unsigned long valid_bank_mask;
1887 u64 sparse_banks[64];
1888 int sparse_banks_len;
1889 u32 vector;
1890 bool all_cpus;
1891
1892 if (!ex) {
1893 if (!hc->fast) {
1894 if (unlikely(kvm_read_guest(kvm, hc->ingpa, &send_ipi,
1895 sizeof(send_ipi))))
1896 return HV_STATUS_INVALID_HYPERCALL_INPUT;
1897 sparse_banks[0] = send_ipi.cpu_mask;
1898 vector = send_ipi.vector;
1899 } else {
/* The 'reserved' part of hv_send_ipi (upper 32 bits) must be zero. */
1901 if (unlikely(hc->ingpa >> 32 != 0))
1902 return HV_STATUS_INVALID_HYPERCALL_INPUT;
1903 sparse_banks[0] = hc->outgpa;
1904 vector = (u32)hc->ingpa;
1905 }
1906 all_cpus = false;
1907 valid_bank_mask = BIT_ULL(0);
1908
1909 trace_kvm_hv_send_ipi(vector, sparse_banks[0]);
1910 } else {
1911 if (unlikely(kvm_read_guest(kvm, hc->ingpa, &send_ipi_ex,
1912 sizeof(send_ipi_ex))))
1913 return HV_STATUS_INVALID_HYPERCALL_INPUT;
1914
1915 trace_kvm_hv_send_ipi_ex(send_ipi_ex.vector,
1916 send_ipi_ex.vp_set.format,
1917 send_ipi_ex.vp_set.valid_bank_mask);
1918
1919 vector = send_ipi_ex.vector;
1920 valid_bank_mask = send_ipi_ex.vp_set.valid_bank_mask;
1921 sparse_banks_len = bitmap_weight(&valid_bank_mask, 64) *
1922 sizeof(sparse_banks[0]);
1923
1924 all_cpus = send_ipi_ex.vp_set.format == HV_GENERIC_SET_ALL;
1925
1926 if (all_cpus)
1927 goto check_and_send_ipi;
1928
1929 if (!sparse_banks_len)
1930 goto ret_success;
1931
1932 if (kvm_read_guest(kvm,
1933 hc->ingpa + offsetof(struct hv_send_ipi_ex,
1934 vp_set.bank_contents),
1935 sparse_banks,
1936 sparse_banks_len))
1937 return HV_STATUS_INVALID_HYPERCALL_INPUT;
1938 }
1939
1940check_and_send_ipi:
1941 if ((vector < HV_IPI_LOW_VECTOR) || (vector > HV_IPI_HIGH_VECTOR))
1942 return HV_STATUS_INVALID_HYPERCALL_INPUT;
1943
1944 vcpu_mask = all_cpus ? NULL :
1945 sparse_set_to_vcpu_mask(kvm, sparse_banks, valid_bank_mask,
1946 vp_bitmap, vcpu_bitmap);
1947
1948 kvm_send_ipi_to_many(kvm, vector, vcpu_mask);
1949
1950ret_success:
1951 return HV_STATUS_SUCCESS;
1952}
1953
1954void kvm_hv_set_cpuid(struct kvm_vcpu *vcpu)
1955{
1956 struct kvm_cpuid_entry2 *entry;
1957 struct kvm_vcpu_hv *hv_vcpu;
1958
1959 entry = kvm_find_cpuid_entry(vcpu, HYPERV_CPUID_INTERFACE, 0);
1960 if (entry && entry->eax == HYPERV_CPUID_SIGNATURE_EAX) {
1961 vcpu->arch.hyperv_enabled = true;
1962 } else {
1963 vcpu->arch.hyperv_enabled = false;
1964 return;
1965 }
1966
1967 if (!to_hv_vcpu(vcpu) && kvm_hv_vcpu_init(vcpu))
1968 return;
1969
1970 hv_vcpu = to_hv_vcpu(vcpu);
1971
1972 entry = kvm_find_cpuid_entry(vcpu, HYPERV_CPUID_FEATURES, 0);
1973 if (entry) {
1974 hv_vcpu->cpuid_cache.features_eax = entry->eax;
1975 hv_vcpu->cpuid_cache.features_ebx = entry->ebx;
1976 hv_vcpu->cpuid_cache.features_edx = entry->edx;
1977 } else {
1978 hv_vcpu->cpuid_cache.features_eax = 0;
1979 hv_vcpu->cpuid_cache.features_ebx = 0;
1980 hv_vcpu->cpuid_cache.features_edx = 0;
1981 }
1982
1983 entry = kvm_find_cpuid_entry(vcpu, HYPERV_CPUID_ENLIGHTMENT_INFO, 0);
1984 if (entry) {
1985 hv_vcpu->cpuid_cache.enlightenments_eax = entry->eax;
1986 hv_vcpu->cpuid_cache.enlightenments_ebx = entry->ebx;
1987 } else {
1988 hv_vcpu->cpuid_cache.enlightenments_eax = 0;
1989 hv_vcpu->cpuid_cache.enlightenments_ebx = 0;
1990 }
1991
1992 entry = kvm_find_cpuid_entry(vcpu, HYPERV_CPUID_SYNDBG_PLATFORM_CAPABILITIES, 0);
1993 if (entry)
1994 hv_vcpu->cpuid_cache.syndbg_cap_eax = entry->eax;
1995 else
1996 hv_vcpu->cpuid_cache.syndbg_cap_eax = 0;
1997}
1998
1999int kvm_hv_set_enforce_cpuid(struct kvm_vcpu *vcpu, bool enforce)
2000{
2001 struct kvm_vcpu_hv *hv_vcpu;
2002 int ret = 0;
2003
2004 if (!to_hv_vcpu(vcpu)) {
2005 if (enforce) {
2006 ret = kvm_hv_vcpu_init(vcpu);
2007 if (ret)
2008 return ret;
2009 } else {
2010 return 0;
2011 }
2012 }
2013
2014 hv_vcpu = to_hv_vcpu(vcpu);
2015 hv_vcpu->enforce_cpuid = enforce;
2016
2017 return ret;
2018}
2019
2020bool kvm_hv_hypercall_enabled(struct kvm_vcpu *vcpu)
2021{
2022 return vcpu->arch.hyperv_enabled && to_kvm_hv(vcpu->kvm)->hv_guest_os_id;
2023}
2024
2025static void kvm_hv_hypercall_set_result(struct kvm_vcpu *vcpu, u64 result)
2026{
2027 bool longmode;
2028
2029 longmode = is_64_bit_hypercall(vcpu);
2030 if (longmode)
2031 kvm_rax_write(vcpu, result);
2032 else {
2033 kvm_rdx_write(vcpu, result >> 32);
2034 kvm_rax_write(vcpu, result & 0xffffffff);
2035 }
2036}
2037
2038static int kvm_hv_hypercall_complete(struct kvm_vcpu *vcpu, u64 result)
2039{
2040 trace_kvm_hv_hypercall_done(result);
2041 kvm_hv_hypercall_set_result(vcpu, result);
2042 ++vcpu->stat.hypercalls;
2043 return kvm_skip_emulated_instruction(vcpu);
2044}
2045
2046static int kvm_hv_hypercall_complete_userspace(struct kvm_vcpu *vcpu)
2047{
2048 return kvm_hv_hypercall_complete(vcpu, vcpu->run->hyperv.u.hcall.result);
2049}
2050
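/*
 * HvSignalEvent handler: fetch the connection id (from a register or
 * from guest memory), look it up in conn_to_evt and signal the eventfd
 * that userspace registered for it via KVM_HYPERV_EVENTFD.
 */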
2051static u16 kvm_hvcall_signal_event(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc)
2052{
2053 struct kvm_hv *hv = to_kvm_hv(vcpu->kvm);
2054 struct eventfd_ctx *eventfd;
2055
2056 if (unlikely(!hc->fast)) {
2057 int ret;
2058 gpa_t gpa = hc->ingpa;
2059
2060 if ((gpa & (__alignof__(hc->ingpa) - 1)) ||
2061 offset_in_page(gpa) + sizeof(hc->ingpa) > PAGE_SIZE)
2062 return HV_STATUS_INVALID_ALIGNMENT;
2063
2064 ret = kvm_vcpu_read_guest(vcpu, gpa,
2065 &hc->ingpa, sizeof(hc->ingpa));
2066 if (ret < 0)
2067 return HV_STATUS_INVALID_ALIGNMENT;
2068 }
2069
/*
 * Per spec, bits 32-47 contain the extra "flag number".  However, we
 * have no use for it, and in all known use cases it is zero, so just
 * report lookup failure if it isn't.
 */
2075 if (hc->ingpa & 0xffff00000000ULL)
2076 return HV_STATUS_INVALID_PORT_ID;
2077
2078 if (hc->ingpa & ~KVM_HYPERV_CONN_ID_MASK)
2079 return HV_STATUS_INVALID_HYPERCALL_INPUT;
2080
2081
2082 rcu_read_lock();
2083 eventfd = idr_find(&hv->conn_to_evt, hc->ingpa);
2084 rcu_read_unlock();
2085 if (!eventfd)
2086 return HV_STATUS_INVALID_PORT_ID;
2087
2088 eventfd_signal(eventfd, 1);
2089 return HV_STATUS_SUCCESS;
2090}
2091
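/*
 * Hypercalls for which KVM consumes "XMM fast" input, i.e. additional
 * parameters passed in XMM registers instead of guest memory; for
 * these the XMM registers are snapshotted before dispatching the call.
 */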
2092static bool is_xmm_fast_hypercall(struct kvm_hv_hcall *hc)
2093{
2094 switch (hc->code) {
2095 case HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST:
2096 case HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE:
2097 case HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX:
2098 case HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX:
2099 return true;
2100 }
2101
2102 return false;
2103}
2104
2105static void kvm_hv_hypercall_read_xmm(struct kvm_hv_hcall *hc)
2106{
2107 int reg;
2108
2109 kvm_fpu_get();
2110 for (reg = 0; reg < HV_HYPERCALL_MAX_XMM_REGISTERS; reg++)
2111 _kvm_read_sse_reg(reg, &hc->xmm[reg]);
2112 kvm_fpu_put();
2113}
2114
2115static bool hv_check_hypercall_access(struct kvm_vcpu_hv *hv_vcpu, u16 code)
2116{
2117 if (!hv_vcpu->enforce_cpuid)
2118 return true;
2119
2120 switch (code) {
2121 case HVCALL_NOTIFY_LONG_SPIN_WAIT:
2122 return hv_vcpu->cpuid_cache.enlightenments_ebx &&
2123 hv_vcpu->cpuid_cache.enlightenments_ebx != U32_MAX;
2124 case HVCALL_POST_MESSAGE:
2125 return hv_vcpu->cpuid_cache.features_ebx & HV_POST_MESSAGES;
2126 case HVCALL_SIGNAL_EVENT:
2127 return hv_vcpu->cpuid_cache.features_ebx & HV_SIGNAL_EVENTS;
2128 case HVCALL_POST_DEBUG_DATA:
2129 case HVCALL_RETRIEVE_DEBUG_DATA:
2130 case HVCALL_RESET_DEBUG_SESSION:
/*
 * Return 'true' when SynDBG is disabled so that the resulting error
 * code is HV_STATUS_INVALID_HYPERCALL_CODE rather than
 * HV_STATUS_ACCESS_DENIED.
 */
2135 return !kvm_hv_is_syndbg_enabled(hv_vcpu->vcpu) ||
2136 hv_vcpu->cpuid_cache.features_ebx & HV_DEBUGGING;
2137 case HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX:
2138 case HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX:
2139 if (!(hv_vcpu->cpuid_cache.enlightenments_eax &
2140 HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED))
2141 return false;
2142 fallthrough;
2143 case HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST:
2144 case HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE:
2145 return hv_vcpu->cpuid_cache.enlightenments_eax &
2146 HV_X64_REMOTE_TLB_FLUSH_RECOMMENDED;
2147 case HVCALL_SEND_IPI_EX:
2148 if (!(hv_vcpu->cpuid_cache.enlightenments_eax &
2149 HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED))
2150 return false;
2151 fallthrough;
2152 case HVCALL_SEND_IPI:
2153 return hv_vcpu->cpuid_cache.enlightenments_eax &
2154 HV_X64_CLUSTER_IPI_RECOMMENDED;
2155 default:
2156 break;
2157 }
2158
2159 return true;
2160}
2161
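/*
 * Top-level Hyper-V hypercall dispatcher: gather the call code and
 * parameters from the 32- or 64-bit register ABI, enforce CPUID-based
 * access restrictions and either complete the call in the kernel or
 * exit to userspace (e.g. for HvPostMessage).
 */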
2162int kvm_hv_hypercall(struct kvm_vcpu *vcpu)
2163{
2164 struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
2165 struct kvm_hv_hcall hc;
2166 u64 ret = HV_STATUS_SUCCESS;
2167
/*
 * The hypercall instruction raises #UD when executed in real mode or
 * from CPL > 0, per the Hyper-V TLFS.
 */
2172 if (static_call(kvm_x86_get_cpl)(vcpu) != 0 || !is_protmode(vcpu)) {
2173 kvm_queue_exception(vcpu, UD_VECTOR);
2174 return 1;
2175 }
2176
2177#ifdef CONFIG_X86_64
2178 if (is_64_bit_hypercall(vcpu)) {
2179 hc.param = kvm_rcx_read(vcpu);
2180 hc.ingpa = kvm_rdx_read(vcpu);
2181 hc.outgpa = kvm_r8_read(vcpu);
2182 } else
2183#endif
2184 {
2185 hc.param = ((u64)kvm_rdx_read(vcpu) << 32) |
2186 (kvm_rax_read(vcpu) & 0xffffffff);
2187 hc.ingpa = ((u64)kvm_rbx_read(vcpu) << 32) |
2188 (kvm_rcx_read(vcpu) & 0xffffffff);
2189 hc.outgpa = ((u64)kvm_rdi_read(vcpu) << 32) |
2190 (kvm_rsi_read(vcpu) & 0xffffffff);
2191 }
2192
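	/*
	 * Hypercall input value layout as consumed below (per the TLFS):
	 * bits 15:0 hold the call code, bit 16 marks a "fast" call with
	 * register-based input, bits 43:32 carry the rep count and bits
	 * 59:48 the starting rep index.  A call is a rep hypercall if either
	 * rep field is non-zero.
	 */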
	hc.code = hc.param & 0xffff;
	hc.fast = !!(hc.param & HV_HYPERCALL_FAST_BIT);
	hc.rep_cnt = (hc.param >> HV_HYPERCALL_REP_COMP_OFFSET) & 0xfff;
	hc.rep_idx = (hc.param >> HV_HYPERCALL_REP_START_OFFSET) & 0xfff;
	hc.rep = !!(hc.rep_cnt || hc.rep_idx);

	trace_kvm_hv_hypercall(hc.code, hc.fast, hc.rep_cnt, hc.rep_idx,
			       hc.ingpa, hc.outgpa);

	if (unlikely(!hv_check_hypercall_access(hv_vcpu, hc.code))) {
		ret = HV_STATUS_ACCESS_DENIED;
		goto hypercall_complete;
	}

	if (hc.fast && is_xmm_fast_hypercall(&hc)) {
		if (unlikely(hv_vcpu->enforce_cpuid &&
			     !(hv_vcpu->cpuid_cache.features_edx &
			       HV_X64_HYPERCALL_XMM_INPUT_AVAILABLE))) {
			kvm_queue_exception(vcpu, UD_VECTOR);
			return 1;
		}

		kvm_hv_hypercall_read_xmm(&hc);
	}

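	/*
	 * Calls that KVM forwards to userspace (HVCALL_POST_MESSAGE, the
	 * SynDBG hypercalls, and HVCALL_SIGNAL_EVENT for unknown connection
	 * ids) fill vcpu->run and return 0 without completing the hypercall;
	 * everything else sets 'ret' and reaches kvm_hv_hypercall_complete()
	 * below.
	 */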
	switch (hc.code) {
	case HVCALL_NOTIFY_LONG_SPIN_WAIT:
		if (unlikely(hc.rep)) {
			ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
			break;
		}
		kvm_vcpu_on_spin(vcpu, true);
		break;
	case HVCALL_SIGNAL_EVENT:
		if (unlikely(hc.rep)) {
			ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
			break;
		}
		ret = kvm_hvcall_signal_event(vcpu, &hc);
		if (ret != HV_STATUS_INVALID_PORT_ID)
			break;
		fallthrough;	/* userspace may know this conn_id */
	case HVCALL_POST_MESSAGE:
		/* don't bother userspace if it has no way to handle it */
		if (unlikely(hc.rep || !to_hv_synic(vcpu)->active)) {
			ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
			break;
		}
		vcpu->run->exit_reason = KVM_EXIT_HYPERV;
		vcpu->run->hyperv.type = KVM_EXIT_HYPERV_HCALL;
		vcpu->run->hyperv.u.hcall.input = hc.param;
		vcpu->run->hyperv.u.hcall.params[0] = hc.ingpa;
		vcpu->run->hyperv.u.hcall.params[1] = hc.outgpa;
		vcpu->arch.complete_userspace_io =
				kvm_hv_hypercall_complete_userspace;
		return 0;
	case HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST:
		if (unlikely(!hc.rep_cnt || hc.rep_idx)) {
			ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
			break;
		}
		ret = kvm_hv_flush_tlb(vcpu, &hc, false);
		break;
	case HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE:
		if (unlikely(hc.rep)) {
			ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
			break;
		}
		ret = kvm_hv_flush_tlb(vcpu, &hc, false);
		break;
	case HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX:
		if (unlikely(!hc.rep_cnt || hc.rep_idx)) {
			ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
			break;
		}
		ret = kvm_hv_flush_tlb(vcpu, &hc, true);
		break;
	case HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX:
		if (unlikely(hc.rep)) {
			ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
			break;
		}
		ret = kvm_hv_flush_tlb(vcpu, &hc, true);
		break;
	case HVCALL_SEND_IPI:
		if (unlikely(hc.rep)) {
			ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
			break;
		}
		ret = kvm_hv_send_ipi(vcpu, &hc, false);
		break;
	case HVCALL_SEND_IPI_EX:
		if (unlikely(hc.fast || hc.rep)) {
			ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
			break;
		}
		ret = kvm_hv_send_ipi(vcpu, &hc, true);
		break;
	case HVCALL_POST_DEBUG_DATA:
	case HVCALL_RETRIEVE_DEBUG_DATA:
		if (unlikely(hc.fast)) {
			ret = HV_STATUS_INVALID_PARAMETER;
			break;
		}
		fallthrough;
	case HVCALL_RESET_DEBUG_SESSION: {
		struct kvm_hv_syndbg *syndbg = to_hv_syndbg(vcpu);

		if (!kvm_hv_is_syndbg_enabled(vcpu)) {
			ret = HV_STATUS_INVALID_HYPERCALL_CODE;
			break;
		}

		if (!(syndbg->options & HV_X64_SYNDBG_OPTION_USE_HCALLS)) {
			ret = HV_STATUS_OPERATION_DENIED;
			break;
		}
		vcpu->run->exit_reason = KVM_EXIT_HYPERV;
		vcpu->run->hyperv.type = KVM_EXIT_HYPERV_HCALL;
		vcpu->run->hyperv.u.hcall.input = hc.param;
		vcpu->run->hyperv.u.hcall.params[0] = hc.ingpa;
		vcpu->run->hyperv.u.hcall.params[1] = hc.outgpa;
		vcpu->arch.complete_userspace_io =
				kvm_hv_hypercall_complete_userspace;
		return 0;
	}
	default:
		ret = HV_STATUS_INVALID_HYPERCALL_CODE;
		break;
	}

hypercall_complete:
	return kvm_hv_hypercall_complete(vcpu, ret);
}

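/*
 * VM-wide Hyper-V state: hv_lock serializes writers of the conn_id ->
 * eventfd IDR (assign/deassign below), while the HVCALL_SIGNAL_EVENT lookup
 * path only takes rcu_read_lock().  Each IDR entry holds a reference on its
 * eventfd context, dropped at deassign or VM destruction.
 */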
void kvm_hv_init_vm(struct kvm *kvm)
{
	struct kvm_hv *hv = to_kvm_hv(kvm);

	mutex_init(&hv->hv_lock);
	idr_init(&hv->conn_to_evt);
}

void kvm_hv_destroy_vm(struct kvm *kvm)
{
	struct kvm_hv *hv = to_kvm_hv(kvm);
	struct eventfd_ctx *eventfd;
	int i;

	idr_for_each_entry(&hv->conn_to_evt, eventfd, i)
		eventfd_ctx_put(eventfd);
	idr_destroy(&hv->conn_to_evt);
}

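/*
 * Bind an eventfd to a Hyper-V connection id.  idr_alloc() is asked for the
 * single-slot range [conn_id, conn_id + 1), so -ENOSPC here means the
 * connection id is already in use and is reported as -EEXIST.
 */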
static int kvm_hv_eventfd_assign(struct kvm *kvm, u32 conn_id, int fd)
{
	struct kvm_hv *hv = to_kvm_hv(kvm);
	struct eventfd_ctx *eventfd;
	int ret;

	eventfd = eventfd_ctx_fdget(fd);
	if (IS_ERR(eventfd))
		return PTR_ERR(eventfd);

	mutex_lock(&hv->hv_lock);
	ret = idr_alloc(&hv->conn_to_evt, eventfd, conn_id, conn_id + 1,
			GFP_KERNEL_ACCOUNT);
	mutex_unlock(&hv->hv_lock);

	if (ret >= 0)
		return 0;

	if (ret == -ENOSPC)
		ret = -EEXIST;
	eventfd_ctx_put(eventfd);
	return ret;
}

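/*
 * Unbind a connection id.  synchronize_srcu() lets any in-flight
 * kvm_hvcall_signal_event() that already looked the eventfd up finish
 * signalling it before the reference is dropped.
 */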
static int kvm_hv_eventfd_deassign(struct kvm *kvm, u32 conn_id)
{
	struct kvm_hv *hv = to_kvm_hv(kvm);
	struct eventfd_ctx *eventfd;

	mutex_lock(&hv->hv_lock);
	eventfd = idr_remove(&hv->conn_to_evt, conn_id);
	mutex_unlock(&hv->hv_lock);

	if (!eventfd)
		return -ENOENT;

	synchronize_srcu(&kvm->srcu);
	eventfd_ctx_put(eventfd);
	return 0;
}

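/*
 * VM ioctl handler for KVM_HYPERV_EVENTFD.  An illustrative (untested)
 * userspace sketch, assuming 'vm_fd' is an open VM file descriptor and the
 * eventfd() call succeeds:
 *
 *	struct kvm_hyperv_eventfd hvevfd = {
 *		.conn_id = 0x1234,	// only the low 24 bits are valid
 *		.fd      = eventfd(0, EFD_CLOEXEC),
 *		.flags   = 0,		// or KVM_HYPERV_EVENTFD_DEASSIGN
 *	};
 *	ioctl(vm_fd, KVM_HYPERV_EVENTFD, &hvevfd);
 *
 * Afterwards a guest HVCALL_SIGNAL_EVENT with that connection id signals
 * the eventfd in the kernel without a userspace exit.
 */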
int kvm_vm_ioctl_hv_eventfd(struct kvm *kvm, struct kvm_hyperv_eventfd *args)
{
	if ((args->flags & ~KVM_HYPERV_EVENTFD_DEASSIGN) ||
	    (args->conn_id & ~KVM_HYPERV_CONN_ID_MASK))
		return -EINVAL;

	if (args->flags == KVM_HYPERV_EVENTFD_DEASSIGN)
		return kvm_hv_eventfd_deassign(kvm, args->conn_id);
	return kvm_hv_eventfd_assign(kvm, args->conn_id, args->fd);
}

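/*
 * Report the Hyper-V CPUID leaves KVM can emulate (KVM_GET_SUPPORTED_HV_CPUID).
 * 'vcpu' may be NULL on the system-wide ioctl path, in which case the result
 * must hold for any VM; that is why the in-kernel-LAPIC-only bit below is
 * advertised unconditionally when vcpu is NULL.
 */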
int kvm_get_hv_cpuid(struct kvm_vcpu *vcpu, struct kvm_cpuid2 *cpuid,
		     struct kvm_cpuid_entry2 __user *entries)
{
	uint16_t evmcs_ver = 0;
	struct kvm_cpuid_entry2 cpuid_entries[] = {
		{ .function = HYPERV_CPUID_VENDOR_AND_MAX_FUNCTIONS },
		{ .function = HYPERV_CPUID_INTERFACE },
		{ .function = HYPERV_CPUID_VERSION },
		{ .function = HYPERV_CPUID_FEATURES },
		{ .function = HYPERV_CPUID_ENLIGHTMENT_INFO },
		{ .function = HYPERV_CPUID_IMPLEMENT_LIMITS },
		{ .function = HYPERV_CPUID_SYNDBG_VENDOR_AND_MAX_FUNCTIONS },
		{ .function = HYPERV_CPUID_SYNDBG_INTERFACE },
		{ .function = HYPERV_CPUID_SYNDBG_PLATFORM_CAPABILITIES },
		{ .function = HYPERV_CPUID_NESTED_FEATURES },
	};
	int i, nent = ARRAY_SIZE(cpuid_entries);

	if (kvm_x86_ops.nested_ops->get_evmcs_version)
		evmcs_ver = kvm_x86_ops.nested_ops->get_evmcs_version(vcpu);

	/* Skip NESTED_FEATURES if eVMCS is not supported */
	if (!evmcs_ver)
		--nent;

	if (cpuid->nent < nent)
		return -E2BIG;

	if (cpuid->nent > nent)
		cpuid->nent = nent;

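	/*
	 * Each entry starts out zeroed apart from .function; the switch below
	 * only fills in the bits KVM actually supports, so anything it does
	 * not mention is reported as unsupported.
	 */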
	for (i = 0; i < nent; i++) {
		struct kvm_cpuid_entry2 *ent = &cpuid_entries[i];
		u32 signature[3];

		switch (ent->function) {
		case HYPERV_CPUID_VENDOR_AND_MAX_FUNCTIONS:
			memcpy(signature, "Linux KVM Hv", 12);

			ent->eax = HYPERV_CPUID_SYNDBG_PLATFORM_CAPABILITIES;
			ent->ebx = signature[0];
			ent->ecx = signature[1];
			ent->edx = signature[2];
			break;

		case HYPERV_CPUID_INTERFACE:
			ent->eax = HYPERV_CPUID_SIGNATURE_EAX;
			break;

		case HYPERV_CPUID_VERSION:
			/*
			 * We implement some Hyper-V 2016 functions, so report
			 * that build/version number.
			 */
			ent->eax = 0x00003839;
			ent->ebx = 0x000A0000;
			break;

		case HYPERV_CPUID_FEATURES:
			ent->eax |= HV_MSR_VP_RUNTIME_AVAILABLE;
			ent->eax |= HV_MSR_TIME_REF_COUNT_AVAILABLE;
			ent->eax |= HV_MSR_SYNIC_AVAILABLE;
			ent->eax |= HV_MSR_SYNTIMER_AVAILABLE;
			ent->eax |= HV_MSR_APIC_ACCESS_AVAILABLE;
			ent->eax |= HV_MSR_HYPERCALL_AVAILABLE;
			ent->eax |= HV_MSR_VP_INDEX_AVAILABLE;
			ent->eax |= HV_MSR_RESET_AVAILABLE;
			ent->eax |= HV_MSR_REFERENCE_TSC_AVAILABLE;
			ent->eax |= HV_ACCESS_FREQUENCY_MSRS;
			ent->eax |= HV_ACCESS_REENLIGHTENMENT;

			ent->ebx |= HV_POST_MESSAGES;
			ent->ebx |= HV_SIGNAL_EVENTS;

			ent->edx |= HV_X64_HYPERCALL_XMM_INPUT_AVAILABLE;
			ent->edx |= HV_FEATURE_FREQUENCY_MSRS_AVAILABLE;
			ent->edx |= HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE;

			ent->ebx |= HV_DEBUGGING;
			ent->edx |= HV_X64_GUEST_DEBUGGING_AVAILABLE;
			ent->edx |= HV_FEATURE_DEBUG_MSRS_AVAILABLE;

			/*
			 * Direct Synthetic timers only make sense with
			 * an in-kernel LAPIC.
			 */
			if (!vcpu || lapic_in_kernel(vcpu))
				ent->edx |= HV_STIMER_DIRECT_MODE_AVAILABLE;

			break;

		case HYPERV_CPUID_ENLIGHTMENT_INFO:
			ent->eax |= HV_X64_REMOTE_TLB_FLUSH_RECOMMENDED;
			ent->eax |= HV_X64_APIC_ACCESS_RECOMMENDED;
			ent->eax |= HV_X64_RELAXED_TIMING_RECOMMENDED;
			ent->eax |= HV_X64_CLUSTER_IPI_RECOMMENDED;
			ent->eax |= HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED;
			if (evmcs_ver)
				ent->eax |= HV_X64_ENLIGHTENED_VMCS_RECOMMENDED;
			if (!cpu_smt_possible())
				ent->eax |= HV_X64_NO_NONARCH_CORESHARING;

			ent->eax |= HV_DEPRECATING_AEOI_RECOMMENDED;

			/*
			 * Default number of spinlock retry attempts, matches
			 * Hyper-V 2016.
			 */
			ent->ebx = 0x00000FFF;

			break;

		case HYPERV_CPUID_IMPLEMENT_LIMITS:
			/* Maximum number of virtual processors */
			ent->eax = KVM_MAX_VCPUS;
			/*
			 * Maximum number of logical processors, matches
			 * Hyper-V 2016.
			 */
			ent->ebx = 64;

			break;

		case HYPERV_CPUID_NESTED_FEATURES:
			ent->eax = evmcs_ver;
			if (evmcs_ver)
				ent->eax |= HV_X64_NESTED_MSR_BITMAP;

			break;

		case HYPERV_CPUID_SYNDBG_VENDOR_AND_MAX_FUNCTIONS:
			memcpy(signature, "Linux KVM Hv", 12);

			ent->eax = 0;
			ent->ebx = signature[0];
			ent->ecx = signature[1];
			ent->edx = signature[2];
			break;

		case HYPERV_CPUID_SYNDBG_INTERFACE:
			memcpy(signature, "VS#1\0\0\0\0\0\0\0\0", 12);
			ent->eax = signature[0];
			break;

		case HYPERV_CPUID_SYNDBG_PLATFORM_CAPABILITIES:
			ent->eax |= HV_X64_SYNDBG_CAP_ALLOW_KERNEL_DEBUGGING;
			break;

		default:
			break;
		}
	}

	if (copy_to_user(entries, cpuid_entries,
			 nent * sizeof(struct kvm_cpuid_entry2)))
		return -EFAULT;

	return 0;
}