// SPDX-License-Identifier: GPL-2.0-only
/*
 * KVM Microsoft Hyper-V emulation
 *
 * derived from arch/x86/kvm/x86.c
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright (C) 2008 Qumranet, Inc.
 * Copyright IBM Corporation, 2008
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 * Copyright (C) 2015 Andrey Smetanin <asmetanin@virtuozzo.com>
 *
 * Authors:
 *   Avi Kivity   <avi@qumranet.com>
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Amit Shah    <amit.shah@qumranet.com>
 *   Ben-Ami Yassour <benami@il.ibm.com>
 *   Andrey Smetanin <asmetanin@virtuozzo.com>
 */
#include "x86.h"
#include "lapic.h"
#include "ioapic.h"
#include "cpuid.h"
#include "hyperv.h"

#include <linux/cpu.h>
#include <linux/kvm_host.h>
#include <linux/highmem.h>
#include <linux/sched/cputime.h>
#include <linux/eventfd.h>

#include <asm/apicdef.h>
#include <trace/events/kvm.h>

#include "trace.h"
#include "irq.h"

#define KVM_HV_MAX_SPARSE_VCPU_SET_BITS DIV_ROUND_UP(KVM_MAX_VCPUS, 64)

static void stimer_mark_pending(struct kvm_vcpu_hv_stimer *stimer,
				bool vcpu_kick);

static inline u64 synic_read_sint(struct kvm_vcpu_hv_synic *synic, int sint)
{
	return atomic64_read(&synic->sint[sint]);
}

static inline int synic_get_sint_vector(u64 sint_value)
{
	if (sint_value & HV_SYNIC_SINT_MASKED)
		return -1;
	return sint_value & HV_SYNIC_SINT_VECTOR_MASK;
}

static bool synic_has_vector_connected(struct kvm_vcpu_hv_synic *synic,
				       int vector)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(synic->sint); i++) {
		if (synic_get_sint_vector(synic_read_sint(synic, i)) == vector)
			return true;
	}
	return false;
}

static bool synic_has_vector_auto_eoi(struct kvm_vcpu_hv_synic *synic,
				      int vector)
{
	int i;
	u64 sint_value;

	for (i = 0; i < ARRAY_SIZE(synic->sint); i++) {
		sint_value = synic_read_sint(synic, i);
		if (synic_get_sint_vector(sint_value) == vector &&
		    sint_value & HV_SYNIC_SINT_AUTO_EOI)
			return true;
	}
	return false;
}

static void synic_update_vector(struct kvm_vcpu_hv_synic *synic,
				int vector)
{
	if (vector < HV_SYNIC_FIRST_VALID_VECTOR)
		return;

	if (synic_has_vector_connected(synic, vector))
		__set_bit(vector, synic->vec_bitmap);
	else
		__clear_bit(vector, synic->vec_bitmap);

	if (synic_has_vector_auto_eoi(synic, vector))
		__set_bit(vector, synic->auto_eoi_bitmap);
	else
		__clear_bit(vector, synic->auto_eoi_bitmap);
}

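/*
 * Update a SINT register and the SynIC's cached vector bitmaps, then ask
 * KVM to rescan the ioapic so the EOI exit bitmap picks up the change.
 */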
static int synic_set_sint(struct kvm_vcpu_hv_synic *synic, int sint,
			  u64 data, bool host)
{
	int vector, old_vector;
	bool masked;

	vector = data & HV_SYNIC_SINT_VECTOR_MASK;
	masked = data & HV_SYNIC_SINT_MASKED;

	/*
	 * Valid vectors are 16-255, however, nested Hyper-V attempts to write
	 * default '0x10000' value on boot and this should not #GP. We need to
	 * allow zero-initing the register from host as well.
	 */
	if (vector < HV_SYNIC_FIRST_VALID_VECTOR && !host && !masked)
		return 1;
	/*
	 * Guest may configure multiple SINTs to use the same vector, so
	 * we maintain a bitmap of vectors handled by synic, and a
	 * bitmap of vectors with auto-eoi behavior.  The bitmaps are
	 * updated here, and atomically queried on fast paths.
	 */
	old_vector = synic_read_sint(synic, sint) & HV_SYNIC_SINT_VECTOR_MASK;

	atomic64_set(&synic->sint[sint], data);

	synic_update_vector(synic, old_vector);

	synic_update_vector(synic, vector);

	/* Load SynIC vectors into EOI exit bitmap */
	kvm_make_request(KVM_REQ_SCAN_IOAPIC, synic_to_vcpu(synic));
	return 0;
}

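/*
 * Map a Hyper-V VP index to a kvm_vcpu.  In the common case the VP index
 * equals the vcpu index, so try that first before falling back to a linear
 * scan over all vcpus.
 */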
static struct kvm_vcpu *get_vcpu_by_vpidx(struct kvm *kvm, u32 vpidx)
{
	struct kvm_vcpu *vcpu = NULL;
	int i;

	if (vpidx >= KVM_MAX_VCPUS)
		return NULL;

	vcpu = kvm_get_vcpu(kvm, vpidx);
	if (vcpu && vcpu_to_hv_vcpu(vcpu)->vp_index == vpidx)
		return vcpu;
	kvm_for_each_vcpu(i, vcpu, kvm)
		if (vcpu_to_hv_vcpu(vcpu)->vp_index == vpidx)
			return vcpu;
	return NULL;
}

static struct kvm_vcpu_hv_synic *synic_get(struct kvm *kvm, u32 vpidx)
{
	struct kvm_vcpu *vcpu;
	struct kvm_vcpu_hv_synic *synic;

	vcpu = get_vcpu_by_vpidx(kvm, vpidx);
	if (!vcpu)
		return NULL;
	synic = vcpu_to_synic(vcpu);
	return (synic->active) ? synic : NULL;
}

static void kvm_hv_notify_acked_sint(struct kvm_vcpu *vcpu, u32 sint)
{
	struct kvm *kvm = vcpu->kvm;
	struct kvm_vcpu_hv_synic *synic = vcpu_to_synic(vcpu);
	struct kvm_vcpu_hv *hv_vcpu = vcpu_to_hv_vcpu(vcpu);
	struct kvm_vcpu_hv_stimer *stimer;
	int gsi, idx;

	trace_kvm_hv_notify_acked_sint(vcpu->vcpu_id, sint);

	/* Try to deliver pending Hyper-V SynIC timers messages */
	for (idx = 0; idx < ARRAY_SIZE(hv_vcpu->stimer); idx++) {
		stimer = &hv_vcpu->stimer[idx];
		if (stimer->msg_pending && stimer->config.enable &&
		    !stimer->config.direct_mode &&
		    stimer->config.sintx == sint)
			stimer_mark_pending(stimer, false);
	}

	idx = srcu_read_lock(&kvm->irq_srcu);
	gsi = atomic_read(&synic->sint_to_gsi[sint]);
	if (gsi != -1)
		kvm_notify_acked_gsi(kvm, gsi);
	srcu_read_unlock(&kvm->irq_srcu, idx);
}

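/*
 * Forward a SynIC MSR write to userspace via a KVM_EXIT_HYPERV exit so it
 * can keep its view of the SynIC pages in sync.
 */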
static void synic_exit(struct kvm_vcpu_hv_synic *synic, u32 msr)
{
	struct kvm_vcpu *vcpu = synic_to_vcpu(synic);
	struct kvm_vcpu_hv *hv_vcpu = &vcpu->arch.hyperv;

	hv_vcpu->exit.type = KVM_EXIT_HYPERV_SYNIC;
	hv_vcpu->exit.u.synic.msr = msr;
	hv_vcpu->exit.u.synic.control = synic->control;
	hv_vcpu->exit.u.synic.evt_page = synic->evt_page;
	hv_vcpu->exit.u.synic.msg_page = synic->msg_page;

	kvm_make_request(KVM_REQ_HV_EXIT, vcpu);
}

static int synic_set_msr(struct kvm_vcpu_hv_synic *synic,
			 u32 msr, u64 data, bool host)
{
	struct kvm_vcpu *vcpu = synic_to_vcpu(synic);
	int ret;

	if (!synic->active && !host)
		return 1;

	trace_kvm_hv_synic_set_msr(vcpu->vcpu_id, msr, data, host);

	ret = 0;
	switch (msr) {
	case HV_X64_MSR_SCONTROL:
		synic->control = data;
		if (!host)
			synic_exit(synic, msr);
		break;
	case HV_X64_MSR_SVERSION:
		if (!host) {
			ret = 1;
			break;
		}
		synic->version = data;
		break;
	case HV_X64_MSR_SIEFP:
		if ((data & HV_SYNIC_SIEFP_ENABLE) && !host &&
		    !synic->dont_zero_synic_pages)
			if (kvm_clear_guest(vcpu->kvm,
					    data & PAGE_MASK, PAGE_SIZE)) {
				ret = 1;
				break;
			}
		synic->evt_page = data;
		if (!host)
			synic_exit(synic, msr);
		break;
	case HV_X64_MSR_SIMP:
		if ((data & HV_SYNIC_SIMP_ENABLE) && !host &&
		    !synic->dont_zero_synic_pages)
			if (kvm_clear_guest(vcpu->kvm,
					    data & PAGE_MASK, PAGE_SIZE)) {
				ret = 1;
				break;
			}
		synic->msg_page = data;
		if (!host)
			synic_exit(synic, msr);
		break;
	case HV_X64_MSR_EOM: {
		int i;

		for (i = 0; i < ARRAY_SIZE(synic->sint); i++)
			kvm_hv_notify_acked_sint(vcpu, i);
		break;
	}
	case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15:
		ret = synic_set_sint(synic, msr - HV_X64_MSR_SINT0, data, host);
		break;
	default:
		ret = 1;
		break;
	}
	return ret;
}

static bool kvm_hv_is_syndbg_enabled(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *entry;

	entry = kvm_find_cpuid_entry(vcpu,
				     HYPERV_CPUID_SYNDBG_PLATFORM_CAPABILITIES,
				     0);
	if (!entry)
		return false;

	return entry->eax & HV_X64_SYNDBG_CAP_ALLOW_KERNEL_DEBUGGING;
}

static int kvm_hv_syndbg_complete_userspace(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = vcpu->kvm;
	struct kvm_hv *hv = &kvm->arch.hyperv;

	if (vcpu->run->hyperv.u.syndbg.msr == HV_X64_MSR_SYNDBG_CONTROL)
		hv->hv_syndbg.control.status =
			vcpu->run->hyperv.u.syndbg.status;
	return 1;
}

static void syndbg_exit(struct kvm_vcpu *vcpu, u32 msr)
{
	struct kvm_hv_syndbg *syndbg = vcpu_to_hv_syndbg(vcpu);
	struct kvm_vcpu_hv *hv_vcpu = &vcpu->arch.hyperv;

	hv_vcpu->exit.type = KVM_EXIT_HYPERV_SYNDBG;
	hv_vcpu->exit.u.syndbg.msr = msr;
	hv_vcpu->exit.u.syndbg.control = syndbg->control.control;
	hv_vcpu->exit.u.syndbg.send_page = syndbg->control.send_page;
	hv_vcpu->exit.u.syndbg.recv_page = syndbg->control.recv_page;
	hv_vcpu->exit.u.syndbg.pending_page = syndbg->control.pending_page;
	vcpu->arch.complete_userspace_io =
		kvm_hv_syndbg_complete_userspace;

	kvm_make_request(KVM_REQ_HV_EXIT, vcpu);
}

static int syndbg_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host)
{
	struct kvm_hv_syndbg *syndbg = vcpu_to_hv_syndbg(vcpu);

	if (!kvm_hv_is_syndbg_enabled(vcpu) && !host)
		return 1;

	trace_kvm_hv_syndbg_set_msr(vcpu->vcpu_id,
				    vcpu_to_hv_vcpu(vcpu)->vp_index, msr, data);
	switch (msr) {
	case HV_X64_MSR_SYNDBG_CONTROL:
		syndbg->control.control = data;
		if (!host)
			syndbg_exit(vcpu, msr);
		break;
	case HV_X64_MSR_SYNDBG_STATUS:
		syndbg->control.status = data;
		break;
	case HV_X64_MSR_SYNDBG_SEND_BUFFER:
		syndbg->control.send_page = data;
		break;
	case HV_X64_MSR_SYNDBG_RECV_BUFFER:
		syndbg->control.recv_page = data;
		break;
	case HV_X64_MSR_SYNDBG_PENDING_BUFFER:
		syndbg->control.pending_page = data;
		if (!host)
			syndbg_exit(vcpu, msr);
		break;
	case HV_X64_MSR_SYNDBG_OPTIONS:
		syndbg->options = data;
		break;
	default:
		break;
	}

	return 0;
}

static int syndbg_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata, bool host)
{
	struct kvm_hv_syndbg *syndbg = vcpu_to_hv_syndbg(vcpu);

	if (!kvm_hv_is_syndbg_enabled(vcpu) && !host)
		return 1;

	switch (msr) {
	case HV_X64_MSR_SYNDBG_CONTROL:
		*pdata = syndbg->control.control;
		break;
	case HV_X64_MSR_SYNDBG_STATUS:
		*pdata = syndbg->control.status;
		break;
	case HV_X64_MSR_SYNDBG_SEND_BUFFER:
		*pdata = syndbg->control.send_page;
		break;
	case HV_X64_MSR_SYNDBG_RECV_BUFFER:
		*pdata = syndbg->control.recv_page;
		break;
	case HV_X64_MSR_SYNDBG_PENDING_BUFFER:
		*pdata = syndbg->control.pending_page;
		break;
	case HV_X64_MSR_SYNDBG_OPTIONS:
		*pdata = syndbg->options;
		break;
	default:
		break;
	}

	trace_kvm_hv_syndbg_get_msr(vcpu->vcpu_id,
				    vcpu_to_hv_vcpu(vcpu)->vp_index, msr,
				    *pdata);

	return 0;
}

static int synic_get_msr(struct kvm_vcpu_hv_synic *synic, u32 msr, u64 *pdata,
			 bool host)
{
	int ret;

	if (!synic->active && !host)
		return 1;

	ret = 0;
	switch (msr) {
	case HV_X64_MSR_SCONTROL:
		*pdata = synic->control;
		break;
	case HV_X64_MSR_SVERSION:
		*pdata = synic->version;
		break;
	case HV_X64_MSR_SIEFP:
		*pdata = synic->evt_page;
		break;
	case HV_X64_MSR_SIMP:
		*pdata = synic->msg_page;
		break;
	case HV_X64_MSR_EOM:
		*pdata = 0;
		break;
	case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15:
		*pdata = atomic64_read(&synic->sint[msr - HV_X64_MSR_SINT0]);
		break;
	default:
		ret = 1;
		break;
	}
	return ret;
}

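/*
 * Deliver the interrupt programmed into a SINT as a fixed, self-targeted
 * APIC interrupt.  Returns a negative errno if the SINT is out of range or
 * masked, otherwise the result of the APIC delivery.
 */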
static int synic_set_irq(struct kvm_vcpu_hv_synic *synic, u32 sint)
{
	struct kvm_vcpu *vcpu = synic_to_vcpu(synic);
	struct kvm_lapic_irq irq;
	int ret, vector;

	if (sint >= ARRAY_SIZE(synic->sint))
		return -EINVAL;

	vector = synic_get_sint_vector(synic_read_sint(synic, sint));
	if (vector < 0)
		return -ENOENT;

	memset(&irq, 0, sizeof(irq));
	irq.shorthand = APIC_DEST_SELF;
	irq.dest_mode = APIC_DEST_PHYSICAL;
	irq.delivery_mode = APIC_DM_FIXED;
	irq.vector = vector;
	irq.level = 1;

	ret = kvm_irq_delivery_to_apic(vcpu->kvm, vcpu->arch.apic, &irq, NULL);
	trace_kvm_hv_synic_set_irq(vcpu->vcpu_id, sint, irq.vector, ret);
	return ret;
}

int kvm_hv_synic_set_irq(struct kvm *kvm, u32 vpidx, u32 sint)
{
	struct kvm_vcpu_hv_synic *synic;

	synic = synic_get(kvm, vpidx);
	if (!synic)
		return -EINVAL;

	return synic_set_irq(synic, sint);
}

void kvm_hv_synic_send_eoi(struct kvm_vcpu *vcpu, int vector)
{
	struct kvm_vcpu_hv_synic *synic = vcpu_to_synic(vcpu);
	int i;

	trace_kvm_hv_synic_send_eoi(vcpu->vcpu_id, vector);

	for (i = 0; i < ARRAY_SIZE(synic->sint); i++)
		if (synic_get_sint_vector(synic_read_sint(synic, i)) == vector)
			kvm_hv_notify_acked_sint(vcpu, i);
}

static int kvm_hv_set_sint_gsi(struct kvm *kvm, u32 vpidx, u32 sint, int gsi)
{
	struct kvm_vcpu_hv_synic *synic;

	synic = synic_get(kvm, vpidx);
	if (!synic)
		return -EINVAL;

	if (sint >= ARRAY_SIZE(synic->sint_to_gsi))
		return -EINVAL;

	atomic_set(&synic->sint_to_gsi[sint], gsi);
	return 0;
}

void kvm_hv_irq_routing_update(struct kvm *kvm)
{
	struct kvm_irq_routing_table *irq_rt;
	struct kvm_kernel_irq_routing_entry *e;
	u32 gsi;

	irq_rt = srcu_dereference_check(kvm->irq_routing, &kvm->irq_srcu,
					lockdep_is_held(&kvm->irq_lock));

	for (gsi = 0; gsi < irq_rt->nr_rt_entries; gsi++) {
		hlist_for_each_entry(e, &irq_rt->map[gsi], link) {
			if (e->type == KVM_IRQ_ROUTING_HV_SINT)
				kvm_hv_set_sint_gsi(kvm, e->hv_sint.vcpu,
						    e->hv_sint.sint, gsi);
		}
	}
}

static void synic_init(struct kvm_vcpu_hv_synic *synic)
{
	int i;

	memset(synic, 0, sizeof(*synic));
	synic->version = HV_SYNIC_VERSION_1;
	for (i = 0; i < ARRAY_SIZE(synic->sint); i++) {
		atomic64_set(&synic->sint[i], HV_SYNIC_SINT_MASKED);
		atomic_set(&synic->sint_to_gsi[i], -1);
	}
}

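/*
 * Return the partition reference time counter in 100ns units.  When the
 * TSC page parameters are valid, derive the value from the scaled TSC so
 * the two clock sources stay consistent; otherwise fall back to kvmclock.
 */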
static u64 get_time_ref_counter(struct kvm *kvm)
{
	struct kvm_hv *hv = &kvm->arch.hyperv;
	struct kvm_vcpu *vcpu;
	u64 tsc;

	/*
	 * The guest has not set up the TSC page or the clock isn't
	 * stable, fall back to get_kvmclock_ns.
	 */
	if (!hv->tsc_ref.tsc_sequence)
		return div_u64(get_kvmclock_ns(kvm), 100);

	vcpu = kvm_get_vcpu(kvm, 0);
	tsc = kvm_read_l1_tsc(vcpu, rdtsc());
	return mul_u64_u64_shr(tsc, hv->tsc_ref.tsc_scale, 64)
		+ hv->tsc_ref.tsc_offset;
}

static void stimer_mark_pending(struct kvm_vcpu_hv_stimer *stimer,
				bool vcpu_kick)
{
	struct kvm_vcpu *vcpu = stimer_to_vcpu(stimer);

	set_bit(stimer->index,
		vcpu_to_hv_vcpu(vcpu)->stimer_pending_bitmap);
	kvm_make_request(KVM_REQ_HV_STIMER, vcpu);
	if (vcpu_kick)
		kvm_vcpu_kick(vcpu);
}

static void stimer_cleanup(struct kvm_vcpu_hv_stimer *stimer)
{
	struct kvm_vcpu *vcpu = stimer_to_vcpu(stimer);

	trace_kvm_hv_stimer_cleanup(stimer_to_vcpu(stimer)->vcpu_id,
				    stimer->index);

	hrtimer_cancel(&stimer->timer);
	clear_bit(stimer->index,
		  vcpu_to_hv_vcpu(vcpu)->stimer_pending_bitmap);
	stimer->msg_pending = false;
	stimer->exp_time = 0;
}

static enum hrtimer_restart stimer_timer_callback(struct hrtimer *timer)
{
	struct kvm_vcpu_hv_stimer *stimer;

	stimer = container_of(timer, struct kvm_vcpu_hv_stimer, timer);
	trace_kvm_hv_stimer_callback(stimer_to_vcpu(stimer)->vcpu_id,
				     stimer->index);
	stimer_mark_pending(stimer, true);

	return HRTIMER_NORESTART;
}

/*
 * stimer_start() assumptions:
 * a) stimer->count is not equal to 0
 * b) stimer->config has HV_STIMER_ENABLE flag
 */
static int stimer_start(struct kvm_vcpu_hv_stimer *stimer)
{
	u64 time_now;
	ktime_t ktime_now;

	time_now = get_time_ref_counter(stimer_to_vcpu(stimer)->kvm);
	ktime_now = ktime_get();

	if (stimer->config.periodic) {
		if (stimer->exp_time) {
			if (time_now >= stimer->exp_time) {
				u64 remainder;

				div64_u64_rem(time_now - stimer->exp_time,
					      stimer->count, &remainder);
				stimer->exp_time =
					time_now + (stimer->count - remainder);
			}
		} else
			stimer->exp_time = time_now + stimer->count;

		trace_kvm_hv_stimer_start_periodic(
					stimer_to_vcpu(stimer)->vcpu_id,
					stimer->index,
					time_now, stimer->exp_time);

		hrtimer_start(&stimer->timer,
			      ktime_add_ns(ktime_now,
					   100 * (stimer->exp_time - time_now)),
			      HRTIMER_MODE_ABS);
		return 0;
	}
	stimer->exp_time = stimer->count;
	if (time_now >= stimer->count) {
		/*
		 * Expire timer according to Hypervisor Top-Level Functional
		 * Specification (TLFS):
		 * "If a one shot is enabled and the specified count is in
		 * the past, it will expire immediately."
		 */
		stimer_mark_pending(stimer, false);
		return 0;
	}

	trace_kvm_hv_stimer_start_one_shot(stimer_to_vcpu(stimer)->vcpu_id,
					   stimer->index,
					   time_now, stimer->count);

	hrtimer_start(&stimer->timer,
		      ktime_add_ns(ktime_now, 100 * (stimer->count - time_now)),
		      HRTIMER_MODE_ABS);
	return 0;
}

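/*
 * MSR write handler for a synthetic timer's config register.  The timer is
 * stopped while the new configuration is applied and re-armed lazily via
 * stimer_mark_pending() if it remains enabled.
 */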
static int stimer_set_config(struct kvm_vcpu_hv_stimer *stimer, u64 config,
			     bool host)
{
	union hv_stimer_config new_config = {.as_uint64 = config},
		old_config = {.as_uint64 = stimer->config.as_uint64};

	trace_kvm_hv_stimer_set_config(stimer_to_vcpu(stimer)->vcpu_id,
				       stimer->index, config, host);

	stimer_cleanup(stimer);
	if (old_config.enable &&
	    !new_config.direct_mode && new_config.sintx == 0)
		new_config.enable = 0;
	stimer->config.as_uint64 = new_config.as_uint64;

	if (stimer->config.enable)
		stimer_mark_pending(stimer, false);

	return 0;
}

static int stimer_set_count(struct kvm_vcpu_hv_stimer *stimer, u64 count,
			    bool host)
{
	trace_kvm_hv_stimer_set_count(stimer_to_vcpu(stimer)->vcpu_id,
				      stimer->index, count, host);

	stimer_cleanup(stimer);
	stimer->count = count;
	if (stimer->count == 0)
		stimer->config.enable = 0;
	else if (stimer->config.auto_enable)
		stimer->config.enable = 1;

	if (stimer->config.enable)
		stimer_mark_pending(stimer, false);

	return 0;
}

static int stimer_get_config(struct kvm_vcpu_hv_stimer *stimer, u64 *pconfig)
{
	*pconfig = stimer->config.as_uint64;
	return 0;
}

static int stimer_get_count(struct kvm_vcpu_hv_stimer *stimer, u64 *pcount)
{
	*pcount = stimer->count;
	return 0;
}

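/*
 * Write a message into the SINT's slot in the SynIC message page.  If the
 * slot is still occupied, set the msg_pending flag so the guest knows to
 * signal an EOM, and return -EAGAIN so the caller can retry later.
 */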
static int synic_deliver_msg(struct kvm_vcpu_hv_synic *synic, u32 sint,
			     struct hv_message *src_msg, bool no_retry)
{
	struct kvm_vcpu *vcpu = synic_to_vcpu(synic);
	int msg_off = offsetof(struct hv_message_page, sint_message[sint]);
	gfn_t msg_page_gfn;
	struct hv_message_header hv_hdr;
	int r;

	if (!(synic->msg_page & HV_SYNIC_SIMP_ENABLE))
		return -ENOENT;

	msg_page_gfn = synic->msg_page >> PAGE_SHIFT;

	/*
	 * Strictly following the spec-mandated ordering would assume setting
	 * .msg_pending before checking .message_type.  However, this function
	 * is only called in vcpu context so the entire update is atomic from
	 * guest POV and thus the exact order here doesn't matter.
	 */
	r = kvm_vcpu_read_guest_page(vcpu, msg_page_gfn, &hv_hdr.message_type,
				     msg_off + offsetof(struct hv_message,
							header.message_type),
				     sizeof(hv_hdr.message_type));
	if (r < 0)
		return r;

	if (hv_hdr.message_type != HVMSG_NONE) {
		if (no_retry)
			return 0;

		hv_hdr.message_flags.msg_pending = 1;
		r = kvm_vcpu_write_guest_page(vcpu, msg_page_gfn,
					      &hv_hdr.message_flags,
					      msg_off +
					      offsetof(struct hv_message,
						       header.message_flags),
					      sizeof(hv_hdr.message_flags));
		if (r < 0)
			return r;
		return -EAGAIN;
	}

	r = kvm_vcpu_write_guest_page(vcpu, msg_page_gfn, src_msg, msg_off,
				      sizeof(src_msg->header) +
				      src_msg->header.payload_size);
	if (r < 0)
		return r;

	r = synic_set_irq(synic, sint);
	if (r < 0)
		return r;
	if (r == 0)
		return -EFAULT;
	return 0;
}

static int stimer_send_msg(struct kvm_vcpu_hv_stimer *stimer)
{
	struct kvm_vcpu *vcpu = stimer_to_vcpu(stimer);
	struct hv_message *msg = &stimer->msg;
	struct hv_timer_message_payload *payload =
		(struct hv_timer_message_payload *)&msg->u.payload;

	/*
	 * To avoid piling up periodic ticks, don't retry message
	 * delivery for them (within "lazy" lost ticks policy).
	 */
	bool no_retry = stimer->config.periodic;

	payload->expiration_time = stimer->exp_time;
	payload->delivery_time = get_time_ref_counter(vcpu->kvm);
	return synic_deliver_msg(vcpu_to_synic(vcpu),
				 stimer->config.sintx, msg,
				 no_retry);
}

static int stimer_notify_direct(struct kvm_vcpu_hv_stimer *stimer)
{
	struct kvm_vcpu *vcpu = stimer_to_vcpu(stimer);
	struct kvm_lapic_irq irq = {
		.delivery_mode = APIC_DM_FIXED,
		.vector = stimer->config.apic_vector
	};

	if (lapic_in_kernel(vcpu))
		return !kvm_apic_set_irq(vcpu, &irq, NULL);
	return 0;
}

static void stimer_expiration(struct kvm_vcpu_hv_stimer *stimer)
{
	int r, direct = stimer->config.direct_mode;

	stimer->msg_pending = true;
	if (!direct)
		r = stimer_send_msg(stimer);
	else
		r = stimer_notify_direct(stimer);
	trace_kvm_hv_stimer_expiration(stimer_to_vcpu(stimer)->vcpu_id,
				       stimer->index, direct, r);
	if (!r) {
		stimer->msg_pending = false;
		if (!(stimer->config.periodic))
			stimer->config.enable = 0;
	}
}

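/*
 * Process pending synthetic timers for a vcpu.  Called from vcpu context
 * when KVM_REQ_HV_STIMER is set: expired timers are delivered, still-armed
 * ones are (re)started, and disabled or zero-count timers are cleaned up.
 */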
void kvm_hv_process_stimers(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_hv *hv_vcpu = vcpu_to_hv_vcpu(vcpu);
	struct kvm_vcpu_hv_stimer *stimer;
	u64 time_now, exp_time;
	int i;

	for (i = 0; i < ARRAY_SIZE(hv_vcpu->stimer); i++)
		if (test_and_clear_bit(i, hv_vcpu->stimer_pending_bitmap)) {
			stimer = &hv_vcpu->stimer[i];
			if (stimer->config.enable) {
				exp_time = stimer->exp_time;

				if (exp_time) {
					time_now =
						get_time_ref_counter(vcpu->kvm);
					if (time_now >= exp_time)
						stimer_expiration(stimer);
				}

				if ((stimer->config.enable) &&
				    stimer->count) {
					if (!stimer->msg_pending)
						stimer_start(stimer);
				} else
					stimer_cleanup(stimer);
			}
		}
}

void kvm_hv_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_hv *hv_vcpu = vcpu_to_hv_vcpu(vcpu);
	int i;

	for (i = 0; i < ARRAY_SIZE(hv_vcpu->stimer); i++)
		stimer_cleanup(&hv_vcpu->stimer[i]);
}

bool kvm_hv_assist_page_enabled(struct kvm_vcpu *vcpu)
{
	if (!(vcpu->arch.hyperv.hv_vapic & HV_X64_MSR_VP_ASSIST_PAGE_ENABLE))
		return false;
	return vcpu->arch.pv_eoi.msr_val & KVM_MSR_ENABLED;
}
EXPORT_SYMBOL_GPL(kvm_hv_assist_page_enabled);

bool kvm_hv_get_assist_page(struct kvm_vcpu *vcpu,
			    struct hv_vp_assist_page *assist_page)
{
	if (!kvm_hv_assist_page_enabled(vcpu))
		return false;
	return !kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.pv_eoi.data,
				      assist_page, sizeof(*assist_page));
}
EXPORT_SYMBOL_GPL(kvm_hv_get_assist_page);

static void stimer_prepare_msg(struct kvm_vcpu_hv_stimer *stimer)
{
	struct hv_message *msg = &stimer->msg;
	struct hv_timer_message_payload *payload =
		(struct hv_timer_message_payload *)&msg->u.payload;

	memset(&msg->header, 0, sizeof(msg->header));
	msg->header.message_type = HVMSG_TIMER_EXPIRED;
	msg->header.payload_size = sizeof(*payload);

	payload->timer_index = stimer->index;
	payload->expiration_time = 0;
	payload->delivery_time = 0;
}

static void stimer_init(struct kvm_vcpu_hv_stimer *stimer, int timer_index)
{
	memset(stimer, 0, sizeof(*stimer));
	stimer->index = timer_index;
	hrtimer_init(&stimer->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
	stimer->timer.function = stimer_timer_callback;
	stimer_prepare_msg(stimer);
}

void kvm_hv_vcpu_init(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_hv *hv_vcpu = vcpu_to_hv_vcpu(vcpu);
	int i;

	synic_init(&hv_vcpu->synic);

	bitmap_zero(hv_vcpu->stimer_pending_bitmap, HV_SYNIC_STIMER_COUNT);
	for (i = 0; i < ARRAY_SIZE(hv_vcpu->stimer); i++)
		stimer_init(&hv_vcpu->stimer[i], i);
}

void kvm_hv_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_hv *hv_vcpu = vcpu_to_hv_vcpu(vcpu);

	hv_vcpu->vp_index = kvm_vcpu_get_idx(vcpu);
}

int kvm_hv_activate_synic(struct kvm_vcpu *vcpu, bool dont_zero_synic_pages)
{
	struct kvm_vcpu_hv_synic *synic = vcpu_to_synic(vcpu);

	/*
	 * Hyper-V SynIC auto EOI SINT's are
	 * not compatible with APICV, so request
	 * to deactivate APICV permanently.
	 */
	kvm_request_apicv_update(vcpu->kvm, false, APICV_INHIBIT_REASON_HYPERV);
	synic->active = true;
	synic->dont_zero_synic_pages = dont_zero_synic_pages;
	synic->control = HV_SYNIC_CONTROL_ENABLE;
	return 0;
}

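/*
 * Partition-wide MSRs live in struct kvm_hv and are accessed under hv_lock;
 * everything else is per-vCPU state.  Callers use this predicate to pick
 * the right handler and locking (see kvm_hv_set/get_msr_common below).
 */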
static bool kvm_hv_msr_partition_wide(u32 msr)
{
	bool r = false;

	switch (msr) {
	case HV_X64_MSR_GUEST_OS_ID:
	case HV_X64_MSR_HYPERCALL:
	case HV_X64_MSR_REFERENCE_TSC:
	case HV_X64_MSR_TIME_REF_COUNT:
	case HV_X64_MSR_CRASH_CTL:
	case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
	case HV_X64_MSR_RESET:
	case HV_X64_MSR_REENLIGHTENMENT_CONTROL:
	case HV_X64_MSR_TSC_EMULATION_CONTROL:
	case HV_X64_MSR_TSC_EMULATION_STATUS:
	case HV_X64_MSR_SYNDBG_OPTIONS:
	case HV_X64_MSR_SYNDBG_CONTROL ... HV_X64_MSR_SYNDBG_PENDING_BUFFER:
		r = true;
		break;
	}

	return r;
}

static int kvm_hv_msr_get_crash_data(struct kvm_vcpu *vcpu,
				     u32 index, u64 *pdata)
{
	struct kvm_hv *hv = &vcpu->kvm->arch.hyperv;
	size_t size = ARRAY_SIZE(hv->hv_crash_param);

	if (WARN_ON_ONCE(index >= size))
		return -EINVAL;

	*pdata = hv->hv_crash_param[array_index_nospec(index, size)];
	return 0;
}

static int kvm_hv_msr_get_crash_ctl(struct kvm_vcpu *vcpu, u64 *pdata)
{
	struct kvm_hv *hv = &vcpu->kvm->arch.hyperv;

	*pdata = hv->hv_crash_ctl;
	return 0;
}

static int kvm_hv_msr_set_crash_ctl(struct kvm_vcpu *vcpu, u64 data, bool host)
{
	struct kvm_hv *hv = &vcpu->kvm->arch.hyperv;

	if (host)
		hv->hv_crash_ctl = data & HV_CRASH_CTL_CRASH_NOTIFY;

	if (!host && (data & HV_CRASH_CTL_CRASH_NOTIFY)) {

		vcpu_debug(vcpu, "hv crash (0x%llx 0x%llx 0x%llx 0x%llx 0x%llx)\n",
			   hv->hv_crash_param[0],
			   hv->hv_crash_param[1],
			   hv->hv_crash_param[2],
			   hv->hv_crash_param[3],
			   hv->hv_crash_param[4]);

		/* Send notification about crash to user space */
		kvm_make_request(KVM_REQ_HV_CRASH, vcpu);
	}

	return 0;
}

static int kvm_hv_msr_set_crash_data(struct kvm_vcpu *vcpu,
				     u32 index, u64 data)
{
	struct kvm_hv *hv = &vcpu->kvm->arch.hyperv;
	size_t size = ARRAY_SIZE(hv->hv_crash_param);

	if (WARN_ON_ONCE(index >= size))
		return -EINVAL;

	hv->hv_crash_param[array_index_nospec(index, size)] = data;
	return 0;
}

/*
 * The kvmclock and Hyper-V TSC page use similar formulas, and converting
 * between them is possible as well:
 *
 * kvmclock formula:
 *    nsec = (ticks - tsc_timestamp) * tsc_to_system_mul * 2^(tsc_shift-32)
 *           + system_time
 *
 * Hyper-V formula:
 *    nsec/100 = ticks * scale / 2^64 + offset
 *
 * When tsc_timestamp = system_time = 0, offset is zero in the Hyper-V formula.
 * By dividing the kvmclock formula by 100 and equating what's left we get:
 *    ticks * scale / 2^64 = ticks * tsc_to_system_mul * 2^(tsc_shift-32) / 100
 *    scale / 2^64         =       tsc_to_system_mul * 2^(tsc_shift-32) / 100
 *    scale                =       tsc_to_system_mul * 2^(32+tsc_shift) / 100
 *
 * Now expand the kvmclock formula and divide by 100:
 *    nsec = ticks * tsc_to_system_mul * 2^(tsc_shift-32)
 *           - tsc_timestamp * tsc_to_system_mul * 2^(tsc_shift-32)
 *           + system_time
 *    nsec/100 = ticks * tsc_to_system_mul * 2^(tsc_shift-32) / 100
 *               - tsc_timestamp * tsc_to_system_mul * 2^(tsc_shift-32) / 100
 *               + system_time / 100
 *
 * Replace tsc_to_system_mul * 2^(tsc_shift-32) / 100 by scale / 2^64:
 *    nsec/100 = ticks * scale / 2^64
 *               - tsc_timestamp * scale / 2^64
 *               + system_time / 100
 *
 * Equate with the Hyper-V formula so that ticks * scale / 2^64 cancels out:
 *    offset = system_time / 100 - tsc_timestamp * scale / 2^64
 *
 * These two equivalencies are implemented in this function.
 */
static bool compute_tsc_page_parameters(struct pvclock_vcpu_time_info *hv_clock,
					struct ms_hyperv_tsc_page *tsc_ref)
{
	u64 max_mul;

	if (!(hv_clock->flags & PVCLOCK_TSC_STABLE_BIT))
		return false;

	/*
	 * check if scale would overflow, if so we use the time ref counter
	 *    tsc_to_system_mul * 2^(tsc_shift+32) / 100 >= 2^64
	 *    tsc_to_system_mul / 100 >= 2^(32-tsc_shift)
	 *    tsc_to_system_mul >= 100 * 2^(32-tsc_shift)
	 */
	max_mul = 100ull << (32 - hv_clock->tsc_shift);
	if (hv_clock->tsc_to_system_mul >= max_mul)
		return false;

	/*
	 * Otherwise compute the scale and offset according to the formulas
	 * derived above.
	 */
	tsc_ref->tsc_scale =
		mul_u64_u32_div(1ULL << (32 + hv_clock->tsc_shift),
				hv_clock->tsc_to_system_mul,
				100);

	tsc_ref->tsc_offset = hv_clock->system_time;
	do_div(tsc_ref->tsc_offset, 100);
	tsc_ref->tsc_offset -=
		mul_u64_u64_shr(hv_clock->tsc_timestamp, tsc_ref->tsc_scale, 64);
	return true;
}

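/*
 * Refresh the guest-visible reference TSC page.  The tsc_sequence field is
 * used like a seqlock: it is zeroed while the parameters are being rewritten
 * (forcing guests onto the time reference count MSR) and bumped to a new
 * non-zero value once the page is consistent again.
 */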
void kvm_hv_setup_tsc_page(struct kvm *kvm,
			   struct pvclock_vcpu_time_info *hv_clock)
{
	struct kvm_hv *hv = &kvm->arch.hyperv;
	u32 tsc_seq;
	u64 gfn;

	BUILD_BUG_ON(sizeof(tsc_seq) != sizeof(hv->tsc_ref.tsc_sequence));
	BUILD_BUG_ON(offsetof(struct ms_hyperv_tsc_page, tsc_sequence) != 0);

	if (!(hv->hv_tsc_page & HV_X64_MSR_TSC_REFERENCE_ENABLE))
		return;

	mutex_lock(&kvm->arch.hyperv.hv_lock);
	if (!(hv->hv_tsc_page & HV_X64_MSR_TSC_REFERENCE_ENABLE))
		goto out_unlock;

	gfn = hv->hv_tsc_page >> HV_X64_MSR_TSC_REFERENCE_ADDRESS_SHIFT;
	/*
	 * Because the TSC parameters only vary when there is a
	 * change in the master clock, do not bother with caching.
	 */
	if (unlikely(kvm_read_guest(kvm, gfn_to_gpa(gfn),
				    &tsc_seq, sizeof(tsc_seq))))
		goto out_unlock;

	/*
	 * While we're computing and writing the parameters, force the
	 * guest to use the time reference count MSR.
	 */
	hv->tsc_ref.tsc_sequence = 0;
	if (kvm_write_guest(kvm, gfn_to_gpa(gfn),
			    &hv->tsc_ref, sizeof(hv->tsc_ref.tsc_sequence)))
		goto out_unlock;

	if (!compute_tsc_page_parameters(hv_clock, &hv->tsc_ref))
		goto out_unlock;

	/* Ensure sequence is zero before writing the rest of the struct.  */
	smp_wmb();
	if (kvm_write_guest(kvm, gfn_to_gpa(gfn), &hv->tsc_ref, sizeof(hv->tsc_ref)))
		goto out_unlock;

	/*
	 * Now switch to the TSC page mechanism by writing the sequence.
	 */
	tsc_seq++;
	if (tsc_seq == 0xFFFFFFFF || tsc_seq == 0)
		tsc_seq = 1;

	/* Write the struct entirely before the non-zero sequence.  */
	smp_wmb();

	hv->tsc_ref.tsc_sequence = tsc_seq;
	kvm_write_guest(kvm, gfn_to_gpa(gfn),
			&hv->tsc_ref, sizeof(hv->tsc_ref.tsc_sequence));
out_unlock:
	mutex_unlock(&kvm->arch.hyperv.hv_lock);
}

static int kvm_hv_set_msr_pw(struct kvm_vcpu *vcpu, u32 msr, u64 data,
			     bool host)
{
	struct kvm *kvm = vcpu->kvm;
	struct kvm_hv *hv = &kvm->arch.hyperv;

	switch (msr) {
	case HV_X64_MSR_GUEST_OS_ID:
		hv->hv_guest_os_id = data;
		/* setting guest os id to zero disables hypercall page */
		if (!hv->hv_guest_os_id)
			hv->hv_hypercall &= ~HV_X64_MSR_HYPERCALL_ENABLE;
		break;
	case HV_X64_MSR_HYPERCALL: {
		u64 gfn;
		unsigned long addr;
		u8 instructions[4];

		/* if guest os id is not set hypercall should remain disabled */
		if (!hv->hv_guest_os_id)
			break;
		if (!(data & HV_X64_MSR_HYPERCALL_ENABLE)) {
			hv->hv_hypercall = data;
			break;
		}
		gfn = data >> HV_X64_MSR_HYPERCALL_PAGE_ADDRESS_SHIFT;
		addr = gfn_to_hva(kvm, gfn);
		if (kvm_is_error_hva(addr))
			return 1;
		kvm_x86_ops.patch_hypercall(vcpu, instructions);
		((unsigned char *)instructions)[3] = 0xc3; /* ret */
		if (__copy_to_user((void __user *)addr, instructions, 4))
			return 1;
		hv->hv_hypercall = data;
		mark_page_dirty(kvm, gfn);
		break;
	}
	case HV_X64_MSR_REFERENCE_TSC:
		hv->hv_tsc_page = data;
		if (hv->hv_tsc_page & HV_X64_MSR_TSC_REFERENCE_ENABLE)
			kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu);
		break;
	case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
		return kvm_hv_msr_set_crash_data(vcpu,
						 msr - HV_X64_MSR_CRASH_P0,
						 data);
	case HV_X64_MSR_CRASH_CTL:
		return kvm_hv_msr_set_crash_ctl(vcpu, data, host);
	case HV_X64_MSR_RESET:
		if (data == 1) {
			vcpu_debug(vcpu, "hyper-v reset requested\n");
			kvm_make_request(KVM_REQ_HV_RESET, vcpu);
		}
		break;
	case HV_X64_MSR_REENLIGHTENMENT_CONTROL:
		hv->hv_reenlightenment_control = data;
		break;
	case HV_X64_MSR_TSC_EMULATION_CONTROL:
		hv->hv_tsc_emulation_control = data;
		break;
	case HV_X64_MSR_TSC_EMULATION_STATUS:
		hv->hv_tsc_emulation_status = data;
		break;
	case HV_X64_MSR_TIME_REF_COUNT:
		/* read-only, but still ignore it if host-initiated wrmsr */
		if (!host)
			return 1;
		break;
	case HV_X64_MSR_SYNDBG_OPTIONS:
	case HV_X64_MSR_SYNDBG_CONTROL ... HV_X64_MSR_SYNDBG_PENDING_BUFFER:
		return syndbg_set_msr(vcpu, msr, data, host);
	default:
		vcpu_unimpl(vcpu, "Hyper-V unhandled wrmsr: 0x%x data 0x%llx\n",
			    msr, data);
		return 1;
	}
	return 0;
}

/* Calculate cpu time spent by current task in 100ns units */
static u64 current_task_runtime_100ns(void)
{
	u64 utime, stime;

	task_cputime_adjusted(current, &utime, &stime);

	return div_u64(utime + stime, 100);
}

static int kvm_hv_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host)
{
	struct kvm_vcpu_hv *hv_vcpu = &vcpu->arch.hyperv;

	switch (msr) {
	case HV_X64_MSR_VP_INDEX: {
		struct kvm_hv *hv = &vcpu->kvm->arch.hyperv;
		int vcpu_idx = kvm_vcpu_get_idx(vcpu);
		u32 new_vp_index = (u32)data;

		if (!host || new_vp_index >= KVM_MAX_VCPUS)
			return 1;

		if (new_vp_index == hv_vcpu->vp_index)
			return 0;

		/*
		 * The VP index is initialized to vcpu_idx by
		 * kvm_hv_vcpu_postcreate so they initially match.  Now the
		 * VP index is changing, adjust num_mismatched_vp_indexes if
		 * it now matches or no longer matches vcpu_idx.
		 */
		if (hv_vcpu->vp_index == vcpu_idx)
			atomic_inc(&hv->num_mismatched_vp_indexes);
		else if (new_vp_index == vcpu_idx)
			atomic_dec(&hv->num_mismatched_vp_indexes);

		hv_vcpu->vp_index = new_vp_index;
		break;
	}
	case HV_X64_MSR_VP_ASSIST_PAGE: {
		u64 gfn;
		unsigned long addr;

		if (!(data & HV_X64_MSR_VP_ASSIST_PAGE_ENABLE)) {
			hv_vcpu->hv_vapic = data;
			if (kvm_lapic_enable_pv_eoi(vcpu, 0, 0))
				return 1;
			break;
		}
		gfn = data >> HV_X64_MSR_VP_ASSIST_PAGE_ADDRESS_SHIFT;
		addr = kvm_vcpu_gfn_to_hva(vcpu, gfn);
		if (kvm_is_error_hva(addr))
			return 1;

		/*
		 * Clear apic_assist portion of struct hv_vp_assist_page
		 * only, there can be valuable data in the rest which needs
		 * to be preserved e.g. on migration.
		 */
		if (__put_user(0, (u32 __user *)addr))
			return 1;
		hv_vcpu->hv_vapic = data;
		kvm_vcpu_mark_page_dirty(vcpu, gfn);
		if (kvm_lapic_enable_pv_eoi(vcpu,
					    gfn_to_gpa(gfn) | KVM_MSR_ENABLED,
					    sizeof(struct hv_vp_assist_page)))
			return 1;
		break;
	}
	case HV_X64_MSR_EOI:
		return kvm_hv_vapic_msr_write(vcpu, APIC_EOI, data);
	case HV_X64_MSR_ICR:
		return kvm_hv_vapic_msr_write(vcpu, APIC_ICR, data);
	case HV_X64_MSR_TPR:
		return kvm_hv_vapic_msr_write(vcpu, APIC_TASKPRI, data);
	case HV_X64_MSR_VP_RUNTIME:
		if (!host)
			return 1;
		hv_vcpu->runtime_offset = data - current_task_runtime_100ns();
		break;
	case HV_X64_MSR_SCONTROL:
	case HV_X64_MSR_SVERSION:
	case HV_X64_MSR_SIEFP:
	case HV_X64_MSR_SIMP:
	case HV_X64_MSR_EOM:
	case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15:
		return synic_set_msr(vcpu_to_synic(vcpu), msr, data, host);
	case HV_X64_MSR_STIMER0_CONFIG:
	case HV_X64_MSR_STIMER1_CONFIG:
	case HV_X64_MSR_STIMER2_CONFIG:
	case HV_X64_MSR_STIMER3_CONFIG: {
		int timer_index = (msr - HV_X64_MSR_STIMER0_CONFIG)/2;

		return stimer_set_config(vcpu_to_stimer(vcpu, timer_index),
					 data, host);
	}
	case HV_X64_MSR_STIMER0_COUNT:
	case HV_X64_MSR_STIMER1_COUNT:
	case HV_X64_MSR_STIMER2_COUNT:
	case HV_X64_MSR_STIMER3_COUNT: {
		int timer_index = (msr - HV_X64_MSR_STIMER0_COUNT)/2;

		return stimer_set_count(vcpu_to_stimer(vcpu, timer_index),
					data, host);
	}
	case HV_X64_MSR_TSC_FREQUENCY:
	case HV_X64_MSR_APIC_FREQUENCY:
		/* read-only, but still ignore it if host-initiated wrmsr */
		if (!host)
			return 1;
		break;
	default:
		vcpu_unimpl(vcpu, "Hyper-V unhandled wrmsr: 0x%x data 0x%llx\n",
			    msr, data);
		return 1;
	}

	return 0;
}

static int kvm_hv_get_msr_pw(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata,
			     bool host)
{
	u64 data = 0;
	struct kvm *kvm = vcpu->kvm;
	struct kvm_hv *hv = &kvm->arch.hyperv;

	switch (msr) {
	case HV_X64_MSR_GUEST_OS_ID:
		data = hv->hv_guest_os_id;
		break;
	case HV_X64_MSR_HYPERCALL:
		data = hv->hv_hypercall;
		break;
	case HV_X64_MSR_TIME_REF_COUNT:
		data = get_time_ref_counter(kvm);
		break;
	case HV_X64_MSR_REFERENCE_TSC:
		data = hv->hv_tsc_page;
		break;
	case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
		return kvm_hv_msr_get_crash_data(vcpu,
						 msr - HV_X64_MSR_CRASH_P0,
						 pdata);
	case HV_X64_MSR_CRASH_CTL:
		return kvm_hv_msr_get_crash_ctl(vcpu, pdata);
	case HV_X64_MSR_RESET:
		data = 0;
		break;
	case HV_X64_MSR_REENLIGHTENMENT_CONTROL:
		data = hv->hv_reenlightenment_control;
		break;
	case HV_X64_MSR_TSC_EMULATION_CONTROL:
		data = hv->hv_tsc_emulation_control;
		break;
	case HV_X64_MSR_TSC_EMULATION_STATUS:
		data = hv->hv_tsc_emulation_status;
		break;
	case HV_X64_MSR_SYNDBG_OPTIONS:
	case HV_X64_MSR_SYNDBG_CONTROL ... HV_X64_MSR_SYNDBG_PENDING_BUFFER:
		return syndbg_get_msr(vcpu, msr, pdata, host);
	default:
		vcpu_unimpl(vcpu, "Hyper-V unhandled rdmsr: 0x%x\n", msr);
		return 1;
	}

	*pdata = data;
	return 0;
}

static int kvm_hv_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata,
			  bool host)
{
	u64 data = 0;
	struct kvm_vcpu_hv *hv_vcpu = &vcpu->arch.hyperv;

	switch (msr) {
	case HV_X64_MSR_VP_INDEX:
		data = hv_vcpu->vp_index;
		break;
	case HV_X64_MSR_EOI:
		return kvm_hv_vapic_msr_read(vcpu, APIC_EOI, pdata);
	case HV_X64_MSR_ICR:
		return kvm_hv_vapic_msr_read(vcpu, APIC_ICR, pdata);
	case HV_X64_MSR_TPR:
		return kvm_hv_vapic_msr_read(vcpu, APIC_TASKPRI, pdata);
	case HV_X64_MSR_VP_ASSIST_PAGE:
		data = hv_vcpu->hv_vapic;
		break;
	case HV_X64_MSR_VP_RUNTIME:
		data = current_task_runtime_100ns() + hv_vcpu->runtime_offset;
		break;
	case HV_X64_MSR_SCONTROL:
	case HV_X64_MSR_SVERSION:
	case HV_X64_MSR_SIEFP:
	case HV_X64_MSR_SIMP:
	case HV_X64_MSR_EOM:
	case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15:
		return synic_get_msr(vcpu_to_synic(vcpu), msr, pdata, host);
	case HV_X64_MSR_STIMER0_CONFIG:
	case HV_X64_MSR_STIMER1_CONFIG:
	case HV_X64_MSR_STIMER2_CONFIG:
	case HV_X64_MSR_STIMER3_CONFIG: {
		int timer_index = (msr - HV_X64_MSR_STIMER0_CONFIG)/2;

		return stimer_get_config(vcpu_to_stimer(vcpu, timer_index),
					 pdata);
	}
	case HV_X64_MSR_STIMER0_COUNT:
	case HV_X64_MSR_STIMER1_COUNT:
	case HV_X64_MSR_STIMER2_COUNT:
	case HV_X64_MSR_STIMER3_COUNT: {
		int timer_index = (msr - HV_X64_MSR_STIMER0_COUNT)/2;

		return stimer_get_count(vcpu_to_stimer(vcpu, timer_index),
					pdata);
	}
	case HV_X64_MSR_TSC_FREQUENCY:
		data = (u64)vcpu->arch.virtual_tsc_khz * 1000;
		break;
	case HV_X64_MSR_APIC_FREQUENCY:
		data = APIC_BUS_FREQUENCY;
		break;
	default:
		vcpu_unimpl(vcpu, "Hyper-V unhandled rdmsr: 0x%x\n", msr);
		return 1;
	}
	*pdata = data;
	return 0;
}

int kvm_hv_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host)
{
	if (kvm_hv_msr_partition_wide(msr)) {
		int r;

		mutex_lock(&vcpu->kvm->arch.hyperv.hv_lock);
		r = kvm_hv_set_msr_pw(vcpu, msr, data, host);
		mutex_unlock(&vcpu->kvm->arch.hyperv.hv_lock);
		return r;
	} else
		return kvm_hv_set_msr(vcpu, msr, data, host);
}

int kvm_hv_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata, bool host)
{
	if (kvm_hv_msr_partition_wide(msr)) {
		int r;

		mutex_lock(&vcpu->kvm->arch.hyperv.hv_lock);
		r = kvm_hv_get_msr_pw(vcpu, msr, pdata, host);
		mutex_unlock(&vcpu->kvm->arch.hyperv.hv_lock);
		return r;
	} else
		return kvm_hv_get_msr(vcpu, msr, pdata, host);
}

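/*
 * Translate a sparse Hyper-V VP set (bank mask plus 64-bit banks) into a
 * mask of vcpu indices.  When every vcpu's VP index equals its index, the
 * VP bitmap can be returned directly; otherwise it is remapped through
 * each vcpu's vp_index.
 */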
static __always_inline unsigned long *sparse_set_to_vcpu_mask(
	struct kvm *kvm, u64 *sparse_banks, u64 valid_bank_mask,
	u64 *vp_bitmap, unsigned long *vcpu_bitmap)
{
	struct kvm_hv *hv = &kvm->arch.hyperv;
	struct kvm_vcpu *vcpu;
	int i, bank, sbank = 0;

	memset(vp_bitmap, 0,
	       KVM_HV_MAX_SPARSE_VCPU_SET_BITS * sizeof(*vp_bitmap));
	for_each_set_bit(bank, (unsigned long *)&valid_bank_mask,
			 KVM_HV_MAX_SPARSE_VCPU_SET_BITS)
		vp_bitmap[bank] = sparse_banks[sbank++];

	if (likely(!atomic_read(&hv->num_mismatched_vp_indexes))) {
		/* for all vcpus vp_index == vcpu_idx */
		return (unsigned long *)vp_bitmap;
	}

	bitmap_zero(vcpu_bitmap, KVM_MAX_VCPUS);
	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (test_bit(vcpu_to_hv_vcpu(vcpu)->vp_index,
			     (unsigned long *)vp_bitmap))
			__set_bit(i, vcpu_bitmap);
	}
	return vcpu_bitmap;
}

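/*
 * Handle the HvFlushVirtualAddress{Space,List}(,Ex) hypercalls.  KVM always
 * performs a full TLB flush on the target vcpus, so the rep count can be
 * reported as fully completed regardless of the requested address list.
 */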
static u64 kvm_hv_flush_tlb(struct kvm_vcpu *current_vcpu, u64 ingpa,
			    u16 rep_cnt, bool ex)
{
	struct kvm *kvm = current_vcpu->kvm;
	struct kvm_vcpu_hv *hv_vcpu = &current_vcpu->arch.hyperv;
	struct hv_tlb_flush_ex flush_ex;
	struct hv_tlb_flush flush;
	u64 vp_bitmap[KVM_HV_MAX_SPARSE_VCPU_SET_BITS];
	DECLARE_BITMAP(vcpu_bitmap, KVM_MAX_VCPUS);
	unsigned long *vcpu_mask;
	u64 valid_bank_mask;
	u64 sparse_banks[64];
	int sparse_banks_len;
	bool all_cpus;

	if (!ex) {
		if (unlikely(kvm_read_guest(kvm, ingpa, &flush, sizeof(flush))))
			return HV_STATUS_INVALID_HYPERCALL_INPUT;

		trace_kvm_hv_flush_tlb(flush.processor_mask,
				       flush.address_space, flush.flags);

		valid_bank_mask = BIT_ULL(0);
		sparse_banks[0] = flush.processor_mask;

		/*
		 * Work around possible WS2012 bug: it sends hypercalls
		 * with processor_mask = 0x0 and HV_FLUSH_ALL_PROCESSORS clear,
		 * while also expecting us to flush something and crashing if
		 * we don't. Let's treat processor_mask == 0 same as
		 * HV_FLUSH_ALL_PROCESSORS.
		 */
		all_cpus = (flush.flags & HV_FLUSH_ALL_PROCESSORS) ||
			flush.processor_mask == 0;
	} else {
		if (unlikely(kvm_read_guest(kvm, ingpa, &flush_ex,
					    sizeof(flush_ex))))
			return HV_STATUS_INVALID_HYPERCALL_INPUT;

		trace_kvm_hv_flush_tlb_ex(flush_ex.hv_vp_set.valid_bank_mask,
					  flush_ex.hv_vp_set.format,
					  flush_ex.address_space,
					  flush_ex.flags);

		valid_bank_mask = flush_ex.hv_vp_set.valid_bank_mask;
		all_cpus = flush_ex.hv_vp_set.format !=
			HV_GENERIC_SET_SPARSE_4K;

		sparse_banks_len =
			bitmap_weight((unsigned long *)&valid_bank_mask, 64) *
			sizeof(sparse_banks[0]);

		if (!sparse_banks_len && !all_cpus)
			goto ret_success;

		if (!all_cpus &&
		    kvm_read_guest(kvm,
				   ingpa + offsetof(struct hv_tlb_flush_ex,
						    hv_vp_set.bank_contents),
				   sparse_banks,
				   sparse_banks_len))
			return HV_STATUS_INVALID_HYPERCALL_INPUT;
	}

	cpumask_clear(&hv_vcpu->tlb_flush);

	vcpu_mask = all_cpus ? NULL :
		sparse_set_to_vcpu_mask(kvm, sparse_banks, valid_bank_mask,
					vp_bitmap, vcpu_bitmap);

	/*
	 * vcpu->arch.cr3 may not be up-to-date for running vCPUs so we can't
	 * analyze it here, flush TLB regardless of the specified address space.
	 */
	kvm_make_vcpus_request_mask(kvm, KVM_REQ_HV_TLB_FLUSH,
				    NULL, vcpu_mask, &hv_vcpu->tlb_flush);

ret_success:
	/* We always do full TLB flush, set rep_done = rep_cnt. */
	return (u64)HV_STATUS_SUCCESS |
		((u64)rep_cnt << HV_HYPERCALL_REP_COMP_OFFSET);
}

static void kvm_send_ipi_to_many(struct kvm *kvm, u32 vector,
				 unsigned long *vcpu_bitmap)
{
	struct kvm_lapic_irq irq = {
		.delivery_mode = APIC_DM_FIXED,
		.vector = vector
	};
	struct kvm_vcpu *vcpu;
	int i;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (vcpu_bitmap && !test_bit(i, vcpu_bitmap))
			continue;

		/* We fail only when APIC is disabled */
		kvm_apic_set_irq(vcpu, &irq, NULL);
	}
}

static u64 kvm_hv_send_ipi(struct kvm_vcpu *current_vcpu, u64 ingpa, u64 outgpa,
			   bool ex, bool fast)
{
	struct kvm *kvm = current_vcpu->kvm;
	struct hv_send_ipi_ex send_ipi_ex;
	struct hv_send_ipi send_ipi;
	u64 vp_bitmap[KVM_HV_MAX_SPARSE_VCPU_SET_BITS];
	DECLARE_BITMAP(vcpu_bitmap, KVM_MAX_VCPUS);
	unsigned long *vcpu_mask;
	unsigned long valid_bank_mask;
	u64 sparse_banks[64];
	int sparse_banks_len;
	u32 vector;
	bool all_cpus;

	if (!ex) {
		if (!fast) {
			if (unlikely(kvm_read_guest(kvm, ingpa, &send_ipi,
						    sizeof(send_ipi))))
				return HV_STATUS_INVALID_HYPERCALL_INPUT;
			sparse_banks[0] = send_ipi.cpu_mask;
			vector = send_ipi.vector;
		} else {
			/* 'reserved' part of hv_send_ipi should be 0 */
			if (unlikely(ingpa >> 32 != 0))
				return HV_STATUS_INVALID_HYPERCALL_INPUT;
			sparse_banks[0] = outgpa;
			vector = (u32)ingpa;
		}
		all_cpus = false;
		valid_bank_mask = BIT_ULL(0);

		trace_kvm_hv_send_ipi(vector, sparse_banks[0]);
	} else {
		if (unlikely(kvm_read_guest(kvm, ingpa, &send_ipi_ex,
					    sizeof(send_ipi_ex))))
			return HV_STATUS_INVALID_HYPERCALL_INPUT;

		trace_kvm_hv_send_ipi_ex(send_ipi_ex.vector,
					 send_ipi_ex.vp_set.format,
					 send_ipi_ex.vp_set.valid_bank_mask);

		vector = send_ipi_ex.vector;
		valid_bank_mask = send_ipi_ex.vp_set.valid_bank_mask;
		sparse_banks_len = bitmap_weight(&valid_bank_mask, 64) *
			sizeof(sparse_banks[0]);

		all_cpus = send_ipi_ex.vp_set.format == HV_GENERIC_SET_ALL;

		if (!sparse_banks_len)
			goto ret_success;

		if (!all_cpus &&
		    kvm_read_guest(kvm,
				   ingpa + offsetof(struct hv_send_ipi_ex,
						    vp_set.bank_contents),
				   sparse_banks,
				   sparse_banks_len))
			return HV_STATUS_INVALID_HYPERCALL_INPUT;
	}

	if ((vector < HV_IPI_LOW_VECTOR) || (vector > HV_IPI_HIGH_VECTOR))
		return HV_STATUS_INVALID_HYPERCALL_INPUT;

	vcpu_mask = all_cpus ? NULL :
		sparse_set_to_vcpu_mask(kvm, sparse_banks, valid_bank_mask,
					vp_bitmap, vcpu_bitmap);

	kvm_send_ipi_to_many(kvm, vector, vcpu_mask);

ret_success:
	return HV_STATUS_SUCCESS;
}

bool kvm_hv_hypercall_enabled(struct kvm *kvm)
{
	return READ_ONCE(kvm->arch.hyperv.hv_guest_os_id) != 0;
}

static void kvm_hv_hypercall_set_result(struct kvm_vcpu *vcpu, u64 result)
{
	bool longmode;

	longmode = is_64_bit_mode(vcpu);
	if (longmode)
		kvm_rax_write(vcpu, result);
	else {
		kvm_rdx_write(vcpu, result >> 32);
		kvm_rax_write(vcpu, result & 0xffffffff);
	}
}

static int kvm_hv_hypercall_complete(struct kvm_vcpu *vcpu, u64 result)
{
	kvm_hv_hypercall_set_result(vcpu, result);
	++vcpu->stat.hypercalls;
	return kvm_skip_emulated_instruction(vcpu);
}

static int kvm_hv_hypercall_complete_userspace(struct kvm_vcpu *vcpu)
{
	return kvm_hv_hypercall_complete(vcpu, vcpu->run->hyperv.u.hcall.result);
}

static u16 kvm_hvcall_signal_event(struct kvm_vcpu *vcpu, bool fast, u64 param)
{
	struct eventfd_ctx *eventfd;

	if (unlikely(!fast)) {
		int ret;
		gpa_t gpa = param;

		if ((gpa & (__alignof__(param) - 1)) ||
		    offset_in_page(gpa) + sizeof(param) > PAGE_SIZE)
			return HV_STATUS_INVALID_ALIGNMENT;

		ret = kvm_vcpu_read_guest(vcpu, gpa, &param, sizeof(param));
		if (ret < 0)
			return HV_STATUS_INVALID_ALIGNMENT;
	}

	/*
	 * Per spec, bits 32-47 contain the extra "flag number".  However, we
	 * have no use for it, and in all known usecases it is zero, so just
	 * report lookup failure if it isn't.
	 */
	if (param & 0xffff00000000ULL)
		return HV_STATUS_INVALID_PORT_ID;
	/* remaining bits are reserved-zero */
	if (param & ~KVM_HYPERV_CONN_ID_MASK)
		return HV_STATUS_INVALID_HYPERCALL_INPUT;

	/* the eventfd is protected by vcpu->kvm->srcu, but conn_to_evt isn't */
	rcu_read_lock();
	eventfd = idr_find(&vcpu->kvm->arch.hyperv.conn_to_evt, param);
	rcu_read_unlock();
	if (!eventfd)
		return HV_STATUS_INVALID_PORT_ID;

	eventfd_signal(eventfd, 1);
	return HV_STATUS_SUCCESS;
}

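/*
 * Top-level hypercall dispatcher.  The calling convention follows the TLFS:
 * in 64-bit mode the input parameter is in RCX and the in/out GPAs are in
 * RDX/R8; in 32-bit mode each of them is split across EDX:EAX, EBX:ECX and
 * EDI:ESI respectively.
 */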
int kvm_hv_hypercall(struct kvm_vcpu *vcpu)
{
	u64 param, ingpa, outgpa, ret = HV_STATUS_SUCCESS;
	uint16_t code, rep_idx, rep_cnt;
	bool fast, rep;

	/*
	 * hypercall generates UD from non zero cpl and real mode
	 * per HYPER-V spec
	 */
	if (kvm_x86_ops.get_cpl(vcpu) != 0 || !is_protmode(vcpu)) {
		kvm_queue_exception(vcpu, UD_VECTOR);
		return 1;
	}

#ifdef CONFIG_X86_64
	if (is_64_bit_mode(vcpu)) {
		param = kvm_rcx_read(vcpu);
		ingpa = kvm_rdx_read(vcpu);
		outgpa = kvm_r8_read(vcpu);
	} else
#endif
	{
		param = ((u64)kvm_rdx_read(vcpu) << 32) |
			(kvm_rax_read(vcpu) & 0xffffffff);
		ingpa = ((u64)kvm_rbx_read(vcpu) << 32) |
			(kvm_rcx_read(vcpu) & 0xffffffff);
		outgpa = ((u64)kvm_rdi_read(vcpu) << 32) |
			(kvm_rsi_read(vcpu) & 0xffffffff);
	}

	code = param & 0xffff;
	fast = !!(param & HV_HYPERCALL_FAST_BIT);
	rep_cnt = (param >> HV_HYPERCALL_REP_COMP_OFFSET) & 0xfff;
	rep_idx = (param >> HV_HYPERCALL_REP_START_OFFSET) & 0xfff;
	rep = !!(rep_cnt || rep_idx);

	trace_kvm_hv_hypercall(code, fast, rep_cnt, rep_idx, ingpa, outgpa);

	switch (code) {
	case HVCALL_NOTIFY_LONG_SPIN_WAIT:
		if (unlikely(rep)) {
			ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
			break;
		}
		kvm_vcpu_on_spin(vcpu, true);
		break;
	case HVCALL_SIGNAL_EVENT:
		if (unlikely(rep)) {
			ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
			break;
		}
		ret = kvm_hvcall_signal_event(vcpu, fast, ingpa);
		if (ret != HV_STATUS_INVALID_PORT_ID)
			break;
		fallthrough;	/* maybe userspace knows this conn_id */
	case HVCALL_POST_MESSAGE:
		/* don't bother userspace if it has no way to handle it */
		if (unlikely(rep || !vcpu_to_synic(vcpu)->active)) {
			ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
			break;
		}
		vcpu->run->exit_reason = KVM_EXIT_HYPERV;
		vcpu->run->hyperv.type = KVM_EXIT_HYPERV_HCALL;
		vcpu->run->hyperv.u.hcall.input = param;
		vcpu->run->hyperv.u.hcall.params[0] = ingpa;
		vcpu->run->hyperv.u.hcall.params[1] = outgpa;
		vcpu->arch.complete_userspace_io =
				kvm_hv_hypercall_complete_userspace;
		return 0;
	case HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST:
		if (unlikely(fast || !rep_cnt || rep_idx)) {
			ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
			break;
		}
		ret = kvm_hv_flush_tlb(vcpu, ingpa, rep_cnt, false);
		break;
	case HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE:
		if (unlikely(fast || rep)) {
			ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
			break;
		}
		ret = kvm_hv_flush_tlb(vcpu, ingpa, rep_cnt, false);
		break;
	case HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX:
		if (unlikely(fast || !rep_cnt || rep_idx)) {
			ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
			break;
		}
		ret = kvm_hv_flush_tlb(vcpu, ingpa, rep_cnt, true);
		break;
	case HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX:
		if (unlikely(fast || rep)) {
			ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
			break;
		}
		ret = kvm_hv_flush_tlb(vcpu, ingpa, rep_cnt, true);
		break;
	case HVCALL_SEND_IPI:
		if (unlikely(rep)) {
			ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
			break;
		}
		ret = kvm_hv_send_ipi(vcpu, ingpa, outgpa, false, fast);
		break;
	case HVCALL_SEND_IPI_EX:
		if (unlikely(fast || rep)) {
			ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
			break;
		}
		ret = kvm_hv_send_ipi(vcpu, ingpa, outgpa, true, false);
		break;
	case HVCALL_POST_DEBUG_DATA:
	case HVCALL_RETRIEVE_DEBUG_DATA:
		if (unlikely(fast)) {
			ret = HV_STATUS_INVALID_PARAMETER;
			break;
		}
		fallthrough;
	case HVCALL_RESET_DEBUG_SESSION: {
		struct kvm_hv_syndbg *syndbg = vcpu_to_hv_syndbg(vcpu);

		if (!kvm_hv_is_syndbg_enabled(vcpu)) {
			ret = HV_STATUS_INVALID_HYPERCALL_CODE;
			break;
		}

		if (!(syndbg->options & HV_X64_SYNDBG_OPTION_USE_HCALLS)) {
			ret = HV_STATUS_OPERATION_DENIED;
			break;
		}
		vcpu->run->exit_reason = KVM_EXIT_HYPERV;
		vcpu->run->hyperv.type = KVM_EXIT_HYPERV_HCALL;
		vcpu->run->hyperv.u.hcall.input = param;
		vcpu->run->hyperv.u.hcall.params[0] = ingpa;
		vcpu->run->hyperv.u.hcall.params[1] = outgpa;
		vcpu->arch.complete_userspace_io =
				kvm_hv_hypercall_complete_userspace;
		return 0;
	}
	default:
		ret = HV_STATUS_INVALID_HYPERCALL_CODE;
		break;
	}

	return kvm_hv_hypercall_complete(vcpu, ret);
}

void kvm_hv_init_vm(struct kvm *kvm)
{
	mutex_init(&kvm->arch.hyperv.hv_lock);
	idr_init(&kvm->arch.hyperv.conn_to_evt);
}

void kvm_hv_destroy_vm(struct kvm *kvm)
{
	struct eventfd_ctx *eventfd;
	int i;

	idr_for_each_entry(&kvm->arch.hyperv.conn_to_evt, eventfd, i)
		eventfd_ctx_put(eventfd);
	idr_destroy(&kvm->arch.hyperv.conn_to_evt);
}

static int kvm_hv_eventfd_assign(struct kvm *kvm, u32 conn_id, int fd)
{
	struct kvm_hv *hv = &kvm->arch.hyperv;
	struct eventfd_ctx *eventfd;
	int ret;

	eventfd = eventfd_ctx_fdget(fd);
	if (IS_ERR(eventfd))
		return PTR_ERR(eventfd);

	mutex_lock(&hv->hv_lock);
	ret = idr_alloc(&hv->conn_to_evt, eventfd, conn_id, conn_id + 1,
			GFP_KERNEL_ACCOUNT);
	mutex_unlock(&hv->hv_lock);

	if (ret >= 0)
		return 0;

	if (ret == -ENOSPC)
		ret = -EEXIST;
	eventfd_ctx_put(eventfd);
	return ret;
}

static int kvm_hv_eventfd_deassign(struct kvm *kvm, u32 conn_id)
{
	struct kvm_hv *hv = &kvm->arch.hyperv;
	struct eventfd_ctx *eventfd;

	mutex_lock(&hv->hv_lock);
	eventfd = idr_remove(&hv->conn_to_evt, conn_id);
	mutex_unlock(&hv->hv_lock);

	if (!eventfd)
		return -ENOENT;

	synchronize_srcu(&kvm->srcu);
	eventfd_ctx_put(eventfd);
	return 0;
}

int kvm_vm_ioctl_hv_eventfd(struct kvm *kvm, struct kvm_hyperv_eventfd *args)
{
	if ((args->flags & ~KVM_HYPERV_EVENTFD_DEASSIGN) ||
	    (args->conn_id & ~KVM_HYPERV_CONN_ID_MASK))
		return -EINVAL;

	if (args->flags == KVM_HYPERV_EVENTFD_DEASSIGN)
		return kvm_hv_eventfd_deassign(kvm, args->conn_id);
	return kvm_hv_eventfd_assign(kvm, args->conn_id, args->fd);
}

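/*
 * Fill in the KVM_GET_SUPPORTED_HV_CPUID leaves.  The entries are built on
 * the stack and copied out in one go; the NESTED_FEATURES leaf is dropped
 * when enlightened VMCS is not supported.
 */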
int kvm_vcpu_ioctl_get_hv_cpuid(struct kvm_vcpu *vcpu, struct kvm_cpuid2 *cpuid,
				struct kvm_cpuid_entry2 __user *entries)
{
	uint16_t evmcs_ver = 0;
	struct kvm_cpuid_entry2 cpuid_entries[] = {
		{ .function = HYPERV_CPUID_VENDOR_AND_MAX_FUNCTIONS },
		{ .function = HYPERV_CPUID_INTERFACE },
		{ .function = HYPERV_CPUID_VERSION },
		{ .function = HYPERV_CPUID_FEATURES },
		{ .function = HYPERV_CPUID_ENLIGHTMENT_INFO },
		{ .function = HYPERV_CPUID_IMPLEMENT_LIMITS },
		{ .function = HYPERV_CPUID_SYNDBG_VENDOR_AND_MAX_FUNCTIONS },
		{ .function = HYPERV_CPUID_SYNDBG_INTERFACE },
		{ .function = HYPERV_CPUID_SYNDBG_PLATFORM_CAPABILITIES },
		{ .function = HYPERV_CPUID_NESTED_FEATURES },
	};
	int i, nent = ARRAY_SIZE(cpuid_entries);

	if (kvm_x86_ops.nested_ops->get_evmcs_version)
		evmcs_ver = kvm_x86_ops.nested_ops->get_evmcs_version(vcpu);

	/* Skip NESTED_FEATURES if eVMCS is not supported */
	if (!evmcs_ver)
		--nent;

	if (cpuid->nent < nent)
		return -E2BIG;

	if (cpuid->nent > nent)
		cpuid->nent = nent;

	for (i = 0; i < nent; i++) {
		struct kvm_cpuid_entry2 *ent = &cpuid_entries[i];
		u32 signature[3];

		switch (ent->function) {
		case HYPERV_CPUID_VENDOR_AND_MAX_FUNCTIONS:
			memcpy(signature, "Linux KVM Hv", 12);

			ent->eax = HYPERV_CPUID_SYNDBG_PLATFORM_CAPABILITIES;
			ent->ebx = signature[0];
			ent->ecx = signature[1];
			ent->edx = signature[2];
			break;

		case HYPERV_CPUID_INTERFACE:
			memcpy(signature, "Hv#1\0\0\0\0\0\0\0\0", 12);
			ent->eax = signature[0];
			break;

		case HYPERV_CPUID_VERSION:
			/*
			 * We implement some Hyper-V 2016 functions so let's use
			 * this version.
			 */
			ent->eax = 0x00003839;
			ent->ebx = 0x000A0000;
			break;

		case HYPERV_CPUID_FEATURES:
			ent->eax |= HV_X64_MSR_VP_RUNTIME_AVAILABLE;
			ent->eax |= HV_MSR_TIME_REF_COUNT_AVAILABLE;
			ent->eax |= HV_X64_MSR_SYNIC_AVAILABLE;
			ent->eax |= HV_MSR_SYNTIMER_AVAILABLE;
			ent->eax |= HV_X64_MSR_APIC_ACCESS_AVAILABLE;
			ent->eax |= HV_X64_MSR_HYPERCALL_AVAILABLE;
			ent->eax |= HV_X64_MSR_VP_INDEX_AVAILABLE;
			ent->eax |= HV_X64_MSR_RESET_AVAILABLE;
			ent->eax |= HV_MSR_REFERENCE_TSC_AVAILABLE;
			ent->eax |= HV_X64_ACCESS_FREQUENCY_MSRS;
			ent->eax |= HV_X64_ACCESS_REENLIGHTENMENT;

			ent->ebx |= HV_X64_POST_MESSAGES;
			ent->ebx |= HV_X64_SIGNAL_EVENTS;

			ent->edx |= HV_FEATURE_FREQUENCY_MSRS_AVAILABLE;
			ent->edx |= HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE;

			ent->ebx |= HV_DEBUGGING;
			ent->edx |= HV_X64_GUEST_DEBUGGING_AVAILABLE;
			ent->edx |= HV_FEATURE_DEBUG_MSRS_AVAILABLE;

			/*
			 * Direct Synthetic timers only make sense with
			 * in-kernel LAPIC
			 */
			if (lapic_in_kernel(vcpu))
				ent->edx |= HV_STIMER_DIRECT_MODE_AVAILABLE;

			break;

		case HYPERV_CPUID_ENLIGHTMENT_INFO:
			ent->eax |= HV_X64_REMOTE_TLB_FLUSH_RECOMMENDED;
			ent->eax |= HV_X64_APIC_ACCESS_RECOMMENDED;
			ent->eax |= HV_X64_RELAXED_TIMING_RECOMMENDED;
			ent->eax |= HV_X64_CLUSTER_IPI_RECOMMENDED;
			ent->eax |= HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED;
			if (evmcs_ver)
				ent->eax |= HV_X64_ENLIGHTENED_VMCS_RECOMMENDED;
			if (!cpu_smt_possible())
				ent->eax |= HV_X64_NO_NONARCH_CORESHARING;
			/*
			 * Default number of spinlock retry attempts, matches
			 * HyperV 2016.
			 */
			ent->ebx = 0x00000FFF;

			break;

		case HYPERV_CPUID_IMPLEMENT_LIMITS:
			/* Maximum number of virtual processors */
			ent->eax = KVM_MAX_VCPUS;
			/*
			 * Maximum number of logical processors, matches
			 * HyperV 2016.
			 */
			ent->ebx = 64;

			break;

		case HYPERV_CPUID_NESTED_FEATURES:
			ent->eax = evmcs_ver;

			break;

		case HYPERV_CPUID_SYNDBG_VENDOR_AND_MAX_FUNCTIONS:
			memcpy(signature, "Linux KVM Hv", 12);

			ent->eax = 0;
			ent->ebx = signature[0];
			ent->ecx = signature[1];
			ent->edx = signature[2];
			break;

		case HYPERV_CPUID_SYNDBG_INTERFACE:
			memcpy(signature, "VS#1\0\0\0\0\0\0\0\0", 12);
			ent->eax = signature[0];
			break;

		case HYPERV_CPUID_SYNDBG_PLATFORM_CAPABILITIES:
			ent->eax |= HV_X64_SYNDBG_CAP_ALLOW_KERNEL_DEBUGGING;
			break;

		default:
			break;
		}
	}

	if (copy_to_user(entries, cpuid_entries,
			 nent * sizeof(struct kvm_cpuid_entry2)))
		return -EFAULT;

	return 0;
}