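/*
 * Core Hyper-V support routines: hypercall page setup, the TSC-page
 * based clocksource, message posting and Synthetic Interrupt
 * Controller (SynIC) management.
 */
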
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/hyperv.h>
#include <linux/version.h>
#include <linux/interrupt.h>
#include <linux/clockchips.h>
#include <asm/hyperv.h>
#include <asm/mshyperv.h>
#include "hyperv_vmbus.h"

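/* The one and only global Hyper-V context. */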
struct hv_context hv_context = {
	.synic_initialized = false,
	.hypercall_page = NULL,
};

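/*
 * The Hyper-V reference counter and the synthetic timers tick in
 * 100ns units, i.e. at HV_TIMER_FREQUENCY = 10 MHz.
 */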
#define HV_TIMER_FREQUENCY (10 * 1000 * 1000)
#define HV_MAX_MAX_DELTA_TICKS 0xffffffff
#define HV_MIN_DELTA_TICKS 1

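/* Hypervisor version information, filled in by query_hypervisor_info(). */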
unsigned int host_info_eax;
unsigned int host_info_ebx;
unsigned int host_info_ecx;
unsigned int host_info_edx;

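/*
 * query_hypervisor_info - Get version info of the Windows hypervisor.
 */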
static int query_hypervisor_info(void)
{
	unsigned int eax;
	unsigned int ebx;
	unsigned int ecx;
	unsigned int edx;
	unsigned int max_leaf;
	unsigned int op;

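	/*
	 * Assumes the caller has already confirmed that we are running
	 * on the Hyper-V hypervisor, since the hypervisor CPUID leaves
	 * are queried unconditionally.
	 */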
	eax = 0;
	ebx = 0;
	ecx = 0;
	edx = 0;
	op = HVCPUID_VENDOR_MAXFUNCTION;
	cpuid(op, &eax, &ebx, &ecx, &edx);

	max_leaf = eax;

	if (max_leaf >= HVCPUID_VERSION) {
		eax = 0;
		ebx = 0;
		ecx = 0;
		edx = 0;
		op = HVCPUID_VERSION;
		cpuid(op, &eax, &ebx, &ecx, &edx);
		host_info_eax = eax;
		host_info_ebx = ebx;
		host_info_ecx = ecx;
		host_info_edx = edx;
	}
	return max_leaf;
}

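/*
 * hv_do_hypercall - Invoke the specified hypercall.
 */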
u64 hv_do_hypercall(u64 control, void *input, void *output)
{
	u64 input_address = (input) ? virt_to_phys(input) : 0;
	u64 output_address = (output) ? virt_to_phys(output) : 0;
	void *hypercall_page = hv_context.hypercall_page;
#ifdef CONFIG_X86_64
	u64 hv_status = 0;

	if (!hypercall_page)
		return (u64)ULLONG_MAX;

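	/*
	 * x86_64 hypercall ABI: the control word goes in %rcx, the
	 * input GPA in %rdx and the output GPA in %r8; the status is
	 * returned in %rax.
	 */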
	__asm__ __volatile__("mov %0, %%r8" : : "r" (output_address) : "r8");
	__asm__ __volatile__("call *%3" : "=a" (hv_status) :
			     "c" (control), "d" (input_address),
			     "m" (hypercall_page));

	return hv_status;

#else

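	/*
	 * On 32-bit x86 the 64-bit parameters are passed as register
	 * pairs: control in %edx:%eax, input GPA in %ebx:%ecx, output
	 * GPA in %edi:%esi; the status comes back in %edx:%eax.
	 */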
	u32 control_hi = control >> 32;
	u32 control_lo = control & 0xFFFFFFFF;
	u32 hv_status_hi = 1;
	u32 hv_status_lo = 1;
	u32 input_address_hi = input_address >> 32;
	u32 input_address_lo = input_address & 0xFFFFFFFF;
	u32 output_address_hi = output_address >> 32;
	u32 output_address_lo = output_address & 0xFFFFFFFF;

	if (!hypercall_page)
		return (u64)ULLONG_MAX;

	__asm__ __volatile__ ("call *%8" : "=d"(hv_status_hi),
			      "=a"(hv_status_lo) : "d" (control_hi),
			      "a" (control_lo), "b" (input_address_hi),
			      "c" (input_address_lo), "D"(output_address_hi),
			      "S"(output_address_lo), "m" (hypercall_page));

	return hv_status_lo | ((u64)hv_status_hi << 32);
#endif
}
EXPORT_SYMBOL_GPL(hv_do_hypercall);

#ifdef CONFIG_X86_64
static cycle_t read_hv_clock_tsc(struct clocksource *arg)
{
	cycle_t current_tick;
	struct ms_hyperv_tsc_page *tsc_pg = hv_context.tsc_page;

	if (tsc_pg->tsc_sequence != 0) {
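		/*
		 * The hypervisor updates the TSC page under a sequence
		 * counter. Re-read until the sequence is stable, and
		 * fall back to the reference count MSR if the page has
		 * been invalidated (sequence == 0).
		 */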
		while (1) {
			cycle_t tmp;
			u32 sequence = tsc_pg->tsc_sequence;
			u64 cur_tsc;
			u64 scale = tsc_pg->tsc_scale;
			s64 offset = tsc_pg->tsc_offset;

			rdtscll(cur_tsc);

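			/*
			 * current_tick = (cur_tsc * scale) >> 64:
			 * mulq leaves the high 64 bits of the 128-bit
			 * product in %rdx.
			 */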
			asm("mulq %3"
				: "=d" (current_tick), "=a" (tmp)
				: "a" (cur_tsc), "r" (scale));

			current_tick += offset;
			if (tsc_pg->tsc_sequence == sequence)
				return current_tick;

			if (tsc_pg->tsc_sequence != 0)
				continue;

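			/*
			 * The page was invalidated; fall back to the
			 * reference counter MSR.
			 */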
			break;
		}
	}
	rdmsrl(HV_X64_MSR_TIME_REF_COUNT, current_tick);
	return current_tick;
}

static struct clocksource hyperv_cs_tsc = {
	.name = "hyperv_clocksource_tsc_page",
	.rating = 425,
	.read = read_hv_clock_tsc,
	.mask = CLOCKSOURCE_MASK(64),
	.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
#endif

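/*
 * hv_init - Main initialization routine.
 *
 * This routine must be called before any other routines in here are called.
 */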
int hv_init(void)
{
	int max_leaf;
	union hv_x64_msr_hypercall_contents hypercall_msr;
	void *virtaddr = NULL;

	memset(hv_context.synic_event_page, 0, sizeof(void *) * NR_CPUS);
	memset(hv_context.synic_message_page, 0,
	       sizeof(void *) * NR_CPUS);
	memset(hv_context.post_msg_page, 0,
	       sizeof(void *) * NR_CPUS);
	memset(hv_context.vp_index, 0,
	       sizeof(int) * NR_CPUS);
	memset(hv_context.event_dpc, 0,
	       sizeof(void *) * NR_CPUS);
	memset(hv_context.msg_dpc, 0,
	       sizeof(void *) * NR_CPUS);
	memset(hv_context.clk_evt, 0,
	       sizeof(void *) * NR_CPUS);

	max_leaf = query_hypervisor_info();

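	/* Write our OS ID. */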
	hv_context.guestid = generate_guest_id(0, LINUX_VERSION_CODE, 0);
	wrmsrl(HV_X64_MSR_GUEST_OS_ID, hv_context.guestid);

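	/* See if the hypercall page is already set. */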
	rdmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);

	virtaddr = __vmalloc(PAGE_SIZE, GFP_KERNEL, PAGE_KERNEL_EXEC);
	if (!virtaddr)
		goto cleanup;

	hypercall_msr.enable = 1;
	hypercall_msr.guest_physical_address = vmalloc_to_pfn(virtaddr);
	wrmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);

	/* Confirm that the hypercall page did get set up. */
	hypercall_msr.as_uint64 = 0;
	rdmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
	if (!hypercall_msr.enable)
		goto cleanup;

	hv_context.hypercall_page = virtaddr;

#ifdef CONFIG_X86_64
	if (ms_hyperv.features & HV_X64_MSR_REFERENCE_TSC_AVAILABLE) {
		union hv_x64_msr_hypercall_contents tsc_msr;
		void *va_tsc;

		va_tsc = __vmalloc(PAGE_SIZE, GFP_KERNEL, PAGE_KERNEL);
		if (!va_tsc)
			goto cleanup;
		hv_context.tsc_page = va_tsc;

		rdmsrl(HV_X64_MSR_REFERENCE_TSC, tsc_msr.as_uint64);

		tsc_msr.enable = 1;
		tsc_msr.guest_physical_address = vmalloc_to_pfn(va_tsc);

		wrmsrl(HV_X64_MSR_REFERENCE_TSC, tsc_msr.as_uint64);
		clocksource_register_hz(&hyperv_cs_tsc, NSEC_PER_SEC/100);
	}
#endif
	return 0;

cleanup:
	if (virtaddr) {
		if (hypercall_msr.enable) {
			hypercall_msr.as_uint64 = 0;
			wrmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
		}

		vfree(virtaddr);
	}

	return -ENOTSUPP;
}

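/*
 * hv_cleanup - Cleanup routine.
 *
 * This routine is called normally during driver unloading or exiting.
 */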
void hv_cleanup(bool crash)
{
	union hv_x64_msr_hypercall_contents hypercall_msr;

	/* Reset our OS id. */
	wrmsrl(HV_X64_MSR_GUEST_OS_ID, 0);

	if (hv_context.hypercall_page) {
		hypercall_msr.as_uint64 = 0;
		wrmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
		if (!crash)
			vfree(hv_context.hypercall_page);
		hv_context.hypercall_page = NULL;
	}

#ifdef CONFIG_X86_64
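	/*
	 * Cleanup the TSC page based clocksource.
	 */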
	if (ms_hyperv.features & HV_X64_MSR_REFERENCE_TSC_AVAILABLE) {
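		/*
		 * Crash can happen in an interrupt context, where
		 * unregistering a clocksource is impossible and
		 * redundant in this case.
		 */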
		if (!oops_in_progress) {
			clocksource_change_rating(&hyperv_cs_tsc, 10);
			clocksource_unregister(&hyperv_cs_tsc);
		}

		hypercall_msr.as_uint64 = 0;
		wrmsrl(HV_X64_MSR_REFERENCE_TSC, hypercall_msr.as_uint64);
		if (!crash)
			vfree(hv_context.tsc_page);
		hv_context.tsc_page = NULL;
	}
#endif
}

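/*
 * hv_post_message - Post a message using the hypervisor message IPC.
 *
 * This involves a hypercall.
 */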
int hv_post_message(union hv_connection_id connection_id,
		    enum hv_message_type message_type,
		    void *payload, size_t payload_size)
{
	struct hv_input_post_message *aligned_msg;
	u64 status;

	if (payload_size > HV_MESSAGE_PAYLOAD_BYTE_COUNT)
		return -EMSGSIZE;

	/*
	 * The per-cpu post_msg_page is page aligned, which more than
	 * satisfies the hypercall's alignment requirement for the
	 * input buffer; get_cpu() keeps preemption disabled until the
	 * matching put_cpu().
	 */
	aligned_msg = (struct hv_input_post_message *)
			hv_context.post_msg_page[get_cpu()];

	aligned_msg->connectionid = connection_id;
	aligned_msg->reserved = 0;
	aligned_msg->message_type = message_type;
	aligned_msg->payload_size = payload_size;
	memcpy((void *)aligned_msg->payload, payload, payload_size);

	status = hv_do_hypercall(HVCALL_POST_MESSAGE, aligned_msg, NULL);

	put_cpu();
	return status & 0xFFFF;
}

static int hv_ce_set_next_event(unsigned long delta,
				struct clock_event_device *evt)
{
	cycle_t current_tick;

	WARN_ON(!clockevent_state_oneshot(evt));

	rdmsrl(HV_X64_MSR_TIME_REF_COUNT, current_tick);
	current_tick += delta;
	wrmsrl(HV_X64_MSR_STIMER0_COUNT, current_tick);
	return 0;
}

static int hv_ce_shutdown(struct clock_event_device *evt)
{
	wrmsrl(HV_X64_MSR_STIMER0_COUNT, 0);
	wrmsrl(HV_X64_MSR_STIMER0_CONFIG, 0);

	return 0;
}

static int hv_ce_set_oneshot(struct clock_event_device *evt)
{
	union hv_timer_config timer_cfg;

	/* Start from a clean config; the stack union is otherwise uninitialized. */
	timer_cfg.as_uint64 = 0;
	timer_cfg.enable = 1;
	timer_cfg.auto_enable = 1;
	timer_cfg.sintx = VMBUS_MESSAGE_SINT;
	wrmsrl(HV_X64_MSR_STIMER0_CONFIG, timer_cfg.as_uint64);

	return 0;
}

static void hv_init_clockevent_device(struct clock_event_device *dev, int cpu)
{
	dev->name = "Hyper-V clockevent";
	dev->features = CLOCK_EVT_FEAT_ONESHOT;
	dev->cpumask = cpumask_of(cpu);
	dev->rating = 1000;

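	/*
	 * The rating is set very high so this clockevent device is
	 * picked in preference to the emulated LAPIC timer.
	 */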
	dev->set_state_shutdown = hv_ce_shutdown;
	dev->set_state_oneshot = hv_ce_set_oneshot;
	dev->set_next_event = hv_ce_set_next_event;
}

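/* Allocate per-cpu SynIC pages, DPC tasklets and clockevent devices. */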
int hv_synic_alloc(void)
{
	size_t size = sizeof(struct tasklet_struct);
	size_t ced_size = sizeof(struct clock_event_device);
	int cpu;

	hv_context.hv_numa_map = kzalloc(sizeof(struct cpumask) * nr_node_ids,
					 GFP_ATOMIC);
	if (hv_context.hv_numa_map == NULL) {
		pr_err("Unable to allocate NUMA map\n");
		goto err;
	}

	for_each_online_cpu(cpu) {
		hv_context.event_dpc[cpu] = kmalloc(size, GFP_ATOMIC);
		if (hv_context.event_dpc[cpu] == NULL) {
			pr_err("Unable to allocate event dpc\n");
			goto err;
		}
		tasklet_init(hv_context.event_dpc[cpu], vmbus_on_event, cpu);

		hv_context.msg_dpc[cpu] = kmalloc(size, GFP_ATOMIC);
		if (hv_context.msg_dpc[cpu] == NULL) {
			pr_err("Unable to allocate message dpc\n");
			goto err;
		}
		tasklet_init(hv_context.msg_dpc[cpu], vmbus_on_msg_dpc, cpu);

		hv_context.clk_evt[cpu] = kzalloc(ced_size, GFP_ATOMIC);
		if (hv_context.clk_evt[cpu] == NULL) {
			pr_err("Unable to allocate clock event device\n");
			goto err;
		}

		hv_init_clockevent_device(hv_context.clk_evt[cpu], cpu);

		hv_context.synic_message_page[cpu] =
			(void *)get_zeroed_page(GFP_ATOMIC);
		if (hv_context.synic_message_page[cpu] == NULL) {
			pr_err("Unable to allocate SYNIC message page\n");
			goto err;
		}

		hv_context.synic_event_page[cpu] =
			(void *)get_zeroed_page(GFP_ATOMIC);
		if (hv_context.synic_event_page[cpu] == NULL) {
			pr_err("Unable to allocate SYNIC event page\n");
			goto err;
		}

		hv_context.post_msg_page[cpu] =
			(void *)get_zeroed_page(GFP_ATOMIC);
		if (hv_context.post_msg_page[cpu] == NULL) {
			pr_err("Unable to allocate post msg page\n");
			goto err;
		}
	}

	return 0;
err:
	return -ENOMEM;
}

static void hv_synic_free_cpu(int cpu)
{
	kfree(hv_context.event_dpc[cpu]);
	kfree(hv_context.msg_dpc[cpu]);
	kfree(hv_context.clk_evt[cpu]);
	if (hv_context.synic_event_page[cpu])
		free_page((unsigned long)hv_context.synic_event_page[cpu]);
	if (hv_context.synic_message_page[cpu])
		free_page((unsigned long)hv_context.synic_message_page[cpu]);
	if (hv_context.post_msg_page[cpu])
		free_page((unsigned long)hv_context.post_msg_page[cpu]);
}

void hv_synic_free(void)
{
	int cpu;

	kfree(hv_context.hv_numa_map);
	for_each_online_cpu(cpu)
		hv_synic_free_cpu(cpu);
}

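/*
 * hv_synic_init - Initialize the Synthetic Interrupt Controller.
 *
 * If it is already initialized by another entity (ie x2v shim), we need to
 * retrieve the initialized message and event pages.  Otherwise, we create and
 * initialize the message and event pages.
 */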
void hv_synic_init(void *arg)
{
	u64 version;
	union hv_synic_simp simp;
	union hv_synic_siefp siefp;
	union hv_synic_sint shared_sint;
	union hv_synic_scontrol sctrl;
	u64 vp_index;
	int cpu = smp_processor_id();

	if (!hv_context.hypercall_page)
		return;

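	/* Check the SynIC version. */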
	rdmsrl(HV_X64_MSR_SVERSION, version);

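	/* Set up the SynIC's message page. */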
	rdmsrl(HV_X64_MSR_SIMP, simp.as_uint64);
	simp.simp_enabled = 1;
	simp.base_simp_gpa = virt_to_phys(hv_context.synic_message_page[cpu])
		>> PAGE_SHIFT;

	wrmsrl(HV_X64_MSR_SIMP, simp.as_uint64);

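	/* Set up the SynIC's event page. */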
	rdmsrl(HV_X64_MSR_SIEFP, siefp.as_uint64);
	siefp.siefp_enabled = 1;
	siefp.base_siefp_gpa = virt_to_phys(hv_context.synic_event_page[cpu])
		>> PAGE_SHIFT;

	wrmsrl(HV_X64_MSR_SIEFP, siefp.as_uint64);

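	/* Set up the shared SINT used for VMBus messages. */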
	rdmsrl(HV_X64_MSR_SINT0 + VMBUS_MESSAGE_SINT, shared_sint.as_uint64);

	shared_sint.as_uint64 = 0;
	shared_sint.vector = HYPERVISOR_CALLBACK_VECTOR;
	shared_sint.masked = false;
	shared_sint.auto_eoi = true;

	wrmsrl(HV_X64_MSR_SINT0 + VMBUS_MESSAGE_SINT, shared_sint.as_uint64);

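	/* Enable the global SynIC bit. */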
	rdmsrl(HV_X64_MSR_SCONTROL, sctrl.as_uint64);
	sctrl.enable = 1;

	wrmsrl(HV_X64_MSR_SCONTROL, sctrl.as_uint64);

	hv_context.synic_initialized = true;

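	/*
	 * Set up the mapping between Hyper-V cpuids and Linux cpuids;
	 * this array is indexed by the Linux cpuid.
	 */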
	rdmsrl(HV_X64_MSR_VP_INDEX, vp_index);
	hv_context.vp_index[cpu] = (u32)vp_index;

	INIT_LIST_HEAD(&hv_context.percpu_list[cpu]);

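	/*
	 * Register the per-cpu clockevent source when the synthetic
	 * timers are available.
	 */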
	if (ms_hyperv.features & HV_X64_MSR_SYNTIMER_AVAILABLE)
		clockevents_config_and_register(hv_context.clk_evt[cpu],
						HV_TIMER_FREQUENCY,
						HV_MIN_DELTA_TICKS,
						HV_MAX_MAX_DELTA_TICKS);
}

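/*
 * hv_synic_clockevents_cleanup - Cleanup clockevent devices.
 */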
void hv_synic_clockevents_cleanup(void)
{
	int cpu;

	if (!(ms_hyperv.features & HV_X64_MSR_SYNTIMER_AVAILABLE))
		return;

	for_each_online_cpu(cpu)
		clockevents_unbind_device(hv_context.clk_evt[cpu], cpu);
}

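/*
 * hv_synic_cleanup - Cleanup routine for hv_synic_init().
 */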
void hv_synic_cleanup(void *arg)
{
	union hv_synic_sint shared_sint;
	union hv_synic_simp simp;
	union hv_synic_siefp siefp;
	union hv_synic_scontrol sctrl;
	int cpu = smp_processor_id();

	if (!hv_context.synic_initialized)
		return;

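	/* Turn off the per-cpu clockevent device. */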
	if (ms_hyperv.features & HV_X64_MSR_SYNTIMER_AVAILABLE)
		hv_ce_shutdown(hv_context.clk_evt[cpu]);

	rdmsrl(HV_X64_MSR_SINT0 + VMBUS_MESSAGE_SINT, shared_sint.as_uint64);

	shared_sint.masked = 1;

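	/* Disable the interrupt. */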
	wrmsrl(HV_X64_MSR_SINT0 + VMBUS_MESSAGE_SINT, shared_sint.as_uint64);

	rdmsrl(HV_X64_MSR_SIMP, simp.as_uint64);
	simp.simp_enabled = 0;
	simp.base_simp_gpa = 0;

	wrmsrl(HV_X64_MSR_SIMP, simp.as_uint64);

	rdmsrl(HV_X64_MSR_SIEFP, siefp.as_uint64);
	siefp.siefp_enabled = 0;
	siefp.base_siefp_gpa = 0;

	wrmsrl(HV_X64_MSR_SIEFP, siefp.as_uint64);

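	/* Disable the global SynIC bit. */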
	rdmsrl(HV_X64_MSR_SCONTROL, sctrl.as_uint64);
	sctrl.enable = 0;
	wrmsrl(HV_X64_MSR_SCONTROL, sctrl.as_uint64);
}