/*
 * Xen time implementation.
 *
 * This is implemented in terms of a clocksource driver which uses
 * the hypervisor clock as a nanosecond timebase, and a clockevent
 * driver which uses the hypervisor's timer mechanism.
 */
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/clocksource.h>
#include <linux/clockchips.h>
#include <linux/kernel_stat.h>
#include <linux/math64.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/pvclock_gtod.h>

#include <asm/pvclock.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>

#include <xen/events.h>
#include <xen/features.h>
#include <xen/interface/xen.h>
#include <xen/interface/vcpu.h>

#include "xen-ops.h"

/* Minimum delta, in ns, that a one-shot timer may be programmed for. */
#define TIMER_SLOP	100000
#define NS_PER_TICK	(1000000000LL / HZ)

/* runstate info updated by Xen */
static DEFINE_PER_CPU(struct vcpu_runstate_info, xen_runstate);

/* snapshot of the runstate info taken at the last stolen-time update */
static DEFINE_PER_CPU(struct vcpu_runstate_info, xen_runstate_snapshot);

/* leftover stolen time (ns), smaller than one tick, carried forward */
static DEFINE_PER_CPU(u64, xen_residual_stolen);

/* Read a consistent snapshot of a 64-bit value, even on a 32-bit kernel. */
static u64 get64(const u64 *p)
{
	u64 ret;

	if (BITS_PER_LONG < 64) {
		u32 *p32 = (u32 *)p;
		u32 h, l;

		/*
		 * Read the high word, then the low word, then re-check
		 * the high word; this only loops if the low word wraps
		 * and carries into the high word between the reads.
		 */
		do {
			h = p32[1];
			barrier();
			l = p32[0];
			barrier();
		} while (p32[1] != h);

		ret = (((u64)h) << 32) | l;
	} else
		ret = *p;

	return ret;
}

/*
 * Runstate accounting
 */
static void get_runstate_snapshot(struct vcpu_runstate_info *res)
{
	u64 state_time;
	struct vcpu_runstate_info *state;

	BUG_ON(preemptible());

	state = &__get_cpu_var(xen_runstate);

	/*
	 * The runstate info is always updated by the hypervisor on the
	 * current CPU, so a compiler barrier is sufficient; re-read
	 * state_entry_time and retry if it changed while we were copying.
	 */
	do {
		state_time = get64(&state->state_entry_time);
		barrier();
		*res = *state;
		barrier();
	} while (get64(&state->state_entry_time) != state_time);
}

/* Return true when a vcpu could run but has no real cpu to run on. */
bool xen_vcpu_stolen(int vcpu)
{
	return per_cpu(xen_runstate, vcpu).state == RUNSTATE_runnable;
}

void xen_setup_runstate_info(int cpu)
{
	struct vcpu_register_runstate_memory_area area;

	area.addr.v = &per_cpu(xen_runstate, cpu);

	if (HYPERVISOR_vcpu_op(VCPUOP_register_runstate_memory_area,
			       xen_vcpu_nr(cpu), &area))
		BUG();
}

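/*
 * Stolen-time accounting: on each timer interrupt, convert the time this
 * vCPU spent runnable-but-not-running (or offline) since the previous
 * snapshot into whole ticks and report them via account_steal_ticks().
 * Any remainder smaller than one tick is carried over in
 * xen_residual_stolen.
 */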
static void do_stolen_accounting(void)
{
	struct vcpu_runstate_info state;
	struct vcpu_runstate_info *snap;
	s64 runnable, offline, stolen;
	cputime_t ticks;

	get_runstate_snapshot(&state);

	WARN_ON(state.state != RUNSTATE_running);

	snap = &__get_cpu_var(xen_runstate_snapshot);

	/* work out how much time the VCPU has not been running */
	runnable = state.time[RUNSTATE_runnable] - snap->time[RUNSTATE_runnable];
	offline = state.time[RUNSTATE_offline] - snap->time[RUNSTATE_offline];

	*snap = state;

	/*
	 * Add the appropriate number of ticks of stolen time, including
	 * any left-overs from last time.
	 */
	stolen = runnable + offline + __this_cpu_read(xen_residual_stolen);

	if (stolen < 0)
		stolen = 0;

	ticks = iter_div_u64_rem(stolen, NS_PER_TICK, &stolen);
	__this_cpu_write(xen_residual_stolen, stolen);
	account_steal_ticks(ticks);
}

/* Get the TSC speed from Xen. */
static unsigned long xen_tsc_khz(void)
{
	struct pvclock_vcpu_time_info *info =
		&HYPERVISOR_shared_info->vcpu_info[0].time;

	return pvclock_tsc_khz(info);
}

u64 xen_clocksource_read(void)
{
	struct pvclock_vcpu_time_info *src;
	u64 ret;

	preempt_disable_notrace();
	src = &__get_cpu_var(xen_vcpu)->time;
	ret = pvclock_clocksource_read(src);
	preempt_enable_notrace();
	return ret;
}

static u64 xen_clocksource_get_cycles(struct clocksource *cs)
{
	return xen_clocksource_read();
}

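/*
 * Wallclock handling: Xen publishes the wall-clock time at boot in the
 * shared info page; pvclock_read_wallclock() adds the per-vcpu pvclock
 * system time to it to yield the current wall-clock time.
 */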
static void xen_read_wallclock(struct timespec *ts)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	struct pvclock_wall_clock *wall_clock = &(s->wc);
	struct pvclock_vcpu_time_info *vcpu_time;

	vcpu_time = &get_cpu_var(xen_vcpu)->time;
	pvclock_read_wallclock(wall_clock, vcpu_time, ts);
	put_cpu_var(xen_vcpu);
}

static void xen_get_wallclock(struct timespec *now)
{
	xen_read_wallclock(now);
}

static int xen_set_wallclock(const struct timespec *now)
{
	/* A domU cannot set the host's hardware RTC. */
	return -1;
}

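/*
 * When running as dom0, mirror kernel time updates (settimeofday/NTP)
 * into Xen's wallclock so the hypervisor's notion of wall-clock time
 * stays in sync; see the registration in xen_time_init().
 */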
static int xen_pvclock_gtod_notify(struct notifier_block *nb,
				   unsigned long was_set, void *priv)
{
	/* Protected by the calling core code serialization */
	static struct timespec next_sync;

	struct xen_platform_op op;
	struct timespec now;

	now = __current_kernel_time();

	/*
	 * Only take the expensive hypercall when the clock was explicitly
	 * set or when the 11 minute RTC synchronization period has elapsed.
	 */
	if (!was_set && timespec_compare(&now, &next_sync) < 0)
		return NOTIFY_OK;

	op.cmd = XENPF_settime;
	op.u.settime.secs = now.tv_sec;
	op.u.settime.nsecs = now.tv_nsec;
	op.u.settime.system_time = xen_clocksource_read();

	(void)HYPERVISOR_dom0_op(&op);

	/*
	 * Move the next synchronization time 11 minutes ahead, mirroring
	 * the periodic sync_cmos_clock() update of the hardware RTC.
	 */
	next_sync = now;
	next_sync.tv_sec += 11 * 60;

	return NOTIFY_OK;
}

static struct notifier_block xen_pvclock_gtod_notifier = {
	.notifier_call = xen_pvclock_gtod_notify,
};

static struct clocksource xen_clocksource __read_mostly = {
	.name = "xen",
	.rating = 400,
	.read = xen_clocksource_get_cycles,
	.mask = ~0,
	.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};

/*
 * Xen clockevent implementation
 *
 * Xen provides two timer interfaces, and a clockevent device is
 * implemented on top of each:
 *
 * - the old timer_op hypercall (xen_timerop_clockevent), a one-shot
 *   timer with nanosecond resolution, used as the fallback when the
 *   vcpu_op timer calls are unavailable;
 *
 * - the newer vcpu_op interface (xen_vcpuop_clockevent), which also
 *   lets us stop the hypervisor's periodic tick entirely and allows
 *   set_next_event to report a timeout that is already in the past
 *   via -ETIME.
 *
 * xen_time_init() selects the vcpu_op variant when the hypervisor
 * supports it and falls back to timer_op otherwise.
 */

/* Convert a delta in ns into an absolute Xen system time. */
static s64 get_abs_timeout(unsigned long delta)
{
	return xen_clocksource_read() + delta;
}

static void xen_timerop_set_mode(enum clock_event_mode mode,
				 struct clock_event_device *evt)
{
	switch (mode) {
	case CLOCK_EVT_MODE_PERIODIC:
		/* unsupported */
		WARN_ON(1);
		break;

	case CLOCK_EVT_MODE_ONESHOT:
	case CLOCK_EVT_MODE_RESUME:
		break;

	case CLOCK_EVT_MODE_UNUSED:
	case CLOCK_EVT_MODE_SHUTDOWN:
		HYPERVISOR_set_timer_op(0);	/* cancel timeout */
		break;
	}
}

static int xen_timerop_set_next_event(unsigned long delta,
				      struct clock_event_device *evt)
{
	WARN_ON(evt->mode != CLOCK_EVT_MODE_ONESHOT);

	if (HYPERVISOR_set_timer_op(get_abs_timeout(delta)) < 0)
		BUG();

	/*
	 * We may have missed the deadline, but there's no real way of
	 * knowing for sure.  If the event was in the past, we'll get an
	 * immediate interrupt.
	 */
	return 0;
}

static const struct clock_event_device xen_timerop_clockevent = {
	.name = "xen",
	.features = CLOCK_EVT_FEAT_ONESHOT,

	.max_delta_ns = 0xffffffff,
	.min_delta_ns = TIMER_SLOP,

	.mult = 1,
	.shift = 0,
	.rating = 500,

	.set_mode = xen_timerop_set_mode,
	.set_next_event = xen_timerop_set_next_event,
};

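/*
 * vcpu_op-based clockevent: programs the timer through
 * VCPUOP_set_singleshot_timer and explicitly stops the hypervisor's
 * periodic and single-shot timers on shutdown, instead of relying on
 * the global set_timer_op hypercall.
 */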
static void xen_vcpuop_set_mode(enum clock_event_mode mode,
				struct clock_event_device *evt)
{
	int cpu = smp_processor_id();

	switch (mode) {
	case CLOCK_EVT_MODE_PERIODIC:
		WARN_ON(1);
		break;

	case CLOCK_EVT_MODE_ONESHOT:
		if (HYPERVISOR_vcpu_op(VCPUOP_stop_periodic_timer,
				       xen_vcpu_nr(cpu), NULL))
			BUG();
		break;

	case CLOCK_EVT_MODE_UNUSED:
	case CLOCK_EVT_MODE_SHUTDOWN:
		if (HYPERVISOR_vcpu_op(VCPUOP_stop_singleshot_timer,
				       xen_vcpu_nr(cpu), NULL) ||
		    HYPERVISOR_vcpu_op(VCPUOP_stop_periodic_timer,
				       xen_vcpu_nr(cpu), NULL))
			BUG();
		break;
	case CLOCK_EVT_MODE_RESUME:
		break;
	}
}

static int xen_vcpuop_set_next_event(unsigned long delta,
				     struct clock_event_device *evt)
{
	int cpu = smp_processor_id();
	struct vcpu_set_singleshot_timer single;
	int ret;

	WARN_ON(evt->mode != CLOCK_EVT_MODE_ONESHOT);

	single.timeout_abs_ns = get_abs_timeout(delta);
	single.flags = VCPU_SSHOTTMR_future;

	ret = HYPERVISOR_vcpu_op(VCPUOP_set_singleshot_timer, xen_vcpu_nr(cpu),
				 &single);

	/*
	 * -ETIME means the requested timeout is already in the past;
	 * anything else is a real error.
	 */
	BUG_ON(ret != 0 && ret != -ETIME);

	return ret;
}

static const struct clock_event_device xen_vcpuop_clockevent = {
	.name = "xen",
	.features = CLOCK_EVT_FEAT_ONESHOT,

	.max_delta_ns = 0xffffffff,
	.min_delta_ns = TIMER_SLOP,

	.mult = 1,
	.shift = 0,
	.rating = 500,

	.set_mode = xen_vcpuop_set_mode,
	.set_next_event = xen_vcpuop_set_next_event,
};

static const struct clock_event_device *xen_clockevent =
	&xen_timerop_clockevent;

struct xen_clock_event_device {
	struct clock_event_device evt;
	char *name;
};
static DEFINE_PER_CPU(struct xen_clock_event_device, xen_clock_events) = { .evt.irq = -1 };

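/*
 * Per-cpu VIRQ_TIMER handler: run the registered clockevent handler (if
 * any) and account stolen time for this vCPU.
 */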
static irqreturn_t xen_timer_interrupt(int irq, void *dev_id)
{
	struct clock_event_device *evt = &__get_cpu_var(xen_clock_events).evt;
	irqreturn_t ret;

	ret = IRQ_NONE;
	if (evt->event_handler) {
		evt->event_handler(evt);
		ret = IRQ_HANDLED;
	}

	do_stolen_accounting();

	return ret;
}

void xen_teardown_timer(int cpu)
{
	struct clock_event_device *evt;

	evt = &per_cpu(xen_clock_events, cpu).evt;

	if (evt->irq >= 0) {
		unbind_from_irqhandler(evt->irq, NULL);
		evt->irq = -1;
		kfree(per_cpu(xen_clock_events, cpu).name);
		per_cpu(xen_clock_events, cpu).name = NULL;
	}
}

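/*
 * Bind this CPU's VIRQ_TIMER to xen_timer_interrupt and initialise the
 * per-cpu clock_event_device from the selected template; the device is
 * registered later from xen_setup_cpu_clockevents().
 */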
void xen_setup_timer(int cpu)
{
	char *name;
	struct clock_event_device *evt;
	int irq;

	evt = &per_cpu(xen_clock_events, cpu).evt;
	WARN(evt->irq >= 0, "IRQ%d for CPU%d is already allocated\n", evt->irq, cpu);
	if (evt->irq >= 0)
		xen_teardown_timer(cpu);

	printk(KERN_INFO "installing Xen timer for CPU %d\n", cpu);

	name = kasprintf(GFP_KERNEL, "timer%d", cpu);
	if (!name)
		name = "<timer kasprintf failed>";

	irq = bind_virq_to_irqhandler(VIRQ_TIMER, cpu, xen_timer_interrupt,
				      IRQF_PERCPU|IRQF_NOBALANCING|IRQF_TIMER|
				      IRQF_FORCE_RESUME|IRQF_EARLY_RESUME,
				      name, NULL);

	memcpy(evt, xen_clockevent, sizeof(*evt));

	evt->cpumask = cpumask_of(cpu);
	evt->irq = irq;
	per_cpu(xen_clock_events, cpu).name = name;
}


/* Must be called on the CPU whose clockevent is being registered. */
void xen_setup_cpu_clockevents(void)
{
	BUG_ON(preemptible());

	clockevents_register_device(&__get_cpu_var(xen_clock_events).evt);
}

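/*
 * Resume path: reset the pvclock state and, when the vcpu_op clockevent
 * is in use, stop the hypervisor's periodic timer again on every online
 * CPU.
 */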
void xen_timer_resume(void)
{
	int cpu;

	pvclock_resume();

	if (xen_clockevent != &xen_vcpuop_clockevent)
		return;

	for_each_online_cpu(cpu) {
		if (HYPERVISOR_vcpu_op(VCPUOP_stop_periodic_timer,
				       xen_vcpu_nr(cpu), NULL))
			BUG();
	}
}

static const struct pv_time_ops xen_time_ops __initconst = {
	.sched_clock = xen_clocksource_read,
};

static void __init xen_time_init(void)
{
	int cpu = smp_processor_id();
	struct timespec tp;

	clocksource_register_hz(&xen_clocksource, NSEC_PER_SEC);

	if (HYPERVISOR_vcpu_op(VCPUOP_stop_periodic_timer, xen_vcpu_nr(cpu),
			       NULL) == 0) {
		/*
		 * Successfully turned off the hypervisor's periodic tick,
		 * so the vcpuop-based timer interface is available.
		 */
		printk(KERN_DEBUG "Xen: using vcpuop timer interface\n");
		xen_clockevent = &xen_vcpuop_clockevent;
	}

	/* Set the initial system time with full resolution. */
	xen_read_wallclock(&tp);
	do_settimeofday(&tp);

	setup_force_cpu_cap(X86_FEATURE_TSC);

	xen_setup_runstate_info(cpu);
	xen_setup_timer(cpu);
	xen_setup_cpu_clockevents();

	if (xen_initial_domain())
		pvclock_gtod_register_notifier(&xen_pvclock_gtod_notifier);
}

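/*
 * Entry point for PV guests: route sched_clock, TSC calibration, the
 * wallclock and the per-cpu clockevent setup through the Xen
 * implementations above.
 */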
void __init xen_init_time_ops(void)
{
	pv_time_ops = xen_time_ops;

	x86_init.timers.timer_init = xen_time_init;
	x86_init.timers.setup_percpu_clockev = x86_init_noop;
	x86_cpuinit.setup_percpu_clockev = x86_init_noop;

	x86_platform.calibrate_tsc = xen_tsc_khz;
	x86_platform.get_wallclock = xen_get_wallclock;
	/* Dom0 uses the native method to set the hardware RTC. */
	if (!xen_initial_domain())
		x86_platform.set_wallclock = xen_set_wallclock;
}

#ifdef CONFIG_XEN_PVHVM
static void xen_hvm_setup_cpu_clockevents(void)
{
	int cpu = smp_processor_id();

	xen_setup_runstate_info(cpu);
	/*
	 * xen_setup_timer(cpu) is not called here: allocating the timer
	 * name is not safe in this atomic context, so the timer is set up
	 * from the CPU bring-up notifier path instead.
	 */
	xen_setup_cpu_clockevents();
}


void __init xen_hvm_init_time_ops(void)
{
	/*
	 * The vector callback is needed; otherwise we cannot receive
	 * interrupts on cpus > 0, and at this point we don't know how
	 * many cpus are available.
	 */
	if (!xen_have_vector_callback)
		return;

	if (!xen_feature(XENFEAT_hvm_safe_pvclock)) {
		printk(KERN_INFO "Xen doesn't support pvclock on HVM, "
		       "disabling the PV timer\n");
		return;
	}

	pv_time_ops = xen_time_ops;
	x86_init.timers.setup_percpu_clockev = xen_time_init;
	x86_cpuinit.setup_percpu_clockev = xen_hvm_setup_cpu_clockevents;

	x86_platform.calibrate_tsc = xen_tsc_khz;
	x86_platform.get_wallclock = xen_get_wallclock;
	x86_platform.set_wallclock = xen_set_wallclock;
}
#endif