/*
 * Copyright (C) 2012 ARM Ltd.
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/cpu.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/interrupt.h>
#include <linux/irq.h>

#include <clocksource/arm_arch_timer.h>
#include <asm/arch_timer.h>
#include <asm/kvm_hyp.h>

#include <kvm/arm_vgic.h>
#include <kvm/arm_arch_timer.h>

#include "trace.h"

static struct timecounter *timecounter;
static unsigned int host_vtimer_irq;
static u32 host_vtimer_irq_flags;

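/*
 * The cached vtimer "active clear" state is only valid while the vcpu is
 * loaded on a physical CPU; invalidate it when the vcpu is put (see the
 * caching discussion in kvm_timer_flush_hwstate_vgic()).
 */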
void kvm_timer_vcpu_put(struct kvm_vcpu *vcpu)
{
	vcpu_vtimer(vcpu)->active_cleared_last = false;
}

u64 kvm_phys_timer_read(void)
{
	return timecounter->cc->read(timecounter->cc);
}

static bool timer_is_armed(struct arch_timer_cpu *timer)
{
	return timer->armed;
}

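/*
 * Arm the per-vcpu background hrtimer to fire ns nanoseconds from now;
 * timer_disarm() cancels it together with any pending "expired" work.
 */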
static void timer_arm(struct arch_timer_cpu *timer, u64 ns)
{
	timer->armed = true;
	hrtimer_start(&timer->timer, ktime_add_ns(ktime_get(), ns),
		      HRTIMER_MODE_ABS);
}

static void timer_disarm(struct arch_timer_cpu *timer)
{
	if (timer_is_armed(timer)) {
		hrtimer_cancel(&timer->timer);
		cancel_work_sync(&timer->expired);
		timer->armed = false;
	}
}

static irqreturn_t kvm_arch_timer_handler(int irq, void *dev_id)
{
	struct kvm_vcpu *vcpu = *(struct kvm_vcpu **)dev_id;

	/*
	 * We disable the timer in the world switch and let it be
	 * handled by kvm_timer_sync_hwstate(). Getting a timer
	 * interrupt at this point is a sure sign of some major
	 * breakage.
	 */
	pr_warn("Unexpected interrupt %d on vcpu %p\n", irq, vcpu);
	return IRQ_HANDLED;
}

/*
 * Work function for the background timer, scheduled when its hrtimer
 * fires while the vcpu is blocked with a timer programmed for the future.
 */
static void kvm_timer_inject_irq_work(struct work_struct *work)
{
	struct kvm_vcpu *vcpu;

	vcpu = container_of(work, struct kvm_vcpu, arch.timer_cpu.expired);

	/*
	 * If the vcpu is blocked we want to wake it up so that it will see
	 * the timer has expired when entering the guest.
	 */
	kvm_vcpu_kick(vcpu);
}

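/*
 * Compute the time remaining, in nanoseconds, until the timer's compare
 * value is reached, based on the guest's view of the counter (the physical
 * counter adjusted by cntvoff).  Returns 0 if cval has already been reached.
 */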
static u64 kvm_timer_compute_delta(struct arch_timer_context *timer_ctx)
{
	u64 cval, now;

	cval = timer_ctx->cnt_cval;
	now = kvm_phys_timer_read() - timer_ctx->cntvoff;

	if (now < cval) {
		u64 ns;

		ns = cyclecounter_cyc2ns(timecounter->cc,
					 cval - now,
					 timecounter->mask,
					 &timecounter->frac);
		return ns;
	}

	return 0;
}

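/*
 * A timer output can only be asserted when the timer is enabled and its
 * interrupt is not masked in the CTL register.
 */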
static bool kvm_timer_irq_can_fire(struct arch_timer_context *timer_ctx)
{
	return !(timer_ctx->cnt_ctl & ARCH_TIMER_CTRL_IT_MASK) &&
		(timer_ctx->cnt_ctl & ARCH_TIMER_CTRL_ENABLE);
}

/*
 * Returns the earliest expiration time in ns among guest timers.
 * Note that it will return 0 if none of the timers can fire.
 */
static u64 kvm_timer_earliest_exp(struct kvm_vcpu *vcpu)
{
	u64 min_virt = ULLONG_MAX, min_phys = ULLONG_MAX;
	struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
	struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);

	if (kvm_timer_irq_can_fire(vtimer))
		min_virt = kvm_timer_compute_delta(vtimer);

	if (kvm_timer_irq_can_fire(ptimer))
		min_phys = kvm_timer_compute_delta(ptimer);

	/* If none of the timers can fire, then return 0 */
	if ((min_virt == ULLONG_MAX) && (min_phys == ULLONG_MAX))
		return 0;

	return min(min_virt, min_phys);
}

static enum hrtimer_restart kvm_timer_expire(struct hrtimer *hrt)
{
	struct arch_timer_cpu *timer;
	struct kvm_vcpu *vcpu;
	u64 ns;

	timer = container_of(hrt, struct arch_timer_cpu, timer);
	vcpu = container_of(timer, struct kvm_vcpu, arch.timer_cpu);

	/*
	 * Check that the timer has really expired from the guest's
	 * PoV (NTP on the host may have forced it to expire
	 * early). If we should have slept longer, restart it.
	 */
	ns = kvm_timer_earliest_exp(vcpu);
	if (unlikely(ns)) {
		hrtimer_forward_now(hrt, ns_to_ktime(ns));
		return HRTIMER_RESTART;
	}

	schedule_work(&timer->expired);
	return HRTIMER_NORESTART;
}

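/*
 * Returns true if the timer output should currently be asserted: the timer
 * can fire and its compare value is at or below the guest's counter view.
 */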
bool kvm_timer_should_fire(struct arch_timer_context *timer_ctx)
{
	u64 cval, now;

	if (!kvm_timer_irq_can_fire(timer_ctx))
		return false;

	cval = timer_ctx->cnt_cval;
	now = kvm_phys_timer_read() - timer_ctx->cntvoff;

	return cval <= now;
}

/*
 * Reflect the timer output level into the kvm_run structure
 */
void kvm_timer_update_run(struct kvm_vcpu *vcpu)
{
	struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
	struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);
	struct kvm_sync_regs *regs = &vcpu->run->s.regs;

	/* Populate the device bitmap with the timer states */
	regs->device_irq_level &= ~(KVM_ARM_DEV_EL1_VTIMER |
				    KVM_ARM_DEV_EL1_PTIMER);
	if (vtimer->irq.level)
		regs->device_irq_level |= KVM_ARM_DEV_EL1_VTIMER;
	if (ptimer->irq.level)
		regs->device_irq_level |= KVM_ARM_DEV_EL1_PTIMER;
}

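/*
 * Record the new timer output level, and inject the matching
 * level-triggered interrupt into the vgic when an in-kernel irqchip is
 * present.  Any change in the output level also invalidates the cached
 * vtimer active state.
 */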
static void kvm_timer_update_irq(struct kvm_vcpu *vcpu, bool new_level,
				 struct arch_timer_context *timer_ctx)
{
	int ret;

	timer_ctx->active_cleared_last = false;
	timer_ctx->irq.level = new_level;
	trace_kvm_timer_update_irq(vcpu->vcpu_id, timer_ctx->irq.irq,
				   timer_ctx->irq.level);

	if (likely(irqchip_in_kernel(vcpu->kvm))) {
		ret = kvm_vgic_inject_irq(vcpu->kvm, vcpu->vcpu_id,
					  timer_ctx->irq.irq,
					  timer_ctx->irq.level);
		WARN_ON(ret);
	}
}

/*
 * Check if there was a change in the timer state (should we raise or lower
 * the line level to the GIC).
 */
static void kvm_timer_update_state(struct kvm_vcpu *vcpu)
{
	struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
	struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
	struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);

	/*
	 * If userspace modified the timer registers via SET_ONE_REG before
	 * the vgic was initialized, we mustn't set the vtimer->irq.level
	 * value because the guest would never see the interrupt.  Instead
	 * wait until we call this function from kvm_timer_flush_hwstate.
	 */
	if (unlikely(!timer->enabled))
		return;

	if (kvm_timer_should_fire(vtimer) != vtimer->irq.level)
		kvm_timer_update_irq(vcpu, !vtimer->irq.level, vtimer);

	if (kvm_timer_should_fire(ptimer) != ptimer->irq.level)
		kvm_timer_update_irq(vcpu, !ptimer->irq.level, ptimer);
}

/* Schedule the background timer for the emulated timer. */
static void kvm_timer_emulate(struct kvm_vcpu *vcpu,
			      struct arch_timer_context *timer_ctx)
{
	struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;

	if (kvm_timer_should_fire(timer_ctx))
		return;

	if (!kvm_timer_irq_can_fire(timer_ctx))
		return;

	/* The timer has not yet expired, schedule a background timer */
	timer_arm(timer, kvm_timer_compute_delta(timer_ctx));
}

/*
 * Schedule the background timer before calling kvm_vcpu_block, so that this
 * thread is removed from its waitqueue and made runnable when there's a timer
 * interrupt to handle.
 */
void kvm_timer_schedule(struct kvm_vcpu *vcpu)
{
	struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
	struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
	struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);

	BUG_ON(timer_is_armed(timer));

	/*
	 * No need to schedule a background timer if any guest timer has
	 * already expired, because kvm_vcpu_block will return before putting
	 * the thread to sleep.
	 */
	if (kvm_timer_should_fire(vtimer) || kvm_timer_should_fire(ptimer))
		return;

	/*
	 * If both timers are not capable of raising interrupts (disabled or
	 * masked), then there's no more work for us to do.
	 */
	if (!kvm_timer_irq_can_fire(vtimer) && !kvm_timer_irq_can_fire(ptimer))
		return;

	/*
	 * The guest timers have not yet expired, schedule a background timer.
	 * Set the earliest expiration time among the guest timers.
	 */
	timer_arm(timer, kvm_timer_earliest_exp(vcpu));
}

void kvm_timer_unschedule(struct kvm_vcpu *vcpu)
{
	struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;

	timer_disarm(timer);
}

static void kvm_timer_flush_hwstate_vgic(struct kvm_vcpu *vcpu)
{
	struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
	bool phys_active;
	int ret;

	/*
	 * If we enter the guest with the virtual input level to the VGIC
	 * asserted, then we have already told the VGIC what we need to, and
	 * we don't need to exit from the guest until the guest deactivates
	 * the already injected interrupt, so therefore we should set the
	 * hardware active state to prevent unnecessary exits from the guest.
	 *
	 * Also, if we enter the guest with the virtual timer interrupt active,
	 * then it must be pending on the physical distributor, because we set
	 * the HW bit and the guest must be able to deactivate the virtual and
	 * physical interrupt at its VCPU interface.
	 *
	 * Conversely, if the virtual input level is deasserted and the virtual
	 * interrupt is not active, then always clear the hardware active state
	 * to ensure that hardware interrupts from the timer trigger a guest
	 * exit.
	 */
	phys_active = vtimer->irq.level ||
		      kvm_vgic_map_is_active(vcpu, vtimer->irq.irq);

	/*
	 * We want to avoid hitting the (re)distributor as much as
	 * possible, as this is a potentially expensive MMIO access
	 * (not to mention locks in the irq layer), and a solution for
	 * this is to cache the "active" state in memory.
	 *
	 * Things to consider: we cannot cache an "active set" state,
	 * because the HW can change this behind our back (it becomes
	 * "clear" in the HW). We must then restrict the caching to
	 * the "clear" state.
	 *
	 * The cache is invalidated on:
	 * - vcpu put, indicating that the HW cannot be trusted to be
	 *   in a sane state on the next vcpu load,
	 * - any change in the interrupt state
	 *
	 * Usage conditions:
	 * - cached value is "active clear"
	 * - value to be programmed is "active clear"
	 */
	if (vtimer->active_cleared_last && !phys_active)
		return;

	ret = irq_set_irqchip_state(host_vtimer_irq,
				    IRQCHIP_STATE_ACTIVE,
				    phys_active);
	WARN_ON(ret);

	vtimer->active_cleared_last = !phys_active;
}

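/*
 * With a userspace irqchip we have to notify user space whenever either
 * timer output level differs from what was last reported in the kvm_run
 * structure.
 */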
bool kvm_timer_should_notify_user(struct kvm_vcpu *vcpu)
{
	struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
	struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);
	struct kvm_sync_regs *sregs = &vcpu->run->s.regs;
	bool vlevel, plevel;

	if (likely(irqchip_in_kernel(vcpu->kvm)))
		return false;

	vlevel = sregs->device_irq_level & KVM_ARM_DEV_EL1_VTIMER;
	plevel = sregs->device_irq_level & KVM_ARM_DEV_EL1_PTIMER;

	return vtimer->irq.level != vlevel ||
	       ptimer->irq.level != plevel;
}

static void kvm_timer_flush_hwstate_user(struct kvm_vcpu *vcpu)
{
	struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);

	/*
	 * To prevent continuously exiting from the guest, we mask the
	 * physical interrupt by disabling it on the host interrupt
	 * controller when the virtual level is high, such that the guest can
	 * make forward progress.  Once we detect the output level being
	 * de-asserted, we unmask the interrupt again so that we exit from
	 * the guest when the timer fires.
	 */
	if (vtimer->irq.level)
		disable_percpu_irq(host_vtimer_irq);
	else
		enable_percpu_irq(host_vtimer_irq, 0);
}

/**
 * kvm_timer_flush_hwstate - prepare timers before running the vcpu
 * @vcpu: The vcpu pointer
 *
 * Check if the virtual timer has expired while we were running in the host,
 * and inject an interrupt if that was the case, making sure the timer is
 * masked or disabled on the host so that we keep executing.  Also schedule a
 * software timer for the physical timer if it is enabled.
 */
void kvm_timer_flush_hwstate(struct kvm_vcpu *vcpu)
{
	struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;

	if (unlikely(!timer->enabled))
		return;

	kvm_timer_update_state(vcpu);

	/* Set the background timer for the physical timer emulation. */
	kvm_timer_emulate(vcpu, vcpu_ptimer(vcpu));

	if (unlikely(!irqchip_in_kernel(vcpu->kvm)))
		kvm_timer_flush_hwstate_user(vcpu);
	else
		kvm_timer_flush_hwstate_vgic(vcpu);
}

/**
 * kvm_timer_sync_hwstate - sync timer state from cpu
 * @vcpu: The vcpu pointer
 *
 * Check if any of the timers have expired while we were running in the guest,
 * and inject an interrupt if that was the case.
 */
void kvm_timer_sync_hwstate(struct kvm_vcpu *vcpu)
{
	struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;

	/*
	 * This is to cancel the background timer for the physical timer
	 * emulation if it is set.
	 */
	timer_disarm(timer);

	/*
	 * The guest could have modified the timer registers or the timer
	 * could have expired, update the timer state.
	 */
	kvm_timer_update_state(vcpu);
}

int kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu,
			 const struct kvm_irq_level *virt_irq,
			 const struct kvm_irq_level *phys_irq)
{
	struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
	struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);

	/*
	 * The vcpu timer irq number cannot be determined in
	 * kvm_timer_vcpu_init() because it is called much before
	 * kvm_vcpu_set_target(). To handle this, we determine
	 * vcpu timer irq number when the vcpu is reset.
	 */
	vtimer->irq.irq = virt_irq->irq;
	ptimer->irq.irq = phys_irq->irq;

	/*
	 * The bits in CNTV_CTL are architecturally reset to UNKNOWN for ARMv8
	 * and to 0 for ARMv7.  We provide an implementation that always
	 * resets the timer to be disabled and unmasked and is compliant with
	 * the ARMv7 architecture.
	 */
	vtimer->cnt_ctl = 0;
	ptimer->cnt_ctl = 0;
	kvm_timer_update_state(vcpu);

	return 0;
}

/* Make the updates of cntvoff for all vtimer contexts atomic */
static void update_vtimer_cntvoff(struct kvm_vcpu *vcpu, u64 cntvoff)
{
	int i;
	struct kvm *kvm = vcpu->kvm;
	struct kvm_vcpu *tmp;

	mutex_lock(&kvm->lock);
	kvm_for_each_vcpu(i, tmp, kvm)
		vcpu_vtimer(tmp)->cntvoff = cntvoff;

	/*
	 * When called from the vcpu create path, the CPU being created is not
	 * included in the loop above, so we just set it here as well.
	 */
	vcpu_vtimer(vcpu)->cntvoff = cntvoff;
	mutex_unlock(&kvm->lock);
}

void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu)
{
	struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;

	/* Synchronize cntvoff across all vtimers of a VM. */
	update_vtimer_cntvoff(vcpu, kvm_phys_timer_read());
	vcpu_ptimer(vcpu)->cntvoff = 0;

	INIT_WORK(&timer->expired, kvm_timer_inject_irq_work);
	hrtimer_init(&timer->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
	timer->timer.function = kvm_timer_expire;
}

static void kvm_timer_init_interrupt(void *info)
{
	enable_percpu_irq(host_vtimer_irq, host_vtimer_irq_flags);
}

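/*
 * Userspace accessors for the virtual timer registers; note that setting
 * KVM_REG_ARM_TIMER_CNT adjusts cntvoff for every vcpu of the VM.
 */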
int kvm_arm_timer_set_reg(struct kvm_vcpu *vcpu, u64 regid, u64 value)
{
	struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);

	switch (regid) {
	case KVM_REG_ARM_TIMER_CTL:
		vtimer->cnt_ctl = value;
		break;
	case KVM_REG_ARM_TIMER_CNT:
		update_vtimer_cntvoff(vcpu, kvm_phys_timer_read() - value);
		break;
	case KVM_REG_ARM_TIMER_CVAL:
		vtimer->cnt_cval = value;
		break;
	default:
		return -1;
	}

	kvm_timer_update_state(vcpu);
	return 0;
}

u64 kvm_arm_timer_get_reg(struct kvm_vcpu *vcpu, u64 regid)
{
	struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);

	switch (regid) {
	case KVM_REG_ARM_TIMER_CTL:
		return vtimer->cnt_ctl;
	case KVM_REG_ARM_TIMER_CNT:
		return kvm_phys_timer_read() - vtimer->cntvoff;
	case KVM_REG_ARM_TIMER_CVAL:
		return vtimer->cnt_cval;
	}
	return (u64)-1;
}

static int kvm_timer_starting_cpu(unsigned int cpu)
{
	kvm_timer_init_interrupt(NULL);
	return 0;
}

static int kvm_timer_dying_cpu(unsigned int cpu)
{
	disable_percpu_irq(host_vtimer_irq);
	return 0;
}

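/*
 * Probe the host virtual timer interrupt from the arch timer driver,
 * request it as a per-cpu IRQ, and install hotplug callbacks that keep it
 * enabled on every online CPU.
 */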
int kvm_timer_hyp_init(void)
{
	struct arch_timer_kvm_info *info;
	int err;

	info = arch_timer_get_kvm_info();
	timecounter = &info->timecounter;

	if (!timecounter->cc) {
		kvm_err("kvm_arch_timer: uninitialized timecounter\n");
		return -ENODEV;
	}

	if (info->virtual_irq <= 0) {
		kvm_err("kvm_arch_timer: invalid virtual timer IRQ: %d\n",
			info->virtual_irq);
		return -ENODEV;
	}
	host_vtimer_irq = info->virtual_irq;

	host_vtimer_irq_flags = irq_get_trigger_type(host_vtimer_irq);
	if (host_vtimer_irq_flags != IRQF_TRIGGER_HIGH &&
	    host_vtimer_irq_flags != IRQF_TRIGGER_LOW) {
		kvm_err("Invalid trigger for IRQ%d, assuming level low\n",
			host_vtimer_irq);
		host_vtimer_irq_flags = IRQF_TRIGGER_LOW;
	}

	err = request_percpu_irq(host_vtimer_irq, kvm_arch_timer_handler,
				 "kvm guest timer", kvm_get_running_vcpus());
	if (err) {
		kvm_err("kvm_arch_timer: can't request interrupt %d (%d)\n",
			host_vtimer_irq, err);
		return err;
	}

	kvm_info("virtual timer IRQ%d\n", host_vtimer_irq);

	cpuhp_setup_state(CPUHP_AP_KVM_ARM_TIMER_STARTING,
			  "kvm/arm/timer:starting", kvm_timer_starting_cpu,
			  kvm_timer_dying_cpu);
	return err;
}

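/* Tear down the vcpu's background timer and its physical IRQ mapping. */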
void kvm_timer_vcpu_terminate(struct kvm_vcpu *vcpu)
{
	struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
	struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);

	timer_disarm(timer);
	kvm_vgic_unmap_phys_irq(vcpu, vtimer->irq.irq);
}

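/*
 * Enable the per-vcpu timer: with an in-kernel vgic this also resolves the
 * host vtimer's underlying physical IRQ and maps the virtual interrupt to
 * it, so the vgic can manage the timer's active state.
 */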
int kvm_timer_enable(struct kvm_vcpu *vcpu)
{
	struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
	struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
	struct irq_desc *desc;
	struct irq_data *data;
	int phys_irq;
	int ret;

	if (timer->enabled)
		return 0;

	/* Without a VGIC we do not map virtual IRQs to physical IRQs */
	if (!irqchip_in_kernel(vcpu->kvm))
		goto no_vgic;

	if (!vgic_initialized(vcpu->kvm))
		return -ENODEV;

	/*
	 * Find the physical IRQ number corresponding to the host_vtimer_irq
	 */
	desc = irq_to_desc(host_vtimer_irq);
	if (!desc) {
		kvm_err("%s: no interrupt descriptor\n", __func__);
		return -EINVAL;
	}

	data = irq_desc_get_irq_data(desc);
	while (data->parent_data)
		data = data->parent_data;

	phys_irq = data->hwirq;

	/*
	 * Tell the VGIC that the virtual interrupt is tied to a
	 * physical interrupt. We do that once per VCPU.
	 */
	ret = kvm_vgic_map_phys_irq(vcpu, vtimer->irq.irq, phys_irq);
	if (ret)
		return ret;

no_vgic:
	timer->enabled = 1;
	return 0;
}

/*
 * On VHE systems, we only need to configure the trap on physical timer and
 * counter accesses in EL0 and EL1 once, not for every world switch.
 * The host kernel runs at EL2 with HCR_EL2.TGE == 1,
 * and this makes those bits have no effect for the host kernel execution.
 */
void kvm_timer_init_vhe(void)
{
	/* When HCR_EL2.E2H == 1, EL1PCEN and EL1PCTEN are shifted by 10 */
	u32 cnthctl_shift = 10;
	u64 val;

	/*
	 * Disallow physical timer access for the guest.
	 * Physical counter access is allowed.
	 */
	val = read_sysreg(cnthctl_el2);
	val &= ~(CNTHCTL_EL1PCEN << cnthctl_shift);
	val |= (CNTHCTL_EL1PCTEN << cnthctl_shift);
	write_sysreg(val, cnthctl_el2);
}