linux/virt/kvm/arm/arch_timer.c
/*
 * Copyright (C) 2012 ARM Ltd.
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/cpu.h>
#include <linux/of_irq.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/interrupt.h>

#include <clocksource/arm_arch_timer.h>
#include <asm/arch_timer.h>

#include <kvm/arm_vgic.h>
#include <kvm/arm_arch_timer.h>

static struct timecounter *timecounter;
static struct workqueue_struct *wqueue;
static unsigned int host_vtimer_irq;

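/* Read the raw physical counter through the arch timer's timecounter. */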
static cycle_t kvm_phys_timer_read(void)
{
        return timecounter->cc->read(timecounter->cc);
}

static bool timer_is_armed(struct arch_timer_cpu *timer)
{
        return timer->armed;
}

/* timer_arm: as in "arm the timer", not as in ARM the company */
static void timer_arm(struct arch_timer_cpu *timer, u64 ns)
{
        timer->armed = true;
        hrtimer_start(&timer->timer, ktime_add_ns(ktime_get(), ns),
                      HRTIMER_MODE_ABS);
}

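/*
 * Stop the background hrtimer and flush any pending injection work for
 * this vcpu's timer.
 */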
static void timer_disarm(struct arch_timer_cpu *timer)
{
        if (timer_is_armed(timer)) {
                hrtimer_cancel(&timer->timer);
                cancel_work_sync(&timer->expired);
                timer->armed = false;
        }
}

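/*
 * Mask the virtual timer output and inject the configured timer
 * interrupt for this vcpu into the vgic.
 */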
static void kvm_timer_inject_irq(struct kvm_vcpu *vcpu)
{
        struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;

        timer->cntv_ctl |= ARCH_TIMER_CTRL_IT_MASK;
        kvm_vgic_inject_irq(vcpu->kvm, vcpu->vcpu_id,
                            timer->irq->irq,
                            timer->irq->level);
}

static irqreturn_t kvm_arch_timer_handler(int irq, void *dev_id)
{
        struct kvm_vcpu *vcpu = *(struct kvm_vcpu **)dev_id;

        /*
         * We disable the timer in the world switch and let it be
         * handled by kvm_timer_sync_hwstate(). Getting a timer
         * interrupt at this point is a sure sign of some major
         * breakage.
         */
        pr_warn("Unexpected interrupt %d on vcpu %p\n", irq, vcpu);
        return IRQ_HANDLED;
}

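/*
 * Work function run from the "kvm_arch_timer" workqueue once the
 * background timer has fired: mark the timer as disarmed and inject
 * the interrupt.
 */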
static void kvm_timer_inject_irq_work(struct work_struct *work)
{
        struct kvm_vcpu *vcpu;

        vcpu = container_of(work, struct kvm_vcpu, arch.timer_cpu.expired);
        vcpu->arch.timer_cpu.armed = false;
        kvm_timer_inject_irq(vcpu);
}

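/*
 * hrtimer callback for the per-vcpu background timer; the actual
 * injection is deferred to process context via the workqueue.
 */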
static enum hrtimer_restart kvm_timer_expire(struct hrtimer *hrt)
{
        struct arch_timer_cpu *timer;
        timer = container_of(hrt, struct arch_timer_cpu, timer);
        queue_work(wqueue, &timer->expired);
        return HRTIMER_NORESTART;
}

/**
 * kvm_timer_flush_hwstate - prepare to move the virt timer to the cpu
 * @vcpu: The vcpu pointer
 *
 * Disarm any pending soft timers, since the world-switch code will write the
 * virtual timer state back to the physical CPU.
 */
void kvm_timer_flush_hwstate(struct kvm_vcpu *vcpu)
{
        struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;

        /*
         * We're about to run this vcpu again, so there is no need to
         * keep the background timer running, as we're about to
         * populate the CPU timer again.
         */
        timer_disarm(timer);
}

/**
 * kvm_timer_sync_hwstate - sync timer state from cpu
 * @vcpu: The vcpu pointer
 *
 * Check if the virtual timer was armed and either schedule a corresponding
 * soft timer or inject directly if already expired.
 */
void kvm_timer_sync_hwstate(struct kvm_vcpu *vcpu)
{
        struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
        cycle_t cval, now;
        u64 ns;

        if ((timer->cntv_ctl & ARCH_TIMER_CTRL_IT_MASK) ||
                !(timer->cntv_ctl & ARCH_TIMER_CTRL_ENABLE))
                return;

        cval = timer->cntv_cval;
        now = kvm_phys_timer_read() - vcpu->kvm->arch.timer.cntvoff;

        BUG_ON(timer_is_armed(timer));

        if (cval <= now) {
                /*
                 * Timer has already expired while we were not
                 * looking. Inject the interrupt and carry on.
                 */
                kvm_timer_inject_irq(vcpu);
                return;
        }

        ns = cyclecounter_cyc2ns(timecounter->cc, cval - now);
        timer_arm(timer, ns);
}

void kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu,
                          const struct kvm_irq_level *irq)
{
        struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;

        /*
         * The vcpu timer irq number cannot be determined in
         * kvm_timer_vcpu_init() because it is called much before
         * kvm_vcpu_set_target(). To handle this, we determine
         * vcpu timer irq number when the vcpu is reset.
         */
        timer->irq = irq;
}

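/*
 * Set up the per-vcpu background hrtimer and the work item used to
 * inject the timer interrupt when it expires.
 */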
void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu)
{
        struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;

        INIT_WORK(&timer->expired, kvm_timer_inject_irq_work);
        hrtimer_init(&timer->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
        timer->timer.function = kvm_timer_expire;
}

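/* Enable the host virtual timer PPI on the calling CPU. */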
static void kvm_timer_init_interrupt(void *info)
{
        enable_percpu_irq(host_vtimer_irq, 0);
}

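/*
 * Set a timer register on behalf of userspace. Writes to
 * KVM_REG_ARM_TIMER_CNT adjust the per-VM counter offset (cntvoff)
 * rather than the counter itself.
 */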
int kvm_arm_timer_set_reg(struct kvm_vcpu *vcpu, u64 regid, u64 value)
{
        struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;

        switch (regid) {
        case KVM_REG_ARM_TIMER_CTL:
                timer->cntv_ctl = value;
                break;
        case KVM_REG_ARM_TIMER_CNT:
                vcpu->kvm->arch.timer.cntvoff = kvm_phys_timer_read() - value;
                break;
        case KVM_REG_ARM_TIMER_CVAL:
                timer->cntv_cval = value;
                break;
        default:
                return -1;
        }
        return 0;
}

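/*
 * Get a timer register on behalf of userspace, applying the per-VM
 * counter offset for KVM_REG_ARM_TIMER_CNT.
 */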
u64 kvm_arm_timer_get_reg(struct kvm_vcpu *vcpu, u64 regid)
{
        struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;

        switch (regid) {
        case KVM_REG_ARM_TIMER_CTL:
                return timer->cntv_ctl;
        case KVM_REG_ARM_TIMER_CNT:
                return kvm_phys_timer_read() - vcpu->kvm->arch.timer.cntvoff;
        case KVM_REG_ARM_TIMER_CVAL:
                return timer->cntv_cval;
        }
        return (u64)-1;
}

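/*
 * CPU hotplug notifier: enable the virtual timer interrupt on CPUs as
 * they come online and disable it as they go down.
 */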
static int kvm_timer_cpu_notify(struct notifier_block *self,
                                unsigned long action, void *cpu)
{
        switch (action) {
        case CPU_STARTING:
        case CPU_STARTING_FROZEN:
                kvm_timer_init_interrupt(NULL);
                break;
        case CPU_DYING:
        case CPU_DYING_FROZEN:
                disable_percpu_irq(host_vtimer_irq);
                break;
        }

        return NOTIFY_OK;
}

static struct notifier_block kvm_timer_cpu_nb = {
        .notifier_call = kvm_timer_cpu_notify,
};

static const struct of_device_id arch_timer_of_match[] = {
        { .compatible   = "arm,armv7-timer",    },
        { .compatible   = "arm,armv8-timer",    },
        {},
};

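/*
 * Global initialisation: grab the arch timer timecounter, find the
 * timer node in the device tree, request the virtual timer PPI,
 * register the CPU notifier and create the workqueue used for
 * deferred interrupt injection.
 */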
int kvm_timer_hyp_init(void)
{
        struct device_node *np;
        unsigned int ppi;
        int err;

        timecounter = arch_timer_get_timecounter();
        if (!timecounter)
                return -ENODEV;

        np = of_find_matching_node(NULL, arch_timer_of_match);
        if (!np) {
                kvm_err("kvm_arch_timer: can't find DT node\n");
                return -ENODEV;
        }

        ppi = irq_of_parse_and_map(np, 2);
        if (!ppi) {
                kvm_err("kvm_arch_timer: no virtual timer interrupt\n");
                err = -EINVAL;
                goto out;
        }

        err = request_percpu_irq(ppi, kvm_arch_timer_handler,
                                 "kvm guest timer", kvm_get_running_vcpus());
        if (err) {
                kvm_err("kvm_arch_timer: can't request interrupt %d (%d)\n",
                        ppi, err);
                goto out;
        }

        host_vtimer_irq = ppi;

        err = __register_cpu_notifier(&kvm_timer_cpu_nb);
        if (err) {
                kvm_err("Cannot register timer CPU notifier\n");
                goto out_free;
        }

        wqueue = create_singlethread_workqueue("kvm_arch_timer");
        if (!wqueue) {
                err = -ENOMEM;
                goto out_free;
        }

        kvm_info("%s IRQ%d\n", np->name, ppi);
        on_each_cpu(kvm_timer_init_interrupt, NULL, 1);

        goto out;
out_free:
        free_percpu_irq(ppi, kvm_get_running_vcpus());
out:
        of_node_put(np);
        return err;
}

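/* Tear down the per-vcpu timer state by disarming any pending soft timer. */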
void kvm_timer_vcpu_terminate(struct kvm_vcpu *vcpu)
{
        struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;

        timer_disarm(timer);
}

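/*
 * Per-VM initialisation: record the current physical counter value as
 * the virtual counter offset and mark the timer as enabled, provided
 * global initialisation succeeded.
 */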
int kvm_timer_init(struct kvm *kvm)
{
        if (timecounter && wqueue) {
                kvm->arch.timer.cntvoff = kvm_phys_timer_read();
                kvm->arch.timer.enabled = 1;
        }

        return 0;
}