qemu/softmmu/cpu-timers.c
/*
 * QEMU System Emulator
 *
 * Copyright (c) 2003-2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "qemu-common.h"
#include "qemu/cutils.h"
#include "migration/vmstate.h"
#include "qapi/error.h"
#include "qemu/error-report.h"
#include "exec/exec-all.h"
#include "sysemu/cpus.h"
#include "qemu/main-loop.h"
#include "qemu/option.h"
#include "qemu/seqlock.h"
#include "sysemu/replay.h"
#include "sysemu/runstate.h"
#include "hw/core/cpu.h"
#include "sysemu/cpu-timers.h"
#include "sysemu/cpu-throttle.h"
#include "timers-state.h"

/* clock and ticks */

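/* VM tick counter in host CPU cycles; caller must hold vm_clock_lock. */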
static int64_t cpu_get_ticks_locked(void)
{
    int64_t ticks = timers_state.cpu_ticks_offset;
    if (timers_state.cpu_ticks_enabled) {
        ticks += cpu_get_host_ticks();
    }

    if (timers_state.cpu_ticks_prev > ticks) {
        /* Non-increasing ticks may happen if the host uses software suspend. */
        timers_state.cpu_ticks_offset += timers_state.cpu_ticks_prev - ticks;
        ticks = timers_state.cpu_ticks_prev;
    }

    timers_state.cpu_ticks_prev = ticks;
    return ticks;
}

/*
 * Return the time elapsed in VM between vm_start and vm_stop.
 * cpu_get_ticks() uses units of the host CPU cycle counter.
 */
int64_t cpu_get_ticks(void)
{
    int64_t ticks;

    qemu_spin_lock(&timers_state.vm_clock_lock);
    ticks = cpu_get_ticks_locked();
    qemu_spin_unlock(&timers_state.vm_clock_lock);
    return ticks;
}

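/*
 * Monotonic VM clock in nanoseconds; call with vm_clock_lock held or from
 * within a vm_clock_seqlock read section (see cpu_get_clock() below).
 */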
int64_t cpu_get_clock_locked(void)
{
    int64_t time;

    time = timers_state.cpu_clock_offset;
    if (timers_state.cpu_ticks_enabled) {
        time += get_clock();
    }

    return time;
}

/*
 * Return the monotonic time elapsed in VM, i.e.,
 * the time between vm_start and vm_stop.
 */
int64_t cpu_get_clock(void)
{
    int64_t ti;
    unsigned start;

    do {
        start = seqlock_read_begin(&timers_state.vm_clock_seqlock);
        ti = cpu_get_clock_locked();
    } while (seqlock_read_retry(&timers_state.vm_clock_seqlock, start));

    return ti;
}

/*
 * Enable cpu_get_ticks().
 * Caller must hold BQL which serves as mutex for vm_clock_seqlock.
 */
void cpu_enable_ticks(void)
{
    seqlock_write_lock(&timers_state.vm_clock_seqlock,
                       &timers_state.vm_clock_lock);
    if (!timers_state.cpu_ticks_enabled) {
        timers_state.cpu_ticks_offset -= cpu_get_host_ticks();
        timers_state.cpu_clock_offset -= get_clock();
        timers_state.cpu_ticks_enabled = 1;
    }
    seqlock_write_unlock(&timers_state.vm_clock_seqlock,
                         &timers_state.vm_clock_lock);
}

/*
 * Disable cpu_get_ticks(): the clock is stopped. You must not call
 * cpu_get_ticks() after that.
 * Caller must hold BQL which serves as mutex for vm_clock_seqlock.
 */
void cpu_disable_ticks(void)
{
    seqlock_write_lock(&timers_state.vm_clock_seqlock,
                       &timers_state.vm_clock_lock);
    if (timers_state.cpu_ticks_enabled) {
        timers_state.cpu_ticks_offset += cpu_get_host_ticks();
        timers_state.cpu_clock_offset = cpu_get_clock_locked();
        timers_state.cpu_ticks_enabled = 0;
    }
    seqlock_write_unlock(&timers_state.vm_clock_seqlock,
                         &timers_state.vm_clock_lock);
}

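/*
 * Migration support: each *_needed() predicate below decides whether the
 * corresponding vmstate subsection is included in the migration stream.
 */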
static bool icount_state_needed(void *opaque)
{
    return icount_enabled();
}

static bool warp_timer_state_needed(void *opaque)
{
    TimersState *s = opaque;
    return s->icount_warp_timer != NULL;
}

static bool adjust_timers_state_needed(void *opaque)
{
    TimersState *s = opaque;
    return s->icount_rt_timer != NULL;
}

static bool icount_shift_state_needed(void *opaque)
{
    return icount_enabled() == 2;
}

/*
 * The subsection for warp timer migration is optional because the warp
 * timer may not have been created.
 */
static const VMStateDescription icount_vmstate_warp_timer = {
    .name = "timer/icount/warp_timer",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = warp_timer_state_needed,
    .fields = (VMStateField[]) {
        VMSTATE_INT64(vm_clock_warp_start, TimersState),
        VMSTATE_TIMER_PTR(icount_warp_timer, TimersState),
        VMSTATE_END_OF_LIST()
    }
};

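/* Only migrate the icount adjustment timers when they have been created. */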
static const VMStateDescription icount_vmstate_adjust_timers = {
    .name = "timer/icount/timers",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = adjust_timers_state_needed,
    .fields = (VMStateField[]) {
        VMSTATE_TIMER_PTR(icount_rt_timer, TimersState),
        VMSTATE_TIMER_PTR(icount_vm_timer, TimersState),
        VMSTATE_END_OF_LIST()
    }
};

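/*
 * The shift state is only migrated when the icount shift is adjusted
 * dynamically (icount_enabled() == 2).
 */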
static const VMStateDescription icount_vmstate_shift = {
    .name = "timer/icount/shift",
    .version_id = 2,
    .minimum_version_id = 2,
    .needed = icount_shift_state_needed,
    .fields = (VMStateField[]) {
        VMSTATE_INT16(icount_time_shift, TimersState),
        VMSTATE_INT64(last_delta, TimersState),
        VMSTATE_END_OF_LIST()
    }
};

/*
 * This is a subsection for icount migration.
 */
static const VMStateDescription icount_vmstate_timers = {
    .name = "timer/icount",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = icount_state_needed,
    .fields = (VMStateField[]) {
        VMSTATE_INT64(qemu_icount_bias, TimersState),
        VMSTATE_INT64(qemu_icount, TimersState),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription * []) {
        &icount_vmstate_warp_timer,
        &icount_vmstate_adjust_timers,
        &icount_vmstate_shift,
        NULL
    }
};

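/*
 * Top-level "timer" vmstate; the icount state travels as an optional
 * subsection.
 */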
static const VMStateDescription vmstate_timers = {
    .name = "timer",
    .version_id = 2,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_INT64(cpu_ticks_offset, TimersState),
        VMSTATE_UNUSED(8),
        VMSTATE_INT64_V(cpu_clock_offset, TimersState, 2),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription * []) {
        &icount_vmstate_timers,
        NULL
    }
};

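/*
 * Empty work item: queuing it with async_run_on_cpu() makes
 * cpu_thread_is_idle() return false, which is enough to wake a halted
 * vCPU thread (see qemu_timer_notify_cb() below).
 */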
static void do_nothing(CPUState *cpu, run_on_cpu_data unused)
{
}

void qemu_timer_notify_cb(void *opaque, QEMUClockType type)
{
    if (!icount_enabled() || type != QEMU_CLOCK_VIRTUAL) {
        qemu_notify_event();
        return;
    }

    if (qemu_in_vcpu_thread()) {
        /*
         * A CPU is currently running; kick it back out to the
         * tcg_cpu_exec() loop so it will recalculate its
         * icount deadline immediately.
         */
        qemu_cpu_kick(current_cpu);
    } else if (first_cpu) {
        /*
         * qemu_cpu_kick is not enough to kick a halted CPU out of
         * qemu_tcg_wait_io_event.  async_run_on_cpu, instead,
         * causes cpu_thread_is_idle to return false.  This way,
         * handle_icount_deadline can run.
         * If we have no CPUs at all for some reason, we don't
         * need to do anything.
         */
        async_run_on_cpu(first_cpu, do_nothing, RUN_ON_CPU_NULL);
    }
}

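/* The single global TimersState instance (fields in timers-state.h). */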
TimersState timers_state;

/* Initialize timers state and the cpu throttle for convenience. */
void cpu_timers_init(void)
{
    seqlock_init(&timers_state.vm_clock_seqlock);
    qemu_spin_init(&timers_state.vm_clock_lock);
    vmstate_register(NULL, 0, &vmstate_timers, &timers_state);

    cpu_throttle_init();
}