linux/arch/powerpc/kvm/timing.c
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 *
 * Copyright IBM Corp. 2008
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 *          Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
 */

#include <linux/kvm_host.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/module.h>

#include <asm/time.h>
#include <asm-generic/div64.h>

#include "timing.h"

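/*
 * Reset all per-vcpu exit timing statistics.  Invoked from the debugfs
 * write handler below when userspace asks for a clear ('c').  Minimum
 * durations start at 0xFFFFFFFF so the first recorded sample always
 * becomes the new minimum.
 */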
void kvmppc_init_timing_stats(struct kvm_vcpu *vcpu)
{
        int i;

        /* Take a lock to avoid concurrent updates */
        mutex_lock(&vcpu->arch.exit_timing_lock);

        vcpu->arch.last_exit_type = 0xDEAD;
        for (i = 0; i < __NUMBER_OF_KVM_EXIT_TYPES; i++) {
                vcpu->arch.timing_count_type[i] = 0;
                vcpu->arch.timing_max_duration[i] = 0;
                vcpu->arch.timing_min_duration[i] = 0xFFFFFFFF;
                vcpu->arch.timing_sum_duration[i] = 0;
                vcpu->arch.timing_sum_quad_duration[i] = 0;
        }
        vcpu->arch.timing_last_exit = 0;
        vcpu->arch.timing_exit.tv64 = 0;
        vcpu->arch.timing_last_enter.tv64 = 0;

        mutex_unlock(&vcpu->arch.exit_timing_lock);
}

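/*
 * Fold one measured duration (in timebase ticks) into the running
 * statistics for the given exit type.  Keeping a count, a sum and a sum
 * of squares is enough for a consumer to derive the mean and standard
 * deviation later, e.g. (illustrative only, not computed here):
 *
 *   mean     = sum / count
 *   variance = sum_quad / count - mean * mean
 *
 * Wrap-arounds of the 64-bit accumulators are only reported, not handled.
 */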
static void add_exit_timing(struct kvm_vcpu *vcpu, u64 duration, int type)
{
        u64 old;

        mutex_lock(&vcpu->arch.exit_timing_lock);

        vcpu->arch.timing_count_type[type]++;

        /* sum */
        old = vcpu->arch.timing_sum_duration[type];
        vcpu->arch.timing_sum_duration[type] += duration;
        if (unlikely(old > vcpu->arch.timing_sum_duration[type])) {
                printk(KERN_ERR "%s - wrap adding sum of durations"
                        " old %lld new %lld type %d exit # of type %d\n",
                        __func__, old, vcpu->arch.timing_sum_duration[type],
                        type, vcpu->arch.timing_count_type[type]);
        }

        /* square sum */
        old = vcpu->arch.timing_sum_quad_duration[type];
        vcpu->arch.timing_sum_quad_duration[type] += (duration * duration);
        if (unlikely(old > vcpu->arch.timing_sum_quad_duration[type])) {
                printk(KERN_ERR "%s - wrap adding sum of squared durations"
                        " old %lld new %lld type %d exit # of type %d\n",
                        __func__, old,
                        vcpu->arch.timing_sum_quad_duration[type],
                        type, vcpu->arch.timing_count_type[type]);
        }

        /* set min/max */
        if (unlikely(duration < vcpu->arch.timing_min_duration[type]))
                vcpu->arch.timing_min_duration[type] = duration;
        if (unlikely(duration > vcpu->arch.timing_max_duration[type]))
                vcpu->arch.timing_max_duration[type] = duration;

        mutex_unlock(&vcpu->arch.exit_timing_lock);
}

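/*
 * Called on every guest exit once the exit timestamp has been taken.
 * (enter - exit) is the time spent in the host handling the *previous*
 * exit and is accounted to that exit's type; (current exit - enter) is
 * the time spent in the guest and is accounted to TIMEINGUEST.
 */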
void kvmppc_update_timing_stats(struct kvm_vcpu *vcpu)
{
        u64 exit = vcpu->arch.timing_last_exit;
        u64 enter = vcpu->arch.timing_last_enter.tv64;

        /* save exit time, used next exit when the reenter time is known */
        vcpu->arch.timing_last_exit = vcpu->arch.timing_exit.tv64;

        if (unlikely(vcpu->arch.last_exit_type == 0xDEAD || exit == 0))
                return; /* skip incomplete cycle (e.g. after reset) */

        /* update statistics for average and standard deviation */
        add_exit_timing(vcpu, (enter - exit), vcpu->arch.last_exit_type);
        /* enter -> timing_last_exit is time spent in guest - log this too */
        add_exit_timing(vcpu, (vcpu->arch.timing_last_exit - enter),
                        TIMEINGUEST);
}

static const char *kvm_exit_names[__NUMBER_OF_KVM_EXIT_TYPES] = {
        [MMIO_EXITS] =              "MMIO",
        [SIGNAL_EXITS] =            "SIGNAL",
        [ITLB_REAL_MISS_EXITS] =    "ITLBREAL",
        [ITLB_VIRT_MISS_EXITS] =    "ITLBVIRT",
        [DTLB_REAL_MISS_EXITS] =    "DTLBREAL",
        [DTLB_VIRT_MISS_EXITS] =    "DTLBVIRT",
        [SYSCALL_EXITS] =           "SYSCALL",
        [ISI_EXITS] =               "ISI",
        [DSI_EXITS] =               "DSI",
        [EMULATED_INST_EXITS] =     "EMULINST",
        [EMULATED_MTMSRWE_EXITS] =  "EMUL_WAIT",
        [EMULATED_WRTEE_EXITS] =    "EMUL_WRTEE",
        [EMULATED_MTSPR_EXITS] =    "EMUL_MTSPR",
        [EMULATED_MFSPR_EXITS] =    "EMUL_MFSPR",
        [EMULATED_MTMSR_EXITS] =    "EMUL_MTMSR",
        [EMULATED_MFMSR_EXITS] =    "EMUL_MFMSR",
        [EMULATED_TLBSX_EXITS] =    "EMUL_TLBSX",
        [EMULATED_TLBWE_EXITS] =    "EMUL_TLBWE",
        [EMULATED_RFI_EXITS] =      "EMUL_RFI",
        [DEC_EXITS] =               "DEC",
        [EXT_INTR_EXITS] =          "EXTINT",
        [HALT_WAKEUP] =             "HALT",
        [USR_PR_INST] =             "USR_PR_INST",
        [FP_UNAVAIL] =              "FP_UNAVAIL",
        [DEBUG_EXITS] =             "DEBUG",
        [TIMEINGUEST] =             "TIMEINGUEST"
};

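/*
 * seq_file show handler: print one row per exit type.  Durations are
 * accumulated in timebase ticks; do_div() converts them in place to
 * microseconds (the remainder it returns is discarded).
 */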
static int kvmppc_exit_timing_show(struct seq_file *m, void *private)
{
        struct kvm_vcpu *vcpu = m->private;
        int i;
        u64 min, max, sum, sum_quad;

        seq_printf(m, "%s", "type       count   min     max     sum     sum_squared\n");

        for (i = 0; i < __NUMBER_OF_KVM_EXIT_TYPES; i++) {

                min = vcpu->arch.timing_min_duration[i];
                do_div(min, tb_ticks_per_usec);
                max = vcpu->arch.timing_max_duration[i];
                do_div(max, tb_ticks_per_usec);
                sum = vcpu->arch.timing_sum_duration[i];
                do_div(sum, tb_ticks_per_usec);
                sum_quad = vcpu->arch.timing_sum_quad_duration[i];
                do_div(sum_quad, tb_ticks_per_usec);

                seq_printf(m, "%12s     %10d    %10lld  %10lld  %20lld  %20lld\n",
                        kvm_exit_names[i],
                        vcpu->arch.timing_count_type[i],
                        min,
                        max,
                        sum,
                        sum_quad);

        }
        return 0;
}

/* Write 'c' to clear the timing statistics. */
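/*
 * Illustrative usage from the shell (assuming debugfs is mounted at
 * /sys/kernel/debug):
 *
 *   echo -n c > /sys/kernel/debug/kvm/vm<pid>_vcpu<id>_timing
 *
 * Note the -n: writes of more than one byte are rejected with -EINVAL.
 */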
static ssize_t kvmppc_exit_timing_write(struct file *file,
                                       const char __user *user_buf,
                                       size_t count, loff_t *ppos)
{
        int err = -EINVAL;
        char c;

        if (count > 1) {
                goto done;
        }

        if (get_user(c, user_buf)) {
                err = -EFAULT;
                goto done;
        }

        if (c == 'c') {
                struct seq_file *seqf = file->private_data;
                struct kvm_vcpu *vcpu = seqf->private;
                /*
                 * Write does not affect the buffers previously generated
                 * with show.  The seq_file is locked here to prevent a
                 * race between init and a concurrent show call.
                 */
                mutex_lock(&seqf->lock);
                kvmppc_init_timing_stats(vcpu);
                mutex_unlock(&seqf->lock);
                err = count;
        }

done:
        return err;
}

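/*
 * single_open() stashes the vcpu pointer (passed in via inode->i_private
 * at file creation time) in seq_file->private, where the show and write
 * handlers above retrieve it.
 */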
static int kvmppc_exit_timing_open(struct inode *inode, struct file *file)
{
        return single_open(file, kvmppc_exit_timing_show, inode->i_private);
}

static const struct file_operations kvmppc_exit_timing_fops = {
        .owner   = THIS_MODULE,
        .open    = kvmppc_exit_timing_open,
        .read    = seq_read,
        .write   = kvmppc_exit_timing_write,
        .llseek  = seq_lseek,
        .release = single_release,
};

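/*
 * Create the per-vcpu debugfs file, named vm<pid>_vcpu<id>_timing after
 * the creating process and the vcpu id, under the kvm debugfs directory.
 * The vcpu pointer is passed as the file's private data so the fops
 * above can find it.
 */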
void kvmppc_create_vcpu_debugfs(struct kvm_vcpu *vcpu, unsigned int id)
{
        static char dbg_fname[50];
        struct dentry *debugfs_file;

        snprintf(dbg_fname, sizeof(dbg_fname), "vm%u_vcpu%u_timing",
                 current->pid, id);
        debugfs_file = debugfs_create_file(dbg_fname, 0666,
                                        kvm_debugfs_dir, vcpu,
                                        &kvmppc_exit_timing_fops);

        if (!debugfs_file) {
                printk(KERN_ERR "%s: error creating debugfs file %s\n",
                        __func__, dbg_fname);
                return;
        }

        vcpu->arch.debugfs_exit_timing = debugfs_file;
}

void kvmppc_remove_vcpu_debugfs(struct kvm_vcpu *vcpu)
{
        if (vcpu->arch.debugfs_exit_timing) {
                debugfs_remove(vcpu->arch.debugfs_exit_timing);
                vcpu->arch.debugfs_exit_timing = NULL;
        }
}