linux/include/kvm/arm_pmu.h
/*
 * Copyright (C) 2015 Linaro Ltd.
 * Author: Shannon Zhao <shannon.zhao@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#ifndef __ASM_ARM_KVM_PMU_H
#define __ASM_ARM_KVM_PMU_H

#include <linux/perf_event.h>
#include <asm/perf_event.h>

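/* The cycle counter (PMCCNTR) is mapped onto the last counter index. */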
#define ARMV8_PMU_CYCLE_IDX             (ARMV8_PMU_MAX_COUNTERS - 1)

#ifdef CONFIG_KVM_ARM_PMU

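/*
 * State of one emulated counter: the host perf event backing it (if
 * any) and a mask matching the effective width of the counter.
 */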
struct kvm_pmc {
        u8 idx; /* index into the pmu->pmc array */
        struct perf_event *perf_event;
        u64 bitmask;
};

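/*
 * Per-vCPU PMU state: the overflow interrupt number, one kvm_pmc per
 * architectural counter, and flags tracking whether the PMU has been
 * initialized and the current level of the virtual interrupt line.
 */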
struct kvm_pmu {
        int irq_num;
        struct kvm_pmc pmc[ARMV8_PMU_MAX_COUNTERS];
        bool ready;
        bool created;
        bool irq_level;
};

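/*
 * The overflow interrupt counts as initialized once it has been set to
 * a PPI or SPI number, i.e. anything outside the SGI range.
 */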
#define kvm_arm_pmu_v3_ready(v)         ((v)->arch.pmu.ready)
#define kvm_arm_pmu_irq_initialized(v)  ((v)->arch.pmu.irq_num >= VGIC_NR_SGIS)
u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u64 select_idx);
void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu, u64 select_idx, u64 val);
u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu);
void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu);
void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu);
void kvm_pmu_disable_counter(struct kvm_vcpu *vcpu, u64 val);
void kvm_pmu_enable_counter(struct kvm_vcpu *vcpu, u64 val);
void kvm_pmu_flush_hwstate(struct kvm_vcpu *vcpu);
void kvm_pmu_sync_hwstate(struct kvm_vcpu *vcpu);
bool kvm_pmu_should_notify_user(struct kvm_vcpu *vcpu);
void kvm_pmu_update_run(struct kvm_vcpu *vcpu);
void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val);
void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val);
void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data,
                                    u64 select_idx);
bool kvm_arm_support_pmu_v3(void);
int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu,
                            struct kvm_device_attr *attr);
int kvm_arm_pmu_v3_get_attr(struct kvm_vcpu *vcpu,
                            struct kvm_device_attr *attr);
int kvm_arm_pmu_v3_has_attr(struct kvm_vcpu *vcpu,
                            struct kvm_device_attr *attr);
int kvm_arm_pmu_v3_enable(struct kvm_vcpu *vcpu);
#else
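/*
 * CONFIG_KVM_ARM_PMU is not set: provide no-op stubs (and -ENXIO for
 * the attribute accessors) so that callers do not need #ifdefs.
 */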
struct kvm_pmu {
};

#define kvm_arm_pmu_v3_ready(v)         (false)
#define kvm_arm_pmu_irq_initialized(v)  (false)
static inline u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu,
                                            u64 select_idx)
{
        return 0;
}
static inline void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu,
                                             u64 select_idx, u64 val) {}
static inline u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu)
{
        return 0;
}
static inline void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu) {}
static inline void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu) {}
static inline void kvm_pmu_disable_counter(struct kvm_vcpu *vcpu, u64 val) {}
static inline void kvm_pmu_enable_counter(struct kvm_vcpu *vcpu, u64 val) {}
static inline void kvm_pmu_flush_hwstate(struct kvm_vcpu *vcpu) {}
static inline void kvm_pmu_sync_hwstate(struct kvm_vcpu *vcpu) {}
static inline bool kvm_pmu_should_notify_user(struct kvm_vcpu *vcpu)
{
        return false;
}
static inline void kvm_pmu_update_run(struct kvm_vcpu *vcpu) {}
static inline void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val) {}
static inline void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val) {}
static inline void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu,
                                                  u64 data, u64 select_idx) {}
static inline bool kvm_arm_support_pmu_v3(void) { return false; }
static inline int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu,
                                          struct kvm_device_attr *attr)
{
        return -ENXIO;
}
static inline int kvm_arm_pmu_v3_get_attr(struct kvm_vcpu *vcpu,
                                          struct kvm_device_attr *attr)
{
        return -ENXIO;
}
static inline int kvm_arm_pmu_v3_has_attr(struct kvm_vcpu *vcpu,
                                          struct kvm_device_attr *attr)
{
        return -ENXIO;
}
static inline int kvm_arm_pmu_v3_enable(struct kvm_vcpu *vcpu)
{
        return 0;
}
#endif /* CONFIG_KVM_ARM_PMU */

#endif /* __ASM_ARM_KVM_PMU_H */