linux/arch/x86/kvm/pmu.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __KVM_X86_PMU_H
#define __KVM_X86_PMU_H

#include <linux/nospec.h>

#define vcpu_to_pmu(vcpu) (&(vcpu)->arch.pmu)
#define pmu_to_vcpu(pmu)  (container_of((pmu), struct kvm_vcpu, arch.pmu))
#define pmc_to_pmu(pmc)   (&(pmc)->vcpu->arch.pmu)

/* retrieve the 4 bits for EN and PMI out of IA32_FIXED_CTR_CTRL */
#define fixed_ctrl_field(ctrl_reg, idx) (((ctrl_reg) >> ((idx)*4)) & 0xf)
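
/*
 * Worked example (illustrative, not used by the code): each fixed counter
 * owns a 4-bit field in IA32_FIXED_CTR_CTRL -- bit 0 enables counting in
 * ring 0, bit 1 enables counting in rings > 0, bit 3 requests a PMI on
 * overflow.  For a hypothetical control value of 0xb8:
 *
 *      fixed_ctrl_field(0xb8, 1) == 0xb    counter 1: OS + USR + PMI
 *      fixed_ctrl_field(0xb8, 0) == 0x8    counter 0: PMI set, counting off
 */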

/*
 * Synthetic PMC indices recognized by the VMware backdoor; RDPMC with one
 * of these returns a time source (host TSC, real or apparent time) rather
 * than a hardware counter.  See is_vmware_backdoor_pmc().
 */
#define VMWARE_BACKDOOR_PMC_HOST_TSC            0x10000
#define VMWARE_BACKDOOR_PMC_REAL_TIME           0x10001
#define VMWARE_BACKDOOR_PMC_APPARENT_TIME       0x10002

/*
 * Maps a guest {eventsel, unit_mask} pair to a generic perf event type;
 * the vendor code keeps tables of these for the architectural events.
 */
struct kvm_event_hw_type_mapping {
        u8 eventsel;
        u8 unit_mask;
        unsigned event_type;
};

/* Vendor-specific (Intel/AMD) PMU callbacks, see intel_pmu_ops/amd_pmu_ops. */
struct kvm_pmu_ops {
        unsigned (*find_arch_event)(struct kvm_pmu *pmu, u8 event_select,
                                    u8 unit_mask);
        unsigned (*find_fixed_event)(int idx);
        bool (*pmc_is_enabled)(struct kvm_pmc *pmc);
        struct kvm_pmc *(*pmc_idx_to_pmc)(struct kvm_pmu *pmu, int pmc_idx);
        struct kvm_pmc *(*rdpmc_ecx_to_pmc)(struct kvm_vcpu *vcpu,
                unsigned int idx, u64 *mask);
        struct kvm_pmc *(*msr_idx_to_pmc)(struct kvm_vcpu *vcpu, u32 msr);
        int (*is_valid_rdpmc_ecx)(struct kvm_vcpu *vcpu, unsigned int idx);
        bool (*is_valid_msr)(struct kvm_vcpu *vcpu, u32 msr);
        int (*get_msr)(struct kvm_vcpu *vcpu, u32 msr, u64 *data);
        int (*set_msr)(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
        void (*refresh)(struct kvm_vcpu *vcpu);
        void (*init)(struct kvm_vcpu *vcpu);
        void (*reset)(struct kvm_vcpu *vcpu);
};
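
/*
 * A minimal sketch of how a vendor implementation wires up these callbacks
 * (hypothetical "example_*" names; the real tables are intel_pmu_ops and
 * amd_pmu_ops, declared at the bottom of this header and selected through
 * kvm_x86_ops->pmu_ops):
 *
 *      struct kvm_pmu_ops example_pmu_ops = {
 *              .find_arch_event  = example_find_arch_event,
 *              .find_fixed_event = example_find_fixed_event,
 *              .pmc_is_enabled   = example_pmc_is_enabled,
 *              .pmc_idx_to_pmc   = example_pmc_idx_to_pmc,
 *              ...
 *      };
 */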

static inline u64 pmc_bitmask(struct kvm_pmc *pmc)
{
        struct kvm_pmu *pmu = pmc_to_pmu(pmc);

        return pmu->counter_bitmask[pmc->type];
}

static inline u64 pmc_read_counter(struct kvm_pmc *pmc)
{
        u64 counter, enabled, running;

        counter = pmc->counter;
        if (pmc->perf_event)
                counter += perf_event_read_value(pmc->perf_event,
                                                 &enabled, &running);
        /* FIXME: Scaling needed? */
        return counter & pmc_bitmask(pmc);
}
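
/*
 * Illustrative arithmetic (assuming 48-bit general-purpose counters, i.e.
 * counter_bitmask[KVM_PMC_GP] == (1ULL << 48) - 1): if pmc->counter plus
 * the perf_event delta sums to (1ULL << 48) + 5, the masked result reads
 * back as 5, emulating the wrap-around of a 48-bit hardware counter.
 */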

static inline void pmc_release_perf_event(struct kvm_pmc *pmc)
{
        if (pmc->perf_event) {
                perf_event_release_kernel(pmc->perf_event);
                pmc->perf_event = NULL;
                pmc->current_config = 0;
                pmc_to_pmu(pmc)->event_count--;
        }
}

/* Fold the perf event's pending count into pmc->counter before release. */
static inline void pmc_stop_counter(struct kvm_pmc *pmc)
{
        if (pmc->perf_event) {
                pmc->counter = pmc_read_counter(pmc);
                pmc_release_perf_event(pmc);
        }
}

static inline bool pmc_is_gp(struct kvm_pmc *pmc)
{
        return pmc->type == KVM_PMC_GP;
}

static inline bool pmc_is_fixed(struct kvm_pmc *pmc)
{
        return pmc->type == KVM_PMC_FIXED;
}

static inline bool pmc_is_enabled(struct kvm_pmc *pmc)
{
        return kvm_x86_ops->pmu_ops->pmc_is_enabled(pmc);
}

static inline bool kvm_valid_perf_global_ctrl(struct kvm_pmu *pmu,
                                                 u64 data)
{
        return !(pmu->global_ctrl_mask & data);
}
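
/*
 * Worked example (illustrative): global_ctrl_mask holds the reserved bits
 * of IA32_PERF_GLOBAL_CTRL.  With 4 GP and 3 fixed counters the writable
 * bits are 0xf | (0x7ULL << 32), so global_ctrl_mask is the complement of
 * that, and a write with, say, bit 35 set fails the check above.
 */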

/* Returns the general-purpose PMC associated with the specified MSR. Note
 * that it can be used for both PERFCTRn and EVNTSELn; that is why it
 * accepts base as a parameter to tell the two ranges apart.
 */
static inline struct kvm_pmc *get_gp_pmc(struct kvm_pmu *pmu, u32 msr,
                                         u32 base)
{
        if (msr >= base && msr < base + pmu->nr_arch_gp_counters) {
                u32 index = array_index_nospec(msr - base,
                                               pmu->nr_arch_gp_counters);

                return &pmu->gp_counters[index];
        }

        return NULL;
}
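
/*
 * For example (illustrative; Intel MSR layout), callers pass the start of
 * either the counter or the event-select range as @base:
 *
 *      get_gp_pmc(pmu, MSR_IA32_PERFCTR0 + 2, MSR_IA32_PERFCTR0)
 *      get_gp_pmc(pmu, MSR_P6_EVNTSEL0 + 2, MSR_P6_EVNTSEL0)
 *
 * both resolve to &pmu->gp_counters[2], provided nr_arch_gp_counters > 2.
 */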

/* returns fixed PMC with the specified MSR */
static inline struct kvm_pmc *get_fixed_pmc(struct kvm_pmu *pmu, u32 msr)
{
        int base = MSR_CORE_PERF_FIXED_CTR0;

        if (msr >= base && msr < base + pmu->nr_arch_fixed_counters) {
                u32 index = array_index_nospec(msr - base,
                                               pmu->nr_arch_fixed_counters);

                return &pmu->fixed_counters[index];
        }

        return NULL;
}
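
/*
 * For example (illustrative): get_fixed_pmc(pmu, MSR_CORE_PERF_FIXED_CTR0 + 1)
 * resolves to &pmu->fixed_counters[1] when nr_arch_fixed_counters > 1, and
 * returns NULL for any MSR outside the fixed-counter range.
 */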

void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel);
void reprogram_fixed_counter(struct kvm_pmc *pmc, u8 ctrl, int fixed_idx);
void reprogram_counter(struct kvm_pmu *pmu, int pmc_idx);

void kvm_pmu_deliver_pmi(struct kvm_vcpu *vcpu);
void kvm_pmu_handle_event(struct kvm_vcpu *vcpu);
int kvm_pmu_rdpmc(struct kvm_vcpu *vcpu, unsigned pmc, u64 *data);
int kvm_pmu_is_valid_rdpmc_ecx(struct kvm_vcpu *vcpu, unsigned int idx);
bool kvm_pmu_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr);
int kvm_pmu_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *data);
int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
void kvm_pmu_refresh(struct kvm_vcpu *vcpu);
void kvm_pmu_reset(struct kvm_vcpu *vcpu);
void kvm_pmu_init(struct kvm_vcpu *vcpu);
void kvm_pmu_cleanup(struct kvm_vcpu *vcpu);
void kvm_pmu_destroy(struct kvm_vcpu *vcpu);
int kvm_vm_ioctl_set_pmu_event_filter(struct kvm *kvm, void __user *argp);

bool is_vmware_backdoor_pmc(u32 pmc_idx);

extern struct kvm_pmu_ops intel_pmu_ops;
extern struct kvm_pmu_ops amd_pmu_ops;
#endif /* __KVM_X86_PMU_H */