/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2015 Linaro Ltd.
 * Author: Shannon Zhao <shannon.zhao@linaro.org>
 */

#ifndef __ASM_ARM_KVM_PMU_H
#define __ASM_ARM_KVM_PMU_H

#include <linux/perf_event.h>
#include <asm/perf_event.h>

#define ARMV8_PMU_CYCLE_IDX             (ARMV8_PMU_MAX_COUNTERS - 1)
#define ARMV8_PMU_MAX_COUNTER_PAIRS     ((ARMV8_PMU_MAX_COUNTERS + 1) >> 1)

#ifdef CONFIG_HW_PERF_EVENTS

  18struct kvm_pmc {
  19        u8 idx; /* index into the pmu->pmc array */
  20        struct perf_event *perf_event;
  21};
  23struct kvm_pmu {
  24        int irq_num;
  25        struct kvm_pmc pmc[ARMV8_PMU_MAX_COUNTERS];
  26        DECLARE_BITMAP(chained, ARMV8_PMU_MAX_COUNTER_PAIRS);
  27        bool created;
  28        bool irq_level;
  29        struct irq_work overflow_work;
  30};
  32struct arm_pmu_entry {
  33        struct list_head entry;
  34        struct arm_pmu *arm_pmu;
  35};
DECLARE_STATIC_KEY_FALSE(kvm_arm_pmu_available);

  39static __always_inline bool kvm_arm_support_pmu_v3(void)
  40{
  41        return static_branch_likely(&kvm_arm_pmu_available);
  42}

#define kvm_arm_pmu_irq_initialized(v)	((v)->arch.pmu.irq_num >= VGIC_NR_SGIS)
u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u64 select_idx);
void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu, u64 select_idx, u64 val);
u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu);
u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1);
void kvm_pmu_vcpu_init(struct kvm_vcpu *vcpu);
void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu);
void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu);
void kvm_pmu_disable_counter_mask(struct kvm_vcpu *vcpu, u64 val);
void kvm_pmu_enable_counter_mask(struct kvm_vcpu *vcpu, u64 val);
void kvm_pmu_flush_hwstate(struct kvm_vcpu *vcpu);
void kvm_pmu_sync_hwstate(struct kvm_vcpu *vcpu);
bool kvm_pmu_should_notify_user(struct kvm_vcpu *vcpu);
void kvm_pmu_update_run(struct kvm_vcpu *vcpu);
void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val);
void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val);
void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data,
				    u64 select_idx);
int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu,
			    struct kvm_device_attr *attr);
int kvm_arm_pmu_v3_get_attr(struct kvm_vcpu *vcpu,
			    struct kvm_device_attr *attr);
int kvm_arm_pmu_v3_has_attr(struct kvm_vcpu *vcpu,
			    struct kvm_device_attr *attr);
int kvm_arm_pmu_v3_enable(struct kvm_vcpu *vcpu);
#else
  70struct kvm_pmu {
  71};
  73static inline bool kvm_arm_support_pmu_v3(void)
  74{
  75        return false;
  76}
  78#define kvm_arm_pmu_irq_initialized(v)  (false)
  79static inline u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu,
  80                                            u64 select_idx)
  81{
  82        return 0;
  83}
  84static inline void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu,
  85                                             u64 select_idx, u64 val) {}
  86static inline u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu)
  87{
  88        return 0;
  89}
  90static inline void kvm_pmu_vcpu_init(struct kvm_vcpu *vcpu) {}
  91static inline void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu) {}
  92static inline void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu) {}
  93static inline void kvm_pmu_disable_counter_mask(struct kvm_vcpu *vcpu, u64 val) {}
  94static inline void kvm_pmu_enable_counter_mask(struct kvm_vcpu *vcpu, u64 val) {}
  95static inline void kvm_pmu_flush_hwstate(struct kvm_vcpu *vcpu) {}
  96static inline void kvm_pmu_sync_hwstate(struct kvm_vcpu *vcpu) {}
  97static inline bool kvm_pmu_should_notify_user(struct kvm_vcpu *vcpu)
  98{
  99        return false;
 100}
 101static inline void kvm_pmu_update_run(struct kvm_vcpu *vcpu) {}
 102static inline void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val) {}
 103static inline void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val) {}
 104static inline void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu,
 105                                                  u64 data, u64 select_idx) {}
 106static inline int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu,
 107                                          struct kvm_device_attr *attr)
 108{
 109        return -ENXIO;
 110}
 111static inline int kvm_arm_pmu_v3_get_attr(struct kvm_vcpu *vcpu,
 112                                          struct kvm_device_attr *attr)
 113{
 114        return -ENXIO;
 115}
 116static inline int kvm_arm_pmu_v3_has_attr(struct kvm_vcpu *vcpu,
 117                                          struct kvm_device_attr *attr)
 118{
 119        return -ENXIO;
 120}
 121static inline int kvm_arm_pmu_v3_enable(struct kvm_vcpu *vcpu)
 122{
 123        return 0;
 124}
 125static inline u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1)
 126{
 127        return 0;
 128}

#endif /* CONFIG_HW_PERF_EVENTS */

#endif /* __ASM_ARM_KVM_PMU_H */