/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2015 Linaro Ltd.
 * Author: Shannon Zhao <shannon.zhao@linaro.org>
 */

7#ifndef __ASM_ARM_KVM_PMU_H
8#define __ASM_ARM_KVM_PMU_H
9
10#include <linux/perf_event.h>
11#include <asm/perf_event.h>
12
/* The cycle counter occupies the highest counter index. */
#define ARMV8_PMU_CYCLE_IDX (ARMV8_PMU_MAX_COUNTERS - 1)
/* Counters chain in even/odd pairs; round up to cover an odd total. */
#define ARMV8_PMU_MAX_COUNTER_PAIRS ((ARMV8_PMU_MAX_COUNTERS + 1) >> 1)

/* Patched-at-boot static key: true when the host has a usable PMU. */
DECLARE_STATIC_KEY_FALSE(kvm_arm_pmu_available);
17
/*
 * Hot-path check for host PMUv3 support. Built on a static branch, so
 * after boot-time patching the common case costs no memory load; the
 * "likely" hint optimizes the layout for hosts that do have a PMU.
 */
static __always_inline bool kvm_arm_support_pmu_v3(void)
{
	return static_branch_likely(&kvm_arm_pmu_available);
}
22
23#ifdef CONFIG_HW_PERF_EVENTS
24
/* State for a single emulated guest PMU counter. */
struct kvm_pmc {
	u8 idx;	/* counter index; presumably indexes kvm_pmu.pmc[] below */
	struct perf_event *perf_event;	/* host perf event backing this counter */
};
29
/* Per-vCPU PMU emulation state. */
struct kvm_pmu {
	int irq_num;	/* overflow IRQ; compared against VGIC_NR_SGIS by kvm_arm_pmu_irq_initialized() */
	struct kvm_pmc pmc[ARMV8_PMU_MAX_COUNTERS];	/* one slot per architectural counter */
	DECLARE_BITMAP(chained, ARMV8_PMU_MAX_COUNTER_PAIRS);	/* which even/odd counter pairs are chained */
	bool created;	/* NOTE(review): presumably set once userspace finishes device setup — see kvm_arm_pmu_v3_enable() */
	bool irq_level;	/* last overflow interrupt line level presented to the guest */
	struct irq_work overflow_work;	/* deferred (irq_work) handling of counter overflow */
};
38
/* True once userspace has configured an overflow IRQ (SGI range is invalid). */
#define kvm_arm_pmu_irq_initialized(v) ((v)->arch.pmu.irq_num >= VGIC_NR_SGIS)
/* Read/write the current value of guest counter @select_idx. */
u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u64 select_idx);
void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu, u64 select_idx, u64 val);
/* Bitmask of counters implemented for this vCPU's PMU. */
u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu);
/* Value for the PMCEID0 (pmceid1 == false) or PMCEID1 (true) ID register. */
u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1);
/* vCPU lifecycle hooks. */
void kvm_pmu_vcpu_init(struct kvm_vcpu *vcpu);
void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu);
void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu);
/* Enable/disable the counters selected by bitmask @val. */
void kvm_pmu_disable_counter_mask(struct kvm_vcpu *vcpu, u64 val);
void kvm_pmu_enable_counter_mask(struct kvm_vcpu *vcpu, u64 val);
/* Sync PMU state around guest entry (flush) and exit (sync). */
void kvm_pmu_flush_hwstate(struct kvm_vcpu *vcpu);
void kvm_pmu_sync_hwstate(struct kvm_vcpu *vcpu);
/* Userspace notification of PMU interrupt level changes via kvm_run. */
bool kvm_pmu_should_notify_user(struct kvm_vcpu *vcpu);
void kvm_pmu_update_run(struct kvm_vcpu *vcpu);
/* Emulate a guest write to PMSWINC (software-increment the selected counters). */
void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val);
/* Emulate a guest write to the PMCR control register. */
void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val);
/* Program the event type for counter @select_idx from a PMEVTYPER write. */
void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data,
				    u64 select_idx);
/* KVM_{SET,GET,HAS}_DEVICE_ATTR handlers for the vPMU device. */
int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu,
			    struct kvm_device_attr *attr);
int kvm_arm_pmu_v3_get_attr(struct kvm_vcpu *vcpu,
			    struct kvm_device_attr *attr);
int kvm_arm_pmu_v3_has_attr(struct kvm_vcpu *vcpu,
			    struct kvm_device_attr *attr);
/* Finalize the vPMU before the vCPU first runs. */
int kvm_arm_pmu_v3_enable(struct kvm_vcpu *vcpu);
64#else
/*
 * Stubs for CONFIG_HW_PERF_EVENTS=n: no PMU is ever exposed to the guest.
 * State collapses to an empty struct, operations are no-ops, reads return
 * 0, and the device-attribute interface reports -ENXIO.
 */
struct kvm_pmu {
};

/* Without PMU support no overflow IRQ can ever be configured. */
#define kvm_arm_pmu_irq_initialized(v) (false)
static inline u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu,
					    u64 select_idx)
{
	return 0;
}
static inline void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu,
					     u64 select_idx, u64 val) {}
static inline u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu)
{
	return 0;
}
static inline void kvm_pmu_vcpu_init(struct kvm_vcpu *vcpu) {}
static inline void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu) {}
static inline void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu) {}
static inline void kvm_pmu_disable_counter_mask(struct kvm_vcpu *vcpu, u64 val) {}
static inline void kvm_pmu_enable_counter_mask(struct kvm_vcpu *vcpu, u64 val) {}
static inline void kvm_pmu_flush_hwstate(struct kvm_vcpu *vcpu) {}
static inline void kvm_pmu_sync_hwstate(struct kvm_vcpu *vcpu) {}
static inline bool kvm_pmu_should_notify_user(struct kvm_vcpu *vcpu)
{
	return false;
}
static inline void kvm_pmu_update_run(struct kvm_vcpu *vcpu) {}
static inline void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val) {}
static inline void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val) {}
static inline void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu,
						  u64 data, u64 select_idx) {}
/* Device attrs are absent, so all attr ops fail with -ENXIO. */
static inline int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu,
					  struct kvm_device_attr *attr)
{
	return -ENXIO;
}
static inline int kvm_arm_pmu_v3_get_attr(struct kvm_vcpu *vcpu,
					  struct kvm_device_attr *attr)
{
	return -ENXIO;
}
static inline int kvm_arm_pmu_v3_has_attr(struct kvm_vcpu *vcpu,
					  struct kvm_device_attr *attr)
{
	return -ENXIO;
}
/* Nothing to finalize; report success so vCPU setup proceeds. */
static inline int kvm_arm_pmu_v3_enable(struct kvm_vcpu *vcpu)
{
	return 0;
}
static inline u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1)
{
	return 0;
}
119
120#endif
121
122#endif
123