/*
 * KVM PMU support for Intel CPUs
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Avi Kivity   <avi@redhat.com>
 *   Gleb Natapov <gleb@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */
#include <linux/types.h>
#include <linux/kvm_host.h>
#include <linux/perf_event.h>
#include <asm/perf_event.h>
#include "x86.h"
#include "cpuid.h"
#include "lapic.h"
#include "pmu.h"

static struct kvm_event_hw_type_mapping intel_arch_events[] = {
        /* Index must match CPUID 0x0A.EBX bit vector */
        [0] = { 0x3c, 0x00, PERF_COUNT_HW_CPU_CYCLES },
        [1] = { 0xc0, 0x00, PERF_COUNT_HW_INSTRUCTIONS },
        [2] = { 0x3c, 0x01, PERF_COUNT_HW_BUS_CYCLES },
        [3] = { 0x2e, 0x4f, PERF_COUNT_HW_CACHE_REFERENCES },
        [4] = { 0x2e, 0x41, PERF_COUNT_HW_CACHE_MISSES },
        [5] = { 0xc4, 0x00, PERF_COUNT_HW_BRANCH_INSTRUCTIONS },
        [6] = { 0xc5, 0x00, PERF_COUNT_HW_BRANCH_MISSES },
        /* 0x00/0x03 is the pseudo-encoding for unhalted reference cycles */
        [7] = { 0x00, 0x03, PERF_COUNT_HW_REF_CPU_CYCLES },
};

/* mapping between fixed pmc index and intel_arch_events array */
static int fixed_pmc_events[] = {1, 0, 7};

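/*
 * Each fixed counter owns a 4-bit field in IA32_FIXED_CTR_CTRL: enable
 * bits 0-1 (OS/user), the AnyThread bit 2 and the PMI-on-overflow bit 3.
 * Only counters whose field actually changed are reprogrammed.
 */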
static void reprogram_fixed_counters(struct kvm_pmu *pmu, u64 data)
{
        int i;

        for (i = 0; i < pmu->nr_arch_fixed_counters; i++) {
                u8 new_ctrl = fixed_ctrl_field(data, i);
                u8 old_ctrl = fixed_ctrl_field(pmu->fixed_ctr_ctrl, i);
                struct kvm_pmc *pmc;

                pmc = get_fixed_pmc(pmu, MSR_CORE_PERF_FIXED_CTR0 + i);

                if (old_ctrl == new_ctrl)
                        continue;

                reprogram_fixed_counter(pmc, new_ctrl, i);
        }

        pmu->fixed_ctr_ctrl = data;
}

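/*
 * A write to IA32_PERF_GLOBAL_CTRL only needs to touch counters whose
 * enable bit actually flipped; XOR against the old value picks out
 * exactly those bits.
 */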
static void global_ctrl_changed(struct kvm_pmu *pmu, u64 data)
{
        int bit;
        u64 diff = pmu->global_ctrl ^ data;

        pmu->global_ctrl = data;

        for_each_set_bit(bit, (unsigned long *)&diff, X86_PMC_IDX_MAX)
                reprogram_counter(pmu, bit);
}

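/*
 * Map a guest event_select/unit_mask pair onto a generic perf event.
 * Events that the guest's CPUID 0xA leaf marks as unavailable are
 * skipped; PERF_COUNT_HW_MAX tells the caller to fall back to a raw
 * perf event instead.
 */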
static unsigned intel_find_arch_event(struct kvm_pmu *pmu,
                                      u8 event_select,
                                      u8 unit_mask)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(intel_arch_events); i++)
                if (intel_arch_events[i].eventsel == event_select
                    && intel_arch_events[i].unit_mask == unit_mask
                    && (pmu->available_event_types & (1 << i)))
                        break;

        if (i == ARRAY_SIZE(intel_arch_events))
                return PERF_COUNT_HW_MAX;

        return intel_arch_events[i].event_type;
}

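/*
 * Fixed counters count hard-wired architectural events: counter 0 is
 * instructions retired, 1 is unhalted core cycles and 2 is unhalted
 * reference cycles (see fixed_pmc_events above).
 */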
static unsigned intel_find_fixed_event(int idx)
{
        if (idx >= ARRAY_SIZE(fixed_pmc_events))
                return PERF_COUNT_HW_MAX;

        return intel_arch_events[fixed_pmc_events[idx]].event_type;
}

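/*
 * A counter counts only while its enable bit in IA32_PERF_GLOBAL_CTRL
 * is set; pmc->idx doubles as the bit position (fixed counters start
 * at bit INTEL_PMC_IDX_FIXED).
 */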
static bool intel_pmc_is_enabled(struct kvm_pmc *pmc)
{
        struct kvm_pmu *pmu = pmc_to_pmu(pmc);

        return test_bit(pmc->idx, (unsigned long *)&pmu->global_ctrl);
}

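/* Translate a global-ctrl bit index back to the backing kvm_pmc. */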
static struct kvm_pmc *intel_pmc_idx_to_pmc(struct kvm_pmu *pmu, int pmc_idx)
{
        if (pmc_idx < INTEL_PMC_IDX_FIXED)
                return get_gp_pmc(pmu, MSR_P6_EVNTSEL0 + pmc_idx,
                                  MSR_P6_EVNTSEL0);
        else {
                u32 idx = pmc_idx - INTEL_PMC_IDX_FIXED;

                return get_fixed_pmc(pmu, idx + MSR_CORE_PERF_FIXED_CTR0);
        }
}

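/*
 * Validate a guest RDPMC index: bit 30 selects the fixed-counter space
 * and the low bits the counter number; bits 30-31 are stripped before
 * the range check.  A nonzero return means the index is invalid.
 */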
static int intel_is_valid_msr_idx(struct kvm_vcpu *vcpu, unsigned idx)
{
        struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
        bool fixed = idx & (1u << 30);

        idx &= ~(3u << 30);

        return (!fixed && idx >= pmu->nr_arch_gp_counters) ||
                (fixed && idx >= pmu->nr_arch_fixed_counters);
}

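/* Resolve a guest RDPMC index to the backing counter, or NULL if out of range. */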
static struct kvm_pmc *intel_msr_idx_to_pmc(struct kvm_vcpu *vcpu,
                                            unsigned idx)
{
        struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
        bool fixed = idx & (1u << 30);
        struct kvm_pmc *counters;

        idx &= ~(3u << 30);
        if (!fixed && idx >= pmu->nr_arch_gp_counters)
                return NULL;
        if (fixed && idx >= pmu->nr_arch_fixed_counters)
                return NULL;
        counters = fixed ? pmu->fixed_counters : pmu->gp_counters;

        return &counters[idx];
}

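/*
 * The global status/control MSRs only exist from architectural PMU
 * version 2 on; anything else must decode to a GP counter, an event
 * select or a fixed counter MSR.
 */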
static bool intel_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)
{
        struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
        int ret;

        switch (msr) {
        case MSR_CORE_PERF_FIXED_CTR_CTRL:
        case MSR_CORE_PERF_GLOBAL_STATUS:
        case MSR_CORE_PERF_GLOBAL_CTRL:
        case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
                ret = pmu->version > 1;
                break;
        default:
                ret = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0) ||
                        get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0) ||
                        get_fixed_pmc(pmu, msr);
                break;
        }

        return ret;
}

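/*
 * Read a PMU MSR.  Returns 0 on success and 1 for an unhandled MSR,
 * which the common code turns into a #GP for the guest.
 */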
static int intel_pmu_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *data)
{
        struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
        struct kvm_pmc *pmc;

        switch (msr) {
        case MSR_CORE_PERF_FIXED_CTR_CTRL:
                *data = pmu->fixed_ctr_ctrl;
                return 0;
        case MSR_CORE_PERF_GLOBAL_STATUS:
                *data = pmu->global_status;
                return 0;
        case MSR_CORE_PERF_GLOBAL_CTRL:
                *data = pmu->global_ctrl;
                return 0;
        case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
                *data = pmu->global_ovf_ctrl;
                return 0;
        default:
                if ((pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0)) ||
                    (pmc = get_fixed_pmc(pmu, msr))) {
                        *data = pmc_read_counter(pmc);
                        return 0;
                } else if ((pmc = get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0))) {
                        *data = pmc->eventsel;
                        return 0;
                }
        }

        return 1;
}

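/*
 * Write a PMU MSR.  Writes that set reserved bits are refused (return
 * 1, i.e. #GP).  Guest writes to a counter are sign-extended from 32
 * bits, mirroring how hardware treats the legacy counter MSRs.
 */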
static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
        struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
        struct kvm_pmc *pmc;
        u32 msr = msr_info->index;
        u64 data = msr_info->data;

        switch (msr) {
        case MSR_CORE_PERF_FIXED_CTR_CTRL:
                if (pmu->fixed_ctr_ctrl == data)
                        return 0;
                if (!(data & 0xfffffffffffff444ull)) {
                        reprogram_fixed_counters(pmu, data);
                        return 0;
                }
                break;
        case MSR_CORE_PERF_GLOBAL_STATUS:
                if (msr_info->host_initiated) {
                        pmu->global_status = data;
                        return 0;
                }
                break; /* RO MSR */
        case MSR_CORE_PERF_GLOBAL_CTRL:
                if (pmu->global_ctrl == data)
                        return 0;
                if (!(data & pmu->global_ctrl_mask)) {
                        global_ctrl_changed(pmu, data);
                        return 0;
                }
                break;
        case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
                if (!(data & (pmu->global_ctrl_mask & ~(3ull << 62)))) {
                        if (!msr_info->host_initiated)
                                pmu->global_status &= ~data;
                        pmu->global_ovf_ctrl = data;
                        return 0;
                }
                break;
        default:
                if ((pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0)) ||
                    (pmc = get_fixed_pmc(pmu, msr))) {
                        if (!msr_info->host_initiated)
                                data = (s64)(s32)data;
                        pmc->counter += data - pmc_read_counter(pmc);
                        return 0;
                } else if ((pmc = get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0))) {
                        if (data == pmc->eventsel)
                                return 0;
                        if (!(data & pmu->reserved_bits)) {
                                reprogram_gp_counter(pmc, data);
                                return 0;
                        }
                }
        }

        return 1;
}

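/*
 * (Re)configure the virtual PMU from the guest's CPUID 0xA leaf.  The
 * counter counts, bit widths and available-events mask all come from
 * what userspace exposed in CPUID, clamped to what KVM supports.
 */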
static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
{
        struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
        struct kvm_cpuid_entry2 *entry;
        union cpuid10_eax eax;
        union cpuid10_edx edx;

        pmu->nr_arch_gp_counters = 0;
        pmu->nr_arch_fixed_counters = 0;
        pmu->counter_bitmask[KVM_PMC_GP] = 0;
        pmu->counter_bitmask[KVM_PMC_FIXED] = 0;
        pmu->version = 0;
        pmu->reserved_bits = 0xffffffff00200000ull;

        entry = kvm_find_cpuid_entry(vcpu, 0xa, 0);
        if (!entry)
                return;
        eax.full = entry->eax;
        edx.full = entry->edx;

        pmu->version = eax.split.version_id;
        if (!pmu->version)
                return;

        pmu->nr_arch_gp_counters = min_t(int, eax.split.num_counters,
                                         INTEL_PMC_MAX_GENERIC);
        pmu->counter_bitmask[KVM_PMC_GP] = ((u64)1 << eax.split.bit_width) - 1;
        pmu->available_event_types = ~entry->ebx &
                                        ((1ull << eax.split.mask_length) - 1);

        if (pmu->version == 1) {
                pmu->nr_arch_fixed_counters = 0;
        } else {
                pmu->nr_arch_fixed_counters =
                        min_t(int, edx.split.num_counters_fixed,
                              INTEL_PMC_MAX_FIXED);
                pmu->counter_bitmask[KVM_PMC_FIXED] =
                        ((u64)1 << edx.split.bit_width_fixed) - 1;
        }

        pmu->global_ctrl = ((1ull << pmu->nr_arch_gp_counters) - 1) |
                (((1ull << pmu->nr_arch_fixed_counters) - 1) << INTEL_PMC_IDX_FIXED);
        pmu->global_ctrl_mask = ~pmu->global_ctrl;

        /*
         * CPUID.(EAX=7,ECX=0):EBX bit 4 is HLE, bit 11 is RTM.  The
         * X86_FEATURE_* constants are word/bit numbers, not EBX masks,
         * so they must not be ANDed against entry->ebx directly.
         */
        entry = kvm_find_cpuid_entry(vcpu, 7, 0);
        if (entry &&
            (boot_cpu_has(X86_FEATURE_HLE) || boot_cpu_has(X86_FEATURE_RTM)) &&
            (entry->ebx & ((1u << 4) | (1u << 11))))
                pmu->reserved_bits ^= HSW_IN_TX|HSW_IN_TX_CHECKPOINTED;
}

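/*
 * One-time init: point every possible counter at its vcpu and global
 * index.  intel_pmu_refresh() decides how many of them the guest
 * actually sees.
 */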
static void intel_pmu_init(struct kvm_vcpu *vcpu)
{
        int i;
        struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);

        for (i = 0; i < INTEL_PMC_MAX_GENERIC; i++) {
                pmu->gp_counters[i].type = KVM_PMC_GP;
                pmu->gp_counters[i].vcpu = vcpu;
                pmu->gp_counters[i].idx = i;
        }

        for (i = 0; i < INTEL_PMC_MAX_FIXED; i++) {
                pmu->fixed_counters[i].type = KVM_PMC_FIXED;
                pmu->fixed_counters[i].vcpu = vcpu;
                pmu->fixed_counters[i].idx = i + INTEL_PMC_IDX_FIXED;
        }
}

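/* Stop every counter and clear all PMU state on vcpu reset. */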
static void intel_pmu_reset(struct kvm_vcpu *vcpu)
{
        struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
        int i;

        for (i = 0; i < INTEL_PMC_MAX_GENERIC; i++) {
                struct kvm_pmc *pmc = &pmu->gp_counters[i];

                pmc_stop_counter(pmc);
                pmc->counter = pmc->eventsel = 0;
        }

        for (i = 0; i < INTEL_PMC_MAX_FIXED; i++) {
                struct kvm_pmc *pmc = &pmu->fixed_counters[i];

                /* zero the counter value, not just stop it */
                pmc_stop_counter(pmc);
                pmc->counter = 0;
        }

        pmu->fixed_ctr_ctrl = pmu->global_ctrl = pmu->global_status =
                pmu->global_ovf_ctrl = 0;
}

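/* Callbacks consumed by the generic vPMU code in arch/x86/kvm/pmu.c. */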
struct kvm_pmu_ops intel_pmu_ops = {
        .find_arch_event = intel_find_arch_event,
        .find_fixed_event = intel_find_fixed_event,
        .pmc_is_enabled = intel_pmc_is_enabled,
        .pmc_idx_to_pmc = intel_pmc_idx_to_pmc,
        .msr_idx_to_pmc = intel_msr_idx_to_pmc,
        .is_valid_msr_idx = intel_is_valid_msr_idx,
        .is_valid_msr = intel_is_valid_msr,
        .get_msr = intel_pmu_get_msr,
        .set_msr = intel_pmu_set_msr,
        .refresh = intel_pmu_refresh,
        .init = intel_pmu_init,
        .reset = intel_pmu_reset,
};