/*
 * Copyright (C) 2015 Linaro Ltd.
 * Author: Shannon Zhao <shannon.zhao@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/cpu.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/perf_event.h>
#include <linux/uaccess.h>
#include <asm/kvm_emulate.h>
#include <kvm/arm_pmu.h>
#include <kvm/arm_vgic.h>

/**
 * kvm_pmu_get_counter_value - get PMU counter value
 * @vcpu: The vcpu pointer
 * @select_idx: The counter index
 */
u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u64 select_idx)
{
	u64 counter, reg, enabled, running;
	struct kvm_pmu *pmu = &vcpu->arch.pmu;
	struct kvm_pmc *pmc = &pmu->pmc[select_idx];

	reg = (select_idx == ARMV8_PMU_CYCLE_IDX)
	      ? PMCCNTR_EL0 : PMEVCNTR0_EL0 + select_idx;
	counter = vcpu_sys_reg(vcpu, reg);

	/* The real counter value is equal to the value of counter register plus
	 * the value perf event counts.
	 */
	if (pmc->perf_event)
		counter += perf_event_read_value(pmc->perf_event, &enabled,
						 &running);

	return counter & pmc->bitmask;
}

/**
 * kvm_pmu_set_counter_value - set PMU counter value
 * @vcpu: The vcpu pointer
 * @select_idx: The counter index
 * @val: The counter value
 */
void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu, u64 select_idx, u64 val)
{
	u64 reg;

	reg = (select_idx == ARMV8_PMU_CYCLE_IDX)
	      ? PMCCNTR_EL0 : PMEVCNTR0_EL0 + select_idx;
	vcpu_sys_reg(vcpu, reg) += (s64)val - kvm_pmu_get_counter_value(vcpu, select_idx);
}

/**
 * kvm_pmu_stop_counter - stop PMU counter
 * @pmc: The PMU counter pointer
 *
 * If this counter has been configured to monitor some event, release it here.
 */
static void kvm_pmu_stop_counter(struct kvm_vcpu *vcpu, struct kvm_pmc *pmc)
{
	u64 counter, reg;

	if (pmc->perf_event) {
		counter = kvm_pmu_get_counter_value(vcpu, pmc->idx);
		reg = (pmc->idx == ARMV8_PMU_CYCLE_IDX)
		       ? PMCCNTR_EL0 : PMEVCNTR0_EL0 + pmc->idx;
		vcpu_sys_reg(vcpu, reg) = counter;
		perf_event_disable(pmc->perf_event);
		perf_event_release_kernel(pmc->perf_event);
		pmc->perf_event = NULL;
	}
}

/**
 * kvm_pmu_vcpu_reset - reset pmu state for cpu
 * @vcpu: The vcpu pointer
 */
void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu)
{
	int i;
	struct kvm_pmu *pmu = &vcpu->arch.pmu;

	for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++) {
		kvm_pmu_stop_counter(vcpu, &pmu->pmc[i]);
		pmu->pmc[i].idx = i;
		pmu->pmc[i].bitmask = 0xffffffffUL;
	}
}

/**
 * kvm_pmu_vcpu_destroy - free perf event of PMU for cpu
 * @vcpu: The vcpu pointer
 */
void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	int i;
	struct kvm_pmu *pmu = &vcpu->arch.pmu;

	for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++) {
		struct kvm_pmc *pmc = &pmu->pmc[i];

		if (pmc->perf_event) {
			perf_event_disable(pmc->perf_event);
			perf_event_release_kernel(pmc->perf_event);
			pmc->perf_event = NULL;
		}
	}
}

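/*
 * PMCR_EL0.N holds the number of implemented event counters; the cycle
 * counter is always implemented, so its bit is always part of the mask.
 */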
u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu)
{
	u64 val = vcpu_sys_reg(vcpu, PMCR_EL0) >> ARMV8_PMU_PMCR_N_SHIFT;

	val &= ARMV8_PMU_PMCR_N_MASK;
	if (val == 0)
		return BIT(ARMV8_PMU_CYCLE_IDX);
	else
		return GENMASK(val - 1, 0) | BIT(ARMV8_PMU_CYCLE_IDX);
}

/**
 * kvm_pmu_enable_counter - enable selected PMU counter
 * @vcpu: The vcpu pointer
 * @val: the value guest writes to PMCNTENSET register
 *
 * Call perf_event_enable to start counting the perf event
 */
void kvm_pmu_enable_counter(struct kvm_vcpu *vcpu, u64 val)
{
	int i;
	struct kvm_pmu *pmu = &vcpu->arch.pmu;
	struct kvm_pmc *pmc;

	if (!(vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E) || !val)
		return;

	for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++) {
		if (!(val & BIT(i)))
			continue;

		pmc = &pmu->pmc[i];
		if (pmc->perf_event) {
			perf_event_enable(pmc->perf_event);
			if (pmc->perf_event->state != PERF_EVENT_STATE_ACTIVE)
				kvm_debug("failed to enable perf event\n");
		}
	}
}

/**
 * kvm_pmu_disable_counter - disable selected PMU counter
 * @vcpu: The vcpu pointer
 * @val: the value guest writes to PMCNTENCLR register
 *
 * Call perf_event_disable to stop counting the perf event
 */
void kvm_pmu_disable_counter(struct kvm_vcpu *vcpu, u64 val)
{
	int i;
	struct kvm_pmu *pmu = &vcpu->arch.pmu;
	struct kvm_pmc *pmc;

	if (!val)
		return;

	for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++) {
		if (!(val & BIT(i)))
			continue;

		pmc = &pmu->pmc[i];
		if (pmc->perf_event)
			perf_event_disable(pmc->perf_event);
	}
}

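/*
 * A counter contributes to the overflow interrupt only if the PMU is
 * globally enabled (PMCR_EL0.E), the counter and its overflow interrupt
 * are both enabled, and its overflow flag is set; mask accordingly.
 */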
static u64 kvm_pmu_overflow_status(struct kvm_vcpu *vcpu)
{
	u64 reg = 0;

	if ((vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E)) {
		reg = vcpu_sys_reg(vcpu, PMOVSSET_EL0);
		reg &= vcpu_sys_reg(vcpu, PMCNTENSET_EL0);
		reg &= vcpu_sys_reg(vcpu, PMINTENSET_EL1);
		reg &= kvm_pmu_valid_counter_mask(vcpu);
	}

	return reg;
}

/**
 * kvm_pmu_overflow_set - set PMU overflow interrupt
 * @vcpu: The vcpu pointer
 * @val: the value guest writes to PMOVSSET register
 */
void kvm_pmu_overflow_set(struct kvm_vcpu *vcpu, u64 val)
{
	u64 reg;

	if (val == 0)
		return;

	vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= val;
	reg = kvm_pmu_overflow_status(vcpu);
	if (reg != 0)
		kvm_vcpu_kick(vcpu);
}

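/*
 * Recompute the overflow interrupt line level and, if it has changed,
 * propagate the new level to the in-kernel vgic.
 */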
static void kvm_pmu_update_state(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = &vcpu->arch.pmu;
	bool overflow;

	if (!kvm_arm_pmu_v3_ready(vcpu))
		return;

	overflow = !!kvm_pmu_overflow_status(vcpu);
	if (pmu->irq_level != overflow) {
		pmu->irq_level = overflow;
		kvm_vgic_inject_irq(vcpu->kvm, vcpu->vcpu_id,
				    pmu->irq_num, overflow);
	}
}

/**
 * kvm_pmu_flush_hwstate - flush pmu state to cpu
 * @vcpu: The vcpu pointer
 *
 * Check if the PMU has overflowed while we were running in the host, and
 * inject an interrupt if that was the case.
 */
void kvm_pmu_flush_hwstate(struct kvm_vcpu *vcpu)
{
	kvm_pmu_update_state(vcpu);
}

/**
 * kvm_pmu_sync_hwstate - sync pmu state from cpu
 * @vcpu: The vcpu pointer
 *
 * Check if the PMU has overflowed while we were running in the guest, and
 * inject an interrupt if that was the case.
 */
void kvm_pmu_sync_hwstate(struct kvm_vcpu *vcpu)
{
	kvm_pmu_update_state(vcpu);
}

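/*
 * Recover the vcpu that owns a counter: pmc->idx equals the offset of
 * this pmc within pmu->pmc[], so stepping back idx elements lands on
 * pmc[0], from which container_of() walks up to the enclosing vcpu.
 */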
static inline struct kvm_vcpu *kvm_pmc_to_vcpu(struct kvm_pmc *pmc)
{
	struct kvm_pmu *pmu;
	struct kvm_vcpu_arch *vcpu_arch;

	pmc -= pmc->idx;
	pmu = container_of(pmc, struct kvm_pmu, pmc[0]);
	vcpu_arch = container_of(pmu, struct kvm_vcpu_arch, pmu);
	return container_of(vcpu_arch, struct kvm_vcpu, arch);
}

/**
 * When perf event overflows, call kvm_pmu_overflow_set to set overflow status.
 */
static void kvm_pmu_perf_overflow(struct perf_event *perf_event,
				  struct perf_sample_data *data,
				  struct pt_regs *regs)
{
	struct kvm_pmc *pmc = perf_event->overflow_handler_context;
	struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc);
	int idx = pmc->idx;

	kvm_pmu_overflow_set(vcpu, BIT(idx));
}

/**
 * kvm_pmu_software_increment - do software increment
 * @vcpu: The vcpu pointer
 * @val: the value guest writes to PMSWINC register
 */
void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val)
{
	int i;
	u64 type, enable, reg;

	if (val == 0)
		return;

	enable = vcpu_sys_reg(vcpu, PMCNTENSET_EL0);
	for (i = 0; i < ARMV8_PMU_CYCLE_IDX; i++) {
		if (!(val & BIT(i)))
			continue;
		type = vcpu_sys_reg(vcpu, PMEVTYPER0_EL0 + i)
		       & ARMV8_PMU_EVTYPE_EVENT;
		if ((type == ARMV8_PMUV3_PERFCTR_SW_INCR)
		    && (enable & BIT(i))) {
			reg = vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i) + 1;
			reg = lower_32_bits(reg);
			vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i) = reg;
			if (!reg)
				kvm_pmu_overflow_set(vcpu, BIT(i));
		}
	}
}

/**
 * kvm_pmu_handle_pmcr - handle PMCR register
 * @vcpu: The vcpu pointer
 * @val: the value guest writes to PMCR register
 */
void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val)
{
	struct kvm_pmu *pmu = &vcpu->arch.pmu;
	struct kvm_pmc *pmc;
	u64 mask;
	int i;

	mask = kvm_pmu_valid_counter_mask(vcpu);
	if (val & ARMV8_PMU_PMCR_E) {
		kvm_pmu_enable_counter(vcpu,
				       vcpu_sys_reg(vcpu, PMCNTENSET_EL0) & mask);
	} else {
		kvm_pmu_disable_counter(vcpu, mask);
	}

	if (val & ARMV8_PMU_PMCR_C)
		kvm_pmu_set_counter_value(vcpu, ARMV8_PMU_CYCLE_IDX, 0);

	if (val & ARMV8_PMU_PMCR_P) {
		for (i = 0; i < ARMV8_PMU_CYCLE_IDX; i++)
			kvm_pmu_set_counter_value(vcpu, i, 0);
	}

	if (val & ARMV8_PMU_PMCR_LC) {
		pmc = &pmu->pmc[ARMV8_PMU_CYCLE_IDX];
		pmc->bitmask = 0xffffffffffffffffUL;
	}
}

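/*
 * A counter counts only while both the global enable (PMCR_EL0.E) and
 * its own enable bit in PMCNTENSET_EL0 are set.
 */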
static bool kvm_pmu_counter_is_enabled(struct kvm_vcpu *vcpu, u64 select_idx)
{
	return (vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E) &&
	       (vcpu_sys_reg(vcpu, PMCNTENSET_EL0) & BIT(select_idx));
}

/**
 * kvm_pmu_set_counter_event_type - set selected counter to monitor some event
 * @vcpu: The vcpu pointer
 * @data: The data guest writes to PMXEVTYPER_EL0
 * @select_idx: The number of selected counter
 *
 * When OS accesses PMXEVTYPER_EL0, that means it wants to set a PMC to count an
 * event with given hardware event number. Here we call perf_event API to
 * emulate this action and create a kernel perf event for it.
 */
void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data,
				    u64 select_idx)
{
	struct kvm_pmu *pmu = &vcpu->arch.pmu;
	struct kvm_pmc *pmc = &pmu->pmc[select_idx];
	struct perf_event *event;
	struct perf_event_attr attr;
	u64 eventsel, counter;

	kvm_pmu_stop_counter(vcpu, pmc);
	eventsel = data & ARMV8_PMU_EVTYPE_EVENT;

	/* Software increment event doesn't need to be backed by a perf event */
	if (eventsel == ARMV8_PMUV3_PERFCTR_SW_INCR &&
	    select_idx != ARMV8_PMU_CYCLE_IDX)
		return;

	memset(&attr, 0, sizeof(struct perf_event_attr));
	attr.type = PERF_TYPE_RAW;
	attr.size = sizeof(attr);
	attr.pinned = 1;
	attr.disabled = !kvm_pmu_counter_is_enabled(vcpu, select_idx);
	attr.exclude_user = data & ARMV8_PMU_EXCLUDE_EL0 ? 1 : 0;
	attr.exclude_kernel = data & ARMV8_PMU_EXCLUDE_EL1 ? 1 : 0;
	attr.exclude_hv = 1; /* Don't count EL2 events */
	attr.exclude_host = 1; /* Don't count host events */
	attr.config = (select_idx == ARMV8_PMU_CYCLE_IDX) ?
		ARMV8_PMUV3_PERFCTR_CPU_CYCLES : eventsel;

	counter = kvm_pmu_get_counter_value(vcpu, select_idx);
	/* The initial sample period (overflow count) of an event. */
	attr.sample_period = (-counter) & pmc->bitmask;

	event = perf_event_create_kernel_counter(&attr, -1, current,
						 kvm_pmu_perf_overflow, pmc);
	if (IS_ERR(event)) {
		pr_err_once("kvm: pmu event creation failed %ld\n",
			    PTR_ERR(event));
		return;
	}

	pmc->perf_event = event;
}

bool kvm_arm_support_pmu_v3(void)
{
	/*
	 * Check if HW_PERF_EVENTS are supported by checking the number of
	 * hardware performance counters. This could ensure the presence of
	 * a physical PMU and CONFIG_PERF_EVENT is selected.
	 */
	return (perf_num_counters() > 0);
}

static int kvm_arm_pmu_v3_init(struct kvm_vcpu *vcpu)
{
	if (!kvm_arm_support_pmu_v3())
		return -ENODEV;

	/*
	 * We currently require an in-kernel VGIC to use the PMU emulation,
	 * because we do not support forwarding PMU overflow interrupts to
	 * userspace yet.
	 */
	if (!irqchip_in_kernel(vcpu->kvm) || !vgic_initialized(vcpu->kvm))
		return -ENODEV;

	if (!test_bit(KVM_ARM_VCPU_PMU_V3, vcpu->arch.features) ||
	    !kvm_arm_pmu_irq_initialized(vcpu))
		return -ENXIO;

	if (kvm_arm_pmu_v3_ready(vcpu))
		return -EBUSY;

	kvm_pmu_vcpu_reset(vcpu);
	vcpu->arch.pmu.ready = true;

	return 0;
}

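/* PPIs are the per-cpu interrupt IDs between the SGI and SPI ranges. */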
#define irq_is_ppi(irq) ((irq) >= VGIC_NR_SGIS && (irq) < VGIC_NR_PRIVATE_IRQS)

/*
 * For one VM the interrupt type must be the same for each vcpu.
 * As a PPI, the interrupt number is the same for all vcpus,
 * while as an SPI it must be a separate number per vcpu.
 */
static bool pmu_irq_is_valid(struct kvm *kvm, int irq)
{
	int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (!kvm_arm_pmu_irq_initialized(vcpu))
			continue;

		if (irq_is_ppi(irq)) {
			if (vcpu->arch.pmu.irq_num != irq)
				return false;
		} else {
			if (vcpu->arch.pmu.irq_num == irq)
				return false;
		}
	}

	return true;
}

int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
{
	switch (attr->attr) {
	case KVM_ARM_VCPU_PMU_V3_IRQ: {
		int __user *uaddr = (int __user *)(long)attr->addr;
		int irq;

		if (!test_bit(KVM_ARM_VCPU_PMU_V3, vcpu->arch.features))
			return -ENODEV;

		if (get_user(irq, uaddr))
			return -EFAULT;

		/* The PMU overflow interrupt can be a PPI or a valid SPI. */
		if (!(irq_is_ppi(irq) || vgic_valid_spi(vcpu->kvm, irq)))
			return -EINVAL;

		if (!pmu_irq_is_valid(vcpu->kvm, irq))
			return -EINVAL;

		if (kvm_arm_pmu_irq_initialized(vcpu))
			return -EBUSY;

		kvm_debug("Set kvm ARM PMU irq: %d\n", irq);
		vcpu->arch.pmu.irq_num = irq;
		return 0;
	}
	case KVM_ARM_VCPU_PMU_V3_INIT:
		return kvm_arm_pmu_v3_init(vcpu);
	}

	return -ENXIO;
}

int kvm_arm_pmu_v3_get_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
{
	switch (attr->attr) {
	case KVM_ARM_VCPU_PMU_V3_IRQ: {
		int __user *uaddr = (int __user *)(long)attr->addr;
		int irq;

		if (!test_bit(KVM_ARM_VCPU_PMU_V3, vcpu->arch.features))
			return -ENODEV;

		if (!kvm_arm_pmu_irq_initialized(vcpu))
			return -ENXIO;

		irq = vcpu->arch.pmu.irq_num;
		return put_user(irq, uaddr);
	}
	}

	return -ENXIO;
}

int kvm_arm_pmu_v3_has_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
{
	switch (attr->attr) {
	case KVM_ARM_VCPU_PMU_V3_IRQ:
	case KVM_ARM_VCPU_PMU_V3_INIT:
		if (kvm_arm_support_pmu_v3() &&
		    test_bit(KVM_ARM_VCPU_PMU_V3, vcpu->arch.features))
			return 0;
	}

	return -ENXIO;
}