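/*
 * KVM ARM PMUv3 emulation: backs the guest's architected PMU event
 * counters with host perf events.
 */
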
#include <linux/cpu.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/perf_event.h>
#include <linux/uaccess.h>
#include <asm/kvm_emulate.h>
#include <kvm/arm_pmu.h>
#include <kvm/arm_vgic.h>
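
/**
 * kvm_pmu_get_counter_value - get PMU counter value
 * @vcpu: The vcpu pointer
 * @select_idx: The counter index
 */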
u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u64 select_idx)
{
	u64 counter, reg, enabled, running;
	struct kvm_pmu *pmu = &vcpu->arch.pmu;
	struct kvm_pmc *pmc = &pmu->pmc[select_idx];

	reg = (select_idx == ARMV8_PMU_CYCLE_IDX)
	      ? PMCCNTR_EL0 : PMEVCNTR0_EL0 + select_idx;
	counter = __vcpu_sys_reg(vcpu, reg);

	/*
	 * The real counter value is equal to the value of the counter
	 * register plus the value the associated perf event counts.
	 */
	if (pmc->perf_event)
		counter += perf_event_read_value(pmc->perf_event, &enabled,
						 &running);

	return counter & pmc->bitmask;
}
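
/**
 * kvm_pmu_set_counter_value - set PMU counter value
 * @vcpu: The vcpu pointer
 * @select_idx: The counter index
 * @val: The counter value
 */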
void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu, u64 select_idx, u64 val)
{
	u64 reg;

	reg = (select_idx == ARMV8_PMU_CYCLE_IDX)
	      ? PMCCNTR_EL0 : PMEVCNTR0_EL0 + select_idx;
	__vcpu_sys_reg(vcpu, reg) += (s64)val - kvm_pmu_get_counter_value(vcpu, select_idx);
}
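
/**
 * kvm_pmu_stop_counter - stop PMU counter
 * @pmc: The PMU counter pointer
 *
 * If this counter has been configured to monitor some event, release it here.
 */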
static void kvm_pmu_stop_counter(struct kvm_vcpu *vcpu, struct kvm_pmc *pmc)
{
	u64 counter, reg;

	if (pmc->perf_event) {
		counter = kvm_pmu_get_counter_value(vcpu, pmc->idx);
		reg = (pmc->idx == ARMV8_PMU_CYCLE_IDX)
		      ? PMCCNTR_EL0 : PMEVCNTR0_EL0 + pmc->idx;
		__vcpu_sys_reg(vcpu, reg) = counter;
		perf_event_disable(pmc->perf_event);
		perf_event_release_kernel(pmc->perf_event);
		pmc->perf_event = NULL;
	}
}
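
/**
 * kvm_pmu_vcpu_reset - reset pmu state for cpu
 * @vcpu: The vcpu pointer
 */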
void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu)
{
	int i;
	struct kvm_pmu *pmu = &vcpu->arch.pmu;

	for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++) {
		kvm_pmu_stop_counter(vcpu, &pmu->pmc[i]);
		pmu->pmc[i].idx = i;
		pmu->pmc[i].bitmask = 0xffffffffUL;
	}
}
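
/**
 * kvm_pmu_vcpu_destroy - free perf events of PMU for cpu
 * @vcpu: The vcpu pointer
 */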
void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	int i;
	struct kvm_pmu *pmu = &vcpu->arch.pmu;

	for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++) {
		struct kvm_pmc *pmc = &pmu->pmc[i];

		if (pmc->perf_event) {
			perf_event_disable(pmc->perf_event);
			perf_event_release_kernel(pmc->perf_event);
			pmc->perf_event = NULL;
		}
	}
}
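
/*
 * Mask of the counter indices the guest may use: the PMCR_EL0.N event
 * counters plus the cycle counter.
 */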
u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu)
{
	u64 val = __vcpu_sys_reg(vcpu, PMCR_EL0) >> ARMV8_PMU_PMCR_N_SHIFT;

	val &= ARMV8_PMU_PMCR_N_MASK;
	if (val == 0)
		return BIT(ARMV8_PMU_CYCLE_IDX);
	else
		return GENMASK(val - 1, 0) | BIT(ARMV8_PMU_CYCLE_IDX);
}
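
/**
 * kvm_pmu_enable_counter - enable selected PMU counters
 * @vcpu: The vcpu pointer
 * @val: the value guest writes to PMCNTENSET register
 *
 * Call perf_event_enable to start counting the perf event
 */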
void kvm_pmu_enable_counter(struct kvm_vcpu *vcpu, u64 val)
{
	int i;
	struct kvm_pmu *pmu = &vcpu->arch.pmu;
	struct kvm_pmc *pmc;

	if (!(__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E) || !val)
		return;

	for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++) {
		if (!(val & BIT(i)))
			continue;

		pmc = &pmu->pmc[i];
		if (pmc->perf_event) {
			perf_event_enable(pmc->perf_event);
			if (pmc->perf_event->state != PERF_EVENT_STATE_ACTIVE)
				kvm_debug("failed to enable perf event\n");
		}
	}
}
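
/**
 * kvm_pmu_disable_counter - disable selected PMU counters
 * @vcpu: The vcpu pointer
 * @val: the value guest writes to PMCNTENCLR register
 *
 * Call perf_event_disable to stop counting the perf event
 */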
void kvm_pmu_disable_counter(struct kvm_vcpu *vcpu, u64 val)
{
	int i;
	struct kvm_pmu *pmu = &vcpu->arch.pmu;
	struct kvm_pmc *pmc;

	if (!val)
		return;

	for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++) {
		if (!(val & BIT(i)))
			continue;

		pmc = &pmu->pmc[i];
		if (pmc->perf_event)
			perf_event_disable(pmc->perf_event);
	}
}

static u64 kvm_pmu_overflow_status(struct kvm_vcpu *vcpu)
{
	u64 reg = 0;

	if ((__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E)) {
		reg = __vcpu_sys_reg(vcpu, PMOVSSET_EL0);
		reg &= __vcpu_sys_reg(vcpu, PMCNTENSET_EL0);
		reg &= __vcpu_sys_reg(vcpu, PMINTENSET_EL1);
		reg &= kvm_pmu_valid_counter_mask(vcpu);
	}

	return reg;
}
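
/*
 * Recompute the PMU overflow interrupt level and, with an in-kernel
 * irqchip, forward any change to the vgic.
 */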
static void kvm_pmu_update_state(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = &vcpu->arch.pmu;
	bool overflow;

	if (!kvm_arm_pmu_v3_ready(vcpu))
		return;

	overflow = !!kvm_pmu_overflow_status(vcpu);
	if (pmu->irq_level == overflow)
		return;

	pmu->irq_level = overflow;

	if (likely(irqchip_in_kernel(vcpu->kvm))) {
		int ret = kvm_vgic_inject_irq(vcpu->kvm, vcpu->vcpu_id,
					      pmu->irq_num, overflow, pmu);
		WARN_ON(ret);
	}
}
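
/*
 * Returns true if the PMU overflow line level differs from what was last
 * reported to userspace, i.e. a userspace irqchip must be notified.
 */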
bool kvm_pmu_should_notify_user(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = &vcpu->arch.pmu;
	struct kvm_sync_regs *sregs = &vcpu->run->s.regs;
	bool run_level = sregs->device_irq_level & KVM_ARM_DEV_PMU;

	if (likely(irqchip_in_kernel(vcpu->kvm)))
		return false;

	return pmu->irq_level != run_level;
}
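
/*
 * Reflect the PMU overflow interrupt output level into the kvm_run structure
 */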
void kvm_pmu_update_run(struct kvm_vcpu *vcpu)
{
	struct kvm_sync_regs *regs = &vcpu->run->s.regs;

	/* Populate the PMU bit in the device irq level bitmap */
	regs->device_irq_level &= ~KVM_ARM_DEV_PMU;
	if (vcpu->arch.pmu.irq_level)
		regs->device_irq_level |= KVM_ARM_DEV_PMU;
}
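
/**
 * kvm_pmu_flush_hwstate - flush pmu state to cpu
 * @vcpu: The vcpu pointer
 *
 * Check if the PMU has overflowed while we were running in the host,
 * and inject an interrupt if that was the case.
 */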
void kvm_pmu_flush_hwstate(struct kvm_vcpu *vcpu)
{
	kvm_pmu_update_state(vcpu);
}
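
/**
 * kvm_pmu_sync_hwstate - sync pmu state from cpu
 * @vcpu: The vcpu pointer
 *
 * Check if the PMU has overflowed while we were running in the guest,
 * and inject an interrupt if that was the case.
 */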
void kvm_pmu_sync_hwstate(struct kvm_vcpu *vcpu)
{
	kvm_pmu_update_state(vcpu);
}

static inline struct kvm_vcpu *kvm_pmc_to_vcpu(struct kvm_pmc *pmc)
{
	struct kvm_pmu *pmu;
	struct kvm_vcpu_arch *vcpu_arch;

	/* Step back from this pmc to pmc[0], then up to the enclosing vcpu */
	pmc -= pmc->idx;
	pmu = container_of(pmc, struct kvm_pmu, pmc[0]);
	vcpu_arch = container_of(pmu, struct kvm_vcpu_arch, pmu);
	return container_of(vcpu_arch, struct kvm_vcpu, arch);
}
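
/**
 * When the perf event overflows, set the overflow status and inform the vcpu.
 */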
static void kvm_pmu_perf_overflow(struct perf_event *perf_event,
				  struct perf_sample_data *data,
				  struct pt_regs *regs)
{
	struct kvm_pmc *pmc = perf_event->overflow_handler_context;
	struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc);
	int idx = pmc->idx;

	__vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= BIT(idx);

	if (kvm_pmu_overflow_status(vcpu)) {
		kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
		kvm_vcpu_kick(vcpu);
	}
}
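
/**
 * kvm_pmu_software_increment - do software increment
 * @vcpu: The vcpu pointer
 * @val: the value guest writes to PMSWINC register
 */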
void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val)
{
	int i;
	u64 type, enable, reg;

	if (val == 0)
		return;

	enable = __vcpu_sys_reg(vcpu, PMCNTENSET_EL0);
	for (i = 0; i < ARMV8_PMU_CYCLE_IDX; i++) {
		if (!(val & BIT(i)))
			continue;
		type = __vcpu_sys_reg(vcpu, PMEVTYPER0_EL0 + i)
		       & ARMV8_PMU_EVTYPE_EVENT;
		if ((type == ARMV8_PMUV3_PERFCTR_SW_INCR)
		    && (enable & BIT(i))) {
			reg = __vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i) + 1;
			reg = lower_32_bits(reg);
			__vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i) = reg;
			if (!reg)
				__vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= BIT(i);
		}
	}
}
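
/**
 * kvm_pmu_handle_pmcr - handle PMCR register
 * @vcpu: The vcpu pointer
 * @val: the value guest writes to PMCR register
 */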
void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val)
{
	struct kvm_pmu *pmu = &vcpu->arch.pmu;
	struct kvm_pmc *pmc;
	u64 mask;
	int i;

	mask = kvm_pmu_valid_counter_mask(vcpu);
	if (val & ARMV8_PMU_PMCR_E) {
		kvm_pmu_enable_counter(vcpu,
		       __vcpu_sys_reg(vcpu, PMCNTENSET_EL0) & mask);
	} else {
		kvm_pmu_disable_counter(vcpu, mask);
	}

	if (val & ARMV8_PMU_PMCR_C)
		kvm_pmu_set_counter_value(vcpu, ARMV8_PMU_CYCLE_IDX, 0);

	if (val & ARMV8_PMU_PMCR_P) {
		for (i = 0; i < ARMV8_PMU_CYCLE_IDX; i++)
			kvm_pmu_set_counter_value(vcpu, i, 0);
	}

	if (val & ARMV8_PMU_PMCR_LC) {
		pmc = &pmu->pmc[ARMV8_PMU_CYCLE_IDX];
		pmc->bitmask = 0xffffffffffffffffUL;
	}
}

static bool kvm_pmu_counter_is_enabled(struct kvm_vcpu *vcpu, u64 select_idx)
{
	return (__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E) &&
	       (__vcpu_sys_reg(vcpu, PMCNTENSET_EL0) & BIT(select_idx));
}
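
/**
 * kvm_pmu_set_counter_event_type - set selected counter to monitor some event
 * @vcpu: The vcpu pointer
 * @data: The data guest writes to PMXEVTYPER_EL0
 * @select_idx: The number of selected counter
 *
 * When OS accesses PMXEVTYPER_EL0, that means it wants to set a PMC to count an
 * event with given hardware event number. Here we call perf_event API to
 * emulate this action and create a kernel perf event for it.
 */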
void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data,
				    u64 select_idx)
{
	struct kvm_pmu *pmu = &vcpu->arch.pmu;
	struct kvm_pmc *pmc = &pmu->pmc[select_idx];
	struct perf_event *event;
	struct perf_event_attr attr;
	u64 eventsel, counter;

	kvm_pmu_stop_counter(vcpu, pmc);
	eventsel = data & ARMV8_PMU_EVTYPE_EVENT;

	/* Software increment event doesn't need to be backed by a perf event */
	if (eventsel == ARMV8_PMUV3_PERFCTR_SW_INCR &&
	    select_idx != ARMV8_PMU_CYCLE_IDX)
		return;

	memset(&attr, 0, sizeof(struct perf_event_attr));
	attr.type = PERF_TYPE_RAW;
	attr.size = sizeof(attr);
	attr.pinned = 1;
	attr.disabled = !kvm_pmu_counter_is_enabled(vcpu, select_idx);
	attr.exclude_user = data & ARMV8_PMU_EXCLUDE_EL0 ? 1 : 0;
	attr.exclude_kernel = data & ARMV8_PMU_EXCLUDE_EL1 ? 1 : 0;
	attr.exclude_hv = 1; /* Don't count EL2 events */
	attr.exclude_host = 1; /* Don't count host events */
	attr.config = (select_idx == ARMV8_PMU_CYCLE_IDX) ?
		ARMV8_PMUV3_PERFCTR_CPU_CYCLES : eventsel;

	counter = kvm_pmu_get_counter_value(vcpu, select_idx);

	/* The initial sample period (overflow count) of the event */
	attr.sample_period = (-counter) & pmc->bitmask;

	event = perf_event_create_kernel_counter(&attr, -1, current,
						 kvm_pmu_perf_overflow, pmc);
	if (IS_ERR(event)) {
		pr_err_once("kvm: pmu event creation failed %ld\n",
			    PTR_ERR(event));
		return;
	}

	pmc->perf_event = event;
}

bool kvm_arm_support_pmu_v3(void)
{
	/*
	 * Check if HW_PERF_EVENTS are supported by checking the number of
	 * hardware performance counters. This could ensure the presence of
	 * a physical PMU and CONFIG_PERF_EVENTS is selected.
	 */
	return (perf_num_counters() > 0);
}

int kvm_arm_pmu_v3_enable(struct kvm_vcpu *vcpu)
{
	if (!vcpu->arch.pmu.created)
		return 0;

	/*
	 * A valid interrupt configuration for the PMU is either to have a
	 * properly working irqchip and a valid interrupt number, or to not
	 * have an in-kernel irqchip and not point to an interrupt number.
	 */
	if (irqchip_in_kernel(vcpu->kvm)) {
		int irq = vcpu->arch.pmu.irq_num;

		if (!kvm_arm_pmu_irq_initialized(vcpu))
			return -EINVAL;

		/*
		 * If we are using an in-kernel vgic, at this point we know
		 * the vgic will be initialized, so we can check the PMU irq
		 * number against the dimensions of the vgic and make sure
		 * it's valid.
		 */
		if (!irq_is_ppi(irq) && !vgic_valid_spi(vcpu->kvm, irq))
			return -EINVAL;
	} else if (kvm_arm_pmu_irq_initialized(vcpu)) {
		return -EINVAL;
	}

	kvm_pmu_vcpu_reset(vcpu);
	vcpu->arch.pmu.ready = true;

	return 0;
}

static int kvm_arm_pmu_v3_init(struct kvm_vcpu *vcpu)
{
	if (!kvm_arm_support_pmu_v3())
		return -ENODEV;

	if (!test_bit(KVM_ARM_VCPU_PMU_V3, vcpu->arch.features))
		return -ENXIO;

	if (vcpu->arch.pmu.created)
		return -EBUSY;

	if (irqchip_in_kernel(vcpu->kvm)) {
		int ret;

		/*
		 * If using the PMU with an in-kernel virtual GIC
		 * implementation, we require the GIC to be already
		 * initialized when initializing the PMU.
		 */
		if (!vgic_initialized(vcpu->kvm))
			return -ENODEV;

		if (!kvm_arm_pmu_irq_initialized(vcpu))
			return -ENXIO;

		ret = kvm_vgic_set_owner(vcpu, vcpu->arch.pmu.irq_num,
					 &vcpu->arch.pmu);
		if (ret)
			return ret;
	}

	vcpu->arch.pmu.created = true;
	return 0;
}
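
/*
 * For one VM the interrupt type must be same for each vcpu.
 * As a PPI, the interrupt number is the same for all vcpus,
 * while as an SPI it must be a separate number per vcpu.
 */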
static bool pmu_irq_is_valid(struct kvm *kvm, int irq)
{
	int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (!kvm_arm_pmu_irq_initialized(vcpu))
			continue;

		if (irq_is_ppi(irq)) {
			if (vcpu->arch.pmu.irq_num != irq)
				return false;
		} else {
			if (vcpu->arch.pmu.irq_num == irq)
				return false;
		}
	}

	return true;
}
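
/*
 * Illustrative userspace usage (a sketch, not part of this file): the PMU
 * overflow interrupt is routed and the PMU initialized via the
 * KVM_SET_DEVICE_ATTR ioctl on the vcpu fd, using the
 * KVM_ARM_VCPU_PMU_V3_CTRL attribute group, e.g.:
 *
 *	int irq = 23;				// assumed PPI number
 *	struct kvm_device_attr attr = {
 *		.group	= KVM_ARM_VCPU_PMU_V3_CTRL,
 *		.attr	= KVM_ARM_VCPU_PMU_V3_IRQ,
 *		.addr	= (__u64)(unsigned long)&irq,
 *	};
 *	ioctl(vcpu_fd, KVM_SET_DEVICE_ATTR, &attr);	// set the irq...
 *	attr.attr = KVM_ARM_VCPU_PMU_V3_INIT;
 *	attr.addr = 0;
 *	ioctl(vcpu_fd, KVM_SET_DEVICE_ATTR, &attr);	// ...then init the PMU
 */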
int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
{
	switch (attr->attr) {
	case KVM_ARM_VCPU_PMU_V3_IRQ: {
		int __user *uaddr = (int __user *)(long)attr->addr;
		int irq;

		if (!irqchip_in_kernel(vcpu->kvm))
			return -EINVAL;

		if (!test_bit(KVM_ARM_VCPU_PMU_V3, vcpu->arch.features))
			return -ENODEV;

		if (get_user(irq, uaddr))
			return -EFAULT;

		/* The PMU overflow interrupt can be a PPI or a valid SPI. */
		if (!(irq_is_ppi(irq) || irq_is_spi(irq)))
			return -EINVAL;

		if (!pmu_irq_is_valid(vcpu->kvm, irq))
			return -EINVAL;

		if (kvm_arm_pmu_irq_initialized(vcpu))
			return -EBUSY;

		kvm_debug("Set kvm ARM PMU irq: %d\n", irq);
		vcpu->arch.pmu.irq_num = irq;
		return 0;
	}
	case KVM_ARM_VCPU_PMU_V3_INIT:
		return kvm_arm_pmu_v3_init(vcpu);
	}

	return -ENXIO;
}

int kvm_arm_pmu_v3_get_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
{
	switch (attr->attr) {
	case KVM_ARM_VCPU_PMU_V3_IRQ: {
		int __user *uaddr = (int __user *)(long)attr->addr;
		int irq;

		if (!irqchip_in_kernel(vcpu->kvm))
			return -EINVAL;

		if (!test_bit(KVM_ARM_VCPU_PMU_V3, vcpu->arch.features))
			return -ENODEV;

		if (!kvm_arm_pmu_irq_initialized(vcpu))
			return -ENXIO;

		irq = vcpu->arch.pmu.irq_num;
		return put_user(irq, uaddr);
	}
	}

	return -ENXIO;
}

int kvm_arm_pmu_v3_has_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
{
	switch (attr->attr) {
	case KVM_ARM_VCPU_PMU_V3_IRQ:
	case KVM_ARM_VCPU_PMU_V3_INIT:
		if (kvm_arm_support_pmu_v3() &&
		    test_bit(KVM_ARM_VCPU_PMU_V3, vcpu->arch.features))
			return 0;
	}

	return -ENXIO;
}