linux/arch/arm64/kvm/pmu.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2019 Arm Limited
 * Author: Andrew Murray <Andrew.Murray@arm.com>
 */
#include <linux/kvm_host.h>
#include <linux/perf_event.h>
#include <asm/kvm_hyp.h>

/*
 * Given the perf event attributes and system type, determine
 * whether we will need to switch counters at guest entry/exit.
 */
static bool kvm_pmu_switch_needed(struct perf_event_attr *attr)
{
        /*
         * With VHE the guest kernel runs at EL1 and the host at EL2;
         * if EL0 (user) is excluded then we have no reason to switch
         * counters.
         */
        if (has_vhe() && attr->exclude_user)
                return false;

        /* Only switch if the host/guest exclude attributes differ */
        return (attr->exclude_host != attr->exclude_guest);
}
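
/*
 * Illustrative cases, following the logic above (assuming a VHE host):
 *
 *   exclude_user = 1                      -> no switch: EL0 never counts
 *   exclude_host = 1, exclude_guest = 0   -> switch: count in guest only
 *   exclude_host = 0, exclude_guest = 0   -> no switch: count everywhere
 */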

/*
 * Add events to track that we may want to switch at guest entry/exit
 * time.
 */
void kvm_set_pmu_events(u32 set, struct perf_event_attr *attr)
{
        struct kvm_host_data *ctx = this_cpu_ptr(&kvm_host_data);

        if (!kvm_pmu_switch_needed(attr))
                return;

        if (!attr->exclude_host)
                ctx->pmu_events.events_host |= set;
        if (!attr->exclude_guest)
                ctx->pmu_events.events_guest |= set;
}
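
/*
 * events_host/events_guest are per-CPU bitmasks of counter indices: bit n
 * corresponds to counter n, as consumed by kvm_vcpu_pmu_{enable,disable}_el0()
 * below. 'set' is therefore expected to be BIT(counter_idx) (an assumption
 * based on the u32 interface; the callers live in the host PMU driver).
 */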

/*
 * Stop tracking events
 */
void kvm_clr_pmu_events(u32 clr)
{
        struct kvm_host_data *ctx = this_cpu_ptr(&kvm_host_data);

        ctx->pmu_events.events_host &= ~clr;
        ctx->pmu_events.events_guest &= ~clr;
}
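
/*
 * A sketch of how the host PMU driver would use these hooks when an event
 * is scheduled onto or off a hardware counter (function names assumed from
 * arch/arm64/kernel/perf_event.c; illustrative, not verbatim):
 *
 *   armv8pmu_enable_event():   kvm_set_pmu_events(BIT(idx), &event->attr);
 *   armv8pmu_disable_event():  kvm_clr_pmu_events(BIT(idx));
 */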

#define PMEVTYPER_READ_CASE(idx)                                \
        case idx:                                               \
                return read_sysreg(pmevtyper##idx##_el0)

#define PMEVTYPER_WRITE_CASE(idx)                               \
        case idx:                                               \
                write_sysreg(val, pmevtyper##idx##_el0);        \
                break

#define PMEVTYPER_CASES(readwrite)                              \
        PMEVTYPER_##readwrite##_CASE(0);                        \
        PMEVTYPER_##readwrite##_CASE(1);                        \
        PMEVTYPER_##readwrite##_CASE(2);                        \
        PMEVTYPER_##readwrite##_CASE(3);                        \
        PMEVTYPER_##readwrite##_CASE(4);                        \
        PMEVTYPER_##readwrite##_CASE(5);                        \
        PMEVTYPER_##readwrite##_CASE(6);                        \
        PMEVTYPER_##readwrite##_CASE(7);                        \
        PMEVTYPER_##readwrite##_CASE(8);                        \
        PMEVTYPER_##readwrite##_CASE(9);                        \
        PMEVTYPER_##readwrite##_CASE(10);                       \
        PMEVTYPER_##readwrite##_CASE(11);                       \
        PMEVTYPER_##readwrite##_CASE(12);                       \
        PMEVTYPER_##readwrite##_CASE(13);                       \
        PMEVTYPER_##readwrite##_CASE(14);                       \
        PMEVTYPER_##readwrite##_CASE(15);                       \
        PMEVTYPER_##readwrite##_CASE(16);                       \
        PMEVTYPER_##readwrite##_CASE(17);                       \
        PMEVTYPER_##readwrite##_CASE(18);                       \
        PMEVTYPER_##readwrite##_CASE(19);                       \
        PMEVTYPER_##readwrite##_CASE(20);                       \
        PMEVTYPER_##readwrite##_CASE(21);                       \
        PMEVTYPER_##readwrite##_CASE(22);                       \
        PMEVTYPER_##readwrite##_CASE(23);                       \
        PMEVTYPER_##readwrite##_CASE(24);                       \
        PMEVTYPER_##readwrite##_CASE(25);                       \
        PMEVTYPER_##readwrite##_CASE(26);                       \
        PMEVTYPER_##readwrite##_CASE(27);                       \
        PMEVTYPER_##readwrite##_CASE(28);                       \
        PMEVTYPER_##readwrite##_CASE(29);                       \
        PMEVTYPER_##readwrite##_CASE(30)

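/*
 * read_sysreg()/write_sysreg() encode the system register name into the
 * instruction at build time, so the register cannot be selected by a
 * runtime index; hence the switch/case expansion above, one case per
 * PMEVTYPER<n>_EL0 register.
 */
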
/*
 * Read a value directly from PMEVTYPER<idx> where idx is 0-30
 * or PMCCFILTR_EL0 where idx is ARMV8_PMU_CYCLE_IDX (31).
 */
static u64 kvm_vcpu_pmu_read_evtype_direct(int idx)
{
        switch (idx) {
        PMEVTYPER_CASES(READ);
        case ARMV8_PMU_CYCLE_IDX:
                return read_sysreg(pmccfiltr_el0);
        default:
                WARN_ON(1);
        }

        return 0;
}

/*
 * Write a value directly to PMEVTYPER<idx> where idx is 0-30
 * or PMCCFILTR_EL0 where idx is ARMV8_PMU_CYCLE_IDX (31).
 */
static void kvm_vcpu_pmu_write_evtype_direct(int idx, u32 val)
{
        switch (idx) {
        PMEVTYPER_CASES(WRITE);
        case ARMV8_PMU_CYCLE_IDX:
                write_sysreg(val, pmccfiltr_el0);
                break;
        default:
                WARN_ON(1);
        }
}
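
/*
 * Note the asymmetry: reads return u64 (the sysreg width) while writes take
 * u32. The architected event/filter fields of PMEVTYPER<n>_EL0 and
 * PMCCFILTR_EL0 live in the low 32 bits (at the time of writing), so
 * truncating the typer value on write does not discard information.
 */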

/*
 * Modify ARMv8 PMU events to include EL0 counting
 */
static void kvm_vcpu_pmu_enable_el0(unsigned long events)
{
        u64 typer;
        u32 counter;

        for_each_set_bit(counter, &events, 32) {
                typer = kvm_vcpu_pmu_read_evtype_direct(counter);
                typer &= ~ARMV8_PMU_EXCLUDE_EL0;
                kvm_vcpu_pmu_write_evtype_direct(counter, typer);
        }
}
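
/*
 * ARMV8_PMU_EXCLUDE_EL0 is the U bit (bit 30) of PMEVTYPER<n>_EL0 /
 * PMCCFILTR_EL0: when set, the counter does not count at EL0. Clearing it
 * above enables EL0 counting; setting it below disables it.
 */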

/*
 * Modify ARMv8 PMU events to exclude EL0 counting
 */
static void kvm_vcpu_pmu_disable_el0(unsigned long events)
{
        u64 typer;
        u32 counter;

        for_each_set_bit(counter, &events, 32) {
                typer = kvm_vcpu_pmu_read_evtype_direct(counter);
                typer |= ARMV8_PMU_EXCLUDE_EL0;
                kvm_vcpu_pmu_write_evtype_direct(counter, typer);
        }
}

/*
 * On VHE ensure that only guest events have EL0 counting enabled.
 * This is called from both vcpu_{load,put} and the sysreg handling.
 * Since the latter is preemptible, special care must be taken to
 * disable preemption.
 */
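/*
 * Both the per-CPU kvm_host_data lookup and the direct PMEVTYPER writes
 * must target the same physical CPU, hence the preempt_disable() /
 * preempt_enable() pair around them.
 */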
void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu)
{
        struct kvm_host_data *host;
        u32 events_guest, events_host;

        if (!has_vhe())
                return;

        preempt_disable();
        host = this_cpu_ptr(&kvm_host_data);
        events_guest = host->pmu_events.events_guest;
        events_host = host->pmu_events.events_host;

        kvm_vcpu_pmu_enable_el0(events_guest);
        kvm_vcpu_pmu_disable_el0(events_host);
        preempt_enable();
}

/*
 * On VHE ensure that only host events have EL0 counting enabled
 */
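/*
 * No preempt_disable() here: this path is reached via vcpu_put(), which
 * already runs with preemption disabled (an assumption inferred from the
 * absence of explicit preemption handling below).
 */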
void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu)
{
        struct kvm_host_data *host;
        u32 events_guest, events_host;

        if (!has_vhe())
                return;

        host = this_cpu_ptr(&kvm_host_data);
        events_guest = host->pmu_events.events_guest;
        events_host = host->pmu_events.events_host;

        kvm_vcpu_pmu_enable_el0(events_host);
        kvm_vcpu_pmu_disable_el0(events_guest);
}
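
/*
 * Taken together (per the comments above): on a VHE host, entering a vcpu
 * (vcpu_load) enables EL0 counting for guest-only events and disables it
 * for host-only events; leaving the vcpu (vcpu_put) restores the reverse.
 * Events that count in both worlds are never touched, as they fail
 * kvm_pmu_switch_needed().
 */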