   1/* SPDX-License-Identifier: GPL-2.0-only */
   2/*
   3 * Copyright (C) 2015 Linaro Ltd.
   4 * Author: Shannon Zhao <shannon.zhao@linaro.org>
   5 */
   6
   7#ifndef __ASM_ARM_KVM_PMU_H
   8#define __ASM_ARM_KVM_PMU_H
   9
  10#include <linux/perf_event.h>
  11#include <asm/perf_event.h>
  12
/*
 * The dedicated cycle counter sits at the highest counter index; the
 * remaining event counters can be chained in adjacent pairs, so the
 * pair count is the counter count rounded up to a multiple of two,
 * halved.
 */
#define ARMV8_PMU_CYCLE_IDX             (ARMV8_PMU_MAX_COUNTERS - 1)
#define ARMV8_PMU_MAX_COUNTER_PAIRS     ((ARMV8_PMU_MAX_COUNTERS + 1) >> 1)

/*
 * Static key: false by default; presumably flipped on by host PMU/perf
 * init when a usable PMU is detected — confirm at the definition site.
 */
DECLARE_STATIC_KEY_FALSE(kvm_arm_pmu_available);
  17
/*
 * Returns true when the kvm_arm_pmu_available static key is enabled.
 * Implemented as a static branch (code patched at runtime) so the check
 * is effectively free on hot paths; __always_inline keeps it usable
 * from non-instrumentable contexts.
 */
static __always_inline bool kvm_arm_support_pmu_v3(void)
{
        return static_branch_likely(&kvm_arm_pmu_available);
}
  22
  23#ifdef CONFIG_HW_PERF_EVENTS
  24
/*
 * State for a single guest PMU counter, backed by at most one host
 * perf event.
 */
struct kvm_pmc {
        u8 idx; /* index into the pmu->pmc array */
        struct perf_event *perf_event; /* backing host event; NULL when none — confirm at call sites */
};
  29
/* Per-vCPU virtual PMU state. */
struct kvm_pmu {
        int irq_num;    /* overflow interrupt number; valid iff >= VGIC_NR_SGIS (see kvm_arm_pmu_irq_initialized) */
        struct kvm_pmc pmc[ARMV8_PMU_MAX_COUNTERS];     /* one entry per architectural counter */
        DECLARE_BITMAP(chained, ARMV8_PMU_MAX_COUNTER_PAIRS);  /* one bit per even/odd counter pair in chained mode */
        bool created;   /* NOTE(review): presumably set once userspace finishes PMU setup — confirm in pmu code */
        bool irq_level; /* last interrupt line level pushed to the guest — TODO confirm against sync/flush paths */
        struct irq_work overflow_work;  /* deferred work, presumably for overflow signalling — confirm handler */
};
  38
/* True once userspace has configured a valid overflow interrupt (SGI range excluded). */
#define kvm_arm_pmu_irq_initialized(v)  ((v)->arch.pmu.irq_num >= VGIC_NR_SGIS)
/* Counter access and capability queries. */
u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u64 select_idx);
void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu, u64 select_idx, u64 val);
u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu);
u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1);
/* vCPU lifecycle hooks. */
void kvm_pmu_vcpu_init(struct kvm_vcpu *vcpu);
void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu);
void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu);
/* Counter enable/disable and guest entry/exit state handling. */
void kvm_pmu_disable_counter_mask(struct kvm_vcpu *vcpu, u64 val);
void kvm_pmu_enable_counter_mask(struct kvm_vcpu *vcpu, u64 val);
void kvm_pmu_flush_hwstate(struct kvm_vcpu *vcpu);
void kvm_pmu_sync_hwstate(struct kvm_vcpu *vcpu);
bool kvm_pmu_should_notify_user(struct kvm_vcpu *vcpu);
void kvm_pmu_update_run(struct kvm_vcpu *vcpu);
/* Trapped guest PMU register writes. */
void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val);
void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val);
void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data,
                                    u64 select_idx);
/* KVM device attribute interface (userspace configuration). */
int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu,
                            struct kvm_device_attr *attr);
int kvm_arm_pmu_v3_get_attr(struct kvm_vcpu *vcpu,
                            struct kvm_device_attr *attr);
int kvm_arm_pmu_v3_has_attr(struct kvm_vcpu *vcpu,
                            struct kvm_device_attr *attr);
int kvm_arm_pmu_v3_enable(struct kvm_vcpu *vcpu);
int kvm_pmu_probe_pmuver(void);
  65#else
/*
 * CONFIG_HW_PERF_EVENTS=n: empty struct and no-op stubs so that callers
 * compile unchanged when no PMU support is built in.  Queries report
 * zero/false, and the attribute interface reports -ENXIO (no such
 * device/address).
 */
struct kvm_pmu {
};

#define kvm_arm_pmu_irq_initialized(v)  (false)
static inline u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu,
                                            u64 select_idx)
{
        return 0;
}
static inline void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu,
                                             u64 select_idx, u64 val) {}
static inline u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu)
{
        return 0;
}
static inline void kvm_pmu_vcpu_init(struct kvm_vcpu *vcpu) {}
static inline void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu) {}
static inline void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu) {}
static inline void kvm_pmu_disable_counter_mask(struct kvm_vcpu *vcpu, u64 val) {}
static inline void kvm_pmu_enable_counter_mask(struct kvm_vcpu *vcpu, u64 val) {}
static inline void kvm_pmu_flush_hwstate(struct kvm_vcpu *vcpu) {}
static inline void kvm_pmu_sync_hwstate(struct kvm_vcpu *vcpu) {}
static inline bool kvm_pmu_should_notify_user(struct kvm_vcpu *vcpu)
{
        return false;
}
static inline void kvm_pmu_update_run(struct kvm_vcpu *vcpu) {}
static inline void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val) {}
static inline void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val) {}
static inline void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu,
                                                  u64 data, u64 select_idx) {}
static inline int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu,
                                          struct kvm_device_attr *attr)
{
        return -ENXIO;
}
static inline int kvm_arm_pmu_v3_get_attr(struct kvm_vcpu *vcpu,
                                          struct kvm_device_attr *attr)
{
        return -ENXIO;
}
static inline int kvm_arm_pmu_v3_has_attr(struct kvm_vcpu *vcpu,
                                          struct kvm_device_attr *attr)
{
        return -ENXIO;
}
static inline int kvm_arm_pmu_v3_enable(struct kvm_vcpu *vcpu)
{
        return 0;
}
static inline u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1)
{
        return 0;
}

/* NOTE(review): 0xf presumably encodes an IMPLEMENTATION DEFINED PMU
 * version (ID_AA64DFR0_EL1.PMUVer) — confirm against the arch headers. */
static inline int kvm_pmu_probe_pmuver(void) { return 0xf; }
 122
 123#endif
 124
 125#endif
 126