linux/arch/x86/kernel/cpu/perfctr-watchdog.c
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * local apic based NMI watchdog for various CPUs.
   4 *
   5 * This file also handles reservation of performance counters for coordination
   6 * with other users (like oprofile).
   7 *
   8 * Note that these events normally don't tick when the CPU idles. This means
   9 * the frequency varies with CPU load.
  10 *
  11 * Original code for K7/P6 written by Keith Owens
  12 *
  13 */
  14
  15#include <linux/percpu.h>
  16#include <linux/export.h>
  17#include <linux/kernel.h>
  18#include <linux/bitops.h>
  19#include <linux/smp.h>
  20#include <asm/nmi.h>
  21#include <linux/kprobes.h>
  22
  23#include <asm/apic.h>
  24#include <asm/perf_event.h>
  25
/*
 * This number is calculated from Intel's MSR_P4_CRU_ESCR5 register and its
 * offset from MSR_P4_BSU_ESCR0.
 *
 * It will be the max for all platforms (for now).
 */
#define NMI_MAX_COUNTER_BITS 66
  32#define NMI_MAX_COUNTER_BITS 66
  33
/*
 * perfctr_nmi_owner tracks the ownership of the perfctr registers;
 * evntsel_nmi_owner tracks the ownership of the event-selection registers.
 * Different performance counters / event selections may be reserved by
 * different subsystems; this reservation system just tries to coordinate
 * things a little.
 */
/* One bit per performance counter: set = reserved by some subsystem. */
static DECLARE_BITMAP(perfctr_nmi_owner, NMI_MAX_COUNTER_BITS);
/* One bit per event-selection register: set = reserved. */
static DECLARE_BITMAP(evntsel_nmi_owner, NMI_MAX_COUNTER_BITS);
  43
  44/* converts an msr to an appropriate reservation bit */
  45static inline unsigned int nmi_perfctr_msr_to_bit(unsigned int msr)
  46{
  47        /* returns the bit offset of the performance counter register */
  48        switch (boot_cpu_data.x86_vendor) {
  49        case X86_VENDOR_AMD:
  50                if (msr >= MSR_F15H_PERF_CTR)
  51                        return (msr - MSR_F15H_PERF_CTR) >> 1;
  52                return msr - MSR_K7_PERFCTR0;
  53        case X86_VENDOR_INTEL:
  54                if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON))
  55                        return msr - MSR_ARCH_PERFMON_PERFCTR0;
  56
  57                switch (boot_cpu_data.x86) {
  58                case 6:
  59                        return msr - MSR_P6_PERFCTR0;
  60                case 11:
  61                        return msr - MSR_KNC_PERFCTR0;
  62                case 15:
  63                        return msr - MSR_P4_BPU_PERFCTR0;
  64                }
  65        }
  66        return 0;
  67}
  68
  69/*
  70 * converts an msr to an appropriate reservation bit
  71 * returns the bit offset of the event selection register
  72 */
  73static inline unsigned int nmi_evntsel_msr_to_bit(unsigned int msr)
  74{
  75        /* returns the bit offset of the event selection register */
  76        switch (boot_cpu_data.x86_vendor) {
  77        case X86_VENDOR_AMD:
  78                if (msr >= MSR_F15H_PERF_CTL)
  79                        return (msr - MSR_F15H_PERF_CTL) >> 1;
  80                return msr - MSR_K7_EVNTSEL0;
  81        case X86_VENDOR_INTEL:
  82                if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON))
  83                        return msr - MSR_ARCH_PERFMON_EVENTSEL0;
  84
  85                switch (boot_cpu_data.x86) {
  86                case 6:
  87                        return msr - MSR_P6_EVNTSEL0;
  88                case 11:
  89                        return msr - MSR_KNC_EVNTSEL0;
  90                case 15:
  91                        return msr - MSR_P4_BSU_ESCR0;
  92                }
  93        }
  94        return 0;
  95
  96}
  97
  98/* checks for a bit availability (hack for oprofile) */
  99int avail_to_resrv_perfctr_nmi_bit(unsigned int counter)
 100{
 101        BUG_ON(counter > NMI_MAX_COUNTER_BITS);
 102
 103        return !test_bit(counter, perfctr_nmi_owner);
 104}
 105EXPORT_SYMBOL(avail_to_resrv_perfctr_nmi_bit);
 106
 107int reserve_perfctr_nmi(unsigned int msr)
 108{
 109        unsigned int counter;
 110
 111        counter = nmi_perfctr_msr_to_bit(msr);
 112        /* register not managed by the allocator? */
 113        if (counter > NMI_MAX_COUNTER_BITS)
 114                return 1;
 115
 116        if (!test_and_set_bit(counter, perfctr_nmi_owner))
 117                return 1;
 118        return 0;
 119}
 120EXPORT_SYMBOL(reserve_perfctr_nmi);
 121
 122void release_perfctr_nmi(unsigned int msr)
 123{
 124        unsigned int counter;
 125
 126        counter = nmi_perfctr_msr_to_bit(msr);
 127        /* register not managed by the allocator? */
 128        if (counter > NMI_MAX_COUNTER_BITS)
 129                return;
 130
 131        clear_bit(counter, perfctr_nmi_owner);
 132}
 133EXPORT_SYMBOL(release_perfctr_nmi);
 134
 135int reserve_evntsel_nmi(unsigned int msr)
 136{
 137        unsigned int counter;
 138
 139        counter = nmi_evntsel_msr_to_bit(msr);
 140        /* register not managed by the allocator? */
 141        if (counter > NMI_MAX_COUNTER_BITS)
 142                return 1;
 143
 144        if (!test_and_set_bit(counter, evntsel_nmi_owner))
 145                return 1;
 146        return 0;
 147}
 148EXPORT_SYMBOL(reserve_evntsel_nmi);
 149
 150void release_evntsel_nmi(unsigned int msr)
 151{
 152        unsigned int counter;
 153
 154        counter = nmi_evntsel_msr_to_bit(msr);
 155        /* register not managed by the allocator? */
 156        if (counter > NMI_MAX_COUNTER_BITS)
 157                return;
 158
 159        clear_bit(counter, evntsel_nmi_owner);
 160}
 161EXPORT_SYMBOL(release_evntsel_nmi);
 162