/* linux/arch/x86/kernel/cpu/perfctr-watchdog.c */
/*
 * local apic based NMI watchdog for various CPUs.
 *
 * This file also handles reservation of performance counters for coordination
 * with other users (like oprofile).
 *
 * Note that these events normally don't tick when the CPU idles. This means
 * the frequency varies with CPU load.
 *
 * Original code for K7/P6 written by Keith Owens
 */
  13
#include <linux/percpu.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/smp.h>
#include <linux/kprobes.h>

#include <asm/apic.h>
#include <asm/nmi.h>
#include <asm/perf_event.h>
  24
  25/*
  26 * this number is calculated from Intel's MSR_P4_CRU_ESCR5 register and it's
  27 * offset from MSR_P4_BSU_ESCR0.
  28 *
  29 * It will be the max for all platforms (for now)
  30 */
  31#define NMI_MAX_COUNTER_BITS 66
  32
  33/*
  34 * perfctr_nmi_owner tracks the ownership of the perfctr registers:
  35 * evtsel_nmi_owner tracks the ownership of the event selection
  36 * - different performance counters/ event selection may be reserved for
  37 *   different subsystems this reservation system just tries to coordinate
  38 *   things a little
  39 */
  40static DECLARE_BITMAP(perfctr_nmi_owner, NMI_MAX_COUNTER_BITS);
  41static DECLARE_BITMAP(evntsel_nmi_owner, NMI_MAX_COUNTER_BITS);
  42
  43/* converts an msr to an appropriate reservation bit */
  44static inline unsigned int nmi_perfctr_msr_to_bit(unsigned int msr)
  45{
  46        /* returns the bit offset of the performance counter register */
  47        switch (boot_cpu_data.x86_vendor) {
  48        case X86_VENDOR_AMD:
  49                if (msr >= MSR_F15H_PERF_CTR)
  50                        return (msr - MSR_F15H_PERF_CTR) >> 1;
  51                return msr - MSR_K7_PERFCTR0;
  52        case X86_VENDOR_INTEL:
  53                if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON))
  54                        return msr - MSR_ARCH_PERFMON_PERFCTR0;
  55
  56                switch (boot_cpu_data.x86) {
  57                case 6:
  58                        return msr - MSR_P6_PERFCTR0;
  59                case 11:
  60                        return msr - MSR_KNC_PERFCTR0;
  61                case 15:
  62                        return msr - MSR_P4_BPU_PERFCTR0;
  63                }
  64        }
  65        return 0;
  66}
  67
  68/*
  69 * converts an msr to an appropriate reservation bit
  70 * returns the bit offset of the event selection register
  71 */
  72static inline unsigned int nmi_evntsel_msr_to_bit(unsigned int msr)
  73{
  74        /* returns the bit offset of the event selection register */
  75        switch (boot_cpu_data.x86_vendor) {
  76        case X86_VENDOR_AMD:
  77                if (msr >= MSR_F15H_PERF_CTL)
  78                        return (msr - MSR_F15H_PERF_CTL) >> 1;
  79                return msr - MSR_K7_EVNTSEL0;
  80        case X86_VENDOR_INTEL:
  81                if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON))
  82                        return msr - MSR_ARCH_PERFMON_EVENTSEL0;
  83
  84                switch (boot_cpu_data.x86) {
  85                case 6:
  86                        return msr - MSR_P6_EVNTSEL0;
  87                case 11:
  88                        return msr - MSR_KNC_EVNTSEL0;
  89                case 15:
  90                        return msr - MSR_P4_BSU_ESCR0;
  91                }
  92        }
  93        return 0;
  94
  95}
  96
  97/* checks for a bit availability (hack for oprofile) */
  98int avail_to_resrv_perfctr_nmi_bit(unsigned int counter)
  99{
 100        BUG_ON(counter > NMI_MAX_COUNTER_BITS);
 101
 102        return !test_bit(counter, perfctr_nmi_owner);
 103}
 104EXPORT_SYMBOL(avail_to_resrv_perfctr_nmi_bit);
 105
 106int reserve_perfctr_nmi(unsigned int msr)
 107{
 108        unsigned int counter;
 109
 110        counter = nmi_perfctr_msr_to_bit(msr);
 111        /* register not managed by the allocator? */
 112        if (counter > NMI_MAX_COUNTER_BITS)
 113                return 1;
 114
 115        if (!test_and_set_bit(counter, perfctr_nmi_owner))
 116                return 1;
 117        return 0;
 118}
 119EXPORT_SYMBOL(reserve_perfctr_nmi);
 120
 121void release_perfctr_nmi(unsigned int msr)
 122{
 123        unsigned int counter;
 124
 125        counter = nmi_perfctr_msr_to_bit(msr);
 126        /* register not managed by the allocator? */
 127        if (counter > NMI_MAX_COUNTER_BITS)
 128                return;
 129
 130        clear_bit(counter, perfctr_nmi_owner);
 131}
 132EXPORT_SYMBOL(release_perfctr_nmi);
 133
 134int reserve_evntsel_nmi(unsigned int msr)
 135{
 136        unsigned int counter;
 137
 138        counter = nmi_evntsel_msr_to_bit(msr);
 139        /* register not managed by the allocator? */
 140        if (counter > NMI_MAX_COUNTER_BITS)
 141                return 1;
 142
 143        if (!test_and_set_bit(counter, evntsel_nmi_owner))
 144                return 1;
 145        return 0;
 146}
 147EXPORT_SYMBOL(reserve_evntsel_nmi);
 148
 149void release_evntsel_nmi(unsigned int msr)
 150{
 151        unsigned int counter;
 152
 153        counter = nmi_evntsel_msr_to_bit(msr);
 154        /* register not managed by the allocator? */
 155        if (counter > NMI_MAX_COUNTER_BITS)
 156                return;
 157
 158        clear_bit(counter, evntsel_nmi_owner);
 159}
 160EXPORT_SYMBOL(release_evntsel_nmi);
 161