/* linux/arch/x86/events/intel/uncore.h — Intel uncore PMU support */
   1#include <linux/module.h>
   2#include <linux/slab.h>
   3#include <linux/pci.h>
   4#include <asm/apicdef.h>
   5
   6#include <linux/perf_event.h>
   7#include "../perf_event.h"
   8
   9#define UNCORE_PMU_NAME_LEN             32
  10#define UNCORE_PMU_HRTIMER_INTERVAL     (60LL * NSEC_PER_SEC)
  11#define UNCORE_SNB_IMC_HRTIMER_INTERVAL (5ULL * NSEC_PER_SEC)
  12
  13#define UNCORE_FIXED_EVENT              0xff
  14#define UNCORE_PMC_IDX_MAX_GENERIC      8
  15#define UNCORE_PMC_IDX_FIXED            UNCORE_PMC_IDX_MAX_GENERIC
  16#define UNCORE_PMC_IDX_MAX              (UNCORE_PMC_IDX_FIXED + 1)
  17
  18#define UNCORE_PCI_DEV_DATA(type, idx)  ((type << 8) | idx)
  19#define UNCORE_PCI_DEV_TYPE(data)       ((data >> 8) & 0xff)
  20#define UNCORE_PCI_DEV_IDX(data)        (data & 0xff)
  21#define UNCORE_EXTRA_PCI_DEV            0xff
  22#define UNCORE_EXTRA_PCI_DEV_MAX        3
  23
  24#define UNCORE_EVENT_CONSTRAINT(c, n) EVENT_CONSTRAINT(c, n, 0xff)
  25
/*
 * Auxiliary (non-counting) PCI devices some uncore drivers need to
 * reach; slots are addressed by UNCORE_PCI_DEV_IDX() values.
 */
struct pci_extra_dev {
        struct pci_dev *dev[UNCORE_EXTRA_PCI_DEV_MAX];
};
  29
/* Forward declarations for the mutually referencing types below. */
struct intel_uncore_ops;
struct intel_uncore_pmu;
struct intel_uncore_box;
struct uncore_event_desc;
  34
/*
 * Static description of one uncore PMU type: counter geometry, register
 * layout, constraints and the ops shared by every box of this type.
 */
struct intel_uncore_type {
        const char *name;               /* PMU name */
        int num_counters;               /* generic counters per box */
        int num_boxes;                  /* number of boxes of this type */
        int perf_ctr_bits;              /* width of a generic counter */
        int fixed_ctr_bits;             /* width of the fixed counter */
        unsigned perf_ctr;              /* first generic counter reg/MSR */
        unsigned event_ctl;             /* first event control reg/MSR */
        unsigned event_mask;            /* valid event-control bits */
        unsigned fixed_ctr;             /* fixed counter register */
        unsigned fixed_ctl;             /* fixed counter control register */
        unsigned box_ctl;               /* per-box control register */
        unsigned msr_offset;            /* MSR stride between boxes */
        unsigned num_shared_regs:8;     /* shared_regs[] entries per box */
        unsigned single_fixed:1;        /* NOTE(review): presumably only one
                                         * box carries the fixed counter —
                                         * confirm against uncore.c */
        unsigned pair_ctr_ctl:1;        /* ctl/ctr MSRs interleaved, so the
                                         * per-index MSR stride is 2 */
        unsigned *msr_offsets;          /* per-box offset table; overrides
                                         * msr_offset when non-NULL */
        struct event_constraint unconstrainted; /* [sic] default constraint */
        struct event_constraint *constraints;   /* optional constraint table */
        struct intel_uncore_pmu *pmus;
        struct intel_uncore_ops *ops;
        struct uncore_event_desc *event_descs;
        const struct attribute_group *attr_groups[4]; /* NULL-terminated */
        struct pmu *pmu; /* for custom pmu ops */
};

/* Conventional slots inside attr_groups[] (slot 3 stays NULL). */
#define pmu_group attr_groups[0]
#define format_group attr_groups[1]
#define events_group attr_groups[2]
  64
/*
 * Per-type callbacks.  disable_event/enable_event/read_counter are
 * called unconditionally by the wrappers below and so are mandatory;
 * the rest are optional (NULL-checked before use).
 */
struct intel_uncore_ops {
        void (*init_box)(struct intel_uncore_box *);
        void (*exit_box)(struct intel_uncore_box *);
        void (*disable_box)(struct intel_uncore_box *);
        void (*enable_box)(struct intel_uncore_box *);
        void (*disable_event)(struct intel_uncore_box *, struct perf_event *);
        void (*enable_event)(struct intel_uncore_box *, struct perf_event *);
        u64 (*read_counter)(struct intel_uncore_box *, struct perf_event *);
        int (*hw_config)(struct intel_uncore_box *, struct perf_event *);
        struct event_constraint *(*get_constraint)(struct intel_uncore_box *,
                                                   struct perf_event *);
        void (*put_constraint)(struct intel_uncore_box *, struct perf_event *);
};
  78
/*
 * One registered uncore PMU instance; ties the generic struct pmu to
 * its type description and its per-package boxes.
 */
struct intel_uncore_pmu {
        struct pmu                      pmu;    /* embedded core-perf pmu */
        char                            name[UNCORE_PMU_NAME_LEN];
        int                             pmu_idx; /* index within the type */
        int                             func_id;
        bool                            registered;
        atomic_t                        activeboxes;
        struct intel_uncore_type        *type;
        struct intel_uncore_box         **boxes; /* indexed by package id */
};
  89
/* Shared extra register state, refcounted and protected by its lock. */
struct intel_uncore_extra_reg {
        raw_spinlock_t lock;
        u64 config, config1, config2;
        atomic_t ref;
};
  95
  96struct intel_uncore_box {
  97        int pci_phys_id;
  98        int pkgid;
  99        int n_active;   /* number of active events */
 100        int n_events;
 101        int cpu;        /* cpu to collect events */
 102        unsigned long flags;
 103        atomic_t refcnt;
 104        struct perf_event *events[UNCORE_PMC_IDX_MAX];
 105        struct perf_event *event_list[UNCORE_PMC_IDX_MAX];
 106        struct event_constraint *event_constraint[UNCORE_PMC_IDX_MAX];
 107        unsigned long active_mask[BITS_TO_LONGS(UNCORE_PMC_IDX_MAX)];
 108        u64 tags[UNCORE_PMC_IDX_MAX];
 109        struct pci_dev *pci_dev;
 110        struct intel_uncore_pmu *pmu;
 111        u64 hrtimer_duration; /* hrtimer timeout for this box */
 112        struct hrtimer hrtimer;
 113        struct list_head list;
 114        struct list_head active_list;
 115        void *io_addr;
 116        struct intel_uncore_extra_reg shared_regs[0];
 117};
 118
/* box->flags bit: set once a box has been initialized */
#define UNCORE_BOX_FLAG_INITIATED       0

/* One sysfs-visible event description: attribute plus config string. */
struct uncore_event_desc {
        struct kobj_attribute attr;
        const char *config;     /* event config string */
};

/* Maps PCI bus number to physical package id, one map per PCI segment. */
struct pci2phy_map {
        struct list_head list;
        int segment;
        int pbus_to_physid[256];        /* indexed by bus number */
};
 131
/* Look up (or create) the pci2phy_map for @segment; defined in uncore.c. */
struct pci2phy_map *__find_pci2phy_map(int segment);

/* sysfs show method shared by all uncore_event_desc attributes. */
ssize_t uncore_event_show(struct kobject *kobj,
                          struct kobj_attribute *attr, char *buf);

/* Initializer for a struct uncore_event_desc (read-only attribute). */
#define INTEL_UNCORE_EVENT_DESC(_name, _config)                 \
{                                                               \
        .attr   = __ATTR(_name, 0444, uncore_event_show, NULL), \
        .config = _config,                                      \
}

/*
 * Define a read-only sysfs attribute "format_attr_<_var>" named _name
 * whose show method prints the literal _format string.
 */
#define DEFINE_UNCORE_FORMAT_ATTR(_var, _name, _format)                 \
static ssize_t __uncore_##_var##_show(struct kobject *kobj,             \
                                struct kobj_attribute *attr,            \
                                char *page)                             \
{                                                                       \
        BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE);                     \
        return sprintf(page, _format "\n");                             \
}                                                                       \
static struct kobj_attribute format_attr_##_var =                       \
        __ATTR(_name, 0444, __uncore_##_var##_show, NULL)
 153
 154static inline unsigned uncore_pci_box_ctl(struct intel_uncore_box *box)
 155{
 156        return box->pmu->type->box_ctl;
 157}
 158
 159static inline unsigned uncore_pci_fixed_ctl(struct intel_uncore_box *box)
 160{
 161        return box->pmu->type->fixed_ctl;
 162}
 163
 164static inline unsigned uncore_pci_fixed_ctr(struct intel_uncore_box *box)
 165{
 166        return box->pmu->type->fixed_ctr;
 167}
 168
 169static inline
 170unsigned uncore_pci_event_ctl(struct intel_uncore_box *box, int idx)
 171{
 172        return idx * 4 + box->pmu->type->event_ctl;
 173}
 174
 175static inline
 176unsigned uncore_pci_perf_ctr(struct intel_uncore_box *box, int idx)
 177{
 178        return idx * 8 + box->pmu->type->perf_ctr;
 179}
 180
 181static inline unsigned uncore_msr_box_offset(struct intel_uncore_box *box)
 182{
 183        struct intel_uncore_pmu *pmu = box->pmu;
 184        return pmu->type->msr_offsets ?
 185                pmu->type->msr_offsets[pmu->pmu_idx] :
 186                pmu->type->msr_offset * pmu->pmu_idx;
 187}
 188
 189static inline unsigned uncore_msr_box_ctl(struct intel_uncore_box *box)
 190{
 191        if (!box->pmu->type->box_ctl)
 192                return 0;
 193        return box->pmu->type->box_ctl + uncore_msr_box_offset(box);
 194}
 195
 196static inline unsigned uncore_msr_fixed_ctl(struct intel_uncore_box *box)
 197{
 198        if (!box->pmu->type->fixed_ctl)
 199                return 0;
 200        return box->pmu->type->fixed_ctl + uncore_msr_box_offset(box);
 201}
 202
 203static inline unsigned uncore_msr_fixed_ctr(struct intel_uncore_box *box)
 204{
 205        return box->pmu->type->fixed_ctr + uncore_msr_box_offset(box);
 206}
 207
 208static inline
 209unsigned uncore_msr_event_ctl(struct intel_uncore_box *box, int idx)
 210{
 211        return box->pmu->type->event_ctl +
 212                (box->pmu->type->pair_ctr_ctl ? 2 * idx : idx) +
 213                uncore_msr_box_offset(box);
 214}
 215
 216static inline
 217unsigned uncore_msr_perf_ctr(struct intel_uncore_box *box, int idx)
 218{
 219        return box->pmu->type->perf_ctr +
 220                (box->pmu->type->pair_ctr_ctl ? 2 * idx : idx) +
 221                uncore_msr_box_offset(box);
 222}
 223
 224static inline
 225unsigned uncore_fixed_ctl(struct intel_uncore_box *box)
 226{
 227        if (box->pci_dev)
 228                return uncore_pci_fixed_ctl(box);
 229        else
 230                return uncore_msr_fixed_ctl(box);
 231}
 232
 233static inline
 234unsigned uncore_fixed_ctr(struct intel_uncore_box *box)
 235{
 236        if (box->pci_dev)
 237                return uncore_pci_fixed_ctr(box);
 238        else
 239                return uncore_msr_fixed_ctr(box);
 240}
 241
 242static inline
 243unsigned uncore_event_ctl(struct intel_uncore_box *box, int idx)
 244{
 245        if (box->pci_dev)
 246                return uncore_pci_event_ctl(box, idx);
 247        else
 248                return uncore_msr_event_ctl(box, idx);
 249}
 250
 251static inline
 252unsigned uncore_perf_ctr(struct intel_uncore_box *box, int idx)
 253{
 254        if (box->pci_dev)
 255                return uncore_pci_perf_ctr(box, idx);
 256        else
 257                return uncore_msr_perf_ctr(box, idx);
 258}
 259
 260static inline int uncore_perf_ctr_bits(struct intel_uncore_box *box)
 261{
 262        return box->pmu->type->perf_ctr_bits;
 263}
 264
 265static inline int uncore_fixed_ctr_bits(struct intel_uncore_box *box)
 266{
 267        return box->pmu->type->fixed_ctr_bits;
 268}
 269
 270static inline int uncore_num_counters(struct intel_uncore_box *box)
 271{
 272        return box->pmu->type->num_counters;
 273}
 274
 275static inline void uncore_disable_box(struct intel_uncore_box *box)
 276{
 277        if (box->pmu->type->ops->disable_box)
 278                box->pmu->type->ops->disable_box(box);
 279}
 280
 281static inline void uncore_enable_box(struct intel_uncore_box *box)
 282{
 283        if (box->pmu->type->ops->enable_box)
 284                box->pmu->type->ops->enable_box(box);
 285}
 286
 287static inline void uncore_disable_event(struct intel_uncore_box *box,
 288                                struct perf_event *event)
 289{
 290        box->pmu->type->ops->disable_event(box, event);
 291}
 292
 293static inline void uncore_enable_event(struct intel_uncore_box *box,
 294                                struct perf_event *event)
 295{
 296        box->pmu->type->ops->enable_event(box, event);
 297}
 298
 299static inline u64 uncore_read_counter(struct intel_uncore_box *box,
 300                                struct perf_event *event)
 301{
 302        return box->pmu->type->ops->read_counter(box, event);
 303}
 304
 305static inline void uncore_box_init(struct intel_uncore_box *box)
 306{
 307        if (!test_and_set_bit(UNCORE_BOX_FLAG_INITIATED, &box->flags)) {
 308                if (box->pmu->type->ops->init_box)
 309                        box->pmu->type->ops->init_box(box);
 310        }
 311}
 312
 313static inline void uncore_box_exit(struct intel_uncore_box *box)
 314{
 315        if (test_and_clear_bit(UNCORE_BOX_FLAG_INITIATED, &box->flags)) {
 316                if (box->pmu->type->ops->exit_box)
 317                        box->pmu->type->ops->exit_box(box);
 318        }
 319}
 320
 321static inline bool uncore_box_is_fake(struct intel_uncore_box *box)
 322{
 323        return (box->pkgid < 0);
 324}
 325
/* Map a perf_event back to its uncore PMU via the embedded struct pmu. */
static inline struct intel_uncore_pmu *uncore_event_to_pmu(struct perf_event *event)
{
        return container_of(event->pmu, struct intel_uncore_pmu, pmu);
}
 330
/*
 * The owning box is stashed in event->pmu_private — presumably at
 * event init time in uncore.c; confirm against the caller.
 */
static inline struct intel_uncore_box *uncore_event_to_box(struct perf_event *event)
{
        return event->pmu_private;
}
 335
/* Core uncore driver API, implemented in uncore.c. */
struct intel_uncore_box *uncore_pmu_to_box(struct intel_uncore_pmu *pmu, int cpu);
u64 uncore_msr_read_counter(struct intel_uncore_box *box, struct perf_event *event);
void uncore_pmu_start_hrtimer(struct intel_uncore_box *box);
void uncore_pmu_cancel_hrtimer(struct intel_uncore_box *box);
void uncore_pmu_event_read(struct perf_event *event);
void uncore_perf_event_update(struct intel_uncore_box *box, struct perf_event *event);
struct event_constraint *
uncore_get_constraint(struct intel_uncore_box *box, struct perf_event *event);
void uncore_put_constraint(struct intel_uncore_box *box, struct perf_event *event);
u64 uncore_shared_reg_config(struct intel_uncore_box *box, int idx);

/* Globals defined in uncore.c. */
extern struct intel_uncore_type **uncore_msr_uncores;
extern struct intel_uncore_type **uncore_pci_uncores;
extern struct pci_driver *uncore_pci_driver;
extern raw_spinlock_t pci2phy_map_lock;         /* protects pci2phy_map_head */
extern struct list_head pci2phy_map_head;
extern struct pci_extra_dev *uncore_extra_pci_dev;
extern struct event_constraint uncore_constraint_empty;
 354
/* perf_event_intel_uncore_snb.c — client (SNB..SKL, NHM) uncore init */
int snb_uncore_pci_init(void);
int ivb_uncore_pci_init(void);
int hsw_uncore_pci_init(void);
int bdw_uncore_pci_init(void);
int skl_uncore_pci_init(void);
void snb_uncore_cpu_init(void);
void nhm_uncore_cpu_init(void);
int snb_pci2phy_map_init(int devid);

/* perf_event_intel_uncore_snbep.c — server (SNB-EP..BDX, KNL) uncore init */
int snbep_uncore_pci_init(void);
void snbep_uncore_cpu_init(void);
int ivbep_uncore_pci_init(void);
void ivbep_uncore_cpu_init(void);
int hswep_uncore_pci_init(void);
void hswep_uncore_cpu_init(void);
int bdx_uncore_pci_init(void);
void bdx_uncore_cpu_init(void);
int knl_uncore_pci_init(void);
void knl_uncore_cpu_init(void);

/* perf_event_intel_uncore_nhmex.c — Nehalem-EX uncore init */
void nhmex_uncore_cpu_init(void);
 379