/* arch/x86/events/intel/uncore.h — Intel uncore PMU support */
   1#include <linux/slab.h>
   2#include <linux/pci.h>
   3#include <asm/apicdef.h>
   4
   5#include <linux/perf_event.h>
   6#include "../perf_event.h"
   7
#define UNCORE_PMU_NAME_LEN             32
#define UNCORE_PMU_HRTIMER_INTERVAL     (60LL * NSEC_PER_SEC)
#define UNCORE_SNB_IMC_HRTIMER_INTERVAL (5ULL * NSEC_PER_SEC)

/* pseudo event code selecting the fixed counter (see UNCORE_PMC_IDX_FIXED) */
#define UNCORE_FIXED_EVENT              0xff
/* counter index space: generic counters first, then the single fixed counter */
#define UNCORE_PMC_IDX_MAX_GENERIC      8
#define UNCORE_PMC_IDX_FIXED            UNCORE_PMC_IDX_MAX_GENERIC
#define UNCORE_PMC_IDX_MAX              (UNCORE_PMC_IDX_FIXED + 1)
  16
  17#define UNCORE_PCI_DEV_FULL_DATA(dev, func, type, idx)  \
  18                ((dev << 24) | (func << 16) | (type << 8) | idx)
  19#define UNCORE_PCI_DEV_DATA(type, idx)  ((type << 8) | idx)
  20#define UNCORE_PCI_DEV_DEV(data)        ((data >> 24) & 0xff)
  21#define UNCORE_PCI_DEV_FUNC(data)       ((data >> 16) & 0xff)
  22#define UNCORE_PCI_DEV_TYPE(data)       ((data >> 8) & 0xff)
  23#define UNCORE_PCI_DEV_IDX(data)        (data & 0xff)
  24#define UNCORE_EXTRA_PCI_DEV            0xff
  25#define UNCORE_EXTRA_PCI_DEV_MAX        3
  26
  27#define UNCORE_EVENT_CONSTRAINT(c, n) EVENT_CONSTRAINT(c, n, 0xff)
  28
/*
 * Auxiliary PCI devices referenced via the UNCORE_EXTRA_PCI_DEV slots
 * (see uncore_extra_pci_dev), i.e. devices not claimed by uncore_pci_driver.
 */
struct pci_extra_dev {
        struct pci_dev *dev[UNCORE_EXTRA_PCI_DEV_MAX];
};
  32
  33struct intel_uncore_ops;
  34struct intel_uncore_pmu;
  35struct intel_uncore_box;
  36struct uncore_event_desc;
  37
/*
 * Static description of one uncore PMU type: counter geometry, register
 * layout, and the ops/constraints shared by every box of this type.
 */
struct intel_uncore_type {
        const char *name;
        int num_counters;               /* generic counters per box */
        int num_boxes;                  /* number of box instances of this type */
        int perf_ctr_bits;              /* width of a generic counter */
        int fixed_ctr_bits;             /* width of the fixed counter */
        unsigned perf_ctr;              /* base of the counter registers */
        unsigned event_ctl;             /* base of the event control registers */
        unsigned event_mask;
        unsigned event_mask_ext;        /* extension bits of the event mask */
        unsigned fixed_ctr;             /* fixed counter register */
        unsigned fixed_ctl;             /* fixed counter control; 0 = no fixed counter */
        unsigned box_ctl;               /* box-level control; 0 = none */
        unsigned msr_offset;            /* regular per-box MSR stride (uncore_msr_box_offset) */
        unsigned num_shared_regs:8;     /* extra regs trailing each intel_uncore_box */
        unsigned single_fixed:1;        /* presumably: only one box has the fixed counter — confirm in .c users */
        unsigned pair_ctr_ctl:1;        /* ctl/ctr MSRs interleaved: per-index stride is 2 */
        unsigned *msr_offsets;          /* irregular per-box offsets; overrides msr_offset when set */
        struct event_constraint unconstrainted; /* (sic) default constraint for this type */
        struct event_constraint *constraints;
        struct intel_uncore_pmu *pmus;
        struct intel_uncore_ops *ops;
        struct uncore_event_desc *event_descs;
        const struct attribute_group *attr_groups[4]; /* slots named by *_group macros below */
        struct pmu *pmu; /* for custom pmu ops */
};
  64
/* conventional slots in intel_uncore_type::attr_groups (index 3 stays NULL) */
#define pmu_group attr_groups[0]
#define format_group attr_groups[1]
#define events_group attr_groups[2]
  68
/*
 * Hardware access callbacks for one uncore type (MSR- or PCI-backed).
 * disable_box/enable_box, get_constraint/put_constraint, init_box and
 * exit_box are optional; callers below check for NULL before invoking.
 */
struct intel_uncore_ops {
        void (*init_box)(struct intel_uncore_box *);
        void (*exit_box)(struct intel_uncore_box *);
        void (*disable_box)(struct intel_uncore_box *);
        void (*enable_box)(struct intel_uncore_box *);
        void (*disable_event)(struct intel_uncore_box *, struct perf_event *);
        void (*enable_event)(struct intel_uncore_box *, struct perf_event *);
        u64 (*read_counter)(struct intel_uncore_box *, struct perf_event *);
        /* hw_config: validate/translate an event's config; <0 on reject */
        int (*hw_config)(struct intel_uncore_box *, struct perf_event *);
        struct event_constraint *(*get_constraint)(struct intel_uncore_box *,
                                                   struct perf_event *);
        void (*put_constraint)(struct intel_uncore_box *, struct perf_event *);
};
  82
/*
 * One registered perf PMU instance of an uncore type; indexes into the
 * per-package array of boxes it owns.
 */
struct intel_uncore_pmu {
        struct pmu                      pmu;            /* embedded perf core PMU */
        char                            name[UNCORE_PMU_NAME_LEN];
        int                             pmu_idx;        /* index within type (scales MSR offsets) */
        int                             func_id;
        bool                            registered;     /* perf_pmu_register() done */
        atomic_t                        activeboxes;
        struct intel_uncore_type        *type;          /* back pointer to the type description */
        struct intel_uncore_box         **boxes;
};
  93
/*
 * Shared extra register state, refcounted so multiple events can agree
 * on one programmed value; lock protects config/ref updates.
 */
struct intel_uncore_extra_reg {
        raw_spinlock_t lock;
        u64 config, config1, config2;
        atomic_t ref;
};
  99
 100struct intel_uncore_box {
 101        int pci_phys_id;
 102        int pkgid;
 103        int n_active;   /* number of active events */
 104        int n_events;
 105        int cpu;        /* cpu to collect events */
 106        unsigned long flags;
 107        atomic_t refcnt;
 108        struct perf_event *events[UNCORE_PMC_IDX_MAX];
 109        struct perf_event *event_list[UNCORE_PMC_IDX_MAX];
 110        struct event_constraint *event_constraint[UNCORE_PMC_IDX_MAX];
 111        unsigned long active_mask[BITS_TO_LONGS(UNCORE_PMC_IDX_MAX)];
 112        u64 tags[UNCORE_PMC_IDX_MAX];
 113        struct pci_dev *pci_dev;
 114        struct intel_uncore_pmu *pmu;
 115        u64 hrtimer_duration; /* hrtimer timeout for this box */
 116        struct hrtimer hrtimer;
 117        struct list_head list;
 118        struct list_head active_list;
 119        void *io_addr;
 120        struct intel_uncore_extra_reg shared_regs[0];
 121};
 122
/* bit numbers in intel_uncore_box::flags */
#define UNCORE_BOX_FLAG_INITIATED       0 /* init_box done (set/cleared in uncore_box_init/exit) */
#define UNCORE_BOX_FLAG_CTL_OFFS8       1 /* event config registers are 8-byte apart */
 125
/*
 * Named event exposed in sysfs; uncore_event_show() prints the config
 * string through the embedded kobj attribute.
 */
struct uncore_event_desc {
        struct kobj_attribute attr;
        const char *config;
};
 130
/*
 * Per-PCI-segment map from bus number to physical package id; entries
 * are chained on pci2phy_map_head and looked up via __find_pci2phy_map().
 */
struct pci2phy_map {
        struct list_head list;
        int segment;
        int pbus_to_physid[256];
};
 136
 137struct pci2phy_map *__find_pci2phy_map(int segment);
 138
 139ssize_t uncore_event_show(struct kobject *kobj,
 140                          struct kobj_attribute *attr, char *buf);
 141
/* initializer for struct uncore_event_desc: read-only sysfs attribute
 * rendered by uncore_event_show() from the _config string */
#define INTEL_UNCORE_EVENT_DESC(_name, _config)                 \
{                                                               \
        .attr   = __ATTR(_name, 0444, uncore_event_show, NULL), \
        .config = _config,                                      \
}
 147
/*
 * Define a read-only sysfs "format" attribute named _name that prints
 * the literal _format string (e.g. "config:0-7").  The BUILD_BUG_ON
 * guards against a format string that could not fit in one page.
 */
#define DEFINE_UNCORE_FORMAT_ATTR(_var, _name, _format)                 \
static ssize_t __uncore_##_var##_show(struct kobject *kobj,             \
                                struct kobj_attribute *attr,            \
                                char *page)                             \
{                                                                       \
        BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE);                     \
        return sprintf(page, _format "\n");                             \
}                                                                       \
static struct kobj_attribute format_attr_##_var =                       \
        __ATTR(_name, 0444, __uncore_##_var##_show, NULL)
 158
 159static inline unsigned uncore_pci_box_ctl(struct intel_uncore_box *box)
 160{
 161        return box->pmu->type->box_ctl;
 162}
 163
 164static inline unsigned uncore_pci_fixed_ctl(struct intel_uncore_box *box)
 165{
 166        return box->pmu->type->fixed_ctl;
 167}
 168
 169static inline unsigned uncore_pci_fixed_ctr(struct intel_uncore_box *box)
 170{
 171        return box->pmu->type->fixed_ctr;
 172}
 173
 174static inline
 175unsigned uncore_pci_event_ctl(struct intel_uncore_box *box, int idx)
 176{
 177        if (test_bit(UNCORE_BOX_FLAG_CTL_OFFS8, &box->flags))
 178                return idx * 8 + box->pmu->type->event_ctl;
 179
 180        return idx * 4 + box->pmu->type->event_ctl;
 181}
 182
 183static inline
 184unsigned uncore_pci_perf_ctr(struct intel_uncore_box *box, int idx)
 185{
 186        return idx * 8 + box->pmu->type->perf_ctr;
 187}
 188
 189static inline unsigned uncore_msr_box_offset(struct intel_uncore_box *box)
 190{
 191        struct intel_uncore_pmu *pmu = box->pmu;
 192        return pmu->type->msr_offsets ?
 193                pmu->type->msr_offsets[pmu->pmu_idx] :
 194                pmu->type->msr_offset * pmu->pmu_idx;
 195}
 196
 197static inline unsigned uncore_msr_box_ctl(struct intel_uncore_box *box)
 198{
 199        if (!box->pmu->type->box_ctl)
 200                return 0;
 201        return box->pmu->type->box_ctl + uncore_msr_box_offset(box);
 202}
 203
 204static inline unsigned uncore_msr_fixed_ctl(struct intel_uncore_box *box)
 205{
 206        if (!box->pmu->type->fixed_ctl)
 207                return 0;
 208        return box->pmu->type->fixed_ctl + uncore_msr_box_offset(box);
 209}
 210
 211static inline unsigned uncore_msr_fixed_ctr(struct intel_uncore_box *box)
 212{
 213        return box->pmu->type->fixed_ctr + uncore_msr_box_offset(box);
 214}
 215
 216static inline
 217unsigned uncore_msr_event_ctl(struct intel_uncore_box *box, int idx)
 218{
 219        return box->pmu->type->event_ctl +
 220                (box->pmu->type->pair_ctr_ctl ? 2 * idx : idx) +
 221                uncore_msr_box_offset(box);
 222}
 223
 224static inline
 225unsigned uncore_msr_perf_ctr(struct intel_uncore_box *box, int idx)
 226{
 227        return box->pmu->type->perf_ctr +
 228                (box->pmu->type->pair_ctr_ctl ? 2 * idx : idx) +
 229                uncore_msr_box_offset(box);
 230}
 231
 232static inline
 233unsigned uncore_fixed_ctl(struct intel_uncore_box *box)
 234{
 235        if (box->pci_dev)
 236                return uncore_pci_fixed_ctl(box);
 237        else
 238                return uncore_msr_fixed_ctl(box);
 239}
 240
 241static inline
 242unsigned uncore_fixed_ctr(struct intel_uncore_box *box)
 243{
 244        if (box->pci_dev)
 245                return uncore_pci_fixed_ctr(box);
 246        else
 247                return uncore_msr_fixed_ctr(box);
 248}
 249
 250static inline
 251unsigned uncore_event_ctl(struct intel_uncore_box *box, int idx)
 252{
 253        if (box->pci_dev)
 254                return uncore_pci_event_ctl(box, idx);
 255        else
 256                return uncore_msr_event_ctl(box, idx);
 257}
 258
 259static inline
 260unsigned uncore_perf_ctr(struct intel_uncore_box *box, int idx)
 261{
 262        if (box->pci_dev)
 263                return uncore_pci_perf_ctr(box, idx);
 264        else
 265                return uncore_msr_perf_ctr(box, idx);
 266}
 267
 268static inline int uncore_perf_ctr_bits(struct intel_uncore_box *box)
 269{
 270        return box->pmu->type->perf_ctr_bits;
 271}
 272
 273static inline int uncore_fixed_ctr_bits(struct intel_uncore_box *box)
 274{
 275        return box->pmu->type->fixed_ctr_bits;
 276}
 277
 278static inline int uncore_num_counters(struct intel_uncore_box *box)
 279{
 280        return box->pmu->type->num_counters;
 281}
 282
 283static inline void uncore_disable_box(struct intel_uncore_box *box)
 284{
 285        if (box->pmu->type->ops->disable_box)
 286                box->pmu->type->ops->disable_box(box);
 287}
 288
 289static inline void uncore_enable_box(struct intel_uncore_box *box)
 290{
 291        if (box->pmu->type->ops->enable_box)
 292                box->pmu->type->ops->enable_box(box);
 293}
 294
 295static inline void uncore_disable_event(struct intel_uncore_box *box,
 296                                struct perf_event *event)
 297{
 298        box->pmu->type->ops->disable_event(box, event);
 299}
 300
 301static inline void uncore_enable_event(struct intel_uncore_box *box,
 302                                struct perf_event *event)
 303{
 304        box->pmu->type->ops->enable_event(box, event);
 305}
 306
 307static inline u64 uncore_read_counter(struct intel_uncore_box *box,
 308                                struct perf_event *event)
 309{
 310        return box->pmu->type->ops->read_counter(box, event);
 311}
 312
 313static inline void uncore_box_init(struct intel_uncore_box *box)
 314{
 315        if (!test_and_set_bit(UNCORE_BOX_FLAG_INITIATED, &box->flags)) {
 316                if (box->pmu->type->ops->init_box)
 317                        box->pmu->type->ops->init_box(box);
 318        }
 319}
 320
 321static inline void uncore_box_exit(struct intel_uncore_box *box)
 322{
 323        if (test_and_clear_bit(UNCORE_BOX_FLAG_INITIATED, &box->flags)) {
 324                if (box->pmu->type->ops->exit_box)
 325                        box->pmu->type->ops->exit_box(box);
 326        }
 327}
 328
 329static inline bool uncore_box_is_fake(struct intel_uncore_box *box)
 330{
 331        return (box->pkgid < 0);
 332}
 333
 334static inline struct intel_uncore_pmu *uncore_event_to_pmu(struct perf_event *event)
 335{
 336        return container_of(event->pmu, struct intel_uncore_pmu, pmu);
 337}
 338
 339static inline struct intel_uncore_box *uncore_event_to_box(struct perf_event *event)
 340{
 341        return event->pmu_private;
 342}
 343
 344struct intel_uncore_box *uncore_pmu_to_box(struct intel_uncore_pmu *pmu, int cpu);
 345u64 uncore_msr_read_counter(struct intel_uncore_box *box, struct perf_event *event);
 346void uncore_pmu_start_hrtimer(struct intel_uncore_box *box);
 347void uncore_pmu_cancel_hrtimer(struct intel_uncore_box *box);
 348void uncore_pmu_event_read(struct perf_event *event);
 349void uncore_perf_event_update(struct intel_uncore_box *box, struct perf_event *event);
 350struct event_constraint *
 351uncore_get_constraint(struct intel_uncore_box *box, struct perf_event *event);
 352void uncore_put_constraint(struct intel_uncore_box *box, struct perf_event *event);
 353u64 uncore_shared_reg_config(struct intel_uncore_box *box, int idx);
 354
 355extern struct intel_uncore_type **uncore_msr_uncores;
 356extern struct intel_uncore_type **uncore_pci_uncores;
 357extern struct pci_driver *uncore_pci_driver;
 358extern raw_spinlock_t pci2phy_map_lock;
 359extern struct list_head pci2phy_map_head;
 360extern struct pci_extra_dev *uncore_extra_pci_dev;
 361extern struct event_constraint uncore_constraint_empty;
 362
 363/* perf_event_intel_uncore_snb.c */
 364int snb_uncore_pci_init(void);
 365int ivb_uncore_pci_init(void);
 366int hsw_uncore_pci_init(void);
 367int bdw_uncore_pci_init(void);
 368int skl_uncore_pci_init(void);
 369void snb_uncore_cpu_init(void);
 370void nhm_uncore_cpu_init(void);
 371void skl_uncore_cpu_init(void);
 372int snb_pci2phy_map_init(int devid);
 373
 374/* perf_event_intel_uncore_snbep.c */
 375int snbep_uncore_pci_init(void);
 376void snbep_uncore_cpu_init(void);
 377int ivbep_uncore_pci_init(void);
 378void ivbep_uncore_cpu_init(void);
 379int hswep_uncore_pci_init(void);
 380void hswep_uncore_cpu_init(void);
 381int bdx_uncore_pci_init(void);
 382void bdx_uncore_cpu_init(void);
 383int knl_uncore_pci_init(void);
 384void knl_uncore_cpu_init(void);
 385int skx_uncore_pci_init(void);
 386void skx_uncore_cpu_init(void);
 387
 388/* perf_event_intel_uncore_nhmex.c */
 389void nhmex_uncore_cpu_init(void);
 390