1
2#include <linux/slab.h>
3#include <linux/pci.h>
4#include <asm/apicdef.h>
5#include <linux/io-64-nonatomic-lo-hi.h>
6
7#include <linux/perf_event.h>
8#include "../perf_event.h"
9
#define UNCORE_PMU_NAME_LEN		32
#define UNCORE_PMU_HRTIMER_INTERVAL	(60LL * NSEC_PER_SEC)
#define UNCORE_SNB_IMC_HRTIMER_INTERVAL (5ULL * NSEC_PER_SEC)

/* Pseudo event code selecting the (single) fixed counter of a box. */
#define UNCORE_FIXED_EVENT		0xff
#define UNCORE_PMC_IDX_MAX_GENERIC	8
#define UNCORE_PMC_IDX_MAX_FIXED	1
#define UNCORE_PMC_IDX_MAX_FREERUNNING	1
/* Counter index space: [0, GENERIC) generic, then fixed, then freerunning. */
#define UNCORE_PMC_IDX_FIXED		UNCORE_PMC_IDX_MAX_GENERIC
#define UNCORE_PMC_IDX_FREERUNNING	(UNCORE_PMC_IDX_FIXED + \
					UNCORE_PMC_IDX_MAX_FIXED)
#define UNCORE_PMC_IDX_MAX		(UNCORE_PMC_IDX_FREERUNNING + \
					UNCORE_PMC_IDX_MAX_FREERUNNING)

/*
 * Pack a PCI device/function location together with an uncore type and box
 * index into a single int, and unpack the individual byte-wide fields.
 * All macro arguments are parenthesized so that expression arguments
 * (e.g. "base + 1") expand correctly.
 */
#define UNCORE_PCI_DEV_FULL_DATA(dev, func, type, idx)	\
		(((dev) << 24) | ((func) << 16) | ((type) << 8) | (idx))
#define UNCORE_PCI_DEV_DATA(type, idx)	(((type) << 8) | (idx))
#define UNCORE_PCI_DEV_DEV(data)	(((data) >> 24) & 0xff)
#define UNCORE_PCI_DEV_FUNC(data)	(((data) >> 16) & 0xff)
#define UNCORE_PCI_DEV_TYPE(data)	(((data) >> 8) & 0xff)
#define UNCORE_PCI_DEV_IDX(data)	((data) & 0xff)
/* Sentinel "type" marking an auxiliary (extra) PCI device. */
#define UNCORE_EXTRA_PCI_DEV		0xff
#define UNCORE_EXTRA_PCI_DEV_MAX	4

/* Uncore constraints use all 8 generic counters (0xff counter mask). */
#define UNCORE_EVENT_CONSTRAINT(c, n) EVENT_CONSTRAINT(c, n, 0xff)
35
/* Auxiliary PCI devices some uncore units need besides their main device. */
struct pci_extra_dev {
	struct pci_dev *dev[UNCORE_EXTRA_PCI_DEV_MAX];
};

/* Forward declarations; full definitions follow below. */
struct intel_uncore_ops;
struct intel_uncore_pmu;
struct intel_uncore_box;
struct uncore_event_desc;
struct freerunning_counters;
45
/*
 * Describes one family of identical uncore counter "boxes" (all instances
 * of e.g. a CBo).  Register locations are MSR addresses or PCI/MMIO
 * offsets, depending on how boxes of this type are accessed.
 */
struct intel_uncore_type {
	const char *name;		/* PMU name exposed to userspace */
	int num_counters;		/* generic counters per box */
	int num_boxes;			/* number of boxes of this type */
	int perf_ctr_bits;		/* width of a generic counter */
	int fixed_ctr_bits;		/* width of the fixed counter */
	int num_freerunning_types;	/* entries in ->freerunning[] */
	unsigned perf_ctr;		/* first generic counter register */
	unsigned event_ctl;		/* first event select register */
	unsigned event_mask;		/* valid config bits */
	unsigned event_mask_ext;	/* valid extended config bits */
	unsigned fixed_ctr;		/* fixed counter register */
	unsigned fixed_ctl;		/* fixed counter control register */
	unsigned box_ctl;		/* box-level control register */
	union {
		unsigned msr_offset;	/* MSR stride between boxes */
		unsigned mmio_offset;	/* MMIO stride between boxes */
	};
	unsigned num_shared_regs:8;	/* extra regs allocated per box */
	unsigned single_fixed:1;	/* only box 0 has a fixed counter */
	unsigned pair_ctr_ctl:1;	/* ctl/ctr registers are interleaved */
	unsigned *msr_offsets;		/* per-box offsets; overrides msr_offset */
	struct event_constraint unconstrainted;	/* [sic] default constraint */
	struct event_constraint *constraints;
	struct intel_uncore_pmu *pmus;	/* one PMU instance per box */
	struct intel_uncore_ops *ops;	/* access method callbacks */
	struct uncore_event_desc *event_descs;
	struct freerunning_counters *freerunning;
	const struct attribute_group *attr_groups[4];
	struct pmu *pmu;
};

/* Convenience aliases into ->attr_groups[] */
#define pmu_group attr_groups[0]
#define format_group attr_groups[1]
#define events_group attr_groups[2]
81
/*
 * Access-method callbacks (MSR, PCI config or MMIO backed) for one uncore
 * type.  Optional callbacks are NULL-checked by the inline wrappers below.
 */
struct intel_uncore_ops {
	void (*init_box)(struct intel_uncore_box *);	/* optional */
	void (*exit_box)(struct intel_uncore_box *);	/* optional */
	void (*disable_box)(struct intel_uncore_box *);	/* optional */
	void (*enable_box)(struct intel_uncore_box *);	/* optional */
	void (*disable_event)(struct intel_uncore_box *, struct perf_event *);
	void (*enable_event)(struct intel_uncore_box *, struct perf_event *);
	u64 (*read_counter)(struct intel_uncore_box *, struct perf_event *);
	int (*hw_config)(struct intel_uncore_box *, struct perf_event *);
	/* extra shared-register constraint handling (optional pair) */
	struct event_constraint *(*get_constraint)(struct intel_uncore_box *,
						   struct perf_event *);
	void (*put_constraint)(struct intel_uncore_box *, struct perf_event *);
};
95
/* One registered perf PMU, i.e. one box index of an uncore type. */
struct intel_uncore_pmu {
	struct pmu pmu;				/* embedded core-perf PMU */
	char name[UNCORE_PMU_NAME_LEN];
	int pmu_idx;				/* box index within ->type */
	int func_id;
	bool registered;			/* perf_pmu_register() done */
	atomic_t activeboxes;
	struct intel_uncore_type *type;
	struct intel_uncore_box **boxes;	/* boxes of this PMU; indexed per die — TODO confirm */
};
106
/* A shared (cross-event) extra register with its own lock and refcount. */
struct intel_uncore_extra_reg {
	raw_spinlock_t lock;
	u64 config, config1, config2;	/* cached programmed values */
	atomic_t ref;			/* number of events using this reg */
};
112
113struct intel_uncore_box {
114 int pci_phys_id;
115 int dieid;
116 int n_active;
117 int n_events;
118 int cpu;
119 unsigned long flags;
120 atomic_t refcnt;
121 struct perf_event *events[UNCORE_PMC_IDX_MAX];
122 struct perf_event *event_list[UNCORE_PMC_IDX_MAX];
123 struct event_constraint *event_constraint[UNCORE_PMC_IDX_MAX];
124 unsigned long active_mask[BITS_TO_LONGS(UNCORE_PMC_IDX_MAX)];
125 u64 tags[UNCORE_PMC_IDX_MAX];
126 struct pci_dev *pci_dev;
127 struct intel_uncore_pmu *pmu;
128 u64 hrtimer_duration;
129 struct hrtimer hrtimer;
130 struct list_head list;
131 struct list_head active_list;
132 void __iomem *io_addr;
133 struct intel_uncore_extra_reg shared_regs[0];
134};
135
136
/* Coffee Lake 8-core: the 8th CBo uses discontiguous MSR addresses. */
#define CFL_UNC_CBO_7_PERFEVTSEL0 0xf70
#define CFL_UNC_CBO_7_PER_CTR0 0xf76

/* box->flags bits */
#define UNCORE_BOX_FLAG_INITIATED 0
/* event ctl registers are 8-byte apart (PCI) */
#define UNCORE_BOX_FLAG_CTL_OFFS8 1
/* CFL 8th CBo MSRs at the CFL_UNC_CBO_7_* addresses above */
#define UNCORE_BOX_FLAG_CFL8_CBOX_MSR_OFFS 2

/* A named event exported via sysfs together with its config string. */
struct uncore_event_desc {
	struct kobj_attribute attr;
	const char *config;
};
150
/* Layout of one group of free-running counters. */
struct freerunning_counters {
	unsigned int counter_base;	/* address of the first counter */
	unsigned int counter_offset;	/* stride between counters */
	unsigned int box_offset;	/* stride between boxes */
	unsigned int num_counters;	/* counters in this group */
	unsigned int bits;		/* counter width */
};

/* Per-PCI-segment map from bus number to physical id. */
struct pci2phy_map {
	struct list_head list;
	int segment;			/* PCI segment (domain) number */
	int pbus_to_physid[256];	/* indexed by bus number */
};
164
/* Look up (or create) the map for @segment; NOTE(review): callers appear to
 * need pci2phy_map_lock held — confirm against uncore.c. */
struct pci2phy_map *__find_pci2phy_map(int segment);
int uncore_pcibus_to_physid(struct pci_bus *bus);

/* sysfs "show" backend for uncore_event_desc attributes. */
ssize_t uncore_event_show(struct kobject *kobj,
			  struct kobj_attribute *attr, char *buf);

/* Initializer for struct uncore_event_desc: read-only sysfs attribute. */
#define INTEL_UNCORE_EVENT_DESC(_name, _config)			\
{								\
	.attr = __ATTR(_name, 0444, uncore_event_show, NULL),	\
	.config = _config,					\
}

/*
 * Define a read-only sysfs attribute "format_attr_<_var>" whose show
 * method prints the literal _format string followed by a newline.
 */
#define DEFINE_UNCORE_FORMAT_ATTR(_var, _name, _format)			\
static ssize_t __uncore_##_var##_show(struct kobject *kobj,		\
				      struct kobj_attribute *attr,	\
				      char *page)			\
{									\
	BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE);			\
	return sprintf(page, _format "\n");				\
}									\
static struct kobj_attribute format_attr_##_var =			\
	__ATTR(_name, 0444, __uncore_##_var##_show, NULL)
187
188static inline bool uncore_pmc_fixed(int idx)
189{
190 return idx == UNCORE_PMC_IDX_FIXED;
191}
192
193static inline bool uncore_pmc_freerunning(int idx)
194{
195 return idx == UNCORE_PMC_IDX_FREERUNNING;
196}
197
198static inline
199unsigned int uncore_mmio_box_ctl(struct intel_uncore_box *box)
200{
201 return box->pmu->type->box_ctl +
202 box->pmu->type->mmio_offset * box->pmu->pmu_idx;
203}
204
205static inline unsigned uncore_pci_box_ctl(struct intel_uncore_box *box)
206{
207 return box->pmu->type->box_ctl;
208}
209
210static inline unsigned uncore_pci_fixed_ctl(struct intel_uncore_box *box)
211{
212 return box->pmu->type->fixed_ctl;
213}
214
215static inline unsigned uncore_pci_fixed_ctr(struct intel_uncore_box *box)
216{
217 return box->pmu->type->fixed_ctr;
218}
219
220static inline
221unsigned uncore_pci_event_ctl(struct intel_uncore_box *box, int idx)
222{
223 if (test_bit(UNCORE_BOX_FLAG_CTL_OFFS8, &box->flags))
224 return idx * 8 + box->pmu->type->event_ctl;
225
226 return idx * 4 + box->pmu->type->event_ctl;
227}
228
229static inline
230unsigned uncore_pci_perf_ctr(struct intel_uncore_box *box, int idx)
231{
232 return idx * 8 + box->pmu->type->perf_ctr;
233}
234
235static inline unsigned uncore_msr_box_offset(struct intel_uncore_box *box)
236{
237 struct intel_uncore_pmu *pmu = box->pmu;
238 return pmu->type->msr_offsets ?
239 pmu->type->msr_offsets[pmu->pmu_idx] :
240 pmu->type->msr_offset * pmu->pmu_idx;
241}
242
243static inline unsigned uncore_msr_box_ctl(struct intel_uncore_box *box)
244{
245 if (!box->pmu->type->box_ctl)
246 return 0;
247 return box->pmu->type->box_ctl + uncore_msr_box_offset(box);
248}
249
250static inline unsigned uncore_msr_fixed_ctl(struct intel_uncore_box *box)
251{
252 if (!box->pmu->type->fixed_ctl)
253 return 0;
254 return box->pmu->type->fixed_ctl + uncore_msr_box_offset(box);
255}
256
257static inline unsigned uncore_msr_fixed_ctr(struct intel_uncore_box *box)
258{
259 return box->pmu->type->fixed_ctr + uncore_msr_box_offset(box);
260}
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
/* Counter index within a free-running group: config bits 11:8. */
static inline unsigned int uncore_freerunning_idx(u64 config)
{
	unsigned int umask = config >> 8;

	return umask & 0xf;
}
295
/* Free-running events occupy the umask range starting here. */
#define UNCORE_FREERUNNING_UMASK_START 0x10

/* Free-running group: config bits 15:12, rebased to the umask start. */
static inline unsigned int uncore_freerunning_type(u64 config)
{
	u64 umask = config >> 8;

	return ((umask - UNCORE_FREERUNNING_UMASK_START) >> 4) & 0xf;
}
302
303static inline
304unsigned int uncore_freerunning_counter(struct intel_uncore_box *box,
305 struct perf_event *event)
306{
307 unsigned int type = uncore_freerunning_type(event->hw.config);
308 unsigned int idx = uncore_freerunning_idx(event->hw.config);
309 struct intel_uncore_pmu *pmu = box->pmu;
310
311 return pmu->type->freerunning[type].counter_base +
312 pmu->type->freerunning[type].counter_offset * idx +
313 pmu->type->freerunning[type].box_offset * pmu->pmu_idx;
314}
315
316static inline
317unsigned uncore_msr_event_ctl(struct intel_uncore_box *box, int idx)
318{
319 if (test_bit(UNCORE_BOX_FLAG_CFL8_CBOX_MSR_OFFS, &box->flags)) {
320 return CFL_UNC_CBO_7_PERFEVTSEL0 +
321 (box->pmu->type->pair_ctr_ctl ? 2 * idx : idx);
322 } else {
323 return box->pmu->type->event_ctl +
324 (box->pmu->type->pair_ctr_ctl ? 2 * idx : idx) +
325 uncore_msr_box_offset(box);
326 }
327}
328
329static inline
330unsigned uncore_msr_perf_ctr(struct intel_uncore_box *box, int idx)
331{
332 if (test_bit(UNCORE_BOX_FLAG_CFL8_CBOX_MSR_OFFS, &box->flags)) {
333 return CFL_UNC_CBO_7_PER_CTR0 +
334 (box->pmu->type->pair_ctr_ctl ? 2 * idx : idx);
335 } else {
336 return box->pmu->type->perf_ctr +
337 (box->pmu->type->pair_ctr_ctl ? 2 * idx : idx) +
338 uncore_msr_box_offset(box);
339 }
340}
341
342static inline
343unsigned uncore_fixed_ctl(struct intel_uncore_box *box)
344{
345 if (box->pci_dev || box->io_addr)
346 return uncore_pci_fixed_ctl(box);
347 else
348 return uncore_msr_fixed_ctl(box);
349}
350
351static inline
352unsigned uncore_fixed_ctr(struct intel_uncore_box *box)
353{
354 if (box->pci_dev || box->io_addr)
355 return uncore_pci_fixed_ctr(box);
356 else
357 return uncore_msr_fixed_ctr(box);
358}
359
360static inline
361unsigned uncore_event_ctl(struct intel_uncore_box *box, int idx)
362{
363 if (box->pci_dev || box->io_addr)
364 return uncore_pci_event_ctl(box, idx);
365 else
366 return uncore_msr_event_ctl(box, idx);
367}
368
369static inline
370unsigned uncore_perf_ctr(struct intel_uncore_box *box, int idx)
371{
372 if (box->pci_dev || box->io_addr)
373 return uncore_pci_perf_ctr(box, idx);
374 else
375 return uncore_msr_perf_ctr(box, idx);
376}
377
378static inline int uncore_perf_ctr_bits(struct intel_uncore_box *box)
379{
380 return box->pmu->type->perf_ctr_bits;
381}
382
383static inline int uncore_fixed_ctr_bits(struct intel_uncore_box *box)
384{
385 return box->pmu->type->fixed_ctr_bits;
386}
387
388static inline
389unsigned int uncore_freerunning_bits(struct intel_uncore_box *box,
390 struct perf_event *event)
391{
392 unsigned int type = uncore_freerunning_type(event->hw.config);
393
394 return box->pmu->type->freerunning[type].bits;
395}
396
397static inline int uncore_num_freerunning(struct intel_uncore_box *box,
398 struct perf_event *event)
399{
400 unsigned int type = uncore_freerunning_type(event->hw.config);
401
402 return box->pmu->type->freerunning[type].num_counters;
403}
404
405static inline int uncore_num_freerunning_types(struct intel_uncore_box *box,
406 struct perf_event *event)
407{
408 return box->pmu->type->num_freerunning_types;
409}
410
411static inline bool check_valid_freerunning_event(struct intel_uncore_box *box,
412 struct perf_event *event)
413{
414 unsigned int type = uncore_freerunning_type(event->hw.config);
415 unsigned int idx = uncore_freerunning_idx(event->hw.config);
416
417 return (type < uncore_num_freerunning_types(box, event)) &&
418 (idx < uncore_num_freerunning(box, event));
419}
420
421static inline int uncore_num_counters(struct intel_uncore_box *box)
422{
423 return box->pmu->type->num_counters;
424}
425
426static inline bool is_freerunning_event(struct perf_event *event)
427{
428 u64 cfg = event->attr.config;
429
430 return ((cfg & UNCORE_FIXED_EVENT) == UNCORE_FIXED_EVENT) &&
431 (((cfg >> 8) & 0xff) >= UNCORE_FREERUNNING_UMASK_START);
432}
433
434
435static inline int uncore_freerunning_hw_config(struct intel_uncore_box *box,
436 struct perf_event *event)
437{
438 if (is_freerunning_event(event))
439 return 0;
440
441 return -EINVAL;
442}
443
444static inline void uncore_disable_box(struct intel_uncore_box *box)
445{
446 if (box->pmu->type->ops->disable_box)
447 box->pmu->type->ops->disable_box(box);
448}
449
450static inline void uncore_enable_box(struct intel_uncore_box *box)
451{
452 if (box->pmu->type->ops->enable_box)
453 box->pmu->type->ops->enable_box(box);
454}
455
456static inline void uncore_disable_event(struct intel_uncore_box *box,
457 struct perf_event *event)
458{
459 box->pmu->type->ops->disable_event(box, event);
460}
461
462static inline void uncore_enable_event(struct intel_uncore_box *box,
463 struct perf_event *event)
464{
465 box->pmu->type->ops->enable_event(box, event);
466}
467
468static inline u64 uncore_read_counter(struct intel_uncore_box *box,
469 struct perf_event *event)
470{
471 return box->pmu->type->ops->read_counter(box, event);
472}
473
474static inline void uncore_box_init(struct intel_uncore_box *box)
475{
476 if (!test_and_set_bit(UNCORE_BOX_FLAG_INITIATED, &box->flags)) {
477 if (box->pmu->type->ops->init_box)
478 box->pmu->type->ops->init_box(box);
479 }
480}
481
482static inline void uncore_box_exit(struct intel_uncore_box *box)
483{
484 if (test_and_clear_bit(UNCORE_BOX_FLAG_INITIATED, &box->flags)) {
485 if (box->pmu->type->ops->exit_box)
486 box->pmu->type->ops->exit_box(box);
487 }
488}
489
490static inline bool uncore_box_is_fake(struct intel_uncore_box *box)
491{
492 return (box->dieid < 0);
493}
494
495static inline struct intel_uncore_pmu *uncore_event_to_pmu(struct perf_event *event)
496{
497 return container_of(event->pmu, struct intel_uncore_pmu, pmu);
498}
499
500static inline struct intel_uncore_box *uncore_event_to_box(struct perf_event *event)
501{
502 return event->pmu_private;
503}
504
/* Core uncore driver entry points (defined in the main uncore .c file). */
struct intel_uncore_box *uncore_pmu_to_box(struct intel_uncore_pmu *pmu, int cpu);
u64 uncore_msr_read_counter(struct intel_uncore_box *box, struct perf_event *event);
void uncore_mmio_exit_box(struct intel_uncore_box *box);
u64 uncore_mmio_read_counter(struct intel_uncore_box *box,
			     struct perf_event *event);
void uncore_pmu_start_hrtimer(struct intel_uncore_box *box);
void uncore_pmu_cancel_hrtimer(struct intel_uncore_box *box);
/* perf_event PMU callbacks shared by all uncore PMUs */
void uncore_pmu_event_start(struct perf_event *event, int flags);
void uncore_pmu_event_stop(struct perf_event *event, int flags);
int uncore_pmu_event_add(struct perf_event *event, int flags);
void uncore_pmu_event_del(struct perf_event *event, int flags);
void uncore_pmu_event_read(struct perf_event *event);
void uncore_perf_event_update(struct intel_uncore_box *box, struct perf_event *event);
/* generic shared-register constraint handling */
struct event_constraint *
uncore_get_constraint(struct intel_uncore_box *box, struct perf_event *event);
void uncore_put_constraint(struct intel_uncore_box *box, struct perf_event *event);
u64 uncore_shared_reg_config(struct intel_uncore_box *box, int idx);

/* Global tables and state shared between the uncore source files. */
extern struct intel_uncore_type **uncore_msr_uncores;
extern struct intel_uncore_type **uncore_pci_uncores;
extern struct intel_uncore_type **uncore_mmio_uncores;
extern struct pci_driver *uncore_pci_driver;
extern raw_spinlock_t pci2phy_map_lock;
extern struct list_head pci2phy_map_head;
extern struct pci_extra_dev *uncore_extra_pci_dev;
extern struct event_constraint uncore_constraint_empty;
531
532
/* Client (SNB and later) uncore support */
int snb_uncore_pci_init(void);
int ivb_uncore_pci_init(void);
int hsw_uncore_pci_init(void);
int bdw_uncore_pci_init(void);
int skl_uncore_pci_init(void);
void snb_uncore_cpu_init(void);
void nhm_uncore_cpu_init(void);
void skl_uncore_cpu_init(void);
void icl_uncore_cpu_init(void);
int snb_pci2phy_map_init(int devid);

/* Server (SNB-EP and later) uncore support */
int snbep_uncore_pci_init(void);
void snbep_uncore_cpu_init(void);
int ivbep_uncore_pci_init(void);
void ivbep_uncore_cpu_init(void);
int hswep_uncore_pci_init(void);
void hswep_uncore_cpu_init(void);
int bdx_uncore_pci_init(void);
void bdx_uncore_cpu_init(void);
int knl_uncore_pci_init(void);
void knl_uncore_cpu_init(void);
int skx_uncore_pci_init(void);
void skx_uncore_cpu_init(void);
int snr_uncore_pci_init(void);
void snr_uncore_cpu_init(void);
void snr_uncore_mmio_init(void);

/* Nehalem-EX uncore support */
void nhmex_uncore_cpu_init(void);