#include <linux/slab.h>
#include <linux/pci.h>
#include <asm/apicdef.h>

#include <linux/perf_event.h>
#include "../perf_event.h"

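/*
 * Uncore PMUs have no overflow interrupt, so every active box is polled
 * from an hrtimer at the intervals below.  The SNB IMC uses a much
 * shorter period because its free-running counters are only 32 bits wide.
 */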
#define UNCORE_PMU_NAME_LEN		32
#define UNCORE_PMU_HRTIMER_INTERVAL	(60LL * NSEC_PER_SEC)
#define UNCORE_SNB_IMC_HRTIMER_INTERVAL (5ULL * NSEC_PER_SEC)

#define UNCORE_FIXED_EVENT		0xff
#define UNCORE_PMC_IDX_MAX_GENERIC	8
#define UNCORE_PMC_IDX_FIXED		UNCORE_PMC_IDX_MAX_GENERIC
#define UNCORE_PMC_IDX_MAX		(UNCORE_PMC_IDX_FIXED + 1)

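/*
 * A PCI uncore device is described by a 32-bit value packing the PCI
 * device/function numbers together with the box type and index.
 */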
#define UNCORE_PCI_DEV_FULL_DATA(dev, func, type, idx)	\
		(((dev) << 24) | ((func) << 16) | ((type) << 8) | (idx))
#define UNCORE_PCI_DEV_DATA(type, idx)	(((type) << 8) | (idx))
#define UNCORE_PCI_DEV_DEV(data)	(((data) >> 24) & 0xff)
#define UNCORE_PCI_DEV_FUNC(data)	(((data) >> 16) & 0xff)
#define UNCORE_PCI_DEV_TYPE(data)	(((data) >> 8) & 0xff)
#define UNCORE_PCI_DEV_IDX(data)	((data) & 0xff)
#define UNCORE_EXTRA_PCI_DEV		0xff
#define UNCORE_EXTRA_PCI_DEV_MAX	3

#define UNCORE_EVENT_CONSTRAINT(c, n) EVENT_CONSTRAINT(c, n, 0xff)

struct pci_extra_dev {
	struct pci_dev *dev[UNCORE_EXTRA_PCI_DEV_MAX];
};

struct intel_uncore_ops;
struct intel_uncore_pmu;
struct intel_uncore_box;
struct uncore_event_desc;

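/*
 * Describes one type of uncore PMU: how many boxes and counters it has,
 * the width and layout of its registers, and the ops used to drive it.
 */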
struct intel_uncore_type {
	const char *name;
	int num_counters;
	int num_boxes;
	int perf_ctr_bits;
	int fixed_ctr_bits;
	unsigned perf_ctr;
	unsigned event_ctl;
	unsigned event_mask;
	unsigned event_mask_ext;
	unsigned fixed_ctr;
	unsigned fixed_ctl;
	unsigned box_ctl;
	unsigned msr_offset;
	unsigned num_shared_regs:8;
	unsigned single_fixed:1;
	unsigned pair_ctr_ctl:1;
	unsigned *msr_offsets;
	struct event_constraint unconstrainted;
	struct event_constraint *constraints;
	struct intel_uncore_pmu *pmus;
	struct intel_uncore_ops *ops;
	struct uncore_event_desc *event_descs;
	const struct attribute_group *attr_groups[4];
	struct pmu *pmu;
};

#define pmu_group attr_groups[0]
#define format_group attr_groups[1]
#define events_group attr_groups[2]

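/*
 * Low-level operations on a box; MSR-based and PCI-based uncores
 * provide different implementations.
 */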
struct intel_uncore_ops {
	void (*init_box)(struct intel_uncore_box *);
	void (*exit_box)(struct intel_uncore_box *);
	void (*disable_box)(struct intel_uncore_box *);
	void (*enable_box)(struct intel_uncore_box *);
	void (*disable_event)(struct intel_uncore_box *, struct perf_event *);
	void (*enable_event)(struct intel_uncore_box *, struct perf_event *);
	u64 (*read_counter)(struct intel_uncore_box *, struct perf_event *);
	int (*hw_config)(struct intel_uncore_box *, struct perf_event *);
	struct event_constraint *(*get_constraint)(struct intel_uncore_box *,
						   struct perf_event *);
	void (*put_constraint)(struct intel_uncore_box *, struct perf_event *);
};

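/*
 * One registered perf PMU per (type, index) pair; boxes[] holds the
 * per-package box instances behind it.
 */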
struct intel_uncore_pmu {
	struct pmu pmu;
	char name[UNCORE_PMU_NAME_LEN];
	int pmu_idx;
	int func_id;
	bool registered;
	atomic_t activeboxes;
	struct intel_uncore_type *type;
	struct intel_uncore_box **boxes;
};

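/*
 * State for a shared "extra" register; reference-counted since several
 * events may program the same register.
 */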
struct intel_uncore_extra_reg {
	raw_spinlock_t lock;
	u64 config, config1, config2;
	atomic_t ref;
};

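/* A box is one hardware instance of an uncore PMU type on a package. */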
struct intel_uncore_box {
	int pci_phys_id;
	int pkgid;
	int n_active;			/* number of active events */
	int n_events;
	int cpu;			/* cpu to collect events */
	unsigned long flags;
	atomic_t refcnt;
	struct perf_event *events[UNCORE_PMC_IDX_MAX];
	struct perf_event *event_list[UNCORE_PMC_IDX_MAX];
	struct event_constraint *event_constraint[UNCORE_PMC_IDX_MAX];
	unsigned long active_mask[BITS_TO_LONGS(UNCORE_PMC_IDX_MAX)];
	u64 tags[UNCORE_PMC_IDX_MAX];
	struct pci_dev *pci_dev;
	struct intel_uncore_pmu *pmu;
	u64 hrtimer_duration;		/* hrtimer timeout for this box */
	struct hrtimer hrtimer;
	struct list_head list;
	struct list_head active_list;
	void *io_addr;
	struct intel_uncore_extra_reg shared_regs[];
};

#define UNCORE_BOX_FLAG_INITIATED	0
#define UNCORE_BOX_FLAG_CTL_OFFS8	1	/* event config registers are 8 bytes apart */

struct uncore_event_desc {
	struct kobj_attribute attr;
	const char *config;
};

struct pci2phy_map {
	struct list_head list;
	int segment;
	int pbus_to_physid[256];
};

struct pci2phy_map *__find_pci2phy_map(int segment);

ssize_t uncore_event_show(struct kobject *kobj,
			  struct kobj_attribute *attr, char *buf);

/*
 * Builds an entry of a type's event_descs[] table, e.g.
 * INTEL_UNCORE_EVENT_DESC(data_reads, "event=0x01").
 */
#define INTEL_UNCORE_EVENT_DESC(_name, _config)			\
{								\
	.attr	= __ATTR(_name, 0444, uncore_event_show, NULL),	\
	.config	= _config,					\
}

/*
 * Emits a sysfs "format" attribute, e.g.
 * DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7") defines
 * format_attr_event, whose show() prints "config:0-7".
 */
#define DEFINE_UNCORE_FORMAT_ATTR(_var, _name, _format)			\
static ssize_t __uncore_##_var##_show(struct kobject *kobj,		\
				      struct kobj_attribute *attr,	\
				      char *page)			\
{									\
	BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE);			\
	return sprintf(page, _format "\n");				\
}									\
static struct kobj_attribute format_attr_##_var =			\
	__ATTR(_name, 0444, __uncore_##_var##_show, NULL)

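/* Helpers returning the PCI config space offset of each register. */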
static inline unsigned uncore_pci_box_ctl(struct intel_uncore_box *box)
{
	return box->pmu->type->box_ctl;
}

static inline unsigned uncore_pci_fixed_ctl(struct intel_uncore_box *box)
{
	return box->pmu->type->fixed_ctl;
}

static inline unsigned uncore_pci_fixed_ctr(struct intel_uncore_box *box)
{
	return box->pmu->type->fixed_ctr;
}

static inline
unsigned uncore_pci_event_ctl(struct intel_uncore_box *box, int idx)
{
	if (test_bit(UNCORE_BOX_FLAG_CTL_OFFS8, &box->flags))
		return idx * 8 + box->pmu->type->event_ctl;

	return idx * 4 + box->pmu->type->event_ctl;
}

static inline
unsigned uncore_pci_perf_ctr(struct intel_uncore_box *box, int idx)
{
	return idx * 8 + box->pmu->type->perf_ctr;
}

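/*
 * Helpers returning the MSR address of each register; each box of a
 * type sits at its own offset from the base MSRs.
 */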
static inline unsigned uncore_msr_box_offset(struct intel_uncore_box *box)
{
	struct intel_uncore_pmu *pmu = box->pmu;
	return pmu->type->msr_offsets ?
		pmu->type->msr_offsets[pmu->pmu_idx] :
		pmu->type->msr_offset * pmu->pmu_idx;
}

static inline unsigned uncore_msr_box_ctl(struct intel_uncore_box *box)
{
	if (!box->pmu->type->box_ctl)
		return 0;
	return box->pmu->type->box_ctl + uncore_msr_box_offset(box);
}

static inline unsigned uncore_msr_fixed_ctl(struct intel_uncore_box *box)
{
	if (!box->pmu->type->fixed_ctl)
		return 0;
	return box->pmu->type->fixed_ctl + uncore_msr_box_offset(box);
}

static inline unsigned uncore_msr_fixed_ctr(struct intel_uncore_box *box)
{
	return box->pmu->type->fixed_ctr + uncore_msr_box_offset(box);
}

static inline
unsigned uncore_msr_event_ctl(struct intel_uncore_box *box, int idx)
{
	return box->pmu->type->event_ctl +
		(box->pmu->type->pair_ctr_ctl ? 2 * idx : idx) +
		uncore_msr_box_offset(box);
}

static inline
unsigned uncore_msr_perf_ctr(struct intel_uncore_box *box, int idx)
{
	return box->pmu->type->perf_ctr +
		(box->pmu->type->pair_ctr_ctl ? 2 * idx : idx) +
		uncore_msr_box_offset(box);
}

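/* Generic accessors that pick the PCI or MSR variant based on the box. */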
static inline
unsigned uncore_fixed_ctl(struct intel_uncore_box *box)
{
	if (box->pci_dev)
		return uncore_pci_fixed_ctl(box);
	else
		return uncore_msr_fixed_ctl(box);
}

static inline
unsigned uncore_fixed_ctr(struct intel_uncore_box *box)
{
	if (box->pci_dev)
		return uncore_pci_fixed_ctr(box);
	else
		return uncore_msr_fixed_ctr(box);
}

static inline
unsigned uncore_event_ctl(struct intel_uncore_box *box, int idx)
{
	if (box->pci_dev)
		return uncore_pci_event_ctl(box, idx);
	else
		return uncore_msr_event_ctl(box, idx);
}

static inline
unsigned uncore_perf_ctr(struct intel_uncore_box *box, int idx)
{
	if (box->pci_dev)
		return uncore_pci_perf_ctr(box, idx);
	else
		return uncore_msr_perf_ctr(box, idx);
}

static inline int uncore_perf_ctr_bits(struct intel_uncore_box *box)
{
	return box->pmu->type->perf_ctr_bits;
}

static inline int uncore_fixed_ctr_bits(struct intel_uncore_box *box)
{
	return box->pmu->type->fixed_ctr_bits;
}

static inline int uncore_num_counters(struct intel_uncore_box *box)
{
	return box->pmu->type->num_counters;
}

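/*
 * Wrappers around the type's ops; only the box enable/disable hooks
 * are optional here.
 */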
static inline void uncore_disable_box(struct intel_uncore_box *box)
{
	if (box->pmu->type->ops->disable_box)
		box->pmu->type->ops->disable_box(box);
}

static inline void uncore_enable_box(struct intel_uncore_box *box)
{
	if (box->pmu->type->ops->enable_box)
		box->pmu->type->ops->enable_box(box);
}

static inline void uncore_disable_event(struct intel_uncore_box *box,
					struct perf_event *event)
{
	box->pmu->type->ops->disable_event(box, event);
}

static inline void uncore_enable_event(struct intel_uncore_box *box,
				       struct perf_event *event)
{
	box->pmu->type->ops->enable_event(box, event);
}

static inline u64 uncore_read_counter(struct intel_uncore_box *box,
				      struct perf_event *event)
{
	return box->pmu->type->ops->read_counter(box, event);
}

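/*
 * The INITIATED flag ensures the init/exit hooks run only once per
 * init/exit cycle, even if several paths race to start the box.
 */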
static inline void uncore_box_init(struct intel_uncore_box *box)
{
	if (!test_and_set_bit(UNCORE_BOX_FLAG_INITIATED, &box->flags)) {
		if (box->pmu->type->ops->init_box)
			box->pmu->type->ops->init_box(box);
	}
}

static inline void uncore_box_exit(struct intel_uncore_box *box)
{
	if (test_and_clear_bit(UNCORE_BOX_FLAG_INITIATED, &box->flags)) {
		if (box->pmu->type->ops->exit_box)
			box->pmu->type->ops->exit_box(box);
	}
}

static inline bool uncore_box_is_fake(struct intel_uncore_box *box)
{
	return (box->pkgid < 0);
}

static inline struct intel_uncore_pmu *uncore_event_to_pmu(struct perf_event *event)
{
	return container_of(event->pmu, struct intel_uncore_pmu, pmu);
}

static inline struct intel_uncore_box *uncore_event_to_box(struct perf_event *event)
{
	return event->pmu_private;
}

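/* Core driver interface, implemented in uncore.c. */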
struct intel_uncore_box *uncore_pmu_to_box(struct intel_uncore_pmu *pmu, int cpu);
u64 uncore_msr_read_counter(struct intel_uncore_box *box, struct perf_event *event);
void uncore_pmu_start_hrtimer(struct intel_uncore_box *box);
void uncore_pmu_cancel_hrtimer(struct intel_uncore_box *box);
void uncore_pmu_event_read(struct perf_event *event);
void uncore_perf_event_update(struct intel_uncore_box *box, struct perf_event *event);
struct event_constraint *
uncore_get_constraint(struct intel_uncore_box *box, struct perf_event *event);
void uncore_put_constraint(struct intel_uncore_box *box, struct perf_event *event);
u64 uncore_shared_reg_config(struct intel_uncore_box *box, int idx);

extern struct intel_uncore_type **uncore_msr_uncores;
extern struct intel_uncore_type **uncore_pci_uncores;
extern struct pci_driver *uncore_pci_driver;
extern raw_spinlock_t pci2phy_map_lock;
extern struct list_head pci2phy_map_head;
extern struct pci_extra_dev *uncore_extra_pci_dev;
extern struct event_constraint uncore_constraint_empty;

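/* Client (SNB and later) uncore support: uncore_snb.c */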
int snb_uncore_pci_init(void);
int ivb_uncore_pci_init(void);
int hsw_uncore_pci_init(void);
int bdw_uncore_pci_init(void);
int skl_uncore_pci_init(void);
void snb_uncore_cpu_init(void);
void nhm_uncore_cpu_init(void);
void skl_uncore_cpu_init(void);
int snb_pci2phy_map_init(int devid);

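/* Server (SNB-EP and later) uncore support: uncore_snbep.c */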
int snbep_uncore_pci_init(void);
void snbep_uncore_cpu_init(void);
int ivbep_uncore_pci_init(void);
void ivbep_uncore_cpu_init(void);
int hswep_uncore_pci_init(void);
void hswep_uncore_cpu_init(void);
int bdx_uncore_pci_init(void);
void bdx_uncore_cpu_init(void);
int knl_uncore_pci_init(void);
void knl_uncore_cpu_init(void);
int skx_uncore_pci_init(void);
void skx_uncore_cpu_init(void);

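/* Nehalem-EX/Westmere-EX uncore support: uncore_nhmex.c */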
void nhmex_uncore_cpu_init(void);