1
2
3#include "uncore.h"
4
5
/* SNB-EP: PCI config offsets used for bus-to-socket mapping */
#define SNBEP_CPUNODEID 0x40
#define SNBEP_GIDNIDMAP 0x54

/* SNB-EP box-level control register bits */
#define SNBEP_PMON_BOX_CTL_RST_CTRL (1 << 0)
#define SNBEP_PMON_BOX_CTL_RST_CTRS (1 << 1)
#define SNBEP_PMON_BOX_CTL_FRZ (1 << 8)
#define SNBEP_PMON_BOX_CTL_FRZ_EN (1 << 16)
/* Init value: reset control + counters, enable freeze support */
#define SNBEP_PMON_BOX_CTL_INT (SNBEP_PMON_BOX_CTL_RST_CTRL | \
 SNBEP_PMON_BOX_CTL_RST_CTRS | \
 SNBEP_PMON_BOX_CTL_FRZ_EN)

/* SNB-EP event control register fields */
#define SNBEP_PMON_CTL_EV_SEL_MASK 0x000000ff
#define SNBEP_PMON_CTL_UMASK_MASK 0x0000ff00
#define SNBEP_PMON_CTL_RST (1 << 17)
#define SNBEP_PMON_CTL_EDGE_DET (1 << 18)
#define SNBEP_PMON_CTL_EV_SEL_EXT (1 << 21)
#define SNBEP_PMON_CTL_EN (1 << 22)
#define SNBEP_PMON_CTL_INVERT (1 << 23)
#define SNBEP_PMON_CTL_TRESH_MASK 0xff000000
/* Bits a raw user-supplied event config may set for generic boxes */
#define SNBEP_PMON_RAW_EVENT_MASK (SNBEP_PMON_CTL_EV_SEL_MASK | \
 SNBEP_PMON_CTL_UMASK_MASK | \
 SNBEP_PMON_CTL_EDGE_DET | \
 SNBEP_PMON_CTL_INVERT | \
 SNBEP_PMON_CTL_TRESH_MASK)

/* SNB-EP Ubox event control: threshold field is only 5 bits wide */
#define SNBEP_U_MSR_PMON_CTL_TRESH_MASK 0x1f000000
#define SNBEP_U_MSR_PMON_RAW_EVENT_MASK \
 (SNBEP_PMON_CTL_EV_SEL_MASK | \
 SNBEP_PMON_CTL_UMASK_MASK | \
 SNBEP_PMON_CTL_EDGE_DET | \
 SNBEP_PMON_CTL_INVERT | \
 SNBEP_U_MSR_PMON_CTL_TRESH_MASK)

/* SNB-EP Cbo event control: adds the thread-id enable bit */
#define SNBEP_CBO_PMON_CTL_TID_EN (1 << 19)
#define SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK (SNBEP_PMON_RAW_EVENT_MASK | \
 SNBEP_CBO_PMON_CTL_TID_EN)

/* SNB-EP PCU event control: occupancy sub-event select and modifiers */
#define SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK 0x0000c000
#define SNBEP_PCU_MSR_PMON_CTL_TRESH_MASK 0x1f000000
#define SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT (1 << 30)
#define SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET (1 << 31)
#define SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK \
 (SNBEP_PMON_CTL_EV_SEL_MASK | \
 SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK | \
 SNBEP_PMON_CTL_EDGE_DET | \
 SNBEP_PMON_CTL_INVERT | \
 SNBEP_PCU_MSR_PMON_CTL_TRESH_MASK | \
 SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT | \
 SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET)

/* SNB-EP QPI event control: adds the extended event-select bit (bit 21) */
#define SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK \
 (SNBEP_PMON_RAW_EVENT_MASK | \
 SNBEP_PMON_CTL_EV_SEL_EXT)

/* SNB-EP PCI-box control/event-select register offsets */
#define SNBEP_PCI_PMON_BOX_CTL 0xf4
#define SNBEP_PCI_PMON_CTL0 0xd8
/* SNB-EP PCI-box counter register offset */
#define SNBEP_PCI_PMON_CTR0 0xa0

/* SNB-EP home agent match registers */
#define SNBEP_HA_PCI_PMON_BOX_ADDRMATCH0 0x40
#define SNBEP_HA_PCI_PMON_BOX_ADDRMATCH1 0x44
#define SNBEP_HA_PCI_PMON_BOX_OPCODEMATCH 0x48

/* SNB-EP memory controller fixed counter (per channel "y") */
#define SNBEP_MC_CHy_PCI_PMON_FIXED_CTL 0xf0
#define SNBEP_MC_CHy_PCI_PMON_FIXED_CTR 0xd0

/* SNB-EP QPI packet match/mask registers (per port "y") */
#define SNBEP_Q_Py_PCI_PMON_PKT_MATCH0 0x228
#define SNBEP_Q_Py_PCI_PMON_PKT_MATCH1 0x22c
#define SNBEP_Q_Py_PCI_PMON_PKT_MASK0 0x238
#define SNBEP_Q_Py_PCI_PMON_PKT_MASK1 0x23c

/* SNB-EP Ubox MSRs */
#define SNBEP_U_MSR_PMON_CTR0 0xc16
#define SNBEP_U_MSR_PMON_CTL0 0xc10

#define SNBEP_U_MSR_PMON_UCLK_FIXED_CTL 0xc08
#define SNBEP_U_MSR_PMON_UCLK_FIXED_CTR 0xc09

/* SNB-EP Cbo MSRs (box 0; subsequent boxes at +SNBEP_CBO_MSR_OFFSET) */
#define SNBEP_C0_MSR_PMON_CTR0 0xd16
#define SNBEP_C0_MSR_PMON_CTL0 0xd10
#define SNBEP_C0_MSR_PMON_BOX_CTL 0xd04
#define SNBEP_C0_MSR_PMON_BOX_FILTER 0xd14
#define SNBEP_CBO_MSR_OFFSET 0x20

/* SNB-EP Cbo filter register fields */
#define SNBEP_CB0_MSR_PMON_BOX_FILTER_TID 0x1f
#define SNBEP_CB0_MSR_PMON_BOX_FILTER_NID 0x3fc00
#define SNBEP_CB0_MSR_PMON_BOX_FILTER_STATE 0x7c0000
#define SNBEP_CB0_MSR_PMON_BOX_FILTER_OPC 0xff800000

/*
 * Build an extra_reg table entry that routes extra event config to the
 * Cbo filter MSR; "i" is the filter-field index bitmask (see the
 * cbox get/put constraint helpers below).
 */
#define SNBEP_CBO_EVENT_EXTRA_REG(e, m, i) { \
 .event = (e), \
 .msr = SNBEP_C0_MSR_PMON_BOX_FILTER, \
 .config_mask = (m), \
 .idx = (i) \
}

/* SNB-EP PCU MSRs */
#define SNBEP_PCU_MSR_PMON_CTR0 0xc36
#define SNBEP_PCU_MSR_PMON_CTL0 0xc30
#define SNBEP_PCU_MSR_PMON_BOX_CTL 0xc24
#define SNBEP_PCU_MSR_PMON_BOX_FILTER 0xc34
#define SNBEP_PCU_MSR_PMON_BOX_FILTER_MASK 0xffffffff
#define SNBEP_PCU_MSR_CORE_C3_CTR 0x3fc
#define SNBEP_PCU_MSR_CORE_C6_CTR 0x3fd
116
117
/* IVB-EP box init: reset control and counters (no per-box freeze-enable) */
#define IVBEP_PMON_BOX_CTL_INT (SNBEP_PMON_BOX_CTL_RST_CTRL | \
 SNBEP_PMON_BOX_CTL_RST_CTRS)
/* IVB-EP generic raw event mask (no invert bit, unlike SNB-EP) */
#define IVBEP_PMON_RAW_EVENT_MASK (SNBEP_PMON_CTL_EV_SEL_MASK | \
 SNBEP_PMON_CTL_UMASK_MASK | \
 SNBEP_PMON_CTL_EDGE_DET | \
 SNBEP_PMON_CTL_TRESH_MASK)

/* IVB-EP Ubox global control: freeze/unfreeze all boxes */
#define IVBEP_U_MSR_PMON_GLOBAL_CTL 0xc00
#define IVBEP_U_PMON_GLOBAL_FRZ_ALL (1 << 31)
#define IVBEP_U_PMON_GLOBAL_UNFRZ_ALL (1 << 29)

/* IVB-EP Ubox raw event mask (5-bit threshold) */
#define IVBEP_U_MSR_PMON_RAW_EVENT_MASK \
 (SNBEP_PMON_CTL_EV_SEL_MASK | \
 SNBEP_PMON_CTL_UMASK_MASK | \
 SNBEP_PMON_CTL_EDGE_DET | \
 SNBEP_U_MSR_PMON_CTL_TRESH_MASK)

/* IVB-EP Cbo raw event mask: adds thread-id enable */
#define IVBEP_CBO_MSR_PMON_RAW_EVENT_MASK (IVBEP_PMON_RAW_EVENT_MASK | \
 SNBEP_CBO_PMON_CTL_TID_EN)

/* IVB-EP Cbo filter fields (64-bit filter register) */
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_TID (0x1fULL << 0)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_LINK (0xfULL << 5)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_STATE (0x3fULL << 17)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_NID (0xffffULL << 32)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_OPC (0x1ffULL << 52)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_C6 (0x1ULL << 61)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_NC (0x1ULL << 62)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_ISOC (0x1ULL << 63)

/* IVB-EP home agent: queue occupancy reset bit */
#define IVBEP_HA_PCI_PMON_CTL_Q_OCC_RST (1 << 16)
#define IVBEP_HA_PCI_PMON_RAW_EVENT_MASK \
 (IVBEP_PMON_RAW_EVENT_MASK | \
 IVBEP_HA_PCI_PMON_CTL_Q_OCC_RST)

/* IVB-EP PCU raw event mask (no plain invert; occupancy modifiers instead) */
#define IVBEP_PCU_MSR_PMON_RAW_EVENT_MASK \
 (SNBEP_PMON_CTL_EV_SEL_MASK | \
 SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK | \
 SNBEP_PMON_CTL_EDGE_DET | \
 SNBEP_PCU_MSR_PMON_CTL_TRESH_MASK | \
 SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT | \
 SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET)

/* IVB-EP QPI raw event mask: adds the extended event-select bit */
#define IVBEP_QPI_PCI_PMON_RAW_EVENT_MASK \
 (IVBEP_PMON_RAW_EVENT_MASK | \
 SNBEP_PMON_CTL_EV_SEL_EXT)

/* Extract the i-th n-bit-wide field from x, keeping x's type */
#define __BITS_VALUE(x, i, n) ((typeof(x))(((x) >> ((i) * (n))) & \
 ((1ULL << (n)) - 1)))
167
168
/* HSW-EP Ubox MSRs */
#define HSWEP_U_MSR_PMON_CTR0 0x709
#define HSWEP_U_MSR_PMON_CTL0 0x705
#define HSWEP_U_MSR_PMON_FILTER 0x707

#define HSWEP_U_MSR_PMON_UCLK_FIXED_CTL 0x703
#define HSWEP_U_MSR_PMON_UCLK_FIXED_CTR 0x704

/* HSW-EP Ubox filter fields: thread id + core id */
#define HSWEP_U_MSR_PMON_BOX_FILTER_TID (0x1 << 0)
#define HSWEP_U_MSR_PMON_BOX_FILTER_CID (0x1fULL << 1)
#define HSWEP_U_MSR_PMON_BOX_FILTER_MASK \
 (HSWEP_U_MSR_PMON_BOX_FILTER_TID | \
 HSWEP_U_MSR_PMON_BOX_FILTER_CID)

/* HSW-EP Cbo MSRs (box 0; subsequent boxes at +HSWEP_CBO_MSR_OFFSET) */
#define HSWEP_C0_MSR_PMON_CTR0 0xe08
#define HSWEP_C0_MSR_PMON_CTL0 0xe01
#define HSWEP_C0_MSR_PMON_BOX_CTL 0xe00
#define HSWEP_C0_MSR_PMON_BOX_FILTER0 0xe05
#define HSWEP_CBO_MSR_OFFSET 0x10

/* HSW-EP Cbo filter fields (64-bit filter register) */
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_TID (0x3fULL << 0)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_LINK (0xfULL << 6)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_STATE (0x7fULL << 17)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_NID (0xffffULL << 32)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_OPC (0x1ffULL << 52)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_C6 (0x1ULL << 61)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_NC (0x1ULL << 62)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_ISOC (0x1ULL << 63)

/* HSW-EP Sbo MSRs (box 0; subsequent boxes at +HSWEP_SBOX_MSR_OFFSET) */
#define HSWEP_S0_MSR_PMON_CTR0 0x726
#define HSWEP_S0_MSR_PMON_CTL0 0x721
#define HSWEP_S0_MSR_PMON_BOX_CTL 0x720
#define HSWEP_SBOX_MSR_OFFSET 0xa
#define HSWEP_S_MSR_PMON_RAW_EVENT_MASK (SNBEP_PMON_RAW_EVENT_MASK | \
 SNBEP_CBO_PMON_CTL_TID_EN)

/* HSW-EP PCU MSRs */
#define HSWEP_PCU_MSR_PMON_CTR0 0x717
#define HSWEP_PCU_MSR_PMON_CTL0 0x711
#define HSWEP_PCU_MSR_PMON_BOX_CTL 0x710
#define HSWEP_PCU_MSR_PMON_BOX_FILTER 0x715
213
214
/* KNL Ubox: SNB-EP Ubox mask plus thread-id enable */
#define KNL_U_MSR_PMON_RAW_EVENT_MASK \
 (SNBEP_U_MSR_PMON_RAW_EVENT_MASK | \
 SNBEP_CBO_PMON_CTL_TID_EN)

/* KNL CHA */
#define KNL_CHA_MSR_OFFSET 0xc
#define KNL_CHA_MSR_PMON_CTL_QOR (1 << 16)
#define KNL_CHA_MSR_PMON_RAW_EVENT_MASK \
 (SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK | \
 KNL_CHA_MSR_PMON_CTL_QOR)
/* KNL CHA filter fields */
#define KNL_CHA_MSR_PMON_BOX_FILTER_TID 0x1ff
#define KNL_CHA_MSR_PMON_BOX_FILTER_STATE (7 << 18)
#define KNL_CHA_MSR_PMON_BOX_FILTER_OP (0xfffffe2aULL << 32)
#define KNL_CHA_MSR_PMON_BOX_FILTER_REMOTE_NODE (0x1ULL << 32)
#define KNL_CHA_MSR_PMON_BOX_FILTER_LOCAL_NODE (0x1ULL << 33)
#define KNL_CHA_MSR_PMON_BOX_FILTER_NNC (0x1ULL << 37)

/* KNL UCLK (split 64-bit counters: *_LOW is the low half) */
#define KNL_UCLK_MSR_PMON_CTR0_LOW 0x400
#define KNL_UCLK_MSR_PMON_CTL0 0x420
#define KNL_UCLK_MSR_PMON_BOX_CTL 0x430
#define KNL_UCLK_MSR_PMON_UCLK_FIXED_LOW 0x44c
#define KNL_UCLK_MSR_PMON_UCLK_FIXED_CTL 0x454
#define KNL_PMON_FIXED_CTL_EN 0x1

/* KNL EDC (embedded DRAM controller) ECLK MSRs */
#define KNL_EDC0_ECLK_MSR_PMON_CTR0_LOW 0xa00
#define KNL_EDC0_ECLK_MSR_PMON_CTL0 0xa20
#define KNL_EDC0_ECLK_MSR_PMON_BOX_CTL 0xa30
#define KNL_EDC0_ECLK_MSR_PMON_ECLK_FIXED_LOW 0xa3c
#define KNL_EDC0_ECLK_MSR_PMON_ECLK_FIXED_CTL 0xa44

/* KNL MC (memory controller) channel MSRs */
#define KNL_MC0_CH0_MSR_PMON_CTR0_LOW 0xb00
#define KNL_MC0_CH0_MSR_PMON_CTL0 0xb20
#define KNL_MC0_CH0_MSR_PMON_BOX_CTL 0xb30
#define KNL_MC0_CH0_MSR_PMON_FIXED_LOW 0xb3c
#define KNL_MC0_CH0_MSR_PMON_FIXED_CTL 0xb44

/* KNL IRP */
#define KNL_IRP_PCI_PMON_BOX_CTL 0xf0
#define KNL_IRP_PCI_PMON_RAW_EVENT_MASK (SNBEP_PMON_RAW_EVENT_MASK | \
 KNL_CHA_MSR_PMON_CTL_QOR)
/* KNL PCU: 7-bit event select, 6-bit threshold, occupancy-counter select */
#define KNL_PCU_PMON_CTL_EV_SEL_MASK 0x0000007f
#define KNL_PCU_PMON_CTL_USE_OCC_CTR (1 << 7)
#define KNL_PCU_MSR_PMON_CTL_TRESH_MASK 0x3f000000
#define KNL_PCU_MSR_PMON_RAW_EVENT_MASK \
 (KNL_PCU_PMON_CTL_EV_SEL_MASK | \
 KNL_PCU_PMON_CTL_USE_OCC_CTR | \
 SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK | \
 SNBEP_PMON_CTL_EDGE_DET | \
 SNBEP_CBO_PMON_CTL_TID_EN | \
 SNBEP_PMON_CTL_INVERT | \
 KNL_PCU_MSR_PMON_CTL_TRESH_MASK | \
 SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT | \
 SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET)
271
272
/* SKX: PCI config offsets used for bus-to-socket mapping */
#define SKX_CPUNODEID 0xc0
#define SKX_GIDNIDMAP 0xd4

/* SKX CPU bus-number enumeration MSR; top bit flags the value as valid */
#define SKX_MSR_CPU_BUS_NUMBER 0x300
#define SKX_MSR_CPU_BUS_VALID_BIT (1ULL << 63)
#define BUS_NUM_STRIDE 8

/* SKX CHA filter fields (64-bit filter register) */
#define SKX_CHA_MSR_PMON_BOX_FILTER_TID (0x1ffULL << 0)
#define SKX_CHA_MSR_PMON_BOX_FILTER_LINK (0xfULL << 9)
#define SKX_CHA_MSR_PMON_BOX_FILTER_STATE (0x3ffULL << 17)
#define SKX_CHA_MSR_PMON_BOX_FILTER_REM (0x1ULL << 32)
#define SKX_CHA_MSR_PMON_BOX_FILTER_LOC (0x1ULL << 33)
#define SKX_CHA_MSR_PMON_BOX_FILTER_ALL_OPC (0x1ULL << 35)
#define SKX_CHA_MSR_PMON_BOX_FILTER_NM (0x1ULL << 36)
#define SKX_CHA_MSR_PMON_BOX_FILTER_NOT_NM (0x1ULL << 37)
#define SKX_CHA_MSR_PMON_BOX_FILTER_OPC0 (0x3ffULL << 41)
#define SKX_CHA_MSR_PMON_BOX_FILTER_OPC1 (0x3ffULL << 51)
#define SKX_CHA_MSR_PMON_BOX_FILTER_C6 (0x1ULL << 61)
#define SKX_CHA_MSR_PMON_BOX_FILTER_NC (0x1ULL << 62)
#define SKX_CHA_MSR_PMON_BOX_FILTER_ISOC (0x1ULL << 63)

/* SKX IIO MSRs (box 0; subsequent boxes at +SKX_IIO_MSR_OFFSET) */
#define SKX_IIO0_MSR_PMON_CTL0 0xa48
#define SKX_IIO0_MSR_PMON_CTR0 0xa41
#define SKX_IIO0_MSR_PMON_BOX_CTL 0xa40
#define SKX_IIO_MSR_OFFSET 0x20

/*
 * SKX IIO event control: threshold spills into an extended config word,
 * plus channel and FC mask fields.
 */
#define SKX_PMON_CTL_TRESH_MASK (0xff << 24)
#define SKX_PMON_CTL_TRESH_MASK_EXT (0xf)
#define SKX_PMON_CTL_CH_MASK (0xff << 4)
#define SKX_PMON_CTL_FC_MASK (0x7 << 12)
#define SKX_IIO_PMON_RAW_EVENT_MASK (SNBEP_PMON_CTL_EV_SEL_MASK | \
 SNBEP_PMON_CTL_UMASK_MASK | \
 SNBEP_PMON_CTL_EDGE_DET | \
 SNBEP_PMON_CTL_INVERT | \
 SKX_PMON_CTL_TRESH_MASK)
#define SKX_IIO_PMON_RAW_EVENT_MASK_EXT (SKX_PMON_CTL_TRESH_MASK_EXT | \
 SKX_PMON_CTL_CH_MASK | \
 SKX_PMON_CTL_FC_MASK)

/* SKX IRP MSRs (box 0; subsequent boxes at +SKX_IRP_MSR_OFFSET) */
#define SKX_IRP0_MSR_PMON_CTL0 0xa5b
#define SKX_IRP0_MSR_PMON_CTR0 0xa59
#define SKX_IRP0_MSR_PMON_BOX_CTL 0xa58
#define SKX_IRP_MSR_OFFSET 0x20

/* SKX UPI PCI registers */
#define SKX_UPI_PCI_PMON_CTL0 0x350
#define SKX_UPI_PCI_PMON_CTR0 0x318
#define SKX_UPI_PCI_PMON_BOX_CTL 0x378
#define SKX_UPI_CTL_UMASK_EXT 0xffefff

/* SKX M2M PCI registers */
#define SKX_M2M_PCI_PMON_CTL0 0x228
#define SKX_M2M_PCI_PMON_CTR0 0x200
#define SKX_M2M_PCI_PMON_BOX_CTL 0x258
350
351
/* SNR/ICX Mesh2IIO mapping device id and SAD control config offset */
#define SNR_ICX_MESH2IIO_MMAP_DID 0x9a2
#define SNR_ICX_SAD_CONTROL_CFG 0x3f4

/* Stack id occupies bits 6:4 of the SAD control config value */
#define SAD_CONTROL_STACK_ID(data) (((data) >> 4) & 0x7)

/* SNR Ubox MSRs */
#define SNR_U_MSR_PMON_CTR0 0x1f98
#define SNR_U_MSR_PMON_CTL0 0x1f91
#define SNR_U_MSR_PMON_UCLK_FIXED_CTL 0x1f93
#define SNR_U_MSR_PMON_UCLK_FIXED_CTR 0x1f94

/* SNR CHA MSRs */
#define SNR_CHA_RAW_EVENT_MASK_EXT 0x3ffffff
#define SNR_CHA_MSR_PMON_CTL0 0x1c01
#define SNR_CHA_MSR_PMON_CTR0 0x1c08
#define SNR_CHA_MSR_PMON_BOX_CTL 0x1c00
#define SNR_C0_MSR_PMON_BOX_FILTER0 0x1c05

/* SNR IIO MSRs (box 0; subsequent boxes at +SNR_IIO_MSR_OFFSET) */
#define SNR_IIO_MSR_PMON_CTL0 0x1e08
#define SNR_IIO_MSR_PMON_CTR0 0x1e01
#define SNR_IIO_MSR_PMON_BOX_CTL 0x1e00
#define SNR_IIO_MSR_OFFSET 0x10
#define SNR_IIO_PMON_RAW_EVENT_MASK_EXT 0x7ffff

/* SNR IRP MSRs */
#define SNR_IRP0_MSR_PMON_CTL0 0x1ea8
#define SNR_IRP0_MSR_PMON_CTR0 0x1ea1
#define SNR_IRP0_MSR_PMON_BOX_CTL 0x1ea0
#define SNR_IRP_MSR_OFFSET 0x10

/* SNR M2PCIE MSRs */
#define SNR_M2PCIE_MSR_PMON_CTL0 0x1e58
#define SNR_M2PCIE_MSR_PMON_CTR0 0x1e51
#define SNR_M2PCIE_MSR_PMON_BOX_CTL 0x1e50
#define SNR_M2PCIE_MSR_OFFSET 0x10

/* SNR PCU MSRs */
#define SNR_PCU_MSR_PMON_CTL0 0x1ef1
#define SNR_PCU_MSR_PMON_CTR0 0x1ef8
#define SNR_PCU_MSR_PMON_BOX_CTL 0x1ef0
#define SNR_PCU_MSR_PMON_BOX_FILTER 0x1efc

/* SNR M2M PCI registers */
#define SNR_M2M_PCI_PMON_CTL0 0x468
#define SNR_M2M_PCI_PMON_CTR0 0x440
#define SNR_M2M_PCI_PMON_BOX_CTL 0x438
#define SNR_M2M_PCI_PMON_UMASK_EXT 0xff

/* SNR PCIE3 PCI registers */
#define SNR_PCIE3_PCI_PMON_CTL0 0x508
#define SNR_PCIE3_PCI_PMON_CTR0 0x4e8
#define SNR_PCIE3_PCI_PMON_BOX_CTL 0x4e0

/* SNR IMC: MMIO-mapped PMON registers and BAR decoding constants */
#define SNR_IMC_MMIO_PMON_FIXED_CTL 0x54
#define SNR_IMC_MMIO_PMON_FIXED_CTR 0x38
#define SNR_IMC_MMIO_PMON_CTL0 0x40
#define SNR_IMC_MMIO_PMON_CTR0 0x8
#define SNR_IMC_MMIO_PMON_BOX_CTL 0x22800
#define SNR_IMC_MMIO_OFFSET 0x4000
#define SNR_IMC_MMIO_SIZE 0x4000
#define SNR_IMC_MMIO_BASE_OFFSET 0xd0
#define SNR_IMC_MMIO_BASE_MASK 0x1FFFFFFF
#define SNR_IMC_MMIO_MEM0_OFFSET 0xd8
#define SNR_IMC_MMIO_MEM0_MASK 0x7FF
420
421
/* ICX CHA MSRs (boxes 34+ live at this second MSR range) */
#define ICX_C34_MSR_PMON_CTR0 0xb68
#define ICX_C34_MSR_PMON_CTL0 0xb61
#define ICX_C34_MSR_PMON_BOX_CTL 0xb60
#define ICX_C34_MSR_PMON_BOX_FILTER0 0xb65

/* ICX IIO MSRs */
#define ICX_IIO_MSR_PMON_CTL0 0xa58
#define ICX_IIO_MSR_PMON_CTR0 0xa51
#define ICX_IIO_MSR_PMON_BOX_CTL 0xa50

/* ICX IRP MSRs */
#define ICX_IRP0_MSR_PMON_CTL0 0xa4d
#define ICX_IRP0_MSR_PMON_CTR0 0xa4b
#define ICX_IRP0_MSR_PMON_BOX_CTL 0xa4a

/* ICX M2PCIE MSRs */
#define ICX_M2PCIE_MSR_PMON_CTL0 0xa46
#define ICX_M2PCIE_MSR_PMON_CTR0 0xa41
#define ICX_M2PCIE_MSR_PMON_BOX_CTL 0xa40

/* ICX UPI PCI registers */
#define ICX_UPI_PCI_PMON_CTL0 0x350
#define ICX_UPI_PCI_PMON_CTR0 0x320
#define ICX_UPI_PCI_PMON_BOX_CTL 0x318
#define ICX_UPI_CTL_UMASK_EXT 0xffffff

/* ICX M3UPI PCI registers */
#define ICX_M3UPI_PCI_PMON_CTL0 0xd8
#define ICX_M3UPI_PCI_PMON_CTR0 0xa8
#define ICX_M3UPI_PCI_PMON_BOX_CTL 0xa0

/* ICX IMC: channels per controller and stride between controllers */
#define ICX_NUMBER_IMC_CHN 2
#define ICX_IMC_MEM_STRIDE 0x4
456
/*
 * Sysfs "format" attribute definitions, shared by the uncore PMU types
 * below.  Each entry maps a user-visible field name to the config /
 * config1 / config2 bit range it occupies; the numeric suffixes
 * distinguish platform-specific widths of the same logical field.
 */
DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7");
DEFINE_UNCORE_FORMAT_ATTR(event2, event, "config:0-6");
DEFINE_UNCORE_FORMAT_ATTR(event_ext, event, "config:0-7,21");
DEFINE_UNCORE_FORMAT_ATTR(use_occ_ctr, use_occ_ctr, "config:7");
DEFINE_UNCORE_FORMAT_ATTR(umask, umask, "config:8-15");
DEFINE_UNCORE_FORMAT_ATTR(umask_ext, umask, "config:8-15,32-43,45-55");
DEFINE_UNCORE_FORMAT_ATTR(umask_ext2, umask, "config:8-15,32-57");
DEFINE_UNCORE_FORMAT_ATTR(umask_ext3, umask, "config:8-15,32-39");
DEFINE_UNCORE_FORMAT_ATTR(umask_ext4, umask, "config:8-15,32-55");
DEFINE_UNCORE_FORMAT_ATTR(qor, qor, "config:16");
DEFINE_UNCORE_FORMAT_ATTR(edge, edge, "config:18");
DEFINE_UNCORE_FORMAT_ATTR(tid_en, tid_en, "config:19");
DEFINE_UNCORE_FORMAT_ATTR(inv, inv, "config:23");
DEFINE_UNCORE_FORMAT_ATTR(thresh9, thresh, "config:24-35");
DEFINE_UNCORE_FORMAT_ATTR(thresh8, thresh, "config:24-31");
DEFINE_UNCORE_FORMAT_ATTR(thresh6, thresh, "config:24-29");
DEFINE_UNCORE_FORMAT_ATTR(thresh5, thresh, "config:24-28");
DEFINE_UNCORE_FORMAT_ATTR(occ_sel, occ_sel, "config:14-15");
DEFINE_UNCORE_FORMAT_ATTR(occ_invert, occ_invert, "config:30");
DEFINE_UNCORE_FORMAT_ATTR(occ_edge, occ_edge, "config:14-51");
DEFINE_UNCORE_FORMAT_ATTR(occ_edge_det, occ_edge_det, "config:31");
DEFINE_UNCORE_FORMAT_ATTR(ch_mask, ch_mask, "config:36-43");
DEFINE_UNCORE_FORMAT_ATTR(ch_mask2, ch_mask, "config:36-47");
DEFINE_UNCORE_FORMAT_ATTR(fc_mask, fc_mask, "config:44-46");
DEFINE_UNCORE_FORMAT_ATTR(fc_mask2, fc_mask, "config:48-50");
DEFINE_UNCORE_FORMAT_ATTR(filter_tid, filter_tid, "config1:0-4");
DEFINE_UNCORE_FORMAT_ATTR(filter_tid2, filter_tid, "config1:0");
DEFINE_UNCORE_FORMAT_ATTR(filter_tid3, filter_tid, "config1:0-5");
DEFINE_UNCORE_FORMAT_ATTR(filter_tid4, filter_tid, "config1:0-8");
DEFINE_UNCORE_FORMAT_ATTR(filter_tid5, filter_tid, "config1:0-9");
DEFINE_UNCORE_FORMAT_ATTR(filter_cid, filter_cid, "config1:5");
DEFINE_UNCORE_FORMAT_ATTR(filter_link, filter_link, "config1:5-8");
DEFINE_UNCORE_FORMAT_ATTR(filter_link2, filter_link, "config1:6-8");
DEFINE_UNCORE_FORMAT_ATTR(filter_link3, filter_link, "config1:12");
DEFINE_UNCORE_FORMAT_ATTR(filter_nid, filter_nid, "config1:10-17");
DEFINE_UNCORE_FORMAT_ATTR(filter_nid2, filter_nid, "config1:32-47");
DEFINE_UNCORE_FORMAT_ATTR(filter_state, filter_state, "config1:18-22");
DEFINE_UNCORE_FORMAT_ATTR(filter_state2, filter_state, "config1:17-22");
DEFINE_UNCORE_FORMAT_ATTR(filter_state3, filter_state, "config1:17-23");
DEFINE_UNCORE_FORMAT_ATTR(filter_state4, filter_state, "config1:18-20");
DEFINE_UNCORE_FORMAT_ATTR(filter_state5, filter_state, "config1:17-26");
DEFINE_UNCORE_FORMAT_ATTR(filter_rem, filter_rem, "config1:32");
DEFINE_UNCORE_FORMAT_ATTR(filter_loc, filter_loc, "config1:33");
DEFINE_UNCORE_FORMAT_ATTR(filter_nm, filter_nm, "config1:36");
DEFINE_UNCORE_FORMAT_ATTR(filter_not_nm, filter_not_nm, "config1:37");
DEFINE_UNCORE_FORMAT_ATTR(filter_local, filter_local, "config1:33");
DEFINE_UNCORE_FORMAT_ATTR(filter_all_op, filter_all_op, "config1:35");
DEFINE_UNCORE_FORMAT_ATTR(filter_nnm, filter_nnm, "config1:37");
DEFINE_UNCORE_FORMAT_ATTR(filter_opc, filter_opc, "config1:23-31");
DEFINE_UNCORE_FORMAT_ATTR(filter_opc2, filter_opc, "config1:52-60");
DEFINE_UNCORE_FORMAT_ATTR(filter_opc3, filter_opc, "config1:41-60");
DEFINE_UNCORE_FORMAT_ATTR(filter_opc_0, filter_opc0, "config1:41-50");
DEFINE_UNCORE_FORMAT_ATTR(filter_opc_1, filter_opc1, "config1:51-60");
DEFINE_UNCORE_FORMAT_ATTR(filter_nc, filter_nc, "config1:62");
DEFINE_UNCORE_FORMAT_ATTR(filter_c6, filter_c6, "config1:61");
DEFINE_UNCORE_FORMAT_ATTR(filter_isoc, filter_isoc, "config1:63");
DEFINE_UNCORE_FORMAT_ATTR(filter_band0, filter_band0, "config1:0-7");
DEFINE_UNCORE_FORMAT_ATTR(filter_band1, filter_band1, "config1:8-15");
DEFINE_UNCORE_FORMAT_ATTR(filter_band2, filter_band2, "config1:16-23");
DEFINE_UNCORE_FORMAT_ATTR(filter_band3, filter_band3, "config1:24-31");
DEFINE_UNCORE_FORMAT_ATTR(match_rds, match_rds, "config1:48-51");
DEFINE_UNCORE_FORMAT_ATTR(match_rnid30, match_rnid30, "config1:32-35");
DEFINE_UNCORE_FORMAT_ATTR(match_rnid4, match_rnid4, "config1:31");
DEFINE_UNCORE_FORMAT_ATTR(match_dnid, match_dnid, "config1:13-17");
DEFINE_UNCORE_FORMAT_ATTR(match_mc, match_mc, "config1:9-12");
DEFINE_UNCORE_FORMAT_ATTR(match_opc, match_opc, "config1:5-8");
DEFINE_UNCORE_FORMAT_ATTR(match_vnw, match_vnw, "config1:3-4");
DEFINE_UNCORE_FORMAT_ATTR(match0, match0, "config1:0-31");
DEFINE_UNCORE_FORMAT_ATTR(match1, match1, "config1:32-63");
DEFINE_UNCORE_FORMAT_ATTR(mask_rds, mask_rds, "config2:48-51");
DEFINE_UNCORE_FORMAT_ATTR(mask_rnid30, mask_rnid30, "config2:32-35");
DEFINE_UNCORE_FORMAT_ATTR(mask_rnid4, mask_rnid4, "config2:31");
DEFINE_UNCORE_FORMAT_ATTR(mask_dnid, mask_dnid, "config2:13-17");
DEFINE_UNCORE_FORMAT_ATTR(mask_mc, mask_mc, "config2:9-12");
DEFINE_UNCORE_FORMAT_ATTR(mask_opc, mask_opc, "config2:5-8");
DEFINE_UNCORE_FORMAT_ATTR(mask_vnw, mask_vnw, "config2:3-4");
DEFINE_UNCORE_FORMAT_ATTR(mask0, mask0, "config2:0-31");
DEFINE_UNCORE_FORMAT_ATTR(mask1, mask1, "config2:32-63");
535
536static void snbep_uncore_pci_disable_box(struct intel_uncore_box *box)
537{
538 struct pci_dev *pdev = box->pci_dev;
539 int box_ctl = uncore_pci_box_ctl(box);
540 u32 config = 0;
541
542 if (!pci_read_config_dword(pdev, box_ctl, &config)) {
543 config |= SNBEP_PMON_BOX_CTL_FRZ;
544 pci_write_config_dword(pdev, box_ctl, config);
545 }
546}
547
548static void snbep_uncore_pci_enable_box(struct intel_uncore_box *box)
549{
550 struct pci_dev *pdev = box->pci_dev;
551 int box_ctl = uncore_pci_box_ctl(box);
552 u32 config = 0;
553
554 if (!pci_read_config_dword(pdev, box_ctl, &config)) {
555 config &= ~SNBEP_PMON_BOX_CTL_FRZ;
556 pci_write_config_dword(pdev, box_ctl, config);
557 }
558}
559
560static void snbep_uncore_pci_enable_event(struct intel_uncore_box *box, struct perf_event *event)
561{
562 struct pci_dev *pdev = box->pci_dev;
563 struct hw_perf_event *hwc = &event->hw;
564
565 pci_write_config_dword(pdev, hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
566}
567
568static void snbep_uncore_pci_disable_event(struct intel_uncore_box *box, struct perf_event *event)
569{
570 struct pci_dev *pdev = box->pci_dev;
571 struct hw_perf_event *hwc = &event->hw;
572
573 pci_write_config_dword(pdev, hwc->config_base, hwc->config);
574}
575
576static u64 snbep_uncore_pci_read_counter(struct intel_uncore_box *box, struct perf_event *event)
577{
578 struct pci_dev *pdev = box->pci_dev;
579 struct hw_perf_event *hwc = &event->hw;
580 u64 count = 0;
581
582 pci_read_config_dword(pdev, hwc->event_base, (u32 *)&count);
583 pci_read_config_dword(pdev, hwc->event_base + 4, (u32 *)&count + 1);
584
585 return count;
586}
587
588static void snbep_uncore_pci_init_box(struct intel_uncore_box *box)
589{
590 struct pci_dev *pdev = box->pci_dev;
591 int box_ctl = uncore_pci_box_ctl(box);
592
593 pci_write_config_dword(pdev, box_ctl, SNBEP_PMON_BOX_CTL_INT);
594}
595
596static void snbep_uncore_msr_disable_box(struct intel_uncore_box *box)
597{
598 u64 config;
599 unsigned msr;
600
601 msr = uncore_msr_box_ctl(box);
602 if (msr) {
603 rdmsrl(msr, config);
604 config |= SNBEP_PMON_BOX_CTL_FRZ;
605 wrmsrl(msr, config);
606 }
607}
608
609static void snbep_uncore_msr_enable_box(struct intel_uncore_box *box)
610{
611 u64 config;
612 unsigned msr;
613
614 msr = uncore_msr_box_ctl(box);
615 if (msr) {
616 rdmsrl(msr, config);
617 config &= ~SNBEP_PMON_BOX_CTL_FRZ;
618 wrmsrl(msr, config);
619 }
620}
621
622static void snbep_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
623{
624 struct hw_perf_event *hwc = &event->hw;
625 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
626
627 if (reg1->idx != EXTRA_REG_NONE)
628 wrmsrl(reg1->reg, uncore_shared_reg_config(box, 0));
629
630 wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
631}
632
633static void snbep_uncore_msr_disable_event(struct intel_uncore_box *box,
634 struct perf_event *event)
635{
636 struct hw_perf_event *hwc = &event->hw;
637
638 wrmsrl(hwc->config_base, hwc->config);
639}
640
641static void snbep_uncore_msr_init_box(struct intel_uncore_box *box)
642{
643 unsigned msr = uncore_msr_box_ctl(box);
644
645 if (msr)
646 wrmsrl(msr, SNBEP_PMON_BOX_CTL_INT);
647}
648
/* Generic SNB-EP box format: 8-bit event/umask, edge, invert, 8-bit threshold */
static struct attribute *snbep_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	NULL,
};

/* Ubox format: same as generic but with the narrower 5-bit threshold */
static struct attribute *snbep_uncore_ubox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh5.attr,
	NULL,
};

/* Cbox format: adds tid_en plus the filter-register fields */
static struct attribute *snbep_uncore_cbox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	&format_attr_filter_tid.attr,
	&format_attr_filter_nid.attr,
	&format_attr_filter_state.attr,
	&format_attr_filter_opc.attr,
	NULL,
};

/* PCU format: occupancy select/modifiers and four frequency-band filters */
static struct attribute *snbep_uncore_pcu_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_occ_sel.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh5.attr,
	&format_attr_occ_invert.attr,
	&format_attr_occ_edge.attr,
	&format_attr_filter_band0.attr,
	&format_attr_filter_band1.attr,
	&format_attr_filter_band2.attr,
	&format_attr_filter_band3.attr,
	NULL,
};

/* QPI format: extended event select plus packet match/mask fields */
static struct attribute *snbep_uncore_qpi_formats_attr[] = {
	&format_attr_event_ext.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	&format_attr_match_rds.attr,
	&format_attr_match_rnid30.attr,
	&format_attr_match_rnid4.attr,
	&format_attr_match_dnid.attr,
	&format_attr_match_mc.attr,
	&format_attr_match_opc.attr,
	&format_attr_match_vnw.attr,
	&format_attr_match0.attr,
	&format_attr_match1.attr,
	&format_attr_mask_rds.attr,
	&format_attr_mask_rnid30.attr,
	&format_attr_mask_rnid4.attr,
	&format_attr_mask_dnid.attr,
	&format_attr_mask_mc.attr,
	&format_attr_mask_opc.attr,
	&format_attr_mask_vnw.attr,
	&format_attr_mask0.attr,
	&format_attr_mask1.attr,
	NULL,
};
722
/* Predefined IMC events; CAS counts are scaled to MiB (64B * 2^-20) */
static struct uncore_event_desc snbep_uncore_imc_events[] = {
	INTEL_UNCORE_EVENT_DESC(clockticks, "event=0xff,umask=0x00"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read, "event=0x04,umask=0x03"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read.unit, "MiB"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write, "event=0x04,umask=0x0c"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write.unit, "MiB"),
	{ },
};

/* Predefined QPI events */
static struct uncore_event_desc snbep_uncore_qpi_events[] = {
	INTEL_UNCORE_EVENT_DESC(clockticks, "event=0x14"),
	INTEL_UNCORE_EVENT_DESC(txl_flits_active, "event=0x00,umask=0x06"),
	INTEL_UNCORE_EVENT_DESC(drs_data, "event=0x102,umask=0x08"),
	INTEL_UNCORE_EVENT_DESC(ncb_data, "event=0x103,umask=0x04"),
	{ },
};
741
/* Sysfs "format" groups wrapping the attribute arrays above */
static const struct attribute_group snbep_uncore_format_group = {
	.name = "format",
	.attrs = snbep_uncore_formats_attr,
};

static const struct attribute_group snbep_uncore_ubox_format_group = {
	.name = "format",
	.attrs = snbep_uncore_ubox_formats_attr,
};

static const struct attribute_group snbep_uncore_cbox_format_group = {
	.name = "format",
	.attrs = snbep_uncore_cbox_formats_attr,
};

static const struct attribute_group snbep_uncore_pcu_format_group = {
	.name = "format",
	.attrs = snbep_uncore_pcu_formats_attr,
};

static const struct attribute_group snbep_uncore_qpi_format_group = {
	.name = "format",
	.attrs = snbep_uncore_qpi_formats_attr,
};
766
/*
 * Common MSR-box ops initializers.  The __ variant deliberately omits
 * .init_box so that platforms with a different box-init sequence can
 * reuse everything else.
 *
 * Fix: the original SNBEP_UNCORE_MSR_OPS_COMMON_INIT ended with a
 * dangling "\" line continuation, silently splicing whatever line
 * followed into the macro body; the continuation is removed here.
 */
#define __SNBEP_UNCORE_MSR_OPS_COMMON_INIT() \
	.disable_box = snbep_uncore_msr_disable_box, \
	.enable_box = snbep_uncore_msr_enable_box, \
	.disable_event = snbep_uncore_msr_disable_event, \
	.enable_event = snbep_uncore_msr_enable_event, \
	.read_counter = uncore_msr_read_counter

#define SNBEP_UNCORE_MSR_OPS_COMMON_INIT() \
	__SNBEP_UNCORE_MSR_OPS_COMMON_INIT(), \
	.init_box = snbep_uncore_msr_init_box
777
/* Default ops for MSR-based SNB-EP boxes */
static struct intel_uncore_ops snbep_uncore_msr_ops = {
	SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
};

/* Common PCI-box ops initializer; .enable_event is supplied per user */
#define SNBEP_UNCORE_PCI_OPS_COMMON_INIT() \
	.init_box = snbep_uncore_pci_init_box, \
	.disable_box = snbep_uncore_pci_disable_box, \
	.enable_box = snbep_uncore_pci_enable_box, \
	.disable_event = snbep_uncore_pci_disable_event, \
	.read_counter = snbep_uncore_pci_read_counter
788
789static struct intel_uncore_ops snbep_uncore_pci_ops = {
790 SNBEP_UNCORE_PCI_OPS_COMMON_INIT(),
791 .enable_event = snbep_uncore_pci_enable_event, \
792};
793
/* Cbox constraints: per event code, the bitmask of counters it may use */
static struct event_constraint snbep_uncore_cbox_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x01, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x02, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x04, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x05, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x07, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x09, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x12, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x13, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x1b, 0xc),
	UNCORE_EVENT_CONSTRAINT(0x1c, 0xc),
	UNCORE_EVENT_CONSTRAINT(0x1d, 0xc),
	UNCORE_EVENT_CONSTRAINT(0x1e, 0xc),
	UNCORE_EVENT_CONSTRAINT(0x1f, 0xe),
	UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x31, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x35, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x3b, 0x1),
	EVENT_CONSTRAINT_END
};

/* R2PCIe box counter constraints */
static struct event_constraint snbep_uncore_r2pcie_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x12, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x24, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
	EVENT_CONSTRAINT_END
};

/* R3QPI box counter constraints */
static struct event_constraint snbep_uncore_r3qpi_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x12, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x20, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x22, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x24, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x29, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2a, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2b, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2e, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2f, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x30, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x31, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
	EVENT_CONSTRAINT_END
};
869
/*
 * SNB-EP Ubox PMU: one box with two 44-bit general-purpose counters
 * plus a 48-bit fixed UCLK counter.
 */
static struct intel_uncore_type snbep_uncore_ubox = {
	.name = "ubox",
	.num_counters = 2,
	.num_boxes = 1,
	.perf_ctr_bits = 44,
	.fixed_ctr_bits = 48,
	.perf_ctr = SNBEP_U_MSR_PMON_CTR0,
	.event_ctl = SNBEP_U_MSR_PMON_CTL0,
	.event_mask = SNBEP_U_MSR_PMON_RAW_EVENT_MASK,
	.fixed_ctr = SNBEP_U_MSR_PMON_UCLK_FIXED_CTR,
	.fixed_ctl = SNBEP_U_MSR_PMON_UCLK_FIXED_CTL,
	.ops = &snbep_uncore_msr_ops,
	.format_group = &snbep_uncore_ubox_format_group,
};
884
/*
 * Cbox extra-register table: maps event+umask codes to the shared
 * filter MSR.  The last argument is the idx bitmask of filter fields
 * the event needs (consumed by the cbox get/put constraint helpers).
 */
static struct extra_reg snbep_uncore_cbox_extra_regs[] = {
	SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
				  SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4334, 0xffff, 0x6),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4534, 0xffff, 0x6),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4934, 0xffff, 0x6),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0x6),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0xa),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0xa),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0xa),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0xa),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4037, 0x40ff, 0x2),
	EVENT_EXTRA_END
};
913
/*
 * Release the shared C-box filter fields this event had claimed: drop
 * one reference (er->ref packs a 6-bit count per field) for every field
 * bit recorded in reg1->alloc, then clear the allocation bitmap.
 */
static void snbep_cbox_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct intel_uncore_extra_reg *er = &box->shared_regs[0];
	int i;

	/* A fake box (see uncore_box_is_fake()) never took real references. */
	if (uncore_box_is_fake(box))
		return;

	for (i = 0; i < 5; i++) {
		if (reg1->alloc & (0x1 << i))
			atomic_sub(1 << (i * 6), &er->ref);
	}
	reg1->alloc = 0;
}
929
/*
 * Try to claim every filter field (bit i of reg1->idx) this event needs
 * in the shared C-box filter register.  A field can be shared as long as
 * all users program the same value into it; er->ref packs one 6-bit
 * reference count per field.  cbox_filter_mask() translates a field bit
 * into the corresponding register mask (SNB-EP and IVB-EP layouts
 * differ, hence the callback).  Returns NULL on success, or the empty
 * constraint if some field is taken with a conflicting value.
 */
static struct event_constraint *
__snbep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event,
			    u64 (*cbox_filter_mask)(int fields))
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct intel_uncore_extra_reg *er = &box->shared_regs[0];
	int i, alloc = 0;
	unsigned long flags;
	u64 mask;

	/* No filter fields requested - nothing to arbitrate. */
	if (reg1->idx == EXTRA_REG_NONE)
		return NULL;

	raw_spin_lock_irqsave(&er->lock, flags);
	for (i = 0; i < 5; i++) {
		if (!(reg1->idx & (0x1 << i)))
			continue;
		/* Real event already holds this field from a prior call. */
		if (!uncore_box_is_fake(box) && (reg1->alloc & (0x1 << i)))
			continue;

		mask = cbox_filter_mask(0x1 << i);
		/* Field is free, or already programmed with the same value. */
		if (!__BITS_VALUE(atomic_read(&er->ref), i, 6) ||
		    !((reg1->config ^ er->config) & mask)) {
			atomic_add(1 << (i * 6), &er->ref);
			er->config &= ~mask;
			er->config |= reg1->config & mask;
			alloc |= (0x1 << i);
		} else {
			break;
		}
	}
	raw_spin_unlock_irqrestore(&er->lock, flags);
	if (i < 5)
		goto fail;

	/* Remember what we took so snbep_cbox_put_constraint() can undo it. */
	if (!uncore_box_is_fake(box))
		reg1->alloc |= alloc;

	return NULL;
fail:
	/* Conflict: roll back only the references taken in this call. */
	for (; i >= 0; i--) {
		if (alloc & (0x1 << i))
			atomic_sub(1 << (i * 6), &er->ref);
	}
	return &uncore_constraint_empty;
}
976
977static u64 snbep_cbox_filter_mask(int fields)
978{
979 u64 mask = 0;
980
981 if (fields & 0x1)
982 mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_TID;
983 if (fields & 0x2)
984 mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_NID;
985 if (fields & 0x4)
986 mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_STATE;
987 if (fields & 0x8)
988 mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_OPC;
989
990 return mask;
991}
992
/* SNB-EP C-box: arbitrate shared filter fields using the SNB-EP layout. */
static struct event_constraint *
snbep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	return __snbep_cbox_get_constraint(box, event, snbep_cbox_filter_mask);
}
998
999static int snbep_cbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
1000{
1001 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
1002 struct extra_reg *er;
1003 int idx = 0;
1004
1005 for (er = snbep_uncore_cbox_extra_regs; er->msr; er++) {
1006 if (er->event != (event->hw.config & er->config_mask))
1007 continue;
1008 idx |= er->idx;
1009 }
1010
1011 if (idx) {
1012 reg1->reg = SNBEP_C0_MSR_PMON_BOX_FILTER +
1013 SNBEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
1014 reg1->config = event->attr.config1 & snbep_cbox_filter_mask(idx);
1015 reg1->idx = idx;
1016 }
1017 return 0;
1018}
1019
/* C-box ops: common MSR handlers plus filter-register arbitration. */
static struct intel_uncore_ops snbep_uncore_cbox_ops = {
	SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.hw_config		= snbep_cbox_hw_config,
	.get_constraint		= snbep_cbox_get_constraint,
	.put_constraint		= snbep_cbox_put_constraint,
};
1026
/*
 * SNB-EP C-box: up to 8 boxes (trimmed to the core count in
 * snbep_uncore_cpu_init()); one shared reg backs the filter register.
 */
static struct intel_uncore_type snbep_uncore_cbox = {
	.name			= "cbox",
	.num_counters		= 4,
	.num_boxes		= 8,
	.perf_ctr_bits		= 44,
	.event_ctl		= SNBEP_C0_MSR_PMON_CTL0,
	.perf_ctr		= SNBEP_C0_MSR_PMON_CTR0,
	.event_mask		= SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_C0_MSR_PMON_BOX_CTL,
	.msr_offset		= SNBEP_CBO_MSR_OFFSET,
	.num_shared_regs	= 1,
	.constraints		= snbep_uncore_cbox_constraints,
	.ops			= &snbep_uncore_cbox_ops,
	.format_group		= &snbep_uncore_cbox_format_group,
};
1042
1043static u64 snbep_pcu_alter_er(struct perf_event *event, int new_idx, bool modify)
1044{
1045 struct hw_perf_event *hwc = &event->hw;
1046 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
1047 u64 config = reg1->config;
1048
1049 if (new_idx > reg1->idx)
1050 config <<= 8 * (new_idx - reg1->idx);
1051 else
1052 config >>= 8 * (reg1->idx - new_idx);
1053
1054 if (modify) {
1055 hwc->config += new_idx - reg1->idx;
1056 reg1->config = config;
1057 reg1->idx = new_idx;
1058 }
1059 return config;
1060}
1061
/*
 * Arbitrate one byte slot of the shared PCU filter register.  The
 * event's slot is movable: if its preferred byte (reg1->idx) is held
 * with a different value, try the other three slots, relocating the
 * event select and filter value via snbep_pcu_alter_er().  er->ref
 * packs an 8-bit reference count per slot.  Returns NULL on success,
 * the empty constraint when all four slots conflict.
 */
static struct event_constraint *
snbep_pcu_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct intel_uncore_extra_reg *er = &box->shared_regs[0];
	unsigned long flags;
	int idx = reg1->idx;
	u64 mask, config1 = reg1->config;
	bool ok = false;

	/* No filter needed, or a real event that already owns its slot. */
	if (reg1->idx == EXTRA_REG_NONE ||
	    (!uncore_box_is_fake(box) && reg1->alloc))
		return NULL;
again:
	mask = 0xffULL << (idx * 8);
	raw_spin_lock_irqsave(&er->lock, flags);
	/* Slot is free, or already holds an identical value - share it. */
	if (!__BITS_VALUE(atomic_read(&er->ref), idx, 8) ||
	    !((config1 ^ er->config) & mask)) {
		atomic_add(1 << (idx * 8), &er->ref);
		er->config &= ~mask;
		er->config |= config1 & mask;
		ok = true;
	}
	raw_spin_unlock_irqrestore(&er->lock, flags);

	if (!ok) {
		/* Try the next of the four byte slots; give up after a lap. */
		idx = (idx + 1) % 4;
		if (idx != reg1->idx) {
			config1 = snbep_pcu_alter_er(event, idx, false);
			goto again;
		}
		return &uncore_constraint_empty;
	}

	if (!uncore_box_is_fake(box)) {
		/* Commit the relocation and remember the claim. */
		if (idx != reg1->idx)
			snbep_pcu_alter_er(event, idx, true);
		reg1->alloc = 1;
	}
	return NULL;
}
1103
/* Drop the reference on the PCU filter byte slot this event claimed. */
static void snbep_pcu_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct intel_uncore_extra_reg *er = &box->shared_regs[0];

	/* Fake boxes never took a reference; nothing to release otherwise. */
	if (uncore_box_is_fake(box) || !reg1->alloc)
		return;

	atomic_sub(1 << (reg1->idx * 8), &er->ref);
	reg1->alloc = 0;
}
1115
1116static int snbep_pcu_hw_config(struct intel_uncore_box *box, struct perf_event *event)
1117{
1118 struct hw_perf_event *hwc = &event->hw;
1119 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
1120 int ev_sel = hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK;
1121
1122 if (ev_sel >= 0xb && ev_sel <= 0xe) {
1123 reg1->reg = SNBEP_PCU_MSR_PMON_BOX_FILTER;
1124 reg1->idx = ev_sel - 0xb;
1125 reg1->config = event->attr.config1 & (0xff << (reg1->idx * 8));
1126 }
1127 return 0;
1128}
1129
/* PCU ops: common MSR handlers plus filter byte-slot arbitration. */
static struct intel_uncore_ops snbep_uncore_pcu_ops = {
	SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.hw_config		= snbep_pcu_hw_config,
	.get_constraint		= snbep_pcu_get_constraint,
	.put_constraint		= snbep_pcu_put_constraint,
};
1136
/* SNB-EP PCU (power control unit) PMON; one shared reg for the filter. */
static struct intel_uncore_type snbep_uncore_pcu = {
	.name			= "pcu",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNBEP_PCU_MSR_PMON_CTR0,
	.event_ctl		= SNBEP_PCU_MSR_PMON_CTL0,
	.event_mask		= SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCU_MSR_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &snbep_uncore_pcu_ops,
	.format_group		= &snbep_uncore_pcu_format_group,
};
1150
/* NULL-terminated list of SNB-EP MSR-based uncore PMU types. */
static struct intel_uncore_type *snbep_msr_uncores[] = {
	&snbep_uncore_ubox,
	&snbep_uncore_cbox,
	&snbep_uncore_pcu,
	NULL,
};
1157
/* Register the SNB-EP MSR uncores; expose at most one C-box per core. */
void snbep_uncore_cpu_init(void)
{
	if (snbep_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
		snbep_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
	uncore_msr_uncores = snbep_msr_uncores;
}
1164
/* Indices into uncore_extra_pci_dev[die].dev[] for QPI filter devices. */
enum {
	SNBEP_PCI_QPI_PORT0_FILTER,
	SNBEP_PCI_QPI_PORT1_FILTER,
	BDX_PCI_QPI_PORT2_FILTER,
};
1170
/*
 * QPI event 0x38 uses the packet match/mask registers: stash the match
 * value (attr.config1) and mask value (attr.config2) so
 * snbep_qpi_enable_event() can program them into the companion filter
 * PCI device.  Always succeeds.
 */
static int snbep_qpi_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
	struct hw_perf_event_extra *reg2 = &hwc->branch_reg;

	if ((hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK) == 0x38) {
		reg1->idx = 0;
		reg1->reg = SNBEP_Q_Py_PCI_PMON_PKT_MATCH0;
		reg1->config = event->attr.config1;
		reg2->reg = SNBEP_Q_Py_PCI_PMON_PKT_MASK0;
		reg2->config = event->attr.config2;
	}
	return 0;
}
1186
/*
 * Enable a QPI event.  If packet match/mask filtering was configured
 * (reg1->idx set by snbep_qpi_hw_config()), first write the 64-bit
 * match and mask values, as two 32-bit dwords each, into the per-port
 * filter PCI device looked up from uncore_extra_pci_dev[], then set the
 * enable bit in the event control register.
 */
static void snbep_qpi_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct pci_dev *pdev = box->pci_dev;
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
	struct hw_perf_event_extra *reg2 = &hwc->branch_reg;

	if (reg1->idx != EXTRA_REG_NONE) {
		/* pmu_idx selects PORT0/PORT1 filter slot for this die. */
		int idx = box->pmu->pmu_idx + SNBEP_PCI_QPI_PORT0_FILTER;
		int die = box->dieid;
		struct pci_dev *filter_pdev = uncore_extra_pci_dev[die].dev[idx];

		/* Filter device may be absent; skip filtering if so. */
		if (filter_pdev) {
			pci_write_config_dword(filter_pdev, reg1->reg,
						(u32)reg1->config);
			pci_write_config_dword(filter_pdev, reg1->reg + 4,
						(u32)(reg1->config >> 32));
			pci_write_config_dword(filter_pdev, reg2->reg,
						(u32)reg2->config);
			pci_write_config_dword(filter_pdev, reg2->reg + 4,
						(u32)(reg2->config >> 32));
		}
	}

	pci_write_config_dword(pdev, hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
}
1213
/* QPI ops: common PCI handlers plus match/mask filter programming. */
static struct intel_uncore_ops snbep_uncore_qpi_ops = {
	SNBEP_UNCORE_PCI_OPS_COMMON_INIT(),
	.enable_event		= snbep_qpi_enable_event,
	.hw_config		= snbep_qpi_hw_config,
	.get_constraint		= uncore_get_constraint,
	.put_constraint		= uncore_put_constraint,
};
1221
/* Field initializers shared by all SNB-EP PCI-based uncore types. */
#define SNBEP_UNCORE_PCI_COMMON_INIT()				\
	.perf_ctr	= SNBEP_PCI_PMON_CTR0,			\
	.event_ctl	= SNBEP_PCI_PMON_CTL0,			\
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,		\
	.box_ctl	= SNBEP_PCI_PMON_BOX_CTL,		\
	.ops		= &snbep_uncore_pci_ops,		\
	.format_group	= &snbep_uncore_format_group
1229
/* SNB-EP HA (home agent, per the UNC_HA device-id macro) PMON. */
static struct intel_uncore_type snbep_uncore_ha = {
	.name		= "ha",
	.num_counters   = 4,
	.num_boxes	= 1,
	.perf_ctr_bits	= 48,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};
1237
/* SNB-EP IMC (memory controller) PMON: 4 channels, plus a fixed counter. */
static struct intel_uncore_type snbep_uncore_imc = {
	.name		= "imc",
	.num_counters   = 4,
	.num_boxes	= 4,
	.perf_ctr_bits	= 48,
	.fixed_ctr_bits	= 48,
	.fixed_ctr	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
	.fixed_ctl	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
	.event_descs	= snbep_uncore_imc_events,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};
1249
/* SNB-EP QPI link PMON: two ports, with packet match/mask filtering. */
static struct intel_uncore_type snbep_uncore_qpi = {
	.name			= "qpi",
	.num_counters		= 4,
	.num_boxes		= 2,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNBEP_PCI_PMON_CTR0,
	.event_ctl		= SNBEP_PCI_PMON_CTL0,
	.event_mask		= SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &snbep_uncore_qpi_ops,
	.event_descs		= snbep_uncore_qpi_events,
	.format_group		= &snbep_uncore_qpi_format_group,
};
1264
1265
/* SNB-EP R2PCIe ring-to-PCIe interface PMON. */
static struct intel_uncore_type snbep_uncore_r2pcie = {
	.name		= "r2pcie",
	.num_counters   = 4,
	.num_boxes	= 1,
	.perf_ctr_bits	= 44,
	.constraints	= snbep_uncore_r2pcie_constraints,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};
1274
/* SNB-EP R3QPI ring-to-QPI interface PMON. */
static struct intel_uncore_type snbep_uncore_r3qpi = {
	.name		= "r3qpi",
	.num_counters   = 3,
	.num_boxes	= 2,
	.perf_ctr_bits	= 44,
	.constraints	= snbep_uncore_r3qpi_constraints,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};
1283
/* Indices into snbep_pci_uncores[], used in the PCI id driver_data. */
enum {
	SNBEP_PCI_UNCORE_HA,
	SNBEP_PCI_UNCORE_IMC,
	SNBEP_PCI_UNCORE_QPI,
	SNBEP_PCI_UNCORE_R2PCIE,
	SNBEP_PCI_UNCORE_R3QPI,
};
1291
/* NULL-terminated list of SNB-EP PCI-based uncore PMU types. */
static struct intel_uncore_type *snbep_pci_uncores[] = {
	[SNBEP_PCI_UNCORE_HA]		= &snbep_uncore_ha,
	[SNBEP_PCI_UNCORE_IMC]		= &snbep_uncore_imc,
	[SNBEP_PCI_UNCORE_QPI]		= &snbep_uncore_qpi,
	[SNBEP_PCI_UNCORE_R2PCIE]	= &snbep_uncore_r2pcie,
	[SNBEP_PCI_UNCORE_R3QPI]	= &snbep_uncore_r3qpi,
	NULL,
};
1300
/*
 * SNB-EP uncore PCI device table.  driver_data packs the uncore type
 * index and the box/slot index via UNCORE_PCI_DEV_DATA().
 */
static const struct pci_device_id snbep_uncore_pci_ids[] = {
	{ /* Home Agent (UNC_HA) */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_HA),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_HA, 0),
	},
	{ /* MC Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC0),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 0),
	},
	{ /* MC Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC1),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 1),
	},
	{ /* MC Channel 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC2),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 2),
	},
	{ /* MC Channel 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC3),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 3),
	},
	{ /* QPI Port 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_QPI0),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_QPI, 0),
	},
	{ /* QPI Port 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_QPI1),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_QPI, 1),
	},
	{ /* R2PCIe */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R2PCIE),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R2PCIE, 0),
	},
	{ /* R3QPI Link 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R3QPI0),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R3QPI, 0),
	},
	{ /* R3QPI Link 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R3QPI1),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R3QPI, 1),
	},
	{ /* QPI Port 0 filter */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3c86),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   SNBEP_PCI_QPI_PORT0_FILTER),
	},
	{ /* QPI Port 1 filter */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3c96),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   SNBEP_PCI_QPI_PORT1_FILTER),
	},
	{ /* end: all zeroes */ }
};
1354
/* PCI driver stub: id table only; probing is done by the uncore core. */
static struct pci_driver snbep_uncore_pci_driver = {
	.name		= "snbep_uncore",
	.id_table	= snbep_uncore_pci_ids,
};
1359
/* Low three bits of the CPUNODEID register hold this ubox's node id. */
#define NODE_ID_MASK	0x7
1361
1362
1363
1364
/*
 * Build the PCI bus number -> die id mapping by probing every ubox
 * device (PCI device id @devid) and reading its node-id registers at
 * @nodeid_loc / @idmap_loc, then fill in the mapping for buses that
 * have no ubox of their own.  Returns 0 on success or a negative errno.
 */
static int snbep_pci2phy_map_init(int devid, int nodeid_loc, int idmap_loc, bool reverse)
{
	struct pci_dev *ubox_dev = NULL;
	int i, bus, nodeid, segment, die_id;
	struct pci2phy_map *map;
	int err = 0;
	u32 config = 0;

	while (1) {
		/* find the UBOX device */
		ubox_dev = pci_get_device(PCI_VENDOR_ID_INTEL, devid, ubox_dev);
		if (!ubox_dev)
			break;
		bus = ubox_dev->bus->number;
		/*
		 * The nodeid and idmap registers only contain enough
		 * information to handle 8 nodes.  On systems with more
		 * than 8 nodes, we need to rely on NUMA information,
		 * contained within the PCI devices themselves, to
		 * determine the mapping between each PCI bus and node.
		 */
		if (nr_node_ids <= 8) {
			/* get the Node ID of the local register */
			err = pci_read_config_dword(ubox_dev, nodeid_loc, &config);
			if (err)
				break;
			nodeid = config & NODE_ID_MASK;
			/* get the Node ID mapping */
			err = pci_read_config_dword(ubox_dev, idmap_loc, &config);
			if (err)
				break;

			segment = pci_domain_nr(ubox_dev->bus);
			raw_spin_lock(&pci2phy_map_lock);
			map = __find_pci2phy_map(segment);
			if (!map) {
				raw_spin_unlock(&pci2phy_map_lock);
				err = -ENOMEM;
				break;
			}

			/*
			 * every three bits in the Node ID mapping register
			 * maps to a particular node.
			 */
			for (i = 0; i < 8; i++) {
				if (nodeid == ((config >> (3 * i)) & 0x7)) {
					if (topology_max_die_per_package() > 1)
						die_id = i;
					else
						die_id = topology_phys_to_logical_pkg(i);
					if (die_id < 0)
						die_id = -ENODEV;
					map->pbus_to_dieid[bus] = die_id;
					break;
				}
			}
			raw_spin_unlock(&pci2phy_map_lock);
		} else {
			/* >8 nodes: take the die id from a CPU on this bus' NUMA node. */
			int node = pcibus_to_node(ubox_dev->bus);
			int cpu;

			segment = pci_domain_nr(ubox_dev->bus);
			raw_spin_lock(&pci2phy_map_lock);
			map = __find_pci2phy_map(segment);
			if (!map) {
				raw_spin_unlock(&pci2phy_map_lock);
				err = -ENOMEM;
				break;
			}

			die_id = -1;
			for_each_cpu(cpu, cpumask_of_pcibus(ubox_dev->bus)) {
				struct cpuinfo_x86 *c = &cpu_data(cpu);

				if (c->initialized && cpu_to_node(cpu) == node) {
					map->pbus_to_dieid[bus] = die_id = c->logical_die_id;
					break;
				}
			}
			raw_spin_unlock(&pci2phy_map_lock);

			if (WARN_ON_ONCE(die_id == -1)) {
				err = -EINVAL;
				break;
			}
		}
	}

	if (!err) {
		/*
		 * For PCI bus with no UBOX device, find the next bus
		 * that has UBOX device and use its mapping.
		 */
		raw_spin_lock(&pci2phy_map_lock);
		list_for_each_entry(map, &pci2phy_map_head, list) {
			i = -1;
			if (reverse) {
				for (bus = 255; bus >= 0; bus--) {
					if (map->pbus_to_dieid[bus] != -1)
						i = map->pbus_to_dieid[bus];
					else
						map->pbus_to_dieid[bus] = i;
				}
			} else {
				for (bus = 0; bus <= 255; bus++) {
					if (map->pbus_to_dieid[bus] != -1)
						i = map->pbus_to_dieid[bus];
					else
						map->pbus_to_dieid[bus] = i;
				}
			}
		}
		raw_spin_unlock(&pci2phy_map_lock);
	}

	/* pci_get_device() holds a reference on the last device returned. */
	pci_dev_put(ubox_dev);

	return err ? pcibios_err_to_errno(err) : 0;
}
1485
/* Build the bus->die map from the SNB-EP ubox, then register PCI uncores. */
int snbep_uncore_pci_init(void)
{
	int ret = snbep_pci2phy_map_init(0x3ce0, SNBEP_CPUNODEID, SNBEP_GIDNIDMAP, true);
	if (ret)
		return ret;
	uncore_pci_uncores = snbep_pci_uncores;
	uncore_pci_driver = &snbep_uncore_pci_driver;
	return 0;
}
1495
1496
1497
1498static void ivbep_uncore_msr_init_box(struct intel_uncore_box *box)
1499{
1500 unsigned msr = uncore_msr_box_ctl(box);
1501 if (msr)
1502 wrmsrl(msr, IVBEP_PMON_BOX_CTL_INT);
1503}
1504
/* Reset an IVB-EP PCI-based box via its box control register. */
static void ivbep_uncore_pci_init_box(struct intel_uncore_box *box)
{
	struct pci_dev *pdev = box->pci_dev;

	pci_write_config_dword(pdev, SNBEP_PCI_PMON_BOX_CTL, IVBEP_PMON_BOX_CTL_INT);
}
1511
/* Common MSR ops for IVB-EP: IVB-EP init, SNB-EP handlers otherwise. */
#define IVBEP_UNCORE_MSR_OPS_COMMON_INIT()			\
	.init_box	= ivbep_uncore_msr_init_box,		\
	.disable_box	= snbep_uncore_msr_disable_box,		\
	.enable_box	= snbep_uncore_msr_enable_box,		\
	.disable_event	= snbep_uncore_msr_disable_event,	\
	.enable_event	= snbep_uncore_msr_enable_event,	\
	.read_counter	= uncore_msr_read_counter
1519
/* Default IVB-EP MSR ops (no extra hw_config/constraint hooks). */
static struct intel_uncore_ops ivbep_uncore_msr_ops = {
	IVBEP_UNCORE_MSR_OPS_COMMON_INIT(),
};
1523
/* Default IVB-EP PCI ops: IVB-EP init, SNB-EP handlers otherwise. */
static struct intel_uncore_ops ivbep_uncore_pci_ops = {
	.init_box	= ivbep_uncore_pci_init_box,
	.disable_box	= snbep_uncore_pci_disable_box,
	.enable_box	= snbep_uncore_pci_enable_box,
	.disable_event	= snbep_uncore_pci_disable_event,
	.enable_event	= snbep_uncore_pci_enable_event,
	.read_counter	= snbep_uncore_pci_read_counter,
};
1532
/* Field initializers shared by all IVB-EP PCI-based uncore types. */
#define IVBEP_UNCORE_PCI_COMMON_INIT()				\
	.perf_ctr	= SNBEP_PCI_PMON_CTR0,			\
	.event_ctl	= SNBEP_PCI_PMON_CTL0,			\
	.event_mask	= IVBEP_PMON_RAW_EVENT_MASK,		\
	.box_ctl	= SNBEP_PCI_PMON_BOX_CTL,		\
	.ops		= &ivbep_uncore_pci_ops,		\
	.format_group	= &ivbep_uncore_format_group
1540
/* sysfs "format" attributes: generic IVB-EP PMON control fields. */
static struct attribute *ivbep_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	NULL,
};
1549
/* U-box format attributes: 5-bit threshold instead of 8-bit. */
static struct attribute *ivbep_uncore_ubox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh5.attr,
	NULL,
};
1558
/* C-box format attributes, including the filter fields from config1. */
static struct attribute *ivbep_uncore_cbox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_thresh8.attr,
	&format_attr_filter_tid.attr,
	&format_attr_filter_link.attr,
	&format_attr_filter_state2.attr,
	&format_attr_filter_nid2.attr,
	&format_attr_filter_opc2.attr,
	&format_attr_filter_nc.attr,
	&format_attr_filter_c6.attr,
	&format_attr_filter_isoc.attr,
	NULL,
};
1575
/* PCU format attributes: occupancy controls and the four filter bands. */
static struct attribute *ivbep_uncore_pcu_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_occ_sel.attr,
	&format_attr_edge.attr,
	&format_attr_thresh5.attr,
	&format_attr_occ_invert.attr,
	&format_attr_occ_edge.attr,
	&format_attr_filter_band0.attr,
	&format_attr_filter_band1.attr,
	&format_attr_filter_band2.attr,
	&format_attr_filter_band3.attr,
	NULL,
};
1589
/* QPI format attributes: packet match (config1) and mask (config2) fields. */
static struct attribute *ivbep_uncore_qpi_formats_attr[] = {
	&format_attr_event_ext.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_thresh8.attr,
	&format_attr_match_rds.attr,
	&format_attr_match_rnid30.attr,
	&format_attr_match_rnid4.attr,
	&format_attr_match_dnid.attr,
	&format_attr_match_mc.attr,
	&format_attr_match_opc.attr,
	&format_attr_match_vnw.attr,
	&format_attr_match0.attr,
	&format_attr_match1.attr,
	&format_attr_mask_rds.attr,
	&format_attr_mask_rnid30.attr,
	&format_attr_mask_rnid4.attr,
	&format_attr_mask_dnid.attr,
	&format_attr_mask_mc.attr,
	&format_attr_mask_opc.attr,
	&format_attr_mask_vnw.attr,
	&format_attr_mask0.attr,
	&format_attr_mask1.attr,
	NULL,
};
1615
/* sysfs attribute groups wrapping the format attribute arrays above. */
static const struct attribute_group ivbep_uncore_format_group = {
	.name = "format",
	.attrs = ivbep_uncore_formats_attr,
};

static const struct attribute_group ivbep_uncore_ubox_format_group = {
	.name = "format",
	.attrs = ivbep_uncore_ubox_formats_attr,
};

static const struct attribute_group ivbep_uncore_cbox_format_group = {
	.name = "format",
	.attrs = ivbep_uncore_cbox_formats_attr,
};

static const struct attribute_group ivbep_uncore_pcu_format_group = {
	.name = "format",
	.attrs = ivbep_uncore_pcu_formats_attr,
};

static const struct attribute_group ivbep_uncore_qpi_format_group = {
	.name = "format",
	.attrs = ivbep_uncore_qpi_formats_attr,
};
1640
/* IVB-EP U-box: same MSR layout as SNB-EP, IVB-EP event mask and ops. */
static struct intel_uncore_type ivbep_uncore_ubox = {
	.name		= "ubox",
	.num_counters   = 2,
	.num_boxes	= 1,
	.perf_ctr_bits	= 44,
	.fixed_ctr_bits	= 48,
	.perf_ctr	= SNBEP_U_MSR_PMON_CTR0,
	.event_ctl	= SNBEP_U_MSR_PMON_CTL0,
	.event_mask	= IVBEP_U_MSR_PMON_RAW_EVENT_MASK,
	.fixed_ctr	= SNBEP_U_MSR_PMON_UCLK_FIXED_CTR,
	.fixed_ctl	= SNBEP_U_MSR_PMON_UCLK_FIXED_CTL,
	.ops		= &ivbep_uncore_msr_ops,
	.format_group	= &ivbep_uncore_ubox_format_group,
};
1655
/*
 * IVB-EP C-box events needing the shared filter register; same scheme
 * as snbep_uncore_cbox_extra_regs but with the IVB-EP field bitmap
 * layout consumed by ivbep_cbox_filter_mask().
 */
static struct extra_reg ivbep_uncore_cbox_extra_regs[] = {
	SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
				  SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
	SNBEP_CBO_EVENT_EXTRA_REG(0x1031, 0x10ff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x1134, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0xc),
	SNBEP_CBO_EVENT_EXTRA_REG(0x5134, 0xffff, 0xc),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4334, 0xffff, 0xc),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4534, 0xffff, 0xc),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4934, 0xffff, 0xc),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2135, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2335, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8135, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8335, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2136, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2336, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x5036, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8136, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8336, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4037, 0x40ff, 0x8),
	EVENT_EXTRA_END
};
1696
/*
 * Translate a filter-field selection bitmap into the union of the
 * corresponding IVB-EP C-box filter register masks.  Field 0x10 covers
 * the OPC/NC/C6/ISOC group, which is claimed as one unit.
 */
static u64 ivbep_cbox_filter_mask(int fields)
{
	u64 mask = 0;

	if (fields & 0x1)
		mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_TID;
	if (fields & 0x2)
		mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_LINK;
	if (fields & 0x4)
		mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_STATE;
	if (fields & 0x8)
		mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_NID;
	if (fields & 0x10) {
		mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_OPC;
		mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_NC;
		mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_C6;
		mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_ISOC;
	}

	return mask;
}
1718
/* IVB-EP C-box: arbitrate shared filter fields using the IVB-EP layout. */
static struct event_constraint *
ivbep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	return __snbep_cbox_get_constraint(box, event, ivbep_cbox_filter_mask);
}
1724
/*
 * Collect the filter fields needed by this event (union of all matching
 * IVB-EP extra_reg entries) and record the per-box filter MSR address
 * and the masked filter value from attr.config1.  Always succeeds.
 */
static int ivbep_cbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct extra_reg *er;
	int idx = 0;

	for (er = ivbep_uncore_cbox_extra_regs; er->msr; er++) {
		if (er->event != (event->hw.config & er->config_mask))
			continue;
		idx |= er->idx;
	}

	if (idx) {
		/* Each C-box has its own filter register, spaced by the MSR stride. */
		reg1->reg = SNBEP_C0_MSR_PMON_BOX_FILTER +
			SNBEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
		reg1->config = event->attr.config1 & ivbep_cbox_filter_mask(idx);
		reg1->idx = idx;
	}
	return 0;
}
1745
/*
 * Enable an IVB-EP C-box event.  The 64-bit filter value is written as
 * two 32-bit MSR writes: low half at reg1->reg, high half at
 * reg1->reg + 6 (NOTE(review): presumably the FILTER1 MSR offset on
 * this part - confirm against the IVB-EP uncore MSR map).
 */
static void ivbep_cbox_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;

	if (reg1->idx != EXTRA_REG_NONE) {
		u64 filter = uncore_shared_reg_config(box, 0);
		wrmsrl(reg1->reg, filter & 0xffffffff);
		wrmsrl(reg1->reg + 6, filter >> 32);
	}

	wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
}
1759
/* IVB-EP C-box ops: IVB-EP enable/filter handling, SNB-EP put path. */
static struct intel_uncore_ops ivbep_uncore_cbox_ops = {
	.init_box		= ivbep_uncore_msr_init_box,
	.disable_box		= snbep_uncore_msr_disable_box,
	.enable_box		= snbep_uncore_msr_enable_box,
	.disable_event		= snbep_uncore_msr_disable_event,
	.enable_event		= ivbep_cbox_enable_event,
	.read_counter		= uncore_msr_read_counter,
	.hw_config		= ivbep_cbox_hw_config,
	.get_constraint		= ivbep_cbox_get_constraint,
	.put_constraint		= snbep_cbox_put_constraint,
};
1771
/*
 * IVB-EP C-box: up to 15 boxes (trimmed to the core count in
 * ivbep_uncore_cpu_init()); shares the SNB-EP constraint table.
 */
static struct intel_uncore_type ivbep_uncore_cbox = {
	.name			= "cbox",
	.num_counters		= 4,
	.num_boxes		= 15,
	.perf_ctr_bits		= 44,
	.event_ctl		= SNBEP_C0_MSR_PMON_CTL0,
	.perf_ctr		= SNBEP_C0_MSR_PMON_CTR0,
	.event_mask		= IVBEP_CBO_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_C0_MSR_PMON_BOX_CTL,
	.msr_offset		= SNBEP_CBO_MSR_OFFSET,
	.num_shared_regs	= 1,
	.constraints		= snbep_uncore_cbox_constraints,
	.ops			= &ivbep_uncore_cbox_ops,
	.format_group		= &ivbep_uncore_cbox_format_group,
};
1787
/* IVB-EP PCU ops: reuses the SNB-EP filter byte-slot arbitration. */
static struct intel_uncore_ops ivbep_uncore_pcu_ops = {
	IVBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.hw_config		= snbep_pcu_hw_config,
	.get_constraint		= snbep_pcu_get_constraint,
	.put_constraint		= snbep_pcu_put_constraint,
};
1794
/* IVB-EP PCU PMON; one shared reg for the filter register. */
static struct intel_uncore_type ivbep_uncore_pcu = {
	.name			= "pcu",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNBEP_PCU_MSR_PMON_CTR0,
	.event_ctl		= SNBEP_PCU_MSR_PMON_CTL0,
	.event_mask		= IVBEP_PCU_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCU_MSR_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &ivbep_uncore_pcu_ops,
	.format_group		= &ivbep_uncore_pcu_format_group,
};
1808
/* NULL-terminated list of IVB-EP MSR-based uncore PMU types. */
static struct intel_uncore_type *ivbep_msr_uncores[] = {
	&ivbep_uncore_ubox,
	&ivbep_uncore_cbox,
	&ivbep_uncore_pcu,
	NULL,
};
1815
/* Register the IVB-EP MSR uncores; expose at most one C-box per core. */
void ivbep_uncore_cpu_init(void)
{
	if (ivbep_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
		ivbep_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
	uncore_msr_uncores = ivbep_msr_uncores;
}
1822
/* IVB-EP HA (home agent) PMON: two boxes. */
static struct intel_uncore_type ivbep_uncore_ha = {
	.name		= "ha",
	.num_counters   = 4,
	.num_boxes	= 2,
	.perf_ctr_bits	= 48,
	IVBEP_UNCORE_PCI_COMMON_INIT(),
};
1830
/* IVB-EP IMC PMON: 8 channels, plus a fixed counter per channel. */
static struct intel_uncore_type ivbep_uncore_imc = {
	.name		= "imc",
	.num_counters   = 4,
	.num_boxes	= 8,
	.perf_ctr_bits	= 48,
	.fixed_ctr_bits	= 48,
	.fixed_ctr	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
	.fixed_ctl	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
	.event_descs	= snbep_uncore_imc_events,
	IVBEP_UNCORE_PCI_COMMON_INIT(),
};
1842
1843
/* IRP counters are not contiguous: per-counter control/counter offsets. */
static unsigned ivbep_uncore_irp_ctls[] = {0xd8, 0xdc, 0xe0, 0xe4};
static unsigned ivbep_uncore_irp_ctrs[] = {0xa0, 0xb0, 0xb8, 0xc0};
1846
/* Enable an IRP event via its per-counter control register. */
static void ivbep_uncore_irp_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct pci_dev *pdev = box->pci_dev;
	struct hw_perf_event *hwc = &event->hw;

	pci_write_config_dword(pdev, ivbep_uncore_irp_ctls[hwc->idx],
			       hwc->config | SNBEP_PMON_CTL_EN);
}
1855
/* Disable an IRP event by rewriting its control without the enable bit. */
static void ivbep_uncore_irp_disable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct pci_dev *pdev = box->pci_dev;
	struct hw_perf_event *hwc = &event->hw;

	pci_write_config_dword(pdev, ivbep_uncore_irp_ctls[hwc->idx], hwc->config);
}
1863
/*
 * Read an IRP counter: two 32-bit config-space reads assembled into the
 * low and high halves of a u64 (relies on x86 little-endian layout).
 */
static u64 ivbep_uncore_irp_read_counter(struct intel_uncore_box *box, struct perf_event *event)
{
	struct pci_dev *pdev = box->pci_dev;
	struct hw_perf_event *hwc = &event->hw;
	u64 count = 0;

	pci_read_config_dword(pdev, ivbep_uncore_irp_ctrs[hwc->idx], (u32 *)&count);
	pci_read_config_dword(pdev, ivbep_uncore_irp_ctrs[hwc->idx] + 4, (u32 *)&count + 1);

	return count;
}
1875
/* IRP ops: standard box control, custom per-counter register access. */
static struct intel_uncore_ops ivbep_uncore_irp_ops = {
	.init_box	= ivbep_uncore_pci_init_box,
	.disable_box	= snbep_uncore_pci_disable_box,
	.enable_box	= snbep_uncore_pci_enable_box,
	.disable_event	= ivbep_uncore_irp_disable_event,
	.enable_event	= ivbep_uncore_irp_enable_event,
	.read_counter	= ivbep_uncore_irp_read_counter,
};
1884
/* IVB-EP IRP PMON; counter/control addresses come from the offset arrays. */
static struct intel_uncore_type ivbep_uncore_irp = {
	.name			= "irp",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.event_mask		= IVBEP_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
	.ops			= &ivbep_uncore_irp_ops,
	.format_group		= &ivbep_uncore_format_group,
};
1895
/* IVB-EP QPI ops: IVB-EP init plus the SNB-EP match/mask filter path. */
static struct intel_uncore_ops ivbep_uncore_qpi_ops = {
	.init_box	= ivbep_uncore_pci_init_box,
	.disable_box	= snbep_uncore_pci_disable_box,
	.enable_box	= snbep_uncore_pci_enable_box,
	.disable_event	= snbep_uncore_pci_disable_event,
	.enable_event	= snbep_qpi_enable_event,
	.read_counter	= snbep_uncore_pci_read_counter,
	.hw_config	= snbep_qpi_hw_config,
	.get_constraint	= uncore_get_constraint,
	.put_constraint	= uncore_put_constraint,
};
1907
/* IVB-EP QPI link PMON: three ports, with packet match/mask filtering. */
static struct intel_uncore_type ivbep_uncore_qpi = {
	.name			= "qpi",
	.num_counters		= 4,
	.num_boxes		= 3,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNBEP_PCI_PMON_CTR0,
	.event_ctl		= SNBEP_PCI_PMON_CTL0,
	.event_mask		= IVBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &ivbep_uncore_qpi_ops,
	.format_group		= &ivbep_uncore_qpi_format_group,
};
1921
/* IVB-EP R2PCIe PMON; shares the SNB-EP constraint table. */
static struct intel_uncore_type ivbep_uncore_r2pcie = {
	.name		= "r2pcie",
	.num_counters   = 4,
	.num_boxes	= 1,
	.perf_ctr_bits	= 44,
	.constraints	= snbep_uncore_r2pcie_constraints,
	IVBEP_UNCORE_PCI_COMMON_INIT(),
};
1930
1931static struct intel_uncore_type ivbep_uncore_r3qpi = {
1932 .name = "r3qpi",
1933 .num_counters = 3,
1934 .num_boxes = 2,
1935 .perf_ctr_bits = 44,
1936 .constraints = snbep_uncore_r3qpi_constraints,
1937 IVBEP_UNCORE_PCI_COMMON_INIT(),
1938};
1939
/* Index of each IVBEP PCI uncore type within ivbep_pci_uncores[]. */
enum {
	IVBEP_PCI_UNCORE_HA,
	IVBEP_PCI_UNCORE_IMC,
	IVBEP_PCI_UNCORE_IRP,
	IVBEP_PCI_UNCORE_QPI,
	IVBEP_PCI_UNCORE_R2PCIE,
	IVBEP_PCI_UNCORE_R3QPI,
};

/* NULL-terminated table of all PCI-attached uncore types on IVBEP. */
static struct intel_uncore_type *ivbep_pci_uncores[] = {
	[IVBEP_PCI_UNCORE_HA]	= &ivbep_uncore_ha,
	[IVBEP_PCI_UNCORE_IMC]	= &ivbep_uncore_imc,
	[IVBEP_PCI_UNCORE_IRP]	= &ivbep_uncore_irp,
	[IVBEP_PCI_UNCORE_QPI]	= &ivbep_uncore_qpi,
	[IVBEP_PCI_UNCORE_R2PCIE]	= &ivbep_uncore_r2pcie,
	[IVBEP_PCI_UNCORE_R3QPI]	= &ivbep_uncore_r3qpi,
	NULL,
};
1958
/*
 * PCI id table mapping each IVBEP uncore PMON device id to its uncore
 * type and box index (encoded via UNCORE_PCI_DEV_DATA).
 */
static const struct pci_device_id ivbep_uncore_pci_ids[] = {
	{ /* Home Agent, box 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe30),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_HA, 0),
	},
	{ /* Home Agent, box 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe38),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_HA, 1),
	},
	{ /* IMC, box 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb4),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC, box 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb5),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 1),
	},
	{ /* IMC, box 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb0),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 2),
	},
	{ /* IMC, box 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb1),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 3),
	},
	{ /* IMC, box 4 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef4),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 4),
	},
	{ /* IMC, box 5 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef5),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 5),
	},
	{ /* IMC, box 6 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef0),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 6),
	},
	{ /* IMC, box 7 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef1),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 7),
	},
	{ /* IRP */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe39),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IRP, 0),
	},
	{ /* QPI, box 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe32),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_QPI, 0),
	},
	{ /* QPI, box 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe33),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_QPI, 1),
	},
	{ /* QPI, box 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe3a),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_QPI, 2),
	},
	{ /* R2PCIe */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe34),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_R2PCIE, 0),
	},
	{ /* R3QPI, box 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe36),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_R3QPI, 0),
	},
	{ /* R3QPI, box 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe37),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_R3QPI, 1),
	},
	{ /* R3QPI, box 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe3e),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_R3QPI, 2),
	},
	{ /* QPI Port 0 filter (extra device, not a PMON box) */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe86),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   SNBEP_PCI_QPI_PORT0_FILTER),
	},
	{ /* QPI Port 1 filter (extra device, not a PMON box) */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe96),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   SNBEP_PCI_QPI_PORT1_FILTER),
	},
	{ /* end: all zeroes */ }
};
2044
/* Skeleton pci_driver used by the uncore core to match IVBEP devices;
 * probe/remove are filled in by the common uncore PCI code. */
static struct pci_driver ivbep_uncore_pci_driver = {
	.name		= "ivbep_uncore",
	.id_table	= ivbep_uncore_pci_ids,
};
2049
2050int ivbep_uncore_pci_init(void)
2051{
2052 int ret = snbep_pci2phy_map_init(0x0e1e, SNBEP_CPUNODEID, SNBEP_GIDNIDMAP, true);
2053 if (ret)
2054 return ret;
2055 uncore_pci_uncores = ivbep_pci_uncores;
2056 uncore_pci_driver = &ivbep_uncore_pci_driver;
2057 return 0;
2058}
2059
2060
2061
/* sysfs "format" attributes exposed for the KNL UBOX PMU. */
static struct attribute *knl_uncore_ubox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_inv.attr,
	&format_attr_thresh5.attr,
	NULL,
};

static const struct attribute_group knl_uncore_ubox_format_group = {
	.name = "format",
	.attrs = knl_uncore_ubox_formats_attr,
};

/* KNL UBOX: 2 general counters plus a fixed (uncore clock) counter. */
static struct intel_uncore_type knl_uncore_ubox = {
	.name			= "ubox",
	.num_counters		= 2,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.fixed_ctr_bits		= 48,
	.perf_ctr		= HSWEP_U_MSR_PMON_CTR0,
	.event_ctl		= HSWEP_U_MSR_PMON_CTL0,
	.event_mask		= KNL_U_MSR_PMON_RAW_EVENT_MASK,
	.fixed_ctr		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTR,
	.fixed_ctl		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTL,
	.ops			= &snbep_uncore_msr_ops,
	.format_group		= &knl_uncore_ubox_format_group,
};
2091
/* sysfs "format" attributes for the KNL CHA PMU, including the
 * config1-based filter fields. */
static struct attribute *knl_uncore_cha_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_qor.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	&format_attr_filter_tid4.attr,
	&format_attr_filter_link3.attr,
	&format_attr_filter_state4.attr,
	&format_attr_filter_local.attr,
	&format_attr_filter_all_op.attr,
	&format_attr_filter_nnm.attr,
	&format_attr_filter_opc3.attr,
	&format_attr_filter_nc.attr,
	&format_attr_filter_isoc.attr,
	NULL,
};

static const struct attribute_group knl_uncore_cha_format_group = {
	.name = "format",
	.attrs = knl_uncore_cha_formats_attr,
};

/* Events restricted to particular CHA counters. */
static struct event_constraint knl_uncore_cha_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x1f, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
	EVENT_CONSTRAINT_END
};

/* Events whose config selects extra filter fields; the third argument
 * is the filter-field flag consumed by knl_cha_filter_mask(). */
static struct extra_reg knl_uncore_cha_extra_regs[] = {
	SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
				  SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
	SNBEP_CBO_EVENT_EXTRA_REG(0x3d, 0xff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x35, 0xff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x36, 0xff, 0x4),
	EVENT_EXTRA_END
};
2132
2133static u64 knl_cha_filter_mask(int fields)
2134{
2135 u64 mask = 0;
2136
2137 if (fields & 0x1)
2138 mask |= KNL_CHA_MSR_PMON_BOX_FILTER_TID;
2139 if (fields & 0x2)
2140 mask |= KNL_CHA_MSR_PMON_BOX_FILTER_STATE;
2141 if (fields & 0x4)
2142 mask |= KNL_CHA_MSR_PMON_BOX_FILTER_OP;
2143 return mask;
2144}
2145
/* Constraint hook: reuse the generic CBo shared-filter logic with the
 * KNL-specific filter-bit translation. */
static struct event_constraint *
knl_cha_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	return __snbep_cbox_get_constraint(box, event, knl_cha_filter_mask);
}
2151
2152static int knl_cha_hw_config(struct intel_uncore_box *box,
2153 struct perf_event *event)
2154{
2155 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
2156 struct extra_reg *er;
2157 int idx = 0;
2158
2159 for (er = knl_uncore_cha_extra_regs; er->msr; er++) {
2160 if (er->event != (event->hw.config & er->config_mask))
2161 continue;
2162 idx |= er->idx;
2163 }
2164
2165 if (idx) {
2166 reg1->reg = HSWEP_C0_MSR_PMON_BOX_FILTER0 +
2167 KNL_CHA_MSR_OFFSET * box->pmu->pmu_idx;
2168 reg1->config = event->attr.config1 & knl_cha_filter_mask(idx);
2169
2170 reg1->config |= KNL_CHA_MSR_PMON_BOX_FILTER_REMOTE_NODE;
2171 reg1->config |= KNL_CHA_MSR_PMON_BOX_FILTER_LOCAL_NODE;
2172 reg1->config |= KNL_CHA_MSR_PMON_BOX_FILTER_NNC;
2173 reg1->idx = idx;
2174 }
2175 return 0;
2176}
2177
/* Defined later in this file; KNL reuses the HSWEP enable path. */
static void hswep_cbox_enable_event(struct intel_uncore_box *box,
				    struct perf_event *event);

static struct intel_uncore_ops knl_uncore_cha_ops = {
	.init_box		= snbep_uncore_msr_init_box,
	.disable_box		= snbep_uncore_msr_disable_box,
	.enable_box		= snbep_uncore_msr_enable_box,
	.disable_event		= snbep_uncore_msr_disable_event,
	.enable_event		= hswep_cbox_enable_event,
	.read_counter		= uncore_msr_read_counter,
	.hw_config		= knl_cha_hw_config,
	.get_constraint		= knl_cha_get_constraint,
	.put_constraint		= snbep_cbox_put_constraint,
};

/* KNL CHA (caching/home agent): up to 38 boxes, one shared filter
 * register per box (num_shared_regs). */
static struct intel_uncore_type knl_uncore_cha = {
	.name			= "cha",
	.num_counters		= 4,
	.num_boxes		= 38,
	.perf_ctr_bits		= 48,
	.event_ctl		= HSWEP_C0_MSR_PMON_CTL0,
	.perf_ctr		= HSWEP_C0_MSR_PMON_CTR0,
	.event_mask		= KNL_CHA_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= HSWEP_C0_MSR_PMON_BOX_CTL,
	.msr_offset		= KNL_CHA_MSR_OFFSET,
	.num_shared_regs	= 1,
	.constraints		= knl_uncore_cha_constraints,
	.ops			= &knl_uncore_cha_ops,
	.format_group		= &knl_uncore_cha_format_group,
};
2208
/* sysfs "format" attributes for the KNL PCU PMU (occupancy-capable
 * event encoding, hence event2/use_occ_ctr/occ_* fields). */
static struct attribute *knl_uncore_pcu_formats_attr[] = {
	&format_attr_event2.attr,
	&format_attr_use_occ_ctr.attr,
	&format_attr_occ_sel.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_inv.attr,
	&format_attr_thresh6.attr,
	&format_attr_occ_invert.attr,
	&format_attr_occ_edge_det.attr,
	NULL,
};

static const struct attribute_group knl_uncore_pcu_format_group = {
	.name = "format",
	.attrs = knl_uncore_pcu_formats_attr,
};

/* KNL power control unit PMU. */
static struct intel_uncore_type knl_uncore_pcu = {
	.name			= "pcu",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.perf_ctr		= HSWEP_PCU_MSR_PMON_CTR0,
	.event_ctl		= HSWEP_PCU_MSR_PMON_CTL0,
	.event_mask		= KNL_PCU_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= HSWEP_PCU_MSR_PMON_BOX_CTL,
	.ops			= &snbep_uncore_msr_ops,
	.format_group		= &knl_uncore_pcu_format_group,
};
2239
/* NULL-terminated table of all MSR-based uncore types on KNL. */
static struct intel_uncore_type *knl_msr_uncores[] = {
	&knl_uncore_ubox,
	&knl_uncore_cha,
	&knl_uncore_pcu,
	NULL,
};

/* Hand the MSR uncore table to the core uncore code. */
void knl_uncore_cpu_init(void)
{
	uncore_msr_uncores = knl_msr_uncores;
}
2251
2252static void knl_uncore_imc_enable_box(struct intel_uncore_box *box)
2253{
2254 struct pci_dev *pdev = box->pci_dev;
2255 int box_ctl = uncore_pci_box_ctl(box);
2256
2257 pci_write_config_dword(pdev, box_ctl, 0);
2258}
2259
2260static void knl_uncore_imc_enable_event(struct intel_uncore_box *box,
2261 struct perf_event *event)
2262{
2263 struct pci_dev *pdev = box->pci_dev;
2264 struct hw_perf_event *hwc = &event->hw;
2265
2266 if ((event->attr.config & SNBEP_PMON_CTL_EV_SEL_MASK)
2267 == UNCORE_FIXED_EVENT)
2268 pci_write_config_dword(pdev, hwc->config_base,
2269 hwc->config | KNL_PMON_FIXED_CTL_EN);
2270 else
2271 pci_write_config_dword(pdev, hwc->config_base,
2272 hwc->config | SNBEP_PMON_CTL_EN);
2273}
2274
/* Shared ops for all KNL IMC/EDC clock-domain boxes; note the custom
 * enable_box/enable_event above. */
static struct intel_uncore_ops knl_uncore_imc_ops = {
	.init_box	= snbep_uncore_pci_init_box,
	.disable_box	= snbep_uncore_pci_disable_box,
	.enable_box	= knl_uncore_imc_enable_box,
	.read_counter	= snbep_uncore_pci_read_counter,
	.enable_event	= knl_uncore_imc_enable_event,
	.disable_event	= snbep_uncore_pci_disable_event,
};

/* IMC UCLK domain: 2 boxes, each with 4 general + 1 fixed counter. */
static struct intel_uncore_type knl_uncore_imc_uclk = {
	.name			= "imc_uclk",
	.num_counters		= 4,
	.num_boxes		= 2,
	.perf_ctr_bits		= 48,
	.fixed_ctr_bits		= 48,
	.perf_ctr		= KNL_UCLK_MSR_PMON_CTR0_LOW,
	.event_ctl		= KNL_UCLK_MSR_PMON_CTL0,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.fixed_ctr		= KNL_UCLK_MSR_PMON_UCLK_FIXED_LOW,
	.fixed_ctl		= KNL_UCLK_MSR_PMON_UCLK_FIXED_CTL,
	.box_ctl		= KNL_UCLK_MSR_PMON_BOX_CTL,
	.ops			= &knl_uncore_imc_ops,
	.format_group		= &snbep_uncore_format_group,
};

/* IMC DCLK (memory channel) domain: 6 boxes. */
static struct intel_uncore_type knl_uncore_imc_dclk = {
	.name			= "imc",
	.num_counters		= 4,
	.num_boxes		= 6,
	.perf_ctr_bits		= 48,
	.fixed_ctr_bits		= 48,
	.perf_ctr		= KNL_MC0_CH0_MSR_PMON_CTR0_LOW,
	.event_ctl		= KNL_MC0_CH0_MSR_PMON_CTL0,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.fixed_ctr		= KNL_MC0_CH0_MSR_PMON_FIXED_LOW,
	.fixed_ctl		= KNL_MC0_CH0_MSR_PMON_FIXED_CTL,
	.box_ctl		= KNL_MC0_CH0_MSR_PMON_BOX_CTL,
	.ops			= &knl_uncore_imc_ops,
	.format_group		= &snbep_uncore_format_group,
};

/* EDC (embedded DRAM) UCLK domain: 8 boxes. */
static struct intel_uncore_type knl_uncore_edc_uclk = {
	.name			= "edc_uclk",
	.num_counters		= 4,
	.num_boxes		= 8,
	.perf_ctr_bits		= 48,
	.fixed_ctr_bits		= 48,
	.perf_ctr		= KNL_UCLK_MSR_PMON_CTR0_LOW,
	.event_ctl		= KNL_UCLK_MSR_PMON_CTL0,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.fixed_ctr		= KNL_UCLK_MSR_PMON_UCLK_FIXED_LOW,
	.fixed_ctl		= KNL_UCLK_MSR_PMON_UCLK_FIXED_CTL,
	.box_ctl		= KNL_UCLK_MSR_PMON_BOX_CTL,
	.ops			= &knl_uncore_imc_ops,
	.format_group		= &snbep_uncore_format_group,
};

/* EDC ECLK domain: 8 boxes. */
static struct intel_uncore_type knl_uncore_edc_eclk = {
	.name			= "edc_eclk",
	.num_counters		= 4,
	.num_boxes		= 8,
	.perf_ctr_bits		= 48,
	.fixed_ctr_bits		= 48,
	.perf_ctr		= KNL_EDC0_ECLK_MSR_PMON_CTR0_LOW,
	.event_ctl		= KNL_EDC0_ECLK_MSR_PMON_CTL0,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.fixed_ctr		= KNL_EDC0_ECLK_MSR_PMON_ECLK_FIXED_LOW,
	.fixed_ctl		= KNL_EDC0_ECLK_MSR_PMON_ECLK_FIXED_CTL,
	.box_ctl		= KNL_EDC0_ECLK_MSR_PMON_BOX_CTL,
	.ops			= &knl_uncore_imc_ops,
	.format_group		= &snbep_uncore_format_group,
};
2347
/* Event 0x23 is limited to the first two M2PCIe counters. */
static struct event_constraint knl_uncore_m2pcie_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	EVENT_CONSTRAINT_END
};

/* KNL M2PCIe box, using the common SNBEP PCI PMON layout. */
static struct intel_uncore_type knl_uncore_m2pcie = {
	.name		= "m2pcie",
	.num_counters	= 4,
	.num_boxes	= 1,
	.perf_ctr_bits	= 48,
	.constraints	= knl_uncore_m2pcie_constraints,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};

/* sysfs "format" attributes for the KNL IRP PMU. */
static struct attribute *knl_uncore_irp_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_qor.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	NULL,
};

static const struct attribute_group knl_uncore_irp_format_group = {
	.name = "format",
	.attrs = knl_uncore_irp_formats_attr,
};

/* KNL IRP box; unlike IVBEP/HSWEP, it uses the standard PCI PMON
 * counter layout (generic snbep PCI ops suffice). */
static struct intel_uncore_type knl_uncore_irp = {
	.name			= "irp",
	.num_counters		= 2,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNBEP_PCI_PMON_CTR0,
	.event_ctl		= SNBEP_PCI_PMON_CTL0,
	.event_mask		= KNL_IRP_PCI_PMON_RAW_EVENT_MASK,
	.box_ctl		= KNL_IRP_PCI_PMON_BOX_CTL,
	.ops			= &snbep_uncore_pci_ops,
	.format_group		= &knl_uncore_irp_format_group,
};
2389
/* Index of each KNL PCI uncore type within knl_pci_uncores[]. */
enum {
	KNL_PCI_UNCORE_MC_UCLK,
	KNL_PCI_UNCORE_MC_DCLK,
	KNL_PCI_UNCORE_EDC_UCLK,
	KNL_PCI_UNCORE_EDC_ECLK,
	KNL_PCI_UNCORE_M2PCIE,
	KNL_PCI_UNCORE_IRP,
};

/* NULL-terminated table of all PCI-attached uncore types on KNL. */
static struct intel_uncore_type *knl_pci_uncores[] = {
	[KNL_PCI_UNCORE_MC_UCLK]	= &knl_uncore_imc_uclk,
	[KNL_PCI_UNCORE_MC_DCLK]	= &knl_uncore_imc_dclk,
	[KNL_PCI_UNCORE_EDC_UCLK]	= &knl_uncore_edc_uclk,
	[KNL_PCI_UNCORE_EDC_ECLK]	= &knl_uncore_edc_eclk,
	[KNL_PCI_UNCORE_M2PCIE]		= &knl_uncore_m2pcie,
	[KNL_PCI_UNCORE_IRP]		= &knl_uncore_irp,
	NULL,
};
2408
2409
2410
2411
2412
2413
2414
2415
2416
2417
2418
2419
2420
2421
2422
2423
2424
2425
2426
/*
 * KNL PCI id table. The same device id appears on several devfn
 * addresses, so UNCORE_PCI_DEV_FULL_DATA also encodes the expected
 * (device, function) plus the uncore type and box index.
 */
static const struct pci_device_id knl_uncore_pci_ids[] = {
	{ /* MC0 UClk (dev 10, fn 0) */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7841),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(10, 0, KNL_PCI_UNCORE_MC_UCLK, 0),
	},
	{ /* MC1 UClk (dev 11, fn 0) */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7841),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(11, 0, KNL_PCI_UNCORE_MC_UCLK, 1),
	},
	{ /* MC DClk boxes 0-5 (devs 8-9, fns 2-4) */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(8, 2, KNL_PCI_UNCORE_MC_DCLK, 0),
	},
	{
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(8, 3, KNL_PCI_UNCORE_MC_DCLK, 1),
	},
	{
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(8, 4, KNL_PCI_UNCORE_MC_DCLK, 2),
	},
	{
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(9, 2, KNL_PCI_UNCORE_MC_DCLK, 3),
	},
	{
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(9, 3, KNL_PCI_UNCORE_MC_DCLK, 4),
	},
	{
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(9, 4, KNL_PCI_UNCORE_MC_DCLK, 5),
	},
	{ /* EDC UClk boxes 0-7 (devs 15-22, fn 0) */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(15, 0, KNL_PCI_UNCORE_EDC_UCLK, 0),
	},
	{
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(16, 0, KNL_PCI_UNCORE_EDC_UCLK, 1),
	},
	{
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(17, 0, KNL_PCI_UNCORE_EDC_UCLK, 2),
	},
	{
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 0, KNL_PCI_UNCORE_EDC_UCLK, 3),
	},
	{
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(19, 0, KNL_PCI_UNCORE_EDC_UCLK, 4),
	},
	{
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(20, 0, KNL_PCI_UNCORE_EDC_UCLK, 5),
	},
	{
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(21, 0, KNL_PCI_UNCORE_EDC_UCLK, 6),
	},
	{
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(22, 0, KNL_PCI_UNCORE_EDC_UCLK, 7),
	},
	{ /* EDC EClk boxes 0-7 (devs 24-31, fn 2) */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(24, 2, KNL_PCI_UNCORE_EDC_ECLK, 0),
	},
	{
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(25, 2, KNL_PCI_UNCORE_EDC_ECLK, 1),
	},
	{
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(26, 2, KNL_PCI_UNCORE_EDC_ECLK, 2),
	},
	{
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(27, 2, KNL_PCI_UNCORE_EDC_ECLK, 3),
	},
	{
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(28, 2, KNL_PCI_UNCORE_EDC_ECLK, 4),
	},
	{
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(29, 2, KNL_PCI_UNCORE_EDC_ECLK, 5),
	},
	{
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(30, 2, KNL_PCI_UNCORE_EDC_ECLK, 6),
	},
	{
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(31, 2, KNL_PCI_UNCORE_EDC_ECLK, 7),
	},
	{ /* M2PCIe */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7817),
		.driver_data = UNCORE_PCI_DEV_DATA(KNL_PCI_UNCORE_M2PCIE, 0),
	},
	{ /* IRP */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7814),
		.driver_data = UNCORE_PCI_DEV_DATA(KNL_PCI_UNCORE_IRP, 0),
	},
	{ /* end: all zeroes */ }
};
2534
/* Skeleton pci_driver used by the uncore core to match KNL devices. */
static struct pci_driver knl_uncore_pci_driver = {
	.name		= "knl_uncore",
	.id_table	= knl_uncore_pci_ids,
};
2539
2540int knl_uncore_pci_init(void)
2541{
2542 int ret;
2543
2544
2545 ret = snb_pci2phy_map_init(0x7814);
2546 if (ret)
2547 return ret;
2548 ret = snb_pci2phy_map_init(0x7817);
2549 if (ret)
2550 return ret;
2551 uncore_pci_uncores = knl_pci_uncores;
2552 uncore_pci_driver = &knl_uncore_pci_driver;
2553 return 0;
2554}
2555
2556
2557
2558
/* sysfs "format" attributes for the HSWEP UBOX PMU, including the
 * config1-based tid/cid filter fields. */
static struct attribute *hswep_uncore_ubox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh5.attr,
	&format_attr_filter_tid2.attr,
	&format_attr_filter_cid.attr,
	NULL,
};

static const struct attribute_group hswep_uncore_ubox_format_group = {
	.name = "format",
	.attrs = hswep_uncore_ubox_formats_attr,
};
2574
2575static int hswep_ubox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
2576{
2577 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
2578 reg1->reg = HSWEP_U_MSR_PMON_FILTER;
2579 reg1->config = event->attr.config1 & HSWEP_U_MSR_PMON_BOX_FILTER_MASK;
2580 reg1->idx = 0;
2581 return 0;
2582}
2583
/* UBOX ops: common MSR ops plus shared-filter constraint handling. */
static struct intel_uncore_ops hswep_uncore_ubox_ops = {
	SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.hw_config		= hswep_ubox_hw_config,
	.get_constraint		= uncore_get_constraint,
	.put_constraint		= uncore_put_constraint,
};

/* HSWEP UBOX: 2 general counters + fixed uncore clock counter and one
 * shared filter register. */
static struct intel_uncore_type hswep_uncore_ubox = {
	.name			= "ubox",
	.num_counters		= 2,
	.num_boxes		= 1,
	.perf_ctr_bits		= 44,
	.fixed_ctr_bits		= 48,
	.perf_ctr		= HSWEP_U_MSR_PMON_CTR0,
	.event_ctl		= HSWEP_U_MSR_PMON_CTL0,
	.event_mask		= SNBEP_U_MSR_PMON_RAW_EVENT_MASK,
	.fixed_ctr		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTR,
	.fixed_ctl		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTL,
	.num_shared_regs	= 1,
	.ops			= &hswep_uncore_ubox_ops,
	.format_group		= &hswep_uncore_ubox_format_group,
};
2606
/* sysfs "format" attributes for the HSWEP CBOX PMU, including the
 * config1-based filter fields. */
static struct attribute *hswep_uncore_cbox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_thresh8.attr,
	&format_attr_filter_tid3.attr,
	&format_attr_filter_link2.attr,
	&format_attr_filter_state3.attr,
	&format_attr_filter_nid2.attr,
	&format_attr_filter_opc2.attr,
	&format_attr_filter_nc.attr,
	&format_attr_filter_c6.attr,
	&format_attr_filter_isoc.attr,
	NULL,
};

static const struct attribute_group hswep_uncore_cbox_format_group = {
	.name = "format",
	.attrs = hswep_uncore_cbox_formats_attr,
};

/* Events restricted to particular CBOX counters. */
static struct event_constraint hswep_uncore_cbox_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x01, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x09, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x3b, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x3e, 0x1),
	EVENT_CONSTRAINT_END
};

/* (event,umask) encodings that require filter fields; the third value
 * is the filter-field flag set consumed by hswep_cbox_filter_mask(). */
static struct extra_reg hswep_uncore_cbox_extra_regs[] = {
	SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
				  SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x1134, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2134, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4037, 0x40ff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4028, 0x40ff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4032, 0x40ff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4029, 0x40ff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4033, 0x40ff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x402A, 0x40ff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x12),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2335, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8335, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2135, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8135, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2336, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8336, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2136, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8136, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x5036, 0xffff, 0x8),
	EVENT_EXTRA_END
};
2681
2682static u64 hswep_cbox_filter_mask(int fields)
2683{
2684 u64 mask = 0;
2685 if (fields & 0x1)
2686 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_TID;
2687 if (fields & 0x2)
2688 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_LINK;
2689 if (fields & 0x4)
2690 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_STATE;
2691 if (fields & 0x8)
2692 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_NID;
2693 if (fields & 0x10) {
2694 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_OPC;
2695 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_NC;
2696 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_C6;
2697 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_ISOC;
2698 }
2699 return mask;
2700}
2701
/* Constraint hook: reuse the generic CBo shared-filter logic with the
 * HSWEP-specific filter-bit translation. */
static struct event_constraint *
hswep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	return __snbep_cbox_get_constraint(box, event, hswep_cbox_filter_mask);
}
2707
2708static int hswep_cbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
2709{
2710 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
2711 struct extra_reg *er;
2712 int idx = 0;
2713
2714 for (er = hswep_uncore_cbox_extra_regs; er->msr; er++) {
2715 if (er->event != (event->hw.config & er->config_mask))
2716 continue;
2717 idx |= er->idx;
2718 }
2719
2720 if (idx) {
2721 reg1->reg = HSWEP_C0_MSR_PMON_BOX_FILTER0 +
2722 HSWEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
2723 reg1->config = event->attr.config1 & hswep_cbox_filter_mask(idx);
2724 reg1->idx = idx;
2725 }
2726 return 0;
2727}
2728
2729static void hswep_cbox_enable_event(struct intel_uncore_box *box,
2730 struct perf_event *event)
2731{
2732 struct hw_perf_event *hwc = &event->hw;
2733 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
2734
2735 if (reg1->idx != EXTRA_REG_NONE) {
2736 u64 filter = uncore_shared_reg_config(box, 0);
2737 wrmsrl(reg1->reg, filter & 0xffffffff);
2738 wrmsrl(reg1->reg + 1, filter >> 32);
2739 }
2740
2741 wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
2742}
2743
static struct intel_uncore_ops hswep_uncore_cbox_ops = {
	.init_box		= snbep_uncore_msr_init_box,
	.disable_box		= snbep_uncore_msr_disable_box,
	.enable_box		= snbep_uncore_msr_enable_box,
	.disable_event		= snbep_uncore_msr_disable_event,
	.enable_event		= hswep_cbox_enable_event,
	.read_counter		= uncore_msr_read_counter,
	.hw_config		= hswep_cbox_hw_config,
	.get_constraint		= hswep_cbox_get_constraint,
	.put_constraint		= snbep_cbox_put_constraint,
};

/* HSWEP CBOX: up to 18 boxes (trimmed to the actual core count in
 * hswep_uncore_cpu_init()), one shared filter register per box. */
static struct intel_uncore_type hswep_uncore_cbox = {
	.name			= "cbox",
	.num_counters		= 4,
	.num_boxes		= 18,
	.perf_ctr_bits		= 48,
	.event_ctl		= HSWEP_C0_MSR_PMON_CTL0,
	.perf_ctr		= HSWEP_C0_MSR_PMON_CTR0,
	.event_mask		= SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= HSWEP_C0_MSR_PMON_BOX_CTL,
	.msr_offset		= HSWEP_CBO_MSR_OFFSET,
	.num_shared_regs	= 1,
	.constraints		= hswep_uncore_cbox_constraints,
	.ops			= &hswep_uncore_cbox_ops,
	.format_group		= &hswep_uncore_cbox_format_group,
};
2771
2772
2773
2774
/*
 * Initialize an SBOX: the SNBEP_PMON_BOX_CTL_INT control bits are
 * written to the box-control MSR one bit at a time, each write
 * accumulating the previously written bits, instead of one combined
 * write. Presumably this works around a hardware issue with writing
 * multiple control bits at once — confirm against the relevant
 * erratum before simplifying to a single write.
 */
static void hswep_uncore_sbox_msr_init_box(struct intel_uncore_box *box)
{
	unsigned msr = uncore_msr_box_ctl(box);

	if (msr) {
		u64 init = SNBEP_PMON_BOX_CTL_INT;
		u64 flags = 0;
		int i;

		/* Set each bit of SNBEP_PMON_BOX_CTL_INT cumulatively. */
		for_each_set_bit(i, (unsigned long *)&init, 64) {
			flags |= (1ULL << i);
			wrmsrl(msr, flags);
		}
	}
}
2790
/* Common MSR ops, but with the bit-by-bit init_box above. */
static struct intel_uncore_ops hswep_uncore_sbox_msr_ops = {
	__SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.init_box		= hswep_uncore_sbox_msr_init_box
};

/* sysfs "format" attributes for the HSWEP SBOX PMU. */
static struct attribute *hswep_uncore_sbox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	NULL,
};

static const struct attribute_group hswep_uncore_sbox_format_group = {
	.name = "format",
	.attrs = hswep_uncore_sbox_formats_attr,
};

/* HSWEP SBOX: 4 boxes by default; may be cut to 2 at cpu_init time
 * (see hswep_has_limit_sbox()). */
static struct intel_uncore_type hswep_uncore_sbox = {
	.name			= "sbox",
	.num_counters		= 4,
	.num_boxes		= 4,
	.perf_ctr_bits		= 44,
	.event_ctl		= HSWEP_S0_MSR_PMON_CTL0,
	.perf_ctr		= HSWEP_S0_MSR_PMON_CTR0,
	.event_mask		= HSWEP_S_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= HSWEP_S0_MSR_PMON_BOX_CTL,
	.msr_offset		= HSWEP_SBOX_MSR_OFFSET,
	.ops			= &hswep_uncore_sbox_msr_ops,
	.format_group		= &hswep_uncore_sbox_format_group,
};
2824
2825static int hswep_pcu_hw_config(struct intel_uncore_box *box, struct perf_event *event)
2826{
2827 struct hw_perf_event *hwc = &event->hw;
2828 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
2829 int ev_sel = hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK;
2830
2831 if (ev_sel >= 0xb && ev_sel <= 0xe) {
2832 reg1->reg = HSWEP_PCU_MSR_PMON_BOX_FILTER;
2833 reg1->idx = ev_sel - 0xb;
2834 reg1->config = event->attr.config1 & (0xff << reg1->idx);
2835 }
2836 return 0;
2837}
2838
/* PCU ops: common MSR ops plus the occupancy-filter hooks. */
static struct intel_uncore_ops hswep_uncore_pcu_ops = {
	SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.hw_config		= hswep_pcu_hw_config,
	.get_constraint		= snbep_pcu_get_constraint,
	.put_constraint		= snbep_pcu_put_constraint,
};

/* HSWEP power control unit PMU with one shared filter register. */
static struct intel_uncore_type hswep_uncore_pcu = {
	.name			= "pcu",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.perf_ctr		= HSWEP_PCU_MSR_PMON_CTR0,
	.event_ctl		= HSWEP_PCU_MSR_PMON_CTL0,
	.event_mask		= SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= HSWEP_PCU_MSR_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &hswep_uncore_pcu_ops,
	.format_group		= &snbep_uncore_pcu_format_group,
};

/* NULL-terminated table of all MSR-based uncore types on HSWEP. */
static struct intel_uncore_type *hswep_msr_uncores[] = {
	&hswep_uncore_ubox,
	&hswep_uncore_cbox,
	&hswep_uncore_sbox,
	&hswep_uncore_pcu,
	NULL,
};
2867
/* PCU PCI device id and its CAPID4 config offset; the "chop" field is
 * bits [7:6] of CAPID4 (see hswep_has_limit_sbox()). */
#define HSWEP_PCU_DID			0x2fc0
#define HSWEP_PCU_CAPID4_OFFET		0x94
#define hswep_get_chop(_cap)		(((_cap) >> 6) & 0x3)
2871
2872static bool hswep_has_limit_sbox(unsigned int device)
2873{
2874 struct pci_dev *dev = pci_get_device(PCI_VENDOR_ID_INTEL, device, NULL);
2875 u32 capid4;
2876
2877 if (!dev)
2878 return false;
2879
2880 pci_read_config_dword(dev, HSWEP_PCU_CAPID4_OFFET, &capid4);
2881 if (!hswep_get_chop(capid4))
2882 return true;
2883
2884 return false;
2885}
2886
2887void hswep_uncore_cpu_init(void)
2888{
2889 if (hswep_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
2890 hswep_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
2891
2892
2893 if (hswep_has_limit_sbox(HSWEP_PCU_DID))
2894 hswep_uncore_sbox.num_boxes = 2;
2895
2896 uncore_msr_uncores = hswep_msr_uncores;
2897}
2898
/* HSWEP Home Agent boxes, using the common SNBEP PCI PMON layout. */
static struct intel_uncore_type hswep_uncore_ha = {
	.name		= "ha",
	.num_counters	= 4,
	.num_boxes	= 2,
	.perf_ctr_bits	= 48,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};

/* Named IMC events; the cas_count scale 6.103515625e-5 converts
 * 64-byte CAS transactions to MiB (64 / 2^20). */
static struct uncore_event_desc hswep_uncore_imc_events[] = {
	INTEL_UNCORE_EVENT_DESC(clockticks,      "event=0x00,umask=0x00"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read,  "event=0x04,umask=0x03"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read.unit, "MiB"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write, "event=0x04,umask=0x0c"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write.unit, "MiB"),
	{ /* end marker */ },
};

/* HSWEP IMC: 8 boxes with an extra fixed (DRAM clock) counter. */
static struct intel_uncore_type hswep_uncore_imc = {
	.name		= "imc",
	.num_counters	= 4,
	.num_boxes	= 8,
	.perf_ctr_bits	= 48,
	.fixed_ctr_bits	= 48,
	.fixed_ctr	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
	.fixed_ctl	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
	.event_descs	= hswep_uncore_imc_events,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};
2929
/* PCI config-space offsets of the four IRP box counters. */
static unsigned hswep_uncore_irp_ctrs[] = {0xa0, 0xa8, 0xb0, 0xb8};

/*
 * Read a 64-bit IRP counter as two 32-bit PCI config reads.
 * The (u32 *) casts stitch the low and high halves into 'count';
 * this relies on little-endian layout, which always holds on x86.
 * NOTE(review): the two reads are not atomic, so a carry between
 * halves could in principle be observed mid-update.
 */
static u64 hswep_uncore_irp_read_counter(struct intel_uncore_box *box, struct perf_event *event)
{
 struct pci_dev *pdev = box->pci_dev;
 struct hw_perf_event *hwc = &event->hw;
 u64 count = 0;

 pci_read_config_dword(pdev, hswep_uncore_irp_ctrs[hwc->idx], (u32 *)&count);
 pci_read_config_dword(pdev, hswep_uncore_irp_ctrs[hwc->idx] + 4, (u32 *)&count + 1);

 return count;
}
2943
/*
 * IRP reuses the common SNB-EP PCI box and IVB-EP IRP event helpers,
 * but needs the custom split 32-bit counter read defined above.
 */
static struct intel_uncore_ops hswep_uncore_irp_ops = {
 .init_box = snbep_uncore_pci_init_box,
 .disable_box = snbep_uncore_pci_disable_box,
 .enable_box = snbep_uncore_pci_enable_box,
 .disable_event = ivbep_uncore_irp_disable_event,
 .enable_event = ivbep_uncore_irp_enable_event,
 .read_counter = hswep_uncore_irp_read_counter,
};
2952
/*
 * Haswell-EP IRP.  No .perf_ctr/.event_ctl here: per-counter register
 * offsets are handled entirely by hswep_uncore_irp_ops.
 */
static struct intel_uncore_type hswep_uncore_irp = {
 .name = "irp",
 .num_counters = 4,
 .num_boxes = 1,
 .perf_ctr_bits = 48,
 .event_mask = SNBEP_PMON_RAW_EVENT_MASK,
 .box_ctl = SNBEP_PCI_PMON_BOX_CTL,
 .ops = &hswep_uncore_irp_ops,
 .format_group = &snbep_uncore_format_group,
};
2963
/*
 * Haswell-EP QPI.  The shared reg holds the match/mask filter state;
 * the event mask includes the extended event-select bit (ev_sel_ext).
 */
static struct intel_uncore_type hswep_uncore_qpi = {
 .name = "qpi",
 .num_counters = 4,
 .num_boxes = 3,
 .perf_ctr_bits = 48,
 .perf_ctr = SNBEP_PCI_PMON_CTR0,
 .event_ctl = SNBEP_PCI_PMON_CTL0,
 .event_mask = SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
 .box_ctl = SNBEP_PCI_PMON_BOX_CTL,
 .num_shared_regs = 1,
 .ops = &snbep_uncore_qpi_ops,
 .format_group = &snbep_uncore_qpi_format_group,
};
2977
/*
 * Per-event counter restrictions for the HSW-EP R2PCIe box:
 * (event code, bitmask of usable counters).
 */
static struct event_constraint hswep_uncore_r2pcie_constraints[] = {
 UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
 UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
 UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
 UNCORE_EVENT_CONSTRAINT(0x23, 0x1),
 UNCORE_EVENT_CONSTRAINT(0x24, 0x1),
 UNCORE_EVENT_CONSTRAINT(0x25, 0x1),
 UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
 UNCORE_EVENT_CONSTRAINT(0x27, 0x1),
 UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
 UNCORE_EVENT_CONSTRAINT(0x29, 0x3),
 UNCORE_EVENT_CONSTRAINT(0x2a, 0x1),
 UNCORE_EVENT_CONSTRAINT(0x2b, 0x3),
 UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
 UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
 UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
 UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
 UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
 UNCORE_EVENT_CONSTRAINT(0x35, 0x3),
 EVENT_CONSTRAINT_END
};
2999
/* Haswell-EP R2PCIe ring-to-PCIe interface box. */
static struct intel_uncore_type hswep_uncore_r2pcie = {
 .name = "r2pcie",
 .num_counters = 4,
 .num_boxes = 1,
 .perf_ctr_bits = 48,
 .constraints = hswep_uncore_r2pcie_constraints,
 SNBEP_UNCORE_PCI_COMMON_INIT(),
};
3008
/*
 * Per-event counter restrictions for the HSW-EP R3QPI box:
 * (event code, bitmask of usable counters).
 */
static struct event_constraint hswep_uncore_r3qpi_constraints[] = {
 UNCORE_EVENT_CONSTRAINT(0x01, 0x3),
 UNCORE_EVENT_CONSTRAINT(0x07, 0x7),
 UNCORE_EVENT_CONSTRAINT(0x08, 0x7),
 UNCORE_EVENT_CONSTRAINT(0x09, 0x7),
 UNCORE_EVENT_CONSTRAINT(0x0a, 0x7),
 UNCORE_EVENT_CONSTRAINT(0x0e, 0x7),
 UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
 UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
 UNCORE_EVENT_CONSTRAINT(0x12, 0x3),
 UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
 UNCORE_EVENT_CONSTRAINT(0x14, 0x3),
 UNCORE_EVENT_CONSTRAINT(0x15, 0x3),
 UNCORE_EVENT_CONSTRAINT(0x1f, 0x3),
 UNCORE_EVENT_CONSTRAINT(0x20, 0x3),
 UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
 UNCORE_EVENT_CONSTRAINT(0x22, 0x3),
 UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
 UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
 UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
 UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
 UNCORE_EVENT_CONSTRAINT(0x29, 0x3),
 UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
 UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
 UNCORE_EVENT_CONSTRAINT(0x2e, 0x3),
 UNCORE_EVENT_CONSTRAINT(0x2f, 0x3),
 UNCORE_EVENT_CONSTRAINT(0x31, 0x3),
 UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
 UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
 UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
 UNCORE_EVENT_CONSTRAINT(0x36, 0x3),
 UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
 UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
 UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
 EVENT_CONSTRAINT_END
};
3045
/* Haswell-EP R3QPI: note the narrower 44-bit counters (vs 48 elsewhere). */
static struct intel_uncore_type hswep_uncore_r3qpi = {
 .name = "r3qpi",
 .num_counters = 3,
 .num_boxes = 3,
 .perf_ctr_bits = 44,
 .constraints = hswep_uncore_r3qpi_constraints,
 SNBEP_UNCORE_PCI_COMMON_INIT(),
};
3054
/* Indexes into hswep_pci_uncores[], encoded into pci_device_id driver_data. */
enum {
 HSWEP_PCI_UNCORE_HA,
 HSWEP_PCI_UNCORE_IMC,
 HSWEP_PCI_UNCORE_IRP,
 HSWEP_PCI_UNCORE_QPI,
 HSWEP_PCI_UNCORE_R2PCIE,
 HSWEP_PCI_UNCORE_R3QPI,
};
3063
/* NULL-terminated list of Haswell-EP PCI-based uncore PMU types. */
static struct intel_uncore_type *hswep_pci_uncores[] = {
 [HSWEP_PCI_UNCORE_HA] = &hswep_uncore_ha,
 [HSWEP_PCI_UNCORE_IMC] = &hswep_uncore_imc,
 [HSWEP_PCI_UNCORE_IRP] = &hswep_uncore_irp,
 [HSWEP_PCI_UNCORE_QPI] = &hswep_uncore_qpi,
 [HSWEP_PCI_UNCORE_R2PCIE] = &hswep_uncore_r2pcie,
 [HSWEP_PCI_UNCORE_R3QPI] = &hswep_uncore_r3qpi,
 NULL,
};
3073
/*
 * PCI device IDs of the Haswell-EP uncore PMU units.  driver_data
 * encodes (uncore type index, box instance) via UNCORE_PCI_DEV_DATA().
 */
static const struct pci_device_id hswep_uncore_pci_ids[] = {
 { /* Home Agent 0 */
  PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f30),
  .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_HA, 0),
 },
 { /* Home Agent 1 */
  PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f38),
  .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_HA, 1),
 },
 { /* MC0 Channel 0 */
  PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fb0),
  .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 0),
 },
 { /* MC0 Channel 1 */
  PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fb1),
  .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 1),
 },
 { /* MC0 Channel 2 */
  PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fb4),
  .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 2),
 },
 { /* MC0 Channel 3 */
  PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fb5),
  .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 3),
 },
 { /* MC1 Channel 0 */
  PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fd0),
  .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 4),
 },
 { /* MC1 Channel 1 */
  PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fd1),
  .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 5),
 },
 { /* MC1 Channel 2 */
  PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fd4),
  .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 6),
 },
 { /* MC1 Channel 3 */
  PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fd5),
  .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 7),
 },
 { /* IRP */
  PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f39),
  .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IRP, 0),
 },
 { /* QPI0 Port 0 */
  PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f32),
  .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_QPI, 0),
 },
 { /* QPI0 Port 1 */
  PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f33),
  .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_QPI, 1),
 },
 { /* QPI1 Port 2 */
  PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f3a),
  .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_QPI, 2),
 },
 { /* R2PCIe */
  PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f34),
  .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_R2PCIE, 0),
 },
 { /* R3QPI0 Link 0 */
  PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f36),
  .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_R3QPI, 0),
 },
 { /* R3QPI0 Link 1 */
  PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f37),
  .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_R3QPI, 1),
 },
 { /* R3QPI1 Link 2 */
  PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f3e),
  .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_R3QPI, 2),
 },
 { /* QPI Port 0 filter */
  PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f86),
  .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
       SNBEP_PCI_QPI_PORT0_FILTER),
 },
 { /* QPI Port 1 filter */
  PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f96),
  .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
       SNBEP_PCI_QPI_PORT1_FILTER),
 },
 { /* end: all zeroes */ }
};
3159
/* ID table only; probing/removal is driven by the common uncore PCI core. */
static struct pci_driver hswep_uncore_pci_driver = {
 .name = "hswep_uncore",
 .id_table = hswep_uncore_pci_ids,
};
3164
3165int hswep_uncore_pci_init(void)
3166{
3167 int ret = snbep_pci2phy_map_init(0x2f1e, SNBEP_CPUNODEID, SNBEP_GIDNIDMAP, true);
3168 if (ret)
3169 return ret;
3170 uncore_pci_uncores = hswep_pci_uncores;
3171 uncore_pci_driver = &hswep_uncore_pci_driver;
3172 return 0;
3173}
3174
3175
3176
3177
/* Broadwell-EP UBOX: 2 generic counters plus a fixed UCLK counter. */
static struct intel_uncore_type bdx_uncore_ubox = {
 .name = "ubox",
 .num_counters = 2,
 .num_boxes = 1,
 .perf_ctr_bits = 48,
 .fixed_ctr_bits = 48,
 .perf_ctr = HSWEP_U_MSR_PMON_CTR0,
 .event_ctl = HSWEP_U_MSR_PMON_CTL0,
 .event_mask = SNBEP_U_MSR_PMON_RAW_EVENT_MASK,
 .fixed_ctr = HSWEP_U_MSR_PMON_UCLK_FIXED_CTR,
 .fixed_ctl = HSWEP_U_MSR_PMON_UCLK_FIXED_CTL,
 .num_shared_regs = 1,
 .ops = &ivbep_uncore_msr_ops,
 .format_group = &ivbep_uncore_ubox_format_group,
};
3193
/* BDX CBOX events restricted to specific counters: (event, counter mask). */
static struct event_constraint bdx_uncore_cbox_constraints[] = {
 UNCORE_EVENT_CONSTRAINT(0x09, 0x3),
 UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
 UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
 UNCORE_EVENT_CONSTRAINT(0x3e, 0x1),
 EVENT_CONSTRAINT_END
};
3201
/*
 * Broadwell-EP CBOX.  num_boxes is the maximum (24); it is clamped to
 * the actual core count in bdx_uncore_cpu_init().
 */
static struct intel_uncore_type bdx_uncore_cbox = {
 .name = "cbox",
 .num_counters = 4,
 .num_boxes = 24,
 .perf_ctr_bits = 48,
 .event_ctl = HSWEP_C0_MSR_PMON_CTL0,
 .perf_ctr = HSWEP_C0_MSR_PMON_CTR0,
 .event_mask = SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK,
 .box_ctl = HSWEP_C0_MSR_PMON_BOX_CTL,
 .msr_offset = HSWEP_CBO_MSR_OFFSET,
 .num_shared_regs = 1,
 .constraints = bdx_uncore_cbox_constraints,
 .ops = &hswep_uncore_cbox_ops,
 .format_group = &hswep_uncore_cbox_format_group,
};
3217
/*
 * Broadwell-EP SBOX.  The whole type may be dropped from the uncore
 * list in bdx_uncore_cpu_init() on parts without a usable SBOX.
 */
static struct intel_uncore_type bdx_uncore_sbox = {
 .name = "sbox",
 .num_counters = 4,
 .num_boxes = 4,
 .perf_ctr_bits = 48,
 .event_ctl = HSWEP_S0_MSR_PMON_CTL0,
 .perf_ctr = HSWEP_S0_MSR_PMON_CTR0,
 .event_mask = HSWEP_S_MSR_PMON_RAW_EVENT_MASK,
 .box_ctl = HSWEP_S0_MSR_PMON_BOX_CTL,
 .msr_offset = HSWEP_SBOX_MSR_OFFSET,
 .ops = &hswep_uncore_sbox_msr_ops,
 .format_group = &hswep_uncore_sbox_format_group,
};
3231
/* Index of the SBOX entry in bdx_msr_uncores[]; used to NULL it out below. */
#define BDX_MSR_UNCORE_SBOX 3

/* NULL-terminated list of Broadwell-EP MSR-based uncore PMU types. */
static struct intel_uncore_type *bdx_msr_uncores[] = {
 &bdx_uncore_ubox,
 &bdx_uncore_cbox,
 &hswep_uncore_pcu,
 &bdx_uncore_sbox,
 NULL,
};
3241
3242
/* PCU occupancy event 0x80 limited to counters 1-3 (mask 0xe). */
static struct event_constraint bdx_uncore_pcu_constraints[] = {
 EVENT_CONSTRAINT(0x80, 0xe, 0x80),
 EVENT_CONSTRAINT_END
};
3247
/* Broadwell-EP PCU PCI device ID, used for the chopped-die SBOX check. */
#define BDX_PCU_DID 0x6fc0

/*
 * Late init for the Broadwell-EP MSR-based uncore PMUs: clamp the CBOX
 * count, drop the SBOX where absent, and install the PCU constraints.
 */
void bdx_uncore_cpu_init(void)
{
 if (bdx_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
  bdx_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
 uncore_msr_uncores = bdx_msr_uncores;


 /* BDX-DE (model 86) and limited-SBOX parts have no usable SBOX. */
 if ((boot_cpu_data.x86_model == 86) || hswep_has_limit_sbox(BDX_PCU_DID))
  uncore_msr_uncores[BDX_MSR_UNCORE_SBOX] = NULL;

 hswep_uncore_pcu.constraints = bdx_uncore_pcu_constraints;
}
3262
/* Broadwell-EP Home Agent (HA): 2 boxes, 4 generic 48-bit counters each. */
static struct intel_uncore_type bdx_uncore_ha = {
 .name = "ha",
 .num_counters = 4,
 .num_boxes = 2,
 .perf_ctr_bits = 48,
 SNBEP_UNCORE_PCI_COMMON_INIT(),
};
3270
/* Broadwell-EP IMC; reuses the Haswell-EP event descriptions. */
static struct intel_uncore_type bdx_uncore_imc = {
 .name = "imc",
 .num_counters = 4,
 .num_boxes = 8,
 .perf_ctr_bits = 48,
 .fixed_ctr_bits = 48,
 .fixed_ctr = SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
 .fixed_ctl = SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
 .event_descs = hswep_uncore_imc_events,
 SNBEP_UNCORE_PCI_COMMON_INIT(),
};
3282
/* Broadwell-EP IRP; counter access goes through hswep_uncore_irp_ops. */
static struct intel_uncore_type bdx_uncore_irp = {
 .name = "irp",
 .num_counters = 4,
 .num_boxes = 1,
 .perf_ctr_bits = 48,
 .event_mask = SNBEP_PMON_RAW_EVENT_MASK,
 .box_ctl = SNBEP_PCI_PMON_BOX_CTL,
 .ops = &hswep_uncore_irp_ops,
 .format_group = &snbep_uncore_format_group,
};
3293
/* Broadwell-EP QPI; shared reg carries the match/mask filter state. */
static struct intel_uncore_type bdx_uncore_qpi = {
 .name = "qpi",
 .num_counters = 4,
 .num_boxes = 3,
 .perf_ctr_bits = 48,
 .perf_ctr = SNBEP_PCI_PMON_CTR0,
 .event_ctl = SNBEP_PCI_PMON_CTL0,
 .event_mask = SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
 .box_ctl = SNBEP_PCI_PMON_BOX_CTL,
 .num_shared_regs = 1,
 .ops = &snbep_uncore_qpi_ops,
 .format_group = &snbep_uncore_qpi_format_group,
};
3307
/* BDX R2PCIe events restricted to specific counters: (event, counter mask). */
static struct event_constraint bdx_uncore_r2pcie_constraints[] = {
 UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
 UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
 UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
 UNCORE_EVENT_CONSTRAINT(0x23, 0x1),
 UNCORE_EVENT_CONSTRAINT(0x25, 0x1),
 UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
 UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
 UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
 UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
 EVENT_CONSTRAINT_END
};
3320
/* Broadwell-EP R2PCIe ring-to-PCIe interface box. */
static struct intel_uncore_type bdx_uncore_r2pcie = {
 .name = "r2pcie",
 .num_counters = 4,
 .num_boxes = 1,
 .perf_ctr_bits = 48,
 .constraints = bdx_uncore_r2pcie_constraints,
 SNBEP_UNCORE_PCI_COMMON_INIT(),
};
3329
/* BDX R3QPI events restricted to specific counters: (event, counter mask). */
static struct event_constraint bdx_uncore_r3qpi_constraints[] = {
 UNCORE_EVENT_CONSTRAINT(0x01, 0x7),
 UNCORE_EVENT_CONSTRAINT(0x07, 0x7),
 UNCORE_EVENT_CONSTRAINT(0x08, 0x7),
 UNCORE_EVENT_CONSTRAINT(0x09, 0x7),
 UNCORE_EVENT_CONSTRAINT(0x0a, 0x7),
 UNCORE_EVENT_CONSTRAINT(0x0e, 0x7),
 UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
 UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
 UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
 UNCORE_EVENT_CONSTRAINT(0x14, 0x3),
 UNCORE_EVENT_CONSTRAINT(0x15, 0x3),
 UNCORE_EVENT_CONSTRAINT(0x1f, 0x3),
 UNCORE_EVENT_CONSTRAINT(0x20, 0x3),
 UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
 UNCORE_EVENT_CONSTRAINT(0x22, 0x3),
 UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
 UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
 UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
 UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
 UNCORE_EVENT_CONSTRAINT(0x29, 0x3),
 UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
 UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
 UNCORE_EVENT_CONSTRAINT(0x2e, 0x3),
 UNCORE_EVENT_CONSTRAINT(0x2f, 0x3),
 UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
 UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
 UNCORE_EVENT_CONSTRAINT(0x36, 0x3),
 UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
 UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
 UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
 EVENT_CONSTRAINT_END
};
3363
/* Broadwell-EP R3QPI (48-bit counters here, unlike HSW-EP's 44-bit). */
static struct intel_uncore_type bdx_uncore_r3qpi = {
 .name = "r3qpi",
 .num_counters = 3,
 .num_boxes = 3,
 .perf_ctr_bits = 48,
 .constraints = bdx_uncore_r3qpi_constraints,
 SNBEP_UNCORE_PCI_COMMON_INIT(),
};
3372
/* Indexes into bdx_pci_uncores[], encoded into pci_device_id driver_data. */
enum {
 BDX_PCI_UNCORE_HA,
 BDX_PCI_UNCORE_IMC,
 BDX_PCI_UNCORE_IRP,
 BDX_PCI_UNCORE_QPI,
 BDX_PCI_UNCORE_R2PCIE,
 BDX_PCI_UNCORE_R3QPI,
};
3381
/* NULL-terminated list of Broadwell-EP PCI-based uncore PMU types. */
static struct intel_uncore_type *bdx_pci_uncores[] = {
 [BDX_PCI_UNCORE_HA] = &bdx_uncore_ha,
 [BDX_PCI_UNCORE_IMC] = &bdx_uncore_imc,
 [BDX_PCI_UNCORE_IRP] = &bdx_uncore_irp,
 [BDX_PCI_UNCORE_QPI] = &bdx_uncore_qpi,
 [BDX_PCI_UNCORE_R2PCIE] = &bdx_uncore_r2pcie,
 [BDX_PCI_UNCORE_R3QPI] = &bdx_uncore_r3qpi,
 NULL,
};
3391
/*
 * PCI device IDs of the Broadwell-EP uncore PMU units.  driver_data
 * encodes (uncore type index, box instance) via UNCORE_PCI_DEV_DATA().
 */
static const struct pci_device_id bdx_uncore_pci_ids[] = {
 { /* Home Agent 0 */
  PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f30),
  .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_HA, 0),
 },
 { /* Home Agent 1 */
  PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f38),
  .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_HA, 1),
 },
 { /* MC0 Channel 0 */
  PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fb0),
  .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 0),
 },
 { /* MC0 Channel 1 */
  PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fb1),
  .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 1),
 },
 { /* MC0 Channel 2 */
  PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fb4),
  .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 2),
 },
 { /* MC0 Channel 3 */
  PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fb5),
  .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 3),
 },
 { /* MC1 Channel 0 */
  PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fd0),
  .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 4),
 },
 { /* MC1 Channel 1 */
  PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fd1),
  .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 5),
 },
 { /* MC1 Channel 2 */
  PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fd4),
  .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 6),
 },
 { /* MC1 Channel 3 */
  PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fd5),
  .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 7),
 },
 { /* IRP */
  PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f39),
  .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IRP, 0),
 },
 { /* QPI0 Port 0 */
  PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f32),
  .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_QPI, 0),
 },
 { /* QPI0 Port 1 */
  PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f33),
  .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_QPI, 1),
 },
 { /* QPI1 Port 2 */
  PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f3a),
  .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_QPI, 2),
 },
 { /* R2PCIe */
  PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f34),
  .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_R2PCIE, 0),
 },
 { /* R3QPI0 Link 0 */
  PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f36),
  .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_R3QPI, 0),
 },
 { /* R3QPI0 Link 1 */
  PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f37),
  .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_R3QPI, 1),
 },
 { /* R3QPI1 Link 2 */
  PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f3e),
  .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_R3QPI, 2),
 },
 { /* QPI Port 0 filter */
  PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f86),
  .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
       SNBEP_PCI_QPI_PORT0_FILTER),
 },
 { /* QPI Port 1 filter */
  PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f96),
  .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
       SNBEP_PCI_QPI_PORT1_FILTER),
 },
 { /* QPI Port 2 filter */
  PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f46),
  .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
       BDX_PCI_QPI_PORT2_FILTER),
 },
 { /* end: all zeroes */ }
};
3482
/* ID table only; probing/removal is driven by the common uncore PCI core. */
static struct pci_driver bdx_uncore_pci_driver = {
 .name = "bdx_uncore",
 .id_table = bdx_uncore_pci_ids,
};
3487
3488int bdx_uncore_pci_init(void)
3489{
3490 int ret = snbep_pci2phy_map_init(0x6f1e, SNBEP_CPUNODEID, SNBEP_GIDNIDMAP, true);
3491
3492 if (ret)
3493 return ret;
3494 uncore_pci_uncores = bdx_pci_uncores;
3495 uncore_pci_driver = &bdx_uncore_pci_driver;
3496 return 0;
3497}
3498
3499
3500
3501
3502
/* Skylake-X UBOX: 2 generic counters plus a fixed UCLK counter. */
static struct intel_uncore_type skx_uncore_ubox = {
 .name = "ubox",
 .num_counters = 2,
 .num_boxes = 1,
 .perf_ctr_bits = 48,
 .fixed_ctr_bits = 48,
 .perf_ctr = HSWEP_U_MSR_PMON_CTR0,
 .event_ctl = HSWEP_U_MSR_PMON_CTL0,
 .event_mask = SNBEP_U_MSR_PMON_RAW_EVENT_MASK,
 .fixed_ctr = HSWEP_U_MSR_PMON_UCLK_FIXED_CTR,
 .fixed_ctl = HSWEP_U_MSR_PMON_UCLK_FIXED_CTL,
 .ops = &ivbep_uncore_msr_ops,
 .format_group = &ivbep_uncore_ubox_format_group,
};
3517
/* sysfs "format" attributes exposed for the SKX CHA PMU (event encoding
 * fields plus the many CHA filter-register fields). */
static struct attribute *skx_uncore_cha_formats_attr[] = {
 &format_attr_event.attr,
 &format_attr_umask.attr,
 &format_attr_edge.attr,
 &format_attr_tid_en.attr,
 &format_attr_inv.attr,
 &format_attr_thresh8.attr,
 &format_attr_filter_tid4.attr,
 &format_attr_filter_state5.attr,
 &format_attr_filter_rem.attr,
 &format_attr_filter_loc.attr,
 &format_attr_filter_nm.attr,
 &format_attr_filter_all_op.attr,
 &format_attr_filter_not_nm.attr,
 &format_attr_filter_opc_0.attr,
 &format_attr_filter_opc_1.attr,
 &format_attr_filter_nc.attr,
 &format_attr_filter_isoc.attr,
 NULL,
};

static const struct attribute_group skx_uncore_chabox_format_group = {
 .name = "format",
 .attrs = skx_uncore_cha_formats_attr,
};
3543
/* SKX CHA events restricted to counter 0: (event, counter mask). */
static struct event_constraint skx_uncore_chabox_constraints[] = {
 UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
 UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
 EVENT_CONSTRAINT_END
};
3549
/*
 * CHA events that require filter-register state:
 * (event+umask match, config mask, idx bitmask consumed by
 * skx_cha_filter_mask()).
 */
static struct extra_reg skx_uncore_cha_extra_regs[] = {
 SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
 SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
 SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
 SNBEP_CBO_EVENT_EXTRA_REG(0x1134, 0xffff, 0x4),
 SNBEP_CBO_EVENT_EXTRA_REG(0x3134, 0xffff, 0x4),
 SNBEP_CBO_EVENT_EXTRA_REG(0x9134, 0xffff, 0x4),
 SNBEP_CBO_EVENT_EXTRA_REG(0x35, 0xff, 0x8),
 SNBEP_CBO_EVENT_EXTRA_REG(0x36, 0xff, 0x8),
 SNBEP_CBO_EVENT_EXTRA_REG(0x38, 0xff, 0x3),
 EVENT_EXTRA_END
};
3562
3563static u64 skx_cha_filter_mask(int fields)
3564{
3565 u64 mask = 0;
3566
3567 if (fields & 0x1)
3568 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_TID;
3569 if (fields & 0x2)
3570 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_LINK;
3571 if (fields & 0x4)
3572 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_STATE;
3573 if (fields & 0x8) {
3574 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_REM;
3575 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_LOC;
3576 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_ALL_OPC;
3577 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_NM;
3578 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_NOT_NM;
3579 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_OPC0;
3580 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_OPC1;
3581 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_NC;
3582 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_ISOC;
3583 }
3584 return mask;
3585}
3586
/* Delegate to the common CBOX constraint logic with the SKX filter mask. */
static struct event_constraint *
skx_cha_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
 return __snbep_cbox_get_constraint(box, event, skx_cha_filter_mask);
}
3592
/*
 * Set up the CHA filter register for an event, if it needs one.
 * ORs together the idx bits of every matching entry in
 * skx_uncore_cha_extra_regs and records the per-box filter MSR and
 * the user-supplied filter value (config1) in the event's extra reg.
 * Always returns 0.
 */
static int skx_cha_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
 struct extra_reg *er;
 int idx = 0;

 for (er = skx_uncore_cha_extra_regs; er->msr; er++) {
  if (er->event != (event->hw.config & er->config_mask))
   continue;
  idx |= er->idx;
 }

 if (idx) {
  /* Filter MSRs are laid out at a fixed stride per CHA box. */
  reg1->reg = HSWEP_C0_MSR_PMON_BOX_FILTER0 +
       HSWEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
  reg1->config = event->attr.config1 & skx_cha_filter_mask(idx);
  reg1->idx = idx;
 }
 return 0;
}
3613
/* SKX CHA ops: mostly reused IVB-EP/SNB-EP/HSW-EP helpers plus the
 * CHA-specific filter handling defined above. */
static struct intel_uncore_ops skx_uncore_chabox_ops = {

 .init_box = ivbep_uncore_msr_init_box,
 .disable_box = snbep_uncore_msr_disable_box,
 .enable_box = snbep_uncore_msr_enable_box,
 .disable_event = snbep_uncore_msr_disable_event,
 .enable_event = hswep_cbox_enable_event,
 .read_counter = uncore_msr_read_counter,
 .hw_config = skx_cha_hw_config,
 .get_constraint = skx_cha_get_constraint,
 .put_constraint = snbep_cbox_put_constraint,
};
3626
/*
 * Skylake-X CHA.  .num_boxes is intentionally left unset here;
 * presumably it is discovered at CPU init time from the fused CHA
 * count — TODO confirm against skx_uncore_cpu_init().
 */
static struct intel_uncore_type skx_uncore_chabox = {
 .name = "cha",
 .num_counters = 4,
 .perf_ctr_bits = 48,
 .event_ctl = HSWEP_C0_MSR_PMON_CTL0,
 .perf_ctr = HSWEP_C0_MSR_PMON_CTR0,
 .event_mask = HSWEP_S_MSR_PMON_RAW_EVENT_MASK,
 .box_ctl = HSWEP_C0_MSR_PMON_BOX_CTL,
 .msr_offset = HSWEP_CBO_MSR_OFFSET,
 .num_shared_regs = 1,
 .constraints = skx_uncore_chabox_constraints,
 .ops = &skx_uncore_chabox_ops,
 .format_group = &skx_uncore_chabox_format_group,
};
3641
/* sysfs "format" attributes for the SKX IIO PMU (includes the IIO-only
 * channel-mask and fc-mask fields). */
static struct attribute *skx_uncore_iio_formats_attr[] = {
 &format_attr_event.attr,
 &format_attr_umask.attr,
 &format_attr_edge.attr,
 &format_attr_inv.attr,
 &format_attr_thresh9.attr,
 &format_attr_ch_mask.attr,
 &format_attr_fc_mask.attr,
 NULL,
};

static const struct attribute_group skx_uncore_iio_format_group = {
 .name = "format",
 .attrs = skx_uncore_iio_formats_attr,
};
3657
/* SKX IIO events restricted to specific counters: (event, counter mask). */
static struct event_constraint skx_uncore_iio_constraints[] = {
 UNCORE_EVENT_CONSTRAINT(0x83, 0x3),
 UNCORE_EVENT_CONSTRAINT(0x88, 0xc),
 UNCORE_EVENT_CONSTRAINT(0x95, 0xc),
 UNCORE_EVENT_CONSTRAINT(0xc0, 0xc),
 UNCORE_EVENT_CONSTRAINT(0xc5, 0xc),
 UNCORE_EVENT_CONSTRAINT(0xd4, 0xc),
 EVENT_CONSTRAINT_END
};
3667
3668static void skx_iio_enable_event(struct intel_uncore_box *box,
3669 struct perf_event *event)
3670{
3671 struct hw_perf_event *hwc = &event->hw;
3672
3673 wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
3674}
3675
/* SKX IIO ops: common MSR helpers plus the simple enable above. */
static struct intel_uncore_ops skx_uncore_iio_ops = {
 .init_box = ivbep_uncore_msr_init_box,
 .disable_box = snbep_uncore_msr_disable_box,
 .enable_box = snbep_uncore_msr_enable_box,
 .disable_event = snbep_uncore_msr_disable_event,
 .enable_event = skx_iio_enable_event,
 .read_counter = uncore_msr_read_counter,
};
3684
/*
 * Extract this PMU's root-bus number from the die's packed bus-number
 * topology word: each IIO stack occupies BUS_NUM_STRIDE bits, and the
 * u8 return type truncates to the selected byte.
 */
static inline u8 skx_iio_stack(struct intel_uncore_pmu *pmu, int die)
{
 return pmu->type->topology[die].configuration >>
  (pmu->pmu_idx * BUS_NUM_STRIDE);
}
3690
/*
 * sysfs visibility callback for the IIO die-mapping attributes.
 * Hide the attribute when the stack's bus number is zero, except for
 * the one PMU (zero_bus_pmu) whose stack legitimately sits on bus 0.
 */
static umode_t
pmu_iio_mapping_visible(struct kobject *kobj, struct attribute *attr,
   int die, int zero_bus_pmu)
{
 struct intel_uncore_pmu *pmu = dev_to_uncore_pmu(kobj_to_dev(kobj));

 return (!skx_iio_stack(pmu, die) && pmu->pmu_idx != zero_bus_pmu) ? 0 : attr->mode;
}
3699
/* On SKX the PMU on bus 0 is pmu_idx 0. */
static umode_t
skx_iio_mapping_visible(struct kobject *kobj, struct attribute *attr, int die)
{

 return pmu_iio_mapping_visible(kobj, attr, die, 0);
}
3706
/*
 * sysfs show handler for a die's IIO mapping: prints the PCI
 * "segment:bus" of the stack this PMU monitors.  The die index was
 * stashed in the extended attribute's ->var at creation time.
 */
static ssize_t skx_iio_mapping_show(struct device *dev,
    struct device_attribute *attr, char *buf)
{
 struct intel_uncore_pmu *pmu = dev_to_uncore_pmu(dev);
 struct dev_ext_attribute *ea = to_dev_ext_attribute(attr);
 long die = (long)ea->var;

 return sprintf(buf, "%04x:%02x\n", pmu->type->topology[die].segment,
        skx_iio_stack(pmu, die));
}
3717
3718static int skx_msr_cpu_bus_read(int cpu, u64 *topology)
3719{
3720 u64 msr_value;
3721
3722 if (rdmsrl_on_cpu(cpu, SKX_MSR_CPU_BUS_NUMBER, &msr_value) ||
3723 !(msr_value & SKX_MSR_CPU_BUS_VALID_BIT))
3724 return -ENXIO;
3725
3726 *topology = msr_value;
3727
3728 return 0;
3729}
3730
/*
 * Find one online CPU belonging to logical die @die; falls back to
 * CPU 0 when no online CPU on that die is found.
 */
static int die_to_cpu(int die)
{
 int res = 0, cpu, current_die;

 /*
  * Hold the CPU hotplug read lock so CPUs cannot come or go while
  * we walk the online mask.
  */
 cpus_read_lock();
 for_each_online_cpu(cpu) {
  current_die = topology_logical_die_id(cpu);
  if (current_die == die) {
   res = cpu;
   break;
  }
 }
 cpus_read_unlock();
 return res;
}
3749
/*
 * Build the per-die IIO topology table: packed bus numbers (from the
 * CPU_BUS_NUMBER MSR) and PCI segment for every die.
 *
 * Return: negative errno on failure (topology freed and NULLed);
 * otherwise the last uncore_die_to_segment() result, which is >= 0.
 * Callers treat any non-negative value as success.
 */
static int skx_iio_get_topology(struct intel_uncore_type *type)
{
 int die, ret = -EPERM;

 type->topology = kcalloc(uncore_max_dies(), sizeof(*type->topology),
     GFP_KERNEL);
 if (!type->topology)
  return -ENOMEM;

 for (die = 0; die < uncore_max_dies(); die++) {
  ret = skx_msr_cpu_bus_read(die_to_cpu(die),
     &type->topology[die].configuration);
  if (ret)
   break;

  ret = uncore_die_to_segment(die);
  if (ret < 0)
   break;

  type->topology[die].segment = ret;
 }

 if (ret < 0) {
  kfree(type->topology);
  type->topology = NULL;
 }

 return ret;
}
3779
/* .attrs is populated at runtime by pmu_iio_set_mapping(). */
static struct attribute_group skx_iio_mapping_group = {
 .is_visible = skx_iio_mapping_visible,
};

static const struct attribute_group *skx_iio_attr_update[] = {
 &skx_iio_mapping_group,
 NULL,
};
3788
/*
 * Create the per-die "dieN" sysfs mapping attributes for an IIO-style
 * uncore type and attach them to @ag.
 *
 * On any failure everything allocated so far is unwound (attribute
 * names, the ext-attribute array, the attribute pointer array and the
 * topology table) and type->attr_update is cleared so no half-built
 * group is ever exposed.  Returns 0 on success, negative errno on
 * failure.
 */
static int
pmu_iio_set_mapping(struct intel_uncore_type *type, struct attribute_group *ag)
{
 char buf[64];
 int ret;
 long die = -1;
 struct attribute **attrs = NULL;
 struct dev_ext_attribute *eas = NULL;

 ret = type->get_topology(type);
 if (ret < 0)
  goto clear_attr_update;

 ret = -ENOMEM;

 /* One attribute pointer per die plus the NULL terminator. */
 attrs = kcalloc((uncore_max_dies() + 1), sizeof(*attrs), GFP_KERNEL);
 if (!attrs)
  goto clear_topology;

 eas = kcalloc(uncore_max_dies(), sizeof(*eas), GFP_KERNEL);
 if (!eas)
  goto clear_attrs;

 for (die = 0; die < uncore_max_dies(); die++) {
  sprintf(buf, "die%ld", die);
  sysfs_attr_init(&eas[die].attr.attr);
  eas[die].attr.attr.name = kstrdup(buf, GFP_KERNEL);
  if (!eas[die].attr.attr.name)
   goto err;
  eas[die].attr.attr.mode = 0444;
  eas[die].attr.show = skx_iio_mapping_show;
  eas[die].attr.store = NULL;
  /* Die index smuggled through ->var; read back in the show handler. */
  eas[die].var = (void *)die;
  attrs[die] = &eas[die].attr.attr;
 }
 ag->attrs = attrs;

 return 0;
err:
 /* kfree(NULL) is fine for the entry whose kstrdup() just failed. */
 for (; die >= 0; die--)
  kfree(eas[die].attr.attr.name);
 kfree(eas);
clear_attrs:
 kfree(attrs);
clear_topology:
 kfree(type->topology);
clear_attr_update:
 type->attr_update = NULL;
 return ret;
}
3840
/* SKX wrapper: build the IIO die mapping into skx_iio_mapping_group. */
static int skx_iio_set_mapping(struct intel_uncore_type *type)
{
 return pmu_iio_set_mapping(type, &skx_iio_mapping_group);
}
3845
/*
 * Undo skx_iio_set_mapping(): free the attribute names, the
 * ext-attribute array (one kcalloc block, reachable from the first
 * attribute), the attribute pointer array and the topology table.
 * Safe to call when the mapping was never set up (attrs == NULL).
 */
static void skx_iio_cleanup_mapping(struct intel_uncore_type *type)
{
 struct attribute **attr = skx_iio_mapping_group.attrs;

 if (!attr)
  return;

 for (; *attr; attr++)
  kfree((*attr)->name);
 kfree(attr_to_ext_attr(*skx_iio_mapping_group.attrs));
 kfree(skx_iio_mapping_group.attrs);
 skx_iio_mapping_group.attrs = NULL;
 kfree(type->topology);
}
3860
/* Skylake-X IIO: one box per IIO stack, with sysfs die-to-PCI-bus mapping. */
static struct intel_uncore_type skx_uncore_iio = {
 .name = "iio",
 .num_counters = 4,
 .num_boxes = 6,
 .perf_ctr_bits = 48,
 .event_ctl = SKX_IIO0_MSR_PMON_CTL0,
 .perf_ctr = SKX_IIO0_MSR_PMON_CTR0,
 .event_mask = SKX_IIO_PMON_RAW_EVENT_MASK,
 .event_mask_ext = SKX_IIO_PMON_RAW_EVENT_MASK_EXT,
 .box_ctl = SKX_IIO0_MSR_PMON_BOX_CTL,
 .msr_offset = SKX_IIO_MSR_OFFSET,
 .constraints = skx_uncore_iio_constraints,
 .ops = &skx_uncore_iio_ops,
 .format_group = &skx_uncore_iio_format_group,
 .attr_update = skx_iio_attr_update,
 .get_topology = skx_iio_get_topology,
 .set_mapping = skx_iio_set_mapping,
 .cleanup_mapping = skx_iio_cleanup_mapping,
};
3880
/* Free-running IIO counter groups. */
enum perf_uncore_iio_freerunning_type_id {
 SKX_IIO_MSR_IOCLK = 0,
 SKX_IIO_MSR_BW = 1,
 SKX_IIO_MSR_UTIL = 2,

 SKX_IIO_FREERUNNING_TYPE_MAX,
};

/* Fields: counter base MSR, counter offset, box offset, #counters, width. */
static struct freerunning_counters skx_iio_freerunning[] = {
 [SKX_IIO_MSR_IOCLK] = { 0xa45, 0x1, 0x20, 1, 36 },
 [SKX_IIO_MSR_BW] = { 0xb00, 0x1, 0x10, 8, 36 },
 [SKX_IIO_MSR_UTIL] = { 0xb08, 0x1, 0x10, 8, 36 },
};
3895
/*
 * Free-running IIO event aliases.  The bandwidth scale 3.814697266e-6
 * is 4/2^20: counters tick in 4-byte units, reported in MiB.
 */
static struct uncore_event_desc skx_uncore_iio_freerunning_events[] = {
 /* Free-Running IO CLOCKS Counter */
 INTEL_UNCORE_EVENT_DESC(ioclk, "event=0xff,umask=0x10"),
 /* Free-Running IIO BANDWIDTH Counters */
 INTEL_UNCORE_EVENT_DESC(bw_in_port0, "event=0xff,umask=0x20"),
 INTEL_UNCORE_EVENT_DESC(bw_in_port0.scale, "3.814697266e-6"),
 INTEL_UNCORE_EVENT_DESC(bw_in_port0.unit, "MiB"),
 INTEL_UNCORE_EVENT_DESC(bw_in_port1, "event=0xff,umask=0x21"),
 INTEL_UNCORE_EVENT_DESC(bw_in_port1.scale, "3.814697266e-6"),
 INTEL_UNCORE_EVENT_DESC(bw_in_port1.unit, "MiB"),
 INTEL_UNCORE_EVENT_DESC(bw_in_port2, "event=0xff,umask=0x22"),
 INTEL_UNCORE_EVENT_DESC(bw_in_port2.scale, "3.814697266e-6"),
 INTEL_UNCORE_EVENT_DESC(bw_in_port2.unit, "MiB"),
 INTEL_UNCORE_EVENT_DESC(bw_in_port3, "event=0xff,umask=0x23"),
 INTEL_UNCORE_EVENT_DESC(bw_in_port3.scale, "3.814697266e-6"),
 INTEL_UNCORE_EVENT_DESC(bw_in_port3.unit, "MiB"),
 INTEL_UNCORE_EVENT_DESC(bw_out_port0, "event=0xff,umask=0x24"),
 INTEL_UNCORE_EVENT_DESC(bw_out_port0.scale, "3.814697266e-6"),
 INTEL_UNCORE_EVENT_DESC(bw_out_port0.unit, "MiB"),
 INTEL_UNCORE_EVENT_DESC(bw_out_port1, "event=0xff,umask=0x25"),
 INTEL_UNCORE_EVENT_DESC(bw_out_port1.scale, "3.814697266e-6"),
 INTEL_UNCORE_EVENT_DESC(bw_out_port1.unit, "MiB"),
 INTEL_UNCORE_EVENT_DESC(bw_out_port2, "event=0xff,umask=0x26"),
 INTEL_UNCORE_EVENT_DESC(bw_out_port2.scale, "3.814697266e-6"),
 INTEL_UNCORE_EVENT_DESC(bw_out_port2.unit, "MiB"),
 INTEL_UNCORE_EVENT_DESC(bw_out_port3, "event=0xff,umask=0x27"),
 INTEL_UNCORE_EVENT_DESC(bw_out_port3.scale, "3.814697266e-6"),
 INTEL_UNCORE_EVENT_DESC(bw_out_port3.unit, "MiB"),
 /* Free-running IIO UTILIZATION Counters */
 INTEL_UNCORE_EVENT_DESC(util_in_port0, "event=0xff,umask=0x30"),
 INTEL_UNCORE_EVENT_DESC(util_out_port0, "event=0xff,umask=0x31"),
 INTEL_UNCORE_EVENT_DESC(util_in_port1, "event=0xff,umask=0x32"),
 INTEL_UNCORE_EVENT_DESC(util_out_port1, "event=0xff,umask=0x33"),
 INTEL_UNCORE_EVENT_DESC(util_in_port2, "event=0xff,umask=0x34"),
 INTEL_UNCORE_EVENT_DESC(util_out_port2, "event=0xff,umask=0x35"),
 INTEL_UNCORE_EVENT_DESC(util_in_port3, "event=0xff,umask=0x36"),
 INTEL_UNCORE_EVENT_DESC(util_out_port3, "event=0xff,umask=0x37"),
 { }, /* end marker */
};
3935
/*
 * Free-running counters need no enable/disable hooks: they count
 * unconditionally, so only read and config-validation ops are provided.
 */
static struct intel_uncore_ops skx_uncore_iio_freerunning_ops = {
	.read_counter		= uncore_msr_read_counter,
	.hw_config		= uncore_freerunning_hw_config,
};

/* Only event and umask are meaningful for free-running counters. */
static struct attribute *skx_uncore_iio_freerunning_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	NULL,
};

static const struct attribute_group skx_uncore_iio_freerunning_format_group = {
	.name = "format",
	.attrs = skx_uncore_iio_freerunning_formats_attr,
};
3951
/* SKX free-running IIO PMU: one box per IIO stack, MSR based. */
static struct intel_uncore_type skx_uncore_iio_free_running = {
	.name			= "iio_free_running",
	.num_counters		= 17,	/* 1 ioclk + 8 bandwidth + 8 utilization */
	.num_boxes		= 6,
	.num_freerunning_types	= SKX_IIO_FREERUNNING_TYPE_MAX,
	.freerunning		= skx_iio_freerunning,
	.ops			= &skx_uncore_iio_freerunning_ops,
	.event_descs		= skx_uncore_iio_freerunning_events,
	.format_group		= &skx_uncore_iio_freerunning_format_group,
};
3962
/* Generic SKX event format: event select, umask, edge, invert, 8-bit threshold. */
static struct attribute *skx_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	NULL,
};

static const struct attribute_group skx_uncore_format_group = {
	.name = "format",
	.attrs = skx_uncore_formats_attr,
};
3976
/* SKX IRP (IIO ring port) PMU: one box per IIO stack, MSR based. */
static struct intel_uncore_type skx_uncore_irp = {
	.name			= "irp",
	.num_counters		= 2,
	.num_boxes		= 6,
	.perf_ctr_bits		= 48,
	.event_ctl		= SKX_IRP0_MSR_PMON_CTL0,
	.perf_ctr		= SKX_IRP0_MSR_PMON_CTR0,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl		= SKX_IRP0_MSR_PMON_BOX_CTL,
	.msr_offset		= SKX_IRP_MSR_OFFSET,
	.ops			= &skx_uncore_iio_ops,
	.format_group		= &skx_uncore_format_group,
};
3990
/*
 * PCU event format adds occupancy edge/invert controls and the four
 * frequency/voltage band filters (config1) on top of the generic fields.
 */
static struct attribute *skx_uncore_pcu_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	&format_attr_occ_invert.attr,
	&format_attr_occ_edge_det.attr,
	&format_attr_filter_band0.attr,
	&format_attr_filter_band1.attr,
	&format_attr_filter_band2.attr,
	&format_attr_filter_band3.attr,
	NULL,
};

static struct attribute_group skx_uncore_pcu_format_group = {
	.name = "format",
	.attrs = skx_uncore_pcu_formats_attr,
};
4010
/*
 * SKX PCU ops reuse the IVB-EP MSR access helpers; the hw_config and
 * constraint hooks manage the shared PCU filter register.
 */
static struct intel_uncore_ops skx_uncore_pcu_ops = {
	IVBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.hw_config		= hswep_pcu_hw_config,
	.get_constraint		= snbep_pcu_get_constraint,
	.put_constraint		= snbep_pcu_put_constraint,
};

/* SKX power control unit PMU; register layout is unchanged from HSW-EP. */
static struct intel_uncore_type skx_uncore_pcu = {
	.name			= "pcu",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.perf_ctr		= HSWEP_PCU_MSR_PMON_CTR0,
	.event_ctl		= HSWEP_PCU_MSR_PMON_CTL0,
	.event_mask		= SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= HSWEP_PCU_MSR_PMON_BOX_CTL,
	.num_shared_regs	= 1,	/* the shared filter MSR */
	.ops			= &skx_uncore_pcu_ops,
	.format_group		= &skx_uncore_pcu_format_group,
};
4031
/* All MSR-accessed uncore PMUs on Skylake server; NULL terminated. */
static struct intel_uncore_type *skx_msr_uncores[] = {
	&skx_uncore_ubox,
	&skx_uncore_chabox,
	&skx_uncore_iio,
	&skx_uncore_iio_free_running,
	&skx_uncore_irp,
	&skx_uncore_pcu,
	NULL,
};
4041
4042
4043
4044
4045
4046#define SKX_CAPID6 0x9c
4047#define SKX_CHA_BIT_MASK GENMASK(27, 0)
4048
4049static int skx_count_chabox(void)
4050{
4051 struct pci_dev *dev = NULL;
4052 u32 val = 0;
4053
4054 dev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x2083, dev);
4055 if (!dev)
4056 goto out;
4057
4058 pci_read_config_dword(dev, SKX_CAPID6, &val);
4059 val &= SKX_CHA_BIT_MASK;
4060out:
4061 pci_dev_put(dev);
4062 return hweight32(val);
4063}
4064
/* Register the SKX MSR uncore PMUs; CHA box count is probed at runtime. */
void skx_uncore_cpu_init(void)
{
	skx_uncore_chabox.num_boxes = skx_count_chabox();
	uncore_msr_uncores = skx_msr_uncores;
}
4070
/* SKX integrated memory controller PMU: PCI based, one box per channel. */
static struct intel_uncore_type skx_uncore_imc = {
	.name		= "imc",
	.num_counters   = 4,
	.num_boxes	= 6,
	.perf_ctr_bits	= 48,
	.fixed_ctr_bits	= 48,
	.fixed_ctr	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
	.fixed_ctl	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
	.event_descs	= hswep_uncore_imc_events,
	.perf_ctr	= SNBEP_PCI_PMON_CTR0,
	.event_ctl	= SNBEP_PCI_PMON_CTL0,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl	= SNBEP_PCI_PMON_BOX_CTL,
	.ops		= &ivbep_uncore_pci_ops,
	.format_group	= &skx_uncore_format_group,
};
4087
/* UPI format: extended umask field replaces the plain 8-bit umask. */
static struct attribute *skx_upi_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask_ext.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	NULL,
};

static const struct attribute_group skx_upi_uncore_format_group = {
	.name = "format",
	.attrs = skx_upi_uncore_formats_attr,
};
4101
/*
 * Reset the UPI box on init.  The UPI PMON control registers are 8 bytes
 * apart rather than the usual 4, hence the CTL_OFFS8 flag.
 */
static void skx_upi_uncore_pci_init_box(struct intel_uncore_box *box)
{
	struct pci_dev *pdev = box->pci_dev;

	__set_bit(UNCORE_BOX_FLAG_CTL_OFFS8, &box->flags);
	pci_write_config_dword(pdev, SKX_UPI_PCI_PMON_BOX_CTL, IVBEP_PMON_BOX_CTL_INT);
}

static struct intel_uncore_ops skx_upi_uncore_pci_ops = {
	.init_box	= skx_upi_uncore_pci_init_box,
	.disable_box	= snbep_uncore_pci_disable_box,
	.enable_box	= snbep_uncore_pci_enable_box,
	.disable_event	= snbep_uncore_pci_disable_event,
	.enable_event	= snbep_uncore_pci_enable_event,
	.read_counter	= snbep_uncore_pci_read_counter,
};
4118
/* SKX UPI (inter-socket link) PMU: PCI based, one box per link. */
static struct intel_uncore_type skx_uncore_upi = {
	.name		= "upi",
	.num_counters   = 4,
	.num_boxes	= 3,
	.perf_ctr_bits	= 48,
	.perf_ctr	= SKX_UPI_PCI_PMON_CTR0,
	.event_ctl	= SKX_UPI_PCI_PMON_CTL0,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.event_mask_ext = SKX_UPI_CTL_UMASK_EXT,
	.box_ctl	= SKX_UPI_PCI_PMON_BOX_CTL,
	.ops		= &skx_upi_uncore_pci_ops,
	.format_group	= &skx_upi_uncore_format_group,
};
4132
/* Reset the M2M box on init; M2M also uses 8-byte spaced control registers. */
static void skx_m2m_uncore_pci_init_box(struct intel_uncore_box *box)
{
	struct pci_dev *pdev = box->pci_dev;

	__set_bit(UNCORE_BOX_FLAG_CTL_OFFS8, &box->flags);
	pci_write_config_dword(pdev, SKX_M2M_PCI_PMON_BOX_CTL, IVBEP_PMON_BOX_CTL_INT);
}

static struct intel_uncore_ops skx_m2m_uncore_pci_ops = {
	.init_box	= skx_m2m_uncore_pci_init_box,
	.disable_box	= snbep_uncore_pci_disable_box,
	.enable_box	= snbep_uncore_pci_enable_box,
	.disable_event	= snbep_uncore_pci_disable_event,
	.enable_event	= snbep_uncore_pci_enable_event,
	.read_counter	= snbep_uncore_pci_read_counter,
};
4149
/* SKX M2M (mesh-to-memory) PMU: PCI based, one box per memory controller. */
static struct intel_uncore_type skx_uncore_m2m = {
	.name		= "m2m",
	.num_counters   = 4,
	.num_boxes	= 2,
	.perf_ctr_bits	= 48,
	.perf_ctr	= SKX_M2M_PCI_PMON_CTR0,
	.event_ctl	= SKX_M2M_PCI_PMON_CTL0,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl	= SKX_M2M_PCI_PMON_BOX_CTL,
	.ops		= &skx_m2m_uncore_pci_ops,
	.format_group	= &skx_uncore_format_group,
};
4162
/* Event 0x23 may only be scheduled on counters 0-1. */
static struct event_constraint skx_uncore_m2pcie_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	EVENT_CONSTRAINT_END
};

/* SKX M2PCIe (mesh-to-PCIe) PMU: PCI based, one box per IIO stack group. */
static struct intel_uncore_type skx_uncore_m2pcie = {
	.name		= "m2pcie",
	.num_counters   = 4,
	.num_boxes	= 4,
	.perf_ctr_bits	= 48,
	.constraints	= skx_uncore_m2pcie_constraints,
	.perf_ctr	= SNBEP_PCI_PMON_CTR0,
	.event_ctl	= SNBEP_PCI_PMON_CTL0,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl	= SNBEP_PCI_PMON_BOX_CTL,
	.ops		= &ivbep_uncore_pci_ops,
	.format_group	= &skx_uncore_format_group,
};
4181
/*
 * M3UPI counter constraints: events 0x1d/0x1e are restricted to counter 0,
 * the remainder may use any of the three counters.
 */
static struct event_constraint skx_uncore_m3upi_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x1d, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x1e, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x40, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x4e, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x4f, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x50, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x51, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x52, 0x7),
	EVENT_CONSTRAINT_END
};

/* SKX M3UPI (UPI mesh interface) PMU: PCI based, one box per UPI link. */
static struct intel_uncore_type skx_uncore_m3upi = {
	.name		= "m3upi",
	.num_counters   = 3,
	.num_boxes	= 3,
	.perf_ctr_bits	= 48,
	.constraints	= skx_uncore_m3upi_constraints,
	.perf_ctr	= SNBEP_PCI_PMON_CTR0,
	.event_ctl	= SNBEP_PCI_PMON_CTL0,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl	= SNBEP_PCI_PMON_BOX_CTL,
	.ops		= &ivbep_uncore_pci_ops,
	.format_group	= &skx_uncore_format_group,
};
4207
/* Indices into skx_pci_uncores[], referenced by the PCI ID table below. */
enum {
	SKX_PCI_UNCORE_IMC,
	SKX_PCI_UNCORE_M2M,
	SKX_PCI_UNCORE_UPI,
	SKX_PCI_UNCORE_M2PCIE,
	SKX_PCI_UNCORE_M3UPI,
};

/* All PCI-accessed uncore PMUs on Skylake server; NULL terminated. */
static struct intel_uncore_type *skx_pci_uncores[] = {
	[SKX_PCI_UNCORE_IMC]		= &skx_uncore_imc,
	[SKX_PCI_UNCORE_M2M]		= &skx_uncore_m2m,
	[SKX_PCI_UNCORE_UPI]		= &skx_uncore_upi,
	[SKX_PCI_UNCORE_M2PCIE]		= &skx_uncore_m2pcie,
	[SKX_PCI_UNCORE_M3UPI]		= &skx_uncore_m3upi,
	NULL,
};
4224
/*
 * PCI device IDs of the SKX uncore units.  UNCORE_PCI_DEV_FULL_DATA encodes
 * (device number, function number, uncore type index, box index).
 */
static const struct pci_device_id skx_uncore_pci_ids[] = {
	{ /* MC0 Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2042),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(10, 2, SKX_PCI_UNCORE_IMC, 0),
	},
	{ /* MC0 Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2046),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(10, 6, SKX_PCI_UNCORE_IMC, 1),
	},
	{ /* MC0 Channel 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204a),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(11, 2, SKX_PCI_UNCORE_IMC, 2),
	},
	{ /* MC1 Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2042),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(12, 2, SKX_PCI_UNCORE_IMC, 3),
	},
	{ /* MC1 Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2046),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(12, 6, SKX_PCI_UNCORE_IMC, 4),
	},
	{ /* MC1 Channel 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204a),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(13, 2, SKX_PCI_UNCORE_IMC, 5),
	},
	{ /* M2M0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2066),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(8, 0, SKX_PCI_UNCORE_M2M, 0),
	},
	{ /* M2M1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2066),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(9, 0, SKX_PCI_UNCORE_M2M, 1),
	},
	{ /* UPI0 Link 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2058),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(14, 0, SKX_PCI_UNCORE_UPI, 0),
	},
	{ /* UPI0 Link 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2058),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(15, 0, SKX_PCI_UNCORE_UPI, 1),
	},
	{ /* UPI1 Link 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2058),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(16, 0, SKX_PCI_UNCORE_UPI, 2),
	},
	{ /* M2PCIe 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2088),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(21, 1, SKX_PCI_UNCORE_M2PCIE, 0),
	},
	{ /* M2PCIe 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2088),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(22, 1, SKX_PCI_UNCORE_M2PCIE, 1),
	},
	{ /* M2PCIe 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2088),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(23, 1, SKX_PCI_UNCORE_M2PCIE, 2),
	},
	{ /* M2PCIe 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2088),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(21, 5, SKX_PCI_UNCORE_M2PCIE, 3),
	},
	{ /* M3UPI0 Link 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204D),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 1, SKX_PCI_UNCORE_M3UPI, 0),
	},
	{ /* M3UPI0 Link 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204E),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 2, SKX_PCI_UNCORE_M3UPI, 1),
	},
	{ /* M3UPI1 Link 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204D),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 5, SKX_PCI_UNCORE_M3UPI, 2),
	},
	{ /* end: all zeroes */ }
};

static struct pci_driver skx_uncore_pci_driver = {
	.name		= "skx_uncore",
	.id_table	= skx_uncore_pci_ids,
};
4306
/*
 * Register the SKX PCI uncore PMUs after building the PCI-bus to
 * physical-package mapping from the UBOX device (DID 0x2014).
 * Returns 0 on success or a negative errno.
 */
int skx_uncore_pci_init(void)
{
	/* need to double check pci address */
	int ret = snbep_pci2phy_map_init(0x2014, SKX_CPUNODEID, SKX_GIDNIDMAP, false);

	if (ret)
		return ret;

	uncore_pci_uncores = skx_pci_uncores;
	uncore_pci_driver = &skx_uncore_pci_driver;
	return 0;
}
4319
4320
4321
4322
4323
/* SNR UBOX (system config controller) PMU: MSR based, has a fixed UCLK counter. */
static struct intel_uncore_type snr_uncore_ubox = {
	.name			= "ubox",
	.num_counters		= 2,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.fixed_ctr_bits		= 48,
	.perf_ctr		= SNR_U_MSR_PMON_CTR0,
	.event_ctl		= SNR_U_MSR_PMON_CTL0,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.fixed_ctr		= SNR_U_MSR_PMON_UCLK_FIXED_CTR,
	.fixed_ctl		= SNR_U_MSR_PMON_UCLK_FIXED_CTL,
	.ops			= &ivbep_uncore_msr_ops,
	.format_group		= &ivbep_uncore_format_group,
};
4338
/* SNR CHA format: extended umask, TID enable, and a 5-bit TID filter (config1). */
static struct attribute *snr_uncore_cha_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask_ext2.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	&format_attr_filter_tid5.attr,
	NULL,
};
static const struct attribute_group snr_uncore_chabox_format_group = {
	.name = "format",
	.attrs = snr_uncore_cha_formats_attr,
};
4353
/*
 * Attach the per-box TID filter register to the event: the filter MSR of
 * box N sits N * msr_offset above the box-0 filter, and config1 carries
 * the TID value.  Always succeeds.
 */
static int snr_cha_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;

	reg1->reg = SNR_C0_MSR_PMON_BOX_FILTER0 +
		    box->pmu->type->msr_offset * box->pmu->pmu_idx;
	reg1->config = event->attr.config1 & SKX_CHA_MSR_PMON_BOX_FILTER_TID;
	reg1->idx = 0;

	return 0;
}
4365
4366static void snr_cha_enable_event(struct intel_uncore_box *box,
4367 struct perf_event *event)
4368{
4369 struct hw_perf_event *hwc = &event->hw;
4370 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
4371
4372 if (reg1->idx != EXTRA_REG_NONE)
4373 wrmsrl(reg1->reg, reg1->config);
4374
4375 wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
4376}
4377
/* SNR CHA ops: standard SNB-EP box handling plus the TID filter hooks above. */
static struct intel_uncore_ops snr_uncore_chabox_ops = {
	.init_box		= ivbep_uncore_msr_init_box,
	.disable_box		= snbep_uncore_msr_disable_box,
	.enable_box		= snbep_uncore_msr_enable_box,
	.disable_event		= snbep_uncore_msr_disable_event,
	.enable_event		= snr_cha_enable_event,
	.read_counter		= uncore_msr_read_counter,
	.hw_config		= snr_cha_hw_config,
};

/* SNR CHA (caching/home agent) PMU: MSR based, one box per CHA slice. */
static struct intel_uncore_type snr_uncore_chabox = {
	.name			= "cha",
	.num_counters		= 4,
	.num_boxes		= 6,
	.perf_ctr_bits		= 48,
	.event_ctl		= SNR_CHA_MSR_PMON_CTL0,
	.perf_ctr		= SNR_CHA_MSR_PMON_CTR0,
	.box_ctl		= SNR_CHA_MSR_PMON_BOX_CTL,
	.msr_offset		= HSWEP_CBO_MSR_OFFSET,
	.event_mask		= HSWEP_S_MSR_PMON_RAW_EVENT_MASK,
	.event_mask_ext		= SNR_CHA_RAW_EVENT_MASK_EXT,
	.ops			= &snr_uncore_chabox_ops,
	.format_group		= &snr_uncore_chabox_format_group,
};
4402
/* SNR IIO format: 9-bit threshold plus channel and FC mask filters. */
static struct attribute *snr_uncore_iio_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh9.attr,
	&format_attr_ch_mask2.attr,
	&format_attr_fc_mask2.attr,
	NULL,
};

static const struct attribute_group snr_uncore_iio_format_group = {
	.name = "format",
	.attrs = snr_uncore_iio_formats_attr,
};
4418
/*
 * Expose the IIO die-to-PMU mapping attributes; SNR has a single
 * root-bus segment per die (hence the trailing 1).
 */
static umode_t
snr_iio_mapping_visible(struct kobject *kobj, struct attribute *attr, int die)
{
	/* Root bus 0x00 is valid only for pmu_idx = 1. */
	return pmu_iio_mapping_visible(kobj, attr, die, 1);
}

static struct attribute_group snr_iio_mapping_group = {
	.is_visible	= snr_iio_mapping_visible,
};

static const struct attribute_group *snr_iio_attr_update[] = {
	&snr_iio_mapping_group,
	NULL,
};
4434
4435static int sad_cfg_iio_topology(struct intel_uncore_type *type, u8 *sad_pmon_mapping)
4436{
4437 u32 sad_cfg;
4438 int die, stack_id, ret = -EPERM;
4439 struct pci_dev *dev = NULL;
4440
4441 type->topology = kcalloc(uncore_max_dies(), sizeof(*type->topology),
4442 GFP_KERNEL);
4443 if (!type->topology)
4444 return -ENOMEM;
4445
4446 while ((dev = pci_get_device(PCI_VENDOR_ID_INTEL, SNR_ICX_MESH2IIO_MMAP_DID, dev))) {
4447 ret = pci_read_config_dword(dev, SNR_ICX_SAD_CONTROL_CFG, &sad_cfg);
4448 if (ret) {
4449 ret = pcibios_err_to_errno(ret);
4450 break;
4451 }
4452
4453 die = uncore_pcibus_to_dieid(dev->bus);
4454 stack_id = SAD_CONTROL_STACK_ID(sad_cfg);
4455 if (die < 0 || stack_id >= type->num_boxes) {
4456 ret = -EPERM;
4457 break;
4458 }
4459
4460
4461 stack_id = sad_pmon_mapping[stack_id];
4462
4463 ((u8 *)&(type->topology[die].configuration))[stack_id] = dev->bus->number;
4464 type->topology[die].segment = pci_domain_nr(dev->bus);
4465 }
4466
4467 if (ret) {
4468 kfree(type->topology);
4469 type->topology = NULL;
4470 }
4471
4472 return ret;
4473}
4474
4475
4476
4477
/* SNR PMON stack ids as exposed by the IIO PMU. */
enum {
	SNR_QAT_PMON_ID,
	SNR_CBDMA_DMI_PMON_ID,
	SNR_NIS_PMON_ID,
	SNR_DLB_PMON_ID,
	SNR_PCIE_GEN3_PMON_ID
};

/* Index: hardware SAD_CONTROL stack id; value: corresponding PMON stack id. */
static u8 snr_sad_pmon_mapping[] = {
	SNR_CBDMA_DMI_PMON_ID,
	SNR_PCIE_GEN3_PMON_ID,
	SNR_DLB_PMON_ID,
	SNR_NIS_PMON_ID,
	SNR_QAT_PMON_ID
};
4493
/* Discover the SNR IIO stack topology from the SAD configuration. */
static int snr_iio_get_topology(struct intel_uncore_type *type)
{
	return sad_cfg_iio_topology(type, snr_sad_pmon_mapping);
}

/* Publish the discovered topology through the mapping attribute group. */
static int snr_iio_set_mapping(struct intel_uncore_type *type)
{
	return pmu_iio_set_mapping(type, &snr_iio_mapping_group);
}
4503
/* SNR IIO PMU: MSR based, one box per IIO stack, with die-mapping sysfs. */
static struct intel_uncore_type snr_uncore_iio = {
	.name			= "iio",
	.num_counters		= 4,
	.num_boxes		= 5,
	.perf_ctr_bits		= 48,
	.event_ctl		= SNR_IIO_MSR_PMON_CTL0,
	.perf_ctr		= SNR_IIO_MSR_PMON_CTR0,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.event_mask_ext		= SNR_IIO_PMON_RAW_EVENT_MASK_EXT,
	.box_ctl		= SNR_IIO_MSR_PMON_BOX_CTL,
	.msr_offset		= SNR_IIO_MSR_OFFSET,
	.ops			= &ivbep_uncore_msr_ops,
	.format_group		= &snr_uncore_iio_format_group,
	.attr_update		= snr_iio_attr_update,
	.get_topology		= snr_iio_get_topology,
	.set_mapping		= snr_iio_set_mapping,
	.cleanup_mapping	= skx_iio_cleanup_mapping,
};
4522
/* SNR IRP (IIO ring port) PMU: MSR based, one box per IIO stack. */
static struct intel_uncore_type snr_uncore_irp = {
	.name			= "irp",
	.num_counters		= 2,
	.num_boxes		= 5,
	.perf_ctr_bits		= 48,
	.event_ctl		= SNR_IRP0_MSR_PMON_CTL0,
	.perf_ctr		= SNR_IRP0_MSR_PMON_CTR0,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNR_IRP0_MSR_PMON_BOX_CTL,
	.msr_offset		= SNR_IRP_MSR_OFFSET,
	.ops			= &ivbep_uncore_msr_ops,
	.format_group		= &ivbep_uncore_format_group,
};

/* SNR M2PCIe PMU: MSR based, one box per IIO stack. */
static struct intel_uncore_type snr_uncore_m2pcie = {
	.name		= "m2pcie",
	.num_counters	= 4,
	.num_boxes	= 5,
	.perf_ctr_bits	= 48,
	.event_ctl	= SNR_M2PCIE_MSR_PMON_CTL0,
	.perf_ctr	= SNR_M2PCIE_MSR_PMON_CTR0,
	.box_ctl	= SNR_M2PCIE_MSR_PMON_BOX_CTL,
	.msr_offset	= SNR_M2PCIE_MSR_OFFSET,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.ops		= &ivbep_uncore_msr_ops,
	.format_group	= &ivbep_uncore_format_group,
};
4550
4551static int snr_pcu_hw_config(struct intel_uncore_box *box, struct perf_event *event)
4552{
4553 struct hw_perf_event *hwc = &event->hw;
4554 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
4555 int ev_sel = hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK;
4556
4557 if (ev_sel >= 0xb && ev_sel <= 0xe) {
4558 reg1->reg = SNR_PCU_MSR_PMON_BOX_FILTER;
4559 reg1->idx = ev_sel - 0xb;
4560 reg1->config = event->attr.config1 & (0xff << reg1->idx);
4561 }
4562 return 0;
4563}
4564
/* SNR PCU ops: IVB-EP MSR access plus shared band-filter management. */
static struct intel_uncore_ops snr_uncore_pcu_ops = {
	IVBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.hw_config		= snr_pcu_hw_config,
	.get_constraint		= snbep_pcu_get_constraint,
	.put_constraint		= snbep_pcu_put_constraint,
};

/* SNR power control unit PMU: MSR based, single box. */
static struct intel_uncore_type snr_uncore_pcu = {
	.name			= "pcu",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNR_PCU_MSR_PMON_CTR0,
	.event_ctl		= SNR_PCU_MSR_PMON_CTL0,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNR_PCU_MSR_PMON_BOX_CTL,
	.num_shared_regs	= 1,	/* the shared filter MSR */
	.ops			= &snr_uncore_pcu_ops,
	.format_group		= &skx_uncore_pcu_format_group,
};
4585
/* Free-running IIO counter types on Snow Ridge. */
enum perf_uncore_snr_iio_freerunning_type_id {
	SNR_IIO_MSR_IOCLK,
	SNR_IIO_MSR_BW_IN,

	SNR_IIO_FREERUNNING_TYPE_MAX,
};

/* { first counter MSR, box offset, counter offset, #counters, width } */
static struct freerunning_counters snr_iio_freerunning[] = {
	[SNR_IIO_MSR_IOCLK]	= { 0x1eac, 0x1, 0x10, 1, 48 },
	[SNR_IIO_MSR_BW_IN]	= { 0x1f00, 0x1, 0x10, 8, 48 },
};
4597
/*
 * Free-running IIO counter events on Snow Ridge: IIO clock ticks and
 * eight inbound-bandwidth counters (scale converts 32B units to MiB).
 */
static struct uncore_event_desc snr_uncore_iio_freerunning_events[] = {
	/* Free-Running IO CLOCKS Counter */
	INTEL_UNCORE_EVENT_DESC(ioclk,			"event=0xff,umask=0x10"),
	/* Free-Running IIO BANDWIDTH IN Counters */
	INTEL_UNCORE_EVENT_DESC(bw_in_port0,		"event=0xff,umask=0x20"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port0.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port0.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port1,		"event=0xff,umask=0x21"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port1.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port1.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port2,		"event=0xff,umask=0x22"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port2.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port2.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port3,		"event=0xff,umask=0x23"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port3.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port3.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port4,		"event=0xff,umask=0x24"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port4.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port4.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port5,		"event=0xff,umask=0x25"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port5.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port5.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port6,		"event=0xff,umask=0x26"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port6.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port6.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port7,		"event=0xff,umask=0x27"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port7.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port7.unit,	"MiB"),
	{ },
};
4628
/* SNR free-running IIO PMU: reuses the SKX free-running ops/format. */
static struct intel_uncore_type snr_uncore_iio_free_running = {
	.name			= "iio_free_running",
	.num_counters		= 9,	/* 1 ioclk + 8 bandwidth */
	.num_boxes		= 5,
	.num_freerunning_types	= SNR_IIO_FREERUNNING_TYPE_MAX,
	.freerunning		= snr_iio_freerunning,
	.ops			= &skx_uncore_iio_freerunning_ops,
	.event_descs		= snr_uncore_iio_freerunning_events,
	.format_group		= &skx_uncore_iio_freerunning_format_group,
};

/* All MSR-accessed uncore PMUs on Snow Ridge; NULL terminated. */
static struct intel_uncore_type *snr_msr_uncores[] = {
	&snr_uncore_ubox,
	&snr_uncore_chabox,
	&snr_uncore_iio,
	&snr_uncore_irp,
	&snr_uncore_m2pcie,
	&snr_uncore_pcu,
	&snr_uncore_iio_free_running,
	NULL,
};
4650
/* Register the SNR MSR uncore PMUs. */
void snr_uncore_cpu_init(void)
{
	uncore_msr_uncores = snr_msr_uncores;
}
4655
/* Reset the M2M box on init; SNR M2M uses 8-byte spaced control registers. */
static void snr_m2m_uncore_pci_init_box(struct intel_uncore_box *box)
{
	struct pci_dev *pdev = box->pci_dev;
	int box_ctl = uncore_pci_box_ctl(box);

	__set_bit(UNCORE_BOX_FLAG_CTL_OFFS8, &box->flags);
	pci_write_config_dword(pdev, box_ctl, IVBEP_PMON_BOX_CTL_INT);
}

static struct intel_uncore_ops snr_m2m_uncore_pci_ops = {
	.init_box	= snr_m2m_uncore_pci_init_box,
	.disable_box	= snbep_uncore_pci_disable_box,
	.enable_box	= snbep_uncore_pci_enable_box,
	.disable_event	= snbep_uncore_pci_disable_event,
	.enable_event	= snbep_uncore_pci_enable_event,
	.read_counter	= snbep_uncore_pci_read_counter,
};
4673
/* SNR M2M format: umask extended to a third byte. */
static struct attribute *snr_m2m_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask_ext3.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	NULL,
};

static const struct attribute_group snr_m2m_uncore_format_group = {
	.name = "format",
	.attrs = snr_m2m_uncore_formats_attr,
};

/* SNR M2M (mesh-to-memory) PMU: PCI based, single box. */
static struct intel_uncore_type snr_uncore_m2m = {
	.name		= "m2m",
	.num_counters   = 4,
	.num_boxes	= 1,
	.perf_ctr_bits	= 48,
	.perf_ctr	= SNR_M2M_PCI_PMON_CTR0,
	.event_ctl	= SNR_M2M_PCI_PMON_CTL0,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.event_mask_ext	= SNR_M2M_PCI_PMON_UMASK_EXT,
	.box_ctl	= SNR_M2M_PCI_PMON_BOX_CTL,
	.ops		= &snr_m2m_uncore_pci_ops,
	.format_group	= &snr_m2m_uncore_format_group,
};
4701
/*
 * Enable a PCI-based event whose config exceeds 32 bits: the low dword
 * (with the enable bit) and the high dword are written as two separate
 * 32-bit config-space accesses.
 */
static void snr_uncore_pci_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct pci_dev *pdev = box->pci_dev;
	struct hw_perf_event *hwc = &event->hw;

	pci_write_config_dword(pdev, hwc->config_base, (u32)(hwc->config | SNBEP_PMON_CTL_EN));
	pci_write_config_dword(pdev, hwc->config_base + 4, (u32)(hwc->config >> 32));
}
4710
/* PCIe gen3 ops: like M2M but with the 64-bit config enable helper. */
static struct intel_uncore_ops snr_pcie3_uncore_pci_ops = {
	.init_box	= snr_m2m_uncore_pci_init_box,
	.disable_box	= snbep_uncore_pci_disable_box,
	.enable_box	= snbep_uncore_pci_enable_box,
	.disable_event	= snbep_uncore_pci_disable_event,
	.enable_event	= snr_uncore_pci_enable_event,
	.read_counter	= snbep_uncore_pci_read_counter,
};

/* SNR PCIe gen3 root port PMU: PCI based, reuses the SKX IIO event layout. */
static struct intel_uncore_type snr_uncore_pcie3 = {
	.name		= "pcie3",
	.num_counters	= 4,
	.num_boxes	= 1,
	.perf_ctr_bits	= 48,
	.perf_ctr	= SNR_PCIE3_PCI_PMON_CTR0,
	.event_ctl	= SNR_PCIE3_PCI_PMON_CTL0,
	.event_mask	= SKX_IIO_PMON_RAW_EVENT_MASK,
	.event_mask_ext	= SKX_IIO_PMON_RAW_EVENT_MASK_EXT,
	.box_ctl	= SNR_PCIE3_PCI_PMON_BOX_CTL,
	.ops		= &snr_pcie3_uncore_pci_ops,
	.format_group	= &skx_uncore_iio_format_group,
};
4733
/* Indices into snr_pci_uncores[], referenced by the PCI ID tables below. */
enum {
	SNR_PCI_UNCORE_M2M,
	SNR_PCI_UNCORE_PCIE3,
};

/* All PCI-accessed uncore PMUs on Snow Ridge; NULL terminated. */
static struct intel_uncore_type *snr_pci_uncores[] = {
	[SNR_PCI_UNCORE_M2M]		= &snr_uncore_m2m,
	[SNR_PCI_UNCORE_PCIE3]		= &snr_uncore_pcie3,
	NULL,
};
4744
/* SNR uncore units on the root bus. */
static const struct pci_device_id snr_uncore_pci_ids[] = {
	{ /* M2M */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x344a),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(12, 0, SNR_PCI_UNCORE_M2M, 0),
	},
	{ /* end: all zeroes */ }
};

static struct pci_driver snr_uncore_pci_driver = {
	.name		= "snr_uncore",
	.id_table	= snr_uncore_pci_ids,
};

/* SNR uncore units behind a bridge, matched by the sub driver. */
static const struct pci_device_id snr_uncore_pci_sub_ids[] = {
	{ /* PCIe Gen3 RP */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x334a),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(4, 0, SNR_PCI_UNCORE_PCIE3, 0),
	},
	{ /* end: all zeroes */ }
};

static struct pci_driver snr_uncore_pci_sub_driver = {
	.name		= "snr_uncore_sub",
	.id_table	= snr_uncore_pci_sub_ids,
};
4770
/*
 * Register the SNR PCI uncore PMUs after building the PCI-bus to
 * physical-package mapping from the UBOX device (DID 0x3460).
 * Returns 0 on success or a negative errno.
 */
int snr_uncore_pci_init(void)
{
	/* SNR UBOX DID */
	int ret = snbep_pci2phy_map_init(0x3460, SKX_CPUNODEID,
					 SKX_GIDNIDMAP, true);

	if (ret)
		return ret;

	uncore_pci_uncores = snr_pci_uncores;
	uncore_pci_driver = &snr_uncore_pci_driver;
	uncore_pci_sub_driver = &snr_uncore_pci_sub_driver;
	return 0;
}
4785
4786static struct pci_dev *snr_uncore_get_mc_dev(int id)
4787{
4788 struct pci_dev *mc_dev = NULL;
4789 int pkg;
4790
4791 while (1) {
4792 mc_dev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x3451, mc_dev);
4793 if (!mc_dev)
4794 break;
4795 pkg = uncore_pcibus_to_dieid(mc_dev->bus);
4796 if (pkg == id)
4797 break;
4798 }
4799 return mc_dev;
4800}
4801
4802static void __snr_uncore_mmio_init_box(struct intel_uncore_box *box,
4803 unsigned int box_ctl, int mem_offset)
4804{
4805 struct pci_dev *pdev = snr_uncore_get_mc_dev(box->dieid);
4806 struct intel_uncore_type *type = box->pmu->type;
4807 resource_size_t addr;
4808 u32 pci_dword;
4809
4810 if (!pdev)
4811 return;
4812
4813 pci_read_config_dword(pdev, SNR_IMC_MMIO_BASE_OFFSET, &pci_dword);
4814 addr = ((resource_size_t)pci_dword & SNR_IMC_MMIO_BASE_MASK) << 23;
4815
4816 pci_read_config_dword(pdev, mem_offset, &pci_dword);
4817 addr |= (pci_dword & SNR_IMC_MMIO_MEM0_MASK) << 12;
4818
4819 addr += box_ctl;
4820
4821 box->io_addr = ioremap(addr, type->mmio_map_size);
4822 if (!box->io_addr) {
4823 pr_warn("perf uncore: Failed to ioremap for %s.\n", type->name);
4824 return;
4825 }
4826
4827 writel(IVBEP_PMON_BOX_CTL_INT, box->io_addr);
4828}
4829
/* Init an SNR IMC box using its MEM0 base register. */
static void snr_uncore_mmio_init_box(struct intel_uncore_box *box)
{
	__snr_uncore_mmio_init_box(box, uncore_mmio_box_ctl(box),
				   SNR_IMC_MMIO_MEM0_OFFSET);
}
4835
4836static void snr_uncore_mmio_disable_box(struct intel_uncore_box *box)
4837{
4838 u32 config;
4839
4840 if (!box->io_addr)
4841 return;
4842
4843 config = readl(box->io_addr);
4844 config |= SNBEP_PMON_BOX_CTL_FRZ;
4845 writel(config, box->io_addr);
4846}
4847
4848static void snr_uncore_mmio_enable_box(struct intel_uncore_box *box)
4849{
4850 u32 config;
4851
4852 if (!box->io_addr)
4853 return;
4854
4855 config = readl(box->io_addr);
4856 config &= ~SNBEP_PMON_BOX_CTL_FRZ;
4857 writel(config, box->io_addr);
4858}
4859
4860static void snr_uncore_mmio_enable_event(struct intel_uncore_box *box,
4861 struct perf_event *event)
4862{
4863 struct hw_perf_event *hwc = &event->hw;
4864
4865 if (!box->io_addr)
4866 return;
4867
4868 if (!uncore_mmio_is_valid_offset(box, hwc->config_base))
4869 return;
4870
4871 writel(hwc->config | SNBEP_PMON_CTL_EN,
4872 box->io_addr + hwc->config_base);
4873}
4874
4875static void snr_uncore_mmio_disable_event(struct intel_uncore_box *box,
4876 struct perf_event *event)
4877{
4878 struct hw_perf_event *hwc = &event->hw;
4879
4880 if (!box->io_addr)
4881 return;
4882
4883 if (!uncore_mmio_is_valid_offset(box, hwc->config_base))
4884 return;
4885
4886 writel(hwc->config, box->io_addr + hwc->config_base);
4887}
4888
/* MMIO-accessed box operations for the SNR IMC PMU. */
static struct intel_uncore_ops snr_uncore_mmio_ops = {
	.init_box	= snr_uncore_mmio_init_box,
	.exit_box	= uncore_mmio_exit_box,
	.disable_box	= snr_uncore_mmio_disable_box,
	.enable_box	= snr_uncore_mmio_enable_box,
	.disable_event	= snr_uncore_mmio_disable_event,
	.enable_event	= snr_uncore_mmio_enable_event,
	.read_counter	= uncore_mmio_read_counter,
};
4898
/* Common IMC events; CAS count scale converts 64B cache lines to MiB. */
static struct uncore_event_desc snr_uncore_imc_events[] = {
	INTEL_UNCORE_EVENT_DESC(clockticks,      "event=0x00,umask=0x00"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read,  "event=0x04,umask=0x0f"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read.unit, "MiB"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write, "event=0x04,umask=0x30"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write.unit, "MiB"),
	{ },
};
4909
/* SNR IMC PMU: MMIO based, one box per memory channel. */
static struct intel_uncore_type snr_uncore_imc = {
	.name		= "imc",
	.num_counters   = 4,
	.num_boxes	= 2,
	.perf_ctr_bits	= 48,
	.fixed_ctr_bits	= 48,
	.fixed_ctr	= SNR_IMC_MMIO_PMON_FIXED_CTR,
	.fixed_ctl	= SNR_IMC_MMIO_PMON_FIXED_CTL,
	.event_descs	= snr_uncore_imc_events,
	.perf_ctr	= SNR_IMC_MMIO_PMON_CTR0,
	.event_ctl	= SNR_IMC_MMIO_PMON_CTL0,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl	= SNR_IMC_MMIO_PMON_BOX_CTL,
	.mmio_offset	= SNR_IMC_MMIO_OFFSET,
	.mmio_map_size	= SNR_IMC_MMIO_SIZE,
	.ops		= &snr_uncore_mmio_ops,
	.format_group	= &skx_uncore_format_group,
};
4928
/* Free-running IMC counter types on Snow Ridge. */
enum perf_uncore_snr_imc_freerunning_type_id {
	SNR_IMC_DCLK,
	SNR_IMC_DDR,

	SNR_IMC_FREERUNNING_TYPE_MAX,
};

/* { first counter offset, box offset, counter offset, #counters, width } */
static struct freerunning_counters snr_imc_freerunning[] = {
	[SNR_IMC_DCLK]	= { 0x22b0, 0x0, 0, 1, 48 },
	[SNR_IMC_DDR]	= { 0x2290, 0x8, 0, 2, 48 },
};
4940
/*
 * Event descriptions for the SNR IMC free-running counters.
 * scale 6.103515625e-5 == 64 / 2^20: one 64-byte line per count, shown in MiB.
 */
static struct uncore_event_desc snr_uncore_imc_freerunning_events[] = {
	INTEL_UNCORE_EVENT_DESC(dclk,		"event=0xff,umask=0x10"),

	INTEL_UNCORE_EVENT_DESC(read,		"event=0xff,umask=0x20"),
	INTEL_UNCORE_EVENT_DESC(read.scale,	"6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(read.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(write,		"event=0xff,umask=0x21"),
	INTEL_UNCORE_EVENT_DESC(write.scale,	"6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(write.unit,	"MiB"),
	{ /* end marker */ },
};
4952
/* Free-running counters cannot be started/stopped: only init, read and hw_config. */
static struct intel_uncore_ops snr_uncore_imc_freerunning_ops = {
	.init_box	= snr_uncore_mmio_init_box,
	.exit_box	= uncore_mmio_exit_box,
	.read_counter	= uncore_mmio_read_counter,
	.hw_config	= uncore_freerunning_hw_config,
};
4959
/* Virtual PMU exposing the SNR IMC free-running counters (1 DCLK + 2 DDR = 3). */
static struct intel_uncore_type snr_uncore_imc_free_running = {
	.name			= "imc_free_running",
	.num_counters		= 3,
	.num_boxes		= 1,
	.num_freerunning_types	= SNR_IMC_FREERUNNING_TYPE_MAX,
	.mmio_map_size		= SNR_IMC_MMIO_SIZE,
	.freerunning		= snr_imc_freerunning,
	.ops			= &snr_uncore_imc_freerunning_ops,
	.event_descs		= snr_uncore_imc_freerunning_events,
	.format_group		= &skx_uncore_iio_freerunning_format_group,
};
4971
/* NULL-terminated list of all SNR MMIO-accessed uncore PMU types. */
static struct intel_uncore_type *snr_mmio_uncores[] = {
	&snr_uncore_imc,
	&snr_uncore_imc_free_running,
	NULL,
};
4977
/* Register the Snow Ridge MMIO uncore PMUs with the generic uncore core. */
void snr_uncore_mmio_init(void)
{
	uncore_mmio_uncores = snr_mmio_uncores;
}
4982
4983
4984
4985
4986
/*
 * Per-CHA MSR offset, indexed by pmu_idx. The offsets are not monotonic
 * (the last six entries wrap to low values) — presumably reflecting the
 * hardware's MSR assignment for the ICX CHA boxes; do not reorder.
 */
static unsigned icx_cha_msr_offsets[] = {
	0x2a0, 0x2ae, 0x2bc, 0x2ca, 0x2d8, 0x2e6, 0x2f4, 0x302, 0x310,
	0x31e, 0x32c, 0x33a, 0x348, 0x356, 0x364, 0x372, 0x380, 0x38e,
	0x3aa, 0x3b8, 0x3c6, 0x3d4, 0x3e2, 0x3f0, 0x3fe, 0x40c, 0x41a,
	0x428, 0x436, 0x444, 0x452, 0x460, 0x46e, 0x47c, 0x0,   0xe,
	0x1c,  0x2a,  0x38,  0x46,
};
4994
4995static int icx_cha_hw_config(struct intel_uncore_box *box, struct perf_event *event)
4996{
4997 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
4998 bool tie_en = !!(event->hw.config & SNBEP_CBO_PMON_CTL_TID_EN);
4999
5000 if (tie_en) {
5001 reg1->reg = ICX_C34_MSR_PMON_BOX_FILTER0 +
5002 icx_cha_msr_offsets[box->pmu->pmu_idx];
5003 reg1->config = event->attr.config1 & SKX_CHA_MSR_PMON_BOX_FILTER_TID;
5004 reg1->idx = 0;
5005 }
5006
5007 return 0;
5008}
5009
/* ICX CHA box operations: mostly reused SNBEP/IVBEP MSR helpers plus the CHA-specific hooks. */
static struct intel_uncore_ops icx_uncore_chabox_ops = {
	.init_box		= ivbep_uncore_msr_init_box,
	.disable_box		= snbep_uncore_msr_disable_box,
	.enable_box		= snbep_uncore_msr_enable_box,
	.disable_event		= snbep_uncore_msr_disable_event,
	.enable_event		= snr_cha_enable_event,
	.read_counter		= uncore_msr_read_counter,
	.hw_config		= icx_cha_hw_config,
};
5019
/*
 * ICX CHA PMON type. num_boxes is intentionally absent here: it is
 * discovered at runtime by icx_uncore_cpu_init() via icx_count_chabox().
 */
static struct intel_uncore_type icx_uncore_chabox = {
	.name			= "cha",
	.num_counters		= 4,
	.perf_ctr_bits		= 48,
	.event_ctl		= ICX_C34_MSR_PMON_CTL0,
	.perf_ctr		= ICX_C34_MSR_PMON_CTR0,
	.box_ctl		= ICX_C34_MSR_PMON_BOX_CTL,
	.msr_offsets		= icx_cha_msr_offsets,
	.event_mask		= HSWEP_S_MSR_PMON_RAW_EVENT_MASK,
	.event_mask_ext		= SNR_CHA_RAW_EVENT_MASK_EXT,
	.constraints		= skx_uncore_chabox_constraints,
	.ops			= &icx_uncore_chabox_ops,
	.format_group		= &snr_uncore_chabox_format_group,
};
5034
/* Per-box MSR offsets shared by the ICX IIO, IRP and M2PCIe types (6 boxes each). */
static unsigned icx_msr_offsets[] = {
	0x0, 0x20, 0x40, 0x90, 0xb0, 0xd0,
};
5038
/* ICX IIO events restricted to specific counters (event code -> allowed counter mask). */
static struct event_constraint icx_uncore_iio_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x02, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x03, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x83, 0x3),
	UNCORE_EVENT_CONSTRAINT(0xc0, 0xc),
	UNCORE_EVENT_CONSTRAINT(0xc5, 0xc),
	EVENT_CONSTRAINT_END
};
5047
5048static umode_t
5049icx_iio_mapping_visible(struct kobject *kobj, struct attribute *attr, int die)
5050{
5051
5052 return pmu_iio_mapping_visible(kobj, attr, die, 5);
5053}
5054
/* .attrs is filled in at runtime by icx_iio_set_mapping()/pmu_iio_set_mapping(). */
static struct attribute_group icx_iio_mapping_group = {
	.is_visible	= icx_iio_mapping_visible,
};
5058
/* NULL-terminated attribute-group update list wired into icx_uncore_iio.attr_update. */
static const struct attribute_group *icx_iio_attr_update[] = {
	&icx_iio_mapping_group,
	NULL,
};
5063
5064
5065
5066
/* SAD PMON IDs for the ICX IIO stacks — values presumably fixed by hardware; do not renumber. */
enum {
	ICX_PCIE1_PMON_ID,
	ICX_PCIE2_PMON_ID,
	ICX_PCIE3_PMON_ID,
	ICX_PCIE4_PMON_ID,
	ICX_PCIE5_PMON_ID,
	ICX_CBDMA_DMI_PMON_ID
};
5075
/* Maps IIO PMU index (array position) to its SAD PMON ID; CBDMA/DMI is stack 0. */
static u8 icx_sad_pmon_mapping[] = {
	ICX_CBDMA_DMI_PMON_ID,
	ICX_PCIE1_PMON_ID,
	ICX_PCIE2_PMON_ID,
	ICX_PCIE3_PMON_ID,
	ICX_PCIE4_PMON_ID,
	ICX_PCIE5_PMON_ID,
};
5084
/* Discover the IIO stack <-> PCI bus topology from the SAD configuration. */
static int icx_iio_get_topology(struct intel_uncore_type *type)
{
	return sad_cfg_iio_topology(type, icx_sad_pmon_mapping);
}
5089
/* Populate icx_iio_mapping_group with the per-die IIO mapping attributes. */
static int icx_iio_set_mapping(struct intel_uncore_type *type)
{
	return pmu_iio_set_mapping(type, &icx_iio_mapping_group);
}
5094
/* ICX IIO PMON: 6 stacks, 4 counters each, with sysfs bus-mapping support. */
static struct intel_uncore_type icx_uncore_iio = {
	.name			= "iio",
	.num_counters		= 4,
	.num_boxes		= 6,
	.perf_ctr_bits		= 48,
	.event_ctl		= ICX_IIO_MSR_PMON_CTL0,
	.perf_ctr		= ICX_IIO_MSR_PMON_CTR0,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.event_mask_ext		= SNR_IIO_PMON_RAW_EVENT_MASK_EXT,
	.box_ctl		= ICX_IIO_MSR_PMON_BOX_CTL,
	.msr_offsets		= icx_msr_offsets,
	.constraints		= icx_uncore_iio_constraints,
	.ops			= &skx_uncore_iio_ops,
	.format_group		= &snr_uncore_iio_format_group,
	.attr_update		= icx_iio_attr_update,
	.get_topology		= icx_iio_get_topology,
	.set_mapping		= icx_iio_set_mapping,
	.cleanup_mapping	= skx_iio_cleanup_mapping,
};
5114
/* ICX IRP PMON: one box per IIO stack (6), 2 counters each. */
static struct intel_uncore_type icx_uncore_irp = {
	.name			= "irp",
	.num_counters		= 2,
	.num_boxes		= 6,
	.perf_ctr_bits		= 48,
	.event_ctl		= ICX_IRP0_MSR_PMON_CTL0,
	.perf_ctr		= ICX_IRP0_MSR_PMON_CTR0,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl		= ICX_IRP0_MSR_PMON_BOX_CTL,
	.msr_offsets		= icx_msr_offsets,
	.ops			= &ivbep_uncore_msr_ops,
	.format_group		= &ivbep_uncore_format_group,
};
5128
/* ICX M2PCIe events limited to counters 0-1. */
static struct event_constraint icx_uncore_m2pcie_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x14, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
	EVENT_CONSTRAINT_END
};
5135
/* ICX M2PCIe PMON: one box per IIO stack (6), 4 counters each. */
static struct intel_uncore_type icx_uncore_m2pcie = {
	.name		= "m2pcie",
	.num_counters	= 4,
	.num_boxes	= 6,
	.perf_ctr_bits	= 48,
	.event_ctl	= ICX_M2PCIE_MSR_PMON_CTL0,
	.perf_ctr	= ICX_M2PCIE_MSR_PMON_CTR0,
	.box_ctl	= ICX_M2PCIE_MSR_PMON_BOX_CTL,
	.msr_offsets	= icx_msr_offsets,
	.constraints	= icx_uncore_m2pcie_constraints,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.ops		= &ivbep_uncore_msr_ops,
	.format_group	= &ivbep_uncore_format_group,
};
5150
/* Free-running counter groups of the ICX IIO; indexes into icx_iio_freerunning[]. */
enum perf_uncore_icx_iio_freerunning_type_id {
	ICX_IIO_MSR_IOCLK,	/* IIO clock ticks */
	ICX_IIO_MSR_BW_IN,	/* inbound bandwidth, one counter per port */

	ICX_IIO_FREERUNNING_TYPE_MAX,
};
5157
/* Per-box MSR offsets for the IOCLK free-running counters. */
static unsigned icx_iio_clk_freerunning_box_offsets[] = {
	0x0, 0x20, 0x40, 0x90, 0xb0, 0xd0,
};
5161
/* Per-box MSR offsets for the BW_IN free-running counters (note: differ from IOCLK). */
static unsigned icx_iio_bw_freerunning_box_offsets[] = {
	0x0, 0x10, 0x20, 0x90, 0xa0, 0xb0,
};
5165
/*
 * ICX IIO free-running counter layout. Fields appear to be { counter base,
 * counter offset, box offset, #counters, width, per-box offset table } —
 * confirm against struct freerunning_counters in uncore.h.
 */
static struct freerunning_counters icx_iio_freerunning[] = {
	[ICX_IIO_MSR_IOCLK]	= { 0xa55, 0x1, 0x20, 1, 48, icx_iio_clk_freerunning_box_offsets },
	[ICX_IIO_MSR_BW_IN]	= { 0xaa0, 0x1, 0x10, 8, 48, icx_iio_bw_freerunning_box_offsets },
};
5170
/*
 * Event descriptions for the ICX IIO free-running counters: one ioclk event
 * plus eight per-port inbound-bandwidth events.
 * scale 3.814697266e-6 == 4 / 2^20: presumably 4 bytes per count, shown in
 * MiB — confirm granularity against the uncore documentation.
 */
static struct uncore_event_desc icx_uncore_iio_freerunning_events[] = {
	/* Free-Running IIO CLOCKS Counter */
	INTEL_UNCORE_EVENT_DESC(ioclk,			"event=0xff,umask=0x10"),
	/* Free-Running IIO BANDWIDTH IN Counters */
	INTEL_UNCORE_EVENT_DESC(bw_in_port0,		"event=0xff,umask=0x20"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port0.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port0.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port1,		"event=0xff,umask=0x21"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port1.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port1.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port2,		"event=0xff,umask=0x22"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port2.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port2.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port3,		"event=0xff,umask=0x23"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port3.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port3.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port4,		"event=0xff,umask=0x24"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port4.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port4.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port5,		"event=0xff,umask=0x25"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port5.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port5.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port6,		"event=0xff,umask=0x26"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port6.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port6.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port7,		"event=0xff,umask=0x27"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port7.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port7.unit,	"MiB"),
	{ /* end marker */ },
};
5201
/* Virtual PMU exposing the ICX IIO free-running counters (1 ioclk + 8 bw_in = 9). */
static struct intel_uncore_type icx_uncore_iio_free_running = {
	.name			= "iio_free_running",
	.num_counters		= 9,
	.num_boxes		= 6,
	.num_freerunning_types	= ICX_IIO_FREERUNNING_TYPE_MAX,
	.freerunning		= icx_iio_freerunning,
	.ops			= &skx_uncore_iio_freerunning_ops,
	.event_descs		= icx_uncore_iio_freerunning_events,
	.format_group		= &skx_uncore_iio_freerunning_format_group,
};
5212
/* NULL-terminated list of all ICX MSR-accessed uncore PMU types (UBOX/PCU reuse SKX). */
static struct intel_uncore_type *icx_msr_uncores[] = {
	&skx_uncore_ubox,
	&icx_uncore_chabox,
	&icx_uncore_iio,
	&icx_uncore_irp,
	&icx_uncore_m2pcie,
	&skx_uncore_pcu,
	&icx_uncore_iio_free_running,
	NULL,
};
5223
5224
5225
5226
5227
/* PCI config offsets of the capability registers read by icx_count_chabox();
 * presumably CAPID6/CAPID7 hold one bit per present CHA — confirm in the ICX EDS. */
#define ICX_CAPID6		0x9c
#define ICX_CAPID7		0xa0
5230
5231static u64 icx_count_chabox(void)
5232{
5233 struct pci_dev *dev = NULL;
5234 u64 caps = 0;
5235
5236 dev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x345b, dev);
5237 if (!dev)
5238 goto out;
5239
5240 pci_read_config_dword(dev, ICX_CAPID6, (u32 *)&caps);
5241 pci_read_config_dword(dev, ICX_CAPID7, (u32 *)&caps + 1);
5242out:
5243 pci_dev_put(dev);
5244 return hweight64(caps);
5245}
5246
/*
 * Register the ICX MSR uncore PMUs. The CHA box count is probed at runtime
 * and must not exceed the static per-box MSR offset table, otherwise
 * icx_cha_hw_config() would index past icx_cha_msr_offsets[].
 */
void icx_uncore_cpu_init(void)
{
	u64 num_boxes = icx_count_chabox();

	if (WARN_ON(num_boxes > ARRAY_SIZE(icx_cha_msr_offsets)))
		return;
	icx_uncore_chabox.num_boxes = num_boxes;
	uncore_msr_uncores = icx_msr_uncores;
}
5256
/* ICX M2M PMON: 4 boxes on PCI, reusing the SNR M2M register layout and ops. */
static struct intel_uncore_type icx_uncore_m2m = {
	.name		= "m2m",
	.num_counters   = 4,
	.num_boxes	= 4,
	.perf_ctr_bits	= 48,
	.perf_ctr	= SNR_M2M_PCI_PMON_CTR0,
	.event_ctl	= SNR_M2M_PCI_PMON_CTL0,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.event_mask_ext	= SNR_M2M_PCI_PMON_UMASK_EXT,
	.box_ctl	= SNR_M2M_PCI_PMON_BOX_CTL,
	.ops		= &snr_m2m_uncore_pci_ops,
	.format_group	= &snr_m2m_uncore_format_group,
};
5270
/* sysfs "format" attributes for ICX UPI events (UPI uses the wider umask_ext4). */
static struct attribute *icx_upi_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask_ext4.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	NULL,
};
5279
/* Groups the UPI format attributes under the "format" sysfs directory. */
static const struct attribute_group icx_upi_uncore_format_group = {
	.name = "format",
	.attrs = icx_upi_uncore_formats_attr,
};
5284
/* ICX UPI PMON: 3 links on PCI, reusing the SKX UPI PCI ops. */
static struct intel_uncore_type icx_uncore_upi = {
	.name		= "upi",
	.num_counters   = 4,
	.num_boxes	= 3,
	.perf_ctr_bits	= 48,
	.perf_ctr	= ICX_UPI_PCI_PMON_CTR0,
	.event_ctl	= ICX_UPI_PCI_PMON_CTL0,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.event_mask_ext	= ICX_UPI_CTL_UMASK_EXT,
	.box_ctl	= ICX_UPI_PCI_PMON_BOX_CTL,
	.ops		= &skx_upi_uncore_pci_ops,
	.format_group	= &icx_upi_uncore_format_group,
};
5298
/* ICX M3UPI counter constraints: 0x1c-0x1f only on counter 0; 0x40/0x4e-0x50 on counters 0-2. */
static struct event_constraint icx_uncore_m3upi_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x1c, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x1d, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x1e, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x1f, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x40, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x4e, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x4f, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x50, 0x7),
	EVENT_CONSTRAINT_END
};
5310
/* ICX M3UPI PMON: 3 boxes (one per UPI link) on PCI. */
static struct intel_uncore_type icx_uncore_m3upi = {
	.name		= "m3upi",
	.num_counters   = 4,
	.num_boxes	= 3,
	.perf_ctr_bits	= 48,
	.perf_ctr	= ICX_M3UPI_PCI_PMON_CTR0,
	.event_ctl	= ICX_M3UPI_PCI_PMON_CTL0,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl	= ICX_M3UPI_PCI_PMON_BOX_CTL,
	.constraints	= icx_uncore_m3upi_constraints,
	.ops		= &ivbep_uncore_pci_ops,
	.format_group	= &skx_uncore_format_group,
};
5324
/* Indexes into icx_pci_uncores[]; referenced from the PCI device table driver_data. */
enum {
	ICX_PCI_UNCORE_M2M,
	ICX_PCI_UNCORE_UPI,
	ICX_PCI_UNCORE_M3UPI,
};
5330
/* NULL-terminated list of all ICX PCI-accessed uncore PMU types. */
static struct intel_uncore_type *icx_pci_uncores[] = {
	[ICX_PCI_UNCORE_M2M]	= &icx_uncore_m2m,
	[ICX_PCI_UNCORE_UPI]	= &icx_uncore_upi,
	[ICX_PCI_UNCORE_M3UPI]	= &icx_uncore_m3upi,
	NULL,
};
5337
/*
 * PCI IDs of the ICX uncore PMON devices. driver_data encodes
 * (device, function, uncore type index, box index).
 */
static const struct pci_device_id icx_uncore_pci_ids[] = {
	/* M2M boxes 0-3: DID 0x344a at devices 12-15, function 0 */
	{
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x344a),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(12, 0, ICX_PCI_UNCORE_M2M, 0),
	},
	{
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x344a),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(13, 0, ICX_PCI_UNCORE_M2M, 1),
	},
	{
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x344a),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(14, 0, ICX_PCI_UNCORE_M2M, 2),
	},
	{
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x344a),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(15, 0, ICX_PCI_UNCORE_M2M, 3),
	},
	/* UPI links 0-2: DID 0x3441 at devices 2-4, function 1 */
	{
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3441),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(2, 1, ICX_PCI_UNCORE_UPI, 0),
	},
	{
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3441),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(3, 1, ICX_PCI_UNCORE_UPI, 1),
	},
	{
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3441),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(4, 1, ICX_PCI_UNCORE_UPI, 2),
	},
	/* M3UPI boxes 0-2: DID 0x3446 at devices 5-7, function 1 */
	{
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3446),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(5, 1, ICX_PCI_UNCORE_M3UPI, 0),
	},
	{
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3446),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(6, 1, ICX_PCI_UNCORE_M3UPI, 1),
	},
	{
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3446),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(7, 1, ICX_PCI_UNCORE_M3UPI, 2),
	},
	{ /* end: all zeroes */ }
};
5381
/* Stub driver: the generic uncore PCI code does the probing; only name + IDs needed. */
static struct pci_driver icx_uncore_pci_driver = {
	.name		= "icx_uncore",
	.id_table	= icx_uncore_pci_ids,
};
5386
5387int icx_uncore_pci_init(void)
5388{
5389
5390 int ret = snbep_pci2phy_map_init(0x3450, SKX_CPUNODEID,
5391 SKX_GIDNIDMAP, true);
5392
5393 if (ret)
5394 return ret;
5395
5396 uncore_pci_uncores = icx_pci_uncores;
5397 uncore_pci_driver = &icx_uncore_pci_driver;
5398 return 0;
5399}
5400
5401static void icx_uncore_imc_init_box(struct intel_uncore_box *box)
5402{
5403 unsigned int box_ctl = box->pmu->type->box_ctl +
5404 box->pmu->type->mmio_offset * (box->pmu->pmu_idx % ICX_NUMBER_IMC_CHN);
5405 int mem_offset = (box->pmu->pmu_idx / ICX_NUMBER_IMC_CHN) * ICX_IMC_MEM_STRIDE +
5406 SNR_IMC_MMIO_MEM0_OFFSET;
5407
5408 __snr_uncore_mmio_init_box(box, box_ctl, mem_offset);
5409}
5410
/* ICX IMC box operations: SNR MMIO helpers with an ICX-specific init_box. */
static struct intel_uncore_ops icx_uncore_mmio_ops = {
	.init_box	= icx_uncore_imc_init_box,
	.exit_box	= uncore_mmio_exit_box,
	.disable_box	= snr_uncore_mmio_disable_box,
	.enable_box	= snr_uncore_mmio_enable_box,
	.disable_event	= snr_uncore_mmio_disable_event,
	.enable_event	= snr_uncore_mmio_enable_event,
	.read_counter	= uncore_mmio_read_counter,
};
5420
/* ICX IMC PMON: 8 boxes (controllers x channels), SNR register layout, HSWEP event descs. */
static struct intel_uncore_type icx_uncore_imc = {
	.name		= "imc",
	.num_counters   = 4,
	.num_boxes	= 8,
	.perf_ctr_bits	= 48,
	.fixed_ctr_bits	= 48,
	.fixed_ctr	= SNR_IMC_MMIO_PMON_FIXED_CTR,
	.fixed_ctl	= SNR_IMC_MMIO_PMON_FIXED_CTL,
	.event_descs	= hswep_uncore_imc_events,
	.perf_ctr	= SNR_IMC_MMIO_PMON_CTR0,
	.event_ctl	= SNR_IMC_MMIO_PMON_CTL0,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl	= SNR_IMC_MMIO_PMON_BOX_CTL,
	.mmio_offset	= SNR_IMC_MMIO_OFFSET,
	.mmio_map_size	= SNR_IMC_MMIO_SIZE,
	.ops		= &icx_uncore_mmio_ops,
	.format_group	= &skx_uncore_format_group,
};
5439
/* Free-running counter groups of the ICX IMC; indexes into icx_imc_freerunning[]. */
enum perf_uncore_icx_imc_freerunning_type_id {
	ICX_IMC_DCLK,	/* DRAM clock ticks */
	ICX_IMC_DDR,	/* DDR read/write bandwidth */
	ICX_IMC_DDRT,	/* DDRT (persistent memory) read/write bandwidth */

	ICX_IMC_FREERUNNING_TYPE_MAX,
};
5447
/*
 * ICX IMC free-running counter MMIO layout; same field order as
 * snr_imc_freerunning above (confirm against struct freerunning_counters).
 */
static struct freerunning_counters icx_imc_freerunning[] = {
	[ICX_IMC_DCLK]	= { 0x22b0, 0x0, 0, 1, 48 },
	[ICX_IMC_DDR]	= { 0x2290, 0x8, 0, 2, 48 },
	[ICX_IMC_DDRT]	= { 0x22a0, 0x8, 0, 2, 48 },
};
5453
/*
 * Event descriptions for the ICX IMC free-running counters: DCLK, DDR
 * read/write and DDRT read/write bandwidth.
 * scale 6.103515625e-5 == 64 / 2^20: one 64-byte line per count, shown in MiB.
 */
static struct uncore_event_desc icx_uncore_imc_freerunning_events[] = {
	INTEL_UNCORE_EVENT_DESC(dclk,			"event=0xff,umask=0x10"),

	INTEL_UNCORE_EVENT_DESC(read,			"event=0xff,umask=0x20"),
	INTEL_UNCORE_EVENT_DESC(read.scale,		"6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(read.unit,		"MiB"),
	INTEL_UNCORE_EVENT_DESC(write,			"event=0xff,umask=0x21"),
	INTEL_UNCORE_EVENT_DESC(write.scale,		"6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(write.unit,		"MiB"),

	INTEL_UNCORE_EVENT_DESC(ddrt_read,		"event=0xff,umask=0x30"),
	INTEL_UNCORE_EVENT_DESC(ddrt_read.scale,	"6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(ddrt_read.unit,		"MiB"),
	INTEL_UNCORE_EVENT_DESC(ddrt_write,		"event=0xff,umask=0x31"),
	INTEL_UNCORE_EVENT_DESC(ddrt_write.scale,	"6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(ddrt_write.unit,	"MiB"),
	{ /* end marker */ },
};
5472
5473static void icx_uncore_imc_freerunning_init_box(struct intel_uncore_box *box)
5474{
5475 int mem_offset = box->pmu->pmu_idx * ICX_IMC_MEM_STRIDE +
5476 SNR_IMC_MMIO_MEM0_OFFSET;
5477
5478 __snr_uncore_mmio_init_box(box, uncore_mmio_box_ctl(box), mem_offset);
5479}
5480
/* Free-running counters cannot be started/stopped: only init, read and hw_config. */
static struct intel_uncore_ops icx_uncore_imc_freerunning_ops = {
	.init_box	= icx_uncore_imc_freerunning_init_box,
	.exit_box	= uncore_mmio_exit_box,
	.read_counter	= uncore_mmio_read_counter,
	.hw_config	= uncore_freerunning_hw_config,
};
5487
/* Virtual PMU for the ICX IMC free-running counters (1 DCLK + 2 DDR + 2 DDRT = 5). */
static struct intel_uncore_type icx_uncore_imc_free_running = {
	.name			= "imc_free_running",
	.num_counters		= 5,
	.num_boxes		= 4,
	.num_freerunning_types	= ICX_IMC_FREERUNNING_TYPE_MAX,
	.mmio_map_size		= SNR_IMC_MMIO_SIZE,
	.freerunning		= icx_imc_freerunning,
	.ops			= &icx_uncore_imc_freerunning_ops,
	.event_descs		= icx_uncore_imc_freerunning_events,
	.format_group		= &skx_uncore_iio_freerunning_format_group,
};
5499
/* NULL-terminated list of all ICX MMIO-accessed uncore PMU types. */
static struct intel_uncore_type *icx_mmio_uncores[] = {
	&icx_uncore_imc,
	&icx_uncore_imc_free_running,
	NULL,
};
5505
/* Register the Ice Lake server MMIO uncore PMUs with the generic uncore core. */
void icx_uncore_mmio_init(void)
{
	uncore_mmio_uncores = icx_mmio_uncores;
}
5510
5511
5512