/* Nehalem/SandyBridge/Haswell uncore support */
#include "perf_event_intel_uncore.h"

/* Uncore IMC PCI IDs */
#define PCI_DEVICE_ID_INTEL_SNB_IMC	0x0100
#define PCI_DEVICE_ID_INTEL_IVB_IMC	0x0154
#define PCI_DEVICE_ID_INTEL_IVB_E3_IMC	0x0150
#define PCI_DEVICE_ID_INTEL_HSW_IMC	0x0c00
#define PCI_DEVICE_ID_INTEL_HSW_U_IMC	0x0a04

/* SNB event control */
#define SNB_UNC_CTL_EV_SEL_MASK			0x000000ff
#define SNB_UNC_CTL_UMASK_MASK			0x0000ff00
#define SNB_UNC_CTL_EDGE_DET			(1 << 18)
#define SNB_UNC_CTL_EN				(1 << 22)
#define SNB_UNC_CTL_INVERT			(1 << 23)
#define SNB_UNC_CTL_CMASK_MASK			0x1f000000
#define NHM_UNC_CTL_CMASK_MASK			0xff000000
#define NHM_UNC_FIXED_CTR_CTL_EN		(1 << 0)

#define SNB_UNC_RAW_EVENT_MASK			(SNB_UNC_CTL_EV_SEL_MASK | \
						 SNB_UNC_CTL_UMASK_MASK | \
						 SNB_UNC_CTL_EDGE_DET | \
						 SNB_UNC_CTL_INVERT | \
						 SNB_UNC_CTL_CMASK_MASK)

#define NHM_UNC_RAW_EVENT_MASK			(SNB_UNC_CTL_EV_SEL_MASK | \
						 SNB_UNC_CTL_UMASK_MASK | \
						 SNB_UNC_CTL_EDGE_DET | \
						 SNB_UNC_CTL_INVERT | \
						 NHM_UNC_CTL_CMASK_MASK)
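
/*
 * Layout of an uncore PERFEVTSEL register as encoded by the masks above:
 * bits 7:0 select the event, bits 15:8 hold the unit mask, bit 18 enables
 * edge detection, bit 22 enables the counter, bit 23 inverts the
 * counter-mask comparison, and the top bits hold the counter mask
 * (5 bits on SNB, 8 bits on NHM).
 */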

/* SNB global control register */
#define SNB_UNC_PERF_GLOBAL_CTL			0x391
#define SNB_UNC_FIXED_CTR_CTRL			0x394
#define SNB_UNC_FIXED_CTR			0x395

/* SNB uncore global control */
#define SNB_UNC_GLOBAL_CTL_CORE_ALL		((1 << 4) - 1)
#define SNB_UNC_GLOBAL_CTL_EN			(1 << 29)

/* SNB Cbo register */
#define SNB_UNC_CBO_0_PERFEVTSEL0		0x700
#define SNB_UNC_CBO_0_PER_CTR0			0x706
#define SNB_UNC_CBO_MSR_OFFSET			0x10

/* NHM global control register */
#define NHM_UNC_PERF_GLOBAL_CTL			0x391
#define NHM_UNC_FIXED_CTR			0x394
#define NHM_UNC_FIXED_CTR_CTRL			0x395

/* NHM uncore global control */
#define NHM_UNC_GLOBAL_CTL_EN_PC_ALL		((1ULL << 8) - 1)
#define NHM_UNC_GLOBAL_CTL_EN_FC		(1ULL << 32)

/* NHM uncore register */
#define NHM_UNC_PERFEVTSEL0			0x3c0
#define NHM_UNC_UNCORE_PMC0			0x3b0

DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7");
DEFINE_UNCORE_FORMAT_ATTR(umask, umask, "config:8-15");
DEFINE_UNCORE_FORMAT_ATTR(edge, edge, "config:18");
DEFINE_UNCORE_FORMAT_ATTR(inv, inv, "config:23");
DEFINE_UNCORE_FORMAT_ATTR(cmask5, cmask, "config:24-28");
DEFINE_UNCORE_FORMAT_ATTR(cmask8, cmask, "config:24-31");
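
/*
 * These format attributes tell perf how raw config bits map onto the
 * fields above, via /sys/bus/event_source/devices/<pmu>/format/.  For
 * example, counting Cbox event 0x80 with umask 0x01 from user space
 * would look roughly like (PMU name as assigned by the generic uncore
 * registration code):
 *
 *   perf stat -a -e 'uncore_cbox_0/event=0x80,umask=0x01/' -- sleep 1
 */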

/* Sandy Bridge uncore support */
static void snb_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	if (hwc->idx < UNCORE_PMC_IDX_FIXED)
		wrmsrl(hwc->config_base, hwc->config | SNB_UNC_CTL_EN);
	else
		wrmsrl(hwc->config_base, SNB_UNC_CTL_EN);
}

static void snb_uncore_msr_disable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	wrmsrl(event->hw.config_base, 0);
}

static void snb_uncore_msr_init_box(struct intel_uncore_box *box)
{
	/* only the first PMU instance programs the shared global control */
	if (box->pmu->pmu_idx == 0) {
		wrmsrl(SNB_UNC_PERF_GLOBAL_CTL,
			SNB_UNC_GLOBAL_CTL_EN | SNB_UNC_GLOBAL_CTL_CORE_ALL);
	}
}

static struct uncore_event_desc snb_uncore_events[] = {
	INTEL_UNCORE_EVENT_DESC(clockticks, "event=0xff,umask=0x00"),
	{ /* end: all zeroes */ },
};

static struct attribute *snb_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_cmask5.attr,
	NULL,
};

static struct attribute_group snb_uncore_format_group = {
	.name		= "format",
	.attrs		= snb_uncore_formats_attr,
};

static struct intel_uncore_ops snb_uncore_msr_ops = {
	.init_box	= snb_uncore_msr_init_box,
	.disable_event	= snb_uncore_msr_disable_event,
	.enable_event	= snb_uncore_msr_enable_event,
	.read_counter	= uncore_msr_read_counter,
};

static struct event_constraint snb_uncore_cbox_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x80, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x83, 0x1),
	EVENT_CONSTRAINT_END
};

static struct intel_uncore_type snb_uncore_cbox = {
	.name		= "cbox",
	.num_counters	= 2,
	.num_boxes	= 4,
	.perf_ctr_bits	= 44,
	.fixed_ctr_bits	= 48,
	.perf_ctr	= SNB_UNC_CBO_0_PER_CTR0,
	.event_ctl	= SNB_UNC_CBO_0_PERFEVTSEL0,
	.fixed_ctr	= SNB_UNC_FIXED_CTR,
	.fixed_ctl	= SNB_UNC_FIXED_CTR_CTRL,
	.single_fixed	= 1,
	.event_mask	= SNB_UNC_RAW_EVENT_MASK,
	.msr_offset	= SNB_UNC_CBO_MSR_OFFSET,
	.constraints	= snb_uncore_cbox_constraints,
	.ops		= &snb_uncore_msr_ops,
	.format_group	= &snb_uncore_format_group,
	.event_descs	= snb_uncore_events,
};

static struct intel_uncore_type *snb_msr_uncores[] = {
	&snb_uncore_cbox,
	NULL,
};

void snb_uncore_cpu_init(void)
{
	uncore_msr_uncores = snb_msr_uncores;
	/* one Cbox per core: trim num_boxes to the actual core count */
	if (snb_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
		snb_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
}

enum {
	SNB_PCI_UNCORE_IMC,
};
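
/*
 * Client IMC uncore: the integrated memory controller on these parts
 * exposes a small set of free-running counters through an MMIO window
 * located via a PCI BAR, rather than through MSRs.
 */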

static struct uncore_event_desc snb_uncore_imc_events[] = {
	INTEL_UNCORE_EVENT_DESC(data_reads,  "event=0x01"),
	INTEL_UNCORE_EVENT_DESC(data_reads.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(data_reads.unit, "MiB"),

	INTEL_UNCORE_EVENT_DESC(data_writes, "event=0x02"),
	INTEL_UNCORE_EVENT_DESC(data_writes.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(data_writes.unit, "MiB"),

	{ /* end: all zeroes */ },
};
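
/*
 * The IMC counters count 64-byte cache lines transferred to and from the
 * memory controller, so the scale above is 64 / 2^20 = 6.103515625e-5:
 * multiplying a raw count by it yields MiB.  Typical usage (PMU name as
 * assigned by the generic uncore registration code):
 *
 *   perf stat -a -e uncore_imc/data_reads/,uncore_imc/data_writes/ -- sleep 1
 */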

#define SNB_UNCORE_PCI_IMC_EVENT_MASK		0xff
#define SNB_UNCORE_PCI_IMC_BAR_OFFSET		0x48

/* page size multiple covering all config regs */
#define SNB_UNCORE_PCI_IMC_MAP_SIZE		0x6000

#define SNB_UNCORE_PCI_IMC_DATA_READS		0x1
#define SNB_UNCORE_PCI_IMC_DATA_READS_BASE	0x5050
#define SNB_UNCORE_PCI_IMC_DATA_WRITES		0x2
#define SNB_UNCORE_PCI_IMC_DATA_WRITES_BASE	0x5054
#define SNB_UNCORE_PCI_IMC_CTR_BASE		SNB_UNCORE_PCI_IMC_DATA_READS_BASE
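
/*
 * The *_BASE values are byte offsets of the free-running counters inside
 * the MMIO window mapped by snb_uncore_imc_init_box(); the event numbers
 * (0x1/0x2) are the pseudo event codes user space passes in attr.config
 * and that snb_uncore_imc_event_init() whitelists.
 */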

static struct attribute *snb_uncore_imc_formats_attr[] = {
	&format_attr_event.attr,
	NULL,
};

static struct attribute_group snb_uncore_imc_format_group = {
	.name		= "format",
	.attrs		= snb_uncore_imc_formats_attr,
};

static void snb_uncore_imc_init_box(struct intel_uncore_box *box)
{
	struct pci_dev *pdev = box->pci_dev;
	int where = SNB_UNCORE_PCI_IMC_BAR_OFFSET;
	resource_size_t addr;
	u32 pci_dword;

	pci_read_config_dword(pdev, where, &pci_dword);
	addr = pci_dword;

#ifdef CONFIG_PHYS_ADDR_T_64BIT
	pci_read_config_dword(pdev, where + 4, &pci_dword);
	addr |= ((resource_size_t)pci_dword << 32);
#endif

	/* mask off the low flag bits to get the page-aligned base address */
	addr &= ~(PAGE_SIZE - 1);

	box->io_addr = ioremap(addr, SNB_UNCORE_PCI_IMC_MAP_SIZE);
	box->hrtimer_duration = UNCORE_SNB_IMC_HRTIMER_INTERVAL;
}

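/*
 * The IMC counters run freely and cannot be enabled, disabled, or
 * reprogrammed, so the box/event control callbacks below are no-ops;
 * they exist only to satisfy the generic uncore ops interface.
 */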
static void snb_uncore_imc_enable_box(struct intel_uncore_box *box)
{}

static void snb_uncore_imc_disable_box(struct intel_uncore_box *box)
{}

static void snb_uncore_imc_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{}

static void snb_uncore_imc_disable_event(struct intel_uncore_box *box, struct perf_event *event)
{}

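/*
 * The counters are 32 bits wide and read with a single 32-bit MMIO load;
 * the box hrtimer (see hrtimer_duration in init_box) polls the counters
 * so that the generic delta logic accounts for 32-bit wraparound.
 */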
static u64 snb_uncore_imc_read_counter(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	return (u64)*(unsigned int *)(box->io_addr + hwc->event_base);
}

/*
 * custom event_init() function because we define our own fixed, free
 * running counters, so we do not want to conflict with generic uncore
 * logic. Also simplifies processing.
 */
static int snb_uncore_imc_event_init(struct perf_event *event)
{
	struct intel_uncore_pmu *pmu;
	struct intel_uncore_box *box;
	struct hw_perf_event *hwc = &event->hw;
	u64 cfg = event->attr.config & SNB_UNCORE_PCI_IMC_EVENT_MASK;
	int idx, base;

	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	pmu = uncore_event_to_pmu(event);
	/* no device found for this pmu */
	if (pmu->func_id < 0)
		return -ENOENT;

	/* Sampling not supported yet */
	if (hwc->sample_period)
		return -EINVAL;

	/* unsupported modes and filters */
	if (event->attr.exclude_user   ||
	    event->attr.exclude_kernel ||
	    event->attr.exclude_hv     ||
	    event->attr.exclude_idle   ||
	    event->attr.exclude_host   ||
	    event->attr.exclude_guest  ||
	    event->attr.sample_period) /* no sampling */
		return -EINVAL;

	/*
	 * Place all uncore events for a particular physical package
	 * onto a single cpu
	 */
	if (event->cpu < 0)
		return -EINVAL;

	/* check only supported bits are set */
	if (event->attr.config & ~SNB_UNCORE_PCI_IMC_EVENT_MASK)
		return -EINVAL;

	box = uncore_pmu_to_box(pmu, event->cpu);
	if (!box || box->cpu < 0)
		return -EINVAL;

	event->cpu = box->cpu;

	event->hw.idx = -1;
	event->hw.last_tag = ~0ULL;
	event->hw.extra_reg.idx = EXTRA_REG_NONE;
	event->hw.branch_reg.idx = EXTRA_REG_NONE;

	/*
	 * check event is known (whitelist, determines counter)
	 */
	switch (cfg) {
	case SNB_UNCORE_PCI_IMC_DATA_READS:
		base = SNB_UNCORE_PCI_IMC_DATA_READS_BASE;
		idx = UNCORE_PMC_IDX_FIXED;
		break;
	case SNB_UNCORE_PCI_IMC_DATA_WRITES:
		base = SNB_UNCORE_PCI_IMC_DATA_WRITES_BASE;
		idx = UNCORE_PMC_IDX_FIXED + 1;
		break;
	default:
		return -EINVAL;
	}

	/* must be done before validate_group */
	event->hw.event_base = base;
	event->hw.config = cfg;
	event->hw.idx = idx;

	/* no validation needed, we have free running counters */

	return 0;
}

static int snb_uncore_imc_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
	return 0;
}

static void snb_uncore_imc_event_start(struct perf_event *event, int flags)
{
	struct intel_uncore_box *box = uncore_event_to_box(event);
	u64 count;

	if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED)))
		return;

	event->hw.state = 0;
	box->n_active++;

	list_add_tail(&event->active_entry, &box->active_list);

	count = snb_uncore_imc_read_counter(box, event);
	local64_set(&event->hw.prev_count, count);

	if (box->n_active == 1)
		uncore_pmu_start_hrtimer(box);
}

static void snb_uncore_imc_event_stop(struct perf_event *event, int flags)
{
	struct intel_uncore_box *box = uncore_event_to_box(event);
	struct hw_perf_event *hwc = &event->hw;

	if (!(hwc->state & PERF_HES_STOPPED)) {
		box->n_active--;

		WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
		hwc->state |= PERF_HES_STOPPED;

		list_del(&event->active_entry);

		if (box->n_active == 0)
			uncore_pmu_cancel_hrtimer(box);
	}

	if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
		/*
		 * Drain the remaining delta count out of an event
		 * that we are disabling:
		 */
		uncore_perf_event_update(box, event);
		hwc->state |= PERF_HES_UPTODATE;
	}
}

static int snb_uncore_imc_event_add(struct perf_event *event, int flags)
{
	struct intel_uncore_box *box = uncore_event_to_box(event);
	struct hw_perf_event *hwc = &event->hw;

	if (!box)
		return -ENODEV;

	hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
	if (!(flags & PERF_EF_START))
		hwc->state |= PERF_HES_ARCH;

	snb_uncore_imc_event_start(event, 0);

	box->n_events++;

	return 0;
}

static void snb_uncore_imc_event_del(struct perf_event *event, int flags)
{
	struct intel_uncore_box *box = uncore_event_to_box(event);
	int i;

	snb_uncore_imc_event_stop(event, PERF_EF_UPDATE);

	for (i = 0; i < box->n_events; i++) {
		if (event == box->event_list[i]) {
			--box->n_events;
			break;
		}
	}
}

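/*
 * Client parts have a single physical package, so once the IMC PCI
 * device has been found its bus simply maps to physical package id 0.
 */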
static int snb_pci2phy_map_init(int devid)
{
	struct pci_dev *dev = NULL;
	int bus;

	dev = pci_get_device(PCI_VENDOR_ID_INTEL, devid, dev);
	if (!dev)
		return -ENOTTY;

	bus = dev->bus->number;

	uncore_pcibus_to_physid[bus] = 0;

	pci_dev_put(dev);

	return 0;
}

static struct pmu snb_uncore_imc_pmu = {
	.task_ctx_nr	= perf_invalid_context,
	.event_init	= snb_uncore_imc_event_init,
	.add		= snb_uncore_imc_event_add,
	.del		= snb_uncore_imc_event_del,
	.start		= snb_uncore_imc_event_start,
	.stop		= snb_uncore_imc_event_stop,
	.read		= uncore_pmu_event_read,
};

static struct intel_uncore_ops snb_uncore_imc_ops = {
	.init_box	= snb_uncore_imc_init_box,
	.enable_box	= snb_uncore_imc_enable_box,
	.disable_box	= snb_uncore_imc_disable_box,
	.disable_event	= snb_uncore_imc_disable_event,
	.enable_event	= snb_uncore_imc_enable_event,
	.hw_config	= snb_uncore_imc_hw_config,
	.read_counter	= snb_uncore_imc_read_counter,
};

static struct intel_uncore_type snb_uncore_imc = {
	.name		= "imc",
	.num_counters	= 2,
	.num_boxes	= 1,
	.fixed_ctr_bits	= 32,
	.fixed_ctr	= SNB_UNCORE_PCI_IMC_CTR_BASE,
	.event_descs	= snb_uncore_imc_events,
	.format_group	= &snb_uncore_imc_format_group,
	.perf_ctr	= SNB_UNCORE_PCI_IMC_DATA_READS_BASE,
	.event_mask	= SNB_UNCORE_PCI_IMC_EVENT_MASK,
	.ops		= &snb_uncore_imc_ops,
	.pmu		= &snb_uncore_imc_pmu,
};

static struct intel_uncore_type *snb_pci_uncores[] = {
	[SNB_PCI_UNCORE_IMC]	= &snb_uncore_imc,
	NULL,
};

static const struct pci_device_id snb_uncore_pci_ids[] = {
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SNB_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* end: all zeroes */ },
};

static const struct pci_device_id ivb_uncore_pci_ids[] = {
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IVB_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IVB_E3_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* end: all zeroes */ },
};

static const struct pci_device_id hsw_uncore_pci_ids[] = {
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_HSW_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_HSW_U_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* end: all zeroes */ },
};

static struct pci_driver snb_uncore_pci_driver = {
	.name		= "snb_uncore",
	.id_table	= snb_uncore_pci_ids,
};

static struct pci_driver ivb_uncore_pci_driver = {
	.name		= "ivb_uncore",
	.id_table	= ivb_uncore_pci_ids,
};

static struct pci_driver hsw_uncore_pci_driver = {
	.name		= "hsw_uncore",
	.id_table	= hsw_uncore_pci_ids,
};
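
/*
 * The drivers above deliberately leave .probe/.remove unset; the common
 * uncore PCI code fills those in when it registers whichever driver
 * matches the installed hardware.
 */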

struct imc_uncore_pci_dev {
	__u32 pci_id;
	struct pci_driver *driver;
};
#define IMC_DEV(a, d) \
	{ .pci_id = PCI_DEVICE_ID_INTEL_##a, .driver = (d) }

static const struct imc_uncore_pci_dev desktop_imc_pci_ids[] = {
	IMC_DEV(SNB_IMC, &snb_uncore_pci_driver),    /* 2nd Gen Core processor */
	IMC_DEV(IVB_IMC, &ivb_uncore_pci_driver),    /* 3rd Gen Core processor */
	IMC_DEV(IVB_E3_IMC, &ivb_uncore_pci_driver), /* Xeon E3-1200 v2/3rd Gen Core processor */
	IMC_DEV(HSW_IMC, &hsw_uncore_pci_driver),    /* 4th Gen Core Processor */
	IMC_DEV(HSW_U_IMC, &hsw_uncore_pci_driver),  /* 4th Gen Core ULT Mobile Processor */
	{  /* end marker */ }
};

#define for_each_imc_pci_id(x, t) \
	for (x = (t); (x)->pci_id; x++)

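/*
 * Probe the candidate PCI IDs in order; the first one that exists
 * determines both the bus->physid mapping and which pci_driver to use.
 */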
static struct pci_driver *imc_uncore_find_dev(void)
{
	const struct imc_uncore_pci_dev *p;
	int ret;

	for_each_imc_pci_id(p, desktop_imc_pci_ids) {
		ret = snb_pci2phy_map_init(p->pci_id);
		if (ret == 0)
			return p->driver;
	}
	return NULL;
}

static int imc_uncore_pci_init(void)
{
	struct pci_driver *imc_drv = imc_uncore_find_dev();

	if (!imc_drv)
		return -ENODEV;

	uncore_pci_uncores = snb_pci_uncores;
	uncore_pci_driver = imc_drv;

	return 0;
}

int snb_uncore_pci_init(void)
{
	return imc_uncore_pci_init();
}

int ivb_uncore_pci_init(void)
{
	return imc_uncore_pci_init();
}

int hsw_uncore_pci_init(void)
{
	return imc_uncore_pci_init();
}

/* end of Sandy Bridge uncore support */

/* Nehalem uncore support */
static void nhm_uncore_msr_disable_box(struct intel_uncore_box *box)
{
	wrmsrl(NHM_UNC_PERF_GLOBAL_CTL, 0);
}

static void nhm_uncore_msr_enable_box(struct intel_uncore_box *box)
{
	wrmsrl(NHM_UNC_PERF_GLOBAL_CTL, NHM_UNC_GLOBAL_CTL_EN_PC_ALL | NHM_UNC_GLOBAL_CTL_EN_FC);
}

static void nhm_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	if (hwc->idx < UNCORE_PMC_IDX_FIXED)
		wrmsrl(hwc->config_base, hwc->config | SNB_UNC_CTL_EN);
	else
		wrmsrl(hwc->config_base, NHM_UNC_FIXED_CTR_CTL_EN);
}

static struct attribute *nhm_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_cmask8.attr,
	NULL,
};

static struct attribute_group nhm_uncore_format_group = {
	.name		= "format",
	.attrs		= nhm_uncore_formats_attr,
};

static struct uncore_event_desc nhm_uncore_events[] = {
	INTEL_UNCORE_EVENT_DESC(clockticks,                "event=0xff,umask=0x00"),
	INTEL_UNCORE_EVENT_DESC(qmc_writes_full_any,       "event=0x2f,umask=0x0f"),
	INTEL_UNCORE_EVENT_DESC(qmc_normal_reads_any,      "event=0x2c,umask=0x0f"),
	INTEL_UNCORE_EVENT_DESC(qhl_request_ioh_reads,     "event=0x20,umask=0x01"),
	INTEL_UNCORE_EVENT_DESC(qhl_request_ioh_writes,    "event=0x20,umask=0x02"),
	INTEL_UNCORE_EVENT_DESC(qhl_request_remote_reads,  "event=0x20,umask=0x04"),
	INTEL_UNCORE_EVENT_DESC(qhl_request_remote_writes, "event=0x20,umask=0x08"),
	INTEL_UNCORE_EVENT_DESC(qhl_request_local_reads,   "event=0x20,umask=0x10"),
	INTEL_UNCORE_EVENT_DESC(qhl_request_local_writes,  "event=0x20,umask=0x20"),
	{ /* end: all zeroes */ },
};

static struct intel_uncore_ops nhm_uncore_msr_ops = {
	.disable_box	= nhm_uncore_msr_disable_box,
	.enable_box	= nhm_uncore_msr_enable_box,
	.disable_event	= snb_uncore_msr_disable_event,
	.enable_event	= nhm_uncore_msr_enable_event,
	.read_counter	= uncore_msr_read_counter,
};

static struct intel_uncore_type nhm_uncore = {
	/* empty name: events are registered under the plain "uncore" PMU */
	.name		= "",
	.num_counters	= 8,
	.num_boxes	= 1,
	.perf_ctr_bits	= 48,
	.fixed_ctr_bits	= 48,
	.event_ctl	= NHM_UNC_PERFEVTSEL0,
	.perf_ctr	= NHM_UNC_UNCORE_PMC0,
	.fixed_ctr	= NHM_UNC_FIXED_CTR,
	.fixed_ctl	= NHM_UNC_FIXED_CTR_CTRL,
	.event_mask	= NHM_UNC_RAW_EVENT_MASK,
	.event_descs	= nhm_uncore_events,
	.ops		= &nhm_uncore_msr_ops,
	.format_group	= &nhm_uncore_format_group,
};

static struct intel_uncore_type *nhm_msr_uncores[] = {
	&nhm_uncore,
	NULL,
};

void nhm_uncore_cpu_init(void)
{
	uncore_msr_uncores = nhm_msr_uncores;
}

/* end of Nehalem uncore support */