/*
 * Support cstate residency counters.
 *
 * This file exports the C-state residency MSRs as free-running
 * (read-only) perf counters. Two PMUs are registered with the
 * perf_event framework:
 *
 *  - 'cstate_core': per-core counters (CORE_C*_RESIDENCY MSRs),
 *    events c1/c3/c6/c7-residency.
 *  - 'cstate_pkg':  per-package counters (PKG_C*_RESIDENCY MSRs),
 *    events c2/c3/c6/c7/c8/c9/c10-residency.
 *
 * The counters only support system-wide counting; sampling is not
 * possible because the hardware cannot generate an interrupt on them.
 * Which events exist depends on the CPU model, see the cstate_model
 * tables and intel_cstates_match below.
 */
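/*
 * Illustrative usage from user space (assumes a CPU model that exposes
 * the core C6 counter):
 *
 *	# perf stat -e cstate_core/c6-residency/ -a sleep 1
 *
 * The event name is resolved through the "events" sysfs group populated
 * at probe time; the reported value is the raw MSR delta over the run.
 */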
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/perf_event.h>
#include <linux/nospec.h>
#include <asm/cpu_device_id.h>
#include <asm/intel-family.h>
#include "../perf_event.h"
#include "../probe.h"

MODULE_LICENSE("GPL");

#define DEFINE_CSTATE_FORMAT_ATTR(_var, _name, _format)		\
static ssize_t __cstate_##_var##_show(struct device *dev,	\
				struct device_attribute *attr,	\
				char *page)			\
{								\
	BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE);		\
	return sprintf(page, _format "\n");			\
}								\
static struct device_attribute format_attr_##_var =		\
	__ATTR(_name, 0444, __cstate_##_var##_show, NULL)
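/*
 * For illustration: DEFINE_CSTATE_FORMAT_ATTR(core_event, event, "config:0-63")
 * below creates a read-only sysfs file,
 * /sys/bus/event_source/devices/cstate_core/format/event, whose content
 * "config:0-63" tells tools like perf how the event selector is encoded.
 */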

static ssize_t cstate_get_attr_cpumask(struct device *dev,
				       struct device_attribute *attr,
				       char *buf);

/* Model -> events quirk mapping */
struct cstate_model {
	unsigned long	core_events;
	unsigned long	pkg_events;
	unsigned long	quirks;
};

/* Quirk flags */
#define SLM_PKG_C6_USE_C7_MSR	(1UL << 0)
#define KNL_CORE_C6_MSR		(1UL << 1)

struct perf_cstate_msr {
	u64	msr;
	struct	perf_pmu_events_attr *attr;
};

/* cstate_core PMU */
static struct pmu cstate_core_pmu;
static bool has_cstate_core;

enum perf_cstate_core_events {
	PERF_CSTATE_CORE_C1_RES = 0,
	PERF_CSTATE_CORE_C3_RES,
	PERF_CSTATE_CORE_C6_RES,
	PERF_CSTATE_CORE_C7_RES,

	PERF_CSTATE_CORE_EVENT_MAX,
};

PMU_EVENT_ATTR_STRING(c1-residency, attr_cstate_core_c1, "event=0x00");
PMU_EVENT_ATTR_STRING(c3-residency, attr_cstate_core_c3, "event=0x01");
PMU_EVENT_ATTR_STRING(c6-residency, attr_cstate_core_c6, "event=0x02");
PMU_EVENT_ATTR_STRING(c7-residency, attr_cstate_core_c7, "event=0x03");

static unsigned long core_msr_mask;

PMU_EVENT_GROUP(events, cstate_core_c1);
PMU_EVENT_GROUP(events, cstate_core_c3);
PMU_EVENT_GROUP(events, cstate_core_c6);
PMU_EVENT_GROUP(events, cstate_core_c7);

static bool test_msr(int idx, void *data)
{
	return test_bit(idx, (unsigned long *) data);
}

static struct perf_msr core_msr[] = {
	[PERF_CSTATE_CORE_C1_RES] = { MSR_CORE_C1_RES,		&group_cstate_core_c1,	test_msr },
	[PERF_CSTATE_CORE_C3_RES] = { MSR_CORE_C3_RESIDENCY,	&group_cstate_core_c3,	test_msr },
	[PERF_CSTATE_CORE_C6_RES] = { MSR_CORE_C6_RESIDENCY,	&group_cstate_core_c6,	test_msr },
	[PERF_CSTATE_CORE_C7_RES] = { MSR_CORE_C7_RESIDENCY,	&group_cstate_core_c7,	test_msr },
};

static struct attribute *attrs_empty[] = {
	NULL,
};

/*
 * There are no default events, but we need to create "events" group
 * (with empty attrs) before updating it with detected events.
 */
static struct attribute_group core_events_attr_group = {
	.name = "events",
	.attrs = attrs_empty,
};

DEFINE_CSTATE_FORMAT_ATTR(core_event, event, "config:0-63");
static struct attribute *core_format_attrs[] = {
	&format_attr_core_event.attr,
	NULL,
};

static struct attribute_group core_format_attr_group = {
	.name = "format",
	.attrs = core_format_attrs,
};

static cpumask_t cstate_core_cpu_mask;
static DEVICE_ATTR(cpumask, S_IRUGO, cstate_get_attr_cpumask, NULL);

static struct attribute *cstate_cpumask_attrs[] = {
	&dev_attr_cpumask.attr,
	NULL,
};

static struct attribute_group cpumask_attr_group = {
	.attrs = cstate_cpumask_attrs,
};

static const struct attribute_group *core_attr_groups[] = {
	&core_events_attr_group,
	&core_format_attr_group,
	&cpumask_attr_group,
	NULL,
};

/* cstate_pkg PMU */
static struct pmu cstate_pkg_pmu;
static bool has_cstate_pkg;

enum perf_cstate_pkg_events {
	PERF_CSTATE_PKG_C2_RES = 0,
	PERF_CSTATE_PKG_C3_RES,
	PERF_CSTATE_PKG_C6_RES,
	PERF_CSTATE_PKG_C7_RES,
	PERF_CSTATE_PKG_C8_RES,
	PERF_CSTATE_PKG_C9_RES,
	PERF_CSTATE_PKG_C10_RES,

	PERF_CSTATE_PKG_EVENT_MAX,
};

PMU_EVENT_ATTR_STRING(c2-residency, attr_cstate_pkg_c2, "event=0x00");
PMU_EVENT_ATTR_STRING(c3-residency, attr_cstate_pkg_c3, "event=0x01");
PMU_EVENT_ATTR_STRING(c6-residency, attr_cstate_pkg_c6, "event=0x02");
PMU_EVENT_ATTR_STRING(c7-residency, attr_cstate_pkg_c7, "event=0x03");
PMU_EVENT_ATTR_STRING(c8-residency, attr_cstate_pkg_c8, "event=0x04");
PMU_EVENT_ATTR_STRING(c9-residency, attr_cstate_pkg_c9, "event=0x05");
PMU_EVENT_ATTR_STRING(c10-residency, attr_cstate_pkg_c10, "event=0x06");

static unsigned long pkg_msr_mask;

PMU_EVENT_GROUP(events, cstate_pkg_c2);
PMU_EVENT_GROUP(events, cstate_pkg_c3);
PMU_EVENT_GROUP(events, cstate_pkg_c6);
PMU_EVENT_GROUP(events, cstate_pkg_c7);
PMU_EVENT_GROUP(events, cstate_pkg_c8);
PMU_EVENT_GROUP(events, cstate_pkg_c9);
PMU_EVENT_GROUP(events, cstate_pkg_c10);

static struct perf_msr pkg_msr[] = {
	[PERF_CSTATE_PKG_C2_RES] = { MSR_PKG_C2_RESIDENCY,	&group_cstate_pkg_c2,	test_msr },
	[PERF_CSTATE_PKG_C3_RES] = { MSR_PKG_C3_RESIDENCY,	&group_cstate_pkg_c3,	test_msr },
	[PERF_CSTATE_PKG_C6_RES] = { MSR_PKG_C6_RESIDENCY,	&group_cstate_pkg_c6,	test_msr },
	[PERF_CSTATE_PKG_C7_RES] = { MSR_PKG_C7_RESIDENCY,	&group_cstate_pkg_c7,	test_msr },
	[PERF_CSTATE_PKG_C8_RES] = { MSR_PKG_C8_RESIDENCY,	&group_cstate_pkg_c8,	test_msr },
	[PERF_CSTATE_PKG_C9_RES] = { MSR_PKG_C9_RESIDENCY,	&group_cstate_pkg_c9,	test_msr },
	[PERF_CSTATE_PKG_C10_RES] = { MSR_PKG_C10_RESIDENCY,	&group_cstate_pkg_c10,	test_msr },
};

static struct attribute_group pkg_events_attr_group = {
	.name = "events",
	.attrs = attrs_empty,
};

DEFINE_CSTATE_FORMAT_ATTR(pkg_event, event, "config:0-63");
static struct attribute *pkg_format_attrs[] = {
	&format_attr_pkg_event.attr,
	NULL,
};
static struct attribute_group pkg_format_attr_group = {
	.name = "format",
	.attrs = pkg_format_attrs,
};

static cpumask_t cstate_pkg_cpu_mask;

static const struct attribute_group *pkg_attr_groups[] = {
	&pkg_events_attr_group,
	&pkg_format_attr_group,
	&cpumask_attr_group,
	NULL,
};

static ssize_t cstate_get_attr_cpumask(struct device *dev,
				       struct device_attribute *attr,
				       char *buf)
{
	struct pmu *pmu = dev_get_drvdata(dev);

	if (pmu == &cstate_core_pmu)
		return cpumap_print_to_pagebuf(true, buf, &cstate_core_cpu_mask);
	else if (pmu == &cstate_pkg_pmu)
		return cpumap_print_to_pagebuf(true, buf, &cstate_pkg_cpu_mask);
	else
		return 0;
}
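/*
 * Illustrative shell session (the path is the standard perf sysfs layout,
 * the mask values depend on the machine's topology):
 *
 *	$ cat /sys/bus/event_source/devices/cstate_core/cpumask
 *	0,2,4,6
 *
 * Only the listed CPUs carry events; cstate_pmu_event_init() redirects
 * event->cpu to the designated reader accordingly.
 */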

static int cstate_pmu_event_init(struct perf_event *event)
{
	u64 cfg = event->attr.config;
	int cpu;

	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	/* unsupported modes and filters */
	if (event->attr.sample_period) /* no sampling */
		return -EINVAL;

	if (event->cpu < 0)
		return -EINVAL;

	if (event->pmu == &cstate_core_pmu) {
		if (cfg >= PERF_CSTATE_CORE_EVENT_MAX)
			return -EINVAL;
		cfg = array_index_nospec((unsigned long)cfg, PERF_CSTATE_CORE_EVENT_MAX);
		if (!(core_msr_mask & (1 << cfg)))
			return -EINVAL;
		event->hw.event_base = core_msr[cfg].msr;
		cpu = cpumask_any_and(&cstate_core_cpu_mask,
				      topology_sibling_cpumask(event->cpu));
	} else if (event->pmu == &cstate_pkg_pmu) {
		if (cfg >= PERF_CSTATE_PKG_EVENT_MAX)
			return -EINVAL;
		cfg = array_index_nospec((unsigned long)cfg, PERF_CSTATE_PKG_EVENT_MAX);
		if (!(pkg_msr_mask & (1 << cfg)))
			return -EINVAL;
		event->hw.event_base = pkg_msr[cfg].msr;
		cpu = cpumask_any_and(&cstate_pkg_cpu_mask,
				      topology_die_cpumask(event->cpu));
	} else {
		return -ENOENT;
	}

	if (cpu >= nr_cpu_ids)
		return -ENODEV;

	event->cpu = cpu;
	event->hw.config = cfg;
	event->hw.idx = -1;
	return 0;
}
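/*
 * Minimal user-space sketch of opening one of these events directly via
 * perf_event_open(2); illustrative only, error handling omitted. The PMU
 * type must be read from sysfs, and config 0x02 is c6-residency per the
 * event strings above:
 *
 *	struct perf_event_attr attr = { };
 *
 *	attr.type = <contents of .../cstate_core/type>;
 *	attr.size = sizeof(attr);
 *	attr.config = 0x02;	// c6-residency
 *	// pid == -1 and cpu >= 0: counting mode on one CPU, as required
 *	// by cstate_pmu_event_init() (event->cpu < 0 is rejected).
 *	fd = syscall(__NR_perf_event_open, &attr, -1, cpu, -1, 0);
 *
 * A subsequent read(fd, &count, sizeof(count)) returns the accumulated
 * residency in raw MSR units.
 */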

static inline u64 cstate_pmu_read_counter(struct perf_event *event)
{
	u64 val;

	rdmsrl(event->hw.event_base, val);
	return val;
}

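/*
 * Lockless update for a free-running counter: snapshot prev_count, read
 * the MSR, then publish the new snapshot with cmpxchg. If another
 * context raced and updated prev_count first, retry, so each residency
 * delta is accounted exactly once.
 */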
static void cstate_pmu_event_update(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	u64 prev_raw_count, new_raw_count;

again:
	prev_raw_count = local64_read(&hwc->prev_count);
	new_raw_count = cstate_pmu_read_counter(event);

	if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
			    new_raw_count) != prev_raw_count)
		goto again;

	local64_add(new_raw_count - prev_raw_count, &event->count);
}

static void cstate_pmu_event_start(struct perf_event *event, int mode)
{
	local64_set(&event->hw.prev_count, cstate_pmu_read_counter(event));
}

static void cstate_pmu_event_stop(struct perf_event *event, int mode)
{
	cstate_pmu_event_update(event);
}

static void cstate_pmu_event_del(struct perf_event *event, int mode)
{
	cstate_pmu_event_stop(event, PERF_EF_UPDATE);
}

static int cstate_pmu_event_add(struct perf_event *event, int mode)
{
	if (mode & PERF_EF_START)
		cstate_pmu_event_start(event, mode);

	return 0;
}

/*
 * Check if exiting cpu is the designated reader. If so migrate the
 * events when there is a valid target available
 */
static int cstate_cpu_exit(unsigned int cpu)
{
	unsigned int target;

	if (has_cstate_core &&
	    cpumask_test_and_clear_cpu(cpu, &cstate_core_cpu_mask)) {

		target = cpumask_any_but(topology_sibling_cpumask(cpu), cpu);
		/* Migrate events if there is a valid target */
		if (target < nr_cpu_ids) {
			cpumask_set_cpu(target, &cstate_core_cpu_mask);
			perf_pmu_migrate_context(&cstate_core_pmu, cpu, target);
		}
	}

	if (has_cstate_pkg &&
	    cpumask_test_and_clear_cpu(cpu, &cstate_pkg_cpu_mask)) {

		target = cpumask_any_but(topology_die_cpumask(cpu), cpu);
		/* Migrate events if there is a valid target */
		if (target < nr_cpu_ids) {
			cpumask_set_cpu(target, &cstate_pkg_cpu_mask);
			perf_pmu_migrate_context(&cstate_pkg_pmu, cpu, target);
		}
	}
	return 0;
}

static int cstate_cpu_init(unsigned int cpu)
{
	unsigned int target;

	/*
	 * If this is the first online thread of that core, set it in
	 * the core cpu mask as the designated reader.
	 */
	target = cpumask_any_and(&cstate_core_cpu_mask,
				 topology_sibling_cpumask(cpu));

	if (has_cstate_core && target >= nr_cpu_ids)
		cpumask_set_cpu(cpu, &cstate_core_cpu_mask);

	/*
	 * If this is the first online thread of that package, set it
	 * in the package cpu mask as the designated reader.
	 */
	target = cpumask_any_and(&cstate_pkg_cpu_mask,
				 topology_die_cpumask(cpu));
	if (has_cstate_pkg && target >= nr_cpu_ids)
		cpumask_set_cpu(cpu, &cstate_pkg_cpu_mask);

	return 0;
}

static const struct attribute_group *core_attr_update[] = {
	&group_cstate_core_c1,
	&group_cstate_core_c3,
	&group_cstate_core_c6,
	&group_cstate_core_c7,
	NULL,
};

static const struct attribute_group *pkg_attr_update[] = {
	&group_cstate_pkg_c2,
	&group_cstate_pkg_c3,
	&group_cstate_pkg_c6,
	&group_cstate_pkg_c7,
	&group_cstate_pkg_c8,
	&group_cstate_pkg_c9,
	&group_cstate_pkg_c10,
	NULL,
};

static struct pmu cstate_core_pmu = {
	.attr_groups	= core_attr_groups,
	.attr_update	= core_attr_update,
	.name		= "cstate_core",
	.task_ctx_nr	= perf_invalid_context,
	.event_init	= cstate_pmu_event_init,
	.add		= cstate_pmu_event_add,
	.del		= cstate_pmu_event_del,
	.start		= cstate_pmu_event_start,
	.stop		= cstate_pmu_event_stop,
	.read		= cstate_pmu_event_update,
	.capabilities	= PERF_PMU_CAP_NO_INTERRUPT | PERF_PMU_CAP_NO_EXCLUDE,
	.module		= THIS_MODULE,
};

static struct pmu cstate_pkg_pmu = {
	.attr_groups	= pkg_attr_groups,
	.attr_update	= pkg_attr_update,
	.name		= "cstate_pkg",
	.task_ctx_nr	= perf_invalid_context,
	.event_init	= cstate_pmu_event_init,
	.add		= cstate_pmu_event_add,
	.del		= cstate_pmu_event_del,
	.start		= cstate_pmu_event_start,
	.stop		= cstate_pmu_event_stop,
	.read		= cstate_pmu_event_update,
	.capabilities	= PERF_PMU_CAP_NO_INTERRUPT | PERF_PMU_CAP_NO_EXCLUDE,
	.module		= THIS_MODULE,
};
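/*
 * Note on the two PMUs above: task_ctx_nr == perf_invalid_context means
 * the counters are system-wide only and never attach to a task context,
 * and PERF_PMU_CAP_NO_INTERRUPT advertises that sampling is impossible,
 * which matches the -EINVAL for a non-zero sample_period in
 * cstate_pmu_event_init().
 */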

static const struct cstate_model nhm_cstates __initconst = {
	.core_events		= BIT(PERF_CSTATE_CORE_C3_RES) |
				  BIT(PERF_CSTATE_CORE_C6_RES),

	.pkg_events		= BIT(PERF_CSTATE_PKG_C3_RES) |
				  BIT(PERF_CSTATE_PKG_C6_RES) |
				  BIT(PERF_CSTATE_PKG_C7_RES),
};

static const struct cstate_model snb_cstates __initconst = {
	.core_events		= BIT(PERF_CSTATE_CORE_C3_RES) |
				  BIT(PERF_CSTATE_CORE_C6_RES) |
				  BIT(PERF_CSTATE_CORE_C7_RES),

	.pkg_events		= BIT(PERF_CSTATE_PKG_C2_RES) |
				  BIT(PERF_CSTATE_PKG_C3_RES) |
				  BIT(PERF_CSTATE_PKG_C6_RES) |
				  BIT(PERF_CSTATE_PKG_C7_RES),
};

static const struct cstate_model hswult_cstates __initconst = {
	.core_events		= BIT(PERF_CSTATE_CORE_C3_RES) |
				  BIT(PERF_CSTATE_CORE_C6_RES) |
				  BIT(PERF_CSTATE_CORE_C7_RES),

	.pkg_events		= BIT(PERF_CSTATE_PKG_C2_RES) |
				  BIT(PERF_CSTATE_PKG_C3_RES) |
				  BIT(PERF_CSTATE_PKG_C6_RES) |
				  BIT(PERF_CSTATE_PKG_C7_RES) |
				  BIT(PERF_CSTATE_PKG_C8_RES) |
				  BIT(PERF_CSTATE_PKG_C9_RES) |
				  BIT(PERF_CSTATE_PKG_C10_RES),
};

static const struct cstate_model cnl_cstates __initconst = {
	.core_events		= BIT(PERF_CSTATE_CORE_C1_RES) |
				  BIT(PERF_CSTATE_CORE_C3_RES) |
				  BIT(PERF_CSTATE_CORE_C6_RES) |
				  BIT(PERF_CSTATE_CORE_C7_RES),

	.pkg_events		= BIT(PERF_CSTATE_PKG_C2_RES) |
				  BIT(PERF_CSTATE_PKG_C3_RES) |
				  BIT(PERF_CSTATE_PKG_C6_RES) |
				  BIT(PERF_CSTATE_PKG_C7_RES) |
				  BIT(PERF_CSTATE_PKG_C8_RES) |
				  BIT(PERF_CSTATE_PKG_C9_RES) |
				  BIT(PERF_CSTATE_PKG_C10_RES),
};

static const struct cstate_model icl_cstates __initconst = {
	.core_events		= BIT(PERF_CSTATE_CORE_C6_RES) |
				  BIT(PERF_CSTATE_CORE_C7_RES),

	.pkg_events		= BIT(PERF_CSTATE_PKG_C2_RES) |
				  BIT(PERF_CSTATE_PKG_C3_RES) |
				  BIT(PERF_CSTATE_PKG_C6_RES) |
				  BIT(PERF_CSTATE_PKG_C7_RES) |
				  BIT(PERF_CSTATE_PKG_C8_RES) |
				  BIT(PERF_CSTATE_PKG_C9_RES) |
				  BIT(PERF_CSTATE_PKG_C10_RES),
};

static const struct cstate_model icx_cstates __initconst = {
	.core_events		= BIT(PERF_CSTATE_CORE_C1_RES) |
				  BIT(PERF_CSTATE_CORE_C6_RES),

	.pkg_events		= BIT(PERF_CSTATE_PKG_C2_RES) |
				  BIT(PERF_CSTATE_PKG_C6_RES),
};

static const struct cstate_model adl_cstates __initconst = {
	.core_events		= BIT(PERF_CSTATE_CORE_C1_RES) |
				  BIT(PERF_CSTATE_CORE_C6_RES) |
				  BIT(PERF_CSTATE_CORE_C7_RES),

	.pkg_events		= BIT(PERF_CSTATE_PKG_C2_RES) |
				  BIT(PERF_CSTATE_PKG_C3_RES) |
				  BIT(PERF_CSTATE_PKG_C6_RES) |
				  BIT(PERF_CSTATE_PKG_C7_RES) |
				  BIT(PERF_CSTATE_PKG_C8_RES) |
				  BIT(PERF_CSTATE_PKG_C9_RES) |
				  BIT(PERF_CSTATE_PKG_C10_RES),
};

static const struct cstate_model slm_cstates __initconst = {
	.core_events		= BIT(PERF_CSTATE_CORE_C1_RES) |
				  BIT(PERF_CSTATE_CORE_C6_RES),

	.pkg_events		= BIT(PERF_CSTATE_PKG_C6_RES),
	.quirks			= SLM_PKG_C6_USE_C7_MSR,
};

static const struct cstate_model knl_cstates __initconst = {
	.core_events		= BIT(PERF_CSTATE_CORE_C6_RES),

	.pkg_events		= BIT(PERF_CSTATE_PKG_C2_RES) |
				  BIT(PERF_CSTATE_PKG_C3_RES) |
				  BIT(PERF_CSTATE_PKG_C6_RES),
	.quirks			= KNL_CORE_C6_MSR,
};

static const struct cstate_model glm_cstates __initconst = {
	.core_events		= BIT(PERF_CSTATE_CORE_C1_RES) |
				  BIT(PERF_CSTATE_CORE_C3_RES) |
				  BIT(PERF_CSTATE_CORE_C6_RES),

	.pkg_events		= BIT(PERF_CSTATE_PKG_C2_RES) |
				  BIT(PERF_CSTATE_PKG_C3_RES) |
				  BIT(PERF_CSTATE_PKG_C6_RES) |
				  BIT(PERF_CSTATE_PKG_C10_RES),
};

static const struct x86_cpu_id intel_cstates_match[] __initconst = {
	X86_MATCH_INTEL_FAM6_MODEL(NEHALEM,		&nhm_cstates),
	X86_MATCH_INTEL_FAM6_MODEL(NEHALEM_EP,		&nhm_cstates),
	X86_MATCH_INTEL_FAM6_MODEL(NEHALEM_EX,		&nhm_cstates),

	X86_MATCH_INTEL_FAM6_MODEL(WESTMERE,		&nhm_cstates),
	X86_MATCH_INTEL_FAM6_MODEL(WESTMERE_EP,		&nhm_cstates),
	X86_MATCH_INTEL_FAM6_MODEL(WESTMERE_EX,		&nhm_cstates),

	X86_MATCH_INTEL_FAM6_MODEL(SANDYBRIDGE,		&snb_cstates),
	X86_MATCH_INTEL_FAM6_MODEL(SANDYBRIDGE_X,	&snb_cstates),

	X86_MATCH_INTEL_FAM6_MODEL(IVYBRIDGE,		&snb_cstates),
	X86_MATCH_INTEL_FAM6_MODEL(IVYBRIDGE_X,		&snb_cstates),

	X86_MATCH_INTEL_FAM6_MODEL(HASWELL,		&snb_cstates),
	X86_MATCH_INTEL_FAM6_MODEL(HASWELL_X,		&snb_cstates),
	X86_MATCH_INTEL_FAM6_MODEL(HASWELL_G,		&snb_cstates),

	X86_MATCH_INTEL_FAM6_MODEL(HASWELL_L,		&hswult_cstates),

	X86_MATCH_INTEL_FAM6_MODEL(ATOM_SILVERMONT,	&slm_cstates),
	X86_MATCH_INTEL_FAM6_MODEL(ATOM_SILVERMONT_D,	&slm_cstates),
	X86_MATCH_INTEL_FAM6_MODEL(ATOM_AIRMONT,	&slm_cstates),

	X86_MATCH_INTEL_FAM6_MODEL(BROADWELL,		&snb_cstates),
	X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_D,		&snb_cstates),
	X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_G,		&snb_cstates),
	X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_X,		&snb_cstates),

	X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE_L,		&snb_cstates),
	X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE,		&snb_cstates),
	X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE_X,		&snb_cstates),

	X86_MATCH_INTEL_FAM6_MODEL(KABYLAKE_L,		&hswult_cstates),
	X86_MATCH_INTEL_FAM6_MODEL(KABYLAKE,		&hswult_cstates),
	X86_MATCH_INTEL_FAM6_MODEL(COMETLAKE_L,		&hswult_cstates),
	X86_MATCH_INTEL_FAM6_MODEL(COMETLAKE,		&hswult_cstates),

	X86_MATCH_INTEL_FAM6_MODEL(CANNONLAKE_L,	&cnl_cstates),

	X86_MATCH_INTEL_FAM6_MODEL(XEON_PHI_KNL,	&knl_cstates),
	X86_MATCH_INTEL_FAM6_MODEL(XEON_PHI_KNM,	&knl_cstates),

	X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT,	&glm_cstates),
	X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT_D,	&glm_cstates),
	X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT_PLUS,	&glm_cstates),
	X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT_D,	&glm_cstates),
	X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT,	&glm_cstates),
	X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT_L,	&glm_cstates),

	X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_L,		&icl_cstates),
	X86_MATCH_INTEL_FAM6_MODEL(ICELAKE,		&icl_cstates),
	X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_X,		&icx_cstates),
	X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_D,		&icx_cstates),
	X86_MATCH_INTEL_FAM6_MODEL(SAPPHIRERAPIDS_X,	&icx_cstates),

	X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE_L,		&icl_cstates),
	X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE,		&icl_cstates),
	X86_MATCH_INTEL_FAM6_MODEL(ROCKETLAKE,		&icl_cstates),
	X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE,		&adl_cstates),
	X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_L,		&adl_cstates),
	X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE,		&adl_cstates),
	{ },
};
MODULE_DEVICE_TABLE(x86cpu, intel_cstates_match);

static int __init cstate_probe(const struct cstate_model *cm)
{
	/* SLM has different MSR for PKG C6 */
	if (cm->quirks & SLM_PKG_C6_USE_C7_MSR)
		pkg_msr[PERF_CSTATE_PKG_C6_RES].msr = MSR_PKG_C7_RESIDENCY;

	/* KNL has different MSR for CORE C6 */
	if (cm->quirks & KNL_CORE_C6_MSR)
		core_msr[PERF_CSTATE_CORE_C6_RES].msr = MSR_KNL_CORE_C6_RESIDENCY;

	core_msr_mask = perf_msr_probe(core_msr, PERF_CSTATE_CORE_EVENT_MAX,
				       true, (void *) &cm->core_events);

	pkg_msr_mask = perf_msr_probe(pkg_msr, PERF_CSTATE_PKG_EVENT_MAX,
				      true, (void *) &cm->pkg_events);

	has_cstate_core = !!core_msr_mask;
	has_cstate_pkg = !!pkg_msr_mask;

	return (has_cstate_core || has_cstate_pkg) ? 0 : -ENODEV;
}
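/*
 * Worked example for the probe above, assuming a hypothetical model with
 * core_events = BIT(PERF_CSTATE_CORE_C1_RES) | BIT(PERF_CSTATE_CORE_C6_RES)
 * and both MSRs readable: perf_msr_probe() returns 0x5 (bits 0 and 2 set),
 * so only c1-residency and c6-residency become visible in sysfs and pass
 * the core_msr_mask check in cstate_pmu_event_init().
 */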

static inline void cstate_cleanup(void)
{
	cpuhp_remove_state_nocalls(CPUHP_AP_PERF_X86_CSTATE_ONLINE);
	cpuhp_remove_state_nocalls(CPUHP_AP_PERF_X86_CSTATE_STARTING);

	if (has_cstate_core)
		perf_pmu_unregister(&cstate_core_pmu);

	if (has_cstate_pkg)
		perf_pmu_unregister(&cstate_pkg_pmu);
}

static int __init cstate_init(void)
{
	int err;

	cpuhp_setup_state(CPUHP_AP_PERF_X86_CSTATE_STARTING,
			  "perf/x86/cstate:starting", cstate_cpu_init, NULL);
	cpuhp_setup_state(CPUHP_AP_PERF_X86_CSTATE_ONLINE,
			  "perf/x86/cstate:online", NULL, cstate_cpu_exit);

	if (has_cstate_core) {
		err = perf_pmu_register(&cstate_core_pmu, cstate_core_pmu.name, -1);
		if (err) {
			has_cstate_core = false;
			pr_info("Failed to register cstate core pmu\n");
			cstate_cleanup();
			return err;
		}
	}

	if (has_cstate_pkg) {
		if (topology_max_die_per_package() > 1) {
			/*
			 * On multi-die platforms the counters are scoped to
			 * a die (see topology_die_cpumask() usage above), so
			 * register the PMU as "cstate_die" instead.
			 */
			err = perf_pmu_register(&cstate_pkg_pmu,
						"cstate_die", -1);
		} else {
			err = perf_pmu_register(&cstate_pkg_pmu,
						cstate_pkg_pmu.name, -1);
		}
		if (err) {
			has_cstate_pkg = false;
			pr_info("Failed to register cstate pkg pmu\n");
			cstate_cleanup();
			return err;
		}
	}
	return 0;
}

static int __init cstate_pmu_init(void)
{
	const struct x86_cpu_id *id;
	int err;

	if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
		return -ENODEV;

	id = x86_match_cpu(intel_cstates_match);
	if (!id)
		return -ENODEV;

	err = cstate_probe((const struct cstate_model *) id->driver_data);
	if (err)
		return err;

	return cstate_init();
}
module_init(cstate_pmu_init);

static void __exit cstate_pmu_exit(void)
{
	cstate_cleanup();
}
module_exit(cstate_pmu_exit);