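/*
 * Resource Director Technology (RDT):
 * cache allocation and memory bandwidth allocation support.
 */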
#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt

#include <linux/slab.h>
#include <linux/err.h>
#include <linux/cpu.h>
#include <asm/processor.h>

#include <asm/intel-family.h>
#include <asm/intel_rdt_sched.h>
#include "intel_rdt.h"

#define MBA_IS_LINEAR	0x4
#define MBA_MAX_MBPS	U32_MAX
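
/* Mutex to protect rdtgroup access. */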
DEFINE_MUTEX(rdtgroup_mutex);
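
/*
 * The cached intel_pqr_state is strictly per CPU and can never be
 * updated w/o explicit serialization.
 */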
DEFINE_PER_CPU(struct intel_pqr_state, pqr_state);
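
/*
 * Used to store the max resource name width and max resource data width
 * to display the schemata in a tabular format.
 */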
int max_name_width, max_data_width;
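
/*
 * Global boolean which is true if any resource allocation is enabled.
 */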
bool rdt_alloc_capable;

static void
mba_wrmsr(struct rdt_domain *d, struct msr_param *m, struct rdt_resource *r);
static void
cat_wrmsr(struct rdt_domain *d, struct msr_param *m, struct rdt_resource *r);

#define domain_init(id) LIST_HEAD_INIT(rdt_resources_all[id].domains)

struct rdt_resource rdt_resources_all[] = {
	[RDT_RESOURCE_L3] =
	{
		.rid			= RDT_RESOURCE_L3,
		.name			= "L3",
		.domains		= domain_init(RDT_RESOURCE_L3),
		.msr_base		= IA32_L3_CBM_BASE,
		.msr_update		= cat_wrmsr,
		.cache_level		= 3,
		.cache = {
			.min_cbm_bits	= 1,
			.cbm_idx_mult	= 1,
			.cbm_idx_offset	= 0,
		},
		.parse_ctrlval		= parse_cbm,
		.format_str		= "%d=%0*x",
		.fflags			= RFTYPE_RES_CACHE,
	},
	[RDT_RESOURCE_L3DATA] =
	{
		.rid			= RDT_RESOURCE_L3DATA,
		.name			= "L3DATA",
		.domains		= domain_init(RDT_RESOURCE_L3DATA),
		.msr_base		= IA32_L3_CBM_BASE,
		.msr_update		= cat_wrmsr,
		.cache_level		= 3,
		.cache = {
			.min_cbm_bits	= 1,
			.cbm_idx_mult	= 2,
			.cbm_idx_offset	= 0,
		},
		.parse_ctrlval		= parse_cbm,
		.format_str		= "%d=%0*x",
		.fflags			= RFTYPE_RES_CACHE,
	},
	[RDT_RESOURCE_L3CODE] =
	{
		.rid			= RDT_RESOURCE_L3CODE,
		.name			= "L3CODE",
		.domains		= domain_init(RDT_RESOURCE_L3CODE),
		.msr_base		= IA32_L3_CBM_BASE,
		.msr_update		= cat_wrmsr,
		.cache_level		= 3,
		.cache = {
			.min_cbm_bits	= 1,
			.cbm_idx_mult	= 2,
			.cbm_idx_offset	= 1,
		},
		.parse_ctrlval		= parse_cbm,
		.format_str		= "%d=%0*x",
		.fflags			= RFTYPE_RES_CACHE,
	},
	[RDT_RESOURCE_L2] =
	{
		.rid			= RDT_RESOURCE_L2,
		.name			= "L2",
		.domains		= domain_init(RDT_RESOURCE_L2),
		.msr_base		= IA32_L2_CBM_BASE,
		.msr_update		= cat_wrmsr,
		.cache_level		= 2,
		.cache = {
			.min_cbm_bits	= 1,
			.cbm_idx_mult	= 1,
			.cbm_idx_offset	= 0,
		},
		.parse_ctrlval		= parse_cbm,
		.format_str		= "%d=%0*x",
		.fflags			= RFTYPE_RES_CACHE,
	},
	[RDT_RESOURCE_L2DATA] =
	{
		.rid			= RDT_RESOURCE_L2DATA,
		.name			= "L2DATA",
		.domains		= domain_init(RDT_RESOURCE_L2DATA),
		.msr_base		= IA32_L2_CBM_BASE,
		.msr_update		= cat_wrmsr,
		.cache_level		= 2,
		.cache = {
			.min_cbm_bits	= 1,
			.cbm_idx_mult	= 2,
			.cbm_idx_offset	= 0,
		},
		.parse_ctrlval		= parse_cbm,
		.format_str		= "%d=%0*x",
		.fflags			= RFTYPE_RES_CACHE,
	},
	[RDT_RESOURCE_L2CODE] =
	{
		.rid			= RDT_RESOURCE_L2CODE,
		.name			= "L2CODE",
		.domains		= domain_init(RDT_RESOURCE_L2CODE),
		.msr_base		= IA32_L2_CBM_BASE,
		.msr_update		= cat_wrmsr,
		.cache_level		= 2,
		.cache = {
			.min_cbm_bits	= 1,
			.cbm_idx_mult	= 2,
			.cbm_idx_offset	= 1,
		},
		.parse_ctrlval		= parse_cbm,
		.format_str		= "%d=%0*x",
		.fflags			= RFTYPE_RES_CACHE,
	},
	[RDT_RESOURCE_MBA] =
	{
		.rid			= RDT_RESOURCE_MBA,
		.name			= "MB",
		.domains		= domain_init(RDT_RESOURCE_MBA),
		.msr_base		= IA32_MBA_THRTL_BASE,
		.msr_update		= mba_wrmsr,
		.cache_level		= 3,
		.parse_ctrlval		= parse_bw,
		.format_str		= "%d=%*u",
		.fflags			= RFTYPE_RES_MB,
	},
};

static unsigned int cbm_idx(struct rdt_resource *r, unsigned int closid)
{
	return closid * r->cache.cbm_idx_mult + r->cache.cbm_idx_offset;
}
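
/*
 * cache_alloc_hsw_probe() - Have to probe for Intel haswell server CPUs
 * as they do not have CPUID enumeration support for Cache allocation.
 *
 * Probe by trying to write the first of the L3 cache mask registers
 * and checking that the bits stick. Max CLOSids is always 4 and max cbm
 * length is always 20 on hsw server parts. The minimum cache bitmask
 * length allowed for HSW server is always 2 bits.
 */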
static inline void cache_alloc_hsw_probe(void)
{
	struct rdt_resource *r = &rdt_resources_all[RDT_RESOURCE_L3];
	u32 l, h, max_cbm = BIT_MASK(20) - 1;

	if (wrmsr_safe(IA32_L3_CBM_BASE, max_cbm, 0))
		return;
	rdmsr(IA32_L3_CBM_BASE, l, h);

	/* If all the bits were set in MSR, return success */
	if (l != max_cbm)
		return;

	r->num_closid = 4;
	r->default_ctrl = max_cbm;
	r->cache.cbm_len = 20;
	r->cache.shareable_bits = 0xc0000;
	r->cache.min_cbm_bits = 2;
	r->alloc_capable = true;
	r->alloc_enabled = true;

	rdt_alloc_capable = true;
}

bool is_mba_sc(struct rdt_resource *r)
{
	if (!r)
		return rdt_resources_all[RDT_RESOURCE_MBA].membw.mba_sc;

	return r->membw.mba_sc;
}
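
/*
 * rdt_get_mb_table() - get a mapping of memory b/w percentage values
 * exposed to the user interface and the h/w understandable delay values.
 *
 * The non-linear delay values have no mapping, hence delay values
 * cannot be derived en masse and a per-SKU table would be required.
 */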
static inline bool rdt_get_mb_table(struct rdt_resource *r)
{
	/*
	 * There are no Intel SKUs as of this writing which support
	 * non-linear delay values.
	 */
	pr_info("MBA b/w map not implemented for cpu:%d, model:%d\n",
		boot_cpu_data.x86, boot_cpu_data.x86_model);

	return false;
}

static bool rdt_get_mem_config(struct rdt_resource *r)
{
	union cpuid_0x10_3_eax eax;
	union cpuid_0x10_x_edx edx;
	u32 ebx, ecx;

	cpuid_count(0x00000010, 3, &eax.full, &ebx, &ecx, &edx.full);
	r->num_closid = edx.split.cos_max + 1;
	r->membw.max_delay = eax.split.max_delay + 1;
	r->default_ctrl = MAX_MBA_BW;
	if (ecx & MBA_IS_LINEAR) {
		r->membw.delay_linear = true;
		r->membw.min_bw = MAX_MBA_BW - r->membw.max_delay;
		r->membw.bw_gran = MAX_MBA_BW - r->membw.max_delay;
	} else {
		if (!rdt_get_mb_table(r))
			return false;
	}
	r->data_width = 3;

	r->alloc_capable = true;
	r->alloc_enabled = true;

	return true;
}

static void rdt_get_cache_alloc_cfg(int idx, struct rdt_resource *r)
{
	union cpuid_0x10_1_eax eax;
	union cpuid_0x10_x_edx edx;
	u32 ebx, ecx;

	cpuid_count(0x00000010, idx, &eax.full, &ebx, &ecx, &edx.full);
	r->num_closid = edx.split.cos_max + 1;
	r->cache.cbm_len = eax.split.cbm_len + 1;
	r->default_ctrl = BIT_MASK(eax.split.cbm_len + 1) - 1;
	r->cache.shareable_bits = ebx & r->default_ctrl;
	r->data_width = (r->cache.cbm_len + 3) / 4;
	r->alloc_capable = true;
	r->alloc_enabled = true;
}

static void rdt_get_cdp_config(int level, int type)
{
	struct rdt_resource *r_l = &rdt_resources_all[level];
	struct rdt_resource *r = &rdt_resources_all[type];

	r->num_closid = r_l->num_closid / 2;
	r->cache.cbm_len = r_l->cache.cbm_len;
	r->default_ctrl = r_l->default_ctrl;
	r->cache.shareable_bits = r_l->cache.shareable_bits;
	r->data_width = (r->cache.cbm_len + 3) / 4;
	r->alloc_capable = true;
	/*
	 * By default, CDP is disabled. CDP can be enabled by mount parameter
	 * "cdp" during resctrl file system mount time.
	 */
	r->alloc_enabled = false;
}

static void rdt_get_cdp_l3_config(void)
{
	rdt_get_cdp_config(RDT_RESOURCE_L3, RDT_RESOURCE_L3DATA);
	rdt_get_cdp_config(RDT_RESOURCE_L3, RDT_RESOURCE_L3CODE);
}

static void rdt_get_cdp_l2_config(void)
{
	rdt_get_cdp_config(RDT_RESOURCE_L2, RDT_RESOURCE_L2DATA);
	rdt_get_cdp_config(RDT_RESOURCE_L2, RDT_RESOURCE_L2CODE);
}

static int get_cache_id(int cpu, int level)
{
	return get_cpu_cache_id(cpu, level);
}
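
/*
 * Map the memory b/w percentage value to delay values
 * that can be written to QOS MSRs.
 * There are currently no SKUs which support non linear delay values.
 */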
u32 delay_bw_map(unsigned long bw, struct rdt_resource *r)
{
	if (r->membw.delay_linear)
		return MAX_MBA_BW - bw;

	pr_warn_once("Non Linear delay-bw map not supported but queried\n");
	return r->default_ctrl;
}

static void
mba_wrmsr(struct rdt_domain *d, struct msr_param *m, struct rdt_resource *r)
{
	unsigned int i;

	/* Write the delay values for mba. */
	for (i = m->low; i < m->high; i++)
		wrmsrl(r->msr_base + i, delay_bw_map(d->ctrl_val[i], r));
}

static void
cat_wrmsr(struct rdt_domain *d, struct msr_param *m, struct rdt_resource *r)
{
	unsigned int i;

	for (i = m->low; i < m->high; i++)
		wrmsrl(r->msr_base + cbm_idx(r, i), d->ctrl_val[i]);
}

struct rdt_domain *get_domain_from_cpu(int cpu, struct rdt_resource *r)
{
	struct rdt_domain *d;

	list_for_each_entry(d, &r->domains, list) {
		/* Find the domain that contains this CPU */
		if (cpumask_test_cpu(cpu, &d->cpu_mask))
			return d;
	}

	return NULL;
}

void rdt_ctrl_update(void *arg)
{
	struct msr_param *m = arg;
	struct rdt_resource *r = m->res;
	int cpu = smp_processor_id();
	struct rdt_domain *d;

	d = get_domain_from_cpu(cpu, r);
	if (d) {
		r->msr_update(d, m, r);
		return;
	}
	pr_warn_once("cpu %d not found in any domain for resource %s\n",
		     cpu, r->name);
}
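
/*
 * rdt_find_domain - Find a domain in a resource that matches input resource id
 *
 * Search resource r's domain list to find the resource id. If the resource
 * id is found in a domain, return the domain. Otherwise, if requested by
 * caller, return the first domain whose id is bigger than the input id.
 * The domain list is sorted by id in ascending order.
 */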
struct rdt_domain *rdt_find_domain(struct rdt_resource *r, int id,
				   struct list_head **pos)
{
	struct rdt_domain *d;
	struct list_head *l;

	if (id < 0)
		return ERR_PTR(id);

	list_for_each(l, &r->domains) {
		d = list_entry(l, struct rdt_domain, list);
		/* When id is found, return its domain. */
		if (id == d->id)
			return d;
		/* Stop searching when finding id's position in sorted list. */
		if (id < d->id)
			break;
	}

	if (pos)
		*pos = l;

	return NULL;
}

void setup_default_ctrlval(struct rdt_resource *r, u32 *dc, u32 *dm)
{
	int i;
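
	/*
	 * Initialize the Control MSRs to having no control.
	 * For Cache Allocation: Set all bits in cbm.
	 * For Memory Allocation: Set b/w requested to 100%
	 * and the bandwidth in MBps to U32_MAX.
	 */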
	for (i = 0; i < r->num_closid; i++, dc++, dm++) {
		*dc = r->default_ctrl;
		*dm = MBA_MAX_MBPS;
	}
}

static int domain_setup_ctrlval(struct rdt_resource *r, struct rdt_domain *d,
				bool notifier)
{
	struct msr_param m;
	u32 *dc, *dm;

	dc = kmalloc_array(r->num_closid, sizeof(*d->ctrl_val), GFP_KERNEL);
	if (!dc)
		return -ENOMEM;

	dm = kmalloc_array(r->num_closid, sizeof(*d->mbps_val), GFP_KERNEL);
	if (!dm) {
		kfree(dc);
		return -ENOMEM;
	}

	d->ctrl_val = dc;
	d->mbps_val = dm;
	setup_default_ctrlval(r, dc, dm);

	if (notifier) {
		m.low = 0;
		m.high = r->num_closid;
		r->msr_update(d, &m, r);
	}
	return 0;
}

static int domain_setup_mon_state(struct rdt_resource *r, struct rdt_domain *d)
{
	size_t tsize;

	if (is_llc_occupancy_enabled()) {
		d->rmid_busy_llc = kcalloc(BITS_TO_LONGS(r->num_rmid),
					   sizeof(unsigned long),
					   GFP_KERNEL);
		if (!d->rmid_busy_llc)
			return -ENOMEM;
		INIT_DELAYED_WORK(&d->cqm_limbo, cqm_handle_limbo);
	}
	if (is_mbm_total_enabled()) {
		tsize = sizeof(*d->mbm_total);
		d->mbm_total = kcalloc(r->num_rmid, tsize, GFP_KERNEL);
		if (!d->mbm_total) {
			kfree(d->rmid_busy_llc);
			return -ENOMEM;
		}
	}
	if (is_mbm_local_enabled()) {
		tsize = sizeof(*d->mbm_local);
		d->mbm_local = kcalloc(r->num_rmid, tsize, GFP_KERNEL);
		if (!d->mbm_local) {
			kfree(d->rmid_busy_llc);
			kfree(d->mbm_total);
			return -ENOMEM;
		}
	}

	if (is_mbm_enabled()) {
		INIT_DELAYED_WORK(&d->mbm_over, mbm_handle_overflow);
		mbm_setup_overflow_handler(d, MBM_OVERFLOW_INTERVAL);
	}

	return 0;
}
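
/*
 * domain_add_cpu - Add a cpu to a resource's domain list.
 *
 * If an existing domain in the resource r's domain list matches the cpu's
 * resource id, add the cpu in the domain.
 *
 * Otherwise, a new domain is allocated and inserted into the domain list
 * sorted by id in ascending order.
 *
 * The order in the domain list is visible to users when we print entries
 * in the schemata file and schemata input is validated to have the same order
 * as this list.
 */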
static void domain_add_cpu(int cpu, struct rdt_resource *r, bool notifier)
{
	int id = get_cache_id(cpu, r->cache_level);
	struct list_head *add_pos = NULL;
	struct rdt_domain *d;

	d = rdt_find_domain(r, id, &add_pos);
	if (IS_ERR(d)) {
		pr_warn("Couldn't find cache id for cpu %d\n", cpu);
		return;
	}

	if (d) {
		cpumask_set_cpu(cpu, &d->cpu_mask);
		return;
	}

	d = kzalloc_node(sizeof(*d), GFP_KERNEL, cpu_to_node(cpu));
	if (!d)
		return;

	d->id = id;
	cpumask_set_cpu(cpu, &d->cpu_mask);

	if (r->alloc_capable && domain_setup_ctrlval(r, d, notifier)) {
		kfree(d);
		return;
	}

	if (r->mon_capable && domain_setup_mon_state(r, d)) {
		kfree(d);
		return;
	}

	list_add_tail(&d->list, add_pos);
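
	/*
	 * If resctrl is mounted, add
	 * per domain monitor data directories.
	 */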
	if (static_key_false(&rdt_mon_enable_key))
		mkdir_mondata_subdir_allrdtgrp(r, d);
}

static void domain_remove_cpu(int cpu, struct rdt_resource *r)
{
	int id = get_cache_id(cpu, r->cache_level);
	struct rdt_domain *d;

	d = rdt_find_domain(r, id, NULL);
	if (IS_ERR_OR_NULL(d)) {
		pr_warn("Couldn't find cache id for cpu %d\n", cpu);
		return;
	}

	cpumask_clear_cpu(cpu, &d->cpu_mask);
	if (cpumask_empty(&d->cpu_mask)) {
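		/*
		 * If resctrl is mounted, remove all the
		 * per domain monitor data directories.
		 */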
		if (static_key_false(&rdt_mon_enable_key))
			rmdir_mondata_subdir_allrdtgrp(r, d->id);
		list_del(&d->list);
		if (is_mbm_enabled())
			cancel_delayed_work(&d->mbm_over);
		if (is_llc_occupancy_enabled() && has_busy_rmid(r, d)) {
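			/*
			 * When a package is going down, forcefully
			 * decrement rmid->ebusy. There is no way to know
			 * that the L3 was flushed and hence may lead to
			 * incorrect counts in rare scenarios, but leaving
			 * the RMID as busy creates RMID leaks if the
			 * package never comes back.
			 */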
			__check_limbo(d, true);
			cancel_delayed_work(&d->cqm_limbo);
		}

		kfree(d->ctrl_val);
		kfree(d->mbps_val);
		kfree(d->rmid_busy_llc);
		kfree(d->mbm_total);
		kfree(d->mbm_local);
		kfree(d);
		return;
	}

	if (r == &rdt_resources_all[RDT_RESOURCE_L3]) {
		if (is_mbm_enabled() && cpu == d->mbm_work_cpu) {
			cancel_delayed_work(&d->mbm_over);
			mbm_setup_overflow_handler(d, 0);
		}
		if (is_llc_occupancy_enabled() && cpu == d->cqm_work_cpu &&
		    has_busy_rmid(r, d)) {
			cancel_delayed_work(&d->cqm_limbo);
			cqm_setup_limbo_handler(d, 0);
		}
	}
}

static void clear_closid_rmid(int cpu)
{
	struct intel_pqr_state *state = this_cpu_ptr(&pqr_state);

	state->default_closid = 0;
	state->default_rmid = 0;
	state->cur_closid = 0;
	state->cur_rmid = 0;
	wrmsr(IA32_PQR_ASSOC, 0, 0);
}

static int intel_rdt_online_cpu(unsigned int cpu, bool notifier)
{
	struct rdt_resource *r;

	mutex_lock(&rdtgroup_mutex);
	for_each_capable_rdt_resource(r)
		domain_add_cpu(cpu, r, notifier);
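	/* The cpu is set in default rdtgroup after online. */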
	cpumask_set_cpu(cpu, &rdtgroup_default.cpu_mask);
	if (notifier)
		clear_closid_rmid(cpu);

	mutex_unlock(&rdtgroup_mutex);

	return 0;
}

static void clear_childcpus(struct rdtgroup *r, unsigned int cpu)
{
	struct rdtgroup *cr;

	list_for_each_entry(cr, &r->mon.crdtgrp_list, mon.crdtgrp_list) {
		if (cpumask_test_and_clear_cpu(cpu, &cr->cpu_mask))
			break;
	}
}

static int intel_rdt_offline_cpu(unsigned int cpu)
{
	struct rdtgroup *rdtgrp;
	struct rdt_resource *r;

	mutex_lock(&rdtgroup_mutex);
	for_each_capable_rdt_resource(r)
		domain_remove_cpu(cpu, r);
	list_for_each_entry(rdtgrp, &rdt_all_groups, rdtgroup_list) {
		if (cpumask_test_and_clear_cpu(cpu, &rdtgrp->cpu_mask)) {
			clear_childcpus(rdtgrp, cpu);
			break;
		}
	}
	clear_closid_rmid(cpu);
	mutex_unlock(&rdtgroup_mutex);

	return 0;
}

static int
rdt_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
{
	unsigned int cpu = (long)hcpu;

	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_ONLINE:
	case CPU_DOWN_FAILED:
		intel_rdt_online_cpu(cpu, true);
		break;
	case CPU_UP_CANCELED:
	case CPU_DOWN_PREPARE:
		intel_rdt_offline_cpu(cpu);
		break;
	default:
		break;
	}

	return NOTIFY_OK;
}

static void __init rdt_cpu_setup(void *dummy)
{
	struct rdt_resource *r;
	int i, idx;
	u32 val;

	clear_closid_rmid(smp_processor_id());

	/* Reset all control MSRs of each alloc capable resource to default. */
	for_each_alloc_capable_rdt_resource(r) {
		for (i = 0; i < r->num_closid; i++) {
			if (r == &rdt_resources_all[RDT_RESOURCE_MBA]) {
				idx = i;
				val = delay_bw_map(r->default_ctrl, r);
			} else {
				idx = cbm_idx(r, i);
				val = r->default_ctrl;
			}
			wrmsrl(r->msr_base + idx, val);
		}
	}
}

static struct notifier_block rdt_cpu_nb = {
	.notifier_call	= rdt_cpu_notify,
	.priority	= -INT_MAX,
};

static int __init rdt_notifier_init(void)
{
	unsigned int cpu;

	for_each_online_cpu(cpu) {
		intel_rdt_online_cpu(cpu, false);
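
		/*
		 * Reset the control MSRs and the PQR_ASSOC state of this
		 * CPU to their default values.
		 */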
		smp_call_function_single(cpu, rdt_cpu_setup, NULL, 1);
	}

	__register_cpu_notifier(&rdt_cpu_nb);
	return 0;
}
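
/*
 * Compute the widths used to pretty-print the schemata file: the longest
 * resource name and the widest control value field.
 */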
static __init void rdt_init_padding(void)
{
	struct rdt_resource *r;
	int cl;

	for_each_alloc_capable_rdt_resource(r) {
		cl = strlen(r->name);
		if (cl > max_name_width)
			max_name_width = cl;

		if (r->data_width > max_data_width)
			max_data_width = r->data_width;
	}
}

enum {
	RDT_FLAG_CMT,
	RDT_FLAG_MBM_TOTAL,
	RDT_FLAG_MBM_LOCAL,
	RDT_FLAG_L3_CAT,
	RDT_FLAG_L3_CDP,
	RDT_FLAG_L2_CAT,
	RDT_FLAG_L2_CDP,
	RDT_FLAG_MBA,
};

#define RDT_OPT(idx, n, f)	\
[idx] = {			\
	.name = n,		\
	.flag = f		\
}

struct rdt_options {
	char *name;
	int flag;
	bool force_off, force_on;
};

static struct rdt_options rdt_options[] __initdata = {
	RDT_OPT(RDT_FLAG_CMT,	    "cmt",	X86_FEATURE_CQM_OCCUP_LLC),
	RDT_OPT(RDT_FLAG_MBM_TOTAL, "mbmtotal",	X86_FEATURE_CQM_MBM_TOTAL),
	RDT_OPT(RDT_FLAG_MBM_LOCAL, "mbmlocal",	X86_FEATURE_CQM_MBM_LOCAL),
	RDT_OPT(RDT_FLAG_L3_CAT,    "l3cat",	X86_FEATURE_CAT_L3),
	RDT_OPT(RDT_FLAG_L3_CDP,    "l3cdp",	X86_FEATURE_CDP_L3),
	RDT_OPT(RDT_FLAG_L2_CAT,    "l2cat",	X86_FEATURE_CAT_L2),
	RDT_OPT(RDT_FLAG_L2_CDP,    "l2cdp",	X86_FEATURE_CDP_L2),
	RDT_OPT(RDT_FLAG_MBA,	    "mba",	X86_FEATURE_MBA),
};
#define NUM_RDT_OPTIONS ARRAY_SIZE(rdt_options)

/*
 * Parse the "rdt=" boot option: a comma separated list of option names,
 * each optionally prefixed with '!' to force the feature off instead of
 * on, e.g. "rdt=cmt,!l3cat".
 */
static int __init set_rdt_options(char *str)
{
	struct rdt_options *o;
	bool force_off;
	char *tok;

	if (*str == '=')
		str++;
	while ((tok = strsep(&str, ",")) != NULL) {
		force_off = *tok == '!';
		if (force_off)
			tok++;
		for (o = rdt_options; o < &rdt_options[NUM_RDT_OPTIONS]; o++) {
			if (strcmp(tok, o->name) == 0) {
				if (force_off)
					o->force_off = true;
				else
					o->force_on = true;
				break;
			}
		}
	}
	return 1;
}
__setup("rdt", set_rdt_options);

static bool __init rdt_cpu_has(int flag)
{
	bool ret = boot_cpu_has(flag);
	struct rdt_options *o;

	if (!ret)
		return ret;

	for (o = rdt_options; o < &rdt_options[NUM_RDT_OPTIONS]; o++) {
		if (flag == o->flag) {
			if (o->force_off)
				ret = false;
			if (o->force_on)
				ret = true;
			break;
		}
	}
	return ret;
}

static __init bool get_rdt_alloc_resources(void)
{
	bool ret = false;

	if (rdt_alloc_capable)
		return true;

	if (!boot_cpu_has(X86_FEATURE_RDT_A))
		return false;

	if (rdt_cpu_has(X86_FEATURE_CAT_L3)) {
		rdt_get_cache_alloc_cfg(1, &rdt_resources_all[RDT_RESOURCE_L3]);
		if (rdt_cpu_has(X86_FEATURE_CDP_L3))
			rdt_get_cdp_l3_config();
		ret = true;
	}
	if (rdt_cpu_has(X86_FEATURE_CAT_L2)) {
		/* CPUID 0x10.2 fields are the same format as 0x10.1 */
		rdt_get_cache_alloc_cfg(2, &rdt_resources_all[RDT_RESOURCE_L2]);
		if (rdt_cpu_has(X86_FEATURE_CDP_L2))
			rdt_get_cdp_l2_config();
		ret = true;
	}

	if (rdt_cpu_has(X86_FEATURE_MBA)) {
		if (rdt_get_mem_config(&rdt_resources_all[RDT_RESOURCE_MBA]))
			ret = true;
	}
	return ret;
}

static __init bool get_rdt_mon_resources(void)
{
	if (rdt_cpu_has(X86_FEATURE_CQM_OCCUP_LLC))
		rdt_mon_features |= (1 << QOS_L3_OCCUP_EVENT_ID);
	if (rdt_cpu_has(X86_FEATURE_CQM_MBM_TOTAL))
		rdt_mon_features |= (1 << QOS_L3_MBM_TOTAL_EVENT_ID);
	if (rdt_cpu_has(X86_FEATURE_CQM_MBM_LOCAL))
		rdt_mon_features |= (1 << QOS_L3_MBM_LOCAL_EVENT_ID);

	if (!rdt_mon_features)
		return false;

	return !rdt_get_mon_l3_config(&rdt_resources_all[RDT_RESOURCE_L3]);
}

static __init void rdt_quirks(void)
{
	switch (boot_cpu_data.x86_model) {
	case INTEL_FAM6_HASWELL_X:
		if (!rdt_options[RDT_FLAG_L3_CAT].force_off)
			cache_alloc_hsw_probe();
		break;
	case INTEL_FAM6_SKYLAKE_X:
		if (boot_cpu_data.x86_mask <= 4)
			set_rdt_options("!cmt,!mbmtotal,!mbmlocal,!l3cat");
		else
			set_rdt_options("!l3cat");
	}
}

static __init bool get_rdt_resources(void)
{
	rdt_quirks();
	rdt_alloc_capable = get_rdt_alloc_resources();
	rdt_mon_capable = get_rdt_mon_resources();

	return (rdt_mon_capable || rdt_alloc_capable);
}

static int __init intel_rdt_late_init(void)
{
	struct rdt_resource *r;
	int state, ret;

	if (!get_rdt_resources())
		return -ENODEV;

	rdt_init_padding();

	cpu_notifier_register_begin();
	state = rdt_notifier_init();
	cpu_notifier_register_done();

	if (state < 0)
		return state;

	ret = rdtgroup_init();
	if (ret) {
		unregister_cpu_notifier(&rdt_cpu_nb);
		return ret;
	}

	for_each_alloc_capable_rdt_resource(r)
		pr_info("Intel RDT %s allocation detected\n", r->name);

	for_each_mon_capable_rdt_resource(r)
		pr_info("Intel RDT %s monitoring detected\n", r->name);

	return 0;
}

late_initcall(intel_rdt_late_init);