// SPDX-License-Identifier: GPL-2.0-only
/*
 * User interface for Resource Allocation in Resource Director Technology(RDT)
 *
 * Copyright (C) 2016 Intel Corporation
 *
 * Author: Fenghua Yu <fenghua.yu@intel.com>
 *
 * More information about RDT can be found in the Intel (R) x86 Architecture
 * Software Developer Manual, volume 3, section 17.17.
 */
#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt

#include <linux/cacheinfo.h>
#include <linux/cpu.h>
#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/fs_parser.h>
#include <linux/sysfs.h>
#include <linux/kernfs.h>
#include <linux/seq_buf.h>
#include <linux/seq_file.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/slab.h>
#include <linux/task_work.h>
#include <linux/user_namespace.h>

#include <uapi/linux/magic.h>

#include <asm/resctrl.h>
#include "internal.h"

DEFINE_STATIC_KEY_FALSE(rdt_enable_key);
DEFINE_STATIC_KEY_FALSE(rdt_mon_enable_key);
DEFINE_STATIC_KEY_FALSE(rdt_alloc_enable_key);
static struct kernfs_root *rdt_root;
struct rdtgroup rdtgroup_default;
LIST_HEAD(rdt_all_groups);

/* Kernfs node of the "info" directory under the resctrl root */
static struct kernfs_node *kn_info;

/* Kernfs node of the "mon_groups" directory under the resctrl root */
static struct kernfs_node *kn_mongrp;

/* Kernfs node of the "mon_data" directory under the resctrl root */
static struct kernfs_node *kn_mondata;

static struct seq_buf last_cmd_status;
static char last_cmd_status_buf[512];

struct dentry *debugfs_resctrl;

void rdt_last_cmd_clear(void)
{
	lockdep_assert_held(&rdtgroup_mutex);
	seq_buf_clear(&last_cmd_status);
}

void rdt_last_cmd_puts(const char *s)
{
	lockdep_assert_held(&rdtgroup_mutex);
	seq_buf_puts(&last_cmd_status, s);
}

void rdt_last_cmd_printf(const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	lockdep_assert_held(&rdtgroup_mutex);
	seq_buf_vprintf(&last_cmd_status, fmt, ap);
	va_end(ap);
}

/*
 * Trivial allocator for CLOSIDs. Since h/w only supports a small number,
 * we can keep a bitmap of free CLOSIDs in a single integer.
 *
 * Using a global CLOSID across all resources has some advantages and
 * some drawbacks:
 * + We can simply set "current->closid" to assign a task to a resource
 *   group.
 * + Context switch code can avoid extra memory references deciding which
 *   CLOSID to load into the PQR_ASSOC MSR.
 * - We give up some options in configuring resource groups across
 *   multi-socket systems.
 * - Our choices on how to configure each resource become progressively
 *   more limited as the number of resources grows.
 */
static int closid_free_map;
static int closid_free_map_len;

int closids_supported(void)
{
	return closid_free_map_len;
}

static void closid_init(void)
{
	struct rdt_resource *r;
	int rdt_min_closid = 32;

	/* Compute rdt_min_closid across all resources */
	for_each_alloc_enabled_rdt_resource(r)
		rdt_min_closid = min(rdt_min_closid, r->num_closid);

	closid_free_map = BIT_MASK(rdt_min_closid) - 1;

	/* CLOSID 0 is always reserved for the default group */
	closid_free_map &= ~1;
	closid_free_map_len = rdt_min_closid;
}

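/*
 * Pick the lowest set bit in closid_free_map as the new CLOSID, or return
 * -ENOSPC when no CLOSID is free.
 */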
static int closid_alloc(void)
{
	u32 closid = ffs(closid_free_map);

	if (closid == 0)
		return -ENOSPC;
	closid--;
	closid_free_map &= ~(1 << closid);

	return closid;
}

void closid_free(int closid)
{
	closid_free_map |= 1 << closid;
}

/**
 * closid_allocated - test if provided closid is in use
 * @closid: closid to be tested
 *
 * Return: true if @closid is currently associated with a resource group,
 * false if @closid is free
 */
static bool closid_allocated(unsigned int closid)
{
	return (closid_free_map & (1 << closid)) == 0;
}

/**
 * rdtgroup_mode_by_closid - Return mode of resource group with closid
 * @closid: closid of the resource group
 *
 * Each resource group is associated with a @closid. Here the mode
 * of a resource group can be queried by searching for it using its closid.
 *
 * Return: mode as &enum rdtgrp_mode of resource group with closid @closid
 */
enum rdtgrp_mode rdtgroup_mode_by_closid(int closid)
{
	struct rdtgroup *rdtgrp;

	list_for_each_entry(rdtgrp, &rdt_all_groups, rdtgroup_list) {
		if (rdtgrp->closid == closid)
			return rdtgrp->mode;
	}

	return RDT_NUM_MODES;
}

static const char * const rdt_mode_str[] = {
	[RDT_MODE_SHAREABLE]		= "shareable",
	[RDT_MODE_EXCLUSIVE]		= "exclusive",
	[RDT_MODE_PSEUDO_LOCKSETUP]	= "pseudo-locksetup",
	[RDT_MODE_PSEUDO_LOCKED]	= "pseudo-locked",
};

/**
 * rdtgroup_mode_str - Return the string representation of mode
 * @mode: the resource group mode as &enum rdtgrp_mode
 *
 * Return: string representation of valid mode, "unknown" otherwise
 */
static const char *rdtgroup_mode_str(enum rdtgrp_mode mode)
{
	if (mode < RDT_MODE_SHAREABLE || mode >= RDT_NUM_MODES)
		return "unknown";

	return rdt_mode_str[mode];
}

/* Set uid and gid of rdtgroup dirs and files to that of the creator. */
static int rdtgroup_kn_set_ugid(struct kernfs_node *kn)
{
	struct iattr iattr = { .ia_valid = ATTR_UID | ATTR_GID,
				.ia_uid = current_fsuid(),
				.ia_gid = current_fsgid(), };

	if (uid_eq(iattr.ia_uid, GLOBAL_ROOT_UID) &&
	    gid_eq(iattr.ia_gid, GLOBAL_ROOT_GID))
		return 0;

	return kernfs_setattr(kn, &iattr);
}

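/*
 * Create a file named after @rft under @parent_kn, wire it up to @rft's
 * kernfs ops, and give it the uid/gid of the creating process. The file
 * is removed again if changing the ownership fails.
 */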
static int rdtgroup_add_file(struct kernfs_node *parent_kn, struct rftype *rft)
{
	struct kernfs_node *kn;
	int ret;

	kn = __kernfs_create_file(parent_kn, rft->name, rft->mode,
				  GLOBAL_ROOT_UID, GLOBAL_ROOT_GID,
				  0, rft->kf_ops, rft, NULL, NULL);
	if (IS_ERR(kn))
		return PTR_ERR(kn);

	ret = rdtgroup_kn_set_ugid(kn);
	if (ret) {
		kernfs_remove(kn);
		return ret;
	}

	return 0;
}

static int rdtgroup_seqfile_show(struct seq_file *m, void *arg)
{
	struct kernfs_open_file *of = m->private;
	struct rftype *rft = of->kn->priv;

	if (rft->seq_show)
		return rft->seq_show(of, m, arg);
	return 0;
}

static ssize_t rdtgroup_file_write(struct kernfs_open_file *of, char *buf,
				   size_t nbytes, loff_t off)
{
	struct rftype *rft = of->kn->priv;

	if (rft->write)
		return rft->write(of, buf, nbytes, off);

	return -EINVAL;
}

static struct kernfs_ops rdtgroup_kf_single_ops = {
	.atomic_write_len	= PAGE_SIZE,
	.write			= rdtgroup_file_write,
	.seq_show		= rdtgroup_seqfile_show,
};

static struct kernfs_ops kf_mondata_ops = {
	.atomic_write_len	= PAGE_SIZE,
	.seq_show		= rdtgroup_mondata_show,
};

static bool is_cpu_list(struct kernfs_open_file *of)
{
	struct rftype *rft = of->kn->priv;

	return rft->flags & RFTYPE_FLAGS_CPUS_LIST;
}

static int rdtgroup_cpus_show(struct kernfs_open_file *of,
			      struct seq_file *s, void *v)
{
	struct rdtgroup *rdtgrp;
	struct cpumask *mask;
	int ret = 0;

	rdtgrp = rdtgroup_kn_lock_live(of->kn);

	if (rdtgrp) {
		if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) {
			if (!rdtgrp->plr->d) {
				rdt_last_cmd_clear();
				rdt_last_cmd_puts("Cache domain offline\n");
				ret = -ENODEV;
			} else {
				mask = &rdtgrp->plr->d->cpu_mask;
				seq_printf(s, is_cpu_list(of) ?
					   "%*pbl\n" : "%*pb\n",
					   cpumask_pr_args(mask));
			}
		} else {
			seq_printf(s, is_cpu_list(of) ? "%*pbl\n" : "%*pb\n",
				   cpumask_pr_args(&rdtgrp->cpu_mask));
		}
	} else {
		ret = -ENOENT;
	}
	rdtgroup_kn_unlock(of->kn);

	return ret;
}

/*
 * This is safe against resctrl_sched_in() called from __switch_to()
 * because __switch_to() is executed with interrupts disabled. A local call
 * from update_closid_rmid() is protected against __switch_to() because
 * preemption is disabled.
 */
static void update_cpu_closid_rmid(void *info)
{
	struct rdtgroup *r = info;

	if (r) {
		this_cpu_write(pqr_state.default_closid, r->closid);
		this_cpu_write(pqr_state.default_rmid, r->mon.rmid);
	}

	/*
	 * We cannot unconditionally write the MSR because the current
	 * executing task might have its own closid selected. Just reuse
	 * the context switch code.
	 */
	resctrl_sched_in();
}

/*
 * Update the PQR_ASSOC MSR on all cpus in @cpu_mask.
 *
 * Per task closids/rmids must have been set up before calling this function.
 */
static void
update_closid_rmid(const struct cpumask *cpu_mask, struct rdtgroup *r)
{
	int cpu = get_cpu();

	if (cpumask_test_cpu(cpu, cpu_mask))
		update_cpu_closid_rmid(r);
	smp_call_function_many(cpu_mask, update_cpu_closid_rmid, r, 1);
	put_cpu();
}

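/*
 * Update the CPU mask of a monitor group. CPUs may only be added if they
 * belong to the parent control group; CPUs dropped here are returned to
 * the parent, and CPUs added here are taken away from sibling monitor
 * groups of the same parent.
 */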
static int cpus_mon_write(struct rdtgroup *rdtgrp, cpumask_var_t newmask,
			  cpumask_var_t tmpmask)
{
	struct rdtgroup *prgrp = rdtgrp->mon.parent, *crgrp;
	struct list_head *head;

	/* Check whether cpus belong to parent ctrl group */
	cpumask_andnot(tmpmask, newmask, &prgrp->cpu_mask);
	if (cpumask_weight(tmpmask)) {
		rdt_last_cmd_puts("Can only add CPUs to mongroup that belong to parent\n");
		return -EINVAL;
	}

	/* Check whether cpus are dropped from this group */
	cpumask_andnot(tmpmask, &rdtgrp->cpu_mask, newmask);
	if (cpumask_weight(tmpmask)) {
		/* Give any dropped cpus to parent rdtgroup */
		cpumask_or(&prgrp->cpu_mask, &prgrp->cpu_mask, tmpmask);
		update_closid_rmid(tmpmask, prgrp);
	}

	/*
	 * If we added cpus, remove them from previous group that owned them
	 * and update per-cpu rmid.
	 */
	cpumask_andnot(tmpmask, newmask, &rdtgrp->cpu_mask);
	if (cpumask_weight(tmpmask)) {
		head = &prgrp->mon.crdtgrp_list;
		list_for_each_entry(crgrp, head, mon.crdtgrp_list) {
			if (crgrp == rdtgrp)
				continue;
			cpumask_andnot(&crgrp->cpu_mask, &crgrp->cpu_mask,
				       tmpmask);
		}
		update_closid_rmid(tmpmask, rdtgrp);
	}

	/* Done pushing/pulling - update this group with new mask */
	cpumask_copy(&rdtgrp->cpu_mask, newmask);

	return 0;
}

static void cpumask_rdtgrp_clear(struct rdtgroup *r, struct cpumask *m)
{
	struct rdtgroup *crgrp;

	cpumask_andnot(&r->cpu_mask, &r->cpu_mask, m);
	/* update the child mon group masks as well */
	list_for_each_entry(crgrp, &r->mon.crdtgrp_list, mon.crdtgrp_list)
		cpumask_and(&crgrp->cpu_mask, &r->cpu_mask, &crgrp->cpu_mask);
}

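/*
 * Update the CPU mask of a control group. CPUs dropped from the group are
 * handed to the default group; CPUs added are removed from whichever group
 * (and its monitor children) currently owns them.
 */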
static int cpus_ctrl_write(struct rdtgroup *rdtgrp, cpumask_var_t newmask,
			   cpumask_var_t tmpmask, cpumask_var_t tmpmask1)
{
	struct rdtgroup *r, *crgrp;
	struct list_head *head;

	/* Check whether cpus are dropped from this group */
	cpumask_andnot(tmpmask, &rdtgrp->cpu_mask, newmask);
	if (cpumask_weight(tmpmask)) {
		/* Can't drop from default group */
		if (rdtgrp == &rdtgroup_default) {
			rdt_last_cmd_puts("Can't drop CPUs from default group\n");
			return -EINVAL;
		}

		/* Give any dropped cpus to rdtgroup_default */
		cpumask_or(&rdtgroup_default.cpu_mask,
			   &rdtgroup_default.cpu_mask, tmpmask);
		update_closid_rmid(tmpmask, &rdtgroup_default);
	}

	/*
	 * If we added cpus, remove them from previous group and
	 * the prev group's child groups that owned them
	 * and update per-cpu closid/rmid.
	 */
	cpumask_andnot(tmpmask, newmask, &rdtgrp->cpu_mask);
	if (cpumask_weight(tmpmask)) {
		list_for_each_entry(r, &rdt_all_groups, rdtgroup_list) {
			if (r == rdtgrp)
				continue;
			cpumask_and(tmpmask1, &r->cpu_mask, tmpmask);
			if (cpumask_weight(tmpmask1))
				cpumask_rdtgrp_clear(r, tmpmask1);
		}
		update_closid_rmid(tmpmask, rdtgrp);
	}

	/* Done pushing/pulling - update this group with new mask */
	cpumask_copy(&rdtgrp->cpu_mask, newmask);

	/*
	 * Clear child mon group masks since there is a new parent mask
	 * now and update the rmid for the cpus the child lost.
	 */
	head = &rdtgrp->mon.crdtgrp_list;
	list_for_each_entry(crgrp, head, mon.crdtgrp_list) {
		cpumask_and(tmpmask, &rdtgrp->cpu_mask, &crgrp->cpu_mask);
		update_closid_rmid(tmpmask, rdtgrp);
		cpumask_clear(&crgrp->cpu_mask);
	}

	return 0;
}

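/*
 * Parse a write to the "cpus" or "cpus_list" file, reject offline CPUs
 * and pseudo-locked groups, then delegate to the control or monitor group
 * update helper.
 */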
static ssize_t rdtgroup_cpus_write(struct kernfs_open_file *of,
				   char *buf, size_t nbytes, loff_t off)
{
	cpumask_var_t tmpmask, newmask, tmpmask1;
	struct rdtgroup *rdtgrp;
	int ret;

	if (!buf)
		return -EINVAL;

	if (!zalloc_cpumask_var(&tmpmask, GFP_KERNEL))
		return -ENOMEM;
	if (!zalloc_cpumask_var(&newmask, GFP_KERNEL)) {
		free_cpumask_var(tmpmask);
		return -ENOMEM;
	}
	if (!zalloc_cpumask_var(&tmpmask1, GFP_KERNEL)) {
		free_cpumask_var(tmpmask);
		free_cpumask_var(newmask);
		return -ENOMEM;
	}

	rdtgrp = rdtgroup_kn_lock_live(of->kn);
	if (!rdtgrp) {
		ret = -ENOENT;
		goto unlock;
	}

	if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED ||
	    rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
		ret = -EINVAL;
		rdt_last_cmd_puts("Pseudo-locking in progress\n");
		goto unlock;
	}

	if (is_cpu_list(of))
		ret = cpulist_parse(buf, newmask);
	else
		ret = cpumask_parse(buf, newmask);

	if (ret) {
		rdt_last_cmd_puts("Bad CPU list/mask\n");
		goto unlock;
	}

	/* check that user didn't specify any offline cpus */
	cpumask_andnot(tmpmask, newmask, cpu_online_mask);
	if (cpumask_weight(tmpmask)) {
		ret = -EINVAL;
		rdt_last_cmd_puts("Can only assign online CPUs\n");
		goto unlock;
	}

	if (rdtgrp->type == RDTCTRL_GROUP)
		ret = cpus_ctrl_write(rdtgrp, newmask, tmpmask, tmpmask1);
	else if (rdtgrp->type == RDTMON_GROUP)
		ret = cpus_mon_write(rdtgrp, newmask, tmpmask);
	else
		ret = -EINVAL;

unlock:
	rdtgroup_kn_unlock(of->kn);
	free_cpumask_var(tmpmask);
	free_cpumask_var(newmask);
	free_cpumask_var(tmpmask1);

	return ret ?: nbytes;
}

struct task_move_callback {
	struct callback_head	work;
	struct rdtgroup		*rdtgrp;
};

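/*
 * task_work callback that runs when the task being moved next returns to
 * user space. It makes the new closid/rmid take effect via the context
 * switch path and drops the reference taken on the target group.
 */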
static void move_myself(struct callback_head *head)
{
	struct task_move_callback *callback;
	struct rdtgroup *rdtgrp;

	callback = container_of(head, struct task_move_callback, work);
	rdtgrp = callback->rdtgrp;

	/*
	 * If resource group was deleted before this task work callback
	 * was invoked, then assign the task to root group and free the
	 * resource group.
	 */
	if (atomic_dec_and_test(&rdtgrp->waitcount) &&
	    (rdtgrp->flags & RDT_DELETED)) {
		current->closid = 0;
		current->rmid = 0;
		kfree(rdtgrp);
	}

	if (unlikely(current->flags & PF_EXITING))
		goto out;

	preempt_disable();
	/* update PQR_ASSOC MSR to make resource group go into effect */
	resctrl_sched_in();
	preempt_enable();

out:
	kfree(callback);
}

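/*
 * Queue task_work so that @tsk updates its own PQR_ASSOC MSR from
 * move_myself(). The task's closid/rmid fields are only written once the
 * callback has been queued successfully.
 */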
static int __rdtgroup_move_task(struct task_struct *tsk,
				struct rdtgroup *rdtgrp)
{
	struct task_move_callback *callback;
	int ret;

	callback = kzalloc(sizeof(*callback), GFP_KERNEL);
	if (!callback)
		return -ENOMEM;
	callback->work.func = move_myself;
	callback->rdtgrp = rdtgrp;

	/*
	 * Take a refcount, so rdtgrp cannot be freed before the
	 * callback has been invoked.
	 */
	atomic_inc(&rdtgrp->waitcount);
	ret = task_work_add(tsk, &callback->work, true);
	if (ret) {
		/*
		 * Task is exiting. Drop the refcount and free the callback.
		 * No need to check the refcount as the group cannot be
		 * deleted before the write function unlocks rdtgroup_mutex.
		 */
		atomic_dec(&rdtgrp->waitcount);
		kfree(callback);
		rdt_last_cmd_puts("Task exited\n");
	} else {
		/*
		 * For ctrl_mon groups move both closid and rmid.
		 * For monitor groups, can move the tasks only from
		 * their parent CTRL group.
		 */
		if (rdtgrp->type == RDTCTRL_GROUP) {
			tsk->closid = rdtgrp->closid;
			tsk->rmid = rdtgrp->mon.rmid;
		} else if (rdtgrp->type == RDTMON_GROUP) {
			if (rdtgrp->mon.parent->closid == tsk->closid) {
				tsk->rmid = rdtgrp->mon.rmid;
			} else {
				rdt_last_cmd_puts("Can't move task to different control group\n");
				ret = -EINVAL;
			}
		}
	}
	return ret;
}

static bool is_closid_match(struct task_struct *t, struct rdtgroup *r)
{
	return (rdt_alloc_capable &&
		(r->type == RDTCTRL_GROUP) && (t->closid == r->closid));
}

static bool is_rmid_match(struct task_struct *t, struct rdtgroup *r)
{
	return (rdt_mon_capable &&
		(r->type == RDTMON_GROUP) && (t->rmid == r->mon.rmid));
}

/**
 * rdtgroup_tasks_assigned - Test if tasks have been assigned to resource group
 * @r: Resource group
 *
 * Return: 1 if tasks have been assigned to @r, 0 otherwise
 */
int rdtgroup_tasks_assigned(struct rdtgroup *r)
{
	struct task_struct *p, *t;
	int ret = 0;

	lockdep_assert_held(&rdtgroup_mutex);

	rcu_read_lock();
	for_each_process_thread(p, t) {
		if (is_closid_match(t, r) || is_rmid_match(t, r)) {
			ret = 1;
			break;
		}
	}
	rcu_read_unlock();

	return ret;
}

static int rdtgroup_task_write_permission(struct task_struct *task,
					  struct kernfs_open_file *of)
{
	const struct cred *tcred = get_task_cred(task);
	const struct cred *cred = current_cred();
	int ret = 0;

	/*
	 * Even if we're attaching all tasks in the thread group, we only
	 * need to check permissions on one of them.
	 */
	if (!uid_eq(cred->euid, GLOBAL_ROOT_UID) &&
	    !uid_eq(cred->euid, tcred->uid) &&
	    !uid_eq(cred->euid, tcred->suid)) {
		rdt_last_cmd_printf("No permission to move task %d\n", task->pid);
		ret = -EPERM;
	}

	put_cred(tcred);
	return ret;
}

static int rdtgroup_move_task(pid_t pid, struct rdtgroup *rdtgrp,
			      struct kernfs_open_file *of)
{
	struct task_struct *tsk;
	int ret;

	rcu_read_lock();
	if (pid) {
		tsk = find_task_by_vpid(pid);
		if (!tsk) {
			rcu_read_unlock();
			rdt_last_cmd_printf("No task %d\n", pid);
			return -ESRCH;
		}
	} else {
		tsk = current;
	}

	get_task_struct(tsk);
	rcu_read_unlock();

	ret = rdtgroup_task_write_permission(tsk, of);
	if (!ret)
		ret = __rdtgroup_move_task(tsk, rdtgrp);

	put_task_struct(tsk);
	return ret;
}

static ssize_t rdtgroup_tasks_write(struct kernfs_open_file *of,
				    char *buf, size_t nbytes, loff_t off)
{
	struct rdtgroup *rdtgrp;
	int ret = 0;
	pid_t pid;

	if (kstrtoint(strstrip(buf), 0, &pid) || pid < 0)
		return -EINVAL;
	rdtgrp = rdtgroup_kn_lock_live(of->kn);
	if (!rdtgrp) {
		rdtgroup_kn_unlock(of->kn);
		return -ENOENT;
	}
	rdt_last_cmd_clear();

	if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED ||
	    rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
		ret = -EINVAL;
		rdt_last_cmd_puts("Pseudo-locking in progress\n");
		goto unlock;
	}

	ret = rdtgroup_move_task(pid, rdtgrp, of);

unlock:
	rdtgroup_kn_unlock(of->kn);

	return ret ?: nbytes;
}

static void show_rdt_tasks(struct rdtgroup *r, struct seq_file *s)
{
	struct task_struct *p, *t;

	rcu_read_lock();
	for_each_process_thread(p, t) {
		if (is_closid_match(t, r) || is_rmid_match(t, r))
			seq_printf(s, "%d\n", t->pid);
	}
	rcu_read_unlock();
}

static int rdtgroup_tasks_show(struct kernfs_open_file *of,
			       struct seq_file *s, void *v)
{
	struct rdtgroup *rdtgrp;
	int ret = 0;

	rdtgrp = rdtgroup_kn_lock_live(of->kn);
	if (rdtgrp)
		show_rdt_tasks(rdtgrp, s);
	else
		ret = -ENOENT;
	rdtgroup_kn_unlock(of->kn);

	return ret;
}

#ifdef CONFIG_PROC_CPU_RESCTRL

/*
 * A task can only be part of one resctrl control group and of one monitoring
 * group which is associated to that control group.
 *
 * 1)   res:
 *      mon:
 *
 *    resctrl is not available.
 *
 * 2)   res:/
 *      mon:
 *
 *    Task is part of the root resctrl control group, and it is not associated
 *    to any monitor group.
 *
 * 3)   res:/
 *      mon:mon0
 *
 *    Task is part of the root resctrl control group and monitor group mon0.
 *
 * 4)   res:group0
 *      mon:
 *
 *    Task is part of resctrl control group group0, and it is not associated
 *    to any monitor group.
 *
 * 5)   res:group0
 *      mon:mon1
 *
 *    Task is part of resctrl control group group0 and monitor group mon1.
 */
int proc_resctrl_show(struct seq_file *s, struct pid_namespace *ns,
		      struct pid *pid, struct task_struct *tsk)
{
	struct rdtgroup *rdtg;
	int ret = 0;

	mutex_lock(&rdtgroup_mutex);

	/* Return empty if resctrl has not been mounted. */
	if (!static_branch_unlikely(&rdt_enable_key)) {
		seq_puts(s, "res:\nmon:\n");
		goto unlock;
	}

	list_for_each_entry(rdtg, &rdt_all_groups, rdtgroup_list) {
		struct rdtgroup *crg;

		/*
		 * Task information is only relevant for shareable
		 * and exclusive groups.
		 */
		if (rdtg->mode != RDT_MODE_SHAREABLE &&
		    rdtg->mode != RDT_MODE_EXCLUSIVE)
			continue;

		if (rdtg->closid != tsk->closid)
			continue;

		seq_printf(s, "res:%s%s\n", (rdtg == &rdtgroup_default) ? "/" : "",
			   rdtg->kn->name);
		seq_puts(s, "mon:");
		list_for_each_entry(crg, &rdtg->mon.crdtgrp_list,
				    mon.crdtgrp_list) {
			if (tsk->rmid != crg->mon.rmid)
				continue;
			seq_printf(s, "%s", crg->kn->name);
			break;
		}
		seq_putc(s, '\n');
		goto unlock;
	}

	/*
	 * The above search should succeed. Otherwise return
	 * with an error.
	 */
	ret = -ENOENT;
unlock:
	mutex_unlock(&rdtgroup_mutex);

	return ret;
}
#endif

static int rdt_last_cmd_status_show(struct kernfs_open_file *of,
				    struct seq_file *seq, void *v)
{
	int len;

	mutex_lock(&rdtgroup_mutex);
	len = seq_buf_used(&last_cmd_status);
	if (len)
		seq_printf(seq, "%.*s", len, last_cmd_status_buf);
	else
		seq_puts(seq, "ok\n");
	mutex_unlock(&rdtgroup_mutex);
	return 0;
}

static int rdt_num_closids_show(struct kernfs_open_file *of,
				struct seq_file *seq, void *v)
{
	struct rdt_resource *r = of->kn->parent->priv;

	seq_printf(seq, "%d\n", r->num_closid);
	return 0;
}

static int rdt_default_ctrl_show(struct kernfs_open_file *of,
				 struct seq_file *seq, void *v)
{
	struct rdt_resource *r = of->kn->parent->priv;

	seq_printf(seq, "%x\n", r->default_ctrl);
	return 0;
}

static int rdt_min_cbm_bits_show(struct kernfs_open_file *of,
				 struct seq_file *seq, void *v)
{
	struct rdt_resource *r = of->kn->parent->priv;

	seq_printf(seq, "%u\n", r->cache.min_cbm_bits);
	return 0;
}

static int rdt_shareable_bits_show(struct kernfs_open_file *of,
				   struct seq_file *seq, void *v)
{
	struct rdt_resource *r = of->kn->parent->priv;

	seq_printf(seq, "%x\n", r->cache.shareable_bits);
	return 0;
}

/**
 * rdt_bit_usage_show - Display current usage of resources
 *
 * A domain is a shared resource that can be allocated differently. Here
 * we display the current regions of the domain as an annotated bitmask.
 * For each domain of this resource its allocation bitmask is annotated
 * as below to indicate the current usage of the corresponding bit:
 *   0 - currently unused
 *   X - currently available for sharing and used by software and hardware
 *   H - currently used by hardware only but available for software use
 *   S - currently used and shareable by software only
 *   E - currently used exclusively by one resource group
 *   P - currently pseudo-locked by one resource group
 */
static int rdt_bit_usage_show(struct kernfs_open_file *of,
			      struct seq_file *seq, void *v)
{
	struct rdt_resource *r = of->kn->parent->priv;
	/*
	 * Use unsigned long even though only 32 bits are used to ensure
	 * test_bit() is used safely.
	 */
	unsigned long sw_shareable = 0, hw_shareable = 0;
	unsigned long exclusive = 0, pseudo_locked = 0;
	struct rdt_domain *dom;
	int i, hwb, swb, excl, psl;
	enum rdtgrp_mode mode;
	bool sep = false;
	u32 *ctrl;

	mutex_lock(&rdtgroup_mutex);
	hw_shareable = r->cache.shareable_bits;
	list_for_each_entry(dom, &r->domains, list) {
		if (sep)
			seq_putc(seq, ';');
		ctrl = dom->ctrl_val;
		sw_shareable = 0;
		exclusive = 0;
		seq_printf(seq, "%d=", dom->id);
		for (i = 0; i < closids_supported(); i++, ctrl++) {
			if (!closid_allocated(i))
				continue;
			mode = rdtgroup_mode_by_closid(i);
			switch (mode) {
			case RDT_MODE_SHAREABLE:
				sw_shareable |= *ctrl;
				break;
			case RDT_MODE_EXCLUSIVE:
				exclusive |= *ctrl;
				break;
			case RDT_MODE_PSEUDO_LOCKSETUP:
				/*
				 * RDT_MODE_PSEUDO_LOCKSETUP is possible
				 * here but not included since the CBM
				 * associated with this CLOSID in this mode
				 * is not initialized and no task or cpu
				 * can be assigned this CLOSID.
				 */
				break;
			case RDT_MODE_PSEUDO_LOCKED:
			case RDT_NUM_MODES:
				WARN(1,
				     "invalid mode for closid %d\n", i);
				break;
			}
		}
		for (i = r->cache.cbm_len - 1; i >= 0; i--) {
			pseudo_locked = dom->plr ? dom->plr->cbm : 0;
			hwb = test_bit(i, &hw_shareable);
			swb = test_bit(i, &sw_shareable);
			excl = test_bit(i, &exclusive);
			psl = test_bit(i, &pseudo_locked);
			if (hwb && swb)
				seq_putc(seq, 'X');
			else if (hwb && !swb)
				seq_putc(seq, 'H');
			else if (!hwb && swb)
				seq_putc(seq, 'S');
			else if (excl)
				seq_putc(seq, 'E');
			else if (psl)
				seq_putc(seq, 'P');
			else
				seq_putc(seq, '0');
		}
		sep = true;
	}
	seq_putc(seq, '\n');
	mutex_unlock(&rdtgroup_mutex);
	return 0;
}

static int rdt_min_bw_show(struct kernfs_open_file *of,
			   struct seq_file *seq, void *v)
{
	struct rdt_resource *r = of->kn->parent->priv;

	seq_printf(seq, "%u\n", r->membw.min_bw);
	return 0;
}

static int rdt_num_rmids_show(struct kernfs_open_file *of,
			      struct seq_file *seq, void *v)
{
	struct rdt_resource *r = of->kn->parent->priv;

	seq_printf(seq, "%d\n", r->num_rmid);

	return 0;
}

static int rdt_mon_features_show(struct kernfs_open_file *of,
				 struct seq_file *seq, void *v)
{
	struct rdt_resource *r = of->kn->parent->priv;
	struct mon_evt *mevt;

	list_for_each_entry(mevt, &r->evt_list, list)
		seq_printf(seq, "%s\n", mevt->name);

	return 0;
}

static int rdt_bw_gran_show(struct kernfs_open_file *of,
			    struct seq_file *seq, void *v)
{
	struct rdt_resource *r = of->kn->parent->priv;

	seq_printf(seq, "%u\n", r->membw.bw_gran);
	return 0;
}

static int rdt_delay_linear_show(struct kernfs_open_file *of,
				 struct seq_file *seq, void *v)
{
	struct rdt_resource *r = of->kn->parent->priv;

	seq_printf(seq, "%u\n", r->membw.delay_linear);
	return 0;
}

static int max_threshold_occ_show(struct kernfs_open_file *of,
				  struct seq_file *seq, void *v)
{
	struct rdt_resource *r = of->kn->parent->priv;

	seq_printf(seq, "%u\n", resctrl_cqm_threshold * r->mon_scale);

	return 0;
}

static int rdt_thread_throttle_mode_show(struct kernfs_open_file *of,
					 struct seq_file *seq, void *v)
{
	struct rdt_resource *r = of->kn->parent->priv;

	if (r->membw.throttle_mode == THREAD_THROTTLE_PER_THREAD)
		seq_puts(seq, "per-thread\n");
	else
		seq_puts(seq, "max\n");

	return 0;
}

static ssize_t max_threshold_occ_write(struct kernfs_open_file *of,
				       char *buf, size_t nbytes, loff_t off)
{
	struct rdt_resource *r = of->kn->parent->priv;
	unsigned int bytes;
	int ret;

	ret = kstrtouint(buf, 0, &bytes);
	if (ret)
		return ret;

	if (bytes > (boot_cpu_data.x86_cache_size * 1024))
		return -EINVAL;

	resctrl_cqm_threshold = bytes / r->mon_scale;

	return nbytes;
}

/*
 * rdtgroup_mode_show - Display mode of this resource group
 */
static int rdtgroup_mode_show(struct kernfs_open_file *of,
			      struct seq_file *s, void *v)
{
	struct rdtgroup *rdtgrp;

	rdtgrp = rdtgroup_kn_lock_live(of->kn);
	if (!rdtgrp) {
		rdtgroup_kn_unlock(of->kn);
		return -ENOENT;
	}

	seq_printf(s, "%s\n", rdtgroup_mode_str(rdtgrp->mode));

	rdtgroup_kn_unlock(of->kn);
	return 0;
}

/**
 * rdt_cdp_peer_get - Retrieve CDP peer if it exists
 * @r: RDT resource to which RDT domain @d belongs
 * @d: Cache instance for which a CDP peer is requested
 * @r_cdp: RDT resource that shares hardware with @r (RDT resource peer)
 *         Used to return the result.
 * @d_cdp: RDT domain that shares hardware with @d (RDT domain peer)
 *         Used to return the result.
 *
 * RDT resources are managed independently and by extension the RDT domains
 * (RDT resource instances) are managed independently also. The Code and
 * Data Prioritization (CDP) RDT resources, while managed independently,
 * could refer to the same underlying hardware. For example,
 * RDT_RESOURCE_L2CODE and RDT_RESOURCE_L2DATA both refer to the L2 cache.
 *
 * When provided with an RDT resource @r and an instance of that RDT
 * resource @d, rdt_cdp_peer_get() will return if there is a peer RDT
 * resource and the exact instance that shares the same hardware.
 *
 * Return: 0 if a CDP peer was found, <0 on error or if no CDP peer exists.
 *         If a CDP peer was found, @r_cdp will point to the peer RDT resource
 *         and @d_cdp will point to the peer RDT domain.
 */
static int rdt_cdp_peer_get(struct rdt_resource *r, struct rdt_domain *d,
			    struct rdt_resource **r_cdp,
			    struct rdt_domain **d_cdp)
{
	struct rdt_resource *_r_cdp = NULL;
	struct rdt_domain *_d_cdp = NULL;
	int ret = 0;

	switch (r->rid) {
	case RDT_RESOURCE_L3DATA:
		_r_cdp = &rdt_resources_all[RDT_RESOURCE_L3CODE];
		break;
	case RDT_RESOURCE_L3CODE:
		_r_cdp = &rdt_resources_all[RDT_RESOURCE_L3DATA];
		break;
	case RDT_RESOURCE_L2DATA:
		_r_cdp = &rdt_resources_all[RDT_RESOURCE_L2CODE];
		break;
	case RDT_RESOURCE_L2CODE:
		_r_cdp = &rdt_resources_all[RDT_RESOURCE_L2DATA];
		break;
	default:
		ret = -ENOENT;
		goto out;
	}

	/*
	 * When a new CPU comes online and CDP is enabled then the new
	 * RDT domains (if any) associated with both CDP RDT resources
	 * are added in the same CPU online routine while the
	 * rdtgroup_mutex is held. It should thus not happen for one
	 * RDT domain to exist and be associated with its RDT CDP
	 * resource but there is no RDT domain associated with the
	 * peer RDT CDP resource. Hence the WARN.
	 */
	_d_cdp = rdt_find_domain(_r_cdp, d->id, NULL);
	if (WARN_ON(IS_ERR_OR_NULL(_d_cdp))) {
		_r_cdp = NULL;
		_d_cdp = NULL;
		ret = -EINVAL;
	}

out:
	*r_cdp = _r_cdp;
	*d_cdp = _d_cdp;

	return ret;
}

/**
 * __rdtgroup_cbm_overlaps - Does CBM for intended closid overlap with other
 * @r: Resource to which domain instance @d belongs.
 * @d: The domain instance for which @closid is being tested.
 * @cbm: Capacity bitmask being tested.
 * @closid: Intended closid for @cbm.
 * @exclusive: Only check if overlaps with exclusive resource groups
 *
 * Checks if provided @cbm intended to be used for @closid on domain
 * @d overlaps with any other closids or other hardware usage associated
 * with this domain. If @exclusive is true then only overlaps with
 * resource groups in exclusive mode will be considered. If @exclusive
 * is false then overlaps with any resource group or hardware entities
 * will be considered.
 *
 * @cbm is unsigned long, even if only 32 bits are used, to make the
 * bitmap functions work correctly.
 *
 * Return: false if CBM does not overlap, true if it does.
 */
static bool __rdtgroup_cbm_overlaps(struct rdt_resource *r, struct rdt_domain *d,
				    unsigned long cbm, int closid, bool exclusive)
{
	enum rdtgrp_mode mode;
	unsigned long ctrl_b;
	u32 *ctrl;
	int i;

	/* Check for any overlap with regions used by hardware directly */
	if (!exclusive) {
		ctrl_b = r->cache.shareable_bits;
		if (bitmap_intersects(&cbm, &ctrl_b, r->cache.cbm_len))
			return true;
	}

	/* Check for overlap with other resource groups */
	ctrl = d->ctrl_val;
	for (i = 0; i < closids_supported(); i++, ctrl++) {
		ctrl_b = *ctrl;
		mode = rdtgroup_mode_by_closid(i);
		if (closid_allocated(i) && i != closid &&
		    mode != RDT_MODE_PSEUDO_LOCKSETUP) {
			if (bitmap_intersects(&cbm, &ctrl_b, r->cache.cbm_len)) {
				if (exclusive) {
					if (mode == RDT_MODE_EXCLUSIVE)
						return true;
					continue;
				}
				return true;
			}
		}
	}

	return false;
}

/**
 * rdtgroup_cbm_overlaps - Does CBM overlap with other use of hardware
 * @r: Resource to which domain instance @d belongs.
 * @d: The domain instance for which @closid is being tested.
 * @cbm: Capacity bitmask being tested.
 * @closid: Intended closid for @cbm.
 * @exclusive: Only check if overlaps with exclusive resource groups
 *
 * Resources that can be allocated using a CBM can use the CBM to control
 * the overlap of these allocations. rdtgroup_cbm_overlaps() is the test
 * function to test for overlap. Overlap test can be done on L2 or L3 cache.
 *
 * Because the L2 and L3 CDP resources (code and data) share the same cache
 * instances, the overlap test also has to consider the CDP peer of the
 * provided resource, if one exists.
 *
 * Return: true if CBM overlap detected, false if there is no overlap
 */
bool rdtgroup_cbm_overlaps(struct rdt_resource *r, struct rdt_domain *d,
			   unsigned long cbm, int closid, bool exclusive)
{
	struct rdt_resource *r_cdp;
	struct rdt_domain *d_cdp;

	if (__rdtgroup_cbm_overlaps(r, d, cbm, closid, exclusive))
		return true;

	if (rdt_cdp_peer_get(r, d, &r_cdp, &d_cdp) < 0)
		return false;

	return __rdtgroup_cbm_overlaps(r_cdp, d_cdp, cbm, closid, exclusive);
}

/**
 * rdtgroup_mode_test_exclusive - Test if this resource group can be exclusive
 * @rdtgrp: Resource group identified through its closid.
 *
 * An exclusive resource group implies that there should be no sharing of
 * its allocated resources. At the time this group is considered to be
 * exclusive this test can determine if its current schemata supports this
 * setting by testing for overlap with all other resource groups.
 *
 * Return: true if resource group can be exclusive, false if there is overlap
 * with allocations of other resource groups and/or hardware.
 */
static bool rdtgroup_mode_test_exclusive(struct rdtgroup *rdtgrp)
{
	int closid = rdtgrp->closid;
	struct rdt_resource *r;
	bool has_cache = false;
	struct rdt_domain *d;

	for_each_alloc_enabled_rdt_resource(r) {
		if (r->rid == RDT_RESOURCE_MBA)
			continue;
		has_cache = true;
		list_for_each_entry(d, &r->domains, list) {
			if (rdtgroup_cbm_overlaps(r, d, d->ctrl_val[closid],
						  rdtgrp->closid, false)) {
				rdt_last_cmd_puts("Schemata overlaps\n");
				return false;
			}
		}
	}

	if (!has_cache) {
		rdt_last_cmd_puts("Cannot be exclusive without CAT/CDP\n");
		return false;
	}

	return true;
}

/*
 * rdtgroup_mode_write - Modify the resource group's mode
 */
static ssize_t rdtgroup_mode_write(struct kernfs_open_file *of,
				   char *buf, size_t nbytes, loff_t off)
{
	struct rdtgroup *rdtgrp;
	enum rdtgrp_mode mode;
	int ret = 0;

	/* Valid input requires a trailing newline */
	if (nbytes == 0 || buf[nbytes - 1] != '\n')
		return -EINVAL;
	buf[nbytes - 1] = '\0';

	rdtgrp = rdtgroup_kn_lock_live(of->kn);
	if (!rdtgrp) {
		rdtgroup_kn_unlock(of->kn);
		return -ENOENT;
	}

	rdt_last_cmd_clear();

	mode = rdtgrp->mode;

	if ((!strcmp(buf, "shareable") && mode == RDT_MODE_SHAREABLE) ||
	    (!strcmp(buf, "exclusive") && mode == RDT_MODE_EXCLUSIVE) ||
	    (!strcmp(buf, "pseudo-locksetup") &&
	     mode == RDT_MODE_PSEUDO_LOCKSETUP) ||
	    (!strcmp(buf, "pseudo-locked") && mode == RDT_MODE_PSEUDO_LOCKED))
		goto out;

	if (mode == RDT_MODE_PSEUDO_LOCKED) {
		rdt_last_cmd_puts("Cannot change pseudo-locked group\n");
		ret = -EINVAL;
		goto out;
	}

	if (!strcmp(buf, "shareable")) {
		if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
			ret = rdtgroup_locksetup_exit(rdtgrp);
			if (ret)
				goto out;
		}
		rdtgrp->mode = RDT_MODE_SHAREABLE;
	} else if (!strcmp(buf, "exclusive")) {
		if (!rdtgroup_mode_test_exclusive(rdtgrp)) {
			ret = -EINVAL;
			goto out;
		}
		if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
			ret = rdtgroup_locksetup_exit(rdtgrp);
			if (ret)
				goto out;
		}
		rdtgrp->mode = RDT_MODE_EXCLUSIVE;
	} else if (!strcmp(buf, "pseudo-locksetup")) {
		ret = rdtgroup_locksetup_enter(rdtgrp);
		if (ret)
			goto out;
		rdtgrp->mode = RDT_MODE_PSEUDO_LOCKSETUP;
	} else {
		rdt_last_cmd_puts("Unknown or unsupported mode\n");
		ret = -EINVAL;
	}

out:
	rdtgroup_kn_unlock(of->kn);
	return ret ?: nbytes;
}

/**
 * rdtgroup_cbm_to_size - Translate CBM to size in bytes
 * @r: RDT resource to which @d belongs.
 * @d: RDT domain instance.
 * @cbm: bitmask for which the size should be computed.
 *
 * The bitmask provided associated with the RDT domain instance @d will be
 * translated into how many bytes it represents. The size in bytes is
 * computed by first dividing the total cache size by the CBM length to
 * determine how many bytes each bit in the bitmask represents. The result
 * is multiplied with the number of bits set in the bitmask.
 *
 * @cbm is unsigned long, even if only 32 bits are used, to make the
 * bitmap functions work correctly.
 */
unsigned int rdtgroup_cbm_to_size(struct rdt_resource *r,
				  struct rdt_domain *d, unsigned long cbm)
{
	struct cpu_cacheinfo *ci;
	unsigned int size = 0;
	int num_b, i;

	num_b = bitmap_weight(&cbm, r->cache.cbm_len);
	ci = get_cpu_cacheinfo(cpumask_any(&d->cpu_mask));
	for (i = 0; i < ci->num_leaves; i++) {
		if (ci->info_list[i].level == r->cache_level) {
			size = ci->info_list[i].size / r->cache.cbm_len * num_b;
			break;
		}
	}

	return size;
}

/*
 * rdtgroup_size_show - Display size in bytes of allocated regions
 *
 * The "size" file mirrors the layout of the "schemata" file, printing the
 * size in bytes of each region instead of the capacity bitmask.
 */
static int rdtgroup_size_show(struct kernfs_open_file *of,
			      struct seq_file *s, void *v)
{
	struct rdtgroup *rdtgrp;
	struct rdt_resource *r;
	struct rdt_domain *d;
	unsigned int size;
	int ret = 0;
	bool sep;
	u32 ctrl;

	rdtgrp = rdtgroup_kn_lock_live(of->kn);
	if (!rdtgrp) {
		rdtgroup_kn_unlock(of->kn);
		return -ENOENT;
	}

	if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) {
		if (!rdtgrp->plr->d) {
			rdt_last_cmd_clear();
			rdt_last_cmd_puts("Cache domain offline\n");
			ret = -ENODEV;
		} else {
			seq_printf(s, "%*s:", max_name_width,
				   rdtgrp->plr->r->name);
			size = rdtgroup_cbm_to_size(rdtgrp->plr->r,
						    rdtgrp->plr->d,
						    rdtgrp->plr->cbm);
			seq_printf(s, "%d=%u\n", rdtgrp->plr->d->id, size);
		}
		goto out;
	}

	for_each_alloc_enabled_rdt_resource(r) {
		sep = false;
		seq_printf(s, "%*s:", max_name_width, r->name);
		list_for_each_entry(d, &r->domains, list) {
			if (sep)
				seq_putc(s, ';');
			if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
				size = 0;
			} else {
				ctrl = (!is_mba_sc(r) ?
						d->ctrl_val[rdtgrp->closid] :
						d->mbps_val[rdtgrp->closid]);
				if (r->rid == RDT_RESOURCE_MBA)
					size = ctrl;
				else
					size = rdtgroup_cbm_to_size(r, d, ctrl);
			}
			seq_printf(s, "%d=%u", d->id, size);
			sep = true;
		}
		seq_putc(s, '\n');
	}

out:
	rdtgroup_kn_unlock(of->kn);

	return ret;
}

/* rdtgroup information files for one cache resource. */
static struct rftype res_common_files[] = {
	{
		.name		= "last_cmd_status",
		.mode		= 0444,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.seq_show	= rdt_last_cmd_status_show,
		.fflags		= RF_TOP_INFO,
	},
	{
		.name		= "num_closids",
		.mode		= 0444,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.seq_show	= rdt_num_closids_show,
		.fflags		= RF_CTRL_INFO,
	},
	{
		.name		= "mon_features",
		.mode		= 0444,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.seq_show	= rdt_mon_features_show,
		.fflags		= RF_MON_INFO,
	},
	{
		.name		= "num_rmids",
		.mode		= 0444,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.seq_show	= rdt_num_rmids_show,
		.fflags		= RF_MON_INFO,
	},
	{
		.name		= "cbm_mask",
		.mode		= 0444,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.seq_show	= rdt_default_ctrl_show,
		.fflags		= RF_CTRL_INFO | RFTYPE_RES_CACHE,
	},
	{
		.name		= "min_cbm_bits",
		.mode		= 0444,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.seq_show	= rdt_min_cbm_bits_show,
		.fflags		= RF_CTRL_INFO | RFTYPE_RES_CACHE,
	},
	{
		.name		= "shareable_bits",
		.mode		= 0444,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.seq_show	= rdt_shareable_bits_show,
		.fflags		= RF_CTRL_INFO | RFTYPE_RES_CACHE,
	},
	{
		.name		= "bit_usage",
		.mode		= 0444,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.seq_show	= rdt_bit_usage_show,
		.fflags		= RF_CTRL_INFO | RFTYPE_RES_CACHE,
	},
	{
		.name		= "min_bandwidth",
		.mode		= 0444,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.seq_show	= rdt_min_bw_show,
		.fflags		= RF_CTRL_INFO | RFTYPE_RES_MB,
	},
	{
		.name		= "bandwidth_gran",
		.mode		= 0444,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.seq_show	= rdt_bw_gran_show,
		.fflags		= RF_CTRL_INFO | RFTYPE_RES_MB,
	},
	{
		.name		= "delay_linear",
		.mode		= 0444,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.seq_show	= rdt_delay_linear_show,
		.fflags		= RF_CTRL_INFO | RFTYPE_RES_MB,
	},
	/*
	 * Platform specific which (if any) capabilities are provided by
	 * thread_throttle_mode. Defer "fflags" initialization to platform
	 * discovery.
	 */
	{
		.name		= "thread_throttle_mode",
		.mode		= 0444,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.seq_show	= rdt_thread_throttle_mode_show,
	},
	{
		.name		= "max_threshold_occupancy",
		.mode		= 0644,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.write		= max_threshold_occ_write,
		.seq_show	= max_threshold_occ_show,
		.fflags		= RF_MON_INFO | RFTYPE_RES_CACHE,
	},
	{
		.name		= "cpus",
		.mode		= 0644,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.write		= rdtgroup_cpus_write,
		.seq_show	= rdtgroup_cpus_show,
		.fflags		= RFTYPE_BASE,
	},
	{
		.name		= "cpus_list",
		.mode		= 0644,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.write		= rdtgroup_cpus_write,
		.seq_show	= rdtgroup_cpus_show,
		.flags		= RFTYPE_FLAGS_CPUS_LIST,
		.fflags		= RFTYPE_BASE,
	},
	{
		.name		= "tasks",
		.mode		= 0644,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.write		= rdtgroup_tasks_write,
		.seq_show	= rdtgroup_tasks_show,
		.fflags		= RFTYPE_BASE,
	},
	{
		.name		= "schemata",
		.mode		= 0644,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.write		= rdtgroup_schemata_write,
		.seq_show	= rdtgroup_schemata_show,
		.fflags		= RF_CTRL_BASE,
	},
	{
		.name		= "mode",
		.mode		= 0644,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.write		= rdtgroup_mode_write,
		.seq_show	= rdtgroup_mode_show,
		.fflags		= RF_CTRL_BASE,
	},
	{
		.name		= "size",
		.mode		= 0444,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.seq_show	= rdtgroup_size_show,
		.fflags		= RF_CTRL_BASE,
	},
};

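/*
 * Add all files from res_common_files[] whose "fflags" are a subset of
 * @fflags to @kn. On failure, files created so far are removed again.
 */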
static int rdtgroup_add_files(struct kernfs_node *kn, unsigned long fflags)
{
	struct rftype *rfts, *rft;
	int ret, len;

	rfts = res_common_files;
	len = ARRAY_SIZE(res_common_files);

	lockdep_assert_held(&rdtgroup_mutex);

	for (rft = rfts; rft < rfts + len; rft++) {
		if (rft->fflags && ((fflags & rft->fflags) == rft->fflags)) {
			ret = rdtgroup_add_file(kn, rft);
			if (ret)
				goto error;
		}
	}

	return 0;
error:
	pr_warn("Failed to add %s, err=%d\n", rft->name, ret);
	while (--rft >= rfts) {
		if ((fflags & rft->fflags) == rft->fflags)
			kernfs_remove_by_name(kn, rft->name);
	}
	return ret;
}

static struct rftype *rdtgroup_get_rftype_by_name(const char *name)
{
	struct rftype *rfts, *rft;
	int len;

	rfts = res_common_files;
	len = ARRAY_SIZE(res_common_files);

	for (rft = rfts; rft < rfts + len; rft++) {
		if (!strcmp(rft->name, name))
			return rft;
	}

	return NULL;
}

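/*
 * "thread_throttle_mode" is only exposed on platforms that support it:
 * its "fflags" are left zero in res_common_files[] and filled in here
 * during platform discovery.
 */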
void __init thread_throttle_mode_init(void)
{
	struct rftype *rft;

	rft = rdtgroup_get_rftype_by_name("thread_throttle_mode");
	if (!rft)
		return;

	rft->fflags = RF_CTRL_INFO | RFTYPE_RES_MB;
}

/**
 * rdtgroup_kn_mode_restrict - Restrict user access to named resctrl file
 * @r: The resource group with which the file is associated.
 * @name: Name of the file
 *
 * The permissions of named resctrl file, directory, or link are modified
 * to not allow read, write, or execute by any user.
 *
 * WARNING: This function is intended to communicate to the user that the
 * resctrl file has been locked down - that it is not relevant to the
 * particular state the system finds itself in. It should not be relied
 * on to protect from user access because after the file's permissions
 * are restricted the user can still change the permissions using chmod
 * from the command line.
 *
 * Return: 0 on success, <0 on failure.
 */
int rdtgroup_kn_mode_restrict(struct rdtgroup *r, const char *name)
{
	struct iattr iattr = {.ia_valid = ATTR_MODE,};
	struct kernfs_node *kn;
	int ret = 0;

	kn = kernfs_find_and_get_ns(r->kn, name, NULL);
	if (!kn)
		return -ENOENT;

	switch (kernfs_type(kn)) {
	case KERNFS_DIR:
		iattr.ia_mode = S_IFDIR;
		break;
	case KERNFS_FILE:
		iattr.ia_mode = S_IFREG;
		break;
	case KERNFS_LINK:
		iattr.ia_mode = S_IFLNK;
		break;
	}

	ret = kernfs_setattr(kn, &iattr);
	kernfs_put(kn);
	return ret;
}

/**
 * rdtgroup_kn_mode_restore - Restore user access to named resctrl file
 * @r: The resource group with which the file is associated.
 * @name: Name of the file
 * @mask: Mask of permissions that should be restored
 *
 * Restore the permissions of the named file. If @name is a directory the
 * permissions of its parent will be used.
 *
 * Return: 0 on success, <0 on failure.
 */
int rdtgroup_kn_mode_restore(struct rdtgroup *r, const char *name,
			     umode_t mask)
{
	struct iattr iattr = {.ia_valid = ATTR_MODE,};
	struct kernfs_node *kn, *parent;
	struct rftype *rfts, *rft;
	int ret, len;

	rfts = res_common_files;
	len = ARRAY_SIZE(res_common_files);

	for (rft = rfts; rft < rfts + len; rft++) {
		if (!strcmp(rft->name, name))
			iattr.ia_mode = rft->mode & mask;
	}

	kn = kernfs_find_and_get_ns(r->kn, name, NULL);
	if (!kn)
		return -ENOENT;

	switch (kernfs_type(kn)) {
	case KERNFS_DIR:
		parent = kernfs_get_parent(kn);
		if (parent) {
			iattr.ia_mode |= parent->mode;
			kernfs_put(parent);
		}
		iattr.ia_mode |= S_IFDIR;
		break;
	case KERNFS_FILE:
		iattr.ia_mode |= S_IFREG;
		break;
	case KERNFS_LINK:
		iattr.ia_mode |= S_IFLNK;
		break;
	}

	ret = kernfs_setattr(kn, &iattr);
	kernfs_put(kn);
	return ret;
}

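/*
 * Create one subdirectory of "info" for resource @r and populate it with
 * the info files selected by @fflags.
 */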
static int rdtgroup_mkdir_info_resdir(struct rdt_resource *r, char *name,
				      unsigned long fflags)
{
	struct kernfs_node *kn_subdir;
	int ret;

	kn_subdir = kernfs_create_dir(kn_info, name,
				      kn_info->mode, r);
	if (IS_ERR(kn_subdir))
		return PTR_ERR(kn_subdir);

	kernfs_get(kn_subdir);
	ret = rdtgroup_kn_set_ugid(kn_subdir);
	if (ret)
		return ret;

	ret = rdtgroup_add_files(kn_subdir, fflags);
	if (!ret)
		kernfs_activate(kn_subdir);

	return ret;
}

static int rdtgroup_create_info_dir(struct kernfs_node *parent_kn)
{
	struct rdt_resource *r;
	unsigned long fflags;
	char name[32];
	int ret;

	/* create the directory */
	kn_info = kernfs_create_dir(parent_kn, "info", parent_kn->mode, NULL);
	if (IS_ERR(kn_info))
		return PTR_ERR(kn_info);
	kernfs_get(kn_info);

	ret = rdtgroup_add_files(kn_info, RF_TOP_INFO);
	if (ret)
		goto out_destroy;

	for_each_alloc_enabled_rdt_resource(r) {
		fflags = r->fflags | RF_CTRL_INFO;
		ret = rdtgroup_mkdir_info_resdir(r, r->name, fflags);
		if (ret)
			goto out_destroy;
	}

	for_each_mon_enabled_rdt_resource(r) {
		fflags = r->fflags | RF_MON_INFO;
		sprintf(name, "%s_MON", r->name);
		ret = rdtgroup_mkdir_info_resdir(r, name, fflags);
		if (ret)
			goto out_destroy;
	}

	/*
	 * This extra ref will be put in kernfs_remove() and guarantees
	 * that kn_info is always accessible.
	 */
	kernfs_get(kn_info);

	ret = rdtgroup_kn_set_ugid(kn_info);
	if (ret)
		goto out_destroy;

	kernfs_activate(kn_info);

	return 0;

out_destroy:
	kernfs_remove(kn_info);
	return ret;
}

static int
mongroup_create_dir(struct kernfs_node *parent_kn, struct rdtgroup *prgrp,
		    char *name, struct kernfs_node **dest_kn)
{
	struct kernfs_node *kn;
	int ret;

	/* create the directory */
	kn = kernfs_create_dir(parent_kn, name, parent_kn->mode, prgrp);
	if (IS_ERR(kn))
		return PTR_ERR(kn);

	if (dest_kn)
		*dest_kn = kn;

	/*
	 * This extra ref will be put in kernfs_remove() and guarantees
	 * that @kn is always accessible.
	 */
	kernfs_get(kn);

	ret = rdtgroup_kn_set_ugid(kn);
	if (ret)
		goto out_destroy;

	kernfs_activate(kn);

	return 0;

out_destroy:
	kernfs_remove(kn);
	return ret;
}

static void l3_qos_cfg_update(void *arg)
{
	bool *enable = arg;

	wrmsrl(MSR_IA32_L3_QOS_CFG, *enable ? L3_QOS_CDP_ENABLE : 0ULL);
}

static void l2_qos_cfg_update(void *arg)
{
	bool *enable = arg;

	wrmsrl(MSR_IA32_L2_QOS_CFG, *enable ? L2_QOS_CDP_ENABLE : 0ULL);
}

static inline bool is_mba_linear(void)
{
	return rdt_resources_all[RDT_RESOURCE_MBA].membw.delay_linear;
}

static int set_cache_qos_cfg(int level, bool enable)
{
	void (*update)(void *arg);
	struct rdt_resource *r_l;
	cpumask_var_t cpu_mask;
	struct rdt_domain *d;
	int cpu;

	if (level == RDT_RESOURCE_L3)
		update = l3_qos_cfg_update;
	else if (level == RDT_RESOURCE_L2)
		update = l2_qos_cfg_update;
	else
		return -EINVAL;

	if (!zalloc_cpumask_var(&cpu_mask, GFP_KERNEL))
		return -ENOMEM;

	r_l = &rdt_resources_all[level];
	list_for_each_entry(d, &r_l->domains, list) {
		if (r_l->cache.arch_has_per_cpu_cfg)
			/* Pick all the CPUs in the domain instance */
			for_each_cpu(cpu, &d->cpu_mask)
				cpumask_set_cpu(cpu, cpu_mask);
		else
			/* Pick one CPU from each domain instance to update MSR */
			cpumask_set_cpu(cpumask_any(&d->cpu_mask), cpu_mask);
	}
	cpu = get_cpu();
	/* Update QOS_CFG MSR on this cpu if it's in cpu_mask. */
	if (cpumask_test_cpu(cpu, cpu_mask))
		update(&enable);
	/* Update QOS_CFG MSR on all other cpus in cpu_mask. */
	smp_call_function_many(cpu_mask, update, &enable, 1);
	put_cpu();

	free_cpumask_var(cpu_mask);

	return 0;
}

/* Restore the qos cfg state when a domain comes online */
void rdt_domain_reconfigure_cdp(struct rdt_resource *r)
{
	if (!r->alloc_capable)
		return;

	if (r == &rdt_resources_all[RDT_RESOURCE_L2DATA])
		l2_qos_cfg_update(&r->alloc_enabled);

	if (r == &rdt_resources_all[RDT_RESOURCE_L3DATA])
		l3_qos_cfg_update(&r->alloc_enabled);
}

/*
 * Enable or disable the MBA software controller
 * which helps user specify bandwidth in MBps.
 * MBA software controller is supported only if
 * MBM is supported and MBA is in linear scale.
 */
static int set_mba_sc(bool mba_sc)
{
	struct rdt_resource *r = &rdt_resources_all[RDT_RESOURCE_MBA];
	struct rdt_domain *d;

	if (!is_mbm_enabled() || !is_mba_linear() ||
	    mba_sc == is_mba_sc(r))
		return -EINVAL;

	r->membw.mba_sc = mba_sc;
	list_for_each_entry(d, &r->domains, list)
		setup_default_ctrlval(r, d->ctrl_val, d->mbps_val);

	return 0;
}

static int cdp_enable(int level, int data_type, int code_type)
{
	struct rdt_resource *r_ldata = &rdt_resources_all[data_type];
	struct rdt_resource *r_lcode = &rdt_resources_all[code_type];
	struct rdt_resource *r_l = &rdt_resources_all[level];
	int ret;

	if (!r_l->alloc_capable || !r_ldata->alloc_capable ||
	    !r_lcode->alloc_capable)
		return -EINVAL;

	ret = set_cache_qos_cfg(level, true);
	if (!ret) {
		r_l->alloc_enabled = false;
		r_ldata->alloc_enabled = true;
		r_lcode->alloc_enabled = true;
	}
	return ret;
}

static int cdpl3_enable(void)
{
	return cdp_enable(RDT_RESOURCE_L3, RDT_RESOURCE_L3DATA,
			  RDT_RESOURCE_L3CODE);
}

static int cdpl2_enable(void)
{
	return cdp_enable(RDT_RESOURCE_L2, RDT_RESOURCE_L2DATA,
			  RDT_RESOURCE_L2CODE);
}

static void cdp_disable(int level, int data_type, int code_type)
{
	struct rdt_resource *r = &rdt_resources_all[level];

	r->alloc_enabled = r->alloc_capable;

	if (rdt_resources_all[data_type].alloc_enabled) {
		rdt_resources_all[data_type].alloc_enabled = false;
		rdt_resources_all[code_type].alloc_enabled = false;
		set_cache_qos_cfg(level, false);
	}
}

static void cdpl3_disable(void)
{
	cdp_disable(RDT_RESOURCE_L3, RDT_RESOURCE_L3DATA, RDT_RESOURCE_L3CODE);
}

static void cdpl2_disable(void)
{
	cdp_disable(RDT_RESOURCE_L2, RDT_RESOURCE_L2DATA, RDT_RESOURCE_L2CODE);
}

static void cdp_disable_all(void)
{
	if (rdt_resources_all[RDT_RESOURCE_L3DATA].alloc_enabled)
		cdpl3_disable();
	if (rdt_resources_all[RDT_RESOURCE_L2DATA].alloc_enabled)
		cdpl2_disable();
}

/*
 * We don't allow rdtgroup directories to be created anywhere
 * except the root directory. Thus when looking for the rdtgroup
 * structure for a kernfs node we are either looking at a directory,
 * in which case the rdtgroup structure is pointed at by the "priv"
 * field, otherwise we look at some regular file's parent
 * directory.
 */
static struct rdtgroup *kernfs_to_rdtgroup(struct kernfs_node *kn)
{
	if (kernfs_type(kn) == KERNFS_DIR) {
		/*
		 * All the resource directories use "kn->priv"
		 * to point to the "struct rdtgroup" for the
		 * resource. "info" and its subdirectories don't
		 * use rdtgroup structures, so return NULL here.
		 */
		if (kn == kn_info || kn->parent == kn_info)
			return NULL;
		else
			return kn->priv;
	} else {
		return kn->parent->priv;
	}
}

struct rdtgroup *rdtgroup_kn_lock_live(struct kernfs_node *kn)
{
	struct rdtgroup *rdtgrp = kernfs_to_rdtgroup(kn);

	if (!rdtgrp)
		return NULL;

	atomic_inc(&rdtgrp->waitcount);
	kernfs_break_active_protection(kn);

	mutex_lock(&rdtgroup_mutex);

	/* Was this group deleted while we waited? */
	if (rdtgrp->flags & RDT_DELETED)
		return NULL;

	return rdtgrp;
}

void rdtgroup_kn_unlock(struct kernfs_node *kn)
{
	struct rdtgroup *rdtgrp = kernfs_to_rdtgroup(kn);

	if (!rdtgrp)
		return;

	mutex_unlock(&rdtgroup_mutex);

	if (atomic_dec_and_test(&rdtgrp->waitcount) &&
	    (rdtgrp->flags & RDT_DELETED)) {
		if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP ||
		    rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED)
			rdtgroup_pseudo_lock_remove(rdtgrp);
		kernfs_unbreak_active_protection(kn);
		kernfs_put(rdtgrp->kn);
		kfree(rdtgrp);
	} else {
		kernfs_unbreak_active_protection(kn);
	}
}

static int mkdir_mondata_all(struct kernfs_node *parent_kn,
			     struct rdtgroup *prgrp,
			     struct kernfs_node **mon_data_kn);

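/*
 * Apply the options collected during mount: CDP on L2 and/or L3, and the
 * MBA software controller. Enabling stops at the first failure.
 */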
static int rdt_enable_ctx(struct rdt_fs_context *ctx)
{
	int ret = 0;

	if (ctx->enable_cdpl2)
		ret = cdpl2_enable();

	if (!ret && ctx->enable_cdpl3)
		ret = cdpl3_enable();

	if (!ret && ctx->enable_mba_mbps)
		ret = set_mba_sc(true);

	return ret;
}

static int rdt_get_tree(struct fs_context *fc)
{
	struct rdt_fs_context *ctx = rdt_fc2context(fc);
	struct rdt_domain *dom;
	struct rdt_resource *r;
	int ret;

	cpus_read_lock();
	mutex_lock(&rdtgroup_mutex);
	/*
	 * resctrl file system can only be mounted once.
	 */
	if (static_branch_unlikely(&rdt_enable_key)) {
		ret = -EBUSY;
		goto out;
	}

	ret = rdt_enable_ctx(ctx);
	if (ret < 0)
		goto out_cdp;

	closid_init();

	ret = rdtgroup_create_info_dir(rdtgroup_default.kn);
	if (ret < 0)
		goto out_mba;

	if (rdt_mon_capable) {
		ret = mongroup_create_dir(rdtgroup_default.kn,
					  &rdtgroup_default, "mon_groups",
					  &kn_mongrp);
		if (ret < 0)
			goto out_info;
		kernfs_get(kn_mongrp);

		ret = mkdir_mondata_all(rdtgroup_default.kn,
					&rdtgroup_default, &kn_mondata);
		if (ret < 0)
			goto out_mongrp;
		kernfs_get(kn_mondata);
		rdtgroup_default.mon.mon_data_kn = kn_mondata;
	}

	ret = rdt_pseudo_lock_init();
	if (ret)
		goto out_mondata;

	ret = kernfs_get_tree(fc);
	if (ret < 0)
		goto out_psl;

	if (rdt_alloc_capable)
		static_branch_enable_cpuslocked(&rdt_alloc_enable_key);
	if (rdt_mon_capable)
		static_branch_enable_cpuslocked(&rdt_mon_enable_key);

	if (rdt_alloc_capable || rdt_mon_capable)
		static_branch_enable_cpuslocked(&rdt_enable_key);

	if (is_mbm_enabled()) {
		r = &rdt_resources_all[RDT_RESOURCE_L3];
		list_for_each_entry(dom, &r->domains, list)
			mbm_setup_overflow_handler(dom, MBM_OVERFLOW_INTERVAL);
	}

	goto out;

out_psl:
	rdt_pseudo_lock_release();
out_mondata:
	if (rdt_mon_capable)
		kernfs_remove(kn_mondata);
out_mongrp:
	if (rdt_mon_capable)
		kernfs_remove(kn_mongrp);
out_info:
	kernfs_remove(kn_info);
out_mba:
	if (ctx->enable_mba_mbps)
		set_mba_sc(false);
out_cdp:
	cdp_disable_all();
out:
	rdt_last_cmd_clear();
	mutex_unlock(&rdtgroup_mutex);
	cpus_read_unlock();
	return ret;
}

enum rdt_param {
	Opt_cdp,
	Opt_cdpl2,
	Opt_mba_mbps,
	nr__rdt_params
};

static const struct fs_parameter_spec rdt_fs_parameters[] = {
	fsparam_flag("cdp",		Opt_cdp),
	fsparam_flag("cdpl2",		Opt_cdpl2),
	fsparam_flag("mba_MBps",	Opt_mba_mbps),
	{}
};

static int rdt_parse_param(struct fs_context *fc, struct fs_parameter *param)
{
	struct rdt_fs_context *ctx = rdt_fc2context(fc);
	struct fs_parse_result result;
	int opt;

	opt = fs_parse(fc, rdt_fs_parameters, param, &result);
	if (opt < 0)
		return opt;

	switch (opt) {
	case Opt_cdp:
		ctx->enable_cdpl3 = true;
		return 0;
	case Opt_cdpl2:
		ctx->enable_cdpl2 = true;
		return 0;
	case Opt_mba_mbps:
		if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
			return -EINVAL;
		ctx->enable_mba_mbps = true;
		return 0;
	}

	return -EINVAL;
}

static void rdt_fs_context_free(struct fs_context *fc)
{
	struct rdt_fs_context *ctx = rdt_fc2context(fc);

	kernfs_free_fs_context(fc);
	kfree(ctx);
}

static const struct fs_context_operations rdt_fs_context_ops = {
	.free		= rdt_fs_context_free,
	.parse_param	= rdt_parse_param,
	.get_tree	= rdt_get_tree,
};

static int rdt_init_fs_context(struct fs_context *fc)
{
	struct rdt_fs_context *ctx;

	ctx = kzalloc(sizeof(struct rdt_fs_context), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	ctx->kfc.root = rdt_root;
	ctx->kfc.magic = RDTGROUP_SUPER_MAGIC;
	fc->fs_private = &ctx->kfc;
	fc->ops = &rdt_fs_context_ops;
	put_user_ns(fc->user_ns);
	fc->user_ns = get_user_ns(&init_user_ns);
	fc->global = true;
	return 0;
}

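/*
 * Reset all control values of resource @r to their defaults and write them
 * to the hardware from one CPU in every domain.
 */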
static int reset_all_ctrls(struct rdt_resource *r)
{
	struct msr_param msr_param;
	cpumask_var_t cpu_mask;
	struct rdt_domain *d;
	int i, cpu;

	if (!zalloc_cpumask_var(&cpu_mask, GFP_KERNEL))
		return -ENOMEM;

	msr_param.res = r;
	msr_param.low = 0;
	msr_param.high = r->num_closid;

	/*
	 * Disable resource control for this resource by setting all
	 * CBMs in all domains to the maximum mask value. Pick one CPU
	 * from each domain to update the MSRs below.
	 */
	list_for_each_entry(d, &r->domains, list) {
		cpumask_set_cpu(cpumask_any(&d->cpu_mask), cpu_mask);

		for (i = 0; i < r->num_closid; i++)
			d->ctrl_val[i] = r->default_ctrl;
	}
	cpu = get_cpu();
	/* Update CBM on this cpu if it's in cpu_mask. */
	if (cpumask_test_cpu(cpu, cpu_mask))
		rdt_ctrl_update(&msr_param);
	/* Update CBM on all other cpus in cpu_mask. */
	smp_call_function_many(cpu_mask, rdt_ctrl_update, &msr_param, 1);
	put_cpu();

	free_cpumask_var(cpu_mask);

	return 0;
}

/*
 * Move tasks from one to the other group. If @from is NULL, then all tasks
 * in the systems are moved unconditionally (used for teardown).
 *
 * If @mask is not NULL, the CPUs on which moved tasks are running are set
 * in that mask so the update smp function call is restricted to affected
 * CPUs.
 */
static void rdt_move_group_tasks(struct rdtgroup *from, struct rdtgroup *to,
				 struct cpumask *mask)
{
	struct task_struct *p, *t;

	read_lock(&tasklist_lock);
	for_each_process_thread(p, t) {
		if (!from || is_closid_match(t, from) ||
		    is_rmid_match(t, from)) {
			t->closid = to->closid;
			t->rmid = to->mon.rmid;

#ifdef CONFIG_SMP
			/*
			 * This is safe on x86 w/o barriers as the ordering
			 * of writing to task_cpu() and t->on_cpu is
			 * reverse to the reading here. The detection is
			 * inaccurate as tasks might move or schedule
			 * before the smp function call takes place. In
			 * such a case the function call is pointless, but
			 * there is no other side effect.
			 */
			if (mask && t->on_cpu)
				cpumask_set_cpu(task_cpu(t), mask);
#endif
		}
	}
	read_unlock(&tasklist_lock);
}

static void free_all_child_rdtgrp(struct rdtgroup *rdtgrp)
{
	struct rdtgroup *sentry, *stmp;
	struct list_head *head;

	head = &rdtgrp->mon.crdtgrp_list;
	list_for_each_entry_safe(sentry, stmp, head, mon.crdtgrp_list) {
		free_rmid(sentry->mon.rmid);
		list_del(&sentry->mon.crdtgrp_list);

		if (atomic_read(&sentry->waitcount) != 0)
			sentry->flags = RDT_DELETED;
		else
			kfree(sentry);
	}
}

/*
 * Forcibly remove all of subdirectories under root.
 */
static void rmdir_all_sub(void)
{
	struct rdtgroup *rdtgrp, *tmp;

	/* Move all tasks to the default resource group */
	rdt_move_group_tasks(NULL, &rdtgroup_default, NULL);

	list_for_each_entry_safe(rdtgrp, tmp, &rdt_all_groups, rdtgroup_list) {
		/* Free any child rmids */
		free_all_child_rdtgrp(rdtgrp);

		/* Remove each rdtgroup other than root */
		if (rdtgrp == &rdtgroup_default)
			continue;

		if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP ||
		    rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED)
			rdtgroup_pseudo_lock_remove(rdtgrp);

		/*
		 * Give any CPUs back to the default group. We cannot copy
		 * cpu_online_mask because a CPU might have executed the
		 * offline callback already, but is still marked online.
		 */
		cpumask_or(&rdtgroup_default.cpu_mask,
			   &rdtgroup_default.cpu_mask, &rdtgrp->cpu_mask);

		free_rmid(rdtgrp->mon.rmid);

		kernfs_remove(rdtgrp->kn);
		list_del(&rdtgrp->rdtgroup_list);

		if (atomic_read(&rdtgrp->waitcount) != 0)
			rdtgrp->flags = RDT_DELETED;
		else
			kfree(rdtgrp);
	}
	/* Notify online CPUs to update per cpu rmid */
	update_closid_rmid(cpu_online_mask, &rdtgroup_default);

	kernfs_remove(kn_info);
	kernfs_remove(kn_mongrp);
	kernfs_remove(kn_mondata);
}

static void rdt_kill_sb(struct super_block *sb)
{
	struct rdt_resource *r;

	cpus_read_lock();
	mutex_lock(&rdtgroup_mutex);

	set_mba_sc(false);

	/* Put everything back to default values. */
	for_each_alloc_enabled_rdt_resource(r)
		reset_all_ctrls(r);
	cdp_disable_all();
	rmdir_all_sub();
	rdt_pseudo_lock_release();
	rdtgroup_default.mode = RDT_MODE_SHAREABLE;
	static_branch_disable_cpuslocked(&rdt_alloc_enable_key);
	static_branch_disable_cpuslocked(&rdt_mon_enable_key);
	static_branch_disable_cpuslocked(&rdt_enable_key);
	kernfs_kill_sb(sb);
	mutex_unlock(&rdtgroup_mutex);
	cpus_read_unlock();
}

static struct file_system_type rdt_fs_type = {
	.name			= "resctrl",
	.init_fs_context	= rdt_init_fs_context,
	.parameters		= rdt_fs_parameters,
	.kill_sb		= rdt_kill_sb,
};

static int mon_addfile(struct kernfs_node *parent_kn, const char *name,
		       void *priv)
{
	struct kernfs_node *kn;
	int ret = 0;

	kn = __kernfs_create_file(parent_kn, name, 0444,
				  GLOBAL_ROOT_UID, GLOBAL_ROOT_GID, 0,
				  &kf_mondata_ops, priv, NULL, NULL);
	if (IS_ERR(kn))
		return PTR_ERR(kn);

	ret = rdtgroup_kn_set_ugid(kn);
	if (ret) {
		kernfs_remove(kn);
		return ret;
	}

	return ret;
}

/*
 * Remove all subdirectories of mon_data of ctrl_mon groups
 * and monitor groups with given domain id.
 */
void rmdir_mondata_subdir_allrdtgrp(struct rdt_resource *r, unsigned int dom_id)
{
	struct rdtgroup *prgrp, *crgrp;
	char name[32];

	if (!r->mon_enabled)
		return;

	list_for_each_entry(prgrp, &rdt_all_groups, rdtgroup_list) {
		sprintf(name, "mon_%s_%02d", r->name, dom_id);
		kernfs_remove_by_name(prgrp->mon.mon_data_kn, name);

		list_for_each_entry(crgrp, &prgrp->mon.crdtgrp_list, mon.crdtgrp_list)
			kernfs_remove_by_name(crgrp->mon.mon_data_kn, name);
	}
}

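/*
 * Create the mon_<resource>_<domain id> directory for one domain of a
 * monitoring capable resource and add one file per monitoring event. MBM
 * events get an initial read here to set up their counter state.
 */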
static int mkdir_mondata_subdir(struct kernfs_node *parent_kn,
				struct rdt_domain *d,
				struct rdt_resource *r, struct rdtgroup *prgrp)
{
	union mon_data_bits priv;
	struct kernfs_node *kn;
	struct mon_evt *mevt;
	struct rmid_read rr;
	char name[32];
	int ret;

	sprintf(name, "mon_%s_%02d", r->name, d->id);
	/* create the directory */
	kn = kernfs_create_dir(parent_kn, name, parent_kn->mode, prgrp);
	if (IS_ERR(kn))
		return PTR_ERR(kn);

	/*
	 * This extra ref will be put in kernfs_remove() and guarantees
	 * that kn is always accessible.
	 */
	kernfs_get(kn);
	ret = rdtgroup_kn_set_ugid(kn);
	if (ret)
		goto out_destroy;

	if (WARN_ON(list_empty(&r->evt_list))) {
		ret = -EPERM;
		goto out_destroy;
	}

	priv.u.rid = r->rid;
	priv.u.domid = d->id;
	list_for_each_entry(mevt, &r->evt_list, list) {
		priv.u.evtid = mevt->evtid;
		ret = mon_addfile(kn, mevt->name, priv.priv);
		if (ret)
			goto out_destroy;

		if (is_mbm_event(mevt->evtid))
			mon_event_read(&rr, r, d, prgrp, mevt->evtid, true);
	}
	kernfs_activate(kn);
	return 0;

out_destroy:
	kernfs_remove(kn);
	return ret;
}

/*
 * Add all subdirectories of mon_data for "ctrl_mon" groups
 * and "monitor" groups with given domain id.
 */
void mkdir_mondata_subdir_allrdtgrp(struct rdt_resource *r,
				    struct rdt_domain *d)
{
	struct kernfs_node *parent_kn;
	struct rdtgroup *prgrp, *crgrp;
	struct list_head *head;

	if (!r->mon_enabled)
		return;

	list_for_each_entry(prgrp, &rdt_all_groups, rdtgroup_list) {
		parent_kn = prgrp->mon.mon_data_kn;
		mkdir_mondata_subdir(parent_kn, d, r, prgrp);

		head = &prgrp->mon.crdtgrp_list;
		list_for_each_entry(crgrp, head, mon.crdtgrp_list) {
			parent_kn = crgrp->mon.mon_data_kn;
			mkdir_mondata_subdir(parent_kn, d, r, crgrp);
		}
	}
}

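/*
 * Create one mon_data subdirectory per domain of resource @r under
 * @parent_kn.
 */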
static int mkdir_mondata_subdir_alldom(struct kernfs_node *parent_kn,
				       struct rdt_resource *r,
				       struct rdtgroup *prgrp)
{
	struct rdt_domain *dom;
	int ret;

	list_for_each_entry(dom, &r->domains, list) {
		ret = mkdir_mondata_subdir(parent_kn, dom, r, prgrp);
		if (ret)
			return ret;
	}

	return 0;
}

/*
 * This creates a directory mon_data which contains the monitored data.
 *
 * mon_data has one directory for each domain which are named
 * in the format mon_<domain_name>_<domain_id>. For ex: A mon_data
 * with L3 domain looks as below:
 * ./mon_data:
 * mon_L3_00
 * mon_L3_01
 * mon_L3_02
 * ...
 *
 * Each domain directory has one file per event:
 * ./mon_L3_00/:
 * llc_occupancy
 *
 */
static int mkdir_mondata_all(struct kernfs_node *parent_kn,
			     struct rdtgroup *prgrp,
			     struct kernfs_node **dest_kn)
{
	struct rdt_resource *r;
	struct kernfs_node *kn;
	int ret;

	/*
	 * Create the mon_data directory first.
	 */
	ret = mongroup_create_dir(parent_kn, prgrp, "mon_data", &kn);
	if (ret)
		return ret;

	if (dest_kn)
		*dest_kn = kn;

	/*
	 * Create the subdirectories for each domain. Note that all events
	 * in a domain like L3 are grouped into a resource whose domain is L3
	 */
	for_each_mon_enabled_rdt_resource(r) {
		ret = mkdir_mondata_subdir_alldom(kn, r, prgrp);
		if (ret)
			goto out_destroy;
	}

	return 0;

out_destroy:
	kernfs_remove(kn);
	return ret;
}

/**
 * cbm_ensure_valid - Enforce validity on provided CBM
 * @_val:	Candidate CBM
 * @r:		RDT resource to which the CBM belongs
 *
 * The provided CBM represents all cache portions available for use. This
 * may be represented by a bitmap that does not consist of contiguous ones
 * and thus be an invalid CBM.
 * Here the provided CBM is forced to be a valid CBM by only considering
 * the first set of contiguous bits as valid and clearing all remaining
 * bits.
 * The intention here is to provide a valid default CBM with which a new
 * resource group is initialized. The user can follow this with a
 * modification to the CBM if the default does not satisfy the
 * requirements.
 */
static u32 cbm_ensure_valid(u32 _val, struct rdt_resource *r)
{
	unsigned int cbm_len = r->cache.cbm_len;
	unsigned long first_bit, zero_bit;
	unsigned long val = _val;

	if (!val)
		return 0;

	first_bit = find_first_bit(&val, cbm_len);
	zero_bit = find_next_zero_bit(&val, cbm_len, first_bit);

	/* Clear any remaining bits to ensure contiguous region */
	bitmap_clear(&val, zero_bit, cbm_len - zero_bit);
	return (u32)val;
}
2673
/*
 * Initialize cache resources per RDT domain.
 *
 * Set the RDT domain up to start off with all usable allocations: all
 * shareable and currently unused bits. An all-zero CBM is invalid.
 */
static int __init_one_rdt_domain(struct rdt_domain *d, struct rdt_resource *r,
				 u32 closid)
{
	struct rdt_resource *r_cdp = NULL;
	struct rdt_domain *d_cdp = NULL;
	u32 used_b = 0, unused_b = 0;
	unsigned long tmp_cbm;
	enum rdtgrp_mode mode;
	u32 peer_ctl, *ctrl;
	int i;

	rdt_cdp_peer_get(r, d, &r_cdp, &d_cdp);
	d->have_new_ctrl = false;
	d->new_ctrl = r->cache.shareable_bits;
	used_b = r->cache.shareable_bits;
	ctrl = d->ctrl_val;
	for (i = 0; i < closids_supported(); i++, ctrl++) {
		if (closid_allocated(i) && i != closid) {
			mode = rdtgroup_mode_by_closid(i);
			if (mode == RDT_MODE_PSEUDO_LOCKSETUP)
				/*
				 * ctrl values for locksetup aren't
				 * relevant until the schemata is
				 * written to and the mode changes to
				 * RDT_MODE_PSEUDO_LOCKED.
				 */
				continue;
			/*
			 * If CDP is active include the peer domain's
			 * usage to ensure there is no overlap with an
			 * exclusive group.
			 */
			if (d_cdp)
				peer_ctl = d_cdp->ctrl_val[i];
			else
				peer_ctl = 0;
			used_b |= *ctrl | peer_ctl;
			if (mode == RDT_MODE_SHAREABLE)
				d->new_ctrl |= *ctrl | peer_ctl;
		}
	}
	if (d->plr && d->plr->cbm > 0)
		used_b |= d->plr->cbm;
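	/*
	 * unused_b is the complement of used_b within the cbm_len wide
	 * mask: cache portions that no other group (or pseudo-locked
	 * region) is using. Grant the new group all unused portions on
	 * top of the shareable ones.
	 */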
	unused_b = used_b ^ (BIT_MASK(r->cache.cbm_len) - 1);
	unused_b &= BIT_MASK(r->cache.cbm_len) - 1;
	d->new_ctrl |= unused_b;

	/*
	 * Force the initial CBM to be valid; the user can modify the CBM
	 * based on system availability.
	 */
	d->new_ctrl = cbm_ensure_valid(d->new_ctrl, r);

	/*
	 * Assign the u32 CBM to an unsigned long to ensure that
	 * bitmap_weight() does not access out-of-bound memory.
	 */
	tmp_cbm = d->new_ctrl;
	if (bitmap_weight(&tmp_cbm, r->cache.cbm_len) < r->cache.min_cbm_bits) {
		rdt_last_cmd_printf("No space on %s:%d\n", r->name, d->id);
		return -ENOSPC;
	}
	d->have_new_ctrl = true;

	return 0;
}

/*
 * Initialize cache resources with default values.
 *
 * A new RDT group is being created on an allocation capable (CAT)
 * supporting system. Set this group up to start off with all usable
 * allocations.
 *
 * If there are no more shareable bits available on any domain then
 * the entire allocation will fail.
 */
static int rdtgroup_init_cat(struct rdt_resource *r, u32 closid)
{
	struct rdt_domain *d;
	int ret;

	list_for_each_entry(d, &r->domains, list) {
		ret = __init_one_rdt_domain(d, r, closid);
		if (ret < 0)
			return ret;
	}

	return 0;
}

/* Initialize MBA resource with default values. */
static void rdtgroup_init_mba(struct rdt_resource *r)
{
	struct rdt_domain *d;

	list_for_each_entry(d, &r->domains, list) {
		d->new_ctrl = is_mba_sc(r) ? MBA_MAX_MBPS : r->default_ctrl;
		d->have_new_ctrl = true;
	}
}

/* Initialize the RDT group's allocations. */
static int rdtgroup_init_alloc(struct rdtgroup *rdtgrp)
{
	struct rdt_resource *r;
	int ret;

	for_each_alloc_enabled_rdt_resource(r) {
		if (r->rid == RDT_RESOURCE_MBA) {
			rdtgroup_init_mba(r);
		} else {
			ret = rdtgroup_init_cat(r, rdtgrp->closid);
			if (ret < 0)
				return ret;
		}

		ret = update_domains(r, rdtgrp->closid);
		if (ret < 0) {
			rdt_last_cmd_puts("Failed to initialize allocations\n");
			return ret;
		}
	}

	rdtgrp->mode = RDT_MODE_SHAREABLE;

	return 0;
}

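/*
 * Common preparation for creating a new resource group directory:
 * allocate the rdtgroup, create its kernfs directory and base files,
 * and, if monitoring is supported, allocate an RMID and build the
 * mon_data hierarchy. On success the parent's kernfs node is left
 * locked; the caller must release it with rdtgroup_kn_unlock().
 */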
static int mkdir_rdt_prepare(struct kernfs_node *parent_kn,
			     const char *name, umode_t mode,
			     enum rdt_group_type rtype, struct rdtgroup **r)
{
	struct rdtgroup *prdtgrp, *rdtgrp;
	struct kernfs_node *kn;
	uint files = 0;
	int ret;

	prdtgrp = rdtgroup_kn_lock_live(parent_kn);
	if (!prdtgrp) {
		ret = -ENODEV;
		goto out_unlock;
	}

	if (rtype == RDTMON_GROUP &&
	    (prdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP ||
	     prdtgrp->mode == RDT_MODE_PSEUDO_LOCKED)) {
		ret = -EINVAL;
		rdt_last_cmd_puts("Pseudo-locking in progress\n");
		goto out_unlock;
	}

	/* allocate the rdtgroup */
	rdtgrp = kzalloc(sizeof(*rdtgrp), GFP_KERNEL);
	if (!rdtgrp) {
		ret = -ENOSPC;
		rdt_last_cmd_puts("Kernel out of memory\n");
		goto out_unlock;
	}
	*r = rdtgrp;
	rdtgrp->mon.parent = prdtgrp;
	rdtgrp->type = rtype;
	INIT_LIST_HEAD(&rdtgrp->mon.crdtgrp_list);

	/* kernfs creates the directory for rdtgrp */
	kn = kernfs_create_dir(parent_kn, name, mode, rdtgrp);
	if (IS_ERR(kn)) {
		ret = PTR_ERR(kn);
		rdt_last_cmd_puts("kernfs create error\n");
		goto out_free_rgrp;
	}
	rdtgrp->kn = kn;

	/*
	 * kernfs_remove() will drop the reference count on "kn" which
	 * will free it. But we still need it to stick around for the
	 * rdtgroup_kn_unlock(kn) call. Take one extra reference here,
	 * which will be dropped inside rdtgroup_kn_unlock().
	 */
	kernfs_get(kn);

	ret = rdtgroup_kn_set_ugid(kn);
	if (ret) {
		rdt_last_cmd_puts("kernfs perm error\n");
		goto out_destroy;
	}

	files = RFTYPE_BASE | BIT(RF_CTRLSHIFT + rtype);
	ret = rdtgroup_add_files(kn, files);
	if (ret) {
		rdt_last_cmd_puts("kernfs fill error\n");
		goto out_destroy;
	}

	if (rdt_mon_capable) {
		ret = alloc_rmid();
		if (ret < 0) {
			rdt_last_cmd_puts("Out of RMIDs\n");
			goto out_destroy;
		}
		rdtgrp->mon.rmid = ret;

		ret = mkdir_mondata_all(kn, rdtgrp, &rdtgrp->mon.mon_data_kn);
		if (ret) {
			rdt_last_cmd_puts("kernfs subdir error\n");
			goto out_idfree;
		}
	}
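	/*
	 * The kernfs root was created with KERNFS_ROOT_CREATE_DEACTIVATED,
	 * so new nodes start inactive; kernfs_activate() makes the
	 * directory and everything created under it visible to user space.
	 */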
	kernfs_activate(kn);

	/*
	 * The caller unlocks the parent_kn upon success.
	 */
	return 0;

out_idfree:
	free_rmid(rdtgrp->mon.rmid);
out_destroy:
	kernfs_remove(rdtgrp->kn);
out_free_rgrp:
	kfree(rdtgrp);
out_unlock:
	rdtgroup_kn_unlock(parent_kn);
	return ret;
}

static void mkdir_rdt_prepare_clean(struct rdtgroup *rgrp)
{
	kernfs_remove(rgrp->kn);
	free_rmid(rgrp->mon.rmid);
	kfree(rgrp);
}

/*
 * Create a monitor group under the "mon_groups" directory of a control
 * and monitor group (ctrl_mon). This is a resource group that monitors
 * only.
 */
static int rdtgroup_mkdir_mon(struct kernfs_node *parent_kn,
			      const char *name, umode_t mode)
{
	struct rdtgroup *rdtgrp, *prgrp;
	int ret;

	ret = mkdir_rdt_prepare(parent_kn, name, mode, RDTMON_GROUP, &rdtgrp);
	if (ret)
		return ret;

	prgrp = rdtgrp->mon.parent;
	/* A mon group shares its parent's CLOSID; only the RMID is its own. */
	rdtgrp->closid = prgrp->closid;

	/*
	 * Add the rdtgrp to the list of rdtgrps the parent
	 * ctrl_mon group has to track.
	 */
	list_add_tail(&rdtgrp->mon.crdtgrp_list, &prgrp->mon.crdtgrp_list);

	rdtgroup_kn_unlock(parent_kn);
	return ret;
}

/*
 * These are rdtgroups created under the root directory. They can be
 * used to allocate and monitor resources.
 */
static int rdtgroup_mkdir_ctrl_mon(struct kernfs_node *parent_kn,
				   const char *name, umode_t mode)
{
	struct rdtgroup *rdtgrp;
	struct kernfs_node *kn;
	u32 closid;
	int ret;

	ret = mkdir_rdt_prepare(parent_kn, name, mode, RDTCTRL_GROUP, &rdtgrp);
	if (ret)
		return ret;

	kn = rdtgrp->kn;
	ret = closid_alloc();
	if (ret < 0) {
		rdt_last_cmd_puts("Out of CLOSIDs\n");
		goto out_common_fail;
	}
	closid = ret;
	ret = 0;

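	/*
	 * Seed the new CLOSID's allocations with usable defaults and
	 * program them into hardware via rdtgroup_init_alloc().
	 */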
	rdtgrp->closid = closid;
	ret = rdtgroup_init_alloc(rdtgrp);
	if (ret < 0)
		goto out_id_free;

	list_add(&rdtgrp->rdtgroup_list, &rdt_all_groups);

	if (rdt_mon_capable) {
		/*
		 * Create an empty mon_groups directory to hold the subset
		 * of tasks and cpus to monitor.
		 */
		ret = mongroup_create_dir(kn, rdtgrp, "mon_groups", NULL);
		if (ret) {
			rdt_last_cmd_puts("kernfs subdir error\n");
			goto out_del_list;
		}
	}

	goto out_unlock;

out_del_list:
	list_del(&rdtgrp->rdtgroup_list);
out_id_free:
	closid_free(closid);
out_common_fail:
	mkdir_rdt_prepare_clean(rdtgrp);
out_unlock:
	rdtgroup_kn_unlock(parent_kn);
	return ret;
}

/*
 * We allow creating mon groups only within a directory called
 * "mon_groups", which is present in every ctrl_mon group. Check if this
 * is a valid "mon_groups" directory:
 *
 * 1. The directory should be named "mon_groups".
 * 2. The mon group itself should "not" be named "mon_groups", which
 *    ensures that a "mon_groups" directory always has a ctrl_mon group
 *    as its parent.
 */
static bool is_mon_groups(struct kernfs_node *kn, const char *name)
{
	return (!strcmp(kn->name, "mon_groups") &&
		strcmp(name, "mon_groups"));
}

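/*
 * Example: with a ctrl_mon group "p0" already created,
 *
 *	mkdir /sys/fs/resctrl/p0/mon_groups/m0
 *
 * creates a monitor-only subgroup, while attempting to create a group
 * named "mon_groups" there is rejected by the check above.
 */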
static int rdtgroup_mkdir(struct kernfs_node *parent_kn, const char *name,
			  umode_t mode)
{
	/* Do not accept '\n' to avoid unparsable notification text. */
	if (strchr(name, '\n'))
		return -EINVAL;

	/*
	 * If the parent directory is the root directory and RDT
	 * allocation is supported, add a control and monitoring
	 * subdirectory.
	 */
	if (rdt_alloc_capable && parent_kn == rdtgroup_default.kn)
		return rdtgroup_mkdir_ctrl_mon(parent_kn, name, mode);

	/*
	 * If RDT monitoring is supported and the parent directory is a
	 * valid "mon_groups" directory, add a monitoring subdirectory.
	 */
	if (rdt_mon_capable && is_mon_groups(parent_kn, name))
		return rdtgroup_mkdir_mon(parent_kn, name, mode);

	return -EPERM;
}
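
/*
 * Example: a control group is created directly under the mount root,
 *
 *	mount -t resctrl resctrl /sys/fs/resctrl
 *	mkdir /sys/fs/resctrl/p0
 *
 * mkdir anywhere else (other than inside a "mon_groups" directory)
 * fails with -EPERM.
 */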

static int rdtgroup_rmdir_mon(struct kernfs_node *kn, struct rdtgroup *rdtgrp,
			      cpumask_var_t tmpmask)
{
	struct rdtgroup *prdtgrp = rdtgrp->mon.parent;
	int cpu;

	/* Give any tasks back to the parent group */
	rdt_move_group_tasks(rdtgrp, prdtgrp, tmpmask);

	/* Update per cpu rmid of the moved CPUs first */
	for_each_cpu(cpu, &rdtgrp->cpu_mask)
		per_cpu(pqr_state.default_rmid, cpu) = prdtgrp->mon.rmid;

	/*
	 * Update the MSR on moved CPUs and CPUs which have moved
	 * tasks running on them.
	 */
	cpumask_or(tmpmask, tmpmask, &rdtgrp->cpu_mask);
	update_closid_rmid(tmpmask, NULL);

	rdtgrp->flags = RDT_DELETED;
	free_rmid(rdtgrp->mon.rmid);

	/*
	 * Remove the rdtgrp from the parent ctrl_mon group's list.
	 */
	WARN_ON(list_empty(&prdtgrp->mon.crdtgrp_list));
	list_del(&rdtgrp->mon.crdtgrp_list);

	/*
	 * One extra hold on the kernfs node; it is dropped when rdtgrp
	 * is freed in rdtgroup_kn_unlock().
	 */
	kernfs_get(kn);
	kernfs_remove(rdtgrp->kn);

	return 0;
}

static int rdtgroup_ctrl_remove(struct kernfs_node *kn,
				struct rdtgroup *rdtgrp)
{
	rdtgrp->flags = RDT_DELETED;
	list_del(&rdtgrp->rdtgroup_list);

	/*
	 * One extra hold on the kernfs node; it is dropped when rdtgrp
	 * is freed in rdtgroup_kn_unlock().
	 */
	kernfs_get(kn);
	kernfs_remove(rdtgrp->kn);
	return 0;
}

static int rdtgroup_rmdir_ctrl(struct kernfs_node *kn, struct rdtgroup *rdtgrp,
			       cpumask_var_t tmpmask)
{
	int cpu;

	/* Give any tasks back to the default group */
	rdt_move_group_tasks(rdtgrp, &rdtgroup_default, tmpmask);

	/* Give any CPUs back to the default group */
	cpumask_or(&rdtgroup_default.cpu_mask,
		   &rdtgroup_default.cpu_mask, &rdtgrp->cpu_mask);

	/* Update per cpu closid and rmid of the moved CPUs first */
	for_each_cpu(cpu, &rdtgrp->cpu_mask) {
		per_cpu(pqr_state.default_closid, cpu) = rdtgroup_default.closid;
		per_cpu(pqr_state.default_rmid, cpu) = rdtgroup_default.mon.rmid;
	}

	/*
	 * Update the MSR on moved CPUs and CPUs which have moved
	 * tasks running on them.
	 */
	cpumask_or(tmpmask, tmpmask, &rdtgrp->cpu_mask);
	update_closid_rmid(tmpmask, NULL);

	closid_free(rdtgrp->closid);
	free_rmid(rdtgrp->mon.rmid);

	rdtgroup_ctrl_remove(kn, rdtgrp);

	/*
	 * Free all the child monitor groups' rmids.
	 */
	free_all_child_rdtgrp(rdtgrp);

	return 0;
}

static int rdtgroup_rmdir(struct kernfs_node *kn)
{
	struct kernfs_node *parent_kn = kn->parent;
	struct rdtgroup *rdtgrp;
	cpumask_var_t tmpmask;
	int ret = 0;

	if (!zalloc_cpumask_var(&tmpmask, GFP_KERNEL))
		return -ENOMEM;

	rdtgrp = rdtgroup_kn_lock_live(kn);
	if (!rdtgrp) {
		ret = -EPERM;
		goto out;
	}

	/*
	 * If the rdtgroup is a ctrl_mon group and the parent directory
	 * is the root directory, remove the ctrl_mon group.
	 *
	 * If the rdtgroup is a mon group and the parent directory
	 * is a valid "mon_groups" directory, remove the mon group.
	 */
	if (rdtgrp->type == RDTCTRL_GROUP && parent_kn == rdtgroup_default.kn &&
	    rdtgrp != &rdtgroup_default) {
		if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP ||
		    rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) {
			ret = rdtgroup_ctrl_remove(kn, rdtgrp);
		} else {
			ret = rdtgroup_rmdir_ctrl(kn, rdtgrp, tmpmask);
		}
	} else if (rdtgrp->type == RDTMON_GROUP &&
		   is_mon_groups(parent_kn, kn->name)) {
		ret = rdtgroup_rmdir_mon(kn, rdtgrp, tmpmask);
	} else {
		ret = -EPERM;
	}

out:
	rdtgroup_kn_unlock(kn);
	free_cpumask_var(tmpmask);
	return ret;
}

static int rdtgroup_show_options(struct seq_file *seq, struct kernfs_root *kf)
{
	if (rdt_resources_all[RDT_RESOURCE_L3DATA].alloc_enabled)
		seq_puts(seq, ",cdp");

	if (rdt_resources_all[RDT_RESOURCE_L2DATA].alloc_enabled)
		seq_puts(seq, ",cdpl2");

	if (is_mba_sc(&rdt_resources_all[RDT_RESOURCE_MBA]))
		seq_puts(seq, ",mba_MBps");

	return 0;
}

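/*
 * kernfs drives the resctrl directory operations: mkdir/rmdir on the
 * mounted filesystem land in the callbacks above, and show_options
 * reports the active mount options.
 */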
static struct kernfs_syscall_ops rdtgroup_kf_syscall_ops = {
	.mkdir		= rdtgroup_mkdir,
	.rmdir		= rdtgroup_rmdir,
	.show_options	= rdtgroup_show_options,
};

static int __init rdtgroup_setup_root(void)
{
	int ret;

	rdt_root = kernfs_create_root(&rdtgroup_kf_syscall_ops,
				      KERNFS_ROOT_CREATE_DEACTIVATED |
				      KERNFS_ROOT_EXTRA_OPEN_PERM_CHECK,
				      &rdtgroup_default);
	if (IS_ERR(rdt_root))
		return PTR_ERR(rdt_root);

	mutex_lock(&rdtgroup_mutex);

	rdtgroup_default.closid = 0;
	rdtgroup_default.mon.rmid = 0;
	rdtgroup_default.type = RDTCTRL_GROUP;
	INIT_LIST_HEAD(&rdtgroup_default.mon.crdtgrp_list);

	list_add(&rdtgroup_default.rdtgroup_list, &rdt_all_groups);

	ret = rdtgroup_add_files(rdt_root->kn, RF_CTRL_BASE);
	if (ret) {
		kernfs_destroy_root(rdt_root);
		goto out;
	}

	rdtgroup_default.kn = rdt_root->kn;
	kernfs_activate(rdtgroup_default.kn);

out:
	mutex_unlock(&rdtgroup_mutex);

	return ret;
}

/*
 * rdtgroup_init - rdtgroup initialization
 *
 * Sets up the resctrl filesystem: creates the kernfs root and its files,
 * creates the /sys/fs/resctrl mount point, and registers the filesystem
 * type.
 *
 * Return: 0 on success or -errno
 */
int __init rdtgroup_init(void)
{
	int ret = 0;

	seq_buf_init(&last_cmd_status, last_cmd_status_buf,
		     sizeof(last_cmd_status_buf));

	ret = rdtgroup_setup_root();
	if (ret)
		return ret;

	ret = sysfs_create_mount_point(fs_kobj, "resctrl");
	if (ret)
		goto cleanup_root;

	ret = register_filesystem(&rdt_fs_type);
	if (ret)
		goto cleanup_mountpoint;

	/*
	 * The resctrl debugfs directory is created here, before the
	 * resctrl filesystem is ever mounted, and removed in
	 * rdtgroup_exit(). It hosts the pseudo-locking debug files and
	 * needs no locking since it is only created and removed during
	 * init and exit.
	 */
	debugfs_resctrl = debugfs_create_dir("resctrl", NULL);

	return 0;

cleanup_mountpoint:
	sysfs_remove_mount_point(fs_kobj, "resctrl");
cleanup_root:
	kernfs_destroy_root(rdt_root);

	return ret;
}

void __exit rdtgroup_exit(void)
{
	debugfs_remove_recursive(debugfs_resctrl);
	unregister_filesystem(&rdt_fs_type);
	sysfs_remove_mount_point(fs_kobj, "resctrl");
	kernfs_destroy_root(rdt_root);
}