/*
 * Pressure stall information for CPU, memory and IO
 *
 * Copyright (c) 2018 Facebook, Inc.
 * Author: Johannes Weiner <hannes@cmpxchg.org>
 *
 * Polling support by Suren Baghdasaryan <surenb@google.com>
 * Copyright (c) 2018 Google, Inc.
 *
 * When CPU, memory and IO are contended, tasks experience delays that
 * reduce throughput and introduce latencies into the workload. Memory
 * and IO contention, in addition, can cause a full loss of forward
 * progress in which the CPU goes idle.
 *
 * This code aggregates individual task delays into resource pressure
 * metrics that indicate problems with both workload health and
 * resource utilization.
 */
#include "../workqueue_internal.h"
#include <linux/sched/loadavg.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/seqlock.h>
#include <linux/uaccess.h>
#include <linux/cgroup.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/ctype.h>
#include <linux/file.h>
#include <linux/poll.h>
#include <linux/psi.h>
#include "sched.h"

static int psi_bug __read_mostly;

DEFINE_STATIC_KEY_FALSE(psi_disabled);

#ifdef CONFIG_PSI_DEFAULT_DISABLED
static bool psi_enable;
#else
static bool psi_enable = true;
#endif
static int __init setup_psi(char *str)
{
        return kstrtobool(str, &psi_enable) == 0;
}
__setup("psi=", setup_psi);

/* Running averages - we need to be higher-res than loadavg */
#define PSI_FREQ        (2*HZ+1)        /* 500ms */
#define EXP_10s         1677            /* 1/exp(2s/10s) as fixed-point */
#define EXP_60s         1981            /* 1/exp(2s/60s) */
#define EXP_300s        2034            /* 1/exp(2s/300s) */

/* PSI trigger definitions */
#define WINDOW_MIN_US 500000            /* Min window size is 500ms */
#define WINDOW_MAX_US 10000000          /* Max window size is 10s */
#define UPDATES_PER_WINDOW 10           /* 10 updates per window */

/* Sampling frequency in nanoseconds */
static u64 psi_period __read_mostly;

/* System-level pressure and stall tracking */
static DEFINE_PER_CPU(struct psi_group_cpu, system_group_pcpu);
struct psi_group psi_system = {
        .pcpu = &system_group_pcpu,
};

static void psi_avgs_work(struct work_struct *work);

static void group_init(struct psi_group *group)
{
        int cpu;

        for_each_possible_cpu(cpu)
                seqcount_init(&per_cpu_ptr(group->pcpu, cpu)->seq);
        group->avg_next_update = sched_clock() + psi_period;
        INIT_DELAYED_WORK(&group->avgs_work, psi_avgs_work);
        mutex_init(&group->avgs_lock);
        /* Init trigger-related members */
        atomic_set(&group->poll_scheduled, 0);
        mutex_init(&group->trigger_lock);
        INIT_LIST_HEAD(&group->triggers);
        memset(group->nr_triggers, 0, sizeof(group->nr_triggers));
        group->poll_states = 0;
        group->poll_min_period = U32_MAX;
        memset(group->polling_total, 0, sizeof(group->polling_total));
        group->polling_next_update = ULLONG_MAX;
        group->polling_until = 0;
        rcu_assign_pointer(group->poll_kworker, NULL);
}

void __init psi_init(void)
{
        if (!psi_enable) {
                static_branch_enable(&psi_disabled);
                return;
        }

        psi_period = jiffies_to_nsecs(PSI_FREQ);
        group_init(&psi_system);
}
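
/*
 * Derive the pressure states from a CPU's task counts: SOME means at
 * least one task is stalled on the resource, FULL means all non-idle
 * tasks are stalled on it simultaneously. CPU pressure has no FULL
 * state here: by definition, a running task is making use of the CPU.
 */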
static bool test_state(unsigned int *tasks, enum psi_states state)
{
        switch (state) {
        case PSI_IO_SOME:
                return tasks[NR_IOWAIT];
        case PSI_IO_FULL:
                return tasks[NR_IOWAIT] && !tasks[NR_RUNNING];
        case PSI_MEM_SOME:
                return tasks[NR_MEMSTALL];
        case PSI_MEM_FULL:
                return tasks[NR_MEMSTALL] && !tasks[NR_RUNNING];
        case PSI_CPU_SOME:
                return tasks[NR_RUNNING] > 1;
        case PSI_NONIDLE:
                return tasks[NR_IOWAIT] || tasks[NR_MEMSTALL] ||
                        tasks[NR_RUNNING];
        default:
                return false;
        }
}
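
/*
 * Take a coherent snapshot of one CPU's state and report, for the
 * given aggregator, how much each state's time has grown since that
 * aggregator last looked. Bits for states with non-zero deltas are
 * set in @pchanged_states.
 */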
235
236static void get_recent_times(struct psi_group *group, int cpu,
237 enum psi_aggregators aggregator, u32 *times,
238 u32 *pchanged_states)
239{
240 struct psi_group_cpu *groupc = per_cpu_ptr(group->pcpu, cpu);
241 u64 now, state_start;
242 enum psi_states s;
243 unsigned int seq;
244 u32 state_mask;
245
246 *pchanged_states = 0;
247
248
249 do {
250 seq = read_seqcount_begin(&groupc->seq);
251 now = cpu_clock(cpu);
252 memcpy(times, groupc->times, sizeof(groupc->times));
253 state_mask = groupc->state_mask;
254 state_start = groupc->state_start;
255 } while (read_seqcount_retry(&groupc->seq, seq));
256
257
258 for (s = 0; s < NR_PSI_STATES; s++) {
259 u32 delta;
260
261
262
263
264
265
266
267
268
269 if (state_mask & (1 << s))
270 times[s] += now - state_start;
271
272 delta = times[s] - groupc->times_prev[aggregator][s];
273 groupc->times_prev[aggregator][s] = times[s];
274
275 times[s] = delta;
276 if (delta)
277 *pchanged_states |= (1 << s);
278 }
279}
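
/*
 * Fold one pressure sample into the 10s/60s/300s running averages,
 * reusing the fixed-point exponential-decay math from the load
 * average code. @missed_periods accounts for idle stretches during
 * which no samples were produced and the averages simply decay.
 */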
static void calc_avgs(unsigned long avg[3], int missed_periods,
                      u64 time, u64 period)
{
        unsigned long pct;

        /* Fill in zeroes for periods of no activity */
        if (missed_periods) {
                avg[0] = calc_load_n(avg[0], EXP_10s, 0, missed_periods);
                avg[1] = calc_load_n(avg[1], EXP_60s, 0, missed_periods);
                avg[2] = calc_load_n(avg[2], EXP_300s, 0, missed_periods);
        }

        /* Sample the most recent active period */
        pct = div_u64(time * 100, period);
        pct *= FIXED_1;
        avg[0] = calc_load(avg[0], EXP_10s, pct);
        avg[1] = calc_load(avg[1], EXP_60s, pct);
        avg[2] = calc_load(avg[2], EXP_300s, pct);
}

static void collect_percpu_times(struct psi_group *group,
                                 enum psi_aggregators aggregator,
                                 u32 *pchanged_states)
{
        u64 deltas[NR_PSI_STATES - 1] = { 0, };
        unsigned long nonidle_total = 0;
        u32 changed_states = 0;
        int cpu;
        int s;

        /*
         * Collect the per-cpu time buckets and average them into a
         * single time sample that is normalized to wallclock time.
         *
         * For averaging, each CPU is weighted by its non-idle time in
         * order to balance between CPUs that are mostly idle and ones
         * that are fully busy.
         */
        for_each_possible_cpu(cpu) {
                u32 times[NR_PSI_STATES];
                u32 nonidle;
                u32 cpu_changed_states;

                get_recent_times(group, cpu, aggregator, times,
                                 &cpu_changed_states);
                changed_states |= cpu_changed_states;

                nonidle = nsecs_to_jiffies(times[PSI_NONIDLE]);
                nonidle_total += nonidle;

                for (s = 0; s < PSI_NONIDLE; s++)
                        deltas[s] += (u64)times[s] * nonidle;
        }

        /*
         * Integrate the sample into the running statistics that are
         * reported to userspace: the cumulative stall times and the
         * decaying averages.
         *
         * Pressure percentages are sampled at PSI_FREQ. We might be
         * called more often when the user polls more frequently than
         * that; we might be called less often when there is no task
         * activity, thus no data, and clock ticks are sporadic. The
         * below handles both.
         */

        /* total= */
        for (s = 0; s < NR_PSI_STATES - 1; s++)
                group->total[aggregator][s] +=
                                div_u64(deltas[s], max(nonidle_total, 1UL));

        if (pchanged_states)
                *pchanged_states = changed_states;
}
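
/*
 * Aggregate the accumulated stall times into the decaying averages
 * and return the time at which the next aggregation is due.
 */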
static u64 update_averages(struct psi_group *group, u64 now)
{
        unsigned long missed_periods = 0;
        u64 expires, period;
        u64 avg_next_update;
        int s;

        /* avgX= */
        expires = group->avg_next_update;
        if (now - expires >= psi_period)
                missed_periods = div_u64(now - expires, psi_period);

        /*
         * The periods from the last update are likely not exactly
         * 1/PSI_FREQ apart: the clock can be delayed by scheduling,
         * and the group can be idle for whole periods. Keep the next
         * expiration on the fixed period grid, and sample over the
         * time that actually passed since the last update, minus any
         * whole missed periods - calc_avgs() fills those with zeroes.
         */
        avg_next_update = expires + ((1 + missed_periods) * psi_period);
        period = now - (group->avg_last_update + (missed_periods * psi_period));
        group->avg_last_update = now;

        for (s = 0; s < NR_PSI_STATES - 1; s++) {
                u32 sample;

                sample = group->total[PSI_AVGS][s] - group->avg_total[s];
                /*
                 * Due to the lockless sampling of the time buckets,
                 * recorded time deltas can slip into the next period,
                 * which under full pressure can result in samples in
                 * excess of the period length.
                 *
                 * We don't want to report non-sensical pressures in
                 * excess of 100%, nor do we want to drop such events
                 * on the floor. Instead we punt any overage into the
                 * future until pressure subsides. By doing this we
                 * don't underreport the occurring pressure curve, we
                 * just report it delayed by one period length.
                 *
                 * The error isn't cumulative. As soon as another
                 * delta slips from a period P to P+1, by definition
                 * it frees up its time T in P.
                 */
                if (sample > period)
                        sample = period;
                group->avg_total[s] += sample;
                calc_avgs(group->avg[s], missed_periods, sample, period);
        }

        return avg_next_update;
}

static void psi_avgs_work(struct work_struct *work)
{
        struct delayed_work *dwork;
        struct psi_group *group;
        u32 changed_states;
        bool nonidle;
        u64 now;

        dwork = to_delayed_work(work);
        group = container_of(dwork, struct psi_group, avgs_work);

        mutex_lock(&group->avgs_lock);

        now = sched_clock();

        collect_percpu_times(group, PSI_AVGS, &changed_states);
        nonidle = changed_states & (1 << PSI_NONIDLE);
        /*
         * If there is task activity, periodically fold the per-cpu
         * times and feed samples into the running averages. If things
         * are idle and there is no data to process, stop the clock.
         * Once restarted, we'll catch up the running averages in one
         * go - see calc_avgs() and missed_periods.
         */
        if (now >= group->avg_next_update)
                group->avg_next_update = update_averages(group, now);

        if (nonidle) {
                schedule_delayed_work(dwork, nsecs_to_jiffies(
                                group->avg_next_update - now) + 1);
        }

        mutex_unlock(&group->avgs_lock);
}

/* Trigger tracking window manipulations */
static void window_reset(struct psi_window *win, u64 now, u64 value,
                         u64 prev_growth)
{
        win->start_time = now;
        win->start_value = value;
        win->prev_growth = prev_growth;
}

/*
 * PSI growth tracking window update and growth calculation routine.
 *
 * This approximates a sliding tracking window by interpolating
 * partially elapsed windows using historical growth data from the
 * previous intervals. This minimizes memory requirements (by not storing
 * all the intermediate values in the previous window) and simplifies
 * the calculations. It works well because PSI signal changes only in
 * positive direction and over relatively small window sizes the growth
 * is close to linear.
 */
static u64 window_update(struct psi_window *win, u64 now, u64 value)
{
        u64 elapsed;
        u64 growth;

        elapsed = now - win->start_time;
        growth = value - win->start_value;
        /*
         * Once the window has fully elapsed, reset it and start a new
         * one with the current state as the baseline. For a partially
         * elapsed window, estimate the total growth as the observed
         * growth plus a proportional share of the growth recorded
         * over the previous window.
         */
        if (elapsed > win->size)
                window_reset(win, now, value, growth);
        else {
                u32 remaining;

                remaining = win->size - elapsed;
                growth += div_u64(win->prev_growth * remaining, win->size);
        }

        return growth;
}
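
/*
 * When polling (re)starts, reset all trigger windows against the
 * current totals and schedule the first trigger update.
 */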
static void init_triggers(struct psi_group *group, u64 now)
{
        struct psi_trigger *t;

        list_for_each_entry(t, &group->triggers, node)
                window_reset(&t->win, now,
                             group->total[PSI_POLL][t->state], 0);
        memcpy(group->polling_total, group->total[PSI_POLL],
               sizeof(group->polling_total));
        group->polling_next_update = now + group->poll_min_period;
}

static u64 update_triggers(struct psi_group *group, u64 now)
{
        struct psi_trigger *t;
        bool new_stall = false;
        u64 *total = group->total[PSI_POLL];

        /*
         * On subsequent updates, calculate growth deltas and let
         * watchers know when their specified thresholds are exceeded.
         */
        list_for_each_entry(t, &group->triggers, node) {
                u64 growth;

                /* Check for stall activity */
                if (group->polling_total[t->state] == total[t->state])
                        continue;

                /*
                 * Multiple triggers might be looking at the same state,
                 * remember to update group->polling_total[] once we've
                 * been through all of them. Also remember to extend the
                 * polling time if we see new stall activity.
                 */
                new_stall = true;

                /* Calculate growth since last update */
                growth = window_update(&t->win, now, total[t->state]);
                if (growth < t->threshold)
                        continue;

                /* Limit event signaling to once per window */
                if (now < t->last_event_time + t->win.size)
                        continue;

                /* Generate an event */
                if (cmpxchg(&t->event, 0, 1) == 0)
                        wake_up_interruptible(&t->event_wait);
                t->last_event_time = now;
        }

        if (new_stall)
                memcpy(group->polling_total, total,
                       sizeof(group->polling_total));

        return now + group->poll_min_period;
}

/*
 * Schedule polling if it's not already scheduled. It's safe to call even from
 * hotpath because even though kthread_queue_delayed_work takes worker->lock
 * spinlock that spinlock is never contended due to poll_scheduled atomic
 * preventing such competition.
 */
static void psi_schedule_poll_work(struct psi_group *group, unsigned long delay)
{
        struct kthread_worker *kworker;

        /* Do not reschedule if already scheduled */
        if (atomic_cmpxchg(&group->poll_scheduled, 0, 1) != 0)
                return;

        rcu_read_lock();

        kworker = rcu_dereference(group->poll_kworker);
        /*
         * kworker might be NULL in case psi_trigger_destroy races with
         * psi_task_change (hotpath) which can't use locks
         */
        if (likely(kworker))
                kthread_queue_delayed_work(kworker, &group->poll_work, delay);
        else
                atomic_set(&group->poll_scheduled, 0);

        rcu_read_unlock();
}
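
/*
 * The trigger polling worker: collects the per-cpu times, checks the
 * triggers while stall activity is ongoing, and reschedules itself
 * until the monitored states have been quiet for the duration of one
 * minimum tracking window.
 */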
static void psi_poll_work(struct kthread_work *work)
{
        struct kthread_delayed_work *dwork;
        struct psi_group *group;
        u32 changed_states;
        u64 now;

        dwork = container_of(work, struct kthread_delayed_work, work);
        group = container_of(dwork, struct psi_group, poll_work);

        atomic_set(&group->poll_scheduled, 0);

        mutex_lock(&group->trigger_lock);

        now = sched_clock();

        collect_percpu_times(group, PSI_POLL, &changed_states);

        if (changed_states & group->poll_states) {
                /* Initialize trigger windows when entering polling mode */
                if (now > group->polling_until)
                        init_triggers(group, now);

                /*
                 * Keep the monitor active for at least the duration of the
                 * minimum tracking window as long as monitor states are
                 * changing.
                 */
                group->polling_until = now +
                        group->poll_min_period * UPDATES_PER_WINDOW;
        }

        if (now > group->polling_until) {
                group->polling_next_update = ULLONG_MAX;
                goto out;
        }

        if (now >= group->polling_next_update)
                group->polling_next_update = update_triggers(group, now);

        psi_schedule_poll_work(group,
                nsecs_to_jiffies(group->polling_next_update - now) + 1);

out:
        mutex_unlock(&group->trigger_lock);
}
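
/*
 * Account the time since the last state change to all currently
 * asserted pressure states. From the timer tick (@memstall_tick), a
 * reclaiming CPU is additionally sampled as MEM FULL time even while
 * other tasks remain runnable - see the comment below.
 */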
static void record_times(struct psi_group_cpu *groupc, int cpu,
                         bool memstall_tick)
{
        u32 delta;
        u64 now;

        now = cpu_clock(cpu);
        delta = now - groupc->state_start;
        groupc->state_start = now;

        if (groupc->state_mask & (1 << PSI_IO_SOME)) {
                groupc->times[PSI_IO_SOME] += delta;
                if (groupc->state_mask & (1 << PSI_IO_FULL))
                        groupc->times[PSI_IO_FULL] += delta;
        }

        if (groupc->state_mask & (1 << PSI_MEM_SOME)) {
                groupc->times[PSI_MEM_SOME] += delta;
                if (groupc->state_mask & (1 << PSI_MEM_FULL))
                        groupc->times[PSI_MEM_FULL] += delta;
                else if (memstall_tick) {
                        u32 sample;
                        /*
                         * Since we care about lost potential, a
                         * memstall is FULL when there are no other
                         * working tasks, but also when the CPU is
                         * actively reclaiming and nothing productive
                         * could run even if it were runnable.
                         *
                         * When the timer tick sees a reclaiming CPU,
                         * regardless of runnable tasks, sample a FULL
                         * tick (or less if it hasn't been a full tick
                         * since the last state change).
                         */
                        sample = min(delta, (u32)jiffies_to_nsecs(1));
                        groupc->times[PSI_MEM_FULL] += sample;
                }
        }

        if (groupc->state_mask & (1 << PSI_CPU_SOME))
                groupc->times[PSI_CPU_SOME] += delta;

        if (groupc->state_mask & (1 << PSI_NONIDLE))
                groupc->times[PSI_NONIDLE] += delta;
}

static u32 psi_group_change(struct psi_group *group, int cpu,
                            unsigned int clear, unsigned int set)
{
        struct psi_group_cpu *groupc;
        unsigned int t, m;
        enum psi_states s;
        u32 state_mask = 0;

        groupc = per_cpu_ptr(group->pcpu, cpu);

        /*
         * First we assess the aggregate resource states this CPU's
         * tasks have been in since the last change, and account any
         * SOME and FULL time these may have resulted in.
         *
         * Then we update the task counts according to the state
         * change requested through the @clear and @set bits.
         */
        write_seqcount_begin(&groupc->seq);

        record_times(groupc, cpu, false);

        for (t = 0, m = clear; m; m &= ~(1 << t), t++) {
                if (!(m & (1 << t)))
                        continue;
                if (groupc->tasks[t] == 0 && !psi_bug) {
                        printk_deferred(KERN_ERR "psi: task underflow! cpu=%d t=%d tasks=[%u %u %u] clear=%x set=%x\n",
                                        cpu, t, groupc->tasks[0],
                                        groupc->tasks[1], groupc->tasks[2],
                                        clear, set);
                        psi_bug = 1;
                }
                groupc->tasks[t]--;
        }

        for (t = 0; set; set &= ~(1 << t), t++)
                if (set & (1 << t))
                        groupc->tasks[t]++;

        /* Calculate state mask representing active states */
        for (s = 0; s < NR_PSI_STATES; s++) {
                if (test_state(groupc->tasks, s))
                        state_mask |= (1 << s);
        }
        groupc->state_mask = state_mask;

        write_seqcount_end(&groupc->seq);

        return state_mask;
}
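
/*
 * Walk the psi groups a task belongs to: with cgroups enabled, its
 * cgroup's ancestry up to (but excluding) the root cgroup, followed
 * by the system-wide group; without cgroups, the system group only.
 */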
static struct psi_group *iterate_groups(struct task_struct *task, void **iter)
{
#ifdef CONFIG_CGROUPS
        struct cgroup *cgroup = NULL;

        if (!*iter)
                cgroup = task->cgroups->dfl_cgrp;
        else if (*iter == &psi_system)
                return NULL;
        else
                cgroup = cgroup_parent(*iter);

        if (cgroup && cgroup_parent(cgroup)) {
                *iter = cgroup;
                return cgroup_psi(cgroup);
        }
#else
        if (*iter)
                return NULL;
#endif
        *iter = &psi_system;
        return &psi_system;
}
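
/*
 * Apply a task state change (@clear and @set bits) on every psi group
 * the task belongs to, kick the poll worker if a monitored state
 * turned on, and make sure the averages clock is running.
 */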
void psi_task_change(struct task_struct *task, int clear, int set)
{
        int cpu = task_cpu(task);
        struct psi_group *group;
        bool wake_clock = true;
        void *iter = NULL;

        if (!task->pid)
                return;

        if (((task->psi_flags & set) ||
             (task->psi_flags & clear) != clear) &&
            !psi_bug) {
                printk_deferred(KERN_ERR "psi: inconsistent task state! task=%d:%s cpu=%d psi_flags=%x clear=%x set=%x\n",
                                task->pid, task->comm, cpu,
                                task->psi_flags, clear, set);
                psi_bug = 1;
        }

        task->psi_flags &= ~clear;
        task->psi_flags |= set;

        /*
         * Periodic aggregation shuts off if there is a period of no
         * task changes, so we wake it back up if necessary. However,
         * don't do this if the task change is the aggregation worker
         * itself going to sleep, or we'll ping-pong forever.
         */
        if (unlikely((clear & TSK_RUNNING) &&
                     (task->flags & PF_WQ_WORKER) &&
                     wq_worker_last_func(task) == psi_avgs_work))
                wake_clock = false;

        while ((group = iterate_groups(task, &iter))) {
                u32 state_mask = psi_group_change(group, cpu, clear, set);

                if (state_mask & group->poll_states)
                        psi_schedule_poll_work(group, 1);

                if (wake_clock && !delayed_work_pending(&group->avgs_work))
                        schedule_delayed_work(&group->avgs_work, PSI_FREQ);
        }
}
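
/*
 * Called from the scheduler tick for tasks in memory reclaim, so that
 * a CPU that spends its time reclaiming is accounted as FULL even
 * when other tasks are technically runnable - see record_times().
 */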
void psi_memstall_tick(struct task_struct *task, int cpu)
{
        struct psi_group *group;
        void *iter = NULL;

        while ((group = iterate_groups(task, &iter))) {
                struct psi_group_cpu *groupc;

                groupc = per_cpu_ptr(group->pcpu, cpu);
                write_seqcount_begin(&groupc->seq);
                record_times(groupc, cpu, true);
                write_seqcount_end(&groupc->seq);
        }
}

/**
 * psi_memstall_enter - mark the beginning of a memory stall section
 * @flags: flags to handle nested sections
 *
 * Marks the calling task as being stalled due to a lack of memory,
 * such as waiting for a refault or performing reclaim.
 */
void psi_memstall_enter(unsigned long *flags)
{
        struct rq_flags rf;
        struct rq *rq;

        if (static_branch_likely(&psi_disabled))
                return;

        *flags = current->flags & PF_MEMSTALL;
        if (*flags)
                return;
        /*
         * PF_MEMSTALL setting & accounting needs to be atomic wrt
         * changes to the task's scheduling state, otherwise we can
         * race with CPU migration.
         */
        rq = this_rq_lock_irq(&rf);

        current->flags |= PF_MEMSTALL;
        psi_task_change(current, 0, TSK_MEMSTALL);

        rq_unlock_irq(rq, &rf);
}

/**
 * psi_memstall_leave - mark the end of a memory stall section
 * @flags: flags to handle nested memdelay sections
 *
 * Marks the calling task as no longer stalled due to lack of memory.
 */
void psi_memstall_leave(unsigned long *flags)
{
        struct rq_flags rf;
        struct rq *rq;

        if (static_branch_likely(&psi_disabled))
                return;

        if (*flags)
                return;
        /*
         * PF_MEMSTALL clearing & accounting needs to be atomic wrt
         * changes to the task's scheduling state, otherwise we could
         * race with CPU migration.
         */
        rq = this_rq_lock_irq(&rf);

        current->flags &= ~PF_MEMSTALL;
        psi_task_change(current, TSK_MEMSTALL, 0);

        rq_unlock_irq(rq, &rf);
}

#ifdef CONFIG_CGROUPS
int psi_cgroup_alloc(struct cgroup *cgroup)
{
        if (static_branch_likely(&psi_disabled))
                return 0;

        cgroup->psi.pcpu = alloc_percpu(struct psi_group_cpu);
        if (!cgroup->psi.pcpu)
                return -ENOMEM;
        group_init(&cgroup->psi);
        return 0;
}

void psi_cgroup_free(struct cgroup *cgroup)
{
        if (static_branch_likely(&psi_disabled))
                return;

        cancel_delayed_work_sync(&cgroup->psi.avgs_work);
        free_percpu(cgroup->psi.pcpu);
        /* All triggers must be removed by now */
        WARN_ONCE(cgroup->psi.poll_states, "psi: trigger leak\n");
}

/**
 * cgroup_move_task - move task to a different cgroup
 * @task: the task
 * @to: the target css_set
 *
 * Move task to a new cgroup and safely migrate its associated stall
 * state between the different groups.
 *
 * This function acquires the task's rq lock to lock out concurrent
 * changes to the task's scheduling state and - in case the task is
 * running - concurrent changes to its stall state.
 */
void cgroup_move_task(struct task_struct *task, struct css_set *to)
{
        unsigned int task_flags = 0;
        struct rq_flags rf;
        struct rq *rq;

        if (static_branch_likely(&psi_disabled)) {
                /*
                 * Lame to do this here, but the scheduler cannot be locked
                 * from the outside, so we move cgroups from inside sched/.
                 */
                rcu_assign_pointer(task->cgroups, to);
                return;
        }

        rq = task_rq_lock(task, &rf);

        if (task_on_rq_queued(task))
                task_flags = TSK_RUNNING;
        else if (task->in_iowait)
                task_flags = TSK_IOWAIT;

        if (task->flags & PF_MEMSTALL)
                task_flags |= TSK_MEMSTALL;

        if (task_flags)
                psi_task_change(task, task_flags, 0);

        /* See comment above */
        rcu_assign_pointer(task->cgroups, to);

        if (task_flags)
                psi_task_change(task, 0, task_flags);

        task_rq_unlock(rq, task, &rf);
}
#endif
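
/*
 * Produce the "some"/"full" pressure output for /proc/pressure/* and
 * the cgroup pressure files: refresh the averages if an update is
 * due, then print the avg10/avg60/avg300 percentages and the total
 * stall time in microseconds.
 */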
int psi_show(struct seq_file *m, struct psi_group *group, enum psi_res res)
{
        int full;
        u64 now;

        if (static_branch_likely(&psi_disabled))
                return -EOPNOTSUPP;

        /* Update averages before reporting them */
        mutex_lock(&group->avgs_lock);
        now = sched_clock();
        collect_percpu_times(group, PSI_AVGS, NULL);
        if (now >= group->avg_next_update)
                group->avg_next_update = update_averages(group, now);
        mutex_unlock(&group->avgs_lock);

        for (full = 0; full < 2 - (res == PSI_CPU); full++) {
                unsigned long avg[3];
                u64 total;
                int w;

                for (w = 0; w < 3; w++)
                        avg[w] = group->avg[res * 2 + full][w];
                total = div_u64(group->total[PSI_AVGS][res * 2 + full],
                                NSEC_PER_USEC);

                seq_printf(m, "%s avg10=%lu.%02lu avg60=%lu.%02lu avg300=%lu.%02lu total=%llu\n",
                           full ? "full" : "some",
                           LOAD_INT(avg[0]), LOAD_FRAC(avg[0]),
                           LOAD_INT(avg[1]), LOAD_FRAC(avg[1]),
                           LOAD_INT(avg[2]), LOAD_FRAC(avg[2]),
                           total);
        }

        return 0;
}

static int psi_io_show(struct seq_file *m, void *v)
{
        return psi_show(m, &psi_system, PSI_IO);
}

static int psi_memory_show(struct seq_file *m, void *v)
{
        return psi_show(m, &psi_system, PSI_MEM);
}

static int psi_cpu_show(struct seq_file *m, void *v)
{
        return psi_show(m, &psi_system, PSI_CPU);
}

static int psi_io_open(struct inode *inode, struct file *file)
{
        return single_open(file, psi_io_show, NULL);
}

static int psi_memory_open(struct inode *inode, struct file *file)
{
        return single_open(file, psi_memory_show, NULL);
}

static int psi_cpu_open(struct inode *inode, struct file *file)
{
        return single_open(file, psi_cpu_show, NULL);
}
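
/*
 * Set up a userspace trigger of the form "some <threshold_us>
 * <window_us>" or "full <threshold_us> <window_us>": the caller is
 * woken when stall time in the given state grows by more than the
 * threshold within any tracking window. The first trigger on a group
 * also starts the RT "psimon" kthread that does the polling.
 */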
struct psi_trigger *psi_trigger_create(struct psi_group *group,
                        char *buf, size_t nbytes, enum psi_res res)
{
        struct psi_trigger *t;
        enum psi_states state;
        u32 threshold_us;
        u32 window_us;

        if (static_branch_likely(&psi_disabled))
                return ERR_PTR(-EOPNOTSUPP);

        if (sscanf(buf, "some %u %u", &threshold_us, &window_us) == 2)
                state = PSI_IO_SOME + res * 2;
        else if (sscanf(buf, "full %u %u", &threshold_us, &window_us) == 2)
                state = PSI_IO_FULL + res * 2;
        else
                return ERR_PTR(-EINVAL);

        if (state >= PSI_NONIDLE)
                return ERR_PTR(-EINVAL);

        if (window_us < WINDOW_MIN_US ||
            window_us > WINDOW_MAX_US)
                return ERR_PTR(-EINVAL);

        /* Check threshold */
        if (threshold_us == 0 || threshold_us > window_us)
                return ERR_PTR(-EINVAL);

        t = kmalloc(sizeof(*t), GFP_KERNEL);
        if (!t)
                return ERR_PTR(-ENOMEM);

        t->group = group;
        t->state = state;
        t->threshold = threshold_us * NSEC_PER_USEC;
        t->win.size = window_us * NSEC_PER_USEC;
        window_reset(&t->win, 0, 0, 0);

        t->event = 0;
        t->last_event_time = 0;
        init_waitqueue_head(&t->event_wait);
        kref_init(&t->refcount);

        mutex_lock(&group->trigger_lock);

        if (!rcu_access_pointer(group->poll_kworker)) {
                struct sched_param param = {
                        .sched_priority = 1,
                };
                struct kthread_worker *kworker;

                kworker = kthread_create_worker(0, "psimon");
                if (IS_ERR(kworker)) {
                        kfree(t);
                        mutex_unlock(&group->trigger_lock);
                        return ERR_CAST(kworker);
                }
                sched_setscheduler_nocheck(kworker->task, SCHED_FIFO, &param);
                kthread_init_delayed_work(&group->poll_work,
                                psi_poll_work);
                rcu_assign_pointer(group->poll_kworker, kworker);
        }

        list_add(&t->node, &group->triggers);
        group->poll_min_period = min(group->poll_min_period,
                div_u64(t->win.size, UPDATES_PER_WINDOW));
        group->nr_triggers[t->state]++;
        group->poll_states |= (1 << t->state);

        mutex_unlock(&group->trigger_lock);

        return t;
}

static void psi_trigger_destroy(struct kref *ref)
{
        struct psi_trigger *t = container_of(ref, struct psi_trigger, refcount);
        struct psi_group *group = t->group;
        struct kthread_worker *kworker_to_destroy = NULL;

        if (static_branch_likely(&psi_disabled))
                return;

        /*
         * Wakeup waiters to stop polling. Can happen if cgroup is deleted
         * from under a polling process.
         */
        wake_up_interruptible(&t->event_wait);

        mutex_lock(&group->trigger_lock);

        if (!list_empty(&t->node)) {
                struct psi_trigger *tmp;
                u64 period = ULLONG_MAX;

                list_del(&t->node);
                group->nr_triggers[t->state]--;
                if (!group->nr_triggers[t->state])
                        group->poll_states &= ~(1 << t->state);
                /* Reset min update period for the remaining triggers */
                list_for_each_entry(tmp, &group->triggers, node)
                        period = min(period, div_u64(tmp->win.size,
                                        UPDATES_PER_WINDOW));
                group->poll_min_period = period;
                /* Destroy poll_kworker when the last trigger is destroyed */
                if (group->poll_states == 0) {
                        group->polling_until = 0;
                        kworker_to_destroy = rcu_dereference_protected(
                                        group->poll_kworker,
                                        lockdep_is_held(&group->trigger_lock));
                        rcu_assign_pointer(group->poll_kworker, NULL);
                }
        }

        mutex_unlock(&group->trigger_lock);

        /*
         * Wait for both *trigger_ptr from psi_trigger_replace and
         * poll_kworker RCU readers (psi_schedule_poll_work) to complete
         * their read-side critical sections before destroying the trigger
         * and optionally the poll_kworker.
         */
        synchronize_rcu();
        /*
         * Destroy the kworker after releasing trigger_lock to prevent a
         * deadlock while waiting for psi_poll_work to acquire trigger_lock.
         */
        if (kworker_to_destroy) {
                /*
                 * After the RCU grace period has expired, the worker
                 * can no longer be found through group->poll_kworker.
                 * But it might have been already scheduled before
                 * that - deschedule it cleanly before destroying it.
                 */
                kthread_cancel_delayed_work_sync(&group->poll_work);
                atomic_set(&group->poll_scheduled, 0);

                kthread_destroy_worker(kworker_to_destroy);
        }
        kfree(t);
}

void psi_trigger_replace(void **trigger_ptr, struct psi_trigger *new)
{
        struct psi_trigger *old = *trigger_ptr;

        if (static_branch_likely(&psi_disabled))
                return;

        rcu_assign_pointer(*trigger_ptr, new);
        if (old)
                kref_put(&old->refcount, psi_trigger_destroy);
}

__poll_t psi_trigger_poll(void **trigger_ptr,
                          struct file *file, poll_table *wait)
{
        __poll_t ret = DEFAULT_POLLMASK;
        struct psi_trigger *t;

        if (static_branch_likely(&psi_disabled))
                return DEFAULT_POLLMASK | EPOLLERR | EPOLLPRI;

        rcu_read_lock();

        t = rcu_dereference(*(void __rcu __force **)trigger_ptr);
        if (!t) {
                rcu_read_unlock();
                return DEFAULT_POLLMASK | EPOLLERR | EPOLLPRI;
        }
        kref_get(&t->refcount);

        rcu_read_unlock();

        poll_wait(file, &t->event_wait, wait);

        if (cmpxchg(&t->event, 1, 0) == 1)
                ret |= EPOLLPRI;

        kref_put(&t->refcount, psi_trigger_destroy);

        return ret;
}
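
/*
 * Writing to /proc/pressure/* creates a new trigger for the open file
 * descriptor, replacing (and dropping the reference of) any trigger
 * previously attached to it.
 */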
static ssize_t psi_write(struct file *file, const char __user *user_buf,
                         size_t nbytes, enum psi_res res)
{
        char buf[32];
        size_t buf_size;
        struct seq_file *seq;
        struct psi_trigger *new;

        if (static_branch_likely(&psi_disabled))
                return -EOPNOTSUPP;

        /* A zero-length write would index buf[] out of bounds below */
        if (!nbytes)
                return -EINVAL;

        buf_size = min(nbytes, (sizeof(buf) - 1));
        if (copy_from_user(buf, user_buf, buf_size))
                return -EFAULT;

        buf[buf_size - 1] = '\0';

        new = psi_trigger_create(&psi_system, buf, nbytes, res);
        if (IS_ERR(new))
                return PTR_ERR(new);

        seq = file->private_data;
        /* Take seq->lock to protect seq->private from concurrent writes */
        mutex_lock(&seq->lock);
        psi_trigger_replace(&seq->private, new);
        mutex_unlock(&seq->lock);

        return nbytes;
}

static ssize_t psi_io_write(struct file *file, const char __user *user_buf,
                            size_t nbytes, loff_t *ppos)
{
        return psi_write(file, user_buf, nbytes, PSI_IO);
}

static ssize_t psi_memory_write(struct file *file, const char __user *user_buf,
                                size_t nbytes, loff_t *ppos)
{
        return psi_write(file, user_buf, nbytes, PSI_MEM);
}

static ssize_t psi_cpu_write(struct file *file, const char __user *user_buf,
                             size_t nbytes, loff_t *ppos)
{
        return psi_write(file, user_buf, nbytes, PSI_CPU);
}

static __poll_t psi_fop_poll(struct file *file, poll_table *wait)
{
        struct seq_file *seq = file->private_data;

        return psi_trigger_poll(&seq->private, file, wait);
}

static int psi_fop_release(struct inode *inode, struct file *file)
{
        struct seq_file *seq = file->private_data;

        psi_trigger_replace(&seq->private, NULL);
        return single_release(inode, file);
}

static const struct file_operations psi_io_fops = {
        .open           = psi_io_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .write          = psi_io_write,
        .poll           = psi_fop_poll,
        .release        = psi_fop_release,
};

static const struct file_operations psi_memory_fops = {
        .open           = psi_memory_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .write          = psi_memory_write,
        .poll           = psi_fop_poll,
        .release        = psi_fop_release,
};

static const struct file_operations psi_cpu_fops = {
        .open           = psi_cpu_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .write          = psi_cpu_write,
        .poll           = psi_fop_poll,
        .release        = psi_fop_release,
};

static int __init psi_proc_init(void)
{
        proc_mkdir("pressure", NULL);
        proc_create("pressure/io", 0, NULL, &psi_io_fops);
        proc_create("pressure/memory", 0, NULL, &psi_memory_fops);
        proc_create("pressure/cpu", 0, NULL, &psi_cpu_fops);
        return 0;
}
module_init(psi_proc_init);