#ifdef CONFIG_SCHEDSTATS

/*
 * Expects runqueue lock to be held for atomicity of update
 */
static inline void
rq_sched_info_arrive(struct rq *rq, unsigned long long delta)
{
	if (rq) {
		rq->rq_sched_info.run_delay += delta;
		rq->rq_sched_info.pcount++;
	}
}

/*
 * Expects runqueue lock to be held for atomicity of update
 */
static inline void
rq_sched_info_depart(struct rq *rq, unsigned long long delta)
{
	if (rq)
		rq->rq_cpu_time += delta;
}

static inline void
rq_sched_info_dequeued(struct rq *rq, unsigned long long delta)
{
	if (rq)
		rq->rq_sched_info.run_delay += delta;
}

#define   schedstat_enabled()		static_branch_unlikely(&sched_schedstats)
#define __schedstat_inc(var)		do { var++; } while (0)
#define   schedstat_inc(var)		do { if (schedstat_enabled()) { var++; } } while (0)
#define __schedstat_add(var, amt)	do { var += (amt); } while (0)
#define   schedstat_add(var, amt)	do { if (schedstat_enabled()) { var += (amt); } } while (0)
#define __schedstat_set(var, val)	do { var = (val); } while (0)
#define   schedstat_set(var, val)	do { if (schedstat_enabled()) { var = (val); } } while (0)
#define   schedstat_val(var)		(var)
#define   schedstat_val_or_zero(var)	((schedstat_enabled()) ? (var) : 0)
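
/*
 * Usage sketch (illustrative, not part of this header): the plain
 * schedstat_*() helpers test the static key on every call, so a caller
 * that updates several counters can hoist the check once and use the
 * underscore variants for the individual updates, e.g.:
 *
 *	if (schedstat_enabled()) {
 *		__schedstat_inc(rq->rq_sched_info.pcount);
 *		__schedstat_add(rq->rq_sched_info.run_delay, delta);
 *	}
 */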

#else /* !CONFIG_SCHEDSTATS: */

static inline void rq_sched_info_arrive  (struct rq *rq, unsigned long long delta) { }
static inline void rq_sched_info_dequeued(struct rq *rq, unsigned long long delta) { }
static inline void rq_sched_info_depart  (struct rq *rq, unsigned long long delta) { }
# define schedstat_enabled()		0
# define __schedstat_inc(var)		do { } while (0)
# define schedstat_inc(var)		do { } while (0)
# define __schedstat_add(var, amt)	do { } while (0)
# define schedstat_add(var, amt)	do { } while (0)
# define __schedstat_set(var, val)	do { } while (0)
# define schedstat_set(var, val)	do { } while (0)
# define schedstat_val(var)		0
# define schedstat_val_or_zero(var)	0
#endif /* CONFIG_SCHEDSTATS */

#ifdef CONFIG_PSI
/*
 * PSI tracks state that persists across sleeps, such as iowaits and
 * memory stalls. As a result, it has to distinguish between sleeps,
 * where a task's runnable state changes, and requeues, where a task
 * and its state are being moved between CPUs and runqueues.
 */
static inline void psi_enqueue(struct task_struct *p, bool wakeup)
{
	int clear = 0, set = TSK_RUNNING;

	if (static_branch_likely(&psi_disabled))
		return;

	if (!wakeup || p->sched_psi_wake_requeue) {
		if (p->in_memstall)
			set |= TSK_MEMSTALL;
		if (p->sched_psi_wake_requeue)
			p->sched_psi_wake_requeue = 0;
	} else {
		if (p->in_iowait)
			clear |= TSK_IOWAIT;
	}

	psi_task_change(p, clear, set);
}

static inline void psi_dequeue(struct task_struct *p, bool sleep)
{
	int clear = TSK_RUNNING;

	if (static_branch_likely(&psi_disabled))
		return;

	/*
	 * A voluntary sleep is a dequeue followed by a task switch. To
	 * avoid walking all ancestors twice, psi_task_switch() handles
	 * TSK_RUNNING and TSK_IOWAIT for us when it moves TSK_ONCPU.
	 * Do nothing here.
	 */
	if (sleep)
		return;

	if (p->in_memstall)
		clear |= TSK_MEMSTALL;

	psi_task_change(p, clear, 0);
}
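
/*
 * Illustrative caller sketch (simplified from enqueue_task() and
 * dequeue_task() in kernel/sched/core.c; not part of this header). The
 * boolean arguments tell PSI whether the operation is a wakeup or a
 * voluntary sleep, as opposed to a plain requeue:
 *
 *	psi_enqueue(p, flags & ENQUEUE_WAKEUP);
 *	...
 *	psi_dequeue(p, flags & DEQUEUE_SLEEP);
 */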

static inline void psi_ttwu_dequeue(struct task_struct *p)
{
	if (static_branch_likely(&psi_disabled))
		return;

	/*
	 * Is the task being migrated during a wakeup? Make sure to
	 * deregister its sleep-persistent psi states from the old
	 * queue, and let psi_enqueue() know it has to requeue.
	 */
	if (unlikely(p->in_iowait || p->in_memstall)) {
		struct rq_flags rf;
		struct rq *rq;
		int clear = 0;

		if (p->in_iowait)
			clear |= TSK_IOWAIT;
		if (p->in_memstall)
			clear |= TSK_MEMSTALL;

		rq = __task_rq_lock(p, &rf);
		psi_task_change(p, clear, 0);
		p->sched_psi_wake_requeue = 1;
		__task_rq_unlock(rq, &rf);
	}
}
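
/*
 * Illustrative caller sketch (simplified from try_to_wake_up(); details
 * vary by kernel version, not part of this header). The hook only runs
 * when the wakeup migrates the task to another CPU:
 *
 *	if (task_cpu(p) != cpu) {
 *		wake_flags |= WF_MIGRATED;
 *		psi_ttwu_dequeue(p);
 *		set_task_cpu(p, cpu);
 *	}
 */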

static inline void psi_sched_switch(struct task_struct *prev,
				    struct task_struct *next,
				    bool sleep)
{
	if (static_branch_likely(&psi_disabled))
		return;

	psi_task_switch(prev, next, sleep);
}
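
/*
 * Illustrative caller sketch (simplified from __schedule(); not part of
 * this header). "sleep" is true when prev left the runqueue voluntarily:
 *
 *	psi_sched_switch(prev, next, !task_on_rq_queued(prev));
 */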

#else /* CONFIG_PSI */
static inline void psi_enqueue(struct task_struct *p, bool wakeup) {}
static inline void psi_dequeue(struct task_struct *p, bool sleep) {}
static inline void psi_ttwu_dequeue(struct task_struct *p) {}
static inline void psi_sched_switch(struct task_struct *prev,
				    struct task_struct *next,
				    bool sleep) {}
#endif /* CONFIG_PSI */

#ifdef CONFIG_SCHED_INFO
static inline void sched_info_reset_dequeued(struct task_struct *t)
{
	t->sched_info.last_queued = 0;
}

/*
 * We are interested in knowing how long it was from the *first* time a
 * task was queued to the time that it finally hit a CPU; we call this
 * routine from dequeue_task() to account for possible rq->clock skew
 * across CPUs. The delta taken on each CPU would annul the skew.
 */
static inline void sched_info_dequeued(struct rq *rq, struct task_struct *t)
{
	unsigned long long now = rq_clock(rq), delta = 0;

	if (sched_info_on()) {
		if (t->sched_info.last_queued)
			delta = now - t->sched_info.last_queued;
	}
	sched_info_reset_dequeued(t);
	t->sched_info.run_delay += delta;

	rq_sched_info_dequeued(rq, delta);
}

/*
 * Called when a task finally hits the CPU.  We can now calculate how
 * long it was waiting to run.  We also note when it began so that we
 * can keep stats on how long its timeslice is.
 */
static void sched_info_arrive(struct rq *rq, struct task_struct *t)
{
	unsigned long long now = rq_clock(rq), delta = 0;

	if (t->sched_info.last_queued)
		delta = now - t->sched_info.last_queued;
	sched_info_reset_dequeued(t);
	t->sched_info.run_delay += delta;
	t->sched_info.last_arrival = now;
	t->sched_info.pcount++;

	rq_sched_info_arrive(rq, delta);
}
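
/*
 * Worked example (illustrative): a task with last_queued == 100 that
 * first gets the CPU at rq_clock(rq) == 350 accumulates delta == 250
 * nanoseconds of run_delay, bumps pcount, and records last_arrival ==
 * 350 so that sched_info_depart() can later compute how long it ran.
 */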

/*
 * This function is only called from enqueue_task(), but also only updates
 * the timestamp if it is already not set.  It's assumed that
 * sched_info_dequeued() will clear that stamp when appropriate.
 */
static inline void sched_info_queued(struct rq *rq, struct task_struct *t)
{
	if (sched_info_on()) {
		if (!t->sched_info.last_queued)
			t->sched_info.last_queued = rq_clock(rq);
	}
}

/*
 * Called when a process ceases being the active-running process
 * involuntarily due, typically, to expiring its time slice (may also be
 * called when switching to the idle task).  Now we can calculate how
 * long we ran.
 * Also, if the process is still in the TASK_RUNNING state, call
 * sched_info_queued() again to mark that it has now again started
 * waiting on the runqueue.
 */
static inline void sched_info_depart(struct rq *rq, struct task_struct *t)
{
	unsigned long long delta = rq_clock(rq) - t->sched_info.last_arrival;

	rq_sched_info_depart(rq, delta);

	if (t->state == TASK_RUNNING)
		sched_info_queued(rq, t);
}

/*
 * Called when tasks are switched involuntarily due, typically, to expiring
 * their time slice.  (This may also be called when switching to or from
 * the idle task.)  We are only called when prev != next.
 */
static inline void
__sched_info_switch(struct rq *rq, struct task_struct *prev, struct task_struct *next)
{
	/*
	 * prev now departs the CPU.  It's not interesting to record
	 * stats about how efficient we were at scheduling the idle
	 * process, however.
	 */
	if (prev != rq->idle)
		sched_info_depart(rq, prev);

	if (next != rq->idle)
		sched_info_arrive(rq, next);
}
247
248static inline void
249sched_info_switch(struct rq *rq, struct task_struct *prev, struct task_struct *next)
250{
251 if (sched_info_on())
252 __sched_info_switch(rq, prev, next);
253}
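
/*
 * Illustrative caller sketch (simplified from prepare_task_switch() in
 * kernel/sched/core.c; not part of this header). The hook is invoked
 * with the runqueue lock held, right before the context switch:
 *
 *	sched_info_switch(rq, prev, next);
 */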

#else /* !CONFIG_SCHED_INFO: */
# define sched_info_queued(rq, t)	do { } while (0)
# define sched_info_reset_dequeued(t)	do { } while (0)
# define sched_info_dequeued(rq, t)	do { } while (0)
# define sched_info_depart(rq, t)	do { } while (0)
# define sched_info_arrive(rq, next)	do { } while (0)
# define sched_info_switch(rq, t, next)	do { } while (0)
#endif /* CONFIG_SCHED_INFO */