#ifdef CONFIG_SMP
#include "sched-pelt.h"

int __update_load_avg_blocked_se(u64 now, struct sched_entity *se);
int __update_load_avg_se(u64 now, struct cfs_rq *cfs_rq, struct sched_entity *se);
int __update_load_avg_cfs_rq(u64 now, struct cfs_rq *cfs_rq);
int update_rt_rq_load_avg(u64 now, struct rq *rq, int running);
int update_dl_rq_load_avg(u64 now, struct rq *rq, int running);

#ifdef CONFIG_SCHED_THERMAL_PRESSURE
int update_thermal_load_avg(u64 now, struct rq *rq, u64 capacity);

static inline u64 thermal_load_avg(struct rq *rq)
{
	return READ_ONCE(rq->avg_thermal.load_avg);
}
#else
static inline int
update_thermal_load_avg(u64 now, struct rq *rq, u64 capacity)
{
	return 0;
}

static inline u64 thermal_load_avg(struct rq *rq)
{
	return 0;
}
#endif

#ifdef CONFIG_HAVE_SCHED_AVG_IRQ
int update_irq_load_avg(struct rq *rq, u64 running);
#else
static inline int
update_irq_load_avg(struct rq *rq, u64 running)
{
	return 0;
}
#endif

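/*
 * The divider for turning a PELT *_sum into a *_avg: the maximum
 * attainable sum (LOAD_AVG_MAX) minus the not-yet-elapsed part of the
 * current 1024us period (1024 - avg->period_contrib).
 */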
static inline u32 get_pelt_divider(struct sched_avg *avg)
{
	return LOAD_AVG_MAX - 1024 + avg->period_contrib;
}

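/*
 * When a sched_entity's util_avg is updated, clear the UTIL_AVG_UNCHANGED
 * flag from util_est.enqueued: the flag marks util_est as being in sync
 * with util_avg, so clearing it lets the next dequeue refresh util_est.
 */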
static inline void cfs_se_util_change(struct sched_avg *avg)
{
	unsigned int enqueued;

	if (!sched_feat(UTIL_EST))
		return;

	/* Avoid store if the flag has been already reset */
	enqueued = avg->util_est.enqueued;
	if (!(enqueued & UTIL_AVG_UNCHANGED))
		return;

	/* Reset flag to report util_avg has been updated */
	enqueued &= ~UTIL_AVG_UNCHANGED;
	WRITE_ONCE(avg->util_est.enqueued, enqueued);
}

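/*
 * The clock_pelt scales the time to reflect the effective amount of
 * computation done during the running delta time, but then syncs back
 * to clock_task when the rq becomes idle.
 *
 * absolute time   | 1| 2| 3| 4| 5| 6| 7| 8| 9|10|11|12|13|14|15|16
 * @ max capacity  ------******---------------******---------------
 * @ half capacity ------************---------************---------
 * clock pelt      | 1| 2|    3|    4| 7| 8| 9|   10|   11|14|15|16
 */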
static inline void update_rq_clock_pelt(struct rq *rq, s64 delta)
{
	if (unlikely(is_idle_task(rq->curr))) {
		/* The rq is idle, we can sync to clock_task */
		rq->clock_pelt = rq_clock_task(rq);
		return;
	}

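	/*
	 * When a rq runs at a lower compute capacity, it will need more
	 * time to do the same amount of work than at max capacity. In
	 * order to be invariant, we scale the delta to reflect how much
	 * work has actually been done.
	 * Running longer results in stealing idle time that will disturb
	 * the load signal compared to max capacity; this stolen idle time
	 * is reflected automatically when the rq becomes idle and the
	 * clock is synced with rq_clock_task().
	 */

	/*
	 * Scale the elapsed time to reflect the real amount of
	 * computation.
	 */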
	delta = cap_scale(delta, arch_scale_cpu_capacity(cpu_of(rq)));
	delta = cap_scale(delta, arch_scale_freq_capacity(cpu_of(rq)));

	rq->clock_pelt += delta;
}

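/*
 * When a rq becomes idle, we have to check whether it lost idle time
 * because it was fully busy. A rq is fully used when the /Sum util_sum
 * is greater than or equal to:
 * (LOAD_AVG_MAX - 1024 + rq->cfs.avg.period_contrib) << SCHED_CAPACITY_SHIFT
 * For optimization purposes, period_contrib is not taken into account
 * below, which simplifies the formula.
 */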
static inline void update_idle_rq_clock_pelt(struct rq *rq)
{
	u32 divider = ((LOAD_AVG_MAX - 1024) << SCHED_CAPACITY_SHIFT) - LOAD_AVG_MAX;
	u32 util_sum = rq->cfs.avg.util_sum;
	util_sum += rq->avg_rt.util_sum;
	util_sum += rq->avg_dl.util_sum;

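	/*
	 * Reflecting stolen time makes sense only if the idle phase would
	 * be present at max capacity. As soon as the utilization of a rq
	 * has reached the maximum value, it is considered an always-running
	 * rq without idle time to steal. This potential idle time is
	 * accounted as lost: we track it relative to the rq's clock_task.
	 */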
	if (util_sum >= divider)
		rq->lost_idle_time += rq_clock_task(rq) - rq->clock_pelt;
}

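/*
 * The rq's PELT clock: clock_pelt minus the idle time that was "lost"
 * while the rq was running at full utilization.
 */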
static inline u64 rq_clock_pelt(struct rq *rq)
{
	lockdep_assert_rq_held(rq);
	assert_clock_updated(rq);

	return rq->clock_pelt - rq->lost_idle_time;
}

#ifdef CONFIG_CFS_BANDWIDTH
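/* rq->task_clock normalized against any time this cfs_rq has spent throttled */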
static inline u64 cfs_rq_clock_pelt(struct cfs_rq *cfs_rq)
{
	if (unlikely(cfs_rq->throttle_count))
		return cfs_rq->throttled_clock_task - cfs_rq->throttled_clock_task_time;

	return rq_clock_pelt(rq_of(cfs_rq)) - cfs_rq->throttled_clock_task_time;
}
#else
static inline u64 cfs_rq_clock_pelt(struct cfs_rq *cfs_rq)
{
	return rq_clock_pelt(rq_of(cfs_rq));
}
#endif

#else /* !CONFIG_SMP */

static inline int
update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
{
	return 0;
}

static inline int
update_rt_rq_load_avg(u64 now, struct rq *rq, int running)
{
	return 0;
}

static inline int
update_dl_rq_load_avg(u64 now, struct rq *rq, int running)
{
	return 0;
}

static inline int
update_thermal_load_avg(u64 now, struct rq *rq, u64 capacity)
{
	return 0;
}

static inline u64 thermal_load_avg(struct rq *rq)
{
	return 0;
}

static inline int
update_irq_load_avg(struct rq *rq, u64 running)
{
	return 0;
}

static inline u64 rq_clock_pelt(struct rq *rq)
{
	return rq_clock_task(rq);
}

static inline void
update_rq_clock_pelt(struct rq *rq, s64 delta) { }

static inline void
update_idle_rq_clock_pelt(struct rq *rq) { }

#endif /* CONFIG_SMP */