#ifdef CONFIG_SMP
#include "sched-pelt.h"

int __update_load_avg_blocked_se(u64 now, struct sched_entity *se);
int __update_load_avg_se(u64 now, struct cfs_rq *cfs_rq, struct sched_entity *se);
int __update_load_avg_cfs_rq(u64 now, struct cfs_rq *cfs_rq);
int update_rt_rq_load_avg(u64 now, struct rq *rq, int running);
int update_dl_rq_load_avg(u64 now, struct rq *rq, int running);

#ifdef CONFIG_HAVE_SCHED_AVG_IRQ
int update_irq_load_avg(struct rq *rq, u64 running);
#else
static inline int
update_irq_load_avg(struct rq *rq, u64 running)
{
	return 0;
}
#endif

/*
 * When a task is dequeued, its estimated utilization should not be updated if
 * its util_avg has not been updated at least once.
 * This flag is used to synchronize util_avg updates with util_est updates.
 * We map this information into the LSB bit of the utilization saved at
 * dequeue time (i.e. util_est.dequeued).
 */
#define UTIL_AVG_UNCHANGED 0x1

static inline void cfs_se_util_change(struct sched_avg *avg)
{
	unsigned int enqueued;

	if (!sched_feat(UTIL_EST))
		return;

	/* Avoid the store if the flag has already been cleared */
	enqueued = avg->util_est.enqueued;
	if (!(enqueued & UTIL_AVG_UNCHANGED))
		return;

	/* Reset flag to report util_avg has been updated */
	enqueued &= ~UTIL_AVG_UNCHANGED;
	WRITE_ONCE(avg->util_est.enqueued, enqueued);
}
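
/*
 * Sketch of the flag protocol (illustrative; the dequeue-side update lives
 * in the util_est code in fair.c, not in this file): the LSB of
 * util_est.enqueued is set when the estimate is saved at dequeue time and
 * cleared by cfs_se_util_change() on the first util_avg update, e.g.:
 *
 *	// dequeue path (assumed shape, for illustration only)
 *	WRITE_ONCE(avg->util_est.enqueued, new_est | UTIL_AVG_UNCHANGED);
 *
 *	// every util_avg update
 *	cfs_se_util_change(avg);	// clears the LSB once, then no-ops
 *
 * Stealing the LSB costs at most 1 unit out of SCHED_CAPACITY_SCALE (1024)
 * of precision in the stored estimate.
 */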

/*
 * The clock_pelt scales the time to reflect the effective amount of
 * computation done during the running delta time, but then syncs back to
 * clock_task when the rq is idle.
 *
 * absolute time    | 1| 2| 3| 4| 5| 6| 7| 8| 9|10|11|12|13|14|15|16
 * @ max capacity   ------******---------------******---------------
 * @ half capacity  ------************---------************---------
 * clock pelt       | 1| 2|    3|    4| 7| 8| 9|   10|   11|14|15|16
 */
static inline void update_rq_clock_pelt(struct rq *rq, s64 delta)
{
	if (unlikely(is_idle_task(rq->curr))) {
		/* The rq is idle, we can sync to clock_task */
		rq->clock_pelt = rq_clock_task(rq);
		return;
	}

	/*
	 * When a rq runs at a lower compute capacity, it will need more
	 * time to do the same amount of work than at max capacity. In
	 * order to be invariant, we scale the delta to reflect how much
	 * work has really been done.
	 * Running longer steals time that would have been idle at max
	 * capacity; this stolen idle time is reflected automatically when
	 * the rq becomes idle and the clock is synced with rq_clock_task().
	 *
	 * Scale the elapsed time to reflect the real amount of computation.
	 */
	delta = cap_scale(delta, arch_scale_cpu_capacity(cpu_of(rq)));
	delta = cap_scale(delta, arch_scale_freq_capacity(cpu_of(rq)));

	rq->clock_pelt += delta;
}
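
/*
 * Worked example (illustrative, not from this file): cap_scale() is
 * defined elsewhere in the scheduler as (v * s) >> SCHED_CAPACITY_SHIFT,
 * with SCHED_CAPACITY_SHIFT == 10. Assuming a CPU capacity of 512 (half
 * of SCHED_CAPACITY_SCALE) at full frequency (1024), a 4ms running delta
 * advances clock_pelt by:
 *
 *	delta = (4000000 *  512) >> 10;		// 2000000 ns, CPU invariance
 *	delta = (2000000 * 1024) >> 10;		// unchanged, freq invariance
 *
 * i.e. time at half capacity is compressed 2:1, matching the diagram above.
 */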

/*
 * When the rq becomes idle, we have to check if it has lost idle time
 * because it was fully busy. A rq is fully used when the /Sum util_sum
 * is greater than or equal to:
 * (LOAD_AVG_MAX - 1024 + rq->cfs.avg.period_contrib) << SCHED_CAPACITY_SHIFT;
 * For optimization and rounding purposes, we don't take into account the
 * position in the current window (period_contrib) and we use the higher
 * bound of util_sum to decide.
 */
static inline void update_idle_rq_clock_pelt(struct rq *rq)
{
	u32 divider = ((LOAD_AVG_MAX - 1024) << SCHED_CAPACITY_SHIFT) - LOAD_AVG_MAX;
	u32 util_sum = rq->cfs.avg.util_sum;

	util_sum += rq->avg_rt.util_sum;
	util_sum += rq->avg_dl.util_sum;

	/*
	 * Reflecting stolen time makes sense only if the idle phase would
	 * be present at max capacity. As soon as the utilization of a rq
	 * has reached the maximum value, it is considered an always running
	 * rq without idle time to steal. This potential idle time is
	 * considered lost in that case. We keep track of this lost idle
	 * time compared to rq's clock_task.
	 */
	if (util_sum >= divider)
		rq->lost_idle_time += rq_clock_task(rq) - rq->clock_pelt;
}
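
/*
 * Numeric sketch (illustrative, assuming the usual LOAD_AVG_MAX of 47742
 * from sched-pelt.h and SCHED_CAPACITY_SHIFT == 10):
 *
 *	divider = ((47742 - 1024) << 10) - 47742 = 47791490
 *
 * so a rq is treated as fully busy once the summed util_sum comes within
 * one decay window (plus the LOAD_AVG_MAX rounding slack) of its maximum.
 */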

static inline u64 rq_clock_pelt(struct rq *rq)
{
	lockdep_assert_held(&rq->lock);
	assert_clock_updated(rq);

	return rq->clock_pelt - rq->lost_idle_time;
}

#ifdef CONFIG_CFS_BANDWIDTH
/* rq->task_clock normalized against any time this cfs_rq has spent throttled */
static inline u64 cfs_rq_clock_pelt(struct cfs_rq *cfs_rq)
{
	if (unlikely(cfs_rq->throttle_count))
		return cfs_rq->throttled_clock_task - cfs_rq->throttled_clock_task_time;

	return rq_clock_pelt(rq_of(cfs_rq)) - cfs_rq->throttled_clock_task_time;
}
#else
static inline u64 cfs_rq_clock_pelt(struct cfs_rq *cfs_rq)
{
	return rq_clock_pelt(rq_of(cfs_rq));
}
#endif
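
/*
 * Illustrative timeline (a sketch, not kernel code): while throttle_count
 * is non-zero both fields read above are constant, so the returned clock
 * is pinned at the value it had when throttling started, and
 * throttled_clock_task_time accumulates the throttled span. Assuming a
 * cfs_rq throttled from t=10ms to t=14ms, cfs_rq_clock_pelt() returns
 * rq_clock_pelt() - 4ms afterwards: PELT sees no passage of time (and
 * decays nothing) for the throttled window.
 */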

#else

static inline int
update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
{
	return 0;
}

static inline int
update_rt_rq_load_avg(u64 now, struct rq *rq, int running)
{
	return 0;
}

static inline int
update_dl_rq_load_avg(u64 now, struct rq *rq, int running)
{
	return 0;
}

static inline int
update_irq_load_avg(struct rq *rq, u64 running)
{
	return 0;
}

static inline u64 rq_clock_pelt(struct rq *rq)
{
	return rq_clock_task(rq);
}

static inline void
update_rq_clock_pelt(struct rq *rq, s64 delta) { }

static inline void
update_idle_rq_clock_pelt(struct rq *rq) { }

#endif