linux/kernel/sched/pelt.h
#ifdef CONFIG_SMP
#include "sched-pelt.h"

int __update_load_avg_blocked_se(u64 now, struct sched_entity *se);
int __update_load_avg_se(u64 now, struct cfs_rq *cfs_rq, struct sched_entity *se);
int __update_load_avg_cfs_rq(u64 now, struct cfs_rq *cfs_rq);
int update_rt_rq_load_avg(u64 now, struct rq *rq, int running);
int update_dl_rq_load_avg(u64 now, struct rq *rq, int running);

#ifdef CONFIG_SCHED_THERMAL_PRESSURE
int update_thermal_load_avg(u64 now, struct rq *rq, u64 capacity);

static inline u64 thermal_load_avg(struct rq *rq)
{
        return READ_ONCE(rq->avg_thermal.load_avg);
}
#else
static inline int
update_thermal_load_avg(u64 now, struct rq *rq, u64 capacity)
{
        return 0;
}

static inline u64 thermal_load_avg(struct rq *rq)
{
        return 0;
}
#endif

#ifdef CONFIG_HAVE_SCHED_AVG_IRQ
int update_irq_load_avg(struct rq *rq, u64 running);
#else
static inline int
update_irq_load_avg(struct rq *rq, u64 running)
{
        return 0;
}
#endif

static inline u32 get_pelt_divider(struct sched_avg *avg)
{
        return LOAD_AVG_MAX - 1024 + avg->period_contrib;
}
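
/*
 * Example (assuming the default 32 ms half-life, for which the generated
 * sched-pelt.h defines LOAD_AVG_MAX as 47742): the divider is the maximum
 * decayed accumulation reachable at the current position in the 1024 us
 * PELT segment, and is what *_sum is divided by to produce *_avg:
 *
 *      period_contrib == 0    -> divider = 47742 - 1024 + 0    = 46718
 *      period_contrib == 1023 -> divider = 47742 - 1024 + 1023 = 47741
 */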

static inline void cfs_se_util_change(struct sched_avg *avg)
{
        unsigned int enqueued;

        if (!sched_feat(UTIL_EST))
                return;

        /* Avoid the store if the flag has already been reset */
        enqueued = avg->util_est.enqueued;
        if (!(enqueued & UTIL_AVG_UNCHANGED))
                return;

        /* Reset flag to report util_avg has been updated */
        enqueued &= ~UTIL_AVG_UNCHANGED;
        WRITE_ONCE(avg->util_est.enqueued, enqueued);
}
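
/*
 * Informal context (as understood from fair.c and pelt.c in this kernel
 * series): the util_est code sets UTIL_AVG_UNCHANGED in util_est.enqueued
 * when it records a task's estimated utilization and skips recomputing the
 * estimate while the flag is still set. cfs_se_util_change() is called from
 * the PELT update path (__update_load_avg_se()) to clear the flag, signalling
 * that util_avg has changed since the last estimate, so the next dequeue
 * recomputes util_est.
 */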

/*
 * The clock_pelt scales the time to reflect the effective amount of
 * computation done during the running delta time but then syncs back to
 * clock_task when rq is idle.
 *
 *
 * absolute time   | 1| 2| 3| 4| 5| 6| 7| 8| 9|10|11|12|13|14|15|16
 * @ max capacity  ------******---------------******---------------
 * @ half capacity ------************---------************---------
 * clock pelt      | 1| 2|    3|    4| 7| 8| 9|   10|   11|14|15|16
 *
 */
static inline void update_rq_clock_pelt(struct rq *rq, s64 delta)
{
        if (unlikely(is_idle_task(rq->curr))) {
                /* The rq is idle, we can sync to clock_task */
                rq->clock_pelt = rq_clock_task(rq);
                return;
        }

        /*
         * When a rq runs at a lower compute capacity, it will need
         * more time to do the same amount of work than it would at max
         * capacity. In order to be invariant, we scale the delta to
         * reflect how much work has really been done.
         * Running longer results in stealing idle time that will
         * disturb the load signal compared to max capacity. This
         * stolen idle time will be automatically reflected when the
         * rq becomes idle and the clock is synced with
         * rq_clock_task.
         */

        /*
         * Scale the elapsed time to reflect the real amount of
         * computation
         */
        delta = cap_scale(delta, arch_scale_cpu_capacity(cpu_of(rq)));
        delta = cap_scale(delta, arch_scale_freq_capacity(cpu_of(rq)));

        rq->clock_pelt += delta;
}
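
/*
 * Worked example (illustrative values): with cap_scale(v, w) being
 * v * w >> SCHED_CAPACITY_SHIFT, a CPU of capacity 512 (half of 1024)
 * currently running at ~80% of its max frequency (freq capacity 819)
 * turns a 2000 us wall-clock delta into:
 *
 *      2000 * 512 >> 10 = 1000
 *      1000 * 819 >> 10 =  799
 *
 * so clock_pelt advances by ~800 us: the delta a CPU at full capacity and
 * max frequency would have needed for the same amount of work.
 */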

/*
 * When rq becomes idle, we have to check if it has lost idle time
 * because it was fully busy. A rq is fully used when the /Sum util_sum
 * is greater than or equal to:
 * (LOAD_AVG_MAX - 1024 + rq->cfs.avg.period_contrib) << SCHED_CAPACITY_SHIFT;
 * For optimization and to allow for computation rounding, we don't take into
 * account the position in the current window (period_contrib) and we use the
 * upper bound of util_sum to decide.
 */
static inline void update_idle_rq_clock_pelt(struct rq *rq)
{
        u32 divider = ((LOAD_AVG_MAX - 1024) << SCHED_CAPACITY_SHIFT) - LOAD_AVG_MAX;
        u32 util_sum = rq->cfs.avg.util_sum;
        util_sum += rq->avg_rt.util_sum;
        util_sum += rq->avg_dl.util_sum;

        /*
         * Reflecting stolen time makes sense only if the idle
         * phase would be present at max capacity. As soon as the
         * utilization of a rq has reached the maximum value, it is
         * considered as an always running rq without idle time to
         * steal. This potential idle time is considered as lost in
         * this case. We keep track of this lost idle time compared to
         * rq's clock_task.
         */
        if (util_sum >= divider)
                rq->lost_idle_time += rq_clock_task(rq) - rq->clock_pelt;
}
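
/*
 * Numerically (assuming the default 32 ms half-life, LOAD_AVG_MAX == 47742,
 * and SCHED_CAPACITY_SHIFT == 10):
 *
 *      divider = ((47742 - 1024) << 10) - 47742 = 47791490
 *
 * A fully busy rq saturates its summed util_sum at roughly
 * (47742 - 1024 + period_contrib) << 10, i.e. at least 46718 << 10 = 47839232,
 * so the comparison uses a threshold LOAD_AVG_MAX below that lower bound to
 * absorb rounding in the per-class sums.
 */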

static inline u64 rq_clock_pelt(struct rq *rq)
{
        lockdep_assert_rq_held(rq);
        assert_clock_updated(rq);

        return rq->clock_pelt - rq->lost_idle_time;
}

#ifdef CONFIG_CFS_BANDWIDTH
/* rq_clock_pelt() normalized against any time this cfs_rq has spent throttled */
static inline u64 cfs_rq_clock_pelt(struct cfs_rq *cfs_rq)
{
        if (unlikely(cfs_rq->throttle_count))
                return cfs_rq->throttled_clock_task - cfs_rq->throttled_clock_task_time;

        return rq_clock_pelt(rq_of(cfs_rq)) - cfs_rq->throttled_clock_task_time;
}
#else
static inline u64 cfs_rq_clock_pelt(struct cfs_rq *cfs_rq)
{
        return rq_clock_pelt(rq_of(cfs_rq));
}
#endif
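
/*
 * Informal note on the CFS_BANDWIDTH variant above: while throttle_count is
 * non-zero the returned value is pinned to the snapshot taken when the cfs_rq
 * was throttled (throttled_clock_task, maintained by the throttle/unthrottle
 * paths in fair.c) minus the throttled time accumulated so far. Once
 * unthrottled, throttled_clock_task_time has grown by the length of the
 * throttled period, so the clock resumes from the same point. The effect is
 * that this cfs_rq's PELT signals neither accrue nor decay while throttled.
 */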

#else

static inline int
update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
{
        return 0;
}

static inline int
update_rt_rq_load_avg(u64 now, struct rq *rq, int running)
{
        return 0;
}

static inline int
update_dl_rq_load_avg(u64 now, struct rq *rq, int running)
{
        return 0;
}

static inline int
update_thermal_load_avg(u64 now, struct rq *rq, u64 capacity)
{
        return 0;
}

static inline u64 thermal_load_avg(struct rq *rq)
{
        return 0;
}

static inline int
update_irq_load_avg(struct rq *rq, u64 running)
{
        return 0;
}

static inline u64 rq_clock_pelt(struct rq *rq)
{
        return rq_clock_task(rq);
}

static inline void
update_rq_clock_pelt(struct rq *rq, s64 delta) { }

static inline void
update_idle_rq_clock_pelt(struct rq *rq) { }

#endif