// SPDX-License-Identifier: GPL-2.0
/*
 * kernel/sched/loadavg.c
 *
 * This file contains the magic bits required to compute the global loadavg
 * figure. It's a silly number but people think it's important. We go through
 * great pains to make it work on big machines and tickless kernels.
 */
#include "sched.h"

/*
 * Global load-average calculations
 *
 * We take a distributed and async approach to calculating the global load-avg
 * in order to minimize overhead.
 *
 * The global load average is an exponentially decaying average of nr_running +
 * nr_uninterruptible.
 *
 * Once every LOAD_FREQ:
 *
 *   nr_active = 0;
 *   for_each_possible_cpu(cpu)
 *      nr_active += cpu_of(cpu)->nr_running + cpu_of(cpu)->nr_uninterruptible;
 *
 *   avenrun[n] = avenrun[n] * exp_n + nr_active * (1 - exp_n)
 *
 * Due to a number of reasons the above turns into the mess below:
 *
 *  - for_each_possible_cpu() is prohibitively expensive on machines with a
 *    serious number of CPUs, therefore we need to take a distributed approach
 *    to calculating nr_active.
 *
 *        \Sum_i x_i(t) = \Sum_i x_i(t) - x_i(t_0) | x_i(t_0) := 0
 *                      = \Sum_i { \Sum_j=1 x_i(t_j) - x_i(t_j-1) }
 *
 *    So assuming nr_active := 0 when we start out -- true by definition -- we
 *    can simply take per-CPU deltas and fold those into a global accumulator
 *    to obtain the same result. See calc_load_fold_active().
 *
 *    Furthermore, in order to avoid synchronizing all per-CPU delta folding
 *    across the machine, we assume 10 ticks is sufficient time for every
 *    CPU to have completed this task.
 *
 *    This places an upper bound on the IRQ-off latency of the machine. Then
 *    again, being late doesn't lose the delta, just wrecks the sample.
 *
 *  - cpu_rq()->nr_uninterruptible isn't accurately tracked per-CPU because
 *    this would add another cross-CPU cacheline miss and atomic operation
 *    to the wakeup path. Instead we increment on whatever CPU the task ran
 *    when it went into uninterruptible state and decrement on whatever CPU
 *    did the wakeup. This means that only the sum of nr_uninterruptible over
 *    all CPUs yields the correct result.
 *
 *  This covers the NO_HZ=n code; for extra headaches, see the comment below.
 */
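
/*
 * For illustration, a two-CPU sketch of the delta folding above (hypothetical
 * numbers, not taken from any real trace):
 *
 *   CPU0: calc_load_active goes 0 -> 3 -> 2, folding deltas +3, -1
 *   CPU1: calc_load_active goes 0 -> 1,      folding delta  +1
 *
 * calc_load_tasks accumulates +3 - 1 + 1 = 3, which equals the direct
 * for_each_possible_cpu() sum of the final per-CPU counts (2 + 1), without
 * any CPU ever having to read another CPU's runqueue.
 */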

/* Variables and functions for calc_load */
atomic_long_t calc_load_tasks;
unsigned long calc_load_update;
unsigned long avenrun[3];
EXPORT_SYMBOL(avenrun); /* should be removed */

/**
 * get_avenrun - get the load average array
 * @loads:      pointer to dest load array
 * @offset:     offset to add
 * @shift:      shift count to shift the result left
 *
 * These values are estimates at best, so no need for locking.
 */
void get_avenrun(unsigned long *loads, unsigned long offset, int shift)
{
        loads[0] = (avenrun[0] + offset) << shift;
        loads[1] = (avenrun[1] + offset) << shift;
        loads[2] = (avenrun[2] + offset) << shift;
}
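
/*
 * The canonical consumer is /proc/loadavg; roughly (see fs/proc/loadavg.c)
 * it rounds to two decimal places via the @offset argument:
 *
 *   unsigned long avnrun[3];
 *
 *   get_avenrun(avnrun, FIXED_1/200, 0);
 *   seq_printf(m, "%lu.%02lu ...", LOAD_INT(avnrun[0]), LOAD_FRAC(avnrun[0]));
 *
 * where LOAD_INT()/LOAD_FRAC() from <linux/sched/loadavg.h> split the
 * fixed-point value into integer and fractional parts, and FIXED_1/200 adds
 * half of 1/100 so the truncation rounds to nearest.
 */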

long calc_load_fold_active(struct rq *this_rq, long adjust)
{
        long nr_active, delta = 0;

        nr_active = this_rq->nr_running - adjust;
        nr_active += (long)this_rq->nr_uninterruptible;

        if (nr_active != this_rq->calc_load_active) {
                delta = nr_active - this_rq->calc_load_active;
                this_rq->calc_load_active = nr_active;
        }

        return delta;
}
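
/*
 * Example with made-up values: if this runqueue last reported
 * calc_load_active == 3 and now has nr_running == 1 and
 * nr_uninterruptible == 1, the fold returns delta == -1 and records
 * calc_load_active == 2; the global sum only ever sees the change,
 * never the absolute per-CPU count.
 */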

/*
 * a1 = a0 * e + a * (1 - e)
 */
static unsigned long
calc_load(unsigned long load, unsigned long exp, unsigned long active)
{
        unsigned long newload;

        newload = load * exp + active * (FIXED_1 - exp);
        if (active >= load)
                newload += FIXED_1 - 1;

        return newload / FIXED_1;
}
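
/*
 * A worked example with the constants from <linux/sched/loadavg.h>
 * (FSHIFT == 11, so FIXED_1 == 2048, and EXP_1 == 1884 ~= 2048 * e^(-5s/1min)):
 * starting from load == 0 with two tasks active (active == 2 * FIXED_1):
 *
 *   newload  = 0 * 1884 + 4096 * (2048 - 1884)  = 671744
 *   newload += FIXED_1 - 1 (round up when rising) = 673791
 *   newload / FIXED_1                             = 328
 *
 * i.e. 328/2048 ~= 0.16, matching 2 * (1 - e^(-5/60)) for the 1-minute
 * average after a single LOAD_FREQ sample.
 */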

#ifdef CONFIG_NO_HZ_COMMON
/*
 * Handle NO_HZ for the global load-average.
 *
 * Since the above-described distributed algorithm to compute the global
 * load-average relies on per-CPU sampling from the tick, it is affected by
 * NO_HZ.
 *
 * The basic idea is to fold the nr_active delta into a global NO_HZ-delta upon
 * entering NO_HZ state such that we can include this as an 'extra' CPU delta
 * when we read the global state.
 *
 * Obviously reality has to ruin such a delightfully simple scheme:
 *
 *  - When we go NO_HZ idle during the window, we can negate our sample
 *    contribution, causing under-accounting.
 *
 *    We avoid this by keeping two NO_HZ-delta counters and flipping them
 *    when the window starts, thus separating old and new NO_HZ load.
 *
 *    The only trick is the slight shift in index flip for read vs write.
 *
 *        0s            5s            10s           15s
 *          +10           +10           +10           +10
 *        |-|-----------|-|-----------|-|-----------|-|
 *    r:0 0 1           1 0           0 1           1 0
 *    w:0 1 1           0 0           1 1           0 0
 *
 *    This ensures we'll fold the old NO_HZ contribution in this window while
 *    accumulating the new one.
 *
 *  - When we wake up from NO_HZ during the window, we push up our
 *    contribution, since we effectively move our sample point to a known
 *    busy state.
 *
 *    This is solved by pushing the window forward, and thus skipping the
 *    sample, for this CPU (effectively using the NO_HZ-delta for this CPU which
 *    was in effect at the time the window opened). This also solves the issue
 *    of having to deal with a CPU having been in NO_HZ for multiple LOAD_FREQ
 *    intervals.
 *
 * When making the ILB scale, we should try to pull this in as well.
 */
static atomic_long_t calc_load_nohz[2];
static int calc_load_idx;

static inline int calc_load_write_idx(void)
{
        int idx = calc_load_idx;

        /*
         * See calc_global_nohz(): if we observe the new index, we also
         * need to observe the new update time.
         */
        smp_rmb();

        /*
         * If the folding window started, make sure we start writing in the
         * next NO_HZ-delta.
         */
        if (!time_before(jiffies, READ_ONCE(calc_load_update)))
                idx++;

        return idx & 1;
}

static inline int calc_load_read_idx(void)
{
        return calc_load_idx & 1;
}

void calc_load_nohz_start(void)
{
        struct rq *this_rq = this_rq();
        long delta;

        /*
         * We're going into NO_HZ mode; if there's any pending delta, fold it
         * into the pending NO_HZ delta.
         */
        delta = calc_load_fold_active(this_rq, 0);
        if (delta) {
                int idx = calc_load_write_idx();

                atomic_long_add(delta, &calc_load_nohz[idx]);
        }
}

void calc_load_nohz_stop(void)
{
        struct rq *this_rq = this_rq();

        /*
         * If we're still before the pending sample window, we're done.
         */
        this_rq->calc_load_update = READ_ONCE(calc_load_update);
        if (time_before(jiffies, this_rq->calc_load_update))
                return;

        /*
         * We woke inside or after the sample window; this means we're already
         * accounted through the nohz accounting, so skip the entire deal and
         * sync up for the next window.
         */
        if (time_before(jiffies, this_rq->calc_load_update + 10))
                this_rq->calc_load_update += LOAD_FREQ;
}
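
/*
 * Concretely (hypothetical jiffies values): with the global window opening
 * at calc_load_update == 1000, a CPU waking at:
 *
 *   jiffies == 990:  still before the window; its own tick will fold the
 *                    sample as usual.
 *   jiffies == 1005: inside the 10-tick fold window; the NO_HZ delta folded
 *                    in calc_load_nohz_start() already covers this sample,
 *                    so skip ahead one LOAD_FREQ to avoid counting it twice.
 */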

static long calc_load_nohz_fold(void)
{
        int idx = calc_load_read_idx();
        long delta = 0;

        if (atomic_long_read(&calc_load_nohz[idx]))
                delta = atomic_long_xchg(&calc_load_nohz[idx], 0);

        return delta;
}

/**
 * fixed_power_int - compute: x^n, in O(log n) time
 *
 * @x:         base of the power
 * @frac_bits: fractional bits of @x
 * @n:         power to raise @x to.
 *
 * By exploiting the relation between the definition of the natural power
 * function: x^n := x*x*...*x (x multiplied by itself n times), and
 * the binary encoding of numbers used by computers: n := \Sum n_i * 2^i,
 * (where: n_i \elem {0, 1}, the binary vector representing n),
 * we find: x^n := x^(\Sum n_i * 2^i) := \Prod x^(n_i * 2^i), which is
 * of course trivially computable in O(log_2 n), the length of our binary
 * vector.
 */
static unsigned long
fixed_power_int(unsigned long x, unsigned int frac_bits, unsigned int n)
{
        unsigned long result = 1UL << frac_bits;

        if (n) {
                for (;;) {
                        if (n & 1) {
                                result *= x;
                                result += 1UL << (frac_bits - 1);
                                result >>= frac_bits;
                        }
                        n >>= 1;
                        if (!n)
                                break;
                        x *= x;
                        x += 1UL << (frac_bits - 1);
                        x >>= frac_bits;
                }
        }

        return result;
}
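
/*
 * As a concrete trace, take n == 5 (binary 101): bit 0 multiplies x into
 * result, x is squared to x^2; bit 1 is clear, so only the squaring to x^4
 * happens; bit 2 multiplies x^4 into result, yielding x^5 with two squarings
 * and two multiplies instead of four sequential multiplies. The
 * '+= 1UL << (frac_bits - 1)' steps round each fixed-point renormalization
 * to nearest.
 */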

/*
 * a1 = a0 * e + a * (1 - e)
 *
 * a2 = a1 * e + a * (1 - e)
 *    = (a0 * e + a * (1 - e)) * e + a * (1 - e)
 *    = a0 * e^2 + a * (1 - e) * (1 + e)
 *
 * a3 = a2 * e + a * (1 - e)
 *    = (a0 * e^2 + a * (1 - e) * (1 + e)) * e + a * (1 - e)
 *    = a0 * e^3 + a * (1 - e) * (1 + e + e^2)
 *
 *  ...
 *
 * an = a0 * e^n + a * (1 - e) * (1 + e + ... + e^(n-1))    [1]
 *    = a0 * e^n + a * (1 - e) * (1 - e^n)/(1 - e)
 *    = a0 * e^n + a * (1 - e^n)
 *
 * [1] application of the geometric series:
 *
 *              n         1 - x^(n+1)
 *     S_n := \Sum x^i = -------------
 *             i=0          1 - x
 */
static unsigned long
calc_load_n(unsigned long load, unsigned long exp,
            unsigned long active, unsigned int n)
{
        return calc_load(load, fixed_power_int(exp, FSHIFT, n), active);
}
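
/*
 * For example, a machine that was NO_HZ idle across n == 12 windows (about
 * one minute at LOAD_FREQ ~= 5s) with calc_load_tasks == 0 decays avenrun[0]
 * by (EXP_1/FIXED_1)^12 ~= e^-1 ~= 0.37 in a single calc_load_n() call,
 * matching twelve back-to-back calc_load() steps up to fixed-point rounding,
 * but at O(log n) cost.
 */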

/*
 * NO_HZ can leave us missing all per-CPU ticks calling
 * calc_load_fold_active(), but since a NO_HZ CPU folds its delta into
 * calc_load_nohz per calc_load_nohz_start(), all we need to do is fold
 * in the pending NO_HZ delta if our NO_HZ period crossed a load cycle boundary.
 *
 * Once we've updated the global active value, we need to apply the exponential
 * weights adjusted to the number of cycles missed.
 */
static void calc_global_nohz(void)
{
        unsigned long sample_window;
        long delta, active, n;

        sample_window = READ_ONCE(calc_load_update);
        if (!time_before(jiffies, sample_window + 10)) {
                /*
                 * Catch up, folding however many windows we are still behind.
                 */
                delta = jiffies - sample_window - 10;
                n = 1 + (delta / LOAD_FREQ);

                active = atomic_long_read(&calc_load_tasks);
                active = active > 0 ? active * FIXED_1 : 0;

                avenrun[0] = calc_load_n(avenrun[0], EXP_1, active, n);
                avenrun[1] = calc_load_n(avenrun[1], EXP_5, active, n);
                avenrun[2] = calc_load_n(avenrun[2], EXP_15, active, n);

                WRITE_ONCE(calc_load_update, sample_window + n * LOAD_FREQ);
        }

        /*
         * Flip the NO_HZ index...
         *
         * Make sure we first write the new time then flip the index, so that
         * calc_load_write_idx() will see the new time when it reads the new
         * index; this avoids a double flip messing things up.
         */
        smp_wmb();
        calc_load_idx++;
}
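
/*
 * The barrier pairing above, spelled out (reader on the left is
 * calc_load_write_idx(), writer on the right is calc_global_nohz()):
 *
 *   idx = calc_load_idx;               WRITE_ONCE(calc_load_update, ...);
 *   smp_rmb();                         smp_wmb();
 *   READ_ONCE(calc_load_update);       calc_load_idx++;
 *
 * A reader that observes the incremented index is thus guaranteed to also
 * observe the new window time, so it cannot bump the index a second time
 * based on a stale calc_load_update.
 */
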
#else /* !CONFIG_NO_HZ_COMMON */

static inline long calc_load_nohz_fold(void) { return 0; }
static inline void calc_global_nohz(void) { }

#endif /* CONFIG_NO_HZ_COMMON */

/*
 * calc_global_load - update the avenrun load estimates 10 ticks after the
 * CPUs have updated calc_load_tasks.
 *
 * Called from the global timer code.
 */
void calc_global_load(unsigned long ticks)
{
        unsigned long sample_window;
        long active, delta;

        sample_window = READ_ONCE(calc_load_update);
        if (time_before(jiffies, sample_window + 10))
                return;

        /*
         * Fold the 'old' NO_HZ-delta to include all NO_HZ CPUs.
         */
        delta = calc_load_nohz_fold();
        if (delta)
                atomic_long_add(delta, &calc_load_tasks);

        active = atomic_long_read(&calc_load_tasks);
        active = active > 0 ? active * FIXED_1 : 0;

        avenrun[0] = calc_load(avenrun[0], EXP_1, active);
        avenrun[1] = calc_load(avenrun[1], EXP_5, active);
        avenrun[2] = calc_load(avenrun[2], EXP_15, active);

        WRITE_ONCE(calc_load_update, sample_window + LOAD_FREQ);

        /*
         * In case we went NO_HZ for multiple LOAD_FREQ intervals,
         * catch up in bulk.
         */
        calc_global_nohz();
}

/*
 * Called from scheduler_tick() to periodically update this CPU's
 * active count.
 */
void calc_global_load_tick(struct rq *this_rq)
{
        long delta;

        if (time_before(jiffies, this_rq->calc_load_update))
                return;

        delta = calc_load_fold_active(this_rq, 0);
        if (delta)
                atomic_long_add(delta, &calc_load_tasks);

        this_rq->calc_load_update += LOAD_FREQ;
}