   1/*
   2 *  linux/kernel/time/tick-sched.c
   3 *
   4 *  Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de>
   5 *  Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
   6 *  Copyright(C) 2006-2007  Timesys Corp., Thomas Gleixner
   7 *
   8 *  No idle tick implementation for low and high resolution timers
   9 *
  10 *  Started by: Thomas Gleixner and Ingo Molnar
  11 *
  12 *  Distribute under GPLv2.
  13 */
  14#include <linux/cpu.h>
  15#include <linux/err.h>
  16#include <linux/hrtimer.h>
  17#include <linux/interrupt.h>
  18#include <linux/kernel_stat.h>
  19#include <linux/percpu.h>
  20#include <linux/profile.h>
  21#include <linux/sched.h>
  22#include <linux/module.h>
  23#include <linux/irq_work.h>
  24#include <linux/posix-timers.h>
  25#include <linux/context_tracking.h>
  26
  27#include <asm/irq_regs.h>
  28
  29#include "tick-internal.h"
  30
  31#include <trace/events/timer.h>
  32
  33/*
  34 * Per cpu nohz control structure
  35 */
  36static DEFINE_PER_CPU(struct tick_sched, tick_cpu_sched);
  37
  38struct tick_sched *tick_get_tick_sched(int cpu)
  39{
  40        return &per_cpu(tick_cpu_sched, cpu);
  41}
  42
  43#if defined(CONFIG_NO_HZ_COMMON) || defined(CONFIG_HIGH_RES_TIMERS)
  44/*
  45 * The time, when the last jiffy update happened. Protected by jiffies_lock.
  46 */
  47static ktime_t last_jiffies_update;
  48
  49/*
  50 * Must be called with interrupts disabled !
  51 */
  52static void tick_do_update_jiffies64(ktime_t now)
  53{
  54        unsigned long ticks = 0;
  55        ktime_t delta;
  56
  57        /*
  58         * Do a quick check without holding jiffies_lock:
  59         */
  60        delta = ktime_sub(now, last_jiffies_update);
  61        if (delta.tv64 < tick_period.tv64)
  62                return;
  63
   64        /* Re-evaluate with jiffies_lock held */
  65        write_seqlock(&jiffies_lock);
  66
  67        delta = ktime_sub(now, last_jiffies_update);
  68        if (delta.tv64 >= tick_period.tv64) {
  69
  70                delta = ktime_sub(delta, tick_period);
  71                last_jiffies_update = ktime_add(last_jiffies_update,
  72                                                tick_period);
  73
  74                /* Slow path for long timeouts */
  75                if (unlikely(delta.tv64 >= tick_period.tv64)) {
  76                        s64 incr = ktime_to_ns(tick_period);
  77
  78                        ticks = ktime_divns(delta, incr);
  79
  80                        last_jiffies_update = ktime_add_ns(last_jiffies_update,
  81                                                           incr * ticks);
  82                }
  83                do_timer(++ticks);
  84
  85                /* Keep the tick_next_period variable up to date */
  86                tick_next_period = ktime_add(last_jiffies_update, tick_period);
  87        } else {
  88                write_sequnlock(&jiffies_lock);
  89                return;
  90        }
  91        write_sequnlock(&jiffies_lock);
  92        update_wall_time();
  93}
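
/*
 * Illustrative example (not from the original source): with HZ=1000
 * (tick_period = 1ms), if the CPU carrying the do_timer() duty wakes up
 * 5ms after last_jiffies_update, delta is 5ms and the code above roughly
 * does:
 *
 *        delta -= tick_period;                     4ms left
 *        ticks  = ktime_divns(delta, 1000000);     4 missed ticks
 *        do_timer(++ticks);                        jiffies += 5
 *
 * so jiffies catches up in one call instead of five separate tick
 * interrupts.
 */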
  94
  95/*
   96 * Initialize and return the time of the last jiffies update.
  97 */
  98static ktime_t tick_init_jiffy_update(void)
  99{
 100        ktime_t period;
 101
 102        write_seqlock(&jiffies_lock);
 103        /* Did we start the jiffies update yet ? */
 104        if (last_jiffies_update.tv64 == 0)
 105                last_jiffies_update = tick_next_period;
 106        period = last_jiffies_update;
 107        write_sequnlock(&jiffies_lock);
 108        return period;
 109}
 110
 111
 112static void tick_sched_do_timer(ktime_t now)
 113{
 114        int cpu = smp_processor_id();
 115
 116#ifdef CONFIG_NO_HZ_COMMON
 117        /*
 118         * Check if the do_timer duty was dropped. We don't care about
 119         * concurrency: This happens only when the cpu in charge went
  120         * into a long sleep. If two cpus happen to assign themselves to
 121         * this duty, then the jiffies update is still serialized by
 122         * jiffies_lock.
 123         */
 124        if (unlikely(tick_do_timer_cpu == TICK_DO_TIMER_NONE)
 125            && !tick_nohz_full_cpu(cpu))
 126                tick_do_timer_cpu = cpu;
 127#endif
 128
 129        /* Check, if the jiffies need an update */
 130        if (tick_do_timer_cpu == cpu)
 131                tick_do_update_jiffies64(now);
 132}
 133
 134static void tick_sched_handle(struct tick_sched *ts, struct pt_regs *regs)
 135{
 136#ifdef CONFIG_NO_HZ_COMMON
 137        /*
 138         * When we are idle and the tick is stopped, we have to touch
 139         * the watchdog as we might not schedule for a really long
  140         * time. This happens on completely idle SMP systems while
 141         * waiting on the login prompt. We also increment the "start of
 142         * idle" jiffy stamp so the idle accounting adjustment we do
  143         * when we go busy again does not account too many ticks.
 144         */
 145        if (ts->tick_stopped) {
 146                touch_softlockup_watchdog_sched();
 147                if (is_idle_task(current))
 148                        ts->idle_jiffies++;
 149        }
 150#endif
 151        update_process_times(user_mode(regs));
 152        profile_tick(CPU_PROFILING);
 153}
 154#endif
 155
 156#ifdef CONFIG_NO_HZ_FULL
 157cpumask_var_t tick_nohz_full_mask;
 158cpumask_var_t housekeeping_mask;
 159bool tick_nohz_full_running;
 160static atomic_t tick_dep_mask;
 161
 162static bool check_tick_dependency(atomic_t *dep)
 163{
 164        int val = atomic_read(dep);
 165
 166        if (val & TICK_DEP_MASK_POSIX_TIMER) {
 167                trace_tick_stop(0, TICK_DEP_MASK_POSIX_TIMER);
 168                return true;
 169        }
 170
 171        if (val & TICK_DEP_MASK_PERF_EVENTS) {
 172                trace_tick_stop(0, TICK_DEP_MASK_PERF_EVENTS);
 173                return true;
 174        }
 175
 176        if (val & TICK_DEP_MASK_SCHED) {
 177                trace_tick_stop(0, TICK_DEP_MASK_SCHED);
 178                return true;
 179        }
 180
 181        if (val & TICK_DEP_MASK_CLOCK_UNSTABLE) {
 182                trace_tick_stop(0, TICK_DEP_MASK_CLOCK_UNSTABLE);
 183                return true;
 184        }
 185
 186        return false;
 187}
 188
 189static bool can_stop_full_tick(struct tick_sched *ts)
 190{
 191        WARN_ON_ONCE(!irqs_disabled());
 192
 193        if (check_tick_dependency(&tick_dep_mask))
 194                return false;
 195
 196        if (check_tick_dependency(&ts->tick_dep_mask))
 197                return false;
 198
 199        if (check_tick_dependency(&current->tick_dep_mask))
 200                return false;
 201
 202        if (check_tick_dependency(&current->signal->tick_dep_mask))
 203                return false;
 204
 205        return true;
 206}
 207
 208static void nohz_full_kick_func(struct irq_work *work)
 209{
 210        /* Empty, the tick restart happens on tick_nohz_irq_exit() */
 211}
 212
 213static DEFINE_PER_CPU(struct irq_work, nohz_full_kick_work) = {
 214        .func = nohz_full_kick_func,
 215};
 216
 217/*
 218 * Kick this CPU if it's full dynticks in order to force it to
 219 * re-evaluate its dependency on the tick and restart it if necessary.
 220 * This kick, unlike tick_nohz_full_kick_cpu() and tick_nohz_full_kick_all(),
 221 * is NMI safe.
 222 */
 223static void tick_nohz_full_kick(void)
 224{
 225        if (!tick_nohz_full_cpu(smp_processor_id()))
 226                return;
 227
 228        irq_work_queue(this_cpu_ptr(&nohz_full_kick_work));
 229}
 230
 231/*
 232 * Kick the CPU if it's full dynticks in order to force it to
 233 * re-evaluate its dependency on the tick and restart it if necessary.
 234 */
 235void tick_nohz_full_kick_cpu(int cpu)
 236{
 237        if (!tick_nohz_full_cpu(cpu))
 238                return;
 239
 240        irq_work_queue_on(&per_cpu(nohz_full_kick_work, cpu), cpu);
 241}
 242
 243/*
 244 * Kick all full dynticks CPUs in order to force these to re-evaluate
 245 * their dependency on the tick and restart it if necessary.
 246 */
 247static void tick_nohz_full_kick_all(void)
 248{
 249        int cpu;
 250
 251        if (!tick_nohz_full_running)
 252                return;
 253
 254        preempt_disable();
 255        for_each_cpu_and(cpu, tick_nohz_full_mask, cpu_online_mask)
 256                tick_nohz_full_kick_cpu(cpu);
 257        preempt_enable();
 258}
 259
 260static void tick_nohz_dep_set_all(atomic_t *dep,
 261                                  enum tick_dep_bits bit)
 262{
 263        int prev;
 264
 265        prev = atomic_fetch_or(dep, BIT(bit));
 266        if (!prev)
 267                tick_nohz_full_kick_all();
 268}
 269
 270/*
 271 * Set a global tick dependency. Used by perf events that rely on freq and
 272 * by unstable clock.
 273 */
 274void tick_nohz_dep_set(enum tick_dep_bits bit)
 275{
 276        tick_nohz_dep_set_all(&tick_dep_mask, bit);
 277}
 278
 279void tick_nohz_dep_clear(enum tick_dep_bits bit)
 280{
 281        atomic_andnot(BIT(bit), &tick_dep_mask);
 282}
 283
 284/*
 285 * Set per-CPU tick dependency. Used by scheduler and perf events in order to
 286 * manage events throttling.
 287 */
 288void tick_nohz_dep_set_cpu(int cpu, enum tick_dep_bits bit)
 289{
 290        int prev;
 291        struct tick_sched *ts;
 292
 293        ts = per_cpu_ptr(&tick_cpu_sched, cpu);
 294
 295        prev = atomic_fetch_or(&ts->tick_dep_mask, BIT(bit));
 296        if (!prev) {
 297                preempt_disable();
 298                /* Perf needs local kick that is NMI safe */
 299                if (cpu == smp_processor_id()) {
 300                        tick_nohz_full_kick();
 301                } else {
 302                        /* Remote irq work not NMI-safe */
 303                        if (!WARN_ON_ONCE(in_nmi()))
 304                                tick_nohz_full_kick_cpu(cpu);
 305                }
 306                preempt_enable();
 307        }
 308}
 309
 310void tick_nohz_dep_clear_cpu(int cpu, enum tick_dep_bits bit)
 311{
 312        struct tick_sched *ts = per_cpu_ptr(&tick_cpu_sched, cpu);
 313
 314        atomic_andnot(BIT(bit), &ts->tick_dep_mask);
 315}
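
/*
 * Usage sketch (the callers live elsewhere, e.g. the scheduler and perf):
 *
 *        tick_nohz_dep_set_cpu(cpu, TICK_DEP_BIT_SCHED);
 *        ...
 *        tick_nohz_dep_clear_cpu(cpu, TICK_DEP_BIT_SCHED);
 *
 * Only the first setter of a bit kicks the target CPU so that it
 * re-evaluates can_stop_full_tick(). Clearing a bit does not kick; the
 * tick simply stops again at the next re-evaluation point (irq exit,
 * idle entry, ...).
 */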
 316
 317/*
  318 * Set a per-task tick dependency. Posix CPU timers need this so that their
  319 * per-task timers can expire.
 320 */
 321void tick_nohz_dep_set_task(struct task_struct *tsk, enum tick_dep_bits bit)
 322{
 323        /*
  324         * We could optimize this by only kicking the CPU that runs the task
 325         * if that noise matters for nohz full users.
 326         */
 327        tick_nohz_dep_set_all(&tsk->tick_dep_mask, bit);
 328}
 329
 330void tick_nohz_dep_clear_task(struct task_struct *tsk, enum tick_dep_bits bit)
 331{
 332        atomic_andnot(BIT(bit), &tsk->tick_dep_mask);
 333}
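
/*
 * Usage sketch: posix-cpu-timers arms a per-task timer and then does
 * something along the lines of
 *
 *        tick_nohz_dep_set_task(tsk, TICK_DEP_BIT_POSIX_TIMER);
 *
 * so that a nohz_full CPU running the task keeps its tick until the
 * dependency is cleared again via tick_nohz_dep_clear_task().
 */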
 334
 335/*
  336 * Set a per-taskgroup tick dependency. Posix CPU timers need this so that
  337 * per-process timers can expire.
 338 */
 339void tick_nohz_dep_set_signal(struct signal_struct *sig, enum tick_dep_bits bit)
 340{
 341        tick_nohz_dep_set_all(&sig->tick_dep_mask, bit);
 342}
 343
 344void tick_nohz_dep_clear_signal(struct signal_struct *sig, enum tick_dep_bits bit)
 345{
 346        atomic_andnot(BIT(bit), &sig->tick_dep_mask);
 347}
 348
 349/*
 350 * Re-evaluate the need for the tick as we switch the current task.
 351 * It might need the tick due to per task/process properties:
 352 * perf events, posix cpu timers, ...
 353 */
 354void __tick_nohz_task_switch(void)
 355{
 356        unsigned long flags;
 357        struct tick_sched *ts;
 358
 359        local_irq_save(flags);
 360
 361        if (!tick_nohz_full_cpu(smp_processor_id()))
 362                goto out;
 363
 364        ts = this_cpu_ptr(&tick_cpu_sched);
 365
 366        if (ts->tick_stopped) {
 367                if (atomic_read(&current->tick_dep_mask) ||
 368                    atomic_read(&current->signal->tick_dep_mask))
 369                        tick_nohz_full_kick();
 370        }
 371out:
 372        local_irq_restore(flags);
 373}
 374
 375/* Parse the boot-time nohz CPU list from the kernel parameters. */
 376static int __init tick_nohz_full_setup(char *str)
 377{
 378        alloc_bootmem_cpumask_var(&tick_nohz_full_mask);
 379        if (cpulist_parse(str, tick_nohz_full_mask) < 0) {
 380                pr_warn("NO_HZ: Incorrect nohz_full cpumask\n");
 381                free_bootmem_cpumask_var(tick_nohz_full_mask);
 382                return 1;
 383        }
 384        tick_nohz_full_running = true;
 385
 386        return 1;
 387}
 388__setup("nohz_full=", tick_nohz_full_setup);
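
/*
 * Example: booting with "nohz_full=1-7" (a cpulist, same syntax as
 * isolcpus=) marks CPUs 1-7 as full dynticks and leaves CPU 0 for
 * housekeeping and timekeeping duty.
 */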
 389
 390static int tick_nohz_cpu_down_callback(struct notifier_block *nfb,
 391                                       unsigned long action,
 392                                       void *hcpu)
 393{
 394        unsigned int cpu = (unsigned long)hcpu;
 395
 396        switch (action & ~CPU_TASKS_FROZEN) {
 397        case CPU_DOWN_PREPARE:
 398                /*
 399                 * The boot CPU handles housekeeping duty (unbound timers,
 400                 * workqueues, timekeeping, ...) on behalf of full dynticks
 401                 * CPUs. It must remain online when nohz full is enabled.
 402                 */
 403                if (tick_nohz_full_running && tick_do_timer_cpu == cpu)
 404                        return NOTIFY_BAD;
 405                break;
 406        }
 407        return NOTIFY_OK;
 408}
 409
 410static int tick_nohz_init_all(void)
 411{
 412        int err = -1;
 413
 414#ifdef CONFIG_NO_HZ_FULL_ALL
 415        if (!alloc_cpumask_var(&tick_nohz_full_mask, GFP_KERNEL)) {
 416                WARN(1, "NO_HZ: Can't allocate full dynticks cpumask\n");
 417                return err;
 418        }
 419        err = 0;
 420        cpumask_setall(tick_nohz_full_mask);
 421        tick_nohz_full_running = true;
 422#endif
 423        return err;
 424}
 425
 426void __init tick_nohz_init(void)
 427{
 428        int cpu;
 429
 430        if (!tick_nohz_full_running) {
 431                if (tick_nohz_init_all() < 0)
 432                        return;
 433        }
 434
 435        if (!alloc_cpumask_var(&housekeeping_mask, GFP_KERNEL)) {
 436                WARN(1, "NO_HZ: Can't allocate not-full dynticks cpumask\n");
 437                cpumask_clear(tick_nohz_full_mask);
 438                tick_nohz_full_running = false;
 439                return;
 440        }
 441
 442        /*
  443         * Full dynticks uses irq work to drive the tick rescheduling from safe
  444         * locking contexts. But then irq work needs to be able to raise its
  445         * own interrupts to avoid a circular dependency on the tick.
 446         */
 447        if (!arch_irq_work_has_interrupt()) {
 448                pr_warn("NO_HZ: Can't run full dynticks because arch doesn't support irq work self-IPIs\n");
 449                cpumask_clear(tick_nohz_full_mask);
 450                cpumask_copy(housekeeping_mask, cpu_possible_mask);
 451                tick_nohz_full_running = false;
 452                return;
 453        }
 454
 455        cpu = smp_processor_id();
 456
 457        if (cpumask_test_cpu(cpu, tick_nohz_full_mask)) {
 458                pr_warn("NO_HZ: Clearing %d from nohz_full range for timekeeping\n",
 459                        cpu);
 460                cpumask_clear_cpu(cpu, tick_nohz_full_mask);
 461        }
 462
 463        cpumask_andnot(housekeeping_mask,
 464                       cpu_possible_mask, tick_nohz_full_mask);
 465
 466        for_each_cpu(cpu, tick_nohz_full_mask)
 467                context_tracking_cpu_set(cpu);
 468
 469        cpu_notifier(tick_nohz_cpu_down_callback, 0);
 470        pr_info("NO_HZ: Full dynticks CPUs: %*pbl.\n",
 471                cpumask_pr_args(tick_nohz_full_mask));
 472
 473        /*
 474         * We need at least one CPU to handle housekeeping work such
 475         * as timekeeping, unbound timers, workqueues, ...
 476         */
 477        WARN_ON_ONCE(cpumask_empty(housekeeping_mask));
 478}
 479#endif
 480
 481/*
 482 * NOHZ - aka dynamic tick functionality
 483 */
 484#ifdef CONFIG_NO_HZ_COMMON
 485/*
 486 * NO HZ enabled ?
 487 */
 488bool tick_nohz_enabled __read_mostly  = true;
 489unsigned long tick_nohz_active  __read_mostly;
 490/*
 491 * Enable / Disable tickless mode
 492 */
 493static int __init setup_tick_nohz(char *str)
 494{
 495        return (kstrtobool(str, &tick_nohz_enabled) == 0);
 496}
 497
 498__setup("nohz=", setup_tick_nohz);
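
/*
 * Example: booting with "nohz=off" keeps the periodic tick even when
 * idle; "nohz=on" (the default) allows the idle tick to be stopped.
 */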
 499
 500int tick_nohz_tick_stopped(void)
 501{
 502        return __this_cpu_read(tick_cpu_sched.tick_stopped);
 503}
 504
 505/**
 506 * tick_nohz_update_jiffies - update jiffies when idle was interrupted
 507 *
 508 * Called from interrupt entry when the CPU was idle
 509 *
 510 * In case the sched_tick was stopped on this CPU, we have to check if jiffies
 511 * must be updated. Otherwise an interrupt handler could use a stale jiffy
 512 * value. We do this unconditionally on any cpu, as we don't know whether the
  513 * cpu which has the do_timer() duty assigned is in a long sleep.
 514 */
 515static void tick_nohz_update_jiffies(ktime_t now)
 516{
 517        unsigned long flags;
 518
 519        __this_cpu_write(tick_cpu_sched.idle_waketime, now);
 520
 521        local_irq_save(flags);
 522        tick_do_update_jiffies64(now);
 523        local_irq_restore(flags);
 524
 525        touch_softlockup_watchdog_sched();
 526}
 527
 528/*
  529 * Updates the per-cpu idle time statistics counters
 530 */
 531static void
 532update_ts_time_stats(int cpu, struct tick_sched *ts, ktime_t now, u64 *last_update_time)
 533{
 534        ktime_t delta;
 535
 536        if (ts->idle_active) {
 537                delta = ktime_sub(now, ts->idle_entrytime);
 538                if (nr_iowait_cpu(cpu) > 0)
 539                        ts->iowait_sleeptime = ktime_add(ts->iowait_sleeptime, delta);
 540                else
 541                        ts->idle_sleeptime = ktime_add(ts->idle_sleeptime, delta);
 542                ts->idle_entrytime = now;
 543        }
 544
 545        if (last_update_time)
 546                *last_update_time = ktime_to_us(now);
 547
 548}
 549
 550static void tick_nohz_stop_idle(struct tick_sched *ts, ktime_t now)
 551{
 552        update_ts_time_stats(smp_processor_id(), ts, now, NULL);
 553        ts->idle_active = 0;
 554
 555        sched_clock_idle_wakeup_event(0);
 556}
 557
 558static ktime_t tick_nohz_start_idle(struct tick_sched *ts)
 559{
 560        ktime_t now = ktime_get();
 561
 562        ts->idle_entrytime = now;
 563        ts->idle_active = 1;
 564        sched_clock_idle_sleep_event();
 565        return now;
 566}
 567
 568/**
 569 * get_cpu_idle_time_us - get the total idle time of a cpu
 570 * @cpu: CPU number to query
 571 * @last_update_time: variable to store update time in. Do not update
 572 * counters if NULL.
 573 *
  574 * Return the cumulative idle time (since boot) for a given
 575 * CPU, in microseconds.
 576 *
 577 * This time is measured via accounting rather than sampling,
 578 * and is as accurate as ktime_get() is.
 579 *
 580 * This function returns -1 if NOHZ is not enabled.
 581 */
 582u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time)
 583{
 584        struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
 585        ktime_t now, idle;
 586
 587        if (!tick_nohz_active)
 588                return -1;
 589
 590        now = ktime_get();
 591        if (last_update_time) {
 592                update_ts_time_stats(cpu, ts, now, last_update_time);
 593                idle = ts->idle_sleeptime;
 594        } else {
 595                if (ts->idle_active && !nr_iowait_cpu(cpu)) {
 596                        ktime_t delta = ktime_sub(now, ts->idle_entrytime);
 597
 598                        idle = ktime_add(ts->idle_sleeptime, delta);
 599                } else {
 600                        idle = ts->idle_sleeptime;
 601                }
 602        }
 603
 604        return ktime_to_us(idle);
 605
 606}
 607EXPORT_SYMBOL_GPL(get_cpu_idle_time_us);
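
/*
 * Usage sketch (e.g. from a cpufreq governor):
 *
 *        u64 wall;
 *        u64 idle = get_cpu_idle_time_us(cpu, &wall);
 *
 * Passing a non-NULL last_update_time also folds the currently running
 * idle period into the counters, so two successive calls can be diffed
 * to obtain the idle time over an interval.
 */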
 608
 609/**
 610 * get_cpu_iowait_time_us - get the total iowait time of a cpu
 611 * @cpu: CPU number to query
 612 * @last_update_time: variable to store update time in. Do not update
 613 * counters if NULL.
 614 *
  615 * Return the cumulative iowait time (since boot) for a given
 616 * CPU, in microseconds.
 617 *
 618 * This time is measured via accounting rather than sampling,
 619 * and is as accurate as ktime_get() is.
 620 *
 621 * This function returns -1 if NOHZ is not enabled.
 622 */
 623u64 get_cpu_iowait_time_us(int cpu, u64 *last_update_time)
 624{
 625        struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
 626        ktime_t now, iowait;
 627
 628        if (!tick_nohz_active)
 629                return -1;
 630
 631        now = ktime_get();
 632        if (last_update_time) {
 633                update_ts_time_stats(cpu, ts, now, last_update_time);
 634                iowait = ts->iowait_sleeptime;
 635        } else {
 636                if (ts->idle_active && nr_iowait_cpu(cpu) > 0) {
 637                        ktime_t delta = ktime_sub(now, ts->idle_entrytime);
 638
 639                        iowait = ktime_add(ts->iowait_sleeptime, delta);
 640                } else {
 641                        iowait = ts->iowait_sleeptime;
 642                }
 643        }
 644
 645        return ktime_to_us(iowait);
 646}
 647EXPORT_SYMBOL_GPL(get_cpu_iowait_time_us);
 648
 649static void tick_nohz_restart(struct tick_sched *ts, ktime_t now)
 650{
 651        hrtimer_cancel(&ts->sched_timer);
 652        hrtimer_set_expires(&ts->sched_timer, ts->last_tick);
 653
 654        /* Forward the time to expire in the future */
 655        hrtimer_forward(&ts->sched_timer, now, tick_period);
 656
 657        if (ts->nohz_mode == NOHZ_MODE_HIGHRES)
 658                hrtimer_start_expires(&ts->sched_timer, HRTIMER_MODE_ABS_PINNED);
 659        else
 660                tick_program_event(hrtimer_get_expires(&ts->sched_timer), 1);
 661}
 662
 663static ktime_t tick_nohz_stop_sched_tick(struct tick_sched *ts,
 664                                         ktime_t now, int cpu)
 665{
 666        struct clock_event_device *dev = __this_cpu_read(tick_cpu_device.evtdev);
 667        u64 basemono, next_tick, next_tmr, next_rcu, delta, expires;
 668        unsigned long seq, basejiff;
 669        ktime_t tick;
 670
 671        /* Read jiffies and the time when jiffies were updated last */
 672        do {
 673                seq = read_seqbegin(&jiffies_lock);
 674                basemono = last_jiffies_update.tv64;
 675                basejiff = jiffies;
 676        } while (read_seqretry(&jiffies_lock, seq));
 677        ts->last_jiffies = basejiff;
 678
 679        if (rcu_needs_cpu(basemono, &next_rcu) ||
 680            arch_needs_cpu() || irq_work_needs_cpu()) {
 681                next_tick = basemono + TICK_NSEC;
 682        } else {
 683                /*
 684                 * Get the next pending timer. If high resolution
 685                 * timers are enabled this only takes the timer wheel
 686                 * timers into account. If high resolution timers are
 687                 * disabled this also looks at the next expiring
 688                 * hrtimer.
 689                 */
 690                next_tmr = get_next_timer_interrupt(basejiff, basemono);
 691                ts->next_timer = next_tmr;
 692                /* Take the next rcu event into account */
 693                next_tick = next_rcu < next_tmr ? next_rcu : next_tmr;
 694        }
 695
 696        /*
 697         * If the tick is due in the next period, keep it ticking or
 698         * force prod the timer.
 699         */
 700        delta = next_tick - basemono;
 701        if (delta <= (u64)TICK_NSEC) {
 702                tick.tv64 = 0;
 703                /*
 704                 * We've not stopped the tick yet, and there's a timer in the
 705                 * next period, so no point in stopping it either, bail.
 706                 */
 707                if (!ts->tick_stopped)
 708                        goto out;
 709
 710                /*
 711                 * If, OTOH, we did stop it, but there's a pending (expired)
 712                 * timer reprogram the timer hardware to fire now.
 713                 *
 714                 * We will not restart the tick proper, just prod the timer
 715                 * hardware into firing an interrupt to process the pending
 716                 * timers. Just like tick_irq_exit() will not restart the tick
 717                 * for 'normal' interrupts.
 718                 *
 719                 * Only once we exit the idle loop will we re-enable the tick,
 720                 * see tick_nohz_idle_exit().
 721                 */
 722                if (delta == 0) {
 723                        tick_nohz_restart(ts, now);
 724                        goto out;
 725                }
 726        }
 727
 728        /*
 729         * If this cpu is the one which updates jiffies, then give up
 730         * the assignment and let it be taken by the cpu which runs
 731         * the tick timer next, which might be this cpu as well. If we
 732         * don't drop this here the jiffies might be stale and
  733         * do_timer() is never invoked. Keep track of the fact that it
 734         * was the one which had the do_timer() duty last. If this cpu
 735         * is the one which had the do_timer() duty last, we limit the
  736         * sleep time to the timekeeping max_deferment value.
 737         * Otherwise we can sleep as long as we want.
 738         */
 739        delta = timekeeping_max_deferment();
 740        if (cpu == tick_do_timer_cpu) {
 741                tick_do_timer_cpu = TICK_DO_TIMER_NONE;
 742                ts->do_timer_last = 1;
 743        } else if (tick_do_timer_cpu != TICK_DO_TIMER_NONE) {
 744                delta = KTIME_MAX;
 745                ts->do_timer_last = 0;
 746        } else if (!ts->do_timer_last) {
 747                delta = KTIME_MAX;
 748        }
 749
 750#ifdef CONFIG_NO_HZ_FULL
 751        /* Limit the tick delta to the maximum scheduler deferment */
 752        if (!ts->inidle)
 753                delta = min(delta, scheduler_tick_max_deferment());
 754#endif
 755
 756        /* Calculate the next expiry time */
 757        if (delta < (KTIME_MAX - basemono))
 758                expires = basemono + delta;
 759        else
 760                expires = KTIME_MAX;
 761
 762        expires = min_t(u64, expires, next_tick);
 763        tick.tv64 = expires;
 764
  765        /* Skip reprogramming the event if it hasn't changed */
 766        if (ts->tick_stopped && (expires == dev->next_event.tv64))
 767                goto out;
 768
 769        /*
 770         * nohz_stop_sched_tick can be called several times before
 771         * the nohz_restart_sched_tick is called. This happens when
 772         * interrupts arrive which do not cause a reschedule. In the
 773         * first call we save the current tick time, so we can restart
 774         * the scheduler tick in nohz_restart_sched_tick.
 775         */
 776        if (!ts->tick_stopped) {
 777                nohz_balance_enter_idle(cpu);
 778                calc_load_enter_idle();
 779
 780                ts->last_tick = hrtimer_get_expires(&ts->sched_timer);
 781                ts->tick_stopped = 1;
 782                trace_tick_stop(1, TICK_DEP_MASK_NONE);
 783        }
 784
 785        /*
 786         * If the expiration time == KTIME_MAX, then we simply stop
 787         * the tick timer.
 788         */
 789        if (unlikely(expires == KTIME_MAX)) {
 790                if (ts->nohz_mode == NOHZ_MODE_HIGHRES)
 791                        hrtimer_cancel(&ts->sched_timer);
 792                goto out;
 793        }
 794
 795        if (ts->nohz_mode == NOHZ_MODE_HIGHRES)
 796                hrtimer_start(&ts->sched_timer, tick, HRTIMER_MODE_ABS_PINNED);
 797        else
 798                tick_program_event(tick, 1);
 799out:
 800        /* Update the estimated sleep length */
 801        ts->sleep_length = ktime_sub(dev->next_event, now);
 802        return tick;
 803}
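
/*
 * Illustration: with HZ=1000, if the next timer wheel/RCU event is 50ms
 * away and this CPU does not carry the do_timer() duty, delta above stays
 * at KTIME_MAX, so expires = min(KTIME_MAX, basemono + 50ms) and the
 * clockevent is programmed 50ms out; the CPU takes no tick interrupts in
 * between unless new timers or irq work show up.
 */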
 804
 805static void tick_nohz_restart_sched_tick(struct tick_sched *ts, ktime_t now, int active)
 806{
 807        /* Update jiffies first */
 808        tick_do_update_jiffies64(now);
 809        update_cpu_load_nohz(active);
 810
 811        calc_load_exit_idle();
 812        touch_softlockup_watchdog_sched();
 813        /*
 814         * Cancel the scheduled timer and restore the tick
 815         */
 816        ts->tick_stopped  = 0;
 817        ts->idle_exittime = now;
 818
 819        tick_nohz_restart(ts, now);
 820}
 821
 822static void tick_nohz_full_update_tick(struct tick_sched *ts)
 823{
 824#ifdef CONFIG_NO_HZ_FULL
 825        int cpu = smp_processor_id();
 826
 827        if (!tick_nohz_full_cpu(cpu))
 828                return;
 829
 830        if (!ts->tick_stopped && ts->nohz_mode == NOHZ_MODE_INACTIVE)
 831                return;
 832
 833        if (can_stop_full_tick(ts))
 834                tick_nohz_stop_sched_tick(ts, ktime_get(), cpu);
 835        else if (ts->tick_stopped)
 836                tick_nohz_restart_sched_tick(ts, ktime_get(), 1);
 837#endif
 838}
 839
 840static bool can_stop_idle_tick(int cpu, struct tick_sched *ts)
 841{
 842        /*
 843         * If this cpu is offline and it is the one which updates
 844         * jiffies, then give up the assignment and let it be taken by
 845         * the cpu which runs the tick timer next. If we don't drop
 846         * this here the jiffies might be stale and do_timer() never
 847         * invoked.
 848         */
 849        if (unlikely(!cpu_online(cpu))) {
 850                if (cpu == tick_do_timer_cpu)
 851                        tick_do_timer_cpu = TICK_DO_TIMER_NONE;
 852                return false;
 853        }
 854
 855        if (unlikely(ts->nohz_mode == NOHZ_MODE_INACTIVE)) {
 856                ts->sleep_length = (ktime_t) { .tv64 = NSEC_PER_SEC/HZ };
 857                return false;
 858        }
 859
 860        if (need_resched())
 861                return false;
 862
 863        if (unlikely(local_softirq_pending() && cpu_online(cpu))) {
 864                static int ratelimit;
 865
 866                if (ratelimit < 10 &&
 867                    (local_softirq_pending() & SOFTIRQ_STOP_IDLE_MASK)) {
 868                        pr_warn("NOHZ: local_softirq_pending %02x\n",
 869                                (unsigned int) local_softirq_pending());
 870                        ratelimit++;
 871                }
 872                return false;
 873        }
 874
 875        if (tick_nohz_full_enabled()) {
 876                /*
 877                 * Keep the tick alive to guarantee timekeeping progression
 878                 * if there are full dynticks CPUs around
 879                 */
 880                if (tick_do_timer_cpu == cpu)
 881                        return false;
 882                /*
 883                 * Boot safety: make sure the timekeeping duty has been
 884                 * assigned before entering dyntick-idle mode,
 885                 */
 886                if (tick_do_timer_cpu == TICK_DO_TIMER_NONE)
 887                        return false;
 888        }
 889
 890        return true;
 891}
 892
 893static void __tick_nohz_idle_enter(struct tick_sched *ts)
 894{
 895        ktime_t now, expires;
 896        int cpu = smp_processor_id();
 897
 898        now = tick_nohz_start_idle(ts);
 899
 900        if (can_stop_idle_tick(cpu, ts)) {
 901                int was_stopped = ts->tick_stopped;
 902
 903                ts->idle_calls++;
 904
 905                expires = tick_nohz_stop_sched_tick(ts, now, cpu);
 906                if (expires.tv64 > 0LL) {
 907                        ts->idle_sleeps++;
 908                        ts->idle_expires = expires;
 909                }
 910
 911                if (!was_stopped && ts->tick_stopped)
 912                        ts->idle_jiffies = ts->last_jiffies;
 913        }
 914}
 915
 916/**
 917 * tick_nohz_idle_enter - stop the idle tick from the idle task
 918 *
 919 * When the next event is more than a tick into the future, stop the idle tick
 920 * Called when we start the idle loop.
 921 *
  922 * The arch is responsible for calling:
 923 *
 924 * - rcu_idle_enter() after its last use of RCU before the CPU is put
 925 *  to sleep.
 926 * - rcu_idle_exit() before the first use of RCU after the CPU is woken up.
 927 */
 928void tick_nohz_idle_enter(void)
 929{
 930        struct tick_sched *ts;
 931
 932        WARN_ON_ONCE(irqs_disabled());
 933
 934        /*
 935         * Update the idle state in the scheduler domain hierarchy
 936         * when tick_nohz_stop_sched_tick() is called from the idle loop.
 937         * State will be updated to busy during the first busy tick after
 938         * exiting idle.
 939         */
 940        set_cpu_sd_state_idle();
 941
 942        local_irq_disable();
 943
 944        ts = this_cpu_ptr(&tick_cpu_sched);
 945        ts->inidle = 1;
 946        __tick_nohz_idle_enter(ts);
 947
 948        local_irq_enable();
 949}
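
/*
 * Rough calling sequence from the idle loop (sketch; the real loop lives
 * in kernel/sched/idle.c):
 *
 *        tick_nohz_idle_enter();
 *        while (!need_resched())
 *                cpuidle_idle_call();
 *        tick_nohz_idle_exit();
 */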
 950
 951/**
 952 * tick_nohz_irq_exit - update next tick event from interrupt exit
 953 *
 954 * When an interrupt fires while we are idle and it doesn't cause
 955 * a reschedule, it may still add, modify or delete a timer, enqueue
 956 * an RCU callback, etc...
 957 * So we need to re-calculate and reprogram the next tick event.
 958 */
 959void tick_nohz_irq_exit(void)
 960{
 961        struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
 962
 963        if (ts->inidle)
 964                __tick_nohz_idle_enter(ts);
 965        else
 966                tick_nohz_full_update_tick(ts);
 967}
 968
 969/**
 970 * tick_nohz_get_sleep_length - return the length of the current sleep
 971 *
 972 * Called from power state control code with interrupts disabled
 973 */
 974ktime_t tick_nohz_get_sleep_length(void)
 975{
 976        struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
 977
 978        return ts->sleep_length;
 979}
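
/*
 * The cpuidle menu governor is the main consumer: it converts this value
 * to microseconds and compares it against the target residency of each
 * C-state to avoid picking a state deeper than the predicted idle period,
 * roughly:
 *
 *        next_us = ktime_to_us(tick_nohz_get_sleep_length());
 */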
 980
 981static void tick_nohz_account_idle_ticks(struct tick_sched *ts)
 982{
 983#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
 984        unsigned long ticks;
 985
 986        if (vtime_accounting_cpu_enabled())
 987                return;
 988        /*
  989         * We stopped the tick in idle. update_process_times() would miss the
  990         * time we slept, as it only does a single tick worth of accounting.
  991         * Enforce that this time is accounted to idle!
 992         */
 993        ticks = jiffies - ts->idle_jiffies;
 994        /*
 995         * We might be one off. Do not randomly account a huge number of ticks!
 996         */
 997        if (ticks && ticks < LONG_MAX)
 998                account_idle_ticks(ticks);
 999#endif
1000}
1001
1002/**
1003 * tick_nohz_idle_exit - restart the idle tick from the idle task
1004 *
 1005 * Restart the idle tick when the CPU is woken up from idle.
 1006 * This also exits the RCU extended quiescent state. The CPU
1007 * can use RCU again after this function is called.
1008 */
1009void tick_nohz_idle_exit(void)
1010{
1011        struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
1012        ktime_t now;
1013
1014        local_irq_disable();
1015
1016        WARN_ON_ONCE(!ts->inidle);
1017
1018        ts->inidle = 0;
1019
1020        if (ts->idle_active || ts->tick_stopped)
1021                now = ktime_get();
1022
1023        if (ts->idle_active)
1024                tick_nohz_stop_idle(ts, now);
1025
1026        if (ts->tick_stopped) {
1027                tick_nohz_restart_sched_tick(ts, now, 0);
1028                tick_nohz_account_idle_ticks(ts);
1029        }
1030
1031        local_irq_enable();
1032}
1033
1034/*
1035 * The nohz low res interrupt handler
1036 */
1037static void tick_nohz_handler(struct clock_event_device *dev)
1038{
1039        struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
1040        struct pt_regs *regs = get_irq_regs();
1041        ktime_t now = ktime_get();
1042
1043        dev->next_event.tv64 = KTIME_MAX;
1044
1045        tick_sched_do_timer(now);
1046        tick_sched_handle(ts, regs);
1047
1048        /* No need to reprogram if we are running tickless  */
1049        if (unlikely(ts->tick_stopped))
1050                return;
1051
1052        hrtimer_forward(&ts->sched_timer, now, tick_period);
1053        tick_program_event(hrtimer_get_expires(&ts->sched_timer), 1);
1054}
1055
1056static inline void tick_nohz_activate(struct tick_sched *ts, int mode)
1057{
1058        if (!tick_nohz_enabled)
1059                return;
1060        ts->nohz_mode = mode;
1061        /* One update is enough */
1062        if (!test_and_set_bit(0, &tick_nohz_active))
1063                timers_update_migration(true);
1064}
1065
1066/**
1067 * tick_nohz_switch_to_nohz - switch to nohz mode
1068 */
1069static void tick_nohz_switch_to_nohz(void)
1070{
1071        struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
1072        ktime_t next;
1073
1074        if (!tick_nohz_enabled)
1075                return;
1076
1077        if (tick_switch_to_oneshot(tick_nohz_handler))
1078                return;
1079
1080        /*
1081         * Recycle the hrtimer in ts, so we can share the
1082         * hrtimer_forward with the highres code.
1083         */
1084        hrtimer_init(&ts->sched_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
1085        /* Get the next period */
1086        next = tick_init_jiffy_update();
1087
1088        hrtimer_set_expires(&ts->sched_timer, next);
1089        hrtimer_forward_now(&ts->sched_timer, tick_period);
1090        tick_program_event(hrtimer_get_expires(&ts->sched_timer), 1);
1091        tick_nohz_activate(ts, NOHZ_MODE_LOWRES);
1092}
1093
1094/*
1095 * When NOHZ is enabled and the tick is stopped, we need to kick the
1096 * tick timer from irq_enter() so that the jiffies update is kept
1097 * alive during long running softirqs. That's ugly as hell, but
1098 * correctness is key even if we need to fix the offending softirq in
1099 * the first place.
1100 *
1101 * Note, this is different to tick_nohz_restart. We just kick the
1102 * timer and do not touch the other magic bits which need to be done
1103 * when idle is left.
1104 */
1105static void tick_nohz_kick_tick(struct tick_sched *ts, ktime_t now)
1106{
1107#if 0
1108        /* Switch back to 2.6.27 behaviour */
1109        ktime_t delta;
1110
1111        /*
1112         * Do not touch the tick device, when the next expiry is either
1113         * already reached or less/equal than the tick period.
1114         */
1115        delta = ktime_sub(hrtimer_get_expires(&ts->sched_timer), now);
1116        if (delta.tv64 <= tick_period.tv64)
1117                return;
1118
1119        tick_nohz_restart(ts, now);
1120#endif
1121}
1122
1123static inline void tick_nohz_irq_enter(void)
1124{
1125        struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
1126        ktime_t now;
1127
1128        if (!ts->idle_active && !ts->tick_stopped)
1129                return;
1130        now = ktime_get();
1131        if (ts->idle_active)
1132                tick_nohz_stop_idle(ts, now);
1133        if (ts->tick_stopped) {
1134                tick_nohz_update_jiffies(now);
1135                tick_nohz_kick_tick(ts, now);
1136        }
1137}
1138
1139#else
1140
1141static inline void tick_nohz_switch_to_nohz(void) { }
1142static inline void tick_nohz_irq_enter(void) { }
1143static inline void tick_nohz_activate(struct tick_sched *ts, int mode) { }
1144
1145#endif /* CONFIG_NO_HZ_COMMON */
1146
1147/*
1148 * Called from irq_enter to notify about the possible interruption of idle()
1149 */
1150void tick_irq_enter(void)
1151{
1152        tick_check_oneshot_broadcast_this_cpu();
1153        tick_nohz_irq_enter();
1154}
1155
1156/*
1157 * High resolution timer specific code
1158 */
1159#ifdef CONFIG_HIGH_RES_TIMERS
1160/*
1161 * We rearm the timer until we get disabled by the idle code.
1162 * Called with interrupts disabled.
1163 */
1164static enum hrtimer_restart tick_sched_timer(struct hrtimer *timer)
1165{
1166        struct tick_sched *ts =
1167                container_of(timer, struct tick_sched, sched_timer);
1168        struct pt_regs *regs = get_irq_regs();
1169        ktime_t now = ktime_get();
1170
1171        tick_sched_do_timer(now);
1172
1173        /*
1174         * Do not call, when we are not in irq context and have
1175         * no valid regs pointer
1176         */
1177        if (regs)
1178                tick_sched_handle(ts, regs);
1179
1180        /* No need to reprogram if we are in idle or full dynticks mode */
1181        if (unlikely(ts->tick_stopped))
1182                return HRTIMER_NORESTART;
1183
1184        hrtimer_forward(timer, now, tick_period);
1185
1186        return HRTIMER_RESTART;
1187}
1188
1189static int sched_skew_tick;
1190
1191static int __init skew_tick(char *str)
1192{
1193        get_option(&str, &sched_skew_tick);
1194
1195        return 0;
1196}
1197early_param("skew_tick", skew_tick);
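
/*
 * Example: booting with "skew_tick=1" staggers the per-CPU tick timers
 * (see the offset calculation in tick_setup_sched_timer() below), which
 * reduces jiffies_lock contention on large systems at the cost of less
 * coalesced wakeups and therefore slightly higher power consumption.
 */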
1198
1199/**
1200 * tick_setup_sched_timer - setup the tick emulation timer
1201 */
1202void tick_setup_sched_timer(void)
1203{
1204        struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
1205        ktime_t now = ktime_get();
1206
1207        /*
1208         * Emulate tick processing via per-CPU hrtimers:
1209         */
1210        hrtimer_init(&ts->sched_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
1211        ts->sched_timer.function = tick_sched_timer;
1212
1213        /* Get the next period (per cpu) */
1214        hrtimer_set_expires(&ts->sched_timer, tick_init_jiffy_update());
1215
1216        /* Offset the tick to avert jiffies_lock contention. */
1217        if (sched_skew_tick) {
1218                u64 offset = ktime_to_ns(tick_period) >> 1;
1219                do_div(offset, num_possible_cpus());
1220                offset *= smp_processor_id();
1221                hrtimer_add_expires_ns(&ts->sched_timer, offset);
1222        }
1223
1224        hrtimer_forward(&ts->sched_timer, now, tick_period);
1225        hrtimer_start_expires(&ts->sched_timer, HRTIMER_MODE_ABS_PINNED);
1226        tick_nohz_activate(ts, NOHZ_MODE_HIGHRES);
1227}
1228#endif /* HIGH_RES_TIMERS */
1229
1230#if defined CONFIG_NO_HZ_COMMON || defined CONFIG_HIGH_RES_TIMERS
1231void tick_cancel_sched_timer(int cpu)
1232{
1233        struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
1234
1235# ifdef CONFIG_HIGH_RES_TIMERS
1236        if (ts->sched_timer.base)
1237                hrtimer_cancel(&ts->sched_timer);
1238# endif
1239
1240        memset(ts, 0, sizeof(*ts));
1241}
1242#endif
1243
1244/**
1245 * Async notification about clocksource changes
1246 */
1247void tick_clock_notify(void)
1248{
1249        int cpu;
1250
1251        for_each_possible_cpu(cpu)
1252                set_bit(0, &per_cpu(tick_cpu_sched, cpu).check_clocks);
1253}
1254
1255/*
1256 * Async notification about clock event changes
1257 */
1258void tick_oneshot_notify(void)
1259{
1260        struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
1261
1262        set_bit(0, &ts->check_clocks);
1263}
1264
1265/**
 1266 * Check if a change happened which makes oneshot possible.
 1267 *
 1268 * Called cyclically from the hrtimer softirq (driven by the timer
 1269 * softirq). allow_nohz signals that we can switch into low-res nohz
 1270 * mode, because high resolution timers are disabled (either at compile
 1271 * time or at runtime). Called with interrupts disabled.
1272 */
1273int tick_check_oneshot_change(int allow_nohz)
1274{
1275        struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
1276
1277        if (!test_and_clear_bit(0, &ts->check_clocks))
1278                return 0;
1279
1280        if (ts->nohz_mode != NOHZ_MODE_INACTIVE)
1281                return 0;
1282
1283        if (!timekeeping_valid_for_hres() || !tick_is_oneshot_available())
1284                return 0;
1285
1286        if (!allow_nohz)
1287                return 1;
1288
1289        tick_nohz_switch_to_nohz();
1290        return 0;
1291}
1292