   1/*
   2 *  linux/kernel/hrtimer.c
   3 *
   4 *  Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de>
   5 *  Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
   6 *  Copyright(C) 2006-2007  Timesys Corp., Thomas Gleixner
   7 *
   8 *  High-resolution kernel timers
   9 *
  10 *  In contrast to the low-resolution timeout API implemented in
  11 *  kernel/timer.c, hrtimers provide finer resolution and accuracy
  12 *  depending on system configuration and capabilities.
  13 *
  14 *  These timers are currently used for:
  15 *   - itimers
  16 *   - POSIX timers
  17 *   - nanosleep
  18 *   - precise in-kernel timing
  19 *
  20 *  Started by: Thomas Gleixner and Ingo Molnar
  21 *
  22 *  Credits:
  23 *      based on kernel/timer.c
  24 *
  25 *      Help, testing, suggestions, bugfixes, improvements were
  26 *      provided by:
  27 *
  28 *      George Anzinger, Andrew Morton, Steven Rostedt, Roman Zippel
  29 *      et al.
  30 *
  31 *  For licensing details see kernel-base/COPYING
  32 */
  33
  34#include <linux/cpu.h>
  35#include <linux/export.h>
  36#include <linux/percpu.h>
  37#include <linux/hrtimer.h>
  38#include <linux/notifier.h>
  39#include <linux/syscalls.h>
  40#include <linux/kallsyms.h>
  41#include <linux/interrupt.h>
  42#include <linux/tick.h>
  43#include <linux/seq_file.h>
  44#include <linux/err.h>
  45#include <linux/debugobjects.h>
  46#include <linux/sched.h>
  47#include <linux/sched/sysctl.h>
  48#include <linux/sched/rt.h>
  49#include <linux/sched/deadline.h>
  50#include <linux/timer.h>
  51#include <linux/freezer.h>
  52
  53#include <asm/uaccess.h>
  54
  55#include <trace/events/timer.h>
  56
  57#include "time/timekeeping.h"
  58
  59/*
  60 * The timer bases:
  61 *
  62 *  There are more clockids than hrtimer bases. Thus, we index
  63 * into the timer bases by the hrtimer_base_type enum. When trying
  64 * to reach a base using a clockid, hrtimer_clockid_to_base()
  65 * is used to convert from clockid to the proper hrtimer_base_type.
  66 */
  67DEFINE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases) =
  68{
  69
  70        .lock = __RAW_SPIN_LOCK_UNLOCKED(hrtimer_bases.lock),
  71        .clock_base =
  72        {
  73                {
  74                        .index = HRTIMER_BASE_MONOTONIC,
  75                        .clockid = CLOCK_MONOTONIC,
  76                        .get_time = &ktime_get,
  77                        .resolution = KTIME_LOW_RES,
  78                },
  79                {
  80                        .index = HRTIMER_BASE_REALTIME,
  81                        .clockid = CLOCK_REALTIME,
  82                        .get_time = &ktime_get_real,
  83                        .resolution = KTIME_LOW_RES,
  84                },
  85                {
  86                        .index = HRTIMER_BASE_BOOTTIME,
  87                        .clockid = CLOCK_BOOTTIME,
  88                        .get_time = &ktime_get_boottime,
  89                        .resolution = KTIME_LOW_RES,
  90                },
  91                {
  92                        .index = HRTIMER_BASE_TAI,
  93                        .clockid = CLOCK_TAI,
  94                        .get_time = &ktime_get_clocktai,
  95                        .resolution = KTIME_LOW_RES,
  96                },
  97        }
  98};
  99
 100static const int hrtimer_clock_to_base_table[MAX_CLOCKS] = {
 101        [CLOCK_REALTIME]        = HRTIMER_BASE_REALTIME,
 102        [CLOCK_MONOTONIC]       = HRTIMER_BASE_MONOTONIC,
 103        [CLOCK_BOOTTIME]        = HRTIMER_BASE_BOOTTIME,
 104        [CLOCK_TAI]             = HRTIMER_BASE_TAI,
 105};
 106
 107static inline int hrtimer_clockid_to_base(clockid_t clock_id)
 108{
 109        return hrtimer_clock_to_base_table[clock_id];
 110}
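
/*
 * For example, the table above makes the clockid -> base mapping a plain
 * array lookup:
 *
 *        hrtimer_clockid_to_base(CLOCK_BOOTTIME) == HRTIMER_BASE_BOOTTIME
 *
 * Only the four clockids listed in the table are meant to be passed in
 * here; they are the clocks that have an hrtimer clock base behind them.
 */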
 111
 112/*
 113 * Functions and macros which are different for UP/SMP systems are kept in a
 114 * single place
 115 */
 116#ifdef CONFIG_SMP
 117
 118/*
 119 * We are using hashed locking: holding per_cpu(hrtimer_bases)[n].lock
 120 * means that all timers which are tied to this base via timer->base are
 121 * locked, and the base itself is locked too.
 122 *
 123 *  So __run_hrtimer()/migrate_hrtimers() can safely modify all timers which could
 124 * be found on the lists/queues.
 125 *
 126 * When the timer's base is locked, and the timer removed from list, it is
 127 * possible to set timer->base = NULL and drop the lock: the timer remains
 128 * locked.
 129 */
 130static
 131struct hrtimer_clock_base *lock_hrtimer_base(const struct hrtimer *timer,
 132                                             unsigned long *flags)
 133{
 134        struct hrtimer_clock_base *base;
 135
 136        for (;;) {
 137                base = timer->base;
 138                if (likely(base != NULL)) {
 139                        raw_spin_lock_irqsave(&base->cpu_base->lock, *flags);
 140                        if (likely(base == timer->base))
 141                                return base;
 142                        /* The timer has migrated to another CPU: */
 143                        raw_spin_unlock_irqrestore(&base->cpu_base->lock, *flags);
 144                }
 145                cpu_relax();
 146        }
 147}
 148
 149
 150/*
 151 * Get the preferred target CPU for NOHZ
 152 */
 153static int hrtimer_get_target(int this_cpu, int pinned)
 154{
 155#ifdef CONFIG_NO_HZ_COMMON
 156        if (!pinned && get_sysctl_timer_migration())
 157                return get_nohz_timer_target();
 158#endif
 159        return this_cpu;
 160}
 161
 162/*
 163 * With HIGHRES=y we do not migrate the timer when it is expiring
 164 * before the next event on the target cpu because we cannot reprogram
 165 * the target cpu hardware and we would cause it to fire late.
 166 *
 167 * Called with cpu_base->lock of target cpu held.
 168 */
 169static int
 170hrtimer_check_target(struct hrtimer *timer, struct hrtimer_clock_base *new_base)
 171{
 172#ifdef CONFIG_HIGH_RES_TIMERS
 173        ktime_t expires;
 174
 175        if (!new_base->cpu_base->hres_active)
 176                return 0;
 177
 178        expires = ktime_sub(hrtimer_get_expires(timer), new_base->offset);
 179        return expires.tv64 <= new_base->cpu_base->expires_next.tv64;
 180#else
 181        return 0;
 182#endif
 183}
 184
 185/*
 186 * Switch the timer base to the current CPU when possible.
 187 */
 188static inline struct hrtimer_clock_base *
 189switch_hrtimer_base(struct hrtimer *timer, struct hrtimer_clock_base *base,
 190                    int pinned)
 191{
 192        struct hrtimer_clock_base *new_base;
 193        struct hrtimer_cpu_base *new_cpu_base;
 194        int this_cpu = smp_processor_id();
 195        int cpu = hrtimer_get_target(this_cpu, pinned);
 196        int basenum = base->index;
 197
 198again:
 199        new_cpu_base = &per_cpu(hrtimer_bases, cpu);
 200        new_base = &new_cpu_base->clock_base[basenum];
 201
 202        if (base != new_base) {
 203                /*
 204                 * We are trying to move timer to new_base.
 205                 * However we can't change timer's base while it is running,
 206                 * so we keep it on the same CPU. No hassle vs. reprogramming
 207                 * the event source in the high resolution case. The softirq
 208                 * code will take care of this when the timer function has
 209                 * completed. There is no conflict as we hold the lock until
 210                 * the timer is enqueued.
 211                 */
 212                if (unlikely(hrtimer_callback_running(timer)))
 213                        return base;
 214
 215                /* See the comment in lock_timer_base() */
 216                timer->base = NULL;
 217                raw_spin_unlock(&base->cpu_base->lock);
 218                raw_spin_lock(&new_base->cpu_base->lock);
 219
 220                if (cpu != this_cpu && hrtimer_check_target(timer, new_base)) {
 221                        cpu = this_cpu;
 222                        raw_spin_unlock(&new_base->cpu_base->lock);
 223                        raw_spin_lock(&base->cpu_base->lock);
 224                        timer->base = base;
 225                        goto again;
 226                }
 227                timer->base = new_base;
 228        } else {
 229                if (cpu != this_cpu && hrtimer_check_target(timer, new_base)) {
 230                        cpu = this_cpu;
 231                        goto again;
 232                }
 233        }
 234        return new_base;
 235}
 236
 237#else /* CONFIG_SMP */
 238
 239static inline struct hrtimer_clock_base *
 240lock_hrtimer_base(const struct hrtimer *timer, unsigned long *flags)
 241{
 242        struct hrtimer_clock_base *base = timer->base;
 243
 244        raw_spin_lock_irqsave(&base->cpu_base->lock, *flags);
 245
 246        return base;
 247}
 248
 249# define switch_hrtimer_base(t, b, p)   (b)
 250
 251#endif  /* !CONFIG_SMP */
 252
 253/*
 254 * Functions for the union type storage format of ktime_t which are
 255 * too large for inlining:
 256 */
 257#if BITS_PER_LONG < 64
 258# ifndef CONFIG_KTIME_SCALAR
 259/**
 260 * ktime_add_ns - Add a scalar nanoseconds value to a ktime_t variable
 261 * @kt:         addend
 262 * @nsec:       the scalar nsec value to add
 263 *
 264 * Returns the sum of kt and nsec in ktime_t format
 265 */
 266ktime_t ktime_add_ns(const ktime_t kt, u64 nsec)
 267{
 268        ktime_t tmp;
 269
 270        if (likely(nsec < NSEC_PER_SEC)) {
 271                tmp.tv64 = nsec;
 272        } else {
 273                unsigned long rem = do_div(nsec, NSEC_PER_SEC);
 274
 275                /* Make sure nsec fits into long */
 276                if (unlikely(nsec > KTIME_SEC_MAX))
 277                        return (ktime_t){ .tv64 = KTIME_MAX };
 278
 279                tmp = ktime_set((long)nsec, rem);
 280        }
 281
 282        return ktime_add(kt, tmp);
 283}
 284
 285EXPORT_SYMBOL_GPL(ktime_add_ns);
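
/*
 * Example (illustrative values): adding 1.5s worth of nanoseconds takes the
 * slow path above because nsec >= NSEC_PER_SEC:
 *
 *        ktime_t t = ktime_set(10, 0);
 *
 *        t = ktime_add_ns(t, 1500000000ULL);        -> t is now 11.5s
 */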
 286
 287/**
 288 * ktime_sub_ns - Subtract a scalar nanoseconds value from a ktime_t variable
 289 * @kt:         minuend
 290 * @nsec:       the scalar nsec value to subtract
 291 *
 292 * Returns the subtraction of @nsec from @kt in ktime_t format
 293 */
 294ktime_t ktime_sub_ns(const ktime_t kt, u64 nsec)
 295{
 296        ktime_t tmp;
 297
 298        if (likely(nsec < NSEC_PER_SEC)) {
 299                tmp.tv64 = nsec;
 300        } else {
 301                unsigned long rem = do_div(nsec, NSEC_PER_SEC);
 302
 303                tmp = ktime_set((long)nsec, rem);
 304        }
 305
 306        return ktime_sub(kt, tmp);
 307}
 308
 309EXPORT_SYMBOL_GPL(ktime_sub_ns);
 310# endif /* !CONFIG_KTIME_SCALAR */
 311
 312/*
 313 * Divide a ktime value by a nanosecond value
 314 */
 315u64 ktime_divns(const ktime_t kt, s64 div)
 316{
 317        u64 dclc;
 318        int sft = 0;
 319
 320        dclc = ktime_to_ns(kt);
 321        /* Make sure the divisor is less than 2^32: */
 322        while (div >> 32) {
 323                sft++;
 324                div >>= 1;
 325        }
 326        dclc >>= sft;
 327        do_div(dclc, (unsigned long) div);
 328
 329        return dclc;
 330}
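
/*
 * The loop above trades a little precision for a cheap 64/32 division: the
 * divisor is shifted right until it fits in 32 bits and the dividend is
 * shifted by the same amount before do_div() runs. Worked example
 * (illustrative numbers):
 *
 *        kt = 10,000,000,000 ns, div = 5,000,000,000 (needs 33 bits)
 *        -> sft = 1, dclc = 5,000,000,000, div = 2,500,000,000
 *        -> do_div() yields 2
 */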
 331#endif /* BITS_PER_LONG < 64 */
 332
 333/*
 334 * Add two ktime values and do a safety check for overflow:
 335 */
 336ktime_t ktime_add_safe(const ktime_t lhs, const ktime_t rhs)
 337{
 338        ktime_t res = ktime_add(lhs, rhs);
 339
 340        /*
 341         * We use KTIME_SEC_MAX here, the maximum timeout which we can
 342         * return to user space in a timespec:
 343         */
 344        if (res.tv64 < 0 || res.tv64 < lhs.tv64 || res.tv64 < rhs.tv64)
 345                res = ktime_set(KTIME_SEC_MAX, 0);
 346
 347        return res;
 348}
 349
 350EXPORT_SYMBOL_GPL(ktime_add_safe);
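
/*
 * Illustrative use: computing an absolute expiry from a (possibly huge)
 * user supplied relative timeout relies on the clamping above rather than
 * wrapping into the past. "user_timeout" is a hypothetical ktime_t here:
 *
 *        ktime_t expiry = ktime_add_safe(ktime_get(), user_timeout);
 *
 * On overflow, expiry saturates at KTIME_SEC_MAX seconds instead of going
 * negative.
 */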
 351
 352#ifdef CONFIG_DEBUG_OBJECTS_TIMERS
 353
 354static struct debug_obj_descr hrtimer_debug_descr;
 355
 356static void *hrtimer_debug_hint(void *addr)
 357{
 358        return ((struct hrtimer *) addr)->function;
 359}
 360
 361/*
 362 * fixup_init is called when:
 363 * - an active object is initialized
 364 */
 365static int hrtimer_fixup_init(void *addr, enum debug_obj_state state)
 366{
 367        struct hrtimer *timer = addr;
 368
 369        switch (state) {
 370        case ODEBUG_STATE_ACTIVE:
 371                hrtimer_cancel(timer);
 372                debug_object_init(timer, &hrtimer_debug_descr);
 373                return 1;
 374        default:
 375                return 0;
 376        }
 377}
 378
 379/*
 380 * fixup_activate is called when:
 381 * - an active object is activated
 382 * - an unknown object is activated (might be a statically initialized object)
 383 */
 384static int hrtimer_fixup_activate(void *addr, enum debug_obj_state state)
 385{
 386        switch (state) {
 387
 388        case ODEBUG_STATE_NOTAVAILABLE:
 389                WARN_ON_ONCE(1);
 390                return 0;
 391
 392        case ODEBUG_STATE_ACTIVE:
 393                WARN_ON(1);
 394
 395        default:
 396                return 0;
 397        }
 398}
 399
 400/*
 401 * fixup_free is called when:
 402 * - an active object is freed
 403 */
 404static int hrtimer_fixup_free(void *addr, enum debug_obj_state state)
 405{
 406        struct hrtimer *timer = addr;
 407
 408        switch (state) {
 409        case ODEBUG_STATE_ACTIVE:
 410                hrtimer_cancel(timer);
 411                debug_object_free(timer, &hrtimer_debug_descr);
 412                return 1;
 413        default:
 414                return 0;
 415        }
 416}
 417
 418static struct debug_obj_descr hrtimer_debug_descr = {
 419        .name           = "hrtimer",
 420        .debug_hint     = hrtimer_debug_hint,
 421        .fixup_init     = hrtimer_fixup_init,
 422        .fixup_activate = hrtimer_fixup_activate,
 423        .fixup_free     = hrtimer_fixup_free,
 424};
 425
 426static inline void debug_hrtimer_init(struct hrtimer *timer)
 427{
 428        debug_object_init(timer, &hrtimer_debug_descr);
 429}
 430
 431static inline void debug_hrtimer_activate(struct hrtimer *timer)
 432{
 433        debug_object_activate(timer, &hrtimer_debug_descr);
 434}
 435
 436static inline void debug_hrtimer_deactivate(struct hrtimer *timer)
 437{
 438        debug_object_deactivate(timer, &hrtimer_debug_descr);
 439}
 440
 441static inline void debug_hrtimer_free(struct hrtimer *timer)
 442{
 443        debug_object_free(timer, &hrtimer_debug_descr);
 444}
 445
 446static void __hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
 447                           enum hrtimer_mode mode);
 448
 449void hrtimer_init_on_stack(struct hrtimer *timer, clockid_t clock_id,
 450                           enum hrtimer_mode mode)
 451{
 452        debug_object_init_on_stack(timer, &hrtimer_debug_descr);
 453        __hrtimer_init(timer, clock_id, mode);
 454}
 455EXPORT_SYMBOL_GPL(hrtimer_init_on_stack);
 456
 457void destroy_hrtimer_on_stack(struct hrtimer *timer)
 458{
 459        debug_object_free(timer, &hrtimer_debug_descr);
 460}
 461
 462#else
 463static inline void debug_hrtimer_init(struct hrtimer *timer) { }
 464static inline void debug_hrtimer_activate(struct hrtimer *timer) { }
 465static inline void debug_hrtimer_deactivate(struct hrtimer *timer) { }
 466#endif
 467
 468static inline void
 469debug_init(struct hrtimer *timer, clockid_t clockid,
 470           enum hrtimer_mode mode)
 471{
 472        debug_hrtimer_init(timer);
 473        trace_hrtimer_init(timer, clockid, mode);
 474}
 475
 476static inline void debug_activate(struct hrtimer *timer)
 477{
 478        debug_hrtimer_activate(timer);
 479        trace_hrtimer_start(timer);
 480}
 481
 482static inline void debug_deactivate(struct hrtimer *timer)
 483{
 484        debug_hrtimer_deactivate(timer);
 485        trace_hrtimer_cancel(timer);
 486}
 487
 488#if defined(CONFIG_NO_HZ_COMMON) || defined(CONFIG_HIGH_RES_TIMERS)
 489ktime_t __hrtimer_get_next_event(struct hrtimer_cpu_base *cpu_base)
 490{
 491        struct hrtimer_clock_base *base = cpu_base->clock_base;
 492        ktime_t expires, expires_next = { .tv64 = KTIME_MAX };
 493        int i;
 494
 495        for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++, base++) {
 496                struct timerqueue_node *next;
 497                struct hrtimer *timer;
 498
 499                next = timerqueue_getnext(&base->active);
 500                if (!next)
 501                        continue;
 502
 503                timer = container_of(next, struct hrtimer, node);
 504                expires = ktime_sub(hrtimer_get_expires(timer), base->offset);
 505                if (expires.tv64 < expires_next.tv64)
 506                        expires_next = expires;
 507        }
 508        /*
 509         * clock_was_set() might have changed base->offset of any of
 510         * the clock bases so the result might be negative. Fix it up
 511         * to prevent a false positive in clockevents_program_event().
 512         */
 513        if (expires_next.tv64 < 0)
 514                expires_next.tv64 = 0;
 515        return expires_next;
 516}
 517#endif
 518
 519static inline ktime_t hrtimer_update_base(struct hrtimer_cpu_base *base)
 520{
 521        ktime_t *offs_real = &base->clock_base[HRTIMER_BASE_REALTIME].offset;
 522        ktime_t *offs_boot = &base->clock_base[HRTIMER_BASE_BOOTTIME].offset;
 523        ktime_t *offs_tai = &base->clock_base[HRTIMER_BASE_TAI].offset;
 524
 525        /* RHEL7: base->clock_was_set is upstream's base->clock_was_set_seq */
 526        return ktime_get_update_offsets_now(&base->clock_was_set,
 527                                            offs_real, offs_boot, offs_tai);
 528}
 529
 530/* High resolution timer related functions */
 531#ifdef CONFIG_HIGH_RES_TIMERS
 532
 533/*
 534 * High resolution timer enabled ?
 535 */
 536static int hrtimer_hres_enabled __read_mostly  = 1;
 537
 538/*
 539 * Enable / Disable high resolution mode
 540 */
 541static int __init setup_hrtimer_hres(char *str)
 542{
 543        if (!strcmp(str, "off"))
 544                hrtimer_hres_enabled = 0;
 545        else if (!strcmp(str, "on"))
 546                hrtimer_hres_enabled = 1;
 547        else
 548                return 0;
 549        return 1;
 550}
 551
 552__setup("highres=", setup_hrtimer_hres);
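
/*
 * The option above is a boot parameter; booting with
 *
 *        highres=off
 *
 * on the kernel command line keeps hrtimers in low resolution (tick based)
 * mode, while "highres=on" (the default) allows the switch to high
 * resolution mode once a suitable clock event device is available.
 */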
 553
 554/*
 555 * hrtimer_is_hres_enabled - query whether the highres mode is enabled
 556 */
 557static inline int hrtimer_is_hres_enabled(void)
 558{
 559        return hrtimer_hres_enabled;
 560}
 561
 562/*
 563 * Is the high resolution mode active ?
 564 */
 565static inline int hrtimer_hres_active(void)
 566{
 567        return __this_cpu_read(hrtimer_bases.hres_active);
 568}
 569
 570/*
 571 * Reprogram the event source with checking both queues for the
 572 * next event
 573 * Called with interrupts disabled and base->lock held
 574 */
 575static void
 576hrtimer_force_reprogram(struct hrtimer_cpu_base *cpu_base, int skip_equal)
 577{
 578        ktime_t expires_next;
 579
 580        if (!cpu_base->hres_active)
 581                return;
 582
 583        expires_next = __hrtimer_get_next_event(cpu_base);
 584
 585        if (skip_equal && expires_next.tv64 == cpu_base->expires_next.tv64)
 586                return;
 587
 588        cpu_base->expires_next.tv64 = expires_next.tv64;
 589
 590        /*
 591         * If a hang was detected in the last timer interrupt then we
 592         * leave the hang delay active in the hardware. We want the
 593         * system to make progress. That also prevents the following
 594         * scenario:
 595         * T1 expires 50ms from now
 596         * T2 expires 5s from now
 597         *
 598         * T1 is removed, so this code is called and would reprogram
 599         * the hardware to 5s from now. Any hrtimer_start after that
 600         * will not reprogram the hardware due to hang_detected being
 601         * set. So we'd effectively block all timers until the T2 event
 602         * fires.
 603         */
 604        if (cpu_base->hang_detected)
 605                return;
 606
 607        if (cpu_base->expires_next.tv64 != KTIME_MAX)
 608                tick_program_event(cpu_base->expires_next, 1);
 609}
 610
 611/*
 612 * When a timer is enqueued and expires earlier than the already enqueued
 613 * timers, we have to check, whether it expires earlier than the timer for
 614 * which the clock event device was armed.
 615 *
 616 * Called with interrupts disabled and base->cpu_base.lock held
 617 */
 618static void hrtimer_reprogram(struct hrtimer *timer,
 619                              struct hrtimer_clock_base *base)
 620{
 621        struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
 622        ktime_t expires = ktime_sub(hrtimer_get_expires(timer), base->offset);
 623
 624        WARN_ON_ONCE(hrtimer_get_expires_tv64(timer) < 0);
 625
 626        /*
 627         * If the timer is not on the current cpu, we cannot reprogram
 628         * the other cpus clock event device.
 629         */
 630        if (base->cpu_base != cpu_base)
 631                return;
 632
 633        /*
 634         * If the hrtimer interrupt is running, then it will
 635         * reevaluate the clock bases and reprogram the clock event
 636         * device. The callbacks are always executed in hard interrupt
 637         * context so we don't need an extra check for a running
 638         * callback.
 639         */
 640        if (cpu_base->in_hrtirq)
 641                return;
 642
 643        /*
 644         * CLOCK_REALTIME timer might be requested with an absolute
 645         * expiry time which is less than base->offset. Set it to 0.
 646         */
 647        if (expires.tv64 < 0)
 648                expires.tv64 = 0;
 649
 650        if (expires.tv64 >= cpu_base->expires_next.tv64)
 651                return;
 652
 653        /*
 654         * If a hang was detected in the last timer interrupt then we
 655         * do not schedule a timer which is earlier than the expiry
 656         * which we enforced in the hang detection. We want the system
 657         * to make progress.
 658         */
 659        if (cpu_base->hang_detected)
 660                return;
 661
 662        /*
 663         * Program the timer hardware. We enforce the expiry for
 664         * events which are already in the past.
 665         */
 666        cpu_base->expires_next = expires;
 667        tick_program_event(expires, 1);
 668}
 669
 670/*
 671 * Initialize the high resolution related parts of cpu_base
 672 */
 673static inline void hrtimer_init_hres(struct hrtimer_cpu_base *base)
 674{
 675        base->expires_next.tv64 = KTIME_MAX;
 676        base->hres_active = 0;
 677}
 678
 679/*
 680 * Retrigger next event is called after clock was set
 681 *
 682 * Called with interrupts disabled via on_each_cpu()
 683 */
 684static void retrigger_next_event(void *arg)
 685{
 686        struct hrtimer_cpu_base *base = &__get_cpu_var(hrtimer_bases);
 687
 688        if (!hrtimer_hres_active())
 689                return;
 690
 691        raw_spin_lock(&base->lock);
 692        hrtimer_update_base(base);
 693        hrtimer_force_reprogram(base, 0);
 694        raw_spin_unlock(&base->lock);
 695}
 696
 697/*
 698 * Switch to high resolution mode
 699 */
 700static int hrtimer_switch_to_hres(void)
 701{
 702        struct hrtimer_cpu_base *base = this_cpu_ptr(&hrtimer_bases);
 703        int i;
 704
 705        if (tick_init_highres()) {
 706                printk(KERN_WARNING "Could not switch to high resolution "
 707                                    "mode on CPU %d\n", base->cpu);
 708                return 0;
 709        }
 710        base->hres_active = 1;
 711        for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++)
 712                base->clock_base[i].resolution = KTIME_HIGH_RES;
 713
 714        tick_setup_sched_timer();
 715        /* "Retrigger" the interrupt to get things going */
 716        retrigger_next_event(NULL);
 717        return 1;
 718}
 719
 720static void clock_was_set_work(struct work_struct *work)
 721{
 722        clock_was_set();
 723}
 724
 725static DECLARE_WORK(hrtimer_work, clock_was_set_work);
 726
 727/*
 728 * Called from timekeeping and resume code to reprogram the hrtimer
 729 * interrupt device on all cpus.
 730 */
 731void clock_was_set_delayed(void)
 732{
 733        schedule_work(&hrtimer_work);
 734}
 735
 736#else
 737
 738static inline int hrtimer_hres_active(void) { return 0; }
 739static inline int hrtimer_is_hres_enabled(void) { return 0; }
 740static inline int hrtimer_switch_to_hres(void) { return 0; }
 741static inline void
 742hrtimer_force_reprogram(struct hrtimer_cpu_base *base, int skip_equal) { }
 743static inline int hrtimer_reprogram(struct hrtimer *timer,
 744                                    struct hrtimer_clock_base *base)
 745{
 746        return 0;
 747}
 748static inline void hrtimer_init_hres(struct hrtimer_cpu_base *base) { }
 749static inline void retrigger_next_event(void *arg) { }
 750
 751#endif /* CONFIG_HIGH_RES_TIMERS */
 752
 753/*
 754 * Clock realtime was set
 755 *
 756 * Change the offset of the realtime clock vs. the monotonic
 757 * clock.
 758 *
 759 * We might have to reprogram the high resolution timer interrupt. On
 760 * SMP we call the architecture specific code to retrigger _all_ high
 761 * resolution timer interrupts. On UP we just disable interrupts and
 762 * call the high resolution interrupt code.
 763 */
 764void clock_was_set(void)
 765{
 766#ifdef CONFIG_HIGH_RES_TIMERS
 767        /* Retrigger the CPU local events everywhere */
 768        on_each_cpu(retrigger_next_event, NULL, 1);
 769#endif
 770        timerfd_clock_was_set();
 771}
 772
 773/*
 774 * During resume we might have to reprogram the high resolution timer
 775 * interrupt (on the local CPU):
 776 */
 777void hrtimers_resume(void)
 778{
 779        WARN_ONCE(!irqs_disabled(),
 780                  KERN_INFO "hrtimers_resume() called with IRQs enabled!");
 781
 782        /* Retrigger on the local CPU */
 783        retrigger_next_event(NULL);
 784        /* And schedule a retrigger for all others */
 785        clock_was_set_delayed();
 786}
 787
 788static inline void timer_stats_hrtimer_set_start_info(struct hrtimer *timer)
 789{
 790#ifdef CONFIG_TIMER_STATS
 791        if (timer->start_site)
 792                return;
 793        timer->start_site = __builtin_return_address(0);
 794        memcpy(timer->start_comm, current->comm, TASK_COMM_LEN);
 795        timer->start_pid = current->pid;
 796#endif
 797}
 798
 799static inline void timer_stats_hrtimer_clear_start_info(struct hrtimer *timer)
 800{
 801#ifdef CONFIG_TIMER_STATS
 802        timer->start_site = NULL;
 803#endif
 804}
 805
 806static inline void timer_stats_account_hrtimer(struct hrtimer *timer)
 807{
 808#ifdef CONFIG_TIMER_STATS
 809        if (likely(!timer_stats_active))
 810                return;
 811        timer_stats_update_stats(timer, timer->start_pid, timer->start_site,
 812                                 timer->function, timer->start_comm, 0);
 813#endif
 814}
 815
 816/*
 817 * Counterpart to lock_hrtimer_base above:
 818 */
 819static inline
 820void unlock_hrtimer_base(const struct hrtimer *timer, unsigned long *flags)
 821{
 822        raw_spin_unlock_irqrestore(&timer->base->cpu_base->lock, *flags);
 823}
 824
 825/**
 826 * hrtimer_forward - forward the timer expiry
 827 * @timer:      hrtimer to forward
 828 * @now:        forward past this time
 829 * @interval:   the interval to forward
 830 *
 831 * Forward the timer expiry so it will expire in the future.
 832 * Returns the number of overruns.
 833 */
 834u64 hrtimer_forward(struct hrtimer *timer, ktime_t now, ktime_t interval)
 835{
 836        u64 orun = 1;
 837        ktime_t delta;
 838
 839        delta = ktime_sub(now, hrtimer_get_expires(timer));
 840
 841        if (delta.tv64 < 0)
 842                return 0;
 843
 844        if (interval.tv64 < timer->base->resolution.tv64)
 845                interval.tv64 = timer->base->resolution.tv64;
 846
 847        if (unlikely(delta.tv64 >= interval.tv64)) {
 848                s64 incr = ktime_to_ns(interval);
 849
 850                orun = ktime_divns(delta, incr);
 851                hrtimer_add_expires_ns(timer, incr * orun);
 852                if (hrtimer_get_expires_tv64(timer) > now.tv64)
 853                        return orun;
 854                /*
 855                 * This (and the ktime_add() below) is the
 856                 * correction for exact:
 857                 */
 858                orun++;
 859        }
 860        hrtimer_add_expires(timer, interval);
 861
 862        return orun;
 863}
 864EXPORT_SYMBOL_GPL(hrtimer_forward);
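
/*
 * Typical use (illustrative sketch; my_timer_fn and my_period are
 * hypothetical): a periodic callback forwards its own expiry past "now"
 * and asks to be restarted, which keeps the period stable even when the
 * callback ran late:
 *
 *        static enum hrtimer_restart my_timer_fn(struct hrtimer *timer)
 *        {
 *                hrtimer_forward(timer, hrtimer_cb_get_time(timer), my_period);
 *                return HRTIMER_RESTART;
 *        }
 */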
 865
 866/*
 867 * enqueue_hrtimer - internal function to (re)start a timer
 868 *
 869 * The timer is inserted in expiry order. Insertion into the
 870 * red black tree is O(log(n)). Must hold the base lock.
 871 *
 872 * Returns 1 when the new timer is the leftmost timer in the tree.
 873 */
 874static int enqueue_hrtimer(struct hrtimer *timer,
 875                           struct hrtimer_clock_base *base)
 876{
 877        debug_activate(timer);
 878
 879        timerqueue_add(&base->active, &timer->node);
 880        base->cpu_base->active_bases |= 1 << base->index;
 881
 882        /*
 883         * HRTIMER_STATE_ENQUEUED is or'ed to the current state to preserve the
 884         * state of a possibly running callback.
 885         */
 886        timer->state |= HRTIMER_STATE_ENQUEUED;
 887
 888        return (&timer->node == base->active.next);
 889}
 890
 891/*
 892 * __remove_hrtimer - internal function to remove a timer
 893 *
 894 * Caller must hold the base lock.
 895 *
 896 * High resolution timer mode reprograms the clock event device when the
 897 * timer is the one which expires next. The caller can disable this by setting
 898 * reprogram to zero. This is useful, when the context does a reprogramming
 899 * anyway (e.g. timer interrupt)
 900 */
 901static void __remove_hrtimer(struct hrtimer *timer,
 902                             struct hrtimer_clock_base *base,
 903                             unsigned long newstate, int reprogram)
 904{
 905        struct timerqueue_node *next_timer;
 906        if (!(timer->state & HRTIMER_STATE_ENQUEUED))
 907                goto out;
 908
 909        next_timer = timerqueue_getnext(&base->active);
 910        timerqueue_del(&base->active, &timer->node);
 911        if (&timer->node == next_timer) {
 912#ifdef CONFIG_HIGH_RES_TIMERS
 913                /* Reprogram the clock event device, if enabled */
 914                if (reprogram && hrtimer_hres_active()) {
 915                        ktime_t expires;
 916
 917                        expires = ktime_sub(hrtimer_get_expires(timer),
 918                                            base->offset);
 919                        if (base->cpu_base->expires_next.tv64 == expires.tv64)
 920                                hrtimer_force_reprogram(base->cpu_base, 1);
 921                }
 922#endif
 923        }
 924        if (!timerqueue_getnext(&base->active))
 925                base->cpu_base->active_bases &= ~(1 << base->index);
 926out:
 927        timer->state = newstate;
 928}
 929
 930/*
 931 * remove hrtimer, called with base lock held
 932 */
 933static inline int
 934remove_hrtimer(struct hrtimer *timer, struct hrtimer_clock_base *base)
 935{
 936        if (hrtimer_is_queued(timer)) {
 937                unsigned long state;
 938                int reprogram;
 939
 940                /*
 941                 * Remove the timer and force reprogramming when high
 942                 * resolution mode is active and the timer is on the current
 943                 * CPU. If we remove a timer on another CPU, reprogramming is
 944                 * skipped. The interrupt event on this CPU is fired and
 945                 * reprogramming happens in the interrupt handler. This is a
 946                 * rare case and less expensive than a smp call.
 947                 */
 948                debug_deactivate(timer);
 949                timer_stats_hrtimer_clear_start_info(timer);
 950                reprogram = base->cpu_base == &__get_cpu_var(hrtimer_bases);
 951                /*
 952                 * We must preserve the CALLBACK state flag here,
 953                 * otherwise we could move the timer base in
 954                 * switch_hrtimer_base.
 955                 */
 956                state = timer->state & HRTIMER_STATE_CALLBACK;
 957                __remove_hrtimer(timer, base, state, reprogram);
 958                return 1;
 959        }
 960        return 0;
 961}
 962
 963/**
 964 * hrtimer_start_range_ns - (re)start an hrtimer on the current CPU
 965 * @timer:      the timer to be added
 966 * @tim:        expiry time
 967 * @delta_ns:   "slack" range for the timer
 968 * @mode:       expiry mode: absolute (HRTIMER_MODE_ABS) or
 969 *              relative (HRTIMER_MODE_REL)
 970 *
 971 * Note: see KABI note at bottom of function.
 972 */
 973int hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
 974                            unsigned long delta_ns, const enum hrtimer_mode mode)
 975{
 976        struct hrtimer_clock_base *base, *new_base;
 977        unsigned long flags;
 978        int leftmost;
 979
 980        base = lock_hrtimer_base(timer, &flags);
 981
 982        /* Remove an active timer from the queue: */
 983        remove_hrtimer(timer, base);
 984
 985        if (mode & HRTIMER_MODE_REL) {
 986                tim = ktime_add_safe(tim, base->get_time());
 987                /*
 988                 * CONFIG_TIME_LOW_RES is a temporary way for architectures
 989                 * to signal that they simply return xtime in
 990                 * do_gettimeoffset(). In this case we want to round up by
 991                 * resolution when starting a relative timer, to avoid short
 992                 * timeouts. This will go away with the GTOD framework.
 993                 */
 994#ifdef CONFIG_TIME_LOW_RES
 995                tim = ktime_add_safe(tim, base->resolution);
 996#endif
 997        }
 998
 999        hrtimer_set_expires_range_ns(timer, tim, delta_ns);
1000
1001        /* Switch the timer base, if necessary: */
1002        new_base = switch_hrtimer_base(timer, base, mode & HRTIMER_MODE_PINNED);
1003
1004        timer_stats_hrtimer_set_start_info(timer);
1005
1006        leftmost = enqueue_hrtimer(timer, new_base);
1007        if (!leftmost)
1008                goto unlock;
1009
1010        if (!hrtimer_is_hres_active(timer)) {
1011                /*
1012                 * Kick to reschedule the next tick to handle the new timer
1013                 * on dynticks target.
1014                 */
1015                wake_up_nohz_cpu(new_base->cpu_base->cpu);
1016        } else {
1017                hrtimer_reprogram(timer, new_base);
1018        }
1019unlock:
1020        unlock_hrtimer_base(timer, &flags);
1021        /*
1022         * RHEL7 KABI: There may be external callers to this function that
1023         * check this return value.  For the purposes of matching upstream this
1024         * return can be safely ignored, as upstream has made this function
1025         * return void.
1026         */
1027        return 0;
1028}
1029EXPORT_SYMBOL_GPL(hrtimer_start_range_ns);
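
/*
 * Illustrative call (assuming "t" is an already initialized hrtimer): arm
 * the timer 10ms from now with 1ms of slack:
 *
 *        hrtimer_start_range_ns(&t, ktime_set(0, 10 * NSEC_PER_MSEC),
 *                               NSEC_PER_MSEC, HRTIMER_MODE_REL);
 *
 * The slack widens the soft/hard expiry window set up by
 * hrtimer_set_expires_range_ns(), giving the expiry code room to coalesce
 * this wakeup with a neighbouring one.
 */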
1030
1031/**
1032 * hrtimer_start - (re)start an hrtimer on the current CPU
1033 * @timer:      the timer to be added
1034 * @tim:        expiry time
1035 * @mode:       expiry mode: absolute (HRTIMER_MODE_ABS) or
1036 *              relative (HRTIMER_MODE_REL)
1037 *
1038 * Returns:
1039 *  0 on success
1040 *  1 when the timer was active
1041 */
1042int
1043hrtimer_start(struct hrtimer *timer, ktime_t tim, const enum hrtimer_mode mode)
1044{
1045        return hrtimer_start_range_ns(timer, tim, 0, mode);
1046}
1047EXPORT_SYMBOL_GPL(hrtimer_start);
1048
1049
1050/**
1051 * hrtimer_try_to_cancel - try to deactivate a timer
1052 * @timer:      hrtimer to stop
1053 *
1054 * Returns:
1055 *  0 when the timer was not active
1056 *  1 when the timer was active
1057 * -1 when the timer is currently executing the callback function and
1058 *    cannot be stopped
1059 */
1060int hrtimer_try_to_cancel(struct hrtimer *timer)
1061{
1062        struct hrtimer_clock_base *base;
1063        unsigned long flags;
1064        int ret = -1;
1065
1066        /*
1067         * Check lockless first. If the timer is not active (neither
1068         * enqueued nor running the callback), nothing to do here.  The
1069         * base lock does not serialize against a concurrent enqueue,
1070         * so we can avoid taking it.
1071         */
1072        if (!hrtimer_active(timer))
1073                return 0;
1074
1075        base = lock_hrtimer_base(timer, &flags);
1076
1077        if (!hrtimer_callback_running(timer))
1078                ret = remove_hrtimer(timer, base);
1079
1080        unlock_hrtimer_base(timer, &flags);
1081
1082        return ret;
1083
1084}
1085EXPORT_SYMBOL_GPL(hrtimer_try_to_cancel);
1086
1087/**
1088 * hrtimer_cancel - cancel a timer and wait for the handler to finish.
1089 * @timer:      the timer to be cancelled
1090 *
1091 * Returns:
1092 *  0 when the timer was not active
1093 *  1 when the timer was active
1094 */
1095int hrtimer_cancel(struct hrtimer *timer)
1096{
1097        for (;;) {
1098                int ret = hrtimer_try_to_cancel(timer);
1099
1100                if (ret >= 0)
1101                        return ret;
1102                cpu_relax();
1103        }
1104}
1105EXPORT_SYMBOL_GPL(hrtimer_cancel);
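
/*
 * The two cancel variants differ only in how they handle a running
 * callback (sketch, "t" hypothetical):
 *
 *        ret = hrtimer_try_to_cancel(&t);        -> may return -1, never waits
 *        ret = hrtimer_cancel(&t);               -> loops until the callback is done
 *
 * hrtimer_cancel() must therefore not be called from the timer's own
 * callback, nor while holding a lock that the callback needs to complete.
 */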
1106
1107/**
1108 * hrtimer_get_remaining - get remaining time for the timer
1109 * @timer:      the timer to read
1110 */
1111ktime_t hrtimer_get_remaining(const struct hrtimer *timer)
1112{
1113        unsigned long flags;
1114        ktime_t rem;
1115
1116        lock_hrtimer_base(timer, &flags);
1117        rem = hrtimer_expires_remaining(timer);
1118        unlock_hrtimer_base(timer, &flags);
1119
1120        return rem;
1121}
1122EXPORT_SYMBOL_GPL(hrtimer_get_remaining);
1123
1124#ifdef CONFIG_NO_HZ_COMMON
1125/**
1126 * hrtimer_get_next_event - get the time until next expiry event
1127 *
1128 * Returns the next expiry time or KTIME_MAX if no timer is pending.
1129 */
1130u64 hrtimer_get_next_event(void)
1131{
1132        struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
1133        u64 expires = KTIME_MAX;
1134        unsigned long flags;
1135
1136        raw_spin_lock_irqsave(&cpu_base->lock, flags);
1137
1138        if (!hrtimer_hres_active())
1139                expires = __hrtimer_get_next_event(cpu_base).tv64;
1140
1141        raw_spin_unlock_irqrestore(&cpu_base->lock, flags);
1142
1143        return expires;
1144}
1145#endif
1146
1147static void __hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
1148                           enum hrtimer_mode mode)
1149{
1150        struct hrtimer_cpu_base *cpu_base;
1151        int base;
1152
1153        memset(timer, 0, sizeof(struct hrtimer));
1154
1155        cpu_base = &__raw_get_cpu_var(hrtimer_bases);
1156
1157        if (clock_id == CLOCK_REALTIME && mode != HRTIMER_MODE_ABS)
1158                clock_id = CLOCK_MONOTONIC;
1159
1160        base = hrtimer_clockid_to_base(clock_id);
1161        timer->base = &cpu_base->clock_base[base];
1162        timerqueue_init(&timer->node);
1163
1164#ifdef CONFIG_TIMER_STATS
1165        timer->start_site = NULL;
1166        timer->start_pid = -1;
1167        memset(timer->start_comm, 0, TASK_COMM_LEN);
1168#endif
1169}
1170
1171/**
1172 * hrtimer_init - initialize a timer to the given clock
1173 * @timer:      the timer to be initialized
1174 * @clock_id:   the clock to be used
1175 * @mode:       timer mode abs/rel
1176 */
1177void hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
1178                  enum hrtimer_mode mode)
1179{
1180        debug_init(timer, clock_id, mode);
1181        __hrtimer_init(timer, clock_id, mode);
1182}
1183EXPORT_SYMBOL_GPL(hrtimer_init);
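
/*
 * Putting init and start together (illustrative sketch; my_timer_fn is the
 * hypothetical callback from the hrtimer_forward() example above):
 *
 *        static struct hrtimer my_timer;
 *
 *        hrtimer_init(&my_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 *        my_timer.function = my_timer_fn;
 *        hrtimer_start(&my_timer, ktime_set(0, 100 * NSEC_PER_MSEC),
 *                      HRTIMER_MODE_REL);
 */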
1184
1185/**
1186 * hrtimer_get_res - get the timer resolution for a clock
1187 * @which_clock: which clock to query
1188 * @tp:          pointer to timespec variable to store the resolution
1189 *
1190 * Store the resolution of the clock selected by @which_clock in the
1191 * variable pointed to by @tp.
1192 */
1193int hrtimer_get_res(const clockid_t which_clock, struct timespec *tp)
1194{
1195        struct hrtimer_cpu_base *cpu_base;
1196        int base = hrtimer_clockid_to_base(which_clock);
1197
1198        cpu_base = &__raw_get_cpu_var(hrtimer_bases);
1199        *tp = ktime_to_timespec(cpu_base->clock_base[base].resolution);
1200
1201        return 0;
1202}
1203EXPORT_SYMBOL_GPL(hrtimer_get_res);
1204
1205static void __run_hrtimer(struct hrtimer_cpu_base *cpu_base,
1206                          struct hrtimer_clock_base *base,
1207                          struct hrtimer *timer, ktime_t *now)
1208{
1209        enum hrtimer_restart (*fn)(struct hrtimer *);
1210        int restart;
1211
1212        WARN_ON(!irqs_disabled());
1213
1214        debug_deactivate(timer);
1215        __remove_hrtimer(timer, base, HRTIMER_STATE_CALLBACK, 0);
1216        timer_stats_account_hrtimer(timer);
1217        fn = timer->function;
1218
1219        /*
1220         * Because we run timers from hardirq context, there is no chance
1221         * they get migrated to another cpu, therefore it's safe to unlock
1222         * the timer base.
1223         */
1224        raw_spin_unlock(&cpu_base->lock);
1225        trace_hrtimer_expire_entry(timer, now);
1226        restart = fn(timer);
1227        trace_hrtimer_expire_exit(timer);
1228        raw_spin_lock(&cpu_base->lock);
1229
1230        /*
1231         * Note: We clear the CALLBACK bit after enqueue_hrtimer and
1232         * we do not reprogram the event hardware. Happens either in
1233         * hrtimer_start_range_ns() or in hrtimer_interrupt()
1234         */
1235        if (restart != HRTIMER_NORESTART) {
1236                BUG_ON(timer->state != HRTIMER_STATE_CALLBACK);
1237                enqueue_hrtimer(timer, base);
1238        }
1239
1240        WARN_ON_ONCE(!(timer->state & HRTIMER_STATE_CALLBACK));
1241
1242        timer->state &= ~HRTIMER_STATE_CALLBACK;
1243}
1244
1245static void __hrtimer_run_queues(struct hrtimer_cpu_base *cpu_base, ktime_t now)
1246{
1247        int i;
1248
1249        for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
1250                struct hrtimer_clock_base *base;
1251                struct timerqueue_node *node;
1252                ktime_t basenow;
1253
1254                if (!(cpu_base->active_bases & (1 << i)))
1255                        continue;
1256
1257                base = cpu_base->clock_base + i;
1258                basenow = ktime_add(now, base->offset);
1259
1260                while ((node = timerqueue_getnext(&base->active))) {
1261                        struct hrtimer *timer;
1262
1263                        timer = container_of(node, struct hrtimer, node);
1264
1265                        /*
1266                         * The immediate goal for using the softexpires is
1267                         * minimizing wakeups, not running timers at the
1268                         * earliest interrupt after their soft expiration.
1269                         * This allows us to avoid using a Priority Search
1270                         * Tree, which can answer a stabbing query for
1271                         * overlapping intervals and instead use the simple
1272                         * BST we already have.
1273                         * We don't add extra wakeups by delaying timers that
1274                         * are right-of a not yet expired timer, because that
1275                         * timer will have to trigger a wakeup anyway.
1276                         */
1277                        if (basenow.tv64 < hrtimer_get_softexpires_tv64(timer))
1278                                break;
1279
1280                        __run_hrtimer(cpu_base, base, timer, &basenow);
1281                }
1282        }
1283}
1284
1285#ifdef CONFIG_HIGH_RES_TIMERS
1286
1287/*
1288 * High resolution timer interrupt
1289 * Called with interrupts disabled
1290 */
1291void hrtimer_interrupt(struct clock_event_device *dev)
1292{
1293        struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
1294        ktime_t expires_next, now, entry_time, delta;
1295        int retries = 0;
1296
1297        BUG_ON(!cpu_base->hres_active);
1298        cpu_base->nr_events++;
1299        dev->next_event.tv64 = KTIME_MAX;
1300
1301        raw_spin_lock(&cpu_base->lock);
1302        entry_time = now = hrtimer_update_base(cpu_base);
1303retry:
1304        cpu_base->in_hrtirq = 1;
1305        /*
1306         * We set expires_next to KTIME_MAX here with cpu_base->lock
1307         * held to prevent that a timer is enqueued in our queue via
1308         * the migration code. This does not affect enqueueing of
1309         * timers which run their callback and need to be requeued on
1310         * this CPU.
1311         */
1312        cpu_base->expires_next.tv64 = KTIME_MAX;
1313
1314        __hrtimer_run_queues(cpu_base, now);
1315
1316        /* Reevaluate the clock bases for the next expiry */
1317        expires_next = __hrtimer_get_next_event(cpu_base);
1318        /*
1319         * Store the new expiry value so the migration code can verify
1320         * against it.
1321         */
1322        cpu_base->expires_next = expires_next;
1323        cpu_base->in_hrtirq = 0;
1324        raw_spin_unlock(&cpu_base->lock);
1325
1326        /* Reprogramming necessary ? */
1327        if (expires_next.tv64 == KTIME_MAX ||
1328            !tick_program_event(expires_next, 0)) {
1329                cpu_base->hang_detected = 0;
1330                return;
1331        }
1332
1333        /*
1334         * The next timer was already expired due to:
1335         * - tracing
1336         * - long lasting callbacks
1337         * - being scheduled away when running in a VM
1338         *
1339         * We need to prevent that we loop forever in the hrtimer
1340         * interrupt routine. We give it 3 attempts to avoid
1341         * overreacting on some spurious event.
1342         *
1343         * Acquire base lock for updating the offsets and retrieving
1344         * the current time.
1345         */
1346        raw_spin_lock(&cpu_base->lock);
1347        now = hrtimer_update_base(cpu_base);
1348        cpu_base->nr_retries++;
1349        if (++retries < 3)
1350                goto retry;
1351        /*
1352         * Give the system a chance to do something else than looping
1353         * here. We stored the entry time, so we know exactly how long
1354         * we spent here. We schedule the next event this amount of
1355         * time away.
1356         */
1357        cpu_base->nr_hangs++;
1358        cpu_base->hang_detected = 1;
1359        raw_spin_unlock(&cpu_base->lock);
1360        delta = ktime_sub(now, entry_time);
1361        if (delta.tv64 > cpu_base->max_hang_time.tv64)
1362                cpu_base->max_hang_time = delta;
1363        /*
1364         * Limit it to a sensible value as we enforce a longer
1365         * delay. Give the CPU at least 100ms to catch up.
1366         */
1367        if (delta.tv64 > 100 * NSEC_PER_MSEC)
1368                expires_next = ktime_add_ns(now, 100 * NSEC_PER_MSEC);
1369        else
1370                expires_next = ktime_add(now, delta);
1371        tick_program_event(expires_next, 1);
1372        printk_once(KERN_WARNING "hrtimer: interrupt took %llu ns\n",
1373                    ktime_to_ns(delta));
1374}
1375
1376/*
1377 * local version of hrtimer_peek_ahead_timers() called with interrupts
1378 * disabled.
1379 */
1380static inline void __hrtimer_peek_ahead_timers(void)
1381{
1382        struct tick_device *td;
1383
1384        if (!hrtimer_hres_active())
1385                return;
1386
1387        td = &__get_cpu_var(tick_cpu_device);
1388        if (td && td->evtdev)
1389                hrtimer_interrupt(td->evtdev);
1390}
1391
1392#else /* CONFIG_HIGH_RES_TIMERS */
1393
1394static inline void __hrtimer_peek_ahead_timers(void) { }
1395
1396#endif  /* !CONFIG_HIGH_RES_TIMERS */
1397
1398/*
1399 * Called from run_local_timers in hardirq context every jiffy
1400 */
1401void hrtimer_run_queues(void)
1402{
1403        struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
1404        ktime_t now;
1405
1406        if (hrtimer_hres_active())
1407                return;
1408
1409        /*
1410         * This _is_ ugly: We have to check periodically, whether we
1411         * can switch to highres and / or nohz mode. The clocksource
1412         * switch happens with xtime_lock held. Notification from
1413         * there only sets the check bit in the tick_oneshot code,
1414         * otherwise we might deadlock vs. xtime_lock.
1415         */
1416        if (tick_check_oneshot_change(!hrtimer_is_hres_enabled())) {
1417                hrtimer_switch_to_hres();
1418                return;
1419        }
1420
1421        raw_spin_lock(&cpu_base->lock);
1422        now = hrtimer_update_base(cpu_base);
1423        __hrtimer_run_queues(cpu_base, now);
1424        raw_spin_unlock(&cpu_base->lock);
1425}
1426
1427/*
1428 * Sleep related functions:
1429 */
1430static enum hrtimer_restart hrtimer_wakeup(struct hrtimer *timer)
1431{
1432        struct hrtimer_sleeper *t =
1433                container_of(timer, struct hrtimer_sleeper, timer);
1434        struct task_struct *task = t->task;
1435
1436        t->task = NULL;
1437        if (task)
1438                wake_up_process(task);
1439
1440        return HRTIMER_NORESTART;
1441}
1442
1443void hrtimer_init_sleeper(struct hrtimer_sleeper *sl, struct task_struct *task)
1444{
1445        sl->timer.function = hrtimer_wakeup;
1446        sl->task = task;
1447}
1448EXPORT_SYMBOL_GPL(hrtimer_init_sleeper);
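
/*
 * A sleeper couples an hrtimer to a task: hrtimer_wakeup() above clears
 * t->task and wakes the task when the timer fires. The canonical user is
 * do_nanosleep() below; a condensed sketch of the same pattern, with
 * "timeout" a hypothetical ktime_t expiry:
 *
 *        struct hrtimer_sleeper sl;
 *
 *        hrtimer_init_on_stack(&sl.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 *        hrtimer_set_expires(&sl.timer, timeout);
 *        hrtimer_init_sleeper(&sl, current);
 *        set_current_state(TASK_INTERRUPTIBLE);
 *        hrtimer_start_expires(&sl.timer, HRTIMER_MODE_REL);
 *        if (sl.task)
 *                schedule();
 *        hrtimer_cancel(&sl.timer);
 *        __set_current_state(TASK_RUNNING);
 *        destroy_hrtimer_on_stack(&sl.timer);
 */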
1449
1450static int __sched do_nanosleep(struct hrtimer_sleeper *t, enum hrtimer_mode mode)
1451{
1452        hrtimer_init_sleeper(t, current);
1453
1454        do {
1455                set_current_state(TASK_INTERRUPTIBLE);
1456                hrtimer_start_expires(&t->timer, mode);
1457
1458                if (likely(t->task))
1459                        freezable_schedule();
1460
1461                hrtimer_cancel(&t->timer);
1462                mode = HRTIMER_MODE_ABS;
1463
1464        } while (t->task && !signal_pending(current));
1465
1466        __set_current_state(TASK_RUNNING);
1467
1468        return t->task == NULL;
1469}
1470
1471static int update_rmtp(struct hrtimer *timer, struct timespec __user *rmtp)
1472{
1473        struct timespec rmt;
1474        ktime_t rem;
1475
1476        rem = hrtimer_expires_remaining(timer);
1477        if (rem.tv64 <= 0)
1478                return 0;
1479        rmt = ktime_to_timespec(rem);
1480
1481        if (copy_to_user(rmtp, &rmt, sizeof(*rmtp)))
1482                return -EFAULT;
1483
1484        return 1;
1485}
1486
1487long __sched hrtimer_nanosleep_restart(struct restart_block *restart)
1488{
1489        struct hrtimer_sleeper t;
1490        struct timespec __user  *rmtp;
1491        int ret = 0;
1492
1493        hrtimer_init_on_stack(&t.timer, restart->nanosleep.clockid,
1494                                HRTIMER_MODE_ABS);
1495        hrtimer_set_expires_tv64(&t.timer, restart->nanosleep.expires);
1496
1497        if (do_nanosleep(&t, HRTIMER_MODE_ABS))
1498                goto out;
1499
1500        rmtp = restart->nanosleep.rmtp;
1501        if (rmtp) {
1502                ret = update_rmtp(&t.timer, rmtp);
1503                if (ret <= 0)
1504                        goto out;
1505        }
1506
1507        /* The other values in restart are already filled in */
1508        ret = -ERESTART_RESTARTBLOCK;
1509out:
1510        destroy_hrtimer_on_stack(&t.timer);
1511        return ret;
1512}
1513
1514long hrtimer_nanosleep(struct timespec *rqtp, struct timespec __user *rmtp,
1515                       const enum hrtimer_mode mode, const clockid_t clockid)
1516{
1517        struct restart_block *restart;
1518        struct hrtimer_sleeper t;
1519        int ret = 0;
1520        unsigned long slack;
1521
1522        slack = current->timer_slack_ns;
1523        if (dl_task(current) || rt_task(current))
1524                slack = 0;
1525
1526        hrtimer_init_on_stack(&t.timer, clockid, mode);
1527        hrtimer_set_expires_range_ns(&t.timer, timespec_to_ktime(*rqtp), slack);
1528        if (do_nanosleep(&t, mode))
1529                goto out;
1530
1531        /* Absolute timers do not update the rmtp value and restart: */
1532        if (mode == HRTIMER_MODE_ABS) {
1533                ret = -ERESTARTNOHAND;
1534                goto out;
1535        }
1536
1537        if (rmtp) {
1538                ret = update_rmtp(&t.timer, rmtp);
1539                if (ret <= 0)
1540                        goto out;
1541        }
1542
1543        restart = &current_thread_info()->restart_block;
1544        restart->fn = hrtimer_nanosleep_restart;
1545        restart->nanosleep.clockid = t.timer.base->clockid;
1546        restart->nanosleep.rmtp = rmtp;
1547        restart->nanosleep.expires = hrtimer_get_expires_tv64(&t.timer);
1548
1549        ret = -ERESTART_RESTARTBLOCK;
1550out:
1551        destroy_hrtimer_on_stack(&t.timer);
1552        return ret;
1553}
1554
1555SYSCALL_DEFINE2(nanosleep, struct timespec __user *, rqtp,
1556                struct timespec __user *, rmtp)
1557{
1558        struct timespec tu;
1559
1560        if (copy_from_user(&tu, rqtp, sizeof(tu)))
1561                return -EFAULT;
1562
1563        if (!timespec_valid(&tu))
1564                return -EINVAL;
1565
1566        return hrtimer_nanosleep(&tu, rmtp, HRTIMER_MODE_REL, CLOCK_MONOTONIC);
1567}
1568
1569/*
1570 * Functions related to boot-time initialization:
1571 */
1572static void init_hrtimers_cpu(int cpu)
1573{
1574        struct hrtimer_cpu_base *cpu_base = &per_cpu(hrtimer_bases, cpu);
1575        int i;
1576
1577        for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
1578                cpu_base->clock_base[i].cpu_base = cpu_base;
1579                timerqueue_init_head(&cpu_base->clock_base[i].active);
1580        }
1581
1582        cpu_base->cpu = cpu;
1583        hrtimer_init_hres(cpu_base);
1584}
1585
1586#ifdef CONFIG_HOTPLUG_CPU
1587
1588static void migrate_hrtimer_list(struct hrtimer_clock_base *old_base,
1589                                struct hrtimer_clock_base *new_base)
1590{
1591        struct hrtimer *timer;
1592        struct timerqueue_node *node;
1593
1594        while ((node = timerqueue_getnext(&old_base->active))) {
1595                timer = container_of(node, struct hrtimer, node);
1596                BUG_ON(hrtimer_callback_running(timer));
1597                debug_deactivate(timer);
1598
1599                /*
1600                 * Mark it as STATE_MIGRATE not INACTIVE otherwise the
1601                 * timer could be seen as !active and just vanish away
1602                 * under us on another CPU
1603                 */
1604                __remove_hrtimer(timer, old_base, HRTIMER_STATE_MIGRATE, 0);
1605                timer->base = new_base;
1606                /*
1607                 * Enqueue the timers on the new cpu. This does not
1608                 * reprogram the event device in case the timer
1609                 * expires before the earliest on this CPU, but we run
1610                 * hrtimer_interrupt after we migrated everything to
1611                 * sort out already expired timers and reprogram the
1612                 * event device.
1613                 */
1614                enqueue_hrtimer(timer, new_base);
1615
1616                /* Clear the migration state bit */
1617                timer->state &= ~HRTIMER_STATE_MIGRATE;
1618        }
1619}
1620
1621static void migrate_hrtimers(int scpu)
1622{
1623        struct hrtimer_cpu_base *old_base, *new_base;
1624        int i;
1625
1626        BUG_ON(cpu_online(scpu));
1627        tick_cancel_sched_timer(scpu);
1628
1629        local_irq_disable();
1630        old_base = &per_cpu(hrtimer_bases, scpu);
1631        new_base = &__get_cpu_var(hrtimer_bases);
1632        /*
1633         * The caller is globally serialized and nobody else
1634         * takes two locks at once, deadlock is not possible.
1635         */
1636        raw_spin_lock(&new_base->lock);
1637        raw_spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);
1638
1639        for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
1640                migrate_hrtimer_list(&old_base->clock_base[i],
1641                                     &new_base->clock_base[i]);
1642        }
1643
1644        raw_spin_unlock(&old_base->lock);
1645        raw_spin_unlock(&new_base->lock);
1646
1647        /* Check, if we got expired work to do */
1648        __hrtimer_peek_ahead_timers();
1649        local_irq_enable();
1650}
1651
1652#endif /* CONFIG_HOTPLUG_CPU */
1653
1654static int hrtimer_cpu_notify(struct notifier_block *self,
1655                                        unsigned long action, void *hcpu)
1656{
1657        int scpu = (long)hcpu;
1658
1659        switch (action) {
1660
1661        case CPU_UP_PREPARE:
1662        case CPU_UP_PREPARE_FROZEN:
1663                init_hrtimers_cpu(scpu);
1664                break;
1665
1666#ifdef CONFIG_HOTPLUG_CPU
1667        case CPU_DYING:
1668        case CPU_DYING_FROZEN:
1669                clockevents_notify(CLOCK_EVT_NOTIFY_CPU_DYING, &scpu);
1670                break;
1671        case CPU_DEAD:
1672        case CPU_DEAD_FROZEN:
1673        {
1674                clockevents_notify(CLOCK_EVT_NOTIFY_CPU_DEAD, &scpu);
1675                migrate_hrtimers(scpu);
1676                break;
1677        }
1678#endif
1679
1680        default:
1681                break;
1682        }
1683
1684        return NOTIFY_OK;
1685}
1686
1687static struct notifier_block hrtimers_nb = {
1688        .notifier_call = hrtimer_cpu_notify,
1689};
1690
1691void __init hrtimers_init(void)
1692{
1693        hrtimer_cpu_notify(&hrtimers_nb, (unsigned long)CPU_UP_PREPARE,
1694                          (void *)(long)smp_processor_id());
1695        register_cpu_notifier(&hrtimers_nb);
1696}
1697
1698/**
1699 * schedule_hrtimeout_range_clock - sleep until timeout
1700 * @expires:    timeout value (ktime_t)
1701 * @delta:      slack in expires timeout (ktime_t)
1702 * @mode:       timer mode, HRTIMER_MODE_ABS or HRTIMER_MODE_REL
1703 * @clock:      timer clock, CLOCK_MONOTONIC or CLOCK_REALTIME
1704 */
1705int __sched
1706schedule_hrtimeout_range_clock(ktime_t *expires, unsigned long delta,
1707                               const enum hrtimer_mode mode, int clock)
1708{
1709        struct hrtimer_sleeper t;
1710
1711        /*
1712         * Optimize when a zero timeout value is given. It does not
1713         * matter whether this is an absolute or a relative time.
1714         */
1715        if (expires && !expires->tv64) {
1716                __set_current_state(TASK_RUNNING);
1717                return 0;
1718        }
1719
1720        /*
1721         * A NULL parameter means "infinite"
1722         */
1723        if (!expires) {
1724                schedule();
1725                __set_current_state(TASK_RUNNING);
1726                return -EINTR;
1727        }
1728
1729        hrtimer_init_on_stack(&t.timer, clock, mode);
1730        hrtimer_set_expires_range_ns(&t.timer, *expires, delta);
1731
1732        hrtimer_init_sleeper(&t, current);
1733
1734        hrtimer_start_expires(&t.timer, mode);
1735
1736        if (likely(t.task))
1737                schedule();
1738
1739        hrtimer_cancel(&t.timer);
1740        destroy_hrtimer_on_stack(&t.timer);
1741
1742        __set_current_state(TASK_RUNNING);
1743
1744        return !t.task ? 0 : -EINTR;
1745}
1746
1747/**
1748 * schedule_hrtimeout_range - sleep until timeout
1749 * @expires:    timeout value (ktime_t)
1750 * @delta:      slack in expires timeout (ktime_t)
1751 * @mode:       timer mode, HRTIMER_MODE_ABS or HRTIMER_MODE_REL
1752 *
1753 * Make the current task sleep until the given expiry time has
1754 * elapsed. The routine will return immediately unless
1755 * the current task state has been set (see set_current_state()).
1756 *
1757 * The @delta argument gives the kernel the freedom to schedule the
1758 * actual wakeup to a time that is both power and performance friendly.
1759 * The kernel gives the normal best effort behavior for "@expires+@delta",
1760 * and may decide to fire the timer earlier, but no earlier than @expires.
1761 *
1762 * You can set the task state as follows -
1763 *
1764 * %TASK_UNINTERRUPTIBLE - at least the @expires time is guaranteed to
1765 * pass before the routine returns.
1766 *
1767 * %TASK_INTERRUPTIBLE - the routine may return early if a signal is
1768 * delivered to the current task.
1769 *
1770 * The current task state is guaranteed to be TASK_RUNNING when this
1771 * routine returns.
1772 *
1773 * Returns 0 when the timer has expired otherwise -EINTR
1774 */
1775int __sched schedule_hrtimeout_range(ktime_t *expires, unsigned long delta,
1776                                     const enum hrtimer_mode mode)
1777{
1778        return schedule_hrtimeout_range_clock(expires, delta, mode,
1779                                              CLOCK_MONOTONIC);
1780}
1781EXPORT_SYMBOL_GPL(schedule_hrtimeout_range);
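
/*
 * Illustrative calling convention (the task state must be set first, just
 * as with schedule_timeout()):
 *
 *        ktime_t to = ktime_set(0, 500 * NSEC_PER_USEC);
 *        int ret;
 *
 *        set_current_state(TASK_INTERRUPTIBLE);
 *        ret = schedule_hrtimeout_range(&to, 10 * NSEC_PER_USEC,
 *                                       HRTIMER_MODE_REL);
 *
 * ret is 0 when the timeout expired and -EINTR when the task was woken up
 * earlier.
 */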
1782
1783/**
1784 * schedule_hrtimeout - sleep until timeout
1785 * @expires:    timeout value (ktime_t)
1786 * @mode:       timer mode, HRTIMER_MODE_ABS or HRTIMER_MODE_REL
1787 *
1788 * Make the current task sleep until the given expiry time has
1789 * elapsed. The routine will return immediately unless
1790 * the current task state has been set (see set_current_state()).
1791 *
1792 * You can set the task state as follows -
1793 *
1794 * %TASK_UNINTERRUPTIBLE - at least the @expires time is guaranteed to
1795 * pass before the routine returns.
1796 *
1797 * %TASK_INTERRUPTIBLE - the routine may return early if a signal is
1798 * delivered to the current task.
1799 *
1800 * The current task state is guaranteed to be TASK_RUNNING when this
1801 * routine returns.
1802 *
1803 * Returns 0 when the timer has expired otherwise -EINTR
1804 */
1805int __sched schedule_hrtimeout(ktime_t *expires,
1806                               const enum hrtimer_mode mode)
1807{
1808        return schedule_hrtimeout_range(expires, 0, mode);
1809}
1810EXPORT_SYMBOL_GPL(schedule_hrtimeout);
1811