linux/kernel/hrtimer.c
   1/*
   2 *  linux/kernel/hrtimer.c
   3 *
   4 *  Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de>
   5 *  Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
   6 *  Copyright(C) 2006-2007  Timesys Corp., Thomas Gleixner
   7 *
   8 *  High-resolution kernel timers
   9 *
  10 *  In contrast to the low-resolution timeout API implemented in
  11 *  kernel/timer.c, hrtimers provide finer resolution and accuracy
  12 *  depending on system configuration and capabilities.
  13 *
  14 *  These timers are currently used for:
  15 *   - itimers
  16 *   - POSIX timers
  17 *   - nanosleep
  18 *   - precise in-kernel timing
  19 *
  20 *  Started by: Thomas Gleixner and Ingo Molnar
  21 *
  22 *  Credits:
  23 *      based on kernel/timer.c
  24 *
  25 *      Help, testing, suggestions, bugfixes, improvements were
  26 *      provided by:
  27 *
  28 *      George Anzinger, Andrew Morton, Steven Rostedt, Roman Zippel
   29 *      et al.
  30 *
   31 *  For licensing details see kernel-base/COPYING
  32 */
  33
  34#include <linux/cpu.h>
  35#include <linux/module.h>
  36#include <linux/percpu.h>
  37#include <linux/hrtimer.h>
  38#include <linux/notifier.h>
  39#include <linux/syscalls.h>
  40#include <linux/kallsyms.h>
  41#include <linux/interrupt.h>
  42#include <linux/tick.h>
  43#include <linux/seq_file.h>
  44#include <linux/err.h>
  45#include <linux/debugobjects.h>
  46#include <linux/sched.h>
  47#include <linux/timer.h>
  48
  49#include <asm/uaccess.h>
  50
  51#include <trace/events/timer.h>
  52
  53/*
  54 * The timer bases:
  55 *
  56 * Note: If we want to add new timer bases, we have to skip the two
  57 * clock ids captured by the cpu-timers. We do this by holding empty
  58 * entries rather than doing math adjustment of the clock ids.
  59 * This ensures that we capture erroneous accesses to these clock ids
  60 * rather than moving them into the range of valid clock id's.
  61 */
  62DEFINE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases) =
  63{
  64
  65        .clock_base =
  66        {
  67                {
  68                        .index = CLOCK_REALTIME,
  69                        .get_time = &ktime_get_real,
  70                        .resolution = KTIME_LOW_RES,
  71                },
  72                {
  73                        .index = CLOCK_MONOTONIC,
  74                        .get_time = &ktime_get,
  75                        .resolution = KTIME_LOW_RES,
  76                },
  77        }
  78};
  79
  80/*
  81 * Get the coarse grained time at the softirq based on xtime and
  82 * wall_to_monotonic.
  83 */
  84static void hrtimer_get_softirq_time(struct hrtimer_cpu_base *base)
  85{
  86        ktime_t xtim, tomono;
  87        struct timespec xts, tom;
  88        unsigned long seq;
  89
  90        do {
  91                seq = read_seqbegin(&xtime_lock);
  92                xts = current_kernel_time();
  93                tom = wall_to_monotonic;
  94        } while (read_seqretry(&xtime_lock, seq));
  95
  96        xtim = timespec_to_ktime(xts);
  97        tomono = timespec_to_ktime(tom);
  98        base->clock_base[CLOCK_REALTIME].softirq_time = xtim;
  99        base->clock_base[CLOCK_MONOTONIC].softirq_time =
 100                ktime_add(xtim, tomono);
 101}
 102
 103/*
 104 * Functions and macros which are different for UP/SMP systems are kept in a
 105 * single place
 106 */
 107#ifdef CONFIG_SMP
 108
 109/*
 110 * We are using hashed locking: holding per_cpu(hrtimer_bases)[n].lock
 111 * means that all timers which are tied to this base via timer->base are
 112 * locked, and the base itself is locked too.
 113 *
 114 * So __run_timers/migrate_timers can safely modify all timers which could
 115 * be found on the lists/queues.
 116 *
 117 * When the timer's base is locked, and the timer removed from list, it is
 118 * possible to set timer->base = NULL and drop the lock: the timer remains
 119 * locked.
 120 */
 121static
 122struct hrtimer_clock_base *lock_hrtimer_base(const struct hrtimer *timer,
 123                                             unsigned long *flags)
 124{
 125        struct hrtimer_clock_base *base;
 126
 127        for (;;) {
 128                base = timer->base;
 129                if (likely(base != NULL)) {
 130                        spin_lock_irqsave(&base->cpu_base->lock, *flags);
 131                        if (likely(base == timer->base))
 132                                return base;
 133                        /* The timer has migrated to another CPU: */
 134                        spin_unlock_irqrestore(&base->cpu_base->lock, *flags);
 135                }
 136                cpu_relax();
 137        }
 138}
 139
 140
 141/*
 142 * Get the preferred target CPU for NOHZ
 143 */
 144static int hrtimer_get_target(int this_cpu, int pinned)
 145{
 146#ifdef CONFIG_NO_HZ
 147        if (!pinned && get_sysctl_timer_migration() && idle_cpu(this_cpu)) {
 148                int preferred_cpu = get_nohz_load_balancer();
 149
 150                if (preferred_cpu >= 0)
 151                        return preferred_cpu;
 152        }
 153#endif
 154        return this_cpu;
 155}
 156
 157/*
 158 * With HIGHRES=y we do not migrate the timer when it is expiring
 159 * before the next event on the target cpu because we cannot reprogram
 160 * the target cpu hardware and we would cause it to fire late.
 161 *
 162 * Called with cpu_base->lock of target cpu held.
 163 */
 164static int
 165hrtimer_check_target(struct hrtimer *timer, struct hrtimer_clock_base *new_base)
 166{
 167#ifdef CONFIG_HIGH_RES_TIMERS
 168        ktime_t expires;
 169
 170        if (!new_base->cpu_base->hres_active)
 171                return 0;
 172
 173        expires = ktime_sub(hrtimer_get_expires(timer), new_base->offset);
 174        return expires.tv64 <= new_base->cpu_base->expires_next.tv64;
 175#else
 176        return 0;
 177#endif
 178}
 179
 180/*
 181 * Switch the timer base to the current CPU when possible.
 182 */
 183static inline struct hrtimer_clock_base *
 184switch_hrtimer_base(struct hrtimer *timer, struct hrtimer_clock_base *base,
 185                    int pinned)
 186{
 187        struct hrtimer_clock_base *new_base;
 188        struct hrtimer_cpu_base *new_cpu_base;
 189        int this_cpu = smp_processor_id();
 190        int cpu = hrtimer_get_target(this_cpu, pinned);
 191
 192again:
 193        new_cpu_base = &per_cpu(hrtimer_bases, cpu);
 194        new_base = &new_cpu_base->clock_base[base->index];
 195
 196        if (base != new_base) {
 197                /*
 198                 * We are trying to move timer to new_base.
 199                 * However we can't change timer's base while it is running,
 200                 * so we keep it on the same CPU. No hassle vs. reprogramming
 201                 * the event source in the high resolution case. The softirq
 202                 * code will take care of this when the timer function has
 203                 * completed. There is no conflict as we hold the lock until
 204                 * the timer is enqueued.
 205                 */
 206                if (unlikely(hrtimer_callback_running(timer)))
 207                        return base;
 208
 209                /* See the comment in lock_timer_base() */
 210                timer->base = NULL;
 211                spin_unlock(&base->cpu_base->lock);
 212                spin_lock(&new_base->cpu_base->lock);
 213
 214                if (cpu != this_cpu && hrtimer_check_target(timer, new_base)) {
 215                        cpu = this_cpu;
 216                        spin_unlock(&new_base->cpu_base->lock);
 217                        spin_lock(&base->cpu_base->lock);
 218                        timer->base = base;
 219                        goto again;
 220                }
 221                timer->base = new_base;
 222        }
 223        return new_base;
 224}
 225
 226#else /* CONFIG_SMP */
 227
 228static inline struct hrtimer_clock_base *
 229lock_hrtimer_base(const struct hrtimer *timer, unsigned long *flags)
 230{
 231        struct hrtimer_clock_base *base = timer->base;
 232
 233        spin_lock_irqsave(&base->cpu_base->lock, *flags);
 234
 235        return base;
 236}
 237
 238# define switch_hrtimer_base(t, b, p)   (b)
 239
 240#endif  /* !CONFIG_SMP */
 241
 242/*
 243 * Functions for the union type storage format of ktime_t which are
 244 * too large for inlining:
 245 */
 246#if BITS_PER_LONG < 64
 247# ifndef CONFIG_KTIME_SCALAR
 248/**
 249 * ktime_add_ns - Add a scalar nanoseconds value to a ktime_t variable
 250 * @kt:         addend
 251 * @nsec:       the scalar nsec value to add
 252 *
 253 * Returns the sum of kt and nsec in ktime_t format
 254 */
 255ktime_t ktime_add_ns(const ktime_t kt, u64 nsec)
 256{
 257        ktime_t tmp;
 258
 259        if (likely(nsec < NSEC_PER_SEC)) {
 260                tmp.tv64 = nsec;
 261        } else {
 262                unsigned long rem = do_div(nsec, NSEC_PER_SEC);
 263
 264                tmp = ktime_set((long)nsec, rem);
 265        }
 266
 267        return ktime_add(kt, tmp);
 268}
 269
 270EXPORT_SYMBOL_GPL(ktime_add_ns);
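/*
 * Example (illustrative sketch): ktime_add_ns() is the usual way to push
 * a ktime_t value forward by a raw nanosecond count. The helper name and
 * the 1.5 ms offset below are hypothetical:
 *
 *        static inline ktime_t example_deadline_in(ktime_t now)
 *        {
 *                return ktime_add_ns(now, 1500000ULL);
 *        }
 */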
 271
 272/**
 273 * ktime_sub_ns - Subtract a scalar nanoseconds value from a ktime_t variable
 274 * @kt:         minuend
 275 * @nsec:       the scalar nsec value to subtract
 276 *
 277 * Returns the subtraction of @nsec from @kt in ktime_t format
 278 */
 279ktime_t ktime_sub_ns(const ktime_t kt, u64 nsec)
 280{
 281        ktime_t tmp;
 282
 283        if (likely(nsec < NSEC_PER_SEC)) {
 284                tmp.tv64 = nsec;
 285        } else {
 286                unsigned long rem = do_div(nsec, NSEC_PER_SEC);
 287
 288                tmp = ktime_set((long)nsec, rem);
 289        }
 290
 291        return ktime_sub(kt, tmp);
 292}
 293
 294EXPORT_SYMBOL_GPL(ktime_sub_ns);
 295# endif /* !CONFIG_KTIME_SCALAR */
 296
 297/*
 298 * Divide a ktime value by a nanosecond value
 299 */
 300u64 ktime_divns(const ktime_t kt, s64 div)
 301{
 302        u64 dclc;
 303        int sft = 0;
 304
 305        dclc = ktime_to_ns(kt);
 306        /* Make sure the divisor is less than 2^32: */
 307        while (div >> 32) {
 308                sft++;
 309                div >>= 1;
 310        }
 311        dclc >>= sft;
 312        do_div(dclc, (unsigned long) div);
 313
 314        return dclc;
 315}
  316#endif /* BITS_PER_LONG < 64 */
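/*
 * Example (illustrative sketch): ktime_divns() is what hrtimer_forward()
 * uses below to count how many whole intervals fit into a delta. A
 * hypothetical use, counting elapsed 10 ms periods:
 *
 *        ktime_t delta = ktime_sub(now, last);
 *        u64 periods = ktime_divns(delta, 10 * NSEC_PER_MSEC);
 *
 * Note that the divisor is shifted down to fit into 32 bits, so very
 * large divisors lose a little precision on 32-bit systems.
 */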
 317
 318/*
 319 * Add two ktime values and do a safety check for overflow:
 320 */
 321ktime_t ktime_add_safe(const ktime_t lhs, const ktime_t rhs)
 322{
 323        ktime_t res = ktime_add(lhs, rhs);
 324
 325        /*
 326         * We use KTIME_SEC_MAX here, the maximum timeout which we can
 327         * return to user space in a timespec:
 328         */
 329        if (res.tv64 < 0 || res.tv64 < lhs.tv64 || res.tv64 < rhs.tv64)
 330                res = ktime_set(KTIME_SEC_MAX, 0);
 331
 332        return res;
 333}
 334
 335EXPORT_SYMBOL_GPL(ktime_add_safe);
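/*
 * Example (illustrative sketch): ktime_add_safe() clamps instead of
 * wrapping, which matters when user supplied values are added to "now".
 * With a hypothetical relative timeout taken from userspace:
 *
 *        ktime_t expires = ktime_add_safe(ktime_get(), user_timeout);
 *
 * If user_timeout is close to KTIME_MAX, expires becomes
 * ktime_set(KTIME_SEC_MAX, 0) rather than a small negative value.
 */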
 336
 337#ifdef CONFIG_DEBUG_OBJECTS_TIMERS
 338
 339static struct debug_obj_descr hrtimer_debug_descr;
 340
 341/*
 342 * fixup_init is called when:
 343 * - an active object is initialized
 344 */
 345static int hrtimer_fixup_init(void *addr, enum debug_obj_state state)
 346{
 347        struct hrtimer *timer = addr;
 348
 349        switch (state) {
 350        case ODEBUG_STATE_ACTIVE:
 351                hrtimer_cancel(timer);
 352                debug_object_init(timer, &hrtimer_debug_descr);
 353                return 1;
 354        default:
 355                return 0;
 356        }
 357}
 358
 359/*
 360 * fixup_activate is called when:
 361 * - an active object is activated
 362 * - an unknown object is activated (might be a statically initialized object)
 363 */
 364static int hrtimer_fixup_activate(void *addr, enum debug_obj_state state)
 365{
 366        switch (state) {
 367
 368        case ODEBUG_STATE_NOTAVAILABLE:
 369                WARN_ON_ONCE(1);
 370                return 0;
 371
 372        case ODEBUG_STATE_ACTIVE:
 373                WARN_ON(1);
 374
 375        default:
 376                return 0;
 377        }
 378}
 379
 380/*
 381 * fixup_free is called when:
 382 * - an active object is freed
 383 */
 384static int hrtimer_fixup_free(void *addr, enum debug_obj_state state)
 385{
 386        struct hrtimer *timer = addr;
 387
 388        switch (state) {
 389        case ODEBUG_STATE_ACTIVE:
 390                hrtimer_cancel(timer);
 391                debug_object_free(timer, &hrtimer_debug_descr);
 392                return 1;
 393        default:
 394                return 0;
 395        }
 396}
 397
 398static struct debug_obj_descr hrtimer_debug_descr = {
 399        .name           = "hrtimer",
 400        .fixup_init     = hrtimer_fixup_init,
 401        .fixup_activate = hrtimer_fixup_activate,
 402        .fixup_free     = hrtimer_fixup_free,
 403};
 404
 405static inline void debug_hrtimer_init(struct hrtimer *timer)
 406{
 407        debug_object_init(timer, &hrtimer_debug_descr);
 408}
 409
 410static inline void debug_hrtimer_activate(struct hrtimer *timer)
 411{
 412        debug_object_activate(timer, &hrtimer_debug_descr);
 413}
 414
 415static inline void debug_hrtimer_deactivate(struct hrtimer *timer)
 416{
 417        debug_object_deactivate(timer, &hrtimer_debug_descr);
 418}
 419
 420static inline void debug_hrtimer_free(struct hrtimer *timer)
 421{
 422        debug_object_free(timer, &hrtimer_debug_descr);
 423}
 424
 425static void __hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
 426                           enum hrtimer_mode mode);
 427
 428void hrtimer_init_on_stack(struct hrtimer *timer, clockid_t clock_id,
 429                           enum hrtimer_mode mode)
 430{
 431        debug_object_init_on_stack(timer, &hrtimer_debug_descr);
 432        __hrtimer_init(timer, clock_id, mode);
 433}
 434EXPORT_SYMBOL_GPL(hrtimer_init_on_stack);
 435
 436void destroy_hrtimer_on_stack(struct hrtimer *timer)
 437{
 438        debug_object_free(timer, &hrtimer_debug_descr);
 439}
 440
 441#else
 442static inline void debug_hrtimer_init(struct hrtimer *timer) { }
 443static inline void debug_hrtimer_activate(struct hrtimer *timer) { }
 444static inline void debug_hrtimer_deactivate(struct hrtimer *timer) { }
 445#endif
 446
 447static inline void
 448debug_init(struct hrtimer *timer, clockid_t clockid,
 449           enum hrtimer_mode mode)
 450{
 451        debug_hrtimer_init(timer);
 452        trace_hrtimer_init(timer, clockid, mode);
 453}
 454
 455static inline void debug_activate(struct hrtimer *timer)
 456{
 457        debug_hrtimer_activate(timer);
 458        trace_hrtimer_start(timer);
 459}
 460
 461static inline void debug_deactivate(struct hrtimer *timer)
 462{
 463        debug_hrtimer_deactivate(timer);
 464        trace_hrtimer_cancel(timer);
 465}
 466
 467/* High resolution timer related functions */
 468#ifdef CONFIG_HIGH_RES_TIMERS
 469
 470/*
 471 * High resolution timer enabled ?
 472 */
 473static int hrtimer_hres_enabled __read_mostly  = 1;
 474
 475/*
 476 * Enable / Disable high resolution mode
 477 */
 478static int __init setup_hrtimer_hres(char *str)
 479{
 480        if (!strcmp(str, "off"))
 481                hrtimer_hres_enabled = 0;
 482        else if (!strcmp(str, "on"))
 483                hrtimer_hres_enabled = 1;
 484        else
 485                return 0;
 486        return 1;
 487}
 488
 489__setup("highres=", setup_hrtimer_hres);
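/*
 * The string parsed above corresponds to the "highres=" boot parameter;
 * for example, booting with "highres=off" keeps the kernel in low
 * resolution timer mode even when CONFIG_HIGH_RES_TIMERS is enabled.
 */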
 490
 491/*
  492 * hrtimer_is_hres_enabled - query whether the highres mode is enabled
 493 */
 494static inline int hrtimer_is_hres_enabled(void)
 495{
 496        return hrtimer_hres_enabled;
 497}
 498
 499/*
 500 * Is the high resolution mode active ?
 501 */
 502static inline int hrtimer_hres_active(void)
 503{
 504        return __get_cpu_var(hrtimer_bases).hres_active;
 505}
 506
 507/*
 508 * Reprogram the event source with checking both queues for the
 509 * next event
 510 * Called with interrupts disabled and base->lock held
 511 */
 512static void
 513hrtimer_force_reprogram(struct hrtimer_cpu_base *cpu_base, int skip_equal)
 514{
 515        int i;
 516        struct hrtimer_clock_base *base = cpu_base->clock_base;
 517        ktime_t expires, expires_next;
 518
 519        expires_next.tv64 = KTIME_MAX;
 520
 521        for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++, base++) {
 522                struct hrtimer *timer;
 523
 524                if (!base->first)
 525                        continue;
 526                timer = rb_entry(base->first, struct hrtimer, node);
 527                expires = ktime_sub(hrtimer_get_expires(timer), base->offset);
 528                /*
 529                 * clock_was_set() has changed base->offset so the
 530                 * result might be negative. Fix it up to prevent a
 531                 * false positive in clockevents_program_event()
 532                 */
 533                if (expires.tv64 < 0)
 534                        expires.tv64 = 0;
 535                if (expires.tv64 < expires_next.tv64)
 536                        expires_next = expires;
 537        }
 538
 539        if (skip_equal && expires_next.tv64 == cpu_base->expires_next.tv64)
 540                return;
 541
 542        cpu_base->expires_next.tv64 = expires_next.tv64;
 543
 544        if (cpu_base->expires_next.tv64 != KTIME_MAX)
 545                tick_program_event(cpu_base->expires_next, 1);
 546}
 547
 548/*
 549 * Shared reprogramming for clock_realtime and clock_monotonic
 550 *
 551 * When a timer is enqueued and expires earlier than the already enqueued
  552 * timers, we have to check whether it expires earlier than the timer for
 553 * which the clock event device was armed.
 554 *
 555 * Called with interrupts disabled and base->cpu_base.lock held
 556 */
 557static int hrtimer_reprogram(struct hrtimer *timer,
 558                             struct hrtimer_clock_base *base)
 559{
 560        ktime_t *expires_next = &__get_cpu_var(hrtimer_bases).expires_next;
 561        ktime_t expires = ktime_sub(hrtimer_get_expires(timer), base->offset);
 562        int res;
 563
 564        WARN_ON_ONCE(hrtimer_get_expires_tv64(timer) < 0);
 565
 566        /*
 567         * When the callback is running, we do not reprogram the clock event
 568         * device. The timer callback is either running on a different CPU or
 569         * the callback is executed in the hrtimer_interrupt context. The
 570         * reprogramming is handled either by the softirq, which called the
 571         * callback or at the end of the hrtimer_interrupt.
 572         */
 573        if (hrtimer_callback_running(timer))
 574                return 0;
 575
 576        /*
 577         * CLOCK_REALTIME timer might be requested with an absolute
 578         * expiry time which is less than base->offset. Nothing wrong
  579         * about that, just avoid calling into the tick code, which
  580         * now objects to negative expiry values.
 581         */
 582        if (expires.tv64 < 0)
 583                return -ETIME;
 584
 585        if (expires.tv64 >= expires_next->tv64)
 586                return 0;
 587
 588        /*
 589         * Clockevents returns -ETIME, when the event was in the past.
 590         */
 591        res = tick_program_event(expires, 0);
 592        if (!IS_ERR_VALUE(res))
 593                *expires_next = expires;
 594        return res;
 595}
 596
 597
 598/*
 599 * Retrigger next event is called after clock was set
 600 *
 601 * Called with interrupts disabled via on_each_cpu()
 602 */
 603static void retrigger_next_event(void *arg)
 604{
 605        struct hrtimer_cpu_base *base;
 606        struct timespec realtime_offset;
 607        unsigned long seq;
 608
 609        if (!hrtimer_hres_active())
 610                return;
 611
 612        do {
 613                seq = read_seqbegin(&xtime_lock);
 614                set_normalized_timespec(&realtime_offset,
 615                                        -wall_to_monotonic.tv_sec,
 616                                        -wall_to_monotonic.tv_nsec);
 617        } while (read_seqretry(&xtime_lock, seq));
 618
 619        base = &__get_cpu_var(hrtimer_bases);
 620
 621        /* Adjust CLOCK_REALTIME offset */
 622        spin_lock(&base->lock);
 623        base->clock_base[CLOCK_REALTIME].offset =
 624                timespec_to_ktime(realtime_offset);
 625
 626        hrtimer_force_reprogram(base, 0);
 627        spin_unlock(&base->lock);
 628}
 629
 630/*
 631 * Clock realtime was set
 632 *
 633 * Change the offset of the realtime clock vs. the monotonic
 634 * clock.
 635 *
 636 * We might have to reprogram the high resolution timer interrupt. On
 637 * SMP we call the architecture specific code to retrigger _all_ high
 638 * resolution timer interrupts. On UP we just disable interrupts and
 639 * call the high resolution interrupt code.
 640 */
 641void clock_was_set(void)
 642{
 643        /* Retrigger the CPU local events everywhere */
 644        on_each_cpu(retrigger_next_event, NULL, 1);
 645}
 646
 647/*
 648 * During resume we might have to reprogram the high resolution timer
 649 * interrupt (on the local CPU):
 650 */
 651void hres_timers_resume(void)
 652{
 653        WARN_ONCE(!irqs_disabled(),
 654                  KERN_INFO "hres_timers_resume() called with IRQs enabled!");
 655
 656        retrigger_next_event(NULL);
 657}
 658
 659/*
 660 * Initialize the high resolution related parts of cpu_base
 661 */
 662static inline void hrtimer_init_hres(struct hrtimer_cpu_base *base)
 663{
 664        base->expires_next.tv64 = KTIME_MAX;
 665        base->hres_active = 0;
 666}
 667
 668/*
 669 * Initialize the high resolution related parts of a hrtimer
 670 */
 671static inline void hrtimer_init_timer_hres(struct hrtimer *timer)
 672{
 673}
 674
 675
 676/*
  677 * When high resolution timers are active, try to reprogram. Note that in case
 678 * the state has HRTIMER_STATE_CALLBACK set, no reprogramming and no expiry
 679 * check happens. The timer gets enqueued into the rbtree. The reprogramming
 680 * and expiry check is done in the hrtimer_interrupt or in the softirq.
 681 */
 682static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
 683                                            struct hrtimer_clock_base *base,
 684                                            int wakeup)
 685{
 686        if (base->cpu_base->hres_active && hrtimer_reprogram(timer, base)) {
 687                if (wakeup) {
 688                        spin_unlock(&base->cpu_base->lock);
 689                        raise_softirq_irqoff(HRTIMER_SOFTIRQ);
 690                        spin_lock(&base->cpu_base->lock);
 691                } else
 692                        __raise_softirq_irqoff(HRTIMER_SOFTIRQ);
 693
 694                return 1;
 695        }
 696
 697        return 0;
 698}
 699
 700/*
 701 * Switch to high resolution mode
 702 */
 703static int hrtimer_switch_to_hres(void)
 704{
 705        int cpu = smp_processor_id();
 706        struct hrtimer_cpu_base *base = &per_cpu(hrtimer_bases, cpu);
 707        unsigned long flags;
 708
 709        if (base->hres_active)
 710                return 1;
 711
 712        local_irq_save(flags);
 713
 714        if (tick_init_highres()) {
 715                local_irq_restore(flags);
 716                printk(KERN_WARNING "Could not switch to high resolution "
 717                                    "mode on CPU %d\n", cpu);
 718                return 0;
 719        }
 720        base->hres_active = 1;
 721        base->clock_base[CLOCK_REALTIME].resolution = KTIME_HIGH_RES;
 722        base->clock_base[CLOCK_MONOTONIC].resolution = KTIME_HIGH_RES;
 723
 724        tick_setup_sched_timer();
 725
 726        /* "Retrigger" the interrupt to get things going */
 727        retrigger_next_event(NULL);
 728        local_irq_restore(flags);
 729        return 1;
 730}
 731
 732#else
 733
 734static inline int hrtimer_hres_active(void) { return 0; }
 735static inline int hrtimer_is_hres_enabled(void) { return 0; }
 736static inline int hrtimer_switch_to_hres(void) { return 0; }
 737static inline void
 738hrtimer_force_reprogram(struct hrtimer_cpu_base *base, int skip_equal) { }
 739static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
 740                                            struct hrtimer_clock_base *base,
 741                                            int wakeup)
 742{
 743        return 0;
 744}
 745static inline void hrtimer_init_hres(struct hrtimer_cpu_base *base) { }
 746static inline void hrtimer_init_timer_hres(struct hrtimer *timer) { }
 747
 748#endif /* CONFIG_HIGH_RES_TIMERS */
 749
 750#ifdef CONFIG_TIMER_STATS
 751void __timer_stats_hrtimer_set_start_info(struct hrtimer *timer, void *addr)
 752{
 753        if (timer->start_site)
 754                return;
 755
 756        timer->start_site = addr;
 757        memcpy(timer->start_comm, current->comm, TASK_COMM_LEN);
 758        timer->start_pid = current->pid;
 759}
 760#endif
 761
 762/*
 763 * Counterpart to lock_hrtimer_base above:
 764 */
 765static inline
 766void unlock_hrtimer_base(const struct hrtimer *timer, unsigned long *flags)
 767{
 768        spin_unlock_irqrestore(&timer->base->cpu_base->lock, *flags);
 769}
 770
 771/**
 772 * hrtimer_forward - forward the timer expiry
 773 * @timer:      hrtimer to forward
 774 * @now:        forward past this time
 775 * @interval:   the interval to forward
 776 *
 777 * Forward the timer expiry so it will expire in the future.
 778 * Returns the number of overruns.
 779 */
 780u64 hrtimer_forward(struct hrtimer *timer, ktime_t now, ktime_t interval)
 781{
 782        u64 orun = 1;
 783        ktime_t delta;
 784
 785        delta = ktime_sub(now, hrtimer_get_expires(timer));
 786
 787        if (delta.tv64 < 0)
 788                return 0;
 789
 790        if (interval.tv64 < timer->base->resolution.tv64)
 791                interval.tv64 = timer->base->resolution.tv64;
 792
 793        if (unlikely(delta.tv64 >= interval.tv64)) {
 794                s64 incr = ktime_to_ns(interval);
 795
 796                orun = ktime_divns(delta, incr);
 797                hrtimer_add_expires_ns(timer, incr * orun);
 798                if (hrtimer_get_expires_tv64(timer) > now.tv64)
 799                        return orun;
 800                /*
 801                 * This (and the ktime_add() below) is the
 802                 * correction for exact:
 803                 */
 804                orun++;
 805        }
 806        hrtimer_add_expires(timer, interval);
 807
 808        return orun;
 809}
 810EXPORT_SYMBOL_GPL(hrtimer_forward);
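/*
 * Example (illustrative sketch): the typical caller of hrtimer_forward()
 * is a periodic callback that advances its own expiry before asking to
 * be restarted. The callback name and the 100 ms period are hypothetical;
 * a CLOCK_MONOTONIC timer is assumed, so ktime_get() is "now":
 *
 *        static enum hrtimer_restart example_tick(struct hrtimer *timer)
 *        {
 *                hrtimer_forward(timer, ktime_get(),
 *                                ktime_set(0, 100 * NSEC_PER_MSEC));
 *                return HRTIMER_RESTART;
 *        }
 *
 * The returned overrun count can be used to detect missed periods.
 */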
 811
 812/*
 813 * enqueue_hrtimer - internal function to (re)start a timer
 814 *
 815 * The timer is inserted in expiry order. Insertion into the
 816 * red black tree is O(log(n)). Must hold the base lock.
 817 *
 818 * Returns 1 when the new timer is the leftmost timer in the tree.
 819 */
 820static int enqueue_hrtimer(struct hrtimer *timer,
 821                           struct hrtimer_clock_base *base)
 822{
 823        struct rb_node **link = &base->active.rb_node;
 824        struct rb_node *parent = NULL;
 825        struct hrtimer *entry;
 826        int leftmost = 1;
 827
 828        debug_activate(timer);
 829
 830        /*
 831         * Find the right place in the rbtree:
 832         */
 833        while (*link) {
 834                parent = *link;
 835                entry = rb_entry(parent, struct hrtimer, node);
 836                /*
  837                 * We don't care about collisions. Nodes with
 838                 * the same expiry time stay together.
 839                 */
 840                if (hrtimer_get_expires_tv64(timer) <
 841                                hrtimer_get_expires_tv64(entry)) {
 842                        link = &(*link)->rb_left;
 843                } else {
 844                        link = &(*link)->rb_right;
 845                        leftmost = 0;
 846                }
 847        }
 848
 849        /*
 850         * Insert the timer to the rbtree and check whether it
 851         * replaces the first pending timer
 852         */
 853        if (leftmost)
 854                base->first = &timer->node;
 855
 856        rb_link_node(&timer->node, parent, link);
 857        rb_insert_color(&timer->node, &base->active);
 858        /*
 859         * HRTIMER_STATE_ENQUEUED is or'ed to the current state to preserve the
 860         * state of a possibly running callback.
 861         */
 862        timer->state |= HRTIMER_STATE_ENQUEUED;
 863
 864        return leftmost;
 865}
 866
 867/*
 868 * __remove_hrtimer - internal function to remove a timer
 869 *
 870 * Caller must hold the base lock.
 871 *
 872 * High resolution timer mode reprograms the clock event device when the
 873 * timer is the one which expires next. The caller can disable this by setting
 874 * reprogram to zero. This is useful, when the context does a reprogramming
 875 * anyway (e.g. timer interrupt)
 876 */
 877static void __remove_hrtimer(struct hrtimer *timer,
 878                             struct hrtimer_clock_base *base,
 879                             unsigned long newstate, int reprogram)
 880{
 881        if (!(timer->state & HRTIMER_STATE_ENQUEUED))
 882                goto out;
 883
 884        /*
 885         * Remove the timer from the rbtree and replace the first
 886         * entry pointer if necessary.
 887         */
 888        if (base->first == &timer->node) {
 889                base->first = rb_next(&timer->node);
 890#ifdef CONFIG_HIGH_RES_TIMERS
  891                /* Reprogram the clock event device, if enabled */
 892                if (reprogram && hrtimer_hres_active()) {
 893                        ktime_t expires;
 894
 895                        expires = ktime_sub(hrtimer_get_expires(timer),
 896                                            base->offset);
 897                        if (base->cpu_base->expires_next.tv64 == expires.tv64)
 898                                hrtimer_force_reprogram(base->cpu_base, 1);
 899                }
 900#endif
 901        }
 902        rb_erase(&timer->node, &base->active);
 903out:
 904        timer->state = newstate;
 905}
 906
 907/*
 908 * remove hrtimer, called with base lock held
 909 */
 910static inline int
 911remove_hrtimer(struct hrtimer *timer, struct hrtimer_clock_base *base)
 912{
 913        if (hrtimer_is_queued(timer)) {
 914                int reprogram;
 915
 916                /*
 917                 * Remove the timer and force reprogramming when high
 918                 * resolution mode is active and the timer is on the current
 919                 * CPU. If we remove a timer on another CPU, reprogramming is
 920                 * skipped. The interrupt event on this CPU is fired and
 921                 * reprogramming happens in the interrupt handler. This is a
 922                 * rare case and less expensive than a smp call.
 923                 */
 924                debug_deactivate(timer);
 925                timer_stats_hrtimer_clear_start_info(timer);
 926                reprogram = base->cpu_base == &__get_cpu_var(hrtimer_bases);
 927                __remove_hrtimer(timer, base, HRTIMER_STATE_INACTIVE,
 928                                 reprogram);
 929                return 1;
 930        }
 931        return 0;
 932}
 933
 934int __hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
 935                unsigned long delta_ns, const enum hrtimer_mode mode,
 936                int wakeup)
 937{
 938        struct hrtimer_clock_base *base, *new_base;
 939        unsigned long flags;
 940        int ret, leftmost;
 941
 942        base = lock_hrtimer_base(timer, &flags);
 943
 944        /* Remove an active timer from the queue: */
 945        ret = remove_hrtimer(timer, base);
 946
 947        /* Switch the timer base, if necessary: */
 948        new_base = switch_hrtimer_base(timer, base, mode & HRTIMER_MODE_PINNED);
 949
 950        if (mode & HRTIMER_MODE_REL) {
 951                tim = ktime_add_safe(tim, new_base->get_time());
 952                /*
 953                 * CONFIG_TIME_LOW_RES is a temporary way for architectures
 954                 * to signal that they simply return xtime in
 955                 * do_gettimeoffset(). In this case we want to round up by
 956                 * resolution when starting a relative timer, to avoid short
 957                 * timeouts. This will go away with the GTOD framework.
 958                 */
 959#ifdef CONFIG_TIME_LOW_RES
 960                tim = ktime_add_safe(tim, base->resolution);
 961#endif
 962        }
 963
 964        hrtimer_set_expires_range_ns(timer, tim, delta_ns);
 965
 966        timer_stats_hrtimer_set_start_info(timer);
 967
 968        leftmost = enqueue_hrtimer(timer, new_base);
 969
 970        /*
 971         * Only allow reprogramming if the new base is on this CPU.
 972         * (it might still be on another CPU if the timer was pending)
 973         *
 974         * XXX send_remote_softirq() ?
 975         */
 976        if (leftmost && new_base->cpu_base == &__get_cpu_var(hrtimer_bases))
 977                hrtimer_enqueue_reprogram(timer, new_base, wakeup);
 978
 979        unlock_hrtimer_base(timer, &flags);
 980
 981        return ret;
 982}
 983
 984/**
 985 * hrtimer_start_range_ns - (re)start an hrtimer on the current CPU
 986 * @timer:      the timer to be added
 987 * @tim:        expiry time
 988 * @delta_ns:   "slack" range for the timer
  989 * @mode:       expiry mode: absolute (HRTIMER_MODE_ABS) or relative (HRTIMER_MODE_REL)
 990 *
 991 * Returns:
 992 *  0 on success
 993 *  1 when the timer was active
 994 */
 995int hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
 996                unsigned long delta_ns, const enum hrtimer_mode mode)
 997{
 998        return __hrtimer_start_range_ns(timer, tim, delta_ns, mode, 1);
 999}
1000EXPORT_SYMBOL_GPL(hrtimer_start_range_ns);
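/*
 * Example (illustrative sketch): the delta_ns "slack" lets unrelated
 * timers be coalesced into a single wakeup. A hypothetical driver that
 * can tolerate 1 ms of lateness on a 50 ms relative timeout might do:
 *
 *        hrtimer_start_range_ns(&my_timer, ktime_set(0, 50 * NSEC_PER_MSEC),
 *                               NSEC_PER_MSEC, HRTIMER_MODE_REL);
 */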
1001
1002/**
1003 * hrtimer_start - (re)start an hrtimer on the current CPU
1004 * @timer:      the timer to be added
1005 * @tim:        expiry time
 1006 * @mode:       expiry mode: absolute (HRTIMER_MODE_ABS) or relative (HRTIMER_MODE_REL)
1007 *
1008 * Returns:
1009 *  0 on success
1010 *  1 when the timer was active
1011 */
1012int
1013hrtimer_start(struct hrtimer *timer, ktime_t tim, const enum hrtimer_mode mode)
1014{
1015        return __hrtimer_start_range_ns(timer, tim, 0, mode, 1);
1016}
1017EXPORT_SYMBOL_GPL(hrtimer_start);
1018
1019
1020/**
1021 * hrtimer_try_to_cancel - try to deactivate a timer
1022 * @timer:      hrtimer to stop
1023 *
1024 * Returns:
1025 *  0 when the timer was not active
1026 *  1 when the timer was active
 1027 * -1 when the timer is currently executing the callback function and
1028 *    cannot be stopped
1029 */
1030int hrtimer_try_to_cancel(struct hrtimer *timer)
1031{
1032        struct hrtimer_clock_base *base;
1033        unsigned long flags;
1034        int ret = -1;
1035
1036        base = lock_hrtimer_base(timer, &flags);
1037
1038        if (!hrtimer_callback_running(timer))
1039                ret = remove_hrtimer(timer, base);
1040
1041        unlock_hrtimer_base(timer, &flags);
1042
1043        return ret;
1044
1045}
1046EXPORT_SYMBOL_GPL(hrtimer_try_to_cancel);
1047
1048/**
1049 * hrtimer_cancel - cancel a timer and wait for the handler to finish.
1050 * @timer:      the timer to be cancelled
1051 *
1052 * Returns:
1053 *  0 when the timer was not active
1054 *  1 when the timer was active
1055 */
1056int hrtimer_cancel(struct hrtimer *timer)
1057{
1058        for (;;) {
1059                int ret = hrtimer_try_to_cancel(timer);
1060
1061                if (ret >= 0)
1062                        return ret;
1063                cpu_relax();
1064        }
1065}
1066EXPORT_SYMBOL_GPL(hrtimer_cancel);
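/*
 * Example (illustrative sketch): teardown code typically uses
 * hrtimer_cancel(), which also waits for a callback that is currently
 * running; hrtimer_try_to_cancel() is for callers that cannot wait and
 * want to handle the -1 "callback running" case themselves. With a
 * hypothetical my_timer:
 *
 *        hrtimer_cancel(&my_timer);
 *
 * Once this returns, the callback is guaranteed not to be running.
 */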
1067
1068/**
1069 * hrtimer_get_remaining - get remaining time for the timer
1070 * @timer:      the timer to read
1071 */
1072ktime_t hrtimer_get_remaining(const struct hrtimer *timer)
1073{
1074        struct hrtimer_clock_base *base;
1075        unsigned long flags;
1076        ktime_t rem;
1077
1078        base = lock_hrtimer_base(timer, &flags);
1079        rem = hrtimer_expires_remaining(timer);
1080        unlock_hrtimer_base(timer, &flags);
1081
1082        return rem;
1083}
1084EXPORT_SYMBOL_GPL(hrtimer_get_remaining);
1085
1086#ifdef CONFIG_NO_HZ
1087/**
1088 * hrtimer_get_next_event - get the time until next expiry event
1089 *
1090 * Returns the delta to the next expiry event or KTIME_MAX if no timer
1091 * is pending.
1092 */
1093ktime_t hrtimer_get_next_event(void)
1094{
1095        struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
1096        struct hrtimer_clock_base *base = cpu_base->clock_base;
1097        ktime_t delta, mindelta = { .tv64 = KTIME_MAX };
1098        unsigned long flags;
1099        int i;
1100
1101        spin_lock_irqsave(&cpu_base->lock, flags);
1102
1103        if (!hrtimer_hres_active()) {
1104                for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++, base++) {
1105                        struct hrtimer *timer;
1106
1107                        if (!base->first)
1108                                continue;
1109
1110                        timer = rb_entry(base->first, struct hrtimer, node);
1111                        delta.tv64 = hrtimer_get_expires_tv64(timer);
1112                        delta = ktime_sub(delta, base->get_time());
1113                        if (delta.tv64 < mindelta.tv64)
1114                                mindelta.tv64 = delta.tv64;
1115                }
1116        }
1117
1118        spin_unlock_irqrestore(&cpu_base->lock, flags);
1119
1120        if (mindelta.tv64 < 0)
1121                mindelta.tv64 = 0;
1122        return mindelta;
1123}
1124#endif
1125
1126static void __hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
1127                           enum hrtimer_mode mode)
1128{
1129        struct hrtimer_cpu_base *cpu_base;
1130
1131        memset(timer, 0, sizeof(struct hrtimer));
1132
1133        cpu_base = &__raw_get_cpu_var(hrtimer_bases);
1134
1135        if (clock_id == CLOCK_REALTIME && mode != HRTIMER_MODE_ABS)
1136                clock_id = CLOCK_MONOTONIC;
1137
1138        timer->base = &cpu_base->clock_base[clock_id];
1139        hrtimer_init_timer_hres(timer);
1140
1141#ifdef CONFIG_TIMER_STATS
1142        timer->start_site = NULL;
1143        timer->start_pid = -1;
1144        memset(timer->start_comm, 0, TASK_COMM_LEN);
1145#endif
1146}
1147
1148/**
1149 * hrtimer_init - initialize a timer to the given clock
1150 * @timer:      the timer to be initialized
1151 * @clock_id:   the clock to be used
1152 * @mode:       timer mode abs/rel
1153 */
1154void hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
1155                  enum hrtimer_mode mode)
1156{
1157        debug_init(timer, clock_id, mode);
1158        __hrtimer_init(timer, clock_id, mode);
1159}
1160EXPORT_SYMBOL_GPL(hrtimer_init);
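/*
 * Example (illustrative sketch): a minimal one-shot timer. All names are
 * hypothetical and <linux/hrtimer.h> is assumed:
 *
 *        static struct hrtimer example_timer;
 *
 *        static enum hrtimer_restart example_fn(struct hrtimer *timer)
 *        {
 *                return HRTIMER_NORESTART;
 *        }
 *
 *        hrtimer_init(&example_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 *        example_timer.function = example_fn;
 *        hrtimer_start(&example_timer, ktime_set(2, 0), HRTIMER_MODE_REL);
 *
 * The callback then runs in hard interrupt context roughly 2 seconds later.
 */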
1161
1162/**
1163 * hrtimer_get_res - get the timer resolution for a clock
1164 * @which_clock: which clock to query
1165 * @tp:          pointer to timespec variable to store the resolution
1166 *
1167 * Store the resolution of the clock selected by @which_clock in the
1168 * variable pointed to by @tp.
1169 */
1170int hrtimer_get_res(const clockid_t which_clock, struct timespec *tp)
1171{
1172        struct hrtimer_cpu_base *cpu_base;
1173
1174        cpu_base = &__raw_get_cpu_var(hrtimer_bases);
1175        *tp = ktime_to_timespec(cpu_base->clock_base[which_clock].resolution);
1176
1177        return 0;
1178}
1179EXPORT_SYMBOL_GPL(hrtimer_get_res);
1180
1181static void __run_hrtimer(struct hrtimer *timer, ktime_t *now)
1182{
1183        struct hrtimer_clock_base *base = timer->base;
1184        struct hrtimer_cpu_base *cpu_base = base->cpu_base;
1185        enum hrtimer_restart (*fn)(struct hrtimer *);
1186        int restart;
1187
1188        WARN_ON(!irqs_disabled());
1189
1190        debug_deactivate(timer);
1191        __remove_hrtimer(timer, base, HRTIMER_STATE_CALLBACK, 0);
1192        timer_stats_account_hrtimer(timer);
1193        fn = timer->function;
1194
1195        /*
1196         * Because we run timers from hardirq context, there is no chance
 1197         * they get migrated to another cpu, therefore it's safe to unlock
1198         * the timer base.
1199         */
1200        spin_unlock(&cpu_base->lock);
1201        trace_hrtimer_expire_entry(timer, now);
1202        restart = fn(timer);
1203        trace_hrtimer_expire_exit(timer);
1204        spin_lock(&cpu_base->lock);
1205
1206        /*
1207         * Note: We clear the CALLBACK bit after enqueue_hrtimer and
 1208         * we do not reprogram the event hardware. That happens either in
 1209         * hrtimer_start_range_ns() or in hrtimer_interrupt().
1210         */
1211        if (restart != HRTIMER_NORESTART) {
1212                BUG_ON(timer->state != HRTIMER_STATE_CALLBACK);
1213                enqueue_hrtimer(timer, base);
1214        }
1215        timer->state &= ~HRTIMER_STATE_CALLBACK;
1216}
1217
1218#ifdef CONFIG_HIGH_RES_TIMERS
1219
1220static int force_clock_reprogram;
1221
1222/*
 1223 * After 5 retries we consider that hrtimer_interrupt() is hanging,
 1224 * which can happen when something slows down the interrupt handling,
 1225 * such as tracing. In that case we force clock reprogramming for every
 1226 * future hrtimer interrupt to avoid infinite loops, and we overwrite
 1227 * the min_delta_ns threshold of the device.
 1228 * The next tick event is then scheduled at 3 times the time we currently
 1229 * spend in hrtimer_interrupt(). This is a good compromise: the cpus
 1230 * spend about 1/4 of their time processing hrtimer interrupts, which is
 1231 * enough to keep things running without serious starvation.
1232 */
1233
1234static inline void
1235hrtimer_interrupt_hanging(struct clock_event_device *dev,
1236                        ktime_t try_time)
1237{
1238        force_clock_reprogram = 1;
1239        dev->min_delta_ns = (unsigned long)try_time.tv64 * 3;
1240        printk(KERN_WARNING "hrtimer: interrupt too slow, "
1241                "forcing clock min delta to %lu ns\n", dev->min_delta_ns);
1242}
1243/*
1244 * High resolution timer interrupt
1245 * Called with interrupts disabled
1246 */
1247void hrtimer_interrupt(struct clock_event_device *dev)
1248{
1249        struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
1250        struct hrtimer_clock_base *base;
1251        ktime_t expires_next, now;
1252        int nr_retries = 0;
1253        int i;
1254
1255        BUG_ON(!cpu_base->hres_active);
1256        cpu_base->nr_events++;
1257        dev->next_event.tv64 = KTIME_MAX;
1258
1259 retry:
1260        /* 5 retries is enough to notice a hang */
1261        if (!(++nr_retries % 5))
1262                hrtimer_interrupt_hanging(dev, ktime_sub(ktime_get(), now));
1263
1264        now = ktime_get();
1265
1266        expires_next.tv64 = KTIME_MAX;
1267
1268        spin_lock(&cpu_base->lock);
1269        /*
1270         * We set expires_next to KTIME_MAX here with cpu_base->lock
1271         * held to prevent that a timer is enqueued in our queue via
1272         * the migration code. This does not affect enqueueing of
1273         * timers which run their callback and need to be requeued on
1274         * this CPU.
1275         */
1276        cpu_base->expires_next.tv64 = KTIME_MAX;
1277
1278        base = cpu_base->clock_base;
1279
1280        for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
1281                ktime_t basenow;
1282                struct rb_node *node;
1283
1284                basenow = ktime_add(now, base->offset);
1285
1286                while ((node = base->first)) {
1287                        struct hrtimer *timer;
1288
1289                        timer = rb_entry(node, struct hrtimer, node);
1290
1291                        /*
1292                         * The immediate goal for using the softexpires is
1293                         * minimizing wakeups, not running timers at the
1294                         * earliest interrupt after their soft expiration.
1295                         * This allows us to avoid using a Priority Search
 1296                         * Tree, which can answer a stabbing query for
1297                         * overlapping intervals and instead use the simple
1298                         * BST we already have.
1299                         * We don't add extra wakeups by delaying timers that
1300                         * are right-of a not yet expired timer, because that
1301                         * timer will have to trigger a wakeup anyway.
1302                         */
1303
1304                        if (basenow.tv64 < hrtimer_get_softexpires_tv64(timer)) {
1305                                ktime_t expires;
1306
1307                                expires = ktime_sub(hrtimer_get_expires(timer),
1308                                                    base->offset);
1309                                if (expires.tv64 < expires_next.tv64)
1310                                        expires_next = expires;
1311                                break;
1312                        }
1313
1314                        __run_hrtimer(timer, &basenow);
1315                }
1316                base++;
1317        }
1318
1319        /*
1320         * Store the new expiry value so the migration code can verify
1321         * against it.
1322         */
1323        cpu_base->expires_next = expires_next;
1324        spin_unlock(&cpu_base->lock);
1325
1326        /* Reprogramming necessary ? */
1327        if (expires_next.tv64 != KTIME_MAX) {
1328                if (tick_program_event(expires_next, force_clock_reprogram))
1329                        goto retry;
1330        }
1331}
1332
1333/*
1334 * local version of hrtimer_peek_ahead_timers() called with interrupts
1335 * disabled.
1336 */
1337static void __hrtimer_peek_ahead_timers(void)
1338{
1339        struct tick_device *td;
1340
1341        if (!hrtimer_hres_active())
1342                return;
1343
1344        td = &__get_cpu_var(tick_cpu_device);
1345        if (td && td->evtdev)
1346                hrtimer_interrupt(td->evtdev);
1347}
1348
1349/**
 1350 * hrtimer_peek_ahead_timers - run soft-expired timers now
1351 *
1352 * hrtimer_peek_ahead_timers will peek at the timer queue of
1353 * the current cpu and check if there are any timers for which
 1354 * the soft expiry time has passed. If any such timers exist,
1355 * they are run immediately and then removed from the timer queue.
1356 *
1357 */
1358void hrtimer_peek_ahead_timers(void)
1359{
1360        unsigned long flags;
1361
1362        local_irq_save(flags);
1363        __hrtimer_peek_ahead_timers();
1364        local_irq_restore(flags);
1365}
1366
1367static void run_hrtimer_softirq(struct softirq_action *h)
1368{
1369        hrtimer_peek_ahead_timers();
1370}
1371
1372#else /* CONFIG_HIGH_RES_TIMERS */
1373
1374static inline void __hrtimer_peek_ahead_timers(void) { }
1375
1376#endif  /* !CONFIG_HIGH_RES_TIMERS */
1377
1378/*
1379 * Called from timer softirq every jiffy, expire hrtimers:
1380 *
 1381 * For HRT it is the fallback code to run the softirq in the timer
1382 * softirq context in case the hrtimer initialization failed or has
1383 * not been done yet.
1384 */
1385void hrtimer_run_pending(void)
1386{
1387        if (hrtimer_hres_active())
1388                return;
1389
1390        /*
 1391         * This _is_ ugly: We have to check in the softirq context
 1392         * whether we can switch to highres and/or nohz mode. The
1393         * clocksource switch happens in the timer interrupt with
1394         * xtime_lock held. Notification from there only sets the
1395         * check bit in the tick_oneshot code, otherwise we might
1396         * deadlock vs. xtime_lock.
1397         */
1398        if (tick_check_oneshot_change(!hrtimer_is_hres_enabled()))
1399                hrtimer_switch_to_hres();
1400}
1401
1402/*
1403 * Called from hardirq context every jiffy
1404 */
1405void hrtimer_run_queues(void)
1406{
1407        struct rb_node *node;
1408        struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
1409        struct hrtimer_clock_base *base;
1410        int index, gettime = 1;
1411
1412        if (hrtimer_hres_active())
1413                return;
1414
1415        for (index = 0; index < HRTIMER_MAX_CLOCK_BASES; index++) {
1416                base = &cpu_base->clock_base[index];
1417
1418                if (!base->first)
1419                        continue;
1420
1421                if (gettime) {
1422                        hrtimer_get_softirq_time(cpu_base);
1423                        gettime = 0;
1424                }
1425
1426                spin_lock(&cpu_base->lock);
1427
1428                while ((node = base->first)) {
1429                        struct hrtimer *timer;
1430
1431                        timer = rb_entry(node, struct hrtimer, node);
1432                        if (base->softirq_time.tv64 <=
1433                                        hrtimer_get_expires_tv64(timer))
1434                                break;
1435
1436                        __run_hrtimer(timer, &base->softirq_time);
1437                }
1438                spin_unlock(&cpu_base->lock);
1439        }
1440}
1441
1442/*
1443 * Sleep related functions:
1444 */
1445static enum hrtimer_restart hrtimer_wakeup(struct hrtimer *timer)
1446{
1447        struct hrtimer_sleeper *t =
1448                container_of(timer, struct hrtimer_sleeper, timer);
1449        struct task_struct *task = t->task;
1450
1451        t->task = NULL;
1452        if (task)
1453                wake_up_process(task);
1454
1455        return HRTIMER_NORESTART;
1456}
1457
1458void hrtimer_init_sleeper(struct hrtimer_sleeper *sl, struct task_struct *task)
1459{
1460        sl->timer.function = hrtimer_wakeup;
1461        sl->task = task;
1462}
1463EXPORT_SYMBOL_GPL(hrtimer_init_sleeper);
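/*
 * Example (illustrative sketch): the sleeper pattern used by
 * do_nanosleep() below. Set up an hrtimer_sleeper on the stack, start it
 * and call schedule(); hrtimer_wakeup() clears t.task and wakes the task
 * when the timer fires. "timeout" is a hypothetical relative ktime_t:
 *
 *        struct hrtimer_sleeper t;
 *
 *        hrtimer_init_on_stack(&t.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 *        hrtimer_set_expires(&t.timer, timeout);
 *        hrtimer_init_sleeper(&t, current);
 *        set_current_state(TASK_INTERRUPTIBLE);
 *        hrtimer_start_expires(&t.timer, HRTIMER_MODE_REL);
 *        if (!hrtimer_active(&t.timer))
 *                t.task = NULL;
 *        if (t.task)
 *                schedule();
 *        hrtimer_cancel(&t.timer);
 *        destroy_hrtimer_on_stack(&t.timer);
 */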
1464
1465static int __sched do_nanosleep(struct hrtimer_sleeper *t, enum hrtimer_mode mode)
1466{
1467        hrtimer_init_sleeper(t, current);
1468
1469        do {
1470                set_current_state(TASK_INTERRUPTIBLE);
1471                hrtimer_start_expires(&t->timer, mode);
1472                if (!hrtimer_active(&t->timer))
1473                        t->task = NULL;
1474
1475                if (likely(t->task))
1476                        schedule();
1477
1478                hrtimer_cancel(&t->timer);
1479                mode = HRTIMER_MODE_ABS;
1480
1481        } while (t->task && !signal_pending(current));
1482
1483        __set_current_state(TASK_RUNNING);
1484
1485        return t->task == NULL;
1486}
1487
1488static int update_rmtp(struct hrtimer *timer, struct timespec __user *rmtp)
1489{
1490        struct timespec rmt;
1491        ktime_t rem;
1492
1493        rem = hrtimer_expires_remaining(timer);
1494        if (rem.tv64 <= 0)
1495                return 0;
1496        rmt = ktime_to_timespec(rem);
1497
1498        if (copy_to_user(rmtp, &rmt, sizeof(*rmtp)))
1499                return -EFAULT;
1500
1501        return 1;
1502}
1503
1504long __sched hrtimer_nanosleep_restart(struct restart_block *restart)
1505{
1506        struct hrtimer_sleeper t;
1507        struct timespec __user  *rmtp;
1508        int ret = 0;
1509
1510        hrtimer_init_on_stack(&t.timer, restart->nanosleep.index,
1511                                HRTIMER_MODE_ABS);
1512        hrtimer_set_expires_tv64(&t.timer, restart->nanosleep.expires);
1513
1514        if (do_nanosleep(&t, HRTIMER_MODE_ABS))
1515                goto out;
1516
1517        rmtp = restart->nanosleep.rmtp;
1518        if (rmtp) {
1519                ret = update_rmtp(&t.timer, rmtp);
1520                if (ret <= 0)
1521                        goto out;
1522        }
1523
1524        /* The other values in restart are already filled in */
1525        ret = -ERESTART_RESTARTBLOCK;
1526out:
1527        destroy_hrtimer_on_stack(&t.timer);
1528        return ret;
1529}
1530
1531long hrtimer_nanosleep(struct timespec *rqtp, struct timespec __user *rmtp,
1532                       const enum hrtimer_mode mode, const clockid_t clockid)
1533{
1534        struct restart_block *restart;
1535        struct hrtimer_sleeper t;
1536        int ret = 0;
1537        unsigned long slack;
1538
1539        slack = current->timer_slack_ns;
1540        if (rt_task(current))
1541                slack = 0;
1542
1543        hrtimer_init_on_stack(&t.timer, clockid, mode);
1544        hrtimer_set_expires_range_ns(&t.timer, timespec_to_ktime(*rqtp), slack);
1545        if (do_nanosleep(&t, mode))
1546                goto out;
1547
1548        /* Absolute timers do not update the rmtp value and restart: */
1549        if (mode == HRTIMER_MODE_ABS) {
1550                ret = -ERESTARTNOHAND;
1551                goto out;
1552        }
1553
1554        if (rmtp) {
1555                ret = update_rmtp(&t.timer, rmtp);
1556                if (ret <= 0)
1557                        goto out;
1558        }
1559
1560        restart = &current_thread_info()->restart_block;
1561        restart->fn = hrtimer_nanosleep_restart;
1562        restart->nanosleep.index = t.timer.base->index;
1563        restart->nanosleep.rmtp = rmtp;
1564        restart->nanosleep.expires = hrtimer_get_expires_tv64(&t.timer);
1565
1566        ret = -ERESTART_RESTARTBLOCK;
1567out:
1568        destroy_hrtimer_on_stack(&t.timer);
1569        return ret;
1570}
1571
1572SYSCALL_DEFINE2(nanosleep, struct timespec __user *, rqtp,
1573                struct timespec __user *, rmtp)
1574{
1575        struct timespec tu;
1576
1577        if (copy_from_user(&tu, rqtp, sizeof(tu)))
1578                return -EFAULT;
1579
1580        if (!timespec_valid(&tu))
1581                return -EINVAL;
1582
1583        return hrtimer_nanosleep(&tu, rmtp, HRTIMER_MODE_REL, CLOCK_MONOTONIC);
1584}
1585
1586/*
1587 * Functions related to boot-time initialization:
1588 */
1589static void __cpuinit init_hrtimers_cpu(int cpu)
1590{
1591        struct hrtimer_cpu_base *cpu_base = &per_cpu(hrtimer_bases, cpu);
1592        int i;
1593
1594        spin_lock_init(&cpu_base->lock);
1595
1596        for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++)
1597                cpu_base->clock_base[i].cpu_base = cpu_base;
1598
1599        hrtimer_init_hres(cpu_base);
1600}
1601
1602#ifdef CONFIG_HOTPLUG_CPU
1603
1604static void migrate_hrtimer_list(struct hrtimer_clock_base *old_base,
1605                                struct hrtimer_clock_base *new_base)
1606{
1607        struct hrtimer *timer;
1608        struct rb_node *node;
1609
1610        while ((node = rb_first(&old_base->active))) {
1611                timer = rb_entry(node, struct hrtimer, node);
1612                BUG_ON(hrtimer_callback_running(timer));
1613                debug_deactivate(timer);
1614
1615                /*
 1616                 * Mark it as STATE_MIGRATE, not INACTIVE, otherwise the
1617                 * timer could be seen as !active and just vanish away
1618                 * under us on another CPU
1619                 */
1620                __remove_hrtimer(timer, old_base, HRTIMER_STATE_MIGRATE, 0);
1621                timer->base = new_base;
1622                /*
1623                 * Enqueue the timers on the new cpu. This does not
1624                 * reprogram the event device in case the timer
1625                 * expires before the earliest on this CPU, but we run
1626                 * hrtimer_interrupt after we migrated everything to
1627                 * sort out already expired timers and reprogram the
1628                 * event device.
1629                 */
1630                enqueue_hrtimer(timer, new_base);
1631
1632                /* Clear the migration state bit */
1633                timer->state &= ~HRTIMER_STATE_MIGRATE;
1634        }
1635}
1636
1637static void migrate_hrtimers(int scpu)
1638{
1639        struct hrtimer_cpu_base *old_base, *new_base;
1640        int i;
1641
1642        BUG_ON(cpu_online(scpu));
1643        tick_cancel_sched_timer(scpu);
1644
1645        local_irq_disable();
1646        old_base = &per_cpu(hrtimer_bases, scpu);
1647        new_base = &__get_cpu_var(hrtimer_bases);
1648        /*
1649         * The caller is globally serialized and nobody else
 1650         * takes two locks at once, so deadlock is not possible.
1651         */
1652        spin_lock(&new_base->lock);
1653        spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);
1654
1655        for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
1656                migrate_hrtimer_list(&old_base->clock_base[i],
1657                                     &new_base->clock_base[i]);
1658        }
1659
1660        spin_unlock(&old_base->lock);
1661        spin_unlock(&new_base->lock);
1662
1663        /* Check, if we got expired work to do */
1664        __hrtimer_peek_ahead_timers();
1665        local_irq_enable();
1666}
1667
1668#endif /* CONFIG_HOTPLUG_CPU */
1669
1670static int __cpuinit hrtimer_cpu_notify(struct notifier_block *self,
1671                                        unsigned long action, void *hcpu)
1672{
1673        int scpu = (long)hcpu;
1674
1675        switch (action) {
1676
1677        case CPU_UP_PREPARE:
1678        case CPU_UP_PREPARE_FROZEN:
1679                init_hrtimers_cpu(scpu);
1680                break;
1681
1682#ifdef CONFIG_HOTPLUG_CPU
1683        case CPU_DYING:
1684        case CPU_DYING_FROZEN:
1685                clockevents_notify(CLOCK_EVT_NOTIFY_CPU_DYING, &scpu);
1686                break;
1687        case CPU_DEAD:
1688        case CPU_DEAD_FROZEN:
1689        {
1690                clockevents_notify(CLOCK_EVT_NOTIFY_CPU_DEAD, &scpu);
1691                migrate_hrtimers(scpu);
1692                break;
1693        }
1694#endif
1695
1696        default:
1697                break;
1698        }
1699
1700        return NOTIFY_OK;
1701}
1702
1703static struct notifier_block __cpuinitdata hrtimers_nb = {
1704        .notifier_call = hrtimer_cpu_notify,
1705};
1706
1707void __init hrtimers_init(void)
1708{
1709        hrtimer_cpu_notify(&hrtimers_nb, (unsigned long)CPU_UP_PREPARE,
1710                          (void *)(long)smp_processor_id());
1711        register_cpu_notifier(&hrtimers_nb);
1712#ifdef CONFIG_HIGH_RES_TIMERS
1713        open_softirq(HRTIMER_SOFTIRQ, run_hrtimer_softirq);
1714#endif
1715}
1716
1717/**
1718 * schedule_hrtimeout_range - sleep until timeout
1719 * @expires:    timeout value (ktime_t)
1720 * @delta:      slack in expires timeout (ktime_t)
1721 * @mode:       timer mode, HRTIMER_MODE_ABS or HRTIMER_MODE_REL
1722 *
1723 * Make the current task sleep until the given expiry time has
1724 * elapsed. The routine will return immediately unless
1725 * the current task state has been set (see set_current_state()).
1726 *
1727 * The @delta argument gives the kernel the freedom to schedule the
1728 * actual wakeup to a time that is both power and performance friendly.
 1729 * The kernel gives the normal best effort behavior for "@expires+@delta",
 1730 * but may decide to fire the timer earlier, though no earlier than @expires.
1731 *
1732 * You can set the task state as follows -
1733 *
 1734 * %TASK_UNINTERRUPTIBLE - at least the requested time is guaranteed to
1735 * pass before the routine returns.
1736 *
1737 * %TASK_INTERRUPTIBLE - the routine may return early if a signal is
1738 * delivered to the current task.
1739 *
1740 * The current task state is guaranteed to be TASK_RUNNING when this
1741 * routine returns.
1742 *
1743 * Returns 0 when the timer has expired otherwise -EINTR
1744 */
1745int __sched schedule_hrtimeout_range(ktime_t *expires, unsigned long delta,
1746                               const enum hrtimer_mode mode)
1747{
1748        struct hrtimer_sleeper t;
1749
1750        /*
1751         * Optimize when a zero timeout value is given. It does not
1752         * matter whether this is an absolute or a relative time.
1753         */
1754        if (expires && !expires->tv64) {
1755                __set_current_state(TASK_RUNNING);
1756                return 0;
1757        }
1758
1759        /*
 1760         * A NULL parameter means "infinite"
1761         */
1762        if (!expires) {
1763                schedule();
1764                __set_current_state(TASK_RUNNING);
1765                return -EINTR;
1766        }
1767
1768        hrtimer_init_on_stack(&t.timer, CLOCK_MONOTONIC, mode);
1769        hrtimer_set_expires_range_ns(&t.timer, *expires, delta);
1770
1771        hrtimer_init_sleeper(&t, current);
1772
1773        hrtimer_start_expires(&t.timer, mode);
1774        if (!hrtimer_active(&t.timer))
1775                t.task = NULL;
1776
1777        if (likely(t.task))
1778                schedule();
1779
1780        hrtimer_cancel(&t.timer);
1781        destroy_hrtimer_on_stack(&t.timer);
1782
1783        __set_current_state(TASK_RUNNING);
1784
1785        return !t.task ? 0 : -EINTR;
1786}
1787EXPORT_SYMBOL_GPL(schedule_hrtimeout_range);
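/*
 * Example (illustrative sketch): sleeping for 10 ms with 1 ms of slack,
 * interruptible by signals. The variable names are hypothetical:
 *
 *        ktime_t timeout = ktime_set(0, 10 * NSEC_PER_MSEC);
 *        int ret;
 *
 *        set_current_state(TASK_INTERRUPTIBLE);
 *        ret = schedule_hrtimeout_range(&timeout, NSEC_PER_MSEC,
 *                                       HRTIMER_MODE_REL);
 *
 * ret is 0 if the timeout expired and -EINTR if a signal woke the task
 * early. The task state must be set before the call, as documented
 * above, otherwise the function returns immediately.
 */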
1788
1789/**
1790 * schedule_hrtimeout - sleep until timeout
1791 * @expires:    timeout value (ktime_t)
1792 * @mode:       timer mode, HRTIMER_MODE_ABS or HRTIMER_MODE_REL
1793 *
1794 * Make the current task sleep until the given expiry time has
1795 * elapsed. The routine will return immediately unless
1796 * the current task state has been set (see set_current_state()).
1797 *
1798 * You can set the task state as follows -
1799 *
 1800 * %TASK_UNINTERRUPTIBLE - at least the requested time is guaranteed to
1801 * pass before the routine returns.
1802 *
1803 * %TASK_INTERRUPTIBLE - the routine may return early if a signal is
1804 * delivered to the current task.
1805 *
1806 * The current task state is guaranteed to be TASK_RUNNING when this
1807 * routine returns.
1808 *
1809 * Returns 0 when the timer has expired otherwise -EINTR
1810 */
1811int __sched schedule_hrtimeout(ktime_t *expires,
1812                               const enum hrtimer_mode mode)
1813{
1814        return schedule_hrtimeout_range(expires, 0, mode);
1815}
1816EXPORT_SYMBOL_GPL(schedule_hrtimeout);
1817