/*
 *  linux/kernel/timer.c
 *
 *  Kernel internal timers, basic process system calls
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  1997-01-28  Modified by Finn Arne Gangstad to make timers scale better.
 *
 *  1997-09-10  Updated NTP code according to technical memorandum Jan '96
 *              "A Kernel Model for Precision Timekeeping" by Dave Mills
 *  1998-12-24  Fixed an xtime SMP race (we need the xtime_lock rw spinlock to
 *              serialize accesses to xtime/lost_ticks).
 *                              Copyright (C) 1998  Andrea Arcangeli
 *  1999-03-10  Improved NTP compatibility by Ulrich Windl
 *  2002-05-31  Moved sys_sysinfo here and made its locking sane, Robert Love
 *  2000-10-05  Implemented scalable SMP per-CPU timer handling.
 *                              Copyright (C) 2000, 2001, 2002  Ingo Molnar
 *              Designed by David S. Miller, Alexey Kuznetsov and Ingo Molnar
 */

#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/pid_namespace.h>
#include <linux/notifier.h>
#include <linux/thread_info.h>
#include <linux/time.h>
#include <linux/jiffies.h>
#include <linux/posix-timers.h>
#include <linux/cpu.h>
#include <linux/syscalls.h>
#include <linux/delay.h>
#include <linux/tick.h>
#include <linux/kallsyms.h>

#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/div64.h>
#include <asm/timex.h>
#include <asm/io.h>

u64 jiffies_64 __cacheline_aligned_in_smp = INITIAL_JIFFIES;

EXPORT_SYMBOL(jiffies_64);

/*
 * per-CPU timer vector definitions:
 */
#define TVN_BITS (CONFIG_BASE_SMALL ? 4 : 6)
#define TVR_BITS (CONFIG_BASE_SMALL ? 6 : 8)
#define TVN_SIZE (1 << TVN_BITS)
#define TVR_SIZE (1 << TVR_BITS)
#define TVN_MASK (TVN_SIZE - 1)
#define TVR_MASK (TVR_SIZE - 1)

typedef struct tvec_s {
        struct list_head vec[TVN_SIZE];
} tvec_t;

typedef struct tvec_root_s {
        struct list_head vec[TVR_SIZE];
} tvec_root_t;

struct tvec_t_base_s {
        spinlock_t lock;
        struct timer_list *running_timer;
        unsigned long timer_jiffies;
        tvec_root_t tv1;
        tvec_t tv2;
        tvec_t tv3;
        tvec_t tv4;
        tvec_t tv5;
} ____cacheline_aligned;

typedef struct tvec_t_base_s tvec_base_t;

tvec_base_t boot_tvec_bases;
EXPORT_SYMBOL(boot_tvec_bases);
static DEFINE_PER_CPU(tvec_base_t *, tvec_bases) = &boot_tvec_bases;

/*
 * Note that all tvec_bases are at least 2 byte aligned, so the lower
 * bit of the base pointer in timer_list is guaranteed to be zero. Use
 * that LSB as a flag to indicate whether the timer is deferrable.
 */
#define TBASE_DEFERRABLE_FLAG           (0x1)

/* Functions below help us manage the 'deferrable' flag */
static inline unsigned int tbase_get_deferrable(tvec_base_t *base)
{
        return ((unsigned int)(unsigned long)base & TBASE_DEFERRABLE_FLAG);
}

static inline tvec_base_t *tbase_get_base(tvec_base_t *base)
{
        return ((tvec_base_t *)((unsigned long)base & ~TBASE_DEFERRABLE_FLAG));
}

static inline void timer_set_deferrable(struct timer_list *timer)
{
        timer->base = ((tvec_base_t *)((unsigned long)(timer->base) |
                                       TBASE_DEFERRABLE_FLAG));
}

static inline void
timer_set_base(struct timer_list *timer, tvec_base_t *new_base)
{
        timer->base = (tvec_base_t *)((unsigned long)(new_base) |
                                      tbase_get_deferrable(timer->base));
}
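
/*
 * Illustrative sketch (editor's addition; the pointer value is made
 * up): a tvec_base_t is at least 2-byte aligned, so a base pointer
 * such as 0xc0401e00 has bit 0 clear, and ORing in
 * TBASE_DEFERRABLE_FLAG gives 0xc0401e01. tbase_get_deferrable()
 * reads that bit back, and tbase_get_base() masks it off to recover
 * the real pointer:
 *
 *      struct timer_list t;
 *
 *      init_timer_deferrable(&t);      (defined further below)
 *      BUG_ON(!tbase_get_deferrable(t.base));
 *      BUG_ON(tbase_get_base(t.base) != __raw_get_cpu_var(tvec_bases));
 */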

/**
 * __round_jiffies - function to round jiffies to a full second
 * @j: the time in (absolute) jiffies that should be rounded
 * @cpu: the processor number on which the timeout will happen
 *
 * __round_jiffies() rounds an absolute time in the future (in jiffies)
 * up or down to (approximately) full seconds. This is useful for timers
 * for which the exact time they fire does not matter too much, as long as
 * they fire approximately every X seconds.
 *
 * By rounding these timers to whole seconds, all such timers will fire
 * at the same time, rather than at various times spread out. The goal
 * of this is to have the CPU wake up less, which saves power.
 *
 * The exact rounding is skewed for each processor to avoid all
 * processors firing at the exact same time, which could lead
 * to lock contention or spurious cache line bouncing.
 *
 * The return value is the rounded version of the @j parameter.
 */
unsigned long __round_jiffies(unsigned long j, int cpu)
{
        int rem;
        unsigned long original = j;

        /*
         * We don't want all cpus firing their timers at once hitting the
         * same lock or cachelines, so we skew each extra cpu with an extra
         * 3 jiffies. This 3 jiffies came originally from the mm/ code which
         * already did this.
         * The skew is done by adding 3*cpunr, then rounding, then subtracting
         * this extra offset again.
         */
        j += cpu * 3;

        rem = j % HZ;

        /*
         * If the target jiffy is just after a whole second (which can happen
         * due to delays of the timer irq, long irq off times etc etc) then
         * we should round down to the whole second, not up. Use 1/4th second
         * as the cutoff for this rounding, as an extreme upper bound.
         */
        if (rem < HZ/4) /* round down */
                j = j - rem;
        else /* round up */
                j = j - rem + HZ;

        /* now that we have rounded, subtract the extra skew again */
        j -= cpu * 3;

        if (j <= jiffies) /* rounding ate our timeout entirely */
                return original;
        return j;
}
EXPORT_SYMBOL_GPL(__round_jiffies);
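
/*
 * Worked example (editor's addition; HZ=250 and cpu=1 are hypothetical):
 * for j = 1000130, the skew makes j = 1000133, so rem = 1000133 % 250
 * = 133. Since 133 >= HZ/4 (62), we round up: j = 1000133 - 133 + 250
 * = 1000250, a whole-second boundary, and removing the skew gives
 * 1000247. For j = 1000010 the remainder after skewing is 13 < HZ/4,
 * so the call rounds down instead.
 */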

/**
 * __round_jiffies_relative - function to round jiffies to a full second
 * @j: the time in (relative) jiffies that should be rounded
 * @cpu: the processor number on which the timeout will happen
 *
 * __round_jiffies_relative() rounds a time delta in the future (in jiffies)
 * up or down to (approximately) full seconds. This is useful for timers
 * for which the exact time they fire does not matter too much, as long as
 * they fire approximately every X seconds.
 *
 * By rounding these timers to whole seconds, all such timers will fire
 * at the same time, rather than at various times spread out. The goal
 * of this is to have the CPU wake up less, which saves power.
 *
 * The exact rounding is skewed for each processor to avoid all
 * processors firing at the exact same time, which could lead
 * to lock contention or spurious cache line bouncing.
 *
 * The return value is the rounded version of the @j parameter.
 */
unsigned long __round_jiffies_relative(unsigned long j, int cpu)
{
        /*
         * In theory the following code can skip a jiffy in case jiffies
         * increments right between the addition and the later subtraction.
         * However since the entire point of this function is to use
         * approximate timeouts, it's entirely ok to not handle that.
         */
        return __round_jiffies(j + jiffies, cpu) - jiffies;
}
EXPORT_SYMBOL_GPL(__round_jiffies_relative);

/**
 * round_jiffies - function to round jiffies to a full second
 * @j: the time in (absolute) jiffies that should be rounded
 *
 * round_jiffies() rounds an absolute time in the future (in jiffies)
 * up or down to (approximately) full seconds. This is useful for timers
 * for which the exact time they fire does not matter too much, as long as
 * they fire approximately every X seconds.
 *
 * By rounding these timers to whole seconds, all such timers will fire
 * at the same time, rather than at various times spread out. The goal
 * of this is to have the CPU wake up less, which saves power.
 *
 * The return value is the rounded version of the @j parameter.
 */
unsigned long round_jiffies(unsigned long j)
{
        return __round_jiffies(j, raw_smp_processor_id());
}
EXPORT_SYMBOL_GPL(round_jiffies);

/**
 * round_jiffies_relative - function to round jiffies to a full second
 * @j: the time in (relative) jiffies that should be rounded
 *
 * round_jiffies_relative() rounds a time delta in the future (in jiffies)
 * up or down to (approximately) full seconds. This is useful for timers
 * for which the exact time they fire does not matter too much, as long as
 * they fire approximately every X seconds.
 *
 * By rounding these timers to whole seconds, all such timers will fire
 * at the same time, rather than at various times spread out. The goal
 * of this is to have the CPU wake up less, which saves power.
 *
 * The return value is the rounded version of the @j parameter.
 */
unsigned long round_jiffies_relative(unsigned long j)
{
        return __round_jiffies_relative(j, raw_smp_processor_id());
}
EXPORT_SYMBOL_GPL(round_jiffies_relative);

static inline void set_running_timer(tvec_base_t *base,
                                        struct timer_list *timer)
{
#ifdef CONFIG_SMP
        base->running_timer = timer;
#endif
}

static void internal_add_timer(tvec_base_t *base, struct timer_list *timer)
{
        unsigned long expires = timer->expires;
        unsigned long idx = expires - base->timer_jiffies;
        struct list_head *vec;

        if (idx < TVR_SIZE) {
                int i = expires & TVR_MASK;
                vec = base->tv1.vec + i;
        } else if (idx < 1 << (TVR_BITS + TVN_BITS)) {
                int i = (expires >> TVR_BITS) & TVN_MASK;
                vec = base->tv2.vec + i;
        } else if (idx < 1 << (TVR_BITS + 2 * TVN_BITS)) {
                int i = (expires >> (TVR_BITS + TVN_BITS)) & TVN_MASK;
                vec = base->tv3.vec + i;
        } else if (idx < 1 << (TVR_BITS + 3 * TVN_BITS)) {
                int i = (expires >> (TVR_BITS + 2 * TVN_BITS)) & TVN_MASK;
                vec = base->tv4.vec + i;
        } else if ((signed long) idx < 0) {
                /*
                 * Can happen if you add a timer with expires == jiffies,
                 * or you set a timer to go off in the past
                 */
                vec = base->tv1.vec + (base->timer_jiffies & TVR_MASK);
        } else {
                int i;
                /* If the timeout is larger than 0xffffffff on 64-bit
                 * architectures then we use the maximum timeout:
                 */
                if (idx > 0xffffffffUL) {
                        idx = 0xffffffffUL;
                        expires = idx + base->timer_jiffies;
                }
                i = (expires >> (TVR_BITS + 3 * TVN_BITS)) & TVN_MASK;
                vec = base->tv5.vec + i;
        }
        /*
         * Timers are FIFO:
         */
        list_add_tail(&timer->entry, vec);
}
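
/*
 * Worked example (editor's addition; assumes !CONFIG_BASE_SMALL, i.e.
 * TVR_BITS=8 and TVN_BITS=6, with base->timer_jiffies == 1000000): a
 * timer with expires = 1000100 has idx = 100 < 256, so it lands in tv1
 * at slot (1000100 & 255) = 164. A timer with expires = 1010000 has
 * idx = 10000, which is >= 256 but < 1<<14, so it goes into tv2 at
 * slot (1010000 >> 8) & 63 = 41, and is only sorted back into tv1 by
 * cascade() once timer_jiffies catches up with that tv2 bucket.
 */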

#ifdef CONFIG_TIMER_STATS
void __timer_stats_timer_set_start_info(struct timer_list *timer, void *addr)
{
        if (timer->start_site)
                return;

        timer->start_site = addr;
        memcpy(timer->start_comm, current->comm, TASK_COMM_LEN);
        timer->start_pid = current->pid;
}

static void timer_stats_account_timer(struct timer_list *timer)
{
        unsigned int flag = 0;

        if (unlikely(tbase_get_deferrable(timer->base)))
                flag |= TIMER_STATS_FLAG_DEFERRABLE;

        timer_stats_update_stats(timer, timer->start_pid, timer->start_site,
                                 timer->function, timer->start_comm, flag);
}

#else
static void timer_stats_account_timer(struct timer_list *timer) {}
#endif

/**
 * init_timer - initialize a timer.
 * @timer: the timer to be initialized
 *
 * init_timer() must be called on a timer before calling *any* of the
 * other timer functions.
 */
void fastcall init_timer(struct timer_list *timer)
{
        timer->entry.next = NULL;
        timer->base = __raw_get_cpu_var(tvec_bases);
#ifdef CONFIG_TIMER_STATS
        timer->start_site = NULL;
        timer->start_pid = -1;
        memset(timer->start_comm, 0, TASK_COMM_LEN);
#endif
}
EXPORT_SYMBOL(init_timer);

void fastcall init_timer_deferrable(struct timer_list *timer)
{
        init_timer(timer);
        timer_set_deferrable(timer);
}
EXPORT_SYMBOL(init_timer_deferrable);

static inline void detach_timer(struct timer_list *timer,
                                int clear_pending)
{
        struct list_head *entry = &timer->entry;

        __list_del(entry->prev, entry->next);
        if (clear_pending)
                entry->next = NULL;
        entry->prev = LIST_POISON2;
}

/*
 * We are using hashed locking: holding per_cpu(tvec_bases).lock
 * means that all timers which are tied to this base via timer->base are
 * locked, and the base itself is locked too.
 *
 * So __run_timers/migrate_timers can safely modify all timers which could
 * be found on ->tvX lists.
 *
 * When the timer's base is locked, and the timer is removed from the list,
 * it is possible to set timer->base = NULL and drop the lock: the timer
 * remains locked.
 */
static tvec_base_t *lock_timer_base(struct timer_list *timer,
                                        unsigned long *flags)
        __acquires(timer->base->lock)
{
        tvec_base_t *base;

        for (;;) {
                tvec_base_t *prelock_base = timer->base;
                base = tbase_get_base(prelock_base);
                if (likely(base != NULL)) {
                        spin_lock_irqsave(&base->lock, *flags);
                        if (likely(prelock_base == timer->base))
                                return base;
                        /* The timer has migrated to another CPU */
                        spin_unlock_irqrestore(&base->lock, *flags);
                }
                cpu_relax();
        }
}

int __mod_timer(struct timer_list *timer, unsigned long expires)
{
        tvec_base_t *base, *new_base;
        unsigned long flags;
        int ret = 0;

        timer_stats_timer_set_start_info(timer);
        BUG_ON(!timer->function);

        base = lock_timer_base(timer, &flags);

        if (timer_pending(timer)) {
                detach_timer(timer, 0);
                ret = 1;
        }

        new_base = __get_cpu_var(tvec_bases);

        if (base != new_base) {
                /*
                 * We are trying to schedule the timer on the local CPU.
                 * However we can't change the timer's base while it is
                 * running, otherwise del_timer_sync() can't detect that the
                 * timer's handler has not yet finished. This also guarantees
                 * that the timer is serialized wrt itself.
                 */
                if (likely(base->running_timer != timer)) {
                        /* See the comment in lock_timer_base() */
                        timer_set_base(timer, NULL);
                        spin_unlock(&base->lock);
                        base = new_base;
                        spin_lock(&base->lock);
                        timer_set_base(timer, base);
                }
        }

        timer->expires = expires;
        internal_add_timer(base, timer);
        spin_unlock_irqrestore(&base->lock, flags);

        return ret;
}

EXPORT_SYMBOL(__mod_timer);

/**
 * add_timer_on - start a timer on a particular CPU
 * @timer: the timer to be added
 * @cpu: the CPU to start it on
 *
 * This is not very scalable on SMP. Double adds are not possible.
 */
void add_timer_on(struct timer_list *timer, int cpu)
{
        tvec_base_t *base = per_cpu(tvec_bases, cpu);
        unsigned long flags;

        timer_stats_timer_set_start_info(timer);
        BUG_ON(timer_pending(timer) || !timer->function);
        spin_lock_irqsave(&base->lock, flags);
        timer_set_base(timer, base);
        internal_add_timer(base, timer);
        spin_unlock_irqrestore(&base->lock, flags);
}

/**
 * mod_timer - modify a timer's timeout
 * @timer: the timer to be modified
 * @expires: new timeout in jiffies
 *
 * mod_timer() is a more efficient way to update the expires field of an
 * active timer (if the timer is inactive it will be activated).
 *
 * mod_timer(timer, expires) is equivalent to:
 *
 *     del_timer(timer); timer->expires = expires; add_timer(timer);
 *
 * Note that if there are multiple unserialized concurrent users of the
 * same timer, then mod_timer() is the only safe way to modify the timeout,
 * since add_timer() cannot modify an already running timer.
 *
 * The function returns whether it has modified a pending timer or not.
 * (i.e. mod_timer() of an inactive timer returns 0, mod_timer() of an
 * active timer returns 1.)
 */
int mod_timer(struct timer_list *timer, unsigned long expires)
{
        BUG_ON(!timer->function);

        timer_stats_timer_set_start_info(timer);
        /*
         * This is a common optimization triggered by the
         * networking code - if the timer is re-modified
         * to be the same thing then just return:
         */
        if (timer->expires == expires && timer_pending(timer))
                return 1;

        return __mod_timer(timer, expires);
}

EXPORT_SYMBOL(mod_timer);
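
/*
 * Usage sketch (editor's addition; the "watchdog" naming is
 * hypothetical): a self-rearming one-second watchdog built on
 * mod_timer(), with round_jiffies() batching its wakeups with other
 * whole-second timers:
 *
 *      static struct timer_list watchdog;
 *
 *      static void watchdog_fire(unsigned long data)
 *      {
 *              (check device health here)
 *              mod_timer(&watchdog, round_jiffies(jiffies + HZ));
 *      }
 *
 *      setup_timer(&watchdog, watchdog_fire, 0);
 *      mod_timer(&watchdog, round_jiffies(jiffies + HZ));
 *
 * mod_timer() activates an inactive timer, so no separate add_timer()
 * call is needed.
 */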

/**
 * del_timer - deactivate a timer.
 * @timer: the timer to be deactivated
 *
 * del_timer() deactivates a timer - this works on both active and inactive
 * timers.
 *
 * The function returns whether it has deactivated a pending timer or not.
 * (i.e. del_timer() of an inactive timer returns 0, del_timer() of an
 * active timer returns 1.)
 */
int del_timer(struct timer_list *timer)
{
        tvec_base_t *base;
        unsigned long flags;
        int ret = 0;

        timer_stats_timer_clear_start_info(timer);
        if (timer_pending(timer)) {
                base = lock_timer_base(timer, &flags);
                if (timer_pending(timer)) {
                        detach_timer(timer, 1);
                        ret = 1;
                }
                spin_unlock_irqrestore(&base->lock, flags);
        }

        return ret;
}

EXPORT_SYMBOL(del_timer);

#ifdef CONFIG_SMP
/**
 * try_to_del_timer_sync - Try to deactivate a timer
 * @timer: timer to delete
 *
 * This function tries to deactivate a timer. Upon successful (ret >= 0)
 * exit the timer is not queued and the handler is not running on any CPU.
 *
 * It must not be called from interrupt contexts.
 */
int try_to_del_timer_sync(struct timer_list *timer)
{
        tvec_base_t *base;
        unsigned long flags;
        int ret = -1;

        base = lock_timer_base(timer, &flags);

        if (base->running_timer == timer)
                goto out;

        ret = 0;
        if (timer_pending(timer)) {
                detach_timer(timer, 1);
                ret = 1;
        }
out:
        spin_unlock_irqrestore(&base->lock, flags);

        return ret;
}

EXPORT_SYMBOL(try_to_del_timer_sync);

/**
 * del_timer_sync - deactivate a timer and wait for the handler to finish.
 * @timer: the timer to be deactivated
 *
 * This function only differs from del_timer() on SMP: besides deactivating
 * the timer it also makes sure the handler has finished executing on other
 * CPUs.
 *
 * Synchronization rules: Callers must prevent restarting of the timer,
 * otherwise this function is meaningless. It must not be called from
 * interrupt contexts. The caller must not hold locks which would prevent
 * completion of the timer's handler. The timer's handler must not call
 * add_timer_on(). Upon exit the timer is not queued and the handler is
 * not running on any CPU.
 *
 * The function returns whether it has deactivated a pending timer or not.
 */
int del_timer_sync(struct timer_list *timer)
{
        for (;;) {
                int ret = try_to_del_timer_sync(timer);
                if (ret >= 0)
                        return ret;
                cpu_relax();
        }
}

EXPORT_SYMBOL(del_timer_sync);
#endif
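
/*
 * Teardown sketch (editor's addition; "my_dev" and its members are
 * hypothetical): the usual shutdown order for a self-rearming timer is
 * to stop the re-arming first, then wait for any running handler:
 *
 *      my_dev->shutting_down = 1;
 *      smp_wmb();
 *      del_timer_sync(&my_dev->timer);
 *
 * where the handler checks my_dev->shutting_down before calling
 * mod_timer() again. Per the rules above, the caller must not hold any
 * lock that the handler itself takes, or this will deadlock.
 */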

static int cascade(tvec_base_t *base, tvec_t *tv, int index)
{
        /* cascade all the timers from tv up one level */
        struct timer_list *timer, *tmp;
        struct list_head tv_list;

        list_replace_init(tv->vec + index, &tv_list);

        /*
         * We are removing _all_ timers from the list, so we
         * don't have to detach them individually.
         */
        list_for_each_entry_safe(timer, tmp, &tv_list, entry) {
                BUG_ON(tbase_get_base(timer->base) != base);
                internal_add_timer(base, timer);
        }

        return index;
}

#define INDEX(N) ((base->timer_jiffies >> (TVR_BITS + (N) * TVN_BITS)) & TVN_MASK)

/**
 * __run_timers - run all expired timers (if any) on this CPU.
 * @base: the timer vector to be processed.
 *
 * This function cascades all vectors and executes all expired timer
 * vectors.
 */
static inline void __run_timers(tvec_base_t *base)
{
        struct timer_list *timer;

        spin_lock_irq(&base->lock);
        while (time_after_eq(jiffies, base->timer_jiffies)) {
                struct list_head work_list;
                struct list_head *head = &work_list;
                int index = base->timer_jiffies & TVR_MASK;

                /*
                 * Cascade timers:
                 */
                if (!index &&
                        (!cascade(base, &base->tv2, INDEX(0))) &&
                                (!cascade(base, &base->tv3, INDEX(1))) &&
                                        !cascade(base, &base->tv4, INDEX(2)))
                        cascade(base, &base->tv5, INDEX(3));
                ++base->timer_jiffies;
                list_replace_init(base->tv1.vec + index, &work_list);
                while (!list_empty(head)) {
                        void (*fn)(unsigned long);
                        unsigned long data;

                        timer = list_first_entry(head, struct timer_list, entry);
                        fn = timer->function;
                        data = timer->data;

                        timer_stats_account_timer(timer);

                        set_running_timer(base, timer);
                        detach_timer(timer, 1);
                        spin_unlock_irq(&base->lock);
                        {
                                int preempt_count = preempt_count();
                                fn(data);
                                if (preempt_count != preempt_count()) {
                                        printk(KERN_WARNING "huh, entered %p "
                                               "with preempt_count %08x, exited"
                                               " with %08x?\n",
                                               fn, preempt_count,
                                               preempt_count());
                                        BUG();
                                }
                        }
                        spin_lock_irq(&base->lock);
                }
        }
        set_running_timer(base, NULL);
        spin_unlock_irq(&base->lock);
}
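
/*
 * Cascade timing, concretely (editor's addition; assumes TVR_BITS=8,
 * TVN_BITS=6): tv1 covers the next 256 jiffies at 1-jiffy resolution,
 * so index is zero once every 256 jiffies, at which point one tv2
 * bucket (spanning 256 jiffies) is re-sorted into tv1. Likewise tv3
 * cascades into tv2 every 256*64 jiffies, and so on up to tv5. Since
 * cascade() returns the bucket index, the "&&" chain above only climbs
 * to the next level when the lower level's bucket index has also
 * wrapped to zero.
 */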

#if defined(CONFIG_NO_IDLE_HZ) || defined(CONFIG_NO_HZ)
/*
 * Find out when the next timer event is due to happen. This
 * is used on S/390 to stop all activity when a CPU is idle.
 * This function must be called with interrupts disabled.
 */
static unsigned long __next_timer_interrupt(tvec_base_t *base)
{
        unsigned long timer_jiffies = base->timer_jiffies;
        unsigned long expires = timer_jiffies + NEXT_TIMER_MAX_DELTA;
        int index, slot, array, found = 0;
        struct timer_list *nte;
        tvec_t *varray[4];

        /* Look for timer events in tv1. */
        index = slot = timer_jiffies & TVR_MASK;
        do {
                list_for_each_entry(nte, base->tv1.vec + slot, entry) {
                        if (tbase_get_deferrable(nte->base))
                                continue;

                        found = 1;
                        expires = nte->expires;
                        /* Look at the cascade bucket(s)? */
                        if (!index || slot < index)
                                goto cascade;
                        return expires;
                }
                slot = (slot + 1) & TVR_MASK;
        } while (slot != index);

cascade:
        /* Calculate the next cascade event */
        if (index)
                timer_jiffies += TVR_SIZE - index;
        timer_jiffies >>= TVR_BITS;

        /* Check tv2-tv5. */
        varray[0] = &base->tv2;
        varray[1] = &base->tv3;
        varray[2] = &base->tv4;
        varray[3] = &base->tv5;

        for (array = 0; array < 4; array++) {
                tvec_t *varp = varray[array];

                index = slot = timer_jiffies & TVN_MASK;
                do {
                        list_for_each_entry(nte, varp->vec + slot, entry) {
                                found = 1;
                                if (time_before(nte->expires, expires))
                                        expires = nte->expires;
                        }
                        /*
                         * Are we still searching for the first timer, or are
                         * we looking up the cascade buckets?
                         */
                        if (found) {
                                /* Look at the cascade bucket(s)? */
                                if (!index || slot < index)
                                        break;
                                return expires;
                        }
                        slot = (slot + 1) & TVN_MASK;
                } while (slot != index);

                if (index)
                        timer_jiffies += TVN_SIZE - index;
                timer_jiffies >>= TVN_BITS;
        }
        return expires;
}

/*
 * Check if the next hrtimer event is before the next timer wheel
 * event:
 */
static unsigned long cmp_next_hrtimer_event(unsigned long now,
                                            unsigned long expires)
{
        ktime_t hr_delta = hrtimer_get_next_event();
        struct timespec tsdelta;
        unsigned long delta;

        if (hr_delta.tv64 == KTIME_MAX)
                return expires;

        /*
         * Expired timer available, let it expire in the next tick
         */
        if (hr_delta.tv64 <= 0)
                return now + 1;

        tsdelta = ktime_to_timespec(hr_delta);
        delta = timespec_to_jiffies(&tsdelta);

        /*
         * Limit the delta to the max value, which is checked in
         * tick_nohz_stop_sched_tick():
         */
        if (delta > NEXT_TIMER_MAX_DELTA)
                delta = NEXT_TIMER_MAX_DELTA;

        /*
         * Take rounding errors into account and make sure that it
         * expires in the next tick. Otherwise we go into an endless
         * ping-pong due to tick_nohz_stop_sched_tick() retriggering
         * the timer softirq.
         */
        if (delta < 1)
                delta = 1;
        now += delta;
        if (time_before(now, expires))
                return now;
        return expires;
}

/**
 * get_next_timer_interrupt - return the jiffy of the next pending timer
 * @now: current time (in jiffies)
 */
unsigned long get_next_timer_interrupt(unsigned long now)
{
        tvec_base_t *base = __get_cpu_var(tvec_bases);
        unsigned long expires;

        spin_lock(&base->lock);
        expires = __next_timer_interrupt(base);
        spin_unlock(&base->lock);

        if (time_before_eq(expires, now))
                return now;

        return cmp_next_hrtimer_event(now, expires);
}

#ifdef CONFIG_NO_IDLE_HZ
unsigned long next_timer_interrupt(void)
{
        return get_next_timer_interrupt(jiffies);
}
#endif

#endif

#ifndef CONFIG_VIRT_CPU_ACCOUNTING
void account_process_tick(struct task_struct *p, int user_tick)
{
        if (user_tick) {
                account_user_time(p, jiffies_to_cputime(1));
                account_user_time_scaled(p, jiffies_to_cputime(1));
        } else {
                account_system_time(p, HARDIRQ_OFFSET, jiffies_to_cputime(1));
                account_system_time_scaled(p, jiffies_to_cputime(1));
        }
}
#endif

/*
 * Called from the timer interrupt handler to charge one tick to the current
 * process.  user_tick is 1 if the tick is user time, 0 for system.
 */
void update_process_times(int user_tick)
{
        struct task_struct *p = current;
        int cpu = smp_processor_id();

        /* Note: this timer irq context must be accounted for as well. */
        account_process_tick(p, user_tick);
        run_local_timers();
        if (rcu_pending(cpu))
                rcu_check_callbacks(cpu, user_tick);
        scheduler_tick();
        run_posix_cpu_timers(p);
}

/*
 * Nr of active tasks - counted in fixed-point numbers
 */
static unsigned long count_active_tasks(void)
{
        return nr_active() * FIXED_1;
}

/*
 * Hmm. Changed this, as the GNU make sources (load.c) seem to
 * imply that avenrun[] is the standard name for this kind of thing.
 * Nothing else seems to be standardized: the fractional size etc
 * all seem to differ on different machines.
 *
 * Requires xtime_lock to access.
 */
unsigned long avenrun[3];

EXPORT_SYMBOL(avenrun);

/*
 * calc_load - given tick count, update the avenrun load estimates.
 * This is called while holding a write_lock on xtime_lock.
 */
static inline void calc_load(unsigned long ticks)
{
        unsigned long active_tasks; /* fixed-point */
        static int count = LOAD_FREQ;

        count -= ticks;
        if (unlikely(count < 0)) {
                active_tasks = count_active_tasks();
                do {
                        CALC_LOAD(avenrun[0], EXP_1, active_tasks);
                        CALC_LOAD(avenrun[1], EXP_5, active_tasks);
                        CALC_LOAD(avenrun[2], EXP_15, active_tasks);
                        count += LOAD_FREQ;
                } while (count < 0);
        }
}
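
/*
 * Worked example (editor's addition): CALC_LOAD(load, exp, n) computes
 * load = (load*exp + n*(FIXED_1 - exp)) >> FSHIFT, an exponentially
 * weighted moving average in 11-bit fixed point (FIXED_1 = 1<<11 =
 * 2048). With EXP_1 = 1884, a previous 1-minute load of 1.0 (2048) and
 * one active task (n = 2048), the new value is
 * (2048*1884 + 2048*164) >> 11 = 2048: the average stays at 1.0, as it
 * should when the instantaneous load equals the average.
 */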

/*
 * This function runs timers and the timer-tq in bottom half context.
 */
static void run_timer_softirq(struct softirq_action *h)
{
        tvec_base_t *base = __get_cpu_var(tvec_bases);

        hrtimer_run_queues();

        if (time_after_eq(jiffies, base->timer_jiffies))
                __run_timers(base);
}

/*
 * Called by the local, per-CPU timer interrupt on SMP.
 */
void run_local_timers(void)
{
        raise_softirq(TIMER_SOFTIRQ);
        softlockup_tick();
}

/*
 * Called by the timer interrupt. xtime_lock must already be taken
 * by the timer IRQ!
 */
static inline void update_times(unsigned long ticks)
{
        update_wall_time();
        calc_load(ticks);
}

/*
 * The 64-bit jiffies value is not atomic - you MUST NOT read it
 * without sampling the sequence number in xtime_lock.
 * jiffies is defined in the linker script...
 */

void do_timer(unsigned long ticks)
{
        jiffies_64 += ticks;
        update_times(ticks);
}

#ifdef __ARCH_WANT_SYS_ALARM

/*
 * For backwards compatibility?  This can be done in libc so Alpha
 * and all newer ports shouldn't need it.
 */
asmlinkage unsigned long sys_alarm(unsigned int seconds)
{
        return alarm_setitimer(seconds);
}

#endif

#ifndef __alpha__

/*
 * The Alpha uses getxpid, getxuid, and getxgid instead.  Maybe this
 * should be moved into arch/i386 instead?
 */

/**
 * sys_getpid - return the thread group id of the current process
 *
 * Note, despite the name, this returns the tgid not the pid.  The tgid and
 * the pid are identical unless CLONE_THREAD was specified on clone() in
 * which case the tgid is the same in all threads of the same group.
 *
 * This is SMP safe as current->tgid does not change.
 */
asmlinkage long sys_getpid(void)
{
        return task_tgid_vnr(current);
}

/*
 * Accessing ->real_parent is not SMP-safe; it could
 * change from under us. However, we can use a stale
 * value of ->real_parent under rcu_read_lock(), see
 * release_task()->call_rcu(delayed_put_task_struct).
 */
asmlinkage long sys_getppid(void)
{
        int pid;

        rcu_read_lock();
        pid = task_tgid_nr_ns(current->real_parent, current->nsproxy->pid_ns);
        rcu_read_unlock();

        return pid;
}

asmlinkage long sys_getuid(void)
{
        /* Only we change this so SMP safe */
        return current->uid;
}

asmlinkage long sys_geteuid(void)
{
        /* Only we change this so SMP safe */
        return current->euid;
}

asmlinkage long sys_getgid(void)
{
        /* Only we change this so SMP safe */
        return current->gid;
}

asmlinkage long sys_getegid(void)
{
        /* Only we change this so SMP safe */
        return current->egid;
}

#endif
1013static void process_timeout(unsigned long __data)
1014{
1015        wake_up_process((struct task_struct *)__data);
1016}
1017
1018/**
1019 * schedule_timeout - sleep until timeout
1020 * @timeout: timeout value in jiffies
1021 *
1022 * Make the current task sleep until @timeout jiffies have
1023 * elapsed. The routine will return immediately unless
1024 * the current task state has been set (see set_current_state()).
1025 *
1026 * You can set the task state as follows -
1027 *
1028 * %TASK_UNINTERRUPTIBLE - at least @timeout jiffies are guaranteed to
1029 * pass before the routine returns. The routine will return 0
1030 *
1031 * %TASK_INTERRUPTIBLE - the routine may return early if a signal is
1032 * delivered to the current task. In this case the remaining time
1033 * in jiffies will be returned, or 0 if the timer expired in time
1034 *
1035 * The current task state is guaranteed to be TASK_RUNNING when this
1036 * routine returns.
1037 *
1038 * Specifying a @timeout value of %MAX_SCHEDULE_TIMEOUT will schedule
1039 * the CPU away without a bound on the timeout. In this case the return
1040 * value will be %MAX_SCHEDULE_TIMEOUT.
1041 *
1042 * In all cases the return value is guaranteed to be non-negative.
1043 */
1044fastcall signed long __sched schedule_timeout(signed long timeout)
1045{
1046        struct timer_list timer;
1047        unsigned long expire;
1048
1049        switch (timeout)
1050        {
1051        case MAX_SCHEDULE_TIMEOUT:
1052                /*
1053                 * These two special cases are useful to be comfortable
1054                 * in the caller. Nothing more. We could take
1055                 * MAX_SCHEDULE_TIMEOUT from one of the negative value
1056                 * but I' d like to return a valid offset (>=0) to allow
1057                 * the caller to do everything it want with the retval.
1058                 */
1059                schedule();
1060                goto out;
1061        default:
1062                /*
1063                 * Another bit of PARANOID. Note that the retval will be
1064                 * 0 since no piece of kernel is supposed to do a check
1065                 * for a negative retval of schedule_timeout() (since it
1066                 * should never happens anyway). You just have the printk()
1067                 * that will tell you if something is gone wrong and where.
1068                 */
1069                if (timeout < 0) {
1070                        printk(KERN_ERR "schedule_timeout: wrong timeout "
1071                                "value %lx\n", timeout);
1072                        dump_stack();
1073                        current->state = TASK_RUNNING;
1074                        goto out;
1075                }
1076        }
1077
1078        expire = timeout + jiffies;
1079
1080        setup_timer(&timer, process_timeout, (unsigned long)current);
1081        __mod_timer(&timer, expire);
1082        schedule();
1083        del_singleshot_timer_sync(&timer);
1084
1085        timeout = expire - jiffies;
1086
1087 out:
1088        return timeout < 0 ? 0 : timeout;
1089}
1090EXPORT_SYMBOL(schedule_timeout);
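
/*
 * Usage sketch (editor's addition; "condition" stands for whatever the
 * caller is waiting on): waiting up to two seconds while allowing
 * signals to cut the wait short. The task state is set before each
 * condition check, so a wakeup cannot be lost:
 *
 *      signed long left = 2 * HZ;
 *
 *      set_current_state(TASK_INTERRUPTIBLE);
 *      while (!condition && left && !signal_pending(current)) {
 *              left = schedule_timeout(left);
 *              set_current_state(TASK_INTERRUPTIBLE);
 *      }
 *      __set_current_state(TASK_RUNNING);
 *
 * On timeout, left is 0; on an early wakeup it holds the jiffies still
 * remaining, so the loop keeps waiting for the rest of the period.
 */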

/*
 * We can use __set_current_state() here because schedule_timeout() calls
 * schedule() unconditionally.
 */
signed long __sched schedule_timeout_interruptible(signed long timeout)
{
        __set_current_state(TASK_INTERRUPTIBLE);
        return schedule_timeout(timeout);
}
EXPORT_SYMBOL(schedule_timeout_interruptible);

signed long __sched schedule_timeout_uninterruptible(signed long timeout)
{
        __set_current_state(TASK_UNINTERRUPTIBLE);
        return schedule_timeout(timeout);
}
EXPORT_SYMBOL(schedule_timeout_uninterruptible);

/* Thread ID - the internal kernel "pid" */
asmlinkage long sys_gettid(void)
{
        return task_pid_vnr(current);
}

/**
 * do_sysinfo - fill in sysinfo struct
 * @info: pointer to buffer to fill
 */
int do_sysinfo(struct sysinfo *info)
{
        unsigned long mem_total, sav_total;
        unsigned int mem_unit, bitcount;
        unsigned long seq;

        memset(info, 0, sizeof(struct sysinfo));

        do {
                struct timespec tp;
                seq = read_seqbegin(&xtime_lock);

                /*
                 * This is annoying.  The below is the same thing
                 * posix_get_clock_monotonic() does, but it wants to
                 * take the lock, which we want to cover the loads stuff
                 * too.
                 */

                getnstimeofday(&tp);
                tp.tv_sec += wall_to_monotonic.tv_sec;
                tp.tv_nsec += wall_to_monotonic.tv_nsec;
                monotonic_to_bootbased(&tp);
                if (tp.tv_nsec - NSEC_PER_SEC >= 0) {
                        tp.tv_nsec = tp.tv_nsec - NSEC_PER_SEC;
                        tp.tv_sec++;
                }
                info->uptime = tp.tv_sec + (tp.tv_nsec ? 1 : 0);

                info->loads[0] = avenrun[0] << (SI_LOAD_SHIFT - FSHIFT);
                info->loads[1] = avenrun[1] << (SI_LOAD_SHIFT - FSHIFT);
                info->loads[2] = avenrun[2] << (SI_LOAD_SHIFT - FSHIFT);

                info->procs = nr_threads;
        } while (read_seqretry(&xtime_lock, seq));

        si_meminfo(info);
        si_swapinfo(info);

        /*
         * If the sum of all the available memory (i.e. ram + swap)
         * is less than can be stored in a 32 bit unsigned long then
         * we can be binary compatible with 2.2.x kernels.  If not,
         * well, in that case 2.2.x was broken anyway...
         *
         *  -Erik Andersen <andersee@debian.org>
         */

        mem_total = info->totalram + info->totalswap;
        if (mem_total < info->totalram || mem_total < info->totalswap)
                goto out;
        bitcount = 0;
        mem_unit = info->mem_unit;
        while (mem_unit > 1) {
                bitcount++;
                mem_unit >>= 1;
                sav_total = mem_total;
                mem_total <<= 1;
                if (mem_total < sav_total)
                        goto out;
        }

        /*
         * If mem_total did not overflow, multiply all memory values by
         * info->mem_unit and set it to 1.  This leaves things compatible
         * with 2.2.x, and also retains compatibility with earlier 2.4.x
         * kernels...
         */

        info->mem_unit = 1;
        info->totalram <<= bitcount;
        info->freeram <<= bitcount;
        info->sharedram <<= bitcount;
        info->bufferram <<= bitcount;
        info->totalswap <<= bitcount;
        info->freeswap <<= bitcount;
        info->totalhigh <<= bitcount;
        info->freehigh <<= bitcount;

out:
        return 0;
}
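
/*
 * Worked example (editor's addition): with a 4 KiB mem_unit and, say,
 * totalram = 262144 units (1 GiB), the loop above finds bitcount = 12
 * (4096 = 1<<12) while checking at each doubling that mem_total << 1
 * does not overflow. Afterwards mem_unit becomes 1 and
 * totalram <<= 12 yields 1073741824, the size in bytes, which is what
 * 2.2.x-era userspace expected.
 */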

asmlinkage long sys_sysinfo(struct sysinfo __user *info)
{
        struct sysinfo val;

        do_sysinfo(&val);

        if (copy_to_user(info, &val, sizeof(struct sysinfo)))
                return -EFAULT;

        return 0;
}

/*
 * lockdep: we want to track each per-CPU base as a separate lock-class,
 * but timer-bases are kmalloc()-ed, so we need to attach separate
 * keys to them:
 */
static struct lock_class_key base_lock_keys[NR_CPUS];

static int __cpuinit init_timers_cpu(int cpu)
{
        int j;
        tvec_base_t *base;
        static char __cpuinitdata tvec_base_done[NR_CPUS];

        if (!tvec_base_done[cpu]) {
                static char boot_done;

                if (boot_done) {
                        /*
                         * The APs use this path later in boot
                         */
                        base = kmalloc_node(sizeof(*base),
                                                GFP_KERNEL | __GFP_ZERO,
                                                cpu_to_node(cpu));
                        if (!base)
                                return -ENOMEM;

                        /* Make sure that tvec_base is 2 byte aligned */
                        if (tbase_get_deferrable(base)) {
                                WARN_ON(1);
                                kfree(base);
                                return -ENOMEM;
                        }
                        per_cpu(tvec_bases, cpu) = base;
                } else {
                        /*
                         * This is for the boot CPU - we use compile-time
                         * static initialisation because per-cpu memory isn't
                         * ready yet and because the memory allocators are not
                         * initialised either.
                         */
                        boot_done = 1;
                        base = &boot_tvec_bases;
                }
                tvec_base_done[cpu] = 1;
        } else {
                base = per_cpu(tvec_bases, cpu);
        }

        spin_lock_init(&base->lock);
        lockdep_set_class(&base->lock, base_lock_keys + cpu);

        for (j = 0; j < TVN_SIZE; j++) {
                INIT_LIST_HEAD(base->tv5.vec + j);
                INIT_LIST_HEAD(base->tv4.vec + j);
                INIT_LIST_HEAD(base->tv3.vec + j);
                INIT_LIST_HEAD(base->tv2.vec + j);
        }
        for (j = 0; j < TVR_SIZE; j++)
                INIT_LIST_HEAD(base->tv1.vec + j);

        base->timer_jiffies = jiffies;
        return 0;
}

#ifdef CONFIG_HOTPLUG_CPU
static void migrate_timer_list(tvec_base_t *new_base, struct list_head *head)
{
        struct timer_list *timer;

        while (!list_empty(head)) {
                timer = list_first_entry(head, struct timer_list, entry);
                detach_timer(timer, 0);
                timer_set_base(timer, new_base);
                internal_add_timer(new_base, timer);
        }
}

static void __cpuinit migrate_timers(int cpu)
{
        tvec_base_t *old_base;
        tvec_base_t *new_base;
        int i;

        BUG_ON(cpu_online(cpu));
        old_base = per_cpu(tvec_bases, cpu);
        new_base = get_cpu_var(tvec_bases);

        local_irq_disable();
        double_spin_lock(&new_base->lock, &old_base->lock,
                         smp_processor_id() < cpu);

        BUG_ON(old_base->running_timer);

        for (i = 0; i < TVR_SIZE; i++)
                migrate_timer_list(new_base, old_base->tv1.vec + i);
        for (i = 0; i < TVN_SIZE; i++) {
                migrate_timer_list(new_base, old_base->tv2.vec + i);
                migrate_timer_list(new_base, old_base->tv3.vec + i);
                migrate_timer_list(new_base, old_base->tv4.vec + i);
                migrate_timer_list(new_base, old_base->tv5.vec + i);
        }

        double_spin_unlock(&new_base->lock, &old_base->lock,
                           smp_processor_id() < cpu);
        local_irq_enable();
        put_cpu_var(tvec_bases);
}
#endif /* CONFIG_HOTPLUG_CPU */

static int __cpuinit timer_cpu_notify(struct notifier_block *self,
                                unsigned long action, void *hcpu)
{
        long cpu = (long)hcpu;
        switch (action) {
        case CPU_UP_PREPARE:
        case CPU_UP_PREPARE_FROZEN:
                if (init_timers_cpu(cpu) < 0)
                        return NOTIFY_BAD;
                break;
#ifdef CONFIG_HOTPLUG_CPU
        case CPU_DEAD:
        case CPU_DEAD_FROZEN:
                migrate_timers(cpu);
                break;
#endif
        default:
                break;
        }
        return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata timers_nb = {
        .notifier_call  = timer_cpu_notify,
};

void __init init_timers(void)
{
        int err = timer_cpu_notify(&timers_nb, (unsigned long)CPU_UP_PREPARE,
                                (void *)(long)smp_processor_id());

        init_timer_stats();

        BUG_ON(err == NOTIFY_BAD);
        register_cpu_notifier(&timers_nb);
        open_softirq(TIMER_SOFTIRQ, run_timer_softirq, NULL);
}

/**
 * msleep - sleep safely even with waitqueue interruptions
 * @msecs: Time in milliseconds to sleep for
 */
void msleep(unsigned int msecs)
{
        unsigned long timeout = msecs_to_jiffies(msecs) + 1;

        while (timeout)
                timeout = schedule_timeout_uninterruptible(timeout);
}

EXPORT_SYMBOL(msleep);

/**
 * msleep_interruptible - sleep waiting for signals
 * @msecs: Time in milliseconds to sleep for
 */
unsigned long msleep_interruptible(unsigned int msecs)
{
        unsigned long timeout = msecs_to_jiffies(msecs) + 1;

        while (timeout && !signal_pending(current))
                timeout = schedule_timeout_interruptible(timeout);
        return jiffies_to_msecs(timeout);
}

EXPORT_SYMBOL(msleep_interruptible);

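/*
 * Usage note (editor's addition): the "+ 1" in both helpers above
 * guarantees a minimum sleep of msecs even when the call lands just
 * before a tick boundary; with HZ=1000 (hypothetical), msleep(20)
 * arms a 21-jiffy timeout and sleeps at least 20 ms. msleep() cannot
 * be cut short by signals, while
 *
 *      unsigned long left = msleep_interruptible(20);
 *
 * returns the milliseconds still to go if a signal arrived first, and
 * 0 after a full sleep.
 */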