/*
 *  linux/kernel/timer.c
 *
 *  Kernel internal timers, basic process system calls
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  1997-01-28  Modified by Finn Arne Gangstad to make timers scale better.
 *
 *  1997-09-10  Updated NTP code according to technical memorandum Jan '96
 *              "A Kernel Model for Precision Timekeeping" by Dave Mills
 *  1998-12-24  Fixed a xtime SMP race (we need the xtime_lock rw spinlock to
 *              serialize accesses to xtime/lost_ticks).
 *                              Copyright (C) 1998  Andrea Arcangeli
 *  1999-03-10  Improved NTP compatibility by Ulrich Windl
 *  2002-05-31  Move sys_sysinfo here and make its locking sane, Robert Love
 *  2000-10-05  Implemented scalable SMP per-CPU timer handling.
 *                              Copyright (C) 2000, 2001, 2002  Ingo Molnar
 *              Designed by David S. Miller, Alexey Kuznetsov and Ingo Molnar
 */

#include <linux/kernel_stat.h>
#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/pid_namespace.h>
#include <linux/notifier.h>
#include <linux/thread_info.h>
#include <linux/time.h>
#include <linux/jiffies.h>
#include <linux/posix-timers.h>
#include <linux/cpu.h>
#include <linux/syscalls.h>
#include <linux/delay.h>
#include <linux/tick.h>
#include <linux/kallsyms.h>
#include <linux/irq_work.h>
#include <linux/sched.h>
#include <linux/slab.h>

#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/div64.h>
#include <asm/timex.h>
#include <asm/io.h>

#define CREATE_TRACE_POINTS
#include <trace/events/timer.h>

u64 jiffies_64 __cacheline_aligned_in_smp = INITIAL_JIFFIES;

EXPORT_SYMBOL(jiffies_64);

/*
 * per-CPU timer vector definitions:
 */
#define TVN_BITS (CONFIG_BASE_SMALL ? 4 : 6)
#define TVR_BITS (CONFIG_BASE_SMALL ? 6 : 8)
#define TVN_SIZE (1 << TVN_BITS)
#define TVR_SIZE (1 << TVR_BITS)
#define TVN_MASK (TVN_SIZE - 1)
#define TVR_MASK (TVR_SIZE - 1)
#define MAX_TVAL ((unsigned long)((1ULL << (TVR_BITS + 4*TVN_BITS)) - 1))
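
/*
 * Worked example (illustrative, assuming CONFIG_BASE_SMALL=0, so
 * TVR_BITS=8 and TVN_BITS=6): tv1 resolves the next 2^8 = 256 jiffies
 * at single-jiffy granularity, and each further level is 2^6 = 64
 * times coarser, so tv2 spans 2^14 jiffies, tv3 2^20, tv4 2^26 and
 * tv5 2^32. MAX_TVAL is then 2^32 - 1 jiffies, i.e. roughly 49.7 days
 * at HZ=1000.
 */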

struct tvec {
        struct list_head vec[TVN_SIZE];
};

struct tvec_root {
        struct list_head vec[TVR_SIZE];
};

struct tvec_base {
        spinlock_t lock;
        struct timer_list *running_timer;
        unsigned long timer_jiffies;
        unsigned long next_timer;
        unsigned long active_timers;
        struct tvec_root tv1;
        struct tvec tv2;
        struct tvec tv3;
        struct tvec tv4;
        struct tvec tv5;
} ____cacheline_aligned;

struct tvec_base boot_tvec_bases;
EXPORT_SYMBOL(boot_tvec_bases);
static DEFINE_PER_CPU(struct tvec_base *, tvec_bases) = &boot_tvec_bases;

/* Functions below help us manage 'deferrable' flag */
static inline unsigned int tbase_get_deferrable(struct tvec_base *base)
{
        return ((unsigned int)(unsigned long)base & TIMER_DEFERRABLE);
}

static inline unsigned int tbase_get_irqsafe(struct tvec_base *base)
{
        return ((unsigned int)(unsigned long)base & TIMER_IRQSAFE);
}

static inline struct tvec_base *tbase_get_base(struct tvec_base *base)
{
        return ((struct tvec_base *)((unsigned long)base & ~TIMER_FLAG_MASK));
}

static inline void
timer_set_base(struct timer_list *timer, struct tvec_base *new_base)
{
        unsigned long flags = (unsigned long)timer->base & TIMER_FLAG_MASK;

        timer->base = (struct tvec_base *)((unsigned long)(new_base) | flags);
}

static unsigned long round_jiffies_common(unsigned long j, int cpu,
                bool force_up)
{
        int rem;
        unsigned long original = j;

        /*
         * We don't want all cpus firing their timers at once hitting the
         * same lock or cachelines, so we skew each extra cpu with an extra
         * 3 jiffies. This 3 jiffies came originally from the mm/ code which
         * already did this.
         * The skew is done by adding 3*cpunr, then round, then subtract this
         * extra offset again.
         */
        j += cpu * 3;

        rem = j % HZ;

        /*
         * If the target jiffie is just after a whole second (which can happen
         * due to delays of the timer irq, long irq off times etc etc) then
         * we should round down to the whole second, not up. Use 1/4th second
         * as cutoff for this rounding as an extreme upper bound for this.
         * But never round down if @force_up is set.
         */
        if (rem < HZ/4 && !force_up) /* round down */
                j = j - rem;
        else /* round up */
                j = j - rem + HZ;

        /* now that we have rounded, subtract the extra skew again */
        j -= cpu * 3;

        if (j <= jiffies) /* rounding ate our timeout entirely; */
                return original;
        return j;
}

/**
 * __round_jiffies - function to round jiffies to a full second
 * @j: the time in (absolute) jiffies that should be rounded
 * @cpu: the processor number on which the timeout will happen
 *
 * __round_jiffies() rounds an absolute time in the future (in jiffies)
 * up or down to (approximately) full seconds. This is useful for timers
 * for which the exact time they fire does not matter too much, as long as
 * they fire approximately every X seconds.
 *
 * By rounding these timers to whole seconds, all such timers will fire
 * at the same time, rather than at various times spread out. The goal
 * of this is to have the CPU wake up less, which saves power.
 *
 * The exact rounding is skewed for each processor to avoid all
 * processors firing at the exact same time, which could lead
 * to lock contention or spurious cache line bouncing.
 *
 * The return value is the rounded version of the @j parameter.
 */
unsigned long __round_jiffies(unsigned long j, int cpu)
{
        return round_jiffies_common(j, cpu, false);
}
EXPORT_SYMBOL_GPL(__round_jiffies);

/**
 * __round_jiffies_relative - function to round jiffies to a full second
 * @j: the time in (relative) jiffies that should be rounded
 * @cpu: the processor number on which the timeout will happen
 *
 * __round_jiffies_relative() rounds a time delta in the future (in jiffies)
 * up or down to (approximately) full seconds. This is useful for timers
 * for which the exact time they fire does not matter too much, as long as
 * they fire approximately every X seconds.
 *
 * By rounding these timers to whole seconds, all such timers will fire
 * at the same time, rather than at various times spread out. The goal
 * of this is to have the CPU wake up less, which saves power.
 *
 * The exact rounding is skewed for each processor to avoid all
 * processors firing at the exact same time, which could lead
 * to lock contention or spurious cache line bouncing.
 *
 * The return value is the rounded version of the @j parameter.
 */
unsigned long __round_jiffies_relative(unsigned long j, int cpu)
{
        unsigned long j0 = jiffies;

        /* Use j0 because jiffies might change while we run */
        return round_jiffies_common(j + j0, cpu, false) - j0;
}
EXPORT_SYMBOL_GPL(__round_jiffies_relative);

/**
 * round_jiffies - function to round jiffies to a full second
 * @j: the time in (absolute) jiffies that should be rounded
 *
 * round_jiffies() rounds an absolute time in the future (in jiffies)
 * up or down to (approximately) full seconds. This is useful for timers
 * for which the exact time they fire does not matter too much, as long as
 * they fire approximately every X seconds.
 *
 * By rounding these timers to whole seconds, all such timers will fire
 * at the same time, rather than at various times spread out. The goal
 * of this is to have the CPU wake up less, which saves power.
 *
 * The return value is the rounded version of the @j parameter.
 */
unsigned long round_jiffies(unsigned long j)
{
        return round_jiffies_common(j, raw_smp_processor_id(), false);
}
EXPORT_SYMBOL_GPL(round_jiffies);
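
/*
 * Example (hypothetical caller): a housekeeping timer that should run
 * roughly once per second, but need not be exact, can share wakeups
 * with other such timers:
 *
 *   mod_timer(&my_timer, round_jiffies(jiffies + HZ));
 */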

/**
 * round_jiffies_relative - function to round jiffies to a full second
 * @j: the time in (relative) jiffies that should be rounded
 *
 * round_jiffies_relative() rounds a time delta in the future (in jiffies)
 * up or down to (approximately) full seconds. This is useful for timers
 * for which the exact time they fire does not matter too much, as long as
 * they fire approximately every X seconds.
 *
 * By rounding these timers to whole seconds, all such timers will fire
 * at the same time, rather than at various times spread out. The goal
 * of this is to have the CPU wake up less, which saves power.
 *
 * The return value is the rounded version of the @j parameter.
 */
unsigned long round_jiffies_relative(unsigned long j)
{
        return __round_jiffies_relative(j, raw_smp_processor_id());
}
EXPORT_SYMBOL_GPL(round_jiffies_relative);

/**
 * __round_jiffies_up - function to round jiffies up to a full second
 * @j: the time in (absolute) jiffies that should be rounded
 * @cpu: the processor number on which the timeout will happen
 *
 * This is the same as __round_jiffies() except that it will never
 * round down.  This is useful for timeouts for which the exact time
 * of firing does not matter too much, as long as they don't fire too
 * early.
 */
unsigned long __round_jiffies_up(unsigned long j, int cpu)
{
        return round_jiffies_common(j, cpu, true);
}
EXPORT_SYMBOL_GPL(__round_jiffies_up);

/**
 * __round_jiffies_up_relative - function to round jiffies up to a full second
 * @j: the time in (relative) jiffies that should be rounded
 * @cpu: the processor number on which the timeout will happen
 *
 * This is the same as __round_jiffies_relative() except that it will never
 * round down.  This is useful for timeouts for which the exact time
 * of firing does not matter too much, as long as they don't fire too
 * early.
 */
unsigned long __round_jiffies_up_relative(unsigned long j, int cpu)
{
        unsigned long j0 = jiffies;

        /* Use j0 because jiffies might change while we run */
        return round_jiffies_common(j + j0, cpu, true) - j0;
}
EXPORT_SYMBOL_GPL(__round_jiffies_up_relative);

/**
 * round_jiffies_up - function to round jiffies up to a full second
 * @j: the time in (absolute) jiffies that should be rounded
 *
 * This is the same as round_jiffies() except that it will never
 * round down.  This is useful for timeouts for which the exact time
 * of firing does not matter too much, as long as they don't fire too
 * early.
 */
unsigned long round_jiffies_up(unsigned long j)
{
        return round_jiffies_common(j, raw_smp_processor_id(), true);
}
EXPORT_SYMBOL_GPL(round_jiffies_up);

/**
 * round_jiffies_up_relative - function to round jiffies up to a full second
 * @j: the time in (relative) jiffies that should be rounded
 *
 * This is the same as round_jiffies_relative() except that it will never
 * round down.  This is useful for timeouts for which the exact time
 * of firing does not matter too much, as long as they don't fire too
 * early.
 */
unsigned long round_jiffies_up_relative(unsigned long j)
{
        return __round_jiffies_up_relative(j, raw_smp_processor_id());
}
EXPORT_SYMBOL_GPL(round_jiffies_up_relative);

/**
 * set_timer_slack - set the allowed slack for a timer
 * @timer: the timer to be modified
 * @slack_hz: the amount of time (in jiffies) allowed for rounding
 *
 * Set the amount of time, in jiffies, that a certain timer has
 * in terms of slack. By setting this value, the timer subsystem
 * will schedule the actual timer somewhere between
 * the time mod_timer() asks for, and that time plus the slack.
 *
 * By setting the slack to -1, a percentage of the delay is used
 * instead.
 */
void set_timer_slack(struct timer_list *timer, int slack_hz)
{
        timer->slack = slack_hz;
}
EXPORT_SYMBOL_GPL(set_timer_slack);
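
/*
 * Example (hypothetical caller): allow up to a second of coalescing
 * slack on a long timeout so it can be merged with nearby expiries:
 *
 *   set_timer_slack(&my_timer, HZ);
 *   mod_timer(&my_timer, jiffies + 10 * HZ);
 */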

static void
__internal_add_timer(struct tvec_base *base, struct timer_list *timer)
{
        unsigned long expires = timer->expires;
        unsigned long idx = expires - base->timer_jiffies;
        struct list_head *vec;

        if (idx < TVR_SIZE) {
                int i = expires & TVR_MASK;
                vec = base->tv1.vec + i;
        } else if (idx < 1 << (TVR_BITS + TVN_BITS)) {
                int i = (expires >> TVR_BITS) & TVN_MASK;
                vec = base->tv2.vec + i;
        } else if (idx < 1 << (TVR_BITS + 2 * TVN_BITS)) {
                int i = (expires >> (TVR_BITS + TVN_BITS)) & TVN_MASK;
                vec = base->tv3.vec + i;
        } else if (idx < 1 << (TVR_BITS + 3 * TVN_BITS)) {
                int i = (expires >> (TVR_BITS + 2 * TVN_BITS)) & TVN_MASK;
                vec = base->tv4.vec + i;
        } else if ((signed long) idx < 0) {
                /*
                 * Can happen if you add a timer with expires == jiffies,
                 * or you set a timer to go off in the past
                 */
                vec = base->tv1.vec + (base->timer_jiffies & TVR_MASK);
        } else {
                int i;
                /* If the timeout is larger than MAX_TVAL (on 64-bit
                 * architectures or with CONFIG_BASE_SMALL=1) then we
                 * use the maximum timeout.
                 */
                if (idx > MAX_TVAL) {
                        idx = MAX_TVAL;
                        expires = idx + base->timer_jiffies;
                }
                i = (expires >> (TVR_BITS + 3 * TVN_BITS)) & TVN_MASK;
                vec = base->tv5.vec + i;
        }
        /*
         * Timers are FIFO:
         */
        list_add_tail(&timer->entry, vec);
}
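
/*
 * Worked example (illustrative, TVR_BITS=8, TVN_BITS=6): a timer armed
 * for base->timer_jiffies + 1000 has idx = 1000, which is >= TVR_SIZE
 * (256) but < 1 << 14, so it is placed in tv2 at slot
 * (expires >> 8) & 63 and cascaded back into tv1 as time advances.
 */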

static void internal_add_timer(struct tvec_base *base, struct timer_list *timer)
{
        __internal_add_timer(base, timer);
        /*
         * Update base->active_timers and base->next_timer
         */
        if (!tbase_get_deferrable(timer->base)) {
                if (time_before(timer->expires, base->next_timer))
                        base->next_timer = timer->expires;
                base->active_timers++;
        }
}

#ifdef CONFIG_TIMER_STATS
void __timer_stats_timer_set_start_info(struct timer_list *timer, void *addr)
{
        if (timer->start_site)
                return;

        timer->start_site = addr;
        memcpy(timer->start_comm, current->comm, TASK_COMM_LEN);
        timer->start_pid = current->pid;
}

static void timer_stats_account_timer(struct timer_list *timer)
{
        unsigned int flag = 0;

        if (likely(!timer->start_site))
                return;
        if (unlikely(tbase_get_deferrable(timer->base)))
                flag |= TIMER_STATS_FLAG_DEFERRABLE;

        timer_stats_update_stats(timer, timer->start_pid, timer->start_site,
                                 timer->function, timer->start_comm, flag);
}

#else
static void timer_stats_account_timer(struct timer_list *timer) {}
#endif

#ifdef CONFIG_DEBUG_OBJECTS_TIMERS

static struct debug_obj_descr timer_debug_descr;

static void *timer_debug_hint(void *addr)
{
        return ((struct timer_list *) addr)->function;
}

/*
 * fixup_init is called when:
 * - an active object is initialized
 */
static int timer_fixup_init(void *addr, enum debug_obj_state state)
{
        struct timer_list *timer = addr;

        switch (state) {
        case ODEBUG_STATE_ACTIVE:
                del_timer_sync(timer);
                debug_object_init(timer, &timer_debug_descr);
                return 1;
        default:
                return 0;
        }
}

/* Stub timer callback for improperly used timers. */
static void stub_timer(unsigned long data)
{
        WARN_ON(1);
}

/*
 * fixup_activate is called when:
 * - an active object is activated
 * - an unknown object is activated (might be a statically initialized object)
 */
static int timer_fixup_activate(void *addr, enum debug_obj_state state)
{
        struct timer_list *timer = addr;

        switch (state) {

        case ODEBUG_STATE_NOTAVAILABLE:
                /*
                 * This is not really a fixup. The timer was
                 * statically initialized. We just make sure that it
                 * is tracked in the object tracker.
                 */
                if (timer->entry.next == NULL &&
                    timer->entry.prev == TIMER_ENTRY_STATIC) {
                        debug_object_init(timer, &timer_debug_descr);
                        debug_object_activate(timer, &timer_debug_descr);
                        return 0;
                } else {
                        setup_timer(timer, stub_timer, 0);
                        return 1;
                }
                return 0;

        case ODEBUG_STATE_ACTIVE:
                WARN_ON(1);

        default:
                return 0;
        }
}

/*
 * fixup_free is called when:
 * - an active object is freed
 */
static int timer_fixup_free(void *addr, enum debug_obj_state state)
{
        struct timer_list *timer = addr;

        switch (state) {
        case ODEBUG_STATE_ACTIVE:
                del_timer_sync(timer);
                debug_object_free(timer, &timer_debug_descr);
                return 1;
        default:
                return 0;
        }
}

/*
 * fixup_assert_init is called when:
 * - an untracked/uninit-ed object is found
 */
static int timer_fixup_assert_init(void *addr, enum debug_obj_state state)
{
        struct timer_list *timer = addr;

        switch (state) {
        case ODEBUG_STATE_NOTAVAILABLE:
                if (timer->entry.prev == TIMER_ENTRY_STATIC) {
                        /*
                         * This is not really a fixup. The timer was
                         * statically initialized. We just make sure that it
                         * is tracked in the object tracker.
                         */
                        debug_object_init(timer, &timer_debug_descr);
                        return 0;
                } else {
                        setup_timer(timer, stub_timer, 0);
                        return 1;
                }
        default:
                return 0;
        }
}

static struct debug_obj_descr timer_debug_descr = {
        .name                   = "timer_list",
        .debug_hint             = timer_debug_hint,
        .fixup_init             = timer_fixup_init,
        .fixup_activate         = timer_fixup_activate,
        .fixup_free             = timer_fixup_free,
        .fixup_assert_init      = timer_fixup_assert_init,
};

static inline void debug_timer_init(struct timer_list *timer)
{
        debug_object_init(timer, &timer_debug_descr);
}

static inline void debug_timer_activate(struct timer_list *timer)
{
        debug_object_activate(timer, &timer_debug_descr);
}

static inline void debug_timer_deactivate(struct timer_list *timer)
{
        debug_object_deactivate(timer, &timer_debug_descr);
}

static inline void debug_timer_free(struct timer_list *timer)
{
        debug_object_free(timer, &timer_debug_descr);
}

static inline void debug_timer_assert_init(struct timer_list *timer)
{
        debug_object_assert_init(timer, &timer_debug_descr);
}

static void do_init_timer(struct timer_list *timer, unsigned int flags,
                          const char *name, struct lock_class_key *key);

void init_timer_on_stack_key(struct timer_list *timer, unsigned int flags,
                             const char *name, struct lock_class_key *key)
{
        debug_object_init_on_stack(timer, &timer_debug_descr);
        do_init_timer(timer, flags, name, key);
}
EXPORT_SYMBOL_GPL(init_timer_on_stack_key);

void destroy_timer_on_stack(struct timer_list *timer)
{
        debug_object_free(timer, &timer_debug_descr);
}
EXPORT_SYMBOL_GPL(destroy_timer_on_stack);

#else
static inline void debug_timer_init(struct timer_list *timer) { }
static inline void debug_timer_activate(struct timer_list *timer) { }
static inline void debug_timer_deactivate(struct timer_list *timer) { }
static inline void debug_timer_assert_init(struct timer_list *timer) { }
#endif

static inline void debug_init(struct timer_list *timer)
{
        debug_timer_init(timer);
        trace_timer_init(timer);
}

static inline void
debug_activate(struct timer_list *timer, unsigned long expires)
{
        debug_timer_activate(timer);
        trace_timer_start(timer, expires);
}

static inline void debug_deactivate(struct timer_list *timer)
{
        debug_timer_deactivate(timer);
        trace_timer_cancel(timer);
}

static inline void debug_assert_init(struct timer_list *timer)
{
        debug_timer_assert_init(timer);
}

static void do_init_timer(struct timer_list *timer, unsigned int flags,
                          const char *name, struct lock_class_key *key)
{
        struct tvec_base *base = __raw_get_cpu_var(tvec_bases);

        timer->entry.next = NULL;
        timer->base = (void *)((unsigned long)base | flags);
        timer->slack = -1;
#ifdef CONFIG_TIMER_STATS
        timer->start_site = NULL;
        timer->start_pid = -1;
        memset(timer->start_comm, 0, TASK_COMM_LEN);
#endif
        lockdep_init_map(&timer->lockdep_map, name, key, 0);
}

/**
 * init_timer_key - initialize a timer
 * @timer: the timer to be initialized
 * @flags: timer flags
 * @name: name of the timer
 * @key: lockdep class key of the fake lock used for tracking timer
 *       sync lock dependencies
 *
 * init_timer_key() must be done to a timer prior to calling *any* of the
 * other timer functions.
 */
void init_timer_key(struct timer_list *timer, unsigned int flags,
                    const char *name, struct lock_class_key *key)
{
        debug_init(timer);
        do_init_timer(timer, flags, name, key);
}
EXPORT_SYMBOL(init_timer_key);
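
/*
 * Example (hypothetical caller): a dynamically allocated timer must be
 * initialized before any other timer function touches it:
 *
 *   setup_timer(&foo->timer, foo_timeout, (unsigned long)foo);
 *   mod_timer(&foo->timer, jiffies + HZ);
 *
 * setup_timer() initializes the timer and sets ->function and ->data in
 * one go; foo_timeout() then runs with (unsigned long)foo as its
 * argument when the timer expires.
 */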

static inline void detach_timer(struct timer_list *timer, bool clear_pending)
{
        struct list_head *entry = &timer->entry;

        debug_deactivate(timer);

        __list_del(entry->prev, entry->next);
        if (clear_pending)
                entry->next = NULL;
        entry->prev = LIST_POISON2;
}

static inline void
detach_expired_timer(struct timer_list *timer, struct tvec_base *base)
{
        detach_timer(timer, true);
        if (!tbase_get_deferrable(timer->base))
                base->active_timers--;
}

static int detach_if_pending(struct timer_list *timer, struct tvec_base *base,
                             bool clear_pending)
{
        if (!timer_pending(timer))
                return 0;

        detach_timer(timer, clear_pending);
        if (!tbase_get_deferrable(timer->base)) {
                base->active_timers--;
                if (timer->expires == base->next_timer)
                        base->next_timer = base->timer_jiffies;
        }
        return 1;
}

/*
 * We are using hashed locking: holding per_cpu(tvec_bases).lock
 * means that all timers which are tied to this base via timer->base are
 * locked, and the base itself is locked too.
 *
 * So __run_timers/migrate_timers can safely modify all timers which could
 * be found on ->tvX lists.
 *
 * When the timer's base is locked, and the timer removed from list, it is
 * possible to set timer->base = NULL and drop the lock: the timer remains
 * locked.
 */
static struct tvec_base *lock_timer_base(struct timer_list *timer,
                                        unsigned long *flags)
        __acquires(timer->base->lock)
{
        struct tvec_base *base;

        for (;;) {
                struct tvec_base *prelock_base = timer->base;
                base = tbase_get_base(prelock_base);
                if (likely(base != NULL)) {
                        spin_lock_irqsave(&base->lock, *flags);
                        if (likely(prelock_base == timer->base))
                                return base;
                        /* The timer has migrated to another CPU */
                        spin_unlock_irqrestore(&base->lock, *flags);
                }
                cpu_relax();
        }
}

static inline int
__mod_timer(struct timer_list *timer, unsigned long expires,
                                                bool pending_only, int pinned)
{
        struct tvec_base *base, *new_base;
        unsigned long flags;
        int ret = 0, cpu;

        timer_stats_timer_set_start_info(timer);
        BUG_ON(!timer->function);

        base = lock_timer_base(timer, &flags);

        ret = detach_if_pending(timer, base, false);
        if (!ret && pending_only)
                goto out_unlock;

        debug_activate(timer, expires);

        cpu = smp_processor_id();

#if defined(CONFIG_NO_HZ) && defined(CONFIG_SMP)
        if (!pinned && get_sysctl_timer_migration() && idle_cpu(cpu))
                cpu = get_nohz_timer_target();
#endif
        new_base = per_cpu(tvec_bases, cpu);

        if (base != new_base) {
                /*
                 * We are trying to schedule the timer on the local CPU.
                 * However we can't change timer's base while it is running,
                 * otherwise del_timer_sync() can't detect that the timer's
                 * handler has not yet finished. This also guarantees that
                 * the timer is serialized wrt itself.
                 */
                if (likely(base->running_timer != timer)) {
                        /* See the comment in lock_timer_base() */
                        timer_set_base(timer, NULL);
                        spin_unlock(&base->lock);
                        base = new_base;
                        spin_lock(&base->lock);
                        timer_set_base(timer, base);
                }
        }

        timer->expires = expires;
        internal_add_timer(base, timer);

out_unlock:
        spin_unlock_irqrestore(&base->lock, flags);

        return ret;
}

/**
 * mod_timer_pending - modify a pending timer's timeout
 * @timer: the pending timer to be modified
 * @expires: new timeout in jiffies
 *
 * mod_timer_pending() is the same for pending timers as mod_timer(),
 * but will not re-activate and modify already deleted timers.
 *
 * It is useful for unserialized use of timers.
 */
int mod_timer_pending(struct timer_list *timer, unsigned long expires)
{
        return __mod_timer(timer, expires, true, TIMER_NOT_PINNED);
}
EXPORT_SYMBOL(mod_timer_pending);

/*
 * Decide where to put the timer while taking the slack into account
 *
 * Algorithm:
 *   1) calculate the maximum (absolute) time
 *   2) calculate the highest bit where the expires and new max are different
 *   3) use this bit to make a mask
 *   4) use the bitmask to round down the maximum time, so that all last
 *      bits are zeros
 */
static inline
unsigned long apply_slack(struct timer_list *timer, unsigned long expires)
{
        unsigned long expires_limit, mask;
        int bit;

        if (timer->slack >= 0) {
                expires_limit = expires + timer->slack;
        } else {
                long delta = expires - jiffies;

                if (delta < 256)
                        return expires;

                expires_limit = expires + delta / 256;
        }
        mask = expires ^ expires_limit;
        if (mask == 0)
                return expires;

        bit = find_last_bit(&mask, BITS_PER_LONG);

        /* 1UL: bit can exceed 31 on 64-bit, so an int shift would overflow */
        mask = (1UL << bit) - 1;

        expires_limit = expires_limit & ~(mask);

        return expires_limit;
}
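
/*
 * Worked example (illustrative, slack = -1, HZ=1000): for an expiry 10
 * seconds out, delta = 10000 and expires_limit = expires + 39. Masking
 * off the bits below the highest bit in which the two values differ
 * yields an expiry in [expires, expires_limit] whose low bits are zero,
 * so timers with similar targets tend to collapse onto shared slots.
 */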

/**
 * mod_timer - modify a timer's timeout
 * @timer: the timer to be modified
 * @expires: new timeout in jiffies
 *
 * mod_timer() is a more efficient way to update the expire field of an
 * active timer (if the timer is inactive it will be activated)
 *
 * mod_timer(timer, expires) is equivalent to:
 *
 *     del_timer(timer); timer->expires = expires; add_timer(timer);
 *
 * Note that if there are multiple unserialized concurrent users of the
 * same timer, then mod_timer() is the only safe way to modify the timeout,
 * since add_timer() cannot modify an already running timer.
 *
 * The function returns whether it has modified a pending timer or not.
 * (ie. mod_timer() of an inactive timer returns 0, mod_timer() of an
 * active timer returns 1.)
 */
int mod_timer(struct timer_list *timer, unsigned long expires)
{
        expires = apply_slack(timer, expires);

        /*
         * This is a common optimization triggered by the
         * networking code - if the timer is re-modified
         * to be the same thing then just return:
         */
        if (timer_pending(timer) && timer->expires == expires)
                return 1;

        return __mod_timer(timer, expires, false, TIMER_NOT_PINNED);
}
EXPORT_SYMBOL(mod_timer);
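
/*
 * Example (hypothetical caller): kick a watchdog another five seconds
 * into the future, re-arming it if it has already fired:
 *
 *   mod_timer(&wd->timer, jiffies + 5 * HZ);
 */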

/**
 * mod_timer_pinned - modify a timer's timeout
 * @timer: the timer to be modified
 * @expires: new timeout in jiffies
 *
 * mod_timer_pinned() is a way to update the expire field of an
 * active timer (if the timer is inactive it will be activated)
 * and to ensure that the timer is scheduled on the current CPU.
 *
 * Note that this does not prevent the timer from being migrated
 * when the current CPU goes offline.  If this is a problem for
 * you, use CPU-hotplug notifiers to handle it correctly, for
 * example, cancelling the timer when the corresponding CPU goes
 * offline.
 *
 * mod_timer_pinned(timer, expires) is equivalent to:
 *
 *     del_timer(timer); timer->expires = expires; add_timer(timer);
 */
int mod_timer_pinned(struct timer_list *timer, unsigned long expires)
{
        if (timer->expires == expires && timer_pending(timer))
                return 1;

        return __mod_timer(timer, expires, false, TIMER_PINNED);
}
EXPORT_SYMBOL(mod_timer_pinned);

/**
 * add_timer - start a timer
 * @timer: the timer to be added
 *
 * The kernel will do a ->function(->data) callback from the
 * timer interrupt at the ->expires point in the future. The
 * current time is 'jiffies'.
 *
 * The timer's ->expires, ->function (and if the handler uses it, ->data)
 * fields must be set prior to calling this function.
 *
 * Timers with an ->expires field in the past will be executed in the next
 * timer tick.
 */
void add_timer(struct timer_list *timer)
{
        BUG_ON(timer_pending(timer));
        mod_timer(timer, timer->expires);
}
EXPORT_SYMBOL(add_timer);

/**
 * add_timer_on - start a timer on a particular CPU
 * @timer: the timer to be added
 * @cpu: the CPU to start it on
 *
 * This is not very scalable on SMP. Double adds are not possible.
 */
void add_timer_on(struct timer_list *timer, int cpu)
{
        struct tvec_base *base = per_cpu(tvec_bases, cpu);
        unsigned long flags;

        timer_stats_timer_set_start_info(timer);
        BUG_ON(timer_pending(timer) || !timer->function);
        spin_lock_irqsave(&base->lock, flags);
        timer_set_base(timer, base);
        debug_activate(timer, timer->expires);
        internal_add_timer(base, timer);
        /*
         * Check whether the other CPU is idle and needs to be
         * triggered to reevaluate the timer wheel when nohz is
         * active. We are protected against the other CPU fiddling
         * with the timer by holding the timer base lock. This also
         * makes sure that a CPU on the way to idle can not evaluate
         * the timer wheel.
         */
        wake_up_idle_cpu(cpu);
        spin_unlock_irqrestore(&base->lock, flags);
}
EXPORT_SYMBOL_GPL(add_timer_on);
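
/*
 * Example (hypothetical caller): run a per-CPU flush on one particular
 * CPU instead of wherever the caller happens to be running:
 *
 *   stats->timer.expires = round_jiffies(jiffies + HZ);
 *   add_timer_on(&stats->timer, cpu);
 */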

/**
 * del_timer - deactivate a timer.
 * @timer: the timer to be deactivated
 *
 * del_timer() deactivates a timer - this works on both active and inactive
 * timers.
 *
 * The function returns whether it has deactivated a pending timer or not.
 * (ie. del_timer() of an inactive timer returns 0, del_timer() of an
 * active timer returns 1.)
 */
int del_timer(struct timer_list *timer)
{
        struct tvec_base *base;
        unsigned long flags;
        int ret = 0;

        debug_assert_init(timer);

        timer_stats_timer_clear_start_info(timer);
        if (timer_pending(timer)) {
                base = lock_timer_base(timer, &flags);
                ret = detach_if_pending(timer, base, true);
                spin_unlock_irqrestore(&base->lock, flags);
        }

        return ret;
}
EXPORT_SYMBOL(del_timer);

/**
 * try_to_del_timer_sync - Try to deactivate a timer
 * @timer: the timer to deactivate
 *
 * This function tries to deactivate a timer. Upon successful (ret >= 0)
 * exit the timer is not queued and the handler is not running on any CPU.
 */
int try_to_del_timer_sync(struct timer_list *timer)
{
        struct tvec_base *base;
        unsigned long flags;
        int ret = -1;

        debug_assert_init(timer);

        base = lock_timer_base(timer, &flags);

        if (base->running_timer != timer) {
                timer_stats_timer_clear_start_info(timer);
                ret = detach_if_pending(timer, base, true);
        }
        spin_unlock_irqrestore(&base->lock, flags);

        return ret;
}
EXPORT_SYMBOL(try_to_del_timer_sync);

#ifdef CONFIG_SMP
/**
 * del_timer_sync - deactivate a timer and wait for the handler to finish.
 * @timer: the timer to be deactivated
 *
 * This function only differs from del_timer() on SMP: besides deactivating
 * the timer it also makes sure the handler has finished executing on other
 * CPUs.
 *
 * Synchronization rules: Callers must prevent restarting of the timer,
 * otherwise this function is meaningless. It must not be called from
 * interrupt contexts unless the timer is an irqsafe one. The caller must
 * not hold locks which would prevent completion of the timer's
 * handler. The timer's handler must not call add_timer_on(). Upon exit the
 * timer is not queued and the handler is not running on any CPU.
 *
 * Note: For !irqsafe timers, you must not hold locks that are held in
 *   interrupt context while calling this function. Even if the lock has
 *   nothing to do with the timer in question.  Here's why:
 *
 *    CPU0                             CPU1
 *    ----                             ----
 *                                   <SOFTIRQ>
 *                                   call_timer_fn();
 *                                     base->running_timer = mytimer;
 *  spin_lock_irq(somelock);
 *                                     <IRQ>
 *                                        spin_lock(somelock);
 *  del_timer_sync(mytimer);
 *   while (base->running_timer == mytimer);
 *
 * Now del_timer_sync() will never return and never release somelock.
 * The interrupt on the other CPU is waiting to grab somelock but
 * it has interrupted the softirq that CPU0 is waiting to finish.
 *
 * The function returns whether it has deactivated a pending timer or not.
 */
int del_timer_sync(struct timer_list *timer)
{
#ifdef CONFIG_LOCKDEP
        unsigned long flags;

        /*
         * If lockdep gives a backtrace here, please reference
         * the synchronization rules above.
         */
        local_irq_save(flags);
        lock_map_acquire(&timer->lockdep_map);
        lock_map_release(&timer->lockdep_map);
        local_irq_restore(flags);
#endif
        /*
         * don't use it in hardirq context, because it
         * could lead to deadlock.
         */
        WARN_ON(in_irq() && !tbase_get_irqsafe(timer->base));
        for (;;) {
                int ret = try_to_del_timer_sync(timer);
                if (ret >= 0)
                        return ret;
                cpu_relax();
        }
}
EXPORT_SYMBOL(del_timer_sync);
#endif
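
/*
 * Example (hypothetical caller): module teardown must ensure the
 * handler is no longer running anywhere before its data is freed:
 *
 *   del_timer_sync(&foo->timer);
 *   kfree(foo);
 */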

static int cascade(struct tvec_base *base, struct tvec *tv, int index)
{
        /* cascade all the timers from tv up one level */
        struct timer_list *timer, *tmp;
        struct list_head tv_list;

        list_replace_init(tv->vec + index, &tv_list);

        /*
         * We are removing _all_ timers from the list, so we
         * don't have to detach them individually.
         */
        list_for_each_entry_safe(timer, tmp, &tv_list, entry) {
                BUG_ON(tbase_get_base(timer->base) != base);
                /* No accounting, while moving them */
                __internal_add_timer(base, timer);
        }

        return index;
}

static void call_timer_fn(struct timer_list *timer, void (*fn)(unsigned long),
                          unsigned long data)
{
        int preempt_count = preempt_count();

#ifdef CONFIG_LOCKDEP
        /*
         * It is permissible to free the timer from inside the
         * function that is called from it, this we need to take into
         * account for lockdep too. To avoid bogus "held lock freed"
         * warnings as well as problems when looking into
         * timer->lockdep_map, make a copy and use that here.
         */
        struct lockdep_map lockdep_map;

        lockdep_copy_map(&lockdep_map, &timer->lockdep_map);
#endif
        /*
         * Couple the lock chain with the lock chain at
         * del_timer_sync() by acquiring the lock_map around the fn()
         * call here and in del_timer_sync().
         */
        lock_map_acquire(&lockdep_map);

        trace_timer_expire_entry(timer);
        fn(data);
        trace_timer_expire_exit(timer);

        lock_map_release(&lockdep_map);

        if (preempt_count != preempt_count()) {
                WARN_ONCE(1, "timer: %pF preempt leak: %08x -> %08x\n",
                          fn, preempt_count, preempt_count());
                /*
                 * Restore the preempt count. That gives us a decent
                 * chance to survive and extract information. If the
                 * callback kept a lock held, bad luck, but not worse
                 * than the BUG() we had.
                 */
                preempt_count() = preempt_count;
        }
}

#define INDEX(N) ((base->timer_jiffies >> (TVR_BITS + (N) * TVN_BITS)) & TVN_MASK)
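
/*
 * For illustration: INDEX(N) is the current slot in wheel level N + 2,
 * e.g. INDEX(0) == (base->timer_jiffies >> TVR_BITS) & TVN_MASK, the
 * tv2 bucket that must be cascaded down each time tv1 wraps.
 */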

/**
 * __run_timers - run all expired timers (if any) on this CPU.
 * @base: the timer vector to be processed.
 *
 * This function cascades all vectors and executes all expired timer
 * vectors.
 */
static inline void __run_timers(struct tvec_base *base)
{
        struct timer_list *timer;

        spin_lock_irq(&base->lock);
        while (time_after_eq(jiffies, base->timer_jiffies)) {
                struct list_head work_list;
                struct list_head *head = &work_list;
                int index = base->timer_jiffies & TVR_MASK;

                /*
                 * Cascade timers:
                 */
                if (!index &&
                        (!cascade(base, &base->tv2, INDEX(0))) &&
                                (!cascade(base, &base->tv3, INDEX(1))) &&
                                        !cascade(base, &base->tv4, INDEX(2)))
                        cascade(base, &base->tv5, INDEX(3));
                ++base->timer_jiffies;
                list_replace_init(base->tv1.vec + index, &work_list);
                while (!list_empty(head)) {
                        void (*fn)(unsigned long);
                        unsigned long data;
                        bool irqsafe;

                        timer = list_first_entry(head, struct timer_list, entry);
                        fn = timer->function;
                        data = timer->data;
                        irqsafe = tbase_get_irqsafe(timer->base);

                        timer_stats_account_timer(timer);

                        base->running_timer = timer;
                        detach_expired_timer(timer, base);

                        if (irqsafe) {
                                spin_unlock(&base->lock);
                                call_timer_fn(timer, fn, data);
                                spin_lock(&base->lock);
                        } else {
                                spin_unlock_irq(&base->lock);
                                call_timer_fn(timer, fn, data);
                                spin_lock_irq(&base->lock);
                        }
                }
        }
        base->running_timer = NULL;
        spin_unlock_irq(&base->lock);
}

#ifdef CONFIG_NO_HZ
/*
 * Find out when the next timer event is due to happen. This
 * is used on S/390 to stop all activity when a CPU is idle.
 * This function needs to be called with interrupts disabled.
 */
static unsigned long __next_timer_interrupt(struct tvec_base *base)
{
        unsigned long timer_jiffies = base->timer_jiffies;
        unsigned long expires = timer_jiffies + NEXT_TIMER_MAX_DELTA;
        int index, slot, array, found = 0;
        struct timer_list *nte;
        struct tvec *varray[4];

        /* Look for timer events in tv1. */
        index = slot = timer_jiffies & TVR_MASK;
        do {
                list_for_each_entry(nte, base->tv1.vec + slot, entry) {
                        if (tbase_get_deferrable(nte->base))
                                continue;

                        found = 1;
                        expires = nte->expires;
                        /* Look at the cascade bucket(s)? */
                        if (!index || slot < index)
                                goto cascade;
                        return expires;
                }
                slot = (slot + 1) & TVR_MASK;
        } while (slot != index);

cascade:
        /* Calculate the next cascade event */
        if (index)
                timer_jiffies += TVR_SIZE - index;
        timer_jiffies >>= TVR_BITS;

        /* Check tv2-tv5. */
        varray[0] = &base->tv2;
        varray[1] = &base->tv3;
        varray[2] = &base->tv4;
        varray[3] = &base->tv5;

        for (array = 0; array < 4; array++) {
                struct tvec *varp = varray[array];

                index = slot = timer_jiffies & TVN_MASK;
                do {
                        list_for_each_entry(nte, varp->vec + slot, entry) {
                                if (tbase_get_deferrable(nte->base))
                                        continue;

                                found = 1;
                                if (time_before(nte->expires, expires))
                                        expires = nte->expires;
                        }
                        /*
                         * Do we still search for the first timer or are
                         * we looking up the cascade buckets ?
                         */
                        if (found) {
                                /* Look at the cascade bucket(s)? */
                                if (!index || slot < index)
                                        break;
                                return expires;
                        }
                        slot = (slot + 1) & TVN_MASK;
                } while (slot != index);

                if (index)
                        timer_jiffies += TVN_SIZE - index;
                timer_jiffies >>= TVN_BITS;
        }
        return expires;
}

/*
 * Check, if the next hrtimer event is before the next timer wheel
 * event:
 */
static unsigned long cmp_next_hrtimer_event(unsigned long now,
                                            unsigned long expires)
{
        ktime_t hr_delta = hrtimer_get_next_event();
        struct timespec tsdelta;
        unsigned long delta;

        if (hr_delta.tv64 == KTIME_MAX)
                return expires;

        /*
         * Expired timer available, let it expire in the next tick
         */
        if (hr_delta.tv64 <= 0)
                return now + 1;

        tsdelta = ktime_to_timespec(hr_delta);
        delta = timespec_to_jiffies(&tsdelta);

        /*
         * Limit the delta to the max value, which is checked in
         * tick_nohz_stop_sched_tick():
         */
        if (delta > NEXT_TIMER_MAX_DELTA)
                delta = NEXT_TIMER_MAX_DELTA;

        /*
         * Take rounding errors in to account and make sure, that it
         * expires in the next tick. Otherwise we go into an endless
         * ping pong due to tick_nohz_stop_sched_tick() retriggering
         * the timer softirq
         */
        if (delta < 1)
                delta = 1;
        now += delta;
        if (time_before(now, expires))
                return now;
        return expires;
}

/**
 * get_next_timer_interrupt - return the jiffy of the next pending timer
 * @now: current time (in jiffies)
 */
unsigned long get_next_timer_interrupt(unsigned long now)
{
        struct tvec_base *base = __this_cpu_read(tvec_bases);
        unsigned long expires = now + NEXT_TIMER_MAX_DELTA;

        /*
         * Pretend that there is no timer pending if the cpu is offline.
         * Possible pending timers will be migrated later to an active cpu.
         */
        if (cpu_is_offline(smp_processor_id()))
                return expires;

        spin_lock(&base->lock);
        if (base->active_timers) {
                if (time_before_eq(base->next_timer, base->timer_jiffies))
                        base->next_timer = __next_timer_interrupt(base);
                expires = base->next_timer;
        }
        spin_unlock(&base->lock);

        if (time_before_eq(expires, now))
                return now;

        return cmp_next_hrtimer_event(now, expires);
}
#endif

/*
 * Called from the timer interrupt handler to charge one tick to the current
 * process.  user_tick is 1 if the tick is user time, 0 for system.
 */
void update_process_times(int user_tick)
{
        struct task_struct *p = current;
        int cpu = smp_processor_id();

        /* Note: this timer irq context must be accounted for as well. */
        account_process_tick(p, user_tick);
        run_local_timers();
        rcu_check_callbacks(cpu, user_tick);
        printk_tick();
#ifdef CONFIG_IRQ_WORK
        if (in_irq())
                irq_work_run();
#endif
        scheduler_tick();
        run_posix_cpu_timers(p);
}

/*
 * This function runs timers and the timer-tq in bottom half context.
 */
static void run_timer_softirq(struct softirq_action *h)
{
        struct tvec_base *base = __this_cpu_read(tvec_bases);

        hrtimer_run_pending();

        if (time_after_eq(jiffies, base->timer_jiffies))
                __run_timers(base);
}

/*
 * Called by the local, per-CPU timer interrupt on SMP.
 */
void run_local_timers(void)
{
        hrtimer_run_queues();
        raise_softirq(TIMER_SOFTIRQ);
}

#ifdef __ARCH_WANT_SYS_ALARM

/*
 * For backwards compatibility?  This can be done in libc so Alpha
 * and all newer ports shouldn't need it.
 */
SYSCALL_DEFINE1(alarm, unsigned int, seconds)
{
        return alarm_setitimer(seconds);
}

#endif

/**
 * sys_getpid - return the thread group id of the current process
 *
 * Note, despite the name, this returns the tgid not the pid.  The tgid and
 * the pid are identical unless CLONE_THREAD was specified on clone() in
 * which case the tgid is the same in all threads of the same group.
 *
 * This is SMP safe as current->tgid does not change.
 */
SYSCALL_DEFINE0(getpid)
{
        return task_tgid_vnr(current);
}

/*
 * Accessing ->real_parent is not SMP-safe, it could
 * change from under us. However, we can use a stale
 * value of ->real_parent under rcu_read_lock(), see
 * release_task()->call_rcu(delayed_put_task_struct).
 */
SYSCALL_DEFINE0(getppid)
{
        int pid;

        rcu_read_lock();
        pid = task_tgid_vnr(rcu_dereference(current->real_parent));
        rcu_read_unlock();

        return pid;
}

SYSCALL_DEFINE0(getuid)
{
        /* Only we change this so SMP safe */
        return from_kuid_munged(current_user_ns(), current_uid());
}

SYSCALL_DEFINE0(geteuid)
{
        /* Only we change this so SMP safe */
        return from_kuid_munged(current_user_ns(), current_euid());
}

SYSCALL_DEFINE0(getgid)
{
        /* Only we change this so SMP safe */
        return from_kgid_munged(current_user_ns(), current_gid());
}

SYSCALL_DEFINE0(getegid)
{
        /* Only we change this so SMP safe */
        return from_kgid_munged(current_user_ns(), current_egid());
}

static void process_timeout(unsigned long __data)
{
        wake_up_process((struct task_struct *)__data);
}

/**
 * schedule_timeout - sleep until timeout
 * @timeout: timeout value in jiffies
 *
 * Make the current task sleep until @timeout jiffies have
 * elapsed. The routine will return immediately unless
 * the current task state has been set (see set_current_state()).
 *
 * You can set the task state as follows -
 *
 * %TASK_UNINTERRUPTIBLE - at least @timeout jiffies are guaranteed to
 * pass before the routine returns. The routine will return 0
 *
 * %TASK_INTERRUPTIBLE - the routine may return early if a signal is
 * delivered to the current task. In this case the remaining time
 * in jiffies will be returned, or 0 if the timer expired in time
 *
 * The current task state is guaranteed to be TASK_RUNNING when this
 * routine returns.
 *
 * Specifying a @timeout value of %MAX_SCHEDULE_TIMEOUT will schedule
 * the CPU away without a bound on the timeout. In this case the return
 * value will be %MAX_SCHEDULE_TIMEOUT.
 *
 * In all cases the return value is guaranteed to be non-negative.
 */
signed long __sched schedule_timeout(signed long timeout)
{
        struct timer_list timer;
        unsigned long expire;

        switch (timeout)
        {
        case MAX_SCHEDULE_TIMEOUT:
                /*
                 * These two special cases are useful to be comfortable
                 * in the caller. Nothing more. We could take
                 * MAX_SCHEDULE_TIMEOUT from one of the negative values
                 * but I'd like to return a valid offset (>=0) to allow
                 * the caller to do everything it wants with the retval.
                 */
                schedule();
                goto out;
        default:
                /*
                 * Another bit of paranoia. Note that the retval will be
                 * 0 since no piece of kernel is supposed to do a check
                 * for a negative retval of schedule_timeout() (since it
                 * should never happen anyway). You just have the printk()
                 * that will tell you if something has gone wrong and where.
                 */
                if (timeout < 0) {
                        printk(KERN_ERR "schedule_timeout: wrong timeout "
                                "value %lx\n", timeout);
                        dump_stack();
                        current->state = TASK_RUNNING;
                        goto out;
                }
        }

        expire = timeout + jiffies;

        setup_timer_on_stack(&timer, process_timeout, (unsigned long)current);
        __mod_timer(&timer, expire, false, TIMER_NOT_PINNED);
        schedule();
        del_singleshot_timer_sync(&timer);

        /* Remove the timer from the object tracker */
        destroy_timer_on_stack(&timer);

        timeout = expire - jiffies;

 out:
        return timeout < 0 ? 0 : timeout;
}
EXPORT_SYMBOL(schedule_timeout);
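
/*
 * Example (hypothetical caller): sleep for about two seconds, waking
 * early if a signal is delivered:
 *
 *   set_current_state(TASK_INTERRUPTIBLE);
 *   remaining = schedule_timeout(2 * HZ);
 *
 * or simply call schedule_timeout_interruptible(2 * HZ), which sets the
 * task state first (see below).
 */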
1534
1535/*
1536 * We can use __set_current_state() here because schedule_timeout() calls
1537 * schedule() unconditionally.
1538 */
1539signed long __sched schedule_timeout_interruptible(signed long timeout)
1540{
1541        __set_current_state(TASK_INTERRUPTIBLE);
1542        return schedule_timeout(timeout);
1543}
1544EXPORT_SYMBOL(schedule_timeout_interruptible);
1545
1546signed long __sched schedule_timeout_killable(signed long timeout)
1547{
1548        __set_current_state(TASK_KILLABLE);
1549        return schedule_timeout(timeout);
1550}
1551EXPORT_SYMBOL(schedule_timeout_killable);
1552
1553signed long __sched schedule_timeout_uninterruptible(signed long timeout)
1554{
1555        __set_current_state(TASK_UNINTERRUPTIBLE);
1556        return schedule_timeout(timeout);
1557}
1558EXPORT_SYMBOL(schedule_timeout_uninterruptible);
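
/*
 * Illustrative sketch, not part of the original file: a wait loop
 * that mirrors msleep_interruptible() below -- re-sleep for the
 * remainder after a spurious wakeup, but stop once a signal is
 * pending so the loop cannot spin.  example_wait() is a made-up
 * name.
 */
static signed long example_wait(signed long timeout)
{
        while (timeout && !signal_pending(current))
                timeout = schedule_timeout_interruptible(timeout);
        return timeout; /* jiffies left if a signal cut the wait short */
}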
1559
1560/* Thread ID - the internal kernel "pid" */
1561SYSCALL_DEFINE0(gettid)
1562{
1563        return task_pid_vnr(current);
1564}
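
/*
 * Illustrative userspace sketch, not part of the original file:
 * glibc has traditionally shipped no gettid() wrapper, so callers
 * reach this syscall via syscall(2):
 *
 *        #include <stdio.h>
 *        #include <unistd.h>
 *        #include <sys/syscall.h>
 *
 *        int main(void)
 *        {
 *                printf("tid=%ld\n", (long)syscall(SYS_gettid));
 *                return 0;
 *        }
 */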
1565
1566/**
1567 * do_sysinfo - fill in sysinfo struct
1568 * @info: pointer to buffer to fill
1569 */
1570int do_sysinfo(struct sysinfo *info)
1571{
1572        unsigned long mem_total, sav_total;
1573        unsigned int mem_unit, bitcount;
1574        struct timespec tp;
1575
1576        memset(info, 0, sizeof(struct sysinfo));
1577
1578        ktime_get_ts(&tp);
1579        monotonic_to_bootbased(&tp);
1580        info->uptime = tp.tv_sec + (tp.tv_nsec ? 1 : 0);
1581
1582        get_avenrun(info->loads, 0, SI_LOAD_SHIFT - FSHIFT);
1583
1584        info->procs = nr_threads;
1585
1586        si_meminfo(info);
1587        si_swapinfo(info);
1588
1589        /*
1590         * If the sum of all the available memory (i.e. ram + swap)
1591         * is less than can be stored in a 32 bit unsigned long then
1592         * we can be binary compatible with 2.2.x kernels.  If not,
1593         * well, in that case 2.2.x was broken anyways...
1594         *
1595         *  -Erik Andersen <andersee@debian.org>
1596         */
1597
1598        mem_total = info->totalram + info->totalswap;
1599        if (mem_total < info->totalram || mem_total < info->totalswap)
1600                goto out;
1601        bitcount = 0;
1602        mem_unit = info->mem_unit;
1603        while (mem_unit > 1) {
1604                bitcount++;
1605                mem_unit >>= 1;
1606                sav_total = mem_total;
1607                mem_total <<= 1;
1608                if (mem_total < sav_total)
1609                        goto out;
1610        }
1611
1612        /*
1613         * If mem_total did not overflow, multiply all memory values by
1614         * info->mem_unit and set it to 1.  This leaves things compatible
1615         * with 2.2.x, and also retains compatibility with earlier 2.4.x
1616         * kernels...
1617         */
1618
1619        info->mem_unit = 1;
1620        info->totalram <<= bitcount;
1621        info->freeram <<= bitcount;
1622        info->sharedram <<= bitcount;
1623        info->bufferram <<= bitcount;
1624        info->totalswap <<= bitcount;
1625        info->freeswap <<= bitcount;
1626        info->totalhigh <<= bitcount;
1627        info->freehigh <<= bitcount;
1628
1629out:
1630        return 0;
1631}
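
/*
 * Worked example of the normalisation above (illustrative numbers):
 * with info->mem_unit == 4096 and info->totalram == 0x40000 units,
 * the while loop halves mem_unit twelve times (4096 == 1 << 12), so
 * bitcount == 12 and the reported totalram becomes
 * 0x40000 << 12 == 0x40000000 bytes with mem_unit == 1 -- the same
 * quantity, just expressed in 1-byte units.  If doubling mem_total
 * ever overflows an unsigned long, the goto skips the conversion
 * and the original mem_unit is kept.
 */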
1632
1633SYSCALL_DEFINE1(sysinfo, struct sysinfo __user *, info)
1634{
1635        struct sysinfo val;
1636
1637        do_sysinfo(&val);
1638
1639        if (copy_to_user(info, &val, sizeof(struct sysinfo)))
1640                return -EFAULT;
1641
1642        return 0;
1643}
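
/*
 * Illustrative userspace sketch, not part of the original file: the
 * syscall is normally reached through the glibc sysinfo(2) wrapper:
 *
 *        #include <stdio.h>
 *        #include <sys/sysinfo.h>
 *
 *        int main(void)
 *        {
 *                struct sysinfo si;
 *
 *                if (sysinfo(&si) == 0)
 *                        printf("up %ld s, ram %lu x %u bytes\n",
 *                               si.uptime, si.totalram, si.mem_unit);
 *                return 0;
 *        }
 */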
1644
1645static int __cpuinit init_timers_cpu(int cpu)
1646{
1647        int j;
1648        struct tvec_base *base;
1649        static char __cpuinitdata tvec_base_done[NR_CPUS];
1650
1651        if (!tvec_base_done[cpu]) {
1652                static char boot_done;
1653
1654                if (boot_done) {
1655                        /*
1656                         * Secondary CPUs (APs) use this path later in boot.
1657                         */
1658                        base = kmalloc_node(sizeof(*base),
1659                                                GFP_KERNEL | __GFP_ZERO,
1660                                                cpu_to_node(cpu));
1661                        if (!base)
1662                                return -ENOMEM;
1663
1664                        /* Make sure the allocation left the low flag bits of the base pointer clear */
1665                        if (tbase_get_deferrable(base)) {
1666                                WARN_ON(1);
1667                                kfree(base);
1668                                return -ENOMEM;
1669                        }
1670                        per_cpu(tvec_bases, cpu) = base;
1671                } else {
1672                        /*
1673                         * This is for the boot CPU - we use compile-time
1674                         * static initialisation because per-cpu memory isn't
1675                         * ready yet and because the memory allocators are not
1676                         * initialised either.
1677                         */
1678                        boot_done = 1;
1679                        base = &boot_tvec_bases;
1680                }
1681                tvec_base_done[cpu] = 1;
1682        } else {
1683                base = per_cpu(tvec_bases, cpu);
1684        }
1685
1686        spin_lock_init(&base->lock);
1687
1688        for (j = 0; j < TVN_SIZE; j++) {
1689                INIT_LIST_HEAD(base->tv5.vec + j);
1690                INIT_LIST_HEAD(base->tv4.vec + j);
1691                INIT_LIST_HEAD(base->tv3.vec + j);
1692                INIT_LIST_HEAD(base->tv2.vec + j);
1693        }
1694        for (j = 0; j < TVR_SIZE; j++)
1695                INIT_LIST_HEAD(base->tv1.vec + j);
1696
1697        base->timer_jiffies = jiffies;
1698        base->next_timer = base->timer_jiffies;
1699        base->active_timers = 0;
1700        return 0;
1701}
1702
1703#ifdef CONFIG_HOTPLUG_CPU
1704static void migrate_timer_list(struct tvec_base *new_base, struct list_head *head)
1705{
1706        struct timer_list *timer;
1707
1708        while (!list_empty(head)) {
1709                timer = list_first_entry(head, struct timer_list, entry);
1710                /* We ignore the accounting on the dying cpu */
1711                detach_timer(timer, false);
1712                timer_set_base(timer, new_base);
1713                internal_add_timer(new_base, timer);
1714        }
1715}
1716
1717static void __cpuinit migrate_timers(int cpu)
1718{
1719        struct tvec_base *old_base;
1720        struct tvec_base *new_base;
1721        int i;
1722
1723        BUG_ON(cpu_online(cpu));
1724        old_base = per_cpu(tvec_bases, cpu);
1725        new_base = get_cpu_var(tvec_bases);
1726        /*
1727         * The caller is globally serialized and nobody else takes
1728         * two locks at once, so deadlock is not possible.
1729         */
1730        spin_lock_irq(&new_base->lock);
1731        spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);
1732
1733        BUG_ON(old_base->running_timer);
1734
1735        for (i = 0; i < TVR_SIZE; i++)
1736                migrate_timer_list(new_base, old_base->tv1.vec + i);
1737        for (i = 0; i < TVN_SIZE; i++) {
1738                migrate_timer_list(new_base, old_base->tv2.vec + i);
1739                migrate_timer_list(new_base, old_base->tv3.vec + i);
1740                migrate_timer_list(new_base, old_base->tv4.vec + i);
1741                migrate_timer_list(new_base, old_base->tv5.vec + i);
1742        }
1743
1744        spin_unlock(&old_base->lock);
1745        spin_unlock_irq(&new_base->lock);
1746        put_cpu_var(tvec_bases);
1747}
1748#endif /* CONFIG_HOTPLUG_CPU */
1749
1750static int __cpuinit timer_cpu_notify(struct notifier_block *self,
1751                                unsigned long action, void *hcpu)
1752{
1753        long cpu = (long)hcpu;
1754        int err;
1755
1756        switch (action) {
1757        case CPU_UP_PREPARE:
1758        case CPU_UP_PREPARE_FROZEN:
1759                err = init_timers_cpu(cpu);
1760                if (err < 0)
1761                        return notifier_from_errno(err);
1762                break;
1763#ifdef CONFIG_HOTPLUG_CPU
1764        case CPU_DEAD:
1765        case CPU_DEAD_FROZEN:
1766                migrate_timers(cpu);
1767                break;
1768#endif
1769        default:
1770                break;
1771        }
1772        return NOTIFY_OK;
1773}
1774
1775static struct notifier_block __cpuinitdata timers_nb = {
1776        .notifier_call  = timer_cpu_notify,
1777};
1778
1780void __init init_timers(void)
1781{
1782        int err;
1783
1784        /* ensure there are enough low bits for flags in timer->base pointer */
1785        BUILD_BUG_ON(__alignof__(struct tvec_base) & TIMER_FLAG_MASK);
1786
1787        err = timer_cpu_notify(&timers_nb, (unsigned long)CPU_UP_PREPARE,
1788                               (void *)(long)smp_processor_id());
1789        init_timer_stats();
1790
1791        BUG_ON(err != NOTIFY_OK);
1792        register_cpu_notifier(&timers_nb);
1793        open_softirq(TIMER_SOFTIRQ, run_timer_softirq);
1794}
1795
1796/**
1797 * msleep - sleep safely even with waitqueue interruptions
1798 * @msecs: Time in milliseconds to sleep for
1799 */
1800void msleep(unsigned int msecs)
1801{
1802        unsigned long timeout = msecs_to_jiffies(msecs) + 1;
1803
1804        while (timeout)
1805                timeout = schedule_timeout_uninterruptible(timeout);
1806}
1807
1808EXPORT_SYMBOL(msleep);
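
/*
 * Illustrative sketch, not part of the original file: msleep() is
 * the simple choice for process-context delays of roughly 20ms and
 * up; example_settle_hw() is a made-up driver helper.
 */
static void example_settle_hw(void)
{
        msleep(20);     /* sleeps at least ~20ms, possibly longer */
}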
1809
1810/**
1811 * msleep_interruptible - sleep waiting for signals
1812 * @msecs: Time in milliseconds to sleep for
1813 */
1814unsigned long msleep_interruptible(unsigned int msecs)
1815{
1816        unsigned long timeout = msecs_to_jiffies(msecs) + 1;
1817
1818        while (timeout && !signal_pending(current))
1819                timeout = schedule_timeout_interruptible(timeout);
1820        return jiffies_to_msecs(timeout);
1821}
1822
1823EXPORT_SYMBOL(msleep_interruptible);
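
/*
 * Illustrative sketch, not part of the original file: using the
 * return value to tell a completed sleep from one cut short by a
 * signal.  example_delay() is a made-up name.
 */
static int example_delay(void)
{
        if (msleep_interruptible(100))
                return -EINTR;  /* a signal ended the sleep early */
        return 0;
}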
1824
1825static int __sched do_usleep_range(unsigned long min, unsigned long max)
1826{
1827        ktime_t kmin;
1828        unsigned long delta;
1829
1830        kmin = ktime_set(0, min * NSEC_PER_USEC);
1831        delta = (max - min) * NSEC_PER_USEC;
1832        return schedule_hrtimeout_range(&kmin, delta, HRTIMER_MODE_REL);
1833}
1834
1835/**
1836 * usleep_range - Drop-in replacement for udelay where wakeup is flexible
1837 * @min: Minimum time in usecs to sleep
1838 * @max: Maximum time in usecs to sleep
1839 */
1840void usleep_range(unsigned long min, unsigned long max)
1841{
1842        __set_current_state(TASK_UNINTERRUPTIBLE);
1843        do_usleep_range(min, max);
1844}
1845EXPORT_SYMBOL(usleep_range);
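
/*
 * Illustrative sketch, not part of the original file: per
 * Documentation/timers/timers-howto.txt, usleep_range() is the
 * preferred non-atomic delay for roughly 10us-20ms; the slack
 * between @min and @max lets the hrtimer code coalesce wakeups.
 */
static void example_short_settle(void)
{
        usleep_range(100, 200); /* sleep 100-200us, allowing coalescing */
}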
1846