linux/kernel/time/tick-common.c
/*
 * linux/kernel/time/tick-common.c
 *
 * This file contains the base functions to manage periodic tick
 * related events.
 *
 * Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de>
 * Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
 * Copyright(C) 2006-2007, Timesys Corp., Thomas Gleixner
 *
 * This code is licensed under the GPL version 2. For details see
 * kernel-base/COPYING.
 */
#include <linux/cpu.h>
#include <linux/err.h>
#include <linux/hrtimer.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/profile.h>
#include <linux/sched.h>

#include <asm/irq_regs.h>

#include "tick-internal.h"

/*
 * Tick devices
 */
DEFINE_PER_CPU(struct tick_device, tick_cpu_device);
/*
 * Tick next event: keeps track of the tick time
 */
ktime_t tick_next_period;
ktime_t tick_period;
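/*
 * tick_do_timer_cpu holds the number of the cpu which is responsible
 * for calling do_timer(), i.e. the timekeeping update. It starts out
 * as TICK_DO_TIMER_BOOT; the first cpu which sets up its tick device
 * claims the duty in tick_setup_device() below.
 */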
int tick_do_timer_cpu __read_mostly = TICK_DO_TIMER_BOOT;
static DEFINE_RAW_SPINLOCK(tick_device_lock);

/*
 * Debugging: see timer_list.c
 */
struct tick_device *tick_get_device(int cpu)
{
	return &per_cpu(tick_cpu_device, cpu);
}

/**
 * tick_is_oneshot_available - check for a oneshot capable event device
 */
int tick_is_oneshot_available(void)
{
	struct clock_event_device *dev = __this_cpu_read(tick_cpu_device.evtdev);

	if (!dev || !(dev->features & CLOCK_EVT_FEAT_ONESHOT))
		return 0;
	/*
	 * A device which stops in deep idle states (C3STOP) is only
	 * usable in oneshot mode when the broadcast device can take
	 * over while the cpu sleeps.
	 */
	if (!(dev->features & CLOCK_EVT_FEAT_C3STOP))
		return 1;
	return tick_broadcast_oneshot_available();
}

/*
 * Periodic tick: the cpu which owns the do_timer duty updates jiffies
 * and the wall clock; every cpu then runs its local housekeeping
 * (process times, profiling).
 */
static void tick_periodic(int cpu)
{
	if (tick_do_timer_cpu == cpu) {
		write_seqlock(&xtime_lock);

		/* Keep track of the next tick event */
		tick_next_period = ktime_add(tick_next_period, tick_period);

		do_timer(1);
		write_sequnlock(&xtime_lock);
	}

	update_process_times(user_mode(get_irq_regs()));
	profile_tick(CPU_PROFILING);
}

/*
 * Event handler for periodic ticks
 */
void tick_handle_periodic(struct clock_event_device *dev)
{
	int cpu = smp_processor_id();
	ktime_t next;

	tick_periodic(cpu);

	if (dev->mode != CLOCK_EVT_MODE_ONESHOT)
		return;
	/*
	 * Set up the next period for devices which do not have
	 * periodic mode:
	 */
	next = ktime_add(dev->next_event, tick_period);
	for (;;) {
		if (!clockevents_program_event(dev, next, false))
			return;
		/*
		 * Have to be careful here. If we're in oneshot mode,
		 * before we call tick_periodic() in a loop, we need
		 * to be sure we're using a real hardware clocksource.
		 * Otherwise we could get trapped in an infinite
		 * loop, as tick_periodic() increments jiffies, which
		 * then will increment time, possibly causing the loop
		 * to trigger again and again.
		 */
		if (timekeeping_valid_for_hres())
			tick_periodic(cpu);
		next = ktime_add(next, tick_period);
	}
}

/*
 * Setup the device for a periodic tick
 */
void tick_setup_periodic(struct clock_event_device *dev, int broadcast)
{
	tick_set_periodic_handler(dev, broadcast);

	/* Broadcast setup? */
	if (!tick_device_is_functional(dev))
		return;

	if ((dev->features & CLOCK_EVT_FEAT_PERIODIC) &&
	    !tick_broadcast_oneshot_active()) {
		clockevents_set_mode(dev, CLOCK_EVT_MODE_PERIODIC);
	} else {
		unsigned long seq;
		ktime_t next;

		do {
			seq = read_seqbegin(&xtime_lock);
			next = tick_next_period;
		} while (read_seqretry(&xtime_lock, seq));

		clockevents_set_mode(dev, CLOCK_EVT_MODE_ONESHOT);

		for (;;) {
			if (!clockevents_program_event(dev, next, false))
				return;
			next = ktime_add(next, tick_period);
		}
	}
}

/*
 * Setup the tick device
 */
static void tick_setup_device(struct tick_device *td,
			      struct clock_event_device *newdev, int cpu,
			      const struct cpumask *cpumask)
{
	ktime_t next_event;
	void (*handler)(struct clock_event_device *) = NULL;

	/*
	 * First device setup?
	 */
	if (!td->evtdev) {
		/*
		 * If no cpu took the do_timer update, assign it to
		 * this cpu:
		 */
		if (tick_do_timer_cpu == TICK_DO_TIMER_BOOT) {
			tick_do_timer_cpu = cpu;
			tick_next_period = ktime_get();
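			/*
			 * NSEC_PER_SEC / HZ is the tick length in
			 * nanoseconds, e.g. 4,000,000 ns (4 ms) on a
			 * HZ=250 kernel.
			 */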
			tick_period = ktime_set(0, NSEC_PER_SEC / HZ);
		}

		/*
		 * Startup in periodic mode first.
		 */
		td->mode = TICKDEV_MODE_PERIODIC;
	} else {
		handler = td->evtdev->event_handler;
		next_event = td->evtdev->next_event;
		td->evtdev->event_handler = clockevents_handle_noop;
	}

	td->evtdev = newdev;

	/*
	 * When the device is not per cpu, pin the interrupt to the
	 * current cpu:
	 */
	if (!cpumask_equal(newdev->cpumask, cpumask))
		irq_set_affinity(newdev->irq, cpumask);

	/*
	 * When global broadcasting is active, check if the current
	 * device is registered as a placeholder for broadcast mode.
	 * This allows us to handle this x86 misfeature in a generic
	 * way.
	 */
	if (tick_device_uses_broadcast(newdev, cpu))
		return;

	if (td->mode == TICKDEV_MODE_PERIODIC)
		tick_setup_periodic(newdev, 0);
	else
		tick_setup_oneshot(newdev, handler, next_event);
}

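/*
 * New devices reach this code via clockevents_register_device(), which
 * raises the CLOCK_EVT_NOTIFY_ADD notification handled by tick_notify()
 * below. A minimal sketch of a hypothetical driver registration (the
 * my_timer_* names and MY_TIMER_IRQ are placeholders, not real kernel
 * symbols):
 *
 *	static struct clock_event_device my_evt = {
 *		.name		= "my-timer",
 *		.features	= CLOCK_EVT_FEAT_PERIODIC |
 *				  CLOCK_EVT_FEAT_ONESHOT,
 *		.rating		= 300,
 *		.irq		= MY_TIMER_IRQ,
 *		.cpumask	= cpu_all_mask,
 *		.set_mode	= my_timer_set_mode,
 *		.set_next_event	= my_timer_set_next_event,
 *	};
 *
 *	clockevents_register_device(&my_evt);
 */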
/*
 * Check whether the newly registered device should be used.
 */
static int tick_check_new_device(struct clock_event_device *newdev)
{
	struct clock_event_device *curdev;
	struct tick_device *td;
	int cpu, ret = NOTIFY_OK;
	unsigned long flags;

	raw_spin_lock_irqsave(&tick_device_lock, flags);

	cpu = smp_processor_id();
	if (!cpumask_test_cpu(cpu, newdev->cpumask))
		goto out_bc;

	td = &per_cpu(tick_cpu_device, cpu);
	curdev = td->evtdev;

	/* cpu local device? */
	if (!cpumask_equal(newdev->cpumask, cpumask_of(cpu))) {

		/*
		 * If the cpu affinity of the device interrupt cannot
		 * be set, ignore it.
		 */
		if (!irq_can_set_affinity(newdev->irq))
			goto out_bc;

		/*
		 * If we already have a cpu local device, do not
		 * replace it with a non cpu local device.
		 */
		if (curdev && cpumask_equal(curdev->cpumask, cpumask_of(cpu)))
			goto out_bc;
	}

	/*
	 * If we have an active device, then check the rating and the oneshot
	 * feature.
	 */
	if (curdev) {
		/*
		 * Prefer oneshot capable devices!
		 */
		if ((curdev->features & CLOCK_EVT_FEAT_ONESHOT) &&
		    !(newdev->features & CLOCK_EVT_FEAT_ONESHOT))
			goto out_bc;
		/*
		 * Check the rating
		 */
		if (curdev->rating >= newdev->rating)
			goto out_bc;
	}

	/*
	 * Replace the existing device, if any, with the new one. If
	 * the current device is the broadcast device, do not give it
	 * back to the clockevents layer!
	 */
	if (tick_is_broadcast_device(curdev)) {
		clockevents_shutdown(curdev);
		curdev = NULL;
	}
	clockevents_exchange_device(curdev, newdev);
	tick_setup_device(td, newdev, cpu, cpumask_of(cpu));
	if (newdev->features & CLOCK_EVT_FEAT_ONESHOT)
		tick_oneshot_notify();

	raw_spin_unlock_irqrestore(&tick_device_lock, flags);
	return NOTIFY_STOP;

out_bc:
	/*
	 * Can the new device be used as a broadcast device?
	 */
	if (tick_check_broadcast_device(newdev))
		ret = NOTIFY_STOP;

	raw_spin_unlock_irqrestore(&tick_device_lock, flags);

	return ret;
}

/*
 * Transfer the do_timer job away from a dying cpu.
 *
 * Called with interrupts disabled.
 */
static void tick_handover_do_timer(int *cpup)
{
	if (*cpup == tick_do_timer_cpu) {
		int cpu = cpumask_first(cpu_online_mask);

		tick_do_timer_cpu = (cpu < nr_cpu_ids) ? cpu :
			TICK_DO_TIMER_NONE;
	}
}

/*
 * Shutdown an event device on a given cpu:
 *
 * This is called on a live CPU after another CPU has died, so we
 * cannot access the hardware device itself.
 * We just set the mode and remove it from the lists.
 */
static void tick_shutdown(unsigned int *cpup)
{
	struct tick_device *td = &per_cpu(tick_cpu_device, *cpup);
	struct clock_event_device *dev = td->evtdev;
	unsigned long flags;

	raw_spin_lock_irqsave(&tick_device_lock, flags);
	td->mode = TICKDEV_MODE_PERIODIC;
	if (dev) {
		/*
		 * Prevent the clock events layer from trying to call
		 * the set mode function!
		 */
		dev->mode = CLOCK_EVT_MODE_UNUSED;
		clockevents_exchange_device(dev, NULL);
		td->evtdev = NULL;
	}
	raw_spin_unlock_irqrestore(&tick_device_lock, flags);
}

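/*
 * Shut down the tick device of the current cpu on system suspend, so
 * no further tick events are delivered while the system sleeps.
 */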
static void tick_suspend(void)
{
	struct tick_device *td = &__get_cpu_var(tick_cpu_device);
	unsigned long flags;

	raw_spin_lock_irqsave(&tick_device_lock, flags);
	clockevents_shutdown(td->evtdev);
	raw_spin_unlock_irqrestore(&tick_device_lock, flags);
}

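/*
 * Resume the tick device of the current cpu and reprogram it, unless
 * the broadcast device has already taken over the duty.
 */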
static void tick_resume(void)
{
	struct tick_device *td = &__get_cpu_var(tick_cpu_device);
	unsigned long flags;
	int broadcast = tick_resume_broadcast();

	raw_spin_lock_irqsave(&tick_device_lock, flags);
	clockevents_set_mode(td->evtdev, CLOCK_EVT_MODE_RESUME);

	if (!broadcast) {
		if (td->mode == TICKDEV_MODE_PERIODIC)
			tick_setup_periodic(td->evtdev, 0);
		else
			tick_resume_oneshot();
	}
	raw_spin_unlock_irqrestore(&tick_device_lock, flags);
}

/*
 * Notification about clock event devices
 */
static int tick_notify(struct notifier_block *nb, unsigned long reason,
		       void *dev)
{
	switch (reason) {

	case CLOCK_EVT_NOTIFY_ADD:
		return tick_check_new_device(dev);

	case CLOCK_EVT_NOTIFY_BROADCAST_ON:
	case CLOCK_EVT_NOTIFY_BROADCAST_OFF:
	case CLOCK_EVT_NOTIFY_BROADCAST_FORCE:
		tick_broadcast_on_off(reason, dev);
		break;

	case CLOCK_EVT_NOTIFY_BROADCAST_ENTER:
	case CLOCK_EVT_NOTIFY_BROADCAST_EXIT:
		tick_broadcast_oneshot_control(reason);
		break;

	case CLOCK_EVT_NOTIFY_CPU_DYING:
		tick_handover_do_timer(dev);
		break;

	case CLOCK_EVT_NOTIFY_CPU_DEAD:
		tick_shutdown_broadcast_oneshot(dev);
		tick_shutdown_broadcast(dev);
		tick_shutdown(dev);
		break;

	case CLOCK_EVT_NOTIFY_SUSPEND:
		tick_suspend();
		tick_suspend_broadcast();
		break;

	case CLOCK_EVT_NOTIFY_RESUME:
		tick_resume();
		break;

	default:
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block tick_notifier = {
	.notifier_call = tick_notify,
};

/**
 * tick_init - initialize the tick control
 *
 * Register the notifier with the clockevents framework
 */
void __init tick_init(void)
{
	clockevents_register_notifier(&tick_notifier);
}