linux/kernel/time/tick-common.c
/*
 * linux/kernel/time/tick-common.c
 *
 * This file contains the base functions to manage periodic tick
 * related events.
 *
 * Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de>
 * Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
 * Copyright(C) 2006-2007, Timesys Corp., Thomas Gleixner
 *
 * This code is licensed under the GPL version 2. For details see
 * kernel-base/COPYING.
 */
#include <linux/cpu.h>
#include <linux/err.h>
#include <linux/hrtimer.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/profile.h>
#include <linux/sched.h>
#include <linux/tick.h>

#include <asm/irq_regs.h>

#include "tick-internal.h"

/*
 * Tick devices
 */
DEFINE_PER_CPU(struct tick_device, tick_cpu_device);
/*
 * Tick next event: keeps track of the tick time
 */
ktime_t tick_next_period;
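/*
 * Tick period: length of one tick interval (NSEC_PER_SEC / HZ)
 */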
ktime_t tick_period;
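/*
 * tick_do_timer_cpu holds the cpu which is responsible for the
 * do_timer() housekeeping. It starts out as TICK_DO_TIMER_BOOT, is
 * claimed by the first cpu which sets up its tick device and is
 * handed over in tick_handover_do_timer() when that cpu goes down.
 */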
int tick_do_timer_cpu __read_mostly = TICK_DO_TIMER_BOOT;
DEFINE_SPINLOCK(tick_device_lock);

/*
 * Debugging: see timer_list.c
 */
struct tick_device *tick_get_device(int cpu)
{
        return &per_cpu(tick_cpu_device, cpu);
}

/**
 * tick_is_oneshot_available - check for a oneshot capable event device
 */
int tick_is_oneshot_available(void)
{
        struct clock_event_device *dev = __get_cpu_var(tick_cpu_device).evtdev;

        return dev && (dev->features & CLOCK_EVT_FEAT_ONESHOT);
}

/*
 * Periodic tick
 */
static void tick_periodic(int cpu)
{
        if (tick_do_timer_cpu == cpu) {
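                /*
                 * This cpu holds the do_timer duty: it advances
                 * jiffies and the timekeeping under xtime_lock.
                 */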
                write_seqlock(&xtime_lock);

                /* Keep track of the next tick event */
                tick_next_period = ktime_add(tick_next_period, tick_period);

                do_timer(1);
                write_sequnlock(&xtime_lock);
        }

        update_process_times(user_mode(get_irq_regs()));
        profile_tick(CPU_PROFILING);
}

/*
 * Event handler for periodic ticks
 */
void tick_handle_periodic(struct clock_event_device *dev)
{
        int cpu = smp_processor_id();
        ktime_t next;

        tick_periodic(cpu);

        if (dev->mode != CLOCK_EVT_MODE_ONESHOT)
                return;
        /*
         * Set up the next period for devices which do not have
         * periodic mode:
         */
        next = ktime_add(dev->next_event, tick_period);
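        /*
         * Reprogram the device. clockevents_program_event() succeeds
         * only if the expiry is still in the future; a failure means
         * that next is already in the past, i.e. at least one tick
         * has been missed.
         */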
        for (;;) {
                if (!clockevents_program_event(dev, next, ktime_get()))
                        return;
                /*
                 * Have to be careful here. If we're in oneshot mode,
                 * before we call tick_periodic() in a loop, we need
                 * to be sure we're using a real hardware clocksource.
                 * Otherwise we could get trapped in an infinite
                 * loop, as the tick_periodic() increments jiffies,
                 * which then will increment time, possibly causing
                 * the loop to trigger again and again.
                 */
                if (timekeeping_valid_for_hres())
                        tick_periodic(cpu);
                next = ktime_add(next, tick_period);
        }
}

/*
 * Set up the device for a periodic tick
 */
void tick_setup_periodic(struct clock_event_device *dev, int broadcast)
{
        tick_set_periodic_handler(dev, broadcast);

        /* Broadcast setup? */
        if (!tick_device_is_functional(dev))
                return;

        if ((dev->features & CLOCK_EVT_FEAT_PERIODIC) &&
            !tick_broadcast_oneshot_active()) {
                clockevents_set_mode(dev, CLOCK_EVT_MODE_PERIODIC);
        } else {
                unsigned long seq;
                ktime_t next;
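
                /*
                 * Read tick_next_period under the xtime_lock seqlock,
                 * so we see a consistent value even while the do_timer
                 * cpu is advancing it.
                 */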
                do {
                        seq = read_seqbegin(&xtime_lock);
                        next = tick_next_period;
                } while (read_seqretry(&xtime_lock, seq));

                clockevents_set_mode(dev, CLOCK_EVT_MODE_ONESHOT);
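
                /*
                 * Program the first event. If it is already in the
                 * past, advance by one tick period and retry.
                 */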
                for (;;) {
                        if (!clockevents_program_event(dev, next, ktime_get()))
                                return;
                        next = ktime_add(next, tick_period);
                }
        }
}

/*
 * Set up the tick device
 */
static void tick_setup_device(struct tick_device *td,
                              struct clock_event_device *newdev, int cpu,
                              const struct cpumask *cpumask)
{
        ktime_t next_event;
        void (*handler)(struct clock_event_device *) = NULL;

        /*
         * First device setup?
         */
        if (!td->evtdev) {
                /*
                 * If no cpu took the do_timer update, assign it to
                 * this cpu:
                 */
                if (tick_do_timer_cpu == TICK_DO_TIMER_BOOT) {
                        tick_do_timer_cpu = cpu;
                        tick_next_period = ktime_get();
                        tick_period = ktime_set(0, NSEC_PER_SEC / HZ);
                }

                /*
                 * Start up in periodic mode first.
                 */
                td->mode = TICKDEV_MODE_PERIODIC;
        } else {
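                /*
                 * Take over from the old device: keep its handler and
                 * next expiry for the new device, and redirect the old
                 * device to a noop handler so a stray interrupt cannot
                 * fire into the tick core.
                 */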
                handler = td->evtdev->event_handler;
                next_event = td->evtdev->next_event;
                td->evtdev->event_handler = clockevents_handle_noop;
        }

        td->evtdev = newdev;

        /*
         * When the device is not per cpu, pin the interrupt to the
         * current cpu:
         */
        if (!cpumask_equal(newdev->cpumask, cpumask))
                irq_set_affinity(newdev->irq, cpumask);

        /*
         * When global broadcasting is active, check if the current
         * device is registered as a placeholder for broadcast mode.
         * This allows us to handle this x86 misfeature in a generic
         * way.
         */
        if (tick_device_uses_broadcast(newdev, cpu))
                return;

        if (td->mode == TICKDEV_MODE_PERIODIC)
                tick_setup_periodic(newdev, 0);
        else
                tick_setup_oneshot(newdev, handler, next_event);
}

/*
 * Check whether the newly registered device should be used.
 */
static int tick_check_new_device(struct clock_event_device *newdev)
{
        struct clock_event_device *curdev;
        struct tick_device *td;
        int cpu, ret = NOTIFY_OK;
        unsigned long flags;

        spin_lock_irqsave(&tick_device_lock, flags);

        cpu = smp_processor_id();
        if (!cpumask_test_cpu(cpu, newdev->cpumask))
                goto out_bc;

        td = &per_cpu(tick_cpu_device, cpu);
        curdev = td->evtdev;

        /* cpu local device? */
        if (!cpumask_equal(newdev->cpumask, cpumask_of(cpu))) {

                /*
                 * If the cpu affinity of the device interrupt cannot
                 * be set, ignore it.
                 */
                if (!irq_can_set_affinity(newdev->irq))
                        goto out_bc;

                /*
                 * If we have a cpu local device already, do not replace it
                 * by a non cpu local device
                 */
                if (curdev && cpumask_equal(curdev->cpumask, cpumask_of(cpu)))
                        goto out_bc;
        }

        /*
         * If we have an active device, then check the rating and the oneshot
         * feature.
         */
        if (curdev) {
                /*
                 * Prefer oneshot capable devices!
                 */
                if ((curdev->features & CLOCK_EVT_FEAT_ONESHOT) &&
                    !(newdev->features & CLOCK_EVT_FEAT_ONESHOT))
                        goto out_bc;
                /*
                 * Check the rating
                 */
                if (curdev->rating >= newdev->rating)
                        goto out_bc;
        }

        /*
         * Replace the existing device, if any, by the new device. If
         * the current device is the broadcast device, do not give it
         * back to the clockevents layer!
         */
        if (tick_is_broadcast_device(curdev)) {
                clockevents_shutdown(curdev);
                curdev = NULL;
        }
        clockevents_exchange_device(curdev, newdev);
        tick_setup_device(td, newdev, cpu, cpumask_of(cpu));
        if (newdev->features & CLOCK_EVT_FEAT_ONESHOT)
                tick_oneshot_notify();

        spin_unlock_irqrestore(&tick_device_lock, flags);
        return NOTIFY_STOP;

out_bc:
        /*
         * Can the new device be used as a broadcast device?
         */
        if (tick_check_broadcast_device(newdev))
                ret = NOTIFY_STOP;

        spin_unlock_irqrestore(&tick_device_lock, flags);

        return ret;
}

/*
 * Transfer the do_timer job away from a dying cpu.
 *
 * Called with interrupts disabled.
 */
static void tick_handover_do_timer(int *cpup)
{
        if (*cpup == tick_do_timer_cpu) {
                int cpu = cpumask_first(cpu_online_mask);
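
                /*
                 * cpumask_first() returns >= nr_cpu_ids when the mask
                 * is empty, i.e. there is no online cpu left to take
                 * over the do_timer duty.
                 */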
                tick_do_timer_cpu = (cpu < nr_cpu_ids) ? cpu :
                        TICK_DO_TIMER_NONE;
        }
}

/*
 * Shut down an event device on a given cpu:
 *
 * This is called on a live CPU, when a CPU is dead. So we cannot
 * access the hardware device itself.
 * We just set the mode and remove it from the lists.
 */
static void tick_shutdown(unsigned int *cpup)
{
        struct tick_device *td = &per_cpu(tick_cpu_device, *cpup);
        struct clock_event_device *dev = td->evtdev;
        unsigned long flags;

        spin_lock_irqsave(&tick_device_lock, flags);
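        /*
         * Reset the tick device to its default, periodic mode.
         */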
        td->mode = TICKDEV_MODE_PERIODIC;
        if (dev) {
                /*
                 * Prevent the clock events layer from trying to call
                 * the set mode function!
                 */
                dev->mode = CLOCK_EVT_MODE_UNUSED;
                clockevents_exchange_device(dev, NULL);
                td->evtdev = NULL;
        }
        spin_unlock_irqrestore(&tick_device_lock, flags);
}
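
/*
 * Shut down the cpu local tick device on suspend. Invoked from the
 * CLOCK_EVT_NOTIFY_SUSPEND notification below.
 */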
static void tick_suspend(void)
{
        struct tick_device *td = &__get_cpu_var(tick_cpu_device);
        unsigned long flags;

        spin_lock_irqsave(&tick_device_lock, flags);
        clockevents_shutdown(td->evtdev);
        spin_unlock_irqrestore(&tick_device_lock, flags);
}

static void tick_resume(void)
{
        struct tick_device *td = &__get_cpu_var(tick_cpu_device);
        unsigned long flags;
        int broadcast = tick_resume_broadcast();

        spin_lock_irqsave(&tick_device_lock, flags);
        clockevents_set_mode(td->evtdev, CLOCK_EVT_MODE_RESUME);
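
        /*
         * If the broadcast device has taken over the tick on resume,
         * leave the cpu local device alone; otherwise restart it in
         * the mode it was in before suspend.
         */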
        if (!broadcast) {
                if (td->mode == TICKDEV_MODE_PERIODIC)
                        tick_setup_periodic(td->evtdev, 0);
                else
                        tick_resume_oneshot();
        }
        spin_unlock_irqrestore(&tick_device_lock, flags);
}

/*
 * Notification about clock event devices
 */
static int tick_notify(struct notifier_block *nb, unsigned long reason,
                               void *dev)
{
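        /*
         * The payload behind @dev depends on @reason: a
         * clock_event_device pointer for the ADD notification, a cpu
         * number pointer for the cpu hotplug notifications; for the
         * broadcast on/off events it is passed through to
         * tick_broadcast_on_off().
         */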
        switch (reason) {

        case CLOCK_EVT_NOTIFY_ADD:
                return tick_check_new_device(dev);

        case CLOCK_EVT_NOTIFY_BROADCAST_ON:
        case CLOCK_EVT_NOTIFY_BROADCAST_OFF:
        case CLOCK_EVT_NOTIFY_BROADCAST_FORCE:
                tick_broadcast_on_off(reason, dev);
                break;

        case CLOCK_EVT_NOTIFY_BROADCAST_ENTER:
        case CLOCK_EVT_NOTIFY_BROADCAST_EXIT:
                tick_broadcast_oneshot_control(reason);
                break;

        case CLOCK_EVT_NOTIFY_CPU_DYING:
                tick_handover_do_timer(dev);
                break;

        case CLOCK_EVT_NOTIFY_CPU_DEAD:
                tick_shutdown_broadcast_oneshot(dev);
                tick_shutdown_broadcast(dev);
                tick_shutdown(dev);
                break;

        case CLOCK_EVT_NOTIFY_SUSPEND:
                tick_suspend();
                tick_suspend_broadcast();
                break;

        case CLOCK_EVT_NOTIFY_RESUME:
                tick_resume();
                break;

        default:
                break;
        }

        return NOTIFY_OK;
}

static struct notifier_block tick_notifier = {
        .notifier_call = tick_notify,
};

/**
 * tick_init - initialize the tick control
 *
 * Register the notifier with the clockevents framework
 */
void __init tick_init(void)
{
        clockevents_register_notifier(&tick_notifier);
}