linux/arch/blackfin/kernel/time-ts.c
/*
 * Based on arm clockevents implementation and old bfin time tick.
 *
 * Copyright 2008-2009 Analog Devices Inc.
 *                2008 GeoTechnologies
 *                     Vitja Makarov
 *
 * Licensed under the GPL-2
 */

#include <linux/module.h>
#include <linux/profile.h>
#include <linux/interrupt.h>
#include <linux/time.h>
#include <linux/timex.h>
#include <linux/irq.h>
#include <linux/clocksource.h>
#include <linux/clockchips.h>
#include <linux/cpufreq.h>

#include <asm/blackfin.h>
#include <asm/time.h>
#include <asm/gptimers.h>
#include <asm/nmi.h>


#if defined(CONFIG_CYCLES_CLOCKSOURCE)

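/*
 * Clocksource backed by the free-running core cycle counter.  With cpufreq
 * enabled, __bfin_cycles_off and __bfin_cycles_mod are adjusted on frequency
 * transitions so the returned count stays continuous across CCLK changes.
 */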
static notrace cycle_t bfin_read_cycles(struct clocksource *cs)
{
#ifdef CONFIG_CPU_FREQ
        return __bfin_cycles_off + (get_cycles() << __bfin_cycles_mod);
#else
        return get_cycles();
#endif
}

static struct clocksource bfin_cs_cycles = {
        .name           = "bfin_cs_cycles",
        .rating         = 400,
        .read           = bfin_read_cycles,
        .mask           = CLOCKSOURCE_MASK(64),
        .flags          = CLOCK_SOURCE_IS_CONTINUOUS,
};

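/* convert the raw cycle count to nanoseconds; used to implement sched_clock() */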
static inline unsigned long long bfin_cs_cycles_sched_clock(void)
{
        return clocksource_cyc2ns(bfin_read_cycles(&bfin_cs_cycles),
                bfin_cs_cycles.mult, bfin_cs_cycles.shift);
}

static int __init bfin_cs_cycles_init(void)
{
        if (clocksource_register_hz(&bfin_cs_cycles, get_cclk()))
                panic("failed to register clocksource");

        return 0;
}
#else
# define bfin_cs_cycles_init()
#endif

#ifdef CONFIG_GPTMR0_CLOCKSOURCE

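/*
 * Program GPTIMER0 as a free-running 32-bit counter (PWM_OUT mode, output
 * pin disabled, maximum period) so its counter can be read as a clocksource.
 */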
void __init setup_gptimer0(void)
{
        disable_gptimers(TIMER0bit);

        set_gptimer_config(TIMER0_id,
                TIMER_OUT_DIS | TIMER_PERIOD_CNT | TIMER_MODE_PWM);
        set_gptimer_period(TIMER0_id, -1);
        set_gptimer_pwidth(TIMER0_id, -2);
        SSYNC();
        enable_gptimers(TIMER0bit);
}

static cycle_t bfin_read_gptimer0(struct clocksource *cs)
{
        return bfin_read_TIMER0_COUNTER();
}

static struct clocksource bfin_cs_gptimer0 = {
        .name           = "bfin_cs_gptimer0",
        .rating         = 350,
        .read           = bfin_read_gptimer0,
        .mask           = CLOCKSOURCE_MASK(32),
        .flags          = CLOCK_SOURCE_IS_CONTINUOUS,
};

static inline unsigned long long bfin_cs_gptimer0_sched_clock(void)
{
        return clocksource_cyc2ns(bfin_read_TIMER0_COUNTER(),
                bfin_cs_gptimer0.mult, bfin_cs_gptimer0.shift);
}

static int __init bfin_cs_gptimer0_init(void)
{
        setup_gptimer0();

        if (clocksource_register_hz(&bfin_cs_gptimer0, get_sclk()))
                panic("failed to register clocksource");

        return 0;
}
#else
# define bfin_cs_gptimer0_init()
#endif

#if defined(CONFIG_GPTMR0_CLOCKSOURCE) || defined(CONFIG_CYCLES_CLOCKSOURCE)
/* prefer to use cycles since it has higher rating */
notrace unsigned long long sched_clock(void)
{
#if defined(CONFIG_CYCLES_CLOCKSOURCE)
        return bfin_cs_cycles_sched_clock();
#else
        return bfin_cs_gptimer0_sched_clock();
#endif
}
#endif

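/* GPTIMER0 as the system tick clock event device */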
#if defined(CONFIG_TICKSOURCE_GPTMR0)
static int bfin_gptmr0_set_next_event(unsigned long cycles,
                                      struct clock_event_device *evt)
{
        disable_gptimers(TIMER0bit);

        /* it starts counting three SCLK cycles after the TIMENx bit is set */
        set_gptimer_pwidth(TIMER0_id, cycles - 3);
        enable_gptimers(TIMER0bit);
        return 0;
}

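/*
 * Periodic mode: auto-reload every SCLK/HZ cycles.  One-shot mode: leave the
 * timer disabled here and arm it from set_next_event().
 */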
static void bfin_gptmr0_set_mode(enum clock_event_mode mode,
                                 struct clock_event_device *evt)
{
        switch (mode) {
        case CLOCK_EVT_MODE_PERIODIC: {
                set_gptimer_config(TIMER0_id,
                        TIMER_OUT_DIS | TIMER_IRQ_ENA |
                        TIMER_PERIOD_CNT | TIMER_MODE_PWM);
                set_gptimer_period(TIMER0_id, get_sclk() / HZ);
                set_gptimer_pwidth(TIMER0_id, get_sclk() / HZ - 1);
                enable_gptimers(TIMER0bit);
                break;
        }
        case CLOCK_EVT_MODE_ONESHOT:
                disable_gptimers(TIMER0bit);
                set_gptimer_config(TIMER0_id,
                        TIMER_OUT_DIS | TIMER_IRQ_ENA | TIMER_MODE_PWM);
                set_gptimer_period(TIMER0_id, 0);
                break;
        case CLOCK_EVT_MODE_UNUSED:
        case CLOCK_EVT_MODE_SHUTDOWN:
                disable_gptimers(TIMER0bit);
                break;
        case CLOCK_EVT_MODE_RESUME:
                break;
        }
}

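/* clear (write-1-to-clear) the TIMIL0 interrupt latch so the next tick can fire */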
static void bfin_gptmr0_ack(void)
{
        set_gptimer_status(TIMER_GROUP1, TIMER_STATUS_TIMIL0);
}

static void __init bfin_gptmr0_init(void)
{
        disable_gptimers(TIMER0bit);
}

#ifdef CONFIG_CORE_TIMER_IRQ_L1
__attribute__((l1_text))
#endif
irqreturn_t bfin_gptmr0_interrupt(int irq, void *dev_id)
{
        struct clock_event_device *evt = dev_id;
        smp_mb();
        /*
         * We want to ACK before we handle so that we can handle smaller timer
         * intervals.  This way if the timer expires again while we're handling
         * things, we're more likely to see that 2nd int rather than swallowing
         * it by ACKing the int at the end of this handler.
         */
        bfin_gptmr0_ack();
        evt->event_handler(evt);
        return IRQ_HANDLED;
}

static struct irqaction gptmr0_irq = {
        .name           = "Blackfin GPTimer0",
        .flags          = IRQF_DISABLED | IRQF_TIMER |
                          IRQF_IRQPOLL | IRQF_PERCPU,
        .handler        = bfin_gptmr0_interrupt,
};

static struct clock_event_device clockevent_gptmr0 = {
        .name           = "bfin_gptimer0",
        .rating         = 300,
        .irq            = IRQ_TIMER0,
        .shift          = 32,
        .features       = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
        .set_next_event = bfin_gptmr0_set_next_event,
        .set_mode       = bfin_gptmr0_set_mode,
};

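/*
 * mult/shift scale nanoseconds to SCLK cycles for the clockevents core; the
 * longest programmable delta is the full 32-bit counter, the shortest 100
 * timer cycles.
 */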
static void __init bfin_gptmr0_clockevent_init(struct clock_event_device *evt)
{
        unsigned long clock_tick;

        clock_tick = get_sclk();
        evt->mult = div_sc(clock_tick, NSEC_PER_SEC, evt->shift);
        evt->max_delta_ns = clockevent_delta2ns(-1, evt);
        evt->min_delta_ns = clockevent_delta2ns(100, evt);

        evt->cpumask = cpumask_of(0);

        clockevents_register_device(evt);
}
#endif /* CONFIG_TICKSOURCE_GPTMR0 */

#if defined(CONFIG_TICKSOURCE_CORETMR)
/* per-cpu local core timer */
static DEFINE_PER_CPU(struct clock_event_device, coretmr_events);

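/*
 * Core timer one-shot: power the timer, load TCOUNT with the requested delta,
 * then enable it; it counts down at CCLK/TIME_SCALE and raises IRQ_CORETMR
 * when it reaches zero.
 */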
static int bfin_coretmr_set_next_event(unsigned long cycles,
                                       struct clock_event_device *evt)
{
        bfin_write_TCNTL(TMPWR);
        CSYNC();
        bfin_write_TCOUNT(cycles);
        CSYNC();
        bfin_write_TCNTL(TMPWR | TMREN);
        return 0;
}

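/*
 * Periodic mode: TAUTORLD reloads TCOUNT from TPERIOD each time it expires,
 * giving HZ interrupts per second.  One-shot mode only powers the timer;
 * set_next_event() loads the count and enables it.
 */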
static void bfin_coretmr_set_mode(enum clock_event_mode mode,
                                  struct clock_event_device *evt)
{
        switch (mode) {
        case CLOCK_EVT_MODE_PERIODIC: {
                unsigned long tcount = ((get_cclk() / (HZ * TIME_SCALE)) - 1);
                bfin_write_TCNTL(TMPWR);
                CSYNC();
                bfin_write_TSCALE(TIME_SCALE - 1);
                bfin_write_TPERIOD(tcount);
                bfin_write_TCOUNT(tcount);
                CSYNC();
                bfin_write_TCNTL(TMPWR | TMREN | TAUTORLD);
                break;
        }
        case CLOCK_EVT_MODE_ONESHOT:
                bfin_write_TCNTL(TMPWR);
                CSYNC();
                bfin_write_TSCALE(TIME_SCALE - 1);
                bfin_write_TPERIOD(0);
                bfin_write_TCOUNT(0);
                break;
        case CLOCK_EVT_MODE_UNUSED:
        case CLOCK_EVT_MODE_SHUTDOWN:
                bfin_write_TCNTL(0);
                CSYNC();
                break;
        case CLOCK_EVT_MODE_RESUME:
                break;
        }
}

void bfin_coretmr_init(void)
{
        /* power up the timer, but don't enable it just yet */
        bfin_write_TCNTL(TMPWR);
        CSYNC();

        /* the TSCALE prescaler counter. */
        bfin_write_TSCALE(TIME_SCALE - 1);
        bfin_write_TPERIOD(0);
        bfin_write_TCOUNT(0);

        CSYNC();
}

#ifdef CONFIG_CORE_TIMER_IRQ_L1
__attribute__((l1_text))
#endif
irqreturn_t bfin_coretmr_interrupt(int irq, void *dev_id)
{
        int cpu = smp_processor_id();
        struct clock_event_device *evt = &per_cpu(coretmr_events, cpu);

        smp_mb();
        evt->event_handler(evt);

        touch_nmi_watchdog();

        return IRQ_HANDLED;
}

static struct irqaction coretmr_irq = {
        .name           = "Blackfin CoreTimer",
        .flags          = IRQF_DISABLED | IRQF_TIMER |
                          IRQF_IRQPOLL | IRQF_PERCPU,
        .handler        = bfin_coretmr_interrupt,
};

void bfin_coretmr_clockevent_init(void)
{
        unsigned long clock_tick;
        unsigned int cpu = smp_processor_id();
        struct clock_event_device *evt = &per_cpu(coretmr_events, cpu);

        evt->name = "bfin_core_timer";
        evt->rating = 350;
        evt->irq = -1;
        evt->shift = 32;
        evt->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;
        evt->set_next_event = bfin_coretmr_set_next_event;
        evt->set_mode = bfin_coretmr_set_mode;

        clock_tick = get_cclk() / TIME_SCALE;
        evt->mult = div_sc(clock_tick, NSEC_PER_SEC, evt->shift);
        evt->max_delta_ns = clockevent_delta2ns(-1, evt);
        evt->min_delta_ns = clockevent_delta2ns(100, evt);

        evt->cpumask = cpumask_of(cpu);

        clockevents_register_device(evt);
}
#endif /* CONFIG_TICKSOURCE_CORETMR */


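/*
 * Report a fixed boot-time epoch of 1 Jan 2007: 37 years plus 9 leap days
 * (1972-2004) after 1 Jan 1970.
 */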
void read_persistent_clock(struct timespec *ts)
{
        time_t secs_since_1970 = (365 * 37 + 9) * 24 * 60 * 60; /* 1 Jan 2007 */
        ts->tv_sec = secs_since_1970;
        ts->tv_nsec = 0;
}

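/*
 * Sanity-check the RTC (if enabled), register the clocksources, then wire up
 * the tick interrupt handler and clock event device.
 */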
void __init time_init(void)
{

#ifdef CONFIG_RTC_DRV_BFIN
        /* [#2663] hack to filter junk RTC values that would cause
         * userspace to have to deal with time values greater than
         * 2^31 seconds (which uClibc cannot cope with yet)
         */
        if ((bfin_read_RTC_STAT() & 0xC0000000) == 0xC0000000) {
                printk(KERN_NOTICE "bfin-rtc: invalid date; resetting\n");
                bfin_write_RTC_STAT(0);
        }
#endif

        bfin_cs_cycles_init();
        bfin_cs_gptimer0_init();

#if defined(CONFIG_TICKSOURCE_CORETMR)
        bfin_coretmr_init();
        setup_irq(IRQ_CORETMR, &coretmr_irq);
        bfin_coretmr_clockevent_init();
#endif

#if defined(CONFIG_TICKSOURCE_GPTMR0)
        bfin_gptmr0_init();
        /* hand the clock event device to the handler before the IRQ is requested */
        gptmr0_irq.dev_id = &clockevent_gptmr0;
        setup_irq(IRQ_TIMER0, &gptmr0_irq);
        bfin_gptmr0_clockevent_init(&clockevent_gptmr0);
#endif

#if !defined(CONFIG_TICKSOURCE_CORETMR) && !defined(CONFIG_TICKSOURCE_GPTMR0)
# error at least one clock event device is required
#endif
}