1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16#include <linux/kernel.h>
17#include <linux/interrupt.h>
18#include <linux/time.h>
19#include <linux/init.h>
20#include <linux/sched.h>
21#include <linux/timex.h>
22#include <linux/sched.h>
23#include <linux/io.h>
24#include <linux/clocksource.h>
25#include <linux/clockchips.h>
26#include <mach/hardware.h>
27#include <asm/irq.h>
28#include <asm/sched_clock.h>
29#include <asm/uaccess.h>
30#include <asm/mach/irq.h>
31#include <asm/mach/time.h>
32#include <mach/time.h>
33
34
35
36
37#define IOP_MIN_RANGE 4
38
39
40
41
42static cycle_t notrace iop_clocksource_read(struct clocksource *unused)
43{
44 return 0xffffffffu - read_tcr1();
45}
46
/* Free-running clocksource on IOP timer 1 (timer 0 drives clockevents). */
static struct clocksource iop_clocksource = {
	.name		= "iop_timer1",
	.rating		= 300,
	.read		= iop_clocksource_read,
	.mask		= CLOCKSOURCE_MASK(32),	/* 32-bit down counter */
	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
};
54
/* sched_clock() conversion state for the timer 1 cycle counter. */
static DEFINE_CLOCK_DATA(cd);
56
57
58
59
60unsigned long long notrace sched_clock(void)
61{
62 u32 cyc = 0xffffffffu - read_tcr1();
63 return cyc_to_sched_clock(&cd, cyc, (u32)~0);
64}
65
66static void notrace iop_update_sched_clock(void)
67{
68 u32 cyc = 0xffffffffu - read_tcr1();
69 update_sched_clock(&cd, cyc, (u32)~0);
70}
71
72
73
74
75static int iop_set_next_event(unsigned long delta,
76 struct clock_event_device *unused)
77{
78 u32 tmr = IOP_TMR_PRIVILEGED | IOP_TMR_RATIO_1_1;
79
80 BUG_ON(delta == 0);
81 write_tmr0(tmr & ~(IOP_TMR_EN | IOP_TMR_RELOAD));
82 write_tcr0(delta);
83 write_tmr0((tmr & ~IOP_TMR_RELOAD) | IOP_TMR_EN);
84
85 return 0;
86}
87
/* Timer ticks per jiffy, set from the tick rate in iop_init_time(). */
static unsigned long ticks_per_jiffy;
89
90static void iop_set_mode(enum clock_event_mode mode,
91 struct clock_event_device *unused)
92{
93 u32 tmr = read_tmr0();
94
95 switch (mode) {
96 case CLOCK_EVT_MODE_PERIODIC:
97 write_tmr0(tmr & ~IOP_TMR_EN);
98 write_tcr0(ticks_per_jiffy - 1);
99 write_trr0(ticks_per_jiffy - 1);
100 tmr |= (IOP_TMR_RELOAD | IOP_TMR_EN);
101 break;
102 case CLOCK_EVT_MODE_ONESHOT:
103
104 tmr &= ~(IOP_TMR_RELOAD | IOP_TMR_EN);
105 break;
106 case CLOCK_EVT_MODE_RESUME:
107 tmr |= IOP_TMR_EN;
108 break;
109 case CLOCK_EVT_MODE_SHUTDOWN:
110 case CLOCK_EVT_MODE_UNUSED:
111 default:
112 tmr &= ~IOP_TMR_EN;
113 break;
114 }
115
116 write_tmr0(tmr);
117}
118
/* Tick device on IOP timer 0; supports both periodic and one-shot. */
static struct clock_event_device iop_clockevent = {
	.name		= "iop_timer0",
	.features       = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
	.rating         = 300,
	.set_next_event	= iop_set_next_event,
	.set_mode	= iop_set_mode,
};
126
127static irqreturn_t
128iop_timer_interrupt(int irq, void *dev_id)
129{
130 struct clock_event_device *evt = dev_id;
131
132 write_tisr(1);
133 evt->event_handler(evt);
134 return IRQ_HANDLED;
135}
136
/* irqaction for the tick; dev_id routes the IRQ to the clockevent. */
static struct irqaction iop_timer_irq = {
	.name		= "IOP Timer Tick",
	.handler	= iop_timer_interrupt,
	.flags		= IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL,
	.dev_id		= &iop_clockevent,
};
143
/* Timer input clock rate in Hz, recorded by iop_init_time(). */
static unsigned long iop_tick_rate;

/* Return the IOP timer tick rate (Hz); exported for drivers. */
unsigned long get_iop_tick_rate(void)
{
	return iop_tick_rate;
}
EXPORT_SYMBOL(get_iop_tick_rate);
150
/*
 * One-time timer initialisation: sets up sched_clock() on timer 1,
 * registers timer 0 as the tick clockevent, and registers timer 1 as a
 * free-running clocksource.
 *
 * @tick_rate: timer input clock frequency in Hz.
 */
void __init iop_init_time(unsigned long tick_rate)
{
	u32 timer_ctl;

	init_sched_clock(&cd, iop_update_sched_clock, 32, tick_rate);

	ticks_per_jiffy = DIV_ROUND_CLOSEST(tick_rate, HZ);
	iop_tick_rate = tick_rate;

	timer_ctl = IOP_TMR_EN | IOP_TMR_PRIVILEGED |
			IOP_TMR_RELOAD | IOP_TMR_RATIO_1_1;

	/*
	 * Set up interrupting clockevent timer 0: leave it disabled for
	 * now, clear any stale interrupt, and hook up the tick IRQ
	 * before the clockevent is registered.
	 */
	write_tmr0(timer_ctl & ~IOP_TMR_EN);
	write_tisr(1);
	setup_irq(IRQ_IOP_TIMER0, &iop_timer_irq);
	clockevents_calc_mult_shift(&iop_clockevent,
				    tick_rate, IOP_MIN_RANGE);
	/* 0xfffffffe: largest programmable delta for the 32-bit counter. */
	iop_clockevent.max_delta_ns =
		clockevent_delta2ns(0xfffffffe, &iop_clockevent);
	iop_clockevent.min_delta_ns =
		clockevent_delta2ns(0xf, &iop_clockevent);
	iop_clockevent.cpumask = cpumask_of(0);
	clockevents_register_device(&iop_clockevent);

	/*
	 * Set up free-running clocksource timer 1: auto-reload from
	 * 0xffffffff so it counts down continuously, then register it.
	 */
	write_trr1(0xffffffff);
	write_tcr1(0xffffffff);
	write_tmr1(timer_ctl);
	clocksource_register_hz(&iop_clocksource, tick_rate);
}
186