/*
 * Timer code for IOP based systems.
 *
 * Timer 0 drives the clockevent device (tick interrupts); timer 1
 * runs free and provides the clocksource and sched_clock().
 */
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/timex.h>
#include <linux/io.h>
#include <linux/clocksource.h>
#include <linux/clockchips.h>
#include <linux/export.h>
#include <linux/sched_clock.h>
#include <linux/uaccess.h>
#include <mach/hardware.h>
#include <asm/irq.h>
#include <asm/mach/irq.h>
#include <asm/mach/time.h>
#include <mach/time.h>

/*
 * Minimum clocksource/clockevent timer range in seconds
 */
#define IOP_MIN_RANGE 4

/*
 * IOP clocksource (free-running timer 1).
 */
static u64 notrace iop_clocksource_read(struct clocksource *unused)
{
	/* TCR1 counts down; invert it so the clocksource counts up. */
	return 0xffffffffu - read_tcr1();
}

static struct clocksource iop_clocksource = {
	.name		= "iop_timer1",
	.rating		= 300,
	.read		= iop_clocksource_read,
	.mask		= CLOCKSOURCE_MASK(32),
	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
};

/*
 * IOP sched_clock() implementation via its clocksource.
 */
static u64 notrace iop_read_sched_clock(void)
{
	return 0xffffffffu - read_tcr1();
}

/*
 * IOP clockevents (interrupting timer 0).
 */
static int iop_set_next_event(unsigned long delta,
			      struct clock_event_device *unused)
{
	u32 tmr = IOP_TMR_PRIVILEGED | IOP_TMR_RATIO_1_1;

	BUG_ON(delta == 0);
	/* Stop the timer, load the one-shot count, restart without reload. */
	write_tmr0(tmr & ~(IOP_TMR_EN | IOP_TMR_RELOAD));
	write_tcr0(delta);
	write_tmr0((tmr & ~IOP_TMR_RELOAD) | IOP_TMR_EN);

	return 0;
}

static unsigned long ticks_per_jiffy;

static int iop_set_periodic(struct clock_event_device *evt)
{
	u32 tmr = read_tmr0();

	/* Reprogram timer 0 to auto-reload every jiffy. */
	write_tmr0(tmr & ~IOP_TMR_EN);
	write_tcr0(ticks_per_jiffy - 1);
	write_trr0(ticks_per_jiffy - 1);
	tmr |= (IOP_TMR_RELOAD | IOP_TMR_EN);

	write_tmr0(tmr);
	return 0;
}

static int iop_set_oneshot(struct clock_event_device *evt)
{
	u32 tmr = read_tmr0();

	/* ->set_next_event sets period and enables timer */
	tmr &= ~(IOP_TMR_RELOAD | IOP_TMR_EN);
	write_tmr0(tmr);
	return 0;
}

static int iop_shutdown(struct clock_event_device *evt)
{
	u32 tmr = read_tmr0();

	tmr &= ~IOP_TMR_EN;
	write_tmr0(tmr);
	return 0;
}

static int iop_resume(struct clock_event_device *evt)
{
	u32 tmr = read_tmr0();

	tmr |= IOP_TMR_EN;
	write_tmr0(tmr);
	return 0;
}

static struct clock_event_device iop_clockevent = {
	.name			= "iop_timer0",
	.features		= CLOCK_EVT_FEAT_PERIODIC |
				  CLOCK_EVT_FEAT_ONESHOT,
	.rating			= 300,
	.set_next_event		= iop_set_next_event,
	.set_state_shutdown	= iop_shutdown,
	.set_state_periodic	= iop_set_periodic,
	.tick_resume		= iop_resume,
	.set_state_oneshot	= iop_set_oneshot,
};

static irqreturn_t
iop_timer_interrupt(int irq, void *dev_id)
{
	struct clock_event_device *evt = dev_id;

	/* Acknowledge the timer 0 interrupt before calling the handler. */
	write_tisr(1);
	evt->event_handler(evt);
	return IRQ_HANDLED;
}

static struct irqaction iop_timer_irq = {
	.name		= "IOP Timer Tick",
	.handler	= iop_timer_interrupt,
	.flags		= IRQF_TIMER | IRQF_IRQPOLL,
	.dev_id		= &iop_clockevent,
};

static unsigned long iop_tick_rate;
unsigned long get_iop_tick_rate(void)
{
	return iop_tick_rate;
}
EXPORT_SYMBOL(get_iop_tick_rate);
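
/*
 * Illustrative usage sketch (not part of this file): a driver that needs
 * the timer tick rate can call the exported helper directly, e.g.
 *
 *	unsigned long rate = get_iop_tick_rate();
 *
 *	pr_info("IOP timer runs at %lu Hz\n", rate);
 */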

/*
 * Set up the IOP timers; tick_rate is the timer input clock in Hz,
 * supplied by the platform code that calls this at init time.
 */
void __init iop_init_time(unsigned long tick_rate)
{
	u32 timer_ctl;

	sched_clock_register(iop_read_sched_clock, 32, tick_rate);

	ticks_per_jiffy = DIV_ROUND_CLOSEST(tick_rate, HZ);
	iop_tick_rate = tick_rate;

	timer_ctl = IOP_TMR_EN | IOP_TMR_PRIVILEGED |
		    IOP_TMR_RELOAD | IOP_TMR_RATIO_1_1;

	/*
	 * Set up interrupting clockevent timer 0.
	 */
	write_tmr0(timer_ctl & ~IOP_TMR_EN);
	write_tisr(1);
	setup_irq(IRQ_IOP_TIMER0, &iop_timer_irq);
	iop_clockevent.cpumask = cpumask_of(0);
	clockevents_config_and_register(&iop_clockevent, tick_rate,
					0xf, 0xfffffffe);

	/*
	 * Set up free-running clocksource timer 1.
	 */
	write_trr1(0xffffffff);
	write_tcr1(0xffffffff);
	write_tmr1(timer_ctl);
	clocksource_register_hz(&iop_clocksource, tick_rate);
}