/*
 * MSM architecture timer driver: clock event device on the GP timer,
 * clocksource on the DG timer.
 *
 * NOTE(review): the original license/copyright header appears to have
 * been garbled during extraction (only line-number residue remains) —
 * restore it from the upstream kernel tree before shipping.
 */
17#include <linux/clocksource.h>
18#include <linux/clockchips.h>
19#include <linux/init.h>
20#include <linux/interrupt.h>
21#include <linux/irq.h>
22#include <linux/io.h>
23
24#include <asm/mach/time.h>
25#include <asm/hardware/gic.h>
26#include <asm/localtimer.h>
27
28#include <mach/msm_iomap.h>
29#include <mach/cpu.h>
30#include <mach/board.h>
31
/* Register offsets from a timer block base (event_base / source_base). */
#define TIMER_MATCH_VAL 0x0000
#define TIMER_COUNT_VAL 0x0004
#define TIMER_ENABLE 0x0008
#define TIMER_ENABLE_CLR_ON_MATCH_EN BIT(1)
#define TIMER_ENABLE_EN BIT(0)
#define TIMER_CLEAR 0x000C
#define DGT_CLK_CTL 0x0034
#define DGT_CLK_CTL_DIV_4 0x3	/* divide DGT input clock by 4 */

/* GP timer tick rate (32768 Hz) used to configure the clock event device. */
#define GPT_HZ 32768

/* msm7x01 only: DGT count (and rate) are shifted down by this amount. */
#define MSM_DGT_SHIFT 5
44
/* MMIO base of the GP (clock event) timer register block. */
static void __iomem *event_base;
46
47static irqreturn_t msm_timer_interrupt(int irq, void *dev_id)
48{
49 struct clock_event_device *evt = *(struct clock_event_device **)dev_id;
50
51 if (evt->mode == CLOCK_EVT_MODE_ONESHOT) {
52 u32 ctrl = readl_relaxed(event_base + TIMER_ENABLE);
53 ctrl &= ~TIMER_ENABLE_EN;
54 writel_relaxed(ctrl, event_base + TIMER_ENABLE);
55 }
56 evt->event_handler(evt);
57 return IRQ_HANDLED;
58}
59
60static int msm_timer_set_next_event(unsigned long cycles,
61 struct clock_event_device *evt)
62{
63 u32 ctrl = readl_relaxed(event_base + TIMER_ENABLE);
64
65 writel_relaxed(0, event_base + TIMER_CLEAR);
66 writel_relaxed(cycles, event_base + TIMER_MATCH_VAL);
67 writel_relaxed(ctrl | TIMER_ENABLE_EN, event_base + TIMER_ENABLE);
68 return 0;
69}
70
71static void msm_timer_set_mode(enum clock_event_mode mode,
72 struct clock_event_device *evt)
73{
74 u32 ctrl;
75
76 ctrl = readl_relaxed(event_base + TIMER_ENABLE);
77 ctrl &= ~(TIMER_ENABLE_EN | TIMER_ENABLE_CLR_ON_MATCH_EN);
78
79 switch (mode) {
80 case CLOCK_EVT_MODE_RESUME:
81 case CLOCK_EVT_MODE_PERIODIC:
82 break;
83 case CLOCK_EVT_MODE_ONESHOT:
84
85 break;
86 case CLOCK_EVT_MODE_UNUSED:
87 case CLOCK_EVT_MODE_SHUTDOWN:
88 break;
89 }
90 writel_relaxed(ctrl, event_base + TIMER_ENABLE);
91}
92
/* Clock event device backed by the GP timer (one-shot only). */
static struct clock_event_device msm_clockevent = {
	.name = "gp_timer",
	.features = CLOCK_EVT_FEAT_ONESHOT,
	.rating = 200,
	.set_next_event = msm_timer_set_next_event,
	.set_mode = msm_timer_set_mode,
};
100
/*
 * IRQ dev_id cookie.  8x60/8960 use a percpu pointer so each CPU's
 * handler finds its own event device; the other SoCs use a single
 * pointer.  msm_timer_interrupt() dereferences both the same way.
 */
static union {
	struct clock_event_device *evt;
	struct clock_event_device __percpu **percpu_evt;
} msm_evt;
105
/* MMIO base of the DG (clocksource) timer register block. */
static void __iomem *source_base;
107
/* Read the free-running DG timer counter. */
static cycle_t msm_read_timer_count(struct clocksource *cs)
{
	return readl_relaxed(source_base + TIMER_COUNT_VAL);
}
112
static cycle_t msm_read_timer_count_shift(struct clocksource *cs)
{
	/*
	 * msm7x01 read path: drop the low MSM_DGT_SHIFT bits.  The rate
	 * (dgt_hz) and mask are scaled down by the same amount in
	 * msm_timer_init(), so the clocksource stays consistent.
	 * NOTE(review): presumably the low bits are unreliable on this
	 * target — confirm against the original upstream comment.
	 */
	return msm_read_timer_count(cs) >> MSM_DGT_SHIFT;
}
121
/* Clocksource backed by the free-running DG timer. */
static struct clocksource msm_clocksource = {
	.name = "dg_timer",
	.rating = 300,
	.read = msm_read_timer_count,	/* replaced with shifted read on msm7x01 */
	.mask = CLOCKSOURCE_MASK(32),	/* narrowed on msm7x01 */
	.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
129
/*
 * Boot-time timer setup: select the per-SoC register layout and DGT
 * clock rate, register the GP timer as the clock event device and the
 * DG timer as the clocksource, and wire up the timer interrupt.
 */
static void __init msm_timer_init(void)
{
	struct clock_event_device *ce = &msm_clockevent;
	struct clocksource *cs = &msm_clocksource;
	int res;
	u32 dgt_hz;

	/* Per-SoC register bases and DGT input clock rate. */
	if (cpu_is_msm7x01()) {
		event_base = MSM_CSR_BASE;
		source_base = MSM_CSR_BASE + 0x10;
		/* Rate and count are both shifted down by MSM_DGT_SHIFT. */
		dgt_hz = 19200000 >> MSM_DGT_SHIFT;
		cs->read = msm_read_timer_count_shift;
		cs->mask = CLOCKSOURCE_MASK((32 - MSM_DGT_SHIFT));
	} else if (cpu_is_msm7x30()) {
		event_base = MSM_CSR_BASE + 0x04;
		source_base = MSM_CSR_BASE + 0x24;
		dgt_hz = 24576000 / 4;
	} else if (cpu_is_qsd8x50()) {
		event_base = MSM_CSR_BASE;
		source_base = MSM_CSR_BASE + 0x10;
		dgt_hz = 19200000 / 4;
	} else if (cpu_is_msm8x60() || cpu_is_msm8960()) {
		event_base = MSM_TMR_BASE + 0x04;
		/* CPU0's DG timer serves as the global clocksource. */
		source_base = MSM_TMR0_BASE + 0x24;
		/* Matches the DIV_4 programmed into DGT_CLK_CTL below. */
		dgt_hz = 27000000 / 4;
		writel_relaxed(DGT_CLK_CTL_DIV_4, MSM_TMR_BASE + DGT_CLK_CTL);
	} else
		BUG();

	/* Stop and clear the event timer before taking interrupts. */
	writel_relaxed(0, event_base + TIMER_ENABLE);
	writel_relaxed(0, event_base + TIMER_CLEAR);
	writel_relaxed(~0, event_base + TIMER_MATCH_VAL);
	ce->cpumask = cpumask_of(0);

	ce->irq = INT_GP_TIMER_EXP;
	clockevents_config_and_register(ce, GPT_HZ, 4, 0xffffffff);
	if (cpu_is_msm8x60() || cpu_is_msm8960()) {
		/* SMP SoCs: the GP timer interrupt is per-cpu (PPI). */
		msm_evt.percpu_evt = alloc_percpu(struct clock_event_device *);
		if (!msm_evt.percpu_evt) {
			pr_err("memory allocation failed for %s\n", ce->name);
			/* Clock events are lost, but still register the
			 * clocksource below. */
			goto err;
		}
		*__this_cpu_ptr(msm_evt.percpu_evt) = ce;
		res = request_percpu_irq(ce->irq, msm_timer_interrupt,
					 ce->name, msm_evt.percpu_evt);
		if (!res)
			enable_percpu_irq(ce->irq, 0);
	} else {
		msm_evt.evt = ce;
		res = request_irq(ce->irq, msm_timer_interrupt,
				  IRQF_TIMER | IRQF_NOBALANCING |
				  IRQF_TRIGGER_RISING, ce->name, &msm_evt.evt);
	}

	if (res)
		pr_err("request_irq failed for %s\n", ce->name);
err:
	/* Start the DG timer free-running and register the clocksource. */
	writel_relaxed(TIMER_ENABLE_EN, source_base + TIMER_ENABLE);
	res = clocksource_register_hz(cs, dgt_hz);
	if (res)
		pr_err("clocksource_register failed\n");
}
193
194#ifdef CONFIG_LOCAL_TIMERS
/*
 * Bring up the GP timer as a local clock event device on a secondary
 * CPU.  The boot CPU is already served by msm_clockevent.
 */
int __cpuinit local_timer_setup(struct clock_event_device *evt)
{
	/* CPU 0 was configured in msm_timer_init(); nothing to do. */
	if (!smp_processor_id())
		return 0;

	/* NOTE(review): assumes event_base registers are banked per-cpu
	 * on the SMP SoCs, so this touches only this CPU's timer —
	 * confirm against the SoC documentation. */
	writel_relaxed(0, event_base + TIMER_ENABLE);
	writel_relaxed(0, event_base + TIMER_CLEAR);
	writel_relaxed(~0, event_base + TIMER_MATCH_VAL);
	/* Mirror the boot CPU's event device configuration. */
	evt->irq = msm_clockevent.irq;
	evt->name = "local_timer";
	evt->features = msm_clockevent.features;
	evt->rating = msm_clockevent.rating;
	evt->set_mode = msm_timer_set_mode;
	evt->set_next_event = msm_timer_set_next_event;
	evt->shift = msm_clockevent.shift;
	evt->mult = div_sc(GPT_HZ, NSEC_PER_SEC, evt->shift);
	evt->max_delta_ns = clockevent_delta2ns(0xf0000000, evt);
	evt->min_delta_ns = clockevent_delta2ns(4, evt);

	/* Point this CPU's percpu dev_id slot at the new device, then
	 * register and unmask its interrupt. */
	*__this_cpu_ptr(msm_evt.percpu_evt) = evt;
	clockevents_register_device(evt);
	enable_percpu_irq(evt->irq, 0);
	return 0;
}
220
/* Tear down a secondary CPU's local timer: stop the hardware first,
 * then mask its per-cpu interrupt. */
void local_timer_stop(struct clock_event_device *evt)
{
	evt->set_mode(CLOCK_EVT_MODE_UNUSED, evt);
	disable_percpu_irq(evt->irq);
}
226#endif
227
/* System timer descriptor referenced by the MSM machine descriptions. */
struct sys_timer msm_timer = {
	.init = msm_timer_init
};
231