/*
 *  linux/arch/alpha/kernel/time.c
 */
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/param.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/ioport.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/bcd.h>
#include <linux/profile.h>
#include <linux/irq_work.h>

#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/hwrpb.h>
#include <asm/rtc.h>

#include <linux/mc146818rtc.h>
#include <linux/time.h>
#include <linux/timex.h>
#include <linux/clocksource.h>

#include "proto.h"
#include "irq_impl.h"

static int set_rtc_mmss(unsigned long);

DEFINE_SPINLOCK(rtc_lock);
EXPORT_SYMBOL(rtc_lock);

#define TICK_SIZE (tick_nsec / 1000)
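
/*
 * Shift amount by which scaled_ticks_per_cycle below is scaled.  Using
 * 48 fraction bits in the 64-bit value leaves 16 bits for HZ (see the
 * HZ > (1<<16) check in time_init).
 */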
#define FIX_SHIFT 48

/* Lump the static timekeeping variables together.  */
static struct {
        /* cycle counter value at the last timer interrupt */
        __u32 last_time;
        /* ticks/cycle * 2^48 */
        unsigned long scaled_ticks_per_cycle;
        /* fractional tick left over from the last interrupt */
        unsigned long partial_tick;
} state;

unsigned long est_cycle_freq;
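
/*
 * With CONFIG_IRQ_WORK, arch_irq_work_raise() only sets a per-CPU flag;
 * timer_interrupt() polls it and runs the queued work on the next tick.
 */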
#ifdef CONFIG_IRQ_WORK

DEFINE_PER_CPU(u8, irq_work_pending);

#define set_irq_work_pending_flag() __get_cpu_var(irq_work_pending) = 1
#define test_irq_work_pending()     __get_cpu_var(irq_work_pending)
#define clear_irq_work_pending()    __get_cpu_var(irq_work_pending) = 0

void arch_irq_work_raise(void)
{
        set_irq_work_pending_flag();
}

#else  /* CONFIG_IRQ_WORK */

#define test_irq_work_pending()     0
#define clear_irq_work_pending()

#endif /* CONFIG_IRQ_WORK */

/* Read the low 32 bits of the processor cycle counter (RPCC).  */
static inline __u32 rpcc(void)
{
        __u32 result;
        asm volatile ("rpcc %0" : "=r"(result));
        return result;
}

int update_persistent_clock(struct timespec now)
{
        return set_rtc_mmss(now.tv_sec);
}
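
/*
 * Read the current time of day from the CMOS RTC.  The two-digit year
 * is mapped onto an epoch guessed from its range, since different
 * console firmware variants store the year relative to different epochs.
 */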
void read_persistent_clock(struct timespec *ts)
{
        unsigned int year, mon, day, hour, min, sec, epoch;

        sec = CMOS_READ(RTC_SECONDS);
        min = CMOS_READ(RTC_MINUTES);
        hour = CMOS_READ(RTC_HOURS);
        day = CMOS_READ(RTC_DAY_OF_MONTH);
        mon = CMOS_READ(RTC_MONTH);
        year = CMOS_READ(RTC_YEAR);

        if (!(CMOS_READ(RTC_CONTROL) & RTC_DM_BINARY) || RTC_ALWAYS_BCD) {
                sec = bcd2bin(sec);
                min = bcd2bin(min);
                hour = bcd2bin(hour);
                day = bcd2bin(day);
                mon = bcd2bin(mon);
                year = bcd2bin(year);
        }

        /* PC-like is standard; used for year >= 70 */
        epoch = 1900;
        if (year < 20)
                epoch = 2000;
        else if (year >= 20 && year < 48)
                /* NT epoch */
                epoch = 1980;
        else if (year >= 48 && year < 70)
                /* Digital UNIX epoch */
                epoch = 1952;

        printk(KERN_INFO "Using epoch = %d\n", epoch);

        if ((year += epoch) < 1970)
                year += 100;

        ts->tv_sec = mktime(year, mon, day, hour, min, sec);
        ts->tv_nsec = 0;
}
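
/*
 * timer_interrupt() needs to keep up the real-time clock, as well as
 * call the "xtime_update()" routine every clocktick.
 */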
irqreturn_t timer_interrupt(int irq, void *dev)
{
        unsigned long delta;
        __u32 now;
        long nticks;

#ifndef CONFIG_SMP
        /* Not SMP, do kernel PC profiling here.  */
        profile_tick(CPU_PROFILING);
#endif

        /*
         * Calculate how many ticks have passed since the last update,
         * including any previous partial leftover.  Save any resulting
         * fraction for the next pass.
         */
        now = rpcc();
        delta = now - state.last_time;
        state.last_time = now;
        delta = delta * state.scaled_ticks_per_cycle + state.partial_tick;
        state.partial_tick = delta & ((1UL << FIX_SHIFT) - 1);
        nticks = delta >> FIX_SHIFT;

        if (nticks)
                xtime_update(nticks);

        if (test_irq_work_pending()) {
                clear_irq_work_pending();
                irq_work_run();
        }

#ifndef CONFIG_SMP
        while (nticks--)
                update_process_times(user_mode(get_irq_regs()));
#endif

        return IRQ_HANDLED;
}
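
/*
 * Common RTC setup, used as the init_rtc machine-vector hook on most
 * platforms: program the RTC for 1024 Hz periodic interrupts and enable
 * them, then reprogram the i8253 PIT counters (system timer and speaker).
 */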
void __init
common_init_rtc(void)
{
        unsigned char x;

        /* Reset periodic interrupt frequency.  Values other than the
           known-good ones below are left alone.  */
        x = CMOS_READ(RTC_FREQ_SELECT) & 0x3f;
        if (x != 0x26 && x != 0x25 && x != 0x19 && x != 0x06) {
                printk("Setting RTC_FREQ to 1024 Hz (%x)\n", x);
                CMOS_WRITE(0x26, RTC_FREQ_SELECT);
        }

        /* Turn on periodic interrupts.  */
        x = CMOS_READ(RTC_CONTROL);
        if (!(x & RTC_PIE)) {
                printk("Turning on RTC interrupts.\n");
                x |= RTC_PIE;
                x &= ~(RTC_AIE | RTC_UIE);
                CMOS_WRITE(x, RTC_CONTROL);
        }
        (void) CMOS_READ(RTC_INTR_FLAGS);

        outb(0x36, 0x43);       /* PIT counter 0: mode 3, count 0x10000 */
        outb(0x00, 0x40);
        outb(0x00, 0x40);

        outb(0xb6, 0x43);       /* PIT counter 2 (speaker): mode 3 */
        outb(0x31, 0x42);
        outb(0x13, 0x42);

        init_rtc_irq();
}
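
/* Default RTC get/set routines; thin wrappers around the generic
   __get_rtc_time/__set_rtc_time helpers.  */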
unsigned int common_get_rtc_time(struct rtc_time *time)
{
        return __get_rtc_time(time);
}

int common_set_rtc_time(struct rtc_time *time)
{
        return __set_rtc_time(time);
}
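
/* Validate a computed cycle counter result against the known bounds
   for the given processor core.  There's too much brokenness in the
   way of timing hardware for any one method to work everywhere.  :-(

   Return 0 if the result cannot be trusted, otherwise return the
   argument.  */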
static unsigned long __init
validate_cc_value(unsigned long cc)
{
        static struct bounds {
                unsigned int min, max;
        } cpu_hz[] __initdata = {
                [EV3_CPU]    = {   50000000,  200000000 },
                [EV4_CPU]    = {  100000000,  300000000 },
                [LCA4_CPU]   = {  100000000,  300000000 },
                [EV45_CPU]   = {  200000000,  300000000 },
                [EV5_CPU]    = {  250000000,  433000000 },
                [EV56_CPU]   = {  333000000,  667000000 },
                [PCA56_CPU]  = {  400000000,  600000000 },
                [PCA57_CPU]  = {  500000000,  600000000 },
                [EV6_CPU]    = {  466000000,  600000000 },
                [EV67_CPU]   = {  600000000,  750000000 },
                [EV68AL_CPU] = {  750000000,  940000000 },
                [EV68CB_CPU] = { 1000000000, 1333333333 },
                [EV68CX_CPU] = { 1000000000, 1700000000 },
                [EV69_CPU]   = { 1000000000, 1700000000 },
                [EV7_CPU]    = {  800000000, 1400000000 },
                [EV79_CPU]   = { 1000000000, 2000000000 },
        };

        /* Allow for some drift in the crystal.  10MHz is more than enough.  */
        const unsigned int deviation = 10000000;

        struct percpu_struct *cpu;
        unsigned int index;

        cpu = (struct percpu_struct *)((char *)hwrpb + hwrpb->processor_offset);
        index = cpu->type & 0xffffffff;

        /* If index out of bounds, no way to validate.  */
        if (index >= ARRAY_SIZE(cpu_hz))
                return cc;

        /* If index contains no data, no way to validate.  */
        if (cpu_hz[index].max == 0)
                return cc;

        if (cc < cpu_hz[index].min - deviation
            || cc > cpu_hz[index].max + deviation)
                return 0;

        return cc;
}
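
/*
 * Calibrate the CPU cycle counter against the legacy i8254 PIT:
 * program channel 2 for a single CALIBRATE_LATCH-cycle countdown and
 * count how many processor cycles elapse until its output goes high.
 */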
#define CALIBRATE_LATCH 0xffff
#define TIMEOUT_COUNT   0x100000

static unsigned long __init
calibrate_cc_with_pit(void)
{
        int cc, count = 0;

        /* Set the Gate high, disable speaker.  */
        outb((inb(0x61) & ~0x02) | 0x01, 0x61);

        /*
         * Program CTC channel 2 for mode 0 (interrupt on terminal count),
         * binary count, and load CALIBRATE_LATCH (LSB then MSB) to begin
         * the countdown.
         */
        outb(0xb0, 0x43);
        outb(CALIBRATE_LATCH & 0xff, 0x42);     /* LSB of count */
        outb(CALIBRATE_LATCH >> 8, 0x42);       /* MSB of count */

        cc = rpcc();
        do {
                count++;
        } while ((inb(0x61) & 0x20) == 0 && count < TIMEOUT_COUNT);
        cc = rpcc() - cc;

        /* The output never toggled, or it was already set before we
           could poll it: the result is unusable.  */
        if (count <= 1 || count == TIMEOUT_COUNT)
                return 0;

        return ((long)cc * PIT_TICK_RATE) / (CALIBRATE_LATCH + 1);
}
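
/*
 * Wait for the RTC update-in-progress (UIP) bit to rise and fall again,
 * i.e. for the start of a new CMOS second, then sample the cycle
 * counter.  Two such samples taken a second apart estimate the cycle
 * frequency.
 */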
static unsigned long __init
rpcc_after_update_in_progress(void)
{
        do { } while (!(CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP));
        do { } while (CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP);

        return rpcc();
}

#ifndef CONFIG_SMP
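/* The per-CPU cycle counters are not synchronized across processors,
   so the rpcc clocksource is only usable on non-SMP configurations.  */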
static u64 read_rpcc(struct clocksource *cs)
{
        u64 ret = (u64)rpcc();
        return ret;
}

static struct clocksource clocksource_rpcc = {
        .name   = "rpcc",
        .rating = 300,
        .read   = read_rpcc,
        .mask   = CLOCKSOURCE_MASK(32),
        .flags  = CLOCK_SOURCE_IS_CONTINUOUS,
};

static inline void register_rpcc_clocksource(long cycle_freq)
{
        clocksource_register_hz(&clocksource_rpcc, cycle_freq);
}
#else /* !CONFIG_SMP */
static inline void register_rpcc_clocksource(long cycle_freq)
{
}
#endif /* !CONFIG_SMP */
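
/*
 * Determine the CPU cycle frequency, preferring a measured estimate
 * when the HWRPB-supplied value looks bogus, then initialize the tick
 * accounting state, register the clocksource, and start the platform
 * timer source.
 */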
void __init
time_init(void)
{
        unsigned int cc1, cc2;
        unsigned long cycle_freq, tolerance;
        long diff;

        /* Calibrate CPU clock -- attempt #1.  */
        if (!est_cycle_freq)
                est_cycle_freq = validate_cc_value(calibrate_cc_with_pit());

        cc1 = rpcc();

        /* Calibrate CPU clock -- attempt #2.  */
        if (!est_cycle_freq) {
                cc1 = rpcc_after_update_in_progress();
                cc2 = rpcc_after_update_in_progress();
                est_cycle_freq = validate_cc_value(cc2 - cc1);
                cc1 = cc2;
        }

        cycle_freq = hwrpb->cycle_freq;
        if (est_cycle_freq) {
                /* If the given value is within 250 ppm of what we
                   calculated, accept it.  Otherwise, use what we found.  */
                tolerance = cycle_freq / 4000;
                diff = cycle_freq - est_cycle_freq;
                if (diff < 0)
                        diff = -diff;
                if ((unsigned long)diff > tolerance) {
                        cycle_freq = est_cycle_freq;
                        printk("HWRPB cycle frequency bogus.  "
                               "Estimated %lu Hz\n", cycle_freq);
                } else {
                        est_cycle_freq = 0;
                }
        } else if (!validate_cc_value(cycle_freq)) {
                printk("HWRPB cycle frequency bogus, "
                       "and unable to estimate a proper value!\n");
        }

        /* Allow the RTC to settle after the calibration reads above;
           this is roughly 2 ms on a 500 MHz box.  */
        __delay(1000000);

        /* FIX_SHIFT leaves only 16 bits for HZ; force a link error
           if HZ cannot fit.  */
        if (HZ > (1<<16)) {
                extern void __you_loose (void);
                __you_loose();
        }

        register_rpcc_clocksource(cycle_freq);

        state.last_time = cc1;
        state.scaled_ticks_per_cycle
                = ((unsigned long) HZ << FIX_SHIFT) / cycle_freq;
        state.partial_tick = 0L;

        /* Startup the timer source.  */
        alpha_mv.init_rtc();
}
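
/*
 * In order to set the CMOS clock precisely, set_rtc_mmss has to be
 * called 500 ms after the second nowtime has started, because when
 * nowtime is written into the registers of the CMOS clock, it will
 * jump to the next second precisely 500 ms later.  Check the Motorola
 * MC146818A or Dallas DS12887 data sheet for details.
 *
 * BUG: This routine does not handle hour overflow properly; it just
 *      sets the minutes.  Usually you won't notice until after reboot!
 */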
static int
set_rtc_mmss(unsigned long nowtime)
{
        int retval = 0;
        int real_seconds, real_minutes, cmos_minutes;
        unsigned char save_control, save_freq_select;

        spin_lock(&rtc_lock);

        /* Tell the clock it's being set.  */
        save_control = CMOS_READ(RTC_CONTROL);
        CMOS_WRITE((save_control|RTC_SET), RTC_CONTROL);

        /* Stop and reset prescaler.  */
        save_freq_select = CMOS_READ(RTC_FREQ_SELECT);
        CMOS_WRITE((save_freq_select|RTC_DIV_RESET2), RTC_FREQ_SELECT);

        cmos_minutes = CMOS_READ(RTC_MINUTES);
        if (!(save_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD)
                cmos_minutes = bcd2bin(cmos_minutes);

        /*
         * Since we're only adjusting minutes and seconds, don't interfere
         * with hour overflow.  This avoids messing with unknown time zones
         * but requires your RTC not to be off by more than 15 minutes.
         */
        real_seconds = nowtime % 60;
        real_minutes = nowtime / 60;
        if (((abs(real_minutes - cmos_minutes) + 15)/30) & 1) {
                /* correct for half hour time zone */
                real_minutes += 30;
        }
        real_minutes %= 60;

        if (abs(real_minutes - cmos_minutes) < 30) {
                if (!(save_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD) {
                        real_seconds = bin2bcd(real_seconds);
                        real_minutes = bin2bcd(real_minutes);
                }
                CMOS_WRITE(real_seconds, RTC_SECONDS);
                CMOS_WRITE(real_minutes, RTC_MINUTES);
        } else {
                printk_once(KERN_NOTICE
                            "set_rtc_mmss: can't update from %d to %d\n",
                            cmos_minutes, real_minutes);
                retval = -1;
        }

        /*
         * The control and frequency-select registers have to be restored
         * exactly in this order, otherwise the DS12887 (popular MC146818A
         * clone with integrated battery and quartz) will not reset the
         * oscillator and will not update precisely 500 ms later.
         */
        CMOS_WRITE(save_control, RTC_CONTROL);
        CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT);
        spin_unlock(&rtc_lock);

        return retval;
}