/*
 * Timer device implementation for SGI SN platforms.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (c) 2001-2006 Silicon Graphics, Inc.  All rights reserved.
 *
 * This driver exports an API that should be supportable by any HPET or IA-PC
 * multimedia timer.  The code below is currently specific to the SGI Altix
 * SHub RTC, however.
 */

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/ioctl.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/mmtimer.h>
#include <linux/miscdevice.h>
#include <linux/posix-timers.h>
#include <linux/interrupt.h>
#include <linux/time.h>
#include <linux/math64.h>
#include <linux/mutex.h>
#include <linux/slab.h>

#include <asm/uaccess.h>
#include <asm/sn/addrs.h>
#include <asm/sn/intr.h>
#include <asm/sn/shub_mmr.h>
#include <asm/sn/nodepda.h>
#include <asm/sn/shubio.h>

MODULE_AUTHOR("Jesse Barnes <jbarnes@sgi.com>");
MODULE_DESCRIPTION("SGI Altix RTC Timer");
MODULE_LICENSE("GPL");

/* name of the device, usually in /dev */
#define MMTIMER_NAME "mmtimer"
#define MMTIMER_DESC "SGI Altix RTC Timer"
#define MMTIMER_VERSION "2.1"

#define RTC_BITS 55	/* 55 bits for this implementation */

static struct k_clock sgi_clock;

extern unsigned long sn_rtc_cycles_per_second;

#define RTC_COUNTER_ADDR	((long *)LOCAL_MMR_ADDR(SH_RTC))

#define rtc_time()		(*RTC_COUNTER_ADDR)

static DEFINE_MUTEX(mmtimer_mutex);
static long mmtimer_ioctl(struct file *file, unsigned int cmd,
			  unsigned long arg);
static int mmtimer_mmap(struct file *file, struct vm_area_struct *vma);

/*
 * Period in femtoseconds (10^-15 s)
 */
static unsigned long mmtimer_femtoperiod = 0;

static const struct file_operations mmtimer_fops = {
	.owner = THIS_MODULE,
	.mmap = mmtimer_mmap,
	.unlocked_ioctl = mmtimer_ioctl,
	.llseek = noop_llseek,
};
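
/*
 * Example (illustrative numbers, not a measured value): with an RTC
 * running at 25 MHz, mmtimer_femtoperiod computes to
 * 1e15 / 25e6 = 4e7 femtoseconds (40 ns) per tick.
 */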

/*
 * The SHub provides RTC comparison registers RTC1-RTC3 per node; each
 * can raise an interrupt when the main RTC counter passes its
 * comparator value.  The pending bits for all three live in
 * SH_EVENT_OCCURRED, starting at the RTC1 bit.
 */

/* Check whether an RTC interrupt is pending for the given comparator */
static int mmtimer_int_pending(int comparator)
{
	if (HUB_L((unsigned long *)LOCAL_MMR_ADDR(SH_EVENT_OCCURRED)) &
			SH_EVENT_OCCURRED_RTC1_INT_MASK << comparator)
		return 1;
	else
		return 0;
}

/* Clear the RTC interrupt pending bit for the given comparator */
static void mmtimer_clr_int_pending(int comparator)
{
	HUB_S((u64 *)LOCAL_MMR_ADDR(SH_EVENT_OCCURRED_ALIAS),
		SH_EVENT_OCCURRED_RTC1_INT_MASK << comparator);
}

/* Program comparator RTC1: deliver an interrupt to "cpu" at "expires" */
static void mmtimer_setup_int_0(int cpu, u64 expires)
{
	u64 val;

	/* Disable interrupt */
	HUB_S((u64 *)LOCAL_MMR_ADDR(SH_RTC1_INT_ENABLE), 0UL);

	/* Initialize comparator value */
	HUB_S((u64 *)LOCAL_MMR_ADDR(SH_INT_CMPB), -1L);

	/* Clear pending bit */
	mmtimer_clr_int_pending(0);

	val = ((u64)SGI_MMTIMER_VECTOR << SH_RTC1_INT_CONFIG_IDX_SHFT) |
		((u64)cpu_physical_id(cpu) <<
			SH_RTC1_INT_CONFIG_PID_SHFT);

	/* Set configuration */
	HUB_S((u64 *)LOCAL_MMR_ADDR(SH_RTC1_INT_CONFIG), val);

	/* Enable RTC interrupts */
	HUB_S((u64 *)LOCAL_MMR_ADDR(SH_RTC1_INT_ENABLE), 1UL);

	/* Set the real comparator value */
	HUB_S((u64 *)LOCAL_MMR_ADDR(SH_INT_CMPB), expires);
}

/* Program comparator RTC2: same sequence as mmtimer_setup_int_0() */
static void mmtimer_setup_int_1(int cpu, u64 expires)
{
	u64 val;

	HUB_S((u64 *)LOCAL_MMR_ADDR(SH_RTC2_INT_ENABLE), 0UL);

	HUB_S((u64 *)LOCAL_MMR_ADDR(SH_INT_CMPC), -1L);

	mmtimer_clr_int_pending(1);

	val = ((u64)SGI_MMTIMER_VECTOR << SH_RTC2_INT_CONFIG_IDX_SHFT) |
		((u64)cpu_physical_id(cpu) <<
			SH_RTC2_INT_CONFIG_PID_SHFT);

	HUB_S((u64 *)LOCAL_MMR_ADDR(SH_RTC2_INT_CONFIG), val);

	HUB_S((u64 *)LOCAL_MMR_ADDR(SH_RTC2_INT_ENABLE), 1UL);

	HUB_S((u64 *)LOCAL_MMR_ADDR(SH_INT_CMPC), expires);
}

/* Program comparator RTC3: same sequence as mmtimer_setup_int_0() */
static void mmtimer_setup_int_2(int cpu, u64 expires)
{
	u64 val;

	HUB_S((u64 *)LOCAL_MMR_ADDR(SH_RTC3_INT_ENABLE), 0UL);

	HUB_S((u64 *)LOCAL_MMR_ADDR(SH_INT_CMPD), -1L);

	mmtimer_clr_int_pending(2);

	val = ((u64)SGI_MMTIMER_VECTOR << SH_RTC3_INT_CONFIG_IDX_SHFT) |
		((u64)cpu_physical_id(cpu) <<
			SH_RTC3_INT_CONFIG_PID_SHFT);

	HUB_S((u64 *)LOCAL_MMR_ADDR(SH_RTC3_INT_CONFIG), val);

	HUB_S((u64 *)LOCAL_MMR_ADDR(SH_RTC3_INT_ENABLE), 1UL);

	HUB_S((u64 *)LOCAL_MMR_ADDR(SH_INT_CMPD), expires);
}

/*
 * This function must be called with interrupts disabled and preemption
 * off so that the comparator is programmed in a deterministic time
 * frame.  Returns nonzero if the interrupt was set up successfully.
 */
static int mmtimer_setup(int cpu, int comparator, unsigned long expires,
	u64 *set_completion_time)
{
	switch (comparator) {
	case 0:
		mmtimer_setup_int_0(cpu, expires);
		break;
	case 1:
		mmtimer_setup_int_1(cpu, expires);
		break;
	case 2:
		mmtimer_setup_int_2(cpu, expires);
		break;
	}
	/* We might've missed our expiration time */
	*set_completion_time = rtc_time();
	if (*set_completion_time <= expires)
		return 1;

	/*
	 * If an interrupt is already pending then it's okay;
	 * if not then we failed
	 */
	return mmtimer_int_pending(comparator);
}
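
/*
 * Usage sketch (illustrative, not part of the driver): callers retry
 * with a later expiration whenever mmtimer_setup() fails, e.g.
 *
 *	u64 done;
 *	unsigned long exp = rtc_time() + delta;
 *
 *	while (!mmtimer_setup(cpu, COMPARATOR, exp, &done))
 *		exp = done + slack;	// aim past the missed deadline
 *
 * where "delta" and "slack" are hypothetical values;
 * mmtimer_set_next_timer() below implements this pattern with an
 * exponential backoff on the slack.
 */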

/* Disable the RTC interrupt for a comparator, locally or on a remote node */
static int mmtimer_disable_int(long nasid, int comparator)
{
	switch (comparator) {
	case 0:
		if (nasid == -1)
			HUB_S((u64 *)LOCAL_MMR_ADDR(SH_RTC1_INT_ENABLE), 0UL);
		else
			REMOTE_HUB_S(nasid, SH_RTC1_INT_ENABLE, 0UL);
		break;
	case 1:
		if (nasid == -1)
			HUB_S((u64 *)LOCAL_MMR_ADDR(SH_RTC2_INT_ENABLE), 0UL);
		else
			REMOTE_HUB_S(nasid, SH_RTC2_INT_ENABLE, 0UL);
		break;
	case 2:
		if (nasid == -1)
			HUB_S((u64 *)LOCAL_MMR_ADDR(SH_RTC3_INT_ENABLE), 0UL);
		else
			REMOTE_HUB_S(nasid, SH_RTC3_INT_ENABLE, 0UL);
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

#define COMPARATOR	1	/* The comparator to use */

#define TIMER_OFF	0xbadcabLL	/* Timer is not setup */
#define TIMER_SET	0	/* Comparator is set for this timer */

#define MMTIMER_INTERVAL_RETRY_INCREMENT_DEFAULT 40

/* There is one of these for each timer */
struct mmtimer {
	struct rb_node list;
	struct k_itimer *timer;
	int cpu;
};

/* Per-node timer bookkeeping, protected by "lock" */
struct mmtimer_node {
	spinlock_t lock ____cacheline_aligned;
	struct rb_root timer_head;
	struct rb_node *next;
	struct tasklet_struct tasklet;
};
static struct mmtimer_node *timers;

static unsigned mmtimer_interval_retry_increment =
	MMTIMER_INTERVAL_RETRY_INCREMENT_DEFAULT;
module_param(mmtimer_interval_retry_increment, uint, 0644);
MODULE_PARM_DESC(mmtimer_interval_retry_increment,
	"RTC ticks to add to expiration on interval retry (default 40)");

/*
 * Add a new mmtimer struct to the node's mmtimer list.
 * This function assumes the struct mmtimer_node is locked.
 */
static void mmtimer_add_list(struct mmtimer *n)
{
	int nodeid = n->timer->it.mmtimer.node;
	unsigned long expires = n->timer->it.mmtimer.expires;
	struct rb_node **link = &timers[nodeid].timer_head.rb_node;
	struct rb_node *parent = NULL;
	struct mmtimer *x;

	/*
	 * Find the right place in the rbtree:
	 */
	while (*link) {
		parent = *link;
		x = rb_entry(parent, struct mmtimer, list);

		if (expires < x->timer->it.mmtimer.expires)
			link = &(*link)->rb_left;
		else
			link = &(*link)->rb_right;
	}

	/*
	 * Insert the timer to the rbtree and check whether it
	 * replaces the first pending timer
	 */
	rb_link_node(&n->list, parent, link);
	rb_insert_color(&n->list, &timers[nodeid].timer_head);

	if (!timers[nodeid].next || expires < rb_entry(timers[nodeid].next,
			struct mmtimer, list)->timer->it.mmtimer.expires)
		timers[nodeid].next = &n->list;
}

/*
 * Schedule the next periodic interrupt.  This function will attempt
 * to schedule a periodic interrupt later if necessary.  If the
 * scheduling of an interrupt fails then the time to skip is lengthened
 * exponentially in order to ensure that the next interrupt
 * can be properly scheduled.
 */
static void mmtimer_set_next_timer(int nodeid)
{
	struct mmtimer_node *n = &timers[nodeid];
	struct mmtimer *x;
	struct k_itimer *t;
	u64 expires, exp, set_completion_time;
	int i;

restart:
	if (n->next == NULL)
		return;

	x = rb_entry(n->next, struct mmtimer, list);
	t = x->timer;
	if (!t->it.mmtimer.incr) {
		/* Not an interval timer */
		if (!mmtimer_setup(x->cpu, COMPARATOR,
					t->it.mmtimer.expires,
					&set_completion_time)) {
			/* Late setup, fire now */
			tasklet_schedule(&n->tasklet);
		}
		return;
	}

	/* Interval timer */
	i = 0;
	expires = exp = t->it.mmtimer.expires;
	while (!mmtimer_setup(x->cpu, COMPARATOR, expires,
				&set_completion_time)) {
		int to;

		i++;
		expires = set_completion_time +
				mmtimer_interval_retry_increment + (1 << i);
		/* Calculate overruns as we go. */
		to = ((u64)(expires - exp) / t->it.mmtimer.incr);
		if (to) {
			t->it_overrun += to;
			t->it.mmtimer.expires += t->it.mmtimer.incr * to;
			exp = t->it.mmtimer.expires;
		}
		if (i > 20) {
			printk(KERN_ALERT "mmtimer: cannot reschedule timer\n");
			t->it.mmtimer.clock = TIMER_OFF;
			n->next = rb_next(&x->list);
			rb_erase(&x->list, &n->timer_head);
			kfree(x);
			goto restart;
		}
	}
}
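
/*
 * Backoff example (illustrative numbers): with the default retry
 * increment of 40 RTC ticks, successive retries target
 * set_completion_time + 40 + 2, then + 40 + 4, + 40 + 8, and so on,
 * so the window roughly doubles each attempt.  Expirations skipped
 * along the way are folded into t->it_overrun rather than delivered as
 * a burst of signals.  After 20 failed attempts (about a million ticks
 * of slack) the timer is declared unschedulable and turned off.
 */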

/**
 * mmtimer_ioctl - ioctl interface for /dev/mmtimer
 * @file: file structure for the device
 * @cmd: command to execute
 * @arg: optional argument to command
 *
 * Executes the command specified by @cmd.  Returns 0 for success, < 0
 * for failure.
 *
 * Valid commands:
 *
 * %MMTIMER_GETOFFSET - Should return the offset (relative to the start
 * of the page where the registers are mapped) for the counter in
 * question.
 *
 * %MMTIMER_GETRES - Returns the resolution of the clock in femto
 * (10^-15) seconds
 *
 * %MMTIMER_GETFREQ - Copies the frequency of the clock in Hz to the
 * address specified by @arg
 *
 * %MMTIMER_GETBITS - Returns the number of bits in the clock's counter
 *
 * %MMTIMER_MMAPAVAIL - Returns 1 if the registers can be mmap'd into
 * userspace
 *
 * %MMTIMER_GETCOUNTER - Gets the current value of the timer counter
 */
static long mmtimer_ioctl(struct file *file, unsigned int cmd,
						unsigned long arg)
{
	int ret = 0;

	mutex_lock(&mmtimer_mutex);

	switch (cmd) {
	case MMTIMER_GETOFFSET:	/* offset of the counter */
		/*
		 * Only supported when the kernel page size is no larger
		 * than the 64k page the SN RTC registers live on.
		 */
		if (PAGE_SIZE <= (1 << 16))
			ret = (((long)RTC_COUNTER_ADDR) & (PAGE_SIZE-1)) / 8;
		else
			ret = -ENOSYS;
		break;

	case MMTIMER_GETRES: /* resolution of the clock in 10^-15 s */
		if (copy_to_user((unsigned long __user *)arg,
				&mmtimer_femtoperiod, sizeof(unsigned long)))
			ret = -EFAULT;
		break;

	case MMTIMER_GETFREQ: /* frequency in Hz */
		if (copy_to_user((unsigned long __user *)arg,
				&sn_rtc_cycles_per_second,
				sizeof(unsigned long)))
			ret = -EFAULT;
		break;

	case MMTIMER_GETBITS: /* number of bits in the clock's counter */
		ret = RTC_BITS;
		break;

	case MMTIMER_MMAPAVAIL: /* can we mmap the clock into userspace? */
		ret = (PAGE_SIZE <= (1 << 16)) ? 1 : 0;
		break;

	case MMTIMER_GETCOUNTER:
		if (copy_to_user((unsigned long __user *)arg,
				RTC_COUNTER_ADDR, sizeof(unsigned long)))
			ret = -EFAULT;
		break;
	default:
		ret = -ENOTTY;
		break;
	}
	mutex_unlock(&mmtimer_mutex);
	return ret;
}
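
/*
 * Userspace sketch (illustrative only; assumes the device node is
 * /dev/mmtimer and <linux/mmtimer.h> is available):
 *
 *	int fd = open("/dev/mmtimer", O_RDONLY);
 *	unsigned long freq, counter;
 *
 *	ioctl(fd, MMTIMER_GETFREQ, &freq);	 // RTC frequency in Hz
 *	ioctl(fd, MMTIMER_GETCOUNTER, &counter); // current counter value
 *	long bits = ioctl(fd, MMTIMER_GETBITS, 0); // counter width (55)
 */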

/**
 * mmtimer_mmap - maps the clock's registers into userspace
 * @file: file structure for the device
 * @vma: VMA to map the registers into
 *
 * Calls remap_pfn_range() to map the clock's registers into
 * the calling process' address space.
 */
static int mmtimer_mmap(struct file *file, struct vm_area_struct *vma)
{
	unsigned long mmtimer_addr;

	if (vma->vm_end - vma->vm_start != PAGE_SIZE)
		return -EINVAL;

	if (vma->vm_flags & VM_WRITE)
		return -EPERM;

	if (PAGE_SIZE > (1 << 16))
		return -ENOSYS;

	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	mmtimer_addr = __pa(RTC_COUNTER_ADDR);
	mmtimer_addr &= ~(PAGE_SIZE - 1);
	mmtimer_addr &= 0xfffffffffffffffUL;

	if (remap_pfn_range(vma, vma->vm_start, mmtimer_addr >> PAGE_SHIFT,
					PAGE_SIZE, vma->vm_page_prot)) {
		printk(KERN_ERR "remap_pfn_range failed in mmtimer.c\n");
		return -EAGAIN;
	}

	return 0;
}
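
/*
 * Userspace sketch (illustrative only): once mapped read-only, the
 * counter can be sampled without a system call:
 *
 *	long off = ioctl(fd, MMTIMER_GETOFFSET, 0); // in 8-byte words
 *	volatile unsigned long *page =
 *		mmap(NULL, getpagesize(), PROT_READ, MAP_SHARED, fd, 0);
 *	unsigned long now = page[off];	// current RTC value
 */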

static struct miscdevice mmtimer_miscdev = {
	.minor = SGI_MMTIMER,
	.name = MMTIMER_NAME,
	.fops = &mmtimer_fops
};

static struct timespec sgi_clock_offset;
static int sgi_clock_period;

/*
 * Posix Timer Interface
 */

static int sgi_clock_get(clockid_t clockid, struct timespec *tp)
{
	u64 nsec;

	nsec = rtc_time() * sgi_clock_period
			+ sgi_clock_offset.tv_nsec;
	*tp = ns_to_timespec(nsec);
	tp->tv_sec += sgi_clock_offset.tv_sec;
	return 0;
}

static int sgi_clock_set(const clockid_t clockid, const struct timespec *tp)
{
	u64 nsec;
	u32 rem;

	nsec = rtc_time() * sgi_clock_period;

	sgi_clock_offset.tv_sec = tp->tv_sec - div_u64_rem(nsec, NSEC_PER_SEC, &rem);

	/* Split the offset so that tv_nsec stays in [0, NSEC_PER_SEC) */
	if (rem <= tp->tv_nsec)
		sgi_clock_offset.tv_nsec = tp->tv_nsec - rem;
	else {
		sgi_clock_offset.tv_nsec = tp->tv_nsec + NSEC_PER_SEC - rem;
		sgi_clock_offset.tv_sec--;
	}
	return 0;
}
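
/*
 * Worked example (illustrative numbers): if the RTC currently amounts
 * to 7.3 s (whole seconds 7, rem = 300000000 ns) and the caller sets
 * the clock to { 100 s, 100000000 ns }, the stored offset becomes
 * { 100 - 7 - 1, 100000000 + 1000000000 - 300000000 } =
 * { 92 s, 800000000 ns }, and sgi_clock_get() then reads back 100.1 s.
 */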

/**
 * mmtimer_interrupt - timer interrupt handler
 * @irq: irq received
 * @dev_id: device the irq came from
 *
 * Called when one of the comparators matches the counter.  Schedules
 * the per-node tasklet, which sends signals to processes that have
 * requested them.
 *
 * This interrupt runs in interrupt context on the SHub, so it is safe
 * to access local SHub registers here.
 */
static irqreturn_t
mmtimer_interrupt(int irq, void *dev_id)
{
	unsigned long expires = 0;
	int result = IRQ_NONE;
	unsigned indx = cpu_to_node(smp_processor_id());
	struct mmtimer *base;

	spin_lock(&timers[indx].lock);
	/*
	 * Test next directly: rb_entry() on a NULL node would only
	 * happen to yield NULL because "list" is the first member.
	 */
	if (timers[indx].next == NULL) {
		spin_unlock(&timers[indx].lock);
		return result;
	}
	base = rb_entry(timers[indx].next, struct mmtimer, list);

	if (base->cpu == smp_processor_id()) {
		if (base->timer)
			expires = base->timer->it.mmtimer.expires;
		/* expires test won't work with shared irqs */
		if ((mmtimer_int_pending(COMPARATOR) > 0) ||
			(expires && (expires <= rtc_time()))) {
			mmtimer_clr_int_pending(COMPARATOR);
			tasklet_schedule(&timers[indx].tasklet);
			result = IRQ_HANDLED;
		}
	}
	spin_unlock(&timers[indx].lock);
	return result;
}

/* Send the signal for an expired timer and re-arm periodic timers */
static void mmtimer_tasklet(unsigned long data)
{
	int nodeid = data;
	struct mmtimer_node *mn = &timers[nodeid];
	struct mmtimer *x;
	struct k_itimer *t;
	unsigned long flags;

	/* Send signal and deal with periodic signals */
	spin_lock_irqsave(&mn->lock, flags);
	if (!mn->next)
		goto out;

	x = rb_entry(mn->next, struct mmtimer, list);
	t = x->timer;

	if (t->it.mmtimer.clock == TIMER_OFF)
		goto out;

	t->it_overrun = 0;

	mn->next = rb_next(&x->list);
	rb_erase(&x->list, &mn->timer_head);

	if (posix_timer_event(t, 0) != 0)
		t->it_overrun++;

	if (t->it.mmtimer.incr) {
		t->it.mmtimer.expires += t->it.mmtimer.incr;
		mmtimer_add_list(x);
	} else {
		/* Ensure we don't false trigger in mmtimer_interrupt */
		t->it.mmtimer.clock = TIMER_OFF;
		t->it.mmtimer.expires = 0;
		kfree(x);
	}

	/* Set comparator for next timer, if there is one */
	mmtimer_set_next_timer(nodeid);

	t->it_overrun_last = t->it_overrun;
out:
	spin_unlock_irqrestore(&mn->lock, flags);
}

static int sgi_timer_create(struct k_itimer *timer)
{
	/* Insure that a newly created timer is off */
	timer->it.mmtimer.clock = TIMER_OFF;
	return 0;
}

/*
 * This does not really delete a timer.  It just insures
 * that the timer is not active
 *
 * Assumption: it_lock is already held with irq's disabled
 */
static int sgi_timer_del(struct k_itimer *timr)
{
	cnodeid_t nodeid = timr->it.mmtimer.node;
	unsigned long irqflags;

	spin_lock_irqsave(&timers[nodeid].lock, irqflags);
	if (timr->it.mmtimer.clock != TIMER_OFF) {
		unsigned long expires = timr->it.mmtimer.expires;
		struct rb_node *n = timers[nodeid].timer_head.rb_node;
		struct mmtimer *uninitialized_var(t);
		int r = 0;

		timr->it.mmtimer.clock = TIMER_OFF;
		timr->it.mmtimer.expires = 0;

		/* Walk the rbtree, keyed by expiration, to find this timer */
		while (n) {
			t = rb_entry(n, struct mmtimer, list);
			if (t->timer == timr)
				break;

			if (expires < t->timer->it.mmtimer.expires)
				n = n->rb_left;
			else
				n = n->rb_right;
		}

		if (!n) {
			spin_unlock_irqrestore(&timers[nodeid].lock, irqflags);
			return 0;
		}

		if (timers[nodeid].next == n) {
			timers[nodeid].next = rb_next(n);
			r = 1;
		}

		rb_erase(n, &timers[nodeid].timer_head);
		kfree(t);

		/* If it was the first pending timer, reprogram the comparator */
		if (r) {
			mmtimer_disable_int(cnodeid_to_nasid(nodeid),
				COMPARATOR);
			mmtimer_set_next_timer(nodeid);
		}
	}
	spin_unlock_irqrestore(&timers[nodeid].lock, irqflags);
	return 0;
}

/* Assumption: it_lock is already held with irq's disabled */
static void sgi_timer_get(struct k_itimer *timr, struct itimerspec *cur_setting)
{
	if (timr->it.mmtimer.clock == TIMER_OFF) {
		cur_setting->it_interval.tv_nsec = 0;
		cur_setting->it_interval.tv_sec = 0;
		cur_setting->it_value.tv_nsec = 0;
		cur_setting->it_value.tv_sec = 0;
		return;
	}

	cur_setting->it_interval = ns_to_timespec(timr->it.mmtimer.incr * sgi_clock_period);
	cur_setting->it_value = ns_to_timespec((timr->it.mmtimer.expires - rtc_time()) * sgi_clock_period);
}

static int sgi_timer_set(struct k_itimer *timr, int flags,
	struct itimerspec *new_setting,
	struct itimerspec *old_setting)
{
	unsigned long when, period, irqflags;
	int err = 0;
	cnodeid_t nodeid;
	struct mmtimer *base;
	struct rb_node *n;

	if (old_setting)
		sgi_timer_get(timr, old_setting);

	sgi_timer_del(timr);
	when = timespec_to_ns(&new_setting->it_value);
	period = timespec_to_ns(&new_setting->it_interval);

	if (when == 0)
		/* Clear timer */
		return 0;

	base = kmalloc(sizeof(struct mmtimer), GFP_KERNEL);
	if (base == NULL)
		return -ENOMEM;

	if (flags & TIMER_ABSTIME) {
		struct timespec n;
		unsigned long now;

		getnstimeofday(&n);
		now = timespec_to_ns(&n);
		if (when > now)
			when -= now;
		else
			/* Fire the timer immediately */
			when = 0;
	}

	/*
	 * Convert to sgi clock period.  Need to keep rtc_time() as near
	 * as possible to getnstimeofday() in order to be as faithful as
	 * possible to the time specified.
	 */
	when = (when + sgi_clock_period - 1) / sgi_clock_period + rtc_time();
	period = (period + sgi_clock_period - 1) / sgi_clock_period;

	/*
	 * We are allocating a local SHub comparator.  If we would be
	 * moved to another cpu then another SHub may be local to us.
	 * Prohibit that by switching off preemption.
	 */
	preempt_disable();

	nodeid = cpu_to_node(smp_processor_id());

	/* Lock the node timer structure */
	spin_lock_irqsave(&timers[nodeid].lock, irqflags);

	base->timer = timr;
	base->cpu = smp_processor_id();

	timr->it.mmtimer.clock = TIMER_SET;
	timr->it.mmtimer.node = nodeid;
	timr->it.mmtimer.incr = period;
	timr->it.mmtimer.expires = when;

	n = timers[nodeid].next;

	/* Add the new struct mmtimer to node's timer list */
	mmtimer_add_list(base);

	if (timers[nodeid].next == n) {
		/* No need to reprogram comparator for now */
		spin_unlock_irqrestore(&timers[nodeid].lock, irqflags);
		preempt_enable();
		return err;
	}

	/* Disable interrupt then reprogram comparator */
	if (n)
		mmtimer_disable_int(cnodeid_to_nasid(nodeid), COMPARATOR);

	mmtimer_set_next_timer(nodeid);

	/* Unlock the node timer structure */
	spin_unlock_irqrestore(&timers[nodeid].lock, irqflags);

	preempt_enable();

	return err;
}

static int sgi_clock_getres(const clockid_t which_clock, struct timespec *tp)
{
	tp->tv_sec = 0;
	tp->tv_nsec = sgi_clock_period;
	return 0;
}

static struct k_clock sgi_clock = {
	.clock_set	= sgi_clock_set,
	.clock_get	= sgi_clock_get,
	.clock_getres	= sgi_clock_getres,
	.timer_create	= sgi_timer_create,
	.timer_set	= sgi_timer_set,
	.timer_del	= sgi_timer_del,
	.timer_get	= sgi_timer_get
};
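
/*
 * Userspace sketch (illustrative only): once registered, the clock is
 * reachable through the usual posix clock calls:
 *
 *	struct timespec res, now;
 *
 *	clock_getres(CLOCK_SGI_CYCLE, &res);	// one RTC tick, in ns
 *	clock_gettime(CLOCK_SGI_CYCLE, &now);
 */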

/**
 * mmtimer_init - device initialization routine
 *
 * Does initial setup for the mmtimer device.
 */
static int __init mmtimer_init(void)
{
	cnodeid_t node, maxn = -1;

	if (!ia64_platform_is("sn2"))
		return 0;

	/*
	 * Sanity check the cycles/sec variable
	 */
	if (sn_rtc_cycles_per_second < 100000) {
		printk(KERN_ERR "%s: unable to determine clock frequency\n",
		       MMTIMER_NAME);
		goto out1;
	}

	mmtimer_femtoperiod = ((unsigned long)1E15 + sn_rtc_cycles_per_second /
			       2) / sn_rtc_cycles_per_second;

	if (request_irq(SGI_MMTIMER_VECTOR, mmtimer_interrupt, IRQF_PERCPU, MMTIMER_NAME, NULL)) {
		printk(KERN_WARNING "%s: unable to allocate interrupt\n",
			MMTIMER_NAME);
		goto out1;
	}

	if (misc_register(&mmtimer_miscdev)) {
		printk(KERN_ERR "%s: failed to register device\n",
		       MMTIMER_NAME);
		goto out2;
	}

	/* Get max numbered node, calculate slots needed */
	for_each_online_node(node) {
		maxn = node;
	}
	maxn++;

	/* Allocate per-node timer bookkeeping up to the max node number */
	timers = kzalloc(sizeof(struct mmtimer_node)*maxn, GFP_KERNEL);
	if (timers == NULL) {
		printk(KERN_ERR "%s: failed to allocate memory for device\n",
		       MMTIMER_NAME);
		goto out3;
	}

	/* Initialize struct mmtimer's for each online node */
	for_each_online_node(node) {
		spin_lock_init(&timers[node].lock);
		tasklet_init(&timers[node].tasklet, mmtimer_tasklet,
			(unsigned long) node);
	}

	sgi_clock_period = NSEC_PER_SEC / sn_rtc_cycles_per_second;
	posix_timers_register_clock(CLOCK_SGI_CYCLE, &sgi_clock);

	printk(KERN_INFO "%s: v%s, %ld MHz\n", MMTIMER_DESC, MMTIMER_VERSION,
	       sn_rtc_cycles_per_second/(unsigned long)1E6);

	return 0;

out3:
	misc_deregister(&mmtimer_miscdev);
out2:
	free_irq(SGI_MMTIMER_VECTOR, NULL);
out1:
	return -ENODEV;
}

module_init(mmtimer_init);