1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21#include <linux/types.h>
22#include <linux/kernel.h>
23#include <linux/ioctl.h>
24#include <linux/module.h>
25#include <linux/init.h>
26#include <linux/errno.h>
27#include <linux/mm.h>
28#include <linux/fs.h>
29#include <linux/mmtimer.h>
30#include <linux/miscdevice.h>
31#include <linux/posix-timers.h>
32#include <linux/interrupt.h>
33#include <linux/time.h>
34#include <linux/math64.h>
35#include <linux/mutex.h>
36#include <linux/slab.h>
37
38#include <asm/uaccess.h>
39#include <asm/sn/addrs.h>
40#include <asm/sn/intr.h>
41#include <asm/sn/shub_mmr.h>
42#include <asm/sn/nodepda.h>
43#include <asm/sn/shubio.h>
44
45MODULE_AUTHOR("Jesse Barnes <jbarnes@sgi.com>");
46MODULE_DESCRIPTION("SGI Altix RTC Timer");
47MODULE_LICENSE("GPL");
48
49
#define MMTIMER_NAME "mmtimer"
#define MMTIMER_DESC "SGI Altix RTC Timer"
#define MMTIMER_VERSION "2.1"

/* Number of significant bits in the free-running RTC counter. */
#define RTC_BITS 55

/* RTC frequency in Hz, determined by the sn2 platform code. */
extern unsigned long sn_rtc_cycles_per_second;

/* Kernel virtual address of the memory-mapped RTC counter register. */
#define RTC_COUNTER_ADDR ((long *)LOCAL_MMR_ADDR(SH_RTC))

/* Current raw RTC counter value. */
#define rtc_time() (*RTC_COUNTER_ADDR)

/* Serializes the ioctl handler. */
static DEFINE_MUTEX(mmtimer_mutex);
static long mmtimer_ioctl(struct file *file, unsigned int cmd,
						unsigned long arg);
static int mmtimer_mmap(struct file *file, struct vm_area_struct *vma);




/* RTC counter period in femtoseconds, computed once in mmtimer_init(). */
static unsigned long mmtimer_femtoperiod = 0;

/*
 * File operations for /dev/mmtimer: query ioctls plus a read-only mmap
 * of the counter page; no read/write methods.
 */
static const struct file_operations mmtimer_fops = {
	.owner = THIS_MODULE,
	.mmap = mmtimer_mmap,
	.unlocked_ioctl = mmtimer_ioctl,
	.llseek = noop_llseek,
};
78
79
80
81
82
83
84static int mmtimer_int_pending(int comparator)
85{
86 if (HUB_L((unsigned long *)LOCAL_MMR_ADDR(SH_EVENT_OCCURRED)) &
87 SH_EVENT_OCCURRED_RTC1_INT_MASK << comparator)
88 return 1;
89 else
90 return 0;
91}
92
93
94static void mmtimer_clr_int_pending(int comparator)
95{
96 HUB_S((u64 *)LOCAL_MMR_ADDR(SH_EVENT_OCCURRED_ALIAS),
97 SH_EVENT_OCCURRED_RTC1_INT_MASK << comparator);
98}
99
100
/*
 * mmtimer_setup_int_0 - program RTC comparator 0 (RTC1 / SH_INT_CMPB)
 * on the local hub to interrupt @cpu when the counter reaches @expires.
 *
 * The MMR write ordering matters: the comparator is disabled and parked
 * at -1 before the pending bit is cleared and the real compare value is
 * written, so a stale match cannot raise a spurious interrupt.
 */
static void mmtimer_setup_int_0(int cpu, u64 expires)
{
	u64 val;

	/* Disable the comparator while reprogramming. */
	HUB_S((u64 *)LOCAL_MMR_ADDR(SH_RTC1_INT_ENABLE), 0UL);

	/* Park the compare value where it cannot match. */
	HUB_S((u64 *)LOCAL_MMR_ADDR(SH_INT_CMPB), -1L);

	/* Clear any stale pending bit. */
	mmtimer_clr_int_pending(0);

	/* Route the interrupt vector to the target cpu. */
	val = ((u64)SGI_MMTIMER_VECTOR << SH_RTC1_INT_CONFIG_IDX_SHFT) |
		((u64)cpu_physical_id(cpu) <<
			SH_RTC1_INT_CONFIG_PID_SHFT);

	HUB_S((u64 *)LOCAL_MMR_ADDR(SH_RTC1_INT_CONFIG), val);

	/* Re-enable, then arm the real expiry value. */
	HUB_S((u64 *)LOCAL_MMR_ADDR(SH_RTC1_INT_ENABLE), 1UL);

	HUB_S((u64 *)LOCAL_MMR_ADDR(SH_INT_CMPB), expires);


}
129
130
/*
 * mmtimer_setup_int_1 - program RTC comparator 1 (RTC2 / SH_INT_CMPC);
 * same disable/park/clear/route/enable/arm sequence as comparator 0.
 */
static void mmtimer_setup_int_1(int cpu, u64 expires)
{
	u64 val;

	HUB_S((u64 *)LOCAL_MMR_ADDR(SH_RTC2_INT_ENABLE), 0UL);

	HUB_S((u64 *)LOCAL_MMR_ADDR(SH_INT_CMPC), -1L);

	mmtimer_clr_int_pending(1);

	val = ((u64)SGI_MMTIMER_VECTOR << SH_RTC2_INT_CONFIG_IDX_SHFT) |
		((u64)cpu_physical_id(cpu) <<
			SH_RTC2_INT_CONFIG_PID_SHFT);

	HUB_S((u64 *)LOCAL_MMR_ADDR(SH_RTC2_INT_CONFIG), val);

	HUB_S((u64 *)LOCAL_MMR_ADDR(SH_RTC2_INT_ENABLE), 1UL);

	HUB_S((u64 *)LOCAL_MMR_ADDR(SH_INT_CMPC), expires);
}
151
152
/*
 * mmtimer_setup_int_2 - program RTC comparator 2 (RTC3 / SH_INT_CMPD);
 * same disable/park/clear/route/enable/arm sequence as comparator 0.
 */
static void mmtimer_setup_int_2(int cpu, u64 expires)
{
	u64 val;

	HUB_S((u64 *)LOCAL_MMR_ADDR(SH_RTC3_INT_ENABLE), 0UL);

	HUB_S((u64 *)LOCAL_MMR_ADDR(SH_INT_CMPD), -1L);

	mmtimer_clr_int_pending(2);

	val = ((u64)SGI_MMTIMER_VECTOR << SH_RTC3_INT_CONFIG_IDX_SHFT) |
		((u64)cpu_physical_id(cpu) <<
			SH_RTC3_INT_CONFIG_PID_SHFT);

	HUB_S((u64 *)LOCAL_MMR_ADDR(SH_RTC3_INT_CONFIG), val);

	HUB_S((u64 *)LOCAL_MMR_ADDR(SH_RTC3_INT_ENABLE), 1UL);

	HUB_S((u64 *)LOCAL_MMR_ADDR(SH_INT_CMPD), expires);
}
173
174
175
176
177
178
/*
 * mmtimer_setup - arm @comparator on the hub local to @cpu for @expires
 *
 * Returns nonzero if the interrupt is guaranteed to fire: either the
 * RTC had not yet passed @expires once programming completed, or the
 * deadline was missed but the interrupt is already latched pending.
 * A zero return means the deadline was missed without an interrupt
 * being raised; the caller must recover (retry or deliver by hand).
 * *@set_completion_time receives the RTC reading taken right after
 * the comparator was programmed.
 */
static int mmtimer_setup(int cpu, int comparator, unsigned long expires,
	u64 *set_completion_time)
{
	switch (comparator) {
	case 0:
		mmtimer_setup_int_0(cpu, expires);
		break;
	case 1:
		mmtimer_setup_int_1(cpu, expires);
		break;
	case 2:
		mmtimer_setup_int_2(cpu, expires);
		break;
	}
	/* We might've missed our expiration time */
	*set_completion_time = rtc_time();
	if (*set_completion_time <= expires)
		return 1;

	/*
	 * If an interrupt is already pending then its okay
	 * if not then we failed
	 */
	return mmtimer_int_pending(comparator);
}
204
205static int mmtimer_disable_int(long nasid, int comparator)
206{
207 switch (comparator) {
208 case 0:
209 nasid == -1 ? HUB_S((u64 *)LOCAL_MMR_ADDR(SH_RTC1_INT_ENABLE),
210 0UL) : REMOTE_HUB_S(nasid, SH_RTC1_INT_ENABLE, 0UL);
211 break;
212 case 1:
213 nasid == -1 ? HUB_S((u64 *)LOCAL_MMR_ADDR(SH_RTC2_INT_ENABLE),
214 0UL) : REMOTE_HUB_S(nasid, SH_RTC2_INT_ENABLE, 0UL);
215 break;
216 case 2:
217 nasid == -1 ? HUB_S((u64 *)LOCAL_MMR_ADDR(SH_RTC3_INT_ENABLE),
218 0UL) : REMOTE_HUB_S(nasid, SH_RTC3_INT_ENABLE, 0UL);
219 break;
220 default:
221 return -EFAULT;
222 }
223 return 0;
224}
225
/* All armed posix timers share hardware comparator 1 (RTC2). */
#define COMPARATOR 1

/* Sentinel values stored in k_itimer.it.mmtimer.clock */
#define TIMER_OFF 0xbadcabLL	/* timer is disarmed */
#define TIMER_SET 0		/* timer is armed */

/*
 * Extra RTC ticks added to the expiry each time an interval-timer setup
 * misses its deadline and is retried (see mmtimer_set_next_timer()).
 */
#define MMTIMER_INTERVAL_RETRY_INCREMENT_DEFAULT 40

/* One instance per armed posix timer, linked into its node's rb-tree. */
struct mmtimer {
	struct rb_node list;	/* NOTE(review): appears to rely on being the
				 * first member -- mmtimer_interrupt() NULL-
				 * checks the result of rb_entry(); confirm
				 * before reordering fields. */
	struct k_itimer *timer;	/* owning posix timer */
	int cpu;		/* cpu whose local comparator is used */
};

/* Per-NUMA-node timer queue. */
struct mmtimer_node {
	spinlock_t lock ____cacheline_aligned;	/* protects the fields below */
	struct rb_root timer_head;	/* timers ordered by expiry */
	struct rb_node *next;		/* cached soonest-to-expire timer */
	struct tasklet_struct tasklet;	/* deferred expiry delivery */
};
/* One mmtimer_node per online node, allocated in mmtimer_init(). */
static struct mmtimer_node *timers;

/* Runtime-tunable retry padding, exposed as a module parameter. */
static unsigned mmtimer_interval_retry_increment =
	MMTIMER_INTERVAL_RETRY_INCREMENT_DEFAULT;
module_param(mmtimer_interval_retry_increment, uint, 0644);
MODULE_PARM_DESC(mmtimer_interval_retry_increment,
	"RTC ticks to add to expiration on interval retry (default 40)");
253
254
255
256
257
258static void mmtimer_add_list(struct mmtimer *n)
259{
260 int nodeid = n->timer->it.mmtimer.node;
261 unsigned long expires = n->timer->it.mmtimer.expires;
262 struct rb_node **link = &timers[nodeid].timer_head.rb_node;
263 struct rb_node *parent = NULL;
264 struct mmtimer *x;
265
266
267
268
269 while (*link) {
270 parent = *link;
271 x = rb_entry(parent, struct mmtimer, list);
272
273 if (expires < x->timer->it.mmtimer.expires)
274 link = &(*link)->rb_left;
275 else
276 link = &(*link)->rb_right;
277 }
278
279
280
281
282
283 rb_link_node(&n->list, parent, link);
284 rb_insert_color(&n->list, &timers[nodeid].timer_head);
285
286 if (!timers[nodeid].next || expires < rb_entry(timers[nodeid].next,
287 struct mmtimer, list)->timer->it.mmtimer.expires)
288 timers[nodeid].next = &n->list;
289}
290
291
292
293
294
/*
 * mmtimer_set_next_timer - arm the comparator for @nodeid's soonest timer
 *
 * Called with timers[nodeid].lock held (true for every caller in this
 * file).  No-op when the node has no queued timers.
 *
 * One-shot timers that miss their deadline without latching an
 * interrupt are handed to the tasklet for immediate delivery.  Interval
 * timers are retried with an exponentially growing pad (plus the
 * mmtimer_interval_retry_increment module parameter); whole periods
 * skipped during retries are accounted as overruns.  After 20 failed
 * retries the timer is dropped with an alert and the next queued timer
 * is attempted.
 */
static void mmtimer_set_next_timer(int nodeid)
{
	struct mmtimer_node *n = &timers[nodeid];
	struct mmtimer *x;
	struct k_itimer *t;
	u64 expires, exp, set_completion_time;
	int i;

restart:
	if (n->next == NULL)
		return;

	x = rb_entry(n->next, struct mmtimer, list);
	t = x->timer;
	if (!t->it.mmtimer.incr) {
		/* One-shot timer: a failed setup means the deadline has
		 * already passed, so fire it via the tasklet. */
		if (!mmtimer_setup(x->cpu, COMPARATOR,
					t->it.mmtimer.expires,
					&set_completion_time)) {

			tasklet_schedule(&n->tasklet);
		}
		return;
	}

	/* Interval timer: retry until the comparator is safely armed. */
	i = 0;
	expires = exp = t->it.mmtimer.expires;
	while (!mmtimer_setup(x->cpu, COMPARATOR, expires,
				&set_completion_time)) {
		int to;

		/* Push the target further out: configured increment plus
		 * an exponentially growing slack. */
		i++;
		expires = set_completion_time +
				mmtimer_interval_retry_increment + (1 << i);

		/* Account whole skipped periods as overruns and slide the
		 * recorded expiry forward by the same amount. */
		to = ((u64)(expires - exp) / t->it.mmtimer.incr);
		if (to) {
			t->it_overrun += to;
			t->it.mmtimer.expires += t->it.mmtimer.incr * to;
			exp = t->it.mmtimer.expires;
		}
		if (i > 20) {
			/* Give up: drop this timer entirely and try to
			 * arm the next one in the tree. */
			printk(KERN_ALERT "mmtimer: cannot reschedule timer\n");
			t->it.mmtimer.clock = TIMER_OFF;
			n->next = rb_next(&x->list);
			rb_erase(&x->list, &n->timer_head);
			kfree(x);
			goto restart;
		}
	}
}
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375static long mmtimer_ioctl(struct file *file, unsigned int cmd,
376 unsigned long arg)
377{
378 int ret = 0;
379
380 mutex_lock(&mmtimer_mutex);
381
382 switch (cmd) {
383 case MMTIMER_GETOFFSET:
384
385
386
387 if(PAGE_SIZE <= (1 << 16))
388 ret = (((long)RTC_COUNTER_ADDR) & (PAGE_SIZE-1)) / 8;
389 else
390 ret = -ENOSYS;
391 break;
392
393 case MMTIMER_GETRES:
394 if(copy_to_user((unsigned long __user *)arg,
395 &mmtimer_femtoperiod, sizeof(unsigned long)))
396 ret = -EFAULT;
397 break;
398
399 case MMTIMER_GETFREQ:
400 if(copy_to_user((unsigned long __user *)arg,
401 &sn_rtc_cycles_per_second,
402 sizeof(unsigned long)))
403 ret = -EFAULT;
404 break;
405
406 case MMTIMER_GETBITS:
407 ret = RTC_BITS;
408 break;
409
410 case MMTIMER_MMAPAVAIL:
411 ret = (PAGE_SIZE <= (1 << 16)) ? 1 : 0;
412 break;
413
414 case MMTIMER_GETCOUNTER:
415 if(copy_to_user((unsigned long __user *)arg,
416 RTC_COUNTER_ADDR, sizeof(unsigned long)))
417 ret = -EFAULT;
418 break;
419 default:
420 ret = -ENOTTY;
421 break;
422 }
423 mutex_unlock(&mmtimer_mutex);
424 return ret;
425}
426
427
428
429
430
431
432
433
434
435static int mmtimer_mmap(struct file *file, struct vm_area_struct *vma)
436{
437 unsigned long mmtimer_addr;
438
439 if (vma->vm_end - vma->vm_start != PAGE_SIZE)
440 return -EINVAL;
441
442 if (vma->vm_flags & VM_WRITE)
443 return -EPERM;
444
445 if (PAGE_SIZE > (1 << 16))
446 return -ENOSYS;
447
448 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
449
450 mmtimer_addr = __pa(RTC_COUNTER_ADDR);
451 mmtimer_addr &= ~(PAGE_SIZE - 1);
452 mmtimer_addr &= 0xfffffffffffffffUL;
453
454 if (remap_pfn_range(vma, vma->vm_start, mmtimer_addr >> PAGE_SHIFT,
455 PAGE_SIZE, vma->vm_page_prot)) {
456 printk(KERN_ERR "remap_pfn_range failed in mmtimer.c\n");
457 return -EAGAIN;
458 }
459
460 return 0;
461}
462
463static struct miscdevice mmtimer_miscdev = {
464 SGI_MMTIMER,
465 MMTIMER_NAME,
466 &mmtimer_fops
467};
468
469static struct timespec sgi_clock_offset;
470static int sgi_clock_period;
471
472
473
474
475
476static struct timespec sgi_clock_offset;
477static int sgi_clock_period;
478
479static int sgi_clock_get(clockid_t clockid, struct timespec *tp)
480{
481 u64 nsec;
482
483 nsec = rtc_time() * sgi_clock_period
484 + sgi_clock_offset.tv_nsec;
485 *tp = ns_to_timespec(nsec);
486 tp->tv_sec += sgi_clock_offset.tv_sec;
487 return 0;
488};
489
490static int sgi_clock_set(clockid_t clockid, struct timespec *tp)
491{
492
493 u64 nsec;
494 u32 rem;
495
496 nsec = rtc_time() * sgi_clock_period;
497
498 sgi_clock_offset.tv_sec = tp->tv_sec - div_u64_rem(nsec, NSEC_PER_SEC, &rem);
499
500 if (rem <= tp->tv_nsec)
501 sgi_clock_offset.tv_nsec = tp->tv_sec - rem;
502 else {
503 sgi_clock_offset.tv_nsec = tp->tv_sec + NSEC_PER_SEC - rem;
504 sgi_clock_offset.tv_sec--;
505 }
506 return 0;
507}
508
509
510
511
512
513
514
515
516
517
518
519
520
521
/*
 * mmtimer_interrupt - RTC comparator interrupt handler
 *
 * If the local node's soonest timer belongs to this cpu and either the
 * comparator's pending bit is latched or the expiry has already passed,
 * the interrupt is acknowledged and delivery is deferred to the node's
 * tasklet.  Otherwise IRQ_NONE is returned.
 */
static irqreturn_t
mmtimer_interrupt(int irq, void *dev_id)
{
	unsigned long expires = 0;
	int result = IRQ_NONE;
	unsigned indx = cpu_to_node(smp_processor_id());
	struct mmtimer *base;

	spin_lock(&timers[indx].lock);
	base = rb_entry(timers[indx].next, struct mmtimer, list);
	/*
	 * NOTE(review): this NULL test only works while 'list' is the
	 * first member of struct mmtimer (rb_entry() then adds offset
	 * 0); confirm before reordering that struct's fields.
	 */
	if (base == NULL) {
		spin_unlock(&timers[indx].lock);
		return result;
	}

	if (base->cpu == smp_processor_id()) {
		if (base->timer)
			expires = base->timer->it.mmtimer.expires;
		/* Pending bit latched, or deadline already reached. */
		if ((mmtimer_int_pending(COMPARATOR) > 0) ||
			(expires && (expires <= rtc_time()))) {
			mmtimer_clr_int_pending(COMPARATOR);
			tasklet_schedule(&timers[indx].tasklet);
			result = IRQ_HANDLED;
		}
	}
	spin_unlock(&timers[indx].lock);
	return result;
}
551
/*
 * mmtimer_tasklet - deliver the expired timer at the head of node @data
 *
 * Scheduled from mmtimer_interrupt() (and from setup paths that detect
 * a missed deadline).  Pops the node's soonest timer, posts the posix
 * timer signal (counting an overrun if one was already queued), then
 * either requeues it one interval later (interval timer) or disarms and
 * frees it (one-shot), and finally re-arms the comparator for the new
 * head of the list.
 */
static void mmtimer_tasklet(unsigned long data)
{
	int nodeid = data;
	struct mmtimer_node *mn = &timers[nodeid];
	struct mmtimer *x;
	struct k_itimer *t;
	unsigned long flags;

	/* Send signal and deal with periodic signals */
	spin_lock_irqsave(&mn->lock, flags);
	if (!mn->next)
		goto out;

	x = rb_entry(mn->next, struct mmtimer, list);
	t = x->timer;

	/* Deleted underneath us (sgi_timer_del marks TIMER_OFF first). */
	if (t->it.mmtimer.clock == TIMER_OFF)
		goto out;

	t->it_overrun = 0;

	/* Unlink from the tree before delivering the event. */
	mn->next = rb_next(&x->list);
	rb_erase(&x->list, &mn->timer_head);

	if (posix_timer_event(t, 0) != 0)
		t->it_overrun++;

	if(t->it.mmtimer.incr) {
		/* Interval timer: advance one period and requeue. */
		t->it.mmtimer.expires += t->it.mmtimer.incr;
		mmtimer_add_list(x);
	} else {
		/* One shot timer */
		t->it.mmtimer.clock = TIMER_OFF;
		t->it.mmtimer.expires = 0;
		kfree(x);
	}

	mmtimer_set_next_timer(nodeid);

	t->it_overrun_last = t->it_overrun;
out:
	spin_unlock_irqrestore(&mn->lock, flags);
}
595
/*
 * sgi_timer_create - posix timer_create hook
 *
 * Newly created timers start disarmed; sgi_timer_set() arms them.
 */
static int sgi_timer_create(struct k_itimer *timer)
{
	/* Insure that a newly created timer is off */
	timer->it.mmtimer.clock = TIMER_OFF;
	return 0;
}
602
603
604
605
606
607
/*
 * sgi_timer_del - posix timer_delete hook: disarm and unlink @timr
 *
 * Looks the timer up in its node's rb-tree using the expiry value
 * recorded before it is cleared.  If the removed timer was the cached
 * head, the comparator is disabled and re-armed for the new head.
 * Always returns 0 (a timer not found in the tree has already fired
 * or been removed).
 */
static int sgi_timer_del(struct k_itimer *timr)
{
	cnodeid_t nodeid = timr->it.mmtimer.node;
	unsigned long irqflags;

	spin_lock_irqsave(&timers[nodeid].lock, irqflags);
	if (timr->it.mmtimer.clock != TIMER_OFF) {
		/* Save the search key before disarming the timer. */
		unsigned long expires = timr->it.mmtimer.expires;
		struct rb_node *n = timers[nodeid].timer_head.rb_node;
		struct mmtimer *uninitialized_var(t);
		int r = 0;

		/* Mark disarmed first so concurrent observers (e.g. the
		 * tasklet) skip it. */
		timr->it.mmtimer.clock = TIMER_OFF;
		timr->it.mmtimer.expires = 0;

		/* Descend the expiry-ordered tree looking for this timer. */
		while (n) {
			t = rb_entry(n, struct mmtimer, list);
			if (t->timer == timr)
				break;

			if (expires < t->timer->it.mmtimer.expires)
				n = n->rb_left;
			else
				n = n->rb_right;
		}

		/* Not queued any more; nothing to unlink. */
		if (!n) {
			spin_unlock_irqrestore(&timers[nodeid].lock, irqflags);
			return 0;
		}

		/* Removing the cached head forces a comparator re-arm. */
		if (timers[nodeid].next == n) {
			timers[nodeid].next = rb_next(n);
			r = 1;
		}

		rb_erase(n, &timers[nodeid].timer_head);
		kfree(t);

		if (r) {
			mmtimer_disable_int(cnodeid_to_nasid(nodeid),
				COMPARATOR);
			mmtimer_set_next_timer(nodeid);
		}
	}
	spin_unlock_irqrestore(&timers[nodeid].lock, irqflags);
	return 0;
}
656
657
658static void sgi_timer_get(struct k_itimer *timr, struct itimerspec *cur_setting)
659{
660
661 if (timr->it.mmtimer.clock == TIMER_OFF) {
662 cur_setting->it_interval.tv_nsec = 0;
663 cur_setting->it_interval.tv_sec = 0;
664 cur_setting->it_value.tv_nsec = 0;
665 cur_setting->it_value.tv_sec =0;
666 return;
667 }
668
669 cur_setting->it_interval = ns_to_timespec(timr->it.mmtimer.incr * sgi_clock_period);
670 cur_setting->it_value = ns_to_timespec((timr->it.mmtimer.expires - rtc_time()) * sgi_clock_period);
671}
672
673
/*
 * sgi_timer_set - posix timer_settime hook: (re)arm @timr
 *
 * Tears down any previous setting (reporting it via @old_setting),
 * converts the requested value/interval from nanoseconds to RTC ticks
 * (rounding up so the timer never fires early), queues the timer on
 * the current node's tree and, if it became the soonest entry,
 * reprograms the hardware comparator.  TIMER_ABSTIME deadlines are
 * made relative against getnstimeofday() first.  Returns 0 on success
 * or -ENOMEM if the queue entry cannot be allocated.
 */
static int sgi_timer_set(struct k_itimer *timr, int flags,
	struct itimerspec * new_setting,
	struct itimerspec * old_setting)
{
	unsigned long when, period, irqflags;
	int err = 0;
	cnodeid_t nodeid;
	struct mmtimer *base;
	struct rb_node *n;

	if (old_setting)
		sgi_timer_get(timr, old_setting);

	sgi_timer_del(timr);
	when = timespec_to_ns(&new_setting->it_value);
	period = timespec_to_ns(&new_setting->it_interval);

	if (when == 0)
		/* Clear timer */
		return 0;

	base = kmalloc(sizeof(struct mmtimer), GFP_KERNEL);
	if (base == NULL)
		return -ENOMEM;

	if (flags & TIMER_ABSTIME) {
		struct timespec n;
		unsigned long now;

		/* Convert an absolute deadline to a relative delay. */
		getnstimeofday(&n);
		now = timespec_to_ns(&n);
		if (when > now)
			when -= now;
		else
			/* Fire the timer immediately */
			when = 0;
	}

	/*
	 * Convert nanoseconds to RTC ticks, rounding up so the timer
	 * cannot expire before the requested time.
	 */
	when = (when + sgi_clock_period - 1) / sgi_clock_period + rtc_time();
	period = (period + sgi_clock_period - 1) / sgi_clock_period;

	/*
	 * A local SHub comparator is being programmed; disable
	 * preemption so this task cannot migrate to a cpu on another
	 * node between choosing the node and arming the hardware.
	 */
	preempt_disable();

	nodeid = cpu_to_node(smp_processor_id());

	/* Lock the node timer structure */
	spin_lock_irqsave(&timers[nodeid].lock, irqflags);

	base->timer = timr;
	base->cpu = smp_processor_id();

	timr->it.mmtimer.clock = TIMER_SET;
	timr->it.mmtimer.node = nodeid;
	timr->it.mmtimer.incr = period;
	timr->it.mmtimer.expires = when;

	/* Remember the old head to detect whether it changed. */
	n = timers[nodeid].next;

	/* Add the new struct mmtimer to node's timer list */
	mmtimer_add_list(base);

	if (timers[nodeid].next == n) {
		/* Head unchanged: comparator is already armed correctly. */
		spin_unlock_irqrestore(&timers[nodeid].lock, irqflags);
		preempt_enable();
		return err;
	}

	/* New soonest timer: disable the old comparator setting... */
	if (n)
		mmtimer_disable_int(cnodeid_to_nasid(nodeid), COMPARATOR);

	/* ...and arm it for the new head. */
	mmtimer_set_next_timer(nodeid);

	/* Unlock the node timer structure */
	spin_unlock_irqrestore(&timers[nodeid].lock, irqflags);

	preempt_enable();

	return err;
}
765
/*
 * Operations for the CLOCK_SGI_CYCLE posix clock.  .res (resolution in
 * nanoseconds) is filled in from the measured RTC frequency in
 * mmtimer_init() before the clock is registered.
 */
static struct k_clock sgi_clock = {
	.res = 0,
	.clock_set = sgi_clock_set,
	.clock_get = sgi_clock_get,
	.timer_create = sgi_timer_create,
	.nsleep = do_posix_clock_nonanosleep,
	.timer_set = sgi_timer_set,
	.timer_del = sgi_timer_del,
	.timer_get = sgi_timer_get
};
776
777
778
779
780
781
/*
 * mmtimer_init - module init
 *
 * Verifies the platform and RTC frequency, registers the interrupt
 * handler and misc device, allocates per-node timer state, and
 * registers the CLOCK_SGI_CYCLE posix clock.  Returns 0 on success
 * (silently a no-op on non-sn2 platforms), -1 after unwinding partial
 * registrations on failure.
 */
static int __init mmtimer_init(void)
{
	cnodeid_t node, maxn = -1;

	if (!ia64_platform_is("sn2"))
		return 0;

	/*
	 * Sanity check the cycles/sec variable
	 */
	if (sn_rtc_cycles_per_second < 100000) {
		printk(KERN_ERR "%s: unable to determine clock frequency\n",
		       MMTIMER_NAME);
		goto out1;
	}

	/* Round-to-nearest conversion of the RTC period to femtoseconds. */
	mmtimer_femtoperiod = ((unsigned long)1E15 + sn_rtc_cycles_per_second /
			       2) / sn_rtc_cycles_per_second;

	if (request_irq(SGI_MMTIMER_VECTOR, mmtimer_interrupt, IRQF_PERCPU, MMTIMER_NAME, NULL)) {
		printk(KERN_WARNING "%s: unable to allocate interrupt.",
			MMTIMER_NAME);
		goto out1;
	}

	if (misc_register(&mmtimer_miscdev)) {
		printk(KERN_ERR "%s: failed to register device\n",
		       MMTIMER_NAME);
		goto out2;
	}

	/* Get max numbered node, calculate slots needed */
	for_each_online_node(node) {
		maxn = node;
	}
	maxn++;

	/* Allocate per-node timer state, indexed by node id. */
	timers = kzalloc(sizeof(struct mmtimer_node)*maxn, GFP_KERNEL);
	if (timers == NULL) {
		printk(KERN_ERR "%s: failed to allocate memory for device\n",
				MMTIMER_NAME);
		goto out3;
	}

	/* Initialize struct mmtimer's for each online node */
	for_each_online_node(node) {
		spin_lock_init(&timers[node].lock);
		tasklet_init(&timers[node].tasklet, mmtimer_tasklet,
			(unsigned long) node);
	}

	sgi_clock_period = sgi_clock.res = NSEC_PER_SEC / sn_rtc_cycles_per_second;
	register_posix_clock(CLOCK_SGI_CYCLE, &sgi_clock);

	printk(KERN_INFO "%s: v%s, %ld MHz\n", MMTIMER_DESC, MMTIMER_VERSION,
	       sn_rtc_cycles_per_second/(unsigned long)1E6);

	return 0;

out3:
	/* timers is NULL on this path; kfree(NULL) is a safe no-op. */
	kfree(timers);
	misc_deregister(&mmtimer_miscdev);
out2:
	free_irq(SGI_MMTIMER_VECTOR, NULL);
out1:
	return -1;
}
850
851module_init(mmtimer_init);
852