/*
 * linux/kernel/time/clocksource.c
 *
 * This file contains the functions which manage clocksource drivers.
 */
#include <linux/device.h>
#include <linux/clocksource.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/tick.h>
#include <linux/kthread.h>

void timecounter_init(struct timecounter *tc,
		      const struct cyclecounter *cc,
		      u64 start_tstamp)
{
	tc->cc = cc;
	tc->cycle_last = cc->read(cc);
	tc->nsec = start_tstamp;
}
EXPORT_SYMBOL_GPL(timecounter_init);
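
/**
 * timecounter_read_delta - get nanoseconds since last call of this function
 * @tc:		Pointer to time counter
 *
 * When the underlying cycle counter runs over, this will be handled
 * correctly as long as it does not run over more than once between
 * calls.
 *
 * The first call to this function for a new time counter initializes
 * the time tracking and returns an undefined result.
 */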
static u64 timecounter_read_delta(struct timecounter *tc)
{
	cycle_t cycle_now, cycle_delta;
	u64 ns_offset;

	/* read cycle counter: */
	cycle_now = tc->cc->read(tc->cc);

	/* calculate the delta since the last timecounter_read_delta(): */
	cycle_delta = (cycle_now - tc->cycle_last) & tc->cc->mask;

	/* convert to nanoseconds: */
	ns_offset = cyclecounter_cyc2ns(tc->cc, cycle_delta);

	/* update time stamp of timecounter_read_delta() call: */
	tc->cycle_last = cycle_now;

	return ns_offset;
}

u64 timecounter_read(struct timecounter *tc)
{
	u64 nsec;

	/* increment time by nanoseconds since last call */
	nsec = timecounter_read_delta(tc);
	nsec += tc->nsec;
	tc->nsec = nsec;

	return nsec;
}
EXPORT_SYMBOL_GPL(timecounter_read);

u64 timecounter_cyc2time(struct timecounter *tc,
			 cycle_t cycle_tstamp)
{
	u64 cycle_delta = (cycle_tstamp - tc->cycle_last) & tc->cc->mask;
	u64 nsec;

	/*
	 * Instead of always treating cycle_tstamp as more recent
	 * than tc->cycle_last, detect when it is too far in the
	 * future and treat it as an old time stamp instead.
	 */
	if (cycle_delta > tc->cc->mask / 2) {
		cycle_delta = (tc->cycle_last - cycle_tstamp) & tc->cc->mask;
		nsec = tc->nsec - cyclecounter_cyc2ns(tc->cc, cycle_delta);
	} else {
		nsec = cyclecounter_cyc2ns(tc->cc, cycle_delta) + tc->nsec;
	}

	return nsec;
}
EXPORT_SYMBOL_GPL(timecounter_cyc2time);
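
/**
 * clocks_calc_mult_shift - calculate mult/shift factors for scaled math of clocks
 * @mult:	pointer to mult variable
 * @shift:	pointer to shift variable
 * @from:	frequency to convert from
 * @to:		frequency to convert to
 * @maxsec:	guaranteed runtime conversion range in seconds
 *
 * The function evaluates the shift/mult pair for the scaled math
 * operations of clocksources and clockevents.
 *
 * @to and @from are frequency values in HZ. For clock sources @to is
 * NSEC_PER_SEC == 1GHz and @from is the counter frequency. For clock
 * events @to is the counter frequency and @from is NSEC_PER_SEC.
 *
 * The @maxsec conversion range argument controls the time frame in
 * seconds which must be covered by the runtime conversion with the
 * calculated mult and shift factors. This guarantees that no 64bit
 * overflow happens when the input value of the conversion is
 * multiplied with the calculated mult factor. Larger ranges may
 * reduce the conversion accuracy by choosing smaller mult and shift
 * factors.
 */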
void
clocks_calc_mult_shift(u32 *mult, u32 *shift, u32 from, u32 to, u32 maxsec)
{
	u64 tmp;
	u32 sft, sftacc = 32;

	/*
	 * Calculate the shift factor which is limiting the conversion
	 * range:
	 */
	tmp = ((u64)maxsec * from) >> 32;
	while (tmp) {
		tmp >>= 1;
		sftacc--;
	}

	/*
	 * Find the conversion shift/mult pair which has the best
	 * accuracy and fits the maxsec conversion range:
	 */
	for (sft = 32; sft > 0; sft--) {
		tmp = (u64) to << sft;
		tmp += from / 2;
		do_div(tmp, from);
		if ((tmp >> sftacc) == 0)
			break;
	}
	*mult = tmp;
	*shift = sft;
}
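
/*[Clocksource internal variables]---------
 * curr_clocksource:
 *	currently selected clocksource.
 * clocksource_list:
 *	linked list with the registered clocksources
 * clocksource_mutex:
 *	protects manipulations to curr_clocksource and the clocksource_list
 * override_name:
 *	Name of the user-specified clocksource.
 */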
static struct clocksource *curr_clocksource;
static LIST_HEAD(clocksource_list);
static DEFINE_MUTEX(clocksource_mutex);
static char override_name[32];
static int finished_booting;

#ifdef CONFIG_CLOCKSOURCE_WATCHDOG
static void clocksource_watchdog_work(struct work_struct *work);

static LIST_HEAD(watchdog_list);
static struct clocksource *watchdog;
static struct timer_list watchdog_timer;
static DECLARE_WORK(watchdog_work, clocksource_watchdog_work);
static DEFINE_SPINLOCK(watchdog_lock);
static int watchdog_running;
static atomic_t watchdog_reset_pending;

static int clocksource_watchdog_kthread(void *data);
static void __clocksource_change_rating(struct clocksource *cs, int rating);

/*
 * Interval: 0.5sec Threshold: 0.0625s
 */
#define WATCHDOG_INTERVAL (HZ >> 1)
#define WATCHDOG_THRESHOLD (NSEC_PER_SEC >> 4)

static void clocksource_watchdog_work(struct work_struct *work)
{
	/*
	 * If kthread_run fails the next watchdog scan over the
	 * watchdog_list will find the unstable clock again.
	 */
	kthread_run(clocksource_watchdog_kthread, NULL, "kwatchdog");
}

static void __clocksource_unstable(struct clocksource *cs)
{
	cs->flags &= ~(CLOCK_SOURCE_VALID_FOR_HRES | CLOCK_SOURCE_WATCHDOG);
	cs->flags |= CLOCK_SOURCE_UNSTABLE;
	if (finished_booting)
		schedule_work(&watchdog_work);
}

static void clocksource_unstable(struct clocksource *cs, int64_t delta)
{
	printk(KERN_WARNING "Clocksource %s unstable (delta = %Ld ns)\n",
	       cs->name, delta);
	__clocksource_unstable(cs);
}
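
/**
 * clocksource_mark_unstable - mark clocksource unstable via watchdog
 * @cs:		clocksource to be marked unstable
 *
 * This function is called instead of clocksource_change_rating from
 * cpu hotplug code to avoid a deadlock between the clocksource mutex
 * and the cpu hotplug mutex. It defers the update of the clocksource
 * to the watchdog thread.
 */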
void clocksource_mark_unstable(struct clocksource *cs)
{
	unsigned long flags;

	spin_lock_irqsave(&watchdog_lock, flags);
	if (!(cs->flags & CLOCK_SOURCE_UNSTABLE)) {
		if (list_empty(&cs->wd_list))
			list_add(&cs->wd_list, &watchdog_list);
		__clocksource_unstable(cs);
	}
	spin_unlock_irqrestore(&watchdog_lock, flags);
}

static void clocksource_watchdog(unsigned long data)
{
	struct clocksource *cs;
	cycle_t csnow, wdnow;
	int64_t wd_nsec, cs_nsec;
	int next_cpu, reset_pending;

	spin_lock(&watchdog_lock);
	if (!watchdog_running)
		goto out;

	reset_pending = atomic_read(&watchdog_reset_pending);

	list_for_each_entry(cs, &watchdog_list, wd_list) {

		/* Clocksource already marked unstable? */
		if (cs->flags & CLOCK_SOURCE_UNSTABLE) {
			if (finished_booting)
				schedule_work(&watchdog_work);
			continue;
		}

		local_irq_disable();
		csnow = cs->read(cs);
		wdnow = watchdog->read(watchdog);
		local_irq_enable();

		/* Clocksource initialized ? */
		if (!(cs->flags & CLOCK_SOURCE_WATCHDOG) ||
		    atomic_read(&watchdog_reset_pending)) {
			cs->flags |= CLOCK_SOURCE_WATCHDOG;
			cs->wd_last = wdnow;
			cs->cs_last = csnow;
			continue;
		}

		wd_nsec = clocksource_cyc2ns((wdnow - cs->wd_last) & watchdog->mask,
					     watchdog->mult, watchdog->shift);

		cs_nsec = clocksource_cyc2ns((csnow - cs->cs_last) &
					     cs->mask, cs->mult, cs->shift);
		cs->cs_last = csnow;
		cs->wd_last = wdnow;

		if (atomic_read(&watchdog_reset_pending))
			continue;

		/* Check the deviation from the watchdog clocksource. */
		if ((abs(cs_nsec - wd_nsec) > WATCHDOG_THRESHOLD)) {
			clocksource_unstable(cs, cs_nsec - wd_nsec);
			continue;
		}

		if (!(cs->flags & CLOCK_SOURCE_VALID_FOR_HRES) &&
		    (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS) &&
		    (watchdog->flags & CLOCK_SOURCE_IS_CONTINUOUS)) {
			cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES;
			/*
			 * We just marked the clocksource as highres-capable,
			 * notify the rest of the system as well so that we
			 * transition into high-res mode:
			 */
			tick_clock_notify();
		}
	}

	/*
	 * We only clear the watchdog_reset_pending, when we did a
	 * full cycle through all clocksources.
	 */
	if (reset_pending)
		atomic_dec(&watchdog_reset_pending);

	/*
	 * Cycle through CPUs to check if the CPUs stay synchronized
	 * to each other.
	 */
	next_cpu = cpumask_next(raw_smp_processor_id(), cpu_online_mask);
	if (next_cpu >= nr_cpu_ids)
		next_cpu = cpumask_first(cpu_online_mask);
	watchdog_timer.expires += WATCHDOG_INTERVAL;
	add_timer_on(&watchdog_timer, next_cpu);
out:
	spin_unlock(&watchdog_lock);
}

static inline void clocksource_start_watchdog(void)
{
	if (watchdog_running || !watchdog || list_empty(&watchdog_list))
		return;
	init_timer(&watchdog_timer);
	watchdog_timer.function = clocksource_watchdog;
	watchdog_timer.expires = jiffies + WATCHDOG_INTERVAL;
	add_timer_on(&watchdog_timer, cpumask_first(cpu_online_mask));
	watchdog_running = 1;
}

static inline void clocksource_stop_watchdog(void)
{
	if (!watchdog_running || (watchdog && !list_empty(&watchdog_list)))
		return;
	del_timer(&watchdog_timer);
	watchdog_running = 0;
}

static inline void clocksource_reset_watchdog(void)
{
	struct clocksource *cs;

	list_for_each_entry(cs, &watchdog_list, wd_list)
		cs->flags &= ~CLOCK_SOURCE_WATCHDOG;
}

static void clocksource_resume_watchdog(void)
{
	atomic_inc(&watchdog_reset_pending);
}

static void clocksource_enqueue_watchdog(struct clocksource *cs)
{
	unsigned long flags;

	spin_lock_irqsave(&watchdog_lock, flags);
	if (cs->flags & CLOCK_SOURCE_MUST_VERIFY) {
		/* cs is a clocksource to be watched. */
		list_add(&cs->wd_list, &watchdog_list);
		cs->flags &= ~CLOCK_SOURCE_WATCHDOG;
	} else {
		/* cs is a watchdog. */
		if (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS)
			cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES;
		/* Pick the best watchdog. */
		if (!watchdog || cs->rating > watchdog->rating) {
			watchdog = cs;
			/* Reset watchdog cycles */
			clocksource_reset_watchdog();
		}
	}
	/* Check if the watchdog timer needs to be started. */
	clocksource_start_watchdog();
	spin_unlock_irqrestore(&watchdog_lock, flags);
}

static void clocksource_dequeue_watchdog(struct clocksource *cs)
{
	struct clocksource *tmp;
	unsigned long flags;

	spin_lock_irqsave(&watchdog_lock, flags);
	if (cs->flags & CLOCK_SOURCE_MUST_VERIFY) {
		/* cs is a watched clocksource. */
		list_del_init(&cs->wd_list);
	} else if (cs == watchdog) {
		/* Reset watchdog cycles */
		clocksource_reset_watchdog();
		/* Current watchdog is removed. Find an alternative. */
		watchdog = NULL;
		list_for_each_entry(tmp, &clocksource_list, list) {
			if (tmp == cs || tmp->flags & CLOCK_SOURCE_MUST_VERIFY)
				continue;
			if (!watchdog || tmp->rating > watchdog->rating)
				watchdog = tmp;
		}
	}
	cs->flags &= ~CLOCK_SOURCE_WATCHDOG;
	/* Check if the watchdog timer needs to be stopped. */
	clocksource_stop_watchdog();
	spin_unlock_irqrestore(&watchdog_lock, flags);
}

static int clocksource_watchdog_kthread(void *data)
{
	struct clocksource *cs, *tmp;
	unsigned long flags;
	LIST_HEAD(unstable);

	mutex_lock(&clocksource_mutex);
	spin_lock_irqsave(&watchdog_lock, flags);
	list_for_each_entry_safe(cs, tmp, &watchdog_list, wd_list)
		if (cs->flags & CLOCK_SOURCE_UNSTABLE) {
			list_del_init(&cs->wd_list);
			list_add(&cs->wd_list, &unstable);
		}
	/* Check if the watchdog timer needs to be stopped. */
	clocksource_stop_watchdog();
	spin_unlock_irqrestore(&watchdog_lock, flags);

	/* Needs to be done outside of watchdog lock */
	list_for_each_entry_safe(cs, tmp, &unstable, wd_list) {
		list_del_init(&cs->wd_list);
		__clocksource_change_rating(cs, 0);
	}
	mutex_unlock(&clocksource_mutex);
	return 0;
}

#else /* CONFIG_CLOCKSOURCE_WATCHDOG */

static void clocksource_enqueue_watchdog(struct clocksource *cs)
{
	if (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS)
		cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES;
}

static inline void clocksource_dequeue_watchdog(struct clocksource *cs) { }
static inline void clocksource_resume_watchdog(void) { }
static inline int clocksource_watchdog_kthread(void *data) { return 0; }

#endif /* CONFIG_CLOCKSOURCE_WATCHDOG */
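
/**
 * clocksource_suspend - suspend the clocksource(s)
 */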
void clocksource_suspend(void)
{
	struct clocksource *cs;

	list_for_each_entry_reverse(cs, &clocksource_list, list)
		if (cs->suspend)
			cs->suspend(cs);
}
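
/**
 * clocksource_resume - resume the clocksource(s)
 */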
void clocksource_resume(void)
{
	struct clocksource *cs;

	list_for_each_entry(cs, &clocksource_list, list)
		if (cs->resume)
			cs->resume(cs);

	clocksource_resume_watchdog();
}
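
/**
 * clocksource_touch_watchdog - Update watchdog
 *
 * Update the watchdog after exception contexts such as kgdb so as not
 * to incorrectly trip the watchdog. This might fail when the kernel
 * was stopped in code which holds watchdog_lock.
 */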
void clocksource_touch_watchdog(void)
{
	clocksource_resume_watchdog();
}
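
/**
 * clocksource_max_adjustment - Returns max adjustment amount
 * @cs:		Pointer to clocksource
 */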
static u32 clocksource_max_adjustment(struct clocksource *cs)
{
	u64 ret;
	/*
	 * We won't try to correct for more than 11% adjustments (110,000 ppm),
	 */
	ret = (u64)cs->mult * 11;
	do_div(ret, 100);
	return (u32)ret;
}
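
/**
 * clocksource_max_deferment - Returns max time the clocksource can be deferred
 * @cs:		Pointer to clocksource
 */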
static u64 clocksource_max_deferment(struct clocksource *cs)
{
	u64 max_nsecs, max_cycles;

	/*
	 * Calculate the maximum number of cycles that we can pass to the
	 * cyc2ns function without overflowing a 64-bit signed result. The
	 * maximum number of cycles is equal to ULLONG_MAX/(cs->mult+cs->maxadj)
	 * which is equivalent to the below.
	 * max_cycles < (2^63)/(cs->mult + cs->maxadj)
	 * max_cycles < 2^(log2((2^63)/(cs->mult + cs->maxadj)))
	 * max_cycles < 2^(log2(2^63) - log2(cs->mult + cs->maxadj))
	 * max_cycles < 2^(63 - log2(cs->mult + cs->maxadj))
	 * max_cycles < 1 << (63 - log2(cs->mult + cs->maxadj))
	 * Please note that we add 1 to the result of the log2 to account for
	 * any rounding errors, ensure the above inequality is satisfied and
	 * avoid slightly bigger max_cycles otherwise.
	 */
	max_cycles = 1ULL << (63 - (ilog2(cs->mult + cs->maxadj) + 1));

	/*
	 * The actual maximum number of cycles we can defer the clocksource is
	 * determined by the minimum of max_cycles and cs->mask.
	 * Note: Here we subtract the maxadj to make sure we don't sleep for
	 * too long if there's a large negative adjustment.
	 */
	max_cycles = min_t(u64, max_cycles, (u64) cs->mask);
	max_nsecs = clocksource_cyc2ns(max_cycles, cs->mult - cs->maxadj,
				       cs->shift);

	/*
	 * To ensure that the clocksource does not wrap whilst we are idle,
	 * limit the time the clocksource can be deferred by 12.5%. Please
	 * note a margin of 12.5% is used because this can be computed with
	 * a shift, versus say 10% which would require division.
	 */
	return max_nsecs - (max_nsecs >> 3);
}

#ifndef CONFIG_ARCH_USES_GETTIMEOFFSET
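
/**
 * clocksource_select - Select the best clocksource available
 *
 * Private function. Must hold clocksource_mutex when called.
 *
 * Select the clocksource with the best rating, or the clocksource,
 * which is selected by userspace override.
 */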
static void clocksource_select(void)
{
	struct clocksource *best, *cs;

	if (!finished_booting || list_empty(&clocksource_list))
		return;
	/* First clocksource on the list has the best rating. */
	best = list_first_entry(&clocksource_list, struct clocksource, list);
	/* Check for the override clocksource. */
	list_for_each_entry(cs, &clocksource_list, list) {
		if (strcmp(cs->name, override_name) != 0)
			continue;
		/*
		 * Check to make sure we don't switch to a non-highres
		 * capable clocksource if the tick code is in oneshot
		 * mode (highres or nohz).
		 */
		if (!(cs->flags & CLOCK_SOURCE_VALID_FOR_HRES) &&
		    tick_oneshot_mode_active()) {
			/* Override clocksource cannot be used. */
			printk(KERN_WARNING "Override clocksource %s is not "
			       "HRT compatible. Cannot switch while in "
			       "HRT/NOHZ mode\n", cs->name);
			override_name[0] = 0;
		} else
			/* Override clocksource can be used. */
			best = cs;
		break;
	}
	if (curr_clocksource != best) {
		printk(KERN_INFO "Switching to clocksource %s\n", best->name);
		curr_clocksource = best;
		timekeeping_notify(curr_clocksource);
	}
}

#else /* !CONFIG_ARCH_USES_GETTIMEOFFSET */

static inline void clocksource_select(void) { }

#endif
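
/*
 * clocksource_done_booting - Called near the end of core bootup
 *
 * Hack to avoid lots of clocksource churn at boot time.
 * We use fs_initcall because we want this to start before
 * device_initcall but after subsys_initcall.
 */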
static int __init clocksource_done_booting(void)
{
	mutex_lock(&clocksource_mutex);
	curr_clocksource = clocksource_default_clock();
	mutex_unlock(&clocksource_mutex);

	finished_booting = 1;

	/*
	 * Run the watchdog first to eliminate unstable clock sources
	 */
	clocksource_watchdog_kthread(NULL);

	mutex_lock(&clocksource_mutex);
	clocksource_select();
	mutex_unlock(&clocksource_mutex);
	return 0;
}
fs_initcall(clocksource_done_booting);
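
/*
 * Enqueue the clocksource sorted by rating
 */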
static void clocksource_enqueue(struct clocksource *cs)
{
	struct list_head *entry = &clocksource_list;
	struct clocksource *tmp;

	list_for_each_entry(tmp, &clocksource_list, list)
		/* Keep track of the place, where to insert */
		if (tmp->rating >= cs->rating)
			entry = &tmp->list;
	list_add(&cs->list, entry);
}
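
/**
 * __clocksource_updatefreq_scale - Used update clocksource with new freq
 * @cs:		clocksource to be registered
 * @scale:	Scale factor multiplied against freq to get clocksource hz
 * @freq:	clocksource frequency (cycles per second) divided by scale
 *
 * This should only be called from the clocksource->enable() method.
 *
 * This *SHOULD NOT* be called directly! Please use the
 * clocksource_updatefreq_hz() or clocksource_updatefreq_khz() helper
 * functions.
 */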
void __clocksource_updatefreq_scale(struct clocksource *cs, u32 scale, u32 freq)
{
	u64 sec;

	/*
	 * Calc the maximum number of seconds which we can run before
	 * wrapping around. For clocksources which have a mask > 32bit
	 * we need to limit the max sleep time to have a good
	 * conversion precision. 10 minutes is still a reasonable
	 * amount. That results in a shift value of 24 for a
	 * clocksource with mask >= 40bit and f >= 4GHz. That maps to
	 * ~ 0.06ppm granularity for NTP. We apply the same 12.5%
	 * margin as we do in clocksource_max_deferment().
	 */
	sec = (cs->mask - (cs->mask >> 3));
	do_div(sec, freq);
	do_div(sec, scale);
	if (!sec)
		sec = 1;
	else if (sec > 600 && cs->mask > UINT_MAX)
		sec = 600;

	clocks_calc_mult_shift(&cs->mult, &cs->shift, freq,
			       NSEC_PER_SEC / scale, sec * scale);

	/*
	 * Ensure clocksources that have large 'mult' values don't overflow
	 * when adjusted. Since mult may be adjusted by ntp, add a safety
	 * extra margin.
	 */
	cs->maxadj = clocksource_max_adjustment(cs);
	while ((cs->mult + cs->maxadj < cs->mult)
		|| (cs->mult - cs->maxadj > cs->mult)) {
		cs->mult >>= 1;
		cs->shift--;
		cs->maxadj = clocksource_max_adjustment(cs);
	}

	cs->max_idle_ns = clocksource_max_deferment(cs);
}
EXPORT_SYMBOL_GPL(__clocksource_updatefreq_scale);
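
/**
 * __clocksource_register_scale - Used to install new clocksources
 * @cs:		clocksource to be registered
 * @scale:	Scale factor multiplied against freq to get clocksource hz
 * @freq:	clocksource frequency (cycles per second) divided by scale
 *
 * This *SHOULD NOT* be called directly! Please use the
 * clocksource_register_hz() or clocksource_register_khz() helper
 * functions.
 */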
int __clocksource_register_scale(struct clocksource *cs, u32 scale, u32 freq)
{
	/* Initialize mult/shift and max_idle_ns */
	__clocksource_updatefreq_scale(cs, scale, freq);

	/* Add clocksource to the clocksource list */
	mutex_lock(&clocksource_mutex);
	clocksource_enqueue(cs);
	clocksource_enqueue_watchdog(cs);
	clocksource_select();
	mutex_unlock(&clocksource_mutex);
	return 0;
}
EXPORT_SYMBOL_GPL(__clocksource_register_scale);
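
/**
 * clocksource_register - Used to install new clocksources
 * @cs:		clocksource to be registered
 *
 * Returns 0 on success.
 */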
int clocksource_register(struct clocksource *cs)
{
	/* calculate max adjustment for given mult/shift */
	cs->maxadj = clocksource_max_adjustment(cs);
	WARN_ONCE(cs->mult + cs->maxadj < cs->mult,
		"Clocksource %s might overflow on 11%% adjustment\n",
		cs->name);

	/* calculate max idle time permitted for this clocksource */
	cs->max_idle_ns = clocksource_max_deferment(cs);

	mutex_lock(&clocksource_mutex);
	clocksource_enqueue(cs);
	clocksource_enqueue_watchdog(cs);
	clocksource_select();
	mutex_unlock(&clocksource_mutex);
	return 0;
}
EXPORT_SYMBOL(clocksource_register);

static void __clocksource_change_rating(struct clocksource *cs, int rating)
{
	list_del(&cs->list);
	cs->rating = rating;
	clocksource_enqueue(cs);
	clocksource_select();
}
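
/**
 * clocksource_change_rating - Change the rating of a registered clocksource
 * @cs:		clocksource to be changed
 * @rating:	new rating
 */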
void clocksource_change_rating(struct clocksource *cs, int rating)
{
	mutex_lock(&clocksource_mutex);
	__clocksource_change_rating(cs, rating);
	mutex_unlock(&clocksource_mutex);
}
EXPORT_SYMBOL(clocksource_change_rating);
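
/**
 * clocksource_unregister - remove a registered clocksource
 * @cs:		clocksource to be unregistered
 */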
void clocksource_unregister(struct clocksource *cs)
{
	mutex_lock(&clocksource_mutex);
	clocksource_dequeue_watchdog(cs);
	list_del(&cs->list);
	clocksource_select();
	mutex_unlock(&clocksource_mutex);
}
EXPORT_SYMBOL(clocksource_unregister);

#ifdef CONFIG_SYSFS
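
/**
 * sysfs_show_current_clocksources - sysfs interface for current clocksource
 * @dev:	unused
 * @attr:	unused
 * @buf:	char buffer to be filled with clocksource list
 *
 * Provides sysfs interface for listing the current clocksource.
 */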
static ssize_t
sysfs_show_current_clocksources(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	ssize_t count = 0;

	mutex_lock(&clocksource_mutex);
	count = snprintf(buf, PAGE_SIZE, "%s\n", curr_clocksource->name);
	mutex_unlock(&clocksource_mutex);

	return count;
}
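
/**
 * sysfs_override_clocksource - interface for manually overriding clocksource
 * @dev:	unused
 * @attr:	unused
 * @buf:	name of override clocksource
 * @count:	length of buffer
 *
 * Takes input from sysfs interface for manually overriding the default
 * clocksource selection.
 */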
static ssize_t sysfs_override_clocksource(struct device *dev,
					  struct device_attribute *attr,
					  const char *buf, size_t count)
{
	size_t ret = count;

	/* strings from sysfs write are not 0 terminated! */
	if (count >= sizeof(override_name))
		return -EINVAL;

	/* strip off \n: */
	if (buf[count-1] == '\n')
		count--;

	mutex_lock(&clocksource_mutex);

	if (count > 0)
		memcpy(override_name, buf, count);
	override_name[count] = 0;
	clocksource_select();

	mutex_unlock(&clocksource_mutex);

	return ret;
}
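
/**
 * sysfs_show_available_clocksources - sysfs interface for listing clocksource
 * @dev:	unused
 * @attr:	unused
 * @buf:	char buffer to be filled with clocksource list
 *
 * Provides sysfs interface for listing registered clocksources
 */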
static ssize_t
sysfs_show_available_clocksources(struct device *dev,
				  struct device_attribute *attr,
				  char *buf)
{
	struct clocksource *src;
	ssize_t count = 0;

	mutex_lock(&clocksource_mutex);
	list_for_each_entry(src, &clocksource_list, list) {
		/*
		 * Don't show non-HRES clocksource if the tick code is
		 * in one shot mode (highres=on or nohz=on)
		 */
		if (!tick_oneshot_mode_active() ||
		    (src->flags & CLOCK_SOURCE_VALID_FOR_HRES))
			count += snprintf(buf + count,
				  max((ssize_t)PAGE_SIZE - count, (ssize_t)0),
				  "%s ", src->name);
	}
	mutex_unlock(&clocksource_mutex);

	count += snprintf(buf + count,
			  max((ssize_t)PAGE_SIZE - count, (ssize_t)0), "\n");

	return count;
}
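
/*
 * Sysfs setup bits:
 */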
static DEVICE_ATTR(current_clocksource, 0644, sysfs_show_current_clocksources,
		   sysfs_override_clocksource);

static DEVICE_ATTR(available_clocksource, 0444,
		   sysfs_show_available_clocksources, NULL);

static struct bus_type clocksource_subsys = {
	.name = "clocksource",
	.dev_name = "clocksource",
};

static struct device device_clocksource = {
	.id	= 0,
	.bus	= &clocksource_subsys,
};

static int __init init_clocksource_sysfs(void)
{
	int error = subsys_system_register(&clocksource_subsys, NULL);

	if (!error)
		error = device_register(&device_clocksource);
	if (!error)
		error = device_create_file(
				&device_clocksource,
				&dev_attr_current_clocksource);
	if (!error)
		error = device_create_file(
				&device_clocksource,
				&dev_attr_available_clocksource);
	return error;
}

device_initcall(init_clocksource_sysfs);
#endif /* CONFIG_SYSFS */
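
/**
 * boot_override_clocksource - boot clock override
 * @str:	override name
 *
 * Takes a clocksource= boot argument and uses it
 * as the clocksource override name.
 */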
static int __init boot_override_clocksource(char* str)
{
	mutex_lock(&clocksource_mutex);
	if (str)
		strlcpy(override_name, str, sizeof(override_name));
	mutex_unlock(&clocksource_mutex);
	return 1;
}

__setup("clocksource=", boot_override_clocksource);
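
/**
 * boot_override_clock - Compatibility layer for deprecated boot option
 * @str:	override name
 *
 * DEPRECATED! Takes a clock= boot argument and uses it
 * as the clocksource override name
 */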
static int __init boot_override_clock(char* str)
{
	if (!strcmp(str, "pmtmr")) {
		printk("Warning: clock=pmtmr is deprecated. "
			"Use clocksource=acpi_pm.\n");
		return boot_override_clocksource("acpi_pm");
	}
	printk("Warning! clock= boot option is deprecated. "
		"Use clocksource=xyz\n");
	return boot_override_clocksource(str);
}

__setup("clock=", boot_override_clock);