// SPDX-License-Identifier: GPL-2.0+
/*
 * This file contains the functions which manage clocksource drivers.
 *
 * Copyright (C) 2004, 2005 IBM, John Stultz (johnstul@us.ibm.com)
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/device.h>
#include <linux/clocksource.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/tick.h>
#include <linux/kthread.h>

#include "tick-internal.h"
#include "timekeeping_internal.h"

/**
 * clocks_calc_mult_shift - calculate mult/shift factors for scaled math of clocks
 * @mult:	pointer to mult variable
 * @shift:	pointer to shift variable
 * @from:	frequency to convert from
 * @to:		frequency to convert to
 * @maxsec:	guaranteed runtime conversion range in seconds
 *
 * The function evaluates the shift/mult pair for the scaled math
 * operations of clocksources and clockevents.
 *
 * @to and @from are frequency values in HZ. For clock sources @to is
 * NSEC_PER_SEC == 1GHz and @from is the counter frequency. For clock
 * event @to is the counter frequency and @from is NSEC_PER_SEC.
 *
 * The @maxsec conversion range argument controls the time frame in
 * seconds which must be covered by the runtime conversion with the
 * calculated mult and shift factors. This guarantees that no 64bit
 * overflow happens when the input value of the conversion is
 * multiplied with the calculated mult factor. Larger ranges may
 * reduce the conversion accuracy by choosing smaller mult and shift
 * factors.
 */
void
clocks_calc_mult_shift(u32 *mult, u32 *shift, u32 from, u32 to, u32 maxsec)
{
	u64 tmp;
	u32 sft, sftacc = 32;

	/*
	 * Calculate the shift factor which is limiting the conversion
	 * range:
	 */
	tmp = ((u64)maxsec * from) >> 32;
	while (tmp) {
		tmp >>= 1;
		sftacc--;
	}

	/*
	 * Find the conversion shift/mult pair which has the best
	 * accuracy and fits the maxsec conversion range:
	 */
	for (sft = 32; sft > 0; sft--) {
		tmp = (u64) to << sft;
		tmp += from / 2;
		do_div(tmp, from);
		if ((tmp >> sftacc) == 0)
			break;
	}
	*mult = tmp;
	*shift = sft;
}
EXPORT_SYMBOL_GPL(clocks_calc_mult_shift);
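
/*
 * Worked example (illustrative values, not part of the original source):
 * for a 10 MHz counter converted to nanoseconds (from = 10000000,
 * to = NSEC_PER_SEC) with maxsec = 600, the loop above settles on
 * shift = 24 and mult = 100 << 24 = 1677721600, so
 * (cycles * 1677721600) >> 24 == cycles * 100, i.e. exactly 100 ns
 * per cycle for this ratio.
 */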

/*[Clocksource internal variables]---------
 * curr_clocksource:
 *	currently selected clocksource.
 * suspend_clocksource:
 *	used to calculate the suspend time.
 * clocksource_list:
 *	linked list with the registered clocksources
 * clocksource_mutex:
 *	protects manipulations to curr_clocksource and the clocksource_list
 * override_name:
 *	Name of the user-specified clocksource.
 */
static struct clocksource *curr_clocksource;
static struct clocksource *suspend_clocksource;
static LIST_HEAD(clocksource_list);
static DEFINE_MUTEX(clocksource_mutex);
static char override_name[CS_NAME_LEN];
static int finished_booting;
static u64 suspend_start;

#ifdef CONFIG_CLOCKSOURCE_WATCHDOG
static void clocksource_watchdog_work(struct work_struct *work);
static void clocksource_select(void);

static LIST_HEAD(watchdog_list);
static struct clocksource *watchdog;
static struct timer_list watchdog_timer;
static DECLARE_WORK(watchdog_work, clocksource_watchdog_work);
static DEFINE_SPINLOCK(watchdog_lock);
static int watchdog_running;
static atomic_t watchdog_reset_pending;

static inline void clocksource_watchdog_lock(unsigned long *flags)
{
	spin_lock_irqsave(&watchdog_lock, *flags);
}

static inline void clocksource_watchdog_unlock(unsigned long *flags)
{
	spin_unlock_irqrestore(&watchdog_lock, *flags);
}

static int clocksource_watchdog_kthread(void *data);
static void __clocksource_change_rating(struct clocksource *cs, int rating);

/*
 * Interval: 0.5sec Threshold: 0.0625s
 */
#define WATCHDOG_INTERVAL (HZ >> 1)
#define WATCHDOG_THRESHOLD (NSEC_PER_SEC >> 4)
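/*
 * With these values the watchdog timer fires every 0.5 s (HZ >> 1 jiffies),
 * and a clocksource whose measured interval deviates from the watchdog's
 * reading by more than NSEC_PER_SEC >> 4 = 62.5 ms is marked unstable.
 */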

static void clocksource_watchdog_work(struct work_struct *work)
{
	/*
	 * We cannot directly run clocksource_watchdog_kthread() here, because
	 * clocksource_select() calls timekeeping_notify() which uses
	 * stop_machine(). One cannot use stop_machine() from a workqueue()
	 * due to lock inversions wrt CPU hotplug.
	 *
	 * Also, we only ever run this work once or twice during the lifetime
	 * of the kernel, so there is no point in creating a more permanent
	 * kthread for this.
	 *
	 * If kthread_run fails the next watchdog scan over the
	 * watchdog_list will find the unstable clock again.
	 */
	kthread_run(clocksource_watchdog_kthread, NULL, "kwatchdog");
}

static void __clocksource_unstable(struct clocksource *cs)
{
	cs->flags &= ~(CLOCK_SOURCE_VALID_FOR_HRES | CLOCK_SOURCE_WATCHDOG);
	cs->flags |= CLOCK_SOURCE_UNSTABLE;

	/*
	 * If the clocksource is registered, clocksource_watchdog_kthread()
	 * will re-rate and re-select it; if it is not on the global list
	 * yet, only reset the rating.
	 */
	if (list_empty(&cs->list)) {
		cs->rating = 0;
		return;
	}

	if (cs->mark_unstable)
		cs->mark_unstable(cs);

	/* kick clocksource_watchdog_kthread() */
	if (finished_booting)
		schedule_work(&watchdog_work);
}

/**
 * clocksource_mark_unstable - mark clocksource unstable via watchdog
 * @cs:	clocksource to be marked unstable
 *
 * This function is called by the x86 TSC code to mark clocksources as
 * unstable; it defers demotion and re-selection to a kthread.
 */
void clocksource_mark_unstable(struct clocksource *cs)
{
	unsigned long flags;

	spin_lock_irqsave(&watchdog_lock, flags);
	if (!(cs->flags & CLOCK_SOURCE_UNSTABLE)) {
		if (!list_empty(&cs->list) && list_empty(&cs->wd_list))
			list_add(&cs->wd_list, &watchdog_list);
		__clocksource_unstable(cs);
	}
	spin_unlock_irqrestore(&watchdog_lock, flags);
}

static void clocksource_watchdog(struct timer_list *unused)
{
	struct clocksource *cs;
	u64 csnow, wdnow, cslast, wdlast, delta;
	int64_t wd_nsec, cs_nsec;
	int next_cpu, reset_pending;

	spin_lock(&watchdog_lock);
	if (!watchdog_running)
		goto out;

	reset_pending = atomic_read(&watchdog_reset_pending);

	list_for_each_entry(cs, &watchdog_list, wd_list) {

		/* Clocksource already marked unstable? */
		if (cs->flags & CLOCK_SOURCE_UNSTABLE) {
			if (finished_booting)
				schedule_work(&watchdog_work);
			continue;
		}

		local_irq_disable();
		csnow = cs->read(cs);
		wdnow = watchdog->read(watchdog);
		local_irq_enable();

		/* Clocksource initialized ? */
		if (!(cs->flags & CLOCK_SOURCE_WATCHDOG) ||
		    atomic_read(&watchdog_reset_pending)) {
			cs->flags |= CLOCK_SOURCE_WATCHDOG;
			cs->wd_last = wdnow;
			cs->cs_last = csnow;
			continue;
		}

		delta = clocksource_delta(wdnow, cs->wd_last, watchdog->mask);
		wd_nsec = clocksource_cyc2ns(delta, watchdog->mult,
					     watchdog->shift);

		delta = clocksource_delta(csnow, cs->cs_last, cs->mask);
		cs_nsec = clocksource_cyc2ns(delta, cs->mult, cs->shift);
		wdlast = cs->wd_last;
		cslast = cs->cs_last;
		cs->cs_last = csnow;
		cs->wd_last = wdnow;

		if (atomic_read(&watchdog_reset_pending))
			continue;

		/* Check the deviation from the watchdog clocksource. */
		if (abs(cs_nsec - wd_nsec) > WATCHDOG_THRESHOLD) {
			pr_warn("timekeeping watchdog on CPU%d: Marking clocksource '%s' as unstable because the skew is too large:\n",
				smp_processor_id(), cs->name);
			pr_warn("                      '%s' wd_now: %llx wd_last: %llx mask: %llx\n",
				watchdog->name, wdnow, wdlast, watchdog->mask);
			pr_warn("                      '%s' cs_now: %llx cs_last: %llx mask: %llx\n",
				cs->name, csnow, cslast, cs->mask);
			__clocksource_unstable(cs);
			continue;
		}

		if (cs == curr_clocksource && cs->tick_stable)
			cs->tick_stable(cs);

		if (!(cs->flags & CLOCK_SOURCE_VALID_FOR_HRES) &&
		    (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS) &&
		    (watchdog->flags & CLOCK_SOURCE_IS_CONTINUOUS)) {
			/* Mark it valid for high-res. */
			cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES;

			/*
			 * clocksource_done_booting() will sort it if
			 * finished_booting is not set yet.
			 */
			if (!finished_booting)
				continue;

			/*
			 * If this is not the current clocksource, let the
			 * watchdog kthread reselect it. Due to the change
			 * to high resolution this clocksource might be
			 * preferred now. If it is the current clocksource,
			 * let the tick code know about that change.
			 */
			if (cs != curr_clocksource) {
				cs->flags |= CLOCK_SOURCE_RESELECT;
				schedule_work(&watchdog_work);
			} else {
				tick_clock_notify();
			}
		}
	}

	/*
	 * We only clear the watchdog_reset_pending, when we did a
	 * full cycle through all clocksources.
	 */
	if (reset_pending)
		atomic_dec(&watchdog_reset_pending);

	/*
	 * Cycle through CPUs to check if the CPUs stay synchronized
	 * to each other.
	 */
	next_cpu = cpumask_next(raw_smp_processor_id(), cpu_online_mask);
	if (next_cpu >= nr_cpu_ids)
		next_cpu = cpumask_first(cpu_online_mask);
	watchdog_timer.expires += WATCHDOG_INTERVAL;
	add_timer_on(&watchdog_timer, next_cpu);
out:
	spin_unlock(&watchdog_lock);
}

static inline void clocksource_start_watchdog(void)
{
	if (watchdog_running || !watchdog || list_empty(&watchdog_list))
		return;
	timer_setup(&watchdog_timer, clocksource_watchdog, 0);
	watchdog_timer.expires = jiffies + WATCHDOG_INTERVAL;
	add_timer_on(&watchdog_timer, cpumask_first(cpu_online_mask));
	watchdog_running = 1;
}

static inline void clocksource_stop_watchdog(void)
{
	if (!watchdog_running || (watchdog && !list_empty(&watchdog_list)))
		return;
	del_timer(&watchdog_timer);
	watchdog_running = 0;
}

static inline void clocksource_reset_watchdog(void)
{
	struct clocksource *cs;

	list_for_each_entry(cs, &watchdog_list, wd_list)
		cs->flags &= ~CLOCK_SOURCE_WATCHDOG;
}

static void clocksource_resume_watchdog(void)
{
	atomic_inc(&watchdog_reset_pending);
}

static void clocksource_enqueue_watchdog(struct clocksource *cs)
{
	INIT_LIST_HEAD(&cs->wd_list);

	if (cs->flags & CLOCK_SOURCE_MUST_VERIFY) {
		/* cs is a clocksource to be watched. */
		list_add(&cs->wd_list, &watchdog_list);
		cs->flags &= ~CLOCK_SOURCE_WATCHDOG;
	} else {
		/* cs is a watchdog. */
		if (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS)
			cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES;
	}
}

static void clocksource_select_watchdog(bool fallback)
{
	struct clocksource *cs, *old_wd;
	unsigned long flags;

	spin_lock_irqsave(&watchdog_lock, flags);
	/* save current watchdog */
	old_wd = watchdog;
	if (fallback)
		watchdog = NULL;

	list_for_each_entry(cs, &clocksource_list, list) {
		/* cs is a clocksource to be watched. */
		if (cs->flags & CLOCK_SOURCE_MUST_VERIFY)
			continue;

		/* Skip current if we were requested for a fallback. */
		if (fallback && cs == old_wd)
			continue;

		/* Pick the best watchdog. */
		if (!watchdog || cs->rating > watchdog->rating)
			watchdog = cs;
	}
	/* If we failed to find a fallback restore the old one. */
	if (!watchdog)
		watchdog = old_wd;

	/* If we changed the watchdog we need to reset cycles. */
	if (watchdog != old_wd)
		clocksource_reset_watchdog();

	/* Check if the watchdog timer needs to be started. */
	clocksource_start_watchdog();
	spin_unlock_irqrestore(&watchdog_lock, flags);
}

static void clocksource_dequeue_watchdog(struct clocksource *cs)
{
	if (cs != watchdog) {
		if (cs->flags & CLOCK_SOURCE_MUST_VERIFY) {
			/* cs is a watched clocksource. */
			list_del_init(&cs->wd_list);
			/* Check if the watchdog timer needs to be stopped. */
			clocksource_stop_watchdog();
		}
	}
}

static int __clocksource_watchdog_kthread(void)
{
	struct clocksource *cs, *tmp;
	unsigned long flags;
	int select = 0;

	spin_lock_irqsave(&watchdog_lock, flags);
	list_for_each_entry_safe(cs, tmp, &watchdog_list, wd_list) {
		if (cs->flags & CLOCK_SOURCE_UNSTABLE) {
			list_del_init(&cs->wd_list);
			__clocksource_change_rating(cs, 0);
			select = 1;
		}
		if (cs->flags & CLOCK_SOURCE_RESELECT) {
			cs->flags &= ~CLOCK_SOURCE_RESELECT;
			select = 1;
		}
	}
	/* Check if the watchdog timer needs to be stopped. */
	clocksource_stop_watchdog();
	spin_unlock_irqrestore(&watchdog_lock, flags);

	return select;
}

static int clocksource_watchdog_kthread(void *data)
{
	mutex_lock(&clocksource_mutex);
	if (__clocksource_watchdog_kthread())
		clocksource_select();
	mutex_unlock(&clocksource_mutex);
	return 0;
}

static bool clocksource_is_watchdog(struct clocksource *cs)
{
	return cs == watchdog;
}

#else /* CONFIG_CLOCKSOURCE_WATCHDOG */

static void clocksource_enqueue_watchdog(struct clocksource *cs)
{
	if (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS)
		cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES;
}

static void clocksource_select_watchdog(bool fallback) { }
static inline void clocksource_dequeue_watchdog(struct clocksource *cs) { }
static inline void clocksource_resume_watchdog(void) { }
static inline int __clocksource_watchdog_kthread(void) { return 0; }
static bool clocksource_is_watchdog(struct clocksource *cs) { return false; }
void clocksource_mark_unstable(struct clocksource *cs) { }

static inline void clocksource_watchdog_lock(unsigned long *flags) { }
static inline void clocksource_watchdog_unlock(unsigned long *flags) { }

#endif /* CONFIG_CLOCKSOURCE_WATCHDOG */

static bool clocksource_is_suspend(struct clocksource *cs)
{
	return cs == suspend_clocksource;
}

static void __clocksource_suspend_select(struct clocksource *cs)
{
	/*
	 * Skip the clocksource which will be stopped in suspend state.
	 */
	if (!(cs->flags & CLOCK_SOURCE_SUSPEND_NONSTOP))
		return;

	/*
	 * The nonstop clocksource can be selected as the suspend clocksource to
	 * calculate the suspend time, so it should not supply suspend/resume
	 * interfaces to suspend the nonstop clocksource when system suspends.
	 */
	if (cs->suspend || cs->resume) {
		pr_warn("Nonstop clocksource %s should not supply suspend/resume interfaces\n",
			cs->name);
	}

	/* Pick the best rating. */
	if (!suspend_clocksource || cs->rating > suspend_clocksource->rating)
		suspend_clocksource = cs;
}

/**
 * clocksource_suspend_select - Select the best clocksource for suspend timing
 * @fallback:	if select a fallback clocksource
 */
static void clocksource_suspend_select(bool fallback)
{
	struct clocksource *cs, *old_suspend;

	old_suspend = suspend_clocksource;
	if (fallback)
		suspend_clocksource = NULL;

	list_for_each_entry(cs, &clocksource_list, list) {
		/* Skip current if we were requested for a fallback. */
		if (fallback && cs == old_suspend)
			continue;

		__clocksource_suspend_select(cs);
	}
}

/**
 * clocksource_start_suspend_timing - Start measuring the suspend timing
 * @cs:			current clocksource from timekeeping
 * @start_cycles:	current cycles from timekeeping
 *
 * This function will save the start cycle values of suspend timer to calculate
 * the suspend time when resuming system.
 *
 * This function is called late in the suspend process from timekeeping_suspend(),
 * that means processes are frozen, non-boot cpus and interrupts are disabled
 * now. It is therefore possible to start the suspend timer without taking the
 * clocksource mutex.
 */
void clocksource_start_suspend_timing(struct clocksource *cs, u64 start_cycles)
{
	if (!suspend_clocksource)
		return;

	/*
	 * If current clocksource is the suspend timer, we should use the
	 * timekeeping data of the suspend timer to avoid reading the
	 * suspend timer a second time here.
	 */
	if (clocksource_is_suspend(cs)) {
		suspend_start = start_cycles;
		return;
	}

	if (suspend_clocksource->enable &&
	    suspend_clocksource->enable(suspend_clocksource)) {
		pr_warn_once("Failed to enable the non-suspend-able clocksource.\n");
		return;
	}

	suspend_start = suspend_clocksource->read(suspend_clocksource);
}

/**
 * clocksource_stop_suspend_timing - Stop measuring the suspend timing
 * @cs:		current clocksource from timekeeping
 * @cycle_now:	current cycles from timekeeping
 *
 * This function will calculate the suspend time from suspend timer.
 *
 * Returns nanoseconds since suspend started, 0 if no usable suspend clocksource.
 *
 * This function is called early in the resume process from timekeeping_resume(),
 * that means there is only one cpu, no processes are running and the interrupts
 * are disabled. It is therefore possible to stop the suspend timer without
 * taking the clocksource mutex.
 */
u64 clocksource_stop_suspend_timing(struct clocksource *cs, u64 cycle_now)
{
	u64 now, delta, nsec = 0;

	if (!suspend_clocksource)
		return 0;

	/*
	 * If current clocksource is the suspend timer, we should use the
	 * cycle value from timekeeping as the current cycle to avoid
	 * reading the suspend timer a second time.
	 */
	if (clocksource_is_suspend(cs))
		now = cycle_now;
	else
		now = suspend_clocksource->read(suspend_clocksource);

	if (now > suspend_start) {
		delta = clocksource_delta(now, suspend_start,
					  suspend_clocksource->mask);
		nsec = mul_u64_u32_shr(delta, suspend_clocksource->mult,
				       suspend_clocksource->shift);
	}

	/*
	 * Disable the suspend timer to save power if current clocksource is
	 * not the suspend timer.
	 */
	if (!clocksource_is_suspend(cs) && suspend_clocksource->disable)
		suspend_clocksource->disable(suspend_clocksource);

	return nsec;
}
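
/*
 * Worked example (illustrative values, not from the original source): for a
 * 1 MHz suspend clocksource, clocks_calc_mult_shift() can yield
 * mult = 4194304000 and shift = 22, so a delta of 1000000 cycles (one
 * second asleep) converts to mul_u64_u32_shr(1000000, 4194304000, 22)
 * == 1000000000 ns.
 */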

/**
 * clocksource_suspend - suspend the clocksource(s)
 */
void clocksource_suspend(void)
{
	struct clocksource *cs;

	list_for_each_entry_reverse(cs, &clocksource_list, list)
		if (cs->suspend)
			cs->suspend(cs);
}

/**
 * clocksource_resume - resume the clocksource(s)
 */
void clocksource_resume(void)
{
	struct clocksource *cs;

	list_for_each_entry(cs, &clocksource_list, list)
		if (cs->resume)
			cs->resume(cs);

	clocksource_resume_watchdog();
}

/**
 * clocksource_touch_watchdog - Update watchdog
 *
 * Update the watchdog after exception contexts such as kgdb so as not
 * to incorrectly trip the watchdog. This might fail when the kernel
 * was stopped in code which holds watchdog_lock.
 */
void clocksource_touch_watchdog(void)
{
	clocksource_resume_watchdog();
}

/**
 * clocksource_max_adjustment - Returns max adjustment amount
 * @cs:	Pointer to clocksource
 */
static u32 clocksource_max_adjustment(struct clocksource *cs)
{
	u64 ret;
	/*
	 * We won't try to correct for more than 11% adjustments (110,000 ppm),
	 */
	ret = (u64)cs->mult * 11;
	do_div(ret, 100);
	return (u32)ret;
}
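
/*
 * Worked example (illustrative values, not from the original source): for
 * mult == 16777216 (1 << 24) the cap works out to 16777216 * 11 / 100 ==
 * 1845493, i.e. NTP may slew this clocksource by at most ~11%.
 */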

/**
 * clocks_calc_max_nsecs - Returns maximum nanoseconds that can be converted
 * @mult:	cycle to nanosecond multiplier
 * @shift:	cycle to nanosecond divisor (power of two)
 * @maxadj:	maximum adjustment value to mult (~11%)
 * @mask:	bitmask for two's complement subtraction of non 64 bit counters
 * @max_cyc:	maximum cycle value before potential overflow (does not include
 *		any safety margin)
 *
 * NOTE: This function includes a safety margin of 50%, in other words, we
 * return half the number of nanoseconds the hardware counter can technically
 * cover. This is done so that we can potentially detect problems caused by
 * delayed timers or bad hardware, which might result in time intervals that
 * are larger than what the math used can handle without overflows.
 */
u64 clocks_calc_max_nsecs(u32 mult, u32 shift, u32 maxadj, u64 mask, u64 *max_cyc)
{
	u64 max_nsecs, max_cycles;

	/*
	 * Calculate the maximum number of cycles that we can pass to the
	 * cyc2ns() function without overflowing a 64-bit result.
	 */
	max_cycles = ULLONG_MAX;
	do_div(max_cycles, mult + maxadj);

	/*
	 * The actual maximum number of cycles we can defer the clocksource is
	 * determined by the minimum of max_cycles and mask.
	 * Note: Here we subtract the maxadj to make sure we don't sleep for
	 * too long if there's a large negative adjustment.
	 */
	max_cycles = min(max_cycles, mask);
	max_nsecs = clocksource_cyc2ns(max_cycles, mult - maxadj, shift);

	/* return the max_cycles value for this clocksource */
	if (max_cyc)
		*max_cyc = max_cycles;

	/* Return 50% of the actual maximum, so we can detect bad values */
	max_nsecs >>= 1;

	return max_nsecs;
}
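
/*
 * Illustrative example (simplified, ignoring the ~11% adjustment headroom):
 * for a 32-bit counter whose mult/shift make one cycle equal one nanosecond
 * (e.g. mult == 1 << 20, shift == 20, maxadj == 0), max_cycles is capped by
 * mask == 0xffffffff, so the counter covers ~4.29 s and, after the 50%
 * safety margin, the reported max idle time is ~2.15 s.
 */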

/**
 * clocksource_update_max_deferment - Updates the clocksource max_idle_ns & max_cycles
 * @cs:	Pointer to clocksource
 */
static inline void clocksource_update_max_deferment(struct clocksource *cs)
{
	cs->max_idle_ns = clocks_calc_max_nsecs(cs->mult, cs->shift,
						cs->maxadj, cs->mask,
						&cs->max_cycles);
}

#ifndef CONFIG_ARCH_USES_GETTIMEOFFSET

static struct clocksource *clocksource_find_best(bool oneshot, bool skipcur)
{
	struct clocksource *cs;

	if (!finished_booting || list_empty(&clocksource_list))
		return NULL;

	/*
	 * We pick the clocksource with the highest rating. If oneshot
	 * mode is active, we pick the highres valid clocksource with
	 * the best rating.
	 */
	list_for_each_entry(cs, &clocksource_list, list) {
		if (skipcur && cs == curr_clocksource)
			continue;
		if (oneshot && !(cs->flags & CLOCK_SOURCE_VALID_FOR_HRES))
			continue;
		return cs;
	}
	return NULL;
}

static void __clocksource_select(bool skipcur)
{
	bool oneshot = tick_oneshot_mode_active();
	struct clocksource *best, *cs;

	/* Find the best suitable clocksource */
	best = clocksource_find_best(oneshot, skipcur);
	if (!best)
		return;

	if (!strlen(override_name))
		goto found;

	/* Check for the override clocksource. */
	list_for_each_entry(cs, &clocksource_list, list) {
		if (skipcur && cs == curr_clocksource)
			continue;
		if (strcmp(cs->name, override_name) != 0)
			continue;
		/*
		 * Check to make sure we don't switch to a non-highres
		 * capable clocksource if the tick code is in oneshot
		 * mode (highres or nohz)
		 */
		if (!(cs->flags & CLOCK_SOURCE_VALID_FOR_HRES) && oneshot) {
			/* Override clocksource cannot be used. */
			if (cs->flags & CLOCK_SOURCE_UNSTABLE) {
				pr_warn("Override clocksource %s is unstable and not HRT compatible - cannot switch while in HRT/NOHZ mode\n",
					cs->name);
				override_name[0] = 0;
			} else {
				/*
				 * The override cannot be currently verified.
				 * Deferring to let the watchdog check.
				 */
				pr_info("Override clocksource %s is not currently HRT compatible - deferring\n",
					cs->name);
			}
		} else
			/* Override clocksource can be used. */
			best = cs;
		break;
	}

found:
	if (curr_clocksource != best && !timekeeping_notify(best)) {
		pr_info("Switched to clocksource %s\n", best->name);
		curr_clocksource = best;
	}
}

/**
 * clocksource_select - Select the best clocksource available
 *
 * Private function. Must hold clocksource_mutex when called.
 *
 * Select the clocksource with the best rating, or the clocksource,
 * which is selected by userspace override.
 */
static void clocksource_select(void)
{
	__clocksource_select(false);
}

static void clocksource_select_fallback(void)
{
	__clocksource_select(true);
}

#else /* !CONFIG_ARCH_USES_GETTIMEOFFSET */
static inline void clocksource_select(void) { }
static inline void clocksource_select_fallback(void) { }

#endif

/*
 * clocksource_done_booting - Called near the end of core bootup
 *
 * Hack to avoid lots of clocksource churn at boot time.
 * We use fs_initcall because we want this to start before
 * device_initcall but after subsys_initcall.
 */
static int __init clocksource_done_booting(void)
{
	mutex_lock(&clocksource_mutex);
	curr_clocksource = clocksource_default_clock();
	finished_booting = 1;

	/*
	 * Run the watchdog first to eliminate unstable clock sources
	 */
	__clocksource_watchdog_kthread();
	clocksource_select();
	mutex_unlock(&clocksource_mutex);
	return 0;
}
fs_initcall(clocksource_done_booting);

/*
 * Enqueue the clocksource sorted by rating
 */
static void clocksource_enqueue(struct clocksource *cs)
{
	struct list_head *entry = &clocksource_list;
	struct clocksource *tmp;

	list_for_each_entry(tmp, &clocksource_list, list) {
		/* Keep track of the place, where to insert */
		if (tmp->rating < cs->rating)
			break;
		entry = &tmp->list;
	}
	list_add(&cs->list, entry);
}
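
/*
 * Because clocksource_enqueue() keeps the list sorted by descending rating,
 * clocksource_find_best() above can simply return the first entry that
 * passes its oneshot/skipcur filters.
 */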

/**
 * __clocksource_update_freq_scale - Used update clocksource with new freq
 * @cs:		clocksource to be registered
 * @scale:	Scale factor multiplied against freq to get clocksource hz
 * @freq:	clocksource frequency (cycles per second) divided by scale
 *
 * This should only be called from the clocksource->enable() method.
 *
 * This *SHOULD NOT* be called directly! Please use the
 * __clocksource_update_freq_hz() or __clocksource_update_freq_khz() helper
 * functions.
 */
void __clocksource_update_freq_scale(struct clocksource *cs, u32 scale, u32 freq)
{
	u64 sec;

	/*
	 * Default clocksources are *special* and self-define their mult/shift.
	 * But, you're not special, so you should specify a freq value.
	 */
	if (freq) {
		/*
		 * Calc the maximum number of seconds which we can run before
		 * wrapping around. For clocksources which have a mask > 32-bit
		 * we need to limit the max sleep time to have a good
		 * conversion precision. 10 minutes is still a reasonable
		 * amount. That results in a shift value of 24 for a
		 * clocksource with mask >= 40-bit and f >= 4GHz. That maps to
		 * ~ 0.06ppm granularity for NTP.
		 */
		sec = cs->mask;
		do_div(sec, freq);
		do_div(sec, scale);
		if (!sec)
			sec = 1;
		else if (sec > 600 && cs->mask > UINT_MAX)
			sec = 600;

		clocks_calc_mult_shift(&cs->mult, &cs->shift, freq,
				       NSEC_PER_SEC / scale, sec * scale);
	}

	/*
	 * Ensure clocksources that have large 'mult' values don't overflow
	 * when adjusted.
	 */
	cs->maxadj = clocksource_max_adjustment(cs);
	while (freq && ((cs->mult + cs->maxadj < cs->mult)
		|| (cs->mult - cs->maxadj > cs->mult))) {
		cs->mult >>= 1;
		cs->shift--;
		cs->maxadj = clocksource_max_adjustment(cs);
	}

	/*
	 * Only warn for *special* clocksources that self-define
	 * their mult/shift values and don't specify a freq.
	 */
	WARN_ONCE(cs->mult + cs->maxadj < cs->mult,
		"timekeeping: Clocksource %s might overflow on 11%% adjustment\n",
		cs->name);

	clocksource_update_max_deferment(cs);

	pr_info("%s: mask: 0x%llx max_cycles: 0x%llx, max_idle_ns: %lld ns\n",
		cs->name, cs->mask, cs->max_cycles, cs->max_idle_ns);
}
EXPORT_SYMBOL_GPL(__clocksource_update_freq_scale);

/**
 * __clocksource_register_scale - Used to install new clocksources
 * @cs:		clocksource to be registered
 * @scale:	Scale factor multiplied against freq to get clocksource hz
 * @freq:	clocksource frequency (cycles per second) divided by scale
 *
 * Returns -EBUSY if registration fails, zero otherwise.
 *
 * This *SHOULD NOT* be called directly! Please use the
 * clocksource_register_hz() or clocksource_register_khz() helper functions.
 */
int __clocksource_register_scale(struct clocksource *cs, u32 scale, u32 freq)
{
	unsigned long flags;

	/* Initialize mult/shift and max_idle_ns */
	__clocksource_update_freq_scale(cs, scale, freq);

	/* Add clocksource to the clocksource list */
	mutex_lock(&clocksource_mutex);

	clocksource_watchdog_lock(&flags);
	clocksource_enqueue(cs);
	clocksource_enqueue_watchdog(cs);
	clocksource_watchdog_unlock(&flags);

	clocksource_select();
	clocksource_select_watchdog(false);
	__clocksource_suspend_select(cs);
	mutex_unlock(&clocksource_mutex);
	return 0;
}
EXPORT_SYMBOL_GPL(__clocksource_register_scale);
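
/*
 * Typical driver usage (illustrative sketch; the "foo" device, its read
 * callback and its frequency are hypothetical):
 *
 *	static u64 foo_read(struct clocksource *cs)
 *	{
 *		return readl(foo_counter_base);	// hypothetical MMIO counter
 *	}
 *
 *	static struct clocksource foo_cs = {
 *		.name	= "foo",
 *		.rating	= 250,
 *		.read	= foo_read,
 *		.mask	= CLOCKSOURCE_MASK(32),
 *		.flags	= CLOCK_SOURCE_IS_CONTINUOUS,
 *	};
 *
 *	// clocksource_register_hz() in <linux/clocksource.h> wraps
 *	// __clocksource_register_scale(cs, 1, hz):
 *	clocksource_register_hz(&foo_cs, 10000000);
 */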

static void __clocksource_change_rating(struct clocksource *cs, int rating)
{
	list_del(&cs->list);
	cs->rating = rating;
	clocksource_enqueue(cs);
}

/**
 * clocksource_change_rating - Change the rating of a registered clocksource
 * @cs:		clocksource to be changed
 * @rating:	new rating
 */
void clocksource_change_rating(struct clocksource *cs, int rating)
{
	unsigned long flags;

	mutex_lock(&clocksource_mutex);
	clocksource_watchdog_lock(&flags);
	__clocksource_change_rating(cs, rating);
	clocksource_watchdog_unlock(&flags);

	clocksource_select();
	clocksource_select_watchdog(false);
	clocksource_suspend_select(false);
	mutex_unlock(&clocksource_mutex);
}
EXPORT_SYMBOL(clocksource_change_rating);

/*
 * Unbind clocksource @cs. Called with clocksource_mutex held
 */
static int clocksource_unbind(struct clocksource *cs)
{
	unsigned long flags;

	if (clocksource_is_watchdog(cs)) {
		/* Select and try to install a replacement watchdog. */
		clocksource_select_watchdog(true);
		if (clocksource_is_watchdog(cs))
			return -EBUSY;
	}

	if (cs == curr_clocksource) {
		/* Select and try to install a replacement clock source */
		clocksource_select_fallback();
		if (curr_clocksource == cs)
			return -EBUSY;
	}

	if (clocksource_is_suspend(cs)) {
		/*
		 * Select and try to install a replacement suspend clocksource.
		 * If no replacement suspend clocksource, we will just let the
		 * clocksource go and have no suspend clocksource.
		 */
		clocksource_suspend_select(true);
	}

	clocksource_watchdog_lock(&flags);
	clocksource_dequeue_watchdog(cs);
	list_del_init(&cs->list);
	clocksource_watchdog_unlock(&flags);

	return 0;
}

/**
 * clocksource_unregister - remove a registered clocksource
 * @cs:	clocksource to be unregistered
 */
int clocksource_unregister(struct clocksource *cs)
{
	int ret = 0;

	mutex_lock(&clocksource_mutex);
	if (!list_empty(&cs->list))
		ret = clocksource_unbind(cs);
	mutex_unlock(&clocksource_mutex);
	return ret;
}
EXPORT_SYMBOL(clocksource_unregister);

#ifdef CONFIG_SYSFS
/**
 * current_clocksource_show - sysfs interface for current clocksource
 * @dev:	unused
 * @attr:	unused
 * @buf:	char buffer to be filled with clocksource list
 *
 * Provides sysfs interface for listing current clocksource.
 */
static ssize_t current_clocksource_show(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	ssize_t count = 0;

	mutex_lock(&clocksource_mutex);
	count = snprintf(buf, PAGE_SIZE, "%s\n", curr_clocksource->name);
	mutex_unlock(&clocksource_mutex);

	return count;
}

ssize_t sysfs_get_uname(const char *buf, char *dst, size_t cnt)
{
	size_t ret = cnt;

	/* strings from sysfs write are not 0 terminated! */
	if (!cnt || cnt >= CS_NAME_LEN)
		return -EINVAL;

	/* strip off \n: */
	if (buf[cnt-1] == '\n')
		cnt--;
	if (cnt > 0)
		memcpy(dst, buf, cnt);
	dst[cnt] = 0;
	return ret;
}
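
/*
 * Worked example (illustrative): a write of "tsc\n" arrives with cnt == 4;
 * the trailing newline is dropped, dst becomes the NUL-terminated "tsc",
 * and the full input length 4 is returned so the sysfs write succeeds.
 */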

/**
 * current_clocksource_store - interface for manually overriding clocksource
 * @dev:	unused
 * @attr:	unused
 * @buf:	name of override clocksource
 * @count:	length of buffer
 *
 * Takes input from sysfs interface for manually overriding the default
 * clocksource selection.
 */
static ssize_t current_clocksource_store(struct device *dev,
					 struct device_attribute *attr,
					 const char *buf, size_t count)
{
	ssize_t ret;

	mutex_lock(&clocksource_mutex);

	ret = sysfs_get_uname(buf, override_name, count);
	if (ret >= 0)
		clocksource_select();

	mutex_unlock(&clocksource_mutex);

	return ret;
}
static DEVICE_ATTR_RW(current_clocksource);
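
/*
 * Example from userspace (output varies by system; "tsc" and "hpet" are
 * x86 clocksource names):
 *
 *	$ cat /sys/devices/system/clocksource/clocksource0/current_clocksource
 *	tsc
 *	$ echo hpet > /sys/devices/system/clocksource/clocksource0/current_clocksource
 *
 * The second command funnels through current_clocksource_store() and, via
 * the override_name logic above, switches the active clocksource.
 */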

/**
 * unbind_clocksource_store - interface for manually unbinding clocksource
 * @dev:	unused
 * @attr:	unused
 * @buf:	unbind clocksource name
 * @count:	length of buffer
 *
 * Takes input from sysfs interface for manually unbinding a clocksource.
 */
static ssize_t unbind_clocksource_store(struct device *dev,
					struct device_attribute *attr,
					const char *buf, size_t count)
{
	struct clocksource *cs;
	char name[CS_NAME_LEN];
	ssize_t ret;

	ret = sysfs_get_uname(buf, name, count);
	if (ret < 0)
		return ret;

	ret = -ENODEV;
	mutex_lock(&clocksource_mutex);
	list_for_each_entry(cs, &clocksource_list, list) {
		if (strcmp(cs->name, name))
			continue;
		ret = clocksource_unbind(cs);
		break;
	}
	mutex_unlock(&clocksource_mutex);

	return ret ? ret : count;
}
static DEVICE_ATTR_WO(unbind_clocksource);

/**
 * available_clocksource_show - sysfs interface for listing clocksource
 * @dev:	unused
 * @attr:	unused
 * @buf:	char buffer to be filled with clocksource list
 *
 * Provides sysfs interface for listing registered clocksources
 */
static ssize_t available_clocksource_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct clocksource *src;
	ssize_t count = 0;

	mutex_lock(&clocksource_mutex);
	list_for_each_entry(src, &clocksource_list, list) {
		/*
		 * Don't show non-HRES clocksource if the tick code is
		 * in one shot mode (highres=on or nohz=on)
		 */
		if (!tick_oneshot_mode_active() ||
		    (src->flags & CLOCK_SOURCE_VALID_FOR_HRES))
			count += snprintf(buf + count,
				  max((ssize_t)PAGE_SIZE - count, (ssize_t)0),
				  "%s ", src->name);
	}
	mutex_unlock(&clocksource_mutex);

	count += snprintf(buf + count,
			  max((ssize_t)PAGE_SIZE - count, (ssize_t)0), "\n");

	return count;
}
static DEVICE_ATTR_RO(available_clocksource);

static struct attribute *clocksource_attrs[] = {
	&dev_attr_current_clocksource.attr,
	&dev_attr_unbind_clocksource.attr,
	&dev_attr_available_clocksource.attr,
	NULL
};
ATTRIBUTE_GROUPS(clocksource);

static struct bus_type clocksource_subsys = {
	.name = "clocksource",
	.dev_name = "clocksource",
};

static struct device device_clocksource = {
	.id	= 0,
	.bus	= &clocksource_subsys,
	.groups	= clocksource_groups,
};

static int __init init_clocksource_sysfs(void)
{
	int error = subsys_system_register(&clocksource_subsys, NULL);

	if (!error)
		error = device_register(&device_clocksource);

	return error;
}

device_initcall(init_clocksource_sysfs);
#endif /* CONFIG_SYSFS */

/**
 * boot_override_clocksource - boot clock override
 * @str:	override name
 *
 * Takes a clocksource= boot argument and uses it
 * as the clocksource override name.
 */
static int __init boot_override_clocksource(char *str)
{
	mutex_lock(&clocksource_mutex);
	if (str)
		strlcpy(override_name, str, sizeof(override_name));
	mutex_unlock(&clocksource_mutex);
	return 1;
}

__setup("clocksource=", boot_override_clocksource);
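
/*
 * Example: booting with "clocksource=hpet" on the kernel command line seeds
 * override_name, so clocksource_select() prefers the clocksource named
 * "hpet" once it registers and qualifies ("hpet" is an x86 example; names
 * are platform specific).
 */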

/**
 * boot_override_clock - Compatibility layer for deprecated boot option
 * @str:	override name
 *
 * DEPRECATED! Takes a clock= boot argument and uses it
 * as the clocksource override name
 */
static int __init boot_override_clock(char *str)
{
	if (!strcmp(str, "pmtmr")) {
		pr_warn("clock=pmtmr is deprecated - use clocksource=acpi_pm\n");
		return boot_override_clocksource("acpi_pm");
	}
	pr_warn("clock= boot option is deprecated - use clocksource=xyz\n");
	return boot_override_clocksource(str);
}

__setup("clock=", boot_override_clock);