// SPDX-License-Identifier: GPL-2.0+
/*
 * This file contains the functions which manage clocksource drivers.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/device.h>
#include <linux/clocksource.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/tick.h>
#include <linux/kthread.h>

#include "tick-internal.h"
#include "timekeeping_internal.h"

/**
 * clocks_calc_mult_shift - calculate mult/shift factors for scaled math of clocks
 * @mult:	pointer to mult variable
 * @shift:	pointer to shift variable
 * @from:	frequency to convert from
 * @to:		frequency to convert to
 * @maxsec:	guaranteed runtime conversion range in seconds
 *
 * The function evaluates the shift/mult pair for the scaled math
 * operations of clocksources and clockevents.
 *
 * @to and @from are frequency values in HZ. For clock sources @to is
 * NSEC_PER_SEC == 1GHz and @from is the counter frequency. For clock
 * events @to is the counter frequency and @from is NSEC_PER_SEC.
 *
 * The @maxsec conversion range argument controls the time frame in
 * seconds which must be covered by the runtime conversion with the
 * calculated mult and shift factors. This guarantees that no 64bit
 * overflow happens when the input value of the conversion is
 * multiplied with the calculated mult factor. Larger ranges may
 * reduce the conversion accuracy by choosing smaller mult and shift
 * factors.
 */
void
clocks_calc_mult_shift(u32 *mult, u32 *shift, u32 from, u32 to, u32 maxsec)
{
	u64 tmp;
	u32 sft, sftacc = 32;

	/*
	 * Calculate the shift factor which is limiting the conversion
	 * range:
	 */
	tmp = ((u64)maxsec * from) >> 32;
	while (tmp) {
		tmp >>= 1;
		sftacc--;
	}

	/*
	 * Find the conversion shift/mult pair which has the best
	 * accuracy and fits the maxsec conversion range:
	 */
	for (sft = 32; sft > 0; sft--) {
		tmp = (u64) to << sft;
		tmp += from / 2;
		do_div(tmp, from);
		if ((tmp >> sftacc) == 0)
			break;
	}
	*mult = tmp;
	*shift = sft;
}
EXPORT_SYMBOL_GPL(clocks_calc_mult_shift);
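
/*
 * Illustrative sketch, not part of the original file: how a driver-side
 * helper might use clocks_calc_mult_shift() for cycle-to-nanosecond
 * conversion. The 19.2 MHz counter frequency and the 600 second range are
 * assumptions chosen for the example.
 */
static inline u64 example_cycles_to_ns(u64 cycles)
{
	u32 mult, shift;

	/* from = counter frequency, to = NSEC_PER_SEC for clocksources */
	clocks_calc_mult_shift(&mult, &shift, 19200000, NSEC_PER_SEC, 600);

	/* ns = (cycles * mult) >> shift, the scaled math evaluated above */
	return (cycles * mult) >> shift;
}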

/*[Clocksource internal variables]---------
 * curr_clocksource:
 *	currently selected clocksource.
 * suspend_clocksource:
 *	used to calculate the suspend time.
 * clocksource_list:
 *	linked list with the registered clocksources
 * clocksource_mutex:
 *	protects manipulations to curr_clocksource and the clocksource_list
 * override_name:
 *	Name of the user-specified clocksource.
 */
static struct clocksource *curr_clocksource;
static struct clocksource *suspend_clocksource;
static LIST_HEAD(clocksource_list);
static DEFINE_MUTEX(clocksource_mutex);
static char override_name[CS_NAME_LEN];
static int finished_booting;
static u64 suspend_start;

#ifdef CONFIG_CLOCKSOURCE_WATCHDOG
static void clocksource_watchdog_work(struct work_struct *work);
static void clocksource_select(void);

static LIST_HEAD(watchdog_list);
static struct clocksource *watchdog;
static struct timer_list watchdog_timer;
static DECLARE_WORK(watchdog_work, clocksource_watchdog_work);
static DEFINE_SPINLOCK(watchdog_lock);
static int watchdog_running;
static atomic_t watchdog_reset_pending;

static inline void clocksource_watchdog_lock(unsigned long *flags)
{
	spin_lock_irqsave(&watchdog_lock, *flags);
}

static inline void clocksource_watchdog_unlock(unsigned long *flags)
{
	spin_unlock_irqrestore(&watchdog_lock, *flags);
}

static int clocksource_watchdog_kthread(void *data);
static void __clocksource_change_rating(struct clocksource *cs, int rating);

/*
 * Interval: 0.5sec Threshold: 0.0625s
 */
#define WATCHDOG_INTERVAL (HZ >> 1)
#define WATCHDOG_THRESHOLD (NSEC_PER_SEC >> 4)

static void clocksource_watchdog_work(struct work_struct *work)
{
	/*
	 * We cannot directly run clocksource_watchdog_kthread() here, because
	 * clocksource_select() calls timekeeping_notify() which uses
	 * stop_machine(). One cannot use stop_machine() from a workqueue() due
	 * to lock inversions wrt CPU hotplug.
	 *
	 * Also, we only ever run this work once or twice during the lifetime
	 * of the kernel, so there is no point in creating a more permanent
	 * kthread for this.
	 *
	 * If kthread_run fails the next watchdog scan over the
	 * watchdog_list will find the unstable clock again.
	 */
	kthread_run(clocksource_watchdog_kthread, NULL, "kwatchdog");
}

static void __clocksource_unstable(struct clocksource *cs)
{
	cs->flags &= ~(CLOCK_SOURCE_VALID_FOR_HRES | CLOCK_SOURCE_WATCHDOG);
	cs->flags |= CLOCK_SOURCE_UNSTABLE;

	/*
	 * If the clocksource is registered clocksource_watchdog_kthread() will
	 * re-rate and re-select.
	 */
	if (list_empty(&cs->list)) {
		cs->rating = 0;
		return;
	}

	if (cs->mark_unstable)
		cs->mark_unstable(cs);

	/* kick clocksource_watchdog_kthread() */
	if (finished_booting)
		schedule_work(&watchdog_work);
}

/**
 * clocksource_mark_unstable - mark the provided clock unstable
 * @cs:		clocksource to be marked unstable
 *
 * This function is called by the x86 TSC code to mark clocksources as unstable;
 * it defers demotion and re-selection to a kthread.
 */
void clocksource_mark_unstable(struct clocksource *cs)
{
	unsigned long flags;

	spin_lock_irqsave(&watchdog_lock, flags);
	if (!(cs->flags & CLOCK_SOURCE_UNSTABLE)) {
		if (!list_empty(&cs->list) && list_empty(&cs->wd_list))
			list_add(&cs->wd_list, &watchdog_list);
		__clocksource_unstable(cs);
	}
	spin_unlock_irqrestore(&watchdog_lock, flags);
}

static void clocksource_watchdog(struct timer_list *unused)
{
	struct clocksource *cs;
	u64 csnow, wdnow, cslast, wdlast, delta;
	int64_t wd_nsec, cs_nsec;
	int next_cpu, reset_pending;

	spin_lock(&watchdog_lock);
	if (!watchdog_running)
		goto out;

	reset_pending = atomic_read(&watchdog_reset_pending);

	list_for_each_entry(cs, &watchdog_list, wd_list) {

		/* Clocksource already marked unstable? */
		if (cs->flags & CLOCK_SOURCE_UNSTABLE) {
			if (finished_booting)
				schedule_work(&watchdog_work);
			continue;
		}

		local_irq_disable();
		csnow = cs->read(cs);
		wdnow = watchdog->read(watchdog);
		local_irq_enable();

		/* Clocksource initialized ? */
		if (!(cs->flags & CLOCK_SOURCE_WATCHDOG) ||
		    atomic_read(&watchdog_reset_pending)) {
			cs->flags |= CLOCK_SOURCE_WATCHDOG;
			cs->wd_last = wdnow;
			cs->cs_last = csnow;
			continue;
		}

		delta = clocksource_delta(wdnow, cs->wd_last, watchdog->mask);
		wd_nsec = clocksource_cyc2ns(delta, watchdog->mult,
					     watchdog->shift);

		delta = clocksource_delta(csnow, cs->cs_last, cs->mask);
		cs_nsec = clocksource_cyc2ns(delta, cs->mult, cs->shift);
		wdlast = cs->wd_last;
		cslast = cs->cs_last;
		cs->cs_last = csnow;
		cs->wd_last = wdnow;

		if (atomic_read(&watchdog_reset_pending))
			continue;

		/* Check the deviation from the watchdog clocksource. */
		if (abs(cs_nsec - wd_nsec) > WATCHDOG_THRESHOLD) {
			pr_warn("timekeeping watchdog on CPU%d: Marking clocksource '%s' as unstable because the skew is too large:\n",
				smp_processor_id(), cs->name);
			pr_warn("                      '%s' wd_now: %llx wd_last: %llx mask: %llx\n",
				watchdog->name, wdnow, wdlast, watchdog->mask);
			pr_warn("                      '%s' cs_now: %llx cs_last: %llx mask: %llx\n",
				cs->name, csnow, cslast, cs->mask);
			__clocksource_unstable(cs);
			continue;
		}

		if (cs == curr_clocksource && cs->tick_stable)
			cs->tick_stable(cs);

		if (!(cs->flags & CLOCK_SOURCE_VALID_FOR_HRES) &&
		    (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS) &&
		    (watchdog->flags & CLOCK_SOURCE_IS_CONTINUOUS)) {
			/* Mark it valid for high-res. */
			cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES;

			/*
			 * clocksource_done_booting() will sort it if
			 * finished_booting is not set yet.
			 */
			if (!finished_booting)
				continue;

			/*
			 * If this is not the current clocksource let
			 * the watchdog thread reselect it. Due to the
			 * change to high res this clocksource might
			 * be preferred now. If it is the current
			 * clocksource let the tick code know about
			 * that change.
			 */
			if (cs != curr_clocksource) {
				cs->flags |= CLOCK_SOURCE_RESELECT;
				schedule_work(&watchdog_work);
			} else {
				tick_clock_notify();
			}
		}
	}

	/*
	 * We only clear the watchdog_reset_pending, when we did a
	 * full cycle through all clocksources.
	 */
	if (reset_pending)
		atomic_dec(&watchdog_reset_pending);

	/*
	 * Cycle through CPUs to check if the CPUs stay synchronized
	 * to each other.
	 */
	next_cpu = cpumask_next(raw_smp_processor_id(), cpu_online_mask);
	if (next_cpu >= nr_cpu_ids)
		next_cpu = cpumask_first(cpu_online_mask);
	watchdog_timer.expires += WATCHDOG_INTERVAL;
	add_timer_on(&watchdog_timer, next_cpu);
out:
	spin_unlock(&watchdog_lock);
}
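
/*
 * Illustrative sketch, not part of the original file: the cycle-to-interval
 * conversion the watchdog performs above, isolated. Both the watched
 * clocksource and the watchdog are converted this way over the same 0.5s
 * window, and the two results may differ by at most WATCHDOG_THRESHOLD
 * (62.5 ms) before the clocksource is declared unstable.
 */
static inline s64 example_interval_ns(struct clocksource *cs, u64 now, u64 last)
{
	u64 delta = clocksource_delta(now, last, cs->mask);

	return clocksource_cyc2ns(delta, cs->mult, cs->shift);
}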

static inline void clocksource_start_watchdog(void)
{
	if (watchdog_running || !watchdog || list_empty(&watchdog_list))
		return;
	timer_setup(&watchdog_timer, clocksource_watchdog, 0);
	watchdog_timer.expires = jiffies + WATCHDOG_INTERVAL;
	add_timer_on(&watchdog_timer, cpumask_first(cpu_online_mask));
	watchdog_running = 1;
}

static inline void clocksource_stop_watchdog(void)
{
	if (!watchdog_running || (watchdog && !list_empty(&watchdog_list)))
		return;
	del_timer(&watchdog_timer);
	watchdog_running = 0;
}

static inline void clocksource_reset_watchdog(void)
{
	struct clocksource *cs;

	list_for_each_entry(cs, &watchdog_list, wd_list)
		cs->flags &= ~CLOCK_SOURCE_WATCHDOG;
}

static void clocksource_resume_watchdog(void)
{
	atomic_inc(&watchdog_reset_pending);
}

static void clocksource_enqueue_watchdog(struct clocksource *cs)
{
	INIT_LIST_HEAD(&cs->wd_list);

	if (cs->flags & CLOCK_SOURCE_MUST_VERIFY) {
		/* cs is a clocksource to be watched. */
		list_add(&cs->wd_list, &watchdog_list);
		cs->flags &= ~CLOCK_SOURCE_WATCHDOG;
	} else {
		/* cs is a watchdog. */
		if (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS)
			cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES;
	}
}

static void clocksource_select_watchdog(bool fallback)
{
	struct clocksource *cs, *old_wd;
	unsigned long flags;

	spin_lock_irqsave(&watchdog_lock, flags);

	old_wd = watchdog;
	if (fallback)
		watchdog = NULL;

	list_for_each_entry(cs, &clocksource_list, list) {
		/* cs is a clocksource to be watched. */
		if (cs->flags & CLOCK_SOURCE_MUST_VERIFY)
			continue;

		/* Skip current if we were requested for a fallback. */
		if (fallback && cs == old_wd)
			continue;

		/* Pick the best watchdog. */
		if (!watchdog || cs->rating > watchdog->rating)
			watchdog = cs;
	}
	/* If we failed to find a fallback restore the old one. */
	if (!watchdog)
		watchdog = old_wd;

	/* If we changed the watchdog we need to reset cycles. */
	if (watchdog != old_wd)
		clocksource_reset_watchdog();

	/* Check if the watchdog timer needs to be started. */
	clocksource_start_watchdog();
	spin_unlock_irqrestore(&watchdog_lock, flags);
}

static void clocksource_dequeue_watchdog(struct clocksource *cs)
{
	if (cs != watchdog) {
		if (cs->flags & CLOCK_SOURCE_MUST_VERIFY) {
			/* cs is a watched clocksource. */
			list_del_init(&cs->wd_list);
			/* Check if the watchdog timer needs to be stopped. */
			clocksource_stop_watchdog();
		}
	}
}

static int __clocksource_watchdog_kthread(void)
{
	struct clocksource *cs, *tmp;
	unsigned long flags;
	int select = 0;

	spin_lock_irqsave(&watchdog_lock, flags);
	list_for_each_entry_safe(cs, tmp, &watchdog_list, wd_list) {
		if (cs->flags & CLOCK_SOURCE_UNSTABLE) {
			list_del_init(&cs->wd_list);
			__clocksource_change_rating(cs, 0);
			select = 1;
		}
		if (cs->flags & CLOCK_SOURCE_RESELECT) {
			cs->flags &= ~CLOCK_SOURCE_RESELECT;
			select = 1;
		}
	}
	/* Check if the watchdog timer needs to be stopped. */
	clocksource_stop_watchdog();
	spin_unlock_irqrestore(&watchdog_lock, flags);

	return select;
}

static int clocksource_watchdog_kthread(void *data)
{
	mutex_lock(&clocksource_mutex);
	if (__clocksource_watchdog_kthread())
		clocksource_select();
	mutex_unlock(&clocksource_mutex);
	return 0;
}

static bool clocksource_is_watchdog(struct clocksource *cs)
{
	return cs == watchdog;
}

#else /* CONFIG_CLOCKSOURCE_WATCHDOG */

static void clocksource_enqueue_watchdog(struct clocksource *cs)
{
	if (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS)
		cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES;
}

static void clocksource_select_watchdog(bool fallback) { }
static inline void clocksource_dequeue_watchdog(struct clocksource *cs) { }
static inline void clocksource_resume_watchdog(void) { }
static inline int __clocksource_watchdog_kthread(void) { return 0; }
static bool clocksource_is_watchdog(struct clocksource *cs) { return false; }
void clocksource_mark_unstable(struct clocksource *cs) { }

static inline void clocksource_watchdog_lock(unsigned long *flags) { }
static inline void clocksource_watchdog_unlock(unsigned long *flags) { }

#endif /* CONFIG_CLOCKSOURCE_WATCHDOG */

static bool clocksource_is_suspend(struct clocksource *cs)
{
	return cs == suspend_clocksource;
}

static void __clocksource_suspend_select(struct clocksource *cs)
{
	/*
	 * Skip the clocksource which will be stopped in suspend state.
	 */
	if (!(cs->flags & CLOCK_SOURCE_SUSPEND_NONSTOP))
		return;

	/*
	 * The nonstop clocksource can be selected as the suspend clocksource to
	 * calculate the suspend time, so it should not supply suspend/resume
	 * interfaces to suspend the nonstop clocksource when system suspends.
	 */
	if (cs->suspend || cs->resume) {
		pr_warn("Nonstop clocksource %s should not supply suspend/resume interfaces\n",
			cs->name);
	}

	/* Pick the best rating. */
	if (!suspend_clocksource || cs->rating > suspend_clocksource->rating)
		suspend_clocksource = cs;
}

/**
 * clocksource_suspend_select - Select the best clocksource for suspend timing
 * @fallback:	if select a fallback clocksource
 */
static void clocksource_suspend_select(bool fallback)
{
	struct clocksource *cs, *old_suspend;

	old_suspend = suspend_clocksource;
	if (fallback)
		suspend_clocksource = NULL;

	list_for_each_entry(cs, &clocksource_list, list) {
		/* Skip current if we were requested for a fallback. */
		if (fallback && cs == old_suspend)
			continue;

		__clocksource_suspend_select(cs);
	}
}

/**
 * clocksource_start_suspend_timing - Start measuring the suspend timing
 * @cs:			current clocksource from timekeeping
 * @start_cycles:	current cycles from timekeeping
 *
 * This function will save the start cycle values of suspend timer to calculate
 * the suspend time when resuming system.
 *
 * This function is called late in the suspend process from timekeeping_suspend(),
 * that means processes are frozen, non-boot cpus and interrupts are disabled
 * now. It is therefore possible to start the suspend timer without taking the
 * clocksource mutex.
 */
void clocksource_start_suspend_timing(struct clocksource *cs, u64 start_cycles)
{
	if (!suspend_clocksource)
		return;

	/*
	 * If current clocksource is the suspend timer, we should use the
	 * timekeeping data of current clocksource to avoid same reading
	 * from suspend timer.
	 */
	if (clocksource_is_suspend(cs)) {
		suspend_start = start_cycles;
		return;
	}

	if (suspend_clocksource->enable &&
	    suspend_clocksource->enable(suspend_clocksource)) {
		pr_warn_once("Failed to enable the non-suspendable clocksource.\n");
		return;
	}

	suspend_start = suspend_clocksource->read(suspend_clocksource);
}

/**
 * clocksource_stop_suspend_timing - Stop measuring the suspend timing
 * @cs:		current clocksource from timekeeping
 * @cycle_now:	current cycles from timekeeping
 *
 * This function will calculate the suspend time from suspend timer.
 *
 * Returns nanoseconds since suspend started, 0 if no usable suspend clocksource.
 *
 * This function is called early in the resume process from timekeeping_resume(),
 * that means there is only one cpu, no processes are running and the interrupts
 * are disabled. It is therefore possible to stop the suspend timer without
 * taking the clocksource mutex.
 */
u64 clocksource_stop_suspend_timing(struct clocksource *cs, u64 cycle_now)
{
	u64 now, delta, nsec = 0;

	if (!suspend_clocksource)
		return 0;

	/*
	 * If current clocksource is the suspend timer, we should use the
	 * timekeeping data of current clocksource to avoid same reading
	 * from suspend timer.
	 */
	if (clocksource_is_suspend(cs))
		now = cycle_now;
	else
		now = suspend_clocksource->read(suspend_clocksource);

	if (now > suspend_start) {
		delta = clocksource_delta(now, suspend_start,
					  suspend_clocksource->mask);
		nsec = mul_u64_u32_shr(delta, suspend_clocksource->mult,
				       suspend_clocksource->shift);
	}

	/*
	 * Disable the suspend timer to save power if current clocksource is
	 * not the suspend timer.
	 */
	if (!clocksource_is_suspend(cs) && suspend_clocksource->disable)
		suspend_clocksource->disable(suspend_clocksource);

	return nsec;
}
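
/*
 * Illustrative sketch, not part of the original file: how the two helpers
 * above pair up across a suspend/resume cycle. The caller and the cycle
 * values are assumptions; in the kernel, timekeeping_suspend() and
 * timekeeping_resume() make these calls with the current timekeeping
 * clocksource and its readouts.
 */
static inline u64 example_suspend_window_ns(struct clocksource *cs,
					    u64 cycles_at_suspend,
					    u64 cycles_at_resume)
{
	clocksource_start_suspend_timing(cs, cycles_at_suspend);
	/* ... the system sleeps here ... */
	return clocksource_stop_suspend_timing(cs, cycles_at_resume);
}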

/**
 * clocksource_suspend - suspend the clocksource(s)
 */
void clocksource_suspend(void)
{
	struct clocksource *cs;

	list_for_each_entry_reverse(cs, &clocksource_list, list)
		if (cs->suspend)
			cs->suspend(cs);
}

/**
 * clocksource_resume - resume the clocksource(s)
 */
void clocksource_resume(void)
{
	struct clocksource *cs;

	list_for_each_entry(cs, &clocksource_list, list)
		if (cs->resume)
			cs->resume(cs);

	clocksource_resume_watchdog();
}

/**
 * clocksource_touch_watchdog - Update watchdog
 *
 * Update the watchdog after exception contexts such as kgdb so as not
 * to incorrectly trip the watchdog. This might fail when the kernel
 * was stopped in code which holds watchdog_lock.
 */
void clocksource_touch_watchdog(void)
{
	clocksource_resume_watchdog();
}

/**
 * clocksource_max_adjustment - Returns max adjustment amount
 * @cs:		Pointer to clocksource
 */
static u32 clocksource_max_adjustment(struct clocksource *cs)
{
	u64 ret;

	/*
	 * We won't try to correct for more than 11% adjustments (110,000 ppm).
	 */
	ret = (u64)cs->mult * 11;
	do_div(ret, 100);
	return (u32)ret;
}

/**
 * clocks_calc_max_nsecs - Returns maximum nanoseconds that can be converted
 * @mult:	cycle to nanosecond multiplier
 * @shift:	cycle to nanosecond divisor (power of two)
 * @maxadj:	maximum adjustment value to mult (~11%)
 * @mask:	bitmask for two's complement subtraction of non 64 bit counters
 * @max_cyc:	maximum cycle value before potential overflow (does not include
 *		any safety margin)
 *
 * NOTE: This function includes a safety margin of 50%, in other words, we
 * return half the number of nanoseconds the hardware counter can technically
 * cover. This is done so that we can potentially detect problems caused by
 * delayed timers or bad hardware, which might result in time intervals that
 * are larger than what the math used can handle without overflows.
 */
u64 clocks_calc_max_nsecs(u32 mult, u32 shift, u32 maxadj, u64 mask, u64 *max_cyc)
{
	u64 max_nsecs, max_cycles;

	/*
	 * Calculate the maximum number of cycles that we can pass to the
	 * cyc2ns() function without overflowing a 64-bit result.
	 */
	max_cycles = ULLONG_MAX;
	do_div(max_cycles, mult+maxadj);

	/*
	 * The actual maximum number of cycles we can defer the clocksource is
	 * determined by the minimum of max_cycles and mask.
	 * Note: Here we subtract the maxadj to make sure we don't sleep for
	 * too long if there's a large negative adjustment.
	 */
	max_cycles = min(max_cycles, mask);
	max_nsecs = clocksource_cyc2ns(max_cycles, mult - maxadj, shift);

	/* return the max_cycles value as well if requested */
	if (max_cyc)
		*max_cyc = max_cycles;

	/* Return 50% of the actual maximum, so we can detect bad values */
	max_nsecs >>= 1;

	return max_nsecs;
}
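
/*
 * Worked example (illustrative, not from the original file): take a 1 GHz
 * counter with a 32-bit mask and mult/shift chosen so one cycle is about one
 * nanosecond. The counter wraps after roughly 2^32 ns, i.e. ~4.3 seconds;
 * the ~11% maxadj headroom trims that to ~3.8s and the 50% safety margin
 * halves it again, so the core will not defer this clocksource beyond ~1.9s.
 */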

/**
 * clocksource_update_max_deferment - Updates the clocksource max_idle_ns & max_cycles
 * @cs:		Pointer to clocksource to be updated
 */
static inline void clocksource_update_max_deferment(struct clocksource *cs)
{
	cs->max_idle_ns = clocks_calc_max_nsecs(cs->mult, cs->shift,
						cs->maxadj, cs->mask,
						&cs->max_cycles);
}

#ifndef CONFIG_ARCH_USES_GETTIMEOFFSET

static struct clocksource *clocksource_find_best(bool oneshot, bool skipcur)
{
	struct clocksource *cs;

	if (!finished_booting || list_empty(&clocksource_list))
		return NULL;

	/*
	 * We pick the clocksource with the highest rating. If oneshot
	 * mode is active, we pick the highres valid clocksource with
	 * the best rating.
	 */
	list_for_each_entry(cs, &clocksource_list, list) {
		if (skipcur && cs == curr_clocksource)
			continue;
		if (oneshot && !(cs->flags & CLOCK_SOURCE_VALID_FOR_HRES))
			continue;
		return cs;
	}
	return NULL;
}

static void __clocksource_select(bool skipcur)
{
	bool oneshot = tick_oneshot_mode_active();
	struct clocksource *best, *cs;

	/* Find the best suitable clocksource */
	best = clocksource_find_best(oneshot, skipcur);
	if (!best)
		return;

	if (!strlen(override_name))
		goto found;

	/* Check for the override clocksource. */
	list_for_each_entry(cs, &clocksource_list, list) {
		if (skipcur && cs == curr_clocksource)
			continue;
		if (strcmp(cs->name, override_name) != 0)
			continue;
		/*
		 * Check to make sure we don't switch to a non-highres
		 * capable clocksource if the tick code is in oneshot
		 * mode (highres=on or nohz=on)
		 */
		if (!(cs->flags & CLOCK_SOURCE_VALID_FOR_HRES) && oneshot) {
			/* Override clocksource cannot be used. */
			if (cs->flags & CLOCK_SOURCE_UNSTABLE) {
				pr_warn("Override clocksource %s is unstable and not HRT compatible - cannot switch while in HRT/NOHZ mode\n",
					cs->name);
				override_name[0] = 0;
			} else {
				/*
				 * The override cannot be currently verified.
				 * Deferring to let the watchdog check.
				 */
				pr_info("Override clocksource %s is not currently HRT compatible - deferring\n",
					cs->name);
			}
		} else
			/* Override clocksource can be used. */
			best = cs;
		break;
	}

found:
	if (curr_clocksource != best && !timekeeping_notify(best)) {
		pr_info("Switched to clocksource %s\n", best->name);
		curr_clocksource = best;
	}
}

/**
 * clocksource_select - Select the best clocksource available
 *
 * Private function. Must hold clocksource_mutex when called.
 *
 * Select the clocksource with the best rating, or the clocksource,
 * which is selected by userspace override.
 */
static void clocksource_select(void)
{
	__clocksource_select(false);
}

static void clocksource_select_fallback(void)
{
	__clocksource_select(true);
}

#else /* !CONFIG_ARCH_USES_GETTIMEOFFSET */
static inline void clocksource_select(void) { }
static inline void clocksource_select_fallback(void) { }

#endif

/*
 * clocksource_done_booting - Called near the end of core bootup
 *
 * Hack to avoid lots of clocksource churn at boot time.
 * We use fs_initcall because we want this to start before
 * device_initcall but after subsys_initcall.
 */
static int __init clocksource_done_booting(void)
{
	mutex_lock(&clocksource_mutex);
	curr_clocksource = clocksource_default_clock();
	finished_booting = 1;

	/*
	 * Run the watchdog first to eliminate unstable clock sources
	 */
	__clocksource_watchdog_kthread();
	clocksource_select();
	mutex_unlock(&clocksource_mutex);
	return 0;
}
fs_initcall(clocksource_done_booting);

/*
 * Enqueue the clocksource sorted by rating
 */
static void clocksource_enqueue(struct clocksource *cs)
{
	struct list_head *entry = &clocksource_list;
	struct clocksource *tmp;

	list_for_each_entry(tmp, &clocksource_list, list) {
		/* Keep track of the place, where to insert */
		if (tmp->rating < cs->rating)
			break;
		entry = &tmp->list;
	}
	list_add(&cs->list, entry);
}

/**
 * __clocksource_update_freq_scale - Used to update a clocksource with a new freq
 * @cs:		clocksource to be registered
 * @scale:	Scale factor multiplied against freq to get clocksource hz
 * @freq:	clocksource frequency (cycles per second) divided by scale
 *
 * This should only be called from the clocksource->enable() method.
 *
 * This *SHOULD NOT* be called directly! Please use the
 * __clocksource_update_freq_hz() or __clocksource_update_freq_khz() helper
 * functions.
 */
void __clocksource_update_freq_scale(struct clocksource *cs, u32 scale, u32 freq)
{
	u64 sec;

	/*
	 * Default clocksources are *special* and self-define their mult/shift.
	 * But, you're not special, so you should specify a freq value.
	 */
	if (freq) {
		/*
		 * Calc the maximum number of seconds which we can run before
		 * wrapping around. For clocksources which have a mask > 32-bit
		 * we need to limit the max sleep time to have a good
		 * conversion precision. 10 minutes is still a reasonable
		 * amount. That results in a shift value of 24 for a
		 * clocksource with mask >= 40-bit and f >= 4GHz. That maps
		 * to ~ 0.06ppm granularity for NTP.
		 */
		sec = cs->mask;
		do_div(sec, freq);
		do_div(sec, scale);
		if (!sec)
			sec = 1;
		else if (sec > 600 && cs->mask > UINT_MAX)
			sec = 600;

		clocks_calc_mult_shift(&cs->mult, &cs->shift, freq,
				       NSEC_PER_SEC / scale, sec * scale);
	}

	/*
	 * Ensure clocksources that have large 'mult' values don't overflow
	 * when adjusted.
	 */
	cs->maxadj = clocksource_max_adjustment(cs);
	while (freq && ((cs->mult + cs->maxadj < cs->mult)
		|| (cs->mult - cs->maxadj > cs->mult))) {
		cs->mult >>= 1;
		cs->shift--;
		cs->maxadj = clocksource_max_adjustment(cs);
	}

	/*
	 * Only warn for *special* clocksources that self-define
	 * their mult/shift values and don't specify a freq.
	 */
	WARN_ONCE(cs->mult + cs->maxadj < cs->mult,
		"timekeeping: Clocksource %s might overflow on 11%% adjustment\n",
		cs->name);

	clocksource_update_max_deferment(cs);

	pr_info("%s: mask: 0x%llx max_cycles: 0x%llx, max_idle_ns: %lld ns\n",
		cs->name, cs->mask, cs->max_cycles, cs->max_idle_ns);
}
EXPORT_SYMBOL_GPL(__clocksource_update_freq_scale);

/**
 * __clocksource_register_scale - Used to install new clocksources
 * @cs:		clocksource to be registered
 * @scale:	Scale factor multiplied against freq to get clocksource hz
 * @freq:	clocksource frequency (cycles per second) divided by scale
 *
 * Returns -EBUSY if registration fails, zero otherwise.
 *
 * This *SHOULD NOT* be called directly! Please use the
 * clocksource_register_hz() or clocksource_register_khz() helper functions.
 */
int __clocksource_register_scale(struct clocksource *cs, u32 scale, u32 freq)
{
	unsigned long flags;

	clocksource_arch_init(cs);

	/* Initialize mult/shift and max_idle_ns */
	__clocksource_update_freq_scale(cs, scale, freq);

	/* Add clocksource to the clocksource list */
	mutex_lock(&clocksource_mutex);

	clocksource_watchdog_lock(&flags);
	clocksource_enqueue(cs);
	clocksource_enqueue_watchdog(cs);
	clocksource_watchdog_unlock(&flags);

	clocksource_select();
	clocksource_select_watchdog(false);
	__clocksource_suspend_select(cs);
	mutex_unlock(&clocksource_mutex);
	return 0;
}
EXPORT_SYMBOL_GPL(__clocksource_register_scale);
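
/*
 * Illustrative sketch, not part of the original file: the driver side of the
 * registration path above. The "example" name, rating, 1 MHz frequency and
 * dummy read callback are assumptions chosen for the example; real drivers
 * usually go through the clocksource_register_hz()/_khz() helpers, which
 * wrap __clocksource_register_scale().
 */
static u64 example_cs_read(struct clocksource *cs)
{
	return 0;	/* a real driver returns its hardware counter here */
}

static struct clocksource example_cs = {
	.name	= "example",
	.rating	= 100,
	.read	= example_cs_read,
	.mask	= CLOCKSOURCE_MASK(32),
	.flags	= CLOCK_SOURCE_IS_CONTINUOUS,
};

static int __maybe_unused example_cs_init(void)
{
	/* scale = 1, freq in Hz: computes mult/shift, then registers */
	return __clocksource_register_scale(&example_cs, 1, 1000000);
}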

static void __clocksource_change_rating(struct clocksource *cs, int rating)
{
	list_del(&cs->list);
	cs->rating = rating;
	clocksource_enqueue(cs);
}

/**
 * clocksource_change_rating - Change the rating of a registered clocksource
 * @cs:		clocksource to be changed
 * @rating:	new rating
 */
void clocksource_change_rating(struct clocksource *cs, int rating)
{
	unsigned long flags;

	mutex_lock(&clocksource_mutex);
	clocksource_watchdog_lock(&flags);
	__clocksource_change_rating(cs, rating);
	clocksource_watchdog_unlock(&flags);

	clocksource_select();
	clocksource_select_watchdog(false);
	clocksource_suspend_select(false);
	mutex_unlock(&clocksource_mutex);
}
EXPORT_SYMBOL(clocksource_change_rating);

/*
 * Unbind clocksource @cs. Called with clocksource_mutex held
 */
static int clocksource_unbind(struct clocksource *cs)
{
	unsigned long flags;

	if (clocksource_is_watchdog(cs)) {
		/* Select and try to install a replacement watchdog. */
		clocksource_select_watchdog(true);
		if (clocksource_is_watchdog(cs))
			return -EBUSY;
	}

	if (cs == curr_clocksource) {
		/* Select and try to install a replacement clock source */
		clocksource_select_fallback();
		if (curr_clocksource == cs)
			return -EBUSY;
	}

	if (clocksource_is_suspend(cs)) {
		/*
		 * Select and try to install a replacement suspend clocksource.
		 * If no replacement suspend clocksource, we will just let the
		 * clocksource go and have no suspend clocksource.
		 */
		clocksource_suspend_select(true);
	}

	clocksource_watchdog_lock(&flags);
	clocksource_dequeue_watchdog(cs);
	list_del_init(&cs->list);
	clocksource_watchdog_unlock(&flags);

	return 0;
}

/**
 * clocksource_unregister - remove a registered clocksource
 * @cs:	clocksource to be unregistered
 */
int clocksource_unregister(struct clocksource *cs)
{
	int ret = 0;

	mutex_lock(&clocksource_mutex);
	if (!list_empty(&cs->list))
		ret = clocksource_unbind(cs);
	mutex_unlock(&clocksource_mutex);
	return ret;
}
EXPORT_SYMBOL(clocksource_unregister);

#ifdef CONFIG_SYSFS
/**
 * current_clocksource_show - sysfs interface for current clocksource
 * @dev:	unused
 * @attr:	unused
 * @buf:	char buffer to be filled with clocksource list
 *
 * Provides sysfs interface for listing current clocksource.
 */
static ssize_t current_clocksource_show(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	ssize_t count = 0;

	mutex_lock(&clocksource_mutex);
	count = snprintf(buf, PAGE_SIZE, "%s\n", curr_clocksource->name);
	mutex_unlock(&clocksource_mutex);

	return count;
}

ssize_t sysfs_get_uname(const char *buf, char *dst, size_t cnt)
{
	size_t ret = cnt;

	/* strings from sysfs write are not 0 terminated! */
	if (!cnt || cnt >= CS_NAME_LEN)
		return -EINVAL;

	/* strip off the trailing \n: */
	if (buf[cnt-1] == '\n')
		cnt--;
	if (cnt > 0)
		memcpy(dst, buf, cnt);
	dst[cnt] = 0;
	return ret;
}

/**
 * current_clocksource_store - interface for manually overriding clocksource
 * @dev:	unused
 * @attr:	unused
 * @buf:	name of override clocksource
 * @count:	length of buffer
 *
 * Takes input from sysfs interface for manually overriding the default
 * clocksource selection.
 */
static ssize_t current_clocksource_store(struct device *dev,
					 struct device_attribute *attr,
					 const char *buf, size_t count)
{
	ssize_t ret;

	mutex_lock(&clocksource_mutex);

	ret = sysfs_get_uname(buf, override_name, count);
	if (ret >= 0)
		clocksource_select();

	mutex_unlock(&clocksource_mutex);

	return ret;
}
static DEVICE_ATTR_RW(current_clocksource);

/**
 * unbind_clocksource_store - interface for manually unbinding clocksource
 * @dev:	unused
 * @attr:	unused
 * @buf:	unbind clocksource name
 * @count:	length of buffer
 *
 * Takes input from sysfs interface for manually unbinding a clocksource.
 */
static ssize_t unbind_clocksource_store(struct device *dev,
					struct device_attribute *attr,
					const char *buf, size_t count)
{
	struct clocksource *cs;
	char name[CS_NAME_LEN];
	ssize_t ret;

	ret = sysfs_get_uname(buf, name, count);
	if (ret < 0)
		return ret;

	ret = -ENODEV;
	mutex_lock(&clocksource_mutex);
	list_for_each_entry(cs, &clocksource_list, list) {
		if (strcmp(cs->name, name))
			continue;
		ret = clocksource_unbind(cs);
		break;
	}
	mutex_unlock(&clocksource_mutex);

	return ret ? ret : count;
}
static DEVICE_ATTR_WO(unbind_clocksource);

/**
 * available_clocksource_show - sysfs interface for listing clocksource
 * @dev:	unused
 * @attr:	unused
 * @buf:	char buffer to be filled with clocksource list
 *
 * Provides sysfs interface for listing registered clocksources
 */
static ssize_t available_clocksource_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct clocksource *src;
	ssize_t count = 0;

	mutex_lock(&clocksource_mutex);
	list_for_each_entry(src, &clocksource_list, list) {
		/*
		 * Don't show non-HRES clocksource if the tick code is
		 * in one shot mode (highres=on or nohz=on)
		 */
		if (!tick_oneshot_mode_active() ||
		    (src->flags & CLOCK_SOURCE_VALID_FOR_HRES))
			count += snprintf(buf + count,
				  max((ssize_t)PAGE_SIZE - count, (ssize_t)0),
				  "%s ", src->name);
	}
	mutex_unlock(&clocksource_mutex);

	count += snprintf(buf + count,
			  max((ssize_t)PAGE_SIZE - count, (ssize_t)0), "\n");

	return count;
}
static DEVICE_ATTR_RO(available_clocksource);

static struct attribute *clocksource_attrs[] = {
	&dev_attr_current_clocksource.attr,
	&dev_attr_unbind_clocksource.attr,
	&dev_attr_available_clocksource.attr,
	NULL
};
ATTRIBUTE_GROUPS(clocksource);

static struct bus_type clocksource_subsys = {
	.name = "clocksource",
	.dev_name = "clocksource",
};

static struct device device_clocksource = {
	.id	= 0,
	.bus	= &clocksource_subsys,
	.groups	= clocksource_groups,
};

static int __init init_clocksource_sysfs(void)
{
	int error = subsys_system_register(&clocksource_subsys, NULL);

	if (!error)
		error = device_register(&device_clocksource);

	return error;
}

device_initcall(init_clocksource_sysfs);
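
/*
 * Illustrative usage, not part of the original file: the attributes
 * registered above appear under /sys/devices/system/clocksource/clocksource0/.
 * From user space:
 *
 *   cat /sys/devices/system/clocksource/clocksource0/available_clocksource
 *   echo tsc > /sys/devices/system/clocksource/clocksource0/current_clocksource
 */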
#endif /* CONFIG_SYSFS */

/**
 * boot_override_clocksource - boot clock override
 * @str:	override name
 *
 * Takes a clocksource= boot argument and uses it
 * as the clocksource override name.
 */
static int __init boot_override_clocksource(char* str)
{
	mutex_lock(&clocksource_mutex);
	if (str)
		strlcpy(override_name, str, sizeof(override_name));
	mutex_unlock(&clocksource_mutex);
	return 1;
}

__setup("clocksource=", boot_override_clocksource);

/**
 * boot_override_clock - Compatibility layer for deprecated boot option
 * @str:	override name
 *
 * DEPRECATED! Takes a clock= boot argument and uses it
 * as the clocksource override name
 */
static int __init boot_override_clock(char* str)
{
	if (!strcmp(str, "pmtmr")) {
		pr_warn("clock=pmtmr is deprecated - use clocksource=acpi_pm\n");
		return boot_override_clocksource("acpi_pm");
	}
	pr_warn("clock= boot option is deprecated - use clocksource=xyz\n");
	return boot_override_clocksource(str);
}

__setup("clock=", boot_override_clock);
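
/*
 * Example (illustrative, not part of the original file): booting with
 * "clocksource=acpi_pm" on the kernel command line fills override_name
 * before any clocksource registers, so __clocksource_select() prefers the
 * ACPI PM timer once it shows up and passes the high-res checks above.
 */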