1#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
2
3#include <linux/kernel.h>
4#include <linux/sched.h>
5#include <linux/init.h>
6#include <linux/module.h>
7#include <linux/timer.h>
8#include <linux/acpi_pmtmr.h>
9#include <linux/cpufreq.h>
10#include <linux/delay.h>
11#include <linux/clocksource.h>
12#include <linux/percpu.h>
13#include <linux/timex.h>
14#include <linux/static_key.h>
15
16#include <asm/hpet.h>
17#include <asm/timer.h>
18#include <asm/vgtod.h>
19#include <asm/time.h>
20#include <asm/delay.h>
21#include <asm/hypervisor.h>
22#include <asm/nmi.h>
23#include <asm/x86_init.h>
24#include <asm/geode.h>
25
/* CPU frequency in kHz, set at calibration time; exported for drivers. */
unsigned int __read_mostly cpu_khz;
EXPORT_SYMBOL(cpu_khz);

/* TSC frequency in kHz; equals cpu_khz at boot, may be refined later. */
unsigned int __read_mostly tsc_khz;
EXPORT_SYMBOL(tsc_khz);

/* Set (once) when the TSC is found unusable as a time source. */
static int __read_mostly tsc_unstable;

/*
 * TSC disabled state:
 *   -1: undecided (before tsc_init())
 *    0: TSC enabled and in use
 *    1: TSC disabled via the "notsc" command line option
 */
static int __read_mostly tsc_disabled = -1;

/* Enabled by tsc_init() once the TSC may back native_sched_clock(). */
static DEFINE_STATIC_KEY_FALSE(__use_tsc);

/* Non-zero when "tsc=reliable" was given or the hardware vouches for it. */
int tsc_clocksource_reliable;

/* ART (Always Running Timer) to TSC relation, filled in by detect_art(). */
static u32 art_to_tsc_numerator;
static u32 art_to_tsc_denominator;
static u64 art_to_tsc_offset;
struct clocksource *art_related_clocksource;
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
/*
 * Per-CPU cycles -> nanoseconds conversion state.
 *
 * Two parameter slots are used so a writer can fill the slot that is
 * not currently published via 'head' while readers may still be using
 * the old one; 'tail' trails readers that still reference old data.
 */
struct cyc2ns {
	struct cyc2ns_data data[2];	/* the two parameter slots */
	struct cyc2ns_data *head;	/* latest published slot (readers use this) */
	struct cyc2ns_data *tail;	/* slot possibly still in use by readers */
};

static DEFINE_PER_CPU_ALIGNED(struct cyc2ns, cyc2ns);
81
/*
 * Begin a read-side section against this CPU's cyc2ns data.
 *
 * Disables preemption (so we stay on this CPU's copy) and pins the
 * currently published 'head' slot by bumping its reference count so a
 * concurrent writer cannot recycle it. Pair with cyc2ns_read_end().
 */
struct cyc2ns_data *cyc2ns_read_begin(void)
{
	struct cyc2ns_data *head;

	preempt_disable();

	head = this_cpu_read(cyc2ns.head);

	/*
	 * Ensure we observe the slot contents no earlier than the
	 * pointer itself (only matters for Alpha-style ordering).
	 */
	smp_read_barrier_depends();
	head->__count++;
	barrier();

	return head;
}
99
/*
 * End a read-side section started with cyc2ns_read_begin().
 *
 * Drops the reference on the slot; the last reader publishes the slot
 * as 'tail', which is what cyc2ns_write_begin() waits on before it
 * may reuse the slot for new data.
 */
void cyc2ns_read_end(struct cyc2ns_data *head)
{
	barrier();

	if (!--head->__count) {
		/*
		 * No readers left on this slot: advance 'tail' so a
		 * waiting writer may recycle it.
		 */
		this_cpu_write(cyc2ns.tail, head);
	}
	preempt_enable();
}
121
122
123
124
125
126
127
/*
 * Grab the cyc2ns data slot that is free for writing on @cpu.
 *
 * Picks the slot that is not currently published as 'head', then
 * spins until 'tail' has moved off it, i.e. until the last reader of
 * the old data in that slot is gone. Pair with cyc2ns_write_end().
 */
static struct cyc2ns_data *cyc2ns_write_begin(int cpu)
{
	struct cyc2ns *c2n = &per_cpu(cyc2ns, cpu);
	struct cyc2ns_data *data = c2n->data;

	/* Use the slot that is not the currently published one. */
	if (data == c2n->head)
		data++;

	/* Wait for any straggling readers still pinned to this slot. */
	while (c2n->tail == data)
		cpu_relax();

	return data;
}
148
/*
 * Publish the slot filled in after cyc2ns_write_begin().
 *
 * The smp_wmb() orders the writes to the slot contents before the
 * 'head' update, so readers observing the new 'head' also observe the
 * new conversion parameters.
 */
static void cyc2ns_write_end(int cpu, struct cyc2ns_data *data)
{
	struct cyc2ns *c2n = &per_cpu(cyc2ns, cpu);

	smp_wmb();

	ACCESS_ONCE(c2n->head) = data;
}
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186static void cyc2ns_data_init(struct cyc2ns_data *data)
187{
188 data->cyc2ns_mul = 0;
189 data->cyc2ns_shift = 0;
190 data->cyc2ns_offset = 0;
191 data->__count = 0;
192}
193
194static void cyc2ns_init(int cpu)
195{
196 struct cyc2ns *c2n = &per_cpu(cyc2ns, cpu);
197
198 cyc2ns_data_init(&c2n->data[0]);
199 cyc2ns_data_init(&c2n->data[1]);
200
201 c2n->head = c2n->data;
202 c2n->tail = c2n->data;
203}
204
/*
 * Convert a TSC cycle count to nanoseconds using this CPU's cyc2ns data.
 *
 * Open-coded, notrace-safe variant of cyc2ns_read_begin()/end(): in
 * the common case (head == tail) no writer is active and no reference
 * count is needed; otherwise pin the slot exactly like
 * cyc2ns_read_begin() would.
 */
static inline unsigned long long cycles_2_ns(unsigned long long cyc)
{
	struct cyc2ns_data *data, *tail;
	unsigned long long ns;

	preempt_disable_notrace();
	data = this_cpu_read(cyc2ns.head);
	tail = this_cpu_read(cyc2ns.tail);

	if (likely(data == tail)) {
		/* Fast path: no concurrent writer, use head directly. */
		ns = data->cyc2ns_offset;
		ns += mul_u64_u32_shr(cyc, data->cyc2ns_mul, data->cyc2ns_shift);
	} else {
		/* Slow path: pin the slot against a concurrent writer. */
		data->__count++;

		barrier();

		ns = data->cyc2ns_offset;
		ns += mul_u64_u32_shr(cyc, data->cyc2ns_mul, data->cyc2ns_shift);

		barrier();

		/* Last reader out advances tail, releasing the slot. */
		if (!--data->__count)
			this_cpu_write(cyc2ns.tail, data);
	}
	preempt_enable_notrace();

	return ns;
}
241
242static void set_cyc2ns_scale(unsigned long cpu_khz, int cpu)
243{
244 unsigned long long tsc_now, ns_now;
245 struct cyc2ns_data *data;
246 unsigned long flags;
247
248 local_irq_save(flags);
249 sched_clock_idle_sleep_event();
250
251 if (!cpu_khz)
252 goto done;
253
254 data = cyc2ns_write_begin(cpu);
255
256 tsc_now = rdtsc();
257 ns_now = cycles_2_ns(tsc_now);
258
259
260
261
262
263
264 clocks_calc_mult_shift(&data->cyc2ns_mul, &data->cyc2ns_shift, cpu_khz,
265 NSEC_PER_MSEC, 0);
266
267
268
269
270
271
272
273 if (data->cyc2ns_shift == 32) {
274 data->cyc2ns_shift = 31;
275 data->cyc2ns_mul >>= 1;
276 }
277
278 data->cyc2ns_offset = ns_now -
279 mul_u64_u32_shr(tsc_now, data->cyc2ns_mul, data->cyc2ns_shift);
280
281 cyc2ns_write_end(cpu, data);
282
283done:
284 sched_clock_idle_wakeup_event(0);
285 local_irq_restore(flags);
286}
287
288
289
/*
 * Scheduler clock in nanoseconds.
 *
 * When the __use_tsc static key is enabled (see tsc_init()) this reads
 * the TSC and converts it via cycles_2_ns(); otherwise it falls back
 * to jiffies-based time, which is what runs before calibration or
 * when the TSC is disabled.
 */
u64 native_sched_clock(void)
{
	if (static_branch_likely(&__use_tsc)) {
		u64 tsc_now = rdtsc();

		/* return the value in ns */
		return cycles_2_ns(tsc_now);
	}

	/*
	 * Jiffies fallback: ~1/HZ resolution, offset so the clock
	 * starts at zero rather than at INITIAL_JIFFIES.
	 */
	return (jiffies_64 - INITIAL_JIFFIES) * (1000000000 / HZ);
}
311
312
313
314
/*
 * Convert a caller-supplied raw TSC value to sched_clock() ns.
 * Unlike native_sched_clock() this does not consult __use_tsc.
 */
u64 native_sched_clock_from_tsc(u64 tsc)
{
	return cycles_2_ns(tsc);
}
319
320
321
/*
 * sched_clock(): indirect through paravirt when enabled so hypervisors
 * can supply their own clock; otherwise alias straight to
 * native_sched_clock() to avoid the extra call.
 */
#ifdef CONFIG_PARAVIRT
unsigned long long sched_clock(void)
{
	return paravirt_sched_clock();
}
#else
unsigned long long
sched_clock(void) __attribute__((alias("native_sched_clock")));
#endif
331
332int check_tsc_unstable(void)
333{
334 return tsc_unstable;
335}
336EXPORT_SYMBOL_GPL(check_tsc_unstable);
337
338int check_tsc_disabled(void)
339{
340 return tsc_disabled;
341}
342EXPORT_SYMBOL_GPL(check_tsc_disabled);
343
#ifdef CONFIG_X86_TSC
/*
 * "notsc" boot option when the kernel is built with TSC support: the
 * TSC capability cannot be fully cleared, so only mark it disabled for
 * timekeeping purposes via tsc_disabled.
 */
int __init notsc_setup(char *str)
{
	pr_warn("Kernel compiled with CONFIG_X86_TSC, cannot disable TSC completely\n");
	tsc_disabled = 1;
	return 1;
}
#else
/*
 * "notsc" boot option without CONFIG_X86_TSC: clear the CPU capability
 * bit so the TSC is not used at all.
 */
int __init notsc_setup(char *str)
{
	setup_clear_cpu_cap(X86_FEATURE_TSC);
	return 1;
}
#endif

__setup("notsc", notsc_setup);
364
365static int no_sched_irq_time;
366
367static int __init tsc_setup(char *str)
368{
369 if (!strcmp(str, "reliable"))
370 tsc_clocksource_reliable = 1;
371 if (!strncmp(str, "noirqtime", 9))
372 no_sched_irq_time = 1;
373 return 1;
374}
375
376__setup("tsc=", tsc_setup);
377
378#define MAX_RETRIES 5
379#define SMI_TRESHOLD 50000
380
381
382
383
384static u64 tsc_read_refs(u64 *p, int hpet)
385{
386 u64 t1, t2;
387 int i;
388
389 for (i = 0; i < MAX_RETRIES; i++) {
390 t1 = get_cycles();
391 if (hpet)
392 *p = hpet_readl(HPET_COUNTER) & 0xFFFFFFFF;
393 else
394 *p = acpi_pm_read_early();
395 t2 = get_cycles();
396 if ((t2 - t1) < SMI_TRESHOLD)
397 return t2;
398 }
399 return ULLONG_MAX;
400}
401
402
403
404
/*
 * Calculate the TSC frequency (kHz) from two HPET readings.
 *
 * @deltatsc is the TSC delta pre-scaled by 1000000 (see the callers);
 * @hpet1/@hpet2 are 32-bit HPET counter readings bracketing it. A
 * single counter wrap-around is handled. Note do_div() divides its
 * first argument in place.
 */
static unsigned long calc_hpet_ref(u64 deltatsc, u64 hpet1, u64 hpet2)
{
	u64 tmp;

	/* Handle one wrap of the 32-bit HPET counter. */
	if (hpet2 < hpet1)
		hpet2 += 0x100000000ULL;
	hpet2 -= hpet1;
	tmp = ((u64)hpet2 * hpet_readl(HPET_PERIOD));
	do_div(tmp, 1000000);
	do_div(deltatsc, tmp);

	return (unsigned long) deltatsc;
}
418
419
420
421
/*
 * Calculate the TSC frequency (kHz) from two ACPI PM timer readings
 * bracketing @deltatsc (pre-scaled by 1000000 by the callers).
 * Returns ULONG_MAX when both readings are zero (no PM timer data).
 */
static unsigned long calc_pmtimer_ref(u64 deltatsc, u64 pm1, u64 pm2)
{
	u64 tmp;

	if (!pm1 && !pm2)
		return ULONG_MAX;

	/* Handle a counter wrap (ACPI_PM_OVRRUN is the counter modulus). */
	if (pm2 < pm1)
		pm2 += (u64)ACPI_PM_OVRRUN;
	pm2 -= pm1;
	tmp = pm2 * 1000000000LL;
	do_div(tmp, PMTMR_TICKS_PER_SEC);
	do_div(deltatsc, tmp);

	return (unsigned long) deltatsc;
}
438
/* First round of slow PIT calibration: 10ms gate, at least 1000 loops. */
#define CAL_MS		10
#define CAL_LATCH	(PIT_TICK_RATE / (1000 / CAL_MS))
#define CAL_PIT_LOOPS	1000

/* Retry parameters: longer 50ms gate, at least 5000 loops. */
#define CAL2_MS		50
#define CAL2_LATCH	(PIT_TICK_RATE / (1000 / CAL2_MS))
#define CAL2_PIT_LOOPS	5000
446
447
448
449
450
451
452
453
454
/*
 * Calibrate the TSC against PIT channel 2.
 *
 * Programs the channel as a one-shot that expires after @latch ticks
 * (@ms milliseconds), then spins reading the TSC until the gate
 * fires. The min/max TSC delta between consecutive reads detects
 * disturbances (e.g. SMIs): the result is rejected when the loop ran
 * fewer than @loopmin times or the deltas vary by more than 10x.
 *
 * Returns the measured TSC rate in kHz, or ULONG_MAX on failure.
 */
static unsigned long pit_calibrate_tsc(u32 latch, unsigned long ms, int loopmin)
{
	u64 tsc, t1, t2, delta;
	unsigned long tscmin, tscmax;
	int pitcnt;

	/* Set the gate high, disable the speaker. */
	outb((inb(0x61) & ~0x02) | 0x01, 0x61);

	/*
	 * Counter 2, mode 0 (one-shot), binary count; load the latch
	 * value LSB then MSB.
	 */
	outb(0xb0, 0x43);
	outb(latch & 0xff, 0x42);
	outb(latch >> 8, 0x42);

	tsc = t1 = t2 = get_cycles();

	pitcnt = 0;
	tscmax = 0;
	tscmin = ULONG_MAX;
	/* Spin until the PIT output (port 0x61 bit 5) goes high. */
	while ((inb(0x61) & 0x20) == 0) {
		t2 = get_cycles();
		delta = t2 - tsc;
		tsc = t2;
		if ((unsigned long) delta < tscmin)
			tscmin = (unsigned int) delta;
		if ((unsigned long) delta > tscmax)
			tscmax = (unsigned int) delta;
		pitcnt++;
	}

	/*
	 * Sanity checks: too few iterations means something stole
	 * large chunks of time; a >10x spread between the fastest and
	 * slowest iteration means the measurement is too noisy.
	 */
	if (pitcnt < loopmin || tscmax > 10 * tscmin)
		return ULONG_MAX;

	/* Cycles elapsed per millisecond == frequency in kHz. */
	delta = t2 - t1;
	do_div(delta, ms);
	return delta;
}
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
/*
 * Read back the PIT counter and compare its MSB against @val. The
 * first inb() consumes the low byte (the counter is programmed for
 * LSB-then-MSB access) so the second read returns the MSB.
 */
static inline int pit_verify_msb(unsigned char val)
{
	/* Ignore LSB */
	inb(0x42);
	return inb(0x42) == val;
}
548
/*
 * Spin while the PIT MSB still reads @val, counting iterations.
 *
 * On return, *tscp is the TSC at the last successful read of @val and
 * *deltap is the TSC delta covering the final iteration plus the
 * failing read — an upper bound on the measurement uncertainty.
 * Returns non-zero when @val was observed enough times (> 5).
 */
static inline int pit_expect_msb(unsigned char val, u64 *tscp, unsigned long *deltap)
{
	int count;
	u64 tsc = 0, prev_tsc = 0;

	for (count = 0; count < 50000; count++) {
		if (!pit_verify_msb(val))
			break;
		prev_tsc = tsc;
		tsc = get_cycles();
	}
	*deltap = get_cycles() - prev_tsc;
	*tscp = tsc;

	/*
	 * We require _some_ success, but the quality control
	 * will be based on the error terms on the TSC values.
	 */
	return count > 5;
}
569
570
571
572
573
574
575
/* Spend at most 50ms; each MSB step is 256 PIT ticks. */
#define MAX_QUICK_PIT_MS 50
#define MAX_QUICK_PIT_ITERATIONS (MAX_QUICK_PIT_MS * PIT_TICK_RATE / 1000 / 256)

/*
 * Fast TSC calibration using the PIT.
 *
 * Counts how many TSC cycles elapse while the PIT MSB counts down a
 * known number of 256-tick steps. Succeeds as soon as the accumulated
 * read-uncertainty (d1 + d2) drops below ~1/2048 (about 500 ppm) of
 * the elapsed cycles; gives up after MAX_QUICK_PIT_MS worth of steps.
 * Returns the TSC rate in kHz, or 0 on failure.
 */
static unsigned long quick_pit_calibrate(void)
{
	int i;
	u64 tsc, delta;
	unsigned long d1, d2;

	/* Set the gate high, disable the speaker. */
	outb((inb(0x61) & ~0x02) | 0x01, 0x61);

	/* Counter 2, mode 0 (one-shot), binary count. */
	outb(0xb0, 0x43);

	/* Start the countdown at 0xffff (LSB then MSB). */
	outb(0xff, 0x42);
	outb(0xff, 0x42);

	/*
	 * The PIT starts counting at the next edge; reading the
	 * counter back once gives it time to latch the new value.
	 */
	pit_verify_msb(0);

	if (pit_expect_msb(0xff, &tsc, &d1)) {
		for (i = 1; i <= MAX_QUICK_PIT_ITERATIONS; i++) {
			if (!pit_expect_msb(0xff-i, &delta, &d2))
				break;

			delta -= tsc;

			/*
			 * Extrapolate the error: if after the first
			 * step it can never shrink below ~500 ppm
			 * within the iteration budget, fail fast.
			 */
			if (i == 1 &&
			    d1 + d2 >= (delta * MAX_QUICK_PIT_ITERATIONS) >> 11)
				return 0;

			/* Iterate until the error is below ~500 ppm. */
			if (d1+d2 >= delta >> 11)
				continue;

			/*
			 * Check the PIT one more time to verify that
			 * all TSC reads were stable wrt the PIT — this
			 * also bounds the last cycle read ('d2') in
			 * pit_expect_msb.
			 */
			if (!pit_verify_msb(0xfe - i))
				break;
			goto success;
		}
	}
	pr_info("Fast TSC calibration failed\n");
	return 0;

success:
	/*
	 * The PIT MSB decremented 'i' times (i * 256 ticks) while the
	 * TSC advanced by 'delta' cycles, so:
	 *
	 *   kHz = delta / (i * 256 / PIT_TICK_RATE) / 1000
	 *       = (delta * PIT_TICK_RATE) / (i * 256 * 1000)
	 */
	delta *= PIT_TICK_RATE;
	do_div(delta, i*256*1000);
	pr_info("Fast TSC calibration using PIT\n");
	return delta;
}
666
667
668
669
/*
 * native_calibrate_tsc - calibrate the TSC frequency, in kHz.
 *
 * Tries the MSR-based method and the fast PIT method first; if both
 * fail, runs up to three rounds of slow PIT calibration, each
 * cross-checked against a reference clock (HPET or ACPI PM timer),
 * and returns the most trustworthy result (0 on total failure).
 */
unsigned long native_calibrate_tsc(void)
{
	u64 tsc1, tsc2, delta, ref1, ref2;
	unsigned long tsc_pit_min = ULONG_MAX, tsc_ref_min = ULONG_MAX;
	unsigned long flags, latch, ms, fast_calibrate;
	int hpet = is_hpet_enabled(), i, loopmin;

	/* First try the MSR based calibration (used by some SoCs). */
	local_irq_save(flags);
	fast_calibrate = try_msr_calibrate_tsc();
	local_irq_restore(flags);
	if (fast_calibrate)
		return fast_calibrate;

	/* Then the fast PIT calibration. */
	local_irq_save(flags);
	fast_calibrate = quick_pit_calibrate();
	local_irq_restore(flags);
	if (fast_calibrate)
		return fast_calibrate;

	/*
	 * Slow path: start with the short CAL_* parameters; they may
	 * be replaced by the longer CAL2_* set below if the PIT
	 * results keep failing the sanity checks.
	 */
	latch = CAL_LATCH;
	ms = CAL_MS;
	loopmin = CAL_PIT_LOOPS;

	for (i = 0; i < 3; i++) {
		unsigned long tsc_pit_khz;

		/*
		 * Sample the reference clock and the TSC around the
		 * PIT calibration, all with interrupts disabled, so
		 * the PIT result can be validated against it.
		 */
		local_irq_save(flags);
		tsc1 = tsc_read_refs(&ref1, hpet);
		tsc_pit_khz = pit_calibrate_tsc(latch, ms, loopmin);
		tsc2 = tsc_read_refs(&ref2, hpet);
		local_irq_restore(flags);

		/* Pick the lowest PIT TSC calibration so far. */
		tsc_pit_min = min(tsc_pit_min, tsc_pit_khz);

		/* Is a reference clock (HPET/PM timer) available? */
		if (ref1 == ref2)
			continue;

		/* Was the reference sampling disturbed by an SMI? */
		if (tsc1 == ULLONG_MAX || tsc2 == ULLONG_MAX)
			continue;

		tsc2 = (tsc2 - tsc1) * 1000000LL;
		if (hpet)
			tsc2 = calc_hpet_ref(tsc2, ref1, ref2);
		else
			tsc2 = calc_pmtimer_ref(tsc2, ref1, ref2);

		tsc_ref_min = min(tsc_ref_min, (unsigned long) tsc2);

		/* Ratio of PIT to reference result, in percent. */
		delta = ((u64) tsc_pit_min) * 100;
		do_div(delta, tsc_ref_min);

		/*
		 * If both results agree within a 10% window the
		 * calibration succeeded; return the reference value,
		 * which is the more precise of the two.
		 */
		if (delta >= 90 && delta <= 110) {
			pr_info("PIT calibration matches %s. %d loops\n",
				hpet ? "HPET" : "PMTIMER", i + 1);
			return tsc_ref_min;
		}

		/*
		 * If the PIT failed twice, switch to the longer
		 * measurement parameters (helps in virtualized
		 * environments where PIT emulation is slow).
		 */
		if (i == 1 && tsc_pit_min == ULONG_MAX) {
			latch = CAL2_LATCH;
			ms = CAL2_MS;
			loopmin = CAL2_PIT_LOOPS;
		}
	}

	/*
	 * Now sort out the results.
	 */
	if (tsc_pit_min == ULONG_MAX) {
		/* The PIT gave no useful value at all. */
		pr_warn("Unable to calibrate against PIT\n");

		/* No reference clock either: give up. */
		if (!hpet && !ref1 && !ref2) {
			pr_notice("No reference (HPET/PMTIMER) available\n");
			return 0;
		}

		/* The reference calibration failed as well: give up. */
		if (tsc_ref_min == ULONG_MAX) {
			pr_warn("HPET/PMTIMER calibration failed\n");
			return 0;
		}

		/* Only the reference result is usable. */
		pr_info("using %s reference calibration\n",
			hpet ? "HPET" : "PMTIMER");

		return tsc_ref_min;
	}

	/* No reference clock available: use the PIT result. */
	if (!hpet && !ref1 && !ref2) {
		pr_info("Using PIT calibration value\n");
		return tsc_pit_min;
	}

	/* The reference calibration failed: use the PIT result. */
	if (tsc_ref_min == ULONG_MAX) {
		pr_warn("HPET/PMTIMER calibration failed. Using PIT calibration.\n");
		return tsc_pit_min;
	}

	/*
	 * Both produced values but they deviate too much; prefer the
	 * PIT value, since broken PM timers are known to exist.
	 */
	pr_warn("PIT calibration deviates from %s: %lu %lu\n",
		hpet ? "HPET" : "PMTIMER", tsc_pit_min, tsc_ref_min);
	pr_info("Using PIT calibration value\n");
	return tsc_pit_min;
}
831
/*
 * Recalibrate the TSC/CPU frequency (UP only) and rescale
 * loops_per_jiffy proportionally. Returns -ENODEV on SMP builds or
 * when no TSC is present.
 */
int recalibrate_cpu_khz(void)
{
#ifndef CONFIG_SMP
	unsigned long cpu_khz_old = cpu_khz;

	if (cpu_has_tsc) {
		tsc_khz = x86_platform.calibrate_tsc();
		cpu_khz = tsc_khz;
		cpu_data(0).loops_per_jiffy =
			cpufreq_scale(cpu_data(0).loops_per_jiffy,
					cpu_khz_old, cpu_khz);
		return 0;
	} else
		return -ENODEV;
#else
	return -ENODEV;
#endif
}

EXPORT_SYMBOL(recalibrate_cpu_khz);
852
853
/* sched_clock() value snapshotted before suspend, restored on resume. */
static unsigned long long cyc2ns_suspend;

/*
 * Snapshot sched_clock() before suspend so that
 * tsc_restore_sched_clock_state() can restore continuity afterwards.
 * Only meaningful while sched_clock is stable.
 */
void tsc_save_sched_clock_state(void)
{
	if (sched_clock_stable())
		cyc2ns_suspend = sched_clock();
}
863
864
865
866
867
868
869
870
871
/*
 * Restore sched_clock() continuity after resume: the TSC may have been
 * reset across suspend, so recompute every CPU's cyc2ns offset such
 * that sched_clock() continues from the value saved by
 * tsc_save_sched_clock_state().
 */
void tsc_restore_sched_clock_state(void)
{
	unsigned long long offset;
	unsigned long flags;
	int cpu;

	if (!sched_clock_stable())
		return;

	local_irq_save(flags);

	/*
	 * We are coming out of suspend, so there is no concurrency to
	 * worry about: write both data slots of all CPUs directly.
	 * First zero this CPU's offsets so sched_clock() below yields
	 * raw converted cycles, from which the new offset is derived.
	 */
	this_cpu_write(cyc2ns.data[0].cyc2ns_offset, 0);
	this_cpu_write(cyc2ns.data[1].cyc2ns_offset, 0);

	offset = cyc2ns_suspend - sched_clock();

	for_each_possible_cpu(cpu) {
		per_cpu(cyc2ns.data[0].cyc2ns_offset, cpu) = offset;
		per_cpu(cyc2ns.data[1].cyc2ns_offset, cpu) = offset;
	}

	local_irq_restore(flags);
}
901
902#ifdef CONFIG_CPU_FREQ
903
904
905
906
907
908
909
910
911
912
913
914
/* Baseline values captured on the first cpufreq transition. */
static unsigned int ref_freq;
static unsigned long loops_per_jiffy_ref;
static unsigned long tsc_khz_ref;

/*
 * cpufreq transition callback: rescale loops_per_jiffy and tsc_khz in
 * proportion to the frequency change, mark the TSC unstable when its
 * rate actually varies, and install the new cyc2ns scale. Frequency
 * increases are applied at PRECHANGE and decreases at POSTCHANGE, so
 * the delay loop never runs too fast for the current frequency.
 */
static int time_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
				void *data)
{
	struct cpufreq_freqs *freq = data;
	unsigned long *lpj;

	/* Nothing to do when the TSC rate is frequency-invariant. */
	if (cpu_has(&cpu_data(freq->cpu), X86_FEATURE_CONSTANT_TSC))
		return 0;

	lpj = &boot_cpu_data.loops_per_jiffy;
#ifdef CONFIG_SMP
	if (!(freq->flags & CPUFREQ_CONST_LOOPS))
		lpj = &cpu_data(freq->cpu).loops_per_jiffy;
#endif

	/* Capture the reference values on the first transition. */
	if (!ref_freq) {
		ref_freq = freq->old;
		loops_per_jiffy_ref = *lpj;
		tsc_khz_ref = tsc_khz;
	}
	if ((val == CPUFREQ_PRECHANGE && freq->old < freq->new) ||
			(val == CPUFREQ_POSTCHANGE && freq->old > freq->new)) {
		*lpj = cpufreq_scale(loops_per_jiffy_ref, ref_freq, freq->new);

		tsc_khz = cpufreq_scale(tsc_khz_ref, ref_freq, freq->new);
		if (!(freq->flags & CPUFREQ_CONST_LOOPS))
			mark_tsc_unstable("cpufreq changes");

		set_cyc2ns_scale(tsc_khz, freq->cpu);
	}

	return 0;
}
952
/* Registered with the cpufreq core by cpufreq_tsc() below. */
static struct notifier_block time_cpufreq_notifier_block = {
	.notifier_call  = time_cpufreq_notifier
};
956
957static int __init cpufreq_tsc(void)
958{
959 if (!cpu_has_tsc)
960 return 0;
961 if (boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
962 return 0;
963 cpufreq_register_notifier(&time_cpufreq_notifier_block,
964 CPUFREQ_TRANSITION_NOTIFIER);
965 return 0;
966}
967
968core_initcall(cpufreq_tsc);
969
970#endif
971
#define ART_CPUID_LEAF (0x15)
#define ART_MIN_DENOMINATOR (1)	/* CPUID may report a zero denominator */

/*
 * Detect the ART (Always Running Timer) and its relation to the TSC:
 * CPUID leaf 0x15 supplies the denominator/numerator of the TSC/ART
 * ratio and MSR_IA32_TSC_ADJUST the offset. Sets X86_FEATURE_ART when
 * usable.
 */
static void detect_art(void)
{
	unsigned int unused[2];

	if (boot_cpu_data.cpuid_level < ART_CPUID_LEAF)
		return;

	cpuid(ART_CPUID_LEAF, &art_to_tsc_denominator,
	      &art_to_tsc_numerator, unused, unused+1);

	/* ART is unusable under a hypervisor or without a non-stop TSC. */
	if (boot_cpu_has(X86_FEATURE_HYPERVISOR) ||
	    !boot_cpu_has(X86_FEATURE_NONSTOP_TSC) ||
	    art_to_tsc_denominator < ART_MIN_DENOMINATOR)
		return;

	if (rdmsrl_safe(MSR_IA32_TSC_ADJUST, &art_to_tsc_offset))
		return;

	/* Advertise ART support (consumed by convert_art_to_tsc() users). */
	setup_force_cpu_cap(X86_FEATURE_ART);
}
1001
1002
1003
1004
/* Forward declaration; needed by mark_tsc_unstable() below. */
static struct clocksource clocksource_tsc;

/*
 * Clocksource read callback: an ordered TSC read. rdtsc_ordered()
 * prevents the read from being speculated ahead, which the clocksource
 * consistency checks rely on.
 */
static cycle_t read_tsc(struct clocksource *cs)
{
	return (cycle_t)rdtsc_ordered();
}
1027
1028
1029
1030
/*
 * The TSC clocksource: high rating, but must be verified by the
 * clocksource watchdog unless declared reliable (see
 * init_tsc_clocksource()); supports the TSC vDSO clock mode.
 */
static struct clocksource clocksource_tsc = {
	.name                   = "tsc",
	.rating                 = 300,
	.read                   = read_tsc,
	.mask                   = CLOCKSOURCE_MASK(64),
	.flags                  = CLOCK_SOURCE_IS_CONTINUOUS |
				  CLOCK_SOURCE_MUST_VERIFY,
	.archdata               = { .vclock_mode = VCLOCK_TSC },
};
1040
/*
 * Declare the TSC unusable for timekeeping. Only the first call per
 * boot takes effect. Downgrades sched_clock stability, IRQ-time
 * accounting and the TSC clocksource.
 */
void mark_tsc_unstable(char *reason)
{
	if (!tsc_unstable) {
		tsc_unstable = 1;
		clear_sched_clock_stable();
		disable_sched_clock_irqtime();
		pr_info("Marking TSC unstable due to %s\n", reason);
		/*
		 * A non-zero mult means the clocksource has been
		 * registered; let the clocksource core handle it.
		 * Otherwise neutralize it before registration.
		 */
		if (clocksource_tsc.mult)
			clocksource_mark_unstable(&clocksource_tsc);
		else {
			clocksource_tsc.flags |= CLOCK_SOURCE_UNSTABLE;
			clocksource_tsc.rating = 0;
		}
	}
}

EXPORT_SYMBOL_GPL(mark_tsc_unstable);
1059
/*
 * Decide whether the TSC can be trusted without watchdog verification:
 * either the CPU advertises X86_FEATURE_TSC_RELIABLE, or (Geode LX)
 * the RTSC_SUSP MSR bit is set (presumably: TSC keeps counting across
 * suspend — verify against Geode documentation).
 */
static void __init check_system_tsc_reliable(void)
{
#if defined(CONFIG_MGEODEGX1) || defined(CONFIG_MGEODE_LX) || defined(CONFIG_X86_GENERIC)
	if (is_geode_lx()) {
		/* RTSC counts during suspend */
#define RTSC_SUSP 0x100
		unsigned long res_low, res_high;

		rdmsr_safe(MSR_GEODE_BUSCONT_CONF0, &res_low, &res_high);

		if (res_low & RTSC_SUSP)
			tsc_clocksource_reliable = 1;
	}
#endif
	if (boot_cpu_has(X86_FEATURE_TSC_RELIABLE))
		tsc_clocksource_reliable = 1;
}
1077
1078
1079
1080
1081
/*
 * Heuristic: must the TSCs of different CPUs be assumed out of sync?
 * Returns 1 when they must (missing/unstable TSC, clustered APIC
 * boxes, or multi-CPU non-Intel systems without constant/reliable
 * TSC); 0 when they can be assumed synchronized.
 */
int unsynchronized_tsc(void)
{
	if (!cpu_has_tsc || tsc_unstable)
		return 1;

#ifdef CONFIG_SMP
	if (apic_is_clustered_box())
		return 1;
#endif

	if (boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
		return 0;

	if (tsc_clocksource_reliable)
		return 0;
	/*
	 * Intel systems are normally all synchronized; exceptions must
	 * mark the TSC as unstable themselves:
	 */
	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) {
		/* Assume multi-socket systems are not synchronized: */
		if (num_possible_cpus() > 1)
			return 1;
	}

	return 0;
}
1109
1110
1111
1112
/*
 * Convert an ART (Always Running Timer) value to a TSC reading:
 *
 *   tsc = art * numerator / denominator + offset
 *
 * The division is split into quotient and remainder parts so the
 * intermediate multiplication stays within 64 bits. Returns the TSC
 * value tied to the related clocksource.
 */
struct system_counterval_t convert_art_to_tsc(cycle_t art)
{
	u64 tmp, res, rem;

	rem = do_div(art, art_to_tsc_denominator);

	res = art * art_to_tsc_numerator;
	tmp = rem * art_to_tsc_numerator;

	do_div(tmp, art_to_tsc_denominator);
	res += tmp + art_to_tsc_offset;

	return (struct system_counterval_t) {.cs = art_related_clocksource,
			.cycles = res};
}
EXPORT_SYMBOL(convert_art_to_tsc);
1129
static void tsc_refine_calibration_work(struct work_struct *work);
static DECLARE_DELAYED_WORK(tsc_irqwork, tsc_refine_calibration_work);

/*
 * tsc_refine_calibration_work - further refine the TSC frequency.
 *
 * Runs twice from a delayed workqueue: the first invocation samples
 * the TSC against HPET/PM-timer and re-arms itself for one second
 * later; the second computes the frequency from the two samples. The
 * refined value replaces tsc_khz only when within 1% of it; in every
 * case the TSC clocksource is registered at the end.
 */
static void tsc_refine_calibration_work(struct work_struct *work)
{
	static u64 tsc_start = -1, ref_start;
	static int hpet;
	u64 tsc_stop, ref_stop, delta;
	unsigned long freq;

	/* Don't bother refining the TSC on unstable systems. */
	if (check_tsc_unstable())
		goto out;

	/*
	 * First pass: record the start values and reschedule
	 * ourselves for ~1 second from now.
	 */
	if (tsc_start == -1) {
		/*
		 * Latch the choice of reference hardware once, so we
		 * don't mix HPET and PM-timer samples should the HPET
		 * become enabled in between.
		 */
		hpet = is_hpet_enabled();
		schedule_delayed_work(&tsc_irqwork, HZ);
		tsc_start = tsc_read_refs(&ref_start, hpet);
		return;
	}

	tsc_stop = tsc_read_refs(&ref_stop, hpet);

	/* Is a reference clock (HPET/PM timer) available? */
	if (ref_start == ref_stop)
		goto out;

	/* Was the sampling disturbed by an SMI? */
	if (tsc_start == ULLONG_MAX || tsc_stop == ULLONG_MAX)
		goto out;

	delta = tsc_stop - tsc_start;
	delta *= 1000000LL;
	if (hpet)
		freq = calc_hpet_ref(delta, ref_start, ref_stop);
	else
		freq = calc_pmtimer_ref(delta, ref_start, ref_stop);

	/* Only accept the refined value if it is within 1%. */
	if (abs(tsc_khz - freq) > tsc_khz/100)
		goto out;

	tsc_khz = freq;
	pr_info("Refined TSC clocksource calibration: %lu.%03lu MHz\n",
		(unsigned long)tsc_khz / 1000,
		(unsigned long)tsc_khz % 1000);

out:
	if (boot_cpu_has(X86_FEATURE_ART))
		art_related_clocksource = &clocksource_tsc;
	clocksource_register_khz(&clocksource_tsc, tsc_khz);
}
1204
1205
/*
 * Set flags on and register the TSC clocksource. Registration is
 * normally deferred to tsc_refine_calibration_work() (scheduled here)
 * so the frequency can be refined first; systems with a reliable TSC
 * register immediately, as no refinement happens without watchdog
 * verification.
 */
static int __init init_tsc_clocksource(void)
{
	if (!cpu_has_tsc || tsc_disabled > 0 || !tsc_khz)
		return 0;

	if (tsc_clocksource_reliable)
		clocksource_tsc.flags &= ~CLOCK_SOURCE_MUST_VERIFY;

	/* Neutralize the clocksource when already known unstable. */
	if (check_tsc_unstable()) {
		clocksource_tsc.rating = 0;
		clocksource_tsc.flags &= ~CLOCK_SOURCE_IS_CONTINUOUS;
	}

	if (boot_cpu_has(X86_FEATURE_NONSTOP_TSC_S3))
		clocksource_tsc.flags |= CLOCK_SOURCE_SUSPEND_NONSTOP;

	/*
	 * Trust the earlier calibration result on systems exporting a
	 * reliable TSC and register right away.
	 */
	if (boot_cpu_has(X86_FEATURE_TSC_RELIABLE)) {
		clocksource_register_khz(&clocksource_tsc, tsc_khz);
		return 0;
	}

	schedule_delayed_work(&tsc_irqwork, 0);
	return 0;
}
/*
 * device_initcall so this runs after the HPET is fully initialized.
 */
device_initcall(init_tsc_clocksource);
1239
/*
 * Boot-time TSC setup: calibrate the frequency, install the cyc2ns
 * conversion for every possible CPU, enable TSC-based sched_clock()
 * and delay loops, and run the stability/reliability checks.
 */
void __init tsc_init(void)
{
	u64 lpj;
	int cpu;

	if (!cpu_has_tsc) {
		setup_clear_cpu_cap(X86_FEATURE_TSC_DEADLINE_TIMER);
		return;
	}

	tsc_khz = x86_platform.calibrate_tsc();
	cpu_khz = tsc_khz;

	if (!tsc_khz) {
		mark_tsc_unstable("could not calculate TSC khz");
		setup_clear_cpu_cap(X86_FEATURE_TSC_DEADLINE_TIMER);
		return;
	}

	pr_info("Detected %lu.%03lu MHz processor\n",
		(unsigned long)cpu_khz / 1000,
		(unsigned long)cpu_khz % 1000);

	/*
	 * Secondary CPUs do not run through tsc_init(), so set up the
	 * scale factors for all possible CPUs here, assuming the boot
	 * CPU's speed (the cpufreq notifier corrects divergence later).
	 */
	for_each_possible_cpu(cpu) {
		cyc2ns_init(cpu);
		set_cyc2ns_scale(cpu_khz, cpu);
	}

	if (tsc_disabled > 0)
		return;

	/* Now allow native_sched_clock() to use rdtsc. */

	tsc_disabled = 0;
	static_branch_enable(&__use_tsc);

	if (!no_sched_irq_time)
		enable_sched_clock_irqtime();

	/* Seed lpj for calibrate_delay() from the TSC frequency. */
	lpj = ((u64)tsc_khz * 1000);
	do_div(lpj, HZ);
	lpj_fine = lpj;

	use_tsc_delay();

	if (unsynchronized_tsc())
		mark_tsc_unstable("TSCs unsynchronized");

	check_system_tsc_reliable();

	detect_art();
}
1298
1299#ifdef CONFIG_SMP
1300
1301
1302
1303
1304
1305
1306unsigned long calibrate_delay_is_known(void)
1307{
1308 int sibling, cpu = smp_processor_id();
1309 struct cpumask *mask = topology_core_cpumask(cpu);
1310
1311 if (!tsc_disabled && !cpu_has(&cpu_data(cpu), X86_FEATURE_CONSTANT_TSC))
1312 return 0;
1313
1314 if (!mask)
1315 return 0;
1316
1317 sibling = cpumask_any_but(mask, cpu);
1318 if (sibling < nr_cpu_ids)
1319 return cpu_data(sibling).loops_per_jiffy;
1320 return 0;
1321}
1322#endif
1323