1#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
2
3#include <linux/kernel.h>
4#include <linux/sched.h>
5#include <linux/init.h>
6#include <linux/module.h>
7#include <linux/timer.h>
8#include <linux/acpi_pmtmr.h>
9#include <linux/cpufreq.h>
10#include <linux/delay.h>
11#include <linux/clocksource.h>
12#include <linux/percpu.h>
13#include <linux/timex.h>
14
15#include <asm/hpet.h>
16#include <asm/timer.h>
17#include <asm/vgtod.h>
18#include <asm/time.h>
19#include <asm/delay.h>
20#include <asm/hypervisor.h>
21#include <asm/nmi.h>
22#include <asm/x86_init.h>
23
/* CPU frequency in kHz, as established by TSC calibration. */
unsigned int __read_mostly cpu_khz;
EXPORT_SYMBOL(cpu_khz);

/* TSC tick rate in kHz; set by x86_platform.calibrate_tsc() in tsc_init(). */
unsigned int __read_mostly tsc_khz;
EXPORT_SYMBOL(tsc_khz);

/*
 * TSC can be unstable due to cpufreq or due to unsynchronized TSCs.
 * Set once by mark_tsc_unstable() and never cleared.
 */
static int __read_mostly tsc_unstable;

/*
 * TSC use state: -1 = not yet decided (boot default), 0 = in use
 * (set in tsc_init()), 1 = disabled via the "notsc" boot parameter.
 * native_sched_clock() falls back to jiffies for any non-zero value.
 */
static int __read_mostly tsc_disabled = -1;

/* Nonzero skips the clocksource watchdog ("tsc=reliable" or CPU feature). */
int tsc_clocksource_reliable;
42
43
/*
 * Scheduler clock - returns current time in nanosec units.
 * Based on the TSC when usable; falls back to a jiffies-derived value
 * while the TSC is disabled or not yet calibrated.
 */
u64 native_sched_clock(void)
{
	u64 this_offset;

	/*
	 * Fall back to jiffies if we're:
	 * - not yet calibrated (tsc_disabled is still -1 early in boot), or
	 * - disabled via the "notsc" boot parameter (tsc_disabled == 1).
	 */
	if (unlikely(tsc_disabled)) {
		/* No locking; a rare stale jiffies_64 read is acceptable here. */
		return (jiffies_64 - INITIAL_JIFFIES) * (1000000000 / HZ);
	}

	/* read the Time Stamp Counter: */
	rdtscll(this_offset);

	/* return the value in ns, scaled via the per-cpu cyc2ns factors */
	return __cycles_2_ns(this_offset);
}
67
68
69
#ifdef CONFIG_PARAVIRT
/* Under paravirt, the hypervisor provides the scheduler clock. */
unsigned long long sched_clock(void)
{
	return paravirt_sched_clock();
}
#else
/* Native case: sched_clock() is just an alias for native_sched_clock(). */
unsigned long long
sched_clock(void) __attribute__((alias("native_sched_clock")));
#endif
79
/* Raw, unadjusted TSC read; thin exported wrapper around __native_read_tsc(). */
unsigned long long native_read_tsc(void)
{
	return __native_read_tsc();
}
EXPORT_SYMBOL(native_read_tsc);
85
/* Return nonzero if the TSC has been marked unstable. */
int check_tsc_unstable(void)
{
	return tsc_unstable;
}
EXPORT_SYMBOL_GPL(check_tsc_unstable);
91
/* Return the TSC disable state: -1 undecided, 0 in use, 1 disabled. */
int check_tsc_disabled(void)
{
	return tsc_disabled;
}
EXPORT_SYMBOL_GPL(check_tsc_disabled);
97
#ifdef CONFIG_X86_TSC
/*
 * "notsc" on a CONFIG_X86_TSC kernel: the TSC cannot be removed entirely
 * (the kernel was built assuming it exists), so only stop using it for
 * sched_clock and as a clocksource by setting tsc_disabled.
 */
int __init notsc_setup(char *str)
{
	pr_warn("Kernel compiled with CONFIG_X86_TSC, cannot disable TSC completely\n");
	tsc_disabled = 1;
	return 1;
}
#else
/*
 * disable flag for tsc. Takes effect by clearing the TSC cpu capability
 * bit, after which the kernel behaves as if the CPU had no TSC at all.
 */
int __init notsc_setup(char *str)
{
	setup_clear_cpu_cap(X86_FEATURE_TSC);
	return 1;
}
#endif

__setup("notsc", notsc_setup);
118
119static int no_sched_irq_time;
120
121static int __init tsc_setup(char *str)
122{
123 if (!strcmp(str, "reliable"))
124 tsc_clocksource_reliable = 1;
125 if (!strncmp(str, "noirqtime", 9))
126 no_sched_irq_time = 1;
127 return 1;
128}
129
130__setup("tsc=", tsc_setup);
131
132#define MAX_RETRIES 5
133#define SMI_TRESHOLD 50000
134
135
136
137
138static u64 tsc_read_refs(u64 *p, int hpet)
139{
140 u64 t1, t2;
141 int i;
142
143 for (i = 0; i < MAX_RETRIES; i++) {
144 t1 = get_cycles();
145 if (hpet)
146 *p = hpet_readl(HPET_COUNTER) & 0xFFFFFFFF;
147 else
148 *p = acpi_pm_read_early();
149 t2 = get_cycles();
150 if ((t2 - t1) < SMI_TRESHOLD)
151 return t2;
152 }
153 return ULLONG_MAX;
154}
155
156
157
158
/*
 * Calculate the TSC frequency from HPET reference counter readings that
 * bracket the same interval as @deltatsc.  The caller pre-scales
 * deltatsc by 1e6, so the result comes out in kHz.
 */
static unsigned long calc_hpet_ref(u64 deltatsc, u64 hpet1, u64 hpet2)
{
	u64 tmp;

	/* The HPET counter reads are masked to 32 bits; handle one wrap. */
	if (hpet2 < hpet1)
		hpet2 += 0x100000000ULL;
	hpet2 -= hpet1;
	/* HPET_PERIOD is femtoseconds per tick; /1e6 yields nanoseconds. */
	tmp = ((u64)hpet2 * hpet_readl(HPET_PERIOD));
	do_div(tmp, 1000000);
	/* do_div() divides in place: deltatsc becomes the quotient. */
	do_div(deltatsc, tmp);

	return (unsigned long) deltatsc;
}
172
173
174
175
176static unsigned long calc_pmtimer_ref(u64 deltatsc, u64 pm1, u64 pm2)
177{
178 u64 tmp;
179
180 if (!pm1 && !pm2)
181 return ULONG_MAX;
182
183 if (pm2 < pm1)
184 pm2 += (u64)ACPI_PM_OVRRUN;
185 pm2 -= pm1;
186 tmp = pm2 * 1000000000LL;
187 do_div(tmp, PMTMR_TICKS_PER_SEC);
188 do_div(deltatsc, tmp);
189
190 return (unsigned long) deltatsc;
191}
192
/* First calibration pass: 10ms PIT countdown, at least 1000 PIT reads. */
#define CAL_MS 10
#define CAL_LATCH (PIT_TICK_RATE / (1000 / CAL_MS))
#define CAL_PIT_LOOPS 1000

/* Second, longer pass (for virtualized/slow environments): 50ms. */
#define CAL2_MS 50
#define CAL2_LATCH (PIT_TICK_RATE / (1000 / CAL2_MS))
#define CAL2_PIT_LOOPS 5000

/*
 * Try to calibrate the TSC against the Programmable Interrupt Timer
 * and return the TSC frequency in kHz.
 *
 * @latch:   PIT channel 2 countdown value covering @ms milliseconds
 * @ms:      calibration interval in milliseconds
 * @loopmin: minimum number of PIT polls required for a trusted result
 *
 * Returns ULONG_MAX on failure (too few reads, or TSC deltas between
 * reads varied too much - both indicating SMI disturbance).
 */
static unsigned long pit_calibrate_tsc(u32 latch, unsigned long ms, int loopmin)
{
	u64 tsc, t1, t2, delta;
	unsigned long tscmin, tscmax;
	int pitcnt;

	/* Set the Gate high, disable speaker */
	outb((inb(0x61) & ~0x02) | 0x01, 0x61);

	/*
	 * Setup CTC channel 2 for mode 0 (interrupt on terminal count),
	 * binary count.  Program the latch register LSB then MSB to
	 * start the countdown.
	 */
	outb(0xb0, 0x43);
	outb(latch & 0xff, 0x42);
	outb(latch >> 8, 0x42);

	tsc = t1 = t2 = get_cycles();

	/* Poll until the counter reaches terminal count (port 0x61 bit 5). */
	pitcnt = 0;
	tscmax = 0;
	tscmin = ULONG_MAX;
	while ((inb(0x61) & 0x20) == 0) {
		t2 = get_cycles();
		delta = t2 - tsc;
		tsc = t2;
		/* Track min/max TSC delta per poll as an SMI detector. */
		if ((unsigned long) delta < tscmin)
			tscmin = (unsigned int) delta;
		if ((unsigned long) delta > tscmax)
			tscmax = (unsigned int) delta;
		pitcnt++;
	}

	/*
	 * Sanity checks:
	 *
	 * If we were not able to read the PIT more than loopmin times,
	 * then we have been hit by a massive SMI.
	 *
	 * If the maximum delta is 10 times larger than the minimum,
	 * we got hit by an SMI as well.
	 */
	if (pitcnt < loopmin || tscmax > 10 * tscmin)
		return ULONG_MAX;

	/* Calculate the PIT value: cycles over the interval / ms = kHz. */
	delta = t2 - t1;
	do_div(delta, ms);
	return delta;
}
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
/*
 * Latch-free check that the PIT counter MSB still equals @val.
 * The first port read consumes the LSB, the second returns the MSB.
 */
static inline int pit_verify_msb(unsigned char val)
{
	unsigned char msb;

	inb(0x42);		/* throw away the LSB */
	msb = inb(0x42);
	return msb == val;
}
302
/*
 * Spin while the PIT MSB reads @val, recording TSC values as we go.
 *
 * On return, *tscp is the last TSC value read while the MSB still
 * matched, and *deltap is an upper bound on the uncertainty of that
 * read (cycles between the second-to-last matching read and now).
 *
 * Returns nonzero if the MSB was observed at @val for more than 5
 * iterations - i.e. we genuinely caught this counter step.
 */
static inline int pit_expect_msb(unsigned char val, u64 *tscp, unsigned long *deltap)
{
	int count;
	u64 tsc = 0, prev_tsc = 0;

	for (count = 0; count < 50000; count++) {
		if (!pit_verify_msb(val))
			break;
		prev_tsc = tsc;
		tsc = get_cycles();
	}
	*deltap = get_cycles() - prev_tsc;
	*tscp = tsc;

	/*
	 * We require _some_ success, but the quality control
	 * will be based on the error terms on the TSC values.
	 */
	return count > 5;
}
323
324
325
326
327
328
329
/* Upper bound on the fast calibration: 50ms worth of 256-tick MSB steps. */
#define MAX_QUICK_PIT_MS 50
#define MAX_QUICK_PIT_ITERATIONS (MAX_QUICK_PIT_MS * PIT_TICK_RATE / 1000 / 256)

/*
 * Fast TSC calibration: watch the PIT counter MSB decrement and time the
 * steps with the TSC, iterating until the accumulated error is below
 * ~500 ppm.  Returns the TSC frequency in kHz, or 0 on failure.
 */
static unsigned long quick_pit_calibrate(void)
{
	int i;
	u64 tsc, delta;
	unsigned long d1, d2;

	/* Set the Gate high, disable speaker */
	outb((inb(0x61) & ~0x02) | 0x01, 0x61);

	/*
	 * Counter 2, mode 0 (one-shot), binary count.
	 *
	 * Mode 2 would decrement by two per input clock (flipping the
	 * output each time), so mode 0 is required when looking at
	 * individual counts.
	 */
	outb(0xb0, 0x43);

	/* Start the countdown at 0xffff (LSB then MSB). */
	outb(0xff, 0x42);
	outb(0xff, 0x42);

	/*
	 * The PIT starts counting at the next edge, so we need to
	 * delay for a microsecond. The easiest way to do that is to
	 * just read back the 16-bit counter once from the PIT.
	 */
	pit_verify_msb(0);

	if (pit_expect_msb(0xff, &tsc, &d1)) {
		for (i = 1; i <= MAX_QUICK_PIT_ITERATIONS; i++) {
			if (!pit_expect_msb(0xff-i, &delta, &d2))
				break;

			/*
			 * Iterate until the error (d1+d2 read
			 * uncertainty vs the elapsed delta) is less
			 * than 500 ppm, i.e. delta >> 11.
			 */
			delta -= tsc;
			if (d1+d2 >= delta >> 11)
				continue;

			/*
			 * Check the PIT one more time to verify that
			 * all TSC reads were stable wrt the PIT.
			 *
			 * This also serializes the last cycle read
			 * ('d2') in pit_expect_msb.
			 */
			if (!pit_verify_msb(0xfe - i))
				break;
			goto success;
		}
	}
	pr_err("Fast TSC calibration failed\n");
	return 0;

success:
	/*
	 * We have seen the PIT MSB decrement 'i' times with the
	 * accumulated error below 500 ppm, so the TSC reads are
	 * reliable within that error.
	 *
	 * kHz = ticks / time-in-seconds / 1000;
	 * kHz = (t2 - t1) / (i * 256 / PIT_TICK_RATE) / 1000
	 * kHz = ((t2 - t1) * PIT_TICK_RATE) / (i * 256 * 1000)
	 */
	delta *= PIT_TICK_RATE;
	do_div(delta, i*256*1000);
	pr_info("Fast TSC calibration using PIT\n");
	return delta;
}
411
412
413
414
/*
 * native_calibrate_tsc - calibrate the TSC against the PIT and, when
 * available, the HPET or ACPI PM timer.  Returns the TSC frequency in
 * kHz, or 0 if no usable calibration could be obtained.
 */
unsigned long native_calibrate_tsc(void)
{
	u64 tsc1, tsc2, delta, ref1, ref2;
	unsigned long tsc_pit_min = ULONG_MAX, tsc_ref_min = ULONG_MAX;
	unsigned long flags, latch, ms, fast_calibrate;
	int hpet = is_hpet_enabled(), i, loopmin;

	/* Try the fast PIT-only method first; success short-circuits. */
	local_irq_save(flags);
	fast_calibrate = quick_pit_calibrate();
	local_irq_restore(flags);
	if (fast_calibrate)
		return fast_calibrate;

	/*
	 * Run several calibration loops to get the lowest frequency
	 * value (the best estimate), using two independent methods:
	 *
	 * 1) PIT loop: program PIT channel 2 in one-shot mode and time
	 *    the countdown interval with the TSC.
	 *
	 * 2) Reference counter: bracket the PIT loop with HPET or
	 *    PMTIMER readings as a sanity check on the PIT result.
	 */

	/* Preset PIT loop values for the first (10ms) pass. */
	latch = CAL_LATCH;
	ms = CAL_MS;
	loopmin = CAL_PIT_LOOPS;

	for (i = 0; i < 3; i++) {
		unsigned long tsc_pit_khz;

		/*
		 * Read the start TSC and reference count, run the PIT
		 * calibration (at least 'ms' milliseconds), then read
		 * the end TSC and reference count - all with IRQs off.
		 */
		local_irq_save(flags);
		tsc1 = tsc_read_refs(&ref1, hpet);
		tsc_pit_khz = pit_calibrate_tsc(latch, ms, loopmin);
		tsc2 = tsc_read_refs(&ref2, hpet);
		local_irq_restore(flags);

		/* Pick the lowest PIT TSC calibration so far */
		tsc_pit_min = min(tsc_pit_min, tsc_pit_khz);

		/* hpet or pmtimer available ? */
		if (ref1 == ref2)
			continue;

		/* Check, whether the sampling was disturbed by an SMI */
		if (tsc1 == ULLONG_MAX || tsc2 == ULLONG_MAX)
			continue;

		/* Pre-scale by 1e6 so calc_*_ref() yields kHz. */
		tsc2 = (tsc2 - tsc1) * 1000000LL;
		if (hpet)
			tsc2 = calc_hpet_ref(tsc2, ref1, ref2);
		else
			tsc2 = calc_pmtimer_ref(tsc2, ref1, ref2);

		tsc_ref_min = min(tsc_ref_min, (unsigned long) tsc2);

		/* Check the reference deviation (percent of ref value). */
		delta = ((u64) tsc_pit_min) * 100;
		do_div(delta, tsc_ref_min);

		/*
		 * If both calibration results are inside a 10% window
		 * then we can be sure that the calibration succeeded.
		 * Use the reference value, as it is more precise.
		 */
		if (delta >= 90 && delta <= 110) {
			pr_info("PIT calibration matches %s. %d loops\n",
				hpet ? "HPET" : "PMTIMER", i + 1);
			return tsc_ref_min;
		}

		/*
		 * If the PIT failed more than once (common in
		 * virtualized environments), give the virtual PC a
		 * longer timeframe (50ms) for the second attempt.
		 */
		if (i == 1 && tsc_pit_min == ULONG_MAX) {
			latch = CAL2_LATCH;
			ms = CAL2_MS;
			loopmin = CAL2_PIT_LOOPS;
		}
	}

	/*
	 * Now check the results.
	 */
	if (tsc_pit_min == ULONG_MAX) {
		/* PIT gave no useful value */
		pr_warn("Unable to calibrate against PIT\n");

		/* We don't have an alternative source, disable TSC */
		if (!hpet && !ref1 && !ref2) {
			pr_notice("No reference (HPET/PMTIMER) available\n");
			return 0;
		}

		/* The alternative source failed as well, disable TSC */
		if (tsc_ref_min == ULONG_MAX) {
			pr_warn("HPET/PMTIMER calibration failed\n");
			return 0;
		}

		/* Use the alternative source */
		pr_info("using %s reference calibration\n",
			hpet ? "HPET" : "PMTIMER");

		return tsc_ref_min;
	}

	/* We don't have an alternative source, use the PIT calibration value */
	if (!hpet && !ref1 && !ref2) {
		pr_info("Using PIT calibration value\n");
		return tsc_pit_min;
	}

	/* The alternative source failed, use the PIT calibration value */
	if (tsc_ref_min == ULONG_MAX) {
		pr_warn("HPET/PMTIMER calibration failed. Using PIT calibration.\n");
		return tsc_pit_min;
	}

	/*
	 * The calibration values differ too much. In doubt, we use
	 * the PIT value as we know that there are PMTIMERs around
	 * running at double speed. At least we let the user know:
	 */
	pr_warn("PIT calibration deviates from %s: %lu %lu\n",
		hpet ? "HPET" : "PMTIMER", tsc_pit_min, tsc_ref_min);
	pr_info("Using PIT calibration value\n");
	return tsc_pit_min;
}
569
/*
 * Re-run TSC calibration and update cpu_khz / loops_per_jiffy
 * accordingly.  Only meaningful on UP kernels; returns -ENODEV on SMP
 * or when the CPU has no TSC.
 */
int recalibrate_cpu_khz(void)
{
#ifndef CONFIG_SMP
	unsigned long cpu_khz_old = cpu_khz;

	if (cpu_has_tsc) {
		tsc_khz = x86_platform.calibrate_tsc();
		cpu_khz = tsc_khz;
		/* Rescale the delay-loop constant by the frequency ratio. */
		cpu_data(0).loops_per_jiffy =
			cpufreq_scale(cpu_data(0).loops_per_jiffy,
					cpu_khz_old, cpu_khz);
		return 0;
	} else
		return -ENODEV;
#else
	return -ENODEV;
#endif
}

EXPORT_SYMBOL(recalibrate_cpu_khz);
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
/*
 * Accelerators for sched_clock(): convert cycles (64 bits) to
 * nanoseconds (64 bits) without a division on the hot path:
 *
 *	ns = cycles * (10^6 / cpu_khz)
 *	   = cycles * (10^6 * 2^CYC2NS_SCALE_FACTOR / cpu_khz)
 *	                  >> CYC2NS_SCALE_FACTOR
 *
 * i.e. multiply by the per-cpu 'cyc2ns' scale and shift; 'cyc2ns_offset'
 * keeps sched_clock() continuous when the scale is (re)computed.
 */
DEFINE_PER_CPU(unsigned long, cyc2ns);
DEFINE_PER_CPU(unsigned long long, cyc2ns_offset);

/*
 * Recompute the cyc2ns scale/offset pair for @cpu from @cpu_khz (note:
 * the parameter shadows the global of the same name).  Called at init
 * and on cpufreq transitions.
 */
static void set_cyc2ns_scale(unsigned long cpu_khz, int cpu)
{
	unsigned long long tsc_now, ns_now, *offset;
	unsigned long flags, *scale;

	/* Keep sched_clock() quiescent while the factors change. */
	local_irq_save(flags);
	sched_clock_idle_sleep_event();

	scale = &per_cpu(cyc2ns, cpu);
	offset = &per_cpu(cyc2ns_offset, cpu);

	rdtscll(tsc_now);
	ns_now = __cycles_2_ns(tsc_now);

	if (cpu_khz) {
		/* Rounded fixed-point scale: (1e6 << SF + khz/2) / khz. */
		*scale = ((NSEC_PER_MSEC << CYC2NS_SCALE_FACTOR) +
			cpu_khz / 2) / cpu_khz;
		/* Offset chosen so the new scale reproduces ns_now exactly. */
		*offset = ns_now - mult_frac(tsc_now, *scale,
			(1UL << CYC2NS_SCALE_FACTOR));
	}

	sched_clock_idle_wakeup_event(0);
	local_irq_restore(flags);
}
641
/* sched_clock() value captured at suspend, used to restore continuity. */
static unsigned long long cyc2ns_suspend;

/* Save the current sched_clock() reading before suspend. */
void tsc_save_sched_clock_state(void)
{
	if (!sched_clock_stable)
		return;

	cyc2ns_suspend = sched_clock();
}
651
652
653
654
655
656
657
658
659
/*
 * After resume, adjust every CPU's cyc2ns_offset so that sched_clock()
 * continues from the value saved at suspend, even though the TSC may
 * have been reset across the sleep.
 */
void tsc_restore_sched_clock_state(void)
{
	unsigned long long offset;
	unsigned long flags;
	int cpu;

	if (!sched_clock_stable)
		return;

	local_irq_save(flags);

	/*
	 * Zero this CPU's offset first so the sched_clock() read below
	 * yields the raw post-resume value; the difference from the
	 * saved value is the offset every CPU needs.
	 */
	__this_cpu_write(cyc2ns_offset, 0);
	offset = cyc2ns_suspend - sched_clock();

	for_each_possible_cpu(cpu)
		per_cpu(cyc2ns_offset, cpu) = offset;

	local_irq_restore(flags);
}
679
680#ifdef CONFIG_CPU_FREQ
681
682
683
684
685
686
687
688
689
690
691
692
/* Reference values captured at the first frequency transition. */
static unsigned int ref_freq;
static unsigned long loops_per_jiffy_ref;
static unsigned long tsc_khz_ref;

/*
 * cpufreq transition notifier: rescale tsc_khz and loops_per_jiffy when
 * the CPU frequency changes, and mark the TSC unstable on parts whose
 * TSC rate follows the core clock.
 */
static int time_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
				void *data)
{
	struct cpufreq_freqs *freq = data;
	unsigned long *lpj;

	/* Constant-TSC parts tick at a fixed rate; nothing to do. */
	if (cpu_has(&cpu_data(freq->cpu), X86_FEATURE_CONSTANT_TSC))
		return 0;

	lpj = &boot_cpu_data.loops_per_jiffy;
#ifdef CONFIG_SMP
	if (!(freq->flags & CPUFREQ_CONST_LOOPS))
		lpj = &cpu_data(freq->cpu).loops_per_jiffy;
#endif

	/* Latch reference values on the very first notification. */
	if (!ref_freq) {
		ref_freq = freq->old;
		loops_per_jiffy_ref = *lpj;
		tsc_khz_ref = tsc_khz;
	}
	/*
	 * Scale before speeding up, after slowing down - i.e. always
	 * while running at the lower of the two frequencies.
	 */
	if ((val == CPUFREQ_PRECHANGE && freq->old < freq->new) ||
			(val == CPUFREQ_POSTCHANGE && freq->old > freq->new) ||
			(val == CPUFREQ_RESUMECHANGE)) {
		*lpj = cpufreq_scale(loops_per_jiffy_ref, ref_freq, freq->new);

		tsc_khz = cpufreq_scale(tsc_khz_ref, ref_freq, freq->new);
		if (!(freq->flags & CPUFREQ_CONST_LOOPS))
			mark_tsc_unstable("cpufreq changes");
	}

	/* Keep this CPU's cyc2ns factors in sync with the new rate. */
	set_cyc2ns_scale(tsc_khz, freq->cpu);

	return 0;
}
731
static struct notifier_block time_cpufreq_notifier_block = {
	.notifier_call = time_cpufreq_notifier
};

/*
 * Register the cpufreq transition notifier, but only on CPUs whose TSC
 * rate actually varies with frequency (i.e. no CONSTANT_TSC feature).
 */
static int __init cpufreq_tsc(void)
{
	if (!cpu_has_tsc)
		return 0;
	if (boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
		return 0;
	cpufreq_register_notifier(&time_cpufreq_notifier_block,
				CPUFREQ_TRANSITION_NOTIFIER);
	return 0;
}

core_initcall(cpufreq_tsc);
748
749#endif
750
751
752
753static struct clocksource clocksource_tsc;
754
755
756
757
758
759
760
761
762
763
764
765
766
767static cycle_t read_tsc(struct clocksource *cs)
768{
769 cycle_t ret = (cycle_t)get_cycles();
770
771 return ret >= clocksource_tsc.cycle_last ?
772 ret : clocksource_tsc.cycle_last;
773}
774
775static void resume_tsc(struct clocksource *cs)
776{
777 if (!boot_cpu_has(X86_FEATURE_NONSTOP_TSC_S3))
778 clocksource_tsc.cycle_last = 0;
779}
780
/*
 * The TSC clocksource: high rating (300), continuous, but watchdog-
 * verified by default (MUST_VERIFY is cleared when marked reliable).
 * On 64-bit it also feeds the vDSO gettimeofday fast path.
 */
static struct clocksource clocksource_tsc = {
	.name = "tsc",
	.rating = 300,
	.read = read_tsc,
	.resume = resume_tsc,
	.mask = CLOCKSOURCE_MASK(64),
	.flags = CLOCK_SOURCE_IS_CONTINUOUS |
		CLOCK_SOURCE_MUST_VERIFY,
#ifdef CONFIG_X86_64
	.archdata = { .vclock_mode = VCLOCK_TSC },
#endif
};
793
/*
 * Mark the TSC unstable (one-way): stop using it for sched_clock and
 * IRQ-time accounting, and demote/disqualify the clocksource.
 * @reason is logged for diagnostics.
 */
void mark_tsc_unstable(char *reason)
{
	if (!tsc_unstable) {
		tsc_unstable = 1;
		sched_clock_stable = 0;
		disable_sched_clock_irqtime();
		pr_info("Marking TSC unstable due to %s\n", reason);

		/*
		 * A nonzero mult means the clocksource was already
		 * registered; use the proper unstable notification.
		 * Otherwise just neuter it before registration.
		 */
		if (clocksource_tsc.mult)
			clocksource_mark_unstable(&clocksource_tsc);
		else {
			clocksource_tsc.flags |= CLOCK_SOURCE_UNSTABLE;
			clocksource_tsc.rating = 0;
		}
	}
}

EXPORT_SYMBOL_GPL(mark_tsc_unstable);
812
/*
 * Decide whether the TSC can skip the clocksource watchdog: either the
 * CPU advertises TSC_RELIABLE, or (Geode LX) the RTSC_SUSP MSR bit says
 * the TSC keeps counting across suspend.
 */
static void __init check_system_tsc_reliable(void)
{
#ifdef CONFIG_MGEODE_LX
	/* RTSC counts during suspend */
#define RTSC_SUSP 0x100
	unsigned long res_low, res_high;

	rdmsr_safe(MSR_GEODE_BUSCONT_CONF0, &res_low, &res_high);

	/* Geode_LX - the OLPC CPU has a very reliable TSC */
	if (res_low & RTSC_SUSP)
		tsc_clocksource_reliable = 1;
#endif
	if (boot_cpu_has(X86_FEATURE_TSC_RELIABLE))
		tsc_clocksource_reliable = 1;
}
828
829
830
831
832
/*
 * Return nonzero if the TSCs of different CPUs must be assumed
 * unsynchronized (so the TSC cannot be used as a system-wide clock).
 */
int unsynchronized_tsc(void)
{
	if (!cpu_has_tsc || tsc_unstable)
		return 1;

#ifdef CONFIG_SMP
	/* Clustered-APIC boxes span multiple boards; TSCs can't be trusted. */
	if (apic_is_clustered_box())
		return 1;
#endif

	if (boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
		return 0;

	if (tsc_clocksource_reliable)
		return 0;
	/*
	 * Intel systems are normally all synchronized.
	 * Exceptions must mark TSC as unstable:
	 */
	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) {
		/* assume multi socket systems are not synchronized: */
		if (num_possible_cpus() > 1)
			return 1;
	}

	return 0;
}
860
861
static void tsc_refine_calibration_work(struct work_struct *work);
static DECLARE_DELAYED_WORK(tsc_irqwork, tsc_refine_calibration_work);

/*
 * tsc_refine_calibration_work - Further refine tsc freq calibration
 *
 * Deferred work that re-measures tsc_khz against the HPET or ACPI PM
 * timer over roughly one second.  Runs twice: the first invocation
 * takes the start sample and reschedules itself for HZ later; the
 * second takes the stop sample, computes the refined frequency, and
 * finally registers the TSC clocksource.
 */
static void tsc_refine_calibration_work(struct work_struct *work)
{
	/* tsc_start == -1 distinguishes the first pass from the second. */
	static u64 tsc_start = -1, ref_start;
	static int hpet;
	u64 tsc_stop, ref_stop, delta;
	unsigned long freq;

	/* Don't bother refining if the TSC has gone unstable meanwhile. */
	if (check_tsc_unstable())
		goto out;

	/*
	 * First pass: pick the reference, schedule the second pass one
	 * tick (HZ jiffies) out, and take the start sample last so the
	 * measured window is as close to the reschedule as possible.
	 */
	if (tsc_start == -1) {
		hpet = is_hpet_enabled();
		schedule_delayed_work(&tsc_irqwork, HZ);
		tsc_start = tsc_read_refs(&ref_start, hpet);
		return;
	}

	tsc_stop = tsc_read_refs(&ref_stop, hpet);

	/* hpet or pmtimer available ? */
	if (ref_start == ref_stop)
		goto out;

	/* Check, whether the sampling was disturbed by an SMI */
	if (tsc_start == ULLONG_MAX || tsc_stop == ULLONG_MAX)
		goto out;

	delta = tsc_stop - tsc_start;
	delta *= 1000000LL;
	if (hpet)
		freq = calc_hpet_ref(delta, ref_start, ref_stop);
	else
		freq = calc_pmtimer_ref(delta, ref_start, ref_stop);

	/* Make sure we're within 1% of the boot-time calibration. */
	if (abs(tsc_khz - freq) > tsc_khz/100)
		goto out;

	tsc_khz = freq;
	pr_info("Refined TSC clocksource calibration: %lu.%03lu MHz\n",
		(unsigned long)tsc_khz / 1000,
		(unsigned long)tsc_khz % 1000);

out:
	/* Register with whichever tsc_khz we ended up with. */
	clocksource_register_khz(&clocksource_tsc, tsc_khz);
}
934
935
/*
 * Prepare the TSC clocksource flags/rating and kick off the refinement
 * work (or register immediately on TSC_RELIABLE hardware where the
 * refinement's cross-check against HPET/PMTIMER is pointless).
 */
static int __init init_tsc_clocksource(void)
{
	if (!cpu_has_tsc || tsc_disabled > 0 || !tsc_khz)
		return 0;

	if (tsc_clocksource_reliable)
		clocksource_tsc.flags &= ~CLOCK_SOURCE_MUST_VERIFY;

	/* Already known unstable: make it unusable before registration. */
	if (check_tsc_unstable()) {
		clocksource_tsc.rating = 0;
		clocksource_tsc.flags &= ~CLOCK_SOURCE_IS_CONTINUOUS;
	}

	if (boot_cpu_has(X86_FEATURE_NONSTOP_TSC_S3))
		clocksource_tsc.flags |= CLOCK_SOURCE_SUSPEND_NONSTOP;

	/*
	 * Trust the results of the earlier calibration on systems
	 * exporting a reliable TSC.
	 */
	if (boot_cpu_has(X86_FEATURE_TSC_RELIABLE)) {
		clocksource_register_khz(&clocksource_tsc, tsc_khz);
		return 0;
	}

	schedule_delayed_work(&tsc_irqwork, 0);
	return 0;
}

/*
 * We use device_initcall here, to ensure we run after the hpet
 * is fully initialized, which may occur at fs_initcall time.
 */
device_initcall(init_tsc_clocksource);
969
/*
 * Boot-time TSC setup: calibrate the frequency, program the per-cpu
 * cyc2ns factors, enable TSC-based sched_clock/delay, and run the
 * synchronization/reliability checks.
 */
void __init tsc_init(void)
{
	u64 lpj;
	int cpu;

	x86_init.timers.tsc_pre_init();

	if (!cpu_has_tsc)
		return;

	tsc_khz = x86_platform.calibrate_tsc();
	cpu_khz = tsc_khz;

	if (!tsc_khz) {
		mark_tsc_unstable("could not calculate TSC khz");
		return;
	}

	pr_info("Detected %lu.%03lu MHz processor\n",
		(unsigned long)cpu_khz / 1000,
		(unsigned long)cpu_khz % 1000);

	/*
	 * Secondary CPUs do not run through tsc_init(), so set up their
	 * cyc2ns factors here as well, assuming (for now) that they all
	 * run at the boot CPU's frequency.
	 */
	for_each_possible_cpu(cpu)
		set_cyc2ns_scale(cpu_khz, cpu);

	if (tsc_disabled > 0)
		return;

	/* now allow native_sched_clock() to use rdtsc */
	tsc_disabled = 0;

	if (!no_sched_irq_time)
		enable_sched_clock_irqtime();

	/* Seed the delay loop from the calibrated frequency. */
	lpj = ((u64)tsc_khz * 1000);
	do_div(lpj, HZ);
	lpj_fine = lpj;

	use_tsc_delay();

	if (unsynchronized_tsc())
		mark_tsc_unstable("TSCs unsynchronized");

	check_system_tsc_reliable();
}
1021
#ifdef CONFIG_SMP
/*
 * If we have a constant TSC and are using the TSC for the delay loop,
 * we can skip clock calibration on secondary CPUs by reusing the
 * loops_per_jiffy value of another CPU on the same physical package.
 * Returns 0 when a fresh calibration is required.
 */
unsigned long calibrate_delay_is_known(void)
{
	int i, cpu = smp_processor_id();

	if (!tsc_disabled && !cpu_has(&cpu_data(cpu), X86_FEATURE_CONSTANT_TSC))
		return 0;

	for_each_online_cpu(i)
		if (cpu_data(i).phys_proc_id == cpu_data(cpu).phys_proc_id)
			return cpu_data(i).loops_per_jiffy;
	return 0;
}
#endif
1042