/*
 * Common time routines among all ppc machines.
 */
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/kernel.h>
#include <linux/param.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/timex.h>
#include <linux/kernel_stat.h>
#include <linux/time.h>
#include <linux/clockchips.h>
#include <linux/init.h>
#include <linux/profile.h>
#include <linux/cpu.h>
#include <linux/security.h>
#include <linux/percpu.h>
#include <linux/rtc.h>
#include <linux/jiffies.h>
#include <linux/posix-timers.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <linux/irq_work.h>
#include <linux/clk-provider.h>
#include <linux/suspend.h>
#include <linux/sched/cputime.h>
#include <linux/processor.h>
#include <asm/trace.h>

#include <asm/io.h>
#include <asm/nvram.h>
#include <asm/cache.h>
#include <asm/machdep.h>
#include <linux/uaccess.h>
#include <asm/time.h>
#include <asm/prom.h>
#include <asm/irq.h>
#include <asm/div64.h>
#include <asm/smp.h>
#include <asm/vdso_datapage.h>
#include <asm/firmware.h>
#include <asm/asm-prototypes.h>

/* powerpc clocksource/clockevent code */

#include <linux/timekeeper_internal.h>

static u64 rtc_read(struct clocksource *);
static struct clocksource clocksource_rtc = {
        .name   = "rtc",
        .rating = 400,
        .flags  = CLOCK_SOURCE_IS_CONTINUOUS,
        .mask   = CLOCKSOURCE_MASK(64),
        .read   = rtc_read,
};

static u64 timebase_read(struct clocksource *);
static struct clocksource clocksource_timebase = {
        .name   = "timebase",
        .rating = 400,
        .flags  = CLOCK_SOURCE_IS_CONTINUOUS,
        .mask   = CLOCKSOURCE_MASK(64),
        .read   = timebase_read,
};

#define DECREMENTER_DEFAULT_MAX 0x7FFFFFFF
u64 decrementer_max = DECREMENTER_DEFAULT_MAX;

static int decrementer_set_next_event(unsigned long evt,
                                      struct clock_event_device *dev);
static int decrementer_shutdown(struct clock_event_device *evt);

struct clock_event_device decrementer_clockevent = {
        .name               = "decrementer",
        .rating             = 200,
        .irq                = 0,
        .set_next_event     = decrementer_set_next_event,
        .set_state_shutdown = decrementer_shutdown,
        .tick_resume        = decrementer_shutdown,
        .features           = CLOCK_EVT_FEAT_ONESHOT |
                              CLOCK_EVT_FEAT_C3STOP,
};
EXPORT_SYMBOL(decrementer_clockevent);

DEFINE_PER_CPU(u64, decrementers_next_tb);
static DEFINE_PER_CPU(struct clock_event_device, decrementers);

#define XSEC_PER_SEC (1024*1024)

#ifdef CONFIG_PPC64
#define SCALE_XSEC(xsec, max)   (((xsec) * max) / XSEC_PER_SEC)
#else
/* compute ((xsec << 12) * max) >> 32 */
#define SCALE_XSEC(xsec, max)   mulhwu((xsec) << 12, max)
#endif
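
/*
 * Illustrative check of the macro above (hypothetical values, not
 * kernel code): with xsec = XSEC_PER_SEC / 2 and max = 1000, both
 * variants yield 500. The 64-bit form computes
 * (524288 * 1000) / 1048576 = 500; the 32-bit form computes
 * mulhwu(524288 << 12, 1000), i.e. the high word of
 * 0x80000000 * 1000, which is also 500.
 */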

unsigned long tb_ticks_per_jiffy;
unsigned long tb_ticks_per_usec = 100;  /* sane default */
EXPORT_SYMBOL(tb_ticks_per_usec);
unsigned long tb_ticks_per_sec;
EXPORT_SYMBOL(tb_ticks_per_sec);        /* for cputime conversions */

DEFINE_SPINLOCK(rtc_lock);
EXPORT_SYMBOL_GPL(rtc_lock);

static u64 tb_to_ns_scale __read_mostly;
static unsigned tb_to_ns_shift __read_mostly;
static u64 boot_tb __read_mostly;

extern struct timezone sys_tz;
static long timezone_offset;

unsigned long ppc_proc_freq;
EXPORT_SYMBOL_GPL(ppc_proc_freq);
unsigned long ppc_tb_freq;
EXPORT_SYMBOL_GPL(ppc_tb_freq);
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE

/*
 * Factor for converting from cputime_t (timebase ticks)
 * to microseconds. This is stored as 0.64 fixed-point binary fraction.
 */
u64 __cputime_usec_factor;
EXPORT_SYMBOL(__cputime_usec_factor);

#ifdef CONFIG_PPC_SPLPAR
void (*dtl_consumer)(struct dtl_entry *, u64);
#endif

#ifdef CONFIG_PPC64
#define get_accounting(tsk)     (&get_paca()->accounting)
#else
#define get_accounting(tsk)     (&task_thread_info(tsk)->accounting)
#endif

static void calc_cputime_factors(void)
{
        struct div_result res;

        div128_by_32(1000000, 0, tb_ticks_per_sec, &res);
        __cputime_usec_factor = res.result_low;
}
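
/*
 * Illustrative check (not part of the code): with a 512 MHz timebase,
 * __cputime_usec_factor becomes the 0.64 fixed-point fraction
 * 1e6 * 2^64 / 512e6, so multiplying a tick count by it with mulhdu()
 * divides by the 512 timebase ticks that make up one microsecond.
 */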

/*
 * Read the SPURR on systems that have it, otherwise the PURR,
 * or if that doesn't exist return the timebase value passed in.
 */
static unsigned long read_spurr(unsigned long tb)
{
        if (cpu_has_feature(CPU_FTR_SPURR))
                return mfspr(SPRN_SPURR);
        if (cpu_has_feature(CPU_FTR_PURR))
                return mfspr(SPRN_PURR);
        return tb;
}

#ifdef CONFIG_PPC_SPLPAR
/*
 * Scan the dispatch trace log and count up the stolen time.
 * Should be called with interrupts disabled.
 */
static u64 scan_dispatch_log(u64 stop_tb)
{
        u64 i = local_paca->dtl_ridx;
        struct dtl_entry *dtl = local_paca->dtl_curr;
        struct dtl_entry *dtl_end = local_paca->dispatch_log_end;
        struct lppaca *vpa = local_paca->lppaca_ptr;
        u64 tb_delta;
        u64 stolen = 0;
        u64 dtb;

        if (!dtl)
                return 0;

        if (i == be64_to_cpu(vpa->dtl_idx))
                return 0;
        while (i < be64_to_cpu(vpa->dtl_idx)) {
                dtb = be64_to_cpu(dtl->timebase);
                tb_delta = be32_to_cpu(dtl->enqueue_to_dispatch_time) +
                        be32_to_cpu(dtl->ready_to_enqueue_time);
                barrier();
                if (i + N_DISPATCH_LOG < be64_to_cpu(vpa->dtl_idx)) {
                        /* buffer has overflowed */
                        i = be64_to_cpu(vpa->dtl_idx) - N_DISPATCH_LOG;
                        dtl = local_paca->dispatch_log + (i % N_DISPATCH_LOG);
                        continue;
                }
                if (dtb > stop_tb)
                        break;
                if (dtl_consumer)
                        dtl_consumer(dtl, i);
                stolen += tb_delta;
                ++i;
                ++dtl;
                if (dtl == dtl_end)
                        dtl = local_paca->dispatch_log;
        }
        local_paca->dtl_ridx = i;
        local_paca->dtl_curr = dtl;
        return stolen;
}

/*
 * Accumulate stolen time by scanning the dispatch trace log.
 * Called on entry from user mode.
 */
void accumulate_stolen_time(void)
{
        u64 sst, ust;
        unsigned long save_irq_soft_mask = irq_soft_mask_return();
        struct cpu_accounting_data *acct = &local_paca->accounting;

        /* We are called early in the exception entry, before
         * soft/hard_enabled are sync'ed to the expected state
         * for the exception. We are hard disabled but the PACA
         * needs to reflect that so various debug stuff doesn't
         * complain.
         */
        irq_soft_mask_set(IRQS_DISABLED);

        sst = scan_dispatch_log(acct->starttime_user);
        ust = scan_dispatch_log(acct->starttime);
        acct->stime -= sst;
        acct->utime -= ust;
        acct->steal_time += ust + sst;

        irq_soft_mask_set(save_irq_soft_mask);
}

static inline u64 calculate_stolen_time(u64 stop_tb)
{
        if (!firmware_has_feature(FW_FEATURE_SPLPAR))
                return 0;

        if (get_paca()->dtl_ridx != be64_to_cpu(get_lppaca()->dtl_idx))
                return scan_dispatch_log(stop_tb);

        return 0;
}

#else /* CONFIG_PPC_SPLPAR */
static inline u64 calculate_stolen_time(u64 stop_tb)
{
        return 0;
}

#endif /* CONFIG_PPC_SPLPAR */

/*
 * Account time for a transition between system, hard irq
 * or soft irq state.
 */
static unsigned long vtime_delta(struct task_struct *tsk,
                                 unsigned long *stime_scaled,
                                 unsigned long *steal_time)
{
        unsigned long now, nowscaled, deltascaled;
        unsigned long stime;
        unsigned long utime, utime_scaled;
        struct cpu_accounting_data *acct = get_accounting(tsk);

        WARN_ON_ONCE(!irqs_disabled());

        now = mftb();
        nowscaled = read_spurr(now);
        stime = now - acct->starttime;
        acct->starttime = now;
        deltascaled = nowscaled - acct->startspurr;
        acct->startspurr = nowscaled;

        *steal_time = calculate_stolen_time(now);

        utime = acct->utime - acct->utime_sspurr;
        acct->utime_sspurr = acct->utime;

        /*
         * Because we don't read the SPURR on every kernel entry/exit,
         * deltascaled includes both user and system SPURR ticks.
         * Apportion these ticks to system SPURR ticks and user
         * SPURR ticks in the same ratio as the system time (stime)
         * and user time (utime) values obtained from the timebase
         * over the same interval. The system ticks get accounted here;
         * the user ticks get saved up in acct->utime_scaled to be
         * used by account_process_tick().
         */
        *stime_scaled = stime;
        utime_scaled = utime;
        if (deltascaled != stime + utime) {
                if (utime) {
                        *stime_scaled = deltascaled * stime / (stime + utime);
                        utime_scaled = deltascaled - *stime_scaled;
                } else {
                        *stime_scaled = deltascaled;
                }
        }
        acct->utime_scaled += utime_scaled;

        return stime;
}
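
/*
 * Worked example of the apportioning above (hypothetical numbers):
 * if the timebase advanced stime = 300 ticks in the kernel and
 * utime = 100 ticks in user mode while the SPURR advanced
 * deltascaled = 200, then *stime_scaled = 200 * 300 / 400 = 150 and
 * utime_scaled = 50, preserving the 3:1 kernel/user ratio at the
 * lower SPURR rate.
 */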

void vtime_account_system(struct task_struct *tsk)
{
        unsigned long stime, stime_scaled, steal_time;
        struct cpu_accounting_data *acct = get_accounting(tsk);

        stime = vtime_delta(tsk, &stime_scaled, &steal_time);

        stime -= min(stime, steal_time);
        acct->steal_time += steal_time;

        if ((tsk->flags & PF_VCPU) && !irq_count()) {
                acct->gtime += stime;
                acct->utime_scaled += stime_scaled;
        } else {
                if (hardirq_count())
                        acct->hardirq_time += stime;
                else if (in_serving_softirq())
                        acct->softirq_time += stime;
                else
                        acct->stime += stime;

                acct->stime_scaled += stime_scaled;
        }
}
EXPORT_SYMBOL_GPL(vtime_account_system);

void vtime_account_idle(struct task_struct *tsk)
{
        unsigned long stime, stime_scaled, steal_time;
        struct cpu_accounting_data *acct = get_accounting(tsk);

        stime = vtime_delta(tsk, &stime_scaled, &steal_time);
        acct->idle_time += stime + steal_time;
}

/*
 * Account the whole cputime accumulated in the paca.
 * Must be called with interrupts disabled.
 * Assumes that vtime_account_system/idle() has been called
 * recently (i.e. since the last entry from usermode) so that
 * get_paca()->user_time_scaled is up to date.
 */
void vtime_flush(struct task_struct *tsk)
{
        struct cpu_accounting_data *acct = get_accounting(tsk);

        if (acct->utime)
                account_user_time(tsk, cputime_to_nsecs(acct->utime));

        if (acct->utime_scaled)
                tsk->utimescaled += cputime_to_nsecs(acct->utime_scaled);

        if (acct->gtime)
                account_guest_time(tsk, cputime_to_nsecs(acct->gtime));

        if (acct->steal_time)
                account_steal_time(cputime_to_nsecs(acct->steal_time));

        if (acct->idle_time)
                account_idle_time(cputime_to_nsecs(acct->idle_time));

        if (acct->stime)
                account_system_index_time(tsk, cputime_to_nsecs(acct->stime),
                                          CPUTIME_SYSTEM);
        if (acct->stime_scaled)
                tsk->stimescaled += cputime_to_nsecs(acct->stime_scaled);

        if (acct->hardirq_time)
                account_system_index_time(tsk, cputime_to_nsecs(acct->hardirq_time),
                                          CPUTIME_IRQ);
        if (acct->softirq_time)
                account_system_index_time(tsk, cputime_to_nsecs(acct->softirq_time),
                                          CPUTIME_SOFTIRQ);

        acct->utime = 0;
        acct->utime_scaled = 0;
        acct->utime_sspurr = 0;
        acct->gtime = 0;
        acct->steal_time = 0;
        acct->idle_time = 0;
        acct->stime = 0;
        acct->stime_scaled = 0;
        acct->hardirq_time = 0;
        acct->softirq_time = 0;
}

#ifdef CONFIG_PPC32
/*
 * Called from the context switch with interrupts disabled, to charge all
 * accumulated times to the current process, and to prepare accounting on
 * the next process.
 */
void arch_vtime_task_switch(struct task_struct *prev)
{
        struct cpu_accounting_data *acct = get_accounting(current);

        acct->starttime = get_accounting(prev)->starttime;
        acct->startspurr = get_accounting(prev)->startspurr;
}
#endif /* CONFIG_PPC32 */

#else /* !CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
#define calc_cputime_factors()
#endif

void __delay(unsigned long loops)
{
        unsigned long start;
        int diff;

        spin_begin();
        if (__USE_RTC()) {
                start = get_rtcl();
                do {
                        /* the RTCL register wraps at 1000000000 */
                        diff = get_rtcl() - start;
                        if (diff < 0)
                                diff += 1000000000;
                        spin_cpu_relax();
                } while (diff < loops);
        } else {
                start = get_tbl();
                while (get_tbl() - start < loops)
                        spin_cpu_relax();
        }
        spin_end();
}
EXPORT_SYMBOL(__delay);
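
/*
 * Note on the timebase branch above: get_tbl() - start is evaluated
 * in unsigned arithmetic, so the comparison remains correct across a
 * 32-bit timebase wrap. The RTC branch must handle its wrap by hand
 * because RTCL counts nanoseconds modulo 1000000000.
 */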

void udelay(unsigned long usecs)
{
        __delay(tb_ticks_per_usec * usecs);
}
EXPORT_SYMBOL(udelay);

#ifdef CONFIG_SMP
unsigned long profile_pc(struct pt_regs *regs)
{
        unsigned long pc = instruction_pointer(regs);

        if (in_lock_functions(pc))
                return regs->link;

        return pc;
}
EXPORT_SYMBOL(profile_pc);
#endif

#ifdef CONFIG_IRQ_WORK

/*
 * 64-bit uses a byte in the PACA, 32-bit uses a per-cpu variable.
 */
#ifdef CONFIG_PPC64
static inline unsigned long test_irq_work_pending(void)
{
        unsigned long x;

        asm volatile("lbz %0,%1(13)"
                     : "=r" (x)
                     : "i" (offsetof(struct paca_struct, irq_work_pending)));
        return x;
}

static inline void set_irq_work_pending_flag(void)
{
        asm volatile("stb %0,%1(13)" : :
                     "r" (1),
                     "i" (offsetof(struct paca_struct, irq_work_pending)));
}

static inline void clear_irq_work_pending(void)
{
        asm volatile("stb %0,%1(13)" : :
                     "r" (0),
                     "i" (offsetof(struct paca_struct, irq_work_pending)));
}

#else /* 32-bit */

DEFINE_PER_CPU(u8, irq_work_pending);

#define set_irq_work_pending_flag()     __this_cpu_write(irq_work_pending, 1)
#define test_irq_work_pending()         __this_cpu_read(irq_work_pending)
#define clear_irq_work_pending()        __this_cpu_write(irq_work_pending, 0)

#endif /* 32 vs 64 bit */

void arch_irq_work_raise(void)
{
        preempt_disable();
        set_irq_work_pending_flag();
        set_dec(1);
        preempt_enable();
}
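
/*
 * How this works: set_dec(1) forces a decrementer exception almost
 * immediately; __timer_interrupt() then sees the flag via
 * test_irq_work_pending() and calls irq_work_run(). Preemption is
 * disabled around the sequence so the flag and the decrementer write
 * target the same CPU.
 */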

#else /* CONFIG_IRQ_WORK */

#define test_irq_work_pending()         0
#define clear_irq_work_pending()

#endif /* CONFIG_IRQ_WORK */

static void __timer_interrupt(void)
{
        struct pt_regs *regs = get_irq_regs();
        u64 *next_tb = this_cpu_ptr(&decrementers_next_tb);
        struct clock_event_device *evt = this_cpu_ptr(&decrementers);
        u64 now;

        trace_timer_interrupt_entry(regs);

        if (test_irq_work_pending()) {
                clear_irq_work_pending();
                irq_work_run();
        }

        now = get_tb_or_rtc();
        if (now >= *next_tb) {
                *next_tb = ~(u64)0;
                if (evt->event_handler)
                        evt->event_handler(evt);
                __this_cpu_inc(irq_stat.timer_irqs_event);
        } else {
                now = *next_tb - now;
                if (now <= decrementer_max)
                        set_dec(now);
                /* We may have raced with new irq work */
                if (test_irq_work_pending())
                        set_dec(1);
                __this_cpu_inc(irq_stat.timer_irqs_others);
        }

#ifdef CONFIG_PPC64
        /* collect purr register values often, for accurate calculations */
        if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
                struct cpu_usage *cu = this_cpu_ptr(&cpu_usage_array);
                cu->current_tb = mfspr(SPRN_PURR);
        }
#endif

        trace_timer_interrupt_exit(regs);
}

/*
 * timer_interrupt - gets called when the decrementer overflows,
 * with interrupts disabled.
 */
void timer_interrupt(struct pt_regs *regs)
{
        struct pt_regs *old_regs;
        u64 *next_tb = this_cpu_ptr(&decrementers_next_tb);

        /* Ensure a positive value is written to the decrementer, or else
         * some CPUs will continue to take decrementer exceptions.
         */
        set_dec(decrementer_max);

        /* Some implementations of hotplug will get timer interrupts while
         * offline, just ignore these and we also need to set
         * decrementers_next_tb as MAX to make sure __check_irq_replay
         * don't replay timer interrupt when return, otherwise we'll trap
         * here infinitely :(
         */
        if (!cpu_online(smp_processor_id())) {
                *next_tb = ~(u64)0;
                return;
        }

        /* Conditionally hard-enable interrupts now that the DEC has been
         * bumped to its maximum value
         */
        may_hard_irq_enable();

#if defined(CONFIG_PPC32) && defined(CONFIG_PPC_PMAC)
        if (atomic_read(&ppc_n_lost_interrupts) != 0)
                do_IRQ(regs);
#endif

        old_regs = set_irq_regs(regs);
        irq_enter();

        __timer_interrupt();
        irq_exit();
        set_irq_regs(old_regs);
}
EXPORT_SYMBOL(timer_interrupt);

/*
 * Hypervisor decrementer interrupts shouldn't occur but are sometimes
 * left pending on exit from a KVM guest.  We don't need to do anything
 * to clear them, as they are edge-triggered.
 */
void hdec_interrupt(struct pt_regs *regs)
{
}

#ifdef CONFIG_SUSPEND
static void generic_suspend_disable_irqs(void)
{
        /* Disable the decrementer, so that it doesn't interfere
         * with suspending.
         */
        set_dec(decrementer_max);
        local_irq_disable();
        set_dec(decrementer_max);
}

static void generic_suspend_enable_irqs(void)
{
        local_irq_enable();
}

/* Overrides the generic definition in kernel/power/main.c */
void arch_suspend_disable_irqs(void)
{
        if (ppc_md.suspend_disable_irqs)
                ppc_md.suspend_disable_irqs();
        generic_suspend_disable_irqs();
}

/* Overrides the generic definition in kernel/power/main.c */
void arch_suspend_enable_irqs(void)
{
        generic_suspend_enable_irqs();
        if (ppc_md.suspend_enable_irqs)
                ppc_md.suspend_enable_irqs();
}
#endif

unsigned long long tb_to_ns(unsigned long long ticks)
{
        return mulhdu(ticks, tb_to_ns_scale) << tb_to_ns_shift;
}
EXPORT_SYMBOL_GPL(tb_to_ns);
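
/*
 * Illustrative check (hypothetical frequency): with a 512 MHz timebase,
 * time_init() sets tb_to_ns_shift = 1 and tb_to_ns_scale to the 0.64
 * fixed-point representation of 0.9765625 (half of the 1.953125 ns
 * per tick), so tb_to_ns(512) = mulhdu(512, tb_to_ns_scale) << 1
 * = 500 << 1 = 1000 ns, as expected.
 */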

/*
 * Scheduler clock - returns current time in nanosec units.
 *
 * Note: mulhdu(a, b) (multiply high double unsigned) returns
 * the high 64 bits of a * b, i.e. (a * b) >> 64, where a and b
 * are 64-bit unsigned numbers.
 */
notrace unsigned long long sched_clock(void)
{
        if (__USE_RTC())
                return get_rtc();
        return mulhdu(get_tb() - boot_tb, tb_to_ns_scale) << tb_to_ns_shift;
}

#ifdef CONFIG_PPC_PSERIES

/*
 * Running clock - attempts to give a view of time passing for a
 * virtualised kernel.
 * Uses the VTB register if available otherwise a next best guess.
 */
unsigned long long running_clock(void)
{
        /*
         * Don't read the VTB as a host since KVM does not switch in host
         * timebase into the VTB when it takes a guest off the CPU, reading the
         * VTB would result in reading 'last switched out' guest VTB.
         *
         * Host kernels are often compiled with CONFIG_PPC_PSERIES checked, it
         * would be unsafe to rely only on the #ifdef above.
         */
        if (firmware_has_feature(FW_FEATURE_LPAR) &&
            cpu_has_feature(CPU_FTR_ARCH_207S))
                return mulhdu(get_vtb() - boot_tb, tb_to_ns_scale) << tb_to_ns_shift;

        /*
         * This is the next best approximation without a VTB: subtract the
         * accumulated steal time from local_clock(). On bare metal the
         * steal time is zero and this reduces to local_clock().
         */
        return local_clock() - kcpustat_this_cpu->cpustat[CPUTIME_STEAL];
}
#endif

static int __init get_freq(char *name, int cells, unsigned long *val)
{
        struct device_node *cpu;
        const __be32 *fp;
        int found = 0;

        /* The cpu node should have timebase and clock frequency properties */
        cpu = of_find_node_by_type(NULL, "cpu");

        if (cpu) {
                fp = of_get_property(cpu, name, NULL);
                if (fp) {
                        found = 1;
                        *val = of_read_ulong(fp, cells);
                }

                of_node_put(cpu);
        }

        return found;
}
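
/*
 * Example device tree fragment this would parse (illustrative only):
 *
 *         cpu@0 {
 *                 timebase-frequency = <512000000>;
 *         };
 *
 * get_freq("timebase-frequency", 1, &val) then sets val to 512000000.
 */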

static void start_cpu_decrementer(void)
{
#if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
        unsigned int tcr;

        /* Clear any pending timer interrupts */
        mtspr(SPRN_TSR, TSR_ENW | TSR_WIS | TSR_DIS | TSR_FIS);

        tcr = mfspr(SPRN_TCR);
        /*
         * The watchdog may have already been enabled by u-boot. So leave
         * TCR[WP] (the watchdog period) alone.
         */
        tcr &= TCR_WP_MASK;     /* clear all bits except for TCR[WP] */
        tcr |= TCR_DIE;         /* enable decrementer */
        mtspr(SPRN_TCR, tcr);
#endif
}

void __init generic_calibrate_decr(void)
{
        ppc_tb_freq = DEFAULT_TB_FREQ;          /* hardcoded default */

        if (!get_freq("ibm,extended-timebase-frequency", 2, &ppc_tb_freq) &&
            !get_freq("timebase-frequency", 1, &ppc_tb_freq)) {

                printk(KERN_ERR "WARNING: Estimating decrementer frequency "
                                "(not found)\n");
        }

        ppc_proc_freq = DEFAULT_PROC_FREQ;      /* hardcoded default */

        if (!get_freq("ibm,extended-clock-frequency", 2, &ppc_proc_freq) &&
            !get_freq("clock-frequency", 1, &ppc_proc_freq)) {

                printk(KERN_ERR "WARNING: Estimating processor frequency "
                                "(not found)\n");
        }
}

int update_persistent_clock(struct timespec now)
{
        struct rtc_time tm;

        if (!ppc_md.set_rtc_time)
                return -ENODEV;

        to_tm(now.tv_sec + 1 + timezone_offset, &tm);
        tm.tm_year -= 1900;
        tm.tm_mon -= 1;

        return ppc_md.set_rtc_time(&tm);
}

static void __read_persistent_clock(struct timespec *ts)
{
        struct rtc_time tm;
        static int first = 1;

        ts->tv_nsec = 0;

        if (first) {
                first = 0;
                if (ppc_md.time_init)
                        timezone_offset = ppc_md.time_init();

                /* get_boot_time() isn't guaranteed to be safe to call late */
                if (ppc_md.get_boot_time) {
                        ts->tv_sec = ppc_md.get_boot_time() - timezone_offset;
                        return;
                }
        }
        if (!ppc_md.get_rtc_time) {
                ts->tv_sec = 0;
                return;
        }
        ppc_md.get_rtc_time(&tm);

        ts->tv_sec = mktime(tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday,
                            tm.tm_hour, tm.tm_min, tm.tm_sec);
}

void read_persistent_clock(struct timespec *ts)
{
        __read_persistent_clock(ts);

        /* Sanitize it in case real time clock is set below EPOCH */
        if (ts->tv_sec < 0) {
                ts->tv_sec = 0;
                ts->tv_nsec = 0;
        }
}

/* clocksource code */
static notrace u64 rtc_read(struct clocksource *cs)
{
        return (u64)get_rtc();
}

static notrace u64 timebase_read(struct clocksource *cs)
{
        return (u64)get_tb();
}

void update_vsyscall(struct timekeeper *tk)
{
        struct timespec xt;
        struct clocksource *clock = tk->tkr_mono.clock;
        u32 mult = tk->tkr_mono.mult;
        u32 shift = tk->tkr_mono.shift;
        u64 cycle_last = tk->tkr_mono.cycle_last;
        u64 new_tb_to_xs, new_stamp_xsec;
        u64 frac_sec;

        if (clock != &clocksource_timebase)
                return;

        xt.tv_sec = tk->xtime_sec;
        xt.tv_nsec = (long)(tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift);

        /* Make userspace gettimeofday spin until we're done. */
        ++vdso_data->tb_update_count;
        smp_mb();

        /*
         * This computes (mult / 2^shift) * (2^20 / 1e9), the number of
         * xsec (units of 2^-20 seconds) per timebase tick, as a 0.64
         * fixed-point fraction, i.e. mult * 2^84 / (1e9 * 2^shift).
         * 295147905179 is 2^68 / 1e9 rounded, so the first form keeps
         * 16 more bits of the constant; it is safe whenever
         * mult <= 62500000, since then mult * (2^68 / 1e9) < 2^64,
         * and shift >= 16 keeps the right-shift count non-negative.
         * The second form trades away the low bits of the constant
         * (19342813113834067 is 2^84 / 1e9 rounded) to avoid overflow.
         */
        if (mult <= 62500000 && clock->shift >= 16)
                new_tb_to_xs = ((u64) mult * 295147905179ULL) >> (clock->shift - 16);
        else
                new_tb_to_xs = (u64) mult * (19342813113834067ULL >> clock->shift);

        /*
         * Compute the fractional second in units of 2^-32 seconds.
         * The fractional second is tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift
         * in nanoseconds, so multiplying that by 2^32 / 1e9 gives
         * the fraction in units of 2^-32 seconds.
         * We assume shift <= 32 because clocks_calc_mult_shift()
         * generates shift values in the range 0 - 32.
         */
        frac_sec = tk->tkr_mono.xtime_nsec << (32 - shift);
        do_div(frac_sec, NSEC_PER_SEC);

        /*
         * Work out new stamp_xsec value for any legacy users of systemcfg.
         * stamp_xsec is in units of 2^-20 second.
         */
        new_stamp_xsec = frac_sec >> 12;
        new_stamp_xsec += tk->xtime_sec * XSEC_PER_SEC;

        /*
         * tb_update_count is used to allow the userspace gettimeofday code
         * to assure itself that it sees a consistent view of the tb_to_xs and
         * stamp_xsec variables.  It reads the tb_update_count, then reads
         * tb_to_xs and stamp_xsec and then reads tb_update_count again.  If
         * the two values of tb_update_count match and are even then the
         * tb_to_xs and stamp_xsec values are consistent.  If not, then it
         * loops back and reads them again until this criteria is met.
         */
        vdso_data->tb_orig_stamp = cycle_last;
        vdso_data->stamp_xsec = new_stamp_xsec;
        vdso_data->tb_to_xs = new_tb_to_xs;
        vdso_data->wtom_clock_sec = tk->wall_to_monotonic.tv_sec;
        vdso_data->wtom_clock_nsec = tk->wall_to_monotonic.tv_nsec;
        vdso_data->stamp_xtime = xt;
        vdso_data->stamp_sec_fraction = frac_sec;
        smp_wmb();
        ++(vdso_data->tb_update_count);
}

void update_vsyscall_tz(void)
{
        vdso_data->tz_minuteswest = sys_tz.tz_minuteswest;
        vdso_data->tz_dsttime = sys_tz.tz_dsttime;
}

static void __init clocksource_init(void)
{
        struct clocksource *clock;

        if (__USE_RTC())
                clock = &clocksource_rtc;
        else
                clock = &clocksource_timebase;

        if (clocksource_register_hz(clock, tb_ticks_per_sec)) {
                printk(KERN_ERR "clocksource: %s is already registered\n",
                       clock->name);
                return;
        }

        printk(KERN_INFO "clocksource: %s mult[%x] shift[%d] registered\n",
               clock->name, clock->mult, clock->shift);
}

static int decrementer_set_next_event(unsigned long evt,
                                      struct clock_event_device *dev)
{
        __this_cpu_write(decrementers_next_tb, get_tb_or_rtc() + evt);
        set_dec(evt);

        /* We may have raced with new irq work */
        if (test_irq_work_pending())
                set_dec(1);

        return 0;
}

static int decrementer_shutdown(struct clock_event_device *dev)
{
        decrementer_set_next_event(decrementer_max, dev);
        return 0;
}

/* Interrupt handler for the timer broadcast IPI */
void tick_broadcast_ipi_handler(void)
{
        u64 *next_tb = this_cpu_ptr(&decrementers_next_tb);

        *next_tb = get_tb_or_rtc();
        __timer_interrupt();
}

static void register_decrementer_clockevent(int cpu)
{
        struct clock_event_device *dec = &per_cpu(decrementers, cpu);

        *dec = decrementer_clockevent;
        dec->cpumask = cpumask_of(cpu);

        printk_once(KERN_DEBUG "clockevent: %s mult[%x] shift[%d] cpu[%d]\n",
                    dec->name, dec->mult, dec->shift, cpu);

        clockevents_register_device(dec);
}

static void enable_large_decrementer(void)
{
        if (!cpu_has_feature(CPU_FTR_ARCH_300))
                return;

        if (decrementer_max <= DECREMENTER_DEFAULT_MAX)
                return;

        /*
         * If we're running as the hypervisor we need to enable the LD manually
         * otherwise firmware should have done it for us.
         */
        if (cpu_has_feature(CPU_FTR_HVMODE))
                mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) | LPCR_LD);
}

static void __init set_decrementer_max(void)
{
        struct device_node *cpu;
        u32 bits = 32;

        /* Prior to ISAv3 the decrementer is always 32 bit */
        if (!cpu_has_feature(CPU_FTR_ARCH_300))
                return;

        cpu = of_find_node_by_type(NULL, "cpu");

        if (of_property_read_u32(cpu, "ibm,dec-bits", &bits) == 0) {
                if (bits > 64 || bits < 32) {
                        pr_warn("time_init: firmware supplied invalid ibm,dec-bits\n");
                        bits = 32;
                }

                /* calculate the signed maximum given this many bits */
                decrementer_max = (1ul << (bits - 1)) - 1;
        }

        of_node_put(cpu);

        pr_info("time_init: %u bit decrementer (max: %llx)\n",
                bits, decrementer_max);
}

static void __init init_decrementer_clockevent(void)
{
        int cpu = smp_processor_id();

        clockevents_calc_mult_shift(&decrementer_clockevent, ppc_tb_freq, 4);

        decrementer_clockevent.max_delta_ns =
                clockevent_delta2ns(decrementer_max, &decrementer_clockevent);
        decrementer_clockevent.max_delta_ticks = decrementer_max;
        decrementer_clockevent.min_delta_ns =
                clockevent_delta2ns(2, &decrementer_clockevent);
        decrementer_clockevent.min_delta_ticks = 2;

        register_decrementer_clockevent(cpu);
}
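
/*
 * Sanity example (hypothetical frequency): with ppc_tb_freq = 512 MHz,
 * clockevents_calc_mult_shift() chooses mult/shift such that
 * ns ~= (ticks * mult) >> shift ~= ticks * 1.953125, and max_delta_ns
 * then covers the full decrementer range, about 4.2 seconds for the
 * default decrementer_max of 0x7FFFFFFF.
 */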

void secondary_cpu_time_init(void)
{
        /* Enable and test the large decrementer for this cpu */
        enable_large_decrementer();

        /* Start the decrementer on CPUs that have manual control
         * such as BookE
         */
        start_cpu_decrementer();

        register_decrementer_clockevent(smp_processor_id());
}

/* This function is only called on the boot processor */
void __init time_init(void)
{
        struct div_result res;
        u64 scale;
        unsigned shift;

        if (__USE_RTC()) {
                /* 601 processor: dec counts down by 128 every 128ns */
                ppc_tb_freq = 1000000000;
        } else {
                /* Normal PowerPC with timebase register */
                ppc_md.calibrate_decr();
                printk(KERN_DEBUG "time_init: decrementer frequency = %lu.%.6lu MHz\n",
                       ppc_tb_freq / 1000000, ppc_tb_freq % 1000000);
                printk(KERN_DEBUG "time_init: processor frequency = %lu.%.6lu MHz\n",
                       ppc_proc_freq / 1000000, ppc_proc_freq % 1000000);
        }

        tb_ticks_per_jiffy = ppc_tb_freq / HZ;
        tb_ticks_per_sec = ppc_tb_freq;
        tb_ticks_per_usec = ppc_tb_freq / 1000000;
        calc_cputime_factors();

        /*
         * Compute scale factor for sched_clock.
         * The calibrate_decr() function has set tb_ticks_per_sec,
         * which is the timebase frequency.
         * We compute 1e9 * 2^64 / tb_ticks_per_sec and interpret
         * the 128-bit result as a 64.64 fixed-point number.
         * We then shift that number right until it is less than 1.0,
         * giving us the scale factor and shift count to use in
         * sched_clock().
         */
        div128_by_32(1000000000, 0, tb_ticks_per_sec, &res);
        scale = res.result_low;
        for (shift = 0; res.result_high != 0; ++shift) {
                scale = (scale >> 1) | (res.result_high << 63);
                res.result_high >>= 1;
        }
        tb_to_ns_scale = scale;
        tb_to_ns_shift = shift;
        /* Save the current timebase to pretty up CONFIG_PRINTK_TIME */
        boot_tb = get_tb_or_rtc();

        /* If platform provided a timezone (pmac), we correct the time */
        if (timezone_offset) {
                sys_tz.tz_minuteswest = -timezone_offset / 60;
                sys_tz.tz_dsttime = 0;
        }

        vdso_data->tb_update_count = 0;
        vdso_data->tb_ticks_per_sec = tb_ticks_per_sec;

        /* initialise and enable the large decrementer (if we have one) */
        set_decrementer_max();
        enable_large_decrementer();

        /* Start the decrementer on CPUs that have manual control
         * such as BookE
         */
        start_cpu_decrementer();

        /* Register the clocksource */
        clocksource_init();

        init_decrementer_clockevent();
        tick_setup_hrtimer_broadcast();

#ifdef CONFIG_COMMON_CLK
        of_clk_init(NULL);
#endif
}

#define FEBRUARY                2
#define STARTOFTIME             1970
#define SECDAY                  86400L
#define SECYR                   (SECDAY * 365)
#define leapyear(year)          ((year) % 4 == 0 && \
                                 ((year) % 100 != 0 || (year) % 400 == 0))
#define days_in_year(a)         (leapyear(a) ? 366 : 365)
#define days_in_month(a)        (month_days[(a) - 1])

static int month_days[12] = {
        31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31
};

void to_tm(int tim, struct rtc_time *tm)
{
        register int i;
        register long hms, day;

        day = tim / SECDAY;
        hms = tim % SECDAY;

        /* Hours, minutes, seconds are easy */
        tm->tm_hour = hms / 3600;
        tm->tm_min = (hms % 3600) / 60;
        tm->tm_sec = (hms % 3600) % 60;

        /* Number of years in days */
        for (i = STARTOFTIME; day >= days_in_year(i); i++)
                day -= days_in_year(i);
        tm->tm_year = i;

        /* Number of months in days left */
        if (leapyear(tm->tm_year))
                days_in_month(FEBRUARY) = 29;
        for (i = 1; day >= days_in_month(i); i++)
                day -= days_in_month(i);
        days_in_month(FEBRUARY) = 28;
        tm->tm_mon = i;

        /* Days are what is left over (+1) from all that. */
        tm->tm_mday = day + 1;

        /*
         * No-one uses the day of the week.
         */
        tm->tm_wday = -1;
}
EXPORT_SYMBOL(to_tm);
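
/*
 * Example (hypothetical input): to_tm(86400 + 3661, &tm), i.e.
 * 1970-01-02 01:01:01 UTC, yields tm_year = 1970, tm_mon = 1,
 * tm_mday = 2, tm_hour = 1, tm_min = 1, tm_sec = 1. Note the
 * unadjusted year and 1-based month, which is why
 * update_persistent_clock() subtracts 1900 and 1 before handing
 * the result to ppc_md.set_rtc_time().
 */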

/*
 * Divide a 128-bit dividend by a 32-bit divisor, leaving a 128 bit
 * result.
 */
void div128_by_32(u64 dividend_high, u64 dividend_low,
                  unsigned divisor, struct div_result *dr)
{
        unsigned long a, b, c, d;
        unsigned long w, x, y, z;
        u64 ra, rb, rc;

        a = dividend_high >> 32;
        b = dividend_high & 0xffffffff;
        c = dividend_low >> 32;
        d = dividend_low & 0xffffffff;

        w = a / divisor;
        ra = ((u64)(a - (w * divisor)) << 32) + b;

        rb = ((u64) do_div(ra, divisor) << 32) + c;
        x = ra;

        rc = ((u64) do_div(rb, divisor) << 32) + d;
        y = rb;

        do_div(rc, divisor);
        z = rc;

        dr->result_high = ((u64)w << 32) + x;
        dr->result_low = ((u64)y << 32) + z;
}
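
/*
 * Illustrative check: div128_by_32(1, 0, 16, &dr) divides 2^64 by 16,
 * leaving dr->result_high = 0 and dr->result_low = 2^60. Both
 * time_init() and calc_cputime_factors() rely on this 128/32 division
 * to build their 0.64 fixed-point scale factors.
 */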

/* We don't need to calibrate delay, we use the CPU timebase for that */
void calibrate_delay(void)
{
        /* Some generic code (such as spinlock debug) use loops_per_jiffy
         * as the number of __delay(1) in a jiffy, so make it so
         */
        loops_per_jiffy = tb_ticks_per_jiffy;
}

#if IS_ENABLED(CONFIG_RTC_DRV_GENERIC)
static int rtc_generic_get_time(struct device *dev, struct rtc_time *tm)
{
        ppc_md.get_rtc_time(tm);
        return 0;
}

static int rtc_generic_set_time(struct device *dev, struct rtc_time *tm)
{
        if (!ppc_md.set_rtc_time)
                return -EOPNOTSUPP;

        if (ppc_md.set_rtc_time(tm) < 0)
                return -EOPNOTSUPP;

        return 0;
}

static const struct rtc_class_ops rtc_generic_ops = {
        .read_time = rtc_generic_get_time,
        .set_time = rtc_generic_set_time,
};

static int __init rtc_init(void)
{
        struct platform_device *pdev;

        if (!ppc_md.get_rtc_time)
                return -ENODEV;

        pdev = platform_device_register_data(NULL, "rtc-generic", -1,
                                             &rtc_generic_ops,
                                             sizeof(rtc_generic_ops));

        return PTR_ERR_OR_ZERO(pdev);
}

device_initcall(rtc_init);
#endif