#include <linux/export.h>
#include <linux/sched.h>
#include <linux/tsacct_kern.h>
#include <linux/kernel_stat.h>
#include <linux/static_key.h>
#include <linux/context_tracking.h>
#include "sched.h"
#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#endif

#ifdef CONFIG_IRQ_TIME_ACCOUNTING

/*
 * There are no locks covering percpu hardirq/softirq time.
 * Writes are performed with interrupts disabled, so they are safe.
 * Remote readers can race with a local update: on 64-bit the loads
 * are naturally atomic, while 32-bit builds use irq_time_seq below
 * to read a consistent 64-bit snapshot.
 */
DEFINE_PER_CPU(u64, cpu_hardirq_time);
DEFINE_PER_CPU(u64, cpu_softirq_time);

static DEFINE_PER_CPU(u64, irq_start_time);
static int sched_clock_irqtime;

void enable_sched_clock_irqtime(void)
{
	sched_clock_irqtime = 1;
}

void disable_sched_clock_irqtime(void)
{
	sched_clock_irqtime = 0;
}

#ifndef CONFIG_64BIT
DEFINE_PER_CPU(seqcount_t, irq_time_seq);
#endif /* CONFIG_64BIT */
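
/*
 * Read-side sketch for 32-bit (the matching in-tree helper is not in
 * this file; this hypothetical irq_time_read() only illustrates the
 * intended pairing with irq_time_seq above):
 *
 *	static u64 irq_time_read(int cpu)
 *	{
 *		u64 irq_time;
 *		unsigned int seq;
 *
 *		do {
 *			seq = read_seqcount_begin(&per_cpu(irq_time_seq, cpu));
 *			irq_time = per_cpu(cpu_softirq_time, cpu) +
 *				   per_cpu(cpu_hardirq_time, cpu);
 *		} while (read_seqcount_retry(&per_cpu(irq_time_seq, cpu), seq));
 *
 *		return irq_time;
 *	}
 */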

/*
 * Called before incrementing preempt_count on {soft,}irq_enter
 * and before decrementing preempt_count on {soft,}irq_exit.
 */
void irqtime_account_irq(struct task_struct *curr)
{
	unsigned long flags;
	s64 delta;
	int cpu;

	if (!sched_clock_irqtime)
		return;

	local_irq_save(flags);

	cpu = smp_processor_id();
	delta = sched_clock_cpu(cpu) - __this_cpu_read(irq_start_time);
	__this_cpu_add(irq_start_time, delta);

	irq_time_write_begin();
	/*
	 * We do not account for softirq time from ksoftirqd here.
	 * We want to continue accounting softirq time to the ksoftirqd
	 * thread in that case, so as not to confuse the scheduler with
	 * a special task that does not consume any time but still
	 * wants to run.
	 */
	if (hardirq_count())
		__this_cpu_add(cpu_hardirq_time, delta);
	else if (in_serving_softirq() && curr != this_cpu_ksoftirqd())
		__this_cpu_add(cpu_softirq_time, delta);

	irq_time_write_end();
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(irqtime_account_irq);
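
/*
 * Illustrative call path (a sketch, not code from this file): the
 * generic irq entry/exit paths are expected to reach the hook above
 * roughly as
 *
 *	irq_enter()
 *	  -> account_irq_enter_time(current)
 *	       -> irqtime_account_irq(current)
 *
 * and symmetrically from irq_exit(), so that each invocation charges
 * the time elapsed since the previous snapshot to hardirq or softirq
 * context as appropriate.
 */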

static int irqtime_account_hi_update(void)
{
	u64 *cpustat = kcpustat_this_cpu->cpustat;
	unsigned long flags;
	u64 latest_ns;
	int ret = 0;

	local_irq_save(flags);
	latest_ns = this_cpu_read(cpu_hardirq_time);
	if (nsecs_to_cputime64(latest_ns) > cpustat[CPUTIME_IRQ])
		ret = 1;
	local_irq_restore(flags);
	return ret;
}

static int irqtime_account_si_update(void)
{
	u64 *cpustat = kcpustat_this_cpu->cpustat;
	unsigned long flags;
	u64 latest_ns;
	int ret = 0;

	local_irq_save(flags);
	latest_ns = this_cpu_read(cpu_softirq_time);
	if (nsecs_to_cputime64(latest_ns) > cpustat[CPUTIME_SOFTIRQ])
		ret = 1;
	local_irq_restore(flags);
	return ret;
}

#else /* CONFIG_IRQ_TIME_ACCOUNTING */

#define sched_clock_irqtime	(0)

#endif /* CONFIG_IRQ_TIME_ACCOUNTING */

static inline void task_group_account_field(struct task_struct *p, int index,
					    u64 tmp)
{
	/*
	 * Since all updates are sure to touch the root cgroup, we
	 * get ourselves ahead and touch it first. If the root cgroup
	 * is the only cgroup, then nothing else should be necessary.
	 */
	__this_cpu_add(kernel_cpustat.cpustat[index], tmp);

	cpuacct_account_field(p, index, tmp);
}

/*
 * Account user CPU time to a process.
 * @p: the process that the CPU time gets accounted to
 * @cputime: the CPU time spent in user space since the last update
 * @cputime_scaled: cputime scaled by CPU frequency
 */
void account_user_time(struct task_struct *p, cputime_t cputime,
		       cputime_t cputime_scaled)
{
	int index;

	/* Add user time to process. */
	p->utime += cputime;
	p->utimescaled += cputime_scaled;
	account_group_user_time(p, cputime);

	index = (task_nice(p) > 0) ? CPUTIME_NICE : CPUTIME_USER;

	/* Add user time to cpustat. */
	task_group_account_field(p, index, (__force u64) cputime);

	/* Account for user time used. */
	acct_account_cputime(p);
}

/*
 * Account guest CPU time to a process.
 * @p: the process that the CPU time gets accounted to
 * @cputime: the CPU time spent in the virtual machine since the last update
 * @cputime_scaled: cputime scaled by CPU frequency
 */
static void account_guest_time(struct task_struct *p, cputime_t cputime,
			       cputime_t cputime_scaled)
{
	u64 *cpustat = kcpustat_this_cpu->cpustat;

	/* Add guest time to process. */
	p->utime += cputime;
	p->utimescaled += cputime_scaled;
	account_group_user_time(p, cputime);
	p->gtime += cputime;

	/* Add guest time to cpustat. */
	if (task_nice(p) > 0) {
		cpustat[CPUTIME_NICE] += (__force u64) cputime;
		cpustat[CPUTIME_GUEST_NICE] += (__force u64) cputime;
	} else {
		cpustat[CPUTIME_USER] += (__force u64) cputime;
		cpustat[CPUTIME_GUEST] += (__force u64) cputime;
	}
}

/*
 * Account system CPU time to a process and desired cpustat field.
 * @p: the process that the CPU time gets accounted to
 * @cputime: the CPU time spent in kernel space since the last update
 * @cputime_scaled: cputime scaled by CPU frequency
 * @index: the cpustat field that has to be updated
 */
static inline
void __account_system_time(struct task_struct *p, cputime_t cputime,
			   cputime_t cputime_scaled, int index)
{
	/* Add system time to process. */
	p->stime += cputime;
	p->stimescaled += cputime_scaled;
	account_group_system_time(p, cputime);

	/* Add system time to cpustat. */
	task_group_account_field(p, index, (__force u64) cputime);

	/* Account for system time used. */
	acct_account_cputime(p);
}

/*
 * Account system CPU time to a process.
 * @p: the process that the CPU time gets accounted to
 * @hardirq_offset: the offset to subtract from hardirq_count()
 * @cputime: the CPU time spent in kernel space since the last update
 * @cputime_scaled: cputime scaled by CPU frequency
 */
void account_system_time(struct task_struct *p, int hardirq_offset,
			 cputime_t cputime, cputime_t cputime_scaled)
{
	int index;

	if ((p->flags & PF_VCPU) && (irq_count() - hardirq_offset == 0)) {
		account_guest_time(p, cputime, cputime_scaled);
		return;
	}

	if (hardirq_count() - hardirq_offset)
		index = CPUTIME_IRQ;
	else if (in_serving_softirq())
		index = CPUTIME_SOFTIRQ;
	else
		index = CPUTIME_SYSTEM;

	__account_system_time(p, cputime, cputime_scaled, index);
}

/*
 * Account for involuntary wait time.
 * @cputime: the CPU time spent in involuntary wait
 */
void account_steal_time(cputime_t cputime)
{
	u64 *cpustat = kcpustat_this_cpu->cpustat;

	cpustat[CPUTIME_STEAL] += (__force u64) cputime;
}

/*
 * Account for idle time.
 * @cputime: the CPU time spent in idle wait
 */
void account_idle_time(cputime_t cputime)
{
	u64 *cpustat = kcpustat_this_cpu->cpustat;
	struct rq *rq = this_rq();

	if (atomic_read(&rq->nr_iowait) > 0)
		cpustat[CPUTIME_IOWAIT] += (__force u64) cputime;
	else
		cpustat[CPUTIME_IDLE] += (__force u64) cputime;
}

static __always_inline bool steal_account_process_tick(void)
{
#ifdef CONFIG_PARAVIRT
	if (static_key_false(&paravirt_steal_enabled)) {
		u64 steal;
		unsigned long steal_jiffies;

		steal = paravirt_steal_clock(smp_processor_id());
		steal -= this_rq()->prev_steal_time;

		/*
		 * steal is in nsecs but our caller is expecting steal
		 * time in jiffies. Round the result down to jiffies
		 * granularity and account the remainder on the next
		 * rounds.
		 */
		steal_jiffies = nsecs_to_jiffies(steal);
		this_rq()->prev_steal_time += jiffies_to_nsecs(steal_jiffies);

		account_steal_time(jiffies_to_cputime(steal_jiffies));
		return steal_jiffies;
	}
#endif
	return false;
}
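
/*
 * Worked example of the remainder carry above, assuming HZ=1000
 * (one jiffy == 1,000,000 ns): with steal == 2,345,678 ns,
 * nsecs_to_jiffies() yields steal_jiffies == 2, so 2,000,000 ns are
 * added to prev_steal_time and accounted as steal, while the
 * remaining 345,678 ns stay pending and are picked up by a later
 * tick once they accumulate to a full jiffy.
 */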

/*
 * Accumulate raw cputime values of dead tasks (sig->[us]time) and live
 * tasks (sum on group iteration) belonging to @tsk's group.
 */
void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times)
{
	struct signal_struct *sig = tsk->signal;
	cputime_t utime, stime;
	struct task_struct *t;
	unsigned int seq, nextseq;
	unsigned long flags;

	rcu_read_lock();
	/* Attempt a lockless read on the first round. */
	nextseq = 0;
	do {
		seq = nextseq;
		flags = read_seqbegin_or_lock_irqsave(&sig->stats_lock, &seq);
		times->utime = sig->utime;
		times->stime = sig->stime;
		times->sum_exec_runtime = sig->sum_sched_runtime;

		for_each_thread(tsk, t) {
			task_cputime(t, &utime, &stime);
			times->utime += utime;
			times->stime += stime;
			times->sum_exec_runtime += task_sched_runtime(t);
		}
		/* If lockless access failed, take the lock. */
		nextseq = 1;
	} while (need_seqretry(&sig->stats_lock, seq));
	done_seqretry_irqrestore(&sig->stats_lock, seq, flags);
	rcu_read_unlock();
}
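
/*
 * Illustrative use (a sketch): a getrusage()-style caller aggregates a
 * whole thread group with
 *
 *	struct task_cputime times;
 *
 *	thread_group_cputime(tsk, &times);
 *	// times.utime/times.stime now cover dead and live threads
 *
 * and relies on the lockless-then-locked retry above for consistency.
 */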

#ifdef CONFIG_IRQ_TIME_ACCOUNTING
/*
 * Account a tick to a process and cpustat
 * @p: the process that the CPU time gets accounted to
 * @user_tick: is the tick from userspace
 * @rq: the pointer to rq
 * @ticks: number of ticks being accounted
 *
 * Tick demultiplexing follows the order
 * - pending hardirq update
 * - pending softirq update
 * - ksoftirqd (softirq time is kept on the task, see below)
 * - user_time
 * - idle_time
 * - guest time
 * - system time
 *
 * The hardirq/softirq checks are done for both user and system ticks:
 * no timer tick can fire while a hardirq is being serviced, so these
 * checks are the only opportunity to fold pending irq time into
 * cpustat.
 */
static void irqtime_account_process_tick(struct task_struct *p, int user_tick,
					 struct rq *rq, int ticks)
{
	cputime_t scaled = cputime_to_scaled(cputime_one_jiffy);
	u64 cputime = (__force u64) cputime_one_jiffy;
	u64 *cpustat = kcpustat_this_cpu->cpustat;

	if (steal_account_process_tick())
		return;

	cputime *= ticks;
	scaled *= ticks;

	if (irqtime_account_hi_update()) {
		cpustat[CPUTIME_IRQ] += cputime;
	} else if (irqtime_account_si_update()) {
		cpustat[CPUTIME_SOFTIRQ] += cputime;
	} else if (this_cpu_ksoftirqd() == p) {
		/*
		 * ksoftirqd time does not get accounted in
		 * cpu_softirq_time, so we have to handle it separately
		 * here; p->stime needs to be updated for ksoftirqd too.
		 */
		__account_system_time(p, cputime, scaled, CPUTIME_SOFTIRQ);
	} else if (user_tick) {
		account_user_time(p, cputime, scaled);
	} else if (p == rq->idle) {
		account_idle_time(cputime);
	} else if (p->flags & PF_VCPU) {
		account_guest_time(p, cputime, scaled);
	} else {
		__account_system_time(p, cputime, scaled, CPUTIME_SYSTEM);
	}
}

static void irqtime_account_idle_ticks(int ticks)
{
	struct rq *rq = this_rq();

	irqtime_account_process_tick(current, 0, rq, ticks);
}
#else /* CONFIG_IRQ_TIME_ACCOUNTING */
static inline void irqtime_account_idle_ticks(int ticks) {}
static inline void irqtime_account_process_tick(struct task_struct *p, int user_tick,
						struct rq *rq, int nr_ticks) {}
#endif /* CONFIG_IRQ_TIME_ACCOUNTING */

/*
 * Use precise platform statistics if available:
 */
#ifdef CONFIG_VIRT_CPU_ACCOUNTING

#ifndef __ARCH_HAS_VTIME_TASK_SWITCH
void vtime_common_task_switch(struct task_struct *prev)
{
	if (is_idle_task(prev))
		vtime_account_idle(prev);
	else
		vtime_account_system(prev);

#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
	vtime_account_user(prev);
#endif
	arch_vtime_task_switch(prev);
}
#endif /* __ARCH_HAS_VTIME_TASK_SWITCH */

/*
 * Archs that account the whole time spent in the idle task
 * (outside irq) as idle time can rely on this default behaviour
 * and only implement vtime_account_system() and vtime_account_idle().
 * Archs that give idle time a different meaning must override the
 * irq entry accounting below.
 */
#ifndef __ARCH_HAS_VTIME_ACCOUNT
void vtime_common_account_irq_enter(struct task_struct *tsk)
{
	if (!in_interrupt()) {
		/*
		 * If we interrupted user, context_tracking_in_user()
		 * is 1 because context tracking doesn't hook on irq
		 * entry/exit. This way we know if we need to flush
		 * user time on kernel entry.
		 */
		if (context_tracking_in_user()) {
			vtime_account_user(tsk);
			return;
		}

		if (is_idle_task(tsk)) {
			vtime_account_idle(tsk);
			return;
		}
	}
	vtime_account_system(tsk);
}
EXPORT_SYMBOL_GPL(vtime_common_account_irq_enter);
#endif /* __ARCH_HAS_VTIME_ACCOUNT */
#endif /* CONFIG_VIRT_CPU_ACCOUNTING */

#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
void task_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st)
{
	*ut = p->utime;
	*st = p->stime;
}
EXPORT_SYMBOL_GPL(task_cputime_adjusted);

void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st)
{
	struct task_cputime cputime;

	thread_group_cputime(p, &cputime);

	*ut = cputime.utime;
	*st = cputime.stime;
}
#else /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */

/*
 * Account a single tick of CPU time.
 * @p: the process that the CPU time gets accounted to
 * @user_tick: indicates if the tick is a user or a system tick
 */
void account_process_tick(struct task_struct *p, int user_tick)
{
	cputime_t one_jiffy_scaled = cputime_to_scaled(cputime_one_jiffy);
	struct rq *rq = this_rq();

	if (vtime_accounting_cpu_enabled())
		return;

	if (sched_clock_irqtime) {
		irqtime_account_process_tick(p, user_tick, rq, 1);
		return;
	}

	if (steal_account_process_tick())
		return;

	if (user_tick)
		account_user_time(p, cputime_one_jiffy, one_jiffy_scaled);
	else if ((p != rq->idle) || (irq_count() != HARDIRQ_OFFSET))
		account_system_time(p, HARDIRQ_OFFSET, cputime_one_jiffy,
				    one_jiffy_scaled);
	else
		account_idle_time(cputime_one_jiffy);
}
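
/*
 * Granularity note (worked example): with HZ=250 a jiffy is 4ms, so
 * each tick charges cputime_one_jiffy == 4ms wholly to one bucket.
 * Tick-based utime/stime are therefore only statistically accurate,
 * which is why cputime_adjust() below rescales them against the
 * precise scheduler runtime.
 */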

/*
 * Account multiple ticks of steal time.
 * @ticks: number of stolen ticks
 */
void account_steal_ticks(unsigned long ticks)
{
	account_steal_time(jiffies_to_cputime(ticks));
}

/*
 * Account multiple ticks of idle time.
 * @ticks: number of idle ticks
 */
void account_idle_ticks(unsigned long ticks)
{
	if (sched_clock_irqtime) {
		irqtime_account_idle_ticks(ticks);
		return;
	}

	account_idle_time(jiffies_to_cputime(ticks));
}

/*
 * Perform (stime * rtime) / total, but avoid multiplication overflow
 * by losing precision when the numbers are big.
 */
static cputime_t scale_stime(u64 stime, u64 rtime, u64 total)
{
	u64 scaled;

	for (;;) {
		/* Make sure "rtime" is the bigger of stime/rtime */
		if (stime > rtime)
			swap(rtime, stime);

		/* Make sure 'total' fits in 32 bits */
		if (total >> 32)
			goto drop_precision;

		/* Does rtime (and thus stime) fit in 32 bits? */
		if (!(rtime >> 32))
			break;

		/* Can we just balance rtime/stime rather than dropping bits? */
		if (stime >> 31)
			goto drop_precision;

		/* We can grow stime and shrink rtime and try to make them both fit */
		stime <<= 1;
		rtime >>= 1;
		continue;

drop_precision:
		/* We drop from rtime, it has more bits than stime */
		rtime >>= 1;
		total >>= 1;
	}

	/*
	 * Make sure gcc understands that this is a 32x32->64 multiply,
	 * followed by a 64/32->64 divide.
	 */
	scaled = div_u64((u64) (u32) stime * (u64) (u32) rtime, (u32)total);
	return (__force cputime_t) scaled;
}
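
/*
 * Worked example: scale_stime(stime = 2^20, rtime = 2^33, total = 2^21).
 * rtime does not fit in 32 bits but stime has headroom, so the loop
 * balances twice (stime <<= 1, rtime >>= 1), yielding stime = 2^22 and
 * rtime = 2^31, which both fit. The 32x32->64 multiply and 64/32
 * divide then give (2^22 * 2^31) / 2^21 = 2^32, exactly the ideal
 * stime * rtime / total; precision is only lost when the
 * drop_precision path has to discard low bits.
 */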

/*
 * Adjust tick based cputime random precision against scheduler runtime
 * accounting.
 *
 * Tick based cputime accounting depends on random scheduling timeslices
 * of a task being interrupted or not by the timer. Depending on these
 * circumstances, the number of these interrupts may be over- or
 * under-estimated in comparison to the real runtime.
 *
 * Fix this by scaling these tick based values against the total runtime
 * accounted by the CFS scheduler.
 *
 * This code provides the following guarantees:
 *
 *   stime + utime == rtime
 *   stime_i+1 >= stime_i, utime_i+1 >= utime_i
 *
 * Assuming that rtime_i+1 >= rtime_i.
 */
static void cputime_adjust(struct task_cputime *curr,
			   struct prev_cputime *prev,
			   cputime_t *ut, cputime_t *st)
{
	cputime_t rtime, stime, utime;
	unsigned long flags;

	/* Serialize concurrent callers such that we can honour our guarantees */
	raw_spin_lock_irqsave(&prev->lock, flags);
	rtime = nsecs_to_cputime(curr->sum_exec_runtime);

	/*
	 * This is possible under two circumstances:
	 *  - rtime isn't monotonic after all (a bug);
	 *  - we got reordered by the lock.
	 *
	 * In both cases this acts as a filter such that the rest of the code
	 * can assume it is monotonic regardless of anything else.
	 */
	if (prev->stime + prev->utime >= rtime)
		goto out;

	stime = curr->stime;
	utime = curr->utime;

	if (utime == 0) {
		stime = rtime;
		goto update;
	}

	if (stime == 0) {
		utime = rtime;
		goto update;
	}

	stime = scale_stime((__force u64)stime, (__force u64)rtime,
			    (__force u64)(stime + utime));

	/*
	 * Make sure stime doesn't go backwards; this preserves monotonicity
	 * for utime because rtime is monotonic.
	 *
	 *  utime_i+1 = rtime_i+1 - stime_i
	 *            = rtime_i+1 - (rtime_i - utime_i)
	 *            = (rtime_i+1 - rtime_i) + utime_i
	 *            >= utime_i
	 */
	if (stime < prev->stime)
		stime = prev->stime;
	utime = rtime - stime;

	/*
	 * Make sure utime doesn't go backwards; this still preserves
	 * monotonicity for stime, analogous argument to above.
	 */
	if (utime < prev->utime) {
		utime = prev->utime;
		stime = rtime - utime;
	}

update:
	prev->stime = stime;
	prev->utime = utime;
out:
	*ut = prev->utime;
	*st = prev->stime;
	raw_spin_unlock_irqrestore(&prev->lock, flags);
}
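
/*
 * Worked example of the guarantees: suppose a previous call published
 * prev->stime = 4 and prev->utime = 4, and a fresh sample has
 * curr->stime = 2, curr->utime = 6, rtime = 10. scale_stime() returns
 * 2 * 10 / 8 = 2, which would move stime backwards, so it is clamped
 * to prev->stime = 4 and utime becomes rtime - stime = 6. Both fields
 * stay monotonic and stime + utime still equals rtime.
 */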

void task_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st)
{
	struct task_cputime cputime = {
		.sum_exec_runtime = p->se.sum_exec_runtime,
	};

	task_cputime(p, &cputime.utime, &cputime.stime);
	cputime_adjust(&cputime, &p->prev_cputime, ut, st);
}
EXPORT_SYMBOL_GPL(task_cputime_adjusted);

void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st)
{
	struct task_cputime cputime;

	thread_group_cputime(p, &cputime);
	cputime_adjust(&cputime, &p->signal->prev_cputime, ut, st);
}
#endif /* !CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */

#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
static cputime_t vtime_delta(struct task_struct *tsk)
{
	unsigned long now = READ_ONCE(jiffies);

	if (time_before(now, (unsigned long)tsk->vtime_snap))
		return 0;

	return jiffies_to_cputime(now - tsk->vtime_snap);
}

static cputime_t get_vtime_delta(struct task_struct *tsk)
{
	unsigned long now = READ_ONCE(jiffies);
	unsigned long delta = now - tsk->vtime_snap;

	WARN_ON_ONCE(tsk->vtime_snap_whence == VTIME_INACTIVE);
	tsk->vtime_snap = now;

	return jiffies_to_cputime(delta);
}

static void __vtime_account_system(struct task_struct *tsk)
{
	cputime_t delta_cpu = get_vtime_delta(tsk);

	account_system_time(tsk, irq_count(), delta_cpu, cputime_to_scaled(delta_cpu));
}

void vtime_account_system(struct task_struct *tsk)
{
	if (!vtime_delta(tsk))
		return;

	write_seqcount_begin(&tsk->vtime_seqcount);
	__vtime_account_system(tsk);
	write_seqcount_end(&tsk->vtime_seqcount);
}

void vtime_gen_account_irq_exit(struct task_struct *tsk)
{
	write_seqcount_begin(&tsk->vtime_seqcount);
	if (vtime_delta(tsk))
		__vtime_account_system(tsk);
	if (context_tracking_in_user())
		tsk->vtime_snap_whence = VTIME_USER;
	write_seqcount_end(&tsk->vtime_seqcount);
}

void vtime_account_user(struct task_struct *tsk)
{
	cputime_t delta_cpu;

	write_seqcount_begin(&tsk->vtime_seqcount);
	tsk->vtime_snap_whence = VTIME_SYS;
	if (vtime_delta(tsk)) {
		delta_cpu = get_vtime_delta(tsk);
		account_user_time(tsk, delta_cpu, cputime_to_scaled(delta_cpu));
	}
	write_seqcount_end(&tsk->vtime_seqcount);
}

void vtime_user_enter(struct task_struct *tsk)
{
	write_seqcount_begin(&tsk->vtime_seqcount);
	if (vtime_delta(tsk))
		__vtime_account_system(tsk);
	tsk->vtime_snap_whence = VTIME_USER;
	write_seqcount_end(&tsk->vtime_seqcount);
}

void vtime_guest_enter(struct task_struct *tsk)
{
	/*
	 * The PF_VCPU flag must be updated inside the seqcount write
	 * section, together with the vtime_snap flush and update.
	 * That enforces the correct ordering against the reading side,
	 * which relies on the seqcount to see a consistent state.
	 */
	write_seqcount_begin(&tsk->vtime_seqcount);
	if (vtime_delta(tsk))
		__vtime_account_system(tsk);
	current->flags |= PF_VCPU;
	write_seqcount_end(&tsk->vtime_seqcount);
}
EXPORT_SYMBOL_GPL(vtime_guest_enter);

void vtime_guest_exit(struct task_struct *tsk)
{
	write_seqcount_begin(&tsk->vtime_seqcount);
	__vtime_account_system(tsk);
	current->flags &= ~PF_VCPU;
	write_seqcount_end(&tsk->vtime_seqcount);
}
EXPORT_SYMBOL_GPL(vtime_guest_exit);

void vtime_account_idle(struct task_struct *tsk)
{
	cputime_t delta_cpu = get_vtime_delta(tsk);

	account_idle_time(delta_cpu);
}

void arch_vtime_task_switch(struct task_struct *prev)
{
	write_seqcount_begin(&prev->vtime_seqcount);
	prev->vtime_snap_whence = VTIME_INACTIVE;
	write_seqcount_end(&prev->vtime_seqcount);

	write_seqcount_begin(&current->vtime_seqcount);
	current->vtime_snap_whence = VTIME_SYS;
	current->vtime_snap = jiffies;
	write_seqcount_end(&current->vtime_seqcount);
}

void vtime_init_idle(struct task_struct *t, int cpu)
{
	unsigned long flags;

	local_irq_save(flags);
	write_seqcount_begin(&t->vtime_seqcount);
	t->vtime_snap_whence = VTIME_SYS;
	t->vtime_snap = jiffies;
	write_seqcount_end(&t->vtime_seqcount);
	local_irq_restore(flags);
}

cputime_t task_gtime(struct task_struct *t)
{
	unsigned int seq;
	cputime_t gtime;

	if (!vtime_accounting_enabled())
		return t->gtime;

	do {
		seq = read_seqcount_begin(&t->vtime_seqcount);

		gtime = t->gtime;
		if (t->vtime_snap_whence == VTIME_SYS && t->flags & PF_VCPU)
			gtime += vtime_delta(t);

	} while (read_seqcount_retry(&t->vtime_seqcount, seq));

	return gtime;
}

/*
 * Fetch cputime raw values from fields of task_struct and
 * add up the pending nohz execution time since the last
 * accounting snapshot.
 */
static void
fetch_task_cputime(struct task_struct *t,
		   cputime_t *u_dst, cputime_t *s_dst,
		   cputime_t *u_src, cputime_t *s_src,
		   cputime_t *udelta, cputime_t *sdelta)
{
	unsigned int seq;
	unsigned long long delta;

	do {
		*udelta = 0;
		*sdelta = 0;

		seq = read_seqcount_begin(&t->vtime_seqcount);

		if (u_dst)
			*u_dst = *u_src;
		if (s_dst)
			*s_dst = *s_src;

		/* Task is sleeping, nothing to add */
		if (t->vtime_snap_whence == VTIME_INACTIVE ||
		    is_idle_task(t))
			continue;

		delta = vtime_delta(t);

		/*
		 * Task runs either in user or kernel space, add
		 * pending nohz time to the right place.
		 */
		if (t->vtime_snap_whence == VTIME_USER || t->flags & PF_VCPU) {
			*udelta = delta;
		} else {
			if (t->vtime_snap_whence == VTIME_SYS)
				*sdelta = delta;
		}
	} while (read_seqcount_retry(&t->vtime_seqcount, seq));
}

void task_cputime(struct task_struct *t, cputime_t *utime, cputime_t *stime)
{
	cputime_t udelta, sdelta;

	if (!vtime_accounting_enabled()) {
		if (utime)
			*utime = t->utime;
		if (stime)
			*stime = t->stime;
		return;
	}

	fetch_task_cputime(t, utime, stime, &t->utime,
			   &t->stime, &udelta, &sdelta);
	if (utime)
		*utime += udelta;
	if (stime)
		*stime += sdelta;
}
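
/*
 * Illustrative use (a sketch): a caller that wants a task's cputime
 * including the pending, not yet accounted nohz delta would do
 *
 *	cputime_t utime, stime;
 *
 *	task_cputime(t, &utime, &stime);
 *
 * Either pointer may be NULL when only one component is needed; the
 * seqcount retry in fetch_task_cputime() guarantees a snapshot that
 * is consistent against a concurrent writer.
 */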

void task_cputime_scaled(struct task_struct *t,
			 cputime_t *utimescaled, cputime_t *stimescaled)
{
	cputime_t udelta, sdelta;

	if (!vtime_accounting_enabled()) {
		if (utimescaled)
			*utimescaled = t->utimescaled;
		if (stimescaled)
			*stimescaled = t->stimescaled;
		return;
	}

	fetch_task_cputime(t, utimescaled, stimescaled,
			   &t->utimescaled, &t->stimescaled, &udelta, &sdelta);
	if (utimescaled)
		*utimescaled += cputime_to_scaled(udelta);
	if (stimescaled)
		*stimescaled += cputime_to_scaled(sdelta);
}
#endif /* CONFIG_VIRT_CPU_ACCOUNTING_GEN */