// SPDX-License-Identifier: GPL-2.0-only
/*
 * Simple CPU accounting cgroup controller
 */
#include "sched.h"

#ifdef CONFIG_IRQ_TIME_ACCOUNTING

/*
 * There are no locks covering percpu hardirq/softirq time.
 * They are only modified in vtime_account, on corresponding CPU
 * with interrupts disabled. So, writes are safe.
 * They are read and saved off onto struct rq in update_rq_clock().
 * This may result in other CPU reading this CPU's irq time and can
 * race with irq/vtime_account on this CPU. We would either get old
 * or new value with a side effect of accounting a slice of irq time
 * to the wrong task when an irq is in progress while we read rq->clock.
 * That is a worthy compromise in place of having locks on each irq in
 * account_system_time.
 */
DEFINE_PER_CPU(struct irqtime, cpu_irqtime);

static int sched_clock_irqtime;

void enable_sched_clock_irqtime(void)
{
	sched_clock_irqtime = 1;
}

void disable_sched_clock_irqtime(void)
{
	sched_clock_irqtime = 0;
}

static void irqtime_account_delta(struct irqtime *irqtime, u64 delta,
				  enum cpu_usage_stat idx)
{
	u64 *cpustat = kcpustat_this_cpu->cpustat;

	u64_stats_update_begin(&irqtime->sync);
	cpustat[idx] += delta;
	irqtime->total += delta;
	irqtime->tick_delta += delta;
	u64_stats_update_end(&irqtime->sync);
}
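
/*
 * Note: irqtime->sync is a u64_stats_sync; on 32-bit kernels it lets
 * readers such as irq_time_read() in sched.h retry around torn 64-bit
 * updates of irqtime->total, while on 64-bit kernels it compiles away.
 */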

/*
 * Called after incrementing preempt_count on {soft,}irq_enter
 * and before decrementing preempt_count on {soft,}irq_exit.
 */
void irqtime_account_irq(struct task_struct *curr, unsigned int offset)
{
	struct irqtime *irqtime = this_cpu_ptr(&cpu_irqtime);
	unsigned int pc;
	s64 delta;
	int cpu;

	if (!sched_clock_irqtime)
		return;

	cpu = smp_processor_id();
	delta = sched_clock_cpu(cpu) - irqtime->irq_start_time;
	irqtime->irq_start_time += delta;
	pc = irq_count() - offset;

	/*
	 * We do not account for softirq time from ksoftirqd here.
	 * We want to continue accounting softirq time to ksoftirqd thread
	 * in that case, so as not to confuse scheduler with a special task
	 * that do not consume any time, but still wants to run.
	 */
	if (pc & HARDIRQ_MASK)
		irqtime_account_delta(irqtime, delta, CPUTIME_IRQ);
	else if ((pc & SOFTIRQ_OFFSET) && curr != this_cpu_ksoftirqd())
		irqtime_account_delta(irqtime, delta, CPUTIME_SOFTIRQ);
}
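
/*
 * For reference: SOFTIRQ_OFFSET is the lowest softirq bit in
 * preempt_count and is only set while a softirq is actually being
 * served (local_bh_disable() uses SOFTIRQ_DISABLE_OFFSET instead), so
 * the pc & SOFTIRQ_OFFSET test above catches softirq servicing without
 * counting bh-disabled sections; pc & HARDIRQ_MASK likewise covers any
 * hardirq nesting depth.
 */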

static u64 irqtime_tick_accounted(u64 maxtime)
{
	struct irqtime *irqtime = this_cpu_ptr(&cpu_irqtime);
	u64 delta;

	delta = min(irqtime->tick_delta, maxtime);
	irqtime->tick_delta -= delta;

	return delta;
}

#else /* CONFIG_IRQ_TIME_ACCOUNTING */

#define sched_clock_irqtime	(0)

static u64 irqtime_tick_accounted(u64 dummy)
{
	return 0;
}

#endif /* !CONFIG_IRQ_TIME_ACCOUNTING */

static inline void task_group_account_field(struct task_struct *p, int index,
					    u64 tmp)
{
	/*
	 * Since all updates are sure to touch the root cgroup, we
	 * get ourselves ahead and touch it first. If the root cgroup
	 * is the only cgroup, then nothing else should be necessary.
	 */
	__this_cpu_add(kernel_cpustat.cpustat[index], tmp);

	cgroup_account_cputime_field(p, index, tmp);
}

/*
 * Account user CPU time to a process.
 * @p: the process that the CPU time gets accounted to
 * @cputime: the CPU time spent in user space since the last update
 */
void account_user_time(struct task_struct *p, u64 cputime)
{
	int index;

	/* Add user time to process. */
	p->utime += cputime;
	account_group_user_time(p, cputime);

	index = (task_nice(p) > 0) ? CPUTIME_NICE : CPUTIME_USER;

	/* Add user time to cpustat. */
	task_group_account_field(p, index, cputime);

	/* Account for user time used */
	acct_account_cputime(p);
}

/*
 * Account guest CPU time to a process.
 * @p: the process that the CPU time gets accounted to
 * @cputime: the CPU time spent in virtual machine since the last update
 */
void account_guest_time(struct task_struct *p, u64 cputime)
{
	u64 *cpustat = kcpustat_this_cpu->cpustat;

	/* Add guest time to process. */
	p->utime += cputime;
	account_group_user_time(p, cputime);
	p->gtime += cputime;

	/* Add guest time to cpustat. */
	if (task_nice(p) > 0) {
		cpustat[CPUTIME_NICE] += cputime;
		cpustat[CPUTIME_GUEST_NICE] += cputime;
	} else {
		cpustat[CPUTIME_USER] += cputime;
		cpustat[CPUTIME_GUEST] += cputime;
	}
}

/*
 * Account system CPU time to a process and desired cpustat field.
 * @p: the process that the CPU time gets accounted to
 * @cputime: the CPU time spent in kernel space since the last update
 * @index: the cpustat field that has to be updated
 */
void account_system_index_time(struct task_struct *p,
			       u64 cputime, enum cpu_usage_stat index)
{
	/* Add system time to process. */
	p->stime += cputime;
	account_group_system_time(p, cputime);

	/* Add system time to cpustat. */
	task_group_account_field(p, index, cputime);

	/* Account for system time used */
	acct_account_cputime(p);
}

/*
 * Account system CPU time to a process.
 * @p: the process that the CPU time gets accounted to
 * @hardirq_offset: the offset to subtract from hardirq_count()
 * @cputime: the CPU time spent in kernel space since the last update
 */
void account_system_time(struct task_struct *p, int hardirq_offset, u64 cputime)
{
	int index;

	if ((p->flags & PF_VCPU) && (irq_count() - hardirq_offset == 0)) {
		account_guest_time(p, cputime);
		return;
	}

	if (hardirq_count() - hardirq_offset)
		index = CPUTIME_IRQ;
	else if (in_serving_softirq())
		index = CPUTIME_SOFTIRQ;
	else
		index = CPUTIME_SYSTEM;

	account_system_index_time(p, cputime, index);
}

/*
 * Account for involuntary wait time.
 * @cputime: the CPU time spent in involuntary wait
 */
void account_steal_time(u64 cputime)
{
	u64 *cpustat = kcpustat_this_cpu->cpustat;

	cpustat[CPUTIME_STEAL] += cputime;
}

/*
 * Account for idle time.
 * @cputime: the CPU time spent in idle wait
 */
void account_idle_time(u64 cputime)
{
	u64 *cpustat = kcpustat_this_cpu->cpustat;
	struct rq *rq = this_rq();

	if (atomic_read(&rq->nr_iowait) > 0)
		cpustat[CPUTIME_IOWAIT] += cputime;
	else
		cpustat[CPUTIME_IDLE] += cputime;
}

/*
 * When a guest is interrupted for a longer amount of time, missed clock
 * ticks are not redelivered later. Due to that, this function may on
 * occasion account more time than the calling functions think elapsed.
 */
static __always_inline u64 steal_account_process_time(u64 maxtime)
{
#ifdef CONFIG_PARAVIRT
	if (static_key_false(&paravirt_steal_enabled)) {
		u64 steal;

		steal = paravirt_steal_clock(smp_processor_id());
		steal -= this_rq()->prev_steal_time;
		steal = min(steal, maxtime);
		account_steal_time(steal);
		this_rq()->prev_steal_time += steal;

		return steal;
	}
#endif
	return 0;
}

/*
 * Account how much elapsed time was spent in steal, irq, or softirq time.
 */
static inline u64 account_other_time(u64 max)
{
	u64 accounted;

	lockdep_assert_irqs_disabled();

	accounted = steal_account_process_time(max);

	if (accounted < max)
		accounted += irqtime_tick_accounted(max - accounted);

	return accounted;
}

#ifdef CONFIG_64BIT
static inline u64 read_sum_exec_runtime(struct task_struct *t)
{
	return t->se.sum_exec_runtime;
}
#else
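/*
 * On 32-bit, a 64-bit sum_exec_runtime load could tear against a
 * concurrent update, so take the task's rq lock around the read.
 */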
static u64 read_sum_exec_runtime(struct task_struct *t)
{
	u64 ns;
	struct rq_flags rf;
	struct rq *rq;

	rq = task_rq_lock(t, &rf);
	ns = t->se.sum_exec_runtime;
	task_rq_unlock(rq, t, &rf);

	return ns;
}
#endif

/*
 * Accumulate raw cputime values of dead tasks (sig->[us]time) and live
 * tasks (sum on group iteration) belonging to @tsk's group.
 */
void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times)
{
	struct signal_struct *sig = tsk->signal;
	u64 utime, stime;
	struct task_struct *t;
	unsigned int seq, nextseq;
	unsigned long flags;

	/*
	 * Update current task runtime to account pending time since last
	 * scheduler action or thread_group_cputime() call. This thread group
	 * might have other running tasks on different CPUs, but updating
	 * their runtime can affect syscall performance, so we skip account
	 * those pending times and rely only on values updated on tick or
	 * other scheduler action.
	 */
	if (same_thread_group(current, tsk))
		(void) task_sched_runtime(current);

	rcu_read_lock();
	/* Attempt a lockless read on the first round. */
	nextseq = 0;
	do {
		seq = nextseq;
		flags = read_seqbegin_or_lock_irqsave(&sig->stats_lock, &seq);
		times->utime = sig->utime;
		times->stime = sig->stime;
		times->sum_exec_runtime = sig->sum_sched_runtime;

		for_each_thread(tsk, t) {
			task_cputime(t, &utime, &stime);
			times->utime += utime;
			times->stime += stime;
			times->sum_exec_runtime += read_sum_exec_runtime(t);
		}
		/* If lockless access failed, take the lock. */
		nextseq = 1;
	} while (need_seqretry(&sig->stats_lock, seq));
	done_seqretry_irqrestore(&sig->stats_lock, seq, flags);
	rcu_read_unlock();
}

#ifdef CONFIG_IRQ_TIME_ACCOUNTING
/*
 * Account a tick to a process and cpustat
 * @p: the process that the CPU time gets accounted to
 * @user_tick: is the tick from userspace
 * @ticks: number of ticks to account
 *
 * Tick demultiplexing follows the order
 * - pending hardirq update
 * - pending softirq update
 * - user_time
 * - idle_time
 * - system time
 *   - check for guest_time
 *   - check for user_time
 * - check for system_time
 */
static void irqtime_account_process_tick(struct task_struct *p, int user_tick,
					 int ticks)
{
	u64 other, cputime = TICK_NSEC * ticks;

	/*
	 * When returning from idle, many ticks can get accounted at
	 * once, including some ticks of steal, irq, and softirq time.
	 * Subtract those ticks from the amount of time accounted to
	 * idle, or potentially user or system time. Due to rounding,
	 * other time can exceed ticks occasionally.
	 */
	other = account_other_time(ULONG_MAX);
	if (other >= cputime)
		return;

	cputime -= other;

	if (this_cpu_ksoftirqd() == p) {
		/*
		 * ksoftirqd time do not get accounted in cpu_softirq_time.
		 * So, we have to handle it separately here.
		 * Also, p->stime needs to be updated for ksoftirqd.
		 */
		account_system_index_time(p, cputime, CPUTIME_SOFTIRQ);
	} else if (user_tick) {
		account_user_time(p, cputime);
	} else if (p == this_rq()->idle) {
		account_idle_time(cputime);
	} else if (p->flags & PF_VCPU) { /* System time or guest time */
		account_guest_time(p, cputime);
	} else {
		account_system_index_time(p, cputime, CPUTIME_SYSTEM);
	}
}

static void irqtime_account_idle_ticks(int ticks)
{
	irqtime_account_process_tick(current, 0, ticks);
}
#else /* CONFIG_IRQ_TIME_ACCOUNTING */
static inline void irqtime_account_idle_ticks(int ticks) { }
static inline void irqtime_account_process_tick(struct task_struct *p, int user_tick,
						int nr_ticks) { }
#endif /* CONFIG_IRQ_TIME_ACCOUNTING */

/*
 * Use precise platform statistics if available:
 */
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE

# ifndef __ARCH_HAS_VTIME_TASK_SWITCH
void vtime_task_switch(struct task_struct *prev)
{
	if (is_idle_task(prev))
		vtime_account_idle(prev);
	else
		vtime_account_kernel(prev);

	vtime_flush(prev);
	arch_vtime_task_switch(prev);
}
# endif

void vtime_account_irq(struct task_struct *tsk, unsigned int offset)
{
	unsigned int pc = irq_count() - offset;

	if (pc & HARDIRQ_OFFSET) {
		vtime_account_hardirq(tsk);
	} else if (pc & SOFTIRQ_OFFSET) {
		vtime_account_softirq(tsk);
	} else if (!IS_ENABLED(CONFIG_HAVE_VIRT_CPU_ACCOUNTING_IDLE) &&
		   is_idle_task(tsk)) {
		vtime_account_idle(tsk);
	} else {
		vtime_account_kernel(tsk);
	}
}

void cputime_adjust(struct task_cputime *curr, struct prev_cputime *prev,
		    u64 *ut, u64 *st)
{
	*ut = curr->utime;
	*st = curr->stime;
}

void task_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st)
{
	*ut = p->utime;
	*st = p->stime;
}
EXPORT_SYMBOL_GPL(task_cputime_adjusted);

void thread_group_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st)
{
	struct task_cputime cputime;

	thread_group_cputime(p, &cputime);

	*ut = cputime.utime;
	*st = cputime.stime;
}

#else /* !CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */

/*
 * Account a single tick of CPU time.
 * @p: the process that the CPU time gets accounted to
 * @user_tick: indicates if the tick is a user or a system tick
 */
void account_process_tick(struct task_struct *p, int user_tick)
{
	u64 cputime, steal;

	if (vtime_accounting_enabled_this_cpu())
		return;

	if (sched_clock_irqtime) {
		irqtime_account_process_tick(p, user_tick, 1);
		return;
	}

	cputime = TICK_NSEC;
	steal = steal_account_process_time(ULONG_MAX);

	if (steal >= cputime)
		return;

	cputime -= steal;

	if (user_tick)
		account_user_time(p, cputime);
	else if ((p != this_rq()->idle) || (irq_count() != HARDIRQ_OFFSET))
		account_system_time(p, HARDIRQ_OFFSET, cputime);
	else
		account_idle_time(cputime);
}

/*
 * Account multiple ticks of idle time.
 * @ticks: number of stolen ticks
 */
void account_idle_ticks(unsigned long ticks)
{
	u64 cputime, steal;

	if (sched_clock_irqtime) {
		irqtime_account_idle_ticks(ticks);
		return;
	}

	cputime = ticks * TICK_NSEC;
	steal = steal_account_process_time(ULONG_MAX);

	if (steal >= cputime)
		return;

	cputime -= steal;
	account_idle_time(cputime);
}

/*
 * Adjust tick based cputime random precision against scheduler runtime
 * accounting.
 *
 * Tick based cputime accounting depends on random scheduling timeslices of a
 * task to be interrupted or not by the timer.  Depending on these
 * circumstances, the number of these interrupts may be over or
 * under-optimistic, matching the real user and system cputime with a variable
 * precision.
 *
 * Fix this by scaling these tick based values against the total runtime
 * accounted by the CFS scheduler.
 *
 * This code provides the following guarantees:
 *
 *   stime + utime == rtime
 *   stime_i+1 >= stime_i, utime_i+1 >= utime_i
 *
 * Assuming that rtime_i+1 >= rtime_i.
 */
void cputime_adjust(struct task_cputime *curr, struct prev_cputime *prev,
		    u64 *ut, u64 *st)
{
	u64 rtime, stime, utime;
	unsigned long flags;

	/* Serialize concurrent callers such that we can honour our guarantees */
	raw_spin_lock_irqsave(&prev->lock, flags);
	rtime = curr->sum_exec_runtime;

	/*
	 * This is possible under two circumstances:
	 *  - rtime isn't monotonic after all (a bug);
	 *  - we got reordered by the lock.
	 *
	 * In both cases this acts as a filter such that the rest of the code
	 * behaves as if the monotonicity constraints are met.
	 */
	if (prev->stime + prev->utime >= rtime)
		goto out;

	stime = curr->stime;
	utime = curr->utime;

	/*
	 * If either stime or utime are 0, assume all runtime is userspace.
	 * Once a task gets some ticks, the monotonicity code at 'update:'
	 * will ensure things converge to the observed ratio.
	 */
	if (stime == 0) {
		utime = rtime;
		goto update;
	}

	if (utime == 0) {
		stime = rtime;
		goto update;
	}

	stime = mul_u64_u64_div_u64(stime, rtime, stime + utime);
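
	/*
	 * Worked example (illustrative numbers): stime = 2, utime = 8,
	 * rtime = 15 gives stime = 2 * 15 / (2 + 8) = 3 here, and
	 * utime = rtime - stime = 12 below, preserving the 1:4 tick
	 * ratio while keeping stime + utime == rtime.
	 */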

update:
	/*
	 * Make sure stime doesn't go backwards; this preserves monotonicity
	 * for utime because rtime is monotonic.
	 *
	 *  utime_i+1 = rtime_i+1 - stime_i
	 *            = rtime_i+1 - (rtime_i - utime_i)
	 *            = (rtime_i+1 - rtime_i) + utime_i
	 *            >= utime_i
	 */
	if (stime < prev->stime)
		stime = prev->stime;
	utime = rtime - stime;

	/*
	 * Make sure utime doesn't go backwards; this still preserves
	 * monotonicity for stime, analogous argument to above.
	 */
	if (utime < prev->utime) {
		utime = prev->utime;
		stime = rtime - utime;
	}

	prev->stime = stime;
	prev->utime = utime;
out:
	*ut = prev->utime;
	*st = prev->stime;
	raw_spin_unlock_irqrestore(&prev->lock, flags);
}

void task_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st)
{
	struct task_cputime cputime = {
		.sum_exec_runtime = p->se.sum_exec_runtime,
	};

	task_cputime(p, &cputime.utime, &cputime.stime);
	cputime_adjust(&cputime, &p->prev_cputime, ut, st);
}
EXPORT_SYMBOL_GPL(task_cputime_adjusted);

void thread_group_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st)
{
	struct task_cputime cputime;

	thread_group_cputime(p, &cputime);
	cputime_adjust(&cputime, &p->signal->prev_cputime, ut, st);
}
#endif /* !CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */

#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
static u64 vtime_delta(struct vtime *vtime)
{
	unsigned long long clock;

	clock = sched_clock();
	if (clock < vtime->starttime)
		return 0;

	return clock - vtime->starttime;
}

static u64 get_vtime_delta(struct vtime *vtime)
{
	u64 delta = vtime_delta(vtime);
	u64 other;

	/*
	 * Unlike tick based timing, vtime based timing never has lost
	 * ticks, and no need for steal time accounting to make up for
	 * lost ticks. Vtime accounts a rounded version of actual
	 * elapsed time. Limit account_other_time to prevent rounding
	 * errors from causing elapsed vtime to go negative.
	 */
	other = account_other_time(delta);
	WARN_ON_ONCE(vtime->state == VTIME_INACTIVE);
	vtime->starttime += delta;

	return delta - other;
}

static void vtime_account_system(struct task_struct *tsk,
				 struct vtime *vtime)
{
	vtime->stime += get_vtime_delta(vtime);
	if (vtime->stime >= TICK_NSEC) {
		account_system_time(tsk, irq_count(), vtime->stime);
		vtime->stime = 0;
	}
}

static void vtime_account_guest(struct task_struct *tsk,
				struct vtime *vtime)
{
	vtime->gtime += get_vtime_delta(vtime);
	if (vtime->gtime >= TICK_NSEC) {
		account_guest_time(tsk, vtime->gtime);
		vtime->gtime = 0;
	}
}

static void __vtime_account_kernel(struct task_struct *tsk,
				   struct vtime *vtime)
{
	/* We might have scheduled out from guest path */
	if (vtime->state == VTIME_GUEST)
		vtime_account_guest(tsk, vtime);
	else
		vtime_account_system(tsk, vtime);
}

void vtime_account_kernel(struct task_struct *tsk)
{
	struct vtime *vtime = &tsk->vtime;

	if (!vtime_delta(vtime))
		return;

	write_seqcount_begin(&vtime->seqcount);
	__vtime_account_kernel(tsk, vtime);
	write_seqcount_end(&vtime->seqcount);
}

void vtime_user_enter(struct task_struct *tsk)
{
	struct vtime *vtime = &tsk->vtime;

	write_seqcount_begin(&vtime->seqcount);
	vtime_account_system(tsk, vtime);
	vtime->state = VTIME_USER;
	write_seqcount_end(&vtime->seqcount);
}

void vtime_user_exit(struct task_struct *tsk)
{
	struct vtime *vtime = &tsk->vtime;

	write_seqcount_begin(&vtime->seqcount);
	vtime->utime += get_vtime_delta(vtime);
	if (vtime->utime >= TICK_NSEC) {
		account_user_time(tsk, vtime->utime);
		vtime->utime = 0;
	}
	vtime->state = VTIME_SYS;
	write_seqcount_end(&vtime->seqcount);
}

void vtime_guest_enter(struct task_struct *tsk)
{
	struct vtime *vtime = &tsk->vtime;

	/*
	 * The flags must be updated under the lock with
	 * the vtime_starttime flush and update.
	 * That enforces a right ordering and update sequence
	 * synchronization against the reader (task_gtime())
	 * that can thus safely catch up with a tickless delta.
	 */
	write_seqcount_begin(&vtime->seqcount);
	vtime_account_system(tsk, vtime);
	tsk->flags |= PF_VCPU;
	vtime->state = VTIME_GUEST;
	write_seqcount_end(&vtime->seqcount);
}
EXPORT_SYMBOL_GPL(vtime_guest_enter);

void vtime_guest_exit(struct task_struct *tsk)
{
	struct vtime *vtime = &tsk->vtime;

	write_seqcount_begin(&vtime->seqcount);
	vtime_account_guest(tsk, vtime);
	tsk->flags &= ~PF_VCPU;
	vtime->state = VTIME_SYS;
	write_seqcount_end(&vtime->seqcount);
}
EXPORT_SYMBOL_GPL(vtime_guest_exit);

void vtime_account_idle(struct task_struct *tsk)
{
	account_idle_time(get_vtime_delta(&tsk->vtime));
}

void vtime_task_switch_generic(struct task_struct *prev)
{
	struct vtime *vtime = &prev->vtime;

	write_seqcount_begin(&vtime->seqcount);
	if (vtime->state == VTIME_IDLE)
		vtime_account_idle(prev);
	else
		__vtime_account_kernel(prev, vtime);
	vtime->state = VTIME_INACTIVE;
	vtime->cpu = -1;
	write_seqcount_end(&vtime->seqcount);

	vtime = &current->vtime;

	write_seqcount_begin(&vtime->seqcount);
	if (is_idle_task(current))
		vtime->state = VTIME_IDLE;
	else if (current->flags & PF_VCPU)
		vtime->state = VTIME_GUEST;
	else
		vtime->state = VTIME_SYS;
	vtime->starttime = sched_clock();
	vtime->cpu = smp_processor_id();
	write_seqcount_end(&vtime->seqcount);
}

void vtime_init_idle(struct task_struct *t, int cpu)
{
	struct vtime *vtime = &t->vtime;
	unsigned long flags;

	local_irq_save(flags);
	write_seqcount_begin(&vtime->seqcount);
	vtime->state = VTIME_IDLE;
	vtime->starttime = sched_clock();
	vtime->cpu = cpu;
	write_seqcount_end(&vtime->seqcount);
	local_irq_restore(flags);
}

u64 task_gtime(struct task_struct *t)
{
	struct vtime *vtime = &t->vtime;
	unsigned int seq;
	u64 gtime;

	if (!vtime_accounting_enabled())
		return t->gtime;

	do {
		seq = read_seqcount_begin(&vtime->seqcount);

		gtime = t->gtime;
		if (vtime->state == VTIME_GUEST)
			gtime += vtime->gtime + vtime_delta(vtime);

	} while (read_seqcount_retry(&vtime->seqcount, seq));

	return gtime;
}

/*
 * Fetch cputime raw values from fields of task_struct and
 * add up the pending nohz execution time since the last
 * cputime snapshot.
 */
void task_cputime(struct task_struct *t, u64 *utime, u64 *stime)
{
	struct vtime *vtime = &t->vtime;
	unsigned int seq;
	u64 delta;

	if (!vtime_accounting_enabled()) {
		*utime = t->utime;
		*stime = t->stime;
		return;
	}

	do {
		seq = read_seqcount_begin(&vtime->seqcount);

		*utime = t->utime;
		*stime = t->stime;

		/* Task is sleeping or idle, nothing to add */
		if (vtime->state < VTIME_SYS)
			continue;

		delta = vtime_delta(vtime);

		/*
		 * Task runs either in user (including guest) or kernel space,
		 * add pending nohz time to the right place.
		 */
		if (vtime->state == VTIME_SYS)
			*stime += vtime->stime + delta;
		else
			*utime += vtime->utime + delta;
	} while (read_seqcount_retry(&vtime->seqcount, seq));
}

static int vtime_state_fetch(struct vtime *vtime, int cpu)
{
	int state = READ_ONCE(vtime->state);

	/*
	 * We raced against a context switch, fetch the
	 * kcpustat task again.
	 */
	if (vtime->cpu != cpu && vtime->cpu != -1)
		return -EAGAIN;

	/*
	 * Two possible things here:
	 * 1) We are seeing the scheduling out task (prev) or any past one.
	 * 2) We are seeing the scheduling in task (next) but it hasn't
	 *    passed though vtime_task_switch() yet so the pending
	 *    cputime of the prev task may not be flushed yet.
	 *
	 * Case 1) is ok but 2) is not. So wait for a safe VTIME state.
	 */
	if (state == VTIME_INACTIVE)
		return -EAGAIN;

	return state;
}

static u64 kcpustat_user_vtime(struct vtime *vtime)
{
	if (vtime->state == VTIME_USER)
		return vtime->utime + vtime_delta(vtime);
	else if (vtime->state == VTIME_GUEST)
		return vtime->gtime + vtime_delta(vtime);
	return 0;
}

static int kcpustat_field_vtime(u64 *cpustat,
				struct task_struct *tsk,
				enum cpu_usage_stat usage,
				int cpu, u64 *val)
{
	struct vtime *vtime = &tsk->vtime;
	unsigned int seq;

	do {
		int state;

		seq = read_seqcount_begin(&vtime->seqcount);

		state = vtime_state_fetch(vtime, cpu);
		if (state < 0)
			return state;

		*val = cpustat[usage];

		/*
		 * Nice VS unnice cputime accounting may be inaccurate if
		 * the nice value has changed since the last vtime update.
		 * But proper fix would involve interrupting target on nice
		 * updates which is a no go on nohz_full (although the scheduler
		 * may still interrupt the target if rescheduling is needed...)
		 */
		switch (usage) {
		case CPUTIME_SYSTEM:
			if (state == VTIME_SYS)
				*val += vtime->stime + vtime_delta(vtime);
			break;
		case CPUTIME_USER:
			if (task_nice(tsk) <= 0)
				*val += kcpustat_user_vtime(vtime);
			break;
		case CPUTIME_NICE:
			if (task_nice(tsk) > 0)
				*val += kcpustat_user_vtime(vtime);
			break;
		case CPUTIME_GUEST:
			if (state == VTIME_GUEST && task_nice(tsk) <= 0)
				*val += vtime->gtime + vtime_delta(vtime);
			break;
		case CPUTIME_GUEST_NICE:
			if (state == VTIME_GUEST && task_nice(tsk) > 0)
				*val += vtime->gtime + vtime_delta(vtime);
			break;
		default:
			break;
		}
	} while (read_seqcount_retry(&vtime->seqcount, seq));

	return 0;
}

u64 kcpustat_field(struct kernel_cpustat *kcpustat,
		   enum cpu_usage_stat usage, int cpu)
{
	u64 *cpustat = kcpustat->cpustat;
	u64 val = cpustat[usage];
	struct rq *rq;
	int err;

	if (!vtime_accounting_enabled_cpu(cpu))
		return val;

	rq = cpu_rq(cpu);

	for (;;) {
		struct task_struct *curr;

		rcu_read_lock();
		curr = rcu_dereference(rq->curr);
		if (WARN_ON_ONCE(!curr)) {
			rcu_read_unlock();
			return cpustat[usage];
		}

		err = kcpustat_field_vtime(cpustat, curr, usage, cpu, &val);
		rcu_read_unlock();

		if (!err)
			return val;

		cpu_relax();
	}
}
EXPORT_SYMBOL_GPL(kcpustat_field);

static int kcpustat_cpu_fetch_vtime(struct kernel_cpustat *dst,
				    const struct kernel_cpustat *src,
				    struct task_struct *tsk, int cpu)
{
	struct vtime *vtime = &tsk->vtime;
	unsigned int seq;

	do {
		u64 *cpustat;
		u64 delta;
		int state;

		seq = read_seqcount_begin(&vtime->seqcount);

		state = vtime_state_fetch(vtime, cpu);
		if (state < 0)
			return state;

		*dst = *src;
		cpustat = dst->cpustat;

		/* Task is sleeping, dead or idle, nothing to add */
		if (state < VTIME_SYS)
			continue;

		delta = vtime_delta(vtime);

		/*
		 * Task runs either in user (including guest) or kernel space,
		 * add pending nohz time to the right place.
		 */
		if (state == VTIME_SYS) {
			cpustat[CPUTIME_SYSTEM] += vtime->stime + delta;
		} else if (state == VTIME_USER) {
			if (task_nice(tsk) > 0)
				cpustat[CPUTIME_NICE] += vtime->utime + delta;
			else
				cpustat[CPUTIME_USER] += vtime->utime + delta;
		} else {
			WARN_ON_ONCE(state != VTIME_GUEST);
			if (task_nice(tsk) > 0) {
				cpustat[CPUTIME_GUEST_NICE] += vtime->gtime + delta;
				cpustat[CPUTIME_NICE] += vtime->gtime + delta;
			} else {
				cpustat[CPUTIME_GUEST] += vtime->gtime + delta;
				cpustat[CPUTIME_USER] += vtime->gtime + delta;
			}
		}
	} while (read_seqcount_retry(&vtime->seqcount, seq));

	return 0;
}

void kcpustat_cpu_fetch(struct kernel_cpustat *dst, int cpu)
{
	const struct kernel_cpustat *src = &kcpustat_cpu(cpu);
	struct rq *rq;
	int err;

	if (!vtime_accounting_enabled_cpu(cpu)) {
		*dst = *src;
		return;
	}

	rq = cpu_rq(cpu);

	for (;;) {
		struct task_struct *curr;

		rcu_read_lock();
		curr = rcu_dereference(rq->curr);
		if (WARN_ON_ONCE(!curr)) {
			rcu_read_unlock();
			*dst = *src;
			return;
		}

		err = kcpustat_cpu_fetch_vtime(dst, src, curr, cpu);
		rcu_read_unlock();

		if (!err)
			return;

		cpu_relax();
	}
}
EXPORT_SYMBOL_GPL(kcpustat_cpu_fetch);

#endif /* CONFIG_VIRT_CPU_ACCOUNTING_GEN */