linux/include/linux/sched/cputime.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SCHED_CPUTIME_H
#define _LINUX_SCHED_CPUTIME_H

#include <linux/sched/signal.h>

/*
 * cputime accounting APIs:
 */

#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
#include <asm/cputime.h>

#ifndef cputime_to_nsecs
# define cputime_to_nsecs(__ct) \
        (cputime_to_usecs(__ct) * NSEC_PER_USEC)
#endif
#endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
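
/*
 * Illustrative sketch (not part of this header): with the fallback
 * above, an arch cputime value is converted to nanoseconds by going
 * through the arch-provided microsecond conversion:
 *
 *	u64 ns = cputime_to_nsecs(ct);
 *	// expands to: cputime_to_usecs(ct) * NSEC_PER_USEC
 */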

#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
extern bool task_cputime(struct task_struct *t,
                         u64 *utime, u64 *stime);
extern u64 task_gtime(struct task_struct *t);
#else
static inline bool task_cputime(struct task_struct *t,
                                u64 *utime, u64 *stime)
{
        *utime = t->utime;
        *stime = t->stime;
        return false;
}

static inline u64 task_gtime(struct task_struct *t)
{
        return t->gtime;
}
#endif
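
/*
 * Illustrative sketch (hypothetical caller, not part of this header):
 * reading a task's accumulated user and system time. The fallback
 * variant above simply copies the cached fields and returns false.
 *
 *	u64 utime, stime;
 *
 *	task_cputime(tsk, &utime, &stime);
 *	// utime and stime now hold the task's user/system time
 */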

#ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
static inline void task_cputime_scaled(struct task_struct *t,
                                       u64 *utimescaled,
                                       u64 *stimescaled)
{
        *utimescaled = t->utimescaled;
        *stimescaled = t->stimescaled;
}
#else
static inline void task_cputime_scaled(struct task_struct *t,
                                       u64 *utimescaled,
                                       u64 *stimescaled)
{
        task_cputime(t, utimescaled, stimescaled);
}
#endif

extern void task_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st);
extern void thread_group_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st);
extern void cputime_adjust(struct task_cputime *curr, struct prev_cputime *prev,
                           u64 *ut, u64 *st);
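
/*
 * Illustrative sketch (hypothetical caller, not part of this header):
 * the adjusted variants scale raw utime/stime against sum_exec_runtime
 * via cputime_adjust(), which consumers such as /proc/<pid>/stat rely
 * on for smooth, monotonic values.
 *
 *	u64 ut, st;
 *
 *	task_cputime_adjusted(p, &ut, &st);		// single task
 *	thread_group_cputime_adjusted(p, &ut, &st);	// whole group
 */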

/*
 * Thread group CPU time accounting.
 */
void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times);
void thread_group_sample_cputime(struct task_struct *tsk, u64 *samples);
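
/*
 * Illustrative sketch (hypothetical caller, not part of this header):
 * aggregating cputime over all threads in a group.
 *
 *	struct task_cputime times;
 *
 *	thread_group_cputime(tsk, &times);
 *	// times.utime, times.stime and times.sum_exec_runtime now hold
 *	// the group-wide totals
 */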

/*
 * The following are functions that support scheduler-internal time accounting.
 * These functions are generally called at the timer tick.  None of this depends
 * on CONFIG_SCHEDSTATS.
 */

/**
 * get_running_cputimer - return &tsk->signal->cputimer if cputimers are active
 *
 * @tsk:        Pointer to target task.
 */
#ifdef CONFIG_POSIX_TIMERS
static inline
struct thread_group_cputimer *get_running_cputimer(struct task_struct *tsk)
{
        struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;

        /*
         * Check whether posix CPU timers are active. If not, the thread
         * group accounting is not active either. Lockless check.
         */
        if (!READ_ONCE(tsk->signal->posix_cputimers.timers_active))
                return NULL;

        /*
         * After we flush the task's sum_exec_runtime to sig->sum_sched_runtime
         * in __exit_signal(), we won't account further cputime consumed by
         * that task to the signal struct, even though the task can still be
         * ticking after __exit_signal().
         *
         * In order to keep a consistent behaviour between thread group cputime
         * and thread group cputimer accounting, let's also ignore the cputime
         * elapsing after __exit_signal() in any running thread group timer.
         *
         * This makes sure that POSIX CPU clocks and timers are synchronized, so
         * that a POSIX CPU timer won't expire while the corresponding POSIX CPU
         * clock delta is behind the expiring timer value.
         */
        if (unlikely(!tsk->sighand))
                return NULL;

        return cputimer;
}
#else
static inline
struct thread_group_cputimer *get_running_cputimer(struct task_struct *tsk)
{
        return NULL;
}
#endif
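
/*
 * Illustrative sketch (not part of this header): the account_group_*()
 * helpers below all follow the same pattern, bailing out cheaply when
 * no POSIX CPU timer is armed for the group:
 *
 *	struct thread_group_cputimer *cputimer = get_running_cputimer(tsk);
 *
 *	if (!cputimer)
 *		return;		// no active timers, skip the atomic add
 */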

/**
 * account_group_user_time - Maintain utime for a thread group.
 *
 * @tsk:        Pointer to task structure.
 * @cputime:    Time value by which to increment the utime field of the
 *              thread_group_cputime structure.
 *
 * If thread group time is being maintained, get the group's accounting
 * structure and atomically update the utime field there.
 */
static inline void account_group_user_time(struct task_struct *tsk,
                                           u64 cputime)
{
        struct thread_group_cputimer *cputimer = get_running_cputimer(tsk);

        if (!cputimer)
                return;

        atomic64_add(cputime, &cputimer->cputime_atomic.utime);
}

/**
 * account_group_system_time - Maintain stime for a thread group.
 *
 * @tsk:        Pointer to task structure.
 * @cputime:    Time value by which to increment the stime field of the
 *              thread_group_cputime structure.
 *
 * If thread group time is being maintained, get the group's accounting
 * structure and atomically update the stime field there.
 */
static inline void account_group_system_time(struct task_struct *tsk,
                                             u64 cputime)
{
        struct thread_group_cputimer *cputimer = get_running_cputimer(tsk);

        if (!cputimer)
                return;

        atomic64_add(cputime, &cputimer->cputime_atomic.stime);
}

/**
 * account_group_exec_runtime - Maintain exec runtime for a thread group.
 *
 * @tsk:        Pointer to task structure.
 * @ns:         Time value by which to increment the sum_exec_runtime field
 *              of the thread_group_cputime structure.
 *
 * If thread group time is being maintained, get the group's accounting
 * structure and atomically update the sum_exec_runtime field there.
 */
static inline void account_group_exec_runtime(struct task_struct *tsk,
                                              unsigned long long ns)
{
        struct thread_group_cputimer *cputimer = get_running_cputimer(tsk);

        if (!cputimer)
                return;

        atomic64_add(ns, &cputimer->cputime_atomic.sum_exec_runtime);
}
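
/*
 * Illustrative sketch (hypothetical tick path, not part of this
 * header): how a timer-tick handler might feed all three group
 * counters. The function and variable names are made up.
 *
 *	static void hypothetical_account_tick(struct task_struct *curr,
 *					      int user_tick, u64 delta)
 *	{
 *		if (user_tick)
 *			account_group_user_time(curr, delta);
 *		else
 *			account_group_system_time(curr, delta);
 *		account_group_exec_runtime(curr, delta);
 *	}
 */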

static inline void prev_cputime_init(struct prev_cputime *prev)
{
#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
        prev->utime = prev->stime = 0;
        raw_spin_lock_init(&prev->lock);
#endif
}
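
/*
 * Illustrative sketch (hypothetical caller, not part of this header):
 * prev_cputime_init() is meant to run once while the embedding
 * structure is being set up, before cputime_adjust() first uses @prev:
 *
 *	prev_cputime_init(&sig->prev_cputime);
 */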

extern unsigned long long
task_sched_runtime(struct task_struct *task);
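
/*
 * Illustrative sketch (hypothetical caller, not part of this header):
 * task_sched_runtime() returns the task's accumulated execution time
 * in nanoseconds, including the not-yet-accounted delta of a currently
 * running task.
 *
 *	u64 ns = task_sched_runtime(p);
 */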

#endif /* _LINUX_SCHED_CPUTIME_H */