linux/kernel/time/posix-cpu-timers.c
   1/*
   2 * Implement CPU time clocks for the POSIX clock interface.
   3 */
   4
   5#include <linux/sched.h>
   6#include <linux/posix-timers.h>
   7#include <linux/errno.h>
   8#include <linux/math64.h>
   9#include <asm/uaccess.h>
  10#include <linux/kernel_stat.h>
  11#include <trace/events/timer.h>
  12#include <linux/random.h>
  13#include <linux/tick.h>
  14#include <linux/workqueue.h>
  15
  16/*
  17 * Called after updating RLIMIT_CPU to run cpu timer and update
  18 * tsk->signal->cputime_expires expiration cache if necessary. Needs
  19 * siglock protection since other code may update expiration cache as
  20 * well.
  21 */
  22void update_rlimit_cpu(struct task_struct *task, unsigned long rlim_new)
  23{
  24        cputime_t cputime = secs_to_cputime(rlim_new);
  25
  26        spin_lock_irq(&task->sighand->siglock);
  27        set_process_cpu_timer(task, CPUCLOCK_PROF, &cputime, NULL);
  28        spin_unlock_irq(&task->sighand->siglock);
  29}
  30
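     /*
      * A CPU clock id encodes a target pid together with the clock type
      * (CPUCLOCK_PROF, CPUCLOCK_VIRT or CPUCLOCK_SCHED) and a per-thread
      * flag; the CPUCLOCK_PID()/CPUCLOCK_WHICH()/CPUCLOCK_PERTHREAD()
      * helpers used below pick those fields apart.  A pid of 0 always
      * means "the calling thread or process".
      */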
  31static int check_clock(const clockid_t which_clock)
  32{
  33        int error = 0;
  34        struct task_struct *p;
  35        const pid_t pid = CPUCLOCK_PID(which_clock);
  36
  37        if (CPUCLOCK_WHICH(which_clock) >= CPUCLOCK_MAX)
  38                return -EINVAL;
  39
  40        if (pid == 0)
  41                return 0;
  42
  43        rcu_read_lock();
  44        p = find_task_by_vpid(pid);
  45        if (!p || !(CPUCLOCK_PERTHREAD(which_clock) ?
  46                   same_thread_group(p, current) : has_group_leader_pid(p))) {
  47                error = -EINVAL;
  48        }
  49        rcu_read_unlock();
  50
  51        return error;
  52}
  53
  54static inline unsigned long long
  55timespec_to_sample(const clockid_t which_clock, const struct timespec *tp)
  56{
  57        unsigned long long ret;
  58
  59        ret = 0;                /* high half always zero when .cpu used */
  60        if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
  61                ret = (unsigned long long)tp->tv_sec * NSEC_PER_SEC + tp->tv_nsec;
  62        } else {
  63                ret = cputime_to_expires(timespec_to_cputime(tp));
  64        }
  65        return ret;
  66}
  67
  68static void sample_to_timespec(const clockid_t which_clock,
  69                               unsigned long long expires,
  70                               struct timespec *tp)
  71{
  72        if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED)
  73                *tp = ns_to_timespec(expires);
  74        else
  75                cputime_to_timespec((__force cputime_t)expires, tp);
  76}
  77
  78/*
  79 * Update expiry time from increment, and increase overrun count,
  80 * given the current clock sample.
  81 */
  82static void bump_cpu_timer(struct k_itimer *timer,
  83                           unsigned long long now)
  84{
  85        int i;
  86        unsigned long long delta, incr;
  87
  88        if (timer->it.cpu.incr == 0)
  89                return;
  90
  91        if (now < timer->it.cpu.expires)
  92                return;
  93
  94        incr = timer->it.cpu.incr;
  95        delta = now + incr - timer->it.cpu.expires;
  96
  97        /* Don't use (incr*2 < delta), incr*2 might overflow. */
  98        for (i = 0; incr < delta - incr; i++)
  99                incr = incr << 1;
 100
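             /*
              * The loop above found the largest power-of-two multiple of the
              * period (incr << i) that does not exceed delta.  The loop below
              * walks back down through those halvings, adding every chunk
              * that still fits: expires advances by a whole number of
              * periods and it_overrun accounts for each period skipped.
              */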
 101        for (; i >= 0; incr >>= 1, i--) {
 102                if (delta < incr)
 103                        continue;
 104
 105                timer->it.cpu.expires += incr;
 106                timer->it_overrun += 1 << i;
 107                delta -= incr;
 108        }
 109}
 110
 111/**
 112 * task_cputime_zero - Check a task_cputime struct for all zero fields.
 113 *
 114 * @cputime:    The struct to compare.
 115 *
 116 * Checks @cputime to see if all fields are zero.  Returns true if all fields
 117 * are zero, false if any field is nonzero.
 118 */
 119static inline int task_cputime_zero(const struct task_cputime *cputime)
 120{
 121        if (!cputime->utime && !cputime->stime && !cputime->sum_exec_runtime)
 122                return 1;
 123        return 0;
 124}
 125
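     /*
      * PROF counts user + system CPU time; VIRT counts user time only.
      */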
 126static inline unsigned long long prof_ticks(struct task_struct *p)
 127{
 128        cputime_t utime, stime;
 129
 130        task_cputime(p, &utime, &stime);
 131
 132        return cputime_to_expires(utime + stime);
 133}
 134static inline unsigned long long virt_ticks(struct task_struct *p)
 135{
 136        cputime_t utime;
 137
 138        task_cputime(p, &utime, NULL);
 139
 140        return cputime_to_expires(utime);
 141}
 142
 143static int
 144posix_cpu_clock_getres(const clockid_t which_clock, struct timespec *tp)
 145{
 146        int error = check_clock(which_clock);
 147        if (!error) {
 148                tp->tv_sec = 0;
 149                tp->tv_nsec = ((NSEC_PER_SEC + HZ - 1) / HZ);
 150                if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
 151                        /*
  152                         * If sched_clock is using a cycle counter, we
  153                         * don't export its true resolution here, but it
  154                         * is much finer than 1s/HZ.
 155                         */
 156                        tp->tv_nsec = 1;
 157                }
 158        }
 159        return error;
 160}
 161
 162static int
 163posix_cpu_clock_set(const clockid_t which_clock, const struct timespec *tp)
 164{
 165        /*
 166         * You can never reset a CPU clock, but we check for other errors
 167         * in the call before failing with EPERM.
 168         */
 169        int error = check_clock(which_clock);
 170        if (error == 0) {
 171                error = -EPERM;
 172        }
 173        return error;
 174}
 175
 176
 177/*
 178 * Sample a per-thread clock for the given task.
 179 */
 180static int cpu_clock_sample(const clockid_t which_clock, struct task_struct *p,
 181                            unsigned long long *sample)
 182{
 183        switch (CPUCLOCK_WHICH(which_clock)) {
 184        default:
 185                return -EINVAL;
 186        case CPUCLOCK_PROF:
 187                *sample = prof_ticks(p);
 188                break;
 189        case CPUCLOCK_VIRT:
 190                *sample = virt_ticks(p);
 191                break;
 192        case CPUCLOCK_SCHED:
 193                *sample = task_sched_runtime(p);
 194                break;
 195        }
 196        return 0;
 197}
 198
 199/*
 200 * Set cputime to sum_cputime if sum_cputime > cputime. Use cmpxchg
 201 * to avoid race conditions with concurrent updates to cputime.
 202 */
 203static inline void __update_gt_cputime(atomic64_t *cputime, u64 sum_cputime)
 204{
 205        u64 curr_cputime;
 206retry:
 207        curr_cputime = atomic64_read(cputime);
 208        if (sum_cputime > curr_cputime) {
 209                if (atomic64_cmpxchg(cputime, curr_cputime, sum_cputime) != curr_cputime)
 210                        goto retry;
 211        }
 212}
 213
 214static void update_gt_cputime(struct task_cputime_atomic *cputime_atomic, struct task_cputime *sum)
 215{
 216        __update_gt_cputime(&cputime_atomic->utime, sum->utime);
 217        __update_gt_cputime(&cputime_atomic->stime, sum->stime);
 218        __update_gt_cputime(&cputime_atomic->sum_exec_runtime, sum->sum_exec_runtime);
 219}
 220
 221/* Sample task_cputime_atomic values in "atomic_timers", store results in "times". */
 222static inline void sample_cputime_atomic(struct task_cputime *times,
 223                                         struct task_cputime_atomic *atomic_times)
 224{
 225        times->utime = atomic64_read(&atomic_times->utime);
 226        times->stime = atomic64_read(&atomic_times->stime);
 227        times->sum_exec_runtime = atomic64_read(&atomic_times->sum_exec_runtime);
 228}
 229
 230void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times)
 231{
 232        struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;
 233        struct task_cputime sum;
 234
 235        /* Check if cputimer isn't running. This is accessed without locking. */
 236        if (!READ_ONCE(cputimer->running)) {
 237                /*
 238                 * The POSIX timer interface allows for absolute time expiry
 239                 * values through the TIMER_ABSTIME flag, therefore we have
 240                 * to synchronize the timer to the clock every time we start it.
 241                 */
 242                thread_group_cputime(tsk, &sum);
 243                update_gt_cputime(&cputimer->cputime_atomic, &sum);
 244
 245                /*
 246                 * We're setting cputimer->running without a lock. Ensure
 247                 * this only gets written to in one operation. We set
 248                 * running after update_gt_cputime() as a small optimization,
 249                 * but barriers are not required because update_gt_cputime()
 250                 * can handle concurrent updates.
 251                 */
 252                WRITE_ONCE(cputimer->running, 1);
 253        }
 254        sample_cputime_atomic(times, &cputimer->cputime_atomic);
 255}
 256
 257/*
 258 * Sample a process (thread group) clock for the given group_leader task.
 259 * Must be called with task sighand lock held for safe while_each_thread()
 260 * traversal.
 261 */
 262static int cpu_clock_sample_group(const clockid_t which_clock,
 263                                  struct task_struct *p,
 264                                  unsigned long long *sample)
 265{
 266        struct task_cputime cputime;
 267
 268        switch (CPUCLOCK_WHICH(which_clock)) {
 269        default:
 270                return -EINVAL;
 271        case CPUCLOCK_PROF:
 272                thread_group_cputime(p, &cputime);
 273                *sample = cputime_to_expires(cputime.utime + cputime.stime);
 274                break;
 275        case CPUCLOCK_VIRT:
 276                thread_group_cputime(p, &cputime);
 277                *sample = cputime_to_expires(cputime.utime);
 278                break;
 279        case CPUCLOCK_SCHED:
 280                thread_group_cputime(p, &cputime);
 281                *sample = cputime.sum_exec_runtime;
 282                break;
 283        }
 284        return 0;
 285}
 286
 287static int posix_cpu_clock_get_task(struct task_struct *tsk,
 288                                    const clockid_t which_clock,
 289                                    struct timespec *tp)
 290{
 291        int err = -EINVAL;
 292        unsigned long long rtn;
 293
 294        if (CPUCLOCK_PERTHREAD(which_clock)) {
 295                if (same_thread_group(tsk, current))
 296                        err = cpu_clock_sample(which_clock, tsk, &rtn);
 297        } else {
 298                if (tsk == current || thread_group_leader(tsk))
 299                        err = cpu_clock_sample_group(which_clock, tsk, &rtn);
 300        }
 301
 302        if (!err)
 303                sample_to_timespec(which_clock, rtn, tp);
 304
 305        return err;
 306}
 307
 308
 309static int posix_cpu_clock_get(const clockid_t which_clock, struct timespec *tp)
 310{
 311        const pid_t pid = CPUCLOCK_PID(which_clock);
 312        int err = -EINVAL;
 313
 314        if (pid == 0) {
 315                /*
 316                 * Special case constant value for our own clocks.
 317                 * We don't have to do any lookup to find ourselves.
 318                 */
 319                err = posix_cpu_clock_get_task(current, which_clock, tp);
 320        } else {
 321                /*
 322                 * Find the given PID, and validate that the caller
 323                 * should be able to see it.
 324                 */
 325                struct task_struct *p;
 326                rcu_read_lock();
 327                p = find_task_by_vpid(pid);
 328                if (p)
 329                        err = posix_cpu_clock_get_task(p, which_clock, tp);
 330                rcu_read_unlock();
 331        }
 332
 333        return err;
 334}
 335
 336
 337/*
 338 * Validate the clockid_t for a new CPU-clock timer, and initialize the timer.
 339 * This is called from sys_timer_create() and do_cpu_nanosleep() with the
 340 * new timer already all-zeros initialized.
 341 */
 342static int posix_cpu_timer_create(struct k_itimer *new_timer)
 343{
 344        int ret = 0;
 345        const pid_t pid = CPUCLOCK_PID(new_timer->it_clock);
 346        struct task_struct *p;
 347
 348        if (CPUCLOCK_WHICH(new_timer->it_clock) >= CPUCLOCK_MAX)
 349                return -EINVAL;
 350
 351        INIT_LIST_HEAD(&new_timer->it.cpu.entry);
 352
 353        rcu_read_lock();
 354        if (CPUCLOCK_PERTHREAD(new_timer->it_clock)) {
 355                if (pid == 0) {
 356                        p = current;
 357                } else {
 358                        p = find_task_by_vpid(pid);
 359                        if (p && !same_thread_group(p, current))
 360                                p = NULL;
 361                }
 362        } else {
 363                if (pid == 0) {
 364                        p = current->group_leader;
 365                } else {
 366                        p = find_task_by_vpid(pid);
 367                        if (p && !has_group_leader_pid(p))
 368                                p = NULL;
 369                }
 370        }
 371        new_timer->it.cpu.task = p;
 372        if (p) {
 373                get_task_struct(p);
 374        } else {
 375                ret = -EINVAL;
 376        }
 377        rcu_read_unlock();
 378
 379        return ret;
 380}
 381
 382/*
 383 * Clean up a CPU-clock timer that is about to be destroyed.
 384 * This is called from timer deletion with the timer already locked.
 385 * If we return TIMER_RETRY, it's necessary to release the timer's lock
 386 * and try again.  (This happens when the timer is in the middle of firing.)
 387 */
 388static int posix_cpu_timer_del(struct k_itimer *timer)
 389{
 390        int ret = 0;
 391        unsigned long flags;
 392        struct sighand_struct *sighand;
 393        struct task_struct *p = timer->it.cpu.task;
 394
 395        WARN_ON_ONCE(p == NULL);
 396
 397        /*
 398         * Protect against sighand release/switch in exit/exec and process/
 399         * thread timer list entry concurrent read/writes.
 400         */
 401        sighand = lock_task_sighand(p, &flags);
 402        if (unlikely(sighand == NULL)) {
 403                /*
 404                 * We raced with the reaping of the task.
 405                 * The deletion should have cleared us off the list.
 406                 */
 407                WARN_ON_ONCE(!list_empty(&timer->it.cpu.entry));
 408        } else {
 409                if (timer->it.cpu.firing)
 410                        ret = TIMER_RETRY;
 411                else
 412                        list_del(&timer->it.cpu.entry);
 413
 414                unlock_task_sighand(p, &flags);
 415        }
 416
 417        if (!ret)
 418                put_task_struct(p);
 419
 420        return ret;
 421}
 422
 423static void cleanup_timers_list(struct list_head *head)
 424{
 425        struct cpu_timer_list *timer, *next;
 426
 427        list_for_each_entry_safe(timer, next, head, entry)
 428                list_del_init(&timer->entry);
 429}
 430
 431/*
  432 * Clean out CPU timers still armed when a thread exits.  The timers are
  433 * simply unlinked from the per-clock lists; the head argument points at
  434 * the first of the three lists (PROF, VIRT, SCHED), hence the increments.
 435 * This must be called with the siglock held.
 436 */
 437static void cleanup_timers(struct list_head *head)
 438{
 439        cleanup_timers_list(head);
 440        cleanup_timers_list(++head);
 441        cleanup_timers_list(++head);
 442}
 443
 444/*
 445 * These are both called with the siglock held, when the current thread
 446 * is being reaped.  When the final (leader) thread in the group is reaped,
 447 * posix_cpu_timers_exit_group will be called after posix_cpu_timers_exit.
 448 */
 449void posix_cpu_timers_exit(struct task_struct *tsk)
 450{
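             /*
              * Mix the task's final runtime into the random pool; it is a
              * cheap, slightly unpredictable value and add_device_randomness()
              * does not credit it as entropy.
              */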
 451        add_device_randomness((const void*) &tsk->se.sum_exec_runtime,
 452                                                sizeof(unsigned long long));
 453        cleanup_timers(tsk->cpu_timers);
 454
 455}
 456void posix_cpu_timers_exit_group(struct task_struct *tsk)
 457{
 458        cleanup_timers(tsk->signal->cpu_timers);
 459}
 460
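     /* True if the cached expiry @expires is unset (0) or later than @new_exp. */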
 461static inline int expires_gt(cputime_t expires, cputime_t new_exp)
 462{
 463        return expires == 0 || expires > new_exp;
 464}
 465
 466/*
 467 * Insert the timer on the appropriate list before any timers that
 468 * expire later.  This must be called with the sighand lock held.
 469 */
 470static void arm_timer(struct k_itimer *timer)
 471{
 472        struct task_struct *p = timer->it.cpu.task;
 473        struct list_head *head, *listpos;
 474        struct task_cputime *cputime_expires;
 475        struct cpu_timer_list *const nt = &timer->it.cpu;
 476        struct cpu_timer_list *next;
 477
 478        if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
 479                head = p->cpu_timers;
 480                cputime_expires = &p->cputime_expires;
 481        } else {
 482                head = p->signal->cpu_timers;
 483                cputime_expires = &p->signal->cputime_expires;
 484        }
 485        head += CPUCLOCK_WHICH(timer->it_clock);
 486
 487        listpos = head;
 488        list_for_each_entry(next, head, entry) {
 489                if (nt->expires < next->expires)
 490                        break;
 491                listpos = &next->entry;
 492        }
 493        list_add(&nt->entry, listpos);
 494
 495        if (listpos == head) {
 496                unsigned long long exp = nt->expires;
 497
 498                /*
 499                 * We are the new earliest-expiring POSIX 1.b timer, hence
 500                 * need to update expiration cache. Take into account that
 501                 * for process timers we share expiration cache with itimers
 502                 * and RLIMIT_CPU and for thread timers with RLIMIT_RTTIME.
 503                 */
 504
 505                switch (CPUCLOCK_WHICH(timer->it_clock)) {
 506                case CPUCLOCK_PROF:
 507                        if (expires_gt(cputime_expires->prof_exp, expires_to_cputime(exp)))
 508                                cputime_expires->prof_exp = expires_to_cputime(exp);
 509                        break;
 510                case CPUCLOCK_VIRT:
 511                        if (expires_gt(cputime_expires->virt_exp, expires_to_cputime(exp)))
 512                                cputime_expires->virt_exp = expires_to_cputime(exp);
 513                        break;
 514                case CPUCLOCK_SCHED:
 515                        if (cputime_expires->sched_exp == 0 ||
 516                            cputime_expires->sched_exp > exp)
 517                                cputime_expires->sched_exp = exp;
 518                        break;
 519                }
 520        }
 521}
 522
 523/*
 524 * The timer is locked, fire it and arrange for its reload.
 525 */
 526static void cpu_timer_fire(struct k_itimer *timer)
 527{
 528        if ((timer->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE) {
 529                /*
  530                 * The user doesn't want any signal.
 531                 */
 532                timer->it.cpu.expires = 0;
 533        } else if (unlikely(timer->sigq == NULL)) {
 534                /*
  535                 * This is a special case for clock_nanosleep,
 536                 * not a normal timer from sys_timer_create.
 537                 */
 538                wake_up_process(timer->it_process);
 539                timer->it.cpu.expires = 0;
 540        } else if (timer->it.cpu.incr == 0) {
 541                /*
 542                 * One-shot timer.  Clear it as soon as it's fired.
 543                 */
 544                posix_timer_event(timer, 0);
 545                timer->it.cpu.expires = 0;
 546        } else if (posix_timer_event(timer, ++timer->it_requeue_pending)) {
 547                /*
 548                 * The signal did not get queued because the signal
 549                 * was ignored, so we won't get any callback to
 550                 * reload the timer.  But we need to keep it
 551                 * ticking in case the signal is deliverable next time.
 552                 */
 553                posix_cpu_timer_schedule(timer);
 554        }
 555}
 556
 557/*
 558 * Sample a process (thread group) timer for the given group_leader task.
 559 * Must be called with task sighand lock held for safe while_each_thread()
 560 * traversal.
 561 */
 562static int cpu_timer_sample_group(const clockid_t which_clock,
 563                                  struct task_struct *p,
 564                                  unsigned long long *sample)
 565{
 566        struct task_cputime cputime;
 567
 568        thread_group_cputimer(p, &cputime);
 569        switch (CPUCLOCK_WHICH(which_clock)) {
 570        default:
 571                return -EINVAL;
 572        case CPUCLOCK_PROF:
 573                *sample = cputime_to_expires(cputime.utime + cputime.stime);
 574                break;
 575        case CPUCLOCK_VIRT:
 576                *sample = cputime_to_expires(cputime.utime);
 577                break;
 578        case CPUCLOCK_SCHED:
 579                *sample = cputime.sum_exec_runtime;
 580                break;
 581        }
 582        return 0;
 583}
 584
 585#ifdef CONFIG_NO_HZ_FULL
 586static void nohz_kick_work_fn(struct work_struct *work)
 587{
 588        tick_nohz_full_kick_all();
 589}
 590
 591static DECLARE_WORK(nohz_kick_work, nohz_kick_work_fn);
 592
 593/*
 594 * We need the IPIs to be sent from sane process context.
 595 * The posix cpu timers are always set with irqs disabled.
 596 */
 597static void posix_cpu_timer_kick_nohz(void)
 598{
 599        if (context_tracking_is_enabled())
 600                schedule_work(&nohz_kick_work);
 601}
 602
 603bool posix_cpu_timers_can_stop_tick(struct task_struct *tsk)
 604{
 605        if (!task_cputime_zero(&tsk->cputime_expires))
 606                return false;
 607
 608        /* Check if cputimer is running. This is accessed without locking. */
 609        if (READ_ONCE(tsk->signal->cputimer.running))
 610                return false;
 611
 612        return true;
 613}
 614#else
 615static inline void posix_cpu_timer_kick_nohz(void) { }
 616#endif
 617
 618/*
 619 * Guts of sys_timer_settime for CPU timers.
 620 * This is called with the timer locked and interrupts disabled.
 621 * If we return TIMER_RETRY, it's necessary to release the timer's lock
 622 * and try again.  (This happens when the timer is in the middle of firing.)
 623 */
 624static int posix_cpu_timer_set(struct k_itimer *timer, int timer_flags,
 625                               struct itimerspec *new, struct itimerspec *old)
 626{
 627        unsigned long flags;
 628        struct sighand_struct *sighand;
 629        struct task_struct *p = timer->it.cpu.task;
 630        unsigned long long old_expires, new_expires, old_incr, val;
 631        int ret;
 632
 633        WARN_ON_ONCE(p == NULL);
 634
 635        new_expires = timespec_to_sample(timer->it_clock, &new->it_value);
 636
 637        /*
 638         * Protect against sighand release/switch in exit/exec and p->cpu_timers
 639         * and p->signal->cpu_timers read/write in arm_timer()
 640         */
 641        sighand = lock_task_sighand(p, &flags);
 642        /*
 643         * If p has just been reaped, we can no
 644         * longer get any information about it at all.
 645         */
 646        if (unlikely(sighand == NULL)) {
 647                return -ESRCH;
 648        }
 649
 650        /*
 651         * Disarm any old timer after extracting its expiry time.
 652         */
 653        WARN_ON_ONCE(!irqs_disabled());
 654
 655        ret = 0;
 656        old_incr = timer->it.cpu.incr;
 657        old_expires = timer->it.cpu.expires;
 658        if (unlikely(timer->it.cpu.firing)) {
 659                timer->it.cpu.firing = -1;
 660                ret = TIMER_RETRY;
 661        } else
 662                list_del_init(&timer->it.cpu.entry);
 663
 664        /*
 665         * We need to sample the current value to convert the new
  666         * value from relative to absolute, and to convert the
 667         * old value from absolute to relative.  To set a process
 668         * timer, we need a sample to balance the thread expiry
 669         * times (in arm_timer).  With an absolute time, we must
 670         * check if it's already passed.  In short, we need a sample.
 671         */
 672        if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
 673                cpu_clock_sample(timer->it_clock, p, &val);
 674        } else {
 675                cpu_timer_sample_group(timer->it_clock, p, &val);
 676        }
 677
 678        if (old) {
 679                if (old_expires == 0) {
 680                        old->it_value.tv_sec = 0;
 681                        old->it_value.tv_nsec = 0;
 682                } else {
 683                        /*
 684                         * Update the timer in case it has
 685                         * overrun already.  If it has,
 686                         * we'll report it as having overrun
 687                         * and with the next reloaded timer
 688                         * already ticking, though we are
 689                         * swallowing that pending
 690                         * notification here to install the
 691                         * new setting.
 692                         */
 693                        bump_cpu_timer(timer, val);
 694                        if (val < timer->it.cpu.expires) {
 695                                old_expires = timer->it.cpu.expires - val;
 696                                sample_to_timespec(timer->it_clock,
 697                                                   old_expires,
 698                                                   &old->it_value);
 699                        } else {
 700                                old->it_value.tv_nsec = 1;
 701                                old->it_value.tv_sec = 0;
 702                        }
 703                }
 704        }
 705
 706        if (unlikely(ret)) {
 707                /*
 708                 * We are colliding with the timer actually firing.
 709                 * Punt after filling in the timer's old value, and
 710                 * disable this firing since we are already reporting
 711                 * it as an overrun (thanks to bump_cpu_timer above).
 712                 */
 713                unlock_task_sighand(p, &flags);
 714                goto out;
 715        }
 716
 717        if (new_expires != 0 && !(timer_flags & TIMER_ABSTIME)) {
 718                new_expires += val;
 719        }
 720
 721        /*
 722         * Install the new expiry time (or zero).
 723         * For a timer with no notification action, we don't actually
 724         * arm the timer (we'll just fake it for timer_gettime).
 725         */
 726        timer->it.cpu.expires = new_expires;
 727        if (new_expires != 0 && val < new_expires) {
 728                arm_timer(timer);
 729        }
 730
 731        unlock_task_sighand(p, &flags);
 732        /*
 733         * Install the new reload setting, and
 734         * set up the signal and overrun bookkeeping.
 735         */
 736        timer->it.cpu.incr = timespec_to_sample(timer->it_clock,
 737                                                &new->it_interval);
 738
 739        /*
 740         * This acts as a modification timestamp for the timer,
 741         * so any automatic reload attempt will punt on seeing
 742         * that we have reset the timer manually.
 743         */
 744        timer->it_requeue_pending = (timer->it_requeue_pending + 2) &
 745                ~REQUEUE_PENDING;
 746        timer->it_overrun_last = 0;
 747        timer->it_overrun = -1;
 748
 749        if (new_expires != 0 && !(val < new_expires)) {
 750                /*
 751                 * The designated time already passed, so we notify
 752                 * immediately, even if the thread never runs to
 753                 * accumulate more time on this clock.
 754                 */
 755                cpu_timer_fire(timer);
 756        }
 757
 758        ret = 0;
 759 out:
 760        if (old) {
 761                sample_to_timespec(timer->it_clock,
 762                                   old_incr, &old->it_interval);
 763        }
 764        if (!ret)
 765                posix_cpu_timer_kick_nohz();
 766        return ret;
 767}
 768
 769static void posix_cpu_timer_get(struct k_itimer *timer, struct itimerspec *itp)
 770{
 771        unsigned long long now;
 772        struct task_struct *p = timer->it.cpu.task;
 773
 774        WARN_ON_ONCE(p == NULL);
 775
 776        /*
 777         * Easy part: convert the reload time.
 778         */
 779        sample_to_timespec(timer->it_clock,
 780                           timer->it.cpu.incr, &itp->it_interval);
 781
 782        if (timer->it.cpu.expires == 0) {       /* Timer not armed at all.  */
 783                itp->it_value.tv_sec = itp->it_value.tv_nsec = 0;
 784                return;
 785        }
 786
 787        /*
 788         * Sample the clock to take the difference with the expiry time.
 789         */
 790        if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
 791                cpu_clock_sample(timer->it_clock, p, &now);
 792        } else {
 793                struct sighand_struct *sighand;
 794                unsigned long flags;
 795
 796                /*
 797                 * Protect against sighand release/switch in exit/exec and
 798                 * also make timer sampling safe if it ends up calling
 799                 * thread_group_cputime().
 800                 */
 801                sighand = lock_task_sighand(p, &flags);
 802                if (unlikely(sighand == NULL)) {
 803                        /*
 804                         * The process has been reaped.
 805                         * We can't even collect a sample any more.
 806                         * Call the timer disarmed, nothing else to do.
 807                         */
 808                        timer->it.cpu.expires = 0;
 809                        sample_to_timespec(timer->it_clock, timer->it.cpu.expires,
 810                                           &itp->it_value);
 811                } else {
 812                        cpu_timer_sample_group(timer->it_clock, p, &now);
 813                        unlock_task_sighand(p, &flags);
 814                }
 815        }
 816
 817        if (now < timer->it.cpu.expires) {
 818                sample_to_timespec(timer->it_clock,
 819                                   timer->it.cpu.expires - now,
 820                                   &itp->it_value);
 821        } else {
 822                /*
 823                 * The timer should have expired already, but the firing
 824                 * hasn't taken place yet.  Say it's just about to expire.
 825                 */
 826                itp->it_value.tv_nsec = 1;
 827                itp->it_value.tv_sec = 0;
 828        }
 829}
 830
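     /*
      * Move timers on @timers that have expired relative to @curr onto the
      * @firing list (a bounded number per call, to keep the scan cheap).
      * Returns the expiry time of the first timer left on the list, or 0 if
      * the list was emptied.
      */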
 831static unsigned long long
 832check_timers_list(struct list_head *timers,
 833                  struct list_head *firing,
 834                  unsigned long long curr)
 835{
 836        int maxfire = 20;
 837
 838        while (!list_empty(timers)) {
 839                struct cpu_timer_list *t;
 840
 841                t = list_first_entry(timers, struct cpu_timer_list, entry);
 842
 843                if (!--maxfire || curr < t->expires)
 844                        return t->expires;
 845
 846                t->firing = 1;
 847                list_move_tail(&t->entry, firing);
 848        }
 849
 850        return 0;
 851}
 852
 853/*
 854 * Check for any per-thread CPU timers that have fired and move them off
 855 * the tsk->cpu_timers[N] list onto the firing list.  Here we update the
  856 * tsk->cputime_expires values to reflect the remaining thread CPU timers.
 857 */
 858static void check_thread_timers(struct task_struct *tsk,
 859                                struct list_head *firing)
 860{
 861        struct list_head *timers = tsk->cpu_timers;
 862        struct signal_struct *const sig = tsk->signal;
 863        struct task_cputime *tsk_expires = &tsk->cputime_expires;
 864        unsigned long long expires;
 865        unsigned long soft;
 866
 867        expires = check_timers_list(timers, firing, prof_ticks(tsk));
 868        tsk_expires->prof_exp = expires_to_cputime(expires);
 869
 870        expires = check_timers_list(++timers, firing, virt_ticks(tsk));
 871        tsk_expires->virt_exp = expires_to_cputime(expires);
 872
 873        tsk_expires->sched_exp = check_timers_list(++timers, firing,
 874                                                   tsk->se.sum_exec_runtime);
 875
 876        /*
 877         * Check for the special case thread timers.
 878         */
 879        soft = READ_ONCE(sig->rlim[RLIMIT_RTTIME].rlim_cur);
 880        if (soft != RLIM_INFINITY) {
 881                unsigned long hard =
 882                        READ_ONCE(sig->rlim[RLIMIT_RTTIME].rlim_max);
 883
 884                if (hard != RLIM_INFINITY &&
 885                    tsk->rt.timeout > DIV_ROUND_UP(hard, USEC_PER_SEC/HZ)) {
 886                        /*
 887                         * At the hard limit, we just die.
 888                         * No need to calculate anything else now.
 889                         */
 890                        __group_send_sig_info(SIGKILL, SEND_SIG_PRIV, tsk);
 891                        return;
 892                }
 893                if (tsk->rt.timeout > DIV_ROUND_UP(soft, USEC_PER_SEC/HZ)) {
 894                        /*
 895                         * At the soft limit, send a SIGXCPU every second.
 896                         */
 897                        if (soft < hard) {
 898                                soft += USEC_PER_SEC;
 899                                sig->rlim[RLIMIT_RTTIME].rlim_cur = soft;
 900                        }
 901                        printk(KERN_INFO
 902                                "RT Watchdog Timeout: %s[%d]\n",
 903                                tsk->comm, task_pid_nr(tsk));
 904                        __group_send_sig_info(SIGXCPU, SEND_SIG_PRIV, tsk);
 905                }
 906        }
 907}
 908
 909static inline void stop_process_timers(struct signal_struct *sig)
 910{
 911        struct thread_group_cputimer *cputimer = &sig->cputimer;
 912
 913        /* Turn off cputimer->running. This is done without locking. */
 914        WRITE_ONCE(cputimer->running, 0);
 915}
 916
 917static u32 onecputick;
 918
 919static void check_cpu_itimer(struct task_struct *tsk, struct cpu_itimer *it,
 920                             unsigned long long *expires,
 921                             unsigned long long cur_time, int signo)
 922{
 923        if (!it->expires)
 924                return;
 925
 926        if (cur_time >= it->expires) {
 927                if (it->incr) {
 928                        it->expires += it->incr;
 929                        it->error += it->incr_error;
 930                        if (it->error >= onecputick) {
 931                                it->expires -= cputime_one_jiffy;
 932                                it->error -= onecputick;
 933                        }
 934                } else {
 935                        it->expires = 0;
 936                }
 937
 938                trace_itimer_expire(signo == SIGPROF ?
 939                                    ITIMER_PROF : ITIMER_VIRTUAL,
 940                                    tsk->signal->leader_pid, cur_time);
 941                __group_send_sig_info(signo, SEND_SIG_PRIV, tsk);
 942        }
 943
 944        if (it->expires && (!*expires || it->expires < *expires)) {
 945                *expires = it->expires;
 946        }
 947}
 948
 949/*
  950 * Check for any per-process (thread group) CPU timers that have fired
  951 * and move them off the tsk->signal->cpu_timers lists onto the firing
  952 * list.  Per-thread timers have already been taken off.
 953 */
 954static void check_process_timers(struct task_struct *tsk,
 955                                 struct list_head *firing)
 956{
 957        struct signal_struct *const sig = tsk->signal;
 958        unsigned long long utime, ptime, virt_expires, prof_expires;
 959        unsigned long long sum_sched_runtime, sched_expires;
 960        struct list_head *timers = sig->cpu_timers;
 961        struct task_cputime cputime;
 962        unsigned long soft;
 963
 964        /*
 965         * Collect the current process totals.
 966         */
 967        thread_group_cputimer(tsk, &cputime);
 968        utime = cputime_to_expires(cputime.utime);
 969        ptime = utime + cputime_to_expires(cputime.stime);
 970        sum_sched_runtime = cputime.sum_exec_runtime;
 971
 972        prof_expires = check_timers_list(timers, firing, ptime);
 973        virt_expires = check_timers_list(++timers, firing, utime);
 974        sched_expires = check_timers_list(++timers, firing, sum_sched_runtime);
 975
 976        /*
 977         * Check for the special case process timers.
 978         */
 979        check_cpu_itimer(tsk, &sig->it[CPUCLOCK_PROF], &prof_expires, ptime,
 980                         SIGPROF);
 981        check_cpu_itimer(tsk, &sig->it[CPUCLOCK_VIRT], &virt_expires, utime,
 982                         SIGVTALRM);
 983        soft = READ_ONCE(sig->rlim[RLIMIT_CPU].rlim_cur);
 984        if (soft != RLIM_INFINITY) {
 985                unsigned long psecs = cputime_to_secs(ptime);
 986                unsigned long hard =
 987                        READ_ONCE(sig->rlim[RLIMIT_CPU].rlim_max);
 988                cputime_t x;
 989                if (psecs >= hard) {
 990                        /*
 991                         * At the hard limit, we just die.
 992                         * No need to calculate anything else now.
 993                         */
 994                        __group_send_sig_info(SIGKILL, SEND_SIG_PRIV, tsk);
 995                        return;
 996                }
 997                if (psecs >= soft) {
 998                        /*
 999                         * At the soft limit, send a SIGXCPU every second.
1000                         */
1001                        __group_send_sig_info(SIGXCPU, SEND_SIG_PRIV, tsk);
1002                        if (soft < hard) {
1003                                soft++;
1004                                sig->rlim[RLIMIT_CPU].rlim_cur = soft;
1005                        }
1006                }
1007                x = secs_to_cputime(soft);
1008                if (!prof_expires || x < prof_expires) {
1009                        prof_expires = x;
1010                }
1011        }
1012
1013        sig->cputime_expires.prof_exp = expires_to_cputime(prof_expires);
1014        sig->cputime_expires.virt_exp = expires_to_cputime(virt_expires);
1015        sig->cputime_expires.sched_exp = sched_expires;
1016        if (task_cputime_zero(&sig->cputime_expires))
1017                stop_process_timers(sig);
1018}
1019
1020/*
1021 * This is called from the signal code (via do_schedule_next_timer)
1022 * when the last timer signal was delivered and we have to reload the timer.
1023 */
1024void posix_cpu_timer_schedule(struct k_itimer *timer)
1025{
1026        struct sighand_struct *sighand;
1027        unsigned long flags;
1028        struct task_struct *p = timer->it.cpu.task;
1029        unsigned long long now;
1030
1031        WARN_ON_ONCE(p == NULL);
1032
1033        /*
1034         * Fetch the current sample and update the timer's expiry time.
1035         */
1036        if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
1037                cpu_clock_sample(timer->it_clock, p, &now);
1038                bump_cpu_timer(timer, now);
1039                if (unlikely(p->exit_state))
1040                        goto out;
1041
1042                /* Protect timer list r/w in arm_timer() */
1043                sighand = lock_task_sighand(p, &flags);
1044                if (!sighand)
1045                        goto out;
1046        } else {
1047                /*
1048                 * Protect arm_timer() and timer sampling in case of call to
1049                 * thread_group_cputime().
1050                 */
1051                sighand = lock_task_sighand(p, &flags);
1052                if (unlikely(sighand == NULL)) {
1053                        /*
1054                         * The process has been reaped.
1055                         * We can't even collect a sample any more.
1056                         */
1057                        timer->it.cpu.expires = 0;
1058                        goto out;
1059                } else if (unlikely(p->exit_state) && thread_group_empty(p)) {
1060                        unlock_task_sighand(p, &flags);
 1061                        /* Optimization: if the process is dying, there is no need to rearm */
1062                        goto out;
1063                }
1064                cpu_timer_sample_group(timer->it_clock, p, &now);
1065                bump_cpu_timer(timer, now);
1066                /* Leave the sighand locked for the call below.  */
1067        }
1068
1069        /*
1070         * Now re-arm for the new expiry time.
1071         */
1072        WARN_ON_ONCE(!irqs_disabled());
1073        arm_timer(timer);
1074        unlock_task_sighand(p, &flags);
1075
1076        /* Kick full dynticks CPUs in case they need to tick on the new timer */
1077        posix_cpu_timer_kick_nohz();
1078out:
1079        timer->it_overrun_last = timer->it_overrun;
1080        timer->it_overrun = -1;
1081        ++timer->it_requeue_pending;
1082}
1083
1084/**
1085 * task_cputime_expired - Compare two task_cputime entities.
1086 *
1087 * @sample:     The task_cputime structure to be checked for expiration.
1088 * @expires:    Expiration times, against which @sample will be checked.
1089 *
1090 * Checks @sample against @expires to see if any field of @sample has expired.
1091 * Returns true if any field of the former is greater than the corresponding
1092 * field of the latter if the latter field is set.  Otherwise returns false.
1093 */
1094static inline int task_cputime_expired(const struct task_cputime *sample,
1095                                        const struct task_cputime *expires)
1096{
1097        if (expires->utime && sample->utime >= expires->utime)
1098                return 1;
1099        if (expires->stime && sample->utime + sample->stime >= expires->stime)
1100                return 1;
1101        if (expires->sum_exec_runtime != 0 &&
1102            sample->sum_exec_runtime >= expires->sum_exec_runtime)
1103                return 1;
1104        return 0;
1105}
1106
1107/**
1108 * fastpath_timer_check - POSIX CPU timers fast path.
1109 *
1110 * @tsk:        The task (thread) being checked.
1111 *
1112 * Check the task and thread group timers.  If both are zero (there are no
1113 * timers set) return false.  Otherwise snapshot the task and thread group
1114 * timers and compare them with the corresponding expiration times.  Return
1115 * true if a timer has expired, else return false.
1116 */
1117static inline int fastpath_timer_check(struct task_struct *tsk)
1118{
1119        struct signal_struct *sig;
1120        cputime_t utime, stime;
1121
1122        task_cputime(tsk, &utime, &stime);
1123
1124        if (!task_cputime_zero(&tsk->cputime_expires)) {
1125                struct task_cputime task_sample = {
1126                        .utime = utime,
1127                        .stime = stime,
1128                        .sum_exec_runtime = tsk->se.sum_exec_runtime
1129                };
1130
1131                if (task_cputime_expired(&task_sample, &tsk->cputime_expires))
1132                        return 1;
1133        }
1134
1135        sig = tsk->signal;
1136        /* Check if cputimer is running. This is accessed without locking. */
1137        if (READ_ONCE(sig->cputimer.running)) {
1138                struct task_cputime group_sample;
1139
1140                sample_cputime_atomic(&group_sample, &sig->cputimer.cputime_atomic);
1141
1142                if (task_cputime_expired(&group_sample, &sig->cputime_expires))
1143                        return 1;
1144        }
1145
1146        return 0;
1147}
1148
1149/*
1150 * This is called from the timer interrupt handler.  The irq handler has
1151 * already updated our counts.  We need to check if any timers fire now.
1152 * Interrupts are disabled.
1153 */
1154void run_posix_cpu_timers(struct task_struct *tsk)
1155{
1156        LIST_HEAD(firing);
1157        struct k_itimer *timer, *next;
1158        unsigned long flags;
1159
1160        WARN_ON_ONCE(!irqs_disabled());
1161
1162        /*
1163         * The fast path checks that there are no expired thread or thread
1164         * group timers.  If that's so, just return.
1165         */
1166        if (!fastpath_timer_check(tsk))
1167                return;
1168
1169        if (!lock_task_sighand(tsk, &flags))
1170                return;
1171        /*
1172         * Here we take off tsk->signal->cpu_timers[N] and
1173         * tsk->cpu_timers[N] all the timers that are firing, and
1174         * put them on the firing list.
1175         */
1176        check_thread_timers(tsk, &firing);
1177        /*
1178         * If there are any active process wide timers (POSIX 1.b, itimers,
 1179         * RLIMIT_CPU), the cputimer must be running.
1180         */
1181        if (READ_ONCE(tsk->signal->cputimer.running))
1182                check_process_timers(tsk, &firing);
1183
1184        /*
1185         * We must release these locks before taking any timer's lock.
1186         * There is a potential race with timer deletion here, as the
1187         * siglock now protects our private firing list.  We have set
1188         * the firing flag in each timer, so that a deletion attempt
1189         * that gets the timer lock before we do will give it up and
1190         * spin until we've taken care of that timer below.
1191         */
1192        unlock_task_sighand(tsk, &flags);
1193
1194        /*
1195         * Now that all the timers on our list have the firing flag,
1196         * no one will touch their list entries but us.  We'll take
1197         * each timer's lock before clearing its firing flag, so no
1198         * timer call will interfere.
1199         */
1200        list_for_each_entry_safe(timer, next, &firing, it.cpu.entry) {
1201                int cpu_firing;
1202
1203                spin_lock(&timer->it_lock);
1204                list_del_init(&timer->it.cpu.entry);
1205                cpu_firing = timer->it.cpu.firing;
1206                timer->it.cpu.firing = 0;
1207                /*
1208                 * The firing flag is -1 if we collided with a reset
1209                 * of the timer, which already reported this
1210                 * almost-firing as an overrun.  So don't generate an event.
1211                 */
1212                if (likely(cpu_firing >= 0))
1213                        cpu_timer_fire(timer);
1214                spin_unlock(&timer->it_lock);
1215        }
1216}
1217
1218/*
1219 * Set one of the process-wide special case CPU timers or RLIMIT_CPU.
1220 * The tsk->sighand->siglock must be held by the caller.
1221 */
1222void set_process_cpu_timer(struct task_struct *tsk, unsigned int clock_idx,
1223                           cputime_t *newval, cputime_t *oldval)
1224{
1225        unsigned long long now;
1226
1227        WARN_ON_ONCE(clock_idx == CPUCLOCK_SCHED);
1228        cpu_timer_sample_group(clock_idx, tsk, &now);
1229
1230        if (oldval) {
1231                /*
 1232                 * We are setting an itimer.  The *oldval argument is absolute and
 1233                 * we update it to be relative; the *newval argument is relative and we update
1234                 * it to be absolute.
1235                 */
1236                if (*oldval) {
1237                        if (*oldval <= now) {
1238                                /* Just about to fire. */
1239                                *oldval = cputime_one_jiffy;
1240                        } else {
1241                                *oldval -= now;
1242                        }
1243                }
1244
1245                if (!*newval)
1246                        goto out;
1247                *newval += now;
1248        }
1249
1250        /*
 1251         * Update the expiration cache if we are the earliest timer, or if the
 1252         * RLIMIT_CPU limit is earlier than the current prof_exp expiry.
1253         */
1254        switch (clock_idx) {
1255        case CPUCLOCK_PROF:
1256                if (expires_gt(tsk->signal->cputime_expires.prof_exp, *newval))
1257                        tsk->signal->cputime_expires.prof_exp = *newval;
1258                break;
1259        case CPUCLOCK_VIRT:
1260                if (expires_gt(tsk->signal->cputime_expires.virt_exp, *newval))
1261                        tsk->signal->cputime_expires.virt_exp = *newval;
1262                break;
1263        }
1264out:
1265        posix_cpu_timer_kick_nohz();
1266}
1267
1268static int do_cpu_nanosleep(const clockid_t which_clock, int flags,
1269                            struct timespec *rqtp, struct itimerspec *it)
1270{
1271        struct k_itimer timer;
1272        int error;
1273
1274        /*
1275         * Set up a temporary timer and then wait for it to go off.
1276         */
1277        memset(&timer, 0, sizeof timer);
1278        spin_lock_init(&timer.it_lock);
1279        timer.it_clock = which_clock;
1280        timer.it_overrun = -1;
1281        error = posix_cpu_timer_create(&timer);
1282        timer.it_process = current;
1283        if (!error) {
1284                static struct itimerspec zero_it;
1285
1286                memset(it, 0, sizeof *it);
1287                it->it_value = *rqtp;
1288
1289                spin_lock_irq(&timer.it_lock);
1290                error = posix_cpu_timer_set(&timer, flags, it, NULL);
1291                if (error) {
1292                        spin_unlock_irq(&timer.it_lock);
1293                        return error;
1294                }
1295
1296                while (!signal_pending(current)) {
1297                        if (timer.it.cpu.expires == 0) {
1298                                /*
 1299                                 * Our timer fired and was reset; the
 1300                                 * deletion below cannot fail.
1301                                 */
1302                                posix_cpu_timer_del(&timer);
1303                                spin_unlock_irq(&timer.it_lock);
1304                                return 0;
1305                        }
1306
1307                        /*
1308                         * Block until cpu_timer_fire (or a signal) wakes us.
1309                         */
1310                        __set_current_state(TASK_INTERRUPTIBLE);
1311                        spin_unlock_irq(&timer.it_lock);
1312                        schedule();
1313                        spin_lock_irq(&timer.it_lock);
1314                }
1315
1316                /*
1317                 * We were interrupted by a signal.
1318                 */
1319                sample_to_timespec(which_clock, timer.it.cpu.expires, rqtp);
1320                error = posix_cpu_timer_set(&timer, 0, &zero_it, it);
1321                if (!error) {
1322                        /*
 1323                         * The timer is now unarmed; deletion cannot fail.
1324                         */
1325                        posix_cpu_timer_del(&timer);
1326                }
1327                spin_unlock_irq(&timer.it_lock);
1328
1329                while (error == TIMER_RETRY) {
1330                        /*
 1331                         * We need to handle the case when the timer was or is in the
 1332                         * middle of firing.  In the other cases we have already freed
1333                         * resources.
1334                         */
1335                        spin_lock_irq(&timer.it_lock);
1336                        error = posix_cpu_timer_del(&timer);
1337                        spin_unlock_irq(&timer.it_lock);
1338                }
1339
1340                if ((it->it_value.tv_sec | it->it_value.tv_nsec) == 0) {
1341                        /*
1342                         * It actually did fire already.
1343                         */
1344                        return 0;
1345                }
1346
1347                error = -ERESTART_RESTARTBLOCK;
1348        }
1349
1350        return error;
1351}
1352
1353static long posix_cpu_nsleep_restart(struct restart_block *restart_block);
1354
1355static int posix_cpu_nsleep(const clockid_t which_clock, int flags,
1356                            struct timespec *rqtp, struct timespec __user *rmtp)
1357{
1358        struct restart_block *restart_block = &current->restart_block;
1359        struct itimerspec it;
1360        int error;
1361
1362        /*
1363         * Diagnose required errors first.
1364         */
1365        if (CPUCLOCK_PERTHREAD(which_clock) &&
1366            (CPUCLOCK_PID(which_clock) == 0 ||
1367             CPUCLOCK_PID(which_clock) == current->pid))
1368                return -EINVAL;
1369
1370        error = do_cpu_nanosleep(which_clock, flags, rqtp, &it);
1371
1372        if (error == -ERESTART_RESTARTBLOCK) {
1373
1374                if (flags & TIMER_ABSTIME)
1375                        return -ERESTARTNOHAND;
1376                /*
1377                 * Report back to the user the time still remaining.
1378                 */
1379                if (rmtp && copy_to_user(rmtp, &it.it_value, sizeof *rmtp))
1380                        return -EFAULT;
1381
1382                restart_block->fn = posix_cpu_nsleep_restart;
1383                restart_block->nanosleep.clockid = which_clock;
1384                restart_block->nanosleep.rmtp = rmtp;
1385                restart_block->nanosleep.expires = timespec_to_ns(rqtp);
1386        }
1387        return error;
1388}
1389
1390static long posix_cpu_nsleep_restart(struct restart_block *restart_block)
1391{
1392        clockid_t which_clock = restart_block->nanosleep.clockid;
1393        struct timespec t;
1394        struct itimerspec it;
1395        int error;
1396
1397        t = ns_to_timespec(restart_block->nanosleep.expires);
1398
1399        error = do_cpu_nanosleep(which_clock, TIMER_ABSTIME, &t, &it);
1400
1401        if (error == -ERESTART_RESTARTBLOCK) {
1402                struct timespec __user *rmtp = restart_block->nanosleep.rmtp;
1403                /*
1404                 * Report back to the user the time still remaining.
1405                 */
1406                if (rmtp && copy_to_user(rmtp, &it.it_value, sizeof *rmtp))
1407                        return -EFAULT;
1408
1409                restart_block->nanosleep.expires = timespec_to_ns(&t);
1410        }
1411        return error;
1412
1413}
1414
1415#define PROCESS_CLOCK   MAKE_PROCESS_CPUCLOCK(0, CPUCLOCK_SCHED)
1416#define THREAD_CLOCK    MAKE_THREAD_CPUCLOCK(0, CPUCLOCK_SCHED)
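     /*
      * These are the clock ids behind CLOCK_PROCESS_CPUTIME_ID and
      * CLOCK_THREAD_CPUTIME_ID: a pid of 0 selects the calling task and
      * CPUCLOCK_SCHED selects the sum_exec_runtime based clock.
      */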
1417
1418static int process_cpu_clock_getres(const clockid_t which_clock,
1419                                    struct timespec *tp)
1420{
1421        return posix_cpu_clock_getres(PROCESS_CLOCK, tp);
1422}
1423static int process_cpu_clock_get(const clockid_t which_clock,
1424                                 struct timespec *tp)
1425{
1426        return posix_cpu_clock_get(PROCESS_CLOCK, tp);
1427}
1428static int process_cpu_timer_create(struct k_itimer *timer)
1429{
1430        timer->it_clock = PROCESS_CLOCK;
1431        return posix_cpu_timer_create(timer);
1432}
1433static int process_cpu_nsleep(const clockid_t which_clock, int flags,
1434                              struct timespec *rqtp,
1435                              struct timespec __user *rmtp)
1436{
1437        return posix_cpu_nsleep(PROCESS_CLOCK, flags, rqtp, rmtp);
1438}
1439static long process_cpu_nsleep_restart(struct restart_block *restart_block)
1440{
1441        return -EINVAL;
1442}
1443static int thread_cpu_clock_getres(const clockid_t which_clock,
1444                                   struct timespec *tp)
1445{
1446        return posix_cpu_clock_getres(THREAD_CLOCK, tp);
1447}
1448static int thread_cpu_clock_get(const clockid_t which_clock,
1449                                struct timespec *tp)
1450{
1451        return posix_cpu_clock_get(THREAD_CLOCK, tp);
1452}
1453static int thread_cpu_timer_create(struct k_itimer *timer)
1454{
1455        timer->it_clock = THREAD_CLOCK;
1456        return posix_cpu_timer_create(timer);
1457}
1458
1459struct k_clock clock_posix_cpu = {
1460        .clock_getres   = posix_cpu_clock_getres,
1461        .clock_set      = posix_cpu_clock_set,
1462        .clock_get      = posix_cpu_clock_get,
1463        .timer_create   = posix_cpu_timer_create,
1464        .nsleep         = posix_cpu_nsleep,
1465        .nsleep_restart = posix_cpu_nsleep_restart,
1466        .timer_set      = posix_cpu_timer_set,
1467        .timer_del      = posix_cpu_timer_del,
1468        .timer_get      = posix_cpu_timer_get,
1469};
1470
1471static __init int init_posix_cpu_timers(void)
1472{
1473        struct k_clock process = {
1474                .clock_getres   = process_cpu_clock_getres,
1475                .clock_get      = process_cpu_clock_get,
1476                .timer_create   = process_cpu_timer_create,
1477                .nsleep         = process_cpu_nsleep,
1478                .nsleep_restart = process_cpu_nsleep_restart,
1479        };
1480        struct k_clock thread = {
1481                .clock_getres   = thread_cpu_clock_getres,
1482                .clock_get      = thread_cpu_clock_get,
1483                .timer_create   = thread_cpu_timer_create,
1484        };
1485        struct timespec ts;
1486
1487        posix_timers_register_clock(CLOCK_PROCESS_CPUTIME_ID, &process);
1488        posix_timers_register_clock(CLOCK_THREAD_CPUTIME_ID, &thread);
1489
1490        cputime_to_timespec(cputime_one_jiffy, &ts);
1491        onecputick = ts.tv_nsec;
1492        WARN_ON(ts.tv_sec != 0);
1493
1494        return 0;
1495}
1496__initcall(init_posix_cpu_timers);
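     /*
      * Illustration (not part of the kernel sources): userspace reaches the
      * clocks registered above through the standard POSIX calls, e.g.:
      *
      *	struct timespec ts;
      *	clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &ts);	// whole-process CPU time
      *	clock_gettime(CLOCK_THREAD_CPUTIME_ID, &ts);	// calling thread only
      *
      *	timer_t tid;
      *	struct sigevent sev = {
      *		.sigev_notify = SIGEV_SIGNAL,
      *		.sigev_signo  = SIGPROF,
      *	};
      *	timer_create(CLOCK_PROCESS_CPUTIME_ID, &sev, &tid);
      *
      * timer_settime() on such a timer then ends up in posix_cpu_timer_set()
      * via clock_posix_cpu above.
      */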
1497