linux/kernel/time/posix-cpu-timers.c
/*
 * Implement CPU time clocks for the POSIX clock interface.
 */

#include <linux/sched.h>
#include <linux/posix-timers.h>
#include <linux/errno.h>
#include <linux/math64.h>
#include <asm/uaccess.h>
#include <linux/kernel_stat.h>
#include <trace/events/timer.h>
#include <linux/random.h>
#include <linux/tick.h>
#include <linux/workqueue.h>

/*
 * Called after updating RLIMIT_CPU to run the CPU timer and update the
 * tsk->signal->cputime_expires expiration cache if necessary. Needs
 * siglock protection since other code may update the expiration cache
 * as well.
 */
void update_rlimit_cpu(struct task_struct *task, unsigned long rlim_new)
{
        cputime_t cputime = secs_to_cputime(rlim_new);

        spin_lock_irq(&task->sighand->siglock);
        set_process_cpu_timer(task, CPUCLOCK_PROF, &cputime, NULL);
        spin_unlock_irq(&task->sighand->siglock);
}
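
/*
 * Illustrative sketch only (not code from this file): the expected
 * caller is the setrlimit()/prlimit() path, which, once a new
 * RLIMIT_CPU value has been accepted, does something along these
 * lines:
 *
 *        if (resource == RLIMIT_CPU && new_rlim->rlim_cur != RLIM_INFINITY)
 *                update_rlimit_cpu(tsk, new_rlim->rlim_cur);
 *
 * The variable names above are hypothetical; only the call into
 * update_rlimit_cpu() is prescribed by this file.
 */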

static int check_clock(const clockid_t which_clock)
{
        int error = 0;
        struct task_struct *p;
        const pid_t pid = CPUCLOCK_PID(which_clock);

        if (CPUCLOCK_WHICH(which_clock) >= CPUCLOCK_MAX)
                return -EINVAL;

        if (pid == 0)
                return 0;

        rcu_read_lock();
        p = find_task_by_vpid(pid);
        if (!p || !(CPUCLOCK_PERTHREAD(which_clock) ?
                   same_thread_group(p, current) : has_group_leader_pid(p))) {
                error = -EINVAL;
        }
        rcu_read_unlock();

        return error;
}
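
/*
 * Note on the clockid encoding checked above (see the CPUCLOCK_*
 * macros in <linux/posix-timers.h>): the target pid lives in the
 * upper bits (stored one's-complemented, so CPUCLOCK_PID() inverts
 * clock >> 3), bit 2 is the per-thread flag, and bits 0-1 select
 * PROF, VIRT or SCHED.  A hedged example, assuming those macros:
 *
 *        clockid_t clk = MAKE_THREAD_CPUCLOCK(tid, CPUCLOCK_PROF);
 *
 *        CPUCLOCK_PERTHREAD(clk);   - nonzero
 *        CPUCLOCK_WHICH(clk);       - CPUCLOCK_PROF
 *        CPUCLOCK_PID(clk);         - tid
 */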

static inline unsigned long long
timespec_to_sample(const clockid_t which_clock, const struct timespec *tp)
{
        unsigned long long ret;

        ret = 0;                /* high half always zero when .cpu used */
        if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
                ret = (unsigned long long)tp->tv_sec * NSEC_PER_SEC + tp->tv_nsec;
        } else {
                ret = cputime_to_expires(timespec_to_cputime(tp));
        }
        return ret;
}

static void sample_to_timespec(const clockid_t which_clock,
                               unsigned long long expires,
                               struct timespec *tp)
{
        if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED)
                *tp = ns_to_timespec(expires);
        else
                cputime_to_timespec((__force cputime_t)expires, tp);
}

/*
 * Update expiry time from increment, and increase overrun count,
 * given the current clock sample.
 */
static void bump_cpu_timer(struct k_itimer *timer,
                           unsigned long long now)
{
        int i;
        unsigned long long delta, incr;

        if (timer->it.cpu.incr == 0)
                return;

        if (now < timer->it.cpu.expires)
                return;

        incr = timer->it.cpu.incr;
        delta = now + incr - timer->it.cpu.expires;

        /* Don't use (incr*2 < delta), incr*2 might overflow. */
        for (i = 0; incr < delta - incr; i++)
                incr = incr << 1;

        for (; i >= 0; incr >>= 1, i--) {
                if (delta < incr)
                        continue;

                timer->it.cpu.expires += incr;
                timer->it_overrun += 1 << i;
                delta -= incr;
        }
}
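
/*
 * Worked example for bump_cpu_timer(): with expires == 10, incr == 3
 * and now == 20, delta = 20 + 3 - 10 = 13.  The first loop doubles
 * incr (3 -> 6 -> 12, ending with i == 2) while doubling again would
 * still stay below delta; the second loop then subtracts the power-of-
 * two multiples of the increment that fit, here just 12 = 4 * 3.  The
 * result is expires == 22, the first expiry time greater than now,
 * with it_overrun increased by 4, one per increment added.
 */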

/**
 * task_cputime_zero - Check a task_cputime struct for all zero fields.
 *
 * @cputime:    The struct to compare.
 *
 * Checks @cputime to see if all fields are zero.  Returns true if all fields
 * are zero, false if any field is nonzero.
 */
static inline int task_cputime_zero(const struct task_cputime *cputime)
{
        if (!cputime->utime && !cputime->stime && !cputime->sum_exec_runtime)
                return 1;
        return 0;
}

static inline unsigned long long prof_ticks(struct task_struct *p)
{
        cputime_t utime, stime;

        task_cputime(p, &utime, &stime);

        return cputime_to_expires(utime + stime);
}

static inline unsigned long long virt_ticks(struct task_struct *p)
{
        cputime_t utime;

        task_cputime(p, &utime, NULL);

        return cputime_to_expires(utime);
}

static int
posix_cpu_clock_getres(const clockid_t which_clock, struct timespec *tp)
{
        int error = check_clock(which_clock);
        if (!error) {
                tp->tv_sec = 0;
                tp->tv_nsec = ((NSEC_PER_SEC + HZ - 1) / HZ);
                if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
                        /*
                         * If sched_clock is using a cycle counter, we
                         * don't export its true resolution here, but it
                         * is much finer than 1s/HZ.
                         */
                        tp->tv_nsec = 1;
                }
        }
        return error;
}

static int
posix_cpu_clock_set(const clockid_t which_clock, const struct timespec *tp)
{
        /*
         * You can never reset a CPU clock, but we check for other errors
         * in the call before failing with EPERM.
         */
        int error = check_clock(which_clock);
        if (error == 0) {
                error = -EPERM;
        }
        return error;
}

/*
 * Sample a per-thread clock for the given task.
 */
static int cpu_clock_sample(const clockid_t which_clock, struct task_struct *p,
                            unsigned long long *sample)
{
        switch (CPUCLOCK_WHICH(which_clock)) {
        default:
                return -EINVAL;
        case CPUCLOCK_PROF:
                *sample = prof_ticks(p);
                break;
        case CPUCLOCK_VIRT:
                *sample = virt_ticks(p);
                break;
        case CPUCLOCK_SCHED:
                *sample = task_sched_runtime(p);
                break;
        }
        return 0;
}

/*
 * Set cputime to sum_cputime if sum_cputime > cputime. Use cmpxchg
 * to avoid race conditions with concurrent updates to cputime.
 */
static inline void __update_gt_cputime(atomic64_t *cputime, u64 sum_cputime)
{
        u64 curr_cputime;
retry:
        curr_cputime = atomic64_read(cputime);
        if (sum_cputime > curr_cputime) {
                if (atomic64_cmpxchg(cputime, curr_cputime, sum_cputime) != curr_cputime)
                        goto retry;
        }
}

static void update_gt_cputime(struct task_cputime_atomic *cputime_atomic, struct task_cputime *sum)
{
        __update_gt_cputime(&cputime_atomic->utime, sum->utime);
        __update_gt_cputime(&cputime_atomic->stime, sum->stime);
        __update_gt_cputime(&cputime_atomic->sum_exec_runtime, sum->sum_exec_runtime);
}

/* Sample task_cputime_atomic values in "atomic_times", store results in "times". */
static inline void sample_cputime_atomic(struct task_cputime *times,
                                         struct task_cputime_atomic *atomic_times)
{
        times->utime = atomic64_read(&atomic_times->utime);
        times->stime = atomic64_read(&atomic_times->stime);
        times->sum_exec_runtime = atomic64_read(&atomic_times->sum_exec_runtime);
}

void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times)
{
        struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;
        struct task_cputime sum;

        /* Check if cputimer isn't running. This is accessed without locking. */
        if (!READ_ONCE(cputimer->running)) {
                /*
                 * The POSIX timer interface allows for absolute time expiry
                 * values through the TIMER_ABSTIME flag, therefore we have
                 * to synchronize the timer to the clock every time we start it.
                 */
                thread_group_cputime(tsk, &sum);
                update_gt_cputime(&cputimer->cputime_atomic, &sum);

                /*
                 * We're setting cputimer->running without a lock. Ensure
                 * this only gets written to in one operation. We set
                 * running after update_gt_cputime() as a small optimization,
                 * but barriers are not required because update_gt_cputime()
                 * can handle concurrent updates.
                 */
                WRITE_ONCE(cputimer->running, true);
        }
        sample_cputime_atomic(times, &cputimer->cputime_atomic);
}

/*
 * Sample a process (thread group) clock for the given group_leader task.
 * Must be called with task sighand lock held for safe while_each_thread()
 * traversal.
 */
static int cpu_clock_sample_group(const clockid_t which_clock,
                                  struct task_struct *p,
                                  unsigned long long *sample)
{
        struct task_cputime cputime;

        switch (CPUCLOCK_WHICH(which_clock)) {
        default:
                return -EINVAL;
        case CPUCLOCK_PROF:
                thread_group_cputime(p, &cputime);
                *sample = cputime_to_expires(cputime.utime + cputime.stime);
                break;
        case CPUCLOCK_VIRT:
                thread_group_cputime(p, &cputime);
                *sample = cputime_to_expires(cputime.utime);
                break;
        case CPUCLOCK_SCHED:
                thread_group_cputime(p, &cputime);
                *sample = cputime.sum_exec_runtime;
                break;
        }
        return 0;
}

static int posix_cpu_clock_get_task(struct task_struct *tsk,
                                    const clockid_t which_clock,
                                    struct timespec *tp)
{
        int err = -EINVAL;
        unsigned long long rtn;

        if (CPUCLOCK_PERTHREAD(which_clock)) {
                if (same_thread_group(tsk, current))
                        err = cpu_clock_sample(which_clock, tsk, &rtn);
        } else {
                if (tsk == current || thread_group_leader(tsk))
                        err = cpu_clock_sample_group(which_clock, tsk, &rtn);
        }

        if (!err)
                sample_to_timespec(which_clock, rtn, tp);

        return err;
}

static int posix_cpu_clock_get(const clockid_t which_clock, struct timespec *tp)
{
        const pid_t pid = CPUCLOCK_PID(which_clock);
        int err = -EINVAL;

        if (pid == 0) {
                /*
                 * Special case constant value for our own clocks.
                 * We don't have to do any lookup to find ourselves.
                 */
                err = posix_cpu_clock_get_task(current, which_clock, tp);
        } else {
                /*
                 * Find the given PID, and validate that the caller
                 * should be able to see it.
                 */
                struct task_struct *p;
                rcu_read_lock();
                p = find_task_by_vpid(pid);
                if (p)
                        err = posix_cpu_clock_get_task(p, which_clock, tp);
                rcu_read_unlock();
        }

        return err;
}
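
/*
 * Hedged userspace illustration (not kernel code): these clockids are
 * what clock_getcpuclockid() and pthread_getcpuclockid() hand back, so
 * reading another process' CPU clock looks roughly like:
 *
 *        clockid_t clk;
 *        struct timespec ts;
 *
 *        if (!clock_getcpuclockid(pid, &clk))
 *                clock_gettime(clk, &ts);
 *
 * which arrives in posix_cpu_clock_get() above with CPUCLOCK_PID(clk)
 * equal to pid, or 0 when the caller asks about itself.
 */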

/*
 * Validate the clockid_t for a new CPU-clock timer, and initialize the timer.
 * This is called from sys_timer_create() and do_cpu_nanosleep() with the
 * new timer already all-zeros initialized.
 */
static int posix_cpu_timer_create(struct k_itimer *new_timer)
{
        int ret = 0;
        const pid_t pid = CPUCLOCK_PID(new_timer->it_clock);
        struct task_struct *p;

        if (CPUCLOCK_WHICH(new_timer->it_clock) >= CPUCLOCK_MAX)
                return -EINVAL;

        INIT_LIST_HEAD(&new_timer->it.cpu.entry);

        rcu_read_lock();
        if (CPUCLOCK_PERTHREAD(new_timer->it_clock)) {
                if (pid == 0) {
                        p = current;
                } else {
                        p = find_task_by_vpid(pid);
                        if (p && !same_thread_group(p, current))
                                p = NULL;
                }
        } else {
                if (pid == 0) {
                        p = current->group_leader;
                } else {
                        p = find_task_by_vpid(pid);
                        if (p && !has_group_leader_pid(p))
                                p = NULL;
                }
        }
        new_timer->it.cpu.task = p;
        if (p) {
                get_task_struct(p);
        } else {
                ret = -EINVAL;
        }
        rcu_read_unlock();

        return ret;
}

/*
 * Clean up a CPU-clock timer that is about to be destroyed.
 * This is called from timer deletion with the timer already locked.
 * If we return TIMER_RETRY, it's necessary to release the timer's lock
 * and try again.  (This happens when the timer is in the middle of firing.)
 */
static int posix_cpu_timer_del(struct k_itimer *timer)
{
        int ret = 0;
        unsigned long flags;
        struct sighand_struct *sighand;
        struct task_struct *p = timer->it.cpu.task;

        WARN_ON_ONCE(p == NULL);

        /*
         * Protect against sighand release/switch in exit/exec and process/
         * thread timer list entry concurrent read/writes.
         */
        sighand = lock_task_sighand(p, &flags);
        if (unlikely(sighand == NULL)) {
                /*
                 * We raced with the reaping of the task.
                 * The deletion should have cleared us off the list.
                 */
                WARN_ON_ONCE(!list_empty(&timer->it.cpu.entry));
        } else {
                if (timer->it.cpu.firing)
                        ret = TIMER_RETRY;
                else
                        list_del(&timer->it.cpu.entry);

                unlock_task_sighand(p, &flags);
        }

        if (!ret)
                put_task_struct(p);

        return ret;
}

static void cleanup_timers_list(struct list_head *head)
{
        struct cpu_timer_list *timer, *next;

        list_for_each_entry_safe(timer, next, head, entry)
                list_del_init(&timer->entry);
}

/*
 * Clean out the CPU timers still ticking when a thread exits: unlink
 * every entry from the thread's (or group's) three expiry lists, one
 * per clock (CPUCLOCK_PROF, CPUCLOCK_VIRT, CPUCLOCK_SCHED), which the
 * head/++head arithmetic below walks in order.
 * This must be called with the siglock held.
 */
static void cleanup_timers(struct list_head *head)
{
        cleanup_timers_list(head);
        cleanup_timers_list(++head);
        cleanup_timers_list(++head);
}

/*
 * These are both called with the siglock held, when the current thread
 * is being reaped.  When the final (leader) thread in the group is reaped,
 * posix_cpu_timers_exit_group will be called after posix_cpu_timers_exit.
 */
void posix_cpu_timers_exit(struct task_struct *tsk)
{
        add_device_randomness((const void*) &tsk->se.sum_exec_runtime,
                                                sizeof(unsigned long long));
        cleanup_timers(tsk->cpu_timers);
}
void posix_cpu_timers_exit_group(struct task_struct *tsk)
{
        cleanup_timers(tsk->signal->cpu_timers);
}

static inline int expires_gt(cputime_t expires, cputime_t new_exp)
{
        return expires == 0 || expires > new_exp;
}

/*
 * Insert the timer on the appropriate list before any timers that
 * expire later.  This must be called with the sighand lock held.
 */
static void arm_timer(struct k_itimer *timer)
{
        struct task_struct *p = timer->it.cpu.task;
        struct list_head *head, *listpos;
        struct task_cputime *cputime_expires;
        struct cpu_timer_list *const nt = &timer->it.cpu;
        struct cpu_timer_list *next;

        if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
                head = p->cpu_timers;
                cputime_expires = &p->cputime_expires;
        } else {
                head = p->signal->cpu_timers;
                cputime_expires = &p->signal->cputime_expires;
        }
        head += CPUCLOCK_WHICH(timer->it_clock);

        listpos = head;
        list_for_each_entry(next, head, entry) {
                if (nt->expires < next->expires)
                        break;
                listpos = &next->entry;
        }
        list_add(&nt->entry, listpos);

        if (listpos == head) {
                unsigned long long exp = nt->expires;

                /*
                 * We are the new earliest-expiring POSIX.1b timer, hence
                 * need to update expiration cache. Take into account that
                 * for process timers we share expiration cache with itimers
                 * and RLIMIT_CPU and for thread timers with RLIMIT_RTTIME.
                 */

                switch (CPUCLOCK_WHICH(timer->it_clock)) {
                case CPUCLOCK_PROF:
                        if (expires_gt(cputime_expires->prof_exp, expires_to_cputime(exp)))
                                cputime_expires->prof_exp = expires_to_cputime(exp);
                        break;
                case CPUCLOCK_VIRT:
                        if (expires_gt(cputime_expires->virt_exp, expires_to_cputime(exp)))
                                cputime_expires->virt_exp = expires_to_cputime(exp);
                        break;
                case CPUCLOCK_SCHED:
                        if (cputime_expires->sched_exp == 0 ||
                            cputime_expires->sched_exp > exp)
                                cputime_expires->sched_exp = exp;
                        break;
                }
                if (CPUCLOCK_PERTHREAD(timer->it_clock))
                        tick_dep_set_task(p, TICK_DEP_BIT_POSIX_TIMER);
                else
                        tick_dep_set_signal(p->signal, TICK_DEP_BIT_POSIX_TIMER);
        }
}

/*
 * The timer is locked, fire it and arrange for its reload.
 */
static void cpu_timer_fire(struct k_itimer *timer)
{
        if ((timer->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE) {
                /*
                 * The user doesn't want any signal.
                 */
                timer->it.cpu.expires = 0;
        } else if (unlikely(timer->sigq == NULL)) {
                /*
                 * This is a special case for clock_nanosleep,
                 * not a normal timer from sys_timer_create.
                 */
                wake_up_process(timer->it_process);
                timer->it.cpu.expires = 0;
        } else if (timer->it.cpu.incr == 0) {
                /*
                 * One-shot timer.  Clear it as soon as it's fired.
                 */
                posix_timer_event(timer, 0);
                timer->it.cpu.expires = 0;
        } else if (posix_timer_event(timer, ++timer->it_requeue_pending)) {
                /*
                 * The signal did not get queued because the signal
                 * was ignored, so we won't get any callback to
                 * reload the timer.  But we need to keep it
                 * ticking in case the signal is deliverable next time.
                 */
                posix_cpu_timer_schedule(timer);
        }
}

/*
 * Sample a process (thread group) timer for the given group_leader task.
 * Must be called with task sighand lock held for safe while_each_thread()
 * traversal.
 */
static int cpu_timer_sample_group(const clockid_t which_clock,
                                  struct task_struct *p,
                                  unsigned long long *sample)
{
        struct task_cputime cputime;

        thread_group_cputimer(p, &cputime);
        switch (CPUCLOCK_WHICH(which_clock)) {
        default:
                return -EINVAL;
        case CPUCLOCK_PROF:
                *sample = cputime_to_expires(cputime.utime + cputime.stime);
                break;
        case CPUCLOCK_VIRT:
                *sample = cputime_to_expires(cputime.utime);
                break;
        case CPUCLOCK_SCHED:
                *sample = cputime.sum_exec_runtime;
                break;
        }
        return 0;
}

/*
 * Guts of sys_timer_settime for CPU timers.
 * This is called with the timer locked and interrupts disabled.
 * If we return TIMER_RETRY, it's necessary to release the timer's lock
 * and try again.  (This happens when the timer is in the middle of firing.)
 */
static int posix_cpu_timer_set(struct k_itimer *timer, int timer_flags,
                               struct itimerspec *new, struct itimerspec *old)
{
        unsigned long flags;
        struct sighand_struct *sighand;
        struct task_struct *p = timer->it.cpu.task;
        unsigned long long old_expires, new_expires, old_incr, val;
        int ret;

        WARN_ON_ONCE(p == NULL);

        new_expires = timespec_to_sample(timer->it_clock, &new->it_value);

        /*
         * Protect against sighand release/switch in exit/exec and p->cpu_timers
         * and p->signal->cpu_timers read/write in arm_timer()
         */
        sighand = lock_task_sighand(p, &flags);
        /*
         * If p has just been reaped, we can no
         * longer get any information about it at all.
         */
        if (unlikely(sighand == NULL)) {
                return -ESRCH;
        }

        /*
         * Disarm any old timer after extracting its expiry time.
         */
        WARN_ON_ONCE(!irqs_disabled());

        ret = 0;
        old_incr = timer->it.cpu.incr;
        old_expires = timer->it.cpu.expires;
        if (unlikely(timer->it.cpu.firing)) {
                timer->it.cpu.firing = -1;
                ret = TIMER_RETRY;
        } else
                list_del_init(&timer->it.cpu.entry);

        /*
         * We need to sample the current value to convert the new
         * value between its relative and absolute forms, and to
         * convert the old value from absolute to relative.  To set a
         * process timer, we need a sample to balance the thread expiry
         * times (in arm_timer).  With an absolute time, we must
         * check if it's already passed.  In short, we need a sample.
         */
        if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
                cpu_clock_sample(timer->it_clock, p, &val);
        } else {
                cpu_timer_sample_group(timer->it_clock, p, &val);
        }

        if (old) {
                if (old_expires == 0) {
                        old->it_value.tv_sec = 0;
                        old->it_value.tv_nsec = 0;
                } else {
                        /*
                         * Update the timer in case it has overrun
                         * already.  If so, we'll report it as having
                         * overrun, with the next reloaded timer already
                         * ticking, though we are swallowing that pending
                         * notification here to install the new setting.
                         */
                        bump_cpu_timer(timer, val);
                        if (val < timer->it.cpu.expires) {
                                old_expires = timer->it.cpu.expires - val;
                                sample_to_timespec(timer->it_clock,
                                                   old_expires,
                                                   &old->it_value);
                        } else {
                                old->it_value.tv_nsec = 1;
                                old->it_value.tv_sec = 0;
                        }
                }
        }

        if (unlikely(ret)) {
                /*
                 * We are colliding with the timer actually firing.
                 * Punt after filling in the timer's old value, and
                 * disable this firing since we are already reporting
                 * it as an overrun (thanks to bump_cpu_timer above).
                 */
                unlock_task_sighand(p, &flags);
                goto out;
        }

        if (new_expires != 0 && !(timer_flags & TIMER_ABSTIME)) {
                new_expires += val;
        }

        /*
         * Install the new expiry time (or zero).
         * For a timer with no notification action, we don't actually
         * arm the timer (we'll just fake it for timer_gettime).
         */
        timer->it.cpu.expires = new_expires;
        if (new_expires != 0 && val < new_expires) {
                arm_timer(timer);
        }

        unlock_task_sighand(p, &flags);
        /*
         * Install the new reload setting, and
         * set up the signal and overrun bookkeeping.
         */
        timer->it.cpu.incr = timespec_to_sample(timer->it_clock,
                                                &new->it_interval);

        /*
         * This acts as a modification timestamp for the timer,
         * so any automatic reload attempt will punt on seeing
         * that we have reset the timer manually.
         */
        timer->it_requeue_pending = (timer->it_requeue_pending + 2) &
                ~REQUEUE_PENDING;
        timer->it_overrun_last = 0;
        timer->it_overrun = -1;

        if (new_expires != 0 && !(val < new_expires)) {
                /*
                 * The designated time already passed, so we notify
                 * immediately, even if the thread never runs to
                 * accumulate more time on this clock.
                 */
                cpu_timer_fire(timer);
        }

        ret = 0;
 out:
        if (old) {
                sample_to_timespec(timer->it_clock,
                                   old_incr, &old->it_interval);
        }

        return ret;
}

static void posix_cpu_timer_get(struct k_itimer *timer, struct itimerspec *itp)
{
        unsigned long long now;
        struct task_struct *p = timer->it.cpu.task;

        WARN_ON_ONCE(p == NULL);

        /*
         * Easy part: convert the reload time.
         */
        sample_to_timespec(timer->it_clock,
                           timer->it.cpu.incr, &itp->it_interval);

        if (timer->it.cpu.expires == 0) {       /* Timer not armed at all.  */
                itp->it_value.tv_sec = itp->it_value.tv_nsec = 0;
                return;
        }

        /*
         * Sample the clock to take the difference with the expiry time.
         */
        if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
                cpu_clock_sample(timer->it_clock, p, &now);
        } else {
                struct sighand_struct *sighand;
                unsigned long flags;

                /*
                 * Protect against sighand release/switch in exit/exec and
                 * also make timer sampling safe if it ends up calling
                 * thread_group_cputime().
                 */
                sighand = lock_task_sighand(p, &flags);
                if (unlikely(sighand == NULL)) {
                        /*
                         * The process has been reaped.
                         * We can't even collect a sample any more.
                         * Treat the timer as disarmed; nothing else to do.
                         */
                        timer->it.cpu.expires = 0;
                        sample_to_timespec(timer->it_clock, timer->it.cpu.expires,
                                           &itp->it_value);
                        return;
                } else {
                        cpu_timer_sample_group(timer->it_clock, p, &now);
                        unlock_task_sighand(p, &flags);
                }
        }

        if (now < timer->it.cpu.expires) {
                sample_to_timespec(timer->it_clock,
                                   timer->it.cpu.expires - now,
                                   &itp->it_value);
        } else {
                /*
                 * The timer should have expired already, but the firing
                 * hasn't taken place yet.  Say it's just about to expire.
                 */
                itp->it_value.tv_nsec = 1;
                itp->it_value.tv_sec = 0;
        }
}

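/*
 * Walk one expiry list (kept sorted by arm_timer()) and move every
 * timer whose expiry time is at or before @curr onto @firing, setting
 * its ->firing flag on the way.  Only a small fixed batch (maxfire)
 * is moved per call so the time spent here with interrupts disabled
 * stays bounded.  Returns the expiry time of the earliest timer left
 * on the list, or 0 if the list was drained.
 */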
static unsigned long long
check_timers_list(struct list_head *timers,
                  struct list_head *firing,
                  unsigned long long curr)
{
        int maxfire = 20;

        while (!list_empty(timers)) {
                struct cpu_timer_list *t;

                t = list_first_entry(timers, struct cpu_timer_list, entry);

                if (!--maxfire || curr < t->expires)
                        return t->expires;

                t->firing = 1;
                list_move_tail(&t->entry, firing);
        }

        return 0;
}

/*
 * Check for any per-thread CPU timers that have fired and move them off
 * the tsk->cpu_timers[N] list onto the firing list.  Here we update the
 * tsk->cputime_expires values to reflect the remaining thread CPU timers.
 */
static void check_thread_timers(struct task_struct *tsk,
                                struct list_head *firing)
{
        struct list_head *timers = tsk->cpu_timers;
        struct signal_struct *const sig = tsk->signal;
        struct task_cputime *tsk_expires = &tsk->cputime_expires;
        unsigned long long expires;
        unsigned long soft;

        /*
         * If cputime_expires is zero, then there are no active
         * per-thread CPU timers.
         */
        if (task_cputime_zero(&tsk->cputime_expires))
                return;

        expires = check_timers_list(timers, firing, prof_ticks(tsk));
        tsk_expires->prof_exp = expires_to_cputime(expires);

        expires = check_timers_list(++timers, firing, virt_ticks(tsk));
        tsk_expires->virt_exp = expires_to_cputime(expires);

        tsk_expires->sched_exp = check_timers_list(++timers, firing,
                                                   tsk->se.sum_exec_runtime);

        /*
         * Check for the special case thread timers.
         */
        soft = READ_ONCE(sig->rlim[RLIMIT_RTTIME].rlim_cur);
        if (soft != RLIM_INFINITY) {
                unsigned long hard =
                        READ_ONCE(sig->rlim[RLIMIT_RTTIME].rlim_max);

                if (hard != RLIM_INFINITY &&
                    tsk->rt.timeout > DIV_ROUND_UP(hard, USEC_PER_SEC/HZ)) {
                        /*
                         * At the hard limit, we just die.
                         * No need to calculate anything else now.
                         */
                        __group_send_sig_info(SIGKILL, SEND_SIG_PRIV, tsk);
                        return;
                }
                if (tsk->rt.timeout > DIV_ROUND_UP(soft, USEC_PER_SEC/HZ)) {
                        /*
                         * At the soft limit, send a SIGXCPU every second.
                         */
                        if (soft < hard) {
                                soft += USEC_PER_SEC;
                                sig->rlim[RLIMIT_RTTIME].rlim_cur = soft;
                        }
                        printk(KERN_INFO
                                "RT Watchdog Timeout: %s[%d]\n",
                                tsk->comm, task_pid_nr(tsk));
                        __group_send_sig_info(SIGXCPU, SEND_SIG_PRIV, tsk);
                }
        }
        if (task_cputime_zero(tsk_expires))
                tick_dep_clear_task(tsk, TICK_DEP_BIT_POSIX_TIMER);
}

static inline void stop_process_timers(struct signal_struct *sig)
{
        struct thread_group_cputimer *cputimer = &sig->cputimer;

        /* Turn off cputimer->running. This is done without locking. */
        WRITE_ONCE(cputimer->running, false);
        tick_dep_clear_signal(sig, TICK_DEP_BIT_POSIX_TIMER);
}

static u32 onecputick;

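/*
 * Expire one of the classic process-wide itimers (ITIMER_PROF or
 * ITIMER_VIRTUAL) against the current clock sample and rearm it.
 * The reload interval was converted to cputime_t with rounding, so
 * each rearm also accumulates the conversion remainder in it->error;
 * once a full tick's worth (onecputick) has built up, one jiffy is
 * clawed back from the next expiry so the timer does not drift late
 * over many periods.
 */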
static void check_cpu_itimer(struct task_struct *tsk, struct cpu_itimer *it,
                             unsigned long long *expires,
                             unsigned long long cur_time, int signo)
{
        if (!it->expires)
                return;

        if (cur_time >= it->expires) {
                if (it->incr) {
                        it->expires += it->incr;
                        it->error += it->incr_error;
                        if (it->error >= onecputick) {
                                it->expires -= cputime_one_jiffy;
                                it->error -= onecputick;
                        }
                } else {
                        it->expires = 0;
                }

                trace_itimer_expire(signo == SIGPROF ?
                                    ITIMER_PROF : ITIMER_VIRTUAL,
                                    tsk->signal->leader_pid, cur_time);
                __group_send_sig_info(signo, SEND_SIG_PRIV, tsk);
        }

        if (it->expires && (!*expires || it->expires < *expires)) {
                *expires = it->expires;
        }
}

/*
 * Check for any per-process CPU timers that have fired and move them
 * off the sig->cpu_timers[N] lists onto the firing list.  Per-thread
 * timers have already been taken off.
 */
static void check_process_timers(struct task_struct *tsk,
                                 struct list_head *firing)
{
        struct signal_struct *const sig = tsk->signal;
        unsigned long long utime, ptime, virt_expires, prof_expires;
        unsigned long long sum_sched_runtime, sched_expires;
        struct list_head *timers = sig->cpu_timers;
        struct task_cputime cputime;
        unsigned long soft;

        /*
         * If cputimer is not running, then there are no active
         * process wide timers (POSIX.1b, itimers, RLIMIT_CPU).
         */
        if (!READ_ONCE(tsk->signal->cputimer.running))
                return;

        /*
         * Signify that a thread is checking for process timers.
         * Write access to this field is protected by the sighand lock.
         */
        sig->cputimer.checking_timer = true;

        /*
         * Collect the current process totals.
         */
        thread_group_cputimer(tsk, &cputime);
        utime = cputime_to_expires(cputime.utime);
        ptime = utime + cputime_to_expires(cputime.stime);
        sum_sched_runtime = cputime.sum_exec_runtime;

        prof_expires = check_timers_list(timers, firing, ptime);
        virt_expires = check_timers_list(++timers, firing, utime);
        sched_expires = check_timers_list(++timers, firing, sum_sched_runtime);

        /*
         * Check for the special case process timers.
         */
        check_cpu_itimer(tsk, &sig->it[CPUCLOCK_PROF], &prof_expires, ptime,
                         SIGPROF);
        check_cpu_itimer(tsk, &sig->it[CPUCLOCK_VIRT], &virt_expires, utime,
                         SIGVTALRM);
        soft = READ_ONCE(sig->rlim[RLIMIT_CPU].rlim_cur);
        if (soft != RLIM_INFINITY) {
                unsigned long psecs = cputime_to_secs(ptime);
                unsigned long hard =
                        READ_ONCE(sig->rlim[RLIMIT_CPU].rlim_max);
                cputime_t x;
                if (psecs >= hard) {
                        /*
                         * At the hard limit, we just die.
                         * No need to calculate anything else now.
                         */
                        __group_send_sig_info(SIGKILL, SEND_SIG_PRIV, tsk);
                        return;
                }
                if (psecs >= soft) {
                        /*
                         * At the soft limit, send a SIGXCPU every second.
                         */
                        __group_send_sig_info(SIGXCPU, SEND_SIG_PRIV, tsk);
                        if (soft < hard) {
                                soft++;
                                sig->rlim[RLIMIT_CPU].rlim_cur = soft;
                        }
                }
                x = secs_to_cputime(soft);
                if (!prof_expires || x < prof_expires) {
                        prof_expires = x;
                }
        }

        sig->cputime_expires.prof_exp = expires_to_cputime(prof_expires);
        sig->cputime_expires.virt_exp = expires_to_cputime(virt_expires);
        sig->cputime_expires.sched_exp = sched_expires;
        if (task_cputime_zero(&sig->cputime_expires))
                stop_process_timers(sig);

        sig->cputimer.checking_timer = false;
}

/*
 * This is called from the signal code (via do_schedule_next_timer)
 * when the last timer signal was delivered and we have to reload the timer.
 */
void posix_cpu_timer_schedule(struct k_itimer *timer)
{
        struct sighand_struct *sighand;
        unsigned long flags;
        struct task_struct *p = timer->it.cpu.task;
        unsigned long long now;

        WARN_ON_ONCE(p == NULL);

        /*
         * Fetch the current sample and update the timer's expiry time.
         */
        if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
                cpu_clock_sample(timer->it_clock, p, &now);
                bump_cpu_timer(timer, now);
                if (unlikely(p->exit_state))
                        goto out;

                /* Protect timer list r/w in arm_timer() */
                sighand = lock_task_sighand(p, &flags);
                if (!sighand)
                        goto out;
        } else {
                /*
                 * Protect arm_timer() and timer sampling in case of call to
                 * thread_group_cputime().
                 */
                sighand = lock_task_sighand(p, &flags);
                if (unlikely(sighand == NULL)) {
                        /*
                         * The process has been reaped.
                         * We can't even collect a sample any more.
                         */
                        timer->it.cpu.expires = 0;
                        goto out;
                } else if (unlikely(p->exit_state) && thread_group_empty(p)) {
                        unlock_task_sighand(p, &flags);
                        /* Optimization: if the process is dying, no need to rearm */
                        goto out;
                }
                cpu_timer_sample_group(timer->it_clock, p, &now);
                bump_cpu_timer(timer, now);
                /* Leave the sighand locked for the call below.  */
        }

        /*
         * Now re-arm for the new expiry time.
         */
        WARN_ON_ONCE(!irqs_disabled());
        arm_timer(timer);
        unlock_task_sighand(p, &flags);

out:
        timer->it_overrun_last = timer->it_overrun;
        timer->it_overrun = -1;
        ++timer->it_requeue_pending;
}

/**
 * task_cputime_expired - Compare two task_cputime entities.
 *
 * @sample:     The task_cputime structure to be checked for expiration.
 * @expires:    Expiration times, against which @sample will be checked.
 *
 * Checks @sample against @expires to see if any field of @sample has expired.
 * Returns true if any field of @sample is greater than or equal to the
 * corresponding field of @expires when that field is nonzero.  Otherwise
 * returns false.
 */
static inline int task_cputime_expired(const struct task_cputime *sample,
                                        const struct task_cputime *expires)
{
        if (expires->utime && sample->utime >= expires->utime)
                return 1;
        if (expires->stime && sample->utime + sample->stime >= expires->stime)
                return 1;
        if (expires->sum_exec_runtime != 0 &&
            sample->sum_exec_runtime >= expires->sum_exec_runtime)
                return 1;
        return 0;
}

/**
 * fastpath_timer_check - POSIX CPU timers fast path.
 *
 * @tsk:        The task (thread) being checked.
 *
 * Check the task and thread group timers.  If both are zero (there are no
 * timers set) return false.  Otherwise snapshot the task and thread group
 * timers and compare them with the corresponding expiration times.  Return
 * true if a timer has expired, else return false.
 */
static inline int fastpath_timer_check(struct task_struct *tsk)
{
        struct signal_struct *sig;

        if (!task_cputime_zero(&tsk->cputime_expires)) {
                struct task_cputime task_sample;

                task_cputime(tsk, &task_sample.utime, &task_sample.stime);
                task_sample.sum_exec_runtime = tsk->se.sum_exec_runtime;
                if (task_cputime_expired(&task_sample, &tsk->cputime_expires))
                        return 1;
        }

        sig = tsk->signal;
        /*
         * Check if thread group timers expired when the cputimer is
         * running and no other thread in the group is already checking
         * for thread group cputimers. These fields are read without the
         * sighand lock. However, this is fine because this is meant to
         * be a fastpath heuristic to determine whether we should try to
         * acquire the sighand lock to check/handle timers.
         *
         * In the worst case scenario, if 'running' or 'checking_timer' gets
         * set but the current thread doesn't see the change yet, we'll wait
         * until the next thread in the group gets a scheduler interrupt to
         * handle the timer. This isn't an issue in practice because these
         * types of delays with signals actually getting sent are expected.
         */
        if (READ_ONCE(sig->cputimer.running) &&
            !READ_ONCE(sig->cputimer.checking_timer)) {
                struct task_cputime group_sample;

                sample_cputime_atomic(&group_sample, &sig->cputimer.cputime_atomic);

                if (task_cputime_expired(&group_sample, &sig->cputime_expires))
                        return 1;
        }

        return 0;
}

/*
 * This is called from the timer interrupt handler.  The irq handler has
 * already updated our counts.  We need to check if any timers fire now.
 * Interrupts are disabled.
 */
void run_posix_cpu_timers(struct task_struct *tsk)
{
        LIST_HEAD(firing);
        struct k_itimer *timer, *next;
        unsigned long flags;

        WARN_ON_ONCE(!irqs_disabled());

        /*
         * The fast path checks that there are no expired thread or thread
         * group timers.  If that's so, just return.
         */
        if (!fastpath_timer_check(tsk))
                return;

        if (!lock_task_sighand(tsk, &flags))
                return;
        /*
         * Take all expired timers off tsk->cpu_timers[N] and
         * tsk->signal->cpu_timers[N] and put them on the local
         * firing list.
         */
        check_thread_timers(tsk, &firing);

        check_process_timers(tsk, &firing);

        /*
         * We must release these locks before taking any timer's lock.
         * There is a potential race with timer deletion here, as the
         * siglock now protects our private firing list.  We have set
         * the firing flag in each timer, so that a deletion attempt
         * that gets the timer lock before we do will give it up and
         * spin until we've taken care of that timer below.
         */
        unlock_task_sighand(tsk, &flags);

        /*
         * Now that all the timers on our list have the firing flag,
         * no one will touch their list entries but us.  We'll take
         * each timer's lock before clearing its firing flag, so no
         * timer call will interfere.
         */
        list_for_each_entry_safe(timer, next, &firing, it.cpu.entry) {
                int cpu_firing;

                spin_lock(&timer->it_lock);
                list_del_init(&timer->it.cpu.entry);
                cpu_firing = timer->it.cpu.firing;
                timer->it.cpu.firing = 0;
                /*
                 * The firing flag is -1 if we collided with a reset
                 * of the timer, which already reported this
                 * almost-firing as an overrun.  So don't generate an event.
                 */
                if (likely(cpu_firing >= 0))
                        cpu_timer_fire(timer);
                spin_unlock(&timer->it_lock);
        }
}

/*
 * Set one of the process-wide special case CPU timers or RLIMIT_CPU.
 * The tsk->sighand->siglock must be held by the caller.
 */
void set_process_cpu_timer(struct task_struct *tsk, unsigned int clock_idx,
                           cputime_t *newval, cputime_t *oldval)
{
        unsigned long long now;

        WARN_ON_ONCE(clock_idx == CPUCLOCK_SCHED);
        cpu_timer_sample_group(clock_idx, tsk, &now);

        if (oldval) {
                /*
                 * We are setting the itimer.  *oldval comes in absolute
                 * and we return it relative; the *newval argument comes
                 * in relative and we make it absolute.
                 */
                if (*oldval) {
                        if (*oldval <= now) {
                                /* Just about to fire. */
                                *oldval = cputime_one_jiffy;
                        } else {
                                *oldval -= now;
                        }
                }

                if (!*newval)
                        return;
                *newval += now;
        }

        /*
         * Update the expiration cache if we are now the earliest timer,
         * e.g. when the RLIMIT_CPU limit expires earlier than the
         * current prof_exp cpu timer.
         */
        switch (clock_idx) {
        case CPUCLOCK_PROF:
                if (expires_gt(tsk->signal->cputime_expires.prof_exp, *newval))
                        tsk->signal->cputime_expires.prof_exp = *newval;
                break;
        case CPUCLOCK_VIRT:
                if (expires_gt(tsk->signal->cputime_expires.virt_exp, *newval))
                        tsk->signal->cputime_expires.virt_exp = *newval;
                break;
        }

        tick_dep_set_signal(tsk->signal, TICK_DEP_BIT_POSIX_TIMER);
}

static int do_cpu_nanosleep(const clockid_t which_clock, int flags,
                            struct timespec *rqtp, struct itimerspec *it)
{
        struct k_itimer timer;
        int error;

        /*
         * Set up a temporary timer and then wait for it to go off.
         */
        memset(&timer, 0, sizeof timer);
        spin_lock_init(&timer.it_lock);
        timer.it_clock = which_clock;
        timer.it_overrun = -1;
        error = posix_cpu_timer_create(&timer);
        timer.it_process = current;
        if (!error) {
                static struct itimerspec zero_it;

                memset(it, 0, sizeof *it);
                it->it_value = *rqtp;

                spin_lock_irq(&timer.it_lock);
                error = posix_cpu_timer_set(&timer, flags, it, NULL);
                if (error) {
                        spin_unlock_irq(&timer.it_lock);
                        return error;
                }

                while (!signal_pending(current)) {
                        if (timer.it.cpu.expires == 0) {
                                /*
                                 * Our timer fired and was reset; the
                                 * deletion below cannot fail.
                                 */
                                posix_cpu_timer_del(&timer);
                                spin_unlock_irq(&timer.it_lock);
                                return 0;
                        }

                        /*
                         * Block until cpu_timer_fire (or a signal) wakes us.
                         */
                        __set_current_state(TASK_INTERRUPTIBLE);
                        spin_unlock_irq(&timer.it_lock);
                        schedule();
                        spin_lock_irq(&timer.it_lock);
                }

                /*
                 * We were interrupted by a signal.
                 */
                sample_to_timespec(which_clock, timer.it.cpu.expires, rqtp);
                error = posix_cpu_timer_set(&timer, 0, &zero_it, it);
                if (!error) {
                        /*
                         * Timer is now unarmed; deletion cannot fail.
                         */
                        posix_cpu_timer_del(&timer);
                }
                spin_unlock_irq(&timer.it_lock);

                while (error == TIMER_RETRY) {
                        /*
                         * We need to handle the case where the timer was
                         * or is in the middle of firing; in all other
                         * cases the resources were already freed.
                         */
                        spin_lock_irq(&timer.it_lock);
                        error = posix_cpu_timer_del(&timer);
                        spin_unlock_irq(&timer.it_lock);
                }

                if ((it->it_value.tv_sec | it->it_value.tv_nsec) == 0) {
                        /*
                         * It actually did fire already.
                         */
                        return 0;
                }

                error = -ERESTART_RESTARTBLOCK;
        }

        return error;
}

static long posix_cpu_nsleep_restart(struct restart_block *restart_block);

static int posix_cpu_nsleep(const clockid_t which_clock, int flags,
                            struct timespec *rqtp, struct timespec __user *rmtp)
{
        struct restart_block *restart_block = &current->restart_block;
        struct itimerspec it;
        int error;

        /*
         * Diagnose required errors first.
         */
        if (CPUCLOCK_PERTHREAD(which_clock) &&
            (CPUCLOCK_PID(which_clock) == 0 ||
             CPUCLOCK_PID(which_clock) == current->pid))
                return -EINVAL;

        error = do_cpu_nanosleep(which_clock, flags, rqtp, &it);

        if (error == -ERESTART_RESTARTBLOCK) {
                if (flags & TIMER_ABSTIME)
                        return -ERESTARTNOHAND;
                /*
                 * Report back to the user the time still remaining.
                 */
                if (rmtp && copy_to_user(rmtp, &it.it_value, sizeof *rmtp))
                        return -EFAULT;

                restart_block->fn = posix_cpu_nsleep_restart;
                restart_block->nanosleep.clockid = which_clock;
                restart_block->nanosleep.rmtp = rmtp;
                restart_block->nanosleep.expires = timespec_to_ns(rqtp);
        }
        return error;
}

static long posix_cpu_nsleep_restart(struct restart_block *restart_block)
{
        clockid_t which_clock = restart_block->nanosleep.clockid;
        struct timespec t;
        struct itimerspec it;
        int error;

        t = ns_to_timespec(restart_block->nanosleep.expires);

        error = do_cpu_nanosleep(which_clock, TIMER_ABSTIME, &t, &it);

        if (error == -ERESTART_RESTARTBLOCK) {
                struct timespec __user *rmtp = restart_block->nanosleep.rmtp;
                /*
                 * Report back to the user the time still remaining.
                 */
                if (rmtp && copy_to_user(rmtp, &it.it_value, sizeof *rmtp))
                        return -EFAULT;

                restart_block->nanosleep.expires = timespec_to_ns(&t);
        }
        return error;
}
1412
1413#define PROCESS_CLOCK   MAKE_PROCESS_CPUCLOCK(0, CPUCLOCK_SCHED)
1414#define THREAD_CLOCK    MAKE_THREAD_CPUCLOCK(0, CPUCLOCK_SCHED)
1415
1416static int process_cpu_clock_getres(const clockid_t which_clock,
1417                                    struct timespec *tp)
1418{
1419        return posix_cpu_clock_getres(PROCESS_CLOCK, tp);
1420}
1421static int process_cpu_clock_get(const clockid_t which_clock,
1422                                 struct timespec *tp)
1423{
1424        return posix_cpu_clock_get(PROCESS_CLOCK, tp);
1425}
1426static int process_cpu_timer_create(struct k_itimer *timer)
1427{
1428        timer->it_clock = PROCESS_CLOCK;
1429        return posix_cpu_timer_create(timer);
1430}
1431static int process_cpu_nsleep(const clockid_t which_clock, int flags,
1432                              struct timespec *rqtp,
1433                              struct timespec __user *rmtp)
1434{
1435        return posix_cpu_nsleep(PROCESS_CLOCK, flags, rqtp, rmtp);
1436}
1437static long process_cpu_nsleep_restart(struct restart_block *restart_block)
1438{
1439        return -EINVAL;
1440}
1441static int thread_cpu_clock_getres(const clockid_t which_clock,
1442                                   struct timespec *tp)
1443{
1444        return posix_cpu_clock_getres(THREAD_CLOCK, tp);
1445}
1446static int thread_cpu_clock_get(const clockid_t which_clock,
1447                                struct timespec *tp)
1448{
1449        return posix_cpu_clock_get(THREAD_CLOCK, tp);
1450}
1451static int thread_cpu_timer_create(struct k_itimer *timer)
1452{
1453        timer->it_clock = THREAD_CLOCK;
1454        return posix_cpu_timer_create(timer);
1455}
1456
1457struct k_clock clock_posix_cpu = {
1458        .clock_getres   = posix_cpu_clock_getres,
1459        .clock_set      = posix_cpu_clock_set,
1460        .clock_get      = posix_cpu_clock_get,
1461        .timer_create   = posix_cpu_timer_create,
1462        .nsleep         = posix_cpu_nsleep,
1463        .nsleep_restart = posix_cpu_nsleep_restart,
1464        .timer_set      = posix_cpu_timer_set,
1465        .timer_del      = posix_cpu_timer_del,
1466        .timer_get      = posix_cpu_timer_get,
1467};
1468
1469static __init int init_posix_cpu_timers(void)
1470{
1471        struct k_clock process = {
1472                .clock_getres   = process_cpu_clock_getres,
1473                .clock_get      = process_cpu_clock_get,
1474                .timer_create   = process_cpu_timer_create,
1475                .nsleep         = process_cpu_nsleep,
1476                .nsleep_restart = process_cpu_nsleep_restart,
1477        };
1478        struct k_clock thread = {
1479                .clock_getres   = thread_cpu_clock_getres,
1480                .clock_get      = thread_cpu_clock_get,
1481                .timer_create   = thread_cpu_timer_create,
1482        };
1483        struct timespec ts;
1484
1485        posix_timers_register_clock(CLOCK_PROCESS_CPUTIME_ID, &process);
1486        posix_timers_register_clock(CLOCK_THREAD_CPUTIME_ID, &thread);
1487
1488        cputime_to_timespec(cputime_one_jiffy, &ts);
1489        onecputick = ts.tv_nsec;
1490        WARN_ON(ts.tv_sec != 0);
1491
1492        return 0;
1493}
1494__initcall(init_posix_cpu_timers);
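
/*
 * Hedged userspace illustration (not kernel code): the clocks
 * registered above back the standard POSIX CPU-time timers, e.g.:
 *
 *        timer_t tid;
 *        struct sigevent sev = { .sigev_notify = SIGEV_SIGNAL,
 *                                .sigev_signo  = SIGPROF };
 *        struct itimerspec its = { .it_value = { .tv_sec = 1 } };
 *
 *        timer_create(CLOCK_THREAD_CPUTIME_ID, &sev, &tid);
 *        timer_settime(tid, 0, &its, NULL);
 *
 * lands in thread_cpu_timer_create() and posix_cpu_timer_set() and
 * fires once the calling thread has consumed one second of CPU time.
 */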