/*
 * Implement CPU time clocks for the POSIX clock interface.
 */

#include <linux/sched/signal.h>
#include <linux/sched/cputime.h>
#include <linux/posix-timers.h>
#include <linux/errno.h>
#include <linux/math64.h>
#include <linux/uaccess.h>
#include <linux/kernel_stat.h>
#include <trace/events/timer.h>
#include <linux/tick.h>
#include <linux/workqueue.h>
#include <linux/compat.h>

#include "posix-timers.h"

static void posix_cpu_timer_rearm(struct k_itimer *timer);

/*
 * Called after updating RLIMIT_CPU to run the cpu timer and update
 * tsk->signal->cputime_expires expiration cache if necessary. Needs
 * siglock protection since other code may update the expiration cache
 * as well.
 */
void update_rlimit_cpu(struct task_struct *task, unsigned long rlim_new)
{
	u64 nsecs = rlim_new * NSEC_PER_SEC;

	spin_lock_irq(&task->sighand->siglock);
	set_process_cpu_timer(task, CPUCLOCK_PROF, &nsecs, NULL);
	spin_unlock_irq(&task->sighand->siglock);
}

/*
 * Validate a CPU clock id: the clock type must be known and, if the id
 * names a specific task, that task must exist and be visible to the
 * caller.
 */
static int check_clock(const clockid_t which_clock)
{
	int error = 0;
	struct task_struct *p;
	const pid_t pid = CPUCLOCK_PID(which_clock);

	if (CPUCLOCK_WHICH(which_clock) >= CPUCLOCK_MAX)
		return -EINVAL;

	if (pid == 0)
		return 0;

	rcu_read_lock();
	p = find_task_by_vpid(pid);
	if (!p || !(CPUCLOCK_PERTHREAD(which_clock) ?
		   same_thread_group(p, current) : has_group_leader_pid(p))) {
		error = -EINVAL;
	}
	rcu_read_unlock();

	return error;
}
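
/*
 * Note on the clockid encoding (see <linux/posix-timers.h>): a CPU
 * clock id packs the target PID into the upper bits (pid == 0 means
 * the caller itself), a per-thread/per-process flag, and the clock
 * type (CPUCLOCK_PROF, CPUCLOCK_VIRT or CPUCLOCK_SCHED) in the low
 * bits, which is what the CPUCLOCK_* accessors used above extract.
 */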

/*
 * Update expiry time from increment, and increase overrun count,
 * given the current clock sample.
 */
static void bump_cpu_timer(struct k_itimer *timer, u64 now)
{
	int i;
	u64 delta, incr;

	if (timer->it.cpu.incr == 0)
		return;

	if (now < timer->it.cpu.expires)
		return;

	incr = timer->it.cpu.incr;
	delta = now + incr - timer->it.cpu.expires;

	/* Don't use (incr*2 < delta), incr*2 might overflow. */
	for (i = 0; incr < delta - incr; i++)
		incr = incr << 1;

	for (; i >= 0; incr >>= 1, i--) {
		if (delta < incr)
			continue;

		timer->it.cpu.expires += incr;
		timer->it_overrun += 1 << i;
		delta -= incr;
	}
}
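
/*
 * Worked example (illustrative numbers): expires = 100, incr = 10,
 * now = 137.  Then delta = 47.  The doubling loop stops with incr = 40
 * and i = 2; the halving loop adds 40 to expires (overrun += 1 << 2),
 * leaving delta = 7, and skips 20 and 10.  Result: expires = 140, the
 * first expiry strictly after "now", with four overruns recorded.
 */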

/**
 * task_cputime_zero - Check a task_cputime struct for all zero fields.
 *
 * @cputime:	The struct to compare.
 *
 * Checks @cputime to see if all fields are zero.  Returns true if all
 * fields are zero, false if any field is nonzero.
 */
static inline int task_cputime_zero(const struct task_cputime *cputime)
{
	if (!cputime->utime && !cputime->stime && !cputime->sum_exec_runtime)
		return 1;
	return 0;
}

static inline u64 prof_ticks(struct task_struct *p)
{
	u64 utime, stime;

	task_cputime(p, &utime, &stime);

	return utime + stime;
}

static inline u64 virt_ticks(struct task_struct *p)
{
	u64 utime, stime;

	task_cputime(p, &utime, &stime);

	return utime;
}

static int
posix_cpu_clock_getres(const clockid_t which_clock, struct timespec64 *tp)
{
	int error = check_clock(which_clock);
	if (!error) {
		tp->tv_sec = 0;
		tp->tv_nsec = ((NSEC_PER_SEC + HZ - 1) / HZ);
		if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
			/*
			 * If sched_clock is using a cycle counter, we
			 * don't have any idea of its true resolution
			 * exported, but it is much more than 1s/HZ.
			 */
			tp->tv_nsec = 1;
		}
	}
	return error;
}

static int
posix_cpu_clock_set(const clockid_t which_clock, const struct timespec64 *tp)
{
	/*
	 * You can never reset a CPU clock, but we check for other errors
	 * in the call before failing with EPERM.
	 */
	int error = check_clock(which_clock);
	if (error == 0) {
		error = -EPERM;
	}
	return error;
}

/*
 * Sample a per-thread clock for the given task.
 */
static int cpu_clock_sample(const clockid_t which_clock,
			    struct task_struct *p, u64 *sample)
{
	switch (CPUCLOCK_WHICH(which_clock)) {
	default:
		return -EINVAL;
	case CPUCLOCK_PROF:
		*sample = prof_ticks(p);
		break;
	case CPUCLOCK_VIRT:
		*sample = virt_ticks(p);
		break;
	case CPUCLOCK_SCHED:
		*sample = task_sched_runtime(p);
		break;
	}
	return 0;
}

/*
 * Set cputime to sum_cputime if sum_cputime > cputime. Use cmpxchg
 * to avoid race conditions with concurrent updates to cputime.
 */
static inline void __update_gt_cputime(atomic64_t *cputime, u64 sum_cputime)
{
	u64 curr_cputime;
retry:
	curr_cputime = atomic64_read(cputime);
	if (sum_cputime > curr_cputime) {
		if (atomic64_cmpxchg(cputime, curr_cputime, sum_cputime) != curr_cputime)
			goto retry;
	}
}

static void update_gt_cputime(struct task_cputime_atomic *cputime_atomic, struct task_cputime *sum)
{
	__update_gt_cputime(&cputime_atomic->utime, sum->utime);
	__update_gt_cputime(&cputime_atomic->stime, sum->stime);
	__update_gt_cputime(&cputime_atomic->sum_exec_runtime, sum->sum_exec_runtime);
}

/* Sample task_cputime_atomic values in "atomic_times", store results in "times". */
static inline void sample_cputime_atomic(struct task_cputime *times,
					 struct task_cputime_atomic *atomic_times)
{
	times->utime = atomic64_read(&atomic_times->utime);
	times->stime = atomic64_read(&atomic_times->stime);
	times->sum_exec_runtime = atomic64_read(&atomic_times->sum_exec_runtime);
}

void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times)
{
	struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;
	struct task_cputime sum;

	/* Check if cputimer isn't running. This is accessed without locking. */
	if (!READ_ONCE(cputimer->running)) {
		/*
		 * The POSIX timer interface allows for absolute time expiry
		 * values through the TIMER_ABSTIME flag, therefore we have
		 * to synchronize the timer to the clock every time we start it.
		 */
		thread_group_cputime(tsk, &sum);
		update_gt_cputime(&cputimer->cputime_atomic, &sum);

		/*
		 * We're setting cputimer->running without a lock. Ensure
		 * this only gets written to in one operation. We set
		 * running after update_gt_cputime() as a small optimization,
		 * but barriers are not required because update_gt_cputime()
		 * can handle concurrent updates.
		 */
		WRITE_ONCE(cputimer->running, true);
	}
	sample_cputime_atomic(times, &cputimer->cputime_atomic);
}
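
/*
 * Once "running" is set, the scheduler-tick accounting helpers
 * (account_group_user_time() and friends) keep cputime_atomic up to
 * date, so subsequent samples here are cheap atomic reads rather than
 * a walk over every thread in the group.
 */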

/*
 * Sample a process (thread group) clock for the given group_leader task.
 * Must be called with task sighand lock held for safe while_each_thread()
 * traversal.
 */
static int cpu_clock_sample_group(const clockid_t which_clock,
				  struct task_struct *p,
				  u64 *sample)
{
	struct task_cputime cputime;

	switch (CPUCLOCK_WHICH(which_clock)) {
	default:
		return -EINVAL;
	case CPUCLOCK_PROF:
		thread_group_cputime(p, &cputime);
		*sample = cputime.utime + cputime.stime;
		break;
	case CPUCLOCK_VIRT:
		thread_group_cputime(p, &cputime);
		*sample = cputime.utime;
		break;
	case CPUCLOCK_SCHED:
		thread_group_cputime(p, &cputime);
		*sample = cputime.sum_exec_runtime;
		break;
	}
	return 0;
}

static int posix_cpu_clock_get_task(struct task_struct *tsk,
				    const clockid_t which_clock,
				    struct timespec64 *tp)
{
	int err = -EINVAL;
	u64 rtn;

	if (CPUCLOCK_PERTHREAD(which_clock)) {
		if (same_thread_group(tsk, current))
			err = cpu_clock_sample(which_clock, tsk, &rtn);
	} else {
		if (tsk == current || thread_group_leader(tsk))
			err = cpu_clock_sample_group(which_clock, tsk, &rtn);
	}

	if (!err)
		*tp = ns_to_timespec64(rtn);

	return err;
}

static int posix_cpu_clock_get(const clockid_t which_clock, struct timespec64 *tp)
{
	const pid_t pid = CPUCLOCK_PID(which_clock);
	int err = -EINVAL;

	if (pid == 0) {
		/*
		 * Special case constant value for our own clocks.
		 * We don't have to do any lookup to find our own CPU clock.
		 */
		err = posix_cpu_clock_get_task(current, which_clock, tp);
	} else {
		/*
		 * Find the given PID, and validate that the caller
		 * should be able to see it.
		 */
		struct task_struct *p;
		rcu_read_lock();
		p = find_task_by_vpid(pid);
		if (p)
			err = posix_cpu_clock_get_task(p, which_clock, tp);
		rcu_read_unlock();
	}

	return err;
}

/*
 * Validate the clockid_t for a new CPU-clock timer, and initialize the
 * timer.  This is called from sys_timer_create() and do_cpu_nanosleep()
 * with the new timer already locked.
 */
static int posix_cpu_timer_create(struct k_itimer *new_timer)
{
	int ret = 0;
	const pid_t pid = CPUCLOCK_PID(new_timer->it_clock);
	struct task_struct *p;

	if (CPUCLOCK_WHICH(new_timer->it_clock) >= CPUCLOCK_MAX)
		return -EINVAL;

	new_timer->kclock = &clock_posix_cpu;

	INIT_LIST_HEAD(&new_timer->it.cpu.entry);

	rcu_read_lock();
	if (CPUCLOCK_PERTHREAD(new_timer->it_clock)) {
		if (pid == 0) {
			p = current;
		} else {
			p = find_task_by_vpid(pid);
			if (p && !same_thread_group(p, current))
				p = NULL;
		}
	} else {
		if (pid == 0) {
			p = current->group_leader;
		} else {
			p = find_task_by_vpid(pid);
			if (p && !has_group_leader_pid(p))
				p = NULL;
		}
	}
	new_timer->it.cpu.task = p;
	if (p) {
		get_task_struct(p);
	} else {
		ret = -EINVAL;
	}
	rcu_read_unlock();

	return ret;
}
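
/*
 * The task reference taken above pins it.cpu.task for the lifetime of
 * the timer; posix_cpu_timer_del() drops it with put_task_struct() once
 * the timer is safely off the task's list.
 */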

/*
 * Clean up a CPU-clock timer that is about to be destroyed.
 * This is called from timer deletion with the timer already locked,
 * but before unhooking it from its owner's signal struct.
 */
static int posix_cpu_timer_del(struct k_itimer *timer)
{
	int ret = 0;
	unsigned long flags;
	struct sighand_struct *sighand;
	struct task_struct *p = timer->it.cpu.task;

	WARN_ON_ONCE(p == NULL);

	/*
	 * Protect against sighand release/switch in exit/exec and against
	 * concurrent read/writes of the process/thread timer list entry.
	 */
	sighand = lock_task_sighand(p, &flags);
	if (unlikely(sighand == NULL)) {
		/*
		 * We raced with the reaping of the task.
		 * The exit cleanup should have removed us from the list.
		 */
		WARN_ON_ONCE(!list_empty(&timer->it.cpu.entry));
	} else {
		if (timer->it.cpu.firing)
			ret = TIMER_RETRY;
		else
			list_del(&timer->it.cpu.entry);

		unlock_task_sighand(p, &flags);
	}

	if (!ret)
		put_task_struct(p);

	return ret;
}

static void cleanup_timers_list(struct list_head *head)
{
	struct cpu_timer_list *timer, *next;

	list_for_each_entry_safe(timer, next, head, entry)
		list_del_init(&timer->entry);
}

/*
 * Clean out CPU timers still ticking when a thread exits.
 * This must be called with the siglock held.
 */
static void cleanup_timers(struct list_head *head)
{
	cleanup_timers_list(head);
	cleanup_timers_list(++head);
	cleanup_timers_list(++head);
}
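
/*
 * Layout note: both task_struct and signal_struct embed an array of
 * three timer lists indexed by CPUCLOCK_PROF, CPUCLOCK_VIRT and
 * CPUCLOCK_SCHED; the pointer increments above walk that array, and
 * arm_timer() below relies on the same indexing when it does
 * "head += CPUCLOCK_WHICH(timer->it_clock)".
 */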

/*
 * These are both called with the siglock held, when the current thread
 * is being reaped.  When the final thread in the group is reaped,
 * posix_cpu_timers_exit_group() is called after posix_cpu_timers_exit().
 */
void posix_cpu_timers_exit(struct task_struct *tsk)
{
	cleanup_timers(tsk->cpu_timers);
}
void posix_cpu_timers_exit_group(struct task_struct *tsk)
{
	cleanup_timers(tsk->signal->cpu_timers);
}

static inline int expires_gt(u64 expires, u64 new_exp)
{
	return expires == 0 || expires > new_exp;
}

/*
 * Insert the timer on the appropriate list before any timers that
 * expire later.  This must be called with the sighand lock held.
 */
static void arm_timer(struct k_itimer *timer)
{
	struct task_struct *p = timer->it.cpu.task;
	struct list_head *head, *listpos;
	struct task_cputime *cputime_expires;
	struct cpu_timer_list *const nt = &timer->it.cpu;
	struct cpu_timer_list *next;

	if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
		head = p->cpu_timers;
		cputime_expires = &p->cputime_expires;
	} else {
		head = p->signal->cpu_timers;
		cputime_expires = &p->signal->cputime_expires;
	}
	head += CPUCLOCK_WHICH(timer->it_clock);

	listpos = head;
	list_for_each_entry(next, head, entry) {
		if (nt->expires < next->expires)
			break;
		listpos = &next->entry;
	}
	list_add(&nt->entry, listpos);

	if (listpos == head) {
		u64 exp = nt->expires;

		/*
		 * We are the new earliest-expiring POSIX 1.b timer, hence
		 * need to update the expiration cache. Take into account
		 * that for process timers we share the expiration cache
		 * with itimers and RLIMIT_CPU, and for thread timers with
		 * RLIMIT_RTTIME.
		 */
		switch (CPUCLOCK_WHICH(timer->it_clock)) {
		case CPUCLOCK_PROF:
			if (expires_gt(cputime_expires->prof_exp, exp))
				cputime_expires->prof_exp = exp;
			break;
		case CPUCLOCK_VIRT:
			if (expires_gt(cputime_expires->virt_exp, exp))
				cputime_expires->virt_exp = exp;
			break;
		case CPUCLOCK_SCHED:
			if (expires_gt(cputime_expires->sched_exp, exp))
				cputime_expires->sched_exp = exp;
			break;
		}
		if (CPUCLOCK_PERTHREAD(timer->it_clock))
			tick_dep_set_task(p, TICK_DEP_BIT_POSIX_TIMER);
		else
			tick_dep_set_signal(p->signal, TICK_DEP_BIT_POSIX_TIMER);
	}
}

/*
 * The timer is locked, fire it and arrange for its reload.
 */
static void cpu_timer_fire(struct k_itimer *timer)
{
	if ((timer->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE) {
		/*
		 * The user doesn't want any signal.
		 */
		timer->it.cpu.expires = 0;
	} else if (unlikely(timer->sigq == NULL)) {
		/*
		 * This is a special case for clock_nanosleep,
		 * not a normal timer from sys_timer_create.
		 */
		wake_up_process(timer->it_process);
		timer->it.cpu.expires = 0;
	} else if (timer->it.cpu.incr == 0) {
		/*
		 * One-shot timer.  Clear it as soon as it's fired.
		 */
		posix_timer_event(timer, 0);
		timer->it.cpu.expires = 0;
	} else if (posix_timer_event(timer, ++timer->it_requeue_pending)) {
		/*
		 * The signal did not get queued because the signal
		 * was ignored, so we won't get any callback to
		 * reload the timer.  But we need to keep it
		 * ticking in case the signal is deliverable next time.
		 */
		posix_cpu_timer_rearm(timer);
		++timer->it_requeue_pending;
	}
}

/*
 * Sample a process (thread group) timer for the given group_leader task.
 * Must be called with task sighand lock held for safe while_each_thread()
 * traversal.
 */
static int cpu_timer_sample_group(const clockid_t which_clock,
				  struct task_struct *p, u64 *sample)
{
	struct task_cputime cputime;

	thread_group_cputimer(p, &cputime);
	switch (CPUCLOCK_WHICH(which_clock)) {
	default:
		return -EINVAL;
	case CPUCLOCK_PROF:
		*sample = cputime.utime + cputime.stime;
		break;
	case CPUCLOCK_VIRT:
		*sample = cputime.utime;
		break;
	case CPUCLOCK_SCHED:
		*sample = cputime.sum_exec_runtime;
		break;
	}
	return 0;
}
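
/*
 * Unlike cpu_clock_sample_group() above, which sums over all threads on
 * every call, this variant goes through thread_group_cputimer() and so
 * reads the cached atomic totals, turning the group cputimer on if it
 * wasn't already running.
 */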

/*
 * Guts of sys_timer_settime for CPU timers.
 * This is called with the timer locked and interrupts disabled.
 * If we return TIMER_RETRY, it's necessary to release the timer's lock
 * and try again.  (This happens when the timer is in the middle of firing.)
 */
static int posix_cpu_timer_set(struct k_itimer *timer, int timer_flags,
			       struct itimerspec64 *new, struct itimerspec64 *old)
{
	unsigned long flags;
	struct sighand_struct *sighand;
	struct task_struct *p = timer->it.cpu.task;
	u64 old_expires, new_expires, old_incr, val;
	int ret;

	WARN_ON_ONCE(p == NULL);

	/*
	 * Use the to_ktime conversion because that clamps the maximum
	 * value to KTIME_MAX and avoids multiplication overflows.
	 */
	new_expires = ktime_to_ns(timespec64_to_ktime(new->it_value));

	/*
	 * Protect against sighand release/switch in exit/exec and
	 * p->cpu_timers and p->signal->cpu_timers read/write in arm_timer().
	 */
	sighand = lock_task_sighand(p, &flags);
	/*
	 * If p has just been reaped, we can no
	 * longer get any information about it at all.
	 */
	if (unlikely(sighand == NULL)) {
		return -ESRCH;
	}

	/*
	 * Disarm any old timer after extracting its expiry time.
	 */
	WARN_ON_ONCE(!irqs_disabled());

	ret = 0;
	old_incr = timer->it.cpu.incr;
	old_expires = timer->it.cpu.expires;
	if (unlikely(timer->it.cpu.firing)) {
		timer->it.cpu.firing = -1;
		ret = TIMER_RETRY;
	} else
		list_del_init(&timer->it.cpu.entry);

	/*
	 * We need to sample the current value to convert the new
	 * value from relative to absolute, and to convert the old
	 * value from absolute to relative.  To set a process timer,
	 * we need a sample to balance the thread expiry times (in
	 * arm_timer).  With an absolute time, we must check if it's
	 * already passed.  In short, we need a sample.
	 */
	if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
		cpu_clock_sample(timer->it_clock, p, &val);
	} else {
		cpu_timer_sample_group(timer->it_clock, p, &val);
	}

	if (old) {
		if (old_expires == 0) {
			old->it_value.tv_sec = 0;
			old->it_value.tv_nsec = 0;
		} else {
			/*
			 * Update the timer in case it has
			 * overrun already.  If it has,
			 * we'll report it as having overrun
			 * and with the next reloaded timer
			 * already ticking, though we are
			 * swallowing that pending
			 * notification here to install the
			 * new setting.
			 */
			bump_cpu_timer(timer, val);
			if (val < timer->it.cpu.expires) {
				old_expires = timer->it.cpu.expires - val;
				old->it_value = ns_to_timespec64(old_expires);
			} else {
				old->it_value.tv_nsec = 1;
				old->it_value.tv_sec = 0;
			}
		}
	}

	if (unlikely(ret)) {
		/*
		 * We are colliding with the timer actually firing.
		 * Punt after filling in the timer's old value, and
		 * disable this firing since we are already reporting
		 * it as an overrun (thanks to bump_cpu_timer above).
		 */
		unlock_task_sighand(p, &flags);
		goto out;
	}

	if (new_expires != 0 && !(timer_flags & TIMER_ABSTIME)) {
		new_expires += val;
	}

	/*
	 * Install the new expiry time (or zero).
	 * For a timer with no notification action, we don't actually
	 * arm the timer (we'll just fake it for timer_gettime).
	 */
	timer->it.cpu.expires = new_expires;
	if (new_expires != 0 && val < new_expires) {
		arm_timer(timer);
	}

	unlock_task_sighand(p, &flags);
	/*
	 * Install the new reload setting, and
	 * set up the signal and overrun bookkeeping.
	 */
	timer->it.cpu.incr = timespec64_to_ns(&new->it_interval);

	/*
	 * This acts as a modification timestamp for the timer,
	 * so any automatic reload attempt will punt on seeing
	 * that we have reset the timer manually.
	 */
	timer->it_requeue_pending = (timer->it_requeue_pending + 2) &
		~REQUEUE_PENDING;
	timer->it_overrun_last = 0;
	timer->it_overrun = -1;

	if (new_expires != 0 && !(val < new_expires)) {
		/*
		 * The designated time already passed, so we notify
		 * immediately, even if the thread never runs to
		 * accumulate more time on this clock.
		 */
		cpu_timer_fire(timer);
	}

	ret = 0;
out:
	if (old)
		old->it_interval = ns_to_timespec64(old_incr);

	return ret;
}
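
/*
 * On TIMER_RETRY the generic posix-timers core (do_timer_settime())
 * drops the timer lock and retries the whole operation, giving the
 * in-flight firing a chance to complete.
 */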

static void posix_cpu_timer_get(struct k_itimer *timer, struct itimerspec64 *itp)
{
	u64 now;
	struct task_struct *p = timer->it.cpu.task;

	WARN_ON_ONCE(p == NULL);

	/*
	 * Easy part: convert the reload time.
	 */
	itp->it_interval = ns_to_timespec64(timer->it.cpu.incr);

	if (!timer->it.cpu.expires)
		return;

	/*
	 * Sample the clock to take the difference with the expiry time.
	 */
	if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
		cpu_clock_sample(timer->it_clock, p, &now);
	} else {
		struct sighand_struct *sighand;
		unsigned long flags;

		/*
		 * Protect against sighand release/switch in exit/exec and
		 * also make timer sampling safe if it ends up calling
		 * thread_group_cputime().
		 */
		sighand = lock_task_sighand(p, &flags);
		if (unlikely(sighand == NULL)) {
			/*
			 * The process has been reaped.
			 * We can't even collect a sample any more.
			 * Call the timer disarmed, nothing else to do.
			 */
			timer->it.cpu.expires = 0;
			return;
		} else {
			cpu_timer_sample_group(timer->it_clock, p, &now);
			unlock_task_sighand(p, &flags);
		}
	}

	if (now < timer->it.cpu.expires) {
		itp->it_value = ns_to_timespec64(timer->it.cpu.expires - now);
	} else {
		/*
		 * The timer should have expired already, but the firing
		 * hasn't taken place yet.  Say it's just about to expire.
		 */
		itp->it_value.tv_nsec = 1;
		itp->it_value.tv_sec = 0;
	}
}

/*
 * Scan the given expiry-ordered timer list.  Move a bounded batch of
 * expired timers onto the firing list, and return the expiry time of
 * the first timer left on the list, or 0 if the list is now empty.
 */
static unsigned long long
check_timers_list(struct list_head *timers,
		  struct list_head *firing,
		  unsigned long long curr)
{
	int maxfire = 20;

	while (!list_empty(timers)) {
		struct cpu_timer_list *t;

		t = list_first_entry(timers, struct cpu_timer_list, entry);

		if (!--maxfire || curr < t->expires)
			return t->expires;

		t->firing = 1;
		list_move_tail(&t->entry, firing);
	}

	return 0;
}
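
/*
 * The maxfire cap bounds the work done in a single tick.  Anything past
 * the batch limit simply stays queued; its expiry time is written back
 * into the expiration cache by the callers below, so the fast path
 * triggers again and it fires on a later tick.
 */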

/*
 * Check for any per-thread CPU timers that have fired and move them off
 * the tsk->cpu_timers[N] list onto the firing list.  Here we update the
 * tsk->cputime_expires struct to reflect the remaining expiry times.
 */
static void check_thread_timers(struct task_struct *tsk,
				struct list_head *firing)
{
	struct list_head *timers = tsk->cpu_timers;
	struct signal_struct *const sig = tsk->signal;
	struct task_cputime *tsk_expires = &tsk->cputime_expires;
	u64 expires;
	unsigned long soft;

	/*
	 * If cputime_expires is zero, then there are no active
	 * per thread CPU timers.
	 */
	if (task_cputime_zero(&tsk->cputime_expires))
		return;

	expires = check_timers_list(timers, firing, prof_ticks(tsk));
	tsk_expires->prof_exp = expires;

	expires = check_timers_list(++timers, firing, virt_ticks(tsk));
	tsk_expires->virt_exp = expires;

	tsk_expires->sched_exp = check_timers_list(++timers, firing,
						   tsk->se.sum_exec_runtime);

	/*
	 * Check for the special case thread timers.
	 */
	soft = READ_ONCE(sig->rlim[RLIMIT_RTTIME].rlim_cur);
	if (soft != RLIM_INFINITY) {
		unsigned long hard =
			READ_ONCE(sig->rlim[RLIMIT_RTTIME].rlim_max);

		if (hard != RLIM_INFINITY &&
		    tsk->rt.timeout > DIV_ROUND_UP(hard, USEC_PER_SEC/HZ)) {
			/*
			 * At the hard limit, we just die.
			 * No need to calculate anything else now.
			 */
			if (print_fatal_signals) {
				pr_info("RT Watchdog Timeout (hard): %s[%d]\n",
					tsk->comm, task_pid_nr(tsk));
			}
			__group_send_sig_info(SIGKILL, SEND_SIG_PRIV, tsk);
			return;
		}
		if (tsk->rt.timeout > DIV_ROUND_UP(soft, USEC_PER_SEC/HZ)) {
			/*
			 * At the soft limit, send a SIGXCPU every second.
			 */
			if (soft < hard) {
				soft += USEC_PER_SEC;
				sig->rlim[RLIMIT_RTTIME].rlim_cur = soft;
			}
			if (print_fatal_signals) {
				pr_info("RT Watchdog Timeout (soft): %s[%d]\n",
					tsk->comm, task_pid_nr(tsk));
			}
			__group_send_sig_info(SIGXCPU, SEND_SIG_PRIV, tsk);
		}
	}
	if (task_cputime_zero(tsk_expires))
		tick_dep_clear_task(tsk, TICK_DEP_BIT_POSIX_TIMER);
}
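
/*
 * Unit note: tsk->rt.timeout counts scheduler ticks, while RLIMIT_RTTIME
 * is specified in microseconds, hence the
 * DIV_ROUND_UP(limit, USEC_PER_SEC/HZ) conversions above.
 */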

static inline void stop_process_timers(struct signal_struct *sig)
{
	struct thread_group_cputimer *cputimer = &sig->cputimer;

	/* Turn off cputimer->running. This is done without locking. */
	WRITE_ONCE(cputimer->running, false);
	tick_dep_clear_signal(sig, TICK_DEP_BIT_POSIX_TIMER);
}

/*
 * Check one process-wide itimer (ITIMER_PROF or ITIMER_VIRTUAL): deliver
 * its signal and reload it if it has expired, and fold its expiry time
 * into *expires for the expiration cache.
 */
static void check_cpu_itimer(struct task_struct *tsk, struct cpu_itimer *it,
			     u64 *expires, u64 cur_time, int signo)
{
	if (!it->expires)
		return;

	if (cur_time >= it->expires) {
		if (it->incr)
			it->expires += it->incr;
		else
			it->expires = 0;

		trace_itimer_expire(signo == SIGPROF ?
				    ITIMER_PROF : ITIMER_VIRTUAL,
				    tsk->signal->leader_pid, cur_time);
		__group_send_sig_info(signo, SEND_SIG_PRIV, tsk);
	}

	if (it->expires && (!*expires || it->expires < *expires))
		*expires = it->expires;
}

/*
 * Check for any per-process CPU timers that have fired and move them
 * off the tsk->signal->cpu_timers list onto the firing list.  Per-thread
 * timers have already been taken off.
 */
static void check_process_timers(struct task_struct *tsk,
				 struct list_head *firing)
{
	struct signal_struct *const sig = tsk->signal;
	u64 utime, ptime, virt_expires, prof_expires;
	u64 sum_sched_runtime, sched_expires;
	struct list_head *timers = sig->cpu_timers;
	struct task_cputime cputime;
	unsigned long soft;

	/*
	 * If cputimer is not running, then there are no active
	 * process wide timers (POSIX 1.b, itimers, RLIMIT_CPU).
	 */
	if (!READ_ONCE(tsk->signal->cputimer.running))
		return;

	/*
	 * Signify that a thread is checking for process timers.
	 * Write access to this field is protected by the sighand lock.
	 */
	sig->cputimer.checking_timer = true;

	/*
	 * Collect the current process totals.
	 */
	thread_group_cputimer(tsk, &cputime);
	utime = cputime.utime;
	ptime = utime + cputime.stime;
	sum_sched_runtime = cputime.sum_exec_runtime;

	prof_expires = check_timers_list(timers, firing, ptime);
	virt_expires = check_timers_list(++timers, firing, utime);
	sched_expires = check_timers_list(++timers, firing, sum_sched_runtime);

	/*
	 * Check for the special case process timers.
	 */
	check_cpu_itimer(tsk, &sig->it[CPUCLOCK_PROF], &prof_expires, ptime,
			 SIGPROF);
	check_cpu_itimer(tsk, &sig->it[CPUCLOCK_VIRT], &virt_expires, utime,
			 SIGVTALRM);
	soft = READ_ONCE(sig->rlim[RLIMIT_CPU].rlim_cur);
	if (soft != RLIM_INFINITY) {
		unsigned long psecs = div_u64(ptime, NSEC_PER_SEC);
		unsigned long hard =
			READ_ONCE(sig->rlim[RLIMIT_CPU].rlim_max);
		u64 x;
		if (psecs >= hard) {
			/*
			 * At the hard limit, we just die.
			 * No need to calculate anything else now.
			 */
			if (print_fatal_signals) {
				pr_info("CPU Watchdog Timeout (hard): %s[%d]\n",
					tsk->comm, task_pid_nr(tsk));
			}
			__group_send_sig_info(SIGKILL, SEND_SIG_PRIV, tsk);
			return;
		}
		if (psecs >= soft) {
			/*
			 * At the soft limit, send a SIGXCPU every second.
			 */
			if (print_fatal_signals) {
				pr_info("CPU Watchdog Timeout (soft): %s[%d]\n",
					tsk->comm, task_pid_nr(tsk));
			}
			__group_send_sig_info(SIGXCPU, SEND_SIG_PRIV, tsk);
			if (soft < hard) {
				soft++;
				sig->rlim[RLIMIT_CPU].rlim_cur = soft;
			}
		}
		x = soft * NSEC_PER_SEC;
		if (!prof_expires || x < prof_expires)
			prof_expires = x;
	}

	sig->cputime_expires.prof_exp = prof_expires;
	sig->cputime_expires.virt_exp = virt_expires;
	sig->cputime_expires.sched_exp = sched_expires;
	if (task_cputime_zero(&sig->cputime_expires))
		stop_process_timers(sig);

	sig->cputimer.checking_timer = false;
}

/*
 * This is called from the signal code (via posixtimer_rearm)
 * when the last timer signal was delivered and we have to reload the timer.
 */
static void posix_cpu_timer_rearm(struct k_itimer *timer)
{
	struct sighand_struct *sighand;
	unsigned long flags;
	struct task_struct *p = timer->it.cpu.task;
	u64 now;

	WARN_ON_ONCE(p == NULL);

	/*
	 * Fetch the current sample and update the timer's expiry time.
	 */
	if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
		cpu_clock_sample(timer->it_clock, p, &now);
		bump_cpu_timer(timer, now);
		if (unlikely(p->exit_state))
			return;

		/* Protect timer list r/w in arm_timer() */
		sighand = lock_task_sighand(p, &flags);
		if (!sighand)
			return;
	} else {
		/*
		 * Protect arm_timer() and timer sampling in case of call to
		 * thread_group_cputime().
		 */
		sighand = lock_task_sighand(p, &flags);
		if (unlikely(sighand == NULL)) {
			/*
			 * The process has been reaped.
			 * We can't even collect a sample any more.
			 */
			timer->it.cpu.expires = 0;
			return;
		} else if (unlikely(p->exit_state) && thread_group_empty(p)) {
			/* If the process is dying, no need to rearm */
			goto unlock;
		}
		cpu_timer_sample_group(timer->it_clock, p, &now);
		bump_cpu_timer(timer, now);
		/* Leave the sighand locked for the call below. */
	}

	/*
	 * Now re-arm for the new expiry time.
	 */
	WARN_ON_ONCE(!irqs_disabled());
	arm_timer(timer);
unlock:
	unlock_task_sighand(p, &flags);
}

/**
 * task_cputime_expired - Compare two task_cputime entities.
 *
 * @sample:	The task_cputime structure to be checked for expiration.
 * @expires:	Expiration times, against which @sample will be checked.
 *
 * Checks @sample against @expires to see if any field of @sample has expired.
 * Returns true if any field of the former is greater than the corresponding
 * field of the latter if the latter field is set.  Otherwise returns false.
 */
static inline int task_cputime_expired(const struct task_cputime *sample,
				       const struct task_cputime *expires)
{
	if (expires->utime && sample->utime >= expires->utime)
		return 1;
	if (expires->stime && sample->utime + sample->stime >= expires->stime)
		return 1;
	if (expires->sum_exec_runtime != 0 &&
	    sample->sum_exec_runtime >= expires->sum_exec_runtime)
		return 1;
	return 0;
}

/**
 * fastpath_timer_check - POSIX CPU timers fast path.
 *
 * @tsk:	The task (thread) being checked.
 *
 * Check the task and thread group timers.  If both are zero (there are no
 * timers set) return false.  Otherwise snapshot the task and thread group
 * timers and compare them with the corresponding expiration times.  Return
 * true if a timer has expired, else return false.
 */
static inline int fastpath_timer_check(struct task_struct *tsk)
{
	struct signal_struct *sig;

	if (!task_cputime_zero(&tsk->cputime_expires)) {
		struct task_cputime task_sample;

		task_cputime(tsk, &task_sample.utime, &task_sample.stime);
		task_sample.sum_exec_runtime = tsk->se.sum_exec_runtime;
		if (task_cputime_expired(&task_sample, &tsk->cputime_expires))
			return 1;
	}

	sig = tsk->signal;
	/*
	 * Check if thread group timers expired when the cputimer is
	 * running and no other thread in the group is already checking
	 * for thread group cputimers. These fields are read without the
	 * sighand lock. However, this is fine because this is meant to
	 * be a fastpath heuristic to determine whether we should try to
	 * acquire the sighand lock to check/handle timers.
	 *
	 * In the worst case scenario, if 'running' or 'checking_timer' gets
	 * set but the current thread doesn't see the change yet, we'll wait
	 * until the next thread in the group gets a scheduler interrupt to
	 * handle the timer. This isn't an issue in practice because these
	 * types of delays with signals actually getting sent are expected.
	 */
	if (READ_ONCE(sig->cputimer.running) &&
	    !READ_ONCE(sig->cputimer.checking_timer)) {
		struct task_cputime group_sample;

		sample_cputime_atomic(&group_sample, &sig->cputimer.cputime_atomic);

		if (task_cputime_expired(&group_sample, &sig->cputime_expires))
			return 1;
	}

	return 0;
}

/*
 * This is called from the timer interrupt handler.  The irq handler has
 * already updated our counts.  We need to check if any timers fire now.
 * Interrupts are disabled.
 */
void run_posix_cpu_timers(struct task_struct *tsk)
{
	LIST_HEAD(firing);
	struct k_itimer *timer, *next;
	unsigned long flags;

	WARN_ON_ONCE(!irqs_disabled());

	/*
	 * The fast path checks that there are no expired thread or thread
	 * group timers.  If that's so, just return.
	 */
	if (!fastpath_timer_check(tsk))
		return;

	if (!lock_task_sighand(tsk, &flags))
		return;
	/*
	 * Here we take off tsk->signal->cpu_timers[N] and
	 * tsk->cpu_timers[N] all the timers that are firing, and
	 * put them on the firing list.
	 */
	check_thread_timers(tsk, &firing);

	check_process_timers(tsk, &firing);

	/*
	 * We must release these locks before taking any timer's lock.
	 * There is a potential race with timer deletion here, as the
	 * siglock now protects our private firing list.  We have set
	 * the firing flag in each timer, so that a deletion attempt
	 * that gets the timer lock before we do will give it up and
	 * spin until we've taken care of that timer below.
	 */
	unlock_task_sighand(tsk, &flags);

	/*
	 * Now that all the timers on our list have the firing flag,
	 * no one will touch their list entries but us.  We'll take
	 * each timer's lock before clearing its firing flag, so no
	 * timer call will interfere.
	 */
	list_for_each_entry_safe(timer, next, &firing, it.cpu.entry) {
		int cpu_firing;

		spin_lock(&timer->it_lock);
		list_del_init(&timer->it.cpu.entry);
		cpu_firing = timer->it.cpu.firing;
		timer->it.cpu.firing = 0;
		/*
		 * The firing flag is -1 if we collided with a reset
		 * of the timer, which already reported this
		 * almost-firing as an overrun.  So don't generate an event.
		 */
		if (likely(cpu_firing >= 0))
			cpu_timer_fire(timer);
		spin_unlock(&timer->it_lock);
	}
}

/*
 * Set one of the process-wide special case CPU timers or RLIMIT_CPU.
 * The tsk->sighand->siglock must be held by the caller.
 */
void set_process_cpu_timer(struct task_struct *tsk, unsigned int clock_idx,
			   u64 *newval, u64 *oldval)
{
	u64 now;

	WARN_ON_ONCE(clock_idx == CPUCLOCK_SCHED);
	cpu_timer_sample_group(clock_idx, tsk, &now);

	if (oldval) {
		/*
		 * We are setting itimer. The *oldval is absolute and we update
		 * it to be relative, *newval argument is relative and we update
		 * it to be absolute.
		 */
		if (*oldval) {
			if (*oldval <= now) {
				/* Just about to fire. */
				*oldval = TICK_NSEC;
			} else {
				*oldval -= now;
			}
		}

		if (!*newval)
			return;
		*newval += now;
	}

	/*
	 * Update the expiration cache if we are the earliest timer, or if
	 * the RLIMIT_CPU limit is earlier than the prof_exp cpu timer expiry.
	 */
	switch (clock_idx) {
	case CPUCLOCK_PROF:
		if (expires_gt(tsk->signal->cputime_expires.prof_exp, *newval))
			tsk->signal->cputime_expires.prof_exp = *newval;
		break;
	case CPUCLOCK_VIRT:
		if (expires_gt(tsk->signal->cputime_expires.virt_exp, *newval))
			tsk->signal->cputime_expires.virt_exp = *newval;
		break;
	}

	tick_dep_set_signal(tsk->signal, TICK_DEP_BIT_POSIX_TIMER);
}
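
/*
 * Callers: set_cpu_itimer() (ITIMER_PROF / ITIMER_VIRTUAL, in
 * kernel/time/itimer.c) and update_rlimit_cpu() above, both with
 * siglock held as required.
 */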

static int do_cpu_nanosleep(const clockid_t which_clock, int flags,
			    const struct timespec64 *rqtp)
{
	struct itimerspec64 it;
	struct k_itimer timer;
	u64 expires;
	int error;

	/*
	 * Set up a temporary timer and then wait for it to go off.
	 */
	memset(&timer, 0, sizeof timer);
	spin_lock_init(&timer.it_lock);
	timer.it_clock = which_clock;
	timer.it_overrun = -1;
	error = posix_cpu_timer_create(&timer);
	timer.it_process = current;
	if (!error) {
		static struct itimerspec64 zero_it;
		struct restart_block *restart;

		memset(&it, 0, sizeof(it));
		it.it_value = *rqtp;

		spin_lock_irq(&timer.it_lock);
		error = posix_cpu_timer_set(&timer, flags, &it, NULL);
		if (error) {
			spin_unlock_irq(&timer.it_lock);
			return error;
		}

		while (!signal_pending(current)) {
			if (timer.it.cpu.expires == 0) {
				/*
				 * Our timer fired and was reset, below
				 * deletion can not fail.
				 */
				posix_cpu_timer_del(&timer);
				spin_unlock_irq(&timer.it_lock);
				return 0;
			}

			/*
			 * Block until cpu_timer_fire (or a signal) wakes us.
			 */
			__set_current_state(TASK_INTERRUPTIBLE);
			spin_unlock_irq(&timer.it_lock);
			schedule();
			spin_lock_irq(&timer.it_lock);
		}

		/*
		 * We were interrupted by a signal.
		 */
		expires = timer.it.cpu.expires;
		error = posix_cpu_timer_set(&timer, 0, &zero_it, &it);
		if (!error) {
			/*
			 * Timer is now unarmed, deletion can not fail.
			 */
			posix_cpu_timer_del(&timer);
		}
		spin_unlock_irq(&timer.it_lock);

		while (error == TIMER_RETRY) {
			/*
			 * We need to handle the case where the timer was or
			 * is in the middle of firing. In other cases we have
			 * already freed the resources.
			 */
			spin_lock_irq(&timer.it_lock);
			error = posix_cpu_timer_del(&timer);
			spin_unlock_irq(&timer.it_lock);
		}

		if ((it.it_value.tv_sec | it.it_value.tv_nsec) == 0) {
			/*
			 * It actually did fire already.
			 */
			return 0;
		}

		error = -ERESTART_RESTARTBLOCK;
		/*
		 * Report back to the user the time still remaining.
		 */
		restart = &current->restart_block;
		restart->nanosleep.expires = expires;
		if (restart->nanosleep.type != TT_NONE)
			error = nanosleep_copyout(restart, &it.it_value);
	}

	return error;
}
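
/*
 * Note: clock_nanosleep() on a CPU clock is implemented with this
 * temporary, stack-allocated timer.  Because it has no sigqueue
 * (timer.sigq == NULL), cpu_timer_fire() recognizes it and wakes the
 * sleeping task instead of queueing a signal.
 */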

static long posix_cpu_nsleep_restart(struct restart_block *restart_block);

static int posix_cpu_nsleep(const clockid_t which_clock, int flags,
			    const struct timespec64 *rqtp)
{
	struct restart_block *restart_block = &current->restart_block;
	int error;

	/*
	 * Diagnose required errors first.
	 */
	if (CPUCLOCK_PERTHREAD(which_clock) &&
	    (CPUCLOCK_PID(which_clock) == 0 ||
	     CPUCLOCK_PID(which_clock) == task_pid_vnr(current)))
		return -EINVAL;

	error = do_cpu_nanosleep(which_clock, flags, rqtp);

	if (error == -ERESTART_RESTARTBLOCK) {
		if (flags & TIMER_ABSTIME)
			return -ERESTARTNOHAND;

		restart_block->fn = posix_cpu_nsleep_restart;
		restart_block->nanosleep.clockid = which_clock;
	}
	return error;
}

static long posix_cpu_nsleep_restart(struct restart_block *restart_block)
{
	clockid_t which_clock = restart_block->nanosleep.clockid;
	struct timespec64 t;

	t = ns_to_timespec64(restart_block->nanosleep.expires);

	return do_cpu_nanosleep(which_clock, TIMER_ABSTIME, &t);
}

#define PROCESS_CLOCK	MAKE_PROCESS_CPUCLOCK(0, CPUCLOCK_SCHED)
#define THREAD_CLOCK	MAKE_THREAD_CPUCLOCK(0, CPUCLOCK_SCHED)
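
/*
 * These clockids back CLOCK_PROCESS_CPUTIME_ID and
 * CLOCK_THREAD_CPUTIME_ID: the scheduler clock of the calling process,
 * resp. the calling thread (pid 0 means "self").
 */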

static int process_cpu_clock_getres(const clockid_t which_clock,
				    struct timespec64 *tp)
{
	return posix_cpu_clock_getres(PROCESS_CLOCK, tp);
}
static int process_cpu_clock_get(const clockid_t which_clock,
				 struct timespec64 *tp)
{
	return posix_cpu_clock_get(PROCESS_CLOCK, tp);
}
static int process_cpu_timer_create(struct k_itimer *timer)
{
	timer->it_clock = PROCESS_CLOCK;
	return posix_cpu_timer_create(timer);
}
static int process_cpu_nsleep(const clockid_t which_clock, int flags,
			      const struct timespec64 *rqtp)
{
	return posix_cpu_nsleep(PROCESS_CLOCK, flags, rqtp);
}
static int thread_cpu_clock_getres(const clockid_t which_clock,
				   struct timespec64 *tp)
{
	return posix_cpu_clock_getres(THREAD_CLOCK, tp);
}
static int thread_cpu_clock_get(const clockid_t which_clock,
				struct timespec64 *tp)
{
	return posix_cpu_clock_get(THREAD_CLOCK, tp);
}
static int thread_cpu_timer_create(struct k_itimer *timer)
{
	timer->it_clock = THREAD_CLOCK;
	return posix_cpu_timer_create(timer);
}

const struct k_clock clock_posix_cpu = {
	.clock_getres	= posix_cpu_clock_getres,
	.clock_set	= posix_cpu_clock_set,
	.clock_get	= posix_cpu_clock_get,
	.timer_create	= posix_cpu_timer_create,
	.nsleep		= posix_cpu_nsleep,
	.timer_set	= posix_cpu_timer_set,
	.timer_del	= posix_cpu_timer_del,
	.timer_get	= posix_cpu_timer_get,
	.timer_rearm	= posix_cpu_timer_rearm,
};

const struct k_clock clock_process = {
	.clock_getres	= process_cpu_clock_getres,
	.clock_get	= process_cpu_clock_get,
	.timer_create	= process_cpu_timer_create,
	.nsleep		= process_cpu_nsleep,
};

const struct k_clock clock_thread = {
	.clock_getres	= thread_cpu_clock_getres,
	.clock_get	= thread_cpu_clock_get,
	.timer_create	= thread_cpu_timer_create,
};