// SPDX-License-Identifier: GPL-2.0
/*
 * Implement CPU time clocks for the POSIX clock interface.
 */

#include <linux/sched/signal.h>
#include <linux/sched/cputime.h>
#include <linux/posix-timers.h>
#include <linux/errno.h>
#include <linux/math64.h>
#include <linux/uaccess.h>
#include <linux/kernel_stat.h>
#include <trace/events/timer.h>
#include <linux/tick.h>
#include <linux/workqueue.h>
#include <linux/compat.h>
#include <linux/sched/deadline.h>

#include "posix-timers.h"

static void posix_cpu_timer_rearm(struct k_itimer *timer);

/*
 * Called after updating RLIMIT_CPU to run cpu timers and update
 * tsk->signal->cputime_expires expiration cache if necessary. Needs
 * siglock protection since other code may update the expiration cache
 * as well.
 */
void update_rlimit_cpu(struct task_struct *task, unsigned long rlim_new)
{
	u64 nsecs = rlim_new * NSEC_PER_SEC;

	spin_lock_irq(&task->sighand->siglock);
	set_process_cpu_timer(task, CPUCLOCK_PROF, &nsecs, NULL);
	spin_unlock_irq(&task->sighand->siglock);
}

static int check_clock(const clockid_t which_clock)
{
	int error = 0;
	struct task_struct *p;
	const pid_t pid = CPUCLOCK_PID(which_clock);

	if (CPUCLOCK_WHICH(which_clock) >= CPUCLOCK_MAX)
		return -EINVAL;

	if (pid == 0)
		return 0;

	rcu_read_lock();
	p = find_task_by_vpid(pid);
	if (!p || !(CPUCLOCK_PERTHREAD(which_clock) ?
		   same_thread_group(p, current) : has_group_leader_pid(p))) {
		error = -EINVAL;
	}
	rcu_read_unlock();

	return error;
}

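/*
 * Note on the clockid_t layout decoded by the CPUCLOCK_* macros above
 * (defined in <linux/posix-timers.h>; reproduced here for illustration):
 *
 *	bits 0-1: clock type (CPUCLOCK_PROF = 0, CPUCLOCK_VIRT = 1,
 *		  CPUCLOCK_SCHED = 2)
 *	bit    2: per-thread flag
 *	bits 3..: bitwise NOT of the PID, so CPUCLOCK_PID() is ~(clock >> 3)
 *		  and a PID of 0 means "the caller itself"
 *
 * User space normally obtains such ids via clock_getcpuclockid(3) or
 * pthread_getcpuclockid(3) rather than constructing them by hand.
 */
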
/*
 * Update expiry time from increment, and increase overrun count,
 * given the current clock sample.
 */
static void bump_cpu_timer(struct k_itimer *timer, u64 now)
{
	int i;
	u64 delta, incr;

	if (!timer->it_interval)
		return;

	if (now < timer->it.cpu.expires)
		return;

	incr = timer->it_interval;
	delta = now + incr - timer->it.cpu.expires;

	/* Don't use (incr*2 < delta), incr*2 might overflow. */
	for (i = 0; incr < delta - incr; i++)
		incr = incr << 1;

	for (; i >= 0; incr >>= 1, i--) {
		if (delta < incr)
			continue;

		timer->it.cpu.expires += incr;
		timer->it_overrun += 1LL << i;
		delta -= incr;
	}
}

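/*
 * Worked example for the two loops above (illustrative numbers only):
 * with expires = 10, it_interval = 3 and now = 25, delta = 25 + 3 - 10 = 18.
 * The first loop doubles incr while it still fits: 3 -> 6 -> 12 (i = 2).
 * The second loop then peels off 12 (it_overrun += 1 << 2) and 6
 * (it_overrun += 1 << 1), skipping the final 3, leaving expires = 28, the
 * first expiry strictly after "now", with six overruns recorded for the
 * missed expiries at 10, 13, 16, 19, 22 and 25.
 */
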
/**
 * task_cputime_zero - Check a task_cputime struct for all zero fields.
 *
 * @cputime:	The struct to compare.
 *
 * Checks @cputime to see if all fields are zero.  Returns true if all fields
 * are zero, false if any field is nonzero.
 */
static inline int task_cputime_zero(const struct task_cputime *cputime)
{
	if (!cputime->utime && !cputime->stime && !cputime->sum_exec_runtime)
		return 1;
	return 0;
}

static inline u64 prof_ticks(struct task_struct *p)
{
	u64 utime, stime;

	task_cputime(p, &utime, &stime);

	return utime + stime;
}

static inline u64 virt_ticks(struct task_struct *p)
{
	u64 utime, stime;

	task_cputime(p, &utime, &stime);

	return utime;
}

static int
posix_cpu_clock_getres(const clockid_t which_clock, struct timespec64 *tp)
{
	int error = check_clock(which_clock);
	if (!error) {
		tp->tv_sec = 0;
		tp->tv_nsec = ((NSEC_PER_SEC + HZ - 1) / HZ);
		if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
			/*
			 * If sched_clock is using a cycle counter, we
			 * don't have any idea of its true resolution
			 * exported, but it is much more than 1s/HZ.
			 */
			tp->tv_nsec = 1;
		}
	}
	return error;
}

static int
posix_cpu_clock_set(const clockid_t which_clock, const struct timespec64 *tp)
{
	/*
	 * You can never reset a CPU clock, but we check for other errors
	 * in the case that we try to set it.
	 */
	int error = check_clock(which_clock);
	if (error == 0) {
		error = -EPERM;
	}
	return error;
}


/*
 * Sample a per-thread clock for the given task.
 */
static int cpu_clock_sample(const clockid_t which_clock,
			    struct task_struct *p, u64 *sample)
{
	switch (CPUCLOCK_WHICH(which_clock)) {
	default:
		return -EINVAL;
	case CPUCLOCK_PROF:
		*sample = prof_ticks(p);
		break;
	case CPUCLOCK_VIRT:
		*sample = virt_ticks(p);
		break;
	case CPUCLOCK_SCHED:
		*sample = task_sched_runtime(p);
		break;
	}
	return 0;
}

/*
 * Set cputime to sum_cputime if sum_cputime > cputime. Use cmpxchg
 * to avoid race conditions with concurrent updates to cputime.
 */
static inline void __update_gt_cputime(atomic64_t *cputime, u64 sum_cputime)
{
	u64 curr_cputime;
retry:
	curr_cputime = atomic64_read(cputime);
	if (sum_cputime > curr_cputime) {
		if (atomic64_cmpxchg(cputime, curr_cputime, sum_cputime) != curr_cputime)
			goto retry;
	}
}

static void update_gt_cputime(struct task_cputime_atomic *cputime_atomic, struct task_cputime *sum)
{
	__update_gt_cputime(&cputime_atomic->utime, sum->utime);
	__update_gt_cputime(&cputime_atomic->stime, sum->stime);
	__update_gt_cputime(&cputime_atomic->sum_exec_runtime, sum->sum_exec_runtime);
}

/* Sample task_cputime atomically into @times. */
static inline void sample_cputime_atomic(struct task_cputime *times,
					 struct task_cputime_atomic *atomic_times)
{
	times->utime = atomic64_read(&atomic_times->utime);
	times->stime = atomic64_read(&atomic_times->stime);
	times->sum_exec_runtime = atomic64_read(&atomic_times->sum_exec_runtime);
}

211
212void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times)
213{
214 struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;
215 struct task_cputime sum;
216
217
218 if (!READ_ONCE(cputimer->running)) {
219
220
221
222
223
224 thread_group_cputime(tsk, &sum);
225 update_gt_cputime(&cputimer->cputime_atomic, &sum);
226
227
228
229
230
231
232
233
234 WRITE_ONCE(cputimer->running, true);
235 }
236 sample_cputime_atomic(times, &cputimer->cputime_atomic);
237}

/*
 * Sample a process (thread group) clock for the given group_leader task.
 * Must be called with task sighand lock held for safe while_each_thread()
 * traversal.
 */
static int cpu_clock_sample_group(const clockid_t which_clock,
				  struct task_struct *p,
				  u64 *sample)
{
	struct task_cputime cputime;

	switch (CPUCLOCK_WHICH(which_clock)) {
	default:
		return -EINVAL;
	case CPUCLOCK_PROF:
		thread_group_cputime(p, &cputime);
		*sample = cputime.utime + cputime.stime;
		break;
	case CPUCLOCK_VIRT:
		thread_group_cputime(p, &cputime);
		*sample = cputime.utime;
		break;
	case CPUCLOCK_SCHED:
		thread_group_cputime(p, &cputime);
		*sample = cputime.sum_exec_runtime;
		break;
	}
	return 0;
}

static int posix_cpu_clock_get_task(struct task_struct *tsk,
				    const clockid_t which_clock,
				    struct timespec64 *tp)
{
	int err = -EINVAL;
	u64 rtn;

	if (CPUCLOCK_PERTHREAD(which_clock)) {
		if (same_thread_group(tsk, current))
			err = cpu_clock_sample(which_clock, tsk, &rtn);
	} else {
		if (tsk == current || thread_group_leader(tsk))
			err = cpu_clock_sample_group(which_clock, tsk, &rtn);
	}

	if (!err)
		*tp = ns_to_timespec64(rtn);

	return err;
}

static int posix_cpu_clock_get(const clockid_t which_clock, struct timespec64 *tp)
{
	const pid_t pid = CPUCLOCK_PID(which_clock);
	int err = -EINVAL;

	if (pid == 0) {
		/*
		 * Special case constant value for our own clocks.
		 * We don't have to do any lookup to find our own CPU clock.
		 */
		err = posix_cpu_clock_get_task(current, which_clock, tp);
	} else {
		/*
		 * Find the given PID, and validate that the caller
		 * should be able to see it.
		 */
		struct task_struct *p;
		rcu_read_lock();
		p = find_task_by_vpid(pid);
		if (p)
			err = posix_cpu_clock_get_task(p, which_clock, tp);
		rcu_read_unlock();
	}

	return err;
}
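
/*
 * User-space view (illustration only, not used by this file): the lookup
 * above is what ultimately services calls such as
 *
 *	struct timespec ts;
 *	clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &ts);	// own process (PID 0)
 *	clock_gettime(CLOCK_THREAD_CPUTIME_ID, &ts);	// own thread (PID 0)
 *
 * and, for another process, a clockid obtained via
 * clock_getcpuclockid(pid, &clkid).
 */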

/*
 * Validate the clockid_t for a new CPU-clock timer, and initialize the timer.
 * This is called from sys_timer_create() and do_cpu_nanosleep() with the
 * new timer already all-zeros initialized.
 */
static int posix_cpu_timer_create(struct k_itimer *new_timer)
{
	int ret = 0;
	const pid_t pid = CPUCLOCK_PID(new_timer->it_clock);
	struct task_struct *p;

	if (CPUCLOCK_WHICH(new_timer->it_clock) >= CPUCLOCK_MAX)
		return -EINVAL;

	new_timer->kclock = &clock_posix_cpu;

	INIT_LIST_HEAD(&new_timer->it.cpu.entry);

	rcu_read_lock();
	if (CPUCLOCK_PERTHREAD(new_timer->it_clock)) {
		if (pid == 0) {
			p = current;
		} else {
			p = find_task_by_vpid(pid);
			if (p && !same_thread_group(p, current))
				p = NULL;
		}
	} else {
		if (pid == 0) {
			p = current->group_leader;
		} else {
			p = find_task_by_vpid(pid);
			if (p && !has_group_leader_pid(p))
				p = NULL;
		}
	}
	new_timer->it.cpu.task = p;
	if (p) {
		get_task_struct(p);
	} else {
		ret = -EINVAL;
	}
	rcu_read_unlock();

	return ret;
}
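
/*
 * Illustration (user-space sketch, not part of this file): a signal-based
 * CPU-time timer on the calling thread lands here via clock_thread below,
 * with CPUCLOCK_PERTHREAD() true and PID 0, so p = current:
 *
 *	#include <signal.h>
 *	#include <time.h>
 *
 *	timer_t tid;
 *	struct sigevent sev = {
 *		.sigev_notify = SIGEV_SIGNAL,
 *		.sigev_signo  = SIGALRM,
 *	};
 *	timer_create(CLOCK_THREAD_CPUTIME_ID, &sev, &tid);
 */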

/*
 * Clean up a CPU-clock timer that is about to be destroyed.
 * This is called from timer deletion with the timer already locked.
 * If we return TIMER_RETRY, it's necessary to release the timer's lock
 * and try again.  (This happens when the timer is in the middle of firing.)
 */
static int posix_cpu_timer_del(struct k_itimer *timer)
{
	int ret = 0;
	unsigned long flags;
	struct sighand_struct *sighand;
	struct task_struct *p = timer->it.cpu.task;

	WARN_ON_ONCE(p == NULL);

	/*
	 * Protect against sighand release/switch in exit/exec and process/
	 * thread timer list entry concurrent read/writes.
	 */
	sighand = lock_task_sighand(p, &flags);
	if (unlikely(sighand == NULL)) {
		/*
		 * We raced with the reaping of the task.
		 * The deletion should have cleared us off the list.
		 */
		WARN_ON_ONCE(!list_empty(&timer->it.cpu.entry));
	} else {
		if (timer->it.cpu.firing)
			ret = TIMER_RETRY;
		else
			list_del(&timer->it.cpu.entry);

		unlock_task_sighand(p, &flags);
	}

	if (!ret)
		put_task_struct(p);

	return ret;
}

static void cleanup_timers_list(struct list_head *head)
{
	struct cpu_timer_list *timer, *next;

	list_for_each_entry_safe(timer, next, head, entry)
		list_del_init(&timer->entry);
}

/*
 * Clean out CPU timers still ticking when a thread exits.  The timers
 * are only unlinked from the three per-clock lists; the expiration
 * cache is left alone since the task is on its way out anyway.
 * This must be called with the siglock held.
 */
static void cleanup_timers(struct list_head *head)
{
	cleanup_timers_list(head);
	cleanup_timers_list(++head);
	cleanup_timers_list(++head);
}

/*
 * These are both called with the siglock held, when the current thread
 * is being reaped.  When the final (leader) thread in the group is reaped,
 * posix_cpu_timers_exit_group() is called after posix_cpu_timers_exit().
 */
void posix_cpu_timers_exit(struct task_struct *tsk)
{
	cleanup_timers(tsk->cpu_timers);
}

void posix_cpu_timers_exit_group(struct task_struct *tsk)
{
	cleanup_timers(tsk->signal->cpu_timers);
}

static inline int expires_gt(u64 expires, u64 new_exp)
{
	return expires == 0 || expires > new_exp;
}

/*
 * Insert the timer on the appropriate list before any timers that
 * expire later.  This must be called with the sighand lock held.
 */
static void arm_timer(struct k_itimer *timer)
{
	struct task_struct *p = timer->it.cpu.task;
	struct list_head *head, *listpos;
	struct task_cputime *cputime_expires;
	struct cpu_timer_list *const nt = &timer->it.cpu;
	struct cpu_timer_list *next;

	if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
		head = p->cpu_timers;
		cputime_expires = &p->cputime_expires;
	} else {
		head = p->signal->cpu_timers;
		cputime_expires = &p->signal->cputime_expires;
	}
	head += CPUCLOCK_WHICH(timer->it_clock);

	listpos = head;
	list_for_each_entry(next, head, entry) {
		if (nt->expires < next->expires)
			break;
		listpos = &next->entry;
	}
	list_add(&nt->entry, listpos);

	if (listpos == head) {
		u64 exp = nt->expires;

		/*
		 * We are the new earliest-expiring POSIX 1.b timer, hence
		 * need to update the expiration cache. Take into account
		 * that for process timers we share the expiration cache
		 * with itimers and RLIMIT_CPU, and for thread timers with
		 * RLIMIT_RTTIME.
		 */
		switch (CPUCLOCK_WHICH(timer->it_clock)) {
		case CPUCLOCK_PROF:
			if (expires_gt(cputime_expires->prof_exp, exp))
				cputime_expires->prof_exp = exp;
			break;
		case CPUCLOCK_VIRT:
			if (expires_gt(cputime_expires->virt_exp, exp))
				cputime_expires->virt_exp = exp;
			break;
		case CPUCLOCK_SCHED:
			if (expires_gt(cputime_expires->sched_exp, exp))
				cputime_expires->sched_exp = exp;
			break;
		}
		if (CPUCLOCK_PERTHREAD(timer->it_clock))
			tick_dep_set_task(p, TICK_DEP_BIT_POSIX_TIMER);
		else
			tick_dep_set_signal(p->signal, TICK_DEP_BIT_POSIX_TIMER);
	}
}

/*
 * The timer is locked, fire it and arrange for its reload.
 */
static void cpu_timer_fire(struct k_itimer *timer)
{
	if ((timer->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE) {
		/*
		 * The user doesn't want any signal.
		 */
		timer->it.cpu.expires = 0;
	} else if (unlikely(timer->sigq == NULL)) {
		/*
		 * This is a special case for clock_nanosleep,
		 * not a normal timer from sys_timer_create.
		 */
		wake_up_process(timer->it_process);
		timer->it.cpu.expires = 0;
	} else if (!timer->it_interval) {
		/*
		 * One-shot timer.  Clear it as soon as it's fired.
		 */
		posix_timer_event(timer, 0);
		timer->it.cpu.expires = 0;
	} else if (posix_timer_event(timer, ++timer->it_requeue_pending)) {
		/*
		 * The signal did not get queued because the signal
		 * was ignored, so we won't get any callback to
		 * reload the timer.  But we need to keep it
		 * ticking in case the signal is deliverable next time.
		 */
		posix_cpu_timer_rearm(timer);
		++timer->it_requeue_pending;
	}
}

/*
 * Sample a process (thread group) timer for the given group_leader task.
 * Must be called with task sighand lock held for safe while_each_thread()
 * traversal.
 */
static int cpu_timer_sample_group(const clockid_t which_clock,
				  struct task_struct *p, u64 *sample)
{
	struct task_cputime cputime;

	thread_group_cputimer(p, &cputime);
	switch (CPUCLOCK_WHICH(which_clock)) {
	default:
		return -EINVAL;
	case CPUCLOCK_PROF:
		*sample = cputime.utime + cputime.stime;
		break;
	case CPUCLOCK_VIRT:
		*sample = cputime.utime;
		break;
	case CPUCLOCK_SCHED:
		*sample = cputime.sum_exec_runtime;
		break;
	}
	return 0;
}

/*
 * Guts of sys_timer_settime for CPU timers.
 * This is called with the timer locked and interrupts disabled.
 * If we return TIMER_RETRY, it's necessary to release the timer's lock
 * and try again.  (This happens when the timer is in the middle of firing.)
 */
static int posix_cpu_timer_set(struct k_itimer *timer, int timer_flags,
			       struct itimerspec64 *new, struct itimerspec64 *old)
{
	unsigned long flags;
	struct sighand_struct *sighand;
	struct task_struct *p = timer->it.cpu.task;
	u64 old_expires, new_expires, old_incr, val;
	int ret;

	WARN_ON_ONCE(p == NULL);

	/*
	 * Use the to_ktime conversion because that clamps the maximum
	 * value to KTIME_MAX and avoids multiplication overflows.
	 */
	new_expires = ktime_to_ns(timespec64_to_ktime(new->it_value));

	/*
	 * Protect against sighand release/switch in exit/exec and
	 * p->cpu_timers and p->signal->cpu_timers read/write in arm_timer().
	 */
	sighand = lock_task_sighand(p, &flags);
	/*
	 * If p has just been reaped, we can no
	 * longer get any information about it at all.
	 */
	if (unlikely(sighand == NULL)) {
		return -ESRCH;
	}

	/*
	 * Disarm any old timer after extracting its expiry time.
	 */
	ret = 0;
	old_incr = timer->it_interval;
	old_expires = timer->it.cpu.expires;
	if (unlikely(timer->it.cpu.firing)) {
		timer->it.cpu.firing = -1;
		ret = TIMER_RETRY;
	} else
		list_del_init(&timer->it.cpu.entry);

	/*
	 * We need to sample the current time to convert the new
	 * value from relative to absolute, and to convert the
	 * old value from absolute to relative.  To set a process
	 * timer, we need a sample to balance the thread expiry
	 * times (in arm_timer).  With an absolute time, we must
	 * check if it's already passed.  In short, we need a sample.
	 */
	if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
		cpu_clock_sample(timer->it_clock, p, &val);
	} else {
		cpu_timer_sample_group(timer->it_clock, p, &val);
	}

	if (old) {
		if (old_expires == 0) {
			old->it_value.tv_sec = 0;
			old->it_value.tv_nsec = 0;
		} else {
			/*
			 * Update the timer in case it has
			 * overrun already.  If it has,
			 * we'll report it as having overrun
			 * and with the next reloaded timer
			 * already ticking, though we are
			 * swallowing that pending
			 * notification here to install the
			 * new setting.
			 */
			bump_cpu_timer(timer, val);
			if (val < timer->it.cpu.expires) {
				old_expires = timer->it.cpu.expires - val;
				old->it_value = ns_to_timespec64(old_expires);
			} else {
				old->it_value.tv_nsec = 1;
				old->it_value.tv_sec = 0;
			}
		}
	}

	if (unlikely(ret)) {
		/*
		 * We are colliding with the timer actually firing.
		 * Punt after filling in the timer's old value, and
		 * disable this firing since we are already reporting
		 * it as an overrun (thanks to bump_cpu_timer above).
		 */
		unlock_task_sighand(p, &flags);
		goto out;
	}

	if (new_expires != 0 && !(timer_flags & TIMER_ABSTIME)) {
		new_expires += val;
	}

	/*
	 * Install the new expiry time (or zero).
	 * For a timer with no notification action, we don't actually
	 * arm the timer (we'll just fake it for timer_gettime).
	 */
	timer->it.cpu.expires = new_expires;
	if (new_expires != 0 && val < new_expires) {
		arm_timer(timer);
	}

	unlock_task_sighand(p, &flags);

	/*
	 * Install the new reload setting, and
	 * set up the signal and overrun bookkeeping.
	 */
	timer->it_interval = timespec64_to_ktime(new->it_interval);

	/*
	 * This acts as a modification timestamp for the timer,
	 * so any automatic reload attempt will punt on seeing
	 * that we have reset the timer manually.
	 */
	timer->it_requeue_pending = (timer->it_requeue_pending + 2) &
		~REQUEUE_PENDING;
	timer->it_overrun_last = 0;
	timer->it_overrun = -1;

	if (new_expires != 0 && !(val < new_expires)) {
		/*
		 * The designated time already passed, so we notify
		 * immediately, even if the thread never runs to
		 * accumulate more time on this clock.
		 */
		cpu_timer_fire(timer);
	}

	ret = 0;
 out:
	if (old)
		old->it_interval = ns_to_timespec64(old_incr);

	return ret;
}
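
/*
 * Illustration (user-space sketch, not part of this file): arming a
 * periodic 100ms CPU-time timer through the path above:
 *
 *	struct itimerspec its = {
 *		.it_value    = { .tv_nsec = 100 * 1000 * 1000 },
 *		.it_interval = { .tv_nsec = 100 * 1000 * 1000 },
 *	};
 *	timer_settime(tid, 0, &its, NULL);	// 0 = relative setting
 *
 * With a relative setting (no TIMER_ABSTIME), the expiry is made absolute
 * against the current clock sample (new_expires += val above) before the
 * timer is armed.
 */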

static void posix_cpu_timer_get(struct k_itimer *timer, struct itimerspec64 *itp)
{
	u64 now;
	struct task_struct *p = timer->it.cpu.task;

	WARN_ON_ONCE(p == NULL);

	/*
	 * Easy part: convert the reload time.
	 */
	itp->it_interval = ktime_to_timespec64(timer->it_interval);

	if (!timer->it.cpu.expires)
		return;

	/*
	 * Sample the clock to take the difference with the expiry time.
	 */
	if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
		cpu_clock_sample(timer->it_clock, p, &now);
	} else {
		struct sighand_struct *sighand;
		unsigned long flags;

		/*
		 * Protect against sighand release/switch in exit/exec and
		 * also make timer sampling safe if it ends up calling
		 * thread_group_cputime().
		 */
		sighand = lock_task_sighand(p, &flags);
		if (unlikely(sighand == NULL)) {
			/*
			 * The process has been reaped.
			 * We can't even collect a sample any more.
			 * Disarm the timer, nothing else to do.
			 */
			timer->it.cpu.expires = 0;
			return;
		} else {
			cpu_timer_sample_group(timer->it_clock, p, &now);
			unlock_task_sighand(p, &flags);
		}
	}

	if (now < timer->it.cpu.expires) {
		itp->it_value = ns_to_timespec64(timer->it.cpu.expires - now);
	} else {
		/*
		 * The timer should have expired already, but the firing
		 * hasn't taken place yet.  Say it's just about to expire.
		 */
		itp->it_value.tv_nsec = 1;
		itp->it_value.tv_sec = 0;
	}
}

static unsigned long long
check_timers_list(struct list_head *timers,
		  struct list_head *firing,
		  unsigned long long curr)
{
	int maxfire = 20;

	while (!list_empty(timers)) {
		struct cpu_timer_list *t;

		t = list_first_entry(timers, struct cpu_timer_list, entry);

		if (!--maxfire || curr < t->expires)
			return t->expires;

		t->firing = 1;
		list_move_tail(&t->entry, firing);
	}

	return 0;
}

static inline void check_dl_overrun(struct task_struct *tsk)
{
	if (tsk->dl.dl_overrun) {
		tsk->dl.dl_overrun = 0;
		__group_send_sig_info(SIGXCPU, SEND_SIG_PRIV, tsk);
	}
}

/*
 * Check for any per-thread CPU timers that have fired and move them off
 * the tsk->cpu_timers[N] list onto the firing list.  Here we update the
 * tsk->cputime_expires struct to reflect the currently new expiry times.
 */
static void check_thread_timers(struct task_struct *tsk,
				struct list_head *firing)
{
	struct list_head *timers = tsk->cpu_timers;
	struct task_cputime *tsk_expires = &tsk->cputime_expires;
	u64 expires;
	unsigned long soft;

	if (dl_task(tsk))
		check_dl_overrun(tsk);

	/*
	 * If cputime_expires is zero, then there are no active
	 * per thread CPU timers.
	 */
	if (task_cputime_zero(&tsk->cputime_expires))
		return;

	expires = check_timers_list(timers, firing, prof_ticks(tsk));
	tsk_expires->prof_exp = expires;

	expires = check_timers_list(++timers, firing, virt_ticks(tsk));
	tsk_expires->virt_exp = expires;

	tsk_expires->sched_exp = check_timers_list(++timers, firing,
						   tsk->se.sum_exec_runtime);

	/*
	 * Check for the special case thread timers.
	 */
	soft = task_rlimit(tsk, RLIMIT_RTTIME);
	if (soft != RLIM_INFINITY) {
		unsigned long hard = task_rlimit_max(tsk, RLIMIT_RTTIME);

		if (hard != RLIM_INFINITY &&
		    tsk->rt.timeout > DIV_ROUND_UP(hard, USEC_PER_SEC/HZ)) {
			/*
			 * At the hard limit, we just die.
			 * No need to calculate anything else now.
			 */
			if (print_fatal_signals) {
				pr_info("CPU Watchdog Timeout (hard): %s[%d]\n",
					tsk->comm, task_pid_nr(tsk));
			}
			__group_send_sig_info(SIGKILL, SEND_SIG_PRIV, tsk);
			return;
		}
		if (tsk->rt.timeout > DIV_ROUND_UP(soft, USEC_PER_SEC/HZ)) {
			/*
			 * At the soft limit, send a SIGXCPU every second.
			 */
			if (soft < hard) {
				soft += USEC_PER_SEC;
				tsk->signal->rlim[RLIMIT_RTTIME].rlim_cur =
									soft;
			}
			if (print_fatal_signals) {
				pr_info("RT Watchdog Timeout (soft): %s[%d]\n",
					tsk->comm, task_pid_nr(tsk));
			}
			__group_send_sig_info(SIGXCPU, SEND_SIG_PRIV, tsk);
		}
	}
	if (task_cputime_zero(tsk_expires))
		tick_dep_clear_task(tsk, TICK_DEP_BIT_POSIX_TIMER);
}

static inline void stop_process_timers(struct signal_struct *sig)
{
	struct thread_group_cputimer *cputimer = &sig->cputimer;

	/* Turn off cputimer->running. This is done without locking. */
	WRITE_ONCE(cputimer->running, false);
	tick_dep_clear_signal(sig, TICK_DEP_BIT_POSIX_TIMER);
}

static void check_cpu_itimer(struct task_struct *tsk, struct cpu_itimer *it,
			     u64 *expires, u64 cur_time, int signo)
{
	if (!it->expires)
		return;

	if (cur_time >= it->expires) {
		if (it->incr)
			it->expires += it->incr;
		else
			it->expires = 0;

		trace_itimer_expire(signo == SIGPROF ?
				    ITIMER_PROF : ITIMER_VIRTUAL,
				    task_tgid(tsk), cur_time);
		__group_send_sig_info(signo, SEND_SIG_PRIV, tsk);
	}

	if (it->expires && (!*expires || it->expires < *expires))
		*expires = it->expires;
}

/*
 * Check for any per-process CPU timers that have fired and move them
 * off the tsk->signal->cpu_timers list onto the firing list.  Per-thread
 * timers have already been taken off.
 */
static void check_process_timers(struct task_struct *tsk,
				 struct list_head *firing)
{
	struct signal_struct *const sig = tsk->signal;
	u64 utime, ptime, virt_expires, prof_expires;
	u64 sum_sched_runtime, sched_expires;
	struct list_head *timers = sig->cpu_timers;
	struct task_cputime cputime;
	unsigned long soft;

	/*
	 * If cputimer is not running, then there are no active
	 * process wide timers (POSIX 1.b, itimers, RLIMIT_CPU).
	 */
	if (!READ_ONCE(tsk->signal->cputimer.running))
		return;

	/*
	 * Signify that a thread is checking for process timers.
	 * Write access to this field is protected by the sighand lock.
	 */
	sig->cputimer.checking_timer = true;

	/*
	 * Collect the current process totals.
	 */
	thread_group_cputimer(tsk, &cputime);
	utime = cputime.utime;
	ptime = utime + cputime.stime;
	sum_sched_runtime = cputime.sum_exec_runtime;

	prof_expires = check_timers_list(timers, firing, ptime);
	virt_expires = check_timers_list(++timers, firing, utime);
	sched_expires = check_timers_list(++timers, firing, sum_sched_runtime);

	/*
	 * Check for the special case process timers.
	 */
	check_cpu_itimer(tsk, &sig->it[CPUCLOCK_PROF], &prof_expires, ptime,
			 SIGPROF);
	check_cpu_itimer(tsk, &sig->it[CPUCLOCK_VIRT], &virt_expires, utime,
			 SIGVTALRM);
	soft = task_rlimit(tsk, RLIMIT_CPU);
	if (soft != RLIM_INFINITY) {
		unsigned long psecs = div_u64(ptime, NSEC_PER_SEC);
		unsigned long hard = task_rlimit_max(tsk, RLIMIT_CPU);
		u64 x;
		if (psecs >= hard) {
			/*
			 * At the hard limit, we just die.
			 * No need to calculate anything else now.
			 */
			if (print_fatal_signals) {
				pr_info("RT Watchdog Timeout (hard): %s[%d]\n",
					tsk->comm, task_pid_nr(tsk));
			}
			__group_send_sig_info(SIGKILL, SEND_SIG_PRIV, tsk);
			return;
		}
		if (psecs >= soft) {
			/*
			 * At the soft limit, send a SIGXCPU every second.
			 */
			if (print_fatal_signals) {
				pr_info("CPU Watchdog Timeout (soft): %s[%d]\n",
					tsk->comm, task_pid_nr(tsk));
			}
			__group_send_sig_info(SIGXCPU, SEND_SIG_PRIV, tsk);
			if (soft < hard) {
				soft++;
				sig->rlim[RLIMIT_CPU].rlim_cur = soft;
			}
		}
		x = soft * NSEC_PER_SEC;
		if (!prof_expires || x < prof_expires)
			prof_expires = x;
	}

	sig->cputime_expires.prof_exp = prof_expires;
	sig->cputime_expires.virt_exp = virt_expires;
	sig->cputime_expires.sched_exp = sched_expires;
	if (task_cputime_zero(&sig->cputime_expires))
		stop_process_timers(sig);

	sig->cputimer.checking_timer = false;
}

/*
 * This is called from the signal code (via posixtimer_rearm)
 * when the last timer signal was delivered and we have to reload the timer.
 */
static void posix_cpu_timer_rearm(struct k_itimer *timer)
{
	struct sighand_struct *sighand;
	unsigned long flags;
	struct task_struct *p = timer->it.cpu.task;
	u64 now;

	WARN_ON_ONCE(p == NULL);

	/*
	 * Fetch the current sample and update the timer's expiry time.
	 */
	if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
		cpu_clock_sample(timer->it_clock, p, &now);
		bump_cpu_timer(timer, now);
		if (unlikely(p->exit_state))
			return;

		/* Protect timer list r/w in arm_timer() */
		sighand = lock_task_sighand(p, &flags);
		if (!sighand)
			return;
	} else {
		/*
		 * Protect arm_timer() and timer sampling in case of call to
		 * thread_group_cputime().
		 */
		sighand = lock_task_sighand(p, &flags);
		if (unlikely(sighand == NULL)) {
			/*
			 * The process has been reaped.
			 * We can't even collect a sample any more.
			 */
			timer->it.cpu.expires = 0;
			return;
		} else if (unlikely(p->exit_state) && thread_group_empty(p)) {
			/* If the process is dying, no need to rearm */
			goto unlock;
		}
		cpu_timer_sample_group(timer->it_clock, p, &now);
		bump_cpu_timer(timer, now);
		/* Leave the sighand locked for the call below.  */
	}

	/*
	 * Now re-arm for the new expiry time.
	 */
	arm_timer(timer);
unlock:
	unlock_task_sighand(p, &flags);
}

/**
 * task_cputime_expired - Compare two task_cputime entities.
 *
 * @sample:	The task_cputime structure to be checked for expiration.
 * @expires:	Expiration times, against which @sample will be checked.
 *
 * Checks @sample against @expires to see if any field of @sample has expired.
 * Returns true if any field of the former is greater than the corresponding
 * field of the latter if the latter field is set.  Otherwise returns false.
 */
static inline int task_cputime_expired(const struct task_cputime *sample,
				       const struct task_cputime *expires)
{
	if (expires->utime && sample->utime >= expires->utime)
		return 1;
	if (expires->stime && sample->utime + sample->stime >= expires->stime)
		return 1;
	if (expires->sum_exec_runtime != 0 &&
	    sample->sum_exec_runtime >= expires->sum_exec_runtime)
		return 1;
	return 0;
}

/**
 * fastpath_timer_check - POSIX CPU timers fast path.
 *
 * @tsk:	The task (thread) being checked.
 *
 * Check the task and thread group timers.  If both are zero (there are no
 * timers set) return false.  Otherwise snapshot the task and thread group
 * timers and compare them with the corresponding expiration times.  Return
 * true if a timer has expired, else return false.
 */
static inline int fastpath_timer_check(struct task_struct *tsk)
{
	struct signal_struct *sig;

	if (!task_cputime_zero(&tsk->cputime_expires)) {
		struct task_cputime task_sample;

		task_cputime(tsk, &task_sample.utime, &task_sample.stime);
		task_sample.sum_exec_runtime = tsk->se.sum_exec_runtime;
		if (task_cputime_expired(&task_sample, &tsk->cputime_expires))
			return 1;
	}

	sig = tsk->signal;
	/*
	 * Check if thread group timers expired when the cputimer is
	 * running and no other thread in the group is already checking
	 * for thread group cputimers. These fields are read without the
	 * sighand lock. However, this is fine because this is meant to
	 * be a fastpath heuristic to determine whether we should try to
	 * acquire the sighand lock to check/process timers.
	 *
	 * In the worst case scenario, if 'running' or 'checking_timer' gets
	 * set but the current thread doesn't see the change yet, we'll wait
	 * until the next thread in the group gets a scheduler interrupt to
	 * handle the timer. This isn't an issue in practice because these
	 * types of delays with signals actually getting sent are expected.
	 */
	if (READ_ONCE(sig->cputimer.running) &&
	    !READ_ONCE(sig->cputimer.checking_timer)) {
		struct task_cputime group_sample;

		sample_cputime_atomic(&group_sample, &sig->cputimer.cputime_atomic);

		if (task_cputime_expired(&group_sample, &sig->cputime_expires))
			return 1;
	}

	if (dl_task(tsk) && tsk->dl.dl_overrun)
		return 1;

	return 0;
}

/*
 * This is called from the timer interrupt handler.  The irq handler has
 * already updated our counts.  We need to check if any timers fire now.
 * Here we split out into separate routines for per-thread and per-process
 * timers.
 */
void run_posix_cpu_timers(struct task_struct *tsk)
{
	LIST_HEAD(firing);
	struct k_itimer *timer, *next;
	unsigned long flags;

	lockdep_assert_irqs_disabled();

	/*
	 * The fast path checks that there are no expired thread or thread
	 * group timers.  If that's so, just return.
	 */
	if (!fastpath_timer_check(tsk))
		return;

	if (!lock_task_sighand(tsk, &flags))
		return;

	/*
	 * Here we take off tsk->signal->cpu_timers[N] and
	 * tsk->cpu_timers[N] all the timers that are firing, and
	 * put them on the firing list.
	 */
	check_thread_timers(tsk, &firing);

	check_process_timers(tsk, &firing);

	/*
	 * We must release these locks before taking any timer's lock.
	 * There is a potential race with timer deletion here, as the
	 * siglock now protects our private firing list.  We have set
	 * the firing flag in each timer, so that a deletion attempt
	 * that gets the timer lock before we do will give it up and
	 * spin until we've taken care of that timer below.
	 */
	unlock_task_sighand(tsk, &flags);

	/*
	 * Now that all the timers on our list have the firing flag,
	 * no one will touch their list entries but us.  We'll take
	 * each timer's lock before clearing its firing flag, so no
	 * timer call will interfere.
	 */
	list_for_each_entry_safe(timer, next, &firing, it.cpu.entry) {
		int cpu_firing;

		spin_lock(&timer->it_lock);
		list_del_init(&timer->it.cpu.entry);
		cpu_firing = timer->it.cpu.firing;
		timer->it.cpu.firing = 0;
		/*
		 * The firing flag is -1 if we collided with a reset
		 * of the timer, which already reported this
		 * almost-firing as an overrun.  So don't generate an event.
		 */
		if (likely(cpu_firing >= 0))
			cpu_timer_fire(timer);
		spin_unlock(&timer->it_lock);
	}
}

/*
 * Set one of the process-wide special case CPU timers or RLIMIT_CPU.
 * The tsk->sighand->siglock must be held by the caller.
 */
void set_process_cpu_timer(struct task_struct *tsk, unsigned int clock_idx,
			   u64 *newval, u64 *oldval)
{
	u64 now;
	int ret;

	WARN_ON_ONCE(clock_idx == CPUCLOCK_SCHED);
	ret = cpu_timer_sample_group(clock_idx, tsk, &now);

	if (oldval && ret != -EINVAL) {
		/*
		 * We are setting itimer. The *oldval is absolute and we update
		 * it to be relative, *newval argument is relative and we update
		 * it to be absolute.
		 */
		if (*oldval) {
			if (*oldval <= now) {
				/* Just about to fire. */
				*oldval = TICK_NSEC;
			} else {
				*oldval -= now;
			}
		}

		if (!*newval)
			return;
		*newval += now;
	}

	/*
	 * Update the expiration cache if we are the earliest timer, or
	 * eventually RLIMIT_CPU limit is earlier than the prof_exp cpu
	 * timer expiry.
	 */
	switch (clock_idx) {
	case CPUCLOCK_PROF:
		if (expires_gt(tsk->signal->cputime_expires.prof_exp, *newval))
			tsk->signal->cputime_expires.prof_exp = *newval;
		break;
	case CPUCLOCK_VIRT:
		if (expires_gt(tsk->signal->cputime_expires.virt_exp, *newval))
			tsk->signal->cputime_expires.virt_exp = *newval;
		break;
	}

	tick_dep_set_signal(tsk->signal, TICK_DEP_BIT_POSIX_TIMER);
}
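
/*
 * Illustration (not part of this file): besides update_rlimit_cpu() above,
 * this helper is what the ITIMER_PROF/ITIMER_VIRTUAL code in
 * kernel/time/itimer.c ends up calling, e.g. for
 *
 *	struct itimerval itv = { .it_value = { .tv_sec = 1 } };
 *	setitimer(ITIMER_PROF, &itv, NULL);	// SIGPROF after 1s of CPU time
 *
 * where the relative user value is converted to nanoseconds and passed in
 * as *newval, and the previous setting comes back through *oldval.
 */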

static int do_cpu_nanosleep(const clockid_t which_clock, int flags,
			    const struct timespec64 *rqtp)
{
	struct itimerspec64 it;
	struct k_itimer timer;
	u64 expires;
	int error;

	/*
	 * Set up a temporary timer and then wait for it to go off.
	 */
	memset(&timer, 0, sizeof timer);
	spin_lock_init(&timer.it_lock);
	timer.it_clock = which_clock;
	timer.it_overrun = -1;
	error = posix_cpu_timer_create(&timer);
	timer.it_process = current;
	if (!error) {
		static struct itimerspec64 zero_it;
		struct restart_block *restart;

		memset(&it, 0, sizeof(it));
		it.it_value = *rqtp;

		spin_lock_irq(&timer.it_lock);
		error = posix_cpu_timer_set(&timer, flags, &it, NULL);
		if (error) {
			spin_unlock_irq(&timer.it_lock);
			return error;
		}

		while (!signal_pending(current)) {
			if (timer.it.cpu.expires == 0) {
				/*
				 * Our timer fired and was reset, below
				 * deletion can not fail.
				 */
				posix_cpu_timer_del(&timer);
				spin_unlock_irq(&timer.it_lock);
				return 0;
			}

			/*
			 * Block until cpu_timer_fire (or a signal) wakes us.
			 */
			__set_current_state(TASK_INTERRUPTIBLE);
			spin_unlock_irq(&timer.it_lock);
			schedule();
			spin_lock_irq(&timer.it_lock);
		}

		/*
		 * We were interrupted by a signal.
		 */
		expires = timer.it.cpu.expires;
		error = posix_cpu_timer_set(&timer, 0, &zero_it, &it);
		if (!error) {
			/*
			 * Timer is now unarmed, deletion can not fail.
			 */
			posix_cpu_timer_del(&timer);
		}
		spin_unlock_irq(&timer.it_lock);

		while (error == TIMER_RETRY) {
			/*
			 * We need to handle case when timer was or is in the
			 * middle of firing. In other cases we already freed
			 * resources.
			 */
			spin_lock_irq(&timer.it_lock);
			error = posix_cpu_timer_del(&timer);
			spin_unlock_irq(&timer.it_lock);
		}

		if ((it.it_value.tv_sec | it.it_value.tv_nsec) == 0) {
			/*
			 * It actually did fire already.
			 */
			return 0;
		}

		error = -ERESTART_RESTARTBLOCK;
		/*
		 * Report back to the user the time still remaining.
		 */
		restart = &current->restart_block;
		restart->nanosleep.expires = expires;
		if (restart->nanosleep.type != TT_NONE)
			error = nanosleep_copyout(restart, &it.it_value);
	}

	return error;
}

static long posix_cpu_nsleep_restart(struct restart_block *restart_block);

static int posix_cpu_nsleep(const clockid_t which_clock, int flags,
			    const struct timespec64 *rqtp)
{
	struct restart_block *restart_block = &current->restart_block;
	int error;

	/*
	 * Diagnose required errors first.
	 */
	if (CPUCLOCK_PERTHREAD(which_clock) &&
	    (CPUCLOCK_PID(which_clock) == 0 ||
	     CPUCLOCK_PID(which_clock) == task_pid_vnr(current)))
		return -EINVAL;

	error = do_cpu_nanosleep(which_clock, flags, rqtp);

	if (error == -ERESTART_RESTARTBLOCK) {

		if (flags & TIMER_ABSTIME)
			return -ERESTARTNOHAND;

		restart_block->fn = posix_cpu_nsleep_restart;
		restart_block->nanosleep.clockid = which_clock;
	}
	return error;
}

static long posix_cpu_nsleep_restart(struct restart_block *restart_block)
{
	clockid_t which_clock = restart_block->nanosleep.clockid;
	struct timespec64 t;

	t = ns_to_timespec64(restart_block->nanosleep.expires);

	return do_cpu_nanosleep(which_clock, TIMER_ABSTIME, &t);
}
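
/*
 * Illustration (user-space sketch, not part of this file): sleeping until
 * the process has consumed two more seconds of CPU time, serviced by
 * do_cpu_nanosleep() above:
 *
 *	struct timespec req = { .tv_sec = 2 };
 *	clock_nanosleep(CLOCK_PROCESS_CPUTIME_ID, 0, &req, NULL);
 *
 * Note that posix_cpu_nsleep() rejects sleeping on the caller's own
 * per-thread clock with -EINVAL: a blocked thread accumulates no CPU time,
 * so such a timer could never fire.
 */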

#define PROCESS_CLOCK	make_process_cpuclock(0, CPUCLOCK_SCHED)
#define THREAD_CLOCK	make_thread_cpuclock(0, CPUCLOCK_SCHED)

static int process_cpu_clock_getres(const clockid_t which_clock,
				    struct timespec64 *tp)
{
	return posix_cpu_clock_getres(PROCESS_CLOCK, tp);
}
static int process_cpu_clock_get(const clockid_t which_clock,
				 struct timespec64 *tp)
{
	return posix_cpu_clock_get(PROCESS_CLOCK, tp);
}
static int process_cpu_timer_create(struct k_itimer *timer)
{
	timer->it_clock = PROCESS_CLOCK;
	return posix_cpu_timer_create(timer);
}
static int process_cpu_nsleep(const clockid_t which_clock, int flags,
			      const struct timespec64 *rqtp)
{
	return posix_cpu_nsleep(PROCESS_CLOCK, flags, rqtp);
}
static int thread_cpu_clock_getres(const clockid_t which_clock,
				   struct timespec64 *tp)
{
	return posix_cpu_clock_getres(THREAD_CLOCK, tp);
}
static int thread_cpu_clock_get(const clockid_t which_clock,
				struct timespec64 *tp)
{
	return posix_cpu_clock_get(THREAD_CLOCK, tp);
}
static int thread_cpu_timer_create(struct k_itimer *timer)
{
	timer->it_clock = THREAD_CLOCK;
	return posix_cpu_timer_create(timer);
}

const struct k_clock clock_posix_cpu = {
	.clock_getres	= posix_cpu_clock_getres,
	.clock_set	= posix_cpu_clock_set,
	.clock_get	= posix_cpu_clock_get,
	.timer_create	= posix_cpu_timer_create,
	.nsleep		= posix_cpu_nsleep,
	.timer_set	= posix_cpu_timer_set,
	.timer_del	= posix_cpu_timer_del,
	.timer_get	= posix_cpu_timer_get,
	.timer_rearm	= posix_cpu_timer_rearm,
};

const struct k_clock clock_process = {
	.clock_getres	= process_cpu_clock_getres,
	.clock_get	= process_cpu_clock_get,
	.timer_create	= process_cpu_timer_create,
	.nsleep		= process_cpu_nsleep,
};

const struct k_clock clock_thread = {
	.clock_getres	= thread_cpu_clock_getres,
	.clock_get	= thread_cpu_clock_get,
	.timer_create	= thread_cpu_timer_create,
};