#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/time.h>
#include <linux/mutex.h>
#include <linux/sched/task.h>

#include <linux/uaccess.h>
#include <linux/list.h>
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/hash.h>
#include <linux/posix-clock.h>
#include <linux/posix-timers.h>
#include <linux/syscalls.h>
#include <linux/wait.h>
#include <linux/workqueue.h>
#include <linux/export.h>
#include <linux/hashtable.h>
#include <linux/compat.h>
#include <linux/nospec.h>

#include "timekeeping.h"
#include "posix-timers.h"
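
/*
 * POSIX timers are kept in a global hash table, hashed over the owning
 * signal_struct pointer and the per-process timer ID.  Insertions are
 * serialized by hash_lock; lookups walk the hash chains under RCU via
 * the t_hash entry of each timer.
 */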
static struct kmem_cache *posix_timers_cache;

static DEFINE_HASHTABLE(posix_timers_hashtable, 9);
static DEFINE_SPINLOCK(hash_lock);

static const struct k_clock * const posix_clocks[];
static const struct k_clock *clockid_to_kclock(const clockid_t id);
static const struct k_clock clock_realtime, clock_monotonic;
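
/*
 * SIGEV_THREAD_ID is or'ed into sigev_notify together with the other
 * SIGEV_* values, so it must not share a bit with any of them.  Catch a
 * conflicting uapi change at compile time.
 */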
#if SIGEV_THREAD_ID != (SIGEV_THREAD_ID & \
			~(SIGEV_SIGNAL | SIGEV_NONE | SIGEV_THREAD))
#error "SIGEV_THREAD_ID must not share bit with other SIGEV values!"
#endif
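
/*
 * lock_timer() resolves a timer ID to its k_itimer and acquires it_lock
 * with interrupts disabled.  The lookup itself runs under RCU, and the
 * timer is only handed back if it still belongs to the calling process.
 * __cond_lock() annotates the conditionally taken lock for sparse.
 */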
static struct k_itimer *__lock_timer(timer_t timer_id, unsigned long *flags);

#define lock_timer(tid, flags)						\
({	struct k_itimer *__timr;					\
	__cond_lock(&__timr->it_lock, __timr = __lock_timer(tid, flags)); \
	__timr;								\
})

static int hash(struct signal_struct *sig, unsigned int nr)
{
	return hash_32(hash32_ptr(sig) ^ nr, HASH_BITS(posix_timers_hashtable));
}

static struct k_itimer *__posix_timers_find(struct hlist_head *head,
					    struct signal_struct *sig,
					    timer_t id)
{
	struct k_itimer *timer;

	hlist_for_each_entry_rcu(timer, head, t_hash) {
		if ((timer->it_signal == sig) && (timer->it_id == id))
			return timer;
	}
	return NULL;
}

static struct k_itimer *posix_timer_by_id(timer_t id)
{
	struct signal_struct *sig = current->signal;
	struct hlist_head *head = &posix_timers_hashtable[hash(sig, id)];

	return __posix_timers_find(head, sig, id);
}

static int posix_timer_add(struct k_itimer *timer)
{
	struct signal_struct *sig = current->signal;
	int first_free_id = sig->posix_timer_id;
	struct hlist_head *head;
	int ret = -ENOENT;

	do {
		spin_lock(&hash_lock);
		head = &posix_timers_hashtable[hash(sig, sig->posix_timer_id)];
		if (!__posix_timers_find(head, sig, sig->posix_timer_id)) {
			hlist_add_head_rcu(&timer->t_hash, head);
			ret = sig->posix_timer_id;
		}
		if (++sig->posix_timer_id < 0)
			sig->posix_timer_id = 0;
		if ((sig->posix_timer_id == first_free_id) && (ret == -ENOENT))
			/* Every possible id has been tried, table is full */
			ret = -EAGAIN;
		spin_unlock(&hash_lock);
	} while (ret == -ENOENT);
	return ret;
}

static inline void unlock_timer(struct k_itimer *timr, unsigned long flags)
{
	spin_unlock_irqrestore(&timr->it_lock, flags);
}

static int posix_clock_realtime_get(clockid_t which_clock, struct timespec64 *tp)
{
	ktime_get_real_ts64(tp);
	return 0;
}

static int posix_clock_realtime_set(const clockid_t which_clock,
				    const struct timespec64 *tp)
{
	return do_sys_settimeofday64(tp, NULL);
}

static int posix_clock_realtime_adj(const clockid_t which_clock,
				    struct timex *t)
{
	return do_adjtimex(t);
}

static int posix_ktime_get_ts(clockid_t which_clock, struct timespec64 *tp)
{
	ktime_get_ts64(tp);
	return 0;
}

static int posix_get_monotonic_raw(clockid_t which_clock, struct timespec64 *tp)
{
	ktime_get_raw_ts64(tp);
	return 0;
}

static int posix_get_realtime_coarse(clockid_t which_clock, struct timespec64 *tp)
{
	ktime_get_coarse_real_ts64(tp);
	return 0;
}

static int posix_get_monotonic_coarse(clockid_t which_clock,
				      struct timespec64 *tp)
{
	ktime_get_coarse_ts64(tp);
	return 0;
}

static int posix_get_coarse_res(const clockid_t which_clock, struct timespec64 *tp)
{
	*tp = ktime_to_timespec64(KTIME_LOW_RES);
	return 0;
}

static int posix_get_boottime(const clockid_t which_clock, struct timespec64 *tp)
{
	ktime_get_boottime_ts64(tp);
	return 0;
}

static int posix_get_tai(clockid_t which_clock, struct timespec64 *tp)
{
	ktime_get_clocktai_ts64(tp);
	return 0;
}

static int posix_get_hrtimer_res(clockid_t which_clock, struct timespec64 *tp)
{
	tp->tv_sec = 0;
	tp->tv_nsec = hrtimer_resolution;
	return 0;
}

static __init int init_posix_timers(void)
{
	posix_timers_cache = kmem_cache_create("posix_timers_cache",
					sizeof (struct k_itimer), 0, SLAB_PANIC,
					NULL);
	return 0;
}
__initcall(init_posix_timers);
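
/*
 * The siginfo si_overrun field and the return value of timer_getoverrun(2)
 * are of type int.  Clamp the accumulated 64bit overrun count to INT_MAX
 * so userspace never sees a truncated or negative value.
 */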
static inline int timer_overrun_to_int(struct k_itimer *timr, int baseval)
{
	s64 sum = timr->it_overrun_last + (s64)baseval;

	return sum > (s64)INT_MAX ? INT_MAX : (int)sum;
}

static void common_hrtimer_rearm(struct k_itimer *timr)
{
	struct hrtimer *timer = &timr->it.real.timer;

	timr->it_overrun += hrtimer_forward(timer, timer->base->get_time(),
					    timr->it_interval);
	hrtimer_restart(timer);
}
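
/*
 * posixtimer_rearm() is called from the signal delivery path when the
 * queued signal of an interval timer has been delivered.  It forwards the
 * timer via the clock's timer_rearm() callback, moves the accumulated
 * overrun count into it_overrun_last for timer_getoverrun(2), and bumps
 * it_requeue_pending so that stale rearm requests are ignored.
 */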
void posixtimer_rearm(struct kernel_siginfo *info)
{
	struct k_itimer *timr;
	unsigned long flags;

	timr = lock_timer(info->si_tid, &flags);
	if (!timr)
		return;

	if (timr->it_interval && timr->it_requeue_pending == info->si_sys_private) {
		timr->kclock->timer_rearm(timr);

		timr->it_active = 1;
		timr->it_overrun_last = timr->it_overrun;
		timr->it_overrun = -1LL;
		++timr->it_requeue_pending;

		info->si_overrun = timer_overrun_to_int(timr, info->si_overrun);
	}

	unlock_timer(timr, flags);
}

int posix_timer_event(struct k_itimer *timr, int si_private)
{
	enum pid_type type;
	int ret = -1;
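
	/*
	 * si_sys_private carries the rearm cookie which posixtimer_rearm()
	 * compares against it_requeue_pending.  The signal is then queued
	 * either to the whole thread group or, for SIGEV_THREAD_ID timers,
	 * to the selected thread.
	 */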
	timr->sigq->info.si_sys_private = si_private;

	type = !(timr->it_sigev_notify & SIGEV_THREAD_ID) ? PIDTYPE_TGID : PIDTYPE_PID;
	ret = send_sigqueue(timr->sigq, timr->it_pid, type);

	return ret > 0;
}
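
/*
 * This is the hrtimer expiry callback for POSIX timers based on hrtimers.
 * It sends the notification signal and, for interval timers whose signal
 * could not be queued, forwards the expiry and restarts the hrtimer so
 * the timer keeps running.
 */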
static enum hrtimer_restart posix_timer_fn(struct hrtimer *timer)
{
	struct k_itimer *timr;
	unsigned long flags;
	int si_private = 0;
	enum hrtimer_restart ret = HRTIMER_NORESTART;

	timr = container_of(timer, struct k_itimer, it.real.timer);
	spin_lock_irqsave(&timr->it_lock, flags);

	timr->it_active = 0;
	if (timr->it_interval != 0)
		si_private = ++timr->it_requeue_pending;

	if (posix_timer_event(timr, si_private)) {
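		/*
		 * The signal was not queued (e.g. it is ignored), so no
		 * rearm will happen from the signal delivery path.  Keep
		 * interval timers alive by forwarding them here.
		 */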
		if (timr->it_interval != 0) {
			ktime_t now = hrtimer_cb_get_time(timer);
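
			/*
			 * If the interval is smaller than one jiffie, push
			 * "now" forward by a jiffie worth of nanoseconds
			 * before forwarding, so a self-rearming timer does
			 * not re-expire immediately and hog the timer
			 * softirq; hrtimer_forward() still accounts the
			 * overruns correctly.
			 */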
#ifdef CONFIG_HIGH_RES_TIMERS
			{
				ktime_t kj = NSEC_PER_SEC / HZ;

				if (timr->it_interval < kj)
					now = ktime_add(now, kj);
			}
#endif
			timr->it_overrun += hrtimer_forward(timer, now,
							    timr->it_interval);
			ret = HRTIMER_RESTART;
			++timr->it_requeue_pending;
			timr->it_active = 1;
		}
	}

	unlock_timer(timr, flags);
	return ret;
}

static struct pid *good_sigevent(sigevent_t *event)
{
	struct pid *pid = task_tgid(current);
	struct task_struct *rtn;

	switch (event->sigev_notify) {
	case SIGEV_SIGNAL | SIGEV_THREAD_ID:
		pid = find_vpid(event->sigev_notify_thread_id);
		rtn = pid_task(pid, PIDTYPE_PID);
		if (!rtn || !same_thread_group(rtn, current))
			return NULL;
		/* FALLTHRU */
	case SIGEV_SIGNAL:
	case SIGEV_THREAD:
		if (event->sigev_signo <= 0 || event->sigev_signo > SIGRTMAX)
			return NULL;
		/* FALLTHRU */
	case SIGEV_NONE:
		return pid;
	default:
		return NULL;
	}
}

static struct k_itimer *alloc_posix_timer(void)
{
	struct k_itimer *tmr;

	tmr = kmem_cache_zalloc(posix_timers_cache, GFP_KERNEL);
	if (!tmr)
		return tmr;
	if (unlikely(!(tmr->sigq = sigqueue_alloc()))) {
		kmem_cache_free(posix_timers_cache, tmr);
		return NULL;
	}
	clear_siginfo(&tmr->sigq->info);
	return tmr;
}

static void k_itimer_rcu_free(struct rcu_head *head)
{
	struct k_itimer *tmr = container_of(head, struct k_itimer, it.rcu);

	kmem_cache_free(posix_timers_cache, tmr);
}

#define IT_ID_SET	1
#define IT_ID_NOT_SET	0
static void release_posix_timer(struct k_itimer *tmr, int it_id_set)
{
	if (it_id_set) {
		unsigned long flags;

		spin_lock_irqsave(&hash_lock, flags);
		hlist_del_rcu(&tmr->t_hash);
		spin_unlock_irqrestore(&hash_lock, flags);
	}
	put_pid(tmr->it_pid);
	sigqueue_free(tmr->sigq);
	call_rcu(&tmr->it.rcu, k_itimer_rcu_free);
}

static int common_timer_create(struct k_itimer *new_timer)
{
	hrtimer_init(&new_timer->it.real.timer, new_timer->it_clock, 0);
	return 0;
}
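
/* Create a POSIX.1b interval timer. */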
static int do_timer_create(clockid_t which_clock, struct sigevent *event,
			   timer_t __user *created_timer_id)
{
	const struct k_clock *kc = clockid_to_kclock(which_clock);
	struct k_itimer *new_timer;
	int error, new_timer_id;
	int it_id_set = IT_ID_NOT_SET;

	if (!kc)
		return -EINVAL;
	if (!kc->timer_create)
		return -EOPNOTSUPP;

	new_timer = alloc_posix_timer();
	if (unlikely(!new_timer))
		return -EAGAIN;

	spin_lock_init(&new_timer->it_lock);
	new_timer_id = posix_timer_add(new_timer);
	if (new_timer_id < 0) {
		error = new_timer_id;
		goto out;
	}

	it_id_set = IT_ID_SET;
	new_timer->it_id = (timer_t) new_timer_id;
	new_timer->it_clock = which_clock;
	new_timer->kclock = kc;
	new_timer->it_overrun = -1LL;

	if (event) {
		rcu_read_lock();
		new_timer->it_pid = get_pid(good_sigevent(event));
		rcu_read_unlock();
		if (!new_timer->it_pid) {
			error = -EINVAL;
			goto out;
		}
		new_timer->it_sigev_notify = event->sigev_notify;
		new_timer->sigq->info.si_signo = event->sigev_signo;
		new_timer->sigq->info.si_value = event->sigev_value;
	} else {
		new_timer->it_sigev_notify = SIGEV_SIGNAL;
		new_timer->sigq->info.si_signo = SIGALRM;
		memset(&new_timer->sigq->info.si_value, 0, sizeof(sigval_t));
		new_timer->sigq->info.si_value.sival_int = new_timer->it_id;
		new_timer->it_pid = get_pid(task_tgid(current));
	}

	new_timer->sigq->info.si_tid = new_timer->it_id;
	new_timer->sigq->info.si_code = SI_TIMER;

	if (copy_to_user(created_timer_id,
			 &new_timer_id, sizeof (new_timer_id))) {
		error = -EFAULT;
		goto out;
	}

	error = kc->timer_create(new_timer);
	if (error)
		goto out;

	spin_lock_irq(&current->sighand->siglock);
	new_timer->it_signal = current->signal;
	list_add(&new_timer->list, &current->signal->posix_timers);
	spin_unlock_irq(&current->sighand->siglock);

	return 0;
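
	/*
	 * Error path: drop the timer from the hash table again (if it was
	 * added) and free the partially constructed timer.
	 */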
out:
	release_posix_timer(new_timer, it_id_set);
	return error;
}

SYSCALL_DEFINE3(timer_create, const clockid_t, which_clock,
		struct sigevent __user *, timer_event_spec,
		timer_t __user *, created_timer_id)
{
	if (timer_event_spec) {
		sigevent_t event;

		if (copy_from_user(&event, timer_event_spec, sizeof (event)))
			return -EFAULT;
		return do_timer_create(which_clock, &event, created_timer_id);
	}
	return do_timer_create(which_clock, NULL, created_timer_id);
}

#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE3(timer_create, clockid_t, which_clock,
		       struct compat_sigevent __user *, timer_event_spec,
		       timer_t __user *, created_timer_id)
{
	if (timer_event_spec) {
		sigevent_t event;

		if (get_compat_sigevent(&event, timer_event_spec))
			return -EFAULT;
		return do_timer_create(which_clock, &event, created_timer_id);
	}
	return do_timer_create(which_clock, NULL, created_timer_id);
}
#endif
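
/*
 * Look a timer up by ID and lock it.  timer_t is an int, so any value
 * outside [0, INT_MAX] cannot be a valid ID.  The lookup runs under RCU
 * and the timer is only returned, with it_lock held and interrupts
 * disabled, if it still belongs to the current process.
 */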
static struct k_itimer *__lock_timer(timer_t timer_id, unsigned long *flags)
{
	struct k_itimer *timr;

	if ((unsigned long long)timer_id > INT_MAX)
		return NULL;

	rcu_read_lock();
	timr = posix_timer_by_id(timer_id);
	if (timr) {
		spin_lock_irqsave(&timr->it_lock, *flags);
		if (timr->it_signal == current->signal) {
			rcu_read_unlock();
			return timr;
		}
		spin_unlock_irqrestore(&timr->it_lock, *flags);
	}
	rcu_read_unlock();

	return NULL;
}

static ktime_t common_hrtimer_remaining(struct k_itimer *timr, ktime_t now)
{
	struct hrtimer *timer = &timr->it.real.timer;

	return __hrtimer_expires_remaining_adjusted(timer, now);
}

static s64 common_hrtimer_forward(struct k_itimer *timr, ktime_t now)
{
	struct hrtimer *timer = &timr->it.real.timer;

	return hrtimer_forward(timer, now, timr->it_interval);
}
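
/*
 * Compute the current setting of a timer for timer_gettime().  Called
 * with it_lock held.  For pending rearm requests and for SIGEV_NONE
 * timers the expiry is forwarded first, so the reported remaining time
 * and the overrun accounting stay accurate.
 */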
void common_timer_get(struct k_itimer *timr, struct itimerspec64 *cur_setting)
{
	const struct k_clock *kc = timr->kclock;
	ktime_t now, remaining, iv;
	struct timespec64 ts64;
	bool sig_none;

	sig_none = timr->it_sigev_notify == SIGEV_NONE;
	iv = timr->it_interval;

	if (iv) {
		cur_setting->it_interval = ktime_to_timespec64(iv);
	} else if (!timr->it_active) {
		/*
		 * SIGEV_NONE timers are never marked active, so they must
		 * fall through to the expiry check below.  Anything else
		 * which is inactive is disarmed; report a zero setting.
		 */
		if (!sig_none)
			return;
	}

	/* Sample the current time of the timer's clock. */
	kc->clock_get(timr->it_clock, &ts64);
	now = timespec64_to_ktime(ts64);

	/*
	 * If a rearm is pending or this is a SIGEV_NONE timer, forward the
	 * expiry past "now" and account the overruns.
	 */
	if (iv && (timr->it_requeue_pending & REQUEUE_PENDING || sig_none))
		timr->it_overrun += kc->timer_forward(timr, now);

	remaining = kc->timer_remaining(timr, now);

	if (remaining <= 0) {
		/*
		 * Expired single shot SIGEV_NONE timers report 0; everything
		 * else reports the minimal non-zero value of 1 nsec.
		 */
		if (!sig_none)
			cur_setting->it_value.tv_nsec = 1;
	} else {
		cur_setting->it_value = ktime_to_timespec64(remaining);
	}
}

static int do_timer_gettime(timer_t timer_id, struct itimerspec64 *setting)
{
	struct k_itimer *timr;
	const struct k_clock *kc;
	unsigned long flags;
	int ret = 0;

	timr = lock_timer(timer_id, &flags);
	if (!timr)
		return -EINVAL;

	memset(setting, 0, sizeof(*setting));
	kc = timr->kclock;
	if (WARN_ON_ONCE(!kc || !kc->timer_get))
		ret = -EINVAL;
	else
		kc->timer_get(timr, setting);

	unlock_timer(timr, flags);
	return ret;
}

SYSCALL_DEFINE2(timer_gettime, timer_t, timer_id,
		struct __kernel_itimerspec __user *, setting)
{
	struct itimerspec64 cur_setting;

	int ret = do_timer_gettime(timer_id, &cur_setting);
	if (!ret) {
		if (put_itimerspec64(&cur_setting, setting))
			ret = -EFAULT;
	}
	return ret;
}

#ifdef CONFIG_COMPAT_32BIT_TIME

COMPAT_SYSCALL_DEFINE2(timer_gettime, timer_t, timer_id,
		       struct old_itimerspec32 __user *, setting)
{
	struct itimerspec64 cur_setting;

	int ret = do_timer_gettime(timer_id, &cur_setting);
	if (!ret) {
		if (put_old_itimerspec32(&cur_setting, setting))
			ret = -EFAULT;
	}
	return ret;
}

#endif
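
/*
 * timer_getoverrun(2) reports the overrun count of the timer's most
 * recently delivered signal, clamped to INT_MAX.
 */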
SYSCALL_DEFINE1(timer_getoverrun, timer_t, timer_id)
{
	struct k_itimer *timr;
	int overrun;
	unsigned long flags;

	timr = lock_timer(timer_id, &flags);
	if (!timr)
		return -EINVAL;

	overrun = timer_overrun_to_int(timr, 0);
	unlock_timer(timr, flags);

	return overrun;
}

static void common_hrtimer_arm(struct k_itimer *timr, ktime_t expires,
			       bool absolute, bool sigev_none)
{
	struct hrtimer *timer = &timr->it.real.timer;
	enum hrtimer_mode mode;

	mode = absolute ? HRTIMER_MODE_ABS : HRTIMER_MODE_REL;
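
	/*
	 * POSIX requires that relative CLOCK_REALTIME timers are not
	 * affected by clock_settime(), so arm them on CLOCK_MONOTONIC
	 * instead.  Absolute timers stay on CLOCK_REALTIME so that a clock
	 * set moves their expiry accordingly.
	 */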
	if (timr->it_clock == CLOCK_REALTIME)
		timr->kclock = absolute ? &clock_realtime : &clock_monotonic;

	hrtimer_init(&timr->it.real.timer, timr->it_clock, mode);
	timr->it.real.timer.function = posix_timer_fn;

	if (!absolute)
		expires = ktime_add_safe(expires, timer->base->get_time());
	hrtimer_set_expires(timer, expires);

	if (!sigev_none)
		hrtimer_start_expires(timer, HRTIMER_MODE_ABS);
}

static int common_hrtimer_try_to_cancel(struct k_itimer *timr)
{
	return hrtimer_try_to_cancel(&timr->it.real.timer);
}

/* Set a POSIX.1b interval timer. */
int common_timer_set(struct k_itimer *timr, int flags,
		     struct itimerspec64 *new_setting,
		     struct itimerspec64 *old_setting)
{
	const struct k_clock *kc = timr->kclock;
	bool sigev_none;
	ktime_t expires;

	if (old_setting)
		common_timer_get(timr, old_setting);

	/* Prevent rearming by clearing the interval */
	timr->it_interval = 0;

	/*
	 * Careful here. The expiry callback might be running concurrently
	 * on another CPU; if cancellation fails, the caller has to drop
	 * the lock and retry.
	 */
	if (kc->timer_try_to_cancel(timr) < 0)
		return TIMER_RETRY;

	timr->it_active = 0;
	timr->it_requeue_pending = (timr->it_requeue_pending + 2) &
		~REQUEUE_PENDING;
	timr->it_overrun_last = 0;

	/* Switch off the timer when it_value is zero */
	if (!new_setting->it_value.tv_sec && !new_setting->it_value.tv_nsec)
		return 0;

	timr->it_interval = timespec64_to_ktime(new_setting->it_interval);
	expires = timespec64_to_ktime(new_setting->it_value);
	sigev_none = timr->it_sigev_notify == SIGEV_NONE;

	kc->timer_arm(timr, expires, flags & TIMER_ABSTIME, sigev_none);
	timr->it_active = !sigev_none;
	return 0;
}

static int do_timer_settime(timer_t timer_id, int flags,
			    struct itimerspec64 *new_spec64,
			    struct itimerspec64 *old_spec64)
{
	const struct k_clock *kc;
	struct k_itimer *timr;
	unsigned long flag;
	int error = 0;

	if (!timespec64_valid(&new_spec64->it_interval) ||
	    !timespec64_valid(&new_spec64->it_value))
		return -EINVAL;

	if (old_spec64)
		memset(old_spec64, 0, sizeof(*old_spec64));
retry:
	timr = lock_timer(timer_id, &flag);
	if (!timr)
		return -EINVAL;

	kc = timr->kclock;
	if (WARN_ON_ONCE(!kc || !kc->timer_set))
		error = -EINVAL;
	else
		error = kc->timer_set(timr, flags, new_spec64, old_spec64);

	unlock_timer(timr, flag);
	if (error == TIMER_RETRY) {
		old_spec64 = NULL;
		goto retry;
	}

	return error;
}

SYSCALL_DEFINE4(timer_settime, timer_t, timer_id, int, flags,
		const struct __kernel_itimerspec __user *, new_setting,
		struct __kernel_itimerspec __user *, old_setting)
{
	struct itimerspec64 new_spec, old_spec;
	struct itimerspec64 *rtn = old_setting ? &old_spec : NULL;
	int error = 0;

	if (!new_setting)
		return -EINVAL;

	if (get_itimerspec64(&new_spec, new_setting))
		return -EFAULT;

	error = do_timer_settime(timer_id, flags, &new_spec, rtn);
	if (!error && old_setting) {
		if (put_itimerspec64(&old_spec, old_setting))
			error = -EFAULT;
	}
	return error;
}

#ifdef CONFIG_COMPAT_32BIT_TIME
COMPAT_SYSCALL_DEFINE4(timer_settime, timer_t, timer_id, int, flags,
		       struct old_itimerspec32 __user *, new,
		       struct old_itimerspec32 __user *, old)
{
	struct itimerspec64 new_spec, old_spec;
	struct itimerspec64 *rtn = old ? &old_spec : NULL;
	int error = 0;

	if (!new)
		return -EINVAL;
	if (get_old_itimerspec32(&new_spec, new))
		return -EFAULT;

	error = do_timer_settime(timer_id, flags, &new_spec, rtn);
	if (!error && old) {
		if (put_old_itimerspec32(&old_spec, old))
			error = -EFAULT;
	}
	return error;
}
#endif

int common_timer_del(struct k_itimer *timer)
{
	const struct k_clock *kc = timer->kclock;

	timer->it_interval = 0;
	if (kc->timer_try_to_cancel(timer) < 0)
		return TIMER_RETRY;
	timer->it_active = 0;
	return 0;
}

static inline int timer_delete_hook(struct k_itimer *timer)
{
	const struct k_clock *kc = timer->kclock;

	if (WARN_ON_ONCE(!kc || !kc->timer_del))
		return -EINVAL;
	return kc->timer_del(timer);
}

/* Delete a POSIX.1b interval timer. */
SYSCALL_DEFINE1(timer_delete, timer_t, timer_id)
{
	struct k_itimer *timer;
	unsigned long flags;

retry_delete:
	timer = lock_timer(timer_id, &flags);
	if (!timer)
		return -EINVAL;

	if (timer_delete_hook(timer) == TIMER_RETRY) {
		unlock_timer(timer, flags);
		goto retry_delete;
	}

	spin_lock(&current->sighand->siglock);
	list_del(&timer->list);
	spin_unlock(&current->sighand->siglock);

	/*
	 * Clearing it_signal prevents __lock_timer() from handing the timer
	 * out again while it is being torn down.
	 */
	timer->it_signal = NULL;

	unlock_timer(timer, flags);
	release_posix_timer(timer, IT_ID_SET);
	return 0;
}

/* Delete a timer on behalf of the exiting process; used by exit_itimers(). */
static void itimer_delete(struct k_itimer *timer)
{
	unsigned long flags;

retry_delete:
	spin_lock_irqsave(&timer->it_lock, flags);

	if (timer_delete_hook(timer) == TIMER_RETRY) {
		unlock_timer(timer, flags);
		goto retry_delete;
	}
	list_del(&timer->list);

	/*
	 * Clearing it_signal prevents __lock_timer() from handing the timer
	 * out again while it is being torn down.
	 */
	timer->it_signal = NULL;

	unlock_timer(timer, flags);
	release_posix_timer(timer, IT_ID_SET);
}
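
/*
 * Dispose of every remaining POSIX timer on a signal_struct.  Called on
 * process teardown, when the signal_struct is no longer shared.
 */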
void exit_itimers(struct signal_struct *sig)
{
	struct k_itimer *tmr;

	while (!list_empty(&sig->posix_timers)) {
		tmr = list_entry(sig->posix_timers.next, struct k_itimer, list);
		itimer_delete(tmr);
	}
}

SYSCALL_DEFINE2(clock_settime, const clockid_t, which_clock,
		const struct __kernel_timespec __user *, tp)
{
	const struct k_clock *kc = clockid_to_kclock(which_clock);
	struct timespec64 new_tp;

	if (!kc || !kc->clock_set)
		return -EINVAL;

	if (get_timespec64(&new_tp, tp))
		return -EFAULT;

	return kc->clock_set(which_clock, &new_tp);
}

SYSCALL_DEFINE2(clock_gettime, const clockid_t, which_clock,
		struct __kernel_timespec __user *, tp)
{
	const struct k_clock *kc = clockid_to_kclock(which_clock);
	struct timespec64 kernel_tp;
	int error;

	if (!kc)
		return -EINVAL;

	error = kc->clock_get(which_clock, &kernel_tp);

	if (!error && put_timespec64(&kernel_tp, tp))
		error = -EFAULT;

	return error;
}

SYSCALL_DEFINE2(clock_adjtime, const clockid_t, which_clock,
		struct timex __user *, utx)
{
	const struct k_clock *kc = clockid_to_kclock(which_clock);
	struct timex ktx;
	int err;

	if (!kc)
		return -EINVAL;
	if (!kc->clock_adj)
		return -EOPNOTSUPP;

	if (copy_from_user(&ktx, utx, sizeof(ktx)))
		return -EFAULT;

	err = kc->clock_adj(which_clock, &ktx);

	if (err >= 0 && copy_to_user(utx, &ktx, sizeof(ktx)))
		return -EFAULT;

	return err;
}

SYSCALL_DEFINE2(clock_getres, const clockid_t, which_clock,
		struct __kernel_timespec __user *, tp)
{
	const struct k_clock *kc = clockid_to_kclock(which_clock);
	struct timespec64 rtn_tp;
	int error;

	if (!kc)
		return -EINVAL;

	error = kc->clock_getres(which_clock, &rtn_tp);

	if (!error && tp && put_timespec64(&rtn_tp, tp))
		error = -EFAULT;

	return error;
}

#ifdef CONFIG_COMPAT_32BIT_TIME

COMPAT_SYSCALL_DEFINE2(clock_settime, clockid_t, which_clock,
		       struct old_timespec32 __user *, tp)
{
	const struct k_clock *kc = clockid_to_kclock(which_clock);
	struct timespec64 ts;

	if (!kc || !kc->clock_set)
		return -EINVAL;

	if (get_old_timespec32(&ts, tp))
		return -EFAULT;

	return kc->clock_set(which_clock, &ts);
}

COMPAT_SYSCALL_DEFINE2(clock_gettime, clockid_t, which_clock,
		       struct old_timespec32 __user *, tp)
{
	const struct k_clock *kc = clockid_to_kclock(which_clock);
	struct timespec64 ts;
	int err;

	if (!kc)
		return -EINVAL;

	err = kc->clock_get(which_clock, &ts);

	if (!err && put_old_timespec32(&ts, tp))
		err = -EFAULT;

	return err;
}

#endif

#ifdef CONFIG_COMPAT

COMPAT_SYSCALL_DEFINE2(clock_adjtime, clockid_t, which_clock,
		       struct compat_timex __user *, utp)
{
	const struct k_clock *kc = clockid_to_kclock(which_clock);
	struct timex ktx;
	int err;

	if (!kc)
		return -EINVAL;
	if (!kc->clock_adj)
		return -EOPNOTSUPP;

	err = compat_get_timex(&ktx, utp);
	if (err)
		return err;

	err = kc->clock_adj(which_clock, &ktx);

	if (err >= 0)
		err = compat_put_timex(utp, &ktx);

	return err;
}

#endif

#ifdef CONFIG_COMPAT_32BIT_TIME

COMPAT_SYSCALL_DEFINE2(clock_getres, clockid_t, which_clock,
		       struct old_timespec32 __user *, tp)
{
	const struct k_clock *kc = clockid_to_kclock(which_clock);
	struct timespec64 ts;
	int err;

	if (!kc)
		return -EINVAL;

	err = kc->clock_getres(which_clock, &ts);
	if (!err && tp && put_old_timespec32(&ts, tp))
		return -EFAULT;

	return err;
}

#endif

static int common_nsleep(const clockid_t which_clock, int flags,
			 const struct timespec64 *rqtp)
{
	return hrtimer_nanosleep(rqtp, flags & TIMER_ABSTIME ?
				 HRTIMER_MODE_ABS : HRTIMER_MODE_REL,
				 which_clock);
}

SYSCALL_DEFINE4(clock_nanosleep, const clockid_t, which_clock, int, flags,
		const struct __kernel_timespec __user *, rqtp,
		struct __kernel_timespec __user *, rmtp)
{
	const struct k_clock *kc = clockid_to_kclock(which_clock);
	struct timespec64 t;

	if (!kc)
		return -EINVAL;
	if (!kc->nsleep)
		return -EOPNOTSUPP;

	if (get_timespec64(&t, rqtp))
		return -EFAULT;

	if (!timespec64_valid(&t))
		return -EINVAL;
	if (flags & TIMER_ABSTIME)
		rmtp = NULL;
	current->restart_block.nanosleep.type = rmtp ? TT_NATIVE : TT_NONE;
	current->restart_block.nanosleep.rmtp = rmtp;

	return kc->nsleep(which_clock, flags, &t);
}

#ifdef CONFIG_COMPAT_32BIT_TIME

COMPAT_SYSCALL_DEFINE4(clock_nanosleep, clockid_t, which_clock, int, flags,
		       struct old_timespec32 __user *, rqtp,
		       struct old_timespec32 __user *, rmtp)
{
	const struct k_clock *kc = clockid_to_kclock(which_clock);
	struct timespec64 t;

	if (!kc)
		return -EINVAL;
	if (!kc->nsleep)
		return -EOPNOTSUPP;

	if (get_old_timespec32(&t, rqtp))
		return -EFAULT;

	if (!timespec64_valid(&t))
		return -EINVAL;
	if (flags & TIMER_ABSTIME)
		rmtp = NULL;
	current->restart_block.nanosleep.type = rmtp ? TT_COMPAT : TT_NONE;
	current->restart_block.nanosleep.compat_rmtp = rmtp;

	return kc->nsleep(which_clock, flags, &t);
}

#endif

static const struct k_clock clock_realtime = {
	.clock_getres = posix_get_hrtimer_res,
	.clock_get = posix_clock_realtime_get,
	.clock_set = posix_clock_realtime_set,
	.clock_adj = posix_clock_realtime_adj,
	.nsleep = common_nsleep,
	.timer_create = common_timer_create,
	.timer_set = common_timer_set,
	.timer_get = common_timer_get,
	.timer_del = common_timer_del,
	.timer_rearm = common_hrtimer_rearm,
	.timer_forward = common_hrtimer_forward,
	.timer_remaining = common_hrtimer_remaining,
	.timer_try_to_cancel = common_hrtimer_try_to_cancel,
	.timer_arm = common_hrtimer_arm,
};

static const struct k_clock clock_monotonic = {
	.clock_getres = posix_get_hrtimer_res,
	.clock_get = posix_ktime_get_ts,
	.nsleep = common_nsleep,
	.timer_create = common_timer_create,
	.timer_set = common_timer_set,
	.timer_get = common_timer_get,
	.timer_del = common_timer_del,
	.timer_rearm = common_hrtimer_rearm,
	.timer_forward = common_hrtimer_forward,
	.timer_remaining = common_hrtimer_remaining,
	.timer_try_to_cancel = common_hrtimer_try_to_cancel,
	.timer_arm = common_hrtimer_arm,
};

static const struct k_clock clock_monotonic_raw = {
	.clock_getres = posix_get_hrtimer_res,
	.clock_get = posix_get_monotonic_raw,
};

static const struct k_clock clock_realtime_coarse = {
	.clock_getres = posix_get_coarse_res,
	.clock_get = posix_get_realtime_coarse,
};

static const struct k_clock clock_monotonic_coarse = {
	.clock_getres = posix_get_coarse_res,
	.clock_get = posix_get_monotonic_coarse,
};

static const struct k_clock clock_tai = {
	.clock_getres = posix_get_hrtimer_res,
	.clock_get = posix_get_tai,
	.nsleep = common_nsleep,
	.timer_create = common_timer_create,
	.timer_set = common_timer_set,
	.timer_get = common_timer_get,
	.timer_del = common_timer_del,
	.timer_rearm = common_hrtimer_rearm,
	.timer_forward = common_hrtimer_forward,
	.timer_remaining = common_hrtimer_remaining,
	.timer_try_to_cancel = common_hrtimer_try_to_cancel,
	.timer_arm = common_hrtimer_arm,
};

static const struct k_clock clock_boottime = {
	.clock_getres = posix_get_hrtimer_res,
	.clock_get = posix_get_boottime,
	.nsleep = common_nsleep,
	.timer_create = common_timer_create,
	.timer_set = common_timer_set,
	.timer_get = common_timer_get,
	.timer_del = common_timer_del,
	.timer_rearm = common_hrtimer_rearm,
	.timer_forward = common_hrtimer_forward,
	.timer_remaining = common_hrtimer_remaining,
	.timer_try_to_cancel = common_hrtimer_try_to_cancel,
	.timer_arm = common_hrtimer_arm,
};

static const struct k_clock * const posix_clocks[] = {
	[CLOCK_REALTIME] = &clock_realtime,
	[CLOCK_MONOTONIC] = &clock_monotonic,
	[CLOCK_PROCESS_CPUTIME_ID] = &clock_process,
	[CLOCK_THREAD_CPUTIME_ID] = &clock_thread,
	[CLOCK_MONOTONIC_RAW] = &clock_monotonic_raw,
	[CLOCK_REALTIME_COARSE] = &clock_realtime_coarse,
	[CLOCK_MONOTONIC_COARSE] = &clock_monotonic_coarse,
	[CLOCK_BOOTTIME] = &clock_boottime,
	[CLOCK_REALTIME_ALARM] = &alarm_clock,
	[CLOCK_BOOTTIME_ALARM] = &alarm_clock,
	[CLOCK_TAI] = &clock_tai,
};

static const struct k_clock *clockid_to_kclock(const clockid_t id)
{
	clockid_t idx = id;

	if (id < 0) {
		return (id & CLOCKFD_MASK) == CLOCKFD ?
			&clock_posix_dynamic : &clock_posix_cpu;
	}

	if (id >= ARRAY_SIZE(posix_clocks))
		return NULL;

	return posix_clocks[array_index_nospec(idx, ARRAY_SIZE(posix_clocks))];
}