1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33#include <linux/mm.h>
34#include <linux/interrupt.h>
35#include <linux/slab.h>
36#include <linux/time.h>
37#include <linux/mutex.h>
38
39#include <asm/uaccess.h>
40#include <linux/list.h>
41#include <linux/init.h>
42#include <linux/compiler.h>
43#include <linux/hash.h>
44#include <linux/posix-clock.h>
45#include <linux/posix-timers.h>
46#include <linux/syscalls.h>
47#include <linux/wait.h>
48#include <linux/workqueue.h>
49#include <linux/export.h>
50#include <linux/hashtable.h>
51
52
53
54
55
56
57
58
59
60
61
62
63
/* Slab cache for struct k_itimer; created in init_posix_timers(). */
static struct kmem_cache *posix_timers_cache;

/*
 * Timers are looked up by {signal_struct, timer id} in a global hash
 * table (2^9 buckets).  hash_lock serializes insert/remove; lookups
 * are RCU protected.
 */
static DEFINE_HASHTABLE(posix_timers_hashtable, 9);
static DEFINE_SPINLOCK(hash_lock);

/*
 * SIGEV_THREAD_ID is or-ed on top of the other SIGEV_* values, so it
 * must occupy a bit of its own.
 */
#if SIGEV_THREAD_ID != (SIGEV_THREAD_ID & \
                       ~(SIGEV_SIGNAL | SIGEV_NONE | SIGEV_THREAD))
#error "SIGEV_THREAD_ID must not share bit with other SIGEV values!"
#endif

/*
 * Error returned when a clock does not support nanosleep; use ENOTSUP
 * when available, otherwise fall back to EOPNOTSUPP.
 */
#ifndef ENOTSUP
# define ENANOSLEEP_NOTSUP EOPNOTSUPP
#else
# define ENANOSLEEP_NOTSUP ENOTSUP
#endif
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
/* Table of registered clocks, indexed by clock id. */
static struct k_clock posix_clocks[MAX_CLOCKS];

/* Default hrtimer based implementations shared by most clocks: */
static int common_nsleep(const clockid_t, int flags, struct timespec *t,
			 struct timespec __user *rmtp);
static int common_timer_create(struct k_itimer *new_timer);
static void common_timer_get(struct k_itimer *, struct itimerspec *);
static int common_timer_set(struct k_itimer *, int,
			    struct itimerspec *, struct itimerspec *);
static int common_timer_del(struct k_itimer *timer);

static enum hrtimer_restart posix_timer_fn(struct hrtimer *data);

static struct k_itimer *__lock_timer(timer_t timer_id, unsigned long *flags);

/*
 * Look up a timer by id and lock it.  The __cond_lock() annotation
 * tells sparse that it_lock is conditionally acquired here.
 */
#define lock_timer(tid, flags)						   \
({	struct k_itimer *__timr;					   \
	__cond_lock(&__timr->it_lock, __timr = __lock_timer(tid, flags)); \
	__timr;								   \
})
147
148static int hash(struct signal_struct *sig, unsigned int nr)
149{
150 return hash_32(hash32_ptr(sig) ^ nr, HASH_BITS(posix_timers_hashtable));
151}
152
/*
 * Walk one hash bucket looking for the timer with the given id that
 * belongs to @sig.  Caller must hold hash_lock or rcu_read_lock().
 */
static struct k_itimer *__posix_timers_find(struct hlist_head *head,
					    struct signal_struct *sig,
					    timer_t id)
{
	struct k_itimer *timer;

	hlist_for_each_entry_rcu(timer, head, t_hash) {
		if ((timer->it_signal == sig) && (timer->it_id == id))
			return timer;
	}
	return NULL;
}
165
166static struct k_itimer *posix_timer_by_id(timer_t id)
167{
168 struct signal_struct *sig = current->signal;
169 struct hlist_head *head = &posix_timers_hashtable[hash(sig, id)];
170
171 return __posix_timers_find(head, sig, id);
172}
173
/*
 * Insert a new timer into the hash table and allocate it an id.
 *
 * Ids come from the per-process posix_timer_id cursor which wraps at
 * INT_MAX.  Returns the new (non-negative) id, or -EAGAIN if the
 * whole id space was walked without finding a free slot.
 */
static int posix_timer_add(struct k_itimer *timer)
{
	struct signal_struct *sig = current->signal;
	int first_free_id = sig->posix_timer_id;
	struct hlist_head *head;
	int ret = -ENOENT;

	do {
		spin_lock(&hash_lock);
		head = &posix_timers_hashtable[hash(sig, sig->posix_timer_id)];
		if (!__posix_timers_find(head, sig, sig->posix_timer_id)) {
			hlist_add_head_rcu(&timer->t_hash, head);
			ret = sig->posix_timer_id;
		}
		/* Advance the cursor, wrapping to 0 on overflow. */
		if (++sig->posix_timer_id < 0)
			sig->posix_timer_id = 0;
		if ((sig->posix_timer_id == first_free_id) && (ret == -ENOENT))
			/* Loop over all possible ids completed */
			ret = -EAGAIN;
		spin_unlock(&hash_lock);
	} while (ret == -ENOENT);
	return ret;
}
197
/* Drop it_lock and restore the interrupt state saved by lock_timer(). */
static inline void unlock_timer(struct k_itimer *timr, unsigned long flags)
{
	spin_unlock_irqrestore(&timr->it_lock, flags);
}
202
203
/* CLOCK_REALTIME: read the wall clock time. */
static int posix_clock_realtime_get(clockid_t which_clock, struct timespec *tp)
{
	ktime_get_real_ts(tp);
	return 0;
}

/* CLOCK_REALTIME: set the wall clock time. */
static int posix_clock_realtime_set(const clockid_t which_clock,
				    const struct timespec *tp)
{
	return do_sys_settimeofday(tp, NULL);
}

/* CLOCK_REALTIME: adjust the clock via the adjtimex/NTP interface. */
static int posix_clock_realtime_adj(const clockid_t which_clock,
				    struct timex *t)
{
	return do_adjtimex(t);
}
222
223
224
225
/* CLOCK_MONOTONIC: read the monotonic clock. */
static int posix_ktime_get_ts(clockid_t which_clock, struct timespec *tp)
{
	ktime_get_ts(tp);
	return 0;
}

/* CLOCK_MONOTONIC_RAW: monotonic time not subject to NTP adjustment. */
static int posix_get_monotonic_raw(clockid_t which_clock, struct timespec *tp)
{
	getrawmonotonic(tp);
	return 0;
}

/* CLOCK_REALTIME_COARSE: wall time at tick granularity, cheap to read. */
static int posix_get_realtime_coarse(clockid_t which_clock, struct timespec *tp)
{
	*tp = current_kernel_time();
	return 0;
}

/* CLOCK_MONOTONIC_COARSE: monotonic time at tick granularity. */
static int posix_get_monotonic_coarse(clockid_t which_clock,
						struct timespec *tp)
{
	*tp = get_monotonic_coarse();
	return 0;
}

/* Resolution of the coarse clocks: one low-resolution tick. */
static int posix_get_coarse_res(const clockid_t which_clock, struct timespec *tp)
{
	*tp = ktime_to_timespec(KTIME_LOW_RES);
	return 0;
}

/* CLOCK_BOOTTIME: monotonic time including time spent in suspend. */
static int posix_get_boottime(const clockid_t which_clock, struct timespec *tp)
{
	get_monotonic_boottime(tp);
	return 0;
}

/* CLOCK_TAI: international atomic time. */
static int posix_get_tai(clockid_t which_clock, struct timespec *tp)
{
	timekeeping_clocktai(tp);
	return 0;
}
272
273
274
275
/*
 * Register the standard clocks and create the k_itimer slab cache.
 * Runs once at boot via __initcall().
 */
static __init int init_posix_timers(void)
{
	struct k_clock clock_realtime = {
		.clock_getres	= hrtimer_get_res,
		.clock_get	= posix_clock_realtime_get,
		.clock_set	= posix_clock_realtime_set,
		.clock_adj	= posix_clock_realtime_adj,
		.nsleep		= common_nsleep,
		.nsleep_restart	= hrtimer_nanosleep_restart,
		.timer_create	= common_timer_create,
		.timer_set	= common_timer_set,
		.timer_get	= common_timer_get,
		.timer_del	= common_timer_del,
	};
	struct k_clock clock_monotonic = {
		.clock_getres	= hrtimer_get_res,
		.clock_get	= posix_ktime_get_ts,
		.nsleep		= common_nsleep,
		.nsleep_restart	= hrtimer_nanosleep_restart,
		.timer_create	= common_timer_create,
		.timer_set	= common_timer_set,
		.timer_get	= common_timer_get,
		.timer_del	= common_timer_del,
	};
	/* The raw and coarse clocks are read-only: no timers, no sleeps. */
	struct k_clock clock_monotonic_raw = {
		.clock_getres	= hrtimer_get_res,
		.clock_get	= posix_get_monotonic_raw,
	};
	struct k_clock clock_realtime_coarse = {
		.clock_getres	= posix_get_coarse_res,
		.clock_get	= posix_get_realtime_coarse,
	};
	struct k_clock clock_monotonic_coarse = {
		.clock_getres	= posix_get_coarse_res,
		.clock_get	= posix_get_monotonic_coarse,
	};
	struct k_clock clock_tai = {
		.clock_getres	= hrtimer_get_res,
		.clock_get	= posix_get_tai,
		.nsleep		= common_nsleep,
		.nsleep_restart	= hrtimer_nanosleep_restart,
		.timer_create	= common_timer_create,
		.timer_set	= common_timer_set,
		.timer_get	= common_timer_get,
		.timer_del	= common_timer_del,
	};
	struct k_clock clock_boottime = {
		.clock_getres	= hrtimer_get_res,
		.clock_get	= posix_get_boottime,
		.nsleep		= common_nsleep,
		.nsleep_restart	= hrtimer_nanosleep_restart,
		.timer_create	= common_timer_create,
		.timer_set	= common_timer_set,
		.timer_get	= common_timer_get,
		.timer_del	= common_timer_del,
	};

	posix_timers_register_clock(CLOCK_REALTIME, &clock_realtime);
	posix_timers_register_clock(CLOCK_MONOTONIC, &clock_monotonic);
	posix_timers_register_clock(CLOCK_MONOTONIC_RAW, &clock_monotonic_raw);
	posix_timers_register_clock(CLOCK_REALTIME_COARSE, &clock_realtime_coarse);
	posix_timers_register_clock(CLOCK_MONOTONIC_COARSE, &clock_monotonic_coarse);
	posix_timers_register_clock(CLOCK_BOOTTIME, &clock_boottime);
	posix_timers_register_clock(CLOCK_TAI, &clock_tai);

	/* SLAB_PANIC: boot cannot proceed without this cache. */
	posix_timers_cache = kmem_cache_create("posix_timers_cache",
					sizeof (struct k_itimer), 0, SLAB_PANIC,
					NULL);
	return 0;
}

__initcall(init_posix_timers);
348
/*
 * Rearm an interval timer for its next expiry after its signal was
 * delivered.  Advances the hrtimer past "now", accumulating missed
 * periods into it_overrun, and publishes the previous overrun count
 * in it_overrun_last.  Called with it_lock held.
 */
static void schedule_next_timer(struct k_itimer *timr)
{
	struct hrtimer *timer = &timr->it.real.timer;

	/* One-shot timers are not rearmed. */
	if (timr->it.real.interval.tv64 == 0)
		return;

	timr->it_overrun += (unsigned int) hrtimer_forward(timer,
						timer->base->get_time(),
						timr->it.real.interval);

	timr->it_overrun_last = timr->it_overrun;
	timr->it_overrun = -1;
	/* Odd it_requeue_pending marks a rearm in flight. */
	++timr->it_requeue_pending;
	hrtimer_restart(timer);
}
365
366
367
368
369
370
371
372
373
374
375
376
/*
 * Called from the signal delivery path when a timer signal has been
 * collected by the task.  If this delivery still corresponds to the
 * latest expiry (it_requeue_pending matches the si_sys_private cookie
 * stored when the signal was queued), rearm the timer and report the
 * overrun count to the caller via si_overrun.
 */
void do_schedule_next_timer(struct siginfo *info)
{
	struct k_itimer *timr;
	unsigned long flags;

	timr = lock_timer(info->si_tid, &flags);

	if (timr && timr->it_requeue_pending == info->si_sys_private) {
		if (timr->it_clock < 0)
			/* Negative clock ids are CPU clocks. */
			posix_cpu_timer_schedule(timr);
		else
			schedule_next_timer(timr);

		info->si_overrun += timr->it_overrun_last;
	}

	if (timr)
		unlock_timer(timr, flags);
}
396
/*
 * Queue the timer's signal to its owner.
 *
 * The si_private cookie is stored in the queued siginfo so that
 * do_schedule_next_timer() can tell whether the expiry it sees is
 * still the most recent one.  Returns non-zero when send_sigqueue()
 * reported that the signal was not queued (e.g. it is being ignored),
 * zero on success.  Called with timr->it_lock held.
 */
int posix_timer_event(struct k_itimer *timr, int si_private)
{
	struct task_struct *task;
	int shared, ret = -1;

	timr->sigq->info.si_sys_private = si_private;

	rcu_read_lock();
	task = pid_task(timr->it_pid, PIDTYPE_PID);
	if (task) {
		/* SIGEV_THREAD_ID targets a single thread, otherwise
		 * the signal is sent to the whole thread group. */
		shared = !(timr->it_sigev_notify & SIGEV_THREAD_ID);
		ret = send_sigqueue(timr->sigq, task, shared);
	}
	rcu_read_unlock();

	/* ret > 0 means the signal was dropped rather than queued. */
	return ret > 0;
}
EXPORT_SYMBOL_GPL(posix_timer_event);
425
426
427
428
429
430
431
432
/*
 * hrtimer expiry callback for posix interval timers: queue the signal
 * and decide whether the hrtimer should be restarted.
 *
 * Normally an interval timer is rearmed later, from the signal
 * delivery path (do_schedule_next_timer()).  Only when the signal
 * could not be queued do we rearm it here ourselves, since delivery
 * will never happen.
 */
static enum hrtimer_restart posix_timer_fn(struct hrtimer *timer)
{
	struct k_itimer *timr;
	unsigned long flags;
	int si_private = 0;
	enum hrtimer_restart ret = HRTIMER_NORESTART;

	timr = container_of(timer, struct k_itimer, it.real.timer);
	spin_lock_irqsave(&timr->it_lock, flags);

	/* For interval timers hand the requeue cookie to the signal. */
	if (timr->it.real.interval.tv64 != 0)
		si_private = ++timr->it_requeue_pending;

	if (posix_timer_event(timr, si_private)) {
		/*
		 * The signal was not queued; for an interval timer the
		 * rearm that would normally happen at signal delivery
		 * must be done here instead.
		 */
		if (timr->it.real.interval.tv64 != 0) {
			ktime_t now = hrtimer_cb_get_time(timer);

			/*
			 * With high resolution timers configured, pad
			 * "now" by a jiffie for intervals shorter than
			 * a tick before forwarding the timer.
			 * NOTE(review): presumably this avoids
			 * re-expiring immediately for sub-tick
			 * intervals under low-res operation — confirm
			 * against the upstream comment history.
			 */
#ifdef CONFIG_HIGH_RES_TIMERS
			{
				ktime_t kj = ktime_set(0, NSEC_PER_SEC / HZ);

				if (timr->it.real.interval.tv64 < kj.tv64)
					now = ktime_add(now, kj);
			}
#endif
			timr->it_overrun += (unsigned int)
				hrtimer_forward(timer, now,
						timr->it.real.interval);
			ret = HRTIMER_RESTART;
			++timr->it_requeue_pending;
		}
	}

	unlock_timer(timr, flags);
	return ret;
}
496
/*
 * Validate a user supplied sigevent and resolve the target task.
 *
 * Returns the struct pid to notify (the group leader, or a specific
 * thread for SIGEV_THREAD_ID), or NULL when the event is malformed:
 * unknown/foreign thread id, SIGEV_THREAD_ID combined with anything
 * but SIGEV_SIGNAL, or an out-of-range signal number.  Called under
 * rcu_read_lock().
 */
static struct pid *good_sigevent(sigevent_t * event)
{
	struct task_struct *rtn = current->group_leader;

	if ((event->sigev_notify & SIGEV_THREAD_ID ) &&
		(!(rtn = find_task_by_vpid(event->sigev_notify_thread_id)) ||
		 !same_thread_group(rtn, current) ||
		 (event->sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_SIGNAL))
		return NULL;

	/* Except for SIGEV_NONE, a valid signal number is required. */
	if (((event->sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_NONE) &&
	    ((event->sigev_signo <= 0) || (event->sigev_signo > SIGRTMAX)))
		return NULL;

	return task_pid(rtn);
}
513
/*
 * Register a clock implementation for a clock id.  The k_clock is
 * copied into the posix_clocks[] table.  A clock must provide at
 * least clock_get() and clock_getres(); invalid registrations are
 * rejected with a warning.
 */
void posix_timers_register_clock(const clockid_t clock_id,
				 struct k_clock *new_clock)
{
	if ((unsigned) clock_id >= MAX_CLOCKS) {
		printk(KERN_WARNING "POSIX clock register failed for clock_id %d\n",
		       clock_id);
		return;
	}

	if (!new_clock->clock_get) {
		printk(KERN_WARNING "POSIX clock id %d lacks clock_get()\n",
		       clock_id);
		return;
	}
	if (!new_clock->clock_getres) {
		printk(KERN_WARNING "POSIX clock id %d lacks clock_getres()\n",
		       clock_id);
		return;
	}

	posix_clocks[clock_id] = *new_clock;
}
EXPORT_SYMBOL_GPL(posix_timers_register_clock);
537
538static struct k_itimer * alloc_posix_timer(void)
539{
540 struct k_itimer *tmr;
541 tmr = kmem_cache_zalloc(posix_timers_cache, GFP_KERNEL);
542 if (!tmr)
543 return tmr;
544 if (unlikely(!(tmr->sigq = sigqueue_alloc()))) {
545 kmem_cache_free(posix_timers_cache, tmr);
546 return NULL;
547 }
548 memset(&tmr->sigq->info, 0, sizeof(siginfo_t));
549 return tmr;
550}
551
/* RCU callback: actually free a timer after the grace period. */
static void k_itimer_rcu_free(struct rcu_head *head)
{
	struct k_itimer *tmr = container_of(head, struct k_itimer, it.rcu);

	kmem_cache_free(posix_timers_cache, tmr);
}
558
#define IT_ID_SET	1
#define IT_ID_NOT_SET	0
/*
 * Tear down a timer: unhash it (if it ever got an id), drop its pid
 * reference and sigqueue, and free the k_itimer after an RCU grace
 * period so lockless hash walkers never see freed memory.
 */
static void release_posix_timer(struct k_itimer *tmr, int it_id_set)
{
	if (it_id_set) {
		unsigned long flags;
		spin_lock_irqsave(&hash_lock, flags);
		hlist_del_rcu(&tmr->t_hash);
		spin_unlock_irqrestore(&hash_lock, flags);
	}
	put_pid(tmr->it_pid);
	sigqueue_free(tmr->sigq);
	call_rcu(&tmr->it.rcu, k_itimer_rcu_free);
}
573
/*
 * Map a clock id to its k_clock implementation.  Negative ids encode
 * either dynamic (fd based) clocks or per-process/thread CPU clocks;
 * non-negative ids index posix_clocks[].  Returns NULL for an
 * unregistered or out-of-range id.
 */
static struct k_clock *clockid_to_kclock(const clockid_t id)
{
	if (id < 0)
		return (id & CLOCKFD_MASK) == CLOCKFD ?
			&clock_posix_dynamic : &clock_posix_cpu;

	if (id >= MAX_CLOCKS || !posix_clocks[id].clock_getres)
		return NULL;
	return &posix_clocks[id];
}
584
/* Default timer_create(): just initialize the embedded hrtimer. */
static int common_timer_create(struct k_itimer *new_timer)
{
	hrtimer_init(&new_timer->it.real.timer, new_timer->it_clock, 0);
	return 0;
}
590
591
592
/* Create a POSIX.1b interval timer. */
SYSCALL_DEFINE3(timer_create, const clockid_t, which_clock,
		struct sigevent __user *, timer_event_spec,
		timer_t __user *, created_timer_id)
{
	struct k_clock *kc = clockid_to_kclock(which_clock);
	struct k_itimer *new_timer;
	int error, new_timer_id;
	sigevent_t event;
	int it_id_set = IT_ID_NOT_SET;

	if (!kc)
		return -EINVAL;
	if (!kc->timer_create)
		return -EOPNOTSUPP;

	new_timer = alloc_posix_timer();
	if (unlikely(!new_timer))
		return -EAGAIN;

	spin_lock_init(&new_timer->it_lock);
	/* Hash the timer and allocate its id. */
	new_timer_id = posix_timer_add(new_timer);
	if (new_timer_id < 0) {
		error = new_timer_id;
		goto out;
	}

	it_id_set = IT_ID_SET;
	new_timer->it_id = (timer_t) new_timer_id;
	new_timer->it_clock = which_clock;
	new_timer->it_overrun = -1;

	if (timer_event_spec) {
		if (copy_from_user(&event, timer_event_spec, sizeof (event))) {
			error = -EFAULT;
			goto out;
		}
		/* Resolve and pin the task to be notified. */
		rcu_read_lock();
		new_timer->it_pid = get_pid(good_sigevent(&event));
		rcu_read_unlock();
		if (!new_timer->it_pid) {
			error = -EINVAL;
			goto out;
		}
	} else {
		/* No sigevent given: default to SIGALRM at the group. */
		event.sigev_notify = SIGEV_SIGNAL;
		event.sigev_signo = SIGALRM;
		event.sigev_value.sival_int = new_timer->it_id;
		new_timer->it_pid = get_pid(task_tgid(current));
	}

	new_timer->it_sigev_notify     = event.sigev_notify;
	new_timer->sigq->info.si_signo = event.sigev_signo;
	new_timer->sigq->info.si_value = event.sigev_value;
	new_timer->sigq->info.si_tid   = new_timer->it_id;
	new_timer->sigq->info.si_code  = SI_TIMER;

	if (copy_to_user(created_timer_id,
			 &new_timer_id, sizeof (new_timer_id))) {
		error = -EFAULT;
		goto out;
	}

	error = kc->timer_create(new_timer);
	if (error)
		goto out;

	/* Publish the timer on the process list last, under siglock. */
	spin_lock_irq(&current->sighand->siglock);
	new_timer->it_signal = current->signal;
	list_add(&new_timer->list, &current->signal->posix_timers);
	spin_unlock_irq(&current->sighand->siglock);

	return 0;
	/*
	 * In the case of the timer belonging to another task, after
	 * the task is unlocked, the timer is owned by the other task
	 * and may cease to exist at any time.  Don't use or modify
	 * new_timer after the unlock call.
	 */
out:
	release_posix_timer(new_timer, it_id_set);
	return error;
}
675
676
677
678
679
680
681
682
/*
 * Look up a timer by id, verify it belongs to the current process and
 * lock it.  On success the timer is returned with it_lock held and
 * interrupts disabled (state saved in *flags); otherwise NULL.
 */
static struct k_itimer *__lock_timer(timer_t timer_id, unsigned long *flags)
{
	struct k_itimer *timr;

	/*
	 * timer_t can be any integer type >= int; ids outside the
	 * positive int range can never match and must fail early.
	 */
	if ((unsigned long long)timer_id > INT_MAX)
		return NULL;

	rcu_read_lock();
	timr = posix_timer_by_id(timer_id);
	if (timr) {
		spin_lock_irqsave(&timr->it_lock, *flags);
		/* Recheck ownership under the lock: the timer could
		 * have been freed and reused meanwhile. */
		if (timr->it_signal == current->signal) {
			rcu_read_unlock();
			return timr;
		}
		spin_unlock_irqrestore(&timr->it_lock, *flags);
	}
	rcu_read_unlock();

	return NULL;
}
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
/*
 * Fill *cur_setting from the timer's hrtimer state: remaining time to
 * expiry in it_value and the period in it_interval.  Called with
 * it_lock held.
 */
static void
common_timer_get(struct k_itimer *timr, struct itimerspec *cur_setting)
{
	ktime_t now, remaining, iv;
	struct hrtimer *timer = &timr->it.real.timer;

	memset(cur_setting, 0, sizeof(struct itimerspec));

	iv = timr->it.real.interval;

	/* Interval timer? */
	if (iv.tv64)
		cur_setting->it_interval = ktime_to_timespec(iv);
	/* An expired one-shot (non SIGEV_NONE) timer reports all zero. */
	else if (!hrtimer_active(timer) &&
		 (timr->it_sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_NONE)
		return;

	now = timer->base->get_time();

	/*
	 * When a requeue is pending or this is a SIGEV_NONE timer,
	 * move the expiry forward by whole intervals so that the
	 * reported expiry is in the future (> now).
	 */
	if (iv.tv64 && (timr->it_requeue_pending & REQUEUE_PENDING ||
	    (timr->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE))
		timr->it_overrun += (unsigned int) hrtimer_forward(timer, now, iv);

	remaining = ktime_sub(hrtimer_get_expires(timer), now);

	if (remaining.tv64 <= 0) {
		/*
		 * A single shot SIGEV_NONE timer must report zero when
		 * expired; everything else reports the minimal 1ns.
		 */
		if ((timr->it_sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_NONE)
			cur_setting->it_value.tv_nsec = 1;
	} else
		cur_setting->it_value = ktime_to_timespec(remaining);
}
765
766
/* Get the time remaining on a POSIX.1b interval timer. */
SYSCALL_DEFINE2(timer_gettime, timer_t, timer_id,
		struct itimerspec __user *, setting)
{
	struct itimerspec cur_setting;
	struct k_itimer *timr;
	struct k_clock *kc;
	unsigned long flags;
	int ret = 0;

	timr = lock_timer(timer_id, &flags);
	if (!timr)
		return -EINVAL;

	kc = clockid_to_kclock(timr->it_clock);
	/* A created timer's clock must provide timer_get(). */
	if (WARN_ON_ONCE(!kc || !kc->timer_get))
		ret = -EINVAL;
	else
		kc->timer_get(timr, &cur_setting);

	unlock_timer(timr, flags);

	if (!ret && copy_to_user(setting, &cur_setting, sizeof (cur_setting)))
		return -EFAULT;

	return ret;
}
793
794
795
796
797
798
799
800
801
802
/*
 * Return the overrun count of the last delivered expiry of a timer,
 * i.e. how many additional expiries occurred between queuing and
 * delivery of its signal.
 */
SYSCALL_DEFINE1(timer_getoverrun, timer_t, timer_id)
{
	struct k_itimer *timr;
	int overrun;
	unsigned long flags;

	timr = lock_timer(timer_id, &flags);
	if (!timr)
		return -EINVAL;

	overrun = timr->it_overrun_last;
	unlock_timer(timr, flags);

	return overrun;
}
818
819
820
/*
 * Default timer_set(): (re)arm the hrtimer with the new value and
 * interval.  Returns TIMER_RETRY when the timer callback is currently
 * running, so the caller must drop the lock and try again.  Called
 * with it_lock held.
 */
static int
common_timer_set(struct k_itimer *timr, int flags,
		 struct itimerspec *new_setting, struct itimerspec *old_setting)
{
	struct hrtimer *timer = &timr->it.real.timer;
	enum hrtimer_mode mode;

	if (old_setting)
		common_timer_get(timr, old_setting);

	/* Disable the timer before touching it. */
	timr->it.real.interval.tv64 = 0;

	/*
	 * Careful here.  If the callback is running, we must not
	 * deallocate or rearm underneath it: bail out for a retry.
	 */
	if (hrtimer_try_to_cancel(timer) < 0)
		return TIMER_RETRY;

	/* Clear the pending-requeue flag, keep the sequence counter. */
	timr->it_requeue_pending = (timr->it_requeue_pending + 2) &
		~REQUEUE_PENDING;
	timr->it_overrun_last = 0;

	/* A zero it_value disarms the timer. */
	if (!new_setting->it_value.tv_sec && !new_setting->it_value.tv_nsec)
		return 0;

	mode = flags & TIMER_ABSTIME ? HRTIMER_MODE_ABS : HRTIMER_MODE_REL;
	hrtimer_init(&timr->it.real.timer, timr->it_clock, mode);
	timr->it.real.timer.function = posix_timer_fn;

	hrtimer_set_expires(timer, timespec_to_ktime(new_setting->it_value));

	/* Convert the interval to ktime format. */
	timr->it.real.interval = timespec_to_ktime(new_setting->it_interval);

	/*
	 * SIGEV_NONE timers are never queued; just record an absolute
	 * expiry so timer_gettime() can compute the remaining time.
	 */
	if (((timr->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE)) {
		/* Setup correct expiry time for relative timers */
		if (mode == HRTIMER_MODE_REL) {
			hrtimer_add_expires(timer, timer->base->get_time());
		}
		return 0;
	}

	hrtimer_start_expires(timer, mode);
	return 0;
}
869
870
/* Set (arm or disarm) a POSIX.1b interval timer. */
SYSCALL_DEFINE4(timer_settime, timer_t, timer_id, int, flags,
		const struct itimerspec __user *, new_setting,
		struct itimerspec __user *, old_setting)
{
	struct k_itimer *timr;
	struct itimerspec new_spec, old_spec;
	int error = 0;
	unsigned long flag;
	struct itimerspec *rtn = old_setting ? &old_spec : NULL;
	struct k_clock *kc;

	if (!new_setting)
		return -EINVAL;

	if (copy_from_user(&new_spec, new_setting, sizeof (new_spec)))
		return -EFAULT;

	if (!timespec_valid(&new_spec.it_interval) ||
	    !timespec_valid(&new_spec.it_value))
		return -EINVAL;
retry:
	timr = lock_timer(timer_id, &flag);
	if (!timr)
		return -EINVAL;

	kc = clockid_to_kclock(timr->it_clock);
	if (WARN_ON_ONCE(!kc || !kc->timer_set))
		error = -EINVAL;
	else
		error = kc->timer_set(timr, flags, &new_spec, rtn);

	unlock_timer(timr, flag);
	if (error == TIMER_RETRY) {
		/* We already got the old time, don't fetch it again. */
		rtn = NULL;
		goto retry;
	}

	if (old_setting && !error &&
	    copy_to_user(old_setting, &old_spec, sizeof (old_spec)))
		error = -EFAULT;

	return error;
}
914
/*
 * Default timer_del(): stop the hrtimer.  Returns TIMER_RETRY if the
 * callback is currently running and the caller must retry.
 */
static int common_timer_del(struct k_itimer *timer)
{
	timer->it.real.interval.tv64 = 0;

	if (hrtimer_try_to_cancel(&timer->it.real.timer) < 0)
		return TIMER_RETRY;
	return 0;
}
923
/* Dispatch deletion to the timer's clock implementation. */
static inline int timer_delete_hook(struct k_itimer *timer)
{
	struct k_clock *kc = clockid_to_kclock(timer->it_clock);

	if (WARN_ON_ONCE(!kc || !kc->timer_del))
		return -EINVAL;
	return kc->timer_del(timer);
}
932
933
/* Delete a POSIX.1b interval timer. */
SYSCALL_DEFINE1(timer_delete, timer_t, timer_id)
{
	struct k_itimer *timer;
	unsigned long flags;

retry_delete:
	timer = lock_timer(timer_id, &flags);
	if (!timer)
		return -EINVAL;

	/* The callback may be running; drop the lock and retry. */
	if (timer_delete_hook(timer) == TIMER_RETRY) {
		unlock_timer(timer, flags);
		goto retry_delete;
	}

	spin_lock(&current->sighand->siglock);
	list_del(&timer->list);
	spin_unlock(&current->sighand->siglock);
	/*
	 * A concurrent lookup could find the timer before we NULL
	 * it_signal, but __lock_timer() rechecks ownership under
	 * it_lock, so it can no longer be returned to user space.
	 */
	timer->it_signal = NULL;

	unlock_timer(timer, flags);
	release_posix_timer(timer, IT_ID_SET);
	return 0;
}
962
963
964
965
/*
 * Delete a timer on behalf of an exiting process (exit_itimers()).
 * Same retry dance as sys_timer_delete(), but the timer is reached
 * via the process list rather than by id.
 */
static void itimer_delete(struct k_itimer *timer)
{
	unsigned long flags;

retry_delete:
	spin_lock_irqsave(&timer->it_lock, flags);

	if (timer_delete_hook(timer) == TIMER_RETRY) {
		unlock_timer(timer, flags);
		goto retry_delete;
	}
	list_del(&timer->list);
	/*
	 * Detach from the owner so late lookups cannot return this
	 * timer (see the ownership recheck in __lock_timer()).
	 */
	timer->it_signal = NULL;

	unlock_timer(timer, flags);
	release_posix_timer(timer, IT_ID_SET);
}
987
988
989
990
991
/*
 * Called on process exit to tear down all remaining POSIX timers of
 * the process.  No locks are needed here: nothing else can touch
 * sig->posix_timers at this point.
 */
void exit_itimers(struct signal_struct *sig)
{
	struct k_itimer *tmr;

	while (!list_empty(&sig->posix_timers)) {
		tmr = list_entry(sig->posix_timers.next, struct k_itimer, list);
		itimer_delete(tmr);
	}
}
1001
1002SYSCALL_DEFINE2(clock_settime, const clockid_t, which_clock,
1003 const struct timespec __user *, tp)
1004{
1005 struct k_clock *kc = clockid_to_kclock(which_clock);
1006 struct timespec new_tp;
1007
1008 if (!kc || !kc->clock_set)
1009 return -EINVAL;
1010
1011 if (copy_from_user(&new_tp, tp, sizeof (*tp)))
1012 return -EFAULT;
1013
1014 return kc->clock_set(which_clock, &new_tp);
1015}
1016
/* Read the time of a clock. */
SYSCALL_DEFINE2(clock_gettime, const clockid_t, which_clock,
		struct timespec __user *,tp)
{
	struct k_clock *kc = clockid_to_kclock(which_clock);
	struct timespec kernel_tp;
	int error;

	if (!kc)
		return -EINVAL;

	/* Every registered clock provides clock_get(). */
	error = kc->clock_get(which_clock, &kernel_tp);

	if (!error && copy_to_user(tp, &kernel_tp, sizeof (kernel_tp)))
		error = -EFAULT;

	return error;
}
1034
/* Tune a clock via the adjtimex interface; copies the timex back. */
SYSCALL_DEFINE2(clock_adjtime, const clockid_t, which_clock,
		struct timex __user *, utx)
{
	struct k_clock *kc = clockid_to_kclock(which_clock);
	struct timex ktx;
	int err;

	if (!kc)
		return -EINVAL;
	if (!kc->clock_adj)
		return -EOPNOTSUPP;

	if (copy_from_user(&ktx, utx, sizeof(ktx)))
		return -EFAULT;

	err = kc->clock_adj(which_clock, &ktx);

	/* On success the (possibly updated) timex goes back to user. */
	if (err >= 0 && copy_to_user(utx, &ktx, sizeof(ktx)))
		return -EFAULT;

	return err;
}
1057
/* Report the resolution of a clock; tp may be NULL (probe only). */
SYSCALL_DEFINE2(clock_getres, const clockid_t, which_clock,
		struct timespec __user *, tp)
{
	struct k_clock *kc = clockid_to_kclock(which_clock);
	struct timespec rtn_tp;
	int error;

	if (!kc)
		return -EINVAL;

	error = kc->clock_getres(which_clock, &rtn_tp);

	if (!error && tp && copy_to_user(tp, &rtn_tp, sizeof (rtn_tp)))
		error = -EFAULT;

	return error;
}
1075
1076
1077
1078
/*
 * Default nsleep(): sleep on the hrtimer of the given clock, in
 * absolute or relative mode depending on TIMER_ABSTIME.
 */
static int common_nsleep(const clockid_t which_clock, int flags,
			 struct timespec *tsave, struct timespec __user *rmtp)
{
	return hrtimer_nanosleep(tsave, rmtp, flags & TIMER_ABSTIME ?
				 HRTIMER_MODE_ABS : HRTIMER_MODE_REL,
				 which_clock);
}
1086
1087SYSCALL_DEFINE4(clock_nanosleep, const clockid_t, which_clock, int, flags,
1088 const struct timespec __user *, rqtp,
1089 struct timespec __user *, rmtp)
1090{
1091 struct k_clock *kc = clockid_to_kclock(which_clock);
1092 struct timespec t;
1093
1094 if (!kc)
1095 return -EINVAL;
1096 if (!kc->nsleep)
1097 return -ENANOSLEEP_NOTSUP;
1098
1099 if (copy_from_user(&t, rqtp, sizeof (struct timespec)))
1100 return -EFAULT;
1101
1102 if (!timespec_valid(&t))
1103 return -EINVAL;
1104
1105 return kc->nsleep(which_clock, flags, &t, rmtp);
1106}
1107
1108
1109
1110
1111
/*
 * Restart handler for an interrupted clock_nanosleep(), dispatched to
 * the clock's nsleep_restart() method via the restart_block.
 */
long clock_nanosleep_restart(struct restart_block *restart_block)
{
	clockid_t which_clock = restart_block->nanosleep.clockid;
	struct k_clock *kc = clockid_to_kclock(which_clock);

	/* Only clocks with nsleep() can have set up a restart block. */
	if (WARN_ON_ONCE(!kc || !kc->nsleep_restart))
		return -EINVAL;

	return kc->nsleep_restart(restart_block);
}
1122