/*
 * POSIX clocks and timers
 *
 * Implementation of the clock_*() and timer_*() system calls for the
 * static clock ids (CLOCK_REALTIME, CLOCK_MONOTONIC, ...), including the
 * dispatch to the CPU-time and dynamic posix clocks for negative ids.
 */
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/time.h>
#include <linux/mutex.h>

#include <asm/uaccess.h>
#include <linux/list.h>
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/idr.h>
#include <linux/posix-clock.h>
#include <linux/posix-timers.h>
#include <linux/syscalls.h>
#include <linux/wait.h>
#include <linux/workqueue.h>
#include <linux/export.h>

static struct kmem_cache *posix_timers_cache;
static struct idr posix_timers_id;
static DEFINE_SPINLOCK(idr_lock);

#if SIGEV_THREAD_ID != (SIGEV_THREAD_ID & \
                        ~(SIGEV_SIGNAL | SIGEV_NONE | SIGEV_THREAD))
#error "SIGEV_THREAD_ID must not share bit with other SIGEV values!"
#endif

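/*
 * Some architectures (e.g. parisc) define ENOTSUP separately from
 * EOPNOTSUPP; prefer the more specific error for an unsupported
 * clock_nanosleep() when it exists.
 */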
#ifndef ENOTSUP
# define ENANOSLEEP_NOTSUP EOPNOTSUPP
#else
# define ENANOSLEEP_NOTSUP ENOTSUP
#endif

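/*
 * The clocks registered at boot (and by posix_timers_register_clock())
 * live in this array, indexed by clock id.
 */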
static struct k_clock posix_clocks[MAX_CLOCKS];

/* The default operations used by the standard clocks; defined below. */
static int common_nsleep(const clockid_t, int flags, struct timespec *t,
                         struct timespec __user *rmtp);
static int common_timer_create(struct k_itimer *new_timer);
static void common_timer_get(struct k_itimer *, struct itimerspec *);
static int common_timer_set(struct k_itimer *, int,
                            struct itimerspec *, struct itimerspec *);
static int common_timer_del(struct k_itimer *timer);

static enum hrtimer_restart posix_timer_fn(struct hrtimer *data);

static struct k_itimer *__lock_timer(timer_t timer_id, unsigned long *flags);

#define lock_timer(tid, flags)                                             \
({      struct k_itimer *__timr;                                           \
        __cond_lock(&__timr->it_lock, __timr = __lock_timer(tid, flags));  \
        __timr;                                                            \
})

static inline void unlock_timer(struct k_itimer *timr, unsigned long flags)
{
        spin_unlock_irqrestore(&timr->it_lock, flags);
}

/* Get CLOCK_REALTIME time. */
static int posix_clock_realtime_get(clockid_t which_clock, struct timespec *tp)
{
        ktime_get_real_ts(tp);
        return 0;
}

/* Set CLOCK_REALTIME time. */
static int posix_clock_realtime_set(const clockid_t which_clock,
                                    const struct timespec *tp)
{
        return do_sys_settimeofday(tp, NULL);
}

static int posix_clock_realtime_adj(const clockid_t which_clock,
                                    struct timex *t)
{
        return do_adjtimex(t);
}

/* Get CLOCK_MONOTONIC time. */
static int posix_ktime_get_ts(clockid_t which_clock, struct timespec *tp)
{
        ktime_get_ts(tp);
        return 0;
}

/* Get CLOCK_MONOTONIC_RAW time. */
static int posix_get_monotonic_raw(clockid_t which_clock, struct timespec *tp)
{
        getrawmonotonic(tp);
        return 0;
}

static int posix_get_realtime_coarse(clockid_t which_clock, struct timespec *tp)
{
        *tp = current_kernel_time();
        return 0;
}

static int posix_get_monotonic_coarse(clockid_t which_clock,
                                      struct timespec *tp)
{
        *tp = get_monotonic_coarse();
        return 0;
}

static int posix_get_coarse_res(const clockid_t which_clock, struct timespec *tp)
{
        *tp = ktime_to_timespec(KTIME_LOW_RES);
        return 0;
}

static int posix_get_boottime(const clockid_t which_clock, struct timespec *tp)
{
        get_monotonic_boottime(tp);
        return 0;
}

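/*
 * Register the standard clocks and create the slab cache for struct
 * k_itimer at boot time.
 */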
static __init int init_posix_timers(void)
{
        struct k_clock clock_realtime = {
                .clock_getres   = hrtimer_get_res,
                .clock_get      = posix_clock_realtime_get,
                .clock_set      = posix_clock_realtime_set,
                .clock_adj      = posix_clock_realtime_adj,
                .nsleep         = common_nsleep,
                .nsleep_restart = hrtimer_nanosleep_restart,
                .timer_create   = common_timer_create,
                .timer_set      = common_timer_set,
                .timer_get      = common_timer_get,
                .timer_del      = common_timer_del,
        };
        struct k_clock clock_monotonic = {
                .clock_getres   = hrtimer_get_res,
                .clock_get      = posix_ktime_get_ts,
                .nsleep         = common_nsleep,
                .nsleep_restart = hrtimer_nanosleep_restart,
                .timer_create   = common_timer_create,
                .timer_set      = common_timer_set,
                .timer_get      = common_timer_get,
                .timer_del      = common_timer_del,
        };
        struct k_clock clock_monotonic_raw = {
                .clock_getres   = hrtimer_get_res,
                .clock_get      = posix_get_monotonic_raw,
        };
        struct k_clock clock_realtime_coarse = {
                .clock_getres   = posix_get_coarse_res,
                .clock_get      = posix_get_realtime_coarse,
        };
        struct k_clock clock_monotonic_coarse = {
                .clock_getres   = posix_get_coarse_res,
                .clock_get      = posix_get_monotonic_coarse,
        };
        struct k_clock clock_boottime = {
                .clock_getres   = hrtimer_get_res,
                .clock_get      = posix_get_boottime,
                .nsleep         = common_nsleep,
                .nsleep_restart = hrtimer_nanosleep_restart,
                .timer_create   = common_timer_create,
                .timer_set      = common_timer_set,
                .timer_get      = common_timer_get,
                .timer_del      = common_timer_del,
        };

        posix_timers_register_clock(CLOCK_REALTIME, &clock_realtime);
        posix_timers_register_clock(CLOCK_MONOTONIC, &clock_monotonic);
        posix_timers_register_clock(CLOCK_MONOTONIC_RAW, &clock_monotonic_raw);
        posix_timers_register_clock(CLOCK_REALTIME_COARSE, &clock_realtime_coarse);
        posix_timers_register_clock(CLOCK_MONOTONIC_COARSE, &clock_monotonic_coarse);
        posix_timers_register_clock(CLOCK_BOOTTIME, &clock_boottime);

        posix_timers_cache = kmem_cache_create("posix_timers_cache",
                                        sizeof (struct k_itimer), 0, SLAB_PANIC,
                                        NULL);
        idr_init(&posix_timers_id);
        return 0;
}

__initcall(init_posix_timers);

/*
 * Re-arm a periodic timer for its next expiry and update the overrun
 * bookkeeping.
 */
static void schedule_next_timer(struct k_itimer *timr)
{
        struct hrtimer *timer = &timr->it.real.timer;

        if (timr->it.real.interval.tv64 == 0)
                return;

        timr->it_overrun += (unsigned int) hrtimer_forward(timer,
                                                timer->base->get_time(),
                                                timr->it.real.interval);

        timr->it_overrun_last = timr->it_overrun;
        timr->it_overrun = -1;
        ++timr->it_requeue_pending;
        hrtimer_restart(timer);
}

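/*
 * Called from the signal delivery path when an interval timer's signal
 * has been dequeued.  If no timer_settime()/timer_delete() raced with
 * delivery (it_requeue_pending still matches si_sys_private), re-arm the
 * timer for the next interval and report the last overrun count.
 */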
void do_schedule_next_timer(struct siginfo *info)
{
        struct k_itimer *timr;
        unsigned long flags;

        timr = lock_timer(info->si_tid, &flags);

        if (timr && timr->it_requeue_pending == info->si_sys_private) {
                if (timr->it_clock < 0)
                        posix_cpu_timer_schedule(timr);
                else
                        schedule_next_timer(timr);

                info->si_overrun += timr->it_overrun_last;
        }

        if (timr)
                unlock_timer(timr, flags);
}

int posix_timer_event(struct k_itimer *timr, int si_private)
{
        struct task_struct *task;
        int shared, ret = -1;

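        /*
         * The si_sys_private value lets do_schedule_next_timer() tell
         * whether this particular signal still needs the timer to be
         * re-armed once it has been delivered.
         */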
        timr->sigq->info.si_sys_private = si_private;

        rcu_read_lock();
        task = pid_task(timr->it_pid, PIDTYPE_PID);
        if (task) {
                shared = !(timr->it_sigev_notify & SIGEV_THREAD_ID);
                ret = send_sigqueue(timr->sigq, task, shared);
        }
        rcu_read_unlock();

        return ret > 0;
}
EXPORT_SYMBOL_GPL(posix_timer_event);

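/*
 * hrtimer callback: runs when a POSIX.1b interval timer expires.  It
 * queues the timer's signal and, for periodic timers whose signal could
 * not be queued, re-arms the hrtimer itself.
 */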
static enum hrtimer_restart posix_timer_fn(struct hrtimer *timer)
{
        struct k_itimer *timr;
        unsigned long flags;
        int si_private = 0;
        enum hrtimer_restart ret = HRTIMER_NORESTART;

        timr = container_of(timer, struct k_itimer, it.real.timer);
        spin_lock_irqsave(&timr->it_lock, flags);

        if (timr->it.real.interval.tv64 != 0)
                si_private = ++timr->it_requeue_pending;

        if (posix_timer_event(timr, si_private)) {
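                /*
                 * The signal was not queued (e.g. it is ignored), so no
                 * do_schedule_next_timer() call will come back to re-arm
                 * a periodic timer.  Re-arm it from here instead.
                 */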
                if (timr->it.real.interval.tv64 != 0) {
                        ktime_t now = hrtimer_cb_get_time(timer);
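
                        /*
                         * In high resolution mode, a periodic timer with
                         * an interval shorter than one tick whose signal
                         * is ignored would be re-queued immediately and
                         * could starve the softirq.  Nudging "now" forward
                         * by one tick makes hrtimer_forward() push the
                         * expiry out while keeping the overrun count
                         * accurate.
                         */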
#ifdef CONFIG_HIGH_RES_TIMERS
                        {
                                ktime_t kj = ktime_set(0, NSEC_PER_SEC / HZ);

                                if (timr->it.real.interval.tv64 < kj.tv64)
                                        now = ktime_add(now, kj);
                        }
#endif
                        timr->it_overrun += (unsigned int)
                                hrtimer_forward(timer, now,
                                                timr->it.real.interval);
                        ret = HRTIMER_RESTART;
                        ++timr->it_requeue_pending;
                }
        }

        unlock_timer(timr, flags);
        return ret;
}

static struct pid *good_sigevent(sigevent_t * event)
{
        struct task_struct *rtn = current->group_leader;

        if ((event->sigev_notify & SIGEV_THREAD_ID) &&
            (!(rtn = find_task_by_vpid(event->sigev_notify_thread_id)) ||
             !same_thread_group(rtn, current) ||
             (event->sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_SIGNAL))
                return NULL;

        if (((event->sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_NONE) &&
            ((event->sigev_signo <= 0) || (event->sigev_signo > SIGRTMAX)))
                return NULL;

        return task_pid(rtn);
}

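/*
 * Register a k_clock for one of the static clock ids.  A usable clock
 * must provide at least clock_get() and clock_getres().
 */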
void posix_timers_register_clock(const clockid_t clock_id,
                                 struct k_clock *new_clock)
{
        if ((unsigned) clock_id >= MAX_CLOCKS) {
                printk(KERN_WARNING "POSIX clock register failed for clock_id %d\n",
                       clock_id);
                return;
        }

        if (!new_clock->clock_get) {
                printk(KERN_WARNING "POSIX clock id %d lacks clock_get()\n",
                       clock_id);
                return;
        }
        if (!new_clock->clock_getres) {
                printk(KERN_WARNING "POSIX clock id %d lacks clock_getres()\n",
                       clock_id);
                return;
        }

        posix_clocks[clock_id] = *new_clock;
}
EXPORT_SYMBOL_GPL(posix_timers_register_clock);

static struct k_itimer * alloc_posix_timer(void)
{
        struct k_itimer *tmr;
        tmr = kmem_cache_zalloc(posix_timers_cache, GFP_KERNEL);
        if (!tmr)
                return tmr;
        if (unlikely(!(tmr->sigq = sigqueue_alloc()))) {
                kmem_cache_free(posix_timers_cache, tmr);
                return NULL;
        }
        memset(&tmr->sigq->info, 0, sizeof(siginfo_t));
        return tmr;
}

static void k_itimer_rcu_free(struct rcu_head *head)
{
        struct k_itimer *tmr = container_of(head, struct k_itimer, it.rcu);

        kmem_cache_free(posix_timers_cache, tmr);
}

#define IT_ID_SET       1
#define IT_ID_NOT_SET   0
static void release_posix_timer(struct k_itimer *tmr, int it_id_set)
{
        if (it_id_set) {
                unsigned long flags;
                spin_lock_irqsave(&idr_lock, flags);
                idr_remove(&posix_timers_id, tmr->it_id);
                spin_unlock_irqrestore(&idr_lock, flags);
        }
        put_pid(tmr->it_pid);
        sigqueue_free(tmr->sigq);
        call_rcu(&tmr->it.rcu, k_itimer_rcu_free);
}

/*
 * Negative clock ids encode CPU clocks and dynamic (fd based) clocks;
 * everything else indexes the static posix_clocks[] array.
 */
static struct k_clock *clockid_to_kclock(const clockid_t id)
{
        if (id < 0)
                return (id & CLOCKFD_MASK) == CLOCKFD ?
                        &clock_posix_dynamic : &clock_posix_cpu;

        if (id >= MAX_CLOCKS || !posix_clocks[id].clock_getres)
                return NULL;
        return &posix_clocks[id];
}

static int common_timer_create(struct k_itimer *new_timer)
{
        hrtimer_init(&new_timer->it.real.timer, new_timer->it_clock, 0);
        return 0;
}

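/* Create a POSIX.1b interval timer. */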
SYSCALL_DEFINE3(timer_create, const clockid_t, which_clock,
                struct sigevent __user *, timer_event_spec,
                timer_t __user *, created_timer_id)
{
        struct k_clock *kc = clockid_to_kclock(which_clock);
        struct k_itimer *new_timer;
        int error, new_timer_id;
        sigevent_t event;
        int it_id_set = IT_ID_NOT_SET;

        if (!kc)
                return -EINVAL;
        if (!kc->timer_create)
                return -EOPNOTSUPP;

        new_timer = alloc_posix_timer();
        if (unlikely(!new_timer))
                return -EAGAIN;

        spin_lock_init(&new_timer->it_lock);
retry:
        if (unlikely(!idr_pre_get(&posix_timers_id, GFP_KERNEL))) {
                error = -EAGAIN;
                goto out;
        }
        spin_lock_irq(&idr_lock);
        error = idr_get_new(&posix_timers_id, new_timer, &new_timer_id);
        spin_unlock_irq(&idr_lock);
        if (error) {
                if (error == -EAGAIN)
                        goto retry;
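                /*
                 * Any other IDR failure is treated as the id space being
                 * full; EAGAIN is the error POSIX specifies for that.
                 */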
                error = -EAGAIN;
                goto out;
        }

        it_id_set = IT_ID_SET;
        new_timer->it_id = (timer_t) new_timer_id;
        new_timer->it_clock = which_clock;
        new_timer->it_overrun = -1;

        if (timer_event_spec) {
                if (copy_from_user(&event, timer_event_spec, sizeof (event))) {
                        error = -EFAULT;
                        goto out;
                }
                rcu_read_lock();
                new_timer->it_pid = get_pid(good_sigevent(&event));
                rcu_read_unlock();
                if (!new_timer->it_pid) {
                        error = -EINVAL;
                        goto out;
                }
        } else {
                event.sigev_notify = SIGEV_SIGNAL;
                event.sigev_signo = SIGALRM;
                event.sigev_value.sival_int = new_timer->it_id;
                new_timer->it_pid = get_pid(task_tgid(current));
        }

        new_timer->it_sigev_notify = event.sigev_notify;
        new_timer->sigq->info.si_signo = event.sigev_signo;
        new_timer->sigq->info.si_value = event.sigev_value;
        new_timer->sigq->info.si_tid = new_timer->it_id;
        new_timer->sigq->info.si_code = SI_TIMER;

        if (copy_to_user(created_timer_id,
                         &new_timer_id, sizeof (new_timer_id))) {
                error = -EFAULT;
                goto out;
        }

        error = kc->timer_create(new_timer);
        if (error)
                goto out;

        spin_lock_irq(&current->sighand->siglock);
        new_timer->it_signal = current->signal;
        list_add(&new_timer->list, &current->signal->posix_timers);
        spin_unlock_irq(&current->sighand->siglock);

        return 0;
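
        /*
         * Once the timer has been published through its id and the signal
         * struct's list, other threads may already be using or deleting
         * it.  Do not use or modify new_timer after the unlock above.
         */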
out:
        release_posix_timer(new_timer, it_id_set);
        return error;
}

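/*
 * Look a timer up by id and lock it.  The IDR entry is found under
 * rcu_read_lock() so that a concurrent release_posix_timer() cannot free
 * the k_itimer before we take it_lock, and the it_signal check makes
 * sure the timer belongs to the calling process.
 */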
static struct k_itimer *__lock_timer(timer_t timer_id, unsigned long *flags)
{
        struct k_itimer *timr;

        rcu_read_lock();
        timr = idr_find(&posix_timers_id, (int)timer_id);
        if (timr) {
                spin_lock_irqsave(&timr->it_lock, *flags);
                if (timr->it_signal == current->signal) {
                        rcu_read_unlock();
                        return timr;
                }
                spin_unlock_irqrestore(&timr->it_lock, *flags);
        }
        rcu_read_unlock();

        return NULL;
}

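/*
 * Compute the current setting of a timer: the interval and, for an armed
 * timer, the time remaining until the next expiry.  Called with it_lock
 * held and interrupts disabled, so it must not re-enable them.
 */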
static void
common_timer_get(struct k_itimer *timr, struct itimerspec *cur_setting)
{
        ktime_t now, remaining, iv;
        struct hrtimer *timer = &timr->it.real.timer;

        memset(cur_setting, 0, sizeof(struct itimerspec));

        iv = timr->it.real.interval;

        /* interval timer ? */
        if (iv.tv64)
                cur_setting->it_interval = ktime_to_timespec(iv);
        else if (!hrtimer_active(timer) &&
                 (timr->it_sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_NONE)
                return;

        now = timer->base->get_time();

        /*
         * When a requeue is pending or this is a SIGEV_NONE
         * timer move the expiry time forward by intervals, so
         * expiry is > now.
         */
        if (iv.tv64 && (timr->it_requeue_pending & REQUEUE_PENDING ||
            (timr->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE))
                timr->it_overrun += (unsigned int) hrtimer_forward(timer, now, iv);

        remaining = ktime_sub(hrtimer_get_expires(timer), now);

        if (remaining.tv64 <= 0) {
                /*
                 * A single shot SIGEV_NONE timer must return 0, when
                 * it is expired !
                 */
                if ((timr->it_sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_NONE)
                        cur_setting->it_value.tv_nsec = 1;
        } else
                cur_setting->it_value = ktime_to_timespec(remaining);
}

/* Get the time remaining on a POSIX.1b interval timer. */
SYSCALL_DEFINE2(timer_gettime, timer_t, timer_id,
                struct itimerspec __user *, setting)
{
        struct itimerspec cur_setting;
        struct k_itimer *timr;
        struct k_clock *kc;
        unsigned long flags;
        int ret = 0;

        timr = lock_timer(timer_id, &flags);
        if (!timr)
                return -EINVAL;

        kc = clockid_to_kclock(timr->it_clock);
        if (WARN_ON_ONCE(!kc || !kc->timer_get))
                ret = -EINVAL;
        else
                kc->timer_get(timr, &cur_setting);

        unlock_timer(timr, flags);

        if (!ret && copy_to_user(setting, &cur_setting, sizeof (cur_setting)))
                return -EFAULT;

        return ret;
}

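/*
 * Return the overrun count recorded for the last timer signal that was
 * delivered to the process (it_overrun_last), as timer_getoverrun()
 * requires.
 */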
SYSCALL_DEFINE1(timer_getoverrun, timer_t, timer_id)
{
        struct k_itimer *timr;
        int overrun;
        unsigned long flags;

        timr = lock_timer(timer_id, &flags);
        if (!timr)
                return -EINVAL;

        overrun = timr->it_overrun_last;
        unlock_timer(timr, flags);

        return overrun;
}

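/*
 * Set a timer to the requested value.  If the hrtimer callback is
 * currently running on another CPU the cancel attempt fails and
 * TIMER_RETRY is returned, telling the caller to drop the lock and try
 * again.
 */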
static int
common_timer_set(struct k_itimer *timr, int flags,
                 struct itimerspec *new_setting, struct itimerspec *old_setting)
{
        struct hrtimer *timer = &timr->it.real.timer;
        enum hrtimer_mode mode;

        if (old_setting)
                common_timer_get(timr, old_setting);

        /* disable the timer */
        timr->it.real.interval.tv64 = 0;
        /*
         * careful here.  On SMP the timer expiry function could be
         * active and spinning on timr->it_lock.
         */
        if (hrtimer_try_to_cancel(timer) < 0)
                return TIMER_RETRY;

        timr->it_requeue_pending = (timr->it_requeue_pending + 2) &
                ~REQUEUE_PENDING;
        timr->it_overrun_last = 0;

        /* switch off the timer when it_value is zero */
        if (!new_setting->it_value.tv_sec && !new_setting->it_value.tv_nsec)
                return 0;

        mode = flags & TIMER_ABSTIME ? HRTIMER_MODE_ABS : HRTIMER_MODE_REL;
        hrtimer_init(&timr->it.real.timer, timr->it_clock, mode);
        timr->it.real.timer.function = posix_timer_fn;

        hrtimer_set_expires(timer, timespec_to_ktime(new_setting->it_value));

        /* Convert interval */
        timr->it.real.interval = timespec_to_ktime(new_setting->it_interval);

        /* SIGEV_NONE timers are not queued ! See common_timer_get */
        if (((timr->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE)) {
                /* Setup correct expiry time for relative timers */
                if (mode == HRTIMER_MODE_REL) {
                        hrtimer_add_expires(timer, timer->base->get_time());
                }
                return 0;
        }

        hrtimer_start_expires(timer, mode);
        return 0;
}

/* Set a POSIX.1b interval timer */
SYSCALL_DEFINE4(timer_settime, timer_t, timer_id, int, flags,
                const struct itimerspec __user *, new_setting,
                struct itimerspec __user *, old_setting)
{
        struct k_itimer *timr;
        struct itimerspec new_spec, old_spec;
        int error = 0;
        unsigned long flag;
        struct itimerspec *rtn = old_setting ? &old_spec : NULL;
        struct k_clock *kc;

        if (!new_setting)
                return -EINVAL;

        if (copy_from_user(&new_spec, new_setting, sizeof (new_spec)))
                return -EFAULT;

        if (!timespec_valid(&new_spec.it_interval) ||
            !timespec_valid(&new_spec.it_value))
                return -EINVAL;
retry:
        timr = lock_timer(timer_id, &flag);
        if (!timr)
                return -EINVAL;

        kc = clockid_to_kclock(timr->it_clock);
        if (WARN_ON_ONCE(!kc || !kc->timer_set))
                error = -EINVAL;
        else
                error = kc->timer_set(timr, flags, &new_spec, rtn);

        unlock_timer(timr, flag);
        if (error == TIMER_RETRY) {
                rtn = NULL;     /* We already got the old time... */
                goto retry;
        }

        if (old_setting && !error &&
            copy_to_user(old_setting, &old_spec, sizeof (old_spec)))
                error = -EFAULT;

        return error;
}

static int common_timer_del(struct k_itimer *timer)
{
        timer->it.real.interval.tv64 = 0;

        if (hrtimer_try_to_cancel(&timer->it.real.timer) < 0)
                return TIMER_RETRY;
        return 0;
}

static inline int timer_delete_hook(struct k_itimer *timer)
{
        struct k_clock *kc = clockid_to_kclock(timer->it_clock);

        if (WARN_ON_ONCE(!kc || !kc->timer_del))
                return -EINVAL;
        return kc->timer_del(timer);
}

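/* Delete a POSIX.1b interval timer. */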
SYSCALL_DEFINE1(timer_delete, timer_t, timer_id)
{
        struct k_itimer *timer;
        unsigned long flags;

retry_delete:
        timer = lock_timer(timer_id, &flags);
        if (!timer)
                return -EINVAL;

        if (timer_delete_hook(timer) == TIMER_RETRY) {
                unlock_timer(timer, flags);
                goto retry_delete;
        }

        spin_lock(&current->sighand->siglock);
        list_del(&timer->list);
        spin_unlock(&current->sighand->siglock);
        /*
         * This keeps any tasks waiting on the spin lock from thinking
         * they got something (see the lock code above).
         */
        timer->it_signal = NULL;

        unlock_timer(timer, flags);
        release_posix_timer(timer, IT_ID_SET);
        return 0;
}

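/*
 * Delete a timer on behalf of an exiting process; called from
 * exit_itimers() below.
 */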
static void itimer_delete(struct k_itimer *timer)
{
        unsigned long flags;

retry_delete:
        spin_lock_irqsave(&timer->it_lock, flags);

        if (timer_delete_hook(timer) == TIMER_RETRY) {
                unlock_timer(timer, flags);
                goto retry_delete;
        }
        list_del(&timer->list);
        /* See the comment in timer_delete() above. */
        timer->it_signal = NULL;

        unlock_timer(timer, flags);
        release_posix_timer(timer, IT_ID_SET);
}

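/*
 * Called when there are no more references to the shared signal_struct
 * (process exit or exec); deletes every remaining POSIX timer.
 */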
void exit_itimers(struct signal_struct *sig)
{
        struct k_itimer *tmr;

        while (!list_empty(&sig->posix_timers)) {
                tmr = list_entry(sig->posix_timers.next, struct k_itimer, list);
                itimer_delete(tmr);
        }
}

SYSCALL_DEFINE2(clock_settime, const clockid_t, which_clock,
                const struct timespec __user *, tp)
{
        struct k_clock *kc = clockid_to_kclock(which_clock);
        struct timespec new_tp;

        if (!kc || !kc->clock_set)
                return -EINVAL;

        if (copy_from_user(&new_tp, tp, sizeof (*tp)))
                return -EFAULT;

        return kc->clock_set(which_clock, &new_tp);
}

SYSCALL_DEFINE2(clock_gettime, const clockid_t, which_clock,
                struct timespec __user *, tp)
{
        struct k_clock *kc = clockid_to_kclock(which_clock);
        struct timespec kernel_tp;
        int error;

        if (!kc)
                return -EINVAL;

        error = kc->clock_get(which_clock, &kernel_tp);

        if (!error && copy_to_user(tp, &kernel_tp, sizeof (kernel_tp)))
                error = -EFAULT;

        return error;
}

SYSCALL_DEFINE2(clock_adjtime, const clockid_t, which_clock,
                struct timex __user *, utx)
{
        struct k_clock *kc = clockid_to_kclock(which_clock);
        struct timex ktx;
        int err;

        if (!kc)
                return -EINVAL;
        if (!kc->clock_adj)
                return -EOPNOTSUPP;

        if (copy_from_user(&ktx, utx, sizeof(ktx)))
                return -EFAULT;

        err = kc->clock_adj(which_clock, &ktx);

        if (!err && copy_to_user(utx, &ktx, sizeof(ktx)))
                return -EFAULT;

        return err;
}

SYSCALL_DEFINE2(clock_getres, const clockid_t, which_clock,
                struct timespec __user *, tp)
{
        struct k_clock *kc = clockid_to_kclock(which_clock);
        struct timespec rtn_tp;
        int error;

        if (!kc)
                return -EINVAL;

        error = kc->clock_getres(which_clock, &rtn_tp);

        if (!error && tp && copy_to_user(tp, &rtn_tp, sizeof (rtn_tp)))
                error = -EFAULT;

        return error;
}

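/*
 * nanosleep for monotonic and realtime clocks, implemented on top of
 * hrtimer_nanosleep().
 */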
static int common_nsleep(const clockid_t which_clock, int flags,
                         struct timespec *tsave, struct timespec __user *rmtp)
{
        return hrtimer_nanosleep(tsave, rmtp, flags & TIMER_ABSTIME ?
                                 HRTIMER_MODE_ABS : HRTIMER_MODE_REL,
                                 which_clock);
}

SYSCALL_DEFINE4(clock_nanosleep, const clockid_t, which_clock, int, flags,
                const struct timespec __user *, rqtp,
                struct timespec __user *, rmtp)
{
        struct k_clock *kc = clockid_to_kclock(which_clock);
        struct timespec t;

        if (!kc)
                return -EINVAL;
        if (!kc->nsleep)
                return -ENANOSLEEP_NOTSUP;

        if (copy_from_user(&t, rqtp, sizeof (struct timespec)))
                return -EFAULT;

        if (!timespec_valid(&t))
                return -EINVAL;

        return kc->nsleep(which_clock, flags, &t, rmtp);
}

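/*
 * This will restart clock_nanosleep after the sleep was interrupted by a
 * signal and a restart block was queued.
 */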
long clock_nanosleep_restart(struct restart_block *restart_block)
{
        clockid_t which_clock = restart_block->nanosleep.clockid;
        struct k_clock *kc = clockid_to_kclock(which_clock);

        if (WARN_ON_ONCE(!kc || !kc->nsleep_restart))
                return -EINVAL;

        return kc->nsleep_restart(restart_block);
}