1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33#include <linux/mm.h>
34#include <linux/interrupt.h>
35#include <linux/slab.h>
36#include <linux/time.h>
37#include <linux/mutex.h>
38
39#include <asm/uaccess.h>
40#include <linux/list.h>
41#include <linux/init.h>
42#include <linux/compiler.h>
43#include <linux/idr.h>
44#include <linux/posix-timers.h>
45#include <linux/syscalls.h>
46#include <linux/wait.h>
47#include <linux/workqueue.h>
48#include <linux/module.h>
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
/* Slab cache for struct k_itimer allocations. */
static struct kmem_cache *posix_timers_cache;
/* Maps timer ids to timers; lookups/updates are serialized by idr_lock. */
static struct idr posix_timers_id;
static DEFINE_SPINLOCK(idr_lock);
74
75
76
77
78
/*
 * Compile-time check: SIGEV_THREAD_ID is treated as a flag bit OR-ed
 * with the other SIGEV_* notify values (see good_sigevent() and the
 * "& ~SIGEV_THREAD_ID" tests below), so it must not overlap them.
 */
#if SIGEV_THREAD_ID != (SIGEV_THREAD_ID & \
                       ~(SIGEV_SIGNAL | SIGEV_NONE | SIGEV_THREAD))
#error "SIGEV_THREAD_ID must not share bit with other SIGEV values!"
#endif
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134static struct k_clock posix_clocks[MAX_CLOCKS];
135
136
137
138
/* Default implementations, used when a clock does not override the op. */
static int common_nsleep(const clockid_t, int flags, struct timespec *t,
			 struct timespec __user *rmtp);
static void common_timer_get(struct k_itimer *, struct itimerspec *);
static int common_timer_set(struct k_itimer *, int,
			    struct itimerspec *, struct itimerspec *);
static int common_timer_del(struct k_itimer *timer);

/* hrtimer expiry callback for (non-CPU) posix timers. */
static enum hrtimer_restart posix_timer_fn(struct hrtimer *data);

static struct k_itimer *lock_timer(timer_t timer_id, unsigned long *flags);

/* Release the per-timer lock taken by lock_timer(). */
static inline void unlock_timer(struct k_itimer *timr, unsigned long flags)
{
	spin_unlock_irqrestore(&timr->it_lock, flags);
}
154
155
156
157
/*
 * CLOCK_DISPATCH - route a clock operation to its implementation:
 * negative clock ids are CPU clocks (posix_cpu_*); a registered clock
 * may override the op; otherwise fall back to the common_* default.
 */
#define CLOCK_DISPATCH(clock, call, arglist) \
 	((clock) < 0 ? posix_cpu_##call arglist : \
 	 (posix_clocks[clock].call != NULL \
 	  ? (*posix_clocks[clock].call) arglist : common_##call arglist))
162
163
164
165
166
167
168
169
170
/*
 * Default clock_getres: report the resolution (in nanoseconds) that was
 * stored in the k_clock entry at registration time.
 */
static inline int common_clock_getres(const clockid_t which_clock,
				      struct timespec *tp)
{
	tp->tv_sec = 0;
	tp->tv_nsec = posix_clocks[which_clock].res;
	return 0;
}
178
179
180
181
/* Default clock_get: wall-clock (CLOCK_REALTIME) time. */
static int common_clock_get(clockid_t which_clock, struct timespec *tp)
{
	ktime_get_real_ts(tp);
	return 0;
}
187
/* Default clock_set: set the wall clock (requires CAP_SYS_TIME). */
static inline int common_clock_set(const clockid_t which_clock,
				   struct timespec *tp)
{
	return do_sys_settimeofday(tp, NULL);
}
193
/* Default timer_create: back the posix timer with an hrtimer. */
static int common_timer_create(struct k_itimer *new_timer)
{
	hrtimer_init(&new_timer->it.real.timer, new_timer->it_clock, 0);
	return 0;
}
199
/* timer_create op for clocks that do not support timers at all. */
static int no_timer_create(struct k_itimer *new_timer)
{
	return -EOPNOTSUPP;
}
204
/* nsleep op for clocks that do not support clock_nanosleep(). */
static int no_nsleep(const clockid_t which_clock, int flags,
		     struct timespec *tsave, struct timespec __user *rmtp)
{
	return -EOPNOTSUPP;
}
210
211
212
213
214static inline int invalid_clockid(const clockid_t which_clock)
215{
216 if (which_clock < 0)
217 return 0;
218 if ((unsigned) which_clock >= MAX_CLOCKS)
219 return 1;
220 if (posix_clocks[which_clock].clock_getres != NULL)
221 return 0;
222 if (posix_clocks[which_clock].res != 0)
223 return 0;
224 return 1;
225}
226
227
228
229
/* clock_get for CLOCK_MONOTONIC. */
static int posix_ktime_get_ts(clockid_t which_clock, struct timespec *tp)
{
	ktime_get_ts(tp);
	return 0;
}
235
236
237
238
/* clock_get for CLOCK_MONOTONIC_RAW (not subject to NTP adjustment). */
static int posix_get_monotonic_raw(clockid_t which_clock, struct timespec *tp)
{
	getrawmonotonic(tp);
	return 0;
}
244
245
/* clock_get for CLOCK_REALTIME_COARSE: tick-granular, cheap to read. */
static int posix_get_realtime_coarse(clockid_t which_clock, struct timespec *tp)
{
	*tp = current_kernel_time();
	return 0;
}
251
/* clock_get for CLOCK_MONOTONIC_COARSE: tick-granular, cheap to read. */
static int posix_get_monotonic_coarse(clockid_t which_clock,
				      struct timespec *tp)
{
	*tp = get_monotonic_coarse();
	return 0;
}
258
/*
 * clock_getres for the coarse clocks: one tick (KTIME_LOW_RES).
 * NOTE(review): unlike the sibling helpers above this one is not
 * 'static' — confirm no out-of-file user before narrowing the linkage.
 */
int posix_get_coarse_res(const clockid_t which_clock, struct timespec *tp)
{
	*tp = ktime_to_timespec(KTIME_LOW_RES);
	return 0;
}
264
265
266
/*
 * Register the standard clocks and set up the timer slab cache and the
 * id allocator. Runs once at boot via __initcall.
 *
 * Note: clock_realtime deliberately leaves clock_get/clock_set NULL so
 * that CLOCK_DISPATCH falls back to common_clock_get/common_clock_set.
 * The coarse clocks and CLOCK_MONOTONIC_RAW refuse timer creation and
 * nanosleep via the no_* stubs.
 */
static __init int init_posix_timers(void)
{
	struct k_clock clock_realtime = {
		.clock_getres = hrtimer_get_res,
	};
	struct k_clock clock_monotonic = {
		.clock_getres = hrtimer_get_res,
		.clock_get = posix_ktime_get_ts,
		.clock_set = do_posix_clock_nosettime,
	};
	struct k_clock clock_monotonic_raw = {
		.clock_getres = hrtimer_get_res,
		.clock_get = posix_get_monotonic_raw,
		.clock_set = do_posix_clock_nosettime,
		.timer_create = no_timer_create,
		.nsleep = no_nsleep,
	};
	struct k_clock clock_realtime_coarse = {
		.clock_getres = posix_get_coarse_res,
		.clock_get = posix_get_realtime_coarse,
		.clock_set = do_posix_clock_nosettime,
		.timer_create = no_timer_create,
		.nsleep = no_nsleep,
	};
	struct k_clock clock_monotonic_coarse = {
		.clock_getres = posix_get_coarse_res,
		.clock_get = posix_get_monotonic_coarse,
		.clock_set = do_posix_clock_nosettime,
		.timer_create = no_timer_create,
		.nsleep = no_nsleep,
	};

	register_posix_clock(CLOCK_REALTIME, &clock_realtime);
	register_posix_clock(CLOCK_MONOTONIC, &clock_monotonic);
	register_posix_clock(CLOCK_MONOTONIC_RAW, &clock_monotonic_raw);
	register_posix_clock(CLOCK_REALTIME_COARSE, &clock_realtime_coarse);
	register_posix_clock(CLOCK_MONOTONIC_COARSE, &clock_monotonic_coarse);

	/* SLAB_PANIC: boot cannot continue without this cache. */
	posix_timers_cache = kmem_cache_create("posix_timers_cache",
					sizeof (struct k_itimer), 0, SLAB_PANIC,
					NULL);
	idr_init(&posix_timers_id);
	return 0;
}
311
312__initcall(init_posix_timers);
313
/*
 * Re-arm an interval timer after its signal has been delivered: advance
 * the expiry by whole intervals past "now", accumulate any missed
 * expirations into the overrun count, and restart the hrtimer.
 * Called with timr->it_lock held (from do_schedule_next_timer()).
 */
static void schedule_next_timer(struct k_itimer *timr)
{
	struct hrtimer *timer = &timr->it.real.timer;

	/* One-shot timers have nothing to re-arm. */
	if (timr->it.real.interval.tv64 == 0)
		return;

	timr->it_overrun += (unsigned int) hrtimer_forward(timer,
						timer->base->get_time(),
						timr->it.real.interval);

	timr->it_overrun_last = timr->it_overrun;
	timr->it_overrun = -1;
	/*
	 * Bump the requeue counter; its low bit appears to act as the
	 * "signal in flight" flag (see REQUEUE_PENDING users below).
	 */
	++timr->it_requeue_pending;
	hrtimer_restart(timer);
}
330
331
332
333
334
335
336
337
338
339
340
341
342void do_schedule_next_timer(struct siginfo *info)
343{
344 struct k_itimer *timr;
345 unsigned long flags;
346
347 timr = lock_timer(info->si_tid, &flags);
348
349 if (timr && timr->it_requeue_pending == info->si_sys_private) {
350 if (timr->it_clock < 0)
351 posix_cpu_timer_schedule(timr);
352 else
353 schedule_next_timer(timr);
354
355 info->si_overrun += timr->it_overrun_last;
356 }
357
358 if (timr)
359 unlock_timer(timr, flags);
360}
361
/*
 * posix_timer_event - queue the timer's signal to its target task
 * @timr:	the timer; caller holds timr->it_lock
 * @si_private:	requeue cookie stored in si_sys_private (0 for one-shot)
 *
 * The target task is looked up from timr->it_pid under RCU; if it has
 * exited, nothing is sent. A SIGEV_THREAD_ID timer signals the specific
 * thread, otherwise the signal is sent group-wide ("shared").
 *
 * Returns non-zero when send_sigqueue() returned > 0 — presumably the
 * signal was already pending and got dropped (caller then re-arms a
 * periodic timer itself, see posix_timer_fn()); confirm against
 * send_sigqueue()'s contract.
 */
int posix_timer_event(struct k_itimer *timr, int si_private)
{
	struct task_struct *task;
	int shared, ret = -1;

	timr->sigq->info.si_sys_private = si_private;

	rcu_read_lock();
	task = pid_task(timr->it_pid, PIDTYPE_PID);
	if (task) {
		shared = !(timr->it_sigev_notify & SIGEV_THREAD_ID);
		ret = send_sigqueue(timr->sigq, task, shared);
	}
	rcu_read_unlock();

	return ret > 0;
}
EXPORT_SYMBOL_GPL(posix_timer_event);
390
391
392
393
394
395
396
397
/*
 * hrtimer expiry callback: deliver the timer's signal. For an interval
 * timer whose signal could not be queued (posix_timer_event() returned
 * non-zero), re-arm it here so expirations keep being counted.
 */
static enum hrtimer_restart posix_timer_fn(struct hrtimer *timer)
{
	struct k_itimer *timr;
	unsigned long flags;
	int si_private = 0;
	enum hrtimer_restart ret = HRTIMER_NORESTART;

	timr = container_of(timer, struct k_itimer, it.real.timer);
	spin_lock_irqsave(&timr->it_lock, flags);

	/* Interval timers hand a requeue cookie to the signal path. */
	if (timr->it.real.interval.tv64 != 0)
		si_private = ++timr->it_requeue_pending;

	if (posix_timer_event(timr, si_private)) {
		/*
		 * Signal was not queued; for a periodic timer, forward
		 * the expiry and restart ourselves.
		 */
		if (timr->it.real.interval.tv64 != 0) {
			ktime_t now = hrtimer_cb_get_time(timer);

#ifdef CONFIG_HIGH_RES_TIMERS
			{
				/*
				 * With high-res timers a sub-jiffy
				 * interval could re-expire before the
				 * pending signal is ever collected;
				 * nudge "now" forward by one tick so
				 * hrtimer_forward() skips ahead.
				 * (Assumption reconstructed from the
				 * interval < 1-jiffy guard — confirm
				 * against upstream history.)
				 */
				ktime_t kj = ktime_set(0, NSEC_PER_SEC / HZ);

				if (timr->it.real.interval.tv64 < kj.tv64)
					now = ktime_add(now, kj);
			}
#endif
			timr->it_overrun += (unsigned int)
				hrtimer_forward(timer, now,
						timr->it.real.interval);
			ret = HRTIMER_RESTART;
			++timr->it_requeue_pending;
		}
	}

	unlock_timer(timr, flags);
	return ret;
}
461
/*
 * Validate a sigevent from userspace and resolve the pid to notify.
 * Returns NULL when the sigevent is malformed: a SIGEV_THREAD_ID
 * target must name a live thread in the caller's thread group and be
 * combined with SIGEV_SIGNAL; any notification other than SIGEV_NONE
 * needs a signal number in 1..SIGRTMAX. On success returns the pid of
 * the chosen thread (or the group leader). Caller must hold the RCU
 * read lock across the find_task_by_vpid()/task_pid() use.
 */
static struct pid *good_sigevent(sigevent_t * event)
{
	struct task_struct *rtn = current->group_leader;

	if ((event->sigev_notify & SIGEV_THREAD_ID ) &&
		(!(rtn = find_task_by_vpid(event->sigev_notify_thread_id)) ||
		 !same_thread_group(rtn, current) ||
		 (event->sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_SIGNAL))
		return NULL;

	if (((event->sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_NONE) &&
	    ((event->sigev_signo <= 0) || (event->sigev_signo > SIGRTMAX)))
		return NULL;

	return task_pid(rtn);
}
478
479void register_posix_clock(const clockid_t clock_id, struct k_clock *new_clock)
480{
481 if ((unsigned) clock_id >= MAX_CLOCKS) {
482 printk("POSIX clock register failed for clock_id %d\n",
483 clock_id);
484 return;
485 }
486
487 posix_clocks[clock_id] = *new_clock;
488}
489EXPORT_SYMBOL_GPL(register_posix_clock);
490
491static struct k_itimer * alloc_posix_timer(void)
492{
493 struct k_itimer *tmr;
494 tmr = kmem_cache_zalloc(posix_timers_cache, GFP_KERNEL);
495 if (!tmr)
496 return tmr;
497 if (unlikely(!(tmr->sigq = sigqueue_alloc()))) {
498 kmem_cache_free(posix_timers_cache, tmr);
499 return NULL;
500 }
501 memset(&tmr->sigq->info, 0, sizeof(siginfo_t));
502 return tmr;
503}
504
/* Whether the timer already owns an id in posix_timers_id. */
#define IT_ID_SET 1
#define IT_ID_NOT_SET 0
/*
 * Free a timer: remove its id from the IDR (if one was allocated),
 * drop the pid reference and the queued signal, then return the
 * structure to the slab cache.
 */
static void release_posix_timer(struct k_itimer *tmr, int it_id_set)
{
	if (it_id_set) {
		unsigned long flags;
		spin_lock_irqsave(&idr_lock, flags);
		idr_remove(&posix_timers_id, tmr->it_id);
		spin_unlock_irqrestore(&idr_lock, flags);
	}
	put_pid(tmr->it_pid);
	sigqueue_free(tmr->sigq);
	kmem_cache_free(posix_timers_cache, tmr);
}
519
520
521
522SYSCALL_DEFINE3(timer_create, const clockid_t, which_clock,
523 struct sigevent __user *, timer_event_spec,
524 timer_t __user *, created_timer_id)
525{
526 struct k_itimer *new_timer;
527 int error, new_timer_id;
528 sigevent_t event;
529 int it_id_set = IT_ID_NOT_SET;
530
531 if (invalid_clockid(which_clock))
532 return -EINVAL;
533
534 new_timer = alloc_posix_timer();
535 if (unlikely(!new_timer))
536 return -EAGAIN;
537
538 spin_lock_init(&new_timer->it_lock);
539 retry:
540 if (unlikely(!idr_pre_get(&posix_timers_id, GFP_KERNEL))) {
541 error = -EAGAIN;
542 goto out;
543 }
544 spin_lock_irq(&idr_lock);
545 error = idr_get_new(&posix_timers_id, new_timer, &new_timer_id);
546 spin_unlock_irq(&idr_lock);
547 if (error) {
548 if (error == -EAGAIN)
549 goto retry;
550
551
552
553
554 error = -EAGAIN;
555 goto out;
556 }
557
558 it_id_set = IT_ID_SET;
559 new_timer->it_id = (timer_t) new_timer_id;
560 new_timer->it_clock = which_clock;
561 new_timer->it_overrun = -1;
562 error = CLOCK_DISPATCH(which_clock, timer_create, (new_timer));
563 if (error)
564 goto out;
565
566
567
568
569
570 if (copy_to_user(created_timer_id,
571 &new_timer_id, sizeof (new_timer_id))) {
572 error = -EFAULT;
573 goto out;
574 }
575 if (timer_event_spec) {
576 if (copy_from_user(&event, timer_event_spec, sizeof (event))) {
577 error = -EFAULT;
578 goto out;
579 }
580 rcu_read_lock();
581 new_timer->it_pid = get_pid(good_sigevent(&event));
582 rcu_read_unlock();
583 if (!new_timer->it_pid) {
584 error = -EINVAL;
585 goto out;
586 }
587 } else {
588 event.sigev_notify = SIGEV_SIGNAL;
589 event.sigev_signo = SIGALRM;
590 event.sigev_value.sival_int = new_timer->it_id;
591 new_timer->it_pid = get_pid(task_tgid(current));
592 }
593
594 new_timer->it_sigev_notify = event.sigev_notify;
595 new_timer->sigq->info.si_signo = event.sigev_signo;
596 new_timer->sigq->info.si_value = event.sigev_value;
597 new_timer->sigq->info.si_tid = new_timer->it_id;
598 new_timer->sigq->info.si_code = SI_TIMER;
599
600 spin_lock_irq(¤t->sighand->siglock);
601 new_timer->it_signal = current->signal;
602 list_add(&new_timer->list, ¤t->signal->posix_timers);
603 spin_unlock_irq(¤t->sighand->siglock);
604
605 return 0;
606
607
608
609
610
611
612out:
613 release_posix_timer(new_timer, it_id_set);
614 return error;
615}
616
617
618
619
620
621
622
623
/*
 * Look up a timer by id and return it with it_lock held (irqs saved in
 * *flags), or NULL if the id is unknown or the timer belongs to another
 * process. The it_lock is taken while still holding idr_lock so the
 * timer cannot be freed between lookup and lock; idr_lock is dropped
 * (without restoring irqs) once it_lock is held.
 */
static struct k_itimer *lock_timer(timer_t timer_id, unsigned long *flags)
{
	struct k_itimer *timr;

	spin_lock_irqsave(&idr_lock, *flags);
	timr = idr_find(&posix_timers_id, (int)timer_id);
	if (timr) {
		spin_lock(&timr->it_lock);
		/* it_signal is NULL for half-created/deleted timers. */
		if (timr->it_signal == current->signal) {
			spin_unlock(&idr_lock);
			return timr;
		}
		spin_unlock(&timr->it_lock);
	}
	spin_unlock_irqrestore(&idr_lock, *flags);

	return NULL;
}
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
/*
 * Default timer_get: fill *cur_setting with the timer's interval and
 * the time remaining to the next expiry. Caller holds timr->it_lock.
 */
static void
common_timer_get(struct k_itimer *timr, struct itimerspec *cur_setting)
{
	ktime_t now, remaining, iv;
	struct hrtimer *timer = &timr->it.real.timer;

	memset(cur_setting, 0, sizeof(struct itimerspec));

	iv = timr->it.real.interval;

	/* Interval timer? Report the period. */
	if (iv.tv64)
		cur_setting->it_interval = ktime_to_timespec(iv);
	else if (!hrtimer_active(timer) &&
		 (timr->it_sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_NONE)
		return;	/* disarmed one-shot: leave everything zero */

	now = timer->base->get_time();

	/*
	 * When a requeue is pending or this is a SIGEV_NONE timer, move
	 * the expiry forward by whole intervals so that expiry > now
	 * (SIGEV_NONE timers are never re-armed by a signal path).
	 */
	if (iv.tv64 && (timr->it_requeue_pending & REQUEUE_PENDING ||
	    (timr->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE))
		timr->it_overrun += (unsigned int) hrtimer_forward(timer, now, iv);

	remaining = ktime_sub(hrtimer_get_expires(timer), now);

	if (remaining.tv64 <= 0) {
		/*
		 * Already expired: report 1ns (i.e. "about to fire")
		 * unless it is an expired SIGEV_NONE one-shot, which
		 * must report zero.
		 */
		if ((timr->it_sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_NONE)
			cur_setting->it_value.tv_nsec = 1;
	} else
		cur_setting->it_value = ktime_to_timespec(remaining);
}
703
704
705SYSCALL_DEFINE2(timer_gettime, timer_t, timer_id,
706 struct itimerspec __user *, setting)
707{
708 struct k_itimer *timr;
709 struct itimerspec cur_setting;
710 unsigned long flags;
711
712 timr = lock_timer(timer_id, &flags);
713 if (!timr)
714 return -EINVAL;
715
716 CLOCK_DISPATCH(timr->it_clock, timer_get, (timr, &cur_setting));
717
718 unlock_timer(timr, flags);
719
720 if (copy_to_user(setting, &cur_setting, sizeof (cur_setting)))
721 return -EFAULT;
722
723 return 0;
724}
725
726
727
728
729
730
731
732
733
734
735SYSCALL_DEFINE1(timer_getoverrun, timer_t, timer_id)
736{
737 struct k_itimer *timr;
738 int overrun;
739 unsigned long flags;
740
741 timr = lock_timer(timer_id, &flags);
742 if (!timr)
743 return -EINVAL;
744
745 overrun = timr->it_overrun_last;
746 unlock_timer(timr, flags);
747
748 return overrun;
749}
750
751
752
/*
 * Default timer_set: disarm the timer and, unless the new value is
 * zero, re-initialize and start it. Returns TIMER_RETRY when the timer
 * callback is currently running — the caller must drop the lock and
 * try again (see sys_timer_settime()). Caller holds timr->it_lock.
 */
static int
common_timer_set(struct k_itimer *timr, int flags,
		 struct itimerspec *new_setting, struct itimerspec *old_setting)
{
	struct hrtimer *timer = &timr->it.real.timer;
	enum hrtimer_mode mode;

	if (old_setting)
		common_timer_get(timr, old_setting);

	/* Disable the timer for the next expiry of the callback. */
	timr->it.real.interval.tv64 = 0;

	/*
	 * Cannot cancel while the callback runs; let the caller retry
	 * with the lock dropped so the callback can finish.
	 */
	if (hrtimer_try_to_cancel(timer) < 0)
		return TIMER_RETRY;

	/* +2 keeps the counter's REQUEUE_PENDING bit cleared. */
	timr->it_requeue_pending = (timr->it_requeue_pending + 2) &
		~REQUEUE_PENDING;
	timr->it_overrun_last = 0;

	/* A zero it_value just disarms the timer. */
	if (!new_setting->it_value.tv_sec && !new_setting->it_value.tv_nsec)
		return 0;

	mode = flags & TIMER_ABSTIME ? HRTIMER_MODE_ABS : HRTIMER_MODE_REL;
	hrtimer_init(&timr->it.real.timer, timr->it_clock, mode);
	timr->it.real.timer.function = posix_timer_fn;

	hrtimer_set_expires(timer, timespec_to_ktime(new_setting->it_value));

	timr->it.real.interval = timespec_to_ktime(new_setting->it_interval);

	/*
	 * SIGEV_NONE timers are never started: record an absolute
	 * expiry so timer_gettime() can compute the remaining time.
	 */
	if (((timr->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE)) {

		if (mode == HRTIMER_MODE_REL) {
			hrtimer_add_expires(timer, timer->base->get_time());
		}
		return 0;
	}

	hrtimer_start_expires(timer, mode);
	return 0;
}
801
802
/*
 * timer_settime(2): validate the new setting, then dispatch to the
 * clock's timer_set op. TIMER_RETRY from the op means the callback was
 * running; retry with rtn cleared so old_spec (captured on the first
 * pass) is not overwritten by the disarmed state.
 */
SYSCALL_DEFINE4(timer_settime, timer_t, timer_id, int, flags,
		const struct itimerspec __user *, new_setting,
		struct itimerspec __user *, old_setting)
{
	struct k_itimer *timr;
	struct itimerspec new_spec, old_spec;
	int error = 0;
	unsigned long flag;
	struct itimerspec *rtn = old_setting ? &old_spec : NULL;

	if (!new_setting)
		return -EINVAL;

	if (copy_from_user(&new_spec, new_setting, sizeof (new_spec)))
		return -EFAULT;

	if (!timespec_valid(&new_spec.it_interval) ||
	    !timespec_valid(&new_spec.it_value))
		return -EINVAL;
retry:
	timr = lock_timer(timer_id, &flag);
	if (!timr)
		return -EINVAL;

	error = CLOCK_DISPATCH(timr->it_clock, timer_set,
			       (timr, flags, &new_spec, rtn));

	unlock_timer(timr, flag);
	if (error == TIMER_RETRY) {
		rtn = NULL;	/* keep the first pass's old_spec */
		goto retry;
	}

	if (old_setting && !error &&
	    copy_to_user(old_setting, &old_spec, sizeof (old_spec)))
		error = -EFAULT;

	return error;
}
842
/*
 * Default timer_del: disarm; TIMER_RETRY when the hrtimer callback is
 * currently running and the caller must unlock and retry.
 */
static inline int common_timer_del(struct k_itimer *timer)
{
	timer->it.real.interval.tv64 = 0;

	if (hrtimer_try_to_cancel(&timer->it.real.timer) < 0)
		return TIMER_RETRY;
	return 0;
}
851
/* Dispatch timer deletion to the clock's timer_del op. */
static inline int timer_delete_hook(struct k_itimer *timer)
{
	return CLOCK_DISPATCH(timer->it_clock, timer_del, (timer));
}
856
857
858SYSCALL_DEFINE1(timer_delete, timer_t, timer_id)
859{
860 struct k_itimer *timer;
861 unsigned long flags;
862
863retry_delete:
864 timer = lock_timer(timer_id, &flags);
865 if (!timer)
866 return -EINVAL;
867
868 if (timer_delete_hook(timer) == TIMER_RETRY) {
869 unlock_timer(timer, flags);
870 goto retry_delete;
871 }
872
873 spin_lock(¤t->sighand->siglock);
874 list_del(&timer->list);
875 spin_unlock(¤t->sighand->siglock);
876
877
878
879
880 timer->it_signal = NULL;
881
882 unlock_timer(timer, flags);
883 release_posix_timer(timer, IT_ID_SET);
884 return 0;
885}
886
887
888
889
/*
 * Delete a timer during process exit (exit_itimers()). Like
 * sys_timer_delete() but the siglock is not needed: no one else can
 * be mutating the exiting process's timer list.
 */
static void itimer_delete(struct k_itimer *timer)
{
	unsigned long flags;

retry_delete:
	spin_lock_irqsave(&timer->it_lock, flags);

	if (timer_delete_hook(timer) == TIMER_RETRY) {
		/* Callback running: drop the lock and try again. */
		unlock_timer(timer, flags);
		goto retry_delete;
	}
	list_del(&timer->list);

	/* Block concurrent lock_timer() before the timer is freed. */
	timer->it_signal = NULL;

	unlock_timer(timer, flags);
	release_posix_timer(timer, IT_ID_SET);
}
911
912
913
914
915
916void exit_itimers(struct signal_struct *sig)
917{
918 struct k_itimer *tmr;
919
920 while (!list_empty(&sig->posix_timers)) {
921 tmr = list_entry(sig->posix_timers.next, struct k_itimer, list);
922 itimer_delete(tmr);
923 }
924}
925
926
/* clock_set op for clocks that cannot be set (e.g. CLOCK_MONOTONIC). */
int do_posix_clock_nosettime(const clockid_t clockid, struct timespec *tp)
{
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(do_posix_clock_nosettime);
932
/*
 * nsleep op for clocks that do not support clock_nanosleep(). Prefers
 * ENOTSUP where the platform defines it, otherwise EOPNOTSUPP (the two
 * are commonly the same value).
 */
int do_posix_clock_nonanosleep(const clockid_t clock, int flags,
			       struct timespec *t, struct timespec __user *r)
{
#ifndef ENOTSUP
	return -EOPNOTSUPP;
#else
	return -ENOTSUP;
#endif
}
EXPORT_SYMBOL_GPL(do_posix_clock_nonanosleep);
943
944SYSCALL_DEFINE2(clock_settime, const clockid_t, which_clock,
945 const struct timespec __user *, tp)
946{
947 struct timespec new_tp;
948
949 if (invalid_clockid(which_clock))
950 return -EINVAL;
951 if (copy_from_user(&new_tp, tp, sizeof (*tp)))
952 return -EFAULT;
953
954 return CLOCK_DISPATCH(which_clock, clock_set, (which_clock, &new_tp));
955}
956
957SYSCALL_DEFINE2(clock_gettime, const clockid_t, which_clock,
958 struct timespec __user *,tp)
959{
960 struct timespec kernel_tp;
961 int error;
962
963 if (invalid_clockid(which_clock))
964 return -EINVAL;
965 error = CLOCK_DISPATCH(which_clock, clock_get,
966 (which_clock, &kernel_tp));
967 if (!error && copy_to_user(tp, &kernel_tp, sizeof (kernel_tp)))
968 error = -EFAULT;
969
970 return error;
971
972}
973
974SYSCALL_DEFINE2(clock_getres, const clockid_t, which_clock,
975 struct timespec __user *, tp)
976{
977 struct timespec rtn_tp;
978 int error;
979
980 if (invalid_clockid(which_clock))
981 return -EINVAL;
982
983 error = CLOCK_DISPATCH(which_clock, clock_getres,
984 (which_clock, &rtn_tp));
985
986 if (!error && tp && copy_to_user(tp, &rtn_tp, sizeof (rtn_tp))) {
987 error = -EFAULT;
988 }
989
990 return error;
991}
992
993
994
995
996static int common_nsleep(const clockid_t which_clock, int flags,
997 struct timespec *tsave, struct timespec __user *rmtp)
998{
999 return hrtimer_nanosleep(tsave, rmtp, flags & TIMER_ABSTIME ?
1000 HRTIMER_MODE_ABS : HRTIMER_MODE_REL,
1001 which_clock);
1002}
1003
1004SYSCALL_DEFINE4(clock_nanosleep, const clockid_t, which_clock, int, flags,
1005 const struct timespec __user *, rqtp,
1006 struct timespec __user *, rmtp)
1007{
1008 struct timespec t;
1009
1010 if (invalid_clockid(which_clock))
1011 return -EINVAL;
1012
1013 if (copy_from_user(&t, rqtp, sizeof (struct timespec)))
1014 return -EFAULT;
1015
1016 if (!timespec_valid(&t))
1017 return -EINVAL;
1018
1019 return CLOCK_DISPATCH(which_clock, nsleep,
1020 (which_clock, flags, &t, rmtp));
1021}
1022
1023
1024
1025
/* Default nsleep_restart: resume an interrupted hrtimer nanosleep. */
static int common_nsleep_restart(struct restart_block *restart_block)
{
	return hrtimer_nanosleep_restart(restart_block);
}
1030
1031
1032
1033
1034
/*
 * Restart handler for an interrupted clock_nanosleep(): re-dispatch on
 * the clock id that was stashed in restart_block->arg0 at setup time.
 */
long
clock_nanosleep_restart(struct restart_block *restart_block)
{
	clockid_t which_clock = restart_block->arg0;

	return CLOCK_DISPATCH(which_clock, nsleep_restart,
			      (restart_block));
}
1043