1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33#include <linux/mm.h>
34#include <linux/interrupt.h>
35#include <linux/slab.h>
36#include <linux/time.h>
37#include <linux/mutex.h>
38
39#include <asm/uaccess.h>
40#include <linux/list.h>
41#include <linux/init.h>
42#include <linux/compiler.h>
43#include <linux/idr.h>
44#include <linux/posix-timers.h>
45#include <linux/syscalls.h>
46#include <linux/wait.h>
47#include <linux/workqueue.h>
48#include <linux/module.h>
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
/* Slab cache all struct k_itimer instances are allocated from. */
static struct kmem_cache *posix_timers_cache;
/* Maps timer_t ids to their k_itimer; guarded by idr_lock. */
static struct idr posix_timers_id;
static DEFINE_SPINLOCK(idr_lock);

/*
 * SIGEV_THREAD_ID is or-ed into sigev_notify alongside the other SIGEV
 * values, so it must live in a bit of its own.  Catch any uapi header
 * change that breaks this assumption at compile time.
 */
#if SIGEV_THREAD_ID != (SIGEV_THREAD_ID & \
                       ~(SIGEV_SIGNAL | SIGEV_NONE | SIGEV_THREAD))
#error "SIGEV_THREAD_ID must not share bit with other SIGEV values!"
#endif
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
/* Table of registered clocks, indexed by clockid (0 .. MAX_CLOCKS-1). */
static struct k_clock posix_clocks[MAX_CLOCKS];

/*
 * Default implementations used by CLOCK_DISPATCH() whenever a clock
 * leaves the corresponding k_clock hook NULL.
 */
static int common_nsleep(const clockid_t, int flags, struct timespec *t,
			 struct timespec __user *rmtp);
static void common_timer_get(struct k_itimer *, struct itimerspec *);
static int common_timer_set(struct k_itimer *, int,
			    struct itimerspec *, struct itimerspec *);
static int common_timer_del(struct k_itimer *timer);

/* hrtimer expiry callback backing every hrtimer-based posix timer. */
static enum hrtimer_restart posix_timer_fn(struct hrtimer *data);

static struct k_itimer *__lock_timer(timer_t timer_id, unsigned long *flags);
149
/*
 * Look up and lock a timer by id.  The __cond_lock() annotation tells
 * sparse that it_lock is held exactly when a non-NULL timer is returned.
 */
#define lock_timer(tid, flags)						   \
({	struct k_itimer *__timr;					   \
	__cond_lock(&__timr->it_lock, __timr = __lock_timer(tid, flags)); \
	__timr;								   \
})

/* Release a timer obtained via lock_timer()/__lock_timer(). */
static inline void unlock_timer(struct k_itimer *timr, unsigned long flags)
{
	spin_unlock_irqrestore(&timr->it_lock, flags);
}
160
161
162
163
/*
 * Dispatch a clock operation: negative clockids are CPU clocks routed to
 * posix_cpu_*; otherwise a non-NULL per-clock hook wins, with the
 * common_* implementation as the fallback.
 */
#define CLOCK_DISPATCH(clock, call, arglist) \
 	((clock) < 0 ? posix_cpu_##call arglist : \
 	 (posix_clocks[clock].call != NULL \
 	  ? (*posix_clocks[clock].call) arglist : common_##call arglist))
168
169
170
171
172
173
174
175
176
177static inline int common_clock_getres(const clockid_t which_clock,
178 struct timespec *tp)
179{
180 tp->tv_sec = 0;
181 tp->tv_nsec = posix_clocks[which_clock].res;
182 return 0;
183}
184
185
186
187
/* Default clock_get hook: CLOCK_REALTIME wall-clock time. */
static int common_clock_get(clockid_t which_clock, struct timespec *tp)
{
	ktime_get_real_ts(tp);
	return 0;
}
193
/*
 * Default clock_set hook: route through settimeofday, i.e.
 * CLOCK_REALTIME semantics including the permission check it performs.
 */
static inline int common_clock_set(const clockid_t which_clock,
				   struct timespec *tp)
{
	return do_sys_settimeofday(tp, NULL);
}
199
/* Default timer_create hook: back the timer with an hrtimer on its clock. */
static int common_timer_create(struct k_itimer *new_timer)
{
	hrtimer_init(&new_timer->it.real.timer, new_timer->it_clock, 0);
	return 0;
}
205
/* timer_create hook for clocks that do not support timers at all. */
static int no_timer_create(struct k_itimer *new_timer)
{
	return -EOPNOTSUPP;
}
210
/* nsleep hook for clocks that do not support clock_nanosleep(). */
static int no_nsleep(const clockid_t which_clock, int flags,
		     struct timespec *tsave, struct timespec __user *rmtp)
{
	return -EOPNOTSUPP;
}
216
217
218
219
220static inline int invalid_clockid(const clockid_t which_clock)
221{
222 if (which_clock < 0)
223 return 0;
224 if ((unsigned) which_clock >= MAX_CLOCKS)
225 return 1;
226 if (posix_clocks[which_clock].clock_getres != NULL)
227 return 0;
228 if (posix_clocks[which_clock].res != 0)
229 return 0;
230 return 1;
231}
232
233
234
235
/* clock_get hook for CLOCK_MONOTONIC. */
static int posix_ktime_get_ts(clockid_t which_clock, struct timespec *tp)
{
	ktime_get_ts(tp);
	return 0;
}
241
242
243
244
/* clock_get hook for CLOCK_MONOTONIC_RAW (hardware-based, no NTP slew). */
static int posix_get_monotonic_raw(clockid_t which_clock, struct timespec *tp)
{
	getrawmonotonic(tp);
	return 0;
}
250
251
/*
 * clock_get hook for CLOCK_REALTIME_COARSE: cheap tick-granularity
 * snapshot, no clocksource read.
 */
static int posix_get_realtime_coarse(clockid_t which_clock, struct timespec *tp)
{
	*tp = current_kernel_time();
	return 0;
}
257
/* clock_get hook for CLOCK_MONOTONIC_COARSE (tick-granularity). */
static int posix_get_monotonic_coarse(clockid_t which_clock,
				      struct timespec *tp)
{
	*tp = get_monotonic_coarse();
	return 0;
}
264
/* clock_getres hook shared by both _COARSE clocks: one low-res tick. */
static int posix_get_coarse_res(const clockid_t which_clock, struct timespec *tp)
{
	*tp = ktime_to_timespec(KTIME_LOW_RES);
	return 0;
}
270
271
272
/*
 * Boot-time initialization: register the five built-in clocks, create
 * the k_itimer slab cache and initialize the timer-id idr.
 *
 * Hooks deliberately left NULL fall back to the common_* defaults via
 * CLOCK_DISPATCH (e.g. CLOCK_REALTIME's clock_get/clock_set).
 */
static __init int init_posix_timers(void)
{
	struct k_clock clock_realtime = {
		.clock_getres = hrtimer_get_res,
	};
	struct k_clock clock_monotonic = {
		.clock_getres = hrtimer_get_res,
		.clock_get = posix_ktime_get_ts,
		.clock_set = do_posix_clock_nosettime,
	};
	struct k_clock clock_monotonic_raw = {
		.clock_getres = hrtimer_get_res,
		.clock_get = posix_get_monotonic_raw,
		.clock_set = do_posix_clock_nosettime,
		.timer_create = no_timer_create,
		.nsleep = no_nsleep,
	};
	struct k_clock clock_realtime_coarse = {
		.clock_getres = posix_get_coarse_res,
		.clock_get = posix_get_realtime_coarse,
		.clock_set = do_posix_clock_nosettime,
		.timer_create = no_timer_create,
		.nsleep = no_nsleep,
	};
	struct k_clock clock_monotonic_coarse = {
		.clock_getres = posix_get_coarse_res,
		.clock_get = posix_get_monotonic_coarse,
		.clock_set = do_posix_clock_nosettime,
		.timer_create = no_timer_create,
		.nsleep = no_nsleep,
	};

	register_posix_clock(CLOCK_REALTIME, &clock_realtime);
	register_posix_clock(CLOCK_MONOTONIC, &clock_monotonic);
	register_posix_clock(CLOCK_MONOTONIC_RAW, &clock_monotonic_raw);
	register_posix_clock(CLOCK_REALTIME_COARSE, &clock_realtime_coarse);
	register_posix_clock(CLOCK_MONOTONIC_COARSE, &clock_monotonic_coarse);

	/* SLAB_PANIC: boot cannot meaningfully continue without this cache. */
	posix_timers_cache = kmem_cache_create("posix_timers_cache",
					sizeof (struct k_itimer), 0, SLAB_PANIC,
					NULL);
	idr_init(&posix_timers_id);
	return 0;
}

__initcall(init_posix_timers);
319
/*
 * Re-arm an interval timer after its signal was delivered: advance the
 * expiry by whole intervals past "now", fold the missed expiries into
 * the overrun count, and restart the hrtimer.  Called with the timer
 * locked (see do_schedule_next_timer()).
 */
static void schedule_next_timer(struct k_itimer *timr)
{
	struct hrtimer *timer = &timr->it.real.timer;

	/* One-shot timers are never re-armed. */
	if (timr->it.real.interval.tv64 == 0)
		return;

	timr->it_overrun += (unsigned int) hrtimer_forward(timer,
						timer->base->get_time(),
						timr->it.real.interval);

	/* Publish the overrun count for this expiry, reset the running
	 * counter, and bump the requeue cookie for race detection. */
	timr->it_overrun_last = timr->it_overrun;
	timr->it_overrun = -1;
	++timr->it_requeue_pending;
	hrtimer_restart(timer);
}
336
337
338
339
340
341
342
343
344
345
346
347
/*
 * Called from the signal delivery path once a timer signal has been
 * dequeued, to re-arm interval timers.  The re-arm only happens when
 * si_sys_private still matches it_requeue_pending, i.e. no
 * timer_settime()/timer_delete() raced with the delivery.
 */
void do_schedule_next_timer(struct siginfo *info)
{
	struct k_itimer *timr;
	unsigned long flags;

	timr = lock_timer(info->si_tid, &flags);

	if (timr && timr->it_requeue_pending == info->si_sys_private) {
		/* Negative clockids are CPU-time clocks. */
		if (timr->it_clock < 0)
			posix_cpu_timer_schedule(timr);
		else
			schedule_next_timer(timr);

		info->si_overrun += timr->it_overrun_last;
	}

	if (timr)
		unlock_timer(timr, flags);
}
367
/*
 * Queue the timer's signal to its target task.
 *
 * si_private is stored in the queued siginfo so that
 * do_schedule_next_timer() can later detect whether a timer_set/del
 * raced with this delivery.  Returns 1 when send_sigqueue() reported a
 * positive status (signal not queued — the caller then re-arms the
 * timer itself, see posix_timer_fn()), 0 when the signal was queued or
 * the target task is gone.
 */
int posix_timer_event(struct k_itimer *timr, int si_private)
{
	struct task_struct *task;
	int shared, ret = -1;

	timr->sigq->info.si_sys_private = si_private;

	/* RCU keeps the task alive across pid_task()/send_sigqueue(). */
	rcu_read_lock();
	task = pid_task(timr->it_pid, PIDTYPE_PID);
	if (task) {
		/* SIGEV_THREAD_ID targets one thread; otherwise the group. */
		shared = !(timr->it_sigev_notify & SIGEV_THREAD_ID);
		ret = send_sigqueue(timr->sigq, task, shared);
	}
	rcu_read_unlock();

	/* ret == -1 (no task) and ret == 0 (queued) both map to 0. */
	return ret > 0;
}
EXPORT_SYMBOL_GPL(posix_timer_event);
396
397
398
399
400
401
402
403
/*
 * hrtimer expiry callback for hrtimer-backed posix timers.
 *
 * Sends the timer signal; when posix_timer_event() reports that the
 * signal could not be queued (previous instance still pending), an
 * interval timer is re-armed right here so the expiry is not lost —
 * the missed firing is accounted as an overrun instead.
 */
static enum hrtimer_restart posix_timer_fn(struct hrtimer *timer)
{
	struct k_itimer *timr;
	unsigned long flags;
	int si_private = 0;
	enum hrtimer_restart ret = HRTIMER_NORESTART;

	timr = container_of(timer, struct k_itimer, it.real.timer);
	spin_lock_irqsave(&timr->it_lock, flags);

	/* Interval timers bump the requeue cookie for race detection. */
	if (timr->it.real.interval.tv64 != 0)
		si_private = ++timr->it_requeue_pending;

	if (posix_timer_event(timr, si_private)) {
		/*
		 * Signal was not queued: for interval timers, advance
		 * the expiry ourselves and ask the hrtimer core to
		 * restart us.
		 */
		if (timr->it.real.interval.tv64 != 0) {
			ktime_t now = hrtimer_cb_get_time(timer);

			/*
			 * With high-resolution timers a sub-jiffie
			 * interval would make us re-expire (and fail to
			 * queue again) before the pending signal is ever
			 * dequeued.  Pad "now" by one tick so
			 * hrtimer_forward() places the next expiry at
			 * least a jiffie away in that case.
			 */
#ifdef CONFIG_HIGH_RES_TIMERS
			{
				ktime_t kj = ktime_set(0, NSEC_PER_SEC / HZ);

				if (timr->it.real.interval.tv64 < kj.tv64)
					now = ktime_add(now, kj);
			}
#endif
			timr->it_overrun += (unsigned int)
				hrtimer_forward(timer, now,
						timr->it.real.interval);
			ret = HRTIMER_RESTART;
			++timr->it_requeue_pending;
		}
	}

	unlock_timer(timr, flags);
	return ret;
}
467
/*
 * Validate a user-supplied sigevent and resolve its notification
 * target: the group leader's pid by default, or a specific thread of
 * this group for SIGEV_THREAD_ID.  Returns NULL if the event is
 * malformed.  Runs under rcu_read_lock() taken by the caller, which
 * makes find_task_by_vpid() safe.
 */
static struct pid *good_sigevent(sigevent_t * event)
{
	struct task_struct *rtn = current->group_leader;

	/* SIGEV_THREAD_ID: the thread must exist, belong to this thread
	 * group, and the remaining notify bits must be SIGEV_SIGNAL. */
	if ((event->sigev_notify & SIGEV_THREAD_ID ) &&
		(!(rtn = find_task_by_vpid(event->sigev_notify_thread_id)) ||
		 !same_thread_group(rtn, current) ||
		 (event->sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_SIGNAL))
		return NULL;

	/* Anything that actually sends a signal needs a valid number. */
	if (((event->sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_NONE) &&
	    ((event->sigev_signo <= 0) || (event->sigev_signo > SIGRTMAX)))
		return NULL;

	return task_pid(rtn);
}
484
485void register_posix_clock(const clockid_t clock_id, struct k_clock *new_clock)
486{
487 if ((unsigned) clock_id >= MAX_CLOCKS) {
488 printk("POSIX clock register failed for clock_id %d\n",
489 clock_id);
490 return;
491 }
492
493 posix_clocks[clock_id] = *new_clock;
494}
495EXPORT_SYMBOL_GPL(register_posix_clock);
496
497static struct k_itimer * alloc_posix_timer(void)
498{
499 struct k_itimer *tmr;
500 tmr = kmem_cache_zalloc(posix_timers_cache, GFP_KERNEL);
501 if (!tmr)
502 return tmr;
503 if (unlikely(!(tmr->sigq = sigqueue_alloc()))) {
504 kmem_cache_free(posix_timers_cache, tmr);
505 return NULL;
506 }
507 memset(&tmr->sigq->info, 0, sizeof(siginfo_t));
508 return tmr;
509}
510
/* Whether the timer ever obtained an idr slot (see release below). */
#define IT_ID_SET 1
#define IT_ID_NOT_SET 0
/*
 * Tear a timer down: remove its idr slot (if one was allocated), drop
 * its pid reference and sigqueue, and return it to the slab cache.
 */
static void release_posix_timer(struct k_itimer *tmr, int it_id_set)
{
	if (it_id_set) {
		unsigned long flags;
		spin_lock_irqsave(&idr_lock, flags);
		idr_remove(&posix_timers_id, tmr->it_id);
		spin_unlock_irqrestore(&idr_lock, flags);
	}
	put_pid(tmr->it_pid);
	sigqueue_free(tmr->sigq);
	kmem_cache_free(posix_timers_cache, tmr);
}
525
526
527
528SYSCALL_DEFINE3(timer_create, const clockid_t, which_clock,
529 struct sigevent __user *, timer_event_spec,
530 timer_t __user *, created_timer_id)
531{
532 struct k_itimer *new_timer;
533 int error, new_timer_id;
534 sigevent_t event;
535 int it_id_set = IT_ID_NOT_SET;
536
537 if (invalid_clockid(which_clock))
538 return -EINVAL;
539
540 new_timer = alloc_posix_timer();
541 if (unlikely(!new_timer))
542 return -EAGAIN;
543
544 spin_lock_init(&new_timer->it_lock);
545 retry:
546 if (unlikely(!idr_pre_get(&posix_timers_id, GFP_KERNEL))) {
547 error = -EAGAIN;
548 goto out;
549 }
550 spin_lock_irq(&idr_lock);
551 error = idr_get_new(&posix_timers_id, new_timer, &new_timer_id);
552 spin_unlock_irq(&idr_lock);
553 if (error) {
554 if (error == -EAGAIN)
555 goto retry;
556
557
558
559
560 error = -EAGAIN;
561 goto out;
562 }
563
564 it_id_set = IT_ID_SET;
565 new_timer->it_id = (timer_t) new_timer_id;
566 new_timer->it_clock = which_clock;
567 new_timer->it_overrun = -1;
568
569 if (timer_event_spec) {
570 if (copy_from_user(&event, timer_event_spec, sizeof (event))) {
571 error = -EFAULT;
572 goto out;
573 }
574 rcu_read_lock();
575 new_timer->it_pid = get_pid(good_sigevent(&event));
576 rcu_read_unlock();
577 if (!new_timer->it_pid) {
578 error = -EINVAL;
579 goto out;
580 }
581 } else {
582 event.sigev_notify = SIGEV_SIGNAL;
583 event.sigev_signo = SIGALRM;
584 event.sigev_value.sival_int = new_timer->it_id;
585 new_timer->it_pid = get_pid(task_tgid(current));
586 }
587
588 new_timer->it_sigev_notify = event.sigev_notify;
589 new_timer->sigq->info.si_signo = event.sigev_signo;
590 new_timer->sigq->info.si_value = event.sigev_value;
591 new_timer->sigq->info.si_tid = new_timer->it_id;
592 new_timer->sigq->info.si_code = SI_TIMER;
593
594 if (copy_to_user(created_timer_id,
595 &new_timer_id, sizeof (new_timer_id))) {
596 error = -EFAULT;
597 goto out;
598 }
599
600 error = CLOCK_DISPATCH(which_clock, timer_create, (new_timer));
601 if (error)
602 goto out;
603
604 spin_lock_irq(¤t->sighand->siglock);
605 new_timer->it_signal = current->signal;
606 list_add(&new_timer->list, ¤t->signal->posix_timers);
607 spin_unlock_irq(¤t->sighand->siglock);
608
609 return 0;
610
611
612
613
614
615
616out:
617 release_posix_timer(new_timer, it_id_set);
618 return error;
619}
620
621
622
623
624
625
626
627
/*
 * Resolve a timer id to its locked k_itimer.
 *
 * idr_lock makes the lookup atomic against concurrent deletion; then
 * it_lock is taken hand-over-hand and ownership is verified through
 * it_signal, so a process cannot operate on another process' timers
 * (timer_delete() clears it_signal under it_lock).
 *
 * On success returns with it_lock held and irqs disabled (*flags holds
 * the saved state for unlock_timer()); on failure returns NULL with no
 * locks held and irq state restored.
 */
static struct k_itimer *__lock_timer(timer_t timer_id, unsigned long *flags)
{
	struct k_itimer *timr;

	spin_lock_irqsave(&idr_lock, *flags);
	timr = idr_find(&posix_timers_id, (int)timer_id);
	if (timr) {
		spin_lock(&timr->it_lock);
		if (timr->it_signal == current->signal) {
			/* Keep irqs disabled; unlock_timer() restores them. */
			spin_unlock(&idr_lock);
			return timr;
		}
		spin_unlock(&timr->it_lock);
	}
	spin_unlock_irqrestore(&idr_lock, *flags);

	return NULL;
}
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
/*
 * Fill *cur_setting with the timer's interval and time remaining,
 * i.e. timer_gettime() semantics.  Called with the timer locked.
 *
 * SIGEV_NONE timers are never actually started (see common_timer_set),
 * so for those — and when a requeue is pending — the expiry is pushed
 * forward here to compute an up-to-date remaining time and overrun.
 */
static void
common_timer_get(struct k_itimer *timr, struct itimerspec *cur_setting)
{
	ktime_t now, remaining, iv;
	struct hrtimer *timer = &timr->it.real.timer;

	memset(cur_setting, 0, sizeof(struct itimerspec));

	iv = timr->it.real.interval;

	/* Interval timers report their period. */
	if (iv.tv64)
		cur_setting->it_interval = ktime_to_timespec(iv);
	else if (!hrtimer_active(timer) &&
		 (timr->it_sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_NONE)
		/* One-shot timer already fired: everything stays zero. */
		return;

	now = timer->base->get_time();

	/*
	 * Forward the timer when a requeue is pending or for SIGEV_NONE
	 * timers (never armed), so "remaining" is measured against the
	 * next real expiry and overruns are accumulated.
	 */
	if (iv.tv64 && (timr->it_requeue_pending & REQUEUE_PENDING ||
	    (timr->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE))
		timr->it_overrun += (unsigned int) hrtimer_forward(timer, now, iv);

	remaining = ktime_sub(hrtimer_get_expires(timer), now);

	if (remaining.tv64 <= 0) {
		/*
		 * Expired but the signal not yet delivered: report 1ns
		 * so userspace does not mistake it for a disarmed timer.
		 */
		if ((timr->it_sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_NONE)
			cur_setting->it_value.tv_nsec = 1;
	} else
		cur_setting->it_value = ktime_to_timespec(remaining);
}
707
708
709SYSCALL_DEFINE2(timer_gettime, timer_t, timer_id,
710 struct itimerspec __user *, setting)
711{
712 struct k_itimer *timr;
713 struct itimerspec cur_setting;
714 unsigned long flags;
715
716 timr = lock_timer(timer_id, &flags);
717 if (!timr)
718 return -EINVAL;
719
720 CLOCK_DISPATCH(timr->it_clock, timer_get, (timr, &cur_setting));
721
722 unlock_timer(timr, flags);
723
724 if (copy_to_user(setting, &cur_setting, sizeof (cur_setting)))
725 return -EFAULT;
726
727 return 0;
728}
729
730
731
732
733
734
735
736
737
738
739SYSCALL_DEFINE1(timer_getoverrun, timer_t, timer_id)
740{
741 struct k_itimer *timr;
742 int overrun;
743 unsigned long flags;
744
745 timr = lock_timer(timer_id, &flags);
746 if (!timr)
747 return -EINVAL;
748
749 overrun = timr->it_overrun_last;
750 unlock_timer(timr, flags);
751
752 return overrun;
753}
754
755
756
/*
 * (Re)program a timer — the default timer_set hook used by
 * timer_settime().  Returns TIMER_RETRY when the hrtimer callback is
 * currently running; the caller must drop the timer lock and retry so
 * the callback can complete.  Called with the timer locked.
 */
static int
common_timer_set(struct k_itimer *timr, int flags,
		 struct itimerspec *new_setting, struct itimerspec *old_setting)
{
	struct hrtimer *timer = &timr->it.real.timer;
	enum hrtimer_mode mode;

	if (old_setting)
		common_timer_get(timr, old_setting);

	/* Clear the interval first so a racing expiry cannot re-arm. */
	timr->it.real.interval.tv64 = 0;

	/* < 0 means the callback is executing right now. */
	if (hrtimer_try_to_cancel(timer) < 0)
		return TIMER_RETRY;

	/* Invalidate any in-flight requeue cookie (+2 keeps the
	 * REQUEUE_PENDING low bit cleared). */
	timr->it_requeue_pending = (timr->it_requeue_pending + 2) &
		~REQUEUE_PENDING;
	timr->it_overrun_last = 0;

	/* An all-zero it_value merely disarms the timer. */
	if (!new_setting->it_value.tv_sec && !new_setting->it_value.tv_nsec)
		return 0;

	mode = flags & TIMER_ABSTIME ? HRTIMER_MODE_ABS : HRTIMER_MODE_REL;
	hrtimer_init(&timr->it.real.timer, timr->it_clock, mode);
	timr->it.real.timer.function = posix_timer_fn;

	hrtimer_set_expires(timer, timespec_to_ktime(new_setting->it_value));

	timr->it.real.interval = timespec_to_ktime(new_setting->it_interval);

	/*
	 * SIGEV_NONE timers are never started; record an absolute expiry
	 * so common_timer_get() can still report the remaining time.
	 */
	if (((timr->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE)) {
		if (mode == HRTIMER_MODE_REL) {
			hrtimer_add_expires(timer, timer->base->get_time());
		}
		return 0;
	}

	hrtimer_start_expires(timer, mode);
	return 0;
}
805
806
/*
 * timer_settime(2): validate the new setting, then arm/disarm the
 * timer via the clock's timer_set hook, retrying while the hook
 * reports TIMER_RETRY (expiry callback in flight).
 */
SYSCALL_DEFINE4(timer_settime, timer_t, timer_id, int, flags,
		const struct itimerspec __user *, new_setting,
		struct itimerspec __user *, old_setting)
{
	struct k_itimer *timr;
	struct itimerspec new_spec, old_spec;
	int error = 0;
	unsigned long flag;
	struct itimerspec *rtn = old_setting ? &old_spec : NULL;

	if (!new_setting)
		return -EINVAL;

	if (copy_from_user(&new_spec, new_setting, sizeof (new_spec)))
		return -EFAULT;

	if (!timespec_valid(&new_spec.it_interval) ||
	    !timespec_valid(&new_spec.it_value))
		return -EINVAL;
retry:
	timr = lock_timer(timer_id, &flag);
	if (!timr)
		return -EINVAL;

	error = CLOCK_DISPATCH(timr->it_clock, timer_set,
			       (timr, flags, &new_spec, rtn));

	unlock_timer(timr, flag);
	if (error == TIMER_RETRY) {
		/* The old setting was already captured on the first pass. */
		rtn = NULL;
		goto retry;
	}

	if (old_setting && !error &&
	    copy_to_user(old_setting, &old_spec, sizeof (old_spec)))
		error = -EFAULT;

	return error;
}
846
/*
 * Default timer_del hook: clear the interval and cancel the hrtimer.
 * Returns TIMER_RETRY while the expiry callback is running so the
 * caller can drop its locks and try again.
 */
static inline int common_timer_del(struct k_itimer *timer)
{
	timer->it.real.interval.tv64 = 0;

	if (hrtimer_try_to_cancel(&timer->it.real.timer) < 0)
		return TIMER_RETRY;
	return 0;
}
855
/* Route a deletion through the owning clock's timer_del hook. */
static inline int timer_delete_hook(struct k_itimer *timer)
{
	return CLOCK_DISPATCH(timer->it_clock, timer_del, (timer));
}
860
861
862SYSCALL_DEFINE1(timer_delete, timer_t, timer_id)
863{
864 struct k_itimer *timer;
865 unsigned long flags;
866
867retry_delete:
868 timer = lock_timer(timer_id, &flags);
869 if (!timer)
870 return -EINVAL;
871
872 if (timer_delete_hook(timer) == TIMER_RETRY) {
873 unlock_timer(timer, flags);
874 goto retry_delete;
875 }
876
877 spin_lock(¤t->sighand->siglock);
878 list_del(&timer->list);
879 spin_unlock(¤t->sighand->siglock);
880
881
882
883
884 timer->it_signal = NULL;
885
886 unlock_timer(timer, flags);
887 release_posix_timer(timer, IT_ID_SET);
888 return 0;
889}
890
891
892
893
/*
 * Delete a timer during process exit.  Like sys_timer_delete() but the
 * k_itimer is referenced directly — no id lookup and no ownership
 * check, since this runs on behalf of the exiting group itself.
 */
static void itimer_delete(struct k_itimer *timer)
{
	unsigned long flags;

retry_delete:
	spin_lock_irqsave(&timer->it_lock, flags);

	/* Expiry callback in flight: unlock, let it finish, retry. */
	if (timer_delete_hook(timer) == TIMER_RETRY) {
		unlock_timer(timer, flags);
		goto retry_delete;
	}
	list_del(&timer->list);

	/* Detach from the signal struct so concurrent lookups fail. */
	timer->it_signal = NULL;

	unlock_timer(timer, flags);
	release_posix_timer(timer, IT_ID_SET);
}
915
916
917
918
919
920void exit_itimers(struct signal_struct *sig)
921{
922 struct k_itimer *tmr;
923
924 while (!list_empty(&sig->posix_timers)) {
925 tmr = list_entry(sig->posix_timers.next, struct k_itimer, list);
926 itimer_delete(tmr);
927 }
928}
929
930
/* clock_set hook for clocks that cannot be set (e.g. the monotonics). */
int do_posix_clock_nosettime(const clockid_t clockid, struct timespec *tp)
{
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(do_posix_clock_nosettime);
936
/*
 * nsleep hook for clocks that do not support clock_nanosleep().
 * ENOTSUP is not defined by all arch error headers — presumably only
 * some arches provide it as distinct from EOPNOTSUPP — hence the
 * conditional (NOTE(review): confirm which arches define ENOTSUP).
 */
int do_posix_clock_nonanosleep(const clockid_t clock, int flags,
			       struct timespec *t, struct timespec __user *r)
{
#ifndef ENOTSUP
	return -EOPNOTSUPP;
#else
	return -ENOTSUP;
#endif
}
EXPORT_SYMBOL_GPL(do_posix_clock_nonanosleep);
947
948SYSCALL_DEFINE2(clock_settime, const clockid_t, which_clock,
949 const struct timespec __user *, tp)
950{
951 struct timespec new_tp;
952
953 if (invalid_clockid(which_clock))
954 return -EINVAL;
955 if (copy_from_user(&new_tp, tp, sizeof (*tp)))
956 return -EFAULT;
957
958 return CLOCK_DISPATCH(which_clock, clock_set, (which_clock, &new_tp));
959}
960
961SYSCALL_DEFINE2(clock_gettime, const clockid_t, which_clock,
962 struct timespec __user *,tp)
963{
964 struct timespec kernel_tp;
965 int error;
966
967 if (invalid_clockid(which_clock))
968 return -EINVAL;
969 error = CLOCK_DISPATCH(which_clock, clock_get,
970 (which_clock, &kernel_tp));
971 if (!error && copy_to_user(tp, &kernel_tp, sizeof (kernel_tp)))
972 error = -EFAULT;
973
974 return error;
975
976}
977
978SYSCALL_DEFINE2(clock_getres, const clockid_t, which_clock,
979 struct timespec __user *, tp)
980{
981 struct timespec rtn_tp;
982 int error;
983
984 if (invalid_clockid(which_clock))
985 return -EINVAL;
986
987 error = CLOCK_DISPATCH(which_clock, clock_getres,
988 (which_clock, &rtn_tp));
989
990 if (!error && tp && copy_to_user(tp, &rtn_tp, sizeof (rtn_tp))) {
991 error = -EFAULT;
992 }
993
994 return error;
995}
996
997
998
999
/*
 * Default nsleep hook: sleep on an hrtimer of the given clock, in
 * absolute or relative mode depending on TIMER_ABSTIME.
 */
static int common_nsleep(const clockid_t which_clock, int flags,
			 struct timespec *tsave, struct timespec __user *rmtp)
{
	return hrtimer_nanosleep(tsave, rmtp, flags & TIMER_ABSTIME ?
				 HRTIMER_MODE_ABS : HRTIMER_MODE_REL,
				 which_clock);
}
1007
1008SYSCALL_DEFINE4(clock_nanosleep, const clockid_t, which_clock, int, flags,
1009 const struct timespec __user *, rqtp,
1010 struct timespec __user *, rmtp)
1011{
1012 struct timespec t;
1013
1014 if (invalid_clockid(which_clock))
1015 return -EINVAL;
1016
1017 if (copy_from_user(&t, rqtp, sizeof (struct timespec)))
1018 return -EFAULT;
1019
1020 if (!timespec_valid(&t))
1021 return -EINVAL;
1022
1023 return CLOCK_DISPATCH(which_clock, nsleep,
1024 (which_clock, flags, &t, rmtp));
1025}
1026
1027
1028
1029
/* Default nsleep_restart hook: resume an interrupted hrtimer sleep. */
static int common_nsleep_restart(struct restart_block *restart_block)
{
	return hrtimer_nanosleep_restart(restart_block);
}
1034
1035
1036
1037
1038
/*
 * Resume a clock_nanosleep() that was interrupted by a signal.  The
 * clockid is read back from restart_block->arg0 — presumably stashed
 * there by the nsleep implementation before it requested the restart
 * (not visible in this file; confirm against hrtimer_nanosleep()).
 */
long
clock_nanosleep_restart(struct restart_block *restart_block)
{
	clockid_t which_clock = restart_block->arg0;

	return CLOCK_DISPATCH(which_clock, nsleep_restart,
			      (restart_block));
}
1047