1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33#include <linux/mm.h>
34#include <linux/interrupt.h>
35#include <linux/slab.h>
36#include <linux/time.h>
37#include <linux/mutex.h>
38
39#include <asm/uaccess.h>
40#include <linux/list.h>
41#include <linux/init.h>
42#include <linux/compiler.h>
43#include <linux/hash.h>
44#include <linux/posix-clock.h>
45#include <linux/posix-timers.h>
46#include <linux/syscalls.h>
47#include <linux/wait.h>
48#include <linux/workqueue.h>
49#include <linux/export.h>
50#include <linux/hashtable.h>
51
52#include "timekeeping.h"
53
54
55
56
57
58
59
60
61
62
63
64
65
66static struct kmem_cache *posix_timers_cache;
67
68static DEFINE_HASHTABLE(posix_timers_hashtable, 9);
69static DEFINE_SPINLOCK(hash_lock);
70
71
72
73
74
75#if SIGEV_THREAD_ID != (SIGEV_THREAD_ID & \
76 ~(SIGEV_SIGNAL | SIGEV_NONE | SIGEV_THREAD))
77#error "SIGEV_THREAD_ID must not share bit with other SIGEV values!"
78#endif
79
80
81
82
83#ifndef ENOTSUP
84# define ENANOSLEEP_NOTSUP EOPNOTSUPP
85#else
86# define ENANOSLEEP_NOTSUP ENOTSUP
87#endif
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127static struct k_clock posix_clocks[MAX_CLOCKS];
128
129
130
131
132static int common_nsleep(const clockid_t, int flags, struct timespec *t,
133 struct timespec __user *rmtp);
134static int common_timer_create(struct k_itimer *new_timer);
135static void common_timer_get(struct k_itimer *, struct itimerspec *);
136static int common_timer_set(struct k_itimer *, int,
137 struct itimerspec *, struct itimerspec *);
138static int common_timer_del(struct k_itimer *timer);
139
140static enum hrtimer_restart posix_timer_fn(struct hrtimer *data);
141
142static struct k_itimer *__lock_timer(timer_t timer_id, unsigned long *flags);
143
/*
 * Look up a timer by id and take its it_lock; evaluates to the locked
 * k_itimer or NULL.  The __cond_lock() annotation tells sparse that
 * it_lock is held exactly when the result is non-NULL.
 */
#define lock_timer(tid, flags)						   \
({	struct k_itimer *__timr;					   \
	__cond_lock(&__timr->it_lock, __timr = __lock_timer(tid, flags)); \
	__timr;								   \
})
149
150static int hash(struct signal_struct *sig, unsigned int nr)
151{
152 return hash_32(hash32_ptr(sig) ^ nr, HASH_BITS(posix_timers_hashtable));
153}
154
/*
 * Scan one hash bucket for the timer matching (@sig, @id).
 * Caller must hold hash_lock or the RCU read lock.
 */
static struct k_itimer *__posix_timers_find(struct hlist_head *head,
			struct signal_struct *sig,
			timer_t id)
{
	struct k_itimer *timer;

	hlist_for_each_entry_rcu(timer, head, t_hash) {
		if ((timer->it_signal == sig) && (timer->it_id == id))
			return timer;
	}
	return NULL;
}
167
/* Find the current process's timer with @id, or NULL. */
static struct k_itimer *posix_timer_by_id(timer_t id)
{
	struct signal_struct *sig = current->signal;
	struct hlist_head *head = &posix_timers_hashtable[hash(sig, id)];

	return __posix_timers_find(head, sig, id);
}
175
/*
 * Insert @timer into the hash table under the next free per-process
 * timer id.  Returns the allocated id (>= 0) or -EAGAIN when every id
 * of this process is already in use.
 */
static int posix_timer_add(struct k_itimer *timer)
{
	struct signal_struct *sig = current->signal;
	int first_free_id = sig->posix_timer_id;
	struct hlist_head *head;
	int ret = -ENOENT;

	do {
		spin_lock(&hash_lock);
		head = &posix_timers_hashtable[hash(sig, sig->posix_timer_id)];
		if (!__posix_timers_find(head, sig, sig->posix_timer_id)) {
			hlist_add_head_rcu(&timer->t_hash, head);
			ret = sig->posix_timer_id;
		}
		/* Advance the search position, wrapping back to 0 on overflow */
		if (++sig->posix_timer_id < 0)
			sig->posix_timer_id = 0;
		/* Wrapped all the way around without finding a free id */
		if ((sig->posix_timer_id == first_free_id) && (ret == -ENOENT))
			ret = -EAGAIN;
		spin_unlock(&hash_lock);
	} while (ret == -ENOENT);
	return ret;
}
199
/* Release a timer's it_lock taken by lock_timer()/__lock_timer(). */
static inline void unlock_timer(struct k_itimer *timr, unsigned long flags)
{
	spin_unlock_irqrestore(&timr->it_lock, flags);
}
204
205
/* CLOCK_REALTIME: read the wall-clock time. */
static int posix_clock_realtime_get(clockid_t which_clock, struct timespec *tp)
{
	ktime_get_real_ts(tp);
	return 0;
}
211
212
/* CLOCK_REALTIME: set the wall-clock time. */
static int posix_clock_realtime_set(const clockid_t which_clock,
				    const struct timespec *tp)
{
	return do_sys_settimeofday(tp, NULL);
}
218
/* CLOCK_REALTIME: adjust the clock via the adjtimex mechanism. */
static int posix_clock_realtime_adj(const clockid_t which_clock,
				    struct timex *t)
{
	return do_adjtimex(t);
}
224
225
226
227
/* CLOCK_MONOTONIC: read the monotonic clock. */
static int posix_ktime_get_ts(clockid_t which_clock, struct timespec *tp)
{
	ktime_get_ts(tp);
	return 0;
}
233
234
235
236
/* CLOCK_MONOTONIC_RAW: monotonic time without NTP frequency correction. */
static int posix_get_monotonic_raw(clockid_t which_clock, struct timespec *tp)
{
	getrawmonotonic(tp);
	return 0;
}
242
243
/* CLOCK_REALTIME_COARSE: tick-granularity wall-clock time (no clocksource read). */
static int posix_get_realtime_coarse(clockid_t which_clock, struct timespec *tp)
{
	*tp = current_kernel_time();
	return 0;
}
249
/* CLOCK_MONOTONIC_COARSE: tick-granularity monotonic time. */
static int posix_get_monotonic_coarse(clockid_t which_clock,
						struct timespec *tp)
{
	*tp = get_monotonic_coarse();
	return 0;
}
256
/* Resolution of the coarse clocks: one low-resolution tick. */
static int posix_get_coarse_res(const clockid_t which_clock, struct timespec *tp)
{
	*tp = ktime_to_timespec(KTIME_LOW_RES);
	return 0;
}
262
/* CLOCK_BOOTTIME: monotonic time including time spent in suspend. */
static int posix_get_boottime(const clockid_t which_clock, struct timespec *tp)
{
	get_monotonic_boottime(tp);
	return 0;
}
268
/* CLOCK_TAI: International Atomic Time (realtime + TAI offset). */
static int posix_get_tai(clockid_t which_clock, struct timespec *tp)
{
	timekeeping_clocktai(tp);
	return 0;
}
274
/* Resolution of the hrtimer-backed clocks, in nanoseconds. */
static int posix_get_hrtimer_res(clockid_t which_clock, struct timespec *tp)
{
	tp->tv_sec = 0;
	tp->tv_nsec = hrtimer_resolution;
	return 0;
}
281
282
283
284
/*
 * Boot-time initialization: register the k_clock operations for the
 * built-in clocks and create the k_itimer slab cache.
 */
static __init int init_posix_timers(void)
{
	/* Full-featured clock: settable, adjustable, supports timers */
	struct k_clock clock_realtime = {
		.clock_getres	= posix_get_hrtimer_res,
		.clock_get	= posix_clock_realtime_get,
		.clock_set	= posix_clock_realtime_set,
		.clock_adj	= posix_clock_realtime_adj,
		.nsleep		= common_nsleep,
		.nsleep_restart	= hrtimer_nanosleep_restart,
		.timer_create	= common_timer_create,
		.timer_set	= common_timer_set,
		.timer_get	= common_timer_get,
		.timer_del	= common_timer_del,
	};
	/* Like realtime but not settable/adjustable */
	struct k_clock clock_monotonic = {
		.clock_getres	= posix_get_hrtimer_res,
		.clock_get	= posix_ktime_get_ts,
		.nsleep		= common_nsleep,
		.nsleep_restart	= hrtimer_nanosleep_restart,
		.timer_create	= common_timer_create,
		.timer_set	= common_timer_set,
		.timer_get	= common_timer_get,
		.timer_del	= common_timer_del,
	};
	/* Read-only clocks: no timers, no nanosleep */
	struct k_clock clock_monotonic_raw = {
		.clock_getres	= posix_get_hrtimer_res,
		.clock_get	= posix_get_monotonic_raw,
	};
	struct k_clock clock_realtime_coarse = {
		.clock_getres	= posix_get_coarse_res,
		.clock_get	= posix_get_realtime_coarse,
	};
	struct k_clock clock_monotonic_coarse = {
		.clock_getres	= posix_get_coarse_res,
		.clock_get	= posix_get_monotonic_coarse,
	};
	struct k_clock clock_tai = {
		.clock_getres	= posix_get_hrtimer_res,
		.clock_get	= posix_get_tai,
		.nsleep		= common_nsleep,
		.nsleep_restart	= hrtimer_nanosleep_restart,
		.timer_create	= common_timer_create,
		.timer_set	= common_timer_set,
		.timer_get	= common_timer_get,
		.timer_del	= common_timer_del,
	};
	struct k_clock clock_boottime = {
		.clock_getres	= posix_get_hrtimer_res,
		.clock_get	= posix_get_boottime,
		.nsleep		= common_nsleep,
		.nsleep_restart	= hrtimer_nanosleep_restart,
		.timer_create	= common_timer_create,
		.timer_set	= common_timer_set,
		.timer_get	= common_timer_get,
		.timer_del	= common_timer_del,
	};

	posix_timers_register_clock(CLOCK_REALTIME, &clock_realtime);
	posix_timers_register_clock(CLOCK_MONOTONIC, &clock_monotonic);
	posix_timers_register_clock(CLOCK_MONOTONIC_RAW, &clock_monotonic_raw);
	posix_timers_register_clock(CLOCK_REALTIME_COARSE, &clock_realtime_coarse);
	posix_timers_register_clock(CLOCK_MONOTONIC_COARSE, &clock_monotonic_coarse);
	posix_timers_register_clock(CLOCK_BOOTTIME, &clock_boottime);
	posix_timers_register_clock(CLOCK_TAI, &clock_tai);

	/* SLAB_PANIC: boot cannot continue without this cache */
	posix_timers_cache = kmem_cache_create("posix_timers_cache",
					sizeof (struct k_itimer), 0, SLAB_PANIC,
					NULL);
	return 0;
}

__initcall(init_posix_timers);
357
/*
 * Re-arm an hrtimer-based interval timer after its signal has been
 * handled.  No-op for one-shot timers (interval == 0).
 * Caller holds timr->it_lock.
 */
static void schedule_next_timer(struct k_itimer *timr)
{
	struct hrtimer *timer = &timr->it.real.timer;

	if (timr->it.real.interval.tv64 == 0)
		return;

	/* Account for intervals that expired while the signal was pending */
	timr->it_overrun += (unsigned int) hrtimer_forward(timer,
						timer->base->get_time(),
						timr->it.real.interval);

	timr->it_overrun_last = timr->it_overrun;
	timr->it_overrun = -1;
	++timr->it_requeue_pending;
	hrtimer_restart(timer);
}
374
375
376
377
378
379
380
381
382
383
384
385
/*
 * Called from the signal delivery path once a timer signal has been
 * collected: re-arm the timer for the next interval and fold the
 * overrun count into @info.  Acts only when it_requeue_pending still
 * matches the tag recorded in si_sys_private, i.e. when this is the
 * signal instance the timer is waiting on.
 */
void do_schedule_next_timer(struct siginfo *info)
{
	struct k_itimer *timr;
	unsigned long flags;

	timr = lock_timer(info->si_tid, &flags);

	if (timr && timr->it_requeue_pending == info->si_sys_private) {
		/* Negative clockids are CPU clocks (see clockid_to_kclock) */
		if (timr->it_clock < 0)
			posix_cpu_timer_schedule(timr);
		else
			schedule_next_timer(timr);

		info->si_overrun += timr->it_overrun_last;
	}

	if (timr)
		unlock_timer(timr, flags);
}
405
/*
 * Queue the timer's signal to its target task.
 *
 * @si_private tags the queued siginfo so that do_schedule_next_timer()
 * can tell whether it is handling this exact firing.  Callers hold
 * timr->it_lock, so the tag is written before the signal is sent.
 *
 * The target is the specific thread when SIGEV_THREAD_ID is set,
 * otherwise the whole thread group (shared).  Returns 1 iff
 * send_sigqueue() returned > 0 (signal dropped, not queued); returns 0
 * both on successful queueing and when the target pid no longer maps
 * to a task.
 */
int posix_timer_event(struct k_itimer *timr, int si_private)
{
	struct task_struct *task;
	int shared, ret = -1;

	timr->sigq->info.si_sys_private = si_private;

	rcu_read_lock();
	task = pid_task(timr->it_pid, PIDTYPE_PID);
	if (task) {
		shared = !(timr->it_sigev_notify & SIGEV_THREAD_ID);
		ret = send_sigqueue(timr->sigq, task, shared);
	}
	rcu_read_unlock();

	/* If we failed to send the signal the timer stops. */
	return ret > 0;
}
EXPORT_SYMBOL_GPL(posix_timer_event);
434
435
436
437
438
439
440
441
/*
 * hrtimer expiry callback for the hrtimer-backed clocks: deliver the
 * timer signal and, when the signal could not be queued, re-arm an
 * interval timer here so the periodic behaviour is not lost.
 */
static enum hrtimer_restart posix_timer_fn(struct hrtimer *timer)
{
	struct k_itimer *timr;
	unsigned long flags;
	int si_private = 0;
	enum hrtimer_restart ret = HRTIMER_NORESTART;

	timr = container_of(timer, struct k_itimer, it.real.timer);
	spin_lock_irqsave(&timr->it_lock, flags);

	/* Tag interval-timer firings so the re-queue path can match them */
	if (timr->it.real.interval.tv64 != 0)
		si_private = ++timr->it_requeue_pending;

	if (posix_timer_event(timr, si_private)) {
		/*
		 * The signal was not queued (posix_timer_event() returned
		 * nonzero).  For an interval timer, restart it from here
		 * instead of waiting for a signal-delivery path that will
		 * never run.
		 */
		if (timr->it.real.interval.tv64 != 0) {
			ktime_t now = hrtimer_cb_get_time(timer);

#ifdef CONFIG_HIGH_RES_TIMERS
			{
				/*
				 * For intervals shorter than one jiffy, push
				 * the re-arm base one jiffy into the future.
				 * NOTE(review): presumably this avoids a
				 * rapid re-fire loop when high resolution
				 * mode is not active — confirm against
				 * hrtimer documentation.
				 */
				ktime_t kj = ktime_set(0, NSEC_PER_SEC / HZ);

				if (timr->it.real.interval.tv64 < kj.tv64)
					now = ktime_add(now, kj);
			}
#endif
			timr->it_overrun += (unsigned int)
				hrtimer_forward(timer, now,
						timr->it.real.interval);
			ret = HRTIMER_RESTART;
			++timr->it_requeue_pending;
		}
	}

	unlock_timer(timr, flags);
	return ret;
}
505
/*
 * Validate a userspace sigevent and resolve its notification target.
 * For SIGEV_THREAD_ID the target is the named thread, which must be in
 * the caller's thread group; otherwise it is the group leader.  Also
 * rejects out-of-range signal numbers for signal-delivering notify
 * modes.  Returns the target's pid, or NULL if the sigevent is invalid.
 * Caller must hold the RCU read lock (for find_task_by_vpid()).
 */
static struct pid *good_sigevent(sigevent_t * event)
{
	struct task_struct *rtn = current->group_leader;

	if ((event->sigev_notify & SIGEV_THREAD_ID ) &&
		(!(rtn = find_task_by_vpid(event->sigev_notify_thread_id)) ||
		 !same_thread_group(rtn, current) ||
		 (event->sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_SIGNAL))
		return NULL;

	if (((event->sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_NONE) &&
	    ((event->sigev_signo <= 0) || (event->sigev_signo > SIGRTMAX)))
		return NULL;

	return task_pid(rtn);
}
522
/*
 * Register the k_clock operations for @clock_id in the posix_clocks
 * table.  clock_get and clock_getres are mandatory; registration is
 * refused (with a warning) if either is missing or the id is out of
 * range.  The k_clock struct is copied, so the caller's copy may live
 * on the stack.
 */
void posix_timers_register_clock(const clockid_t clock_id,
				 struct k_clock *new_clock)
{
	if ((unsigned) clock_id >= MAX_CLOCKS) {
		printk(KERN_WARNING "POSIX clock register failed for clock_id %d\n",
		       clock_id);
		return;
	}

	if (!new_clock->clock_get) {
		printk(KERN_WARNING "POSIX clock id %d lacks clock_get()\n",
		       clock_id);
		return;
	}
	if (!new_clock->clock_getres) {
		printk(KERN_WARNING "POSIX clock id %d lacks clock_getres()\n",
		       clock_id);
		return;
	}

	posix_clocks[clock_id] = *new_clock;
}
EXPORT_SYMBOL_GPL(posix_timers_register_clock);
546
547static struct k_itimer * alloc_posix_timer(void)
548{
549 struct k_itimer *tmr;
550 tmr = kmem_cache_zalloc(posix_timers_cache, GFP_KERNEL);
551 if (!tmr)
552 return tmr;
553 if (unlikely(!(tmr->sigq = sigqueue_alloc()))) {
554 kmem_cache_free(posix_timers_cache, tmr);
555 return NULL;
556 }
557 memset(&tmr->sigq->info, 0, sizeof(siginfo_t));
558 return tmr;
559}
560
/* RCU callback: actually free a k_itimer after the grace period. */
static void k_itimer_rcu_free(struct rcu_head *head)
{
	struct k_itimer *tmr = container_of(head, struct k_itimer, it.rcu);

	kmem_cache_free(posix_timers_cache, tmr);
}
567
#define IT_ID_SET	1
#define IT_ID_NOT_SET	0
/*
 * Drop a timer's resources: unhash it (if it was ever hashed — see
 * @it_id_set), release the pid and sigqueue, and free the k_itimer via
 * RCU so concurrent lockless lookups stay safe.
 */
static void release_posix_timer(struct k_itimer *tmr, int it_id_set)
{
	if (it_id_set) {
		unsigned long flags;
		spin_lock_irqsave(&hash_lock, flags);
		hlist_del_rcu(&tmr->t_hash);
		spin_unlock_irqrestore(&hash_lock, flags);
	}
	put_pid(tmr->it_pid);
	sigqueue_free(tmr->sigq);
	call_rcu(&tmr->it.rcu, k_itimer_rcu_free);
}
582
/*
 * Map a clockid to its k_clock operations.  Negative ids encode the
 * dynamic (fd-based) posix clocks and the CPU clocks; non-negative ids
 * index the posix_clocks table and must have been registered (i.e.
 * have clock_getres set).  Returns NULL for unknown ids.
 */
static struct k_clock *clockid_to_kclock(const clockid_t id)
{
	if (id < 0)
		return (id & CLOCKFD_MASK) == CLOCKFD ?
			&clock_posix_dynamic : &clock_posix_cpu;

	if (id >= MAX_CLOCKS || !posix_clocks[id].clock_getres)
		return NULL;
	return &posix_clocks[id];
}
593
/* Default timer_create op: initialize the embedded hrtimer. */
static int common_timer_create(struct k_itimer *new_timer)
{
	hrtimer_init(&new_timer->it.real.timer, new_timer->it_clock, 0);
	return 0;
}
599
600
601
602SYSCALL_DEFINE3(timer_create, const clockid_t, which_clock,
603 struct sigevent __user *, timer_event_spec,
604 timer_t __user *, created_timer_id)
605{
606 struct k_clock *kc = clockid_to_kclock(which_clock);
607 struct k_itimer *new_timer;
608 int error, new_timer_id;
609 sigevent_t event;
610 int it_id_set = IT_ID_NOT_SET;
611
612 if (!kc)
613 return -EINVAL;
614 if (!kc->timer_create)
615 return -EOPNOTSUPP;
616
617 new_timer = alloc_posix_timer();
618 if (unlikely(!new_timer))
619 return -EAGAIN;
620
621 spin_lock_init(&new_timer->it_lock);
622 new_timer_id = posix_timer_add(new_timer);
623 if (new_timer_id < 0) {
624 error = new_timer_id;
625 goto out;
626 }
627
628 it_id_set = IT_ID_SET;
629 new_timer->it_id = (timer_t) new_timer_id;
630 new_timer->it_clock = which_clock;
631 new_timer->it_overrun = -1;
632
633 if (timer_event_spec) {
634 if (copy_from_user(&event, timer_event_spec, sizeof (event))) {
635 error = -EFAULT;
636 goto out;
637 }
638 rcu_read_lock();
639 new_timer->it_pid = get_pid(good_sigevent(&event));
640 rcu_read_unlock();
641 if (!new_timer->it_pid) {
642 error = -EINVAL;
643 goto out;
644 }
645 } else {
646 memset(&event.sigev_value, 0, sizeof(event.sigev_value));
647 event.sigev_notify = SIGEV_SIGNAL;
648 event.sigev_signo = SIGALRM;
649 event.sigev_value.sival_int = new_timer->it_id;
650 new_timer->it_pid = get_pid(task_tgid(current));
651 }
652
653 new_timer->it_sigev_notify = event.sigev_notify;
654 new_timer->sigq->info.si_signo = event.sigev_signo;
655 new_timer->sigq->info.si_value = event.sigev_value;
656 new_timer->sigq->info.si_tid = new_timer->it_id;
657 new_timer->sigq->info.si_code = SI_TIMER;
658
659 if (copy_to_user(created_timer_id,
660 &new_timer_id, sizeof (new_timer_id))) {
661 error = -EFAULT;
662 goto out;
663 }
664
665 error = kc->timer_create(new_timer);
666 if (error)
667 goto out;
668
669 spin_lock_irq(¤t->sighand->siglock);
670 new_timer->it_signal = current->signal;
671 list_add(&new_timer->list, ¤t->signal->posix_timers);
672 spin_unlock_irq(¤t->sighand->siglock);
673
674 return 0;
675
676
677
678
679
680
681out:
682 release_posix_timer(new_timer, it_id_set);
683 return error;
684}
685
686
687
688
689
690
691
692
/*
 * Find a timer by id and return it with it_lock held (interrupts
 * disabled, state saved into *flags), or NULL if no matching timer is
 * owned by the current process.  The lookup runs under RCU; validity is
 * re-checked under the lock via it_signal, which timer deletion clears.
 */
static struct k_itimer *__lock_timer(timer_t timer_id, unsigned long *flags)
{
	struct k_itimer *timr;

	/*
	 * timer_t could be any type >= int and we want to make sure any
	 * @timer_id outside positive int range fails lookup.
	 */
	if ((unsigned long long)timer_id > INT_MAX)
		return NULL;

	rcu_read_lock();
	timr = posix_timer_by_id(timer_id);
	if (timr) {
		spin_lock_irqsave(&timr->it_lock, *flags);
		if (timr->it_signal == current->signal) {
			rcu_read_unlock();
			return timr;
		}
		spin_unlock_irqrestore(&timr->it_lock, *flags);
	}
	rcu_read_unlock();

	return NULL;
}
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
/*
 * Default timer_get op: fill @cur_setting with the timer's interval
 * and the time remaining until the next expiry.
 * Caller holds timr->it_lock.
 */
static void
common_timer_get(struct k_itimer *timr, struct itimerspec *cur_setting)
{
	ktime_t now, remaining, iv;
	struct hrtimer *timer = &timr->it.real.timer;

	memset(cur_setting, 0, sizeof(struct itimerspec));

	iv = timr->it.real.interval;

	/* A disarmed one-shot timer (unless SIGEV_NONE) reads as all zero */
	if (iv.tv64)
		cur_setting->it_interval = ktime_to_timespec(iv);
	else if (!hrtimer_active(timer) &&
		 (timr->it_sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_NONE)
		return;

	now = timer->base->get_time();

	/*
	 * Forward the timer over already-elapsed intervals when either a
	 * requeue is pending or this is a SIGEV_NONE timer (which is never
	 * re-armed by the signal path), accumulating the overrun count.
	 */
	if (iv.tv64 && (timr->it_requeue_pending & REQUEUE_PENDING ||
	    (timr->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE))
		timr->it_overrun += (unsigned int) hrtimer_forward(timer, now, iv);

	remaining = __hrtimer_expires_remaining_adjusted(timer, now);

	if (remaining.tv64 <= 0) {
		/*
		 * Already expired but signal not yet delivered: report 1ns
		 * remaining rather than 0, which would mean "disarmed".
		 * SIGEV_NONE timers legitimately report 0.
		 */
		if ((timr->it_sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_NONE)
			cur_setting->it_value.tv_nsec = 1;
	} else
		cur_setting->it_value = ktime_to_timespec(remaining);
}
775
776
/* Get the time remaining on a POSIX.1b interval timer. */
SYSCALL_DEFINE2(timer_gettime, timer_t, timer_id,
		struct itimerspec __user *, setting)
{
	struct itimerspec cur_setting;
	struct k_itimer *timr;
	struct k_clock *kc;
	unsigned long flags;
	int ret = 0;

	timr = lock_timer(timer_id, &flags);
	if (!timr)
		return -EINVAL;

	kc = clockid_to_kclock(timr->it_clock);
	/* A created timer's clock must always provide timer_get */
	if (WARN_ON_ONCE(!kc || !kc->timer_get))
		ret = -EINVAL;
	else
		kc->timer_get(timr, &cur_setting);

	unlock_timer(timr, flags);

	if (!ret && copy_to_user(setting, &cur_setting, sizeof (cur_setting)))
		return -EFAULT;

	return ret;
}
803
804
805
806
807
808
809
810
811
812
/*
 * Get the number of overruns of the last delivered timer signal
 * (it_overrun_last, maintained by the re-arm paths).
 */
SYSCALL_DEFINE1(timer_getoverrun, timer_t, timer_id)
{
	struct k_itimer *timr;
	int overrun;
	unsigned long flags;

	timr = lock_timer(timer_id, &flags);
	if (!timr)
		return -EINVAL;

	overrun = timr->it_overrun_last;
	unlock_timer(timr, flags);

	return overrun;
}
828
829
830
/*
 * Default timer_set op: disarm the timer and, if the new value is
 * non-zero, re-arm it with the requested expiry and interval.
 * Returns TIMER_RETRY when the hrtimer callback is currently running
 * and the timer could not be cancelled; the syscall layer drops the
 * lock and retries.  Caller holds timr->it_lock.
 */
static int
common_timer_set(struct k_itimer *timr, int flags,
		 struct itimerspec *new_setting, struct itimerspec *old_setting)
{
	struct hrtimer *timer = &timr->it.real.timer;
	enum hrtimer_mode mode;

	if (old_setting)
		common_timer_get(timr, old_setting);

	/* Disable the timer before touching its state */
	timr->it.real.interval.tv64 = 0;

	/* Callback in flight: cannot cancel, tell the caller to retry */
	if (hrtimer_try_to_cancel(timer) < 0)
		return TIMER_RETRY;

	/* Invalidate any pending requeue and reset overrun accounting */
	timr->it_requeue_pending = (timr->it_requeue_pending + 2) &
		~REQUEUE_PENDING;
	timr->it_overrun_last = 0;

	/* A zero it_value means "disarm" — we are done */
	if (!new_setting->it_value.tv_sec && !new_setting->it_value.tv_nsec)
		return 0;

	mode = flags & TIMER_ABSTIME ? HRTIMER_MODE_ABS : HRTIMER_MODE_REL;
	hrtimer_init(&timr->it.real.timer, timr->it_clock, mode);
	timr->it.real.timer.function = posix_timer_fn;

	hrtimer_set_expires(timer, timespec_to_ktime(new_setting->it_value));

	timr->it.real.interval = timespec_to_ktime(new_setting->it_interval);

	/*
	 * SIGEV_NONE timers are never started: their expiry is only
	 * tracked so timer_gettime() can report the remaining time.
	 * Convert a relative expiry to absolute for that bookkeeping.
	 */
	if (((timr->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE)) {
		if (mode == HRTIMER_MODE_REL) {
			hrtimer_add_expires(timer, timer->base->get_time());
		}
		return 0;
	}

	hrtimer_start_expires(timer, mode);
	return 0;
}
879
880
/* Arm, re-arm or disarm a POSIX.1b interval timer. */
SYSCALL_DEFINE4(timer_settime, timer_t, timer_id, int, flags,
		const struct itimerspec __user *, new_setting,
		struct itimerspec __user *, old_setting)
{
	struct k_itimer *timr;
	struct itimerspec new_spec, old_spec;
	int error = 0;
	unsigned long flag;
	struct itimerspec *rtn = old_setting ? &old_spec : NULL;
	struct k_clock *kc;

	if (!new_setting)
		return -EINVAL;

	if (copy_from_user(&new_spec, new_setting, sizeof (new_spec)))
		return -EFAULT;

	if (!timespec_valid(&new_spec.it_interval) ||
	    !timespec_valid(&new_spec.it_value))
		return -EINVAL;
retry:
	timr = lock_timer(timer_id, &flag);
	if (!timr)
		return -EINVAL;

	kc = clockid_to_kclock(timr->it_clock);
	if (WARN_ON_ONCE(!kc || !kc->timer_set))
		error = -EINVAL;
	else
		error = kc->timer_set(timr, flags, &new_spec, rtn);

	unlock_timer(timr, flag);
	if (error == TIMER_RETRY) {
		/* old_spec was already captured on the first attempt */
		rtn = NULL;
		goto retry;
	}

	if (old_setting && !error &&
	    copy_to_user(old_setting, &old_spec, sizeof (old_spec)))
		error = -EFAULT;

	return error;
}
924
/*
 * Default timer_del op: clear the interval and cancel the hrtimer.
 * Returns TIMER_RETRY if the callback is running and cancellation
 * failed; the caller drops the timer lock and retries.
 */
static int common_timer_del(struct k_itimer *timer)
{
	timer->it.real.interval.tv64 = 0;

	if (hrtimer_try_to_cancel(&timer->it.real.timer) < 0)
		return TIMER_RETRY;
	return 0;
}
933
/* Dispatch deletion to the timer's clock; -EINVAL if the op is missing. */
static inline int timer_delete_hook(struct k_itimer *timer)
{
	struct k_clock *kc = clockid_to_kclock(timer->it_clock);

	if (WARN_ON_ONCE(!kc || !kc->timer_del))
		return -EINVAL;
	return kc->timer_del(timer);
}
942
943
944SYSCALL_DEFINE1(timer_delete, timer_t, timer_id)
945{
946 struct k_itimer *timer;
947 unsigned long flags;
948
949retry_delete:
950 timer = lock_timer(timer_id, &flags);
951 if (!timer)
952 return -EINVAL;
953
954 if (timer_delete_hook(timer) == TIMER_RETRY) {
955 unlock_timer(timer, flags);
956 goto retry_delete;
957 }
958
959 spin_lock(¤t->sighand->siglock);
960 list_del(&timer->list);
961 spin_unlock(¤t->sighand->siglock);
962
963
964
965
966 timer->it_signal = NULL;
967
968 unlock_timer(timer, flags);
969 release_posix_timer(timer, IT_ID_SET);
970 return 0;
971}
972
973
974
975
/*
 * Delete a timer during process exit.  Like sys_timer_delete() but the
 * timer is reached directly (no lookup) and sighand->siglock is not
 * taken — NOTE(review): presumably safe because the exiting process is
 * the only remaining user; confirm against exit_itimers() callers.
 */
static void itimer_delete(struct k_itimer *timer)
{
	unsigned long flags;

retry_delete:
	spin_lock_irqsave(&timer->it_lock, flags);

	if (timer_delete_hook(timer) == TIMER_RETRY) {
		unlock_timer(timer, flags);
		goto retry_delete;
	}
	list_del(&timer->list);

	/* Make concurrent __lock_timer() callers fail the ownership check */
	timer->it_signal = NULL;

	unlock_timer(timer, flags);
	release_posix_timer(timer, IT_ID_SET);
}
997
998
999
1000
1001
1002void exit_itimers(struct signal_struct *sig)
1003{
1004 struct k_itimer *tmr;
1005
1006 while (!list_empty(&sig->posix_timers)) {
1007 tmr = list_entry(sig->posix_timers.next, struct k_itimer, list);
1008 itimer_delete(tmr);
1009 }
1010}
1011
/* Set the time of a settable clock (fails with -EINVAL otherwise). */
SYSCALL_DEFINE2(clock_settime, const clockid_t, which_clock,
		const struct timespec __user *, tp)
{
	struct k_clock *kc = clockid_to_kclock(which_clock);
	struct timespec new_tp;

	if (!kc || !kc->clock_set)
		return -EINVAL;

	if (copy_from_user(&new_tp, tp, sizeof (*tp)))
		return -EFAULT;

	return kc->clock_set(which_clock, &new_tp);
}
1026
/* Read the current time of @which_clock. */
SYSCALL_DEFINE2(clock_gettime, const clockid_t, which_clock,
		struct timespec __user *,tp)
{
	struct k_clock *kc = clockid_to_kclock(which_clock);
	struct timespec kernel_tp;
	int error;

	if (!kc)
		return -EINVAL;

	error = kc->clock_get(which_clock, &kernel_tp);

	if (!error && copy_to_user(tp, &kernel_tp, sizeof (kernel_tp)))
		error = -EFAULT;

	return error;
}
1044
/*
 * Adjust an adjustable clock (adjtimex-style).  The timex struct is
 * copied back to userspace on success since clock_adj updates it.
 */
SYSCALL_DEFINE2(clock_adjtime, const clockid_t, which_clock,
		struct timex __user *, utx)
{
	struct k_clock *kc = clockid_to_kclock(which_clock);
	struct timex ktx;
	int err;

	if (!kc)
		return -EINVAL;
	if (!kc->clock_adj)
		return -EOPNOTSUPP;

	if (copy_from_user(&ktx, utx, sizeof(ktx)))
		return -EFAULT;

	err = kc->clock_adj(which_clock, &ktx);

	if (err >= 0 && copy_to_user(utx, &ktx, sizeof(ktx)))
		return -EFAULT;

	return err;
}
1067
/* Report the resolution of @which_clock; @tp may be NULL. */
SYSCALL_DEFINE2(clock_getres, const clockid_t, which_clock,
		struct timespec __user *, tp)
{
	struct k_clock *kc = clockid_to_kclock(which_clock);
	struct timespec rtn_tp;
	int error;

	if (!kc)
		return -EINVAL;

	error = kc->clock_getres(which_clock, &rtn_tp);

	if (!error && tp && copy_to_user(tp, &rtn_tp, sizeof (rtn_tp)))
		error = -EFAULT;

	return error;
}
1085
1086
1087
1088
/*
 * Default nsleep op: sleep on an hrtimer, absolute when TIMER_ABSTIME
 * is set, otherwise relative.  @rmtp receives the remaining time on a
 * relative sleep that is interrupted.
 */
static int common_nsleep(const clockid_t which_clock, int flags,
			 struct timespec *tsave, struct timespec __user *rmtp)
{
	return hrtimer_nanosleep(tsave, rmtp, flags & TIMER_ABSTIME ?
				 HRTIMER_MODE_ABS : HRTIMER_MODE_REL,
				 which_clock);
}
1096
/* High-resolution sleep against an arbitrary clock. */
SYSCALL_DEFINE4(clock_nanosleep, const clockid_t, which_clock, int, flags,
		const struct timespec __user *, rqtp,
		struct timespec __user *, rmtp)
{
	struct k_clock *kc = clockid_to_kclock(which_clock);
	struct timespec t;

	if (!kc)
		return -EINVAL;
	/* ENANOSLEEP_NOTSUP: EOPNOTSUPP, or ENOTSUP where it exists */
	if (!kc->nsleep)
		return -ENANOSLEEP_NOTSUP;

	if (copy_from_user(&t, rqtp, sizeof (struct timespec)))
		return -EFAULT;

	if (!timespec_valid(&t))
		return -EINVAL;

	return kc->nsleep(which_clock, flags, &t, rmtp);
}
1117
1118
1119
1120
1121
/*
 * Restart handler for a clock_nanosleep() interrupted by a signal:
 * re-dispatch to the clock's nsleep_restart op using the clockid saved
 * in the restart block.
 */
long clock_nanosleep_restart(struct restart_block *restart_block)
{
	clockid_t which_clock = restart_block->nanosleep.clockid;
	struct k_clock *kc = clockid_to_kclock(which_clock);

	if (WARN_ON_ONCE(!kc || !kc->nsleep_restart))
		return -EINVAL;

	return kc->nsleep_restart(restart_block);
}
1132