1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76#include <linux/slab.h>
77#include <linux/spinlock.h>
78#include <linux/init.h>
79#include <linux/proc_fs.h>
80#include <linux/time.h>
81#include <linux/security.h>
82#include <linux/syscalls.h>
83#include <linux/audit.h>
84#include <linux/capability.h>
85#include <linux/seq_file.h>
86#include <linux/rwsem.h>
87#include <linux/nsproxy.h>
88#include <linux/ipc_namespace.h>
89
90#include <asm/uaccess.h>
91#include "util.h"
92
93#define sem_ids(ns) ((ns)->ids[IPC_SEM_IDS])
94
95#define sem_unlock(sma) ipc_unlock(&(sma)->sem_perm)
96#define sem_checkid(sma, semid) ipc_checkid(&sma->sem_perm, semid)
97
98static int newary(struct ipc_namespace *, struct ipc_params *);
99static void freeary(struct ipc_namespace *, struct kern_ipc_perm *);
100#ifdef CONFIG_PROC_FS
101static int sysvipc_sem_proc_show(struct seq_file *s, void *it);
102#endif
103
104#define SEMMSL_FAST 256
105#define SEMOPM_FAST 64
106
107
108
109
110
111
112
113
114
115
116#define sc_semmsl sem_ctls[0]
117#define sc_semmns sem_ctls[1]
118#define sc_semopm sem_ctls[2]
119#define sc_semmni sem_ctls[3]
120
/*
 * sem_init_ns - initialize the semaphore portion of an ipc namespace.
 * @ns: namespace being set up
 *
 * Seeds the four tunables (max semaphores per set, system-wide semaphore
 * limit, max ops per semop call, max number of sets) with the compile-time
 * defaults, zeroes the in-use counter and initializes the id allocator.
 */
void sem_init_ns(struct ipc_namespace *ns)
{
	ns->sc_semmsl = SEMMSL;
	ns->sc_semmns = SEMMNS;
	ns->sc_semopm = SEMOPM;
	ns->sc_semmni = SEMMNI;
	ns->used_sems = 0;
	ipc_init_ids(&ns->ids[IPC_SEM_IDS]);
}
130
#ifdef CONFIG_IPC_NS
/*
 * sem_exit_ns - tear down the semaphore portion of a dying ipc namespace.
 * Destroys every remaining semaphore set via freeary() (which wakes
 * blocked waiters with -EIDRM), then releases the id allocator.
 */
void sem_exit_ns(struct ipc_namespace *ns)
{
	free_ipcs(ns, &sem_ids(ns), freeary);
	idr_destroy(&ns->ids[IPC_SEM_IDS].ipcs_idr);
}
#endif
138
/*
 * sem_init - boot-time initialization: set up the initial ipc namespace
 * and register the /proc/sysvipc/sem interface (one line per array,
 * rendered by sysvipc_sem_proc_show).
 */
void __init sem_init (void)
{
	sem_init_ns(&init_ipc_ns);
	ipc_init_proc_interface("sysvipc/sem",
				"       key      semid perms      nsems   uid   gid  cuid  cgid      otime      ctime\n",
				IPC_SEM_IDS, sysvipc_sem_proc_show);
}
146
147
148
149
150
151static inline struct sem_array *sem_lock(struct ipc_namespace *ns, int id)
152{
153 struct kern_ipc_perm *ipcp = ipc_lock(&sem_ids(ns), id);
154
155 if (IS_ERR(ipcp))
156 return (struct sem_array *)ipcp;
157
158 return container_of(ipcp, struct sem_array, sem_perm);
159}
160
161static inline struct sem_array *sem_lock_check(struct ipc_namespace *ns,
162 int id)
163{
164 struct kern_ipc_perm *ipcp = ipc_lock_check(&sem_ids(ns), id);
165
166 if (IS_ERR(ipcp))
167 return (struct sem_array *)ipcp;
168
169 return container_of(ipcp, struct sem_array, sem_perm);
170}
171
/*
 * sem_lock_and_putref - re-acquire the array spinlock by pointer and drop
 * the reference taken earlier (see sem_getref_and_unlock). Caller must
 * still check sma->sem_perm.deleted afterwards.
 */
static inline void sem_lock_and_putref(struct sem_array *sma)
{
	ipc_lock_by_ptr(&sma->sem_perm);
	ipc_rcu_putref(sma);
}
177
/*
 * sem_getref_and_unlock - take a reference on the array, then drop its
 * spinlock. Used to keep the array alive across a sleeping allocation;
 * paired with sem_lock_and_putref() or sem_putref().
 */
static inline void sem_getref_and_unlock(struct sem_array *sma)
{
	ipc_rcu_getref(sma);
	ipc_unlock(&(sma)->sem_perm);
}
183
/*
 * sem_putref - drop a reference previously taken with
 * sem_getref_and_unlock(). The lock is taken briefly because
 * ipc_rcu_putref() must run under the array lock here.
 */
static inline void sem_putref(struct sem_array *sma)
{
	ipc_lock_by_ptr(&sma->sem_perm);
	ipc_rcu_putref(sma);
	ipc_unlock(&(sma)->sem_perm);
}
190
/*
 * sem_rmid - remove the array's id from the namespace's id allocator.
 * Caller holds sem_ids.rw_mutex (writer) and the array lock.
 */
static inline void sem_rmid(struct ipc_namespace *ns, struct sem_array *s)
{
	ipc_rmid(&sem_ids(ns), &s->sem_perm);
}
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228#define IN_WAKEUP 1
229
230
231
232
233
234
235
236
237
/*
 * newary - create a new semaphore set
 * @ns: namespace
 * @params: ptr to the structure that contains key, semflg and nsems
 *
 * Called with sem_ids.rw_mutex held (as a writer) via ipcget().
 * ipc_addid() returns the new array locked; it is unlocked here before
 * the id is returned. Returns the ipc id on success or a negative errno.
 */
static int newary(struct ipc_namespace *ns, struct ipc_params *params)
{
	int id;
	int retval;
	struct sem_array *sma;
	int size;
	key_t key = params->key;
	int nsems = params->u.nsems;
	int semflg = params->flg;
	int i;

	if (!nsems)
		return -EINVAL;
	if (ns->used_sems + nsems > ns->sc_semmns)
		return -ENOSPC;

	/* the struct sem entries live directly behind the sem_array */
	size = sizeof (*sma) + nsems * sizeof (struct sem);
	sma = ipc_rcu_alloc(size);
	if (!sma) {
		return -ENOMEM;
	}
	memset (sma, 0, size);

	sma->sem_perm.mode = (semflg & S_IRWXUGO);
	sma->sem_perm.key = key;

	sma->sem_perm.security = NULL;
	retval = security_sem_alloc(sma);
	if (retval) {
		ipc_rcu_putref(sma);
		return retval;
	}

	id = ipc_addid(&sem_ids(ns), &sma->sem_perm, ns->sc_semmni);
	if (id < 0) {
		/* undo allocation: free security blob and drop the ref */
		security_sem_free(sma);
		ipc_rcu_putref(sma);
		return id;
	}
	ns->used_sems += nsems;

	/* semaphores follow the array header contiguously */
	sma->sem_base = (struct sem *) &sma[1];

	for (i = 0; i < nsems; i++)
		INIT_LIST_HEAD(&sma->sem_base[i].sem_pending);

	sma->complex_count = 0;
	INIT_LIST_HEAD(&sma->sem_pending);
	INIT_LIST_HEAD(&sma->list_id);
	sma->sem_nsems = nsems;
	sma->sem_ctime = get_seconds();
	sem_unlock(sma);

	return sma->sem_perm.id;
}
293
294
295
296
297
298static inline int sem_security(struct kern_ipc_perm *ipcp, int semflg)
299{
300 struct sem_array *sma;
301
302 sma = container_of(ipcp, struct sem_array, sem_perm);
303 return security_sem_associate(sma, semflg);
304}
305
306
307
308
309static inline int sem_more_checks(struct kern_ipc_perm *ipcp,
310 struct ipc_params *params)
311{
312 struct sem_array *sma;
313
314 sma = container_of(ipcp, struct sem_array, sem_perm);
315 if (params->u.nsems > sma->sem_nsems)
316 return -EINVAL;
317
318 return 0;
319}
320
/*
 * semget(2) - find or create a semaphore set.
 * Validates nsems against the namespace limit, then delegates key
 * lookup / creation / permission checks to the generic ipcget() with
 * the sem-specific callbacks above.
 */
SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
{
	struct ipc_namespace *ns;
	struct ipc_ops sem_ops;
	struct ipc_params sem_params;

	ns = current->nsproxy->ipc_ns;

	if (nsems < 0 || nsems > ns->sc_semmsl)
		return -EINVAL;

	sem_ops.getnew = newary;
	sem_ops.associate = sem_security;
	sem_ops.more_checks = sem_more_checks;

	sem_params.key = key;
	sem_params.flg = semflg;
	sem_params.u.nsems = nsems;

	return ipcget(ns, &sem_ids(ns), &sem_ops, &sem_params);
}
342
343
344
345
346
347
/*
 * try_atomic_semop - attempt to perform the whole @sops sequence atomically.
 * @sma: semaphore array (caller holds the array lock)
 * @sops: operations to perform
 * @nsops: number of operations
 * @un: undo structure used for SEM_UNDO bookkeeping (must be valid if
 *      any op carries SEM_UNDO)
 * @pid: pid recorded as last modifier on success
 *
 * Returns 0 if all operations were applied, 1 if the caller must sleep
 * (an op would block and IPC_NOWAIT is not set), -EAGAIN if it would
 * block with IPC_NOWAIT, or -ERANGE on semval/semadj overflow. On any
 * non-zero return every semval change made so far is rolled back.
 */
static int try_atomic_semop (struct sem_array * sma, struct sembuf * sops,
			     int nsops, struct sem_undo *un, int pid)
{
	int result, sem_op;
	struct sembuf *sop;
	struct sem * curr;

	/* first pass: tentatively apply each op to semval */
	for (sop = sops; sop < sops + nsops; sop++) {
		curr = sma->sem_base + sop->sem_num;
		sem_op = sop->sem_op;
		result = curr->semval;

		/* wait-for-zero op blocks while semval != 0 */
		if (!sem_op && result)
			goto would_block;

		result += sem_op;
		if (result < 0)
			goto would_block;
		if (result > SEMVMX)
			goto out_of_range;
		if (sop->sem_flg & SEM_UNDO) {
			int undo = un->semadj[sop->sem_num] - sem_op;
			/*
			 * Exceeding the per-process undo adjustment range
			 * is an error.
			 */
			if (undo < (-SEMAEM - 1) || undo > SEMAEM)
				goto out_of_range;
		}
		curr->semval = result;
	}

	/* second pass: everything succeeded - commit sempid and semadj */
	sop--;
	while (sop >= sops) {
		sma->sem_base[sop->sem_num].sempid = pid;
		if (sop->sem_flg & SEM_UNDO)
			un->semadj[sop->sem_num] -= sop->sem_op;
		sop--;
	}

	return 0;

out_of_range:
	result = -ERANGE;
	goto undo;

would_block:
	if (sop->sem_flg & IPC_NOWAIT)
		result = -EAGAIN;
	else
		result = 1;

undo:
	/* roll back the semval changes already made (ops before *sop) */
	sop--;
	while (sop >= sops) {
		sma->sem_base[sop->sem_num].semval -= sop->sem_op;
		sop--;
	}

	return result;
}
408
409
410
411
412
413
414
/*
 * wake_up_sem_queue_prepare - queue @q for a deferred wakeup.
 * @pt: list head collecting all entries to be woken up
 * @q: queue entry that is about to be woken
 * @error: result code to deliver to the sleeping task
 *
 * The real wakeup happens later in wake_up_sem_queue_do(), after the
 * array lock is dropped. Until then q->status is set to IN_WAKEUP so
 * that the sleeper (see get_queue_result) spins instead of reading a
 * stale status; the final result is parked in q->pid meanwhile.
 */
static void wake_up_sem_queue_prepare(struct list_head *pt,
				struct sem_queue *q, int error)
{
	if (list_empty(pt)) {
		/*
		 * First entry: disable preemption so the whole batched
		 * wakeup runs without being scheduled away; re-enabled
		 * in wake_up_sem_queue_do().
		 */
		preempt_disable();
	}
	q->status = IN_WAKEUP;
	q->pid = error;		/* q->pid temporarily carries the status */

	list_add_tail(&q->simple_list, pt);
}
430
431
432
433
434
435
436
437
438
439
/*
 * wake_up_sem_queue_do - perform the wakeups collected by
 * wake_up_sem_queue_prepare().
 * @pt: list of queue entries to wake
 *
 * Must be called without holding any semaphore array lock. For each
 * entry the task is woken first and the final status published after a
 * write barrier, so a sleeper that observes status != IN_WAKEUP is
 * guaranteed the wakeup already happened. Re-enables the preemption
 * disabled in wake_up_sem_queue_prepare() if anything was queued.
 */
static void wake_up_sem_queue_do(struct list_head *pt)
{
	struct sem_queue *q, *t;
	int did_something;

	did_something = !list_empty(pt);
	list_for_each_entry_safe(q, t, pt, simple_list) {
		wake_up_process(q->sleeper);
		/* hands-off: q can vanish as soon as status is written */
		smp_wmb();
		q->status = q->pid;	/* publish the real result */
	}
	if (did_something)
		preempt_enable();
}
455
/*
 * unlink_queue - remove @q from @sma's pending bookkeeping.
 * Single-op waiters additionally sit on the per-semaphore simple_list;
 * multi-op ("complex") waiters are tracked via complex_count instead.
 */
static void unlink_queue(struct sem_array *sma, struct sem_queue *q)
{
	list_del(&q->list);
	if (q->nsops == 1)
		list_del(&q->simple_list);
	else
		sma->complex_count--;
}
464
465
466
467
468
469
470
471
472
473
/*
 * check_restart - decide whether update_queue() must restart its scan
 * after @q's operation completed.
 * @sma: semaphore array
 * @q: the queue entry that just succeeded
 *
 * Returns 1 if the scan must restart from the beginning of the pending
 * list, 0 if it can continue where it is. Caller holds the array lock.
 */
static int check_restart(struct sem_array *sma, struct sem_queue *q)
{
	struct sem *curr;
	struct sem_queue *h;

	/* an operation that did not alter the array cannot unblock others */
	if (q->alter == 0)
		return 0;

	/* pending complex operations are too difficult to analyse - restart */
	if (sma->complex_count)
		return 1;

	/* q itself was a complex operation: be conservative and restart */
	if (q->nsops > 1)
		return 1;

	curr = sma->sem_base + q->sops[0].sem_num;

	/* no one is waiting on the semaphore q modified */
	if (list_empty(&curr->sem_pending))
		return 0;

	/* the semaphore is non-zero after q's operation */
	if (curr->semval) {
		/*
		 * Only a decrement can reach this point with semval != 0:
		 * pending increments never sleep, so every queued alter op
		 * on this semaphore is a decrement, and update_queue has
		 * already retried them. Wait-for-zero waiters cannot run
		 * while semval != 0 either, so no restart is needed.
		 */
		BUG_ON(q->sops[0].sem_op >= 0);
		return 0;
	}

	/*
	 * semval is 0: check for wait-for-zero waiters. They must be at
	 * the head of the per-semaphore simple queue.
	 */
	h = list_first_entry(&curr->sem_pending, struct sem_queue, simple_list);
	BUG_ON(h->nsops != 1);
	BUG_ON(h->sops[0].sem_num != q->sops[0].sem_num);

	/* yes, there is a wait-for-zero entry: restart the scan */
	if (h->sops[0].sem_op == 0)
		return 1;

	/* again - no one is waiting for the new value */
	return 0;
}
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
/*
 * update_queue - look for pending operations that can now complete.
 * @sma: semaphore array
 * @semnum: semaphore that was modified, or -1 to scan the whole array
 * @pt: list head for collecting tasks to wake (see
 *      wake_up_sem_queue_prepare/wake_up_sem_queue_do)
 *
 * Walks the relevant pending queue, completes every operation that can
 * now succeed and queues its sleeper for wakeup. Returns 1 if at least
 * one semop completed, 0 otherwise. Caller holds the array lock and
 * must call wake_up_sem_queue_do(pt) after dropping it.
 */
static int update_queue(struct sem_array *sma, int semnum, struct list_head *pt)
{
	struct sem_queue *q;
	struct list_head *walk;
	struct list_head *pending_list;
	int offset;
	int semop_completed = 0;

	/*
	 * Complex operations sit only on the global queue, so while any
	 * are pending the per-semaphore optimization is unusable: fall
	 * back to a full scan.
	 */
	if (sma->complex_count)
		semnum = -1;

	/*
	 * Choose the queue and the offset of the corresponding list_head
	 * inside sem_queue (a manual container_of, resolved below).
	 */
	if (semnum == -1) {
		pending_list = &sma->sem_pending;
		offset = offsetof(struct sem_queue, list);
	} else {
		pending_list = &sma->sem_base[semnum].sem_pending;
		offset = offsetof(struct sem_queue, simple_list);
	}

again:
	walk = pending_list->next;
	while (walk != pending_list) {
		int error, restart;

		q = (struct sem_queue *)((char *)walk - offset);
		walk = walk->next;

		/*
		 * Per-semaphore scan with semval == 0: queued alter ops
		 * can only be decrements (increments never sleep), so
		 * none of them can succeed - stop early. Wait-for-zero
		 * entries are at the front and were already handled.
		 */
		if (semnum != -1 && sma->sem_base[semnum].semval == 0 &&
				q->alter)
			break;

		error = try_atomic_semop(sma, q->sops, q->nsops,
					 q->undo, q->pid);

		/* does q->sleeper still need to sleep? */
		if (error > 0)
			continue;

		unlink_queue(sma, q);

		if (error) {
			restart = 0;
		} else {
			semop_completed = 1;
			restart = check_restart(sma, q);
		}

		wake_up_sem_queue_prepare(pt, q, error);
		if (restart)
			goto again;
	}
	return semop_completed;
}
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
/*
 * do_smart_update - optimized update_queue().
 * @sma: semaphore array
 * @sops: operations that were performed, or NULL if unknown
 * @nsops: number of operations
 * @otime: force setting otime even if no semop completed
 * @pt: list head collecting tasks to be woken up
 *
 * Re-scans only the semaphores whose change could unblock a waiter:
 * incremented ones, and ones decremented to zero (which may release
 * wait-for-zero waiters). Falls back to a full scan when complex
 * operations are pending or @sops is NULL. Updates sem_otime when a
 * semop completed (or @otime was passed in).
 */
static void do_smart_update(struct sem_array *sma, struct sembuf *sops, int nsops,
			int otime, struct list_head *pt)
{
	int i;

	if (sma->complex_count || sops == NULL) {
		if (update_queue(sma, -1, pt))
			otime = 1;
		goto done;
	}

	for (i = 0; i < nsops; i++) {
		if (sops[i].sem_op > 0 ||
			(sops[i].sem_op < 0 &&
				sma->sem_base[sops[i].sem_num].semval == 0))
			if (update_queue(sma, sops[i].sem_num, pt))
				otime = 1;
	}
done:
	if (otime)
		sma->sem_otime = get_seconds();
}
643
644
645
646
647
648
649
650
651
652
653
654static int count_semncnt (struct sem_array * sma, ushort semnum)
655{
656 int semncnt;
657 struct sem_queue * q;
658
659 semncnt = 0;
660 list_for_each_entry(q, &sma->sem_pending, list) {
661 struct sembuf * sops = q->sops;
662 int nsops = q->nsops;
663 int i;
664 for (i = 0; i < nsops; i++)
665 if (sops[i].sem_num == semnum
666 && (sops[i].sem_op < 0)
667 && !(sops[i].sem_flg & IPC_NOWAIT))
668 semncnt++;
669 }
670 return semncnt;
671}
672
673static int count_semzcnt (struct sem_array * sma, ushort semnum)
674{
675 int semzcnt;
676 struct sem_queue * q;
677
678 semzcnt = 0;
679 list_for_each_entry(q, &sma->sem_pending, list) {
680 struct sembuf * sops = q->sops;
681 int nsops = q->nsops;
682 int i;
683 for (i = 0; i < nsops; i++)
684 if (sops[i].sem_num == semnum
685 && (sops[i].sem_op == 0)
686 && !(sops[i].sem_flg & IPC_NOWAIT))
687 semzcnt++;
688 }
689 return semzcnt;
690}
691
/* RCU callback: free a sem_undo once the grace period has elapsed. */
static void free_un(struct rcu_head *head)
{
	struct sem_undo *un = container_of(head, struct sem_undo, rcu);
	kfree(un);
}
697
698
699
700
701
/*
 * freeary - destroy a semaphore set (IPC_RMID or namespace teardown).
 * @ns: namespace
 * @ipcp: ipc perm of the array, locked by the caller
 *
 * Called with sem_ids.rw_mutex (writer) and the array lock held.
 * Invalidates all attached undo structures, wakes every pending waiter
 * with -EIDRM, removes the id and drops the final reference.
 */
static void freeary(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
{
	struct sem_undo *un, *tu;
	struct sem_queue *q, *tq;
	struct sem_array *sma = container_of(ipcp, struct sem_array, sem_perm);
	struct list_head tasks;

	/*
	 * Invalidate the undo structures for this set: mark them dead
	 * (semid = -1), detach them from both lists and free via RCU.
	 */
	assert_spin_locked(&sma->sem_perm.lock);
	list_for_each_entry_safe(un, tu, &sma->list_id, list_id) {
		list_del(&un->list_id);
		spin_lock(&un->ulp->lock);
		un->semid = -1;
		list_del_rcu(&un->list_proc);
		spin_unlock(&un->ulp->lock);
		call_rcu(&un->rcu, free_un);
	}

	/* wake up all pending processes and let them fail with EIDRM */
	INIT_LIST_HEAD(&tasks);
	list_for_each_entry_safe(q, tq, &sma->sem_pending, list) {
		unlink_queue(sma, q);
		wake_up_sem_queue_prepare(&tasks, q, -EIDRM);
	}

	/* remove the semaphore set from the id allocator */
	sem_rmid(ns, sma);
	sem_unlock(sma);

	wake_up_sem_queue_do(&tasks);
	ns->used_sems -= sma->sem_nsems;
	security_sem_free(sma);
	ipc_rcu_putref(sma);
}
736
/*
 * copy_semid_to_user - copy a semid64_ds to userspace in the ABI
 * version the caller asked for (IPC_64 passes the struct through,
 * IPC_OLD repacks it into the legacy semid_ds layout).
 * Returns the copy_to_user() result (bytes not copied) or -EINVAL
 * for an unknown version.
 */
static unsigned long copy_semid_to_user(void __user *buf, struct semid64_ds *in, int version)
{
	switch(version) {
	case IPC_64:
		return copy_to_user(buf, in, sizeof(*in));
	case IPC_OLD:
	    {
		struct semid_ds out;

		memset(&out, 0, sizeof(out));

		ipc64_perm_to_ipc_perm(&in->sem_perm, &out.sem_perm);

		out.sem_otime	= in->sem_otime;
		out.sem_ctime	= in->sem_ctime;
		out.sem_nsems	= in->sem_nsems;

		return copy_to_user(buf, &out, sizeof(out));
	    }
	default:
		return -EINVAL;
	}
}
760
/*
 * semctl_nolock - handle the semctl commands that do not modify the
 * array: IPC_INFO/SEM_INFO (namespace limits/usage) and
 * IPC_STAT/SEM_STAT (per-array status).
 *
 * For the INFO commands the result is the highest in-use index
 * (non-negative); STAT returns the ipc id (SEM_STAT) or 0 (IPC_STAT).
 * Negative values are errnos.
 */
static int semctl_nolock(struct ipc_namespace *ns, int semid,
			 int cmd, int version, union semun arg)
{
	int err;
	struct sem_array *sma;

	switch(cmd) {
	case IPC_INFO:
	case SEM_INFO:
	{
		struct seminfo seminfo;
		int max_id;

		err = security_sem_semctl(NULL, cmd);
		if (err)
			return err;

		memset(&seminfo,0,sizeof(seminfo));
		seminfo.semmni = ns->sc_semmni;
		seminfo.semmns = ns->sc_semmns;
		seminfo.semmsl = ns->sc_semmsl;
		seminfo.semopm = ns->sc_semopm;
		seminfo.semvmx = SEMVMX;
		seminfo.semmnu = SEMMNU;
		seminfo.semmap = SEMMAP;
		seminfo.semume = SEMUME;
		down_read(&sem_ids(ns).rw_mutex);
		if (cmd == SEM_INFO) {
			/* SEM_INFO reports live usage instead of limits */
			seminfo.semusz = sem_ids(ns).in_use;
			seminfo.semaem = ns->used_sems;
		} else {
			seminfo.semusz = SEMUSZ;
			seminfo.semaem = SEMAEM;
		}
		max_id = ipc_get_maxid(&sem_ids(ns));
		up_read(&sem_ids(ns).rw_mutex);
		if (copy_to_user (arg.__buf, &seminfo, sizeof(struct seminfo)))
			return -EFAULT;
		return (max_id < 0) ? 0: max_id;
	}
	case IPC_STAT:
	case SEM_STAT:
	{
		struct semid64_ds tbuf;
		int id;

		if (cmd == SEM_STAT) {
			/* SEM_STAT takes an index, returns the ipc id */
			sma = sem_lock(ns, semid);
			if (IS_ERR(sma))
				return PTR_ERR(sma);
			id = sma->sem_perm.id;
		} else {
			/* IPC_STAT takes an id (sequence-checked) */
			sma = sem_lock_check(ns, semid);
			if (IS_ERR(sma))
				return PTR_ERR(sma);
			id = 0;
		}

		err = -EACCES;
		if (ipcperms(ns, &sma->sem_perm, S_IRUGO))
			goto out_unlock;

		err = security_sem_semctl(sma, cmd);
		if (err)
			goto out_unlock;

		memset(&tbuf, 0, sizeof(tbuf));

		kernel_to_ipc64_perm(&sma->sem_perm, &tbuf.sem_perm);
		tbuf.sem_otime  = sma->sem_otime;
		tbuf.sem_ctime  = sma->sem_ctime;
		tbuf.sem_nsems  = sma->sem_nsems;
		sem_unlock(sma);
		if (copy_semid_to_user (arg.buf, &tbuf, version))
			return -EFAULT;
		return id;
	}
	default:
		return -EINVAL;
	}
out_unlock:
	sem_unlock(sma);
	return err;
}
845
/*
 * semctl_main - handle the semctl commands that operate on semaphore
 * values: GETALL/SETALL/GETVAL/GETPID/GETNCNT/GETZCNT/SETVAL.
 *
 * Returns the non-negative command result (e.g. the value for GETVAL)
 * or a negative errno. Deferred wakeups caused by SETVAL/SETALL are
 * collected in @tasks and performed after the array lock is dropped.
 */
static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
		int cmd, int version, union semun arg)
{
	struct sem_array *sma;
	struct sem* curr;
	int err;
	ushort fast_sem_io[SEMMSL_FAST];	/* stack buffer for small arrays */
	ushort* sem_io = fast_sem_io;
	int nsems;
	struct list_head tasks;

	sma = sem_lock_check(ns, semid);
	if (IS_ERR(sma))
		return PTR_ERR(sma);

	INIT_LIST_HEAD(&tasks);
	nsems = sma->sem_nsems;

	err = -EACCES;
	if (ipcperms(ns, &sma->sem_perm,
			(cmd == SETVAL || cmd == SETALL) ? S_IWUGO : S_IRUGO))
		goto out_unlock;

	err = security_sem_semctl(sma, cmd);
	if (err)
		goto out_unlock;

	err = -EACCES;
	switch (cmd) {
	case GETALL:
	{
		ushort __user *array = arg.array;
		int i;

		if(nsems > SEMMSL_FAST) {
			/*
			 * Drop the spinlock (keeping a reference) so the
			 * buffer can be allocated sleeping, then relock
			 * and recheck that the set still exists.
			 */
			sem_getref_and_unlock(sma);

			sem_io = ipc_alloc(sizeof(ushort)*nsems);
			if(sem_io == NULL) {
				sem_putref(sma);
				return -ENOMEM;
			}

			sem_lock_and_putref(sma);
			if (sma->sem_perm.deleted) {
				sem_unlock(sma);
				err = -EIDRM;
				goto out_free;
			}
		}

		for (i = 0; i < sma->sem_nsems; i++)
			sem_io[i] = sma->sem_base[i].semval;
		sem_unlock(sma);
		err = 0;
		if(copy_to_user(array, sem_io, nsems*sizeof(ushort)))
			err = -EFAULT;
		goto out_free;
	}
	case SETALL:
	{
		int i;
		struct sem_undo *un;

		/* drop the lock for allocation and the user-space copy */
		sem_getref_and_unlock(sma);

		if(nsems > SEMMSL_FAST) {
			sem_io = ipc_alloc(sizeof(ushort)*nsems);
			if(sem_io == NULL) {
				sem_putref(sma);
				return -ENOMEM;
			}
		}

		if (copy_from_user (sem_io, arg.array, nsems*sizeof(ushort))) {
			sem_putref(sma);
			err = -EFAULT;
			goto out_free;
		}

		for (i = 0; i < nsems; i++) {
			if (sem_io[i] > SEMVMX) {
				sem_putref(sma);
				err = -ERANGE;
				goto out_free;
			}
		}
		sem_lock_and_putref(sma);
		if (sma->sem_perm.deleted) {
			sem_unlock(sma);
			err = -EIDRM;
			goto out_free;
		}

		for (i = 0; i < nsems; i++)
			sma->sem_base[i].semval = sem_io[i];

		/* setting the values clears all undo adjustments */
		assert_spin_locked(&sma->sem_perm.lock);
		list_for_each_entry(un, &sma->list_id, list_id) {
			for (i = 0; i < nsems; i++)
				un->semadj[i] = 0;
		}
		sma->sem_ctime = get_seconds();
		/* maybe some queued-up processes were waiting for this */
		do_smart_update(sma, NULL, 0, 0, &tasks);
		err = 0;
		goto out_unlock;
	}
	/* GETVAL, GETPID, GETNCTN, GETZCNT, SETVAL: fall-through */
	}
	err = -EINVAL;
	if(semnum < 0 || semnum >= nsems)
		goto out_unlock;

	curr = &sma->sem_base[semnum];

	switch (cmd) {
	case GETVAL:
		err = curr->semval;
		goto out_unlock;
	case GETPID:
		err = curr->sempid;
		goto out_unlock;
	case GETNCNT:
		err = count_semncnt(sma,semnum);
		goto out_unlock;
	case GETZCNT:
		err = count_semzcnt(sma,semnum);
		goto out_unlock;
	case SETVAL:
	{
		int val = arg.val;
		struct sem_undo *un;

		err = -ERANGE;
		if (val > SEMVMX || val < 0)
			goto out_unlock;

		/* setting the value clears this semaphore's undo entries */
		assert_spin_locked(&sma->sem_perm.lock);
		list_for_each_entry(un, &sma->list_id, list_id)
			un->semadj[semnum] = 0;

		curr->semval = val;
		curr->sempid = task_tgid_vnr(current);
		sma->sem_ctime = get_seconds();
		/* maybe some queued-up processes were waiting for this */
		do_smart_update(sma, NULL, 0, 0, &tasks);
		err = 0;
		goto out_unlock;
	}
	}
out_unlock:
	sem_unlock(sma);
	wake_up_sem_queue_do(&tasks);

out_free:
	if(sem_io != fast_sem_io)
		ipc_free(sem_io, sizeof(ushort)*nsems);
	return err;
}
1006
/*
 * copy_semid_from_user - read a semid64_ds from userspace in the ABI
 * version the caller used. IPC_OLD only carries uid/gid/mode; the
 * remaining fields are left untouched. Returns 0, -EFAULT or -EINVAL.
 */
static inline unsigned long
copy_semid_from_user(struct semid64_ds *out, void __user *buf, int version)
{
	switch(version) {
	case IPC_64:
		if (copy_from_user(out, buf, sizeof(*out)))
			return -EFAULT;
		return 0;
	case IPC_OLD:
	    {
		struct semid_ds tbuf_old;

		if(copy_from_user(&tbuf_old, buf, sizeof(tbuf_old)))
			return -EFAULT;

		out->sem_perm.uid	= tbuf_old.sem_perm.uid;
		out->sem_perm.gid	= tbuf_old.sem_perm.gid;
		out->sem_perm.mode	= tbuf_old.sem_perm.mode;

		return 0;
	    }
	default:
		return -EINVAL;
	}
}
1032
1033
1034
1035
1036
1037
/*
 * semctl_down - handle IPC_RMID and IPC_SET (both ABI versions).
 *
 * ipcctl_pre_down() takes sem_ids.rw_mutex as a writer, performs the
 * ownership/capability checks and returns the array locked; the
 * rw_mutex is released here in all cases. For IPC_RMID, freeary()
 * consumes the array lock.
 */
static int semctl_down(struct ipc_namespace *ns, int semid,
		       int cmd, int version, union semun arg)
{
	struct sem_array *sma;
	int err;
	struct semid64_ds semid64;
	struct kern_ipc_perm *ipcp;

	if(cmd == IPC_SET) {
		if (copy_semid_from_user(&semid64, arg.buf, version))
			return -EFAULT;
	}

	ipcp = ipcctl_pre_down(ns, &sem_ids(ns), semid, cmd,
			       &semid64.sem_perm, 0);
	if (IS_ERR(ipcp))
		return PTR_ERR(ipcp);

	sma = container_of(ipcp, struct sem_array, sem_perm);

	err = security_sem_semctl(sma, cmd);
	if (err)
		goto out_unlock;

	switch(cmd){
	case IPC_RMID:
		/* freeary unlocks and destroys the array */
		freeary(ns, ipcp);
		goto out_up;
	case IPC_SET:
		ipc_update_perm(&semid64.sem_perm, ipcp);
		sma->sem_ctime = get_seconds();
		break;
	default:
		err = -EINVAL;
	}

out_unlock:
	sem_unlock(sma);
out_up:
	up_write(&sem_ids(ns).rw_mutex);
	return err;
}
1080
/*
 * semctl(2) - dispatch to the right handler family:
 *  - info/stat commands (no array modification): semctl_nolock()
 *  - value commands: semctl_main()
 *  - RMID/SET (need rw_mutex as writer): semctl_down()
 * ipc_parse_version() strips the IPC_64 flag out of cmd.
 */
SYSCALL_DEFINE(semctl)(int semid, int semnum, int cmd, union semun arg)
{
	int err = -EINVAL;
	int version;
	struct ipc_namespace *ns;

	if (semid < 0)
		return -EINVAL;

	version = ipc_parse_version(&cmd);
	ns = current->nsproxy->ipc_ns;

	switch(cmd) {
	case IPC_INFO:
	case SEM_INFO:
	case IPC_STAT:
	case SEM_STAT:
		err = semctl_nolock(ns, semid, cmd, version, arg);
		return err;
	case GETALL:
	case GETVAL:
	case GETPID:
	case GETNCNT:
	case GETZCNT:
	case SETVAL:
	case SETALL:
		err = semctl_main(ns,semid,semnum,cmd,version,arg);
		return err;
	case IPC_RMID:
	case IPC_SET:
		err = semctl_down(ns, semid, cmd, version, arg);
		return err;
	default:
		return -EINVAL;
	}
}
#ifdef CONFIG_HAVE_SYSCALL_WRAPPERS
/*
 * Wrapper for architectures that need explicit syscall-argument
 * sign-extension (CONFIG_HAVE_SYSCALL_WRAPPERS).
 */
asmlinkage long SyS_semctl(int semid, int semnum, int cmd, union semun arg)
{
	return SYSC_semctl((int) semid, (int) semnum, (int) cmd, arg);
}
SYSCALL_ALIAS(sys_semctl, SyS_semctl);
#endif
1124
1125
1126
1127
1128
1129
1130
1131
1132
1133
1134
1135
/*
 * get_undo_list - return the current task's undo list, allocating it on
 * first use.
 * @undo_listp: out parameter for the (possibly new) list
 *
 * The list's lifetime is governed by its refcount: it can be shared
 * across tasks via CLONE_SYSVSEM (see copy_semundo) and is released in
 * exit_sem(). Returns 0 on success, -ENOMEM on allocation failure.
 */
static inline int get_undo_list(struct sem_undo_list **undo_listp)
{
	struct sem_undo_list *undo_list;

	undo_list = current->sysvsem.undo_list;
	if (!undo_list) {
		undo_list = kzalloc(sizeof(*undo_list), GFP_KERNEL);
		if (undo_list == NULL)
			return -ENOMEM;
		spin_lock_init(&undo_list->lock);
		atomic_set(&undo_list->refcnt, 1);
		INIT_LIST_HEAD(&undo_list->list_proc);

		current->sysvsem.undo_list = undo_list;
	}
	*undo_listp = undo_list;
	return 0;
}
1154
1155static struct sem_undo *__lookup_undo(struct sem_undo_list *ulp, int semid)
1156{
1157 struct sem_undo *un;
1158
1159 list_for_each_entry_rcu(un, &ulp->list_proc, list_proc) {
1160 if (un->semid == semid)
1161 return un;
1162 }
1163 return NULL;
1164}
1165
/*
 * lookup_undo - find the undo structure for @semid and, if found, move
 * it to the front of the per-process list (MRU ordering, so repeated
 * lookups of the same set stay cheap). Caller holds ulp->lock.
 */
static struct sem_undo *lookup_undo(struct sem_undo_list *ulp, int semid)
{
	struct sem_undo *un;

	assert_spin_locked(&ulp->lock);

	un = __lookup_undo(ulp, semid);
	if (un) {
		list_del_rcu(&un->list_proc);
		list_add_rcu(&un->list_proc, &ulp->list_proc);
	}
	return un;
}
1179
1180
1181
1182
1183
1184
1185
1186
1187
1188
1189
1190
/*
 * find_alloc_undo - look up (and if not present create) the undo
 * structure for @semid.
 * @ns: namespace
 * @semid: semaphore array id
 *
 * The size of the undo structure depends on the size of the semaphore
 * array, so the allocation cannot happen under the array spinlock and
 * races must be rechecked after relocking.
 *
 * Lifetime rules: sem_undo is rcu-protected; on success the function
 * returns with rcu_read_lock() held — the caller must rcu_read_unlock()
 * once it is done with the returned pointer.
 */
static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid)
{
	struct sem_array *sma;
	struct sem_undo_list *ulp;
	struct sem_undo *un, *new;
	int nsems;
	int error;

	error = get_undo_list(&ulp);
	if (error)
		return ERR_PTR(error);

	/* fast path: undo structure already exists */
	rcu_read_lock();
	spin_lock(&ulp->lock);
	un = lookup_undo(ulp, semid);
	spin_unlock(&ulp->lock);
	if (likely(un!=NULL))
		goto out;
	rcu_read_unlock();

	/* no undo structure around - allocate one. */
	/* step 1: figure out the size of the semaphore array */
	sma = sem_lock_check(ns, semid);
	if (IS_ERR(sma))
		return ERR_CAST(sma);

	nsems = sma->sem_nsems;
	sem_getref_and_unlock(sma);

	/* step 2: allocate new undo structure (may sleep) */
	new = kzalloc(sizeof(struct sem_undo) + sizeof(short)*nsems, GFP_KERNEL);
	if (!new) {
		sem_putref(sma);
		return ERR_PTR(-ENOMEM);
	}

	/* step 3: check for races: did someone else allocate the undo
	 * struct, or was the array removed while we slept? */
	sem_lock_and_putref(sma);
	if (sma->sem_perm.deleted) {
		sem_unlock(sma);
		kfree(new);
		un = ERR_PTR(-EIDRM);
		goto out;
	}
	spin_lock(&ulp->lock);

	/*
	 * another thread may have allocated an undo structure for this
	 * set while we dropped the lock - reuse theirs and discard ours.
	 */
	un = lookup_undo(ulp, semid);
	if (un) {
		kfree(new);
		goto success;
	}

	/* step 4: initialize and link the new structure into both lists */
	new->semadj = (short *) &new[1];
	new->ulp = ulp;
	new->semid = semid;
	assert_spin_locked(&ulp->lock);
	list_add_rcu(&new->list_proc, &ulp->list_proc);
	assert_spin_locked(&sma->sem_perm.lock);
	list_add(&new->list_id, &sma->list_id);
	un = new;

success:
	spin_unlock(&ulp->lock);
	rcu_read_lock();	/* held across return - see lifetime rules */
	sem_unlock(sma);
out:
	return un;
}
1262
1263
1264
1265
1266
1267
1268
1269
1270
1271
1272
1273
1274
1275
/*
 * get_queue_result - retrieve the final result code of a sem_queue.
 * @q: queue entry of the sleeping task
 *
 * If a wakeup is still in flight (status == IN_WAKEUP, set by
 * wake_up_sem_queue_prepare), spin until the waker publishes the real
 * result in wake_up_sem_queue_do(). The window is tiny: the waker runs
 * with preemption disabled between the two stores.
 */
static int get_queue_result(struct sem_queue *q)
{
	int error;

	error = q->status;
	while (unlikely(error == IN_WAKEUP)) {
		cpu_relax();
		error = q->status;
	}

	return error;
}
1288
1289
1290SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
1291 unsigned, nsops, const struct timespec __user *, timeout)
1292{
1293 int error = -EINVAL;
1294 struct sem_array *sma;
1295 struct sembuf fast_sops[SEMOPM_FAST];
1296 struct sembuf* sops = fast_sops, *sop;
1297 struct sem_undo *un;
1298 int undos = 0, alter = 0, max;
1299 struct sem_queue queue;
1300 unsigned long jiffies_left = 0;
1301 struct ipc_namespace *ns;
1302 struct list_head tasks;
1303
1304 ns = current->nsproxy->ipc_ns;
1305
1306 if (nsops < 1 || semid < 0)
1307 return -EINVAL;
1308 if (nsops > ns->sc_semopm)
1309 return -E2BIG;
1310 if(nsops > SEMOPM_FAST) {
1311 sops = kmalloc(sizeof(*sops)*nsops,GFP_KERNEL);
1312 if(sops==NULL)
1313 return -ENOMEM;
1314 }
1315 if (copy_from_user (sops, tsops, nsops * sizeof(*tsops))) {
1316 error=-EFAULT;
1317 goto out_free;
1318 }
1319 if (timeout) {
1320 struct timespec _timeout;
1321 if (copy_from_user(&_timeout, timeout, sizeof(*timeout))) {
1322 error = -EFAULT;
1323 goto out_free;
1324 }
1325 if (_timeout.tv_sec < 0 || _timeout.tv_nsec < 0 ||
1326 _timeout.tv_nsec >= 1000000000L) {
1327 error = -EINVAL;
1328 goto out_free;
1329 }
1330 jiffies_left = timespec_to_jiffies(&_timeout);
1331 }
1332 max = 0;
1333 for (sop = sops; sop < sops + nsops; sop++) {
1334 if (sop->sem_num >= max)
1335 max = sop->sem_num;
1336 if (sop->sem_flg & SEM_UNDO)
1337 undos = 1;
1338 if (sop->sem_op != 0)
1339 alter = 1;
1340 }
1341
1342 if (undos) {
1343 un = find_alloc_undo(ns, semid);
1344 if (IS_ERR(un)) {
1345 error = PTR_ERR(un);
1346 goto out_free;
1347 }
1348 } else
1349 un = NULL;
1350
1351 INIT_LIST_HEAD(&tasks);
1352
1353 sma = sem_lock_check(ns, semid);
1354 if (IS_ERR(sma)) {
1355 if (un)
1356 rcu_read_unlock();
1357 error = PTR_ERR(sma);
1358 goto out_free;
1359 }
1360
1361
1362
1363
1364
1365
1366
1367
1368 error = -EIDRM;
1369 if (un) {
1370 if (un->semid == -1) {
1371 rcu_read_unlock();
1372 goto out_unlock_free;
1373 } else {
1374
1375
1376
1377
1378
1379
1380
1381
1382 rcu_read_unlock();
1383 }
1384 }
1385
1386 error = -EFBIG;
1387 if (max >= sma->sem_nsems)
1388 goto out_unlock_free;
1389
1390 error = -EACCES;
1391 if (ipcperms(ns, &sma->sem_perm, alter ? S_IWUGO : S_IRUGO))
1392 goto out_unlock_free;
1393
1394 error = security_sem_semop(sma, sops, nsops, alter);
1395 if (error)
1396 goto out_unlock_free;
1397
1398 error = try_atomic_semop (sma, sops, nsops, un, task_tgid_vnr(current));
1399 if (error <= 0) {
1400 if (alter && error == 0)
1401 do_smart_update(sma, sops, nsops, 1, &tasks);
1402
1403 goto out_unlock_free;
1404 }
1405
1406
1407
1408
1409
1410 queue.sops = sops;
1411 queue.nsops = nsops;
1412 queue.undo = un;
1413 queue.pid = task_tgid_vnr(current);
1414 queue.alter = alter;
1415 if (alter)
1416 list_add_tail(&queue.list, &sma->sem_pending);
1417 else
1418 list_add(&queue.list, &sma->sem_pending);
1419
1420 if (nsops == 1) {
1421 struct sem *curr;
1422 curr = &sma->sem_base[sops->sem_num];
1423
1424 if (alter)
1425 list_add_tail(&queue.simple_list, &curr->sem_pending);
1426 else
1427 list_add(&queue.simple_list, &curr->sem_pending);
1428 } else {
1429 INIT_LIST_HEAD(&queue.simple_list);
1430 sma->complex_count++;
1431 }
1432
1433 queue.status = -EINTR;
1434 queue.sleeper = current;
1435 current->state = TASK_INTERRUPTIBLE;
1436 sem_unlock(sma);
1437
1438 if (timeout)
1439 jiffies_left = schedule_timeout(jiffies_left);
1440 else
1441 schedule();
1442
1443 error = get_queue_result(&queue);
1444
1445 if (error != -EINTR) {
1446
1447
1448
1449
1450
1451
1452
1453 smp_mb();
1454
1455 goto out_free;
1456 }
1457
1458 sma = sem_lock(ns, semid);
1459 if (IS_ERR(sma)) {
1460 error = -EIDRM;
1461 goto out_free;
1462 }
1463
1464 error = get_queue_result(&queue);
1465
1466
1467
1468
1469
1470 if (error != -EINTR) {
1471 goto out_unlock_free;
1472 }
1473
1474
1475
1476
1477 if (timeout && jiffies_left == 0)
1478 error = -EAGAIN;
1479 unlink_queue(sma, &queue);
1480
1481out_unlock_free:
1482 sem_unlock(sma);
1483
1484 wake_up_sem_queue_do(&tasks);
1485out_free:
1486 if(sops != fast_sops)
1487 kfree(sops);
1488 return error;
1489}
1490
/* semop(2) is semtimedop(2) without a timeout. */
SYSCALL_DEFINE3(semop, int, semid, struct sembuf __user *, tsops,
		unsigned, nsops)
{
	return sys_semtimedop(semid, tsops, nsops, NULL);
}
1496
1497
1498
1499
1500
/*
 * copy_semundo - fork-time handling of the SysV undo list.
 * With CLONE_SYSVSEM the child shares the parent's undo_list (its
 * refcount is bumped and dropped again in exit_sem()); otherwise the
 * child starts without one — it is allocated lazily by get_undo_list()
 * on first SEM_UNDO use.
 */
int copy_semundo(unsigned long clone_flags, struct task_struct *tsk)
{
	struct sem_undo_list *undo_list;
	int error;

	if (clone_flags & CLONE_SYSVSEM) {
		error = get_undo_list(&undo_list);
		if (error)
			return error;
		atomic_inc(&undo_list->refcnt);
		tsk->sysvsem.undo_list = undo_list;
	} else
		tsk->sysvsem.undo_list = NULL;

	return 0;
}
1517
1518
1519
1520
1521
1522
1523
1524
1525
1526
1527
1528
1529
1530void exit_sem(struct task_struct *tsk)
1531{
1532 struct sem_undo_list *ulp;
1533
1534 ulp = tsk->sysvsem.undo_list;
1535 if (!ulp)
1536 return;
1537 tsk->sysvsem.undo_list = NULL;
1538
1539 if (!atomic_dec_and_test(&ulp->refcnt))
1540 return;
1541
1542 for (;;) {
1543 struct sem_array *sma;
1544 struct sem_undo *un;
1545 struct list_head tasks;
1546 int semid;
1547 int i;
1548
1549 rcu_read_lock();
1550 un = list_entry_rcu(ulp->list_proc.next,
1551 struct sem_undo, list_proc);
1552 if (&un->list_proc == &ulp->list_proc)
1553 semid = -1;
1554 else
1555 semid = un->semid;
1556 rcu_read_unlock();
1557
1558 if (semid == -1)
1559 break;
1560
1561 sma = sem_lock_check(tsk->nsproxy->ipc_ns, un->semid);
1562
1563
1564 if (IS_ERR(sma))
1565 continue;
1566
1567 un = __lookup_undo(ulp, semid);
1568 if (un == NULL) {
1569
1570
1571
1572 sem_unlock(sma);
1573 continue;
1574 }
1575
1576
1577 assert_spin_locked(&sma->sem_perm.lock);
1578 list_del(&un->list_id);
1579
1580 spin_lock(&ulp->lock);
1581 list_del_rcu(&un->list_proc);
1582 spin_unlock(&ulp->lock);
1583
1584
1585 for (i = 0; i < sma->sem_nsems; i++) {
1586 struct sem * semaphore = &sma->sem_base[i];
1587 if (un->semadj[i]) {
1588 semaphore->semval += un->semadj[i];
1589
1590
1591
1592
1593
1594
1595
1596
1597
1598
1599
1600
1601
1602 if (semaphore->semval < 0)
1603 semaphore->semval = 0;
1604 if (semaphore->semval > SEMVMX)
1605 semaphore->semval = SEMVMX;
1606 semaphore->sempid = task_tgid_vnr(current);
1607 }
1608 }
1609
1610 INIT_LIST_HEAD(&tasks);
1611 do_smart_update(sma, NULL, 0, 1, &tasks);
1612 sem_unlock(sma);
1613 wake_up_sem_queue_do(&tasks);
1614
1615 call_rcu(&un->rcu, free_un);
1616 }
1617 kfree(ulp);
1618}
1619
#ifdef CONFIG_PROC_FS
/*
 * sysvipc_sem_proc_show - format one array's line for
 * /proc/sysvipc/sem (the column header is registered in sem_init()).
 */
static int sysvipc_sem_proc_show(struct seq_file *s, void *it)
{
	struct sem_array *sma = it;

	return seq_printf(s,
			  "%10d %10d  %4o %10u %5u %5u %5u %5u %10lu %10lu\n",
			  sma->sem_perm.key,
			  sma->sem_perm.id,
			  sma->sem_perm.mode,
			  sma->sem_nsems,
			  sma->sem_perm.uid,
			  sma->sem_perm.gid,
			  sma->sem_perm.cuid,
			  sma->sem_perm.cgid,
			  sma->sem_otime,
			  sma->sem_ctime);
}
#endif
1639