1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76#include <linux/slab.h>
77#include <linux/spinlock.h>
78#include <linux/init.h>
79#include <linux/proc_fs.h>
80#include <linux/time.h>
81#include <linux/security.h>
82#include <linux/syscalls.h>
83#include <linux/audit.h>
84#include <linux/capability.h>
85#include <linux/seq_file.h>
86#include <linux/rwsem.h>
87#include <linux/nsproxy.h>
88#include <linux/ipc_namespace.h>
89
90#include <asm/uaccess.h>
91#include "util.h"
92
93
/* One semaphore structure for each semaphore in the system. */
struct sem {
	int semval;			/* current value */
	int sempid;			/* pid of last operation */
	spinlock_t lock;		/* per-semaphore fast-path lock */
	struct list_head pending_alter;	/* pending single-sop operations
					 * that alter the semaphore */
	struct list_head pending_const;	/* pending single-sop operations
					 * that do not alter the semaphore */
	time_t sem_otime;		/* candidate for sem_otime; the real
					 * otime is the max over all sems
					 * (see get_semotime()) */
} ____cacheline_aligned_in_smp;
104
105
/* One queue for each sleeping process in the system. */
struct sem_queue {
	struct list_head list;		/* queue of pending operations */
	struct task_struct *sleeper;	/* this process */
	struct sem_undo *undo;		/* undo structure */
	int pid;			/* process id of requesting process */
	int status;			/* completion status of operation */
	struct sembuf *sops;		/* array of pending operations */
	int nsops;			/* number of operations */
	int alter;			/* does *sops alter the array? */
};
116
117
118
119
/* Each task has a list of undo requests. They are executed automatically
 * when the process exits.
 */
struct sem_undo {
	struct list_head list_proc;	/* per-process list: *
					 * all undos from one process
					 * rcu protected */
	struct rcu_head rcu;		/* rcu struct for sem_undo */
	struct sem_undo_list *ulp;	/* back ptr to sem_undo_list */
	struct list_head list_id;	/* per semaphore array list:
					 * all undos for one array */
	int semid;			/* semaphore set identifier */
	short *semadj;			/* array of adjustments */
					/* one per semaphore */
};
132
133
134
135
/* sem_undo_list controls shared access to the list of sem_undo structures
 * that may be shared among all tasks in a thread group (CLONE_SYSVSEM).
 */
struct sem_undo_list {
	atomic_t refcnt;		/* shared by CLONE_SYSVSEM tasks */
	spinlock_t lock;		/* protects list_proc */
	struct list_head list_proc;	/* all sem_undo of this process */
};
141
142
143#define sem_ids(ns) ((ns)->ids[IPC_SEM_IDS])
144
145#define sem_checkid(sma, semid) ipc_checkid(&sma->sem_perm, semid)
146
147static int newary(struct ipc_namespace *, struct ipc_params *);
148static void freeary(struct ipc_namespace *, struct kern_ipc_perm *);
149#ifdef CONFIG_PROC_FS
150static int sysvipc_sem_proc_show(struct seq_file *s, void *it);
151#endif
152
153#define SEMMSL_FAST 256
154#define SEMOPM_FAST 64
155
156
157
158
159
160
161
162
163
164
165
166
167
168#define sc_semmsl sem_ctls[0]
169#define sc_semmns sem_ctls[1]
170#define sc_semopm sem_ctls[2]
171#define sc_semmni sem_ctls[3]
172
/**
 * sem_init_ns - initialize the semaphore code of a new ipc namespace
 * @ns: namespace being set up
 *
 * Installs the compile-time default sysctl limits and an empty id table.
 */
void sem_init_ns(struct ipc_namespace *ns)
{
	ns->sc_semmsl = SEMMSL;
	ns->sc_semmns = SEMMNS;
	ns->sc_semopm = SEMOPM;
	ns->sc_semmni = SEMMNI;
	ns->used_sems = 0;
	ipc_init_ids(&ns->ids[IPC_SEM_IDS]);
}
182
#ifdef CONFIG_IPC_NS
/* Destroy every semaphore set of a dying ipc namespace, then the id table. */
void sem_exit_ns(struct ipc_namespace *ns)
{
	free_ipcs(ns, &sem_ids(ns), freeary);
	idr_destroy(&ns->ids[IPC_SEM_IDS].ipcs_idr);
}
#endif
190
/* Boot-time initialization: set up the initial namespace and /proc/sysvipc/sem. */
void __init sem_init (void)
{
	sem_init_ns(&init_ipc_ns);
	ipc_init_proc_interface("sysvipc/sem",
				"       key      semid perms      nsems   uid   gid  cuid  cgid      otime      ctime\n",
				IPC_SEM_IDS, sysvipc_sem_proc_show);
}
198
199
200
201
202
203
204
205
/**
 * unmerge_queues - unmerge queues, if possible.
 * @sma: semaphore array
 *
 * The function unmerges the wait queues if complex_count is 0.
 * It must be called prior to dropping the global semaphore array lock.
 */
static void unmerge_queues(struct sem_array *sma)
{
	struct sem_queue *q, *tq;

	/* complex operations still around? */
	if (sma->complex_count)
		return;
	/*
	 * We will switch back to simple mode.
	 * Move all pending operation back into the per-semaphore
	 * queues.
	 */
	list_for_each_entry_safe(q, tq, &sma->pending_alter, list) {
		struct sem *curr;
		curr = &sma->sem_base[q->sops[0].sem_num];

		list_add_tail(&q->list, &curr->pending_alter);
	}
	/* All entries were moved; reset the global list head. */
	INIT_LIST_HEAD(&sma->pending_alter);
}
226
227
228
229
230
231
232
233
234
235
236static void merge_queues(struct sem_array *sma)
237{
238 int i;
239 for (i = 0; i < sma->sem_nsems; i++) {
240 struct sem *sem = sma->sem_base + i;
241
242 list_splice_init(&sem->pending_alter, &sma->pending_alter);
243 }
244}
245
/* RCU callback: release the LSM security blob, then the ipc_rcu allocation. */
static void sem_rcu_free(struct rcu_head *head)
{
	struct ipc_rcu *p = container_of(head, struct ipc_rcu, rcu);
	struct sem_array *sma = ipc_rcu_to_struct(p);

	security_sem_free(sma);
	ipc_rcu_free(head);
}
254
255
256
257
258
259
260
261
/*
 * Wait until all currently ongoing simple ops have completed.
 * Caller must own sem_perm.lock.
 * New simple ops cannot start, because simple ops first check
 * that sem_perm.lock is free.
 * that a) sem_perm.lock is free and b) complex_count is 0.
 */
static void sem_wait_array(struct sem_array *sma)
{
	int i;
	struct sem *sem;

	if (sma->complex_count)  {
		/* The thread that increased sma->complex_count waited on
		 * all sem->lock locks. Thus we don't need to wait again.
		 */
		return;
	}

	for (i = 0; i < sma->sem_nsems; i++) {
		sem = sma->sem_base + i;
		spin_unlock_wait(&sem->lock);
	}
}
279
280
281
282
283
284
285
286
/*
 * If the request contains only one semaphore operation, and there are
 * no complex transactions pending, lock only the semaphore involved.
 * Otherwise, lock the entire semaphore array, since we either have
 * multiple semaphores in our own semops, or we need to look at
 * semaphores from other pending complex operations.
 *
 * Returns the locked semaphore's index for the fast path, or -1 when
 * the global array lock was taken (pass this to sem_unlock()).
 */
static inline int sem_lock(struct sem_array *sma, struct sembuf *sops,
			      int nsops)
{
	struct sem *sem;

	if (nsops != 1) {
		/* Complex operation - acquire a full lock */
		ipc_lock_object(&sma->sem_perm);

		/* And wait until all simple ops that are processed
		 * right now have dropped their locks.
		 */
		sem_wait_array(sma);
		return -1;
	}

	/*
	 * Only one semaphore affected - try to optimize locking.
	 * The rules are:
	 * - optimized locking is possible if no complex operation
	 *   is either enqueued or processed right now.
	 * - The test for enqueued complex ops is simple:
	 *      sma->complex_count != 0
	 * - Testing for complex ops that are processed right now is
	 *   a bit more difficult. Complex ops acquire the full lock
	 *   and first wait that the running simple ops have completed.
	 *   (see above)
	 *   Thus: If we own a simple lock and the global lock is free
	 *	and complex_count is now 0, then it will stay 0 and
	 *	thus the simple lock is still sufficient.
	 */
	sem = sma->sem_base + sops->sem_num;

	if (sma->complex_count == 0) {
		/*
		 * It appears that no complex operation is around.
		 * Acquire the per-semaphore lock.
		 */
		spin_lock(&sem->lock);

		/* Then check that the global lock is free */
		if (!spin_is_locked(&sma->sem_perm.lock)) {
			/* spin_is_locked() is not a memory barrier */
			smp_mb();

			/* Now repeat the test of complex_count:
			 * It can't change anymore until we drop sem->lock.
			 * Thus: if is now 0, then it will stay 0.
			 */
			if (sma->complex_count == 0) {
				/* fast path successful! */
				return sops->sem_num;
			}
		}
		spin_unlock(&sem->lock);
	}

	/* slow path: acquire the full lock */
	ipc_lock_object(&sma->sem_perm);

	if (sma->complex_count == 0) {
		/* False alarm:
		 * There is no complex operation, thus we can switch
		 * back to the fast path.
		 */
		spin_lock(&sem->lock);
		ipc_unlock_object(&sma->sem_perm);
		return sops->sem_num;
	} else {
		/* Not a false alarm, thus complete the sequence for a
		 * full lock.
		 */
		sem_wait_array(sma);
		return -1;
	}
}
363
/*
 * Drop whatever sem_lock() took: the global array lock (locknum == -1,
 * after moving queued simple ops back to their per-semaphore queues)
 * or the per-semaphore lock at index @locknum.
 */
static inline void sem_unlock(struct sem_array *sma, int locknum)
{
	if (locknum == -1) {
		unmerge_queues(sma);
		ipc_unlock_object(&sma->sem_perm);
	} else {
		struct sem *sem = sma->sem_base + locknum;
		spin_unlock(&sem->lock);
	}
}
374
375
376
377
378
379
380
/*
 * sem_lock_(check_) routines are called in the paths where the rwsem
 * is not held.
 *
 * The caller holds the RCU read lock.
 */
static inline struct sem_array *sem_obtain_lock(struct ipc_namespace *ns,
			int id, struct sembuf *sops, int nsops, int *locknum)
{
	struct kern_ipc_perm *ipcp;
	struct sem_array *sma;

	ipcp = ipc_obtain_object(&sem_ids(ns), id);
	if (IS_ERR(ipcp))
		return ERR_CAST(ipcp);

	sma = container_of(ipcp, struct sem_array, sem_perm);
	*locknum = sem_lock(sma, sops, nsops);

	/* ipc_rmid() may have already freed the ID while sem_lock
	 * was spinning: verify that the structure is still valid
	 */
	if (ipc_valid_object(ipcp))
		return container_of(ipcp, struct sem_array, sem_perm);

	sem_unlock(sma, *locknum);
	return ERR_PTR(-EINVAL);
}
403
404static inline struct sem_array *sem_obtain_object(struct ipc_namespace *ns, int id)
405{
406 struct kern_ipc_perm *ipcp = ipc_obtain_object(&sem_ids(ns), id);
407
408 if (IS_ERR(ipcp))
409 return ERR_CAST(ipcp);
410
411 return container_of(ipcp, struct sem_array, sem_perm);
412}
413
414static inline struct sem_array *sem_obtain_object_check(struct ipc_namespace *ns,
415 int id)
416{
417 struct kern_ipc_perm *ipcp = ipc_obtain_object_check(&sem_ids(ns), id);
418
419 if (IS_ERR(ipcp))
420 return ERR_CAST(ipcp);
421
422 return container_of(ipcp, struct sem_array, sem_perm);
423}
424
/* Re-acquire the array lock and drop the reference taken by ipc_rcu_getref(). */
static inline void sem_lock_and_putref(struct sem_array *sma)
{
	sem_lock(sma, NULL, -1);
	ipc_rcu_putref(sma, ipc_rcu_free);
}
430
/* Remove the set from the namespace's id table; caller holds the array lock. */
static inline void sem_rmid(struct ipc_namespace *ns, struct sem_array *s)
{
	ipc_rmid(&sem_ids(ns), &s->sem_perm);
}
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468#define IN_WAKEUP 1
469
470
471
472
473
474
475
476
477
478static int newary(struct ipc_namespace *ns, struct ipc_params *params)
479{
480 int id;
481 int retval;
482 struct sem_array *sma;
483 int size;
484 key_t key = params->key;
485 int nsems = params->u.nsems;
486 int semflg = params->flg;
487 int i;
488
489 if (!nsems)
490 return -EINVAL;
491 if (ns->used_sems + nsems > ns->sc_semmns)
492 return -ENOSPC;
493
494 size = sizeof (*sma) + nsems * sizeof (struct sem);
495 sma = ipc_rcu_alloc(size);
496 if (!sma) {
497 return -ENOMEM;
498 }
499 memset (sma, 0, size);
500
501 sma->sem_perm.mode = (semflg & S_IRWXUGO);
502 sma->sem_perm.key = key;
503
504 sma->sem_perm.security = NULL;
505 retval = security_sem_alloc(sma);
506 if (retval) {
507 ipc_rcu_putref(sma, ipc_rcu_free);
508 return retval;
509 }
510
511 id = ipc_addid(&sem_ids(ns), &sma->sem_perm, ns->sc_semmni);
512 if (id < 0) {
513 ipc_rcu_putref(sma, sem_rcu_free);
514 return id;
515 }
516 ns->used_sems += nsems;
517
518 sma->sem_base = (struct sem *) &sma[1];
519
520 for (i = 0; i < nsems; i++) {
521 INIT_LIST_HEAD(&sma->sem_base[i].pending_alter);
522 INIT_LIST_HEAD(&sma->sem_base[i].pending_const);
523 spin_lock_init(&sma->sem_base[i].lock);
524 }
525
526 sma->complex_count = 0;
527 INIT_LIST_HEAD(&sma->pending_alter);
528 INIT_LIST_HEAD(&sma->pending_const);
529 INIT_LIST_HEAD(&sma->list_id);
530 sma->sem_nsems = nsems;
531 sma->sem_ctime = get_seconds();
532 sem_unlock(sma, -1);
533 rcu_read_unlock();
534
535 return sma->sem_perm.id;
536}
537
538
539
540
541
/*
 * Called with sem_ids.rwsem and ipcp locked.
 * LSM hook for semget() on an existing set.
 */
static inline int sem_security(struct kern_ipc_perm *ipcp, int semflg)
{
	struct sem_array *sma;

	sma = container_of(ipcp, struct sem_array, sem_perm);
	return security_sem_associate(sma, semflg);
}
549
550
551
552
/*
 * Called with sem_ids.rwsem and ipcp locked.
 * semget() on an existing set must not request more semaphores
 * than the set contains.
 */
static inline int sem_more_checks(struct kern_ipc_perm *ipcp,
				struct ipc_params *params)
{
	struct sem_array *sma;

	sma = container_of(ipcp, struct sem_array, sem_perm);
	if (params->u.nsems > sma->sem_nsems)
		return -EINVAL;

	return 0;
}
564
/*
 * semget(2): get or create a semaphore set identified by @key.
 * nsems is bounded by the per-namespace SEMMSL sysctl; the shared
 * ipcget() helper handles key lookup, creation and permission checks
 * via the ops set up here.
 */
SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
{
	struct ipc_namespace *ns;
	struct ipc_ops sem_ops;
	struct ipc_params sem_params;

	ns = current->nsproxy->ipc_ns;

	if (nsems < 0 || nsems > ns->sc_semmsl)
		return -EINVAL;

	sem_ops.getnew = newary;
	sem_ops.associate = sem_security;
	sem_ops.more_checks = sem_more_checks;

	sem_params.key = key;
	sem_params.flg = semflg;
	sem_params.u.nsems = nsems;

	return ipcget(ns, &sem_ids(ns), &sem_ops, &sem_params);
}
586
587
588
589
590
591
592
593
594
595
596
597
598
/**
 * perform_atomic_semop - Perform (if possible) a semaphore operation
 * @sma: semaphore array
 * @sops: array with operations that should be checked
 * @nsops: number of operations
 * @un: undo array
 * @pid: pid that did the change
 *
 * Returns 0 if the operation was possible.
 * Returns 1 if the operation is impossible, the caller must sleep.
 * Negative values are error codes.
 *
 * The first loop applies semval changes tentatively; on any failure the
 * undo path below rolls back exactly the ops already applied. sempid and
 * semadj are only committed once the whole transaction has succeeded.
 */
static int perform_atomic_semop(struct sem_array *sma, struct sembuf *sops,
			     int nsops, struct sem_undo *un, int pid)
{
	int result, sem_op;
	struct sembuf *sop;
	struct sem * curr;

	for (sop = sops; sop < sops + nsops; sop++) {
		curr = sma->sem_base + sop->sem_num;
		sem_op = sop->sem_op;
		result = curr->semval;

		/* sem_op == 0 is "wait for zero": blocks while semval != 0 */
		if (!sem_op && result)
			goto would_block;

		result += sem_op;
		if (result < 0)
			goto would_block;
		if (result > SEMVMX)
			goto out_of_range;
		if (sop->sem_flg & SEM_UNDO) {
			int undo = un->semadj[sop->sem_num] - sem_op;
			/*
			 *	Exceeding the undo range is an error.
			 */
			if (undo < (-SEMAEM - 1) || undo > SEMAEM)
				goto out_of_range;
		}
		curr->semval = result;
	}

	/* All ops succeeded: commit sempid and undo adjustments, last-to-first. */
	sop--;
	while (sop >= sops) {
		sma->sem_base[sop->sem_num].sempid = pid;
		if (sop->sem_flg & SEM_UNDO)
			un->semadj[sop->sem_num] -= sop->sem_op;
		sop--;
	}

	return 0;

out_of_range:
	result = -ERANGE;
	goto undo;

would_block:
	if (sop->sem_flg & IPC_NOWAIT)
		result = -EAGAIN;
	else
		result = 1;

undo:
	/* Roll back semval for every op that was already applied.
	 * (The op *sop itself failed, so it was not applied.) */
	sop--;
	while (sop >= sops) {
		sma->sem_base[sop->sem_num].semval -= sop->sem_op;
		sop--;
	}

	return result;
}
659
660
661
662
663
664
665
/** wake_up_sem_queue_prepare(q, error): Prepare wake-up
 * @q: queue entry that must be signaled
 * @error: Error value for the signal
 *
 * Prepare the wake-up of the queue entry q.
 */
static void wake_up_sem_queue_prepare(struct list_head *pt,
				struct sem_queue *q, int error)
{
	if (list_empty(pt)) {
		/*
		 * Hold preempt off so that we don't get preempted and have the
		 * wakee busy-wait until we're scheduled back on.
		 */
		preempt_disable();
	}
	q->status = IN_WAKEUP;
	q->pid = error;		/* stash the result; published as status later */

	list_add_tail(&q->list, pt);
}
681
682
683
684
685
686
687
688
689
690
/**
 * wake_up_sem_queue_do(pt) - do the actual wake-up
 * @pt: list of tasks to be woken up
 *
 * Do the actual wake-up.
 * The function is called without any locks held, thus the semaphore array
 * could be destroyed already and the tasks can disappear as soon as the
 * status is set to the actual return code.
 */
static void wake_up_sem_queue_do(struct list_head *pt)
{
	struct sem_queue *q, *t;
	int did_something;

	did_something = !list_empty(pt);
	list_for_each_entry_safe(q, t, pt, list) {
		wake_up_process(q->sleeper);
		/* q can disappear immediately after writing q->status. */
		smp_wmb();
		q->status = q->pid;
	}
	if (did_something)
		preempt_enable();
}
706
707static void unlink_queue(struct sem_array *sma, struct sem_queue *q)
708{
709 list_del(&q->list);
710 if (q->nsops > 1)
711 sma->complex_count--;
712}
713
714
715
716
717
718
719
720
721
722
723
/** check_restart(sma, q)
 * @sma: semaphore array
 * @q: the operation that just completed
 *
 * update_queue is O(N^2) when it restarts scanning the whole queue of
 * waiting operations. Therefore this function checks if the restart is
 * really necessary. It is called after a previously waiting operation
 * was completed.
 */
static int check_restart(struct sem_array *sma, struct sem_queue *q)
{
	/* pending complex alter operations are too difficult to analyse */
	if (!list_empty(&sma->pending_alter))
		return 1;

	/* we were a sleeping complex operation. Too difficult */
	if (q->nsops > 1)
		return 1;

	/* It is impossible that someone waits for the new value:
	 * - complex operations always restart.
	 * - wait-for-zero are handled separately.
	 * - q is a previously sleeping simple operation that
	 *   altered the array. It must be a decrement, because
	 *   simple increments never sleep.
	 * - If there are older (higher priority) decrements
	 *   in the queue, then they have observed the original
	 *   semval value and couldn't proceed. The operation
	 *   decremented to value - thus they won't proceed either.
	 */
	return 0;
}
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
/**
 * wake_const_ops(sma, semnum, pt) - wake up non-alter tasks
 * @sma: semaphore array.
 * @semnum: semaphore that was modified, or -1 for the global queue.
 * @pt: list head for the tasks that must be woken up.
 *
 * wake_const_ops must be called after a semaphore in a semaphore array
 * was set to 0. If complex const operations are pending, they must be
 * checked, too (semnum == -1).
 * The tasks that must be woken up are added to @pt. The return code
 * is stored in q->pid.
 * The function returns 1 if at least one operation was completed successfully.
 */
static int wake_const_ops(struct sem_array *sma, int semnum,
				struct list_head *pt)
{
	struct sem_queue *q;
	struct list_head *walk;
	struct list_head *pending_list;
	int semop_completed = 0;

	if (semnum == -1)
		pending_list = &sma->pending_const;
	else
		pending_list = &sma->sem_base[semnum].pending_const;

	/* manual safe iteration: advance before a possible unlink below */
	walk = pending_list->next;
	while (walk != pending_list) {
		int error;

		q = container_of(walk, struct sem_queue, list);
		walk = walk->next;

		error = perform_atomic_semop(sma, q->sops, q->nsops,
						 q->undo, q->pid);

		if (error <= 0) {
			/* operation completed, remove from queue & wakeup */

			unlink_queue(sma, q);

			wake_up_sem_queue_prepare(pt, q, error);
			if (error == 0)
				semop_completed = 1;
		}
	}
	return semop_completed;
}
797
798
799
800
801
802
803
804
805
806
807
808
809
/**
 * do_smart_wakeup_zero(sma, sops, nsops, pt) - wakeup all wait for zero tasks
 * @sma: semaphore array
 * @sops: operations that were performed
 * @nsops: number of operations
 * @pt: list head of the tasks that must be woken up.
 *
 * do_smart_wakeup_zero() checks all required queue for wait-for-zero
 * operations, based on the actual changes that were performed on the
 * semaphore array.
 * The function returns 1 if at least one operation was completed successfully.
 */
static int do_smart_wakeup_zero(struct sem_array *sma, struct sembuf *sops,
					int nsops, struct list_head *pt)
{
	int i;
	int semop_completed = 0;
	int got_zero = 0;

	/* first: the per-semaphore queues, if known */
	if (sops) {
		for (i = 0; i < nsops; i++) {
			int num = sops[i].sem_num;

			if (sma->sem_base[num].semval == 0) {
				got_zero = 1;
				semop_completed |= wake_const_ops(sma, num, pt);
			}
		}
	} else {
		/*
		 * No sops means modified semaphores not known.
		 * Assume all were changed.
		 */
		for (i = 0; i < sma->sem_nsems; i++) {
			if (sma->sem_base[i].semval == 0) {
				got_zero = 1;
				semop_completed |= wake_const_ops(sma, i, pt);
			}
		}
	}
	/*
	 * If one of the modified semaphores got 0,
	 * then check the global queue, too.
	 */
	if (got_zero)
		semop_completed |= wake_const_ops(sma, -1, pt);

	return semop_completed;
}
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
/**
 * update_queue(sma, semnum): Look for tasks that can be completed.
 * @sma: semaphore array.
 * @semnum: semaphore that was modified, or -1 for the global queue.
 * @pt: list head for the tasks that must be woken up.
 *
 * update_queue must be called after a semaphore in a semaphore array
 * was modified. If multiple semaphores were modified, update_queue must
 * be called with semnum = -1, as well as with the number of each modified
 * semaphore.
 * The tasks that must be woken up are added to @pt. The return code
 * is stored in q->pid.
 * The function internally checks if const operations can now succeed.
 *
 * The function return 1 if at least one semop was completed successfully.
 */
static int update_queue(struct sem_array *sma, int semnum, struct list_head *pt)
{
	struct sem_queue *q;
	struct list_head *walk;
	struct list_head *pending_list;
	int semop_completed = 0;

	if (semnum == -1)
		pending_list = &sma->pending_alter;
	else
		pending_list = &sma->sem_base[semnum].pending_alter;

again:
	/* manual safe iteration: walk is advanced before q may be unlinked */
	walk = pending_list->next;
	while (walk != pending_list) {
		int error, restart;

		q = container_of(walk, struct sem_queue, list);
		walk = walk->next;

		/* If we are scanning the single sop, per-semaphore list of
		 * one semaphore and that semaphore is 0, then it is not
		 * necessary to scan further: simple increments
		 * that affect only one entry succeed immediately and cannot
		 * be in the  per semaphore pending queue, and decrements
		 * cannot be successful if the value is already 0.
		 */
		if (semnum != -1 && sma->sem_base[semnum].semval == 0)
			break;

		error = perform_atomic_semop(sma, q->sops, q->nsops,
					 q->undo, q->pid);

		/* Does q->sleeper still need to sleep? */
		if (error > 0)
			continue;

		unlink_queue(sma, q);

		if (error) {
			restart = 0;
		} else {
			semop_completed = 1;
			do_smart_wakeup_zero(sma, q->sops, q->nsops, pt);
			restart = check_restart(sma, q);
		}

		wake_up_sem_queue_prepare(pt, q, error);
		if (restart)
			goto again;
	}
	return semop_completed;
}
919
920
921
922
923
924
925
926
927
928static void set_semotime(struct sem_array *sma, struct sembuf *sops)
929{
930 if (sops == NULL) {
931 sma->sem_base[0].sem_otime = get_seconds();
932 } else {
933 sma->sem_base[sops[0].sem_num].sem_otime =
934 get_seconds();
935 }
936}
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
/**
 * do_smart_update(sma, sops, nsops, otime, pt) - optimized update_queue
 * @sma: semaphore array
 * @sops: operations that were performed
 * @nsops: number of operations
 * @otime: force setting otime
 * @pt: list head of the tasks that must be woken up.
 *
 * do_smart_update() does the required calls to update_queue and wakeup_zero,
 * based on the actual changes that were performed on the semaphore array.
 * Note that the function does not do the actual wake-up: the caller is
 * responsible for calling wake_up_sem_queue_do(@pt).
 * It is safe to perform this call after dropping all locks.
 */
static void do_smart_update(struct sem_array *sma, struct sembuf *sops, int nsops,
			int otime, struct list_head *pt)
{
	int i;

	otime |= do_smart_wakeup_zero(sma, sops, nsops, pt);

	if (!list_empty(&sma->pending_alter)) {
		/* semaphore array uses the global queue - just process it */
		otime |= update_queue(sma, -1, pt);
	} else {
		if (!sops) {
			/*
			 * No sops, thus the modified semaphores are not
			 * known. Check all.
			 */
			for (i = 0; i < sma->sem_nsems; i++)
				otime |= update_queue(sma, i, pt);
		} else {
			/*
			 * Check the semaphores that were increased:
			 * - No complex ops, thus all sleeping ops are
			 *   decrease.
			 * - if we decreased the value, then any sleeping
			 *   semaphore ops wont be able to run: If the
			 *   previous value was too small, then the new
			 *   value will be too small, too.
			 */
			for (i = 0; i < nsops; i++) {
				if (sops[i].sem_op > 0) {
					otime |= update_queue(sma,
							sops[i].sem_num, pt);
				}
			}
		}
	}
	if (otime)
		set_semotime(sma, sops);
}
991
992
993
994
995
996
997
998
999
1000
/* The following counts are associated to each semaphore:
 *   semncnt        number of tasks waiting on semval being nonzero
 *   semzcnt        number of tasks waiting on semval being zero
 * This model assumes that a task waits on exactly one semaphore.
 * Since semaphore operations are to be performed atomically, tasks actually
 * wait on a whole sequence of semaphores simultaneously.
 * The counts we return here are a rough approximation, but still
 * warrant that semncnt+semzcnt>0 if the task is on the pending queue.
 */
static int count_semncnt (struct sem_array * sma, ushort semnum)
{
	int semncnt;
	struct sem_queue * q;

	semncnt = 0;
	/* simple decrements queued directly on this semaphore */
	list_for_each_entry(q, &sma->sem_base[semnum].pending_alter, list) {
		struct sembuf * sops = q->sops;
		BUG_ON(sops->sem_num != semnum);
		if ((sops->sem_op < 0) && !(sops->sem_flg & IPC_NOWAIT))
			semncnt++;
	}

	/* complex operations on the global queue */
	list_for_each_entry(q, &sma->pending_alter, list) {
		struct sembuf * sops = q->sops;
		int nsops = q->nsops;
		int i;
		for (i = 0; i < nsops; i++)
			if (sops[i].sem_num == semnum
			    && (sops[i].sem_op < 0)
			    && !(sops[i].sem_flg & IPC_NOWAIT))
				semncnt++;
	}
	return semncnt;
}
1026
1027static int count_semzcnt (struct sem_array * sma, ushort semnum)
1028{
1029 int semzcnt;
1030 struct sem_queue * q;
1031
1032 semzcnt = 0;
1033 list_for_each_entry(q, &sma->sem_base[semnum].pending_const, list) {
1034 struct sembuf * sops = q->sops;
1035 BUG_ON(sops->sem_num != semnum);
1036 if ((sops->sem_op == 0) && !(sops->sem_flg & IPC_NOWAIT))
1037 semzcnt++;
1038 }
1039
1040 list_for_each_entry(q, &sma->pending_const, list) {
1041 struct sembuf * sops = q->sops;
1042 int nsops = q->nsops;
1043 int i;
1044 for (i = 0; i < nsops; i++)
1045 if (sops[i].sem_num == semnum
1046 && (sops[i].sem_op == 0)
1047 && !(sops[i].sem_flg & IPC_NOWAIT))
1048 semzcnt++;
1049 }
1050 return semzcnt;
1051}
1052
1053
1054
1055
1056
/* Free a semaphore set. freeary() is called with sem_ids.rwsem locked
 * as a writer and the spinlock for this semaphore set hold. sem_ids.rwsem
 * remains locked on exit.
 */
static void freeary(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
{
	struct sem_undo *un, *tu;
	struct sem_queue *q, *tq;
	struct sem_array *sma = container_of(ipcp, struct sem_array, sem_perm);
	struct list_head tasks;
	int i;

	/* Free the existing undo structures for this semaphore set.  */
	ipc_assert_locked_object(&sma->sem_perm);
	list_for_each_entry_safe(un, tu, &sma->list_id, list_id) {
		list_del(&un->list_id);
		spin_lock(&un->ulp->lock);
		un->semid = -1;		/* marks the undo as dead for exit_sem() */
		list_del_rcu(&un->list_proc);
		spin_unlock(&un->ulp->lock);
		kfree_rcu(un, rcu);
	}

	/* Wake up all pending processes and let them fail with EIDRM. */
	INIT_LIST_HEAD(&tasks);
	list_for_each_entry_safe(q, tq, &sma->pending_const, list) {
		unlink_queue(sma, q);
		wake_up_sem_queue_prepare(&tasks, q, -EIDRM);
	}

	list_for_each_entry_safe(q, tq, &sma->pending_alter, list) {
		unlink_queue(sma, q);
		wake_up_sem_queue_prepare(&tasks, q, -EIDRM);
	}
	for (i = 0; i < sma->sem_nsems; i++) {
		struct sem *sem = sma->sem_base + i;
		list_for_each_entry_safe(q, tq, &sem->pending_const, list) {
			unlink_queue(sma, q);
			wake_up_sem_queue_prepare(&tasks, q, -EIDRM);
		}
		list_for_each_entry_safe(q, tq, &sem->pending_alter, list) {
			unlink_queue(sma, q);
			wake_up_sem_queue_prepare(&tasks, q, -EIDRM);
		}
	}

	/* Remove the semaphore set from the IDR */
	sem_rmid(ns, sma);
	sem_unlock(sma, -1);
	rcu_read_unlock();

	/* Actual wake-ups must happen without any lock held. */
	wake_up_sem_queue_do(&tasks);
	ns->used_sems -= sma->sem_nsems;
	ipc_rcu_putref(sma, sem_rcu_free);
}
1108
/*
 * Copy a semid64_ds to user space, converting to the old semid_ds
 * layout when the caller used the IPC_OLD interface.
 * Returns nonzero on fault, -EINVAL on unknown version.
 */
static unsigned long copy_semid_to_user(void __user *buf, struct semid64_ds *in, int version)
{
	switch(version) {
	case IPC_64:
		return copy_to_user(buf, in, sizeof(*in));
	case IPC_OLD:
	    {
		struct semid_ds out;

		memset(&out, 0, sizeof(out));

		ipc64_perm_to_ipc_perm(&in->sem_perm, &out.sem_perm);

		out.sem_otime	= in->sem_otime;
		out.sem_ctime	= in->sem_ctime;
		out.sem_nsems	= in->sem_nsems;

		return copy_to_user(buf, &out, sizeof(out));
	    }
	default:
		return -EINVAL;
	}
}
1132
1133static time_t get_semotime(struct sem_array *sma)
1134{
1135 int i;
1136 time_t res;
1137
1138 res = sma->sem_base[0].sem_otime;
1139 for (i = 1; i < sma->sem_nsems; i++) {
1140 time_t to = sma->sem_base[i].sem_otime;
1141
1142 if (to > res)
1143 res = to;
1144 }
1145 return res;
1146}
1147
/*
 * semctl() commands that need neither the array lock nor the rwsem
 * held as a writer: IPC_INFO/SEM_INFO (namespace limits/usage) and
 * IPC_STAT/SEM_STAT (per-set status, read under RCU only).
 * Returns max_id (INFO), the set id (STAT), or a negative errno.
 */
static int semctl_nolock(struct ipc_namespace *ns, int semid,
			 int cmd, int version, void __user *p)
{
	int err;
	struct sem_array *sma;

	switch(cmd) {
	case IPC_INFO:
	case SEM_INFO:
	{
		struct seminfo seminfo;
		int max_id;

		err = security_sem_semctl(NULL, cmd);
		if (err)
			return err;

		memset(&seminfo,0,sizeof(seminfo));
		seminfo.semmni = ns->sc_semmni;
		seminfo.semmns = ns->sc_semmns;
		seminfo.semmsl = ns->sc_semmsl;
		seminfo.semopm = ns->sc_semopm;
		seminfo.semvmx = SEMVMX;
		seminfo.semmnu = SEMMNU;
		seminfo.semmap = SEMMAP;
		seminfo.semume = SEMUME;
		down_read(&sem_ids(ns).rwsem);
		if (cmd == SEM_INFO) {
			/* SEM_INFO reports actual usage instead of limits */
			seminfo.semusz = sem_ids(ns).in_use;
			seminfo.semaem = ns->used_sems;
		} else {
			seminfo.semusz = SEMUSZ;
			seminfo.semaem = SEMAEM;
		}
		max_id = ipc_get_maxid(&sem_ids(ns));
		up_read(&sem_ids(ns).rwsem);
		if (copy_to_user(p, &seminfo, sizeof(struct seminfo)))
			return -EFAULT;
		return (max_id < 0) ? 0: max_id;
	}
	case IPC_STAT:
	case SEM_STAT:
	{
		struct semid64_ds tbuf;
		int id = 0;

		memset(&tbuf, 0, sizeof(tbuf));

		rcu_read_lock();
		if (cmd == SEM_STAT) {
			/* SEM_STAT takes an index and returns the id */
			sma = sem_obtain_object(ns, semid);
			if (IS_ERR(sma)) {
				err = PTR_ERR(sma);
				goto out_unlock;
			}
			id = sma->sem_perm.id;
		} else {
			sma = sem_obtain_object_check(ns, semid);
			if (IS_ERR(sma)) {
				err = PTR_ERR(sma);
				goto out_unlock;
			}
		}

		err = -EACCES;
		if (ipcperms(ns, &sma->sem_perm, S_IRUGO))
			goto out_unlock;

		err = security_sem_semctl(sma, cmd);
		if (err)
			goto out_unlock;

		kernel_to_ipc64_perm(&sma->sem_perm, &tbuf.sem_perm);
		tbuf.sem_otime = get_semotime(sma);
		tbuf.sem_ctime = sma->sem_ctime;
		tbuf.sem_nsems = sma->sem_nsems;
		rcu_read_unlock();
		if (copy_semid_to_user(p, &tbuf, version))
			return -EFAULT;
		return id;
	}
	default:
		return -EINVAL;
	}
out_unlock:
	rcu_read_unlock();
	return err;
}
1236
/*
 * semctl(SETVAL): set one semaphore's value.
 * Clears all undo adjustments for @semnum, updates sem_ctime and then
 * re-evaluates the wait queues, since the new value may let sleepers run.
 */
static int semctl_setval(struct ipc_namespace *ns, int semid, int semnum,
		unsigned long arg)
{
	struct sem_undo *un;
	struct sem_array *sma;
	struct sem* curr;
	int err;
	struct list_head tasks;
	int val;
#if defined(CONFIG_64BIT) && defined(__BIG_ENDIAN)
	/* big-endian 64bit */
	val = arg >> 32;
#else
	/* 32bit or little-endian 64bit */
	val = arg;
#endif

	if (val > SEMVMX || val < 0)
		return -ERANGE;

	INIT_LIST_HEAD(&tasks);

	rcu_read_lock();
	sma = sem_obtain_object_check(ns, semid);
	if (IS_ERR(sma)) {
		rcu_read_unlock();
		return PTR_ERR(sma);
	}

	if (semnum < 0 || semnum >= sma->sem_nsems) {
		rcu_read_unlock();
		return -EINVAL;
	}

	/* permission and LSM checks are done without the array lock */
	if (ipcperms(ns, &sma->sem_perm, S_IWUGO)) {
		rcu_read_unlock();
		return -EACCES;
	}

	err = security_sem_semctl(sma, SETVAL);
	if (err) {
		rcu_read_unlock();
		return -EACCES;
	}

	sem_lock(sma, NULL, -1);

	if (!ipc_valid_object(&sma->sem_perm)) {
		sem_unlock(sma, -1);
		rcu_read_unlock();
		return -EIDRM;
	}

	curr = &sma->sem_base[semnum];

	/* SETVAL invalidates all undo adjustments for this semaphore */
	ipc_assert_locked_object(&sma->sem_perm);
	list_for_each_entry(un, &sma->list_id, list_id)
		un->semadj[semnum] = 0;

	curr->semval = val;
	curr->sempid = task_tgid_vnr(current);
	sma->sem_ctime = get_seconds();
	/* maybe some queued-up processes were waiting for this */
	do_smart_update(sma, NULL, 0, 0, &tasks);
	sem_unlock(sma, -1);
	rcu_read_unlock();
	wake_up_sem_queue_do(&tasks);
	return 0;
}
1307
/*
 * semctl() GETALL/SETALL/GETVAL/GETPID/GETNCNT/GETZCNT.
 *
 * GETALL/SETALL may need a buffer larger than SEMMSL_FAST; in that case
 * the array lock must be dropped around the allocation/copy, so a
 * reference is pinned with ipc_rcu_getref() and the set is re-validated
 * with ipc_valid_object() after re-locking.
 */
static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
		int cmd, void __user *p)
{
	struct sem_array *sma;
	struct sem* curr;
	int err, nsems;
	ushort fast_sem_io[SEMMSL_FAST];
	ushort* sem_io = fast_sem_io;
	struct list_head tasks;

	INIT_LIST_HEAD(&tasks);

	rcu_read_lock();
	sma = sem_obtain_object_check(ns, semid);
	if (IS_ERR(sma)) {
		rcu_read_unlock();
		return PTR_ERR(sma);
	}

	nsems = sma->sem_nsems;

	err = -EACCES;
	if (ipcperms(ns, &sma->sem_perm, cmd == SETALL ? S_IWUGO : S_IRUGO))
		goto out_rcu_wakeup;

	err = security_sem_semctl(sma, cmd);
	if (err)
		goto out_rcu_wakeup;

	err = -EACCES;
	switch (cmd) {
	case GETALL:
	{
		ushort __user *array = p;
		int i;

		sem_lock(sma, NULL, -1);
		if (!ipc_valid_object(&sma->sem_perm)) {
			err = -EIDRM;
			goto out_unlock;
		}
		if(nsems > SEMMSL_FAST) {
			/* need a heap buffer: pin the set, drop the lock */
			if (!ipc_rcu_getref(sma)) {
				err = -EIDRM;
				goto out_unlock;
			}
			sem_unlock(sma, -1);
			rcu_read_unlock();
			sem_io = ipc_alloc(sizeof(ushort)*nsems);
			if(sem_io == NULL) {
				ipc_rcu_putref(sma, ipc_rcu_free);
				return -ENOMEM;
			}

			rcu_read_lock();
			sem_lock_and_putref(sma);
			if (!ipc_valid_object(&sma->sem_perm)) {
				err = -EIDRM;
				goto out_unlock;
			}
		}
		for (i = 0; i < sma->sem_nsems; i++)
			sem_io[i] = sma->sem_base[i].semval;
		sem_unlock(sma, -1);
		rcu_read_unlock();
		err = 0;
		if(copy_to_user(array, sem_io, nsems*sizeof(ushort)))
			err = -EFAULT;
		goto out_free;
	}
	case SETALL:
	{
		int i;
		struct sem_undo *un;

		if (!ipc_rcu_getref(sma)) {
			err = -EIDRM;
			goto out_rcu_wakeup;
		}
		rcu_read_unlock();

		if(nsems > SEMMSL_FAST) {
			sem_io = ipc_alloc(sizeof(ushort)*nsems);
			if(sem_io == NULL) {
				ipc_rcu_putref(sma, ipc_rcu_free);
				return -ENOMEM;
			}
		}

		/* copy and validate user values before taking any lock */
		if (copy_from_user (sem_io, p, nsems*sizeof(ushort))) {
			ipc_rcu_putref(sma, ipc_rcu_free);
			err = -EFAULT;
			goto out_free;
		}

		for (i = 0; i < nsems; i++) {
			if (sem_io[i] > SEMVMX) {
				ipc_rcu_putref(sma, ipc_rcu_free);
				err = -ERANGE;
				goto out_free;
			}
		}
		rcu_read_lock();
		sem_lock_and_putref(sma);
		if (!ipc_valid_object(&sma->sem_perm)) {
			err = -EIDRM;
			goto out_unlock;
		}

		for (i = 0; i < nsems; i++)
			sma->sem_base[i].semval = sem_io[i];

		/* SETALL invalidates all undo adjustments */
		ipc_assert_locked_object(&sma->sem_perm);
		list_for_each_entry(un, &sma->list_id, list_id) {
			for (i = 0; i < nsems; i++)
				un->semadj[i] = 0;
		}
		sma->sem_ctime = get_seconds();
		/* maybe some queued-up processes were waiting for this */
		do_smart_update(sma, NULL, 0, 0, &tasks);
		err = 0;
		goto out_unlock;
	}
	/* GETVAL, GETPID, GETNCTN, GETZCNT: fall-through */
	}
	err = -EINVAL;
	if (semnum < 0 || semnum >= nsems)
		goto out_rcu_wakeup;

	sem_lock(sma, NULL, -1);
	if (!ipc_valid_object(&sma->sem_perm)) {
		err = -EIDRM;
		goto out_unlock;
	}
	curr = &sma->sem_base[semnum];

	switch (cmd) {
	case GETVAL:
		err = curr->semval;
		goto out_unlock;
	case GETPID:
		err = curr->sempid;
		goto out_unlock;
	case GETNCNT:
		err = count_semncnt(sma,semnum);
		goto out_unlock;
	case GETZCNT:
		err = count_semzcnt(sma,semnum);
		goto out_unlock;
	}

out_unlock:
	sem_unlock(sma, -1);
out_rcu_wakeup:
	rcu_read_unlock();
	wake_up_sem_queue_do(&tasks);
out_free:
	if(sem_io != fast_sem_io)
		ipc_free(sem_io, sizeof(ushort)*nsems);
	return err;
}
1469
/*
 * Copy a semid64_ds from user space, converting from the old semid_ds
 * layout when the caller used the IPC_OLD interface. Only the fields
 * IPC_SET actually uses (uid/gid/mode) are converted for IPC_OLD.
 */
static inline unsigned long
copy_semid_from_user(struct semid64_ds *out, void __user *buf, int version)
{
	switch(version) {
	case IPC_64:
		if (copy_from_user(out, buf, sizeof(*out)))
			return -EFAULT;
		return 0;
	case IPC_OLD:
	    {
		struct semid_ds tbuf_old;

		if(copy_from_user(&tbuf_old, buf, sizeof(tbuf_old)))
			return -EFAULT;

		out->sem_perm.uid	= tbuf_old.sem_perm.uid;
		out->sem_perm.gid	= tbuf_old.sem_perm.gid;
		out->sem_perm.mode	= tbuf_old.sem_perm.mode;

		return 0;
	    }
	default:
		return -EINVAL;
	}
}
1495
1496
1497
1498
1499
1500
/*
 * This function handles some semctl commands which require the rwsem
 * to be held in write mode.
 * NOTE: no locks must be held, the rwsem is taken inside this function.
 */
static int semctl_down(struct ipc_namespace *ns, int semid,
		       int cmd, int version, void __user *p)
{
	struct sem_array *sma;
	int err;
	struct semid64_ds semid64;
	struct kern_ipc_perm *ipcp;

	if(cmd == IPC_SET) {
		if (copy_semid_from_user(&semid64, p, version))
			return -EFAULT;
	}

	down_write(&sem_ids(ns).rwsem);
	rcu_read_lock();

	/* checks ownership/capability for IPC_RMID/IPC_SET */
	ipcp = ipcctl_pre_down_nolock(ns, &sem_ids(ns), semid, cmd,
				      &semid64.sem_perm, 0);
	if (IS_ERR(ipcp)) {
		err = PTR_ERR(ipcp);
		goto out_unlock1;
	}

	sma = container_of(ipcp, struct sem_array, sem_perm);

	err = security_sem_semctl(sma, cmd);
	if (err)
		goto out_unlock1;

	switch (cmd) {
	case IPC_RMID:
		sem_lock(sma, NULL, -1);
		/* freeary unlocks the ipc object and rcu */
		freeary(ns, ipcp);
		goto out_up;
	case IPC_SET:
		sem_lock(sma, NULL, -1);
		err = ipc_update_perm(&semid64.sem_perm, ipcp);
		if (err)
			goto out_unlock0;
		sma->sem_ctime = get_seconds();
		break;
	default:
		err = -EINVAL;
		goto out_unlock1;
	}

out_unlock0:
	sem_unlock(sma, -1);
out_unlock1:
	rcu_read_unlock();
out_up:
	up_write(&sem_ids(ns).rwsem);
	return err;
}
1556
/*
 * semctl(2) entry point: decode the IPC_64/IPC_OLD version bit from cmd
 * and dispatch to the helper that takes the appropriate locks.
 */
SYSCALL_DEFINE4(semctl, int, semid, int, semnum, int, cmd, unsigned long, arg)
{
	int version;
	struct ipc_namespace *ns;
	void __user *p = (void __user *)arg;

	if (semid < 0)
		return -EINVAL;

	version = ipc_parse_version(&cmd);
	ns = current->nsproxy->ipc_ns;

	switch(cmd) {
	case IPC_INFO:
	case SEM_INFO:
	case IPC_STAT:
	case SEM_STAT:
		return semctl_nolock(ns, semid, cmd, version, p);
	case GETALL:
	case GETVAL:
	case GETPID:
	case GETNCNT:
	case GETZCNT:
	case SETALL:
		return semctl_main(ns, semid, semnum, cmd, p);
	case SETVAL:
		/* SETVAL passes the value in arg itself, not via p */
		return semctl_setval(ns, semid, semnum, arg);
	case IPC_RMID:
	case IPC_SET:
		return semctl_down(ns, semid, cmd, version, p);
	default:
		return -EINVAL;
	}
}
1591
1592
1593
1594
1595
1596
1597
1598
1599
1600
1601
1602
/* If the task doesn't already have a undo_list, then allocate one
 * here.  We guarantee there is only one thread using this undo list,
 * and current is THE ONE
 *
 * If this allocation and assignment succeeds, but later
 * portions of this code fail, there is no need to free the sem_undo_list.
 * Just let it stay associated with the task, and it'll be freed later
 * at exit time.
 *
 * This can block, so callers must hold no locks.
 */
static inline int get_undo_list(struct sem_undo_list **undo_listp)
{
	struct sem_undo_list *undo_list;

	undo_list = current->sysvsem.undo_list;
	if (!undo_list) {
		undo_list = kzalloc(sizeof(*undo_list), GFP_KERNEL);
		if (undo_list == NULL)
			return -ENOMEM;
		spin_lock_init(&undo_list->lock);
		atomic_set(&undo_list->refcnt, 1);
		INIT_LIST_HEAD(&undo_list->list_proc);

		current->sysvsem.undo_list = undo_list;
	}
	*undo_listp = undo_list;
	return 0;
}
1621
1622static struct sem_undo *__lookup_undo(struct sem_undo_list *ulp, int semid)
1623{
1624 struct sem_undo *un;
1625
1626 list_for_each_entry_rcu(un, &ulp->list_proc, list_proc) {
1627 if (un->semid == semid)
1628 return un;
1629 }
1630 return NULL;
1631}
1632
/*
 * Like __lookup_undo(), but additionally move the found entry to the
 * front of the list as an MRU cache (done as del+add so that concurrent
 * RCU readers always see a consistent list).
 */
static struct sem_undo *lookup_undo(struct sem_undo_list *ulp, int semid)
{
	struct sem_undo *un;

	assert_spin_locked(&ulp->lock);

	un = __lookup_undo(ulp, semid);
	if (un) {
		list_del_rcu(&un->list_proc);
		list_add_rcu(&un->list_proc, &ulp->list_proc);
	}
	return un;
}
1646
1647
1648
1649
1650
1651
1652
1653
1654
1655
1656
1657
/**
 * find_alloc_undo - Lookup (and if not present create) undo array
 * @ns: namespace
 * @semid: semaphore array id
 *
 * The function looks up (and if not present creates) the undo structure.
 * The size of the undo structure depends on the size of the semaphore
 * array, thus the alloc path is not that straightforward.
 * Lifetime-rules: sem_undo is rcu-protected, on success, the function
 * performs a rcu_read_lock().
 */
static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid)
{
	struct sem_array *sma;
	struct sem_undo_list *ulp;
	struct sem_undo *un, *new;
	int nsems, error;

	error = get_undo_list(&ulp);
	if (error)
		return ERR_PTR(error);

	rcu_read_lock();
	spin_lock(&ulp->lock);
	un = lookup_undo(ulp, semid);
	spin_unlock(&ulp->lock);
	if (likely(un!=NULL))
		goto out;

	/* no undo structure around - allocate one. */
	/* step 1: figure out the size of the semaphore array */
	sma = sem_obtain_object_check(ns, semid);
	if (IS_ERR(sma)) {
		rcu_read_unlock();
		return ERR_CAST(sma);
	}

	nsems = sma->sem_nsems;
	if (!ipc_rcu_getref(sma)) {
		rcu_read_unlock();
		un = ERR_PTR(-EIDRM);
		goto out;
	}
	rcu_read_unlock();

	/* step 2: allocate new undo structure */
	new = kzalloc(sizeof(struct sem_undo) + sizeof(short)*nsems, GFP_KERNEL);
	if (!new) {
		ipc_rcu_putref(sma, ipc_rcu_free);
		return ERR_PTR(-ENOMEM);
	}

	/* step 3: Acquire the lock on semaphore array */
	rcu_read_lock();
	sem_lock_and_putref(sma);
	if (!ipc_valid_object(&sma->sem_perm)) {
		sem_unlock(sma, -1);
		rcu_read_unlock();
		kfree(new);
		un = ERR_PTR(-EIDRM);
		goto out;
	}
	spin_lock(&ulp->lock);

	/*
	 * step 4: check for races: did someone else allocate the undo struct?
	 */
	un = lookup_undo(ulp, semid);
	if (un) {
		kfree(new);
		goto success;
	}
	/* step 5: initialize & link new undo structure */
	new->semadj = (short *) &new[1];
	new->ulp = ulp;
	new->semid = semid;
	assert_spin_locked(&ulp->lock);
	list_add_rcu(&new->list_proc, &ulp->list_proc);
	ipc_assert_locked_object(&sma->sem_perm);
	list_add(&new->list_id, &sma->list_id);
	un = new;

success:
	spin_unlock(&ulp->lock);
	sem_unlock(sma, -1);
out:
	return un;
}
1735
1736
1737
1738
1739
1740
1741
1742
1743
1744
1745
1746
1747
1748
1749static int get_queue_result(struct sem_queue *q)
1750{
1751 int error;
1752
1753 error = q->status;
1754 while (unlikely(error == IN_WAKEUP)) {
1755 cpu_relax();
1756 error = q->status;
1757 }
1758
1759 return error;
1760}
1761
/*
 * semtimedop(2): perform an array of semaphore operations, optionally
 * sleeping (with timeout) until they can all be applied atomically.
 */
SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
		unsigned, nsops, const struct timespec __user *, timeout)
{
	int error = -EINVAL;
	struct sem_array *sma;
	struct sembuf fast_sops[SEMOPM_FAST];
	struct sembuf* sops = fast_sops, *sop;
	struct sem_undo *un;
	int undos = 0, alter = 0, max, locknum;
	struct sem_queue queue;
	unsigned long jiffies_left = 0;
	struct ipc_namespace *ns;
	struct list_head tasks;

	ns = current->nsproxy->ipc_ns;

	if (nsops < 1 || semid < 0)
		return -EINVAL;
	if (nsops > ns->sc_semopm)
		return -E2BIG;
	/* small op vectors live on the stack; big ones are kmalloc'd */
	if(nsops > SEMOPM_FAST) {
		sops = kmalloc(sizeof(*sops)*nsops,GFP_KERNEL);
		if(sops==NULL)
			return -ENOMEM;
	}
	if (copy_from_user (sops, tsops, nsops * sizeof(*tsops))) {
		error=-EFAULT;
		goto out_free;
	}
	if (timeout) {
		struct timespec _timeout;
		if (copy_from_user(&_timeout, timeout, sizeof(*timeout))) {
			error = -EFAULT;
			goto out_free;
		}
		if (_timeout.tv_sec < 0 || _timeout.tv_nsec < 0 ||
			_timeout.tv_nsec >= 1000000000L) {
			error = -EINVAL;
			goto out_free;
		}
		jiffies_left = timespec_to_jiffies(&_timeout);
	}
	/* scan the ops: highest semaphore index, SEM_UNDO and modify flags */
	max = 0;
	for (sop = sops; sop < sops + nsops; sop++) {
		if (sop->sem_num >= max)
			max = sop->sem_num;
		if (sop->sem_flg & SEM_UNDO)
			undos = 1;
		if (sop->sem_op != 0)
			alter = 1;
	}

	INIT_LIST_HEAD(&tasks);

	if (undos) {
		/* On success find_alloc_undo() returns with rcu_read_lock held */
		un = find_alloc_undo(ns, semid);
		if (IS_ERR(un)) {
			error = PTR_ERR(un);
			goto out_free;
		}
	} else {
		un = NULL;
		rcu_read_lock();
	}

	sma = sem_obtain_object_check(ns, semid);
	if (IS_ERR(sma)) {
		rcu_read_unlock();
		error = PTR_ERR(sma);
		goto out_free;
	}

	error = -EFBIG;
	if (max >= sma->sem_nsems)
		goto out_rcu_wakeup;

	error = -EACCES;
	if (ipcperms(ns, &sma->sem_perm, alter ? S_IWUGO : S_IRUGO))
		goto out_rcu_wakeup;

	error = security_sem_semop(sma, sops, nsops, alter);
	if (error)
		goto out_rcu_wakeup;

	error = -EIDRM;
	locknum = sem_lock(sma, sops, nsops);
	/*
	 * The permission/validity checks above were done without the lock.
	 * Re-check that the array still exists now that we hold it: sem_lock
	 * may only have taken a per-semaphore lock (nsops == 1), but that is
	 * sufficient for the ipc_valid_object() check — see the RMID-race
	 * discussion at sem_lock().
	 */
	if (!ipc_valid_object(&sma->sem_perm))
		goto out_unlock_free;

	/*
	 * semid identifiers are not unique: find_alloc_undo may have
	 * allocated an undo structure, the array was then removed by RMID
	 * and a new array received the same id.  Detect this via
	 * un->semid == -1 (set by freeary); "un" itself stays valid
	 * because it is rcu-protected.
	 */
	if (un && un->semid == -1)
		goto out_unlock_free;

	error = perform_atomic_semop(sma, sops, nsops, un,
					task_tgid_vnr(current));
	if (error == 0) {
		/*
		 * The operation went through immediately: propagate the
		 * update to waiters (if we modified anything) or just
		 * record sem_otime.
		 */
		if (alter)
			do_smart_update(sma, sops, nsops, 1, &tasks);
		else
			set_semotime(sma, sops);
	}
	if (error <= 0)
		goto out_unlock_free;

	/*
	 * The operation cannot proceed right now (error > 0 means "would
	 * block").  Enqueue ourselves on the appropriate pending list and
	 * go to sleep.
	 */
	queue.sops = sops;
	queue.nsops = nsops;
	queue.undo = un;
	queue.pid = task_tgid_vnr(current);
	queue.alter = alter;

	if (nsops == 1) {
		struct sem *curr;
		curr = &sma->sem_base[sops->sem_num];

		if (alter) {
			if (sma->complex_count) {
				/* complex ops pending: queue globally to keep FIFO order */
				list_add_tail(&queue.list,
						&sma->pending_alter);
			} else {
				/* simple op on a single semaphore: per-semaphore queue */
				list_add_tail(&queue.list,
						&curr->pending_alter);
			}
		} else {
			list_add_tail(&queue.list, &curr->pending_const);
		}
	} else {
		/* complex op: fold per-semaphore queues into the global ones */
		if (!sma->complex_count)
			merge_queues(sma);

		if (alter)
			list_add_tail(&queue.list, &sma->pending_alter);
		else
			list_add_tail(&queue.list, &sma->pending_const);

		sma->complex_count++;
	}

	queue.status = -EINTR;
	queue.sleeper = current;

sleep_again:
	current->state = TASK_INTERRUPTIBLE;
	sem_unlock(sma, locknum);
	rcu_read_unlock();

	if (timeout)
		jiffies_left = schedule_timeout(jiffies_left);
	else
		schedule();

	error = get_queue_result(&queue);

	if (error != -EINTR) {
		/*
		 * fast path: update_queue already obtained all requested
		 * resources.
		 * Perform a smp_mb(): user space may assume that semop()
		 * is a memory barrier; without it the cpu could
		 * speculatively read stale user-space data that was
		 * overwritten by the previous owner of the semaphore.
		 */
		smp_mb();

		goto out_free;
	}

	rcu_read_lock();
	sma = sem_obtain_lock(ns, semid, sops, nsops, &locknum);

	/*
	 * Re-read the status under the lock: wait until it is guaranteed
	 * that no wake_up_sem_queue_do() is still touching our queue entry.
	 */
	error = get_queue_result(&queue);

	/*
	 * Array removed?  If yes, leave without sem_unlock().
	 */
	if (IS_ERR(sma)) {
		rcu_read_unlock();
		goto out_free;
	}

	/*
	 * If queue.status != -EINTR we were woken up by another process.
	 * The queue entry was already unlinked by the waker — leave
	 * without unlink_queue(), but with sem_unlock().
	 */
	if (error != -EINTR) {
		goto out_unlock_free;
	}

	/*
	 * An interrupt or timeout occurred: we have to clean up the
	 * queue entry ourselves.
	 */
	if (timeout && jiffies_left == 0)
		error = -EAGAIN;

	/*
	 * If the wakeup was spurious (no signal, no timeout), just retry.
	 */
	if (error == -EINTR && !signal_pending(current))
		goto sleep_again;

	unlink_queue(sma, &queue);

out_unlock_free:
	sem_unlock(sma, locknum);
out_rcu_wakeup:
	rcu_read_unlock();
	wake_up_sem_queue_do(&tasks);
out_free:
	if(sops != fast_sops)
		kfree(sops);
	return error;
}
1999
/* semop(2) is simply semtimedop(2) without a timeout. */
SYSCALL_DEFINE3(semop, int, semid, struct sembuf __user *, tsops,
		unsigned, nsops)
{
	return sys_semtimedop(semid, tsops, nsops, NULL);
}
2005
2006
2007
2008
2009
2010int copy_semundo(unsigned long clone_flags, struct task_struct *tsk)
2011{
2012 struct sem_undo_list *undo_list;
2013 int error;
2014
2015 if (clone_flags & CLONE_SYSVSEM) {
2016 error = get_undo_list(&undo_list);
2017 if (error)
2018 return error;
2019 atomic_inc(&undo_list->refcnt);
2020 tsk->sysvsem.undo_list = undo_list;
2021 } else
2022 tsk->sysvsem.undo_list = NULL;
2023
2024 return 0;
2025}
2026
/*
 * exit_sem - apply semadj values to semaphores and free undo structures
 * when a task exits.
 *
 * Undo structures are not freed when semaphore arrays are destroyed, so
 * some of the entries walked here may be stale (semid == -1).
 *
 * IMPLEMENTATION NOTE: the adjustments are applied immediately and the
 * resulting values are clamped, rather than queuing until the adjustment
 * could be performed "legally" — see the clamping comment below.
 */
void exit_sem(struct task_struct *tsk)
{
	struct sem_undo_list *ulp;

	ulp = tsk->sysvsem.undo_list;
	if (!ulp)
		return;
	tsk->sysvsem.undo_list = NULL;

	/* list is shared with CLONE_SYSVSEM siblings; only the last user cleans up */
	if (!atomic_dec_and_test(&ulp->refcnt))
		return;

	/* process one undo entry per iteration until the list is empty */
	for (;;) {
		struct sem_array *sma;
		struct sem_undo *un;
		struct list_head tasks;
		int semid, i;

		rcu_read_lock();
		un = list_entry_rcu(ulp->list_proc.next,
					struct sem_undo, list_proc);
		if (&un->list_proc == &ulp->list_proc) {
			/*
			 * List empty — done.  Wait for any concurrent
			 * holder of ulp->lock (e.g. freeary() invalidating
			 * an entry) to finish before we free ulp below.
			 */
			spin_unlock_wait(&ulp->lock);
			rcu_read_unlock();
			break;
		}
		spin_lock(&ulp->lock);
		semid = un->semid;
		spin_unlock(&ulp->lock);

		/* exit_sem raced with IPC_RMID: entry invalidated, skip it */
		if (semid == -1) {
			rcu_read_unlock();
			continue;
		}

		sma = sem_obtain_object_check(tsk->nsproxy->ipc_ns, semid);
		/* exit_sem raced with IPC_RMID, nothing to do */
		if (IS_ERR(sma)) {
			rcu_read_unlock();
			continue;
		}

		sem_lock(sma, NULL, -1);
		/* array removed after the lookup above; try the next entry */
		if (!ipc_valid_object(&sma->sem_perm)) {
			sem_unlock(sma, -1);
			rcu_read_unlock();
			continue;
		}
		un = __lookup_undo(ulp, semid);
		if (un == NULL) {
			/*
			 * exit_sem raced with IPC_RMID + semget() that
			 * created the exact same semid.  Nothing to do.
			 */
			sem_unlock(sma, -1);
			rcu_read_unlock();
			continue;
		}

		/* remove un from both linked lists */
		ipc_assert_locked_object(&sma->sem_perm);
		list_del(&un->list_id);

		spin_lock(&ulp->lock);
		list_del_rcu(&un->list_proc);
		spin_unlock(&ulp->lock);

		/* perform the adjustments registered in un */
		for (i = 0; i < sma->sem_nsems; i++) {
			struct sem * semaphore = &sma->sem_base[i];
			if (un->semadj[i]) {
				semaphore->semval += un->semadj[i];
				/*
				 * Range checks of the new semaphore value,
				 * not defined by SUS:
				 * - some unices ignore the undo entirely
				 *   (e.g. HP-UX 11i, Tru64 V5.1)
				 * - some cap the value (e.g. FreeBSD caps
				 *   at 0 but doesn't enforce SEMVMX)
				 *
				 * Linux caps the semaphore value, both at 0
				 * and at SEMVMX.
				 */
				if (semaphore->semval < 0)
					semaphore->semval = 0;
				if (semaphore->semval > SEMVMX)
					semaphore->semval = SEMVMX;
				semaphore->sempid = task_tgid_vnr(current);
			}
		}

		/* the adjustments may have unblocked other waiters */
		INIT_LIST_HEAD(&tasks);
		do_smart_update(sma, NULL, 0, 1, &tasks);
		sem_unlock(sma, -1);
		rcu_read_unlock();
		wake_up_sem_queue_do(&tasks);

		kfree_rcu(un, rcu);
	}
	kfree(ulp);
}
2149
#ifdef CONFIG_PROC_FS
/* Emit one /proc/sysvipc/sem line describing the semaphore array @it. */
static int sysvipc_sem_proc_show(struct seq_file *s, void *it)
{
	struct user_namespace *user_ns = seq_user_ns(s);
	struct sem_array *sma = it;
	time_t sem_otime;

	/*
	 * NOTE(review): the proc iterator presumably holds the ipc object
	 * lock (taken in ipc/util.c) rather than going through sem_lock().
	 * To stay compatible with sem_lock()'s fine-grained scheme, wait
	 * until all simple (per-semaphore) semop() holders have left their
	 * critical sections before reading the per-semaphore otimes.
	 */
	sem_wait_array(sma);

	sem_otime = get_semotime(sma);

	return seq_printf(s,
			  "%10d %10d  %4o %10u %5u %5u %5u %5u %10lu %10lu\n",
			  sma->sem_perm.key,
			  sma->sem_perm.id,
			  sma->sem_perm.mode,
			  sma->sem_nsems,
			  from_kuid_munged(user_ns, sma->sem_perm.uid),
			  from_kgid_munged(user_ns, sma->sem_perm.gid),
			  from_kuid_munged(user_ns, sma->sem_perm.cuid),
			  from_kgid_munged(user_ns, sma->sem_perm.cgid),
			  sem_otime,
			  sma->sem_ctime);
}
#endif
2181