1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75#include <linux/slab.h>
76#include <linux/spinlock.h>
77#include <linux/init.h>
78#include <linux/proc_fs.h>
79#include <linux/time.h>
80#include <linux/security.h>
81#include <linux/syscalls.h>
82#include <linux/audit.h>
83#include <linux/capability.h>
84#include <linux/seq_file.h>
85#include <linux/rwsem.h>
86#include <linux/nsproxy.h>
87#include <linux/ipc_namespace.h>
88
89#include <linux/uaccess.h>
90#include "util.h"
91
92
/* One semaphore structure for each semaphore in the system. */
struct sem {
	int semval;		/* current value */
	/*
	 * PID of the process that last modified the semaphore. For
	 * this implementation that is the task that issued the last
	 * successful semop()/SETVAL/SETALL (see the sempid writes in
	 * perform_atomic_semop(), semctl_setval() and semctl_main()).
	 */
	int sempid;
	spinlock_t lock;	/* per-semaphore lock for single-sop fast path */
	struct list_head pending_alter;	/* pending single-sop operations
					 * that alter this semaphore */
	struct list_head pending_const;	/* pending single-sop operations
					 * that do not alter this semaphore */
	time_t	sem_otime;	/* per-semaphore otime candidate; the array
				 * otime is the max over all semaphores
				 * (see get_semotime()) */
} ____cacheline_aligned_in_smp;
110
111
/* One sem_queue structure for each semaphore operation sleeping in semop(). */
struct sem_queue {
	struct list_head	list;	 /* queue of pending operations */
	struct task_struct	*sleeper; /* the sleeping process */
	struct sem_undo		*undo;	 /* undo structure, or NULL */
	int			pid;	 /* pid of the requesting process */
	int			status;	 /* completion status of the operation */
	struct sembuf		*sops;	 /* array of pending operations */
	struct sembuf		*blocking; /* the operation that blocked */
	int			nsops;	 /* number of operations */
	int			alter;	 /* nonzero if *sops alters the array */
};
123
124
125
126
/*
 * Each task has a list of undo requests. They are executed automatically
 * when the process exits.
 */
struct sem_undo {
	struct list_head	list_proc;	/* per-process list: all undos
						 * of one process; rcu
						 * protected */
	struct rcu_head		rcu;		/* rcu head for kfree_rcu() */
	struct sem_undo_list	*ulp;		/* back ptr to the undo list */
	struct list_head	list_id;	/* per-array list: all undos
						 * for one semaphore array */
	int			semid;		/* semaphore set identifier,
						 * -1 once the set is removed */
	short			*semadj;	/* adjustment array, one entry
						 * per semaphore */
};
139
140
141
142
/*
 * sem_undo_list controls shared access to the undo list that can be
 * shared among tasks (e.g. via CLONE_SYSVSEM — assumption based on the
 * refcnt; confirm against the clone path).
 */
struct sem_undo_list {
	atomic_t		refcnt;		/* number of sharing tasks */
	spinlock_t		lock;		/* protects list_proc */
	struct list_head	list_proc;	/* list of sem_undo structs */
};
148
149
/* Accessor for the semaphore IPC id table of a namespace. */
#define sem_ids(ns)	((ns)->ids[IPC_SEM_IDS])

/* Validate that @semid still refers to @sma. */
#define sem_checkid(sma, semid)	ipc_checkid(&sma->sem_perm, semid)

static int newary(struct ipc_namespace *, struct ipc_params *);
static void freeary(struct ipc_namespace *, struct kern_ipc_perm *);
#ifdef CONFIG_PROC_FS
static int sysvipc_sem_proc_show(struct seq_file *s, void *it);
#endif

/* Thresholds below which on-stack buffers are used instead of allocating. */
#define SEMMSL_FAST	256
#define SEMOPM_FAST	64
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
/* Shorthands for the per-namespace semaphore sysctl limits. */
#define sc_semmsl	sem_ctls[0]	/* max semaphores per array */
#define sc_semmns	sem_ctls[1]	/* max semaphores system wide */
#define sc_semopm	sem_ctls[2]	/* max ops per semop call */
#define sc_semmni	sem_ctls[3]	/* max number of arrays */

/*
 * sem_init_ns - initialize the semaphore state of an IPC namespace.
 * @ns: the namespace being set up
 */
void sem_init_ns(struct ipc_namespace *ns)
{
	ns->sc_semmsl = SEMMSL;
	ns->sc_semmns = SEMMNS;
	ns->sc_semopm = SEMOPM;
	ns->sc_semmni = SEMMNI;
	ns->used_sems = 0;
	ipc_init_ids(&ns->ids[IPC_SEM_IDS]);
}
196
#ifdef CONFIG_IPC_NS
/*
 * sem_exit_ns - tear down the semaphore state of a dying IPC namespace:
 * free every remaining array, then destroy the id radix tree.
 */
void sem_exit_ns(struct ipc_namespace *ns)
{
	free_ipcs(ns, &sem_ids(ns), freeary);
	idr_destroy(&ns->ids[IPC_SEM_IDS].ipcs_idr);
}
#endif
204
/* Boot-time initialization: set up init_ipc_ns and the procfs interface. */
void __init sem_init(void)
{
	sem_init_ns(&init_ipc_ns);
	ipc_init_proc_interface("sysvipc/sem",
				"       key      semid perms      nsems   uid   gid  cuid  cgid      otime      ctime\n",
				IPC_SEM_IDS, sysvipc_sem_proc_show);
}
212
213
214
215
216
217
218
219
/**
 * unmerge_queues - unmerge queues, if possible.
 * @sma: semaphore array
 *
 * Moves every simple (single-sop) operation from the global pending_alter
 * list back to the per-semaphore list of its target semaphore.  Only done
 * when no complex operations remain (complex_count == 0).  Must be called
 * prior to dropping the global semaphore array lock.
 */
static void unmerge_queues(struct sem_array *sma)
{
	struct sem_queue *q, *tq;

	/* complex operations still around? can't split the queues yet */
	if (sma->complex_count)
		return;
	/*
	 * We will switch back to simple mode.
	 * Move all pending operations back into the per-semaphore queues.
	 */
	list_for_each_entry_safe(q, tq, &sma->pending_alter, list) {
		struct sem *curr;
		curr = &sma->sem_base[q->sops[0].sem_num];

		list_add_tail(&q->list, &curr->pending_alter);
	}
	/* list_add_tail re-linked all entries; reset the (stale) list head */
	INIT_LIST_HEAD(&sma->pending_alter);
}
240
241
242
243
244
245
246
247
248
249
/**
 * merge_queues - merge single-sop queues into the global queue
 * @sma: semaphore array
 *
 * This function merges all per-semaphore pending_alter queues into the
 * global pending_alter queue; it is necessary when switching from simple
 * (per-semaphore lock) mode to complex (global lock) mode, so that a
 * single scan observes every pending alter operation.
 */
static void merge_queues(struct sem_array *sma)
{
	int i;
	for (i = 0; i < sma->sem_nsems; i++) {
		struct sem *sem = sma->sem_base + i;

		list_splice_init(&sem->pending_alter, &sma->pending_alter);
	}
}
259
/*
 * RCU callback that releases the security blob and then the array memory
 * itself, once all RCU readers are done with the stale pointer.
 */
static void sem_rcu_free(struct rcu_head *head)
{
	struct ipc_rcu *p = container_of(head, struct ipc_rcu, rcu);
	struct sem_array *sma = ipc_rcu_to_struct(p);

	security_sem_free(sma);
	ipc_rcu_free(head);
}
268
269
270
271
272
/*
 * Enter the mode suitable for complex operations:
 * Caller must own sem_perm.lock.
 */
static void complexmode_enter(struct sem_array *sma)
{
	int i;
	struct sem *sem;

	if (sma->complex_mode) {
		/* We are already in complex_mode. Nothing to do */
		return;
	}

	/*
	 * We need a full barrier after setting complex_mode:
	 * The write to complex_mode must be visible before we read the
	 * first per-semaphore spinlock state below.
	 */
	smp_store_mb(sma->complex_mode, true);

	/* Wait until every holder of a per-semaphore lock has dropped it. */
	for (i = 0; i < sma->sem_nsems; i++) {
		sem = sma->sem_base + i;
		spin_unlock_wait(&sem->lock);
	}
	/*
	 * spin_unlock_wait() is not a memory barrier; pair it with an
	 * smp_rmb() so that reads of protected state cannot be speculated
	 * before the per-semaphore lock holders were observed gone.
	 */
	smp_rmb();
}
302
303
304
305
306
/*
 * Try to leave the mode that disallows simple operations:
 * Caller must own sem_perm.lock.
 */
static void complexmode_tryleave(struct sem_array *sma)
{
	if (sma->complex_count) {
		/*
		 * Complex ops are sleeping; we must stay in complex mode
		 * so their wakeups happen under the global lock.
		 */
		return;
	}
	/*
	 * Release-store pairs with the smp_load_acquire() in sem_lock():
	 * all updates done while in complex mode are visible to a fast-path
	 * locker that observes complex_mode == false.
	 */
	smp_store_release(&sma->complex_mode, false);
}
323
/* Lock token returned by sem_lock() when the global array lock was taken. */
#define SEM_GLOBAL_LOCK (-1)

/*
 * If the request contains only one semaphore operation, and there are
 * no complex transactions pending, lock only the semaphore involved.
 * Otherwise, lock the entire semaphore array, since we either have
 * multiple semaphores in our own semops, or we need to look at
 * semaphores from other pending complex operations.
 *
 * Returns the semaphore number on the fast path, SEM_GLOBAL_LOCK otherwise.
 */
static inline int sem_lock(struct sem_array *sma, struct sembuf *sops,
			      int nsops)
{
	struct sem *sem;

	if (nsops != 1) {
		/* Complex operation - acquire a full lock */
		ipc_lock_object(&sma->sem_perm);

		/* Prevent parallel simple ops */
		complexmode_enter(sma);
		return SEM_GLOBAL_LOCK;
	}

	/*
	 * Only one semaphore affected - try to optimize locking.
	 *
	 * Optimized locking is possible if no complex operation
	 * is either enqueued or processed right now.
	 */
	sem = sma->sem_base + sops->sem_num;

	/*
	 * Initial check for complex_mode. Just an optimization,
	 * no locking, no memory barrier.
	 */
	if (!sma->complex_mode) {
		/*
		 * It appears that no complex operation is around.
		 * Acquire the per-semaphore lock.
		 */
		spin_lock(&sem->lock);

		/*
		 * Full barrier: the spin_lock() must be visible to
		 * complexmode_enter()'s spin_unlock_wait() before we
		 * re-read complex_mode below.
		 */
		smp_mb();

		if (!smp_load_acquire(&sma->complex_mode)) {
			/* fast path successful! */
			return sops->sem_num;
		}
		/* A complex op raced in; fall back to the slow path. */
		spin_unlock(&sem->lock);
	}

	/* slow path: acquire the full lock */
	ipc_lock_object(&sma->sem_perm);

	if (sma->complex_count == 0) {
		/*
		 * The complex operation that forced us here has already
		 * finished; downgrade to the per-semaphore lock and drop
		 * the global lock again.
		 */
		spin_lock(&sem->lock);
		ipc_unlock_object(&sma->sem_perm);
		return sops->sem_num;
	} else {
		/*
		 * Not a false alarm, but complex operations are still
		 * pending: stay on the global lock.
		 */
		complexmode_enter(sma);
		return SEM_GLOBAL_LOCK;
	}
}
400
/*
 * Counterpart of sem_lock(); @locknum is the value sem_lock() returned.
 * When dropping the global lock, try to switch back to per-semaphore
 * locking first (unmerge + complexmode_tryleave).
 */
static inline void sem_unlock(struct sem_array *sma, int locknum)
{
	if (locknum == SEM_GLOBAL_LOCK) {
		unmerge_queues(sma);
		complexmode_tryleave(sma);
		ipc_unlock_object(&sma->sem_perm);
	} else {
		struct sem *sem = sma->sem_base + locknum;
		spin_unlock(&sem->lock);
	}
}
412
413
414
415
416
417
418
/*
 * sem_obtain_lock - look up a semaphore array by id and lock it.
 *
 * The caller holds the RCU read lock.  The validity of the ipc object
 * can only be judged *after* the lock is taken, since a concurrent
 * IPC_RMID may have marked it dead in the meantime.  On success the
 * lock token is stored in *@locknum; on failure ERR_PTR(-EINVAL) (or
 * the idr lookup error) is returned with no lock held.
 */
static inline struct sem_array *sem_obtain_lock(struct ipc_namespace *ns,
			int id, struct sembuf *sops, int nsops, int *locknum)
{
	struct kern_ipc_perm *ipcp;
	struct sem_array *sma;

	ipcp = ipc_obtain_object_idr(&sem_ids(ns), id);
	if (IS_ERR(ipcp))
		return ERR_CAST(ipcp);

	sma = container_of(ipcp, struct sem_array, sem_perm);
	*locknum = sem_lock(sma, sops, nsops);

	/*
	 * ipc_rmid() may have already freed the ID while sem_lock()
	 * was spinning: verify that the structure is still valid.
	 */
	if (ipc_valid_object(ipcp))
		return container_of(ipcp, struct sem_array, sem_perm);

	sem_unlock(sma, *locknum);
	return ERR_PTR(-EINVAL);
}
441
442static inline struct sem_array *sem_obtain_object(struct ipc_namespace *ns, int id)
443{
444 struct kern_ipc_perm *ipcp = ipc_obtain_object_idr(&sem_ids(ns), id);
445
446 if (IS_ERR(ipcp))
447 return ERR_CAST(ipcp);
448
449 return container_of(ipcp, struct sem_array, sem_perm);
450}
451
452static inline struct sem_array *sem_obtain_object_check(struct ipc_namespace *ns,
453 int id)
454{
455 struct kern_ipc_perm *ipcp = ipc_obtain_object_check(&sem_ids(ns), id);
456
457 if (IS_ERR(ipcp))
458 return ERR_CAST(ipcp);
459
460 return container_of(ipcp, struct sem_array, sem_perm);
461}
462
/*
 * Re-acquire the global array lock and drop the reference that kept the
 * array alive while the caller slept/allocated outside the lock.
 */
static inline void sem_lock_and_putref(struct sem_array *sma)
{
	sem_lock(sma, NULL, -1);
	ipc_rcu_putref(sma, sem_rcu_free);
}
468
/* Remove the array's id from the namespace's id table. */
static inline void sem_rmid(struct ipc_namespace *ns, struct sem_array *s)
{
	ipc_rmid(&sem_ids(ns), &s->sem_perm);
}
473
/*
 * Locking summary (as visible in this file):
 *	sem_array.complex_count,
 *	sem_array.complex_mode,
 *	sem_array.pending_{alter,const},
 *	sem_array.list_id:	global sem_lock() for read/write
 *	sem_array.sem_base[i].pending_{const,alter}:
 *				global or per-semaphore sem_lock()
 *	sem_undo_list.list_proc / sem_undo.list_proc:
 *				ulp->lock plus RCU for lockless readers
 */

/*
 * Transient q->status value while a wakeup is in flight: set by
 * wake_up_sem_queue_prepare() and replaced with the final result by
 * wake_up_sem_queue_do(); get_queue_result() spins while it is seen.
 */
#define IN_WAKEUP 1
507
508
509
510
511
512
513
514
/**
 * newary - Create a new semaphore set
 * @ns: namespace
 * @params: ptr to the structure that contains key, semflg and nsems
 *
 * Called with sem_ids.rwsem held (as a writer).
 * Returns the new id on success, a negative errno on failure.
 */
static int newary(struct ipc_namespace *ns, struct ipc_params *params)
{
	int id;
	int retval;
	struct sem_array *sma;
	int size;
	key_t key = params->key;
	int nsems = params->u.nsems;
	int semflg = params->flg;
	int i;

	if (!nsems)
		return -EINVAL;
	if (ns->used_sems + nsems > ns->sc_semmns)
		return -ENOSPC;

	/* the struct sem entries live directly behind the header */
	size = sizeof(*sma) + nsems * sizeof(struct sem);
	sma = ipc_rcu_alloc(size);
	if (!sma)
		return -ENOMEM;

	memset(sma, 0, size);

	sma->sem_perm.mode = (semflg & S_IRWXUGO);
	sma->sem_perm.key = key;

	sma->sem_perm.security = NULL;
	retval = security_sem_alloc(sma);
	if (retval) {
		/* plain ipc_rcu_free: security blob was never attached */
		ipc_rcu_putref(sma, ipc_rcu_free);
		return retval;
	}

	sma->sem_base = (struct sem *) &sma[1];

	for (i = 0; i < nsems; i++) {
		INIT_LIST_HEAD(&sma->sem_base[i].pending_alter);
		INIT_LIST_HEAD(&sma->sem_base[i].pending_const);
		spin_lock_init(&sma->sem_base[i].lock);
	}

	sma->complex_count = 0;
	/*
	 * Start in complex mode; dropped by complexmode_tryleave() when
	 * the global lock is released below (via sem_unlock(sma, -1)).
	 */
	sma->complex_mode = true;
	INIT_LIST_HEAD(&sma->pending_alter);
	INIT_LIST_HEAD(&sma->pending_const);
	INIT_LIST_HEAD(&sma->list_id);
	sma->sem_nsems = nsems;
	sma->sem_ctime = get_seconds();

	/* publish; on success the array is returned locked */
	id = ipc_addid(&sem_ids(ns), &sma->sem_perm, ns->sc_semmni);
	if (id < 0) {
		ipc_rcu_putref(sma, sem_rcu_free);
		return id;
	}
	ns->used_sems += nsems;

	sem_unlock(sma, -1);
	rcu_read_unlock();

	return sma->sem_perm.id;
}
576
577
578
579
580
581static inline int sem_security(struct kern_ipc_perm *ipcp, int semflg)
582{
583 struct sem_array *sma;
584
585 sma = container_of(ipcp, struct sem_array, sem_perm);
586 return security_sem_associate(sma, semflg);
587}
588
589
590
591
592static inline int sem_more_checks(struct kern_ipc_perm *ipcp,
593 struct ipc_params *params)
594{
595 struct sem_array *sma;
596
597 sma = container_of(ipcp, struct sem_array, sem_perm);
598 if (params->u.nsems > sma->sem_nsems)
599 return -EINVAL;
600
601 return 0;
602}
603
/*
 * semget(2): create a new semaphore set or look up an existing one.
 * The creation/lookup logic is shared with the other IPC mechanisms
 * via ipcget() and the sem_ops callback table.
 */
SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
{
	struct ipc_namespace *ns;
	static const struct ipc_ops sem_ops = {
		.getnew = newary,
		.associate = sem_security,
		.more_checks = sem_more_checks,
	};
	struct ipc_params sem_params;

	ns = current->nsproxy->ipc_ns;

	if (nsems < 0 || nsems > ns->sc_semmsl)
		return -EINVAL;

	sem_params.key = key;
	sem_params.flg = semflg;
	sem_params.u.nsems = nsems;

	return ipcget(ns, &sem_ids(ns), &sem_ops, &sem_params);
}
625
626
627
628
629
630
631
632
633
634
/**
 * perform_atomic_semop - Attempt to perform semaphore operations
 * @sma: semaphore array
 * @q: struct sem_queue that describes the operation
 *
 * Returns 0 if the operation was possible (and performed).
 * Returns 1 if the operation is impossible, the caller must sleep.
 * Returns <0 for error codes (-EAGAIN for IPC_NOWAIT, -ERANGE on overflow).
 *
 * The operations are applied in order; on failure every semaphore touched
 * so far is rolled back, so the whole sop array is atomic.
 */
static int perform_atomic_semop(struct sem_array *sma, struct sem_queue *q)
{
	int result, sem_op, nsops, pid;
	struct sembuf *sop;
	struct sem *curr;
	struct sembuf *sops;
	struct sem_undo *un;

	sops = q->sops;
	nsops = q->nsops;
	un = q->undo;

	for (sop = sops; sop < sops + nsops; sop++) {
		curr = sma->sem_base + sop->sem_num;
		sem_op = sop->sem_op;
		result = curr->semval;

		/* wait-for-zero op on a nonzero semaphore must block */
		if (!sem_op && result)
			goto would_block;

		result += sem_op;
		if (result < 0)
			goto would_block;
		if (result > SEMVMX)
			goto out_of_range;

		if (sop->sem_flg & SEM_UNDO) {
			int undo = un->semadj[sop->sem_num] - sem_op;

			/* exceeding the undo range is an error */
			if (undo < (-SEMAEM - 1) || undo > SEMAEM)
				goto out_of_range;
			un->semadj[sop->sem_num] = undo;
		}

		curr->semval = result;
	}

	/* all ops succeeded: record the caller's pid on every semaphore */
	sop--;
	pid = q->pid;
	while (sop >= sops) {
		sma->sem_base[sop->sem_num].sempid = pid;
		sop--;
	}

	return 0;

out_of_range:
	result = -ERANGE;
	goto undo;

would_block:
	q->blocking = sop;

	if (sop->sem_flg & IPC_NOWAIT)
		result = -EAGAIN;
	else
		result = 1;

undo:
	/* roll back everything applied before the failing sop */
	sop--;
	while (sop >= sops) {
		sem_op = sop->sem_op;
		sma->sem_base[sop->sem_num].semval -= sem_op;
		if (sop->sem_flg & SEM_UNDO)
			un->semadj[sop->sem_num] += sem_op;
		sop--;
	}

	return result;
}
705
706
707
708
709
710
711
/**
 * wake_up_sem_queue_prepare - prepare wake-up
 * @pt: list of tasks to be woken up
 * @q: queue entry that must be woken up
 * @error: error value for the signal
 *
 * Prepare the wake-up of the queue entry q.  The actual wake-up is
 * performed later by wake_up_sem_queue_do(), after all locks are dropped.
 */
static void wake_up_sem_queue_prepare(struct list_head *pt,
				struct sem_queue *q, int error)
{
	if (list_empty(pt)) {
		/*
		 * First entry: disable preemption so that the window
		 * where q->status == IN_WAKEUP stays short (waiters
		 * busy-wait on it in get_queue_result()).
		 */
		preempt_disable();
	}
	q->status = IN_WAKEUP;
	/* stash the final status in q->pid until the real wake-up */
	q->pid = error;

	list_add_tail(&q->list, pt);
}
727
728
729
730
731
732
733
734
735
736
/**
 * wake_up_sem_queue_do - do the actual wake-up
 * @pt: list of tasks to be woken up
 *
 * Do the actual wake-up of the tasks collected by
 * wake_up_sem_queue_prepare().  Must be called without any locks held.
 */
static void wake_up_sem_queue_do(struct list_head *pt)
{
	struct sem_queue *q, *t;
	int did_something;

	did_something = !list_empty(pt);
	list_for_each_entry_safe(q, t, pt, list) {
		wake_up_process(q->sleeper);
		/*
		 * q can disappear as soon as q->status is written;
		 * the wmb orders the wake-up before the final status.
		 */
		smp_wmb();
		q->status = q->pid;
	}
	if (did_something)
		preempt_enable();	/* pairs with prepare()'s disable */
}
752
753static void unlink_queue(struct sem_array *sma, struct sem_queue *q)
754{
755 list_del(&q->list);
756 if (q->nsops > 1)
757 sma->complex_count--;
758}
759
760
761
762
763
764
765
766
767
768
769
/**
 * check_restart - decide whether queue scanning must restart
 * @sma: semaphore array
 * @q: the operation that just completed
 *
 * Returns 1 if the just-completed operation might have enabled other
 * blocked operations, i.e. the caller must rescan the pending lists.
 * Wait-for-zero wakeups are handled separately (do_smart_wakeup_zero),
 * so a completed single decrement cannot unblock anything else.
 */
static int check_restart(struct sem_array *sma, struct sem_queue *q)
{
	/* pending complex alter operations are too difficult to analyse */
	if (!list_empty(&sma->pending_alter))
		return 1;

	/* we were a sleeping complex operation. Too difficult */
	if (q->nsops > 1)
		return 1;

	/*
	 * It is impossible that we have woken up because a simple
	 * increment unblocked us: increments wake via update_queue(),
	 * and a completed decrement cannot enable further decrements.
	 */
	return 0;
}
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
/**
 * wake_const_ops - wake up non-alter tasks
 * @sma: semaphore array.
 * @semnum: semaphore that was modified, or -1 for the global queue
 * @pt: list head for the tasks that must be woken up.
 *
 * Checks all entries in the relevant pending_const queue (wait-for-zero
 * operations) and completes those that are now satisfiable.
 * The function returns 1 if at least one operation completed, 0 otherwise.
 * Must be called with the array lock (global, or per-semaphore for the
 * @semnum queue) held.
 */
static int wake_const_ops(struct sem_array *sma, int semnum,
				struct list_head *pt)
{
	struct sem_queue *q;
	struct list_head *walk;
	struct list_head *pending_list;
	int semop_completed = 0;

	if (semnum == -1)
		pending_list = &sma->pending_const;
	else
		pending_list = &sma->sem_base[semnum].pending_const;

	/* open-coded safe iteration: q may be unlinked inside the loop */
	walk = pending_list->next;
	while (walk != pending_list) {
		int error;

		q = container_of(walk, struct sem_queue, list);
		walk = walk->next;

		error = perform_atomic_semop(sma, q);

		if (error <= 0) {
			/* operation completed, remove from queue & wakeup */

			unlink_queue(sma, q);

			wake_up_sem_queue_prepare(pt, q, error);
			if (error == 0)
				semop_completed = 1;
		}
	}
	return semop_completed;
}
842
843
844
845
846
847
848
849
850
851
852
853
/**
 * do_smart_wakeup_zero - wakeup all wait-for-zero tasks
 * @sma: semaphore array
 * @sops: operations that were performed, or NULL if unknown
 * @nsops: number of operations
 * @pt: list head of the tasks that must be woken up.
 *
 * Checks all required wait-for-zero queues: the per-semaphore queues of
 * the semaphores that @sops touched (or all of them when @sops is NULL),
 * and — if any semaphore actually reached zero — the global queue too.
 * Returns 1 if at least one operation completed, 0 otherwise.
 */
static int do_smart_wakeup_zero(struct sem_array *sma, struct sembuf *sops,
				 int nsops, struct list_head *pt)
{
	int i;
	int semop_completed = 0;
	int got_zero = 0;

	/* first: the per-semaphore queues, if known */
	if (sops) {
		for (i = 0; i < nsops; i++) {
			int num = sops[i].sem_num;

			if (sma->sem_base[num].semval == 0) {
				got_zero = 1;
				semop_completed |= wake_const_ops(sma, num, pt);
			}
		}
	} else {
		/*
		 * No sops means modified semaphores not known.
		 * Check all.
		 */
		for (i = 0; i < sma->sem_nsems; i++) {
			if (sma->sem_base[i].semval == 0) {
				got_zero = 1;
				semop_completed |= wake_const_ops(sma, i, pt);
			}
		}
	}
	/*
	 * If one of the modified semaphores got 0,
	 * then check the global queue, too.
	 */
	if (got_zero)
		semop_completed |= wake_const_ops(sma, -1, pt);

	return semop_completed;
}
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
/**
 * update_queue - look for tasks that can be completed.
 * @sma: semaphore array.
 * @semnum: semaphore that was modified, or -1 for the global queue
 * @pt: list head for the tasks that must be woken up.
 *
 * Scans the relevant pending_alter queue and completes operations that
 * can now succeed; restarts the scan when a completion may have enabled
 * earlier entries (see check_restart()).  The function internally checks
 * the wait-for-zero queues for every completed operation.
 * Returns 1 if at least one operation was completed, 0 otherwise.
 * Must be called with the proper array lock held.
 */
static int update_queue(struct sem_array *sma, int semnum, struct list_head *pt)
{
	struct sem_queue *q;
	struct list_head *walk;
	struct list_head *pending_list;
	int semop_completed = 0;

	if (semnum == -1)
		pending_list = &sma->pending_alter;
	else
		pending_list = &sma->sem_base[semnum].pending_alter;

again:
	/* open-coded safe iteration: q may be unlinked inside the loop */
	walk = pending_list->next;
	while (walk != pending_list) {
		int error, restart;

		q = container_of(walk, struct sem_queue, list);
		walk = walk->next;

		/*
		 * If we are scanning a single semaphore's queue and it hit
		 * zero, nothing further in the queue can proceed (only
		 * decrements wait here), so stop early.
		 */
		if (semnum != -1 && sma->sem_base[semnum].semval == 0)
			break;

		error = perform_atomic_semop(sma, q);

		/* Does q->sleeper still need to sleep? */
		if (error > 0)
			continue;

		unlink_queue(sma, q);

		if (error) {
			restart = 0;
		} else {
			semop_completed = 1;
			do_smart_wakeup_zero(sma, q->sops, q->nsops, pt);
			restart = check_restart(sma, q);
		}

		wake_up_sem_queue_prepare(pt, q, error);
		if (restart)
			goto again;
	}
	return semop_completed;
}
962
963
964
965
966
967
968
969
970
971static void set_semotime(struct sem_array *sma, struct sembuf *sops)
972{
973 if (sops == NULL) {
974 sma->sem_base[0].sem_otime = get_seconds();
975 } else {
976 sma->sem_base[sops[0].sem_num].sem_otime =
977 get_seconds();
978 }
979}
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
/**
 * do_smart_update - optimized update_queue
 * @sma: semaphore array
 * @sops: operations that were performed, or NULL if the caller doesn't know
 * @nsops: number of operations
 * @otime: force setting otime (e.g. a semop already completed)
 * @pt: list head of the tasks that must be woken up.
 *
 * do_smart_update() does the required calls to update_queue and
 * do_smart_wakeup_zero, based on the actual changes that were performed
 * on the semaphore array.  It sets sem_otime when at least one operation
 * completed.  Must be called with the proper array lock held.
 */
static void do_smart_update(struct sem_array *sma, struct sembuf *sops, int nsops,
			int otime, struct list_head *pt)
{
	int i;

	otime |= do_smart_wakeup_zero(sma, sops, nsops, pt);

	if (!list_empty(&sma->pending_alter)) {
		/* semaphore array uses the global queue - just process it */
		otime |= update_queue(sma, -1, pt);
	} else {
		if (!sops) {
			/*
			 * No sops, thus the modified semaphores are not
			 * known. Check all.
			 */
			for (i = 0; i < sma->sem_nsems; i++)
				otime |= update_queue(sma, i, pt);
		} else {
			/*
			 * Check the semaphores that were increased:
			 * - No complex ops, thus all sleeping ops are
			 *   decrease.
			 * - if we decreased the value, then any sleeping
			 *   semaphore ops won't be able to run: if the
			 *   previous value was too small, then the new
			 *   value will be too small, too.
			 */
			for (i = 0; i < nsops; i++) {
				if (sops[i].sem_op > 0) {
					otime |= update_queue(sma,
							sops[i].sem_num, pt);
				}
			}
		}
	}
	if (otime)
		set_semotime(sma, sops);
}
1034
1035
1036
1037
/*
 * check_qop: Test if a queued complex operation sleeps on semaphore @semnum.
 * @count_zero selects GETZCNT (wait-for-zero) vs GETNCNT (decrement)
 * semantics.  Only the sop that actually blocked (q->blocking) counts,
 * per the Single Unix Specification.
 */
static int check_qop(struct sem_array *sma, int semnum, struct sem_queue *q,
			bool count_zero)
{
	struct sembuf *sop = q->blocking;

	/*
	 * Linux always (since 0.99.10) reported a task as sleeping on all
	 * semaphores. This violates SUS, therefore it was changed to the
	 * standard compliant behavior.
	 * Give admins a hint that a task triggered code that relies on the
	 * exact semantics.
	 */
	pr_info_once("semctl(GETNCNT/GETZCNT) is since 3.16 Single Unix Specification compliant.\n"
			"The task %s (%d) triggered the difference, watch for misbehavior.\n",
			current->comm, task_pid_nr(current));

	if (sop->sem_num != semnum)
		return 0;

	if (count_zero && sop->sem_op == 0)
		return 1;
	if (!count_zero && sop->sem_op < 0)
		return 1;

	return 0;
}
1064
1065
1066
1067
1068
1069
1070
1071
/*
 * count_semcnt - count the number of tasks sleeping on semaphore @semnum.
 * @count_zero: true for GETZCNT (wait-for-zero), false for GETNCNT.
 *
 * Per SUS, simple (single-sop) sleepers on the per-semaphore queue always
 * count; complex sleepers on the global queues count only if their
 * blocking sop targets @semnum (see check_qop()).
 * The caller must hold the array lock.
 */
static int count_semcnt(struct sem_array *sma, ushort semnum,
			bool count_zero)
{
	struct list_head *l;
	struct sem_queue *q;
	int semcnt;

	semcnt = 0;
	/* First: check the simple operations. They are easy to evaluate */
	if (count_zero)
		l = &sma->sem_base[semnum].pending_const;
	else
		l = &sma->sem_base[semnum].pending_alter;

	list_for_each_entry(q, l, list) {
		/* all simple entries on this list sleep on @semnum */
		semcnt++;
	}

	/* Then: check the complex operations. */
	list_for_each_entry(q, &sma->pending_alter, list) {
		semcnt += check_qop(sma, semnum, q, count_zero);
	}
	if (count_zero) {
		list_for_each_entry(q, &sma->pending_const, list) {
			semcnt += check_qop(sma, semnum, q, count_zero);
		}
	}
	return semcnt;
}
1104
1105
1106
1107
1108
/*
 * Free a semaphore set (IPC_RMID or namespace teardown).
 * freeary() is called with sem_ids.rwsem locked as a writer and the
 * array locked (global mode).  It invalidates all undo structures,
 * wakes every sleeper with -EIDRM, removes the id and drops the final
 * reference.
 */
static void freeary(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
{
	struct sem_undo *un, *tu;
	struct sem_queue *q, *tq;
	struct sem_array *sma = container_of(ipcp, struct sem_array, sem_perm);
	struct list_head tasks;
	int i;

	/* Free the existing undo structures for this semaphore set. */
	ipc_assert_locked_object(&sma->sem_perm);
	list_for_each_entry_safe(un, tu, &sma->list_id, list_id) {
		list_del(&un->list_id);
		spin_lock(&un->ulp->lock);
		un->semid = -1;		/* mark stale for __lookup_undo() */
		list_del_rcu(&un->list_proc);
		spin_unlock(&un->ulp->lock);
		kfree_rcu(un, rcu);
	}

	/* Wake up all pending processes and let them fail with EIDRM. */
	INIT_LIST_HEAD(&tasks);
	list_for_each_entry_safe(q, tq, &sma->pending_const, list) {
		unlink_queue(sma, q);
		wake_up_sem_queue_prepare(&tasks, q, -EIDRM);
	}

	list_for_each_entry_safe(q, tq, &sma->pending_alter, list) {
		unlink_queue(sma, q);
		wake_up_sem_queue_prepare(&tasks, q, -EIDRM);
	}
	for (i = 0; i < sma->sem_nsems; i++) {
		struct sem *sem = sma->sem_base + i;
		list_for_each_entry_safe(q, tq, &sem->pending_const, list) {
			unlink_queue(sma, q);
			wake_up_sem_queue_prepare(&tasks, q, -EIDRM);
		}
		list_for_each_entry_safe(q, tq, &sem->pending_alter, list) {
			unlink_queue(sma, q);
			wake_up_sem_queue_prepare(&tasks, q, -EIDRM);
		}
	}

	/* Remove the semaphore set from the IDR */
	sem_rmid(ns, sma);
	sem_unlock(sma, -1);
	rcu_read_unlock();

	/* actual wake-ups only after all locks were dropped */
	wake_up_sem_queue_do(&tasks);
	ns->used_sems -= sma->sem_nsems;
	ipc_rcu_putref(sma, sem_rcu_free);
}
1160
/*
 * Copy a semid64_ds out to user space, converting to the legacy
 * semid_ds layout for IPC_OLD callers.  Returns the number of bytes
 * not copied (copy_to_user convention) or -EINVAL for a bad version.
 */
static unsigned long copy_semid_to_user(void __user *buf, struct semid64_ds *in, int version)
{
	switch (version) {
	case IPC_64:
		return copy_to_user(buf, in, sizeof(*in));
	case IPC_OLD:
	{
		struct semid_ds out;

		/* zero first: the old struct has padding we must not leak */
		memset(&out, 0, sizeof(out));

		ipc64_perm_to_ipc_perm(&in->sem_perm, &out.sem_perm);

		out.sem_otime	= in->sem_otime;
		out.sem_ctime	= in->sem_ctime;
		out.sem_nsems	= in->sem_nsems;

		return copy_to_user(buf, &out, sizeof(out));
	}
	default:
		return -EINVAL;
	}
}
1184
1185static time_t get_semotime(struct sem_array *sma)
1186{
1187 int i;
1188 time_t res;
1189
1190 res = sma->sem_base[0].sem_otime;
1191 for (i = 1; i < sma->sem_nsems; i++) {
1192 time_t to = sma->sem_base[i].sem_otime;
1193
1194 if (to > res)
1195 res = to;
1196 }
1197 return res;
1198}
1199
/*
 * semctl_nolock - handle the semctl commands that don't need the array
 * lock: IPC_INFO/SEM_INFO (namespace limits) and IPC_STAT/SEM_STAT
 * (per-array status, read under RCU only).
 */
static int semctl_nolock(struct ipc_namespace *ns, int semid,
			 int cmd, int version, void __user *p)
{
	int err;
	struct sem_array *sma;

	switch (cmd) {
	case IPC_INFO:
	case SEM_INFO:
	{
		struct seminfo seminfo;
		int max_id;

		err = security_sem_semctl(NULL, cmd);
		if (err)
			return err;

		memset(&seminfo, 0, sizeof(seminfo));
		seminfo.semmni = ns->sc_semmni;
		seminfo.semmns = ns->sc_semmns;
		seminfo.semmsl = ns->sc_semmsl;
		seminfo.semopm = ns->sc_semopm;
		seminfo.semvmx = SEMVMX;
		seminfo.semmnu = SEMMNU;
		seminfo.semmap = SEMMAP;
		seminfo.semume = SEMUME;
		down_read(&sem_ids(ns).rwsem);
		if (cmd == SEM_INFO) {
			/* SEM_INFO reports actual usage instead of limits */
			seminfo.semusz = sem_ids(ns).in_use;
			seminfo.semaem = ns->used_sems;
		} else {
			seminfo.semusz = SEMUSZ;
			seminfo.semaem = SEMAEM;
		}
		max_id = ipc_get_maxid(&sem_ids(ns));
		up_read(&sem_ids(ns).rwsem);
		if (copy_to_user(p, &seminfo, sizeof(struct seminfo)))
			return -EFAULT;
		return (max_id < 0) ? 0 : max_id;
	}
	case IPC_STAT:
	case SEM_STAT:
	{
		struct semid64_ds tbuf;
		int id = 0;

		memset(&tbuf, 0, sizeof(tbuf));

		rcu_read_lock();
		if (cmd == SEM_STAT) {
			/* SEM_STAT takes an index and returns the real id */
			sma = sem_obtain_object(ns, semid);
			if (IS_ERR(sma)) {
				err = PTR_ERR(sma);
				goto out_unlock;
			}
			id = sma->sem_perm.id;
		} else {
			sma = sem_obtain_object_check(ns, semid);
			if (IS_ERR(sma)) {
				err = PTR_ERR(sma);
				goto out_unlock;
			}
		}

		err = -EACCES;
		if (ipcperms(ns, &sma->sem_perm, S_IRUGO))
			goto out_unlock;

		err = security_sem_semctl(sma, cmd);
		if (err)
			goto out_unlock;

		kernel_to_ipc64_perm(&sma->sem_perm, &tbuf.sem_perm);
		tbuf.sem_otime = get_semotime(sma);
		tbuf.sem_ctime = sma->sem_ctime;
		tbuf.sem_nsems = sma->sem_nsems;
		rcu_read_unlock();
		if (copy_semid_to_user(p, &tbuf, version))
			return -EFAULT;
		return id;
	}
	default:
		return -EINVAL;
	}
out_unlock:
	rcu_read_unlock();
	return err;
}
1288
/*
 * semctl_setval - implement semctl(SETVAL): set one semaphore to @arg,
 * clear all undo adjustments for it, and wake any now-runnable sleepers.
 */
static int semctl_setval(struct ipc_namespace *ns, int semid, int semnum,
		unsigned long arg)
{
	struct sem_undo *un;
	struct sem_array *sma;
	struct sem *curr;
	int err;
	struct list_head tasks;
	int val;
#if defined(CONFIG_64BIT) && defined(__BIG_ENDIAN)
	/* big-endian 64bit: the value is in the upper half of the union */
	val = arg >> 32;
#else
	/* 32bit or little endian: plain assignment */
	val = arg;
#endif

	if (val > SEMVMX || val < 0)
		return -ERANGE;

	INIT_LIST_HEAD(&tasks);

	rcu_read_lock();
	sma = sem_obtain_object_check(ns, semid);
	if (IS_ERR(sma)) {
		rcu_read_unlock();
		return PTR_ERR(sma);
	}

	if (semnum < 0 || semnum >= sma->sem_nsems) {
		rcu_read_unlock();
		return -EINVAL;
	}

	/* permission checks are done before taking the array lock */
	if (ipcperms(ns, &sma->sem_perm, S_IWUGO)) {
		rcu_read_unlock();
		return -EACCES;
	}

	err = security_sem_semctl(sma, SETVAL);
	if (err) {
		rcu_read_unlock();
		return -EACCES;
	}

	sem_lock(sma, NULL, -1);

	/* the set may have been IPC_RMID'ed while we were unlocked */
	if (!ipc_valid_object(&sma->sem_perm)) {
		sem_unlock(sma, -1);
		rcu_read_unlock();
		return -EIDRM;
	}

	curr = &sma->sem_base[semnum];

	/* SETVAL clears the undo adjustments of all processes */
	ipc_assert_locked_object(&sma->sem_perm);
	list_for_each_entry(un, &sma->list_id, list_id)
		un->semadj[semnum] = 0;

	curr->semval = val;
	curr->sempid = task_tgid_vnr(current);
	sma->sem_ctime = get_seconds();
	/* maybe some queued-up processes were waiting for this */
	do_smart_update(sma, NULL, 0, 0, &tasks);
	sem_unlock(sma, -1);
	rcu_read_unlock();
	wake_up_sem_queue_do(&tasks);
	return 0;
}
1359
/*
 * semctl_main - implement GETALL/SETALL and the per-semaphore read
 * commands (GETVAL/GETPID/GETNCNT/GETZCNT).
 *
 * For GETALL/SETALL with large arrays the buffer is allocated with the
 * array unlocked; an ipc_rcu_getref() keeps the array alive across the
 * unlocked window and ipc_valid_object() re-checks for IPC_RMID after
 * relocking.
 */
static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
		int cmd, void __user *p)
{
	struct sem_array *sma;
	struct sem *curr;
	int err, nsems;
	ushort fast_sem_io[SEMMSL_FAST];
	ushort *sem_io = fast_sem_io;	/* on-stack buffer for small sets */
	struct list_head tasks;

	INIT_LIST_HEAD(&tasks);

	rcu_read_lock();
	sma = sem_obtain_object_check(ns, semid);
	if (IS_ERR(sma)) {
		rcu_read_unlock();
		return PTR_ERR(sma);
	}

	nsems = sma->sem_nsems;

	err = -EACCES;
	if (ipcperms(ns, &sma->sem_perm, cmd == SETALL ? S_IWUGO : S_IRUGO))
		goto out_rcu_wakeup;

	err = security_sem_semctl(sma, cmd);
	if (err)
		goto out_rcu_wakeup;

	err = -EACCES;
	switch (cmd) {
	case GETALL:
	{
		ushort __user *array = p;
		int i;

		sem_lock(sma, NULL, -1);
		if (!ipc_valid_object(&sma->sem_perm)) {
			err = -EIDRM;
			goto out_unlock;
		}
		if (nsems > SEMMSL_FAST) {
			/* need a heap buffer: pin the array, drop the lock */
			if (!ipc_rcu_getref(sma)) {
				err = -EIDRM;
				goto out_unlock;
			}
			sem_unlock(sma, -1);
			rcu_read_unlock();
			sem_io = ipc_alloc(sizeof(ushort)*nsems);
			if (sem_io == NULL) {
				ipc_rcu_putref(sma, sem_rcu_free);
				return -ENOMEM;
			}

			rcu_read_lock();
			sem_lock_and_putref(sma);
			if (!ipc_valid_object(&sma->sem_perm)) {
				err = -EIDRM;
				goto out_unlock;
			}
		}
		for (i = 0; i < sma->sem_nsems; i++)
			sem_io[i] = sma->sem_base[i].semval;
		sem_unlock(sma, -1);
		rcu_read_unlock();
		err = 0;
		if (copy_to_user(array, sem_io, nsems*sizeof(ushort)))
			err = -EFAULT;
		goto out_free;
	}
	case SETALL:
	{
		int i;
		struct sem_undo *un;

		/* pin the array; copy_from_user may sleep */
		if (!ipc_rcu_getref(sma)) {
			err = -EIDRM;
			goto out_rcu_wakeup;
		}
		rcu_read_unlock();

		if (nsems > SEMMSL_FAST) {
			sem_io = ipc_alloc(sizeof(ushort)*nsems);
			if (sem_io == NULL) {
				ipc_rcu_putref(sma, sem_rcu_free);
				return -ENOMEM;
			}
		}

		if (copy_from_user(sem_io, p, nsems*sizeof(ushort))) {
			ipc_rcu_putref(sma, sem_rcu_free);
			err = -EFAULT;
			goto out_free;
		}

		for (i = 0; i < nsems; i++) {
			if (sem_io[i] > SEMVMX) {
				ipc_rcu_putref(sma, sem_rcu_free);
				err = -ERANGE;
				goto out_free;
			}
		}
		rcu_read_lock();
		sem_lock_and_putref(sma);
		if (!ipc_valid_object(&sma->sem_perm)) {
			err = -EIDRM;
			goto out_unlock;
		}

		for (i = 0; i < nsems; i++) {
			sma->sem_base[i].semval = sem_io[i];
			sma->sem_base[i].sempid = task_tgid_vnr(current);
		}

		/* SETALL clears the undo adjustments of all processes */
		ipc_assert_locked_object(&sma->sem_perm);
		list_for_each_entry(un, &sma->list_id, list_id) {
			for (i = 0; i < nsems; i++)
				un->semadj[i] = 0;
		}
		sma->sem_ctime = get_seconds();
		/* maybe some queued-up processes were waiting for this */
		do_smart_update(sma, NULL, 0, 0, &tasks);
		err = 0;
		goto out_unlock;
	}
	/* GETVAL, GETPID, GETNCTN, GETZCNT: fall-through */
	}
	err = -EINVAL;
	if (semnum < 0 || semnum >= nsems)
		goto out_rcu_wakeup;

	sem_lock(sma, NULL, -1);
	if (!ipc_valid_object(&sma->sem_perm)) {
		err = -EIDRM;
		goto out_unlock;
	}
	curr = &sma->sem_base[semnum];

	switch (cmd) {
	case GETVAL:
		err = curr->semval;
		goto out_unlock;
	case GETPID:
		err = curr->sempid;
		goto out_unlock;
	case GETNCNT:
		err = count_semcnt(sma, semnum, 0);
		goto out_unlock;
	case GETZCNT:
		err = count_semcnt(sma, semnum, 1);
		goto out_unlock;
	}

out_unlock:
	sem_unlock(sma, -1);
out_rcu_wakeup:
	rcu_read_unlock();
	wake_up_sem_queue_do(&tasks);
out_free:
	if (sem_io != fast_sem_io)
		ipc_free(sem_io);
	return err;
}
1523
/*
 * Copy a semid_ds / semid64_ds from user space into a semid64_ds,
 * depending on the IPC version the caller used.  Only the permission
 * fields matter for IPC_SET, so the legacy path copies nothing else.
 */
static inline unsigned long
copy_semid_from_user(struct semid64_ds *out, void __user *buf, int version)
{
	switch (version) {
	case IPC_64:
		if (copy_from_user(out, buf, sizeof(*out)))
			return -EFAULT;
		return 0;
	case IPC_OLD:
	{
		struct semid_ds tbuf_old;

		if (copy_from_user(&tbuf_old, buf, sizeof(tbuf_old)))
			return -EFAULT;

		out->sem_perm.uid	= tbuf_old.sem_perm.uid;
		out->sem_perm.gid	= tbuf_old.sem_perm.gid;
		out->sem_perm.mode	= tbuf_old.sem_perm.mode;

		return 0;
	}
	default:
		return -EINVAL;
	}
}
1549
1550
1551
1552
1553
1554
/*
 * This function handles some semctl commands which require the rwsem
 * to be held in write mode: IPC_RMID and IPC_SET.
 * NOTE: no locks must be held, the rwsem is taken inside this function.
 */
static int semctl_down(struct ipc_namespace *ns, int semid,
		       int cmd, int version, void __user *p)
{
	struct sem_array *sma;
	int err;
	struct semid64_ds semid64;
	struct kern_ipc_perm *ipcp;

	if (cmd == IPC_SET) {
		if (copy_semid_from_user(&semid64, p, version))
			return -EFAULT;
	}

	down_write(&sem_ids(ns).rwsem);
	rcu_read_lock();

	/* ownership/capability checks; returns the object unlocked */
	ipcp = ipcctl_pre_down_nolock(ns, &sem_ids(ns), semid, cmd,
				      &semid64.sem_perm, 0);
	if (IS_ERR(ipcp)) {
		err = PTR_ERR(ipcp);
		goto out_unlock1;
	}

	sma = container_of(ipcp, struct sem_array, sem_perm);

	err = security_sem_semctl(sma, cmd);
	if (err)
		goto out_unlock1;

	switch (cmd) {
	case IPC_RMID:
		sem_lock(sma, NULL, -1);
		/* freeary unlocks the ipc object and rcu */
		freeary(ns, ipcp);
		goto out_up;
	case IPC_SET:
		sem_lock(sma, NULL, -1);
		err = ipc_update_perm(&semid64.sem_perm, ipcp);
		if (err)
			goto out_unlock0;
		sma->sem_ctime = get_seconds();
		break;
	default:
		err = -EINVAL;
		goto out_unlock1;
	}

out_unlock0:
	sem_unlock(sma, -1);
out_unlock1:
	rcu_read_unlock();
out_up:
	up_write(&sem_ids(ns).rwsem);
	return err;
}
1610
/*
 * semctl(2): dispatch to the lock-free, read, SETVAL or write-locked
 * helper depending on the command.
 */
SYSCALL_DEFINE4(semctl, int, semid, int, semnum, int, cmd, unsigned long, arg)
{
	int version;
	struct ipc_namespace *ns;
	void __user *p = (void __user *)arg;

	if (semid < 0)
		return -EINVAL;

	version = ipc_parse_version(&cmd);
	ns = current->nsproxy->ipc_ns;

	switch (cmd) {
	case IPC_INFO:
	case SEM_INFO:
	case IPC_STAT:
	case SEM_STAT:
		return semctl_nolock(ns, semid, cmd, version, p);
	case GETALL:
	case GETVAL:
	case GETPID:
	case GETNCNT:
	case GETZCNT:
	case SETALL:
		return semctl_main(ns, semid, semnum, cmd, p);
	case SETVAL:
		/* SETVAL passes the value in arg itself, not via *p */
		return semctl_setval(ns, semid, semnum, arg);
	case IPC_RMID:
	case IPC_SET:
		return semctl_down(ns, semid, cmd, version, p);
	default:
		return -EINVAL;
	}
}
1645
1646
1647
1648
1649
1650
1651
1652
1653
1654
1655
1656
/* If the task doesn't already have a undo_list, then allocate one
 * here.  We guarantee there is only one thread using this undo list,
 * and current is THE ONE
 *
 * If this allocation and assignment succeeds, but later
 * portions of this code fail, there is no need to free the sem_undo_list.
 * Just let it stay associated with the task, and it'll be freed later
 * at exit time.
 *
 * This can block, so callers must hold no locks.
 */
static inline int get_undo_list(struct sem_undo_list **undo_listp)
{
	struct sem_undo_list *undo_list;

	undo_list = current->sysvsem.undo_list;
	if (!undo_list) {
		undo_list = kzalloc(sizeof(*undo_list), GFP_KERNEL);
		if (undo_list == NULL)
			return -ENOMEM;
		spin_lock_init(&undo_list->lock);
		atomic_set(&undo_list->refcnt, 1);
		INIT_LIST_HEAD(&undo_list->list_proc);

		current->sysvsem.undo_list = undo_list;
	}
	*undo_listp = undo_list;
	return 0;
}
1675
/*
 * Find the undo structure for @semid on @ulp's list, or NULL.
 * Callers either hold ulp->lock or are in an RCU read section.
 */
static struct sem_undo *__lookup_undo(struct sem_undo_list *ulp, int semid)
{
	struct sem_undo *un;

	list_for_each_entry_rcu(un, &ulp->list_proc, list_proc) {
		if (un->semid == semid)
			return un;
	}
	return NULL;
}
1686
/*
 * Like __lookup_undo(), but on a hit the entry is moved to the front of
 * the list (simple MRU caching).  Requires ulp->lock.
 */
static struct sem_undo *lookup_undo(struct sem_undo_list *ulp, int semid)
{
	struct sem_undo *un;

	assert_spin_locked(&ulp->lock);

	un = __lookup_undo(ulp, semid);
	if (un) {
		/* move-to-front, done as del+add so RCU readers stay safe */
		list_del_rcu(&un->list_proc);
		list_add_rcu(&un->list_proc, &ulp->list_proc);
	}
	return un;
}
1700
1701
1702
1703
1704
1705
1706
1707
1708
1709
1710
1711
/**
 * find_alloc_undo - lookup (and if not present create) undo array
 * @ns: namespace
 * @semid: semaphore array id
 *
 * The function looks up (and if not present creates) the undo structure.
 * The size of the undo structure depends on the size of the semaphore
 * array, thus the alloc path is not that straightforward.
 * Lifetime-rules: sem_undo is rcu-protected, on success the function
 * returns inside the rcu read lock, with the array locked dropped again.
 */
static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid)
{
	struct sem_array *sma;
	struct sem_undo_list *ulp;
	struct sem_undo *un, *new;
	int nsems, error;

	error = get_undo_list(&ulp);
	if (error)
		return ERR_PTR(error);

	rcu_read_lock();
	spin_lock(&ulp->lock);
	un = lookup_undo(ulp, semid);
	spin_unlock(&ulp->lock);
	if (likely(un != NULL))
		goto out;

	/* no undo structure around - allocate one. */
	/* step 1: figure out the size of the semaphore array */
	sma = sem_obtain_object_check(ns, semid);
	if (IS_ERR(sma)) {
		rcu_read_unlock();
		return ERR_CAST(sma);
	}

	nsems = sma->sem_nsems;
	/* step 2: pin the array so it survives the unlocked allocation */
	if (!ipc_rcu_getref(sma)) {
		rcu_read_unlock();
		un = ERR_PTR(-EIDRM);
		goto out;
	}
	rcu_read_unlock();

	/* step 3: allocate new undo structure (may sleep) */
	new = kzalloc(sizeof(struct sem_undo) + sizeof(short)*nsems, GFP_KERNEL);
	if (!new) {
		ipc_rcu_putref(sma, sem_rcu_free);
		return ERR_PTR(-ENOMEM);
	}

	/* step 4: check for races: did someone else allocate the undo? */
	rcu_read_lock();
	sem_lock_and_putref(sma);
	if (!ipc_valid_object(&sma->sem_perm)) {
		sem_unlock(sma, -1);
		rcu_read_unlock();
		kfree(new);
		un = ERR_PTR(-EIDRM);
		goto out;
	}
	spin_lock(&ulp->lock);

	/*
	 * Another thread may have raced ahead of us and created the
	 * undo structure while we slept in kzalloc().
	 */
	un = lookup_undo(ulp, semid);
	if (un) {
		kfree(new);
		goto success;
	}
	/* step 5: initialize & link the new undo structure */
	new->semadj = (short *) &new[1];
	new->ulp = ulp;
	new->semid = semid;
	assert_spin_locked(&ulp->lock);
	list_add_rcu(&new->list_proc, &ulp->list_proc);
	ipc_assert_locked_object(&sma->sem_perm);
	list_add(&new->list_id, &sma->list_id);
	un = new;

success:
	spin_unlock(&ulp->lock);
	sem_unlock(sma, -1);
out:
	return un;
}
1789
1790
1791
1792
1793
1794
1795
1796
1797
1798
1799
1800
1801
1802
1803static int get_queue_result(struct sem_queue *q)
1804{
1805 int error;
1806
1807 error = q->status;
1808 while (unlikely(error == IN_WAKEUP)) {
1809 cpu_relax();
1810 error = q->status;
1811 }
1812
1813 return error;
1814}
1815
/*
 * semtimedop(2): perform an array of semaphore operations atomically,
 * optionally sleeping (with timeout) until they can all proceed.
 */
SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
		unsigned, nsops, const struct timespec __user *, timeout)
{
	int error = -EINVAL;
	struct sem_array *sma;
	struct sembuf fast_sops[SEMOPM_FAST];
	struct sembuf *sops = fast_sops, *sop;
	struct sem_undo *un;
	int undos = 0, alter = 0, max, locknum;
	struct sem_queue queue;
	unsigned long jiffies_left = 0;
	struct ipc_namespace *ns;
	struct list_head tasks;

	ns = current->nsproxy->ipc_ns;

	if (nsops < 1 || semid < 0)
		return -EINVAL;
	if (nsops > ns->sc_semopm)
		return -E2BIG;
	/* fall back to a heap buffer when the op array exceeds the on-stack one */
	if (nsops > SEMOPM_FAST) {
		sops = kmalloc(sizeof(*sops)*nsops, GFP_KERNEL);
		if (sops == NULL)
			return -ENOMEM;
	}
	if (copy_from_user(sops, tsops, nsops * sizeof(*tsops))) {
		error = -EFAULT;
		goto out_free;
	}
	if (timeout) {
		struct timespec _timeout;
		if (copy_from_user(&_timeout, timeout, sizeof(*timeout))) {
			error = -EFAULT;
			goto out_free;
		}
		if (_timeout.tv_sec < 0 || _timeout.tv_nsec < 0 ||
			_timeout.tv_nsec >= 1000000000L) {
			error = -EINVAL;
			goto out_free;
		}
		jiffies_left = timespec_to_jiffies(&_timeout);
	}
	/* scan the ops: highest sem index, any SEM_UNDO, any altering op */
	max = 0;
	for (sop = sops; sop < sops + nsops; sop++) {
		if (sop->sem_num >= max)
			max = sop->sem_num;
		if (sop->sem_flg & SEM_UNDO)
			undos = 1;
		if (sop->sem_op != 0)
			alter = 1;
	}

	INIT_LIST_HEAD(&tasks);

	if (undos) {
		/* On success, find_alloc_undo takes the rcu_read_lock */
		un = find_alloc_undo(ns, semid);
		if (IS_ERR(un)) {
			error = PTR_ERR(un);
			goto out_free;
		}
	} else {
		un = NULL;
		rcu_read_lock();
	}

	sma = sem_obtain_object_check(ns, semid);
	if (IS_ERR(sma)) {
		rcu_read_unlock();
		error = PTR_ERR(sma);
		goto out_free;
	}

	error = -EFBIG;
	if (max >= sma->sem_nsems)
		goto out_rcu_wakeup;

	error = -EACCES;
	if (ipcperms(ns, &sma->sem_perm, alter ? S_IWUGO : S_IRUGO))
		goto out_rcu_wakeup;

	error = security_sem_semop(sma, sops, nsops, alter);
	if (error)
		goto out_rcu_wakeup;

	error = -EIDRM;
	locknum = sem_lock(sma, sops, nsops);

	/*
	 * The array could have been removed between the object-check above
	 * (done under rcu only) and acquiring the lock here, so the validity
	 * must be re-checked now that the lock is held.
	 */
	if (!ipc_valid_object(&sma->sem_perm))
		goto out_unlock_free;
	/*
	 * semid identifiers are not unique - find_alloc_undo may have
	 * allocated an undo structure, it was invalidated by an RMID
	 * and now a new array with the same id exists. Check and fail.
	 * This case can be detected by checking un->semid. The existence
	 * of "un" itself is guaranteed by rcu.
	 */
	if (un && un->semid == -1)
		goto out_unlock_free;

	queue.sops = sops;
	queue.nsops = nsops;
	queue.undo = un;
	queue.pid = task_tgid_vnr(current);
	queue.alter = alter;

	error = perform_atomic_semop(sma, &queue);
	if (error == 0) {
		/* If the operation was successful, then do
		 * the required updates.
		 */
		if (alter)
			do_smart_update(sma, sops, nsops, 1, &tasks);
		else
			set_semotime(sma, sops);
	}
	if (error <= 0)
		goto out_unlock_free;

	/* We need to sleep on this operation, so we put the current
	 * task into the pending queue and go to sleep.
	 * Single-sem operations go on the per-semaphore queues when
	 * possible; everything else uses the global (complex) queues.
	 */
	if (nsops == 1) {
		struct sem *curr;
		curr = &sma->sem_base[sops->sem_num];

		if (alter) {
			if (sma->complex_count) {
				list_add_tail(&queue.list,
						&sma->pending_alter);
			} else {
				/* no complex ops pending: per-sem queue is safe */
				list_add_tail(&queue.list,
						&curr->pending_alter);
			}
		} else {
			list_add_tail(&queue.list, &curr->pending_const);
		}
	} else {
		if (!sma->complex_count)
			merge_queues(sma);

		if (alter)
			list_add_tail(&queue.list, &sma->pending_alter);
		else
			list_add_tail(&queue.list, &sma->pending_const);

		sma->complex_count++;
	}

	queue.status = -EINTR;
	queue.sleeper = current;

sleep_again:
	__set_current_state(TASK_INTERRUPTIBLE);
	sem_unlock(sma, locknum);
	rcu_read_unlock();

	if (timeout)
		jiffies_left = schedule_timeout(jiffies_left);
	else
		schedule();

	error = get_queue_result(&queue);

	if (error != -EINTR) {
		/* fast path: update_queue already obtained all requested
		 * resources.
		 * Perform a smp_mb(): User space could assume that semop()
		 * is a memory barrier: Without the mb(), the cpu could
		 * speculatively read in user space stale data that was
		 * overwritten by the previous owner of the semaphore.
		 */
		smp_mb();

		goto out_free;
	}

	rcu_read_lock();
	sma = sem_obtain_lock(ns, semid, sops, nsops, &locknum);

	/*
	 * Re-read the status under the lock: a wakeup may have raced with
	 * the -EINTR read above.
	 */
	error = get_queue_result(&queue);

	/*
	 * Array removed? If yes, leave without sem_unlock().
	 */
	if (IS_ERR(sma)) {
		rcu_read_unlock();
		goto out_free;
	}

	/*
	 * If queue.status != -EINTR we were woken up by another process.
	 * Leave without unlink_queue(), but with sem_unlock().
	 */
	if (error != -EINTR)
		goto out_unlock_free;

	/*
	 * If an interrupt occurred we have to clean up the queue.
	 */
	if (timeout && jiffies_left == 0)
		error = -EAGAIN;

	/*
	 * If the wakeup was spurious (no signal pending), just retry.
	 */
	if (error == -EINTR && !signal_pending(current))
		goto sleep_again;

	unlink_queue(sma, &queue);

out_unlock_free:
	sem_unlock(sma, locknum);
out_rcu_wakeup:
	rcu_read_unlock();
	wake_up_sem_queue_do(&tasks);
out_free:
	if (sops != fast_sops)
		kfree(sops);
	return error;
}
2050
/* semop(2) is semtimedop(2) without a timeout. */
SYSCALL_DEFINE3(semop, int, semid, struct sembuf __user *, tsops,
		unsigned, nsops)
{
	return sys_semtimedop(semid, tsops, nsops, NULL);
}
2056
2057
2058
2059
2060
2061int copy_semundo(unsigned long clone_flags, struct task_struct *tsk)
2062{
2063 struct sem_undo_list *undo_list;
2064 int error;
2065
2066 if (clone_flags & CLONE_SYSVSEM) {
2067 error = get_undo_list(&undo_list);
2068 if (error)
2069 return error;
2070 atomic_inc(&undo_list->refcnt);
2071 tsk->sysvsem.undo_list = undo_list;
2072 } else
2073 tsk->sysvsem.undo_list = NULL;
2074
2075 return 0;
2076}
2077
2078
2079
2080
2081
2082
2083
2084
2085
2086
2087
2088
2089
/*
 * add semadj values to semaphores, free undo structures.
 * undo structures are not freed when semaphore arrays are destroyed
 * so some of them may be out of date.
 * IMPLEMENTATION NOTE: There is some confusion over whether the
 * set of adjustments that needs to be done should be done in an atomic
 * manner or not. That is, if we are attempting to decrement the semval
 * should we queue up and wait until we can do so legally?
 * The original implementation attempted to do this (queue and wait).
 * The current implementation does not do so. The POSIX standard
 * and SVID should be consulted to determine what behavior is mandated.
 */
void exit_sem(struct task_struct *tsk)
{
	struct sem_undo_list *ulp;

	ulp = tsk->sysvsem.undo_list;
	if (!ulp)
		return;
	tsk->sysvsem.undo_list = NULL;

	/* other tasks (CLONE_SYSVSEM) still share this list */
	if (!atomic_dec_and_test(&ulp->refcnt))
		return;

	/* we are the last user: apply and free every undo entry */
	for (;;) {
		struct sem_array *sma;
		struct sem_undo *un;
		struct list_head tasks;
		int semid, i;

		cond_resched();

		rcu_read_lock();
		un = list_entry_rcu(ulp->list_proc.next,
				    struct sem_undo, list_proc);
		if (&un->list_proc == &ulp->list_proc) {
			/*
			 * List is empty: we are done. Wait for any
			 * concurrent holder of ulp->lock (e.g. freeary()
			 * invalidating an entry) to drop it before ulp is
			 * freed below.
			 */
			spin_unlock_wait(&ulp->lock);
			rcu_read_unlock();
			break;
		}
		spin_lock(&ulp->lock);
		semid = un->semid;
		spin_unlock(&ulp->lock);

		/* exit_sem raced with IPC_RMID, nothing to do */
		if (semid == -1) {
			rcu_read_unlock();
			continue;
		}

		sma = sem_obtain_object_check(tsk->nsproxy->ipc_ns, semid);
		/* exit_sem raced with IPC_RMID, nothing to do */
		if (IS_ERR(sma)) {
			rcu_read_unlock();
			continue;
		}

		sem_lock(sma, NULL, -1);
		/* exit_sem raced with IPC_RMID, nothing to do */
		if (!ipc_valid_object(&sma->sem_perm)) {
			sem_unlock(sma, -1);
			rcu_read_unlock();
			continue;
		}
		un = __lookup_undo(ulp, semid);
		if (un == NULL) {
			/* exit_sem raced with IPC_RMID+semget() that created
			 * exactly the same semid. Nothing to do.
			 */
			sem_unlock(sma, -1);
			rcu_read_unlock();
			continue;
		}

		/* remove un from the linked lists */
		ipc_assert_locked_object(&sma->sem_perm);
		list_del(&un->list_id);

		/* We are the last process using this ulp, so acquiring
		 * ulp->lock isn't needed. We are also protected against
		 * IPC_RMID because we hold sma->sem_perm.lock now.
		 */
		list_del_rcu(&un->list_proc);

		/* perform adjustments registered in un */
		for (i = 0; i < sma->sem_nsems; i++) {
			struct sem *semaphore = &sma->sem_base[i];
			if (un->semadj[i]) {
				semaphore->semval += un->semadj[i];
				/*
				 * Range checks of the new semaphore value,
				 * not defined by sus:
				 * - Some unices ignore the undo entirely
				 *   (e.g. HP UX 11i 11.22, Tru64 V5.1)
				 * - some cap the value (e.g. FreeBSD caps
				 *   at 0, but doesn't enforce SEMVMX)
				 *
				 * Linux caps the semaphore value, both at 0
				 * and at SEMVMX.
				 *
				 *	Manfred <manfred@colorfullife.com>
				 */
				if (semaphore->semval < 0)
					semaphore->semval = 0;
				if (semaphore->semval > SEMVMX)
					semaphore->semval = SEMVMX;
				semaphore->sempid = task_tgid_vnr(current);
			}
		}
		/* maybe some queued-up processes were waiting for this */
		INIT_LIST_HEAD(&tasks);
		do_smart_update(sma, NULL, 0, 1, &tasks);
		sem_unlock(sma, -1);
		rcu_read_unlock();
		wake_up_sem_queue_do(&tasks);

		kfree_rcu(un, rcu);
	}
	kfree(ulp);
}
2204
2205#ifdef CONFIG_PROC_FS
2206static int sysvipc_sem_proc_show(struct seq_file *s, void *it)
2207{
2208 struct user_namespace *user_ns = seq_user_ns(s);
2209 struct sem_array *sma = it;
2210 time_t sem_otime;
2211
2212
2213
2214
2215
2216
2217
2218 complexmode_enter(sma);
2219
2220 sem_otime = get_semotime(sma);
2221
2222 seq_printf(s,
2223 "%10d %10d %4o %10u %5u %5u %5u %5u %10lu %10lu\n",
2224 sma->sem_perm.key,
2225 sma->sem_perm.id,
2226 sma->sem_perm.mode,
2227 sma->sem_nsems,
2228 from_kuid_munged(user_ns, sma->sem_perm.uid),
2229 from_kgid_munged(user_ns, sma->sem_perm.gid),
2230 from_kuid_munged(user_ns, sma->sem_perm.cuid),
2231 from_kgid_munged(user_ns, sma->sem_perm.cgid),
2232 sem_otime,
2233 sma->sem_ctime);
2234
2235 complexmode_tryleave(sma);
2236
2237 return 0;
2238}
2239#endif
2240