1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73#include <linux/slab.h>
74#include <linux/spinlock.h>
75#include <linux/init.h>
76#include <linux/proc_fs.h>
77#include <linux/time.h>
78#include <linux/security.h>
79#include <linux/syscalls.h>
80#include <linux/audit.h>
81#include <linux/capability.h>
82#include <linux/seq_file.h>
83#include <linux/rwsem.h>
84#include <linux/nsproxy.h>
85#include <linux/ipc_namespace.h>
86#include <linux/sched/wake_q.h>
87
88#include <linux/uaccess.h>
89#include "util.h"
90
91
/* One semaphore structure for each semaphore in the system. */
struct sem {
	int semval;		/* current value */
	/*
	 * PID of the process that last modified this semaphore. Updated
	 * in this file by perform_atomic_semop()/_slow(), semctl(SETVAL)
	 * and semctl(SETALL); cleared in freeary().
	 */
	struct pid *sempid;
	spinlock_t lock;	/* per-semaphore lock for single-sop fast path */
	struct list_head pending_alter; /* pending single-sop operations
					 * that alter this semaphore */
	struct list_head pending_const; /* pending single-sop wait-for-zero
					 * operations (do not alter semval) */
	time_t sem_otime;	/* candidate for sem_otime; the newest of all
				 * per-sem values wins, see get_semotime().
				 * NOTE(review): time_t here is not y2038-safe
				 * on 32-bit — confirm before relying on it. */
} ____cacheline_aligned_in_smp;
109
110
/* One sem_array data structure for each set of semaphores in the system. */
struct sem_array {
	struct kern_ipc_perm sem_perm;	/* permissions, see ipc.h */
	time64_t sem_ctime;		/* create/last semctl() change time */
	struct list_head pending_alter;	/* pending operations */
					/* that alter the array */
	struct list_head pending_const;	/* pending complex wait-for-zero */
					/* operations (do not alter semvals) */
	struct list_head list_id;	/* undo requests on this array */
	int sem_nsems;			/* no. of semaphores in array */
	int complex_count;		/* pending complex operations */
	unsigned int use_global_lock;	/* >0: global lock required;
					 * counts down toward the per-sem
					 * fast path, see complexmode_*() */

	struct sem sems[];		/* flexible array of semaphores */
} __randomize_layout;
125
126
/* One queue for each sleeping process in the system. */
struct sem_queue {
	struct list_head list;	 /* queue of pending operations */
	struct task_struct *sleeper; /* this process */
	struct sem_undo *undo;	 /* undo structure */
	struct pid *pid;	 /* process id of requesting process */
	int status;		 /* completion status of operation */
	struct sembuf *sops;	 /* array of pending operations */
	struct sembuf *blocking; /* the operation that blocked */
	int nsops;		 /* number of operations */
	bool alter;		 /* does *sops alter the array? */
	bool dupsop;		 /* sops on more than one sem_num */
};
139
140
141
142
/*
 * Each task has a list of undo requests. They are executed automatically
 * when the process exits (exit path is outside this chunk).
 */
struct sem_undo {
	struct list_head list_proc;	/* per-process list: all undos from
					 * one process, rcu protected */
	struct rcu_head rcu;		/* rcu struct for sem_undo */
	struct sem_undo_list *ulp;	/* back ptr to sem_undo_list */
	struct list_head list_id;	/* per semaphore array list:
					 * all undos for one array */
	int semid;			/* semaphore set identifier;
					 * -1 once the set is removed */
	short *semadj;			/* array of adjustments,
					 * one per semaphore */
};
155
156
157
158
/*
 * sem_undo_list controls shared access to the list of sem_undo structures
 * that may be shared among all tasks of a thread group (refcounted).
 */
struct sem_undo_list {
	refcount_t refcnt;		/* number of tasks sharing this list */
	spinlock_t lock;		/* protects list_proc mutation */
	struct list_head list_proc;	/* list of all sem_undo structs */
};
164
165
#define sem_ids(ns)	((ns)->ids[IPC_SEM_IDS])

static int newary(struct ipc_namespace *, struct ipc_params *);
static void freeary(struct ipc_namespace *, struct kern_ipc_perm *);
#ifdef CONFIG_PROC_FS
static int sysvipc_sem_proc_show(struct seq_file *s, void *it);
#endif

#define SEMMSL_FAST	256 /* ushort stack buffer bound in semctl_main() */
#define SEMOPM_FAST	64  /* semop fast-path bound (used outside this chunk) */

/*
 * Switching from the mode suitable for simple ops to the mode for complex
 * ops is costly (complexmode_enter() spins on every per-semaphore lock).
 * Therefore: use some hysteresis — complexmode_tryleave() decrements
 * use_global_lock and only re-enables the per-semaphore fast path after
 * this many uncontended global-lock acquisitions.
 */
#define USE_GLOBAL_LOCK_HYSTERESIS	10

/*
 * Locking overview (as implemented by sem_lock()/sem_unlock() below):
 * - single-sop operations take only sems[sem_num].lock, provided
 *   sma->use_global_lock is 0 (checked with an acquire load that pairs
 *   with the release store in complexmode_tryleave()).
 * - multi-sop operations take sma->sem_perm.lock (the "global" lock)
 *   and force all simple ops onto it via complexmode_enter().
 * - sma->complex_count and the pending lists hanging off sem_array are
 *   only touched under the global lock.
 * NOTE(review): the full locking contract also involves code outside
 * this chunk (do_semtimedop and the undo paths) — confirm there before
 * changing any ordering here.
 */

/*
 * The per-namespace semaphore tunables live in ns->sem_ctls[]:
 * semmsl: max semaphores per id, semmns: max semaphores system-wide,
 * semopm: max ops per semop call, semmni: max number of ids.
 */
#define sc_semmsl	sem_ctls[0]
#define sc_semmns	sem_ctls[1]
#define sc_semopm	sem_ctls[2]
#define sc_semmni	sem_ctls[3]
220
221int sem_init_ns(struct ipc_namespace *ns)
222{
223 ns->sc_semmsl = SEMMSL;
224 ns->sc_semmns = SEMMNS;
225 ns->sc_semopm = SEMOPM;
226 ns->sc_semmni = SEMMNI;
227 ns->used_sems = 0;
228 return ipc_init_ids(&ns->ids[IPC_SEM_IDS]);
229}
230
#ifdef CONFIG_IPC_NS
/*
 * Tear down every semaphore set in a dying ipc namespace (freeary wakes
 * sleepers with -EIDRM), then release the idr and key hash table that
 * back the id allocator.
 */
void sem_exit_ns(struct ipc_namespace *ns)
{
	free_ipcs(ns, &sem_ids(ns), freeary);
	idr_destroy(&ns->ids[IPC_SEM_IDS].ipcs_idr);
	rhashtable_destroy(&ns->ids[IPC_SEM_IDS].key_ht);
}
#endif
239
/*
 * Boot-time initialisation: set up the initial namespace's limits and
 * register the /proc/sysvipc/sem interface. The proc interface is
 * registered even if sem_init_ns() failed; that error is returned.
 */
int __init sem_init(void)
{
	const int err = sem_init_ns(&init_ipc_ns);

	ipc_init_proc_interface("sysvipc/sem",
				" key semid perms nsems uid gid cuid cgid otime ctime\n",
				IPC_SEM_IDS, sysvipc_sem_proc_show);
	return err;
}
249
250
251
252
253
254
255
256
/**
 * unmerge_queues - unmerge queues, if possible.
 * @sma: semaphore array
 *
 * The function unmerges the wait queues if complex_count is 0.
 * It must be called prior to dropping the global semaphore array lock.
 */
static void unmerge_queues(struct sem_array *sma)
{
	struct sem_queue *q, *tq;

	/* complex operations still around? */
	if (sma->complex_count)
		return;
	/*
	 * We will switch back to simple mode.
	 * Move all pending operations back into the per-semaphore queues;
	 * every entry in sma->pending_alter is a single-sop request here
	 * (multi-sop entries would have kept complex_count non-zero).
	 */
	list_for_each_entry_safe(q, tq, &sma->pending_alter, list) {
		struct sem *curr;
		curr = &sma->sems[q->sops[0].sem_num];

		list_add_tail(&q->list, &curr->pending_alter);
	}
	/* all entries were moved individually; reset the (stale) list head */
	INIT_LIST_HEAD(&sma->pending_alter);
}
277
278
279
280
281
282
283
284
285
286
287static void merge_queues(struct sem_array *sma)
288{
289 int i;
290 for (i = 0; i < sma->sem_nsems; i++) {
291 struct sem *sem = &sma->sems[i];
292
293 list_splice_init(&sem->pending_alter, &sma->pending_alter);
294 }
295}
296
/*
 * RCU callback: free the LSM security blob and the array itself once no
 * RCU reader can still hold a reference obtained via the idr lookup.
 */
static void sem_rcu_free(struct rcu_head *head)
{
	struct kern_ipc_perm *p = container_of(head, struct kern_ipc_perm, rcu);
	struct sem_array *sma = container_of(p, struct sem_array, sem_perm);

	security_sem_free(&sma->sem_perm);
	kvfree(sma);
}
305
306
307
308
309
/*
 * Enter the mode suitable for non-simple operations:
 * Caller must own sem_perm.lock.
 */
static void complexmode_enter(struct sem_array *sma)
{
	int i;
	struct sem *sem;

	if (sma->use_global_lock > 0) {
		/*
		 * We are already in global lock mode.
		 * Nothing to do, just reset the counter until we
		 * return to simple mode.
		 */
		sma->use_global_lock = USE_GLOBAL_LOCK_HYSTERESIS;
		return;
	}
	sma->use_global_lock = USE_GLOBAL_LOCK_HYSTERESIS;

	/*
	 * Lock/unlock each per-semaphore lock once: this waits out any
	 * simple op that took its sem->lock before it could observe the
	 * new use_global_lock value in sem_lock()'s fast path.
	 */
	for (i = 0; i < sma->sem_nsems; i++) {
		sem = &sma->sems[i];
		spin_lock(&sem->lock);
		spin_unlock(&sem->lock);
	}
}
332
333
334
335
336
/*
 * Try to leave the mode that disallows simple operations:
 * Caller must own sem_perm.lock.
 */
static void complexmode_tryleave(struct sem_array *sma)
{
	if (sma->complex_count) {
		/*
		 * Complex ops are sleeping.
		 * We must stay in complex mode.
		 */
		return;
	}
	if (sma->use_global_lock == 1) {
		/*
		 * Immediately after setting use_global_lock to 0,
		 * a simple op can start. Thus: all memory writes
		 * performed by the current operation must be visible
		 * before we set use_global_lock to 0.
		 * Pairs with smp_load_acquire() in sem_lock()'s fast path.
		 */
		smp_store_release(&sma->use_global_lock, 0);
	} else {
		/* count down the hysteresis, stay on the global lock */
		sma->use_global_lock--;
	}
}
357
#define SEM_GLOBAL_LOCK	(-1)
/*
 * If the request contains only one semaphore operation, and there are
 * no complex transactions pending, lock only the semaphore involved.
 * Otherwise, lock the entire semaphore array, since we either have
 * multiple semaphores in our own semops, or we need to look at
 * semaphores from other pending complex operations.
 *
 * Returns the index of the per-semaphore lock taken, or SEM_GLOBAL_LOCK
 * when sem_perm.lock was taken; pass that value to sem_unlock().
 */
static inline int sem_lock(struct sem_array *sma, struct sembuf *sops,
			      int nsops)
{
	struct sem *sem;

	if (nsops != 1) {
		/* Complex operation - acquire a full lock */
		ipc_lock_object(&sma->sem_perm);

		/* Prevent parallel simple ops */
		complexmode_enter(sma);
		return SEM_GLOBAL_LOCK;
	}

	/*
	 * Only one semaphore affected - try to optimize locking.
	 * Optimized locking is possible if no complex operation
	 * is either enqueued or processed right now.
	 * Both facts are tracked by use_global_lock.
	 */
	sem = &sma->sems[sops->sem_num];

	/*
	 * Initial check for use_global_lock. Just an optimization,
	 * no locking, no memory barrier.
	 */
	if (!sma->use_global_lock) {
		/*
		 * It appears that no complex operation is around.
		 * Acquire the per-semaphore lock.
		 */
		spin_lock(&sem->lock);

		/* pairs with smp_store_release() in complexmode_tryleave() */
		if (!smp_load_acquire(&sma->use_global_lock)) {
			/* fast path successful! */
			return sops->sem_num;
		}
		spin_unlock(&sem->lock);
	}

	/* slow path: acquire the full lock */
	ipc_lock_object(&sma->sem_perm);

	if (sma->use_global_lock == 0) {
		/*
		 * The use_global_lock mode ended while we waited for
		 * sma->sem_perm.lock. Thus we must switch to locking
		 * with sem->lock.
		 * Unlike in the fast path, there is no need to recheck
		 * sma->use_global_lock after we have acquired sem->lock:
		 * We own sma->sem_perm.lock, thus use_global_lock cannot
		 * change.
		 */
		spin_lock(&sem->lock);

		ipc_unlock_object(&sma->sem_perm);
		return sops->sem_num;
	} else {
		/*
		 * Not a false alarm, thus continue to use the global lock
		 * mode. No need for complexmode_enter(), this was done by
		 * the caller that has set use_global_lock to non-zero.
		 */
		return SEM_GLOBAL_LOCK;
	}
}
434
/*
 * Drop the lock taken by sem_lock(). @locknum is sem_lock()'s return
 * value: SEM_GLOBAL_LOCK or a semaphore index. On the global-lock path
 * this also tries to re-enable the simple-op fast path.
 */
static inline void sem_unlock(struct sem_array *sma, int locknum)
{
	if (locknum == SEM_GLOBAL_LOCK) {
		/* must happen before the lock is dropped, see unmerge_queues() */
		unmerge_queues(sma);
		complexmode_tryleave(sma);
		ipc_unlock_object(&sma->sem_perm);
	} else {
		struct sem *sem = &sma->sems[locknum];
		spin_unlock(&sem->lock);
	}
}
446
447
448
449
450
451
452
453static inline struct sem_array *sem_obtain_object(struct ipc_namespace *ns, int id)
454{
455 struct kern_ipc_perm *ipcp = ipc_obtain_object_idr(&sem_ids(ns), id);
456
457 if (IS_ERR(ipcp))
458 return ERR_CAST(ipcp);
459
460 return container_of(ipcp, struct sem_array, sem_perm);
461}
462
463static inline struct sem_array *sem_obtain_object_check(struct ipc_namespace *ns,
464 int id)
465{
466 struct kern_ipc_perm *ipcp = ipc_obtain_object_check(&sem_ids(ns), id);
467
468 if (IS_ERR(ipcp))
469 return ERR_CAST(ipcp);
470
471 return container_of(ipcp, struct sem_array, sem_perm);
472}
473
/*
 * Re-acquire the global lock and drop the reference taken earlier with
 * ipc_rcu_getref(). Used by paths that had to drop all locks to allocate
 * memory; the caller must revalidate the array with ipc_valid_object()
 * afterwards.
 */
static inline void sem_lock_and_putref(struct sem_array *sma)
{
	sem_lock(sma, NULL, -1);
	ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
}
479
/* Remove the array from the id allocator; callers hold the locks. */
static inline void sem_rmid(struct ipc_namespace *ns, struct sem_array *s)
{
	ipc_rmid(&sem_ids(ns), &s->sem_perm);
}
484
485static struct sem_array *sem_alloc(size_t nsems)
486{
487 struct sem_array *sma;
488 size_t size;
489
490 if (nsems > (INT_MAX - sizeof(*sma)) / sizeof(sma->sems[0]))
491 return NULL;
492
493 size = sizeof(*sma) + nsems * sizeof(sma->sems[0]);
494 sma = kvmalloc(size, GFP_KERNEL);
495 if (unlikely(!sma))
496 return NULL;
497
498 memset(sma, 0, size);
499
500 return sma;
501}
502
503
504
505
506
507
508
509
/**
 * newary - Create new semaphore set
 * @ns: namespace
 * @params: ptr to the structure that contains key, semflg and nsems
 *
 * Called with sem_ids.rwsem held (as a writer).
 * Returns the new set's ipc id, or a negative errno.
 */
static int newary(struct ipc_namespace *ns, struct ipc_params *params)
{
	int retval;
	struct sem_array *sma;
	key_t key = params->key;
	int nsems = params->u.nsems;
	int semflg = params->flg;
	int i;

	if (!nsems)
		return -EINVAL;
	if (ns->used_sems + nsems > ns->sc_semmns)
		return -ENOSPC;

	sma = sem_alloc(nsems);
	if (!sma)
		return -ENOMEM;

	sma->sem_perm.mode = (semflg & S_IRWXUGO);
	sma->sem_perm.key = key;

	sma->sem_perm.security = NULL;
	retval = security_sem_alloc(&sma->sem_perm);
	if (retval) {
		/* no id published yet: plain kvfree, no RCU grace period */
		kvfree(sma);
		return retval;
	}

	for (i = 0; i < nsems; i++) {
		INIT_LIST_HEAD(&sma->sems[i].pending_alter);
		INIT_LIST_HEAD(&sma->sems[i].pending_const);
		spin_lock_init(&sma->sems[i].lock);
	}

	sma->complex_count = 0;
	/* new sets start in global-lock mode, see USE_GLOBAL_LOCK_HYSTERESIS */
	sma->use_global_lock = USE_GLOBAL_LOCK_HYSTERESIS;
	INIT_LIST_HEAD(&sma->pending_alter);
	INIT_LIST_HEAD(&sma->pending_const);
	INIT_LIST_HEAD(&sma->list_id);
	sma->sem_nsems = nsems;
	sma->sem_ctime = ktime_get_real_seconds();

	/* publishes the id; on success we return locked + rcu-read-locked */
	retval = ipc_addid(&sem_ids(ns), &sma->sem_perm, ns->sc_semmni);
	if (retval < 0) {
		/* security blob was allocated: free via the RCU callback */
		call_rcu(&sma->sem_perm.rcu, sem_rcu_free);
		return retval;
	}
	ns->used_sems += nsems;

	sem_unlock(sma, -1);
	rcu_read_unlock();

	return sma->sem_perm.id;
}
565
566
567
568
569
570static inline int sem_more_checks(struct kern_ipc_perm *ipcp,
571 struct ipc_params *params)
572{
573 struct sem_array *sma;
574
575 sma = container_of(ipcp, struct sem_array, sem_perm);
576 if (params->u.nsems > sma->sem_nsems)
577 return -EINVAL;
578
579 return 0;
580}
581
582long ksys_semget(key_t key, int nsems, int semflg)
583{
584 struct ipc_namespace *ns;
585 static const struct ipc_ops sem_ops = {
586 .getnew = newary,
587 .associate = security_sem_associate,
588 .more_checks = sem_more_checks,
589 };
590 struct ipc_params sem_params;
591
592 ns = current->nsproxy->ipc_ns;
593
594 if (nsems < 0 || nsems > ns->sc_semmsl)
595 return -EINVAL;
596
597 sem_params.key = key;
598 sem_params.flg = semflg;
599 sem_params.u.nsems = nsems;
600
601 return ipcget(ns, &sem_ids(ns), &sem_ops, &sem_params);
602}
603
/* semget(2) syscall wrapper. */
SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
{
	return ksys_semget(key, nsems, semflg);
}
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
/**
 * perform_atomic_semop_slow - Attempt to perform semaphore
 *                             operations on a given array.
 * @sma: semaphore array
 * @q: struct sem_queue that describes the operation
 *
 * Blocking, per operation value (sem_op):
 * (1) >0 never blocks.
 * (2)  0 (wait-for-zero): blocks while semval is non-zero.
 * (3) <0 blocks if it would decrement semval below zero.
 *
 * Returns 0 if the operation was possible (and applied).
 * Returns 1 if the operation is impossible, the caller must sleep.
 * Returns <0 for error codes (-ERANGE, -EAGAIN).
 *
 * "Slow" variant: applies ops one by one and rolls back on failure, so
 * it is correct even when several sops target the same semaphore
 * (q->dupsop). Caller must hold the lock(s) covering all touched sems.
 */
static int perform_atomic_semop_slow(struct sem_array *sma, struct sem_queue *q)
{
	int result, sem_op, nsops;
	struct pid *pid;
	struct sembuf *sop;
	struct sem *curr;
	struct sembuf *sops;
	struct sem_undo *un;

	sops = q->sops;
	nsops = q->nsops;
	un = q->undo;

	for (sop = sops; sop < sops + nsops; sop++) {
		curr = &sma->sems[sop->sem_num];
		sem_op = sop->sem_op;
		result = curr->semval;

		/* wait-for-zero op on a non-zero semaphore: would block */
		if (!sem_op && result)
			goto would_block;

		result += sem_op;
		if (result < 0)
			goto would_block;
		if (result > SEMVMX)
			goto out_of_range;

		if (sop->sem_flg & SEM_UNDO) {
			int undo = un->semadj[sop->sem_num] - sem_op;

			if (undo < (-SEMAEM - 1) || undo > SEMAEM)
				goto out_of_range;
			un->semadj[sop->sem_num] = undo;
		}

		/* applied tentatively; undone below if a later sop fails */
		curr->semval = result;
	}

	/* success: record the caller's pid on every touched semaphore */
	sop--;
	pid = q->pid;
	while (sop >= sops) {
		ipc_update_pid(&sma->sems[sop->sem_num].sempid, pid);
		sop--;
	}

	return 0;

out_of_range:
	result = -ERANGE;
	goto undo;

would_block:
	q->blocking = sop;

	if (sop->sem_flg & IPC_NOWAIT)
		result = -EAGAIN;
	else
		result = 1;

undo:
	/* roll back every sop already applied, in reverse order */
	sop--;
	while (sop >= sops) {
		sem_op = sop->sem_op;
		sma->sems[sop->sem_num].semval -= sem_op;
		if (sop->sem_flg & SEM_UNDO)
			un->semadj[sop->sem_num] += sem_op;
		sop--;
	}

	return result;
}
697
/*
 * Fast variant of perform_atomic_semop_slow(): same contract (0 applied,
 * 1 must sleep, <0 error). Requires that no two sops target the same
 * semaphore; q->dupsop callers are redirected to the slow path.
 */
static int perform_atomic_semop(struct sem_array *sma, struct sem_queue *q)
{
	int result, sem_op, nsops;
	struct sembuf *sop;
	struct sem *curr;
	struct sembuf *sops;
	struct sem_undo *un;

	sops = q->sops;
	nsops = q->nsops;
	un = q->undo;

	if (unlikely(q->dupsop))
		return perform_atomic_semop_slow(sma, q);

	/*
	 * We scan the semaphore set twice, first to ensure that the entire
	 * operation can succeed, therefore avoiding any pointless writes
	 * to shared memory and having to undo such changes in order to block
	 * until the operations can go through.
	 */
	for (sop = sops; sop < sops + nsops; sop++) {
		curr = &sma->sems[sop->sem_num];
		sem_op = sop->sem_op;
		result = curr->semval;

		if (!sem_op && result)
			goto would_block; /* wait-for-zero */

		result += sem_op;
		if (result < 0)
			goto would_block;

		if (result > SEMVMX)
			return -ERANGE;

		if (sop->sem_flg & SEM_UNDO) {
			int undo = un->semadj[sop->sem_num] - sem_op;

			/* Exceeding the undo range is an error. */
			if (undo < (-SEMAEM - 1) || undo > SEMAEM)
				return -ERANGE;
		}
	}

	/* second pass: everything checked out, commit all changes */
	for (sop = sops; sop < sops + nsops; sop++) {
		curr = &sma->sems[sop->sem_num];
		sem_op = sop->sem_op;
		result = curr->semval;

		if (sop->sem_flg & SEM_UNDO) {
			int undo = un->semadj[sop->sem_num] - sem_op;

			un->semadj[sop->sem_num] = undo;
		}
		curr->semval += sem_op;
		ipc_update_pid(&curr->sempid, q->pid);
	}

	return 0;

would_block:
	q->blocking = sop;
	return sop->sem_flg & IPC_NOWAIT ? -EAGAIN : 1;
}
763
/*
 * Queue @q's sleeper for a later wake_up_q() and publish the completion
 * status. Ordering matters: wake_q_add() provides the barrier so the
 * sleeper, once it observes a status other than the "still queued"
 * marker, can safely return.
 * NOTE(review): q->sleeper is not reference-counted here; presumably the
 * sleep side in do_semtimedop() (outside this chunk) guarantees the task
 * cannot exit before reading q->status — confirm before changing this.
 */
static inline void wake_up_sem_queue_prepare(struct sem_queue *q, int error,
					     struct wake_q_head *wake_q)
{
	wake_q_add(wake_q, q->sleeper);
	/*
	 * Rely on the above implicit barrier, such that we can
	 * ensure that we hold reference to the task before setting
	 * q->status. Otherwise we could race with do_exit if the
	 * task is awoken by an external event before calling
	 * wake_up_process().
	 */
	WRITE_ONCE(q->status, error);
}
777
/*
 * Remove @q from whichever pending list it sits on. Multi-sop entries
 * are tracked in complex_count, so keep that counter in sync.
 */
static void unlink_queue(struct sem_array *sma, struct sem_queue *q)
{
	list_del(&q->list);
	if (q->nsops > 1)
		sma->complex_count--;
}
784
785
786
787
788
789
790
791
792
793
794
/** check_restart(sma, q)
 * @sma: semaphore array
 * @q: the operation that just completed
 *
 * update_queue is O(N^2) when it restarts scanning the whole queue of
 * waiting operations. Therefore this function checks if the restart is
 * really necessary. It is called after a previously waiting operation
 * was completed.
 */
static inline int check_restart(struct sem_array *sma, struct sem_queue *q)
{
	/* pending complex alter operations are too difficult to analyse */
	if (!list_empty(&sma->pending_alter))
		return 1;

	/* we were a sleeping complex operation. Too difficult */
	if (q->nsops > 1)
		return 1;

	/*
	 * It is impossible that someone waits for the new value:
	 * - complex operations always restart.
	 * - wait-for-zero are handled separately.
	 * - q is a previously sleeping simple operation that
	 *   altered the array. It must be a decrement, because
	 *   simple increments never sleep.
	 * - If there are older (higher priority) decrements
	 *   in the queue, then they have observed the original
	 *   semval value and couldn't proceed. The operation
	 *   decremented to value - thus they won't proceed either.
	 */
	return 0;
}
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
/**
 * wake_const_ops - wake up non-alter tasks
 * @sma: semaphore array.
 * @semnum: semaphore that was modified, or -1 for the global queue.
 * @wake_q: lockless wake-queue head.
 *
 * wake_const_ops must be called after a semaphore in a semaphore array
 * was set to 0. If complex const operations are pending, wake_const_ops
 * must be called with semnum = -1, as well as with the number of each
 * modified semaphore.
 * The tasks that must be woken up are added to @wake_q; their result is
 * stored in q->status by wake_up_sem_queue_prepare().
 * The function returns 1 if at least one operation was completed
 * successfully.
 */
static int wake_const_ops(struct sem_array *sma, int semnum,
			  struct wake_q_head *wake_q)
{
	struct sem_queue *q, *tmp;
	struct list_head *pending_list;
	int semop_completed = 0;

	if (semnum == -1)
		pending_list = &sma->pending_const;
	else
		pending_list = &sma->sems[semnum].pending_const;

	list_for_each_entry_safe(q, tmp, pending_list, list) {
		int error = perform_atomic_semop(sma, q);

		/* still blocked: leave it queued */
		if (error > 0)
			continue;

		/* operation completed, remove from queue & wakeup */
		unlink_queue(sma, q);

		wake_up_sem_queue_prepare(q, error, wake_q);
		if (error == 0)
			semop_completed = 1;
	}

	return semop_completed;
}
860
861
862
863
864
865
866
867
868
869
870
871
/**
 * do_smart_wakeup_zero - wakeup all wait for zero tasks
 * @sma: semaphore array
 * @sops: operations that were performed, may be NULL
 * @nsops: number of operations
 * @wake_q: lockless wake-queue head
 *
 * Checks all required queue for wait-for-zero operations, based
 * on the actual changes that were performed on the semaphore array.
 * The function returns 1 if at least one operation was completed
 * successfully.
 */
static int do_smart_wakeup_zero(struct sem_array *sma, struct sembuf *sops,
				int nsops, struct wake_q_head *wake_q)
{
	int i;
	int semop_completed = 0;
	int got_zero = 0;

	/* first: the per-semaphore queues, if known */
	if (sops) {
		for (i = 0; i < nsops; i++) {
			int num = sops[i].sem_num;

			if (sma->sems[num].semval == 0) {
				got_zero = 1;
				semop_completed |= wake_const_ops(sma, num, wake_q);
			}
		}
	} else {
		/*
		 * No sops means modified semaphores not known.
		 * Check the whole array.
		 */
		for (i = 0; i < sma->sem_nsems; i++) {
			if (sma->sems[i].semval == 0) {
				got_zero = 1;
				semop_completed |= wake_const_ops(sma, i, wake_q);
			}
		}
	}
	/*
	 * If one of the modified semaphores got 0,
	 * then check the global queue, too.
	 */
	if (got_zero)
		semop_completed |= wake_const_ops(sma, -1, wake_q);

	return semop_completed;
}
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
/**
 * update_queue - look for tasks that can be completed.
 * @sma: semaphore array.
 * @semnum: semaphore that was modified, or -1 for the global queue.
 * @wake_q: lockless wake-queue head.
 *
 * update_queue must be called after a semaphore in a semaphore array
 * was modified. If multiple semaphores were modified, update_queue must
 * be called with semnum = -1, as well as with the number of each modified
 * semaphore.
 * The tasks that must be woken up are added to @wake_q.
 * The function internally checks if const operations can now succeed.
 *
 * The function return 1 if at least one semop was completed successfully.
 */
static int update_queue(struct sem_array *sma, int semnum, struct wake_q_head *wake_q)
{
	struct sem_queue *q, *tmp;
	struct list_head *pending_list;
	int semop_completed = 0;

	if (semnum == -1)
		pending_list = &sma->pending_alter;
	else
		pending_list = &sma->sems[semnum].pending_alter;

again:
	list_for_each_entry_safe(q, tmp, pending_list, list) {
		int error, restart;

		/*
		 * If we are scanning a single semaphore and that semaphore
		 * is 0, then it does not make sense to scan further: all
		 * simple decrements on this queue would block, and simple
		 * increments never sleep here in the first place.
		 */
		if (semnum != -1 && sma->sems[semnum].semval == 0)
			break;

		error = perform_atomic_semop(sma, q);

		/* Does q->sleeper still need to sleep? */
		if (error > 0)
			continue;

		unlink_queue(sma, q);

		if (error) {
			restart = 0;
		} else {
			semop_completed = 1;
			do_smart_wakeup_zero(sma, q->sops, q->nsops, wake_q);
			restart = check_restart(sma, q);
		}

		wake_up_sem_queue_prepare(q, error, wake_q);
		if (restart)
			goto again;
	}
	return semop_completed;
}
975
976
977
978
979
980
981
982
983
984static void set_semotime(struct sem_array *sma, struct sembuf *sops)
985{
986 if (sops == NULL) {
987 sma->sems[0].sem_otime = get_seconds();
988 } else {
989 sma->sems[sops[0].sem_num].sem_otime =
990 get_seconds();
991 }
992}
993
994
995
996
997
998
999
1000
1001
1002
1003
1004
1005
1006
1007
/**
 * do_smart_update - optimized update_queue
 * @sma: semaphore array
 * @sops: operations that were performed
 * @nsops: number of operations
 * @otime: force setting otime
 * @wake_q: lockless wake-queue head
 *
 * do_smart_update() does the required calls to update_queue and
 * do_smart_wakeup_zero, based on the actual changes that were performed
 * on the semaphore array.
 * Note that the function does not do the actual wake-up: the caller is
 * responsible for calling wake_up_q().
 * It is safe to perform this call after dropping all locks.
 */
static void do_smart_update(struct sem_array *sma, struct sembuf *sops, int nsops,
			    int otime, struct wake_q_head *wake_q)
{
	int i;

	otime |= do_smart_wakeup_zero(sma, sops, nsops, wake_q);

	if (!list_empty(&sma->pending_alter)) {
		/* semaphore array uses the global queue - just process it */
		otime |= update_queue(sma, -1, wake_q);
	} else {
		if (!sops) {
			/*
			 * No sops, thus the modified semaphores are not
			 * known. Check all.
			 */
			for (i = 0; i < sma->sem_nsems; i++)
				otime |= update_queue(sma, i, wake_q);
		} else {
			/*
			 * Check the semaphores that were increased:
			 * - No complex ops, thus all sleeping ops are
			 *   decrease.
			 * - if we decreased the value, then any sleeping
			 *   semaphore ops would still block.
			 */
			for (i = 0; i < nsops; i++) {
				if (sops[i].sem_op > 0) {
					otime |= update_queue(sma,
							      sops[i].sem_num, wake_q);
				}
			}
		}
	}
	if (otime)
		set_semotime(sma, sops);
}
1047
1048
1049
1050
/*
 * check_qop: Test if a queued operation sleeps on the semaphore semnum
 * (used by GETNCNT/GETZCNT to count sleepers on complex queues).
 */
static int check_qop(struct sem_array *sma, int semnum, struct sem_queue *q,
		     bool count_zero)
{
	struct sembuf *sop = q->blocking;

	/*
	 * Linux always (since 0.99.10) reported a task as sleeping on all
	 * semaphores. This violates SUS, therefore it was changed to the
	 * standard compliant behavior: count only the operation that
	 * actually blocked (q->blocking). Warn once in case anyone relied
	 * on the old behavior.
	 */
	pr_info_once("semctl(GETNCNT/GETZCNT) is since 3.16 Single Unix Specification compliant.\n"
		     "The task %s (%d) triggered the difference, watch for misbehavior.\n",
		     current->comm, task_pid_nr(current));

	if (sop->sem_num != semnum)
		return 0;

	if (count_zero && sop->sem_op == 0)
		return 1;
	if (!count_zero && sop->sem_op < 0)
		return 1;

	return 0;
}
1077
1078
1079
1080
1081
1082
1083
1084
/* The following counts are associated to each semaphore:
 *   semncnt        number of tasks waiting on semval being nonzero
 *   semzcnt        number of tasks waiting on semval being zero
 *
 * Per definition, a task waits only on the semaphore of the first semop
 * that cannot proceed, even if additional operation would block, too.
 */
static int count_semcnt(struct sem_array *sma, ushort semnum,
			bool count_zero)
{
	struct list_head *l;
	struct sem_queue *q;
	int semcnt;

	semcnt = 0;
	/* First: check the simple operations. They are easy to evaluate */
	if (count_zero)
		l = &sma->sems[semnum].pending_const;
	else
		l = &sma->sems[semnum].pending_alter;

	list_for_each_entry(q, l, list) {
		/*
		 * All simple operations on this per-semaphore queue
		 * necessarily sleep on exactly this semaphore.
		 */
		semcnt++;
	}

	/* Then: check the complex operations. */
	list_for_each_entry(q, &sma->pending_alter, list) {
		semcnt += check_qop(sma, semnum, q, count_zero);
	}
	if (count_zero) {
		list_for_each_entry(q, &sma->pending_const, list) {
			semcnt += check_qop(sma, semnum, q, count_zero);
		}
	}
	return semcnt;
}
1117
1118
1119
1120
1121
/* Free a semaphore set. freeary() is called with sem_ids.rwsem locked
 * as a writer and the global lock for this semaphore set held.
 * sem_ids.rwsem remains locked on exit.
 */
static void freeary(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
{
	struct sem_undo *un, *tu;
	struct sem_queue *q, *tq;
	struct sem_array *sma = container_of(ipcp, struct sem_array, sem_perm);
	int i;
	DEFINE_WAKE_Q(wake_q);

	/* Free the existing undo structures for this semaphore set. */
	ipc_assert_locked_object(&sma->sem_perm);
	list_for_each_entry_safe(un, tu, &sma->list_id, list_id) {
		list_del(&un->list_id);
		spin_lock(&un->ulp->lock);
		un->semid = -1;
		list_del_rcu(&un->list_proc);
		spin_unlock(&un->ulp->lock);
		kfree_rcu(un, rcu);
	}

	/* Wake up all pending processes and let them fail with EIDRM. */
	list_for_each_entry_safe(q, tq, &sma->pending_const, list) {
		unlink_queue(sma, q);
		wake_up_sem_queue_prepare(q, -EIDRM, &wake_q);
	}

	list_for_each_entry_safe(q, tq, &sma->pending_alter, list) {
		unlink_queue(sma, q);
		wake_up_sem_queue_prepare(q, -EIDRM, &wake_q);
	}
	for (i = 0; i < sma->sem_nsems; i++) {
		struct sem *sem = &sma->sems[i];
		list_for_each_entry_safe(q, tq, &sem->pending_const, list) {
			unlink_queue(sma, q);
			wake_up_sem_queue_prepare(q, -EIDRM, &wake_q);
		}
		list_for_each_entry_safe(q, tq, &sem->pending_alter, list) {
			unlink_queue(sma, q);
			wake_up_sem_queue_prepare(q, -EIDRM, &wake_q);
		}
		ipc_update_pid(&sem->sempid, NULL);
	}

	/* Remove the semaphore set from the IDR, then drop the locks. */
	sem_rmid(ns, sma);
	sem_unlock(sma, -1);
	rcu_read_unlock();

	wake_up_q(&wake_q);
	ns->used_sems -= sma->sem_nsems;
	/* final reference: the array is freed via RCU once readers finish */
	ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
}
1173
/*
 * Copy a semid64_ds out to userspace in the format the caller asked for
 * (IPC_64 native layout, or the legacy IPC_OLD semid_ds).
 * Returns non-zero on fault (note: -EINVAL is returned through an
 * unsigned long, matching the other ipc copy helpers).
 */
static unsigned long copy_semid_to_user(void __user *buf, struct semid64_ds *in, int version)
{
	switch (version) {
	case IPC_64:
		return copy_to_user(buf, in, sizeof(*in));
	case IPC_OLD:
	{
		struct semid_ds out;

		memset(&out, 0, sizeof(out));

		ipc64_perm_to_ipc_perm(&in->sem_perm, &out.sem_perm);

		/* legacy fields are narrower; values may truncate */
		out.sem_otime = in->sem_otime;
		out.sem_ctime = in->sem_ctime;
		out.sem_nsems = in->sem_nsems;

		return copy_to_user(buf, &out, sizeof(out));
	}
	default:
		return -EINVAL;
	}
}
1197
1198static time64_t get_semotime(struct sem_array *sma)
1199{
1200 int i;
1201 time64_t res;
1202
1203 res = sma->sems[0].sem_otime;
1204 for (i = 1; i < sma->sem_nsems; i++) {
1205 time64_t to = sma->sems[i].sem_otime;
1206
1207 if (to > res)
1208 res = to;
1209 }
1210 return res;
1211}
1212
/*
 * semctl(IPC_STAT/SEM_STAT/SEM_STAT_ANY): fill @semid64 with the set's
 * permissions, timestamps and size.
 * For SEM_STAT*, @semid is an index and the real ipc id is returned;
 * for IPC_STAT, @semid is an id and 0 is returned. Negative errno on
 * failure.
 */
static int semctl_stat(struct ipc_namespace *ns, int semid,
		       int cmd, struct semid64_ds *semid64)
{
	struct sem_array *sma;
	int id = 0;
	int err;

	memset(semid64, 0, sizeof(*semid64));

	rcu_read_lock();
	if (cmd == SEM_STAT || cmd == SEM_STAT_ANY) {
		/* index-based lookup, no sequence check */
		sma = sem_obtain_object(ns, semid);
		if (IS_ERR(sma)) {
			err = PTR_ERR(sma);
			goto out_unlock;
		}
		id = sma->sem_perm.id;
	} else { /* IPC_STAT */
		sma = sem_obtain_object_check(ns, semid);
		if (IS_ERR(sma)) {
			err = PTR_ERR(sma);
			goto out_unlock;
		}
	}

	/* SEM_STAT_ANY intentionally skips the read-permission check */
	if (cmd == SEM_STAT_ANY)
		audit_ipc_obj(&sma->sem_perm);
	else {
		err = -EACCES;
		if (ipcperms(ns, &sma->sem_perm, S_IRUGO))
			goto out_unlock;
	}

	err = security_sem_semctl(&sma->sem_perm, cmd);
	if (err)
		goto out_unlock;

	ipc_lock_object(&sma->sem_perm);

	/* the set may have been removed between lookup and locking */
	if (!ipc_valid_object(&sma->sem_perm)) {
		ipc_unlock_object(&sma->sem_perm);
		err = -EIDRM;
		goto out_unlock;
	}

	kernel_to_ipc64_perm(&sma->sem_perm, &semid64->sem_perm);
	semid64->sem_otime = get_semotime(sma);
	semid64->sem_ctime = sma->sem_ctime;
	semid64->sem_nsems = sma->sem_nsems;

	ipc_unlock_object(&sma->sem_perm);
	rcu_read_unlock();
	return id;

out_unlock:
	rcu_read_unlock();
	return err;
}
1272
/*
 * semctl(IPC_INFO/SEM_INFO): report the namespace limits. SEM_INFO
 * additionally reports current usage in semusz/semaem. Returns the
 * highest in-use index (>= 0) or a negative errno.
 */
static int semctl_info(struct ipc_namespace *ns, int semid,
		       int cmd, void __user *p)
{
	struct seminfo seminfo;
	int max_id;
	int err;

	err = security_sem_semctl(NULL, cmd);
	if (err)
		return err;

	memset(&seminfo, 0, sizeof(seminfo));
	seminfo.semmni = ns->sc_semmni;
	seminfo.semmns = ns->sc_semmns;
	seminfo.semmsl = ns->sc_semmsl;
	seminfo.semopm = ns->sc_semopm;
	seminfo.semvmx = SEMVMX;
	seminfo.semmnu = SEMMNU;
	seminfo.semmap = SEMMAP;
	seminfo.semume = SEMUME;
	down_read(&sem_ids(ns).rwsem);
	if (cmd == SEM_INFO) {
		/* historical quirk: usage is reported in these two fields */
		seminfo.semusz = sem_ids(ns).in_use;
		seminfo.semaem = ns->used_sems;
	} else {
		seminfo.semusz = SEMUSZ;
		seminfo.semaem = SEMAEM;
	}
	max_id = ipc_get_maxid(&sem_ids(ns));
	up_read(&sem_ids(ns).rwsem);
	if (copy_to_user(p, &seminfo, sizeof(struct seminfo)))
		return -EFAULT;
	return (max_id < 0) ? 0 : max_id;
}
1307
/*
 * semctl(SETVAL): set one semaphore's value, clear all undo adjustments
 * for it, update sem_ctime and wake any operations that can now proceed.
 */
static int semctl_setval(struct ipc_namespace *ns, int semid, int semnum,
			 int val)
{
	struct sem_undo *un;
	struct sem_array *sma;
	struct sem *curr;
	int err;
	DEFINE_WAKE_Q(wake_q);

	if (val > SEMVMX || val < 0)
		return -ERANGE;

	rcu_read_lock();
	sma = sem_obtain_object_check(ns, semid);
	if (IS_ERR(sma)) {
		rcu_read_unlock();
		return PTR_ERR(sma);
	}

	if (semnum < 0 || semnum >= sma->sem_nsems) {
		rcu_read_unlock();
		return -EINVAL;
	}


	if (ipcperms(ns, &sma->sem_perm, S_IWUGO)) {
		rcu_read_unlock();
		return -EACCES;
	}

	err = security_sem_semctl(&sma->sem_perm, SETVAL);
	if (err) {
		rcu_read_unlock();
		return -EACCES;
	}

	sem_lock(sma, NULL, -1);

	/* recheck after locking: the set may have been removed */
	if (!ipc_valid_object(&sma->sem_perm)) {
		sem_unlock(sma, -1);
		rcu_read_unlock();
		return -EIDRM;
	}

	curr = &sma->sems[semnum];

	/* clear this semaphore's undo adjustments in every undo struct */
	ipc_assert_locked_object(&sma->sem_perm);
	list_for_each_entry(un, &sma->list_id, list_id)
		un->semadj[semnum] = 0;

	curr->semval = val;
	ipc_update_pid(&curr->sempid, task_tgid(current));
	sma->sem_ctime = ktime_get_real_seconds();

	/* maybe some queued-up processes were waiting for this */
	do_smart_update(sma, NULL, 0, 0, &wake_q);
	sem_unlock(sma, -1);
	rcu_read_unlock();
	wake_up_q(&wake_q);
	return 0;
}
1368
/*
 * semctl() commands that operate on the whole set (GETALL/SETALL) or on
 * one semaphore (GETVAL/GETPID/GETNCNT/GETZCNT).
 *
 * The GETALL/SETALL paths may need an allocation larger than the
 * on-stack buffer; in that case all locks are dropped around the
 * allocation/copy (with a reference held via ipc_rcu_getref()) and the
 * set is revalidated with ipc_valid_object() after relocking.
 */
static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
		int cmd, void __user *p)
{
	struct sem_array *sma;
	struct sem *curr;
	int err, nsems;
	ushort fast_sem_io[SEMMSL_FAST];
	ushort *sem_io = fast_sem_io;
	DEFINE_WAKE_Q(wake_q);

	rcu_read_lock();
	sma = sem_obtain_object_check(ns, semid);
	if (IS_ERR(sma)) {
		rcu_read_unlock();
		return PTR_ERR(sma);
	}

	nsems = sma->sem_nsems;

	err = -EACCES;
	if (ipcperms(ns, &sma->sem_perm, cmd == SETALL ? S_IWUGO : S_IRUGO))
		goto out_rcu_wakeup;

	err = security_sem_semctl(&sma->sem_perm, cmd);
	if (err)
		goto out_rcu_wakeup;

	err = -EACCES;
	switch (cmd) {
	case GETALL:
	{
		ushort __user *array = p;
		int i;

		sem_lock(sma, NULL, -1);
		if (!ipc_valid_object(&sma->sem_perm)) {
			err = -EIDRM;
			goto out_unlock;
		}
		if (nsems > SEMMSL_FAST) {
			/* too big for the stack buffer: drop locks, kvmalloc */
			if (!ipc_rcu_getref(&sma->sem_perm)) {
				err = -EIDRM;
				goto out_unlock;
			}
			sem_unlock(sma, -1);
			rcu_read_unlock();
			sem_io = kvmalloc_array(nsems, sizeof(ushort),
						GFP_KERNEL);
			if (sem_io == NULL) {
				ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
				return -ENOMEM;
			}

			rcu_read_lock();
			sem_lock_and_putref(sma);
			if (!ipc_valid_object(&sma->sem_perm)) {
				err = -EIDRM;
				goto out_unlock;
			}
		}
		for (i = 0; i < sma->sem_nsems; i++)
			sem_io[i] = sma->sems[i].semval;
		sem_unlock(sma, -1);
		rcu_read_unlock();
		err = 0;
		if (copy_to_user(array, sem_io, nsems*sizeof(ushort)))
			err = -EFAULT;
		goto out_free;
	}
	case SETALL:
	{
		int i;
		struct sem_undo *un;

		if (!ipc_rcu_getref(&sma->sem_perm)) {
			err = -EIDRM;
			goto out_rcu_wakeup;
		}
		rcu_read_unlock();

		if (nsems > SEMMSL_FAST) {
			sem_io = kvmalloc_array(nsems, sizeof(ushort),
						GFP_KERNEL);
			if (sem_io == NULL) {
				ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
				return -ENOMEM;
			}
		}

		/* copy + validate while unlocked; commit under the lock */
		if (copy_from_user(sem_io, p, nsems*sizeof(ushort))) {
			ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
			err = -EFAULT;
			goto out_free;
		}

		for (i = 0; i < nsems; i++) {
			if (sem_io[i] > SEMVMX) {
				ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
				err = -ERANGE;
				goto out_free;
			}
		}
		rcu_read_lock();
		sem_lock_and_putref(sma);
		if (!ipc_valid_object(&sma->sem_perm)) {
			err = -EIDRM;
			goto out_unlock;
		}

		for (i = 0; i < nsems; i++) {
			sma->sems[i].semval = sem_io[i];
			ipc_update_pid(&sma->sems[i].sempid, task_tgid(current));
		}

		/* SETALL clears all undo adjustments */
		ipc_assert_locked_object(&sma->sem_perm);
		list_for_each_entry(un, &sma->list_id, list_id) {
			for (i = 0; i < nsems; i++)
				un->semadj[i] = 0;
		}
		sma->sem_ctime = ktime_get_real_seconds();
		/* maybe some queued-up processes were waiting for this */
		do_smart_update(sma, NULL, 0, 0, &wake_q);
		err = 0;
		goto out_unlock;
	}
	/* GETVAL, GETPID, GETNCTN, GETZCNT: fall-through */
	}
	err = -EINVAL;
	if (semnum < 0 || semnum >= nsems)
		goto out_rcu_wakeup;

	sem_lock(sma, NULL, -1);
	if (!ipc_valid_object(&sma->sem_perm)) {
		err = -EIDRM;
		goto out_unlock;
	}
	curr = &sma->sems[semnum];

	switch (cmd) {
	case GETVAL:
		err = curr->semval;
		goto out_unlock;
	case GETPID:
		err = pid_vnr(curr->sempid);
		goto out_unlock;
	case GETNCNT:
		err = count_semcnt(sma, semnum, 0);
		goto out_unlock;
	case GETZCNT:
		err = count_semcnt(sma, semnum, 1);
		goto out_unlock;
	}

out_unlock:
	sem_unlock(sma, -1);
out_rcu_wakeup:
	rcu_read_unlock();
	wake_up_q(&wake_q);
out_free:
	if (sem_io != fast_sem_io)
		kvfree(sem_io);
	return err;
}
1532
/*
 * Copy a semid64_ds in from userspace (IPC_64 native layout, or the
 * legacy IPC_OLD semid_ds). Only the permission fields are meaningful
 * to IPC_SET. Returns 0 or -EFAULT/-EINVAL (as unsigned long, matching
 * copy_semid_to_user()).
 */
static inline unsigned long
copy_semid_from_user(struct semid64_ds *out, void __user *buf, int version)
{
	switch (version) {
	case IPC_64:
		if (copy_from_user(out, buf, sizeof(*out)))
			return -EFAULT;
		return 0;
	case IPC_OLD:
	{
		struct semid_ds tbuf_old;

		if (copy_from_user(&tbuf_old, buf, sizeof(tbuf_old)))
			return -EFAULT;

		out->sem_perm.uid = tbuf_old.sem_perm.uid;
		out->sem_perm.gid = tbuf_old.sem_perm.gid;
		out->sem_perm.mode = tbuf_old.sem_perm.mode;

		return 0;
	}
	default:
		return -EINVAL;
	}
}
1558
1559
1560
1561
1562
1563
/*
 * This function handles some semctl commands which require the rwsem
 * to be held in write mode: IPC_RMID and IPC_SET.
 * NOTE: no locks must be held, the rwsem is taken inside this function.
 */
static int semctl_down(struct ipc_namespace *ns, int semid,
		       int cmd, struct semid64_ds *semid64)
{
	struct sem_array *sma;
	int err;
	struct kern_ipc_perm *ipcp;

	down_write(&sem_ids(ns).rwsem);
	rcu_read_lock();

	/* also performs the ownership/capability check for these cmds */
	ipcp = ipcctl_pre_down_nolock(ns, &sem_ids(ns), semid, cmd,
				      &semid64->sem_perm, 0);
	if (IS_ERR(ipcp)) {
		err = PTR_ERR(ipcp);
		goto out_unlock1;
	}

	sma = container_of(ipcp, struct sem_array, sem_perm);

	err = security_sem_semctl(&sma->sem_perm, cmd);
	if (err)
		goto out_unlock1;

	switch (cmd) {
	case IPC_RMID:
		sem_lock(sma, NULL, -1);
		/* freeary() unlocks the ipc object and rcu */
		freeary(ns, ipcp);
		goto out_up;
	case IPC_SET:
		sem_lock(sma, NULL, -1);
		err = ipc_update_perm(&semid64->sem_perm, ipcp);
		if (err)
			goto out_unlock0;
		sma->sem_ctime = ktime_get_real_seconds();
		break;
	default:
		err = -EINVAL;
		goto out_unlock1;
	}

out_unlock0:
	sem_unlock(sma, -1);
out_unlock1:
	rcu_read_unlock();
out_up:
	up_write(&sem_ids(ns).rwsem);
	return err;
}
1613
/*
 * semctl() entry point: strip the IPC_64 flag from cmd and dispatch to
 * the per-command helpers.
 */
long ksys_semctl(int semid, int semnum, int cmd, unsigned long arg)
{
	int version;
	struct ipc_namespace *ns;
	void __user *p = (void __user *)arg;
	struct semid64_ds semid64;
	int err;

	if (semid < 0)
		return -EINVAL;

	version = ipc_parse_version(&cmd);
	ns = current->nsproxy->ipc_ns;

	switch (cmd) {
	case IPC_INFO:
	case SEM_INFO:
		return semctl_info(ns, semid, cmd, p);
	case IPC_STAT:
	case SEM_STAT:
	case SEM_STAT_ANY:
		err = semctl_stat(ns, semid, cmd, &semid64);
		if (err < 0)
			return err;
		if (copy_semid_to_user(p, &semid64, version))
			err = -EFAULT;
		return err;
	case GETALL:
	case GETVAL:
	case GETPID:
	case GETNCNT:
	case GETZCNT:
	case SETALL:
		return semctl_main(ns, semid, semnum, cmd, p);
	case SETVAL: {
		int val;
#if defined(CONFIG_64BIT) && defined(__BIG_ENDIAN)
		/* big-endian 64bit: the int value sits in the upper half */
		val = arg >> 32;
#else
		/* 32bit or little endian: use the value as-is */
		val = arg;
#endif
		return semctl_setval(ns, semid, semnum, val);
	}
	case IPC_SET:
		if (copy_semid_from_user(&semid64, p, version))
			return -EFAULT;
		/* fall through - both IPC_SET and IPC_RMID use semctl_down() */
	case IPC_RMID:
		return semctl_down(ns, semid, cmd, &semid64);
	default:
		return -EINVAL;
	}
}
1668
/* semctl(2) syscall wrapper. */
SYSCALL_DEFINE4(semctl, int, semid, int, semnum, int, cmd, unsigned long, arg)
{
	return ksys_semctl(semid, semnum, cmd, arg);
}
1673
1674#ifdef CONFIG_COMPAT
1675
/* Legacy (IPC_OLD) semid_ds layout as seen by 32-bit compat userspace. */
struct compat_semid_ds {
	struct compat_ipc_perm sem_perm;
	compat_time_t sem_otime;
	compat_time_t sem_ctime;
	compat_uptr_t sem_base;		/* historical kernel pointers: */
	compat_uptr_t sem_pending;	/* never filled in, kept only to */
	compat_uptr_t sem_pending_last;	/* preserve the ABI layout */
	compat_uptr_t undo;
	unsigned short sem_nsems;
};
1686
/*
 * Compat counterpart of copy_semid_from_user(): only the permission
 * fields are read in (the rest of *out is zeroed).
 */
static int copy_compat_semid_from_user(struct semid64_ds *out, void __user *buf,
				       int version)
{
	memset(out, 0, sizeof(*out));
	if (version == IPC_64) {
		struct compat_semid64_ds __user *p = buf;
		return get_compat_ipc64_perm(&out->sem_perm, &p->sem_perm);
	} else {
		struct compat_semid_ds __user *p = buf;
		return get_compat_ipc_perm(&out->sem_perm, &p->sem_perm);
	}
}
1699
1700static int copy_compat_semid_to_user(void __user *buf, struct semid64_ds *in,
1701 int version)
1702{
1703 if (version == IPC_64) {
1704 struct compat_semid64_ds v;
1705 memset(&v, 0, sizeof(v));
1706 to_compat_ipc64_perm(&v.sem_perm, &in->sem_perm);
1707 v.sem_otime = in->sem_otime;
1708 v.sem_ctime = in->sem_ctime;
1709 v.sem_nsems = in->sem_nsems;
1710 return copy_to_user(buf, &v, sizeof(v));
1711 } else {
1712 struct compat_semid_ds v;
1713 memset(&v, 0, sizeof(v));
1714 to_compat_ipc_perm(&v.sem_perm, &in->sem_perm);
1715 v.sem_otime = in->sem_otime;
1716 v.sem_ctime = in->sem_ctime;
1717 v.sem_nsems = in->sem_nsems;
1718 return copy_to_user(buf, &v, sizeof(v));
1719 }
1720}
1721
/*
 * compat_ksys_semctl - 32-bit-compat semctl() implementation
 * @semid:  semaphore set identifier
 * @semnum: index of the semaphore within the set
 * @cmd:    control command, possibly OR'd with IPC_64
 * @arg:    command-dependent argument (compat pointer or raw value)
 *
 * Mirrors ksys_semctl() but uses the compat copy helpers and compat
 * pointer conversion.  Returns 0 or a non-negative result on success,
 * negative errno on failure.
 */
long compat_ksys_semctl(int semid, int semnum, int cmd, int arg)
{
	void __user *p = compat_ptr(arg);
	struct ipc_namespace *ns;
	struct semid64_ds semid64;
	int version = compat_ipc_parse_version(&cmd);
	int err;

	ns = current->nsproxy->ipc_ns;

	if (semid < 0)
		return -EINVAL;

	/* mask off IPC_64: the flag only selects the copy format */
	switch (cmd & (~IPC_64)) {
	case IPC_INFO:
	case SEM_INFO:
		return semctl_info(ns, semid, cmd, p);
	case IPC_STAT:
	case SEM_STAT:
	case SEM_STAT_ANY:
		err = semctl_stat(ns, semid, cmd, &semid64);
		if (err < 0)
			return err;
		if (copy_compat_semid_to_user(p, &semid64, version))
			err = -EFAULT;
		return err;
	case GETVAL:
	case GETPID:
	case GETNCNT:
	case GETZCNT:
	case GETALL:
	case SETALL:
		return semctl_main(ns, semid, semnum, cmd, p);
	case SETVAL:
		return semctl_setval(ns, semid, semnum, arg);
	case IPC_SET:
		if (copy_compat_semid_from_user(&semid64, p, version))
			return -EFAULT;
		/* fall through - IPC_SET and IPC_RMID share semctl_down() */

	case IPC_RMID:
		return semctl_down(ns, semid, cmd, &semid64);
	default:
		return -EINVAL;
	}
}
1767
/* compat semctl(2) syscall entry point: wrapper around compat_ksys_semctl(). */
COMPAT_SYSCALL_DEFINE4(semctl, int, semid, int, semnum, int, cmd, int, arg)
{
	return compat_ksys_semctl(semid, semnum, cmd, arg);
}
1772#endif
1773
1774
1775
1776
1777
1778
1779
1780
1781
1782
1783
1784
1785static inline int get_undo_list(struct sem_undo_list **undo_listp)
1786{
1787 struct sem_undo_list *undo_list;
1788
1789 undo_list = current->sysvsem.undo_list;
1790 if (!undo_list) {
1791 undo_list = kzalloc(sizeof(*undo_list), GFP_KERNEL);
1792 if (undo_list == NULL)
1793 return -ENOMEM;
1794 spin_lock_init(&undo_list->lock);
1795 refcount_set(&undo_list->refcnt, 1);
1796 INIT_LIST_HEAD(&undo_list->list_proc);
1797
1798 current->sysvsem.undo_list = undo_list;
1799 }
1800 *undo_listp = undo_list;
1801 return 0;
1802}
1803
1804static struct sem_undo *__lookup_undo(struct sem_undo_list *ulp, int semid)
1805{
1806 struct sem_undo *un;
1807
1808 list_for_each_entry_rcu(un, &ulp->list_proc, list_proc) {
1809 if (un->semid == semid)
1810 return un;
1811 }
1812 return NULL;
1813}
1814
1815static struct sem_undo *lookup_undo(struct sem_undo_list *ulp, int semid)
1816{
1817 struct sem_undo *un;
1818
1819 assert_spin_locked(&ulp->lock);
1820
1821 un = __lookup_undo(ulp, semid);
1822 if (un) {
1823 list_del_rcu(&un->list_proc);
1824 list_add_rcu(&un->list_proc, &ulp->list_proc);
1825 }
1826 return un;
1827}
1828
1829
1830
1831
1832
1833
1834
1835
1836
1837
1838
1839
/*
 * find_alloc_undo - look up (and if necessary allocate) the undo structure
 * @ns:    ipc namespace
 * @semid: semaphore set identifier
 *
 * Returns the undo structure for @semid in current's undo list, allocating
 * one if it does not yet exist.  The allocation is done with GFP_KERNEL and
 * therefore cannot happen while holding spinlocks; the function drops all
 * locks around kzalloc() and re-validates everything afterwards.
 *
 * The function returns either the found/created undo structure or an
 * ERR_PTR.  On success the caller holds rcu_read_lock() (taken here and
 * deliberately not dropped); lifetime of the result is then guaranteed
 * until rcu_read_unlock().
 */
static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid)
{
	struct sem_array *sma;
	struct sem_undo_list *ulp;
	struct sem_undo *un, *new;
	int nsems, error;

	error = get_undo_list(&ulp);
	if (error)
		return ERR_PTR(error);

	rcu_read_lock();
	spin_lock(&ulp->lock);
	un = lookup_undo(ulp, semid);
	spin_unlock(&ulp->lock);
	if (likely(un != NULL))
		goto out;

	/* no undo structure around - allocate one. */
	/* step 1: figure out the size of the semaphore array */
	sma = sem_obtain_object_check(ns, semid);
	if (IS_ERR(sma)) {
		rcu_read_unlock();
		return ERR_CAST(sma);
	}

	nsems = sma->sem_nsems;
	/* pin the array so it cannot be freed while we sleep in kzalloc() */
	if (!ipc_rcu_getref(&sma->sem_perm)) {
		rcu_read_unlock();
		un = ERR_PTR(-EIDRM);
		goto out;
	}
	rcu_read_unlock();

	/* step 2: allocate new undo structure (semadj array is appended) */
	new = kzalloc(sizeof(struct sem_undo) + sizeof(short)*nsems, GFP_KERNEL);
	if (!new) {
		ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
		return ERR_PTR(-ENOMEM);
	}

	/* step 3: Acquire the lock on semaphore array */
	rcu_read_lock();
	sem_lock_and_putref(sma);
	if (!ipc_valid_object(&sma->sem_perm)) {
		/* the array was removed while we slept - give up */
		sem_unlock(sma, -1);
		rcu_read_unlock();
		kfree(new);
		un = ERR_PTR(-EIDRM);
		goto out;
	}
	spin_lock(&ulp->lock);

	/*
	 * step 4: check for races: did someone else allocate the undo
	 * structure while we dropped the locks?
	 */
	un = lookup_undo(ulp, semid);
	if (un) {
		kfree(new);
		goto success;
	}
	/* step 5: initialize & link new undo structure */
	new->semadj = (short *) &new[1];
	new->ulp = ulp;
	new->semid = semid;
	assert_spin_locked(&ulp->lock);
	list_add_rcu(&new->list_proc, &ulp->list_proc);
	ipc_assert_locked_object(&sma->sem_perm);
	list_add(&new->list_id, &sma->list_id);
	un = new;

success:
	spin_unlock(&ulp->lock);
	sem_unlock(sma, -1);
out:
	return un;
}
1917
/*
 * do_semtimedop - perform a (possibly blocking) semaphore operation
 * @semid:   semaphore set identifier
 * @tsops:   userspace array of operations
 * @nsops:   number of operations in @tsops
 * @timeout: optional relative timeout; NULL means wait forever
 *
 * Common implementation behind semop(2) and semtimedop(2).  Copies the
 * operations in, validates them, and either performs them atomically or
 * puts the caller to sleep on the appropriate pending queue until the
 * operations can complete, the timeout expires, a signal arrives, or the
 * array is removed.  Returns 0 on success or a negative errno.
 */
static long do_semtimedop(int semid, struct sembuf __user *tsops,
		unsigned nsops, const struct timespec64 *timeout)
{
	int error = -EINVAL;
	struct sem_array *sma;
	struct sembuf fast_sops[SEMOPM_FAST];	/* avoids kvmalloc for small ops */
	struct sembuf *sops = fast_sops, *sop;
	struct sem_undo *un;
	int max, locknum;
	bool undos = false, alter = false, dupsop = false;
	struct sem_queue queue;
	unsigned long dup = 0, jiffies_left = 0;
	struct ipc_namespace *ns;

	ns = current->nsproxy->ipc_ns;

	if (nsops < 1 || semid < 0)
		return -EINVAL;
	if (nsops > ns->sc_semopm)
		return -E2BIG;
	if (nsops > SEMOPM_FAST) {
		sops = kvmalloc(sizeof(*sops)*nsops, GFP_KERNEL);
		if (sops == NULL)
			return -ENOMEM;
	}

	if (copy_from_user(sops, tsops, nsops * sizeof(*tsops))) {
		error = -EFAULT;
		goto out_free;
	}

	if (timeout) {
		if (timeout->tv_sec < 0 || timeout->tv_nsec < 0 ||
			timeout->tv_nsec >= 1000000000L) {
			error = -EINVAL;
			goto out_free;
		}
		jiffies_left = timespec64_to_jiffies(timeout);
	}

	/*
	 * Scan the operations: record the highest semaphore index, whether
	 * any op uses SEM_UNDO, whether any op alters semaphore values, and
	 * whether the same semaphore is altered more than once (dupsop).
	 */
	max = 0;
	for (sop = sops; sop < sops + nsops; sop++) {
		unsigned long mask = 1ULL << ((sop->sem_num) % BITS_PER_LONG);

		if (sop->sem_num >= max)
			max = sop->sem_num;
		if (sop->sem_flg & SEM_UNDO)
			undos = true;
		if (dup & mask) {
			/*
			 * There was a previous alter access that appears
			 * to have accessed the same semaphore.  The hash
			 * (mod BITS_PER_LONG) can produce false positives,
			 * but dupsop only disables an optimization, so
			 * that is acceptable.
			 */
			dupsop = true;
		}
		if (sop->sem_op != 0) {
			alter = true;
			dup |= mask;
		}
	}

	if (undos) {
		/* On success, find_alloc_undo takes the rcu_read_lock */
		un = find_alloc_undo(ns, semid);
		if (IS_ERR(un)) {
			error = PTR_ERR(un);
			goto out_free;
		}
	} else {
		un = NULL;
		rcu_read_lock();
	}

	sma = sem_obtain_object_check(ns, semid);
	if (IS_ERR(sma)) {
		rcu_read_unlock();
		error = PTR_ERR(sma);
		goto out_free;
	}

	error = -EFBIG;
	if (max >= sma->sem_nsems) {
		rcu_read_unlock();
		goto out_free;
	}

	error = -EACCES;
	if (ipcperms(ns, &sma->sem_perm, alter ? S_IWUGO : S_IRUGO)) {
		rcu_read_unlock();
		goto out_free;
	}

	error = security_sem_semop(&sma->sem_perm, sops, nsops, alter);
	if (error) {
		rcu_read_unlock();
		goto out_free;
	}

	error = -EIDRM;
	locknum = sem_lock(sma, sops, nsops);
	/*
	 * The array could have been removed between obtaining the object
	 * (lockless) and acquiring the lock, so re-validate it under the
	 * lock before doing anything.
	 */
	if (!ipc_valid_object(&sma->sem_perm))
		goto out_unlock_free;
	/*
	 * semid identifiers are not unique - find_alloc_undo may have
	 * allocated an undo structure, it was invalidated by an RMID
	 * and now a new array with the same id exists.  The undo
	 * structure is marked invalid (semid == -1) in that case; fail.
	 */
	if (un && un->semid == -1)
		goto out_unlock_free;

	queue.sops = sops;
	queue.nsops = nsops;
	queue.undo = un;
	queue.pid = task_tgid(current);
	queue.alter = alter;
	queue.dupsop = dupsop;

	error = perform_atomic_semop(sma, &queue);
	if (error == 0) {
		/* non-blocking successful path */
		DEFINE_WAKE_Q(wake_q);

		/*
		 * If the operation was successful, then do the things that
		 * are required for the update of the sleepers and otime.
		 */
		if (alter)
			do_smart_update(sma, sops, nsops, 1, &wake_q);
		else
			set_semotime(sma, sops);

		sem_unlock(sma, locknum);
		rcu_read_unlock();
		wake_up_q(&wake_q);

		goto out_free;
	}
	if (error < 0) /* non-blocking error path */
		goto out_unlock_free;

	/*
	 * We need to sleep on this operation, so we put the current
	 * task into the pending queue and go to sleep.
	 */
	if (nsops == 1) {
		struct sem *curr;
		curr = &sma->sems[sops->sem_num];

		if (alter) {
			if (sma->complex_count) {
				list_add_tail(&queue.list,
						&sma->pending_alter);
			} else {
				/* single-sop fast path: per-semaphore queue */
				list_add_tail(&queue.list,
						&curr->pending_alter);
			}
		} else {
			list_add_tail(&queue.list, &curr->pending_const);
		}
	} else {
		if (!sma->complex_count)
			merge_queues(sma);

		if (alter)
			list_add_tail(&queue.list, &sma->pending_alter);
		else
			list_add_tail(&queue.list, &sma->pending_const);

		sma->complex_count++;
	}

	do {
		queue.status = -EINTR;
		queue.sleeper = current;

		__set_current_state(TASK_INTERRUPTIBLE);
		sem_unlock(sma, locknum);
		rcu_read_unlock();

		if (timeout)
			jiffies_left = schedule_timeout(jiffies_left);
		else
			schedule();

		/*
		 * fastpath: the semop has completed, either successfully or
		 * not, from the syscall pov, is quite irrelevant to us at this
		 * point; we're done.
		 *
		 * We _do_ care, nonetheless, about being awoken by a signal or
		 * spuriously.  The queue.status is checked again in the
		 * slowpath (aka after taking sem_lock), such that we can
		 * detect scenarios where we were awakened externally, during
		 * the window between wake_q_add() and wake_up_q().
		 */
		error = READ_ONCE(queue.status);
		if (error != -EINTR) {
			/*
			 * User space could assume that semop() is a memory
			 * barrier: without the mb(), the cpu could
			 * speculatively read in userspace stale data that was
			 * overwritten by the previous owner of the semaphore.
			 */
			smp_mb();
			goto out_free;
		}

		rcu_read_lock();
		locknum = sem_lock(sma, sops, nsops);

		if (!ipc_valid_object(&sma->sem_perm))
			goto out_unlock_free;

		error = READ_ONCE(queue.status);

		/*
		 * If queue.status != -EINTR we are woken up by another
		 * process.  Leave without unlink_queue(), but with
		 * sem_unlock().
		 */
		if (error != -EINTR)
			goto out_unlock_free;

		/*
		 * If an interrupt occurred we have to clean up the queue.
		 */
		if (timeout && jiffies_left == 0)
			error = -EAGAIN;
	} while (error == -EINTR && !signal_pending(current)); /* spurious */

	unlink_queue(sma, &queue);

out_unlock_free:
	sem_unlock(sma, locknum);
	rcu_read_unlock();
out_free:
	if (sops != fast_sops)
		kvfree(sops);
	return error;
}
2169
2170long ksys_semtimedop(int semid, struct sembuf __user *tsops,
2171 unsigned int nsops, const struct timespec __user *timeout)
2172{
2173 if (timeout) {
2174 struct timespec64 ts;
2175 if (get_timespec64(&ts, timeout))
2176 return -EFAULT;
2177 return do_semtimedop(semid, tsops, nsops, &ts);
2178 }
2179 return do_semtimedop(semid, tsops, nsops, NULL);
2180}
2181
/* semtimedop(2) syscall entry point: thin wrapper around ksys_semtimedop(). */
SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
		unsigned int, nsops, const struct timespec __user *, timeout)
{
	return ksys_semtimedop(semid, tsops, nsops, timeout);
}
2187
2188#ifdef CONFIG_COMPAT
2189long compat_ksys_semtimedop(int semid, struct sembuf __user *tsems,
2190 unsigned int nsops,
2191 const struct compat_timespec __user *timeout)
2192{
2193 if (timeout) {
2194 struct timespec64 ts;
2195 if (compat_get_timespec64(&ts, timeout))
2196 return -EFAULT;
2197 return do_semtimedop(semid, tsems, nsops, &ts);
2198 }
2199 return do_semtimedop(semid, tsems, nsops, NULL);
2200}
2201
/* compat semtimedop(2) entry point: wrapper around compat_ksys_semtimedop(). */
COMPAT_SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsems,
		       unsigned int, nsops,
		       const struct compat_timespec __user *, timeout)
{
	return compat_ksys_semtimedop(semid, tsems, nsops, timeout);
}
2208#endif
2209
/* semop(2) syscall entry point: semtimedop without a timeout. */
SYSCALL_DEFINE3(semop, int, semid, struct sembuf __user *, tsops,
		unsigned, nsops)
{
	return do_semtimedop(semid, tsops, nsops, NULL);
}
2215
2216
2217
2218
2219
2220int copy_semundo(unsigned long clone_flags, struct task_struct *tsk)
2221{
2222 struct sem_undo_list *undo_list;
2223 int error;
2224
2225 if (clone_flags & CLONE_SYSVSEM) {
2226 error = get_undo_list(&undo_list);
2227 if (error)
2228 return error;
2229 refcount_inc(&undo_list->refcnt);
2230 tsk->sysvsem.undo_list = undo_list;
2231 } else
2232 tsk->sysvsem.undo_list = NULL;
2233
2234 return 0;
2235}
2236
2237
2238
2239
2240
2241
2242
2243
2244
2245
2246
2247
2248
/*
 * exit_sem - apply SEM_UNDO adjustments and free the task's undo list
 * @tsk: the exiting task
 *
 * Called at task exit.  When the last reference to the shared undo list
 * is dropped, walk every undo structure, apply the accumulated semadj
 * values to the corresponding semaphore sets (clamping to [0, SEMVMX]),
 * wake up any waiters that can now proceed, and free everything.
 */
void exit_sem(struct task_struct *tsk)
{
	struct sem_undo_list *ulp;

	ulp = tsk->sysvsem.undo_list;
	if (!ulp)
		return;
	tsk->sysvsem.undo_list = NULL;

	/* other CLONE_SYSVSEM tasks still share the list - nothing to do */
	if (!refcount_dec_and_test(&ulp->refcnt))
		return;

	for (;;) {
		struct sem_array *sma;
		struct sem_undo *un;
		int semid, i;
		DEFINE_WAKE_Q(wake_q);

		cond_resched();

		rcu_read_lock();
		un = list_entry_rcu(ulp->list_proc.next,
				    struct sem_undo, list_proc);
		if (&un->list_proc == &ulp->list_proc) {
			/*
			 * The list is empty.  We must wait for freeary()
			 * (which removes entries under ulp->lock) to finish
			 * before freeing ulp: acquiring and releasing the
			 * lock acts as a barrier against a concurrent
			 * freeary() that already unlinked our last entry.
			 */
			spin_lock(&ulp->lock);
			spin_unlock(&ulp->lock);
			rcu_read_unlock();
			break;
		}
		spin_lock(&ulp->lock);
		semid = un->semid;
		spin_unlock(&ulp->lock);

		/* exit_sem raced with IPC_RMID, entry already invalidated */
		if (semid == -1) {
			rcu_read_unlock();
			continue;
		}

		sma = sem_obtain_object_check(tsk->nsproxy->ipc_ns, semid);
		/* exit_sem raced with IPC_RMID, array is gone */
		if (IS_ERR(sma)) {
			rcu_read_unlock();
			continue;
		}

		sem_lock(sma, NULL, -1);
		/* exit_sem raced with IPC_RMID+semget() that created
		 * exactly the same semid. Nothing to do. */
		if (!ipc_valid_object(&sma->sem_perm)) {
			sem_unlock(sma, -1);
			rcu_read_unlock();
			continue;
		}
		un = __lookup_undo(ulp, semid);
		if (un == NULL) {
			/* exit_sem raced with IPC_RMID+semget() that created
			 * exactly the same semid. Nothing to do.
			 */
			sem_unlock(sma, -1);
			rcu_read_unlock();
			continue;
		}

		/* remove un from the linked lists */
		ipc_assert_locked_object(&sma->sem_perm);
		list_del(&un->list_id);

		/*
		 * We hold ulp->lock via sma->sem_perm ordering elsewhere;
		 * here only list_del_rcu is needed because no other task
		 * can use this undo list any more (refcnt hit zero).
		 */
		list_del_rcu(&un->list_proc);

		/* perform adjustments registered in un */
		for (i = 0; i < sma->sem_nsems; i++) {
			struct sem *semaphore = &sma->sems[i];
			if (un->semadj[i]) {
				semaphore->semval += un->semadj[i];
				/*
				 * Range checks of the semadj results are not
				 * defined by POSIX; the result can exceed
				 * SEMVMX or go negative when several undos
				 * from different processes are combined.
				 * Clamp to the valid range [0, SEMVMX] -
				 * manual pages also document this behaviour.
				 */
				if (semaphore->semval < 0)
					semaphore->semval = 0;
				if (semaphore->semval > SEMVMX)
					semaphore->semval = SEMVMX;
				ipc_update_pid(&semaphore->sempid, task_tgid(current));
			}
		}
		/* maybe some queued-up processes were waiting for this */
		do_smart_update(sma, NULL, 0, 1, &wake_q);
		sem_unlock(sma, -1);
		rcu_read_unlock();
		wake_up_q(&wake_q);

		kfree_rcu(un, rcu);
	}
	kfree(ulp);
}
2363
2364#ifdef CONFIG_PROC_FS
/*
 * /proc/sysvipc/sem seq_file show callback: print one semaphore array.
 *
 * The proc interface does not use sem_lock(); force the array into
 * complex mode (global lock) so that semval/sempid/otime reads are
 * consistent while we format the line.
 */
static int sysvipc_sem_proc_show(struct seq_file *s, void *it)
{
	struct user_namespace *user_ns = seq_user_ns(s);
	struct kern_ipc_perm *ipcp = it;
	struct sem_array *sma = container_of(ipcp, struct sem_array, sem_perm);
	time64_t sem_otime;

	/*
	 * The proc interface isn't aware of sem_lock(), it calls
	 * ipc_lock_object(), i.e. takes the global lock.
	 * In order to stay compatible with sem_lock(), we must
	 * enter / leave complex_mode.
	 */
	complexmode_enter(sma);

	sem_otime = get_semotime(sma);

	seq_printf(s,
		   "%10d %10d  %4o %10u %5u %5u %5u %5u %10llu %10llu\n",
		   sma->sem_perm.key,
		   sma->sem_perm.id,
		   sma->sem_perm.mode,
		   sma->sem_nsems,
		   from_kuid_munged(user_ns, sma->sem_perm.uid),
		   from_kgid_munged(user_ns, sma->sem_perm.gid),
		   from_kuid_munged(user_ns, sma->sem_perm.cuid),
		   from_kgid_munged(user_ns, sma->sem_perm.cgid),
		   sem_otime,
		   sma->sem_ctime);

	complexmode_tryleave(sma);

	return 0;
}
2399#endif
2400