1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73#include <linux/compat.h>
74#include <linux/slab.h>
75#include <linux/spinlock.h>
76#include <linux/init.h>
77#include <linux/proc_fs.h>
78#include <linux/time.h>
79#include <linux/security.h>
80#include <linux/syscalls.h>
81#include <linux/audit.h>
82#include <linux/capability.h>
83#include <linux/seq_file.h>
84#include <linux/rwsem.h>
85#include <linux/nsproxy.h>
86#include <linux/ipc_namespace.h>
87#include <linux/sched/wake_q.h>
88#include <linux/nospec.h>
89#include <linux/rhashtable.h>
90
91#include <linux/uaccess.h>
92#include "util.h"
93
94
/* One semaphore structure for each semaphore in the system. */
struct sem {
	int semval;		/* current value */
	/*
	 * PID of the process that last modified the semaphore. For
	 * Linux, specifically these are:
	 *  - semop
	 *  - semctl, via SETVAL and SETALL.
	 *  - at task exit when performing undo adjustments (see exit_sem).
	 */
	struct pid *sempid;
	spinlock_t lock;	/* spinlock for fine-grained semtimedop */
	struct list_head pending_alter; /* pending single-sop operations
					 * that alter the semaphore */
	struct list_head pending_const; /* pending single-sop operations
					 * that do not alter the semaphore */
	time64_t sem_otime;	/* candidate for sem_otime */
} ____cacheline_aligned_in_smp;
112
113
/* One sem_array data structure for each set of semaphores in the system. */
struct sem_array {
	struct kern_ipc_perm sem_perm;	/* permissions .. see ipc.h */
	time64_t sem_ctime;		/* create/last semctl() time */
	struct list_head pending_alter;	/* pending operations */
					/* that alter the array */
	struct list_head pending_const;	/* pending complex operations */
					/* that do not alter semvals */
	struct list_head list_id;	/* undo requests on this array */
	int sem_nsems;			/* no. of semaphores in array */
	int complex_count;		/* pending complex operations */
	unsigned int use_global_lock;	/* >0: global lock required */

	struct sem sems[];		/* flexible array of the semaphores */
} __randomize_layout;
128
129
/* One queue for each sleeping process in the system. */
struct sem_queue {
	struct list_head list;	 /* queue of pending operations */
	struct task_struct *sleeper; /* this process */
	struct sem_undo *undo;	 /* undo structure */
	struct pid *pid;	 /* process id of requesting process */
	int status;		 /* completion status of operation */
	struct sembuf *sops;	 /* array of pending operations */
	struct sembuf *blocking; /* the operation that blocked */
	int nsops;		 /* number of operations */
	bool alter;		 /* does *sops alter the array? */
	bool dupsop;		 /* sops on more than one sem_num */
};
142
143
144
145
/*
 * Each task has a list of undo requests. They are executed automatically
 * when the process exits.
 */
struct sem_undo {
	struct list_head list_proc;	/* per-process list: *
					 * all undos from one process
					 * rcu protected */
	struct rcu_head rcu;		/* rcu struct for sem_undo */
	struct sem_undo_list *ulp;	/* back ptr to sem_undo_list */
	struct list_head list_id;	/* per semaphore array list:
					 * all undos for one array */
	int semid;			/* semaphore set identifier */
	short *semadj;			/* array of adjustments */
					/* one per semaphore */
};
158
159
160
161
/*
 * sem_undo_list controls shared access to the list of sem_undo structures
 * that may be shared among all tasks of a clone group (CLONE_SYSVSEM).
 */
struct sem_undo_list {
	refcount_t refcnt;		/* number of tasks sharing this list */
	spinlock_t lock;		/* protects list_proc */
	struct list_head list_proc;	/* list of all sem_undo structs */
};
167
168
/* Per-namespace id table for SysV semaphores. */
#define sem_ids(ns)	((ns)->ids[IPC_SEM_IDS])

/* creation and destruction of a semaphore set */
static int newary(struct ipc_namespace *, struct ipc_params *);
static void freeary(struct ipc_namespace *, struct kern_ipc_perm *);
#ifdef CONFIG_PROC_FS
static int sysvipc_sem_proc_show(struct seq_file *s, void *it);
#endif

/* sizes up to these limits are handled with an on-stack buffer */
#define SEMMSL_FAST	256 /* 512 bytes on stack */
#define SEMOPM_FAST	64  /* ~ 372 bytes on stack */

/*
 * Switching from the mode suitable for simple ops
 * to the mode for complex ops is costly. Therefore:
 * use some hysteresis
 */
#define USE_GLOBAL_LOCK_HYSTERESIS	10
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
/* Namespace-tunable SysV semaphore limits, stored in ns->sem_ctls[]. */
#define sc_semmsl	sem_ctls[0]	/* max semaphores per array */
#define sc_semmns	sem_ctls[1]	/* max semaphores system wide */
#define sc_semopm	sem_ctls[2]	/* max ops per semop call */
#define sc_semmni	sem_ctls[3]	/* max number of semaphore arrays */
246
247void sem_init_ns(struct ipc_namespace *ns)
248{
249 ns->sc_semmsl = SEMMSL;
250 ns->sc_semmns = SEMMNS;
251 ns->sc_semopm = SEMOPM;
252 ns->sc_semmni = SEMMNI;
253 ns->used_sems = 0;
254 ipc_init_ids(&ns->ids[IPC_SEM_IDS]);
255}
256
257#ifdef CONFIG_IPC_NS
258void sem_exit_ns(struct ipc_namespace *ns)
259{
260 free_ipcs(ns, &sem_ids(ns), freeary);
261 idr_destroy(&ns->ids[IPC_SEM_IDS].ipcs_idr);
262 rhashtable_destroy(&ns->ids[IPC_SEM_IDS].key_ht);
263}
264#endif
265
/* Boot-time initialization: set up the initial namespace and /proc file. */
void __init sem_init(void)
{
	sem_init_ns(&init_ipc_ns);
	ipc_init_proc_interface("sysvipc/sem",
				" key semid perms nsems uid gid cuid cgid otime ctime\n",
				IPC_SEM_IDS, sysvipc_sem_proc_show);
}
273
274
275
276
277
278
279
280
/**
 * unmerge_queues - unmerge queues, if possible.
 * @sma: semaphore array
 *
 * The function unmerges the wait queues if complex_count is 0.
 * It must be called prior to dropping the global semaphore array lock.
 */
static void unmerge_queues(struct sem_array *sma)
{
	struct sem_queue *q, *tq;

	/* complex operations still around? */
	if (sma->complex_count)
		return;
	/*
	 * We will switch back to simple mode.
	 * Move all pending operation back into the per-semaphore
	 * queues.
	 */
	list_for_each_entry_safe(q, tq, &sma->pending_alter, list) {
		struct sem *curr;
		curr = &sma->sems[q->sops[0].sem_num];

		list_add_tail(&q->list, &curr->pending_alter);
	}
	/* all entries were moved; reset the now-stale global list head */
	INIT_LIST_HEAD(&sma->pending_alter);
}
301
302
303
304
305
306
307
308
309
310
311static void merge_queues(struct sem_array *sma)
312{
313 int i;
314 for (i = 0; i < sma->sem_nsems; i++) {
315 struct sem *sem = &sma->sems[i];
316
317 list_splice_init(&sem->pending_alter, &sma->pending_alter);
318 }
319}
320
321static void sem_rcu_free(struct rcu_head *head)
322{
323 struct kern_ipc_perm *p = container_of(head, struct kern_ipc_perm, rcu);
324 struct sem_array *sma = container_of(p, struct sem_array, sem_perm);
325
326 security_sem_free(&sma->sem_perm);
327 kvfree(sma);
328}
329
330
331
332
333
/*
 * Enter the mode suitable for non-simple operations:
 * Caller must own sem_perm.lock.
 */
static void complexmode_enter(struct sem_array *sma)
{
	int i;
	struct sem *sem;

	if (sma->use_global_lock > 0) {
		/*
		 * We are already in global lock mode.
		 * Nothing to do, just reset the
		 * counter until we return to simple mode.
		 */
		sma->use_global_lock = USE_GLOBAL_LOCK_HYSTERESIS;
		return;
	}
	sma->use_global_lock = USE_GLOBAL_LOCK_HYSTERESIS;

	/*
	 * Lock/unlock each per-semaphore spinlock once: this waits out
	 * every simple-op holder that may still own its sem->lock, so
	 * afterwards no simple op runs concurrently with us.
	 */
	for (i = 0; i < sma->sem_nsems; i++) {
		sem = &sma->sems[i];
		spin_lock(&sem->lock);
		spin_unlock(&sem->lock);
	}
}
356
357
358
359
360
/*
 * Try to leave the mode that disallows simple operations:
 * Caller must own sem_perm.lock.
 */
static void complexmode_tryleave(struct sem_array *sma)
{
	if (sma->complex_count) {
		/*
		 * Complex ops are sleeping.
		 * We must stay in complex mode.
		 */
		return;
	}
	if (sma->use_global_lock == 1) {

		/* See SEM_BARRIER_1 for purpose/pairing */
		smp_store_release(&sma->use_global_lock, 0);
	} else {
		/* count down the hysteresis before re-enabling simple ops */
		sma->use_global_lock--;
	}
}
377
/* Lock token returned by sem_lock() when the whole array was locked. */
#define SEM_GLOBAL_LOCK (-1)
379
380
381
382
383
384
385
/*
 * If the request contains only one semaphore operation, and there are
 * no complex transactions pending, lock only the semaphore involved.
 * Otherwise, lock the entire semaphore array, since we either have
 * multiple semaphores in our own semops, or we need to look at
 * semaphores from other pending complex operations.
 */
static inline int sem_lock(struct sem_array *sma, struct sembuf *sops,
			      int nsops)
{
	struct sem *sem;
	int idx;

	if (nsops != 1) {
		/* Complex operation - acquire a full lock */
		ipc_lock_object(&sma->sem_perm);

		/* Prevent parallel simple ops */
		complexmode_enter(sma);
		return SEM_GLOBAL_LOCK;
	}

	/*
	 * Only one semaphore affected - try to optimize locking.
	 * Optimized locking is possible if no complex operation
	 * is either enqueued or processed right now.
	 *
	 * Both facts are tracked by use_global_lock.
	 */
	idx = array_index_nospec(sops->sem_num, sma->sem_nsems);
	sem = &sma->sems[idx];

	/*
	 * Initial check for use_global_lock. Just an optimization,
	 * no locking, no memory barrier.
	 */
	if (!sma->use_global_lock) {
		/*
		 * It appears that no complex operation is around.
		 * Acquire the per-semaphore lock.
		 */
		spin_lock(&sem->lock);

		/* see SEM_BARRIER_1 for purpose/pairing */
		if (!smp_load_acquire(&sma->use_global_lock)) {
			/* fast path successful! */
			return sops->sem_num;
		}
		spin_unlock(&sem->lock);
	}

	/* slow path: acquire the full lock */
	ipc_lock_object(&sma->sem_perm);

	if (sma->use_global_lock == 0) {
		/*
		 * The use_global_lock mode ended while we waited for
		 * sma->sem_perm.lock. Thus we must switch to locking
		 * with sem->lock.
		 * Unlike in the fast path, there is no need to recheck
		 * sma->use_global_lock after we have acquired sem->lock:
		 * We own sma->sem_perm.lock, thus use_global_lock cannot
		 * change.
		 */
		spin_lock(&sem->lock);

		ipc_unlock_object(&sma->sem_perm);
		return sops->sem_num;
	} else {
		/*
		 * Not a false alarm, thus continue to use the global lock
		 * mode. No need for complexmode_enter(), this was done by
		 * the caller that has set use_global_lock to non-zero.
		 */
		return SEM_GLOBAL_LOCK;
	}
}
456
457static inline void sem_unlock(struct sem_array *sma, int locknum)
458{
459 if (locknum == SEM_GLOBAL_LOCK) {
460 unmerge_queues(sma);
461 complexmode_tryleave(sma);
462 ipc_unlock_object(&sma->sem_perm);
463 } else {
464 struct sem *sem = &sma->sems[locknum];
465 spin_unlock(&sem->lock);
466 }
467}
468
469
470
471
472
473
474
475static inline struct sem_array *sem_obtain_object(struct ipc_namespace *ns, int id)
476{
477 struct kern_ipc_perm *ipcp = ipc_obtain_object_idr(&sem_ids(ns), id);
478
479 if (IS_ERR(ipcp))
480 return ERR_CAST(ipcp);
481
482 return container_of(ipcp, struct sem_array, sem_perm);
483}
484
485static inline struct sem_array *sem_obtain_object_check(struct ipc_namespace *ns,
486 int id)
487{
488 struct kern_ipc_perm *ipcp = ipc_obtain_object_check(&sem_ids(ns), id);
489
490 if (IS_ERR(ipcp))
491 return ERR_CAST(ipcp);
492
493 return container_of(ipcp, struct sem_array, sem_perm);
494}
495
/*
 * Re-acquire the global lock and drop the extra reference that was taken
 * before temporarily unlocking (e.g. around a large memory allocation).
 */
static inline void sem_lock_and_putref(struct sem_array *sma)
{
	sem_lock(sma, NULL, -1);
	ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
}
501
/* Remove the semaphore set from the namespace's id tables. */
static inline void sem_rmid(struct ipc_namespace *ns, struct sem_array *s)
{
	ipc_rmid(&sem_ids(ns), &s->sem_perm);
}
506
507static struct sem_array *sem_alloc(size_t nsems)
508{
509 struct sem_array *sma;
510
511 if (nsems > (INT_MAX - sizeof(*sma)) / sizeof(sma->sems[0]))
512 return NULL;
513
514 sma = kvzalloc(struct_size(sma, sems, nsems), GFP_KERNEL);
515 if (unlikely(!sma))
516 return NULL;
517
518 return sma;
519}
520
521
522
523
524
525
526
527
/**
 * newary - Create new semaphore set
 * @ns: namespace
 * @params: ptr to the structure that contains key, semflg and nsems
 *
 * Called with sem_ids.rwsem held (as a writer)
 */
static int newary(struct ipc_namespace *ns, struct ipc_params *params)
{
	int retval;
	struct sem_array *sma;
	key_t key = params->key;
	int nsems = params->u.nsems;
	int semflg = params->flg;
	int i;

	if (!nsems)
		return -EINVAL;
	if (ns->used_sems + nsems > ns->sc_semmns)
		return -ENOSPC;

	sma = sem_alloc(nsems);
	if (!sma)
		return -ENOMEM;

	sma->sem_perm.mode = (semflg & S_IRWXUGO);
	sma->sem_perm.key = key;

	sma->sem_perm.security = NULL;
	retval = security_sem_alloc(&sma->sem_perm);
	if (retval) {
		/* no id assigned yet: plain kvfree, no RCU grace period needed */
		kvfree(sma);
		return retval;
	}

	for (i = 0; i < nsems; i++) {
		INIT_LIST_HEAD(&sma->sems[i].pending_alter);
		INIT_LIST_HEAD(&sma->sems[i].pending_const);
		spin_lock_init(&sma->sems[i].lock);
	}

	sma->complex_count = 0;
	sma->use_global_lock = USE_GLOBAL_LOCK_HYSTERESIS;
	INIT_LIST_HEAD(&sma->pending_alter);
	INIT_LIST_HEAD(&sma->pending_const);
	INIT_LIST_HEAD(&sma->list_id);
	sma->sem_nsems = nsems;
	sma->sem_ctime = ktime_get_real_seconds();

	/* ipc_addid() locks sma upon success. */
	retval = ipc_addid(&sem_ids(ns), &sma->sem_perm, ns->sc_semmni);
	if (retval < 0) {
		ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
		return retval;
	}
	ns->used_sems += nsems;

	sem_unlock(sma, -1);
	rcu_read_unlock();

	return sma->sem_perm.id;
}
583
584
585
586
587
588static inline int sem_more_checks(struct kern_ipc_perm *ipcp,
589 struct ipc_params *params)
590{
591 struct sem_array *sma;
592
593 sma = container_of(ipcp, struct sem_array, sem_perm);
594 if (params->u.nsems > sma->sem_nsems)
595 return -EINVAL;
596
597 return 0;
598}
599
600long ksys_semget(key_t key, int nsems, int semflg)
601{
602 struct ipc_namespace *ns;
603 static const struct ipc_ops sem_ops = {
604 .getnew = newary,
605 .associate = security_sem_associate,
606 .more_checks = sem_more_checks,
607 };
608 struct ipc_params sem_params;
609
610 ns = current->nsproxy->ipc_ns;
611
612 if (nsems < 0 || nsems > ns->sc_semmsl)
613 return -EINVAL;
614
615 sem_params.key = key;
616 sem_params.flg = semflg;
617 sem_params.u.nsems = nsems;
618
619 return ipcget(ns, &sem_ids(ns), &sem_ops, &sem_params);
620}
621
/* semget(2): thin syscall wrapper around ksys_semget(). */
SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
{
	return ksys_semget(key, nsems, semflg);
}
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
/**
 * perform_atomic_semop_slow - Attempt to perform semaphore
 *                             operations on a given array.
 * @sma: semaphore array
 * @q: struct sem_queue that describes the operation
 *
 * Caller blocking are as follows, based the value
 * indicated by the semaphore operation (sem_op):
 *
 *  (1) >0 never blocks.
 *  (2)  0 (wait-for-zero operation): semval is non-zero.
 *  (3) <0 attempting to decrement semval to a value smaller than zero.
 *
 * Returns 0 if the operation was possible.
 * Returns 1 if the operation is impossible, the caller must sleep.
 * Returns <0 for error codes.
 */
static int perform_atomic_semop_slow(struct sem_array *sma, struct sem_queue *q)
{
	int result, sem_op, nsops;
	struct pid *pid;
	struct sembuf *sop;
	struct sem *curr;
	struct sembuf *sops;
	struct sem_undo *un;

	sops = q->sops;
	nsops = q->nsops;
	un = q->undo;

	/* apply the operations one by one; roll back on failure */
	for (sop = sops; sop < sops + nsops; sop++) {
		int idx = array_index_nospec(sop->sem_num, sma->sem_nsems);
		curr = &sma->sems[idx];
		sem_op = sop->sem_op;
		result = curr->semval;

		if (!sem_op && result)
			goto would_block;	/* wait-for-zero, semval != 0 */

		result += sem_op;
		if (result < 0)
			goto would_block;	/* decrement below zero */
		if (result > SEMVMX)
			goto out_of_range;

		if (sop->sem_flg & SEM_UNDO) {
			int undo = un->semadj[sop->sem_num] - sem_op;

			/* exceeding the undo range is an error */
			if (undo < (-SEMAEM - 1) || undo > SEMAEM)
				goto out_of_range;
			un->semadj[sop->sem_num] = undo;
		}

		curr->semval = result;
	}

	/* success: record the caller's pid on every touched semaphore */
	sop--;
	pid = q->pid;
	while (sop >= sops) {
		ipc_update_pid(&sma->sems[sop->sem_num].sempid, pid);
		sop--;
	}

	return 0;

out_of_range:
	result = -ERANGE;
	goto undo;

would_block:
	q->blocking = sop;

	if (sop->sem_flg & IPC_NOWAIT)
		result = -EAGAIN;
	else
		result = 1;

undo:
	/* roll back, in reverse order, everything applied so far */
	sop--;
	while (sop >= sops) {
		sem_op = sop->sem_op;
		sma->sems[sop->sem_num].semval -= sem_op;
		if (sop->sem_flg & SEM_UNDO)
			un->semadj[sop->sem_num] += sem_op;
		sop--;
	}

	return result;
}
716
/*
 * perform_atomic_semop - fast path of perform_atomic_semop_slow() for
 * queues without duplicate sem_nums: validate all ops first, then apply
 * them, so no rollback is ever needed. Same return convention as the
 * slow variant (0 success, 1 must sleep, <0 error).
 */
static int perform_atomic_semop(struct sem_array *sma, struct sem_queue *q)
{
	int result, sem_op, nsops;
	struct sembuf *sop;
	struct sem *curr;
	struct sembuf *sops;
	struct sem_undo *un;

	sops = q->sops;
	nsops = q->nsops;
	un = q->undo;

	/* ops touching the same semaphore twice need the rollback logic */
	if (unlikely(q->dupsop))
		return perform_atomic_semop_slow(sma, q);

	/*
	 * We scan the semaphore set twice, first to ensure that the entire
	 * operation can succeed, therefore avoiding any pointless writes
	 * to shared memory and having to undo such changes in order to block
	 * until the operations can go through.
	 */
	for (sop = sops; sop < sops + nsops; sop++) {
		int idx = array_index_nospec(sop->sem_num, sma->sem_nsems);

		curr = &sma->sems[idx];
		sem_op = sop->sem_op;
		result = curr->semval;

		if (!sem_op && result)
			goto would_block; /* wait-for-zero */

		result += sem_op;
		if (result < 0)
			goto would_block;

		if (result > SEMVMX)
			return -ERANGE;

		if (sop->sem_flg & SEM_UNDO) {
			int undo = un->semadj[sop->sem_num] - sem_op;

			/* Exceeding the undo range is an error. */
			if (undo < (-SEMAEM - 1) || undo > SEMAEM)
				return -ERANGE;
		}
	}

	/* second pass: everything validated above, commit the changes */
	for (sop = sops; sop < sops + nsops; sop++) {
		curr = &sma->sems[sop->sem_num];
		sem_op = sop->sem_op;
		result = curr->semval;

		if (sop->sem_flg & SEM_UNDO) {
			int undo = un->semadj[sop->sem_num] - sem_op;

			un->semadj[sop->sem_num] = undo;
		}
		curr->semval += sem_op;
		ipc_update_pid(&curr->sempid, q->pid);
	}

	return 0;

would_block:
	q->blocking = sop;
	return sop->sem_flg & IPC_NOWAIT ? -EAGAIN : 1;
}
784
/*
 * Mark @q as finished with result @error and queue its sleeper on
 * @wake_q. The actual wake-up happens later via wake_up_q(), after all
 * locks were dropped.
 */
static inline void wake_up_sem_queue_prepare(struct sem_queue *q, int error,
					     struct wake_q_head *wake_q)
{
	/* hold a task reference until wake_q_add_safe() consumes it */
	get_task_struct(q->sleeper);

	/* see SEM_BARRIER_2 for purpose/pairing */
	smp_store_release(&q->status, error);

	wake_q_add_safe(wake_q, q->sleeper);
}
795
/*
 * Remove @q from its pending list; complex (multi-sop) operations are
 * also subtracted from the array's complex_count.
 */
static void unlink_queue(struct sem_array *sma, struct sem_queue *q)
{
	list_del(&q->list);
	if (q->nsops > 1)
		sma->complex_count--;
}
802
803
804
805
806
807
808
809
810
811
812
/** check_restart(sma, q)
 * @sma: semaphore array
 * @q: the operation that just completed
 *
 * update_queue is O(N^2) when it restarts scanning the whole queue of
 * waiting operations. Therefore this function checks if the restart is
 * really necessary. It is called after a previously waiting operation
 * was completed.
 */
static inline int check_restart(struct sem_array *sma, struct sem_queue *q)
{
	/* pending complex alter operations are too difficult to analyse */
	if (!list_empty(&sma->pending_alter))
		return 1;

	/* we were a sleeping complex operation. Too difficult */
	if (q->nsops > 1)
		return 1;

	/*
	 * It is impossible that someone waits for the new value:
	 * - complex operations always restart.
	 * - wait-for-zero are handled separately.
	 * - q is a previously sleeping simple operation that
	 *   altered the array. It must be a decrement, because
	 *   simple increments never sleep.
	 * - If there are older (higher priority) decrements
	 *   in the queue, then they have observed the original
	 *   semval value and couldn't proceed. The operation
	 *   decreased the value - thus they won't proceed either.
	 */
	return 0;
}
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
/**
 * wake_const_ops - wake up non-alter tasks
 * @sma: semaphore array.
 * @semnum: semaphore that was modified, or -1 for the global queue.
 * @wake_q: lockless wake-queue head.
 *
 * wake_const_ops must be called after a semaphore in a semaphore array
 * was set to 0. If complex const operations are pending, wake_const_ops
 * must be called with semnum = -1, as well as with the number of each
 * modified semaphore.
 * The tasks that must be woken up are added to @wake_q. The return code
 * is stored in q->status.
 * The function returns 1 if at least one operation was completed
 * successfully.
 */
static int wake_const_ops(struct sem_array *sma, int semnum,
			  struct wake_q_head *wake_q)
{
	struct sem_queue *q, *tmp;
	struct list_head *pending_list;
	int semop_completed = 0;

	if (semnum == -1)
		pending_list = &sma->pending_const;
	else
		pending_list = &sma->sems[semnum].pending_const;

	list_for_each_entry_safe(q, tmp, pending_list, list) {
		int error = perform_atomic_semop(sma, q);

		if (error > 0)
			continue;	/* still must sleep */
		/* operation completed (or failed): remove & wake up */
		unlink_queue(sma, q);

		wake_up_sem_queue_prepare(q, error, wake_q);
		if (error == 0)
			semop_completed = 1;
	}

	return semop_completed;
}
878
879
880
881
882
883
884
885
886
887
888
889
/**
 * do_smart_wakeup_zero - wakeup all wait for zero tasks
 * @sma: semaphore array
 * @sops: operations that were performed
 * @nsops: number of operations
 * @wake_q: lockless wake-queue head
 *
 * Checks all required queues for wait-for-zero operations, based
 * on the actual changes that were performed on the semaphore array.
 * The function returns 1 if at least one operation was completed
 * successfully.
 */
static int do_smart_wakeup_zero(struct sem_array *sma, struct sembuf *sops,
				int nsops, struct wake_q_head *wake_q)
{
	int i;
	int semop_completed = 0;
	int got_zero = 0;

	/* first: the per-semaphore queues, if known */
	if (sops) {
		for (i = 0; i < nsops; i++) {
			int num = sops[i].sem_num;

			if (sma->sems[num].semval == 0) {
				got_zero = 1;
				semop_completed |= wake_const_ops(sma, num, wake_q);
			}
		}
	} else {
		/*
		 * No sops means modified semaphores not known.
		 * Assume all were changed.
		 */
		for (i = 0; i < sma->sem_nsems; i++) {
			if (sma->sems[i].semval == 0) {
				got_zero = 1;
				semop_completed |= wake_const_ops(sma, i, wake_q);
			}
		}
	}
	/*
	 * If one of the modified semaphores got 0,
	 * then check the global queue, too.
	 */
	if (got_zero)
		semop_completed |= wake_const_ops(sma, -1, wake_q);

	return semop_completed;
}
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
/**
 * update_queue - look for tasks that can be completed.
 * @sma: semaphore array.
 * @semnum: semaphore that was modified, or -1 for the global queue.
 * @wake_q: lockless wake-queue head.
 *
 * update_queue must be called after a semaphore in a semaphore array
 * was modified. If multiple semaphores were modified, update_queue must
 * be called with semnum = -1, as well as with the number of each modified
 * semaphore.
 * The tasks that must be woken up are added to @wake_q. The return code
 * is stored in q->status.
 * The function internally checks if const operations can now succeed.
 *
 * The function return 1 if at least one semop was completed successfully.
 */
static int update_queue(struct sem_array *sma, int semnum, struct wake_q_head *wake_q)
{
	struct sem_queue *q, *tmp;
	struct list_head *pending_list;
	int semop_completed = 0;

	if (semnum == -1)
		pending_list = &sma->pending_alter;
	else
		pending_list = &sma->sems[semnum].pending_alter;

again:
	list_for_each_entry_safe(q, tmp, pending_list, list) {
		int error, restart;

		/*
		 * If we are scanning the single sop, per-semaphore list of
		 * one semaphore and that semaphore is 0, then it is not
		 * necessary to scan further: simple increments
		 * that affect only one entry succeed immediately and cannot
		 * be in the per-semaphore pending queue, and decrements
		 * cannot be successful if the value is already 0.
		 */
		if (semnum != -1 && sma->sems[semnum].semval == 0)
			break;

		error = perform_atomic_semop(sma, q);

		/* Does q->sleeper still need to sleep? */
		if (error > 0)
			continue;

		unlink_queue(sma, q);

		if (error) {
			restart = 0;
		} else {
			semop_completed = 1;
			do_smart_wakeup_zero(sma, q->sops, q->nsops, wake_q);
			restart = check_restart(sma, q);
		}

		wake_up_sem_queue_prepare(q, error, wake_q);
		if (restart)
			goto again;
	}
	return semop_completed;
}
993
994
995
996
997
998
999
1000
1001
1002static void set_semotime(struct sem_array *sma, struct sembuf *sops)
1003{
1004 if (sops == NULL) {
1005 sma->sems[0].sem_otime = ktime_get_real_seconds();
1006 } else {
1007 sma->sems[sops[0].sem_num].sem_otime =
1008 ktime_get_real_seconds();
1009 }
1010}
1011
1012
1013
1014
1015
1016
1017
1018
1019
1020
1021
1022
1023
1024
1025
/**
 * do_smart_update - optimized update_queue
 * @sma: semaphore array
 * @sops: operations that were performed
 * @nsops: number of operations
 * @otime: force setting otime
 * @wake_q: lockless wake-queue head
 *
 * do_smart_update() does the required calls to update_queue and
 * do_smart_wakeup_zero, based on the actual changes that were performed
 * on the semaphore array.
 * Note that the function does not do the actual wake-up: the caller is
 * responsible for calling wake_up_q().
 */
static void do_smart_update(struct sem_array *sma, struct sembuf *sops, int nsops,
			    int otime, struct wake_q_head *wake_q)
{
	int i;

	otime |= do_smart_wakeup_zero(sma, sops, nsops, wake_q);

	if (!list_empty(&sma->pending_alter)) {
		/* semaphore array uses the global queue - just process it. */
		otime |= update_queue(sma, -1, wake_q);
	} else {
		if (!sops) {
			/*
			 * No sops, thus the modified semaphores are not
			 * known. Check all.
			 */
			for (i = 0; i < sma->sem_nsems; i++)
				otime |= update_queue(sma, i, wake_q);
		} else {
			/*
			 * Check the semaphores that were increased:
			 * - No complex ops, thus all sleeping ops are
			 *   decrease.
			 * - if we decreased the value, then any sleeping
			 *   semaphore ops won't be able to run: If the
			 *   previous value was too small, then the new
			 *   value will be too small, too.
			 */
			for (i = 0; i < nsops; i++) {
				if (sops[i].sem_op > 0) {
					otime |= update_queue(sma,
							sops[i].sem_num, wake_q);
				}
			}
		}
	}
	if (otime)
		set_semotime(sma, sops);
}
1065
1066
1067
1068
/*
 * check_qop: Test if a queued operation sleeps on the semaphore semnum
 */
static int check_qop(struct sem_array *sma, int semnum, struct sem_queue *q,
			bool count_zero)
{
	struct sembuf *sop = q->blocking;

	/*
	 * Linux always (since 0.99.10) reported a task as sleeping on all
	 * semaphores. This violates SUS, therefore it was changed to the
	 * standard compliant behavior.
	 * Give the administrators a chance to notice that an application
	 * might misbehave because it relies on the Linux behavior.
	 */
	pr_info_once("semctl(GETNCNT/GETZCNT) is since 3.16 Single Unix Specification compliant.\n"
			"The task %s (%d) triggered the difference, watch for misbehavior.\n",
			current->comm, task_pid_nr(current));

	/* only the operation that actually blocked the task counts */
	if (sop->sem_num != semnum)
		return 0;

	if (count_zero && sop->sem_op == 0)
		return 1;
	if (!count_zero && sop->sem_op < 0)
		return 1;

	return 0;
}
1095
1096
1097
1098
1099
1100
1101
1102
/* The counts we return here are a rough guess of the number of waiters.
 * They are intended to be usable only for debugging purposes (GETNCNT /
 * GETZCNT).
 */
static int count_semcnt(struct sem_array *sma, ushort semnum,
			bool count_zero)
{
	struct list_head *l;
	struct sem_queue *q;
	int semcnt;

	semcnt = 0;
	/* First: check the simple operations. They are easy to evaluate */
	if (count_zero)
		l = &sma->sems[semnum].pending_const;
	else
		l = &sma->sems[semnum].pending_alter;

	list_for_each_entry(q, l, list) {
		/*
		 * All tasks on a per-semaphore list sleep on exactly that
		 * semaphore.
		 */
		semcnt++;
	}

	/* Then: check the complex operations. */
	list_for_each_entry(q, &sma->pending_alter, list) {
		semcnt += check_qop(sma, semnum, q, count_zero);
	}
	if (count_zero) {
		list_for_each_entry(q, &sma->pending_const, list) {
			semcnt += check_qop(sma, semnum, q, count_zero);
		}
	}
	return semcnt;
}
1135
1136
1137
1138
1139
/* Free a semaphore set. freeary() is called with sem_ids.rwsem locked
 * as a writer and the spinlock for this semaphore set hold. sem_ids.rwsem
 * remains locked on exit.
 */
static void freeary(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
{
	struct sem_undo *un, *tu;
	struct sem_queue *q, *tq;
	struct sem_array *sma = container_of(ipcp, struct sem_array, sem_perm);
	int i;
	DEFINE_WAKE_Q(wake_q);

	/* Free the existing undo structures for this semaphore set.  */
	ipc_assert_locked_object(&sma->sem_perm);
	list_for_each_entry_safe(un, tu, &sma->list_id, list_id) {
		list_del(&un->list_id);
		spin_lock(&un->ulp->lock);
		un->semid = -1;
		list_del_rcu(&un->list_proc);
		spin_unlock(&un->ulp->lock);
		kfree_rcu(un, rcu);
	}

	/* Wake up all pending processes and let them fail with EIDRM. */
	list_for_each_entry_safe(q, tq, &sma->pending_const, list) {
		unlink_queue(sma, q);
		wake_up_sem_queue_prepare(q, -EIDRM, &wake_q);
	}

	list_for_each_entry_safe(q, tq, &sma->pending_alter, list) {
		unlink_queue(sma, q);
		wake_up_sem_queue_prepare(q, -EIDRM, &wake_q);
	}
	for (i = 0; i < sma->sem_nsems; i++) {
		struct sem *sem = &sma->sems[i];
		list_for_each_entry_safe(q, tq, &sem->pending_const, list) {
			unlink_queue(sma, q);
			wake_up_sem_queue_prepare(q, -EIDRM, &wake_q);
		}
		list_for_each_entry_safe(q, tq, &sem->pending_alter, list) {
			unlink_queue(sma, q);
			wake_up_sem_queue_prepare(q, -EIDRM, &wake_q);
		}
		ipc_update_pid(&sem->sempid, NULL);
	}

	/* Remove the semaphore set from the IDR */
	sem_rmid(ns, sma);
	sem_unlock(sma, -1);
	rcu_read_unlock();

	wake_up_q(&wake_q);
	ns->used_sems -= sma->sem_nsems;
	ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
}
1191
/*
 * Copy a semid64_ds out to userspace, converting to the legacy
 * semid_ds layout for IPC_OLD callers. Returns nonzero on fault.
 */
static unsigned long copy_semid_to_user(void __user *buf, struct semid64_ds *in, int version)
{
	switch (version) {
	case IPC_64:
		return copy_to_user(buf, in, sizeof(*in));
	case IPC_OLD:
	{
		struct semid_ds out;

		memset(&out, 0, sizeof(out));

		ipc64_perm_to_ipc_perm(&in->sem_perm, &out.sem_perm);

		out.sem_otime = in->sem_otime;
		out.sem_ctime = in->sem_ctime;
		out.sem_nsems = in->sem_nsems;

		return copy_to_user(buf, &out, sizeof(out));
	}
	default:
		return -EINVAL;
	}
}
1215
1216static time64_t get_semotime(struct sem_array *sma)
1217{
1218 int i;
1219 time64_t res;
1220
1221 res = sma->sems[0].sem_otime;
1222 for (i = 1; i < sma->sem_nsems; i++) {
1223 time64_t to = sma->sems[i].sem_otime;
1224
1225 if (to > res)
1226 res = to;
1227 }
1228 return res;
1229}
1230
/*
 * semctl_stat - handle IPC_STAT, SEM_STAT and SEM_STAT_ANY.
 * Fills in @semid64; returns 0 (IPC_STAT) or the full ipc id
 * (SEM_STAT/SEM_STAT_ANY) on success, negative error otherwise.
 */
static int semctl_stat(struct ipc_namespace *ns, int semid,
			 int cmd, struct semid64_ds *semid64)
{
	struct sem_array *sma;
	time64_t semotime;
	int err;

	memset(semid64, 0, sizeof(*semid64));

	rcu_read_lock();
	if (cmd == SEM_STAT || cmd == SEM_STAT_ANY) {
		/* SEM_STAT takes an index, no sequence number check */
		sma = sem_obtain_object(ns, semid);
		if (IS_ERR(sma)) {
			err = PTR_ERR(sma);
			goto out_unlock;
		}
	} else { /* IPC_STAT */
		sma = sem_obtain_object_check(ns, semid);
		if (IS_ERR(sma)) {
			err = PTR_ERR(sma);
			goto out_unlock;
		}
	}

	/* SEM_STAT_ANY skips the read-permission check, only audits */
	if (cmd == SEM_STAT_ANY)
		audit_ipc_obj(&sma->sem_perm);
	else {
		err = -EACCES;
		if (ipcperms(ns, &sma->sem_perm, S_IRUGO))
			goto out_unlock;
	}

	err = security_sem_semctl(&sma->sem_perm, cmd);
	if (err)
		goto out_unlock;

	ipc_lock_object(&sma->sem_perm);

	if (!ipc_valid_object(&sma->sem_perm)) {
		ipc_unlock_object(&sma->sem_perm);
		err = -EIDRM;
		goto out_unlock;
	}

	kernel_to_ipc64_perm(&sma->sem_perm, &semid64->sem_perm);
	semotime = get_semotime(sma);
	semid64->sem_otime = semotime;
	semid64->sem_ctime = sma->sem_ctime;
#ifndef CONFIG_64BIT
	semid64->sem_otime_high = semotime >> 32;
	semid64->sem_ctime_high = sma->sem_ctime >> 32;
#endif
	semid64->sem_nsems = sma->sem_nsems;

	if (cmd == IPC_STAT) {
		/*
		 * As defined in SUS:
		 * Return 0 on success
		 */
		err = 0;
	} else {
		/*
		 * SEM_STAT and SEM_STAT_ANY (both Linux specific)
		 * Return the full id, including the sequence number
		 */
		err = sma->sem_perm.id;
	}
	ipc_unlock_object(&sma->sem_perm);
out_unlock:
	rcu_read_unlock();
	return err;
}
1304
/*
 * semctl_info - handle IPC_INFO and SEM_INFO: copy the limits (and, for
 * SEM_INFO, current usage) to userspace. Returns the highest in-use
 * index on success (0 if none), negative error otherwise.
 */
static int semctl_info(struct ipc_namespace *ns, int semid,
			 int cmd, void __user *p)
{
	struct seminfo seminfo;
	int max_idx;
	int err;

	err = security_sem_semctl(NULL, cmd);
	if (err)
		return err;

	memset(&seminfo, 0, sizeof(seminfo));
	seminfo.semmni = ns->sc_semmni;
	seminfo.semmns = ns->sc_semmns;
	seminfo.semmsl = ns->sc_semmsl;
	seminfo.semopm = ns->sc_semopm;
	seminfo.semvmx = SEMVMX;
	seminfo.semmnu = SEMMNU;
	seminfo.semmap = SEMMAP;
	seminfo.semume = SEMUME;
	down_read(&sem_ids(ns).rwsem);
	if (cmd == SEM_INFO) {
		/* SEM_INFO reports actual usage instead of the limits */
		seminfo.semusz = sem_ids(ns).in_use;
		seminfo.semaem = ns->used_sems;
	} else {
		seminfo.semusz = SEMUSZ;
		seminfo.semaem = SEMAEM;
	}
	max_idx = ipc_get_maxidx(&sem_ids(ns));
	up_read(&sem_ids(ns).rwsem);
	if (copy_to_user(p, &seminfo, sizeof(struct seminfo)))
		return -EFAULT;
	return (max_idx < 0) ? 0 : max_idx;
}
1339
/*
 * semctl_setval - handle SETVAL: set one semaphore to @val, clear all
 * undo adjustments for it, and wake up any operations that can now
 * proceed.
 */
static int semctl_setval(struct ipc_namespace *ns, int semid, int semnum,
		int val)
{
	struct sem_undo *un;
	struct sem_array *sma;
	struct sem *curr;
	int err;
	DEFINE_WAKE_Q(wake_q);

	if (val > SEMVMX || val < 0)
		return -ERANGE;

	rcu_read_lock();
	sma = sem_obtain_object_check(ns, semid);
	if (IS_ERR(sma)) {
		rcu_read_unlock();
		return PTR_ERR(sma);
	}

	if (semnum < 0 || semnum >= sma->sem_nsems) {
		rcu_read_unlock();
		return -EINVAL;
	}

	/* write permission is required to set a value */
	if (ipcperms(ns, &sma->sem_perm, S_IWUGO)) {
		rcu_read_unlock();
		return -EACCES;
	}

	err = security_sem_semctl(&sma->sem_perm, SETVAL);
	if (err) {
		rcu_read_unlock();
		return -EACCES;
	}

	sem_lock(sma, NULL, -1);

	if (!ipc_valid_object(&sma->sem_perm)) {
		sem_unlock(sma, -1);
		rcu_read_unlock();
		return -EIDRM;
	}

	semnum = array_index_nospec(semnum, sma->sem_nsems);
	curr = &sma->sems[semnum];

	/* clear the undo adjustments for this semaphore in every undo list */
	ipc_assert_locked_object(&sma->sem_perm);
	list_for_each_entry(un, &sma->list_id, list_id)
		un->semadj[semnum] = 0;

	curr->semval = val;
	ipc_update_pid(&curr->sempid, task_tgid(current));
	sma->sem_ctime = ktime_get_real_seconds();
	/* maybe some queued-up processes were waiting for this */
	do_smart_update(sma, NULL, 0, 0, &wake_q);
	sem_unlock(sma, -1);
	rcu_read_unlock();
	wake_up_q(&wake_q);
	return 0;
}
1401
/*
 * semctl_main - handle GETALL/SETALL and the per-semaphore query
 * commands (GETVAL, GETPID, GETNCNT, GETZCNT).
 * For sets larger than SEMMSL_FAST the value array is kvmalloc'ed; the
 * set is temporarily unlocked around the allocation while an extra
 * reference (ipc_rcu_getref) keeps it alive.
 */
static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
		int cmd, void __user *p)
{
	struct sem_array *sma;
	struct sem *curr;
	int err, nsems;
	ushort fast_sem_io[SEMMSL_FAST];
	ushort *sem_io = fast_sem_io;
	DEFINE_WAKE_Q(wake_q);

	rcu_read_lock();
	sma = sem_obtain_object_check(ns, semid);
	if (IS_ERR(sma)) {
		rcu_read_unlock();
		return PTR_ERR(sma);
	}

	nsems = sma->sem_nsems;

	err = -EACCES;
	if (ipcperms(ns, &sma->sem_perm, cmd == SETALL ? S_IWUGO : S_IRUGO))
		goto out_rcu_wakeup;

	err = security_sem_semctl(&sma->sem_perm, cmd);
	if (err)
		goto out_rcu_wakeup;

	err = -EACCES;
	switch (cmd) {
	case GETALL:
	{
		ushort __user *array = p;
		int i;

		sem_lock(sma, NULL, -1);
		if (!ipc_valid_object(&sma->sem_perm)) {
			err = -EIDRM;
			goto out_unlock;
		}
		if (nsems > SEMMSL_FAST) {
			/* too big for the stack buffer: allocate, keeping
			 * the set alive via an extra reference while
			 * unlocked */
			if (!ipc_rcu_getref(&sma->sem_perm)) {
				err = -EIDRM;
				goto out_unlock;
			}
			sem_unlock(sma, -1);
			rcu_read_unlock();
			sem_io = kvmalloc_array(nsems, sizeof(ushort),
						GFP_KERNEL);
			if (sem_io == NULL) {
				ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
				return -ENOMEM;
			}

			rcu_read_lock();
			sem_lock_and_putref(sma);
			if (!ipc_valid_object(&sma->sem_perm)) {
				err = -EIDRM;
				goto out_unlock;
			}
		}
		for (i = 0; i < sma->sem_nsems; i++)
			sem_io[i] = sma->sems[i].semval;
		sem_unlock(sma, -1);
		rcu_read_unlock();
		err = 0;
		if (copy_to_user(array, sem_io, nsems*sizeof(ushort)))
			err = -EFAULT;
		goto out_free;
	}
	case SETALL:
	{
		int i;
		struct sem_undo *un;

		if (!ipc_rcu_getref(&sma->sem_perm)) {
			err = -EIDRM;
			goto out_rcu_wakeup;
		}
		rcu_read_unlock();

		if (nsems > SEMMSL_FAST) {
			sem_io = kvmalloc_array(nsems, sizeof(ushort),
						GFP_KERNEL);
			if (sem_io == NULL) {
				ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
				return -ENOMEM;
			}
		}

		if (copy_from_user(sem_io, p, nsems*sizeof(ushort))) {
			ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
			err = -EFAULT;
			goto out_free;
		}

		/* validate all new values before touching the set */
		for (i = 0; i < nsems; i++) {
			if (sem_io[i] > SEMVMX) {
				ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
				err = -ERANGE;
				goto out_free;
			}
		}
		rcu_read_lock();
		sem_lock_and_putref(sma);
		if (!ipc_valid_object(&sma->sem_perm)) {
			err = -EIDRM;
			goto out_unlock;
		}

		for (i = 0; i < nsems; i++) {
			sma->sems[i].semval = sem_io[i];
			ipc_update_pid(&sma->sems[i].sempid, task_tgid(current));
		}

		/* SETALL invalidates all pending undo adjustments */
		ipc_assert_locked_object(&sma->sem_perm);
		list_for_each_entry(un, &sma->list_id, list_id) {
			for (i = 0; i < nsems; i++)
				un->semadj[i] = 0;
		}
		sma->sem_ctime = ktime_get_real_seconds();
		/* maybe some queued-up processes were waiting for this */
		do_smart_update(sma, NULL, 0, 0, &wake_q);
		err = 0;
		goto out_unlock;
	}
	/* GETVAL, GETPID, GETNCNT, GETZCNT: fall-through */
	}
	err = -EINVAL;
	if (semnum < 0 || semnum >= nsems)
		goto out_rcu_wakeup;

	sem_lock(sma, NULL, -1);
	if (!ipc_valid_object(&sma->sem_perm)) {
		err = -EIDRM;
		goto out_unlock;
	}

	semnum = array_index_nospec(semnum, nsems);
	curr = &sma->sems[semnum];

	switch (cmd) {
	case GETVAL:
		err = curr->semval;
		goto out_unlock;
	case GETPID:
		err = pid_vnr(curr->sempid);
		goto out_unlock;
	case GETNCNT:
		err = count_semcnt(sma, semnum, 0);
		goto out_unlock;
	case GETZCNT:
		err = count_semcnt(sma, semnum, 1);
		goto out_unlock;
	}

out_unlock:
	sem_unlock(sma, -1);
out_rcu_wakeup:
	rcu_read_unlock();
	wake_up_q(&wake_q);
out_free:
	if (sem_io != fast_sem_io)
		kvfree(sem_io);
	return err;
}
1567
/*
 * Copy a semid64_ds in from userspace, converting from the legacy
 * semid_ds layout for IPC_OLD callers (only the permission fields used
 * by IPC_SET are transferred). Returns 0 or -EFAULT/-EINVAL.
 */
static inline unsigned long
copy_semid_from_user(struct semid64_ds *out, void __user *buf, int version)
{
	switch (version) {
	case IPC_64:
		if (copy_from_user(out, buf, sizeof(*out)))
			return -EFAULT;
		return 0;
	case IPC_OLD:
	{
		struct semid_ds tbuf_old;

		if (copy_from_user(&tbuf_old, buf, sizeof(tbuf_old)))
			return -EFAULT;

		out->sem_perm.uid = tbuf_old.sem_perm.uid;
		out->sem_perm.gid = tbuf_old.sem_perm.gid;
		out->sem_perm.mode = tbuf_old.sem_perm.mode;

		return 0;
	}
	default:
		return -EINVAL;
	}
}
1593
1594
1595
1596
1597
1598
/*
 * This function handles some semctl commands which require the rwsem
 * to be held in write mode.
 * NOTE: no locks must be held, the rwsem is taken inside this function.
 */
static int semctl_down(struct ipc_namespace *ns, int semid,
		       int cmd, struct semid64_ds *semid64)
{
	struct sem_array *sma;
	int err;
	struct kern_ipc_perm *ipcp;

	down_write(&sem_ids(ns).rwsem);
	rcu_read_lock();

	ipcp = ipcctl_obtain_check(ns, &sem_ids(ns), semid, cmd,
				      &semid64->sem_perm, 0);
	if (IS_ERR(ipcp)) {
		err = PTR_ERR(ipcp);
		goto out_unlock1;
	}

	sma = container_of(ipcp, struct sem_array, sem_perm);

	err = security_sem_semctl(&sma->sem_perm, cmd);
	if (err)
		goto out_unlock1;

	switch (cmd) {
	case IPC_RMID:
		sem_lock(sma, NULL, -1);
		/* freeary unlocks the ipc object and rcu */
		freeary(ns, ipcp);
		goto out_up;
	case IPC_SET:
		sem_lock(sma, NULL, -1);
		err = ipc_update_perm(&semid64->sem_perm, ipcp);
		if (err)
			goto out_unlock0;
		sma->sem_ctime = ktime_get_real_seconds();
		break;
	default:
		err = -EINVAL;
		goto out_unlock1;
	}

out_unlock0:
	sem_unlock(sma, -1);
out_unlock1:
	rcu_read_unlock();
out_up:
	up_write(&sem_ids(ns).rwsem);
	return err;
}
1648
/*
 * ksys_semctl - dispatch a semctl(2) command to the proper handler.
 * @version selects IPC_64 or the legacy IPC_OLD structure layouts.
 */
static long ksys_semctl(int semid, int semnum, int cmd, unsigned long arg, int version)
{
	struct ipc_namespace *ns;
	void __user *p = (void __user *)arg;
	struct semid64_ds semid64;
	int err;

	if (semid < 0)
		return -EINVAL;

	ns = current->nsproxy->ipc_ns;

	switch (cmd) {
	case IPC_INFO:
	case SEM_INFO:
		return semctl_info(ns, semid, cmd, p);
	case IPC_STAT:
	case SEM_STAT:
	case SEM_STAT_ANY:
		err = semctl_stat(ns, semid, cmd, &semid64);
		if (err < 0)
			return err;
		if (copy_semid_to_user(p, &semid64, version))
			err = -EFAULT;
		return err;
	case GETALL:
	case GETVAL:
	case GETPID:
	case GETNCNT:
	case GETZCNT:
	case SETALL:
		return semctl_main(ns, semid, semnum, cmd, p);
	case SETVAL: {
		int val;
#if defined(CONFIG_64BIT) && defined(__BIG_ENDIAN)
		/* big-endian 64bit: the value lives in the upper half */
		val = arg >> 32;
#else
		/* 32bit or little endian: use the lower 32 bits */
		val = arg;
#endif
		return semctl_setval(ns, semid, semnum, val);
	}
	case IPC_SET:
		if (copy_semid_from_user(&semid64, p, version))
			return -EFAULT;
		/* fall through - semid64 is consumed by semctl_down() */
	case IPC_RMID:
		return semctl_down(ns, semid, cmd, &semid64);
	default:
		return -EINVAL;
	}
}
1702
/* semctl(2): modern entry point, always uses the IPC_64 layout. */
SYSCALL_DEFINE4(semctl, int, semid, int, semnum, int, cmd, unsigned long, arg)
{
	return ksys_semctl(semid, semnum, cmd, arg, IPC_64);
}
1707
1708#ifdef CONFIG_ARCH_WANT_IPC_PARSE_VERSION
/*
 * Legacy semctl entry: ipc_parse_version() extracts (and strips) the
 * IPC_64 flag from cmd to pick the user-space structure layout.
 * Note: version must be computed before the call, since &cmd is modified.
 */
long ksys_old_semctl(int semid, int semnum, int cmd, unsigned long arg)
{
	int version = ipc_parse_version(&cmd);

	return ksys_semctl(semid, semnum, cmd, arg, version);
}
1715
/* old_semctl(2): legacy ABI wrapper around ksys_old_semctl(). */
SYSCALL_DEFINE4(old_semctl, int, semid, int, semnum, int, cmd, unsigned long, arg)
{
	return ksys_old_semctl(semid, semnum, cmd, arg);
}
1720#endif
1721
1722#ifdef CONFIG_COMPAT
1723
/*
 * Legacy (pre-IPC_64) semid_ds layout as seen by 32-bit user space.
 * copy_compat_semid_to_user() fills in only sem_perm, sem_otime,
 * sem_ctime and sem_nsems; the pointer fields are zeroed by the
 * preceding memset and kept only for ABI layout compatibility.
 */
struct compat_semid_ds {
	struct compat_ipc_perm sem_perm;
	old_time32_t sem_otime;
	old_time32_t sem_ctime;
	compat_uptr_t sem_base;		/* not filled in, layout only */
	compat_uptr_t sem_pending;	/* not filled in, layout only */
	compat_uptr_t sem_pending_last;	/* not filled in, layout only */
	compat_uptr_t undo;		/* not filled in, layout only */
	unsigned short sem_nsems;
};
1734
1735static int copy_compat_semid_from_user(struct semid64_ds *out, void __user *buf,
1736 int version)
1737{
1738 memset(out, 0, sizeof(*out));
1739 if (version == IPC_64) {
1740 struct compat_semid64_ds __user *p = buf;
1741 return get_compat_ipc64_perm(&out->sem_perm, &p->sem_perm);
1742 } else {
1743 struct compat_semid_ds __user *p = buf;
1744 return get_compat_ipc_perm(&out->sem_perm, &p->sem_perm);
1745 }
1746}
1747
1748static int copy_compat_semid_to_user(void __user *buf, struct semid64_ds *in,
1749 int version)
1750{
1751 if (version == IPC_64) {
1752 struct compat_semid64_ds v;
1753 memset(&v, 0, sizeof(v));
1754 to_compat_ipc64_perm(&v.sem_perm, &in->sem_perm);
1755 v.sem_otime = lower_32_bits(in->sem_otime);
1756 v.sem_otime_high = upper_32_bits(in->sem_otime);
1757 v.sem_ctime = lower_32_bits(in->sem_ctime);
1758 v.sem_ctime_high = upper_32_bits(in->sem_ctime);
1759 v.sem_nsems = in->sem_nsems;
1760 return copy_to_user(buf, &v, sizeof(v));
1761 } else {
1762 struct compat_semid_ds v;
1763 memset(&v, 0, sizeof(v));
1764 to_compat_ipc_perm(&v.sem_perm, &in->sem_perm);
1765 v.sem_otime = in->sem_otime;
1766 v.sem_ctime = in->sem_ctime;
1767 v.sem_nsems = in->sem_nsems;
1768 return copy_to_user(buf, &v, sizeof(v));
1769 }
1770}
1771
/*
 * Compat semctl dispatcher. The IPC_64 flag is masked off for the
 * switch (legacy callers may pass it OR'd into cmd), while @version
 * still selects the user-space layout for the copy helpers.
 */
static long compat_ksys_semctl(int semid, int semnum, int cmd, int arg, int version)
{
	void __user *p = compat_ptr(arg);
	struct ipc_namespace *ns;
	struct semid64_ds semid64;
	int err;

	ns = current->nsproxy->ipc_ns;

	if (semid < 0)
		return -EINVAL;

	switch (cmd & (~IPC_64)) {
	case IPC_INFO:
	case SEM_INFO:
		return semctl_info(ns, semid, cmd, p);
	case IPC_STAT:
	case SEM_STAT:
	case SEM_STAT_ANY:
		err = semctl_stat(ns, semid, cmd, &semid64);
		if (err < 0)
			return err;
		if (copy_compat_semid_to_user(p, &semid64, version))
			err = -EFAULT;
		return err;
	case GETVAL:
	case GETPID:
	case GETNCNT:
	case GETZCNT:
	case GETALL:
	case SETALL:
		return semctl_main(ns, semid, semnum, cmd, p);
	case SETVAL:
		return semctl_setval(ns, semid, semnum, arg);
	case IPC_SET:
		if (copy_compat_semid_from_user(&semid64, p, version))
			return -EFAULT;
		/* fall through - IPC_SET and IPC_RMID share semctl_down() */
	case IPC_RMID:
		return semctl_down(ns, semid, cmd, &semid64);
	default:
		return -EINVAL;
	}
}
1816
/* compat semctl(2): modern compat entry point, IPC_64 layout. */
COMPAT_SYSCALL_DEFINE4(semctl, int, semid, int, semnum, int, cmd, int, arg)
{
	return compat_ksys_semctl(semid, semnum, cmd, arg, IPC_64);
}
1821
1822#ifdef CONFIG_ARCH_WANT_COMPAT_IPC_PARSE_VERSION
/*
 * Legacy compat semctl entry: compat_ipc_parse_version() extracts
 * (and strips) the IPC_64 flag from cmd before dispatch.
 */
long compat_ksys_old_semctl(int semid, int semnum, int cmd, int arg)
{
	int version = compat_ipc_parse_version(&cmd);

	return compat_ksys_semctl(semid, semnum, cmd, arg, version);
}
1829
/* compat old_semctl(2): legacy ABI wrapper. */
COMPAT_SYSCALL_DEFINE4(old_semctl, int, semid, int, semnum, int, cmd, int, arg)
{
	return compat_ksys_old_semctl(semid, semnum, cmd, arg);
}
1834#endif
1835#endif
1836
1837
1838
1839
1840
1841
1842
1843
1844
1845
1846
1847
1848static inline int get_undo_list(struct sem_undo_list **undo_listp)
1849{
1850 struct sem_undo_list *undo_list;
1851
1852 undo_list = current->sysvsem.undo_list;
1853 if (!undo_list) {
1854 undo_list = kzalloc(sizeof(*undo_list), GFP_KERNEL);
1855 if (undo_list == NULL)
1856 return -ENOMEM;
1857 spin_lock_init(&undo_list->lock);
1858 refcount_set(&undo_list->refcnt, 1);
1859 INIT_LIST_HEAD(&undo_list->list_proc);
1860
1861 current->sysvsem.undo_list = undo_list;
1862 }
1863 *undo_listp = undo_list;
1864 return 0;
1865}
1866
/*
 * Find the sem_undo entry for @semid on the per-task undo list.
 * Traversal is RCU-safe; the spin_is_locked() argument feeds
 * list_for_each_entry_rcu()'s lockdep condition, allowing callers that
 * hold ulp->lock instead of the RCU read lock.
 * Returns the entry or NULL if none is registered for @semid.
 */
static struct sem_undo *__lookup_undo(struct sem_undo_list *ulp, int semid)
{
	struct sem_undo *un;

	list_for_each_entry_rcu(un, &ulp->list_proc, list_proc,
				spin_is_locked(&ulp->lock)) {
		if (un->semid == semid)
			return un;
	}
	return NULL;
}
1878
1879static struct sem_undo *lookup_undo(struct sem_undo_list *ulp, int semid)
1880{
1881 struct sem_undo *un;
1882
1883 assert_spin_locked(&ulp->lock);
1884
1885 un = __lookup_undo(ulp, semid);
1886 if (un) {
1887 list_del_rcu(&un->list_proc);
1888 list_add_rcu(&un->list_proc, &ulp->list_proc);
1889 }
1890 return un;
1891}
1892
1893
1894
1895
1896
1897
1898
1899
1900
1901
1902
1903
/*
 * Look up the undo structure for (current task, @semid), allocating
 * and linking a new one if none exists.
 *
 * On success the RCU read lock is held on return (the caller unlocks
 * it); on error an ERR_PTR is returned with the RCU lock dropped.
 * The allocation is done without any lock held, so the array has to be
 * re-validated afterwards and the lookup repeated to close races.
 */
static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid)
{
	struct sem_array *sma;
	struct sem_undo_list *ulp;
	struct sem_undo *un, *new;
	int nsems, error;

	error = get_undo_list(&ulp);
	if (error)
		return ERR_PTR(error);

	rcu_read_lock();
	spin_lock(&ulp->lock);
	un = lookup_undo(ulp, semid);
	spin_unlock(&ulp->lock);
	if (likely(un != NULL))
		goto out;

	/* no undo structure around - allocate one. */
	/* step 1: figure out the size of the semaphore array */
	sma = sem_obtain_object_check(ns, semid);
	if (IS_ERR(sma)) {
		rcu_read_unlock();
		return ERR_CAST(sma);
	}

	nsems = sma->sem_nsems;
	/* pin the array so it cannot be freed while we allocate */
	if (!ipc_rcu_getref(&sma->sem_perm)) {
		rcu_read_unlock();
		un = ERR_PTR(-EIDRM);
		goto out;
	}
	rcu_read_unlock();

	/* step 2: allocate new undo structure (semadj array is appended) */
	new = kzalloc(sizeof(struct sem_undo) + sizeof(short)*nsems, GFP_KERNEL);
	if (!new) {
		ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
		return ERR_PTR(-ENOMEM);
	}

	/* step 3: Acquire the lock on the semaphore array */
	rcu_read_lock();
	sem_lock_and_putref(sma);
	if (!ipc_valid_object(&sma->sem_perm)) {
		/* array was removed while we slept in the allocation */
		sem_unlock(sma, -1);
		rcu_read_unlock();
		kfree(new);
		un = ERR_PTR(-EIDRM);
		goto out;
	}
	spin_lock(&ulp->lock);

	/*
	 * step 4: check for races: did someone else allocate the undo
	 * structure while we were sleeping?
	 */
	un = lookup_undo(ulp, semid);
	if (un) {
		kfree(new);
		goto success;
	}
	/* step 5: initialize and link the new undo structure */
	new->semadj = (short *) &new[1];
	new->ulp = ulp;
	new->semid = semid;
	assert_spin_locked(&ulp->lock);
	list_add_rcu(&new->list_proc, &ulp->list_proc);
	ipc_assert_locked_object(&sma->sem_perm);
	list_add(&new->list_id, &sma->list_id);
	un = new;

success:
	spin_unlock(&ulp->lock);
	sem_unlock(sma, -1);
out:
	return un;
}
1981
/*
 * Core of semop(2)/semtimedop(2): validate and copy in the sop array,
 * try to perform the operations atomically, and otherwise queue the
 * task on the appropriate pending list and sleep until woken, a signal
 * arrives, or the (optional) timeout expires.
 */
static long do_semtimedop(int semid, struct sembuf __user *tsops,
		unsigned nsops, const struct timespec64 *timeout)
{
	int error = -EINVAL;
	struct sem_array *sma;
	struct sembuf fast_sops[SEMOPM_FAST];
	struct sembuf *sops = fast_sops, *sop;
	struct sem_undo *un;
	int max, locknum;
	bool undos = false, alter = false, dupsop = false;
	struct sem_queue queue;
	unsigned long dup = 0, jiffies_left = 0;
	struct ipc_namespace *ns;

	ns = current->nsproxy->ipc_ns;

	if (nsops < 1 || semid < 0)
		return -EINVAL;
	if (nsops > ns->sc_semopm)
		return -E2BIG;
	/* fall back to a heap buffer when the on-stack array is too small */
	if (nsops > SEMOPM_FAST) {
		sops = kvmalloc_array(nsops, sizeof(*sops), GFP_KERNEL);
		if (sops == NULL)
			return -ENOMEM;
	}

	if (copy_from_user(sops, tsops, nsops * sizeof(*tsops))) {
		error = -EFAULT;
		goto out_free;
	}

	if (timeout) {
		if (timeout->tv_sec < 0 || timeout->tv_nsec < 0 ||
			timeout->tv_nsec >= 1000000000L) {
			error = -EINVAL;
			goto out_free;
		}
		jiffies_left = timespec64_to_jiffies(timeout);
	}

	/*
	 * Scan the sops: track the highest semaphore index, whether any op
	 * requests SEM_UNDO, whether any op alters a value, and whether the
	 * same semaphore might be touched twice (dupsop).
	 */
	max = 0;
	for (sop = sops; sop < sops + nsops; sop++) {
		unsigned long mask = 1ULL << ((sop->sem_num) % BITS_PER_LONG);

		if (sop->sem_num >= max)
			max = sop->sem_num;
		if (sop->sem_flg & SEM_UNDO)
			undos = true;
		if (dup & mask) {
			/*
			 * There was a previous alter access that appears
			 * to have accessed the same semaphore, thus use
			 * the dupsop logic. "appears", because the detection
			 * can only check % BITS_PER_LONG.
			 */
			dupsop = true;
		}
		if (sop->sem_op != 0) {
			alter = true;
			dup |= mask;
		}
	}

	if (undos) {
		/* On success, find_alloc_undo takes the rcu_read_lock */
		un = find_alloc_undo(ns, semid);
		if (IS_ERR(un)) {
			error = PTR_ERR(un);
			goto out_free;
		}
	} else {
		un = NULL;
		rcu_read_lock();
	}

	sma = sem_obtain_object_check(ns, semid);
	if (IS_ERR(sma)) {
		rcu_read_unlock();
		error = PTR_ERR(sma);
		goto out_free;
	}

	error = -EFBIG;
	if (max >= sma->sem_nsems) {
		rcu_read_unlock();
		goto out_free;
	}

	error = -EACCES;
	if (ipcperms(ns, &sma->sem_perm, alter ? S_IWUGO : S_IRUGO)) {
		rcu_read_unlock();
		goto out_free;
	}

	error = security_sem_semop(&sma->sem_perm, sops, nsops, alter);
	if (error) {
		rcu_read_unlock();
		goto out_free;
	}

	error = -EIDRM;
	locknum = sem_lock(sma, sops, nsops);
	/*
	 * Holding either the per-semaphore or the global lock (whichever
	 * sem_lock() chose), re-check that the array has not been removed
	 * since we looked it up under RCU alone.
	 */
	if (!ipc_valid_object(&sma->sem_perm))
		goto out_unlock_free;
	/*
	 * semid identifiers are not unique - find_alloc_undo may have
	 * allocated an undo structure, it was invalidated by an RMID
	 * and a new array may have received the same id. This case can be
	 * detected checking un->semid. The existence of "un" itself is
	 * guaranteed by rcu.
	 */
	if (un && un->semid == -1)
		goto out_unlock_free;

	queue.sops = sops;
	queue.nsops = nsops;
	queue.undo = un;
	queue.pid = task_tgid(current);
	queue.alter = alter;
	queue.dupsop = dupsop;

	error = perform_atomic_semop(sma, &queue);
	if (error == 0) {
		/* the operation went through immediately: no need to sleep */
		DEFINE_WAKE_Q(wake_q);

		/*
		 * If the operation was successful, then do the required
		 * updates and wake any now-runnable waiters.
		 */
		if (alter)
			do_smart_update(sma, sops, nsops, 1, &wake_q);
		else
			set_semotime(sma, sops);

		sem_unlock(sma, locknum);
		rcu_read_unlock();
		wake_up_q(&wake_q);

		goto out_free;
	}
	if (error < 0)
		goto out_unlock_free;

	/*
	 * We need to sleep on this operation, so we put the current
	 * task into the pending queue and go to sleep.
	 */
	if (nsops == 1) {
		struct sem *curr;
		int idx = array_index_nospec(sops->sem_num, sma->sem_nsems);
		curr = &sma->sems[idx];

		if (alter) {
			if (sma->complex_count) {
				/* complex ops pending: use the global queue */
				list_add_tail(&queue.list,
						&sma->pending_alter);
			} else {
				/* single-sop fast path: per-semaphore queue */
				list_add_tail(&queue.list,
						&curr->pending_alter);
			}
		} else {
			list_add_tail(&queue.list, &curr->pending_const);
		}
	} else {
		/* multi-sop ("complex") operation: global queues */
		if (!sma->complex_count)
			merge_queues(sma);

		if (alter)
			list_add_tail(&queue.list, &sma->pending_alter);
		else
			list_add_tail(&queue.list, &sma->pending_const);

		sma->complex_count++;
	}

	do {
		/* memory ordering ensured by the lock in sem_lock() */
		WRITE_ONCE(queue.status, -EINTR);
		queue.sleeper = current;

		/* memory ordering is ensured by the lock in sem_lock() */
		__set_current_state(TASK_INTERRUPTIBLE);
		sem_unlock(sma, locknum);
		rcu_read_unlock();

		if (timeout)
			jiffies_left = schedule_timeout(jiffies_left);
		else
			schedule();

		/*
		 * Fastpath: the semop has completed, either successfully or
		 * not, from the syscall pov; we're done.
		 *
		 * We _do_ care, nonetheless, about being awoken by a signal
		 * or spuriously. queue.status is checked again after taking
		 * sem_lock, so that we can detect wakeups that raced with
		 * the waker's status update.
		 */
		error = READ_ONCE(queue.status);
		if (error != -EINTR) {
			/*
			 * acquire semantics pairing with the waker's write to
			 * queue.status (control-dependency barrier)
			 */
			smp_acquire__after_ctrl_dep();
			goto out_free;
		}

		rcu_read_lock();
		locknum = sem_lock(sma, sops, nsops);

		/* array removed while we slept? */
		if (!ipc_valid_object(&sma->sem_perm))
			goto out_unlock_free;

		/*
		 * No barrier necessary: we are protected by sem_lock().
		 */
		error = READ_ONCE(queue.status);

		/*
		 * If queue.status != -EINTR we were woken up by another
		 * process; exit the loop with that result.
		 */
		if (error != -EINTR)
			goto out_unlock_free;

		/*
		 * The timeout expired while we slept: report -EAGAIN.
		 */
		if (timeout && jiffies_left == 0)
			error = -EAGAIN;
	} while (error == -EINTR && !signal_pending(current)); /* spurious wakeup? */

	/* signal or timeout: remove ourselves from the pending queue */
	unlink_queue(sma, &queue);

out_unlock_free:
	sem_unlock(sma, locknum);
	rcu_read_unlock();
out_free:
	if (sops != fast_sops)
		kvfree(sops);
	return error;
}
2234
2235long ksys_semtimedop(int semid, struct sembuf __user *tsops,
2236 unsigned int nsops, const struct __kernel_timespec __user *timeout)
2237{
2238 if (timeout) {
2239 struct timespec64 ts;
2240 if (get_timespec64(&ts, timeout))
2241 return -EFAULT;
2242 return do_semtimedop(semid, tsops, nsops, &ts);
2243 }
2244 return do_semtimedop(semid, tsops, nsops, NULL);
2245}
2246
/* semtimedop(2): syscall wrapper around ksys_semtimedop(). */
SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
		unsigned int, nsops, const struct __kernel_timespec __user *, timeout)
{
	return ksys_semtimedop(semid, tsops, nsops, timeout);
}
2252
2253#ifdef CONFIG_COMPAT_32BIT_TIME
2254long compat_ksys_semtimedop(int semid, struct sembuf __user *tsems,
2255 unsigned int nsops,
2256 const struct old_timespec32 __user *timeout)
2257{
2258 if (timeout) {
2259 struct timespec64 ts;
2260 if (get_old_timespec32(&ts, timeout))
2261 return -EFAULT;
2262 return do_semtimedop(semid, tsems, nsops, &ts);
2263 }
2264 return do_semtimedop(semid, tsems, nsops, NULL);
2265}
2266
/* semtimedop_time32(2): wrapper for the old 32-bit timespec ABI. */
SYSCALL_DEFINE4(semtimedop_time32, int, semid, struct sembuf __user *, tsems,
		unsigned int, nsops,
		const struct old_timespec32 __user *, timeout)
{
	return compat_ksys_semtimedop(semid, tsems, nsops, timeout);
}
2273#endif
2274
/* semop(2) is semtimedop(2) without a timeout. */
SYSCALL_DEFINE3(semop, int, semid, struct sembuf __user *, tsops,
		unsigned, nsops)
{
	return do_semtimedop(semid, tsops, nsops, NULL);
}
2280
2281
2282
2283
2284
2285int copy_semundo(unsigned long clone_flags, struct task_struct *tsk)
2286{
2287 struct sem_undo_list *undo_list;
2288 int error;
2289
2290 if (clone_flags & CLONE_SYSVSEM) {
2291 error = get_undo_list(&undo_list);
2292 if (error)
2293 return error;
2294 refcount_inc(&undo_list->refcnt);
2295 tsk->sysvsem.undo_list = undo_list;
2296 } else
2297 tsk->sysvsem.undo_list = NULL;
2298
2299 return 0;
2300}
2301
2302
2303
2304
2305
2306
2307
2308
2309
2310
2311
2312
2313
/*
 * Called at task exit: drop this task's reference on the shared undo
 * list and, if it was the last one, apply every recorded SEM_UNDO
 * adjustment and free the list. Each iteration of the loop detaches
 * and processes one sem_undo entry under the array lock.
 */
void exit_sem(struct task_struct *tsk)
{
	struct sem_undo_list *ulp;

	ulp = tsk->sysvsem.undo_list;
	if (!ulp)
		return;
	tsk->sysvsem.undo_list = NULL;

	/* other CLONE_SYSVSEM tasks may still share the list */
	if (!refcount_dec_and_test(&ulp->refcnt))
		return;

	for (;;) {
		struct sem_array *sma;
		struct sem_undo *un;
		int semid, i;
		DEFINE_WAKE_Q(wake_q);

		cond_resched();

		rcu_read_lock();
		un = list_entry_rcu(ulp->list_proc.next,
				    struct sem_undo, list_proc);
		if (&un->list_proc == &ulp->list_proc) {
			/*
			 * The list is empty. The lock/unlock pair makes sure
			 * that a concurrent freeary() has finished with
			 * ulp->lock before we free ulp below.
			 */
			spin_lock(&ulp->lock);
			spin_unlock(&ulp->lock);
			rcu_read_unlock();
			break;
		}
		spin_lock(&ulp->lock);
		semid = un->semid;
		spin_unlock(&ulp->lock);

		/* exit_sem raced with IPC_RMID, nothing to do */
		if (semid == -1) {
			rcu_read_unlock();
			continue;
		}

		sma = sem_obtain_object_check(tsk->nsproxy->ipc_ns, semid);
		/* exit_sem raced with IPC_RMID, nothing to do */
		if (IS_ERR(sma)) {
			rcu_read_unlock();
			continue;
		}

		sem_lock(sma, NULL, -1);
		/* exit_sem raced with IPC_RMID, nothing to do */
		if (!ipc_valid_object(&sma->sem_perm)) {
			sem_unlock(sma, -1);
			rcu_read_unlock();
			continue;
		}
		un = __lookup_undo(ulp, semid);
		if (un == NULL) {
			/*
			 * exit_sem raced with IPC_RMID+semget() that created
			 * exactly the same semid. Nothing to do.
			 */
			sem_unlock(sma, -1);
			rcu_read_unlock();
			continue;
		}

		/* remove un from both linked lists */
		ipc_assert_locked_object(&sma->sem_perm);
		list_del(&un->list_id);

		spin_lock(&ulp->lock);
		list_del_rcu(&un->list_proc);
		spin_unlock(&ulp->lock);

		/* perform the adjustments registered in un */
		for (i = 0; i < sma->sem_nsems; i++) {
			struct sem *semaphore = &sma->sems[i];
			if (un->semadj[i]) {
				semaphore->semval += un->semadj[i];
				/*
				 * Clamp the adjusted value into the valid
				 * range: never below 0 and never above
				 * SEMVMX.
				 */
				if (semaphore->semval < 0)
					semaphore->semval = 0;
				if (semaphore->semval > SEMVMX)
					semaphore->semval = SEMVMX;
				ipc_update_pid(&semaphore->sempid, task_tgid(current));
			}
		}

		/* the adjustments may unblock other waiters */
		do_smart_update(sma, NULL, 0, 1, &wake_q);
		sem_unlock(sma, -1);
		rcu_read_unlock();
		wake_up_q(&wake_q);

		kfree_rcu(un, rcu);
	}
	kfree(ulp);
}
2426
2427#ifdef CONFIG_PROC_FS
/*
 * seq_file callback printing one semaphore array as a line of
 * /proc/sysvipc/sem.
 */
static int sysvipc_sem_proc_show(struct seq_file *s, void *it)
{
	struct user_namespace *user_ns = seq_user_ns(s);
	struct kern_ipc_perm *ipcp = it;
	struct sem_array *sma = container_of(ipcp, struct sem_array, sem_perm);
	time64_t sem_otime;

	/*
	 * The proc interface isn't aware of sem_lock(); it only takes
	 * the object lock. To stay compatible with the fine-grained
	 * sem_lock() scheme we must enter/leave complex mode around
	 * reading the per-semaphore otimes.
	 */
	complexmode_enter(sma);

	sem_otime = get_semotime(sma);

	seq_printf(s,
		   "%10d %10d  %4o %10u %5u %5u %5u %5u %10llu %10llu\n",
		   sma->sem_perm.key,
		   sma->sem_perm.id,
		   sma->sem_perm.mode,
		   sma->sem_nsems,
		   from_kuid_munged(user_ns, sma->sem_perm.uid),
		   from_kgid_munged(user_ns, sma->sem_perm.gid),
		   from_kuid_munged(user_ns, sma->sem_perm.cuid),
		   from_kgid_munged(user_ns, sma->sem_perm.cgid),
		   sem_otime,
		   sma->sem_ctime);

	complexmode_tryleave(sma);

	return 0;
}
2462#endif
2463