1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73#include <linux/compat.h>
74#include <linux/slab.h>
75#include <linux/spinlock.h>
76#include <linux/init.h>
77#include <linux/proc_fs.h>
78#include <linux/time.h>
79#include <linux/security.h>
80#include <linux/syscalls.h>
81#include <linux/audit.h>
82#include <linux/capability.h>
83#include <linux/seq_file.h>
84#include <linux/rwsem.h>
85#include <linux/nsproxy.h>
86#include <linux/ipc_namespace.h>
87#include <linux/sched/wake_q.h>
88#include <linux/nospec.h>
89#include <linux/rhashtable.h>
90
91#include <linux/uaccess.h>
92#include "util.h"
93
94
/* One semaphore structure for each semaphore in the system. */
struct sem {
	int semval;		/* current value */
	/*
	 * PID of the task that last modified the semaphore. Within this
	 * file that is semop (perform_atomic_semop) and semctl via
	 * SETVAL/SETALL (see ipc_update_pid() call sites below).
	 */
	struct pid *sempid;
	spinlock_t lock;	/* per-semaphore lock for single-sop fast path */
	struct list_head pending_alter; /* pending single-sop operations
					 * that alter this semaphore */
	struct list_head pending_const; /* pending single-sop operations
					 * that do not alter this semaphore
					 * (wait-for-zero) */
	time64_t sem_otime;	/* candidate for the array's sem_otime;
				 * get_semotime() reports the maximum */
} ____cacheline_aligned_in_smp;
112
113
/* One sem_array data structure for each set of semaphores in the system. */
struct sem_array {
	struct kern_ipc_perm sem_perm;	/* permissions, id, RCU head */
	time64_t sem_ctime;		/* create/last semctl() change time */
	struct list_head pending_alter;	/* pending operations that alter
					 * the array (complex ops, and simple
					 * ops while in global-lock mode) */
	struct list_head pending_const;	/* pending complex wait-for-zero
					 * operations */
	struct list_head list_id;	/* undo structures for this array */
	int sem_nsems;			/* number of semaphores in array */
	int complex_count;		/* # of queued complex operations */
	unsigned int use_global_lock;	/* nonzero: per-semaphore locks are
					 * disabled, sem_perm.lock protects
					 * everything (hysteresis counter) */

	struct sem sems[];		/* flexible array of semaphores */
} __randomize_layout;
128
129
/* One queue for each sleeping semop() in the system. */
struct sem_queue {
	struct list_head list;		/* link on a pending_alter/const list */
	struct task_struct *sleeper;	/* the sleeping task to wake */
	struct sem_undo *undo;		/* undo structure, or NULL */
	struct pid *pid;		/* pid recorded into sempid on success */
	int status;			/* completion status; published with
					 * smp_store_release() at wakeup */
	struct sembuf *sops;		/* array of pending operations */
	struct sembuf *blocking;	/* the sop that caused the block */
	int nsops;			/* number of operations */
	bool alter;			/* does any sop alter a semaphore? */
	bool dupsop;			/* sops touch the same semaphore twice
					 * (forces the slow path) */
};
142
143
144
145
/*
 * Each task has a list of undo requests. They are executed automatically
 * when the process exits.
 */
struct sem_undo {
	struct list_head list_proc;	/* link on the per-process undo list
					 * (sem_undo_list.list_proc) */
	struct rcu_head rcu;		/* for kvfree_rcu() */
	struct sem_undo_list *ulp;	/* back pointer to the owning list */
	struct list_head list_id;	/* link on sem_array.list_id */
	int semid;			/* semaphore set id; -1 once the set
					 * has been removed (see freeary()) */
	short *semadj;			/* per-semaphore adjustments applied
					 * at exit */
};
158
159
160
161
/*
 * sem_undo_list controls shared access to the list of sem_undo structures
 * of a process or of a group of processes that share CLONE_SYSVSEM.
 */
struct sem_undo_list {
	refcount_t refcnt;		/* sharers of this list */
	spinlock_t lock;		/* protects list_proc */
	struct list_head list_proc;	/* all sem_undos of the process */
};
167
168
169#define sem_ids(ns) ((ns)->ids[IPC_SEM_IDS])
170
171static int newary(struct ipc_namespace *, struct ipc_params *);
172static void freeary(struct ipc_namespace *, struct kern_ipc_perm *);
173#ifdef CONFIG_PROC_FS
174static int sysvipc_sem_proc_show(struct seq_file *s, void *it);
175#endif
176
177#define SEMMSL_FAST 256
178#define SEMOPM_FAST 64
179
180
181
182
183
184
185#define USE_GLOBAL_LOCK_HYSTERESIS 10
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244#define sc_semmsl sem_ctls[0]
245#define sc_semmns sem_ctls[1]
246#define sc_semopm sem_ctls[2]
247#define sc_semmni sem_ctls[3]
248
/* Initialize the semaphore limits and id machinery of an ipc namespace. */
void sem_init_ns(struct ipc_namespace *ns)
{
	/* Default tunables; adjustable later via sysctl (sem_ctls). */
	ns->sc_semmsl = SEMMSL;
	ns->sc_semmns = SEMMNS;
	ns->sc_semopm = SEMOPM;
	ns->sc_semmni = SEMMNI;
	ns->used_sems = 0;
	ipc_init_ids(&ns->ids[IPC_SEM_IDS]);
}
258
259#ifdef CONFIG_IPC_NS
/*
 * Tear down all semaphore sets of a dying ipc namespace, then release
 * the id lookup structures. Order matters: the sets must be freed
 * before the idr/hashtable that indexes them is destroyed.
 */
void sem_exit_ns(struct ipc_namespace *ns)
{
	free_ipcs(ns, &sem_ids(ns), freeary);
	idr_destroy(&ns->ids[IPC_SEM_IDS].ipcs_idr);
	rhashtable_destroy(&ns->ids[IPC_SEM_IDS].key_ht);
}
266#endif
267
/* Boot-time initialization: set up the initial namespace and procfs view. */
void __init sem_init(void)
{
	sem_init_ns(&init_ipc_ns);
	ipc_init_proc_interface("sysvipc/sem",
				"       key      semid perms      nsems   uid   gid  cuid  cgid      otime      ctime\n",
				IPC_SEM_IDS, sysvipc_sem_proc_show);
}
275
276
277
278
279
280
281
282
/**
 * unmerge_queues - unmerge queues, if possible.
 * @sma: semaphore array
 *
 * The function unmerges the wait queues if complex_count is 0, i.e. no
 * complex operation is queued anymore. It is called when leaving global
 * lock mode (see sem_unlock()).
 */
static void unmerge_queues(struct sem_array *sma)
{
	struct sem_queue *q, *tq;

	/* complex operations still around? - keep the merged queue */
	if (sma->complex_count)
		return;
	/*
	 * We will switch back to simple mode.
	 * Move all pending operations back into the per-semaphore queues.
	 * Each queued op is a single-sop op (complex_count == 0), so
	 * sops[0].sem_num identifies the one semaphore it targets.
	 */
	list_for_each_entry_safe(q, tq, &sma->pending_alter, list) {
		struct sem *curr;
		curr = &sma->sems[q->sops[0].sem_num];

		list_add_tail(&q->list, &curr->pending_alter);
	}
	INIT_LIST_HEAD(&sma->pending_alter);
}
303
304
305
306
307
308
309
310
311
312
313static void merge_queues(struct sem_array *sma)
314{
315 int i;
316 for (i = 0; i < sma->sem_nsems; i++) {
317 struct sem *sem = &sma->sems[i];
318
319 list_splice_init(&sem->pending_alter, &sma->pending_alter);
320 }
321}
322
/*
 * RCU callback that performs the final teardown of a semaphore array:
 * release the LSM security blob, then free the kvzalloc'ed array.
 * Runs after a grace period, so no reader can still hold a reference.
 */
static void sem_rcu_free(struct rcu_head *head)
{
	struct kern_ipc_perm *p = container_of(head, struct kern_ipc_perm, rcu);
	struct sem_array *sma = container_of(p, struct sem_array, sem_perm);

	security_sem_free(&sma->sem_perm);
	kvfree(sma);
}
331
332
333
334
335
/*
 * Enter the mode suitable for non-simple operations:
 * Caller must own sem_perm.lock (see sem_lock(), which takes it first).
 */
static void complexmode_enter(struct sem_array *sma)
{
	int i;
	struct sem *sem;

	if (sma->use_global_lock > 0) {
		/*
		 * We are already in global lock mode.
		 * Nothing to do, just reset the
		 * counter until we return to simple mode.
		 */
		WRITE_ONCE(sma->use_global_lock, USE_GLOBAL_LOCK_HYSTERESIS);
		return;
	}
	WRITE_ONCE(sma->use_global_lock, USE_GLOBAL_LOCK_HYSTERESIS);

	/*
	 * Acquire and release each per-semaphore lock so that any simple
	 * operation that entered its fast path before use_global_lock was
	 * set has dropped its lock before we proceed.
	 */
	for (i = 0; i < sma->sem_nsems; i++) {
		sem = &sma->sems[i];
		spin_lock(&sem->lock);
		spin_unlock(&sem->lock);
	}
}
358
359
360
361
362
/*
 * Try to leave the mode that disallows simple operations:
 * Caller must own sem_perm.lock.
 */
static void complexmode_tryleave(struct sem_array *sma)
{
	if (sma->complex_count) {
		/* Complex ops are sleeping;
		 * we must stay in complex mode
		 */
		return;
	}
	if (sma->use_global_lock == 1) {
		/*
		 * Immediately after setting use_global_lock to 0,
		 * a simple op can start. The release barrier pairs
		 * with the smp_load_acquire() in sem_lock()'s fast path.
		 */
		smp_store_release(&sma->use_global_lock, 0);
	} else {
		/* Count down the hysteresis; stay in global mode for now. */
		WRITE_ONCE(sma->use_global_lock,
				sma->use_global_lock-1);
	}
}
380
381#define SEM_GLOBAL_LOCK (-1)
382
383
384
385
386
387
388
/*
 * If the request contains only one semaphore operation, and there are
 * no complex transactions pending, lock only the semaphore involved.
 * Otherwise, lock the entire semaphore array, since we either have
 * multiple semaphores in our own semops, or we need to look at
 * semaphores from other pending complex operations.
 *
 * Returns the per-semaphore index that was locked, or SEM_GLOBAL_LOCK
 * if sem_perm.lock was taken.
 */
static inline int sem_lock(struct sem_array *sma, struct sembuf *sops,
			      int nsops)
{
	struct sem *sem;
	int idx;

	if (nsops != 1) {
		/* Complex operation - acquire a full lock */
		ipc_lock_object(&sma->sem_perm);

		/* Prevent parallel simple ops */
		complexmode_enter(sma);
		return SEM_GLOBAL_LOCK;
	}

	/*
	 * Only one semaphore affected - try to optimize locking.
	 * Optimized locking is possible if no complex operation
	 * is either enqueued or processed right now.
	 *
	 * Both facts are tracked by use_global_lock.
	 */
	idx = array_index_nospec(sops->sem_num, sma->sem_nsems);
	sem = &sma->sems[idx];

	/*
	 * Initial check for use_global_lock. Just an optimization,
	 * no locking, no memory barrier.
	 */
	if (!READ_ONCE(sma->use_global_lock)) {
		/*
		 * It appears that no complex operation is around.
		 * Acquire the per-semaphore lock.
		 */
		spin_lock(&sem->lock);

		/* Pairs with smp_store_release() in complexmode_tryleave() */
		if (!smp_load_acquire(&sma->use_global_lock)) {
			/* fast path successful! */
			return sops->sem_num;
		}
		spin_unlock(&sem->lock);
	}

	/* slow path: acquire the full lock */
	ipc_lock_object(&sma->sem_perm);

	if (sma->use_global_lock == 0) {
		/*
		 * The use_global_lock mode ended while we waited for
		 * sma->sem_perm.lock. Thus we must switch to locking
		 * with sem->lock.
		 * Unlike in the fast path, there is no need to recheck
		 * sma->use_global_lock after we have acquired sem->lock:
		 * We own sma->sem_perm.lock, thus use_global_lock cannot
		 * change.
		 */
		spin_lock(&sem->lock);

		ipc_unlock_object(&sma->sem_perm);
		return sops->sem_num;
	} else {
		/*
		 * Not a false alarm, thus continue to use the global lock
		 * mode. No need for complexmode_enter(), this was done by
		 * the caller that has set use_global_lock to non-zero.
		 */
		return SEM_GLOBAL_LOCK;
	}
}
459
/*
 * Drop the lock taken by sem_lock(). When leaving the global lock we
 * first redistribute the merged pending queue (if possible) and try to
 * re-enable the per-semaphore fast path - both require sem_perm.lock,
 * so they must happen before ipc_unlock_object().
 */
static inline void sem_unlock(struct sem_array *sma, int locknum)
{
	if (locknum == SEM_GLOBAL_LOCK) {
		unmerge_queues(sma);
		complexmode_tryleave(sma);
		ipc_unlock_object(&sma->sem_perm);
	} else {
		/* locknum is the per-semaphore index returned by sem_lock() */
		struct sem *sem = &sma->sems[locknum];
		spin_unlock(&sem->lock);
	}
}
471
472
473
474
475
476
477
478static inline struct sem_array *sem_obtain_object(struct ipc_namespace *ns, int id)
479{
480 struct kern_ipc_perm *ipcp = ipc_obtain_object_idr(&sem_ids(ns), id);
481
482 if (IS_ERR(ipcp))
483 return ERR_CAST(ipcp);
484
485 return container_of(ipcp, struct sem_array, sem_perm);
486}
487
488static inline struct sem_array *sem_obtain_object_check(struct ipc_namespace *ns,
489 int id)
490{
491 struct kern_ipc_perm *ipcp = ipc_obtain_object_check(&sem_ids(ns), id);
492
493 if (IS_ERR(ipcp))
494 return ERR_CAST(ipcp);
495
496 return container_of(ipcp, struct sem_array, sem_perm);
497}
498
/*
 * Re-acquire the global lock and drop the temporary reference that was
 * taken (via ipc_rcu_getref()) while the lock was not held. The array
 * may have been freed meanwhile - callers must re-check
 * ipc_valid_object() afterwards.
 */
static inline void sem_lock_and_putref(struct sem_array *sma)
{
	sem_lock(sma, NULL, -1);
	ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
}
504
/* Remove the semaphore set from the namespace's id tables. */
static inline void sem_rmid(struct ipc_namespace *ns, struct sem_array *s)
{
	ipc_rmid(&sem_ids(ns), &s->sem_perm);
}
509
/*
 * Allocate a zeroed sem_array with room for @nsems trailing struct sem
 * entries. Returns NULL on overflow or allocation failure.
 */
static struct sem_array *sem_alloc(size_t nsems)
{
	struct sem_array *sma;

	/* Reject sizes whose struct_size() would exceed INT_MAX. */
	if (nsems > (INT_MAX - sizeof(*sma)) / sizeof(sma->sems[0]))
		return NULL;

	sma = kvzalloc(struct_size(sma, sems, nsems), GFP_KERNEL_ACCOUNT);
	if (unlikely(!sma))
		return NULL;

	return sma;
}
523
524
525
526
527
528
529
530
/**
 * newary - Create a new semaphore set
 * @ns: namespace
 * @params: ptr to the structure that contains key, semflg and nsems
 *
 * Called with sem_ids.rwsem held (as a writer, via ipcget()).
 * Returns the new set's ipc id, or a negative errno.
 */
static int newary(struct ipc_namespace *ns, struct ipc_params *params)
{
	int retval;
	struct sem_array *sma;
	key_t key = params->key;
	int nsems = params->u.nsems;
	int semflg = params->flg;
	int i;

	if (!nsems)
		return -EINVAL;
	if (ns->used_sems + nsems > ns->sc_semmns)
		return -ENOSPC;

	sma = sem_alloc(nsems);
	if (!sma)
		return -ENOMEM;

	sma->sem_perm.mode = (semflg & S_IRWXUGO);
	sma->sem_perm.key = key;

	sma->sem_perm.security = NULL;
	retval = security_sem_alloc(&sma->sem_perm);
	if (retval) {
		/* No id assigned yet - plain kvfree, no RCU needed. */
		kvfree(sma);
		return retval;
	}

	for (i = 0; i < nsems; i++) {
		INIT_LIST_HEAD(&sma->sems[i].pending_alter);
		INIT_LIST_HEAD(&sma->sems[i].pending_const);
		spin_lock_init(&sma->sems[i].lock);
	}

	sma->complex_count = 0;
	/* Start in global-lock mode; hysteresis decays it in sem_unlock(). */
	sma->use_global_lock = USE_GLOBAL_LOCK_HYSTERESIS;
	INIT_LIST_HEAD(&sma->pending_alter);
	INIT_LIST_HEAD(&sma->pending_const);
	INIT_LIST_HEAD(&sma->list_id);
	sma->sem_nsems = nsems;
	sma->sem_ctime = ktime_get_real_seconds();

	/* ipc_addid() locks the object on success. */
	retval = ipc_addid(&sem_ids(ns), &sma->sem_perm, ns->sc_semmni);
	if (retval < 0) {
		/* Security blob is set; free via the RCU path. */
		ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
		return retval;
	}
	ns->used_sems += nsems;

	sem_unlock(sma, -1);
	rcu_read_unlock();

	return sma->sem_perm.id;
}
586
587
588
589
590
591static int sem_more_checks(struct kern_ipc_perm *ipcp, struct ipc_params *params)
592{
593 struct sem_array *sma;
594
595 sma = container_of(ipcp, struct sem_array, sem_perm);
596 if (params->u.nsems > sma->sem_nsems)
597 return -EINVAL;
598
599 return 0;
600}
601
602long ksys_semget(key_t key, int nsems, int semflg)
603{
604 struct ipc_namespace *ns;
605 static const struct ipc_ops sem_ops = {
606 .getnew = newary,
607 .associate = security_sem_associate,
608 .more_checks = sem_more_checks,
609 };
610 struct ipc_params sem_params;
611
612 ns = current->nsproxy->ipc_ns;
613
614 if (nsems < 0 || nsems > ns->sc_semmsl)
615 return -EINVAL;
616
617 sem_params.key = key;
618 sem_params.flg = semflg;
619 sem_params.u.nsems = nsems;
620
621 return ipcget(ns, &sem_ids(ns), &sem_ops, &sem_params);
622}
623
/* semget(2) syscall wrapper. */
SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
{
	return ksys_semget(key, nsems, semflg);
}
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
/**
 * perform_atomic_semop_slow - Attempt to perform semaphore
 *                             operations on a given array.
 * @sma: semaphore array
 * @q: struct sem_queue that describes the operation
 *
 * Caller blocking are as follows, based the value
 * indicated by the semaphore operation (sem_op):
 *
 *  (1) >0 never blocks.
 *  (2)  0 (wait-for-zero operation): semval is non-zero.
 *  (3) <0 attempting to decrement semval to a value smaller than zero.
 *
 * Returns 0 if the operation was possible.
 * Returns 1 if the operation is impossible, the caller must sleep.
 * Returns <0 for error codes.
 *
 * This variant applies the sops one by one and rolls everything back on
 * failure; it therefore also handles duplicate sem_nums (q->dupsop).
 */
static int perform_atomic_semop_slow(struct sem_array *sma, struct sem_queue *q)
{
	int result, sem_op, nsops;
	struct pid *pid;
	struct sembuf *sop;
	struct sem *curr;
	struct sembuf *sops;
	struct sem_undo *un;

	sops = q->sops;
	nsops = q->nsops;
	un = q->undo;

	for (sop = sops; sop < sops + nsops; sop++) {
		int idx = array_index_nospec(sop->sem_num, sma->sem_nsems);
		curr = &sma->sems[idx];
		sem_op = sop->sem_op;
		result = curr->semval;

		if (!sem_op && result)
			goto would_block;

		result += sem_op;
		if (result < 0)
			goto would_block;
		if (result > SEMVMX)
			goto out_of_range;

		if (sop->sem_flg & SEM_UNDO) {
			int undo = un->semadj[sop->sem_num] - sem_op;

			/* Exceeding the undo range is an error. */
			if (undo < (-SEMAEM - 1) || undo > SEMAEM)
				goto out_of_range;
			un->semadj[sop->sem_num] = undo;
		}

		curr->semval = result;
	}

	/* All sops succeeded: record the caller's pid on each semaphore. */
	sop--;
	pid = q->pid;
	while (sop >= sops) {
		ipc_update_pid(&sma->sems[sop->sem_num].sempid, pid);
		sop--;
	}

	return 0;

out_of_range:
	result = -ERANGE;
	goto undo;

would_block:
	q->blocking = sop;

	if (sop->sem_flg & IPC_NOWAIT)
		result = -EAGAIN;
	else
		result = 1;

undo:
	/* Roll back every sop already applied, in reverse order. */
	sop--;
	while (sop >= sops) {
		sem_op = sop->sem_op;
		sma->sems[sop->sem_num].semval -= sem_op;
		if (sop->sem_flg & SEM_UNDO)
			un->semadj[sop->sem_num] += sem_op;
		sop--;
	}

	return result;
}
718
/*
 * perform_atomic_semop - fast path of perform_atomic_semop_slow().
 *
 * Works in two phases: first scan all sops and verify that every one of
 * them can proceed (no rollback state is needed), then apply them all.
 * This only works when no semaphore appears twice in q->sops; duplicate
 * entries (q->dupsop) are handed to the slow variant.
 *
 * Same return convention as perform_atomic_semop_slow():
 * 0 = done, 1 = caller must sleep, <0 = error.
 */
static int perform_atomic_semop(struct sem_array *sma, struct sem_queue *q)
{
	int result, sem_op, nsops;
	struct sembuf *sop;
	struct sem *curr;
	struct sembuf *sops;
	struct sem_undo *un;

	sops = q->sops;
	nsops = q->nsops;
	un = q->undo;

	if (unlikely(q->dupsop))
		return perform_atomic_semop_slow(sma, q);

	/*
	 * We scan the semaphore set twice, first to ensure that the entire
	 * operation can succeed, therefore avoiding any pointless writes
	 * to shared memory and having to undo such changes in order to block
	 * until the operations can go through.
	 */
	for (sop = sops; sop < sops + nsops; sop++) {
		int idx = array_index_nospec(sop->sem_num, sma->sem_nsems);

		curr = &sma->sems[idx];
		sem_op = sop->sem_op;
		result = curr->semval;

		if (!sem_op && result)
			goto would_block; /* wait-for-zero */

		result += sem_op;
		if (result < 0)
			goto would_block;

		if (result > SEMVMX)
			return -ERANGE;

		if (sop->sem_flg & SEM_UNDO) {
			int undo = un->semadj[sop->sem_num] - sem_op;

			/* Exceeding the undo range is an error. */
			if (undo < (-SEMAEM - 1) || undo > SEMAEM)
				return -ERANGE;
		}
	}

	/* Second pass: every sop is known to succeed - apply them. */
	for (sop = sops; sop < sops + nsops; sop++) {
		curr = &sma->sems[sop->sem_num];
		sem_op = sop->sem_op;
		result = curr->semval;

		if (sop->sem_flg & SEM_UNDO) {
			int undo = un->semadj[sop->sem_num] - sem_op;

			un->semadj[sop->sem_num] = undo;
		}
		curr->semval += sem_op;
		ipc_update_pid(&curr->sempid, q->pid);
	}

	return 0;

would_block:
	q->blocking = sop;
	return sop->sem_flg & IPC_NOWAIT ? -EAGAIN : 1;
}
786
/*
 * Queue a sleeping semop caller for wakeup and publish its result.
 *
 * The task reference taken here is consumed by wake_q_add_safe().
 * NOTE(review): the store-release on q->status presumably pairs with an
 * acquire read in the sleeper's wait loop (not visible in this chunk) -
 * confirm against the semop sleep path before relying on it.
 */
static inline void wake_up_sem_queue_prepare(struct sem_queue *q, int error,
					     struct wake_q_head *wake_q)
{
	struct task_struct *sleeper;

	sleeper = get_task_struct(q->sleeper);

	/* Publish the operation result before the task can run again. */
	smp_store_release(&q->status, error);

	wake_q_add_safe(wake_q, sleeper);
}
799
800static void unlink_queue(struct sem_array *sma, struct sem_queue *q)
801{
802 list_del(&q->list);
803 if (q->nsops > 1)
804 sma->complex_count--;
805}
806
807
808
809
810
811
812
813
814
815
816
/** check_restart(sma, q)
 * @sma: semaphore array
 * @q: the operation that just completed
 *
 * update_queue is O(N^2) when it restarts scanning the whole queue of
 * waiting operations. Therefore this function checks if the restart is
 * really necessary. It is called after a previously waiting operation
 * was completed.
 */
static inline int check_restart(struct sem_array *sma, struct sem_queue *q)
{
	/* pending complex alter operations are too difficult to analyse */
	if (!list_empty(&sma->pending_alter))
		return 1;

	/* we were a sleeping complex operation. Too difficult */
	if (q->nsops > 1)
		return 1;

	/* It is impossible that someone waits for the new value:
	 * - complex operations always restart.
	 * - wait-for-zero are handled separately.
	 * - q is a previously sleeping simple operation that
	 *   altered the array. It must be a decrement, because
	 *   simple increments never sleep.
	 * - If there are older (higher priority) decrements
	 *   in the queue, then they have observed the original
	 *   semval value and couldn't proceed. The operation
	 *   decremented to value - thus they won't proceed either.
	 */
	return 0;
}
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
/**
 * wake_const_ops - wake up non-alter tasks
 * @sma: semaphore array.
 * @semnum: semaphore that was modified, or -1 for the array-wide list.
 * @wake_q: lockless wake-queue head.
 *
 * wake_const_ops must be called after a semaphore in a semaphore array
 * was set to 0. If complex const operations are pending, they must be
 * checked via the array-wide list (semnum == -1).
 * The tasks that must be woken up are added to @wake_q. The return code
 * is stored in q->status (via wake_up_sem_queue_prepare()).
 * The function returns 1 if at least one operation was completed successfully.
 */
static int wake_const_ops(struct sem_array *sma, int semnum,
			  struct wake_q_head *wake_q)
{
	struct sem_queue *q, *tmp;
	struct list_head *pending_list;
	int semop_completed = 0;

	if (semnum == -1)
		pending_list = &sma->pending_const;
	else
		pending_list = &sma->sems[semnum].pending_const;

	list_for_each_entry_safe(q, tmp, pending_list, list) {
		int error = perform_atomic_semop(sma, q);

		/* error > 0: op still cannot proceed - leave it queued */
		if (error > 0)
			continue;

		/* operation completed (0) or failed (<0) - remove and wake */
		unlink_queue(sma, q);

		wake_up_sem_queue_prepare(q, error, wake_q);
		if (error == 0)
			semop_completed = 1;
	}

	return semop_completed;
}
882
883
884
885
886
887
888
889
890
891
892
893
/**
 * do_smart_wakeup_zero - wakeup all wait for zero tasks
 * @sma: semaphore array
 * @sops: operations that were performed, or NULL if unknown
 * @nsops: number of operations
 * @wake_q: lockless wake-queue head
 *
 * Checks all required queue for wait-for-zero operations, based
 * on the actual changes that were performed on the semaphore array.
 * The function returns 1 if at least one operation was completed.
 */
static int do_smart_wakeup_zero(struct sem_array *sma, struct sembuf *sops,
				int nsops, struct wake_q_head *wake_q)
{
	int i;
	int semop_completed = 0;
	int got_zero = 0;

	/* first: the per-semaphore queues, if known */
	if (sops) {
		for (i = 0; i < nsops; i++) {
			int num = sops[i].sem_num;

			if (sma->sems[num].semval == 0) {
				got_zero = 1;
				semop_completed |= wake_const_ops(sma, num, wake_q);
			}
		}
	} else {
		/*
		 * No sops means modified semaphores not known.
		 * Check the whole array.
		 */
		for (i = 0; i < sma->sem_nsems; i++) {
			if (sma->sems[i].semval == 0) {
				got_zero = 1;
				semop_completed |= wake_const_ops(sma, i, wake_q);
			}
		}
	}
	/*
	 * If one of the modified semaphores got 0,
	 * then check the global queue, too.
	 */
	if (got_zero)
		semop_completed |= wake_const_ops(sma, -1, wake_q);

	return semop_completed;
}
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
/**
 * update_queue - look for tasks that can be completed.
 * @sma: semaphore array.
 * @semnum: semaphore that was modified, or -1 for the array-wide queue.
 * @wake_q: lockless wake-queue head.
 *
 * update_queue must be called after a semaphore in a semaphore array
 * was modified. If multiple semaphores were modified, update_queue must
 * be called with semnum = -1, as well as with the number of each modified
 * semaphore.
 * The tasks that must be woken up are added to @wake_q. The return code
 * is stored in q->status (via wake_up_sem_queue_prepare()).
 * The function internally checks if const operations can now succeed.
 *
 * The function return 1 if at least one semop was completed successfully.
 */
static int update_queue(struct sem_array *sma, int semnum, struct wake_q_head *wake_q)
{
	struct sem_queue *q, *tmp;
	struct list_head *pending_list;
	int semop_completed = 0;

	if (semnum == -1)
		pending_list = &sma->pending_alter;
	else
		pending_list = &sma->sems[semnum].pending_alter;

again:
	list_for_each_entry_safe(q, tmp, pending_list, list) {
		int error, restart;

		/* If we are scanning the single sop, per-semaphore list of
		 * one semaphore and that semaphore is 0, then it is not
		 * necessary to scan further: simple increments
		 * that affect only one entry succeed immediately and cannot
		 * be in the per semaphore pending queue, and decrements
		 * cannot be successful if the value is already 0.
		 */
		if (semnum != -1 && sma->sems[semnum].semval == 0)
			break;

		error = perform_atomic_semop(sma, q);

		/* Does q->sleeper still need to sleep? */
		if (error > 0)
			continue;

		unlink_queue(sma, q);

		if (error) {
			restart = 0;
		} else {
			semop_completed = 1;
			do_smart_wakeup_zero(sma, q->sops, q->nsops, wake_q);
			restart = check_restart(sma, q);
		}

		wake_up_sem_queue_prepare(q, error, wake_q);
		if (restart)
			goto again;
	}
	return semop_completed;
}
997
998
999
1000
1001
1002
1003
1004
1005
1006static void set_semotime(struct sem_array *sma, struct sembuf *sops)
1007{
1008 if (sops == NULL) {
1009 sma->sems[0].sem_otime = ktime_get_real_seconds();
1010 } else {
1011 sma->sems[sops[0].sem_num].sem_otime =
1012 ktime_get_real_seconds();
1013 }
1014}
1015
1016
1017
1018
1019
1020
1021
1022
1023
1024
1025
1026
1027
1028
1029
/**
 * do_smart_update - optimized update_queue
 * @sma: semaphore array
 * @sops: operations that were performed, or NULL if unknown
 * @nsops: number of operations
 * @otime: force setting otime
 * @wake_q: lockless wake-queue head
 *
 * do_smart_update() does the required calls to update_queue and
 * do_smart_wakeup_zero, based on the actual changes that were performed
 * on the semaphore array.
 * Note that the function does not do the actual wake-up: the caller is
 * responsible for calling wake_up_q().
 * It is safe to perform this call after dropping all locks.
 */
static void do_smart_update(struct sem_array *sma, struct sembuf *sops, int nsops,
			    int otime, struct wake_q_head *wake_q)
{
	int i;

	otime |= do_smart_wakeup_zero(sma, sops, nsops, wake_q);

	if (!list_empty(&sma->pending_alter)) {
		/* semaphore array uses the global queue - just process it */
		otime |= update_queue(sma, -1, wake_q);
	} else {
		if (!sops) {
			/*
			 * No sops, thus the modified semaphores are not
			 * known. Check all.
			 */
			for (i = 0; i < sma->sem_nsems; i++)
				otime |= update_queue(sma, i, wake_q);
		} else {
			/*
			 * Check the semaphores that were increased:
			 * - No complex ops, thus all sleeping ops are
			 *   decrease.
			 * - if we decreased the value, then any sleeping
			 *   semaphore ops won't be able to run: If the
			 *   previous value was too small, then the new
			 *   value will be too small, too.
			 */
			for (i = 0; i < nsops; i++) {
				if (sops[i].sem_op > 0) {
					otime |= update_queue(sma,
							      sops[i].sem_num, wake_q);
				}
			}
		}
	}
	if (otime)
		set_semotime(sma, sops);
}
1069
1070
1071
1072
/*
 * check_qop: Test if a queued complex operation sleeps on the semaphore
 * semnum, for GETNCNT (count_zero == false) or GETZCNT (count_zero == true).
 * Only q->blocking - the sop that actually caused the sleep - is examined.
 */
static int check_qop(struct sem_array *sma, int semnum, struct sem_queue *q,
			bool count_zero)
{
	struct sembuf *sop = q->blocking;

	/*
	 * Linux always (since 0.99.10) reported a task as sleeping on all
	 * semaphores. This violates SUS, therefore it was changed to the
	 * standard compliant behavior: only the sop that blocked counts.
	 * Give the administrators a chance to notice that an application
	 * might misbehave because it relies on the Linux behavior.
	 */
	pr_info_once("semctl(GETNCNT/GETZCNT) is since 3.16 Single Unix Specification compliant.\n"
			"The task %s (%d) triggered the difference, watch for misbehavior.\n",
			current->comm, task_pid_nr(current));

	if (sop->sem_num != semnum)
		return 0;

	if (count_zero && sop->sem_op == 0)
		return 1;
	if (!count_zero && sop->sem_op < 0)
		return 1;

	return 0;
}
1099
1100
1101
1102
1103
1104
1105
1106
/* The following counts are associated to each semaphore:
 *   semncnt        number of tasks waiting on semval being nonzero
 *   semzcnt        number of tasks waiting on semval being zero
 *
 * Per definition, a task waits only on the semaphore of the first semop
 * that cannot proceed, even if additional operation would block, too.
 */
static int count_semcnt(struct sem_array *sma, ushort semnum,
			bool count_zero)
{
	struct list_head *l;
	struct sem_queue *q;
	int semcnt;

	semcnt = 0;
	/* First: check the simple operations. They are easy to evaluate */
	if (count_zero)
		l = &sma->sems[semnum].pending_const;
	else
		l = &sma->sems[semnum].pending_alter;

	list_for_each_entry(q, l, list) {
		/*
		 * All simple operations on this semaphore's own list
		 * block on exactly this semaphore - just count them.
		 */
		semcnt++;
	}

	/* Then: check the complex operations. */
	list_for_each_entry(q, &sma->pending_alter, list) {
		semcnt += check_qop(sma, semnum, q, count_zero);
	}
	if (count_zero) {
		list_for_each_entry(q, &sma->pending_const, list) {
			semcnt += check_qop(sma, semnum, q, count_zero);
		}
	}
	return semcnt;
}
1139
1140
1141
1142
1143
/*
 * Free a semaphore set. freeary() is called with sem_ids.rwsem locked
 * as a writer and the spinlock for this semaphore set hold. The
 * counterpart is newary().
 */
static void freeary(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
{
	struct sem_undo *un, *tu;
	struct sem_queue *q, *tq;
	struct sem_array *sma = container_of(ipcp, struct sem_array, sem_perm);
	int i;
	DEFINE_WAKE_Q(wake_q);

	/* Free the existing undo structures for this semaphore set. */
	ipc_assert_locked_object(&sma->sem_perm);
	list_for_each_entry_safe(un, tu, &sma->list_id, list_id) {
		list_del(&un->list_id);
		spin_lock(&un->ulp->lock);
		un->semid = -1;		/* mark as orphaned for exit_sem */
		list_del_rcu(&un->list_proc);
		spin_unlock(&un->ulp->lock);
		kvfree_rcu(un, rcu);
	}

	/* Wake up all pending processes and let them fail with EIDRM. */
	list_for_each_entry_safe(q, tq, &sma->pending_const, list) {
		unlink_queue(sma, q);
		wake_up_sem_queue_prepare(q, -EIDRM, &wake_q);
	}

	list_for_each_entry_safe(q, tq, &sma->pending_alter, list) {
		unlink_queue(sma, q);
		wake_up_sem_queue_prepare(q, -EIDRM, &wake_q);
	}
	for (i = 0; i < sma->sem_nsems; i++) {
		struct sem *sem = &sma->sems[i];
		list_for_each_entry_safe(q, tq, &sem->pending_const, list) {
			unlink_queue(sma, q);
			wake_up_sem_queue_prepare(q, -EIDRM, &wake_q);
		}
		list_for_each_entry_safe(q, tq, &sem->pending_alter, list) {
			unlink_queue(sma, q);
			wake_up_sem_queue_prepare(q, -EIDRM, &wake_q);
		}
		ipc_update_pid(&sem->sempid, NULL);
	}

	/* Remove the semaphore set from the IDR */
	sem_rmid(ns, sma);
	sem_unlock(sma, -1);
	rcu_read_unlock();

	wake_up_q(&wake_q);
	ns->used_sems -= sma->sem_nsems;
	ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
}
1195
1196static unsigned long copy_semid_to_user(void __user *buf, struct semid64_ds *in, int version)
1197{
1198 switch (version) {
1199 case IPC_64:
1200 return copy_to_user(buf, in, sizeof(*in));
1201 case IPC_OLD:
1202 {
1203 struct semid_ds out;
1204
1205 memset(&out, 0, sizeof(out));
1206
1207 ipc64_perm_to_ipc_perm(&in->sem_perm, &out.sem_perm);
1208
1209 out.sem_otime = in->sem_otime;
1210 out.sem_ctime = in->sem_ctime;
1211 out.sem_nsems = in->sem_nsems;
1212
1213 return copy_to_user(buf, &out, sizeof(out));
1214 }
1215 default:
1216 return -EINVAL;
1217 }
1218}
1219
1220static time64_t get_semotime(struct sem_array *sma)
1221{
1222 int i;
1223 time64_t res;
1224
1225 res = sma->sems[0].sem_otime;
1226 for (i = 1; i < sma->sem_nsems; i++) {
1227 time64_t to = sma->sems[i].sem_otime;
1228
1229 if (to > res)
1230 res = to;
1231 }
1232 return res;
1233}
1234
/*
 * semctl IPC_STAT / SEM_STAT / SEM_STAT_ANY: fill in @semid64 from the
 * set identified by @semid. For the SEM_STAT variants @semid is an
 * index, not an ipc id, and on success the set's ipc id is returned;
 * IPC_STAT returns 0.
 */
static int semctl_stat(struct ipc_namespace *ns, int semid,
			 int cmd, struct semid64_ds *semid64)
{
	struct sem_array *sma;
	time64_t semotime;
	int err;

	memset(semid64, 0, sizeof(*semid64));

	rcu_read_lock();
	if (cmd == SEM_STAT || cmd == SEM_STAT_ANY) {
		/* index-based lookup, no sequence check */
		sma = sem_obtain_object(ns, semid);
		if (IS_ERR(sma)) {
			err = PTR_ERR(sma);
			goto out_unlock;
		}
	} else { /* IPC_STAT */
		sma = sem_obtain_object_check(ns, semid);
		if (IS_ERR(sma)) {
			err = PTR_ERR(sma);
			goto out_unlock;
		}
	}

	/* see comment for SHM_STAT_ANY */
	if (cmd == SEM_STAT_ANY)
		audit_ipc_obj(&sma->sem_perm);
	else {
		err = -EACCES;
		if (ipcperms(ns, &sma->sem_perm, S_IRUGO))
			goto out_unlock;
	}

	err = security_sem_semctl(&sma->sem_perm, cmd);
	if (err)
		goto out_unlock;

	ipc_lock_object(&sma->sem_perm);

	if (!ipc_valid_object(&sma->sem_perm)) {
		/* set was removed while we slept on the lock */
		ipc_unlock_object(&sma->sem_perm);
		err = -EIDRM;
		goto out_unlock;
	}

	kernel_to_ipc64_perm(&sma->sem_perm, &semid64->sem_perm);
	semotime = get_semotime(sma);
	semid64->sem_otime = semotime;
	semid64->sem_ctime = sma->sem_ctime;
#ifndef CONFIG_64BIT
	semid64->sem_otime_high = semotime >> 32;
	semid64->sem_ctime_high = sma->sem_ctime >> 32;
#endif
	semid64->sem_nsems = sma->sem_nsems;

	if (cmd == IPC_STAT) {
		/*
		 * As defined in SUS:
		 * Return 0 on success
		 */
		err = 0;
	} else {
		/*
		 * SEM_STAT and SEM_STAT_ANY (both Linux specific)
		 * Return the full id, including the sequence number
		 */
		err = sma->sem_perm.id;
	}
	ipc_unlock_object(&sma->sem_perm);
out_unlock:
	rcu_read_unlock();
	return err;
}
1308
/*
 * semctl IPC_INFO / SEM_INFO: report the namespace limits (IPC_INFO) or
 * current usage (SEM_INFO) to userspace. On success returns the highest
 * in-use index (or 0 when none), so SEM_STAT can iterate all sets.
 */
static int semctl_info(struct ipc_namespace *ns, int semid,
			 int cmd, void __user *p)
{
	struct seminfo seminfo;
	int max_idx;
	int err;

	err = security_sem_semctl(NULL, cmd);
	if (err)
		return err;

	memset(&seminfo, 0, sizeof(seminfo));
	seminfo.semmni = ns->sc_semmni;
	seminfo.semmns = ns->sc_semmns;
	seminfo.semmsl = ns->sc_semmsl;
	seminfo.semopm = ns->sc_semopm;
	seminfo.semvmx = SEMVMX;
	seminfo.semmnu = SEMMNU;
	seminfo.semmap = SEMMAP;
	seminfo.semume = SEMUME;
	down_read(&sem_ids(ns).rwsem);
	if (cmd == SEM_INFO) {
		/* SEM_INFO reuses these two fields for live usage counts */
		seminfo.semusz = sem_ids(ns).in_use;
		seminfo.semaem = ns->used_sems;
	} else {
		seminfo.semusz = SEMUSZ;
		seminfo.semaem = SEMAEM;
	}
	max_idx = ipc_get_maxidx(&sem_ids(ns));
	up_read(&sem_ids(ns).rwsem);
	if (copy_to_user(p, &seminfo, sizeof(struct seminfo)))
		return -EFAULT;
	return (max_idx < 0) ? 0 : max_idx;
}
1343
/*
 * semctl SETVAL: set one semaphore's value, clear all undo adjustments
 * for it, and wake any operations that can now proceed.
 */
static int semctl_setval(struct ipc_namespace *ns, int semid, int semnum,
		int val)
{
	struct sem_undo *un;
	struct sem_array *sma;
	struct sem *curr;
	int err;
	DEFINE_WAKE_Q(wake_q);

	if (val > SEMVMX || val < 0)
		return -ERANGE;

	rcu_read_lock();
	sma = sem_obtain_object_check(ns, semid);
	if (IS_ERR(sma)) {
		rcu_read_unlock();
		return PTR_ERR(sma);
	}

	if (semnum < 0 || semnum >= sma->sem_nsems) {
		rcu_read_unlock();
		return -EINVAL;
	}

	/* permission and security checks before taking the lock */
	if (ipcperms(ns, &sma->sem_perm, S_IWUGO)) {
		rcu_read_unlock();
		return -EACCES;
	}

	err = security_sem_semctl(&sma->sem_perm, SETVAL);
	if (err) {
		rcu_read_unlock();
		return -EACCES;
	}

	sem_lock(sma, NULL, -1);

	if (!ipc_valid_object(&sma->sem_perm)) {
		/* set was removed while we slept on the lock */
		sem_unlock(sma, -1);
		rcu_read_unlock();
		return -EIDRM;
	}

	semnum = array_index_nospec(semnum, sma->sem_nsems);
	curr = &sma->sems[semnum];

	/* SETVAL discards all undo adjustments for this semaphore. */
	ipc_assert_locked_object(&sma->sem_perm);
	list_for_each_entry(un, &sma->list_id, list_id)
		un->semadj[semnum] = 0;

	curr->semval = val;
	ipc_update_pid(&curr->sempid, task_tgid(current));
	sma->sem_ctime = ktime_get_real_seconds();
	/* maybe some queued-up processes were waiting for this */
	do_smart_update(sma, NULL, 0, 0, &wake_q);
	sem_unlock(sma, -1);
	rcu_read_unlock();
	wake_up_q(&wake_q);
	return 0;
}
1405
/*
 * semctl GETALL/SETALL/GETVAL/GETPID/GETNCNT/GETZCNT.
 *
 * For GETALL/SETALL with more than SEMMSL_FAST semaphores a buffer must
 * be allocated; since that can sleep, the lock is dropped around the
 * allocation/copy while a temporary reference (ipc_rcu_getref) keeps
 * the array alive, and validity is re-checked after relocking.
 */
static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
		int cmd, void __user *p)
{
	struct sem_array *sma;
	struct sem *curr;
	int err, nsems;
	ushort fast_sem_io[SEMMSL_FAST];
	ushort *sem_io = fast_sem_io;
	DEFINE_WAKE_Q(wake_q);

	rcu_read_lock();
	sma = sem_obtain_object_check(ns, semid);
	if (IS_ERR(sma)) {
		rcu_read_unlock();
		return PTR_ERR(sma);
	}

	nsems = sma->sem_nsems;

	err = -EACCES;
	if (ipcperms(ns, &sma->sem_perm, cmd == SETALL ? S_IWUGO : S_IRUGO))
		goto out_rcu_wakeup;

	err = security_sem_semctl(&sma->sem_perm, cmd);
	if (err)
		goto out_rcu_wakeup;

	err = -EACCES;
	switch (cmd) {
	case GETALL:
	{
		ushort __user *array = p;
		int i;

		sem_lock(sma, NULL, -1);
		if (!ipc_valid_object(&sma->sem_perm)) {
			err = -EIDRM;
			goto out_unlock;
		}
		if (nsems > SEMMSL_FAST) {
			/*
			 * The on-stack buffer is too small: pin the array,
			 * drop the lock to allocate, then relock and
			 * re-validate.
			 */
			if (!ipc_rcu_getref(&sma->sem_perm)) {
				err = -EIDRM;
				goto out_unlock;
			}
			sem_unlock(sma, -1);
			rcu_read_unlock();
			sem_io = kvmalloc_array(nsems, sizeof(ushort),
						GFP_KERNEL);
			if (sem_io == NULL) {
				ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
				return -ENOMEM;
			}

			rcu_read_lock();
			sem_lock_and_putref(sma);
			if (!ipc_valid_object(&sma->sem_perm)) {
				err = -EIDRM;
				goto out_unlock;
			}
		}
		for (i = 0; i < sma->sem_nsems; i++)
			sem_io[i] = sma->sems[i].semval;
		sem_unlock(sma, -1);
		rcu_read_unlock();
		err = 0;
		if (copy_to_user(array, sem_io, nsems*sizeof(ushort)))
			err = -EFAULT;
		goto out_free;
	}
	case SETALL:
	{
		int i;
		struct sem_undo *un;

		/* Pin the array while we allocate and copy unlocked. */
		if (!ipc_rcu_getref(&sma->sem_perm)) {
			err = -EIDRM;
			goto out_rcu_wakeup;
		}
		rcu_read_unlock();

		if (nsems > SEMMSL_FAST) {
			sem_io = kvmalloc_array(nsems, sizeof(ushort),
						GFP_KERNEL);
			if (sem_io == NULL) {
				ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
				return -ENOMEM;
			}
		}

		if (copy_from_user(sem_io, p, nsems*sizeof(ushort))) {
			ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
			err = -EFAULT;
			goto out_free;
		}

		/* Validate all values before touching the array. */
		for (i = 0; i < nsems; i++) {
			if (sem_io[i] > SEMVMX) {
				ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
				err = -ERANGE;
				goto out_free;
			}
		}
		rcu_read_lock();
		sem_lock_and_putref(sma);
		if (!ipc_valid_object(&sma->sem_perm)) {
			err = -EIDRM;
			goto out_unlock;
		}

		for (i = 0; i < nsems; i++) {
			sma->sems[i].semval = sem_io[i];
			ipc_update_pid(&sma->sems[i].sempid, task_tgid(current));
		}

		/* SETALL discards all undo adjustments for the set. */
		ipc_assert_locked_object(&sma->sem_perm);
		list_for_each_entry(un, &sma->list_id, list_id) {
			for (i = 0; i < nsems; i++)
				un->semadj[i] = 0;
		}
		sma->sem_ctime = ktime_get_real_seconds();
		/* maybe some queued-up processes were waiting for this */
		do_smart_update(sma, NULL, 0, 0, &wake_q);
		err = 0;
		goto out_unlock;
	}
	/* GETVAL, GETPID, GETNCTN, GETZCNT: fall-through */
	}
	err = -EINVAL;
	if (semnum < 0 || semnum >= nsems)
		goto out_rcu_wakeup;

	sem_lock(sma, NULL, -1);
	if (!ipc_valid_object(&sma->sem_perm)) {
		err = -EIDRM;
		goto out_unlock;
	}

	semnum = array_index_nospec(semnum, nsems);
	curr = &sma->sems[semnum];

	switch (cmd) {
	case GETVAL:
		err = curr->semval;
		goto out_unlock;
	case GETPID:
		err = pid_vnr(curr->sempid);
		goto out_unlock;
	case GETNCNT:
		err = count_semcnt(sma, semnum, 0);
		goto out_unlock;
	case GETZCNT:
		err = count_semcnt(sma, semnum, 1);
		goto out_unlock;
	}

out_unlock:
	sem_unlock(sma, -1);
out_rcu_wakeup:
	rcu_read_unlock();
	wake_up_q(&wake_q);
out_free:
	if (sem_io != fast_sem_io)
		kvfree(sem_io);
	return err;
}
1571
1572static inline unsigned long
1573copy_semid_from_user(struct semid64_ds *out, void __user *buf, int version)
1574{
1575 switch (version) {
1576 case IPC_64:
1577 if (copy_from_user(out, buf, sizeof(*out)))
1578 return -EFAULT;
1579 return 0;
1580 case IPC_OLD:
1581 {
1582 struct semid_ds tbuf_old;
1583
1584 if (copy_from_user(&tbuf_old, buf, sizeof(tbuf_old)))
1585 return -EFAULT;
1586
1587 out->sem_perm.uid = tbuf_old.sem_perm.uid;
1588 out->sem_perm.gid = tbuf_old.sem_perm.gid;
1589 out->sem_perm.mode = tbuf_old.sem_perm.mode;
1590
1591 return 0;
1592 }
1593 default:
1594 return -EINVAL;
1595 }
1596}
1597
1598
1599
1600
1601
1602
/*
 * This function handles some semctl commands which require the rwsem
 * to be held in write mode.
 * NOTE: no locks must be held, the rwsem is taken inside this function.
 */
static int semctl_down(struct ipc_namespace *ns, int semid,
		       int cmd, struct semid64_ds *semid64)
{
	struct sem_array *sma;
	int err;
	struct kern_ipc_perm *ipcp;

	down_write(&sem_ids(ns).rwsem);
	rcu_read_lock();

	/* permission/ownership checks for IPC_SET / IPC_RMID */
	ipcp = ipcctl_obtain_check(ns, &sem_ids(ns), semid, cmd,
				      &semid64->sem_perm, 0);
	if (IS_ERR(ipcp)) {
		err = PTR_ERR(ipcp);
		goto out_unlock1;
	}

	sma = container_of(ipcp, struct sem_array, sem_perm);

	err = security_sem_semctl(&sma->sem_perm, cmd);
	if (err)
		goto out_unlock1;

	switch (cmd) {
	case IPC_RMID:
		sem_lock(sma, NULL, -1);
		/* freeary unlocks the ipc object and rcu */
		freeary(ns, ipcp);
		goto out_up;
	case IPC_SET:
		sem_lock(sma, NULL, -1);
		err = ipc_update_perm(&semid64->sem_perm, ipcp);
		if (err)
			goto out_unlock0;
		sma->sem_ctime = ktime_get_real_seconds();
		break;
	default:
		err = -EINVAL;
		goto out_unlock1;
	}

out_unlock0:
	sem_unlock(sma, -1);
out_unlock1:
	rcu_read_unlock();
out_up:
	up_write(&sem_ids(ns).rwsem);
	return err;
}
1652
/*
 * Common semctl(2) dispatcher for the native and (via version) the
 * old-ABI entry points.
 */
static long ksys_semctl(int semid, int semnum, int cmd, unsigned long arg, int version)
{
	struct ipc_namespace *ns;
	void __user *p = (void __user *)arg;
	struct semid64_ds semid64;
	int err;

	if (semid < 0)
		return -EINVAL;

	ns = current->nsproxy->ipc_ns;

	switch (cmd) {
	case IPC_INFO:
	case SEM_INFO:
		return semctl_info(ns, semid, cmd, p);
	case IPC_STAT:
	case SEM_STAT:
	case SEM_STAT_ANY:
		err = semctl_stat(ns, semid, cmd, &semid64);
		if (err < 0)
			return err;
		if (copy_semid_to_user(p, &semid64, version))
			err = -EFAULT;
		return err;
	case GETALL:
	case GETVAL:
	case GETPID:
	case GETNCNT:
	case GETZCNT:
	case SETALL:
		return semctl_main(ns, semid, semnum, cmd, p);
	case SETVAL: {
		int val;
#if defined(CONFIG_64BIT) && defined(__BIG_ENDIAN)
		/* big-endian 64bit */
		val = arg >> 32;
#else
		/* 32bit or little-endian 64bit */
		val = arg;
#endif
		return semctl_setval(ns, semid, semnum, val);
	}
	case IPC_SET:
		if (copy_semid_from_user(&semid64, p, version))
			return -EFAULT;
		fallthrough;
	case IPC_RMID:
		return semctl_down(ns, semid, cmd, &semid64);
	default:
		return -EINVAL;
	}
}
1706
/* Native semctl() entry point: the native ABI always uses the IPC_64 layout. */
SYSCALL_DEFINE4(semctl, int, semid, int, semnum, int, cmd, unsigned long, arg)
{
	return ksys_semctl(semid, semnum, cmd, arg, IPC_64);
}
1711
1712#ifdef CONFIG_ARCH_WANT_IPC_PARSE_VERSION
/*
 * "Old" semctl() used by architectures whose legacy ABI encodes the IPC_64
 * flag inside @cmd; ipc_parse_version() strips the flag and reports which
 * semid_ds layout userspace expects.
 */
long ksys_old_semctl(int semid, int semnum, int cmd, unsigned long arg)
{
	int version = ipc_parse_version(&cmd);

	return ksys_semctl(semid, semnum, cmd, arg, version);
}
1719
/* Legacy semctl() syscall entry point (version flag parsed from @cmd). */
SYSCALL_DEFINE4(old_semctl, int, semid, int, semnum, int, cmd, unsigned long, arg)
{
	return ksys_old_semctl(semid, semnum, cmd, arg);
}
1724#endif
1725
1726#ifdef CONFIG_COMPAT
1727
/* Legacy (IPC_OLD) semid_ds layout as seen by 32-bit compat userspace. */
struct compat_semid_ds {
	struct compat_ipc_perm sem_perm;
	old_time32_t sem_otime;		/* last semop time, 32-bit seconds */
	old_time32_t sem_ctime;		/* last change time, 32-bit seconds */
	compat_uptr_t sem_base;		/* historical in-kernel pointers; */
	compat_uptr_t sem_pending;	/* always reported as 0, see */
	compat_uptr_t sem_pending_last;	/* copy_compat_semid_to_user() */
	compat_uptr_t undo;
	unsigned short sem_nsems;	/* number of semaphores in the set */
};
1738
1739static int copy_compat_semid_from_user(struct semid64_ds *out, void __user *buf,
1740 int version)
1741{
1742 memset(out, 0, sizeof(*out));
1743 if (version == IPC_64) {
1744 struct compat_semid64_ds __user *p = buf;
1745 return get_compat_ipc64_perm(&out->sem_perm, &p->sem_perm);
1746 } else {
1747 struct compat_semid_ds __user *p = buf;
1748 return get_compat_ipc_perm(&out->sem_perm, &p->sem_perm);
1749 }
1750}
1751
1752static int copy_compat_semid_to_user(void __user *buf, struct semid64_ds *in,
1753 int version)
1754{
1755 if (version == IPC_64) {
1756 struct compat_semid64_ds v;
1757 memset(&v, 0, sizeof(v));
1758 to_compat_ipc64_perm(&v.sem_perm, &in->sem_perm);
1759 v.sem_otime = lower_32_bits(in->sem_otime);
1760 v.sem_otime_high = upper_32_bits(in->sem_otime);
1761 v.sem_ctime = lower_32_bits(in->sem_ctime);
1762 v.sem_ctime_high = upper_32_bits(in->sem_ctime);
1763 v.sem_nsems = in->sem_nsems;
1764 return copy_to_user(buf, &v, sizeof(v));
1765 } else {
1766 struct compat_semid_ds v;
1767 memset(&v, 0, sizeof(v));
1768 to_compat_ipc_perm(&v.sem_perm, &in->sem_perm);
1769 v.sem_otime = in->sem_otime;
1770 v.sem_ctime = in->sem_ctime;
1771 v.sem_nsems = in->sem_nsems;
1772 return copy_to_user(buf, &v, sizeof(v));
1773 }
1774}
1775
/*
 * Common implementation of the 32-bit compat semctl() syscall.
 * Mirrors ksys_semctl(), but marshals semid_ds in the compat layouts and
 * takes a 32-bit @arg (a compat pointer for most commands, the raw value
 * for SETVAL).
 */
static long compat_ksys_semctl(int semid, int semnum, int cmd, int arg, int version)
{
	void __user *p = compat_ptr(arg);
	struct ipc_namespace *ns;
	struct semid64_ds semid64;
	int err;

	ns = current->nsproxy->ipc_ns;

	if (semid < 0)
		return -EINVAL;

	/* strip IPC_64: some ABIs pass the version flag inside @cmd */
	switch (cmd & (~IPC_64)) {
	case IPC_INFO:
	case SEM_INFO:
		return semctl_info(ns, semid, cmd, p);
	case IPC_STAT:
	case SEM_STAT:
	case SEM_STAT_ANY:
		err = semctl_stat(ns, semid, cmd, &semid64);
		if (err < 0)
			return err;
		/* copy out only after semctl_stat() dropped all locks */
		if (copy_compat_semid_to_user(p, &semid64, version))
			err = -EFAULT;
		return err;
	case GETVAL:
	case GETPID:
	case GETNCNT:
	case GETZCNT:
	case GETALL:
	case SETALL:
		return semctl_main(ns, semid, semnum, cmd, p);
	case SETVAL:
		return semctl_setval(ns, semid, semnum, arg);
	case IPC_SET:
		if (copy_compat_semid_from_user(&semid64, p, version))
			return -EFAULT;
		fallthrough;
	case IPC_RMID:
		return semctl_down(ns, semid, cmd, &semid64);
	default:
		return -EINVAL;
	}
}
1820
/* Compat semctl() entry point: the compat ABI defaults to the IPC_64 layout. */
COMPAT_SYSCALL_DEFINE4(semctl, int, semid, int, semnum, int, cmd, int, arg)
{
	return compat_ksys_semctl(semid, semnum, cmd, arg, IPC_64);
}
1825
1826#ifdef CONFIG_ARCH_WANT_COMPAT_IPC_PARSE_VERSION
/*
 * Legacy compat semctl(): the IPC_64 flag is encoded in @cmd;
 * compat_ipc_parse_version() strips it and returns the requested layout.
 */
long compat_ksys_old_semctl(int semid, int semnum, int cmd, int arg)
{
	int version = compat_ipc_parse_version(&cmd);

	return compat_ksys_semctl(semid, semnum, cmd, arg, version);
}
1833
/* Legacy compat semctl() syscall entry point. */
COMPAT_SYSCALL_DEFINE4(old_semctl, int, semid, int, semnum, int, cmd, int, arg)
{
	return compat_ksys_old_semctl(semid, semnum, cmd, arg);
}
1838#endif
1839#endif
1840
1841
1842
1843
1844
1845
1846
1847
1848
1849
1850
1851
1852static inline int get_undo_list(struct sem_undo_list **undo_listp)
1853{
1854 struct sem_undo_list *undo_list;
1855
1856 undo_list = current->sysvsem.undo_list;
1857 if (!undo_list) {
1858 undo_list = kzalloc(sizeof(*undo_list), GFP_KERNEL_ACCOUNT);
1859 if (undo_list == NULL)
1860 return -ENOMEM;
1861 spin_lock_init(&undo_list->lock);
1862 refcount_set(&undo_list->refcnt, 1);
1863 INIT_LIST_HEAD(&undo_list->list_proc);
1864
1865 current->sysvsem.undo_list = undo_list;
1866 }
1867 *undo_listp = undo_list;
1868 return 0;
1869}
1870
/*
 * Find the sem_undo for @semid in @ulp's per-process list, or NULL.
 *
 * Callers hold either ulp->lock or the RCU read lock; the
 * spin_is_locked() expression is the lockdep condition for the
 * RCU list traversal.
 */
static struct sem_undo *__lookup_undo(struct sem_undo_list *ulp, int semid)
{
	struct sem_undo *un;

	list_for_each_entry_rcu(un, &ulp->list_proc, list_proc,
				spin_is_locked(&ulp->lock)) {
		if (un->semid == semid)
			return un;
	}
	return NULL;
}
1882
1883static struct sem_undo *lookup_undo(struct sem_undo_list *ulp, int semid)
1884{
1885 struct sem_undo *un;
1886
1887 assert_spin_locked(&ulp->lock);
1888
1889 un = __lookup_undo(ulp, semid);
1890 if (un) {
1891 list_del_rcu(&un->list_proc);
1892 list_add_rcu(&un->list_proc, &ulp->list_proc);
1893 }
1894 return un;
1895}
1896
1897
1898
1899
1900
1901
1902
1903
1904
1905
1906
1907
/**
 * find_alloc_undo - lookup (and if not present create) undo array
 * @ns: namespace
 * @semid: semaphore array id
 *
 * The function looks up (and if not present creates) the undo structure.
 * The size of the undo structure depends on the size of the semaphore
 * array, thus the allocation path is not straightforward: the array size
 * must be read first, and validity re-checked after the allocation slept.
 * Lifetime-rules: sem_undo is rcu-protected; on success, the function
 * returns with the rcu_read_lock() held.
 */
static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid)
{
	struct sem_array *sma;
	struct sem_undo_list *ulp;
	struct sem_undo *un, *new;
	int nsems, error;

	error = get_undo_list(&ulp);
	if (error)
		return ERR_PTR(error);

	rcu_read_lock();
	spin_lock(&ulp->lock);
	un = lookup_undo(ulp, semid);
	spin_unlock(&ulp->lock);
	if (likely(un != NULL))
		goto out;

	/* no undo structure around - allocate one. */

	/* step 1: figure out the size of the semaphore array */
	sma = sem_obtain_object_check(ns, semid);
	if (IS_ERR(sma)) {
		rcu_read_unlock();
		return ERR_CAST(sma);
	}

	nsems = sma->sem_nsems;
	if (!ipc_rcu_getref(&sma->sem_perm)) {
		rcu_read_unlock();
		un = ERR_PTR(-EIDRM);
		goto out;
	}
	rcu_read_unlock();

	/* step 2: allocate new undo structure (may sleep) */
	new = kvzalloc(sizeof(struct sem_undo) + sizeof(short)*nsems,
		       GFP_KERNEL_ACCOUNT);
	if (!new) {
		ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
		return ERR_PTR(-ENOMEM);
	}

	/* step 3: re-acquire the lock on the semaphore array */
	rcu_read_lock();
	sem_lock_and_putref(sma);
	if (!ipc_valid_object(&sma->sem_perm)) {
		/* the array was removed while the allocation slept */
		sem_unlock(sma, -1);
		rcu_read_unlock();
		kvfree(new);
		un = ERR_PTR(-EIDRM);
		goto out;
	}
	spin_lock(&ulp->lock);

	/*
	 * step 4: check for races: did someone else allocate the undo
	 * structure while the locks were dropped?
	 */
	un = lookup_undo(ulp, semid);
	if (un) {
		kvfree(new);
		goto success;
	}
	/* step 5: initialize & link the new undo structure */
	new->semadj = (short *) &new[1];	/* semadj array lives right after the struct */
	new->ulp = ulp;
	new->semid = semid;
	assert_spin_locked(&ulp->lock);
	list_add_rcu(&new->list_proc, &ulp->list_proc);
	ipc_assert_locked_object(&sma->sem_perm);
	list_add(&new->list_id, &sma->list_id);
	un = new;

success:
	spin_unlock(&ulp->lock);
	sem_unlock(sma, -1);
out:
	return un;
}
1986
/*
 * __do_semtimedop - perform a semop()/semtimedop() operation
 * @semid:   semaphore set identifier
 * @sops:    array of operations (already copied into kernel space)
 * @nsops:   number of operations
 * @timeout: optional relative timeout, NULL for untimed semop()
 * @ns:      IPC namespace to operate in
 *
 * Tries the operation atomically; if it cannot complete, the task is
 * queued on the array and sleeps until woken, interrupted, or timed out.
 */
long __do_semtimedop(int semid, struct sembuf *sops,
		unsigned nsops, const struct timespec64 *timeout,
		struct ipc_namespace *ns)
{
	int error = -EINVAL;
	struct sem_array *sma;
	struct sembuf *sop;
	struct sem_undo *un;
	int max, locknum;
	bool undos = false, alter = false, dupsop = false;
	struct sem_queue queue;
	unsigned long dup = 0, jiffies_left = 0;

	if (nsops < 1 || semid < 0)
		return -EINVAL;
	if (nsops > ns->sc_semopm)
		return -E2BIG;

	if (timeout) {
		if (timeout->tv_sec < 0 || timeout->tv_nsec < 0 ||
			timeout->tv_nsec >= 1000000000L) {
			error = -EINVAL;
			goto out;
		}
		jiffies_left = timespec64_to_jiffies(timeout);
	}

	/*
	 * Scan the ops: find the highest semaphore index, whether any op
	 * uses SEM_UNDO, whether any op alters the array, and whether two
	 * altering ops (might) target the same semaphore.
	 */
	max = 0;
	for (sop = sops; sop < sops + nsops; sop++) {
		unsigned long mask = 1ULL << ((sop->sem_num) % BITS_PER_LONG);

		if (sop->sem_num >= max)
			max = sop->sem_num;
		if (sop->sem_flg & SEM_UNDO)
			undos = true;
		if (dup & mask) {
			/*
			 * A previous alter access appears to have targeted
			 * the same semaphore, so use the dupsop logic.
			 * "appears", because the detection can only check
			 * sem_num % BITS_PER_LONG.
			 */
			dupsop = true;
		}
		if (sop->sem_op != 0) {
			alter = true;
			dup |= mask;
		}
	}

	if (undos) {
		/* On success, find_alloc_undo() takes the rcu_read_lock */
		un = find_alloc_undo(ns, semid);
		if (IS_ERR(un)) {
			error = PTR_ERR(un);
			goto out;
		}
	} else {
		un = NULL;
		rcu_read_lock();
	}

	sma = sem_obtain_object_check(ns, semid);
	if (IS_ERR(sma)) {
		rcu_read_unlock();
		error = PTR_ERR(sma);
		goto out;
	}

	error = -EFBIG;
	if (max >= sma->sem_nsems) {
		rcu_read_unlock();
		goto out;
	}

	error = -EACCES;
	if (ipcperms(ns, &sma->sem_perm, alter ? S_IWUGO : S_IRUGO)) {
		rcu_read_unlock();
		goto out;
	}

	error = security_sem_semop(&sma->sem_perm, sops, nsops, alter);
	if (error) {
		rcu_read_unlock();
		goto out;
	}

	error = -EIDRM;
	locknum = sem_lock(sma, sops, nsops);
	/*
	 * Re-check validity under the lock: the array may have been
	 * removed (IPC_RMID) between sem_obtain_object_check() and
	 * sem_lock().  See the comments at sem_lock() for why this is
	 * RMID race safe with the fine-grained locking scheme.
	 */
	if (!ipc_valid_object(&sma->sem_perm))
		goto out_unlock;
	/*
	 * semid identifiers are not unique - find_alloc_undo may have
	 * allocated an undo structure, it was invalidated by an RMID
	 * and now a new array with the same id exists. Check and fail.
	 * This case can be detected checking un->semid. The existence
	 * of "un" itself is guaranteed by rcu.
	 */
	if (un && un->semid == -1)
		goto out_unlock;

	queue.sops = sops;
	queue.nsops = nsops;
	queue.undo = un;
	queue.pid = task_tgid(current);
	queue.alter = alter;
	queue.dupsop = dupsop;

	error = perform_atomic_semop(sma, &queue);
	if (error == 0) { /* non-blocking succesful path */
		DEFINE_WAKE_Q(wake_q);

		/*
		 * The operation succeeded immediately: propagate the
		 * update and wake any waiters it may have unblocked.
		 */
		if (alter)
			do_smart_update(sma, sops, nsops, 1, &wake_q);
		else
			set_semotime(sma, sops);

		/* wake up only after dropping the locks */
		sem_unlock(sma, locknum);
		rcu_read_unlock();
		wake_up_q(&wake_q);

		goto out;
	}
	if (error < 0) /* non-blocking error path */
		goto out_unlock;

	/*
	 * We need to sleep on this operation, so we put the current
	 * task into the pending queue and go to sleep.
	 */
	if (nsops == 1) {
		struct sem *curr;
		int idx = array_index_nospec(sops->sem_num, sma->sem_nsems);
		curr = &sma->sems[idx];

		if (alter) {
			if (sma->complex_count) {
				list_add_tail(&queue.list,
						&sma->pending_alter);
			} else {
				/* single-sop ops go on the per-semaphore list */
				list_add_tail(&queue.list,
						&curr->pending_alter);
			}
		} else {
			list_add_tail(&queue.list, &curr->pending_const);
		}
	} else {
		if (!sma->complex_count)
			merge_queues(sma);

		if (alter)
			list_add_tail(&queue.list, &sma->pending_alter);
		else
			list_add_tail(&queue.list, &sma->pending_const);

		sma->complex_count++;
	}

	do {
		/* memory ordering ensured by the lock in sem_lock() */
		WRITE_ONCE(queue.status, -EINTR);
		queue.sleeper = current;

		/* memory ordering is ensured by the lock in sem_lock() */
		__set_current_state(TASK_INTERRUPTIBLE);
		sem_unlock(sma, locknum);
		rcu_read_unlock();

		if (timeout)
			jiffies_left = schedule_timeout(jiffies_left);
		else
			schedule();

		/*
		 * fastpath: the semop has completed, either successfully or
		 * not, from the syscall pov, is quite irrelevant to us at this
		 * point; we're done.
		 *
		 * We _do_ care, nonetheless, about being awoken by a signal or
		 * spuriously.  The queue.status is checked again in the
		 * slowpath (aka after taking sem_lock), such that we can detect
		 * scenarios where we were awakened externally, during the
		 * window between wake_up_process() and schedule().
		 */
		error = READ_ONCE(queue.status);
		if (error != -EINTR) {
			/* pairs with the barrier on the waker's side */
			smp_acquire__after_ctrl_dep();
			goto out;
		}

		rcu_read_lock();
		locknum = sem_lock(sma, sops, nsops);

		if (!ipc_valid_object(&sma->sem_perm))
			goto out_unlock;

		/*
		 * No necessity for any barrier: we are protected by sem_lock()
		 */
		error = READ_ONCE(queue.status);

		/*
		 * If queue.status != -EINTR we are woken up by another process.
		 * Leave without unlink_queue(), but with sem_unlock().
		 */
		if (error != -EINTR)
			goto out_unlock;

		/*
		 * If an interrupt occurred we have to clean up the queue.
		 */
		if (timeout && jiffies_left == 0)
			error = -EAGAIN;
	} while (error == -EINTR && !signal_pending(current)); /* spurious */

	unlink_queue(sma, &queue);

out_unlock:
	sem_unlock(sma, locknum);
	rcu_read_unlock();
out:
	return error;
}
2225
2226static long do_semtimedop(int semid, struct sembuf __user *tsops,
2227 unsigned nsops, const struct timespec64 *timeout)
2228{
2229 struct sembuf fast_sops[SEMOPM_FAST];
2230 struct sembuf *sops = fast_sops;
2231 struct ipc_namespace *ns;
2232 int ret;
2233
2234 ns = current->nsproxy->ipc_ns;
2235 if (nsops > ns->sc_semopm)
2236 return -E2BIG;
2237 if (nsops < 1)
2238 return -EINVAL;
2239
2240 if (nsops > SEMOPM_FAST) {
2241 sops = kvmalloc_array(nsops, sizeof(*sops), GFP_KERNEL);
2242 if (sops == NULL)
2243 return -ENOMEM;
2244 }
2245
2246 if (copy_from_user(sops, tsops, nsops * sizeof(*tsops))) {
2247 ret = -EFAULT;
2248 goto out_free;
2249 }
2250
2251 ret = __do_semtimedop(semid, sops, nsops, timeout, ns);
2252
2253out_free:
2254 if (sops != fast_sops)
2255 kvfree(sops);
2256
2257 return ret;
2258}
2259
2260long ksys_semtimedop(int semid, struct sembuf __user *tsops,
2261 unsigned int nsops, const struct __kernel_timespec __user *timeout)
2262{
2263 if (timeout) {
2264 struct timespec64 ts;
2265 if (get_timespec64(&ts, timeout))
2266 return -EFAULT;
2267 return do_semtimedop(semid, tsops, nsops, &ts);
2268 }
2269 return do_semtimedop(semid, tsops, nsops, NULL);
2270}
2271
/* semtimedop() syscall entry point. */
SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
		unsigned int, nsops, const struct __kernel_timespec __user *, timeout)
{
	return ksys_semtimedop(semid, tsops, nsops, timeout);
}
2277
2278#ifdef CONFIG_COMPAT_32BIT_TIME
2279long compat_ksys_semtimedop(int semid, struct sembuf __user *tsems,
2280 unsigned int nsops,
2281 const struct old_timespec32 __user *timeout)
2282{
2283 if (timeout) {
2284 struct timespec64 ts;
2285 if (get_old_timespec32(&ts, timeout))
2286 return -EFAULT;
2287 return do_semtimedop(semid, tsems, nsops, &ts);
2288 }
2289 return do_semtimedop(semid, tsems, nsops, NULL);
2290}
2291
/* semtimedop() syscall entry point for the 32-bit time ABI. */
SYSCALL_DEFINE4(semtimedop_time32, int, semid, struct sembuf __user *, tsems,
		unsigned int, nsops,
		const struct old_timespec32 __user *, timeout)
{
	return compat_ksys_semtimedop(semid, tsems, nsops, timeout);
}
2298#endif
2299
/* semop() is semtimedop() without a timeout. */
SYSCALL_DEFINE3(semop, int, semid, struct sembuf __user *, tsops,
		unsigned, nsops)
{
	return do_semtimedop(semid, tsops, nsops, NULL);
}
2305
2306
2307
2308
2309
2310int copy_semundo(unsigned long clone_flags, struct task_struct *tsk)
2311{
2312 struct sem_undo_list *undo_list;
2313 int error;
2314
2315 if (clone_flags & CLONE_SYSVSEM) {
2316 error = get_undo_list(&undo_list);
2317 if (error)
2318 return error;
2319 refcount_inc(&undo_list->refcnt);
2320 tsk->sysvsem.undo_list = undo_list;
2321 } else
2322 tsk->sysvsem.undo_list = NULL;
2323
2324 return 0;
2325}
2326
2327
2328
2329
2330
2331
2332
2333
2334
2335
2336
2337
2338
/*
 * exit_sem - apply this task's semadj values and free its undo structures.
 *
 * Called at task exit.  Undo structures are not freed when semaphore
 * arrays are destroyed, so some of them may be stale; every step below
 * therefore re-validates against a concurrent IPC_RMID.
 * The adjustments are applied non-atomically (the task does not queue
 * and wait for a legal decrement); out-of-range results are clamped.
 */
void exit_sem(struct task_struct *tsk)
{
	struct sem_undo_list *ulp;

	ulp = tsk->sysvsem.undo_list;
	if (!ulp)
		return;
	tsk->sysvsem.undo_list = NULL;

	/* the list is shared with CLONE_SYSVSEM siblings; last one cleans up */
	if (!refcount_dec_and_test(&ulp->refcnt))
		return;

	for (;;) {
		struct sem_array *sma;
		struct sem_undo *un;
		int semid, i;
		DEFINE_WAKE_Q(wake_q);

		cond_resched();

		rcu_read_lock();
		un = list_entry_rcu(ulp->list_proc.next,
				    struct sem_undo, list_proc);
		if (&un->list_proc == &ulp->list_proc) {
			/*
			 * The list is empty.  We must wait for a concurrent
			 * freeary() before freeing this ulp: there is a small
			 * window where freeary() has unlinked the last un but
			 * not yet dropped ulp->lock.  The lock/unlock pair
			 * synchronizes with it.
			 */
			spin_lock(&ulp->lock);
			spin_unlock(&ulp->lock);
			rcu_read_unlock();
			break;
		}
		spin_lock(&ulp->lock);
		semid = un->semid;
		spin_unlock(&ulp->lock);

		/* exit_sem raced with IPC_RMID, nothing to do */
		if (semid == -1) {
			rcu_read_unlock();
			continue;
		}

		sma = sem_obtain_object_check(tsk->nsproxy->ipc_ns, semid);
		/* exit_sem raced with IPC_RMID, nothing to do */
		if (IS_ERR(sma)) {
			rcu_read_unlock();
			continue;
		}

		sem_lock(sma, NULL, -1);
		/* exit_sem raced with IPC_RMID, nothing to do */
		if (!ipc_valid_object(&sma->sem_perm)) {
			sem_unlock(sma, -1);
			rcu_read_unlock();
			continue;
		}
		un = __lookup_undo(ulp, semid);
		if (un == NULL) {
			/*
			 * exit_sem raced with IPC_RMID+semget() that created
			 * exactly the same semid.  Nothing to do.
			 */
			sem_unlock(sma, -1);
			rcu_read_unlock();
			continue;
		}

		/* remove un from the linked lists */
		ipc_assert_locked_object(&sma->sem_perm);
		list_del(&un->list_id);

		spin_lock(&ulp->lock);
		list_del_rcu(&un->list_proc);
		spin_unlock(&ulp->lock);

		/* perform the adjustments registered in un */
		for (i = 0; i < sma->sem_nsems; i++) {
			struct sem *semaphore = &sma->sems[i];
			if (un->semadj[i]) {
				semaphore->semval += un->semadj[i];
				/*
				 * Range checks of the new semaphore value,
				 * not defined by SUS:
				 * - some unices ignore the undo entirely,
				 * - some cap the value.
				 *
				 * Linux caps the semaphore value, both at 0
				 * and at SEMVMX.
				 */
				if (semaphore->semval < 0)
					semaphore->semval = 0;
				if (semaphore->semval > SEMVMX)
					semaphore->semval = SEMVMX;
				ipc_update_pid(&semaphore->sempid, task_tgid(current));
			}
		}

		/* maybe some queued-up processes were waiting for this */
		do_smart_update(sma, NULL, 0, 1, &wake_q);
		sem_unlock(sma, -1);
		rcu_read_unlock();
		wake_up_q(&wake_q);

		kvfree_rcu(un, rcu);
	}
	kfree(ulp);
}
2451
2452#ifdef CONFIG_PROC_FS
/* Emit one /proc/sysvipc/sem row for the semaphore set @it. */
static int sysvipc_sem_proc_show(struct seq_file *s, void *it)
{
	struct user_namespace *user_ns = seq_user_ns(s);
	struct kern_ipc_perm *ipcp = it;
	struct sem_array *sma = container_of(ipcp, struct sem_array, sem_perm);
	time64_t sem_otime;

	/*
	 * The proc interface isn't aware of sem_lock(); it calls
	 * ipc_lock_object(), i.e. takes only the global lock.
	 * In order to stay compatible with sem_lock(), we must
	 * enter / leave complex_mode around the access.
	 */
	complexmode_enter(sma);

	sem_otime = get_semotime(sma);

	seq_printf(s,
		   "%10d %10d %4o %10u %5u %5u %5u %5u %10llu %10llu\n",
		   sma->sem_perm.key,
		   sma->sem_perm.id,
		   sma->sem_perm.mode,
		   sma->sem_nsems,
		   from_kuid_munged(user_ns, sma->sem_perm.uid),
		   from_kgid_munged(user_ns, sma->sem_perm.gid),
		   from_kuid_munged(user_ns, sma->sem_perm.cuid),
		   from_kgid_munged(user_ns, sma->sem_perm.cgid),
		   sem_otime,
		   sma->sem_ctime);

	complexmode_tryleave(sma);

	return 0;
}
2488#endif
2489