// SPDX-License-Identifier: GPL-2.0
/*
 * linux/ipc/sem.c
 * Copyright (C) 1992 Krishna Balasubramanian
 * Copyright (C) 1995 Eric Schenk, Bruno Haible
 *
 * /proc/sysvipc/sem support (c) 1999 Dragos Acostachioaie <dragos@iname.com>
 *
 * SMP-threaded, sysctl's added
 * (c) 1999 Manfred Spraul <manfred@colorfullife.com>
 * Enforced range limit on SEM_UNDO
 * (c) 2001 Red Hat Inc
 * Lockless wakeup
 * (c) 2003 Manfred Spraul <manfred@colorfullife.com>
 * (c) 2016 Davidlohr Bueso <dave@stgolabs.net>
 * Further wakeup optimizations, documentation
 * (c) 2010 Manfred Spraul <manfred@colorfullife.com>
 *
 * support for audit of ipc object properties and permission changes
 * Dustin Kirkland <dustin.kirkland@us.ibm.com>
 *
 * namespaces support
 * OpenVZ, SWsoft Inc.
 * Pavel Emelianov <xemul@openvz.org>
 *
 * Implementation notes: (May 2010)
 * This file implements System V semaphores.
 *
 * User space visible behavior:
 * - FIFO ordering for semop() operations (just FIFO, not starvation
 *   protection)
 * - multiple semaphore operations that alter the same semaphore in
 *   one semop() are handled.
 * - sem_ctime (time of last semctl()) is updated in the IPC_SET, SETVAL and
 *   SETALL calls.
 * - two Linux specific semctl() commands: SEM_STAT, SEM_INFO.
 * - undo adjustments at process exit are limited to 0..SEMVMX.
 * - namespaces are supported.
 * - SEMMSL, SEMMNS, SEMOPM and SEMMNI can be configured at runtime by writing
 *   to /proc/sys/kernel/sem.
 * - statistics about the usage are reported in /proc/sysvipc/sem.
 *
 * Internals:
 * - scalability:
 *   - all global variables are read-mostly.
 *   - semop() calls and semctl(RMID) are synchronized by RCU.
 *   - most operations do write operations (actually: spin_lock calls) to
 *     the per-semaphore array structure.
 *   Thus: Perfect SMP scaling between independent semaphore arrays.
 *         If multiple semaphores in one array are used, then cache line
 *         thrashing on the semaphore array spinlock will limit the scaling.
 * - semncnt and semzcnt are calculated on demand in count_semcnt()
 * - the task that performs a successful semop() scans the list of all
 *   sleeping tasks and completes any pending operations that can be fulfilled.
 *   Semaphores are actively given to waiting tasks (necessary for FIFO).
 *   (see update_queue())
 * - To improve the scalability, the actual wake-up calls are performed after
 *   dropping all locks. (see wake_up_sem_queue_prepare())
 * - All work is done by the waker, the woken up task does not have to do
 *   anything - not even acquiring a lock or dropping a refcount.
 * - A woken up task may not even touch the semaphore array anymore, it may
 *   have been destroyed already by a semctl(RMID).
 * - UNDO values are stored in an array (one per process and per
 *   semaphore array, lazily allocated). For backwards compatibility, multiple
 *   modes for the UNDO variables are supported (per process, per thread)
 *   (see copy_semundo, CLONE_SYSVSEM)
 * - There are two lists of the pending operations: a per-array list
 *   and per-semaphore list (stored in the array). This allows to achieve FIFO
 *   ordering without always scanning all pending operations.
 *   The worst-case behavior is nevertheless O(N^2) for N wakeups.
 */
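
/*
 * Illustrative user-space sketch of the interface implemented here (not
 * part of the kernel build; error handling omitted, and "semid" is
 * assumed to come from a prior semget()):
 *
 *        struct sembuf sop = {
 *                .sem_num = 0, .sem_op = -1, .sem_flg = SEM_UNDO,
 *        };
 *        if (semop(semid, &sop, 1) == 0)
 *                ;        // sems[0] decremented, undo adjustment recorded
 *
 * The FIFO ordering and active hand-off described above apply when such
 * a call must sleep because the decrement would take sems[0] below zero.
 */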

#include <linux/compat.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/proc_fs.h>
#include <linux/time.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/audit.h>
#include <linux/capability.h>
#include <linux/seq_file.h>
#include <linux/rwsem.h>
#include <linux/nsproxy.h>
#include <linux/ipc_namespace.h>
#include <linux/sched/wake_q.h>
#include <linux/nospec.h>
#include <linux/rhashtable.h>

#include <linux/uaccess.h>
#include "util.h"

/* One semaphore structure for each semaphore in the system. */
struct sem {
        int     semval;         /* current value */
        /*
         * PID of the process that last modified the semaphore. For
         * Linux, specifically these are:
         *  - semop
         *  - semctl, via SETVAL and SETALL.
         *  - at task exit when performing undo adjustments (see exit_sem).
         */
        struct pid *sempid;
        spinlock_t      lock;   /* spinlock for fine-grained semtimedop */
        struct list_head pending_alter; /* pending single-sop operations */
                                        /* that alter the semaphore */
        struct list_head pending_const; /* pending single-sop operations */
                                        /* that do not alter the semaphore */
        time64_t         sem_otime;     /* candidate for sem_otime */
} ____cacheline_aligned_in_smp;

/* One sem_array data structure for each set of semaphores in the system. */
struct sem_array {
        struct kern_ipc_perm    sem_perm;       /* permissions .. see ipc.h */
        time64_t                sem_ctime;      /* create/last semctl() time */
        struct list_head        pending_alter;  /* pending operations */
                                                /* that alter the array */
        struct list_head        pending_const;  /* pending complex operations */
                                                /* that do not alter semvals */
        struct list_head        list_id;        /* undo requests on this array */
        int                     sem_nsems;      /* no. of semaphores in array */
        int                     complex_count;  /* pending complex operations */
        unsigned int            use_global_lock;/* >0: global lock required */

        struct sem              sems[];
} __randomize_layout;

/* One queue for each sleeping process in the system. */
struct sem_queue {
        struct list_head        list;    /* queue of pending operations */
        struct task_struct      *sleeper; /* this process */
        struct sem_undo         *undo;   /* undo structure */
        struct pid              *pid;    /* process id of requesting process */
        int                     status;  /* completion status of operation */
        struct sembuf           *sops;   /* array of pending operations */
        struct sembuf           *blocking; /* the operation that blocked */
        int                     nsops;   /* number of operations */
        bool                    alter;   /* does *sops alter the array? */
        bool                    dupsop;  /* multiple sops on the same sem_num */
};

/* Each task has a list of undo requests. They are executed automatically
 * when the process exits.
 */
struct sem_undo {
        struct list_head        list_proc;      /* per-process list: *
                                                 * all undos from one process
                                                 * rcu protected */
        struct rcu_head         rcu;            /* rcu struct for sem_undo */
        struct sem_undo_list    *ulp;           /* back ptr to sem_undo_list */
        struct list_head        list_id;        /* per semaphore array list:
                                                 * all undos for one array */
        int                     semid;          /* semaphore set identifier */
        short                   *semadj;        /* array of adjustments */
                                                /* one per semaphore */
};

/* sem_undo_list controls shared access to the list of sem_undo structures
 * that may be shared among all tasks in a CLONE_SYSVSEM task group.
 */
struct sem_undo_list {
        refcount_t              refcnt;
        spinlock_t              lock;
        struct list_head        list_proc;
};


#define sem_ids(ns)     ((ns)->ids[IPC_SEM_IDS])

static int newary(struct ipc_namespace *, struct ipc_params *);
static void freeary(struct ipc_namespace *, struct kern_ipc_perm *);
#ifdef CONFIG_PROC_FS
static int sysvipc_sem_proc_show(struct seq_file *s, void *it);
#endif

#define SEMMSL_FAST     256 /* 512 bytes on stack */
#define SEMOPM_FAST     64  /* ~ 372 bytes on stack */

/*
 * Switching from the mode suitable for simple ops
 * to the mode for complex ops is costly. Therefore:
 * use some hysteresis
 */
#define USE_GLOBAL_LOCK_HYSTERESIS      10

/*
 * Locking:
 * a) global sem_lock() for read/write
 *      sem_undo.id_next,
 *      sem_array.complex_count,
 *      sem_array.pending{_alter,_const},
 *      sem_array.sem_undo
 *
 * b) global or semaphore sem_lock() for read/write:
 *      sem_array.sems[i].pending_{const,alter}:
 *
 * c) special:
 *      sem_undo_list.list_proc:
 *      * undo_list->lock for write
 *      * rcu for read
 *      use_global_lock:
 *      * global sem_lock() for write
 *      * either local or global sem_lock() for read.
 *
 * Memory ordering:
 * Most ordering is enforced by using spin_lock() and spin_unlock().
 * The special case is use_global_lock:
 * Setting it from non-zero to 0 is a RELEASE, this is ensured by
 * using smp_store_release().
 * Testing if it is non-zero is an ACQUIRE, this is ensured by using
 * smp_load_acquire().
 * Setting it from 0 to non-zero must be ordered with regards to
 * this smp_load_acquire(), this is guaranteed because the smp_load_acquire()
 * is inside a spin_lock() and after a write from 0 to non-zero a
 * spin_lock()+spin_unlock() is done.
 */
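
/*
 * A condensed sketch of the acquire/release pairing described above
 * (simplified; the real code paths are sem_lock() and
 * complexmode_tryleave() below):
 *
 *        simple op (fast path)                 complex op (leaving)
 *
 *        spin_lock(&sem->lock);
 *        if (!smp_load_acquire(
 *                        &sma->use_global_lock))
 *                return;  // per-sem lock valid
 *                                              smp_store_release(
 *                                                  &sma->use_global_lock, 0);
 *
 * The release guarantees that every write done under the global lock is
 * visible to a simple op that observes use_global_lock == 0.
 */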

#define sc_semmsl       sem_ctls[0]
#define sc_semmns       sem_ctls[1]
#define sc_semopm       sem_ctls[2]
#define sc_semmni       sem_ctls[3]

void sem_init_ns(struct ipc_namespace *ns)
{
        ns->sc_semmsl = SEMMSL;
        ns->sc_semmns = SEMMNS;
        ns->sc_semopm = SEMOPM;
        ns->sc_semmni = SEMMNI;
        ns->used_sems = 0;
        ipc_init_ids(&ns->ids[IPC_SEM_IDS]);
}

#ifdef CONFIG_IPC_NS
void sem_exit_ns(struct ipc_namespace *ns)
{
        free_ipcs(ns, &sem_ids(ns), freeary);
        idr_destroy(&ns->ids[IPC_SEM_IDS].ipcs_idr);
        rhashtable_destroy(&ns->ids[IPC_SEM_IDS].key_ht);
}
#endif

void __init sem_init(void)
{
        sem_init_ns(&init_ipc_ns);
        ipc_init_proc_interface("sysvipc/sem",
                                "       key      semid perms      nsems   uid   gid  cuid  cgid      otime      ctime\n",
                                IPC_SEM_IDS, sysvipc_sem_proc_show);
}

/**
 * unmerge_queues - unmerge queues, if possible.
 * @sma: semaphore array
 *
 * The function unmerges the wait queues if complex_count is 0.
 * It must be called prior to dropping the global semaphore array lock.
 */
static void unmerge_queues(struct sem_array *sma)
{
        struct sem_queue *q, *tq;

        /* complex operations still around? */
        if (sma->complex_count)
                return;
        /*
         * We will switch back to simple mode.
         * Move all pending operation back into the per-semaphore
         * queues.
         */
        list_for_each_entry_safe(q, tq, &sma->pending_alter, list) {
                struct sem *curr;
                curr = &sma->sems[q->sops[0].sem_num];

                list_add_tail(&q->list, &curr->pending_alter);
        }
        INIT_LIST_HEAD(&sma->pending_alter);
}

/**
 * merge_queues - merge single semop queues into global queue
 * @sma: semaphore array
 *
 * This function merges all per-semaphore queues into the global queue.
 * It is necessary to achieve FIFO ordering for the pending single-sop
 * operations when a multi-semop operation must sleep.
 * Only the alter operations must be moved, the const operations can stay.
 */
static void merge_queues(struct sem_array *sma)
{
        int i;
        for (i = 0; i < sma->sem_nsems; i++) {
                struct sem *sem = &sma->sems[i];

                list_splice_init(&sem->pending_alter, &sma->pending_alter);
        }
}

static void sem_rcu_free(struct rcu_head *head)
{
        struct kern_ipc_perm *p = container_of(head, struct kern_ipc_perm, rcu);
        struct sem_array *sma = container_of(p, struct sem_array, sem_perm);

        security_sem_free(&sma->sem_perm);
        kvfree(sma);
}

/*
 * Enter the mode suitable for non-simple operations:
 * Caller must own sem_perm.lock.
 */
static void complexmode_enter(struct sem_array *sma)
{
        int i;
        struct sem *sem;

        if (sma->use_global_lock > 0) {
                /*
                 * We are already in global lock mode.
                 * Nothing to do, just reset the
                 * counter until we return to simple mode.
                 */
                sma->use_global_lock = USE_GLOBAL_LOCK_HYSTERESIS;
                return;
        }
        sma->use_global_lock = USE_GLOBAL_LOCK_HYSTERESIS;

        for (i = 0; i < sma->sem_nsems; i++) {
                sem = &sma->sems[i];
                spin_lock(&sem->lock);
                spin_unlock(&sem->lock);
        }
}

/*
 * Try to leave the mode that disallows simple operations:
 * Caller must own sem_perm.lock.
 */
static void complexmode_tryleave(struct sem_array *sma)
{
        if (sma->complex_count) {
                /* Complex ops are sleeping.
                 * We must stay in complex mode
                 */
                return;
        }
        if (sma->use_global_lock == 1) {
                /*
                 * Immediately after setting use_global_lock to 0,
                 * a simple op can start. Thus: all memory writes
                 * performed by the current operation must be visible
                 * before we set use_global_lock to 0.
                 */
                smp_store_release(&sma->use_global_lock, 0);
        } else {
                sma->use_global_lock--;
        }
}

#define SEM_GLOBAL_LOCK (-1)
/*
 * If the request contains only one semaphore operation, and there are
 * no complex transactions pending, lock only the semaphore involved.
 * Otherwise, lock the entire semaphore array, since we either have
 * multiple semaphores in our own semops, or we need to look at
 * semaphores from other pending complex operations.
 */
static inline int sem_lock(struct sem_array *sma, struct sembuf *sops,
                              int nsops)
{
        struct sem *sem;
        int idx;

        if (nsops != 1) {
                /* Complex operation - acquire a full lock */
                ipc_lock_object(&sma->sem_perm);

                /* Prevent parallel simple ops */
                complexmode_enter(sma);
                return SEM_GLOBAL_LOCK;
        }

        /*
         * Only one semaphore affected - try to optimize locking.
         * Optimized locking is possible if no complex operation
         * is either enqueued or processed right now.
         *
         * Both facts are tracked by use_global_lock.
         */
        idx = array_index_nospec(sops->sem_num, sma->sem_nsems);
        sem = &sma->sems[idx];

        /*
         * Initial check for use_global_lock. Just an optimization,
         * no locking, no memory barrier.
         */
        if (!sma->use_global_lock) {
                /*
                 * It appears that no complex operation is around.
                 * Acquire the per-semaphore lock.
                 */
                spin_lock(&sem->lock);

                /* pairs with smp_store_release() */
                if (!smp_load_acquire(&sma->use_global_lock)) {
                        /* fast path successful! */
                        return sops->sem_num;
                }
                spin_unlock(&sem->lock);
        }

        /* slow path: acquire the full lock */
        ipc_lock_object(&sma->sem_perm);

        if (sma->use_global_lock == 0) {
                /*
                 * The use_global_lock mode ended while we waited for
                 * sma->sem_perm.lock. Thus we must switch to locking
                 * with sem->lock.
                 * Unlike in the fast path, there is no need to recheck
                 * sma->use_global_lock after we have acquired sem->lock:
                 * We own sma->sem_perm.lock, thus use_global_lock cannot
                 * change.
                 */
                spin_lock(&sem->lock);

                ipc_unlock_object(&sma->sem_perm);
                return sops->sem_num;
        } else {
                /*
                 * Not a false alarm, thus continue to use the global lock
                 * mode. No need for complexmode_enter(), this was done by
                 * the caller that has set use_global_lock to non-zero.
                 */
                return SEM_GLOBAL_LOCK;
        }
}

static inline void sem_unlock(struct sem_array *sma, int locknum)
{
        if (locknum == SEM_GLOBAL_LOCK) {
                unmerge_queues(sma);
                complexmode_tryleave(sma);
                ipc_unlock_object(&sma->sem_perm);
        } else {
                struct sem *sem = &sma->sems[locknum];
                spin_unlock(&sem->lock);
        }
}

/*
 * sem_lock_(check_) routines are called in the paths where the rwsem
 * is not held.
 *
 * The caller holds the RCU read lock.
 */
static inline struct sem_array *sem_obtain_object(struct ipc_namespace *ns, int id)
{
        struct kern_ipc_perm *ipcp = ipc_obtain_object_idr(&sem_ids(ns), id);

        if (IS_ERR(ipcp))
                return ERR_CAST(ipcp);

        return container_of(ipcp, struct sem_array, sem_perm);
}

static inline struct sem_array *sem_obtain_object_check(struct ipc_namespace *ns,
                                                        int id)
{
        struct kern_ipc_perm *ipcp = ipc_obtain_object_check(&sem_ids(ns), id);

        if (IS_ERR(ipcp))
                return ERR_CAST(ipcp);

        return container_of(ipcp, struct sem_array, sem_perm);
}

static inline void sem_lock_and_putref(struct sem_array *sma)
{
        sem_lock(sma, NULL, -1);
        ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
}

static inline void sem_rmid(struct ipc_namespace *ns, struct sem_array *s)
{
        ipc_rmid(&sem_ids(ns), &s->sem_perm);
}

static struct sem_array *sem_alloc(size_t nsems)
{
        struct sem_array *sma;

        if (nsems > (INT_MAX - sizeof(*sma)) / sizeof(sma->sems[0]))
                return NULL;

        sma = kvzalloc(struct_size(sma, sems, nsems), GFP_KERNEL);
        if (unlikely(!sma))
                return NULL;

        return sma;
}

/**
 * newary - Create a new semaphore set
 * @ns: namespace
 * @params: ptr to the structure that contains key, semflg and nsems
 *
 * Called with sem_ids.rwsem held (as a writer)
 */
static int newary(struct ipc_namespace *ns, struct ipc_params *params)
{
        int retval;
        struct sem_array *sma;
        key_t key = params->key;
        int nsems = params->u.nsems;
        int semflg = params->flg;
        int i;

        if (!nsems)
                return -EINVAL;
        if (ns->used_sems + nsems > ns->sc_semmns)
                return -ENOSPC;

        sma = sem_alloc(nsems);
        if (!sma)
                return -ENOMEM;

        sma->sem_perm.mode = (semflg & S_IRWXUGO);
        sma->sem_perm.key = key;

        sma->sem_perm.security = NULL;
        retval = security_sem_alloc(&sma->sem_perm);
        if (retval) {
                kvfree(sma);
                return retval;
        }

        for (i = 0; i < nsems; i++) {
                INIT_LIST_HEAD(&sma->sems[i].pending_alter);
                INIT_LIST_HEAD(&sma->sems[i].pending_const);
                spin_lock_init(&sma->sems[i].lock);
        }

        sma->complex_count = 0;
        sma->use_global_lock = USE_GLOBAL_LOCK_HYSTERESIS;
        INIT_LIST_HEAD(&sma->pending_alter);
        INIT_LIST_HEAD(&sma->pending_const);
        INIT_LIST_HEAD(&sma->list_id);
        sma->sem_nsems = nsems;
        sma->sem_ctime = ktime_get_real_seconds();

        /* ipc_addid() locks sma upon success. */
        retval = ipc_addid(&sem_ids(ns), &sma->sem_perm, ns->sc_semmni);
        if (retval < 0) {
                ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
                return retval;
        }
        ns->used_sems += nsems;

        sem_unlock(sma, -1);
        rcu_read_unlock();

        return sma->sem_perm.id;
}


/*
 * Called with sem_ids.rwsem and ipcp locked.
 */
static inline int sem_more_checks(struct kern_ipc_perm *ipcp,
                                struct ipc_params *params)
{
        struct sem_array *sma;

        sma = container_of(ipcp, struct sem_array, sem_perm);
        if (params->u.nsems > sma->sem_nsems)
                return -EINVAL;

        return 0;
}

long ksys_semget(key_t key, int nsems, int semflg)
{
        struct ipc_namespace *ns;
        static const struct ipc_ops sem_ops = {
                .getnew = newary,
                .associate = security_sem_associate,
                .more_checks = sem_more_checks,
        };
        struct ipc_params sem_params;

        ns = current->nsproxy->ipc_ns;

        if (nsems < 0 || nsems > ns->sc_semmsl)
                return -EINVAL;

        sem_params.key = key;
        sem_params.flg = semflg;
        sem_params.u.nsems = nsems;

        return ipcget(ns, &sem_ids(ns), &sem_ops, &sem_params);
}

SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
{
        return ksys_semget(key, nsems, semflg);
}
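
/*
 * Illustrative user-space call (error handling omitted): create or look
 * up a set of 4 semaphores. It fails with EINVAL if nsems exceeds the
 * SEMMSL limit checked in ksys_semget() above.
 *
 *        int semid = semget(IPC_PRIVATE, 4, IPC_CREAT | 0600);
 */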

/**
 * perform_atomic_semop[_slow] - Attempt to perform semaphore
 *                               operations on a given array.
 * @sma: semaphore array
 * @q: struct sem_queue that describes the operation
 *
 * Whether the caller blocks is determined as follows, based on the value
 * indicated by the semaphore operation (sem_op):
 *
 *  (1) >0 never blocks.
 *  (2)  0 (wait-for-zero operation): blocks while semval is non-zero.
 *  (3) <0 blocks if the decrement would take semval below zero.
 *
 * Returns 0 if the operation was possible.
 * Returns 1 if the operation is impossible, the caller must sleep.
 * Returns <0 for error codes.
 */
static int perform_atomic_semop_slow(struct sem_array *sma, struct sem_queue *q)
{
        int result, sem_op, nsops;
        struct pid *pid;
        struct sembuf *sop;
        struct sem *curr;
        struct sembuf *sops;
        struct sem_undo *un;

        sops = q->sops;
        nsops = q->nsops;
        un = q->undo;

        for (sop = sops; sop < sops + nsops; sop++) {
                int idx = array_index_nospec(sop->sem_num, sma->sem_nsems);
                curr = &sma->sems[idx];
                sem_op = sop->sem_op;
                result = curr->semval;

                if (!sem_op && result)
                        goto would_block;

                result += sem_op;
                if (result < 0)
                        goto would_block;
                if (result > SEMVMX)
                        goto out_of_range;

                if (sop->sem_flg & SEM_UNDO) {
                        int undo = un->semadj[sop->sem_num] - sem_op;
                        /* Exceeding the undo range is an error. */
                        if (undo < (-SEMAEM - 1) || undo > SEMAEM)
                                goto out_of_range;
                        un->semadj[sop->sem_num] = undo;
                }

                curr->semval = result;
        }

        sop--;
        pid = q->pid;
        while (sop >= sops) {
                ipc_update_pid(&sma->sems[sop->sem_num].sempid, pid);
                sop--;
        }

        return 0;

out_of_range:
        result = -ERANGE;
        goto undo;

would_block:
        q->blocking = sop;

        if (sop->sem_flg & IPC_NOWAIT)
                result = -EAGAIN;
        else
                result = 1;

undo:
        sop--;
        while (sop >= sops) {
                sem_op = sop->sem_op;
                sma->sems[sop->sem_num].semval -= sem_op;
                if (sop->sem_flg & SEM_UNDO)
                        un->semadj[sop->sem_num] += sem_op;
                sop--;
        }

        return result;
}

static int perform_atomic_semop(struct sem_array *sma, struct sem_queue *q)
{
        int result, sem_op, nsops;
        struct sembuf *sop;
        struct sem *curr;
        struct sembuf *sops;
        struct sem_undo *un;

        sops = q->sops;
        nsops = q->nsops;
        un = q->undo;

        if (unlikely(q->dupsop))
                return perform_atomic_semop_slow(sma, q);

        /*
         * We scan the semaphore set twice, first to ensure that the entire
         * operation can succeed, therefore avoiding any pointless writes
         * to shared memory and having to undo such changes in order to block
         * until the operations can go through.
         */
        for (sop = sops; sop < sops + nsops; sop++) {
                int idx = array_index_nospec(sop->sem_num, sma->sem_nsems);

                curr = &sma->sems[idx];
                sem_op = sop->sem_op;
                result = curr->semval;

                if (!sem_op && result)
                        goto would_block; /* wait-for-zero */

                result += sem_op;
                if (result < 0)
                        goto would_block;

                if (result > SEMVMX)
                        return -ERANGE;

                if (sop->sem_flg & SEM_UNDO) {
                        int undo = un->semadj[sop->sem_num] - sem_op;

                        /* Exceeding the undo range is an error. */
                        if (undo < (-SEMAEM - 1) || undo > SEMAEM)
                                return -ERANGE;
                }
        }

        for (sop = sops; sop < sops + nsops; sop++) {
                curr = &sma->sems[sop->sem_num];
                sem_op = sop->sem_op;
                result = curr->semval;

                if (sop->sem_flg & SEM_UNDO) {
                        int undo = un->semadj[sop->sem_num] - sem_op;

                        un->semadj[sop->sem_num] = undo;
                }
                curr->semval += sem_op;
                ipc_update_pid(&curr->sempid, q->pid);
        }

        return 0;

would_block:
        q->blocking = sop;
        return sop->sem_flg & IPC_NOWAIT ? -EAGAIN : 1;
}
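
/*
 * Illustrative user-space sketch (assuming standard <sys/sem.h> and an
 * existing "semid"): one semop() call that exercises cases (2) and (1)
 * from the comment above - first wait for sems[0] to reach zero, then
 * increment it, recording an undo adjustment for process exit.
 *
 *        struct sembuf sops[2] = {
 *                { .sem_num = 0, .sem_op = 0,  .sem_flg = 0        },
 *                { .sem_num = 0, .sem_op = +1, .sem_flg = SEM_UNDO },
 *        };
 *        semop(semid, sops, 2);
 *
 * Both entries name sem_num 0, so the dup detection elsewhere in this
 * file sets q->dupsop and the _slow variant above is used.
 */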

static inline void wake_up_sem_queue_prepare(struct sem_queue *q, int error,
                                             struct wake_q_head *wake_q)
{
        wake_q_add(wake_q, q->sleeper);
        /*
         * Rely on the above implicit barrier, such that we can
         * ensure that we hold reference to the task before setting
         * q->status. Otherwise we could race with do_exit if the
         * task is awoken by an external event before calling
         * wake_up_process().
         */
        WRITE_ONCE(q->status, error);
}

static void unlink_queue(struct sem_array *sma, struct sem_queue *q)
{
        list_del(&q->list);
        if (q->nsops > 1)
                sma->complex_count--;
}

/** check_restart(sma, q)
 * @sma: semaphore array
 * @q: the operation that just completed
 *
 * update_queue is O(N^2) when it restarts scanning the whole queue of
 * waiting operations. Therefore this function checks if the restart is
 * really necessary. It is called after a previously waiting operation
 * modified the array.
 * Note that wait-for-zero operations are handled without restart.
 */
static inline int check_restart(struct sem_array *sma, struct sem_queue *q)
{
        /* pending complex alter operations are too difficult to analyse */
        if (!list_empty(&sma->pending_alter))
                return 1;

        /* we were a sleeping complex operation. Too difficult */
        if (q->nsops > 1)
                return 1;

        /* It is impossible that someone waits for the new value:
         * - complex operations always restart.
         * - wait-for-zero are handled separately.
         * - q is a previously sleeping simple operation that
         *   altered the array. It must be a decrement, because
         *   simple increments never sleep.
         * - If there are older (higher priority) decrements
         *   in the queue, then they have observed the original
         *   semval value and couldn't proceed. The operation
         *   decremented the value - thus they won't proceed either.
         */
        return 0;
}

/**
 * wake_const_ops - wake up non-alter tasks
 * @sma: semaphore array.
 * @semnum: semaphore that was modified.
 * @wake_q: lockless wake-queue head.
 *
 * wake_const_ops must be called after a semaphore in a semaphore array
 * was set to 0. If complex const operations are pending, wake_const_ops must
 * be called with semnum = -1, as well as with the number of each modified
 * semaphore.
 * The tasks that must be woken up are added to @wake_q. The return code
 * is stored in q->status.
 * The function returns 1 if at least one operation was completed successfully.
 */
static int wake_const_ops(struct sem_array *sma, int semnum,
                          struct wake_q_head *wake_q)
{
        struct sem_queue *q, *tmp;
        struct list_head *pending_list;
        int semop_completed = 0;

        if (semnum == -1)
                pending_list = &sma->pending_const;
        else
                pending_list = &sma->sems[semnum].pending_const;

        list_for_each_entry_safe(q, tmp, pending_list, list) {
                int error = perform_atomic_semop(sma, q);

                if (error > 0)
                        continue;
                /* operation completed, remove from queue & wakeup */
                unlink_queue(sma, q);

                wake_up_sem_queue_prepare(q, error, wake_q);
                if (error == 0)
                        semop_completed = 1;
        }

        return semop_completed;
}

/**
 * do_smart_wakeup_zero - wakeup all wait for zero tasks
 * @sma: semaphore array
 * @sops: operations that were performed
 * @nsops: number of operations
 * @wake_q: lockless wake-queue head
 *
 * Checks all required queues for wait-for-zero operations, based
 * on the actual changes that were performed on the semaphore array.
 * The function returns 1 if at least one operation was completed successfully.
 */
static int do_smart_wakeup_zero(struct sem_array *sma, struct sembuf *sops,
                                int nsops, struct wake_q_head *wake_q)
{
        int i;
        int semop_completed = 0;
        int got_zero = 0;

        /* first: the per-semaphore queues, if known */
        if (sops) {
                for (i = 0; i < nsops; i++) {
                        int num = sops[i].sem_num;

                        if (sma->sems[num].semval == 0) {
                                got_zero = 1;
                                semop_completed |= wake_const_ops(sma, num, wake_q);
                        }
                }
        } else {
                /*
                 * No sops means modified semaphores not known.
                 * Assume all were changed.
                 */
                for (i = 0; i < sma->sem_nsems; i++) {
                        if (sma->sems[i].semval == 0) {
                                got_zero = 1;
                                semop_completed |= wake_const_ops(sma, i, wake_q);
                        }
                }
        }
        /*
         * If one of the modified semaphores got 0,
         * then check the global queue, too.
         */
        if (got_zero)
                semop_completed |= wake_const_ops(sma, -1, wake_q);

        return semop_completed;
}


/**
 * update_queue - look for tasks that can be completed.
 * @sma: semaphore array.
 * @semnum: semaphore that was modified.
 * @wake_q: lockless wake-queue head.
 *
 * update_queue must be called after a semaphore in a semaphore array
 * was modified. If multiple semaphores were modified, update_queue must
 * be called with semnum = -1, as well as with the number of each modified
 * semaphore.
 * The tasks that must be woken up are added to @wake_q. The return code
 * is stored in q->status.
 * The function internally checks if const operations can now succeed.
 *
 * The function returns 1 if at least one semop was completed successfully.
 */
static int update_queue(struct sem_array *sma, int semnum, struct wake_q_head *wake_q)
{
        struct sem_queue *q, *tmp;
        struct list_head *pending_list;
        int semop_completed = 0;

        if (semnum == -1)
                pending_list = &sma->pending_alter;
        else
                pending_list = &sma->sems[semnum].pending_alter;

again:
        list_for_each_entry_safe(q, tmp, pending_list, list) {
                int error, restart;

                /* If we are scanning the single-sop, per-semaphore list of
                 * one semaphore and that semaphore is 0, then it is not
                 * necessary to scan further: simple increments
                 * that affect only one entry succeed immediately and cannot
                 * be in the per-semaphore pending queue, and decrements
                 * cannot be successful if the value is already 0.
                 */
                if (semnum != -1 && sma->sems[semnum].semval == 0)
                        break;

                error = perform_atomic_semop(sma, q);

                /* Does q->sleeper still need to sleep? */
                if (error > 0)
                        continue;

                unlink_queue(sma, q);

                if (error) {
                        restart = 0;
                } else {
                        semop_completed = 1;
                        do_smart_wakeup_zero(sma, q->sops, q->nsops, wake_q);
                        restart = check_restart(sma, q);
                }

                wake_up_sem_queue_prepare(q, error, wake_q);
                if (restart)
                        goto again;
        }
        return semop_completed;
}

/**
 * set_semotime - set sem_otime
 * @sma: semaphore array
 * @sops: operations that modified the array, may be NULL
 *
 * sem_otime is replicated to avoid cache line thrashing.
 * This function sets one instance to the current time.
 */
static void set_semotime(struct sem_array *sma, struct sembuf *sops)
{
        if (sops == NULL) {
                sma->sems[0].sem_otime = ktime_get_real_seconds();
        } else {
                sma->sems[sops[0].sem_num].sem_otime =
                                                ktime_get_real_seconds();
        }
}

/**
 * do_smart_update - optimized update_queue
 * @sma: semaphore array
 * @sops: operations that were performed
 * @nsops: number of operations
 * @otime: force setting otime
 * @wake_q: lockless wake-queue head
 *
 * do_smart_update() does the required calls to update_queue and wakeup_zero,
 * based on the actual changes that were performed on the semaphore array.
 * Note that the function does not do the actual wake-up: the caller is
 * responsible for calling wake_up_q().
 * It is safe to perform this call after dropping all locks.
 */
static void do_smart_update(struct sem_array *sma, struct sembuf *sops, int nsops,
                            int otime, struct wake_q_head *wake_q)
{
        int i;

        otime |= do_smart_wakeup_zero(sma, sops, nsops, wake_q);

        if (!list_empty(&sma->pending_alter)) {
                /* semaphore array uses the global queue - just process it. */
                otime |= update_queue(sma, -1, wake_q);
        } else {
                if (!sops) {
                        /*
                         * No sops, thus the modified semaphores are not
                         * known. Check all.
                         */
                        for (i = 0; i < sma->sem_nsems; i++)
                                otime |= update_queue(sma, i, wake_q);
                } else {
                        /*
                         * Check the semaphores that were increased:
                         * - No complex ops, thus all sleeping ops are
                         *   decreases.
                         * - if we decreased the value, then any sleeping
                         *   semaphore ops won't be able to run: If the
                         *   previous value was too small, then the new
                         *   value will be too small, too.
                         */
                        for (i = 0; i < nsops; i++) {
                                if (sops[i].sem_op > 0) {
                                        otime |= update_queue(sma,
                                                              sops[i].sem_num, wake_q);
                                }
                        }
                }
        }
        if (otime)
                set_semotime(sma, sops);
}
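
/*
 * Sketch of the calling convention (cf. semctl_setval() below, which
 * follows exactly this pattern):
 *
 *        DEFINE_WAKE_Q(wake_q);
 *
 *        sem_lock(sma, NULL, -1);
 *        ... modify the array ...
 *        do_smart_update(sma, NULL, 0, 0, &wake_q);
 *        sem_unlock(sma, -1);
 *        rcu_read_unlock();
 *        wake_up_q(&wake_q);    // actual wake-ups, after all locks dropped
 */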

/*
 * check_qop: Test if a queued operation sleeps on the semaphore semnum
 */
static int check_qop(struct sem_array *sma, int semnum, struct sem_queue *q,
                        bool count_zero)
{
        struct sembuf *sop = q->blocking;

        /*
         * Linux always (since 0.99.10) reported a task as sleeping on all
         * semaphores. This violates SUS, therefore it was changed to the
         * standard compliant behavior.
         * Give the administrators a chance to notice that an application
         * might misbehave because it relies on the Linux behavior.
         */
        pr_info_once("semctl(GETNCNT/GETZCNT) has been Single Unix Specification compliant since 3.16.\n"
                        "The task %s (%d) triggered the difference, watch for misbehavior.\n",
                        current->comm, task_pid_nr(current));

        if (sop->sem_num != semnum)
                return 0;

        if (count_zero && sop->sem_op == 0)
                return 1;
        if (!count_zero && sop->sem_op < 0)
                return 1;

        return 0;
}

/* The following counts are associated with each semaphore:
 *   semncnt        number of tasks waiting on semval being nonzero
 *   semzcnt        number of tasks waiting on semval being zero
 *
 * By definition, a task waits only on the semaphore of the first semop
 * that cannot proceed, even if additional operations would block, too.
 */
static int count_semcnt(struct sem_array *sma, ushort semnum,
                        bool count_zero)
{
        struct list_head *l;
        struct sem_queue *q;
        int semcnt;

        semcnt = 0;
        /* First: check the simple operations. They are easy to evaluate */
        if (count_zero)
                l = &sma->sems[semnum].pending_const;
        else
                l = &sma->sems[semnum].pending_alter;

        list_for_each_entry(q, l, list) {
                /* all tasks on a per-semaphore list sleep on exactly
                 * that semaphore
                 */
                semcnt++;
        }

        /* Then: check the complex operations. */
        list_for_each_entry(q, &sma->pending_alter, list) {
                semcnt += check_qop(sma, semnum, q, count_zero);
        }
        if (count_zero) {
                list_for_each_entry(q, &sma->pending_const, list) {
                        semcnt += check_qop(sma, semnum, q, count_zero);
                }
        }
        return semcnt;
}
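
/*
 * Illustrative user-space view (assuming standard <sys/sem.h> and an
 * existing "semid"): the counts computed above are what semctl() returns
 * for the GETNCNT and GETZCNT commands handled in semctl_main() below.
 *
 *        int ncnt = semctl(semid, 0, GETNCNT); // tasks waiting for semval to increase
 *        int zcnt = semctl(semid, 0, GETZCNT); // tasks waiting for semval == 0
 */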

/* Free a semaphore set. freeary() is called with sem_ids.rwsem locked
 * as a writer and the spinlock for this semaphore set held. sem_ids.rwsem
 * remains locked on exit.
 */
static void freeary(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
{
        struct sem_undo *un, *tu;
        struct sem_queue *q, *tq;
        struct sem_array *sma = container_of(ipcp, struct sem_array, sem_perm);
        int i;
        DEFINE_WAKE_Q(wake_q);

        /* Free the existing undo structures for this semaphore set.  */
        ipc_assert_locked_object(&sma->sem_perm);
        list_for_each_entry_safe(un, tu, &sma->list_id, list_id) {
                list_del(&un->list_id);
                spin_lock(&un->ulp->lock);
                un->semid = -1;
                list_del_rcu(&un->list_proc);
                spin_unlock(&un->ulp->lock);
                kfree_rcu(un, rcu);
        }

        /* Wake up all pending processes and let them fail with EIDRM. */
        list_for_each_entry_safe(q, tq, &sma->pending_const, list) {
                unlink_queue(sma, q);
                wake_up_sem_queue_prepare(q, -EIDRM, &wake_q);
        }

        list_for_each_entry_safe(q, tq, &sma->pending_alter, list) {
                unlink_queue(sma, q);
                wake_up_sem_queue_prepare(q, -EIDRM, &wake_q);
        }
        for (i = 0; i < sma->sem_nsems; i++) {
                struct sem *sem = &sma->sems[i];
                list_for_each_entry_safe(q, tq, &sem->pending_const, list) {
                        unlink_queue(sma, q);
                        wake_up_sem_queue_prepare(q, -EIDRM, &wake_q);
                }
                list_for_each_entry_safe(q, tq, &sem->pending_alter, list) {
                        unlink_queue(sma, q);
                        wake_up_sem_queue_prepare(q, -EIDRM, &wake_q);
                }
                ipc_update_pid(&sem->sempid, NULL);
        }

        /* Remove the semaphore set from the IDR */
        sem_rmid(ns, sma);
        sem_unlock(sma, -1);
        rcu_read_unlock();

        wake_up_q(&wake_q);
        ns->used_sems -= sma->sem_nsems;
        ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
}

static unsigned long copy_semid_to_user(void __user *buf, struct semid64_ds *in, int version)
{
        switch (version) {
        case IPC_64:
                return copy_to_user(buf, in, sizeof(*in));
        case IPC_OLD:
            {
                struct semid_ds out;

                memset(&out, 0, sizeof(out));

                ipc64_perm_to_ipc_perm(&in->sem_perm, &out.sem_perm);

                out.sem_otime   = in->sem_otime;
                out.sem_ctime   = in->sem_ctime;
                out.sem_nsems   = in->sem_nsems;

                return copy_to_user(buf, &out, sizeof(out));
            }
        default:
                return -EINVAL;
        }
}

static time64_t get_semotime(struct sem_array *sma)
{
        int i;
        time64_t res;

        res = sma->sems[0].sem_otime;
        for (i = 1; i < sma->sem_nsems; i++) {
                time64_t to = sma->sems[i].sem_otime;

                if (to > res)
                        res = to;
        }
        return res;
}

static int semctl_stat(struct ipc_namespace *ns, int semid,
                         int cmd, struct semid64_ds *semid64)
{
        struct sem_array *sma;
        time64_t semotime;
        int err;

        memset(semid64, 0, sizeof(*semid64));

        rcu_read_lock();
        if (cmd == SEM_STAT || cmd == SEM_STAT_ANY) {
                sma = sem_obtain_object(ns, semid);
                if (IS_ERR(sma)) {
                        err = PTR_ERR(sma);
                        goto out_unlock;
                }
        } else { /* IPC_STAT */
                sma = sem_obtain_object_check(ns, semid);
                if (IS_ERR(sma)) {
                        err = PTR_ERR(sma);
                        goto out_unlock;
                }
        }

        /* see comment for SHM_STAT_ANY */
        if (cmd == SEM_STAT_ANY)
                audit_ipc_obj(&sma->sem_perm);
        else {
                err = -EACCES;
                if (ipcperms(ns, &sma->sem_perm, S_IRUGO))
                        goto out_unlock;
        }

        err = security_sem_semctl(&sma->sem_perm, cmd);
        if (err)
                goto out_unlock;

        ipc_lock_object(&sma->sem_perm);

        if (!ipc_valid_object(&sma->sem_perm)) {
                ipc_unlock_object(&sma->sem_perm);
                err = -EIDRM;
                goto out_unlock;
        }

        kernel_to_ipc64_perm(&sma->sem_perm, &semid64->sem_perm);
        semotime = get_semotime(sma);
        semid64->sem_otime = semotime;
        semid64->sem_ctime = sma->sem_ctime;
#ifndef CONFIG_64BIT
        semid64->sem_otime_high = semotime >> 32;
        semid64->sem_ctime_high = sma->sem_ctime >> 32;
#endif
        semid64->sem_nsems = sma->sem_nsems;

        if (cmd == IPC_STAT) {
                /*
                 * As defined in SUS:
                 * Return 0 on success
                 */
                err = 0;
        } else {
                /*
                 * SEM_STAT and SEM_STAT_ANY (both Linux specific)
                 * Return the full id, including the sequence number
                 */
                err = sma->sem_perm.id;
        }
        ipc_unlock_object(&sma->sem_perm);
out_unlock:
        rcu_read_unlock();
        return err;
}

static int semctl_info(struct ipc_namespace *ns, int semid,
                         int cmd, void __user *p)
{
        struct seminfo seminfo;
        int max_idx;
        int err;

        err = security_sem_semctl(NULL, cmd);
        if (err)
                return err;

        memset(&seminfo, 0, sizeof(seminfo));
        seminfo.semmni = ns->sc_semmni;
        seminfo.semmns = ns->sc_semmns;
        seminfo.semmsl = ns->sc_semmsl;
        seminfo.semopm = ns->sc_semopm;
        seminfo.semvmx = SEMVMX;
        seminfo.semmnu = SEMMNU;
        seminfo.semmap = SEMMAP;
        seminfo.semume = SEMUME;
        down_read(&sem_ids(ns).rwsem);
        if (cmd == SEM_INFO) {
                seminfo.semusz = sem_ids(ns).in_use;
                seminfo.semaem = ns->used_sems;
        } else {
                seminfo.semusz = SEMUSZ;
                seminfo.semaem = SEMAEM;
        }
        max_idx = ipc_get_maxidx(&sem_ids(ns));
        up_read(&sem_ids(ns).rwsem);
        if (copy_to_user(p, &seminfo, sizeof(struct seminfo)))
                return -EFAULT;
        return (max_idx < 0) ? 0 : max_idx;
}

static int semctl_setval(struct ipc_namespace *ns, int semid, int semnum,
                int val)
{
        struct sem_undo *un;
        struct sem_array *sma;
        struct sem *curr;
        int err;
        DEFINE_WAKE_Q(wake_q);

        if (val > SEMVMX || val < 0)
                return -ERANGE;

        rcu_read_lock();
        sma = sem_obtain_object_check(ns, semid);
        if (IS_ERR(sma)) {
                rcu_read_unlock();
                return PTR_ERR(sma);
        }

        if (semnum < 0 || semnum >= sma->sem_nsems) {
                rcu_read_unlock();
                return -EINVAL;
        }

        if (ipcperms(ns, &sma->sem_perm, S_IWUGO)) {
                rcu_read_unlock();
                return -EACCES;
        }

        err = security_sem_semctl(&sma->sem_perm, SETVAL);
        if (err) {
                rcu_read_unlock();
                return -EACCES;
        }

        sem_lock(sma, NULL, -1);

        if (!ipc_valid_object(&sma->sem_perm)) {
                sem_unlock(sma, -1);
                rcu_read_unlock();
                return -EIDRM;
        }

        semnum = array_index_nospec(semnum, sma->sem_nsems);
        curr = &sma->sems[semnum];

        ipc_assert_locked_object(&sma->sem_perm);
        list_for_each_entry(un, &sma->list_id, list_id)
                un->semadj[semnum] = 0;

        curr->semval = val;
        ipc_update_pid(&curr->sempid, task_tgid(current));
        sma->sem_ctime = ktime_get_real_seconds();
        /* maybe some queued-up processes were waiting for this */
        do_smart_update(sma, NULL, 0, 0, &wake_q);
        sem_unlock(sma, -1);
        rcu_read_unlock();
        wake_up_q(&wake_q);
        return 0;
}

static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
                int cmd, void __user *p)
{
        struct sem_array *sma;
        struct sem *curr;
        int err, nsems;
        ushort fast_sem_io[SEMMSL_FAST];
        ushort *sem_io = fast_sem_io;
        DEFINE_WAKE_Q(wake_q);

        rcu_read_lock();
        sma = sem_obtain_object_check(ns, semid);
        if (IS_ERR(sma)) {
                rcu_read_unlock();
                return PTR_ERR(sma);
        }

        nsems = sma->sem_nsems;

        err = -EACCES;
        if (ipcperms(ns, &sma->sem_perm, cmd == SETALL ? S_IWUGO : S_IRUGO))
                goto out_rcu_wakeup;

        err = security_sem_semctl(&sma->sem_perm, cmd);
        if (err)
                goto out_rcu_wakeup;

        err = -EACCES;
        switch (cmd) {
        case GETALL:
        {
                ushort __user *array = p;
                int i;

                sem_lock(sma, NULL, -1);
                if (!ipc_valid_object(&sma->sem_perm)) {
                        err = -EIDRM;
                        goto out_unlock;
                }
                if (nsems > SEMMSL_FAST) {
                        if (!ipc_rcu_getref(&sma->sem_perm)) {
                                err = -EIDRM;
                                goto out_unlock;
                        }
                        sem_unlock(sma, -1);
                        rcu_read_unlock();
                        sem_io = kvmalloc_array(nsems, sizeof(ushort),
                                                GFP_KERNEL);
                        if (sem_io == NULL) {
                                ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
                                return -ENOMEM;
                        }

                        rcu_read_lock();
                        sem_lock_and_putref(sma);
                        if (!ipc_valid_object(&sma->sem_perm)) {
                                err = -EIDRM;
                                goto out_unlock;
                        }
                }
                for (i = 0; i < sma->sem_nsems; i++)
                        sem_io[i] = sma->sems[i].semval;
                sem_unlock(sma, -1);
                rcu_read_unlock();
                err = 0;
                if (copy_to_user(array, sem_io, nsems*sizeof(ushort)))
                        err = -EFAULT;
                goto out_free;
        }
        case SETALL:
        {
                int i;
                struct sem_undo *un;

                if (!ipc_rcu_getref(&sma->sem_perm)) {
                        err = -EIDRM;
                        goto out_rcu_wakeup;
                }
                rcu_read_unlock();

                if (nsems > SEMMSL_FAST) {
                        sem_io = kvmalloc_array(nsems, sizeof(ushort),
                                                GFP_KERNEL);
                        if (sem_io == NULL) {
                                ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
                                return -ENOMEM;
                        }
                }

                if (copy_from_user(sem_io, p, nsems*sizeof(ushort))) {
                        ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
                        err = -EFAULT;
                        goto out_free;
                }

                for (i = 0; i < nsems; i++) {
                        if (sem_io[i] > SEMVMX) {
                                ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
                                err = -ERANGE;
                                goto out_free;
                        }
                }
                rcu_read_lock();
                sem_lock_and_putref(sma);
                if (!ipc_valid_object(&sma->sem_perm)) {
                        err = -EIDRM;
                        goto out_unlock;
                }

                for (i = 0; i < nsems; i++) {
                        sma->sems[i].semval = sem_io[i];
                        ipc_update_pid(&sma->sems[i].sempid, task_tgid(current));
                }

                ipc_assert_locked_object(&sma->sem_perm);
                list_for_each_entry(un, &sma->list_id, list_id) {
                        for (i = 0; i < nsems; i++)
                                un->semadj[i] = 0;
                }
                sma->sem_ctime = ktime_get_real_seconds();
                /* maybe some queued-up processes were waiting for this */
                do_smart_update(sma, NULL, 0, 0, &wake_q);
                err = 0;
                goto out_unlock;
        }
1511        /* GETVAL, GETPID, GETNCNT, GETZCNT: fall-through */
1512        }
1513        err = -EINVAL;
1514        if (semnum < 0 || semnum >= nsems)
1515                goto out_rcu_wakeup;
1516
1517        sem_lock(sma, NULL, -1);
1518        if (!ipc_valid_object(&sma->sem_perm)) {
1519                err = -EIDRM;
1520                goto out_unlock;
1521        }
1522
1523        semnum = array_index_nospec(semnum, nsems);
1524        curr = &sma->sems[semnum];
1525
1526        switch (cmd) {
1527        case GETVAL:
1528                err = curr->semval;
1529                goto out_unlock;
1530        case GETPID:
1531                err = pid_vnr(curr->sempid);
1532                goto out_unlock;
1533        case GETNCNT:
1534                err = count_semcnt(sma, semnum, 0);
1535                goto out_unlock;
1536        case GETZCNT:
1537                err = count_semcnt(sma, semnum, 1);
1538                goto out_unlock;
1539        }
1540
1541out_unlock:
1542        sem_unlock(sma, -1);
1543out_rcu_wakeup:
1544        rcu_read_unlock();
1545        wake_up_q(&wake_q);
1546out_free:
1547        if (sem_io != fast_sem_io)
1548                kvfree(sem_io);
1549        return err;
1550}
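
/*
 * Illustrative userspace sketch (not kernel code) of the GETALL path
 * above. Per semctl(2) the caller defines union semun itself; error
 * handling is trimmed and the array size is assumed to be <= 64:
 *
 *	union semun { int val; struct semid_ds *buf; unsigned short *array; };
 *
 *	unsigned short vals[64];
 *	union semun arg = { .array = vals };
 *	if (semctl(semid, 0, GETALL, arg) < 0)
 *		perror("semctl(GETALL)");
 */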
1551
1552static inline unsigned long
1553copy_semid_from_user(struct semid64_ds *out, void __user *buf, int version)
1554{
1555        switch (version) {
1556        case IPC_64:
1557                if (copy_from_user(out, buf, sizeof(*out)))
1558                        return -EFAULT;
1559                return 0;
1560        case IPC_OLD:
1561            {
1562                struct semid_ds tbuf_old;
1563
1564                if (copy_from_user(&tbuf_old, buf, sizeof(tbuf_old)))
1565                        return -EFAULT;
1566
1567                out->sem_perm.uid       = tbuf_old.sem_perm.uid;
1568                out->sem_perm.gid       = tbuf_old.sem_perm.gid;
1569                out->sem_perm.mode      = tbuf_old.sem_perm.mode;
1570
1571                return 0;
1572            }
1573        default:
1574                return -EINVAL;
1575        }
1576}
1577
1578/*
1579 * This function handles some semctl commands which require the rwsem
1580 * to be held in write mode.
1581 * NOTE: no locks must be held on entry; the rwsem is taken inside this function.
1582 */
1583static int semctl_down(struct ipc_namespace *ns, int semid,
1584                       int cmd, struct semid64_ds *semid64)
1585{
1586        struct sem_array *sma;
1587        int err;
1588        struct kern_ipc_perm *ipcp;
1589
1590        down_write(&sem_ids(ns).rwsem);
1591        rcu_read_lock();
1592
1593        ipcp = ipcctl_obtain_check(ns, &sem_ids(ns), semid, cmd,
1594                                      &semid64->sem_perm, 0);
1595        if (IS_ERR(ipcp)) {
1596                err = PTR_ERR(ipcp);
1597                goto out_unlock1;
1598        }
1599
1600        sma = container_of(ipcp, struct sem_array, sem_perm);
1601
1602        err = security_sem_semctl(&sma->sem_perm, cmd);
1603        if (err)
1604                goto out_unlock1;
1605
1606        switch (cmd) {
1607        case IPC_RMID:
1608                sem_lock(sma, NULL, -1);
1609                /* freeary unlocks the ipc object and rcu */
1610                freeary(ns, ipcp);
1611                goto out_up;
1612        case IPC_SET:
1613                sem_lock(sma, NULL, -1);
1614                err = ipc_update_perm(&semid64->sem_perm, ipcp);
1615                if (err)
1616                        goto out_unlock0;
1617                sma->sem_ctime = ktime_get_real_seconds();
1618                break;
1619        default:
1620                err = -EINVAL;
1621                goto out_unlock1;
1622        }
1623
1624out_unlock0:
1625        sem_unlock(sma, -1);
1626out_unlock1:
1627        rcu_read_unlock();
1628out_up:
1629        up_write(&sem_ids(ns).rwsem);
1630        return err;
1631}
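
/*
 * Illustrative userspace sketch (not kernel code) of the two commands
 * routed through semctl_down(), with union semun as in semctl(2):
 *
 *	struct semid_ds ds;
 *	union semun arg = { .buf = &ds };
 *
 *	semctl(semid, 0, IPC_STAT, arg);	fetch current perms
 *	ds.sem_perm.mode = 0600;
 *	semctl(semid, 0, IPC_SET, arg);		update; bumps sem_ctime
 *	semctl(semid, 0, IPC_RMID);		destroy the whole array
 */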
1632
1633static long ksys_semctl(int semid, int semnum, int cmd, unsigned long arg, int version)
1634{
1635        struct ipc_namespace *ns;
1636        void __user *p = (void __user *)arg;
1637        struct semid64_ds semid64;
1638        int err;
1639
1640        if (semid < 0)
1641                return -EINVAL;
1642
1643        ns = current->nsproxy->ipc_ns;
1644
1645        switch (cmd) {
1646        case IPC_INFO:
1647        case SEM_INFO:
1648                return semctl_info(ns, semid, cmd, p);
1649        case IPC_STAT:
1650        case SEM_STAT:
1651        case SEM_STAT_ANY:
1652                err = semctl_stat(ns, semid, cmd, &semid64);
1653                if (err < 0)
1654                        return err;
1655                if (copy_semid_to_user(p, &semid64, version))
1656                        err = -EFAULT;
1657                return err;
1658        case GETALL:
1659        case GETVAL:
1660        case GETPID:
1661        case GETNCNT:
1662        case GETZCNT:
1663        case SETALL:
1664                return semctl_main(ns, semid, semnum, cmd, p);
1665        case SETVAL: {
1666                int val;
1667#if defined(CONFIG_64BIT) && defined(__BIG_ENDIAN)
1668                /* big-endian 64bit */
1669                val = arg >> 32;
1670#else
1671                /* 32bit or little-endian 64bit */
1672                val = arg;
1673#endif
1674                return semctl_setval(ns, semid, semnum, val);
1675        }
1676        case IPC_SET:
1677                if (copy_semid_from_user(&semid64, p, version))
1678                        return -EFAULT;
1679                /* fall through */
1680        case IPC_RMID:
1681                return semctl_down(ns, semid, cmd, &semid64);
1682        default:
1683                return -EINVAL;
1684        }
1685}
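
/*
 * Note on the SETVAL case above: userspace passes "union semun" by value,
 * so the int occupies the first four bytes of the word-sized argument.
 * On a 64-bit big-endian machine those bytes form the upper half of the
 * register, hence the "arg >> 32"; everywhere else plain truncation
 * yields the right value.
 */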
1686
1687SYSCALL_DEFINE4(semctl, int, semid, int, semnum, int, cmd, unsigned long, arg)
1688{
1689        return ksys_semctl(semid, semnum, cmd, arg, IPC_64);
1690}
1691
1692#ifdef CONFIG_ARCH_WANT_IPC_PARSE_VERSION
1693long ksys_old_semctl(int semid, int semnum, int cmd, unsigned long arg)
1694{
1695        int version = ipc_parse_version(&cmd);
1696
1697        return ksys_semctl(semid, semnum, cmd, arg, version);
1698}
1699
1700SYSCALL_DEFINE4(old_semctl, int, semid, int, semnum, int, cmd, unsigned long, arg)
1701{
1702        return ksys_old_semctl(semid, semnum, cmd, arg);
1703}
1704#endif
1705
1706#ifdef CONFIG_COMPAT
1707
1708struct compat_semid_ds {
1709        struct compat_ipc_perm sem_perm;
1710        old_time32_t sem_otime;
1711        old_time32_t sem_ctime;
1712        compat_uptr_t sem_base;
1713        compat_uptr_t sem_pending;
1714        compat_uptr_t sem_pending_last;
1715        compat_uptr_t undo;
1716        unsigned short sem_nsems;
1717};
1718
1719static int copy_compat_semid_from_user(struct semid64_ds *out, void __user *buf,
1720                                        int version)
1721{
1722        memset(out, 0, sizeof(*out));
1723        if (version == IPC_64) {
1724                struct compat_semid64_ds __user *p = buf;
1725                return get_compat_ipc64_perm(&out->sem_perm, &p->sem_perm);
1726        } else {
1727                struct compat_semid_ds __user *p = buf;
1728                return get_compat_ipc_perm(&out->sem_perm, &p->sem_perm);
1729        }
1730}
1731
1732static int copy_compat_semid_to_user(void __user *buf, struct semid64_ds *in,
1733                                        int version)
1734{
1735        if (version == IPC_64) {
1736                struct compat_semid64_ds v;
1737                memset(&v, 0, sizeof(v));
1738                to_compat_ipc64_perm(&v.sem_perm, &in->sem_perm);
1739                v.sem_otime      = lower_32_bits(in->sem_otime);
1740                v.sem_otime_high = upper_32_bits(in->sem_otime);
1741                v.sem_ctime      = lower_32_bits(in->sem_ctime);
1742                v.sem_ctime_high = upper_32_bits(in->sem_ctime);
1743                v.sem_nsems = in->sem_nsems;
1744                return copy_to_user(buf, &v, sizeof(v));
1745        } else {
1746                struct compat_semid_ds v;
1747                memset(&v, 0, sizeof(v));
1748                to_compat_ipc_perm(&v.sem_perm, &in->sem_perm);
1749                v.sem_otime = in->sem_otime;
1750                v.sem_ctime = in->sem_ctime;
1751                v.sem_nsems = in->sem_nsems;
1752                return copy_to_user(buf, &v, sizeof(v));
1753        }
1754}
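
/*
 * Worked example for the IPC_64 branch above: a sem_otime of
 * 0x100000000 (just past the unsigned 32-bit range, i.e. in 2106)
 * splits into sem_otime = 0x0 and sem_otime_high = 0x1, so compat
 * userspace that knows about the _high fields can reassemble the full
 * 64-bit time.
 */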
1755
1756static long compat_ksys_semctl(int semid, int semnum, int cmd, int arg, int version)
1757{
1758        void __user *p = compat_ptr(arg);
1759        struct ipc_namespace *ns;
1760        struct semid64_ds semid64;
1761        int err;
1762
1763        ns = current->nsproxy->ipc_ns;
1764
1765        if (semid < 0)
1766                return -EINVAL;
1767
1768        switch (cmd & (~IPC_64)) {
1769        case IPC_INFO:
1770        case SEM_INFO:
1771                return semctl_info(ns, semid, cmd, p);
1772        case IPC_STAT:
1773        case SEM_STAT:
1774        case SEM_STAT_ANY:
1775                err = semctl_stat(ns, semid, cmd, &semid64);
1776                if (err < 0)
1777                        return err;
1778                if (copy_compat_semid_to_user(p, &semid64, version))
1779                        err = -EFAULT;
1780                return err;
1781        case GETVAL:
1782        case GETPID:
1783        case GETNCNT:
1784        case GETZCNT:
1785        case GETALL:
1786        case SETALL:
1787                return semctl_main(ns, semid, semnum, cmd, p);
1788        case SETVAL:
1789                return semctl_setval(ns, semid, semnum, arg);
1790        case IPC_SET:
1791                if (copy_compat_semid_from_user(&semid64, p, version))
1792                        return -EFAULT;
1793                /* fall through */
1794        case IPC_RMID:
1795                return semctl_down(ns, semid, cmd, &semid64);
1796        default:
1797                return -EINVAL;
1798        }
1799}
1800
1801COMPAT_SYSCALL_DEFINE4(semctl, int, semid, int, semnum, int, cmd, int, arg)
1802{
1803        return compat_ksys_semctl(semid, semnum, cmd, arg, IPC_64);
1804}
1805
1806#ifdef CONFIG_ARCH_WANT_COMPAT_IPC_PARSE_VERSION
1807long compat_ksys_old_semctl(int semid, int semnum, int cmd, int arg)
1808{
1809        int version = compat_ipc_parse_version(&cmd);
1810
1811        return compat_ksys_semctl(semid, semnum, cmd, arg, version);
1812}
1813
1814COMPAT_SYSCALL_DEFINE4(old_semctl, int, semid, int, semnum, int, cmd, int, arg)
1815{
1816        return compat_ksys_old_semctl(semid, semnum, cmd, arg);
1817}
1818#endif
1819#endif
1820
1821/* If the task doesn't already have an undo_list, then allocate one
1822 * here.  We guarantee there is only one thread using this undo list,
1823 * and current is THE ONE.
1824 *
1825 * If this allocation and assignment succeeds, but later
1826 * portions of this code fail, there is no need to free the sem_undo_list.
1827 * Just let it stay associated with the task, and it'll be freed later
1828 * at exit time.
1829 *
1830 * This can block, so callers must hold no locks.
1831 */
1832static inline int get_undo_list(struct sem_undo_list **undo_listp)
1833{
1834        struct sem_undo_list *undo_list;
1835
1836        undo_list = current->sysvsem.undo_list;
1837        if (!undo_list) {
1838                undo_list = kzalloc(sizeof(*undo_list), GFP_KERNEL);
1839                if (undo_list == NULL)
1840                        return -ENOMEM;
1841                spin_lock_init(&undo_list->lock);
1842                refcount_set(&undo_list->refcnt, 1);
1843                INIT_LIST_HEAD(&undo_list->list_proc);
1844
1845                current->sysvsem.undo_list = undo_list;
1846        }
1847        *undo_listp = undo_list;
1848        return 0;
1849}
1850
1851static struct sem_undo *__lookup_undo(struct sem_undo_list *ulp, int semid)
1852{
1853        struct sem_undo *un;
1854
1855        list_for_each_entry_rcu(un, &ulp->list_proc, list_proc) {
1856                if (un->semid == semid)
1857                        return un;
1858        }
1859        return NULL;
1860}
1861
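/*
 * lookup_undo: as __lookup_undo(), but a found entry is additionally
 * moved to the front of list_proc, so that repeated semop() calls on
 * the same array find their undo structure on the first iteration.
 */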
1862static struct sem_undo *lookup_undo(struct sem_undo_list *ulp, int semid)
1863{
1864        struct sem_undo *un;
1865
1866        assert_spin_locked(&ulp->lock);
1867
1868        un = __lookup_undo(ulp, semid);
1869        if (un) {
1870                list_del_rcu(&un->list_proc);
1871                list_add_rcu(&un->list_proc, &ulp->list_proc);
1872        }
1873        return un;
1874}
1875
1876/**
1877 * find_alloc_undo - lookup (and if not present create) undo array
1878 * @ns: namespace
1879 * @semid: semaphore array id
1880 *
1881 * The function looks up (and if not present creates) the undo structure.
1882 * The size of the undo structure depends on the size of the semaphore
1883 * array, thus the alloc path is not that straightforward.
1884 * Lifetime-rules: sem_undo is rcu-protected; on success, the function
1885 * performs an rcu_read_lock().
1886 */
1887static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid)
1888{
1889        struct sem_array *sma;
1890        struct sem_undo_list *ulp;
1891        struct sem_undo *un, *new;
1892        int nsems, error;
1893
1894        error = get_undo_list(&ulp);
1895        if (error)
1896                return ERR_PTR(error);
1897
1898        rcu_read_lock();
1899        spin_lock(&ulp->lock);
1900        un = lookup_undo(ulp, semid);
1901        spin_unlock(&ulp->lock);
1902        if (likely(un != NULL))
1903                goto out;
1904
1905        /* no undo structure around - allocate one. */
1906        /* step 1: figure out the size of the semaphore array */
1907        sma = sem_obtain_object_check(ns, semid);
1908        if (IS_ERR(sma)) {
1909                rcu_read_unlock();
1910                return ERR_CAST(sma);
1911        }
1912
1913        nsems = sma->sem_nsems;
1914        if (!ipc_rcu_getref(&sma->sem_perm)) {
1915                rcu_read_unlock();
1916                un = ERR_PTR(-EIDRM);
1917                goto out;
1918        }
1919        rcu_read_unlock();
1920
1921        /* step 2: allocate new undo structure */
1922        new = kzalloc(sizeof(struct sem_undo) + sizeof(short)*nsems, GFP_KERNEL);
1923        if (!new) {
1924                ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
1925                return ERR_PTR(-ENOMEM);
1926        }
1927
1928        /* step 3: Acquire the lock on semaphore array */
1929        rcu_read_lock();
1930        sem_lock_and_putref(sma);
1931        if (!ipc_valid_object(&sma->sem_perm)) {
1932                sem_unlock(sma, -1);
1933                rcu_read_unlock();
1934                kfree(new);
1935                un = ERR_PTR(-EIDRM);
1936                goto out;
1937        }
1938        spin_lock(&ulp->lock);
1939
1940        /*
1941         * step 4: check for races: did someone else allocate the undo struct?
1942         */
1943        un = lookup_undo(ulp, semid);
1944        if (un) {
1945                kfree(new);
1946                goto success;
1947        }
1948        /* step 5: initialize & link new undo structure */
1949        new->semadj = (short *) &new[1];
1950        new->ulp = ulp;
1951        new->semid = semid;
1952        assert_spin_locked(&ulp->lock);
1953        list_add_rcu(&new->list_proc, &ulp->list_proc);
1954        ipc_assert_locked_object(&sma->sem_perm);
1955        list_add(&new->list_id, &sma->list_id);
1956        un = new;
1957
1958success:
1959        spin_unlock(&ulp->lock);
1960        sem_unlock(sma, -1);
1961out:
1962        return un;
1963}
1964
1965static long do_semtimedop(int semid, struct sembuf __user *tsops,
1966                unsigned nsops, const struct timespec64 *timeout)
1967{
1968        int error = -EINVAL;
1969        struct sem_array *sma;
1970        struct sembuf fast_sops[SEMOPM_FAST];
1971        struct sembuf *sops = fast_sops, *sop;
1972        struct sem_undo *un;
1973        int max, locknum;
1974        bool undos = false, alter = false, dupsop = false;
1975        struct sem_queue queue;
1976        unsigned long dup = 0, jiffies_left = 0;
1977        struct ipc_namespace *ns;
1978
1979        ns = current->nsproxy->ipc_ns;
1980
1981        if (nsops < 1 || semid < 0)
1982                return -EINVAL;
1983        if (nsops > ns->sc_semopm)
1984                return -E2BIG;
1985        if (nsops > SEMOPM_FAST) {
1986                sops = kvmalloc_array(nsops, sizeof(*sops), GFP_KERNEL);
1987                if (sops == NULL)
1988                        return -ENOMEM;
1989        }
1990
1991        if (copy_from_user(sops, tsops, nsops * sizeof(*tsops))) {
1992                error =  -EFAULT;
1993                goto out_free;
1994        }
1995
1996        if (timeout) {
1997                if (timeout->tv_sec < 0 || timeout->tv_nsec < 0 ||
1998                        timeout->tv_nsec >= 1000000000L) {
1999                        error = -EINVAL;
2000                        goto out_free;
2001                }
2002                jiffies_left = timespec64_to_jiffies(timeout);
2003        }
2004
2005        max = 0;
2006        for (sop = sops; sop < sops + nsops; sop++) {
2007                unsigned long mask = 1ULL << ((sop->sem_num) % BITS_PER_LONG);
2008
2009                if (sop->sem_num >= max)
2010                        max = sop->sem_num;
2011                if (sop->sem_flg & SEM_UNDO)
2012                        undos = true;
2013                if (dup & mask) {
2014                        /*
2015                         * There was a previous alter access that appears
2016                         * to have accessed the same semaphore, thus use
2017                         * the dupsop logic. "appears", because the detection
2018                         * can only check % BITS_PER_LONG.
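                         * E.g. with BITS_PER_LONG == 64, sem_num 1 and
                         * sem_num 65 map to the same bit, so a false
                         * positive merely takes the slower dupsop path.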
2019                         */
2020                        dupsop = true;
2021                }
2022                if (sop->sem_op != 0) {
2023                        alter = true;
2024                        dup |= mask;
2025                }
2026        }
2027
2028        if (undos) {
2029                /* On success, find_alloc_undo takes the rcu_read_lock */
2030                un = find_alloc_undo(ns, semid);
2031                if (IS_ERR(un)) {
2032                        error = PTR_ERR(un);
2033                        goto out_free;
2034                }
2035        } else {
2036                un = NULL;
2037                rcu_read_lock();
2038        }
2039
2040        sma = sem_obtain_object_check(ns, semid);
2041        if (IS_ERR(sma)) {
2042                rcu_read_unlock();
2043                error = PTR_ERR(sma);
2044                goto out_free;
2045        }
2046
2047        error = -EFBIG;
2048        if (max >= sma->sem_nsems) {
2049                rcu_read_unlock();
2050                goto out_free;
2051        }
2052
2053        error = -EACCES;
2054        if (ipcperms(ns, &sma->sem_perm, alter ? S_IWUGO : S_IRUGO)) {
2055                rcu_read_unlock();
2056                goto out_free;
2057        }
2058
2059        error = security_sem_semop(&sma->sem_perm, sops, nsops, alter);
2060        if (error) {
2061                rcu_read_unlock();
2062                goto out_free;
2063        }
2064
2065        error = -EIDRM;
2066        locknum = sem_lock(sma, sops, nsops);
2067        /*
2068         * We eventually might perform the following check in a lockless
2069         * fashion, considering ipc_valid_object() locking constraints.
2070         * If nsops == 1 and there is no contention for sem_perm.lock, then
2071         * only a per-semaphore lock is held and it's OK to proceed with the
2072         * check below. For details on the fine-grained locking scheme
2073         * entangled here, and why it is RMID race safe, see sem_lock().
2074         */
2075        if (!ipc_valid_object(&sma->sem_perm))
2076                goto out_unlock_free;
2077        /*
2078         * semid identifiers are not unique - find_alloc_undo may have
2079         * allocated an undo structure, which was then invalidated by an
2080         * RMID, and a new array has since received the same id. Check for
2081         * this and fail. The case can be detected by checking un->semid;
2082         * the existence of "un" itself is guaranteed by rcu.
2083         */
2084        if (un && un->semid == -1)
2085                goto out_unlock_free;
2086
2087        queue.sops = sops;
2088        queue.nsops = nsops;
2089        queue.undo = un;
2090        queue.pid = task_tgid(current);
2091        queue.alter = alter;
2092        queue.dupsop = dupsop;
2093
2094        error = perform_atomic_semop(sma, &queue);
2095        if (error == 0) { /* non-blocking successful path */
2096                DEFINE_WAKE_Q(wake_q);
2097
2098                /*
2099                 * If the operation was successful, then do
2100                 * the required updates.
2101                 */
2102                if (alter)
2103                        do_smart_update(sma, sops, nsops, 1, &wake_q);
2104                else
2105                        set_semotime(sma, sops);
2106
2107                sem_unlock(sma, locknum);
2108                rcu_read_unlock();
2109                wake_up_q(&wake_q);
2110
2111                goto out_free;
2112        }
2113        if (error < 0) /* non-blocking error path */
2114                goto out_unlock_free;
2115
2116        /*
2117         * We need to sleep on this operation, so we put the current
2118         * task into the pending queue and go to sleep.
2119         */
2120        if (nsops == 1) {
2121                struct sem *curr;
2122                int idx = array_index_nospec(sops->sem_num, sma->sem_nsems);
2123                curr = &sma->sems[idx];
2124
2125                if (alter) {
2126                        if (sma->complex_count) {
2127                                list_add_tail(&queue.list,
2128                                                &sma->pending_alter);
2129                        } else {
2130
2131                                list_add_tail(&queue.list,
2132                                                &curr->pending_alter);
2133                        }
2134                } else {
2135                        list_add_tail(&queue.list, &curr->pending_const);
2136                }
2137        } else {
2138                if (!sma->complex_count)
2139                        merge_queues(sma);
2140
2141                if (alter)
2142                        list_add_tail(&queue.list, &sma->pending_alter);
2143                else
2144                        list_add_tail(&queue.list, &sma->pending_const);
2145
2146                sma->complex_count++;
2147        }
2148
2149        do {
2150                WRITE_ONCE(queue.status, -EINTR);
2151                queue.sleeper = current;
2152
2153                __set_current_state(TASK_INTERRUPTIBLE);
2154                sem_unlock(sma, locknum);
2155                rcu_read_unlock();
2156
2157                if (timeout)
2158                        jiffies_left = schedule_timeout(jiffies_left);
2159                else
2160                        schedule();
2161
2162                /*
2163                 * fastpath: the semop has completed, either successfully or
2164                 * not; from the syscall pov that distinction is irrelevant
2165                 * at this point, we're done either way.
2166                 *
2167                 * We _do_ care, nonetheless, about being awoken by a signal or
2168                 * spuriously.  The queue.status is checked again in the
2169                 * slowpath (aka after taking sem_lock), such that we can detect
2170                 * scenarios where we were awakened externally, during the
2171                 * window between wake_q_add() and wake_up_q().
2172                 */
2173                error = READ_ONCE(queue.status);
2174                if (error != -EINTR) {
2175                        /*
2176                         * User space could assume that semop() is a memory
2177                         * barrier: Without the mb(), the cpu could
2178                         * speculatively read in userspace stale data that was
2179                         * overwritten by the previous owner of the semaphore.
2180                         */
2181                        smp_mb();
2182                        goto out_free;
2183                }
2184
2185                rcu_read_lock();
2186                locknum = sem_lock(sma, sops, nsops);
2187
2188                if (!ipc_valid_object(&sma->sem_perm))
2189                        goto out_unlock_free;
2190
2191                error = READ_ONCE(queue.status);
2192
2193                /*
2194                 * If queue.status != -EINTR, we were woken up by another process.
2195                 * Leave without unlink_queue(), but with sem_unlock().
2196                 */
2197                if (error != -EINTR)
2198                        goto out_unlock_free;
2199
2200                /*
2201                 * If an interrupt occurred we have to clean up the queue.
2202                 */
2203                if (timeout && jiffies_left == 0)
2204                        error = -EAGAIN;
2205        } while (error == -EINTR && !signal_pending(current)); /* spurious */
2206
2207        unlink_queue(sma, &queue);
2208
2209out_unlock_free:
2210        sem_unlock(sma, locknum);
2211        rcu_read_unlock();
2212out_free:
2213        if (sops != fast_sops)
2214                kvfree(sops);
2215        return error;
2216}
2217
2218long ksys_semtimedop(int semid, struct sembuf __user *tsops,
2219                     unsigned int nsops, const struct __kernel_timespec __user *timeout)
2220{
2221        if (timeout) {
2222                struct timespec64 ts;
2223                if (get_timespec64(&ts, timeout))
2224                        return -EFAULT;
2225                return do_semtimedop(semid, tsops, nsops, &ts);
2226        }
2227        return do_semtimedop(semid, tsops, nsops, NULL);
2228}
2229
2230SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
2231                unsigned int, nsops, const struct __kernel_timespec __user *, timeout)
2232{
2233        return ksys_semtimedop(semid, tsops, nsops, timeout);
2234}
2235
2236#ifdef CONFIG_COMPAT_32BIT_TIME
2237long compat_ksys_semtimedop(int semid, struct sembuf __user *tsems,
2238                            unsigned int nsops,
2239                            const struct old_timespec32 __user *timeout)
2240{
2241        if (timeout) {
2242                struct timespec64 ts;
2243                if (get_old_timespec32(&ts, timeout))
2244                        return -EFAULT;
2245                return do_semtimedop(semid, tsems, nsops, &ts);
2246        }
2247        return do_semtimedop(semid, tsems, nsops, NULL);
2248}
2249
2250SYSCALL_DEFINE4(semtimedop_time32, int, semid, struct sembuf __user *, tsems,
2251                       unsigned int, nsops,
2252                       const struct old_timespec32 __user *, timeout)
2253{
2254        return compat_ksys_semtimedop(semid, tsems, nsops, timeout);
2255}
2256#endif
2257
2258SYSCALL_DEFINE3(semop, int, semid, struct sembuf __user *, tsops,
2259                unsigned, nsops)
2260{
2261        return do_semtimedop(semid, tsops, nsops, NULL);
2262}
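
/*
 * Illustrative userspace sketch (not kernel code): the classic P()/V()
 * pair on semaphore 0 of an existing set "semid", each expressed as a
 * single-element semop() call:
 *
 *	struct sembuf p = { .sem_num = 0, .sem_op = -1, .sem_flg = SEM_UNDO };
 *	struct sembuf v = { .sem_num = 0, .sem_op = +1, .sem_flg = SEM_UNDO };
 *
 *	if (semop(semid, &p, 1) < 0)	blocks until semval >= 1
 *		perror("semop(P)");
 *	... critical section ...
 *	semop(semid, &v, 1);
 */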
2263
2264/* If CLONE_SYSVSEM is set, establish sharing of SEM_UNDO state between
2265 * parent and child tasks.
2266 */
2267
2268int copy_semundo(unsigned long clone_flags, struct task_struct *tsk)
2269{
2270        struct sem_undo_list *undo_list;
2271        int error;
2272
2273        if (clone_flags & CLONE_SYSVSEM) {
2274                error = get_undo_list(&undo_list);
2275                if (error)
2276                        return error;
2277                refcount_inc(&undo_list->refcnt);
2278                tsk->sysvsem.undo_list = undo_list;
2279        } else
2280                tsk->sysvsem.undo_list = NULL;
2281
2282        return 0;
2283}
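
/*
 * In practice: NPTL's pthread_create() passes CLONE_SYSVSEM, so all
 * threads of a process share one undo list (and one set of semadj
 * values), whereas fork() does not set the flag and the child starts
 * with fresh, empty undo state.
 */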
2284
2285/*
2286 * Add semadj values to semaphores and free undo structures.
2287 * Undo structures are not freed when semaphore arrays are destroyed,
2288 * so some of them may be out of date.
2289 * IMPLEMENTATION NOTE: There is some confusion over whether the
2290 * set of adjustments that needs to be done should be done in an atomic
2291 * manner or not. That is, if we are attempting to decrement the semval
2292 * should we queue up and wait until we can do so legally?
2293 * The original implementation attempted to do this (queue and wait).
2294 * The current implementation does not do so. The POSIX standard
2295 * and SVID should be consulted to determine what behavior is mandated.
2296 */
2297void exit_sem(struct task_struct *tsk)
2298{
2299        struct sem_undo_list *ulp;
2300
2301        ulp = tsk->sysvsem.undo_list;
2302        if (!ulp)
2303                return;
2304        tsk->sysvsem.undo_list = NULL;
2305
2306        if (!refcount_dec_and_test(&ulp->refcnt))
2307                return;
2308
2309        for (;;) {
2310                struct sem_array *sma;
2311                struct sem_undo *un;
2312                int semid, i;
2313                DEFINE_WAKE_Q(wake_q);
2314
2315                cond_resched();
2316
2317                rcu_read_lock();
2318                un = list_entry_rcu(ulp->list_proc.next,
2319                                    struct sem_undo, list_proc);
2320                if (&un->list_proc == &ulp->list_proc) {
2321                        /*
2322                         * We must wait for freeary() before freeing this ulp,
2323                         * in case we raced with the last sem_undo. There is
2324                         * a small window in which we could exit while freeary()
2325                         * has not yet finished unlocking the sem_undo_list.
2326                         */
2327                        spin_lock(&ulp->lock);
2328                        spin_unlock(&ulp->lock);
2329                        rcu_read_unlock();
2330                        break;
2331                }
2332                spin_lock(&ulp->lock);
2333                semid = un->semid;
2334                spin_unlock(&ulp->lock);
2335
2336                /* exit_sem raced with IPC_RMID, nothing to do */
2337                if (semid == -1) {
2338                        rcu_read_unlock();
2339                        continue;
2340                }
2341
2342                sma = sem_obtain_object_check(tsk->nsproxy->ipc_ns, semid);
2343                /* exit_sem raced with IPC_RMID, nothing to do */
2344                if (IS_ERR(sma)) {
2345                        rcu_read_unlock();
2346                        continue;
2347                }
2348
2349                sem_lock(sma, NULL, -1);
2350                /* exit_sem raced with IPC_RMID, nothing to do */
2351                if (!ipc_valid_object(&sma->sem_perm)) {
2352                        sem_unlock(sma, -1);
2353                        rcu_read_unlock();
2354                        continue;
2355                }
2356                un = __lookup_undo(ulp, semid);
2357                if (un == NULL) {
2358                        /* exit_sem raced with IPC_RMID+semget() that created
2359                         * exactly the same semid. Nothing to do.
2360                         */
2361                        sem_unlock(sma, -1);
2362                        rcu_read_unlock();
2363                        continue;
2364                }
2365
2366                /* remove un from the linked lists */
2367                ipc_assert_locked_object(&sma->sem_perm);
2368                list_del(&un->list_id);
2369
2370                /* we are the last process using this ulp, so acquiring
2371                 * ulp->lock isn't required. Besides that, we are also protected
2372                 * against IPC_RMID as we hold the sma->sem_perm lock now.
2373                 */
2374                list_del_rcu(&un->list_proc);
2375
2376                /* perform adjustments registered in un */
2377                for (i = 0; i < sma->sem_nsems; i++) {
2378                        struct sem *semaphore = &sma->sems[i];
2379                        if (un->semadj[i]) {
2380                                semaphore->semval += un->semadj[i];
2381                                /*
2382                                 * Range checks of the new semaphore value,
2383                                 * not defined by SUS (the Single UNIX Specification):
2384                                 * - Some unices ignore the undo entirely
2385                                 *   (e.g. HP UX 11i 11.22, Tru64 V5.1)
2386                                 * - some cap the value (e.g. FreeBSD caps
2387                                 *   at 0, but doesn't enforce SEMVMX)
2388                                 *
2389                                 * Linux caps the semaphore value, both at 0
2390                                 * and at SEMVMX.
2391                                 *
2392                                 *      Manfred <manfred@colorfullife.com>
2393                                 */
2394                                if (semaphore->semval < 0)
2395                                        semaphore->semval = 0;
2396                                if (semaphore->semval > SEMVMX)
2397                                        semaphore->semval = SEMVMX;
2398                                ipc_update_pid(&semaphore->sempid, task_tgid(current));
2399                        }
2400                }
2401                /* maybe some queued-up processes were waiting for this */
2402                do_smart_update(sma, NULL, 0, 1, &wake_q);
2403                sem_unlock(sma, -1);
2404                rcu_read_unlock();
2405                wake_up_q(&wake_q);
2406
2407                kfree_rcu(un, rcu);
2408        }
2409        kfree(ulp);
2410}
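
/*
 * Worked example of the clamping above: a task that did two SEM_UNDO
 * decrements on a semaphore has semadj = +2; if semval is 1 at exit,
 * exit_sem() stores 1 + 2 = 3. Had semadj been -2 (from undone
 * increments), the raw result -1 would be clamped to 0 rather than
 * going negative.
 */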
2411
2412#ifdef CONFIG_PROC_FS
2413static int sysvipc_sem_proc_show(struct seq_file *s, void *it)
2414{
2415        struct user_namespace *user_ns = seq_user_ns(s);
2416        struct kern_ipc_perm *ipcp = it;
2417        struct sem_array *sma = container_of(ipcp, struct sem_array, sem_perm);
2418        time64_t sem_otime;
2419
2420        /*
2421         * The proc interface isn't aware of sem_lock(); it calls
2422         * ipc_lock_object() directly (in sysvipc_find_ipc).
2423         * In order to stay compatible with sem_lock(), we must
2424         * enter / leave complex_mode.
2425         */
2426        complexmode_enter(sma);
2427
2428        sem_otime = get_semotime(sma);
2429
2430        seq_printf(s,
2431                   "%10d %10d  %4o %10u %5u %5u %5u %5u %10llu %10llu\n",
2432                   sma->sem_perm.key,
2433                   sma->sem_perm.id,
2434                   sma->sem_perm.mode,
2435                   sma->sem_nsems,
2436                   from_kuid_munged(user_ns, sma->sem_perm.uid),
2437                   from_kgid_munged(user_ns, sma->sem_perm.gid),
2438                   from_kuid_munged(user_ns, sma->sem_perm.cuid),
2439                   from_kgid_munged(user_ns, sma->sem_perm.cgid),
2440                   sem_otime,
2441                   sma->sem_ctime);
2442
2443        complexmode_tryleave(sma);
2444
2445        return 0;
2446}
2447#endif
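
/*
 * A hypothetical /proc/sysvipc/sem data row produced by the format
 * string above, for a one-semaphore array owned by uid/gid 1000:
 *
 *	1297697108      65536   666          1  1000  1000  1000  1000 1670000000 1669990000
 *
 * The column header is emitted separately when the proc interface is
 * registered at init time.
 */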
2448