// SPDX-License-Identifier: GPL-2.0
/*
 * linux/ipc/sem.c
 * Copyright (C) 1992 Krishna Balasubramanian
 * Copyright (C) 1995 Eric Schenk, Bruno Haible
 *
 * /proc/sysvipc/sem support (c) 1999 Dragos Acostachioaie <dragos@iname.com>
 *
 * SMP-threaded, sysctl's added
 * (c) 1999 Manfred Spraul <manfred@colorfullife.com>
 * Enforced range limit on SEM_UNDO
 * (c) 2001 Red Hat Inc
 * Lockless wakeup
 * (c) 2003 Manfred Spraul <manfred@colorfullife.com>
 * (c) 2016 Davidlohr Bueso <dave@stgolabs.net>
 * Further wakeup optimizations, documentation
 * (c) 2010 Manfred Spraul <manfred@colorfullife.com>
 *
 * support for audit of ipc object properties and permission changes
 * Dustin Kirkland <dustin.kirkland@us.ibm.com>
 *
 * namespaces support
 * OpenVZ, SWsoft Inc.
 * Pavel Emelianov <xemul@openvz.org>
 *
 * Implementation notes: (May 2010)
 * This file implements System V semaphores.
 *
 * User space visible behavior:
 * - FIFO ordering for semop() operations (just FIFO, not starvation
 *   protection)
 * - multiple semaphore operations that alter the same semaphore in
 *   one semop() are handled.
 * - sem_ctime (time of last semctl()) is updated in the IPC_SET, SETVAL and
 *   SETALL calls.
 * - two Linux specific semctl() commands: SEM_STAT, SEM_INFO.
 * - undo adjustments at process exit are limited to 0..SEMVMX.
 * - namespaces are supported.
 * - SEMMSL, SEMMNS, SEMOPM and SEMMNI can be configured at runtime by writing
 *   to /proc/sys/kernel/sem.
 * - statistics about the usage are reported in /proc/sysvipc/sem.
 *
 * Internals:
 * - scalability:
 *   - all global variables are read-mostly.
 *   - semop() calls and semctl(RMID) are synchronized by RCU.
 *   - most operations do write operations (actually: spin_lock calls) to
 *     the per-semaphore array structure.
 *   Thus: Perfect SMP scaling between independent semaphore arrays.
 *         If multiple semaphores in one array are used, then cache line
 *         thrashing on the semaphore array spinlock will limit the scaling.
 * - semncnt and semzcnt are calculated on demand in count_semcnt()
 * - the task that performs a successful semop() scans the list of all
 *   sleeping tasks and completes any pending operations that can be fulfilled.
 *   Semaphores are actively given to waiting tasks (necessary for FIFO).
 *   (see update_queue())
 * - To improve the scalability, the actual wake-up calls are performed after
 *   dropping all locks. (see wake_up_sem_queue_prepare())
 * - All work is done by the waker, the woken up task does not have to do
 *   anything - not even acquiring a lock or dropping a refcount.
 * - A woken up task may not even touch the semaphore array anymore, it may
 *   have been destroyed already by a semctl(RMID).
 * - UNDO values are stored in an array (one per process and per
 *   semaphore array, lazily allocated). For backwards compatibility, multiple
 *   modes for the UNDO variables are supported (per process, per thread)
 *   (see copy_semundo, CLONE_SYSVSEM)
 * - There are two lists of the pending operations: a per-array list
 *   and a per-semaphore list (stored in the array). This allows achieving
 *   FIFO ordering without always scanning all pending operations.
 *   The worst-case behavior is nevertheless O(N^2) for N wakeups.
 */
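
/*
 * Hypothetical user-space example (not part of this file) of the behavior
 * described above: a single semop() call that atomically alters two
 * semaphores, with SEM_UNDO so the kernel reverts the change at process
 * exit:
 *
 *      int id = semget(IPC_PRIVATE, 2, IPC_CREAT | 0600);
 *      struct sembuf ops[2] = {
 *              { .sem_num = 0, .sem_op = -1, .sem_flg = SEM_UNDO },
 *              { .sem_num = 1, .sem_op = -1, .sem_flg = SEM_UNDO },
 *      };
 *      semop(id, ops, 2);   (sleeps until both decrements can succeed)
 *
 * Since the request alters more than one semaphore, it is a "complex"
 * operation and takes the global-lock path described under Internals.
 */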

#include <linux/compat.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/proc_fs.h>
#include <linux/time.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/audit.h>
#include <linux/capability.h>
#include <linux/seq_file.h>
#include <linux/rwsem.h>
#include <linux/nsproxy.h>
#include <linux/ipc_namespace.h>
#include <linux/sched/wake_q.h>
#include <linux/nospec.h>
#include <linux/rhashtable.h>

#include <linux/uaccess.h>
#include "util.h"

/* One semaphore structure for each semaphore in the system. */
struct sem {
        int     semval;         /* current value */
        /*
         * PID of the process that last modified the semaphore. For
         * Linux, specifically these are:
         *  - semop
         *  - semctl, via SETVAL and SETALL.
         *  - at task exit when performing undo adjustments (see exit_sem).
         */
        struct pid *sempid;
        spinlock_t      lock;   /* spinlock for fine-grained semtimedop */
        struct list_head pending_alter; /* pending single-sop operations */
                                        /* that alter the semaphore */
        struct list_head pending_const; /* pending single-sop operations */
                                        /* that do not alter the semaphore */
        time64_t         sem_otime;     /* candidate for sem_otime */
} ____cacheline_aligned_in_smp;

/* One sem_array data structure for each set of semaphores in the system. */
struct sem_array {
        struct kern_ipc_perm    sem_perm;       /* permissions .. see ipc.h */
        time64_t                sem_ctime;      /* create/last semctl() time */
        struct list_head        pending_alter;  /* pending operations */
                                                /* that alter the array */
        struct list_head        pending_const;  /* pending complex operations */
                                                /* that do not alter semvals */
        struct list_head        list_id;        /* undo requests on this array */
        int                     sem_nsems;      /* no. of semaphores in array */
        int                     complex_count;  /* pending complex operations */
        unsigned int            use_global_lock;/* >0: global lock required */

        struct sem              sems[];
} __randomize_layout;

/* One queue for each sleeping process in the system. */
struct sem_queue {
        struct list_head        list;    /* queue of pending operations */
        struct task_struct      *sleeper; /* this process */
        struct sem_undo         *undo;   /* undo structure */
        struct pid              *pid;    /* process id of requesting process */
        int                     status;  /* completion status of operation */
        struct sembuf           *sops;   /* array of pending operations */
        struct sembuf           *blocking; /* the operation that blocked */
        int                     nsops;   /* number of operations */
        bool                    alter;   /* does *sops alter the array? */
        bool                    dupsop;  /* multiple sops on the same sem_num */
};

/* Each task has a list of undo requests. They are executed automatically
 * when the process exits.
 */
struct sem_undo {
        struct list_head        list_proc;      /* per-process list: *
                                                 * all undos from one process
                                                 * rcu protected */
        struct rcu_head         rcu;            /* rcu struct for sem_undo */
        struct sem_undo_list    *ulp;           /* back ptr to sem_undo_list */
        struct list_head        list_id;        /* per semaphore array list:
                                                 * all undos for one array */
        int                     semid;          /* semaphore set identifier */
        short                   *semadj;        /* array of adjustments */
                                                /* one per semaphore */
};

/* sem_undo_list controls shared access to the list of sem_undo structures
 * that may be shared among tasks of one CLONE_SYSVSEM task group.
 */
struct sem_undo_list {
        refcount_t              refcnt;
        spinlock_t              lock;
        struct list_head        list_proc;
};


#define sem_ids(ns)     ((ns)->ids[IPC_SEM_IDS])

static int newary(struct ipc_namespace *, struct ipc_params *);
static void freeary(struct ipc_namespace *, struct kern_ipc_perm *);
#ifdef CONFIG_PROC_FS
static int sysvipc_sem_proc_show(struct seq_file *s, void *it);
#endif

#define SEMMSL_FAST     256 /* 512 bytes on stack */
#define SEMOPM_FAST     64  /* ~ 372 bytes on stack */

/*
 * Switching from the mode suitable for simple ops
 * to the mode for complex ops is costly. Therefore:
 * use some hysteresis
 */
#define USE_GLOBAL_LOCK_HYSTERESIS      10

/*
 * Locking:
 * a) global sem_lock() for read/write
 *      sem_undo.id_next,
 *      sem_array.complex_count,
 *      sem_array.pending{_alter,_const},
 *      sem_array.sem_undo
 *
 * b) global or semaphore sem_lock() for read/write:
 *      sem_array.sems[i].pending_{const,alter}:
 *
 * c) special:
 *      sem_undo_list.list_proc:
 *      * undo_list->lock for write
 *      * rcu for read
 *      use_global_lock:
 *      * global sem_lock() for write
 *      * either local or global sem_lock() for read.
 *
 * Memory ordering:
 * Most ordering is enforced by using spin_lock() and spin_unlock().
 * The special case is use_global_lock:
 * Setting it from non-zero to 0 is a RELEASE, this is ensured by
 * using smp_store_release().
 * Testing if it is non-zero is an ACQUIRE, this is ensured by using
 * smp_load_acquire().
 * Setting it from 0 to non-zero must be ordered with regards to
 * this smp_load_acquire(), this is guaranteed because the smp_load_acquire()
 * is inside a spin_lock() and after a write from 0 to non-zero a
 * spin_lock()+spin_unlock() is done.
 */
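
/*
 * Sketch of the release/acquire pairing described above; the real code
 * is in complexmode_tryleave() and sem_lock() below:
 *
 *      complex-op side (holds the global lock):
 *              smp_store_release(&sma->use_global_lock, 0);
 *
 *      simple-op side (holds only sem->lock):
 *              if (!smp_load_acquire(&sma->use_global_lock))
 *                      (fast path: every write made before the release
 *                       above is guaranteed to be visible here)
 *
 * The 0 -> non-zero transition needs no extra barrier because
 * complexmode_enter() does a spin_lock()/spin_unlock() pair on every
 * per-semaphore lock, ordering it against in-flight simple ops.
 */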

#define sc_semmsl       sem_ctls[0]
#define sc_semmns       sem_ctls[1]
#define sc_semopm       sem_ctls[2]
#define sc_semmni       sem_ctls[3]

void sem_init_ns(struct ipc_namespace *ns)
{
        ns->sc_semmsl = SEMMSL;
        ns->sc_semmns = SEMMNS;
        ns->sc_semopm = SEMOPM;
        ns->sc_semmni = SEMMNI;
        ns->used_sems = 0;
        ipc_init_ids(&ns->ids[IPC_SEM_IDS]);
}

#ifdef CONFIG_IPC_NS
void sem_exit_ns(struct ipc_namespace *ns)
{
        free_ipcs(ns, &sem_ids(ns), freeary);
        idr_destroy(&ns->ids[IPC_SEM_IDS].ipcs_idr);
        rhashtable_destroy(&ns->ids[IPC_SEM_IDS].key_ht);
}
#endif

void __init sem_init(void)
{
        sem_init_ns(&init_ipc_ns);
        ipc_init_proc_interface("sysvipc/sem",
                                "       key      semid perms      nsems   uid   gid  cuid  cgid      otime      ctime\n",
                                IPC_SEM_IDS, sysvipc_sem_proc_show);
}

/**
 * unmerge_queues - unmerge queues, if possible.
 * @sma: semaphore array
 *
 * The function unmerges the wait queues if complex_count is 0.
 * It must be called prior to dropping the global semaphore array lock.
 */
static void unmerge_queues(struct sem_array *sma)
{
        struct sem_queue *q, *tq;

        /* complex operations still around? */
        if (sma->complex_count)
                return;
        /*
         * We will switch back to simple mode.
         * Move all pending operations back into the per-semaphore
         * queues.
         */
        list_for_each_entry_safe(q, tq, &sma->pending_alter, list) {
                struct sem *curr;
                curr = &sma->sems[q->sops[0].sem_num];

                list_add_tail(&q->list, &curr->pending_alter);
        }
        INIT_LIST_HEAD(&sma->pending_alter);
}

/**
 * merge_queues - merge single semop queues into global queue
 * @sma: semaphore array
 *
 * This function merges all per-semaphore queues into the global queue.
 * It is necessary to achieve FIFO ordering for the pending single-sop
 * operations when a multi-semop operation must sleep.
 * Only the alter operations must be moved, the const operations can stay.
 */
static void merge_queues(struct sem_array *sma)
{
        int i;
        for (i = 0; i < sma->sem_nsems; i++) {
                struct sem *sem = &sma->sems[i];

                list_splice_init(&sem->pending_alter, &sma->pending_alter);
        }
}

static void sem_rcu_free(struct rcu_head *head)
{
        struct kern_ipc_perm *p = container_of(head, struct kern_ipc_perm, rcu);
        struct sem_array *sma = container_of(p, struct sem_array, sem_perm);

        security_sem_free(&sma->sem_perm);
        kvfree(sma);
}

/*
 * Enter the mode suitable for non-simple operations:
 * Caller must own sem_perm.lock.
 */
static void complexmode_enter(struct sem_array *sma)
{
        int i;
        struct sem *sem;

        if (sma->use_global_lock > 0)  {
                /*
                 * We are already in global lock mode.
                 * Nothing to do, just reset the
                 * counter until we return to simple mode.
                 */
                sma->use_global_lock = USE_GLOBAL_LOCK_HYSTERESIS;
                return;
        }
        sma->use_global_lock = USE_GLOBAL_LOCK_HYSTERESIS;

        for (i = 0; i < sma->sem_nsems; i++) {
                sem = &sma->sems[i];
                spin_lock(&sem->lock);
                spin_unlock(&sem->lock);
        }
}

/*
 * Try to leave the mode that disallows simple operations:
 * Caller must own sem_perm.lock.
 */
static void complexmode_tryleave(struct sem_array *sma)
{
        if (sma->complex_count)  {
                /* Complex ops are sleeping.
                 * We must stay in complex mode
                 */
                return;
        }
        if (sma->use_global_lock == 1) {
                /*
                 * Immediately after setting use_global_lock to 0,
                 * a simple op can start. Thus: all memory writes
                 * performed by the current operation must be visible
                 * before we set use_global_lock to 0.
                 */
                smp_store_release(&sma->use_global_lock, 0);
        } else {
                sma->use_global_lock--;
        }
}

#define SEM_GLOBAL_LOCK (-1)
/*
 * If the request contains only one semaphore operation, and there are
 * no complex transactions pending, lock only the semaphore involved.
 * Otherwise, lock the entire semaphore array, since we either have
 * multiple semaphores in our own semops, or we need to look at
 * semaphores from other pending complex operations.
 */
static inline int sem_lock(struct sem_array *sma, struct sembuf *sops,
                              int nsops)
{
        struct sem *sem;
        int idx;

        if (nsops != 1) {
                /* Complex operation - acquire a full lock */
                ipc_lock_object(&sma->sem_perm);

                /* Prevent parallel simple ops */
                complexmode_enter(sma);
                return SEM_GLOBAL_LOCK;
        }

        /*
         * Only one semaphore affected - try to optimize locking.
         * Optimized locking is possible if no complex operation
         * is either enqueued or processed right now.
         *
         * Both facts are tracked by use_global_lock.
         */
        idx = array_index_nospec(sops->sem_num, sma->sem_nsems);
        sem = &sma->sems[idx];

        /*
         * Initial check for use_global_lock. Just an optimization,
         * no locking, no memory barrier.
         */
        if (!sma->use_global_lock) {
                /*
                 * It appears that no complex operation is around.
                 * Acquire the per-semaphore lock.
                 */
                spin_lock(&sem->lock);

                /* pairs with smp_store_release() */
                if (!smp_load_acquire(&sma->use_global_lock)) {
                        /* fast path successful! */
                        return sops->sem_num;
                }
                spin_unlock(&sem->lock);
        }

        /* slow path: acquire the full lock */
        ipc_lock_object(&sma->sem_perm);

        if (sma->use_global_lock == 0) {
                /*
                 * The use_global_lock mode ended while we waited for
                 * sma->sem_perm.lock. Thus we must switch to locking
                 * with sem->lock.
                 * Unlike in the fast path, there is no need to recheck
                 * sma->use_global_lock after we have acquired sem->lock:
                 * We own sma->sem_perm.lock, thus use_global_lock cannot
                 * change.
                 */
                spin_lock(&sem->lock);

                ipc_unlock_object(&sma->sem_perm);
                return sops->sem_num;
        } else {
                /*
                 * Not a false alarm, thus continue to use the global lock
                 * mode. No need for complexmode_enter(), this was done by
                 * the caller that has set use_global_lock to non-zero.
                 */
                return SEM_GLOBAL_LOCK;
        }
}

static inline void sem_unlock(struct sem_array *sma, int locknum)
{
        if (locknum == SEM_GLOBAL_LOCK) {
                unmerge_queues(sma);
                complexmode_tryleave(sma);
                ipc_unlock_object(&sma->sem_perm);
        } else {
                struct sem *sem = &sma->sems[locknum];
                spin_unlock(&sem->lock);
        }
}
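
/*
 * Typical calling pattern for the two helpers above (compare
 * semctl_setval() further down):
 *
 *      rcu_read_lock();
 *      sma = sem_obtain_object_check(ns, semid);
 *      locknum = sem_lock(sma, sops, nsops);
 *      ... operate on the array ...
 *      sem_unlock(sma, locknum);
 *      rcu_read_unlock();
 *
 * sem_lock() returns either SEM_GLOBAL_LOCK or the index of the locked
 * semaphore; only that value may be passed back to sem_unlock().
 */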

/*
 * sem_lock_(check_) routines are called in the paths where the rwsem
 * is not held.
 *
 * The caller holds the RCU read lock.
 */
static inline struct sem_array *sem_obtain_object(struct ipc_namespace *ns, int id)
{
        struct kern_ipc_perm *ipcp = ipc_obtain_object_idr(&sem_ids(ns), id);

        if (IS_ERR(ipcp))
                return ERR_CAST(ipcp);

        return container_of(ipcp, struct sem_array, sem_perm);
}

static inline struct sem_array *sem_obtain_object_check(struct ipc_namespace *ns,
                                                        int id)
{
        struct kern_ipc_perm *ipcp = ipc_obtain_object_check(&sem_ids(ns), id);

        if (IS_ERR(ipcp))
                return ERR_CAST(ipcp);

        return container_of(ipcp, struct sem_array, sem_perm);
}

static inline void sem_lock_and_putref(struct sem_array *sma)
{
        sem_lock(sma, NULL, -1);
        ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
}

static inline void sem_rmid(struct ipc_namespace *ns, struct sem_array *s)
{
        ipc_rmid(&sem_ids(ns), &s->sem_perm);
}

static struct sem_array *sem_alloc(size_t nsems)
{
        struct sem_array *sma;
        size_t size;

        if (nsems > (INT_MAX - sizeof(*sma)) / sizeof(sma->sems[0]))
                return NULL;

        size = sizeof(*sma) + nsems * sizeof(sma->sems[0]);
        sma = kvmalloc(size, GFP_KERNEL);
        if (unlikely(!sma))
                return NULL;

        memset(sma, 0, size);

        return sma;
}

/**
 * newary - Create a new semaphore set
 * @ns: namespace
 * @params: ptr to the structure that contains key, semflg and nsems
 *
 * Called with sem_ids.rwsem held (as a writer)
 */
static int newary(struct ipc_namespace *ns, struct ipc_params *params)
{
        int retval;
        struct sem_array *sma;
        key_t key = params->key;
        int nsems = params->u.nsems;
        int semflg = params->flg;
        int i;

        if (!nsems)
                return -EINVAL;
        if (ns->used_sems + nsems > ns->sc_semmns)
                return -ENOSPC;

        sma = sem_alloc(nsems);
        if (!sma)
                return -ENOMEM;

        sma->sem_perm.mode = (semflg & S_IRWXUGO);
        sma->sem_perm.key = key;

        sma->sem_perm.security = NULL;
        retval = security_sem_alloc(&sma->sem_perm);
        if (retval) {
                kvfree(sma);
                return retval;
        }

        for (i = 0; i < nsems; i++) {
                INIT_LIST_HEAD(&sma->sems[i].pending_alter);
                INIT_LIST_HEAD(&sma->sems[i].pending_const);
                spin_lock_init(&sma->sems[i].lock);
        }

        sma->complex_count = 0;
        sma->use_global_lock = USE_GLOBAL_LOCK_HYSTERESIS;
        INIT_LIST_HEAD(&sma->pending_alter);
        INIT_LIST_HEAD(&sma->pending_const);
        INIT_LIST_HEAD(&sma->list_id);
        sma->sem_nsems = nsems;
        sma->sem_ctime = ktime_get_real_seconds();

        /* ipc_addid() locks sma upon success. */
        retval = ipc_addid(&sem_ids(ns), &sma->sem_perm, ns->sc_semmni);
        if (retval < 0) {
                ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
                return retval;
        }
        ns->used_sems += nsems;

        sem_unlock(sma, -1);
        rcu_read_unlock();

        return sma->sem_perm.id;
}


/*
 * Called with sem_ids.rwsem and ipcp locked.
 */
static inline int sem_more_checks(struct kern_ipc_perm *ipcp,
                                struct ipc_params *params)
{
        struct sem_array *sma;

        sma = container_of(ipcp, struct sem_array, sem_perm);
        if (params->u.nsems > sma->sem_nsems)
                return -EINVAL;

        return 0;
}

long ksys_semget(key_t key, int nsems, int semflg)
{
        struct ipc_namespace *ns;
        static const struct ipc_ops sem_ops = {
                .getnew = newary,
                .associate = security_sem_associate,
                .more_checks = sem_more_checks,
        };
        struct ipc_params sem_params;

        ns = current->nsproxy->ipc_ns;

        if (nsems < 0 || nsems > ns->sc_semmsl)
                return -EINVAL;

        sem_params.key = key;
        sem_params.flg = semflg;
        sem_params.u.nsems = nsems;

        return ipcget(ns, &sem_ids(ns), &sem_ops, &sem_params);
}

SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
{
        return ksys_semget(key, nsems, semflg);
}
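
/*
 * Hypothetical user-space counterpart of the syscall above: create
 * (or look up) a set of four semaphores, subject to the namespace
 * limits checked in ksys_semget():
 *
 *      int id = semget(ftok("/some/path", 'S'), 4, IPC_CREAT | 0666);
 *      if (id < 0)
 *              perror("semget");   (e.g. EINVAL if nsems > SEMMSL)
 */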

/**
 * perform_atomic_semop[_slow] - Attempt to perform semaphore
 *                               operations on a given array.
 * @sma: semaphore array
 * @q: struct sem_queue that describes the operation
 *
 * Whether the caller blocks is determined by the value of each
 * semaphore operation (sem_op):
 *
 *  (1) >0 never blocks.
 *  (2)  0 (wait-for-zero operation): blocks if semval is non-zero.
 *  (3) <0 blocks if the decrement would take semval below zero.
 *
 * Returns 0 if the operation was possible.
 * Returns 1 if the operation is impossible, the caller must sleep.
 * Returns <0 for error codes.
 */
static int perform_atomic_semop_slow(struct sem_array *sma, struct sem_queue *q)
{
        int result, sem_op, nsops;
        struct pid *pid;
        struct sembuf *sop;
        struct sem *curr;
        struct sembuf *sops;
        struct sem_undo *un;

        sops = q->sops;
        nsops = q->nsops;
        un = q->undo;

        for (sop = sops; sop < sops + nsops; sop++) {
                int idx = array_index_nospec(sop->sem_num, sma->sem_nsems);
                curr = &sma->sems[idx];
                sem_op = sop->sem_op;
                result = curr->semval;

                if (!sem_op && result)
                        goto would_block;

                result += sem_op;
                if (result < 0)
                        goto would_block;
                if (result > SEMVMX)
                        goto out_of_range;

                if (sop->sem_flg & SEM_UNDO) {
                        int undo = un->semadj[sop->sem_num] - sem_op;
                        /* Exceeding the undo range is an error. */
                        if (undo < (-SEMAEM - 1) || undo > SEMAEM)
                                goto out_of_range;
                        un->semadj[sop->sem_num] = undo;
                }

                curr->semval = result;
        }

        sop--;
        pid = q->pid;
        while (sop >= sops) {
                ipc_update_pid(&sma->sems[sop->sem_num].sempid, pid);
                sop--;
        }

        return 0;

out_of_range:
        result = -ERANGE;
        goto undo;

would_block:
        q->blocking = sop;

        if (sop->sem_flg & IPC_NOWAIT)
                result = -EAGAIN;
        else
                result = 1;

undo:
        sop--;
        while (sop >= sops) {
                sem_op = sop->sem_op;
                sma->sems[sop->sem_num].semval -= sem_op;
                if (sop->sem_flg & SEM_UNDO)
                        un->semadj[sop->sem_num] += sem_op;
                sop--;
        }

        return result;
}

static int perform_atomic_semop(struct sem_array *sma, struct sem_queue *q)
{
        int result, sem_op, nsops;
        struct sembuf *sop;
        struct sem *curr;
        struct sembuf *sops;
        struct sem_undo *un;

        sops = q->sops;
        nsops = q->nsops;
        un = q->undo;

        if (unlikely(q->dupsop))
                return perform_atomic_semop_slow(sma, q);

        /*
         * We scan the semaphore set twice, first to ensure that the entire
         * operation can succeed, therefore avoiding any pointless writes
         * to shared memory and having to undo such changes in order to block
         * until the operations can go through.
         */
        for (sop = sops; sop < sops + nsops; sop++) {
                int idx = array_index_nospec(sop->sem_num, sma->sem_nsems);

                curr = &sma->sems[idx];
                sem_op = sop->sem_op;
                result = curr->semval;

                if (!sem_op && result)
                        goto would_block; /* wait-for-zero */

                result += sem_op;
                if (result < 0)
                        goto would_block;

                if (result > SEMVMX)
                        return -ERANGE;

                if (sop->sem_flg & SEM_UNDO) {
                        int undo = un->semadj[sop->sem_num] - sem_op;

                        /* Exceeding the undo range is an error. */
                        if (undo < (-SEMAEM - 1) || undo > SEMAEM)
                                return -ERANGE;
                }
        }

        for (sop = sops; sop < sops + nsops; sop++) {
                curr = &sma->sems[sop->sem_num];
                sem_op = sop->sem_op;
                result = curr->semval;

                if (sop->sem_flg & SEM_UNDO) {
                        int undo = un->semadj[sop->sem_num] - sem_op;

                        un->semadj[sop->sem_num] = undo;
                }
                curr->semval += sem_op;
                ipc_update_pid(&curr->sempid, q->pid);
        }

        return 0;

would_block:
        q->blocking = sop;
        return sop->sem_flg & IPC_NOWAIT ? -EAGAIN : 1;
}
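
/*
 * Example of a request that forces the _slow variant above: if one
 * semop() call contains two operations on the same sem_num, q->dupsop
 * is set and the dual-scan fast path cannot be used, because its first
 * scan would check both operations against the unmodified semval:
 *
 *      struct sembuf ops[2] = {
 *              { .sem_num = 0, .sem_op = -1 },
 *              { .sem_num = 0, .sem_op = -1 },   (duplicate sem_num)
 *      };
 *
 * perform_atomic_semop_slow() applies the sops sequentially and rolls
 * back on failure, so it handles this case correctly.
 */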

static inline void wake_up_sem_queue_prepare(struct sem_queue *q, int error,
                                             struct wake_q_head *wake_q)
{
        wake_q_add(wake_q, q->sleeper);
        /*
         * Rely on the above implicit barrier, such that we can
         * ensure that we hold a reference to the task before setting
         * q->status. Otherwise we could race with do_exit if the
         * task is awoken by an external event before calling
         * wake_up_process().
         */
        WRITE_ONCE(q->status, error);
}

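/*
 * The wake-up protocol used throughout this file: sleepers are queued
 * on a wake_q under the array lock, and woken only after all locks are
 * dropped (compare freeary() and semctl_setval()):
 *
 *      DEFINE_WAKE_Q(wake_q);
 *      sem_lock(sma, NULL, -1);
 *      wake_up_sem_queue_prepare(q, error, &wake_q);
 *      sem_unlock(sma, -1);
 *      rcu_read_unlock();
 *      wake_up_q(&wake_q);
 *
 * The woken task only reads q->status; it must not touch the array,
 * which may already have been freed by semctl(IPC_RMID).
 */
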
static void unlink_queue(struct sem_array *sma, struct sem_queue *q)
{
        list_del(&q->list);
        if (q->nsops > 1)
                sma->complex_count--;
}

/** check_restart(sma, q)
 * @sma: semaphore array
 * @q: the operation that just completed
 *
 * update_queue is O(N^2) when it restarts scanning the whole queue of
 * waiting operations. Therefore this function checks if the restart is
 * really necessary. It is called after a previously waiting operation
 * modified the array.
 * Note that wait-for-zero operations are handled without restart.
 */
static inline int check_restart(struct sem_array *sma, struct sem_queue *q)
{
        /* pending complex alter operations are too difficult to analyse */
        if (!list_empty(&sma->pending_alter))
                return 1;

        /* we were a sleeping complex operation. Too difficult */
        if (q->nsops > 1)
                return 1;

        /* It is impossible that someone waits for the new value:
         * - complex operations always restart.
         * - wait-for-zero are handled separately.
         * - q is a previously sleeping simple operation that
         *   altered the array. It must be a decrement, because
         *   simple increments never sleep.
         * - If there are older (higher priority) decrements
         *   in the queue, then they have observed the original
         *   semval value and couldn't proceed. The operation
         *   decremented the value - thus they won't proceed either.
         */
        return 0;
}

/**
 * wake_const_ops - wake up non-alter tasks
 * @sma: semaphore array.
 * @semnum: semaphore that was modified.
 * @wake_q: lockless wake-queue head.
 *
 * wake_const_ops must be called after a semaphore in a semaphore array
 * was set to 0. If complex const operations are pending, wake_const_ops must
 * be called with semnum = -1, as well as with the number of each modified
 * semaphore.
 * The tasks that must be woken up are added to @wake_q. The return code
 * is stored in q->status.
 * The function returns 1 if at least one operation was completed successfully.
 */
static int wake_const_ops(struct sem_array *sma, int semnum,
                          struct wake_q_head *wake_q)
{
        struct sem_queue *q, *tmp;
        struct list_head *pending_list;
        int semop_completed = 0;

        if (semnum == -1)
                pending_list = &sma->pending_const;
        else
                pending_list = &sma->sems[semnum].pending_const;

        list_for_each_entry_safe(q, tmp, pending_list, list) {
                int error = perform_atomic_semop(sma, q);

                if (error > 0)
                        continue;
                /* operation completed, remove from queue & wakeup */
                unlink_queue(sma, q);

                wake_up_sem_queue_prepare(q, error, wake_q);
                if (error == 0)
                        semop_completed = 1;
        }

        return semop_completed;
}

/**
 * do_smart_wakeup_zero - wakeup all wait for zero tasks
 * @sma: semaphore array
 * @sops: operations that were performed
 * @nsops: number of operations
 * @wake_q: lockless wake-queue head
 *
 * Checks all required queues for wait-for-zero operations, based
 * on the actual changes that were performed on the semaphore array.
 * The function returns 1 if at least one operation was completed successfully.
 */
static int do_smart_wakeup_zero(struct sem_array *sma, struct sembuf *sops,
                                int nsops, struct wake_q_head *wake_q)
{
        int i;
        int semop_completed = 0;
        int got_zero = 0;

        /* first: the per-semaphore queues, if known */
        if (sops) {
                for (i = 0; i < nsops; i++) {
                        int num = sops[i].sem_num;

                        if (sma->sems[num].semval == 0) {
                                got_zero = 1;
                                semop_completed |= wake_const_ops(sma, num, wake_q);
                        }
                }
        } else {
                /*
                 * No sops means modified semaphores not known.
                 * Assume all were changed.
                 */
                for (i = 0; i < sma->sem_nsems; i++) {
                        if (sma->sems[i].semval == 0) {
                                got_zero = 1;
                                semop_completed |= wake_const_ops(sma, i, wake_q);
                        }
                }
        }
        /*
         * If one of the modified semaphores got 0,
         * then check the global queue, too.
         */
        if (got_zero)
                semop_completed |= wake_const_ops(sma, -1, wake_q);

        return semop_completed;
}


/**
 * update_queue - look for tasks that can be completed.
 * @sma: semaphore array.
 * @semnum: semaphore that was modified.
 * @wake_q: lockless wake-queue head.
 *
 * update_queue must be called after a semaphore in a semaphore array
 * was modified. If multiple semaphores were modified, update_queue must
 * be called with semnum = -1, as well as with the number of each modified
 * semaphore.
 * The tasks that must be woken up are added to @wake_q. The return code
 * is stored in q->status.
 * The function internally checks if const operations can now succeed.
 *
 * The function returns 1 if at least one semop was completed successfully.
 */
static int update_queue(struct sem_array *sma, int semnum, struct wake_q_head *wake_q)
{
        struct sem_queue *q, *tmp;
        struct list_head *pending_list;
        int semop_completed = 0;

        if (semnum == -1)
                pending_list = &sma->pending_alter;
        else
                pending_list = &sma->sems[semnum].pending_alter;

again:
        list_for_each_entry_safe(q, tmp, pending_list, list) {
                int error, restart;

                /* If we are scanning the single sop, per-semaphore list of
                 * one semaphore and that semaphore is 0, then it is not
                 * necessary to scan further: simple increments
                 * that affect only one entry succeed immediately and cannot
                 * be in the per-semaphore pending queue, and decrements
                 * cannot be successful if the value is already 0.
                 */
                if (semnum != -1 && sma->sems[semnum].semval == 0)
                        break;

                error = perform_atomic_semop(sma, q);

                /* Does q->sleeper still need to sleep? */
                if (error > 0)
                        continue;

                unlink_queue(sma, q);

                if (error) {
                        restart = 0;
                } else {
                        semop_completed = 1;
                        do_smart_wakeup_zero(sma, q->sops, q->nsops, wake_q);
                        restart = check_restart(sma, q);
                }

                wake_up_sem_queue_prepare(q, error, wake_q);
                if (restart)
                        goto again;
        }
        return semop_completed;
}

/**
 * set_semotime - set sem_otime
 * @sma: semaphore array
 * @sops: operations that modified the array, may be NULL
 *
 * sem_otime is replicated to avoid cache line thrashing.
 * This function sets one instance to the current time.
 */
static void set_semotime(struct sem_array *sma, struct sembuf *sops)
{
        if (sops == NULL) {
                sma->sems[0].sem_otime = ktime_get_real_seconds();
        } else {
                sma->sems[sops[0].sem_num].sem_otime =
                                                ktime_get_real_seconds();
        }
}

/**
 * do_smart_update - optimized update_queue
 * @sma: semaphore array
 * @sops: operations that were performed
 * @nsops: number of operations
 * @otime: force setting otime
 * @wake_q: lockless wake-queue head
 *
 * do_smart_update() does the required calls to update_queue and wakeup_zero,
 * based on the actual changes that were performed on the semaphore array.
 * Note that the function does not do the actual wake-up: the caller is
 * responsible for calling wake_up_q().
 * It is safe to perform this call after dropping all locks.
 */
static void do_smart_update(struct sem_array *sma, struct sembuf *sops, int nsops,
                            int otime, struct wake_q_head *wake_q)
{
        int i;

        otime |= do_smart_wakeup_zero(sma, sops, nsops, wake_q);

        if (!list_empty(&sma->pending_alter)) {
                /* semaphore array uses the global queue - just process it. */
                otime |= update_queue(sma, -1, wake_q);
        } else {
                if (!sops) {
                        /*
                         * No sops, thus the modified semaphores are not
                         * known. Check all.
                         */
                        for (i = 0; i < sma->sem_nsems; i++)
                                otime |= update_queue(sma, i, wake_q);
                } else {
                        /*
                         * Check the semaphores that were increased:
                         * - No complex ops, thus all sleeping ops are
                         *   decrements.
                         * - if we decreased the value, then any sleeping
                         *   semaphore ops won't be able to run: if the
                         *   previous value was too small, then the new
                         *   value will be too small, too.
                         */
                        for (i = 0; i < nsops; i++) {
                                if (sops[i].sem_op > 0) {
                                        otime |= update_queue(sma,
                                                              sops[i].sem_num, wake_q);
                                }
                        }
                }
        }
        if (otime)
                set_semotime(sma, sops);
}

/*
 * check_qop: Test if a queued operation sleeps on the semaphore semnum
 */
static int check_qop(struct sem_array *sma, int semnum, struct sem_queue *q,
                        bool count_zero)
{
        struct sembuf *sop = q->blocking;

        /*
         * Linux always (since 0.99.10) reported a task as sleeping on all
         * semaphores. This violates SUS, therefore it was changed to the
         * standard compliant behavior.
         * Give the administrators a chance to notice that an application
         * might misbehave because it relies on the Linux behavior.
         */
        pr_info_once("semctl(GETNCNT/GETZCNT) is since 3.16 Single Unix Specification compliant.\n"
                        "The task %s (%d) triggered the difference, watch for misbehavior.\n",
                        current->comm, task_pid_nr(current));

        if (sop->sem_num != semnum)
                return 0;

        if (count_zero && sop->sem_op == 0)
                return 1;
        if (!count_zero && sop->sem_op < 0)
                return 1;

        return 0;
}

/* The following counts are associated to each semaphore:
 *   semncnt        number of tasks waiting on semval being nonzero
 *   semzcnt        number of tasks waiting on semval being zero
 *
 * By definition, a task waits only on the semaphore of the first semop
 * that cannot proceed, even if additional operations would block, too.
 */
static int count_semcnt(struct sem_array *sma, ushort semnum,
                        bool count_zero)
{
        struct list_head *l;
        struct sem_queue *q;
        int semcnt;

        semcnt = 0;
        /* First: check the simple operations. They are easy to evaluate */
        if (count_zero)
                l = &sma->sems[semnum].pending_const;
        else
                l = &sma->sems[semnum].pending_alter;

        list_for_each_entry(q, l, list) {
                /* all tasks on a per-semaphore list sleep on exactly
                 * that semaphore
                 */
                semcnt++;
        }

        /* Then: check the complex operations. */
        list_for_each_entry(q, &sma->pending_alter, list) {
                semcnt += check_qop(sma, semnum, q, count_zero);
        }
        if (count_zero) {
                list_for_each_entry(q, &sma->pending_const, list) {
                        semcnt += check_qop(sma, semnum, q, count_zero);
                }
        }
        return semcnt;
}
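
/*
 * Hypothetical user-space view of the counts computed above:
 *
 *      semctl(id, 0, GETNCNT);   (tasks blocked decrementing semaphore 0)
 *      semctl(id, 0, GETZCNT);   (tasks waiting for semaphore 0 to reach 0)
 *
 * Since 3.16, a multi-sop sleeper is counted only towards q->blocking,
 * the first operation that could not proceed (see check_qop() above).
 */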

/* Free a semaphore set. freeary() is called with sem_ids.rwsem locked
 * as a writer and the spinlock for this semaphore set held. sem_ids.rwsem
 * remains locked on exit.
 */
static void freeary(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
{
        struct sem_undo *un, *tu;
        struct sem_queue *q, *tq;
        struct sem_array *sma = container_of(ipcp, struct sem_array, sem_perm);
        int i;
        DEFINE_WAKE_Q(wake_q);

        /* Free the existing undo structures for this semaphore set.  */
        ipc_assert_locked_object(&sma->sem_perm);
        list_for_each_entry_safe(un, tu, &sma->list_id, list_id) {
                list_del(&un->list_id);
                spin_lock(&un->ulp->lock);
                un->semid = -1;
                list_del_rcu(&un->list_proc);
                spin_unlock(&un->ulp->lock);
                kfree_rcu(un, rcu);
        }

        /* Wake up all pending processes and let them fail with EIDRM. */
        list_for_each_entry_safe(q, tq, &sma->pending_const, list) {
                unlink_queue(sma, q);
                wake_up_sem_queue_prepare(q, -EIDRM, &wake_q);
        }

        list_for_each_entry_safe(q, tq, &sma->pending_alter, list) {
                unlink_queue(sma, q);
                wake_up_sem_queue_prepare(q, -EIDRM, &wake_q);
        }
        for (i = 0; i < sma->sem_nsems; i++) {
                struct sem *sem = &sma->sems[i];
                list_for_each_entry_safe(q, tq, &sem->pending_const, list) {
                        unlink_queue(sma, q);
                        wake_up_sem_queue_prepare(q, -EIDRM, &wake_q);
                }
                list_for_each_entry_safe(q, tq, &sem->pending_alter, list) {
                        unlink_queue(sma, q);
                        wake_up_sem_queue_prepare(q, -EIDRM, &wake_q);
                }
                ipc_update_pid(&sem->sempid, NULL);
        }

        /* Remove the semaphore set from the IDR */
        sem_rmid(ns, sma);
        sem_unlock(sma, -1);
        rcu_read_unlock();

        wake_up_q(&wake_q);
        ns->used_sems -= sma->sem_nsems;
        ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
}

static unsigned long copy_semid_to_user(void __user *buf, struct semid64_ds *in, int version)
{
        switch (version) {
        case IPC_64:
                return copy_to_user(buf, in, sizeof(*in));
        case IPC_OLD:
            {
                struct semid_ds out;

                memset(&out, 0, sizeof(out));

                ipc64_perm_to_ipc_perm(&in->sem_perm, &out.sem_perm);

                out.sem_otime   = in->sem_otime;
                out.sem_ctime   = in->sem_ctime;
                out.sem_nsems   = in->sem_nsems;

                return copy_to_user(buf, &out, sizeof(out));
            }
        default:
                return -EINVAL;
        }
}

static time64_t get_semotime(struct sem_array *sma)
{
        int i;
        time64_t res;

        res = sma->sems[0].sem_otime;
        for (i = 1; i < sma->sem_nsems; i++) {
                time64_t to = sma->sems[i].sem_otime;

                if (to > res)
                        res = to;
        }
        return res;
}

static int semctl_stat(struct ipc_namespace *ns, int semid,
                         int cmd, struct semid64_ds *semid64)
{
        struct sem_array *sma;
        time64_t semotime;
        int err;

        memset(semid64, 0, sizeof(*semid64));

        rcu_read_lock();
        if (cmd == SEM_STAT || cmd == SEM_STAT_ANY) {
                sma = sem_obtain_object(ns, semid);
                if (IS_ERR(sma)) {
                        err = PTR_ERR(sma);
                        goto out_unlock;
                }
        } else { /* IPC_STAT */
                sma = sem_obtain_object_check(ns, semid);
                if (IS_ERR(sma)) {
                        err = PTR_ERR(sma);
                        goto out_unlock;
                }
        }

        /* see comment for SHM_STAT_ANY */
        if (cmd == SEM_STAT_ANY)
                audit_ipc_obj(&sma->sem_perm);
        else {
                err = -EACCES;
                if (ipcperms(ns, &sma->sem_perm, S_IRUGO))
                        goto out_unlock;
        }

        err = security_sem_semctl(&sma->sem_perm, cmd);
        if (err)
                goto out_unlock;

        ipc_lock_object(&sma->sem_perm);

        if (!ipc_valid_object(&sma->sem_perm)) {
                ipc_unlock_object(&sma->sem_perm);
                err = -EIDRM;
                goto out_unlock;
        }

        kernel_to_ipc64_perm(&sma->sem_perm, &semid64->sem_perm);
        semotime = get_semotime(sma);
        semid64->sem_otime = semotime;
        semid64->sem_ctime = sma->sem_ctime;
#ifndef CONFIG_64BIT
        semid64->sem_otime_high = semotime >> 32;
        semid64->sem_ctime_high = sma->sem_ctime >> 32;
#endif
        semid64->sem_nsems = sma->sem_nsems;

        if (cmd == IPC_STAT) {
                /*
                 * As defined in SUS:
                 * Return 0 on success
                 */
                err = 0;
        } else {
                /*
                 * SEM_STAT and SEM_STAT_ANY (both Linux specific)
                 * Return the full id, including the sequence number
                 */
                err = sma->sem_perm.id;
        }
        ipc_unlock_object(&sma->sem_perm);
out_unlock:
        rcu_read_unlock();
        return err;
}

static int semctl_info(struct ipc_namespace *ns, int semid,
                         int cmd, void __user *p)
{
        struct seminfo seminfo;
        int max_idx;
        int err;

        err = security_sem_semctl(NULL, cmd);
        if (err)
                return err;

        memset(&seminfo, 0, sizeof(seminfo));
        seminfo.semmni = ns->sc_semmni;
        seminfo.semmns = ns->sc_semmns;
        seminfo.semmsl = ns->sc_semmsl;
        seminfo.semopm = ns->sc_semopm;
        seminfo.semvmx = SEMVMX;
        seminfo.semmnu = SEMMNU;
        seminfo.semmap = SEMMAP;
        seminfo.semume = SEMUME;
        down_read(&sem_ids(ns).rwsem);
        if (cmd == SEM_INFO) {
                seminfo.semusz = sem_ids(ns).in_use;
                seminfo.semaem = ns->used_sems;
        } else {
                seminfo.semusz = SEMUSZ;
                seminfo.semaem = SEMAEM;
        }
        max_idx = ipc_get_maxidx(&sem_ids(ns));
        up_read(&sem_ids(ns).rwsem);
        if (copy_to_user(p, &seminfo, sizeof(struct seminfo)))
                return -EFAULT;
        return (max_idx < 0) ? 0 : max_idx;
}

static int semctl_setval(struct ipc_namespace *ns, int semid, int semnum,
                int val)
{
        struct sem_undo *un;
        struct sem_array *sma;
        struct sem *curr;
        int err;
        DEFINE_WAKE_Q(wake_q);

        if (val > SEMVMX || val < 0)
                return -ERANGE;

        rcu_read_lock();
        sma = sem_obtain_object_check(ns, semid);
        if (IS_ERR(sma)) {
                rcu_read_unlock();
                return PTR_ERR(sma);
        }

        if (semnum < 0 || semnum >= sma->sem_nsems) {
                rcu_read_unlock();
                return -EINVAL;
        }

        if (ipcperms(ns, &sma->sem_perm, S_IWUGO)) {
                rcu_read_unlock();
                return -EACCES;
        }

        err = security_sem_semctl(&sma->sem_perm, SETVAL);
        if (err) {
                rcu_read_unlock();
                return -EACCES;
        }

        sem_lock(sma, NULL, -1);

        if (!ipc_valid_object(&sma->sem_perm)) {
                sem_unlock(sma, -1);
                rcu_read_unlock();
                return -EIDRM;
        }

        semnum = array_index_nospec(semnum, sma->sem_nsems);
        curr = &sma->sems[semnum];

        ipc_assert_locked_object(&sma->sem_perm);
        list_for_each_entry(un, &sma->list_id, list_id)
                un->semadj[semnum] = 0;

        curr->semval = val;
        ipc_update_pid(&curr->sempid, task_tgid(current));
        sma->sem_ctime = ktime_get_real_seconds();
        /* maybe some queued-up processes were waiting for this */
        do_smart_update(sma, NULL, 0, 0, &wake_q);
        sem_unlock(sma, -1);
        rcu_read_unlock();
        wake_up_q(&wake_q);
        return 0;
}
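
/*
 * Hypothetical user-space counterpart of semctl_setval(); union semun
 * must be declared by the caller, as required by SUS:
 *
 *      union semun { int val; unsigned short *array; } arg = { .val = 1 };
 *      semctl(id, 0, SETVAL, arg);
 *
 * Note that SETVAL clears the SEM_UNDO adjustment of *every* process
 * for that semaphore, as done by the list_for_each_entry() loop above.
 */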
1389
1390static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
1391                int cmd, void __user *p)
1392{
1393        struct sem_array *sma;
1394        struct sem *curr;
1395        int err, nsems;
1396        ushort fast_sem_io[SEMMSL_FAST];
1397        ushort *sem_io = fast_sem_io;
1398        DEFINE_WAKE_Q(wake_q);
1399
1400        rcu_read_lock();
1401        sma = sem_obtain_object_check(ns, semid);
1402        if (IS_ERR(sma)) {
1403                rcu_read_unlock();
1404                return PTR_ERR(sma);
1405        }
1406
1407        nsems = sma->sem_nsems;
1408
1409        err = -EACCES;
1410        if (ipcperms(ns, &sma->sem_perm, cmd == SETALL ? S_IWUGO : S_IRUGO))
1411                goto out_rcu_wakeup;
1412
1413        err = security_sem_semctl(&sma->sem_perm, cmd);
1414        if (err)
1415                goto out_rcu_wakeup;
1416
1417        err = -EACCES;
1418        switch (cmd) {
1419        case GETALL:
1420        {
1421                ushort __user *array = p;
1422                int i;
1423
1424                sem_lock(sma, NULL, -1);
1425                if (!ipc_valid_object(&sma->sem_perm)) {
1426                        err = -EIDRM;
1427                        goto out_unlock;
1428                }
1429                if (nsems > SEMMSL_FAST) {
1430                        if (!ipc_rcu_getref(&sma->sem_perm)) {
1431                                err = -EIDRM;
1432                                goto out_unlock;
1433                        }
1434                        sem_unlock(sma, -1);
1435                        rcu_read_unlock();
1436                        sem_io = kvmalloc_array(nsems, sizeof(ushort),
1437                                                GFP_KERNEL);
1438                        if (sem_io == NULL) {
1439                                ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
1440                                return -ENOMEM;
1441                        }
1442
1443                        rcu_read_lock();
1444                        sem_lock_and_putref(sma);
1445                        if (!ipc_valid_object(&sma->sem_perm)) {
1446                                err = -EIDRM;
1447                                goto out_unlock;
1448                        }
1449                }
1450                for (i = 0; i < sma->sem_nsems; i++)
1451                        sem_io[i] = sma->sems[i].semval;
1452                sem_unlock(sma, -1);
1453                rcu_read_unlock();
1454                err = 0;
1455                if (copy_to_user(array, sem_io, nsems*sizeof(ushort)))
1456                        err = -EFAULT;
1457                goto out_free;
1458        }
1459        case SETALL:
1460        {
1461                int i;
1462                struct sem_undo *un;
1463
1464                if (!ipc_rcu_getref(&sma->sem_perm)) {
1465                        err = -EIDRM;
1466                        goto out_rcu_wakeup;
1467                }
1468                rcu_read_unlock();
1469
1470                if (nsems > SEMMSL_FAST) {
1471                        sem_io = kvmalloc_array(nsems, sizeof(ushort),
1472                                                GFP_KERNEL);
1473                        if (sem_io == NULL) {
1474                                ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
1475                                return -ENOMEM;
1476                        }
1477                }
1478
1479                if (copy_from_user(sem_io, p, nsems*sizeof(ushort))) {
1480                        ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
1481                        err = -EFAULT;
1482                        goto out_free;
1483                }
1484
1485                for (i = 0; i < nsems; i++) {
1486                        if (sem_io[i] > SEMVMX) {
1487                                ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
1488                                err = -ERANGE;
1489                                goto out_free;
1490                        }
1491                }
1492                rcu_read_lock();
1493                sem_lock_and_putref(sma);
1494                if (!ipc_valid_object(&sma->sem_perm)) {
1495                        err = -EIDRM;
1496                        goto out_unlock;
1497                }
1498
1499                for (i = 0; i < nsems; i++) {
1500                        sma->sems[i].semval = sem_io[i];
1501                        ipc_update_pid(&sma->sems[i].sempid, task_tgid(current));
1502                }
1503
1504                ipc_assert_locked_object(&sma->sem_perm);
1505                list_for_each_entry(un, &sma->list_id, list_id) {
1506                        for (i = 0; i < nsems; i++)
1507                                un->semadj[i] = 0;
1508                }
1509                sma->sem_ctime = ktime_get_real_seconds();
1510                /* maybe some queued-up processes were waiting for this */
1511                do_smart_update(sma, NULL, 0, 0, &wake_q);
1512                err = 0;
1513                goto out_unlock;
1514        }
1515        /* GETVAL, GETPID, GETNCNT, GETZCNT: fall-through */
1516        }
1517        err = -EINVAL;
1518        if (semnum < 0 || semnum >= nsems)
1519                goto out_rcu_wakeup;
1520
1521        sem_lock(sma, NULL, -1);
1522        if (!ipc_valid_object(&sma->sem_perm)) {
1523                err = -EIDRM;
1524                goto out_unlock;
1525        }
1526
1527        semnum = array_index_nospec(semnum, nsems);
1528        curr = &sma->sems[semnum];
1529
1530        switch (cmd) {
1531        case GETVAL:
1532                err = curr->semval;
1533                goto out_unlock;
1534        case GETPID:
1535                err = pid_vnr(curr->sempid);
1536                goto out_unlock;
1537        case GETNCNT:
1538                err = count_semcnt(sma, semnum, 0);
1539                goto out_unlock;
1540        case GETZCNT:
1541                err = count_semcnt(sma, semnum, 1);
1542                goto out_unlock;
1543        }
1544
1545out_unlock:
1546        sem_unlock(sma, -1);
1547out_rcu_wakeup:
1548        rcu_read_unlock();
1549        wake_up_q(&wake_q);
1550out_free:
1551        if (sem_io != fast_sem_io)
1552                kvfree(sem_io);
1553        return err;
1554}
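
/*
 * Example (user space, minimal sketch; error handling omitted) exercising
 * the GETALL/SETALL paths above. The caller must define union semun itself,
 * as documented in semctl(2); "semid" is assumed to name a two-semaphore
 * set created elsewhere with semget().
 *
 *	#include <sys/ipc.h>
 *	#include <sys/sem.h>
 *
 *	union semun {
 *		int val;
 *		struct semid_ds *buf;
 *		unsigned short *array;
 *	};
 *
 *	unsigned short vals[2] = { 1, 0 };
 *	union semun arg = { .array = vals };
 *
 *	semctl(semid, 0, SETALL, arg);	(SETALL branch: copies vals in and
 *					 clears all semadj values)
 *	semctl(semid, 0, GETALL, arg);	(GETALL branch: reads semvals back)
 */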
1555
1556static inline unsigned long
1557copy_semid_from_user(struct semid64_ds *out, void __user *buf, int version)
1558{
1559        switch (version) {
1560        case IPC_64:
1561                if (copy_from_user(out, buf, sizeof(*out)))
1562                        return -EFAULT;
1563                return 0;
1564        case IPC_OLD:
1565            {
1566                struct semid_ds tbuf_old;
1567
1568                if (copy_from_user(&tbuf_old, buf, sizeof(tbuf_old)))
1569                        return -EFAULT;
1570
1571                out->sem_perm.uid       = tbuf_old.sem_perm.uid;
1572                out->sem_perm.gid       = tbuf_old.sem_perm.gid;
1573                out->sem_perm.mode      = tbuf_old.sem_perm.mode;
1574
1575                return 0;
1576            }
1577        default:
1578                return -EINVAL;
1579        }
1580}
1581
1582/*
1583 * This function handles some semctl commands which require the rwsem
1584 * to be held in write mode.
1585 * NOTE: no locks may be held on entry; the rwsem is taken inside this function.
1586 */
1587static int semctl_down(struct ipc_namespace *ns, int semid,
1588                       int cmd, struct semid64_ds *semid64)
1589{
1590        struct sem_array *sma;
1591        int err;
1592        struct kern_ipc_perm *ipcp;
1593
1594        down_write(&sem_ids(ns).rwsem);
1595        rcu_read_lock();
1596
1597        ipcp = ipcctl_obtain_check(ns, &sem_ids(ns), semid, cmd,
1598                                      &semid64->sem_perm, 0);
1599        if (IS_ERR(ipcp)) {
1600                err = PTR_ERR(ipcp);
1601                goto out_unlock1;
1602        }
1603
1604        sma = container_of(ipcp, struct sem_array, sem_perm);
1605
1606        err = security_sem_semctl(&sma->sem_perm, cmd);
1607        if (err)
1608                goto out_unlock1;
1609
1610        switch (cmd) {
1611        case IPC_RMID:
1612                sem_lock(sma, NULL, -1);
1613                /* freeary unlocks the ipc object and rcu */
1614                freeary(ns, ipcp);
1615                goto out_up;
1616        case IPC_SET:
1617                sem_lock(sma, NULL, -1);
1618                err = ipc_update_perm(&semid64->sem_perm, ipcp);
1619                if (err)
1620                        goto out_unlock0;
1621                sma->sem_ctime = ktime_get_real_seconds();
1622                break;
1623        default:
1624                err = -EINVAL;
1625                goto out_unlock1;
1626        }
1627
1628out_unlock0:
1629        sem_unlock(sma, -1);
1630out_unlock1:
1631        rcu_read_unlock();
1632out_up:
1633        up_write(&sem_ids(ns).rwsem);
1634        return err;
1635}
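
/*
 * Example (user space, minimal sketch) of the two commands handled above,
 * using the same union semun definition as in semctl(2):
 *
 *	struct semid_ds ds;
 *	union semun arg = { .buf = &ds };
 *
 *	semctl(semid, 0, IPC_STAT, arg);	(fills ds)
 *	ds.sem_perm.mode = 0600;
 *	semctl(semid, 0, IPC_SET, arg);		(IPC_SET branch above)
 *	semctl(semid, 0, IPC_RMID);		(IPC_RMID branch: freeary())
 */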
1636
1637long ksys_semctl(int semid, int semnum, int cmd, unsigned long arg)
1638{
1639        int version;
1640        struct ipc_namespace *ns;
1641        void __user *p = (void __user *)arg;
1642        struct semid64_ds semid64;
1643        int err;
1644
1645        if (semid < 0)
1646                return -EINVAL;
1647
1648        version = ipc_parse_version(&cmd);
1649        ns = current->nsproxy->ipc_ns;
1650
1651        switch (cmd) {
1652        case IPC_INFO:
1653        case SEM_INFO:
1654                return semctl_info(ns, semid, cmd, p);
1655        case IPC_STAT:
1656        case SEM_STAT:
1657        case SEM_STAT_ANY:
1658                err = semctl_stat(ns, semid, cmd, &semid64);
1659                if (err < 0)
1660                        return err;
1661                if (copy_semid_to_user(p, &semid64, version))
1662                        err = -EFAULT;
1663                return err;
1664        case GETALL:
1665        case GETVAL:
1666        case GETPID:
1667        case GETNCNT:
1668        case GETZCNT:
1669        case SETALL:
1670                return semctl_main(ns, semid, semnum, cmd, p);
1671        case SETVAL: {
1672                int val;
1673#if defined(CONFIG_64BIT) && defined(__BIG_ENDIAN)
1674                /* big-endian 64bit */
1675                val = arg >> 32;
1676#else
1677                /* 32bit or little-endian 64bit */
1678                val = arg;
1679#endif
1680                return semctl_setval(ns, semid, semnum, val);
1681        }
1682        case IPC_SET:
1683                if (copy_semid_from_user(&semid64, p, version))
1684                        return -EFAULT;
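		/* fall through */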
1685        case IPC_RMID:
1686                return semctl_down(ns, semid, cmd, &semid64);
1687        default:
1688                return -EINVAL;
1689        }
1690}
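
/*
 * Example (user space, minimal sketch): SETVAL passes its value inside
 * union semun; glibc forwards the union as the "unsigned long arg" seen
 * above, which is why ksys_semctl() must pick the correct 32-bit half on
 * big-endian 64-bit machines.
 *
 *	union semun arg = { .val = 1 };
 *
 *	semctl(semid, 0, SETVAL, arg);
 */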
1691
1692SYSCALL_DEFINE4(semctl, int, semid, int, semnum, int, cmd, unsigned long, arg)
1693{
1694        return ksys_semctl(semid, semnum, cmd, arg);
1695}
1696
1697#ifdef CONFIG_COMPAT
1698
1699struct compat_semid_ds {
1700        struct compat_ipc_perm sem_perm;
1701        compat_time_t sem_otime;
1702        compat_time_t sem_ctime;
1703        compat_uptr_t sem_base;
1704        compat_uptr_t sem_pending;
1705        compat_uptr_t sem_pending_last;
1706        compat_uptr_t undo;
1707        unsigned short sem_nsems;
1708};
1709
1710static int copy_compat_semid_from_user(struct semid64_ds *out, void __user *buf,
1711                                        int version)
1712{
1713        memset(out, 0, sizeof(*out));
1714        if (version == IPC_64) {
1715                struct compat_semid64_ds __user *p = buf;
1716                return get_compat_ipc64_perm(&out->sem_perm, &p->sem_perm);
1717        } else {
1718                struct compat_semid_ds __user *p = buf;
1719                return get_compat_ipc_perm(&out->sem_perm, &p->sem_perm);
1720        }
1721}
1722
1723static int copy_compat_semid_to_user(void __user *buf, struct semid64_ds *in,
1724                                        int version)
1725{
1726        if (version == IPC_64) {
1727                struct compat_semid64_ds v;
1728                memset(&v, 0, sizeof(v));
1729                to_compat_ipc64_perm(&v.sem_perm, &in->sem_perm);
1730                v.sem_otime      = lower_32_bits(in->sem_otime);
1731                v.sem_otime_high = upper_32_bits(in->sem_otime);
1732                v.sem_ctime      = lower_32_bits(in->sem_ctime);
1733                v.sem_ctime_high = upper_32_bits(in->sem_ctime);
1734                v.sem_nsems = in->sem_nsems;
1735                return copy_to_user(buf, &v, sizeof(v));
1736        } else {
1737                struct compat_semid_ds v;
1738                memset(&v, 0, sizeof(v));
1739                to_compat_ipc_perm(&v.sem_perm, &in->sem_perm);
1740                v.sem_otime = in->sem_otime;
1741                v.sem_ctime = in->sem_ctime;
1742                v.sem_nsems = in->sem_nsems;
1743                return copy_to_user(buf, &v, sizeof(v));
1744        }
1745}
1746
1747long compat_ksys_semctl(int semid, int semnum, int cmd, int arg)
1748{
1749        void __user *p = compat_ptr(arg);
1750        struct ipc_namespace *ns;
1751        struct semid64_ds semid64;
1752        int version = compat_ipc_parse_version(&cmd);
1753        int err;
1754
1755        ns = current->nsproxy->ipc_ns;
1756
1757        if (semid < 0)
1758                return -EINVAL;
1759
1760        switch (cmd & (~IPC_64)) {
1761        case IPC_INFO:
1762        case SEM_INFO:
1763                return semctl_info(ns, semid, cmd, p);
1764        case IPC_STAT:
1765        case SEM_STAT:
1766        case SEM_STAT_ANY:
1767                err = semctl_stat(ns, semid, cmd, &semid64);
1768                if (err < 0)
1769                        return err;
1770                if (copy_compat_semid_to_user(p, &semid64, version))
1771                        err = -EFAULT;
1772                return err;
1773        case GETVAL:
1774        case GETPID:
1775        case GETNCNT:
1776        case GETZCNT:
1777        case GETALL:
1778        case SETALL:
1779                return semctl_main(ns, semid, semnum, cmd, p);
1780        case SETVAL:
1781                return semctl_setval(ns, semid, semnum, arg);
1782        case IPC_SET:
1783                if (copy_compat_semid_from_user(&semid64, p, version))
1784                        return -EFAULT;
1785                /* fall through */
1786        case IPC_RMID:
1787                return semctl_down(ns, semid, cmd, &semid64);
1788        default:
1789                return -EINVAL;
1790        }
1791}
1792
1793COMPAT_SYSCALL_DEFINE4(semctl, int, semid, int, semnum, int, cmd, int, arg)
1794{
1795        return compat_ksys_semctl(semid, semnum, cmd, arg);
1796}
1797#endif
1798
1799/* If the task doesn't already have an undo_list, then allocate one
1800 * here.  We guarantee there is only one thread using this undo list,
1801 * and current is THE ONE.
1802 *
1803 * If this allocation and assignment succeeds, but later
1804 * portions of this code fail, there is no need to free the sem_undo_list.
1805 * Just let it stay associated with the task, and it'll be freed later
1806 * at exit time.
1807 *
1808 * This can block, so callers must hold no locks.
1809 */
1810static inline int get_undo_list(struct sem_undo_list **undo_listp)
1811{
1812        struct sem_undo_list *undo_list;
1813
1814        undo_list = current->sysvsem.undo_list;
1815        if (!undo_list) {
1816                undo_list = kzalloc(sizeof(*undo_list), GFP_KERNEL);
1817                if (undo_list == NULL)
1818                        return -ENOMEM;
1819                spin_lock_init(&undo_list->lock);
1820                refcount_set(&undo_list->refcnt, 1);
1821                INIT_LIST_HEAD(&undo_list->list_proc);
1822
1823                current->sysvsem.undo_list = undo_list;
1824        }
1825        *undo_listp = undo_list;
1826        return 0;
1827}
1828
1829static struct sem_undo *__lookup_undo(struct sem_undo_list *ulp, int semid)
1830{
1831        struct sem_undo *un;
1832
1833        list_for_each_entry_rcu(un, &ulp->list_proc, list_proc) {
1834                if (un->semid == semid)
1835                        return un;
1836        }
1837        return NULL;
1838}
1839
1840static struct sem_undo *lookup_undo(struct sem_undo_list *ulp, int semid)
1841{
1842        struct sem_undo *un;
1843
1844        assert_spin_locked(&ulp->lock);
1845
1846        un = __lookup_undo(ulp, semid);
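	/* move the hit to the front of the list (move-to-front caching) */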
1847        if (un) {
1848                list_del_rcu(&un->list_proc);
1849                list_add_rcu(&un->list_proc, &ulp->list_proc);
1850        }
1851        return un;
1852}
1853
1854/**
1855 * find_alloc_undo - lookup (and if not present create) undo array
1856 * @ns: namespace
1857 * @semid: semaphore array id
1858 *
1859 * The function looks up (and if not present creates) the undo structure.
1860 * The size of the undo structure depends on the size of the semaphore
1861 * array, thus the alloc path is not that straightforward.
1862 * Lifetime rules: sem_undo is rcu-protected; on success, the function
1863 * performs an rcu_read_lock().
1864 */
1865static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid)
1866{
1867        struct sem_array *sma;
1868        struct sem_undo_list *ulp;
1869        struct sem_undo *un, *new;
1870        int nsems, error;
1871
1872        error = get_undo_list(&ulp);
1873        if (error)
1874                return ERR_PTR(error);
1875
1876        rcu_read_lock();
1877        spin_lock(&ulp->lock);
1878        un = lookup_undo(ulp, semid);
1879        spin_unlock(&ulp->lock);
1880        if (likely(un != NULL))
1881                goto out;
1882
1883        /* no undo structure around - allocate one. */
1884        /* step 1: figure out the size of the semaphore array */
1885        sma = sem_obtain_object_check(ns, semid);
1886        if (IS_ERR(sma)) {
1887                rcu_read_unlock();
1888                return ERR_CAST(sma);
1889        }
1890
1891        nsems = sma->sem_nsems;
1892        if (!ipc_rcu_getref(&sma->sem_perm)) {
1893                rcu_read_unlock();
1894                un = ERR_PTR(-EIDRM);
1895                goto out;
1896        }
1897        rcu_read_unlock();
1898
1899        /* step 2: allocate new undo structure */
1900        new = kzalloc(sizeof(struct sem_undo) + sizeof(short)*nsems, GFP_KERNEL);
1901        if (!new) {
1902                ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
1903                return ERR_PTR(-ENOMEM);
1904        }
1905
1906        /* step 3: Acquire the lock on semaphore array */
1907        rcu_read_lock();
1908        sem_lock_and_putref(sma);
1909        if (!ipc_valid_object(&sma->sem_perm)) {
1910                sem_unlock(sma, -1);
1911                rcu_read_unlock();
1912                kfree(new);
1913                un = ERR_PTR(-EIDRM);
1914                goto out;
1915        }
1916        spin_lock(&ulp->lock);
1917
1918        /*
1919         * step 4: check for races: did someone else allocate the undo struct?
1920         */
1921        un = lookup_undo(ulp, semid);
1922        if (un) {
1923                kfree(new);
1924                goto success;
1925        }
1926        /* step 5: initialize & link new undo structure */
1927        new->semadj = (short *) &new[1];
1928        new->ulp = ulp;
1929        new->semid = semid;
1930        assert_spin_locked(&ulp->lock);
1931        list_add_rcu(&new->list_proc, &ulp->list_proc);
1932        ipc_assert_locked_object(&sma->sem_perm);
1933        list_add(&new->list_id, &sma->list_id);
1934        un = new;
1935
1936success:
1937        spin_unlock(&ulp->lock);
1938        sem_unlock(sma, -1);
1939out:
1940        return un;
1941}
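
/*
 * Example (user space, minimal sketch): the first semop() with SEM_UNDO on
 * a given semaphore set is what triggers the allocation in find_alloc_undo()
 * above.
 *
 *	struct sembuf sop = { .sem_num = 0, .sem_op = -1, .sem_flg = SEM_UNDO };
 *
 *	semop(semid, &sop, 1);	(allocates the undo entry on first use, then
 *				 accumulates semadj[0] += 1)
 */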
1942
1943static long do_semtimedop(int semid, struct sembuf __user *tsops,
1944                unsigned nsops, const struct timespec64 *timeout)
1945{
1946        int error = -EINVAL;
1947        struct sem_array *sma;
1948        struct sembuf fast_sops[SEMOPM_FAST];
1949        struct sembuf *sops = fast_sops, *sop;
1950        struct sem_undo *un;
1951        int max, locknum;
1952        bool undos = false, alter = false, dupsop = false;
1953        struct sem_queue queue;
1954        unsigned long dup = 0, jiffies_left = 0;
1955        struct ipc_namespace *ns;
1956
1957        ns = current->nsproxy->ipc_ns;
1958
1959        if (nsops < 1 || semid < 0)
1960                return -EINVAL;
1961        if (nsops > ns->sc_semopm)
1962                return -E2BIG;
1963        if (nsops > SEMOPM_FAST) {
1964                sops = kvmalloc_array(nsops, sizeof(*sops), GFP_KERNEL);
1965                if (sops == NULL)
1966                        return -ENOMEM;
1967        }
1968
1969        if (copy_from_user(sops, tsops, nsops * sizeof(*tsops))) {
1970                error = -EFAULT;
1971                goto out_free;
1972        }
1973
1974        if (timeout) {
1975                if (timeout->tv_sec < 0 || timeout->tv_nsec < 0 ||
1976                        timeout->tv_nsec >= 1000000000L) {
1977                        error = -EINVAL;
1978                        goto out_free;
1979                }
1980                jiffies_left = timespec64_to_jiffies(timeout);
1981        }
1982
1983        max = 0;
1984        for (sop = sops; sop < sops + nsops; sop++) {
1985                unsigned long mask = 1ULL << ((sop->sem_num) % BITS_PER_LONG);
1986
1987                if (sop->sem_num >= max)
1988                        max = sop->sem_num;
1989                if (sop->sem_flg & SEM_UNDO)
1990                        undos = true;
1991                if (dup & mask) {
1992                        /*
1993                         * There was a previous alter access that appears
1994                         * to have accessed the same semaphore, thus use
1995                         * the dupsop logic. "appears", because the detection
1996                         * can only check % BITS_PER_LONG.
1997                         */
1998                        dupsop = true;
1999                }
2000                if (sop->sem_op != 0) {
2001                        alter = true;
2002                        dup |= mask;
2003                }
2004        }
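
	/*
	 * Worked example of the detection above: on a 64-bit kernel,
	 * sem_num 1 and sem_num 65 both map to bit 1 (65 % BITS_PER_LONG
	 * == 1), so a semop() altering both is conservatively treated as
	 * dupsop even though the two operations touch different semaphores.
	 */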
2005
2006        if (undos) {
2007                /* On success, find_alloc_undo takes the rcu_read_lock */
2008                un = find_alloc_undo(ns, semid);
2009                if (IS_ERR(un)) {
2010                        error = PTR_ERR(un);
2011                        goto out_free;
2012                }
2013        } else {
2014                un = NULL;
2015                rcu_read_lock();
2016        }
2017
2018        sma = sem_obtain_object_check(ns, semid);
2019        if (IS_ERR(sma)) {
2020                rcu_read_unlock();
2021                error = PTR_ERR(sma);
2022                goto out_free;
2023        }
2024
2025        error = -EFBIG;
2026        if (max >= sma->sem_nsems) {
2027                rcu_read_unlock();
2028                goto out_free;
2029        }
2030
2031        error = -EACCES;
2032        if (ipcperms(ns, &sma->sem_perm, alter ? S_IWUGO : S_IRUGO)) {
2033                rcu_read_unlock();
2034                goto out_free;
2035        }
2036
2037        error = security_sem_semop(&sma->sem_perm, sops, nsops, alter);
2038        if (error) {
2039                rcu_read_unlock();
2040                goto out_free;
2041        }
2042
2043        error = -EIDRM;
2044        locknum = sem_lock(sma, sops, nsops);
2045        /*
2046         * We eventually might perform the following check in a lockless
2047         * fashion, considering ipc_valid_object() locking constraints.
2048         * If nsops == 1 and there is no contention for sem_perm.lock, then
2049         * only a per-semaphore lock is held and it's OK to proceed with the
2050         * check below. More details on the fine grained locking scheme
2051         * entangled here and why it's RMID race safe on comments at sem_lock()
2052         */
2053        if (!ipc_valid_object(&sma->sem_perm))
2054                goto out_unlock_free;
2055        /*
2056         * semid identifiers are not unique - find_alloc_undo may have
2057         * allocated an undo structure that was invalidated by an RMID,
2058         * and a new array has since received the same id. Check and fail.
2059         * This case can be detected checking un->semid. The existence of
2060         * "un" itself is guaranteed by rcu.
2061         */
2062        if (un && un->semid == -1)
2063                goto out_unlock_free;
2064
2065        queue.sops = sops;
2066        queue.nsops = nsops;
2067        queue.undo = un;
2068        queue.pid = task_tgid(current);
2069        queue.alter = alter;
2070        queue.dupsop = dupsop;
2071
2072        error = perform_atomic_semop(sma, &queue);
2073        if (error == 0) { /* non-blocking successful path */
2074                DEFINE_WAKE_Q(wake_q);
2075
2076                /*
2077                 * If the operation was successful, then do
2078                 * the required updates.
2079                 */
2080                if (alter)
2081                        do_smart_update(sma, sops, nsops, 1, &wake_q);
2082                else
2083                        set_semotime(sma, sops);
2084
2085                sem_unlock(sma, locknum);
2086                rcu_read_unlock();
2087                wake_up_q(&wake_q);
2088
2089                goto out_free;
2090        }
2091        if (error < 0) /* non-blocking error path */
2092                goto out_unlock_free;
2093
2094        /*
2095         * We need to sleep on this operation, so we put the current
2096         * task into the pending queue and go to sleep.
2097         */
2098        if (nsops == 1) {
2099                struct sem *curr;
2100                int idx = array_index_nospec(sops->sem_num, sma->sem_nsems);
2101                curr = &sma->sems[idx];
2102
2103                if (alter) {
2104                        if (sma->complex_count) {
2105                                list_add_tail(&queue.list,
2106                                                &sma->pending_alter);
2107                        } else {
2109                                list_add_tail(&queue.list,
2110                                                &curr->pending_alter);
2111                        }
2112                } else {
2113                        list_add_tail(&queue.list, &curr->pending_const);
2114                }
2115        } else {
2116                if (!sma->complex_count)
2117                        merge_queues(sma);
2118
2119                if (alter)
2120                        list_add_tail(&queue.list, &sma->pending_alter);
2121                else
2122                        list_add_tail(&queue.list, &sma->pending_const);
2123
2124                sma->complex_count++;
2125        }
2126
2127        do {
2128                WRITE_ONCE(queue.status, -EINTR);
2129                queue.sleeper = current;
2130
2131                __set_current_state(TASK_INTERRUPTIBLE);
2132                sem_unlock(sma, locknum);
2133                rcu_read_unlock();
2134
2135                if (timeout)
2136                        jiffies_left = schedule_timeout(jiffies_left);
2137                else
2138                        schedule();
2139
2140                /*
2141                 * fastpath: the semop has completed, either successfully or
2142                 * not; which of the two is, from the syscall pov, quite
2143                 * irrelevant to us at this point - we're done.
2144                 *
2145                 * We _do_ care, nonetheless, about being awoken by a signal or
2146                 * spuriously.  The queue.status is checked again in the
2147                 * slowpath (aka after taking sem_lock), such that we can detect
2148                 * scenarios where we were awakened externally, during the
2149                 * window between wake_q_add() and wake_up_q().
2150                 */
2151                error = READ_ONCE(queue.status);
2152                if (error != -EINTR) {
2153                        /*
2154                         * User space could assume that semop() is a memory
2155                         * barrier: without the mb(), the CPU could
2156                         * speculatively read stale user-space data that was
2157                         * overwritten by the previous owner of the semaphore.
2158                         */
2159                        smp_mb();
2160                        goto out_free;
2161                }
2162
2163                rcu_read_lock();
2164                locknum = sem_lock(sma, sops, nsops);
2165
2166                if (!ipc_valid_object(&sma->sem_perm))
2167                        goto out_unlock_free;
2168
2169                error = READ_ONCE(queue.status);
2170
2171                /*
2172                 * If queue.status != -EINTR, we were woken up by another process.
2173                 * Leave without unlink_queue(), but with sem_unlock().
2174                 */
2175                if (error != -EINTR)
2176                        goto out_unlock_free;
2177
2178                /*
2179                 * If an interrupt occurred we have to clean up the queue.
2180                 */
2181                if (timeout && jiffies_left == 0)
2182                        error = -EAGAIN;
2183        } while (error == -EINTR && !signal_pending(current)); /* spurious */
2184
2185        unlink_queue(sma, &queue);
2186
2187out_unlock_free:
2188        sem_unlock(sma, locknum);
2189        rcu_read_unlock();
2190out_free:
2191        if (sops != fast_sops)
2192                kvfree(sops);
2193        return error;
2194}
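
/*
 * Example (user space, minimal sketch): decrement semaphore 0, sleeping
 * until it is positive, but give up after two seconds; the -EAGAIN set in
 * the timeout path above surfaces as errno == EAGAIN.
 *
 *	struct sembuf sop = { .sem_num = 0, .sem_op = -1, .sem_flg = 0 };
 *	struct timespec ts = { .tv_sec = 2, .tv_nsec = 0 };
 *
 *	if (semtimedop(semid, &sop, 1, &ts) < 0 && errno == EAGAIN)
 *		(timed out: jiffies_left reached 0 in the sleep loop)
 */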
2195
2196long ksys_semtimedop(int semid, struct sembuf __user *tsops,
2197                     unsigned int nsops, const struct __kernel_timespec __user *timeout)
2198{
2199        if (timeout) {
2200                struct timespec64 ts;
2201                if (get_timespec64(&ts, timeout))
2202                        return -EFAULT;
2203                return do_semtimedop(semid, tsops, nsops, &ts);
2204        }
2205        return do_semtimedop(semid, tsops, nsops, NULL);
2206}
2207
2208SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
2209                unsigned int, nsops, const struct __kernel_timespec __user *, timeout)
2210{
2211        return ksys_semtimedop(semid, tsops, nsops, timeout);
2212}
2213
2214#ifdef CONFIG_COMPAT_32BIT_TIME
2215long compat_ksys_semtimedop(int semid, struct sembuf __user *tsems,
2216                            unsigned int nsops,
2217                            const struct compat_timespec __user *timeout)
2218{
2219        if (timeout) {
2220                struct timespec64 ts;
2221                if (compat_get_timespec64(&ts, timeout))
2222                        return -EFAULT;
2223                return do_semtimedop(semid, tsems, nsops, &ts);
2224        }
2225        return do_semtimedop(semid, tsems, nsops, NULL);
2226}
2227
2228COMPAT_SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsems,
2229                       unsigned int, nsops,
2230                       const struct compat_timespec __user *, timeout)
2231{
2232        return compat_ksys_semtimedop(semid, tsems, nsops, timeout);
2233}
2234#endif
2235
2236SYSCALL_DEFINE3(semop, int, semid, struct sembuf __user *, tsops,
2237                unsigned, nsops)
2238{
2239        return do_semtimedop(semid, tsops, nsops, NULL);
2240}
2241
2242/* If CLONE_SYSVSEM is set, establish sharing of SEM_UNDO state between
2243 * parent and child tasks.
2244 */
2245
2246int copy_semundo(unsigned long clone_flags, struct task_struct *tsk)
2247{
2248        struct sem_undo_list *undo_list;
2249        int error;
2250
2251        if (clone_flags & CLONE_SYSVSEM) {
2252                error = get_undo_list(&undo_list);
2253                if (error)
2254                        return error;
2255                refcount_inc(&undo_list->refcnt);
2256                tsk->sysvsem.undo_list = undo_list;
2257        } else
2258                tsk->sysvsem.undo_list = NULL;
2259
2260        return 0;
2261}
2262
2263/*
2264 * add semadj values to semaphores, free undo structures.
2265 * undo structures are not freed when semaphore arrays are destroyed
2266 * so some of them may be out of date.
2267 * IMPLEMENTATION NOTE: There is some confusion over whether the set of
2268 * adjustments should be applied atomically or not. That is, if we are
2269 * attempting to decrement the semval, should we queue up and wait until
2270 * we can do so legally?
2271 * The original implementation attempted to do this (queue and wait).
2272 * The current implementation does not do so. The POSIX standard
2273 * and SVID should be consulted to determine what behavior is mandated.
2274 */
2275void exit_sem(struct task_struct *tsk)
2276{
2277        struct sem_undo_list *ulp;
2278
2279        ulp = tsk->sysvsem.undo_list;
2280        if (!ulp)
2281                return;
2282        tsk->sysvsem.undo_list = NULL;
2283
2284        if (!refcount_dec_and_test(&ulp->refcnt))
2285                return;
2286
2287        for (;;) {
2288                struct sem_array *sma;
2289                struct sem_undo *un;
2290                int semid, i;
2291                DEFINE_WAKE_Q(wake_q);
2292
2293                cond_resched();
2294
2295                rcu_read_lock();
2296                un = list_entry_rcu(ulp->list_proc.next,
2297                                    struct sem_undo, list_proc);
2298                if (&un->list_proc == &ulp->list_proc) {
2299                        /*
2300                         * We must wait for freeary() before freeing this ulp,
2301                         * in case we raced with the last sem_undo. There is
2302                         * a small window where we exit before freeary() has
2303                         * finished unlocking the sem_undo_list.
2304                         */
2305                        spin_lock(&ulp->lock);
2306                        spin_unlock(&ulp->lock);
2307                        rcu_read_unlock();
2308                        break;
2309                }
2310                spin_lock(&ulp->lock);
2311                semid = un->semid;
2312                spin_unlock(&ulp->lock);
2313
2314                /* exit_sem raced with IPC_RMID, nothing to do */
2315                if (semid == -1) {
2316                        rcu_read_unlock();
2317                        continue;
2318                }
2319
2320                sma = sem_obtain_object_check(tsk->nsproxy->ipc_ns, semid);
2321                /* exit_sem raced with IPC_RMID, nothing to do */
2322                if (IS_ERR(sma)) {
2323                        rcu_read_unlock();
2324                        continue;
2325                }
2326
2327                sem_lock(sma, NULL, -1);
2328                /* exit_sem raced with IPC_RMID, nothing to do */
2329                if (!ipc_valid_object(&sma->sem_perm)) {
2330                        sem_unlock(sma, -1);
2331                        rcu_read_unlock();
2332                        continue;
2333                }
2334                un = __lookup_undo(ulp, semid);
2335                if (un == NULL) {
2336                        /* exit_sem raced with IPC_RMID+semget() that created
2337                         * exactly the same semid. Nothing to do.
2338                         */
2339                        sem_unlock(sma, -1);
2340                        rcu_read_unlock();
2341                        continue;
2342                }
2343
2344                /* remove un from the linked lists */
2345                ipc_assert_locked_object(&sma->sem_perm);
2346                list_del(&un->list_id);
2347
2348                /* we are the last process using this ulp, so acquiring ulp->lock
2349                 * isn't required. Besides that, we are also protected against
2350                 * IPC_RMID as we hold sma->sem_perm lock now
2351                 */
2352                list_del_rcu(&un->list_proc);
2353
2354                /* perform adjustments registered in un */
2355                for (i = 0; i < sma->sem_nsems; i++) {
2356                        struct sem *semaphore = &sma->sems[i];
2357                        if (un->semadj[i]) {
2358                                semaphore->semval += un->semadj[i];
2359                                /*
2360                                 * Range checks of the new semaphore value,
2361                                 * not defined by SUS:
2362                                 * - Some unices ignore the undo entirely
2363                                 *   (e.g. HP UX 11i 11.22, Tru64 V5.1)
2364                                 * - some cap the value (e.g. FreeBSD caps
2365                                 *   at 0, but doesn't enforce SEMVMX)
2366                                 *
2367                                 * Linux caps the semaphore value, both at 0
2368                                 * and at SEMVMX.
2369                                 *
2370                                 *      Manfred <manfred@colorfullife.com>
2371                                 */
2372                                if (semaphore->semval < 0)
2373                                        semaphore->semval = 0;
2374                                if (semaphore->semval > SEMVMX)
2375                                        semaphore->semval = SEMVMX;
2376                                ipc_update_pid(&semaphore->sempid, task_tgid(current));
2377                        }
2378                }
2379                /* maybe some queued-up processes were waiting for this */
2380                do_smart_update(sma, NULL, 0, 1, &wake_q);
2381                sem_unlock(sma, -1);
2382                rcu_read_unlock();
2383                wake_up_q(&wake_q);
2384
2385                kfree_rcu(un, rcu);
2386        }
2387        kfree(ulp);
2388}
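
/*
 * Example (user space, minimal sketch) of the adjustment above: a semaphore
 * decremented under SEM_UNDO is released again when the holder dies.
 *
 *	(parent)	semctl(semid, 0, SETVAL, arg1);		(semval = 1)
 *	(child)		struct sembuf sop = { 0, -1, SEM_UNDO };
 *			semop(semid, &sop, 1);			(semval = 0)
 *			_exit(0);	(exit_sem() applies semadj[0] == +1)
 *	(parent)	semctl(semid, 0, GETVAL);		(returns 1)
 */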
2389
2390#ifdef CONFIG_PROC_FS
2391static int sysvipc_sem_proc_show(struct seq_file *s, void *it)
2392{
2393        struct user_namespace *user_ns = seq_user_ns(s);
2394        struct kern_ipc_perm *ipcp = it;
2395        struct sem_array *sma = container_of(ipcp, struct sem_array, sem_perm);
2396        time64_t sem_otime;
2397
2398        /*
2399         * The proc interface isn't aware of sem_lock(); it calls
2400         * ipc_lock_object() directly (in sysvipc_find_ipc).
2401         * In order to stay compatible with sem_lock(), we must
2402         * enter / leave complex_mode.
2403         */
2404        complexmode_enter(sma);
2405
2406        sem_otime = get_semotime(sma);
2407
2408        seq_printf(s,
2409                   "%10d %10d  %4o %10u %5u %5u %5u %5u %10llu %10llu\n",
2410                   sma->sem_perm.key,
2411                   sma->sem_perm.id,
2412                   sma->sem_perm.mode,
2413                   sma->sem_nsems,
2414                   from_kuid_munged(user_ns, sma->sem_perm.uid),
2415                   from_kgid_munged(user_ns, sma->sem_perm.gid),
2416                   from_kuid_munged(user_ns, sma->sem_perm.cuid),
2417                   from_kgid_munged(user_ns, sma->sem_perm.cgid),
2418                   sem_otime,
2419                   sma->sem_ctime);
2420
2421        complexmode_tryleave(sma);
2422
2423        return 0;
2424}
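
/*
 * The columns printed above correspond, in order, to the header that
 * sem_init() installs for /proc/sysvipc/sem via ipc_init_proc_interface():
 * key, semid, perms, nsems, uid, gid, cuid, cgid, otime, ctime.
 */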
2425#endif
2426