   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * linux/ipc/sem.c
   4 * Copyright (C) 1992 Krishna Balasubramanian
   5 * Copyright (C) 1995 Eric Schenk, Bruno Haible
   6 *
   7 * /proc/sysvipc/sem support (c) 1999 Dragos Acostachioaie <dragos@iname.com>
   8 *
   9 * SMP-threaded, sysctl's added
  10 * (c) 1999 Manfred Spraul <manfred@colorfullife.com>
  11 * Enforced range limit on SEM_UNDO
  12 * (c) 2001 Red Hat Inc
  13 * Lockless wakeup
  14 * (c) 2003 Manfred Spraul <manfred@colorfullife.com>
  15 * (c) 2016 Davidlohr Bueso <dave@stgolabs.net>
  16 * Further wakeup optimizations, documentation
  17 * (c) 2010 Manfred Spraul <manfred@colorfullife.com>
  18 *
  19 * support for audit of ipc object properties and permission changes
  20 * Dustin Kirkland <dustin.kirkland@us.ibm.com>
  21 *
  22 * namespaces support
  23 * OpenVZ, SWsoft Inc.
  24 * Pavel Emelianov <xemul@openvz.org>
  25 *
  26 * Implementation notes: (May 2010)
  27 * This file implements System V semaphores.
  28 *
  29 * User space visible behavior:
  30 * - FIFO ordering for semop() operations (just FIFO, not starvation
  31 *   protection)
  32 * - multiple semaphore operations that alter the same semaphore in
  33 *   one semop() are handled.
  34 * - sem_ctime (time of last semctl()) is updated in the IPC_SET, SETVAL and
  35 *   SETALL calls.
  36 * - two Linux specific semctl() commands: SEM_STAT, SEM_INFO.
  37 * - undo adjustments at process exit are limited to 0..SEMVMX.
   38 * - namespaces are supported.
   39 * - SEMMSL, SEMMNS, SEMOPM and SEMMNI can be configured at runtime by writing
  40 *   to /proc/sys/kernel/sem.
  41 * - statistics about the usage are reported in /proc/sysvipc/sem.
  42 *
  43 * Internals:
  44 * - scalability:
  45 *   - all global variables are read-mostly.
  46 *   - semop() calls and semctl(RMID) are synchronized by RCU.
  47 *   - most operations do write operations (actually: spin_lock calls) to
  48 *     the per-semaphore array structure.
  49 *   Thus: Perfect SMP scaling between independent semaphore arrays.
  50 *         If multiple semaphores in one array are used, then cache line
   51 *         thrashing on the semaphore array spinlock will limit the scaling.
  52 * - semncnt and semzcnt are calculated on demand in count_semcnt()
  53 * - the task that performs a successful semop() scans the list of all
  54 *   sleeping tasks and completes any pending operations that can be fulfilled.
  55 *   Semaphores are actively given to waiting tasks (necessary for FIFO).
  56 *   (see update_queue())
  57 * - To improve the scalability, the actual wake-up calls are performed after
  58 *   dropping all locks. (see wake_up_sem_queue_prepare())
   59 * - All work is done by the waker; the woken-up task does not have to do
   60 *   anything - not even acquire a lock or drop a refcount.
   61 * - A woken-up task may not even touch the semaphore array anymore; it may
   62 *   already have been destroyed by a semctl(RMID).
  63 * - UNDO values are stored in an array (one per process and per
  64 *   semaphore array, lazily allocated). For backwards compatibility, multiple
  65 *   modes for the UNDO variables are supported (per process, per thread)
  66 *   (see copy_semundo, CLONE_SYSVSEM)
  67 * - There are two lists of the pending operations: a per-array list
   68 *   and a per-semaphore list (stored in the array). This makes it possible
   69 *   to achieve FIFO ordering without always scanning all pending operations.
  70 *   The worst-case behavior is nevertheless O(N^2) for N wakeups.
  71 */
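/*
 * Illustrative user-space sketch (not part of the kernel sources): a
 * binary semaphore built on the behavior described above. Error handling
 * is omitted.
 *
 *	struct sembuf down = { .sem_num = 0, .sem_op = -1, .sem_flg = SEM_UNDO };
 *	struct sembuf up   = { .sem_num = 0, .sem_op = +1, .sem_flg = SEM_UNDO };
 *	int id = semget(IPC_PRIVATE, 1, IPC_CREAT | 0600);
 *
 *	semctl(id, 0, SETVAL, 1);	// semval = 1, updates sem_ctime
 *	semop(id, &down, 1);		// may sleep until semval > 0 (FIFO)
 *	// ... critical section ...
 *	semop(id, &up, 1);		// the waker hands the semaphore to the
 *					// next sleeper, see update_queue()
 *	semctl(id, 0, IPC_RMID, 0);	// remaining sleepers fail with EIDRM
 */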
  72
  73#include <linux/compat.h>
  74#include <linux/slab.h>
  75#include <linux/spinlock.h>
  76#include <linux/init.h>
  77#include <linux/proc_fs.h>
  78#include <linux/time.h>
  79#include <linux/security.h>
  80#include <linux/syscalls.h>
  81#include <linux/audit.h>
  82#include <linux/capability.h>
  83#include <linux/seq_file.h>
  84#include <linux/rwsem.h>
  85#include <linux/nsproxy.h>
  86#include <linux/ipc_namespace.h>
  87#include <linux/sched/wake_q.h>
  88#include <linux/nospec.h>
  89#include <linux/rhashtable.h>
  90
  91#include <linux/uaccess.h>
  92#include "util.h"
  93
  94/* One semaphore structure for each semaphore in the system. */
  95struct sem {
  96        int     semval;         /* current value */
  97        /*
  98         * PID of the process that last modified the semaphore. For
  99         * Linux, specifically these are:
 100         *  - semop
 101         *  - semctl, via SETVAL and SETALL.
 102         *  - at task exit when performing undo adjustments (see exit_sem).
 103         */
 104        struct pid *sempid;
 105        spinlock_t      lock;   /* spinlock for fine-grained semtimedop */
 106        struct list_head pending_alter; /* pending single-sop operations */
 107                                        /* that alter the semaphore */
 108        struct list_head pending_const; /* pending single-sop operations */
 109                                        /* that do not alter the semaphore*/
 110        time64_t         sem_otime;     /* candidate for sem_otime */
 111} ____cacheline_aligned_in_smp;
 112
 113/* One sem_array data structure for each set of semaphores in the system. */
 114struct sem_array {
 115        struct kern_ipc_perm    sem_perm;       /* permissions .. see ipc.h */
 116        time64_t                sem_ctime;      /* create/last semctl() time */
 117        struct list_head        pending_alter;  /* pending operations */
 118                                                /* that alter the array */
 119        struct list_head        pending_const;  /* pending complex operations */
 120                                                /* that do not alter semvals */
 121        struct list_head        list_id;        /* undo requests on this array */
 122        int                     sem_nsems;      /* no. of semaphores in array */
 123        int                     complex_count;  /* pending complex operations */
 124        unsigned int            use_global_lock;/* >0: global lock required */
 125
 126        struct sem              sems[];
 127} __randomize_layout;
 128
 129/* One queue for each sleeping process in the system. */
 130struct sem_queue {
 131        struct list_head        list;    /* queue of pending operations */
 132        struct task_struct      *sleeper; /* this process */
 133        struct sem_undo         *undo;   /* undo structure */
 134        struct pid              *pid;    /* process id of requesting process */
 135        int                     status;  /* completion status of operation */
 136        struct sembuf           *sops;   /* array of pending operations */
 137        struct sembuf           *blocking; /* the operation that blocked */
 138        int                     nsops;   /* number of operations */
 139        bool                    alter;   /* does *sops alter the array? */
  140        bool                    dupsop;  /* multiple sops on the same sem_num */
 141};
 142
 143/* Each task has a list of undo requests. They are executed automatically
 144 * when the process exits.
 145 */
 146struct sem_undo {
 147        struct list_head        list_proc;      /* per-process list: *
 148                                                 * all undos from one process
 149                                                 * rcu protected */
 150        struct rcu_head         rcu;            /* rcu struct for sem_undo */
 151        struct sem_undo_list    *ulp;           /* back ptr to sem_undo_list */
 152        struct list_head        list_id;        /* per semaphore array list:
 153                                                 * all undos for one array */
 154        int                     semid;          /* semaphore set identifier */
 155        short                   *semadj;        /* array of adjustments */
 156                                                /* one per semaphore */
 157};
 158
 159/* sem_undo_list controls shared access to the list of sem_undo structures
  160 * that may be shared among all tasks of a CLONE_SYSVSEM task group.
 161 */
 162struct sem_undo_list {
 163        refcount_t              refcnt;
 164        spinlock_t              lock;
 165        struct list_head        list_proc;
 166};
 167
 168
 169#define sem_ids(ns)     ((ns)->ids[IPC_SEM_IDS])
 170
 171static int newary(struct ipc_namespace *, struct ipc_params *);
 172static void freeary(struct ipc_namespace *, struct kern_ipc_perm *);
 173#ifdef CONFIG_PROC_FS
 174static int sysvipc_sem_proc_show(struct seq_file *s, void *it);
 175#endif
 176
 177#define SEMMSL_FAST     256 /* 512 bytes on stack */
 178#define SEMOPM_FAST     64  /* ~ 372 bytes on stack */
 179
 180/*
 181 * Switching from the mode suitable for simple ops
 182 * to the mode for complex ops is costly. Therefore:
 183 * use some hysteresis
 184 */
 185#define USE_GLOBAL_LOCK_HYSTERESIS      10
 186
 187/*
 188 * Locking:
 189 * a) global sem_lock() for read/write
  190 *      sem_undo.list_id,
  191 *      sem_array.complex_count,
  192 *      sem_array.pending{_alter,_const},
  193 *      sem_array.list_id
 194 *
 195 * b) global or semaphore sem_lock() for read/write:
 196 *      sem_array.sems[i].pending_{const,alter}:
 197 *
 198 * c) special:
 199 *      sem_undo_list.list_proc:
 200 *      * undo_list->lock for write
 201 *      * rcu for read
 202 *      use_global_lock:
 203 *      * global sem_lock() for write
 204 *      * either local or global sem_lock() for read.
 205 *
 206 * Memory ordering:
 207 * Most ordering is enforced by using spin_lock() and spin_unlock().
 208 *
 209 * Exceptions:
 210 * 1) use_global_lock: (SEM_BARRIER_1)
 211 * Setting it from non-zero to 0 is a RELEASE, this is ensured by
 212 * using smp_store_release(): Immediately after setting it to 0,
 213 * a simple op can start.
 214 * Testing if it is non-zero is an ACQUIRE, this is ensured by using
 215 * smp_load_acquire().
  216 * Setting it from 0 to non-zero must be ordered with regard to
 217 * this smp_load_acquire(), this is guaranteed because the smp_load_acquire()
 218 * is inside a spin_lock() and after a write from 0 to non-zero a
 219 * spin_lock()+spin_unlock() is done.
 220 *
 221 * 2) queue.status: (SEM_BARRIER_2)
 222 * Initialization is done while holding sem_lock(), so no further barrier is
 223 * required.
 224 * Setting it to a result code is a RELEASE, this is ensured by both a
 225 * smp_store_release() (for case a) and while holding sem_lock()
 226 * (for case b).
  227 * The ACQUIRE when reading the result code without holding sem_lock() is
 228 * achieved by using READ_ONCE() + smp_acquire__after_ctrl_dep().
 229 * (case a above).
 230 * Reading the result code while holding sem_lock() needs no further barriers,
 231 * the locks inside sem_lock() enforce ordering (case b above)
 232 *
 233 * 3) current->state:
 234 * current->state is set to TASK_INTERRUPTIBLE while holding sem_lock().
 235 * The wakeup is handled using the wake_q infrastructure. wake_q wakeups may
 236 * happen immediately after calling wake_q_add. As wake_q_add_safe() is called
 237 * when holding sem_lock(), no further barriers are required.
 238 *
 239 * See also ipc/mqueue.c for more details on the covered races.
 240 */
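/*
 * Sketch of the SEM_BARRIER_1 pairing described above (writer side is in
 * complexmode_tryleave(), reader side in sem_lock() below):
 *
 *	// writer, holding sem_perm.lock:
 *	smp_store_release(&sma->use_global_lock, 0);
 *
 *	// reader, simple-op fast path:
 *	spin_lock(&sem->lock);
 *	if (!smp_load_acquire(&sma->use_global_lock))
 *		return sops->sem_num;	// per-semaphore lock suffices
 *
 * The RELEASE/ACQUIRE pair guarantees that everything written while in
 * global-lock mode is visible to a simple op that observes the 0.
 */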
 241
 242#define sc_semmsl       sem_ctls[0]
 243#define sc_semmns       sem_ctls[1]
 244#define sc_semopm       sem_ctls[2]
 245#define sc_semmni       sem_ctls[3]
 246
 247void sem_init_ns(struct ipc_namespace *ns)
 248{
 249        ns->sc_semmsl = SEMMSL;
 250        ns->sc_semmns = SEMMNS;
 251        ns->sc_semopm = SEMOPM;
 252        ns->sc_semmni = SEMMNI;
 253        ns->used_sems = 0;
 254        ipc_init_ids(&ns->ids[IPC_SEM_IDS]);
 255}
 256
 257#ifdef CONFIG_IPC_NS
 258void sem_exit_ns(struct ipc_namespace *ns)
 259{
 260        free_ipcs(ns, &sem_ids(ns), freeary);
 261        idr_destroy(&ns->ids[IPC_SEM_IDS].ipcs_idr);
 262        rhashtable_destroy(&ns->ids[IPC_SEM_IDS].key_ht);
 263}
 264#endif
 265
 266void __init sem_init(void)
 267{
 268        sem_init_ns(&init_ipc_ns);
 269        ipc_init_proc_interface("sysvipc/sem",
 270                                "       key      semid perms      nsems   uid   gid  cuid  cgid      otime      ctime\n",
 271                                IPC_SEM_IDS, sysvipc_sem_proc_show);
 272}
 273
 274/**
 275 * unmerge_queues - unmerge queues, if possible.
 276 * @sma: semaphore array
 277 *
 278 * The function unmerges the wait queues if complex_count is 0.
 279 * It must be called prior to dropping the global semaphore array lock.
 280 */
 281static void unmerge_queues(struct sem_array *sma)
 282{
 283        struct sem_queue *q, *tq;
 284
 285        /* complex operations still around? */
 286        if (sma->complex_count)
 287                return;
 288        /*
 289         * We will switch back to simple mode.
 290         * Move all pending operation back into the per-semaphore
 291         * queues.
 292         */
 293        list_for_each_entry_safe(q, tq, &sma->pending_alter, list) {
 294                struct sem *curr;
 295                curr = &sma->sems[q->sops[0].sem_num];
 296
 297                list_add_tail(&q->list, &curr->pending_alter);
 298        }
 299        INIT_LIST_HEAD(&sma->pending_alter);
 300}
 301
 302/**
 303 * merge_queues - merge single semop queues into global queue
 304 * @sma: semaphore array
 305 *
 306 * This function merges all per-semaphore queues into the global queue.
 307 * It is necessary to achieve FIFO ordering for the pending single-sop
 308 * operations when a multi-semop operation must sleep.
 309 * Only the alter operations must be moved, the const operations can stay.
 310 */
 311static void merge_queues(struct sem_array *sma)
 312{
 313        int i;
 314        for (i = 0; i < sma->sem_nsems; i++) {
 315                struct sem *sem = &sma->sems[i];
 316
 317                list_splice_init(&sem->pending_alter, &sma->pending_alter);
 318        }
 319}
 320
 321static void sem_rcu_free(struct rcu_head *head)
 322{
 323        struct kern_ipc_perm *p = container_of(head, struct kern_ipc_perm, rcu);
 324        struct sem_array *sma = container_of(p, struct sem_array, sem_perm);
 325
 326        security_sem_free(&sma->sem_perm);
 327        kvfree(sma);
 328}
 329
 330/*
 331 * Enter the mode suitable for non-simple operations:
 332 * Caller must own sem_perm.lock.
 333 */
 334static void complexmode_enter(struct sem_array *sma)
 335{
 336        int i;
 337        struct sem *sem;
 338
 339        if (sma->use_global_lock > 0)  {
  340                /*
  341                 * We are already in global lock mode.
  342                 * Nothing to do, just reset the counter that
  343                 * controls when we may return to simple mode.
  344                 */
 345                sma->use_global_lock = USE_GLOBAL_LOCK_HYSTERESIS;
 346                return;
 347        }
 348        sma->use_global_lock = USE_GLOBAL_LOCK_HYSTERESIS;
 349
 350        for (i = 0; i < sma->sem_nsems; i++) {
 351                sem = &sma->sems[i];
 352                spin_lock(&sem->lock);
 353                spin_unlock(&sem->lock);
 354        }
 355}
 356
 357/*
 358 * Try to leave the mode that disallows simple operations:
 359 * Caller must own sem_perm.lock.
 360 */
 361static void complexmode_tryleave(struct sem_array *sma)
 362{
 363        if (sma->complex_count)  {
 364                /* Complex ops are sleeping.
 365                 * We must stay in complex mode
 366                 */
 367                return;
 368        }
 369        if (sma->use_global_lock == 1) {
 370
 371                /* See SEM_BARRIER_1 for purpose/pairing */
 372                smp_store_release(&sma->use_global_lock, 0);
 373        } else {
 374                sma->use_global_lock--;
 375        }
 376}
 377
 378#define SEM_GLOBAL_LOCK (-1)
 379/*
 380 * If the request contains only one semaphore operation, and there are
 381 * no complex transactions pending, lock only the semaphore involved.
 382 * Otherwise, lock the entire semaphore array, since we either have
 383 * multiple semaphores in our own semops, or we need to look at
 384 * semaphores from other pending complex operations.
 385 */
 386static inline int sem_lock(struct sem_array *sma, struct sembuf *sops,
 387                              int nsops)
 388{
 389        struct sem *sem;
 390        int idx;
 391
 392        if (nsops != 1) {
 393                /* Complex operation - acquire a full lock */
 394                ipc_lock_object(&sma->sem_perm);
 395
 396                /* Prevent parallel simple ops */
 397                complexmode_enter(sma);
 398                return SEM_GLOBAL_LOCK;
 399        }
 400
 401        /*
 402         * Only one semaphore affected - try to optimize locking.
 403         * Optimized locking is possible if no complex operation
 404         * is either enqueued or processed right now.
 405         *
  406         * Both facts are tracked by use_global_lock.
 407         */
 408        idx = array_index_nospec(sops->sem_num, sma->sem_nsems);
 409        sem = &sma->sems[idx];
 410
 411        /*
 412         * Initial check for use_global_lock. Just an optimization,
 413         * no locking, no memory barrier.
 414         */
 415        if (!sma->use_global_lock) {
 416                /*
 417                 * It appears that no complex operation is around.
 418                 * Acquire the per-semaphore lock.
 419                 */
 420                spin_lock(&sem->lock);
 421
 422                /* see SEM_BARRIER_1 for purpose/pairing */
 423                if (!smp_load_acquire(&sma->use_global_lock)) {
 424                        /* fast path successful! */
 425                        return sops->sem_num;
 426                }
 427                spin_unlock(&sem->lock);
 428        }
 429
 430        /* slow path: acquire the full lock */
 431        ipc_lock_object(&sma->sem_perm);
 432
 433        if (sma->use_global_lock == 0) {
 434                /*
 435                 * The use_global_lock mode ended while we waited for
 436                 * sma->sem_perm.lock. Thus we must switch to locking
 437                 * with sem->lock.
 438                 * Unlike in the fast path, there is no need to recheck
 439                 * sma->use_global_lock after we have acquired sem->lock:
 440                 * We own sma->sem_perm.lock, thus use_global_lock cannot
 441                 * change.
 442                 */
 443                spin_lock(&sem->lock);
 444
 445                ipc_unlock_object(&sma->sem_perm);
 446                return sops->sem_num;
 447        } else {
 448                /*
 449                 * Not a false alarm, thus continue to use the global lock
 450                 * mode. No need for complexmode_enter(), this was done by
  451                 * the caller that has set use_global_lock to non-zero.
 452                 */
 453                return SEM_GLOBAL_LOCK;
 454        }
 455}
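/*
 * Typical calling pattern (cf. the semctl helpers below): sem_lock() is
 * entered under rcu_read_lock() and its return value must be passed back
 * to sem_unlock():
 *
 *	rcu_read_lock();
 *	sma = sem_obtain_object_check(ns, semid);
 *	...
 *	locknum = sem_lock(sma, sops, nsops);
 *	... operate on sma ...
 *	sem_unlock(sma, locknum);
 *	rcu_read_unlock();
 */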
 456
 457static inline void sem_unlock(struct sem_array *sma, int locknum)
 458{
 459        if (locknum == SEM_GLOBAL_LOCK) {
 460                unmerge_queues(sma);
 461                complexmode_tryleave(sma);
 462                ipc_unlock_object(&sma->sem_perm);
 463        } else {
 464                struct sem *sem = &sma->sems[locknum];
 465                spin_unlock(&sem->lock);
 466        }
 467}
 468
 469/*
 470 * sem_lock_(check_) routines are called in the paths where the rwsem
 471 * is not held.
 472 *
 473 * The caller holds the RCU read lock.
 474 */
 475static inline struct sem_array *sem_obtain_object(struct ipc_namespace *ns, int id)
 476{
 477        struct kern_ipc_perm *ipcp = ipc_obtain_object_idr(&sem_ids(ns), id);
 478
 479        if (IS_ERR(ipcp))
 480                return ERR_CAST(ipcp);
 481
 482        return container_of(ipcp, struct sem_array, sem_perm);
 483}
 484
 485static inline struct sem_array *sem_obtain_object_check(struct ipc_namespace *ns,
 486                                                        int id)
 487{
 488        struct kern_ipc_perm *ipcp = ipc_obtain_object_check(&sem_ids(ns), id);
 489
 490        if (IS_ERR(ipcp))
 491                return ERR_CAST(ipcp);
 492
 493        return container_of(ipcp, struct sem_array, sem_perm);
 494}
 495
 496static inline void sem_lock_and_putref(struct sem_array *sma)
 497{
 498        sem_lock(sma, NULL, -1);
 499        ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
 500}
 501
 502static inline void sem_rmid(struct ipc_namespace *ns, struct sem_array *s)
 503{
 504        ipc_rmid(&sem_ids(ns), &s->sem_perm);
 505}
 506
 507static struct sem_array *sem_alloc(size_t nsems)
 508{
 509        struct sem_array *sma;
 510
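        /*
         * Reject nsems values for which the total allocation size,
         * header plus flexible sems[] array, would exceed INT_MAX.
         */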
 511        if (nsems > (INT_MAX - sizeof(*sma)) / sizeof(sma->sems[0]))
 512                return NULL;
 513
 514        sma = kvzalloc(struct_size(sma, sems, nsems), GFP_KERNEL);
 515        if (unlikely(!sma))
 516                return NULL;
 517
 518        return sma;
 519}
 520
 521/**
 522 * newary - Create a new semaphore set
 523 * @ns: namespace
 524 * @params: ptr to the structure that contains key, semflg and nsems
 525 *
 526 * Called with sem_ids.rwsem held (as a writer)
 527 */
 528static int newary(struct ipc_namespace *ns, struct ipc_params *params)
 529{
 530        int retval;
 531        struct sem_array *sma;
 532        key_t key = params->key;
 533        int nsems = params->u.nsems;
 534        int semflg = params->flg;
 535        int i;
 536
 537        if (!nsems)
 538                return -EINVAL;
 539        if (ns->used_sems + nsems > ns->sc_semmns)
 540                return -ENOSPC;
 541
 542        sma = sem_alloc(nsems);
 543        if (!sma)
 544                return -ENOMEM;
 545
 546        sma->sem_perm.mode = (semflg & S_IRWXUGO);
 547        sma->sem_perm.key = key;
 548
 549        sma->sem_perm.security = NULL;
 550        retval = security_sem_alloc(&sma->sem_perm);
 551        if (retval) {
 552                kvfree(sma);
 553                return retval;
 554        }
 555
 556        for (i = 0; i < nsems; i++) {
 557                INIT_LIST_HEAD(&sma->sems[i].pending_alter);
 558                INIT_LIST_HEAD(&sma->sems[i].pending_const);
 559                spin_lock_init(&sma->sems[i].lock);
 560        }
 561
 562        sma->complex_count = 0;
 563        sma->use_global_lock = USE_GLOBAL_LOCK_HYSTERESIS;
 564        INIT_LIST_HEAD(&sma->pending_alter);
 565        INIT_LIST_HEAD(&sma->pending_const);
 566        INIT_LIST_HEAD(&sma->list_id);
 567        sma->sem_nsems = nsems;
 568        sma->sem_ctime = ktime_get_real_seconds();
 569
 570        /* ipc_addid() locks sma upon success. */
 571        retval = ipc_addid(&sem_ids(ns), &sma->sem_perm, ns->sc_semmni);
 572        if (retval < 0) {
 573                ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
 574                return retval;
 575        }
 576        ns->used_sems += nsems;
 577
 578        sem_unlock(sma, -1);
 579        rcu_read_unlock();
 580
 581        return sma->sem_perm.id;
 582}
 583
 584
 585/*
 586 * Called with sem_ids.rwsem and ipcp locked.
 587 */
 588static inline int sem_more_checks(struct kern_ipc_perm *ipcp,
 589                                struct ipc_params *params)
 590{
 591        struct sem_array *sma;
 592
 593        sma = container_of(ipcp, struct sem_array, sem_perm);
 594        if (params->u.nsems > sma->sem_nsems)
 595                return -EINVAL;
 596
 597        return 0;
 598}
 599
 600long ksys_semget(key_t key, int nsems, int semflg)
 601{
 602        struct ipc_namespace *ns;
 603        static const struct ipc_ops sem_ops = {
 604                .getnew = newary,
 605                .associate = security_sem_associate,
 606                .more_checks = sem_more_checks,
 607        };
 608        struct ipc_params sem_params;
 609
 610        ns = current->nsproxy->ipc_ns;
 611
 612        if (nsems < 0 || nsems > ns->sc_semmsl)
 613                return -EINVAL;
 614
 615        sem_params.key = key;
 616        sem_params.flg = semflg;
 617        sem_params.u.nsems = nsems;
 618
 619        return ipcget(ns, &sem_ids(ns), &sem_ops, &sem_params);
 620}
 621
 622SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
 623{
 624        return ksys_semget(key, nsems, semflg);
 625}
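/*
 * Illustrative user-space view: semget() either creates a new set via
 * newary() or opens an existing one, in which case sem_more_checks()
 * rejects requests for more semaphores than the set contains:
 *
 *	int id = semget(key, 4, IPC_CREAT | 0600);	// create with 4 sems
 *	id = semget(key, 2, 0600);			// ok: 2 <= 4
 *	id = semget(key, 8, 0600);			// -1, errno == EINVAL
 */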
 626
 627/**
 628 * perform_atomic_semop[_slow] - Attempt to perform semaphore
 629 *                               operations on a given array.
 630 * @sma: semaphore array
 631 * @q: struct sem_queue that describes the operation
 632 *
  633 * Whether the caller blocks depends on the value of each semaphore
  634 * operation (sem_op):
  635 *
  636 *  (1) >0 never blocks.
  637 *  (2)  0 (wait-for-zero operation): blocks if semval is non-zero.
  638 *  (3) <0 blocks if decrementing semval would make it smaller than zero.
 639 *
 640 * Returns 0 if the operation was possible.
 641 * Returns 1 if the operation is impossible, the caller must sleep.
 642 * Returns <0 for error codes.
 643 */
 644static int perform_atomic_semop_slow(struct sem_array *sma, struct sem_queue *q)
 645{
 646        int result, sem_op, nsops;
 647        struct pid *pid;
 648        struct sembuf *sop;
 649        struct sem *curr;
 650        struct sembuf *sops;
 651        struct sem_undo *un;
 652
 653        sops = q->sops;
 654        nsops = q->nsops;
 655        un = q->undo;
 656
 657        for (sop = sops; sop < sops + nsops; sop++) {
 658                int idx = array_index_nospec(sop->sem_num, sma->sem_nsems);
 659                curr = &sma->sems[idx];
 660                sem_op = sop->sem_op;
 661                result = curr->semval;
 662
 663                if (!sem_op && result)
 664                        goto would_block;
 665
 666                result += sem_op;
 667                if (result < 0)
 668                        goto would_block;
 669                if (result > SEMVMX)
 670                        goto out_of_range;
 671
 672                if (sop->sem_flg & SEM_UNDO) {
 673                        int undo = un->semadj[sop->sem_num] - sem_op;
 674                        /* Exceeding the undo range is an error. */
 675                        if (undo < (-SEMAEM - 1) || undo > SEMAEM)
 676                                goto out_of_range;
 677                        un->semadj[sop->sem_num] = undo;
 678                }
 679
 680                curr->semval = result;
 681        }
 682
 683        sop--;
 684        pid = q->pid;
 685        while (sop >= sops) {
 686                ipc_update_pid(&sma->sems[sop->sem_num].sempid, pid);
 687                sop--;
 688        }
 689
 690        return 0;
 691
 692out_of_range:
 693        result = -ERANGE;
 694        goto undo;
 695
 696would_block:
 697        q->blocking = sop;
 698
 699        if (sop->sem_flg & IPC_NOWAIT)
 700                result = -EAGAIN;
 701        else
 702                result = 1;
 703
 704undo:
 705        sop--;
 706        while (sop >= sops) {
 707                sem_op = sop->sem_op;
 708                sma->sems[sop->sem_num].semval -= sem_op;
 709                if (sop->sem_flg & SEM_UNDO)
 710                        un->semadj[sop->sem_num] += sem_op;
 711                sop--;
 712        }
 713
 714        return result;
 715}
 716
 717static int perform_atomic_semop(struct sem_array *sma, struct sem_queue *q)
 718{
 719        int result, sem_op, nsops;
 720        struct sembuf *sop;
 721        struct sem *curr;
 722        struct sembuf *sops;
 723        struct sem_undo *un;
 724
 725        sops = q->sops;
 726        nsops = q->nsops;
 727        un = q->undo;
 728
 729        if (unlikely(q->dupsop))
 730                return perform_atomic_semop_slow(sma, q);
 731
  732        /*
  733         * We scan the semaphore set twice: first to check that the entire
  734         * operation can succeed, thereby avoiding pointless writes to
  735         * shared memory that would have to be undone again in order to
  736         * block until the operations can go through.
  737         */
 738        for (sop = sops; sop < sops + nsops; sop++) {
 739                int idx = array_index_nospec(sop->sem_num, sma->sem_nsems);
 740
 741                curr = &sma->sems[idx];
 742                sem_op = sop->sem_op;
 743                result = curr->semval;
 744
 745                if (!sem_op && result)
 746                        goto would_block; /* wait-for-zero */
 747
 748                result += sem_op;
 749                if (result < 0)
 750                        goto would_block;
 751
 752                if (result > SEMVMX)
 753                        return -ERANGE;
 754
 755                if (sop->sem_flg & SEM_UNDO) {
 756                        int undo = un->semadj[sop->sem_num] - sem_op;
 757
 758                        /* Exceeding the undo range is an error. */
 759                        if (undo < (-SEMAEM - 1) || undo > SEMAEM)
 760                                return -ERANGE;
 761                }
 762        }
 763
 764        for (sop = sops; sop < sops + nsops; sop++) {
 765                curr = &sma->sems[sop->sem_num];
 766                sem_op = sop->sem_op;
 767                result = curr->semval;
 768
 769                if (sop->sem_flg & SEM_UNDO) {
 770                        int undo = un->semadj[sop->sem_num] - sem_op;
 771
 772                        un->semadj[sop->sem_num] = undo;
 773                }
 774                curr->semval += sem_op;
 775                ipc_update_pid(&curr->sempid, q->pid);
 776        }
 777
 778        return 0;
 779
 780would_block:
 781        q->blocking = sop;
 782        return sop->sem_flg & IPC_NOWAIT ? -EAGAIN : 1;
 783}
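/*
 * Example of a dupsop request: two operations on the same sem_num in one
 * semop() call. This forces perform_atomic_semop_slow(), because the
 * second operation must observe the effect of the first:
 *
 *	struct sembuf sops[2] = {
 *		{ .sem_num = 0, .sem_op = -1, .sem_flg = 0 },	// decrement
 *		{ .sem_num = 0, .sem_op =  0, .sem_flg = 0 },	// wait-for-zero
 *	};
 *	semop(id, sops, 2);
 */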
 784
 785static inline void wake_up_sem_queue_prepare(struct sem_queue *q, int error,
 786                                             struct wake_q_head *wake_q)
 787{
 788        get_task_struct(q->sleeper);
 789
  790        /* see SEM_BARRIER_2 for purpose/pairing */
 791        smp_store_release(&q->status, error);
 792
 793        wake_q_add_safe(wake_q, q->sleeper);
 794}
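/*
 * Sleeper side of SEM_BARRIER_2, as done in do_semtimedop() (not part of
 * this excerpt): the woken task may read q->status without any lock by
 * combining a control dependency with an acquire barrier:
 *
 *	error = READ_ONCE(queue.status);
 *	if (error != -EINTR) {
 *		// matches the smp_store_release() above
 *		smp_acquire__after_ctrl_dep();
 *		goto out;
 *	}
 */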
 795
 796static void unlink_queue(struct sem_array *sma, struct sem_queue *q)
 797{
 798        list_del(&q->list);
 799        if (q->nsops > 1)
 800                sma->complex_count--;
 801}
 802
 803/** check_restart(sma, q)
 804 * @sma: semaphore array
 805 * @q: the operation that just completed
 806 *
 807 * update_queue is O(N^2) when it restarts scanning the whole queue of
 808 * waiting operations. Therefore this function checks if the restart is
 809 * really necessary. It is called after a previously waiting operation
 810 * modified the array.
 811 * Note that wait-for-zero operations are handled without restart.
 812 */
 813static inline int check_restart(struct sem_array *sma, struct sem_queue *q)
 814{
 815        /* pending complex alter operations are too difficult to analyse */
 816        if (!list_empty(&sma->pending_alter))
 817                return 1;
 818
 819        /* we were a sleeping complex operation. Too difficult */
 820        if (q->nsops > 1)
 821                return 1;
 822
 823        /* It is impossible that someone waits for the new value:
 824         * - complex operations always restart.
  825 *   - wait-for-zero ops are handled separately.
 826         * - q is a previously sleeping simple operation that
 827         *   altered the array. It must be a decrement, because
 828         *   simple increments never sleep.
 829         * - If there are older (higher priority) decrements
 830         *   in the queue, then they have observed the original
 831         *   semval value and couldn't proceed. The operation
  832 *     decremented the value - thus they won't proceed either.
 833         */
 834        return 0;
 835}
 836
 837/**
 838 * wake_const_ops - wake up non-alter tasks
 839 * @sma: semaphore array.
 840 * @semnum: semaphore that was modified.
 841 * @wake_q: lockless wake-queue head.
 842 *
 843 * wake_const_ops must be called after a semaphore in a semaphore array
 844 * was set to 0. If complex const operations are pending, wake_const_ops must
 845 * be called with semnum = -1, as well as with the number of each modified
 846 * semaphore.
 847 * The tasks that must be woken up are added to @wake_q. The return code
  848 * is stored in q->status.
 849 * The function returns 1 if at least one operation was completed successfully.
 850 */
 851static int wake_const_ops(struct sem_array *sma, int semnum,
 852                          struct wake_q_head *wake_q)
 853{
 854        struct sem_queue *q, *tmp;
 855        struct list_head *pending_list;
 856        int semop_completed = 0;
 857
 858        if (semnum == -1)
 859                pending_list = &sma->pending_const;
 860        else
 861                pending_list = &sma->sems[semnum].pending_const;
 862
 863        list_for_each_entry_safe(q, tmp, pending_list, list) {
 864                int error = perform_atomic_semop(sma, q);
 865
 866                if (error > 0)
 867                        continue;
 868                /* operation completed, remove from queue & wakeup */
 869                unlink_queue(sma, q);
 870
 871                wake_up_sem_queue_prepare(q, error, wake_q);
 872                if (error == 0)
 873                        semop_completed = 1;
 874        }
 875
 876        return semop_completed;
 877}
 878
 879/**
  880 * do_smart_wakeup_zero - wake up all wait-for-zero tasks
  881 * @sma: semaphore array
  882 * @sops: operations that were performed
  883 * @nsops: number of operations
  884 * @wake_q: lockless wake-queue head
  885 *
  886 * Checks all required queues for wait-for-zero operations, based
 887 * on the actual changes that were performed on the semaphore array.
 888 * The function returns 1 if at least one operation was completed successfully.
 889 */
 890static int do_smart_wakeup_zero(struct sem_array *sma, struct sembuf *sops,
 891                                int nsops, struct wake_q_head *wake_q)
 892{
 893        int i;
 894        int semop_completed = 0;
 895        int got_zero = 0;
 896
 897        /* first: the per-semaphore queues, if known */
 898        if (sops) {
 899                for (i = 0; i < nsops; i++) {
 900                        int num = sops[i].sem_num;
 901
 902                        if (sma->sems[num].semval == 0) {
 903                                got_zero = 1;
 904                                semop_completed |= wake_const_ops(sma, num, wake_q);
 905                        }
 906                }
 907        } else {
 908                /*
 909                 * No sops means modified semaphores not known.
 910                 * Assume all were changed.
 911                 */
 912                for (i = 0; i < sma->sem_nsems; i++) {
 913                        if (sma->sems[i].semval == 0) {
 914                                got_zero = 1;
 915                                semop_completed |= wake_const_ops(sma, i, wake_q);
 916                        }
 917                }
 918        }
 919        /*
 920         * If one of the modified semaphores got 0,
 921         * then check the global queue, too.
 922         */
 923        if (got_zero)
 924                semop_completed |= wake_const_ops(sma, -1, wake_q);
 925
 926        return semop_completed;
 927}
 928
 929
 930/**
 931 * update_queue - look for tasks that can be completed.
 932 * @sma: semaphore array.
 933 * @semnum: semaphore that was modified.
 934 * @wake_q: lockless wake-queue head.
 935 *
 936 * update_queue must be called after a semaphore in a semaphore array
 937 * was modified. If multiple semaphores were modified, update_queue must
 938 * be called with semnum = -1, as well as with the number of each modified
 939 * semaphore.
  940 * The tasks that must be woken up are added to @wake_q. The return code
  941 * is stored in q->status.
  942 * The function internally checks if const operations can now succeed.
  943 *
  944 * The function returns 1 if at least one semop was completed successfully.
 945 */
 946static int update_queue(struct sem_array *sma, int semnum, struct wake_q_head *wake_q)
 947{
 948        struct sem_queue *q, *tmp;
 949        struct list_head *pending_list;
 950        int semop_completed = 0;
 951
 952        if (semnum == -1)
 953                pending_list = &sma->pending_alter;
 954        else
 955                pending_list = &sma->sems[semnum].pending_alter;
 956
 957again:
 958        list_for_each_entry_safe(q, tmp, pending_list, list) {
 959                int error, restart;
 960
 961                /* If we are scanning the single sop, per-semaphore list of
 962                 * one semaphore and that semaphore is 0, then it is not
 963                 * necessary to scan further: simple increments
 964                 * that affect only one entry succeed immediately and cannot
  965                 * be in the per-semaphore pending queue, and decrements
 966                 * cannot be successful if the value is already 0.
 967                 */
 968                if (semnum != -1 && sma->sems[semnum].semval == 0)
 969                        break;
 970
 971                error = perform_atomic_semop(sma, q);
 972
 973                /* Does q->sleeper still need to sleep? */
 974                if (error > 0)
 975                        continue;
 976
 977                unlink_queue(sma, q);
 978
 979                if (error) {
 980                        restart = 0;
 981                } else {
 982                        semop_completed = 1;
 983                        do_smart_wakeup_zero(sma, q->sops, q->nsops, wake_q);
 984                        restart = check_restart(sma, q);
 985                }
 986
 987                wake_up_sem_queue_prepare(q, error, wake_q);
 988                if (restart)
 989                        goto again;
 990        }
 991        return semop_completed;
 992}
 993
 994/**
 995 * set_semotime - set sem_otime
 996 * @sma: semaphore array
 997 * @sops: operations that modified the array, may be NULL
 998 *
  999 * sem_otime is replicated to avoid cache line thrashing.
1000 * This function sets one instance to the current time.
1001 */
1002static void set_semotime(struct sem_array *sma, struct sembuf *sops)
1003{
1004        if (sops == NULL) {
1005                sma->sems[0].sem_otime = ktime_get_real_seconds();
1006        } else {
1007                sma->sems[sops[0].sem_num].sem_otime =
1008                                                ktime_get_real_seconds();
1009        }
1010}
1011
1012/**
1013 * do_smart_update - optimized update_queue
1014 * @sma: semaphore array
1015 * @sops: operations that were performed
1016 * @nsops: number of operations
1017 * @otime: force setting otime
1018 * @wake_q: lockless wake-queue head
1019 *
1020 * do_smart_update() does the required calls to update_queue and wakeup_zero,
1021 * based on the actual changes that were performed on the semaphore array.
1022 * Note that the function does not do the actual wake-up: the caller is
1023 * responsible for calling wake_up_q().
1024 * It is safe to perform this call after dropping all locks.
1025 */
1026static void do_smart_update(struct sem_array *sma, struct sembuf *sops, int nsops,
1027                            int otime, struct wake_q_head *wake_q)
1028{
1029        int i;
1030
1031        otime |= do_smart_wakeup_zero(sma, sops, nsops, wake_q);
1032
1033        if (!list_empty(&sma->pending_alter)) {
1034                /* semaphore array uses the global queue - just process it. */
1035                otime |= update_queue(sma, -1, wake_q);
1036        } else {
1037                if (!sops) {
1038                        /*
1039                         * No sops, thus the modified semaphores are not
1040                         * known. Check all.
1041                         */
1042                        for (i = 0; i < sma->sem_nsems; i++)
1043                                otime |= update_queue(sma, i, wake_q);
1044                } else {
1045                        /*
1046                         * Check the semaphores that were increased:
 1047                         * - No complex ops, thus all sleeping ops are
 1048                         *   decrements.
 1049                         * - If we decreased the value, then any sleeping
 1050                         *   semaphore ops won't be able to run: if the
 1051                         *   previous value was too small, then the new
 1052                         *   value will be too small, too.
1053                         */
1054                        for (i = 0; i < nsops; i++) {
1055                                if (sops[i].sem_op > 0) {
1056                                        otime |= update_queue(sma,
1057                                                              sops[i].sem_num, wake_q);
1058                                }
1059                        }
1060                }
1061        }
1062        if (otime)
1063                set_semotime(sma, sops);
1064}
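/*
 * Typical caller pattern (cf. semctl_setval() below): gather wakeups
 * while locked, then drop all locks before waking:
 *
 *	DEFINE_WAKE_Q(wake_q);
 *	...
 *	do_smart_update(sma, NULL, 0, 0, &wake_q);
 *	sem_unlock(sma, -1);
 *	rcu_read_unlock();
 *	wake_up_q(&wake_q);
 */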
1065
1066/*
1067 * check_qop: Test if a queued operation sleeps on the semaphore semnum
1068 */
1069static int check_qop(struct sem_array *sma, int semnum, struct sem_queue *q,
1070                        bool count_zero)
1071{
1072        struct sembuf *sop = q->blocking;
1073
1074        /*
1075         * Linux always (since 0.99.10) reported a task as sleeping on all
1076         * semaphores. This violates SUS, therefore it was changed to the
1077         * standard compliant behavior.
1078         * Give the administrators a chance to notice that an application
1079         * might misbehave because it relies on the Linux behavior.
1080         */
 1081        pr_info_once("semctl(GETNCNT/GETZCNT) is Single Unix Specification compliant since Linux 3.16.\n"
 1082                        "The task %s (%d) triggered the difference, watch for misbehavior.\n",
1083                        current->comm, task_pid_nr(current));
1084
1085        if (sop->sem_num != semnum)
1086                return 0;
1087
1088        if (count_zero && sop->sem_op == 0)
1089                return 1;
1090        if (!count_zero && sop->sem_op < 0)
1091                return 1;
1092
1093        return 0;
1094}
1095
 1096/* The following counts are associated with each semaphore:
 1097 *   semncnt        number of tasks waiting on semval being nonzero
 1098 *   semzcnt        number of tasks waiting on semval being zero
 1099 *
 1100 * By definition, a task waits only on the semaphore of the first semop
 1101 * that cannot proceed, even if additional operations would block, too.
 1102 */
1103static int count_semcnt(struct sem_array *sma, ushort semnum,
1104                        bool count_zero)
1105{
1106        struct list_head *l;
1107        struct sem_queue *q;
1108        int semcnt;
1109
1110        semcnt = 0;
1111        /* First: check the simple operations. They are easy to evaluate */
1112        if (count_zero)
1113                l = &sma->sems[semnum].pending_const;
1114        else
1115                l = &sma->sems[semnum].pending_alter;
1116
1117        list_for_each_entry(q, l, list) {
 1118                /* all tasks on a per-semaphore list sleep on exactly
1119                 * that semaphore
1120                 */
1121                semcnt++;
1122        }
1123
1124        /* Then: check the complex operations. */
1125        list_for_each_entry(q, &sma->pending_alter, list) {
1126                semcnt += check_qop(sma, semnum, q, count_zero);
1127        }
1128        if (count_zero) {
1129                list_for_each_entry(q, &sma->pending_const, list) {
1130                        semcnt += check_qop(sma, semnum, q, count_zero);
1131                }
1132        }
1133        return semcnt;
1134}
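/*
 * Illustrative user-space view: count_semcnt() backs the GETNCNT and
 * GETZCNT commands handled in semctl_main() below:
 *
 *	int ncnt = semctl(id, 0, GETNCNT);	// tasks waiting for semval to increase
 *	int zcnt = semctl(id, 0, GETZCNT);	// tasks waiting for semval == 0
 */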
1135
1136/* Free a semaphore set. freeary() is called with sem_ids.rwsem locked
 1137 * as a writer and the spinlock for this semaphore set held. sem_ids.rwsem
1138 * remains locked on exit.
1139 */
1140static void freeary(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
1141{
1142        struct sem_undo *un, *tu;
1143        struct sem_queue *q, *tq;
1144        struct sem_array *sma = container_of(ipcp, struct sem_array, sem_perm);
1145        int i;
1146        DEFINE_WAKE_Q(wake_q);
1147
1148        /* Free the existing undo structures for this semaphore set.  */
1149        ipc_assert_locked_object(&sma->sem_perm);
1150        list_for_each_entry_safe(un, tu, &sma->list_id, list_id) {
1151                list_del(&un->list_id);
1152                spin_lock(&un->ulp->lock);
1153                un->semid = -1;
1154                list_del_rcu(&un->list_proc);
1155                spin_unlock(&un->ulp->lock);
1156                kfree_rcu(un, rcu);
1157        }
1158
1159        /* Wake up all pending processes and let them fail with EIDRM. */
1160        list_for_each_entry_safe(q, tq, &sma->pending_const, list) {
1161                unlink_queue(sma, q);
1162                wake_up_sem_queue_prepare(q, -EIDRM, &wake_q);
1163        }
1164
1165        list_for_each_entry_safe(q, tq, &sma->pending_alter, list) {
1166                unlink_queue(sma, q);
1167                wake_up_sem_queue_prepare(q, -EIDRM, &wake_q);
1168        }
1169        for (i = 0; i < sma->sem_nsems; i++) {
1170                struct sem *sem = &sma->sems[i];
1171                list_for_each_entry_safe(q, tq, &sem->pending_const, list) {
1172                        unlink_queue(sma, q);
1173                        wake_up_sem_queue_prepare(q, -EIDRM, &wake_q);
1174                }
1175                list_for_each_entry_safe(q, tq, &sem->pending_alter, list) {
1176                        unlink_queue(sma, q);
1177                        wake_up_sem_queue_prepare(q, -EIDRM, &wake_q);
1178                }
1179                ipc_update_pid(&sem->sempid, NULL);
1180        }
1181
1182        /* Remove the semaphore set from the IDR */
1183        sem_rmid(ns, sma);
1184        sem_unlock(sma, -1);
1185        rcu_read_unlock();
1186
1187        wake_up_q(&wake_q);
1188        ns->used_sems -= sma->sem_nsems;
1189        ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
1190}
1191
1192static unsigned long copy_semid_to_user(void __user *buf, struct semid64_ds *in, int version)
1193{
1194        switch (version) {
1195        case IPC_64:
1196                return copy_to_user(buf, in, sizeof(*in));
1197        case IPC_OLD:
1198            {
1199                struct semid_ds out;
1200
1201                memset(&out, 0, sizeof(out));
1202
1203                ipc64_perm_to_ipc_perm(&in->sem_perm, &out.sem_perm);
1204
1205                out.sem_otime   = in->sem_otime;
1206                out.sem_ctime   = in->sem_ctime;
1207                out.sem_nsems   = in->sem_nsems;
1208
1209                return copy_to_user(buf, &out, sizeof(out));
1210            }
1211        default:
1212                return -EINVAL;
1213        }
1214}
1215
1216static time64_t get_semotime(struct sem_array *sma)
1217{
1218        int i;
1219        time64_t res;
1220
1221        res = sma->sems[0].sem_otime;
1222        for (i = 1; i < sma->sem_nsems; i++) {
1223                time64_t to = sma->sems[i].sem_otime;
1224
1225                if (to > res)
1226                        res = to;
1227        }
1228        return res;
1229}
1230
1231static int semctl_stat(struct ipc_namespace *ns, int semid,
1232                         int cmd, struct semid64_ds *semid64)
1233{
1234        struct sem_array *sma;
1235        time64_t semotime;
1236        int err;
1237
1238        memset(semid64, 0, sizeof(*semid64));
1239
1240        rcu_read_lock();
1241        if (cmd == SEM_STAT || cmd == SEM_STAT_ANY) {
1242                sma = sem_obtain_object(ns, semid);
1243                if (IS_ERR(sma)) {
1244                        err = PTR_ERR(sma);
1245                        goto out_unlock;
1246                }
1247        } else { /* IPC_STAT */
1248                sma = sem_obtain_object_check(ns, semid);
1249                if (IS_ERR(sma)) {
1250                        err = PTR_ERR(sma);
1251                        goto out_unlock;
1252                }
1253        }
1254
1255        /* see comment for SHM_STAT_ANY */
1256        if (cmd == SEM_STAT_ANY)
1257                audit_ipc_obj(&sma->sem_perm);
1258        else {
1259                err = -EACCES;
1260                if (ipcperms(ns, &sma->sem_perm, S_IRUGO))
1261                        goto out_unlock;
1262        }
1263
1264        err = security_sem_semctl(&sma->sem_perm, cmd);
1265        if (err)
1266                goto out_unlock;
1267
1268        ipc_lock_object(&sma->sem_perm);
1269
1270        if (!ipc_valid_object(&sma->sem_perm)) {
1271                ipc_unlock_object(&sma->sem_perm);
1272                err = -EIDRM;
1273                goto out_unlock;
1274        }
1275
1276        kernel_to_ipc64_perm(&sma->sem_perm, &semid64->sem_perm);
1277        semotime = get_semotime(sma);
1278        semid64->sem_otime = semotime;
1279        semid64->sem_ctime = sma->sem_ctime;
1280#ifndef CONFIG_64BIT
1281        semid64->sem_otime_high = semotime >> 32;
1282        semid64->sem_ctime_high = sma->sem_ctime >> 32;
1283#endif
1284        semid64->sem_nsems = sma->sem_nsems;
1285
1286        if (cmd == IPC_STAT) {
1287                /*
1288                 * As defined in SUS:
1289                 * Return 0 on success
1290                 */
1291                err = 0;
1292        } else {
1293                /*
1294                 * SEM_STAT and SEM_STAT_ANY (both Linux specific)
1295                 * Return the full id, including the sequence number
1296                 */
1297                err = sma->sem_perm.id;
1298        }
1299        ipc_unlock_object(&sma->sem_perm);
1300out_unlock:
1301        rcu_read_unlock();
1302        return err;
1303}
1304
1305static int semctl_info(struct ipc_namespace *ns, int semid,
1306                         int cmd, void __user *p)
1307{
1308        struct seminfo seminfo;
1309        int max_idx;
1310        int err;
1311
1312        err = security_sem_semctl(NULL, cmd);
1313        if (err)
1314                return err;
1315
1316        memset(&seminfo, 0, sizeof(seminfo));
1317        seminfo.semmni = ns->sc_semmni;
1318        seminfo.semmns = ns->sc_semmns;
1319        seminfo.semmsl = ns->sc_semmsl;
1320        seminfo.semopm = ns->sc_semopm;
1321        seminfo.semvmx = SEMVMX;
1322        seminfo.semmnu = SEMMNU;
1323        seminfo.semmap = SEMMAP;
1324        seminfo.semume = SEMUME;
1325        down_read(&sem_ids(ns).rwsem);
1326        if (cmd == SEM_INFO) {
1327                seminfo.semusz = sem_ids(ns).in_use;
1328                seminfo.semaem = ns->used_sems;
1329        } else {
1330                seminfo.semusz = SEMUSZ;
1331                seminfo.semaem = SEMAEM;
1332        }
1333        max_idx = ipc_get_maxidx(&sem_ids(ns));
1334        up_read(&sem_ids(ns).rwsem);
1335        if (copy_to_user(p, &seminfo, sizeof(struct seminfo)))
1336                return -EFAULT;
1337        return (max_idx < 0) ? 0 : max_idx;
1338}
1339
1340static int semctl_setval(struct ipc_namespace *ns, int semid, int semnum,
1341                int val)
1342{
1343        struct sem_undo *un;
1344        struct sem_array *sma;
1345        struct sem *curr;
1346        int err;
1347        DEFINE_WAKE_Q(wake_q);
1348
1349        if (val > SEMVMX || val < 0)
1350                return -ERANGE;
1351
1352        rcu_read_lock();
1353        sma = sem_obtain_object_check(ns, semid);
1354        if (IS_ERR(sma)) {
1355                rcu_read_unlock();
1356                return PTR_ERR(sma);
1357        }
1358
1359        if (semnum < 0 || semnum >= sma->sem_nsems) {
1360                rcu_read_unlock();
1361                return -EINVAL;
1362        }
1363
1364
1365        if (ipcperms(ns, &sma->sem_perm, S_IWUGO)) {
1366                rcu_read_unlock();
1367                return -EACCES;
1368        }
1369
1370        err = security_sem_semctl(&sma->sem_perm, SETVAL);
1371        if (err) {
1372                rcu_read_unlock();
1373                return -EACCES;
1374        }
1375
1376        sem_lock(sma, NULL, -1);
1377
1378        if (!ipc_valid_object(&sma->sem_perm)) {
1379                sem_unlock(sma, -1);
1380                rcu_read_unlock();
1381                return -EIDRM;
1382        }
1383
1384        semnum = array_index_nospec(semnum, sma->sem_nsems);
1385        curr = &sma->sems[semnum];
1386
1387        ipc_assert_locked_object(&sma->sem_perm);
1388        list_for_each_entry(un, &sma->list_id, list_id)
1389                un->semadj[semnum] = 0;
1390
1391        curr->semval = val;
1392        ipc_update_pid(&curr->sempid, task_tgid(current));
1393        sma->sem_ctime = ktime_get_real_seconds();
1394        /* maybe some queued-up processes were waiting for this */
1395        do_smart_update(sma, NULL, 0, 0, &wake_q);
1396        sem_unlock(sma, -1);
1397        rcu_read_unlock();
1398        wake_up_q(&wake_q);
1399        return 0;
1400}
1401
1402static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
1403                int cmd, void __user *p)
1404{
1405        struct sem_array *sma;
1406        struct sem *curr;
1407        int err, nsems;
1408        ushort fast_sem_io[SEMMSL_FAST];
1409        ushort *sem_io = fast_sem_io;
1410        DEFINE_WAKE_Q(wake_q);
1411
1412        rcu_read_lock();
1413        sma = sem_obtain_object_check(ns, semid);
1414        if (IS_ERR(sma)) {
1415                rcu_read_unlock();
1416                return PTR_ERR(sma);
1417        }
1418
1419        nsems = sma->sem_nsems;
1420
1421        err = -EACCES;
1422        if (ipcperms(ns, &sma->sem_perm, cmd == SETALL ? S_IWUGO : S_IRUGO))
1423                goto out_rcu_wakeup;
1424
1425        err = security_sem_semctl(&sma->sem_perm, cmd);
1426        if (err)
1427                goto out_rcu_wakeup;
1428
1429        err = -EACCES;
1430        switch (cmd) {
1431        case GETALL:
1432        {
1433                ushort __user *array = p;
1434                int i;
1435
1436                sem_lock(sma, NULL, -1);
1437                if (!ipc_valid_object(&sma->sem_perm)) {
1438                        err = -EIDRM;
1439                        goto out_unlock;
1440                }
1441                if (nsems > SEMMSL_FAST) {
1442                        if (!ipc_rcu_getref(&sma->sem_perm)) {
1443                                err = -EIDRM;
1444                                goto out_unlock;
1445                        }
1446                        sem_unlock(sma, -1);
1447                        rcu_read_unlock();
1448                        sem_io = kvmalloc_array(nsems, sizeof(ushort),
1449                                                GFP_KERNEL);
1450                        if (sem_io == NULL) {
1451                                ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
1452                                return -ENOMEM;
1453                        }
1454
1455                        rcu_read_lock();
1456                        sem_lock_and_putref(sma);
1457                        if (!ipc_valid_object(&sma->sem_perm)) {
1458                                err = -EIDRM;
1459                                goto out_unlock;
1460                        }
1461                }
1462                for (i = 0; i < sma->sem_nsems; i++)
1463                        sem_io[i] = sma->sems[i].semval;
1464                sem_unlock(sma, -1);
1465                rcu_read_unlock();
1466                err = 0;
1467                if (copy_to_user(array, sem_io, nsems*sizeof(ushort)))
1468                        err = -EFAULT;
1469                goto out_free;
1470        }
1471        case SETALL:
1472        {
1473                int i;
1474                struct sem_undo *un;
1475
1476                if (!ipc_rcu_getref(&sma->sem_perm)) {
1477                        err = -EIDRM;
1478                        goto out_rcu_wakeup;
1479                }
1480                rcu_read_unlock();
1481
1482                if (nsems > SEMMSL_FAST) {
1483                        sem_io = kvmalloc_array(nsems, sizeof(ushort),
1484                                                GFP_KERNEL);
1485                        if (sem_io == NULL) {
1486                                ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
1487                                return -ENOMEM;
1488                        }
1489                }
1490
1491                if (copy_from_user(sem_io, p, nsems*sizeof(ushort))) {
1492                        ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
1493                        err = -EFAULT;
1494                        goto out_free;
1495                }
1496
1497                for (i = 0; i < nsems; i++) {
1498                        if (sem_io[i] > SEMVMX) {
1499                                ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
1500                                err = -ERANGE;
1501                                goto out_free;
1502                        }
1503                }
1504                rcu_read_lock();
1505                sem_lock_and_putref(sma);
1506                if (!ipc_valid_object(&sma->sem_perm)) {
1507                        err = -EIDRM;
1508                        goto out_unlock;
1509                }
1510
1511                for (i = 0; i < nsems; i++) {
1512                        sma->sems[i].semval = sem_io[i];
1513                        ipc_update_pid(&sma->sems[i].sempid, task_tgid(current));
1514                }
1515
1516                ipc_assert_locked_object(&sma->sem_perm);
1517                list_for_each_entry(un, &sma->list_id, list_id) {
1518                        for (i = 0; i < nsems; i++)
1519                                un->semadj[i] = 0;
1520                }
1521                sma->sem_ctime = ktime_get_real_seconds();
1522                /* maybe some queued-up processes were waiting for this */
1523                do_smart_update(sma, NULL, 0, 0, &wake_q);
1524                err = 0;
1525                goto out_unlock;
1526        }
1527        /* GETVAL, GETPID, GETNCNT, GETZCNT: fall-through */
1528        }
1529        err = -EINVAL;
1530        if (semnum < 0 || semnum >= nsems)
1531                goto out_rcu_wakeup;
1532
1533        sem_lock(sma, NULL, -1);
1534        if (!ipc_valid_object(&sma->sem_perm)) {
1535                err = -EIDRM;
1536                goto out_unlock;
1537        }
1538
1539        semnum = array_index_nospec(semnum, nsems);
1540        curr = &sma->sems[semnum];
1541
1542        switch (cmd) {
1543        case GETVAL:
1544                err = curr->semval;
1545                goto out_unlock;
1546        case GETPID:
1547                err = pid_vnr(curr->sempid);
1548                goto out_unlock;
1549        case GETNCNT:
1550                err = count_semcnt(sma, semnum, 0);
1551                goto out_unlock;
1552        case GETZCNT:
1553                err = count_semcnt(sma, semnum, 1);
1554                goto out_unlock;
1555        }
1556
1557out_unlock:
1558        sem_unlock(sma, -1);
1559out_rcu_wakeup:
1560        rcu_read_unlock();
1561        wake_up_q(&wake_q);
1562out_free:
1563        if (sem_io != fast_sem_io)
1564                kvfree(sem_io);
1565        return err;
1566}
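
/*
 * Editor's note: the block below is an illustrative userspace sketch, not
 * part of this file. It exercises the GETALL/SETALL paths implemented in
 * semctl_main() above; the union semun must be declared by the caller, as
 * documented in semctl(2). Error handling is abbreviated.
 */
#if 0 /* userspace example only */
#include <stdlib.h>
#include <sys/ipc.h>
#include <sys/sem.h>

union semun {			/* caller-defined, per semctl(2) */
	int val;
	struct semid_ds *buf;
	unsigned short *array;
};

static int demo_getall_setall(int semid, int nsems)
{
	unsigned short *vals = calloc(nsems, sizeof(*vals));
	union semun arg = { .array = vals };

	if (!vals)
		return -1;
	if (semctl(semid, 0, GETALL, arg) < 0)	/* copies all semvals out */
		goto fail;
	vals[0] = 1;
	if (semctl(semid, 0, SETALL, arg) < 0)	/* also clears semadj values */
		goto fail;
	free(vals);
	return 0;
fail:
	free(vals);
	return -1;
}
#endif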
1567
1568static inline unsigned long
1569copy_semid_from_user(struct semid64_ds *out, void __user *buf, int version)
1570{
1571        switch (version) {
1572        case IPC_64:
1573                if (copy_from_user(out, buf, sizeof(*out)))
1574                        return -EFAULT;
1575                return 0;
1576        case IPC_OLD:
1577            {
1578                struct semid_ds tbuf_old;
1579
1580                if (copy_from_user(&tbuf_old, buf, sizeof(tbuf_old)))
1581                        return -EFAULT;
1582
1583                out->sem_perm.uid       = tbuf_old.sem_perm.uid;
1584                out->sem_perm.gid       = tbuf_old.sem_perm.gid;
1585                out->sem_perm.mode      = tbuf_old.sem_perm.mode;
1586
1587                return 0;
1588            }
1589        default:
1590                return -EINVAL;
1591        }
1592}
1593
1594/*
1595 * This function handles some semctl commands which require the rwsem
1596 * to be held in write mode.
1597 * NOTE: the caller must not hold any locks; the rwsem is taken inside this function.
1598 */
1599static int semctl_down(struct ipc_namespace *ns, int semid,
1600                       int cmd, struct semid64_ds *semid64)
1601{
1602        struct sem_array *sma;
1603        int err;
1604        struct kern_ipc_perm *ipcp;
1605
1606        down_write(&sem_ids(ns).rwsem);
1607        rcu_read_lock();
1608
1609        ipcp = ipcctl_obtain_check(ns, &sem_ids(ns), semid, cmd,
1610                                      &semid64->sem_perm, 0);
1611        if (IS_ERR(ipcp)) {
1612                err = PTR_ERR(ipcp);
1613                goto out_unlock1;
1614        }
1615
1616        sma = container_of(ipcp, struct sem_array, sem_perm);
1617
1618        err = security_sem_semctl(&sma->sem_perm, cmd);
1619        if (err)
1620                goto out_unlock1;
1621
1622        switch (cmd) {
1623        case IPC_RMID:
1624                sem_lock(sma, NULL, -1);
1625                /* freeary unlocks the ipc object and rcu */
1626                freeary(ns, ipcp);
1627                goto out_up;
1628        case IPC_SET:
1629                sem_lock(sma, NULL, -1);
1630                err = ipc_update_perm(&semid64->sem_perm, ipcp);
1631                if (err)
1632                        goto out_unlock0;
1633                sma->sem_ctime = ktime_get_real_seconds();
1634                break;
1635        default:
1636                err = -EINVAL;
1637                goto out_unlock1;
1638        }
1639
1640out_unlock0:
1641        sem_unlock(sma, -1);
1642out_unlock1:
1643        rcu_read_unlock();
1644out_up:
1645        up_write(&sem_ids(ns).rwsem);
1646        return err;
1647}
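
/*
 * Editor's note: illustrative userspace sketch, not part of this file. It
 * drives the IPC_SET and IPC_RMID commands served by semctl_down() above;
 * the union semun is caller-defined, per semctl(2).
 */
#if 0 /* userspace example only */
#include <sys/ipc.h>
#include <sys/sem.h>

union semun { int val; struct semid_ds *buf; unsigned short *array; };

static int demo_set_then_remove(int semid)
{
	struct semid_ds ds;
	union semun arg = { .buf = &ds };

	if (semctl(semid, 0, IPC_STAT, arg) < 0)
		return -1;
	ds.sem_perm.mode = 0600;		/* taken by ipc_update_perm() */
	if (semctl(semid, 0, IPC_SET, arg) < 0)
		return -1;
	return semctl(semid, 0, IPC_RMID);	/* taken by freeary() */
}
#endif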
1648
1649static long ksys_semctl(int semid, int semnum, int cmd, unsigned long arg, int version)
1650{
1651        struct ipc_namespace *ns;
1652        void __user *p = (void __user *)arg;
1653        struct semid64_ds semid64;
1654        int err;
1655
1656        if (semid < 0)
1657                return -EINVAL;
1658
1659        ns = current->nsproxy->ipc_ns;
1660
1661        switch (cmd) {
1662        case IPC_INFO:
1663        case SEM_INFO:
1664                return semctl_info(ns, semid, cmd, p);
1665        case IPC_STAT:
1666        case SEM_STAT:
1667        case SEM_STAT_ANY:
1668                err = semctl_stat(ns, semid, cmd, &semid64);
1669                if (err < 0)
1670                        return err;
1671                if (copy_semid_to_user(p, &semid64, version))
1672                        err = -EFAULT;
1673                return err;
1674        case GETALL:
1675        case GETVAL:
1676        case GETPID:
1677        case GETNCNT:
1678        case GETZCNT:
1679        case SETALL:
1680                return semctl_main(ns, semid, semnum, cmd, p);
1681        case SETVAL: {
1682                int val;
1683#if defined(CONFIG_64BIT) && defined(__BIG_ENDIAN)
1684                /* big-endian 64bit */
1685                val = arg >> 32;
1686#else
1687                /* 32bit or little-endian 64bit */
1688                val = arg;
1689#endif
1690                return semctl_setval(ns, semid, semnum, val);
1691        }
1692        case IPC_SET:
1693                if (copy_semid_from_user(&semid64, p, version))
1694                        return -EFAULT;
1695                /* fall through */
1696        case IPC_RMID:
1697                return semctl_down(ns, semid, cmd, &semid64);
1698        default:
1699                return -EINVAL;
1700        }
1701}
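
/*
 * Editor's note: illustrative userspace sketch, not part of this file.
 * SETVAL carries the new value inside the union semun; the #if block in
 * ksys_semctl() above extracts that int from the unsigned long syscall
 * argument, which is why the extraction differs between big- and
 * little-endian 64-bit ABIs.
 */
#if 0 /* userspace example only */
#include <sys/ipc.h>
#include <sys/sem.h>

union semun { int val; struct semid_ds *buf; unsigned short *array; };

static int demo_setval(int semid, int semnum, int value)
{
	union semun arg = { .val = value };

	return semctl(semid, semnum, SETVAL, arg);	/* -> semctl_setval() */
}
#endif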
1702
1703SYSCALL_DEFINE4(semctl, int, semid, int, semnum, int, cmd, unsigned long, arg)
1704{
1705        return ksys_semctl(semid, semnum, cmd, arg, IPC_64);
1706}
1707
1708#ifdef CONFIG_ARCH_WANT_IPC_PARSE_VERSION
1709long ksys_old_semctl(int semid, int semnum, int cmd, unsigned long arg)
1710{
1711        int version = ipc_parse_version(&cmd);
1712
1713        return ksys_semctl(semid, semnum, cmd, arg, version);
1714}
1715
1716SYSCALL_DEFINE4(old_semctl, int, semid, int, semnum, int, cmd, unsigned long, arg)
1717{
1718        return ksys_old_semctl(semid, semnum, cmd, arg);
1719}
1720#endif
1721
1722#ifdef CONFIG_COMPAT
1723
1724struct compat_semid_ds {
1725        struct compat_ipc_perm sem_perm;
1726        old_time32_t sem_otime;
1727        old_time32_t sem_ctime;
1728        compat_uptr_t sem_base;
1729        compat_uptr_t sem_pending;
1730        compat_uptr_t sem_pending_last;
1731        compat_uptr_t undo;
1732        unsigned short sem_nsems;
1733};
1734
1735static int copy_compat_semid_from_user(struct semid64_ds *out, void __user *buf,
1736                                        int version)
1737{
1738        memset(out, 0, sizeof(*out));
1739        if (version == IPC_64) {
1740                struct compat_semid64_ds __user *p = buf;
1741                return get_compat_ipc64_perm(&out->sem_perm, &p->sem_perm);
1742        } else {
1743                struct compat_semid_ds __user *p = buf;
1744                return get_compat_ipc_perm(&out->sem_perm, &p->sem_perm);
1745        }
1746}
1747
1748static int copy_compat_semid_to_user(void __user *buf, struct semid64_ds *in,
1749                                        int version)
1750{
1751        if (version == IPC_64) {
1752                struct compat_semid64_ds v;
1753                memset(&v, 0, sizeof(v));
1754                to_compat_ipc64_perm(&v.sem_perm, &in->sem_perm);
1755                v.sem_otime      = lower_32_bits(in->sem_otime);
1756                v.sem_otime_high = upper_32_bits(in->sem_otime);
1757                v.sem_ctime      = lower_32_bits(in->sem_ctime);
1758                v.sem_ctime_high = upper_32_bits(in->sem_ctime);
1759                v.sem_nsems = in->sem_nsems;
1760                return copy_to_user(buf, &v, sizeof(v));
1761        } else {
1762                struct compat_semid_ds v;
1763                memset(&v, 0, sizeof(v));
1764                to_compat_ipc_perm(&v.sem_perm, &in->sem_perm);
1765                v.sem_otime = in->sem_otime;
1766                v.sem_ctime = in->sem_ctime;
1767                v.sem_nsems = in->sem_nsems;
1768                return copy_to_user(buf, &v, sizeof(v));
1769        }
1770}
1771
1772static long compat_ksys_semctl(int semid, int semnum, int cmd, int arg, int version)
1773{
1774        void __user *p = compat_ptr(arg);
1775        struct ipc_namespace *ns;
1776        struct semid64_ds semid64;
1777        int err;
1778
1779        ns = current->nsproxy->ipc_ns;
1780
1781        if (semid < 0)
1782                return -EINVAL;
1783
1784        switch (cmd & (~IPC_64)) {
1785        case IPC_INFO:
1786        case SEM_INFO:
1787                return semctl_info(ns, semid, cmd, p);
1788        case IPC_STAT:
1789        case SEM_STAT:
1790        case SEM_STAT_ANY:
1791                err = semctl_stat(ns, semid, cmd, &semid64);
1792                if (err < 0)
1793                        return err;
1794                if (copy_compat_semid_to_user(p, &semid64, version))
1795                        err = -EFAULT;
1796                return err;
1797        case GETVAL:
1798        case GETPID:
1799        case GETNCNT:
1800        case GETZCNT:
1801        case GETALL:
1802        case SETALL:
1803                return semctl_main(ns, semid, semnum, cmd, p);
1804        case SETVAL:
1805                return semctl_setval(ns, semid, semnum, arg);
1806        case IPC_SET:
1807                if (copy_compat_semid_from_user(&semid64, p, version))
1808                        return -EFAULT;
1809                /* fall through */
1810        case IPC_RMID:
1811                return semctl_down(ns, semid, cmd, &semid64);
1812        default:
1813                return -EINVAL;
1814        }
1815}
1816
1817COMPAT_SYSCALL_DEFINE4(semctl, int, semid, int, semnum, int, cmd, int, arg)
1818{
1819        return compat_ksys_semctl(semid, semnum, cmd, arg, IPC_64);
1820}
1821
1822#ifdef CONFIG_ARCH_WANT_COMPAT_IPC_PARSE_VERSION
1823long compat_ksys_old_semctl(int semid, int semnum, int cmd, int arg)
1824{
1825        int version = compat_ipc_parse_version(&cmd);
1826
1827        return compat_ksys_semctl(semid, semnum, cmd, arg, version);
1828}
1829
1830COMPAT_SYSCALL_DEFINE4(old_semctl, int, semid, int, semnum, int, cmd, int, arg)
1831{
1832        return compat_ksys_old_semctl(semid, semnum, cmd, arg);
1833}
1834#endif
1835#endif
1836
1837/* If the task doesn't already have an undo_list, then allocate one
1838 * here.  We guarantee there is only one thread using this undo list,
1839 * and current is THE ONE
1840 *
1841 * If this allocation and assignment succeeds, but later
1842 * portions of this code fail, there is no need to free the sem_undo_list.
1843 * Just let it stay associated with the task, and it'll be freed later
1844 * at exit time.
1845 *
1846 * This can block, so callers must hold no locks.
1847 */
1848static inline int get_undo_list(struct sem_undo_list **undo_listp)
1849{
1850        struct sem_undo_list *undo_list;
1851
1852        undo_list = current->sysvsem.undo_list;
1853        if (!undo_list) {
1854                undo_list = kzalloc(sizeof(*undo_list), GFP_KERNEL);
1855                if (undo_list == NULL)
1856                        return -ENOMEM;
1857                spin_lock_init(&undo_list->lock);
1858                refcount_set(&undo_list->refcnt, 1);
1859                INIT_LIST_HEAD(&undo_list->list_proc);
1860
1861                current->sysvsem.undo_list = undo_list;
1862        }
1863        *undo_listp = undo_list;
1864        return 0;
1865}
1866
1867static struct sem_undo *__lookup_undo(struct sem_undo_list *ulp, int semid)
1868{
1869        struct sem_undo *un;
1870
1871        list_for_each_entry_rcu(un, &ulp->list_proc, list_proc,
1872                                spin_is_locked(&ulp->lock)) {
1873                if (un->semid == semid)
1874                        return un;
1875        }
1876        return NULL;
1877}
1878
1879static struct sem_undo *lookup_undo(struct sem_undo_list *ulp, int semid)
1880{
1881        struct sem_undo *un;
1882
1883        assert_spin_locked(&ulp->lock);
1884
1885        un = __lookup_undo(ulp, semid);
1886        if (un) {
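                /* move the hit to the list head so repeated lookups find it first */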
1887                list_del_rcu(&un->list_proc);
1888                list_add_rcu(&un->list_proc, &ulp->list_proc);
1889        }
1890        return un;
1891}
1892
1893/**
1894 * find_alloc_undo - lookup (and if not present create) undo array
1895 * @ns: namespace
1896 * @semid: semaphore array id
1897 *
1898 * The function looks up (and if not present creates) the undo structure.
1899 * The size of the undo structure depends on the size of the semaphore
1900 * array, thus the alloc path is not that straightforward.
1901 * Lifetime-rules: sem_undo is rcu-protected; on success, the function
1902 * performs an rcu_read_lock().
1903 */
1904static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid)
1905{
1906        struct sem_array *sma;
1907        struct sem_undo_list *ulp;
1908        struct sem_undo *un, *new;
1909        int nsems, error;
1910
1911        error = get_undo_list(&ulp);
1912        if (error)
1913                return ERR_PTR(error);
1914
1915        rcu_read_lock();
1916        spin_lock(&ulp->lock);
1917        un = lookup_undo(ulp, semid);
1918        spin_unlock(&ulp->lock);
1919        if (likely(un != NULL))
1920                goto out;
1921
1922        /* no undo structure around - allocate one. */
1923        /* step 1: figure out the size of the semaphore array */
1924        sma = sem_obtain_object_check(ns, semid);
1925        if (IS_ERR(sma)) {
1926                rcu_read_unlock();
1927                return ERR_CAST(sma);
1928        }
1929
1930        nsems = sma->sem_nsems;
1931        if (!ipc_rcu_getref(&sma->sem_perm)) {
1932                rcu_read_unlock();
1933                un = ERR_PTR(-EIDRM);
1934                goto out;
1935        }
1936        rcu_read_unlock();
1937
1938        /* step 2: allocate new undo structure */
1939        new = kzalloc(sizeof(struct sem_undo) + sizeof(short)*nsems, GFP_KERNEL);
1940        if (!new) {
1941                ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
1942                return ERR_PTR(-ENOMEM);
1943        }
1944
1945        /* step 3: Acquire the lock on semaphore array */
1946        rcu_read_lock();
1947        sem_lock_and_putref(sma);
1948        if (!ipc_valid_object(&sma->sem_perm)) {
1949                sem_unlock(sma, -1);
1950                rcu_read_unlock();
1951                kfree(new);
1952                un = ERR_PTR(-EIDRM);
1953                goto out;
1954        }
1955        spin_lock(&ulp->lock);
1956
1957        /*
1958         * step 4: check for races: did someone else allocate the undo struct?
1959         */
1960        un = lookup_undo(ulp, semid);
1961        if (un) {
1962                kfree(new);
1963                goto success;
1964        }
1965        /* step 5: initialize & link new undo structure */
1966        new->semadj = (short *) &new[1];
1967        new->ulp = ulp;
1968        new->semid = semid;
1969        assert_spin_locked(&ulp->lock);
1970        list_add_rcu(&new->list_proc, &ulp->list_proc);
1971        ipc_assert_locked_object(&sma->sem_perm);
1972        list_add(&new->list_id, &sma->list_id);
1973        un = new;
1974
1975success:
1976        spin_unlock(&ulp->lock);
1977        sem_unlock(sma, -1);
1978out:
1979        return un;
1980}
1981
1982static long do_semtimedop(int semid, struct sembuf __user *tsops,
1983                unsigned nsops, const struct timespec64 *timeout)
1984{
1985        int error = -EINVAL;
1986        struct sem_array *sma;
1987        struct sembuf fast_sops[SEMOPM_FAST];
1988        struct sembuf *sops = fast_sops, *sop;
1989        struct sem_undo *un;
1990        int max, locknum;
1991        bool undos = false, alter = false, dupsop = false;
1992        struct sem_queue queue;
1993        unsigned long dup = 0, jiffies_left = 0;
1994        struct ipc_namespace *ns;
1995
1996        ns = current->nsproxy->ipc_ns;
1997
1998        if (nsops < 1 || semid < 0)
1999                return -EINVAL;
2000        if (nsops > ns->sc_semopm)
2001                return -E2BIG;
2002        if (nsops > SEMOPM_FAST) {
2003                sops = kvmalloc_array(nsops, sizeof(*sops), GFP_KERNEL);
2004                if (sops == NULL)
2005                        return -ENOMEM;
2006        }
2007
2008        if (copy_from_user(sops, tsops, nsops * sizeof(*tsops))) {
2009                error = -EFAULT;
2010                goto out_free;
2011        }
2012
2013        if (timeout) {
2014                if (timeout->tv_sec < 0 || timeout->tv_nsec < 0 ||
2015                        timeout->tv_nsec >= 1000000000L) {
2016                        error = -EINVAL;
2017                        goto out_free;
2018                }
2019                jiffies_left = timespec64_to_jiffies(timeout);
2020        }
2021
2022        max = 0;
2023        for (sop = sops; sop < sops + nsops; sop++) {
2024                unsigned long mask = 1ULL << ((sop->sem_num) % BITS_PER_LONG);
2025
2026                if (sop->sem_num >= max)
2027                        max = sop->sem_num;
2028                if (sop->sem_flg & SEM_UNDO)
2029                        undos = true;
2030                if (dup & mask) {
2031                        /*
2032                         * There was a previous alter access that appears
2033                         * to have accessed the same semaphore, thus use
2034                         * the dupsop logic. "appears", because the detection
2035                         * can only check % BITS_PER_LONG.
2036                         */
2037                        dupsop = true;
2038                }
2039                if (sop->sem_op != 0) {
2040                        alter = true;
2041                        dup |= mask;
2042                }
2043        }
2044
2045        if (undos) {
2046                /* On success, find_alloc_undo takes the rcu_read_lock */
2047                un = find_alloc_undo(ns, semid);
2048                if (IS_ERR(un)) {
2049                        error = PTR_ERR(un);
2050                        goto out_free;
2051                }
2052        } else {
2053                un = NULL;
2054                rcu_read_lock();
2055        }
2056
2057        sma = sem_obtain_object_check(ns, semid);
2058        if (IS_ERR(sma)) {
2059                rcu_read_unlock();
2060                error = PTR_ERR(sma);
2061                goto out_free;
2062        }
2063
2064        error = -EFBIG;
2065        if (max >= sma->sem_nsems) {
2066                rcu_read_unlock();
2067                goto out_free;
2068        }
2069
2070        error = -EACCES;
2071        if (ipcperms(ns, &sma->sem_perm, alter ? S_IWUGO : S_IRUGO)) {
2072                rcu_read_unlock();
2073                goto out_free;
2074        }
2075
2076        error = security_sem_semop(&sma->sem_perm, sops, nsops, alter);
2077        if (error) {
2078                rcu_read_unlock();
2079                goto out_free;
2080        }
2081
2082        error = -EIDRM;
2083        locknum = sem_lock(sma, sops, nsops);
2084        /*
2085         * We eventually might perform the following check in a lockless
2086         * fashion, considering ipc_valid_object() locking constraints.
2087         * If nsops == 1 and there is no contention for sem_perm.lock, then
2088         * only a per-semaphore lock is held and it's OK to proceed with the
2089         * check below. More details on the fine-grained locking scheme
2090         * entangled here, and why it is RMID race safe, are in the comments at sem_lock().
2091         */
2092        if (!ipc_valid_object(&sma->sem_perm))
2093                goto out_unlock_free;
2094        /*
2095         * semid identifiers are not unique - find_alloc_undo may have
2096         * allocated an undo structure which was then invalidated by an
2097         * RMID, and a new array has now received the same id. Check and fail.
2098         * This case can be detected by checking un->semid. The existence of
2099         * "un" itself is guaranteed by rcu.
2100         */
2101        if (un && un->semid == -1)
2102                goto out_unlock_free;
2103
2104        queue.sops = sops;
2105        queue.nsops = nsops;
2106        queue.undo = un;
2107        queue.pid = task_tgid(current);
2108        queue.alter = alter;
2109        queue.dupsop = dupsop;
2110
2111        error = perform_atomic_semop(sma, &queue);
2112        if (error == 0) { /* non-blocking successful path */
2113                DEFINE_WAKE_Q(wake_q);
2114
2115                /*
2116                 * If the operation was successful, then do
2117                 * the required updates.
2118                 */
2119                if (alter)
2120                        do_smart_update(sma, sops, nsops, 1, &wake_q);
2121                else
2122                        set_semotime(sma, sops);
2123
2124                sem_unlock(sma, locknum);
2125                rcu_read_unlock();
2126                wake_up_q(&wake_q);
2127
2128                goto out_free;
2129        }
2130        if (error < 0) /* non-blocking error path */
2131                goto out_unlock_free;
2132
2133        /*
2134         * We need to sleep on this operation, so we put the current
2135         * task into the pending queue and go to sleep.
2136         */
2137        if (nsops == 1) {
2138                struct sem *curr;
2139                int idx = array_index_nospec(sops->sem_num, sma->sem_nsems);
2140                curr = &sma->sems[idx];
2141
2142                if (alter) {
2143                        if (sma->complex_count) {
2144                                list_add_tail(&queue.list,
2145                                                &sma->pending_alter);
2146                        } else {
2147
2148                                list_add_tail(&queue.list,
2149                                                &curr->pending_alter);
2150                        }
2151                } else {
2152                        list_add_tail(&queue.list, &curr->pending_const);
2153                }
2154        } else {
2155                if (!sma->complex_count)
2156                        merge_queues(sma);
2157
2158                if (alter)
2159                        list_add_tail(&queue.list, &sma->pending_alter);
2160                else
2161                        list_add_tail(&queue.list, &sma->pending_const);
2162
2163                sma->complex_count++;
2164        }
2165
2166        do {
2167                /* memory ordering ensured by the lock in sem_lock() */
2168                WRITE_ONCE(queue.status, -EINTR);
2169                queue.sleeper = current;
2170
2171                /* memory ordering is ensured by the lock in sem_lock() */
2172                __set_current_state(TASK_INTERRUPTIBLE);
2173                sem_unlock(sma, locknum);
2174                rcu_read_unlock();
2175
2176                if (timeout)
2177                        jiffies_left = schedule_timeout(jiffies_left);
2178                else
2179                        schedule();
2180
2181                /*
2182                 * fastpath: the semop has completed, successfully or not;
2183                 * from the syscall's point of view that is irrelevant to us
2184                 * at this point: we're done.
2185                 *
2186                 * We _do_ care, nonetheless, about being awoken by a signal or
2187                 * spuriously.  The queue.status is checked again in the
2188                 * slowpath (aka after taking sem_lock), such that we can detect
2189                 * scenarios where we were awakened externally, during the
2190                 * window between wake_q_add() and wake_up_q().
2191                 */
2192                error = READ_ONCE(queue.status);
2193                if (error != -EINTR) {
2194                        /* see SEM_BARRIER_2 for purpose/pairing */
2195                        smp_acquire__after_ctrl_dep();
2196                        goto out_free;
2197                }
2198
2199                rcu_read_lock();
2200                locknum = sem_lock(sma, sops, nsops);
2201
2202                if (!ipc_valid_object(&sma->sem_perm))
2203                        goto out_unlock_free;
2204
2205                /*
2206                 * No barrier is necessary: we are protected by sem_lock().
2207                 */
2208                error = READ_ONCE(queue.status);
2209
2210                /*
2211                 * If queue.status != -EINTR, we were woken up by another process.
2212                 * Leave without unlink_queue(), but with sem_unlock().
2213                 */
2214                if (error != -EINTR)
2215                        goto out_unlock_free;
2216
2217                /*
2218                 * If an interrupt occurred we have to clean up the queue.
2219                 */
2220                if (timeout && jiffies_left == 0)
2221                        error = -EAGAIN;
2222        } while (error == -EINTR && !signal_pending(current)); /* spurious */
2223
2224        unlink_queue(sma, &queue);
2225
2226out_unlock_free:
2227        sem_unlock(sma, locknum);
2228        rcu_read_unlock();
2229out_free:
2230        if (sops != fast_sops)
2231                kvfree(sops);
2232        return error;
2233}
2234
2235long ksys_semtimedop(int semid, struct sembuf __user *tsops,
2236                     unsigned int nsops, const struct __kernel_timespec __user *timeout)
2237{
2238        if (timeout) {
2239                struct timespec64 ts;
2240                if (get_timespec64(&ts, timeout))
2241                        return -EFAULT;
2242                return do_semtimedop(semid, tsops, nsops, &ts);
2243        }
2244        return do_semtimedop(semid, tsops, nsops, NULL);
2245}
2246
2247SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
2248                unsigned int, nsops, const struct __kernel_timespec __user *, timeout)
2249{
2250        return ksys_semtimedop(semid, tsops, nsops, timeout);
2251}
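
/*
 * Editor's note: illustrative userspace sketch, not part of this file. A
 * decrement with a timeout takes the do_semtimedop() path above; if the
 * semaphore cannot be decremented before the timeout expires, the call
 * fails with EAGAIN (see the jiffies_left handling).
 */
#if 0 /* userspace example only */
#define _GNU_SOURCE
#include <sys/sem.h>
#include <time.h>

static int demo_timed_decrement(int semid)
{
	struct sembuf sop = { .sem_num = 0, .sem_op = -1, .sem_flg = 0 };
	struct timespec ts = { .tv_sec = 1, .tv_nsec = 0 };

	return semtimedop(semid, &sop, 1, &ts);	/* -1/EAGAIN on timeout */
}
#endif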
2252
2253#ifdef CONFIG_COMPAT_32BIT_TIME
2254long compat_ksys_semtimedop(int semid, struct sembuf __user *tsems,
2255                            unsigned int nsops,
2256                            const struct old_timespec32 __user *timeout)
2257{
2258        if (timeout) {
2259                struct timespec64 ts;
2260                if (get_old_timespec32(&ts, timeout))
2261                        return -EFAULT;
2262                return do_semtimedop(semid, tsems, nsops, &ts);
2263        }
2264        return do_semtimedop(semid, tsems, nsops, NULL);
2265}
2266
2267SYSCALL_DEFINE4(semtimedop_time32, int, semid, struct sembuf __user *, tsems,
2268                       unsigned int, nsops,
2269                       const struct old_timespec32 __user *, timeout)
2270{
2271        return compat_ksys_semtimedop(semid, tsems, nsops, timeout);
2272}
2273#endif
2274
2275SYSCALL_DEFINE3(semop, int, semid, struct sembuf __user *, tsops,
2276                unsigned, nsops)
2277{
2278        return do_semtimedop(semid, tsops, nsops, NULL);
2279}
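
/*
 * Editor's note: illustrative userspace sketch, not part of this file: the
 * classic P()/V() pair served by the semop() entry point above. A process
 * blocked in P() is woken through do_smart_update()/wake_up_q().
 */
#if 0 /* userspace example only */
#include <sys/sem.h>

static int demo_p(int semid)	/* acquire: may sleep in do_semtimedop() */
{
	struct sembuf sop = { .sem_num = 0, .sem_op = -1, .sem_flg = 0 };

	return semop(semid, &sop, 1);
}

static int demo_v(int semid)	/* release: wakes the next sleeping waiter */
{
	struct sembuf sop = { .sem_num = 0, .sem_op = 1, .sem_flg = 0 };

	return semop(semid, &sop, 1);
}
#endif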
2280
2281/* If CLONE_SYSVSEM is set, establish sharing of SEM_UNDO state between
2282 * parent and child tasks.
2283 */
2284
2285int copy_semundo(unsigned long clone_flags, struct task_struct *tsk)
2286{
2287        struct sem_undo_list *undo_list;
2288        int error;
2289
2290        if (clone_flags & CLONE_SYSVSEM) {
2291                error = get_undo_list(&undo_list);
2292                if (error)
2293                        return error;
2294                refcount_inc(&undo_list->refcnt);
2295                tsk->sysvsem.undo_list = undo_list;
2296        } else
2297                tsk->sysvsem.undo_list = NULL;
2298
2299        return 0;
2300}
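
/*
 * Editor's note: illustrative userspace sketch, not part of this file.
 * unshare(CLONE_SYSVSEM) gives the caller a private semadj state again,
 * reversing the sharing that copy_semundo() sets up when a task is cloned
 * with CLONE_SYSVSEM (see unshare(2)).
 */
#if 0 /* userspace example only */
#define _GNU_SOURCE
#include <sched.h>

static int demo_private_undo_list(void)
{
	return unshare(CLONE_SYSVSEM);	/* detach from the shared undo list */
}
#endif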
2301
2302/*
2303 * add semadj values to semaphores, free undo structures.
2304 * undo structures are not freed when semaphore arrays are destroyed
2305 * so some of them may be out of date.
2306 * IMPLEMENTATION NOTE: There is some confusion over whether the
2307 * set of adjustments that needs to be done should be done in an atomic
2308 * manner or not. That is, if we are attempting to decrement the semval
2309 * should we queue up and wait until we can do so legally?
2310 * The original implementation attempted to do this (queue and wait).
2311 * The current implementation does not do so. The POSIX standard
2312 * and SVID should be consulted to determine what behavior is mandated.
2313 */
2314void exit_sem(struct task_struct *tsk)
2315{
2316        struct sem_undo_list *ulp;
2317
2318        ulp = tsk->sysvsem.undo_list;
2319        if (!ulp)
2320                return;
2321        tsk->sysvsem.undo_list = NULL;
2322
2323        if (!refcount_dec_and_test(&ulp->refcnt))
2324                return;
2325
2326        for (;;) {
2327                struct sem_array *sma;
2328                struct sem_undo *un;
2329                int semid, i;
2330                DEFINE_WAKE_Q(wake_q);
2331
2332                cond_resched();
2333
2334                rcu_read_lock();
2335                un = list_entry_rcu(ulp->list_proc.next,
2336                                    struct sem_undo, list_proc);
2337                if (&un->list_proc == &ulp->list_proc) {
2338                        /*
2339                         * We must wait for freeary() before freeing this ulp,
2340                         * in case we raced with the last sem_undo. There is
2341                         * a small window where we exit while freeary() hasn't
2342                         * finished unlocking the sem_undo_list.
2343                         */
2344                        spin_lock(&ulp->lock);
2345                        spin_unlock(&ulp->lock);
2346                        rcu_read_unlock();
2347                        break;
2348                }
2349                spin_lock(&ulp->lock);
2350                semid = un->semid;
2351                spin_unlock(&ulp->lock);
2352
2353                /* exit_sem raced with IPC_RMID, nothing to do */
2354                if (semid == -1) {
2355                        rcu_read_unlock();
2356                        continue;
2357                }
2358
2359                sma = sem_obtain_object_check(tsk->nsproxy->ipc_ns, semid);
2360                /* exit_sem raced with IPC_RMID, nothing to do */
2361                if (IS_ERR(sma)) {
2362                        rcu_read_unlock();
2363                        continue;
2364                }
2365
2366                sem_lock(sma, NULL, -1);
2367                /* exit_sem raced with IPC_RMID, nothing to do */
2368                if (!ipc_valid_object(&sma->sem_perm)) {
2369                        sem_unlock(sma, -1);
2370                        rcu_read_unlock();
2371                        continue;
2372                }
2373                un = __lookup_undo(ulp, semid);
2374                if (un == NULL) {
2375                        /* exit_sem raced with IPC_RMID+semget() that created
2376                         * exactly the same semid. Nothing to do.
2377                         */
2378                        sem_unlock(sma, -1);
2379                        rcu_read_unlock();
2380                        continue;
2381                }
2382
2383                /* remove un from the linked lists */
2384                ipc_assert_locked_object(&sma->sem_perm);
2385                list_del(&un->list_id);
2386
2387                spin_lock(&ulp->lock);
2388                list_del_rcu(&un->list_proc);
2389                spin_unlock(&ulp->lock);
2390
2391                /* perform adjustments registered in un */
2392                for (i = 0; i < sma->sem_nsems; i++) {
2393                        struct sem *semaphore = &sma->sems[i];
2394                        if (un->semadj[i]) {
2395                                semaphore->semval += un->semadj[i];
2396                                /*
2397                                 * Range checks of the new semaphore value,
2398                                 * not defined by SUS:
2399                                 * - Some unices ignore the undo entirely
2400                                 *   (e.g. HP UX 11i 11.22, Tru64 V5.1)
2401                                 * - some cap the value (e.g. FreeBSD caps
2402                                 *   at 0, but doesn't enforce SEMVMX)
2403                                 *
2404                                 * Linux caps the semaphore value, both at 0
2405                                 * and at SEMVMX.
2406                                 *
2407                                 *      Manfred <manfred@colorfullife.com>
2408                                 */
2409                                if (semaphore->semval < 0)
2410                                        semaphore->semval = 0;
2411                                if (semaphore->semval > SEMVMX)
2412                                        semaphore->semval = SEMVMX;
2413                                ipc_update_pid(&semaphore->sempid, task_tgid(current));
2414                        }
2415                }
2416                /* maybe some queued-up processes were waiting for this */
2417                do_smart_update(sma, NULL, 0, 1, &wake_q);
2418                sem_unlock(sma, -1);
2419                rcu_read_unlock();
2420                wake_up_q(&wake_q);
2421
2422                kfree_rcu(un, rcu);
2423        }
2424        kfree(ulp);
2425}
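
/*
 * Editor's note: illustrative userspace sketch, not part of this file.
 * With SEM_UNDO the kernel records a per-process semadj value; exit_sem()
 * above applies it when the process dies, so the decrement below is rolled
 * back automatically if the owner exits without releasing.
 */
#if 0 /* userspace example only */
#include <sys/sem.h>

static int demo_undo_acquire(int semid)
{
	struct sembuf sop = { .sem_num = 0, .sem_op = -1, .sem_flg = SEM_UNDO };

	return semop(semid, &sop, 1);	/* records semadj[0] = +1 */
}
#endif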
2426
2427#ifdef CONFIG_PROC_FS
2428static int sysvipc_sem_proc_show(struct seq_file *s, void *it)
2429{
2430        struct user_namespace *user_ns = seq_user_ns(s);
2431        struct kern_ipc_perm *ipcp = it;
2432        struct sem_array *sma = container_of(ipcp, struct sem_array, sem_perm);
2433        time64_t sem_otime;
2434
2435        /*
2436         * The proc interface isn't aware of sem_lock(); it calls
2437         * ipc_lock_object() directly (in sysvipc_find_ipc).
2438         * In order to stay compatible with sem_lock(), we must
2439         * enter / leave complex_mode.
2440         */
2441        complexmode_enter(sma);
2442
2443        sem_otime = get_semotime(sma);
2444
2445        seq_printf(s,
2446                   "%10d %10d  %4o %10u %5u %5u %5u %5u %10llu %10llu\n",
2447                   sma->sem_perm.key,
2448                   sma->sem_perm.id,
2449                   sma->sem_perm.mode,
2450                   sma->sem_nsems,
2451                   from_kuid_munged(user_ns, sma->sem_perm.uid),
2452                   from_kgid_munged(user_ns, sma->sem_perm.gid),
2453                   from_kuid_munged(user_ns, sma->sem_perm.cuid),
2454                   from_kgid_munged(user_ns, sma->sem_perm.cgid),
2455                   sem_otime,
2456                   sma->sem_ctime);
2457
2458        complexmode_tryleave(sma);
2459
2460        return 0;
2461}
2462#endif
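
/*
 * Editor's note: illustrative userspace sketch, not part of this file. It
 * dumps /proc/sysvipc/sem, one line per array, as formatted by
 * sysvipc_sem_proc_show() above.
 */
#if 0 /* userspace example only */
#include <stdio.h>

static int demo_dump_proc_sem(void)
{
	char line[256];
	FILE *f = fopen("/proc/sysvipc/sem", "r");

	if (!f)
		return -1;
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);	/* key id perms nsems uid gid ... */
	fclose(f);
	return 0;
}
#endif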
2463