linux/ipc/sem.c
/*
 * linux/ipc/sem.c
 * Copyright (C) 1992 Krishna Balasubramanian
 * Copyright (C) 1995 Eric Schenk, Bruno Haible
 *
 * /proc/sysvipc/sem support (c) 1999 Dragos Acostachioaie <dragos@iname.com>
 *
 * SMP-threaded, sysctl's added
 * (c) 1999 Manfred Spraul <manfred@colorfullife.com>
 * Enforced range limit on SEM_UNDO
 * (c) 2001 Red Hat Inc
 * Lockless wakeup
 * (c) 2003 Manfred Spraul <manfred@colorfullife.com>
 * Further wakeup optimizations, documentation
 * (c) 2010 Manfred Spraul <manfred@colorfullife.com>
 *
 * support for audit of ipc object properties and permission changes
 * Dustin Kirkland <dustin.kirkland@us.ibm.com>
 *
 * namespaces support
 * OpenVZ, SWsoft Inc.
 * Pavel Emelianov <xemul@openvz.org>
 *
 * Implementation notes: (May 2010)
 * This file implements System V semaphores.
 *
 * User space visible behavior:
 * - FIFO ordering for semop() operations (just FIFO, not starvation
 *   protection)
 * - multiple semaphore operations that alter the same semaphore in
 *   one semop() are handled.
 * - sem_ctime (time of last semctl()) is updated in the IPC_SET, SETVAL and
 *   SETALL calls.
 * - two Linux specific semctl() commands: SEM_STAT, SEM_INFO.
 * - undo adjustments at process exit are limited to 0..SEMVMX.
 * - namespaces are supported.
 * - SEMMSL, SEMMNS, SEMOPM and SEMMNI can be configured at runtime by writing
 *   to /proc/sys/kernel/sem.
 * - statistics about the usage are reported in /proc/sysvipc/sem.
 *
 * Internals:
 * - scalability:
 *   - all global variables are read-mostly.
 *   - semop() calls and semctl(RMID) are synchronized by RCU.
 *   - most operations do write operations (actually: spin_lock calls) to
 *     the per-semaphore array structure.
 *   Thus: Perfect SMP scaling between independent semaphore arrays.
 *         If multiple semaphores in one array are used, then cache line
 *         thrashing on the semaphore array spinlock will limit the scaling.
 * - semncnt and semzcnt are calculated on demand in count_semcnt()
 * - the task that performs a successful semop() scans the list of all
 *   sleeping tasks and completes any pending operations that can be fulfilled.
 *   Semaphores are actively given to waiting tasks (necessary for FIFO).
 *   (see update_queue())
 * - To improve the scalability, the actual wake-up calls are performed after
 *   dropping all locks. (see wake_up_sem_queue_prepare(),
 *   wake_up_sem_queue_do())
 * - All work is done by the waker, the woken up task does not have to do
 *   anything - not even acquiring a lock or dropping a refcount.
 * - A woken up task may not even touch the semaphore array anymore, it may
 *   have been destroyed already by a semctl(RMID).
 * - The synchronization between wake-ups due to a timeout/signal and a
 *   wake-up due to a completed semaphore operation is achieved by using an
 *   intermediate state (IN_WAKEUP).
 * - UNDO values are stored in an array (one per process and per
 *   semaphore array, lazily allocated). For backwards compatibility, multiple
 *   modes for the UNDO variables are supported (per process, per thread)
 *   (see copy_semundo, CLONE_SYSVSEM)
 * - There are two lists of the pending operations: a per-array list
 *   and a per-semaphore list (stored in the array). This allows FIFO
 *   ordering to be achieved without always scanning all pending operations.
 *   The worst-case behavior is nevertheless O(N^2) for N wakeups.
 */
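/*
 * Illustration (user space, not part of this kernel file): a minimal
 * sketch of the user-visible behavior listed above. The SEM_UNDO
 * adjustment is tracked in the per-process undo structures defined
 * below and applied automatically at process exit (see exit_sem):
 *
 *      int id = semget(IPC_PRIVATE, 1, IPC_CREAT | 0600);
 *      struct sembuf op = {
 *              .sem_num = 0,
 *              .sem_op  = 1,           -- increment, never blocks
 *              .sem_flg = SEM_UNDO,    -- undone at process exit
 *      };
 *      semop(id, &op, 1);
 */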

#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/proc_fs.h>
#include <linux/time.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/audit.h>
#include <linux/capability.h>
#include <linux/seq_file.h>
#include <linux/rwsem.h>
#include <linux/nsproxy.h>
#include <linux/ipc_namespace.h>

#include <linux/uaccess.h>
#include "util.h"

/* One semaphore structure for each semaphore in the system. */
struct sem {
        int     semval;         /* current value */
        /*
         * PID of the process that last modified the semaphore. For
         * Linux, specifically these are:
         *  - semop
         *  - semctl, via SETVAL and SETALL.
         *  - at task exit when performing undo adjustments (see exit_sem).
         */
        int     sempid;
        spinlock_t      lock;   /* spinlock for fine-grained semtimedop */
        struct list_head pending_alter; /* pending single-sop operations */
                                        /* that alter the semaphore */
        struct list_head pending_const; /* pending single-sop operations */
                                        /* that do not alter the semaphore */
        time_t  sem_otime;      /* candidate for sem_otime */
} ____cacheline_aligned_in_smp;

/* One queue for each sleeping process in the system. */
struct sem_queue {
        struct list_head        list;    /* queue of pending operations */
        struct task_struct      *sleeper; /* this process */
        struct sem_undo         *undo;   /* undo structure */
        int                     pid;     /* process id of requesting process */
        int                     status;  /* completion status of operation */
        struct sembuf           *sops;   /* array of pending operations */
        struct sembuf           *blocking; /* the operation that blocked */
        int                     nsops;   /* number of operations */
        int                     alter;   /* does *sops alter the array? */
};

/* Each task has a list of undo requests. They are executed automatically
 * when the process exits.
 */
struct sem_undo {
        struct list_head        list_proc;      /* per-process list: *
                                                 * all undos from one process
                                                 * rcu protected */
        struct rcu_head         rcu;            /* rcu struct for sem_undo */
        struct sem_undo_list    *ulp;           /* back ptr to sem_undo_list */
        struct list_head        list_id;        /* per semaphore array list:
                                                 * all undos for one array */
        int                     semid;          /* semaphore set identifier */
        short                   *semadj;        /* array of adjustments */
                                                /* one per semaphore */
};

/* sem_undo_list controls shared access to the list of sem_undo structures
 * that may be shared among all tasks of a CLONE_SYSVSEM task group.
 */
struct sem_undo_list {
        atomic_t                refcnt;
        spinlock_t              lock;
        struct list_head        list_proc;
};


#define sem_ids(ns)     ((ns)->ids[IPC_SEM_IDS])

#define sem_checkid(sma, semid) ipc_checkid(&sma->sem_perm, semid)

static int newary(struct ipc_namespace *, struct ipc_params *);
static void freeary(struct ipc_namespace *, struct kern_ipc_perm *);
#ifdef CONFIG_PROC_FS
static int sysvipc_sem_proc_show(struct seq_file *s, void *it);
#endif

#define SEMMSL_FAST     256 /* 512 bytes on stack */
#define SEMOPM_FAST     64  /* ~ 372 bytes on stack */
/*
 * Locking:
 *      sem_undo.list_id,
 *      sem_array.complex_count,
 *      sem_array.pending{_alter,_const},
 *      sem_array.sem_undo: global sem_lock() for read/write
 *      sem_undo.list_proc: only "current" is allowed to read/write that field.
 *
 *      sem_array.sem_base[i].pending_{const,alter}:
 *              global or semaphore sem_lock() for read/write
 */

#define sc_semmsl       sem_ctls[0]
#define sc_semmns       sem_ctls[1]
#define sc_semopm       sem_ctls[2]
#define sc_semmni       sem_ctls[3]

void sem_init_ns(struct ipc_namespace *ns)
{
        ns->sc_semmsl = SEMMSL;
        ns->sc_semmns = SEMMNS;
        ns->sc_semopm = SEMOPM;
        ns->sc_semmni = SEMMNI;
        ns->used_sems = 0;
        ipc_init_ids(&ns->ids[IPC_SEM_IDS]);
}

#ifdef CONFIG_IPC_NS
void sem_exit_ns(struct ipc_namespace *ns)
{
        free_ipcs(ns, &sem_ids(ns), freeary);
        idr_destroy(&ns->ids[IPC_SEM_IDS].ipcs_idr);
}
#endif

void __init sem_init(void)
{
        sem_init_ns(&init_ipc_ns);
        ipc_init_proc_interface("sysvipc/sem",
                                "       key      semid perms      nsems   uid   gid  cuid  cgid      otime      ctime\n",
                                IPC_SEM_IDS, sysvipc_sem_proc_show);
}

/**
 * unmerge_queues - unmerge queues, if possible.
 * @sma: semaphore array
 *
 * The function unmerges the wait queues if complex_count is 0.
 * It must be called prior to dropping the global semaphore array lock.
 */
static void unmerge_queues(struct sem_array *sma)
{
        struct sem_queue *q, *tq;

        /* complex operations still around? */
        if (sma->complex_count)
                return;
        /*
         * We will switch back to simple mode.
         * Move all pending operations back into the per-semaphore
         * queues.
         */
        list_for_each_entry_safe(q, tq, &sma->pending_alter, list) {
                struct sem *curr;
                curr = &sma->sem_base[q->sops[0].sem_num];

                list_add_tail(&q->list, &curr->pending_alter);
        }
        INIT_LIST_HEAD(&sma->pending_alter);
}

/**
 * merge_queues - merge single semop queues into global queue
 * @sma: semaphore array
 *
 * This function merges all per-semaphore queues into the global queue.
 * It is necessary to achieve FIFO ordering for the pending single-sop
 * operations when a multi-semop operation must sleep.
 * Only the alter operations must be moved, the const operations can stay.
 */
static void merge_queues(struct sem_array *sma)
{
        int i;
        for (i = 0; i < sma->sem_nsems; i++) {
                struct sem *sem = sma->sem_base + i;

                list_splice_init(&sem->pending_alter, &sma->pending_alter);
        }
}
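
/*
 * Illustration of the two queueing modes that merge_queues() and
 * unmerge_queues() switch between (a sketch, not code):
 *
 *      complex_count == 0:             complex_count != 0:
 *        sem[0].pending_alter: A         sma->pending_alter: A -> B
 *        sem[1].pending_alter: B         sem[0].pending_alter: empty
 *        sma->pending_alter: empty       sem[1].pending_alter: empty
 *
 * Single-sop alter operations wait on the per-semaphore lists until a
 * complex operation arrives; merging them into the global list preserves
 * FIFO ordering across all pending alter operations.
 */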

static void sem_rcu_free(struct rcu_head *head)
{
        struct ipc_rcu *p = container_of(head, struct ipc_rcu, rcu);
        struct sem_array *sma = ipc_rcu_to_struct(p);

        security_sem_free(sma);
        ipc_rcu_free(head);
}

/*
 * spin_unlock_wait() and !spin_is_locked() are not memory barriers, they
 * are only control barriers.
 * The code must pair with spin_unlock(&sem->lock) or
 * spin_unlock(&sem_perm.lock), thus just the control barrier is insufficient.
 *
 * smp_rmb() is sufficient, as writes cannot pass the control barrier.
 */
#define ipc_smp_acquire__after_spin_is_unlocked()       smp_rmb()

/*
 * Wait until all currently ongoing simple ops have completed.
 * Caller must own sem_perm.lock.
 * New simple ops cannot start, because simple ops first check
 * that a) sem_perm.lock is free and b) complex_count is 0.
 */
static void sem_wait_array(struct sem_array *sma)
{
        int i;
        struct sem *sem;

        if (sma->complex_count) {
                /* The thread that increased sma->complex_count waited on
                 * all sem->lock locks. Thus we don't need to wait again.
                 */
                return;
        }

        for (i = 0; i < sma->sem_nsems; i++) {
                sem = sma->sem_base + i;
                spin_unlock_wait(&sem->lock);
        }
        ipc_smp_acquire__after_spin_is_unlocked();
}

/*
 * If the request contains only one semaphore operation, and there are
 * no complex transactions pending, lock only the semaphore involved.
 * Otherwise, lock the entire semaphore array, since we either have
 * multiple semaphores in our own semops, or we need to look at
 * semaphores from other pending complex operations.
 */
static inline int sem_lock(struct sem_array *sma, struct sembuf *sops,
                              int nsops)
{
        struct sem *sem;

        if (nsops != 1) {
                /* Complex operation - acquire a full lock */
                ipc_lock_object(&sma->sem_perm);

                /* And wait until all simple ops that are processed
                 * right now have dropped their locks.
                 */
                sem_wait_array(sma);
                return -1;
        }

        /*
         * Only one semaphore affected - try to optimize locking.
         * The rules are:
         * - optimized locking is possible if no complex operation
         *   is either enqueued or processed right now.
         * - The test for enqueued complex ops is simple:
         *      sma->complex_count != 0
         * - Testing for complex ops that are processed right now is
         *   a bit more difficult. Complex ops acquire the full lock
         *   and first wait until the running simple ops have completed.
         *   (see above)
         *   Thus: If we own a simple lock and the global lock is free
         *      and complex_count is now 0, then it will stay 0 and
         *      thus just locking sem->lock is sufficient.
         */
        sem = sma->sem_base + sops->sem_num;

        if (sma->complex_count == 0) {
                /*
                 * It appears that no complex operation is around.
                 * Acquire the per-semaphore lock.
                 */
                spin_lock(&sem->lock);

                /* Then check that the global lock is free */
                if (!spin_is_locked(&sma->sem_perm.lock)) {
                        /*
                         * We need a memory barrier with acquire semantics,
                         * otherwise we can race with another thread that does:
                         *      complex_count++;
                         *      spin_unlock(sem_perm.lock);
                         */
                        ipc_smp_acquire__after_spin_is_unlocked();

                        /*
                         * Now repeat the test of complex_count:
                         * It can't change anymore until we drop sem->lock.
                         * Thus: if it is now 0, then it will stay 0.
                         */
                        if (sma->complex_count == 0) {
                                /* fast path successful! */
                                return sops->sem_num;
                        }
                }
                spin_unlock(&sem->lock);
        }

        /* slow path: acquire the full lock */
        ipc_lock_object(&sma->sem_perm);

        if (sma->complex_count == 0) {
                /* False alarm:
                 * There is no complex operation, thus we can switch
                 * back to the fast path.
                 */
                spin_lock(&sem->lock);
                ipc_unlock_object(&sma->sem_perm);
                return sops->sem_num;
        } else {
                /* Not a false alarm, thus complete the sequence for a
                 * full lock.
                 */
                sem_wait_array(sma);
                return -1;
        }
}

static inline void sem_unlock(struct sem_array *sma, int locknum)
{
        if (locknum == -1) {
                unmerge_queues(sma);
                ipc_unlock_object(&sma->sem_perm);
        } else {
                struct sem *sem = sma->sem_base + locknum;
                spin_unlock(&sem->lock);
        }
}
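
/*
 * Typical caller pattern for the two helpers above (a sketch only; see
 * e.g. semctl_setval() below for a real user of the locking helpers):
 *
 *      int locknum;
 *
 *      rcu_read_lock();
 *      ...look up sma, check permissions...
 *      locknum = sem_lock(sma, sops, nsops);
 *      ...operate on the array...
 *      sem_unlock(sma, locknum);
 *      rcu_read_unlock();
 */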

/*
 * sem_lock_(check_) routines are called in the paths where the rwsem
 * is not held.
 *
 * The caller holds the RCU read lock.
 */
static inline struct sem_array *sem_obtain_lock(struct ipc_namespace *ns,
                        int id, struct sembuf *sops, int nsops, int *locknum)
{
        struct kern_ipc_perm *ipcp;
        struct sem_array *sma;

        ipcp = ipc_obtain_object_idr(&sem_ids(ns), id);
        if (IS_ERR(ipcp))
                return ERR_CAST(ipcp);

        sma = container_of(ipcp, struct sem_array, sem_perm);
        *locknum = sem_lock(sma, sops, nsops);

        /* ipc_rmid() may have already freed the ID while sem_lock
         * was spinning: verify that the structure is still valid
         */
        if (ipc_valid_object(ipcp))
                return container_of(ipcp, struct sem_array, sem_perm);

        sem_unlock(sma, *locknum);
        return ERR_PTR(-EINVAL);
}

static inline struct sem_array *sem_obtain_object(struct ipc_namespace *ns, int id)
{
        struct kern_ipc_perm *ipcp = ipc_obtain_object_idr(&sem_ids(ns), id);

        if (IS_ERR(ipcp))
                return ERR_CAST(ipcp);

        return container_of(ipcp, struct sem_array, sem_perm);
}

static inline struct sem_array *sem_obtain_object_check(struct ipc_namespace *ns,
                                                        int id)
{
        struct kern_ipc_perm *ipcp = ipc_obtain_object_check(&sem_ids(ns), id);

        if (IS_ERR(ipcp))
                return ERR_CAST(ipcp);

        return container_of(ipcp, struct sem_array, sem_perm);
}

static inline void sem_lock_and_putref(struct sem_array *sma)
{
        sem_lock(sma, NULL, -1);
        ipc_rcu_putref(sma, ipc_rcu_free);
}

static inline void sem_rmid(struct ipc_namespace *ns, struct sem_array *s)
{
        ipc_rmid(&sem_ids(ns), &s->sem_perm);
}

/*
 * Lockless wakeup algorithm:
 * Without the check/retry algorithm a lockless wakeup is possible:
 * - queue.status is initialized to -EINTR before blocking.
 * - wakeup is performed by
 *      * unlinking the queue entry from the pending list
 *      * setting queue.status to IN_WAKEUP
 *        This is the notification for the blocked thread that a
 *        result value is imminent.
 *      * calling wake_up_process
 *      * setting queue.status to the final value.
 * - the previously blocked thread checks queue.status:
 *      * if it's IN_WAKEUP, then it must wait until the value changes
 *      * if it's not -EINTR, then the operation was completed by
 *        update_queue. semtimedop can return queue.status without
 *        performing any operation on the sem array.
 *      * otherwise it must acquire the spinlock and check what's up.
 *
 * The two-stage algorithm is necessary to protect against the following
 * races:
 * - if queue.status is set after wake_up_process, then the woken up idle
 *   thread could race forward and try (and fail) to acquire sma->lock
 *   before update_queue had a chance to set queue.status
 * - if queue.status is written before wake_up_process and if the
 *   blocked process is woken up by a signal between writing
 *   queue.status and the wake_up_process, then the woken up
 *   process could return from semtimedop and die by calling
 *   sys_exit before wake_up_process is called. Then wake_up_process
 *   will oops, because the task structure is already invalid.
 *   (yes, this happened on s390 with sysv msg).
 */
#define IN_WAKEUP       1
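
/*
 * Sleeper side of the protocol above, as a sketch (the actual
 * implementation lives in get_queue_result() further down in the full
 * file):
 *
 *      error = q->status;
 *      while (unlikely(error == IN_WAKEUP)) {
 *              cpu_relax();
 *              error = q->status;
 *      }
 */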

/**
 * newary - Create a new semaphore set
 * @ns: namespace
 * @params: ptr to the structure that contains key, semflg and nsems
 *
 * Called with sem_ids.rwsem held (as a writer)
 */
static int newary(struct ipc_namespace *ns, struct ipc_params *params)
{
        int id;
        int retval;
        struct sem_array *sma;
        int size;
        key_t key = params->key;
        int nsems = params->u.nsems;
        int semflg = params->flg;
        int i;

        if (!nsems)
                return -EINVAL;
        if (ns->used_sems + nsems > ns->sc_semmns)
                return -ENOSPC;

        size = sizeof(*sma) + nsems * sizeof(struct sem);
        sma = ipc_rcu_alloc(size);
        if (!sma)
                return -ENOMEM;

        memset(sma, 0, size);

        sma->sem_perm.mode = (semflg & S_IRWXUGO);
        sma->sem_perm.key = key;

        sma->sem_perm.security = NULL;
        retval = security_sem_alloc(sma);
        if (retval) {
                ipc_rcu_putref(sma, ipc_rcu_free);
                return retval;
        }

        sma->sem_base = (struct sem *) &sma[1];

        for (i = 0; i < nsems; i++) {
                INIT_LIST_HEAD(&sma->sem_base[i].pending_alter);
                INIT_LIST_HEAD(&sma->sem_base[i].pending_const);
                spin_lock_init(&sma->sem_base[i].lock);
        }

        sma->complex_count = 0;
        INIT_LIST_HEAD(&sma->pending_alter);
        INIT_LIST_HEAD(&sma->pending_const);
        INIT_LIST_HEAD(&sma->list_id);
        sma->sem_nsems = nsems;
        sma->sem_ctime = get_seconds();

        id = ipc_addid(&sem_ids(ns), &sma->sem_perm, ns->sc_semmni);
        if (id < 0) {
                ipc_rcu_putref(sma, sem_rcu_free);
                return id;
        }
        ns->used_sems += nsems;

        sem_unlock(sma, -1);
        rcu_read_unlock();

        return sma->sem_perm.id;
}


/*
 * Called with sem_ids.rwsem and ipcp locked.
 */
static inline int sem_security(struct kern_ipc_perm *ipcp, int semflg)
{
        struct sem_array *sma;

        sma = container_of(ipcp, struct sem_array, sem_perm);
        return security_sem_associate(sma, semflg);
}

/*
 * Called with sem_ids.rwsem and ipcp locked.
 */
static inline int sem_more_checks(struct kern_ipc_perm *ipcp,
                                struct ipc_params *params)
{
        struct sem_array *sma;

        sma = container_of(ipcp, struct sem_array, sem_perm);
        if (params->u.nsems > sma->sem_nsems)
                return -EINVAL;

        return 0;
}

SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
{
        struct ipc_namespace *ns;
        static const struct ipc_ops sem_ops = {
                .getnew = newary,
                .associate = sem_security,
                .more_checks = sem_more_checks,
        };
        struct ipc_params sem_params;

        ns = current->nsproxy->ipc_ns;

        if (nsems < 0 || nsems > ns->sc_semmsl)
                return -EINVAL;

        sem_params.key = key;
        sem_params.flg = semflg;
        sem_params.u.nsems = nsems;

        return ipcget(ns, &sem_ids(ns), &sem_ops, &sem_params);
}

/**
 * perform_atomic_semop - Perform (if possible) a semaphore operation
 * @sma: semaphore array
 * @q: struct sem_queue that describes the operation
 *
 * Returns 0 if the operation was possible.
 * Returns 1 if the operation is impossible; the caller must sleep.
 * Negative values are error codes.
 */
static int perform_atomic_semop(struct sem_array *sma, struct sem_queue *q)
{
        int result, sem_op, nsops, pid;
        struct sembuf *sop;
        struct sem *curr;
        struct sembuf *sops;
        struct sem_undo *un;

        sops = q->sops;
        nsops = q->nsops;
        un = q->undo;

        for (sop = sops; sop < sops + nsops; sop++) {
                curr = sma->sem_base + sop->sem_num;
                sem_op = sop->sem_op;
                result = curr->semval;

                if (!sem_op && result)
                        goto would_block;

                result += sem_op;
                if (result < 0)
                        goto would_block;
                if (result > SEMVMX)
                        goto out_of_range;

                if (sop->sem_flg & SEM_UNDO) {
                        int undo = un->semadj[sop->sem_num] - sem_op;
                        /* Exceeding the undo range is an error. */
                        if (undo < (-SEMAEM - 1) || undo > SEMAEM)
                                goto out_of_range;
                        un->semadj[sop->sem_num] = undo;
                }

                curr->semval = result;
        }

        sop--;
        pid = q->pid;
        while (sop >= sops) {
                sma->sem_base[sop->sem_num].sempid = pid;
                sop--;
        }

        return 0;

out_of_range:
        result = -ERANGE;
        goto undo;

would_block:
        q->blocking = sop;

        if (sop->sem_flg & IPC_NOWAIT)
                result = -EAGAIN;
        else
                result = 1;

undo:
        sop--;
        while (sop >= sops) {
                sem_op = sop->sem_op;
                sma->sem_base[sop->sem_num].semval -= sem_op;
                if (sop->sem_flg & SEM_UNDO)
                        un->semadj[sop->sem_num] += sem_op;
                sop--;
        }

        return result;
}
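
/*
 * Example of the all-or-nothing semantics above (illustrative): with
 * semval[0] == 1 and semval[1] == 0, the request
 *
 *      struct sembuf sops[] = {
 *              { .sem_num = 0, .sem_op = -1 },
 *              { .sem_num = 1, .sem_op = -1 },
 *      };
 *
 * decrements semaphore 0, finds that semaphore 1 would go negative,
 * jumps to would_block and rolls the first decrement back before
 * returning 1 (sleep) or -EAGAIN (IPC_NOWAIT): semval[0] is 1 again.
 */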

/**
 * wake_up_sem_queue_prepare - prepare the wake-up of a queue entry
 * @pt: list head for collecting the tasks that must be woken up
 * @q: queue entry that must be signaled
 * @error: Error value for the signal
 *
 * Prepare the wake-up of the queue entry q.
 */
static void wake_up_sem_queue_prepare(struct list_head *pt,
                                struct sem_queue *q, int error)
{
        if (list_empty(pt)) {
                /*
                 * Hold preempt off so that we don't get preempted and have the
                 * wakee busy-wait until we're scheduled back on.
                 */
                preempt_disable();
        }
        q->status = IN_WAKEUP;
        q->pid = error;

        list_add_tail(&q->list, pt);
}

/**
 * wake_up_sem_queue_do - do the actual wake-up
 * @pt: list of tasks to be woken up
 *
 * Do the actual wake-up.
 * The function is called without any locks held, thus the semaphore array
 * could be destroyed already and the tasks can disappear as soon as the
 * status is set to the actual return code.
 */
static void wake_up_sem_queue_do(struct list_head *pt)
{
        struct sem_queue *q, *t;
        int did_something;

        did_something = !list_empty(pt);
        list_for_each_entry_safe(q, t, pt, list) {
                wake_up_process(q->sleeper);
                /* q can disappear immediately after writing q->status. */
                smp_wmb();
                q->status = q->pid;
        }
        if (did_something)
                preempt_enable();
}

static void unlink_queue(struct sem_array *sma, struct sem_queue *q)
{
        list_del(&q->list);
        if (q->nsops > 1)
                sma->complex_count--;
}

/**
 * check_restart - check whether the queue scan must be restarted
 * @sma: semaphore array
 * @q: the operation that just completed
 *
 * update_queue is O(N^2) when it restarts scanning the whole queue of
 * waiting operations. Therefore this function checks if the restart is
 * really necessary. It is called after a previously waiting operation
 * modified the array.
 * Note that wait-for-zero operations are handled without restart.
 */
static int check_restart(struct sem_array *sma, struct sem_queue *q)
{
        /* pending complex alter operations are too difficult to analyse */
        if (!list_empty(&sma->pending_alter))
                return 1;

        /* we were a sleeping complex operation. Too difficult */
        if (q->nsops > 1)
                return 1;

        /* It is impossible that someone waits for the new value:
         * - complex operations always restart.
         * - wait-for-zero are handled separately.
         * - q is a previously sleeping simple operation that
         *   altered the array. It must be a decrement, because
         *   simple increments never sleep.
         * - If there are older (higher priority) decrements
         *   in the queue, then they have observed the original
         *   semval value and couldn't proceed. The operation
         *   decremented the value - thus they won't proceed either.
         */
        return 0;
}

/**
 * wake_const_ops - wake up non-alter tasks
 * @sma: semaphore array.
 * @semnum: semaphore that was modified.
 * @pt: list head for the tasks that must be woken up.
 *
 * wake_const_ops must be called after a semaphore in a semaphore array
 * was set to 0. If complex const operations are pending, wake_const_ops must
 * be called with semnum = -1, as well as with the number of each modified
 * semaphore.
 * The tasks that must be woken up are added to @pt. The return code
 * is stored in q->pid.
 * The function returns 1 if at least one operation was completed successfully.
 */
static int wake_const_ops(struct sem_array *sma, int semnum,
                                struct list_head *pt)
{
        struct sem_queue *q;
        struct list_head *walk;
        struct list_head *pending_list;
        int semop_completed = 0;

        if (semnum == -1)
                pending_list = &sma->pending_const;
        else
                pending_list = &sma->sem_base[semnum].pending_const;

        walk = pending_list->next;
        while (walk != pending_list) {
                int error;

                q = container_of(walk, struct sem_queue, list);
                walk = walk->next;

                error = perform_atomic_semop(sma, q);

                if (error <= 0) {
                        /* operation completed, remove from queue & wakeup */

                        unlink_queue(sma, q);

                        wake_up_sem_queue_prepare(pt, q, error);
                        if (error == 0)
                                semop_completed = 1;
                }
        }
        return semop_completed;
}

/**
 * do_smart_wakeup_zero - wakeup all wait for zero tasks
 * @sma: semaphore array
 * @sops: operations that were performed
 * @nsops: number of operations
 * @pt: list head of the tasks that must be woken up.
 *
 * Checks all required queues for wait-for-zero operations, based
 * on the actual changes that were performed on the semaphore array.
 * The function returns 1 if at least one operation was completed successfully.
 */
static int do_smart_wakeup_zero(struct sem_array *sma, struct sembuf *sops,
                                        int nsops, struct list_head *pt)
{
        int i;
        int semop_completed = 0;
        int got_zero = 0;

        /* first: the per-semaphore queues, if known */
        if (sops) {
                for (i = 0; i < nsops; i++) {
                        int num = sops[i].sem_num;

                        if (sma->sem_base[num].semval == 0) {
                                got_zero = 1;
                                semop_completed |= wake_const_ops(sma, num, pt);
                        }
                }
        } else {
                /*
                 * No sops means modified semaphores not known.
                 * Assume all were changed.
                 */
                for (i = 0; i < sma->sem_nsems; i++) {
                        if (sma->sem_base[i].semval == 0) {
                                got_zero = 1;
                                semop_completed |= wake_const_ops(sma, i, pt);
                        }
                }
        }
        /*
         * If one of the modified semaphores got 0,
         * then check the global queue, too.
         */
        if (got_zero)
                semop_completed |= wake_const_ops(sma, -1, pt);

        return semop_completed;
}


/**
 * update_queue - look for tasks that can be completed.
 * @sma: semaphore array.
 * @semnum: semaphore that was modified.
 * @pt: list head for the tasks that must be woken up.
 *
 * update_queue must be called after a semaphore in a semaphore array
 * was modified. If multiple semaphores were modified, update_queue must
 * be called with semnum = -1, as well as with the number of each modified
 * semaphore.
 * The tasks that must be woken up are added to @pt. The return code
 * is stored in q->pid.
 * The function internally checks if const operations can now succeed.
 *
 * The function returns 1 if at least one semop was completed successfully.
 */
static int update_queue(struct sem_array *sma, int semnum, struct list_head *pt)
{
        struct sem_queue *q;
        struct list_head *walk;
        struct list_head *pending_list;
        int semop_completed = 0;

        if (semnum == -1)
                pending_list = &sma->pending_alter;
        else
                pending_list = &sma->sem_base[semnum].pending_alter;

again:
        walk = pending_list->next;
        while (walk != pending_list) {
                int error, restart;

                q = container_of(walk, struct sem_queue, list);
                walk = walk->next;

                /* If we are scanning the single-sop, per-semaphore list of
                 * one semaphore and that semaphore is 0, then it is not
                 * necessary to scan further: simple increments
                 * that affect only one entry succeed immediately and cannot
                 * be in the per-semaphore pending queue, and decrements
                 * cannot be successful if the value is already 0.
                 */
                if (semnum != -1 && sma->sem_base[semnum].semval == 0)
                        break;

                error = perform_atomic_semop(sma, q);

                /* Does q->sleeper still need to sleep? */
                if (error > 0)
                        continue;

                unlink_queue(sma, q);

                if (error) {
                        restart = 0;
                } else {
                        semop_completed = 1;
                        do_smart_wakeup_zero(sma, q->sops, q->nsops, pt);
                        restart = check_restart(sma, q);
                }

                wake_up_sem_queue_prepare(pt, q, error);
                if (restart)
                        goto again;
        }
        return semop_completed;
}

/**
 * set_semotime - set sem_otime
 * @sma: semaphore array
 * @sops: operations that modified the array, may be NULL
 *
 * sem_otime is replicated to avoid cache line thrashing.
 * This function sets one instance to the current time.
 */
static void set_semotime(struct sem_array *sma, struct sembuf *sops)
{
        if (sops == NULL) {
                sma->sem_base[0].sem_otime = get_seconds();
        } else {
                sma->sem_base[sops[0].sem_num].sem_otime =
                                                        get_seconds();
        }
}

/**
 * do_smart_update - optimized update_queue
 * @sma: semaphore array
 * @sops: operations that were performed
 * @nsops: number of operations
 * @otime: force setting otime
 * @pt: list head of the tasks that must be woken up.
 *
 * do_smart_update() does the required calls to update_queue and wakeup_zero,
 * based on the actual changes that were performed on the semaphore array.
 * Note that the function does not do the actual wake-up: the caller is
 * responsible for calling wake_up_sem_queue_do(@pt).
 * It is safe to perform this call after dropping all locks.
 */
static void do_smart_update(struct sem_array *sma, struct sembuf *sops, int nsops,
                        int otime, struct list_head *pt)
{
        int i;

        otime |= do_smart_wakeup_zero(sma, sops, nsops, pt);

        if (!list_empty(&sma->pending_alter)) {
                /* semaphore array uses the global queue - just process it. */
                otime |= update_queue(sma, -1, pt);
        } else {
                if (!sops) {
                        /*
                         * No sops, thus the modified semaphores are not
                         * known. Check all.
                         */
                        for (i = 0; i < sma->sem_nsems; i++)
                                otime |= update_queue(sma, i, pt);
                } else {
                        /*
                         * Check the semaphores that were increased:
                         * - No complex ops, thus all sleeping ops are
                         *   decreases.
                         * - if we decreased the value, then any sleeping
                         *   semaphore ops won't be able to run: If the
                         *   previous value was too small, then the new
                         *   value will be too small, too.
                         */
                        for (i = 0; i < nsops; i++) {
                                if (sops[i].sem_op > 0) {
                                        otime |= update_queue(sma,
                                                        sops[i].sem_num, pt);
                                }
                        }
                }
        }
        if (otime)
                set_semotime(sma, sops);
}
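
/*
 * Example of the rescan heuristic above (illustrative): if no complex
 * operations are pending and the caller performed
 *
 *      struct sembuf sops[] = {
 *              { .sem_num = 2, .sem_op = 1 },
 *              { .sem_num = 5, .sem_op = -1 },
 *      };
 *
 * then only sem_base[2].pending_alter is rescanned: the increase may
 * unblock a sleeping decrement, whereas semaphore 5 only got smaller
 * and cannot satisfy an operation that was already blocked on it.
 */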

/*
 * check_qop: Test if a queued operation sleeps on the semaphore semnum
 */
static int check_qop(struct sem_array *sma, int semnum, struct sem_queue *q,
                        bool count_zero)
{
        struct sembuf *sop = q->blocking;

        /*
         * Linux always (since 0.99.10) reported a task as sleeping on all
         * semaphores. This violates SUS, therefore it was changed to the
         * standard compliant behavior.
         * Give the administrators a chance to notice that an application
         * might misbehave because it relies on the Linux behavior.
         */
        pr_info_once("semctl(GETNCNT/GETZCNT) is since 3.16 Single Unix Specification compliant.\n"
                        "The task %s (%d) triggered the difference, watch for misbehavior.\n",
                        current->comm, task_pid_nr(current));

        if (sop->sem_num != semnum)
                return 0;

        if (count_zero && sop->sem_op == 0)
                return 1;
        if (!count_zero && sop->sem_op < 0)
                return 1;

        return 0;
}

/* The following counts are associated to each semaphore:
 *   semncnt        number of tasks waiting on semval being nonzero
 *   semzcnt        number of tasks waiting on semval being zero
 *
 * Per definition, a task waits only on the semaphore of the first semop
 * that cannot proceed, even if additional operations would block, too.
 */
static int count_semcnt(struct sem_array *sma, ushort semnum,
                        bool count_zero)
{
        struct list_head *l;
        struct sem_queue *q;
        int semcnt;

        semcnt = 0;
        /* First: check the simple operations. They are easy to evaluate */
        if (count_zero)
                l = &sma->sem_base[semnum].pending_const;
        else
                l = &sma->sem_base[semnum].pending_alter;

        list_for_each_entry(q, l, list) {
                /* all tasks on a per-semaphore list sleep on exactly
                 * that semaphore
                 */
                semcnt++;
        }

        /* Then: check the complex operations. */
        list_for_each_entry(q, &sma->pending_alter, list) {
                semcnt += check_qop(sma, semnum, q, count_zero);
        }
        if (count_zero) {
                list_for_each_entry(q, &sma->pending_const, list) {
                        semcnt += check_qop(sma, semnum, q, count_zero);
                }
        }
        return semcnt;
}
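
/*
 * Example (illustrative): a task blocked in a complex semop on
 * { {.sem_num = 0, .sem_op = -1}, {.sem_num = 1, .sem_op = 0} } with
 * semval[0] == 0 sleeps on semaphore 0 only: GETNCNT on semaphore 0
 * counts it, GETZCNT on semaphore 1 does not, because q->blocking
 * points at the first operation that could not proceed.
 */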

/* Free a semaphore set. freeary() is called with sem_ids.rwsem locked
 * as a writer and the spinlock for this semaphore set held. sem_ids.rwsem
 * remains locked on exit.
 */
static void freeary(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
{
        struct sem_undo *un, *tu;
        struct sem_queue *q, *tq;
        struct sem_array *sma = container_of(ipcp, struct sem_array, sem_perm);
        struct list_head tasks;
        int i;

        /* Free the existing undo structures for this semaphore set.  */
        ipc_assert_locked_object(&sma->sem_perm);
        list_for_each_entry_safe(un, tu, &sma->list_id, list_id) {
                list_del(&un->list_id);
                spin_lock(&un->ulp->lock);
                un->semid = -1;
                list_del_rcu(&un->list_proc);
                spin_unlock(&un->ulp->lock);
                kfree_rcu(un, rcu);
        }

        /* Wake up all pending processes and let them fail with EIDRM. */
        INIT_LIST_HEAD(&tasks);
        list_for_each_entry_safe(q, tq, &sma->pending_const, list) {
                unlink_queue(sma, q);
                wake_up_sem_queue_prepare(&tasks, q, -EIDRM);
        }

        list_for_each_entry_safe(q, tq, &sma->pending_alter, list) {
                unlink_queue(sma, q);
                wake_up_sem_queue_prepare(&tasks, q, -EIDRM);
        }
        for (i = 0; i < sma->sem_nsems; i++) {
                struct sem *sem = sma->sem_base + i;
                list_for_each_entry_safe(q, tq, &sem->pending_const, list) {
                        unlink_queue(sma, q);
                        wake_up_sem_queue_prepare(&tasks, q, -EIDRM);
                }
                list_for_each_entry_safe(q, tq, &sem->pending_alter, list) {
                        unlink_queue(sma, q);
                        wake_up_sem_queue_prepare(&tasks, q, -EIDRM);
                }
        }

        /* Remove the semaphore set from the IDR */
        sem_rmid(ns, sma);
        sem_unlock(sma, -1);
        rcu_read_unlock();

        wake_up_sem_queue_do(&tasks);
        ns->used_sems -= sma->sem_nsems;
        ipc_rcu_putref(sma, sem_rcu_free);
}

static unsigned long copy_semid_to_user(void __user *buf, struct semid64_ds *in, int version)
{
        switch (version) {
        case IPC_64:
                return copy_to_user(buf, in, sizeof(*in));
        case IPC_OLD:
            {
                struct semid_ds out;

                memset(&out, 0, sizeof(out));

                ipc64_perm_to_ipc_perm(&in->sem_perm, &out.sem_perm);

                out.sem_otime   = in->sem_otime;
                out.sem_ctime   = in->sem_ctime;
                out.sem_nsems   = in->sem_nsems;

                return copy_to_user(buf, &out, sizeof(out));
            }
        default:
                return -EINVAL;
        }
}

static time_t get_semotime(struct sem_array *sma)
{
        int i;
        time_t res;

        res = sma->sem_base[0].sem_otime;
        for (i = 1; i < sma->sem_nsems; i++) {
                time_t to = sma->sem_base[i].sem_otime;

                if (to > res)
                        res = to;
        }
        return res;
}

static int semctl_nolock(struct ipc_namespace *ns, int semid,
                         int cmd, int version, void __user *p)
{
        int err;
        struct sem_array *sma;

        switch (cmd) {
        case IPC_INFO:
        case SEM_INFO:
        {
                struct seminfo seminfo;
                int max_id;

                err = security_sem_semctl(NULL, cmd);
                if (err)
                        return err;

                memset(&seminfo, 0, sizeof(seminfo));
                seminfo.semmni = ns->sc_semmni;
                seminfo.semmns = ns->sc_semmns;
                seminfo.semmsl = ns->sc_semmsl;
                seminfo.semopm = ns->sc_semopm;
                seminfo.semvmx = SEMVMX;
                seminfo.semmnu = SEMMNU;
                seminfo.semmap = SEMMAP;
                seminfo.semume = SEMUME;
                down_read(&sem_ids(ns).rwsem);
                if (cmd == SEM_INFO) {
                        seminfo.semusz = sem_ids(ns).in_use;
                        seminfo.semaem = ns->used_sems;
                } else {
                        seminfo.semusz = SEMUSZ;
                        seminfo.semaem = SEMAEM;
                }
                max_id = ipc_get_maxid(&sem_ids(ns));
                up_read(&sem_ids(ns).rwsem);
                if (copy_to_user(p, &seminfo, sizeof(struct seminfo)))
                        return -EFAULT;
                return (max_id < 0) ? 0 : max_id;
        }
        case IPC_STAT:
        case SEM_STAT:
        {
                struct semid64_ds tbuf;
                int id = 0;

                memset(&tbuf, 0, sizeof(tbuf));

                rcu_read_lock();
                if (cmd == SEM_STAT) {
                        sma = sem_obtain_object(ns, semid);
                        if (IS_ERR(sma)) {
                                err = PTR_ERR(sma);
                                goto out_unlock;
                        }
                        id = sma->sem_perm.id;
                } else {
                        sma = sem_obtain_object_check(ns, semid);
                        if (IS_ERR(sma)) {
                                err = PTR_ERR(sma);
                                goto out_unlock;
                        }
                }

                err = -EACCES;
                if (ipcperms(ns, &sma->sem_perm, S_IRUGO))
                        goto out_unlock;

                err = security_sem_semctl(sma, cmd);
                if (err)
                        goto out_unlock;

                kernel_to_ipc64_perm(&sma->sem_perm, &tbuf.sem_perm);
                tbuf.sem_otime = get_semotime(sma);
                tbuf.sem_ctime = sma->sem_ctime;
                tbuf.sem_nsems = sma->sem_nsems;
                rcu_read_unlock();
                if (copy_semid_to_user(p, &tbuf, version))
                        return -EFAULT;
                return id;
        }
        default:
                return -EINVAL;
        }
out_unlock:
        rcu_read_unlock();
        return err;
}

static int semctl_setval(struct ipc_namespace *ns, int semid, int semnum,
                unsigned long arg)
{
        struct sem_undo *un;
        struct sem_array *sma;
        struct sem *curr;
        int err;
        struct list_head tasks;
        int val;
#if defined(CONFIG_64BIT) && defined(__BIG_ENDIAN)
        /* big-endian 64bit */
        val = arg >> 32;
#else
        /* 32bit or little-endian 64bit */
        val = arg;
#endif

        if (val > SEMVMX || val < 0)
                return -ERANGE;

        INIT_LIST_HEAD(&tasks);

        rcu_read_lock();
        sma = sem_obtain_object_check(ns, semid);
        if (IS_ERR(sma)) {
                rcu_read_unlock();
                return PTR_ERR(sma);
        }

        if (semnum < 0 || semnum >= sma->sem_nsems) {
                rcu_read_unlock();
                return -EINVAL;
        }

        if (ipcperms(ns, &sma->sem_perm, S_IWUGO)) {
                rcu_read_unlock();
                return -EACCES;
        }

        err = security_sem_semctl(sma, SETVAL);
        if (err) {
                rcu_read_unlock();
                return -EACCES;
        }

        sem_lock(sma, NULL, -1);

        if (!ipc_valid_object(&sma->sem_perm)) {
                sem_unlock(sma, -1);
                rcu_read_unlock();
                return -EIDRM;
        }

        curr = &sma->sem_base[semnum];

        ipc_assert_locked_object(&sma->sem_perm);
        list_for_each_entry(un, &sma->list_id, list_id)
                un->semadj[semnum] = 0;

        curr->semval = val;
        curr->sempid = task_tgid_vnr(current);
        sma->sem_ctime = get_seconds();
        /* maybe some queued-up processes were waiting for this */
        do_smart_update(sma, NULL, 0, 0, &tasks);
        sem_unlock(sma, -1);
        rcu_read_unlock();
        wake_up_sem_queue_do(&tasks);
        return 0;
}
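
/*
 * User-space view of the SETVAL path above (illustrative; on Linux the
 * caller must define union semun itself):
 *
 *      union semun { int val; } arg;
 *
 *      arg.val = 1;
 *      semctl(id, 0, SETVAL, arg);
 *
 * Besides setting the value, this clears the semadj value for the
 * semaphore in every undo structure attached to the set, as done by the
 * list_for_each_entry() loop above.
 */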
1344
1345static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
1346                int cmd, void __user *p)
1347{
1348        struct sem_array *sma;
1349        struct sem *curr;
1350        int err, nsems;
1351        ushort fast_sem_io[SEMMSL_FAST];
1352        ushort *sem_io = fast_sem_io;
1353        struct list_head tasks;
1354
1355        INIT_LIST_HEAD(&tasks);
1356
1357        rcu_read_lock();
1358        sma = sem_obtain_object_check(ns, semid);
1359        if (IS_ERR(sma)) {
1360                rcu_read_unlock();
1361                return PTR_ERR(sma);
1362        }
1363
1364        nsems = sma->sem_nsems;
1365
1366        err = -EACCES;
1367        if (ipcperms(ns, &sma->sem_perm, cmd == SETALL ? S_IWUGO : S_IRUGO))
1368                goto out_rcu_wakeup;
1369
1370        err = security_sem_semctl(sma, cmd);
1371        if (err)
1372                goto out_rcu_wakeup;
1373
1374        err = -EACCES;
1375        switch (cmd) {
1376        case GETALL:
1377        {
1378                ushort __user *array = p;
1379                int i;
1380
1381                sem_lock(sma, NULL, -1);
1382                if (!ipc_valid_object(&sma->sem_perm)) {
1383                        err = -EIDRM;
1384                        goto out_unlock;
1385                }
1386                if (nsems > SEMMSL_FAST) {
1387                        if (!ipc_rcu_getref(sma)) {
1388                                err = -EIDRM;
1389                                goto out_unlock;
1390                        }
1391                        sem_unlock(sma, -1);
1392                        rcu_read_unlock();
1393                        sem_io = ipc_alloc(sizeof(ushort)*nsems);
1394                        if (sem_io == NULL) {
1395                                ipc_rcu_putref(sma, ipc_rcu_free);
1396                                return -ENOMEM;
1397                        }
1398
1399                        rcu_read_lock();
1400                        sem_lock_and_putref(sma);
1401                        if (!ipc_valid_object(&sma->sem_perm)) {
1402                                err = -EIDRM;
1403                                goto out_unlock;
1404                        }
1405                }
1406                for (i = 0; i < sma->sem_nsems; i++)
1407                        sem_io[i] = sma->sem_base[i].semval;
1408                sem_unlock(sma, -1);
1409                rcu_read_unlock();
1410                err = 0;
1411                if (copy_to_user(array, sem_io, nsems*sizeof(ushort)))
1412                        err = -EFAULT;
1413                goto out_free;
1414        }
1415        case SETALL:
1416        {
1417                int i;
1418                struct sem_undo *un;
1419
1420                if (!ipc_rcu_getref(sma)) {
1421                        err = -EIDRM;
1422                        goto out_rcu_wakeup;
1423                }
1424                rcu_read_unlock();
1425
1426                if (nsems > SEMMSL_FAST) {
1427                        sem_io = ipc_alloc(sizeof(ushort)*nsems);
1428                        if (sem_io == NULL) {
1429                                ipc_rcu_putref(sma, ipc_rcu_free);
1430                                return -ENOMEM;
1431                        }
1432                }
1433
1434                if (copy_from_user(sem_io, p, nsems*sizeof(ushort))) {
1435                        ipc_rcu_putref(sma, ipc_rcu_free);
1436                        err = -EFAULT;
1437                        goto out_free;
1438                }
1439
1440                for (i = 0; i < nsems; i++) {
1441                        if (sem_io[i] > SEMVMX) {
1442                                ipc_rcu_putref(sma, ipc_rcu_free);
1443                                err = -ERANGE;
1444                                goto out_free;
1445                        }
1446                }
1447                rcu_read_lock();
1448                sem_lock_and_putref(sma);
1449                if (!ipc_valid_object(&sma->sem_perm)) {
1450                        err = -EIDRM;
1451                        goto out_unlock;
1452                }
1453
1454                for (i = 0; i < nsems; i++) {
1455                        sma->sem_base[i].semval = sem_io[i];
1456                        sma->sem_base[i].sempid = task_tgid_vnr(current);
1457                }
1458
1459                ipc_assert_locked_object(&sma->sem_perm);
1460                list_for_each_entry(un, &sma->list_id, list_id) {
1461                        for (i = 0; i < nsems; i++)
1462                                un->semadj[i] = 0;
1463                }
1464                sma->sem_ctime = get_seconds();
1465                /* maybe some queued-up processes were waiting for this */
1466                do_smart_update(sma, NULL, 0, 0, &tasks);
1467                err = 0;
1468                goto out_unlock;
1469        }
1470        /* GETVAL, GETPID, GETNCTN, GETZCNT: fall-through */
1471        }
1472        err = -EINVAL;
1473        if (semnum < 0 || semnum >= nsems)
1474                goto out_rcu_wakeup;
1475
1476        sem_lock(sma, NULL, -1);
1477        if (!ipc_valid_object(&sma->sem_perm)) {
1478                err = -EIDRM;
1479                goto out_unlock;
1480        }
1481        curr = &sma->sem_base[semnum];
1482
1483        switch (cmd) {
1484        case GETVAL:
1485                err = curr->semval;
1486                goto out_unlock;
1487        case GETPID:
1488                err = curr->sempid;
1489                goto out_unlock;
1490        case GETNCNT:
1491                err = count_semcnt(sma, semnum, 0);
1492                goto out_unlock;
1493        case GETZCNT:
1494                err = count_semcnt(sma, semnum, 1);
1495                goto out_unlock;
1496        }
1497
1498out_unlock:
1499        sem_unlock(sma, -1);
1500out_rcu_wakeup:
1501        rcu_read_unlock();
1502        wake_up_sem_queue_do(&tasks);
1503out_free:
1504        if (sem_io != fast_sem_io)
1505                ipc_free(sem_io);
1506        return err;
1507}
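/*
 * Illustration (user space, not kernel code): a minimal sketch of how the
 * GETALL/SETALL paths above are typically driven.  On Linux the caller
 * must define union semun itself:
 *
 *	union semun {
 *		int val;
 *		struct semid_ds *buf;
 *		unsigned short *array;
 *	};
 *
 *	unsigned short vals[2] = { 1, 0 };
 *	union semun arg = { .array = vals };
 *
 *	semctl(semid, 0, SETALL, arg);	(writes all semvals, clears semadj)
 *	semctl(semid, 0, GETALL, arg);	(reads them back into vals[])
 */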
1508
1509static inline unsigned long
1510copy_semid_from_user(struct semid64_ds *out, void __user *buf, int version)
1511{
1512        switch (version) {
1513        case IPC_64:
1514                if (copy_from_user(out, buf, sizeof(*out)))
1515                        return -EFAULT;
1516                return 0;
1517        case IPC_OLD:
1518            {
1519                struct semid_ds tbuf_old;
1520
1521                if (copy_from_user(&tbuf_old, buf, sizeof(tbuf_old)))
1522                        return -EFAULT;
1523
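                /* The old ABI carries only uid, gid and mode; everything
                 * else in tbuf_old is ignored.
                 */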
1524                out->sem_perm.uid       = tbuf_old.sem_perm.uid;
1525                out->sem_perm.gid       = tbuf_old.sem_perm.gid;
1526                out->sem_perm.mode      = tbuf_old.sem_perm.mode;
1527
1528                return 0;
1529            }
1530        default:
1531                return -EINVAL;
1532        }
1533}
1534
1535/*
1536 * This function handles some semctl commands which require the rwsem
1537 * to be held in write mode.
1538 * NOTE: the caller must hold no locks; the rwsem is taken inside this function.
1539 */
1540static int semctl_down(struct ipc_namespace *ns, int semid,
1541                       int cmd, int version, void __user *p)
1542{
1543        struct sem_array *sma;
1544        int err;
1545        struct semid64_ds semid64;
1546        struct kern_ipc_perm *ipcp;
1547
1548        if (cmd == IPC_SET) {
1549                if (copy_semid_from_user(&semid64, p, version))
1550                        return -EFAULT;
1551        }
1552
1553        down_write(&sem_ids(ns).rwsem);
1554        rcu_read_lock();
1555
1556        ipcp = ipcctl_pre_down_nolock(ns, &sem_ids(ns), semid, cmd,
1557                                      &semid64.sem_perm, 0);
1558        if (IS_ERR(ipcp)) {
1559                err = PTR_ERR(ipcp);
1560                goto out_unlock1;
1561        }
1562
1563        sma = container_of(ipcp, struct sem_array, sem_perm);
1564
1565        err = security_sem_semctl(sma, cmd);
1566        if (err)
1567                goto out_unlock1;
1568
1569        switch (cmd) {
1570        case IPC_RMID:
1571                sem_lock(sma, NULL, -1);
1572                /* freeary unlocks the ipc object and rcu */
1573                freeary(ns, ipcp);
1574                goto out_up;
1575        case IPC_SET:
1576                sem_lock(sma, NULL, -1);
1577                err = ipc_update_perm(&semid64.sem_perm, ipcp);
1578                if (err)
1579                        goto out_unlock0;
1580                sma->sem_ctime = get_seconds();
1581                break;
1582        default:
1583                err = -EINVAL;
1584                goto out_unlock1;
1585        }
1586
1587out_unlock0:
1588        sem_unlock(sma, -1);
1589out_unlock1:
1590        rcu_read_unlock();
1591out_up:
1592        up_write(&sem_ids(ns).rwsem);
1593        return err;
1594}
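/*
 * Illustration (user space): the two commands routed through semctl_down().
 * A sketch only; it assumes the union semun definition shown earlier.
 *
 *	struct semid_ds ds;
 *	union semun arg = { .buf = &ds };
 *
 *	semctl(semid, 0, IPC_STAT, arg);	(fetch current sem_perm)
 *	ds.sem_perm.mode = 0600;
 *	semctl(semid, 0, IPC_SET, arg);		(update uid/gid/mode)
 *	semctl(semid, 0, IPC_RMID);		(destroy the array)
 */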
1595
1596SYSCALL_DEFINE4(semctl, int, semid, int, semnum, int, cmd, unsigned long, arg)
1597{
1598        int version;
1599        struct ipc_namespace *ns;
1600        void __user *p = (void __user *)arg;
1601
1602        if (semid < 0)
1603                return -EINVAL;
1604
1605        version = ipc_parse_version(&cmd);
1606        ns = current->nsproxy->ipc_ns;
1607
1608        switch (cmd) {
1609        case IPC_INFO:
1610        case SEM_INFO:
1611        case IPC_STAT:
1612        case SEM_STAT:
1613                return semctl_nolock(ns, semid, cmd, version, p);
1614        case GETALL:
1615        case GETVAL:
1616        case GETPID:
1617        case GETNCNT:
1618        case GETZCNT:
1619        case SETALL:
1620                return semctl_main(ns, semid, semnum, cmd, p);
1621        case SETVAL:
1622                return semctl_setval(ns, semid, semnum, arg);
1623        case IPC_RMID:
1624        case IPC_SET:
1625                return semctl_down(ns, semid, cmd, version, p);
1626        default:
1627                return -EINVAL;
1628        }
1629}
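/*
 * Note on the calling convention (illustrative): the final semctl()
 * argument arrives as a plain unsigned long because C libraries pass
 * union semun by value.  SETVAL extracts its integer value from @arg,
 * while the pointer-taking commands reinterpret it as a user pointer
 * (@p above):
 *
 *	union semun arg = { .val = 1 };
 *	semctl(semid, 0, SETVAL, arg);	(arg.val travels in the long)
 */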
1630
1631/* If the task doesn't already have an undo_list, then allocate one
1632 * here.  We guarantee there is only one thread using this undo list,
1633 * and current is THE ONE.
1634 *
1635 * If this allocation and assignment succeeds, but later
1636 * portions of this code fail, there is no need to free the sem_undo_list.
1637 * Just let it stay associated with the task, and it'll be freed later
1638 * at exit time.
1639 *
1640 * This can block, so callers must hold no locks.
1641 */
1642static inline int get_undo_list(struct sem_undo_list **undo_listp)
1643{
1644        struct sem_undo_list *undo_list;
1645
1646        undo_list = current->sysvsem.undo_list;
1647        if (!undo_list) {
1648                undo_list = kzalloc(sizeof(*undo_list), GFP_KERNEL);
1649                if (undo_list == NULL)
1650                        return -ENOMEM;
1651                spin_lock_init(&undo_list->lock);
1652                atomic_set(&undo_list->refcnt, 1);
1653                INIT_LIST_HEAD(&undo_list->list_proc);
1654
1655                current->sysvsem.undo_list = undo_list;
1656        }
1657        *undo_listp = undo_list;
1658        return 0;
1659}
1660
1661static struct sem_undo *__lookup_undo(struct sem_undo_list *ulp, int semid)
1662{
1663        struct sem_undo *un;
1664
1665        list_for_each_entry_rcu(un, &ulp->list_proc, list_proc) {
1666                if (un->semid == semid)
1667                        return un;
1668        }
1669        return NULL;
1670}
1671
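/*
 * lookup_undo() is __lookup_undo() plus a move-to-front step: a hit is
 * rotated to the head of list_proc, so a task that repeatedly operates
 * on the same semaphore array finds its undo structure in the first
 * list entry.
 */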
1672static struct sem_undo *lookup_undo(struct sem_undo_list *ulp, int semid)
1673{
1674        struct sem_undo *un;
1675
1676        assert_spin_locked(&ulp->lock);
1677
1678        un = __lookup_undo(ulp, semid);
1679        if (un) {
1680                list_del_rcu(&un->list_proc);
1681                list_add_rcu(&un->list_proc, &ulp->list_proc);
1682        }
1683        return un;
1684}
1685
1686/**
1687 * find_alloc_undo - lookup (and if not present create) undo array
1688 * @ns: namespace
1689 * @semid: semaphore array id
1690 *
1691 * The function looks up (and if not present creates) the undo structure.
1692 * The size of the undo structure depends on the size of the semaphore
1693 * array, thus the alloc path is not that straightforward.
1694 * Lifetime rules: sem_undo is rcu-protected; on success, the function
1695 * returns with rcu_read_lock() held.
1696 */
1697static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid)
1698{
1699        struct sem_array *sma;
1700        struct sem_undo_list *ulp;
1701        struct sem_undo *un, *new;
1702        int nsems, error;
1703
1704        error = get_undo_list(&ulp);
1705        if (error)
1706                return ERR_PTR(error);
1707
1708        rcu_read_lock();
1709        spin_lock(&ulp->lock);
1710        un = lookup_undo(ulp, semid);
1711        spin_unlock(&ulp->lock);
1712        if (likely(un != NULL))
1713                goto out;
1714
1715        /* no undo structure around - allocate one. */
1716        /* step 1: figure out the size of the semaphore array */
1717        sma = sem_obtain_object_check(ns, semid);
1718        if (IS_ERR(sma)) {
1719                rcu_read_unlock();
1720                return ERR_CAST(sma);
1721        }
1722
1723        nsems = sma->sem_nsems;
1724        if (!ipc_rcu_getref(sma)) {
1725                rcu_read_unlock();
1726                un = ERR_PTR(-EIDRM);
1727                goto out;
1728        }
1729        rcu_read_unlock();
1730
1731        /* step 2: allocate new undo structure */
1732        new = kzalloc(sizeof(struct sem_undo) + sizeof(short)*nsems, GFP_KERNEL);
1733        if (!new) {
1734                ipc_rcu_putref(sma, ipc_rcu_free);
1735                return ERR_PTR(-ENOMEM);
1736        }
1737
1738        /* step 3: Acquire the lock on semaphore array */
1739        rcu_read_lock();
1740        sem_lock_and_putref(sma);
1741        if (!ipc_valid_object(&sma->sem_perm)) {
1742                sem_unlock(sma, -1);
1743                rcu_read_unlock();
1744                kfree(new);
1745                un = ERR_PTR(-EIDRM);
1746                goto out;
1747        }
1748        spin_lock(&ulp->lock);
1749
1750        /*
1751         * step 4: check for races: did someone else allocate the undo struct?
1752         */
1753        un = lookup_undo(ulp, semid);
1754        if (un) {
1755                kfree(new);
1756                goto success;
1757        }
1758        /* step 5: initialize & link new undo structure */
1759        new->semadj = (short *) &new[1];
1760        new->ulp = ulp;
1761        new->semid = semid;
1762        assert_spin_locked(&ulp->lock);
1763        list_add_rcu(&new->list_proc, &ulp->list_proc);
1764        ipc_assert_locked_object(&sma->sem_perm);
1765        list_add(&new->list_id, &sma->list_id);
1766        un = new;
1767
1768success:
1769        spin_unlock(&ulp->lock);
1770        sem_unlock(sma, -1);
1771out:
1772        return un;
1773}
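/*
 * Illustration (user space): the undo machinery above is driven by the
 * SEM_UNDO flag.  A minimal sketch:
 *
 *	struct sembuf op = {
 *		.sem_num = 0,
 *		.sem_op  = -1,		(take the semaphore ...)
 *		.sem_flg = SEM_UNDO,	(... and record +1 in semadj)
 *	};
 *
 *	semop(semid, &op, 1);
 *
 * If the task exits without releasing the semaphore, exit_sem() below
 * applies the recorded adjustment and the value is restored.
 */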
1774
1775
1776/**
1777 * get_queue_result - retrieve the result code from sem_queue
1778 * @q: Pointer to queue structure
1779 *
1780 * Retrieve the return code from the pending queue. If IN_WAKEUP is found in
1781 * q->status, then we must loop until the value is replaced with the final
1782 * value: This may happen if a task is woken up by an unrelated event (e.g.
1783 * signal) and in parallel the task is woken up by another task because it got
1784 * the requested semaphores.
1785 *
1786 * The function can be called with or without holding the semaphore spinlock.
1787 */
1788static int get_queue_result(struct sem_queue *q)
1789{
1790        int error;
1791
1792        error = q->status;
1793        while (unlikely(error == IN_WAKEUP)) {
1794                cpu_relax();
1795                error = q->status;
1796        }
1797
1798        return error;
1799}
1800
1801SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
1802                unsigned, nsops, const struct timespec __user *, timeout)
1803{
1804        int error = -EINVAL;
1805        struct sem_array *sma;
1806        struct sembuf fast_sops[SEMOPM_FAST];
1807        struct sembuf *sops = fast_sops, *sop;
1808        struct sem_undo *un;
1809        int undos = 0, alter = 0, max, locknum;
1810        struct sem_queue queue;
1811        unsigned long jiffies_left = 0;
1812        struct ipc_namespace *ns;
1813        struct list_head tasks;
1814
1815        ns = current->nsproxy->ipc_ns;
1816
1817        if (nsops < 1 || semid < 0)
1818                return -EINVAL;
1819        if (nsops > ns->sc_semopm)
1820                return -E2BIG;
1821        if (nsops > SEMOPM_FAST) {
1822                sops = kmalloc(sizeof(*sops)*nsops, GFP_KERNEL);
1823                if (sops == NULL)
1824                        return -ENOMEM;
1825        }
1826        if (copy_from_user(sops, tsops, nsops * sizeof(*tsops))) {
1827                error =  -EFAULT;
1828                goto out_free;
1829        }
1830        if (timeout) {
1831                struct timespec _timeout;
1832                if (copy_from_user(&_timeout, timeout, sizeof(*timeout))) {
1833                        error = -EFAULT;
1834                        goto out_free;
1835                }
1836                if (_timeout.tv_sec < 0 || _timeout.tv_nsec < 0 ||
1837                        _timeout.tv_nsec >= 1000000000L) {
1838                        error = -EINVAL;
1839                        goto out_free;
1840                }
1841                jiffies_left = timespec_to_jiffies(&_timeout);
1842        }
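        /*
         * Scan the operations: record the highest semaphore number touched
         * (for the range check against sem_nsems below), whether any op
         * carries SEM_UNDO, and whether any op alters a semaphore value.
         */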
1843        max = 0;
1844        for (sop = sops; sop < sops + nsops; sop++) {
1845                if (sop->sem_num >= max)
1846                        max = sop->sem_num;
1847                if (sop->sem_flg & SEM_UNDO)
1848                        undos = 1;
1849                if (sop->sem_op != 0)
1850                        alter = 1;
1851        }
1852
1853        INIT_LIST_HEAD(&tasks);
1854
1855        if (undos) {
1856                /* On success, find_alloc_undo() returns with the rcu_read_lock held */
1857                un = find_alloc_undo(ns, semid);
1858                if (IS_ERR(un)) {
1859                        error = PTR_ERR(un);
1860                        goto out_free;
1861                }
1862        } else {
1863                un = NULL;
1864                rcu_read_lock();
1865        }
1866
1867        sma = sem_obtain_object_check(ns, semid);
1868        if (IS_ERR(sma)) {
1869                rcu_read_unlock();
1870                error = PTR_ERR(sma);
1871                goto out_free;
1872        }
1873
1874        error = -EFBIG;
1875        if (max >= sma->sem_nsems)
1876                goto out_rcu_wakeup;
1877
1878        error = -EACCES;
1879        if (ipcperms(ns, &sma->sem_perm, alter ? S_IWUGO : S_IRUGO))
1880                goto out_rcu_wakeup;
1881
1882        error = security_sem_semop(sma, sops, nsops, alter);
1883        if (error)
1884                goto out_rcu_wakeup;
1885
1886        error = -EIDRM;
1887        locknum = sem_lock(sma, sops, nsops);
1888        /*
1889         * We eventually might perform the following check in a lockless
1890         * fashion, considering ipc_valid_object() locking constraints.
1891         * If nsops == 1 and there is no contention for sem_perm.lock, then
1892         * only a per-semaphore lock is held and it's OK to proceed with the
1893         * check below. More details on the fine-grained locking scheme
1894         * entangled here, and why it is RMID race safe, are in the comments at sem_lock().
1895         */
1896        if (!ipc_valid_object(&sma->sem_perm))
1897                goto out_unlock_free;
1898        /*
1899         * semid identifiers are not unique - find_alloc_undo may have
1900         * allocated an undo structure, it was invalidated by an RMID,
1901         * and a new array has since received the same id. Check and fail.
1902         * This case can be detected by checking un->semid. The existence of
1903         * "un" itself is guaranteed by rcu.
1904         */
1905        if (un && un->semid == -1)
1906                goto out_unlock_free;
1907
1908        queue.sops = sops;
1909        queue.nsops = nsops;
1910        queue.undo = un;
1911        queue.pid = task_tgid_vnr(current);
1912        queue.alter = alter;
1913
1914        error = perform_atomic_semop(sma, &queue);
1915        if (error == 0) {
1916                /* If the operation was successful, then do
1917                 * the required updates.
1918                 */
1919                if (alter)
1920                        do_smart_update(sma, sops, nsops, 1, &tasks);
1921                else
1922                        set_semotime(sma, sops);
1923        }
1924        if (error <= 0)
1925                goto out_unlock_free;
1926
1927        /* We need to sleep on this operation, so we put the current
1928         * task into the pending queue and go to sleep.
1929         */
1930
1931        if (nsops == 1) {
1932                struct sem *curr;
1933                curr = &sma->sem_base[sops->sem_num];
1934
1935                if (alter) {
1936                        if (sma->complex_count) {
1937                                list_add_tail(&queue.list,
1938                                                &sma->pending_alter);
1939                        } else {
1940
1941                                list_add_tail(&queue.list,
1942                                                &curr->pending_alter);
1943                        }
1944                } else {
1945                        list_add_tail(&queue.list, &curr->pending_const);
1946                }
1947        } else {
1948                if (!sma->complex_count)
1949                        merge_queues(sma);
1950
1951                if (alter)
1952                        list_add_tail(&queue.list, &sma->pending_alter);
1953                else
1954                        list_add_tail(&queue.list, &sma->pending_const);
1955
1956                sma->complex_count++;
1957        }
1958
1959        queue.status = -EINTR;
1960        queue.sleeper = current;
1961
1962sleep_again:
1963        __set_current_state(TASK_INTERRUPTIBLE);
1964        sem_unlock(sma, locknum);
1965        rcu_read_unlock();
1966
1967        if (timeout)
1968                jiffies_left = schedule_timeout(jiffies_left);
1969        else
1970                schedule();
1971
1972        error = get_queue_result(&queue);
1973
1974        if (error != -EINTR) {
1975                /* fast path: update_queue already obtained all requested
1976                 * resources.
1977                 * Perform an smp_mb(): user space could assume that semop()
1978                 * is a memory barrier. Without the mb(), the cpu could
1979                 * speculatively read stale user-space data that was
1980                 * overwritten by the previous owner of the semaphore.
1981                 */
1982                smp_mb();
1983
1984                goto out_free;
1985        }
1986
1987        rcu_read_lock();
1988        sma = sem_obtain_lock(ns, semid, sops, nsops, &locknum);
1989
1990        /*
1991         * Wait until it's guaranteed that no wake_up_sem_queue_do() is ongoing.
1992         */
1993        error = get_queue_result(&queue);
1994
1995        /*
1996         * Array removed? If yes, leave without sem_unlock().
1997         */
1998        if (IS_ERR(sma)) {
1999                rcu_read_unlock();
2000                goto out_free;
2001        }
2002
2003
2004        /*
2005         * If queue.status != -EINTR we are woken up by another process.
2006         * Leave without unlink_queue(), but with sem_unlock().
2007         */
2008        if (error != -EINTR)
2009                goto out_unlock_free;
2010
2011        /*
2012         * If an interrupt occurred, we have to clean up the queue.
2013         */
2014        if (timeout && jiffies_left == 0)
2015                error = -EAGAIN;
2016
2017        /*
2018         * If the wakeup was spurious, just retry
2019         */
2020        if (error == -EINTR && !signal_pending(current))
2021                goto sleep_again;
2022
2023        unlink_queue(sma, &queue);
2024
2025out_unlock_free:
2026        sem_unlock(sma, locknum);
2027out_rcu_wakeup:
2028        rcu_read_unlock();
2029        wake_up_sem_queue_do(&tasks);
2030out_free:
2031        if (sops != fast_sops)
2032                kfree(sops);
2033        return error;
2034}
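/*
 * Illustration (user space): the timeout path.  A sketch only; on expiry
 * the call fails with EAGAIN, as set in the cleanup code above.
 *
 *	struct sembuf op = { .sem_num = 0, .sem_op = -1, .sem_flg = 0 };
 *	struct timespec ts = { .tv_sec = 1, .tv_nsec = 0 };
 *
 *	if (semtimedop(semid, &op, 1, &ts) == -1 && errno == EAGAIN)
 *		handle_timeout();	(hypothetical handler)
 */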
2035
2036SYSCALL_DEFINE3(semop, int, semid, struct sembuf __user *, tsops,
2037                unsigned, nsops)
2038{
2039        return sys_semtimedop(semid, tsops, nsops, NULL);
2040}
2041
2042/* If CLONE_SYSVSEM is set, establish sharing of SEM_UNDO state between
2043 * parent and child tasks.
2044 */
2045
2046int copy_semundo(unsigned long clone_flags, struct task_struct *tsk)
2047{
2048        struct sem_undo_list *undo_list;
2049        int error;
2050
2051        if (clone_flags & CLONE_SYSVSEM) {
2052                error = get_undo_list(&undo_list);
2053                if (error)
2054                        return error;
2055                atomic_inc(&undo_list->refcnt);
2056                tsk->sysvsem.undo_list = undo_list;
2057        } else
2058                tsk->sysvsem.undo_list = NULL;
2059
2060        return 0;
2061}
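/*
 * Illustration (user space): with CLONE_SYSVSEM the child shares the
 * parent's undo list instead of starting with an empty one, so SEM_UNDO
 * adjustments are applied once, when the last sharer exits:
 *
 *	clone(child_fn, stack_top,
 *	      CLONE_VM | CLONE_SYSVSEM | SIGCHLD, NULL);
 *
 * (child_fn and stack_top stand in for the usual clone() setup.)
 */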
2062
2063/*
2064 * add semadj values to semaphores, free undo structures.
2065 * undo structures are not freed when semaphore arrays are destroyed
2066 * so some of them may be out of date.
2067 * IMPLEMENTATION NOTE: There is some confusion over whether the
2068 * set of adjustments that needs to be done should be done in an atomic
2069 * manner or not. That is, if we are attempting to decrement the semval,
2070 * should we queue up and wait until we can do so legally?
2071 * The original implementation attempted to do this (queue and wait).
2072 * The current implementation does not do so. The POSIX standard
2073 * and SVID should be consulted to determine what behavior is mandated.
2074 */
2075void exit_sem(struct task_struct *tsk)
2076{
2077        struct sem_undo_list *ulp;
2078
2079        ulp = tsk->sysvsem.undo_list;
2080        if (!ulp)
2081                return;
2082        tsk->sysvsem.undo_list = NULL;
2083
2084        if (!atomic_dec_and_test(&ulp->refcnt))
2085                return;
2086
2087        for (;;) {
2088                struct sem_array *sma;
2089                struct sem_undo *un;
2090                struct list_head tasks;
2091                int semid, i;
2092
2093                rcu_read_lock();
2094                un = list_entry_rcu(ulp->list_proc.next,
2095                                    struct sem_undo, list_proc);
2096                if (&un->list_proc == &ulp->list_proc) {
2097                        /*
2098                         * We must wait for freeary() before freeing this ulp,
2099                         * in case we raced with the last sem_undo. There is
2100                         * a small window where we could exit before freeary()
2101                         * has finished unlocking the sem_undo_list.
2102                         */
2103                        spin_unlock_wait(&ulp->lock);
2104                        rcu_read_unlock();
2105                        break;
2106                }
2107                spin_lock(&ulp->lock);
2108                semid = un->semid;
2109                spin_unlock(&ulp->lock);
2110
2111                /* exit_sem raced with IPC_RMID, nothing to do */
2112                if (semid == -1) {
2113                        rcu_read_unlock();
2114                        continue;
2115                }
2116
2117                sma = sem_obtain_object_check(tsk->nsproxy->ipc_ns, semid);
2118                /* exit_sem raced with IPC_RMID, nothing to do */
2119                if (IS_ERR(sma)) {
2120                        rcu_read_unlock();
2121                        continue;
2122                }
2123
2124                sem_lock(sma, NULL, -1);
2125                /* exit_sem raced with IPC_RMID, nothing to do */
2126                if (!ipc_valid_object(&sma->sem_perm)) {
2127                        sem_unlock(sma, -1);
2128                        rcu_read_unlock();
2129                        continue;
2130                }
2131                un = __lookup_undo(ulp, semid);
2132                if (un == NULL) {
2133                        /* exit_sem raced with IPC_RMID+semget() that created
2134                         * exactly the same semid. Nothing to do.
2135                         */
2136                        sem_unlock(sma, -1);
2137                        rcu_read_unlock();
2138                        continue;
2139                }
2140
2141                /* remove un from the linked lists */
2142                ipc_assert_locked_object(&sma->sem_perm);
2143                list_del(&un->list_id);
2144
2145                /* we are the last process using this ulp, acquiring ulp->lock
2146                 * isn't required. Besides that, we are also protected against
2147                 * IPC_RMID as we hold sma->sem_perm lock now
2148                 */
2149                list_del_rcu(&un->list_proc);
2150
2151                /* perform adjustments registered in un */
2152                for (i = 0; i < sma->sem_nsems; i++) {
2153                        struct sem *semaphore = &sma->sem_base[i];
2154                        if (un->semadj[i]) {
2155                                semaphore->semval += un->semadj[i];
2156                                /*
2157                                 * Range checks of the new semaphore value,
2158                                 * not defined by SUS:
2159                                 * - Some unices ignore the undo entirely
2160                                 *   (e.g. HP UX 11i 11.22, Tru64 V5.1)
2161                                 * - some cap the value (e.g. FreeBSD caps
2162                                 *   at 0, but doesn't enforce SEMVMX)
2163                                 *
2164                                 * Linux caps the semaphore value, both at 0
2165                                 * and at SEMVMX.
2166                                 *
2167                                 *      Manfred <manfred@colorfullife.com>
2168                                 */
2169                                if (semaphore->semval < 0)
2170                                        semaphore->semval = 0;
2171                                if (semaphore->semval > SEMVMX)
2172                                        semaphore->semval = SEMVMX;
2173                                semaphore->sempid = task_tgid_vnr(current);
2174                        }
2175                }
2176                /* maybe some queued-up processes were waiting for this */
2177                INIT_LIST_HEAD(&tasks);
2178                do_smart_update(sma, NULL, 0, 1, &tasks);
2179                sem_unlock(sma, -1);
2180                rcu_read_unlock();
2181                wake_up_sem_queue_do(&tasks);
2182
2183                kfree_rcu(un, rcu);
2184        }
2185        kfree(ulp);
2186}
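/*
 * Worked example for the clamping in exit_sem() (illustrative): if a task
 * recorded semadj = -2 (it raised the semaphore twice with SEM_UNDO) but
 * semval is meanwhile only 1, the adjustment would yield -1 and is capped
 * at 0.  Symmetrically, semadj = +2 against semval = SEMVMX - 1 is capped
 * at SEMVMX.
 */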
2187
2188#ifdef CONFIG_PROC_FS
2189static int sysvipc_sem_proc_show(struct seq_file *s, void *it)
2190{
2191        struct user_namespace *user_ns = seq_user_ns(s);
2192        struct sem_array *sma = it;
2193        time_t sem_otime;
2194
2195        /*
2196         * The proc interface isn't aware of sem_lock(), it calls
2197         * ipc_lock_object() directly (in sysvipc_find_ipc).
2198         * In order to stay compatible with sem_lock(), we must wait until
2199         * all simple semop() calls have left their critical regions.
2200         */
2201        sem_wait_array(sma);
2202
2203        sem_otime = get_semotime(sma);
2204
2205        seq_printf(s,
2206                   "%10d %10d  %4o %10u %5u %5u %5u %5u %10lu %10lu\n",
2207                   sma->sem_perm.key,
2208                   sma->sem_perm.id,
2209                   sma->sem_perm.mode,
2210                   sma->sem_nsems,
2211                   from_kuid_munged(user_ns, sma->sem_perm.uid),
2212                   from_kgid_munged(user_ns, sma->sem_perm.gid),
2213                   from_kuid_munged(user_ns, sma->sem_perm.cuid),
2214                   from_kgid_munged(user_ns, sma->sem_perm.cgid),
2215                   sem_otime,
2216                   sma->sem_ctime);
2217
2218        return 0;
2219}
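/*
 * The resulting /proc/sysvipc/sem line looks like (values illustrative):
 *
 *	key        semid perms nsems  uid  gid cuid cgid      otime      ctime
 *	123456     32769   666     1 1000 1000 1000 1000 1400000000 1400000000
 *
 * The matching column header is supplied separately, via
 * ipc_init_proc_interface() at initialization time.
 */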
2220#endif
2221