/*
 * arch/v850/kernel/semaphore.c -- Semaphore support
 *
 *  Copyright (C) 1998-2000  IBM Corporation
 *  Copyright (C) 1999  Linus Torvalds
 *
 * This file is subject to the terms and conditions of the GNU General
 * Public License.  See the file COPYING in the main directory of this
 * archive for more details.
 *
 * This file is a copy of the s390 version, arch/s390/kernel/semaphore.c
 *    Author(s): Martin Schwidefsky
 * which was derived from the i386 version, linux/arch/i386/kernel/semaphore.c
 */

#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/init.h>

#include <asm/semaphore.h>

/*
 * Semaphores are implemented using a two-way counter:
 * The "count" variable is decremented for each process
 * that tries to acquire the semaphore, while the "sleepers"
 * variable is a count of such acquires.
 *
 * Notably, the inline "up()" and "down()" functions can
 * efficiently test if they need to do any extra work (up
 * needs to do something only if count was negative before
 * the increment operation).
 *
 * "sleepers" and the contention routine ordering are
 * protected by the semaphore spinlock.
 *
 * Note that these functions are only called when there is
 * contention on the lock, and as such all this is the
 * "non-critical" part of the whole semaphore business. The
 * critical part is the inline stuff in <asm/semaphore.h>
 * where we want to avoid any extra jumps and calls.
 */

/*
 * Logic:
 *  - only on a boundary condition do we need to care. When we go
 *    from a negative count to a non-negative, we wake people up.
 *  - when we go from a non-negative count to a negative, we
 *    (a) synchronize with the "sleepers" count and (b) make sure
 *    that we're on the wakeup list before we synchronize, so that
 *    we cannot lose wakeup events.
 */

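/*
 * For reference, the fast paths in <asm/semaphore.h> look roughly like
 * the sketch below (illustrative only; the exact v850 definitions may
 * differ): down() decrements "count" and falls into __down() only when
 * the result goes negative, while up() increments it and calls __up()
 * only when a sleeper may need waking.
 */
#if 0   /* illustrative sketch, not compiled */
static inline void down(struct semaphore *sem)
{
        might_sleep();
        if (atomic_dec_return(&sem->count) < 0)
                __down(sem);            /* contended: take the slow path */
}

static inline void up(struct semaphore *sem)
{
        if (atomic_inc_return(&sem->count) <= 0)
                __up(sem);              /* someone may be sleeping: wake them */
}
#endif
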
void __up(struct semaphore *sem)
{
        wake_up(&sem->wait);
}

static DEFINE_SPINLOCK(semaphore_lock);

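/*
 * __down - contended case of down().  The caller has already decremented
 * "count"; here we account for ourselves in "sleepers" and, with the
 * semaphore spinlock held, fold "sleepers - 1" back into "count".  If the
 * result is non-negative we own the semaphore; otherwise we leave
 * "sleepers" at 1 (representing us) and go to sleep.  The wake_up() on
 * the way out passes any surplus wakeup on to the next waiter, since we
 * sleep on the wait queue exclusively.
 */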
void __sched __down(struct semaphore *sem)
{
        struct task_struct *tsk = current;
        DECLARE_WAITQUEUE(wait, tsk);
        tsk->state = TASK_UNINTERRUPTIBLE;
        add_wait_queue_exclusive(&sem->wait, &wait);

        spin_lock_irq(&semaphore_lock);
        sem->sleepers++;
        for (;;) {
                int sleepers = sem->sleepers;

                /*
                 * Add "everybody else" into it. They aren't
                 * playing, because we own the spinlock.
                 */
                if (!atomic_add_negative(sleepers - 1, &sem->count)) {
                        sem->sleepers = 0;
                        break;
                }
                sem->sleepers = 1;      /* us - see -1 above */
                spin_unlock_irq(&semaphore_lock);

                schedule();
                tsk->state = TASK_UNINTERRUPTIBLE;
                spin_lock_irq(&semaphore_lock);
        }
        spin_unlock_irq(&semaphore_lock);
        remove_wait_queue(&sem->wait, &wait);
        tsk->state = TASK_RUNNING;
        wake_up(&sem->wait);
}

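/*
 * __down_interruptible - same as __down(), except that a pending signal
 * aborts the wait: the count adjustment is undone (treating it like a
 * failed trylock) and -EINTR is returned to the caller.
 */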
int __sched __down_interruptible(struct semaphore *sem)
{
        int retval = 0;
        struct task_struct *tsk = current;
        DECLARE_WAITQUEUE(wait, tsk);
        tsk->state = TASK_INTERRUPTIBLE;
        add_wait_queue_exclusive(&sem->wait, &wait);

        spin_lock_irq(&semaphore_lock);
        sem->sleepers++;
        for (;;) {
                int sleepers = sem->sleepers;

                /*
                 * With signals pending, this turns into
                 * the trylock failure case - we won't be
                 * sleeping, and we can't get the lock as
                 * it has contention. Just correct the count
                 * and exit.
                 */
                if (signal_pending(current)) {
                        retval = -EINTR;
                        sem->sleepers = 0;
                        atomic_add(sleepers, &sem->count);
                        break;
                }

                /*
                 * Add "everybody else" into it. They aren't
                 * playing, because we own the spinlock. The
                 * "-1" is because we're still hoping to get
                 * the lock.
                 */
                if (!atomic_add_negative(sleepers - 1, &sem->count)) {
                        sem->sleepers = 0;
                        break;
                }
                sem->sleepers = 1;      /* us - see -1 above */
                spin_unlock_irq(&semaphore_lock);

                schedule();
                tsk->state = TASK_INTERRUPTIBLE;
                spin_lock_irq(&semaphore_lock);
        }
        spin_unlock_irq(&semaphore_lock);
        tsk->state = TASK_RUNNING;
        remove_wait_queue(&sem->wait, &wait);
        wake_up(&sem->wait);
        return retval;
}

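/*
 * A typical caller of the interruptible variant checks for a non-zero
 * return and backs out.  A minimal sketch, assuming the usual
 * down_interruptible() wrapper from <asm/semaphore.h> and a hypothetical
 * "my_sem" (illustrative only):
 */
#if 0   /* illustrative sketch, not compiled */
static DECLARE_MUTEX(my_sem);           /* semaphore with count == 1 */

static int my_op(void)
{
        if (down_interruptible(&my_sem))
                return -ERESTARTSYS;    /* interrupted by a signal */
        /* ... critical section ... */
        up(&my_sem);
        return 0;
}
#endif
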
/*
 * Trylock failed - make sure we correct for
 * having decremented the count.
 */
int __down_trylock(struct semaphore *sem)
{
        unsigned long flags;
        int sleepers;

        spin_lock_irqsave(&semaphore_lock, flags);
        sleepers = sem->sleepers + 1;
        sem->sleepers = 0;

        /*
         * Add "everybody else" and us into it. They aren't
         * playing, because we own the spinlock.
         */
        if (!atomic_add_negative(sleepers, &sem->count))
                wake_up(&sem->wait);

        spin_unlock_irqrestore(&semaphore_lock, flags);
        return 1;
}
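
/*
 * Note that __down_trylock() always returns 1: by the time we get here
 * the fast path has already failed to take the semaphore, so after
 * undoing the count adjustment we simply report failure.  The fast path
 * presumably looks something like the sketch below (illustrative only;
 * the exact <asm/semaphore.h> definition may differ), where a return
 * value of 0 means the semaphore was acquired:
 */
#if 0   /* illustrative sketch, not compiled */
static inline int down_trylock(struct semaphore *sem)
{
        int ret = 0;

        if (atomic_dec_return(&sem->count) < 0)
                ret = __down_trylock(sem);      /* contended: back out, report failure */
        return ret;
}
#endif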