/* linux/kernel/locking/semaphore.c */
   1/*
   2 * Copyright (c) 2008 Intel Corporation
   3 * Author: Matthew Wilcox <willy@linux.intel.com>
   4 *
   5 * Distributed under the terms of the GNU GPL, version 2
   6 *
   7 * This file implements counting semaphores.
   8 * A counting semaphore may be acquired 'n' times before sleeping.
   9 * See mutex.c for single-acquisition sleeping locks which enforce
  10 * rules which allow code to be debugged more easily.
  11 */
  12
  13/*
  14 * Some notes on the implementation:
  15 *
  16 * The spinlock controls access to the other members of the semaphore.
  17 * down_trylock() and up() can be called from interrupt context, so we
  18 * have to disable interrupts when taking the lock.  It turns out various
  19 * parts of the kernel expect to be able to use down() on a semaphore in
  20 * interrupt context when they know it will succeed, so we have to use
  21 * irqsave variants for down(), down_interruptible() and down_killable()
  22 * too.
  23 *
  24 * The ->count variable represents how many more tasks can acquire this
  25 * semaphore.  If it's zero, there may be tasks waiting on the wait_list.
  26 */
  27
  28#include <linux/compiler.h>
  29#include <linux/kernel.h>
  30#include <linux/export.h>
  31#include <linux/sched.h>
  32#include <linux/sched/debug.h>
  33#include <linux/semaphore.h>
  34#include <linux/spinlock.h>
  35#include <linux/ftrace.h>
  36
/* Slow paths for the contended case; implementations are at the bottom of this file. */
static noinline void __down(struct semaphore *sem);
static noinline int __down_interruptible(struct semaphore *sem);
static noinline int __down_killable(struct semaphore *sem);
static noinline int __down_timeout(struct semaphore *sem, long timeout);
static noinline void __up(struct semaphore *sem);
  42
  43/**
  44 * down - acquire the semaphore
  45 * @sem: the semaphore to be acquired
  46 *
  47 * Acquires the semaphore.  If no more tasks are allowed to acquire the
  48 * semaphore, calling this function will put the task to sleep until the
  49 * semaphore is released.
  50 *
  51 * Use of this function is deprecated, please use down_interruptible() or
  52 * down_killable() instead.
  53 */
  54void down(struct semaphore *sem)
  55{
  56        unsigned long flags;
  57
  58        raw_spin_lock_irqsave(&sem->lock, flags);
  59        if (likely(sem->count > 0))
  60                sem->count--;
  61        else
  62                __down(sem);
  63        raw_spin_unlock_irqrestore(&sem->lock, flags);
  64}
  65EXPORT_SYMBOL(down);
  66
  67/**
  68 * down_interruptible - acquire the semaphore unless interrupted
  69 * @sem: the semaphore to be acquired
  70 *
  71 * Attempts to acquire the semaphore.  If no more tasks are allowed to
  72 * acquire the semaphore, calling this function will put the task to sleep.
  73 * If the sleep is interrupted by a signal, this function will return -EINTR.
  74 * If the semaphore is successfully acquired, this function returns 0.
  75 */
  76int down_interruptible(struct semaphore *sem)
  77{
  78        unsigned long flags;
  79        int result = 0;
  80
  81        raw_spin_lock_irqsave(&sem->lock, flags);
  82        if (likely(sem->count > 0))
  83                sem->count--;
  84        else
  85                result = __down_interruptible(sem);
  86        raw_spin_unlock_irqrestore(&sem->lock, flags);
  87
  88        return result;
  89}
  90EXPORT_SYMBOL(down_interruptible);
  91
  92/**
  93 * down_killable - acquire the semaphore unless killed
  94 * @sem: the semaphore to be acquired
  95 *
  96 * Attempts to acquire the semaphore.  If no more tasks are allowed to
  97 * acquire the semaphore, calling this function will put the task to sleep.
  98 * If the sleep is interrupted by a fatal signal, this function will return
  99 * -EINTR.  If the semaphore is successfully acquired, this function returns
 100 * 0.
 101 */
 102int down_killable(struct semaphore *sem)
 103{
 104        unsigned long flags;
 105        int result = 0;
 106
 107        raw_spin_lock_irqsave(&sem->lock, flags);
 108        if (likely(sem->count > 0))
 109                sem->count--;
 110        else
 111                result = __down_killable(sem);
 112        raw_spin_unlock_irqrestore(&sem->lock, flags);
 113
 114        return result;
 115}
 116EXPORT_SYMBOL(down_killable);
 117
 118/**
 119 * down_trylock - try to acquire the semaphore, without waiting
 120 * @sem: the semaphore to be acquired
 121 *
 122 * Try to acquire the semaphore atomically.  Returns 0 if the semaphore has
 123 * been acquired successfully or 1 if it it cannot be acquired.
 124 *
 125 * NOTE: This return value is inverted from both spin_trylock and
 126 * mutex_trylock!  Be careful about this when converting code.
 127 *
 128 * Unlike mutex_trylock, this function can be used from interrupt context,
 129 * and the semaphore can be released by any task or interrupt.
 130 */
 131int down_trylock(struct semaphore *sem)
 132{
 133        unsigned long flags;
 134        int count;
 135
 136        raw_spin_lock_irqsave(&sem->lock, flags);
 137        count = sem->count - 1;
 138        if (likely(count >= 0))
 139                sem->count = count;
 140        raw_spin_unlock_irqrestore(&sem->lock, flags);
 141
 142        return (count < 0);
 143}
 144EXPORT_SYMBOL(down_trylock);
 145
 146/**
 147 * down_timeout - acquire the semaphore within a specified time
 148 * @sem: the semaphore to be acquired
 149 * @timeout: how long to wait before failing
 150 *
 151 * Attempts to acquire the semaphore.  If no more tasks are allowed to
 152 * acquire the semaphore, calling this function will put the task to sleep.
 153 * If the semaphore is not released within the specified number of jiffies,
 154 * this function returns -ETIME.  It returns 0 if the semaphore was acquired.
 155 */
 156int down_timeout(struct semaphore *sem, long timeout)
 157{
 158        unsigned long flags;
 159        int result = 0;
 160
 161        raw_spin_lock_irqsave(&sem->lock, flags);
 162        if (likely(sem->count > 0))
 163                sem->count--;
 164        else
 165                result = __down_timeout(sem, timeout);
 166        raw_spin_unlock_irqrestore(&sem->lock, flags);
 167
 168        return result;
 169}
 170EXPORT_SYMBOL(down_timeout);
 171
 172/**
 173 * up - release the semaphore
 174 * @sem: the semaphore to release
 175 *
 176 * Release the semaphore.  Unlike mutexes, up() may be called from any
 177 * context and even by tasks which have never called down().
 178 */
 179void up(struct semaphore *sem)
 180{
 181        unsigned long flags;
 182
 183        raw_spin_lock_irqsave(&sem->lock, flags);
 184        if (likely(list_empty(&sem->wait_list)))
 185                sem->count++;
 186        else
 187                __up(sem);
 188        raw_spin_unlock_irqrestore(&sem->lock, flags);
 189}
 190EXPORT_SYMBOL(up);
 191
 192/* Functions for the contended case */
 193
/*
 * Per-sleeper bookkeeping, allocated on the waiting task's stack in
 * __down_common() and linked onto sem->wait_list.  All fields are
 * protected by sem->lock.
 */
struct semaphore_waiter {
	struct list_head list;		/* entry in sem->wait_list */
	struct task_struct *task;	/* the sleeping task, woken by __up() */
	bool up;			/* set by __up() when the count was handed over */
};
 199
/*
 * Because this function is inlined, the 'state' parameter will be
 * constant, and thus optimised away by the compiler.  Likewise the
 * 'timeout' parameter for the cases without timeouts.
 *
 * Called with sem->lock held (taken with irqsave by the down*() callers);
 * returns with it held again.  Returns 0 on acquisition, -EINTR if a
 * signal matching 'state' arrived, -ETIME if 'timeout' jiffies elapsed.
 */
static inline int __sched __down_common(struct semaphore *sem, long state,
								long timeout)
{
	struct semaphore_waiter waiter;

	/* Queue ourselves (stack-allocated waiter) under sem->lock. */
	list_add_tail(&waiter.list, &sem->wait_list);
	waiter.task = current;
	waiter.up = false;

	for (;;) {
		if (signal_pending_state(state, current))
			goto interrupted;
		if (unlikely(timeout <= 0))
			goto timed_out;
		__set_current_state(state);
		/*
		 * Drop the lock across the sleep.  The plain _irq variants
		 * (re-enabling interrupts unconditionally) are used here —
		 * NOTE(review): relies on contended down*() running in
		 * process context with interrupts previously enabled.
		 */
		raw_spin_unlock_irq(&sem->lock);
		timeout = schedule_timeout(timeout);
		raw_spin_lock_irq(&sem->lock);
		/* __up() removed us from the list and handed us the count. */
		if (waiter.up)
			return 0;
	}

 timed_out:
	/* Still on the wait_list in these two paths: unlink before leaving. */
	list_del(&waiter.list);
	return -ETIME;

 interrupted:
	list_del(&waiter.list);
	return -EINTR;
}
 235
/* Contended down(): sleep uninterruptibly with no time limit. */
static noinline void __sched __down(struct semaphore *sem)
{
	__down_common(sem, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
}
 240
/* Contended down_interruptible(): sleep until woken or any signal arrives. */
static noinline int __sched __down_interruptible(struct semaphore *sem)
{
	return __down_common(sem, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
}
 245
/* Contended down_killable(): sleep until woken or a fatal signal arrives. */
static noinline int __sched __down_killable(struct semaphore *sem)
{
	return __down_common(sem, TASK_KILLABLE, MAX_SCHEDULE_TIMEOUT);
}
 250
/* Contended down_timeout(): uninterruptible sleep bounded by 'timeout' jiffies. */
static noinline int __sched __down_timeout(struct semaphore *sem, long timeout)
{
	return __down_common(sem, TASK_UNINTERRUPTIBLE, timeout);
}
 255
/*
 * Called by up() with sem->lock held and the wait_list non-empty: pass the
 * released count directly to the longest-waiting task instead of bumping
 * sem->count.
 */
static noinline void __sched __up(struct semaphore *sem)
{
	struct semaphore_waiter *waiter = list_first_entry(&sem->wait_list,
						struct semaphore_waiter, list);
	list_del(&waiter->list);
	waiter->up = true;	/* signals __down_common() it owns the semaphore */
	wake_up_process(waiter->task);
}
 264