/*
 * kernel/mutex.c
 *
 * Mutexes: blocking mutual exclusion locks
 *
 * Started by Ingo Molnar:
 *
 *  Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * Many thanks to Arjan van de Ven, Thomas Gleixner, Steven Rostedt and
 * David Howells for suggestions and improvements.
 *
 * Also see Documentation/mutex-design.txt.
 */
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/debug_locks.h>

/*
 * In the DEBUG case we are using the "NULL fastpath" for mutexes,
 * which forces all calls into the slowpath:
 */
#ifdef CONFIG_DEBUG_MUTEXES
# include "mutex-debug.h"
# include <asm-generic/mutex-null.h>
#else
# include "mutex.h"
# include <asm/mutex.h>
#endif

/***
 * mutex_init - initialize the mutex
 * @lock: the mutex to be initialized
 *
 * Initialize the mutex to unlocked state.
 *
 * It is not allowed to initialize an already locked mutex.
 */
void
__mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
{
        atomic_set(&lock->count, 1);
        spin_lock_init(&lock->wait_lock);
        INIT_LIST_HEAD(&lock->wait_list);

        debug_mutex_init(lock, name, key);
}

EXPORT_SYMBOL(__mutex_init);

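/*
 * Example (illustrative sketch, not part of this file): callers normally
 * initialize a mutex either statically with DEFINE_MUTEX() or at runtime
 * with the mutex_init() wrapper from <linux/mutex.h>, rather than calling
 * __mutex_init() directly. The names my_static_lock, my_dev and setup
 * below are hypothetical.
 *
 *      static DEFINE_MUTEX(my_static_lock);    // statically initialized, unlocked
 *
 *      struct my_dev {
 *              struct mutex lock;
 *      };
 *
 *      static void setup(struct my_dev *dev)
 *      {
 *              mutex_init(&dev->lock);         // dynamic init, must precede any locking
 *      }
 */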
#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * We split the mutex lock/unlock logic into separate fastpath and
 * slowpath functions, to reduce the register pressure on the fastpath.
 * We also put the fastpath first in the kernel image, to make sure the
 * branch is predicted by the CPU as default-untaken.
 */
static void fastcall noinline __sched
__mutex_lock_slowpath(atomic_t *lock_count);

/***
 * mutex_lock - acquire the mutex
 * @lock: the mutex to be acquired
 *
 * Lock the mutex exclusively for this task. If the mutex is not
 * available right now, it will sleep until it can get it.
 *
 * The mutex must later on be released by the same task that
 * acquired it. Recursive locking is not allowed. The task
 * may not exit without first unlocking the mutex. Also, the kernel
 * memory where the mutex resides must not be freed with
 * the mutex still locked. The mutex must first be initialized
 * (or statically defined) before it can be locked. memset()-ing
 * the mutex to 0 is not allowed.
 *
 * ( The CONFIG_DEBUG_MUTEXES .config option turns on debugging
 *   checks that will enforce the restrictions and will also do
 *   deadlock debugging. )
 *
 * This function is similar to (but not equivalent to) down().
 */
void inline fastcall __sched mutex_lock(struct mutex *lock)
{
        might_sleep();
        /*
         * The locking fastpath is the 1->0 transition from
         * 'unlocked' into 'locked' state.
         */
        __mutex_fastpath_lock(&lock->count, __mutex_lock_slowpath);
}

EXPORT_SYMBOL(mutex_lock);
#endif

static void fastcall noinline __sched
__mutex_unlock_slowpath(atomic_t *lock_count);

/***
 * mutex_unlock - release the mutex
 * @lock: the mutex to be released
 *
 * Unlock a mutex that has been locked by this task previously.
 *
 * This function must not be used in interrupt context. Unlocking
 * a mutex that is not locked is not allowed.
 *
 * This function is similar to (but not equivalent to) up().
 */
void fastcall __sched mutex_unlock(struct mutex *lock)
{
        /*
         * The unlocking fastpath is the 0->1 transition from 'locked'
         * into 'unlocked' state:
         */
        __mutex_fastpath_unlock(&lock->count, __mutex_unlock_slowpath);
}

EXPORT_SYMBOL(mutex_unlock);

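/*
 * Example (illustrative sketch, not part of this file): the canonical
 * pattern is a strictly paired lock/unlock around a critical section,
 * with the unlock done by the same task that took the lock. The names
 * my_lock, shared_count and bump_count below are hypothetical.
 *
 *      static DEFINE_MUTEX(my_lock);
 *      static int shared_count;
 *
 *      static void bump_count(void)
 *      {
 *              mutex_lock(&my_lock);   // may sleep; never call from interrupt context
 *              shared_count++;         // critical section
 *              mutex_unlock(&my_lock); // must be done by the locking task
 *      }
 */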
/*
 * Lock a mutex (possibly interruptible), slowpath:
 */
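/*
 * Note on the ->count encoding used below (see also
 * Documentation/mutex-design.txt): 1 means unlocked, 0 means locked with
 * no waiters, and a negative value means locked with possible waiters.
 * That is why contenders xchg() the count to -1 before sleeping: any
 * subsequent unlock is then forced into the slowpath, where waiters get
 * woken up.
 */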
static inline int __sched
__mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
                unsigned long ip)
{
        struct task_struct *task = current;
        struct mutex_waiter waiter;
        unsigned int old_val;
        unsigned long flags;

        spin_lock_mutex(&lock->wait_lock, flags);

        debug_mutex_lock_common(lock, &waiter);
        mutex_acquire(&lock->dep_map, subclass, 0, ip);
        debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));

        /* add waiting tasks to the end of the waitqueue (FIFO): */
        list_add_tail(&waiter.list, &lock->wait_list);
        waiter.task = task;

        old_val = atomic_xchg(&lock->count, -1);
        if (old_val == 1)
                goto done;

        lock_contended(&lock->dep_map, ip);

        for (;;) {
                /*
                 * Let's try to take the lock again - this is needed even if
                 * we get here for the first time (shortly after failing to
                 * acquire the lock), to make sure that we get a wakeup once
                 * it's unlocked. Later on, if we sleep, this is the
                 * operation that gives us the lock. We xchg it to -1, so
                 * that when we release the lock, we properly wake up the
                 * other waiters:
                 */
                old_val = atomic_xchg(&lock->count, -1);
                if (old_val == 1)
                        break;

                /*
                 * got a signal? (This code gets eliminated in the
                 * TASK_UNINTERRUPTIBLE case.)
                 */
                if (unlikely(state == TASK_INTERRUPTIBLE &&
                                                signal_pending(task))) {
                        mutex_remove_waiter(lock, &waiter, task_thread_info(task));
                        mutex_release(&lock->dep_map, 1, ip);
                        spin_unlock_mutex(&lock->wait_lock, flags);

                        debug_mutex_free_waiter(&waiter);
                        return -EINTR;
                }
                __set_task_state(task, state);

                /* didn't get the lock, go to sleep: */
                spin_unlock_mutex(&lock->wait_lock, flags);
                schedule();
                spin_lock_mutex(&lock->wait_lock, flags);
        }

done:
        lock_acquired(&lock->dep_map);
        /* got the lock - rejoice! */
        mutex_remove_waiter(lock, &waiter, task_thread_info(task));
        debug_mutex_set_owner(lock, task_thread_info(task));

        /* set it to 0 if there are no waiters left: */
        if (likely(list_empty(&lock->wait_list)))
                atomic_set(&lock->count, 0);

        spin_unlock_mutex(&lock->wait_lock, flags);

        debug_mutex_free_waiter(&waiter);

        return 0;
}

#ifdef CONFIG_DEBUG_LOCK_ALLOC
void __sched
mutex_lock_nested(struct mutex *lock, unsigned int subclass)
{
        might_sleep();
        __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, subclass, _RET_IP_);
}

EXPORT_SYMBOL_GPL(mutex_lock_nested);

int __sched
mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
{
        might_sleep();
        return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, subclass, _RET_IP_);
}

EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);
#endif
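
/*
 * Example (illustrative sketch, not part of this file): with lockdep
 * enabled, taking two mutexes of the same lock class would normally be
 * flagged as a potential deadlock. Where a well-defined ordering makes
 * it safe, the inner lock is taken with mutex_lock_nested() and a
 * distinct subclass such as SINGLE_DEPTH_NESTING from <linux/lockdep.h>.
 * The names my_node and lock_parent_then_child below are hypothetical.
 *
 *      struct my_node {
 *              struct mutex lock;
 *      };
 *
 *      static void lock_parent_then_child(struct my_node *parent,
 *                                         struct my_node *child)
 *      {
 *              mutex_lock(&parent->lock);
 *              mutex_lock_nested(&child->lock, SINGLE_DEPTH_NESTING);
 *              // ... work on both nodes ...
 *              mutex_unlock(&child->lock);
 *              mutex_unlock(&parent->lock);
 *      }
 */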

/*
 * Release the lock, slowpath:
 */
static fastcall inline void
__mutex_unlock_common_slowpath(atomic_t *lock_count, int nested)
{
        struct mutex *lock = container_of(lock_count, struct mutex, count);
        unsigned long flags;

        spin_lock_mutex(&lock->wait_lock, flags);
        mutex_release(&lock->dep_map, nested, _RET_IP_);
        debug_mutex_unlock(lock);

        /*
         * some architectures leave the lock unlocked in the fastpath failure
         * case, others need to leave it locked. In the latter case we have to
         * unlock it here:
         */
        if (__mutex_slowpath_needs_to_unlock())
                atomic_set(&lock->count, 1);

        if (!list_empty(&lock->wait_list)) {
                /* get the first entry from the wait-list: */
                struct mutex_waiter *waiter =
                                list_entry(lock->wait_list.next,
                                           struct mutex_waiter, list);

                debug_mutex_wake_waiter(lock, waiter);

                wake_up_process(waiter->task);
        }

        debug_mutex_clear_owner(lock);

        spin_unlock_mutex(&lock->wait_lock, flags);
}

/*
 * Release the lock, slowpath:
 */
static fastcall noinline void
__mutex_unlock_slowpath(atomic_t *lock_count)
{
        __mutex_unlock_common_slowpath(lock_count, 1);
}

#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * Here come the less common (and hence less performance-critical) APIs:
 * mutex_lock_interruptible() and mutex_trylock().
 */
static int fastcall noinline __sched
__mutex_lock_interruptible_slowpath(atomic_t *lock_count);

/***
 * mutex_lock_interruptible - acquire the mutex, interruptible
 * @lock: the mutex to be acquired
 *
 * Lock the mutex like mutex_lock(), and return 0 if the mutex has
 * been acquired or sleep until the mutex becomes available. If a
 * signal arrives while waiting for the lock then this function
 * returns -EINTR.
 *
 * This function is similar to (but not equivalent to) down_interruptible().
 */
int fastcall __sched mutex_lock_interruptible(struct mutex *lock)
{
        might_sleep();
        return __mutex_fastpath_lock_retval
                        (&lock->count, __mutex_lock_interruptible_slowpath);
}

EXPORT_SYMBOL(mutex_lock_interruptible);

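/*
 * Example (illustrative sketch, not part of this file): callers of
 * mutex_lock_interruptible() must handle the -EINTR case, typically by
 * backing out without touching the protected data (often translating the
 * error to -ERESTARTSYS at the syscall boundary). The names my_lock and
 * my_locked_op below are hypothetical.
 *
 *      static DEFINE_MUTEX(my_lock);
 *
 *      static long my_locked_op(void)
 *      {
 *              if (mutex_lock_interruptible(&my_lock))
 *                      return -ERESTARTSYS;    // interrupted, lock is NOT held
 *              // ... critical section ...
 *              mutex_unlock(&my_lock);
 *              return 0;
 *      }
 */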
static void fastcall noinline __sched
__mutex_lock_slowpath(atomic_t *lock_count)
{
        struct mutex *lock = container_of(lock_count, struct mutex, count);

        __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0, _RET_IP_);
}

static int fastcall noinline __sched
__mutex_lock_interruptible_slowpath(atomic_t *lock_count)
{
        struct mutex *lock = container_of(lock_count, struct mutex, count);

        return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, 0, _RET_IP_);
}
#endif

/*
 * Spinlock based trylock, we take the spinlock and check whether we
 * can get the lock:
 */
static inline int __mutex_trylock_slowpath(atomic_t *lock_count)
{
        struct mutex *lock = container_of(lock_count, struct mutex, count);
        unsigned long flags;
        int prev;

        spin_lock_mutex(&lock->wait_lock, flags);

        prev = atomic_xchg(&lock->count, -1);
        if (likely(prev == 1)) {
                debug_mutex_set_owner(lock, current_thread_info());
                mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);
        }
        /* Set it back to 0 if there are no waiters: */
        if (likely(list_empty(&lock->wait_list)))
                atomic_set(&lock->count, 0);

        spin_unlock_mutex(&lock->wait_lock, flags);

        return prev == 1;
}

/***
 * mutex_trylock - try to acquire the mutex, without waiting
 * @lock: the mutex to be acquired
 *
 * Try to acquire the mutex atomically. Returns 1 if the mutex
 * has been acquired successfully, and 0 on contention.
 *
 * NOTE: this function follows the spin_trylock() convention, so
 * its return value is inverted with respect to down_trylock()! Be
 * careful about this when converting semaphore users to mutexes.
 *
 * This function must not be used in interrupt context. The
 * mutex must be released by the same task that acquired it.
 */
int fastcall __sched mutex_trylock(struct mutex *lock)
{
        return __mutex_fastpath_trylock(&lock->count,
                                        __mutex_trylock_slowpath);
}

EXPORT_SYMBOL(mutex_trylock);

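/*
 * Example (illustrative sketch, not part of this file): because
 * mutex_trylock() follows the spin_trylock() convention, nonzero means
 * success - the opposite of down_trylock(). The names my_lock and
 * try_fast_path below are hypothetical.
 *
 *      static DEFINE_MUTEX(my_lock);
 *
 *      static int try_fast_path(void)
 *      {
 *              if (!mutex_trylock(&my_lock))
 *                      return 0;               // contended, lock is NOT held
 *              // ... critical section ...
 *              mutex_unlock(&my_lock);
 *              return 1;
 *      }
 */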