/*
 * lib/kernel_lock.c
 *
 * This is the traditional BKL - the big kernel lock. Largely
 * relegated to obsolescence, but still used by various less
 * important (or lazy) subsystems.
 */
#include <linux/smp_lock.h>
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/semaphore.h>

/*
 * The 'big kernel lock'
 *
 * This spinlock is taken and released recursively by lock_kernel()
 * and unlock_kernel().  It is transparently dropped and reacquired
 * over schedule().  It is used to protect legacy code that hasn't
 * been migrated to a proper locking design yet.
 *
 * Don't use in new code.
 */
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(kernel_flag);

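/*
 * Illustrative sketch (not part of the original file): how a piece of
 * legacy code typically uses the BKL.  The helper name below is
 * hypothetical.
 */
#if 0
static long example_legacy_ioctl(unsigned int cmd, unsigned long arg)
{
        long ret;

        lock_kernel();          /* recursive; nesting is fine */
        /*
         * Hypothetical helper.  If it sleeps, the BKL is transparently
         * dropped over schedule() and reacquired on wakeup.
         */
        ret = do_legacy_thing(cmd, arg);
        unlock_kernel();
        return ret;
}
#endif
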
/*
 * Acquire/release the underlying lock from the scheduler.
 *
 * This is called with preemption disabled, and should
 * return an error value if it cannot get the lock and
 * TIF_NEED_RESCHED gets set.
 *
 * If it successfully gets the lock, it should increment
 * the preemption count like any spinlock does.
 *
 * (This works on UP too - _raw_spin_trylock will never
 * return false in that case)
 */
int __lockfunc __reacquire_kernel_lock(void)
{
        while (!_raw_spin_trylock(&kernel_flag)) {
                if (need_resched())
                        return -EAGAIN;
                cpu_relax();
        }
        preempt_disable();
        return 0;
}

void __lockfunc __release_kernel_lock(void)
{
        _raw_spin_unlock(&kernel_flag);
        preempt_enable_no_resched();
}
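
/*
 * Illustrative sketch (not part of the original file): the scheduler
 * only calls these hooks when the outgoing task actually holds the BKL
 * (task->lock_depth >= 0).  Roughly, schedule() does something like:
 *
 *      release_kernel_lock(prev);
 *      ...pick and switch to the next task...
 *      if (unlikely(reacquire_kernel_lock(prev) < 0))
 *              goto retry;     // -EAGAIN: need_resched() was set while spinning
 *
 * where release_kernel_lock()/reacquire_kernel_lock() are the thin,
 * lock_depth-checking wrappers from <linux/smp_lock.h> around the two
 * functions above.
 */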

/*
 * These are the BKL spinlock acquisition paths - we try to be polite
 * about preemption.  If SMP is not on (i.e. UP preemption), this all
 * goes away because the _raw_spin_trylock() will always succeed.
 */
#ifdef CONFIG_PREEMPT
static inline void __lock_kernel(void)
{
        preempt_disable();
        if (unlikely(!_raw_spin_trylock(&kernel_flag))) {
                /*
                 * If preemption was disabled even before this
                 * was called, there's nothing we can be polite
                 * about - just spin.
                 */
                if (preempt_count() > 1) {
                        _raw_spin_lock(&kernel_flag);
                        return;
                }

                /*
                 * Otherwise, let's wait for the kernel lock
                 * with preemption enabled..
                 */
                do {
                        preempt_enable();
                        while (spin_is_locked(&kernel_flag))
                                cpu_relax();
                        preempt_disable();
                } while (!_raw_spin_trylock(&kernel_flag));
        }
}

#else

/*
 * Non-preemption case - just get the spinlock
 */
static inline void __lock_kernel(void)
{
        _raw_spin_lock(&kernel_flag);
}
#endif
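
/*
 * On UP with CONFIG_PREEMPT the polite wait loop above never runs:
 * _raw_spin_trylock() always succeeds on UP, so the BKL reduces to the
 * preempt_disable()/preempt_enable() pair done in __lock_kernel() and
 * __unlock_kernel().
 */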

static inline void __unlock_kernel(void)
{
        /*
         * The BKL is not covered by lockdep, so we open-code the
         * unlocking sequence (and thus avoid the dep-chain ops):
         */
        _raw_spin_unlock(&kernel_flag);
        preempt_enable();
}

/*
 * Getting the big kernel lock.
 *
 * This cannot happen asynchronously (e.g. from interrupt context),
 * so we only need to worry about other CPUs.
 */
void __lockfunc lock_kernel(void)
{
        int depth = current->lock_depth + 1;

        if (likely(!depth))             /* outermost acquisition: take the spinlock */
                __lock_kernel();
        current->lock_depth = depth;
}

void __lockfunc unlock_kernel(void)
{
        BUG_ON(current->lock_depth < 0);
        if (likely(--current->lock_depth < 0))  /* outermost release: drop it */
                __unlock_kernel();
}

EXPORT_SYMBOL(lock_kernel);
EXPORT_SYMBOL(unlock_kernel);
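
/*
 * Illustrative sketch (not part of the original file): the per-task
 * lock_depth counter is what makes the BKL recursive.  The function
 * names below are hypothetical.
 */
#if 0
static void example_inner(void)
{
        lock_kernel();          /* lock_depth 0 -> 1, spinlock not retaken */
        /* ... */
        unlock_kernel();        /* lock_depth 1 -> 0, BKL still held */
}

static void example_outer(void)
{
        lock_kernel();          /* lock_depth -1 -> 0, spinlock taken */
        example_inner();        /* nested use is fine */
        unlock_kernel();        /* lock_depth 0 -> -1, spinlock released */
}
#endif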