linux/include/asm-generic/qspinlock.h
/*
 * Queued spinlock
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * (C) Copyright 2013-2015 Hewlett-Packard Development Company, L.P.
 * (C) Copyright 2015 Hewlett-Packard Enterprise Development LP
 *
 * Authors: Waiman Long <waiman.long@hpe.com>
 */
#ifndef __ASM_GENERIC_QSPINLOCK_H
#define __ASM_GENERIC_QSPINLOCK_H

#include <asm-generic/qspinlock_types.h>

/**
 * queued_spin_unlock_wait - wait until the _current_ lock holder releases the lock
 * @lock : Pointer to queued spinlock structure
 *
 * There is a very slight possibility of live-lock if the lockers keep coming
 * and the waiter is just unfortunate enough to not see any unlock state.
 */
#ifndef queued_spin_unlock_wait
extern void queued_spin_unlock_wait(struct qspinlock *lock);
#endif
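
/*
 * Illustrative sketch only (the names below are hypothetical, not kernel
 * API): the classic use of an unlock-wait primitive is a teardown path
 * that must let the current critical section drain without taking the
 * lock itself:
 *
 *      static void example_teardown(struct example *e)
 *      {
 *              WRITE_ONCE(e->dead, true);          // lockers check this flag
 *              queued_spin_unlock_wait(&e->lock);  // drain the current holder
 *              kfree(e);
 *      }
 */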

/**
 * queued_spin_is_locked - is the spinlock locked?
 * @lock: Pointer to queued spinlock structure
 * Return: 1 if it is locked, 0 otherwise
 */
#ifndef queued_spin_is_locked
static __always_inline int queued_spin_is_locked(struct qspinlock *lock)
{
        /*
         * See queued_spin_unlock_wait().
         *
         * Any !0 state indicates it is locked, even if _Q_LOCKED_VAL
         * isn't immediately observable.
         */
        return atomic_read(&lock->val);
}
#endif

/**
 * queued_spin_value_unlocked - is the spinlock structure unlocked?
 * @lock: queued spinlock structure
 * Return: 1 if it is unlocked, 0 otherwise
 *
 * N.B. Whenever there are tasks waiting for the lock, it is considered
 *      locked wrt the lockref code, to avoid the lockref code stealing
 *      the lock and changing things underneath it. This also allows
 *      some optimizations to be applied without conflict with lockref.
 */
static __always_inline int queued_spin_value_unlocked(struct qspinlock lock)
{
        return !atomic_read(&lock.val);
}
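
/*
 * Sketch of the by-value use case (an assumed caller pattern, modelled
 * on lib/lockref.c, which applies arch_spin_value_unlocked() to a
 * snapshot of the lock):
 *
 *      struct qspinlock snap = READ_ONCE(*lock);   // copy of the lock word
 *      if (queued_spin_value_unlocked(snap))
 *              ...;    // attempt a lockless cmpxchg fast path
 *
 * Because the helper takes the lock by value, it inspects only the
 * snapshot and never re-reads the live lock word.
 */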

/**
 * queued_spin_is_contended - check if the lock is contended
 * @lock : Pointer to queued spinlock structure
 * Return: 1 if lock contended, 0 otherwise
 */
static __always_inline int queued_spin_is_contended(struct qspinlock *lock)
{
        /* Any pending or tail bits set mean someone waits beyond the owner. */
        return atomic_read(&lock->val) & ~_Q_LOCKED_MASK;
}

/**
 * queued_spin_trylock - try to acquire the queued spinlock
 * @lock : Pointer to queued spinlock structure
 * Return: 1 if lock acquired, 0 if failed
 */
static __always_inline int queued_spin_trylock(struct qspinlock *lock)
{
        if (!atomic_read(&lock->val) &&
           (atomic_cmpxchg_acquire(&lock->val, 0, _Q_LOCKED_VAL) == 0))
                return 1;
        return 0;
}
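
/*
 * Typical trylock pattern (illustrative sketch only):
 *
 *      if (queued_spin_trylock(&obj->lock)) {
 *              ...;    // critical section
 *              queued_spin_unlock(&obj->lock);
 *      } else {
 *              ...;    // back off instead of spinning
 *      }
 *
 * The plain atomic_read() pre-check keeps the cacheline in shared state
 * unless the lock actually looks free, so contended trylocks do not
 * generate needless cmpxchg write traffic.
 */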

extern void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);

/**
 * queued_spin_lock - acquire a queued spinlock
 * @lock: Pointer to queued spinlock structure
 */
static __always_inline void queued_spin_lock(struct qspinlock *lock)
{
        u32 val;

        val = atomic_cmpxchg_acquire(&lock->val, 0, _Q_LOCKED_VAL);
        if (likely(val == 0))
                return;
        queued_spin_lock_slowpath(lock, val);
}
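
/*
 * Note on the fast/slow split: per asm-generic/qspinlock_types.h the
 * 32-bit lock word packs a locked byte, a pending bit and a tail that
 * encodes the MCS wait queue. The single 0 -> _Q_LOCKED_VAL cmpxchg
 * above is the entire uncontended acquisition; any non-zero value seen
 * is handed to queued_spin_lock_slowpath(), which spins on the pending
 * bit or queues on per-CPU MCS nodes as appropriate.
 */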

#ifndef queued_spin_unlock
/**
 * queued_spin_unlock - release a queued spinlock
 * @lock : Pointer to queued spinlock structure
 */
static __always_inline void queued_spin_unlock(struct qspinlock *lock)
{
        /*
         * unlock() needs release semantics:
         */
        (void)atomic_sub_return_release(_Q_LOCKED_VAL, &lock->val);
}
#endif
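
/*
 * A minimal sketch of an architecture override, assuming the locked byte
 * sits in the least significant byte of the word (roughly what x86 does;
 * shown for illustration, not as a definitive implementation):
 *
 *      static __always_inline void queued_spin_unlock(struct qspinlock *lock)
 *      {
 *              smp_store_release((u8 *)&lock->val, 0);
 *      }
 *
 * A byte-sized release store is cheaper than the atomic subtraction when
 * the ISA provides suitably ordered stores natively.
 */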

#ifndef virt_spin_lock
static __always_inline bool virt_spin_lock(struct qspinlock *lock)
{
        return false;
}
#endif
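
/*
 * virt_spin_lock() is a hook for virtualized guests: this stub returns
 * false so the native queued slow path is used. An architecture may
 * override it (x86 does when running under a hypervisor without paravirt
 * spinlock support) to fall back to a simple test-and-set lock, since
 * strict queue fairness performs poorly when vCPUs can be preempted
 * while waiting in the queue.
 */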

/*
 * Remap architecture-specific spinlock functions to the corresponding
 * queued spinlock functions.
 */
#define arch_spin_is_locked(l)          queued_spin_is_locked(l)
#define arch_spin_is_contended(l)       queued_spin_is_contended(l)
#define arch_spin_value_unlocked(l)     queued_spin_value_unlocked(l)
#define arch_spin_lock(l)               queued_spin_lock(l)
#define arch_spin_trylock(l)            queued_spin_trylock(l)
#define arch_spin_unlock(l)             queued_spin_unlock(l)
#define arch_spin_lock_flags(l, f)      queued_spin_lock(l)
#define arch_spin_unlock_wait(l)        queued_spin_unlock_wait(l)
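
/*
 * A minimal sketch of how an architecture opts in (the file names below
 * are illustrative of the usual layout, not prescriptive):
 *
 *      // arch/xxx/include/asm/spinlock.h
 *      #include <asm/qspinlock.h>
 *
 *      // arch/xxx/include/asm/qspinlock.h
 *      #include <asm-generic/qspinlock.h>
 *
 * combined with selecting ARCH_USE_QUEUED_SPINLOCKS in Kconfig, so the
 * arch_spin_*() names above back the kernel's generic spinlock API.
 */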

#endif /* __ASM_GENERIC_QSPINLOCK_H */