linux/include/asm-generic/bitops/lock.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_GENERIC_BITOPS_LOCK_H_
#define _ASM_GENERIC_BITOPS_LOCK_H_

#include <linux/atomic.h>
#include <linux/compiler.h>
#include <asm/barrier.h>

/**
 * arch_test_and_set_bit_lock - Set a bit and return its old value, for lock
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and provides acquire barrier semantics if
 * the returned value is 0.
 * It can be used to implement bit locks.
 */
static __always_inline int
arch_test_and_set_bit_lock(unsigned int nr, volatile unsigned long *p)
{
        long old;
        unsigned long mask = BIT_MASK(nr);

        p += BIT_WORD(nr);
        if (READ_ONCE(*p) & mask)
                return 1;

        old = arch_atomic_long_fetch_or_acquire(mask, (atomic_long_t *)p);
        return !!(old & mask);
}

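/*
 * Illustrative sketch, not part of this header: a minimal bit-spinlock
 * acquire built on arch_test_and_set_bit_lock(). The name
 * example_bit_spin_lock() is hypothetical, and cpu_relax() (from
 * <asm/processor.h>, not included here) is assumed as the busy-wait hint.
 * The acquire ordering on the successful fetch_or is what keeps the
 * critical section's accesses from being reordered above the lock.
 */
static __always_inline void
example_bit_spin_lock(unsigned int nr, volatile unsigned long *p)
{
        /* Spin until the atomic test-and-set observes the bit clear. */
        while (arch_test_and_set_bit_lock(nr, p))
                cpu_relax();
}
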
/**
 * arch_clear_bit_unlock - Clear a bit in memory, for unlock
 * @nr: the bit to clear
 * @addr: the address to start counting from
 *
 * This operation is atomic and provides release barrier semantics.
 */
static __always_inline void
arch_clear_bit_unlock(unsigned int nr, volatile unsigned long *p)
{
        p += BIT_WORD(nr);
        arch_atomic_long_fetch_andnot_release(BIT_MASK(nr), (atomic_long_t *)p);
}

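/*
 * Illustrative sketch: pairing the acquire in the example_bit_spin_lock()
 * sketch above with this release form yields a complete critical section.
 * example_critical_section() and the choice of bit 0 are assumptions for
 * illustration only.
 */
static inline void example_critical_section(volatile unsigned long *word)
{
        example_bit_spin_lock(0, word);         /* acquire */
        /* ... accesses protected by bit 0 of *word go here ... */
        arch_clear_bit_unlock(0, word);         /* release */
}
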
/**
 * arch___clear_bit_unlock - Clear a bit in memory, for unlock
 * @nr: the bit to clear
 * @addr: the address to start counting from
 *
 * A weaker form of clear_bit_unlock() as used by __bit_lock_unlock(). If all
 * the bits in the word are protected by this lock, some archs can use weaker
 * ops to safely unlock.
 *
 * See for example x86's implementation.
 */
static inline void
arch___clear_bit_unlock(unsigned int nr, volatile unsigned long *p)
{
        unsigned long old;

        p += BIT_WORD(nr);
        old = READ_ONCE(*p);
        old &= ~BIT_MASK(nr);
        arch_atomic_long_set_release((atomic_long_t *)p, old);
}

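/*
 * Illustrative sketch of the "all bits protected" condition: if every bit
 * of the word may only change while the lock bit is held, the non-atomic
 * read-modify-write above cannot race with a concurrent update, so the
 * plain load plus set_release is enough. EXAMPLE_LOCK_BIT,
 * EXAMPLE_DIRTY_BIT and the helper below are hypothetical; __set_bit()
 * is the kernel's non-atomic bitop.
 */
#define EXAMPLE_LOCK_BIT        0       /* guards every bit in the word */
#define EXAMPLE_DIRTY_BIT       1       /* only changes with bit 0 held */

static inline void example_mark_dirty_and_unlock(volatile unsigned long *word)
{
        __set_bit(EXAMPLE_DIRTY_BIT, word);              /* lock held: safe */
        arch___clear_bit_unlock(EXAMPLE_LOCK_BIT, word); /* drop the lock */
}
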
/**
 * arch_clear_bit_unlock_is_negative_byte - Clear a bit in memory and test if bottom
 *                                          byte is negative, for unlock.
 * @nr: the bit to clear
 * @addr: the address to start counting from
 *
 * This is a bit of a one-trick-pony for the filemap code, which clears
 * PG_locked and tests PG_waiters.
 */
#ifndef arch_clear_bit_unlock_is_negative_byte
static inline bool arch_clear_bit_unlock_is_negative_byte(unsigned int nr,
                                                          volatile unsigned long *p)
{
        long old;
        unsigned long mask = BIT_MASK(nr);

        p += BIT_WORD(nr);
        old = arch_atomic_long_fetch_andnot_release(mask, (atomic_long_t *)p);
        return !!(old & BIT(7));
}
#define arch_clear_bit_unlock_is_negative_byte arch_clear_bit_unlock_is_negative_byte
#endif

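/*
 * Illustrative sketch of the filemap-style use, loosely modelled on
 * unlock_page(): clearing PG_locked and testing PG_waiters must be one
 * atomic operation, or a waiter that sets PG_waiters between the two
 * steps could be missed and sleep forever. PG_waiters sits at bit 7, the
 * sign bit of the bottom byte, which is what the helper above tests.
 * example_unlock_page() and example_wake_up_waiters() are hypothetical;
 * PG_locked comes from <linux/page-flags.h>, not included here.
 */
void example_wake_up_waiters(unsigned long *flags);     /* hypothetical hook */

static inline void example_unlock_page(unsigned long *flags)
{
        if (arch_clear_bit_unlock_is_negative_byte(PG_locked, flags))
                example_wake_up_waiters(flags);
}
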
#include <asm-generic/bitops/instrumented-lock.h>

#endif /* _ASM_GENERIC_BITOPS_LOCK_H_ */