linux/include/asm-generic/qrwlock.h
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Queue read/write lock
 *
 * (C) Copyright 2013-2014 Hewlett-Packard Development Company, L.P.
 *
 * Authors: Waiman Long <waiman.long@hp.com>
 */
#ifndef __ASM_GENERIC_QRWLOCK_H
#define __ASM_GENERIC_QRWLOCK_H

#include <linux/atomic.h>
#include <asm/barrier.h>
#include <asm/processor.h>

#include <asm-generic/qrwlock_types.h>

/* Must be included from asm/spinlock.h after defining arch_spin_is_locked.  */

/*
 * Writer states & reader shift and bias.
 */
#define _QW_WAITING     0x100           /* A writer is waiting     */
#define _QW_LOCKED      0x0ff           /* A writer holds the lock */
#define _QW_WMASK       0x1ff           /* Writer mask             */
#define _QR_SHIFT       9               /* Reader count shift      */
#define _QR_BIAS        (1U << _QR_SHIFT)
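
/*
 * The lock word is thus split in two: bits 8..0 (_QW_WMASK) hold the
 * writer state, and bits 31..9 count the readers in units of _QR_BIAS.
 * A lock word of 0 means the lock is completely free.
 */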

/*
 * External function declarations
 */
extern void queued_read_lock_slowpath(struct qrwlock *lock);
extern void queued_write_lock_slowpath(struct qrwlock *lock);
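
/*
 * Both slowpaths live in kernel/locking/qrwlock.c; they queue the
 * contending CPU on ->wait_lock so that waiters take over the lock in
 * roughly FIFO order.
 */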

/**
 * queued_read_trylock - try to acquire read lock of a queue rwlock
 * @lock : Pointer to queue rwlock structure
 * Return: 1 if lock acquired, 0 if failed
 */
static inline int queued_read_trylock(struct qrwlock *lock)
{
        int cnts;

        cnts = atomic_read(&lock->cnts);
        if (likely(!(cnts & _QW_WMASK))) {
                cnts = (u32)atomic_add_return_acquire(_QR_BIAS, &lock->cnts);
                if (likely(!(cnts & _QW_WMASK)))
                        return 1;
                atomic_sub(_QR_BIAS, &lock->cnts);
        }
        return 0;
}
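
/*
 * Note that queued_read_trylock() is speculative: the reader count is
 * bumped first and rolled back again when a writer turns out to hold
 * or wait for the lock, so even a failed trylock briefly writes to the
 * lock cacheline.
 */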

/**
 * queued_write_trylock - try to acquire write lock of a queue rwlock
 * @lock : Pointer to queue rwlock structure
 * Return: 1 if lock acquired, 0 if failed
 */
static inline int queued_write_trylock(struct qrwlock *lock)
{
        int cnts;

        cnts = atomic_read(&lock->cnts);
        if (unlikely(cnts))
                return 0;

        return likely(atomic_try_cmpxchg_acquire(&lock->cnts, &cnts,
                                _QW_LOCKED));
}
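
/*
 * A writer can only trylock a completely free lock: any reader, lock
 * holder or waiting writer (cnts != 0) fails the attempt immediately,
 * and the 0 -> _QW_LOCKED transition is a single acquire cmpxchg.
 */
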
/**
 * queued_read_lock - acquire read lock of a queue rwlock
 * @lock: Pointer to queue rwlock structure
 */
static inline void queued_read_lock(struct qrwlock *lock)
{
        int cnts;

        cnts = atomic_add_return_acquire(_QR_BIAS, &lock->cnts);
        if (likely(!(cnts & _QW_WMASK)))
                return;

        /* The slowpath will decrement the reader count, if necessary. */
        queued_read_lock_slowpath(lock);
}
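
/*
 * The read-lock fast path is a single atomic add: when no writer state
 * is observed, the reader bias added above already holds the lock, and
 * the acquire ordering pairs with the release in queued_write_unlock().
 */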

/**
 * queued_write_lock - acquire write lock of a queue rwlock
 * @lock : Pointer to queue rwlock structure
 */
static inline void queued_write_lock(struct qrwlock *lock)
{
        int cnts = 0;
        /* Optimize for the unfair lock case where the fair flag is 0. */
        if (likely(atomic_try_cmpxchg_acquire(&lock->cnts, &cnts, _QW_LOCKED)))
                return;

        queued_write_lock_slowpath(lock);
}
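
/*
 * Like the trylock above, the write-lock fast path only succeeds when
 * the lock word is 0; anything else (readers or another writer) sends
 * the caller into the queued slowpath.
 */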

/**
 * queued_read_unlock - release read lock of a queue rwlock
 * @lock : Pointer to queue rwlock structure
 */
static inline void queued_read_unlock(struct qrwlock *lock)
{
        /*
         * Atomically decrement the reader count
         */
        (void)atomic_sub_return_release(_QR_BIAS, &lock->cnts);
}
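
/*
 * The release ordering on the decrement ensures the reader's critical
 * section is visible before the reader count drops, pairing with the
 * acquire in the write-lock paths.
 */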

/**
 * queued_write_unlock - release write lock of a queue rwlock
 * @lock : Pointer to queue rwlock structure
 */
static inline void queued_write_unlock(struct qrwlock *lock)
{
        smp_store_release(&lock->wlocked, 0);
}
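
/*
 * ->wlocked overlays the least significant byte of ->cnts (see the
 * union in asm-generic/qrwlock_types.h), so this store-release clears
 * _QW_LOCKED without touching the _QW_WAITING bit or the reader count,
 * avoiding an atomic RMW on the unlock path.
 */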

/**
 * queued_rwlock_is_contended - check if the lock is contended
 * @lock : Pointer to queue rwlock structure
 * Return: 1 if lock contended, 0 otherwise
 */
static inline int queued_rwlock_is_contended(struct qrwlock *lock)
{
        return arch_spin_is_locked(&lock->wait_lock);
}
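
/*
 * ->wait_lock is only taken by CPUs going through the slowpaths, so a
 * held wait_lock indicates that someone is queued behind the current
 * owner.  This is also why arch_spin_is_locked() must already be
 * defined when this header is included (see the comment at the top of
 * the file).
 */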

/*
 * Remap the architecture-specific rwlock functions to the
 * corresponding queue rwlock functions.
 */
#define arch_read_lock(l)               queued_read_lock(l)
#define arch_write_lock(l)              queued_write_lock(l)
#define arch_read_trylock(l)            queued_read_trylock(l)
#define arch_write_trylock(l)           queued_write_trylock(l)
#define arch_read_unlock(l)             queued_read_unlock(l)
#define arch_write_unlock(l)            queued_write_unlock(l)
#define arch_rwlock_is_contended(l)     queued_rwlock_is_contended(l)
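
/*
 * Consumers never call the arch_*() entry points directly; they are
 * reached through the generic rwlock API in linux/rwlock.h.  A minimal,
 * hypothetical usage sketch (example_lock is made up for illustration):
 *
 *      static DEFINE_RWLOCK(example_lock);
 *
 *      read_lock(&example_lock);
 *      ... read-side section, other readers may run concurrently ...
 *      read_unlock(&example_lock);
 *
 *      write_lock(&example_lock);
 *      ... exclusive section ...
 *      write_unlock(&example_lock);
 */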

#endif /* __ASM_GENERIC_QRWLOCK_H */