linux/arch/parisc/include/asm/spinlock.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#include <asm/barrier.h>
#include <asm/ldcw.h>
#include <asm/processor.h>
#include <asm/spinlock_types.h>

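/*
 * PA-RISC's only atomic read-modify-write primitive is ldcw (load word
 * and clear): it atomically returns the old value of a word and stores
 * zero to it.  A lock word therefore holds 1 when the lock is free and
 * 0 when it is held.  __ldcw_align() returns the suitably aligned lock
 * word inside arch_spinlock_t that ldcw operates on.
 */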
static inline int arch_spin_is_locked(arch_spinlock_t *x)
{
        volatile unsigned int *a = __ldcw_align(x);
        return *a == 0;
}

#define arch_spin_lock(lock) arch_spin_lock_flags(lock, 0)

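/*
 * Acquire the lock with ldcw.  While it is held by someone else, spin
 * with plain reads so we do not keep dirtying the lock's cache line,
 * and if the caller had interrupts enabled (PSW_SM_I set in flags),
 * re-enable them while waiting to keep interrupt latency down.
 */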
static inline void arch_spin_lock_flags(arch_spinlock_t *x,
                                        unsigned long flags)
{
        volatile unsigned int *a;

        a = __ldcw_align(x);
        while (__ldcw(a) == 0)
                while (*a == 0)
                        if (flags & PSW_SM_I) {
                                local_irq_enable();
                                cpu_relax();
                                local_irq_disable();
                        } else
                                cpu_relax();
}
#define arch_spin_lock_flags arch_spin_lock_flags

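/*
 * Release the lock by storing 1 back into the lock word.  Loads and
 * stores from the critical section must complete before the lock word
 * is set: on SMP a dummy ldcw is used as the release barrier, on UP
 * mb() is sufficient.
 */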
static inline void arch_spin_unlock(arch_spinlock_t *x)
{
        volatile unsigned int *a;

        a = __ldcw_align(x);
#ifdef CONFIG_SMP
        (void) __ldcw(a);
#else
        mb();
#endif
        *a = 1;
}

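/*
 * A single ldcw attempt: a non-zero return means the word was 1 and we
 * now own the lock, zero means it was already held.
 */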
static inline int arch_spin_trylock(arch_spinlock_t *x)
{
        volatile unsigned int *a;
        int ret;

        a = __ldcw_align(x);
        ret = __ldcw(a) != 0;

        return ret;
}

/*
 * Read-write spinlocks, allowing multiple readers but only one writer.
 * Linux rwlocks are unfair to writers; they can be starved for an
 * indefinite time by readers.  With care, they can also be taken in
 * interrupt context.
 *
 * In the PA-RISC implementation, we have a spinlock and a counter.
 * Readers use the lock to serialise their access to the counter (which
 * records how many readers currently hold the rwlock).  A counter value
 * of -1 marks the rwlock as write-locked.
 * Writers hold the spinlock for as long as they hold the rwlock,
 * preventing any readers or other writers from grabbing it.
 */

/* Note that we have to ensure interrupts are disabled in case we're
 * interrupted by some other code that wants to grab the same read lock */
static __inline__ void arch_read_lock(arch_rwlock_t *rw)
{
        unsigned long flags;
        local_irq_save(flags);
        arch_spin_lock_flags(&rw->lock, flags);
        rw->counter++;
        arch_spin_unlock(&rw->lock);
        local_irq_restore(flags);
}

/* Note that we have to ensure interrupts are disabled in case we're
 * interrupted by some other code that wants to grab the same read lock */
static __inline__ void arch_read_unlock(arch_rwlock_t *rw)
{
        unsigned long flags;
        local_irq_save(flags);
        arch_spin_lock_flags(&rw->lock, flags);
        rw->counter--;
        arch_spin_unlock(&rw->lock);
        local_irq_restore(flags);
}

/* Note that we have to ensure interrupts are disabled in case we're
 * interrupted by some other code that wants to grab the same read lock */
static __inline__ int arch_read_trylock(arch_rwlock_t *rw)
{
        unsigned long flags;
 retry:
        local_irq_save(flags);
        if (arch_spin_trylock(&rw->lock)) {
                rw->counter++;
                arch_spin_unlock(&rw->lock);
                local_irq_restore(flags);
                return 1;
        }

        local_irq_restore(flags);
        /* If write-locked, we fail to acquire the lock */
        if (rw->counter < 0)
                return 0;

        /* Wait until we have a realistic chance at the lock */
        while (arch_spin_is_locked(&rw->lock) && rw->counter >= 0)
                cpu_relax();

        goto retry;
}

/* Note that we have to ensure interrupts are disabled in case we're
 * interrupted by some other code that wants to read_trylock() this lock */
static __inline__ void arch_write_lock(arch_rwlock_t *rw)
{
        unsigned long flags;
retry:
        local_irq_save(flags);
        arch_spin_lock_flags(&rw->lock, flags);

        if (rw->counter != 0) {
                arch_spin_unlock(&rw->lock);
                local_irq_restore(flags);

                while (rw->counter != 0)
                        cpu_relax();

                goto retry;
        }

        rw->counter = -1; /* mark as write-locked */
        mb();
        local_irq_restore(flags);
}

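/*
 * arch_write_lock() leaves the inner spinlock held for the whole
 * write-side critical section, so releasing the write lock means
 * clearing the writer mark and then dropping the spinlock.
 */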
static __inline__ void arch_write_unlock(arch_rwlock_t *rw)
{
        rw->counter = 0;
        arch_spin_unlock(&rw->lock);
}

/* Note that we have to ensure interrupts are disabled in case we're
 * interrupted by some other code that wants to read_trylock() this lock */
static __inline__ int arch_write_trylock(arch_rwlock_t *rw)
{
        unsigned long flags;
        int result = 0;

        local_irq_save(flags);
        if (arch_spin_trylock(&rw->lock)) {
                if (rw->counter == 0) {
                        rw->counter = -1;
                        result = 1;
                } else {
                        /* Read-locked.  Oh well. */
                        arch_spin_unlock(&rw->lock);
                }
        }
        local_irq_restore(flags);

        return result;
}

#endif /* __ASM_SPINLOCK_H */