/*
 *  include/asm-s390/spinlock.h
 *
 *  S390 version
 *    Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "include/asm-i386/spinlock.h"
 */

#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#include <linux/smp.h>

extern int spin_retry;

static inline int
_raw_compare_and_swap(volatile unsigned int *lock,
		      unsigned int old, unsigned int new)
{
	asm volatile(
		"	cs	%0,%3,%1"
		: "=d" (old), "=Q" (*lock)
		: "0" (old), "d" (new), "Q" (*lock)
		: "cc", "memory");
	return old;
}
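
/*
 * For reference, _raw_compare_and_swap() behaves like the C sketch
 * below, except that the cs instruction performs the whole
 * read-compare-store sequence atomically (the sketch and its name are
 * illustrative only, not part of this file's API):
 *
 *	static inline unsigned int cs_sketch(unsigned int *lock,
 *					     unsigned int old,
 *					     unsigned int new)
 *	{
 *		unsigned int prev = *lock;
 *		if (prev == old)
 *			*lock = new;
 *		return prev;	// callers test prev == old for success
 *	}
 */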

/*
 * Simple spin lock operations.  There are two variants, one clears IRQ's
 * on the local processor, one does not.
 *
 * We make no fairness assumptions. They have a cost.
 *
 * (the type definitions are in asm/spinlock_types.h)
 */

#define arch_spin_is_locked(x) ((x)->owner_cpu != 0)
#define arch_spin_unlock_wait(lock) \
	do { while (arch_spin_is_locked(lock)) \
		 arch_spin_relax(lock); } while (0)

extern void arch_spin_lock_wait(arch_spinlock_t *);
extern void arch_spin_lock_wait_flags(arch_spinlock_t *, unsigned long flags);
extern int arch_spin_trylock_retry(arch_spinlock_t *);
extern void arch_spin_relax(arch_spinlock_t *lock);

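/*
 * The owner_cpu word is 0 when the lock is free and holds ~cpu_id of
 * the owner when taken; the bitwise NOT keeps CPU 0 from encoding to
 * the "free" value 0.  Each lock operation below tries one inline cs
 * as the fast path and falls back to the out-of-line wait routines
 * declared above; spin_retry bounds their busy-wait loops.
 */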
static inline void arch_spin_lock(arch_spinlock_t *lp)
{
	int old;

	old = _raw_compare_and_swap(&lp->owner_cpu, 0, ~smp_processor_id());
	if (likely(old == 0))
		return;
	arch_spin_lock_wait(lp);
}

static inline void arch_spin_lock_flags(arch_spinlock_t *lp,
					unsigned long flags)
{
	int old;

	old = _raw_compare_and_swap(&lp->owner_cpu, 0, ~smp_processor_id());
	if (likely(old == 0))
		return;
	arch_spin_lock_wait_flags(lp, flags);
}

static inline int arch_spin_trylock(arch_spinlock_t *lp)
{
	int old;

	old = _raw_compare_and_swap(&lp->owner_cpu, 0, ~smp_processor_id());
	if (likely(old == 0))
		return 1;
	return arch_spin_trylock_retry(lp);
}

static inline void arch_spin_unlock(arch_spinlock_t *lp)
{
	_raw_compare_and_swap(&lp->owner_cpu, lp->owner_cpu, 0);
}
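
/*
 * Note that unlock also goes through cs rather than a plain store:
 * comparing against lp->owner_cpu itself makes the swap effectively
 * unconditional, and the serializing cs instruction provides the
 * release ordering a plain store would not guarantee here.
 */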

/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! it is quite common to have readers in interrupt
 * handlers but no interrupt writers. For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get a
 * irq-safe write lock, but readers can get non-irqsafe
 * read locks.
 */

/**
 * read_can_lock - would read_trylock() succeed?
 * @lock: the rwlock in question.
 */
#define arch_read_can_lock(x) ((int)(x)->lock >= 0)

/**
 * write_can_lock - would write_trylock() succeed?
 * @lock: the rwlock in question.
 */
#define arch_write_can_lock(x) ((x)->lock == 0)

extern void _raw_read_lock_wait(arch_rwlock_t *lp);
extern void _raw_read_lock_wait_flags(arch_rwlock_t *lp, unsigned long flags);
extern int _raw_read_trylock_retry(arch_rwlock_t *lp);
extern void _raw_write_lock_wait(arch_rwlock_t *lp);
extern void _raw_write_lock_wait_flags(arch_rwlock_t *lp, unsigned long flags);
extern int _raw_write_trylock_retry(arch_rwlock_t *lp);

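/*
 * Lock word layout: bit 31 (0x80000000) is the writer bit, the low 31
 * bits count readers.  The read paths below mask the writer bit off
 * the expected old value, so the cs fails (and the wait path is
 * taken) whenever a writer holds the lock.
 */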
static inline void arch_read_lock(arch_rwlock_t *rw)
{
	unsigned int old;
	old = rw->lock & 0x7fffffffU;
	if (_raw_compare_and_swap(&rw->lock, old, old + 1) != old)
		_raw_read_lock_wait(rw);
}

static inline void arch_read_lock_flags(arch_rwlock_t *rw, unsigned long flags)
{
	unsigned int old;
	old = rw->lock & 0x7fffffffU;
	if (_raw_compare_and_swap(&rw->lock, old, old + 1) != old)
		_raw_read_lock_wait_flags(rw, flags);
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	unsigned int old, cmp;

	old = rw->lock;
	do {
		cmp = old;
		old = _raw_compare_and_swap(&rw->lock, old, old - 1);
	} while (cmp != old);
}
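
/*
 * The loop in arch_read_unlock() is the usual cs retry pattern: if
 * another CPU changed rw->lock between the read and the cs, cs
 * returns the fresh value and the decrement is retried against it.
 * In compiler-builtin terms it is roughly
 * __atomic_fetch_sub(&rw->lock, 1, __ATOMIC_SEQ_CST)
 * (an illustrative comparison only; the kernel uses cs directly).
 */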

static inline void arch_write_lock(arch_rwlock_t *rw)
{
	if (unlikely(_raw_compare_and_swap(&rw->lock, 0, 0x80000000) != 0))
		_raw_write_lock_wait(rw);
}

static inline void arch_write_lock_flags(arch_rwlock_t *rw, unsigned long flags)
{
	if (unlikely(_raw_compare_and_swap(&rw->lock, 0, 0x80000000) != 0))
		_raw_write_lock_wait_flags(rw, flags);
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	_raw_compare_and_swap(&rw->lock, 0x80000000, 0);
}
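
/*
 * A writer can only swap in the writer bit while the whole word is 0,
 * i.e. no readers and no writer; unlock swaps it back to 0.  Unlike
 * arch_spin_unlock(), the expected old value here is a constant, so
 * the cs in arch_write_unlock() silently does nothing if the word is
 * not 0x80000000 (unlocking an unheld write lock is a caller bug).
 */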

static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	unsigned int old;
	old = rw->lock & 0x7fffffffU;
	if (likely(_raw_compare_and_swap(&rw->lock, old, old + 1) == old))
		return 1;
	return _raw_read_trylock_retry(rw);
}

static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	if (likely(_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0))
		return 1;
	return _raw_write_trylock_retry(rw);
}
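
/*
 * Like arch_spin_trylock(), the rwlock trylocks retry out of line
 * (bounded by spin_retry) before giving up, so a trylock can still
 * succeed when the first cs merely lost a race against a concurrent
 * reader-count update.
 */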

#define arch_read_relax(lock)	cpu_relax()
#define arch_write_relax(lock)	cpu_relax()

#endif /* __ASM_SPINLOCK_H */