#ifndef _ASM_X86_SPINLOCK_H
#define _ASM_X86_SPINLOCK_H

#include <asm/atomic.h>
#include <asm/rwlock.h>
#include <asm/page.h>
#include <asm/processor.h>
#include <linux/compiler.h>
#include <asm/paravirt.h>
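/*
 * Your basic SMP spinlocks, allowing only a single CPU anywhere.
 *
 * These are fair FIFO ticket locks, limited to 2^16 CPUs by the width of
 * the lock word: waiters draw a ticket and are served in arrival order,
 * which bounds the worst-case wait under contention.
 *
 * (the type definitions are in asm/spinlock_types.h)
 */
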
#ifdef CONFIG_X86_32
# define LOCK_PTR_REG "a"
# define REG_PTR_MODE "k"
#else
# define LOCK_PTR_REG "D"
# define REG_PTR_MODE "q"
#endif

#if defined(CONFIG_X86_32) && \
	(defined(CONFIG_X86_OOSTORE) || defined(CONFIG_X86_PPRO_FENCE))
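/*
 * On PPro SMP or if we are using OOSTORE, we use a locked operation to
 * unlock (PPro errata 66, 92).
 */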
# define UNLOCK_LOCK_PREFIX LOCK_PREFIX
#else
# define UNLOCK_LOCK_PREFIX
#endif

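/*
 * Ticket locks keep two counters in one word: the "head" (low part) is
 * the ticket now being served, the "tail" (high part) is the next ticket
 * to hand out.  A CPU takes the lock by atomically fetching the tail and
 * incrementing it (xadd), then spinning until the head equals its ticket.
 *
 * The tail sits in the high part so a single xadd can bump the tail and
 * read the head in one atomic step, which also covers the memory ordering
 * for the uncontended fast path.  For example, on a free lock (slock == 0)
 * the first CPU's xadd leaves slock == 0x0100 and returns ticket 0, which
 * matches the head, so it owns the lock; a second CPU bumps slock to
 * 0x0200 and spins on ticket 1 until the unlock's incb advances the head.
 *
 * With fewer than 2^8 CPUs both counters fit in a byte, so the x86
 * partial registers (%b0/%h0) can compare them without extra shifts.
 */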
#if (NR_CPUS < 256)
#define TICKET_SHIFT 8

static __always_inline void __ticket_spin_lock(raw_spinlock_t *lock)
{
	short inc = 0x0100;

	asm volatile (
		LOCK_PREFIX "xaddw %w0, %1\n"
		"1:\t"
		"cmpb %h0, %b0\n\t"
		"je 2f\n\t"
		"rep ; nop\n\t"
		"movb %1, %b0\n\t"
		"jmp 1b\n"
		"2:"
		: "+Q" (inc), "+m" (lock->slock)
		:
		: "memory", "cc");
}

static __always_inline int __ticket_spin_trylock(raw_spinlock_t *lock)
{
	int tmp, new;

	asm volatile("movzwl %2, %0\n\t"
		     "cmpb %h0,%b0\n\t"
		     "leal 0x100(%" REG_PTR_MODE "0), %1\n\t"
		     "jne 1f\n\t"
		     LOCK_PREFIX "cmpxchgw %w1,%2\n\t"
		     "1:"
		     "sete %b1\n\t"
		     "movzbl %b1,%0\n\t"
		     : "=&a" (tmp), "=&q" (new), "+m" (lock->slock)
		     :
		     : "memory", "cc");

	return tmp;
}

static __always_inline void __ticket_spin_unlock(raw_spinlock_t *lock)
{
	asm volatile(UNLOCK_LOCK_PREFIX "incb %0"
		     : "+m" (lock->slock)
		     :
		     : "memory", "cc");
}
#else
#define TICKET_SHIFT 16

static __always_inline void __ticket_spin_lock(raw_spinlock_t *lock)
{
	int inc = 0x00010000;
	int tmp;

	asm volatile(LOCK_PREFIX "xaddl %0, %1\n"
		     "movzwl %w0, %2\n\t"
		     "shrl $16, %0\n\t"
		     "1:\t"
		     "cmpl %0, %2\n\t"
		     "je 2f\n\t"
		     "rep ; nop\n\t"
		     "movzwl %1, %2\n\t"
		     "jmp 1b\n"
		     "2:"
		     : "+r" (inc), "+m" (lock->slock), "=&r" (tmp)
		     :
		     : "memory", "cc");
}

static __always_inline int __ticket_spin_trylock(raw_spinlock_t *lock)
{
	int tmp;
	int new;

	asm volatile("movl %2,%0\n\t"
		     "movl %0,%1\n\t"
		     "roll $16, %0\n\t"
		     "cmpl %0,%1\n\t"
		     "leal 0x00010000(%" REG_PTR_MODE "0), %1\n\t"
		     "jne 1f\n\t"
		     LOCK_PREFIX "cmpxchgl %1,%2\n\t"
		     "1:"
		     "sete %b1\n\t"
		     "movzbl %b1,%0\n\t"
		     : "=&a" (tmp), "=&q" (new), "+m" (lock->slock)
		     :
		     : "memory", "cc");

	return tmp;
}

static __always_inline void __ticket_spin_unlock(raw_spinlock_t *lock)
{
	asm volatile(UNLOCK_LOCK_PREFIX "incw %0"
		     : "+m" (lock->slock)
		     :
		     : "memory", "cc");
}
#endif
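
/*
 * The lock word holds the head (low TICKET_SHIFT bits) and the tail (next
 * TICKET_SHIFT bits): the lock is taken iff they differ, and contended
 * iff the tail is more than one ticket ahead of the head.
 */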
static inline int __ticket_spin_is_locked(raw_spinlock_t *lock)
{
	int tmp = ACCESS_ONCE(lock->slock);

	return !!(((tmp >> TICKET_SHIFT) ^ tmp) & ((1 << TICKET_SHIFT) - 1));
}

static inline int __ticket_spin_is_contended(raw_spinlock_t *lock)
{
	int tmp = ACCESS_ONCE(lock->slock);

	return (((tmp >> TICKET_SHIFT) - tmp) & ((1 << TICKET_SHIFT) - 1)) > 1;
}

#ifndef CONFIG_PARAVIRT_SPINLOCKS

static inline int __raw_spin_is_locked(raw_spinlock_t *lock)
{
	return __ticket_spin_is_locked(lock);
}

static inline int __raw_spin_is_contended(raw_spinlock_t *lock)
{
	return __ticket_spin_is_contended(lock);
}
#define __raw_spin_is_contended	__raw_spin_is_contended

static __always_inline void __raw_spin_lock(raw_spinlock_t *lock)
{
	__ticket_spin_lock(lock);
}

static __always_inline int __raw_spin_trylock(raw_spinlock_t *lock)
{
	return __ticket_spin_trylock(lock);
}

static __always_inline void __raw_spin_unlock(raw_spinlock_t *lock)
{
	__ticket_spin_unlock(lock);
}

static __always_inline void __raw_spin_lock_flags(raw_spinlock_t *lock,
						  unsigned long flags)
{
	__raw_spin_lock(lock);
}

#endif	/* CONFIG_PARAVIRT_SPINLOCKS */

static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock)
{
	while (__raw_spin_is_locked(lock))
		cpu_relax();
}
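
/*
 * Read-write spinlocks, allowing multiple readers but only one writer.
 *
 * The lock is a 32-bit counter biased by RW_LOCK_BIAS: it reads
 * RW_LOCK_BIAS when free, stays positive while readers hold it, and hits
 * zero (or goes negative) once a writer is involved, because each reader
 * subtracts 1 and a writer subtracts the whole bias.
 *
 * Readers in interrupts can safely mix with non-irq-safe read locks as
 * long as every writer takes an irq-safe write lock.
 */

/**
 * __raw_read_can_lock - would read_trylock() succeed?
 * @lock: the rwlock in question.
 */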
static inline int __raw_read_can_lock(raw_rwlock_t *lock)
{
	return (int)(lock)->lock > 0;
}

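/**
 * __raw_write_can_lock - would write_trylock() succeed?
 * @lock: the rwlock in question.
 */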
static inline int __raw_write_can_lock(raw_rwlock_t *lock)
{
	return (lock)->lock == RW_LOCK_BIAS;
}

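/*
 * Fast path: drop the reader count by one; if the result goes negative a
 * writer is involved, so back off to the out-of-line __read_lock_failed
 * helper, which undoes the decrement and spins.
 */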
static inline void __raw_read_lock(raw_rwlock_t *rw)
{
	asm volatile(LOCK_PREFIX " subl $1,(%0)\n\t"
		     "jns 1f\n"
		     "call __read_lock_failed\n\t"
		     "1:\n"
		     ::LOCK_PTR_REG (rw) : "memory");
}

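/*
 * Writers subtract the whole bias and succeed only if the counter hits
 * exactly zero, i.e. no readers and no other writer were present;
 * otherwise __write_lock_failed restores the bias and spins.
 */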
static inline void __raw_write_lock(raw_rwlock_t *rw)
{
	asm volatile(LOCK_PREFIX " subl %1,(%0)\n\t"
		     "jz 1f\n"
		     "call __write_lock_failed\n\t"
		     "1:\n"
		     ::LOCK_PTR_REG (rw), "i" (RW_LOCK_BIAS) : "memory");
}

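/*
 * The trylock variants reuse the generic atomic_t helpers on the same
 * counter word: apply the change optimistically and undo it when the
 * lock turns out to be taken.
 */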
static inline int __raw_read_trylock(raw_rwlock_t *lock)
{
	atomic_t *count = (atomic_t *)lock;

	if (atomic_dec_return(count) >= 0)
		return 1;
	atomic_inc(count);
	return 0;
}

static inline int __raw_write_trylock(raw_rwlock_t *lock)
{
	atomic_t *count = (atomic_t *)lock;

	if (atomic_sub_and_test(RW_LOCK_BIAS, count))
		return 1;
	atomic_add(RW_LOCK_BIAS, count);
	return 0;
}

static inline void __raw_read_unlock(raw_rwlock_t *rw)
{
	asm volatile(LOCK_PREFIX "incl %0" :"+m" (rw->lock) : : "memory");
}

static inline void __raw_write_unlock(raw_rwlock_t *rw)
{
	asm volatile(LOCK_PREFIX "addl %1, %0"
		     : "+m" (rw->lock) : "i" (RW_LOCK_BIAS) : "memory");
}

#define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock)
#define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock)

#define _raw_spin_relax(lock)	cpu_relax()
#define _raw_read_relax(lock)	cpu_relax()
#define _raw_write_relax(lock)	cpu_relax()
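
/* The {read|write|spin}_lock() on x86 are full memory barriers */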
static inline void smp_mb__after_lock(void) { }
#define ARCH_HAS_SMP_MB_AFTER_LOCK

#endif /* _ASM_X86_SPINLOCK_H */