/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_SPINLOCK_H
#define __LINUX_SPINLOCK_H

/*
 * include/linux/spinlock.h - generic spinlock/rwlock declarations
 *
 * here's the role of the various spinlock/rwlock related include files:
 *
 * on SMP builds:
 *
 *  asm/spinlock_types.h: contains the arch_spinlock_t/arch_rwlock_t and the
 *                        initializers
 *
 *  linux/spinlock_types.h:
 *                        defines the generic type and initializers
 *
 *  asm/spinlock.h:       contains the arch_spin_*()/etc. lowlevel
 *                        implementations, mostly inline assembly code
 *
 *   (also included on UP-debug builds:)
 *
 *  linux/spinlock_api_smp.h:
 *                        contains the prototypes for the _spin_*() APIs.
 *
 *  linux/spinlock.h:     builds the final spin_*() APIs.
 *
 * on UP builds:
 *
 *  linux/spinlock_type_up.h:
 *                        contains the generic, simplified UP spinlock type.
 *                        (which is an empty structure on non-debug builds)
 *
 *  linux/spinlock_types.h:
 *                        defines the generic type and initializers
 *
 *  linux/spinlock_up.h:
 *                        contains the arch_spin_*()/etc. version of UP
 *                        spinlock. (which is an empty non-debug version).
 *
 *   (also included on UP-non-debug builds:)
 *
 *  linux/spinlock_api_up.h:
 *                        builds the _spin_*() APIs.
 *
 *  linux/spinlock.h:     builds the final spin_*() APIs.
 */
#include <linux/typecheck.h>
#include <linux/preempt.h>
#include <linux/linkage.h>
#include <linux/compiler.h>
#include <linux/irqflags.h>
#include <linux/thread_info.h>
#include <linux/kernel.h>
#include <linux/stringify.h>
#include <linux/bottom_half.h>
#include <asm/barrier.h>

/*
 * Must define these before including other files, inline functions need them
 */

#define LOCK_SECTION_NAME ".text..lock."KBUILD_BASENAME

#define LOCK_SECTION_START(extra)		\
	".subsection 1\n\t"			\
	extra					\
	".ifndef " LOCK_SECTION_NAME "\n\t"	\
	LOCK_SECTION_NAME ":\n\t"		\
	".endif\n"

#define LOCK_SECTION_END			\
	".previous\n\t"

#define __lockfunc __attribute__((section(".spinlock.text")))

/*
 * Pull the arch_spinlock_t and arch_rwlock_t definitions:
 */
#include <linux/spinlock_types.h>

/*
 * Pull the arch_spin*() functions/declarations (UP-nondebug doesn't need them):
 */
#ifdef CONFIG_SMP
# include <asm/spinlock.h>
#else
# include <linux/spinlock_up.h>
#endif

#ifdef CONFIG_DEBUG_SPINLOCK
  extern void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
				   struct lock_class_key *key);
# define raw_spin_lock_init(lock)			\
do {							\
	static struct lock_class_key __key;		\
							\
	__raw_spin_lock_init((lock), #lock, &__key);	\
} while (0)

#else
# define raw_spin_lock_init(lock)			\
	do { *(lock) = __RAW_SPIN_LOCK_UNLOCKED(lock); } while (0)
#endif
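
/*
 * Illustrative sketch (not part of the original header): a raw spinlock
 * embedded in a structure is initialized at runtime before first use.
 * 'my_dev' and 'my_dev_setup' are hypothetical names used for the example:
 *
 *	struct my_dev {
 *		raw_spinlock_t lock;
 *	};
 *
 *	static void my_dev_setup(struct my_dev *d)
 *	{
 *		raw_spin_lock_init(&d->lock);
 *	}
 *
 * Under CONFIG_DEBUG_SPINLOCK this also registers a lock class via the
 * static __key in the macro above.
 */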

#define raw_spin_is_locked(lock)	arch_spin_is_locked(&(lock)->raw_lock)

#ifdef CONFIG_GENERIC_LOCKBREAK
#define raw_spin_is_contended(lock) ((lock)->break_lock)
#else

#ifdef arch_spin_is_contended
#define raw_spin_is_contended(lock)	arch_spin_is_contended(&(lock)->raw_lock)
#else
#define raw_spin_is_contended(lock)	(((void)(lock), 0))
#endif
#endif
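
/*
 * Illustrative sketch (not part of the original header): a long-running
 * loop can use the contention hint to back off voluntarily.  'my_lock',
 * 'my_work_pending' and 'do_one_item' are hypothetical names:
 *
 *	raw_spin_lock(&my_lock);
 *	while (my_work_pending()) {
 *		do_one_item();
 *		if (raw_spin_is_contended(&my_lock)) {
 *			raw_spin_unlock(&my_lock);
 *			cpu_relax();
 *			raw_spin_lock(&my_lock);
 *		}
 *	}
 *	raw_spin_unlock(&my_lock);
 *
 * Note the hint may be a constant 0 when the architecture provides no
 * arch_spin_is_contended() and CONFIG_GENERIC_LOCKBREAK is off.
 */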

/*
 * smp_mb__after_spinlock() provides the equivalent of a full memory
 * barrier after a lock acquisition: it guarantees that a STORE issued
 * before the spin_lock() is ordered against a LOAD issued after it,
 * which the ACQUIRE semantics of the lock alone do not.  Architectures
 * whose spin_lock() already implies a full barrier can leave it as the
 * no-op below; the others override it in asm/spinlock.h.
 */
#ifndef smp_mb__after_spinlock
#define smp_mb__after_spinlock()	do { } while (0)
#endif
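
/*
 * Illustrative sketch (not part of the original header), modeled on the
 * store->lock->barrier->load pattern this barrier exists for; 'X', 'Y'
 * and 'S' are hypothetical:
 *
 *	WRITE_ONCE(X, 1);
 *	spin_lock(&S);
 *	smp_mb__after_spinlock();
 *	r = READ_ONCE(Y);
 *
 * Without the barrier, the store to X could be reordered after the load
 * of Y on architectures where spin_lock() is only an ACQUIRE.
 */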

#ifdef CONFIG_DEBUG_SPINLOCK
 extern void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock);
#define do_raw_spin_lock_flags(lock, flags) do_raw_spin_lock(lock)
 extern int do_raw_spin_trylock(raw_spinlock_t *lock);
 extern void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock);
#else
static inline void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock)
{
	__acquire(lock);
	arch_spin_lock(&lock->raw_lock);
}

static inline void
do_raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long *flags) __acquires(lock)
{
	__acquire(lock);
	arch_spin_lock_flags(&lock->raw_lock, *flags);
}

static inline int do_raw_spin_trylock(raw_spinlock_t *lock)
{
	return arch_spin_trylock(&(lock)->raw_lock);
}

static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock)
{
	arch_spin_unlock(&lock->raw_lock);
	__release(lock);
}
#endif

/*
 * Define the various raw_spin_* lock methods.  Note we implement the
 * regular lock/unlock functions here in terms of the _raw_spin_*() APIs
 * pulled in from spinlock_api_smp.h or spinlock_api_up.h below.
 */
#define raw_spin_trylock(lock)	__cond_lock(lock, _raw_spin_trylock(lock))

#define raw_spin_lock(lock)	_raw_spin_lock(lock)

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define raw_spin_lock_nested(lock, subclass) \
	_raw_spin_lock_nested(lock, subclass)

# define raw_spin_lock_nest_lock(lock, nest_lock)			\
	 do {								\
		 typecheck(struct lockdep_map *, &(nest_lock)->dep_map);\
		 _raw_spin_lock_nest_lock(lock, &(nest_lock)->dep_map);	\
	 } while (0)
#else
/*
 * Always evaluate the 'subclass' argument to avoid that the compiler
 * warns about set-but-not-used variables when building with
 * CONFIG_DEBUG_LOCK_ALLOC=n and with W=1.
 */
# define raw_spin_lock_nested(lock, subclass)		\
	_raw_spin_lock(((void)(subclass), (lock)))
# define raw_spin_lock_nest_lock(lock, nest_lock)	_raw_spin_lock(lock)
#endif
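
/*
 * Illustrative sketch (not part of the original header): taking two locks
 * of the same lockdep class requires an explicit nesting annotation so
 * lockdep does not report a false recursive deadlock.  SINGLE_DEPTH_NESTING
 * is the kernel's conventional subclass for a one-level nest; 'l1' and
 * 'l2' are hypothetical:
 *
 *	raw_spin_lock(&l1->lock);
 *	raw_spin_lock_nested(&l2->lock, SINGLE_DEPTH_NESTING);
 *	...
 *	raw_spin_unlock(&l2->lock);
 *	raw_spin_unlock(&l1->lock);
 */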

#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)

#define raw_spin_lock_irqsave(lock, flags)		\
	do {						\
		typecheck(unsigned long, flags);	\
		flags = _raw_spin_lock_irqsave(lock);	\
	} while (0)

#ifdef CONFIG_DEBUG_LOCK_ALLOC
#define raw_spin_lock_irqsave_nested(lock, flags, subclass)		\
	do {								\
		typecheck(unsigned long, flags);			\
		flags = _raw_spin_lock_irqsave_nested(lock, subclass);	\
	} while (0)
#else
#define raw_spin_lock_irqsave_nested(lock, flags, subclass)	\
	do {							\
		typecheck(unsigned long, flags);		\
		flags = _raw_spin_lock_irqsave(lock);		\
	} while (0)
#endif

#else

#define raw_spin_lock_irqsave(lock, flags)		\
	do {						\
		typecheck(unsigned long, flags);	\
		_raw_spin_lock_irqsave(lock, flags);	\
	} while (0)

#define raw_spin_lock_irqsave_nested(lock, flags, subclass)	\
	raw_spin_lock_irqsave(lock, flags)

#endif
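
/*
 * Illustrative sketch (not part of the original header): because flags is
 * assigned to, not passed by address, it must be a plain unsigned long
 * local variable, which the typecheck() above enforces.  'my_lock' is a
 * hypothetical raw_spinlock_t:
 *
 *	unsigned long flags;
 *
 *	raw_spin_lock_irqsave(&my_lock, flags);
 *	... critical section, local IRQs disabled ...
 *	raw_spin_unlock_irqrestore(&my_lock, flags);
 */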

#define raw_spin_lock_irq(lock)		_raw_spin_lock_irq(lock)
#define raw_spin_lock_bh(lock)		_raw_spin_lock_bh(lock)
#define raw_spin_unlock(lock)		_raw_spin_unlock(lock)
#define raw_spin_unlock_irq(lock)	_raw_spin_unlock_irq(lock)

#define raw_spin_unlock_irqrestore(lock, flags)		\
	do {							\
		typecheck(unsigned long, flags);		\
		_raw_spin_unlock_irqrestore(lock, flags);	\
	} while (0)
#define raw_spin_unlock_bh(lock)	_raw_spin_unlock_bh(lock)

#define raw_spin_trylock_bh(lock) \
	__cond_lock(lock, _raw_spin_trylock_bh(lock))

#define raw_spin_trylock_irq(lock) \
({ \
	local_irq_disable(); \
	raw_spin_trylock(lock) ? \
	1 : ({ local_irq_enable(); 0;  }); \
})

#define raw_spin_trylock_irqsave(lock, flags) \
({ \
	local_irq_save(flags); \
	raw_spin_trylock(lock) ? \
	1 : ({ local_irq_restore(flags); 0; }); \
})
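
/*
 * Illustrative sketch (not part of the original header): the trylock_irqsave
 * form disables IRQs first and restores them on failure, so the caller only
 * holds the lock with IRQs off on success.  'my_lock' is hypothetical:
 *
 *	unsigned long flags;
 *
 *	if (raw_spin_trylock_irqsave(&my_lock, flags)) {
 *		... critical section ...
 *		raw_spin_unlock_irqrestore(&my_lock, flags);
 *	} else {
 *		... fall back without the lock ...
 *	}
 */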

/**
 * raw_spin_can_lock - would raw_spin_trylock() succeed?
 * @lock: the spinlock in question.
 */
#define raw_spin_can_lock(lock)	(!raw_spin_is_locked(lock))

/* Include rwlock functions */
#include <linux/rwlock.h>

/*
 * Pull the _spin_*()/_read_*()/_write_*() functions/declarations:
 */
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
# include <linux/spinlock_api_smp.h>
#else
# include <linux/spinlock_api_up.h>
#endif

/*
 * Map the spin_lock functions to the raw variants for PREEMPT_RT=n
 */

static __always_inline raw_spinlock_t *spinlock_check(spinlock_t *lock)
{
	return &lock->rlock;
}

#define spin_lock_init(_lock)			\
do {						\
	spinlock_check(_lock);			\
	raw_spin_lock_init(&(_lock)->rlock);	\
} while (0)
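
/*
 * Illustrative sketch (not part of the original header): dynamic init for
 * a lock embedded in a structure; a file-scope lock would instead use the
 * static DEFINE_SPINLOCK() initializer from spinlock_types.h.  'my_obj'
 * and 'obj' are hypothetical:
 *
 *	struct my_obj {
 *		spinlock_t lock;
 *	};
 *
 *	spin_lock_init(&obj->lock);
 *
 * spinlock_check() only verifies at compile time that the argument really
 * is a spinlock_t *, via the typed helper above.
 */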

static __always_inline void spin_lock(spinlock_t *lock)
{
	raw_spin_lock(&lock->rlock);
}

static __always_inline void spin_lock_bh(spinlock_t *lock)
{
	raw_spin_lock_bh(&lock->rlock);
}

static __always_inline int spin_trylock(spinlock_t *lock)
{
	return raw_spin_trylock(&lock->rlock);
}
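
/*
 * Illustrative sketch (not part of the original header): spin_trylock()
 * returns nonzero when the lock was acquired, so the canonical pattern
 * is ('my_lock' hypothetical):
 *
 *	if (spin_trylock(&my_lock)) {
 *		... critical section ...
 *		spin_unlock(&my_lock);
 *	} else {
 *		... lock was busy, do not spin ...
 *	}
 */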

#define spin_lock_nested(lock, subclass)			\
do {								\
	raw_spin_lock_nested(spinlock_check(lock), subclass);	\
} while (0)

#define spin_lock_nest_lock(lock, nest_lock)				\
do {									\
	raw_spin_lock_nest_lock(spinlock_check(lock), nest_lock);	\
} while (0)

static __always_inline void spin_lock_irq(spinlock_t *lock)
{
	raw_spin_lock_irq(&lock->rlock);
}

#define spin_lock_irqsave(lock, flags)				\
do {								\
	raw_spin_lock_irqsave(spinlock_check(lock), flags);	\
} while (0)

#define spin_lock_irqsave_nested(lock, flags, subclass)			\
do {									\
	raw_spin_lock_irqsave_nested(spinlock_check(lock), flags, subclass); \
} while (0)

static __always_inline void spin_unlock(spinlock_t *lock)
{
	raw_spin_unlock(&lock->rlock);
}

static __always_inline void spin_unlock_bh(spinlock_t *lock)
{
	raw_spin_unlock_bh(&lock->rlock);
}

static __always_inline void spin_unlock_irq(spinlock_t *lock)
{
	raw_spin_unlock_irq(&lock->rlock);
}

static __always_inline void spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
{
	raw_spin_unlock_irqrestore(&lock->rlock, flags);
}
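
/*
 * Illustrative sketch (not part of the original header): the irqsave /
 * irqrestore pair is the safe default when a lock can be taken from both
 * process and interrupt context ('my_lock' hypothetical):
 *
 *	unsigned long flags;
 *
 *	spin_lock_irqsave(&my_lock, flags);
 *	... critical section, cannot be interrupted locally ...
 *	spin_unlock_irqrestore(&my_lock, flags);
 *
 * Unlike spin_lock_irq()/spin_unlock_irq(), this restores the previous
 * IRQ state rather than unconditionally re-enabling interrupts.
 */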

static __always_inline int spin_trylock_bh(spinlock_t *lock)
{
	return raw_spin_trylock_bh(&lock->rlock);
}

static __always_inline int spin_trylock_irq(spinlock_t *lock)
{
	return raw_spin_trylock_irq(&lock->rlock);
}

#define spin_trylock_irqsave(lock, flags)			\
({								\
	raw_spin_trylock_irqsave(spinlock_check(lock), flags);	\
})

static __always_inline int spin_is_locked(spinlock_t *lock)
{
	return raw_spin_is_locked(&lock->rlock);
}

static __always_inline int spin_is_contended(spinlock_t *lock)
{
	return raw_spin_is_contended(&lock->rlock);
}

static __always_inline int spin_can_lock(spinlock_t *lock)
{
	return raw_spin_can_lock(&lock->rlock);
}

#define assert_spin_locked(lock)	assert_raw_spin_locked(&(lock)->rlock)

/*
 * Pull the atomic_t declaration:
 * (asm-mips/atomic.h needs above definitions)
 */
#include <linux/atomic.h>

/**
 * atomic_dec_and_lock - lock on reaching reference count zero
 * @atomic: the atomic counter
 * @lock: the spinlock in question
 *
 * Decrements @atomic by 1.  If the result is 0, returns true and locks
 * @lock.  Returns false for all other cases.
 */
extern int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock);
#define atomic_dec_and_lock(atomic, lock) \
		__cond_lock(lock, _atomic_dec_and_lock(atomic, lock))
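
/*
 * Illustrative sketch (not part of the original header): the classic use
 * is dropping a reference where the final put must also remove the object
 * from a list protected by a lock ('obj' and 'list_lock' hypothetical):
 *
 *	if (atomic_dec_and_lock(&obj->refcnt, &list_lock)) {
 *		list_del(&obj->node);
 *		spin_unlock(&list_lock);
 *		kfree(obj);
 *	}
 *
 * The lock is only taken (and must then be released by the caller) when
 * the counter actually hit zero.
 */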

#endif /* __LINUX_SPINLOCK_H */