/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_SPINLOCK_H
#define __LINUX_SPINLOCK_H

/*
 * include/linux/spinlock.h - generic spinlock/rwlock declarations
 *
 * Here's the role of the various spinlock/rwlock related include files:
 *
 * on SMP builds:
 *
 *  asm/spinlock_types.h: contains the arch_spinlock_t/arch_rwlock_t and the
 *                        initializers
 *
 *  linux/spinlock_types.h:
 *                        defines the generic type and initializers
 *
 *  asm/spinlock.h:       contains the arch_spin_*()/etc. lowlevel
 *                        implementations, mostly inline assembly code
 *
 *   (also included on UP-debug builds:)
 *
 *  linux/spinlock_api_smp.h:
 *                        contains the prototypes for the _spin_*() APIs.
 *
 *  linux/spinlock.h:     builds the final spin_*() APIs.
 *
 * on UP builds:
 *
 *  linux/spinlock_type_up.h:
 *                        contains the generic, simplified UP spinlock type.
 *                        (which is an empty structure on non-debug builds)
 *
 *  linux/spinlock_types.h:
 *                        defines the generic type and initializers
 *
 *  linux/spinlock_up.h:
 *                        contains the arch_spin_*()/etc. version of UP
 *                        building. (which are NOPs on non-debug, non-preempt
 *                        builds)
 *
 *   (included on UP-non-debug builds:)
 *
 *  linux/spinlock_api_up.h:
 *                        builds the _spin_*() APIs.
 *
 *  linux/spinlock.h:     builds the final spin_*() APIs.
 */
#include <linux/typecheck.h>
#include <linux/preempt.h>
#include <linux/linkage.h>
#include <linux/compiler.h>
#include <linux/irqflags.h>
#include <linux/thread_info.h>
#include <linux/kernel.h>
#include <linux/stringify.h>
#include <linux/bottom_half.h>
#include <asm/barrier.h>

/*
 * Must define these before including other files, inline functions need them
 */
#define LOCK_SECTION_NAME ".text..lock."KBUILD_BASENAME

#define LOCK_SECTION_START(extra)               \
        ".subsection 1\n\t"                     \
        extra                                   \
        ".ifndef " LOCK_SECTION_NAME "\n\t"     \
        LOCK_SECTION_NAME ":\n\t"               \
        ".endif\n"

#define LOCK_SECTION_END                        \
        ".previous\n\t"

#define __lockfunc __attribute__((section(".spinlock.text")))

/*
 * Pull the arch_spinlock_t and arch_rwlock_t definitions:
 */
#include <linux/spinlock_types.h>

/*
 * Pull the arch_spin*() functions/declarations (UP-nondebug doesn't need them):
 */
#ifdef CONFIG_SMP
# include <asm/spinlock.h>
#else
# include <linux/spinlock_up.h>
#endif

#ifdef CONFIG_DEBUG_SPINLOCK
  extern void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
                                   struct lock_class_key *key);
# define raw_spin_lock_init(lock)                       \
do {                                                    \
        static struct lock_class_key __key;             \
                                                        \
        __raw_spin_lock_init((lock), #lock, &__key);    \
} while (0)

#else
# define raw_spin_lock_init(lock)                       \
        do { *(lock) = __RAW_SPIN_LOCK_UNLOCKED(lock); } while (0)
#endif

#define raw_spin_is_locked(lock)        arch_spin_is_locked(&(lock)->raw_lock)

#ifdef arch_spin_is_contended
#define raw_spin_is_contended(lock)     arch_spin_is_contended(&(lock)->raw_lock)
#else
#define raw_spin_is_contended(lock)     (((void)(lock), 0))
#endif
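
/*
 * A minimal usage sketch for the raw API above; the function and the
 * my_raw_lock/my_counter names are illustrative, not part of this
 * header:
 *
 *      static DEFINE_RAW_SPINLOCK(my_raw_lock);
 *      static int my_counter;
 *
 *      static void my_increment(void)
 *      {
 *              raw_spin_lock(&my_raw_lock);
 *              my_counter++;
 *              raw_spin_unlock(&my_raw_lock);
 *      }
 *
 * A dynamically allocated raw_spinlock_t must instead be initialized
 * with raw_spin_lock_init() before first use.
 */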

/*
 * smp_mb__after_spinlock() provides the equivalent of a full memory
 * barrier between the spin_lock() at its call site and the memory
 * accesses that follow it.  In particular it guarantees that:
 *
 *   - a STORE issued before the spin_lock() is ordered against a LOAD
 *     issued after smp_mb__after_spinlock(), and
 *
 *   - the critical section is fully ordered (RCsc) rather than merely
 *     RCpc, which matters when values written by other CPUs are
 *     observed in spin loops, without barriers, across scheduling.
 *
 * Since most load-store architectures implement ACQUIRE with an
 * smp_mb() after the LL/SC loop, and TSO architectures imply a full
 * barrier for each atomic operation, the default below is a no-op;
 * architectures that implement ACQUIRE more weakly must override it.
 */
#ifndef smp_mb__after_spinlock
#define smp_mb__after_spinlock()        do { } while (0)
#endif
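
/*
 * A sketch of the pattern the barrier above exists for; X, Y and r are
 * illustrative.  The ACQUIRE implied by spin_lock() alone would not
 * order the earlier STORE against the later LOAD:
 *
 *      WRITE_ONCE(X, 1);
 *      spin_lock(&lock);
 *      smp_mb__after_spinlock();
 *      r = READ_ONCE(Y);
 *
 * If another CPU runs the mirror image (STORE to Y, full barrier, LOAD
 * from X), at least one of the two CPUs is guaranteed to observe the
 * other's STORE.
 */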

#ifdef CONFIG_DEBUG_SPINLOCK
 extern void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock);
#define do_raw_spin_lock_flags(lock, flags) do_raw_spin_lock(lock)
 extern int do_raw_spin_trylock(raw_spinlock_t *lock);
 extern void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock);
#else
static inline void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock)
{
        __acquire(lock);
        arch_spin_lock(&lock->raw_lock);
}

#ifndef arch_spin_lock_flags
#define arch_spin_lock_flags(lock, flags)       arch_spin_lock(lock)
#endif

static inline void
do_raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long *flags) __acquires(lock)
{
        __acquire(lock);
        arch_spin_lock_flags(&lock->raw_lock, *flags);
}

static inline int do_raw_spin_trylock(raw_spinlock_t *lock)
{
        return arch_spin_trylock(&(lock)->raw_lock);
}

static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock)
{
        arch_spin_unlock(&lock->raw_lock);
        __release(lock);
}
#endif

/*
 * Define the various spin_lock methods.  Note we define these
 * regardless of whether CONFIG_SMP or CONFIG_PREEMPT are set. The
 * various methods are defined as nops in the case they are not
 * required.
 */
#define raw_spin_trylock(lock)  __cond_lock(lock, _raw_spin_trylock(lock))

#define raw_spin_lock(lock)     _raw_spin_lock(lock)

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define raw_spin_lock_nested(lock, subclass) \
        _raw_spin_lock_nested(lock, subclass)

# define raw_spin_lock_nest_lock(lock, nest_lock)                       \
         do {                                                           \
                 typecheck(struct lockdep_map *, &(nest_lock)->dep_map);\
                 _raw_spin_lock_nest_lock(lock, &(nest_lock)->dep_map); \
         } while (0)
#else
/*
 * Always evaluate the 'subclass' argument to avoid that the compiler
 * warns about set-but-not-used variables when building with
 * CONFIG_DEBUG_LOCK_ALLOC=y and with W=1.
 */
# define raw_spin_lock_nested(lock, subclass)           \
        _raw_spin_lock(((void)(subclass), (lock)))
# define raw_spin_lock_nest_lock(lock, nest_lock)       _raw_spin_lock(lock)
#endif
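
/*
 * A sketch of why the _nested variant exists; my_move() and its locks
 * are illustrative.  Acquiring two locks of the same lock class would
 * normally trigger a lockdep warning, so the second acquisition is
 * annotated with a distinct subclass:
 *
 *      static void my_move(struct my_obj *src, struct my_obj *dst)
 *      {
 *              raw_spin_lock(&src->lock);
 *              raw_spin_lock_nested(&dst->lock, SINGLE_DEPTH_NESTING);
 *              ...
 *              raw_spin_unlock(&dst->lock);
 *              raw_spin_unlock(&src->lock);
 *      }
 *
 * The annotation only silences lockdep; callers must still ensure a
 * consistent locking order (e.g. by address) themselves.
 */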

#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)

#define raw_spin_lock_irqsave(lock, flags)              \
        do {                                            \
                typecheck(unsigned long, flags);        \
                flags = _raw_spin_lock_irqsave(lock);   \
        } while (0)

#ifdef CONFIG_DEBUG_LOCK_ALLOC
#define raw_spin_lock_irqsave_nested(lock, flags, subclass)             \
        do {                                                            \
                typecheck(unsigned long, flags);                        \
                flags = _raw_spin_lock_irqsave_nested(lock, subclass);  \
        } while (0)
#else
#define raw_spin_lock_irqsave_nested(lock, flags, subclass)             \
        do {                                                            \
                typecheck(unsigned long, flags);                        \
                flags = _raw_spin_lock_irqsave(lock);                   \
        } while (0)
#endif

#else

#define raw_spin_lock_irqsave(lock, flags)              \
        do {                                            \
                typecheck(unsigned long, flags);        \
                _raw_spin_lock_irqsave(lock, flags);    \
        } while (0)

#define raw_spin_lock_irqsave_nested(lock, flags, subclass)     \
        raw_spin_lock_irqsave(lock, flags)

#endif

#define raw_spin_lock_irq(lock)         _raw_spin_lock_irq(lock)
#define raw_spin_lock_bh(lock)          _raw_spin_lock_bh(lock)
#define raw_spin_unlock(lock)           _raw_spin_unlock(lock)
#define raw_spin_unlock_irq(lock)       _raw_spin_unlock_irq(lock)

#define raw_spin_unlock_irqrestore(lock, flags)                 \
        do {                                                    \
                typecheck(unsigned long, flags);                \
                _raw_spin_unlock_irqrestore(lock, flags);       \
        } while (0)
#define raw_spin_unlock_bh(lock)        _raw_spin_unlock_bh(lock)

#define raw_spin_trylock_bh(lock) \
        __cond_lock(lock, _raw_spin_trylock_bh(lock))

#define raw_spin_trylock_irq(lock) \
({ \
        local_irq_disable(); \
        raw_spin_trylock(lock) ? \
        1 : ({ local_irq_enable(); 0; }); \
})

#define raw_spin_trylock_irqsave(lock, flags) \
({ \
        local_irq_save(flags); \
        raw_spin_trylock(lock) ? \
        1 : ({ local_irq_restore(flags); 0; }); \
})
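
/*
 * A minimal usage sketch for the trylock-with-irqsave form above;
 * my_lock and my_try_work() are illustrative names.  On failure the
 * saved interrupt state is restored and the caller can retry later:
 *
 *      static DEFINE_RAW_SPINLOCK(my_lock);
 *
 *      static bool my_try_work(void)
 *      {
 *              unsigned long flags;
 *
 *              if (!raw_spin_trylock_irqsave(&my_lock, flags))
 *                      return false;
 *              ...
 *              raw_spin_unlock_irqrestore(&my_lock, flags);
 *              return true;
 *      }
 */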

/* Include rwlock functions */
#include <linux/rwlock.h>

/*
 * Pull the _spin_*()/_read_*()/_write_*() functions/declarations:
 */
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
# include <linux/spinlock_api_smp.h>
#else
# include <linux/spinlock_api_up.h>
#endif

/*
 * Map the spin_lock functions to the raw variants for PREEMPT_RT
 * compatibility:
 */
static __always_inline raw_spinlock_t *spinlock_check(spinlock_t *lock)
{
        return &lock->rlock;
}

#define spin_lock_init(_lock)                           \
do {                                                    \
        spinlock_check(_lock);                          \
        raw_spin_lock_init(&(_lock)->rlock);            \
} while (0)

static __always_inline void spin_lock(spinlock_t *lock)
{
        raw_spin_lock(&lock->rlock);
}

static __always_inline void spin_lock_bh(spinlock_t *lock)
{
        raw_spin_lock_bh(&lock->rlock);
}

static __always_inline int spin_trylock(spinlock_t *lock)
{
        return raw_spin_trylock(&lock->rlock);
}

#define spin_lock_nested(lock, subclass)                        \
do {                                                            \
        raw_spin_lock_nested(spinlock_check(lock), subclass);   \
} while (0)

#define spin_lock_nest_lock(lock, nest_lock)                            \
do {                                                                    \
        raw_spin_lock_nest_lock(spinlock_check(lock), nest_lock);       \
} while (0)

static __always_inline void spin_lock_irq(spinlock_t *lock)
{
        raw_spin_lock_irq(&lock->rlock);
}

#define spin_lock_irqsave(lock, flags)                          \
do {                                                            \
        raw_spin_lock_irqsave(spinlock_check(lock), flags);     \
} while (0)
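
/*
 * The canonical pattern for data shared with an interrupt handler;
 * my_lock, my_list and my_add() are illustrative names.  flags must be
 * an unsigned long in the caller's own frame:
 *
 *      static DEFINE_SPINLOCK(my_lock);
 *      static LIST_HEAD(my_list);
 *
 *      static void my_add(struct list_head *item)
 *      {
 *              unsigned long flags;
 *
 *              spin_lock_irqsave(&my_lock, flags);
 *              list_add_tail(item, &my_list);
 *              spin_unlock_irqrestore(&my_lock, flags);
 *      }
 */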

#define spin_lock_irqsave_nested(lock, flags, subclass)                 \
do {                                                                    \
        raw_spin_lock_irqsave_nested(spinlock_check(lock), flags, subclass); \
} while (0)

static __always_inline void spin_unlock(spinlock_t *lock)
{
        raw_spin_unlock(&lock->rlock);
}

static __always_inline void spin_unlock_bh(spinlock_t *lock)
{
        raw_spin_unlock_bh(&lock->rlock);
}

static __always_inline void spin_unlock_irq(spinlock_t *lock)
{
        raw_spin_unlock_irq(&lock->rlock);
}

static __always_inline void spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
{
        raw_spin_unlock_irqrestore(&lock->rlock, flags);
}

static __always_inline int spin_trylock_bh(spinlock_t *lock)
{
        return raw_spin_trylock_bh(&lock->rlock);
}

static __always_inline int spin_trylock_irq(spinlock_t *lock)
{
        return raw_spin_trylock_irq(&lock->rlock);
}

#define spin_trylock_irqsave(lock, flags)                       \
({                                                              \
        raw_spin_trylock_irqsave(spinlock_check(lock), flags);  \
})

/**
 * spin_is_locked() - Check whether a spinlock is locked.
 * @lock: Pointer to the spinlock.
 *
 * This function is NOT required to provide any memory ordering
 * guarantees; it could be used for debugging purposes or, when
 * additional synchronization is needed, accompanied with other
 * constructs (memory barriers) enforcing the synchronization.
 *
 * Returns: 1 if @lock is locked, 0 otherwise.
 *
 * Note that the function only tells you that the spinlock is
 * seen to be locked, not that it is locked on your CPU.
 *
 * Further, on CONFIG_SMP=n builds with CONFIG_DEBUG_SPINLOCK=n,
 * the return value is always 0 (see include/linux/spinlock_up.h).
 * Therefore you should not rely heavily on the return value.
 */
static __always_inline int spin_is_locked(spinlock_t *lock)
{
        return raw_spin_is_locked(&lock->rlock);
}

static __always_inline int spin_is_contended(spinlock_t *lock)
{
        return raw_spin_is_contended(&lock->rlock);
}

#define assert_spin_locked(lock)        assert_raw_spin_locked(&(lock)->rlock)
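
/*
 * A sketch of the intended use of assert_spin_locked(); my_obj and
 * __my_update() are illustrative.  Helpers whose contract requires the
 * caller to hold the lock can document and check that requirement:
 *
 *      static void __my_update(struct my_obj *obj)
 *      {
 *              assert_spin_locked(&obj->lock);
 *              obj->state++;
 *      }
 *
 * Unlike spin_is_locked(), which merely reports an observation, the
 * assertion complains loudly when the lock is not held.
 */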

/*
 * Pull the atomic_t declaration:
 * (asm-mips/atomic.h needs above definitions)
 */
#include <linux/atomic.h>

/**
 * atomic_dec_and_lock - lock on reaching reference count zero
 * @atomic: the atomic counter
 * @lock: the spinlock in question
 *
 * Decrements @atomic by 1.  If the result is 0, returns true and locks
 * @lock.  Returns false for all other cases.
 */
extern int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock);
#define atomic_dec_and_lock(atomic, lock) \
                __cond_lock(lock, _atomic_dec_and_lock(atomic, lock))

extern int _atomic_dec_and_lock_irqsave(atomic_t *atomic, spinlock_t *lock,
                                        unsigned long *flags);
#define atomic_dec_and_lock_irqsave(atomic, lock, flags) \
                __cond_lock(lock, _atomic_dec_and_lock_irqsave(atomic, lock, &(flags)))
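
/*
 * The classic use of atomic_dec_and_lock() is releasing an object
 * whose teardown must happen under a list lock; my_obj, my_list_lock
 * and my_put() are illustrative.  The lock is taken only on the final
 * reference drop:
 *
 *      static void my_put(struct my_obj *obj)
 *      {
 *              if (!atomic_dec_and_lock(&obj->refcount, &my_list_lock))
 *                      return;
 *              list_del(&obj->node);
 *              spin_unlock(&my_list_lock);
 *              kfree(obj);
 *      }
 */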

int __alloc_bucket_spinlocks(spinlock_t **locks, unsigned int *lock_mask,
                             size_t max_size, unsigned int cpu_mult,
                             gfp_t gfp, const char *name,
                             struct lock_class_key *key);

#define alloc_bucket_spinlocks(locks, lock_mask, max_size, cpu_mult, gfp)    \
        ({                                                                   \
                static struct lock_class_key key;                            \
                int ret;                                                     \
                                                                             \
                ret = __alloc_bucket_spinlocks(locks, lock_mask, max_size,   \
                                               cpu_mult, gfp, #locks, &key); \
                ret;                                                         \
        })
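
/*
 * A minimal sketch of the bucket-lock helpers above; my_locks,
 * my_lock_mask and the hash usage are illustrative.  One lock protects
 * each group of hash buckets instead of a single global lock:
 *
 *      static spinlock_t *my_locks;
 *      static unsigned int my_lock_mask;
 *
 *      static int my_init(void)
 *      {
 *              return alloc_bucket_spinlocks(&my_locks, &my_lock_mask,
 *                                            1024, 0, GFP_KERNEL);
 *      }
 *
 *      static void my_lock_bucket(u32 hash)
 *      {
 *              spin_lock(&my_locks[hash & my_lock_mask]);
 *      }
 *
 * free_bucket_spinlocks(my_locks) releases the array on teardown.
 */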

void free_bucket_spinlocks(spinlock_t *locks);

#endif /* __LINUX_SPINLOCK_H */