/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_SPINLOCK_H
#define __LINUX_SPINLOCK_H

/*
 * include/linux/spinlock.h - generic spinlock/rwlock declarations
 *
 * Role of the various spinlock/rwlock related include files:
 *
 * on SMP builds:
 *
 *  asm/spinlock_types.h: contains the arch_spinlock_t/arch_rwlock_t and the
 *                        initializers
 *
 *  linux/spinlock_types.h:
 *                        defines the generic type and initializers
 *
 *  asm/spinlock.h:       contains the arch_spin_*()/etc. lowlevel
 *                        implementations, mostly inline assembly code
 *
 *   (also included on UP-debug builds:)
 *
 *  linux/spinlock_api_smp.h:
 *                        contains the prototypes for the _spin_*() APIs.
 *
 *  linux/spinlock.h:     builds the final spin_*() APIs.
 *
 * on UP builds:
 *
 *  linux/spinlock_type_up.h:
 *                        contains the generic, simplified UP spinlock type.
 *                        (which is an empty structure on non-debug builds)
 *
 *  linux/spinlock_types.h:
 *                        defines the generic type and initializers
 *
 *  linux/spinlock_up.h:
 *                        contains the arch_spin_*()/etc. version of UP
 *                        builds. (which are NOPs on non-debug, non-preempt
 *                        builds)
 *
 *   (included on UP-non-debug builds:)
 *
 *  linux/spinlock_api_up.h:
 *                        builds the _spin_*() APIs.
 *
 *  linux/spinlock.h:     builds the final spin_*() APIs.
 */

#include <linux/typecheck.h>
#include <linux/preempt.h>
#include <linux/linkage.h>
#include <linux/compiler.h>
#include <linux/irqflags.h>
#include <linux/thread_info.h>
#include <linux/kernel.h>
#include <linux/stringify.h>
#include <linux/bottom_half.h>
#include <asm/barrier.h>
#include <asm/mmiowb.h>

/*
 * Must define these before including other files, inline functions need them
 */
#define LOCK_SECTION_NAME ".text..lock."KBUILD_BASENAME

#define LOCK_SECTION_START(extra)		\
	".subsection 1\n\t"			\
	extra					\
	".ifndef " LOCK_SECTION_NAME "\n\t"	\
	LOCK_SECTION_NAME ":\n\t"		\
	".endif\n"

#define LOCK_SECTION_END			\
	".previous\n\t"

#define __lockfunc __attribute__((section(".spinlock.text")))

/*
 * Pull the arch_spinlock_t and arch_rwlock_t definitions:
 */
#include <linux/spinlock_types.h>

/*
 * Pull the arch_spin*() functions/declarations (UP-nondebug doesn't need them):
 */
#ifdef CONFIG_SMP
# include <asm/spinlock.h>
#else
# include <linux/spinlock_up.h>
#endif

#ifdef CONFIG_DEBUG_SPINLOCK
  extern void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
				   struct lock_class_key *key, short inner);

# define raw_spin_lock_init(lock)					\
do {									\
	static struct lock_class_key __key;				\
									\
	__raw_spin_lock_init((lock), #lock, &__key, LD_WAIT_SPIN);	\
} while (0)

#else
# define raw_spin_lock_init(lock)				\
	do { *(lock) = __RAW_SPIN_LOCK_UNLOCKED(lock); } while (0)
#endif

#define raw_spin_is_locked(lock)	arch_spin_is_locked(&(lock)->raw_lock)

#ifdef arch_spin_is_contended
#define raw_spin_is_contended(lock)	arch_spin_is_contended(&(lock)->raw_lock)
#else
#define raw_spin_is_contended(lock)	(((void)(lock), 0))
#endif

/*
 * smp_mb__after_spinlock() provides the equivalent of a full memory barrier
 * between program-order earlier lock acquisitions and program-order later
 * memory accesses; spin_lock() itself only guarantees ACQUIRE ordering.
 *
 * The fallback below is a no-op: it is suitable for architectures (such as
 * the TSO ones) whose spin_lock() already implies a full barrier.  Weaker
 * architectures must override it in their asm headers with a real barrier.
 */
#ifndef smp_mb__after_spinlock
#define smp_mb__after_spinlock()	do { } while (0)
#endif
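
/*
 * Illustrative sketch, not part of this header: the canonical user is the
 * scheduler's sleep/wakeup handshake (see __schedule() and try_to_wake_up()).
 * The names 'cond', 'lock' and 'task' below are hypothetical:
 *
 *	waker				sleeper
 *
 *	WRITE_ONCE(cond, 1);		current->state = TASK_UNINTERRUPTIBLE;
 *	raw_spin_lock(&lock);		smp_mb();
 *	smp_mb__after_spinlock();	if (!READ_ONCE(cond))
 *	r = READ_ONCE(task->state);		schedule();
 *
 * The full barrier guarantees that either the waker observes the sleeper's
 * state or the sleeper observes 'cond', so a wakeup cannot be lost.
 */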

#ifdef CONFIG_DEBUG_SPINLOCK
 extern void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock);
#define do_raw_spin_lock_flags(lock, flags) do_raw_spin_lock(lock)
 extern int do_raw_spin_trylock(raw_spinlock_t *lock);
 extern void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock);
#else
static inline void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock)
{
	__acquire(lock);
	arch_spin_lock(&lock->raw_lock);
	mmiowb_spin_lock();
}

#ifndef arch_spin_lock_flags
#define arch_spin_lock_flags(lock, flags)	arch_spin_lock(lock)
#endif

static inline void
do_raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long *flags) __acquires(lock)
{
	__acquire(lock);
	arch_spin_lock_flags(&lock->raw_lock, *flags);
	mmiowb_spin_lock();
}

static inline int do_raw_spin_trylock(raw_spinlock_t *lock)
{
	int ret = arch_spin_trylock(&(lock)->raw_lock);

	if (ret)
		mmiowb_spin_lock();

	return ret;
}

static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock)
{
	mmiowb_spin_unlock();
	arch_spin_unlock(&lock->raw_lock);
	__release(lock);
}
#endif

/*
 * Define the various spin_lock methods.  The raw_spin_*() macros below map
 * to the _raw_spin_*() implementations selected further down: out of line
 * on SMP/debug builds, trivial preempt/irq manipulation on UP.
 */
#define raw_spin_trylock(lock)	__cond_lock(lock, _raw_spin_trylock(lock))

#define raw_spin_lock(lock)	_raw_spin_lock(lock)

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define raw_spin_lock_nested(lock, subclass) \
	_raw_spin_lock_nested(lock, subclass)

# define raw_spin_lock_nest_lock(lock, nest_lock)			\
	 do {								\
		 typecheck(struct lockdep_map *, &(nest_lock)->dep_map);\
		 _raw_spin_lock_nest_lock(lock, &(nest_lock)->dep_map);	\
	 } while (0)
#else
/*
 * Always evaluate the 'subclass' argument to avoid that the compiler
 * warns about set-but-not-used variables when building with
 * CONFIG_DEBUG_LOCK_ALLOC=n and with W=1.
 */
# define raw_spin_lock_nested(lock, subclass)		\
	_raw_spin_lock(((void)(subclass), (lock)))
# define raw_spin_lock_nest_lock(lock, nest_lock)	_raw_spin_lock(lock)
#endif
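
/*
 * Illustrative sketch, not part of this header: the _nested variant is for
 * legitimately holding two locks of the same lockdep class at once, e.g.
 * taking two hypothetical 'struct foo' locks in address order:
 *
 *	static void lock_two_foos(struct foo *a, struct foo *b)
 *	{
 *		if (a > b)
 *			swap(a, b);
 *		raw_spin_lock(&a->lock);
 *		raw_spin_lock_nested(&b->lock, SINGLE_DEPTH_NESTING);
 *	}
 *
 * SINGLE_DEPTH_NESTING tells lockdep this is an intentional, ordered
 * nesting rather than a self-deadlock.
 */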

#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)

#define raw_spin_lock_irqsave(lock, flags)		\
	do {						\
		typecheck(unsigned long, flags);	\
		flags = _raw_spin_lock_irqsave(lock);	\
	} while (0)

#ifdef CONFIG_DEBUG_LOCK_ALLOC
#define raw_spin_lock_irqsave_nested(lock, flags, subclass)		\
	do {								\
		typecheck(unsigned long, flags);			\
		flags = _raw_spin_lock_irqsave_nested(lock, subclass);	\
	} while (0)
#else
#define raw_spin_lock_irqsave_nested(lock, flags, subclass)		\
	do {								\
		typecheck(unsigned long, flags);			\
		flags = _raw_spin_lock_irqsave(lock);			\
	} while (0)
#endif

#else

#define raw_spin_lock_irqsave(lock, flags)		\
	do {						\
		typecheck(unsigned long, flags);	\
		_raw_spin_lock_irqsave(lock, flags);	\
	} while (0)

#define raw_spin_lock_irqsave_nested(lock, flags, subclass)	\
	raw_spin_lock_irqsave(lock, flags)

#endif

#define raw_spin_lock_irq(lock)		_raw_spin_lock_irq(lock)
#define raw_spin_lock_bh(lock)		_raw_spin_lock_bh(lock)
#define raw_spin_unlock(lock)		_raw_spin_unlock(lock)
#define raw_spin_unlock_irq(lock)	_raw_spin_unlock_irq(lock)

#define raw_spin_unlock_irqrestore(lock, flags)		\
	do {							\
		typecheck(unsigned long, flags);		\
		_raw_spin_unlock_irqrestore(lock, flags);	\
	} while (0)
#define raw_spin_unlock_bh(lock)	_raw_spin_unlock_bh(lock)

#define raw_spin_trylock_bh(lock) \
	__cond_lock(lock, _raw_spin_trylock_bh(lock))

#define raw_spin_trylock_irq(lock) \
({ \
	local_irq_disable(); \
	raw_spin_trylock(lock) ? \
	1 : ({ local_irq_enable(); 0;  }); \
})

#define raw_spin_trylock_irqsave(lock, flags) \
({ \
	local_irq_save(flags); \
	raw_spin_trylock(lock) ? \
	1 : ({ local_irq_restore(flags); 0; }); \
})
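
/*
 * Illustrative sketch, not part of this header: trylock variants return
 * nonzero on success, so they suit contexts that can fall back to other
 * work instead of spinning.  'dev->lock' below is hypothetical:
 *
 *	unsigned long flags;
 *
 *	if (raw_spin_trylock_irqsave(&dev->lock, flags)) {
 *		... critical section, interrupts disabled ...
 *		raw_spin_unlock_irqrestore(&dev->lock, flags);
 *	} else {
 *		... lock contended: defer or retry ...
 *	}
 *
 * On failure the macro itself restores the irq state, so the caller must
 * not call raw_spin_unlock_irqrestore() in that branch.
 */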

/* Include rwlock functions */
#include <linux/rwlock.h>

/*
 * Pull the _spin_*()/_read_*()/_write_*() functions/declarations:
 */
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
# include <linux/spinlock_api_smp.h>
#else
# include <linux/spinlock_api_up.h>
#endif

/*
 * Map the spin_lock functions to the raw variants for PREEMPT_RT=n
 */

static __always_inline raw_spinlock_t *spinlock_check(spinlock_t *lock)
{
	return &lock->rlock;
}

#ifdef CONFIG_DEBUG_SPINLOCK

# define spin_lock_init(lock)					\
do {								\
	static struct lock_class_key __key;			\
								\
	__raw_spin_lock_init(spinlock_check(lock),		\
			     #lock, &__key, LD_WAIT_CONFIG);	\
} while (0)

#else

# define spin_lock_init(_lock)			\
do {						\
	spinlock_check(_lock);			\
	*(_lock) = __SPIN_LOCK_UNLOCKED(_lock);	\
} while (0)

#endif
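
/*
 * Illustrative sketch, not part of this header: static locks are normally
 * declared with DEFINE_SPINLOCK() (see linux/spinlock_types.h), while
 * dynamically allocated ones must be initialized before first use.  The
 * 'struct foo' below is hypothetical:
 *
 *	static DEFINE_SPINLOCK(global_lock);
 *
 *	struct foo *f = kmalloc(sizeof(*f), GFP_KERNEL);
 *	if (f)
 *		spin_lock_init(&f->lock);
 */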

static __always_inline void spin_lock(spinlock_t *lock)
{
	raw_spin_lock(&lock->rlock);
}

static __always_inline void spin_lock_bh(spinlock_t *lock)
{
	raw_spin_lock_bh(&lock->rlock);
}

static __always_inline int spin_trylock(spinlock_t *lock)
{
	return raw_spin_trylock(&lock->rlock);
}

#define spin_lock_nested(lock, subclass)			\
do {								\
	raw_spin_lock_nested(spinlock_check(lock), subclass);	\
} while (0)

#define spin_lock_nest_lock(lock, nest_lock)				\
do {									\
	raw_spin_lock_nest_lock(spinlock_check(lock), nest_lock);	\
} while (0)

static __always_inline void spin_lock_irq(spinlock_t *lock)
{
	raw_spin_lock_irq(&lock->rlock);
}

#define spin_lock_irqsave(lock, flags)				\
do {								\
	raw_spin_lock_irqsave(spinlock_check(lock), flags);	\
} while (0)
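
/*
 * Illustrative sketch, not part of this header: spin_lock_irqsave() is the
 * safe choice when a lock is shared with interrupt context, since it works
 * whether or not interrupts were already disabled.  'my_lock' and
 * 'my_count' below are hypothetical:
 *
 *	static DEFINE_SPINLOCK(my_lock);
 *	static int my_count;
 *
 *	unsigned long flags;
 *
 *	spin_lock_irqsave(&my_lock, flags);
 *	my_count++;
 *	spin_unlock_irqrestore(&my_lock, flags);
 */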

#define spin_lock_irqsave_nested(lock, flags, subclass)			\
do {									\
	raw_spin_lock_irqsave_nested(spinlock_check(lock), flags, subclass); \
} while (0)

static __always_inline void spin_unlock(spinlock_t *lock)
{
	raw_spin_unlock(&lock->rlock);
}

static __always_inline void spin_unlock_bh(spinlock_t *lock)
{
	raw_spin_unlock_bh(&lock->rlock);
}

static __always_inline void spin_unlock_irq(spinlock_t *lock)
{
	raw_spin_unlock_irq(&lock->rlock);
}

static __always_inline void spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
{
	raw_spin_unlock_irqrestore(&lock->rlock, flags);
}

static __always_inline int spin_trylock_bh(spinlock_t *lock)
{
	return raw_spin_trylock_bh(&lock->rlock);
}

static __always_inline int spin_trylock_irq(spinlock_t *lock)
{
	return raw_spin_trylock_irq(&lock->rlock);
}

#define spin_trylock_irqsave(lock, flags)			\
({								\
	raw_spin_trylock_irqsave(spinlock_check(lock), flags); \
})

/**
 * spin_is_locked() - Check whether a spinlock is locked.
 * @lock: Pointer to the spinlock.
 *
 * This function is NOT required to provide any memory ordering
 * guarantees; it could be used for debugging purposes or, when
 * additional synchronization is needed, accompanied with other
 * constructs (memory barriers) enforcing the synchronization.
 *
 * Returns: 1 if @lock is locked, 0 otherwise.
 *
 * Note that the function only tells you that the spinlock is
 * seen to be locked, not that it is locked on your CPU.
 *
 * Further, on CONFIG_SMP=n builds with CONFIG_DEBUG_SPINLOCK=n,
 * the return value is always 0 (see include/linux/spinlock_up.h).
 * Therefore you should not rely heavily on the return value.
 */
static __always_inline int spin_is_locked(spinlock_t *lock)
{
	return raw_spin_is_locked(&lock->rlock);
}

static __always_inline int spin_is_contended(spinlock_t *lock)
{
	return raw_spin_is_contended(&lock->rlock);
}

#define assert_spin_locked(lock)	assert_raw_spin_locked(&(lock)->rlock)

/*
 * Pull the atomic_t declaration:
 * (asm-mips/atomic.h needs above definitions)
 */
#include <linux/atomic.h>

/**
 * atomic_dec_and_lock - lock on reaching reference count zero
 * @atomic: the atomic counter
 * @lock: the spinlock in question
 *
 * Decrements @atomic by 1.  If the result is 0, returns true and locks
 * @lock.  Returns false for all other cases.
 */
extern int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock);
#define atomic_dec_and_lock(atomic, lock) \
		__cond_lock(lock, _atomic_dec_and_lock(atomic, lock))
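
/*
 * Illustrative sketch, not part of this header: the classic user is a
 * release path that must take a lock only when the last reference drops,
 * without racing against concurrent lookups.  'obj', its fields and
 * 'obj_list_lock' below are hypothetical:
 *
 *	if (atomic_dec_and_lock(&obj->refcount, &obj_list_lock)) {
 *		list_del(&obj->node);
 *		spin_unlock(&obj_list_lock);
 *		kfree(obj);
 *	}
 *
 * The final decrement to zero happens with the lock held, so lookups that
 * take the same lock cannot grab a new reference to a dying object.
 */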

extern int _atomic_dec_and_lock_irqsave(atomic_t *atomic, spinlock_t *lock,
					unsigned long *flags);
#define atomic_dec_and_lock_irqsave(atomic, lock, flags) \
		__cond_lock(lock, _atomic_dec_and_lock_irqsave(atomic, lock, &(flags)))

int __alloc_bucket_spinlocks(spinlock_t **locks, unsigned int *lock_mask,
			     size_t max_size, unsigned int cpu_mult,
			     gfp_t gfp, const char *name,
			     struct lock_class_key *key);

#define alloc_bucket_spinlocks(locks, lock_mask, max_size, cpu_mult, gfp)    \
	({								     \
		static struct lock_class_key key;			     \
		int ret;						     \
									     \
		ret = __alloc_bucket_spinlocks(locks, lock_mask, max_size,   \
					       cpu_mult, gfp, #locks, &key); \
		ret;							     \
	})
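
/*
 * Illustrative sketch, not part of this header: bucket spinlocks shard one
 * logical lock across a hash table to reduce contention.  The 'hash_ptr()'
 * keying and sizes below are hypothetical:
 *
 *	spinlock_t *locks;
 *	unsigned int mask;
 *
 *	if (alloc_bucket_spinlocks(&locks, &mask, 1024, 1, GFP_KERNEL))
 *		return -ENOMEM;
 *
 *	spin_lock(&locks[hash_ptr(obj, 32) & mask]);
 *	... operate on obj's bucket ...
 *	spin_unlock(&locks[hash_ptr(obj, 32) & mask]);
 *
 *	free_bucket_spinlocks(locks);
 */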

void free_bucket_spinlocks(spinlock_t *locks);

#endif	/* __LINUX_SPINLOCK_H */