/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_SPINLOCK_H
#define __LINUX_SPINLOCK_H

/*
 * include/linux/spinlock.h - generic spinlock/rwlock declarations
 */
#include <linux/typecheck.h>
#include <linux/preempt.h>
#include <linux/linkage.h>
#include <linux/compiler.h>
#include <linux/irqflags.h>
#include <linux/thread_info.h>
#include <linux/stringify.h>
#include <linux/bottom_half.h>
#include <linux/lockdep.h>
#include <asm/barrier.h>
#include <asm/mmiowb.h>

/*
 * Must define these before including other files, inline functions need them
 */
#define LOCK_SECTION_NAME ".text..lock."KBUILD_BASENAME

#define LOCK_SECTION_START(extra)		\
	".subsection 1\n\t"			\
	extra					\
	".ifndef " LOCK_SECTION_NAME "\n\t"	\
	LOCK_SECTION_NAME ":\n\t"		\
	".endif\n"

#define LOCK_SECTION_END			\
	".previous\n\t"

#define __lockfunc __section(".spinlock.text")

/*
 * Pull the arch_spinlock_t and arch_rwlock_t definitions:
 */
#include <linux/spinlock_types.h>

/*
 * Pull the arch_spin*() functions/declarations (UP-nondebug doesn't need them):
 */
#ifdef CONFIG_SMP
# include <asm/spinlock.h>
#else
# include <linux/spinlock_up.h>
#endif

#ifdef CONFIG_DEBUG_SPINLOCK
  extern void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
				   struct lock_class_key *key, short inner);

# define raw_spin_lock_init(lock)					\
do {									\
	static struct lock_class_key __key;				\
									\
	__raw_spin_lock_init((lock), #lock, &__key, LD_WAIT_SPIN);	\
} while (0)

#else
# define raw_spin_lock_init(lock)				\
	do { *(lock) = __RAW_SPIN_LOCK_UNLOCKED(lock); } while (0)
#endif

#define raw_spin_is_locked(lock)	arch_spin_is_locked(&(lock)->raw_lock)

#ifdef arch_spin_is_contended
#define raw_spin_is_contended(lock)	arch_spin_is_contended(&(lock)->raw_lock)
#else
#define raw_spin_is_contended(lock)	(((void)(lock), 0))
#endif

/*
 * smp_mb__after_spinlock() provides the equivalent of a full memory barrier
 * between program-order earlier lock acquisitions and program-order later
 * memory accesses. spin_lock() on its own is only an ACQUIRE, so callers
 * that rely on full ordering must issue this barrier while holding the lock.
 * Architectures whose lock operations do not already provide this ordering
 * override the default, which is only a KCSAN ordering annotation.
 */
#ifndef smp_mb__after_spinlock
#define smp_mb__after_spinlock()	kcsan_mb()
#endif
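/*
 * Illustrative sketch (not part of the original header), showing the kind of
 * store-buffering pattern smp_mb__after_spinlock() is meant for; 'x', 'y',
 * 's', 'r0' and 'r1' are made-up names:
 *
 *	CPU 0				CPU 1
 *	-----				-----
 *	WRITE_ONCE(x, 1);		WRITE_ONCE(y, 1);
 *	spin_lock(&s);			smp_mb();
 *	smp_mb__after_spinlock();	r1 = READ_ONCE(x);
 *	r0 = READ_ONCE(y);
 *	spin_unlock(&s);
 *
 * With the barrier in place the outcome r0 == 0 && r1 == 0 is forbidden;
 * with only the ACQUIRE of spin_lock() it would be allowed.
 */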

#ifdef CONFIG_DEBUG_SPINLOCK
 extern void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock);
 extern int do_raw_spin_trylock(raw_spinlock_t *lock);
 extern void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock);
#else
static inline void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock)
{
	__acquire(lock);
	arch_spin_lock(&lock->raw_lock);
	mmiowb_spin_lock();
}

static inline int do_raw_spin_trylock(raw_spinlock_t *lock)
{
	int ret = arch_spin_trylock(&(lock)->raw_lock);

	if (ret)
		mmiowb_spin_lock();

	return ret;
}

static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock)
{
	mmiowb_spin_unlock();
	arch_spin_unlock(&lock->raw_lock);
	__release(lock);
}
#endif

/*
 * Define the various spin_lock methods. Note that we define these
 * regardless of whether CONFIG_SMP or CONFIG_PREEMPT are set; the various
 * methods are defined as nops in the case they are not required.
 */
#define raw_spin_trylock(lock)	__cond_lock(lock, _raw_spin_trylock(lock))

#define raw_spin_lock(lock)	_raw_spin_lock(lock)

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define raw_spin_lock_nested(lock, subclass) \
	_raw_spin_lock_nested(lock, subclass)

# define raw_spin_lock_nest_lock(lock, nest_lock)			\
	 do {								\
		 typecheck(struct lockdep_map *, &(nest_lock)->dep_map);\
		 _raw_spin_lock_nest_lock(lock, &(nest_lock)->dep_map);	\
	 } while (0)
#else
/*
 * Always evaluate the 'subclass' argument to avoid that the compiler
 * warns about set-but-not-used variable when building with
 * CONFIG_DEBUG_LOCK_ALLOC=n and with W=1.
 */
# define raw_spin_lock_nested(lock, subclass)		\
	_raw_spin_lock(((void)(subclass), (lock)))
# define raw_spin_lock_nest_lock(lock, nest_lock)	_raw_spin_lock(lock)
#endif

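/*
 * Illustrative sketch (not part of the original header): the _nested()
 * variant tells lockdep that taking a second lock of the same class is
 * intentional, e.g. when locking a parent and a child object of the same
 * type; 'parent' and 'child' are made-up names:
 *
 *	raw_spin_lock(&parent->lock);
 *	raw_spin_lock_nested(&child->lock, SINGLE_DEPTH_NESTING);
 *	...
 *	raw_spin_unlock(&child->lock);
 *	raw_spin_unlock(&parent->lock);
 */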
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)

#define raw_spin_lock_irqsave(lock, flags)		\
	do {						\
		typecheck(unsigned long, flags);	\
		flags = _raw_spin_lock_irqsave(lock);	\
	} while (0)

#ifdef CONFIG_DEBUG_LOCK_ALLOC
#define raw_spin_lock_irqsave_nested(lock, flags, subclass)		\
	do {								\
		typecheck(unsigned long, flags);			\
		flags = _raw_spin_lock_irqsave_nested(lock, subclass);	\
	} while (0)
#else
#define raw_spin_lock_irqsave_nested(lock, flags, subclass)	\
	do {							\
		typecheck(unsigned long, flags);		\
		flags = _raw_spin_lock_irqsave(lock);		\
	} while (0)
#endif

#else

#define raw_spin_lock_irqsave(lock, flags)		\
	do {						\
		typecheck(unsigned long, flags);	\
		_raw_spin_lock_irqsave(lock, flags);	\
	} while (0)

#define raw_spin_lock_irqsave_nested(lock, flags, subclass)	\
	raw_spin_lock_irqsave(lock, flags)

#endif

#define raw_spin_lock_irq(lock)		_raw_spin_lock_irq(lock)
#define raw_spin_lock_bh(lock)		_raw_spin_lock_bh(lock)
#define raw_spin_unlock(lock)		_raw_spin_unlock(lock)
#define raw_spin_unlock_irq(lock)	_raw_spin_unlock_irq(lock)

#define raw_spin_unlock_irqrestore(lock, flags)		\
	do {							\
		typecheck(unsigned long, flags);		\
		_raw_spin_unlock_irqrestore(lock, flags);	\
	} while (0)
#define raw_spin_unlock_bh(lock)	_raw_spin_unlock_bh(lock)

#define raw_spin_trylock_bh(lock) \
	__cond_lock(lock, _raw_spin_trylock_bh(lock))

#define raw_spin_trylock_irq(lock) \
({ \
	local_irq_disable(); \
	raw_spin_trylock(lock) ? \
	1 : ({ local_irq_enable(); 0; }); \
})

#define raw_spin_trylock_irqsave(lock, flags) \
({ \
	local_irq_save(flags); \
	raw_spin_trylock(lock) ? \
	1 : ({ local_irq_restore(flags); 0; }); \
})

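/*
 * Illustrative sketch (not part of the original header): the trylock_irq*
 * variants return 1 with the lock held and interrupts disabled, or 0 with
 * the interrupt state already restored, so only the success path needs an
 * unlock; 'lock' and 'flags' are made-up names:
 *
 *	unsigned long flags;
 *
 *	if (raw_spin_trylock_irqsave(&lock, flags)) {
 *		...
 *		raw_spin_unlock_irqrestore(&lock, flags);
 *	}
 */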
#ifndef CONFIG_PREEMPT_RT
/* Include rwlock functions for !RT: */
#include <linux/rwlock.h>
#endif

/*
 * Pull the _spin_*()/_read_*()/_write_*() functions/declarations:
 */
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
# include <linux/spinlock_api_smp.h>
#else
# include <linux/spinlock_api_up.h>
#endif

/* Non PREEMPT_RT kernel, map to raw spinlocks: */
#ifndef CONFIG_PREEMPT_RT

/*
 * Map the spin_lock functions to the raw variants for PREEMPT_RT=n
 */

static __always_inline raw_spinlock_t *spinlock_check(spinlock_t *lock)
{
	return &lock->rlock;
}

#ifdef CONFIG_DEBUG_SPINLOCK

# define spin_lock_init(lock)					\
do {								\
	static struct lock_class_key __key;			\
								\
	__raw_spin_lock_init(spinlock_check(lock),		\
			     #lock, &__key, LD_WAIT_CONFIG);	\
} while (0)

#else

# define spin_lock_init(_lock)			\
do {						\
	spinlock_check(_lock);			\
	*(_lock) = __SPIN_LOCK_UNLOCKED(_lock);	\
} while (0)

#endif
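/*
 * Illustrative sketch (not part of the original header): locks are either
 * defined statically with DEFINE_SPINLOCK() or embedded in a structure and
 * initialized with spin_lock_init() before first use; 'my_dev' is a
 * made-up type:
 *
 *	static DEFINE_SPINLOCK(global_lock);
 *
 *	struct my_dev {
 *		spinlock_t lock;
 *	};
 *
 *	static void my_dev_init(struct my_dev *d)
 *	{
 *		spin_lock_init(&d->lock);
 *	}
 */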

static __always_inline void spin_lock(spinlock_t *lock)
{
	raw_spin_lock(&lock->rlock);
}

static __always_inline void spin_lock_bh(spinlock_t *lock)
{
	raw_spin_lock_bh(&lock->rlock);
}

static __always_inline int spin_trylock(spinlock_t *lock)
{
	return raw_spin_trylock(&lock->rlock);
}

#define spin_lock_nested(lock, subclass)			\
do {								\
	raw_spin_lock_nested(spinlock_check(lock), subclass);	\
} while (0)

#define spin_lock_nest_lock(lock, nest_lock)				\
do {									\
	raw_spin_lock_nest_lock(spinlock_check(lock), nest_lock);	\
} while (0)

static __always_inline void spin_lock_irq(spinlock_t *lock)
{
	raw_spin_lock_irq(&lock->rlock);
}

#define spin_lock_irqsave(lock, flags)				\
do {								\
	raw_spin_lock_irqsave(spinlock_check(lock), flags);	\
} while (0)

#define spin_lock_irqsave_nested(lock, flags, subclass)			\
do {									\
	raw_spin_lock_irqsave_nested(spinlock_check(lock), flags, subclass); \
} while (0)

static __always_inline void spin_unlock(spinlock_t *lock)
{
	raw_spin_unlock(&lock->rlock);
}

static __always_inline void spin_unlock_bh(spinlock_t *lock)
{
	raw_spin_unlock_bh(&lock->rlock);
}

static __always_inline void spin_unlock_irq(spinlock_t *lock)
{
	raw_spin_unlock_irq(&lock->rlock);
}

static __always_inline void spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
{
	raw_spin_unlock_irqrestore(&lock->rlock, flags);
}

static __always_inline int spin_trylock_bh(spinlock_t *lock)
{
	return raw_spin_trylock_bh(&lock->rlock);
}

static __always_inline int spin_trylock_irq(spinlock_t *lock)
{
	return raw_spin_trylock_irq(&lock->rlock);
}

#define spin_trylock_irqsave(lock, flags)			\
({								\
	raw_spin_trylock_irqsave(spinlock_check(lock), flags); \
})
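/*
 * Illustrative sketch (not part of the original header): the irqsave
 * variants are the safe default when a lock may also be taken from hard
 * interrupt context; 'dev' and its members are made-up names:
 *
 *	unsigned long flags;
 *
 *	spin_lock_irqsave(&dev->lock, flags);
 *	dev->count++;
 *	spin_unlock_irqrestore(&dev->lock, flags);
 */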

/**
 * spin_is_locked() - Check whether a spinlock is locked.
 * @lock: Pointer to the spinlock.
 *
 * This function is NOT required to provide any memory ordering guarantees;
 * it is only useful for debugging and heuristics, or when combined with
 * other constructs (e.g. memory barriers) that enforce the needed ordering.
 *
 * Returns: 1 if @lock is locked, 0 otherwise.
 */
static __always_inline int spin_is_locked(spinlock_t *lock)
{
	return raw_spin_is_locked(&lock->rlock);
}

static __always_inline int spin_is_contended(spinlock_t *lock)
{
	return raw_spin_is_contended(&lock->rlock);
}

#define assert_spin_locked(lock)	assert_raw_spin_locked(&(lock)->rlock)

#else  /* !CONFIG_PREEMPT_RT */
# include <linux/spinlock_rt.h>
#endif /* CONFIG_PREEMPT_RT */

/*
 * Pull the atomic_t declaration:
 */
#include <linux/atomic.h>

/**
 * atomic_dec_and_lock - lock on reaching reference count zero
 * @atomic: the atomic counter
 * @lock: the spinlock in question
 *
 * Decrements @atomic by 1. If the result is 0, returns true and locks
 * @lock. Returns false for all other cases.
 */
extern int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock);
#define atomic_dec_and_lock(atomic, lock) \
		__cond_lock(lock, _atomic_dec_and_lock(atomic, lock))

extern int _atomic_dec_and_lock_irqsave(atomic_t *atomic, spinlock_t *lock,
					unsigned long *flags);
#define atomic_dec_and_lock_irqsave(atomic, lock, flags) \
		__cond_lock(lock, _atomic_dec_and_lock_irqsave(atomic, lock, &(flags)))

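/*
 * Illustrative sketch (not part of the original header): the classic use is
 * dropping a reference where the final put must also take a list lock to
 * unlink the object; 'obj', 'refcnt', 'node' and 'all_objs_lock' are
 * made-up names:
 *
 *	if (atomic_dec_and_lock(&obj->refcnt, &all_objs_lock)) {
 *		list_del(&obj->node);
 *		spin_unlock(&all_objs_lock);
 *		kfree(obj);
 *	}
 */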
int __alloc_bucket_spinlocks(spinlock_t **locks, unsigned int *lock_mask,
			     size_t max_size, unsigned int cpu_mult,
			     gfp_t gfp, const char *name,
			     struct lock_class_key *key);

#define alloc_bucket_spinlocks(locks, lock_mask, max_size, cpu_mult, gfp)    \
	({								     \
		static struct lock_class_key key;			     \
		int ret;						     \
									     \
		ret = __alloc_bucket_spinlocks(locks, lock_mask, max_size,   \
					       cpu_mult, gfp, #locks, &key); \
		ret;							     \
	})
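/*
 * Illustrative sketch (not part of the original header): allocate an array
 * of bucket locks for a hash table and pick one by masking the hash value;
 * 'table_locks', 'lock_mask' and 'hash' are made-up names, and cpu_mult is
 * assumed to scale the number of locks with the CPU count:
 *
 *	spinlock_t *table_locks;
 *	unsigned int lock_mask;
 *	int ret;
 *
 *	ret = alloc_bucket_spinlocks(&table_locks, &lock_mask, 1024, 1, GFP_KERNEL);
 *	if (ret)
 *		return ret;
 *
 *	spin_lock(&table_locks[hash & lock_mask]);
 *	...
 *	spin_unlock(&table_locks[hash & lock_mask]);
 *
 *	free_bucket_spinlocks(table_locks);
 */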

void free_bucket_spinlocks(spinlock_t *locks);

#endif /* __LINUX_SPINLOCK_H */