1
2
3
4
5
6
7
8#ifndef _LINUX_RWSEM_H
9#define _LINUX_RWSEM_H
10
11#include <linux/linkage.h>
12
13#include <linux/types.h>
14#include <linux/kernel.h>
15#include <linux/list.h>
16#include <linux/spinlock.h>
17#include <linux/atomic.h>
18#include <linux/err.h>
19#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
20#include <linux/osq_lock.h>
21#endif
22
23
24
25
26
27
28
29
30
31
32
33
34
/*
 * The read-write semaphore.  count holds the lock state / reader count;
 * owner tracks the writing task.  NOTE(review): the exact bit encodings
 * of count and owner are defined in kernel/locking/rwsem.c, not visible
 * in this header -- confirm there before relying on them.
 */
struct rw_semaphore {
	atomic_long_t count;
	/*
	 * NOTE(review): presumably the owning task pointer (plus state
	 * bits) packed into a long; see RWSEM_OWNER_UNKNOWN below --
	 * confirm against kernel/locking/rwsem.c.
	 */
	atomic_long_t owner;
#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
	/* queue for optimistic spinning on the lock owner */
	struct optimistic_spin_queue osq;
#endif
	raw_spinlock_t wait_lock;	/* serializes access to wait_list */
	struct list_head wait_list;	/* tasks blocked on this rwsem */
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	struct lockdep_map dep_map;	/* lockdep validation state */
#endif
};
52
53
54
55
56
/*
 * Special ->owner value indicating the writer is not known to the lock
 * (e.g. ownership was handed off), so spinning on the owner is futile.
 * NOTE(review): semantics are enforced in kernel/locking/rwsem.c --
 * confirm there.
 */
#define RWSEM_OWNER_UNKNOWN (-2L)
58
59
60static inline int rwsem_is_locked(struct rw_semaphore *sem)
61{
62 return atomic_long_read(&sem->count) != 0;
63}
64
/* count value of an rwsem with no readers and no writer */
#define RWSEM_UNLOCKED_VALUE 0L
#define __RWSEM_INIT_COUNT(name) .count = ATOMIC_LONG_INIT(RWSEM_UNLOCKED_VALUE)
67
68
69
/* Optional lockdep map initializer: empty unless lock debugging is on. */
#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define __RWSEM_DEP_MAP_INIT(lockname) , .dep_map = { .name = #lockname }
#else
# define __RWSEM_DEP_MAP_INIT(lockname)
#endif

/* Optional osq initializer: empty unless optimistic spinning is on. */
#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
#define __RWSEM_OPT_INIT(lockname) , .osq = OSQ_LOCK_UNLOCKED
#else
#define __RWSEM_OPT_INIT(lockname)
#endif

/*
 * Static initializer for a struct rw_semaphore: unlocked count, no
 * owner, empty wait list; the optional parts are appended by the
 * helper macros above (which expand to nothing when unconfigured).
 */
#define __RWSEM_INITIALIZER(name)				\
	{ __RWSEM_INIT_COUNT(name),				\
	  .owner = ATOMIC_LONG_INIT(0),				\
	  .wait_list = LIST_HEAD_INIT((name).wait_list),	\
	  .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(name.wait_lock)	\
	  __RWSEM_OPT_INIT(name)				\
	  __RWSEM_DEP_MAP_INIT(name) }
89
/* Declare and statically initialize an rwsem in one step. */
#define DECLARE_RWSEM(name) \
	struct rw_semaphore name = __RWSEM_INITIALIZER(name)

/* Runtime initialization; @key supplies the lock's lockdep class. */
extern void __init_rwsem(struct rw_semaphore *sem, const char *name,
			 struct lock_class_key *key);

/*
 * Each init_rwsem() call site gets its own static lock_class_key, so
 * lockdep can distinguish rwsems initialized at different places.
 */
#define init_rwsem(sem)					\
do {							\
	static struct lock_class_key __key;		\
							\
	__init_rwsem((sem), #sem, &__key);		\
} while (0)
102
103
104
105
106
107
108
109static inline int rwsem_is_contended(struct rw_semaphore *sem)
110{
111 return !list_empty(&sem->wait_list);
112}
113
114
115
116
/*
 * lock for reading; the _killable variant can abort while sleeping if a
 * fatal signal arrives (NOTE(review): exact return codes live in
 * kernel/locking/rwsem.c -- confirm there).
 */
extern void down_read(struct rw_semaphore *sem);
extern int __must_check down_read_killable(struct rw_semaphore *sem);

/*
 * trylock for reading -- returns nonzero if successful, 0 if contention
 */
extern int down_read_trylock(struct rw_semaphore *sem);

/*
 * lock for writing
 */
extern void down_write(struct rw_semaphore *sem);
extern int __must_check down_write_killable(struct rw_semaphore *sem);

/*
 * trylock for writing -- returns nonzero if successful, 0 if contention
 */
extern int down_write_trylock(struct rw_semaphore *sem);

/*
 * release a read lock
 */
extern void up_read(struct rw_semaphore *sem);

/*
 * release a write lock
 */
extern void up_write(struct rw_semaphore *sem);

/*
 * downgrade a held write lock to a read lock without releasing it
 */
extern void downgrade_write(struct rw_semaphore *sem);
150
#ifdef CONFIG_DEBUG_LOCK_ALLOC
/*
 * Nested locking annotations.  rwsems must not be acquired recursively
 * by the same task, but several locks of the same lock class may be
 * held at once provided the ordering is consistent; the _nested()
 * variants pass an explicit lockdep subclass to annotate that ordering.
 *
 * down_write_nest_lock() instead tells lockdep that taking @sem is safe
 * because @nest_lock (which must itself be lockdep-tracked -- enforced
 * by the typecheck()) is already held, serializing such acquisitions.
 */
extern void down_read_nested(struct rw_semaphore *sem, int subclass);
extern void down_write_nested(struct rw_semaphore *sem, int subclass);
extern int down_write_killable_nested(struct rw_semaphore *sem, int subclass);
extern void _down_write_nest_lock(struct rw_semaphore *sem, struct lockdep_map *nest_lock);

/*
 * Fix: dropped the stray ';' after "while (0)".  The do/while(0) idiom
 * exists so the macro plus the caller's own semicolon forms exactly one
 * statement; the trailing ';' produced an extra empty statement, which
 * breaks uses like: if (cond) down_write_nest_lock(a, b); else ...
 */
# define down_write_nest_lock(sem, nest_lock)			\
do {								\
	typecheck(struct lockdep_map *, &(nest_lock)->dep_map);	\
	_down_write_nest_lock(sem, &(nest_lock)->dep_map);	\
} while (0)

/*
 * Acquire/release the read lock on behalf of a different ("non-owner")
 * task, bypassing lockdep's owner tracking.
 */
extern void down_read_non_owner(struct rw_semaphore *sem);
extern void up_read_non_owner(struct rw_semaphore *sem);
#else
/* Without lockdep, the annotated variants collapse to the plain ops. */
# define down_read_nested(sem, subclass) down_read(sem)
# define down_write_nest_lock(sem, nest_lock) down_write(sem)
# define down_write_nested(sem, subclass) down_write(sem)
# define down_write_killable_nested(sem, subclass) down_write_killable(sem)
# define down_read_non_owner(sem) down_read(sem)
# define up_read_non_owner(sem) up_read(sem)
#endif
192
193#endif
194