1#ifndef __LINUX_SEQLOCK_H
2#define __LINUX_SEQLOCK_H
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29#include <linux/spinlock.h>
30#include <linux/preempt.h>
31
/*
 * Reader/writer consistent mechanism that never starves writers: readers
 * sample a sequence counter before and after reading and retry if it
 * changed.  Writers bump the counter to odd on entry and back to even on
 * exit, so an odd value means "write in progress".
 */
typedef struct {
	unsigned sequence;	/* even: no writer active, odd: write in progress */
	spinlock_t lock;	/* serializes writers against each other only */
} seqlock_t;
36
37
38
39
40
/*
 * Static initializer: sequence 0 (even, i.e. no writer active) and an
 * unlocked spinlock.
 */
#define __SEQLOCK_UNLOCKED(lockname) \
		 { 0, __SPIN_LOCK_UNLOCKED(lockname) }

/* Old-style initializer kept for existing users; prefer DEFINE_SEQLOCK(). */
#define SEQLOCK_UNLOCKED \
		 __SEQLOCK_UNLOCKED(old_style_seqlock_init)

/* Runtime initializer for a seqlock_t that was not statically initialized. */
#define seqlock_init(x)					\
	do {						\
		(x)->sequence = 0;			\
		spin_lock_init(&(x)->lock);		\
	} while (0)

/* Define and statically initialize a seqlock in one step. */
#define DEFINE_SEQLOCK(x) \
		seqlock_t x = __SEQLOCK_UNLOCKED(x)
55
56
57
58
59
/*
 * Acquire the write side of @sl: take the spinlock (excluding other
 * writers), then make the sequence odd so concurrent readers can see a
 * write is in progress.  The smp_wmb() orders the counter store before
 * the writer's subsequent data stores as observed by other CPUs --
 * do not reorder these statements.
 */
static inline void write_seqlock(seqlock_t *sl)
{
	spin_lock(&sl->lock);
	++sl->sequence;
	smp_wmb();
}
66
/*
 * Release the write side of @sl.  The smp_wmb() orders the writer's data
 * stores before the counter store; the increment makes the sequence even
 * again, telling readers the write section has ended.
 */
static inline void write_sequnlock(seqlock_t *sl)
{
	smp_wmb();
	sl->sequence++;
	spin_unlock(&sl->lock);
}
73
/*
 * Try to acquire the write side of @sl without spinning on the lock.
 * Returns nonzero (and enters the write section, as write_seqlock())
 * on success, 0 if another writer holds the lock.
 */
static inline int write_tryseqlock(seqlock_t *sl)
{
	int ret = spin_trylock(&sl->lock);

	if (ret) {
		++sl->sequence;
		smp_wmb();	/* counter store before the writer's data stores */
	}
	return ret;
}
84
85
86static __always_inline unsigned read_seqbegin(const seqlock_t *sl)
87{
88 unsigned ret;
89
90repeat:
91 ret = sl->sequence;
92 smp_rmb();
93 if (unlikely(ret & 1)) {
94 cpu_relax();
95 goto repeat;
96 }
97
98 return ret;
99}
100
101
102
103
104
105
/*
 * End a read-side section: return nonzero if the reader must retry.
 * The smp_rmb() orders the reader's data loads before the re-read of
 * the counter; any change since read_seqbegin() returned @start means
 * a writer ran (or is running) concurrently.
 */
static __always_inline int read_seqretry(const seqlock_t *sl, unsigned start)
{
	smp_rmb();

	return unlikely(sl->sequence != start);
}
112
113
114
115
116
117
118
119
120
/*
 * Sequence-counter-only version of the seqlock: no embedded spinlock, so
 * the caller must provide its own writer serialization (e.g. an existing
 * lock protecting the data).
 */
typedef struct seqcount {
	unsigned sequence;	/* even: idle, odd: write in progress */
} seqcount_t;
124
/* Static initializer: counter at 0 (even, no writer). */
#define SEQCNT_ZERO { 0 }
/* Runtime initializer for a seqcount_t. */
#define seqcount_init(x)	do { *(x) = (seqcount_t) SEQCNT_ZERO; } while (0)
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141static inline unsigned __read_seqcount_begin(const seqcount_t *s)
142{
143 unsigned ret;
144
145repeat:
146 ret = s->sequence;
147 if (unlikely(ret & 1)) {
148 cpu_relax();
149 goto repeat;
150 }
151 return ret;
152}
153
154
155
156
157
158
159
160
161
162
/*
 * Begin a seqcount read section: snapshot the counter via
 * __read_seqcount_begin() and add the smp_rmb() that orders the counter
 * load before the reader's subsequent data loads.
 */
static inline unsigned read_seqcount_begin(const seqcount_t *s)
{
	unsigned ret = __read_seqcount_begin(s);
	smp_rmb();
	return ret;
}
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
/*
 * End a seqcount read section WITHOUT the read memory barrier: nonzero
 * means the reader must retry.  The caller is responsible for ordering
 * its data loads before this counter re-read (read_seqcount_retry()
 * does it with an smp_rmb()).
 */
static inline int __read_seqcount_retry(const seqcount_t *s, unsigned start)
{
	return unlikely(s->sequence != start);
}
188
189
190
191
192
193
194
195
196
197
198
/*
 * End a seqcount read section: return nonzero if the reader must retry.
 * The smp_rmb() orders the reader's data loads before the counter
 * re-read; any change since read_seqcount_begin() returned @start means
 * a writer intervened.
 */
static inline int read_seqcount_retry(const seqcount_t *s, unsigned start)
{
	smp_rmb();

	return __read_seqcount_retry(s, start);
}
205
206
207
208
209
210
/*
 * Begin a seqcount write section: make the counter odd so readers spin
 * or retry.  The caller must already hold whatever lock serializes
 * writers.  The smp_wmb() orders the counter store before the writer's
 * data stores -- do not reorder these statements.
 */
static inline void write_seqcount_begin(seqcount_t *s)
{
	s->sequence++;
	smp_wmb();
}
216
/*
 * End a seqcount write section: the smp_wmb() orders the writer's data
 * stores before the counter store; the increment makes the counter even
 * again, releasing the readers.
 */
static inline void write_seqcount_end(seqcount_t *s)
{
	smp_wmb();
	s->sequence++;
}
222
223
224
225
226
227
228
229
230static inline void write_seqcount_barrier(seqcount_t *s)
231{
232 smp_wmb();
233 s->sequence+=2;
234}
235
236
237
238
/*
 * Write-side wrappers that additionally disable interrupts or bottom
 * halves around the whole write section, for data also touched from
 * irq/softirq context.  The disable always happens before taking the
 * lock and the enable after releasing it.
 */
#define write_seqlock_irqsave(lock, flags)				\
	do { local_irq_save(flags); write_seqlock(lock); } while (0)
#define write_seqlock_irq(lock)						\
	do { local_irq_disable();   write_seqlock(lock); } while (0)
#define write_seqlock_bh(lock)						\
        do { local_bh_disable();    write_seqlock(lock); } while (0)

#define write_sequnlock_irqrestore(lock, flags)				\
	do { write_sequnlock(lock); local_irq_restore(flags); } while(0)
#define write_sequnlock_irq(lock)					\
	do { write_sequnlock(lock); local_irq_enable(); } while(0)
#define write_sequnlock_bh(lock)					\
	do { write_sequnlock(lock); local_bh_enable(); } while(0)

/*
 * Read-side wrappers that keep interrupts disabled for the duration of
 * the read section (begin saves/disables, retry restores).
 */
#define read_seqbegin_irqsave(lock, flags)				\
	({ local_irq_save(flags);   read_seqbegin(lock); })

#define read_seqretry_irqrestore(lock, iv, flags)			\
	({								\
		int ret = read_seqretry(lock, iv);			\
		local_irq_restore(flags);				\
		ret;							\
	})
262
263#endif
264