1#ifndef __LINUX_SEQLOCK_H
2#define __LINUX_SEQLOCK_H
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35#include <linux/spinlock.h>
36#include <linux/preempt.h>
37#include <asm/processor.h>
38
39
40
41
42
43
44
/*
 * seqcount_t - writer-side sequence counter.
 *
 * Even value: no write in progress.  Odd value: a writer is mid-update.
 * Readers sample the counter before and after loading the protected data
 * and retry if it changed.  The counter alone provides no writer
 * serialization; writers must supply their own (see seqlock_t below for
 * a spinlock-paired variant).
 */
typedef struct seqcount {
	unsigned sequence;
} seqcount_t;

/* Static initializer: counter starts at 0 (even), i.e. no writer active. */
#define SEQCNT_ZERO { 0 }
#define seqcount_init(x) do { *(x) = (seqcount_t) SEQCNT_ZERO; } while (0)
51
52
/*
 * __seqcount_init() - runtime initialization of a seqcount_t.
 *
 * @name and @key are unused in this variant; they keep the signature
 * compatible with a debug/lockdep-enabled implementation of the same
 * API — NOTE(review): presumably for CONFIG_DEBUG_LOCK_ALLOC builds,
 * confirm against the full tree.
 */
static inline void __seqcount_init(seqcount_t *s, const char *name,
				   struct lock_class_key *key)
{
	s->sequence = 0;
}
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72static inline unsigned __read_seqcount_begin(const seqcount_t *s)
73{
74 unsigned ret;
75
76repeat:
77 ret = READ_ONCE(s->sequence);
78 if (unlikely(ret & 1)) {
79 cpu_relax();
80 goto repeat;
81 }
82 return ret;
83}
84
85
86
87
88
89
90
91
92
93
/*
 * raw_read_seqcount() - read the raw counter value, odd or even.
 *
 * Unlike __read_seqcount_begin() this does not wait for an even value;
 * if a writer is active the returned odd value simply makes the later
 * retry check fail.  The smp_rmb() orders the counter load before the
 * caller's subsequent loads of the protected data.
 */
static inline unsigned raw_read_seqcount(const seqcount_t *s)
{
	unsigned ret = READ_ONCE(s->sequence);
	smp_rmb();
	return ret;
}
100
101
102
103
104
105
106
107
108
109
/*
 * read_seqcount_begin() - begin a seqcount read section.
 *
 * Waits for any in-progress write to finish, then returns the even
 * snapshot to be passed to read_seqcount_retry().  The smp_rmb() orders
 * the counter load before the caller's loads of the protected data.
 */
static inline unsigned read_seqcount_begin(const seqcount_t *s)
{
	unsigned ret = __read_seqcount_begin(s);
	smp_rmb();
	return ret;
}
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
/*
 * raw_seqcount_begin() - begin a read section without waiting on writers.
 *
 * Clearing the low bit (& ~1) turns an odd (writer-active) value into an
 * even one, so instead of spinning here the racing read section is
 * guaranteed to fail the later read_seqcount_retry() check and be
 * retried by the caller.  The smp_rmb() orders the counter load before
 * the subsequent data loads.
 */
static inline unsigned raw_seqcount_begin(const seqcount_t *s)
{
	unsigned ret = READ_ONCE(s->sequence);
	smp_rmb();
	return ret & ~1;
}
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
/*
 * __read_seqcount_retry() - end a read section, no barrier.
 *
 * Returns non-zero if the counter changed since @start, i.e. the read
 * section raced with a writer and must be retried.  No smp_rmb() here:
 * callers that have not ordered their data loads themselves must use
 * read_seqcount_retry() instead.
 */
static inline int __read_seqcount_retry(const seqcount_t *s, unsigned start)
{
	return unlikely(s->sequence != start);
}
156
157
158
159
160
161
162
163
164
165
166
/*
 * read_seqcount_retry() - end a read section, with barrier.
 *
 * The smp_rmb() orders the caller's loads of the protected data before
 * the re-read of the counter; non-zero return means the section raced
 * with a writer and must be retried.
 */
static inline int read_seqcount_retry(const seqcount_t *s, unsigned start)
{
	smp_rmb();
	return __read_seqcount_retry(s, start);
}
172
173
174
175
176
177
/*
 * raw_write_seqcount_latch() - single counter increment with barriers.
 *
 * The first smp_wmb() orders the writer's prior stores before the
 * counter bump; the second orders the bump before any following stores.
 * NOTE(review): the name suggests the "latch" technique (readers pick
 * one of two data copies based on counter parity) — confirm against the
 * call sites, which are not visible here.
 */
static inline void raw_write_seqcount_latch(seqcount_t *s)
{
	smp_wmb();
	s->sequence++;
	smp_wmb();
}
184
185
186
187
188
/*
 * write_seqcount_begin() - start a write section.
 *
 * The increment makes the counter odd, signalling readers that an
 * update is in flight; smp_wmb() orders that store before the writer's
 * stores to the protected data.  The caller must already hold whatever
 * serializes writers against each other.
 */
static inline void write_seqcount_begin(seqcount_t *s)
{
	s->sequence++;
	smp_wmb();
}
194
/*
 * write_seqcount_end() - finish a write section.
 *
 * smp_wmb() orders the writer's data stores before the increment that
 * returns the counter to an even value, letting readers proceed.
 */
static inline void write_seqcount_end(seqcount_t *s)
{
	smp_wmb();
	s->sequence++;
}
200
201
202
203
204
205
206
207
/*
 * write_seqcount_barrier() - invalidate in-progress read sections.
 *
 * Adding 2 preserves the counter's parity, so this never makes readers
 * wait the way an odd value would — it only forces any read section
 * that sampled the old value to fail its retry check.  smp_wmb()
 * orders the writer's prior stores before the counter change.
 */
static inline void write_seqcount_barrier(seqcount_t *s)
{
	smp_wmb();
	s->sequence+=2;
}
213
214typedef struct {
215 struct seqcount seqcount;
216 spinlock_t lock;
217} seqlock_t;
218
219
220
221
222
/* Static initializer for a seqlock_t: counter 0, spinlock unlocked. */
#define __SEQLOCK_UNLOCKED(lockname)			\
	{						\
		.seqcount = SEQCNT_ZERO,		\
		.lock = __SPIN_LOCK_UNLOCKED(lockname)	\
	}

/* Runtime initializer for a seqlock_t. */
#define seqlock_init(x)					\
	do {						\
		seqcount_init(&(x)->seqcount);		\
		spin_lock_init(&(x)->lock);		\
	} while (0)

/* Define and statically initialize a seqlock_t in one step. */
#define DEFINE_SEQLOCK(x) \
		seqlock_t x = __SEQLOCK_UNLOCKED(x)
237
238
239
240
/*
 * read_seqbegin() - begin a lockless seqlock read section.
 * Returns the snapshot to pass to read_seqretry().
 */
static inline unsigned read_seqbegin(const seqlock_t *sl)
{
	return read_seqcount_begin(&sl->seqcount);
}
245
/*
 * read_seqretry() - end a lockless seqlock read section.
 * Non-zero return means the section raced with a writer; retry it.
 */
static inline unsigned read_seqretry(const seqlock_t *sl, unsigned start)
{
	return read_seqcount_retry(&sl->seqcount, start);
}
250
251
252
253
254
255
/*
 * write_seqlock() - acquire the seqlock for writing.
 * The spinlock must be taken first: it serializes writers, and only
 * then is the counter bumped to the odd (write-in-progress) state.
 */
static inline void write_seqlock(seqlock_t *sl)
{
	spin_lock(&sl->lock);
	write_seqcount_begin(&sl->seqcount);
}
261
/*
 * write_sequnlock() - release the seqlock after writing.
 * The counter returns to even before the spinlock is dropped —
 * the mirror image of write_seqlock().
 */
static inline void write_sequnlock(seqlock_t *sl)
{
	write_seqcount_end(&sl->seqcount);
	spin_unlock(&sl->lock);
}
267
/* write_seqlock() variant that also disables bottom halves. */
static inline void write_seqlock_bh(seqlock_t *sl)
{
	spin_lock_bh(&sl->lock);
	write_seqcount_begin(&sl->seqcount);
}
273
/* Counterpart of write_seqlock_bh(): end the write, re-enable BHs. */
static inline void write_sequnlock_bh(seqlock_t *sl)
{
	write_seqcount_end(&sl->seqcount);
	spin_unlock_bh(&sl->lock);
}
279
/* write_seqlock() variant that also disables local interrupts. */
static inline void write_seqlock_irq(seqlock_t *sl)
{
	spin_lock_irq(&sl->lock);
	write_seqcount_begin(&sl->seqcount);
}
285
/* Counterpart of write_seqlock_irq(): end the write, re-enable IRQs. */
static inline void write_sequnlock_irq(seqlock_t *sl)
{
	write_seqcount_end(&sl->seqcount);
	spin_unlock_irq(&sl->lock);
}
291
/*
 * __write_seqlock_irqsave() - write_seqlock() saving the IRQ state.
 * Returns the saved flags to pass to write_sequnlock_irqrestore().
 */
static inline unsigned long __write_seqlock_irqsave(seqlock_t *sl)
{
	unsigned long flags;

	spin_lock_irqsave(&sl->lock, flags);
	write_seqcount_begin(&sl->seqcount);
	return flags;
}

/* Macro wrapper so @flags can be assigned in place, matching the
 * spin_lock_irqsave() calling convention. */
#define write_seqlock_irqsave(lock, flags)				\
	do { flags = __write_seqlock_irqsave(lock); } while (0)
303
/* Counterpart of write_seqlock_irqsave(): end the write and restore
 * the interrupt state captured at lock time. */
static inline void
write_sequnlock_irqrestore(seqlock_t *sl, unsigned long flags)
{
	write_seqcount_end(&sl->seqcount);
	spin_unlock_irqrestore(&sl->lock, flags);
}
310
311
312
313
314
315
/*
 * read_seqlock_excl() - exclusive (locking) read.
 * Takes only the spinlock, excluding writers; the sequence counter is
 * deliberately untouched, so lockless readers are not disturbed.
 */
static inline void read_seqlock_excl(seqlock_t *sl)
{
	spin_lock(&sl->lock);
}
320
/* Release an exclusive read taken with read_seqlock_excl(). */
static inline void read_sequnlock_excl(seqlock_t *sl)
{
	spin_unlock(&sl->lock);
}
325
326
327
328
329
330
331
332
333
334
335
336static inline void read_seqbegin_or_lock(seqlock_t *lock, int *seq)
337{
338 if (!(*seq & 1))
339 *seq = read_seqbegin(lock);
340 else
341 read_seqlock_excl(lock);
342}
343
344static inline int need_seqretry(seqlock_t *lock, int seq)
345{
346 return !(seq & 1) && read_seqretry(lock, seq);
347}
348
/*
 * done_seqretry() - finish a read_seqbegin_or_lock() section.
 * Only a locked pass (odd @seq) holds the spinlock and must drop it;
 * a lockless pass has nothing to release.
 */
static inline void done_seqretry(seqlock_t *lock, int seq)
{
	if (seq & 1)
		read_sequnlock_excl(lock);
}
354
/* read_seqlock_excl() variant that also disables bottom halves. */
static inline void read_seqlock_excl_bh(seqlock_t *sl)
{
	spin_lock_bh(&sl->lock);
}
359
/* Counterpart of read_seqlock_excl_bh(). */
static inline void read_sequnlock_excl_bh(seqlock_t *sl)
{
	spin_unlock_bh(&sl->lock);
}
364
/* read_seqlock_excl() variant that also disables local interrupts. */
static inline void read_seqlock_excl_irq(seqlock_t *sl)
{
	spin_lock_irq(&sl->lock);
}
369
/* Counterpart of read_seqlock_excl_irq(). */
static inline void read_sequnlock_excl_irq(seqlock_t *sl)
{
	spin_unlock_irq(&sl->lock);
}
374
/*
 * __read_seqlock_excl_irqsave() - exclusive read saving the IRQ state.
 * Returns the saved flags for read_sequnlock_excl_irqrestore().
 */
static inline unsigned long __read_seqlock_excl_irqsave(seqlock_t *sl)
{
	unsigned long flags;

	spin_lock_irqsave(&sl->lock, flags);
	return flags;
}

/* Macro wrapper so @flags can be assigned in place, matching the
 * spin_lock_irqsave() calling convention. */
#define read_seqlock_excl_irqsave(lock, flags)				\
	do { flags = __read_seqlock_excl_irqsave(lock); } while (0)
385
/* Counterpart of read_seqlock_excl_irqsave(): drop the lock and restore
 * the interrupt state captured at lock time. */
static inline void
read_sequnlock_excl_irqrestore(seqlock_t *sl, unsigned long flags)
{
	spin_unlock_irqrestore(&sl->lock, flags);
}
391
392static inline unsigned long read_seqbegin_or_lock_irqsave(seqlock_t *lock,
393 int *seq)
394{
395 unsigned long flags = 0;
396 if (!(*seq & 1))
397 *seq = read_seqbegin(lock);
398 else
399 read_seqlock_excl_irqsave(lock, flags);
400 return flags;
401}
402
/*
 * done_seqretry_irqrestore() - finish a read_seqbegin_or_lock_irqsave()
 * section.  Only a locked pass (odd @seq) holds the lock and must
 * release it, restoring the saved interrupt state.
 */
static inline void done_seqretry_irqrestore(seqlock_t *lock, int seq,
					    unsigned long flags)
{
	if (seq & 1)
		read_sequnlock_excl_irqrestore(lock, flags);
}
409#endif
410