1#ifndef __LINUX_SEQLOCK_H
2#define __LINUX_SEQLOCK_H
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35#include <linux/spinlock.h>
36#include <linux/preempt.h>
37#include <linux/lockdep.h>
38#include <asm/processor.h>
39
40
41
42
43
44
45
/*
 * Sequence counter.  Writers increment ->sequence before and after an
 * update, so an odd value means a write is in progress; readers sample
 * the count and retry if it changed (or was odd) when they finish.
 * The lockdep map exists only with CONFIG_DEBUG_LOCK_ALLOC.
 */
typedef struct seqcount {
	unsigned sequence;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	struct lockdep_map dep_map;
#endif
} seqcount_t;
52
/*
 * __seqcount_init() - initialize a seqcount_t
 * @s:    seqcount to initialize
 * @name: lockdep name (NULL when lockdep is disabled — see seqcount_init())
 * @key:  lockdep class key (NULL when lockdep is disabled)
 *
 * Normally reached via the seqcount_init() wrapper, which supplies
 * @name and @key.
 */
static inline void __seqcount_init(seqcount_t *s, const char *name,
					  struct lock_class_key *key)
{
	/* Register the lockdep class; presumably a no-op without lockdep,
	 * since the !DEBUG_LOCK_ALLOC path passes NULL/NULL. */
	lockdep_init_map(&s->dep_map, name, key, 0);
	s->sequence = 0;
}
62
#ifdef CONFIG_DEBUG_LOCK_ALLOC
/* Static-initializer fragment for the embedded lockdep map. */
# define SEQCOUNT_DEP_MAP_INIT(lockname) \
		.dep_map = { .name = #lockname } \

/* Runtime initializer: each call site gets its own static lockdep key. */
# define seqcount_init(s)				\
	do {						\
		static struct lock_class_key __key;	\
		__seqcount_init((s), #s, &__key);	\
	} while (0)

/*
 * Tell lockdep about a lockless read-side access to @s so that ordering
 * problems involving the seqcount are still detected.  The acquire is
 * immediately released again; IRQs are disabled around the pair —
 * NOTE(review): presumably to avoid false interrupt-safety reports,
 * confirm against upstream history.
 */
static inline void seqcount_lockdep_reader_access(const seqcount_t *s)
{
	seqcount_t *l = (seqcount_t *)s;	/* cast away const for lockdep only */
	unsigned long flags;

	local_irq_save(flags);
	seqcount_acquire_read(&l->dep_map, 0, 0, _RET_IP_);
	seqcount_release(&l->dep_map, 1, _RET_IP_);
	local_irq_restore(flags);
}

#else
/* Lockdep disabled: all three degrade to (near) no-ops. */
# define SEQCOUNT_DEP_MAP_INIT(lockname)
# define seqcount_init(s) __seqcount_init(s, NULL, NULL)
# define seqcount_lockdep_reader_access(x)
#endif
89
/* Static initializer for a seqcount_t. */
#define SEQCNT_ZERO(lockname) { .sequence = 0, SEQCOUNT_DEP_MAP_INIT(lockname)}
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106static inline unsigned __read_seqcount_begin(const seqcount_t *s)
107{
108 unsigned ret;
109
110repeat:
111 ret = ACCESS_ONCE(s->sequence);
112 if (unlikely(ret & 1)) {
113 cpu_relax();
114 goto repeat;
115 }
116 return ret;
117}
118
119
120
121
122
123
124
125
126
127
/**
 * raw_read_seqcount_begin - begin a seq-read section, without lockdep
 * @s: pointer to seqcount_t
 *
 * Like read_seqcount_begin(), but skips the lockdep reader notification.
 * Returns the count to be passed to read_seqcount_retry().
 */
static inline unsigned raw_read_seqcount_begin(const seqcount_t *s)
{
	unsigned ret = __read_seqcount_begin(s);
	/* Order the sequence load before the section's data loads. */
	smp_rmb();
	return ret;
}
134
135
136
137
138
139
140
141
142
143
144static inline unsigned read_seqcount_begin(const seqcount_t *s)
145{
146 seqcount_lockdep_reader_access(s);
147 return raw_read_seqcount_begin(s);
148}
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
/**
 * raw_seqcount_begin - begin a seq-read critical section without spinning
 * @s: pointer to seqcount_t
 *
 * Unlike read_seqcount_begin(), this never waits for a writer: the low
 * (write-in-progress) bit is masked off the snapshot.  If a write was in
 * flight, the returned value cannot match ->sequence later, so the
 * subsequent read_seqcount_retry() is guaranteed to fail and the caller
 * retries at a higher level.
 */
static inline unsigned raw_seqcount_begin(const seqcount_t *s)
{
	unsigned ret = ACCESS_ONCE(s->sequence);

	seqcount_lockdep_reader_access(s);
	/* Order the sequence load before the section's data loads. */
	smp_rmb();
	return ret & ~1;	/* clear the write-in-progress bit */
}
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187static inline int __read_seqcount_retry(const seqcount_t *s, unsigned start)
188{
189 return unlikely(s->sequence != start);
190}
191
192
193
194
195
196
197
198
199
200
201
/**
 * read_seqcount_retry - end a seq-read critical section
 * @s: pointer to seqcount_t
 * @start: count, from read_seqcount_begin()
 *
 * Returns non-zero if the section raced with a writer and the caller
 * must repeat it.
 */
static inline int read_seqcount_retry(const seqcount_t *s, unsigned start)
{
	/* Order the section's data loads before re-checking the count. */
	smp_rmb();
	return __read_seqcount_retry(s, start);
}
207
208
209
/*
 * Begin a write section without lockdep: make the count odd, then fence
 * so the increment is visible before the writer's data stores.
 */
static inline void raw_write_seqcount_begin(seqcount_t *s)
{
	s->sequence++;
	smp_wmb();
}
215
/*
 * End a write section without lockdep: fence so the writer's data stores
 * are visible before the count becomes even again.
 */
static inline void raw_write_seqcount_end(seqcount_t *s)
{
	smp_wmb();
	s->sequence++;
}
221
222
223
224
225
/*
 * Begin a write section, annotating the acquisition with a lockdep
 * @subclass for nested seqcount usage.
 */
static inline void write_seqcount_begin_nested(seqcount_t *s, int subclass)
{
	raw_write_seqcount_begin(s);
	seqcount_acquire(&s->dep_map, subclass, 0, _RET_IP_);
}
231
/* Begin a write section (lockdep subclass 0). */
static inline void write_seqcount_begin(seqcount_t *s)
{
	write_seqcount_begin_nested(s, 0);
}
236
/* End a write section: release the lockdep map, then make the count even. */
static inline void write_seqcount_end(seqcount_t *s)
{
	seqcount_release(&s->dep_map, 1, _RET_IP_);
	raw_write_seqcount_end(s);
}
242
243
244
245
246
247
248
249
/**
 * write_seqcount_barrier - invalidate in-progress read-side seq operations
 * @s: pointer to seqcount_t
 *
 * Bumps the count by two — preserving its parity — after a write barrier,
 * which forces any reader currently inside its section to retry.
 */
static inline void write_seqcount_barrier(seqcount_t *s)
{
	smp_wmb();
	s->sequence+=2;
}
255
/*
 * Sequential lock: a seqcount plus a spinlock that serializes writers
 * (and, via the read_seqlock_excl*() helpers, exclusive readers).
 */
typedef struct {
	struct seqcount seqcount;
	spinlock_t lock;
} seqlock_t;
260
261
262
263
264
/* Static initializer for a seqlock_t. */
#define __SEQLOCK_UNLOCKED(lockname)			\
	{						\
		.seqcount = SEQCNT_ZERO(lockname),	\
		.lock =	__SPIN_LOCK_UNLOCKED(lockname)	\
	}
270
/* Runtime initializer for a seqlock_t (both the count and the spinlock). */
#define seqlock_init(x)					\
	do {						\
		seqcount_init(&(x)->seqcount);		\
		spin_lock_init(&(x)->lock);		\
	} while (0)
276
/* Define and statically initialize a seqlock_t. */
#define DEFINE_SEQLOCK(x) \
		seqlock_t x = __SEQLOCK_UNLOCKED(x)
279
280
281
282
283static inline unsigned read_seqbegin(const seqlock_t *sl)
284{
285 return read_seqcount_begin(&sl->seqcount);
286}
287
288static inline unsigned read_seqretry(const seqlock_t *sl, unsigned start)
289{
290 return read_seqcount_retry(&sl->seqcount, start);
291}
292
293
294
295
296
297
/* Writer lock: take the spinlock first, then open the write section. */
static inline void write_seqlock(seqlock_t *sl)
{
	spin_lock(&sl->lock);
	write_seqcount_begin(&sl->seqcount);
}
303
/* Writer unlock: close the write section before dropping the spinlock. */
static inline void write_sequnlock(seqlock_t *sl)
{
	write_seqcount_end(&sl->seqcount);
	spin_unlock(&sl->lock);
}
309
/* As write_seqlock(), but also disables bottom halves. */
static inline void write_seqlock_bh(seqlock_t *sl)
{
	spin_lock_bh(&sl->lock);
	write_seqcount_begin(&sl->seqcount);
}
315
/* Counterpart of write_seqlock_bh(). */
static inline void write_sequnlock_bh(seqlock_t *sl)
{
	write_seqcount_end(&sl->seqcount);
	spin_unlock_bh(&sl->lock);
}
321
/* As write_seqlock(), but also disables interrupts. */
static inline void write_seqlock_irq(seqlock_t *sl)
{
	spin_lock_irq(&sl->lock);
	write_seqcount_begin(&sl->seqcount);
}
327
/* Counterpart of write_seqlock_irq(). */
static inline void write_sequnlock_irq(seqlock_t *sl)
{
	write_seqcount_end(&sl->seqcount);
	spin_unlock_irq(&sl->lock);
}
333
/*
 * Helper for write_seqlock_irqsave(): takes the lock with IRQs saved and
 * opens the write section; returns the saved flags for the caller's macro.
 */
static inline unsigned long __write_seqlock_irqsave(seqlock_t *sl)
{
	unsigned long flags;

	spin_lock_irqsave(&sl->lock, flags);
	write_seqcount_begin(&sl->seqcount);
	return flags;
}
342
/* Macro wrapper so @flags can be assigned by name, like spin_lock_irqsave(). */
#define write_seqlock_irqsave(lock, flags)				\
	do { flags = __write_seqlock_irqsave(lock); } while (0)
345
/* Counterpart of write_seqlock_irqsave(). */
static inline void
write_sequnlock_irqrestore(seqlock_t *sl, unsigned long flags)
{
	write_seqcount_end(&sl->seqcount);
	spin_unlock_irqrestore(&sl->lock, flags);
}
352
353
354
355
356
357
/*
 * Exclusive (locking) reader: takes the writer spinlock but does not
 * touch the seqcount, so lockless readers are not invalidated.
 */
static inline void read_seqlock_excl(seqlock_t *sl)
{
	spin_lock(&sl->lock);
}
362
/* Counterpart of read_seqlock_excl(). */
static inline void read_sequnlock_excl(seqlock_t *sl)
{
	spin_unlock(&sl->lock);
}
367
368
369
370
371
372
373
374
375
376
377
378static inline void read_seqbegin_or_lock(seqlock_t *lock, int *seq)
379{
380 if (!(*seq & 1))
381 *seq = read_seqbegin(lock);
382 else
383 read_seqlock_excl(lock);
384}
385
386static inline int need_seqretry(seqlock_t *lock, int seq)
387{
388 return !(seq & 1) && read_seqretry(lock, seq);
389}
390
/*
 * Finish a read_seqbegin_or_lock() loop: drop the spinlock if the final
 * pass was the exclusive (odd @seq) one; lockless passes need no cleanup.
 */
static inline void done_seqretry(seqlock_t *lock, int seq)
{
	if (seq & 1)
		read_sequnlock_excl(lock);
}
396
/* As read_seqlock_excl(), but also disables bottom halves. */
static inline void read_seqlock_excl_bh(seqlock_t *sl)
{
	spin_lock_bh(&sl->lock);
}
401
/* Counterpart of read_seqlock_excl_bh(). */
static inline void read_sequnlock_excl_bh(seqlock_t *sl)
{
	spin_unlock_bh(&sl->lock);
}
406
/* As read_seqlock_excl(), but also disables interrupts. */
static inline void read_seqlock_excl_irq(seqlock_t *sl)
{
	spin_lock_irq(&sl->lock);
}
411
/* Counterpart of read_seqlock_excl_irq(). */
static inline void read_sequnlock_excl_irq(seqlock_t *sl)
{
	spin_unlock_irq(&sl->lock);
}
416
/*
 * Helper for read_seqlock_excl_irqsave(): takes the lock with IRQs saved
 * and returns the flags for the caller's macro.
 */
static inline unsigned long __read_seqlock_excl_irqsave(seqlock_t *sl)
{
	unsigned long flags;

	spin_lock_irqsave(&sl->lock, flags);
	return flags;
}
424
/* Macro wrapper so @flags can be assigned by name, like spin_lock_irqsave(). */
#define read_seqlock_excl_irqsave(lock, flags)				\
	do { flags = __read_seqlock_excl_irqsave(lock); } while (0)
427
/* Counterpart of read_seqlock_excl_irqsave(). */
static inline void
read_sequnlock_excl_irqrestore(seqlock_t *sl, unsigned long flags)
{
	spin_unlock_irqrestore(&sl->lock, flags);
}
433
434#endif
435