1#ifndef __LINUX_SEQLOCK_H
2#define __LINUX_SEQLOCK_H
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29#include <linux/spinlock.h>
30#include <linux/preempt.h>
31#include <asm/processor.h>
32
/*
 * Reader/writer consistent mechanism that never starves writers: a writer
 * makes @sequence odd (under @lock) for the duration of the update; readers
 * sample the sequence before and after their critical section and retry if
 * it changed or was odd.  Readers never block writers.
 */
typedef struct {
	unsigned sequence;	/* even = no writer active; odd = write in progress */
	spinlock_t lock;	/* serializes writers against each other */
} seqlock_t;
37
38
39
40
41
/*
 * Static initializer: sequence starts at 0 (even, i.e. no writer active)
 * and the embedded spinlock starts unlocked.
 */
#define __SEQLOCK_UNLOCKED(lockname) \
		 { 0, __SPIN_LOCK_UNLOCKED(lockname) }
44
/* Runtime initializer for a seqlock_t; equivalent to __SEQLOCK_UNLOCKED. */
#define seqlock_init(x) \
	do { \
		(x)->sequence = 0; \
		spin_lock_init(&(x)->lock); \
	} while (0)
50
/* Define and statically initialize a seqlock_t named @x. */
#define DEFINE_SEQLOCK(x) \
		seqlock_t x = __SEQLOCK_UNLOCKED(x)
53
54
55
56
57
/*
 * write_seqlock - acquire the write side of a seqlock
 * @sl: seqlock to lock
 *
 * Takes the spinlock (excluding other writers), then makes the sequence
 * odd so concurrent readers see a write in progress.  The smp_wmb()
 * orders the sequence increment before the writer's subsequent data
 * stores; statement order here is load-bearing.
 */
static inline void write_seqlock(seqlock_t *sl)
{
	spin_lock(&sl->lock);
	++sl->sequence;
	smp_wmb();
}
64
/*
 * write_sequnlock - release the write side of a seqlock
 * @sl: seqlock to unlock
 *
 * The smp_wmb() orders the writer's data stores before the sequence
 * increment that makes the count even again, then drops the spinlock.
 */
static inline void write_sequnlock(seqlock_t *sl)
{
	smp_wmb();
	sl->sequence++;
	spin_unlock(&sl->lock);
}
71
/*
 * write_tryseqlock - try to acquire the write side without blocking
 * @sl: seqlock to lock
 *
 * Returns nonzero on success (write side held, sequence made odd as in
 * write_seqlock()), 0 if the spinlock was already held.
 */
static inline int write_tryseqlock(seqlock_t *sl)
{
	int ret = spin_trylock(&sl->lock);

	if (ret) {
		++sl->sequence;
		smp_wmb();	/* order sequence bump before writer's stores */
	}
	return ret;
}
82
83
84static __always_inline unsigned read_seqbegin(const seqlock_t *sl)
85{
86 unsigned ret;
87
88repeat:
89 ret = ACCESS_ONCE(sl->sequence);
90 if (unlikely(ret & 1)) {
91 cpu_relax();
92 goto repeat;
93 }
94 smp_rmb();
95
96 return ret;
97}
98
99
100
101
102
103
/*
 * read_seqretry - end a seqlock read-side critical section
 * @sl: seqlock that was sampled
 * @start: value returned by the matching read_seqbegin()
 *
 * Returns nonzero if a writer ran (sequence changed) since read_seqbegin(),
 * meaning the reader must retry.  The smp_rmb() orders the reader's data
 * loads before this re-read of the sequence.
 */
static __always_inline int read_seqretry(const seqlock_t *sl, unsigned start)
{
	smp_rmb();

	return unlikely(sl->sequence != start);
}
110
111
112
113
114
115
116
117
118
/*
 * The sequence counter alone, without the writer-serializing spinlock.
 * For use when the writer already has its own mutual exclusion (or there
 * is a single writer); the odd/even protocol is the same as seqlock_t.
 */
typedef struct seqcount {
	unsigned sequence;	/* even = stable; odd = write in progress */
} seqcount_t;
122
/* Static and runtime initializers: counter starts even (no writer). */
#define SEQCNT_ZERO { 0 }
#define seqcount_init(x)	do { *(x) = (seqcount_t) SEQCNT_ZERO; } while (0)
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139static inline unsigned __read_seqcount_begin(const seqcount_t *s)
140{
141 unsigned ret;
142
143repeat:
144 ret = s->sequence;
145 if (unlikely(ret & 1)) {
146 cpu_relax();
147 goto repeat;
148 }
149 return ret;
150}
151
152
153
154
155
156
157
158
159
160
161static inline unsigned read_seqcount_begin(const seqcount_t *s)
162{
163 unsigned ret = __read_seqcount_begin(s);
164 smp_rmb();
165 return ret;
166}
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
/*
 * __read_seqcount_retry - end a seq-read critical section (no barrier)
 * @s: pointer to seqcount_t
 * @start: count, from __read_seqcount_begin()
 * Returns: nonzero if the read section must be retried
 *
 * Like read_seqcount_retry(), but without the leading smp_rmb(); the
 * caller must provide its own barrier before this check.
 */
static inline int __read_seqcount_retry(const seqcount_t *s, unsigned start)
{
	return unlikely(s->sequence != start);
}
186
187
188
189
190
191
192
193
194
195
196
/*
 * read_seqcount_retry - end a seq-read critical section
 * @s: pointer to seqcount_t
 * @start: count, from read_seqcount_begin()
 * Returns: nonzero if a writer ran since read_seqcount_begin()
 *
 * The smp_rmb() orders the reader's data loads before the sequence
 * re-read.
 */
static inline int read_seqcount_retry(const seqcount_t *s, unsigned start)
{
	smp_rmb();

	return __read_seqcount_retry(s, start);
}
203
204
205
206
207
208
/*
 * write_seqcount_begin - start a seqcount write section
 * @s: pointer to seqcount_t
 *
 * Makes the count odd; the smp_wmb() orders the increment before the
 * writer's data stores.  The caller must already hold off other writers.
 */
static inline void write_seqcount_begin(seqcount_t *s)
{
	s->sequence++;
	smp_wmb();
}
214
/*
 * write_seqcount_end - finish a seqcount write section
 * @s: pointer to seqcount_t
 *
 * The smp_wmb() orders the writer's data stores before the increment
 * that makes the count even again.
 */
static inline void write_seqcount_end(seqcount_t *s)
{
	smp_wmb();
	s->sequence++;
}
220
221
222
223
224
225
226
227
228static inline void write_seqcount_barrier(seqcount_t *s)
229{
230 smp_wmb();
231 s->sequence+=2;
232}
233
234
235
236
/*
 * Write-side wrappers that additionally disable interrupts or bottom
 * halves around the seqlock, for data also touched from irq/softirq
 * context.  Ordering: disable first, lock second; unlock before
 * re-enabling.
 */
#define write_seqlock_irqsave(lock, flags) \
	do { local_irq_save(flags); write_seqlock(lock); } while (0)
#define write_seqlock_irq(lock) \
	do { local_irq_disable(); write_seqlock(lock); } while (0)
#define write_seqlock_bh(lock) \
	do { local_bh_disable(); write_seqlock(lock); } while (0)

#define write_sequnlock_irqrestore(lock, flags) \
	do { write_sequnlock(lock); local_irq_restore(flags); } while(0)
#define write_sequnlock_irq(lock) \
	do { write_sequnlock(lock); local_irq_enable(); } while(0)
#define write_sequnlock_bh(lock) \
	do { write_sequnlock(lock); local_bh_enable(); } while(0)

/*
 * Read-side wrappers with interrupts disabled across the section;
 * read_seqretry_irqrestore() evaluates the retry check before restoring
 * the saved irq state and returns that result.
 */
#define read_seqbegin_irqsave(lock, flags) \
	({ local_irq_save(flags); read_seqbegin(lock); })

#define read_seqretry_irqrestore(lock, iv, flags) \
	({ \
		int ret = read_seqretry(lock, iv); \
		local_irq_restore(flags); \
		ret; \
	})
260
261#endif
262