1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29#include "qemu/osdep.h"
30#include "qemu-common.h"
31#include "qemu/rcu.h"
32#include "qemu/atomic.h"
33#include "qemu/thread.h"
34#include "qemu/main-loop.h"
35
36
37
38
39
/* Grace-period counter encoding: bit 0 (RCU_GP_LOCKED) is always set in
 * rcu_gp_ctr so that a reader's snapshot of the counter is never zero;
 * a zero rcu_reader.ctr therefore unambiguously means "not inside a
 * read-side critical section".  RCU_GP_CTR is the lowest bit of the
 * actual generation count (incremented, or toggled on 32-bit hosts, by
 * synchronize_rcu()).
 */
#define RCU_GP_LOCKED (1UL << 0)
#define RCU_GP_CTR (1UL << 1)

/* Current grace-period generation; written only by synchronize_rcu()
 * under rcu_sync_lock, read by readers when they enter a critical
 * section.  Starts with only RCU_GP_LOCKED set. */
unsigned long rcu_gp_ctr = RCU_GP_LOCKED;

/* Signalled to wake synchronize_rcu() while it waits for readers;
 * NOTE(review): presumably set by rcu_read_unlock() in rcu.h when a
 * waiting flag is seen -- confirm against the header. */
QemuEvent rcu_gp_event;
/* Protects the reader registry (see `registry` below). */
static QemuMutex rcu_registry_lock;
/* Serializes concurrent callers of synchronize_rcu(). */
static QemuMutex rcu_sync_lock;
48
49
50
51
52
53static inline int rcu_gp_ongoing(unsigned long *ctr)
54{
55 unsigned long v;
56
57 v = atomic_read(ctr);
58 return v && (v != rcu_gp_ctr);
59}
60
61
62
63
/* Per-thread reader state; ctr == 0 means the thread is outside any
 * RCU read-side critical section. */
__thread struct rcu_reader_data rcu_reader;

/* List of all registered reader threads.  Protected by
 * rcu_registry_lock (register/unregister take it, and
 * wait_for_readers() runs with it held). */
typedef QLIST_HEAD(, rcu_reader_data) ThreadList;
static ThreadList registry = QLIST_HEAD_INITIALIZER(registry);
69
70
71static void wait_for_readers(void)
72{
73 ThreadList qsreaders = QLIST_HEAD_INITIALIZER(qsreaders);
74 struct rcu_reader_data *index, *tmp;
75
76 for (;;) {
77
78
79
80 qemu_event_reset(&rcu_gp_event);
81
82
83
84
85
86
87
88 QLIST_FOREACH(index, ®istry, node) {
89 atomic_set(&index->waiting, true);
90 }
91
92
93
94
95 smp_mb();
96
97 QLIST_FOREACH_SAFE(index, ®istry, node, tmp) {
98 if (!rcu_gp_ongoing(&index->ctr)) {
99 QLIST_REMOVE(index, node);
100 QLIST_INSERT_HEAD(&qsreaders, index, node);
101
102
103
104
105 atomic_set(&index->waiting, false);
106 }
107 }
108
109 if (QLIST_EMPTY(®istry)) {
110 break;
111 }
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130 qemu_mutex_unlock(&rcu_registry_lock);
131 qemu_event_wait(&rcu_gp_event);
132 qemu_mutex_lock(&rcu_registry_lock);
133 }
134
135
136 QLIST_SWAP(®istry, &qsreaders, node);
137}
138
139void synchronize_rcu(void)
140{
141 qemu_mutex_lock(&rcu_sync_lock);
142 qemu_mutex_lock(&rcu_registry_lock);
143
144 if (!QLIST_EMPTY(®istry)) {
145
146
147
148 if (sizeof(rcu_gp_ctr) < 8) {
149
150
151
152
153
154 atomic_mb_set(&rcu_gp_ctr, rcu_gp_ctr ^ RCU_GP_CTR);
155 wait_for_readers();
156 atomic_mb_set(&rcu_gp_ctr, rcu_gp_ctr ^ RCU_GP_CTR);
157 } else {
158
159 atomic_mb_set(&rcu_gp_ctr, rcu_gp_ctr + RCU_GP_CTR);
160 }
161
162 wait_for_readers();
163 }
164
165 qemu_mutex_unlock(&rcu_registry_lock);
166 qemu_mutex_unlock(&rcu_sync_lock);
167}
168
169
/* Batch at least this many callbacks per grace period before the
 * call_rcu thread stops dawdling (see call_rcu_thread). */
#define RCU_CALL_MIN_SIZE 30

/* Multiple-producer, single-consumer callback queue.  A permanent
 * dummy node keeps the queue non-empty so producers never race with an
 * empty->non-empty transition; `tail` points to the next-pointer slot
 * of the last node.  Consumer is call_rcu_thread only.
 */
static struct rcu_head dummy;
static struct rcu_head *head = &dummy, **tail = &dummy.next;
/* Number of callbacks enqueued but not yet taken by the consumer;
 * updated with atomic_inc/atomic_sub. */
static int rcu_call_count;
/* Set by producers to wake the call_rcu thread. */
static QemuEvent rcu_call_ready_event;
179
/* Lock-free enqueue of @node at the queue tail; safe for concurrent
 * producers.  atomic_xchg atomically claims the tail slot; the link
 * from the previous tail is then published with atomic_mb_set.
 * Between the two operations the chain is momentarily broken, which
 * the consumer observes as a NULL next pointer.
 */
static void enqueue(struct rcu_head *node)
{
    struct rcu_head **old_tail;

    node->next = NULL;
    old_tail = atomic_xchg(&tail, &node->next);
    atomic_mb_set(old_tail, node);
}
188
/* Single-consumer dequeue.  Returns the oldest real node, or NULL if
 * the queue is (transiently) empty or a producer's link has not been
 * published yet.  Only call_rcu_thread may call this.
 */
static struct rcu_head *try_dequeue(void)
{
    struct rcu_head *node, *next;

retry:
    /* The consumer only calls this when rcu_call_count said there is
     * work, so a truly empty queue (just the dummy node, with tail
     * still pointing at dummy.next) indicates corruption.
     */
    if (head == &dummy && atomic_mb_read(&tail) == &dummy.next) {
        abort();
    }

    /* If head->next is still NULL, a producer has claimed the tail but
     * not yet published the link (see enqueue); report empty and let
     * the caller wait for rcu_call_ready_event.
     */
    node = head;
    next = atomic_mb_read(&head->next);
    if (!next) {
        return NULL;
    }

    /* Advance past the node being removed; safe without atomics since
     * only this thread writes head.
     */
    head = next;

    /* The dummy node is never handed to callers: recycle it to the
     * back of the queue and retry for a real node. */
    if (node == &dummy) {
        enqueue(node);
        goto retry;
    }

    return node;
}
228
/* Consumer thread: batches queued callbacks, waits one grace period
 * per batch with synchronize_rcu(), then invokes the callbacks under
 * the iothread lock.  Never returns.
 */
static void *call_rcu_thread(void *opaque)
{
    struct rcu_head *node;

    rcu_register_thread();

    for (;;) {
        int tries = 0;
        int n = atomic_read(&rcu_call_count);

        /* Heuristic batching: sleep in 10 ms steps until either
         * RCU_CALL_MIN_SIZE callbacks have piled up or we have waited
         * about five extra polls, so one synchronize_rcu() amortizes
         * over many callbacks.  The reset/recheck/wait dance avoids
         * losing a wakeup that races with qemu_event_reset.
         */
        while (n == 0 || (n < RCU_CALL_MIN_SIZE && ++tries <= 5)) {
            g_usleep(10000);
            if (n == 0) {
                qemu_event_reset(&rcu_call_ready_event);
                n = atomic_read(&rcu_call_count);
                if (n == 0) {
                    qemu_event_wait(&rcu_call_ready_event);
                }
            }
            n = atomic_read(&rcu_call_count);
        }

        /* Claim a snapshot of n callbacks; only those enqueued before
         * the synchronize_rcu() below are safe to run after it.
         */
        atomic_sub(&rcu_call_count, n);
        synchronize_rcu();
        qemu_mutex_lock_iothread();
        while (n > 0) {
            node = try_dequeue();
            /* try_dequeue may transiently return NULL while a producer
             * is mid-enqueue (count already claimed, link not yet
             * published): drop the iothread lock and wait for it.
             */
            while (!node) {
                qemu_mutex_unlock_iothread();
                qemu_event_reset(&rcu_call_ready_event);
                node = try_dequeue();
                if (!node) {
                    qemu_event_wait(&rcu_call_ready_event);
                    node = try_dequeue();
                }
                qemu_mutex_lock_iothread();
            }

            n--;
            node->func(node);
        }
        qemu_mutex_unlock_iothread();
    }
    abort();
}
278
/* Schedule func(node) to run after a grace period.  The caller must
 * keep @node's storage alive until func runs (func typically frees the
 * enclosing object).  Ordering matters: the node is enqueued before
 * the count is bumped, so the consumer never sees a count with no
 * corresponding (eventually visible) node; the event wakes the
 * call_rcu thread last.
 */
void call_rcu1(struct rcu_head *node, void (*func)(struct rcu_head *node))
{
    node->func = func;
    enqueue(node);
    atomic_inc(&rcu_call_count);
    qemu_event_set(&rcu_call_ready_event);
}
286
287void rcu_register_thread(void)
288{
289 assert(rcu_reader.ctr == 0);
290 qemu_mutex_lock(&rcu_registry_lock);
291 QLIST_INSERT_HEAD(®istry, &rcu_reader, node);
292 qemu_mutex_unlock(&rcu_registry_lock);
293}
294
/* Remove the calling thread from the reader registry; must be called
 * before thread exit so synchronize_rcu() does not wait on it forever.
 */
void rcu_unregister_thread(void)
{
    qemu_mutex_lock(&rcu_registry_lock);
    QLIST_REMOVE(&rcu_reader, node);
    qemu_mutex_unlock(&rcu_registry_lock);
}
301
/* Initialize all RCU state and start the call_rcu thread.  Runs both
 * at program startup (from the rcu_init constructor) and again in the
 * child after fork (from rcu_after_fork).
 */
static void rcu_init_complete(void)
{
    QemuThread thread;

    qemu_mutex_init(&rcu_registry_lock);
    qemu_mutex_init(&rcu_sync_lock);
    qemu_event_init(&rcu_gp_event, true);

    qemu_event_init(&rcu_call_ready_event, false);

    /* Detached: the consumer thread loops forever and is never joined.
     * Started before rcu_register_thread so the registry lock is
     * already usable when the new thread registers itself.
     */
    qemu_thread_create(&thread, "call_rcu", call_rcu_thread,
                       NULL, QEMU_THREAD_DETACHED);

    rcu_register_thread();
}
320
321#ifdef CONFIG_POSIX
/* pthread_atfork prepare handler: take both RCU locks (sync before
 * registry, matching synchronize_rcu's order) so no writer or registry
 * update is in flight across the fork. */
static void rcu_init_lock(void)
{
    qemu_mutex_lock(&rcu_sync_lock);
    qemu_mutex_lock(&rcu_registry_lock);
}
327
/* pthread_atfork parent/child handler: release the locks taken by
 * rcu_init_lock, in reverse acquisition order. */
static void rcu_init_unlock(void)
{
    qemu_mutex_unlock(&rcu_registry_lock);
    qemu_mutex_unlock(&rcu_sync_lock);
}
333#endif
334
335void rcu_after_fork(void)
336{
337 memset(®istry, 0, sizeof(registry));
338 rcu_init_complete();
339}
340
/* Runs automatically before main() (GCC/Clang constructor attribute).
 * On POSIX, registers fork handlers so RCU locks are quiesced across
 * fork and released in both parent and child; then performs the full
 * one-time initialization. */
static void __attribute__((__constructor__)) rcu_init(void)
{
#ifdef CONFIG_POSIX
    pthread_atfork(rcu_init_lock, rcu_init_unlock, rcu_init_unlock);
#endif
    rcu_init_complete();
}
348