1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29#include "qemu/osdep.h"
30#include "qemu-common.h"
31#include "qemu/coroutine.h"
32#include "qemu/coroutine_int.h"
33#include "qemu/processor.h"
34#include "qemu/queue.h"
35#include "block/aio.h"
36#include "trace.h"
37
/* Initialize @queue to contain no waiting coroutines.  */
void qemu_co_queue_init(CoQueue *queue)
{
    QSIMPLEQ_INIT(&queue->entries);
}
42
/* Add the current coroutine to the tail of @queue and yield until another
 * coroutine restarts it.  If @mutex is non-NULL it is dropped while the
 * coroutine sleeps and re-acquired before returning, so the caller's
 * critical section resumes intact.
 */
void coroutine_fn qemu_co_queue_wait(CoQueue *queue, CoMutex *mutex)
{
    Coroutine *self = qemu_coroutine_self();
    QSIMPLEQ_INSERT_TAIL(&queue->entries, self, co_queue_next);

    if (mutex) {
        qemu_co_mutex_unlock(mutex);
    }

    /* We are already on the queue when the mutex is released above, so a
     * waker can find us immediately — but it can only actually resume this
     * coroutine after the yield below has run, so no wakeup is lost.
     * NOTE(review): cross-thread wakers appear to go through aio_co_wake
     * (see qemu_co_queue_do_restart); confirm the scheduling guarantee
     * against the coroutine core.
     */
    qemu_coroutine_yield();
    assert(qemu_in_coroutine());

    /* Re-take the mutex so the caller holds it on return, exactly as it
     * did before the wait.
     */
    if (mutex) {
        qemu_co_mutex_lock(mutex);
    }
}
68
69
70
71
72
73
74
75
76
/* Enter every coroutine that was queued on @co's wakeup list while @co was
 * running.  Called by the code that entered @co, after @co has yielded or
 * terminated.
 */
void qemu_co_queue_run_restart(Coroutine *co)
{
    Coroutine *next;
    QSIMPLEQ_HEAD(, Coroutine) tmp_queue_wakeup =
        QSIMPLEQ_HEAD_INITIALIZER(tmp_queue_wakeup);

    trace_qemu_co_queue_run_restart(co);

    /* Move the whole wakeup list onto a local queue before entering any
     * coroutine: a coroutine entered below may resume @co, and if @co then
     * terminates, co->co_queue_wakeup becomes invalid memory.  After this
     * point @co must not be touched again.  If resumed, @co can repopulate
     * its own co_queue_wakeup; whoever resumed it traverses that fresh
     * list, which may reorder wakeups but cannot drop any.
     */
    QSIMPLEQ_CONCAT(&tmp_queue_wakeup, &co->co_queue_wakeup);

    while ((next = QSIMPLEQ_FIRST(&tmp_queue_wakeup))) {
        QSIMPLEQ_REMOVE_HEAD(&tmp_queue_wakeup, co_queue_next);
        qemu_coroutine_enter(next);
    }
}
102
103static bool qemu_co_queue_do_restart(CoQueue *queue, bool single)
104{
105 Coroutine *next;
106
107 if (QSIMPLEQ_EMPTY(&queue->entries)) {
108 return false;
109 }
110
111 while ((next = QSIMPLEQ_FIRST(&queue->entries)) != NULL) {
112 QSIMPLEQ_REMOVE_HEAD(&queue->entries, co_queue_next);
113 aio_co_wake(next);
114 if (single) {
115 break;
116 }
117 }
118 return true;
119}
120
/* Wake the coroutine at the head of @queue, if any.  Must be called from
 * coroutine context.  Returns true if a coroutine was woken.
 */
bool coroutine_fn qemu_co_queue_next(CoQueue *queue)
{
    assert(qemu_in_coroutine());
    return qemu_co_queue_do_restart(queue, true);
}
126
/* Wake every coroutine waiting on @queue.  Must be called from coroutine
 * context.
 */
void coroutine_fn qemu_co_queue_restart_all(CoQueue *queue)
{
    assert(qemu_in_coroutine());
    qemu_co_queue_do_restart(queue, false);
}
132
133bool qemu_co_enter_next(CoQueue *queue)
134{
135 Coroutine *next;
136
137 next = QSIMPLEQ_FIRST(&queue->entries);
138 if (!next) {
139 return false;
140 }
141
142 QSIMPLEQ_REMOVE_HEAD(&queue->entries, co_queue_next);
143 qemu_coroutine_enter(next);
144 return true;
145}
146
147bool qemu_co_queue_empty(CoQueue *queue)
148{
149 return QSIMPLEQ_FIRST(&queue->entries) == NULL;
150}
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
/* One waiter on a CoMutex.  The record lives on the waiting coroutine's
 * stack (see qemu_co_mutex_lock_slowpath) and is linked into the mutex's
 * from_push list, later transferred to to_pop by move_waiters().
 */
typedef struct CoWaitRecord {
    Coroutine *co;                     /* the coroutine to wake */
    QSLIST_ENTRY(CoWaitRecord) next;   /* link in from_push / to_pop */
} CoWaitRecord;
175
/* Record the current coroutine as a waiter on @mutex.  The atomic head
 * insertion makes this safe against concurrent pushers in other threads
 * (multiple-producer side of the waiter queue).
 */
static void push_waiter(CoMutex *mutex, CoWaitRecord *w)
{
    w->co = qemu_coroutine_self();
    QSLIST_INSERT_HEAD_ATOMIC(&mutex->from_push, w, next);
}
181
182static void move_waiters(CoMutex *mutex)
183{
184 QSLIST_HEAD(, CoWaitRecord) reversed;
185 QSLIST_MOVE_ATOMIC(&reversed, &mutex->from_push);
186 while (!QSLIST_EMPTY(&reversed)) {
187 CoWaitRecord *w = QSLIST_FIRST(&reversed);
188 QSLIST_REMOVE_HEAD(&reversed, next);
189 QSLIST_INSERT_HEAD(&mutex->to_pop, w, next);
190 }
191}
192
193static CoWaitRecord *pop_waiter(CoMutex *mutex)
194{
195 CoWaitRecord *w;
196
197 if (QSLIST_EMPTY(&mutex->to_pop)) {
198 move_waiters(mutex);
199 if (QSLIST_EMPTY(&mutex->to_pop)) {
200 return NULL;
201 }
202 }
203 w = QSLIST_FIRST(&mutex->to_pop);
204 QSLIST_REMOVE_HEAD(&mutex->to_pop, next);
205 return w;
206}
207
208static bool has_waiters(CoMutex *mutex)
209{
210 return QSLIST_EMPTY(&mutex->to_pop) || QSLIST_EMPTY(&mutex->from_push);
211}
212
/* Initialize @mutex to the unlocked state.  Zeroing covers every field
 * this file touches: locked == 0, empty from_push/to_pop lists, no
 * pending handoff, NULL ctx and holder.
 */
void qemu_co_mutex_init(CoMutex *mutex)
{
    memset(mutex, 0, sizeof(*mutex));
}
217
/* Hand the mutex to @co: publish @co's AioContext in mutex->ctx (so the
 * spinning fast path in qemu_co_mutex_lock can see where the new holder
 * runs) and schedule @co in its home context.
 */
static void coroutine_fn qemu_co_mutex_wake(CoMutex *mutex, Coroutine *co)
{
    /* Order the read of co->ctx after the read that produced @co.
     * NOTE(review): presumably this pairs with a write barrier when
     * co->ctx is published by the coroutine core — confirm against
     * qemu_coroutine_enter / qemu-coroutine.c.
     */
    smp_read_barrier_depends();
    mutex->ctx = co->ctx;
    aio_co_wake(co);
}
227
/* Contended-path lock: enqueue ourselves as a waiter and yield until a
 * concurrent unlock wakes us.  @ctx is the caller's AioContext, recorded
 * in mutex->ctx if we end up taking the lock without sleeping.
 */
static void coroutine_fn qemu_co_mutex_lock_slowpath(AioContext *ctx,
                                                     CoMutex *mutex)
{
    Coroutine *self = qemu_coroutine_self();
    CoWaitRecord w;          /* stack-allocated; valid until we are woken */
    unsigned old_handoff;

    trace_qemu_co_mutex_lock_entry(mutex, self);
    w.co = self;
    push_waiter(mutex, &w);

    /* "Responsibility hand-off": a concurrent unlock that found no waiter
     * may have published a nonzero mutex->handoff token.  If we can clear
     * that token with cmpxchg, we take over the responsibility of waking
     * one waiter ourselves.
     */
    old_handoff = atomic_mb_read(&mutex->handoff);
    if (old_handoff &&
        has_waiters(mutex) &&
        atomic_cmpxchg(&mutex->handoff, old_handoff, 0) == old_handoff) {
        /* Only one handoff token is live at a time, so no other popper can
         * race with us here.
         */
        CoWaitRecord *to_wake = pop_waiter(mutex);
        Coroutine *co = to_wake->co;
        if (co == self) {
            /* We popped our own record: the lock is ours, no yield needed.  */
            assert(to_wake == &w);
            mutex->ctx = ctx;
            return;
        }

        /* Some earlier waiter is ahead of us; wake it, then go to sleep.  */
        qemu_co_mutex_wake(mutex, co);
    }

    /* Sleep until an unlocker pops our record and wakes us.  */
    qemu_coroutine_yield();
    trace_qemu_co_mutex_lock_return(mutex, self);
}
264
/* Lock @mutex, yielding if it is contended.  On return the calling
 * coroutine holds the mutex (mutex->holder == self).
 */
void coroutine_fn qemu_co_mutex_lock(CoMutex *mutex)
{
    AioContext *ctx = qemu_get_current_aio_context();
    Coroutine *self = qemu_coroutine_self();
    int waiters, i;

    /* Fast path: try to flip locked 0 -> 1.  On failure, spin for a
     * bounded number of iterations hoping the holder releases the mutex,
     * rather than paying for a full sleep/wakeup round trip.
     */
    i = 0;
retry_fast_path:
    waiters = atomic_cmpxchg(&mutex->locked, 0, 1);
    if (waiters != 0) {
        /* Spin only while there is exactly one other party (waiters == 1)
         * and for at most 1000 iterations.
         */
        while (waiters == 1 && ++i < 1000) {
            if (atomic_read(&mutex->ctx) == ctx) {
                /* The holder runs in our own AioContext: it cannot make
                 * progress while we spin, so stop immediately.
                 */
                break;
            }
            if (atomic_read(&mutex->locked) == 0) {
                /* The holder released the mutex; retry the cmpxchg.  */
                goto retry_fast_path;
            }
            cpu_relax();
        }
        /* Give up spinning: register ourselves in the waiter count.  The
         * fetched value tells us below whether we raced to zero holders.
         */
        waiters = atomic_fetch_inc(&mutex->locked);
    }

    if (waiters == 0) {
        /* Uncontended: the cmpxchg above took the lock outright.  */
        trace_qemu_co_mutex_lock_uncontended(mutex, self);
        mutex->ctx = ctx;
    } else {
        qemu_co_mutex_lock_slowpath(ctx, mutex);
    }
    mutex->holder = self;
    self->locks_held++;
}
304
/* Unlock @mutex, waking one waiter if any exists.  Must be called by the
 * coroutine that holds the mutex, from coroutine context.
 */
void coroutine_fn qemu_co_mutex_unlock(CoMutex *mutex)
{
    Coroutine *self = qemu_coroutine_self();

    trace_qemu_co_mutex_unlock_entry(mutex, self);

    assert(mutex->locked);
    assert(mutex->holder == self);
    assert(qemu_in_coroutine());

    mutex->ctx = NULL;
    mutex->holder = NULL;
    self->locks_held--;
    if (atomic_fetch_dec(&mutex->locked) == 1) {
        /* locked was 1: nobody else is waiting, we are done.  */
        return;
    }

    /* locked was > 1: at least one concurrent lock() has registered itself,
     * so we are responsible for waking somebody up.
     */
    for (;;) {
        CoWaitRecord *to_wake = pop_waiter(mutex);
        unsigned our_handoff;

        if (to_wake) {
            qemu_co_mutex_wake(mutex, to_wake->co);
            break;
        }

        /* Nobody popped: a concurrent lock() incremented ->locked but has
         * not pushed its wait record yet.  Publish a handoff token it can
         * pick up.  The sequence counter is bumped first (skipping 0,
         * which means "no handoff") so each token is unique.
         */
        if (++mutex->sequence == 0) {
            mutex->sequence = 1;
        }

        our_handoff = mutex->sequence;
        atomic_mb_set(&mutex->handoff, our_handoff);
        if (!has_waiters(mutex)) {
            /* The concurrent lock() has still not added itself, so it will
             * see and consume our handoff token when it does.
             */
            break;
        }

        /* A waiter appeared after we published the token.  Try to reclaim
         * the token so we can wake the waiter ourselves; if the cmpxchg
         * fails, the concurrent lock() already took it and the wakeup is
         * its responsibility.
         */
        if (atomic_cmpxchg(&mutex->handoff, our_handoff, 0) != our_handoff) {
            break;
        }
    }

    trace_qemu_co_mutex_unlock_return(mutex, self);
}
359
/* Initialize @lock: zero the reader/pending_writer counters, then set up
 * the wait queue and the mutex that protects the counters.
 */
void qemu_co_rwlock_init(CoRwlock *lock)
{
    memset(lock, 0, sizeof(*lock));
    qemu_co_queue_init(&lock->queue);
    qemu_co_mutex_init(&lock->mutex);
}
366
/* Take @lock for shared (read) access.  Multiple readers may hold the
 * lock at once; readers queue behind any writer already waiting.
 */
void qemu_co_rwlock_rdlock(CoRwlock *lock)
{
    Coroutine *self = qemu_coroutine_self();

    qemu_co_mutex_lock(&lock->mutex);
    /* Writer fairness: don't grab a read lock while a writer is in line.  */
    while (lock->pending_writer) {
        qemu_co_queue_wait(&lock->queue, &lock->mutex);
    }
    lock->reader++;
    qemu_co_mutex_unlock(&lock->mutex);

    /* The read-side critical section runs without the mutex; only the
     * coroutine's lock accounting records that we hold the rwlock.
     */
    self->locks_held++;
}
382
/* Release @lock, whether held for reading or writing.  Note the asymmetry:
 * a write holder enters this function already owning lock->mutex (taken in
 * qemu_co_rwlock_wrlock/upgrade and never released), while a read holder
 * must re-acquire it here; both paths drop it at the end.
 */
void qemu_co_rwlock_unlock(CoRwlock *lock)
{
    Coroutine *self = qemu_coroutine_self();

    assert(qemu_in_coroutine());
    if (!lock->reader) {
        /* Write unlock: the mutex critical section started in
         * qemu_co_rwlock_wrlock; wake all waiters (readers and writers).
         */
        qemu_co_queue_restart_all(&lock->queue);
    } else {
        self->locks_held--;

        qemu_co_mutex_lock(&lock->mutex);
        lock->reader--;
        assert(lock->reader >= 0);
        /* Last reader out: wake just one waiter (a pending writer).  */
        if (!lock->reader) {
            qemu_co_queue_next(&lock->queue);
        }
    }
    qemu_co_mutex_unlock(&lock->mutex);
}
404
/* Convert a held write lock into a read lock without ever releasing the
 * rwlock.  Must be called while holding the write lock, i.e. with
 * lock->mutex held (taken in qemu_co_rwlock_wrlock/upgrade).
 */
void qemu_co_rwlock_downgrade(CoRwlock *lock)
{
    Coroutine *self = qemu_coroutine_self();

    /* Holding the write lock implies no readers exist.  */
    assert(lock->reader == 0);
    lock->reader++;
    qemu_co_mutex_unlock(&lock->mutex);

    /* From here on this is an ordinary read-side critical section, run
     * without the mutex; account for it in the coroutine's lock count.
     */
    self->locks_held++;
}
419
/* Take @lock for exclusive (write) access.  */
void qemu_co_rwlock_wrlock(CoRwlock *lock)
{
    qemu_co_mutex_lock(&lock->mutex);
    lock->pending_writer++;   /* blocks new readers while we wait */
    while (lock->reader) {
        qemu_co_queue_wait(&lock->queue, &lock->mutex);
    }
    lock->pending_writer--;

    /* The write-side critical section runs with lock->mutex still held,
     * which is what keeps lock->reader at zero; it is released by
     * qemu_co_rwlock_unlock (or downgrade).  self->locks_held is not
     * incremented here — the held CoMutex already accounts for it.
     */
}
434
/* Convert a held read lock into a write lock.  The calling coroutine must
 * hold @lock for reading; it gives up its reader slot and waits for the
 * remaining readers to drain.
 */
void qemu_co_rwlock_upgrade(CoRwlock *lock)
{
    Coroutine *self = qemu_coroutine_self();

    qemu_co_mutex_lock(&lock->mutex);
    assert(lock->reader > 0);
    lock->reader--;
    lock->pending_writer++;
    while (lock->reader) {
        qemu_co_queue_wait(&lock->queue, &lock->mutex);
    }
    lock->pending_writer--;

    /* As in qemu_co_rwlock_wrlock, the write-side critical section runs
     * with lock->mutex held.  Drop the locks_held count taken by rdlock so
     * the rwlock is not accounted for twice (the held mutex counts once).
     */
    self->locks_held--;
}
454