/*
 * QEMU System Emulator
 *
 * Copyright (c) 2003-2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu-common.h"
#include "block/aio.h"
#include "block/thread-pool.h"
#include "qemu/main-loop.h"
#include "qemu/atomic.h"

/***********************************************************/
/* bottom halves (can be seen as timers which expire ASAP) */

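/* A bottom half (BH) defers a callback into its AioContext's event
 * loop.  Typical usage, as a sketch (my_cb and my_opaque are
 * placeholder names, not part of this file):
 *
 *     QEMUBH *bh = aio_bh_new(ctx, my_cb, my_opaque);
 *     qemu_bh_schedule(bh);   // my_cb(my_opaque) runs in ctx's thread
 *     ...
 *     qemu_bh_delete(bh);     // the BH is freed later, in aio_bh_poll
 */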
struct QEMUBH {
    AioContext *ctx;
    QEMUBHFunc *cb;
    void *opaque;
    QEMUBH *next;
    bool scheduled;
    bool idle;
    bool deleted;
};

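/* Allocate a new bottom half attached to @ctx.  The BH is prepended to
 * the context's list, which aio_bh_poll walks without taking bh_lock,
 * hence the write barrier before publication.
 */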
QEMUBH *aio_bh_new(AioContext *ctx, QEMUBHFunc *cb, void *opaque)
{
    QEMUBH *bh;
    bh = g_new(QEMUBH, 1);
    *bh = (QEMUBH){
        .ctx = ctx,
        .cb = cb,
        .opaque = opaque,
    };
    qemu_mutex_lock(&ctx->bh_lock);
    bh->next = ctx->first_bh;
    /* Make sure that the members are ready before putting bh into list */
    smp_wmb();
    ctx->first_bh = bh;
    qemu_mutex_unlock(&ctx->bh_lock);
    return bh;
}

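/* Invoke a bottom half's callback directly, without touching the
 * scheduled flag.
 */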
void aio_bh_call(QEMUBH *bh)
{
    bh->cb(bh->opaque);
}

/* Multiple occurrences of aio_bh_poll cannot be called concurrently */
int aio_bh_poll(AioContext *ctx)
{
    QEMUBH *bh, **bhp, *next;
    int ret;

    ctx->walking_bh++;

    ret = 0;
    for (bh = ctx->first_bh; bh; bh = next) {
        /* Make sure that fetching bh happens before accessing its members */
        smp_read_barrier_depends();
        next = bh->next;
        /* The atomic_xchg is paired with the one in qemu_bh_schedule.  The
         * implicit memory barrier ensures that the callback sees all writes
         * done by the scheduling thread.  It also ensures that the scheduling
         * thread sees the zero before bh->cb has run, and thus will call
         * aio_notify again if necessary.
         */
        if (!bh->deleted && atomic_xchg(&bh->scheduled, 0)) {
            /* Idle BHs and the notify BH don't count as progress */
            if (!bh->idle && bh != ctx->notify_dummy_bh) {
                ret = 1;
            }
            bh->idle = 0;
            aio_bh_call(bh);
        }
    }

    ctx->walking_bh--;

    /* remove deleted bhs */
    if (!ctx->walking_bh) {
        qemu_mutex_lock(&ctx->bh_lock);
        bhp = &ctx->first_bh;
        while (*bhp) {
            bh = *bhp;
            if (bh->deleted) {
                *bhp = bh->next;
                g_free(bh);
            } else {
                bhp = &bh->next;
            }
        }
        qemu_mutex_unlock(&ctx->bh_lock);
    }

    return ret;
}

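/* Schedule a BH without waking up the event loop: idle BHs are only
 * guaranteed to run within about 10ms (see aio_compute_timeout), so
 * this deliberately does not call aio_notify.
 */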
void qemu_bh_schedule_idle(QEMUBH *bh)
{
    bh->idle = 1;
    /* Make sure that idle & any writes needed by the callback are done
     * before the locations are read in the aio_bh_poll.
     */
    atomic_mb_set(&bh->scheduled, 1);
}

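/* Schedule a BH and wake up the event loop if needed.  Safe to call
 * from any thread; concurrent schedules of an already-scheduled BH
 * coalesce into a single callback invocation.
 */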
void qemu_bh_schedule(QEMUBH *bh)
{
    AioContext *ctx;

    ctx = bh->ctx;
    bh->idle = 0;
    /* The memory barrier implicit in atomic_xchg makes sure that:
     * 1. idle & any writes needed by the callback are done before the
     *    locations are read in the aio_bh_poll.
     * 2. ctx is loaded before scheduled is set and the callback has a chance
     *    to execute.
     */
    if (atomic_xchg(&bh->scheduled, 1) == 0) {
        aio_notify(ctx);
    }
}


/* This func is async.
 */
void qemu_bh_cancel(QEMUBH *bh)
{
    bh->scheduled = 0;
}

/* This func is async.  The bottom half will do the delete action at the
 * final step.
 */
void qemu_bh_delete(QEMUBH *bh)
{
    bh->scheduled = 0;
    bh->deleted = 1;
}

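/* Compute how long (in nanoseconds) the event loop may block:
 * 0 if a non-idle BH is pending, at most 10ms if only idle BHs are
 * pending, otherwise the nearest timer deadline (-1 means "forever").
 */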
int64_t
aio_compute_timeout(AioContext *ctx)
{
    int64_t deadline;
    int timeout = -1;
    QEMUBH *bh;

    for (bh = ctx->first_bh; bh; bh = bh->next) {
        if (!bh->deleted && bh->scheduled) {
            if (bh->idle) {
                /* idle bottom halves will be polled at least
                 * every 10ms */
                timeout = 10000000;
            } else {
                /* non-idle bottom halves will be executed
                 * immediately */
                return 0;
            }
        }
    }

    deadline = timerlistgroup_deadline_ns(&ctx->tlg);
    if (deadline == 0) {
        return 0;
    } else {
        return qemu_soonest_timeout(timeout, deadline);
    }
}

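/* GSource adapter: the callbacks below let an AioContext be embedded
 * in a GLib main loop as an ordinary event source
 * (prepare/check/dispatch/finalize).
 */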
static gboolean
aio_ctx_prepare(GSource *source, gint *timeout)
{
    AioContext *ctx = (AioContext *) source;

    atomic_or(&ctx->notify_me, 1);

    /* We assume there is no timeout already supplied */
    *timeout = qemu_timeout_ns_to_ms(aio_compute_timeout(ctx));

    if (aio_prepare(ctx)) {
        *timeout = 0;
    }

    return *timeout == 0;
}

static gboolean
aio_ctx_check(GSource *source)
{
    AioContext *ctx = (AioContext *) source;
    QEMUBH *bh;

    atomic_and(&ctx->notify_me, ~1);
    aio_notify_accept(ctx);

    for (bh = ctx->first_bh; bh; bh = bh->next) {
        if (!bh->deleted && bh->scheduled) {
            return true;
        }
    }
    return aio_pending(ctx) || (timerlistgroup_deadline_ns(&ctx->tlg) == 0);
}

static gboolean
aio_ctx_dispatch(GSource *source,
                 GSourceFunc callback,
                 gpointer user_data)
{
    AioContext *ctx = (AioContext *) source;

    assert(callback == NULL);
    aio_dispatch(ctx);
    return true;
}

static void
aio_ctx_finalize(GSource *source)
{
    AioContext *ctx = (AioContext *) source;

    qemu_bh_delete(ctx->notify_dummy_bh);
    thread_pool_free(ctx->thread_pool);

    qemu_mutex_lock(&ctx->bh_lock);
    while (ctx->first_bh) {
        QEMUBH *next = ctx->first_bh->next;

        /* qemu_bh_delete() must have been called on BHs in this AioContext */
        assert(ctx->first_bh->deleted);

        g_free(ctx->first_bh);
        ctx->first_bh = next;
    }
    qemu_mutex_unlock(&ctx->bh_lock);

    aio_set_event_notifier(ctx, &ctx->notifier, false, NULL);
    event_notifier_cleanup(&ctx->notifier);
    rfifolock_destroy(&ctx->lock);
    qemu_mutex_destroy(&ctx->bh_lock);
    timerlistgroup_deinit(&ctx->tlg);
}

static GSourceFuncs aio_source_funcs = {
    aio_ctx_prepare,
    aio_ctx_check,
    aio_ctx_dispatch,
    aio_ctx_finalize
};

GSource *aio_get_g_source(AioContext *ctx)
{
    g_source_ref(&ctx->source);
    return &ctx->source;
}

ThreadPool *aio_get_thread_pool(AioContext *ctx)
{
    if (!ctx->thread_pool) {
        ctx->thread_pool = thread_pool_new(ctx);
    }
    return ctx->thread_pool;
}

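/* Wake up an event loop that is, or is about to go, sleeping in the
 * poll phase.  Only fires the event notifier when someone has
 * advertised interest through ctx->notify_me.
 */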
void aio_notify(AioContext *ctx)
{
    /* Write e.g. bh->scheduled before reading ctx->notify_me.  Pairs
     * with atomic_or in aio_ctx_prepare or atomic_add in aio_poll.
     */
    smp_mb();
    if (ctx->notify_me) {
        event_notifier_set(&ctx->notifier);
        atomic_mb_set(&ctx->notified, true);
    }
}

void aio_notify_accept(AioContext *ctx)
{
    if (atomic_xchg(&ctx->notified, false)) {
        event_notifier_test_and_clear(&ctx->notifier);
    }
}

static void aio_timerlist_notify(void *opaque)
{
    aio_notify(opaque);
}

static void aio_rfifolock_cb(void *opaque)
{
    AioContext *ctx = opaque;

    /* Kick owner thread in case they are blocked in aio_poll() */
    qemu_bh_schedule(ctx->notify_dummy_bh);
}

static void notify_dummy_bh(void *opaque)
{
    /* Do nothing, we were invoked just to force the event loop to iterate */
}

static void event_notifier_dummy_cb(EventNotifier *e)
{
}

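/* Create a new AioContext, embedded in a freshly allocated GSource.
 * The caller owns one reference; drop it with aio_context_unref.
 */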
AioContext *aio_context_new(Error **errp)
{
    int ret;
    AioContext *ctx;
    Error *local_err = NULL;

    ctx = (AioContext *) g_source_new(&aio_source_funcs, sizeof(AioContext));
    aio_context_setup(ctx, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        goto fail;
    }
    ret = event_notifier_init(&ctx->notifier, false);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "Failed to initialize event notifier");
        goto fail;
    }
    g_source_set_can_recurse(&ctx->source, true);
    aio_set_event_notifier(ctx, &ctx->notifier,
                           false,
                           (EventNotifierHandler *)
                           event_notifier_dummy_cb);
    ctx->thread_pool = NULL;
    qemu_mutex_init(&ctx->bh_lock);
    rfifolock_init(&ctx->lock, aio_rfifolock_cb, ctx);
    timerlistgroup_init(&ctx->tlg, aio_timerlist_notify, ctx);

    ctx->notify_dummy_bh = aio_bh_new(ctx, notify_dummy_bh, NULL);

    return ctx;
fail:
    g_source_destroy(&ctx->source);
    return NULL;
}

void aio_context_ref(AioContext *ctx)
{
    g_source_ref(&ctx->source);
}

void aio_context_unref(AioContext *ctx)
{
    g_source_unref(&ctx->source);
}

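/* aio_context_acquire/release protect the context with a recursive
 * FIFO lock; contending acquirers kick the current owner out of its
 * poll via aio_rfifolock_cb above.
 */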
void aio_context_acquire(AioContext *ctx)
{
    rfifolock_lock(&ctx->lock);
}

void aio_context_release(AioContext *ctx)
{
    rfifolock_unlock(&ctx->lock);
}