/*
 * QEMU block throttling group infrastructure
 */

#include "qemu/osdep.h"
#include "sysemu/block-backend.h"
#include "block/throttle-groups.h"
#include "qemu/queue.h"
#include "qemu/thread.h"
#include "sysemu/qtest.h"
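
/* A ThrottleGroup contains the ThrottleState that is shared by all the
 * BlockBackends registered under the same group name.  Its 'lock' protects
 * the ThrottleState, the list of members, the round-robin token pointers
 * and the timer flags, so the group can be used from different threads.
 *
 * The global list of groups and each group's refcount are protected by
 * 'throttle_groups_lock'.  All of this locking is handled inside this file
 * and is transparent to callers, which only see the group through its
 * ThrottleState member.
 */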
typedef struct ThrottleGroup {
    char *name; /* This is constant during the lifetime of the group */

    QemuMutex lock; /* This lock protects the following four fields */
    ThrottleState ts;
    QLIST_HEAD(, BlockBackendPublic) head;
    BlockBackend *tokens[2];
    bool any_timer_armed[2];

    /* These two are protected by the global throttle_groups_lock */
    unsigned refcount;
    QTAILQ_ENTRY(ThrottleGroup) list;
} ThrottleGroup;

static QemuMutex throttle_groups_lock;
static QTAILQ_HEAD(, ThrottleGroup) throttle_groups =
    QTAILQ_HEAD_INITIALIZER(throttle_groups);
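
/* Increments the reference count of a ThrottleGroup given its name.
 *
 * If no ThrottleGroup is found with the given name a new one is created.
 *
 * @name: the name of the ThrottleGroup
 * @ret:  the ThrottleState member of the ThrottleGroup
 */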
ThrottleState *throttle_group_incref(const char *name)
{
    ThrottleGroup *tg = NULL;
    ThrottleGroup *iter;

    qemu_mutex_lock(&throttle_groups_lock);

    /* Look for an existing group with that name */
    QTAILQ_FOREACH(iter, &throttle_groups, list) {
        if (!strcmp(name, iter->name)) {
            tg = iter;
            break;
        }
    }

    /* Create a new one if not found */
    if (!tg) {
        tg = g_new0(ThrottleGroup, 1);
        tg->name = g_strdup(name);
        qemu_mutex_init(&tg->lock);
        throttle_init(&tg->ts);
        QLIST_INIT(&tg->head);

        QTAILQ_INSERT_TAIL(&throttle_groups, tg, list);
    }

    tg->refcount++;

    qemu_mutex_unlock(&throttle_groups_lock);

    return &tg->ts;
}
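
/* Decrements the reference count of a ThrottleGroup.
 *
 * When the reference count reaches zero the ThrottleGroup is destroyed.
 *
 * @ts: The ThrottleGroup to unref, given by its ThrottleState member
 */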
void throttle_group_unref(ThrottleState *ts)
{
    ThrottleGroup *tg = container_of(ts, ThrottleGroup, ts);

    qemu_mutex_lock(&throttle_groups_lock);
    if (--tg->refcount == 0) {
        QTAILQ_REMOVE(&throttle_groups, tg, list);
        qemu_mutex_destroy(&tg->lock);
        g_free(tg->name);
        g_free(tg);
    }
    qemu_mutex_unlock(&throttle_groups_lock);
}
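
/* Get the name of a throttling group.  The name (and the pointer) is
 * guaranteed to remain constant during the lifetime of the group.
 *
 * @blk: a BlockBackend that is a member of a throttling group
 * @ret: the name of the group
 */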
const char *throttle_group_get_name(BlockBackend *blk)
{
    BlockBackendPublic *blkp = blk_get_public(blk);
    ThrottleGroup *tg = container_of(blkp->throttle_state, ThrottleGroup, ts);
    return tg->name;
}
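
/* Return the next BlockBackend in the round-robin sequence, simulating a
 * circular list.
 *
 * This assumes that tg->lock is held.
 *
 * @blk: the current BlockBackend
 * @ret: the next BlockBackend in the sequence
 */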
static BlockBackend *throttle_group_next_blk(BlockBackend *blk)
{
    BlockBackendPublic *blkp = blk_get_public(blk);
    ThrottleState *ts = blkp->throttle_state;
    ThrottleGroup *tg = container_of(ts, ThrottleGroup, ts);
    BlockBackendPublic *next = QLIST_NEXT(blkp, round_robin);

    /* Wrap around to the first member of the group */
    if (!next) {
        next = QLIST_FIRST(&tg->head);
    }

    return blk_by_public(next);
}
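
/* Return the next BlockBackend in the round-robin sequence that has pending
 * I/O requests.
 *
 * This assumes that tg->lock is held.
 *
 * @blk:      the current BlockBackend
 * @is_write: the type of operation (read/write)
 * @ret:      the next BlockBackend with pending requests, or blk if there is
 *            none
 */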
static BlockBackend *next_throttle_token(BlockBackend *blk, bool is_write)
{
    BlockBackendPublic *blkp = blk_get_public(blk);
    ThrottleGroup *tg = container_of(blkp->throttle_state, ThrottleGroup, ts);
    BlockBackend *token, *start;

    start = token = tg->tokens[is_write];

    /* Walk the group in round-robin order looking for a member that has
     * pending requests of this type */
    token = throttle_group_next_blk(token);
    while (token != start &&
           !blk_get_public(token)->pending_reqs[is_write]) {
        token = throttle_group_next_blk(token);
    }

    /* If no member has queued I/O of this type, pick the current
     * BlockBackend, since chances are its request is about to be queued */
    if (token == start && !blk_get_public(token)->pending_reqs[is_write]) {
        token = blk;
    }

    return token;
}
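
/* Check if the next I/O request for a BlockBackend needs to be throttled or
 * not.  If there's no timer set in this group, set one and update the token
 * accordingly.
 *
 * This assumes that tg->lock is held.
 *
 * @blk:      the current BlockBackend
 * @is_write: the type of operation (read/write)
 * @ret:      whether the I/O request needs to be throttled or not
 */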
static bool throttle_group_schedule_timer(BlockBackend *blk, bool is_write)
{
    BlockBackendPublic *blkp = blk_get_public(blk);
    ThrottleState *ts = blkp->throttle_state;
    ThrottleTimers *tt = &blkp->throttle_timers;
    ThrottleGroup *tg = container_of(ts, ThrottleGroup, ts);
    bool must_wait;

    if (blkp->io_limits_disabled) {
        return false;
    }

    /* Check if any of the timers in this group is already armed */
    if (tg->any_timer_armed[is_write]) {
        return true;
    }

    must_wait = throttle_schedule_timer(ts, tt, is_write);

    /* If a timer just got armed, set blk as the current token */
    if (must_wait) {
        tg->tokens[is_write] = blk;
        tg->any_timer_armed[is_write] = true;
    }

    return must_wait;
}
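
/* Look for the next pending I/O request and schedule it.
 *
 * This assumes that tg->lock is held.
 *
 * @blk:      the current BlockBackend
 * @is_write: the type of operation (read/write)
 */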
static void schedule_next_request(BlockBackend *blk, bool is_write)
{
    BlockBackendPublic *blkp = blk_get_public(blk);
    ThrottleGroup *tg = container_of(blkp->throttle_state, ThrottleGroup, ts);
    bool must_wait;
    BlockBackend *token;

    /* Check if there's any pending request to schedule next */
    token = next_throttle_token(blk, is_write);
    if (!blk_get_public(token)->pending_reqs[is_write]) {
        return;
    }

    /* Set a new token for the request */
    must_wait = throttle_group_schedule_timer(token, is_write);

    /* If it doesn't have to wait, queue it for immediate execution */
    if (!must_wait) {
        /* Give preference to requests from the current blk */
        if (qemu_in_coroutine() &&
            qemu_co_queue_next(&blkp->throttled_reqs[is_write])) {
            token = blk;
        } else {
            /* Arm the token's timer so its queued request gets woken up */
            ThrottleTimers *tt = &blk_get_public(token)->throttle_timers;
            int64_t now = qemu_clock_get_ns(tt->clock_type);
            timer_mod(tt->timers[is_write], now + 1);
            tg->any_timer_armed[is_write] = true;
        }
        tg->tokens[is_write] = token;
    }
}
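
/* Check if an I/O request needs to be throttled, wait and set a timer if
 * necessary, and schedule the next request using a round robin algorithm.
 *
 * @blk:      the current BlockBackend
 * @bytes:    the number of bytes for this I/O
 * @is_write: the type of operation (read/write)
 */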
void coroutine_fn throttle_group_co_io_limits_intercept(BlockBackend *blk,
                                                        unsigned int bytes,
                                                        bool is_write)
{
    bool must_wait;
    BlockBackend *token;

    BlockBackendPublic *blkp = blk_get_public(blk);
    ThrottleGroup *tg = container_of(blkp->throttle_state, ThrottleGroup, ts);
    qemu_mutex_lock(&tg->lock);

    /* First we check if this I/O has to be throttled */
    token = next_throttle_token(blk, is_write);
    must_wait = throttle_group_schedule_timer(token, is_write);

    /* Wait if there's a timer set or queued requests of this type */
    if (must_wait || blkp->pending_reqs[is_write]) {
        blkp->pending_reqs[is_write]++;
        qemu_mutex_unlock(&tg->lock);
        qemu_co_queue_wait(&blkp->throttled_reqs[is_write]);
        qemu_mutex_lock(&tg->lock);
        blkp->pending_reqs[is_write]--;
    }

    /* The I/O will be executed, so do the accounting */
    throttle_account(blkp->throttle_state, is_write, bytes);

    /* Schedule the next request */
    schedule_next_request(blk, is_write);

    qemu_mutex_unlock(&tg->lock);
}

void throttle_group_restart_blk(BlockBackend *blk)
{
    BlockBackendPublic *blkp = blk_get_public(blk);
    int i;

    for (i = 0; i < 2; i++) {
        while (qemu_co_enter_next(&blkp->throttled_reqs[i])) {
            ;
        }
    }
}
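
/* Update the throttle configuration for a particular group.  Similar to
 * throttle_config(), but guarantees atomicity within the throttling group.
 *
 * @blk: a BlockBackend that is a member of the group
 * @cfg: the configuration to set
 */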
void throttle_group_config(BlockBackend *blk, ThrottleConfig *cfg)
{
    BlockBackendPublic *blkp = blk_get_public(blk);
    ThrottleTimers *tt = &blkp->throttle_timers;
    ThrottleState *ts = blkp->throttle_state;
    ThrottleGroup *tg = container_of(ts, ThrottleGroup, ts);
    qemu_mutex_lock(&tg->lock);
    /* throttle_config() cancels the timers */
    if (timer_pending(tt->timers[0])) {
        tg->any_timer_armed[0] = false;
    }
    if (timer_pending(tt->timers[1])) {
        tg->any_timer_armed[1] = false;
    }
    throttle_config(ts, tt, cfg);
    qemu_mutex_unlock(&tg->lock);

    qemu_co_enter_next(&blkp->throttled_reqs[0]);
    qemu_co_enter_next(&blkp->throttled_reqs[1]);
}
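
/* Get the throttle configuration from a particular group.  Similar to
 * throttle_get_config(), but guarantees atomicity within the throttling
 * group.
 *
 * @blk: a BlockBackend that is a member of the group
 * @cfg: the configuration will be written here
 */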
void throttle_group_get_config(BlockBackend *blk, ThrottleConfig *cfg)
{
    BlockBackendPublic *blkp = blk_get_public(blk);
    ThrottleState *ts = blkp->throttle_state;
    ThrottleGroup *tg = container_of(ts, ThrottleGroup, ts);
    qemu_mutex_lock(&tg->lock);
    throttle_get_config(ts, cfg);
    qemu_mutex_unlock(&tg->lock);
}
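
/* ThrottleTimers callback.  This wakes up a request that was waiting because
 * it needed to be throttled.
 *
 * @blk:      the BlockBackend whose request had been throttled
 * @is_write: the type of operation (read/write)
 */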
static void timer_cb(BlockBackend *blk, bool is_write)
{
    BlockBackendPublic *blkp = blk_get_public(blk);
    ThrottleState *ts = blkp->throttle_state;
    ThrottleGroup *tg = container_of(ts, ThrottleGroup, ts);
    bool empty_queue;

    /* The timer has just fired, so we can clear the flag */
    qemu_mutex_lock(&tg->lock);
    tg->any_timer_armed[is_write] = false;
    qemu_mutex_unlock(&tg->lock);

    /* Run the request that was waiting for this timer */
    empty_queue = !qemu_co_enter_next(&blkp->throttled_reqs[is_write]);

    /* If the request queue was empty then we have to take care of
     * scheduling the next one */
    if (empty_queue) {
        qemu_mutex_lock(&tg->lock);
        schedule_next_request(blk, is_write);
        qemu_mutex_unlock(&tg->lock);
    }
}

static void read_timer_cb(void *opaque)
{
    timer_cb(opaque, false);
}

static void write_timer_cb(void *opaque)
{
    timer_cb(opaque, true);
}
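
/* Register a BlockBackend in the throttling group, also initializing its
 * timers and updating its throttle_state pointer to point to it.  If a
 * throttling group with that name does not exist yet, it will be created.
 *
 * @blk:       the BlockBackend to insert
 * @groupname: the name of the group
 */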
void throttle_group_register_blk(BlockBackend *blk, const char *groupname)
{
    int i;
    BlockBackendPublic *blkp = blk_get_public(blk);
    ThrottleState *ts = throttle_group_incref(groupname);
    ThrottleGroup *tg = container_of(ts, ThrottleGroup, ts);
    int clock_type = QEMU_CLOCK_REALTIME;

    if (qtest_enabled()) {
        /* For testing block IO throttling only */
        clock_type = QEMU_CLOCK_VIRTUAL;
    }

    blkp->throttle_state = ts;

    qemu_mutex_lock(&tg->lock);
    /* If the ThrottleGroup is new set this BlockBackend as the token */
    for (i = 0; i < 2; i++) {
        if (!tg->tokens[i]) {
            tg->tokens[i] = blk;
        }
    }

    QLIST_INSERT_HEAD(&tg->head, blkp, round_robin);

    throttle_timers_init(&blkp->throttle_timers,
                         blk_get_aio_context(blk),
                         clock_type,
                         read_timer_cb,
                         write_timer_cb,
                         blk);

    qemu_mutex_unlock(&tg->lock);
}
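
/* Unregister a BlockBackend from its group, removing it from the list,
 * destroying the timers and setting the throttle_state pointer to NULL.
 *
 * The BlockBackend must not have pending throttled requests, so the caller
 * has to drain them first.
 *
 * The group will be destroyed if it's empty after this operation.
 *
 * @blk: the BlockBackend to remove
 */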
void throttle_group_unregister_blk(BlockBackend *blk)
{
    BlockBackendPublic *blkp = blk_get_public(blk);
    ThrottleGroup *tg = container_of(blkp->throttle_state, ThrottleGroup, ts);
    int i;

    assert(blkp->pending_reqs[0] == 0 && blkp->pending_reqs[1] == 0);
    assert(qemu_co_queue_empty(&blkp->throttled_reqs[0]));
    assert(qemu_co_queue_empty(&blkp->throttled_reqs[1]));

    qemu_mutex_lock(&tg->lock);
    for (i = 0; i < 2; i++) {
        /* Leave the round-robin token to another member of the group */
        if (tg->tokens[i] == blk) {
            BlockBackend *token = throttle_group_next_blk(blk);
            /* This may have been the last member of the group */
            if (token == blk) {
                token = NULL;
            }
            tg->tokens[i] = token;
        }
    }

    /* Remove the current blk from the list */
    QLIST_REMOVE(blkp, round_robin);
    throttle_timers_destroy(&blkp->throttle_timers);
    qemu_mutex_unlock(&tg->lock);

    throttle_group_unref(&tg->ts);
    blkp->throttle_state = NULL;
}

static void throttle_groups_init(void)
{
    qemu_mutex_init(&throttle_groups_lock);
}

block_init(throttle_groups_init);