1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25#include "qemu/osdep.h"
26#include "sysemu/block-backend.h"
27#include "block/throttle-groups.h"
28#include "qemu/throttle-options.h"
29#include "qemu/main-loop.h"
30#include "qemu/queue.h"
31#include "qemu/thread.h"
32#include "sysemu/qtest.h"
33#include "qapi/error.h"
34#include "qapi/qapi-visit-block-core.h"
35#include "qom/object.h"
36#include "qom/object_interfaces.h"
37
/* Forward declarations: these static functions are referenced before their
 * definitions below (e.g. timer_cb() from throttle_group_restart_tgm(),
 * throttle_group_obj_complete() from throttle_group_incref()). */
static void throttle_group_obj_init(Object *obj);
static void throttle_group_obj_complete(UserCreatable *obj, Error **errp);
static void timer_cb(ThrottleGroupMember *tgm, bool is_write);
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
/* A ThrottleGroup holds the shared ThrottleState for all its members and
 * therefore needs its own lock: members may live in different AioContexts. */
typedef struct ThrottleGroup {
    Object parent_obj;

    /* Once true, individual QOM property changes are refused (see
     * throttle_group_set()) and the group is on the throttle_groups list. */
    bool is_initialized;
    char *name; /* freed in throttle_group_obj_finalize() */

    QemuMutex lock; /* protects ts, head, tokens[] and any_timer_armed[] */
    ThrottleState ts;
    /* Round-robin list of members; see throttle_group_next_tgm(). */
    QLIST_HEAD(, ThrottleGroupMember) head;
    /* Current round-robin token, one per direction ([0]=read, [1]=write) */
    ThrottleGroupMember *tokens[2];
    bool any_timer_armed[2]; /* whether any member's timer is armed, per dir */
    QEMUClockType clock_type; /* REALTIME, or VIRTUAL under qtest */

    /* Entry in the global throttle_groups list */
    QTAILQ_ENTRY(ThrottleGroup) list;
} ThrottleGroup;
83
84
/* Global registry of all throttle groups, searched by throttle_group_by_name().
 * NOTE(review): no dedicated lock is taken around this list here; it appears
 * to rely on the big QEMU lock — confirm before adding new callers. */
static QTAILQ_HEAD(, ThrottleGroup) throttle_groups =
    QTAILQ_HEAD_INITIALIZER(throttle_groups);
87
88
89
90
91
92static ThrottleGroup *throttle_group_by_name(const char *name)
93{
94 ThrottleGroup *iter;
95
96
97 QTAILQ_FOREACH(iter, &throttle_groups, list) {
98 if (!g_strcmp0(name, iter->name)) {
99 return iter;
100 }
101 }
102
103 return NULL;
104}
105
106
107
108
109bool throttle_group_exists(const char *name)
110{
111 return throttle_group_by_name(name) != NULL;
112}
113
114
115
116
117
118
119
120
121
122
123
124
125ThrottleState *throttle_group_incref(const char *name)
126{
127 ThrottleGroup *tg = NULL;
128
129
130 tg = throttle_group_by_name(name);
131
132 if (tg) {
133 object_ref(OBJECT(tg));
134 } else {
135
136
137 tg = THROTTLE_GROUP(object_new(TYPE_THROTTLE_GROUP));
138 tg->name = g_strdup(name);
139 throttle_group_obj_complete(USER_CREATABLE(tg), &error_abort);
140 }
141
142 return &tg->ts;
143}
144
145
146
147
148
149
150
151
152
153
154
155void throttle_group_unref(ThrottleState *ts)
156{
157 ThrottleGroup *tg = container_of(ts, ThrottleGroup, ts);
158 object_unref(OBJECT(tg));
159}
160
161
162
163
164
165
166
167const char *throttle_group_get_name(ThrottleGroupMember *tgm)
168{
169 ThrottleGroup *tg = container_of(tgm->throttle_state, ThrottleGroup, ts);
170 return tg->name;
171}
172
173
174
175
176
177
178
179
180
181static ThrottleGroupMember *throttle_group_next_tgm(ThrottleGroupMember *tgm)
182{
183 ThrottleState *ts = tgm->throttle_state;
184 ThrottleGroup *tg = container_of(ts, ThrottleGroup, ts);
185 ThrottleGroupMember *next = QLIST_NEXT(tgm, round_robin);
186
187 if (!next) {
188 next = QLIST_FIRST(&tg->head);
189 }
190
191 return next;
192}
193
194
195
196
197
198
199
200
201
202
203static inline bool tgm_has_pending_reqs(ThrottleGroupMember *tgm,
204 bool is_write)
205{
206 return tgm->pending_reqs[is_write];
207}
208
209
210
211
212
213
214
215
216
217
218
/* Return the next member in the round robin that has pending requests of
 * the given type, starting from the group's current token.
 *
 * Callers hold tg->lock (see schedule_next_request() and
 * throttle_group_co_io_limits_intercept()).
 *
 * @tgm:      the current ThrottleGroupMember
 * @is_write: the type of operation (read/write)
 * Returns:   the next member with pending requests, or @tgm itself if
 *            no other member has any.
 */
static ThrottleGroupMember *next_throttle_token(ThrottleGroupMember *tgm,
                                                bool is_write)
{
    ThrottleState *ts = tgm->throttle_state;
    ThrottleGroup *tg = container_of(ts, ThrottleGroup, ts);
    ThrottleGroupMember *token, *start;

    /* If this member has I/O limits disabled (e.g. while being drained)
     * and it has pending requests, pick it straight away instead of
     * making it wait behind other members' throttled requests. */
    if (tgm_has_pending_reqs(tgm, is_write) &&
        atomic_read(&tgm->io_limits_disabled)) {
        return tgm;
    }

    start = token = tg->tokens[is_write];

    /* Walk the round robin until we find a member with pending requests
     * or come back to where we started. */
    token = throttle_group_next_tgm(token);
    while (token != start && !tgm_has_pending_reqs(token, is_write)) {
        token = throttle_group_next_tgm(token);
    }

    /* Wrapped around without finding pending requests anywhere: fall back
     * to the caller's member, which likely just queued the current request. */
    if (token == start && !tgm_has_pending_reqs(token, is_write)) {
        token = tgm;
    }

    /* Either we return the original member, or one with pending requests */
    assert(token == tgm || tgm_has_pending_reqs(token, is_write));

    return token;
}
256
257
258
259
260
261
262
263
264
265
266
/* Check whether the next I/O request for @tgm must be throttled; if so and
 * no timer is armed anywhere in the group yet, arm this member's timer and
 * make it the group's token for that direction.
 *
 * Callers hold tg->lock.
 *
 * @tgm:      the current ThrottleGroupMember
 * @is_write: the type of operation (read/write)
 * Returns:   true if the request must wait, false if it may proceed now.
 */
static bool throttle_group_schedule_timer(ThrottleGroupMember *tgm,
                                          bool is_write)
{
    ThrottleState *ts = tgm->throttle_state;
    ThrottleGroup *tg = container_of(ts, ThrottleGroup, ts);
    ThrottleTimers *tt = &tgm->throttle_timers;
    bool must_wait;

    /* Limits disabled (e.g. during drain): never throttle. */
    if (atomic_read(&tgm->io_limits_disabled)) {
        return false;
    }

    /* Some other member already has a timer armed for this direction, so
     * this request must queue behind it. */
    if (tg->any_timer_armed[is_write]) {
        return true;
    }

    must_wait = throttle_schedule_timer(ts, tt, is_write);

    /* If a timer just got armed, this member becomes the current token */
    if (must_wait) {
        tg->tokens[is_write] = tgm;
        tg->any_timer_armed[is_write] = true;
    }

    return must_wait;
}
294
295
296
297
298
299
300
301static bool coroutine_fn throttle_group_co_restart_queue(ThrottleGroupMember *tgm,
302 bool is_write)
303{
304 bool ret;
305
306 qemu_co_mutex_lock(&tgm->throttled_reqs_lock);
307 ret = qemu_co_queue_next(&tgm->throttled_reqs[is_write]);
308 qemu_co_mutex_unlock(&tgm->throttled_reqs_lock);
309
310 return ret;
311}
312
313
314
315
316
317
318
319
/* Look for the next pending I/O request in the group and schedule it.
 *
 * Callers hold tg->lock.
 *
 * @tgm:      the current ThrottleGroupMember
 * @is_write: the type of operation (read/write)
 */
static void schedule_next_request(ThrottleGroupMember *tgm, bool is_write)
{
    ThrottleState *ts = tgm->throttle_state;
    ThrottleGroup *tg = container_of(ts, ThrottleGroup, ts);
    bool must_wait;
    ThrottleGroupMember *token;

    /* Check if there's any pending request to schedule next */
    token = next_throttle_token(tgm, is_write);
    if (!tgm_has_pending_reqs(token, is_write)) {
        return;
    }

    /* Set a timer for the request if it needs to be throttled */
    must_wait = throttle_group_schedule_timer(token, is_write);

    /* If it doesn't have to wait, queue it for immediate execution */
    if (!must_wait) {
        /* When called from a coroutine, prefer waking one of our own
         * throttled requests directly; otherwise fire the token member's
         * timer immediately so its queue is restarted from timer_cb(). */
        if (qemu_in_coroutine() &&
            throttle_group_co_restart_queue(tgm, is_write)) {
            token = tgm;
        } else {
            ThrottleTimers *tt = &token->throttle_timers;
            int64_t now = qemu_clock_get_ns(tg->clock_type);
            timer_mod(tt->timers[is_write], now);
            tg->any_timer_armed[is_write] = true;
        }
        tg->tokens[is_write] = token;
    }
}
351
352
353
354
355
356
357
358
359
/* Check if an I/O request needs to be throttled, wait and set a timer if
 * necessary, account for the I/O, and schedule the next request using a
 * round robin algorithm.
 *
 * @tgm:      the current ThrottleGroupMember
 * @bytes:    the number of bytes for this I/O
 * @is_write: the type of operation (read/write)
 */
void coroutine_fn throttle_group_co_io_limits_intercept(ThrottleGroupMember *tgm,
                                                        unsigned int bytes,
                                                        bool is_write)
{
    bool must_wait;
    ThrottleGroupMember *token;
    ThrottleGroup *tg = container_of(tgm->throttle_state, ThrottleGroup, ts);
    qemu_mutex_lock(&tg->lock);

    /* First we check if this I/O has to be throttled. */
    token = next_throttle_token(tgm, is_write);
    must_wait = throttle_group_schedule_timer(token, is_write);

    /* Wait if there's a timer set or queued requests of this type;
     * tg->lock is dropped while this coroutine sleeps on the queue. */
    if (must_wait || tgm->pending_reqs[is_write]) {
        tgm->pending_reqs[is_write]++;
        qemu_mutex_unlock(&tg->lock);
        qemu_co_mutex_lock(&tgm->throttled_reqs_lock);
        qemu_co_queue_wait(&tgm->throttled_reqs[is_write],
                           &tgm->throttled_reqs_lock);
        qemu_co_mutex_unlock(&tgm->throttled_reqs_lock);
        qemu_mutex_lock(&tg->lock);
        tgm->pending_reqs[is_write]--;
    }

    /* The I/O will be executed, so do the accounting */
    throttle_account(tgm->throttle_state, is_write, bytes);

    /* Schedule the next request */
    schedule_next_request(tgm, is_write);

    qemu_mutex_unlock(&tg->lock);
}
393
/* Heap-allocated argument bundle for throttle_group_restart_queue_entry();
 * freed by that coroutine when it finishes. */
typedef struct {
    ThrottleGroupMember *tgm;
    bool is_write;
} RestartData;
398
/* Coroutine entry point for throttle_group_restart_queue(): wake one
 * throttled request of the given type, or — if the queue was empty —
 * schedule the next pending request in the group. */
static void coroutine_fn throttle_group_restart_queue_entry(void *opaque)
{
    RestartData *data = opaque;
    ThrottleGroupMember *tgm = data->tgm;
    ThrottleState *ts = tgm->throttle_state;
    ThrottleGroup *tg = container_of(ts, ThrottleGroup, ts);
    bool is_write = data->is_write;
    bool empty_queue;

    empty_queue = !throttle_group_co_restart_queue(tgm, is_write);

    /* If the request queue was empty then we have to take care of
     * scheduling the next one */
    if (empty_queue) {
        qemu_mutex_lock(&tg->lock);
        schedule_next_request(tgm, is_write);
        qemu_mutex_unlock(&tg->lock);
    }

    g_free(data);

    /* Pair with the atomic_inc() in throttle_group_restart_queue() and
     * wake up anyone blocked in AIO_WAIT_WHILE on restart_pending. */
    atomic_dec(&tgm->restart_pending);
    aio_wait_kick();
}
423
/* Spawn a coroutine in @tgm's AioContext that restarts its throttled-request
 * queue for the given direction (see throttle_group_restart_queue_entry()). */
static void throttle_group_restart_queue(ThrottleGroupMember *tgm, bool is_write)
{
    Coroutine *co;
    RestartData *rd = g_new0(RestartData, 1);

    rd->tgm = tgm;
    rd->is_write = is_write;

    /* This function is called when a timer fired or from
     * throttle_group_restart_tgm() after deleting the timer, so there can
     * be no timer pending on this tgm at this point. */
    assert(!timer_pending(tgm->throttle_timers.timers[is_write]));

    /* Balanced by atomic_dec() in the coroutine; lets
     * throttle_group_unregister_tgm() wait for in-flight restarts. */
    atomic_inc(&tgm->restart_pending);

    co = qemu_coroutine_create(throttle_group_restart_queue_entry, rd);
    aio_co_enter(tgm->aio_context, co);
}
442
/* Restart both request queues (read and write) of @tgm, firing any pending
 * throttle timer immediately.  No-op for an unregistered member. */
void throttle_group_restart_tgm(ThrottleGroupMember *tgm)
{
    int i;

    if (tgm->throttle_state) {
        for (i = 0; i < 2; i++) {
            QEMUTimer *t = tgm->throttle_timers.timers[i];
            if (timer_pending(t)) {
                /* If there's a pending timer on this tgm, fire it now */
                timer_del(t);
                timer_cb(tgm, i);
            } else {
                /* Else run the next request from the queue manually */
                throttle_group_restart_queue(tgm, i);
            }
        }
    }
}
461
462
463
464
465
466
467
468
/* Update the throttle configuration of the group that @tgm belongs to,
 * under tg->lock, then restart the member's queues so the new limits take
 * effect immediately.
 *
 * @tgm: a member of the group to configure
 * @cfg: the configuration to set
 */
void throttle_group_config(ThrottleGroupMember *tgm, ThrottleConfig *cfg)
{
    ThrottleState *ts = tgm->throttle_state;
    ThrottleGroup *tg = container_of(ts, ThrottleGroup, ts);
    qemu_mutex_lock(&tg->lock);
    throttle_config(ts, tg->clock_type, cfg);
    qemu_mutex_unlock(&tg->lock);

    throttle_group_restart_tgm(tgm);
}
479
480
481
482
483
484
485
486
487void throttle_group_get_config(ThrottleGroupMember *tgm, ThrottleConfig *cfg)
488{
489 ThrottleState *ts = tgm->throttle_state;
490 ThrottleGroup *tg = container_of(ts, ThrottleGroup, ts);
491 qemu_mutex_lock(&tg->lock);
492 throttle_get_config(ts, cfg);
493 qemu_mutex_unlock(&tg->lock);
494}
495
496
497
498
499
500
501
/* Common timer callback for both directions: clear the group's
 * "timer armed" flag and restart the member's request queue.
 *
 * @tgm:      the member whose timer fired
 * @is_write: the type of operation (read/write)
 */
static void timer_cb(ThrottleGroupMember *tgm, bool is_write)
{
    ThrottleState *ts = tgm->throttle_state;
    ThrottleGroup *tg = container_of(ts, ThrottleGroup, ts);

    /* The timer has just been fired, so we can update the flag */
    qemu_mutex_lock(&tg->lock);
    tg->any_timer_armed[is_write] = false;
    qemu_mutex_unlock(&tg->lock);

    /* Run the request that was waiting for this timer */
    throttle_group_restart_queue(tgm, is_write);
}
515
516static void read_timer_cb(void *opaque)
517{
518 timer_cb(opaque, false);
519}
520
521static void write_timer_cb(void *opaque)
522{
523 timer_cb(opaque, true);
524}
525
526
527
528
529
530
531
532
533
534
535
536
/* Register a ThrottleGroupMember with a throttling group, initializing its
 * timers and coroutine queues and pointing its throttle_state at the group.
 * If no group with that name exists yet it is created (via
 * throttle_group_incref()).
 *
 * @tgm:       the ThrottleGroupMember to insert
 * @groupname: the name of the group
 * @ctx:       the AioContext the member's timers run in
 */
void throttle_group_register_tgm(ThrottleGroupMember *tgm,
                                 const char *groupname,
                                 AioContext *ctx)
{
    int i;
    ThrottleState *ts = throttle_group_incref(groupname);
    ThrottleGroup *tg = container_of(ts, ThrottleGroup, ts);

    tgm->throttle_state = ts;
    tgm->aio_context = ctx;
    atomic_set(&tgm->restart_pending, 0);

    qemu_mutex_lock(&tg->lock);
    /* If the group has no tokens yet (first member), make this one the
     * current token for both directions. */
    for (i = 0; i < 2; i++) {
        if (!tg->tokens[i]) {
            tg->tokens[i] = tgm;
        }
    }

    QLIST_INSERT_HEAD(&tg->head, tgm, round_robin);

    throttle_timers_init(&tgm->throttle_timers,
                         tgm->aio_context,
                         tg->clock_type,
                         read_timer_cb,
                         write_timer_cb,
                         tgm);
    qemu_co_mutex_init(&tgm->throttled_reqs_lock);
    qemu_co_queue_init(&tgm->throttled_reqs[0]);
    qemu_co_queue_init(&tgm->throttled_reqs[1]);

    qemu_mutex_unlock(&tg->lock);
}
571
572
573
574
575
576
577
578
579
580
581
/* Unregister a ThrottleGroupMember from its group: wait for in-flight
 * restart coroutines, hand the round-robin token to the next member if
 * needed, remove it from the list and drop the group reference.
 *
 * The member must have no pending requests and its timers must be disarmed
 * (asserted below).
 *
 * @tgm: the ThrottleGroupMember to remove
 */
void throttle_group_unregister_tgm(ThrottleGroupMember *tgm)
{
    ThrottleState *ts = tgm->throttle_state;
    ThrottleGroup *tg = container_of(ts, ThrottleGroup, ts);
    ThrottleGroupMember *token;
    int i;

    if (!ts) {
        /* Discard uninitialized tgm */
        return;
    }

    /* Wait for restart coroutines launched for this member to finish */
    AIO_WAIT_WHILE(tgm->aio_context, atomic_read(&tgm->restart_pending) > 0);

    qemu_mutex_lock(&tg->lock);
    for (i = 0; i < 2; i++) {
        assert(tgm->pending_reqs[i] == 0);
        assert(qemu_co_queue_empty(&tgm->throttled_reqs[i]));
        assert(!timer_pending(tgm->throttle_timers.timers[i]));
        if (tg->tokens[i] == tgm) {
            token = throttle_group_next_tgm(tgm);
            /* Take care of the case where this is the last member */
            if (token == tgm) {
                token = NULL;
            }
            tg->tokens[i] = token;
        }
    }

    /* remove the current tgm from the list */
    QLIST_REMOVE(tgm, round_robin);
    throttle_timers_destroy(&tgm->throttle_timers);
    qemu_mutex_unlock(&tg->lock);

    throttle_group_unref(&tg->ts);
    tgm->throttle_state = NULL;
}
620
621void throttle_group_attach_aio_context(ThrottleGroupMember *tgm,
622 AioContext *new_context)
623{
624 ThrottleTimers *tt = &tgm->throttle_timers;
625 throttle_timers_attach_aio_context(tt, new_context);
626 tgm->aio_context = new_context;
627}
628
/* Detach @tgm's throttle timers from its AioContext.  The member must be
 * fully drained (asserted below); any armed timer is released by handing
 * scheduling over to another group member first. */
void throttle_group_detach_aio_context(ThrottleGroupMember *tgm)
{
    ThrottleGroup *tg = container_of(tgm->throttle_state, ThrottleGroup, ts);
    ThrottleTimers *tt = &tgm->throttle_timers;
    int i;

    /* Requests must have been drained */
    assert(tgm->pending_reqs[0] == 0 && tgm->pending_reqs[1] == 0);
    assert(qemu_co_queue_empty(&tgm->throttled_reqs[0]));
    assert(qemu_co_queue_empty(&tgm->throttled_reqs[1]));

    /* Kick off next ThrottleGroupMember, if necessary */
    qemu_mutex_lock(&tg->lock);
    for (i = 0; i < 2; i++) {
        if (timer_pending(tt->timers[i])) {
            tg->any_timer_armed[i] = false;
            schedule_next_request(tgm, i);
        }
    }
    qemu_mutex_unlock(&tg->lock);

    throttle_timers_detach_aio_context(tt);
    tgm->aio_context = NULL;
}
653
654#undef THROTTLE_OPT_PREFIX
655#define THROTTLE_OPT_PREFIX "x-"
656
657
/* Description of one QOM throttling property: its name, which bucket it
 * maps to, and which field of that bucket it sets (see properties[]). */
typedef struct {
    const char *name;
    BucketType type;
    enum {
        AVG,          /* bucket's average rate limit */
        MAX,          /* bucket's burst limit */
        BURST_LENGTH, /* bucket's burst length */
        IOPS_SIZE,    /* cfg->op_size, not tied to a bucket */
    } category;
} ThrottleParamInfo;
668
/* Table of all individual throttling properties exposed on the
 * throttle-group object (registered in throttle_group_obj_class_init()).
 * Each entry maps a property name to a bucket and a field category. */
static ThrottleParamInfo properties[] = {
    {
        THROTTLE_OPT_PREFIX QEMU_OPT_IOPS_TOTAL,
        THROTTLE_OPS_TOTAL, AVG,
    },
    {
        THROTTLE_OPT_PREFIX QEMU_OPT_IOPS_TOTAL_MAX,
        THROTTLE_OPS_TOTAL, MAX,
    },
    {
        THROTTLE_OPT_PREFIX QEMU_OPT_IOPS_TOTAL_MAX_LENGTH,
        THROTTLE_OPS_TOTAL, BURST_LENGTH,
    },
    {
        THROTTLE_OPT_PREFIX QEMU_OPT_IOPS_READ,
        THROTTLE_OPS_READ, AVG,
    },
    {
        THROTTLE_OPT_PREFIX QEMU_OPT_IOPS_READ_MAX,
        THROTTLE_OPS_READ, MAX,
    },
    {
        THROTTLE_OPT_PREFIX QEMU_OPT_IOPS_READ_MAX_LENGTH,
        THROTTLE_OPS_READ, BURST_LENGTH,
    },
    {
        THROTTLE_OPT_PREFIX QEMU_OPT_IOPS_WRITE,
        THROTTLE_OPS_WRITE, AVG,
    },
    {
        THROTTLE_OPT_PREFIX QEMU_OPT_IOPS_WRITE_MAX,
        THROTTLE_OPS_WRITE, MAX,
    },
    {
        THROTTLE_OPT_PREFIX QEMU_OPT_IOPS_WRITE_MAX_LENGTH,
        THROTTLE_OPS_WRITE, BURST_LENGTH,
    },
    {
        THROTTLE_OPT_PREFIX QEMU_OPT_BPS_TOTAL,
        THROTTLE_BPS_TOTAL, AVG,
    },
    {
        THROTTLE_OPT_PREFIX QEMU_OPT_BPS_TOTAL_MAX,
        THROTTLE_BPS_TOTAL, MAX,
    },
    {
        THROTTLE_OPT_PREFIX QEMU_OPT_BPS_TOTAL_MAX_LENGTH,
        THROTTLE_BPS_TOTAL, BURST_LENGTH,
    },
    {
        THROTTLE_OPT_PREFIX QEMU_OPT_BPS_READ,
        THROTTLE_BPS_READ, AVG,
    },
    {
        THROTTLE_OPT_PREFIX QEMU_OPT_BPS_READ_MAX,
        THROTTLE_BPS_READ, MAX,
    },
    {
        THROTTLE_OPT_PREFIX QEMU_OPT_BPS_READ_MAX_LENGTH,
        THROTTLE_BPS_READ, BURST_LENGTH,
    },
    {
        THROTTLE_OPT_PREFIX QEMU_OPT_BPS_WRITE,
        THROTTLE_BPS_WRITE, AVG,
    },
    {
        THROTTLE_OPT_PREFIX QEMU_OPT_BPS_WRITE_MAX,
        THROTTLE_BPS_WRITE, MAX,
    },
    {
        THROTTLE_OPT_PREFIX QEMU_OPT_BPS_WRITE_MAX_LENGTH,
        THROTTLE_BPS_WRITE, BURST_LENGTH,
    },
    {
        THROTTLE_OPT_PREFIX QEMU_OPT_IOPS_SIZE,
        0, IOPS_SIZE,
    }
};
747
748
749
/* QOM instance_init: set defaults and initialize the group's lock,
 * throttle state and member list.  Validation happens later in
 * throttle_group_obj_complete(). */
static void throttle_group_obj_init(Object *obj)
{
    ThrottleGroup *tg = THROTTLE_GROUP(obj);

    tg->clock_type = QEMU_CLOCK_REALTIME;
    if (qtest_enabled()) {
        /* Use the virtual clock under qtest so tests control time */
        tg->clock_type = QEMU_CLOCK_VIRTUAL;
    }
    tg->is_initialized = false;
    qemu_mutex_init(&tg->lock);
    throttle_init(&tg->ts);
    QLIST_INIT(&tg->head);
}
764
765
766
/* UserCreatable.complete: validate the configuration accumulated through
 * the individual properties, apply it, and publish the group in the
 * global throttle_groups list. */
static void throttle_group_obj_complete(UserCreatable *obj, Error **errp)
{
    ThrottleGroup *tg = THROTTLE_GROUP(obj);
    ThrottleConfig cfg;

    /* set group name to object id if it exists */
    if (!tg->name && tg->parent_obj.parent) {
        tg->name = object_get_canonical_path_component(OBJECT(obj));
    }
    /* We must have a group name at this point (either set here or by
     * throttle_group_incref() before calling us). */
    assert(tg->name);

    /* error if name is duplicate */
    if (throttle_group_exists(tg->name)) {
        error_setg(errp, "A group with this name already exists");
        return;
    }

    /* check validity of the full configuration before applying it */
    throttle_get_config(&tg->ts, &cfg);
    if (!throttle_is_valid(&cfg, errp)) {
        return;
    }
    throttle_config(&tg->ts, tg->clock_type, &cfg);
    QTAILQ_INSERT_TAIL(&throttle_groups, tg, list);
    tg->is_initialized = true;
}
794
795
796
/* QOM instance_finalize: remove the group from the global list (if it got
 * there, i.e. complete() succeeded) and release its resources. */
static void throttle_group_obj_finalize(Object *obj)
{
    ThrottleGroup *tg = THROTTLE_GROUP(obj);
    if (tg->is_initialized) {
        QTAILQ_REMOVE(&throttle_groups, tg, list);
    }
    qemu_mutex_destroy(&tg->lock);
    g_free(tg->name);
}
806
807static void throttle_group_set(Object *obj, Visitor *v, const char * name,
808 void *opaque, Error **errp)
809
810{
811 ThrottleGroup *tg = THROTTLE_GROUP(obj);
812 ThrottleConfig *cfg;
813 ThrottleParamInfo *info = opaque;
814 Error *local_err = NULL;
815 int64_t value;
816
817
818
819
820
821 if (tg->is_initialized) {
822 error_setg(&local_err, "Property cannot be set after initialization");
823 goto ret;
824 }
825
826 visit_type_int64(v, name, &value, &local_err);
827 if (local_err) {
828 goto ret;
829 }
830 if (value < 0) {
831 error_setg(&local_err, "Property values cannot be negative");
832 goto ret;
833 }
834
835 cfg = &tg->ts.cfg;
836 switch (info->category) {
837 case AVG:
838 cfg->buckets[info->type].avg = value;
839 break;
840 case MAX:
841 cfg->buckets[info->type].max = value;
842 break;
843 case BURST_LENGTH:
844 if (value > UINT_MAX) {
845 error_setg(&local_err, "%s value must be in the"
846 "range [0, %u]", info->name, UINT_MAX);
847 goto ret;
848 }
849 cfg->buckets[info->type].burst_length = value;
850 break;
851 case IOPS_SIZE:
852 cfg->op_size = value;
853 break;
854 }
855
856ret:
857 error_propagate(errp, local_err);
858 return;
859
860}
861
862static void throttle_group_get(Object *obj, Visitor *v, const char *name,
863 void *opaque, Error **errp)
864{
865 ThrottleGroup *tg = THROTTLE_GROUP(obj);
866 ThrottleConfig cfg;
867 ThrottleParamInfo *info = opaque;
868 int64_t value;
869
870 throttle_get_config(&tg->ts, &cfg);
871 switch (info->category) {
872 case AVG:
873 value = cfg.buckets[info->type].avg;
874 break;
875 case MAX:
876 value = cfg.buckets[info->type].max;
877 break;
878 case BURST_LENGTH:
879 value = cfg.buckets[info->type].burst_length;
880 break;
881 case IOPS_SIZE:
882 value = cfg.op_size;
883 break;
884 }
885
886 visit_type_int64(v, name, &value, errp);
887}
888
/* QOM setter for the "limits" property: replace the group's whole throttling
 * configuration atomically (under tg->lock) from a ThrottleLimits struct,
 * validating the combined result first. */
static void throttle_group_set_limits(Object *obj, Visitor *v,
                                      const char *name, void *opaque,
                                      Error **errp)

{
    ThrottleGroup *tg = THROTTLE_GROUP(obj);
    ThrottleConfig cfg;
    ThrottleLimits arg = { 0 };
    ThrottleLimits *argp = &arg;
    Error *local_err = NULL;

    visit_type_ThrottleLimits(v, name, &argp, &local_err);
    if (local_err) {
        goto ret;
    }
    qemu_mutex_lock(&tg->lock);
    /* Merge the new limits into the current configuration */
    throttle_get_config(&tg->ts, &cfg);
    throttle_limits_to_config(argp, &cfg, &local_err);
    if (local_err) {
        goto unlock;
    }
    throttle_config(&tg->ts, tg->clock_type, &cfg);

unlock:
    qemu_mutex_unlock(&tg->lock);
ret:
    error_propagate(errp, local_err);
    return;
}
918
919static void throttle_group_get_limits(Object *obj, Visitor *v,
920 const char *name, void *opaque,
921 Error **errp)
922{
923 ThrottleGroup *tg = THROTTLE_GROUP(obj);
924 ThrottleConfig cfg;
925 ThrottleLimits arg = { 0 };
926 ThrottleLimits *argp = &arg;
927
928 qemu_mutex_lock(&tg->lock);
929 throttle_get_config(&tg->ts, &cfg);
930 qemu_mutex_unlock(&tg->lock);
931
932 throttle_config_to_limits(&cfg, argp);
933
934 visit_type_ThrottleLimits(v, name, &argp, errp);
935}
936
937static bool throttle_group_can_be_deleted(UserCreatable *uc)
938{
939 return OBJECT(uc)->ref == 1;
940}
941
942static void throttle_group_obj_class_init(ObjectClass *klass, void *class_data)
943{
944 size_t i = 0;
945 UserCreatableClass *ucc = USER_CREATABLE_CLASS(klass);
946
947 ucc->complete = throttle_group_obj_complete;
948 ucc->can_be_deleted = throttle_group_can_be_deleted;
949
950
951 for (i = 0; i < sizeof(properties) / sizeof(ThrottleParamInfo); i++) {
952 object_class_property_add(klass,
953 properties[i].name,
954 "int",
955 throttle_group_get,
956 throttle_group_set,
957 NULL, &properties[i],
958 &error_abort);
959 }
960
961
962 object_class_property_add(klass,
963 "limits", "ThrottleLimits",
964 throttle_group_get_limits,
965 throttle_group_set_limits,
966 NULL, NULL,
967 &error_abort);
968}
969
/* QOM type description for the user-creatable throttle-group object. */
static const TypeInfo throttle_group_info = {
    .name = TYPE_THROTTLE_GROUP,
    .parent = TYPE_OBJECT,
    .class_init = throttle_group_obj_class_init,
    .instance_size = sizeof(ThrottleGroup),
    .instance_init = throttle_group_obj_init,
    .instance_finalize = throttle_group_obj_finalize,
    .interfaces = (InterfaceInfo[]) {
        { TYPE_USER_CREATABLE },
        { }
    },
};
982
/* Register the throttle-group QOM type at startup. */
static void throttle_groups_init(void)
{
    type_register_static(&throttle_group_info);
}

type_init(throttle_groups_init);
989