1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25#include "qemu/osdep.h"
26#include "sysemu/block-backend.h"
27#include "block/throttle-groups.h"
28#include "qemu/throttle-options.h"
29#include "qemu/queue.h"
30#include "qemu/thread.h"
31#include "sysemu/qtest.h"
32#include "qapi/error.h"
33#include "qapi/qapi-visit-block-core.h"
34#include "qom/object.h"
35#include "qom/object_interfaces.h"
36
/* Forward declarations: these are defined later in this file but are
 * referenced before their definitions (e.g. throttle_group_obj_complete()
 * is called from throttle_group_incref(), timer_cb() from
 * throttle_group_restart_tgm()). */
static void throttle_group_obj_init(Object *obj);
static void throttle_group_obj_complete(UserCreatable *obj, Error **errp);
static void timer_cb(ThrottleGroupMember *tgm, bool is_write);
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
/* The ThrottleGroup structure (with its ThrottleState) is shared
 * among different ThrottleGroupMembers and it's independent from
 * AioContext, so in order to use it from different threads it needs
 * its own locking.
 *
 * This locking is however handled internally in this file, so it's
 * transparent to outside users.
 */
typedef struct ThrottleGroup {
    Object parent_obj;

    /* refuse individual property changes after initialization */
    bool is_initialized;
    char *name; /* This is constant during the lifetime of the group */

    QemuMutex lock; /* This lock protects the following four fields */
    ThrottleState ts;
    QLIST_HEAD(, ThrottleGroupMember) head;
    ThrottleGroupMember *tokens[2];     /* current round-robin token, per direction (read=0, write=1) */
    bool any_timer_armed[2];            /* whether some member has a timer armed, per direction */
    QEMUClockType clock_type;

    /* NOTE(review): 'list' appears to be protected by the global QEMU
     * mutex rather than tg->lock — confirm against callers. */
    QTAILQ_ENTRY(ThrottleGroup) list;
} ThrottleGroup;
82
83
/* Global list of all existing, fully-initialized throttle groups
 * (entries are added in throttle_group_obj_complete() and removed in
 * throttle_group_obj_finalize()). */
static QTAILQ_HEAD(, ThrottleGroup) throttle_groups =
    QTAILQ_HEAD_INITIALIZER(throttle_groups);
86
87
88
89
90
91static ThrottleGroup *throttle_group_by_name(const char *name)
92{
93 ThrottleGroup *iter;
94
95
96 QTAILQ_FOREACH(iter, &throttle_groups, list) {
97 if (!g_strcmp0(name, iter->name)) {
98 return iter;
99 }
100 }
101
102 return NULL;
103}
104
105
106
107
108bool throttle_group_exists(const char *name)
109{
110 return throttle_group_by_name(name) != NULL;
111}
112
113
114
115
116
117
118
119
120
121
122
123
/* Increments the reference count of a ThrottleGroup given its name.
 *
 * If no ThrottleGroup is found with the given name a new one is
 * created.
 *
 * @name: the name of the ThrottleGroup
 * @ret:  the ThrottleState member of the ThrottleGroup
 */
ThrottleState *throttle_group_incref(const char *name)
{
    ThrottleGroup *tg = NULL;

    /* Look for an existing group with that name */
    tg = throttle_group_by_name(name);

    if (tg) {
        object_ref(OBJECT(tg));
    } else {
        /* Create a new one if not found; a freshly created QOM object
         * already starts with a reference count of 1 */
        tg = THROTTLE_GROUP(object_new(TYPE_THROTTLE_GROUP));
        tg->name = g_strdup(name);
        throttle_group_obj_complete(USER_CREATABLE(tg), &error_abort);
    }

    return &tg->ts;
}
143
144
145
146
147
148
149
150
151
152
153
154void throttle_group_unref(ThrottleState *ts)
155{
156 ThrottleGroup *tg = container_of(ts, ThrottleGroup, ts);
157 object_unref(OBJECT(tg));
158}
159
160
161
162
163
164
165
166const char *throttle_group_get_name(ThrottleGroupMember *tgm)
167{
168 ThrottleGroup *tg = container_of(tgm->throttle_state, ThrottleGroup, ts);
169 return tg->name;
170}
171
172
173
174
175
176
177
178
179
180static ThrottleGroupMember *throttle_group_next_tgm(ThrottleGroupMember *tgm)
181{
182 ThrottleState *ts = tgm->throttle_state;
183 ThrottleGroup *tg = container_of(ts, ThrottleGroup, ts);
184 ThrottleGroupMember *next = QLIST_NEXT(tgm, round_robin);
185
186 if (!next) {
187 next = QLIST_FIRST(&tg->head);
188 }
189
190 return next;
191}
192
193
194
195
196
197
198
199
200
201
202static inline bool tgm_has_pending_reqs(ThrottleGroupMember *tgm,
203 bool is_write)
204{
205 return tgm->pending_reqs[is_write];
206}
207
208
209
210
211
212
213
214
215
216
217
/* Return the next ThrottleGroupMember in the round-robin sequence that
 * has pending I/O requests of the given type.
 *
 * NOTE(review): all callers appear to hold tg->lock here — confirm.
 *
 * @tgm:      the current ThrottleGroupMember
 * @is_write: the type of operation (read/write)
 * @ret:      the next member with pending requests, or tgm if there is
 *            none
 */
static ThrottleGroupMember *next_throttle_token(ThrottleGroupMember *tgm,
                                                bool is_write)
{
    ThrottleState *ts = tgm->throttle_state;
    ThrottleGroup *tg = container_of(ts, ThrottleGroup, ts);
    ThrottleGroupMember *token, *start;

    /* If this member has its I/O limits disabled then skip the
     * round-robin search and return tgm immediately if it has pending
     * requests; otherwise we could be forcing it to wait for other
     * members' throttled requests. */
    if (tgm_has_pending_reqs(tgm, is_write) &&
        atomic_read(&tgm->io_limits_disabled)) {
        return tgm;
    }

    start = token = tg->tokens[is_write];

    /* Walk the round-robin sequence looking for a member with pending
     * requests, stopping once we are back at the starting point */
    token = throttle_group_next_tgm(token);
    while (token != start && !tgm_has_pending_reqs(token, is_write)) {
        token = throttle_group_next_tgm(token);
    }

    /* If no member in the group has queued I/O, pick the current tgm:
     * chances are the current tgm is the one about to queue a request
     * (see throttle_group_co_io_limits_intercept()). */
    if (token == start && !tgm_has_pending_reqs(token, is_write)) {
        token = tgm;
    }

    /* Either the token is the caller itself, or it must have pending
     * requests — anything else would break the round-robin invariant */
    assert(token == tgm || tgm_has_pending_reqs(token, is_write));

    return token;
}
255
256
257
258
259
260
261
262
263
264
265
/* Check if the next I/O request for a ThrottleGroupMember needs to be
 * throttled or not. If there's no timer set in this group, arm one and
 * make tgm the current token.
 *
 * NOTE(review): all callers appear to hold tg->lock here — confirm.
 *
 * @tgm:      the current ThrottleGroupMember
 * @is_write: the type of operation (read/write)
 * @ret:      whether the I/O request needs to be throttled or not
 */
static bool throttle_group_schedule_timer(ThrottleGroupMember *tgm,
                                          bool is_write)
{
    ThrottleState *ts = tgm->throttle_state;
    ThrottleGroup *tg = container_of(ts, ThrottleGroup, ts);
    ThrottleTimers *tt = &tgm->throttle_timers;
    bool must_wait;

    /* Members with limits disabled are never throttled */
    if (atomic_read(&tgm->io_limits_disabled)) {
        return false;
    }

    /* If another member of the group already armed a timer for this
     * direction, this request must wait behind it */
    if (tg->any_timer_armed[is_write]) {
        return true;
    }

    must_wait = throttle_schedule_timer(ts, tt, is_write);

    /* If a timer just got armed, make tgm the current token */
    if (must_wait) {
        tg->tokens[is_write] = tgm;
        tg->any_timer_armed[is_write] = true;
    }

    return must_wait;
}
293
294
295
296
297
298
299
300static bool coroutine_fn throttle_group_co_restart_queue(ThrottleGroupMember *tgm,
301 bool is_write)
302{
303 bool ret;
304
305 qemu_co_mutex_lock(&tgm->throttled_reqs_lock);
306 ret = qemu_co_queue_next(&tgm->throttled_reqs[is_write]);
307 qemu_co_mutex_unlock(&tgm->throttled_reqs_lock);
308
309 return ret;
310}
311
312
313
314
315
316
317
318
/* Look for the next pending I/O request and schedule it.
 *
 * NOTE(review): all callers appear to hold tg->lock here — confirm.
 *
 * @tgm:      the current ThrottleGroupMember
 * @is_write: the type of operation (read/write)
 */
static void schedule_next_request(ThrottleGroupMember *tgm, bool is_write)
{
    ThrottleState *ts = tgm->throttle_state;
    ThrottleGroup *tg = container_of(ts, ThrottleGroup, ts);
    bool must_wait;
    ThrottleGroupMember *token;

    /* Check if there's any pending request to schedule next */
    token = next_throttle_token(tgm, is_write);
    if (!tgm_has_pending_reqs(token, is_write)) {
        return;
    }

    /* Set a timer for the request if it needs to be throttled */
    must_wait = throttle_group_schedule_timer(token, is_write);

    /* If it doesn't have to wait, queue it for immediate execution */
    if (!must_wait) {
        /* Give preference to requests from the current tgm: when we are
         * already in a coroutine we can wake the next queued request
         * directly, otherwise fire the token's timer right now */
        if (qemu_in_coroutine() &&
            throttle_group_co_restart_queue(tgm, is_write)) {
            token = tgm;
        } else {
            ThrottleTimers *tt = &token->throttle_timers;
            int64_t now = qemu_clock_get_ns(tg->clock_type);
            timer_mod(tt->timers[is_write], now);
            tg->any_timer_armed[is_write] = true;
        }
        tg->tokens[is_write] = token;
    }
}
350
351
352
353
354
355
356
357
358
/* Check if an I/O request needs to be throttled, wait and set a timer
 * if necessary, and schedule the next request using a round robin
 * algorithm.
 *
 * @tgm:      the current ThrottleGroupMember
 * @bytes:    the number of bytes for this I/O
 * @is_write: the type of operation (read/write)
 */
void coroutine_fn throttle_group_co_io_limits_intercept(ThrottleGroupMember *tgm,
                                                        unsigned int bytes,
                                                        bool is_write)
{
    bool must_wait;
    ThrottleGroupMember *token;
    ThrottleGroup *tg = container_of(tgm->throttle_state, ThrottleGroup, ts);
    qemu_mutex_lock(&tg->lock);

    /* First we check if this I/O has to be throttled. */
    token = next_throttle_token(tgm, is_write);
    must_wait = throttle_group_schedule_timer(token, is_write);

    /* Wait if there's a timer set or queued requests of this type.
     * tg->lock is dropped while the coroutine sleeps on the queue and
     * re-taken once it is woken up. */
    if (must_wait || tgm->pending_reqs[is_write]) {
        tgm->pending_reqs[is_write]++;
        qemu_mutex_unlock(&tg->lock);
        qemu_co_mutex_lock(&tgm->throttled_reqs_lock);
        qemu_co_queue_wait(&tgm->throttled_reqs[is_write],
                           &tgm->throttled_reqs_lock);
        qemu_co_mutex_unlock(&tgm->throttled_reqs_lock);
        qemu_mutex_lock(&tg->lock);
        tgm->pending_reqs[is_write]--;
    }

    /* The I/O will be executed, so do the accounting */
    throttle_account(tgm->throttle_state, is_write, bytes);

    /* Schedule the next request */
    schedule_next_request(tgm, is_write);

    qemu_mutex_unlock(&tg->lock);
}
392
/* Arguments for throttle_group_restart_queue_entry(); allocated by
 * throttle_group_restart_queue() and freed by the coroutine when done. */
typedef struct {
    ThrottleGroupMember *tgm;
    bool is_write;
} RestartData;
397
/* Coroutine entry point: restart the first throttled request of the
 * member, and if its queue turned out to be empty, hand the turn over
 * by scheduling the next member's request.
 *
 * Takes ownership of @opaque (a RestartData) and frees it on exit.
 */
static void coroutine_fn throttle_group_restart_queue_entry(void *opaque)
{
    RestartData *data = opaque;
    ThrottleGroupMember *tgm = data->tgm;
    ThrottleState *ts = tgm->throttle_state;
    ThrottleGroup *tg = container_of(ts, ThrottleGroup, ts);
    bool is_write = data->is_write;
    bool empty_queue;

    empty_queue = !throttle_group_co_restart_queue(tgm, is_write);

    /* If the request queue was empty then we have to take care of
     * scheduling the next one */
    if (empty_queue) {
        qemu_mutex_lock(&tg->lock);
        schedule_next_request(tgm, is_write);
        qemu_mutex_unlock(&tg->lock);
    }

    g_free(data);
}
419
/* Spawn a coroutine in the member's AioContext that restarts its
 * throttled request queue.
 *
 * @tgm:      the ThrottleGroupMember whose queue to restart
 * @is_write: the type of operation (read/write)
 */
static void throttle_group_restart_queue(ThrottleGroupMember *tgm, bool is_write)
{
    Coroutine *co;
    RestartData *rd = g_new0(RestartData, 1);

    rd->tgm = tgm;
    rd->is_write = is_write;

    /* This function is called when a timer is fired or when
     * throttle_group_restart_tgm() is called. Either way, there can
     * be no timer pending on this tgm at this point */
    assert(!timer_pending(tgm->throttle_timers.timers[is_write]));

    co = qemu_coroutine_create(throttle_group_restart_queue_entry, rd);
    aio_co_enter(tgm->aio_context, co);
}
436
/* Restart the throttled request queues (read and write) of a
 * ThrottleGroupMember, firing any pending timer synchronously first.
 * No-op if the member is not registered in a group.
 *
 * @tgm: the ThrottleGroupMember to restart
 */
void throttle_group_restart_tgm(ThrottleGroupMember *tgm)
{
    int i;

    if (tgm->throttle_state) {
        /* i == 0 is the read direction, i == 1 the write direction */
        for (i = 0; i < 2; i++) {
            QEMUTimer *t = tgm->throttle_timers.timers[i];
            if (timer_pending(t)) {
                /* If there's a timer set, firing its callback now will
                 * restart the queue */
                timer_del(t);
                timer_cb(tgm, i);
            } else {
                /* Else run the restart in a coroutine directly */
                throttle_group_restart_queue(tgm, i);
            }
        }
    }
}
455
456
457
458
459
460
461
462
/* Update the throttle configuration for a particular group. Similar to
 * throttle_config(), but takes the group's lock so the update is atomic
 * within the throttling group. Afterwards the member's queues are
 * restarted so the new limits take effect.
 *
 * @tgm: a ThrottleGroupMember that is a member of the group
 * @cfg: the configuration to set
 */
void throttle_group_config(ThrottleGroupMember *tgm, ThrottleConfig *cfg)
{
    ThrottleState *ts = tgm->throttle_state;
    ThrottleGroup *tg = container_of(ts, ThrottleGroup, ts);
    qemu_mutex_lock(&tg->lock);
    throttle_config(ts, tg->clock_type, cfg);
    qemu_mutex_unlock(&tg->lock);

    /* Restart queues outside tg->lock (throttle_group_restart_tgm()
     * re-takes it internally via timer_cb/schedule_next_request) */
    throttle_group_restart_tgm(tgm);
}
473
474
475
476
477
478
479
480
/* Get the throttle configuration from a particular group. Similar to
 * throttle_get_config(), but guarantees atomicity within the
 * throttling group by taking its lock.
 *
 * @tgm: a ThrottleGroupMember that is a member of the group
 * @cfg: the configuration will be written here
 */
void throttle_group_get_config(ThrottleGroupMember *tgm, ThrottleConfig *cfg)
{
    ThrottleState *ts = tgm->throttle_state;
    ThrottleGroup *tg = container_of(ts, ThrottleGroup, ts);
    qemu_mutex_lock(&tg->lock);
    throttle_get_config(ts, cfg);
    qemu_mutex_unlock(&tg->lock);
}
489
490
491
492
493
494
495
/* Timer callback. This wakes up a request that was waiting because it
 * had been throttled.
 *
 * @tgm:      the ThrottleGroupMember whose request had been throttled
 * @is_write: the type of operation (read/write)
 */
static void timer_cb(ThrottleGroupMember *tgm, bool is_write)
{
    ThrottleState *ts = tgm->throttle_state;
    ThrottleGroup *tg = container_of(ts, ThrottleGroup, ts);

    /* The timer has just fired, so its direction no longer has a timer
     * armed; update the flag under the group lock */
    qemu_mutex_lock(&tg->lock);
    tg->any_timer_armed[is_write] = false;
    qemu_mutex_unlock(&tg->lock);

    /* Run the request that was waiting for this timer */
    throttle_group_restart_queue(tgm, is_write);
}
509
510static void read_timer_cb(void *opaque)
511{
512 timer_cb(opaque, false);
513}
514
515static void write_timer_cb(void *opaque)
516{
517 timer_cb(opaque, true);
518}
519
520
521
522
523
524
525
526
527
528
529
530
/* Register a ThrottleGroupMember in the throttling group, also
 * initializing its timers and updating its throttle_state pointer to
 * point to the group's state. If a throttling group with that name does
 * not exist yet, it will be created (see throttle_group_incref()).
 *
 * @tgm:       the ThrottleGroupMember to insert
 * @groupname: the name of the group
 * @ctx:       the AioContext to use
 */
void throttle_group_register_tgm(ThrottleGroupMember *tgm,
                                 const char *groupname,
                                 AioContext *ctx)
{
    int i;
    ThrottleState *ts = throttle_group_incref(groupname);
    ThrottleGroup *tg = container_of(ts, ThrottleGroup, ts);

    tgm->throttle_state = ts;
    tgm->aio_context = ctx;

    qemu_mutex_lock(&tg->lock);
    /* If the group has no token holder yet (i.e. it is new), make this
     * member the token for both directions */
    for (i = 0; i < 2; i++) {
        if (!tg->tokens[i]) {
            tg->tokens[i] = tgm;
        }
    }

    QLIST_INSERT_HEAD(&tg->head, tgm, round_robin);

    throttle_timers_init(&tgm->throttle_timers,
                         tgm->aio_context,
                         tg->clock_type,
                         read_timer_cb,
                         write_timer_cb,
                         tgm);
    qemu_co_mutex_init(&tgm->throttled_reqs_lock);
    qemu_co_queue_init(&tgm->throttled_reqs[0]);
    qemu_co_queue_init(&tgm->throttled_reqs[1]);

    qemu_mutex_unlock(&tg->lock);
}
564
565
566
567
568
569
570
571
572
573
574
575void throttle_group_unregister_tgm(ThrottleGroupMember *tgm)
576{
577 ThrottleState *ts = tgm->throttle_state;
578 ThrottleGroup *tg = container_of(ts, ThrottleGroup, ts);
579 ThrottleGroupMember *token;
580 int i;
581
582 if (!ts) {
583
584 return;
585 }
586
587 qemu_mutex_lock(&tg->lock);
588 for (i = 0; i < 2; i++) {
589 assert(tgm->pending_reqs[i] == 0);
590 assert(qemu_co_queue_empty(&tgm->throttled_reqs[i]));
591 assert(!timer_pending(tgm->throttle_timers.timers[i]));
592 if (tg->tokens[i] == tgm) {
593 token = throttle_group_next_tgm(tgm);
594
595 if (token == tgm) {
596 token = NULL;
597 }
598 tg->tokens[i] = token;
599 }
600 }
601
602
603 QLIST_REMOVE(tgm, round_robin);
604 throttle_timers_destroy(&tgm->throttle_timers);
605 qemu_mutex_unlock(&tg->lock);
606
607 throttle_group_unref(&tg->ts);
608 tgm->throttle_state = NULL;
609}
610
611void throttle_group_attach_aio_context(ThrottleGroupMember *tgm,
612 AioContext *new_context)
613{
614 ThrottleTimers *tt = &tgm->throttle_timers;
615 throttle_timers_attach_aio_context(tt, new_context);
616 tgm->aio_context = new_context;
617}
618
/* Detach the member's throttle timers from its AioContext. If a timer
 * is currently armed, its turn is handed over to the next member with
 * pending requests before detaching.
 *
 * The member must have no pending throttled requests (asserted below),
 * so the caller has to drain them first.
 *
 * @tgm: the ThrottleGroupMember
 */
void throttle_group_detach_aio_context(ThrottleGroupMember *tgm)
{
    ThrottleGroup *tg = container_of(tgm->throttle_state, ThrottleGroup, ts);
    ThrottleTimers *tt = &tgm->throttle_timers;
    int i;

    /* Requests must have been drained */
    assert(tgm->pending_reqs[0] == 0 && tgm->pending_reqs[1] == 0);
    assert(qemu_co_queue_empty(&tgm->throttled_reqs[0]));
    assert(qemu_co_queue_empty(&tgm->throttled_reqs[1]));

    /* Kick off the next ThrottleGroupMember, if necessary */
    qemu_mutex_lock(&tg->lock);
    for (i = 0; i < 2; i++) {
        if (timer_pending(tt->timers[i])) {
            tg->any_timer_armed[i] = false;
            schedule_next_request(tgm, i);
        }
    }
    qemu_mutex_unlock(&tg->lock);

    throttle_timers_detach_aio_context(tt);
    tgm->aio_context = NULL;
}
643
/* Prefix applied to every property name registered from properties[]
 * below. NOTE(review): the "x-" prefix presumably marks these QOM
 * properties as experimental per QEMU convention — confirm. */
#undef THROTTLE_OPT_PREFIX
#define THROTTLE_OPT_PREFIX "x-"
646
647
/* Maps a QOM property name to the throttling bucket it configures and
 * to which aspect of that bucket it sets (average rate, burst maximum,
 * burst length, or the per-operation size for iops accounting). */
typedef struct {
    const char *name;
    BucketType type;
    enum {
        AVG,
        MAX,
        BURST_LENGTH,
        IOPS_SIZE,
    } category;
} ThrottleParamInfo;
658
/* One entry per throttle option exposed as a QOM property; iterated in
 * throttle_group_obj_class_init() to register the properties, and
 * passed as the opaque to throttle_group_get()/throttle_group_set(). */
static ThrottleParamInfo properties[] = {
    {
        THROTTLE_OPT_PREFIX QEMU_OPT_IOPS_TOTAL,
        THROTTLE_OPS_TOTAL, AVG,
    },
    {
        THROTTLE_OPT_PREFIX QEMU_OPT_IOPS_TOTAL_MAX,
        THROTTLE_OPS_TOTAL, MAX,
    },
    {
        THROTTLE_OPT_PREFIX QEMU_OPT_IOPS_TOTAL_MAX_LENGTH,
        THROTTLE_OPS_TOTAL, BURST_LENGTH,
    },
    {
        THROTTLE_OPT_PREFIX QEMU_OPT_IOPS_READ,
        THROTTLE_OPS_READ, AVG,
    },
    {
        THROTTLE_OPT_PREFIX QEMU_OPT_IOPS_READ_MAX,
        THROTTLE_OPS_READ, MAX,
    },
    {
        THROTTLE_OPT_PREFIX QEMU_OPT_IOPS_READ_MAX_LENGTH,
        THROTTLE_OPS_READ, BURST_LENGTH,
    },
    {
        THROTTLE_OPT_PREFIX QEMU_OPT_IOPS_WRITE,
        THROTTLE_OPS_WRITE, AVG,
    },
    {
        THROTTLE_OPT_PREFIX QEMU_OPT_IOPS_WRITE_MAX,
        THROTTLE_OPS_WRITE, MAX,
    },
    {
        THROTTLE_OPT_PREFIX QEMU_OPT_IOPS_WRITE_MAX_LENGTH,
        THROTTLE_OPS_WRITE, BURST_LENGTH,
    },
    {
        THROTTLE_OPT_PREFIX QEMU_OPT_BPS_TOTAL,
        THROTTLE_BPS_TOTAL, AVG,
    },
    {
        THROTTLE_OPT_PREFIX QEMU_OPT_BPS_TOTAL_MAX,
        THROTTLE_BPS_TOTAL, MAX,
    },
    {
        THROTTLE_OPT_PREFIX QEMU_OPT_BPS_TOTAL_MAX_LENGTH,
        THROTTLE_BPS_TOTAL, BURST_LENGTH,
    },
    {
        THROTTLE_OPT_PREFIX QEMU_OPT_BPS_READ,
        THROTTLE_BPS_READ, AVG,
    },
    {
        THROTTLE_OPT_PREFIX QEMU_OPT_BPS_READ_MAX,
        THROTTLE_BPS_READ, MAX,
    },
    {
        THROTTLE_OPT_PREFIX QEMU_OPT_BPS_READ_MAX_LENGTH,
        THROTTLE_BPS_READ, BURST_LENGTH,
    },
    {
        THROTTLE_OPT_PREFIX QEMU_OPT_BPS_WRITE,
        THROTTLE_BPS_WRITE, AVG,
    },
    {
        THROTTLE_OPT_PREFIX QEMU_OPT_BPS_WRITE_MAX,
        THROTTLE_BPS_WRITE, MAX,
    },
    {
        THROTTLE_OPT_PREFIX QEMU_OPT_BPS_WRITE_MAX_LENGTH,
        THROTTLE_BPS_WRITE, BURST_LENGTH,
    },
    {
        /* iops-size has no bucket of its own; type 0 is unused here */
        THROTTLE_OPT_PREFIX QEMU_OPT_IOPS_SIZE,
        0, IOPS_SIZE,
    }
};
737
738
739
/* QOM instance init: set defaults for a newly allocated ThrottleGroup.
 * The group is not usable until throttle_group_obj_complete() runs. */
static void throttle_group_obj_init(Object *obj)
{
    ThrottleGroup *tg = THROTTLE_GROUP(obj);

    tg->clock_type = QEMU_CLOCK_REALTIME;
    if (qtest_enabled()) {
        /* Use the virtual clock under qtest so block I/O throttling
         * can be tested deterministically */
        tg->clock_type = QEMU_CLOCK_VIRTUAL;
    }
    tg->is_initialized = false;
    qemu_mutex_init(&tg->lock);
    throttle_init(&tg->ts);
    QLIST_INIT(&tg->head);
}
754
755
756
/* UserCreatable completion hook: derive the group name if needed,
 * validate the configured limits and insert the group into the global
 * throttle_groups list. On failure, sets errp and leaves the group
 * unregistered (is_initialized stays false). */
static void throttle_group_obj_complete(UserCreatable *obj, Error **errp)
{
    ThrottleGroup *tg = THROTTLE_GROUP(obj);
    ThrottleConfig cfg;

    /* set group name to object id if it exists */
    if (!tg->name && tg->parent_obj.parent) {
        tg->name = object_get_canonical_path_component(OBJECT(obj));
    }
    /* We must have a group name at this point */
    assert(tg->name);

    /* error if name is duplicate */
    if (throttle_group_exists(tg->name)) {
        error_setg(errp, "A group with this name already exists");
        return;
    }

    /* check the validity of the config accumulated by the individual
     * property setters before activating it */
    throttle_get_config(&tg->ts, &cfg);
    if (!throttle_is_valid(&cfg, errp)) {
        return;
    }
    throttle_config(&tg->ts, tg->clock_type, &cfg);
    QTAILQ_INSERT_TAIL(&throttle_groups, tg, list);
    tg->is_initialized = true;
}
784
785
786
/* QOM instance finalize: undo init/complete. Runs when the last
 * reference to the group is dropped (see throttle_group_unref()). */
static void throttle_group_obj_finalize(Object *obj)
{
    ThrottleGroup *tg = THROTTLE_GROUP(obj);
    /* Only complete()d groups are on the global list */
    if (tg->is_initialized) {
        QTAILQ_REMOVE(&throttle_groups, tg, list);
    }
    qemu_mutex_destroy(&tg->lock);
    g_free(tg->name);
}
796
797static void throttle_group_set(Object *obj, Visitor *v, const char * name,
798 void *opaque, Error **errp)
799
800{
801 ThrottleGroup *tg = THROTTLE_GROUP(obj);
802 ThrottleConfig *cfg;
803 ThrottleParamInfo *info = opaque;
804 Error *local_err = NULL;
805 int64_t value;
806
807
808
809
810
811 if (tg->is_initialized) {
812 error_setg(&local_err, "Property cannot be set after initialization");
813 goto ret;
814 }
815
816 visit_type_int64(v, name, &value, &local_err);
817 if (local_err) {
818 goto ret;
819 }
820 if (value < 0) {
821 error_setg(&local_err, "Property values cannot be negative");
822 goto ret;
823 }
824
825 cfg = &tg->ts.cfg;
826 switch (info->category) {
827 case AVG:
828 cfg->buckets[info->type].avg = value;
829 break;
830 case MAX:
831 cfg->buckets[info->type].max = value;
832 break;
833 case BURST_LENGTH:
834 if (value > UINT_MAX) {
835 error_setg(&local_err, "%s value must be in the"
836 "range [0, %u]", info->name, UINT_MAX);
837 goto ret;
838 }
839 cfg->buckets[info->type].burst_length = value;
840 break;
841 case IOPS_SIZE:
842 cfg->op_size = value;
843 break;
844 }
845
846ret:
847 error_propagate(errp, local_err);
848 return;
849
850}
851
/* QOM property getter for the individual throttle options (opaque is
 * the matching ThrottleParamInfo from properties[]). Reads the value
 * from the current throttle configuration and visits it as an int64. */
static void throttle_group_get(Object *obj, Visitor *v, const char *name,
                               void *opaque, Error **errp)
{
    ThrottleGroup *tg = THROTTLE_GROUP(obj);
    ThrottleConfig cfg;
    ThrottleParamInfo *info = opaque;
    int64_t value;

    throttle_get_config(&tg->ts, &cfg);
    switch (info->category) {
    case AVG:
        value = cfg.buckets[info->type].avg;
        break;
    case MAX:
        value = cfg.buckets[info->type].max;
        break;
    case BURST_LENGTH:
        value = cfg.buckets[info->type].burst_length;
        break;
    case IOPS_SIZE:
        value = cfg.op_size;
        break;
    }

    visit_type_int64(v, name, &value, errp);
}
878
/* QOM setter for the "limits" property: replace the whole throttle
 * configuration in one transaction under tg->lock. Unlike the
 * individual property setters, this works after initialization too. */
static void throttle_group_set_limits(Object *obj, Visitor *v,
                                      const char *name, void *opaque,
                                      Error **errp)

{
    ThrottleGroup *tg = THROTTLE_GROUP(obj);
    ThrottleConfig cfg;
    ThrottleLimits arg = { 0 };
    ThrottleLimits *argp = &arg;
    Error *local_err = NULL;

    visit_type_ThrottleLimits(v, name, &argp, &local_err);
    if (local_err) {
        goto ret;
    }
    qemu_mutex_lock(&tg->lock);
    /* Apply the visited limits on top of the current configuration and
     * only commit it if the combination is valid */
    throttle_get_config(&tg->ts, &cfg);
    throttle_limits_to_config(argp, &cfg, &local_err);
    if (local_err) {
        goto unlock;
    }
    throttle_config(&tg->ts, tg->clock_type, &cfg);

unlock:
    qemu_mutex_unlock(&tg->lock);
ret:
    error_propagate(errp, local_err);
    return;
}
908
/* QOM getter for the "limits" property: read the current throttle
 * configuration under tg->lock and visit it as a ThrottleLimits. */
static void throttle_group_get_limits(Object *obj, Visitor *v,
                                      const char *name, void *opaque,
                                      Error **errp)
{
    ThrottleGroup *tg = THROTTLE_GROUP(obj);
    ThrottleConfig cfg;
    ThrottleLimits arg = { 0 };
    ThrottleLimits *argp = &arg;

    qemu_mutex_lock(&tg->lock);
    throttle_get_config(&tg->ts, &cfg);
    qemu_mutex_unlock(&tg->lock);

    throttle_config_to_limits(&cfg, argp);

    visit_type_ThrottleLimits(v, name, &argp, errp);
}
926
927static bool throttle_group_can_be_deleted(UserCreatable *uc)
928{
929 return OBJECT(uc)->ref == 1;
930}
931
932static void throttle_group_obj_class_init(ObjectClass *klass, void *class_data)
933{
934 size_t i = 0;
935 UserCreatableClass *ucc = USER_CREATABLE_CLASS(klass);
936
937 ucc->complete = throttle_group_obj_complete;
938 ucc->can_be_deleted = throttle_group_can_be_deleted;
939
940
941 for (i = 0; i < sizeof(properties) / sizeof(ThrottleParamInfo); i++) {
942 object_class_property_add(klass,
943 properties[i].name,
944 "int",
945 throttle_group_get,
946 throttle_group_set,
947 NULL, &properties[i],
948 &error_abort);
949 }
950
951
952 object_class_property_add(klass,
953 "limits", "ThrottleLimits",
954 throttle_group_get_limits,
955 throttle_group_set_limits,
956 NULL, NULL,
957 &error_abort);
958}
959
/* QOM type registration info for throttle-group objects */
static const TypeInfo throttle_group_info = {
    .name = TYPE_THROTTLE_GROUP,
    .parent = TYPE_OBJECT,
    .class_init = throttle_group_obj_class_init,
    .instance_size = sizeof(ThrottleGroup),
    .instance_init = throttle_group_obj_init,
    .instance_finalize = throttle_group_obj_finalize,
    .interfaces = (InterfaceInfo[]) {
        { TYPE_USER_CREATABLE },
        { }
    },
};
972
/* Register the throttle-group QOM type at program startup */
static void throttle_groups_init(void)
{
    type_register_static(&throttle_group_info);
}

type_init(throttle_groups_init);
979