1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25#include "qemu/osdep.h"
26#include "sysemu/block-backend.h"
27#include "block/throttle-groups.h"
28#include "qemu/throttle-options.h"
29#include "qemu/queue.h"
30#include "qemu/thread.h"
31#include "sysemu/qtest.h"
32#include "qapi/error.h"
33#include "qapi-visit.h"
34#include "qom/object.h"
35#include "qom/object_interfaces.h"
36
/* Forward declarations: throttle_group_incref() creates anonymous groups and
 * needs to complete them before the QOM machinery would normally do so. */
static void throttle_group_obj_init(Object *obj);
static void throttle_group_obj_complete(UserCreatable *obj, Error **errp);
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
/* A ThrottleGroup is a QOM object: a set of ThrottleGroupMembers that share
 * I/O limits (ThrottleState) and a round-robin schedule of pending requests. */
typedef struct ThrottleGroup {
    Object parent_obj;

    /* refuse individual property changes once initialization is complete */
    bool is_initialized;
    char *name; /* freed in throttle_group_obj_finalize() */

    QemuMutex lock; /* NOTE(review): appears to protect ts/head/tokens/
                     * any_timer_armed below — confirm against callers */
    ThrottleState ts;
    QLIST_HEAD(, ThrottleGroupMember) head; /* round-robin member list */
    ThrottleGroupMember *tokens[2];         /* current token, [0]=read [1]=write */
    bool any_timer_armed[2];                /* at most one timer armed per type */
    QEMUClockType clock_type;               /* chosen in throttle_group_obj_init() */

    /* entry in the global throttle_groups list */
    QTAILQ_ENTRY(ThrottleGroup) list;
} ThrottleGroup;
81
82
/* List of all registered throttle groups.
 * NOTE(review): no dedicated lock is visible here; presumably protected by
 * the global QEMU mutex — confirm. */
static QTAILQ_HEAD(, ThrottleGroup) throttle_groups =
    QTAILQ_HEAD_INITIALIZER(throttle_groups);
85
86
87
88
89
90static ThrottleGroup *throttle_group_by_name(const char *name)
91{
92 ThrottleGroup *iter;
93
94
95 QTAILQ_FOREACH(iter, &throttle_groups, list) {
96 if (!g_strcmp0(name, iter->name)) {
97 return iter;
98 }
99 }
100
101 return NULL;
102}
103
104
105
106
107bool throttle_group_exists(const char *name)
108{
109 return throttle_group_by_name(name) != NULL;
110}
111
112
113
114
115
116
117
118
119
120
121
122
123ThrottleState *throttle_group_incref(const char *name)
124{
125 ThrottleGroup *tg = NULL;
126
127
128 tg = throttle_group_by_name(name);
129
130 if (tg) {
131 object_ref(OBJECT(tg));
132 } else {
133
134
135 tg = THROTTLE_GROUP(object_new(TYPE_THROTTLE_GROUP));
136 tg->name = g_strdup(name);
137 throttle_group_obj_complete(USER_CREATABLE(tg), &error_abort);
138 }
139
140 return &tg->ts;
141}
142
143
144
145
146
147
148
149
150
151
152
153void throttle_group_unref(ThrottleState *ts)
154{
155 ThrottleGroup *tg = container_of(ts, ThrottleGroup, ts);
156 object_unref(OBJECT(tg));
157}
158
159
160
161
162
163
164
165const char *throttle_group_get_name(ThrottleGroupMember *tgm)
166{
167 ThrottleGroup *tg = container_of(tgm->throttle_state, ThrottleGroup, ts);
168 return tg->name;
169}
170
171
172
173
174
175
176
177
178
179static ThrottleGroupMember *throttle_group_next_tgm(ThrottleGroupMember *tgm)
180{
181 ThrottleState *ts = tgm->throttle_state;
182 ThrottleGroup *tg = container_of(ts, ThrottleGroup, ts);
183 ThrottleGroupMember *next = QLIST_NEXT(tgm, round_robin);
184
185 if (!next) {
186 next = QLIST_FIRST(&tg->head);
187 }
188
189 return next;
190}
191
192
193
194
195
196
197
198
199
200
201static inline bool tgm_has_pending_reqs(ThrottleGroupMember *tgm,
202 bool is_write)
203{
204 return tgm->pending_reqs[is_write];
205}
206
207
208
209
210
211
212
213
214
215
216
217static ThrottleGroupMember *next_throttle_token(ThrottleGroupMember *tgm,
218 bool is_write)
219{
220 ThrottleState *ts = tgm->throttle_state;
221 ThrottleGroup *tg = container_of(ts, ThrottleGroup, ts);
222 ThrottleGroupMember *token, *start;
223
224 start = token = tg->tokens[is_write];
225
226
227 token = throttle_group_next_tgm(token);
228 while (token != start && !tgm_has_pending_reqs(token, is_write)) {
229 token = throttle_group_next_tgm(token);
230 }
231
232
233
234
235
236 if (token == start && !tgm_has_pending_reqs(token, is_write)) {
237 token = tgm;
238 }
239
240
241 assert(token == tgm || tgm_has_pending_reqs(token, is_write));
242
243 return token;
244}
245
246
247
248
249
250
251
252
253
254
255
/* Check whether the next request for this member has to wait, arming the
 * member's throttle timer if so.
 *
 * If another member of the group already has a timer armed for this request
 * type, this one must wait too (at most one timer per type is armed in the
 * whole group — see any_timer_armed[]).
 *
 * NOTE(review): callers appear to hold tg->lock when invoking this — confirm.
 *
 * @tgm:      the current ThrottleGroupMember
 * @is_write: the type of operation (read/write)
 * @ret:      whether the request must wait
 */
static bool throttle_group_schedule_timer(ThrottleGroupMember *tgm,
                                          bool is_write)
{
    ThrottleState *ts = tgm->throttle_state;
    ThrottleGroup *tg = container_of(ts, ThrottleGroup, ts);
    ThrottleTimers *tt = &tgm->throttle_timers;
    bool must_wait;

    /* Throttling disabled for this member: never wait */
    if (atomic_read(&tgm->io_limits_disabled)) {
        return false;
    }

    /* Some other member already armed a timer for this type: wait */
    if (tg->any_timer_armed[is_write]) {
        return true;
    }

    must_wait = throttle_schedule_timer(ts, tt, is_write);

    /* If a timer was just armed, this member becomes the token holder */
    if (must_wait) {
        tg->tokens[is_write] = tgm;
        tg->any_timer_armed[is_write] = true;
    }

    return must_wait;
}
283
284
285
286
287
288
289
290static bool coroutine_fn throttle_group_co_restart_queue(ThrottleGroupMember *tgm,
291 bool is_write)
292{
293 bool ret;
294
295 qemu_co_mutex_lock(&tgm->throttled_reqs_lock);
296 ret = qemu_co_queue_next(&tgm->throttled_reqs[is_write]);
297 qemu_co_mutex_unlock(&tgm->throttled_reqs_lock);
298
299 return ret;
300}
301
302
303
304
305
306
307
308
/* Look for the next pending request in the group and schedule it: either it
 * must keep waiting (a timer gets armed via throttle_group_schedule_timer())
 * or it can run now, in which case the queue is kicked immediately — from a
 * coroutine directly, otherwise by firing the token's timer right away.
 *
 * NOTE(review): callers appear to hold tg->lock — confirm.
 *
 * @tgm:      the current ThrottleGroupMember
 * @is_write: the type of operation (read/write)
 */
static void schedule_next_request(ThrottleGroupMember *tgm, bool is_write)
{
    ThrottleState *ts = tgm->throttle_state;
    ThrottleGroup *tg = container_of(ts, ThrottleGroup, ts);
    bool must_wait;
    ThrottleGroupMember *token;

    /* Nothing to schedule if nobody has pending requests */
    token = next_throttle_token(tgm, is_write);
    if (!tgm_has_pending_reqs(token, is_write)) {
        return;
    }

    /* See if the token's request has to wait (this may arm a timer) */
    must_wait = throttle_group_schedule_timer(token, is_write);

    /* If it doesn't have to wait, run it now */
    if (!must_wait) {
        /* In a coroutine, try to wake the caller's own queue first */
        if (qemu_in_coroutine() &&
            throttle_group_co_restart_queue(tgm, is_write)) {
            token = tgm;
        } else {
            /* Fire the token's timer immediately to restart its queue */
            ThrottleTimers *tt = &token->throttle_timers;
            int64_t now = qemu_clock_get_ns(tg->clock_type);
            timer_mod(tt->timers[is_write], now);
            tg->any_timer_armed[is_write] = true;
        }
        tg->tokens[is_write] = token;
    }
}
340
341
342
343
344
345
346
347
348
/* Throttle an I/O request before it is submitted: if the request must wait
 * (or other requests from this member are already queued), park the calling
 * coroutine on the member's throttled-request queue until it is woken up.
 * Then account for the I/O and schedule the next pending request.
 *
 * Note that tg->lock is dropped while the coroutine sleeps and re-taken
 * afterwards; throttled_reqs_lock protects the coroutine queue itself.
 *
 * @tgm:      the current ThrottleGroupMember
 * @bytes:    the number of bytes for this I/O
 * @is_write: the type of operation (read/write)
 */
void coroutine_fn throttle_group_co_io_limits_intercept(ThrottleGroupMember *tgm,
                                                        unsigned int bytes,
                                                        bool is_write)
{
    bool must_wait;
    ThrottleGroupMember *token;
    ThrottleGroup *tg = container_of(tgm->throttle_state, ThrottleGroup, ts);
    qemu_mutex_lock(&tg->lock);

    /* First we check if this I/O has to be throttled. */
    token = next_throttle_token(tgm, is_write);
    must_wait = throttle_group_schedule_timer(token, is_write);

    /* Wait if there's a timer set or queued requests of this type */
    if (must_wait || tgm->pending_reqs[is_write]) {
        tgm->pending_reqs[is_write]++;
        qemu_mutex_unlock(&tg->lock);
        qemu_co_mutex_lock(&tgm->throttled_reqs_lock);
        qemu_co_queue_wait(&tgm->throttled_reqs[is_write],
                           &tgm->throttled_reqs_lock);
        qemu_co_mutex_unlock(&tgm->throttled_reqs_lock);
        qemu_mutex_lock(&tg->lock);
        tgm->pending_reqs[is_write]--;
    }

    /* The I/O will be executed, so do the accounting */
    throttle_account(tgm->throttle_state, is_write, bytes);

    /* Schedule the next request */
    schedule_next_request(tgm, is_write);

    qemu_mutex_unlock(&tg->lock);
}
382
/* Argument for throttle_group_restart_queue_entry(); freed by the coroutine */
typedef struct {
    ThrottleGroupMember *tgm;
    bool is_write;
} RestartData;
387
/* Coroutine entry point that restarts a member's throttled-request queue.
 *
 * If the queue turned out to be empty (no coroutine was woken), look for the
 * next pending request elsewhere in the group instead. Frees the RestartData
 * allocated by throttle_group_restart_queue().
 *
 * @opaque: a RestartData* describing the member and request type
 */
static void coroutine_fn throttle_group_restart_queue_entry(void *opaque)
{
    RestartData *data = opaque;
    ThrottleGroupMember *tgm = data->tgm;
    ThrottleState *ts = tgm->throttle_state;
    ThrottleGroup *tg = container_of(ts, ThrottleGroup, ts);
    bool is_write = data->is_write;
    bool empty_queue;

    empty_queue = !throttle_group_co_restart_queue(tgm, is_write);

    /* If the request queue was empty then we have to take care of
     * scheduling the next one */
    if (empty_queue) {
        qemu_mutex_lock(&tg->lock);
        schedule_next_request(tgm, is_write);
        qemu_mutex_unlock(&tg->lock);
    }

    g_free(data);
}
409
410static void throttle_group_restart_queue(ThrottleGroupMember *tgm, bool is_write)
411{
412 Coroutine *co;
413 RestartData *rd = g_new0(RestartData, 1);
414
415 rd->tgm = tgm;
416 rd->is_write = is_write;
417
418 co = qemu_coroutine_create(throttle_group_restart_queue_entry, rd);
419 aio_co_enter(tgm->aio_context, co);
420}
421
422void throttle_group_restart_tgm(ThrottleGroupMember *tgm)
423{
424 if (tgm->throttle_state) {
425 throttle_group_restart_queue(tgm, 0);
426 throttle_group_restart_queue(tgm, 1);
427 }
428}
429
430
431
432
433
434
435
436
/* Update the throttle configuration for the whole group that this member
 * belongs to, then restart the member's queues so requests blocked under the
 * old limits get re-evaluated. The restart happens after tg->lock is dropped.
 *
 * @tgm: a ThrottleGroupMember of the group to reconfigure
 * @cfg: the new configuration to apply
 */
void throttle_group_config(ThrottleGroupMember *tgm, ThrottleConfig *cfg)
{
    ThrottleState *ts = tgm->throttle_state;
    ThrottleGroup *tg = container_of(ts, ThrottleGroup, ts);
    qemu_mutex_lock(&tg->lock);
    throttle_config(ts, tg->clock_type, cfg);
    qemu_mutex_unlock(&tg->lock);

    throttle_group_restart_tgm(tgm);
}
447
448
449
450
451
452
453
454
455void throttle_group_get_config(ThrottleGroupMember *tgm, ThrottleConfig *cfg)
456{
457 ThrottleState *ts = tgm->throttle_state;
458 ThrottleGroup *tg = container_of(ts, ThrottleGroup, ts);
459 qemu_mutex_lock(&tg->lock);
460 throttle_get_config(ts, cfg);
461 qemu_mutex_unlock(&tg->lock);
462}
463
464
465
466
467
468
469
/* Timer callback: the throttling delay for this member has expired, so clear
 * the group's armed-timer flag and restart the member's request queue.
 *
 * @tgm:      the ThrottleGroupMember whose timer fired
 * @is_write: the type of operation (read/write)
 */
static void timer_cb(ThrottleGroupMember *tgm, bool is_write)
{
    ThrottleState *ts = tgm->throttle_state;
    ThrottleGroup *tg = container_of(ts, ThrottleGroup, ts);

    /* The timer is no longer armed for this request type */
    qemu_mutex_lock(&tg->lock);
    tg->any_timer_armed[is_write] = false;
    qemu_mutex_unlock(&tg->lock);

    /* Run the request that was waiting for this timer */
    throttle_group_restart_queue(tgm, is_write);
}
483
484static void read_timer_cb(void *opaque)
485{
486 timer_cb(opaque, false);
487}
488
489static void write_timer_cb(void *opaque)
490{
491 timer_cb(opaque, true);
492}
493
494
495
496
497
498
499
500
501
502
503
504
/* Register a ThrottleGroupMember in the throttle group with the given name,
 * creating the group if it does not exist yet (via throttle_group_incref()).
 * Initializes the member's timers and coroutine queues and inserts it in the
 * group's round-robin list.
 *
 * @tgm:       the ThrottleGroupMember to register
 * @groupname: the name of the throttle group
 * @ctx:       the AioContext the member's I/O runs in
 */
void throttle_group_register_tgm(ThrottleGroupMember *tgm,
                                 const char *groupname,
                                 AioContext *ctx)
{
    int i;
    ThrottleState *ts = throttle_group_incref(groupname);
    ThrottleGroup *tg = container_of(ts, ThrottleGroup, ts);

    tgm->throttle_state = ts;
    tgm->aio_context = ctx;

    qemu_mutex_lock(&tg->lock);
    /* If this is the first member of the group it becomes the token
     * holder for both request types */
    for (i = 0; i < 2; i++) {
        if (!tg->tokens[i]) {
            tg->tokens[i] = tgm;
        }
    }

    QLIST_INSERT_HEAD(&tg->head, tgm, round_robin);

    throttle_timers_init(&tgm->throttle_timers,
                         tgm->aio_context,
                         tg->clock_type,
                         read_timer_cb,
                         write_timer_cb,
                         tgm);
    qemu_co_mutex_init(&tgm->throttled_reqs_lock);
    qemu_co_queue_init(&tgm->throttled_reqs[0]);
    qemu_co_queue_init(&tgm->throttled_reqs[1]);

    qemu_mutex_unlock(&tg->lock);
}
538
539
540
541
542
543
544
545
546
547
548
549void throttle_group_unregister_tgm(ThrottleGroupMember *tgm)
550{
551 ThrottleState *ts = tgm->throttle_state;
552 ThrottleGroup *tg = container_of(ts, ThrottleGroup, ts);
553 ThrottleGroupMember *token;
554 int i;
555
556 if (!ts) {
557
558 return;
559 }
560
561 assert(tgm->pending_reqs[0] == 0 && tgm->pending_reqs[1] == 0);
562 assert(qemu_co_queue_empty(&tgm->throttled_reqs[0]));
563 assert(qemu_co_queue_empty(&tgm->throttled_reqs[1]));
564
565 qemu_mutex_lock(&tg->lock);
566 for (i = 0; i < 2; i++) {
567 if (tg->tokens[i] == tgm) {
568 token = throttle_group_next_tgm(tgm);
569
570 if (token == tgm) {
571 token = NULL;
572 }
573 tg->tokens[i] = token;
574 }
575 }
576
577
578 QLIST_REMOVE(tgm, round_robin);
579 throttle_timers_destroy(&tgm->throttle_timers);
580 qemu_mutex_unlock(&tg->lock);
581
582 throttle_group_unref(&tg->ts);
583 tgm->throttle_state = NULL;
584}
585
586void throttle_group_attach_aio_context(ThrottleGroupMember *tgm,
587 AioContext *new_context)
588{
589 ThrottleTimers *tt = &tgm->throttle_timers;
590 throttle_timers_attach_aio_context(tt, new_context);
591 tgm->aio_context = new_context;
592}
593
/* Detach a member's throttle timers from its AioContext. If either timer is
 * still pending, hand the work over to another member of the group before
 * the timers are destroyed by throttle_timers_detach_aio_context().
 *
 * The member must have no pending or queued requests (asserted below).
 *
 * @tgm: the ThrottleGroupMember to detach
 */
void throttle_group_detach_aio_context(ThrottleGroupMember *tgm)
{
    ThrottleGroup *tg = container_of(tgm->throttle_state, ThrottleGroup, ts);
    ThrottleTimers *tt = &tgm->throttle_timers;
    int i;

    /* Requests must have been drained before detaching */
    assert(tgm->pending_reqs[0] == 0 && tgm->pending_reqs[1] == 0);
    assert(qemu_co_queue_empty(&tgm->throttled_reqs[0]));
    assert(qemu_co_queue_empty(&tgm->throttled_reqs[1]));

    /* Kick off next ThrottleGroupMember, if necessary */
    qemu_mutex_lock(&tg->lock);
    for (i = 0; i < 2; i++) {
        if (timer_pending(tt->timers[i])) {
            tg->any_timer_armed[i] = false;
            schedule_next_request(tgm, i);
        }
    }
    qemu_mutex_unlock(&tg->lock);

    throttle_timers_detach_aio_context(tt);
    tgm->aio_context = NULL;
}
618
/* NOTE(review): the "x-" prefix conventionally marks experimental QEMU
 * options — confirm before relying on property-name stability. */
#undef THROTTLE_OPT_PREFIX
#define THROTTLE_OPT_PREFIX "x-"
621
622
/* Maps a QOM property name to the throttle bucket and field it configures */
typedef struct {
    const char *name;   /* property name, including the "x-" prefix */
    BucketType type;    /* which throttle bucket the property targets */
    enum {
        AVG,            /* bucket average rate */
        MAX,            /* bucket burst rate */
        BURST_LENGTH,   /* bucket burst length */
        IOPS_SIZE,      /* op_size (not tied to a bucket) */
    } category;
} ThrottleParamInfo;
633
/* Table of all individual throttling properties exposed via QOM; iterated
 * by throttle_group_obj_class_init() to register one property per entry. */
static ThrottleParamInfo properties[] = {
    {
        THROTTLE_OPT_PREFIX QEMU_OPT_IOPS_TOTAL,
        THROTTLE_OPS_TOTAL, AVG,
    },
    {
        THROTTLE_OPT_PREFIX QEMU_OPT_IOPS_TOTAL_MAX,
        THROTTLE_OPS_TOTAL, MAX,
    },
    {
        THROTTLE_OPT_PREFIX QEMU_OPT_IOPS_TOTAL_MAX_LENGTH,
        THROTTLE_OPS_TOTAL, BURST_LENGTH,
    },
    {
        THROTTLE_OPT_PREFIX QEMU_OPT_IOPS_READ,
        THROTTLE_OPS_READ, AVG,
    },
    {
        THROTTLE_OPT_PREFIX QEMU_OPT_IOPS_READ_MAX,
        THROTTLE_OPS_READ, MAX,
    },
    {
        THROTTLE_OPT_PREFIX QEMU_OPT_IOPS_READ_MAX_LENGTH,
        THROTTLE_OPS_READ, BURST_LENGTH,
    },
    {
        THROTTLE_OPT_PREFIX QEMU_OPT_IOPS_WRITE,
        THROTTLE_OPS_WRITE, AVG,
    },
    {
        THROTTLE_OPT_PREFIX QEMU_OPT_IOPS_WRITE_MAX,
        THROTTLE_OPS_WRITE, MAX,
    },
    {
        THROTTLE_OPT_PREFIX QEMU_OPT_IOPS_WRITE_MAX_LENGTH,
        THROTTLE_OPS_WRITE, BURST_LENGTH,
    },
    {
        THROTTLE_OPT_PREFIX QEMU_OPT_BPS_TOTAL,
        THROTTLE_BPS_TOTAL, AVG,
    },
    {
        THROTTLE_OPT_PREFIX QEMU_OPT_BPS_TOTAL_MAX,
        THROTTLE_BPS_TOTAL, MAX,
    },
    {
        THROTTLE_OPT_PREFIX QEMU_OPT_BPS_TOTAL_MAX_LENGTH,
        THROTTLE_BPS_TOTAL, BURST_LENGTH,
    },
    {
        THROTTLE_OPT_PREFIX QEMU_OPT_BPS_READ,
        THROTTLE_BPS_READ, AVG,
    },
    {
        THROTTLE_OPT_PREFIX QEMU_OPT_BPS_READ_MAX,
        THROTTLE_BPS_READ, MAX,
    },
    {
        THROTTLE_OPT_PREFIX QEMU_OPT_BPS_READ_MAX_LENGTH,
        THROTTLE_BPS_READ, BURST_LENGTH,
    },
    {
        THROTTLE_OPT_PREFIX QEMU_OPT_BPS_WRITE,
        THROTTLE_BPS_WRITE, AVG,
    },
    {
        THROTTLE_OPT_PREFIX QEMU_OPT_BPS_WRITE_MAX,
        THROTTLE_BPS_WRITE, MAX,
    },
    {
        THROTTLE_OPT_PREFIX QEMU_OPT_BPS_WRITE_MAX_LENGTH,
        THROTTLE_BPS_WRITE, BURST_LENGTH,
    },
    {
        THROTTLE_OPT_PREFIX QEMU_OPT_IOPS_SIZE,
        0, IOPS_SIZE,
    }
};
712
713
714
715static void throttle_group_obj_init(Object *obj)
716{
717 ThrottleGroup *tg = THROTTLE_GROUP(obj);
718
719 tg->clock_type = QEMU_CLOCK_REALTIME;
720 if (qtest_enabled()) {
721
722 tg->clock_type = QEMU_CLOCK_VIRTUAL;
723 }
724 tg->is_initialized = false;
725 qemu_mutex_init(&tg->lock);
726 throttle_init(&tg->ts);
727 QLIST_INIT(&tg->head);
728}
729
730
731
/* QOM user_creatable completion: validate the group's configuration, give
 * the group its name (from the QOM path component if none was set, i.e. for
 * user-created objects) and insert it in the global group list.
 *
 * Fails if a group with the same name already exists or the throttle
 * configuration is invalid.
 *
 * @obj:  the ThrottleGroup being completed
 * @errp: error out-parameter
 */
static void throttle_group_obj_complete(UserCreatable *obj, Error **errp)
{
    ThrottleGroup *tg = THROTTLE_GROUP(obj);
    ThrottleConfig cfg;

    /* set group name to object id if it exists */
    if (!tg->name && tg->parent_obj.parent) {
        tg->name = object_get_canonical_path_component(OBJECT(obj));
    }
    /* We must have a group name at this point */
    assert(tg->name);

    /* error if name is duplicate */
    if (throttle_group_exists(tg->name)) {
        error_setg(errp, "A group with this name already exists");
        return;
    }

    /* check validity: cfg holds the properties set so far via QOM */
    throttle_get_config(&tg->ts, &cfg);
    if (!throttle_is_valid(&cfg, errp)) {
        return;
    }
    throttle_config(&tg->ts, tg->clock_type, &cfg);
    QTAILQ_INSERT_TAIL(&throttle_groups, tg, list);
    tg->is_initialized = true;
}
759
760
761
/* QOM instance finalize: remove the group from the global list (only fully
 * initialized groups were ever inserted) and release its resources.
 *
 * @obj: the ThrottleGroup being destroyed
 */
static void throttle_group_obj_finalize(Object *obj)
{
    ThrottleGroup *tg = THROTTLE_GROUP(obj);
    if (tg->is_initialized) {
        QTAILQ_REMOVE(&throttle_groups, tg, list);
    }
    qemu_mutex_destroy(&tg->lock);
    g_free(tg->name);
}
771
772static void throttle_group_set(Object *obj, Visitor *v, const char * name,
773 void *opaque, Error **errp)
774
775{
776 ThrottleGroup *tg = THROTTLE_GROUP(obj);
777 ThrottleConfig *cfg;
778 ThrottleParamInfo *info = opaque;
779 Error *local_err = NULL;
780 int64_t value;
781
782
783
784
785
786 if (tg->is_initialized) {
787 error_setg(&local_err, "Property cannot be set after initialization");
788 goto ret;
789 }
790
791 visit_type_int64(v, name, &value, &local_err);
792 if (local_err) {
793 goto ret;
794 }
795 if (value < 0) {
796 error_setg(&local_err, "Property values cannot be negative");
797 goto ret;
798 }
799
800 cfg = &tg->ts.cfg;
801 switch (info->category) {
802 case AVG:
803 cfg->buckets[info->type].avg = value;
804 break;
805 case MAX:
806 cfg->buckets[info->type].max = value;
807 break;
808 case BURST_LENGTH:
809 if (value > UINT_MAX) {
810 error_setg(&local_err, "%s value must be in the"
811 "range [0, %u]", info->name, UINT_MAX);
812 goto ret;
813 }
814 cfg->buckets[info->type].burst_length = value;
815 break;
816 case IOPS_SIZE:
817 cfg->op_size = value;
818 break;
819 }
820
821ret:
822 error_propagate(errp, local_err);
823 return;
824
825}
826
827static void throttle_group_get(Object *obj, Visitor *v, const char *name,
828 void *opaque, Error **errp)
829{
830 ThrottleGroup *tg = THROTTLE_GROUP(obj);
831 ThrottleConfig cfg;
832 ThrottleParamInfo *info = opaque;
833 int64_t value;
834
835 throttle_get_config(&tg->ts, &cfg);
836 switch (info->category) {
837 case AVG:
838 value = cfg.buckets[info->type].avg;
839 break;
840 case MAX:
841 value = cfg.buckets[info->type].max;
842 break;
843 case BURST_LENGTH:
844 value = cfg.buckets[info->type].burst_length;
845 break;
846 case IOPS_SIZE:
847 value = cfg.op_size;
848 break;
849 }
850
851 visit_type_int64(v, name, &value, errp);
852}
853
/* QOM setter for the "limits" property: apply a full ThrottleLimits
 * configuration atomically, merged into the current config under tg->lock.
 *
 * @obj:    the ThrottleGroup
 * @v:      the visitor carrying the ThrottleLimits value
 * @name:   the property name ("limits")
 * @opaque: unused
 * @errp:   error out-parameter
 */
static void throttle_group_set_limits(Object *obj, Visitor *v,
                                      const char *name, void *opaque,
                                      Error **errp)

{
    ThrottleGroup *tg = THROTTLE_GROUP(obj);
    ThrottleConfig cfg;
    ThrottleLimits arg = { 0 };
    ThrottleLimits *argp = &arg;
    Error *local_err = NULL;

    visit_type_ThrottleLimits(v, name, &argp, &local_err);
    if (local_err) {
        goto ret;
    }
    qemu_mutex_lock(&tg->lock);
    /* Merge the new limits into the current config before applying, so
     * fields not present in the input keep their current values */
    throttle_get_config(&tg->ts, &cfg);
    throttle_limits_to_config(argp, &cfg, &local_err);
    if (local_err) {
        goto unlock;
    }
    throttle_config(&tg->ts, tg->clock_type, &cfg);

unlock:
    qemu_mutex_unlock(&tg->lock);
ret:
    error_propagate(errp, local_err);
    return;
}
883
884static void throttle_group_get_limits(Object *obj, Visitor *v,
885 const char *name, void *opaque,
886 Error **errp)
887{
888 ThrottleGroup *tg = THROTTLE_GROUP(obj);
889 ThrottleConfig cfg;
890 ThrottleLimits arg = { 0 };
891 ThrottleLimits *argp = &arg;
892
893 qemu_mutex_lock(&tg->lock);
894 throttle_get_config(&tg->ts, &cfg);
895 qemu_mutex_unlock(&tg->lock);
896
897 throttle_config_to_limits(&cfg, argp);
898
899 visit_type_ThrottleLimits(v, name, &argp, errp);
900}
901
902static bool throttle_group_can_be_deleted(UserCreatable *uc)
903{
904 return OBJECT(uc)->ref == 1;
905}
906
907static void throttle_group_obj_class_init(ObjectClass *klass, void *class_data)
908{
909 size_t i = 0;
910 UserCreatableClass *ucc = USER_CREATABLE_CLASS(klass);
911
912 ucc->complete = throttle_group_obj_complete;
913 ucc->can_be_deleted = throttle_group_can_be_deleted;
914
915
916 for (i = 0; i < sizeof(properties) / sizeof(ThrottleParamInfo); i++) {
917 object_class_property_add(klass,
918 properties[i].name,
919 "int",
920 throttle_group_get,
921 throttle_group_set,
922 NULL, &properties[i],
923 &error_abort);
924 }
925
926
927 object_class_property_add(klass,
928 "limits", "ThrottleLimits",
929 throttle_group_get_limits,
930 throttle_group_set_limits,
931 NULL, NULL,
932 &error_abort);
933}
934
/* QOM type registration info for "throttle-group" */
static const TypeInfo throttle_group_info = {
    .name = TYPE_THROTTLE_GROUP,
    .parent = TYPE_OBJECT,
    .class_init = throttle_group_obj_class_init,
    .instance_size = sizeof(ThrottleGroup),
    .instance_init = throttle_group_obj_init,
    .instance_finalize = throttle_group_obj_finalize,
    .interfaces = (InterfaceInfo[]) {
        { TYPE_USER_CREATABLE },
        { }
    },
};
947
/* Register the throttle-group QOM type at startup */
static void throttle_groups_init(void)
{
    type_register_static(&throttle_group_info);
}

type_init(throttle_groups_init);
954