1
2
3
4
5
6
7
8
9
10
11
12
13
14#include "qemu/osdep.h"
15#include "qemu/lockable.h"
16#include "qemu/option.h"
17#include "qemu/plugin.h"
18#include "qemu/qemu-plugin.h"
19#include "qemu/queue.h"
20#include "qemu/rcu_queue.h"
21#include "qemu/rcu.h"
22#include "exec/tb-flush.h"
23#include "tcg/tcg-op-common.h"
24#include "plugin.h"
25
/*
 * One registered callback for a "simple" plugin event: the owning plugin
 * context, the function to call, and optional user data.  Linked into
 * plugin.cb_lists[ev]; readers traverse the list with RCU-safe iterators.
 */
struct qemu_plugin_cb {
    struct qemu_plugin_ctx *ctx;        /* plugin that registered this cb */
    union qemu_plugin_cb_sig f;         /* callback; variant depends on ev */
    void *udata;                        /* opaque data passed back to f */
    QLIST_ENTRY(qemu_plugin_cb) entry;  /* link in plugin.cb_lists[ev] */
};
32
/* Global plugin state: contexts, per-event callback lists, lock, hash tables. */
struct qemu_plugin_state plugin;
34
35struct qemu_plugin_ctx *plugin_id_to_ctx_locked(qemu_plugin_id_t id)
36{
37 struct qemu_plugin_ctx *ctx;
38 qemu_plugin_id_t *id_p;
39
40 id_p = g_hash_table_lookup(plugin.id_ht, &id);
41 ctx = container_of(id_p, struct qemu_plugin_ctx, id);
42 if (ctx == NULL) {
43 error_report("plugin: invalid plugin id %" PRIu64, id);
44 abort();
45 }
46 return ctx;
47}
48
/*
 * Async work item run on a vCPU: install the event-mask snapshot packed
 * into data.host_ulong as this vCPU's event_mask, then flush the jump
 * cache so existing translations are re-looked-up against the new mask.
 */
static void plugin_cpu_update__async(CPUState *cpu, run_on_cpu_data data)
{
    bitmap_copy(cpu->plugin_state->event_mask,
                &data.host_ulong, QEMU_PLUGIN_EV_MAX);
    tcg_flush_jmp_cache(cpu);
}
55
/*
 * g_hash_table_foreach() helper: schedule an async refresh of one vCPU's
 * event mask from the current plugin.mask.  The hash key is a pointer to
 * the CPUState's cpu_index field (see qemu_plugin_vcpu_init__async), so
 * container_of() recovers the CPUState.  Called with plugin.lock held.
 */
static void plugin_cpu_update__locked(gpointer k, gpointer v, gpointer udata)
{
    CPUState *cpu = container_of(k, CPUState, cpu_index);
    run_on_cpu_data mask = RUN_ON_CPU_HOST_ULONG(*plugin.mask);

    async_run_on_cpu(cpu, plugin_cpu_update__async, mask);
}
63
/*
 * Remove @ctx's callback for event @ev, if one is registered.
 *
 * Must be called with plugin.lock held.  When the last callback for @ev
 * goes away, the event bit is cleared from plugin.mask and every vCPU is
 * asked (asynchronously) to refresh its private event mask.
 *
 * NOTE(review): the cb is freed immediately after QLIST_REMOVE_RCU, so
 * callers presumably guarantee no concurrent RCU readers are in flight
 * (e.g. by running in an exclusive section) — confirm at call sites.
 */
void plugin_unregister_cb__locked(struct qemu_plugin_ctx *ctx,
                                  enum qemu_plugin_event ev)
{
    struct qemu_plugin_cb *cb = ctx->callbacks[ev];

    if (cb == NULL) {
        return;
    }
    QLIST_REMOVE_RCU(cb, entry);
    g_free(cb);
    ctx->callbacks[ev] = NULL;
    /* last subscriber gone: stop instrumenting this event on all vCPUs */
    if (QLIST_EMPTY_RCU(&plugin.cb_lists[ev])) {
        clear_bit(ev, plugin.mask);
        g_hash_table_foreach(plugin.cpu_ht, plugin_cpu_update__locked, NULL);
    }
}
80
81
82
83
84
85
/*
 * Invoke every registered "vcpu simple" callback (id, cpu_index) for @ev.
 * CFI is disabled: the function pointers come from dynamically loaded
 * plugins, so no control-flow type info is available for the call.
 */
QEMU_DISABLE_CFI
static void plugin_vcpu_cb__simple(CPUState *cpu, enum qemu_plugin_event ev)
{
    struct qemu_plugin_cb *cb, *next;

    switch (ev) {
    case QEMU_PLUGIN_EV_VCPU_INIT:
    case QEMU_PLUGIN_EV_VCPU_EXIT:
    case QEMU_PLUGIN_EV_VCPU_IDLE:
    case QEMU_PLUGIN_EV_VCPU_RESUME:
        /* iterate safely; plugins might uninstall themselves at any time */
        QLIST_FOREACH_SAFE_RCU(cb, &plugin.cb_lists[ev], entry, next) {
            qemu_plugin_vcpu_simple_cb_t func = cb->f.vcpu_simple;

            func(cb->ctx->id, cpu->cpu_index);
        }
        break;
    default:
        g_assert_not_reached();
    }
}
107
108
109
110
111
112
/*
 * Invoke every registered "simple" callback (id only) for @ev.
 * CFI is disabled: the function pointers come from dynamically loaded
 * plugins, so no control-flow type info is available for the call.
 */
QEMU_DISABLE_CFI
static void plugin_cb__simple(enum qemu_plugin_event ev)
{
    struct qemu_plugin_cb *cb, *next;

    switch (ev) {
    case QEMU_PLUGIN_EV_FLUSH:
        QLIST_FOREACH_SAFE_RCU(cb, &plugin.cb_lists[ev], entry, next) {
            qemu_plugin_simple_cb_t func = cb->f.simple;

            func(cb->ctx->id);
        }
        break;
    default:
        g_assert_not_reached();
    }
}
130
131
132
133
134
135
/*
 * Invoke every registered (id, udata) callback for @ev.
 * CFI is disabled: the function pointers come from dynamically loaded
 * plugins, so no control-flow type info is available for the call.
 */
QEMU_DISABLE_CFI
static void plugin_cb__udata(enum qemu_plugin_event ev)
{
    struct qemu_plugin_cb *cb, *next;

    switch (ev) {
    case QEMU_PLUGIN_EV_ATEXIT:
        QLIST_FOREACH_SAFE_RCU(cb, &plugin.cb_lists[ev], entry, next) {
            qemu_plugin_udata_cb_t func = cb->f.udata;

            func(cb->ctx->id, cb->udata);
        }
        break;
    default:
        g_assert_not_reached();
    }
}
153
/*
 * Register (func != NULL) or unregister (func == NULL) plugin @id's
 * callback for event @ev.
 *
 * If a callback is already installed it is updated in place; otherwise a
 * new one is allocated, inserted at the head of the RCU list, and — on
 * the first subscriber for @ev — the event bit is set in plugin.mask and
 * every vCPU is asked (asynchronously) to refresh its event mask.
 * Registration is silently ignored while the plugin is uninstalling.
 */
static void
do_plugin_register_cb(qemu_plugin_id_t id, enum qemu_plugin_event ev,
                      void *func, void *udata)
{
    struct qemu_plugin_ctx *ctx;

    QEMU_LOCK_GUARD(&plugin.lock);
    ctx = plugin_id_to_ctx_locked(id);
    /* if the plugin is on its way out, ignore this request */
    if (unlikely(ctx->uninstalling)) {
        return;
    }
    if (func) {
        struct qemu_plugin_cb *cb = ctx->callbacks[ev];

        if (cb) {
            /* update existing callback in place */
            cb->f.generic = func;
            cb->udata = udata;
        } else {
            cb = g_new(struct qemu_plugin_cb, 1);
            cb->ctx = ctx;
            cb->f.generic = func;
            cb->udata = udata;
            ctx->callbacks[ev] = cb;
            QLIST_INSERT_HEAD_RCU(&plugin.cb_lists[ev], cb, entry);
            /* first subscriber for this event: enable it on all vCPUs */
            if (!test_bit(ev, plugin.mask)) {
                set_bit(ev, plugin.mask);
                g_hash_table_foreach(plugin.cpu_ht, plugin_cpu_update__locked,
                                     NULL);
            }
        }
    } else {
        plugin_unregister_cb__locked(ctx, ev);
    }
}
189
/* Register a callback for @ev with no user data (NULL udata). */
void plugin_register_cb(qemu_plugin_id_t id, enum qemu_plugin_event ev,
                        void *func)
{
    do_plugin_register_cb(id, ev, func, NULL);
}
195
/* Register a callback for @ev along with opaque user data. */
void
plugin_register_cb_udata(qemu_plugin_id_t id, enum qemu_plugin_event ev,
                         void *func, void *udata)
{
    do_plugin_register_cb(id, ev, func, udata);
}
202
/* Allocate a zero-initialized per-vCPU plugin state. Caller owns it. */
CPUPluginState *qemu_plugin_create_vcpu_state(void)
{
    return g_new0(CPUPluginState, 1);
}
207
/*
 * Ensure every scoreboard has a slot for @cpu (doubling the allocation
 * size until cpu_index fits).
 *
 * Called with plugin.lock held.  If existing scoreboards must grow, the
 * lock is dropped around start_exclusive() and re-taken, because resizing
 * can move scoreboard data that translated code may be using; the resize
 * therefore happens inside an exclusive section followed by a TB flush.
 */
static void plugin_grow_scoreboards__locked(CPUState *cpu)
{
    size_t scoreboard_size = plugin.scoreboard_alloc_size;
    bool need_realloc = false;

    if (cpu->cpu_index < scoreboard_size) {
        return;
    }

    /* double until cpu_index fits */
    while (cpu->cpu_index >= scoreboard_size) {
        scoreboard_size *= 2;
        need_realloc = true;
    }

    if (!need_realloc) {
        return;
    }

    if (QLIST_EMPTY(&plugin.scoreboards)) {
        /* no scoreboards to resize; just record the size for future ones */
        plugin.scoreboard_alloc_size = scoreboard_size;
        return;
    }

    /*
     * Drop the plugin lock before entering the exclusive section to avoid
     * a lock-order problem; re-acquire it afterwards.
     * NOTE(review): another vCPU may grow the scoreboards in the window
     * between unlock and start_exclusive — hence the re-check below.
     */
    qemu_rec_mutex_unlock(&plugin.lock);

    /* cpus must be stopped: translated code may reference scoreboard data */
    start_exclusive();

    qemu_rec_mutex_lock(&plugin.lock);
    /* re-check: someone else may already have grown past our target */
    if (scoreboard_size > plugin.scoreboard_alloc_size) {
        struct qemu_plugin_scoreboard *score;
        QLIST_FOREACH(score, &plugin.scoreboards, entry) {
            g_array_set_size(score->data, scoreboard_size);
        }
        plugin.scoreboard_alloc_size = scoreboard_size;
        /* resizing may have moved data; drop all TBs referencing it */
        tb_flush(cpu);
    }
    end_exclusive();
}
255
/*
 * Async work item run on a newly created vCPU: record it in the plugin
 * bookkeeping (num_vcpus, cpu_ht keyed by &cpu->cpu_index), sync its
 * event mask, grow scoreboards if needed, then fire VCPU_INIT callbacks
 * with full register access enabled for their duration.
 */
static void qemu_plugin_vcpu_init__async(CPUState *cpu, run_on_cpu_data unused)
{
    bool success;

    assert(cpu->cpu_index != UNASSIGNED_CPU_INDEX);
    qemu_rec_mutex_lock(&plugin.lock);
    plugin.num_vcpus = MAX(plugin.num_vcpus, cpu->cpu_index + 1);
    plugin_cpu_update__locked(&cpu->cpu_index, NULL, NULL);
    /* key and value are both the address of this vCPU's cpu_index */
    success = g_hash_table_insert(plugin.cpu_ht, &cpu->cpu_index,
                                  &cpu->cpu_index);
    g_assert(success);
    plugin_grow_scoreboards__locked(cpu);
    qemu_rec_mutex_unlock(&plugin.lock);

    /* allow register read/write only while the init callbacks run */
    qemu_plugin_set_cb_flags(cpu, QEMU_PLUGIN_CB_RW_REGS);
    plugin_vcpu_cb__simple(cpu, QEMU_PLUGIN_EV_VCPU_INIT);
    qemu_plugin_set_cb_flags(cpu, QEMU_PLUGIN_CB_NO_REGS);
}
274
/* Defer vCPU-init plugin work onto the vCPU itself via async_run_on_cpu. */
void qemu_plugin_vcpu_init_hook(CPUState *cpu)
{
    async_run_on_cpu(cpu, qemu_plugin_vcpu_init__async, RUN_ON_CPU_NULL);
}
280
/*
 * Called when a vCPU exits: fire VCPU_EXIT callbacks (with register
 * access enabled for their duration), then drop the vCPU from cpu_ht.
 */
void qemu_plugin_vcpu_exit_hook(CPUState *cpu)
{
    bool success;

    qemu_plugin_set_cb_flags(cpu, QEMU_PLUGIN_CB_RW_REGS);
    plugin_vcpu_cb__simple(cpu, QEMU_PLUGIN_EV_VCPU_EXIT);
    qemu_plugin_set_cb_flags(cpu, QEMU_PLUGIN_CB_NO_REGS);

    assert(cpu->cpu_index != UNASSIGNED_CPU_INDEX);
    qemu_rec_mutex_lock(&plugin.lock);
    success = g_hash_table_remove(plugin.cpu_ht, &cpu->cpu_index);
    g_assert(success);
    qemu_rec_mutex_unlock(&plugin.lock);
}
295
/* Closure passed through g_hash_table_foreach in qemu_plugin_vcpu_for_each. */
struct plugin_for_each_args {
    struct qemu_plugin_ctx *ctx;    /* plugin on whose behalf we iterate */
    qemu_plugin_vcpu_simple_cb_t cb; /* invoked once per live vCPU */
};
300
301static void plugin_vcpu_for_each(gpointer k, gpointer v, gpointer udata)
302{
303 struct plugin_for_each_args *args = udata;
304 int cpu_index = *(int *)k;
305
306 args->cb(args->ctx->id, cpu_index);
307}
308
/*
 * Invoke @cb(id, cpu_index) once for every currently registered vCPU,
 * under plugin.lock.  A NULL @cb is a no-op.
 */
void qemu_plugin_vcpu_for_each(qemu_plugin_id_t id,
                               qemu_plugin_vcpu_simple_cb_t cb)
{
    struct plugin_for_each_args args;

    if (cb == NULL) {
        return;
    }
    qemu_rec_mutex_lock(&plugin.lock);
    args.ctx = plugin_id_to_ctx_locked(id);
    args.cb = cb;
    g_hash_table_foreach(plugin.cpu_ht, plugin_vcpu_for_each, &args);
    qemu_rec_mutex_unlock(&plugin.lock);
}
323
324
325static struct qemu_plugin_dyn_cb *plugin_get_dyn_cb(GArray **arr)
326{
327 GArray *cbs = *arr;
328
329 if (!cbs) {
330 cbs = g_array_sized_new(false, true,
331 sizeof(struct qemu_plugin_dyn_cb), 1);
332 *arr = cbs;
333 }
334
335 g_array_set_size(cbs, cbs->len + 1);
336 return &g_array_index(cbs, struct qemu_plugin_dyn_cb, cbs->len - 1);
337}
338
339static enum plugin_dyn_cb_type op_to_cb_type(enum qemu_plugin_op op)
340{
341 switch (op) {
342 case QEMU_PLUGIN_INLINE_ADD_U64:
343 return PLUGIN_CB_INLINE_ADD_U64;
344 case QEMU_PLUGIN_INLINE_STORE_U64:
345 return PLUGIN_CB_INLINE_STORE_U64;
346 default:
347 g_assert_not_reached();
348 }
349}
350
351void plugin_register_inline_op_on_entry(GArray **arr,
352 enum qemu_plugin_mem_rw rw,
353 enum qemu_plugin_op op,
354 qemu_plugin_u64 entry,
355 uint64_t imm)
356{
357 struct qemu_plugin_dyn_cb *dyn_cb;
358
359 struct qemu_plugin_inline_cb inline_cb = { .rw = rw,
360 .entry = entry,
361 .imm = imm };
362 dyn_cb = plugin_get_dyn_cb(arr);
363 dyn_cb->type = op_to_cb_type(op);
364 dyn_cb->inline_insn = inline_cb;
365}
366
/*
 * Append a regular (vcpu_index, udata) callback to the dynamic callback
 * array.  The static TCGHelperInfo table encodes, per register-access
 * level, the TCG call flags and the helper signature.
 */
void plugin_register_dyn_cb__udata(GArray **arr,
                                   qemu_plugin_vcpu_udata_cb_t cb,
                                   enum qemu_plugin_cb_flags flags,
                                   void *udata)
{
    static TCGHelperInfo info[3] = {
        [QEMU_PLUGIN_CB_NO_REGS].flags = TCG_CALL_NO_RWG,
        [QEMU_PLUGIN_CB_R_REGS].flags = TCG_CALL_NO_WG,
        [QEMU_PLUGIN_CB_RW_REGS].flags = 0,
        /*
         * Shared signature for all three entries, matching
         * qemu_plugin_vcpu_udata_cb_t: void (*)(uint32_t, void *)
         */
        [0 ... 2].typemask = (dh_typemask(void, 0) |
                              dh_typemask(i32, 1) |
                              dh_typemask(ptr, 2))
    };
    assert((unsigned)flags < ARRAY_SIZE(info));

    struct qemu_plugin_dyn_cb *dyn_cb = plugin_get_dyn_cb(arr);
    struct qemu_plugin_regular_cb regular_cb = { .f.vcpu_udata = cb,
                                                 .userp = udata,
                                                 .info = &info[flags] };
    dyn_cb->type = PLUGIN_CB_REGULAR;
    dyn_cb->regular = regular_cb;
}
393
/*
 * Append a conditional callback: invoked only when the scoreboard entry
 * compared against @imm with @cond is true at execution time.
 */
void plugin_register_dyn_cond_cb__udata(GArray **arr,
                                        qemu_plugin_vcpu_udata_cb_t cb,
                                        enum qemu_plugin_cb_flags flags,
                                        enum qemu_plugin_cond cond,
                                        qemu_plugin_u64 entry,
                                        uint64_t imm,
                                        void *udata)
{
    static TCGHelperInfo info[3] = {
        [QEMU_PLUGIN_CB_NO_REGS].flags = TCG_CALL_NO_RWG,
        [QEMU_PLUGIN_CB_R_REGS].flags = TCG_CALL_NO_WG,
        [QEMU_PLUGIN_CB_RW_REGS].flags = 0,
        /*
         * Shared signature for all three entries, matching
         * qemu_plugin_vcpu_udata_cb_t: void (*)(uint32_t, void *)
         */
        [0 ... 2].typemask = (dh_typemask(void, 0) |
                              dh_typemask(i32, 1) |
                              dh_typemask(ptr, 2))
    };
    assert((unsigned)flags < ARRAY_SIZE(info));

    struct qemu_plugin_dyn_cb *dyn_cb = plugin_get_dyn_cb(arr);
    struct qemu_plugin_conditional_cb cond_cb = { .userp = udata,
                                                  .f.vcpu_udata = cb,
                                                  .cond = cond,
                                                  .entry = entry,
                                                  .imm = imm,
                                                  .info = &info[flags] };
    dyn_cb->type = PLUGIN_CB_COND;
    dyn_cb->cond = cond_cb;
}
426
/*
 * Append a memory-access callback, filtered by @rw.  The helper signature
 * depends on whether qemu_plugin_meminfo_t is signed or unsigned, which
 * the build-time check below constrains to one of the two.
 */
void plugin_register_vcpu_mem_cb(GArray **arr,
                                 void *cb,
                                 enum qemu_plugin_cb_flags flags,
                                 enum qemu_plugin_mem_rw rw,
                                 void *udata)
{
    /*
     * meminfo is passed to the helper as i32 or s32 below; make sure the
     * public typedef really is a 32-bit integer of one signedness.
     */
    QEMU_BUILD_BUG_ON(
        !__builtin_types_compatible_p(qemu_plugin_meminfo_t, uint32_t) &&
        !__builtin_types_compatible_p(qemu_plugin_meminfo_t, int32_t));

    static TCGHelperInfo info[3] = {
        [QEMU_PLUGIN_CB_NO_REGS].flags = TCG_CALL_NO_RWG,
        [QEMU_PLUGIN_CB_R_REGS].flags = TCG_CALL_NO_WG,
        [QEMU_PLUGIN_CB_RW_REGS].flags = 0,
        /*
         * Shared signature for all three entries, matching
         * qemu_plugin_vcpu_mem_cb_t:
         *   void (*)(uint32_t, qemu_plugin_meminfo_t, uint64_t, void *)
         */
        [0 ... 2].typemask =
            (dh_typemask(void, 0) |
             dh_typemask(i32, 1) |
             (__builtin_types_compatible_p(qemu_plugin_meminfo_t, uint32_t)
              ? dh_typemask(i32, 2) : dh_typemask(s32, 2)) |
             dh_typemask(i64, 3) |
             dh_typemask(ptr, 4))
    };
    assert((unsigned)flags < ARRAY_SIZE(info));

    struct qemu_plugin_dyn_cb *dyn_cb = plugin_get_dyn_cb(arr);
    struct qemu_plugin_regular_cb regular_cb = { .userp = udata,
                                                 .rw = rw,
                                                 .f.vcpu_mem = cb,
                                                 .info = &info[flags] };
    dyn_cb->type = PLUGIN_CB_MEM_REGULAR;
    dyn_cb->regular = regular_cb;
}
467
468
469
470
471
472
/*
 * Fire all TB-translation callbacks for a freshly translated block,
 * granting register access for the duration of each call.
 * CFI is disabled: the function pointers come from dynamically loaded
 * plugins, so no control-flow type info is available for the call.
 */
QEMU_DISABLE_CFI
void qemu_plugin_tb_trans_cb(CPUState *cpu, struct qemu_plugin_tb *tb)
{
    struct qemu_plugin_cb *cb, *next;
    enum qemu_plugin_event ev = QEMU_PLUGIN_EV_VCPU_TB_TRANS;

    QLIST_FOREACH_SAFE_RCU(cb, &plugin.cb_lists[ev], entry, next) {
        qemu_plugin_vcpu_tb_trans_cb_t func = cb->f.vcpu_tb_trans;

        qemu_plugin_set_cb_flags(cpu, QEMU_PLUGIN_CB_RW_REGS);
        func(cb->ctx->id, tb);
        qemu_plugin_set_cb_flags(cpu, QEMU_PLUGIN_CB_NO_REGS);
    }
}
489
490
491
492
493
494
/*
 * Fire syscall-entry callbacks, skipping early if no plugin subscribed
 * to the event on this vCPU (per-vCPU event_mask check).
 * CFI is disabled: the function pointers come from dynamically loaded
 * plugins, so no control-flow type info is available for the call.
 */
QEMU_DISABLE_CFI
void
qemu_plugin_vcpu_syscall(CPUState *cpu, int64_t num, uint64_t a1, uint64_t a2,
                         uint64_t a3, uint64_t a4, uint64_t a5,
                         uint64_t a6, uint64_t a7, uint64_t a8)
{
    struct qemu_plugin_cb *cb, *next;
    enum qemu_plugin_event ev = QEMU_PLUGIN_EV_VCPU_SYSCALL;

    if (!test_bit(ev, cpu->plugin_state->event_mask)) {
        return;
    }

    QLIST_FOREACH_SAFE_RCU(cb, &plugin.cb_lists[ev], entry, next) {
        qemu_plugin_vcpu_syscall_cb_t func = cb->f.vcpu_syscall;

        qemu_plugin_set_cb_flags(cpu, QEMU_PLUGIN_CB_RW_REGS);
        func(cb->ctx->id, cpu->cpu_index, num, a1, a2, a3, a4, a5, a6, a7, a8);
        qemu_plugin_set_cb_flags(cpu, QEMU_PLUGIN_CB_NO_REGS);
    }
}
516
517
518
519
520
521
/*
 * Fire syscall-return callbacks, skipping early if no plugin subscribed
 * to the event on this vCPU (per-vCPU event_mask check).
 * CFI is disabled: the function pointers come from dynamically loaded
 * plugins, so no control-flow type info is available for the call.
 */
QEMU_DISABLE_CFI
void qemu_plugin_vcpu_syscall_ret(CPUState *cpu, int64_t num, int64_t ret)
{
    struct qemu_plugin_cb *cb, *next;
    enum qemu_plugin_event ev = QEMU_PLUGIN_EV_VCPU_SYSCALL_RET;

    if (!test_bit(ev, cpu->plugin_state->event_mask)) {
        return;
    }

    QLIST_FOREACH_SAFE_RCU(cb, &plugin.cb_lists[ev], entry, next) {
        qemu_plugin_vcpu_syscall_ret_cb_t func = cb->f.vcpu_syscall_ret;

        qemu_plugin_set_cb_flags(cpu, QEMU_PLUGIN_CB_RW_REGS);
        func(cb->ctx->id, cpu->cpu_index, num, ret);
        qemu_plugin_set_cb_flags(cpu, QEMU_PLUGIN_CB_NO_REGS);
    }
}
540
/*
 * Fire VCPU_IDLE callbacks.  Idle may be reported before the vCPU has
 * completed plugin init (num_vcpus not yet grown); ignore it in that case.
 */
void qemu_plugin_vcpu_idle_cb(CPUState *cpu)
{
    if (cpu->cpu_index < plugin.num_vcpus) {
        qemu_plugin_set_cb_flags(cpu, QEMU_PLUGIN_CB_RW_REGS);
        plugin_vcpu_cb__simple(cpu, QEMU_PLUGIN_EV_VCPU_IDLE);
        qemu_plugin_set_cb_flags(cpu, QEMU_PLUGIN_CB_NO_REGS);
    }
}
550
/*
 * Fire VCPU_RESUME callbacks.  As with idle, resume may arrive before
 * plugin init has registered this vCPU; ignore it in that case.
 */
void qemu_plugin_vcpu_resume_cb(CPUState *cpu)
{
    if (cpu->cpu_index < plugin.num_vcpus) {
        qemu_plugin_set_cb_flags(cpu, QEMU_PLUGIN_CB_RW_REGS);
        plugin_vcpu_cb__simple(cpu, QEMU_PLUGIN_EV_VCPU_RESUME);
        qemu_plugin_set_cb_flags(cpu, QEMU_PLUGIN_CB_NO_REGS);
    }
}
559
/* Public API: subscribe @cb to the VCPU_IDLE event. */
void qemu_plugin_register_vcpu_idle_cb(qemu_plugin_id_t id,
                                       qemu_plugin_vcpu_simple_cb_t cb)
{
    plugin_register_cb(id, QEMU_PLUGIN_EV_VCPU_IDLE, cb);
}
565
/* Public API: subscribe @cb to the VCPU_RESUME event. */
void qemu_plugin_register_vcpu_resume_cb(qemu_plugin_id_t id,
                                         qemu_plugin_vcpu_simple_cb_t cb)
{
    plugin_register_cb(id, QEMU_PLUGIN_EV_VCPU_RESUME, cb);
}
571
/* Public API: subscribe @cb to the translation-flush event. */
void qemu_plugin_register_flush_cb(qemu_plugin_id_t id,
                                   qemu_plugin_simple_cb_t cb)
{
    plugin_register_cb(id, QEMU_PLUGIN_EV_FLUSH, cb);
}
577
578static bool free_dyn_cb_arr(void *p, uint32_t h, void *userp)
579{
580 g_array_free((GArray *) p, true);
581 return true;
582}
583
/*
 * Called when the translation cache is flushed: drop every dynamic
 * callback array (their TBs are gone), then notify FLUSH subscribers.
 */
void qemu_plugin_flush_cb(void)
{
    qht_iter_remove(&plugin.dyn_cb_arr_ht, free_dyn_cb_arr, NULL);
    qht_reset(&plugin.dyn_cb_arr_ht);

    plugin_cb__simple(QEMU_PLUGIN_EV_FLUSH);
}
591
592void exec_inline_op(enum plugin_dyn_cb_type type,
593 struct qemu_plugin_inline_cb *cb,
594 int cpu_index)
595{
596 char *ptr = cb->entry.score->data->data;
597 size_t elem_size = g_array_get_element_size(
598 cb->entry.score->data);
599 size_t offset = cb->entry.offset;
600 uint64_t *val = (uint64_t *)(ptr + offset + cpu_index * elem_size);
601
602 switch (type) {
603 case PLUGIN_CB_INLINE_ADD_U64:
604 *val += cb->imm;
605 break;
606 case PLUGIN_CB_INLINE_STORE_U64:
607 *val = cb->imm;
608 break;
609 default:
610 g_assert_not_reached();
611 }
612}
613
/*
 * Dispatch one memory access to all dynamic memory callbacks attached to
 * the current instruction.  The accessed value is stashed in cpu->neg so
 * plugin API calls made from inside a callback can retrieve it.  Each
 * callback only fires if its rw filter matches the access direction.
 */
void qemu_plugin_vcpu_mem_cb(CPUState *cpu, uint64_t vaddr,
                             uint64_t value_low,
                             uint64_t value_high,
                             MemOpIdx oi, enum qemu_plugin_mem_rw rw)
{
    GArray *arr = cpu->neg.plugin_mem_cbs;
    size_t i;

    /* no memory callbacks registered for this instruction */
    if (arr == NULL) {
        return;
    }

    cpu->neg.plugin_mem_value_low = value_low;
    cpu->neg.plugin_mem_value_high = value_high;

    for (i = 0; i < arr->len; i++) {
        struct qemu_plugin_dyn_cb *cb =
            &g_array_index(arr, struct qemu_plugin_dyn_cb, i);

        switch (cb->type) {
        case PLUGIN_CB_MEM_REGULAR:
            if (rw & cb->regular.rw) {
                /* grant the register access level the cb registered with */
                qemu_plugin_set_cb_flags(cpu,
                    tcg_call_to_qemu_plugin_cb_flags(cb->regular.info->flags));

                cb->regular.f.vcpu_mem(cpu->cpu_index,
                                       make_plugin_meminfo(oi, rw),
                                       vaddr, cb->regular.userp);
                qemu_plugin_set_cb_flags(cpu, QEMU_PLUGIN_CB_NO_REGS);
            }
            break;
        case PLUGIN_CB_INLINE_ADD_U64:
        case PLUGIN_CB_INLINE_STORE_U64:
            if (rw & cb->inline_insn.rw) {
                exec_inline_op(cb->type, &cb->inline_insn, cpu->cpu_index);
            }
            break;
        default:
            g_assert_not_reached();
        }
    }
}
656
/* Fire all ATEXIT callbacks (registered via atexit() in plugin_init). */
void qemu_plugin_atexit_cb(void)
{
    plugin_cb__udata(QEMU_PLUGIN_EV_ATEXIT);
}
661
/* Public API: subscribe @cb (with @udata) to the ATEXIT event. */
void qemu_plugin_register_atexit_cb(qemu_plugin_id_t id,
                                    qemu_plugin_udata_cb_t cb,
                                    void *udata)
{
    plugin_register_cb_udata(id, QEMU_PLUGIN_EV_ATEXIT, cb, udata);
}
668
669
670
671
672
673
674
675
676
/*
 * User-mode exit path: tear down all plugin instrumentation before the
 * process exits.  Inside an exclusive section (so no vCPU is executing
 * translated code), unregister every non-ATEXIT callback, disable memory
 * helpers on each vCPU, and flush all TBs; finally run ATEXIT callbacks.
 *
 * NOTE(review): start_exclusive() is called before taking plugin.lock —
 * this ordering appears deliberate (cf. the unlock-then-exclusive dance
 * in plugin_grow_scoreboards__locked); confirm the intended lock order.
 */
void qemu_plugin_user_exit(void)
{
    enum qemu_plugin_event ev;
    CPUState *cpu;

    start_exclusive();

    qemu_rec_mutex_lock(&plugin.lock);

    /* unregister everything except ATEXIT, which still has to fire below */
    for (ev = 0; ev < QEMU_PLUGIN_EV_MAX; ev++) {
        if (ev != QEMU_PLUGIN_EV_ATEXIT) {
            struct qemu_plugin_cb *cb, *next;

            QLIST_FOREACH_SAFE_RCU(cb, &plugin.cb_lists[ev], entry, next) {
                plugin_unregister_cb__locked(cb->ctx, ev);
            }
        }
    }
    CPU_FOREACH(cpu) {
        qemu_plugin_disable_mem_helpers(cpu);
    }
    qemu_rec_mutex_unlock(&plugin.lock);

    /* drop all translations that still embed plugin instrumentation */
    tb_flush(current_cpu);
    end_exclusive();

    qemu_plugin_atexit_cb();
}
714
715
716
717
718
/* Hold plugin.lock across fork() so the child doesn't inherit it mid-update. */
void qemu_plugin_user_prefork_lock(void)
{
    qemu_rec_mutex_lock(&plugin.lock);
}
723
/*
 * Post-fork counterpart: the parent simply unlocks; the child re-inits
 * the mutex, since the inherited lock state belongs to the parent's
 * locking thread and cannot be safely unlocked in the child.
 */
void qemu_plugin_user_postfork(bool is_child)
{
    if (is_child) {
        qemu_rec_mutex_init(&plugin.lock);
    } else {
        qemu_rec_mutex_unlock(&plugin.lock);
    }
}
733
/* qht comparator: dynamic-cb arrays are keyed by identity, so compare
 * the pointers themselves. */
static bool plugin_dyn_cb_arr_cmp(const void *ap, const void *bp)
{
    return ap == bp;
}
738
/*
 * Constructor: initialize all global plugin state before main() runs —
 * callback lists, lock, id/cpu hash tables, scoreboard list and initial
 * allocation size, context queue, and the dynamic-cb-array qht.  Also
 * arranges for ATEXIT callbacks to fire via atexit().
 */
static void __attribute__((__constructor__)) plugin_init(void)
{
    int i;

    for (i = 0; i < QEMU_PLUGIN_EV_MAX; i++) {
        QLIST_INIT(&plugin.cb_lists[i]);
    }
    qemu_rec_mutex_init(&plugin.lock);
    plugin.id_ht = g_hash_table_new(g_int64_hash, g_int64_equal);
    plugin.cpu_ht = g_hash_table_new(g_int_hash, g_int_equal);
    QLIST_INIT(&plugin.scoreboards);
    /* start with room for 16 vCPUs; grown on demand (doubling) */
    plugin.scoreboard_alloc_size = 16;
    QTAILQ_INIT(&plugin.ctxs);
    qht_init(&plugin.dyn_cb_arr_ht, plugin_dyn_cb_arr_cmp, 16,
             QHT_MODE_AUTO_RESIZE);
    atexit(qemu_plugin_atexit_cb);
}
756
/* Return the highest vCPU count observed so far (see vcpu init path). */
int plugin_num_vcpus(void)
{
    return plugin.num_vcpus;
}
761
762struct qemu_plugin_scoreboard *plugin_scoreboard_new(size_t element_size)
763{
764 struct qemu_plugin_scoreboard *score =
765 g_malloc0(sizeof(struct qemu_plugin_scoreboard));
766 score->data = g_array_new(FALSE, TRUE, element_size);
767 g_array_set_size(score->data, plugin.scoreboard_alloc_size);
768
769 qemu_rec_mutex_lock(&plugin.lock);
770 QLIST_INSERT_HEAD(&plugin.scoreboards, score, entry);
771 qemu_rec_mutex_unlock(&plugin.lock);
772
773 return score;
774}
775
/*
 * Unlink @score from the global list under plugin.lock, then release its
 * backing array and the scoreboard itself.
 */
void plugin_scoreboard_free(struct qemu_plugin_scoreboard *score)
{
    qemu_rec_mutex_lock(&plugin.lock);
    QLIST_REMOVE(score, entry);
    qemu_rec_mutex_unlock(&plugin.lock);

    g_array_free(score->data, TRUE);
    g_free(score);
}
785
786enum qemu_plugin_cb_flags tcg_call_to_qemu_plugin_cb_flags(int flags)
787{
788 if (flags & TCG_CALL_NO_RWG) {
789 return QEMU_PLUGIN_CB_NO_REGS;
790 } else if (flags & TCG_CALL_NO_WG) {
791 return QEMU_PLUGIN_CB_R_REGS;
792 } else {
793 return QEMU_PLUGIN_CB_RW_REGS;
794 }
795}
796