qemu/plugins/core.c
/*
 * QEMU Plugin Core code
 *
 * This is the core code that deals with injecting instrumentation into the
 * translated guest code.
 *
 * Copyright (C) 2017, Emilio G. Cota <cota@braap.org>
 * Copyright (C) 2019, Linaro
 *
 * License: GNU GPL, version 2 or later.
 *   See the COPYING file in the top-level directory.
 *
 * SPDX-License-Identifier: GPL-2.0-or-later
 */
#include "qemu/osdep.h"
#include "qemu/error-report.h"
#include "qemu/config-file.h"
#include "qapi/error.h"
#include "qemu/option.h"
#include "qemu/rcu_queue.h"
#include "qemu/xxhash.h"
#include "qemu/rcu.h"
#include "hw/core/cpu.h"
#include "exec/cpu-common.h"

#include "cpu.h"
#include "exec/exec-all.h"
#include "exec/helper-proto.h"
#include "sysemu/sysemu.h"
#include "tcg/tcg.h"
#include "tcg/tcg-op.h"
#include "trace/mem-internal.h" /* mem_info macros */
#include "plugin.h"

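/*
 * A single plugin callback registration: one record per (plugin context,
 * event) pair. Each record lives both in the owning context's callbacks[]
 * array and in the global per-event RCU list plugin.cb_lists[ev].
 */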
struct qemu_plugin_cb {
    struct qemu_plugin_ctx *ctx;
    union qemu_plugin_cb_sig f;
    void *udata;
    QLIST_ENTRY(qemu_plugin_cb) entry;
};

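/* Global plugin state; the registration paths serialize on plugin.lock */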
struct qemu_plugin_state plugin;

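/*
 * Map a plugin id back to its context. Must be called with plugin.lock
 * held; aborts on an unknown id, since that indicates API misuse.
 */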
struct qemu_plugin_ctx *plugin_id_to_ctx_locked(qemu_plugin_id_t id)
{
    struct qemu_plugin_ctx *ctx;
    qemu_plugin_id_t *id_p;

    id_p = g_hash_table_lookup(plugin.id_ht, &id);
    /* check the lookup result before container_of(); the non-zero member
     * offset would otherwise turn a NULL into a bogus non-NULL pointer */
    if (id_p == NULL) {
        error_report("plugin: invalid plugin id %" PRIu64, id);
        abort();
    }
    ctx = container_of(id_p, struct qemu_plugin_ctx, id);
    return ctx;
}

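/*
 * Refresh a vCPU's view of the global event mask. This runs on the vCPU's
 * own thread (via async_run_on_cpu for already-created vCPUs); the TB jump
 * cache is cleared so stale translations are looked up afresh.
 */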
static void plugin_cpu_update__async(CPUState *cpu, run_on_cpu_data data)
{
    bitmap_copy(cpu->plugin_mask, &data.host_ulong, QEMU_PLUGIN_EV_MAX);
    cpu_tb_jmp_cache_clear(cpu);
}

static void plugin_cpu_update__locked(gpointer k, gpointer v, gpointer udata)
{
    CPUState *cpu = container_of(k, CPUState, cpu_index);
    run_on_cpu_data mask = RUN_ON_CPU_HOST_ULONG(*plugin.mask);

    if (cpu->created) {
        async_run_on_cpu(cpu, plugin_cpu_update__async, mask);
    } else {
        plugin_cpu_update__async(cpu, mask);
    }
}

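/*
 * Drop a context's callback for @ev. If this empties the global list for
 * the event, clear the event bit and propagate the new mask to every vCPU.
 */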
void plugin_unregister_cb__locked(struct qemu_plugin_ctx *ctx,
                                  enum qemu_plugin_event ev)
{
    struct qemu_plugin_cb *cb = ctx->callbacks[ev];

    if (cb == NULL) {
        return;
    }
    QLIST_REMOVE_RCU(cb, entry);
    g_free(cb);
    ctx->callbacks[ev] = NULL;
    if (QLIST_EMPTY_RCU(&plugin.cb_lists[ev])) {
        clear_bit(ev, plugin.mask);
        g_hash_table_foreach(plugin.cpu_ht, plugin_cpu_update__locked, NULL);
    }
}

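/* Dispatch a per-vCPU event that carries no arguments beyond the vCPU index */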
static void plugin_vcpu_cb__simple(CPUState *cpu, enum qemu_plugin_event ev)
{
    struct qemu_plugin_cb *cb, *next;

    switch (ev) {
    case QEMU_PLUGIN_EV_VCPU_INIT:
    case QEMU_PLUGIN_EV_VCPU_EXIT:
    case QEMU_PLUGIN_EV_VCPU_IDLE:
    case QEMU_PLUGIN_EV_VCPU_RESUME:
        /* iterate safely; plugins might uninstall themselves at any time */
        QLIST_FOREACH_SAFE_RCU(cb, &plugin.cb_lists[ev], entry, next) {
            qemu_plugin_vcpu_simple_cb_t func = cb->f.vcpu_simple;

            func(cb->ctx->id, cpu->cpu_index);
        }
        break;
    default:
        g_assert_not_reached();
    }
}

static void plugin_cb__simple(enum qemu_plugin_event ev)
{
    struct qemu_plugin_cb *cb, *next;

    switch (ev) {
    case QEMU_PLUGIN_EV_FLUSH:
        QLIST_FOREACH_SAFE_RCU(cb, &plugin.cb_lists[ev], entry, next) {
            qemu_plugin_simple_cb_t func = cb->f.simple;

            func(cb->ctx->id);
        }
        break;
    default:
        g_assert_not_reached();
    }
}

static void plugin_cb__udata(enum qemu_plugin_event ev)
{
    struct qemu_plugin_cb *cb, *next;

    switch (ev) {
    case QEMU_PLUGIN_EV_ATEXIT:
        QLIST_FOREACH_SAFE_RCU(cb, &plugin.cb_lists[ev], entry, next) {
            qemu_plugin_udata_cb_t func = cb->f.udata;

            func(cb->ctx->id, cb->udata);
        }
        break;
    default:
        g_assert_not_reached();
    }
}

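/*
 * Register (func != NULL) or unregister (func == NULL) a callback for @ev.
 * Re-registering simply overwrites the function and user data; a first
 * registration may also set the event's bit in the global mask, in which
 * case all vCPUs are updated to start dispatching the event.
 */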
static void
do_plugin_register_cb(qemu_plugin_id_t id, enum qemu_plugin_event ev,
                      void *func, void *udata)
{
    struct qemu_plugin_ctx *ctx;

    qemu_rec_mutex_lock(&plugin.lock);
    ctx = plugin_id_to_ctx_locked(id);
    /* if the plugin is on its way out, ignore this request */
    if (unlikely(ctx->uninstalling)) {
        goto out_unlock;
    }
    if (func) {
        struct qemu_plugin_cb *cb = ctx->callbacks[ev];

        if (cb) {
            cb->f.generic = func;
            cb->udata = udata;
        } else {
            cb = g_new(struct qemu_plugin_cb, 1);
            cb->ctx = ctx;
            cb->f.generic = func;
            cb->udata = udata;
            ctx->callbacks[ev] = cb;
            QLIST_INSERT_HEAD_RCU(&plugin.cb_lists[ev], cb, entry);
            if (!test_bit(ev, plugin.mask)) {
                set_bit(ev, plugin.mask);
                g_hash_table_foreach(plugin.cpu_ht, plugin_cpu_update__locked,
                                     NULL);
            }
        }
    } else {
        plugin_unregister_cb__locked(ctx, ev);
    }
 out_unlock:
    qemu_rec_mutex_unlock(&plugin.lock);
}

void plugin_register_cb(qemu_plugin_id_t id, enum qemu_plugin_event ev,
                        void *func)
{
    do_plugin_register_cb(id, ev, func, NULL);
}

void
plugin_register_cb_udata(qemu_plugin_id_t id, enum qemu_plugin_event ev,
                         void *func, void *udata)
{
    do_plugin_register_cb(id, ev, func, udata);
}

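/*
 * vCPU lifecycle hooks, called by the core when a vCPU is realized or torn
 * down. Note the ordering: init callbacks fire after the vCPU has been
 * added to cpu_ht, exit callbacks fire before it is removed.
 */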
void qemu_plugin_vcpu_init_hook(CPUState *cpu)
{
    bool success;

    qemu_rec_mutex_lock(&plugin.lock);
    plugin_cpu_update__locked(&cpu->cpu_index, NULL, NULL);
    success = g_hash_table_insert(plugin.cpu_ht, &cpu->cpu_index,
                                  &cpu->cpu_index);
    g_assert(success);
    qemu_rec_mutex_unlock(&plugin.lock);

    plugin_vcpu_cb__simple(cpu, QEMU_PLUGIN_EV_VCPU_INIT);
}

void qemu_plugin_vcpu_exit_hook(CPUState *cpu)
{
    bool success;

    plugin_vcpu_cb__simple(cpu, QEMU_PLUGIN_EV_VCPU_EXIT);

    qemu_rec_mutex_lock(&plugin.lock);
    success = g_hash_table_remove(plugin.cpu_ht, &cpu->cpu_index);
    g_assert(success);
    qemu_rec_mutex_unlock(&plugin.lock);
}

struct plugin_for_each_args {
    struct qemu_plugin_ctx *ctx;
    qemu_plugin_vcpu_simple_cb_t cb;
};

static void plugin_vcpu_for_each(gpointer k, gpointer v, gpointer udata)
{
    struct plugin_for_each_args *args = udata;
    int cpu_index = *(int *)k;

    args->cb(args->ctx->id, cpu_index);
}

void qemu_plugin_vcpu_for_each(qemu_plugin_id_t id,
                               qemu_plugin_vcpu_simple_cb_t cb)
{
    struct plugin_for_each_args args;

    if (cb == NULL) {
        return;
    }
    qemu_rec_mutex_lock(&plugin.lock);
    args.ctx = plugin_id_to_ctx_locked(id);
    args.cb = cb;
    g_hash_table_foreach(plugin.cpu_ht, plugin_vcpu_for_each, &args);
    qemu_rec_mutex_unlock(&plugin.lock);
}

/* Allocate and return a callback record */
static struct qemu_plugin_dyn_cb *plugin_get_dyn_cb(GArray **arr)
{
    GArray *cbs = *arr;

    if (!cbs) {
        cbs = g_array_sized_new(false, false,
                                sizeof(struct qemu_plugin_dyn_cb), 1);
        *arr = cbs;
    }

    g_array_set_size(cbs, cbs->len + 1);
    return &g_array_index(cbs, struct qemu_plugin_dyn_cb, cbs->len - 1);
}

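/*
 * Record an inline operation, executed directly from TCG-generated code
 * rather than via a helper call. For example, an instruction-counting
 * plugin might (hypothetically) register QEMU_PLUGIN_INLINE_ADD_U64 with
 * imm = 1 and ptr pointing at a counter it owns.
 */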
void plugin_register_inline_op(GArray **arr,
                               enum qemu_plugin_mem_rw rw,
                               enum qemu_plugin_op op, void *ptr,
                               uint64_t imm)
{
    struct qemu_plugin_dyn_cb *dyn_cb;

    dyn_cb = plugin_get_dyn_cb(arr);
    dyn_cb->userp = ptr;
    dyn_cb->type = PLUGIN_CB_INLINE;
    dyn_cb->rw = rw;
    dyn_cb->inline_insn.op = op;
    dyn_cb->inline_insn.imm = imm;
}

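/*
 * Map the register-access level declared by the plugin to TCG call flags:
 * a callback that never touches guest registers neither reads nor writes
 * globals (TCG_CALL_NO_RWG), a read-only one never writes them
 * (TCG_CALL_NO_WG), and a read-write one gets no exemptions.
 */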
static inline uint32_t cb_to_tcg_flags(enum qemu_plugin_cb_flags flags)
{
    uint32_t ret;

    switch (flags) {
    case QEMU_PLUGIN_CB_RW_REGS:
        ret = 0;
        break;
    case QEMU_PLUGIN_CB_R_REGS:
        ret = TCG_CALL_NO_WG;
        break;
    case QEMU_PLUGIN_CB_NO_REGS:
    default:
        ret = TCG_CALL_NO_RWG;
    }
    return ret;
}

inline void
plugin_register_dyn_cb__udata(GArray **arr,
                              qemu_plugin_vcpu_udata_cb_t cb,
                              enum qemu_plugin_cb_flags flags, void *udata)
{
    struct qemu_plugin_dyn_cb *dyn_cb = plugin_get_dyn_cb(arr);

    dyn_cb->userp = udata;
    dyn_cb->tcg_flags = cb_to_tcg_flags(flags);
    dyn_cb->f.vcpu_udata = cb;
    dyn_cb->type = PLUGIN_CB_REGULAR;
}

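/*
 * Memory callbacks carry an extra rw filter so that loads, stores or both
 * can be instrumented; the callback is stored through the generic member
 * of the callback union and recovered as f.vcpu_mem at dispatch time.
 */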
void plugin_register_vcpu_mem_cb(GArray **arr,
                                 void *cb,
                                 enum qemu_plugin_cb_flags flags,
                                 enum qemu_plugin_mem_rw rw,
                                 void *udata)
{
    struct qemu_plugin_dyn_cb *dyn_cb;

    dyn_cb = plugin_get_dyn_cb(arr);
    dyn_cb->userp = udata;
    dyn_cb->tcg_flags = cb_to_tcg_flags(flags);
    dyn_cb->type = PLUGIN_CB_REGULAR;
    dyn_cb->rw = rw;
    dyn_cb->f.generic = cb;
}

void qemu_plugin_tb_trans_cb(CPUState *cpu, struct qemu_plugin_tb *tb)
{
    struct qemu_plugin_cb *cb, *next;
    enum qemu_plugin_event ev = QEMU_PLUGIN_EV_VCPU_TB_TRANS;

    /* no plugin_mask check here; caller should have checked */

    QLIST_FOREACH_SAFE_RCU(cb, &plugin.cb_lists[ev], entry, next) {
        qemu_plugin_vcpu_tb_trans_cb_t func = cb->f.vcpu_tb_trans;

        func(cb->ctx->id, tb);
    }
}

void
qemu_plugin_vcpu_syscall(CPUState *cpu, int64_t num, uint64_t a1, uint64_t a2,
                         uint64_t a3, uint64_t a4, uint64_t a5,
                         uint64_t a6, uint64_t a7, uint64_t a8)
{
    struct qemu_plugin_cb *cb, *next;
    enum qemu_plugin_event ev = QEMU_PLUGIN_EV_VCPU_SYSCALL;

    if (!test_bit(ev, cpu->plugin_mask)) {
        return;
    }

    QLIST_FOREACH_SAFE_RCU(cb, &plugin.cb_lists[ev], entry, next) {
        qemu_plugin_vcpu_syscall_cb_t func = cb->f.vcpu_syscall;

        func(cb->ctx->id, cpu->cpu_index, num, a1, a2, a3, a4, a5, a6, a7, a8);
    }
}

void qemu_plugin_vcpu_syscall_ret(CPUState *cpu, int64_t num, int64_t ret)
{
    struct qemu_plugin_cb *cb, *next;
    enum qemu_plugin_event ev = QEMU_PLUGIN_EV_VCPU_SYSCALL_RET;

    if (!test_bit(ev, cpu->plugin_mask)) {
        return;
    }

    QLIST_FOREACH_SAFE_RCU(cb, &plugin.cb_lists[ev], entry, next) {
        qemu_plugin_vcpu_syscall_ret_cb_t func = cb->f.vcpu_syscall_ret;

        func(cb->ctx->id, cpu->cpu_index, num, ret);
    }
}

void qemu_plugin_vcpu_idle_cb(CPUState *cpu)
{
    plugin_vcpu_cb__simple(cpu, QEMU_PLUGIN_EV_VCPU_IDLE);
}

void qemu_plugin_vcpu_resume_cb(CPUState *cpu)
{
    plugin_vcpu_cb__simple(cpu, QEMU_PLUGIN_EV_VCPU_RESUME);
}

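/*
 * Public registration entry points, called by plugins (usually from their
 * qemu_plugin_install()). A minimal, hypothetical plugin-side use:
 *
 *     static void my_idle_cb(qemu_plugin_id_t id, unsigned int vcpu_index)
 *     {
 *         g_print("vcpu %u idle\n", vcpu_index);   // example only
 *     }
 *
 *     QEMU_PLUGIN_EXPORT int qemu_plugin_install(qemu_plugin_id_t id, ...)
 *     {
 *         qemu_plugin_register_vcpu_idle_cb(id, my_idle_cb);
 *         return 0;
 *     }
 */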
void qemu_plugin_register_vcpu_idle_cb(qemu_plugin_id_t id,
                                       qemu_plugin_vcpu_simple_cb_t cb)
{
    plugin_register_cb(id, QEMU_PLUGIN_EV_VCPU_IDLE, cb);
}

void qemu_plugin_register_vcpu_resume_cb(qemu_plugin_id_t id,
                                         qemu_plugin_vcpu_simple_cb_t cb)
{
    plugin_register_cb(id, QEMU_PLUGIN_EV_VCPU_RESUME, cb);
}

void qemu_plugin_register_flush_cb(qemu_plugin_id_t id,
                                   qemu_plugin_simple_cb_t cb)
{
    plugin_register_cb(id, QEMU_PLUGIN_EV_FLUSH, cb);
}

static bool free_dyn_cb_arr(void *p, uint32_t h, void *userp)
{
    g_array_free((GArray *) p, true);
    return true;
}

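/*
 * Called on a translation-buffer flush: all TBs (and with them all
 * references to the dynamic callback arrays) are gone, so free the arrays
 * and then let plugins know via their flush callbacks.
 */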
void qemu_plugin_flush_cb(void)
{
    qht_iter_remove(&plugin.dyn_cb_arr_ht, free_dyn_cb_arr, NULL);
    qht_reset(&plugin.dyn_cb_arr_ht);

    plugin_cb__simple(QEMU_PLUGIN_EV_FLUSH);
}

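/*
 * Apply an inline operation by hand, for paths that do not go through
 * TCG-generated code (e.g. the helper-based memory dispatch below).
 */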
void exec_inline_op(struct qemu_plugin_dyn_cb *cb)
{
    uint64_t *val = cb->userp;

    switch (cb->inline_insn.op) {
    case QEMU_PLUGIN_INLINE_ADD_U64:
        *val += cb->inline_insn.imm;
        break;
    default:
        g_assert_not_reached();
    }
}

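/*
 * Helper-based dispatch of memory callbacks. The access direction is
 * recovered from the trace info: w is 1 (read) or 2 (write), matching the
 * QEMU_PLUGIN_MEM_R/QEMU_PLUGIN_MEM_W bits used in cb->rw.
 */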
void qemu_plugin_vcpu_mem_cb(CPUState *cpu, uint64_t vaddr, uint32_t info)
{
    GArray *arr = cpu->plugin_mem_cbs;
    size_t i;

    if (arr == NULL) {
        return;
    }
    for (i = 0; i < arr->len; i++) {
        struct qemu_plugin_dyn_cb *cb =
            &g_array_index(arr, struct qemu_plugin_dyn_cb, i);
        int w = !!(info & TRACE_MEM_ST) + 1;

        if (!(w & cb->rw)) {
            /* skip callbacks not interested in this access direction */
            continue;
        }
        switch (cb->type) {
        case PLUGIN_CB_REGULAR:
            cb->f.vcpu_mem(cpu->cpu_index, info, vaddr, cb->userp);
            break;
        case PLUGIN_CB_INLINE:
            exec_inline_op(cb);
            break;
        default:
            g_assert_not_reached();
        }
    }
}

void qemu_plugin_atexit_cb(void)
{
    plugin_cb__udata(QEMU_PLUGIN_EV_ATEXIT);
}

void qemu_plugin_register_atexit_cb(qemu_plugin_id_t id,
                                    qemu_plugin_udata_cb_t cb,
                                    void *udata)
{
    plugin_register_cb_udata(id, QEMU_PLUGIN_EV_ATEXIT, cb, udata);
}

/*
 * Call this function after longjmp'ing to the main loop. It's possible that the
 * last instruction of a TB might have used helpers, and therefore the
 * "disable" instruction will never execute because it ended up as dead code.
 */
void qemu_plugin_disable_mem_helpers(CPUState *cpu)
{
    cpu->plugin_mem_cbs = NULL;
}

static bool plugin_dyn_cb_arr_cmp(const void *ap, const void *bp)
{
    return ap == bp;
}

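/*
 * Runs as a library constructor, i.e. before main(), so the plugin
 * infrastructure is ready before any command-line plugins are loaded.
 */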
static void __attribute__((__constructor__)) plugin_init(void)
{
    int i;

    for (i = 0; i < QEMU_PLUGIN_EV_MAX; i++) {
        QLIST_INIT(&plugin.cb_lists[i]);
    }
    qemu_rec_mutex_init(&plugin.lock);
    plugin.id_ht = g_hash_table_new(g_int64_hash, g_int64_equal);
    plugin.cpu_ht = g_hash_table_new(g_int_hash, g_int_equal);
    QTAILQ_INIT(&plugin.ctxs);
    qht_init(&plugin.dyn_cb_arr_ht, plugin_dyn_cb_arr_cmp, 16,
             QHT_MODE_AUTO_RESIZE);
    atexit(qemu_plugin_atexit_cb);
}