qemu/tests/qtest/fuzz/generic_fuzz.c
/*
 * Generic Virtual-Device Fuzzing Target
 *
 * Copyright Red Hat Inc., 2020
 *
 * Authors:
 *  Alexander Bulekov   <alxndr@bu.edu>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"

#include <wordexp.h>

#include "hw/core/cpu.h"
#include "tests/qtest/libqos/libqtest.h"
#include "tests/qtest/libqos/pci-pc.h"
#include "fuzz.h"
#include "fork_fuzz.h"
#include "string.h"
#include "exec/memory.h"
#include "exec/ramblock.h"
#include "hw/qdev-core.h"
#include "hw/pci/pci.h"
#include "hw/boards.h"
#include "generic_fuzz_configs.h"
#include "hw/mem/sparse-mem.h"

/*
 * SEPARATOR is used to separate "operations" in the fuzz input
 */
#define SEPARATOR "FUZZ"

enum cmds {
    OP_IN,
    OP_OUT,
    OP_READ,
    OP_WRITE,
    OP_PCI_READ,
    OP_PCI_WRITE,
    OP_DISABLE_PCI,
    OP_ADD_DMA_PATTERN,
    OP_CLEAR_DMA_PATTERNS,
    OP_CLOCK_STEP,
};

#define DEFAULT_TIMEOUT_US 100000
#define USEC_IN_SEC 1000000000

#define MAX_DMA_FILL_SIZE 0x10000

#define PCI_HOST_BRIDGE_CFG 0xcf8
#define PCI_HOST_BRIDGE_DATA 0xcfc

typedef struct {
    ram_addr_t addr;
    ram_addr_t size; /* The number of bytes until the end of the I/O region */
} address_range;

static useconds_t timeout = DEFAULT_TIMEOUT_US;

static bool qtest_log_enabled;

MemoryRegion *sparse_mem_mr;

/*
 * A pattern used to populate a DMA region or perform a memwrite. This is
 * useful for e.g. populating tables of unique addresses.
 * Example {.index = 1; .stride = 2; .len = 3; .data = "\x00\x01\x02"}
 * Renders as: 00 01 02   00 03 02   00 05 02   00 07 02 ...
 */
typedef struct {
    uint8_t index;      /* Index of a byte to increment by stride */
    uint8_t stride;     /* Increment each index'th byte by this amount */
    size_t len;
    const uint8_t *data;
} pattern;

/* Avoid filling the same DMA region between MMIO/PIO commands ? */
static bool avoid_double_fetches;

static QTestState *qts_global; /* Need a global for the DMA callback */

/*
 * List of memory regions that are children of QOM objects specified by the
 * user for fuzzing.
 */
static GHashTable *fuzzable_memoryregions;
static GPtrArray *fuzzable_pci_devices;
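
/*
 * State passed to the FlatView walk in get_io_address(): counts down "index"
 * matches among fuzzable_memoryregions and records the range of the match.
 */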
struct get_io_cb_info {
    int index;
    int found;
    address_range result;
};

static bool get_io_address_cb(Int128 start, Int128 size,
                              const MemoryRegion *mr,
                              hwaddr offset_in_region,
                              void *opaque)
{
    struct get_io_cb_info *info = opaque;
    if (g_hash_table_lookup(fuzzable_memoryregions, mr)) {
        if (info->index == 0) {
            info->result.addr = (ram_addr_t)start;
            info->result.size = (ram_addr_t)size;
            info->found = 1;
            return true;
        }
        info->index--;
    }
    return false;
}

/*
 * List of dma regions populated since the last fuzzing command. Used to ensure
 * that we only write to each DMA address once, to avoid race conditions when
 * building reproducers.
 */
static GArray *dma_regions;

static GArray *dma_patterns;
static int dma_pattern_index;
static bool pci_disabled;

/*
 * Allocate a block of memory and populate it with a pattern.
 */
static void *pattern_alloc(pattern p, size_t len)
{
    int i;
    uint8_t *buf = g_malloc(len);
    uint8_t sum = 0;

    for (i = 0; i < len; ++i) {
        buf[i] = p.data[i % p.len];
        if ((i % p.len) == p.index) {
            buf[i] += sum;
            sum += p.stride;
        }
    }
    return buf;
}
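
/*
 * Clamp a proposed access width to what the MemoryRegion can accept: honor
 * the region's declared max access size (default 4 bytes), respect alignment
 * unless the region allows unaligned accesses, and round down to a power of
 * two.
 */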
static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr)
{
    unsigned access_size_max = mr->ops->valid.max_access_size;

    /*
     * Regions are assumed to support 1-4 byte accesses unless
     * otherwise specified.
     */
    if (access_size_max == 0) {
        access_size_max = 4;
    }

    /* Bound the maximum access by the alignment of the address.  */
    if (!mr->ops->impl.unaligned) {
        unsigned align_size_max = addr & -addr;
        if (align_size_max != 0 && align_size_max < access_size_max) {
            access_size_max = align_size_max;
        }
    }

    /* Don't attempt accesses larger than the maximum.  */
    if (l > access_size_max) {
        l = access_size_max;
    }
    l = pow2floor(l);

    return l;
}

/*
 * Call-back for functions that perform DMA reads from guest memory. Confirm
 * that the region has not already been populated since the last loop in
 * generic_fuzz(), avoiding potential race-conditions, which we don't have
 * a good way to reproduce right now.
 */
void fuzz_dma_read_cb(size_t addr, size_t len, MemoryRegion *mr)
{
    /* Are we in the generic-fuzzer or are we using another fuzz-target? */
    if (!qts_global) {
        return;
    }

    /*
     * Return immediately if:
     * - We have no DMA patterns defined
     * - The length of the DMA read request is zero
     * - The DMA read is hitting an MR other than the machine's main RAM
     * - The DMA request hits past the bounds of our RAM
     */
    if (dma_patterns->len == 0
        || len == 0
        || (mr != current_machine->ram && mr != sparse_mem_mr)) {
        return;
    }

    /*
     * If we overlap with any existing dma_regions, split the range and only
     * populate the non-overlapping parts.
     */
    address_range region;
    bool double_fetch = false;
    for (int i = 0;
         i < dma_regions->len && (avoid_double_fetches || qtest_log_enabled);
         ++i) {
        region = g_array_index(dma_regions, address_range, i);
        if (addr < region.addr + region.size && addr + len > region.addr) {
            double_fetch = true;
            if (addr < region.addr
                && avoid_double_fetches) {
                fuzz_dma_read_cb(addr, region.addr - addr, mr);
            }
            if (addr + len > region.addr + region.size
                && avoid_double_fetches) {
                fuzz_dma_read_cb(region.addr + region.size,
                        addr + len - (region.addr + region.size), mr);
            }
            return;
        }
    }

    /* Cap the length of the DMA access to something reasonable */
    len = MIN(len, MAX_DMA_FILL_SIZE);

    address_range ar = {addr, len};
    g_array_append_val(dma_regions, ar);
    pattern p = g_array_index(dma_patterns, pattern, dma_pattern_index);
    void *buf_base = pattern_alloc(p, ar.size);
    void *buf = buf_base;
    hwaddr l, addr1;
    MemoryRegion *mr1;
    while (len > 0) {
        l = len;
        mr1 = address_space_translate(first_cpu->as,
                                      addr, &addr1, &l, true,
                                      MEMTXATTRS_UNSPECIFIED);

        if (!(memory_region_is_ram(mr1) ||
              memory_region_is_romd(mr1)) && mr1 != sparse_mem_mr) {
            l = memory_access_size(mr1, l, addr1);
        } else {
            /* ROM/RAM case */
            if (qtest_log_enabled) {
                /*
                 * With QTEST_LOG, use a normal, slow QTest memwrite. Prefix
                 * the log that will be written by qtest.c with a DMA tag, so
                 * we can reorder the resulting QTest trace so the DMA fills
                 * precede the last PIO/MMIO command.
                 */
                fprintf(stderr, "[DMA] ");
                if (double_fetch) {
                    fprintf(stderr, "[DOUBLE-FETCH] ");
                }
                fflush(stderr);
            }
            qtest_memwrite(qts_global, addr, buf, l);
        }
        len -= l;
        buf += l;
        addr += l;

    }
    g_free(buf_base);

    /* Increment the index of the pattern for the next DMA access */
    dma_pattern_index = (dma_pattern_index + 1) % dma_patterns->len;
}

/*
 * Here we want to convert a fuzzer-provided [io-region-index, offset] to
 * a physical address. To do this, we iterate over all of the matched
 * MemoryRegions. Check whether each region exists within the particular io
 * space. Return the absolute address of the offset within the index'th region
 * that is a subregion of the io_space and the distance until the end of the
 * memory region.
 */
static bool get_io_address(address_range *result, AddressSpace *as,
                            uint8_t index,
                            uint32_t offset) {
    FlatView *view;
    view = as->current_map;
    g_assert(view);
    struct get_io_cb_info cb_info = {};

    cb_info.index = index;

    /*
     * Loop around the FlatView until we match "index" number of
     * fuzzable_memoryregions, or until we know that there are no matching
     * memory_regions.
     */
    do {
        flatview_for_each_range(view, get_io_address_cb, &cb_info);
    } while (cb_info.index != index && !cb_info.found);

    *result = cb_info.result;
    if (result->size) {
        offset = offset % result->size;
        result->addr += offset;
        result->size -= offset;
    }
    return cb_info.found;
}

static bool get_pio_address(address_range *result,
                            uint8_t index, uint16_t offset)
{
    /*
     * PIO BARs can be set past the maximum port address (0xFFFF). Thus, result
     * can contain an addr that extends past the PIO space. When we pass this
     * address to qtest_in/qtest_out, it is cast to a uint16_t, so we might end
     * up fuzzing a completely different MemoryRegion/Device. Therefore, check
     * that the address here is within the PIO space limits.
     */
    bool found = get_io_address(result, &address_space_io, index, offset);
    return result->addr <= 0xFFFF ? found : false;
}

static bool get_mmio_address(address_range *result,
                             uint8_t index, uint32_t offset)
{
    return get_io_address(result, &address_space_memory, index, offset);
}
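
/*
 * PIO operations. The operand bytes supplied by the fuzzer are copied into a
 * small struct: access size, index of the target I/O region, an offset into
 * that region, and (for op_out) the value to write.
 */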
static void op_in(QTestState *s, const unsigned char * data, size_t len)
{
    enum Sizes {Byte, Word, Long, end_sizes};
    struct {
        uint8_t size;
        uint8_t base;
        uint16_t offset;
    } a;
    address_range abs;

    if (len < sizeof(a)) {
        return;
    }
    memcpy(&a, data, sizeof(a));
    if (get_pio_address(&abs, a.base, a.offset) == 0) {
        return;
    }

    switch (a.size %= end_sizes) {
    case Byte:
        qtest_inb(s, abs.addr);
        break;
    case Word:
        if (abs.size >= 2) {
            qtest_inw(s, abs.addr);
        }
        break;
    case Long:
        if (abs.size >= 4) {
            qtest_inl(s, abs.addr);
        }
        break;
    }
}

static void op_out(QTestState *s, const unsigned char * data, size_t len)
{
    enum Sizes {Byte, Word, Long, end_sizes};
    struct {
        uint8_t size;
        uint8_t base;
        uint16_t offset;
        uint32_t value;
    } a;
    address_range abs;

    if (len < sizeof(a)) {
        return;
    }
    memcpy(&a, data, sizeof(a));

    if (get_pio_address(&abs, a.base, a.offset) == 0) {
        return;
    }

    switch (a.size %= end_sizes) {
    case Byte:
        qtest_outb(s, abs.addr, a.value & 0xFF);
        break;
    case Word:
        if (abs.size >= 2) {
            qtest_outw(s, abs.addr, a.value & 0xFFFF);
        }
        break;
    case Long:
        if (abs.size >= 4) {
            qtest_outl(s, abs.addr, a.value);
        }
        break;
    }
}
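
/*
 * MMIO operations: same layout as the PIO ops above, but resolved against
 * address_space_memory and additionally allowing 64-bit (Quad) accesses.
 */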
static void op_read(QTestState *s, const unsigned char * data, size_t len)
{
    enum Sizes {Byte, Word, Long, Quad, end_sizes};
    struct {
        uint8_t size;
        uint8_t base;
        uint32_t offset;
    } a;
    address_range abs;

    if (len < sizeof(a)) {
        return;
    }
    memcpy(&a, data, sizeof(a));

    if (get_mmio_address(&abs, a.base, a.offset) == 0) {
        return;
    }

    switch (a.size %= end_sizes) {
    case Byte:
        qtest_readb(s, abs.addr);
        break;
    case Word:
        if (abs.size >= 2) {
            qtest_readw(s, abs.addr);
        }
        break;
    case Long:
        if (abs.size >= 4) {
            qtest_readl(s, abs.addr);
        }
        break;
    case Quad:
        if (abs.size >= 8) {
            qtest_readq(s, abs.addr);
        }
        break;
    }
}

static void op_write(QTestState *s, const unsigned char * data, size_t len)
{
    enum Sizes {Byte, Word, Long, Quad, end_sizes};
    struct {
        uint8_t size;
        uint8_t base;
        uint32_t offset;
        uint64_t value;
    } a;
    address_range abs;

    if (len < sizeof(a)) {
        return;
    }
    memcpy(&a, data, sizeof(a));

    if (get_mmio_address(&abs, a.base, a.offset) == 0) {
        return;
    }

    switch (a.size %= end_sizes) {
    case Byte:
        qtest_writeb(s, abs.addr, a.value & 0xFF);
        break;
    case Word:
        if (abs.size >= 2) {
            qtest_writew(s, abs.addr, a.value & 0xFFFF);
        }
        break;
    case Long:
        if (abs.size >= 4) {
            qtest_writel(s, abs.addr, a.value & 0xFFFFFFFF);
        }
        break;
    case Quad:
        if (abs.size >= 8) {
            qtest_writeq(s, abs.addr, a.value);
        }
        break;
    }
}
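
/*
 * PCI config-space accesses. These go through the legacy 0xcf8/0xcfc
 * mechanism: write (enable bit | devfn | register offset) to the host
 * bridge's CONFIG_ADDRESS port, then access the data through CONFIG_DATA.
 */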
static void op_pci_read(QTestState *s, const unsigned char * data, size_t len)
{
    enum Sizes {Byte, Word, Long, end_sizes};
    struct {
        uint8_t size;
        uint8_t base;
        uint8_t offset;
    } a;
    if (len < sizeof(a) || fuzzable_pci_devices->len == 0 || pci_disabled) {
        return;
    }
    memcpy(&a, data, sizeof(a));
    PCIDevice *dev = g_ptr_array_index(fuzzable_pci_devices,
                                  a.base % fuzzable_pci_devices->len);
    int devfn = dev->devfn;
    qtest_outl(s, PCI_HOST_BRIDGE_CFG, (1U << 31) | (devfn << 8) | a.offset);
    switch (a.size %= end_sizes) {
    case Byte:
        qtest_inb(s, PCI_HOST_BRIDGE_DATA);
        break;
    case Word:
        qtest_inw(s, PCI_HOST_BRIDGE_DATA);
        break;
    case Long:
        qtest_inl(s, PCI_HOST_BRIDGE_DATA);
        break;
    }
}

static void op_pci_write(QTestState *s, const unsigned char * data, size_t len)
{
    enum Sizes {Byte, Word, Long, end_sizes};
    struct {
        uint8_t size;
        uint8_t base;
        uint8_t offset;
        uint32_t value;
    } a;
    if (len < sizeof(a) || fuzzable_pci_devices->len == 0 || pci_disabled) {
        return;
    }
    memcpy(&a, data, sizeof(a));
    PCIDevice *dev = g_ptr_array_index(fuzzable_pci_devices,
                                  a.base % fuzzable_pci_devices->len);
    int devfn = dev->devfn;
    qtest_outl(s, PCI_HOST_BRIDGE_CFG, (1U << 31) | (devfn << 8) | a.offset);
    switch (a.size %= end_sizes) {
    case Byte:
        qtest_outb(s, PCI_HOST_BRIDGE_DATA, a.value & 0xFF);
        break;
    case Word:
        qtest_outw(s, PCI_HOST_BRIDGE_DATA, a.value & 0xFFFF);
        break;
    case Long:
        qtest_outl(s, PCI_HOST_BRIDGE_DATA, a.value & 0xFFFFFFFF);
        break;
    }
}

static void op_add_dma_pattern(QTestState *s,
                               const unsigned char *data, size_t len)
{
    struct {
        /*
         * index and stride can be used to increment the index-th byte of the
         * pattern by the value stride, for each loop of the pattern.
         */
        uint8_t index;
        uint8_t stride;
    } a;

    if (len < sizeof(a) + 1) {
        return;
    }
    memcpy(&a, data, sizeof(a));
    pattern p = {a.index, a.stride, len - sizeof(a), data + sizeof(a)};
    p.index = a.index % p.len;
    g_array_append_val(dma_patterns, p);
    return;
}

static void op_clear_dma_patterns(QTestState *s,
                                  const unsigned char *data, size_t len)
{
    g_array_set_size(dma_patterns, 0);
    dma_pattern_index = 0;
}

static void op_clock_step(QTestState *s, const unsigned char *data, size_t len)
{
    qtest_clock_step_next(s);
}

static void op_disable_pci(QTestState *s, const unsigned char *data, size_t len)
{
    pci_disabled = true;
}
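
/*
 * Signal handler for the per-input timeout armed in generic_fuzz().
 */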
static void handle_timeout(int sig)
{
    if (qtest_log_enabled) {
        fprintf(stderr, "[Timeout]\n");
        fflush(stderr);
    }

    /*
     * If there is a crash, libfuzzer/ASAN forks a child to run an
     * "llvm-symbolizer" process for printing out a pretty stacktrace. It
     * communicates with this child using a pipe.  If we timeout+Exit, while
     * libfuzzer is still communicating with the llvm-symbolizer child, we will
     * be left with an orphan llvm-symbolizer process. Sometimes, this appears
     * to lead to a deadlock in the forkserver. Use waitpid to check if there
     * are any waitable children. If so, exit out of the signal-handler, and
     * let libfuzzer finish communicating with the child, and exit, on its own.
     */
    if (waitpid(-1, NULL, WNOHANG) == 0) {
        return;
    }

    _Exit(0);
}

/*
 * Here, we interpret random bytes from the fuzzer, as a sequence of commands.
 * Some commands can be variable-width, so we use a separator, SEPARATOR, to
 * specify the boundaries between commands. SEPARATOR is used to separate
 * "operations" in the fuzz input. Why use a separator, instead of just using
 * the operations' length to identify operation boundaries?
 *   1. This is a simple way to support variable-length operations
 *   2. This adds "stability" to the input.
 *      For example, take the input "AbBcgDefg", where there is no separator
 *      and Opcodes are capitalized.
 *      Simply by removing the first byte, we end up with a very different
 *      sequence:
 *      BbcGdefg...
 *      By adding a separator, we avoid this problem:
 *      Ab SEP Bcg SEP Defg -> B SEP Bcg SEP Defg
 *      Since B uses two additional bytes as operands, the first "B" will be
 *      ignored. The fuzzer actively tries to reduce inputs, so such unused
 *      bytes are likely to be pruned, eventually.
 *
 * SEPARATOR is trivial for the fuzzer to discover when using ASan. Optionally,
 * SEPARATOR can be manually specified as a dictionary value (see libfuzzer's
 * -dict), though this should not be necessary.
 *
 * As a result, the stream of bytes is converted into a sequence of commands.
 * In a simplified example where SEPARATOR is 0xFF:
 * 00 01 02 FF 03 04 05 06 FF 01 FF ...
 * becomes this sequence of commands:
 * 00 01 02    -> op00 (0102)   -> in (0102, 2)
 * 03 04 05 06 -> op03 (040506) -> write (040506, 3)
 * 01          -> op01 (-,0)    -> out (-,0)
 * ...
 *
 * Note here that it is the job of the individual opcode functions to check
 * that enough data was provided. I.e. in the last command, out (-,0), op_out
 * needs to check whether enough data was provided to select an address/value
 * for the operation, and bail out if not.
 */
static void generic_fuzz(QTestState *s, const unsigned char *Data, size_t Size)
{
    void (*ops[]) (QTestState *s, const unsigned char *, size_t) = {
        [OP_IN]                 = op_in,
        [OP_OUT]                = op_out,
        [OP_READ]               = op_read,
        [OP_WRITE]              = op_write,
        [OP_PCI_READ]           = op_pci_read,
        [OP_PCI_WRITE]          = op_pci_write,
        [OP_DISABLE_PCI]        = op_disable_pci,
        [OP_ADD_DMA_PATTERN]    = op_add_dma_pattern,
        [OP_CLEAR_DMA_PATTERNS] = op_clear_dma_patterns,
        [OP_CLOCK_STEP]         = op_clock_step,
    };
    const unsigned char *cmd = Data;
    const unsigned char *nextcmd;
    size_t cmd_len;
    uint8_t op;

    if (fork() == 0) {
        /*
         * Sometimes the fuzzer will find inputs that take quite a long time to
         * process. Oftentimes, these inputs do not result in new coverage.
         * Even if these inputs might be interesting, they can slow down the
         * fuzzer, overall. Set a timeout to avoid hurting performance too much.
         */
        if (timeout) {
            struct sigaction sact;
            struct itimerval timer;

            sigemptyset(&sact.sa_mask);
            sact.sa_flags   = SA_NODEFER;
            sact.sa_handler = handle_timeout;
            sigaction(SIGALRM, &sact, NULL);

            memset(&timer, 0, sizeof(timer));
            timer.it_value.tv_sec = timeout / USEC_IN_SEC;
            timer.it_value.tv_usec = timeout % USEC_IN_SEC;
            setitimer(ITIMER_VIRTUAL, &timer, NULL);
        }

        op_clear_dma_patterns(s, NULL, 0);
        pci_disabled = false;

        while (cmd && Size) {
            /* Get the length until the next command or end of input */
            nextcmd = memmem(cmd, Size, SEPARATOR, strlen(SEPARATOR));
            cmd_len = nextcmd ? nextcmd - cmd : Size;

            if (cmd_len > 0) {
                /* Interpret the first byte of the command as an opcode */
                op = *cmd % (sizeof(ops) / sizeof((ops)[0]));
                ops[op](s, cmd + 1, cmd_len - 1);

                /* Run the main loop */
                flush_events(s);
            }
            /* Advance to the next command */
            cmd = nextcmd ? nextcmd + sizeof(SEPARATOR) - 1 : nextcmd;
            Size = Size - (cmd_len + sizeof(SEPARATOR) - 1);
            g_array_set_size(dma_regions, 0);
        }
        _Exit(0);
    } else {
        flush_events(s);
        wait(0);
    }
}

static void usage(void)
{
    printf("Please specify the following environment variables:\n");
    printf("QEMU_FUZZ_ARGS= the command line arguments passed to qemu\n");
    printf("QEMU_FUZZ_OBJECTS= "
            "a space separated list of QOM type names for objects to fuzz\n");
    printf("Optionally: QEMU_AVOID_DOUBLE_FETCH= "
            "Try to avoid racy DMA double fetch bugs? %d by default\n",
            avoid_double_fetches);
    printf("Optionally: QEMU_FUZZ_TIMEOUT= Specify a custom timeout (us). "
            "0 to disable. %d by default\n", timeout);
    exit(0);
}
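
/*
 * A typical invocation looks something like the following (the binary name,
 * machine and device are only an illustration; see QEMU's fuzzing
 * documentation for the authoritative usage):
 *
 *   QEMU_FUZZ_ARGS="-machine q35 -nodefaults -device virtio-net-pci" \
 *   QEMU_FUZZ_OBJECTS="virtio*" \
 *   ./qemu-fuzz-i386 --fuzz-target=generic-fuzz
 */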

static int locate_fuzz_memory_regions(Object *child, void *opaque)
{
    const char *name;
    MemoryRegion *mr;
    if (object_dynamic_cast(child, TYPE_MEMORY_REGION)) {
        mr = MEMORY_REGION(child);
        if ((memory_region_is_ram(mr) ||
            memory_region_is_ram_device(mr) ||
            memory_region_is_rom(mr)) == false) {
            name = object_get_canonical_path_component(child);
            /*
             * We don't want duplicate pointers to the same MemoryRegion, so
             * try to remove copies of the pointer, before adding it.
             */
            g_hash_table_insert(fuzzable_memoryregions, mr, (gpointer)true);
        }
    }
    return 0;
}

static int locate_fuzz_objects(Object *child, void *opaque)
{
    char *pattern = opaque;
    if (g_pattern_match_simple(pattern, object_get_typename(child))) {
        /* Find and save ptrs to any child MemoryRegions */
        object_child_foreach_recursive(child, locate_fuzz_memory_regions, NULL);

        /*
         * We matched an object. If it's a PCI device, store a pointer to it so
         * we can map BARs and fuzz its config space.
         */
        if (object_dynamic_cast(OBJECT(child), TYPE_PCI_DEVICE)) {
            /*
             * Don't want duplicate pointers to the same PCIDevice, so remove
             * copies of the pointer, before adding it.
             */
            g_ptr_array_remove_fast(fuzzable_pci_devices, PCI_DEVICE(child));
            g_ptr_array_add(fuzzable_pci_devices, PCI_DEVICE(child));
        }
    } else if (object_dynamic_cast(OBJECT(child), TYPE_MEMORY_REGION)) {
        if (g_pattern_match_simple(pattern,
            object_get_canonical_path_component(child))) {
            MemoryRegion *mr;
            mr = MEMORY_REGION(child);
            if ((memory_region_is_ram(mr) ||
                 memory_region_is_ram_device(mr) ||
                 memory_region_is_rom(mr)) == false) {
                g_hash_table_insert(fuzzable_memoryregions, mr, (gpointer)true);
            }
        }
    }
    return 0;
}
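
/*
 * Map every BAR of a matched PCI device and enable the device, so that its
 * MMIO/PIO regions end up in the address spaces we later fuzz.
 */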
static void pci_enum(gpointer pcidev, gpointer bus)
{
    PCIDevice *dev = pcidev;
    QPCIDevice *qdev;
    int i;

    qdev = qpci_device_find(bus, dev->devfn);
    g_assert(qdev != NULL);
    for (i = 0; i < 6; i++) {
        if (dev->io_regions[i].size) {
            qpci_iomap(qdev, i, NULL);
        }
    }
    qpci_device_enable(qdev);
    g_free(qdev);
}
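
/*
 * One-time setup before fuzzing begins: read the configuration from the
 * environment, create the sparse-memory backing for DMA, collect the
 * MemoryRegions and PCI devices matching QEMU_FUZZ_OBJECTS, map their BARs,
 * and initialize the shared coverage counters (counter_shm_init()) used with
 * the fork()-based fuzzing loop.
 */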
static void generic_pre_fuzz(QTestState *s)
{
    GHashTableIter iter;
    MemoryRegion *mr;
    QPCIBus *pcibus;
    char **result;

    if (!getenv("QEMU_FUZZ_OBJECTS")) {
        usage();
    }
    if (getenv("QTEST_LOG")) {
        qtest_log_enabled = 1;
    }
    if (getenv("QEMU_AVOID_DOUBLE_FETCH")) {
        avoid_double_fetches = 1;
    }
    if (getenv("QEMU_FUZZ_TIMEOUT")) {
        timeout = g_ascii_strtoll(getenv("QEMU_FUZZ_TIMEOUT"), NULL, 0);
    }
    qts_global = s;

    /*
     * Create a special device that we can use to back DMA buffers at very
     * high memory addresses
     */
    sparse_mem_mr = sparse_mem_init(0, UINT64_MAX);

    dma_regions = g_array_new(false, false, sizeof(address_range));
    dma_patterns = g_array_new(false, false, sizeof(pattern));

    fuzzable_memoryregions = g_hash_table_new(NULL, NULL);
    fuzzable_pci_devices   = g_ptr_array_new();

    result = g_strsplit(getenv("QEMU_FUZZ_OBJECTS"), " ", -1);
    for (int i = 0; result[i] != NULL; i++) {
        printf("Matching objects by name %s\n", result[i]);
        object_child_foreach_recursive(qdev_get_machine(),
                                    locate_fuzz_objects,
                                    result[i]);
    }
    g_strfreev(result);
    printf("This process will try to fuzz the following MemoryRegions:\n");

    g_hash_table_iter_init(&iter, fuzzable_memoryregions);
    while (g_hash_table_iter_next(&iter, (gpointer)&mr, NULL)) {
        printf("  * %s (size 0x%" PRIx64 ")\n",
               object_get_canonical_path_component(&(mr->parent_obj)),
               memory_region_size(mr));
    }

    if (!g_hash_table_size(fuzzable_memoryregions)) {
        printf("No fuzzable memory regions found...\n");
        exit(1);
    }

    pcibus = qpci_new_pc(s, NULL);
    g_ptr_array_foreach(fuzzable_pci_devices, pci_enum, pcibus);
    qpci_free_pc(pcibus);

    counter_shm_init();
}

/*
 * When libfuzzer gives us two inputs to combine, return a new input with the
 * following structure:
 *
 * Input 1 (data1)
 * SEPARATOR
 * Clear out the DMA Patterns
 * SEPARATOR
 * Disable the pci_read/write instructions
 * SEPARATOR
 * Input 2 (data2)
 *
 * The idea is to collate the core behaviors of the two inputs.
 * For example:
 * Input 1: maps a device's BARs, sets up three DMA patterns, and triggers
 *          device functionality A
 * Input 2: maps a device's BARs, sets up one DMA pattern, and triggers device
 *          functionality B
 *
 * This function attempts to produce an input that:
 * Output: maps a device's BARs, sets up three DMA patterns, triggers
 *         device functionality A, replaces the DMA patterns with a single
 *         pattern, and triggers device functionality B.
 */
static size_t generic_fuzz_crossover(const uint8_t *data1, size_t size1, const
                                     uint8_t *data2, size_t size2, uint8_t *out,
                                     size_t max_out_size, unsigned int seed)
{
    size_t copy_len = 0, size = 0;

    /* Check that we have enough space for data1 and at least part of data2 */
    if (max_out_size <= size1 + strlen(SEPARATOR) * 3 + 2) {
        return 0;
    }

    /* Copy in the first input */
    copy_len = size1;
    memcpy(out + size, data1, copy_len);
    size += copy_len;
    max_out_size -= copy_len;

    /* Append a separator */
    copy_len = strlen(SEPARATOR);
    memcpy(out + size, SEPARATOR, copy_len);
    size += copy_len;
    max_out_size -= copy_len;

    /* Clear out the DMA Patterns */
    copy_len = 1;
    if (copy_len) {
        out[size] = OP_CLEAR_DMA_PATTERNS;
    }
    size += copy_len;
    max_out_size -= copy_len;

    /* Append a separator */
    copy_len = strlen(SEPARATOR);
    memcpy(out + size, SEPARATOR, copy_len);
    size += copy_len;
    max_out_size -= copy_len;

    /* Disable PCI ops. Assume data1 took care of setting up PCI */
    copy_len = 1;
    if (copy_len) {
        out[size] = OP_DISABLE_PCI;
    }
    size += copy_len;
    max_out_size -= copy_len;

    /* Append a separator */
    copy_len = strlen(SEPARATOR);
    memcpy(out + size, SEPARATOR, copy_len);
    size += copy_len;
    max_out_size -= copy_len;

    /* Copy over as much of the second input as fits */
    copy_len = MIN(size2, max_out_size);
    memcpy(out + size, data2, copy_len);
    size += copy_len;
    max_out_size -= copy_len;

    return size;
}
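
/*
 * Build the QEMU command line: always use the qtest accelerator and no
 * display, and append whatever the user passed in QEMU_FUZZ_ARGS.
 */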
static GString *generic_fuzz_cmdline(FuzzTarget *t)
{
    GString *cmd_line = g_string_new(TARGET_NAME);
    if (!getenv("QEMU_FUZZ_ARGS")) {
        usage();
    }
    g_string_append_printf(cmd_line, " -display none \
                                      -machine accel=qtest, \
                                      -m 512M %s ", getenv("QEMU_FUZZ_ARGS"));
    return cmd_line;
}

static GString *generic_fuzz_predefined_config_cmdline(FuzzTarget *t)
{
    gchar *args;
    const generic_fuzz_config *config;
    g_assert(t->opaque);

    config = t->opaque;
    setenv("QEMU_AVOID_DOUBLE_FETCH", "1", 1);
    if (config->argfunc) {
        args = config->argfunc();
        setenv("QEMU_FUZZ_ARGS", args, 1);
        g_free(args);
    } else {
        g_assert_nonnull(config->args);
        setenv("QEMU_FUZZ_ARGS", config->args, 1);
    }
    setenv("QEMU_FUZZ_OBJECTS", config->objects, 1);
    return generic_fuzz_cmdline(t);
}
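
/*
 * Register the bare "generic-fuzz" target, plus one "generic-fuzz-<name>"
 * target for each entry in predefined_configs.
 */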
static void register_generic_fuzz_targets(void)
{
    fuzz_add_target(&(FuzzTarget){
            .name = "generic-fuzz",
            .description = "Fuzz based on any qemu command-line args. ",
            .get_init_cmdline = generic_fuzz_cmdline,
            .pre_fuzz = generic_pre_fuzz,
            .fuzz = generic_fuzz,
            .crossover = generic_fuzz_crossover
    });

    GString *name;
    const generic_fuzz_config *config;

    for (int i = 0;
         i < sizeof(predefined_configs) / sizeof(generic_fuzz_config);
         i++) {
        config = predefined_configs + i;
        name = g_string_new("generic-fuzz");
        g_string_append_printf(name, "-%s", config->name);
        fuzz_add_target(&(FuzzTarget){
                .name = name->str,
                .description = "Predefined generic-fuzz config.",
                .get_init_cmdline = generic_fuzz_predefined_config_cmdline,
                .pre_fuzz = generic_pre_fuzz,
                .fuzz = generic_fuzz,
                .crossover = generic_fuzz_crossover,
                .opaque = (void *)config
        });
    }
}

fuzz_target_init(register_generic_fuzz_targets);