qemu/tests/qtest/fuzz/generic_fuzz.c
/*
 * Generic Virtual-Device Fuzzing Target
 *
 * Copyright Red Hat Inc., 2020
 *
 * Authors:
 *  Alexander Bulekov   <alxndr@bu.edu>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"

#include <wordexp.h>

#include "hw/core/cpu.h"
#include "tests/qtest/libqos/libqtest.h"
#include "tests/qtest/libqos/pci-pc.h"
#include "fuzz.h"
#include "fork_fuzz.h"
#include "exec/address-spaces.h"
#include "string.h"
#include "exec/memory.h"
#include "exec/ramblock.h"
#include "hw/qdev-core.h"
#include "hw/pci/pci.h"
#include "hw/boards.h"
#include "generic_fuzz_configs.h"
#include "hw/mem/sparse-mem.h"

/*
 * SEPARATOR is used to separate "operations" in the fuzz input
 */
#define SEPARATOR "FUZZ"

enum cmds {
    OP_IN,
    OP_OUT,
    OP_READ,
    OP_WRITE,
    OP_PCI_READ,
    OP_PCI_WRITE,
    OP_DISABLE_PCI,
    OP_ADD_DMA_PATTERN,
    OP_CLEAR_DMA_PATTERNS,
    OP_CLOCK_STEP,
};
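
/*
 * generic_fuzz() selects a handler by taking the first byte of each command
 * modulo the number of entries in its ops[] table, so the order of this enum
 * determines how existing fuzz inputs are (re)interpreted.
 */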

#define DEFAULT_TIMEOUT_US 100000
#define USEC_IN_SEC 1000000

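/*
 * Upper bound on how many bytes a single fuzz_dma_read_cb() invocation will
 * populate; longer DMA reads are truncated to keep individual runs fast.
 */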
#define MAX_DMA_FILL_SIZE 0x10000

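/*
 * Standard PC CONFIG_ADDRESS/CONFIG_DATA ports: op_pci_read()/op_pci_write()
 * select a device register through 0xcf8 and access it through 0xcfc.
 */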
#define PCI_HOST_BRIDGE_CFG 0xcf8
#define PCI_HOST_BRIDGE_DATA 0xcfc

typedef struct {
    ram_addr_t addr;
    ram_addr_t size; /* The number of bytes until the end of the I/O region */
} address_range;

static useconds_t timeout = DEFAULT_TIMEOUT_US;

static bool qtest_log_enabled;

MemoryRegion *sparse_mem_mr;

/*
 * A pattern used to populate a DMA region or perform a memwrite. This is
 * useful for e.g. populating tables of unique addresses.
 * Example {.index = 1; .stride = 2; .len = 3; .data = "\x00\x01\x02"}
 * Renders as: 00 01 02   00 03 02   00 05 02   00 07 02 ...
 */
typedef struct {
    uint8_t index;      /* Index of a byte to increment by stride */
    uint8_t stride;     /* Increment each index'th byte by this amount */
    size_t len;
    const uint8_t *data;
} pattern;

/* Avoid filling the same DMA region between MMIO/PIO commands? */
static bool avoid_double_fetches;

static QTestState *qts_global; /* Need a global for the DMA callback */

/*
 * List of memory regions that are children of QOM objects specified by the
 * user for fuzzing.
 */
static GHashTable *fuzzable_memoryregions;
static GPtrArray *fuzzable_pci_devices;

struct get_io_cb_info {
    int index;
    int found;
    address_range result;
};

static bool get_io_address_cb(Int128 start, Int128 size,
                              const MemoryRegion *mr,
                              hwaddr offset_in_region,
                              void *opaque)
{
    struct get_io_cb_info *info = opaque;
    if (g_hash_table_lookup(fuzzable_memoryregions, mr)) {
        if (info->index == 0) {
            info->result.addr = (ram_addr_t)start;
            info->result.size = (ram_addr_t)size;
            info->found = 1;
            return true;
        }
        info->index--;
    }
    return false;
}

/*
 * List of DMA regions populated since the last fuzzing command. Used to ensure
 * that we only write to each DMA address once, to avoid race conditions when
 * building reproducers.
 */
static GArray *dma_regions;

static GArray *dma_patterns;
static int dma_pattern_index;
static bool pci_disabled;

/*
 * Allocate a block of memory and populate it with a pattern.
 */
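/*
 * For example, expanding the pattern documented above with
 * pattern_alloc(p, 8) yields: 00 01 02  00 03 02  00 05
 * The returned buffer is owned by the caller and freed with g_free().
 */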
static void *pattern_alloc(pattern p, size_t len)
{
    int i;
    uint8_t *buf = g_malloc(len);
    uint8_t sum = 0;

    for (i = 0; i < len; ++i) {
        buf[i] = p.data[i % p.len];
        if ((i % p.len) == p.index) {
            buf[i] += sum;
            sum += p.stride;
        }
    }
    return buf;
}

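/*
 * Clamp an access length to the MemoryRegion's declared maximum access size
 * and, for regions that do not allow unaligned accesses, to the alignment of
 * the target address. fuzz_dma_read_cb() uses this to pick a step size when
 * the translated region is not RAM/ROM.
 */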
static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr)
{
    unsigned access_size_max = mr->ops->valid.max_access_size;

    /*
     * Regions are assumed to support 1-4 byte accesses unless
     * otherwise specified.
     */
    if (access_size_max == 0) {
        access_size_max = 4;
    }

    /* Bound the maximum access by the alignment of the address.  */
    if (!mr->ops->impl.unaligned) {
        unsigned align_size_max = addr & -addr;
        if (align_size_max != 0 && align_size_max < access_size_max) {
            access_size_max = align_size_max;
        }
    }

    /* Don't attempt accesses larger than the maximum.  */
    if (l > access_size_max) {
        l = access_size_max;
    }
    l = pow2floor(l);

    return l;
}

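/*
 * fuzz_dma_read_cb() is invoked from QEMU's memory-access code whenever
 * device emulation performs a DMA read from guest memory, giving the fuzzer
 * a chance to lazily populate the buffer the device is about to parse with
 * the user-configured DMA patterns.
 */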
/*
 * Call-back for functions that perform DMA reads from guest memory. Confirm
 * that the region has not already been populated since the last loop in
 * generic_fuzz(), avoiding potential race-conditions, which we don't have
 * a good way of reproducing right now.
 */
void fuzz_dma_read_cb(size_t addr, size_t len, MemoryRegion *mr)
{
    /* Are we in the generic-fuzzer or are we using another fuzz-target? */
    if (!qts_global) {
        return;
    }

    /*
     * Return immediately if:
     * - We have no DMA patterns defined
     * - The length of the DMA read request is zero
     * - The DMA read is hitting an MR other than the machine's main RAM or
     *   the sparse-memory device backing high addresses
     */
    if (dma_patterns->len == 0
        || len == 0
        || (mr != current_machine->ram && mr != sparse_mem_mr)) {
        return;
    }

    /*
     * If we overlap with any existing dma_regions, split the range and only
     * populate the non-overlapping parts.
     */
    address_range region;
    bool double_fetch = false;
    for (int i = 0;
         i < dma_regions->len && (avoid_double_fetches || qtest_log_enabled);
         ++i) {
        region = g_array_index(dma_regions, address_range, i);
        if (addr < region.addr + region.size && addr + len > region.addr) {
            double_fetch = true;
            if (addr < region.addr
                && avoid_double_fetches) {
                fuzz_dma_read_cb(addr, region.addr - addr, mr);
            }
            if (addr + len > region.addr + region.size
                && avoid_double_fetches) {
                fuzz_dma_read_cb(region.addr + region.size,
                        addr + len - (region.addr + region.size), mr);
            }
            return;
        }
    }

    /* Cap the length of the DMA access to something reasonable */
    len = MIN(len, MAX_DMA_FILL_SIZE);

    address_range ar = {addr, len};
    g_array_append_val(dma_regions, ar);
    pattern p = g_array_index(dma_patterns, pattern, dma_pattern_index);
    void *buf_base = pattern_alloc(p, ar.size);
    void *buf = buf_base;
    hwaddr l, addr1;
    MemoryRegion *mr1;
    while (len > 0) {
        l = len;
        mr1 = address_space_translate(first_cpu->as,
                                      addr, &addr1, &l, true,
                                      MEMTXATTRS_UNSPECIFIED);

        if (!(memory_region_is_ram(mr1) ||
              memory_region_is_romd(mr1)) && mr1 != sparse_mem_mr) {
            l = memory_access_size(mr1, l, addr1);
        } else {
            /* ROM/RAM case */
            if (qtest_log_enabled) {
                /*
                 * With QTEST_LOG, use a normal, slow QTest memwrite. Prefix
                 * the log that will be written by qtest.c with a DMA tag, so
                 * we can reorder the resulting QTest trace so the DMA fills
                 * precede the last PIO/MMIO command.
                 */
                fprintf(stderr, "[DMA] ");
                if (double_fetch) {
                    fprintf(stderr, "[DOUBLE-FETCH] ");
                }
                fflush(stderr);
            }
            qtest_memwrite(qts_global, addr, buf, l);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    g_free(buf_base);

    /* Increment the index of the pattern for the next DMA access */
    dma_pattern_index = (dma_pattern_index + 1) % dma_patterns->len;
}

/*
 * Here we want to convert a fuzzer-provided [io-region-index, offset] to
 * a physical address. To do this, we iterate over all of the matched
 * MemoryRegions, checking whether each region exists within the particular
 * I/O space. Return the absolute address of the offset within the index'th
 * region that is a subregion of the io_space, and the distance until the end
 * of that memory region.
 */
static bool get_io_address(address_range *result, AddressSpace *as,
                           uint8_t index, uint32_t offset)
{
    FlatView *view;
    view = as->current_map;
    g_assert(view);
    struct get_io_cb_info cb_info = {};

    cb_info.index = index;

    /*
     * Loop around the FlatView until we match "index" number of
     * fuzzable_memoryregions, or until we know that there are no matching
     * memory_regions.
     */
    do {
        flatview_for_each_range(view, get_io_address_cb, &cb_info);
    } while (cb_info.index != index && !cb_info.found);

    *result = cb_info.result;
    if (result->size) {
        offset = offset % result->size;
        result->addr += offset;
        result->size -= offset;
    }
    return cb_info.found;
}

static bool get_pio_address(address_range *result,
                            uint8_t index, uint16_t offset)
{
    /*
     * PIO BARs can be set past the maximum port address (0xFFFF). Thus, result
     * can contain an addr that extends past the PIO space. When we pass this
     * address to qtest_in/qtest_out, it is cast to a uint16_t, so we might end
     * up fuzzing a completely different MemoryRegion/Device. Therefore, check
     * that the address here is within the PIO space limits.
     */
    bool found = get_io_address(result, &address_space_io, index, offset);
    return result->addr <= 0xFFFF ? found : false;
}

static bool get_mmio_address(address_range *result,
                             uint8_t index, uint32_t offset)
{
    return get_io_address(result, &address_space_memory, index, offset);
}

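/*
 * The op_* handlers below share the same shape: copy a small packed argument
 * struct from the start of the command payload (bailing out if the payload is
 * too short), resolve the base/offset pair to an address inside one of the
 * fuzzable MemoryRegions, and issue a qtest access with the requested size
 * reduced modulo the number of supported access widths.
 */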
static void op_in(QTestState *s, const unsigned char *data, size_t len)
{
    enum Sizes {Byte, Word, Long, end_sizes};
    struct {
        uint8_t size;
        uint8_t base;
        uint16_t offset;
    } a;
    address_range abs;

    if (len < sizeof(a)) {
        return;
    }
    memcpy(&a, data, sizeof(a));
    if (get_pio_address(&abs, a.base, a.offset) == 0) {
        return;
    }

    switch (a.size %= end_sizes) {
    case Byte:
        qtest_inb(s, abs.addr);
        break;
    case Word:
        if (abs.size >= 2) {
            qtest_inw(s, abs.addr);
        }
        break;
    case Long:
        if (abs.size >= 4) {
            qtest_inl(s, abs.addr);
        }
        break;
    }
}

static void op_out(QTestState *s, const unsigned char *data, size_t len)
{
    enum Sizes {Byte, Word, Long, end_sizes};
    struct {
        uint8_t size;
        uint8_t base;
        uint16_t offset;
        uint32_t value;
    } a;
    address_range abs;

    if (len < sizeof(a)) {
        return;
    }
    memcpy(&a, data, sizeof(a));

    if (get_pio_address(&abs, a.base, a.offset) == 0) {
        return;
    }

    switch (a.size %= end_sizes) {
    case Byte:
        qtest_outb(s, abs.addr, a.value & 0xFF);
        break;
    case Word:
        if (abs.size >= 2) {
            qtest_outw(s, abs.addr, a.value & 0xFFFF);
        }
        break;
    case Long:
        if (abs.size >= 4) {
            qtest_outl(s, abs.addr, a.value);
        }
        break;
    }
}

static void op_read(QTestState *s, const unsigned char *data, size_t len)
{
    enum Sizes {Byte, Word, Long, Quad, end_sizes};
    struct {
        uint8_t size;
        uint8_t base;
        uint32_t offset;
    } a;
    address_range abs;

    if (len < sizeof(a)) {
        return;
    }
    memcpy(&a, data, sizeof(a));

    if (get_mmio_address(&abs, a.base, a.offset) == 0) {
        return;
    }

    switch (a.size %= end_sizes) {
    case Byte:
        qtest_readb(s, abs.addr);
        break;
    case Word:
        if (abs.size >= 2) {
            qtest_readw(s, abs.addr);
        }
        break;
    case Long:
        if (abs.size >= 4) {
            qtest_readl(s, abs.addr);
        }
        break;
    case Quad:
        if (abs.size >= 8) {
            qtest_readq(s, abs.addr);
        }
        break;
    }
}

static void op_write(QTestState *s, const unsigned char *data, size_t len)
{
    enum Sizes {Byte, Word, Long, Quad, end_sizes};
    struct {
        uint8_t size;
        uint8_t base;
        uint32_t offset;
        uint64_t value;
    } a;
    address_range abs;

    if (len < sizeof(a)) {
        return;
    }
    memcpy(&a, data, sizeof(a));

    if (get_mmio_address(&abs, a.base, a.offset) == 0) {
        return;
    }

    switch (a.size %= end_sizes) {
    case Byte:
        qtest_writeb(s, abs.addr, a.value & 0xFF);
        break;
    case Word:
        if (abs.size >= 2) {
            qtest_writew(s, abs.addr, a.value & 0xFFFF);
        }
        break;
    case Long:
        if (abs.size >= 4) {
            qtest_writel(s, abs.addr, a.value & 0xFFFFFFFF);
        }
        break;
    case Quad:
        if (abs.size >= 8) {
            qtest_writeq(s, abs.addr, a.value);
        }
        break;
    }
}

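/*
 * PCI configuration space is fuzzed through the legacy 0xcf8/0xcfc mechanism:
 * pick one of the matched PCI devices, write the enable bit, its devfn and a
 * register offset to CONFIG_ADDRESS, then access the register through
 * CONFIG_DATA.
 */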
static void op_pci_read(QTestState *s, const unsigned char *data, size_t len)
{
    enum Sizes {Byte, Word, Long, end_sizes};
    struct {
        uint8_t size;
        uint8_t base;
        uint8_t offset;
    } a;
    if (len < sizeof(a) || fuzzable_pci_devices->len == 0 || pci_disabled) {
        return;
    }
    memcpy(&a, data, sizeof(a));
    PCIDevice *dev = g_ptr_array_index(fuzzable_pci_devices,
                                       a.base % fuzzable_pci_devices->len);
    int devfn = dev->devfn;
    qtest_outl(s, PCI_HOST_BRIDGE_CFG, (1U << 31) | (devfn << 8) | a.offset);
    switch (a.size %= end_sizes) {
    case Byte:
        qtest_inb(s, PCI_HOST_BRIDGE_DATA);
        break;
    case Word:
        qtest_inw(s, PCI_HOST_BRIDGE_DATA);
        break;
    case Long:
        qtest_inl(s, PCI_HOST_BRIDGE_DATA);
        break;
    }
}

static void op_pci_write(QTestState *s, const unsigned char *data, size_t len)
{
    enum Sizes {Byte, Word, Long, end_sizes};
    struct {
        uint8_t size;
        uint8_t base;
        uint8_t offset;
        uint32_t value;
    } a;
    if (len < sizeof(a) || fuzzable_pci_devices->len == 0 || pci_disabled) {
        return;
    }
    memcpy(&a, data, sizeof(a));
    PCIDevice *dev = g_ptr_array_index(fuzzable_pci_devices,
                                       a.base % fuzzable_pci_devices->len);
    int devfn = dev->devfn;
    qtest_outl(s, PCI_HOST_BRIDGE_CFG, (1U << 31) | (devfn << 8) | a.offset);
    switch (a.size %= end_sizes) {
    case Byte:
        qtest_outb(s, PCI_HOST_BRIDGE_DATA, a.value & 0xFF);
        break;
    case Word:
        qtest_outw(s, PCI_HOST_BRIDGE_DATA, a.value & 0xFFFF);
        break;
    case Long:
        qtest_outl(s, PCI_HOST_BRIDGE_DATA, a.value & 0xFFFFFFFF);
        break;
    }
}

static void op_add_dma_pattern(QTestState *s,
                               const unsigned char *data, size_t len)
{
    struct {
        /*
         * index and stride can be used to increment the index-th byte of the
         * pattern by the value stride, for each loop of the pattern.
         */
        uint8_t index;
        uint8_t stride;
    } a;

    if (len < sizeof(a) + 1) {
        return;
    }
    memcpy(&a, data, sizeof(a));
    pattern p = {a.index, a.stride, len - sizeof(a), data + sizeof(a)};
    p.index = a.index % p.len;
    g_array_append_val(dma_patterns, p);
}

static void op_clear_dma_patterns(QTestState *s,
                                  const unsigned char *data, size_t len)
{
    g_array_set_size(dma_patterns, 0);
    dma_pattern_index = 0;
}

static void op_clock_step(QTestState *s, const unsigned char *data, size_t len)
{
    qtest_clock_step_next(s);
}

static void op_disable_pci(QTestState *s, const unsigned char *data, size_t len)
{
    pci_disabled = true;
}

static void handle_timeout(int sig)
{
    if (qtest_log_enabled) {
        fprintf(stderr, "[Timeout]\n");
        fflush(stderr);
    }

    /*
     * If there is a crash, libfuzzer/ASAN forks a child to run an
     * "llvm-symbolizer" process for printing out a pretty stacktrace. It
     * communicates with this child using a pipe. If we time out and _Exit()
     * while libfuzzer is still communicating with the llvm-symbolizer child,
     * we will be left with an orphan llvm-symbolizer process. Sometimes, this
     * appears to lead to a deadlock in the forkserver. Use waitpid to check
     * whether there are any waitable children. If so, return from the signal
     * handler and let libfuzzer finish communicating with the child and exit
     * on its own.
     */
    if (waitpid(-1, NULL, WNOHANG) == 0) {
        return;
    }

    _Exit(0);
}

/*
 * Here, we interpret random bytes from the fuzzer as a sequence of commands.
 * Some commands can be variable-width, so we use a separator, SEPARATOR, to
 * specify the boundaries between commands. SEPARATOR is used to separate
 * "operations" in the fuzz input. Why use a separator, instead of just using
 * the operations' length to identify operation boundaries?
 *   1. This is a simple way to support variable-length operations
 *   2. This adds "stability" to the input.
 *      For example, take the input "AbBcgDefg", where there is no separator
 *      and opcodes are capitalized.
 *      Simply by removing the first byte, we end up with a very different
 *      sequence:
 *      BbcGdefg...
 *      By adding a separator, we avoid this problem:
 *      Ab SEP Bcg SEP Defg -> B SEP Bcg SEP Defg
 *      Since B uses two additional bytes as operands, the first "B" will be
 *      ignored. The fuzzer actively tries to reduce inputs, so such unused
 *      bytes are likely to be pruned, eventually.
 *
 * SEPARATOR is trivial for the fuzzer to discover when using ASan. Optionally,
 * SEPARATOR can be manually specified as a dictionary value (see libfuzzer's
 * -dict), though this should not be necessary.
 *
 * As a result, the stream of bytes is converted into a sequence of commands.
 * In a simplified example where SEPARATOR is 0xFF:
 * 00 01 02 FF 03 04 05 06 FF 01 FF ...
 * becomes this sequence of commands:
 * 00 01 02    -> op00 (0102)   -> in (0102, 2)
 * 03 04 05 06 -> op03 (040506) -> write (040506, 3)
 * 01          -> op01 (-,0)    -> out (-,0)
 * ...
 *
 * Note here that it is the job of the individual opcode functions to check
 * that enough data was provided. I.e. in the last command, out (-,0), op_out
 * needs to detect that not enough data was provided to select an address and
 * value for the operation, and simply return without doing anything.
 */
static void generic_fuzz(QTestState *s, const unsigned char *Data, size_t Size)
{
    void (*ops[]) (QTestState *s, const unsigned char *, size_t) = {
        [OP_IN]                 = op_in,
        [OP_OUT]                = op_out,
        [OP_READ]               = op_read,
        [OP_WRITE]              = op_write,
        [OP_PCI_READ]           = op_pci_read,
        [OP_PCI_WRITE]          = op_pci_write,
        [OP_DISABLE_PCI]        = op_disable_pci,
        [OP_ADD_DMA_PATTERN]    = op_add_dma_pattern,
        [OP_CLEAR_DMA_PATTERNS] = op_clear_dma_patterns,
        [OP_CLOCK_STEP]         = op_clock_step,
    };
    const unsigned char *cmd = Data;
    const unsigned char *nextcmd;
    size_t cmd_len;
    uint8_t op;

    if (fork() == 0) {
        /*
         * Sometimes the fuzzer will find inputs that take quite a long time to
         * process. Oftentimes, these inputs do not result in new coverage.
         * Even if these inputs might be interesting, they can slow down the
         * fuzzer, overall. Set a timeout to avoid hurting performance too much.
         */
        if (timeout) {
            struct sigaction sact;
            struct itimerval timer;

            sigemptyset(&sact.sa_mask);
            sact.sa_flags   = SA_NODEFER;
            sact.sa_handler = handle_timeout;
            /* ITIMER_VIRTUAL delivers SIGVTALRM when it expires */
            sigaction(SIGVTALRM, &sact, NULL);

            memset(&timer, 0, sizeof(timer));
            timer.it_value.tv_sec = timeout / USEC_IN_SEC;
            timer.it_value.tv_usec = timeout % USEC_IN_SEC;
            setitimer(ITIMER_VIRTUAL, &timer, NULL);
        }

        op_clear_dma_patterns(s, NULL, 0);
        pci_disabled = false;

        while (cmd && Size) {
            /* Get the length until the next command or end of input */
            nextcmd = memmem(cmd, Size, SEPARATOR, strlen(SEPARATOR));
            cmd_len = nextcmd ? nextcmd - cmd : Size;

            if (cmd_len > 0) {
                /* Interpret the first byte of the command as an opcode */
                op = *cmd % (sizeof(ops) / sizeof((ops)[0]));
                ops[op](s, cmd + 1, cmd_len - 1);

                /* Run the main loop */
                flush_events(s);
            }
            /* Advance to the next command */
            cmd = nextcmd ? nextcmd + sizeof(SEPARATOR) - 1 : nextcmd;
            Size = Size - (cmd_len + sizeof(SEPARATOR) - 1);
            g_array_set_size(dma_regions, 0);
        }
        _Exit(0);
    } else {
        flush_events(s);
        wait(0);
    }
}

static void usage(void)
{
    printf("Please specify the following environment variables:\n");
    printf("QEMU_FUZZ_ARGS= the command line arguments passed to qemu\n");
    printf("QEMU_FUZZ_OBJECTS= "
            "a space separated list of QOM type names for objects to fuzz\n");
    printf("Optionally: QEMU_AVOID_DOUBLE_FETCH= "
            "Try to avoid racy DMA double fetch bugs? %d by default\n",
            avoid_double_fetches);
    printf("Optionally: QEMU_FUZZ_TIMEOUT= Specify a custom timeout (us). "
            "0 to disable. %d by default\n", timeout);
    exit(0);
}

static int locate_fuzz_memory_regions(Object *child, void *opaque)
{
    const char *name;
    MemoryRegion *mr;
    if (object_dynamic_cast(child, TYPE_MEMORY_REGION)) {
        mr = MEMORY_REGION(child);
        if ((memory_region_is_ram(mr) ||
            memory_region_is_ram_device(mr) ||
            memory_region_is_rom(mr)) == false) {
            name = object_get_canonical_path_component(child);
            /*
             * We don't want duplicate pointers to the same MemoryRegion; the
             * hash table is keyed on the MemoryRegion pointer, so inserting
             * an existing region simply overwrites the old entry.
             */
            g_hash_table_insert(fuzzable_memoryregions, mr, (gpointer)true);
        }
    }
    return 0;
}

static int locate_fuzz_objects(Object *child, void *opaque)
{
    char *pattern = opaque;
    if (g_pattern_match_simple(pattern, object_get_typename(child))) {
        /* Find and save ptrs to any child MemoryRegions */
        object_child_foreach_recursive(child, locate_fuzz_memory_regions, NULL);

        /*
         * We matched an object. If it's a PCI device, store a pointer to it so
         * we can map BARs and fuzz its config space.
         */
        if (object_dynamic_cast(OBJECT(child), TYPE_PCI_DEVICE)) {
            /*
             * Don't want duplicate pointers to the same PCIDevice, so remove
             * copies of the pointer, before adding it.
             */
            g_ptr_array_remove_fast(fuzzable_pci_devices, PCI_DEVICE(child));
            g_ptr_array_add(fuzzable_pci_devices, PCI_DEVICE(child));
        }
    } else if (object_dynamic_cast(OBJECT(child), TYPE_MEMORY_REGION)) {
        if (g_pattern_match_simple(pattern,
            object_get_canonical_path_component(child))) {
            MemoryRegion *mr;
            mr = MEMORY_REGION(child);
            if ((memory_region_is_ram(mr) ||
                 memory_region_is_ram_device(mr) ||
                 memory_region_is_rom(mr)) == false) {
                g_hash_table_insert(fuzzable_memoryregions, mr, (gpointer)true);
            }
        }
    }
    return 0;
}

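/*
 * Called for each matched PCI device: map each of its six BARs that have a
 * size configured and enable the device (I/O, memory and bus-master
 * decoding), so the MMIO/PIO ops above can reach its registers.
 */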
static void pci_enum(gpointer pcidev, gpointer bus)
{
    PCIDevice *dev = pcidev;
    QPCIDevice *qdev;
    int i;

    qdev = qpci_device_find(bus, dev->devfn);
    g_assert(qdev != NULL);
    for (i = 0; i < 6; i++) {
        if (dev->io_regions[i].size) {
            qpci_iomap(qdev, i, NULL);
        }
    }
    qpci_device_enable(qdev);
    g_free(qdev);
}

static void generic_pre_fuzz(QTestState *s)
{
    GHashTableIter iter;
    MemoryRegion *mr;
    QPCIBus *pcibus;
    char **result;

    if (!getenv("QEMU_FUZZ_OBJECTS")) {
        usage();
    }
    if (getenv("QTEST_LOG")) {
        qtest_log_enabled = 1;
    }
    if (getenv("QEMU_AVOID_DOUBLE_FETCH")) {
        avoid_double_fetches = 1;
    }
    if (getenv("QEMU_FUZZ_TIMEOUT")) {
        timeout = g_ascii_strtoll(getenv("QEMU_FUZZ_TIMEOUT"), NULL, 0);
    }
    qts_global = s;

    /*
     * Create a special device that we can use to back DMA buffers at very
     * high memory addresses
     */
    sparse_mem_mr = sparse_mem_init(0, UINT64_MAX);

    dma_regions = g_array_new(false, false, sizeof(address_range));
    dma_patterns = g_array_new(false, false, sizeof(pattern));

    fuzzable_memoryregions = g_hash_table_new(NULL, NULL);
    fuzzable_pci_devices   = g_ptr_array_new();

    result = g_strsplit(getenv("QEMU_FUZZ_OBJECTS"), " ", -1);
    for (int i = 0; result[i] != NULL; i++) {
        printf("Matching objects by name %s\n", result[i]);
        object_child_foreach_recursive(qdev_get_machine(),
                                       locate_fuzz_objects,
                                       result[i]);
    }
    g_strfreev(result);
    printf("This process will try to fuzz the following MemoryRegions:\n");

    g_hash_table_iter_init(&iter, fuzzable_memoryregions);
    while (g_hash_table_iter_next(&iter, (gpointer)&mr, NULL)) {
        printf("  * %s (size 0x%" PRIx64 ")\n",
               object_get_canonical_path_component(&(mr->parent_obj)),
               memory_region_size(mr));
    }

    if (!g_hash_table_size(fuzzable_memoryregions)) {
        printf("No fuzzable memory regions found...\n");
        exit(1);
    }

    pcibus = qpci_new_pc(s, NULL);
    g_ptr_array_foreach(fuzzable_pci_devices, pci_enum, pcibus);
    qpci_free_pc(pcibus);

    counter_shm_init();
}

/*
 * When libfuzzer gives us two inputs to combine, return a new input with the
 * following structure:
 *
 * Input 1 (data1)
 * SEPARATOR
 * Clear out the DMA Patterns
 * SEPARATOR
 * Disable the pci_read/write instructions
 * SEPARATOR
 * Input 2 (data2)
 *
 * The idea is to collate the core behaviors of the two inputs.
 * For example:
 * Input 1: maps a device's BARs, sets up three DMA patterns, and triggers
 *          device functionality A
 * Input 2: maps a device's BARs, sets up one DMA pattern, and triggers device
 *          functionality B
 *
 * This function attempts to produce an input that:
 * Output: maps a device's BARs, sets up three DMA patterns, triggers
 *         device functionality A, replaces the DMA patterns with a single
 *         pattern, and triggers device functionality B.
 */
static size_t generic_fuzz_crossover(const uint8_t *data1, size_t size1,
                                     const uint8_t *data2, size_t size2,
                                     uint8_t *out, size_t max_out_size,
                                     unsigned int seed)
{
    size_t copy_len = 0, size = 0;

    /* Check that we have enough space for data1 and at least part of data2 */
    if (max_out_size <= size1 + strlen(SEPARATOR) * 3 + 2) {
        return 0;
    }

    /* Copy in the first input */
    copy_len = size1;
    memcpy(out + size, data1, copy_len);
    size += copy_len;
    max_out_size -= copy_len;

    /* Append a separator */
    copy_len = strlen(SEPARATOR);
    memcpy(out + size, SEPARATOR, copy_len);
    size += copy_len;
    max_out_size -= copy_len;

    /* Clear out the DMA Patterns */
    copy_len = 1;
    if (copy_len) {
        out[size] = OP_CLEAR_DMA_PATTERNS;
    }
    size += copy_len;
    max_out_size -= copy_len;

    /* Append a separator */
    copy_len = strlen(SEPARATOR);
    memcpy(out + size, SEPARATOR, copy_len);
    size += copy_len;
    max_out_size -= copy_len;

    /* Disable PCI ops. Assume data1 took care of setting up PCI */
    copy_len = 1;
    if (copy_len) {
        out[size] = OP_DISABLE_PCI;
    }
    size += copy_len;
    max_out_size -= copy_len;

    /* Append a separator */
    copy_len = strlen(SEPARATOR);
    memcpy(out + size, SEPARATOR, copy_len);
    size += copy_len;
    max_out_size -= copy_len;

    /* Copy over the second input */
    copy_len = MIN(size2, max_out_size);
    memcpy(out + size, data2, copy_len);
    size += copy_len;
    max_out_size -= copy_len;

    return size;
}

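/*
 * Note that the line continuations in the format string below leave runs of
 * whitespace inside the assembled command line; this is harmless because the
 * fuzzer's startup code word-splits the string (wordexp()) before handing it
 * to QEMU's argument parser.
 */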
static GString *generic_fuzz_cmdline(FuzzTarget *t)
{
    GString *cmd_line = g_string_new(TARGET_NAME);
    if (!getenv("QEMU_FUZZ_ARGS")) {
        usage();
    }
    g_string_append_printf(cmd_line, " -display none \
                                      -machine accel=qtest, \
                                      -m 512M %s ", getenv("QEMU_FUZZ_ARGS"));
    return cmd_line;
}

static GString *generic_fuzz_predefined_config_cmdline(FuzzTarget *t)
{
    gchar *args;
    const generic_fuzz_config *config;
    g_assert(t->opaque);

    config = t->opaque;
    setenv("QEMU_AVOID_DOUBLE_FETCH", "1", 1);
    if (config->argfunc) {
        args = config->argfunc();
        setenv("QEMU_FUZZ_ARGS", args, 1);
        g_free(args);
    } else {
        g_assert_nonnull(config->args);
        setenv("QEMU_FUZZ_ARGS", config->args, 1);
    }
    setenv("QEMU_FUZZ_OBJECTS", config->objects, 1);
    return generic_fuzz_cmdline(t);
}

static void register_generic_fuzz_targets(void)
{
    fuzz_add_target(&(FuzzTarget){
            .name = "generic-fuzz",
            .description = "Fuzz based on any qemu command-line args. ",
            .get_init_cmdline = generic_fuzz_cmdline,
            .pre_fuzz = generic_pre_fuzz,
            .fuzz = generic_fuzz,
            .crossover = generic_fuzz_crossover
    });

    GString *name;
    const generic_fuzz_config *config;

    for (int i = 0;
         i < sizeof(predefined_configs) / sizeof(generic_fuzz_config);
         i++) {
        config = predefined_configs + i;
        name = g_string_new("generic-fuzz");
        g_string_append_printf(name, "-%s", config->name);
        fuzz_add_target(&(FuzzTarget){
                .name = name->str,
                .description = "Predefined generic-fuzz config.",
                .get_init_cmdline = generic_fuzz_predefined_config_cmdline,
                .pre_fuzz = generic_pre_fuzz,
                .fuzz = generic_fuzz,
                .crossover = generic_fuzz_crossover,
                .opaque = (void *)config
        });
    }
}

fuzz_target_init(register_generic_fuzz_targets);