qemu/tests/tcg/multiarch/vma-pthread.c
/*
 * Test that VMA updates do not race.
 *
 * SPDX-License-Identifier: GPL-2.0-or-later
 *
 * Map a contiguous chunk of RWX memory. Split it into 8 equally sized
 * regions, each of which is guaranteed to have a certain combination of
 * protection bits set.
 *
 * Reader, writer and executor threads perform the respective operations on
 * pages, which are guaranteed to have the respective protection bit set.
 * Two mutator threads change the non-fixed protection bits randomly.
 */
#include <assert.h>
#include <fcntl.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdlib.h>
#include <string.h>
#include <stdio.h>
#include <sys/mman.h>
#include <time.h>
#include <unistd.h>

#include "nop_func.h"
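
/*
 * nop_func (defined in nop_func.h) is expected to be a short,
 * position-independent instruction sequence that simply returns; it is
 * used both as the byte pattern written to every page and as the code
 * that the executor thread calls. On targets without such a sequence it
 * may be empty, in which case main() skips the test.
 */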

#define PAGE_IDX_BITS 10
#define PAGE_COUNT (1 << PAGE_IDX_BITS)
#define PAGE_IDX_MASK (PAGE_COUNT - 1)
#define REGION_IDX_BITS 3
#define PAGE_IDX_R_MASK (1 << 7)
#define PAGE_IDX_W_MASK (1 << 8)
#define PAGE_IDX_X_MASK (1 << 9)
#define REGION_MASK (PAGE_IDX_R_MASK | PAGE_IDX_W_MASK | PAGE_IDX_X_MASK)
#define PAGES_PER_REGION (1 << (PAGE_IDX_BITS - REGION_IDX_BITS))
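
/*
 * A page index is PAGE_IDX_BITS (10) wide.  Bits 0..6 select a page
 * within a region; the top REGION_IDX_BITS (3) select one of the 8
 * regions and encode its guaranteed protection bits:
 *
 *   bit 7 set -> pages in the region are always readable
 *   bit 8 set -> pages in the region are always writable
 *   bit 9 set -> pages in the region are always executable
 *
 * ORing the right mask into a page index therefore yields a page on which
 * the corresponding access can never fault.
 */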

struct context {
    int pagesize;
    char *ptr;                  /* base of the PAGE_COUNT-page RWX chunk */
    int dev_null_fd;            /* sink for the reader's indirect reads */
    volatile int mutator_count; /* live mutators; workers stop at zero */
};

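/*
 * Loop over pages whose index has the R bit set, and read each one both
 * directly (memcmp against the template) and indirectly (write() to
 * /dev/null, which has to read the page on the caller's behalf), until
 * both mutators have finished.
 */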
static void *thread_read(void *arg)
{
    struct context *ctx = arg;
    ssize_t sret;
    size_t i, j;
    int ret;

    for (i = 0; ctx->mutator_count; i++) {
        char *p;

        j = (i & PAGE_IDX_MASK) | PAGE_IDX_R_MASK;
        p = &ctx->ptr[j * ctx->pagesize];

        /* Read directly. */
        ret = memcmp(p, nop_func, sizeof(nop_func));
        if (ret != 0) {
            fprintf(stderr, "fail direct read %p\n", p);
            abort();
        }

        /* Read indirectly. */
        sret = write(ctx->dev_null_fd, p, 1);
        if (sret != 1) {
            if (sret < 0) {
                fprintf(stderr, "fail indirect read %p (%m)\n", p);
            } else {
                fprintf(stderr, "fail indirect read %p (%zd)\n", p, sret);
            }
            abort();
        }
    }

    return NULL;
}

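/*
 * Loop over pages whose index has the W bit set, and write each one both
 * directly (memcpy of the template) and indirectly (clock_gettime()
 * storing a struct timespec into the page), until both mutators have
 * finished.
 */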
static void *thread_write(void *arg)
{
    struct context *ctx = arg;
    struct timespec *ts;
    size_t i, j;
    int ret;

    for (i = 0; ctx->mutator_count; i++) {
        j = (i & PAGE_IDX_MASK) | PAGE_IDX_W_MASK;

        /* Write directly. */
        memcpy(&ctx->ptr[j * ctx->pagesize], nop_func, sizeof(nop_func));

        /* Write using a syscall, placing the timespec at the end of page j. */
        ts = (struct timespec *)(&ctx->ptr[(j + 1) * ctx->pagesize] -
                                 sizeof(struct timespec));
        ret = clock_gettime(CLOCK_REALTIME, ts);
        if (ret != 0) {
            fprintf(stderr, "fail indirect write %p (%m)\n", ts);
            abort();
        }
    }

    return NULL;
}

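/*
 * Loop over pages whose index has the X bit set, calling the template
 * code at the start of each one, until both mutators have finished.
 */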
static void *thread_execute(void *arg)
{
    struct context *ctx = arg;
    size_t i, j;

    for (i = 0; ctx->mutator_count; i++) {
        j = (i & PAGE_IDX_MASK) | PAGE_IDX_X_MASK;
        ((void(*)(void))&ctx->ptr[j * ctx->pagesize])();
    }

    return NULL;
}

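/*
 * Repeatedly mprotect() a random page range with random protection bits,
 * always preserving the bits guaranteed by the regions that the range
 * overlaps.
 */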
static void *thread_mutate(void *arg)
{
    size_t i, start_idx, end_idx, page_idx, tmp;
    struct context *ctx = arg;
    unsigned int seed;
    int prot, ret;

    seed = (unsigned int)time(NULL);
    for (i = 0; i < 10000; i++) {
        start_idx = rand_r(&seed) & PAGE_IDX_MASK;
        end_idx = rand_r(&seed) & PAGE_IDX_MASK;
        if (start_idx > end_idx) {
            tmp = start_idx;
            start_idx = end_idx;
            end_idx = tmp;
        }
        prot = rand_r(&seed) & (PROT_READ | PROT_WRITE | PROT_EXEC);
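        /*
         * Walk the base index of every region that [start_idx, end_idx]
         * overlaps and fold that region's guaranteed bits into prot.
         */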
        for (page_idx = start_idx & REGION_MASK; page_idx <= end_idx;
             page_idx += PAGES_PER_REGION) {
            if (page_idx & PAGE_IDX_R_MASK) {
                prot |= PROT_READ;
            }
            if (page_idx & PAGE_IDX_W_MASK) {
                /* FIXME: qemu syscalls check for both read+write. */
                prot |= PROT_WRITE | PROT_READ;
            }
            if (page_idx & PAGE_IDX_X_MASK) {
                prot |= PROT_EXEC;
            }
        }
        ret = mprotect(&ctx->ptr[start_idx * ctx->pagesize],
                       (end_idx - start_idx + 1) * ctx->pagesize, prot);
        assert(ret == 0);
    }

    __atomic_fetch_sub(&ctx->mutator_count, 1, __ATOMIC_SEQ_CST);

    return NULL;
}

int main(void)
{
    pthread_t threads[5];
    struct context ctx;
    size_t i;
    int ret;

    /* Without a template, nothing to test. */
    if (sizeof(nop_func) == 0) {
        return EXIT_SUCCESS;
    }

    /* Initialize memory chunk. */
    ctx.pagesize = getpagesize();
    ctx.ptr = mmap(NULL, PAGE_COUNT * ctx.pagesize,
                   PROT_READ | PROT_WRITE | PROT_EXEC,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    assert(ctx.ptr != MAP_FAILED);
    for (i = 0; i < PAGE_COUNT; i++) {
        memcpy(&ctx.ptr[i * ctx.pagesize], nop_func, sizeof(nop_func));
    }
    ctx.dev_null_fd = open("/dev/null", O_WRONLY);
    assert(ctx.dev_null_fd >= 0);
    ctx.mutator_count = 2;

    /* Start threads. */
    ret = pthread_create(&threads[0], NULL, thread_read, &ctx);
    assert(ret == 0);
    ret = pthread_create(&threads[1], NULL, thread_write, &ctx);
    assert(ret == 0);
    ret = pthread_create(&threads[2], NULL, thread_execute, &ctx);
    assert(ret == 0);
    for (i = 3; i <= 4; i++) {
        ret = pthread_create(&threads[i], NULL, thread_mutate, &ctx);
        assert(ret == 0);
    }

    /* Wait for threads to stop. */
    for (i = 0; i < sizeof(threads) / sizeof(threads[0]); i++) {
        ret = pthread_join(threads[i], NULL);
        assert(ret == 0);
    }

    /* Destroy memory chunk. */
    ret = close(ctx.dev_null_fd);
    assert(ret == 0);
    ret = munmap(ctx.ptr, PAGE_COUNT * ctx.pagesize);
    assert(ret == 0);

    return EXIT_SUCCESS;
}