linux/tools/testing/selftests/bpf/benchs/bench_bloom_filter_map.c
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2021 Facebook */

#include <argp.h>
#include <linux/log2.h>
#include <pthread.h>
#include "bench.h"
#include "bloom_filter_bench.skel.h"
#include "bpf_util.h"

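/*
 * Benchmark state shared between the setup routines, the producer threads,
 * and measure(): map FDs, the feature flags that select each scenario, and
 * the mutex/condvar pair the map-population threads use to signal completion.
 */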
static struct ctx {
	bool use_array_map;
	bool use_hashmap;
	bool hashmap_use_bloom;
	bool count_false_hits;

	struct bloom_filter_bench *skel;

	int bloom_fd;
	int hashmap_fd;
	int array_map_fd;

	pthread_mutex_t map_done_mtx;
	pthread_cond_t map_done_cv;
	bool map_done;
	bool map_prepare_err;

	__u32 next_map_idx;
} ctx = {
	.map_done_mtx = PTHREAD_MUTEX_INITIALIZER,
	.map_done_cv = PTHREAD_COND_INITIALIZER,
};

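/*
 * Local mirror of one entry in the BPF object's percpu_stats array;
 * measure() indexes stats[] with the hit/drop/false-hit keys exported
 * by the skeleton.
 */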
struct stat {
	__u32 stats[3];
};

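/* Benchmark parameters and their defaults; overridden by the argp options below. */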
static struct {
	__u32 nr_entries;
	__u8 nr_hash_funcs;
	__u8 value_size;
} args = {
	.nr_entries = 1000,
	.nr_hash_funcs = 3,
	.value_size = 8,
};

enum {
	ARG_NR_ENTRIES = 3000,
	ARG_NR_HASH_FUNCS = 3001,
	ARG_VALUE_SIZE = 3002,
};

static const struct argp_option opts[] = {
	{ "nr_entries", ARG_NR_ENTRIES, "NR_ENTRIES", 0,
		"Set number of expected unique entries in the bloom filter"},
	{ "nr_hash_funcs", ARG_NR_HASH_FUNCS, "NR_HASH_FUNCS", 0,
		"Set number of hash functions in the bloom filter"},
	{ "value_size", ARG_VALUE_SIZE, "VALUE_SIZE", 0,
		"Set value size (in bytes) of bloom filter entries"},
	{},
};

static error_t parse_arg(int key, char *arg, struct argp_state *state)
{
	long ret;

	switch (key) {
	case ARG_NR_ENTRIES:
		ret = strtol(arg, NULL, 10);
		if (ret < 1 || ret > UINT_MAX) {
			fprintf(stderr, "Invalid nr_entries count.");
			argp_usage(state);
		}
		args.nr_entries = ret;
		break;
	case ARG_NR_HASH_FUNCS:
		ret = strtol(arg, NULL, 10);
		if (ret < 1 || ret > 15) {
			fprintf(stderr,
				"The bloom filter must use 1 to 15 hash functions.");
			argp_usage(state);
		}
		args.nr_hash_funcs = ret;
		break;
	case ARG_VALUE_SIZE:
		ret = strtol(arg, NULL, 10);
		if (ret < 2 || ret > 256) {
			fprintf(stderr,
				"Invalid value size. Must be between 2 and 256 bytes");
			argp_usage(state);
		}
		args.value_size = ret;
		break;
	default:
		return ARGP_ERR_UNKNOWN;
	}

	return 0;
}

/* exported into benchmark runner */
const struct argp bench_bloom_map_argp = {
	.options = opts,
	.parser = parse_arg,
};

static void validate(void)
{
	if (env.consumer_cnt != 1) {
		fprintf(stderr,
			"The bloom filter benchmarks do not support multi-consumer use\n");
		exit(1);
	}
}

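/*
 * Each producer thread spins on a cheap syscall; the setup functions below
 * attach the benchmark BPF programs so that every invocation exercises the
 * benchmarked map operations in kernel context.
 */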
static inline void trigger_bpf_program(void)
{
	syscall(__NR_getpgid);
}

static void *producer(void *input)
{
	while (true)
		trigger_bpf_program();

	return NULL;
}

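/*
 * Worker thread for pre-populating the maps. Threads claim entry indices by
 * atomically incrementing ctx.next_map_idx (1-based, decremented before use
 * as the array map key) and insert the same random value into the hashmap
 * (value doubling as key), the array map, and the bloom filter, depending on
 * which maps the current scenario uses. A duplicate random value (-EEXIST
 * from the hashmap) is retried with a fresh one.
 */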
static void *map_prepare_thread(void *arg)
{
	__u32 val_size, i;
	void *val = NULL;
	int err;

	val_size = args.value_size;
	val = malloc(val_size);
	if (!val) {
		ctx.map_prepare_err = true;
		goto done;
	}

	while (true) {
		i = __atomic_add_fetch(&ctx.next_map_idx, 1, __ATOMIC_RELAXED);
		if (i > args.nr_entries)
			break;

again:
		/* Populate hashmap, bloom filter map, and array map with the same
		 * random values
		 */
		err = syscall(__NR_getrandom, val, val_size, 0);
		if (err != val_size) {
			ctx.map_prepare_err = true;
			fprintf(stderr, "failed to get random value: %d\n", -errno);
			break;
		}

		if (ctx.use_hashmap) {
			err = bpf_map_update_elem(ctx.hashmap_fd, val, val, BPF_NOEXIST);
			if (err) {
				if (err != -EEXIST) {
					ctx.map_prepare_err = true;
					fprintf(stderr, "failed to add elem to hashmap: %d\n",
						-errno);
					break;
				}
				goto again;
			}
		}

		i--;

		if (ctx.use_array_map) {
			err = bpf_map_update_elem(ctx.array_map_fd, &i, val, 0);
			if (err) {
				ctx.map_prepare_err = true;
				fprintf(stderr, "failed to add elem to array map: %d\n", -errno);
				break;
			}
		}

		if (ctx.use_hashmap && !ctx.hashmap_use_bloom)
			continue;

		err = bpf_map_update_elem(ctx.bloom_fd, NULL, val, 0);
		if (err) {
			ctx.map_prepare_err = true;
			fprintf(stderr,
				"failed to add elem to bloom filter map: %d\n", -errno);
			break;
		}
	}
done:
	pthread_mutex_lock(&ctx.map_done_mtx);
	ctx.map_done = true;
	pthread_cond_signal(&ctx.map_done_cv);
	pthread_mutex_unlock(&ctx.map_done_mtx);

	if (val)
		free(val);

	return NULL;
}

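/*
 * Spawn one map-population thread per possible CPU, wait on the condvar
 * until population is done, then fill the skeleton's rand_vals buffer with
 * the random bytes the BPF programs operate on.
 */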
static void populate_maps(void)
{
	unsigned int nr_cpus = bpf_num_possible_cpus();
	pthread_t map_thread;
	int i, err, nr_rand_bytes;

	ctx.bloom_fd = bpf_map__fd(ctx.skel->maps.bloom_map);
	ctx.hashmap_fd = bpf_map__fd(ctx.skel->maps.hashmap);
	ctx.array_map_fd = bpf_map__fd(ctx.skel->maps.array_map);

	for (i = 0; i < nr_cpus; i++) {
		err = pthread_create(&map_thread, NULL, map_prepare_thread,
				     NULL);
		if (err) {
			fprintf(stderr, "failed to create pthread: %d\n", -errno);
			exit(1);
		}
	}

	pthread_mutex_lock(&ctx.map_done_mtx);
	while (!ctx.map_done)
		pthread_cond_wait(&ctx.map_done_cv, &ctx.map_done_mtx);
	pthread_mutex_unlock(&ctx.map_done_mtx);

	if (ctx.map_prepare_err)
		exit(1);

	nr_rand_bytes = syscall(__NR_getrandom, ctx.skel->bss->rand_vals,
				ctx.skel->rodata->nr_rand_bytes, 0);
	if (nr_rand_bytes != ctx.skel->rodata->nr_rand_bytes) {
		fprintf(stderr, "failed to get random bytes\n");
		exit(1);
	}
}

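/*
 * With values narrower than 8 bytes there are only 2^(value_size * 8)
 * distinct values, so nr_entries must not exceed that count or the
 * random population above could never produce enough unique entries.
 */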
static void check_args(void)
{
	if (args.value_size < 8) {
		__u64 nr_unique_entries = 1ULL << (args.value_size * 8);

		if (args.nr_entries > nr_unique_entries) {
			fprintf(stderr,
				"Not enough unique values for the nr_entries requested\n");
			exit(1);
		}
	}
}

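/*
 * Open the skeleton, size all three maps and their value (and hashmap key)
 * widths to the requested parameters, store the number of hash functions in
 * the bloom filter map's map_extra field, and load the BPF object.
 */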
static struct bloom_filter_bench *setup_skeleton(void)
{
	struct bloom_filter_bench *skel;

	check_args();

	setup_libbpf();

	skel = bloom_filter_bench__open();
	if (!skel) {
		fprintf(stderr, "failed to open skeleton\n");
		exit(1);
	}

	skel->rodata->hashmap_use_bloom = ctx.hashmap_use_bloom;
	skel->rodata->count_false_hits = ctx.count_false_hits;

	/* Resize number of entries */
	bpf_map__set_max_entries(skel->maps.hashmap, args.nr_entries);

	bpf_map__set_max_entries(skel->maps.array_map, args.nr_entries);

	bpf_map__set_max_entries(skel->maps.bloom_map, args.nr_entries);

	/* Set value size */
	bpf_map__set_value_size(skel->maps.array_map, args.value_size);

	bpf_map__set_value_size(skel->maps.bloom_map, args.value_size);

	bpf_map__set_value_size(skel->maps.hashmap, args.value_size);

	/* For the hashmap, we use the value as the key as well */
	bpf_map__set_key_size(skel->maps.hashmap, args.value_size);

	skel->bss->value_size = args.value_size;

	/* Set number of hash functions */
	bpf_map__set_map_extra(skel->maps.bloom_map, args.nr_hash_funcs);

	if (bloom_filter_bench__load(skel)) {
		fprintf(stderr, "failed to load skeleton\n");
		exit(1);
	}

	return skel;
}

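/*
 * Per-scenario setup: each variant flags which maps the BPF side should
 * exercise, builds the skeleton, pre-populates the maps, and attaches the
 * corresponding benchmark program.
 */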
static void bloom_lookup_setup(void)
{
	struct bpf_link *link;

	ctx.use_array_map = true;

	ctx.skel = setup_skeleton();

	populate_maps();

	link = bpf_program__attach(ctx.skel->progs.bloom_lookup);
	if (!link) {
		fprintf(stderr, "failed to attach program!\n");
		exit(1);
	}
}

static void bloom_update_setup(void)
{
	struct bpf_link *link;

	ctx.use_array_map = true;

	ctx.skel = setup_skeleton();

	populate_maps();

	link = bpf_program__attach(ctx.skel->progs.bloom_update);
	if (!link) {
		fprintf(stderr, "failed to attach program!\n");
		exit(1);
	}
}

static void false_positive_setup(void)
{
	struct bpf_link *link;

	ctx.use_hashmap = true;
	ctx.hashmap_use_bloom = true;
	ctx.count_false_hits = true;

	ctx.skel = setup_skeleton();

	populate_maps();

	link = bpf_program__attach(ctx.skel->progs.bloom_hashmap_lookup);
	if (!link) {
		fprintf(stderr, "failed to attach program!\n");
		exit(1);
	}
}

static void hashmap_with_bloom_setup(void)
{
	struct bpf_link *link;

	ctx.use_hashmap = true;
	ctx.hashmap_use_bloom = true;

	ctx.skel = setup_skeleton();

	populate_maps();

	link = bpf_program__attach(ctx.skel->progs.bloom_hashmap_lookup);
	if (!link) {
		fprintf(stderr, "failed to attach program!\n");
		exit(1);
	}
}

static void hashmap_no_bloom_setup(void)
{
	struct bpf_link *link;

	ctx.use_hashmap = true;

	ctx.skel = setup_skeleton();

	populate_maps();

	link = bpf_program__attach(ctx.skel->progs.bloom_hashmap_lookup);
	if (!link) {
		fprintf(stderr, "failed to attach program!\n");
		exit(1);
	}
}

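/*
 * Called once per reporting interval: sum the per-CPU hit/drop/false-hit
 * counters published by the BPF programs and report the delta since the
 * previous call.
 */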
static void measure(struct bench_res *res)
{
	unsigned long total_hits = 0, total_drops = 0, total_false_hits = 0;
	static unsigned long last_hits, last_drops, last_false_hits;
	unsigned int nr_cpus = bpf_num_possible_cpus();
	int hit_key, drop_key, false_hit_key;
	int i;

	hit_key = ctx.skel->rodata->hit_key;
	drop_key = ctx.skel->rodata->drop_key;
	false_hit_key = ctx.skel->rodata->false_hit_key;

	if (ctx.skel->bss->error != 0) {
		fprintf(stderr, "error (%d) when searching the bloom filter\n",
			ctx.skel->bss->error);
		exit(1);
	}

	for (i = 0; i < nr_cpus; i++) {
		struct stat *s = (void *)&ctx.skel->bss->percpu_stats[i];

		total_hits += s->stats[hit_key];
		total_drops += s->stats[drop_key];
		total_false_hits += s->stats[false_hit_key];
	}

	res->hits = total_hits - last_hits;
	res->drops = total_drops - last_drops;
	res->false_hits = total_false_hits - last_false_hits;

	last_hits = total_hits;
	last_drops = total_drops;
	last_false_hits = total_false_hits;
}

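/*
 * No consumer-side work: all benchmarked operations run in kernel context
 * when the producer threads trigger the attached BPF program.
 */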
static void *consumer(void *input)
{
	return NULL;
}

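/* Benchmark descriptors picked up by the selftests benchmark runner. */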
const struct bench bench_bloom_lookup = {
	.name = "bloom-lookup",
	.validate = validate,
	.setup = bloom_lookup_setup,
	.producer_thread = producer,
	.consumer_thread = consumer,
	.measure = measure,
	.report_progress = hits_drops_report_progress,
	.report_final = hits_drops_report_final,
};

const struct bench bench_bloom_update = {
	.name = "bloom-update",
	.validate = validate,
	.setup = bloom_update_setup,
	.producer_thread = producer,
	.consumer_thread = consumer,
	.measure = measure,
	.report_progress = hits_drops_report_progress,
	.report_final = hits_drops_report_final,
};

const struct bench bench_bloom_false_positive = {
	.name = "bloom-false-positive",
	.validate = validate,
	.setup = false_positive_setup,
	.producer_thread = producer,
	.consumer_thread = consumer,
	.measure = measure,
	.report_progress = false_hits_report_progress,
	.report_final = false_hits_report_final,
};

const struct bench bench_hashmap_without_bloom = {
	.name = "hashmap-without-bloom",
	.validate = validate,
	.setup = hashmap_no_bloom_setup,
	.producer_thread = producer,
	.consumer_thread = consumer,
	.measure = measure,
	.report_progress = hits_drops_report_progress,
	.report_final = hits_drops_report_final,
};

const struct bench bench_hashmap_with_bloom = {
	.name = "hashmap-with-bloom",
	.validate = validate,
	.setup = hashmap_with_bloom_setup,
	.producer_thread = producer,
	.consumer_thread = consumer,
	.measure = measure,
	.report_progress = hits_drops_report_progress,
	.report_final = hits_drops_report_final,
};