linux/tools/testing/selftests/bpf/progs/map_ptr_kern.c
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2020 Facebook

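/*
 * Each map declared below is also cast to a mirror of the matching
 * kernel-internal struct, declared with preserve_access_index so that
 * CO-RE relocates the field offsets against the running kernel, and the
 * fields read through that map pointer are checked against the declared
 * map attributes.
 */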
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

#define LOOP_BOUND 0xf
#define MAX_ENTRIES 8
#define HALF_ENTRIES (MAX_ENTRIES >> 1)

_Static_assert(MAX_ENTRIES < LOOP_BOUND, "MAX_ENTRIES must be < LOOP_BOUND");

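/*
 * g_map_type and g_line are read back by userspace to report which
 * checker, and which VERIFY() line inside it, failed.  page_size must be
 * written by userspace before the program runs, since the ring buffer
 * check below depends on the system page size.
 */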
enum bpf_map_type g_map_type = BPF_MAP_TYPE_UNSPEC;
__u32 g_line = 0;
int page_size = 0; /* userspace should set it */

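/*
 * VERIFY_TYPE() runs one checker for a given map type; VERIFY() records
 * __LINE__ before evaluating its condition, so a failing check aborts
 * the program with g_line pointing at the exact assertion.
 */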
#define VERIFY_TYPE(type, func) ({      \
        g_map_type = type;              \
        if (!func())                    \
                return 0;               \
})

#define VERIFY(expr) ({         \
        g_line = __LINE__;      \
        if (!(expr))            \
                return 0;       \
})

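/*
 * Minimal mirror of the kernel's struct bpf_map.  preserve_access_index
 * makes every field access a CO-RE relocation, fixed up at load time to
 * match the running kernel's layout.
 */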
struct bpf_map {
        enum bpf_map_type map_type;
        __u32 key_size;
        __u32 value_size;
        __u32 max_entries;
        __u32 id;
} __attribute__((preserve_access_index));

static inline int check_bpf_map_fields(struct bpf_map *map, __u32 key_size,
                                       __u32 value_size, __u32 max_entries)
{
        VERIFY(map->map_type == g_map_type);
        VERIFY(map->key_size == key_size);
        VERIFY(map->value_size == value_size);
        VERIFY(map->max_entries == max_entries);
        VERIFY(map->id > 0);

        return 1;
}

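/*
 * "indirect" is the map pointer reached through the struct bpf_map
 * embedded in a kernel-internal type; "direct" is the map definition
 * cast straight to struct bpf_map.  Both name the same object, so every
 * field must agree.
 */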
static inline int check_bpf_map_ptr(struct bpf_map *indirect,
                                    struct bpf_map *direct)
{
        VERIFY(indirect->map_type == direct->map_type);
        VERIFY(indirect->key_size == direct->key_size);
        VERIFY(indirect->value_size == direct->value_size);
        VERIFY(indirect->max_entries == direct->max_entries);
        VERIFY(indirect->id == direct->id);

        return 1;
}

static inline int check(struct bpf_map *indirect, struct bpf_map *direct,
                        __u32 key_size, __u32 value_size, __u32 max_entries)
{
        VERIFY(check_bpf_map_ptr(indirect, direct));
        VERIFY(check_bpf_map_fields(indirect, key_size, value_size,
                                    max_entries));
        return 1;
}

static inline int check_default(struct bpf_map *indirect,
                                struct bpf_map *direct)
{
        VERIFY(check(indirect, direct, sizeof(__u32), sizeof(__u32),
                     MAX_ENTRIES));
        return 1;
}

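/*
 * __noinline forces a real BPF-to-BPF call, so this variant also checks
 * that map pointers stay usable when passed across a function-call
 * boundary instead of being inlined away.
 */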
static __noinline int
check_default_noinline(struct bpf_map *indirect, struct bpf_map *direct)
{
        VERIFY(check(indirect, direct, sizeof(__u32), sizeof(__u32),
                     MAX_ENTRIES));
        return 1;
}

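/*
 * atomic_t and struct bpf_htab mirror the kernel's hash table type.
 * count tracks live elements and is tested through m_hash, which sets
 * BPF_F_NO_PREALLOC for that purpose.
 */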
typedef struct {
        int counter;
} atomic_t;

struct bpf_htab {
        struct bpf_map map;
        atomic_t count;
        __u32 n_buckets;
        __u32 elem_size;
} __attribute__((preserve_access_index));

struct {
        __uint(type, BPF_MAP_TYPE_HASH);
        __uint(map_flags, BPF_F_NO_PREALLOC); /* to test bpf_htab.count */
        __uint(max_entries, MAX_ENTRIES);
        __type(key, __u32);
        __type(value, __u32);
} m_hash SEC(".maps");

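/*
 * The expected elem_size of 64 is an assumption about the kernel's
 * internal layout on 64-bit: struct htab_elem (48 bytes) plus the key
 * and value each rounded up to 8 bytes.
 */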
static inline int check_hash(void)
{
        struct bpf_htab *hash = (struct bpf_htab *)&m_hash;
        struct bpf_map *map = (struct bpf_map *)&m_hash;
        int i;

        VERIFY(check_default_noinline(&hash->map, map));

        VERIFY(hash->n_buckets == MAX_ENTRIES);
        VERIFY(hash->elem_size == 64);

        VERIFY(hash->count.counter == 0);
        for (i = 0; i < HALF_ENTRIES; ++i) {
                const __u32 key = i;
                const __u32 val = 1;

                if (bpf_map_update_elem(hash, &key, &val, 0))
                        return 0;
        }
        VERIFY(hash->count.counter == HALF_ENTRIES);

        return 1;
}

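/*
 * For arrays, elem_size is the per-slot allocation size; with a __u32
 * value it is expected to read back as 8, i.e. the value size rounded
 * up to 8 bytes.
 */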
struct bpf_array {
        struct bpf_map map;
        __u32 elem_size;
} __attribute__((preserve_access_index));

struct {
        __uint(type, BPF_MAP_TYPE_ARRAY);
        __uint(max_entries, MAX_ENTRIES);
        __type(key, __u32);
        __type(value, __u32);
} m_array SEC(".maps");

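/*
 * The walk below is additionally bounded by LOOP_BOUND so the verifier
 * can prove termination; the _Static_assert above guarantees the bound
 * never cuts the walk short of max_entries.
 */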
static inline int check_array(void)
{
        struct bpf_array *array = (struct bpf_array *)&m_array;
        struct bpf_map *map = (struct bpf_map *)&m_array;
        int i, n_lookups = 0, n_keys = 0;

        VERIFY(check_default(&array->map, map));

        VERIFY(array->elem_size == 8);

        for (i = 0; i < array->map.max_entries && i < LOOP_BOUND; ++i) {
                const __u32 key = i;
                __u32 *val = bpf_map_lookup_elem(array, &key);

                ++n_lookups;
                if (val)
                        ++n_keys;
        }

        VERIFY(n_lookups == MAX_ENTRIES);
        VERIFY(n_keys == MAX_ENTRIES);

        return 1;
}

struct {
        __uint(type, BPF_MAP_TYPE_PROG_ARRAY);
        __uint(max_entries, MAX_ENTRIES);
        __type(key, __u32);
        __type(value, __u32);
} m_prog_array SEC(".maps");

static inline int check_prog_array(void)
{
        struct bpf_array *prog_array = (struct bpf_array *)&m_prog_array;
        struct bpf_map *map = (struct bpf_map *)&m_prog_array;

        VERIFY(check_default(&prog_array->map, map));

        return 1;
}

struct {
        __uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
        __uint(max_entries, MAX_ENTRIES);
        __type(key, __u32);
        __type(value, __u32);
} m_perf_event_array SEC(".maps");

static inline int check_perf_event_array(void)
{
        struct bpf_array *perf_event_array = (struct bpf_array *)&m_perf_event_array;
        struct bpf_map *map = (struct bpf_map *)&m_perf_event_array;

        VERIFY(check_default(&perf_event_array->map, map));

        return 1;
}

struct {
        __uint(type, BPF_MAP_TYPE_PERCPU_HASH);
        __uint(max_entries, MAX_ENTRIES);
        __type(key, __u32);
        __type(value, __u32);
} m_percpu_hash SEC(".maps");

static inline int check_percpu_hash(void)
{
        struct bpf_htab *percpu_hash = (struct bpf_htab *)&m_percpu_hash;
        struct bpf_map *map = (struct bpf_map *)&m_percpu_hash;

        VERIFY(check_default(&percpu_hash->map, map));

        return 1;
}

struct {
        __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
        __uint(max_entries, MAX_ENTRIES);
        __type(key, __u32);
        __type(value, __u32);
} m_percpu_array SEC(".maps");

static inline int check_percpu_array(void)
{
        struct bpf_array *percpu_array = (struct bpf_array *)&m_percpu_array;
        struct bpf_map *map = (struct bpf_map *)&m_percpu_array;

        VERIFY(check_default(&percpu_array->map, map));

        return 1;
}

struct bpf_stack_map {
        struct bpf_map map;
} __attribute__((preserve_access_index));

struct {
        __uint(type, BPF_MAP_TYPE_STACK_TRACE);
        __uint(max_entries, MAX_ENTRIES);
        __type(key, __u32);
        __type(value, __u64);
} m_stack_trace SEC(".maps");

static inline int check_stack_trace(void)
{
        struct bpf_stack_map *stack_trace =
                (struct bpf_stack_map *)&m_stack_trace;
        struct bpf_map *map = (struct bpf_map *)&m_stack_trace;

        VERIFY(check(&stack_trace->map, map, sizeof(__u32), sizeof(__u64),
                     MAX_ENTRIES));

        return 1;
}

struct {
        __uint(type, BPF_MAP_TYPE_CGROUP_ARRAY);
        __uint(max_entries, MAX_ENTRIES);
        __type(key, __u32);
        __type(value, __u32);
} m_cgroup_array SEC(".maps");

static inline int check_cgroup_array(void)
{
        struct bpf_array *cgroup_array = (struct bpf_array *)&m_cgroup_array;
        struct bpf_map *map = (struct bpf_map *)&m_cgroup_array;

        VERIFY(check_default(&cgroup_array->map, map));

        return 1;
}

struct {
        __uint(type, BPF_MAP_TYPE_LRU_HASH);
        __uint(max_entries, MAX_ENTRIES);
        __type(key, __u32);
        __type(value, __u32);
} m_lru_hash SEC(".maps");

static inline int check_lru_hash(void)
{
        struct bpf_htab *lru_hash = (struct bpf_htab *)&m_lru_hash;
        struct bpf_map *map = (struct bpf_map *)&m_lru_hash;

        VERIFY(check_default(&lru_hash->map, map));

        return 1;
}

struct {
        __uint(type, BPF_MAP_TYPE_LRU_PERCPU_HASH);
        __uint(max_entries, MAX_ENTRIES);
        __type(key, __u32);
        __type(value, __u32);
} m_lru_percpu_hash SEC(".maps");

static inline int check_lru_percpu_hash(void)
{
        struct bpf_htab *lru_percpu_hash = (struct bpf_htab *)&m_lru_percpu_hash;
        struct bpf_map *map = (struct bpf_map *)&m_lru_percpu_hash;

        VERIFY(check_default(&lru_percpu_hash->map, map));

        return 1;
}

struct lpm_trie {
        struct bpf_map map;
} __attribute__((preserve_access_index));

struct lpm_key {
        struct bpf_lpm_trie_key trie_key;
        __u32 data;
};

struct {
        __uint(type, BPF_MAP_TYPE_LPM_TRIE);
        __uint(map_flags, BPF_F_NO_PREALLOC);
        __uint(max_entries, MAX_ENTRIES);
        __type(key, struct lpm_key);
        __type(value, __u32);
} m_lpm_trie SEC(".maps");

static inline int check_lpm_trie(void)
{
        struct lpm_trie *lpm_trie = (struct lpm_trie *)&m_lpm_trie;
        struct bpf_map *map = (struct bpf_map *)&m_lpm_trie;

        VERIFY(check(&lpm_trie->map, map, sizeof(struct lpm_key), sizeof(__u32),
                     MAX_ENTRIES));

        return 1;
}

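/*
 * Map-in-map: __array(values, ...) declares the inner map type, and the
 * initializers statically place inner_map in slot 0 of the array of maps
 * and slot 2 of the hash of maps further below.
 */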
struct inner_map {
        __uint(type, BPF_MAP_TYPE_ARRAY);
        __uint(max_entries, 1);
        __type(key, __u32);
        __type(value, __u32);
} inner_map SEC(".maps");

struct {
        __uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS);
        __uint(max_entries, MAX_ENTRIES);
        __type(key, __u32);
        __type(value, __u32);
        __array(values, struct {
                __uint(type, BPF_MAP_TYPE_ARRAY);
                __uint(max_entries, 1);
                __type(key, __u32);
                __type(value, __u32);
        });
} m_array_of_maps SEC(".maps") = {
        .values = { (void *)&inner_map, 0, 0, 0, 0, 0, 0, 0, 0 },
};

static inline int check_array_of_maps(void)
{
        struct bpf_array *array_of_maps = (struct bpf_array *)&m_array_of_maps;
        struct bpf_map *map = (struct bpf_map *)&m_array_of_maps;

        VERIFY(check_default(&array_of_maps->map, map));

        return 1;
}

struct {
        __uint(type, BPF_MAP_TYPE_HASH_OF_MAPS);
        __uint(max_entries, MAX_ENTRIES);
        __type(key, __u32);
        __type(value, __u32);
        __array(values, struct inner_map);
} m_hash_of_maps SEC(".maps") = {
        .values = {
                [2] = &inner_map,
        },
};

static inline int check_hash_of_maps(void)
{
        struct bpf_htab *hash_of_maps = (struct bpf_htab *)&m_hash_of_maps;
        struct bpf_map *map = (struct bpf_map *)&m_hash_of_maps;

        VERIFY(check_default(&hash_of_maps->map, map));

        return 1;
}

struct bpf_dtab {
        struct bpf_map map;
} __attribute__((preserve_access_index));

struct {
        __uint(type, BPF_MAP_TYPE_DEVMAP);
        __uint(max_entries, MAX_ENTRIES);
        __type(key, __u32);
        __type(value, __u32);
} m_devmap SEC(".maps");

static inline int check_devmap(void)
{
        struct bpf_dtab *devmap = (struct bpf_dtab *)&m_devmap;
        struct bpf_map *map = (struct bpf_map *)&m_devmap;

        VERIFY(check_default(&devmap->map, map));

        return 1;
}

struct bpf_stab {
        struct bpf_map map;
} __attribute__((preserve_access_index));

struct {
        __uint(type, BPF_MAP_TYPE_SOCKMAP);
        __uint(max_entries, MAX_ENTRIES);
        __type(key, __u32);
        __type(value, __u32);
} m_sockmap SEC(".maps");

static inline int check_sockmap(void)
{
        struct bpf_stab *sockmap = (struct bpf_stab *)&m_sockmap;
        struct bpf_map *map = (struct bpf_map *)&m_sockmap;

        VERIFY(check_default(&sockmap->map, map));

        return 1;
}

struct bpf_cpu_map {
        struct bpf_map map;
} __attribute__((preserve_access_index));

struct {
        __uint(type, BPF_MAP_TYPE_CPUMAP);
        __uint(max_entries, MAX_ENTRIES);
        __type(key, __u32);
        __type(value, __u32);
} m_cpumap SEC(".maps");

static inline int check_cpumap(void)
{
        struct bpf_cpu_map *cpumap = (struct bpf_cpu_map *)&m_cpumap;
        struct bpf_map *map = (struct bpf_map *)&m_cpumap;

        VERIFY(check_default(&cpumap->map, map));

        return 1;
}

struct xsk_map {
        struct bpf_map map;
} __attribute__((preserve_access_index));

struct {
        __uint(type, BPF_MAP_TYPE_XSKMAP);
        __uint(max_entries, MAX_ENTRIES);
        __type(key, __u32);
        __type(value, __u32);
} m_xskmap SEC(".maps");

static inline int check_xskmap(void)
{
        struct xsk_map *xskmap = (struct xsk_map *)&m_xskmap;
        struct bpf_map *map = (struct bpf_map *)&m_xskmap;

        VERIFY(check_default(&xskmap->map, map));

        return 1;
}

struct bpf_shtab {
        struct bpf_map map;
} __attribute__((preserve_access_index));

struct {
        __uint(type, BPF_MAP_TYPE_SOCKHASH);
        __uint(max_entries, MAX_ENTRIES);
        __type(key, __u32);
        __type(value, __u32);
} m_sockhash SEC(".maps");

static inline int check_sockhash(void)
{
        struct bpf_shtab *sockhash = (struct bpf_shtab *)&m_sockhash;
        struct bpf_map *map = (struct bpf_map *)&m_sockhash;

        VERIFY(check_default(&sockhash->map, map));

        return 1;
}

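/*
 * Cgroup storage maps declare no max_entries: entries exist per attached
 * cgroup, and the kernel reports max_entries as 0, which is why check()
 * is passed 0 here and for the per-CPU variant below.
 */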
struct bpf_cgroup_storage_map {
        struct bpf_map map;
} __attribute__((preserve_access_index));

struct {
        __uint(type, BPF_MAP_TYPE_CGROUP_STORAGE);
        __type(key, struct bpf_cgroup_storage_key);
        __type(value, __u32);
} m_cgroup_storage SEC(".maps");

static inline int check_cgroup_storage(void)
{
        struct bpf_cgroup_storage_map *cgroup_storage =
                (struct bpf_cgroup_storage_map *)&m_cgroup_storage;
        struct bpf_map *map = (struct bpf_map *)&m_cgroup_storage;

        VERIFY(check(&cgroup_storage->map, map,
                     sizeof(struct bpf_cgroup_storage_key), sizeof(__u32), 0));

        return 1;
}

struct reuseport_array {
        struct bpf_map map;
} __attribute__((preserve_access_index));

struct {
        __uint(type, BPF_MAP_TYPE_REUSEPORT_SOCKARRAY);
        __uint(max_entries, MAX_ENTRIES);
        __type(key, __u32);
        __type(value, __u32);
} m_reuseport_sockarray SEC(".maps");

static inline int check_reuseport_sockarray(void)
{
        struct reuseport_array *reuseport_sockarray =
                (struct reuseport_array *)&m_reuseport_sockarray;
        struct bpf_map *map = (struct bpf_map *)&m_reuseport_sockarray;

        VERIFY(check_default(&reuseport_sockarray->map, map));

        return 1;
}

struct {
        __uint(type, BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE);
        __type(key, struct bpf_cgroup_storage_key);
        __type(value, __u32);
} m_percpu_cgroup_storage SEC(".maps");

static inline int check_percpu_cgroup_storage(void)
{
        struct bpf_cgroup_storage_map *percpu_cgroup_storage =
                (struct bpf_cgroup_storage_map *)&m_percpu_cgroup_storage;
        struct bpf_map *map = (struct bpf_map *)&m_percpu_cgroup_storage;

        VERIFY(check(&percpu_cgroup_storage->map, map,
                     sizeof(struct bpf_cgroup_storage_key), sizeof(__u32), 0));

        return 1;
}

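/*
 * Queue and stack maps are keyless, so their definitions declare no key
 * type and key_size is expected to read back as 0.
 */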
struct bpf_queue_stack {
        struct bpf_map map;
} __attribute__((preserve_access_index));

struct {
        __uint(type, BPF_MAP_TYPE_QUEUE);
        __uint(max_entries, MAX_ENTRIES);
        __type(value, __u32);
} m_queue SEC(".maps");

static inline int check_queue(void)
{
        struct bpf_queue_stack *queue = (struct bpf_queue_stack *)&m_queue;
        struct bpf_map *map = (struct bpf_map *)&m_queue;

        VERIFY(check(&queue->map, map, 0, sizeof(__u32), MAX_ENTRIES));

        return 1;
}

struct {
        __uint(type, BPF_MAP_TYPE_STACK);
        __uint(max_entries, MAX_ENTRIES);
        __type(value, __u32);
} m_stack SEC(".maps");

static inline int check_stack(void)
{
        struct bpf_queue_stack *stack = (struct bpf_queue_stack *)&m_stack;
        struct bpf_map *map = (struct bpf_map *)&m_stack;

        VERIFY(check(&stack->map, map, 0, sizeof(__u32), MAX_ENTRIES));

        return 1;
}

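/*
 * Socket-local storage is allocated per socket, so, like cgroup storage,
 * its max_entries is expected to read back as 0.
 */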
struct bpf_local_storage_map {
        struct bpf_map map;
} __attribute__((preserve_access_index));

struct {
        __uint(type, BPF_MAP_TYPE_SK_STORAGE);
        __uint(map_flags, BPF_F_NO_PREALLOC);
        __type(key, __u32);
        __type(value, __u32);
} m_sk_storage SEC(".maps");

static inline int check_sk_storage(void)
{
        struct bpf_local_storage_map *sk_storage =
                (struct bpf_local_storage_map *)&m_sk_storage;
        struct bpf_map *map = (struct bpf_map *)&m_sk_storage;

        VERIFY(check(&sk_storage->map, map, sizeof(__u32), sizeof(__u32), 0));

        return 1;
}

struct {
        __uint(type, BPF_MAP_TYPE_DEVMAP_HASH);
        __uint(max_entries, MAX_ENTRIES);
        __type(key, __u32);
        __type(value, __u32);
} m_devmap_hash SEC(".maps");

static inline int check_devmap_hash(void)
{
        struct bpf_dtab *devmap_hash = (struct bpf_dtab *)&m_devmap_hash;
        struct bpf_map *map = (struct bpf_map *)&m_devmap_hash;

        VERIFY(check_default(&devmap_hash->map, map));

        return 1;
}

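/*
 * For a ring buffer, max_entries is the buffer size in bytes and must be
 * a multiple of the page size; the userspace side is expected to size
 * the map and fill in the page_size global accordingly.
 */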
struct bpf_ringbuf_map {
        struct bpf_map map;
} __attribute__((preserve_access_index));

struct {
        __uint(type, BPF_MAP_TYPE_RINGBUF);
} m_ringbuf SEC(".maps");

static inline int check_ringbuf(void)
{
        struct bpf_ringbuf_map *ringbuf = (struct bpf_ringbuf_map *)&m_ringbuf;
        struct bpf_map *map = (struct bpf_map *)&m_ringbuf;

        VERIFY(check(&ringbuf->map, map, 0, 0, page_size));

        return 1;
}

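/*
 * Entry point.  Attached at cgroup egress by the userspace side, it runs
 * every checker in turn; any failed VERIFY() returns early, leaving
 * g_map_type and g_line to identify the failing check.
 */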
SEC("cgroup_skb/egress")
int cg_skb(void *ctx)
{
        VERIFY_TYPE(BPF_MAP_TYPE_HASH, check_hash);
        VERIFY_TYPE(BPF_MAP_TYPE_ARRAY, check_array);
        VERIFY_TYPE(BPF_MAP_TYPE_PROG_ARRAY, check_prog_array);
        VERIFY_TYPE(BPF_MAP_TYPE_PERF_EVENT_ARRAY, check_perf_event_array);
        VERIFY_TYPE(BPF_MAP_TYPE_PERCPU_HASH, check_percpu_hash);
        VERIFY_TYPE(BPF_MAP_TYPE_PERCPU_ARRAY, check_percpu_array);
        VERIFY_TYPE(BPF_MAP_TYPE_STACK_TRACE, check_stack_trace);
        VERIFY_TYPE(BPF_MAP_TYPE_CGROUP_ARRAY, check_cgroup_array);
        VERIFY_TYPE(BPF_MAP_TYPE_LRU_HASH, check_lru_hash);
        VERIFY_TYPE(BPF_MAP_TYPE_LRU_PERCPU_HASH, check_lru_percpu_hash);
        VERIFY_TYPE(BPF_MAP_TYPE_LPM_TRIE, check_lpm_trie);
        VERIFY_TYPE(BPF_MAP_TYPE_ARRAY_OF_MAPS, check_array_of_maps);
        VERIFY_TYPE(BPF_MAP_TYPE_HASH_OF_MAPS, check_hash_of_maps);
        VERIFY_TYPE(BPF_MAP_TYPE_DEVMAP, check_devmap);
        VERIFY_TYPE(BPF_MAP_TYPE_SOCKMAP, check_sockmap);
        VERIFY_TYPE(BPF_MAP_TYPE_CPUMAP, check_cpumap);
        VERIFY_TYPE(BPF_MAP_TYPE_XSKMAP, check_xskmap);
        VERIFY_TYPE(BPF_MAP_TYPE_SOCKHASH, check_sockhash);
        VERIFY_TYPE(BPF_MAP_TYPE_CGROUP_STORAGE, check_cgroup_storage);
        VERIFY_TYPE(BPF_MAP_TYPE_REUSEPORT_SOCKARRAY,
                    check_reuseport_sockarray);
        VERIFY_TYPE(BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE,
                    check_percpu_cgroup_storage);
        VERIFY_TYPE(BPF_MAP_TYPE_QUEUE, check_queue);
        VERIFY_TYPE(BPF_MAP_TYPE_STACK, check_stack);
        VERIFY_TYPE(BPF_MAP_TYPE_SK_STORAGE, check_sk_storage);
        VERIFY_TYPE(BPF_MAP_TYPE_DEVMAP_HASH, check_devmap_hash);
        VERIFY_TYPE(BPF_MAP_TYPE_RINGBUF, check_ringbuf);

        return 1;
}

__u32 _version SEC("version") = 1;
char _license[] SEC("license") = "GPL";