linux/tools/testing/selftests/bpf/progs/map_ptr_kern.c
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2020 Facebook
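/*
 * Check that a BPF program can dereference a map pointer and read the
 * kernel's internal map state directly. For each map type below there is
 * a partial mirror of the kernel struct and a check_*() function that
 * compares the fields seen through the pointer against the values the
 * map was created with.
 */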

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

#define LOOP_BOUND 0xf
#define MAX_ENTRIES 8
#define HALF_ENTRIES (MAX_ENTRIES >> 1)

_Static_assert(MAX_ENTRIES < LOOP_BOUND, "MAX_ENTRIES must be < LOOP_BOUND");

enum bpf_map_type g_map_type = BPF_MAP_TYPE_UNSPEC;
__u32 g_line = 0;

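/*
 * Every check runs under VERIFY_TYPE()/VERIFY(): on the first failed
 * condition the program returns 0, leaving the failing map type in
 * g_map_type and the failing source line in g_line for the user-space
 * side of the test to report.
 */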
#define VERIFY_TYPE(type, func) ({      \
        g_map_type = type;              \
        if (!func())                    \
                return 0;               \
})

#define VERIFY(expr) ({         \
        g_line = __LINE__;      \
        if (!(expr))            \
                return 0;       \
})

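/*
 * Partial mirrors of the kernel's internal struct bpf_map (and, further
 * down, of the per-type structs that embed it). preserve_access_index
 * turns every field access into a CO-RE relocation, so offsets are fixed
 * up against the running kernel's BTF at load time and the mirrors do
 * not have to match the kernel layout exactly.
 */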
struct bpf_map_memory {
        __u32 pages;
} __attribute__((preserve_access_index));

struct bpf_map {
        enum bpf_map_type map_type;
        __u32 key_size;
        __u32 value_size;
        __u32 max_entries;
        __u32 id;
        struct bpf_map_memory memory;
} __attribute__((preserve_access_index));

static inline int check_bpf_map_fields(struct bpf_map *map, __u32 key_size,
                                       __u32 value_size, __u32 max_entries)
{
        VERIFY(map->map_type == g_map_type);
        VERIFY(map->key_size == key_size);
        VERIFY(map->value_size == value_size);
        VERIFY(map->max_entries == max_entries);
        VERIFY(map->id > 0);
        VERIFY(map->memory.pages > 0);

        return 1;
}

static inline int check_bpf_map_ptr(struct bpf_map *indirect,
                                    struct bpf_map *direct)
{
        VERIFY(indirect->map_type == direct->map_type);
        VERIFY(indirect->key_size == direct->key_size);
        VERIFY(indirect->value_size == direct->value_size);
        VERIFY(indirect->max_entries == direct->max_entries);
        VERIFY(indirect->id == direct->id);
        VERIFY(indirect->memory.pages == direct->memory.pages);

        return 1;
}

static inline int check(struct bpf_map *indirect, struct bpf_map *direct,
                        __u32 key_size, __u32 value_size, __u32 max_entries)
{
        VERIFY(check_bpf_map_ptr(indirect, direct));
        VERIFY(check_bpf_map_fields(indirect, key_size, value_size,
                                    max_entries));
        return 1;
}

static inline int check_default(struct bpf_map *indirect,
                                struct bpf_map *direct)
{
        VERIFY(check(indirect, direct, sizeof(__u32), sizeof(__u32),
                     MAX_ENTRIES));
        return 1;
}

typedef struct {
        int counter;
} atomic_t;

struct bpf_htab {
        struct bpf_map map;
        atomic_t count;
        __u32 n_buckets;
        __u32 elem_size;
} __attribute__((preserve_access_index));

struct {
        __uint(type, BPF_MAP_TYPE_HASH);
        __uint(map_flags, BPF_F_NO_PREALLOC); /* to test bpf_htab.count */
        __uint(max_entries, MAX_ENTRIES);
        __type(key, __u32);
        __type(value, __u32);
} m_hash SEC(".maps");

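/*
 * Hash maps can be checked beyond the common fields: with
 * BPF_F_NO_PREALLOC the kernel tracks live elements in bpf_htab.count,
 * so inserting HALF_ENTRIES elements must be visible there. The
 * elem_size of 64 is an assumption about the current kernel's htab
 * element layout (struct htab_elem plus rounded-up key and value) for
 * 4-byte keys and values.
 */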
static inline int check_hash(void)
{
        struct bpf_htab *hash = (struct bpf_htab *)&m_hash;
        struct bpf_map *map = (struct bpf_map *)&m_hash;
        int i;

        VERIFY(check_default(&hash->map, map));

        VERIFY(hash->n_buckets == MAX_ENTRIES);
        VERIFY(hash->elem_size == 64);

        VERIFY(hash->count.counter == 0);
        for (i = 0; i < HALF_ENTRIES; ++i) {
                const __u32 key = i;
                const __u32 val = 1;

                if (bpf_map_update_elem(hash, &key, &val, 0))
                        return 0;
        }
        VERIFY(hash->count.counter == HALF_ENTRIES);

        return 1;
}

struct bpf_array {
        struct bpf_map map;
        __u32 elem_size;
} __attribute__((preserve_access_index));

struct {
        __uint(type, BPF_MAP_TYPE_ARRAY);
        __uint(max_entries, MAX_ENTRIES);
        __type(key, __u32);
        __type(value, __u32);
} m_array SEC(".maps");

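/*
 * The lookup loop below is double-bounded: map.max_entries is only a
 * runtime value as far as the verifier is concerned, so the constant
 * LOOP_BOUND cap gives the loop a provable upper bound. The
 * _Static_assert above guarantees the cap never cuts the loop short.
 */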
static inline int check_array(void)
{
        struct bpf_array *array = (struct bpf_array *)&m_array;
        struct bpf_map *map = (struct bpf_map *)&m_array;
        int i, n_lookups = 0, n_keys = 0;

        VERIFY(check_default(&array->map, map));

        VERIFY(array->elem_size == 8);

        for (i = 0; i < array->map.max_entries && i < LOOP_BOUND; ++i) {
                const __u32 key = i;
                __u32 *val = bpf_map_lookup_elem(array, &key);

                ++n_lookups;
                if (val)
                        ++n_keys;
        }

        VERIFY(n_lookups == MAX_ENTRIES);
        VERIFY(n_keys == MAX_ENTRIES);

        return 1;
}

struct {
        __uint(type, BPF_MAP_TYPE_PROG_ARRAY);
        __uint(max_entries, MAX_ENTRIES);
        __type(key, __u32);
        __type(value, __u32);
} m_prog_array SEC(".maps");

static inline int check_prog_array(void)
{
        struct bpf_array *prog_array = (struct bpf_array *)&m_prog_array;
        struct bpf_map *map = (struct bpf_map *)&m_prog_array;

        VERIFY(check_default(&prog_array->map, map));

        return 1;
}

struct {
        __uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
        __uint(max_entries, MAX_ENTRIES);
        __type(key, __u32);
        __type(value, __u32);
} m_perf_event_array SEC(".maps");

static inline int check_perf_event_array(void)
{
        struct bpf_array *perf_event_array = (struct bpf_array *)&m_perf_event_array;
        struct bpf_map *map = (struct bpf_map *)&m_perf_event_array;

        VERIFY(check_default(&perf_event_array->map, map));

        return 1;
}

struct {
        __uint(type, BPF_MAP_TYPE_PERCPU_HASH);
        __uint(max_entries, MAX_ENTRIES);
        __type(key, __u32);
        __type(value, __u32);
} m_percpu_hash SEC(".maps");

static inline int check_percpu_hash(void)
{
        struct bpf_htab *percpu_hash = (struct bpf_htab *)&m_percpu_hash;
        struct bpf_map *map = (struct bpf_map *)&m_percpu_hash;

        VERIFY(check_default(&percpu_hash->map, map));

        return 1;
}

struct {
        __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
        __uint(max_entries, MAX_ENTRIES);
        __type(key, __u32);
        __type(value, __u32);
} m_percpu_array SEC(".maps");

static inline int check_percpu_array(void)
{
        struct bpf_array *percpu_array = (struct bpf_array *)&m_percpu_array;
        struct bpf_map *map = (struct bpf_map *)&m_percpu_array;

        VERIFY(check_default(&percpu_array->map, map));

        return 1;
}

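/*
 * Most of the remaining map types need no per-type field checks, so
 * their kernel structs are mirrored only as thin wrappers around the
 * embedded struct bpf_map.
 */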
struct bpf_stack_map {
        struct bpf_map map;
} __attribute__((preserve_access_index));

struct {
        __uint(type, BPF_MAP_TYPE_STACK_TRACE);
        __uint(max_entries, MAX_ENTRIES);
        __type(key, __u32);
        __type(value, __u64);
} m_stack_trace SEC(".maps");

static inline int check_stack_trace(void)
{
        struct bpf_stack_map *stack_trace =
                (struct bpf_stack_map *)&m_stack_trace;
        struct bpf_map *map = (struct bpf_map *)&m_stack_trace;

        VERIFY(check(&stack_trace->map, map, sizeof(__u32), sizeof(__u64),
                     MAX_ENTRIES));

        return 1;
}

struct {
        __uint(type, BPF_MAP_TYPE_CGROUP_ARRAY);
        __uint(max_entries, MAX_ENTRIES);
        __type(key, __u32);
        __type(value, __u32);
} m_cgroup_array SEC(".maps");

static inline int check_cgroup_array(void)
{
        struct bpf_array *cgroup_array = (struct bpf_array *)&m_cgroup_array;
        struct bpf_map *map = (struct bpf_map *)&m_cgroup_array;

        VERIFY(check_default(&cgroup_array->map, map));

        return 1;
}

struct {
        __uint(type, BPF_MAP_TYPE_LRU_HASH);
        __uint(max_entries, MAX_ENTRIES);
        __type(key, __u32);
        __type(value, __u32);
} m_lru_hash SEC(".maps");

static inline int check_lru_hash(void)
{
        struct bpf_htab *lru_hash = (struct bpf_htab *)&m_lru_hash;
        struct bpf_map *map = (struct bpf_map *)&m_lru_hash;

        VERIFY(check_default(&lru_hash->map, map));

        return 1;
}

struct {
        __uint(type, BPF_MAP_TYPE_LRU_PERCPU_HASH);
        __uint(max_entries, MAX_ENTRIES);
        __type(key, __u32);
        __type(value, __u32);
} m_lru_percpu_hash SEC(".maps");

static inline int check_lru_percpu_hash(void)
{
        struct bpf_htab *lru_percpu_hash = (struct bpf_htab *)&m_lru_percpu_hash;
        struct bpf_map *map = (struct bpf_map *)&m_lru_percpu_hash;

        VERIFY(check_default(&lru_percpu_hash->map, map));

        return 1;
}

struct lpm_trie {
        struct bpf_map map;
} __attribute__((preserve_access_index));

struct lpm_key {
        struct bpf_lpm_trie_key trie_key;
        __u32 data;
};

struct {
        __uint(type, BPF_MAP_TYPE_LPM_TRIE);
        __uint(map_flags, BPF_F_NO_PREALLOC);
        __uint(max_entries, MAX_ENTRIES);
        __type(key, struct lpm_key);
        __type(value, __u32);
} m_lpm_trie SEC(".maps");

static inline int check_lpm_trie(void)
{
        struct lpm_trie *lpm_trie = (struct lpm_trie *)&m_lpm_trie;
        struct bpf_map *map = (struct bpf_map *)&m_lpm_trie;

        VERIFY(check(&lpm_trie->map, map, sizeof(struct lpm_key), sizeof(__u32),
                     MAX_ENTRIES));

        return 1;
}

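/*
 * Map-in-map: the inner map definition is supplied either inline via
 * __array(values, ...) or by referencing a named definition, and
 * individual slots can be pre-populated at load time through the
 * .values initializer.
 */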
struct inner_map {
        __uint(type, BPF_MAP_TYPE_ARRAY);
        __uint(max_entries, 1);
        __type(key, __u32);
        __type(value, __u32);
} inner_map SEC(".maps");

struct {
        __uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS);
        __uint(max_entries, MAX_ENTRIES);
        __type(key, __u32);
        __type(value, __u32);
        __array(values, struct {
                __uint(type, BPF_MAP_TYPE_ARRAY);
                __uint(max_entries, 1);
                __type(key, __u32);
                __type(value, __u32);
        });
} m_array_of_maps SEC(".maps") = {
        .values = { (void *)&inner_map, 0, 0, 0, 0, 0, 0, 0, 0 },
};

static inline int check_array_of_maps(void)
{
        struct bpf_array *array_of_maps = (struct bpf_array *)&m_array_of_maps;
        struct bpf_map *map = (struct bpf_map *)&m_array_of_maps;

        VERIFY(check_default(&array_of_maps->map, map));

        return 1;
}

struct {
        __uint(type, BPF_MAP_TYPE_HASH_OF_MAPS);
        __uint(max_entries, MAX_ENTRIES);
        __type(key, __u32);
        __type(value, __u32);
        __array(values, struct inner_map);
} m_hash_of_maps SEC(".maps") = {
        .values = {
                [2] = &inner_map,
        },
};

static inline int check_hash_of_maps(void)
{
        struct bpf_htab *hash_of_maps = (struct bpf_htab *)&m_hash_of_maps;
        struct bpf_map *map = (struct bpf_map *)&m_hash_of_maps;

        VERIFY(check_default(&hash_of_maps->map, map));

        return 1;
}

struct bpf_dtab {
        struct bpf_map map;
} __attribute__((preserve_access_index));

struct {
        __uint(type, BPF_MAP_TYPE_DEVMAP);
        __uint(max_entries, MAX_ENTRIES);
        __type(key, __u32);
        __type(value, __u32);
} m_devmap SEC(".maps");

static inline int check_devmap(void)
{
        struct bpf_dtab *devmap = (struct bpf_dtab *)&m_devmap;
        struct bpf_map *map = (struct bpf_map *)&m_devmap;

        VERIFY(check_default(&devmap->map, map));

        return 1;
}

struct bpf_stab {
        struct bpf_map map;
} __attribute__((preserve_access_index));

struct {
        __uint(type, BPF_MAP_TYPE_SOCKMAP);
        __uint(max_entries, MAX_ENTRIES);
        __type(key, __u32);
        __type(value, __u32);
} m_sockmap SEC(".maps");

static inline int check_sockmap(void)
{
        struct bpf_stab *sockmap = (struct bpf_stab *)&m_sockmap;
        struct bpf_map *map = (struct bpf_map *)&m_sockmap;

        VERIFY(check_default(&sockmap->map, map));

        return 1;
}

struct bpf_cpu_map {
        struct bpf_map map;
} __attribute__((preserve_access_index));

struct {
        __uint(type, BPF_MAP_TYPE_CPUMAP);
        __uint(max_entries, MAX_ENTRIES);
        __type(key, __u32);
        __type(value, __u32);
} m_cpumap SEC(".maps");

static inline int check_cpumap(void)
{
        struct bpf_cpu_map *cpumap = (struct bpf_cpu_map *)&m_cpumap;
        struct bpf_map *map = (struct bpf_map *)&m_cpumap;

        VERIFY(check_default(&cpumap->map, map));

        return 1;
}

struct xsk_map {
        struct bpf_map map;
} __attribute__((preserve_access_index));

struct {
        __uint(type, BPF_MAP_TYPE_XSKMAP);
        __uint(max_entries, MAX_ENTRIES);
        __type(key, __u32);
        __type(value, __u32);
} m_xskmap SEC(".maps");

static inline int check_xskmap(void)
{
        struct xsk_map *xskmap = (struct xsk_map *)&m_xskmap;
        struct bpf_map *map = (struct bpf_map *)&m_xskmap;

        VERIFY(check_default(&xskmap->map, map));

        return 1;
}

struct bpf_shtab {
        struct bpf_map map;
} __attribute__((preserve_access_index));

struct {
        __uint(type, BPF_MAP_TYPE_SOCKHASH);
        __uint(max_entries, MAX_ENTRIES);
        __type(key, __u32);
        __type(value, __u32);
} m_sockhash SEC(".maps");

static inline int check_sockhash(void)
{
        struct bpf_shtab *sockhash = (struct bpf_shtab *)&m_sockhash;
        struct bpf_map *map = (struct bpf_map *)&m_sockhash;

        VERIFY(check_default(&sockhash->map, map));

        return 1;
}

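/*
 * Cgroup storage maps take no max_entries at creation time and the
 * kernel reports it as 0, hence check() is called with max_entries == 0
 * here and for the per-CPU variant below.
 */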
struct bpf_cgroup_storage_map {
        struct bpf_map map;
} __attribute__((preserve_access_index));

struct {
        __uint(type, BPF_MAP_TYPE_CGROUP_STORAGE);
        __type(key, struct bpf_cgroup_storage_key);
        __type(value, __u32);
} m_cgroup_storage SEC(".maps");

static inline int check_cgroup_storage(void)
{
        struct bpf_cgroup_storage_map *cgroup_storage =
                (struct bpf_cgroup_storage_map *)&m_cgroup_storage;
        struct bpf_map *map = (struct bpf_map *)&m_cgroup_storage;

        VERIFY(check(&cgroup_storage->map, map,
                     sizeof(struct bpf_cgroup_storage_key), sizeof(__u32), 0));

        return 1;
}

struct reuseport_array {
        struct bpf_map map;
} __attribute__((preserve_access_index));

struct {
        __uint(type, BPF_MAP_TYPE_REUSEPORT_SOCKARRAY);
        __uint(max_entries, MAX_ENTRIES);
        __type(key, __u32);
        __type(value, __u32);
} m_reuseport_sockarray SEC(".maps");

static inline int check_reuseport_sockarray(void)
{
        struct reuseport_array *reuseport_sockarray =
                (struct reuseport_array *)&m_reuseport_sockarray;
        struct bpf_map *map = (struct bpf_map *)&m_reuseport_sockarray;

        VERIFY(check_default(&reuseport_sockarray->map, map));

        return 1;
}

struct {
        __uint(type, BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE);
        __type(key, struct bpf_cgroup_storage_key);
        __type(value, __u32);
} m_percpu_cgroup_storage SEC(".maps");

static inline int check_percpu_cgroup_storage(void)
{
        struct bpf_cgroup_storage_map *percpu_cgroup_storage =
                (struct bpf_cgroup_storage_map *)&m_percpu_cgroup_storage;
        struct bpf_map *map = (struct bpf_map *)&m_percpu_cgroup_storage;

        VERIFY(check(&percpu_cgroup_storage->map, map,
                     sizeof(struct bpf_cgroup_storage_key), sizeof(__u32), 0));

        return 1;
}

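/* Queue and stack maps are keyless, hence the key_size of 0. */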
struct bpf_queue_stack {
        struct bpf_map map;
} __attribute__((preserve_access_index));

struct {
        __uint(type, BPF_MAP_TYPE_QUEUE);
        __uint(max_entries, MAX_ENTRIES);
        __type(value, __u32);
} m_queue SEC(".maps");

static inline int check_queue(void)
{
        struct bpf_queue_stack *queue = (struct bpf_queue_stack *)&m_queue;
        struct bpf_map *map = (struct bpf_map *)&m_queue;

        VERIFY(check(&queue->map, map, 0, sizeof(__u32), MAX_ENTRIES));

        return 1;
}

struct {
        __uint(type, BPF_MAP_TYPE_STACK);
        __uint(max_entries, MAX_ENTRIES);
        __type(value, __u32);
} m_stack SEC(".maps");

static inline int check_stack(void)
{
        struct bpf_queue_stack *stack = (struct bpf_queue_stack *)&m_stack;
        struct bpf_map *map = (struct bpf_map *)&m_stack;

        VERIFY(check(&stack->map, map, 0, sizeof(__u32), MAX_ENTRIES));

        return 1;
}

struct bpf_sk_storage_map {
        struct bpf_map map;
} __attribute__((preserve_access_index));

struct {
        __uint(type, BPF_MAP_TYPE_SK_STORAGE);
        __uint(map_flags, BPF_F_NO_PREALLOC);
        __type(key, __u32);
        __type(value, __u32);
} m_sk_storage SEC(".maps");

static inline int check_sk_storage(void)
{
        struct bpf_sk_storage_map *sk_storage =
                (struct bpf_sk_storage_map *)&m_sk_storage;
        struct bpf_map *map = (struct bpf_map *)&m_sk_storage;

        VERIFY(check(&sk_storage->map, map, sizeof(__u32), sizeof(__u32), 0));

        return 1;
}

struct {
        __uint(type, BPF_MAP_TYPE_DEVMAP_HASH);
        __uint(max_entries, MAX_ENTRIES);
        __type(key, __u32);
        __type(value, __u32);
} m_devmap_hash SEC(".maps");

static inline int check_devmap_hash(void)
{
        struct bpf_dtab *devmap_hash = (struct bpf_dtab *)&m_devmap_hash;
        struct bpf_map *map = (struct bpf_map *)&m_devmap_hash;

        VERIFY(check_default(&devmap_hash->map, map));

        return 1;
}

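/*
 * Ring buffers have no key or value; max_entries is the buffer size in
 * bytes (a power-of-two multiple of the page size, 4 KiB here).
 */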
struct bpf_ringbuf_map {
        struct bpf_map map;
} __attribute__((preserve_access_index));

struct {
        __uint(type, BPF_MAP_TYPE_RINGBUF);
        __uint(max_entries, 1 << 12);
} m_ringbuf SEC(".maps");

static inline int check_ringbuf(void)
{
        struct bpf_ringbuf_map *ringbuf = (struct bpf_ringbuf_map *)&m_ringbuf;
        struct bpf_map *map = (struct bpf_map *)&m_ringbuf;

        VERIFY(check(&ringbuf->map, map, 0, 0, 1 << 12));

        return 1;
}

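/*
 * Entry point, attached as a cgroup/skb egress program so that ordinary
 * network traffic triggers it. Returns 1 when every check passes; any
 * failed VERIFY() returns 0 early, with g_map_type and g_line
 * identifying the culprit.
 */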
SEC("cgroup_skb/egress")
int cg_skb(void *ctx)
{
        VERIFY_TYPE(BPF_MAP_TYPE_HASH, check_hash);
        VERIFY_TYPE(BPF_MAP_TYPE_ARRAY, check_array);
        VERIFY_TYPE(BPF_MAP_TYPE_PROG_ARRAY, check_prog_array);
        VERIFY_TYPE(BPF_MAP_TYPE_PERF_EVENT_ARRAY, check_perf_event_array);
        VERIFY_TYPE(BPF_MAP_TYPE_PERCPU_HASH, check_percpu_hash);
        VERIFY_TYPE(BPF_MAP_TYPE_PERCPU_ARRAY, check_percpu_array);
        VERIFY_TYPE(BPF_MAP_TYPE_STACK_TRACE, check_stack_trace);
        VERIFY_TYPE(BPF_MAP_TYPE_CGROUP_ARRAY, check_cgroup_array);
        VERIFY_TYPE(BPF_MAP_TYPE_LRU_HASH, check_lru_hash);
        VERIFY_TYPE(BPF_MAP_TYPE_LRU_PERCPU_HASH, check_lru_percpu_hash);
        VERIFY_TYPE(BPF_MAP_TYPE_LPM_TRIE, check_lpm_trie);
        VERIFY_TYPE(BPF_MAP_TYPE_ARRAY_OF_MAPS, check_array_of_maps);
        VERIFY_TYPE(BPF_MAP_TYPE_HASH_OF_MAPS, check_hash_of_maps);
        VERIFY_TYPE(BPF_MAP_TYPE_DEVMAP, check_devmap);
        VERIFY_TYPE(BPF_MAP_TYPE_SOCKMAP, check_sockmap);
        VERIFY_TYPE(BPF_MAP_TYPE_CPUMAP, check_cpumap);
        VERIFY_TYPE(BPF_MAP_TYPE_XSKMAP, check_xskmap);
        VERIFY_TYPE(BPF_MAP_TYPE_SOCKHASH, check_sockhash);
        VERIFY_TYPE(BPF_MAP_TYPE_CGROUP_STORAGE, check_cgroup_storage);
        VERIFY_TYPE(BPF_MAP_TYPE_REUSEPORT_SOCKARRAY,
                    check_reuseport_sockarray);
        VERIFY_TYPE(BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE,
                    check_percpu_cgroup_storage);
        VERIFY_TYPE(BPF_MAP_TYPE_QUEUE, check_queue);
        VERIFY_TYPE(BPF_MAP_TYPE_STACK, check_stack);
        VERIFY_TYPE(BPF_MAP_TYPE_SK_STORAGE, check_sk_storage);
        VERIFY_TYPE(BPF_MAP_TYPE_DEVMAP_HASH, check_devmap_hash);
        VERIFY_TYPE(BPF_MAP_TYPE_RINGBUF, check_ringbuf);

        return 1;
}

__u32 _version SEC("version") = 1;
char _license[] SEC("license") = "GPL";