/* linux/tools/testing/selftests/bpf/progs/map_ptr_kern.c */
   1// SPDX-License-Identifier: GPL-2.0
   2// Copyright (c) 2020 Facebook
   3
   4#include <linux/bpf.h>
   5#include <bpf/bpf_helpers.h>
   6
/* LOOP_BOUND caps iteration counts so loops stay bounded for the BPF
 * verifier; the static assert guarantees a MAX_ENTRIES-sized map can be
 * walked fully inside that bound (see check_array()).
 */
#define LOOP_BOUND 0xf
#define MAX_ENTRIES 8
#define HALF_ENTRIES (MAX_ENTRIES >> 1)

_Static_assert(MAX_ENTRIES < LOOP_BOUND, "MAX_ENTRIES must be < LOOP_BOUND");

/* Globals shared with userspace through the program's data sections:
 * g_map_type holds the map type currently under test and g_line the
 * source line of the most recent VERIFY() check, so the failing check
 * can be identified from outside. page_size must be set by userspace
 * before the program runs (used for the ringbuf max_entries check).
 */
enum bpf_map_type g_map_type = BPF_MAP_TYPE_UNSPEC;
__u32 g_line = 0;
int page_size = 0; /* userspace should set it */
  16
/* Record the map type under test in g_map_type, then run its check
 * function; bail out of the *enclosing* function with 0 on failure.
 * g_map_type is left set so the failing type is visible to userspace.
 */
#define VERIFY_TYPE(type, func) ({      \
        g_map_type = type;              \
        if (!func())                    \
                return 0;               \
})


/* Store the current source line in g_line, then evaluate expr; return 0
 * from the enclosing function if it does not hold. NOTE: both macros
 * expand to a 'return', so they may only be used inside functions that
 * return int.
 */
#define VERIFY(expr) ({         \
        g_line = __LINE__;      \
        if (!(expr))            \
                return 0;       \
})
  29
/* Partial mirror of the kernel's internal struct bpf_map, containing
 * only the fields these checks read. preserve_access_index makes the
 * field offsets relocatable via BPF CO-RE, so the accesses match the
 * running kernel's actual layout.
 */
struct bpf_map {
        enum bpf_map_type map_type;
        __u32 key_size;
        __u32 value_size;
        __u32 max_entries;
        __u32 id;
} __attribute__((preserve_access_index));
  37
/* Verify the map's fields against the expected key size, value size and
 * max_entries, and that its type matches the one set via VERIFY_TYPE().
 * Returns 1 on success; returns 0 (through VERIFY) at the first mismatch.
 */
static inline int check_bpf_map_fields(struct bpf_map *map, __u32 key_size,
                                       __u32 value_size, __u32 max_entries)
{
        VERIFY(map->map_type == g_map_type);
        VERIFY(map->key_size == key_size);
        VERIFY(map->value_size == value_size);
        VERIFY(map->max_entries == max_entries);
        VERIFY(map->id > 0); /* a loaded map always has a positive id */

        return 1;
}
  49
/* Verify that two pointers to the same map agree field-by-field, i.e.
 * the pointer obtained indirectly (through a container struct) and the
 * direct map pointer really refer to one identical object.
 * Returns 1 on success, 0 on the first mismatch.
 */
static inline int check_bpf_map_ptr(struct bpf_map *indirect,
                                    struct bpf_map *direct)
{
        VERIFY(indirect->map_type == direct->map_type);
        VERIFY(indirect->key_size == direct->key_size);
        VERIFY(indirect->value_size == direct->value_size);
        VERIFY(indirect->max_entries == direct->max_entries);
        VERIFY(indirect->id == direct->id);

        return 1;
}
  61
/* Combined check: the indirect and direct pointers must match, and the
 * map's fields must equal the expected sizes/entries.
 * Returns 1 on success, 0 on the first failing sub-check.
 */
static inline int check(struct bpf_map *indirect, struct bpf_map *direct,
                        __u32 key_size, __u32 value_size, __u32 max_entries)
{
        VERIFY(check_bpf_map_ptr(indirect, direct));
        VERIFY(check_bpf_map_fields(indirect, key_size, value_size,
                                    max_entries));
        return 1;
}
  70
/* Shorthand for the common case: __u32 key, __u32 value, MAX_ENTRIES
 * entries. Returns 1 on success, 0 on failure.
 */
static inline int check_default(struct bpf_map *indirect,
                                struct bpf_map *direct)
{
        VERIFY(check(indirect, direct, sizeof(__u32), sizeof(__u32),
                     MAX_ENTRIES));
        return 1;
}
  78
/* Same as check_default(), but forced out-of-line so the map pointers
 * are also exercised across a BPF-to-BPF function call boundary.
 */
static __noinline int
check_default_noinline(struct bpf_map *indirect, struct bpf_map *direct)
{
        VERIFY(check(indirect, direct, sizeof(__u32), sizeof(__u32),
                     MAX_ENTRIES));
        return 1;
}
  86
/* Minimal mirror of the kernel's atomic_t, just enough to read the
 * counter value through CO-RE.
 */
typedef struct {
        int counter;
} atomic_t;

/* Partial mirror of the kernel's struct bpf_htab (hash map internals);
 * only the fields the checks below read.
 */
struct bpf_htab {
        struct bpf_map map;
        atomic_t count;
        __u32 n_buckets;
        __u32 elem_size;
} __attribute__((preserve_access_index));

struct {
        __uint(type, BPF_MAP_TYPE_HASH);
        __uint(map_flags, BPF_F_NO_PREALLOC); /* to test bpf_htab.count */
        __uint(max_entries, MAX_ENTRIES);
        __type(key, __u32);
        __type(value, __u32);
} m_hash SEC(".maps");

/* kfunc provided by the kernel: sums the per-cpu element counters of a
 * map into a total element count.
 */
__s64 bpf_map_sum_elem_count(struct bpf_map *map) __ksym;
 107
/* Check BPF_MAP_TYPE_HASH: validate the generic fields (through a
 * noinline call), peek at hash internals, then insert HALF_ENTRIES
 * elements and confirm both the internal counter and the
 * bpf_map_sum_elem_count() kfunc track the element count.
 * Returns 1 on success, 0 on any failed check or update.
 */
static inline int check_hash(void)
{
        struct bpf_htab *hash = (struct bpf_htab *)&m_hash;
        struct bpf_map *map = (struct bpf_map *)&m_hash;
        int i;

        VERIFY(check_default_noinline(&hash->map, map));

        VERIFY(hash->n_buckets == MAX_ENTRIES);
        /* 64 is the kernel-computed per-element size for this map's
         * key/value layout — NOTE(review): kernel-version dependent.
         */
        VERIFY(hash->elem_size == 64);

        /* map starts empty: both counters must read zero */
        VERIFY(hash->count.counter == 0);
        VERIFY(bpf_map_sum_elem_count(map) == 0);

        for (i = 0; i < HALF_ENTRIES; ++i) {
                const __u32 key = i;
                const __u32 val = 1;

                if (bpf_map_update_elem(hash, &key, &val, 0))
                        return 0;
        }
        /* count is maintained because the map is BPF_F_NO_PREALLOC */
        VERIFY(hash->count.counter == HALF_ENTRIES);
        VERIFY(bpf_map_sum_elem_count(map) == HALF_ENTRIES);

        return 1;
}
 134
/* Partial mirror of the kernel's struct bpf_array (array map internals). */
struct bpf_array {
        struct bpf_map map;
        __u32 elem_size;
} __attribute__((preserve_access_index));

struct {
        __uint(type, BPF_MAP_TYPE_ARRAY);
        __uint(max_entries, MAX_ENTRIES);
        __type(key, __u32);
        __type(value, __u32);
} m_array SEC(".maps");
 146
/* Check BPF_MAP_TYPE_ARRAY: validate the generic fields, the internal
 * element size, and that every key in [0, max_entries) is present
 * (array maps pre-populate all slots). Returns 1 on success, 0 on
 * failure.
 */
static inline int check_array(void)
{
        struct bpf_array *array = (struct bpf_array *)&m_array;
        struct bpf_map *map = (struct bpf_map *)&m_array;
        int i, n_lookups = 0, n_keys = 0;

        VERIFY(check_default(&array->map, map));

        /* 8 is the kernel's rounded-up element size for a 4-byte value —
         * NOTE(review): kernel implementation detail.
         */
        VERIFY(array->elem_size == 8);

        /* the LOOP_BOUND term keeps the loop verifier-bounded; the
         * static assert above guarantees it never truncates the walk
         */
        for (i = 0; i < array->map.max_entries && i < LOOP_BOUND; ++i) {
                const __u32 key = i;
                __u32 *val = bpf_map_lookup_elem(array, &key);

                ++n_lookups;
                if (val)
                        ++n_keys;
        }

        VERIFY(n_lookups == MAX_ENTRIES);
        VERIFY(n_keys == MAX_ENTRIES);

        return 1;
}
 171
/* BPF_MAP_TYPE_PROG_ARRAY uses the generic bpf_array layout, so only
 * the default field checks apply.
 */
struct {
        __uint(type, BPF_MAP_TYPE_PROG_ARRAY);
        __uint(max_entries, MAX_ENTRIES);
        __type(key, __u32);
        __type(value, __u32);
} m_prog_array SEC(".maps");

static inline int check_prog_array(void)
{
        struct bpf_array *prog_array = (struct bpf_array *)&m_prog_array;
        struct bpf_map *map = (struct bpf_map *)&m_prog_array;

        VERIFY(check_default(&prog_array->map, map));

        return 1;
}
 188
/* BPF_MAP_TYPE_PERF_EVENT_ARRAY is array-backed; default checks only. */
struct {
        __uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
        __uint(max_entries, MAX_ENTRIES);
        __type(key, __u32);
        __type(value, __u32);
} m_perf_event_array SEC(".maps");

static inline int check_perf_event_array(void)
{
        struct bpf_array *perf_event_array = (struct bpf_array *)&m_perf_event_array;
        struct bpf_map *map = (struct bpf_map *)&m_perf_event_array;

        VERIFY(check_default(&perf_event_array->map, map));

        return 1;
}
 205
/* BPF_MAP_TYPE_PERCPU_HASH shares the bpf_htab layout; default checks
 * only.
 */
struct {
        __uint(type, BPF_MAP_TYPE_PERCPU_HASH);
        __uint(max_entries, MAX_ENTRIES);
        __type(key, __u32);
        __type(value, __u32);
} m_percpu_hash SEC(".maps");

static inline int check_percpu_hash(void)
{
        struct bpf_htab *percpu_hash = (struct bpf_htab *)&m_percpu_hash;
        struct bpf_map *map = (struct bpf_map *)&m_percpu_hash;

        VERIFY(check_default(&percpu_hash->map, map));

        return 1;
}
 222
/* BPF_MAP_TYPE_PERCPU_ARRAY shares the bpf_array layout; default checks
 * only.
 */
struct {
        __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
        __uint(max_entries, MAX_ENTRIES);
        __type(key, __u32);
        __type(value, __u32);
} m_percpu_array SEC(".maps");

static inline int check_percpu_array(void)
{
        struct bpf_array *percpu_array = (struct bpf_array *)&m_percpu_array;
        struct bpf_map *map = (struct bpf_map *)&m_percpu_array;

        VERIFY(check_default(&percpu_array->map, map));

        return 1;
}
 239
/* Partial mirror of the kernel's struct bpf_stack_map. */
struct bpf_stack_map {
        struct bpf_map map;
} __attribute__((preserve_access_index));

struct {
        __uint(type, BPF_MAP_TYPE_STACK_TRACE);
        __uint(max_entries, MAX_ENTRIES);
        __type(key, __u32);
        __type(value, __u64);
} m_stack_trace SEC(".maps");

/* BPF_MAP_TYPE_STACK_TRACE uses a __u64 value, so call check() directly
 * with explicit sizes instead of check_default().
 */
static inline int check_stack_trace(void)
{
        struct bpf_stack_map *stack_trace =
                (struct bpf_stack_map *)&m_stack_trace;
        struct bpf_map *map = (struct bpf_map *)&m_stack_trace;

        VERIFY(check(&stack_trace->map, map, sizeof(__u32), sizeof(__u64),
                     MAX_ENTRIES));

        return 1;
}
 262
/* BPF_MAP_TYPE_CGROUP_ARRAY is array-backed; default checks only. */
struct {
        __uint(type, BPF_MAP_TYPE_CGROUP_ARRAY);
        __uint(max_entries, MAX_ENTRIES);
        __type(key, __u32);
        __type(value, __u32);
} m_cgroup_array SEC(".maps");

static inline int check_cgroup_array(void)
{
        struct bpf_array *cgroup_array = (struct bpf_array *)&m_cgroup_array;
        struct bpf_map *map = (struct bpf_map *)&m_cgroup_array;

        VERIFY(check_default(&cgroup_array->map, map));

        return 1;
}
 279
/* BPF_MAP_TYPE_LRU_HASH shares the bpf_htab layout; default checks only. */
struct {
        __uint(type, BPF_MAP_TYPE_LRU_HASH);
        __uint(max_entries, MAX_ENTRIES);
        __type(key, __u32);
        __type(value, __u32);
} m_lru_hash SEC(".maps");

static inline int check_lru_hash(void)
{
        struct bpf_htab *lru_hash = (struct bpf_htab *)&m_lru_hash;
        struct bpf_map *map = (struct bpf_map *)&m_lru_hash;

        VERIFY(check_default(&lru_hash->map, map));

        return 1;
}
 296
/* BPF_MAP_TYPE_LRU_PERCPU_HASH shares the bpf_htab layout; default
 * checks only.
 */
struct {
        __uint(type, BPF_MAP_TYPE_LRU_PERCPU_HASH);
        __uint(max_entries, MAX_ENTRIES);
        __type(key, __u32);
        __type(value, __u32);
} m_lru_percpu_hash SEC(".maps");

static inline int check_lru_percpu_hash(void)
{
        struct bpf_htab *lru_percpu_hash = (struct bpf_htab *)&m_lru_percpu_hash;
        struct bpf_map *map = (struct bpf_map *)&m_lru_percpu_hash;

        VERIFY(check_default(&lru_percpu_hash->map, map));

        return 1;
}
 313
/* Partial mirror of the kernel's struct lpm_trie. */
struct lpm_trie {
        struct bpf_map map;
} __attribute__((preserve_access_index));

/* LPM keys must start with the UAPI prefix-length header followed by
 * the data to match against.
 */
struct lpm_key {
        struct bpf_lpm_trie_key_hdr trie_key;
        __u32 data;
};

struct {
        __uint(type, BPF_MAP_TYPE_LPM_TRIE);
        __uint(map_flags, BPF_F_NO_PREALLOC); /* required for LPM tries */
        __uint(max_entries, MAX_ENTRIES);
        __type(key, struct lpm_key);
        __type(value, __u32);
} m_lpm_trie SEC(".maps");

/* BPF_MAP_TYPE_LPM_TRIE uses the composite lpm_key, so pass explicit
 * sizes to check().
 */
static inline int check_lpm_trie(void)
{
        struct lpm_trie *lpm_trie = (struct lpm_trie *)&m_lpm_trie;
        struct bpf_map *map = (struct bpf_map *)&m_lpm_trie;

        VERIFY(check(&lpm_trie->map, map, sizeof(struct lpm_key), sizeof(__u32),
                     MAX_ENTRIES));

        return 1;
}
 341
/* Inner maps get a distinct max_entries so the checks can prove a
 * lookup really returned the inner map and not the outer one.
 */
#define INNER_MAX_ENTRIES 1234

struct inner_map {
        __uint(type, BPF_MAP_TYPE_ARRAY);
        __uint(max_entries, INNER_MAX_ENTRIES);
        __type(key, __u32);
        __type(value, __u32);
} inner_map SEC(".maps");

struct {
        __uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS);
        __uint(max_entries, MAX_ENTRIES);
        __type(key, __u32);
        __type(value, __u32);
        /* anonymous inner-map template matching struct inner_map */
        __array(values, struct {
                __uint(type, BPF_MAP_TYPE_ARRAY);
                __uint(max_entries, INNER_MAX_ENTRIES);
                __type(key, __u32);
                __type(value, __u32);
        });
} m_array_of_maps SEC(".maps") = {
        /* slot 0 pre-populated with inner_map; remaining slots empty */
        .values = { (void *)&inner_map, 0, 0, 0, 0, 0, 0, 0, 0 },
};

/* Check BPF_MAP_TYPE_ARRAY_OF_MAPS: default field checks, then look up
 * slot 0 and confirm the returned inner map has the inner max_entries.
 * Returns 1 on success, 0 on failure.
 */
static inline int check_array_of_maps(void)
{
        struct bpf_array *array_of_maps = (struct bpf_array *)&m_array_of_maps;
        struct bpf_map *map = (struct bpf_map *)&m_array_of_maps;
        struct bpf_array *inner_map;
        int key = 0;

        VERIFY(check_default(&array_of_maps->map, map));
        inner_map = bpf_map_lookup_elem(array_of_maps, &key);
        VERIFY(inner_map != NULL);
        VERIFY(inner_map->map.max_entries == INNER_MAX_ENTRIES);

        return 1;
}
 380
struct {
        __uint(type, BPF_MAP_TYPE_HASH_OF_MAPS);
        __uint(max_entries, MAX_ENTRIES);
        __type(key, __u32);
        __type(value, __u32);
        __array(values, struct inner_map); /* reuse the named template */
} m_hash_of_maps SEC(".maps") = {
        .values = {
                [2] = &inner_map, /* only key 2 is pre-populated */
        },
};

/* Check BPF_MAP_TYPE_HASH_OF_MAPS: default field checks, then look up
 * key 2 and confirm the inner map's max_entries. Returns 1 on success,
 * 0 on failure.
 */
static inline int check_hash_of_maps(void)
{
        struct bpf_htab *hash_of_maps = (struct bpf_htab *)&m_hash_of_maps;
        struct bpf_map *map = (struct bpf_map *)&m_hash_of_maps;
        struct bpf_htab *inner_map;
        int key = 2;

        VERIFY(check_default(&hash_of_maps->map, map));
        inner_map = bpf_map_lookup_elem(hash_of_maps, &key);
        VERIFY(inner_map != NULL);
        VERIFY(inner_map->map.max_entries == INNER_MAX_ENTRIES);

        return 1;
}
 407
/* Partial mirror of the kernel's struct bpf_dtab (devmap internals);
 * also reused by check_devmap_hash() below.
 */
struct bpf_dtab {
        struct bpf_map map;
} __attribute__((preserve_access_index));

struct {
        __uint(type, BPF_MAP_TYPE_DEVMAP);
        __uint(max_entries, MAX_ENTRIES);
        __type(key, __u32);
        __type(value, __u32);
} m_devmap SEC(".maps");

static inline int check_devmap(void)
{
        struct bpf_dtab *devmap = (struct bpf_dtab *)&m_devmap;
        struct bpf_map *map = (struct bpf_map *)&m_devmap;

        VERIFY(check_default(&devmap->map, map));

        return 1;
}
 428
/* Partial mirror of the kernel's struct bpf_stab (sockmap internals). */
struct bpf_stab {
        struct bpf_map map;
} __attribute__((preserve_access_index));

struct {
        __uint(type, BPF_MAP_TYPE_SOCKMAP);
        __uint(max_entries, MAX_ENTRIES);
        __type(key, __u32);
        __type(value, __u32);
} m_sockmap SEC(".maps");

static inline int check_sockmap(void)
{
        struct bpf_stab *sockmap = (struct bpf_stab *)&m_sockmap;
        struct bpf_map *map = (struct bpf_map *)&m_sockmap;

        VERIFY(check_default(&sockmap->map, map));

        return 1;
}
 449
/* Partial mirror of the kernel's struct bpf_cpu_map. */
struct bpf_cpu_map {
        struct bpf_map map;
} __attribute__((preserve_access_index));

struct {
        __uint(type, BPF_MAP_TYPE_CPUMAP);
        __uint(max_entries, MAX_ENTRIES);
        __type(key, __u32);
        __type(value, __u32);
} m_cpumap SEC(".maps");

static inline int check_cpumap(void)
{
        struct bpf_cpu_map *cpumap = (struct bpf_cpu_map *)&m_cpumap;
        struct bpf_map *map = (struct bpf_map *)&m_cpumap;

        VERIFY(check_default(&cpumap->map, map));

        return 1;
}
 470
/* Partial mirror of the kernel's struct xsk_map. */
struct xsk_map {
        struct bpf_map map;
} __attribute__((preserve_access_index));

struct {
        __uint(type, BPF_MAP_TYPE_XSKMAP);
        __uint(max_entries, MAX_ENTRIES);
        __type(key, __u32);
        __type(value, __u32);
} m_xskmap SEC(".maps");

static inline int check_xskmap(void)
{
        struct xsk_map *xskmap = (struct xsk_map *)&m_xskmap;
        struct bpf_map *map = (struct bpf_map *)&m_xskmap;

        VERIFY(check_default(&xskmap->map, map));

        return 1;
}
 491
/* Partial mirror of the kernel's struct bpf_shtab (sockhash internals). */
struct bpf_shtab {
        struct bpf_map map;
} __attribute__((preserve_access_index));

struct {
        __uint(type, BPF_MAP_TYPE_SOCKHASH);
        __uint(max_entries, MAX_ENTRIES);
        __type(key, __u32);
        __type(value, __u32);
} m_sockhash SEC(".maps");

static inline int check_sockhash(void)
{
        struct bpf_shtab *sockhash = (struct bpf_shtab *)&m_sockhash;
        struct bpf_map *map = (struct bpf_map *)&m_sockhash;

        VERIFY(check_default(&sockhash->map, map));

        return 1;
}
 512
/* Partial mirror of the kernel's struct bpf_cgroup_storage_map; shared
 * with the per-cpu variant below.
 */
struct bpf_cgroup_storage_map {
        struct bpf_map map;
} __attribute__((preserve_access_index));

/* Cgroup storage has no max_entries (one entry per attached cgroup),
 * so none is declared here.
 */
struct {
        __uint(type, BPF_MAP_TYPE_CGROUP_STORAGE);
        __type(key, struct bpf_cgroup_storage_key);
        __type(value, __u32);
} m_cgroup_storage SEC(".maps");

static inline int check_cgroup_storage(void)
{
        struct bpf_cgroup_storage_map *cgroup_storage =
                (struct bpf_cgroup_storage_map *)&m_cgroup_storage;
        struct bpf_map *map = (struct bpf_map *)&m_cgroup_storage;

        /* max_entries is expected to read back as 0 for this map type */
        VERIFY(check(&cgroup_storage->map, map,
                     sizeof(struct bpf_cgroup_storage_key), sizeof(__u32), 0));

        return 1;
}
 534
/* Partial mirror of the kernel's struct reuseport_array. */
struct reuseport_array {
        struct bpf_map map;
} __attribute__((preserve_access_index));

struct {
        __uint(type, BPF_MAP_TYPE_REUSEPORT_SOCKARRAY);
        __uint(max_entries, MAX_ENTRIES);
        __type(key, __u32);
        __type(value, __u32);
} m_reuseport_sockarray SEC(".maps");

static inline int check_reuseport_sockarray(void)
{
        struct reuseport_array *reuseport_sockarray =
                (struct reuseport_array *)&m_reuseport_sockarray;
        struct bpf_map *map = (struct bpf_map *)&m_reuseport_sockarray;

        VERIFY(check_default(&reuseport_sockarray->map, map));

        return 1;
}
 556
/* Per-cpu variant of cgroup storage; same keying, no max_entries. */
struct {
        __uint(type, BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE);
        __type(key, struct bpf_cgroup_storage_key);
        __type(value, __u32);
} m_percpu_cgroup_storage SEC(".maps");

static inline int check_percpu_cgroup_storage(void)
{
        struct bpf_cgroup_storage_map *percpu_cgroup_storage =
                (struct bpf_cgroup_storage_map *)&m_percpu_cgroup_storage;
        struct bpf_map *map = (struct bpf_map *)&m_percpu_cgroup_storage;

        /* max_entries is expected to read back as 0 for this map type */
        VERIFY(check(&percpu_cgroup_storage->map, map,
                     sizeof(struct bpf_cgroup_storage_key), sizeof(__u32), 0));

        return 1;
}
 574
/* Partial mirror of the kernel's struct bpf_queue_stack; shared by the
 * queue and stack checks.
 */
struct bpf_queue_stack {
        struct bpf_map map;
} __attribute__((preserve_access_index));

/* Queue maps are keyless (push/pop only), hence no __type(key, ...). */
struct {
        __uint(type, BPF_MAP_TYPE_QUEUE);
        __uint(max_entries, MAX_ENTRIES);
        __type(value, __u32);
} m_queue SEC(".maps");

static inline int check_queue(void)
{
        struct bpf_queue_stack *queue = (struct bpf_queue_stack *)&m_queue;
        struct bpf_map *map = (struct bpf_map *)&m_queue;

        /* key_size is expected to read back as 0 for keyless maps */
        VERIFY(check(&queue->map, map, 0, sizeof(__u32), MAX_ENTRIES));

        return 1;
}
 594
/* Stack maps are keyless, like queues. */
struct {
        __uint(type, BPF_MAP_TYPE_STACK);
        __uint(max_entries, MAX_ENTRIES);
        __type(value, __u32);
} m_stack SEC(".maps");

static inline int check_stack(void)
{
        struct bpf_queue_stack *stack = (struct bpf_queue_stack *)&m_stack;
        struct bpf_map *map = (struct bpf_map *)&m_stack;

        /* key_size is expected to read back as 0 for keyless maps */
        VERIFY(check(&stack->map, map, 0, sizeof(__u32), MAX_ENTRIES));

        return 1;
}
 610
/* Partial mirror of the kernel's struct bpf_local_storage_map. */
struct bpf_local_storage_map {
        struct bpf_map map;
} __attribute__((preserve_access_index));

struct {
        __uint(type, BPF_MAP_TYPE_SK_STORAGE);
        __uint(map_flags, BPF_F_NO_PREALLOC); /* required for sk storage */
        __type(key, __u32);
        __type(value, __u32);
} m_sk_storage SEC(".maps");

static inline int check_sk_storage(void)
{
        struct bpf_local_storage_map *sk_storage =
                (struct bpf_local_storage_map *)&m_sk_storage;
        struct bpf_map *map = (struct bpf_map *)&m_sk_storage;

        /* max_entries is expected to read back as 0 (per-socket storage) */
        VERIFY(check(&sk_storage->map, map, sizeof(__u32), sizeof(__u32), 0));

        return 1;
}
 632
/* BPF_MAP_TYPE_DEVMAP_HASH shares the bpf_dtab layout (declared above
 * for check_devmap); default checks only.
 */
struct {
        __uint(type, BPF_MAP_TYPE_DEVMAP_HASH);
        __uint(max_entries, MAX_ENTRIES);
        __type(key, __u32);
        __type(value, __u32);
} m_devmap_hash SEC(".maps");

static inline int check_devmap_hash(void)
{
        struct bpf_dtab *devmap_hash = (struct bpf_dtab *)&m_devmap_hash;
        struct bpf_map *map = (struct bpf_map *)&m_devmap_hash;

        VERIFY(check_default(&devmap_hash->map, map));

        return 1;
}
 649
/* Partial mirror of the kernel's struct bpf_ringbuf_map. */
struct bpf_ringbuf_map {
        struct bpf_map map;
} __attribute__((preserve_access_index));

/* max_entries (the ring size) is left for libbpf/userspace to set; the
 * test expects it to equal one page.
 */
struct {
        __uint(type, BPF_MAP_TYPE_RINGBUF);
} m_ringbuf SEC(".maps");

static inline int check_ringbuf(void)
{
        struct bpf_ringbuf_map *ringbuf = (struct bpf_ringbuf_map *)&m_ringbuf;
        struct bpf_map *map = (struct bpf_map *)&m_ringbuf;

        /* page_size is provided by userspace before the program runs */
        VERIFY(check(&ringbuf->map, map, 0, 0, page_size));

        return 1;
}
 667
/* Program entry point, attached as a cgroup egress skb filter by the
 * test harness. Runs the per-map-type checks in sequence; VERIFY_TYPE
 * returns 0 at the first failing check, leaving g_map_type (and g_line)
 * describing the failure. Returns 1 when every check passes.
 */
SEC("cgroup_skb/egress")
int cg_skb(void *ctx)
{
        VERIFY_TYPE(BPF_MAP_TYPE_HASH, check_hash);
        VERIFY_TYPE(BPF_MAP_TYPE_ARRAY, check_array);
        VERIFY_TYPE(BPF_MAP_TYPE_PROG_ARRAY, check_prog_array);
        VERIFY_TYPE(BPF_MAP_TYPE_PERF_EVENT_ARRAY, check_perf_event_array);
        VERIFY_TYPE(BPF_MAP_TYPE_PERCPU_HASH, check_percpu_hash);
        VERIFY_TYPE(BPF_MAP_TYPE_PERCPU_ARRAY, check_percpu_array);
        VERIFY_TYPE(BPF_MAP_TYPE_STACK_TRACE, check_stack_trace);
        VERIFY_TYPE(BPF_MAP_TYPE_CGROUP_ARRAY, check_cgroup_array);
        VERIFY_TYPE(BPF_MAP_TYPE_LRU_HASH, check_lru_hash);
        VERIFY_TYPE(BPF_MAP_TYPE_LRU_PERCPU_HASH, check_lru_percpu_hash);
        VERIFY_TYPE(BPF_MAP_TYPE_LPM_TRIE, check_lpm_trie);
        VERIFY_TYPE(BPF_MAP_TYPE_ARRAY_OF_MAPS, check_array_of_maps);
        VERIFY_TYPE(BPF_MAP_TYPE_HASH_OF_MAPS, check_hash_of_maps);
        VERIFY_TYPE(BPF_MAP_TYPE_DEVMAP, check_devmap);
        VERIFY_TYPE(BPF_MAP_TYPE_SOCKMAP, check_sockmap);
        VERIFY_TYPE(BPF_MAP_TYPE_CPUMAP, check_cpumap);
        VERIFY_TYPE(BPF_MAP_TYPE_XSKMAP, check_xskmap);
        VERIFY_TYPE(BPF_MAP_TYPE_SOCKHASH, check_sockhash);
        VERIFY_TYPE(BPF_MAP_TYPE_CGROUP_STORAGE, check_cgroup_storage);
        VERIFY_TYPE(BPF_MAP_TYPE_REUSEPORT_SOCKARRAY,
                    check_reuseport_sockarray);
        VERIFY_TYPE(BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE,
                    check_percpu_cgroup_storage);
        VERIFY_TYPE(BPF_MAP_TYPE_QUEUE, check_queue);
        VERIFY_TYPE(BPF_MAP_TYPE_STACK, check_stack);
        VERIFY_TYPE(BPF_MAP_TYPE_SK_STORAGE, check_sk_storage);
        VERIFY_TYPE(BPF_MAP_TYPE_DEVMAP_HASH, check_devmap_hash);
        VERIFY_TYPE(BPF_MAP_TYPE_RINGBUF, check_ringbuf);

        return 1;
}
 702
char _license[] SEC("license") = "GPL"; /* GPL license required to use GPL-only helpers/kfuncs */
 704