linux/kernel/bpf/stackmap.c
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2016 Facebook
 */
#include <linux/bpf.h>
#include <linux/jhash.h>
#include <linux/filter.h>
#include <linux/stacktrace.h>
#include <linux/perf_event.h>
#include <linux/elf.h>
#include <linux/pagemap.h>
#include <linux/irq_work.h>
#include "percpu_freelist.h"

#define STACK_CREATE_FLAG_MASK                                  \
        (BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY |        \
         BPF_F_STACK_BUILD_ID)

struct stack_map_bucket {
        struct pcpu_freelist_node fnode;
        u32 hash;
        u32 nr;
        u64 data[];
};

struct bpf_stack_map {
        struct bpf_map map;
        void *elems;
        struct pcpu_freelist freelist;
        u32 n_buckets;
        struct stack_map_bucket *buckets[];
};

/* irq_work to run up_read() for build_id lookup in nmi context */
struct stack_map_irq_work {
        struct irq_work irq_work;
        struct mm_struct *mm;
};

static void do_up_read(struct irq_work *entry)
{
        struct stack_map_irq_work *work;

        if (WARN_ON_ONCE(IS_ENABLED(CONFIG_PREEMPT_RT)))
                return;

        work = container_of(entry, struct stack_map_irq_work, irq_work);
        mmap_read_unlock_non_owner(work->mm);
}

static DEFINE_PER_CPU(struct stack_map_irq_work, up_read_work);

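/*
 * With BPF_F_STACK_BUILD_ID each stack entry is stored as a struct
 * bpf_stack_build_id (build id plus offset, or a raw ip on fallback);
 * otherwise each entry is a plain u64 instruction pointer.
 */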
static inline bool stack_map_use_build_id(struct bpf_map *map)
{
        return (map->map_flags & BPF_F_STACK_BUILD_ID);
}

static inline int stack_map_data_size(struct bpf_map *map)
{
        return stack_map_use_build_id(map) ?
                sizeof(struct bpf_stack_build_id) : sizeof(u64);
}

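/*
 * All buckets are allocated up front and kept on a per-cpu freelist, so that
 * bpf_get_stackid() can grab and recycle buckets without allocating memory
 * in NMI or IRQ context.
 */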
static int prealloc_elems_and_freelist(struct bpf_stack_map *smap)
{
        /* keep the allocation size in u64 so elem_size * max_entries cannot overflow */
        u64 elem_size = sizeof(struct stack_map_bucket) +
                        (u64)smap->map.value_size;
        int err;

        smap->elems = bpf_map_area_alloc(elem_size * smap->map.max_entries,
                                         smap->map.numa_node);
        if (!smap->elems)
                return -ENOMEM;

        err = pcpu_freelist_init(&smap->freelist);
        if (err)
                goto free_elems;

        pcpu_freelist_populate(&smap->freelist, smap->elems, elem_size,
                               smap->map.max_entries);
        return 0;

free_elems:
        bpf_map_area_free(smap->elems);
        return err;
}

/* Called from syscall */
static struct bpf_map *stack_map_alloc(union bpf_attr *attr)
{
        u32 value_size = attr->value_size;
        struct bpf_stack_map *smap;
        struct bpf_map_memory mem;
        u64 cost, n_buckets;
        int err;

        if (!bpf_capable())
                return ERR_PTR(-EPERM);

        if (attr->map_flags & ~STACK_CREATE_FLAG_MASK)
                return ERR_PTR(-EINVAL);

        /* check sanity of attributes */
        if (attr->max_entries == 0 || attr->key_size != 4 ||
            value_size < 8 || value_size % 8)
                return ERR_PTR(-EINVAL);

        BUILD_BUG_ON(sizeof(struct bpf_stack_build_id) % sizeof(u64));
        if (attr->map_flags & BPF_F_STACK_BUILD_ID) {
                if (value_size % sizeof(struct bpf_stack_build_id) ||
                    value_size / sizeof(struct bpf_stack_build_id)
                    > sysctl_perf_event_max_stack)
                        return ERR_PTR(-EINVAL);
        } else if (value_size / 8 > sysctl_perf_event_max_stack)
                return ERR_PTR(-EINVAL);

        /* hash table size must be power of 2; roundup can overflow to 0 */
        n_buckets = roundup_pow_of_two(attr->max_entries);
        if (!n_buckets)
                return ERR_PTR(-E2BIG);

        cost = n_buckets * sizeof(struct stack_map_bucket *) + sizeof(*smap);
        cost += n_buckets * (value_size + sizeof(struct stack_map_bucket));
        err = bpf_map_charge_init(&mem, cost);
        if (err)
                return ERR_PTR(err);

        smap = bpf_map_area_alloc(cost, bpf_map_attr_numa_node(attr));
        if (!smap) {
                bpf_map_charge_finish(&mem);
                return ERR_PTR(-ENOMEM);
        }

        bpf_map_init_from_attr(&smap->map, attr);
        smap->map.value_size = value_size;
        smap->n_buckets = n_buckets;

        err = get_callchain_buffers(sysctl_perf_event_max_stack);
        if (err)
                goto free_charge;

        err = prealloc_elems_and_freelist(smap);
        if (err)
                goto put_buffers;

        bpf_map_charge_move(&smap->map.memory, &mem);

        return &smap->map;

put_buffers:
        put_callchain_buffers();
free_charge:
        bpf_map_charge_finish(&mem);
        bpf_map_area_free(smap);
        return ERR_PTR(err);
}
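
/*
 * For reference, a map that passes the checks above can be created from
 * userspace roughly as follows (illustrative sketch using the raw bpf(2)
 * syscall; the stack depth and entry count are arbitrary example values):
 *
 *      #include <linux/bpf.h>
 *      #include <string.h>
 *      #include <sys/syscall.h>
 *      #include <unistd.h>
 *
 *      int create_stack_map(void)
 *      {
 *              union bpf_attr attr;
 *
 *              memset(&attr, 0, sizeof(attr));
 *              attr.map_type = BPF_MAP_TYPE_STACK_TRACE;
 *              attr.key_size = sizeof(__u32);          // stack id
 *              attr.value_size = sizeof(__u64) * 127;  // up to 127 frames
 *              attr.max_entries = 10000;
 *              return syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
 *      }
 */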

#define BPF_BUILD_ID 3
/*
 * Parse the build id from the note segment. This logic can be shared between
 * 32-bit and 64-bit systems, because Elf32_Nhdr and Elf64_Nhdr are
 * identical.
 */
static inline int stack_map_parse_build_id(void *page_addr,
                                           unsigned char *build_id,
                                           void *note_start,
                                           Elf32_Word note_size)
{
        Elf32_Word note_offs = 0, new_offs;

        /* check for overflow */
        if (note_start < page_addr || note_start + note_size < note_start)
                return -EINVAL;

        /* only supports note that fits in the first page */
        if (note_start + note_size > page_addr + PAGE_SIZE)
                return -EINVAL;

        while (note_offs + sizeof(Elf32_Nhdr) < note_size) {
                Elf32_Nhdr *nhdr = (Elf32_Nhdr *)(note_start + note_offs);

                if (nhdr->n_type == BPF_BUILD_ID &&
                    nhdr->n_namesz == sizeof("GNU") &&
                    nhdr->n_descsz > 0 &&
                    nhdr->n_descsz <= BPF_BUILD_ID_SIZE) {
                        memcpy(build_id,
                               note_start + note_offs +
                               ALIGN(sizeof("GNU"), 4) + sizeof(Elf32_Nhdr),
                               nhdr->n_descsz);
                        memset(build_id + nhdr->n_descsz, 0,
                               BPF_BUILD_ID_SIZE - nhdr->n_descsz);
                        return 0;
                }
                new_offs = note_offs + sizeof(Elf32_Nhdr) +
                        ALIGN(nhdr->n_namesz, 4) + ALIGN(nhdr->n_descsz, 4);
                if (new_offs <= note_offs)  /* overflow */
                        break;
                note_offs = new_offs;
        }
        return -EINVAL;
}

/* Parse build ID from 32-bit ELF */
static int stack_map_get_build_id_32(void *page_addr,
                                     unsigned char *build_id)
{
        Elf32_Ehdr *ehdr = (Elf32_Ehdr *)page_addr;
        Elf32_Phdr *phdr;
        int i;

        /* only supports phdr that fits in one page */
        if (ehdr->e_phnum >
            (PAGE_SIZE - sizeof(Elf32_Ehdr)) / sizeof(Elf32_Phdr))
                return -EINVAL;

        phdr = (Elf32_Phdr *)(page_addr + sizeof(Elf32_Ehdr));

        for (i = 0; i < ehdr->e_phnum; ++i)
                if (phdr[i].p_type == PT_NOTE)
                        return stack_map_parse_build_id(page_addr, build_id,
                                        page_addr + phdr[i].p_offset,
                                        phdr[i].p_filesz);
        return -EINVAL;
}

/* Parse build ID from 64-bit ELF */
static int stack_map_get_build_id_64(void *page_addr,
                                     unsigned char *build_id)
{
        Elf64_Ehdr *ehdr = (Elf64_Ehdr *)page_addr;
        Elf64_Phdr *phdr;
        int i;

        /* only supports phdr that fits in one page */
        if (ehdr->e_phnum >
            (PAGE_SIZE - sizeof(Elf64_Ehdr)) / sizeof(Elf64_Phdr))
                return -EINVAL;

        phdr = (Elf64_Phdr *)(page_addr + sizeof(Elf64_Ehdr));

        for (i = 0; i < ehdr->e_phnum; ++i)
                if (phdr[i].p_type == PT_NOTE)
                        return stack_map_parse_build_id(page_addr, build_id,
                                        page_addr + phdr[i].p_offset,
                                        phdr[i].p_filesz);
        return -EINVAL;
}

/* Parse build ID of ELF file mapped to vma */
static int stack_map_get_build_id(struct vm_area_struct *vma,
                                  unsigned char *build_id)
{
        Elf32_Ehdr *ehdr;
        struct page *page;
        void *page_addr;
        int ret;

        /* only works for page backed storage */
        if (!vma->vm_file)
                return -EINVAL;

        page = find_get_page(vma->vm_file->f_mapping, 0);
        if (!page)
                return -EFAULT; /* page not mapped */

        ret = -EINVAL;
        page_addr = kmap_atomic(page);
        ehdr = (Elf32_Ehdr *)page_addr;

        /* compare magic "\x7fELF" */
        if (memcmp(ehdr->e_ident, ELFMAG, SELFMAG) != 0)
                goto out;

        /* only support executable file and shared object file */
        if (ehdr->e_type != ET_EXEC && ehdr->e_type != ET_DYN)
                goto out;

        if (ehdr->e_ident[EI_CLASS] == ELFCLASS32)
                ret = stack_map_get_build_id_32(page_addr, build_id);
        else if (ehdr->e_ident[EI_CLASS] == ELFCLASS64)
                ret = stack_map_get_build_id_64(page_addr, build_id);
out:
        kunmap_atomic(page_addr);
        put_page(page);
        return ret;
}

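/*
 * Convert an array of instruction pointers into build_id+offset entries.
 * Entries whose vma or build id cannot be resolved fall back to
 * BPF_STACK_BUILD_ID_IP with the raw ip preserved.
 */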
static void stack_map_get_build_id_offset(struct bpf_stack_build_id *id_offs,
                                          u64 *ips, u32 trace_nr, bool user)
{
        int i;
        struct vm_area_struct *vma;
        bool irq_work_busy = false;
        struct stack_map_irq_work *work = NULL;

        if (irqs_disabled()) {
                if (!IS_ENABLED(CONFIG_PREEMPT_RT)) {
                        work = this_cpu_ptr(&up_read_work);
                        if (atomic_read(&work->irq_work.flags) & IRQ_WORK_BUSY) {
                                /* cannot queue more up_read, fallback */
                                irq_work_busy = true;
                        }
                } else {
                        /*
                         * PREEMPT_RT does not allow trylocking the mmap sem
                         * from interrupt-disabled context. Force the
                         * fallback code.
                         */
                        irq_work_busy = true;
                }
        }

        /*
         * We cannot do up_read() while irqs are disabled, because of the
         * risk of deadlocking on rq_lock. To do the build_id lookup with
         * irqs disabled, we need to run up_read() from irq_work, using a
         * percpu variable. If the irq_work is already in use by another
         * lookup, we fall back to reporting raw ips.
         *
         * The same fallback is used for the kernel stack (!user) on a
         * stackmap with build_id.
         */
        if (!user || !current || !current->mm || irq_work_busy ||
            !mmap_read_trylock_non_owner(current->mm)) {
                /* cannot access current->mm, fall back to ips */
                for (i = 0; i < trace_nr; i++) {
                        id_offs[i].status = BPF_STACK_BUILD_ID_IP;
                        id_offs[i].ip = ips[i];
                        memset(id_offs[i].build_id, 0, BPF_BUILD_ID_SIZE);
                }
                return;
        }

        for (i = 0; i < trace_nr; i++) {
                vma = find_vma(current->mm, ips[i]);
                if (!vma || stack_map_get_build_id(vma, id_offs[i].build_id)) {
                        /* per entry fall back to ips */
                        id_offs[i].status = BPF_STACK_BUILD_ID_IP;
                        id_offs[i].ip = ips[i];
                        memset(id_offs[i].build_id, 0, BPF_BUILD_ID_SIZE);
                        continue;
                }
                id_offs[i].offset = (vma->vm_pgoff << PAGE_SHIFT) + ips[i]
                        - vma->vm_start;
                id_offs[i].status = BPF_STACK_BUILD_ID_VALID;
        }

        if (!work) {
                mmap_read_unlock_non_owner(current->mm);
        } else {
                work->mm = current->mm;
                irq_work_queue(&work->irq_work);
        }
}

BPF_CALL_3(bpf_get_stackid, struct pt_regs *, regs, struct bpf_map *, map,
           u64, flags)
{
        struct bpf_stack_map *smap = container_of(map, struct bpf_stack_map, map);
        struct perf_callchain_entry *trace;
        struct stack_map_bucket *bucket, *new_bucket, *old_bucket;
        u32 max_depth = map->value_size / stack_map_data_size(map);
        /* stack_map_alloc() checks that max_depth <= sysctl_perf_event_max_stack */
        u32 init_nr = sysctl_perf_event_max_stack - max_depth;
        u32 skip = flags & BPF_F_SKIP_FIELD_MASK;
        u32 hash, id, trace_nr, trace_len;
        bool user = flags & BPF_F_USER_STACK;
        bool kernel = !user;
        u64 *ips;
        bool hash_matches;

        if (unlikely(flags & ~(BPF_F_SKIP_FIELD_MASK | BPF_F_USER_STACK |
                               BPF_F_FAST_STACK_CMP | BPF_F_REUSE_STACKID)))
                return -EINVAL;

        trace = get_perf_callchain(regs, init_nr, kernel, user,
                                   sysctl_perf_event_max_stack, false, false);

        if (unlikely(!trace))
                /* couldn't fetch the stack trace */
                return -EFAULT;

        /* get_perf_callchain() guarantees that trace->nr >= init_nr
         * and trace->nr <= sysctl_perf_event_max_stack, so trace_nr <= max_depth
         */
        trace_nr = trace->nr - init_nr;

        if (trace_nr <= skip)
                /* skipping more than usable stack trace */
                return -EFAULT;

        trace_nr -= skip;
        trace_len = trace_nr * sizeof(u64);
        ips = trace->ip + skip + init_nr;
        hash = jhash2((u32 *)ips, trace_len / sizeof(u32), 0);
        id = hash & (smap->n_buckets - 1);
        bucket = READ_ONCE(smap->buckets[id]);

        hash_matches = bucket && bucket->hash == hash;
        /* fast cmp */
        if (hash_matches && flags & BPF_F_FAST_STACK_CMP)
                return id;

        if (stack_map_use_build_id(map)) {
                /* for build_id+offset, pop a bucket before slow cmp */
                new_bucket = (struct stack_map_bucket *)
                        pcpu_freelist_pop(&smap->freelist);
                if (unlikely(!new_bucket))
                        return -ENOMEM;
                new_bucket->nr = trace_nr;
                stack_map_get_build_id_offset(
                        (struct bpf_stack_build_id *)new_bucket->data,
                        ips, trace_nr, user);
                trace_len = trace_nr * sizeof(struct bpf_stack_build_id);
                if (hash_matches && bucket->nr == trace_nr &&
                    memcmp(bucket->data, new_bucket->data, trace_len) == 0) {
                        pcpu_freelist_push(&smap->freelist, &new_bucket->fnode);
                        return id;
                }
                if (bucket && !(flags & BPF_F_REUSE_STACKID)) {
                        pcpu_freelist_push(&smap->freelist, &new_bucket->fnode);
                        return -EEXIST;
                }
        } else {
                if (hash_matches && bucket->nr == trace_nr &&
                    memcmp(bucket->data, ips, trace_len) == 0)
                        return id;
                if (bucket && !(flags & BPF_F_REUSE_STACKID))
                        return -EEXIST;

                new_bucket = (struct stack_map_bucket *)
                        pcpu_freelist_pop(&smap->freelist);
                if (unlikely(!new_bucket))
                        return -ENOMEM;
                memcpy(new_bucket->data, ips, trace_len);
        }

        new_bucket->hash = hash;
        new_bucket->nr = trace_nr;

        old_bucket = xchg(&smap->buckets[id], new_bucket);
        if (old_bucket)
                pcpu_freelist_push(&smap->freelist, &old_bucket->fnode);
        return id;
}

const struct bpf_func_proto bpf_get_stackid_proto = {
        .func           = bpf_get_stackid,
        .gpl_only       = true,
        .ret_type       = RET_INTEGER,
        .arg1_type      = ARG_PTR_TO_CTX,
        .arg2_type      = ARG_CONST_MAP_PTR,
        .arg3_type      = ARG_ANYTHING,
};
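
/*
 * Typical usage from a tracing program (an illustrative sketch, not part of
 * this file): the helper deduplicates the current stack into the map and
 * returns its id. The attach point and map definition below are example
 * choices in the legacy libbpf "maps" section style.
 *
 *      #include <linux/bpf.h>
 *      #include <linux/ptrace.h>
 *      #include <bpf/bpf_helpers.h>
 *
 *      struct bpf_map_def SEC("maps") stack_traces = {
 *              .type = BPF_MAP_TYPE_STACK_TRACE,
 *              .key_size = sizeof(__u32),
 *              .value_size = sizeof(__u64) * 127,
 *              .max_entries = 10000,
 *      };
 *
 *      SEC("kprobe/finish_task_switch")
 *      int count_stacks(struct pt_regs *ctx)
 *      {
 *              long id = bpf_get_stackid(ctx, &stack_traces,
 *                                        BPF_F_USER_STACK | BPF_F_FAST_STACK_CMP);
 *
 *              // A negative id is an error (e.g. -EEXIST on a bucket clash);
 *              // otherwise it identifies the stored trace for later lookup.
 *              return 0;
 *      }
 *
 *      char _license[] SEC("license") = "GPL";
 */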

BPF_CALL_4(bpf_get_stack, struct pt_regs *, regs, void *, buf, u32, size,
           u64, flags)
{
        u32 init_nr, trace_nr, copy_len, elem_size, num_elem;
        bool user_build_id = flags & BPF_F_USER_BUILD_ID;
        u32 skip = flags & BPF_F_SKIP_FIELD_MASK;
        bool user = flags & BPF_F_USER_STACK;
        struct perf_callchain_entry *trace;
        bool kernel = !user;
        int err = -EINVAL;
        u64 *ips;

        if (unlikely(flags & ~(BPF_F_SKIP_FIELD_MASK | BPF_F_USER_STACK |
                               BPF_F_USER_BUILD_ID)))
                goto clear;
        if (kernel && user_build_id)
                goto clear;

        elem_size = (user && user_build_id) ? sizeof(struct bpf_stack_build_id)
                                            : sizeof(u64);
        if (unlikely(size % elem_size))
                goto clear;

        num_elem = size / elem_size;
        if (sysctl_perf_event_max_stack < num_elem)
                init_nr = 0;
        else
                init_nr = sysctl_perf_event_max_stack - num_elem;
        trace = get_perf_callchain(regs, init_nr, kernel, user,
                                   sysctl_perf_event_max_stack, false, false);
        if (unlikely(!trace))
                goto err_fault;

        trace_nr = trace->nr - init_nr;
        if (trace_nr < skip)
                goto err_fault;

        trace_nr -= skip;
        trace_nr = (trace_nr <= num_elem) ? trace_nr : num_elem;
        copy_len = trace_nr * elem_size;
        ips = trace->ip + skip + init_nr;
        if (user && user_build_id)
                stack_map_get_build_id_offset(buf, ips, trace_nr, user);
        else
                memcpy(buf, ips, copy_len);

        if (size > copy_len)
                memset(buf + copy_len, 0, size - copy_len);
        return copy_len;

err_fault:
        err = -EFAULT;
clear:
        memset(buf, 0, size);
        return err;
}

const struct bpf_func_proto bpf_get_stack_proto = {
        .func           = bpf_get_stack,
        .gpl_only       = true,
        .ret_type       = RET_INTEGER,
        .arg1_type      = ARG_PTR_TO_CTX,
        .arg2_type      = ARG_PTR_TO_UNINIT_MEM,
        .arg3_type      = ARG_CONST_SIZE_OR_ZERO,
        .arg4_type      = ARG_ANYTHING,
};
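
/*
 * Unlike bpf_get_stackid(), this helper copies the raw trace directly into a
 * buffer supplied by the program, so no stackmap is required. A minimal
 * sketch (the frame count and attach point are illustrative):
 *
 *      #include <linux/bpf.h>
 *      #include <linux/ptrace.h>
 *      #include <bpf/bpf_helpers.h>
 *
 *      #define MAX_FRAMES 32
 *
 *      SEC("kprobe/finish_task_switch")
 *      int dump_stack_trace(struct pt_regs *ctx)
 *      {
 *              __u64 frames[MAX_FRAMES];
 *              long len;
 *
 *              len = bpf_get_stack(ctx, frames, sizeof(frames),
 *                                  BPF_F_USER_STACK);
 *              // On success len is the number of bytes written; dividing by
 *              // sizeof(__u64) gives the number of frames captured.
 *              return 0;
 *      }
 *
 *      char _license[] SEC("license") = "GPL";
 */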

/* Called from eBPF program */
static void *stack_map_lookup_elem(struct bpf_map *map, void *key)
{
        return ERR_PTR(-EOPNOTSUPP);
}

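/*
 * bpf_stackmap_copy() below empties the bucket slot with xchg() for the
 * duration of the copy, so a concurrent bpf_get_stackid() cannot recycle the
 * bucket that is being read; any bucket installed in the meantime is pushed
 * back to the freelist when the original one is restored.
 */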
/* Called from syscall */
int bpf_stackmap_copy(struct bpf_map *map, void *key, void *value)
{
        struct bpf_stack_map *smap = container_of(map, struct bpf_stack_map, map);
        struct stack_map_bucket *bucket, *old_bucket;
        u32 id = *(u32 *)key, trace_len;

        if (unlikely(id >= smap->n_buckets))
                return -ENOENT;

        bucket = xchg(&smap->buckets[id], NULL);
        if (!bucket)
                return -ENOENT;

        trace_len = bucket->nr * stack_map_data_size(map);
        memcpy(value, bucket->data, trace_len);
        memset(value + trace_len, 0, map->value_size - trace_len);

        old_bucket = xchg(&smap->buckets[id], bucket);
        if (old_bucket)
                pcpu_freelist_push(&smap->freelist, &old_bucket->fnode);
        return 0;
}
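
/*
 * From userspace, the id returned by bpf_get_stackid() can be turned back
 * into frames with an ordinary lookup on the stack map (a sketch; the libbpf
 * wrapper below is just one way to issue the BPF_MAP_LOOKUP_ELEM syscall):
 *
 *      #include <bpf/bpf.h>            // bpf_map_lookup_elem()
 *      #include <linux/types.h>
 *
 *      int read_stack_trace(int stack_map_fd, __u32 stack_id,
 *                           __u64 frames[127])
 *      {
 *              return bpf_map_lookup_elem(stack_map_fd, &stack_id, frames);
 *      }
 */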

static int stack_map_get_next_key(struct bpf_map *map, void *key,
                                  void *next_key)
{
        struct bpf_stack_map *smap = container_of(map,
                                                  struct bpf_stack_map, map);
        u32 id;

        WARN_ON_ONCE(!rcu_read_lock_held());

        if (!key) {
                id = 0;
        } else {
                id = *(u32 *)key;
                if (id >= smap->n_buckets || !smap->buckets[id])
                        id = 0;
                else
                        id++;
        }

        while (id < smap->n_buckets && !smap->buckets[id])
                id++;

        if (id >= smap->n_buckets)
                return -ENOENT;

        *(u32 *)next_key = id;
        return 0;
}

static int stack_map_update_elem(struct bpf_map *map, void *key, void *value,
                                 u64 map_flags)
{
        return -EINVAL;
}

/* Called from syscall or from eBPF program */
static int stack_map_delete_elem(struct bpf_map *map, void *key)
{
        struct bpf_stack_map *smap = container_of(map, struct bpf_stack_map, map);
        struct stack_map_bucket *old_bucket;
        u32 id = *(u32 *)key;

        if (unlikely(id >= smap->n_buckets))
                return -E2BIG;

        old_bucket = xchg(&smap->buckets[id], NULL);
        if (old_bucket) {
                pcpu_freelist_push(&smap->freelist, &old_bucket->fnode);
                return 0;
        } else {
                return -ENOENT;
        }
}

/* Called when map->refcnt goes to zero, either from workqueue or from syscall */
static void stack_map_free(struct bpf_map *map)
{
        struct bpf_stack_map *smap = container_of(map, struct bpf_stack_map, map);

        /* wait for bpf programs to complete before freeing stack map */
        synchronize_rcu();

        bpf_map_area_free(smap->elems);
        pcpu_freelist_destroy(&smap->freelist);
        bpf_map_area_free(smap);
        put_callchain_buffers();
}

const struct bpf_map_ops stack_trace_map_ops = {
        .map_alloc = stack_map_alloc,
        .map_free = stack_map_free,
        .map_get_next_key = stack_map_get_next_key,
        .map_lookup_elem = stack_map_lookup_elem,
        .map_update_elem = stack_map_update_elem,
        .map_delete_elem = stack_map_delete_elem,
        .map_check_btf = map_check_no_btf,
};

static int __init stack_map_init(void)
{
        int cpu;
        struct stack_map_irq_work *work;

        for_each_possible_cpu(cpu) {
                work = per_cpu_ptr(&up_read_work, cpu);
                init_irq_work(&work->irq_work, do_up_read);
        }
        return 0;
}
subsys_initcall(stack_map_init);