linux/kernel/bpf/stackmap.c
   1// SPDX-License-Identifier: GPL-2.0-only
   2/* Copyright (c) 2016 Facebook
   3 */
   4#include <linux/bpf.h>
   5#include <linux/jhash.h>
   6#include <linux/filter.h>
   7#include <linux/kernel.h>
   8#include <linux/stacktrace.h>
   9#include <linux/perf_event.h>
  10#include <linux/elf.h>
  11#include <linux/pagemap.h>
  12#include <linux/irq_work.h>
  13#include <linux/btf_ids.h>
  14#include "percpu_freelist.h"
  15
  16#define STACK_CREATE_FLAG_MASK                                  \
  17        (BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY |        \
  18         BPF_F_STACK_BUILD_ID)
  19
  20struct stack_map_bucket {
  21        struct pcpu_freelist_node fnode;
  22        u32 hash;
  23        u32 nr;
  24        u64 data[];
  25};
  26
  27struct bpf_stack_map {
  28        struct bpf_map map;
  29        void *elems;
  30        struct pcpu_freelist freelist;
  31        u32 n_buckets;
  32        struct stack_map_bucket *buckets[];
  33};
  34
  35/* irq_work to run up_read() for build_id lookup in NMI context */
  36struct stack_map_irq_work {
  37        struct irq_work irq_work;
  38        struct mm_struct *mm;
  39};
  40
  41static void do_up_read(struct irq_work *entry)
  42{
  43        struct stack_map_irq_work *work;
  44
  45        if (WARN_ON_ONCE(IS_ENABLED(CONFIG_PREEMPT_RT)))
  46                return;
  47
  48        work = container_of(entry, struct stack_map_irq_work, irq_work);
  49        mmap_read_unlock_non_owner(work->mm);
  50}
  51
  52static DEFINE_PER_CPU(struct stack_map_irq_work, up_read_work);
  53
  54static inline bool stack_map_use_build_id(struct bpf_map *map)
  55{
  56        return (map->map_flags & BPF_F_STACK_BUILD_ID);
  57}
  58
  59static inline int stack_map_data_size(struct bpf_map *map)
  60{
  61        return stack_map_use_build_id(map) ?
  62                sizeof(struct bpf_stack_build_id) : sizeof(u64);
  63}
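
/*
 * For illustration, the two per-entry layouts selected above, assuming the
 * default sysctl_perf_event_max_stack of 127 (an assumption, not a
 * requirement): a plain map stores raw instruction pointers,
 *
 *	value_size = 127 * sizeof(u64)                       = 1016 bytes
 *
 * while a BPF_F_STACK_BUILD_ID map stores one struct bpf_stack_build_id
 * (status, 20-byte build id, offset/ip union; typically 32 bytes) per frame,
 *
 *	value_size = 127 * sizeof(struct bpf_stack_build_id) = 4064 bytes
 */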
  64
  65static int prealloc_elems_and_freelist(struct bpf_stack_map *smap)
  66{
  67        u32 elem_size = sizeof(struct stack_map_bucket) + smap->map.value_size;
  68        int err;
  69
  70        smap->elems = bpf_map_area_alloc(elem_size * smap->map.max_entries,
  71                                         smap->map.numa_node);
  72        if (!smap->elems)
  73                return -ENOMEM;
  74
  75        err = pcpu_freelist_init(&smap->freelist);
  76        if (err)
  77                goto free_elems;
  78
  79        pcpu_freelist_populate(&smap->freelist, smap->elems, elem_size,
  80                               smap->map.max_entries);
  81        return 0;
  82
  83free_elems:
  84        bpf_map_area_free(smap->elems);
  85        return err;
  86}
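
/*
 * Rough layout sketch (sizes assume a 64-bit build): each preallocated
 * element above is a stack_map_bucket header followed by value_size bytes
 * of trace data,
 *
 *	elem_size = sizeof(struct stack_map_bucket) + value_size
 *	          = 16 + 1016 = 1032 bytes        (for the 1016-byte example)
 *	total     = elem_size * max_entries
 *
 * all of which lives in one flat area and is recycled through the per-cpu
 * freelist as buckets are replaced.
 */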
  87
  88/* Called from syscall */
  89static struct bpf_map *stack_map_alloc(union bpf_attr *attr)
  90{
  91        u32 value_size = attr->value_size;
  92        struct bpf_stack_map *smap;
  93        struct bpf_map_memory mem;
  94        u64 cost, n_buckets;
  95        int err;
  96
  97        if (!bpf_capable())
  98                return ERR_PTR(-EPERM);
  99
 100        if (attr->map_flags & ~STACK_CREATE_FLAG_MASK)
 101                return ERR_PTR(-EINVAL);
 102
 103        /* check sanity of attributes */
 104        if (attr->max_entries == 0 || attr->key_size != 4 ||
 105            value_size < 8 || value_size % 8)
 106                return ERR_PTR(-EINVAL);
 107
 108        BUILD_BUG_ON(sizeof(struct bpf_stack_build_id) % sizeof(u64));
 109        if (attr->map_flags & BPF_F_STACK_BUILD_ID) {
 110                if (value_size % sizeof(struct bpf_stack_build_id) ||
 111                    value_size / sizeof(struct bpf_stack_build_id)
 112                    > sysctl_perf_event_max_stack)
 113                        return ERR_PTR(-EINVAL);
 114        } else if (value_size / 8 > sysctl_perf_event_max_stack)
 115                return ERR_PTR(-EINVAL);
 116
 117        /* hash table size must be a power of 2 */
 118        n_buckets = roundup_pow_of_two(attr->max_entries);
 119
 120        cost = n_buckets * sizeof(struct stack_map_bucket *) + sizeof(*smap);
 121        cost += n_buckets * (value_size + sizeof(struct stack_map_bucket));
 122        err = bpf_map_charge_init(&mem, cost);
 123        if (err)
 124                return ERR_PTR(err);
 125
 126        smap = bpf_map_area_alloc(cost, bpf_map_attr_numa_node(attr));
 127        if (!smap) {
 128                bpf_map_charge_finish(&mem);
 129                return ERR_PTR(-ENOMEM);
 130        }
 131
 132        bpf_map_init_from_attr(&smap->map, attr);
 133        smap->map.value_size = value_size;
 134        smap->n_buckets = n_buckets;
 135
 136        err = get_callchain_buffers(sysctl_perf_event_max_stack);
 137        if (err)
 138                goto free_charge;
 139
 140        err = prealloc_elems_and_freelist(smap);
 141        if (err)
 142                goto put_buffers;
 143
 144        bpf_map_charge_move(&smap->map.memory, &mem);
 145
 146        return &smap->map;
 147
 148put_buffers:
 149        put_callchain_buffers();
 150free_charge:
 151        bpf_map_charge_finish(&mem);
 152        bpf_map_area_free(smap);
 153        return ERR_PTR(err);
 154}
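
/*
 * A minimal user-space sketch (illustrative, not part of this file) of
 * creating such a map with the raw bpf(2) syscall, sized to satisfy the
 * checks in stack_map_alloc() above; the depth of 127 assumes the default
 * sysctl_perf_event_max_stack and max_entries is an arbitrary choice:
 *
 *	#include <linux/bpf.h>
 *	#include <string.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	int create_stack_map(void)
 *	{
 *		union bpf_attr attr;
 *
 *		memset(&attr, 0, sizeof(attr));
 *		attr.map_type    = BPF_MAP_TYPE_STACK_TRACE;
 *		attr.key_size    = sizeof(__u32);        // must be 4
 *		attr.value_size  = 127 * sizeof(__u64);  // multiple of 8
 *		attr.max_entries = 16384;                // bucket count rounded up
 *						         // to a power of 2
 *		return syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
 *	}
 */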
 155
 156#define BPF_BUILD_ID 3
 157/*
 158 * Parse build id from the note segment. This logic can be shared between
 159 * 32-bit and 64-bit systems, because Elf32_Nhdr and Elf64_Nhdr are
 160 * identical.
 161 */
 162static inline int stack_map_parse_build_id(void *page_addr,
 163                                           unsigned char *build_id,
 164                                           void *note_start,
 165                                           Elf32_Word note_size)
 166{
 167        Elf32_Word note_offs = 0, new_offs;
 168
 169        /* check for overflow */
 170        if (note_start < page_addr || note_start + note_size < note_start)
 171                return -EINVAL;
 172
 173        /* only supports a note that fits in the first page */
 174        if (note_start + note_size > page_addr + PAGE_SIZE)
 175                return -EINVAL;
 176
 177        while (note_offs + sizeof(Elf32_Nhdr) < note_size) {
 178                Elf32_Nhdr *nhdr = (Elf32_Nhdr *)(note_start + note_offs);
 179
 180                if (nhdr->n_type == BPF_BUILD_ID &&
 181                    nhdr->n_namesz == sizeof("GNU") &&
 182                    nhdr->n_descsz > 0 &&
 183                    nhdr->n_descsz <= BPF_BUILD_ID_SIZE) {
 184                        memcpy(build_id,
 185                               note_start + note_offs +
 186                               ALIGN(sizeof("GNU"), 4) + sizeof(Elf32_Nhdr),
 187                               nhdr->n_descsz);
 188                        memset(build_id + nhdr->n_descsz, 0,
 189                               BPF_BUILD_ID_SIZE - nhdr->n_descsz);
 190                        return 0;
 191                }
 192                new_offs = note_offs + sizeof(Elf32_Nhdr) +
 193                        ALIGN(nhdr->n_namesz, 4) + ALIGN(nhdr->n_descsz, 4);
 194                if (new_offs <= note_offs)  /* overflow */
 195                        break;
 196                note_offs = new_offs;
 197        }
 198        return -EINVAL;
 199}
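
/*
 * For reference, the note the parser above looks for is the standard GNU
 * build-id note (n_type 3 == NT_GNU_BUILD_ID), typically laid out as:
 *
 *	Elf32_Nhdr { n_namesz = 4, n_descsz = 20, n_type = 3 }
 *	"GNU\0"               // name, padded to a 4-byte boundary
 *	20-byte build id      // desc, usually a SHA-1
 *
 * which is why the copy skips sizeof(Elf32_Nhdr) plus the 4-byte-aligned
 * "GNU" name before reading n_descsz bytes.
 */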
 200
 201/* Parse build ID from 32-bit ELF */
 202static int stack_map_get_build_id_32(void *page_addr,
 203                                     unsigned char *build_id)
 204{
 205        Elf32_Ehdr *ehdr = (Elf32_Ehdr *)page_addr;
 206        Elf32_Phdr *phdr;
 207        int i;
 208
 209        /* only supports a phdr table that fits in one page */
 210        if (ehdr->e_phnum >
 211            (PAGE_SIZE - sizeof(Elf32_Ehdr)) / sizeof(Elf32_Phdr))
 212                return -EINVAL;
 213
 214        phdr = (Elf32_Phdr *)(page_addr + sizeof(Elf32_Ehdr));
 215
 216        for (i = 0; i < ehdr->e_phnum; ++i) {
 217                if (phdr[i].p_type == PT_NOTE &&
 218                    !stack_map_parse_build_id(page_addr, build_id,
 219                                              page_addr + phdr[i].p_offset,
 220                                              phdr[i].p_filesz))
 221                        return 0;
 222        }
 223        return -EINVAL;
 224}
 225
 226/* Parse build ID from 64-bit ELF */
 227static int stack_map_get_build_id_64(void *page_addr,
 228                                     unsigned char *build_id)
 229{
 230        Elf64_Ehdr *ehdr = (Elf64_Ehdr *)page_addr;
 231        Elf64_Phdr *phdr;
 232        int i;
 233
 234        /* only supports a phdr table that fits in one page */
 235        if (ehdr->e_phnum >
 236            (PAGE_SIZE - sizeof(Elf64_Ehdr)) / sizeof(Elf64_Phdr))
 237                return -EINVAL;
 238
 239        phdr = (Elf64_Phdr *)(page_addr + sizeof(Elf64_Ehdr));
 240
 241        for (i = 0; i < ehdr->e_phnum; ++i) {
 242                if (phdr[i].p_type == PT_NOTE &&
 243                    !stack_map_parse_build_id(page_addr, build_id,
 244                                              page_addr + phdr[i].p_offset,
 245                                              phdr[i].p_filesz))
 246                        return 0;
 247        }
 248        return -EINVAL;
 249}
 250
 251/* Parse build ID of ELF file mapped to vma */
 252static int stack_map_get_build_id(struct vm_area_struct *vma,
 253                                  unsigned char *build_id)
 254{
 255        Elf32_Ehdr *ehdr;
 256        struct page *page;
 257        void *page_addr;
 258        int ret;
 259
 260        /* only works for page-backed storage */
 261        if (!vma->vm_file)
 262                return -EINVAL;
 263
 264        page = find_get_page(vma->vm_file->f_mapping, 0);
 265        if (!page)
 266                return -EFAULT; /* page not mapped */
 267
 268        ret = -EINVAL;
 269        page_addr = kmap_atomic(page);
 270        ehdr = (Elf32_Ehdr *)page_addr;
 271
 272        /* compare ELF magic "\x7fELF" */
 273        if (memcmp(ehdr->e_ident, ELFMAG, SELFMAG) != 0)
 274                goto out;
 275
 276        /* only support executable files and shared object files */
 277        if (ehdr->e_type != ET_EXEC && ehdr->e_type != ET_DYN)
 278                goto out;
 279
 280        if (ehdr->e_ident[EI_CLASS] == ELFCLASS32)
 281                ret = stack_map_get_build_id_32(page_addr, build_id);
 282        else if (ehdr->e_ident[EI_CLASS] == ELFCLASS64)
 283                ret = stack_map_get_build_id_64(page_addr, build_id);
 284out:
 285        kunmap_atomic(page_addr);
 286        put_page(page);
 287        return ret;
 288}
 289
 290static void stack_map_get_build_id_offset(struct bpf_stack_build_id *id_offs,
 291                                          u64 *ips, u32 trace_nr, bool user)
 292{
 293        int i;
 294        struct vm_area_struct *vma;
 295        bool irq_work_busy = false;
 296        struct stack_map_irq_work *work = NULL;
 297
 298        if (irqs_disabled()) {
 299                if (!IS_ENABLED(CONFIG_PREEMPT_RT)) {
 300                        work = this_cpu_ptr(&up_read_work);
 301                        if (atomic_read(&work->irq_work.flags) & IRQ_WORK_BUSY) {
 302                                /* cannot queue another up_read, fall back */
 303                                irq_work_busy = true;
 304                        }
 305                } else {
 306                        /*
 307                         * PREEMPT_RT does not allow trylocking the mmap lock
 308                         * in interrupt-disabled context. Force the fallback code.
 309                         */
 310                        irq_work_busy = true;
 311                }
 312        }
 313
 314        /*
 315         * We cannot do up_read() when irqs are disabled, because of the
 316         * risk of deadlocking on rq_lock. To do a build_id lookup while
 317         * irqs are disabled, we need to run up_read() in irq_work, and we
 318         * use a percpu variable for that. If the irq_work is already in
 319         * use by another lookup, we fall back to reporting plain ips.
 320         *
 321         * The same fallback is used for kernel stacks (!user) on a
 322         * stackmap with build_id.
 323         */
 324        if (!user || !current || !current->mm || irq_work_busy ||
 325            !mmap_read_trylock_non_owner(current->mm)) {
 326                /* cannot access current->mm, fall back to ips */
 327                for (i = 0; i < trace_nr; i++) {
 328                        id_offs[i].status = BPF_STACK_BUILD_ID_IP;
 329                        id_offs[i].ip = ips[i];
 330                        memset(id_offs[i].build_id, 0, BPF_BUILD_ID_SIZE);
 331                }
 332                return;
 333        }
 334
 335        for (i = 0; i < trace_nr; i++) {
 336                vma = find_vma(current->mm, ips[i]);
 337                if (!vma || stack_map_get_build_id(vma, id_offs[i].build_id)) {
 338                        /* fall back to ips for this entry */
 339                        id_offs[i].status = BPF_STACK_BUILD_ID_IP;
 340                        id_offs[i].ip = ips[i];
 341                        memset(id_offs[i].build_id, 0, BPF_BUILD_ID_SIZE);
 342                        continue;
 343                }
 344                id_offs[i].offset = (vma->vm_pgoff << PAGE_SHIFT) + ips[i]
 345                        - vma->vm_start;
 346                id_offs[i].status = BPF_STACK_BUILD_ID_VALID;
 347        }
 348
 349        if (!work) {
 350                mmap_read_unlock_non_owner(current->mm);
 351        } else {
 352                work->mm = current->mm;
 353                irq_work_queue(&work->irq_work);
 354        }
 355}
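
/*
 * Worked example of the offset math above (addresses invented, 4K pages
 * assumed): with vma->vm_start = 0x7f0000010000 and vma->vm_pgoff = 0x10
 * (file offset 0x10000), an ip of 0x7f0000010234 yields
 *
 *	offset = (0x10 << 12) + 0x7f0000010234 - 0x7f0000010000 = 0x10234
 *
 * i.e. the instruction's offset within the ELF file, which user space can
 * pair with the build id to symbolize independently of load addresses.
 */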
 356
 357static struct perf_callchain_entry *
 358get_callchain_entry_for_task(struct task_struct *task, u32 init_nr)
 359{
 360#ifdef CONFIG_STACKTRACE
 361        struct perf_callchain_entry *entry;
 362        int rctx;
 363
 364        entry = get_callchain_entry(&rctx);
 365
 366        if (!entry)
 367                return NULL;
 368
 369        entry->nr = init_nr +
 370                stack_trace_save_tsk(task, (unsigned long *)(entry->ip + init_nr),
 371                                     sysctl_perf_event_max_stack - init_nr, 0);
 372
 373        /* stack_trace_save_tsk() fills an array of unsigned long, while
 374         * perf_callchain_entry uses a u64 array; on 32-bit systems the two
 375         * differ in width, so widen the entries in place.
 376         */
 377        if (__BITS_PER_LONG != 64) {
 378                unsigned long *from = (unsigned long *) entry->ip;
 379                u64 *to = entry->ip;
 380                int i;
 381
 382                /* copy data from the end to avoid using extra buffer */
 383                for (i = entry->nr - 1; i >= (int)init_nr; i--)
 384                        to[i] = (u64)(from[i]);
 385        }
 386
 387        put_callchain_entry(rctx);
 388
 389        return entry;
 390#else /* CONFIG_STACKTRACE */
 391        return NULL;
 392#endif
 393}
 394
 395static long __bpf_get_stackid(struct bpf_map *map,
 396                              struct perf_callchain_entry *trace, u64 flags)
 397{
 398        struct bpf_stack_map *smap = container_of(map, struct bpf_stack_map, map);
 399        struct stack_map_bucket *bucket, *new_bucket, *old_bucket;
 400        u32 max_depth = map->value_size / stack_map_data_size(map);
 401        /* stack_map_alloc() checks that max_depth <= sysctl_perf_event_max_stack */
 402        u32 init_nr = sysctl_perf_event_max_stack - max_depth;
 403        u32 skip = flags & BPF_F_SKIP_FIELD_MASK;
 404        u32 hash, id, trace_nr, trace_len;
 405        bool user = flags & BPF_F_USER_STACK;
 406        u64 *ips;
 407        bool hash_matches;
 408
 409        /* get_perf_callchain() guarantees that trace->nr >= init_nr
 410         * and trace->nr <= sysctl_perf_event_max_stack, so trace_nr <= max_depth
 411         */
 412        trace_nr = trace->nr - init_nr;
 413
 414        if (trace_nr <= skip)
 415                /* skipping more than the usable stack trace */
 416                return -EFAULT;
 417
 418        trace_nr -= skip;
 419        trace_len = trace_nr * sizeof(u64);
 420        ips = trace->ip + skip + init_nr;
 421        hash = jhash2((u32 *)ips, trace_len / sizeof(u32), 0);
 422        id = hash & (smap->n_buckets - 1);
 423        bucket = READ_ONCE(smap->buckets[id]);
 424
 425        hash_matches = bucket && bucket->hash == hash;
 426        /* fast cmp */
 427        if (hash_matches && flags & BPF_F_FAST_STACK_CMP)
 428                return id;
 429
 430        if (stack_map_use_build_id(map)) {
 431                /* for build_id+offset, pop a bucket before slow cmp */
 432                new_bucket = (struct stack_map_bucket *)
 433                        pcpu_freelist_pop(&smap->freelist);
 434                if (unlikely(!new_bucket))
 435                        return -ENOMEM;
 436                new_bucket->nr = trace_nr;
 437                stack_map_get_build_id_offset(
 438                        (struct bpf_stack_build_id *)new_bucket->data,
 439                        ips, trace_nr, user);
 440                trace_len = trace_nr * sizeof(struct bpf_stack_build_id);
 441                if (hash_matches && bucket->nr == trace_nr &&
 442                    memcmp(bucket->data, new_bucket->data, trace_len) == 0) {
 443                        pcpu_freelist_push(&smap->freelist, &new_bucket->fnode);
 444                        return id;
 445                }
 446                if (bucket && !(flags & BPF_F_REUSE_STACKID)) {
 447                        pcpu_freelist_push(&smap->freelist, &new_bucket->fnode);
 448                        return -EEXIST;
 449                }
 450        } else {
 451                if (hash_matches && bucket->nr == trace_nr &&
 452                    memcmp(bucket->data, ips, trace_len) == 0)
 453                        return id;
 454                if (bucket && !(flags & BPF_F_REUSE_STACKID))
 455                        return -EEXIST;
 456
 457                new_bucket = (struct stack_map_bucket *)
 458                        pcpu_freelist_pop(&smap->freelist);
 459                if (unlikely(!new_bucket))
 460                        return -ENOMEM;
 461                memcpy(new_bucket->data, ips, trace_len);
 462        }
 463
 464        new_bucket->hash = hash;
 465        new_bucket->nr = trace_nr;
 466
 467        old_bucket = xchg(&smap->buckets[id], new_bucket);
 468        if (old_bucket)
 469                pcpu_freelist_push(&smap->freelist, &old_bucket->fnode);
 470        return id;
 471}
 472
 473BPF_CALL_3(bpf_get_stackid, struct pt_regs *, regs, struct bpf_map *, map,
 474           u64, flags)
 475{
 476        u32 max_depth = map->value_size / stack_map_data_size(map);
 477        /* stack_map_alloc() checks that max_depth <= sysctl_perf_event_max_stack */
 478        u32 init_nr = sysctl_perf_event_max_stack - max_depth;
 479        bool user = flags & BPF_F_USER_STACK;
 480        struct perf_callchain_entry *trace;
 481        bool kernel = !user;
 482
 483        if (unlikely(flags & ~(BPF_F_SKIP_FIELD_MASK | BPF_F_USER_STACK |
 484                               BPF_F_FAST_STACK_CMP | BPF_F_REUSE_STACKID)))
 485                return -EINVAL;
 486
 487        trace = get_perf_callchain(regs, init_nr, kernel, user,
 488                                   sysctl_perf_event_max_stack, false, false);
 489
 490        if (unlikely(!trace))
 491                /* couldn't fetch the stack trace */
 492                return -EFAULT;
 493
 494        return __bpf_get_stackid(map, trace, flags);
 495}
 496
 497const struct bpf_func_proto bpf_get_stackid_proto = {
 498        .func           = bpf_get_stackid,
 499        .gpl_only       = true,
 500        .ret_type       = RET_INTEGER,
 501        .arg1_type      = ARG_PTR_TO_CTX,
 502        .arg2_type      = ARG_CONST_MAP_PTR,
 503        .arg3_type      = ARG_ANYTHING,
 504};
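
/*
 * A minimal BPF-program-side sketch of the typical pairing of this helper
 * with a stack trace map (libbpf-style C; the map name, attach point and
 * sizes are illustrative assumptions, not part of this file):
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_STACK_TRACE);
 *		__uint(max_entries, 16384);
 *		__uint(key_size, sizeof(__u32));
 *		__uint(value_size, 127 * sizeof(__u64));
 *	} stack_traces SEC(".maps");
 *
 *	SEC("kprobe/try_to_wake_up")
 *	int trace_wakeup(struct pt_regs *ctx)
 *	{
 *		long id = bpf_get_stackid(ctx, &stack_traces,
 *					  BPF_F_FAST_STACK_CMP);
 *
 *		if (id >= 0)
 *			bpf_printk("stackid %d", (int)id);
 *		return 0;
 *	}
 */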
 505
 506static __u64 count_kernel_ip(struct perf_callchain_entry *trace)
 507{
 508        __u64 nr_kernel = 0;
 509
 510        while (nr_kernel < trace->nr) {
 511                if (trace->ip[nr_kernel] == PERF_CONTEXT_USER)
 512                        break;
 513                nr_kernel++;
 514        }
 515        return nr_kernel;
 516}
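
/*
 * For illustration, a mixed perf callchain embeds its context markers in
 * the ip array itself, e.g.
 *
 *	ip[] = { PERF_CONTEXT_KERNEL, k1, k2, PERF_CONTEXT_USER, u1, u2 }
 *
 * for which count_kernel_ip() returns 3: the kernel marker plus the kernel
 * frames, i.e. the index at which the user portion begins.
 */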
 517
 518BPF_CALL_3(bpf_get_stackid_pe, struct bpf_perf_event_data_kern *, ctx,
 519           struct bpf_map *, map, u64, flags)
 520{
 521        struct perf_event *event = ctx->event;
 522        struct perf_callchain_entry *trace;
 523        bool kernel, user;
 524        __u64 nr_kernel;
 525        int ret;
 526
 527        /* perf_sample_data doesn't have callchain, use bpf_get_stackid */
 528        if (!(event->attr.sample_type & __PERF_SAMPLE_CALLCHAIN_EARLY))
 529                return bpf_get_stackid((unsigned long)(ctx->regs),
 530                                       (unsigned long) map, flags, 0, 0);
 531
 532        if (unlikely(flags & ~(BPF_F_SKIP_FIELD_MASK | BPF_F_USER_STACK |
 533                               BPF_F_FAST_STACK_CMP | BPF_F_REUSE_STACKID)))
 534                return -EINVAL;
 535
 536        user = flags & BPF_F_USER_STACK;
 537        kernel = !user;
 538
 539        trace = ctx->data->callchain;
 540        if (unlikely(!trace))
 541                return -EFAULT;
 542
 543        nr_kernel = count_kernel_ip(trace);
 544
 545        if (kernel) {
 546                __u64 nr = trace->nr;
 547
 548                trace->nr = nr_kernel;
 549                ret = __bpf_get_stackid(map, trace, flags);
 550
 551                /* restore nr */
 552                trace->nr = nr;
 553        } else { /* user */
 554                u64 skip = flags & BPF_F_SKIP_FIELD_MASK;
 555
 556                skip += nr_kernel;
 557                if (skip > BPF_F_SKIP_FIELD_MASK)
 558                        return -EFAULT;
 559
 560                flags = (flags & ~BPF_F_SKIP_FIELD_MASK) | skip;
 561                ret = __bpf_get_stackid(map, trace, flags);
 562        }
 563        return ret;
 564}
 565
 566const struct bpf_func_proto bpf_get_stackid_proto_pe = {
 567        .func           = bpf_get_stackid_pe,
 568        .gpl_only       = false,
 569        .ret_type       = RET_INTEGER,
 570        .arg1_type      = ARG_PTR_TO_CTX,
 571        .arg2_type      = ARG_CONST_MAP_PTR,
 572        .arg3_type      = ARG_ANYTHING,
 573};
 574
 575static long __bpf_get_stack(struct pt_regs *regs, struct task_struct *task,
 576                            struct perf_callchain_entry *trace_in,
 577                            void *buf, u32 size, u64 flags)
 578{
 579        u32 init_nr, trace_nr, copy_len, elem_size, num_elem;
 580        bool user_build_id = flags & BPF_F_USER_BUILD_ID;
 581        u32 skip = flags & BPF_F_SKIP_FIELD_MASK;
 582        bool user = flags & BPF_F_USER_STACK;
 583        struct perf_callchain_entry *trace;
 584        bool kernel = !user;
 585        int err = -EINVAL;
 586        u64 *ips;
 587
 588        if (unlikely(flags & ~(BPF_F_SKIP_FIELD_MASK | BPF_F_USER_STACK |
 589                               BPF_F_USER_BUILD_ID)))
 590                goto clear;
 591        if (kernel && user_build_id)
 592                goto clear;
 593
 594        elem_size = (user && user_build_id) ? sizeof(struct bpf_stack_build_id)
 595                                            : sizeof(u64);
 596        if (unlikely(size % elem_size))
 597                goto clear;
 598
 599        /* cannot get valid user stack for task without user_mode regs */
 600        if (task && user && !user_mode(regs))
 601                goto err_fault;
 602
 603        num_elem = size / elem_size;
 604        if (sysctl_perf_event_max_stack < num_elem)
 605                init_nr = 0;
 606        else
 607                init_nr = sysctl_perf_event_max_stack - num_elem;
 608
 609        if (trace_in)
 610                trace = trace_in;
 611        else if (kernel && task)
 612                trace = get_callchain_entry_for_task(task, init_nr);
 613        else
 614                trace = get_perf_callchain(regs, init_nr, kernel, user,
 615                                           sysctl_perf_event_max_stack,
 616                                           false, false);
 617        if (unlikely(!trace))
 618                goto err_fault;
 619
 620        trace_nr = trace->nr - init_nr;
 621        if (trace_nr < skip)
 622                goto err_fault;
 623
 624        trace_nr -= skip;
 625        trace_nr = (trace_nr <= num_elem) ? trace_nr : num_elem;
 626        copy_len = trace_nr * elem_size;
 627        ips = trace->ip + skip + init_nr;
 628        if (user && user_build_id)
 629                stack_map_get_build_id_offset(buf, ips, trace_nr, user);
 630        else
 631                memcpy(buf, ips, copy_len);
 632
 633        if (size > copy_len)
 634                memset(buf + copy_len, 0, size - copy_len);
 635        return copy_len;
 636
 637err_fault:
 638        err = -EFAULT;
 639clear:
 640        memset(buf, 0, size);
 641        return err;
 642}
 643
 644BPF_CALL_4(bpf_get_stack, struct pt_regs *, regs, void *, buf, u32, size,
 645           u64, flags)
 646{
 647        return __bpf_get_stack(regs, NULL, NULL, buf, size, flags);
 648}
 649
 650const struct bpf_func_proto bpf_get_stack_proto = {
 651        .func           = bpf_get_stack,
 652        .gpl_only       = true,
 653        .ret_type       = RET_INTEGER,
 654        .arg1_type      = ARG_PTR_TO_CTX,
 655        .arg2_type      = ARG_PTR_TO_UNINIT_MEM,
 656        .arg3_type      = ARG_CONST_SIZE_OR_ZERO,
 657        .arg4_type      = ARG_ANYTHING,
 658};
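
/*
 * A minimal BPF-program-side sketch of the buffer variant (the 32-frame
 * buffer is an arbitrary choice): on success the helper returns the number
 * of bytes written, a multiple of 8 here (or of
 * sizeof(struct bpf_stack_build_id) with BPF_F_USER_BUILD_ID):
 *
 *	__u64 ips[32];
 *	long n;
 *
 *	n = bpf_get_stack(ctx, ips, sizeof(ips), BPF_F_USER_STACK);
 *	if (n > 0)
 *		n /= sizeof(__u64);      // n is now the number of frames
 */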
 659
 660BPF_CALL_4(bpf_get_task_stack, struct task_struct *, task, void *, buf,
 661           u32, size, u64, flags)
 662{
 663        struct pt_regs *regs = task_pt_regs(task);
 664
 665        return __bpf_get_stack(regs, task, NULL, buf, size, flags);
 666}
 667
 668BTF_ID_LIST(bpf_get_task_stack_btf_ids)
 669BTF_ID(struct, task_struct)
 670
 671const struct bpf_func_proto bpf_get_task_stack_proto = {
 672        .func           = bpf_get_task_stack,
 673        .gpl_only       = false,
 674        .ret_type       = RET_INTEGER,
 675        .arg1_type      = ARG_PTR_TO_BTF_ID,
 676        .arg2_type      = ARG_PTR_TO_UNINIT_MEM,
 677        .arg3_type      = ARG_CONST_SIZE_OR_ZERO,
 678        .arg4_type      = ARG_ANYTHING,
 679        .btf_id         = bpf_get_task_stack_btf_ids,
 680};
 681
 682BPF_CALL_4(bpf_get_stack_pe, struct bpf_perf_event_data_kern *, ctx,
 683           void *, buf, u32, size, u64, flags)
 684{
 685        struct pt_regs *regs = (struct pt_regs *)(ctx->regs);
 686        struct perf_event *event = ctx->event;
 687        struct perf_callchain_entry *trace;
 688        bool kernel, user;
 689        int err = -EINVAL;
 690        __u64 nr_kernel;
 691
 692        if (!(event->attr.sample_type & __PERF_SAMPLE_CALLCHAIN_EARLY))
 693                return __bpf_get_stack(regs, NULL, NULL, buf, size, flags);
 694
 695        if (unlikely(flags & ~(BPF_F_SKIP_FIELD_MASK | BPF_F_USER_STACK |
 696                               BPF_F_USER_BUILD_ID)))
 697                goto clear;
 698
 699        user = flags & BPF_F_USER_STACK;
 700        kernel = !user;
 701
 702        err = -EFAULT;
 703        trace = ctx->data->callchain;
 704        if (unlikely(!trace))
 705                goto clear;
 706
 707        nr_kernel = count_kernel_ip(trace);
 708
 709        if (kernel) {
 710                __u64 nr = trace->nr;
 711
 712                trace->nr = nr_kernel;
 713                err = __bpf_get_stack(regs, NULL, trace, buf, size, flags);
 714
 715                /* restore nr */
 716                trace->nr = nr;
 717        } else { /* user */
 718                u64 skip = flags & BPF_F_SKIP_FIELD_MASK;
 719
 720                skip += nr_kernel;
 721                if (skip > BPF_F_SKIP_FIELD_MASK)
 722                        goto clear;
 723
 724                flags = (flags & ~BPF_F_SKIP_FIELD_MASK) | skip;
 725                err = __bpf_get_stack(regs, NULL, trace, buf, size, flags);
 726        }
 727        return err;
 728
 729clear:
 730        memset(buf, 0, size);
 731        return err;
 732
 733}
 734
 735const struct bpf_func_proto bpf_get_stack_proto_pe = {
 736        .func           = bpf_get_stack_pe,
 737        .gpl_only       = true,
 738        .ret_type       = RET_INTEGER,
 739        .arg1_type      = ARG_PTR_TO_CTX,
 740        .arg2_type      = ARG_PTR_TO_UNINIT_MEM,
 741        .arg3_type      = ARG_CONST_SIZE_OR_ZERO,
 742        .arg4_type      = ARG_ANYTHING,
 743};
 744
 745/* Called from eBPF program */
 746static void *stack_map_lookup_elem(struct bpf_map *map, void *key)
 747{
 748        return ERR_PTR(-EOPNOTSUPP);
 749}
 750
 751/* Called from syscall */
 752int bpf_stackmap_copy(struct bpf_map *map, void *key, void *value)
 753{
 754        struct bpf_stack_map *smap = container_of(map, struct bpf_stack_map, map);
 755        struct stack_map_bucket *bucket, *old_bucket;
 756        u32 id = *(u32 *)key, trace_len;
 757
 758        if (unlikely(id >= smap->n_buckets))
 759                return -ENOENT;
 760
 761        bucket = xchg(&smap->buckets[id], NULL);
 762        if (!bucket)
 763                return -ENOENT;
 764
 765        trace_len = bucket->nr * stack_map_data_size(map);
 766        memcpy(value, bucket->data, trace_len);
 767        memset(value + trace_len, 0, map->value_size - trace_len);
 768
 769        old_bucket = xchg(&smap->buckets[id], bucket);
 770        if (old_bucket)
 771                pcpu_freelist_push(&smap->freelist, &old_bucket->fnode);
 772        return 0;
 773}
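
/*
 * For illustration, user space reads a trace back with the id returned by
 * bpf_get_stackid() as the key; the bucket is swapped out only while being
 * copied, so a lookup does not consume it. map_fd and stack_id are
 * assumptions standing in for values obtained elsewhere:
 *
 *	__u64 ips[127] = {};
 *	int i;
 *
 *	if (bpf_map_lookup_elem(map_fd, &stack_id, ips) == 0)
 *		for (i = 0; i < 127 && ips[i]; i++)
 *			printf("%#llx\n", (unsigned long long)ips[i]);
 *
 * (the unused tail of the value is zeroed above, so stopping at the first
 * zero entry is safe)
 */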
 774
 775static int stack_map_get_next_key(struct bpf_map *map, void *key,
 776                                  void *next_key)
 777{
 778        struct bpf_stack_map *smap = container_of(map,
 779                                                  struct bpf_stack_map, map);
 780        u32 id;
 781
 782        WARN_ON_ONCE(!rcu_read_lock_held());
 783
 784        if (!key) {
 785                id = 0;
 786        } else {
 787                id = *(u32 *)key;
 788                if (id >= smap->n_buckets || !smap->buckets[id])
 789                        id = 0;
 790                else
 791                        id++;
 792        }
 793
 794        while (id < smap->n_buckets && !smap->buckets[id])
 795                id++;
 796
 797        if (id >= smap->n_buckets)
 798                return -ENOENT;
 799
 800        *(u32 *)next_key = id;
 801        return 0;
 802}
 803
 804static int stack_map_update_elem(struct bpf_map *map, void *key, void *value,
 805                                 u64 map_flags)
 806{
 807        return -EINVAL;
 808}
 809
 810/* Called from syscall or from eBPF program */
 811static int stack_map_delete_elem(struct bpf_map *map, void *key)
 812{
 813        struct bpf_stack_map *smap = container_of(map, struct bpf_stack_map, map);
 814        struct stack_map_bucket *old_bucket;
 815        u32 id = *(u32 *)key;
 816
 817        if (unlikely(id >= smap->n_buckets))
 818                return -E2BIG;
 819
 820        old_bucket = xchg(&smap->buckets[id], NULL);
 821        if (old_bucket) {
 822                pcpu_freelist_push(&smap->freelist, &old_bucket->fnode);
 823                return 0;
 824        } else {
 825                return -ENOENT;
 826        }
 827}
 828
 829/* Called when map->refcnt goes to zero, either from workqueue or from syscall */
 830static void stack_map_free(struct bpf_map *map)
 831{
 832        struct bpf_stack_map *smap = container_of(map, struct bpf_stack_map, map);
 833
 834        bpf_map_area_free(smap->elems);
 835        pcpu_freelist_destroy(&smap->freelist);
 836        bpf_map_area_free(smap);
 837        put_callchain_buffers();
 838}
 839
 840static int stack_trace_map_btf_id;
 841const struct bpf_map_ops stack_trace_map_ops = {
 842        .map_alloc = stack_map_alloc,
 843        .map_free = stack_map_free,
 844        .map_get_next_key = stack_map_get_next_key,
 845        .map_lookup_elem = stack_map_lookup_elem,
 846        .map_update_elem = stack_map_update_elem,
 847        .map_delete_elem = stack_map_delete_elem,
 848        .map_check_btf = map_check_no_btf,
 849        .map_btf_name = "bpf_stack_map",
 850        .map_btf_id = &stack_trace_map_btf_id,
 851};
 852
 853static int __init stack_map_init(void)
 854{
 855        int cpu;
 856        struct stack_map_irq_work *work;
 857
 858        for_each_possible_cpu(cpu) {
 859                work = per_cpu_ptr(&up_read_work, cpu);
 860                init_irq_work(&work->irq_work, do_up_read);
 861        }
 862        return 0;
 863}
 864subsys_initcall(stack_map_init);
 865