linux/kernel/events/callchain.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Performance events callchain code, extracted from core.c:
 *
 *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 *  Copyright (C) 2008-2011 Red Hat, Inc., Ingo Molnar
 *  Copyright (C) 2008-2011 Red Hat, Inc., Peter Zijlstra
 *  Copyright  ©  2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 */

#include <linux/perf_event.h>
#include <linux/slab.h>
#include <linux/sched/task_stack.h>

#include "internal.h"

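/*
 * Container for the per-CPU callchain sampling buffers. cpu_entries[] is
 * sized at runtime for nr_cpu_ids CPUs and the whole structure is freed
 * via RCU, so users running in NMI context can dereference it safely.
 */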
struct callchain_cpus_entries {
        struct rcu_head                 rcu_head;
        struct perf_callchain_entry     *cpu_entries[0];
};

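/*
 * Global depth limits, exposed as the kernel.perf_event_max_stack and
 * kernel.perf_event_max_contexts_per_stack sysctls.
 */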
int sysctl_perf_event_max_stack __read_mostly = PERF_MAX_STACK_DEPTH;
int sysctl_perf_event_max_contexts_per_stack __read_mostly = PERF_MAX_CONTEXTS_PER_STACK;

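/*
 * Size of one callchain entry: the fixed header plus one __u64 slot for
 * every stack frame and every context marker that may be stored.
 */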
static inline size_t perf_callchain_entry__sizeof(void)
{
        return (sizeof(struct perf_callchain_entry) +
                sizeof(__u64) * (sysctl_perf_event_max_stack +
                                 sysctl_perf_event_max_contexts_per_stack));
}

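/*
 * Per-CPU recursion counters (one per recursion context), a count of
 * callchain-using events, and the lazily allocated buffers; allocation
 * and release are serialized by callchain_mutex.
 */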
static DEFINE_PER_CPU(int, callchain_recursion[PERF_NR_CONTEXTS]);
static atomic_t nr_callchain_events;
static DEFINE_MUTEX(callchain_mutex);
static struct callchain_cpus_entries *callchain_cpus_entries;


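/*
 * Weak no-op defaults; architectures that can unwind kernel and/or user
 * stacks override these.
 */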
__weak void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
                                  struct pt_regs *regs)
{
}

__weak void perf_callchain_user(struct perf_callchain_entry_ctx *entry,
                                struct pt_regs *regs)
{
}

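/* RCU callback: free each CPU's buffer, then the container itself. */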
static void release_callchain_buffers_rcu(struct rcu_head *head)
{
        struct callchain_cpus_entries *entries;
        int cpu;

        entries = container_of(head, struct callchain_cpus_entries, rcu_head);

        for_each_possible_cpu(cpu)
                kfree(entries->cpu_entries[cpu]);

        kfree(entries);
}

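/*
 * Unpublish the buffers and defer the actual freeing to an RCU callback,
 * so concurrent readers of callchain_cpus_entries are not left with a
 * dangling pointer.
 */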
static void release_callchain_buffers(void)
{
        struct callchain_cpus_entries *entries;

        entries = callchain_cpus_entries;
        RCU_INIT_POINTER(callchain_cpus_entries, NULL);
        call_rcu(&entries->rcu_head, release_callchain_buffers_rcu);
}

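/*
 * Allocate a buffer of PERF_NR_CONTEXTS callchain entries for every
 * possible CPU, NUMA-local to that CPU, then publish the result with
 * rcu_assign_pointer().
 */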
static int alloc_callchain_buffers(void)
{
        int cpu;
        int size;
        struct callchain_cpus_entries *entries;

        /*
         * We can't use the percpu allocation API for data that can be
         * accessed from NMI. Use a temporary manual per cpu allocation
         * until that gets sorted out.
         */
        size = offsetof(struct callchain_cpus_entries, cpu_entries[nr_cpu_ids]);

        entries = kzalloc(size, GFP_KERNEL);
        if (!entries)
                return -ENOMEM;

        size = perf_callchain_entry__sizeof() * PERF_NR_CONTEXTS;

        for_each_possible_cpu(cpu) {
                entries->cpu_entries[cpu] = kmalloc_node(size, GFP_KERNEL,
                                                         cpu_to_node(cpu));
                if (!entries->cpu_entries[cpu])
                        goto fail;
        }

        rcu_assign_pointer(callchain_cpus_entries, entries);

        return 0;

fail:
        for_each_possible_cpu(cpu)
                kfree(entries->cpu_entries[cpu]);
        kfree(entries);

        return -ENOMEM;
}

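/*
 * Take a reference on the callchain buffers, allocating them on first use.
 * Returns -EOVERFLOW (rather than -EINVAL) when the event asks for a deeper
 * stack than the global sysctl cap, so userspace can tell the two failures
 * apart.
 */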
int get_callchain_buffers(int event_max_stack)
{
        int err = 0;
        int count;

        mutex_lock(&callchain_mutex);

        count = atomic_inc_return(&nr_callchain_events);
        if (WARN_ON_ONCE(count < 1)) {
                err = -EINVAL;
                goto exit;
        }

        /*
         * If the event requests a deeper stack than the global cap,
         * return a different error to help userspace figure this out.
         *
         * Also do it here so that we have callchain_mutex held.
         */
        if (event_max_stack > sysctl_perf_event_max_stack) {
                err = -EOVERFLOW;
                goto exit;
        }

        if (count == 1)
                err = alloc_callchain_buffers();
exit:
        if (err)
                atomic_dec(&nr_callchain_events);

        mutex_unlock(&callchain_mutex);

        return err;
}

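/* Drop a reference; the last user releases the buffers. */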
void put_callchain_buffers(void)
{
        if (atomic_dec_and_mutex_lock(&nr_callchain_events, &callchain_mutex)) {
                release_callchain_buffers();
                mutex_unlock(&callchain_mutex);
        }
}

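/*
 * Enter a recursion context and return this CPU's callchain entry slot for
 * it, or NULL if we are already recursing in this context or the buffers
 * have not been allocated.
 */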
static struct perf_callchain_entry *get_callchain_entry(int *rctx)
{
        int cpu;
        struct callchain_cpus_entries *entries;

        *rctx = get_recursion_context(this_cpu_ptr(callchain_recursion));
        if (*rctx == -1)
                return NULL;

        entries = rcu_dereference(callchain_cpus_entries);
        if (!entries)
                return NULL;

        cpu = smp_processor_id();

        return (((void *)entries->cpu_entries[cpu]) +
                (*rctx * perf_callchain_entry__sizeof()));
}

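/* Leave the recursion context taken by get_callchain_entry(). */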
static void
put_callchain_entry(int rctx)
{
        put_recursion_context(this_cpu_ptr(callchain_recursion), rctx);
}

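/*
 * Capture a callchain into a per-CPU entry. Kernel frames are collected
 * when @kernel is set and the event fired in kernel mode; user frames are
 * collected when @user is set, falling back to the task's saved user
 * registers if we interrupted the kernel. @add_mark inserts
 * PERF_CONTEXT_KERNEL/PERF_CONTEXT_USER separators and @crosstask
 * suppresses user unwinding for cross-task events.
 */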
struct perf_callchain_entry *
get_perf_callchain(struct pt_regs *regs, u32 init_nr, bool kernel, bool user,
                   u32 max_stack, bool crosstask, bool add_mark)
{
        struct perf_callchain_entry *entry;
        struct perf_callchain_entry_ctx ctx;
        int rctx;

        entry = get_callchain_entry(&rctx);
        if (rctx == -1)
                return NULL;

        if (!entry)
                goto exit_put;

        ctx.entry     = entry;
        ctx.max_stack = max_stack;
        ctx.nr        = entry->nr = init_nr;
        ctx.contexts       = 0;
        ctx.contexts_maxed = false;

        if (kernel && !user_mode(regs)) {
                if (add_mark)
                        perf_callchain_store_context(&ctx, PERF_CONTEXT_KERNEL);
                perf_callchain_kernel(&ctx, regs);
        }

        if (user) {
                if (!user_mode(regs)) {
                        if (current->mm)
                                regs = task_pt_regs(current);
                        else
                                regs = NULL;
                }

                if (regs) {
                        mm_segment_t fs;

                        if (crosstask)
                                goto exit_put;

                        if (add_mark)
                                perf_callchain_store_context(&ctx, PERF_CONTEXT_USER);

                        fs = get_fs();
                        set_fs(USER_DS);
                        perf_callchain_user(&ctx, regs);
                        set_fs(fs);
                }
        }

exit_put:
        put_callchain_entry(rctx);

        return entry;
}

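/*
 * A minimal usage sketch, not taken from this file: a hypothetical sampler
 * grabbing a callchain for the current event (the in-tree caller is
 * perf_callchain() in kernel/events/core.c). Error handling is elided.
 *
 *	struct perf_callchain_entry *chain;
 *
 *	if (get_callchain_buffers(sysctl_perf_event_max_stack))
 *		return;		// no buffers, no callchains
 *
 *	chain = get_perf_callchain(regs, 0, true, true,
 *				   sysctl_perf_event_max_stack,
 *				   false, true);
 *	if (chain) {
 *		// chain->nr frames (and context markers) in chain->ip[]
 *	}
 *
 *	put_callchain_buffers();
 */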
/*
 * Used for sysctl_perf_event_max_stack and
 * sysctl_perf_event_max_contexts_per_stack.
 */
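/*
 * For illustration only: from userspace these limits are adjusted through
 * procfs, e.g. "echo 127 > /proc/sys/kernel/perf_event_max_stack", and the
 * write fails with EBUSY while any callchain-using events exist.
 */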
int perf_event_max_stack_handler(struct ctl_table *table, int write,
                                 void __user *buffer, size_t *lenp, loff_t *ppos)
{
        int *value = table->data;
        int new_value = *value, ret;
        struct ctl_table new_table = *table;

        new_table.data = &new_value;
        ret = proc_dointvec_minmax(&new_table, write, buffer, lenp, ppos);
        if (ret || !write)
                return ret;

        mutex_lock(&callchain_mutex);
        if (atomic_read(&nr_callchain_events))
                ret = -EBUSY;
        else
                *value = new_value;

        mutex_unlock(&callchain_mutex);

        return ret;
}