linux/kernel/events/callchain.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Performance events callchain code, extracted from core.c:
 *
 *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 *  Copyright (C) 2008-2011 Red Hat, Inc., Ingo Molnar
 *  Copyright (C) 2008-2011 Red Hat, Inc., Peter Zijlstra
 *  Copyright  ©  2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 */

#include <linux/perf_event.h>
#include <linux/slab.h>
#include <linux/sched/task_stack.h>

#include "internal.h"

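/*
 * Lazily allocated per-CPU callchain buffers, looked up under RCU
 * (including from NMI context); each CPU gets PERF_NR_CONTEXTS
 * entries, one per recursion context.
 */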
struct callchain_cpus_entries {
        struct rcu_head                 rcu_head;
        struct perf_callchain_entry     *cpu_entries[];
};

int sysctl_perf_event_max_stack __read_mostly = PERF_MAX_STACK_DEPTH;
int sysctl_perf_event_max_contexts_per_stack __read_mostly = PERF_MAX_CONTEXTS_PER_STACK;

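/*
 * One entry holds up to sysctl_perf_event_max_stack frames plus up to
 * sysctl_perf_event_max_contexts_per_stack context markers
 * (PERF_CONTEXT_KERNEL, PERF_CONTEXT_USER, ...), each a __u64 slot.
 */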
static inline size_t perf_callchain_entry__sizeof(void)
{
        return (sizeof(struct perf_callchain_entry) +
                sizeof(__u64) * (sysctl_perf_event_max_stack +
                                 sysctl_perf_event_max_contexts_per_stack));
}

static DEFINE_PER_CPU(int, callchain_recursion[PERF_NR_CONTEXTS]);
static atomic_t nr_callchain_events;
static DEFINE_MUTEX(callchain_mutex);
static struct callchain_cpus_entries *callchain_cpus_entries;

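/*
 * Weak stubs; architectures that support callchain sampling provide
 * their own perf_callchain_kernel()/perf_callchain_user().
 */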
__weak void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
                                  struct pt_regs *regs)
{
}

__weak void perf_callchain_user(struct perf_callchain_entry_ctx *entry,
                                struct pt_regs *regs)
{
}

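/*
 * Buffers are freed via call_rcu() so that lock-less readers which
 * obtained the pointer with rcu_dereference() are allowed to finish
 * before the per-CPU buffers go away.
 */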
static void release_callchain_buffers_rcu(struct rcu_head *head)
{
        struct callchain_cpus_entries *entries;
        int cpu;

        entries = container_of(head, struct callchain_cpus_entries, rcu_head);

        for_each_possible_cpu(cpu)
                kfree(entries->cpu_entries[cpu]);

        kfree(entries);
}

static void release_callchain_buffers(void)
{
        struct callchain_cpus_entries *entries;

        entries = callchain_cpus_entries;
        RCU_INIT_POINTER(callchain_cpus_entries, NULL);
        call_rcu(&entries->rcu_head, release_callchain_buffers_rcu);
}

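/*
 * Allocate the per-CPU buffers on first use. The per-CPU entries are
 * allocated manually (not via the percpu allocator, see the comment
 * below) and published with rcu_assign_pointer() once fully set up.
 */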
static int alloc_callchain_buffers(void)
{
        int cpu;
        int size;
        struct callchain_cpus_entries *entries;

        /*
         * We can't use the percpu allocation API for data that can be
         * accessed from NMI. Use a temporary manual per cpu allocation
         * until that gets sorted out.
         */
        size = offsetof(struct callchain_cpus_entries, cpu_entries[nr_cpu_ids]);

        entries = kzalloc(size, GFP_KERNEL);
        if (!entries)
                return -ENOMEM;

        size = perf_callchain_entry__sizeof() * PERF_NR_CONTEXTS;

        for_each_possible_cpu(cpu) {
                entries->cpu_entries[cpu] = kmalloc_node(size, GFP_KERNEL,
                                                         cpu_to_node(cpu));
                if (!entries->cpu_entries[cpu])
                        goto fail;
        }

        rcu_assign_pointer(callchain_cpus_entries, entries);

        return 0;

fail:
        for_each_possible_cpu(cpu)
                kfree(entries->cpu_entries[cpu]);
        kfree(entries);

        return -ENOMEM;
}

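/*
 * Take a reference for an event that records callchains; the first
 * user allocates the buffers. Returns -EOVERFLOW (rather than a plain
 * failure) when the event asks for a deeper stack than the global
 * sysctl limit allows.
 */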
int get_callchain_buffers(int event_max_stack)
{
        int err = 0;
        int count;

        mutex_lock(&callchain_mutex);

        count = atomic_inc_return(&nr_callchain_events);
        if (WARN_ON_ONCE(count < 1)) {
                err = -EINVAL;
                goto exit;
        }

        /*
         * If the event requests a deeper stack than the global cap,
         * return a distinct error so that userspace can tell this
         * apart from an allocation failure.
         *
         * Do the check here so that it runs with &callchain_mutex held.
         */
        if (event_max_stack > sysctl_perf_event_max_stack) {
                err = -EOVERFLOW;
                goto exit;
        }

        if (count == 1)
                err = alloc_callchain_buffers();
exit:
        if (err)
                atomic_dec(&nr_callchain_events);

        mutex_unlock(&callchain_mutex);

        return err;
}

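/*
 * Drop a reference; the last user schedules the buffers for release
 * through RCU.
 */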
void put_callchain_buffers(void)
{
        if (atomic_dec_and_mutex_lock(&nr_callchain_events, &callchain_mutex)) {
                release_callchain_buffers();
                mutex_unlock(&callchain_mutex);
        }
}

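/*
 * Grab the entry for the current CPU and recursion context, or NULL if
 * this context is already unwinding or no buffers are allocated. A
 * successful call must be paired with put_callchain_entry(rctx).
 */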
struct perf_callchain_entry *get_callchain_entry(int *rctx)
{
        int cpu;
        struct callchain_cpus_entries *entries;

        *rctx = get_recursion_context(this_cpu_ptr(callchain_recursion));
        if (*rctx == -1)
                return NULL;

        entries = rcu_dereference(callchain_cpus_entries);
        if (!entries) {
                put_recursion_context(this_cpu_ptr(callchain_recursion), *rctx);
                return NULL;
        }

        cpu = smp_processor_id();

        return (((void *)entries->cpu_entries[cpu]) +
                (*rctx * perf_callchain_entry__sizeof()));
}

void
put_callchain_entry(int rctx)
{
        put_recursion_context(this_cpu_ptr(callchain_recursion), rctx);
}

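/*
 * Capture a callchain for @regs. Kernel frames are only recorded when
 * the sample hit kernel mode; for the user part the user-space
 * register state is recovered via task_pt_regs() when the sample
 * interrupted the kernel. Cross-task user unwinding is not attempted.
 * Returns NULL if no buffer could be obtained.
 */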
struct perf_callchain_entry *
get_perf_callchain(struct pt_regs *regs, u32 init_nr, bool kernel, bool user,
                   u32 max_stack, bool crosstask, bool add_mark)
{
        struct perf_callchain_entry *entry;
        struct perf_callchain_entry_ctx ctx;
        int rctx;

        entry = get_callchain_entry(&rctx);
        if (!entry)
                return NULL;

        ctx.entry          = entry;
        ctx.max_stack      = max_stack;
        ctx.nr             = entry->nr = init_nr;
        ctx.contexts       = 0;
        ctx.contexts_maxed = false;

        if (kernel && !user_mode(regs)) {
                if (add_mark)
                        perf_callchain_store_context(&ctx, PERF_CONTEXT_KERNEL);
                perf_callchain_kernel(&ctx, regs);
        }

        if (user) {
                if (!user_mode(regs)) {
                        if (current->mm)
                                regs = task_pt_regs(current);
                        else
                                regs = NULL;
                }

                if (regs) {
                        mm_segment_t fs;

                        if (crosstask)
                                goto exit_put;

                        if (add_mark)
                                perf_callchain_store_context(&ctx, PERF_CONTEXT_USER);

                        fs = force_uaccess_begin();
                        perf_callchain_user(&ctx, regs);
                        force_uaccess_end(fs);
                }
        }

exit_put:
        put_callchain_entry(rctx);

        return entry;
}
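/*
 * Rough usage sketch (illustrative only; the real callers live in
 * kernel/events/core.c and the BPF stack map code):
 *
 *      entry = get_perf_callchain(regs, 0, true, true,
 *                                 sysctl_perf_event_max_stack,
 *                                 false, true);
 *      if (entry)
 *              ... entry->nr frames are valid in entry->ip[] ...
 */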

/*
 * Used for sysctl_perf_event_max_stack and
 * sysctl_perf_event_max_contexts_per_stack.
 */
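/*
 * Changing either limit is refused with -EBUSY while callchain users
 * exist, because the per-CPU buffers were sized with the old values
 * at allocation time.
 */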
int perf_event_max_stack_handler(struct ctl_table *table, int write,
                                 void *buffer, size_t *lenp, loff_t *ppos)
{
        int *value = table->data;
        int new_value = *value, ret;
        struct ctl_table new_table = *table;

        new_table.data = &new_value;
        ret = proc_dointvec_minmax(&new_table, write, buffer, lenp, ppos);
        if (ret || !write)
                return ret;

        mutex_lock(&callchain_mutex);
        if (atomic_read(&nr_callchain_events))
                ret = -EBUSY;
        else
                *value = new_value;

        mutex_unlock(&callchain_mutex);

        return ret;
}