linux/kernel/trace/trace_event_perf.c
/*
 * trace event based perf event profiling/tracing
 *
 * Copyright (C) 2009 Red Hat Inc, Peter Zijlstra <pzijlstr@redhat.com>
 * Copyright (C) 2009-2010 Frederic Weisbecker <fweisbec@gmail.com>
 */

#include <linux/module.h>
#include <linux/kprobes.h>
#include "trace.h"
#include "trace_probe.h"

static char __percpu *perf_trace_buf[PERF_NR_CONTEXTS];

/*
 * Force it to be aligned to unsigned long to avoid misaligned access
 * surprises.
 */
typedef typeof(unsigned long [PERF_MAX_TRACE_SIZE / sizeof(unsigned long)])
        perf_trace_t;

/* Count the events in use (per event id, not per instance) */
static int      total_ref_count;

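/* May the calling task attach @p_event to @tp_event? 0 if so, -EPERM if not. */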
static int perf_trace_event_perm(struct ftrace_event_call *tp_event,
                                 struct perf_event *p_event)
{
        /*
         * The parent event was already checked and allowed to be created,
         * so allow its children without checking again.
         */
        if (p_event->parent)
                return 0;

        /*
         * It's ok to check current process (owner) permissions in here,
         * because code below is called only via perf_event_open syscall.
         */

        /* The ftrace function trace is allowed only for root. */
        if (ftrace_event_is_function(tp_event) &&
            perf_paranoid_tracepoint_raw() && !capable(CAP_SYS_ADMIN))
                return -EPERM;

        /* No tracing, just counting, so no obvious leak */
        if (!(p_event->attr.sample_type & PERF_SAMPLE_RAW))
                return 0;

        /* Some events are ok to be traced by non-root users... */
        if (p_event->attach_state == PERF_ATTACH_TASK) {
                if (tp_event->flags & TRACE_EVENT_FL_CAP_ANY)
                        return 0;
        }

        /*
         * ...otherwise raw tracepoint data can be a severe data leak,
         * so only allow root to have these.
         */
        if (perf_paranoid_tracepoint_raw() && !capable(CAP_SYS_ADMIN))
                return -EPERM;

        return 0;
}

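/*
 * Register @p_event with @tp_event. The first user of an event allocates
 * its per-CPU lists of attached perf events; the first user system-wide
 * also allocates the shared per-context raw sample buffers. Finally the
 * event class is asked to register its perf probe.
 */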
static int perf_trace_event_reg(struct ftrace_event_call *tp_event,
                                struct perf_event *p_event)
{
        struct hlist_head __percpu *list;
        int ret = -ENOMEM;
        int cpu;

        p_event->tp_event = tp_event;
        if (tp_event->perf_refcount++ > 0)
                return 0;

        list = alloc_percpu(struct hlist_head);
        if (!list)
                goto fail;

        for_each_possible_cpu(cpu)
                INIT_HLIST_HEAD(per_cpu_ptr(list, cpu));

        tp_event->perf_events = list;

        if (!total_ref_count) {
                char __percpu *buf;
                int i;

                for (i = 0; i < PERF_NR_CONTEXTS; i++) {
                        buf = (char __percpu *)alloc_percpu(perf_trace_t);
                        if (!buf)
                                goto fail;

                        perf_trace_buf[i] = buf;
                }
        }

        ret = tp_event->class->reg(tp_event, TRACE_REG_PERF_REGISTER, NULL);
        if (ret)
                goto fail;

        total_ref_count++;
        return 0;

fail:
        if (!total_ref_count) {
                int i;

                for (i = 0; i < PERF_NR_CONTEXTS; i++) {
                        free_percpu(perf_trace_buf[i]);
                        perf_trace_buf[i] = NULL;
                }
        }

        if (!--tp_event->perf_refcount) {
                free_percpu(tp_event->perf_events);
                tp_event->perf_events = NULL;
        }

        return ret;
}

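/*
 * Drop @p_event's reference on its trace event. The last user unregisters
 * the perf probe, waits for in-flight callbacks to finish and frees the
 * per-CPU lists (and, for the last event system-wide, the shared raw
 * sample buffers). The module reference, if any, is dropped in all cases.
 */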
static void perf_trace_event_unreg(struct perf_event *p_event)
{
        struct ftrace_event_call *tp_event = p_event->tp_event;
        int i;

        if (--tp_event->perf_refcount > 0)
                goto out;

        tp_event->class->reg(tp_event, TRACE_REG_PERF_UNREGISTER, NULL);

        /*
         * Ensure our callback won't be called anymore. The buffers
         * will be freed after that.
         */
        tracepoint_synchronize_unregister();

        free_percpu(tp_event->perf_events);
        tp_event->perf_events = NULL;

        if (!--total_ref_count) {
                for (i = 0; i < PERF_NR_CONTEXTS; i++) {
                        free_percpu(perf_trace_buf[i]);
                        perf_trace_buf[i] = NULL;
                }
        }
out:
        module_put(tp_event->mod);
}

static int perf_trace_event_open(struct perf_event *p_event)
{
        struct ftrace_event_call *tp_event = p_event->tp_event;
        return tp_event->class->reg(tp_event, TRACE_REG_PERF_OPEN, p_event);
}

static void perf_trace_event_close(struct perf_event *p_event)
{
        struct ftrace_event_call *tp_event = p_event->tp_event;
        tp_event->class->reg(tp_event, TRACE_REG_PERF_CLOSE, p_event);
}

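/*
 * Full per-event setup: permission check, registration with the trace
 * event and the TRACE_REG_PERF_OPEN callback. Registration is rolled back
 * if the open step fails.
 */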
static int perf_trace_event_init(struct ftrace_event_call *tp_event,
                                 struct perf_event *p_event)
{
        int ret;

        ret = perf_trace_event_perm(tp_event, p_event);
        if (ret)
                return ret;

        ret = perf_trace_event_reg(tp_event, p_event);
        if (ret)
                return ret;

        ret = perf_trace_event_open(p_event);
        if (ret) {
                perf_trace_event_unreg(p_event);
                return ret;
        }

        return 0;
}

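/*
 * Hook up a perf tracepoint event: find the ftrace event whose id matches
 * attr.config under event_mutex, take a reference on the module providing
 * it and initialize the event. The module reference is dropped again if
 * initialization fails.
 */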
int perf_trace_init(struct perf_event *p_event)
{
        struct ftrace_event_call *tp_event;
        int event_id = p_event->attr.config;
        int ret = -EINVAL;

        mutex_lock(&event_mutex);
        list_for_each_entry(tp_event, &ftrace_events, list) {
                if (tp_event->event.type == event_id &&
                    tp_event->class && tp_event->class->reg &&
                    try_module_get(tp_event->mod)) {
                        ret = perf_trace_event_init(tp_event, p_event);
                        if (ret)
                                module_put(tp_event->mod);
                        break;
                }
        }
        mutex_unlock(&event_mutex);

        return ret;
}

void perf_trace_destroy(struct perf_event *p_event)
{
        mutex_lock(&event_mutex);
        perf_trace_event_close(p_event);
        perf_trace_event_unreg(p_event);
        mutex_unlock(&event_mutex);
}

#ifdef CONFIG_KPROBE_EVENT
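/*
 * Create a perf-local kprobe (or kretprobe, if @is_retprobe) event from the
 * attributes of @p_event: either a symbol name copied from user space via
 * attr.kprobe_func or a raw address in attr.kprobe_addr, plus
 * attr.probe_offset.
 */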
int perf_kprobe_init(struct perf_event *p_event, bool is_retprobe)
{
        int ret;
        char *func = NULL;
        struct ftrace_event_call *tp_event;

        if (p_event->attr.kprobe_func) {
                func = kzalloc(KSYM_NAME_LEN, GFP_KERNEL);
                if (!func)
                        return -ENOMEM;
                ret = strncpy_from_user(
                        func, u64_to_user_ptr(p_event->attr.kprobe_func),
                        KSYM_NAME_LEN);
                if (ret == KSYM_NAME_LEN)
                        ret = -E2BIG;
                if (ret < 0)
                        goto out;

                if (func[0] == '\0') {
                        kfree(func);
                        func = NULL;
                }
        }

        tp_event = create_local_trace_kprobe(
                func, (void *)(unsigned long)(p_event->attr.kprobe_addr),
                p_event->attr.probe_offset, is_retprobe);
        if (IS_ERR(tp_event)) {
                ret = PTR_ERR(tp_event);
                goto out;
        }

        ret = perf_trace_event_init(tp_event, p_event);
        if (ret)
                destroy_local_trace_kprobe(tp_event);
out:
        kfree(func);
        return ret;
}

void perf_kprobe_destroy(struct perf_event *p_event)
{
        perf_trace_event_close(p_event);
        perf_trace_event_unreg(p_event);

        destroy_local_trace_kprobe(p_event->tp_event);
}
#endif /* CONFIG_KPROBE_EVENT */

#ifdef CONFIG_UPROBE_EVENT
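/*
 * Create a perf-local uprobe (or uretprobe, if @is_retprobe) event for the
 * file named by attr.uprobe_path at attr.probe_offset.
 */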
int perf_uprobe_init(struct perf_event *p_event, bool is_retprobe)
{
        int ret;
        char *path = NULL;
        struct ftrace_event_call *tp_event;

        if (!p_event->attr.uprobe_path)
                return -EINVAL;
        path = kzalloc(PATH_MAX, GFP_KERNEL);
        if (!path)
                return -ENOMEM;
        ret = strncpy_from_user(
                path, u64_to_user_ptr(p_event->attr.uprobe_path), PATH_MAX);
        if (ret == PATH_MAX) {
                ret = -E2BIG;
                goto out;
        }
        if (ret < 0)
                goto out;
        if (path[0] == '\0') {
                ret = -EINVAL;
                goto out;
        }

        tp_event = create_local_trace_uprobe(
                path, p_event->attr.probe_offset, is_retprobe);
        if (IS_ERR(tp_event)) {
                ret = PTR_ERR(tp_event);
                goto out;
        }

        /*
         * A local trace_uprobe needs to hold event_mutex to call
         * uprobe_buffer_enable() and uprobe_buffer_disable().
         * event_mutex is not required for local trace_kprobes.
         */
        mutex_lock(&event_mutex);
        ret = perf_trace_event_init(tp_event, p_event);
        if (ret)
                destroy_local_trace_uprobe(tp_event);
        mutex_unlock(&event_mutex);
out:
        kfree(path);
        return ret;
}

void perf_uprobe_destroy(struct perf_event *p_event)
{
        mutex_lock(&event_mutex);
        perf_trace_event_close(p_event);
        perf_trace_event_unreg(p_event);
        mutex_unlock(&event_mutex);
        destroy_local_trace_uprobe(p_event->tp_event);
}
#endif /* CONFIG_UPROBE_EVENT */

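/*
 * Attach @p_event to this CPU: put it on the per-CPU list that the
 * tracepoint callback walks, honour PERF_EF_START, and forward
 * TRACE_REG_PERF_ADD to the event class.
 */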
int perf_trace_add(struct perf_event *p_event, int flags)
{
        struct ftrace_event_call *tp_event = p_event->tp_event;
        struct hlist_head __percpu *pcpu_list;
        struct hlist_head *list;

        pcpu_list = tp_event->perf_events;
        if (WARN_ON_ONCE(!pcpu_list))
                return -EINVAL;

        if (!(flags & PERF_EF_START))
                p_event->hw.state = PERF_HES_STOPPED;

        list = this_cpu_ptr(pcpu_list);
        hlist_add_head_rcu(&p_event->hlist_entry, list);

        return tp_event->class->reg(tp_event, TRACE_REG_PERF_ADD, p_event);
}

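/* Counterpart of perf_trace_add(): unhook @p_event from this CPU's list. */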
void perf_trace_del(struct perf_event *p_event, int flags)
{
        struct ftrace_event_call *tp_event = p_event->tp_event;
        hlist_del_rcu(&p_event->hlist_entry);
        tp_event->class->reg(tp_event, TRACE_REG_PERF_DEL, p_event);
}

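/*
 * Reserve one of the per-context per-CPU raw buffers and initialize the
 * common trace_entry header in it, grabbing a software-event recursion
 * context along the way. Returns NULL if no recursion context is
 * available. The caller fills in the event-specific fields and hands the
 * buffer (together with *rctxp) to perf_trace_buf_submit(), which also
 * releases the recursion context.
 *
 * A caller roughly does (see perf_ftrace_function_call() below for a
 * real user):
 *
 *	entry = perf_trace_buf_prepare(size, type, &regs, &rctx);
 *	if (!entry)
 *		return;
 *	...fill in the event-specific fields of *entry...
 *	perf_trace_buf_submit(entry, size, rctx, addr, count,
 *			      regs, head, task);
 */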
__kprobes void *perf_trace_buf_prepare(int size, unsigned short type,
                                       struct pt_regs **regs, int *rctxp)
{
        struct trace_entry *entry;
        unsigned long flags;
        char *raw_data;
        int pc;

        BUILD_BUG_ON(PERF_MAX_TRACE_SIZE % sizeof(unsigned long));

        pc = preempt_count();

        *rctxp = perf_swevent_get_recursion_context();
        if (*rctxp < 0)
                return NULL;

        if (regs)
                *regs = this_cpu_ptr(&__perf_regs[*rctxp]);
        raw_data = this_cpu_ptr(perf_trace_buf[*rctxp]);

        /* zero the alignment padding so we don't leak stack data to userspace */
        memset(&raw_data[size - sizeof(u64)], 0, sizeof(u64));

        entry = (struct trace_entry *)raw_data;
        local_save_flags(flags);
        tracing_generic_entry_update(entry, flags, pc);
        entry->type = type;

        return raw_data;
}
EXPORT_SYMBOL_GPL(perf_trace_buf_prepare);

#ifdef CONFIG_FUNCTION_TRACER
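/*
 * ftrace_ops callback for perf function-trace events: capture the caller's
 * registers, build a struct ftrace_entry in the perf raw buffer and submit
 * it to the events attached on this CPU.
 */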
static void
perf_ftrace_function_call(unsigned long ip, unsigned long parent_ip,
                          struct ftrace_ops *ops, struct pt_regs *pt_regs)
{
        struct ftrace_entry *entry;
        struct hlist_head *head;
        struct pt_regs regs;
        int rctx;

#define ENTRY_SIZE (ALIGN(sizeof(struct ftrace_entry) + sizeof(u32), \
                    sizeof(u64)) - sizeof(u32))

        BUILD_BUG_ON(ENTRY_SIZE > PERF_MAX_TRACE_SIZE);

        memset(&regs, 0, sizeof(regs));
        perf_fetch_caller_regs(&regs);

        entry = perf_trace_buf_prepare(ENTRY_SIZE, TRACE_FN, NULL, &rctx);
        if (!entry)
                return;

        entry->ip = ip;
        entry->parent_ip = parent_ip;

        head = this_cpu_ptr(event_function.perf_events);
        perf_trace_buf_submit(entry, ENTRY_SIZE, rctx, 0,
                              1, &regs, head, NULL);

#undef ENTRY_SIZE
}

static int perf_ftrace_function_register(struct perf_event *event)
{
        struct ftrace_ops *ops = &event->ftrace_ops;

        ops->flags |= FTRACE_OPS_FL_CONTROL;
        ops->func = perf_ftrace_function_call;
        return register_ftrace_function(ops);
}

static int perf_ftrace_function_unregister(struct perf_event *event)
{
        struct ftrace_ops *ops = &event->ftrace_ops;
        int ret = unregister_ftrace_function(ops);
        ftrace_free_filter(ops);
        return ret;
}

static void perf_ftrace_function_enable(struct perf_event *event)
{
        ftrace_function_local_enable(&event->ftrace_ops);
}

static void perf_ftrace_function_disable(struct perf_event *event)
{
        ftrace_function_local_disable(&event->ftrace_ops);
}

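/*
 * reg() callback of the ftrace function event: map the TRACE_REG_PERF_*
 * requests onto registration and per-CPU enable/disable of the per-event
 * ftrace_ops; the non-perf register/unregister requests are not supported.
 */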
int perf_ftrace_event_register(struct ftrace_event_call *call,
                               enum trace_reg type, void *data)
{
        switch (type) {
        case TRACE_REG_REGISTER:
        case TRACE_REG_UNREGISTER:
                break;
        case TRACE_REG_PERF_REGISTER:
        case TRACE_REG_PERF_UNREGISTER:
                return 0;
        case TRACE_REG_PERF_OPEN:
                return perf_ftrace_function_register(data);
        case TRACE_REG_PERF_CLOSE:
                return perf_ftrace_function_unregister(data);
        case TRACE_REG_PERF_ADD:
                perf_ftrace_function_enable(data);
                return 0;
        case TRACE_REG_PERF_DEL:
                perf_ftrace_function_disable(data);
                return 0;
        }

        return -EINVAL;
}
#endif /* CONFIG_FUNCTION_TRACER */