linux/kernel/trace/trace_boot.c
/*
 * ring buffer based initcalls tracer
 *
 * Copyright (C) 2008 Frederic Weisbecker <fweisbec@gmail.com>
 *
 */

#include <linux/init.h>
#include <linux/debugfs.h>
#include <linux/ftrace.h>
#include <linux/kallsyms.h>
#include <linux/time.h>

#include "trace.h"
#include "trace_output.h"

static struct trace_array *boot_trace;
static bool pre_initcalls_finished;

/*
 * Tell the boot tracer that the pre-SMP initcalls have finished, so we
 * are ready to record. This does not enable sched switch tracing by
 * itself; call enable_boot_trace() for that.
 */
void start_boot_trace(void)
{
        pre_initcalls_finished = true;
}

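/*
 * Start recording sched switch events, but only if the boot tracer is
 * active and the pre-SMP initcalls have finished.
 */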
void enable_boot_trace(void)
{
        if (boot_trace && pre_initcalls_finished)
                tracing_start_sched_switch_record();
}

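/* Stop recording sched switch events, under the same conditions. */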
void disable_boot_trace(void)
{
        if (boot_trace && pre_initcalls_finished)
                tracing_stop_sched_switch_record();
}

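/*
 * Called when the "initcall" tracer is selected: remember the trace
 * array, reset its per-CPU buffers and hook up sched switch tracing.
 */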
static int boot_trace_init(struct trace_array *tr)
{
        boot_trace = tr;

        if (!tr)
                return 0;

        tracing_reset_online_cpus(tr);

        tracing_sched_switch_assign_trace(tr);
        return 0;
}

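/*
 * Format a TRACE_BOOT_CALL entry as
 * "[    sec.nsec] calling  <func> @ <caller>".
 */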
static enum print_line_t
initcall_call_print_line(struct trace_iterator *iter)
{
        struct trace_entry *entry = iter->ent;
        struct trace_seq *s = &iter->seq;
        struct trace_boot_call *field;
        struct boot_trace_call *call;
        u64 ts;
        unsigned long nsec_rem;
        int ret;

        trace_assign_type(field, entry);
        call = &field->boot_call;
        ts = iter->ts;
        nsec_rem = do_div(ts, NSEC_PER_SEC);

        ret = trace_seq_printf(s, "[%5ld.%09ld] calling  %s @ %i\n",
                        (unsigned long)ts, nsec_rem, call->func, call->caller);

        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;
        else
                return TRACE_TYPE_HANDLED;
}

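/*
 * Format a TRACE_BOOT_RET entry as
 * "[    sec.nsec] initcall <func> returned <result> after <duration> msecs".
 */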
static enum print_line_t
initcall_ret_print_line(struct trace_iterator *iter)
{
        struct trace_entry *entry = iter->ent;
        struct trace_seq *s = &iter->seq;
        struct trace_boot_ret *field;
        struct boot_trace_ret *init_ret;
        u64 ts;
        unsigned long nsec_rem;
        int ret;

        trace_assign_type(field, entry);
        init_ret = &field->boot_ret;
        ts = iter->ts;
        nsec_rem = do_div(ts, NSEC_PER_SEC);

        ret = trace_seq_printf(s, "[%5ld.%09ld] initcall %s "
                        "returned %d after %llu msecs\n",
                        (unsigned long) ts,
                        nsec_rem,
                        init_ret->func, init_ret->result, init_ret->duration);

        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;
        else
                return TRACE_TYPE_HANDLED;
}

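/* Dispatch an entry to the printer matching its type. */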
static enum print_line_t initcall_print_line(struct trace_iterator *iter)
{
        struct trace_entry *entry = iter->ent;

        switch (entry->type) {
        case TRACE_BOOT_CALL:
                return initcall_call_print_line(iter);
        case TRACE_BOOT_RET:
                return initcall_ret_print_line(iter);
        default:
                return TRACE_TYPE_UNHANDLED;
        }
}

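/* The boot tracer itself, exposed under the name "initcall". */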
struct tracer boot_tracer __read_mostly =
{
        .name           = "initcall",
        .init           = boot_trace_init,
        .reset          = tracing_reset_online_cpus,
        .print_line     = initcall_print_line,
};

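/*
 * Record the start of an initcall: resolve the symbol name of fn and
 * write a TRACE_BOOT_CALL entry into the boot trace ring buffer.
 */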
void trace_boot_call(struct boot_trace_call *bt, initcall_t fn)
{
        struct ftrace_event_call *call = &event_boot_call;
        struct ring_buffer_event *event;
        struct ring_buffer *buffer;
        struct trace_boot_call *entry;
        struct trace_array *tr = boot_trace;

        if (!tr || !pre_initcalls_finished)
                return;

        /*
         * Resolve the symbol name now: fn is in the .init section and
         * could disappear later.
         */
        sprint_symbol(bt->func, (unsigned long)fn);
        preempt_disable();

        buffer = tr->buffer;
        event = trace_buffer_lock_reserve(buffer, TRACE_BOOT_CALL,
                                          sizeof(*entry), 0, 0);
        if (!event)
                goto out;
        entry   = ring_buffer_event_data(event);
        entry->boot_call = *bt;
        if (!filter_check_discard(call, entry, buffer, event))
                trace_buffer_unlock_commit(buffer, event, 0, 0);
 out:
        preempt_enable();
}

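/*
 * Record the return of an initcall: resolve the symbol name of fn and
 * write a TRACE_BOOT_RET entry into the boot trace ring buffer.
 */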
void trace_boot_ret(struct boot_trace_ret *bt, initcall_t fn)
{
        struct ftrace_event_call *call = &event_boot_ret;
        struct ring_buffer_event *event;
        struct ring_buffer *buffer;
        struct trace_boot_ret *entry;
        struct trace_array *tr = boot_trace;

        if (!tr || !pre_initcalls_finished)
                return;

        sprint_symbol(bt->func, (unsigned long)fn);
        preempt_disable();

        buffer = tr->buffer;
        event = trace_buffer_lock_reserve(buffer, TRACE_BOOT_RET,
                                          sizeof(*entry), 0, 0);
        if (!event)
                goto out;
        entry   = ring_buffer_event_data(event);
        entry->boot_ret = *bt;
        if (!filter_check_discard(call, entry, buffer, event))
                trace_buffer_unlock_commit(buffer, event, 0, 0);
 out:
        preempt_enable();
}