/* linux/kernel/trace/trace_stat.c */
   1/*
   2 * Infrastructure for statistic tracing (histogram output).
   3 *
   4 * Copyright (C) 2008-2009 Frederic Weisbecker <fweisbec@gmail.com>
   5 *
   6 * Based on the code from trace_branch.c which is
   7 * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
   8 *
   9 */
  10
  11
  12#include <linux/list.h>
  13#include <linux/slab.h>
  14#include <linux/rbtree.h>
  15#include <linux/debugfs.h>
  16#include "trace_stat.h"
  17#include "trace.h"
  18
  19
/*
 * List of stat red-black nodes from a tracer
 * We use such a tree to sort quickly the stat
 * entries from the tracer.
 */
struct stat_node {
	struct rb_node		node;	/* linkage in stat_session::stat_root */
	void			*stat;	/* opaque per-tracer statistic entry */
};
  29
/* A stat session is the stats output in one file */
struct stat_session {
	struct list_head	session_list;	/* node in all_stat_sessions */
	struct tracer_stat	*ts;		/* callbacks of the backing tracer */
	struct rb_root		stat_root;	/* sorted stats, rebuilt at each open */
	struct mutex		stat_mutex;	/* protects stat_root */
	struct dentry		*file;		/* debugfs file exposing this session */
};
  38
  39/* All of the sessions currently in use. Each stat file embed one session */
  40static LIST_HEAD(all_stat_sessions);
  41static DEFINE_MUTEX(all_stat_sessions_mutex);
  42
  43/* The root directory for all stat files */
  44static struct dentry            *stat_dir;
  45
  46/*
  47 * Iterate through the rbtree using a post order traversal path
  48 * to release the next node.
  49 * It won't necessary release one at each iteration
  50 * but it will at least advance closer to the next one
  51 * to be released.
  52 */
  53static struct rb_node *release_next(struct tracer_stat *ts,
  54                                    struct rb_node *node)
  55{
  56        struct stat_node *snode;
  57        struct rb_node *parent = rb_parent(node);
  58
  59        if (node->rb_left)
  60                return node->rb_left;
  61        else if (node->rb_right)
  62                return node->rb_right;
  63        else {
  64                if (!parent)
  65                        ;
  66                else if (parent->rb_left == node)
  67                        parent->rb_left = NULL;
  68                else
  69                        parent->rb_right = NULL;
  70
  71                snode = container_of(node, struct stat_node, node);
  72                if (ts->stat_release)
  73                        ts->stat_release(snode->stat);
  74                kfree(snode);
  75
  76                return parent;
  77        }
  78}
  79
  80static void __reset_stat_session(struct stat_session *session)
  81{
  82        struct rb_node *node = session->stat_root.rb_node;
  83
  84        while (node)
  85                node = release_next(session->ts, node);
  86
  87        session->stat_root = RB_ROOT;
  88}
  89
/* Locked variant of __reset_stat_session(): frees the session's rbtree. */
static void reset_stat_session(struct stat_session *session)
{
	mutex_lock(&session->stat_mutex);
	__reset_stat_session(session);
	mutex_unlock(&session->stat_mutex);
}
  96
/*
 * Tear down a session: remove its debugfs file, free its rbtree
 * and the session itself. No locking here — the caller must
 * guarantee no concurrent user remains.
 */
static void destroy_session(struct stat_session *session)
{
	debugfs_remove(session->file);
	__reset_stat_session(session);
	mutex_destroy(&session->stat_mutex);
	kfree(session);
}
 104
/* Comparator over two opaque tracer stat entries. */
typedef int (*cmp_stat_t)(void *, void *);

/*
 * Allocate a stat_node for @stat and insert it into @root, sorted
 * in descending order according to @cmp (so rb_first() yields the
 * largest entry).
 * Returns 0 on success, -ENOMEM if the node could not be allocated.
 */
static int insert_stat(struct rb_root *root, void *stat, cmp_stat_t cmp)
{
	struct rb_node **new = &(root->rb_node), *parent = NULL;
	struct stat_node *data;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;
	data->stat = stat;

	/*
	 * Figure out where to put new node
	 * This is a descendent sorting
	 */
	while (*new) {
		struct stat_node *this;
		int result;

		this = container_of(*new, struct stat_node, node);
		result = cmp(data->stat, this->stat);

		parent = *new;
		/* Bigger-or-equal entries go left; ties keep insertion order. */
		if (result >= 0)
			new = &((*new)->rb_left);
		else
			new = &((*new)->rb_right);
	}

	rb_link_node(&data->node, parent, new);
	rb_insert_color(&data->node, root);
	return 0;
}
 139
/*
 * For tracers that don't provide a stat_cmp callback.
 * This one will force an insertion as right-most node
 * in the rbtree: a negative result makes insert_stat() always
 * descend right, so entries keep the tracer's own order.
 */
static int dummy_cmp(void *p1, void *p2)
{
	return -1;
}
 149
/*
 * Initialize the stat rbtree at each trace_stat file opening.
 * All of these copies and sorting are required on all opening
 * since the stats could have changed between two file sessions.
 * Returns 0 on success (including an empty stat set) or the error
 * from insert_stat(); on error the partially-built tree is freed.
 */
static int stat_seq_init(struct stat_session *session)
{
	struct tracer_stat *ts = session->ts;
	struct rb_root *root = &session->stat_root;
	void *stat;
	int ret = 0;
	int i;

	mutex_lock(&session->stat_mutex);
	/* Drop any tree left over from a previous open. */
	__reset_stat_session(session);

	/*
	 * No comparator means "keep tracer order" via dummy_cmp.
	 * NOTE(review): this writes to the shared tracer_stat outside
	 * all_stat_sessions_mutex — presumably benign since the stored
	 * value is always dummy_cmp; confirm against other callers.
	 */
	if (!ts->stat_cmp)
		ts->stat_cmp = dummy_cmp;

	stat = ts->stat_start(ts);
	if (!stat)
		goto exit;

	ret = insert_stat(root, stat, ts->stat_cmp);
	if (ret)
		goto exit;

	/*
	 * Iterate over the tracer stat entries and store them in an rbtree.
	 */
	for (i = 1; ; i++) {
		stat = ts->stat_next(stat, i);

		/* End of insertion */
		if (!stat)
			break;

		ret = insert_stat(root, stat, ts->stat_cmp);
		if (ret)
			goto exit_free_rbtree;
	}

exit:
	mutex_unlock(&session->stat_mutex);
	return ret;

exit_free_rbtree:
	/* A partial tree is useless: free what was inserted so far. */
	__reset_stat_session(session);
	mutex_unlock(&session->stat_mutex);
	return ret;
}
 201
 202
/*
 * seq_file start: position the iterator at *pos. Position 0 is the
 * header pseudo-entry (SEQ_START_TOKEN) when the tracer provides
 * stat_headers; real entries follow in rbtree order.
 * The stat_mutex taken here is released in stat_seq_stop().
 */
static void *stat_seq_start(struct seq_file *s, loff_t *pos)
{
	struct stat_session *session = s->private;
	struct rb_node *node;
	int n = *pos;
	int i;

	/* Prevent from tracer switch or rbtree modification */
	mutex_lock(&session->stat_mutex);

	/* If we are in the beginning of the file, print the headers */
	if (session->ts->stat_headers) {
		if (n == 0)
			return SEQ_START_TOKEN;
		n--;	/* the header consumed position 0 */
	}

	/* Walk n entries in sorted order; NULL past the end stops the read. */
	node = rb_first(&session->stat_root);
	for (i = 0; node && i < n; i++)
		node = rb_next(node);

	return node;
}
 226
 227static void *stat_seq_next(struct seq_file *s, void *p, loff_t *pos)
 228{
 229        struct stat_session *session = s->private;
 230        struct rb_node *node = p;
 231
 232        (*pos)++;
 233
 234        if (p == SEQ_START_TOKEN)
 235                return rb_first(&session->stat_root);
 236
 237        return rb_next(node);
 238}
 239
/* seq_file stop: drop the lock taken in stat_seq_start(). */
static void stat_seq_stop(struct seq_file *s, void *p)
{
	struct stat_session *session = s->private;
	mutex_unlock(&session->stat_mutex);
}
 245
 246static int stat_seq_show(struct seq_file *s, void *v)
 247{
 248        struct stat_session *session = s->private;
 249        struct stat_node *l = container_of(v, struct stat_node, node);
 250
 251        if (v == SEQ_START_TOKEN)
 252                return session->ts->stat_headers(s);
 253
 254        return session->ts->stat_show(s, l->stat);
 255}
 256
/* seq_file iteration over the sorted stats, header line first. */
static const struct seq_operations trace_stat_seq_ops = {
	.start		= stat_seq_start,
	.next		= stat_seq_next,
	.stop		= stat_seq_stop,
	.show		= stat_seq_show
};
 263
/* The session stat is refilled and resorted at each stat file opening */
static int tracing_stat_open(struct inode *inode, struct file *file)
{
	int ret;
	struct seq_file *m;
	struct stat_session *session = inode->i_private;

	/* Build the sorted snapshot before exposing it to seq_file. */
	ret = stat_seq_init(session);
	if (ret)
		return ret;

	ret = seq_open(file, &trace_stat_seq_ops);
	if (ret) {
		/* Undo stat_seq_init(): drop the tree we just built. */
		reset_stat_session(session);
		return ret;
	}

	m = file->private_data;
	m->private = session;
	return ret;
}
 285
/*
 * Avoid consuming memory with our now useless rbtree.
 */
static int tracing_stat_release(struct inode *i, struct file *f)
{
	struct stat_session *session = i->i_private;

	reset_stat_session(session);

	return seq_release(i, f);
}
 297
/* File operations for each per-tracer stat file under trace_stat/. */
static const struct file_operations tracing_stat_fops = {
	.open		= tracing_stat_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= tracing_stat_release
};
 304
 305static int tracing_stat_init(void)
 306{
 307        struct dentry *d_tracing;
 308
 309        d_tracing = tracing_init_dentry();
 310        if (!d_tracing)
 311                return 0;
 312
 313        stat_dir = debugfs_create_dir("trace_stat", d_tracing);
 314        if (!stat_dir)
 315                pr_warning("Could not create debugfs "
 316                           "'trace_stat' entry\n");
 317        return 0;
 318}
 319
/*
 * Create the per-tracer stat file, lazily creating the trace_stat
 * directory on first use.
 * Returns 0 on success, -ENOMEM if the file could not be created.
 * NOTE(review): tracing_stat_init() always returns 0, so the -ENODEV
 * path looks unreachable — confirm before relying on it.
 */
static int init_stat_file(struct stat_session *session)
{
	if (!stat_dir && tracing_stat_init())
		return -ENODEV;

	session->file = debugfs_create_file(session->ts->name, 0644,
					    stat_dir,
					    session, &tracing_stat_fops);
	if (!session->file)
		return -ENOMEM;
	return 0;
}
 332
 333int register_stat_tracer(struct tracer_stat *trace)
 334{
 335        struct stat_session *session, *node;
 336        int ret;
 337
 338        if (!trace)
 339                return -EINVAL;
 340
 341        if (!trace->stat_start || !trace->stat_next || !trace->stat_show)
 342                return -EINVAL;
 343
 344        /* Already registered? */
 345        mutex_lock(&all_stat_sessions_mutex);
 346        list_for_each_entry(node, &all_stat_sessions, session_list) {
 347                if (node->ts == trace) {
 348                        mutex_unlock(&all_stat_sessions_mutex);
 349                        return -EINVAL;
 350                }
 351        }
 352        mutex_unlock(&all_stat_sessions_mutex);
 353
 354        /* Init the session */
 355        session = kzalloc(sizeof(*session), GFP_KERNEL);
 356        if (!session)
 357                return -ENOMEM;
 358
 359        session->ts = trace;
 360        INIT_LIST_HEAD(&session->session_list);
 361        mutex_init(&session->stat_mutex);
 362
 363        ret = init_stat_file(session);
 364        if (ret) {
 365                destroy_session(session);
 366                return ret;
 367        }
 368
 369        /* Register */
 370        mutex_lock(&all_stat_sessions_mutex);
 371        list_add_tail(&session->session_list, &all_stat_sessions);
 372        mutex_unlock(&all_stat_sessions_mutex);
 373
 374        return 0;
 375}
 376
 377void unregister_stat_tracer(struct tracer_stat *trace)
 378{
 379        struct stat_session *node, *tmp;
 380
 381        mutex_lock(&all_stat_sessions_mutex);
 382        list_for_each_entry_safe(node, tmp, &all_stat_sessions, session_list) {
 383                if (node->ts == trace) {
 384                        list_del(&node->session_list);
 385                        destroy_session(node);
 386                        break;
 387                }
 388        }
 389        mutex_unlock(&all_stat_sessions_mutex);
 390}
 391