/* linux/kernel/trace/trace_stat.c */
/*
 * Infrastructure for statistic tracing (histogram output).
 *
 * Copyright (C) 2008-2009 Frederic Weisbecker <fweisbec@gmail.com>
 *
 * Based on the code from trace_branch.c which is
 * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
 *
 */
  10
  11
  12#include <linux/list.h>
  13#include <linux/rbtree.h>
  14#include <linux/debugfs.h>
  15#include "trace_stat.h"
  16#include "trace.h"
  17
  18
/*
 * List of stat red-black nodes from a tracer.
 * We use such a tree to quickly sort the stat
 * entries from the tracer.
 */
/* One sorted entry: wraps the tracer's opaque stat pointer for the rbtree. */
struct stat_node {
	struct rb_node		node;	/* rbtree linkage */
	void			*stat;	/* opaque stat entry, owned by the tracer */
};
  28
/* A stat session is the stats output in one file */
struct stat_session {
	struct list_head	session_list;	/* node in all_stat_sessions */
	struct tracer_stat	*ts;		/* callbacks provided by the tracer */
	struct rb_root		stat_root;	/* sorted snapshot of the stat entries */
	struct mutex		stat_mutex;	/* protects stat_root */
	struct dentry		*file;		/* debugfs file exposing this session */
};
  37
/* All of the sessions currently in use. Each stat file embeds one session */
static LIST_HEAD(all_stat_sessions);
static DEFINE_MUTEX(all_stat_sessions_mutex);

/* The root directory for all stat files */
static struct dentry		*stat_dir;
  44
  45/*
  46 * Iterate through the rbtree using a post order traversal path
  47 * to release the next node.
  48 * It won't necessary release one at each iteration
  49 * but it will at least advance closer to the next one
  50 * to be released.
  51 */
  52static struct rb_node *release_next(struct tracer_stat *ts,
  53                                    struct rb_node *node)
  54{
  55        struct stat_node *snode;
  56        struct rb_node *parent = rb_parent(node);
  57
  58        if (node->rb_left)
  59                return node->rb_left;
  60        else if (node->rb_right)
  61                return node->rb_right;
  62        else {
  63                if (!parent)
  64                        ;
  65                else if (parent->rb_left == node)
  66                        parent->rb_left = NULL;
  67                else
  68                        parent->rb_right = NULL;
  69
  70                snode = container_of(node, struct stat_node, node);
  71                if (ts->stat_release)
  72                        ts->stat_release(snode->stat);
  73                kfree(snode);
  74
  75                return parent;
  76        }
  77}
  78
  79static void __reset_stat_session(struct stat_session *session)
  80{
  81        struct rb_node *node = session->stat_root.rb_node;
  82
  83        while (node)
  84                node = release_next(session->ts, node);
  85
  86        session->stat_root = RB_ROOT;
  87}
  88
/* Empty the session's stat rbtree under its mutex. */
static void reset_stat_session(struct stat_session *session)
{
	mutex_lock(&session->stat_mutex);
	__reset_stat_session(session);
	mutex_unlock(&session->stat_mutex);
}
  95
/*
 * Tear down a session: remove its debugfs file first so no new reader
 * can come in, then free the rbtree, the mutex and the session itself.
 */
static void destroy_session(struct stat_session *session)
{
	debugfs_remove(session->file);
	__reset_stat_session(session);
	mutex_destroy(&session->stat_mutex);
	kfree(session);
}
 103
 104typedef int (*cmp_stat_t)(void *, void *);
 105
 106static int insert_stat(struct rb_root *root, void *stat, cmp_stat_t cmp)
 107{
 108        struct rb_node **new = &(root->rb_node), *parent = NULL;
 109        struct stat_node *data;
 110
 111        data = kzalloc(sizeof(*data), GFP_KERNEL);
 112        if (!data)
 113                return -ENOMEM;
 114        data->stat = stat;
 115
 116        /*
 117         * Figure out where to put new node
 118         * This is a descendent sorting
 119         */
 120        while (*new) {
 121                struct stat_node *this;
 122                int result;
 123
 124                this = container_of(*new, struct stat_node, node);
 125                result = cmp(data->stat, this->stat);
 126
 127                parent = *new;
 128                if (result >= 0)
 129                        new = &((*new)->rb_left);
 130                else
 131                        new = &((*new)->rb_right);
 132        }
 133
 134        rb_link_node(&data->node, parent, new);
 135        rb_insert_color(&data->node, root);
 136        return 0;
 137}
 138
/*
 * For tracers that don't provide a stat_cmp callback.
 * This one will force an insertion as right-most node
 * in the rbtree.
 */
static int dummy_cmp(void *p1, void *p2)
{
	/* Always "less than": every new entry lands right-most. */
	return -1;
}
 148
 149/*
 150 * Initialize the stat rbtree at each trace_stat file opening.
 151 * All of these copies and sorting are required on all opening
 152 * since the stats could have changed between two file sessions.
 153 */
 154static int stat_seq_init(struct stat_session *session)
 155{
 156        struct tracer_stat *ts = session->ts;
 157        struct rb_root *root = &session->stat_root;
 158        void *stat;
 159        int ret = 0;
 160        int i;
 161
 162        mutex_lock(&session->stat_mutex);
 163        __reset_stat_session(session);
 164
 165        if (!ts->stat_cmp)
 166                ts->stat_cmp = dummy_cmp;
 167
 168        stat = ts->stat_start(ts);
 169        if (!stat)
 170                goto exit;
 171
 172        ret = insert_stat(root, stat, ts->stat_cmp);
 173        if (ret)
 174                goto exit;
 175
 176        /*
 177         * Iterate over the tracer stat entries and store them in an rbtree.
 178         */
 179        for (i = 1; ; i++) {
 180                stat = ts->stat_next(stat, i);
 181
 182                /* End of insertion */
 183                if (!stat)
 184                        break;
 185
 186                ret = insert_stat(root, stat, ts->stat_cmp);
 187                if (ret)
 188                        goto exit_free_rbtree;
 189        }
 190
 191exit:
 192        mutex_unlock(&session->stat_mutex);
 193        return ret;
 194
 195exit_free_rbtree:
 196        __reset_stat_session(session);
 197        mutex_unlock(&session->stat_mutex);
 198        return ret;
 199}
 200
 201
 202static void *stat_seq_start(struct seq_file *s, loff_t *pos)
 203{
 204        struct stat_session *session = s->private;
 205        struct rb_node *node;
 206        int n = *pos;
 207        int i;
 208
 209        /* Prevent from tracer switch or rbtree modification */
 210        mutex_lock(&session->stat_mutex);
 211
 212        /* If we are in the beginning of the file, print the headers */
 213        if (session->ts->stat_headers) {
 214                if (n == 0)
 215                        return SEQ_START_TOKEN;
 216                n--;
 217        }
 218
 219        node = rb_first(&session->stat_root);
 220        for (i = 0; node && i < n; i++)
 221                node = rb_next(node);
 222
 223        return node;
 224}
 225
 226static void *stat_seq_next(struct seq_file *s, void *p, loff_t *pos)
 227{
 228        struct stat_session *session = s->private;
 229        struct rb_node *node = p;
 230
 231        (*pos)++;
 232
 233        if (p == SEQ_START_TOKEN)
 234                return rb_first(&session->stat_root);
 235
 236        return rb_next(node);
 237}
 238
/* seq_file stop: drop the lock taken in stat_seq_start(). */
static void stat_seq_stop(struct seq_file *s, void *p)
{
	struct stat_session *session = s->private;
	mutex_unlock(&session->stat_mutex);
}
 244
 245static int stat_seq_show(struct seq_file *s, void *v)
 246{
 247        struct stat_session *session = s->private;
 248        struct stat_node *l = container_of(v, struct stat_node, node);
 249
 250        if (v == SEQ_START_TOKEN)
 251                return session->ts->stat_headers(s);
 252
 253        return session->ts->stat_show(s, l->stat);
 254}
 255
/* seq_file iteration over the sorted stat rbtree */
static const struct seq_operations trace_stat_seq_ops = {
	.start		= stat_seq_start,
	.next		= stat_seq_next,
	.stop		= stat_seq_stop,
	.show		= stat_seq_show
};
 262
 263/* The session stat is refilled and resorted at each stat file opening */
 264static int tracing_stat_open(struct inode *inode, struct file *file)
 265{
 266        int ret;
 267        struct seq_file *m;
 268        struct stat_session *session = inode->i_private;
 269
 270        ret = stat_seq_init(session);
 271        if (ret)
 272                return ret;
 273
 274        ret = seq_open(file, &trace_stat_seq_ops);
 275        if (ret) {
 276                reset_stat_session(session);
 277                return ret;
 278        }
 279
 280        m = file->private_data;
 281        m->private = session;
 282        return ret;
 283}
 284
/*
 * Avoid consuming memory with our now useless rbtree.
 */
static int tracing_stat_release(struct inode *i, struct file *f)
{
	struct stat_session *session = i->i_private;

	reset_stat_session(session);

	return seq_release(i, f);
}
 296
/* debugfs file operations for one stat session */
static const struct file_operations tracing_stat_fops = {
	.open		= tracing_stat_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= tracing_stat_release
};
 303
 304static int tracing_stat_init(void)
 305{
 306        struct dentry *d_tracing;
 307
 308        d_tracing = tracing_init_dentry();
 309
 310        stat_dir = debugfs_create_dir("trace_stat", d_tracing);
 311        if (!stat_dir)
 312                pr_warning("Could not create debugfs "
 313                           "'trace_stat' entry\n");
 314        return 0;
 315}
 316
 317static int init_stat_file(struct stat_session *session)
 318{
 319        if (!stat_dir && tracing_stat_init())
 320                return -ENODEV;
 321
 322        session->file = debugfs_create_file(session->ts->name, 0644,
 323                                            stat_dir,
 324                                            session, &tracing_stat_fops);
 325        if (!session->file)
 326                return -ENOMEM;
 327        return 0;
 328}
 329
 330int register_stat_tracer(struct tracer_stat *trace)
 331{
 332        struct stat_session *session, *node;
 333        int ret;
 334
 335        if (!trace)
 336                return -EINVAL;
 337
 338        if (!trace->stat_start || !trace->stat_next || !trace->stat_show)
 339                return -EINVAL;
 340
 341        /* Already registered? */
 342        mutex_lock(&all_stat_sessions_mutex);
 343        list_for_each_entry(node, &all_stat_sessions, session_list) {
 344                if (node->ts == trace) {
 345                        mutex_unlock(&all_stat_sessions_mutex);
 346                        return -EINVAL;
 347                }
 348        }
 349        mutex_unlock(&all_stat_sessions_mutex);
 350
 351        /* Init the session */
 352        session = kzalloc(sizeof(*session), GFP_KERNEL);
 353        if (!session)
 354                return -ENOMEM;
 355
 356        session->ts = trace;
 357        INIT_LIST_HEAD(&session->session_list);
 358        mutex_init(&session->stat_mutex);
 359
 360        ret = init_stat_file(session);
 361        if (ret) {
 362                destroy_session(session);
 363                return ret;
 364        }
 365
 366        /* Register */
 367        mutex_lock(&all_stat_sessions_mutex);
 368        list_add_tail(&session->session_list, &all_stat_sessions);
 369        mutex_unlock(&all_stat_sessions_mutex);
 370
 371        return 0;
 372}
 373
 374void unregister_stat_tracer(struct tracer_stat *trace)
 375{
 376        struct stat_session *node, *tmp;
 377
 378        mutex_lock(&all_stat_sessions_mutex);
 379        list_for_each_entry_safe(node, tmp, &all_stat_sessions, session_list) {
 380                if (node->ts == trace) {
 381                        list_del(&node->session_list);
 382                        destroy_session(node);
 383                        break;
 384                }
 385        }
 386        mutex_unlock(&all_stat_sessions_mutex);
 387}
 388