linux/kernel/trace/ftrace.c
   1/*
   2 * Infrastructure for profiling code inserted by 'gcc -pg'.
   3 *
   4 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
   5 * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
   6 *
   7 * Originally ported from the -rt patch by:
   8 *   Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
   9 *
  10 * Based on code in the latency_tracer, that is:
  11 *
  12 *  Copyright (C) 2004-2006 Ingo Molnar
  13 *  Copyright (C) 2004 William Lee Irwin III
  14 */
  15
  16#include <linux/stop_machine.h>
  17#include <linux/clocksource.h>
  18#include <linux/kallsyms.h>
  19#include <linux/seq_file.h>
  20#include <linux/suspend.h>
  21#include <linux/debugfs.h>
  22#include <linux/hardirq.h>
  23#include <linux/kthread.h>
  24#include <linux/uaccess.h>
  25#include <linux/ftrace.h>
  26#include <linux/sysctl.h>
  27#include <linux/slab.h>
  28#include <linux/ctype.h>
  29#include <linux/list.h>
  30#include <linux/hash.h>
  31#include <linux/rcupdate.h>
  32
  33#include <trace/events/sched.h>
  34
  35#include <asm/setup.h>
  36
  37#include "trace_output.h"
  38#include "trace_stat.h"
  39
  40#define FTRACE_WARN_ON(cond)                    \
  41        ({                                      \
  42                int ___r = cond;                \
  43                if (WARN_ON(___r))              \
  44                        ftrace_kill();          \
  45                ___r;                           \
  46        })
  47
  48#define FTRACE_WARN_ON_ONCE(cond)               \
  49        ({                                      \
  50                int ___r = cond;                \
  51                if (WARN_ON_ONCE(___r))         \
  52                        ftrace_kill();          \
  53                ___r;                           \
  54        })
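
/*
 * Both macros above are statement expressions, so they evaluate to the
 * tested condition itself.  That lets callers branch on the result,
 * e.g. "if (FTRACE_WARN_ON(ops == &global_ops))" further down, while
 * ftrace_kill() shuts tracing down on the warning path.
 */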
  55
  56/* hash bits for specific function selection */
  57#define FTRACE_HASH_BITS 7
  58#define FTRACE_FUNC_HASHSIZE (1 << FTRACE_HASH_BITS)
  59#define FTRACE_HASH_DEFAULT_BITS 10
  60#define FTRACE_HASH_MAX_BITS 12
  61
  62/* ftrace_enabled is a method to turn ftrace on or off */
  63int ftrace_enabled __read_mostly;
  64static int last_ftrace_enabled;
  65
  66/* Quick disabling of function tracer. */
  67int function_trace_stop;
  68
  69/* List for set_ftrace_pid's pids. */
  70LIST_HEAD(ftrace_pids);
  71struct ftrace_pid {
  72        struct list_head list;
  73        struct pid *pid;
  74};
  75
  76/*
  77 * ftrace_disabled is set when an anomaly is discovered.
  78 * ftrace_disabled is much stronger than ftrace_enabled.
  79 */
  80static int ftrace_disabled __read_mostly;
  81
  82static DEFINE_MUTEX(ftrace_lock);
  83
  84static struct ftrace_ops ftrace_list_end __read_mostly = {
  85        .func           = ftrace_stub,
  86};
  87
  88static struct ftrace_ops *ftrace_global_list __read_mostly = &ftrace_list_end;
  89static struct ftrace_ops *ftrace_ops_list __read_mostly = &ftrace_list_end;
  90ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
  91static ftrace_func_t __ftrace_trace_function_delay __read_mostly = ftrace_stub;
  92ftrace_func_t __ftrace_trace_function __read_mostly = ftrace_stub;
  93ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub;
  94static struct ftrace_ops global_ops;
  95
  96static void
  97ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip);
  98
  99/*
 100 * Traverse the ftrace_global_list, invoking all entries.  The reason that we
 101 * can use rcu_dereference_raw() is that elements removed from this list
 102 * are simply leaked, so there is no need to interact with a grace-period
 103 * mechanism.  The rcu_dereference_raw() calls are needed to handle
 104 * concurrent insertions into the ftrace_global_list.
 105 *
 106 * Silly Alpha and silly pointer-speculation compiler optimizations!
 107 */
 108static void ftrace_global_list_func(unsigned long ip,
 109                                    unsigned long parent_ip)
 110{
 111        struct ftrace_ops *op;
 112
 113        if (unlikely(trace_recursion_test(TRACE_GLOBAL_BIT)))
 114                return;
 115
 116        trace_recursion_set(TRACE_GLOBAL_BIT);
 117        op = rcu_dereference_raw(ftrace_global_list); /*see above*/
 118        while (op != &ftrace_list_end) {
 119                op->func(ip, parent_ip);
 120                op = rcu_dereference_raw(op->next); /*see above*/
 121        }
 122        trace_recursion_clear(TRACE_GLOBAL_BIT);
 123}
 124
 125static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip)
 126{
 127        if (!test_tsk_trace_trace(current))
 128                return;
 129
 130        ftrace_pid_function(ip, parent_ip);
 131}
 132
 133static void set_ftrace_pid_function(ftrace_func_t func)
 134{
 135        /* do not set ftrace_pid_function to itself! */
 136        if (func != ftrace_pid_func)
 137                ftrace_pid_function = func;
 138}
 139
 140/**
 141 * clear_ftrace_function - reset the ftrace function
 142 *
 143 * This NULLs the ftrace function and in essence stops
 144 * tracing.  There may be a lag before the change takes effect at every call site.
 145 */
 146void clear_ftrace_function(void)
 147{
 148        ftrace_trace_function = ftrace_stub;
 149        __ftrace_trace_function = ftrace_stub;
 150        __ftrace_trace_function_delay = ftrace_stub;
 151        ftrace_pid_function = ftrace_stub;
 152}
 153
 154#undef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
 155#ifndef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
 156/*
 157 * For those archs that do not test function_trace_stop in their
 158 * mcount call site, we need to do it from C.
 159 */
 160static void ftrace_test_stop_func(unsigned long ip, unsigned long parent_ip)
 161{
 162        if (function_trace_stop)
 163                return;
 164
 165        __ftrace_trace_function(ip, parent_ip);
 166}
 167#endif
 168
 169static void update_global_ops(void)
 170{
 171        ftrace_func_t func;
 172
 173        /*
 174         * If there's only one function registered, then call that
 175         * function directly. Otherwise, we need to iterate over the
 176         * registered callers.
 177         */
 178        if (ftrace_global_list == &ftrace_list_end ||
 179            ftrace_global_list->next == &ftrace_list_end)
 180                func = ftrace_global_list->func;
 181        else
 182                func = ftrace_global_list_func;
 183
 184        /* If we filter on pids, update to use the pid function */
 185        if (!list_empty(&ftrace_pids)) {
 186                set_ftrace_pid_function(func);
 187                func = ftrace_pid_func;
 188        }
 189
 190        global_ops.func = func;
 191}
 192
 193static void update_ftrace_function(void)
 194{
 195        ftrace_func_t func;
 196
 197        update_global_ops();
 198
 199        /*
 200         * If we are at the end of the list and this ops is
 201         * not dynamic, then have the mcount trampoline call
 202         * the function directly
 203         */
 204        if (ftrace_ops_list == &ftrace_list_end ||
 205            (ftrace_ops_list->next == &ftrace_list_end &&
 206             !(ftrace_ops_list->flags & FTRACE_OPS_FL_DYNAMIC)))
 207                func = ftrace_ops_list->func;
 208        else
 209                func = ftrace_ops_list_func;
 210
 211#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
 212        ftrace_trace_function = func;
 213#else
 214#ifdef CONFIG_DYNAMIC_FTRACE
 215        /* do not update till all functions have been modified */
 216        __ftrace_trace_function_delay = func;
 217#else
 218        __ftrace_trace_function = func;
 219#endif
 220        ftrace_trace_function = ftrace_test_stop_func;
 221#endif
 222}
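
/*
 * Note on the !HAVE_FUNCTION_TRACE_MCOUNT_TEST + DYNAMIC_FTRACE path
 * above: the new callback is parked in __ftrace_trace_function_delay and
 * only copied into __ftrace_trace_function from __ftrace_modify_code(),
 * under stop_machine(), once all the call sites have been updated.
 */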
 223
 224static void add_ftrace_ops(struct ftrace_ops **list, struct ftrace_ops *ops)
 225{
 226        ops->next = *list;
 227        /*
 228         * We are entering ops into the list but another
 229         * CPU might be walking that list. We need to make sure
 230         * the ops->next pointer is valid before another CPU sees
 231         * the ops pointer included into the list.
 232         */
 233        rcu_assign_pointer(*list, ops);
 234}
 235
 236static int remove_ftrace_ops(struct ftrace_ops **list, struct ftrace_ops *ops)
 237{
 238        struct ftrace_ops **p;
 239
 240        /*
 241         * If we are removing the last function, then simply point
 242         * to the ftrace_stub.
 243         */
 244        if (*list == ops && ops->next == &ftrace_list_end) {
 245                *list = &ftrace_list_end;
 246                return 0;
 247        }
 248
 249        for (p = list; *p != &ftrace_list_end; p = &(*p)->next)
 250                if (*p == ops)
 251                        break;
 252
 253        if (*p != ops)
 254                return -1;
 255
 256        *p = (*p)->next;
 257        return 0;
 258}
 259
 260static int __register_ftrace_function(struct ftrace_ops *ops)
 261{
 262        if (ftrace_disabled)
 263                return -ENODEV;
 264
 265        if (FTRACE_WARN_ON(ops == &global_ops))
 266                return -EINVAL;
 267
 268        if (WARN_ON(ops->flags & FTRACE_OPS_FL_ENABLED))
 269                return -EBUSY;
 270
 271        if (!core_kernel_data((unsigned long)ops))
 272                ops->flags |= FTRACE_OPS_FL_DYNAMIC;
 273
 274        if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
 275                int first = ftrace_global_list == &ftrace_list_end;
 276                add_ftrace_ops(&ftrace_global_list, ops);
 277                ops->flags |= FTRACE_OPS_FL_ENABLED;
 278                if (first)
 279                        add_ftrace_ops(&ftrace_ops_list, &global_ops);
 280        } else
 281                add_ftrace_ops(&ftrace_ops_list, ops);
 282
 283        if (ftrace_enabled)
 284                update_ftrace_function();
 285
 286        return 0;
 287}
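
/*
 * Illustrative sketch (the names my_ops and my_trace_func are made up):
 * a caller typically reaches the function above like this:
 *
 *	static void my_trace_func(unsigned long ip, unsigned long parent_ip)
 *	{
 *		...
 *	}
 *
 *	static struct ftrace_ops my_ops = {
 *		.func	= my_trace_func,
 *	};
 *
 *	register_ftrace_function(&my_ops);
 *
 * register_ftrace_function() (defined later in this file) takes
 * ftrace_lock and calls __register_ftrace_function() followed by
 * ftrace_startup().
 */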
 288
 289static int __unregister_ftrace_function(struct ftrace_ops *ops)
 290{
 291        int ret;
 292
 293        if (ftrace_disabled)
 294                return -ENODEV;
 295
 296        if (WARN_ON(!(ops->flags & FTRACE_OPS_FL_ENABLED)))
 297                return -EBUSY;
 298
 299        if (FTRACE_WARN_ON(ops == &global_ops))
 300                return -EINVAL;
 301
 302        if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
 303                ret = remove_ftrace_ops(&ftrace_global_list, ops);
 304                if (!ret && ftrace_global_list == &ftrace_list_end)
 305                        ret = remove_ftrace_ops(&ftrace_ops_list, &global_ops);
 306                if (!ret)
 307                        ops->flags &= ~FTRACE_OPS_FL_ENABLED;
 308        } else
 309                ret = remove_ftrace_ops(&ftrace_ops_list, ops);
 310
 311        if (ret < 0)
 312                return ret;
 313
 314        if (ftrace_enabled)
 315                update_ftrace_function();
 316
 317        /*
 318         * Dynamic ops may be freed, we must make sure that all
 319         * callers are done before leaving this function.
 320         */
 321        if (ops->flags & FTRACE_OPS_FL_DYNAMIC)
 322                synchronize_sched();
 323
 324        return 0;
 325}
 326
 327static void ftrace_update_pid_func(void)
 328{
 329        /* Only do something if we are tracing something */
 330        if (ftrace_trace_function == ftrace_stub)
 331                return;
 332
 333        update_ftrace_function();
 334}
 335
 336#ifdef CONFIG_FUNCTION_PROFILER
 337struct ftrace_profile {
 338        struct hlist_node               node;
 339        unsigned long                   ip;
 340        unsigned long                   counter;
 341#ifdef CONFIG_FUNCTION_GRAPH_TRACER
 342        unsigned long long              time;
 343        unsigned long long              time_squared;
 344#endif
 345};
 346
 347struct ftrace_profile_page {
 348        struct ftrace_profile_page      *next;
 349        unsigned long                   index;
 350        struct ftrace_profile           records[];
 351};
 352
 353struct ftrace_profile_stat {
 354        atomic_t                        disabled;
 355        struct hlist_head               *hash;
 356        struct ftrace_profile_page      *pages;
 357        struct ftrace_profile_page      *start;
 358        struct tracer_stat              stat;
 359};
 360
 361#define PROFILE_RECORDS_SIZE                                            \
 362        (PAGE_SIZE - offsetof(struct ftrace_profile_page, records))
 363
 364#define PROFILES_PER_PAGE                                       \
 365        (PROFILE_RECORDS_SIZE / sizeof(struct ftrace_profile))
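
/*
 * Rough numbers (arch and config dependent): on a 64-bit build with 4K
 * pages and the graph tracer enabled, struct ftrace_profile is about 48
 * bytes, so PROFILES_PER_PAGE works out to roughly 85 records per page.
 */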
 366
 367static int ftrace_profile_bits __read_mostly;
 368static int ftrace_profile_enabled __read_mostly;
 369
 370/* ftrace_profile_lock - synchronize the enable and disable of the profiler */
 371static DEFINE_MUTEX(ftrace_profile_lock);
 372
 373static DEFINE_PER_CPU(struct ftrace_profile_stat, ftrace_profile_stats);
 374
 375#define FTRACE_PROFILE_HASH_SIZE 1024 /* must be power of 2 */
 376
 377static void *
 378function_stat_next(void *v, int idx)
 379{
 380        struct ftrace_profile *rec = v;
 381        struct ftrace_profile_page *pg;
 382
 383        pg = (struct ftrace_profile_page *)((unsigned long)rec & PAGE_MASK);
 384
 385 again:
 386        if (idx != 0)
 387                rec++;
 388
 389        if ((void *)rec >= (void *)&pg->records[pg->index]) {
 390                pg = pg->next;
 391                if (!pg)
 392                        return NULL;
 393                rec = &pg->records[0];
 394                if (!rec->counter)
 395                        goto again;
 396        }
 397
 398        return rec;
 399}
 400
 401static void *function_stat_start(struct tracer_stat *trace)
 402{
 403        struct ftrace_profile_stat *stat =
 404                container_of(trace, struct ftrace_profile_stat, stat);
 405
 406        if (!stat || !stat->start)
 407                return NULL;
 408
 409        return function_stat_next(&stat->start->records[0], 0);
 410}
 411
 412#ifdef CONFIG_FUNCTION_GRAPH_TRACER
 413/* function graph compares on total time */
 414static int function_stat_cmp(void *p1, void *p2)
 415{
 416        struct ftrace_profile *a = p1;
 417        struct ftrace_profile *b = p2;
 418
 419        if (a->time < b->time)
 420                return -1;
 421        if (a->time > b->time)
 422                return 1;
 423        else
 424                return 0;
 425}
 426#else
 427/* without function graph, compare against the hit count */
 428static int function_stat_cmp(void *p1, void *p2)
 429{
 430        struct ftrace_profile *a = p1;
 431        struct ftrace_profile *b = p2;
 432
 433        if (a->counter < b->counter)
 434                return -1;
 435        if (a->counter > b->counter)
 436                return 1;
 437        else
 438                return 0;
 439}
 440#endif
 441
 442static int function_stat_headers(struct seq_file *m)
 443{
 444#ifdef CONFIG_FUNCTION_GRAPH_TRACER
 445        seq_printf(m, "  Function                               "
 446                   "Hit    Time            Avg             s^2\n"
 447                      "  --------                               "
 448                   "---    ----            ---             ---\n");
 449#else
 450        seq_printf(m, "  Function                               Hit\n"
 451                      "  --------                               ---\n");
 452#endif
 453        return 0;
 454}
 455
 456static int function_stat_show(struct seq_file *m, void *v)
 457{
 458        struct ftrace_profile *rec = v;
 459        char str[KSYM_SYMBOL_LEN];
 460        int ret = 0;
 461#ifdef CONFIG_FUNCTION_GRAPH_TRACER
 462        static struct trace_seq s;
 463        unsigned long long avg;
 464        unsigned long long stddev;
 465#endif
 466        mutex_lock(&ftrace_profile_lock);
 467
 468        /* we raced with function_profile_reset() */
 469        if (unlikely(rec->counter == 0)) {
 470                ret = -EBUSY;
 471                goto out;
 472        }
 473
 474        kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
 475        seq_printf(m, "  %-30.30s  %10lu", str, rec->counter);
 476
 477#ifdef CONFIG_FUNCTION_GRAPH_TRACER
 478        seq_printf(m, "    ");
 479        avg = rec->time;
 480        do_div(avg, rec->counter);
 481
 482        /* Sample standard deviation (s^2) */
 483        if (rec->counter <= 1)
 484                stddev = 0;
 485        else {
 486                stddev = rec->time_squared - rec->counter * avg * avg;
 487                /*
 488                 * Divide only 1000 for ns^2 -> us^2 conversion.
 489                 * trace_print_graph_duration will divide 1000 again.
 490                 */
 491                do_div(stddev, (rec->counter - 1) * 1000);
 492        }
 493
 494        trace_seq_init(&s);
 495        trace_print_graph_duration(rec->time, &s);
 496        trace_seq_puts(&s, "    ");
 497        trace_print_graph_duration(avg, &s);
 498        trace_seq_puts(&s, "    ");
 499        trace_print_graph_duration(stddev, &s);
 500        trace_print_seq(m, &s);
 501#endif
 502        seq_putc(m, '\n');
 503out:
 504        mutex_unlock(&ftrace_profile_lock);
 505
 506        return ret;
 507}
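
/*
 * Roughly what one row of the resulting output looks like with the graph
 * tracer enabled (the numbers here are made up):
 *
 *   Function                               Hit    Time            Avg             s^2
 *   --------                               ---    ----            ---             ---
 *   schedule                               211    5382.911 us     25.511 us       3.120 us
 */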
 508
 509static void ftrace_profile_reset(struct ftrace_profile_stat *stat)
 510{
 511        struct ftrace_profile_page *pg;
 512
 513        pg = stat->pages = stat->start;
 514
 515        while (pg) {
 516                memset(pg->records, 0, PROFILE_RECORDS_SIZE);
 517                pg->index = 0;
 518                pg = pg->next;
 519        }
 520
 521        memset(stat->hash, 0,
 522               FTRACE_PROFILE_HASH_SIZE * sizeof(struct hlist_head));
 523}
 524
 525int ftrace_profile_pages_init(struct ftrace_profile_stat *stat)
 526{
 527        struct ftrace_profile_page *pg;
 528        int functions;
 529        int pages;
 530        int i;
 531
 532        /* If we already allocated, do nothing */
 533        if (stat->pages)
 534                return 0;
 535
 536        stat->pages = (void *)get_zeroed_page(GFP_KERNEL);
 537        if (!stat->pages)
 538                return -ENOMEM;
 539
 540#ifdef CONFIG_DYNAMIC_FTRACE
 541        functions = ftrace_update_tot_cnt;
 542#else
 543        /*
 544         * We do not know the number of functions that exist because
 545         * dynamic tracing is what counts them. With past experience
 546         * we have around 20K functions. That should be more than enough.
 547         * It is highly unlikely we will execute every function in
 548         * the kernel.
 549         */
 550        functions = 20000;
 551#endif
 552
 553        pg = stat->start = stat->pages;
 554
 555        pages = DIV_ROUND_UP(functions, PROFILES_PER_PAGE);
 556
 557        for (i = 0; i < pages; i++) {
 558                pg->next = (void *)get_zeroed_page(GFP_KERNEL);
 559                if (!pg->next)
 560                        goto out_free;
 561                pg = pg->next;
 562        }
 563
 564        return 0;
 565
 566 out_free:
 567        pg = stat->start;
 568        while (pg) {
 569                unsigned long tmp = (unsigned long)pg;
 570
 571                pg = pg->next;
 572                free_page(tmp);
 573        }
 574
 575        free_page((unsigned long)stat->pages);
 576        stat->pages = NULL;
 577        stat->start = NULL;
 578
 579        return -ENOMEM;
 580}
 581
 582static int ftrace_profile_init_cpu(int cpu)
 583{
 584        struct ftrace_profile_stat *stat;
 585        int size;
 586
 587        stat = &per_cpu(ftrace_profile_stats, cpu);
 588
 589        if (stat->hash) {
 590                /* If the profile is already created, simply reset it */
 591                ftrace_profile_reset(stat);
 592                return 0;
 593        }
 594
 595        /*
 596         * We are profiling all functions, but usually only a few thousand
 597         * functions are hit. We'll make a hash of 1024 items.
 598         */
 599        size = FTRACE_PROFILE_HASH_SIZE;
 600
 601        stat->hash = kzalloc(sizeof(struct hlist_head) * size, GFP_KERNEL);
 602
 603        if (!stat->hash)
 604                return -ENOMEM;
 605
 606        if (!ftrace_profile_bits) {
 607                size--;
 608
 609                for (; size; size >>= 1)
 610                        ftrace_profile_bits++;
 611        }
 612
 613        /* Preallocate the function profiling pages */
 614        if (ftrace_profile_pages_init(stat) < 0) {
 615                kfree(stat->hash);
 616                stat->hash = NULL;
 617                return -ENOMEM;
 618        }
 619
 620        return 0;
 621}
 622
 623static int ftrace_profile_init(void)
 624{
 625        int cpu;
 626        int ret = 0;
 627
 628        for_each_online_cpu(cpu) {
 629                ret = ftrace_profile_init_cpu(cpu);
 630                if (ret)
 631                        break;
 632        }
 633
 634        return ret;
 635}
 636
 637/* interrupts must be disabled */
 638static struct ftrace_profile *
 639ftrace_find_profiled_func(struct ftrace_profile_stat *stat, unsigned long ip)
 640{
 641        struct ftrace_profile *rec;
 642        struct hlist_head *hhd;
 643        struct hlist_node *n;
 644        unsigned long key;
 645
 646        key = hash_long(ip, ftrace_profile_bits);
 647        hhd = &stat->hash[key];
 648
 649        if (hlist_empty(hhd))
 650                return NULL;
 651
 652        hlist_for_each_entry_rcu(rec, n, hhd, node) {
 653                if (rec->ip == ip)
 654                        return rec;
 655        }
 656
 657        return NULL;
 658}
 659
 660static void ftrace_add_profile(struct ftrace_profile_stat *stat,
 661                               struct ftrace_profile *rec)
 662{
 663        unsigned long key;
 664
 665        key = hash_long(rec->ip, ftrace_profile_bits);
 666        hlist_add_head_rcu(&rec->node, &stat->hash[key]);
 667}
 668
 669/*
 670 * The memory is already allocated, this simply finds a new record to use.
 671 */
 672static struct ftrace_profile *
 673ftrace_profile_alloc(struct ftrace_profile_stat *stat, unsigned long ip)
 674{
 675        struct ftrace_profile *rec = NULL;
 676
 677        /* prevent recursion (from NMIs) */
 678        if (atomic_inc_return(&stat->disabled) != 1)
 679                goto out;
 680
 681        /*
 682         * Try to find the function again since an NMI
 683         * could have added it
 684         */
 685        rec = ftrace_find_profiled_func(stat, ip);
 686        if (rec)
 687                goto out;
 688
 689        if (stat->pages->index == PROFILES_PER_PAGE) {
 690                if (!stat->pages->next)
 691                        goto out;
 692                stat->pages = stat->pages->next;
 693        }
 694
 695        rec = &stat->pages->records[stat->pages->index++];
 696        rec->ip = ip;
 697        ftrace_add_profile(stat, rec);
 698
 699 out:
 700        atomic_dec(&stat->disabled);
 701
 702        return rec;
 703}
 704
 705static void
 706function_profile_call(unsigned long ip, unsigned long parent_ip)
 707{
 708        struct ftrace_profile_stat *stat;
 709        struct ftrace_profile *rec;
 710        unsigned long flags;
 711
 712        if (!ftrace_profile_enabled)
 713                return;
 714
 715        local_irq_save(flags);
 716
 717        stat = &__get_cpu_var(ftrace_profile_stats);
 718        if (!stat->hash || !ftrace_profile_enabled)
 719                goto out;
 720
 721        rec = ftrace_find_profiled_func(stat, ip);
 722        if (!rec) {
 723                rec = ftrace_profile_alloc(stat, ip);
 724                if (!rec)
 725                        goto out;
 726        }
 727
 728        rec->counter++;
 729 out:
 730        local_irq_restore(flags);
 731}
 732
 733#ifdef CONFIG_FUNCTION_GRAPH_TRACER
 734static int profile_graph_entry(struct ftrace_graph_ent *trace)
 735{
 736        function_profile_call(trace->func, 0);
 737        return 1;
 738}
 739
 740static void profile_graph_return(struct ftrace_graph_ret *trace)
 741{
 742        struct ftrace_profile_stat *stat;
 743        unsigned long long calltime;
 744        struct ftrace_profile *rec;
 745        unsigned long flags;
 746
 747        local_irq_save(flags);
 748        stat = &__get_cpu_var(ftrace_profile_stats);
 749        if (!stat->hash || !ftrace_profile_enabled)
 750                goto out;
 751
 752        /* If the calltime was zero'd ignore it */
 753        if (!trace->calltime)
 754                goto out;
 755
 756        calltime = trace->rettime - trace->calltime;
 757
 758        if (!(trace_flags & TRACE_ITER_GRAPH_TIME)) {
 759                int index;
 760
 761                index = trace->depth;
 762
 763                /* Append this call time to the parent time to subtract */
 764                if (index)
 765                        current->ret_stack[index - 1].subtime += calltime;
 766
 767                if (current->ret_stack[index].subtime < calltime)
 768                        calltime -= current->ret_stack[index].subtime;
 769                else
 770                        calltime = 0;
 771        }
 772
 773        rec = ftrace_find_profiled_func(stat, trace->func);
 774        if (rec) {
 775                rec->time += calltime;
 776                rec->time_squared += calltime * calltime;
 777        }
 778
 779 out:
 780        local_irq_restore(flags);
 781}
 782
 783static int register_ftrace_profiler(void)
 784{
 785        return register_ftrace_graph(&profile_graph_return,
 786                                     &profile_graph_entry);
 787}
 788
 789static void unregister_ftrace_profiler(void)
 790{
 791        unregister_ftrace_graph();
 792}
 793#else
 794static struct ftrace_ops ftrace_profile_ops __read_mostly = {
 795        .func           = function_profile_call,
 796};
 797
 798static int register_ftrace_profiler(void)
 799{
 800        return register_ftrace_function(&ftrace_profile_ops);
 801}
 802
 803static void unregister_ftrace_profiler(void)
 804{
 805        unregister_ftrace_function(&ftrace_profile_ops);
 806}
 807#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
 808
 809static ssize_t
 810ftrace_profile_write(struct file *filp, const char __user *ubuf,
 811                     size_t cnt, loff_t *ppos)
 812{
 813        unsigned long val;
 814        int ret;
 815
 816        ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
 817        if (ret)
 818                return ret;
 819
 820        val = !!val;
 821
 822        mutex_lock(&ftrace_profile_lock);
 823        if (ftrace_profile_enabled ^ val) {
 824                if (val) {
 825                        ret = ftrace_profile_init();
 826                        if (ret < 0) {
 827                                cnt = ret;
 828                                goto out;
 829                        }
 830
 831                        ret = register_ftrace_profiler();
 832                        if (ret < 0) {
 833                                cnt = ret;
 834                                goto out;
 835                        }
 836                        ftrace_profile_enabled = 1;
 837                } else {
 838                        ftrace_profile_enabled = 0;
 839                        /*
 840                         * unregister_ftrace_profiler calls stop_machine
 841                         * so this acts like a synchronize_sched().
 842                         */
 843                        unregister_ftrace_profiler();
 844                }
 845        }
 846 out:
 847        mutex_unlock(&ftrace_profile_lock);
 848
 849        *ppos += cnt;
 850
 851        return cnt;
 852}
 853
 854static ssize_t
 855ftrace_profile_read(struct file *filp, char __user *ubuf,
 856                     size_t cnt, loff_t *ppos)
 857{
 858        char buf[64];           /* big enough to hold a number */
 859        int r;
 860
 861        r = sprintf(buf, "%u\n", ftrace_profile_enabled);
 862        return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
 863}
 864
 865static const struct file_operations ftrace_profile_fops = {
 866        .open           = tracing_open_generic,
 867        .read           = ftrace_profile_read,
 868        .write          = ftrace_profile_write,
 869        .llseek         = default_llseek,
 870};
 871
 872/* used to initialize the real stat files */
 873static struct tracer_stat function_stats __initdata = {
 874        .name           = "functions",
 875        .stat_start     = function_stat_start,
 876        .stat_next      = function_stat_next,
 877        .stat_cmp       = function_stat_cmp,
 878        .stat_headers   = function_stat_headers,
 879        .stat_show      = function_stat_show
 880};
 881
 882static __init void ftrace_profile_debugfs(struct dentry *d_tracer)
 883{
 884        struct ftrace_profile_stat *stat;
 885        struct dentry *entry;
 886        char *name;
 887        int ret;
 888        int cpu;
 889
 890        for_each_possible_cpu(cpu) {
 891                stat = &per_cpu(ftrace_profile_stats, cpu);
 892
 893                /* allocate enough for function name + cpu number */
 894                name = kmalloc(32, GFP_KERNEL);
 895                if (!name) {
 896                        /*
 897                         * The files created are permanent; even if something
 898                         * goes wrong here we still do not free this memory.
 899                         */
 900                        WARN(1,
 901                             "Could not allocate stat file for cpu %d\n",
 902                             cpu);
 903                        return;
 904                }
 905                stat->stat = function_stats;
 906                snprintf(name, 32, "function%d", cpu);
 907                stat->stat.name = name;
 908                ret = register_stat_tracer(&stat->stat);
 909                if (ret) {
 910                        WARN(1,
 911                             "Could not register function stat for cpu %d\n",
 912                             cpu);
 913                        kfree(name);
 914                        return;
 915                }
 916        }
 917
 918        entry = debugfs_create_file("function_profile_enabled", 0644,
 919                                    d_tracer, NULL, &ftrace_profile_fops);
 920        if (!entry)
 921                pr_warning("Could not create debugfs "
 922                           "'function_profile_enabled' entry\n");
 923}
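
/*
 * Typical use of the files created above, assuming debugfs is mounted at
 * /sys/kernel/debug:
 *
 *	echo 1 > /sys/kernel/debug/tracing/function_profile_enabled
 *	cat /sys/kernel/debug/tracing/trace_stat/function0
 */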
 924
 925#else /* CONFIG_FUNCTION_PROFILER */
 926static __init void ftrace_profile_debugfs(struct dentry *d_tracer)
 927{
 928}
 929#endif /* CONFIG_FUNCTION_PROFILER */
 930
 931static struct pid * const ftrace_swapper_pid = &init_struct_pid;
 932
 933#ifdef CONFIG_DYNAMIC_FTRACE
 934
 935#ifndef CONFIG_FTRACE_MCOUNT_RECORD
 936# error Dynamic ftrace depends on MCOUNT_RECORD
 937#endif
 938
 939static struct hlist_head ftrace_func_hash[FTRACE_FUNC_HASHSIZE] __read_mostly;
 940
 941struct ftrace_func_probe {
 942        struct hlist_node       node;
 943        struct ftrace_probe_ops *ops;
 944        unsigned long           flags;
 945        unsigned long           ip;
 946        void                    *data;
 947        struct rcu_head         rcu;
 948};
 949
 950enum {
 951        FTRACE_ENABLE_CALLS             = (1 << 0),
 952        FTRACE_DISABLE_CALLS            = (1 << 1),
 953        FTRACE_UPDATE_TRACE_FUNC        = (1 << 2),
 954        FTRACE_START_FUNC_RET           = (1 << 3),
 955        FTRACE_STOP_FUNC_RET            = (1 << 4),
 956};
 957struct ftrace_func_entry {
 958        struct hlist_node hlist;
 959        unsigned long ip;
 960};
 961
 962struct ftrace_hash {
 963        unsigned long           size_bits;
 964        struct hlist_head       *buckets;
 965        unsigned long           count;
 966        struct rcu_head         rcu;
 967};
 968
 969/*
 970 * We make these constant because no one should touch them,
 971 * but they are used as the default "empty hash", to avoid allocating
 972 * it all the time. These are in a read only section such that if
 973 * anyone does try to modify it, it will cause an exception.
 974 */
 975static const struct hlist_head empty_buckets[1];
 976static const struct ftrace_hash empty_hash = {
 977        .buckets = (struct hlist_head *)empty_buckets,
 978};
 979#define EMPTY_HASH      ((struct ftrace_hash *)&empty_hash)
 980
 981static struct ftrace_ops global_ops = {
 982        .func                   = ftrace_stub,
 983        .notrace_hash           = EMPTY_HASH,
 984        .filter_hash            = EMPTY_HASH,
 985};
 986
 987static struct dyn_ftrace *ftrace_new_addrs;
 988
 989static DEFINE_MUTEX(ftrace_regex_lock);
 990
 991struct ftrace_page {
 992        struct ftrace_page      *next;
 993        int                     index;
 994        struct dyn_ftrace       records[];
 995};
 996
 997#define ENTRIES_PER_PAGE \
 998  ((PAGE_SIZE - sizeof(struct ftrace_page)) / sizeof(struct dyn_ftrace))
 999
1000/* estimate from running different kernels */
1001#define NR_TO_INIT              10000
1002
1003static struct ftrace_page       *ftrace_pages_start;
1004static struct ftrace_page       *ftrace_pages;
1005
1006static struct dyn_ftrace *ftrace_free_records;
1007
1008static struct ftrace_func_entry *
1009ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip)
1010{
1011        unsigned long key;
1012        struct ftrace_func_entry *entry;
1013        struct hlist_head *hhd;
1014        struct hlist_node *n;
1015
1016        if (!hash->count)
1017                return NULL;
1018
1019        if (hash->size_bits > 0)
1020                key = hash_long(ip, hash->size_bits);
1021        else
1022                key = 0;
1023
1024        hhd = &hash->buckets[key];
1025
1026        hlist_for_each_entry_rcu(entry, n, hhd, hlist) {
1027                if (entry->ip == ip)
1028                        return entry;
1029        }
1030        return NULL;
1031}
1032
1033static void __add_hash_entry(struct ftrace_hash *hash,
1034                             struct ftrace_func_entry *entry)
1035{
1036        struct hlist_head *hhd;
1037        unsigned long key;
1038
1039        if (hash->size_bits)
1040                key = hash_long(entry->ip, hash->size_bits);
1041        else
1042                key = 0;
1043
1044        hhd = &hash->buckets[key];
1045        hlist_add_head(&entry->hlist, hhd);
1046        hash->count++;
1047}
1048
1049static int add_hash_entry(struct ftrace_hash *hash, unsigned long ip)
1050{
1051        struct ftrace_func_entry *entry;
1052
1053        entry = kmalloc(sizeof(*entry), GFP_KERNEL);
1054        if (!entry)
1055                return -ENOMEM;
1056
1057        entry->ip = ip;
1058        __add_hash_entry(hash, entry);
1059
1060        return 0;
1061}
1062
1063static void
1064free_hash_entry(struct ftrace_hash *hash,
1065                  struct ftrace_func_entry *entry)
1066{
1067        hlist_del(&entry->hlist);
1068        kfree(entry);
1069        hash->count--;
1070}
1071
1072static void
1073remove_hash_entry(struct ftrace_hash *hash,
1074                  struct ftrace_func_entry *entry)
1075{
1076        hlist_del(&entry->hlist);
1077        hash->count--;
1078}
1079
1080static void ftrace_hash_clear(struct ftrace_hash *hash)
1081{
1082        struct hlist_head *hhd;
1083        struct hlist_node *tp, *tn;
1084        struct ftrace_func_entry *entry;
1085        int size = 1 << hash->size_bits;
1086        int i;
1087
1088        if (!hash->count)
1089                return;
1090
1091        for (i = 0; i < size; i++) {
1092                hhd = &hash->buckets[i];
1093                hlist_for_each_entry_safe(entry, tp, tn, hhd, hlist)
1094                        free_hash_entry(hash, entry);
1095        }
1096        FTRACE_WARN_ON(hash->count);
1097}
1098
1099static void free_ftrace_hash(struct ftrace_hash *hash)
1100{
1101        if (!hash || hash == EMPTY_HASH)
1102                return;
1103        ftrace_hash_clear(hash);
1104        kfree(hash->buckets);
1105        kfree(hash);
1106}
1107
1108static void __free_ftrace_hash_rcu(struct rcu_head *rcu)
1109{
1110        struct ftrace_hash *hash;
1111
1112        hash = container_of(rcu, struct ftrace_hash, rcu);
1113        free_ftrace_hash(hash);
1114}
1115
1116static void free_ftrace_hash_rcu(struct ftrace_hash *hash)
1117{
1118        if (!hash || hash == EMPTY_HASH)
1119                return;
1120        call_rcu_sched(&hash->rcu, __free_ftrace_hash_rcu);
1121}
1122
1123static struct ftrace_hash *alloc_ftrace_hash(int size_bits)
1124{
1125        struct ftrace_hash *hash;
1126        int size;
1127
1128        hash = kzalloc(sizeof(*hash), GFP_KERNEL);
1129        if (!hash)
1130                return NULL;
1131
1132        size = 1 << size_bits;
1133        hash->buckets = kzalloc(sizeof(*hash->buckets) * size, GFP_KERNEL);
1134
1135        if (!hash->buckets) {
1136                kfree(hash);
1137                return NULL;
1138        }
1139
1140        hash->size_bits = size_bits;
1141
1142        return hash;
1143}
1144
1145static struct ftrace_hash *
1146alloc_and_copy_ftrace_hash(int size_bits, struct ftrace_hash *hash)
1147{
1148        struct ftrace_func_entry *entry;
1149        struct ftrace_hash *new_hash;
1150        struct hlist_node *tp;
1151        int size;
1152        int ret;
1153        int i;
1154
1155        new_hash = alloc_ftrace_hash(size_bits);
1156        if (!new_hash)
1157                return NULL;
1158
1159        /* Empty hash? */
1160        if (!hash || !hash->count)
1161                return new_hash;
1162
1163        size = 1 << hash->size_bits;
1164        for (i = 0; i < size; i++) {
1165                hlist_for_each_entry(entry, tp, &hash->buckets[i], hlist) {
1166                        ret = add_hash_entry(new_hash, entry->ip);
1167                        if (ret < 0)
1168                                goto free_hash;
1169                }
1170        }
1171
1172        FTRACE_WARN_ON(new_hash->count != hash->count);
1173
1174        return new_hash;
1175
1176 free_hash:
1177        free_ftrace_hash(new_hash);
1178        return NULL;
1179}
1180
1181static void
1182ftrace_hash_rec_disable(struct ftrace_ops *ops, int filter_hash);
1183static void
1184ftrace_hash_rec_enable(struct ftrace_ops *ops, int filter_hash);
1185
1186static int
1187ftrace_hash_move(struct ftrace_ops *ops, int enable,
1188                 struct ftrace_hash **dst, struct ftrace_hash *src)
1189{
1190        struct ftrace_func_entry *entry;
1191        struct hlist_node *tp, *tn;
1192        struct hlist_head *hhd;
1193        struct ftrace_hash *old_hash;
1194        struct ftrace_hash *new_hash;
1195        unsigned long key;
1196        int size = src->count;
1197        int bits = 0;
1198        int ret;
1199        int i;
1200
1201        /*
1202         * Remove the current set, update the hash and add
1203         * them back.
1204         */
1205        ftrace_hash_rec_disable(ops, enable);
1206
1207        /*
1208         * If the new source is empty, just free dst and assign it
1209         * the empty_hash.
1210         */
1211        if (!src->count) {
1212                free_ftrace_hash_rcu(*dst);
1213                rcu_assign_pointer(*dst, EMPTY_HASH);
1214                return 0;
1215        }
1216
1217        /*
1218         * Make the hash size about 1/2 the # found
1219         */
1220        for (size /= 2; size; size >>= 1)
1221                bits++;
1222
1223        /* Don't allocate too much */
1224        if (bits > FTRACE_HASH_MAX_BITS)
1225                bits = FTRACE_HASH_MAX_BITS;
1226
1227        ret = -ENOMEM;
1228        new_hash = alloc_ftrace_hash(bits);
1229        if (!new_hash)
1230                goto out;
1231
1232        size = 1 << src->size_bits;
1233        for (i = 0; i < size; i++) {
1234                hhd = &src->buckets[i];
1235                hlist_for_each_entry_safe(entry, tp, tn, hhd, hlist) {
1236                        if (bits > 0)
1237                                key = hash_long(entry->ip, bits);
1238                        else
1239                                key = 0;
1240                        remove_hash_entry(src, entry);
1241                        __add_hash_entry(new_hash, entry);
1242                }
1243        }
1244
1245        old_hash = *dst;
1246        rcu_assign_pointer(*dst, new_hash);
1247        free_ftrace_hash_rcu(old_hash);
1248
1249        ret = 0;
1250 out:
1251        /*
1252         * Enable regardless of ret:
1253         *  On success, we enable the new hash.
1254         *  On failure, we re-enable the original hash.
1255         */
1256        ftrace_hash_rec_enable(ops, enable);
1257
1258        return ret;
1259}
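
/*
 * Sizing example for ftrace_hash_move(): a src hash with 100 entries
 * starts the loop at 100/2 = 50, which takes 6 iterations to shift down
 * to zero, so bits == 6 and the new hash gets 64 buckets.
 */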
1260
1261/*
1262 * Test the hashes for this ops to see if we want to call
1263 * the ops->func or not.
1264 *
1265 * It's a match if the ip is in the ops->filter_hash or
1266 * the filter_hash does not exist or is empty,
1267 *  AND
1268 * the ip is not in the ops->notrace_hash.
1269 *
1270 * This needs to be called with preemption disabled as
1271 * the hashes are freed with call_rcu_sched().
1272 */
1273static int
1274ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip)
1275{
1276        struct ftrace_hash *filter_hash;
1277        struct ftrace_hash *notrace_hash;
1278        int ret;
1279
1280        filter_hash = rcu_dereference_raw(ops->filter_hash);
1281        notrace_hash = rcu_dereference_raw(ops->notrace_hash);
1282
1283        if ((!filter_hash || !filter_hash->count ||
1284             ftrace_lookup_ip(filter_hash, ip)) &&
1285            (!notrace_hash || !notrace_hash->count ||
1286             !ftrace_lookup_ip(notrace_hash, ip)))
1287                ret = 1;
1288        else
1289                ret = 0;
1290
1291        return ret;
1292}
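
/*
 * Concrete reading of the rules above (kmem_cache_alloc is just an
 * arbitrary example symbol): an ops whose filter_hash contains only
 * kmem_cache_alloc gets its ->func called for that one function; an ops
 * with an empty filter_hash and kmem_cache_alloc in its notrace_hash
 * gets ->func called for every traced function except that one.
 */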
1293
1294/*
1295 * This is a double for. Do not use 'break' to break out of the loop,
1296 * you must use a goto.
1297 */
1298#define do_for_each_ftrace_rec(pg, rec)                                 \
1299        for (pg = ftrace_pages_start; pg; pg = pg->next) {              \
1300                int _____i;                                             \
1301                for (_____i = 0; _____i < pg->index; _____i++) {        \
1302                        rec = &pg->records[_____i];
1303
1304#define while_for_each_ftrace_rec()             \
1305                }                               \
1306        }
1307
1308static void __ftrace_hash_rec_update(struct ftrace_ops *ops,
1309                                     int filter_hash,
1310                                     bool inc)
1311{
1312        struct ftrace_hash *hash;
1313        struct ftrace_hash *other_hash;
1314        struct ftrace_page *pg;
1315        struct dyn_ftrace *rec;
1316        int count = 0;
1317        int all = 0;
1318
1319        /* Only update if the ops has been registered */
1320        if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
1321                return;
1322
1323        /*
1324         * In the filter_hash case:
1325         *   If the count is zero, we update all records.
1326         *   Otherwise we just update the items in the hash.
1327         *
1328         * In the notrace_hash case:
1329         *   We enable the update in the hash.
1330         *   As disabling notrace means enabling the tracing,
1331         *   and enabling notrace means disabling, the inc variable
 1332         *   gets inverted.
1333         */
1334        if (filter_hash) {
1335                hash = ops->filter_hash;
1336                other_hash = ops->notrace_hash;
1337                if (!hash || !hash->count)
1338                        all = 1;
1339        } else {
1340                inc = !inc;
1341                hash = ops->notrace_hash;
1342                other_hash = ops->filter_hash;
1343                /*
1344                 * If the notrace hash has no items,
1345                 * then there's nothing to do.
1346                 */
1347                if (hash && !hash->count)
1348                        return;
1349        }
1350
1351        do_for_each_ftrace_rec(pg, rec) {
1352                int in_other_hash = 0;
1353                int in_hash = 0;
1354                int match = 0;
1355
1356                if (all) {
1357                        /*
1358                         * Only the filter_hash affects all records.
1359                         * Update if the record is not in the notrace hash.
1360                         */
1361                        if (!other_hash || !ftrace_lookup_ip(other_hash, rec->ip))
1362                                match = 1;
1363                } else {
1364                        in_hash = hash && !!ftrace_lookup_ip(hash, rec->ip);
1365                        in_other_hash = other_hash && !!ftrace_lookup_ip(other_hash, rec->ip);
1366
 1367                        /*
 1368                         * filter_hash: the ip must be in this hash and not in the other hash;
 1369                         * notrace_hash: the ip must also be in the other hash, or that hash is empty. */
1370                        if (filter_hash && in_hash && !in_other_hash)
1371                                match = 1;
1372                        else if (!filter_hash && in_hash &&
1373                                 (in_other_hash || !other_hash->count))
1374                                match = 1;
1375                }
1376                if (!match)
1377                        continue;
1378
1379                if (inc) {
1380                        rec->flags++;
1381                        if (FTRACE_WARN_ON((rec->flags & ~FTRACE_FL_MASK) == FTRACE_REF_MAX))
1382                                return;
1383                } else {
1384                        if (FTRACE_WARN_ON((rec->flags & ~FTRACE_FL_MASK) == 0))
1385                                return;
1386                        rec->flags--;
1387                }
1388                count++;
1389                /* Shortcut, if we handled all records, we are done. */
1390                if (!all && count == hash->count)
1391                        return;
1392        } while_for_each_ftrace_rec();
1393}
1394
1395static void ftrace_hash_rec_disable(struct ftrace_ops *ops,
1396                                    int filter_hash)
1397{
1398        __ftrace_hash_rec_update(ops, filter_hash, 0);
1399}
1400
1401static void ftrace_hash_rec_enable(struct ftrace_ops *ops,
1402                                   int filter_hash)
1403{
1404        __ftrace_hash_rec_update(ops, filter_hash, 1);
1405}
1406
1407static void ftrace_free_rec(struct dyn_ftrace *rec)
1408{
1409        rec->freelist = ftrace_free_records;
1410        ftrace_free_records = rec;
1411        rec->flags |= FTRACE_FL_FREE;
1412}
1413
1414static struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip)
1415{
1416        struct dyn_ftrace *rec;
1417
1418        /* First check for freed records */
1419        if (ftrace_free_records) {
1420                rec = ftrace_free_records;
1421
1422                if (unlikely(!(rec->flags & FTRACE_FL_FREE))) {
1423                        FTRACE_WARN_ON_ONCE(1);
1424                        ftrace_free_records = NULL;
1425                        return NULL;
1426                }
1427
1428                ftrace_free_records = rec->freelist;
1429                memset(rec, 0, sizeof(*rec));
1430                return rec;
1431        }
1432
1433        if (ftrace_pages->index == ENTRIES_PER_PAGE) {
1434                if (!ftrace_pages->next) {
1435                        /* allocate another page */
1436                        ftrace_pages->next =
1437                                (void *)get_zeroed_page(GFP_KERNEL);
1438                        if (!ftrace_pages->next)
1439                                return NULL;
1440                }
1441                ftrace_pages = ftrace_pages->next;
1442        }
1443
1444        return &ftrace_pages->records[ftrace_pages->index++];
1445}
1446
1447static struct dyn_ftrace *
1448ftrace_record_ip(unsigned long ip)
1449{
1450        struct dyn_ftrace *rec;
1451
1452        if (ftrace_disabled)
1453                return NULL;
1454
1455        rec = ftrace_alloc_dyn_node(ip);
1456        if (!rec)
1457                return NULL;
1458
1459        rec->ip = ip;
1460        rec->newlist = ftrace_new_addrs;
1461        ftrace_new_addrs = rec;
1462
1463        return rec;
1464}
1465
1466static void print_ip_ins(const char *fmt, unsigned char *p)
1467{
1468        int i;
1469
1470        printk(KERN_CONT "%s", fmt);
1471
1472        for (i = 0; i < MCOUNT_INSN_SIZE; i++)
1473                printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]);
1474}
1475
1476static void ftrace_bug(int failed, unsigned long ip)
1477{
1478        switch (failed) {
1479        case -EFAULT:
1480                FTRACE_WARN_ON_ONCE(1);
1481                pr_info("ftrace faulted on modifying ");
1482                print_ip_sym(ip);
1483                break;
1484        case -EINVAL:
1485                FTRACE_WARN_ON_ONCE(1);
1486                pr_info("ftrace failed to modify ");
1487                print_ip_sym(ip);
1488                print_ip_ins(" actual: ", (unsigned char *)ip);
1489                printk(KERN_CONT "\n");
1490                break;
1491        case -EPERM:
1492                FTRACE_WARN_ON_ONCE(1);
1493                pr_info("ftrace faulted on writing ");
1494                print_ip_sym(ip);
1495                break;
1496        default:
1497                FTRACE_WARN_ON_ONCE(1);
1498                pr_info("ftrace faulted on unknown error ");
1499                print_ip_sym(ip);
1500        }
1501}
1502
1503
1504/* Return 1 if the address range is reserved for ftrace */
1505int ftrace_text_reserved(void *start, void *end)
1506{
1507        struct dyn_ftrace *rec;
1508        struct ftrace_page *pg;
1509
1510        do_for_each_ftrace_rec(pg, rec) {
1511                if (rec->ip <= (unsigned long)end &&
1512                    rec->ip + MCOUNT_INSN_SIZE > (unsigned long)start)
1513                        return 1;
1514        } while_for_each_ftrace_rec();
1515        return 0;
1516}
1517
1518
1519static int
1520__ftrace_replace_code(struct dyn_ftrace *rec, int enable)
1521{
1522        unsigned long ftrace_addr;
1523        unsigned long flag = 0UL;
1524
1525        ftrace_addr = (unsigned long)FTRACE_ADDR;
1526
1527        /*
1528         * If we are enabling tracing:
1529         *
1530         *   If the record has a ref count, then we need to enable it
1531         *   because someone is using it.
1532         *
1533         *   Otherwise we make sure its disabled.
 1534         *   Otherwise we make sure it's disabled.
1535         * If we are disabling tracing, then disable all records that
1536         * are enabled.
1537         */
1538        if (enable && (rec->flags & ~FTRACE_FL_MASK))
1539                flag = FTRACE_FL_ENABLED;
1540
1541        /* If the state of this record hasn't changed, then do nothing */
1542        if ((rec->flags & FTRACE_FL_ENABLED) == flag)
1543                return 0;
1544
1545        if (flag) {
1546                rec->flags |= FTRACE_FL_ENABLED;
1547                return ftrace_make_call(rec, ftrace_addr);
1548        }
1549
1550        rec->flags &= ~FTRACE_FL_ENABLED;
1551        return ftrace_make_nop(NULL, rec, ftrace_addr);
1552}
1553
1554static void ftrace_replace_code(int enable)
1555{
1556        struct dyn_ftrace *rec;
1557        struct ftrace_page *pg;
1558        int failed;
1559
1560        if (unlikely(ftrace_disabled))
1561                return;
1562
1563        do_for_each_ftrace_rec(pg, rec) {
1564                /* Skip over free records */
1565                if (rec->flags & FTRACE_FL_FREE)
1566                        continue;
1567
1568                failed = __ftrace_replace_code(rec, enable);
1569                if (failed) {
1570                        ftrace_bug(failed, rec->ip);
1571                        /* Stop processing */
1572                        return;
1573                }
1574        } while_for_each_ftrace_rec();
1575}
1576
1577static int
1578ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
1579{
1580        unsigned long ip;
1581        int ret;
1582
1583        ip = rec->ip;
1584
1585        if (unlikely(ftrace_disabled))
1586                return 0;
1587
1588        ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
1589        if (ret) {
1590                ftrace_bug(ret, ip);
1591                return 0;
1592        }
1593        return 1;
1594}
1595
1596/*
1597 * archs can override this function if they must do something
 1598 * before the code modification is performed.
1599 */
1600int __weak ftrace_arch_code_modify_prepare(void)
1601{
1602        return 0;
1603}
1604
1605/*
1606 * archs can override this function if they must do something
 1607 * after the code modification is performed.
1608 */
1609int __weak ftrace_arch_code_modify_post_process(void)
1610{
1611        return 0;
1612}
1613
1614static int __ftrace_modify_code(void *data)
1615{
1616        int *command = data;
1617
1618        /*
1619         * Do not call function tracer while we update the code.
 1620         * We are in stop_machine(), so there is no need to worry about races.
1621         */
1622        function_trace_stop++;
1623
1624        if (*command & FTRACE_ENABLE_CALLS)
1625                ftrace_replace_code(1);
1626        else if (*command & FTRACE_DISABLE_CALLS)
1627                ftrace_replace_code(0);
1628
1629        if (*command & FTRACE_UPDATE_TRACE_FUNC)
1630                ftrace_update_ftrace_func(ftrace_trace_function);
1631
1632        if (*command & FTRACE_START_FUNC_RET)
1633                ftrace_enable_ftrace_graph_caller();
1634        else if (*command & FTRACE_STOP_FUNC_RET)
1635                ftrace_disable_ftrace_graph_caller();
1636
1637#ifndef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
1638        /*
1639         * For archs that call ftrace_test_stop_func(), we must
1640         * wait till after we update all the function callers
1641         * before we update the callback. This keeps different
1642         * ops that record different functions from corrupting
1643         * each other.
1644         */
1645        __ftrace_trace_function = __ftrace_trace_function_delay;
1646#endif
1647        function_trace_stop--;
1648
1649        return 0;
1650}
1651
1652static void ftrace_run_update_code(int command)
1653{
1654        int ret;
1655
1656        ret = ftrace_arch_code_modify_prepare();
1657        FTRACE_WARN_ON(ret);
1658        if (ret)
1659                return;
1660
1661        stop_machine(__ftrace_modify_code, &command, NULL);
1662
1663        ret = ftrace_arch_code_modify_post_process();
1664        FTRACE_WARN_ON(ret);
1665}
1666
1667static ftrace_func_t saved_ftrace_func;
1668static int ftrace_start_up;
1669static int global_start_up;
1670
1671static void ftrace_startup_enable(int command)
1672{
1673        if (saved_ftrace_func != ftrace_trace_function) {
1674                saved_ftrace_func = ftrace_trace_function;
1675                command |= FTRACE_UPDATE_TRACE_FUNC;
1676        }
1677
1678        if (!command || !ftrace_enabled)
1679                return;
1680
1681        ftrace_run_update_code(command);
1682}
1683
1684static int ftrace_startup(struct ftrace_ops *ops, int command)
1685{
1686        bool hash_enable = true;
1687
1688        if (unlikely(ftrace_disabled))
1689                return -ENODEV;
1690
1691        ftrace_start_up++;
1692        command |= FTRACE_ENABLE_CALLS;
1693
1694        /* ops marked global share the filter hashes */
1695        if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
1696                ops = &global_ops;
1697                /* Don't update hash if global is already set */
1698                if (global_start_up)
1699                        hash_enable = false;
1700                global_start_up++;
1701        }
1702
1703        ops->flags |= FTRACE_OPS_FL_ENABLED;
1704        if (hash_enable)
1705                ftrace_hash_rec_enable(ops, 1);
1706
1707        ftrace_startup_enable(command);
1708
1709        return 0;
1710}
1711
1712static void ftrace_shutdown(struct ftrace_ops *ops, int command)
1713{
1714        bool hash_disable = true;
1715
1716        if (unlikely(ftrace_disabled))
1717                return;
1718
1719        ftrace_start_up--;
1720        /*
 1721         * Just warn in case of imbalance; no need to kill ftrace. It's not
 1722         * critical, but the ftrace_call callers may never be nopped again
 1723         * after further ftrace uses.
1724         */
1725        WARN_ON_ONCE(ftrace_start_up < 0);
1726
1727        if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
1728                ops = &global_ops;
1729                global_start_up--;
1730                WARN_ON_ONCE(global_start_up < 0);
1731                /* Don't update hash if global still has users */
1732                if (global_start_up) {
1733                        WARN_ON_ONCE(!ftrace_start_up);
1734                        hash_disable = false;
1735                }
1736        }
1737
1738        if (hash_disable)
1739                ftrace_hash_rec_disable(ops, 1);
1740
1741        if (ops != &global_ops || !global_start_up)
1742                ops->flags &= ~FTRACE_OPS_FL_ENABLED;
1743
1744        if (!ftrace_start_up)
1745                command |= FTRACE_DISABLE_CALLS;
1746
1747        if (saved_ftrace_func != ftrace_trace_function) {
1748                saved_ftrace_func = ftrace_trace_function;
1749                command |= FTRACE_UPDATE_TRACE_FUNC;
1750        }
1751
1752        if (!command || !ftrace_enabled)
1753                return;
1754
1755        ftrace_run_update_code(command);
1756}
1757
1758static void ftrace_startup_sysctl(void)
1759{
1760        if (unlikely(ftrace_disabled))
1761                return;
1762
1763        /* Force update next time */
1764        saved_ftrace_func = NULL;
1765        /* ftrace_start_up is true if we want ftrace running */
1766        if (ftrace_start_up)
1767                ftrace_run_update_code(FTRACE_ENABLE_CALLS);
1768}
1769
1770static void ftrace_shutdown_sysctl(void)
1771{
1772        if (unlikely(ftrace_disabled))
1773                return;
1774
1775        /* ftrace_start_up is true if ftrace is running */
1776        if (ftrace_start_up)
1777                ftrace_run_update_code(FTRACE_DISABLE_CALLS);
1778}
1779
1780static cycle_t          ftrace_update_time;
1781static unsigned long    ftrace_update_cnt;
1782unsigned long           ftrace_update_tot_cnt;
1783
1784static int ops_traces_mod(struct ftrace_ops *ops)
1785{
1786        struct ftrace_hash *hash;
1787
1788        hash = ops->filter_hash;
1789        return !!(!hash || !hash->count);
1790}
1791
1792static int ftrace_update_code(struct module *mod)
1793{
1794        struct dyn_ftrace *p;
1795        cycle_t start, stop;
1796        unsigned long ref = 0;
1797
1798        /*
1799         * When adding a module, we need to check if tracers are
1800         * currently enabled and if they are set to trace all functions.
1801         * If they are, we need to enable the module functions as well
1802         * as update the reference counts for those function records.
1803         */
1804        if (mod) {
1805                struct ftrace_ops *ops;
1806
1807                for (ops = ftrace_ops_list;
1808                     ops != &ftrace_list_end; ops = ops->next) {
1809                        if (ops->flags & FTRACE_OPS_FL_ENABLED &&
1810                            ops_traces_mod(ops))
1811                                ref++;
1812                }
1813        }
1814
1815        start = ftrace_now(raw_smp_processor_id());
1816        ftrace_update_cnt = 0;
1817
1818        while (ftrace_new_addrs) {
1819
1820                /* If something went wrong, bail without enabling anything */
1821                if (unlikely(ftrace_disabled))
1822                        return -1;
1823
1824                p = ftrace_new_addrs;
1825                ftrace_new_addrs = p->newlist;
1826                p->flags = ref;
1827
1828                /*
1829                 * Do the initial record conversion from mcount jump
1830                 * to the NOP instructions.
1831                 */
1832                if (!ftrace_code_disable(mod, p)) {
1833                        ftrace_free_rec(p);
1834                        /* Game over */
1835                        break;
1836                }
1837
1838                ftrace_update_cnt++;
1839
1840                /*
1841                 * If the tracing is enabled, go ahead and enable the record.
1842                 *
1843                 * The reason not to enable the record immediately is that
1844                 * ftrace_make_nop/ftrace_make_call inherently check for the
1845                 * correct previous instructions.  Doing the NOP conversion
1846                 * first puts the module into the correct state, so the
1847                 * ftrace_make_call check passes.
1848                 */
1849                if (ftrace_start_up && ref) {
1850                        int failed = __ftrace_replace_code(p, 1);
1851                        if (failed) {
1852                                ftrace_bug(failed, p->ip);
1853                                ftrace_free_rec(p);
1854                        }
1855                }
1856        }
1857
1858        stop = ftrace_now(raw_smp_processor_id());
1859        ftrace_update_time = stop - start;
1860        ftrace_update_tot_cnt += ftrace_update_cnt;
1861
1862        return 0;
1863}
1864
1865static int __init ftrace_dyn_table_alloc(unsigned long num_to_init)
1866{
1867        struct ftrace_page *pg;
1868        int cnt;
1869        int i;
1870
1871        /* allocate a few pages */
1872        ftrace_pages_start = (void *)get_zeroed_page(GFP_KERNEL);
1873        if (!ftrace_pages_start)
1874                return -1;
1875
1876        /*
1877         * Allocate a few more pages.
1878         *
1879         * TODO: have some parser search vmlinux before
1880         *   final linking to find all calls to ftrace.
1881         *   Then we can:
1882         *    a) know how many pages to allocate.
1883         *     and/or
1884         *    b) set up the table then.
1885         *
1886         *  The dynamic code is still necessary for
1887         *  modules.
1888         */
1889
1890        pg = ftrace_pages = ftrace_pages_start;
1891
1892        cnt = num_to_init / ENTRIES_PER_PAGE;
1893        pr_info("ftrace: allocating %ld entries in %d pages\n",
1894                num_to_init, cnt + 1);
1895
1896        for (i = 0; i < cnt; i++) {
1897                pg->next = (void *)get_zeroed_page(GFP_KERNEL);
1898
1899                /* If we fail, we'll try later anyway */
1900                if (!pg->next)
1901                        break;
1902
1903                pg = pg->next;
1904        }
1905
1906        return 0;
1907}
1908
1909enum {
1910        FTRACE_ITER_FILTER      = (1 << 0),
1911        FTRACE_ITER_NOTRACE     = (1 << 1),
1912        FTRACE_ITER_PRINTALL    = (1 << 2),
1913        FTRACE_ITER_HASH        = (1 << 3),
1914        FTRACE_ITER_ENABLED     = (1 << 4),
1915};
1916
1917#define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */
1918
1919struct ftrace_iterator {
1920        loff_t                          pos;
1921        loff_t                          func_pos;
1922        struct ftrace_page              *pg;
1923        struct dyn_ftrace               *func;
1924        struct ftrace_func_probe        *probe;
1925        struct trace_parser             parser;
1926        struct ftrace_hash              *hash;
1927        struct ftrace_ops               *ops;
1928        int                             hidx;
1929        int                             idx;
1930        unsigned                        flags;
1931};
1932
1933static void *
1934t_hash_next(struct seq_file *m, loff_t *pos)
1935{
1936        struct ftrace_iterator *iter = m->private;
1937        struct hlist_node *hnd = NULL;
1938        struct hlist_head *hhd;
1939
1940        (*pos)++;
1941        iter->pos = *pos;
1942
1943        if (iter->probe)
1944                hnd = &iter->probe->node;
1945 retry:
1946        if (iter->hidx >= FTRACE_FUNC_HASHSIZE)
1947                return NULL;
1948
1949        hhd = &ftrace_func_hash[iter->hidx];
1950
1951        if (hlist_empty(hhd)) {
1952                iter->hidx++;
1953                hnd = NULL;
1954                goto retry;
1955        }
1956
1957        if (!hnd)
1958                hnd = hhd->first;
1959        else {
1960                hnd = hnd->next;
1961                if (!hnd) {
1962                        iter->hidx++;
1963                        goto retry;
1964                }
1965        }
1966
1967        if (WARN_ON_ONCE(!hnd))
1968                return NULL;
1969
1970        iter->probe = hlist_entry(hnd, struct ftrace_func_probe, node);
1971
1972        return iter;
1973}
1974
1975static void *t_hash_start(struct seq_file *m, loff_t *pos)
1976{
1977        struct ftrace_iterator *iter = m->private;
1978        void *p = NULL;
1979        loff_t l;
1980
1981        if (iter->func_pos > *pos)
1982                return NULL;
1983
1984        iter->hidx = 0;
1985        for (l = 0; l <= (*pos - iter->func_pos); ) {
1986                p = t_hash_next(m, &l);
1987                if (!p)
1988                        break;
1989        }
1990        if (!p)
1991                return NULL;
1992
1993        /* Only set this if we have an item */
1994        iter->flags |= FTRACE_ITER_HASH;
1995
1996        return iter;
1997}
1998
1999static int
2000t_hash_show(struct seq_file *m, struct ftrace_iterator *iter)
2001{
2002        struct ftrace_func_probe *rec;
2003
2004        rec = iter->probe;
2005        if (WARN_ON_ONCE(!rec))
2006                return -EIO;
2007
2008        if (rec->ops->print)
2009                return rec->ops->print(m, rec->ip, rec->ops, rec->data);
2010
2011        seq_printf(m, "%ps:%ps", (void *)rec->ip, (void *)rec->ops->func);
2012
2013        if (rec->data)
2014                seq_printf(m, ":%p", rec->data);
2015        seq_putc(m, '\n');
2016
2017        return 0;
2018}
2019
2020static void *
2021t_next(struct seq_file *m, void *v, loff_t *pos)
2022{
2023        struct ftrace_iterator *iter = m->private;
2024        struct ftrace_ops *ops = &global_ops;
2025        struct dyn_ftrace *rec = NULL;
2026
2027        if (unlikely(ftrace_disabled))
2028                return NULL;
2029
2030        if (iter->flags & FTRACE_ITER_HASH)
2031                return t_hash_next(m, pos);
2032
2033        (*pos)++;
2034        iter->pos = iter->func_pos = *pos;
2035
2036        if (iter->flags & FTRACE_ITER_PRINTALL)
2037                return t_hash_start(m, pos);
2038
2039 retry:
2040        if (iter->idx >= iter->pg->index) {
2041                if (iter->pg->next) {
2042                        iter->pg = iter->pg->next;
2043                        iter->idx = 0;
2044                        goto retry;
2045                }
2046        } else {
2047                rec = &iter->pg->records[iter->idx++];
2048                if ((rec->flags & FTRACE_FL_FREE) ||
2049
2050                    ((iter->flags & FTRACE_ITER_FILTER) &&
2051                     !(ftrace_lookup_ip(ops->filter_hash, rec->ip))) ||
2052
2053                    ((iter->flags & FTRACE_ITER_NOTRACE) &&
2054                     !ftrace_lookup_ip(ops->notrace_hash, rec->ip)) ||
2055
2056                    ((iter->flags & FTRACE_ITER_ENABLED) &&
2057                     !(rec->flags & ~FTRACE_FL_MASK))) {
2058
2059                        rec = NULL;
2060                        goto retry;
2061                }
2062        }
2063
2064        if (!rec)
2065                return t_hash_start(m, pos);
2066
2067        iter->func = rec;
2068
2069        return iter;
2070}
2071
2072static void reset_iter_read(struct ftrace_iterator *iter)
2073{
2074        iter->pos = 0;
2075        iter->func_pos = 0;
2076        iter->flags &= ~(FTRACE_ITER_PRINTALL | FTRACE_ITER_HASH);
2077}
2078
2079static void *t_start(struct seq_file *m, loff_t *pos)
2080{
2081        struct ftrace_iterator *iter = m->private;
2082        struct ftrace_ops *ops = &global_ops;
2083        void *p = NULL;
2084        loff_t l;
2085
2086        mutex_lock(&ftrace_lock);
2087
2088        if (unlikely(ftrace_disabled))
2089                return NULL;
2090
2091        /*
2092         * If an lseek was done, then reset and start from beginning.
2093         */
2094        if (*pos < iter->pos)
2095                reset_iter_read(iter);
2096
2097        /*
2098         * For set_ftrace_filter reading, if we have the filter
2099         * off, we can short cut and just print out that all
2100         * functions are enabled.
2101         */
2102        if (iter->flags & FTRACE_ITER_FILTER && !ops->filter_hash->count) {
2103                if (*pos > 0)
2104                        return t_hash_start(m, pos);
2105                iter->flags |= FTRACE_ITER_PRINTALL;
2106                /* reset in case of seek/pread */
2107                iter->flags &= ~FTRACE_ITER_HASH;
2108                return iter;
2109        }
2110
2111        if (iter->flags & FTRACE_ITER_HASH)
2112                return t_hash_start(m, pos);
2113
2114        /*
2115         * Unfortunately, we need to restart at ftrace_pages_start
2116         * every time we let go of ftrace_lock. This is because
2117         * those pointers can change without the lock.
2118         */
2119        iter->pg = ftrace_pages_start;
2120        iter->idx = 0;
2121        for (l = 0; l <= *pos; ) {
2122                p = t_next(m, p, &l);
2123                if (!p)
2124                        break;
2125        }
2126
2127        if (!p) {
2128                if (iter->flags & FTRACE_ITER_FILTER)
2129                        return t_hash_start(m, pos);
2130
2131                return NULL;
2132        }
2133
2134        return iter;
2135}
2136
2137static void t_stop(struct seq_file *m, void *p)
2138{
2139        mutex_unlock(&ftrace_lock);
2140}
2141
2142static int t_show(struct seq_file *m, void *v)
2143{
2144        struct ftrace_iterator *iter = m->private;
2145        struct dyn_ftrace *rec;
2146
2147        if (iter->flags & FTRACE_ITER_HASH)
2148                return t_hash_show(m, iter);
2149
2150        if (iter->flags & FTRACE_ITER_PRINTALL) {
2151                seq_printf(m, "#### all functions enabled ####\n");
2152                return 0;
2153        }
2154
2155        rec = iter->func;
2156
2157        if (!rec)
2158                return 0;
2159
2160        seq_printf(m, "%ps", (void *)rec->ip);
2161        if (iter->flags & FTRACE_ITER_ENABLED)
2162                seq_printf(m, " (%ld)",
2163                           rec->flags & ~FTRACE_FL_MASK);
2164        seq_printf(m, "\n");
2165
2166        return 0;
2167}
2168
2169static const struct seq_operations show_ftrace_seq_ops = {
2170        .start = t_start,
2171        .next = t_next,
2172        .stop = t_stop,
2173        .show = t_show,
2174};
2175
2176static int
2177ftrace_avail_open(struct inode *inode, struct file *file)
2178{
2179        struct ftrace_iterator *iter;
2180        int ret;
2181
2182        if (unlikely(ftrace_disabled))
2183                return -ENODEV;
2184
2185        iter = kzalloc(sizeof(*iter), GFP_KERNEL);
2186        if (!iter)
2187                return -ENOMEM;
2188
2189        iter->pg = ftrace_pages_start;
2190
2191        ret = seq_open(file, &show_ftrace_seq_ops);
2192        if (!ret) {
2193                struct seq_file *m = file->private_data;
2194
2195                m->private = iter;
2196        } else {
2197                kfree(iter);
2198        }
2199
2200        return ret;
2201}
2202
2203static int
2204ftrace_enabled_open(struct inode *inode, struct file *file)
2205{
2206        struct ftrace_iterator *iter;
2207        int ret;
2208
2209        if (unlikely(ftrace_disabled))
2210                return -ENODEV;
2211
2212        iter = kzalloc(sizeof(*iter), GFP_KERNEL);
2213        if (!iter)
2214                return -ENOMEM;
2215
2216        iter->pg = ftrace_pages_start;
2217        iter->flags = FTRACE_ITER_ENABLED;
2218
2219        ret = seq_open(file, &show_ftrace_seq_ops);
2220        if (!ret) {
2221                struct seq_file *m = file->private_data;
2222
2223                m->private = iter;
2224        } else {
2225                kfree(iter);
2226        }
2227
2228        return ret;
2229}
2230
2231static void ftrace_filter_reset(struct ftrace_hash *hash)
2232{
2233        mutex_lock(&ftrace_lock);
2234        ftrace_hash_clear(hash);
2235        mutex_unlock(&ftrace_lock);
2236}
2237
2238static int
2239ftrace_regex_open(struct ftrace_ops *ops, int flag,
2240                  struct inode *inode, struct file *file)
2241{
2242        struct ftrace_iterator *iter;
2243        struct ftrace_hash *hash;
2244        int ret = 0;
2245
2246        if (unlikely(ftrace_disabled))
2247                return -ENODEV;
2248
2249        iter = kzalloc(sizeof(*iter), GFP_KERNEL);
2250        if (!iter)
2251                return -ENOMEM;
2252
2253        if (trace_parser_get_init(&iter->parser, FTRACE_BUFF_MAX)) {
2254                kfree(iter);
2255                return -ENOMEM;
2256        }
2257
2258        if (flag & FTRACE_ITER_NOTRACE)
2259                hash = ops->notrace_hash;
2260        else
2261                hash = ops->filter_hash;
2262
2263        iter->ops = ops;
2264        iter->flags = flag;
2265
2266        if (file->f_mode & FMODE_WRITE) {
2267                mutex_lock(&ftrace_lock);
2268                iter->hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, hash);
2269                mutex_unlock(&ftrace_lock);
2270
2271                if (!iter->hash) {
2272                        trace_parser_put(&iter->parser);
2273                        kfree(iter);
2274                        return -ENOMEM;
2275                }
2276        }
2277
2278        mutex_lock(&ftrace_regex_lock);
2279
2280        if ((file->f_mode & FMODE_WRITE) &&
2281            (file->f_flags & O_TRUNC))
2282                ftrace_filter_reset(iter->hash);
2283
2284        if (file->f_mode & FMODE_READ) {
2285                iter->pg = ftrace_pages_start;
2286
2287                ret = seq_open(file, &show_ftrace_seq_ops);
2288                if (!ret) {
2289                        struct seq_file *m = file->private_data;
2290                        m->private = iter;
2291                } else {
2292                        /* Failed */
2293                        free_ftrace_hash(iter->hash);
2294                        trace_parser_put(&iter->parser);
2295                        kfree(iter);
2296                }
2297        } else
2298                file->private_data = iter;
2299        mutex_unlock(&ftrace_regex_lock);
2300
2301        return ret;
2302}
2303
2304static int
2305ftrace_filter_open(struct inode *inode, struct file *file)
2306{
2307        return ftrace_regex_open(&global_ops, FTRACE_ITER_FILTER,
2308                                 inode, file);
2309}
2310
2311static int
2312ftrace_notrace_open(struct inode *inode, struct file *file)
2313{
2314        return ftrace_regex_open(&global_ops, FTRACE_ITER_NOTRACE,
2315                                 inode, file);
2316}
2317
2318static loff_t
2319ftrace_regex_lseek(struct file *file, loff_t offset, int origin)
2320{
2321        loff_t ret;
2322
2323        if (file->f_mode & FMODE_READ)
2324                ret = seq_lseek(file, offset, origin);
2325        else
2326                file->f_pos = ret = 1;
2327
2328        return ret;
2329}
2330
2331static int ftrace_match(char *str, char *regex, int len, int type)
2332{
2333        int matched = 0;
2334        int slen;
2335
2336        switch (type) {
2337        case MATCH_FULL:
2338                if (strcmp(str, regex) == 0)
2339                        matched = 1;
2340                break;
2341        case MATCH_FRONT_ONLY:
2342                if (strncmp(str, regex, len) == 0)
2343                        matched = 1;
2344                break;
2345        case MATCH_MIDDLE_ONLY:
2346                if (strstr(str, regex))
2347                        matched = 1;
2348                break;
2349        case MATCH_END_ONLY:
2350                slen = strlen(str);
2351                if (slen >= len && memcmp(str + slen - len, regex, len) == 0)
2352                        matched = 1;
2353                break;
2354        }
2355
2356        return matched;
2357}
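/*
 * Illustrative examples of how the match types above are exercised.  The
 * glob-to-type mapping itself is done by filter_parse_regex() (defined
 * elsewhere); this is only a sketch of the expected behaviour:
 *
 *      "schedule"   -> MATCH_FULL         matches only "schedule"
 *      "sched_*"    -> MATCH_FRONT_ONLY   matches "sched_fork", "sched_exec", ...
 *      "*_fork"     -> MATCH_END_ONLY     matches "sched_fork", "copy_fork", ...
 *      "*switch*"   -> MATCH_MIDDLE_ONLY  matches "__switch_to", ...
 */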
2358
2359static int
2360enter_record(struct ftrace_hash *hash, struct dyn_ftrace *rec, int not)
2361{
2362        struct ftrace_func_entry *entry;
2363        int ret = 0;
2364
2365        entry = ftrace_lookup_ip(hash, rec->ip);
2366        if (not) {
2367                /* Do nothing if it doesn't exist */
2368                if (!entry)
2369                        return 0;
2370
2371                free_hash_entry(hash, entry);
2372        } else {
2373                /* Do nothing if it exists */
2374                if (entry)
2375                        return 0;
2376
2377                ret = add_hash_entry(hash, rec->ip);
2378        }
2379        return ret;
2380}
2381
2382static int
2383ftrace_match_record(struct dyn_ftrace *rec, char *mod,
2384                    char *regex, int len, int type)
2385{
2386        char str[KSYM_SYMBOL_LEN];
2387        char *modname;
2388
2389        kallsyms_lookup(rec->ip, NULL, NULL, &modname, str);
2390
2391        if (mod) {
2392                /* module lookup requires matching the module */
2393                if (!modname || strcmp(modname, mod))
2394                        return 0;
2395
2396                /* blank search means to match all funcs in the mod */
2397                if (!len)
2398                        return 1;
2399        }
2400
2401        return ftrace_match(str, regex, len, type);
2402}
2403
2404static int
2405match_records(struct ftrace_hash *hash, char *buff,
2406              int len, char *mod, int not)
2407{
2408        unsigned search_len = 0;
2409        struct ftrace_page *pg;
2410        struct dyn_ftrace *rec;
2411        int type = MATCH_FULL;
2412        char *search = buff;
2413        int found = 0;
2414        int ret;
2415
2416        if (len) {
2417                type = filter_parse_regex(buff, len, &search, &not);
2418                search_len = strlen(search);
2419        }
2420
2421        mutex_lock(&ftrace_lock);
2422
2423        if (unlikely(ftrace_disabled))
2424                goto out_unlock;
2425
2426        do_for_each_ftrace_rec(pg, rec) {
2427
2428                if (ftrace_match_record(rec, mod, search, search_len, type)) {
2429                        ret = enter_record(hash, rec, not);
2430                        if (ret < 0) {
2431                                found = ret;
2432                                goto out_unlock;
2433                        }
2434                        found = 1;
2435                }
2436        } while_for_each_ftrace_rec();
2437 out_unlock:
2438        mutex_unlock(&ftrace_lock);
2439
2440        return found;
2441}
2442
2443static int
2444ftrace_match_records(struct ftrace_hash *hash, char *buff, int len)
2445{
2446        return match_records(hash, buff, len, NULL, 0);
2447}
2448
2449static int
2450ftrace_match_module_records(struct ftrace_hash *hash, char *buff, char *mod)
2451{
2452        int not = 0;
2453
2454        /* blank or '*' mean the same */
2455        if (strcmp(buff, "*") == 0)
2456                buff[0] = 0;
2457
2458        /* handle the case of "don't filter this module" */
2459        if (strcmp(buff, "!") == 0 || strcmp(buff, "!*") == 0) {
2460                buff[0] = 0;
2461                not = 1;
2462        }
2463
2464        return match_records(hash, buff, strlen(buff), mod, not);
2465}
2466
2467/*
2468 * We register the module command as a template to show others how
2469 * to register a command as well.
2470 */
2471
2472static int
2473ftrace_mod_callback(struct ftrace_hash *hash,
2474                    char *func, char *cmd, char *param, int enable)
2475{
2476        char *mod;
2477        int ret = -EINVAL;
2478
2479        /*
2480         * cmd == 'mod' because we only registered this func
2481         * for the 'mod' ftrace_func_command.
2482         * But if you register one func with multiple commands,
2483         * you can tell which command was used by the cmd
2484         * parameter.
2485         */
2486
2487        /* we must have a module name */
2488        if (!param)
2489                return ret;
2490
2491        mod = strsep(&param, ":");
2492        if (!strlen(mod))
2493                return ret;
2494
2495        ret = ftrace_match_module_records(hash, func, mod);
2496        if (!ret)
2497                ret = -EINVAL;
2498        if (ret < 0)
2499                return ret;
2500
2501        return 0;
2502}
2503
2504static struct ftrace_func_command ftrace_mod_cmd = {
2505        .name                   = "mod",
2506        .func                   = ftrace_mod_callback,
2507};
2508
2509static int __init ftrace_mod_cmd_init(void)
2510{
2511        return register_ftrace_command(&ftrace_mod_cmd);
2512}
2513device_initcall(ftrace_mod_cmd_init);
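/*
 * A minimal sketch of how another command could be registered, modeled on
 * ftrace_mod_cmd above.  The "size" name and the callback body are purely
 * illustrative (not part of this file); the callback signature matches the
 * one used by ftrace_mod_callback() and invoked from ftrace_process_regex():
 *
 *      static int ftrace_size_callback(struct ftrace_hash *hash,
 *                                      char *func, char *cmd,
 *                                      char *param, int enable)
 *      {
 *              // called when "<func>:size:<param>" is written to
 *              // set_ftrace_filter or set_ftrace_notrace
 *              return 0;
 *      }
 *
 *      static struct ftrace_func_command ftrace_size_cmd = {
 *              .name   = "size",
 *              .func   = ftrace_size_callback,
 *      };
 *
 *      static int __init ftrace_size_cmd_init(void)
 *      {
 *              return register_ftrace_command(&ftrace_size_cmd);
 *      }
 *      device_initcall(ftrace_size_cmd_init);
 */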
2514
2515static void
2516function_trace_probe_call(unsigned long ip, unsigned long parent_ip)
2517{
2518        struct ftrace_func_probe *entry;
2519        struct hlist_head *hhd;
2520        struct hlist_node *n;
2521        unsigned long key;
2522
2523        key = hash_long(ip, FTRACE_HASH_BITS);
2524
2525        hhd = &ftrace_func_hash[key];
2526
2527        if (hlist_empty(hhd))
2528                return;
2529
2530        /*
2531         * Disable preemption for these calls to prevent an RCU grace
2532         * period. This syncs the hash iteration with the freeing of items
2533         * on the hash. rcu_read_lock is too dangerous here.
2534         */
2535        preempt_disable_notrace();
2536        hlist_for_each_entry_rcu(entry, n, hhd, node) {
2537                if (entry->ip == ip)
2538                        entry->ops->func(ip, parent_ip, &entry->data);
2539        }
2540        preempt_enable_notrace();
2541}
2542
2543static struct ftrace_ops trace_probe_ops __read_mostly =
2544{
2545        .func           = function_trace_probe_call,
2546};
2547
2548static int ftrace_probe_registered;
2549
2550static void __enable_ftrace_function_probe(void)
2551{
2552        int ret;
2553        int i;
2554
2555        if (ftrace_probe_registered)
2556                return;
2557
2558        for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
2559                struct hlist_head *hhd = &ftrace_func_hash[i];
2560                if (hhd->first)
2561                        break;
2562        }
2563        /* Nothing registered? */
2564        if (i == FTRACE_FUNC_HASHSIZE)
2565                return;
2566
2567        ret = __register_ftrace_function(&trace_probe_ops);
2568        if (!ret)
2569                ret = ftrace_startup(&trace_probe_ops, 0);
2570
2571        ftrace_probe_registered = 1;
2572}
2573
2574static void __disable_ftrace_function_probe(void)
2575{
2576        int ret;
2577        int i;
2578
2579        if (!ftrace_probe_registered)
2580                return;
2581
2582        for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
2583                struct hlist_head *hhd = &ftrace_func_hash[i];
2584                if (hhd->first)
2585                        return;
2586        }
2587
2588        /* no more funcs left */
2589        ret = __unregister_ftrace_function(&trace_probe_ops);
2590        if (!ret)
2591                ftrace_shutdown(&trace_probe_ops, 0);
2592
2593        ftrace_probe_registered = 0;
2594}
2595
2596
2597static void ftrace_free_entry_rcu(struct rcu_head *rhp)
2598{
2599        struct ftrace_func_probe *entry =
2600                container_of(rhp, struct ftrace_func_probe, rcu);
2601
2602        if (entry->ops->free)
2603                entry->ops->free(&entry->data);
2604        kfree(entry);
2605}
2606
2607
2608int
2609register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
2610                              void *data)
2611{
2612        struct ftrace_func_probe *entry;
2613        struct ftrace_page *pg;
2614        struct dyn_ftrace *rec;
2615        int type, len, not;
2616        unsigned long key;
2617        int count = 0;
2618        char *search;
2619
2620        type = filter_parse_regex(glob, strlen(glob), &search, &not);
2621        len = strlen(search);
2622
2623        /* we do not support '!' for function probes */
2624        if (WARN_ON(not))
2625                return -EINVAL;
2626
2627        mutex_lock(&ftrace_lock);
2628
2629        if (unlikely(ftrace_disabled))
2630                goto out_unlock;
2631
2632        do_for_each_ftrace_rec(pg, rec) {
2633
2634                if (!ftrace_match_record(rec, NULL, search, len, type))
2635                        continue;
2636
2637                entry = kmalloc(sizeof(*entry), GFP_KERNEL);
2638                if (!entry) {
2639                        /* If we did not process any, then return error */
2640                        if (!count)
2641                                count = -ENOMEM;
2642                        goto out_unlock;
2643                }
2644
2645                count++;
2646
2647                entry->data = data;
2648
2649                /*
2650                 * The caller might want to do something special
2651                 * for each function we find. We call the callback
2652                 * to give the caller an opportunity to do so.
2653                 */
2654                if (ops->callback) {
2655                        if (ops->callback(rec->ip, &entry->data) < 0) {
2656                                /* caller does not like this func */
2657                                kfree(entry);
2658                                continue;
2659                        }
2660                }
2661
2662                entry->ops = ops;
2663                entry->ip = rec->ip;
2664
2665                key = hash_long(entry->ip, FTRACE_HASH_BITS);
2666                hlist_add_head_rcu(&entry->node, &ftrace_func_hash[key]);
2667
2668        } while_for_each_ftrace_rec();
2669        __enable_ftrace_function_probe();
2670
2671 out_unlock:
2672        mutex_unlock(&ftrace_lock);
2673
2674        return count;
2675}
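/*
 * A minimal sketch of a caller of the probe API above.  The names are
 * hypothetical; only the ftrace_probe_ops members that this file actually
 * dereferences (.func, and optionally .callback, .free, .print) are shown:
 *
 *      static void my_probe_func(unsigned long ip, unsigned long parent_ip,
 *                                void **data)
 *      {
 *              // runs on every hit of a matched function
 *      }
 *
 *      static struct ftrace_probe_ops my_probe_ops = {
 *              .func = my_probe_func,
 *      };
 *
 *      // probe every function whose name starts with "vfs_":
 *      register_ftrace_function_probe("vfs_*", &my_probe_ops, NULL);
 *      ...
 *      unregister_ftrace_function_probe("vfs_*", &my_probe_ops, NULL);
 */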
2676
2677enum {
2678        PROBE_TEST_FUNC         = 1,
2679        PROBE_TEST_DATA         = 2
2680};
2681
2682static void
2683__unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
2684                                  void *data, int flags)
2685{
2686        struct ftrace_func_probe *entry;
2687        struct hlist_node *n, *tmp;
2688        char str[KSYM_SYMBOL_LEN];
2689        int type = MATCH_FULL;
2690        int i, len = 0;
2691        char *search;
2692
2693        if (glob && (strcmp(glob, "*") == 0 || !strlen(glob)))
2694                glob = NULL;
2695        else if (glob) {
2696                int not;
2697
2698                type = filter_parse_regex(glob, strlen(glob), &search, &not);
2699                len = strlen(search);
2700
2701                /* we do not support '!' for function probes */
2702                if (WARN_ON(not))
2703                        return;
2704        }
2705
2706        mutex_lock(&ftrace_lock);
2707        for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
2708                struct hlist_head *hhd = &ftrace_func_hash[i];
2709
2710                hlist_for_each_entry_safe(entry, n, tmp, hhd, node) {
2711
2712                        /* break up if statements for readability */
2713                        if ((flags & PROBE_TEST_FUNC) && entry->ops != ops)
2714                                continue;
2715
2716                        if ((flags & PROBE_TEST_DATA) && entry->data != data)
2717                                continue;
2718
2719                        /* do this last, since it is the most expensive */
2720                        if (glob) {
2721                                kallsyms_lookup(entry->ip, NULL, NULL,
2722                                                NULL, str);
2723                                if (!ftrace_match(str, glob, len, type))
2724                                        continue;
2725                        }
2726
2727                        hlist_del(&entry->node);
2728                        call_rcu(&entry->rcu, ftrace_free_entry_rcu);
2729                }
2730        }
2731        __disable_ftrace_function_probe();
2732        mutex_unlock(&ftrace_lock);
2733}
2734
2735void
2736unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
2737                                void *data)
2738{
2739        __unregister_ftrace_function_probe(glob, ops, data,
2740                                          PROBE_TEST_FUNC | PROBE_TEST_DATA);
2741}
2742
2743void
2744unregister_ftrace_function_probe_func(char *glob, struct ftrace_probe_ops *ops)
2745{
2746        __unregister_ftrace_function_probe(glob, ops, NULL, PROBE_TEST_FUNC);
2747}
2748
2749void unregister_ftrace_function_probe_all(char *glob)
2750{
2751        __unregister_ftrace_function_probe(glob, NULL, NULL, 0);
2752}
2753
2754static LIST_HEAD(ftrace_commands);
2755static DEFINE_MUTEX(ftrace_cmd_mutex);
2756
2757int register_ftrace_command(struct ftrace_func_command *cmd)
2758{
2759        struct ftrace_func_command *p;
2760        int ret = 0;
2761
2762        mutex_lock(&ftrace_cmd_mutex);
2763        list_for_each_entry(p, &ftrace_commands, list) {
2764                if (strcmp(cmd->name, p->name) == 0) {
2765                        ret = -EBUSY;
2766                        goto out_unlock;
2767                }
2768        }
2769        list_add(&cmd->list, &ftrace_commands);
2770 out_unlock:
2771        mutex_unlock(&ftrace_cmd_mutex);
2772
2773        return ret;
2774}
2775
2776int unregister_ftrace_command(struct ftrace_func_command *cmd)
2777{
2778        struct ftrace_func_command *p, *n;
2779        int ret = -ENODEV;
2780
2781        mutex_lock(&ftrace_cmd_mutex);
2782        list_for_each_entry_safe(p, n, &ftrace_commands, list) {
2783                if (strcmp(cmd->name, p->name) == 0) {
2784                        ret = 0;
2785                        list_del_init(&p->list);
2786                        goto out_unlock;
2787                }
2788        }
2789 out_unlock:
2790        mutex_unlock(&ftrace_cmd_mutex);
2791
2792        return ret;
2793}
2794
2795static int ftrace_process_regex(struct ftrace_hash *hash,
2796                                char *buff, int len, int enable)
2797{
2798        char *func, *command, *next = buff;
2799        struct ftrace_func_command *p;
2800        int ret = -EINVAL;
2801
2802        func = strsep(&next, ":");
2803
2804        if (!next) {
2805                ret = ftrace_match_records(hash, func, len);
2806                if (!ret)
2807                        ret = -EINVAL;
2808                if (ret < 0)
2809                        return ret;
2810                return 0;
2811        }
2812
2813        /* command found */
2814
2815        command = strsep(&next, ":");
2816
2817        mutex_lock(&ftrace_cmd_mutex);
2818        list_for_each_entry(p, &ftrace_commands, list) {
2819                if (strcmp(p->name, command) == 0) {
2820                        ret = p->func(hash, func, command, next, enable);
2821                        goto out_unlock;
2822                }
2823        }
2824 out_unlock:
2825        mutex_unlock(&ftrace_cmd_mutex);
2826
2827        return ret;
2828}
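/*
 * The strings parsed above come from writes to set_ftrace_filter and
 * set_ftrace_notrace and have the form "<func>[:<command>[:<param>]]".
 * Illustrative examples, using the "mod" command registered above:
 *
 *      # echo 'schedule' > set_ftrace_filter
 *      # echo 'write*:mod:ext4' > set_ftrace_filter
 *
 * The first simply filters on schedule(); the second runs the "mod"
 * command, limiting the "write*" glob to functions in the ext4 module.
 */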
2829
2830static ssize_t
2831ftrace_regex_write(struct file *file, const char __user *ubuf,
2832                   size_t cnt, loff_t *ppos, int enable)
2833{
2834        struct ftrace_iterator *iter;
2835        struct trace_parser *parser;
2836        ssize_t ret, read;
2837
2838        if (!cnt)
2839                return 0;
2840
2841        mutex_lock(&ftrace_regex_lock);
2842
2843        ret = -ENODEV;
2844        if (unlikely(ftrace_disabled))
2845                goto out_unlock;
2846
2847        if (file->f_mode & FMODE_READ) {
2848                struct seq_file *m = file->private_data;
2849                iter = m->private;
2850        } else
2851                iter = file->private_data;
2852
2853        parser = &iter->parser;
2854        read = trace_get_user(parser, ubuf, cnt, ppos);
2855
2856        if (read >= 0 && trace_parser_loaded(parser) &&
2857            !trace_parser_cont(parser)) {
2858                ret = ftrace_process_regex(iter->hash, parser->buffer,
2859                                           parser->idx, enable);
2860                trace_parser_clear(parser);
2861                if (ret)
2862                        goto out_unlock;
2863        }
2864
2865        ret = read;
2866out_unlock:
2867        mutex_unlock(&ftrace_regex_lock);
2868
2869        return ret;
2870}
2871
2872static ssize_t
2873ftrace_filter_write(struct file *file, const char __user *ubuf,
2874                    size_t cnt, loff_t *ppos)
2875{
2876        return ftrace_regex_write(file, ubuf, cnt, ppos, 1);
2877}
2878
2879static ssize_t
2880ftrace_notrace_write(struct file *file, const char __user *ubuf,
2881                     size_t cnt, loff_t *ppos)
2882{
2883        return ftrace_regex_write(file, ubuf, cnt, ppos, 0);
2884}
2885
2886static int
2887ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len,
2888                 int reset, int enable)
2889{
2890        struct ftrace_hash **orig_hash;
2891        struct ftrace_hash *hash;
2892        int ret;
2893
2894        /* All global ops use the global ops filters */
2895        if (ops->flags & FTRACE_OPS_FL_GLOBAL)
2896                ops = &global_ops;
2897
2898        if (unlikely(ftrace_disabled))
2899                return -ENODEV;
2900
2901        if (enable)
2902                orig_hash = &ops->filter_hash;
2903        else
2904                orig_hash = &ops->notrace_hash;
2905
2906        hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash);
2907        if (!hash)
2908                return -ENOMEM;
2909
2910        mutex_lock(&ftrace_regex_lock);
2911        if (reset)
2912                ftrace_filter_reset(hash);
2913        if (buf)
2914                ftrace_match_records(hash, buf, len);
2915
2916        mutex_lock(&ftrace_lock);
2917        ret = ftrace_hash_move(ops, enable, orig_hash, hash);
2918        if (!ret && ops->flags & FTRACE_OPS_FL_ENABLED
2919            && ftrace_enabled)
2920                ftrace_run_update_code(FTRACE_ENABLE_CALLS);
2921
2922        mutex_unlock(&ftrace_lock);
2923
2924        mutex_unlock(&ftrace_regex_lock);
2925
2926        free_ftrace_hash(hash);
2927        return ret;
2928}
2929
2930/**
2931 * ftrace_set_filter - set a function to filter on in ftrace
2932 * @ops - the ops to set the filter with
2933 * @buf - the string that holds the function filter text.
2934 * @len - the length of the string.
2935 * @reset - non zero to reset all filters before applying this filter.
2936 *
2937 * Filters denote which functions should be enabled when tracing is enabled.
2938 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
2939 */
2940void ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
2941                       int len, int reset)
2942{
2943        ftrace_set_regex(ops, buf, len, reset, 1);
2944}
2945EXPORT_SYMBOL_GPL(ftrace_set_filter);
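/*
 * A minimal usage sketch (hypothetical caller, not part of this file): a
 * function tracer sets a filter on its own ops before registering it with
 * register_ftrace_function() (defined elsewhere in this file), so its
 * callback only fires for the matched functions:
 *
 *      static void my_trace_func(unsigned long ip, unsigned long parent_ip)
 *      {
 *              // called for every traced function that passed the filter
 *      }
 *
 *      static struct ftrace_ops my_ops = {
 *              .func = my_trace_func,
 *      };
 *
 *      ftrace_set_filter(&my_ops, "kmalloc", strlen("kmalloc"), 1);
 *      register_ftrace_function(&my_ops);
 */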
2946
2947/**
2948 * ftrace_set_notrace - set a function to not trace in ftrace
2949 * @ops - the ops to set the notrace filter with
2950 * @buf - the string that holds the function notrace text.
2951 * @len - the length of the string.
2952 * @reset - non zero to reset all filters before applying this filter.
2953 *
2954 * Notrace Filters denote which functions should not be enabled when tracing
2955 * is enabled. If @buf is NULL and reset is set, all functions will be enabled
2956 * for tracing.
2957 */
2958void ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
2959                        int len, int reset)
2960{
2961        ftrace_set_regex(ops, buf, len, reset, 0);
2962}
2963EXPORT_SYMBOL_GPL(ftrace_set_notrace);
2964/**
2965 * ftrace_set_global_filter - set a function to filter on in ftrace
2966 *    using the filter of the global ops
2967 * @buf - the string that holds the function filter text.
2968 * @len - the length of the string.
2969 * @reset - non zero to reset all filters before applying this filter.
2970 *
2971 * Filters denote which functions should be enabled when tracing is enabled.
2972 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
2973 */
2974void ftrace_set_global_filter(unsigned char *buf, int len, int reset)
2975{
2976        ftrace_set_regex(&global_ops, buf, len, reset, 1);
2977}
2978EXPORT_SYMBOL_GPL(ftrace_set_global_filter);
2979
2980/**
2981 * ftrace_set_global_notrace - set a function to not trace in ftrace
2982 *    using the notrace filter of the global ops
2983 * @buf - the string that holds the function notrace text.
2984 * @len - the length of the string.
2985 * @reset - non zero to reset all filters before applying this filter.
2986 *
2987 * Notrace Filters denote which functions should not be enabled when tracing
2988 * is enabled. If @buf is NULL and reset is set, all functions will be enabled
2989 * for tracing.
2990 */
2991void ftrace_set_global_notrace(unsigned char *buf, int len, int reset)
2992{
2993        ftrace_set_regex(&global_ops, buf, len, reset, 0);
2994}
2995EXPORT_SYMBOL_GPL(ftrace_set_global_notrace);
2996
2997/*
2998 * command line interface to allow users to set filters on boot up.
2999 */
3000#define FTRACE_FILTER_SIZE              COMMAND_LINE_SIZE
3001static char ftrace_notrace_buf[FTRACE_FILTER_SIZE] __initdata;
3002static char ftrace_filter_buf[FTRACE_FILTER_SIZE] __initdata;
3003
3004static int __init set_ftrace_notrace(char *str)
3005{
3006        strncpy(ftrace_notrace_buf, str, FTRACE_FILTER_SIZE);
3007        return 1;
3008}
3009__setup("ftrace_notrace=", set_ftrace_notrace);
3010
3011static int __init set_ftrace_filter(char *str)
3012{
3013        strncpy(ftrace_filter_buf, str, FTRACE_FILTER_SIZE);
3014        return 1;
3015}
3016__setup("ftrace_filter=", set_ftrace_filter);
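/*
 * Illustrative kernel command line usage of the two parameters above
 * (comma-separated function names or globs, applied at boot by
 * set_ftrace_early_filters()):
 *
 *      ftrace_filter=sys_read,sys_write
 *      ftrace_notrace=*spin_lock*
 */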
3017
3018#ifdef CONFIG_FUNCTION_GRAPH_TRACER
3019static char ftrace_graph_buf[FTRACE_FILTER_SIZE] __initdata;
3020static int ftrace_set_func(unsigned long *array, int *idx, char *buffer);
3021
3022static int __init set_graph_function(char *str)
3023{
3024        strlcpy(ftrace_graph_buf, str, FTRACE_FILTER_SIZE);
3025        return 1;
3026}
3027__setup("ftrace_graph_filter=", set_graph_function);
3028
3029static void __init set_ftrace_early_graph(char *buf)
3030{
3031        int ret;
3032        char *func;
3033
3034        while (buf) {
3035                func = strsep(&buf, ",");
3036                /* we allow only one expression at a time */
3037                ret = ftrace_set_func(ftrace_graph_funcs, &ftrace_graph_count,
3038                                      func);
3039                if (ret)
3040                        printk(KERN_DEBUG "ftrace: function %s not "
3041                                          "traceable\n", func);
3042        }
3043}
3044#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
3045
3046static void __init
3047set_ftrace_early_filter(struct ftrace_ops *ops, char *buf, int enable)
3048{
3049        char *func;
3050
3051        while (buf) {
3052                func = strsep(&buf, ",");
3053                ftrace_set_regex(ops, func, strlen(func), 0, enable);
3054        }
3055}
3056
3057static void __init set_ftrace_early_filters(void)
3058{
3059        if (ftrace_filter_buf[0])
3060                set_ftrace_early_filter(&global_ops, ftrace_filter_buf, 1);
3061        if (ftrace_notrace_buf[0])
3062                set_ftrace_early_filter(&global_ops, ftrace_notrace_buf, 0);
3063#ifdef CONFIG_FUNCTION_GRAPH_TRACER
3064        if (ftrace_graph_buf[0])
3065                set_ftrace_early_graph(ftrace_graph_buf);
3066#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
3067}
3068
3069static int
3070ftrace_regex_release(struct inode *inode, struct file *file)
3071{
3072        struct seq_file *m = (struct seq_file *)file->private_data;
3073        struct ftrace_iterator *iter;
3074        struct ftrace_hash **orig_hash;
3075        struct trace_parser *parser;
3076        int filter_hash;
3077        int ret;
3078
3079        mutex_lock(&ftrace_regex_lock);
3080        if (file->f_mode & FMODE_READ) {
3081                iter = m->private;
3082
3083                seq_release(inode, file);
3084        } else
3085                iter = file->private_data;
3086
3087        parser = &iter->parser;
3088        if (trace_parser_loaded(parser)) {
3089                parser->buffer[parser->idx] = 0;
3090                ftrace_match_records(iter->hash, parser->buffer, parser->idx);
3091        }
3092
3093        trace_parser_put(parser);
3094
3095        if (file->f_mode & FMODE_WRITE) {
3096                filter_hash = !!(iter->flags & FTRACE_ITER_FILTER);
3097
3098                if (filter_hash)
3099                        orig_hash = &iter->ops->filter_hash;
3100                else
3101                        orig_hash = &iter->ops->notrace_hash;
3102
3103                mutex_lock(&ftrace_lock);
3104                ret = ftrace_hash_move(iter->ops, filter_hash,
3105                                       orig_hash, iter->hash);
3106                if (!ret && (iter->ops->flags & FTRACE_OPS_FL_ENABLED)
3107                    && ftrace_enabled)
3108                        ftrace_run_update_code(FTRACE_ENABLE_CALLS);
3109
3110                mutex_unlock(&ftrace_lock);
3111        }
3112        free_ftrace_hash(iter->hash);
3113        kfree(iter);
3114
3115        mutex_unlock(&ftrace_regex_lock);
3116        return 0;
3117}
3118
3119static const struct file_operations ftrace_avail_fops = {
3120        .open = ftrace_avail_open,
3121        .read = seq_read,
3122        .llseek = seq_lseek,
3123        .release = seq_release_private,
3124};
3125
3126static const struct file_operations ftrace_enabled_fops = {
3127        .open = ftrace_enabled_open,
3128        .read = seq_read,
3129        .llseek = seq_lseek,
3130        .release = seq_release_private,
3131};
3132
3133static const struct file_operations ftrace_filter_fops = {
3134        .open = ftrace_filter_open,
3135        .read = seq_read,
3136        .write = ftrace_filter_write,
3137        .llseek = ftrace_regex_lseek,
3138        .release = ftrace_regex_release,
3139};
3140
3141static const struct file_operations ftrace_notrace_fops = {
3142        .open = ftrace_notrace_open,
3143        .read = seq_read,
3144        .write = ftrace_notrace_write,
3145        .llseek = ftrace_regex_lseek,
3146        .release = ftrace_regex_release,
3147};
3148
3149#ifdef CONFIG_FUNCTION_GRAPH_TRACER
3150
3151static DEFINE_MUTEX(graph_lock);
3152
3153int ftrace_graph_count;
3154int ftrace_graph_filter_enabled;
3155unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS] __read_mostly;
3156
3157static void *
3158__g_next(struct seq_file *m, loff_t *pos)
3159{
3160        if (*pos >= ftrace_graph_count)
3161                return NULL;
3162        return &ftrace_graph_funcs[*pos];
3163}
3164
3165static void *
3166g_next(struct seq_file *m, void *v, loff_t *pos)
3167{
3168        (*pos)++;
3169        return __g_next(m, pos);
3170}
3171
3172static void *g_start(struct seq_file *m, loff_t *pos)
3173{
3174        mutex_lock(&graph_lock);
3175
3176        /* Nothing registered, tell g_show to print that all functions are enabled */
3177        if (!ftrace_graph_filter_enabled && !*pos)
3178                return (void *)1;
3179
3180        return __g_next(m, pos);
3181}
3182
3183static void g_stop(struct seq_file *m, void *p)
3184{
3185        mutex_unlock(&graph_lock);
3186}
3187
3188static int g_show(struct seq_file *m, void *v)
3189{
3190        unsigned long *ptr = v;
3191
3192        if (!ptr)
3193                return 0;
3194
3195        if (ptr == (unsigned long *)1) {
3196                seq_printf(m, "#### all functions enabled ####\n");
3197                return 0;
3198        }
3199
3200        seq_printf(m, "%ps\n", (void *)*ptr);
3201
3202        return 0;
3203}
3204
3205static const struct seq_operations ftrace_graph_seq_ops = {
3206        .start = g_start,
3207        .next = g_next,
3208        .stop = g_stop,
3209        .show = g_show,
3210};
3211
3212static int
3213ftrace_graph_open(struct inode *inode, struct file *file)
3214{
3215        int ret = 0;
3216
3217        if (unlikely(ftrace_disabled))
3218                return -ENODEV;
3219
3220        mutex_lock(&graph_lock);
3221        if ((file->f_mode & FMODE_WRITE) &&
3222            (file->f_flags & O_TRUNC)) {
3223                ftrace_graph_filter_enabled = 0;
3224                ftrace_graph_count = 0;
3225                memset(ftrace_graph_funcs, 0, sizeof(ftrace_graph_funcs));
3226        }
3227        mutex_unlock(&graph_lock);
3228
3229        if (file->f_mode & FMODE_READ)
3230                ret = seq_open(file, &ftrace_graph_seq_ops);
3231
3232        return ret;
3233}
3234
3235static int
3236ftrace_graph_release(struct inode *inode, struct file *file)
3237{
3238        if (file->f_mode & FMODE_READ)
3239                seq_release(inode, file);
3240        return 0;
3241}
3242
3243static int
3244ftrace_set_func(unsigned long *array, int *idx, char *buffer)
3245{
3246        struct dyn_ftrace *rec;
3247        struct ftrace_page *pg;
3248        int search_len;
3249        int fail = 1;
3250        int type, not;
3251        char *search;
3252        bool exists;
3253        int i;
3254
3255        /* decode regex */
3256        type = filter_parse_regex(buffer, strlen(buffer), &search, &not);
3257        if (!not && *idx >= FTRACE_GRAPH_MAX_FUNCS)
3258                return -EBUSY;
3259
3260        search_len = strlen(search);
3261
3262        mutex_lock(&ftrace_lock);
3263
3264        if (unlikely(ftrace_disabled)) {
3265                mutex_unlock(&ftrace_lock);
3266                return -ENODEV;
3267        }
3268
3269        do_for_each_ftrace_rec(pg, rec) {
3270
3271                if (rec->flags & FTRACE_FL_FREE)
3272                        continue;
3273
3274                if (ftrace_match_record(rec, NULL, search, search_len, type)) {
3275                        /* if it is in the array */
3276                        exists = false;
3277                        for (i = 0; i < *idx; i++) {
3278                                if (array[i] == rec->ip) {
3279                                        exists = true;
3280                                        break;
3281                                }
3282                        }
3283
3284                        if (!not) {
3285                                fail = 0;
3286                                if (!exists) {
3287                                        array[(*idx)++] = rec->ip;
3288                                        if (*idx >= FTRACE_GRAPH_MAX_FUNCS)
3289                                                goto out;
3290                                }
3291                        } else {
3292                                if (exists) {
3293                                        array[i] = array[--(*idx)];
3294                                        array[*idx] = 0;
3295                                        fail = 0;
3296                                }
3297                        }
3298                }
3299        } while_for_each_ftrace_rec();
3300out:
3301        mutex_unlock(&ftrace_lock);
3302
3303        if (fail)
3304                return -EINVAL;
3305
3306        ftrace_graph_filter_enabled = 1;
3307        return 0;
3308}
3309
3310static ssize_t
3311ftrace_graph_write(struct file *file, const char __user *ubuf,
3312                   size_t cnt, loff_t *ppos)
3313{
3314        struct trace_parser parser;
3315        ssize_t read, ret;
3316
3317        if (!cnt)
3318                return 0;
3319
3320        mutex_lock(&graph_lock);
3321
3322        if (trace_parser_get_init(&parser, FTRACE_BUFF_MAX)) {
3323                ret = -ENOMEM;
3324                goto out_unlock;
3325        }
3326
3327        read = trace_get_user(&parser, ubuf, cnt, ppos);
3328
3329        if (read >= 0 && trace_parser_loaded((&parser))) {
3330                parser.buffer[parser.idx] = 0;
3331
3332                /* we allow only one expression at a time */
3333                ret = ftrace_set_func(ftrace_graph_funcs, &ftrace_graph_count,
3334                                        parser.buffer);
3335                if (ret)
3336                        goto out_free;
3337        }
3338
3339        ret = read;
3340
3341out_free:
3342        trace_parser_put(&parser);
3343out_unlock:
3344        mutex_unlock(&graph_lock);
3345
3346        return ret;
3347}
3348
3349static const struct file_operations ftrace_graph_fops = {
3350        .open           = ftrace_graph_open,
3351        .read           = seq_read,
3352        .write          = ftrace_graph_write,
3353        .release        = ftrace_graph_release,
3354        .llseek         = seq_lseek,
3355};
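/*
 * Illustrative use of the set_graph_function file created below:
 *
 *      # echo do_sys_open > set_graph_function
 *
 * limits the function graph tracer to do_sys_open() and the functions
 * it calls.
 */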
3356#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
3357
3358static __init int ftrace_init_dyn_debugfs(struct dentry *d_tracer)
3359{
3360
3361        trace_create_file("available_filter_functions", 0444,
3362                        d_tracer, NULL, &ftrace_avail_fops);
3363
3364        trace_create_file("enabled_functions", 0444,
3365                        d_tracer, NULL, &ftrace_enabled_fops);
3366
3367        trace_create_file("set_ftrace_filter", 0644, d_tracer,
3368                        NULL, &ftrace_filter_fops);
3369
3370        trace_create_file("set_ftrace_notrace", 0644, d_tracer,
3371                                    NULL, &ftrace_notrace_fops);
3372
3373#ifdef CONFIG_FUNCTION_GRAPH_TRACER
3374        trace_create_file("set_graph_function", 0444, d_tracer,
3375                                    NULL,
3376                                    &ftrace_graph_fops);
3377#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
3378
3379        return 0;
3380}
3381
3382static int ftrace_process_locs(struct module *mod,
3383                               unsigned long *start,
3384                               unsigned long *end)
3385{
3386        unsigned long *p;
3387        unsigned long addr;
3388        unsigned long flags = 0; /* Shut up gcc */
3389
3390        mutex_lock(&ftrace_lock);
3391        p = start;
3392        while (p < end) {
3393                addr = ftrace_call_adjust(*p++);
3394                /*
3395                 * Some architecture linkers will pad between
3396                 * the different mcount_loc sections of different
3397                 * object files to satisfy alignments.
3398                 * Skip any NULL pointers.
3399                 */
3400                if (!addr)
3401                        continue;
3402                ftrace_record_ip(addr);
3403        }
3404
3405        /*
3406         * We only need to disable interrupts on start up
3407         * because we are modifying code that an interrupt
3408         * may execute, and the modification is not atomic.
3409         * But for modules, nothing runs the code we modify
3410         * until we are finished with it, and there's no
3411         * reason to cause large interrupt latencies while we do it.
3412         */
3413        if (!mod)
3414                local_irq_save(flags);
3415        ftrace_update_code(mod);
3416        if (!mod)
3417                local_irq_restore(flags);
3418        mutex_unlock(&ftrace_lock);
3419
3420        return 0;
3421}
3422
3423#ifdef CONFIG_MODULES
3424void ftrace_release_mod(struct module *mod)
3425{
3426        struct dyn_ftrace *rec;
3427        struct ftrace_page *pg;
3428
3429        mutex_lock(&ftrace_lock);
3430
3431        if (ftrace_disabled)
3432                goto out_unlock;
3433
3434        do_for_each_ftrace_rec(pg, rec) {
3435                if (within_module_core(rec->ip, mod)) {
3436                        /*
3437                         * rec->ip is changed in ftrace_free_rec(); it should
3438                         * not be within the module once the record is freed.
3439                         */
3440                        FTRACE_WARN_ON(rec->flags & FTRACE_FL_FREE);
3441                        ftrace_free_rec(rec);
3442                }
3443        } while_for_each_ftrace_rec();
3444 out_unlock:
3445        mutex_unlock(&ftrace_lock);
3446}
3447
3448static void ftrace_init_module(struct module *mod,
3449                               unsigned long *start, unsigned long *end)
3450{
3451        if (ftrace_disabled || start == end)
3452                return;
3453        ftrace_process_locs(mod, start, end);
3454}
3455
3456static int ftrace_module_notify(struct notifier_block *self,
3457                                unsigned long val, void *data)
3458{
3459        struct module *mod = data;
3460
3461        switch (val) {
3462        case MODULE_STATE_COMING:
3463                ftrace_init_module(mod, mod->ftrace_callsites,
3464                                   mod->ftrace_callsites +
3465                                   mod->num_ftrace_callsites);
3466                break;
3467        case MODULE_STATE_GOING:
3468                ftrace_release_mod(mod);
3469                break;
3470        }
3471
3472        return 0;
3473}
3474#else
3475static int ftrace_module_notify(struct notifier_block *self,
3476                                unsigned long val, void *data)
3477{
3478        return 0;
3479}
3480#endif /* CONFIG_MODULES */
3481
3482struct notifier_block ftrace_module_nb = {
3483        .notifier_call = ftrace_module_notify,
3484        .priority = 0,
3485};
3486
3487extern unsigned long __start_mcount_loc[];
3488extern unsigned long __stop_mcount_loc[];
3489
3490void __init ftrace_init(void)
3491{
3492        unsigned long count, addr, flags;
3493        int ret;
3494
3495        /* Keep the ftrace pointer to the stub */
3496        addr = (unsigned long)ftrace_stub;
3497
3498        local_irq_save(flags);
3499        ftrace_dyn_arch_init(&addr);
3500        local_irq_restore(flags);
3501
3502        /* ftrace_dyn_arch_init places the return code in addr */
3503        if (addr)
3504                goto failed;
3505
3506        count = __stop_mcount_loc - __start_mcount_loc;
3507
3508        ret = ftrace_dyn_table_alloc(count);
3509        if (ret)
3510                goto failed;
3511
3512        last_ftrace_enabled = ftrace_enabled = 1;
3513
3514        ret = ftrace_process_locs(NULL,
3515                                  __start_mcount_loc,
3516                                  __stop_mcount_loc);
3517
3518        ret = register_module_notifier(&ftrace_module_nb);
3519        if (ret)
3520                pr_warning("Failed to register ftrace module notifier\n");
3521
3522        set_ftrace_early_filters();
3523
3524        return;
3525 failed:
3526        ftrace_disabled = 1;
3527}
3528
3529#else
3530
3531static struct ftrace_ops global_ops = {
3532        .func                   = ftrace_stub,
3533};
3534
3535static int __init ftrace_nodyn_init(void)
3536{
3537        ftrace_enabled = 1;
3538        return 0;
3539}
3540device_initcall(ftrace_nodyn_init);
3541
3542static inline int ftrace_init_dyn_debugfs(struct dentry *d_tracer) { return 0; }
3543static inline void ftrace_startup_enable(int command) { }
3544/* Keep as macros so we do not need to define the commands */
3545# define ftrace_startup(ops, command)                   \
3546        ({                                              \
3547                (ops)->flags |= FTRACE_OPS_FL_ENABLED;  \
3548                0;                                      \
3549        })
3550# define ftrace_shutdown(ops, command)  do { } while (0)
3551# define ftrace_startup_sysctl()        do { } while (0)
3552# define ftrace_shutdown_sysctl()       do { } while (0)
3553
3554static inline int
3555ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip)
3556{
3557        return 1;
3558}
3559
3560#endif /* CONFIG_DYNAMIC_FTRACE */
3561
3562static void
3563ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip)
3564{
3565        struct ftrace_ops *op;
3566
3567        if (unlikely(trace_recursion_test(TRACE_INTERNAL_BIT)))
3568                return;
3569
3570        trace_recursion_set(TRACE_INTERNAL_BIT);
3571        /*
3572         * Some of the ops may be dynamically allocated,
3573         * they must be freed after a synchronize_sched().
3574         */
3575        preempt_disable_notrace();
3576        op = rcu_dereference_raw(ftrace_ops_list);
3577        while (op != &ftrace_list_end) {
3578                if (ftrace_ops_test(op, ip))
3579                        op->func(ip, parent_ip);
3580                op = rcu_dereference_raw(op->next);
3581        }
3582        preempt_enable_notrace();
3583        trace_recursion_clear(TRACE_INTERNAL_BIT);
3584}
3585
3586static void clear_ftrace_swapper(void)
3587{
3588        struct task_struct *p;
3589        int cpu;
3590
3591        get_online_cpus();
3592        for_each_online_cpu(cpu) {
3593                p = idle_task(cpu);
3594                clear_tsk_trace_trace(p);
3595        }
3596        put_online_cpus();
3597}
3598
3599static void set_ftrace_swapper(void)
3600{
3601        struct task_struct *p;
3602        int cpu;
3603
3604        get_online_cpus();
3605        for_each_online_cpu(cpu) {
3606                p = idle_task(cpu);
3607                set_tsk_trace_trace(p);
3608        }
3609        put_online_cpus();
3610}
3611
3612static void clear_ftrace_pid(struct pid *pid)
3613{
3614        struct task_struct *p;
3615
3616        rcu_read_lock();
3617        do_each_pid_task(pid, PIDTYPE_PID, p) {
3618                clear_tsk_trace_trace(p);
3619        } while_each_pid_task(pid, PIDTYPE_PID, p);
3620        rcu_read_unlock();
3621
3622        put_pid(pid);
3623}
3624
3625static void set_ftrace_pid(struct pid *pid)
3626{
3627        struct task_struct *p;
3628
3629        rcu_read_lock();
3630        do_each_pid_task(pid, PIDTYPE_PID, p) {
3631                set_tsk_trace_trace(p);
3632        } while_each_pid_task(pid, PIDTYPE_PID, p);
3633        rcu_read_unlock();
3634}
3635
3636static void clear_ftrace_pid_task(struct pid *pid)
3637{
3638        if (pid == ftrace_swapper_pid)
3639                clear_ftrace_swapper();
3640        else
3641                clear_ftrace_pid(pid);
3642}
3643
3644static void set_ftrace_pid_task(struct pid *pid)
3645{
3646        if (pid == ftrace_swapper_pid)
3647                set_ftrace_swapper();
3648        else
3649                set_ftrace_pid(pid);
3650}
3651
3652static int ftrace_pid_add(int p)
3653{
3654        struct pid *pid;
3655        struct ftrace_pid *fpid;
3656        int ret = -EINVAL;
3657
3658        mutex_lock(&ftrace_lock);
3659
3660        if (!p)
3661                pid = ftrace_swapper_pid;
3662        else
3663                pid = find_get_pid(p);
3664
3665        if (!pid)
3666                goto out;
3667
3668        ret = 0;
3669
3670        list_for_each_entry(fpid, &ftrace_pids, list)
3671                if (fpid->pid == pid)
3672                        goto out_put;
3673
3674        ret = -ENOMEM;
3675
3676        fpid = kmalloc(sizeof(*fpid), GFP_KERNEL);
3677        if (!fpid)
3678                goto out_put;
3679
3680        list_add(&fpid->list, &ftrace_pids);
3681        fpid->pid = pid;
3682
3683        set_ftrace_pid_task(pid);
3684
3685        ftrace_update_pid_func();
3686        ftrace_startup_enable(0);
3687
3688        mutex_unlock(&ftrace_lock);
3689        return 0;
3690
3691out_put:
3692        if (pid != ftrace_swapper_pid)
3693                put_pid(pid);
3694
3695out:
3696        mutex_unlock(&ftrace_lock);
3697        return ret;
3698}
3699
3700static void ftrace_pid_reset(void)
3701{
3702        struct ftrace_pid *fpid, *safe;
3703
3704        mutex_lock(&ftrace_lock);
3705        list_for_each_entry_safe(fpid, safe, &ftrace_pids, list) {
3706                struct pid *pid = fpid->pid;
3707
3708                clear_ftrace_pid_task(pid);
3709
3710                list_del(&fpid->list);
3711                kfree(fpid);
3712        }
3713
3714        ftrace_update_pid_func();
3715        ftrace_startup_enable(0);
3716
3717        mutex_unlock(&ftrace_lock);
3718}
3719
3720static void *fpid_start(struct seq_file *m, loff_t *pos)
3721{
3722        mutex_lock(&ftrace_lock);
3723
3724        if (list_empty(&ftrace_pids) && (!*pos))
3725                return (void *) 1;
3726
3727        return seq_list_start(&ftrace_pids, *pos);
3728}
3729
3730static void *fpid_next(struct seq_file *m, void *v, loff_t *pos)
3731{
3732        if (v == (void *)1)
3733                return NULL;
3734
3735        return seq_list_next(v, &ftrace_pids, pos);
3736}
3737
3738static void fpid_stop(struct seq_file *m, void *p)
3739{
3740        mutex_unlock(&ftrace_lock);
3741}
3742
3743static int fpid_show(struct seq_file *m, void *v)
3744{
3745        const struct ftrace_pid *fpid = list_entry(v, struct ftrace_pid, list);
3746
3747        if (v == (void *)1) {
3748                seq_printf(m, "no pid\n");
3749                return 0;
3750        }
3751
3752        if (fpid->pid == ftrace_swapper_pid)
3753                seq_printf(m, "swapper tasks\n");
3754        else
3755                seq_printf(m, "%u\n", pid_vnr(fpid->pid));
3756
3757        return 0;
3758}
3759
3760static const struct seq_operations ftrace_pid_sops = {
3761        .start = fpid_start,
3762        .next = fpid_next,
3763        .stop = fpid_stop,
3764        .show = fpid_show,
3765};
3766
3767static int
3768ftrace_pid_open(struct inode *inode, struct file *file)
3769{
3770        int ret = 0;
3771
3772        if ((file->f_mode & FMODE_WRITE) &&
3773            (file->f_flags & O_TRUNC))
3774                ftrace_pid_reset();
3775
3776        if (file->f_mode & FMODE_READ)
3777                ret = seq_open(file, &ftrace_pid_sops);
3778
3779        return ret;
3780}
3781
3782static ssize_t
3783ftrace_pid_write(struct file *filp, const char __user *ubuf,
3784                   size_t cnt, loff_t *ppos)
3785{
3786        char buf[64], *tmp;
3787        long val;
3788        int ret;
3789
3790        if (cnt >= sizeof(buf))
3791                return -EINVAL;
3792
3793        if (copy_from_user(&buf, ubuf, cnt))
3794                return -EFAULT;
3795
3796        buf[cnt] = 0;
3797
3798        /*
3799         * Allow "echo > set_ftrace_pid" or "echo -n '' > set_ftrace_pid"
3800         * to clear the filter quietly.
3801         */
3802        tmp = strstrip(buf);
3803        if (strlen(tmp) == 0)
3804                return 1;
3805
3806        ret = strict_strtol(tmp, 10, &val);
3807        if (ret < 0)
3808                return ret;
3809
3810        ret = ftrace_pid_add(val);
3811
3812        return ret ? ret : cnt;
3813}
3814
3815static int
3816ftrace_pid_release(struct inode *inode, struct file *file)
3817{
3818        if (file->f_mode & FMODE_READ)
3819                seq_release(inode, file);
3820
3821        return 0;
3822}
3823
3824static const struct file_operations ftrace_pid_fops = {
3825        .open           = ftrace_pid_open,
3826        .write          = ftrace_pid_write,
3827        .read           = seq_read,
3828        .llseek         = seq_lseek,
3829        .release        = ftrace_pid_release,
3830};
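
/*
 * Typical interaction with the set_ftrace_pid file (an illustrative
 * sketch; the paths assume debugfs is mounted at /sys/kernel/debug):
 *
 *	echo 1234 > /sys/kernel/debug/tracing/set_ftrace_pid	# trace pid 1234
 *	echo 0    > /sys/kernel/debug/tracing/set_ftrace_pid	# trace the idle tasks
 *	echo      > /sys/kernel/debug/tracing/set_ftrace_pid	# clear the filter
 *
 * Reading the file lists the filtered pids, prints "swapper tasks" for
 * the idle tasks, or "no pid" when the filter is empty.
 */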
3831
3832static __init int ftrace_init_debugfs(void)
3833{
3834        struct dentry *d_tracer;
3835
3836        d_tracer = tracing_init_dentry();
3837        if (!d_tracer)
3838                return 0;
3839
3840        ftrace_init_dyn_debugfs(d_tracer);
3841
3842        trace_create_file("set_ftrace_pid", 0644, d_tracer,
3843                            NULL, &ftrace_pid_fops);
3844
3845        ftrace_profile_debugfs(d_tracer);
3846
3847        return 0;
3848}
3849fs_initcall(ftrace_init_debugfs);
3850
3851/**
3852 * ftrace_kill - kill ftrace
3853 *
3854 * This function should be used by panic code. It stops ftrace
3855 * in a not so nice way: tracing is disabled for good and the
3856 * registered callbacks are no longer invoked.
3857 */
3858void ftrace_kill(void)
3859{
3860        ftrace_disabled = 1;
3861        ftrace_enabled = 0;
3862        clear_ftrace_function();
3863}
3864
3865/**
3866 * register_ftrace_function - register a function for profiling
3867 * @ops: ops structure that holds the function for profiling.
3868 *
3869 * Register a callback that will be invoked from every function
3870 * traced in the kernel.
3871 *
3872 * Note: @ops->func and all the functions it calls must be labeled
3873 *       with "notrace", otherwise it will go into a
3874 *       recursive loop.
3875 */
3876int register_ftrace_function(struct ftrace_ops *ops)
3877{
3878        int ret = -1;
3879
3880        mutex_lock(&ftrace_lock);
3881
3882        if (unlikely(ftrace_disabled))
3883                goto out_unlock;
3884
3885        ret = __register_ftrace_function(ops);
3886        if (!ret)
3887                ret = ftrace_startup(ops, 0);
3888
3889
3890 out_unlock:
3891        mutex_unlock(&ftrace_lock);
3892        return ret;
3893}
3894EXPORT_SYMBOL_GPL(register_ftrace_function);
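
/*
 * Minimal usage sketch (illustrative only; my_callback and my_ops are
 * made-up names, not defined anywhere in the kernel):
 *
 *	static void notrace my_callback(unsigned long ip,
 *					unsigned long parent_ip)
 *	{
 *		trace_printk("hit %pS from %pS\n",
 *			     (void *)ip, (void *)parent_ip);
 *	}
 *
 *	static struct ftrace_ops my_ops __read_mostly = {
 *		.func	= my_callback,
 *	};
 *
 *	register_ftrace_function(&my_ops);
 *	...
 *	unregister_ftrace_function(&my_ops);
 *
 * The callback may be invoked from almost any context a traced function
 * runs in, so it must be notrace and must not call anything that is
 * itself traced (see the note above).
 */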
3895
3896/**
3897 * unregister_ftrace_function - unregister a function for profiling.
3898 * @ops: ops structure that holds the function to unregister
3899 *
3900 * Unregister a function that was added to be called by ftrace profiling.
3901 */
3902int unregister_ftrace_function(struct ftrace_ops *ops)
3903{
3904        int ret;
3905
3906        mutex_lock(&ftrace_lock);
3907        ret = __unregister_ftrace_function(ops);
3908        if (!ret)
3909                ftrace_shutdown(ops, 0);
3910        mutex_unlock(&ftrace_lock);
3911
3912        return ret;
3913}
3914EXPORT_SYMBOL_GPL(unregister_ftrace_function);
3915
3916int
3917ftrace_enable_sysctl(struct ctl_table *table, int write,
3918                     void __user *buffer, size_t *lenp,
3919                     loff_t *ppos)
3920{
3921        int ret = -ENODEV;
3922
3923        mutex_lock(&ftrace_lock);
3924
3925        if (unlikely(ftrace_disabled))
3926                goto out;
3927
3928        ret = proc_dointvec(table, write, buffer, lenp, ppos);
3929
3930        if (ret || !write || (last_ftrace_enabled == !!ftrace_enabled))
3931                goto out;
3932
3933        last_ftrace_enabled = !!ftrace_enabled;
3934
3935        if (ftrace_enabled) {
3936
3937                ftrace_startup_sysctl();
3938
3939                /* we are starting ftrace again */
3940                if (ftrace_ops_list != &ftrace_list_end) {
3941                        if (ftrace_ops_list->next == &ftrace_list_end)
3942                                ftrace_trace_function = ftrace_ops_list->func;
3943                        else
3944                                ftrace_trace_function = ftrace_ops_list_func;
3945                }
3946
3947        } else {
3948                /* stopping ftrace calls (just send to ftrace_stub) */
3949                ftrace_trace_function = ftrace_stub;
3950
3951                ftrace_shutdown_sysctl();
3952        }
3953
3954 out:
3955        mutex_unlock(&ftrace_lock);
3956        return ret;
3957}
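
/*
 * ftrace_enable_sysctl() backs the kernel.ftrace_enabled sysctl.  From
 * user space (illustrative):
 *
 *	echo 0 > /proc/sys/kernel/ftrace_enabled	# send calls to ftrace_stub
 *	echo 1 > /proc/sys/kernel/ftrace_enabled	# re-attach the registered ops
 *
 * Note that this only toggles ftrace_enabled; once ftrace_disabled has
 * been set the handler bails out with -ENODEV.
 */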
3958
3959#ifdef CONFIG_FUNCTION_GRAPH_TRACER
3960
3961static int ftrace_graph_active;
3962static struct notifier_block ftrace_suspend_notifier;
3963
3964int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
3965{
3966        return 0;
3967}
3968
3969/* The callbacks that hook function entry and return */
3970trace_func_graph_ret_t ftrace_graph_return =
3971                        (trace_func_graph_ret_t)ftrace_stub;
3972trace_func_graph_ent_t ftrace_graph_entry = ftrace_graph_entry_stub;
3973
3974/* Try to assign a return stack to each of FTRACE_RETSTACK_ALLOC_SIZE tasks. */
3975static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
3976{
3977        int i;
3978        int ret = 0;
3979        unsigned long flags;
3980        int start = 0, end = FTRACE_RETSTACK_ALLOC_SIZE;
3981        struct task_struct *g, *t;
3982
3983        for (i = 0; i < FTRACE_RETSTACK_ALLOC_SIZE; i++) {
3984                ret_stack_list[i] = kmalloc(FTRACE_RETFUNC_DEPTH
3985                                        * sizeof(struct ftrace_ret_stack),
3986                                        GFP_KERNEL);
3987                if (!ret_stack_list[i]) {
3988                        start = 0;
3989                        end = i;
3990                        ret = -ENOMEM;
3991                        goto free;
3992                }
3993        }
3994
3995        read_lock_irqsave(&tasklist_lock, flags);
3996        do_each_thread(g, t) {
3997                if (start == end) {
3998                        ret = -EAGAIN;
3999                        goto unlock;
4000                }
4001
4002                if (t->ret_stack == NULL) {
4003                        atomic_set(&t->tracing_graph_pause, 0);
4004                        atomic_set(&t->trace_overrun, 0);
4005                        t->curr_ret_stack = -1;
4006                        /* Make sure the tasks see the -1 first: */
4007                        smp_wmb();
4008                        t->ret_stack = ret_stack_list[start++];
4009                }
4010        } while_each_thread(g, t);
4011
4012unlock:
4013        read_unlock_irqrestore(&tasklist_lock, flags);
4014free:
4015        for (i = start; i < end; i++)
4016                kfree(ret_stack_list[i]);
4017        return ret;
4018}
4019
4020static void
4021ftrace_graph_probe_sched_switch(void *ignore,
4022                        struct task_struct *prev, struct task_struct *next)
4023{
4024        unsigned long long timestamp;
4025        int index;
4026
4027        /*
4028         * Does the user want to count the time a function was asleep?
4029         * If so, do not update the timestamps.
4030         */
4031        if (trace_flags & TRACE_ITER_SLEEP_TIME)
4032                return;
4033
4034        timestamp = trace_clock_local();
4035
4036        prev->ftrace_timestamp = timestamp;
4037
4038        /* only process tasks that we timestamped */
4039        if (!next->ftrace_timestamp)
4040                return;
4041
4042        /*
4043         * Update all the counters in next to make up for the
4044         * time next was sleeping.
4045         */
4046        timestamp -= next->ftrace_timestamp;
4047
4048        for (index = next->curr_ret_stack; index >= 0; index--)
4049                next->ret_stack[index].calltime += timestamp;
4050}
4051
4052/* Allocate a return stack for each task */
4053static int start_graph_tracing(void)
4054{
4055        struct ftrace_ret_stack **ret_stack_list;
4056        int ret, cpu;
4057
4058        ret_stack_list = kmalloc(FTRACE_RETSTACK_ALLOC_SIZE *
4059                                sizeof(struct ftrace_ret_stack *),
4060                                GFP_KERNEL);
4061
4062        if (!ret_stack_list)
4063                return -ENOMEM;
4064
4065        /* The idle tasks' ret_stack will never be freed */
4066        for_each_online_cpu(cpu) {
4067                if (!idle_task(cpu)->ret_stack)
4068                        ftrace_graph_init_idle_task(idle_task(cpu), cpu);
4069        }
4070
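        /*
         * alloc_retstack_tasklist() hands out at most
         * FTRACE_RETSTACK_ALLOC_SIZE stacks per pass and returns -EAGAIN
         * when more threads still need one, so keep retrying until the
         * whole thread list has been covered.
         */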
4071        do {
4072                ret = alloc_retstack_tasklist(ret_stack_list);
4073        } while (ret == -EAGAIN);
4074
4075        if (!ret) {
4076                ret = register_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
4077                if (ret)
4078                        pr_info("ftrace_graph: Couldn't activate tracepoint"
4079                                " probe to kernel_sched_switch\n");
4080        }
4081
4082        kfree(ret_stack_list);
4083        return ret;
4084}
4085
4086/*
4087 * Hibernation protection.
4088 * The state of the current task is too unstable during
4089 * suspend/restore to disk, so pause graph tracing across it.
4090 */
4091static int
4092ftrace_suspend_notifier_call(struct notifier_block *bl, unsigned long state,
4093                                                        void *unused)
4094{
4095        switch (state) {
4096        case PM_HIBERNATION_PREPARE:
4097                pause_graph_tracing();
4098                break;
4099
4100        case PM_POST_HIBERNATION:
4101                unpause_graph_tracing();
4102                break;
4103        }
4104        return NOTIFY_DONE;
4105}
4106
4107int register_ftrace_graph(trace_func_graph_ret_t retfunc,
4108                        trace_func_graph_ent_t entryfunc)
4109{
4110        int ret = 0;
4111
4112        mutex_lock(&ftrace_lock);
4113
4114        /* we currently allow only one tracer registered at a time */
4115        if (ftrace_graph_active) {
4116                ret = -EBUSY;
4117                goto out;
4118        }
4119
4120        ftrace_suspend_notifier.notifier_call = ftrace_suspend_notifier_call;
4121        register_pm_notifier(&ftrace_suspend_notifier);
4122
4123        ftrace_graph_active++;
4124        ret = start_graph_tracing();
4125        if (ret) {
4126                ftrace_graph_active--;
4127                goto out;
4128        }
4129
4130        ftrace_graph_return = retfunc;
4131        ftrace_graph_entry = entryfunc;
4132
4133        ret = ftrace_startup(&global_ops, FTRACE_START_FUNC_RET);
4134
4135out:
4136        mutex_unlock(&ftrace_lock);
4137        return ret;
4138}
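
/*
 * Usage sketch (illustrative; my_entry and my_return are made-up
 * names).  The handlers must match the trace_func_graph_ent_t and
 * trace_func_graph_ret_t signatures; the entry handler returns nonzero
 * to trace the call and 0 to skip it.
 *
 *	static int my_entry(struct ftrace_graph_ent *trace)
 *	{
 *		return 1;
 *	}
 *
 *	static void my_return(struct ftrace_graph_ret *trace)
 *	{
 *	}
 *
 *	register_ftrace_graph(my_return, my_entry);
 *	...
 *	unregister_ftrace_graph();
 */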
4139
4140void unregister_ftrace_graph(void)
4141{
4142        mutex_lock(&ftrace_lock);
4143
4144        if (unlikely(!ftrace_graph_active))
4145                goto out;
4146
4147        ftrace_graph_active--;
4148        ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub;
4149        ftrace_graph_entry = ftrace_graph_entry_stub;
4150        ftrace_shutdown(&global_ops, FTRACE_STOP_FUNC_RET);
4151        unregister_pm_notifier(&ftrace_suspend_notifier);
4152        unregister_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
4153
4154 out:
4155        mutex_unlock(&ftrace_lock);
4156}
4157
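/*
 * Each CPU's idle task reuses a single, lazily allocated return stack
 * kept in this per-cpu slot, so repeated CPU offline/online cycles do
 * not leak or reallocate it.
 */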
4158static DEFINE_PER_CPU(struct ftrace_ret_stack *, idle_ret_stack);
4159
4160static void
4161graph_init_task(struct task_struct *t, struct ftrace_ret_stack *ret_stack)
4162{
4163        atomic_set(&t->tracing_graph_pause, 0);
4164        atomic_set(&t->trace_overrun, 0);
4165        t->ftrace_timestamp = 0;
4166        /* make curr_ret_stack visible before we add the ret_stack */
4167        smp_wmb();
4168        t->ret_stack = ret_stack;
4169}
4170
4171/*
4172 * Allocate a return stack for the idle task. This may happen at
4173 * boot, or later when CPU hotplug brings a CPU online.
4174 */
4175void ftrace_graph_init_idle_task(struct task_struct *t, int cpu)
4176{
4177        t->curr_ret_stack = -1;
4178        /*
4179         * The idle task has no parent; it either has its own
4180         * stack or no stack at all.
4181         */
4182        if (t->ret_stack)
4183                WARN_ON(t->ret_stack != per_cpu(idle_ret_stack, cpu));
4184
4185        if (ftrace_graph_active) {
4186                struct ftrace_ret_stack *ret_stack;
4187
4188                ret_stack = per_cpu(idle_ret_stack, cpu);
4189                if (!ret_stack) {
4190                        ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
4191                                            * sizeof(struct ftrace_ret_stack),
4192                                            GFP_KERNEL);
4193                        if (!ret_stack)
4194                                return;
4195                        per_cpu(idle_ret_stack, cpu) = ret_stack;
4196                }
4197                graph_init_task(t, ret_stack);
4198        }
4199}
4200
4201/* Allocate a return stack for newly created task */
4202void ftrace_graph_init_task(struct task_struct *t)
4203{
4204        /* Make sure we do not use the parent ret_stack */
4205        t->ret_stack = NULL;
4206        t->curr_ret_stack = -1;
4207
4208        if (ftrace_graph_active) {
4209                struct ftrace_ret_stack *ret_stack;
4210
4211                ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
4212                                * sizeof(struct ftrace_ret_stack),
4213                                GFP_KERNEL);
4214                if (!ret_stack)
4215                        return;
4216                graph_init_task(t, ret_stack);
4217        }
4218}
4219
4220void ftrace_graph_exit_task(struct task_struct *t)
4221{
4222        struct ftrace_ret_stack *ret_stack = t->ret_stack;
4223
4224        t->ret_stack = NULL;
4225        /* NULL must become visible to IRQs before we free it: */
4226        barrier();
4227
4228        kfree(ret_stack);
4229}
4230
4231void ftrace_graph_stop(void)
4232{
4233        ftrace_stop();
4234}
4235#endif
4236