linux/kernel/trace/ftrace.c
   1/*
   2 * Infrastructure for profiling code inserted by 'gcc -pg'.
   3 *
   4 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
   5 * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
   6 *
   7 * Originally ported from the -rt patch by:
   8 *   Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
   9 *
  10 * Based on code in the latency_tracer, that is:
  11 *
  12 *  Copyright (C) 2004-2006 Ingo Molnar
  13 *  Copyright (C) 2004 Nadia Yvette Chambers
  14 */
  15
  16#include <linux/stop_machine.h>
  17#include <linux/clocksource.h>
  18#include <linux/sched/task.h>
  19#include <linux/kallsyms.h>
  20#include <linux/seq_file.h>
  21#include <linux/suspend.h>
  22#include <linux/tracefs.h>
  23#include <linux/hardirq.h>
  24#include <linux/kthread.h>
  25#include <linux/uaccess.h>
  26#include <linux/bsearch.h>
  27#include <linux/module.h>
  28#include <linux/ftrace.h>
  29#include <linux/sysctl.h>
  30#include <linux/slab.h>
  31#include <linux/ctype.h>
  32#include <linux/sort.h>
  33#include <linux/list.h>
  34#include <linux/hash.h>
  35#include <linux/rcupdate.h>
  36#include <linux/kprobes.h>
  37
  38#include <trace/events/sched.h>
  39
  40#include <asm/sections.h>
  41#include <asm/setup.h>
  42
  43#include "trace_output.h"
  44#include "trace_stat.h"
  45
  46#define FTRACE_WARN_ON(cond)                    \
  47        ({                                      \
  48                int ___r = cond;                \
  49                if (WARN_ON(___r))              \
  50                        ftrace_kill();          \
  51                ___r;                           \
  52        })
  53
  54#define FTRACE_WARN_ON_ONCE(cond)               \
  55        ({                                      \
  56                int ___r = cond;                \
  57                if (WARN_ON_ONCE(___r))         \
  58                        ftrace_kill();          \
  59                ___r;                           \
  60        })
  61
  62/* hash bits for specific function selection */
  63#define FTRACE_HASH_BITS 7
  64#define FTRACE_FUNC_HASHSIZE (1 << FTRACE_HASH_BITS)
  65#define FTRACE_HASH_DEFAULT_BITS 10
  66#define FTRACE_HASH_MAX_BITS 12
  67
  68#ifdef CONFIG_DYNAMIC_FTRACE
  69#define INIT_OPS_HASH(opsname)  \
  70        .func_hash              = &opsname.local_hash,                  \
  71        .local_hash.regex_lock  = __MUTEX_INITIALIZER(opsname.local_hash.regex_lock),
  72#define ASSIGN_OPS_HASH(opsname, val) \
  73        .func_hash              = val, \
  74        .local_hash.regex_lock  = __MUTEX_INITIALIZER(opsname.local_hash.regex_lock),
  75#else
  76#define INIT_OPS_HASH(opsname)
  77#define ASSIGN_OPS_HASH(opsname, val)
  78#endif
  79
  80static struct ftrace_ops ftrace_list_end __read_mostly = {
  81        .func           = ftrace_stub,
  82        .flags          = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_STUB,
  83        INIT_OPS_HASH(ftrace_list_end)
  84};
  85
  86/* ftrace_enabled is a method to turn ftrace on or off */
  87int ftrace_enabled __read_mostly;
  88static int last_ftrace_enabled;
  89
  90/* Current function tracing op */
  91struct ftrace_ops *function_trace_op __read_mostly = &ftrace_list_end;
  92/* What to set function_trace_op to */
  93static struct ftrace_ops *set_function_trace_op;
  94
  95static bool ftrace_pids_enabled(struct ftrace_ops *ops)
  96{
  97        struct trace_array *tr;
  98
  99        if (!(ops->flags & FTRACE_OPS_FL_PID) || !ops->private)
 100                return false;
 101
 102        tr = ops->private;
 103
 104        return tr->function_pids != NULL;
 105}
 106
 107static void ftrace_update_trampoline(struct ftrace_ops *ops);
 108
 109/*
 110 * ftrace_disabled is set when an anomaly is discovered.
 111 * ftrace_disabled is much stronger than ftrace_enabled.
 112 */
 113static int ftrace_disabled __read_mostly;
 114
 115static DEFINE_MUTEX(ftrace_lock);
 116
 117static struct ftrace_ops __rcu *ftrace_ops_list __read_mostly = &ftrace_list_end;
 118ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
 119static struct ftrace_ops global_ops;
 120
 121#if ARCH_SUPPORTS_FTRACE_OPS
 122static void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
 123                                 struct ftrace_ops *op, struct pt_regs *regs);
 124#else
 125/* See comment below, where ftrace_ops_list_func is defined */
 126static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip);
 127#define ftrace_ops_list_func ((ftrace_func_t)ftrace_ops_no_ops)
 128#endif
 129
 130/*
  131 * Traverse the ftrace_ops_list, invoking all entries.  The reason that we
  132 * can use rcu_dereference_raw_notrace() is that elements removed from this list
  133 * are simply leaked, so there is no need to interact with a grace-period
  134 * mechanism.  The rcu_dereference_raw_notrace() calls are needed to handle
  135 * concurrent insertions into the ftrace_ops_list.
 136 *
 137 * Silly Alpha and silly pointer-speculation compiler optimizations!
 138 */
 139#define do_for_each_ftrace_op(op, list)                 \
 140        op = rcu_dereference_raw_notrace(list);                 \
 141        do
 142
 143/*
 144 * Optimized for just a single item in the list (as that is the normal case).
 145 */
 146#define while_for_each_ftrace_op(op)                            \
 147        while (likely(op = rcu_dereference_raw_notrace((op)->next)) &&  \
 148               unlikely((op) != &ftrace_list_end))
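
/*
 * Illustrative usage sketch (not part of the code below): the two macros
 * above are meant to be paired, giving an open-coded RCU-safe walk of the
 * ops list:
 *
 *        do_for_each_ftrace_op(op, ftrace_ops_list) {
 *                if (op->flags & FTRACE_OPS_FL_ENABLED)
 *                        count++;
 *        } while_for_each_ftrace_op(op);
 *
 * where 'op' is a struct ftrace_ops * and 'count' is a local int; both names
 * are placeholders for this example.
 */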
 149
 150static inline void ftrace_ops_init(struct ftrace_ops *ops)
 151{
 152#ifdef CONFIG_DYNAMIC_FTRACE
 153        if (!(ops->flags & FTRACE_OPS_FL_INITIALIZED)) {
 154                mutex_init(&ops->local_hash.regex_lock);
 155                ops->func_hash = &ops->local_hash;
 156                ops->flags |= FTRACE_OPS_FL_INITIALIZED;
 157        }
 158#endif
 159}
 160
 161/**
 162 * ftrace_nr_registered_ops - return number of ops registered
 163 *
 164 * Returns the number of ftrace_ops registered and tracing functions
 165 */
 166int ftrace_nr_registered_ops(void)
 167{
 168        struct ftrace_ops *ops;
 169        int cnt = 0;
 170
 171        mutex_lock(&ftrace_lock);
 172
 173        for (ops = rcu_dereference_protected(ftrace_ops_list,
 174                                             lockdep_is_held(&ftrace_lock));
 175             ops != &ftrace_list_end;
 176             ops = rcu_dereference_protected(ops->next,
 177                                             lockdep_is_held(&ftrace_lock)))
 178                cnt++;
 179
 180        mutex_unlock(&ftrace_lock);
 181
 182        return cnt;
 183}
 184
 185static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip,
 186                            struct ftrace_ops *op, struct pt_regs *regs)
 187{
 188        struct trace_array *tr = op->private;
 189
 190        if (tr && this_cpu_read(tr->array_buffer.data->ftrace_ignore_pid))
 191                return;
 192
 193        op->saved_func(ip, parent_ip, op, regs);
 194}
 195
 196static void ftrace_sync_ipi(void *data)
 197{
 198        /* Probably not needed, but do it anyway */
 199        smp_rmb();
 200}
 201
 202#ifdef CONFIG_FUNCTION_GRAPH_TRACER
 203static void update_function_graph_func(void);
 204
  205/* Both enabled by default (can be cleared by function_graph tracer flags) */
 206static bool fgraph_sleep_time = true;
 207static bool fgraph_graph_time = true;
 208
 209#else
 210static inline void update_function_graph_func(void) { }
 211#endif
 212
 213
 214static ftrace_func_t ftrace_ops_get_list_func(struct ftrace_ops *ops)
 215{
 216        /*
  217         * If this is a dynamic or RCU ops, or we force the list func,
 218         * then it needs to call the list anyway.
 219         */
 220        if (ops->flags & (FTRACE_OPS_FL_DYNAMIC | FTRACE_OPS_FL_RCU) ||
 221            FTRACE_FORCE_LIST_FUNC)
 222                return ftrace_ops_list_func;
 223
 224        return ftrace_ops_get_func(ops);
 225}
 226
 227static void update_ftrace_function(void)
 228{
 229        ftrace_func_t func;
 230
 231        /*
 232         * Prepare the ftrace_ops that the arch callback will use.
 233         * If there's only one ftrace_ops registered, the ftrace_ops_list
 234         * will point to the ops we want.
 235         */
 236        set_function_trace_op = rcu_dereference_protected(ftrace_ops_list,
 237                                                lockdep_is_held(&ftrace_lock));
 238
 239        /* If there's no ftrace_ops registered, just call the stub function */
 240        if (set_function_trace_op == &ftrace_list_end) {
 241                func = ftrace_stub;
 242
 243        /*
  244         * If there is only one ftrace_ops registered and it is
  245         * recursion safe, not dynamic, and the arch supports passing ops,
  246         * then have the mcount trampoline call the function directly.
 247         */
 248        } else if (rcu_dereference_protected(ftrace_ops_list->next,
 249                        lockdep_is_held(&ftrace_lock)) == &ftrace_list_end) {
 250                func = ftrace_ops_get_list_func(ftrace_ops_list);
 251
 252        } else {
 253                /* Just use the default ftrace_ops */
 254                set_function_trace_op = &ftrace_list_end;
 255                func = ftrace_ops_list_func;
 256        }
 257
 258        update_function_graph_func();
 259
 260        /* If there's no change, then do nothing more here */
 261        if (ftrace_trace_function == func)
 262                return;
 263
 264        /*
 265         * If we are using the list function, it doesn't care
 266         * about the function_trace_ops.
 267         */
 268        if (func == ftrace_ops_list_func) {
 269                ftrace_trace_function = func;
 270                /*
 271                 * Don't even bother setting function_trace_ops,
 272                 * it would be racy to do so anyway.
 273                 */
 274                return;
 275        }
 276
 277#ifndef CONFIG_DYNAMIC_FTRACE
 278        /*
 279         * For static tracing, we need to be a bit more careful.
  280         * The function change takes effect immediately. Thus,
  281         * we need to coordinate the setting of the function_trace_ops
 282         * with the setting of the ftrace_trace_function.
 283         *
 284         * Set the function to the list ops, which will call the
 285         * function we want, albeit indirectly, but it handles the
 286         * ftrace_ops and doesn't depend on function_trace_op.
 287         */
 288        ftrace_trace_function = ftrace_ops_list_func;
 289        /*
 290         * Make sure all CPUs see this. Yes this is slow, but static
 291         * tracing is slow and nasty to have enabled.
 292         */
 293        synchronize_rcu_tasks_rude();
 294        /* Now all cpus are using the list ops. */
 295        function_trace_op = set_function_trace_op;
 296        /* Make sure the function_trace_op is visible on all CPUs */
 297        smp_wmb();
 298        /* Nasty way to force a rmb on all cpus */
 299        smp_call_function(ftrace_sync_ipi, NULL, 1);
 300        /* OK, we are all set to update the ftrace_trace_function now! */
 301#endif /* !CONFIG_DYNAMIC_FTRACE */
 302
 303        ftrace_trace_function = func;
 304}
 305
 306int using_ftrace_ops_list_func(void)
 307{
 308        return ftrace_trace_function == ftrace_ops_list_func;
 309}
 310
 311static void add_ftrace_ops(struct ftrace_ops __rcu **list,
 312                           struct ftrace_ops *ops)
 313{
 314        rcu_assign_pointer(ops->next, *list);
 315
 316        /*
 317         * We are entering ops into the list but another
 318         * CPU might be walking that list. We need to make sure
 319         * the ops->next pointer is valid before another CPU sees
 320         * the ops pointer included into the list.
 321         */
 322        rcu_assign_pointer(*list, ops);
 323}
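
/*
 * The ordering above is the classic RCU publish pattern: fully initialize
 * the new element before making it reachable. A minimal generic sketch of
 * the same idea (names are made up for illustration):
 *
 *        new->data = setup_data();                <- initialize everything first
 *        rcu_assign_pointer(new->next, head);     <- link new element to old list
 *        rcu_assign_pointer(head, new);           <- only now publish to readers
 *
 * A reader walking the list via rcu_dereference() then always sees a fully
 * initialized element or none at all.
 */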
 324
 325static int remove_ftrace_ops(struct ftrace_ops __rcu **list,
 326                             struct ftrace_ops *ops)
 327{
 328        struct ftrace_ops **p;
 329
 330        /*
 331         * If we are removing the last function, then simply point
 332         * to the ftrace_stub.
 333         */
 334        if (rcu_dereference_protected(*list,
 335                        lockdep_is_held(&ftrace_lock)) == ops &&
 336            rcu_dereference_protected(ops->next,
 337                        lockdep_is_held(&ftrace_lock)) == &ftrace_list_end) {
 338                *list = &ftrace_list_end;
 339                return 0;
 340        }
 341
 342        for (p = list; *p != &ftrace_list_end; p = &(*p)->next)
 343                if (*p == ops)
 344                        break;
 345
 346        if (*p != ops)
 347                return -1;
 348
 349        *p = (*p)->next;
 350        return 0;
 351}
 352
 353static void ftrace_update_trampoline(struct ftrace_ops *ops);
 354
 355static int __register_ftrace_function(struct ftrace_ops *ops)
 356{
 357        if (ops->flags & FTRACE_OPS_FL_DELETED)
 358                return -EINVAL;
 359
 360        if (WARN_ON(ops->flags & FTRACE_OPS_FL_ENABLED))
 361                return -EBUSY;
 362
 363#ifndef CONFIG_DYNAMIC_FTRACE_WITH_REGS
 364        /*
 365         * If the ftrace_ops specifies SAVE_REGS, then it only can be used
 366         * if the arch supports it, or SAVE_REGS_IF_SUPPORTED is also set.
 367         * Setting SAVE_REGS_IF_SUPPORTED makes SAVE_REGS irrelevant.
 368         */
 369        if (ops->flags & FTRACE_OPS_FL_SAVE_REGS &&
 370            !(ops->flags & FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED))
 371                return -EINVAL;
 372
 373        if (ops->flags & FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED)
 374                ops->flags |= FTRACE_OPS_FL_SAVE_REGS;
 375#endif
 376        if (!ftrace_enabled && (ops->flags & FTRACE_OPS_FL_PERMANENT))
 377                return -EBUSY;
 378
 379        if (!core_kernel_data((unsigned long)ops))
 380                ops->flags |= FTRACE_OPS_FL_DYNAMIC;
 381
 382        add_ftrace_ops(&ftrace_ops_list, ops);
 383
 384        /* Always save the function, and reset at unregistering */
 385        ops->saved_func = ops->func;
 386
 387        if (ftrace_pids_enabled(ops))
 388                ops->func = ftrace_pid_func;
 389
 390        ftrace_update_trampoline(ops);
 391
 392        if (ftrace_enabled)
 393                update_ftrace_function();
 394
 395        return 0;
 396}
 397
 398static int __unregister_ftrace_function(struct ftrace_ops *ops)
 399{
 400        int ret;
 401
 402        if (WARN_ON(!(ops->flags & FTRACE_OPS_FL_ENABLED)))
 403                return -EBUSY;
 404
 405        ret = remove_ftrace_ops(&ftrace_ops_list, ops);
 406
 407        if (ret < 0)
 408                return ret;
 409
 410        if (ftrace_enabled)
 411                update_ftrace_function();
 412
 413        ops->func = ops->saved_func;
 414
 415        return 0;
 416}
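
/*
 * For reference, a typical user of the public wrappers around these two
 * helpers (register_ftrace_function()/unregister_ftrace_function()) looks
 * roughly like this. This is only a sketch; my_callback/my_ops are
 * placeholder names and a real caller would normally also set a filter:
 *
 *        static void my_callback(unsigned long ip, unsigned long parent_ip,
 *                                struct ftrace_ops *op, struct pt_regs *regs)
 *        {
 *                trace_printk("hit %pS\n", (void *)ip);
 *        }
 *
 *        static struct ftrace_ops my_ops = {
 *                .func        = my_callback,
 *                .flags       = FTRACE_OPS_FL_RECURSION_SAFE,
 *        };
 *
 *        register_ftrace_function(&my_ops);
 *        ...
 *        unregister_ftrace_function(&my_ops);
 */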
 417
 418static void ftrace_update_pid_func(void)
 419{
 420        struct ftrace_ops *op;
 421
 422        /* Only do something if we are tracing something */
 423        if (ftrace_trace_function == ftrace_stub)
 424                return;
 425
 426        do_for_each_ftrace_op(op, ftrace_ops_list) {
 427                if (op->flags & FTRACE_OPS_FL_PID) {
 428                        op->func = ftrace_pids_enabled(op) ?
 429                                ftrace_pid_func : op->saved_func;
 430                        ftrace_update_trampoline(op);
 431                }
 432        } while_for_each_ftrace_op(op);
 433
 434        update_ftrace_function();
 435}
 436
 437#ifdef CONFIG_FUNCTION_PROFILER
 438struct ftrace_profile {
 439        struct hlist_node               node;
 440        unsigned long                   ip;
 441        unsigned long                   counter;
 442#ifdef CONFIG_FUNCTION_GRAPH_TRACER
 443        unsigned long long              time;
 444        unsigned long long              time_squared;
 445#endif
 446};
 447
 448struct ftrace_profile_page {
 449        struct ftrace_profile_page      *next;
 450        unsigned long                   index;
 451        struct ftrace_profile           records[];
 452};
 453
 454struct ftrace_profile_stat {
 455        atomic_t                        disabled;
 456        struct hlist_head               *hash;
 457        struct ftrace_profile_page      *pages;
 458        struct ftrace_profile_page      *start;
 459        struct tracer_stat              stat;
 460};
 461
 462#define PROFILE_RECORDS_SIZE                                            \
 463        (PAGE_SIZE - offsetof(struct ftrace_profile_page, records))
 464
 465#define PROFILES_PER_PAGE                                       \
 466        (PROFILE_RECORDS_SIZE / sizeof(struct ftrace_profile))
 467
 468static int ftrace_profile_enabled __read_mostly;
 469
 470/* ftrace_profile_lock - synchronize the enable and disable of the profiler */
 471static DEFINE_MUTEX(ftrace_profile_lock);
 472
 473static DEFINE_PER_CPU(struct ftrace_profile_stat, ftrace_profile_stats);
 474
 475#define FTRACE_PROFILE_HASH_BITS 10
 476#define FTRACE_PROFILE_HASH_SIZE (1 << FTRACE_PROFILE_HASH_BITS)
 477
 478static void *
 479function_stat_next(void *v, int idx)
 480{
 481        struct ftrace_profile *rec = v;
 482        struct ftrace_profile_page *pg;
 483
 484        pg = (struct ftrace_profile_page *)((unsigned long)rec & PAGE_MASK);
 485
 486 again:
 487        if (idx != 0)
 488                rec++;
 489
 490        if ((void *)rec >= (void *)&pg->records[pg->index]) {
 491                pg = pg->next;
 492                if (!pg)
 493                        return NULL;
 494                rec = &pg->records[0];
 495                if (!rec->counter)
 496                        goto again;
 497        }
 498
 499        return rec;
 500}
 501
 502static void *function_stat_start(struct tracer_stat *trace)
 503{
 504        struct ftrace_profile_stat *stat =
 505                container_of(trace, struct ftrace_profile_stat, stat);
 506
 507        if (!stat || !stat->start)
 508                return NULL;
 509
 510        return function_stat_next(&stat->start->records[0], 0);
 511}
 512
 513#ifdef CONFIG_FUNCTION_GRAPH_TRACER
 514/* function graph compares on total time */
 515static int function_stat_cmp(void *p1, void *p2)
 516{
 517        struct ftrace_profile *a = p1;
 518        struct ftrace_profile *b = p2;
 519
 520        if (a->time < b->time)
 521                return -1;
 522        if (a->time > b->time)
 523                return 1;
 524        else
 525                return 0;
 526}
 527#else
  528/* without function graph, compare against hit counts */
 529static int function_stat_cmp(void *p1, void *p2)
 530{
 531        struct ftrace_profile *a = p1;
 532        struct ftrace_profile *b = p2;
 533
 534        if (a->counter < b->counter)
 535                return -1;
 536        if (a->counter > b->counter)
 537                return 1;
 538        else
 539                return 0;
 540}
 541#endif
 542
 543static int function_stat_headers(struct seq_file *m)
 544{
 545#ifdef CONFIG_FUNCTION_GRAPH_TRACER
 546        seq_puts(m, "  Function                               "
 547                 "Hit    Time            Avg             s^2\n"
 548                    "  --------                               "
 549                 "---    ----            ---             ---\n");
 550#else
 551        seq_puts(m, "  Function                               Hit\n"
 552                    "  --------                               ---\n");
 553#endif
 554        return 0;
 555}
 556
 557static int function_stat_show(struct seq_file *m, void *v)
 558{
 559        struct ftrace_profile *rec = v;
 560        char str[KSYM_SYMBOL_LEN];
 561        int ret = 0;
 562#ifdef CONFIG_FUNCTION_GRAPH_TRACER
 563        static struct trace_seq s;
 564        unsigned long long avg;
 565        unsigned long long stddev;
 566#endif
 567        mutex_lock(&ftrace_profile_lock);
 568
 569        /* we raced with function_profile_reset() */
 570        if (unlikely(rec->counter == 0)) {
 571                ret = -EBUSY;
 572                goto out;
 573        }
 574
 575#ifdef CONFIG_FUNCTION_GRAPH_TRACER
 576        avg = rec->time;
 577        do_div(avg, rec->counter);
 578        if (tracing_thresh && (avg < tracing_thresh))
 579                goto out;
 580#endif
 581
 582        kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
 583        seq_printf(m, "  %-30.30s  %10lu", str, rec->counter);
 584
 585#ifdef CONFIG_FUNCTION_GRAPH_TRACER
 586        seq_puts(m, "    ");
 587
  588        /* Sample variance (s^2) */
 589        if (rec->counter <= 1)
 590                stddev = 0;
 591        else {
 592                /*
 593                 * Apply Welford's method:
 594                 * s^2 = 1 / (n * (n-1)) * (n * \Sum (x_i)^2 - (\Sum x_i)^2)
 595                 */
 596                stddev = rec->counter * rec->time_squared -
 597                         rec->time * rec->time;
 598
 599                /*
  600                 * Divide only by 1000 for ns^2 -> us^2 conversion.
  601                 * trace_print_graph_duration will divide by 1000 again.
 602                 */
 603                do_div(stddev, rec->counter * (rec->counter - 1) * 1000);
 604        }
 605
 606        trace_seq_init(&s);
 607        trace_print_graph_duration(rec->time, &s);
 608        trace_seq_puts(&s, "    ");
 609        trace_print_graph_duration(avg, &s);
 610        trace_seq_puts(&s, "    ");
 611        trace_print_graph_duration(stddev, &s);
 612        trace_print_seq(m, &s);
 613#endif
 614        seq_putc(m, '\n');
 615out:
 616        mutex_unlock(&ftrace_profile_lock);
 617
 618        return ret;
 619}
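
/*
 * Numerical sanity check for the s^2 computation above (made-up sample, not
 * real trace data): with n = 2 calls taking 100ns and 300ns, time = 400 and
 * time_squared = 100000, so
 *
 *        n * time_squared - time^2 = 2 * 100000 - 400 * 400 = 40000
 *
 * and dividing by n * (n - 1) = 2 gives the sample variance 20000 ns^2,
 * which matches ((100 - 200)^2 + (300 - 200)^2) / (2 - 1). The extra
 * division by 1000 in the code is only the ns^2 -> us^2 conversion noted
 * above.
 */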
 620
 621static void ftrace_profile_reset(struct ftrace_profile_stat *stat)
 622{
 623        struct ftrace_profile_page *pg;
 624
 625        pg = stat->pages = stat->start;
 626
 627        while (pg) {
 628                memset(pg->records, 0, PROFILE_RECORDS_SIZE);
 629                pg->index = 0;
 630                pg = pg->next;
 631        }
 632
 633        memset(stat->hash, 0,
 634               FTRACE_PROFILE_HASH_SIZE * sizeof(struct hlist_head));
 635}
 636
 637int ftrace_profile_pages_init(struct ftrace_profile_stat *stat)
 638{
 639        struct ftrace_profile_page *pg;
 640        int functions;
 641        int pages;
 642        int i;
 643
 644        /* If we already allocated, do nothing */
 645        if (stat->pages)
 646                return 0;
 647
 648        stat->pages = (void *)get_zeroed_page(GFP_KERNEL);
 649        if (!stat->pages)
 650                return -ENOMEM;
 651
 652#ifdef CONFIG_DYNAMIC_FTRACE
 653        functions = ftrace_update_tot_cnt;
 654#else
 655        /*
 656         * We do not know the number of functions that exist because
  657         * dynamic tracing is what counts them. From past experience
 658         * we have around 20K functions. That should be more than enough.
 659         * It is highly unlikely we will execute every function in
 660         * the kernel.
 661         */
 662        functions = 20000;
 663#endif
 664
 665        pg = stat->start = stat->pages;
 666
 667        pages = DIV_ROUND_UP(functions, PROFILES_PER_PAGE);
 668
 669        for (i = 1; i < pages; i++) {
 670                pg->next = (void *)get_zeroed_page(GFP_KERNEL);
 671                if (!pg->next)
 672                        goto out_free;
 673                pg = pg->next;
 674        }
 675
 676        return 0;
 677
 678 out_free:
 679        pg = stat->start;
 680        while (pg) {
 681                unsigned long tmp = (unsigned long)pg;
 682
 683                pg = pg->next;
 684                free_page(tmp);
 685        }
 686
 687        stat->pages = NULL;
 688        stat->start = NULL;
 689
 690        return -ENOMEM;
 691}
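
/*
 * Rough sizing example (assuming 4K pages and a 64-bit build): struct
 * ftrace_profile is on the order of 32-48 bytes, so PROFILES_PER_PAGE is
 * roughly 85-125 and the 20000-function fallback above works out to a few
 * hundred pages (well under 1MB) per CPU. Exact numbers depend on the
 * architecture and config options.
 */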
 692
 693static int ftrace_profile_init_cpu(int cpu)
 694{
 695        struct ftrace_profile_stat *stat;
 696        int size;
 697
 698        stat = &per_cpu(ftrace_profile_stats, cpu);
 699
 700        if (stat->hash) {
 701                /* If the profile is already created, simply reset it */
 702                ftrace_profile_reset(stat);
 703                return 0;
 704        }
 705
 706        /*
 707         * We are profiling all functions, but usually only a few thousand
 708         * functions are hit. We'll make a hash of 1024 items.
 709         */
 710        size = FTRACE_PROFILE_HASH_SIZE;
 711
 712        stat->hash = kcalloc(size, sizeof(struct hlist_head), GFP_KERNEL);
 713
 714        if (!stat->hash)
 715                return -ENOMEM;
 716
 717        /* Preallocate the function profiling pages */
 718        if (ftrace_profile_pages_init(stat) < 0) {
 719                kfree(stat->hash);
 720                stat->hash = NULL;
 721                return -ENOMEM;
 722        }
 723
 724        return 0;
 725}
 726
 727static int ftrace_profile_init(void)
 728{
 729        int cpu;
 730        int ret = 0;
 731
 732        for_each_possible_cpu(cpu) {
 733                ret = ftrace_profile_init_cpu(cpu);
 734                if (ret)
 735                        break;
 736        }
 737
 738        return ret;
 739}
 740
 741/* interrupts must be disabled */
 742static struct ftrace_profile *
 743ftrace_find_profiled_func(struct ftrace_profile_stat *stat, unsigned long ip)
 744{
 745        struct ftrace_profile *rec;
 746        struct hlist_head *hhd;
 747        unsigned long key;
 748
 749        key = hash_long(ip, FTRACE_PROFILE_HASH_BITS);
 750        hhd = &stat->hash[key];
 751
 752        if (hlist_empty(hhd))
 753                return NULL;
 754
 755        hlist_for_each_entry_rcu_notrace(rec, hhd, node) {
 756                if (rec->ip == ip)
 757                        return rec;
 758        }
 759
 760        return NULL;
 761}
 762
 763static void ftrace_add_profile(struct ftrace_profile_stat *stat,
 764                               struct ftrace_profile *rec)
 765{
 766        unsigned long key;
 767
 768        key = hash_long(rec->ip, FTRACE_PROFILE_HASH_BITS);
 769        hlist_add_head_rcu(&rec->node, &stat->hash[key]);
 770}
 771
 772/*
  773 * The memory is already allocated; this simply finds a new record to use.
 774 */
 775static struct ftrace_profile *
 776ftrace_profile_alloc(struct ftrace_profile_stat *stat, unsigned long ip)
 777{
 778        struct ftrace_profile *rec = NULL;
 779
 780        /* prevent recursion (from NMIs) */
 781        if (atomic_inc_return(&stat->disabled) != 1)
 782                goto out;
 783
 784        /*
 785         * Try to find the function again since an NMI
 786         * could have added it
 787         */
 788        rec = ftrace_find_profiled_func(stat, ip);
 789        if (rec)
 790                goto out;
 791
 792        if (stat->pages->index == PROFILES_PER_PAGE) {
 793                if (!stat->pages->next)
 794                        goto out;
 795                stat->pages = stat->pages->next;
 796        }
 797
 798        rec = &stat->pages->records[stat->pages->index++];
 799        rec->ip = ip;
 800        ftrace_add_profile(stat, rec);
 801
 802 out:
 803        atomic_dec(&stat->disabled);
 804
 805        return rec;
 806}
 807
 808static void
 809function_profile_call(unsigned long ip, unsigned long parent_ip,
 810                      struct ftrace_ops *ops, struct pt_regs *regs)
 811{
 812        struct ftrace_profile_stat *stat;
 813        struct ftrace_profile *rec;
 814        unsigned long flags;
 815
 816        if (!ftrace_profile_enabled)
 817                return;
 818
 819        local_irq_save(flags);
 820
 821        stat = this_cpu_ptr(&ftrace_profile_stats);
 822        if (!stat->hash || !ftrace_profile_enabled)
 823                goto out;
 824
 825        rec = ftrace_find_profiled_func(stat, ip);
 826        if (!rec) {
 827                rec = ftrace_profile_alloc(stat, ip);
 828                if (!rec)
 829                        goto out;
 830        }
 831
 832        rec->counter++;
 833 out:
 834        local_irq_restore(flags);
 835}
 836
 837#ifdef CONFIG_FUNCTION_GRAPH_TRACER
 838static int profile_graph_entry(struct ftrace_graph_ent *trace)
 839{
 840        int index = trace->depth;
 841
 842        function_profile_call(trace->func, 0, NULL, NULL);
 843
 844        /* If function graph is shutting down, ret_stack can be NULL */
 845        if (!current->ret_stack)
 846                return 0;
 847
 848        if (index >= 0 && index < FTRACE_RETFUNC_DEPTH)
 849                current->ret_stack[index].subtime = 0;
 850
 851        return 1;
 852}
 853
 854static void profile_graph_return(struct ftrace_graph_ret *trace)
 855{
 856        struct ftrace_profile_stat *stat;
 857        unsigned long long calltime;
 858        struct ftrace_profile *rec;
 859        unsigned long flags;
 860
 861        local_irq_save(flags);
 862        stat = this_cpu_ptr(&ftrace_profile_stats);
 863        if (!stat->hash || !ftrace_profile_enabled)
 864                goto out;
 865
  866        /* If the calltime was zeroed, ignore it */
 867        if (!trace->calltime)
 868                goto out;
 869
 870        calltime = trace->rettime - trace->calltime;
 871
 872        if (!fgraph_graph_time) {
 873                int index;
 874
 875                index = trace->depth;
 876
 877                /* Append this call time to the parent time to subtract */
 878                if (index)
 879                        current->ret_stack[index - 1].subtime += calltime;
 880
 881                if (current->ret_stack[index].subtime < calltime)
 882                        calltime -= current->ret_stack[index].subtime;
 883                else
 884                        calltime = 0;
 885        }
 886
 887        rec = ftrace_find_profiled_func(stat, trace->func);
 888        if (rec) {
 889                rec->time += calltime;
 890                rec->time_squared += calltime * calltime;
 891        }
 892
 893 out:
 894        local_irq_restore(flags);
 895}
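
/*
 * Worked example of the subtime handling above (made-up numbers): with
 * fgraph_graph_time off, if foo() runs for 10us total and a nested bar()
 * accounts for 7us of that, then bar()'s return adds 7us to its parent's
 * ret_stack[].subtime, and foo()'s return records 10us - 7us = 3us, i.e.
 * only the time spent in foo() itself.
 */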
 896
 897static int register_ftrace_profiler(void)
 898{
 899        return register_ftrace_graph(&profile_graph_return,
 900                                     &profile_graph_entry);
 901}
 902
 903static void unregister_ftrace_profiler(void)
 904{
 905        unregister_ftrace_graph();
 906}
 907#else
 908static struct ftrace_ops ftrace_profile_ops __read_mostly = {
 909        .func           = function_profile_call,
 910        .flags          = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED,
 911        INIT_OPS_HASH(ftrace_profile_ops)
 912};
 913
 914static int register_ftrace_profiler(void)
 915{
 916        return register_ftrace_function(&ftrace_profile_ops);
 917}
 918
 919static void unregister_ftrace_profiler(void)
 920{
 921        unregister_ftrace_function(&ftrace_profile_ops);
 922}
 923#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
 924
 925static ssize_t
 926ftrace_profile_write(struct file *filp, const char __user *ubuf,
 927                     size_t cnt, loff_t *ppos)
 928{
 929        unsigned long val;
 930        int ret;
 931
 932        ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
 933        if (ret)
 934                return ret;
 935
 936        val = !!val;
 937
 938        mutex_lock(&ftrace_profile_lock);
 939        if (ftrace_profile_enabled ^ val) {
 940                if (val) {
 941                        ret = ftrace_profile_init();
 942                        if (ret < 0) {
 943                                cnt = ret;
 944                                goto out;
 945                        }
 946
 947                        ret = register_ftrace_profiler();
 948                        if (ret < 0) {
 949                                cnt = ret;
 950                                goto out;
 951                        }
 952                        ftrace_profile_enabled = 1;
 953                } else {
 954                        ftrace_profile_enabled = 0;
 955                        /*
 956                         * unregister_ftrace_profiler calls stop_machine
  957                         * so this acts like a synchronize_rcu.
 958                         */
 959                        unregister_ftrace_profiler();
 960                }
 961        }
 962 out:
 963        mutex_unlock(&ftrace_profile_lock);
 964
 965        *ppos += cnt;
 966
 967        return cnt;
 968}
 969
 970static ssize_t
 971ftrace_profile_read(struct file *filp, char __user *ubuf,
 972                     size_t cnt, loff_t *ppos)
 973{
 974        char buf[64];           /* big enough to hold a number */
 975        int r;
 976
 977        r = sprintf(buf, "%u\n", ftrace_profile_enabled);
 978        return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
 979}
 980
 981static const struct file_operations ftrace_profile_fops = {
 982        .open           = tracing_open_generic,
 983        .read           = ftrace_profile_read,
 984        .write          = ftrace_profile_write,
 985        .llseek         = default_llseek,
 986};
 987
 988/* used to initialize the real stat files */
 989static struct tracer_stat function_stats __initdata = {
 990        .name           = "functions",
 991        .stat_start     = function_stat_start,
 992        .stat_next      = function_stat_next,
 993        .stat_cmp       = function_stat_cmp,
 994        .stat_headers   = function_stat_headers,
 995        .stat_show      = function_stat_show
 996};
 997
 998static __init void ftrace_profile_tracefs(struct dentry *d_tracer)
 999{
1000        struct ftrace_profile_stat *stat;
1001        struct dentry *entry;
1002        char *name;
1003        int ret;
1004        int cpu;
1005
1006        for_each_possible_cpu(cpu) {
1007                stat = &per_cpu(ftrace_profile_stats, cpu);
1008
1009                name = kasprintf(GFP_KERNEL, "function%d", cpu);
1010                if (!name) {
1011                        /*
 1012                         * The files created are permanent; if something goes
 1013                         * wrong here, we still do not free this memory.
1014                         */
1015                        WARN(1,
1016                             "Could not allocate stat file for cpu %d\n",
1017                             cpu);
1018                        return;
1019                }
1020                stat->stat = function_stats;
1021                stat->stat.name = name;
1022                ret = register_stat_tracer(&stat->stat);
1023                if (ret) {
1024                        WARN(1,
1025                             "Could not register function stat for cpu %d\n",
1026                             cpu);
1027                        kfree(name);
1028                        return;
1029                }
1030        }
1031
1032        entry = tracefs_create_file("function_profile_enabled", 0644,
1033                                    d_tracer, NULL, &ftrace_profile_fops);
1034        if (!entry)
1035                pr_warn("Could not create tracefs 'function_profile_enabled' entry\n");
1036}
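
/*
 * The resulting files are used from tracefs, typically like this (paths
 * assume tracefs is mounted at /sys/kernel/tracing):
 *
 *        echo 1 > /sys/kernel/tracing/function_profile_enabled
 *        cat /sys/kernel/tracing/trace_stat/function0
 *        echo 0 > /sys/kernel/tracing/function_profile_enabled
 *
 * where function0, function1, ... are the per-CPU stat files registered
 * above.
 */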
1037
1038#else /* CONFIG_FUNCTION_PROFILER */
1039static __init void ftrace_profile_tracefs(struct dentry *d_tracer)
1040{
1041}
1042#endif /* CONFIG_FUNCTION_PROFILER */
1043
1044static struct pid * const ftrace_swapper_pid = &init_struct_pid;
1045
1046#ifdef CONFIG_FUNCTION_GRAPH_TRACER
1047static int ftrace_graph_active;
1048#else
1049# define ftrace_graph_active 0
1050#endif
1051
1052#ifdef CONFIG_DYNAMIC_FTRACE
1053
1054static struct ftrace_ops *removed_ops;
1055
1056/*
1057 * Set when doing a global update, like enabling all recs or disabling them.
1058 * It is not set when just updating a single ftrace_ops.
1059 */
1060static bool update_all_ops;
1061
1062#ifndef CONFIG_FTRACE_MCOUNT_RECORD
1063# error Dynamic ftrace depends on MCOUNT_RECORD
1064#endif
1065
1066struct ftrace_func_probe {
1067        struct ftrace_probe_ops *probe_ops;
1068        struct ftrace_ops       ops;
1069        struct trace_array      *tr;
1070        struct list_head        list;
1071        void                    *data;
1072        int                     ref;
1073};
1074
1075/*
1076 * We make these constant because no one should touch them,
1077 * but they are used as the default "empty hash", to avoid allocating
1078 * it all the time. These are in a read only section such that if
1079 * anyone does try to modify it, it will cause an exception.
1080 */
1081static const struct hlist_head empty_buckets[1];
1082static const struct ftrace_hash empty_hash = {
1083        .buckets = (struct hlist_head *)empty_buckets,
1084};
1085#define EMPTY_HASH      ((struct ftrace_hash *)&empty_hash)
1086
1087static struct ftrace_ops global_ops = {
1088        .func                           = ftrace_stub,
1089        .local_hash.notrace_hash        = EMPTY_HASH,
1090        .local_hash.filter_hash         = EMPTY_HASH,
1091        INIT_OPS_HASH(global_ops)
1092        .flags                          = FTRACE_OPS_FL_RECURSION_SAFE |
1093                                          FTRACE_OPS_FL_INITIALIZED |
1094                                          FTRACE_OPS_FL_PID,
1095};
1096
1097/*
 1098 * Used by the stack unwinder to know about dynamic ftrace trampolines.
1099 */
1100struct ftrace_ops *ftrace_ops_trampoline(unsigned long addr)
1101{
1102        struct ftrace_ops *op = NULL;
1103
1104        /*
1105         * Some of the ops may be dynamically allocated,
1106         * they are freed after a synchronize_rcu().
1107         */
1108        preempt_disable_notrace();
1109
1110        do_for_each_ftrace_op(op, ftrace_ops_list) {
1111                /*
1112                 * This is to check for dynamically allocated trampolines.
1113                 * Trampolines that are in kernel text will have
1114                 * core_kernel_text() return true.
1115                 */
1116                if (op->trampoline && op->trampoline_size)
1117                        if (addr >= op->trampoline &&
1118                            addr < op->trampoline + op->trampoline_size) {
1119                                preempt_enable_notrace();
1120                                return op;
1121                        }
1122        } while_for_each_ftrace_op(op);
1123        preempt_enable_notrace();
1124
1125        return NULL;
1126}
1127
1128/*
1129 * This is used by __kernel_text_address() to return true if the
1130 * address is on a dynamically allocated trampoline that would
1131 * not return true for either core_kernel_text() or
1132 * is_module_text_address().
1133 */
1134bool is_ftrace_trampoline(unsigned long addr)
1135{
1136        return ftrace_ops_trampoline(addr) != NULL;
1137}
1138
1139struct ftrace_page {
1140        struct ftrace_page      *next;
1141        struct dyn_ftrace       *records;
1142        int                     index;
1143        int                     size;
1144};
1145
1146#define ENTRY_SIZE sizeof(struct dyn_ftrace)
1147#define ENTRIES_PER_PAGE (PAGE_SIZE / ENTRY_SIZE)
1148
1149/* estimate from running different kernels */
1150#define NR_TO_INIT              10000
1151
1152static struct ftrace_page       *ftrace_pages_start;
1153static struct ftrace_page       *ftrace_pages;
1154
1155static __always_inline unsigned long
1156ftrace_hash_key(struct ftrace_hash *hash, unsigned long ip)
1157{
1158        if (hash->size_bits > 0)
1159                return hash_long(ip, hash->size_bits);
1160
1161        return 0;
1162}
1163
1164/* Only use this function if ftrace_hash_empty() has already been tested */
1165static __always_inline struct ftrace_func_entry *
1166__ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip)
1167{
1168        unsigned long key;
1169        struct ftrace_func_entry *entry;
1170        struct hlist_head *hhd;
1171
1172        key = ftrace_hash_key(hash, ip);
1173        hhd = &hash->buckets[key];
1174
1175        hlist_for_each_entry_rcu_notrace(entry, hhd, hlist) {
1176                if (entry->ip == ip)
1177                        return entry;
1178        }
1179        return NULL;
1180}
1181
1182/**
1183 * ftrace_lookup_ip - Test to see if an ip exists in an ftrace_hash
1184 * @hash: The hash to look at
1185 * @ip: The instruction pointer to test
1186 *
1187 * Search a given @hash to see if a given instruction pointer (@ip)
1188 * exists in it.
1189 *
1190 * Returns the entry that holds the @ip if found. NULL otherwise.
1191 */
1192struct ftrace_func_entry *
1193ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip)
1194{
1195        if (ftrace_hash_empty(hash))
1196                return NULL;
1197
1198        return __ftrace_lookup_ip(hash, ip);
1199}
1200
1201static void __add_hash_entry(struct ftrace_hash *hash,
1202                             struct ftrace_func_entry *entry)
1203{
1204        struct hlist_head *hhd;
1205        unsigned long key;
1206
1207        key = ftrace_hash_key(hash, entry->ip);
1208        hhd = &hash->buckets[key];
1209        hlist_add_head(&entry->hlist, hhd);
1210        hash->count++;
1211}
1212
1213static int add_hash_entry(struct ftrace_hash *hash, unsigned long ip)
1214{
1215        struct ftrace_func_entry *entry;
1216
1217        entry = kmalloc(sizeof(*entry), GFP_KERNEL);
1218        if (!entry)
1219                return -ENOMEM;
1220
1221        entry->ip = ip;
1222        __add_hash_entry(hash, entry);
1223
1224        return 0;
1225}
1226
1227static void
1228free_hash_entry(struct ftrace_hash *hash,
1229                  struct ftrace_func_entry *entry)
1230{
1231        hlist_del(&entry->hlist);
1232        kfree(entry);
1233        hash->count--;
1234}
1235
1236static void
1237remove_hash_entry(struct ftrace_hash *hash,
1238                  struct ftrace_func_entry *entry)
1239{
1240        hlist_del_rcu(&entry->hlist);
1241        hash->count--;
1242}
1243
1244static void ftrace_hash_clear(struct ftrace_hash *hash)
1245{
1246        struct hlist_head *hhd;
1247        struct hlist_node *tn;
1248        struct ftrace_func_entry *entry;
1249        int size = 1 << hash->size_bits;
1250        int i;
1251
1252        if (!hash->count)
1253                return;
1254
1255        for (i = 0; i < size; i++) {
1256                hhd = &hash->buckets[i];
1257                hlist_for_each_entry_safe(entry, tn, hhd, hlist)
1258                        free_hash_entry(hash, entry);
1259        }
1260        FTRACE_WARN_ON(hash->count);
1261}
1262
1263static void free_ftrace_mod(struct ftrace_mod_load *ftrace_mod)
1264{
1265        list_del(&ftrace_mod->list);
1266        kfree(ftrace_mod->module);
1267        kfree(ftrace_mod->func);
1268        kfree(ftrace_mod);
1269}
1270
1271static void clear_ftrace_mod_list(struct list_head *head)
1272{
1273        struct ftrace_mod_load *p, *n;
1274
1275        /* stack tracer isn't supported yet */
1276        if (!head)
1277                return;
1278
1279        mutex_lock(&ftrace_lock);
1280        list_for_each_entry_safe(p, n, head, list)
1281                free_ftrace_mod(p);
1282        mutex_unlock(&ftrace_lock);
1283}
1284
1285static void free_ftrace_hash(struct ftrace_hash *hash)
1286{
1287        if (!hash || hash == EMPTY_HASH)
1288                return;
1289        ftrace_hash_clear(hash);
1290        kfree(hash->buckets);
1291        kfree(hash);
1292}
1293
1294static void __free_ftrace_hash_rcu(struct rcu_head *rcu)
1295{
1296        struct ftrace_hash *hash;
1297
1298        hash = container_of(rcu, struct ftrace_hash, rcu);
1299        free_ftrace_hash(hash);
1300}
1301
1302static void free_ftrace_hash_rcu(struct ftrace_hash *hash)
1303{
1304        if (!hash || hash == EMPTY_HASH)
1305                return;
1306        call_rcu(&hash->rcu, __free_ftrace_hash_rcu);
1307}
1308
1309void ftrace_free_filter(struct ftrace_ops *ops)
1310{
1311        ftrace_ops_init(ops);
1312        free_ftrace_hash(ops->func_hash->filter_hash);
1313        free_ftrace_hash(ops->func_hash->notrace_hash);
1314}
1315
1316static struct ftrace_hash *alloc_ftrace_hash(int size_bits)
1317{
1318        struct ftrace_hash *hash;
1319        int size;
1320
1321        hash = kzalloc(sizeof(*hash), GFP_KERNEL);
1322        if (!hash)
1323                return NULL;
1324
1325        size = 1 << size_bits;
1326        hash->buckets = kcalloc(size, sizeof(*hash->buckets), GFP_KERNEL);
1327
1328        if (!hash->buckets) {
1329                kfree(hash);
1330                return NULL;
1331        }
1332
1333        hash->size_bits = size_bits;
1334
1335        return hash;
1336}
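
/*
 * Typical construction sketch (error handling trimmed, names below are only
 * for illustration): a caller allocates a hash and fills it one ip at a
 * time, then frees it when done:
 *
 *        hash = alloc_ftrace_hash(FTRACE_HASH_DEFAULT_BITS);
 *        if (!hash)
 *                return -ENOMEM;
 *        ret = add_hash_entry(hash, rec->ip);
 *        ...
 *        free_ftrace_hash(hash);
 *
 * free_ftrace_hash_rcu() is used instead when readers may still be walking
 * the hash.
 */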
1337
1338
1339static int ftrace_add_mod(struct trace_array *tr,
1340                          const char *func, const char *module,
1341                          int enable)
1342{
1343        struct ftrace_mod_load *ftrace_mod;
1344        struct list_head *mod_head = enable ? &tr->mod_trace : &tr->mod_notrace;
1345
1346        ftrace_mod = kzalloc(sizeof(*ftrace_mod), GFP_KERNEL);
1347        if (!ftrace_mod)
1348                return -ENOMEM;
1349
1350        ftrace_mod->func = kstrdup(func, GFP_KERNEL);
1351        ftrace_mod->module = kstrdup(module, GFP_KERNEL);
1352        ftrace_mod->enable = enable;
1353
1354        if (!ftrace_mod->func || !ftrace_mod->module)
1355                goto out_free;
1356
1357        list_add(&ftrace_mod->list, mod_head);
1358
1359        return 0;
1360
1361 out_free:
1362        free_ftrace_mod(ftrace_mod);
1363
1364        return -ENOMEM;
1365}
1366
1367static struct ftrace_hash *
1368alloc_and_copy_ftrace_hash(int size_bits, struct ftrace_hash *hash)
1369{
1370        struct ftrace_func_entry *entry;
1371        struct ftrace_hash *new_hash;
1372        int size;
1373        int ret;
1374        int i;
1375
1376        new_hash = alloc_ftrace_hash(size_bits);
1377        if (!new_hash)
1378                return NULL;
1379
1380        if (hash)
1381                new_hash->flags = hash->flags;
1382
1383        /* Empty hash? */
1384        if (ftrace_hash_empty(hash))
1385                return new_hash;
1386
1387        size = 1 << hash->size_bits;
1388        for (i = 0; i < size; i++) {
1389                hlist_for_each_entry(entry, &hash->buckets[i], hlist) {
1390                        ret = add_hash_entry(new_hash, entry->ip);
1391                        if (ret < 0)
1392                                goto free_hash;
1393                }
1394        }
1395
1396        FTRACE_WARN_ON(new_hash->count != hash->count);
1397
1398        return new_hash;
1399
1400 free_hash:
1401        free_ftrace_hash(new_hash);
1402        return NULL;
1403}
1404
1405static void
1406ftrace_hash_rec_disable_modify(struct ftrace_ops *ops, int filter_hash);
1407static void
1408ftrace_hash_rec_enable_modify(struct ftrace_ops *ops, int filter_hash);
1409
1410static int ftrace_hash_ipmodify_update(struct ftrace_ops *ops,
1411                                       struct ftrace_hash *new_hash);
1412
1413static struct ftrace_hash *dup_hash(struct ftrace_hash *src, int size)
1414{
1415        struct ftrace_func_entry *entry;
1416        struct ftrace_hash *new_hash;
1417        struct hlist_head *hhd;
1418        struct hlist_node *tn;
1419        int bits = 0;
1420        int i;
1421
1422        /*
1423         * Make the hash size about 1/2 the # found
1424         */
1425        for (size /= 2; size; size >>= 1)
1426                bits++;
1427
1428        /* Don't allocate too much */
1429        if (bits > FTRACE_HASH_MAX_BITS)
1430                bits = FTRACE_HASH_MAX_BITS;
1431
1432        new_hash = alloc_ftrace_hash(bits);
1433        if (!new_hash)
1434                return NULL;
1435
1436        new_hash->flags = src->flags;
1437
1438        size = 1 << src->size_bits;
1439        for (i = 0; i < size; i++) {
1440                hhd = &src->buckets[i];
1441                hlist_for_each_entry_safe(entry, tn, hhd, hlist) {
1442                        remove_hash_entry(src, entry);
1443                        __add_hash_entry(new_hash, entry);
1444                }
1445        }
1446        return new_hash;
1447}
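
/*
 * Example of the sizing logic above: for src->count == 50, size starts at 25
 * and is shifted right until it reaches zero, giving bits == 5, i.e. a
 * 32-bucket hash for 50 entries (capped at FTRACE_HASH_MAX_BITS for very
 * large counts).
 */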
1448
1449static struct ftrace_hash *
1450__ftrace_hash_move(struct ftrace_hash *src)
1451{
1452        int size = src->count;
1453
1454        /*
1455         * If the new source is empty, just return the empty_hash.
1456         */
1457        if (ftrace_hash_empty(src))
1458                return EMPTY_HASH;
1459
1460        return dup_hash(src, size);
1461}
1462
1463static int
1464ftrace_hash_move(struct ftrace_ops *ops, int enable,
1465                 struct ftrace_hash **dst, struct ftrace_hash *src)
1466{
1467        struct ftrace_hash *new_hash;
1468        int ret;
1469
1470        /* Reject setting notrace hash on IPMODIFY ftrace_ops */
1471        if (ops->flags & FTRACE_OPS_FL_IPMODIFY && !enable)
1472                return -EINVAL;
1473
1474        new_hash = __ftrace_hash_move(src);
1475        if (!new_hash)
1476                return -ENOMEM;
1477
1478        /* Make sure this can be applied if it is IPMODIFY ftrace_ops */
1479        if (enable) {
1480                /* IPMODIFY should be updated only when filter_hash updating */
1481                ret = ftrace_hash_ipmodify_update(ops, new_hash);
1482                if (ret < 0) {
1483                        free_ftrace_hash(new_hash);
1484                        return ret;
1485                }
1486        }
1487
1488        /*
1489         * Remove the current set, update the hash and add
1490         * them back.
1491         */
1492        ftrace_hash_rec_disable_modify(ops, enable);
1493
1494        rcu_assign_pointer(*dst, new_hash);
1495
1496        ftrace_hash_rec_enable_modify(ops, enable);
1497
1498        return 0;
1499}
1500
1501static bool hash_contains_ip(unsigned long ip,
1502                             struct ftrace_ops_hash *hash)
1503{
1504        /*
1505         * The function record is a match if it exists in the filter
 1506         * hash and not in the notrace hash. Note, an empty hash is
1507         * considered a match for the filter hash, but an empty
1508         * notrace hash is considered not in the notrace hash.
1509         */
1510        return (ftrace_hash_empty(hash->filter_hash) ||
1511                __ftrace_lookup_ip(hash->filter_hash, ip)) &&
1512                (ftrace_hash_empty(hash->notrace_hash) ||
1513                 !__ftrace_lookup_ip(hash->notrace_hash, ip));
1514}
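
/*
 * Concretely: an ip present in filter_hash and absent from notrace_hash
 * matches; an ip missing from a non-empty filter_hash does not; with an
 * empty filter_hash every ip matches unless it is listed in notrace_hash;
 * and an empty notrace_hash never excludes anything.
 */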
1515
1516/*
1517 * Test the hashes for this ops to see if we want to call
1518 * the ops->func or not.
1519 *
1520 * It's a match if the ip is in the ops->filter_hash or
1521 * the filter_hash does not exist or is empty,
1522 *  AND
1523 * the ip is not in the ops->notrace_hash.
1524 *
1525 * This needs to be called with preemption disabled as
1526 * the hashes are freed with call_rcu().
1527 */
1528static int
1529ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip, void *regs)
1530{
1531        struct ftrace_ops_hash hash;
1532        int ret;
1533
1534#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
1535        /*
 1536         * There's a small race when adding ops in which the ftrace handler
 1537         * that wants regs may be called without them. We cannot
 1538         * allow that handler to be called if regs is NULL.
1539         */
1540        if (regs == NULL && (ops->flags & FTRACE_OPS_FL_SAVE_REGS))
1541                return 0;
1542#endif
1543
1544        rcu_assign_pointer(hash.filter_hash, ops->func_hash->filter_hash);
1545        rcu_assign_pointer(hash.notrace_hash, ops->func_hash->notrace_hash);
1546
1547        if (hash_contains_ip(ip, &hash))
1548                ret = 1;
1549        else
1550                ret = 0;
1551
1552        return ret;
1553}
1554
1555/*
 1556 * This is a double for loop. Do not use 'break' to break out of the loop;
 1557 * you must use a goto.
1558 */
1559#define do_for_each_ftrace_rec(pg, rec)                                 \
1560        for (pg = ftrace_pages_start; pg; pg = pg->next) {              \
1561                int _____i;                                             \
1562                for (_____i = 0; _____i < pg->index; _____i++) {        \
1563                        rec = &pg->records[_____i];
1564
1565#define while_for_each_ftrace_rec()             \
1566                }                               \
1567        }
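
/*
 * Usage sketch for the record iterator above ('target_ip' and the label are
 * placeholders). As noted, leave the loop with a goto, never a break:
 *
 *        do_for_each_ftrace_rec(pg, rec) {
 *                if (rec->ip == target_ip)
 *                        goto found;
 *        } while_for_each_ftrace_rec();
 *        return NULL;
 * found:
 *        ...
 */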
1568
1569
1570static int ftrace_cmp_recs(const void *a, const void *b)
1571{
1572        const struct dyn_ftrace *key = a;
1573        const struct dyn_ftrace *rec = b;
1574
1575        if (key->flags < rec->ip)
1576                return -1;
1577        if (key->ip >= rec->ip + MCOUNT_INSN_SIZE)
1578                return 1;
1579        return 0;
1580}
1581
1582static struct dyn_ftrace *lookup_rec(unsigned long start, unsigned long end)
1583{
1584        struct ftrace_page *pg;
1585        struct dyn_ftrace *rec = NULL;
1586        struct dyn_ftrace key;
1587
1588        key.ip = start;
1589        key.flags = end;        /* overload flags, as it is unsigned long */
1590
1591        for (pg = ftrace_pages_start; pg; pg = pg->next) {
1592                if (end < pg->records[0].ip ||
1593                    start >= (pg->records[pg->index - 1].ip + MCOUNT_INSN_SIZE))
1594                        continue;
1595                rec = bsearch(&key, pg->records, pg->index,
1596                              sizeof(struct dyn_ftrace),
1597                              ftrace_cmp_recs);
1598                if (rec)
1599                        break;
1600        }
1601        return rec;
1602}
1603
1604/**
1605 * ftrace_location_range - return the first address of a traced location
1606 *      if it touches the given ip range
1607 * @start: start of range to search.
1608 * @end: end of range to search (inclusive). @end points to the last byte
1609 *      to check.
1610 *
 1611 * Returns rec->ip if the related ftrace location is at least partly within
1612 * the given address range. That is, the first address of the instruction
1613 * that is either a NOP or call to the function tracer. It checks the ftrace
1614 * internal tables to determine if the address belongs or not.
1615 */
1616unsigned long ftrace_location_range(unsigned long start, unsigned long end)
1617{
1618        struct dyn_ftrace *rec;
1619
1620        rec = lookup_rec(start, end);
1621        if (rec)
1622                return rec->ip;
1623
1624        return 0;
1625}
1626
1627/**
 1628 * ftrace_location - return true if the ip given is a traced location
1629 * @ip: the instruction pointer to check
1630 *
1631 * Returns rec->ip if @ip given is a pointer to a ftrace location.
1632 * That is, the instruction that is either a NOP or call to
1633 * the function tracer. It checks the ftrace internal tables to
1634 * determine if the address belongs or not.
1635 */
1636unsigned long ftrace_location(unsigned long ip)
1637{
1638        return ftrace_location_range(ip, ip);
1639}
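
/*
 * Typical use (sketch): callers such as the kprobes code check whether an
 * address is an mcount/fentry site before deciding how to patch it:
 *
 *        if (ftrace_location(addr))
 *                handle_as_ftrace_site(addr);
 *
 * where 'addr' and handle_as_ftrace_site() are placeholders for this
 * example.
 */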
1640
1641/**
1642 * ftrace_text_reserved - return true if range contains an ftrace location
1643 * @start: start of range to search
1644 * @end: end of range to search (inclusive). @end points to the last byte to check.
1645 *
 1646 * Returns 1 if the range from @start to @end contains an ftrace location.
1647 * That is, the instruction that is either a NOP or call to
1648 * the function tracer. It checks the ftrace internal tables to
1649 * determine if the address belongs or not.
1650 */
1651int ftrace_text_reserved(const void *start, const void *end)
1652{
1653        unsigned long ret;
1654
1655        ret = ftrace_location_range((unsigned long)start,
1656                                    (unsigned long)end);
1657
1658        return (int)!!ret;
1659}
1660
1661/* Test if ops registered to this rec needs regs */
1662static bool test_rec_ops_needs_regs(struct dyn_ftrace *rec)
1663{
1664        struct ftrace_ops *ops;
1665        bool keep_regs = false;
1666
1667        for (ops = ftrace_ops_list;
1668             ops != &ftrace_list_end; ops = ops->next) {
1669                /* pass rec in as regs to have non-NULL val */
1670                if (ftrace_ops_test(ops, rec->ip, rec)) {
1671                        if (ops->flags & FTRACE_OPS_FL_SAVE_REGS) {
1672                                keep_regs = true;
1673                                break;
1674                        }
1675                }
1676        }
1677
1678        return  keep_regs;
1679}
1680
1681static bool __ftrace_hash_rec_update(struct ftrace_ops *ops,
1682                                     int filter_hash,
1683                                     bool inc)
1684{
1685        struct ftrace_hash *hash;
1686        struct ftrace_hash *other_hash;
1687        struct ftrace_page *pg;
1688        struct dyn_ftrace *rec;
1689        bool update = false;
1690        int count = 0;
1691        int all = false;
1692
1693        /* Only update if the ops has been registered */
1694        if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
1695                return false;
1696
1697        /*
1698         * In the filter_hash case:
1699         *   If the count is zero, we update all records.
1700         *   Otherwise we just update the items in the hash.
1701         *
1702         * In the notrace_hash case:
1703         *   We enable the update in the hash.
1704         *   As disabling notrace means enabling the tracing,
1705         *   and enabling notrace means disabling, the inc variable
1706         *   gets inversed.
1707         */
1708        if (filter_hash) {
1709                hash = ops->func_hash->filter_hash;
1710                other_hash = ops->func_hash->notrace_hash;
1711                if (ftrace_hash_empty(hash))
1712                        all = true;
1713        } else {
1714                inc = !inc;
1715                hash = ops->func_hash->notrace_hash;
1716                other_hash = ops->func_hash->filter_hash;
1717                /*
1718                 * If the notrace hash has no items,
1719                 * then there's nothing to do.
1720                 */
1721                if (ftrace_hash_empty(hash))
1722                        return false;
1723        }
1724
1725        do_for_each_ftrace_rec(pg, rec) {
1726                int in_other_hash = 0;
1727                int in_hash = 0;
1728                int match = 0;
1729
1730                if (rec->flags & FTRACE_FL_DISABLED)
1731                        continue;
1732
1733                if (all) {
1734                        /*
1735                         * Only the filter_hash affects all records.
1736                         * Update if the record is not in the notrace hash.
1737                         */
1738                        if (!other_hash || !ftrace_lookup_ip(other_hash, rec->ip))
1739                                match = 1;
1740                } else {
1741                        in_hash = !!ftrace_lookup_ip(hash, rec->ip);
1742                        in_other_hash = !!ftrace_lookup_ip(other_hash, rec->ip);
1743
1744                        /*
1745                         * If filter_hash is set, we want to match all functions
1746                         * that are in the hash but not in the other hash.
1747                         *
1748                         * If filter_hash is not set, then we are decrementing.
1749                         * That means we match anything that is in the hash
1750                         * and also in the other_hash. That is, we need to turn
1751                         * off functions in the other hash because they are disabled
1752                         * by this hash.
1753                         */
1754                        if (filter_hash && in_hash && !in_other_hash)
1755                                match = 1;
1756                        else if (!filter_hash && in_hash &&
1757                                 (in_other_hash || ftrace_hash_empty(other_hash)))
1758                                match = 1;
1759                }
1760                if (!match)
1761                        continue;
1762
1763                if (inc) {
1764                        rec->flags++;
1765                        if (FTRACE_WARN_ON(ftrace_rec_count(rec) == FTRACE_REF_MAX))
1766                                return false;
1767
1768                        if (ops->flags & FTRACE_OPS_FL_DIRECT)
1769                                rec->flags |= FTRACE_FL_DIRECT;
1770
1771                        /*
1772                         * If there's only a single callback registered to a
1773                         * function, and the ops has a trampoline registered
1774                         * for it, then we can call it directly.
1775                         */
1776                        if (ftrace_rec_count(rec) == 1 && ops->trampoline)
1777                                rec->flags |= FTRACE_FL_TRAMP;
1778                        else
1779                                /*
1780                                 * If we are adding another function callback
1781                                 * to this function, and the previous had a
1782                                 * custom trampoline in use, then we need to go
1783                                 * back to the default trampoline.
1784                                 */
1785                                rec->flags &= ~FTRACE_FL_TRAMP;
1786
1787                        /*
1788                         * If any ops wants regs saved for this function
1789                         * then all ops will get saved regs.
1790                         */
1791                        if (ops->flags & FTRACE_OPS_FL_SAVE_REGS)
1792                                rec->flags |= FTRACE_FL_REGS;
1793                } else {
1794                        if (FTRACE_WARN_ON(ftrace_rec_count(rec) == 0))
1795                                return false;
1796                        rec->flags--;
1797
1798                        /*
1799                         * Only the internal direct_ops should have the
1800                         * DIRECT flag set. Thus, if it is removing a
1801                         * function, then that function should no longer
1802                         * be direct.
1803                         */
1804                        if (ops->flags & FTRACE_OPS_FL_DIRECT)
1805                                rec->flags &= ~FTRACE_FL_DIRECT;
1806
1807                        /*
1808                         * If the rec had REGS enabled and the ops that is
1809                         * being removed had REGS set, then see if there is
1810                         * still any ops for this record that wants regs.
1811                         * If not, we can stop recording them.
1812                         */
1813                        if (ftrace_rec_count(rec) > 0 &&
1814                            rec->flags & FTRACE_FL_REGS &&
1815                            ops->flags & FTRACE_OPS_FL_SAVE_REGS) {
1816                                if (!test_rec_ops_needs_regs(rec))
1817                                        rec->flags &= ~FTRACE_FL_REGS;
1818                        }
1819
1820                        /*
1821                         * If the rec had TRAMP enabled, then it needs to
1822                         * be cleared. As TRAMP can only be enabled if
1823                         * there is only a single ops attached to it.
1824                         * In other words, always disable it on decrementing.
1825                         * In the future, we may set it if rec count is
1826                         * decremented to one, and the ops that is left
1827                         * has a trampoline.
1828                         */
1829                        rec->flags &= ~FTRACE_FL_TRAMP;
1830
1831                        /*
1832                         * flags will be cleared in ftrace_check_record()
1833                         * if rec count is zero.
1834                         */
1835                }
1836                count++;
1837
1838                /* Must match FTRACE_UPDATE_CALLS in ftrace_modify_all_code() */
1839                update |= ftrace_test_record(rec, 1) != FTRACE_UPDATE_IGNORE;
1840
1841                /* Shortcut, if we handled all records, we are done. */
1842                if (!all && count == hash->count)
1843                        return update;
1844        } while_for_each_ftrace_rec();
1845
1846        return update;
1847}
1848
1849static bool ftrace_hash_rec_disable(struct ftrace_ops *ops,
1850                                    int filter_hash)
1851{
1852        return __ftrace_hash_rec_update(ops, filter_hash, 0);
1853}
1854
1855static bool ftrace_hash_rec_enable(struct ftrace_ops *ops,
1856                                   int filter_hash)
1857{
1858        return __ftrace_hash_rec_update(ops, filter_hash, 1);
1859}
1860
1861static void ftrace_hash_rec_update_modify(struct ftrace_ops *ops,
1862                                          int filter_hash, int inc)
1863{
1864        struct ftrace_ops *op;
1865
1866        __ftrace_hash_rec_update(ops, filter_hash, inc);
1867
1868        if (ops->func_hash != &global_ops.local_hash)
1869                return;
1870
1871        /*
1872         * If the ops shares the global_ops hash, then we need to update
1873         * all ops that are enabled and use this hash.
1874         */
1875        do_for_each_ftrace_op(op, ftrace_ops_list) {
1876                /* Already done */
1877                if (op == ops)
1878                        continue;
1879                if (op->func_hash == &global_ops.local_hash)
1880                        __ftrace_hash_rec_update(op, filter_hash, inc);
1881        } while_for_each_ftrace_op(op);
1882}
1883
1884static void ftrace_hash_rec_disable_modify(struct ftrace_ops *ops,
1885                                           int filter_hash)
1886{
1887        ftrace_hash_rec_update_modify(ops, filter_hash, 0);
1888}
1889
1890static void ftrace_hash_rec_enable_modify(struct ftrace_ops *ops,
1891                                          int filter_hash)
1892{
1893        ftrace_hash_rec_update_modify(ops, filter_hash, 1);
1894}
1895
1896/*
1897 * Try to update IPMODIFY flag on each ftrace_rec. Return 0 if it is OK
1898 * or no update is needed, -EBUSY if it detects a conflict of the flag
1899 * on a ftrace_rec, and -EINVAL if the new_hash tries to trace all recs.
1900 * Note that old_hash and new_hash have the following meanings:
1901 *  - If the hash is NULL, it hits all recs (if IPMODIFY is set, this is rejected)
1902 *  - If the hash is EMPTY_HASH, it hits nothing
1903 *  - Anything else hits the recs which match the hash entries.
1904 */
1905static int __ftrace_hash_update_ipmodify(struct ftrace_ops *ops,
1906                                         struct ftrace_hash *old_hash,
1907                                         struct ftrace_hash *new_hash)
1908{
1909        struct ftrace_page *pg;
1910        struct dyn_ftrace *rec, *end = NULL;
1911        int in_old, in_new;
1912
1913        /* Only update if the ops has been registered */
1914        if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
1915                return 0;
1916
1917        if (!(ops->flags & FTRACE_OPS_FL_IPMODIFY))
1918                return 0;
1919
1920        /*
1921         * Since IPMODIFY is a very address-sensitive action, we do not
1922         * allow ftrace_ops to set all functions to a new hash.
1923         */
1924        if (!new_hash || !old_hash)
1925                return -EINVAL;
1926
1927        /* Update rec->flags */
1928        do_for_each_ftrace_rec(pg, rec) {
1929
1930                if (rec->flags & FTRACE_FL_DISABLED)
1931                        continue;
1932
1933                /* We need to update only differences of filter_hash */
1934                in_old = !!ftrace_lookup_ip(old_hash, rec->ip);
1935                in_new = !!ftrace_lookup_ip(new_hash, rec->ip);
1936                if (in_old == in_new)
1937                        continue;
1938
1939                if (in_new) {
1940                        /* New entries must ensure no others are using it */
1941                        if (rec->flags & FTRACE_FL_IPMODIFY)
1942                                goto rollback;
1943                        rec->flags |= FTRACE_FL_IPMODIFY;
1944                } else /* Removed entry */
1945                        rec->flags &= ~FTRACE_FL_IPMODIFY;
1946        } while_for_each_ftrace_rec();
1947
1948        return 0;
1949
1950rollback:
1951        end = rec;
1952
1953        /* Roll back what we did above */
1954        do_for_each_ftrace_rec(pg, rec) {
1955
1956                if (rec->flags & FTRACE_FL_DISABLED)
1957                        continue;
1958
1959                if (rec == end)
1960                        goto err_out;
1961
1962                in_old = !!ftrace_lookup_ip(old_hash, rec->ip);
1963                in_new = !!ftrace_lookup_ip(new_hash, rec->ip);
1964                if (in_old == in_new)
1965                        continue;
1966
1967                if (in_new)
1968                        rec->flags &= ~FTRACE_FL_IPMODIFY;
1969                else
1970                        rec->flags |= FTRACE_FL_IPMODIFY;
1971        } while_for_each_ftrace_rec();
1972
1973err_out:
1974        return -EBUSY;
1975}
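
/*
 * Worked example of the rollback path (hypothetical hashes): suppose
 * new_hash selects { foo, bar, baz } in record order and bar's record
 * already carries FTRACE_FL_IPMODIFY from another ops. The first loop sets
 * the flag on foo, detects the conflict on bar and jumps to rollback with
 * end pointing at bar's record; the second loop then undoes only the flags
 * it had set (clearing foo again) and the update fails with -EBUSY.
 */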
1976
1977static int ftrace_hash_ipmodify_enable(struct ftrace_ops *ops)
1978{
1979        struct ftrace_hash *hash = ops->func_hash->filter_hash;
1980
1981        if (ftrace_hash_empty(hash))
1982                hash = NULL;
1983
1984        return __ftrace_hash_update_ipmodify(ops, EMPTY_HASH, hash);
1985}
1986
1987/* Disabling always succeeds */
1988static void ftrace_hash_ipmodify_disable(struct ftrace_ops *ops)
1989{
1990        struct ftrace_hash *hash = ops->func_hash->filter_hash;
1991
1992        if (ftrace_hash_empty(hash))
1993                hash = NULL;
1994
1995        __ftrace_hash_update_ipmodify(ops, hash, EMPTY_HASH);
1996}
1997
1998static int ftrace_hash_ipmodify_update(struct ftrace_ops *ops,
1999                                       struct ftrace_hash *new_hash)
2000{
2001        struct ftrace_hash *old_hash = ops->func_hash->filter_hash;
2002
2003        if (ftrace_hash_empty(old_hash))
2004                old_hash = NULL;
2005
2006        if (ftrace_hash_empty(new_hash))
2007                new_hash = NULL;
2008
2009        return __ftrace_hash_update_ipmodify(ops, old_hash, new_hash);
2010}
2011
2012static void print_ip_ins(const char *fmt, const unsigned char *p)
2013{
2014        int i;
2015
2016        printk(KERN_CONT "%s", fmt);
2017
2018        for (i = 0; i < MCOUNT_INSN_SIZE; i++)
2019                printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]);
2020}
2021
2022static struct ftrace_ops *
2023ftrace_find_tramp_ops_any(struct dyn_ftrace *rec);
2024static struct ftrace_ops *
2025ftrace_find_tramp_ops_next(struct dyn_ftrace *rec, struct ftrace_ops *ops);
2026
2027enum ftrace_bug_type ftrace_bug_type;
2028const void *ftrace_expected;
2029
2030static void print_bug_type(void)
2031{
2032        switch (ftrace_bug_type) {
2033        case FTRACE_BUG_UNKNOWN:
2034                break;
2035        case FTRACE_BUG_INIT:
2036                pr_info("Initializing ftrace call sites\n");
2037                break;
2038        case FTRACE_BUG_NOP:
2039                pr_info("Setting ftrace call site to NOP\n");
2040                break;
2041        case FTRACE_BUG_CALL:
2042                pr_info("Setting ftrace call site to call ftrace function\n");
2043                break;
2044        case FTRACE_BUG_UPDATE:
2045                pr_info("Updating ftrace call site to call a different ftrace function\n");
2046                break;
2047        }
2048}
2049
2050/**
2051 * ftrace_bug - report and shutdown function tracer
2052 * @failed: The failed type (EFAULT, EINVAL, EPERM)
2053 * @rec: The record that failed
2054 *
2055 * The arch code that enables or disables the function tracing
2056 * can call ftrace_bug() when it has detected a problem in
2057 * modifying the code. @failed should be one of:
2058 * EFAULT - if the problem happens on reading the @ip address
2059 * EINVAL - if what is read at @ip is not what was expected
2060 * EPERM - if the problem happens on writing to the @ip address
2061 */
2062void ftrace_bug(int failed, struct dyn_ftrace *rec)
2063{
2064        unsigned long ip = rec ? rec->ip : 0;
2065
2066        switch (failed) {
2067        case -EFAULT:
2068                FTRACE_WARN_ON_ONCE(1);
2069                pr_info("ftrace faulted on modifying ");
2070                print_ip_sym(ip);
2071                break;
2072        case -EINVAL:
2073                FTRACE_WARN_ON_ONCE(1);
2074                pr_info("ftrace failed to modify ");
2075                print_ip_sym(ip);
2076                print_ip_ins(" actual:   ", (unsigned char *)ip);
2077                pr_cont("\n");
2078                if (ftrace_expected) {
2079                        print_ip_ins(" expected: ", ftrace_expected);
2080                        pr_cont("\n");
2081                }
2082                break;
2083        case -EPERM:
2084                FTRACE_WARN_ON_ONCE(1);
2085                pr_info("ftrace faulted on writing ");
2086                print_ip_sym(ip);
2087                break;
2088        default:
2089                FTRACE_WARN_ON_ONCE(1);
2090                pr_info("ftrace faulted on unknown error ");
2091                print_ip_sym(ip);
2092        }
2093        print_bug_type();
2094        if (rec) {
2095                struct ftrace_ops *ops = NULL;
2096
2097                pr_info("ftrace record flags: %lx\n", rec->flags);
2098                pr_cont(" (%ld)%s", ftrace_rec_count(rec),
2099                        rec->flags & FTRACE_FL_REGS ? " R" : "  ");
2100                if (rec->flags & FTRACE_FL_TRAMP_EN) {
2101                        ops = ftrace_find_tramp_ops_any(rec);
2102                        if (ops) {
2103                                do {
2104                                        pr_cont("\ttramp: %pS (%pS)",
2105                                                (void *)ops->trampoline,
2106                                                (void *)ops->func);
2107                                        ops = ftrace_find_tramp_ops_next(rec, ops);
2108                                } while (ops);
2109                        } else
2110                                pr_cont("\ttramp: ERROR!");
2111
2112                }
2113                ip = ftrace_get_addr_curr(rec);
2114                pr_cont("\n expected tramp: %lx\n", ip);
2115        }
2116}
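
/*
 * Expected arch-side usage, sketched (helper names are illustrative and
 * differ between architectures):
 *
 *	ret = ftrace_modify_code(rec->ip, old_code, new_code);
 *	if (ret) {
 *		ftrace_bug(ret, rec);
 *		return;
 *	}
 *
 * where ret is -EFAULT, -EINVAL or -EPERM as documented above, so the
 * failure is reported and ftrace is shut down via FTRACE_WARN_ON_ONCE().
 */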
2117
2118static int ftrace_check_record(struct dyn_ftrace *rec, int enable, int update)
2119{
2120        unsigned long flag = 0UL;
2121
2122        ftrace_bug_type = FTRACE_BUG_UNKNOWN;
2123
2124        if (rec->flags & FTRACE_FL_DISABLED)
2125                return FTRACE_UPDATE_IGNORE;
2126
2127        /*
2128         * If we are updating calls:
2129         *
2130         *   If the record has a ref count, then we need to enable it
2131         *   because someone is using it.
2132         *
2133         *   Otherwise we make sure its disabled.
2134         *
2135         * If we are disabling calls, then disable all records that
2136         * are enabled.
2137         */
2138        if (enable && ftrace_rec_count(rec))
2139                flag = FTRACE_FL_ENABLED;
2140
2141        /*
2142         * If enabling and the REGS flag does not match the REGS_EN, or
2143         * the TRAMP flag doesn't match the TRAMP_EN, then do not ignore
2144         * this record. Set flags to fail the compare against ENABLED.
2145         * Same for direct calls.
2146         */
2147        if (flag) {
2148                if (!(rec->flags & FTRACE_FL_REGS) !=
2149                    !(rec->flags & FTRACE_FL_REGS_EN))
2150                        flag |= FTRACE_FL_REGS;
2151
2152                if (!(rec->flags & FTRACE_FL_TRAMP) !=
2153                    !(rec->flags & FTRACE_FL_TRAMP_EN))
2154                        flag |= FTRACE_FL_TRAMP;
2155
2156                /*
2157                 * Direct calls are special, as count matters.
2158                 * We must test the record for direct, if the
2159                 * DIRECT and DIRECT_EN do not match, but only
2160                 * if the count is 1. That's because, if the
2161                 * count is something other than one, we do not
2162                 * want the direct enabled (it will be done via the
2163                 * direct helper). But if DIRECT_EN is set, and
2164                 * the count is not one, we need to clear it.
2165                 */
2166                if (ftrace_rec_count(rec) == 1) {
2167                        if (!(rec->flags & FTRACE_FL_DIRECT) !=
2168                            !(rec->flags & FTRACE_FL_DIRECT_EN))
2169                                flag |= FTRACE_FL_DIRECT;
2170                } else if (rec->flags & FTRACE_FL_DIRECT_EN) {
2171                        flag |= FTRACE_FL_DIRECT;
2172                }
2173        }
2174
2175        /* If the state of this record hasn't changed, then do nothing */
2176        if ((rec->flags & FTRACE_FL_ENABLED) == flag)
2177                return FTRACE_UPDATE_IGNORE;
2178
2179        if (flag) {
2180                /* Save off if rec is being enabled (for return value) */
2181                flag ^= rec->flags & FTRACE_FL_ENABLED;
2182
2183                if (update) {
2184                        rec->flags |= FTRACE_FL_ENABLED;
2185                        if (flag & FTRACE_FL_REGS) {
2186                                if (rec->flags & FTRACE_FL_REGS)
2187                                        rec->flags |= FTRACE_FL_REGS_EN;
2188                                else
2189                                        rec->flags &= ~FTRACE_FL_REGS_EN;
2190                        }
2191                        if (flag & FTRACE_FL_TRAMP) {
2192                                if (rec->flags & FTRACE_FL_TRAMP)
2193                                        rec->flags |= FTRACE_FL_TRAMP_EN;
2194                                else
2195                                        rec->flags &= ~FTRACE_FL_TRAMP_EN;
2196                        }
2197                        if (flag & FTRACE_FL_DIRECT) {
2198                                /*
2199                                 * If there's only one user (direct_ops helper)
2200                                 * then we can call the direct function
2201                                 * directly (no ftrace trampoline).
2202                                 */
2203                                if (ftrace_rec_count(rec) == 1) {
2204                                        if (rec->flags & FTRACE_FL_DIRECT)
2205                                                rec->flags |= FTRACE_FL_DIRECT_EN;
2206                                        else
2207                                                rec->flags &= ~FTRACE_FL_DIRECT_EN;
2208                                } else {
2209                                        /*
2210                                         * Can only call directly if there's
2211                                         * only one callback to the function.
2212                                         */
2213                                        rec->flags &= ~FTRACE_FL_DIRECT_EN;
2214                                }
2215                        }
2216                }
2217
2218                /*
2219                 * If this record is being updated from a nop, then
2220                 *   return UPDATE_MAKE_CALL.
2221                 * Otherwise,
2222                 *   return UPDATE_MODIFY_CALL to tell the caller to convert
2223                 *   from the save regs, to a non-save regs function or
2224                 *   vice versa, or from a trampoline call.
2225                 */
2226                if (flag & FTRACE_FL_ENABLED) {
2227                        ftrace_bug_type = FTRACE_BUG_CALL;
2228                        return FTRACE_UPDATE_MAKE_CALL;
2229                }
2230
2231                ftrace_bug_type = FTRACE_BUG_UPDATE;
2232                return FTRACE_UPDATE_MODIFY_CALL;
2233        }
2234
2235        if (update) {
2236                /* If there's no more users, clear all flags */
2237                if (!ftrace_rec_count(rec))
2238                        rec->flags = 0;
2239                else
2240                        /*
2241                         * Just disable the record, but keep the ops TRAMP
2242                         * and REGS states. The _EN flags must be disabled though.
2243                         */
2244                        rec->flags &= ~(FTRACE_FL_ENABLED | FTRACE_FL_TRAMP_EN |
2245                                        FTRACE_FL_REGS_EN | FTRACE_FL_DIRECT_EN);
2246        }
2247
2248        ftrace_bug_type = FTRACE_BUG_NOP;
2249        return FTRACE_UPDATE_MAKE_NOP;
2250}
2251
2252/**
2253 * ftrace_update_record, set a record that now is tracing or not
2254 * @rec: the record to update
2255 * @enable: set to 1 if the record is tracing, zero to force disable
2256 *
2257 * The records that represent all functions that can be traced need
2258 * to be updated when tracing has been enabled.
2259 */
2260int ftrace_update_record(struct dyn_ftrace *rec, int enable)
2261{
2262        return ftrace_check_record(rec, enable, 1);
2263}
2264
2265/**
2266 * ftrace_test_record, check if the record has been enabled or not
2267 * @rec: the record to test
2268 * @enable: set to 1 to check if enabled, 0 if it is disabled
2269 *
2270 * The arch code may need to test if a record is already set to
2271 * tracing to determine how to modify the function code that it
2272 * represents.
2273 */
2274int ftrace_test_record(struct dyn_ftrace *rec, int enable)
2275{
2276        return ftrace_check_record(rec, enable, 0);
2277}
2278
2279static struct ftrace_ops *
2280ftrace_find_tramp_ops_any(struct dyn_ftrace *rec)
2281{
2282        struct ftrace_ops *op;
2283        unsigned long ip = rec->ip;
2284
2285        do_for_each_ftrace_op(op, ftrace_ops_list) {
2286
2287                if (!op->trampoline)
2288                        continue;
2289
2290                if (hash_contains_ip(ip, op->func_hash))
2291                        return op;
2292        } while_for_each_ftrace_op(op);
2293
2294        return NULL;
2295}
2296
2297static struct ftrace_ops *
2298ftrace_find_tramp_ops_next(struct dyn_ftrace *rec,
2299                           struct ftrace_ops *op)
2300{
2301        unsigned long ip = rec->ip;
2302
2303        while_for_each_ftrace_op(op) {
2304
2305                if (!op->trampoline)
2306                        continue;
2307
2308                if (hash_contains_ip(ip, op->func_hash))
2309                        return op;
2310        }
2311
2312        return NULL;
2313}
2314
2315static struct ftrace_ops *
2316ftrace_find_tramp_ops_curr(struct dyn_ftrace *rec)
2317{
2318        struct ftrace_ops *op;
2319        unsigned long ip = rec->ip;
2320
2321        /*
2322         * Need to check removed ops first.
2323         * If they are being removed, and this rec has a tramp,
2324         * and this rec is in the ops list, then it would be the
2325         * one with the tramp.
2326         */
2327        if (removed_ops) {
2328                if (hash_contains_ip(ip, &removed_ops->old_hash))
2329                        return removed_ops;
2330        }
2331
2332        /*
2333         * Need to find the current trampoline for a rec.
2334         * Now, a trampoline is only attached to a rec if there
2335         * was a single 'ops' attached to it. But this can be called
2336         * when we are adding another op to the rec or removing the
2337         * current one. Thus, if the op is being added, we can
2338         * ignore it because it hasn't attached itself to the rec
2339         * yet.
2340         *
2341         * If an ops is being modified (hooking to different functions)
2342         * then we don't care about the new functions that are being
2343         * added, just the old ones (that are probably being removed).
2344         *
2345         * If we are adding an ops to a function that is already using
2346         * a trampoline, the trampoline needs to be removed (trampolines
2347         * are only for a single attached ops), so an ops that is not being
2348         * modified also needs to be checked.
2349         */
2350        do_for_each_ftrace_op(op, ftrace_ops_list) {
2351
2352                if (!op->trampoline)
2353                        continue;
2354
2355                /*
2356                 * If the ops is being added, it hasn't gotten to
2357                 * the point to be removed from this tree yet.
2358                 */
2359                if (op->flags & FTRACE_OPS_FL_ADDING)
2360                        continue;
2361
2362
2363                /*
2364                 * If the ops is being modified and is in the old
2365                 * hash, then it is probably being removed from this
2366                 * function.
2367                 */
2368                if ((op->flags & FTRACE_OPS_FL_MODIFYING) &&
2369                    hash_contains_ip(ip, &op->old_hash))
2370                        return op;
2371                /*
2372                 * If the ops is not being added or modified, and it's
2373                 * in its normal filter hash, then this must be the one
2374                 * we want!
2375                 */
2376                if (!(op->flags & FTRACE_OPS_FL_MODIFYING) &&
2377                    hash_contains_ip(ip, op->func_hash))
2378                        return op;
2379
2380        } while_for_each_ftrace_op(op);
2381
2382        return NULL;
2383}
2384
2385static struct ftrace_ops *
2386ftrace_find_tramp_ops_new(struct dyn_ftrace *rec)
2387{
2388        struct ftrace_ops *op;
2389        unsigned long ip = rec->ip;
2390
2391        do_for_each_ftrace_op(op, ftrace_ops_list) {
2392                /* Check if this ops traces rec->ip according to its hashes */
2393                if (hash_contains_ip(ip, op->func_hash))
2394                        return op;
2395        } while_for_each_ftrace_op(op);
2396
2397        return NULL;
2398}
2399
2400#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
2401/* Protected by rcu_tasks for reading, and direct_mutex for writing */
2402static struct ftrace_hash *direct_functions = EMPTY_HASH;
2403static DEFINE_MUTEX(direct_mutex);
2404int ftrace_direct_func_count;
2405
2406/*
2407 * Search the direct_functions hash to see if the given instruction pointer
2408 * has a direct caller attached to it.
2409 */
2410unsigned long ftrace_find_rec_direct(unsigned long ip)
2411{
2412        struct ftrace_func_entry *entry;
2413
2414        entry = __ftrace_lookup_ip(direct_functions, ip);
2415        if (!entry)
2416                return 0;
2417
2418        return entry->direct;
2419}
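
/*
 * Illustrative lookup (my_func is a hypothetical symbol): report whether a
 * direct trampoline is attached to a function's ftrace site. Callers must
 * be in an RCU-tasks read-side section or hold direct_mutex, per the
 * locking comment above:
 *
 *	unsigned long ip = ftrace_location((unsigned long)my_func);
 *	unsigned long direct = ip ? ftrace_find_rec_direct(ip) : 0;
 *
 *	if (direct)
 *		pr_debug("direct caller: %pS\n", (void *)direct);
 */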
2420
2421static void call_direct_funcs(unsigned long ip, unsigned long pip,
2422                              struct ftrace_ops *ops, struct pt_regs *regs)
2423{
2424        unsigned long addr;
2425
2426        addr = ftrace_find_rec_direct(ip);
2427        if (!addr)
2428                return;
2429
2430        arch_ftrace_set_direct_caller(regs, addr);
2431}
2432
2433struct ftrace_ops direct_ops = {
2434        .func           = call_direct_funcs,
2435        .flags          = FTRACE_OPS_FL_IPMODIFY | FTRACE_OPS_FL_RECURSION_SAFE
2436                          | FTRACE_OPS_FL_DIRECT | FTRACE_OPS_FL_SAVE_REGS
2437                          | FTRACE_OPS_FL_PERMANENT,
2438};
2439#endif /* CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */
2440
2441/**
2442 * ftrace_get_addr_new - Get the call address to set to
2443 * @rec:  The ftrace record descriptor
2444 *
2445 * If the record has the FTRACE_FL_REGS set, that means that it
2446 * wants to convert to a callback that saves all regs. If FTRACE_FL_REGS
2447 * is not set, then it wants to convert to the normal callback.
2448 *
2449 * Returns the address of the trampoline to set to
2450 */
2451unsigned long ftrace_get_addr_new(struct dyn_ftrace *rec)
2452{
2453        struct ftrace_ops *ops;
2454        unsigned long addr;
2455
2456        if ((rec->flags & FTRACE_FL_DIRECT) &&
2457            (ftrace_rec_count(rec) == 1)) {
2458                addr = ftrace_find_rec_direct(rec->ip);
2459                if (addr)
2460                        return addr;
2461                WARN_ON_ONCE(1);
2462        }
2463
2464        /* Trampolines take precedence over regs */
2465        if (rec->flags & FTRACE_FL_TRAMP) {
2466                ops = ftrace_find_tramp_ops_new(rec);
2467                if (FTRACE_WARN_ON(!ops || !ops->trampoline)) {
2468                        pr_warn("Bad trampoline accounting at: %p (%pS) (%lx)\n",
2469                                (void *)rec->ip, (void *)rec->ip, rec->flags);
2470                        /* Ftrace is shutting down, return anything */
2471                        return (unsigned long)FTRACE_ADDR;
2472                }
2473                return ops->trampoline;
2474        }
2475
2476        if (rec->flags & FTRACE_FL_REGS)
2477                return (unsigned long)FTRACE_REGS_ADDR;
2478        else
2479                return (unsigned long)FTRACE_ADDR;
2480}
2481
2482/**
2483 * ftrace_get_addr_curr - Get the call address that is already there
2484 * @rec:  The ftrace record descriptor
2485 *
2486 * The FTRACE_FL_REGS_EN is set when the record already points to
2487 * a function that saves all the regs. Basically the '_EN' version
2488 * represents the current state of the function.
2489 *
2490 * Returns the address of the trampoline that is currently being called
2491 */
2492unsigned long ftrace_get_addr_curr(struct dyn_ftrace *rec)
2493{
2494        struct ftrace_ops *ops;
2495        unsigned long addr;
2496
2497        /* Direct calls take precedence over trampolines */
2498        if (rec->flags & FTRACE_FL_DIRECT_EN) {
2499                addr = ftrace_find_rec_direct(rec->ip);
2500                if (addr)
2501                        return addr;
2502                WARN_ON_ONCE(1);
2503        }
2504
2505        /* Trampolines take precedence over regs */
2506        if (rec->flags & FTRACE_FL_TRAMP_EN) {
2507                ops = ftrace_find_tramp_ops_curr(rec);
2508                if (FTRACE_WARN_ON(!ops)) {
2509                        pr_warn("Bad trampoline accounting at: %p (%pS)\n",
2510                                (void *)rec->ip, (void *)rec->ip);
2511                        /* Ftrace is shutting down, return anything */
2512                        return (unsigned long)FTRACE_ADDR;
2513                }
2514                return ops->trampoline;
2515        }
2516
2517        if (rec->flags & FTRACE_FL_REGS_EN)
2518                return (unsigned long)FTRACE_REGS_ADDR;
2519        else
2520                return (unsigned long)FTRACE_ADDR;
2521}
2522
2523static int
2524__ftrace_replace_code(struct dyn_ftrace *rec, int enable)
2525{
2526        unsigned long ftrace_old_addr;
2527        unsigned long ftrace_addr;
2528        int ret;
2529
2530        ftrace_addr = ftrace_get_addr_new(rec);
2531
2532        /* This needs to be done before we call ftrace_update_record */
2533        ftrace_old_addr = ftrace_get_addr_curr(rec);
2534
2535        ret = ftrace_update_record(rec, enable);
2536
2537        ftrace_bug_type = FTRACE_BUG_UNKNOWN;
2538
2539        switch (ret) {
2540        case FTRACE_UPDATE_IGNORE:
2541                return 0;
2542
2543        case FTRACE_UPDATE_MAKE_CALL:
2544                ftrace_bug_type = FTRACE_BUG_CALL;
2545                return ftrace_make_call(rec, ftrace_addr);
2546
2547        case FTRACE_UPDATE_MAKE_NOP:
2548                ftrace_bug_type = FTRACE_BUG_NOP;
2549                return ftrace_make_nop(NULL, rec, ftrace_old_addr);
2550
2551        case FTRACE_UPDATE_MODIFY_CALL:
2552                ftrace_bug_type = FTRACE_BUG_UPDATE;
2553                return ftrace_modify_call(rec, ftrace_old_addr, ftrace_addr);
2554        }
2555
2556        return -1; /* unknown ftrace bug */
2557}
2558
2559void __weak ftrace_replace_code(int enable)
2560{
2561        struct dyn_ftrace *rec;
2562        struct ftrace_page *pg;
2563        int failed;
2564
2565        if (unlikely(ftrace_disabled))
2566                return;
2567
2568        do_for_each_ftrace_rec(pg, rec) {
2569
2570                if (rec->flags & FTRACE_FL_DISABLED)
2571                        continue;
2572
2573                failed = __ftrace_replace_code(rec, enable);
2574                if (failed) {
2575                        ftrace_bug(failed, rec);
2576                        /* Stop processing */
2577                        return;
2578                }
2579        } while_for_each_ftrace_rec();
2580}
2581
2582struct ftrace_rec_iter {
2583        struct ftrace_page      *pg;
2584        int                     index;
2585};
2586
2587/**
2588 * ftrace_rec_iter_start, start up iterating over traced functions
2589 *
2590 * Returns an iterator handle that is used to iterate over all
2591 * the records that represent address locations where functions
2592 * are traced.
2593 *
2594 * May return NULL if no records are available.
2595 */
2596struct ftrace_rec_iter *ftrace_rec_iter_start(void)
2597{
2598        /*
2599         * We only use a single iterator.
2600         * Protected by the ftrace_lock mutex.
2601         */
2602        static struct ftrace_rec_iter ftrace_rec_iter;
2603        struct ftrace_rec_iter *iter = &ftrace_rec_iter;
2604
2605        iter->pg = ftrace_pages_start;
2606        iter->index = 0;
2607
2608        /* Could have empty pages */
2609        while (iter->pg && !iter->pg->index)
2610                iter->pg = iter->pg->next;
2611
2612        if (!iter->pg)
2613                return NULL;
2614
2615        return iter;
2616}
2617
2618/**
2619 * ftrace_rec_iter_next, get the next record to process.
2620 * @iter: The handle to the iterator.
2621 *
2622 * Returns the next iterator after the given iterator @iter.
2623 */
2624struct ftrace_rec_iter *ftrace_rec_iter_next(struct ftrace_rec_iter *iter)
2625{
2626        iter->index++;
2627
2628        if (iter->index >= iter->pg->index) {
2629                iter->pg = iter->pg->next;
2630                iter->index = 0;
2631
2632                /* Could have empty pages */
2633                while (iter->pg && !iter->pg->index)
2634                        iter->pg = iter->pg->next;
2635        }
2636
2637        if (!iter->pg)
2638                return NULL;
2639
2640        return iter;
2641}
2642
2643/**
2644 * ftrace_rec_iter_record, get the record at the iterator location
2645 * @iter: The current iterator location
2646 *
2647 * Returns the record that the current @iter is at.
2648 */
2649struct dyn_ftrace *ftrace_rec_iter_record(struct ftrace_rec_iter *iter)
2650{
2651        return &iter->pg->records[iter->index];
2652}
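
/*
 * Canonical iteration pattern (a sketch; some arch overrides of
 * ftrace_replace_code() loop this way):
 *
 *	struct ftrace_rec_iter *iter;
 *	struct dyn_ftrace *rec;
 *
 *	for (iter = ftrace_rec_iter_start(); iter;
 *	     iter = ftrace_rec_iter_next(iter)) {
 *		rec = ftrace_rec_iter_record(iter);
 *		(inspect or patch the call site at rec->ip here)
 *	}
 */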
2653
2654static int
2655ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
2656{
2657        int ret;
2658
2659        if (unlikely(ftrace_disabled))
2660                return 0;
2661
2662        ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
2663        if (ret) {
2664                ftrace_bug_type = FTRACE_BUG_INIT;
2665                ftrace_bug(ret, rec);
2666                return 0;
2667        }
2668        return 1;
2669}
2670
2671/*
2672 * archs can override this function if they must do something
2673 * before the modifying code is performed.
2674 */
2675int __weak ftrace_arch_code_modify_prepare(void)
2676{
2677        return 0;
2678}
2679
2680/*
2681 * archs can override this function if they must do something
2682 * after the modifying code is performed.
2683 */
2684int __weak ftrace_arch_code_modify_post_process(void)
2685{
2686        return 0;
2687}
2688
2689void ftrace_modify_all_code(int command)
2690{
2691        int update = command & FTRACE_UPDATE_TRACE_FUNC;
2692        int err = 0;
2693
2694        /*
2695         * If the ftrace_caller calls a ftrace_ops func directly,
2696         * we need to make sure that it only traces functions it
2697         * expects to trace. When doing the switch of functions,
2698         * we need to update to the ftrace_ops_list_func first
2699         * before the transition between old and new calls are set,
2700         * as the ftrace_ops_list_func will check the ops hashes
2701         * to make sure the ops are having the right functions
2702         * traced.
2703         */
2704        if (update) {
2705                err = ftrace_update_ftrace_func(ftrace_ops_list_func);
2706                if (FTRACE_WARN_ON(err))
2707                        return;
2708        }
2709
2710        if (command & FTRACE_UPDATE_CALLS)
2711                ftrace_replace_code(1);
2712        else if (command & FTRACE_DISABLE_CALLS)
2713                ftrace_replace_code(0);
2714
2715        if (update && ftrace_trace_function != ftrace_ops_list_func) {
2716                function_trace_op = set_function_trace_op;
2717                smp_wmb();
2718                /* If irqs are disabled, we are in stop machine */
2719                if (!irqs_disabled())
2720                        smp_call_function(ftrace_sync_ipi, NULL, 1);
2721                err = ftrace_update_ftrace_func(ftrace_trace_function);
2722                if (FTRACE_WARN_ON(err))
2723                        return;
2724        }
2725
2726        if (command & FTRACE_START_FUNC_RET)
2727                err = ftrace_enable_ftrace_graph_caller();
2728        else if (command & FTRACE_STOP_FUNC_RET)
2729                err = ftrace_disable_ftrace_graph_caller();
2730        FTRACE_WARN_ON(err);
2731}
2732
2733static int __ftrace_modify_code(void *data)
2734{
2735        int *command = data;
2736
2737        ftrace_modify_all_code(*command);
2738
2739        return 0;
2740}
2741
2742/**
2743 * ftrace_run_stop_machine, go back to the stop machine method
2744 * @command: The command to tell ftrace what to do
2745 *
2746 * If an arch needs to fall back to the stop machine method, then
2747 * it can call this function.
2748 */
2749void ftrace_run_stop_machine(int command)
2750{
2751        stop_machine(__ftrace_modify_code, &command, NULL);
2752}
2753
2754/**
2755 * arch_ftrace_update_code, modify the code to trace or not trace
2756 * @command: The command that needs to be done
2757 *
2758 * Archs can override this function if they do not need to
2759 * run stop_machine() to modify code.
2760 */
2761void __weak arch_ftrace_update_code(int command)
2762{
2763        ftrace_run_stop_machine(command);
2764}
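
/*
 * A minimal sketch of an arch override (hypothetical; real architectures
 * typically serialize against other text modification, e.g. with
 * text_mutex, or use their own code-patching machinery instead of
 * stop_machine()):
 *
 *	void arch_ftrace_update_code(int command)
 *	{
 *		mutex_lock(&text_mutex);
 *		ftrace_modify_all_code(command);
 *		mutex_unlock(&text_mutex);
 *	}
 */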
2765
2766static void ftrace_run_update_code(int command)
2767{
2768        int ret;
2769
2770        ret = ftrace_arch_code_modify_prepare();
2771        FTRACE_WARN_ON(ret);
2772        if (ret)
2773                return;
2774
2775        /*
2776         * By default we use stop_machine() to modify the code.
2777         * But archs can do whatever they want as long as it
2778         * is safe. The stop_machine() is the safest, but also
2779         * produces the most overhead.
2780         */
2781        arch_ftrace_update_code(command);
2782
2783        ret = ftrace_arch_code_modify_post_process();
2784        FTRACE_WARN_ON(ret);
2785}
2786
2787static void ftrace_run_modify_code(struct ftrace_ops *ops, int command,
2788                                   struct ftrace_ops_hash *old_hash)
2789{
2790        ops->flags |= FTRACE_OPS_FL_MODIFYING;
2791        ops->old_hash.filter_hash = old_hash->filter_hash;
2792        ops->old_hash.notrace_hash = old_hash->notrace_hash;
2793        ftrace_run_update_code(command);
2794        ops->old_hash.filter_hash = NULL;
2795        ops->old_hash.notrace_hash = NULL;
2796        ops->flags &= ~FTRACE_OPS_FL_MODIFYING;
2797}
2798
2799static ftrace_func_t saved_ftrace_func;
2800static int ftrace_start_up;
2801
2802void __weak arch_ftrace_trampoline_free(struct ftrace_ops *ops)
2803{
2804}
2805
2806static void ftrace_startup_enable(int command)
2807{
2808        if (saved_ftrace_func != ftrace_trace_function) {
2809                saved_ftrace_func = ftrace_trace_function;
2810                command |= FTRACE_UPDATE_TRACE_FUNC;
2811        }
2812
2813        if (!command || !ftrace_enabled)
2814                return;
2815
2816        ftrace_run_update_code(command);
2817}
2818
2819static void ftrace_startup_all(int command)
2820{
2821        update_all_ops = true;
2822        ftrace_startup_enable(command);
2823        update_all_ops = false;
2824}
2825
2826static int ftrace_startup(struct ftrace_ops *ops, int command)
2827{
2828        int ret;
2829
2830        if (unlikely(ftrace_disabled))
2831                return -ENODEV;
2832
2833        ret = __register_ftrace_function(ops);
2834        if (ret)
2835                return ret;
2836
2837        ftrace_start_up++;
2838
2839        /*
2840         * Note that ftrace probes use this to start up
2841         * and modify functions it will probe. But we still
2842         * set the ADDING flag for modification, as probes
2843         * do not have trampolines. If they add them in the
2844         * future, then the probes will need to distinguish
2845         * between adding and updating probes.
2846         */
2847        ops->flags |= FTRACE_OPS_FL_ENABLED | FTRACE_OPS_FL_ADDING;
2848
2849        ret = ftrace_hash_ipmodify_enable(ops);
2850        if (ret < 0) {
2851                /* Rollback registration process */
2852                __unregister_ftrace_function(ops);
2853                ftrace_start_up--;
2854                ops->flags &= ~FTRACE_OPS_FL_ENABLED;
2855                return ret;
2856        }
2857
2858        if (ftrace_hash_rec_enable(ops, 1))
2859                command |= FTRACE_UPDATE_CALLS;
2860
2861        ftrace_startup_enable(command);
2862
2863        ops->flags &= ~FTRACE_OPS_FL_ADDING;
2864
2865        return 0;
2866}
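
/*
 * Call-flow sketch for orientation: the public entry point,
 * register_ftrace_function() (defined later in this file), takes
 * ftrace_lock and ends up here via ftrace_startup(ops, 0). A minimal,
 * hypothetical user therefore only needs something like:
 *
 *	static void my_callback(unsigned long ip, unsigned long parent_ip,
 *				struct ftrace_ops *op, struct pt_regs *regs)
 *	{
 *		(do something cheap and recursion-safe here)
 *	}
 *
 *	static struct ftrace_ops my_ops = {
 *		.func	= my_callback,
 *		.flags	= FTRACE_OPS_FL_RECURSION_SAFE,
 *	};
 *
 *	register_ftrace_function(&my_ops);
 */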
2867
2868static int ftrace_shutdown(struct ftrace_ops *ops, int command)
2869{
2870        int ret;
2871
2872        if (unlikely(ftrace_disabled))
2873                return -ENODEV;
2874
2875        ret = __unregister_ftrace_function(ops);
2876        if (ret)
2877                return ret;
2878
2879        ftrace_start_up--;
2880        /*
2881         * Just warn in case of an imbalance, no need to kill ftrace, it's not
2882         * critical but the ftrace_call callers may never be nopped again after
2883         * further ftrace uses.
2884         */
2885        WARN_ON_ONCE(ftrace_start_up < 0);
2886
2887        /* Disabling ipmodify never fails */
2888        ftrace_hash_ipmodify_disable(ops);
2889
2890        if (ftrace_hash_rec_disable(ops, 1))
2891                command |= FTRACE_UPDATE_CALLS;
2892
2893        ops->flags &= ~FTRACE_OPS_FL_ENABLED;
2894
2895        if (saved_ftrace_func != ftrace_trace_function) {
2896                saved_ftrace_func = ftrace_trace_function;
2897                command |= FTRACE_UPDATE_TRACE_FUNC;
2898        }
2899
2900        if (!command || !ftrace_enabled) {
2901                /*
2902                 * If these are dynamic or per_cpu ops, they still
2903                 * need their data freed. Since function tracing is
2904                 * not currently active, we can just free them
2905                 * without synchronizing all CPUs.
2906                 */
2907                if (ops->flags & FTRACE_OPS_FL_DYNAMIC)
2908                        goto free_ops;
2909
2910                return 0;
2911        }
2912
2913        /*
2914         * If the ops uses a trampoline, then it needs to be
2915         * tested first on update.
2916         */
2917        ops->flags |= FTRACE_OPS_FL_REMOVING;
2918        removed_ops = ops;
2919
2920        /* The trampoline logic checks the old hashes */
2921        ops->old_hash.filter_hash = ops->func_hash->filter_hash;
2922        ops->old_hash.notrace_hash = ops->func_hash->notrace_hash;
2923
2924        ftrace_run_update_code(command);
2925
2926        /*
2927         * If there's no more ops registered with ftrace, run a
2928         * sanity check to make sure all rec flags are cleared.
2929         */
2930        if (rcu_dereference_protected(ftrace_ops_list,
2931                        lockdep_is_held(&ftrace_lock)) == &ftrace_list_end) {
2932                struct ftrace_page *pg;
2933                struct dyn_ftrace *rec;
2934
2935                do_for_each_ftrace_rec(pg, rec) {
2936                        if (FTRACE_WARN_ON_ONCE(rec->flags & ~FTRACE_FL_DISABLED))
2937                                pr_warn("  %pS flags:%lx\n",
2938                                        (void *)rec->ip, rec->flags);
2939                } while_for_each_ftrace_rec();
2940        }
2941
2942        ops->old_hash.filter_hash = NULL;
2943        ops->old_hash.notrace_hash = NULL;
2944
2945        removed_ops = NULL;
2946        ops->flags &= ~FTRACE_OPS_FL_REMOVING;
2947
2948        /*
2949         * Dynamic ops may be freed, we must make sure that all
2950         * callers are done before leaving this function.
2951         * The same goes for freeing the per_cpu data of the per_cpu
2952         * ops.
2953         */
2954        if (ops->flags & FTRACE_OPS_FL_DYNAMIC) {
2955                /*
2956                 * We need to do a hard force of sched synchronization.
2957                 * This is because we use preempt_disable() to do RCU, but
2958                 * the function tracers can be called where RCU is not watching
2959                 * (like before user_exit()). We can not rely on the RCU
2960                 * infrastructure to do the synchronization, thus we must do it
2961                 * ourselves.
2962                 */
2963                synchronize_rcu_tasks_rude();
2964
2965                /*
2966                 * When the kernel is preemptive, tasks can be preempted
2967                 * while on a ftrace trampoline. Just scheduling a task on
2968                 * a CPU is not good enough to flush them. Calling
2969                 * synchronize_rcu_tasks() will wait for those tasks to
2970                 * execute and either schedule voluntarily or enter user space.
2971                 */
2972                if (IS_ENABLED(CONFIG_PREEMPTION))
2973                        synchronize_rcu_tasks();
2974
2975 free_ops:
2976                arch_ftrace_trampoline_free(ops);
2977        }
2978
2979        return 0;
2980}
2981
2982static void ftrace_startup_sysctl(void)
2983{
2984        int command;
2985
2986        if (unlikely(ftrace_disabled))
2987                return;
2988
2989        /* Force update next time */
2990        saved_ftrace_func = NULL;
2991        /* ftrace_start_up is true if we want ftrace running */
2992        if (ftrace_start_up) {
2993                command = FTRACE_UPDATE_CALLS;
2994                if (ftrace_graph_active)
2995                        command |= FTRACE_START_FUNC_RET;
2996                ftrace_startup_enable(command);
2997        }
2998}
2999
3000static void ftrace_shutdown_sysctl(void)
3001{
3002        int command;
3003
3004        if (unlikely(ftrace_disabled))
3005                return;
3006
3007        /* ftrace_start_up is true if ftrace is running */
3008        if (ftrace_start_up) {
3009                command = FTRACE_DISABLE_CALLS;
3010                if (ftrace_graph_active)
3011                        command |= FTRACE_STOP_FUNC_RET;
3012                ftrace_run_update_code(command);
3013        }
3014}
3015
3016static u64              ftrace_update_time;
3017unsigned long           ftrace_update_tot_cnt;
3018unsigned long           ftrace_number_of_pages;
3019unsigned long           ftrace_number_of_groups;
3020
3021static inline int ops_traces_mod(struct ftrace_ops *ops)
3022{
3023        /*
3024         * An empty filter_hash will default to tracing the module.
3025         * But notrace hash requires a test of individual module functions.
3026         */
3027        return ftrace_hash_empty(ops->func_hash->filter_hash) &&
3028                ftrace_hash_empty(ops->func_hash->notrace_hash);
3029}
3030
3031/*
3032 * Check if the current ops references the record.
3033 *
3034 * If the ops traces all functions, then it was already accounted for.
3035 * If the ops does not trace the current record function, skip it.
3036 * If the ops ignores the function via notrace filter, skip it.
3037 */
3038static inline bool
3039ops_references_rec(struct ftrace_ops *ops, struct dyn_ftrace *rec)
3040{
3041        /* If ops isn't enabled, ignore it */
3042        if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
3043                return 0;
3044
3045        /* If ops traces all then it includes this function */
3046        if (ops_traces_mod(ops))
3047                return 1;
3048
3049        /* The function must be in the filter */
3050        if (!ftrace_hash_empty(ops->func_hash->filter_hash) &&
3051            !__ftrace_lookup_ip(ops->func_hash->filter_hash, rec->ip))
3052                return 0;
3053
3054        /* If in notrace hash, we ignore it too */
3055        if (ftrace_lookup_ip(ops->func_hash->notrace_hash, rec->ip))
3056                return 0;
3057
3058        return 1;
3059}
3060
3061static int ftrace_update_code(struct module *mod, struct ftrace_page *new_pgs)
3062{
3063        struct ftrace_page *pg;
3064        struct dyn_ftrace *p;
3065        u64 start, stop;
3066        unsigned long update_cnt = 0;
3067        unsigned long rec_flags = 0;
3068        int i;
3069
3070        start = ftrace_now(raw_smp_processor_id());
3071
3072        /*
3073         * When a module is loaded, this function is called to convert
3074         * the calls to mcount in its text to nops, and also to create
3075         * an entry in the ftrace data. Now, if ftrace is activated
3076         * after this call, but before the module sets its text to
3077         * read-only, the modification of enabling ftrace can fail if
3078         * the read-only is done while ftrace is converting the calls.
3079         * To prevent this, the module's records are set as disabled
3080         * and will be enabled after the call to set the module's text
3081         * to read-only.
3082         */
3083        if (mod)
3084                rec_flags |= FTRACE_FL_DISABLED;
3085
3086        for (pg = new_pgs; pg; pg = pg->next) {
3087
3088                for (i = 0; i < pg->index; i++) {
3089
3090                        /* If something went wrong, bail without enabling anything */
3091                        if (unlikely(ftrace_disabled))
3092                                return -1;
3093
3094                        p = &pg->records[i];
3095                        p->flags = rec_flags;
3096
3097                        /*
3098                         * Do the initial record conversion from mcount jump
3099                         * to the NOP instructions.
3100                         */
3101                        if (!ftrace_code_disable(mod, p))
3102                                break;
3103
3104                        update_cnt++;
3105                }
3106        }
3107
3108        stop = ftrace_now(raw_smp_processor_id());
3109        ftrace_update_time = stop - start;
3110        ftrace_update_tot_cnt += update_cnt;
3111
3112        return 0;
3113}
3114
3115static int ftrace_allocate_records(struct ftrace_page *pg, int count)
3116{
3117        int order;
3118        int cnt;
3119
3120        if (WARN_ON(!count))
3121                return -EINVAL;
3122
3123        order = get_count_order(DIV_ROUND_UP(count, ENTRIES_PER_PAGE));
3124
3125        /*
3126         * We want to fill as much as possible. No more than a page
3127         * may be empty.
3128         */
3129        while ((PAGE_SIZE << order) / ENTRY_SIZE >= count + ENTRIES_PER_PAGE)
3130                order--;
3131
3132 again:
3133        pg->records = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
3134
3135        if (!pg->records) {
3136                /* if we can't allocate this size, try something smaller */
3137                if (!order)
3138                        return -ENOMEM;
3139                order >>= 1;
3140                goto again;
3141        }
3142
3143        ftrace_number_of_pages += 1 << order;
3144        ftrace_number_of_groups++;
3145
3146        cnt = (PAGE_SIZE << order) / ENTRY_SIZE;
3147        pg->size = cnt;
3148
3149        if (cnt > count)
3150                cnt = count;
3151
3152        return cnt;
3153}
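
/*
 * Worked example with illustrative numbers (the real ENTRY_SIZE depends on
 * sizeof(struct dyn_ftrace)): with PAGE_SIZE = 4096 and ENTRY_SIZE = 32,
 * ENTRIES_PER_PAGE is 128. A request for count = 200 gives
 * DIV_ROUND_UP(200, 128) = 2 pages, so order = get_count_order(2) = 1,
 * i.e. a two-page block holding 256 records. The shrink loop keeps that
 * order because 256 < 200 + 128, the allocation succeeds with
 * pg->size = 256, and cnt is trimmed back to the requested 200.
 */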
3154
3155static struct ftrace_page *
3156ftrace_allocate_pages(unsigned long num_to_init)
3157{
3158        struct ftrace_page *start_pg;
3159        struct ftrace_page *pg;
3160        int order;
3161        int cnt;
3162
3163        if (!num_to_init)
3164                return NULL;
3165
3166        start_pg = pg = kzalloc(sizeof(*pg), GFP_KERNEL);
3167        if (!pg)
3168                return NULL;
3169
3170        /*
3171         * Try to allocate as much as possible in one contiguous
3172         * location that fills in all of the space. We want to
3173         * waste as little space as possible.
3174         */
3175        for (;;) {
3176                cnt = ftrace_allocate_records(pg, num_to_init);
3177                if (cnt < 0)
3178                        goto free_pages;
3179
3180                num_to_init -= cnt;
3181                if (!num_to_init)
3182                        break;
3183
3184                pg->next = kzalloc(sizeof(*pg), GFP_KERNEL);
3185                if (!pg->next)
3186                        goto free_pages;
3187
3188                pg = pg->next;
3189        }
3190
3191        return start_pg;
3192
3193 free_pages:
3194        pg = start_pg;
3195        while (pg) {
3196                order = get_count_order(pg->size / ENTRIES_PER_PAGE);
3197                free_pages((unsigned long)pg->records, order);
3198                start_pg = pg->next;
3199                kfree(pg);
3200                pg = start_pg;
3201                ftrace_number_of_pages -= 1 << order;
3202                ftrace_number_of_groups--;
3203        }
3204        pr_info("ftrace: FAILED to allocate memory for functions\n");
3205        return NULL;
3206}
3207
3208#define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */
3209
3210struct ftrace_iterator {
3211        loff_t                          pos;
3212        loff_t                          func_pos;
3213        loff_t                          mod_pos;
3214        struct ftrace_page              *pg;
3215        struct dyn_ftrace               *func;
3216        struct ftrace_func_probe        *probe;
3217        struct ftrace_func_entry        *probe_entry;
3218        struct trace_parser             parser;
3219        struct ftrace_hash              *hash;
3220        struct ftrace_ops               *ops;
3221        struct trace_array              *tr;
3222        struct list_head                *mod_list;
3223        int                             pidx;
3224        int                             idx;
3225        unsigned                        flags;
3226};
3227
3228static void *
3229t_probe_next(struct seq_file *m, loff_t *pos)
3230{
3231        struct ftrace_iterator *iter = m->private;
3232        struct trace_array *tr = iter->ops->private;
3233        struct list_head *func_probes;
3234        struct ftrace_hash *hash;
3235        struct list_head *next;
3236        struct hlist_node *hnd = NULL;
3237        struct hlist_head *hhd;
3238        int size;
3239
3240        (*pos)++;
3241        iter->pos = *pos;
3242
3243        if (!tr)
3244                return NULL;
3245
3246        func_probes = &tr->func_probes;
3247        if (list_empty(func_probes))
3248                return NULL;
3249
3250        if (!iter->probe) {
3251                next = func_probes->next;
3252                iter->probe = list_entry(next, struct ftrace_func_probe, list);
3253        }
3254
3255        if (iter->probe_entry)
3256                hnd = &iter->probe_entry->hlist;
3257
3258        hash = iter->probe->ops.func_hash->filter_hash;
3259
3260        /*
3261         * A probe being registered may temporarily have an empty hash
3262         * and it's at the end of the func_probes list.
3263         */
3264        if (!hash || hash == EMPTY_HASH)
3265                return NULL;
3266
3267        size = 1 << hash->size_bits;
3268
3269 retry:
3270        if (iter->pidx >= size) {
3271                if (iter->probe->list.next == func_probes)
3272                        return NULL;
3273                next = iter->probe->list.next;
3274                iter->probe = list_entry(next, struct ftrace_func_probe, list);
3275                hash = iter->probe->ops.func_hash->filter_hash;
3276                size = 1 << hash->size_bits;
3277                iter->pidx = 0;
3278        }
3279
3280        hhd = &hash->buckets[iter->pidx];
3281
3282        if (hlist_empty(hhd)) {
3283                iter->pidx++;
3284                hnd = NULL;
3285                goto retry;
3286        }
3287
3288        if (!hnd)
3289                hnd = hhd->first;
3290        else {
3291                hnd = hnd->next;
3292                if (!hnd) {
3293                        iter->pidx++;
3294                        goto retry;
3295                }
3296        }
3297
3298        if (WARN_ON_ONCE(!hnd))
3299                return NULL;
3300
3301        iter->probe_entry = hlist_entry(hnd, struct ftrace_func_entry, hlist);
3302
3303        return iter;
3304}
3305
3306static void *t_probe_start(struct seq_file *m, loff_t *pos)
3307{
3308        struct ftrace_iterator *iter = m->private;
3309        void *p = NULL;
3310        loff_t l;
3311
3312        if (!(iter->flags & FTRACE_ITER_DO_PROBES))
3313                return NULL;
3314
3315        if (iter->mod_pos > *pos)
3316                return NULL;
3317
3318        iter->probe = NULL;
3319        iter->probe_entry = NULL;
3320        iter->pidx = 0;
3321        for (l = 0; l <= (*pos - iter->mod_pos); ) {
3322                p = t_probe_next(m, &l);
3323                if (!p)
3324                        break;
3325        }
3326        if (!p)
3327                return NULL;
3328
3329        /* Only set this if we have an item */
3330        iter->flags |= FTRACE_ITER_PROBE;
3331
3332        return iter;
3333}
3334
3335static int
3336t_probe_show(struct seq_file *m, struct ftrace_iterator *iter)
3337{
3338        struct ftrace_func_entry *probe_entry;
3339        struct ftrace_probe_ops *probe_ops;
3340        struct ftrace_func_probe *probe;
3341
3342        probe = iter->probe;
3343        probe_entry = iter->probe_entry;
3344
3345        if (WARN_ON_ONCE(!probe || !probe_entry))
3346                return -EIO;
3347
3348        probe_ops = probe->probe_ops;
3349
3350        if (probe_ops->print)
3351                return probe_ops->print(m, probe_entry->ip, probe_ops, probe->data);
3352
3353        seq_printf(m, "%ps:%ps\n", (void *)probe_entry->ip,
3354                   (void *)probe_ops->func);
3355
3356        return 0;
3357}
3358
3359static void *
3360t_mod_next(struct seq_file *m, loff_t *pos)
3361{
3362        struct ftrace_iterator *iter = m->private;
3363        struct trace_array *tr = iter->tr;
3364
3365        (*pos)++;
3366        iter->pos = *pos;
3367
3368        iter->mod_list = iter->mod_list->next;
3369
3370        if (iter->mod_list == &tr->mod_trace ||
3371            iter->mod_list == &tr->mod_notrace) {
3372                iter->flags &= ~FTRACE_ITER_MOD;
3373                return NULL;
3374        }
3375
3376        iter->mod_pos = *pos;
3377
3378        return iter;
3379}
3380
3381static void *t_mod_start(struct seq_file *m, loff_t *pos)
3382{
3383        struct ftrace_iterator *iter = m->private;
3384        void *p = NULL;
3385        loff_t l;
3386
3387        if (iter->func_pos > *pos)
3388                return NULL;
3389
3390        iter->mod_pos = iter->func_pos;
3391
3392        /* probes are only available if tr is set */
3393        if (!iter->tr)
3394                return NULL;
3395
3396        for (l = 0; l <= (*pos - iter->func_pos); ) {
3397                p = t_mod_next(m, &l);
3398                if (!p)
3399                        break;
3400        }
3401        if (!p) {
3402                iter->flags &= ~FTRACE_ITER_MOD;
3403                return t_probe_start(m, pos);
3404        }
3405
3406        /* Only set this if we have an item */
3407        iter->flags |= FTRACE_ITER_MOD;
3408
3409        return iter;
3410}
3411
3412static int
3413t_mod_show(struct seq_file *m, struct ftrace_iterator *iter)
3414{
3415        struct ftrace_mod_load *ftrace_mod;
3416        struct trace_array *tr = iter->tr;
3417
3418        if (WARN_ON_ONCE(!iter->mod_list) ||
3419                         iter->mod_list == &tr->mod_trace ||
3420                         iter->mod_list == &tr->mod_notrace)
3421                return -EIO;
3422
3423        ftrace_mod = list_entry(iter->mod_list, struct ftrace_mod_load, list);
3424
3425        if (ftrace_mod->func)
3426                seq_printf(m, "%s", ftrace_mod->func);
3427        else
3428                seq_putc(m, '*');
3429
3430        seq_printf(m, ":mod:%s\n", ftrace_mod->module);
3431
3432        return 0;
3433}
3434
3435static void *
3436t_func_next(struct seq_file *m, loff_t *pos)
3437{
3438        struct ftrace_iterator *iter = m->private;
3439        struct dyn_ftrace *rec = NULL;
3440
3441        (*pos)++;
3442
3443 retry:
3444        if (iter->idx >= iter->pg->index) {
3445                if (iter->pg->next) {
3446                        iter->pg = iter->pg->next;
3447                        iter->idx = 0;
3448                        goto retry;
3449                }
3450        } else {
3451                rec = &iter->pg->records[iter->idx++];
3452                if (((iter->flags & (FTRACE_ITER_FILTER | FTRACE_ITER_NOTRACE)) &&
3453                     !ftrace_lookup_ip(iter->hash, rec->ip)) ||
3454
3455                    ((iter->flags & FTRACE_ITER_ENABLED) &&
3456                     !(rec->flags & FTRACE_FL_ENABLED))) {
3457
3458                        rec = NULL;
3459                        goto retry;
3460                }
3461        }
3462
3463        if (!rec)
3464                return NULL;
3465
3466        iter->pos = iter->func_pos = *pos;
3467        iter->func = rec;
3468
3469        return iter;
3470}
3471
3472static void *
3473t_next(struct seq_file *m, void *v, loff_t *pos)
3474{
3475        struct ftrace_iterator *iter = m->private;
3476        loff_t l = *pos; /* t_probe_start() must use original pos */
3477        void *ret;
3478
3479        if (unlikely(ftrace_disabled))
3480                return NULL;
3481
3482        if (iter->flags & FTRACE_ITER_PROBE)
3483                return t_probe_next(m, pos);
3484
3485        if (iter->flags & FTRACE_ITER_MOD)
3486                return t_mod_next(m, pos);
3487
3488        if (iter->flags & FTRACE_ITER_PRINTALL) {
3489                /* next must increment pos, and t_mod_start() does not */
3490                (*pos)++;
3491                return t_mod_start(m, &l);
3492        }
3493
3494        ret = t_func_next(m, pos);
3495
3496        if (!ret)
3497                return t_mod_start(m, &l);
3498
3499        return ret;
3500}
3501
3502static void reset_iter_read(struct ftrace_iterator *iter)
3503{
3504        iter->pos = 0;
3505        iter->func_pos = 0;
3506        iter->flags &= ~(FTRACE_ITER_PRINTALL | FTRACE_ITER_PROBE | FTRACE_ITER_MOD);
3507}
3508
3509static void *t_start(struct seq_file *m, loff_t *pos)
3510{
3511        struct ftrace_iterator *iter = m->private;
3512        void *p = NULL;
3513        loff_t l;
3514
3515        mutex_lock(&ftrace_lock);
3516
3517        if (unlikely(ftrace_disabled))
3518                return NULL;
3519
3520        /*
3521         * If an lseek was done, then reset and start from beginning.
3522         */
3523        if (*pos < iter->pos)
3524                reset_iter_read(iter);
3525
3526        /*
3527         * For set_ftrace_filter reading, if we have the filter
3528         * off, we can short cut and just print out that all
3529         * functions are enabled.
3530         */
3531        if ((iter->flags & (FTRACE_ITER_FILTER | FTRACE_ITER_NOTRACE)) &&
3532            ftrace_hash_empty(iter->hash)) {
3533                iter->func_pos = 1; /* Account for the message */
3534                if (*pos > 0)
3535                        return t_mod_start(m, pos);
3536                iter->flags |= FTRACE_ITER_PRINTALL;
3537                /* reset in case of seek/pread */
3538                iter->flags &= ~FTRACE_ITER_PROBE;
3539                return iter;
3540        }
3541
3542        if (iter->flags & FTRACE_ITER_MOD)
3543                return t_mod_start(m, pos);
3544
3545        /*
3546         * Unfortunately, we need to restart at ftrace_pages_start
3547         * every time we let go of ftrace_lock. This is because
3548         * those pointers can change without the lock.
3549         */
3550        iter->pg = ftrace_pages_start;
3551        iter->idx = 0;
3552        for (l = 0; l <= *pos; ) {
3553                p = t_func_next(m, &l);
3554                if (!p)
3555                        break;
3556        }
3557
3558        if (!p)
3559                return t_mod_start(m, pos);
3560
3561        return iter;
3562}
3563
3564static void t_stop(struct seq_file *m, void *p)
3565{
3566        mutex_unlock(&ftrace_lock);
3567}
3568
3569void * __weak
3570arch_ftrace_trampoline_func(struct ftrace_ops *ops, struct dyn_ftrace *rec)
3571{
3572        return NULL;
3573}
3574
3575static void add_trampoline_func(struct seq_file *m, struct ftrace_ops *ops,
3576                                struct dyn_ftrace *rec)
3577{
3578        void *ptr;
3579
3580        ptr = arch_ftrace_trampoline_func(ops, rec);
3581        if (ptr)
3582                seq_printf(m, " ->%pS", ptr);
3583}
3584
3585static int t_show(struct seq_file *m, void *v)
3586{
3587        struct ftrace_iterator *iter = m->private;
3588        struct dyn_ftrace *rec;
3589
3590        if (iter->flags & FTRACE_ITER_PROBE)
3591                return t_probe_show(m, iter);
3592
3593        if (iter->flags & FTRACE_ITER_MOD)
3594                return t_mod_show(m, iter);
3595
3596        if (iter->flags & FTRACE_ITER_PRINTALL) {
3597                if (iter->flags & FTRACE_ITER_NOTRACE)
3598                        seq_puts(m, "#### no functions disabled ####\n");
3599                else
3600                        seq_puts(m, "#### all functions enabled ####\n");
3601                return 0;
3602        }
3603
3604        rec = iter->func;
3605
3606        if (!rec)
3607                return 0;
3608
3609        seq_printf(m, "%ps", (void *)rec->ip);
3610        if (iter->flags & FTRACE_ITER_ENABLED) {
3611                struct ftrace_ops *ops;
3612
3613                seq_printf(m, " (%ld)%s%s%s",
3614                           ftrace_rec_count(rec),
3615                           rec->flags & FTRACE_FL_REGS ? " R" : "  ",
3616                           rec->flags & FTRACE_FL_IPMODIFY ? " I" : "  ",
3617                           rec->flags & FTRACE_FL_DIRECT ? " D" : "  ");
3618                if (rec->flags & FTRACE_FL_TRAMP_EN) {
3619                        ops = ftrace_find_tramp_ops_any(rec);
3620                        if (ops) {
3621                                do {
3622                                        seq_printf(m, "\ttramp: %pS (%pS)",
3623                                                   (void *)ops->trampoline,
3624                                                   (void *)ops->func);
3625                                        add_trampoline_func(m, ops, rec);
3626                                        ops = ftrace_find_tramp_ops_next(rec, ops);
3627                                } while (ops);
3628                        } else
3629                                seq_puts(m, "\ttramp: ERROR!");
3630                } else {
3631                        add_trampoline_func(m, NULL, rec);
3632                }
3633                if (rec->flags & FTRACE_FL_DIRECT) {
3634                        unsigned long direct;
3635
3636                        direct = ftrace_find_rec_direct(rec->ip);
3637                        if (direct)
3638                                seq_printf(m, "\n\tdirect-->%pS", (void *)direct);
3639                }
3640        }
3641
3642        seq_putc(m, '\n');
3643
3644        return 0;
3645}
3646
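/*
 * With FTRACE_ITER_ENABLED set, t_show() above emits one line per record,
 * schematically (not literal output):
 *
 *	<function> (<nr of ops>) R  I  D	tramp: <trampoline> (<ops->func>)
 *
 * where the R, I and D letters only appear when the record's REGS,
 * IPMODIFY and DIRECT flags are set, and the trampoline/direct details
 * only appear when such an attachment exists.
 */
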
3647static const struct seq_operations show_ftrace_seq_ops = {
3648        .start = t_start,
3649        .next = t_next,
3650        .stop = t_stop,
3651        .show = t_show,
3652};
3653
3654static int
3655ftrace_avail_open(struct inode *inode, struct file *file)
3656{
3657        struct ftrace_iterator *iter;
3658
3659        if (unlikely(ftrace_disabled))
3660                return -ENODEV;
3661
3662        iter = __seq_open_private(file, &show_ftrace_seq_ops, sizeof(*iter));
3663        if (!iter)
3664                return -ENOMEM;
3665
3666        iter->pg = ftrace_pages_start;
3667        iter->ops = &global_ops;
3668
3669        return 0;
3670}
3671
3672static int
3673ftrace_enabled_open(struct inode *inode, struct file *file)
3674{
3675        struct ftrace_iterator *iter;
3676
3677        iter = __seq_open_private(file, &show_ftrace_seq_ops, sizeof(*iter));
3678        if (!iter)
3679                return -ENOMEM;
3680
3681        iter->pg = ftrace_pages_start;
3682        iter->flags = FTRACE_ITER_ENABLED;
3683        iter->ops = &global_ops;
3684
3685        return 0;
3686}
3687
3688/**
3689 * ftrace_regex_open - initialize function tracer filter files
3690 * @ops: The ftrace_ops that hold the hash filters
3691 * @flag: The type of filter to process
3692 * @inode: The inode, usually passed in to your open routine
3693 * @file: The file, usually passed in to your open routine
3694 *
3695 * ftrace_regex_open() initializes the filter files for the
3696 * @ops. Depending on @flag it may process the filter hash or
3697 * the notrace hash of @ops. With this called from the open
3698 * routine, you can use ftrace_filter_write() for the write
3699 * routine if @flag has FTRACE_ITER_FILTER set, or
3700 * ftrace_notrace_write() if @flag has FTRACE_ITER_NOTRACE set.
3701 * tracing_lseek() should be used as the lseek routine, and
3702 * release must call ftrace_regex_release().
3703 */
3704int
3705ftrace_regex_open(struct ftrace_ops *ops, int flag,
3706                  struct inode *inode, struct file *file)
3707{
3708        struct ftrace_iterator *iter;
3709        struct ftrace_hash *hash;
3710        struct list_head *mod_head;
3711        struct trace_array *tr = ops->private;
3712        int ret = 0;
3713
3714        ftrace_ops_init(ops);
3715
3716        if (unlikely(ftrace_disabled))
3717                return -ENODEV;
3718
3719        iter = kzalloc(sizeof(*iter), GFP_KERNEL);
3720        if (!iter)
3721                return -ENOMEM;
3722
3723        if (trace_parser_get_init(&iter->parser, FTRACE_BUFF_MAX)) {
3724                kfree(iter);
3725                return -ENOMEM;
3726        }
3727
3728        iter->ops = ops;
3729        iter->flags = flag;
3730        iter->tr = tr;
3731
3732        mutex_lock(&ops->func_hash->regex_lock);
3733
3734        if (flag & FTRACE_ITER_NOTRACE) {
3735                hash = ops->func_hash->notrace_hash;
3736                mod_head = tr ? &tr->mod_notrace : NULL;
3737        } else {
3738                hash = ops->func_hash->filter_hash;
3739                mod_head = tr ? &tr->mod_trace : NULL;
3740        }
3741
3742        iter->mod_list = mod_head;
3743
3744        if (file->f_mode & FMODE_WRITE) {
3745                const int size_bits = FTRACE_HASH_DEFAULT_BITS;
3746
3747                if (file->f_flags & O_TRUNC) {
3748                        iter->hash = alloc_ftrace_hash(size_bits);
3749                        clear_ftrace_mod_list(mod_head);
3750                } else {
3751                        iter->hash = alloc_and_copy_ftrace_hash(size_bits, hash);
3752                }
3753
3754                if (!iter->hash) {
3755                        trace_parser_put(&iter->parser);
3756                        kfree(iter);
3757                        ret = -ENOMEM;
3758                        goto out_unlock;
3759                }
3760        } else
3761                iter->hash = hash;
3762
3763        if (file->f_mode & FMODE_READ) {
3764                iter->pg = ftrace_pages_start;
3765
3766                ret = seq_open(file, &show_ftrace_seq_ops);
3767                if (!ret) {
3768                        struct seq_file *m = file->private_data;
3769                        m->private = iter;
3770                } else {
3771                        /* Failed */
3772                        free_ftrace_hash(iter->hash);
3773                        trace_parser_put(&iter->parser);
3774                        kfree(iter);
3775                }
3776        } else
3777                file->private_data = iter;
3778
3779 out_unlock:
3780        mutex_unlock(&ops->func_hash->regex_lock);
3781
3782        return ret;
3783}
3784
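/*
 * Tying the pieces from the comment above together: a filter file is
 * typically exposed with an open routine such as ftrace_filter_open()
 * below and a file_operations wired up roughly like this (a sketch;
 * "my_filter_fops" is an invented name, but it mirrors how ftrace's own
 * set_ftrace_filter file is set up):
 *
 *	static const struct file_operations my_filter_fops = {
 *		.open		= ftrace_filter_open,
 *		.read		= seq_read,
 *		.write		= ftrace_filter_write,
 *		.llseek		= tracing_lseek,
 *		.release	= ftrace_regex_release,
 *	};
 */
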
3785static int
3786ftrace_filter_open(struct inode *inode, struct file *file)
3787{
3788        struct ftrace_ops *ops = inode->i_private;
3789
3790        return ftrace_regex_open(ops,
3791                        FTRACE_ITER_FILTER | FTRACE_ITER_DO_PROBES,
3792                        inode, file);
3793}
3794
3795static int
3796ftrace_notrace_open(struct inode *inode, struct file *file)
3797{
3798        struct ftrace_ops *ops = inode->i_private;
3799
3800        return ftrace_regex_open(ops, FTRACE_ITER_NOTRACE,
3801                                 inode, file);
3802}
3803
3804/* Type used for quick matching of ftrace basic regexes (globs) from filter_parse_regex */
3805struct ftrace_glob {
3806        char *search;
3807        unsigned len;
3808        int type;
3809};
3810
3811/*
3812 * If symbols in an architecture don't correspond exactly to the user-visible
3813 * name of what they represent, it is possible to define this function to
3814 * perform the necessary adjustments.
3815 */
3816char * __weak arch_ftrace_match_adjust(char *str, const char *search)
3817{
3818        return str;
3819}
3820
3821static int ftrace_match(char *str, struct ftrace_glob *g)
3822{
3823        int matched = 0;
3824        int slen;
3825
3826        str = arch_ftrace_match_adjust(str, g->search);
3827
3828        switch (g->type) {
3829        case MATCH_FULL:
3830                if (strcmp(str, g->search) == 0)
3831                        matched = 1;
3832                break;
3833        case MATCH_FRONT_ONLY:
3834                if (strncmp(str, g->search, g->len) == 0)
3835                        matched = 1;
3836                break;
3837        case MATCH_MIDDLE_ONLY:
3838                if (strstr(str, g->search))
3839                        matched = 1;
3840                break;
3841        case MATCH_END_ONLY:
3842                slen = strlen(str);
3843                if (slen >= g->len &&
3844                    memcmp(str + slen - g->len, g->search, g->len) == 0)
3845                        matched = 1;
3846                break;
3847        case MATCH_GLOB:
3848                if (glob_match(g->search, str))
3849                        matched = 1;
3850                break;
3851        }
3852
3853        return matched;
3854}
3855
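/*
 * As a rough guide, the glob types handled above correspond to the
 * following patterns accepted by filter_parse_regex() (illustrative
 * examples only):
 *
 *	"schedule"		MATCH_FULL	(exact name)
 *	"sched_*"		MATCH_FRONT_ONLY
 *	"*_lock"		MATCH_END_ONLY
 *	"*rcu*"			MATCH_MIDDLE_ONLY
 *	"rcu_*_kthread"		MATCH_GLOB	(full glob_match() syntax)
 */
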
3856static int
3857enter_record(struct ftrace_hash *hash, struct dyn_ftrace *rec, int clear_filter)
3858{
3859        struct ftrace_func_entry *entry;
3860        int ret = 0;
3861
3862        entry = ftrace_lookup_ip(hash, rec->ip);
3863        if (clear_filter) {
3864                /* Do nothing if it doesn't exist */
3865                if (!entry)
3866                        return 0;
3867
3868                free_hash_entry(hash, entry);
3869        } else {
3870                /* Do nothing if it exists */
3871                if (entry)
3872                        return 0;
3873
3874                ret = add_hash_entry(hash, rec->ip);
3875        }
3876        return ret;
3877}
3878
3879static int
3880ftrace_match_record(struct dyn_ftrace *rec, struct ftrace_glob *func_g,
3881                struct ftrace_glob *mod_g, int exclude_mod)
3882{
3883        char str[KSYM_SYMBOL_LEN];
3884        char *modname;
3885
3886        kallsyms_lookup(rec->ip, NULL, NULL, &modname, str);
3887
3888        if (mod_g) {
3889                int mod_matches = (modname) ? ftrace_match(modname, mod_g) : 0;
3890
3891                /* blank module name to match all modules */
3892                if (!mod_g->len) {
3893                        /* blank module globbing: modname xor exclude_mod */
3894                        if (!exclude_mod != !modname)
3895                                goto func_match;
3896                        return 0;
3897                }
3898
3899                /*
3900                 * exclude_mod is set to trace everything but the given
3901                 * module. If it is set and the module matches, then
3902                 * return 0. If it is not set, and the module doesn't match
3903                 * also return 0. Otherwise, check the function to see if
3904                 * that matches.
3905                 */
3906                if (!mod_matches == !exclude_mod)
3907                        return 0;
3908func_match:
3909                /* blank search means to match all funcs in the mod */
3910                if (!func_g->len)
3911                        return 1;
3912        }
3913
3914        return ftrace_match(str, func_g);
3915}
3916
3917static int
3918match_records(struct ftrace_hash *hash, char *func, int len, char *mod)
3919{
3920        struct ftrace_page *pg;
3921        struct dyn_ftrace *rec;
3922        struct ftrace_glob func_g = { .type = MATCH_FULL };
3923        struct ftrace_glob mod_g = { .type = MATCH_FULL };
3924        struct ftrace_glob *mod_match = (mod) ? &mod_g : NULL;
3925        int exclude_mod = 0;
3926        int found = 0;
3927        int ret;
3928        int clear_filter = 0;
3929
3930        if (func) {
3931                func_g.type = filter_parse_regex(func, len, &func_g.search,
3932                                                 &clear_filter);
3933                func_g.len = strlen(func_g.search);
3934        }
3935
3936        if (mod) {
3937                mod_g.type = filter_parse_regex(mod, strlen(mod),
3938                                &mod_g.search, &exclude_mod);
3939                mod_g.len = strlen(mod_g.search);
3940        }
3941
3942        mutex_lock(&ftrace_lock);
3943
3944        if (unlikely(ftrace_disabled))
3945                goto out_unlock;
3946
3947        do_for_each_ftrace_rec(pg, rec) {
3948
3949                if (rec->flags & FTRACE_FL_DISABLED)
3950                        continue;
3951
3952                if (ftrace_match_record(rec, &func_g, mod_match, exclude_mod)) {
3953                        ret = enter_record(hash, rec, clear_filter);
3954                        if (ret < 0) {
3955                                found = ret;
3956                                goto out_unlock;
3957                        }
3958                        found = 1;
3959                }
3960        } while_for_each_ftrace_rec();
3961 out_unlock:
3962        mutex_unlock(&ftrace_lock);
3963
3964        return found;
3965}
3966
3967static int
3968ftrace_match_records(struct ftrace_hash *hash, char *buff, int len)
3969{
3970        return match_records(hash, buff, len, NULL);
3971}
3972
3973static void ftrace_ops_update_code(struct ftrace_ops *ops,
3974                                   struct ftrace_ops_hash *old_hash)
3975{
3976        struct ftrace_ops *op;
3977
3978        if (!ftrace_enabled)
3979                return;
3980
3981        if (ops->flags & FTRACE_OPS_FL_ENABLED) {
3982                ftrace_run_modify_code(ops, FTRACE_UPDATE_CALLS, old_hash);
3983                return;
3984        }
3985
3986        /*
3987         * If this is the shared global_ops filter, then we need to
3988         * check if another ops that shares it is also enabled.
3989         * If so, we still need to run the modify code.
3990         */
3991        if (ops->func_hash != &global_ops.local_hash)
3992                return;
3993
3994        do_for_each_ftrace_op(op, ftrace_ops_list) {
3995                if (op->func_hash == &global_ops.local_hash &&
3996                    op->flags & FTRACE_OPS_FL_ENABLED) {
3997                        ftrace_run_modify_code(op, FTRACE_UPDATE_CALLS, old_hash);
3998                        /* Only need to do this once */
3999                        return;
4000                }
4001        } while_for_each_ftrace_op(op);
4002}
4003
4004static int ftrace_hash_move_and_update_ops(struct ftrace_ops *ops,
4005                                           struct ftrace_hash **orig_hash,
4006                                           struct ftrace_hash *hash,
4007                                           int enable)
4008{
4009        struct ftrace_ops_hash old_hash_ops;
4010        struct ftrace_hash *old_hash;
4011        int ret;
4012
4013        old_hash = *orig_hash;
4014        old_hash_ops.filter_hash = ops->func_hash->filter_hash;
4015        old_hash_ops.notrace_hash = ops->func_hash->notrace_hash;
4016        ret = ftrace_hash_move(ops, enable, orig_hash, hash);
4017        if (!ret) {
4018                ftrace_ops_update_code(ops, &old_hash_ops);
4019                free_ftrace_hash_rcu(old_hash);
4020        }
4021        return ret;
4022}
4023
4024static bool module_exists(const char *module)
4025{
4026        /* All modules have the symbol __this_module */
4027        const char this_mod[] = "__this_module";
4028        char modname[MAX_PARAM_PREFIX_LEN + sizeof(this_mod) + 2];
4029        unsigned long val;
4030        int n;
4031
4032        n = snprintf(modname, sizeof(modname), "%s:%s", module, this_mod);
4033
4034        if (n > sizeof(modname) - 1)
4035                return false;
4036
4037        val = module_kallsyms_lookup_name(modname);
4038        return val != 0;
4039}
4040
4041static int cache_mod(struct trace_array *tr,
4042                     const char *func, char *module, int enable)
4043{
4044        struct ftrace_mod_load *ftrace_mod, *n;
4045        struct list_head *head = enable ? &tr->mod_trace : &tr->mod_notrace;
4046        int ret;
4047
4048        mutex_lock(&ftrace_lock);
4049
4050        /* We do not cache inverse filters */
4051        if (func[0] == '!') {
4052                func++;
4053                ret = -EINVAL;
4054
4055                /* Look to remove this hash */
4056                list_for_each_entry_safe(ftrace_mod, n, head, list) {
4057                        if (strcmp(ftrace_mod->module, module) != 0)
4058                                continue;
4059
4060                        /* no func matches all */
4061                        if (strcmp(func, "*") == 0 ||
4062                            (ftrace_mod->func &&
4063                             strcmp(ftrace_mod->func, func) == 0)) {
4064                                ret = 0;
4065                                free_ftrace_mod(ftrace_mod);
4066                                continue;
4067                        }
4068                }
4069                goto out;
4070        }
4071
4072        ret = -EINVAL;
4073        /* We only care about modules that have not been loaded yet */
4074        if (module_exists(module))
4075                goto out;
4076
4077        /* Save this string off, and execute it when the module is loaded */
4078        ret = ftrace_add_mod(tr, func, module, enable);
4079 out:
4080        mutex_unlock(&ftrace_lock);
4081
4082        return ret;
4083}
4084
4085static int
4086ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len,
4087                 int reset, int enable);
4088
4089#ifdef CONFIG_MODULES
4090static void process_mod_list(struct list_head *head, struct ftrace_ops *ops,
4091                             char *mod, bool enable)
4092{
4093        struct ftrace_mod_load *ftrace_mod, *n;
4094        struct ftrace_hash **orig_hash, *new_hash;
4095        LIST_HEAD(process_mods);
4096        char *func;
4097        int ret;
4098
4099        mutex_lock(&ops->func_hash->regex_lock);
4100
4101        if (enable)
4102                orig_hash = &ops->func_hash->filter_hash;
4103        else
4104                orig_hash = &ops->func_hash->notrace_hash;
4105
4106        new_hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS,
4107                                              *orig_hash);
4108        if (!new_hash)
4109                goto out; /* warn? */
4110
4111        mutex_lock(&ftrace_lock);
4112
4113        list_for_each_entry_safe(ftrace_mod, n, head, list) {
4114
4115                if (strcmp(ftrace_mod->module, mod) != 0)
4116                        continue;
4117
4118                if (ftrace_mod->func)
4119                        func = kstrdup(ftrace_mod->func, GFP_KERNEL);
4120                else
4121                        func = kstrdup("*", GFP_KERNEL);
4122
4123                if (!func) /* warn? */
4124                        continue;
4125
4126                list_del(&ftrace_mod->list);
4127                list_add(&ftrace_mod->list, &process_mods);
4128
4129                /* Use the newly allocated func, as it may be "*" */
4130                kfree(ftrace_mod->func);
4131                ftrace_mod->func = func;
4132        }
4133
4134        mutex_unlock(&ftrace_lock);
4135
4136        list_for_each_entry_safe(ftrace_mod, n, &process_mods, list) {
4137
4138                func = ftrace_mod->func;
4139
4140                /* Grabs ftrace_lock, which is why we have this extra step */
4141                match_records(new_hash, func, strlen(func), mod);
4142                free_ftrace_mod(ftrace_mod);
4143        }
4144
4145        if (enable && list_empty(head))
4146                new_hash->flags &= ~FTRACE_HASH_FL_MOD;
4147
4148        mutex_lock(&ftrace_lock);
4149
4150        ret = ftrace_hash_move_and_update_ops(ops, orig_hash,
4151                                              new_hash, enable);
4152        mutex_unlock(&ftrace_lock);
4153
4154 out:
4155        mutex_unlock(&ops->func_hash->regex_lock);
4156
4157        free_ftrace_hash(new_hash);
4158}
4159
4160static void process_cached_mods(const char *mod_name)
4161{
4162        struct trace_array *tr;
4163        char *mod;
4164
4165        mod = kstrdup(mod_name, GFP_KERNEL);
4166        if (!mod)
4167                return;
4168
4169        mutex_lock(&trace_types_lock);
4170        list_for_each_entry(tr, &ftrace_trace_arrays, list) {
4171                if (!list_empty(&tr->mod_trace))
4172                        process_mod_list(&tr->mod_trace, tr->ops, mod, true);
4173                if (!list_empty(&tr->mod_notrace))
4174                        process_mod_list(&tr->mod_notrace, tr->ops, mod, false);
4175        }
4176        mutex_unlock(&trace_types_lock);
4177
4178        kfree(mod);
4179}
4180#endif
4181
4182/*
4183 * We register the module command as a template to show others how
4184 * to register a command as well.
4185 */
4186
4187static int
4188ftrace_mod_callback(struct trace_array *tr, struct ftrace_hash *hash,
4189                    char *func_orig, char *cmd, char *module, int enable)
4190{
4191        char *func;
4192        int ret;
4193
4194        /* match_records() modifies func, and we need the original */
4195        func = kstrdup(func_orig, GFP_KERNEL);
4196        if (!func)
4197                return -ENOMEM;
4198
4199        /*
4200         * cmd == 'mod' because we only registered this func
4201         * for the 'mod' ftrace_func_command.
4202         * But if you register one func with multiple commands,
4203         * you can tell which command was used by the cmd
4204         * parameter.
4205         */
4206        ret = match_records(hash, func, strlen(func), module);
4207        kfree(func);
4208
4209        if (!ret)
4210                return cache_mod(tr, func_orig, module, enable);
4211        if (ret < 0)
4212                return ret;
4213        return 0;
4214}
4215
4216static struct ftrace_func_command ftrace_mod_cmd = {
4217        .name                   = "mod",
4218        .func                   = ftrace_mod_callback,
4219};
4220
4221static int __init ftrace_mod_cmd_init(void)
4222{
4223        return register_ftrace_command(&ftrace_mod_cmd);
4224}
4225core_initcall(ftrace_mod_cmd_init);
4226
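/*
 * Following the template above, adding another command amounts to
 * registering one more ftrace_func_command from an __init function.  The
 * callback is invoked when "<func>:<cmd>:<param>" is written to
 * set_ftrace_filter or set_ftrace_notrace.  A sketch with invented names
 * (a hypothetical "foo" command):
 *
 *	static int
 *	ftrace_foo_callback(struct trace_array *tr, struct ftrace_hash *hash,
 *			    char *func, char *cmd, char *param, int enable)
 *	{
 *		return 0;
 *	}
 *
 *	static struct ftrace_func_command ftrace_foo_cmd = {
 *		.name	= "foo",
 *		.func	= ftrace_foo_callback,
 *	};
 *
 *	static int __init ftrace_foo_cmd_init(void)
 *	{
 *		return register_ftrace_command(&ftrace_foo_cmd);
 *	}
 *	core_initcall(ftrace_foo_cmd_init);
 */
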
4227static void function_trace_probe_call(unsigned long ip, unsigned long parent_ip,
4228                                      struct ftrace_ops *op, struct pt_regs *pt_regs)
4229{
4230        struct ftrace_probe_ops *probe_ops;
4231        struct ftrace_func_probe *probe;
4232
4233        probe = container_of(op, struct ftrace_func_probe, ops);
4234        probe_ops = probe->probe_ops;
4235
4236        /*
4237         * Disable preemption for these calls to prevent an RCU grace
4238         * period. This syncs the hash iteration and freeing of items
4239         * on the hash. rcu_read_lock is too dangerous here.
4240         */
4241        preempt_disable_notrace();
4242        probe_ops->func(ip, parent_ip, probe->tr, probe_ops, probe->data);
4243        preempt_enable_notrace();
4244}
4245
4246struct ftrace_func_map {
4247        struct ftrace_func_entry        entry;
4248        void                            *data;
4249};
4250
4251struct ftrace_func_mapper {
4252        struct ftrace_hash              hash;
4253};
4254
4255/**
4256 * allocate_ftrace_func_mapper - allocate a new ftrace_func_mapper
4257 *
4258 * Returns a ftrace_func_mapper descriptor that can be used to map ips to data.
4259 */
4260struct ftrace_func_mapper *allocate_ftrace_func_mapper(void)
4261{
4262        struct ftrace_hash *hash;
4263
4264        /*
4265         * The mapper is simply a ftrace_hash, but since the entries
4266         * in the hash are not ftrace_func_entry type, we define it
4267         * as a separate structure.
4268         */
4269        hash = alloc_ftrace_hash(FTRACE_HASH_DEFAULT_BITS);
4270        return (struct ftrace_func_mapper *)hash;
4271}
4272
4273/**
4274 * ftrace_func_mapper_find_ip - Find some data mapped to an ip
4275 * @mapper: The mapper that has the ip maps
4276 * @ip: the instruction pointer to find the data for
4277 *
4278 * Returns the data mapped to @ip if found otherwise NULL. The return
4279 * is actually the address of the mapper data pointer. The address is
4280 * returned for use cases where the data is no bigger than a long, and
4281 * the user can use the data pointer as its data instead of having to
4282 * allocate more memory for the reference.
4283 */
4284void **ftrace_func_mapper_find_ip(struct ftrace_func_mapper *mapper,
4285                                  unsigned long ip)
4286{
4287        struct ftrace_func_entry *entry;
4288        struct ftrace_func_map *map;
4289
4290        entry = ftrace_lookup_ip(&mapper->hash, ip);
4291        if (!entry)
4292                return NULL;
4293
4294        map = (struct ftrace_func_map *)entry;
4295        return &map->data;
4296}
4297
4298/**
4299 * ftrace_func_mapper_add_ip - Map some data to an ip
4300 * @mapper: The mapper that has the ip maps
4301 * @ip: The instruction pointer address to map @data to
4302 * @data: The data to map to @ip
4303 *
4304 * Returns 0 on success, otherwise an error.
4305 */
4306int ftrace_func_mapper_add_ip(struct ftrace_func_mapper *mapper,
4307                              unsigned long ip, void *data)
4308{
4309        struct ftrace_func_entry *entry;
4310        struct ftrace_func_map *map;
4311
4312        entry = ftrace_lookup_ip(&mapper->hash, ip);
4313        if (entry)
4314                return -EBUSY;
4315
4316        map = kmalloc(sizeof(*map), GFP_KERNEL);
4317        if (!map)
4318                return -ENOMEM;
4319
4320        map->entry.ip = ip;
4321        map->data = data;
4322
4323        __add_hash_entry(&mapper->hash, &map->entry);
4324
4325        return 0;
4326}
4327
4328/**
4329 * ftrace_func_mapper_remove_ip - Remove an ip from the mapping
4330 * @mapper: The mapper that has the ip maps
4331 * @ip: The instruction pointer address to remove the data from
4332 *
4333 * Returns the data if it is found, otherwise NULL.
4334 * Note, if the data pointer is used as the data itself (see
4335 * ftrace_func_mapper_find_ip()), then the return value may be meaningless
4336 * if the data pointer was set to zero.
4337 */
4338void *ftrace_func_mapper_remove_ip(struct ftrace_func_mapper *mapper,
4339                                   unsigned long ip)
4340{
4341        struct ftrace_func_entry *entry;
4342        struct ftrace_func_map *map;
4343        void *data;
4344
4345        entry = ftrace_lookup_ip(&mapper->hash, ip);
4346        if (!entry)
4347                return NULL;
4348
4349        map = (struct ftrace_func_map *)entry;
4350        data = map->data;
4351
4352        remove_hash_entry(&mapper->hash, entry);
4353        kfree(entry);
4354
4355        return data;
4356}
4357
4358/**
4359 * free_ftrace_func_mapper - free a mapping of ips and data
4360 * @mapper: The mapper that has the ip maps
4361 * @free_func: A function to be called on each data item.
4362 *
4363 * This is used to free the function mapper. The @free_func is optional
4364 * and can be used if the data needs to be freed as well.
4365 */
4366void free_ftrace_func_mapper(struct ftrace_func_mapper *mapper,
4367                             ftrace_mapper_func free_func)
4368{
4369        struct ftrace_func_entry *entry;
4370        struct ftrace_func_map *map;
4371        struct hlist_head *hhd;
4372        int size = 1 << mapper->hash.size_bits;
4373        int i;
4374
4375        if (free_func && mapper->hash.count) {
4376                for (i = 0; i < size; i++) {
4377                        hhd = &mapper->hash.buckets[i];
4378                        hlist_for_each_entry(entry, hhd, hlist) {
4379                                map = (struct ftrace_func_map *)entry;
4380                                free_func(map);
4381                        }
4382                }
4383        }
4384        free_ftrace_hash(&mapper->hash);
4385}
4386
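/*
 * Taken together, the mapper helpers above give probe implementations a
 * small ip -> data store.  A sketch of the "data no bigger than a long"
 * usage described in ftrace_func_mapper_find_ip(), with an invented
 * helper that keeps a per-function hit count:
 *
 *	static void count_hit(struct ftrace_func_mapper *mapper, unsigned long ip)
 *	{
 *		long *count;
 *
 *		count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
 *		if (count)
 *			(*count)++;
 *		else
 *			ftrace_func_mapper_add_ip(mapper, ip, (void *)1L);
 *	}
 *
 * with the mapper coming from allocate_ftrace_func_mapper() and released
 * via free_ftrace_func_mapper(mapper, NULL) when the counts are plain
 * values rather than allocated objects.
 */
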
4387static void release_probe(struct ftrace_func_probe *probe)
4388{
4389        struct ftrace_probe_ops *probe_ops;
4390
4391        mutex_lock(&ftrace_lock);
4392
4393        WARN_ON(probe->ref <= 0);
4394
4395        /* Subtract the ref that was used to protect this instance */
4396        probe->ref--;
4397
4398        if (!probe->ref) {
4399                probe_ops = probe->probe_ops;
4400                /*
4401                 * Sending zero as ip tells probe_ops to free
4402                 * the probe->data itself
4403                 */
4404                if (probe_ops->free)
4405                        probe_ops->free(probe_ops, probe->tr, 0, probe->data);
4406                list_del(&probe->list);
4407                kfree(probe);
4408        }
4409        mutex_unlock(&ftrace_lock);
4410}
4411
4412static void acquire_probe_locked(struct ftrace_func_probe *probe)
4413{
4414        /*
4415         * Add one ref to keep it from being freed when releasing the
4416         * ftrace_lock mutex.
4417         */
4418        probe->ref++;
4419}
4420
4421int
4422register_ftrace_function_probe(char *glob, struct trace_array *tr,
4423                               struct ftrace_probe_ops *probe_ops,
4424                               void *data)
4425{
4426        struct ftrace_func_entry *entry;
4427        struct ftrace_func_probe *probe;
4428        struct ftrace_hash **orig_hash;
4429        struct ftrace_hash *old_hash;
4430        struct ftrace_hash *hash;
4431        int count = 0;
4432        int size;
4433        int ret;
4434        int i;
4435
4436        if (WARN_ON(!tr))
4437                return -EINVAL;
4438
4439        /* We do not support '!' for function probes */
4440        if (WARN_ON(glob[0] == '!'))
4441                return -EINVAL;
4442
4444        mutex_lock(&ftrace_lock);
4445        /* Check if the probe_ops is already registered */
4446        list_for_each_entry(probe, &tr->func_probes, list) {
4447                if (probe->probe_ops == probe_ops)
4448                        break;
4449        }
4450        if (&probe->list == &tr->func_probes) {
4451                probe = kzalloc(sizeof(*probe), GFP_KERNEL);
4452                if (!probe) {
4453                        mutex_unlock(&ftrace_lock);
4454                        return -ENOMEM;
4455                }
4456                probe->probe_ops = probe_ops;
4457                probe->ops.func = function_trace_probe_call;
4458                probe->tr = tr;
4459                ftrace_ops_init(&probe->ops);
4460                list_add(&probe->list, &tr->func_probes);
4461        }
4462
4463        acquire_probe_locked(probe);
4464
4465        mutex_unlock(&ftrace_lock);
4466
4467        /*
4468         * Note, there's a small window here where the func_hash->filter_hash
4469         * may be NULL or empty. Need to be careful when reading the loop.
4470         */
4471        mutex_lock(&probe->ops.func_hash->regex_lock);
4472
4473        orig_hash = &probe->ops.func_hash->filter_hash;
4474        old_hash = *orig_hash;
4475        hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, old_hash);
4476
4477        if (!hash) {
4478                ret = -ENOMEM;
4479                goto out;
4480        }
4481
4482        ret = ftrace_match_records(hash, glob, strlen(glob));
4483
4484        /* Nothing found? */
4485        if (!ret)
4486                ret = -EINVAL;
4487
4488        if (ret < 0)
4489                goto out;
4490
4491        size = 1 << hash->size_bits;
4492        for (i = 0; i < size; i++) {
4493                hlist_for_each_entry(entry, &hash->buckets[i], hlist) {
4494                        if (ftrace_lookup_ip(old_hash, entry->ip))
4495                                continue;
4496                        /*
4497                         * The caller might want to do something special
4498                         * for each function we find. We call the callback
4499                         * to give the caller an opportunity to do so.
4500                         */
4501                        if (probe_ops->init) {
4502                                ret = probe_ops->init(probe_ops, tr,
4503                                                      entry->ip, data,
4504                                                      &probe->data);
4505                                if (ret < 0) {
4506                                        if (probe_ops->free && count)
4507                                                probe_ops->free(probe_ops, tr,
4508                                                                0, probe->data);
4509                                        probe->data = NULL;
4510                                        goto out;
4511                                }
4512                        }
4513                        count++;
4514                }
4515        }
4516
4517        mutex_lock(&ftrace_lock);
4518
4519        if (!count) {
4520                /* Nothing was added? */
4521                ret = -EINVAL;
4522                goto out_unlock;
4523        }
4524
4525        ret = ftrace_hash_move_and_update_ops(&probe->ops, orig_hash,
4526                                              hash, 1);
4527        if (ret < 0)
4528                goto err_unlock;
4529
4530        /* One ref for each new function traced */
4531        probe->ref += count;
4532
4533        if (!(probe->ops.flags & FTRACE_OPS_FL_ENABLED))
4534                ret = ftrace_startup(&probe->ops, 0);
4535
4536 out_unlock:
4537        mutex_unlock(&ftrace_lock);
4538
4539        if (!ret)
4540                ret = count;
4541 out:
4542        mutex_unlock(&probe->ops.func_hash->regex_lock);
4543        free_ftrace_hash(hash);
4544
4545        release_probe(probe);
4546
4547        return ret;
4548
4549 err_unlock:
4550        if (!probe_ops->free || !count)
4551                goto out_unlock;
4552
4553        /* Failed to do the move, need to call the free functions */
4554        for (i = 0; i < size; i++) {
4555                hlist_for_each_entry(entry, &hash->buckets[i], hlist) {
4556                        if (ftrace_lookup_ip(old_hash, entry->ip))
4557                                continue;
4558                        probe_ops->free(probe_ops, tr, entry->ip, probe->data);
4559                }
4560        }
4561        goto out_unlock;
4562}
4563
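/*
 * A minimal caller of register_ftrace_function_probe(), pieced together
 * from the callback signatures used above (a sketch; the names and the
 * glob are invented, and @tr is the trace instance the probe belongs to):
 *
 *	static void
 *	my_probe_func(unsigned long ip, unsigned long parent_ip,
 *		      struct trace_array *tr, struct ftrace_probe_ops *ops,
 *		      void *data)
 *	{
 *		trace_printk("hit %pS\n", (void *)ip);
 *	}
 *
 *	static struct ftrace_probe_ops my_probe_ops = {
 *		.func	= my_probe_func,
 *	};
 *
 *	char glob[] = "sched_*";
 *	ret = register_ftrace_function_probe(glob, tr, &my_probe_ops, NULL);
 *
 * On success the return value is the number of functions the glob newly
 * attached the probe to; unregister_ftrace_function_probe_func() below
 * undoes the registration.
 */
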
4564int
4565unregister_ftrace_function_probe_func(char *glob, struct trace_array *tr,
4566                                      struct ftrace_probe_ops *probe_ops)
4567{
4568        struct ftrace_ops_hash old_hash_ops;
4569        struct ftrace_func_entry *entry;
4570        struct ftrace_func_probe *probe;
4571        struct ftrace_glob func_g;
4572        struct ftrace_hash **orig_hash;
4573        struct ftrace_hash *old_hash;
4574        struct ftrace_hash *hash = NULL;
4575        struct hlist_node *tmp;
4576        struct hlist_head hhd;
4577        char str[KSYM_SYMBOL_LEN];
4578        int count = 0;
4579        int i, ret = -ENODEV;
4580        int size;
4581
4582        if (!glob || !strlen(glob) || !strcmp(glob, "*"))
4583                func_g.search = NULL;
4584        else {
4585                int not;
4586
4587                func_g.type = filter_parse_regex(glob, strlen(glob),
4588                                                 &func_g.search, &not);
4589                func_g.len = strlen(func_g.search);
4590
4591                /* we do not support '!' for function probes */
4592                if (WARN_ON(not))
4593                        return -EINVAL;
4594        }
4595
4596        mutex_lock(&ftrace_lock);
4597        /* Check if the probe_ops is already registered */
4598        list_for_each_entry(probe, &tr->func_probes, list) {
4599                if (probe->probe_ops == probe_ops)
4600                        break;
4601        }
4602        if (&probe->list == &tr->func_probes)
4603                goto err_unlock_ftrace;
4604
4605        ret = -EINVAL;
4606        if (!(probe->ops.flags & FTRACE_OPS_FL_INITIALIZED))
4607                goto err_unlock_ftrace;
4608
4609        acquire_probe_locked(probe);
4610
4611        mutex_unlock(&ftrace_lock);
4612
4613        mutex_lock(&probe->ops.func_hash->regex_lock);
4614
4615        orig_hash = &probe->ops.func_hash->filter_hash;
4616        old_hash = *orig_hash;
4617
4618        if (ftrace_hash_empty(old_hash))
4619                goto out_unlock;
4620
4621        old_hash_ops.filter_hash = old_hash;
4622        /* Probes only have filters */
4623        old_hash_ops.notrace_hash = NULL;
4624
4625        ret = -ENOMEM;
4626        hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, old_hash);
4627        if (!hash)
4628                goto out_unlock;
4629
4630        INIT_HLIST_HEAD(&hhd);
4631
4632        size = 1 << hash->size_bits;
4633        for (i = 0; i < size; i++) {
4634                hlist_for_each_entry_safe(entry, tmp, &hash->buckets[i], hlist) {
4635
4636                        if (func_g.search) {
4637                                kallsyms_lookup(entry->ip, NULL, NULL,
4638                                                NULL, str);
4639                                if (!ftrace_match(str, &func_g))
4640                                        continue;
4641                        }
4642                        count++;
4643                        remove_hash_entry(hash, entry);
4644                        hlist_add_head(&entry->hlist, &hhd);
4645                }
4646        }
4647
4648        /* Nothing found? */
4649        if (!count) {
4650                ret = -EINVAL;
4651                goto out_unlock;
4652        }
4653
4654        mutex_lock(&ftrace_lock);
4655
4656        WARN_ON(probe->ref < count);
4657
4658        probe->ref -= count;
4659
4660        if (ftrace_hash_empty(hash))
4661                ftrace_shutdown(&probe->ops, 0);
4662
4663        ret = ftrace_hash_move_and_update_ops(&probe->ops, orig_hash,
4664                                              hash, 1);
4665
4666        /* still need to update the function call sites */
4667        if (ftrace_enabled && !ftrace_hash_empty(hash))
4668                ftrace_run_modify_code(&probe->ops, FTRACE_UPDATE_CALLS,
4669                                       &old_hash_ops);
4670        synchronize_rcu();
4671
4672        hlist_for_each_entry_safe(entry, tmp, &hhd, hlist) {
4673                hlist_del(&entry->hlist);
4674                if (probe_ops->free)
4675                        probe_ops->free(probe_ops, tr, entry->ip, probe->data);
4676                kfree(entry);
4677        }
4678        mutex_unlock(&ftrace_lock);
4679
4680 out_unlock:
4681        mutex_unlock(&probe->ops.func_hash->regex_lock);
4682        free_ftrace_hash(hash);
4683
4684        release_probe(probe);
4685
4686        return ret;
4687
4688 err_unlock_ftrace:
4689        mutex_unlock(&ftrace_lock);
4690        return ret;
4691}
4692
4693void clear_ftrace_function_probes(struct trace_array *tr)
4694{
4695        struct ftrace_func_probe *probe, *n;
4696
4697        list_for_each_entry_safe(probe, n, &tr->func_probes, list)
4698                unregister_ftrace_function_probe_func(NULL, tr, probe->probe_ops);
4699}
4700
4701static LIST_HEAD(ftrace_commands);
4702static DEFINE_MUTEX(ftrace_cmd_mutex);
4703
4704/*
4705 * Currently we only register ftrace commands from __init, so mark this
4706 * __init too.
4707 */
4708__init int register_ftrace_command(struct ftrace_func_command *cmd)
4709{
4710        struct ftrace_func_command *p;
4711        int ret = 0;
4712
4713        mutex_lock(&ftrace_cmd_mutex);
4714        list_for_each_entry(p, &ftrace_commands, list) {
4715                if (strcmp(cmd->name, p->name) == 0) {
4716                        ret = -EBUSY;
4717                        goto out_unlock;
4718                }
4719        }
4720        list_add(&cmd->list, &ftrace_commands);
4721 out_unlock:
4722        mutex_unlock(&ftrace_cmd_mutex);
4723
4724        return ret;
4725}
4726
4727/*
4728 * Currently we only unregister ftrace commands from __init, so mark
4729 * this __init too.
4730 */
4731__init int unregister_ftrace_command(struct ftrace_func_command *cmd)
4732{
4733        struct ftrace_func_command *p, *n;
4734        int ret = -ENODEV;
4735
4736        mutex_lock(&ftrace_cmd_mutex);
4737        list_for_each_entry_safe(p, n, &ftrace_commands, list) {
4738                if (strcmp(cmd->name, p->name) == 0) {
4739                        ret = 0;
4740                        list_del_init(&p->list);
4741                        goto out_unlock;
4742                }
4743        }
4744 out_unlock:
4745        mutex_unlock(&ftrace_cmd_mutex);
4746
4747        return ret;
4748}
4749
4750static int ftrace_process_regex(struct ftrace_iterator *iter,
4751                                char *buff, int len, int enable)
4752{
4753        struct ftrace_hash *hash = iter->hash;
4754        struct trace_array *tr = iter->ops->private;
4755        char *func, *command, *next = buff;
4756        struct ftrace_func_command *p;
4757        int ret = -EINVAL;
4758
4759        func = strsep(&next, ":");
4760
4761        if (!next) {
4762                ret = ftrace_match_records(hash, func, len);
4763                if (!ret)
4764                        ret = -EINVAL;
4765                if (ret < 0)
4766                        return ret;
4767                return 0;
4768        }
4769
4770        /* command found */
4771
4772        command = strsep(&next, ":");
4773
4774        mutex_lock(&ftrace_cmd_mutex);
4775        list_for_each_entry(p, &ftrace_commands, list) {
4776                if (strcmp(p->name, command) == 0) {
4777                        ret = p->func(tr, hash, func, command, next, enable);
4778                        goto out_unlock;
4779                }
4780        }
4781 out_unlock:
4782        mutex_unlock(&ftrace_cmd_mutex);
4783
4784        return ret;
4785}
4786
4787static ssize_t
4788ftrace_regex_write(struct file *file, const char __user *ubuf,
4789                   size_t cnt, loff_t *ppos, int enable)
4790{
4791        struct ftrace_iterator *iter;
4792        struct trace_parser *parser;
4793        ssize_t ret, read;
4794
4795        if (!cnt)
4796                return 0;
4797
4798        if (file->f_mode & FMODE_READ) {
4799                struct seq_file *m = file->private_data;
4800                iter = m->private;
4801        } else
4802                iter = file->private_data;
4803
4804        if (unlikely(ftrace_disabled))
4805                return -ENODEV;
4806
4807        /* iter->hash is a local copy, so we don't need regex_lock */
4808
4809        parser = &iter->parser;
4810        read = trace_get_user(parser, ubuf, cnt, ppos);
4811
4812        if (read >= 0 && trace_parser_loaded(parser) &&
4813            !trace_parser_cont(parser)) {
4814                ret = ftrace_process_regex(iter, parser->buffer,
4815                                           parser->idx, enable);
4816                trace_parser_clear(parser);
4817                if (ret < 0)
4818                        goto out;
4819        }
4820
4821        ret = read;
4822 out:
4823        return ret;
4824}
4825
4826ssize_t
4827ftrace_filter_write(struct file *file, const char __user *ubuf,
4828                    size_t cnt, loff_t *ppos)
4829{
4830        return ftrace_regex_write(file, ubuf, cnt, ppos, 1);
4831}
4832
4833ssize_t
4834ftrace_notrace_write(struct file *file, const char __user *ubuf,
4835                     size_t cnt, loff_t *ppos)
4836{
4837        return ftrace_regex_write(file, ubuf, cnt, ppos, 0);
4838}
4839
4840static int
4841ftrace_match_addr(struct ftrace_hash *hash, unsigned long ip, int remove)
4842{
4843        struct ftrace_func_entry *entry;
4844
4845        if (!ftrace_location(ip))
4846                return -EINVAL;
4847
4848        if (remove) {
4849                entry = ftrace_lookup_ip(hash, ip);
4850                if (!entry)
4851                        return -ENOENT;
4852                free_hash_entry(hash, entry);
4853                return 0;
4854        }
4855
4856        return add_hash_entry(hash, ip);
4857}
4858
4859static int
4860ftrace_set_hash(struct ftrace_ops *ops, unsigned char *buf, int len,
4861                unsigned long ip, int remove, int reset, int enable)
4862{
4863        struct ftrace_hash **orig_hash;
4864        struct ftrace_hash *hash;
4865        int ret;
4866
4867        if (unlikely(ftrace_disabled))
4868                return -ENODEV;
4869
4870        mutex_lock(&ops->func_hash->regex_lock);
4871
4872        if (enable)
4873                orig_hash = &ops->func_hash->filter_hash;
4874        else
4875                orig_hash = &ops->func_hash->notrace_hash;
4876
4877        if (reset)
4878                hash = alloc_ftrace_hash(FTRACE_HASH_DEFAULT_BITS);
4879        else
4880                hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash);
4881
4882        if (!hash) {
4883                ret = -ENOMEM;
4884                goto out_regex_unlock;
4885        }
4886
4887        if (buf && !ftrace_match_records(hash, buf, len)) {
4888                ret = -EINVAL;
4889                goto out_regex_unlock;
4890        }
4891        if (ip) {
4892                ret = ftrace_match_addr(hash, ip, remove);
4893                if (ret < 0)
4894                        goto out_regex_unlock;
4895        }
4896
4897        mutex_lock(&ftrace_lock);
4898        ret = ftrace_hash_move_and_update_ops(ops, orig_hash, hash, enable);
4899        mutex_unlock(&ftrace_lock);
4900
4901 out_regex_unlock:
4902        mutex_unlock(&ops->func_hash->regex_lock);
4903
4904        free_ftrace_hash(hash);
4905        return ret;
4906}
4907
4908static int
4909ftrace_set_addr(struct ftrace_ops *ops, unsigned long ip, int remove,
4910                int reset, int enable)
4911{
4912        return ftrace_set_hash(ops, 0, 0, ip, remove, reset, enable);
4913}
4914
4915#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
4916
4917struct ftrace_direct_func {
4918        struct list_head        next;
4919        unsigned long           addr;
4920        int                     count;
4921};
4922
4923static LIST_HEAD(ftrace_direct_funcs);
4924
4925/**
4926 * ftrace_find_direct_func - test if an address is a registered direct caller
4927 * @addr: The address of a registered direct caller
4928 *
4929 * This searches to see if a ftrace direct caller has been registered
4930 * at a specific address, and if so, it returns a descriptor for it.
4931 *
4932 * This can be used by architecture code to see if an address is
4933 * a direct caller (trampoline) attached to a fentry/mcount location.
4934 * This is useful for the function_graph tracer, as it may need to
4935 * do adjustments if it traced a location that also has a direct
4936 * trampoline attached to it.
4937 */
4938struct ftrace_direct_func *ftrace_find_direct_func(unsigned long addr)
4939{
4940        struct ftrace_direct_func *entry;
4941        bool found = false;
4942
4943        /* May be called by fgraph trampoline (protected by rcu tasks) */
4944        list_for_each_entry_rcu(entry, &ftrace_direct_funcs, next) {
4945                if (entry->addr == addr) {
4946                        found = true;
4947                        break;
4948                }
4949        }
4950        if (found)
4951                return entry;
4952
4953        return NULL;
4954}
4955
4956static struct ftrace_direct_func *ftrace_alloc_direct_func(unsigned long addr)
4957{
4958        struct ftrace_direct_func *direct;
4959
4960        direct = kmalloc(sizeof(*direct), GFP_KERNEL);
4961        if (!direct)
4962                return NULL;
4963        direct->addr = addr;
4964        direct->count = 0;
4965        list_add_rcu(&direct->next, &ftrace_direct_funcs);
4966        ftrace_direct_func_count++;
4967        return direct;
4968}
4969
4970/**
4971 * register_ftrace_direct - Call a custom trampoline directly
4972 * @ip: The address of the nop at the beginning of a function
4973 * @addr: The address of the trampoline to call at @ip
4974 *
4975 * This is used to connect a direct call from the nop location (@ip)
4976 * at the start of ftrace traced functions. The location that it calls
4977 * (@addr) must be able to handle a direct call, save the parameters
4978 * of the function being traced, and restore them (or inject new ones
4979 * if needed) before returning.
4980 *
4981 * Returns:
4982 *  0 on success
4983 *  -EBUSY - Another direct function is already attached (there can be only one)
4984 *  -ENODEV - @ip does not point to a ftrace nop location (or not supported)
4985 *  -ENOMEM - There was an allocation failure.
4986 */
4987int register_ftrace_direct(unsigned long ip, unsigned long addr)
4988{
4989        struct ftrace_direct_func *direct;
4990        struct ftrace_func_entry *entry;
4991        struct ftrace_hash *free_hash = NULL;
4992        struct dyn_ftrace *rec;
4993        int ret = -EBUSY;
4994
4995        mutex_lock(&direct_mutex);
4996
4997        /* See if there's a direct function at @ip already */
4998        if (ftrace_find_rec_direct(ip))
4999                goto out_unlock;
5000
5001        ret = -ENODEV;
5002        rec = lookup_rec(ip, ip);
5003        if (!rec)
5004                goto out_unlock;
5005
5006        /*
5007         * Check if the rec says it has a direct call but we didn't
5008         * find one earlier?
5009         */
5010        if (WARN_ON(rec->flags & FTRACE_FL_DIRECT))
5011                goto out_unlock;
5012
5013        /* Make sure the ip points to the exact record */
5014        if (ip != rec->ip) {
5015                ip = rec->ip;
5016                /* Need to check this ip for a direct. */
5017                if (ftrace_find_rec_direct(ip))
5018                        goto out_unlock;
5019        }
5020
5021        ret = -ENOMEM;
5022        if (ftrace_hash_empty(direct_functions) ||
5023            direct_functions->count > 2 * (1 << direct_functions->size_bits)) {
5024                struct ftrace_hash *new_hash;
5025                int size = ftrace_hash_empty(direct_functions) ? 0 :
5026                        direct_functions->count + 1;
5027
5028                if (size < 32)
5029                        size = 32;
5030
5031                new_hash = dup_hash(direct_functions, size);
5032                if (!new_hash)
5033                        goto out_unlock;
5034
5035                free_hash = direct_functions;
5036                direct_functions = new_hash;
5037        }
5038
5039        entry = kmalloc(sizeof(*entry), GFP_KERNEL);
5040        if (!entry)
5041                goto out_unlock;
5042
5043        direct = ftrace_find_direct_func(addr);
5044        if (!direct) {
5045                direct = ftrace_alloc_direct_func(addr);
5046                if (!direct) {
5047                        kfree(entry);
5048                        goto out_unlock;
5049                }
5050        }
5051
5052        entry->ip = ip;
5053        entry->direct = addr;
5054        __add_hash_entry(direct_functions, entry);
5055
5056        ret = ftrace_set_filter_ip(&direct_ops, ip, 0, 0);
5057        if (ret)
5058                remove_hash_entry(direct_functions, entry);
5059
5060        if (!ret && !(direct_ops.flags & FTRACE_OPS_FL_ENABLED)) {
5061                ret = register_ftrace_function(&direct_ops);
5062                if (ret)
5063                        ftrace_set_filter_ip(&direct_ops, ip, 1, 0);
5064        }
5065
5066        if (ret) {
5067                kfree(entry);
5068                if (!direct->count) {
5069                        list_del_rcu(&direct->next);
5070                        synchronize_rcu_tasks();
5071                        kfree(direct);
5072                        if (free_hash)
5073                                free_ftrace_hash(free_hash);
5074                        free_hash = NULL;
5075                        ftrace_direct_func_count--;
5076                }
5077        } else {
5078                direct->count++;
5079        }
5080 out_unlock:
5081        mutex_unlock(&direct_mutex);
5082
5083        if (free_hash) {
5084                synchronize_rcu_tasks();
5085                free_ftrace_hash(free_hash);
5086        }
5087
5088        return ret;
5089}
5090EXPORT_SYMBOL_GPL(register_ftrace_direct);
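
/*
 * Hedged usage sketch (not part of this file; "my_tramp" and
 * "my_traced_func" are hypothetical): a module that provides an
 * architecture-specific trampoline which saves and restores the traced
 * function's arguments could attach it like this.  On architectures
 * where the fentry/mcount nop sits at the function entry, the function
 * address itself can be passed as @ip.
 *
 *	void my_tramp(void);	// assembly trampoline
 *
 *	static int __init my_direct_init(void)
 *	{
 *		return register_ftrace_direct((unsigned long)my_traced_func,
 *					      (unsigned long)my_tramp);
 *	}
 *
 * The matching cleanup calls unregister_ftrace_direct() with the same
 * @ip/@addr pair (see below).
 */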
5091
5092static struct ftrace_func_entry *find_direct_entry(unsigned long *ip,
5093                                                   struct dyn_ftrace **recp)
5094{
5095        struct ftrace_func_entry *entry;
5096        struct dyn_ftrace *rec;
5097
5098        rec = lookup_rec(*ip, *ip);
5099        if (!rec)
5100                return NULL;
5101
5102        entry = __ftrace_lookup_ip(direct_functions, rec->ip);
5103        if (!entry) {
5104                WARN_ON(rec->flags & FTRACE_FL_DIRECT);
5105                return NULL;
5106        }
5107
5108        WARN_ON(!(rec->flags & FTRACE_FL_DIRECT));
5109
5110        /* Passed in ip just needs to be on the call site */
5111        *ip = rec->ip;
5112
5113        if (recp)
5114                *recp = rec;
5115
5116        return entry;
5117}
5118
5119int unregister_ftrace_direct(unsigned long ip, unsigned long addr)
5120{
5121        struct ftrace_direct_func *direct;
5122        struct ftrace_func_entry *entry;
5123        int ret = -ENODEV;
5124
5125        mutex_lock(&direct_mutex);
5126
5127        entry = find_direct_entry(&ip, NULL);
5128        if (!entry)
5129                goto out_unlock;
5130
5131        if (direct_functions->count == 1)
5132                unregister_ftrace_function(&direct_ops);
5133
5134        ret = ftrace_set_filter_ip(&direct_ops, ip, 1, 0);
5135
5136        WARN_ON(ret);
5137
5138        remove_hash_entry(direct_functions, entry);
5139
5140        direct = ftrace_find_direct_func(addr);
5141        if (!WARN_ON(!direct)) {
5142                /* This is the good path (see the ! before WARN) */
5143                direct->count--;
5144                WARN_ON(direct->count < 0);
5145                if (!direct->count) {
5146                        list_del_rcu(&direct->next);
5147                        synchronize_rcu_tasks();
5148                        kfree(direct);
5149                        kfree(entry);
5150                        ftrace_direct_func_count--;
5151                }
5152        }
5153 out_unlock:
5154        mutex_unlock(&direct_mutex);
5155
5156        return ret;
5157}
5158EXPORT_SYMBOL_GPL(unregister_ftrace_direct);
5159
5160static struct ftrace_ops stub_ops = {
5161        .func           = ftrace_stub,
5162};
5163
5164/**
5165 * ftrace_modify_direct_caller - modify ftrace nop directly
5166 * @entry: The ftrace hash entry of the direct helper for @rec
5167 * @rec: The record representing the function site to patch
5168 * @old_addr: The location that the site at @rec->ip currently calls
5169 * @new_addr: The location that the site at @rec->ip should call
5170 *
5171 * An architecture may override this function to optimize the
5172 * changing of the direct callback on an ftrace nop location.
5173 * This is called with the ftrace_lock mutex held, and no other
5174 * ftrace callbacks are on the associated record (@rec). Thus,
5175 * it is safe to modify the ftrace record, where it should be
5176 * currently calling @old_addr directly, to call @new_addr.
5177 *
5178 * Safety checks should be made to make sure that the code at
5179 * @rec->ip is currently calling @old_addr, and this function must
5180 * also update entry->direct to @new_addr.
5181 */
5182int __weak ftrace_modify_direct_caller(struct ftrace_func_entry *entry,
5183                                       struct dyn_ftrace *rec,
5184                                       unsigned long old_addr,
5185                                       unsigned long new_addr)
5186{
5187        unsigned long ip = rec->ip;
5188        int ret;
5189
5190        /*
5191         * The ftrace_lock was used to determine if the record
5192         * had more than one registered user. If it did, we needed
5193         * to prevent that from changing in order to do the quick
5194         * switch. If it did not (only a direct caller was attached),
5195         * then this function is called. This function can cope with
5196         * other callers being attached to the rec in the meantime,
5197         * and since it uses standard ftrace calls that take the
5198         * ftrace_lock mutex, the lock must be released here.
5199         */
5200        mutex_unlock(&ftrace_lock);
5201
5202        /*
5203         * By setting a stub function at the same address, we force
5204         * the code to call the iterator and the direct_ops helper.
5205         * This means that @ip does not call the direct call, and
5206         * we can simply modify it.
5207         */
5208        ret = ftrace_set_filter_ip(&stub_ops, ip, 0, 0);
5209        if (ret)
5210                goto out_lock;
5211
5212        ret = register_ftrace_function(&stub_ops);
5213        if (ret) {
5214                ftrace_set_filter_ip(&stub_ops, ip, 1, 0);
5215                goto out_lock;
5216        }
5217
5218        entry->direct = new_addr;
5219
5220        /*
5221         * By removing the stub, we put back the direct call, calling
5222         * the @new_addr.
5223         */
5224        unregister_ftrace_function(&stub_ops);
5225        ftrace_set_filter_ip(&stub_ops, ip, 1, 0);
5226
5227 out_lock:
5228        mutex_lock(&ftrace_lock);
5229
5230        return ret;
5231}
5232
5233/**
5234 * modify_ftrace_direct - Modify an existing direct call to call something else
5235 * @ip: The instruction pointer to modify
5236 * @old_addr: The address that the current @ip calls directly
5237 * @new_addr: The address that the @ip should call
5238 *
5239 * This modifies a ftrace direct caller at an instruction pointer without
5240 * having to disable it first. The direct call will switch over to the
5241 * @new_addr without missing anything.
5242 *
5243 * Returns: zero on success. Non zero on error, which includes:
5244 *  -ENODEV : the @ip given has no direct caller attached
5245 *  -EINVAL : the @old_addr does not match the current direct caller
5246 */
5247int modify_ftrace_direct(unsigned long ip,
5248                         unsigned long old_addr, unsigned long new_addr)
5249{
5250        struct ftrace_direct_func *direct, *new_direct = NULL;
5251        struct ftrace_func_entry *entry;
5252        struct dyn_ftrace *rec;
5253        int ret = -ENODEV;
5254
5255        mutex_lock(&direct_mutex);
5256
5257        mutex_lock(&ftrace_lock);
5258        entry = find_direct_entry(&ip, &rec);
5259        if (!entry)
5260                goto out_unlock;
5261
5262        ret = -EINVAL;
5263        if (entry->direct != old_addr)
5264                goto out_unlock;
5265
5266        direct = ftrace_find_direct_func(old_addr);
5267        if (WARN_ON(!direct))
5268                goto out_unlock;
5269        if (direct->count > 1) {
5270                ret = -ENOMEM;
5271                new_direct = ftrace_alloc_direct_func(new_addr);
5272                if (!new_direct)
5273                        goto out_unlock;
5274                direct->count--;
5275                new_direct->count++;
5276        } else {
5277                direct->addr = new_addr;
5278        }
5279
5280        /*
5281         * If there's no other ftrace callback on the rec->ip location,
5282         * then it can be changed directly by the architecture.
5283         * If there is another caller, then we just need to change the
5284         * direct caller helper to point to @new_addr.
5285         */
5286        if (ftrace_rec_count(rec) == 1) {
5287                ret = ftrace_modify_direct_caller(entry, rec, old_addr, new_addr);
5288        } else {
5289                entry->direct = new_addr;
5290                ret = 0;
5291        }
5292
5293        if (unlikely(ret && new_direct)) {
5294                direct->count++;
5295                list_del_rcu(&new_direct->next);
5296                synchronize_rcu_tasks();
5297                kfree(new_direct);
5298                ftrace_direct_func_count--;
5299        }
5300
5301 out_unlock:
5302        mutex_unlock(&ftrace_lock);
5303        mutex_unlock(&direct_mutex);
5304        return ret;
5305}
5306EXPORT_SYMBOL_GPL(modify_ftrace_direct);
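
/*
 * Hedged usage sketch (not part of this file; the trampoline names are
 * hypothetical): retarget an already registered direct call from one
 * trampoline to another without detaching it first:
 *
 *	err = modify_ftrace_direct((unsigned long)my_traced_func,
 *				   (unsigned long)my_tramp_v1,
 *				   (unsigned long)my_tramp_v2);
 *
 * As documented above, -ENODEV means no direct caller is attached at
 * @ip and -EINVAL means my_tramp_v1 is not what @ip currently calls.
 */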
5307#endif /* CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */
5308
5309/**
5310 * ftrace_set_filter_ip - set a function to filter on in ftrace by address
5311 * @ops: the ops to set the filter with
5312 * @ip: the address to add to or remove from the filter.
5313 * @remove: non zero to remove the ip from the filter
5314 * @reset: non zero to reset all filters before applying this filter.
5315 *
5316 * Filters denote which functions should be enabled when tracing is enabled.
5317 * If @ip is NULL, it fails to update the filter.
5318 */
5319int ftrace_set_filter_ip(struct ftrace_ops *ops, unsigned long ip,
5320                         int remove, int reset)
5321{
5322        ftrace_ops_init(ops);
5323        return ftrace_set_addr(ops, ip, remove, reset, 1);
5324}
5325EXPORT_SYMBOL_GPL(ftrace_set_filter_ip);
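
/*
 * Hedged usage sketch (not part of this file; "my_ops" and
 * "my_traced_func" are hypothetical, and my_ops.func is assumed to be a
 * valid callback): attach an ops to exactly one function by address.
 * Passing remove=0 adds the address and reset=1 drops any previously
 * set filter first.
 *
 *	err = ftrace_set_filter_ip(&my_ops, (unsigned long)my_traced_func,
 *				   0, 1);	// remove = 0, reset = 1
 *	if (!err)
 *		err = register_ftrace_function(&my_ops);
 */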
5326
5327/**
5328 * ftrace_ops_set_global_filter - setup ops to use global filters
5329 * @ops: the ops which will use the global filters
5330 *
5331 * ftrace users who need global function trace filtering should call this.
5332 * It can set the global filter only if ops were not initialized before.
5333 */
5334void ftrace_ops_set_global_filter(struct ftrace_ops *ops)
5335{
5336        if (ops->flags & FTRACE_OPS_FL_INITIALIZED)
5337                return;
5338
5339        ftrace_ops_init(ops);
5340        ops->func_hash = &global_ops.local_hash;
5341}
5342EXPORT_SYMBOL_GPL(ftrace_ops_set_global_filter);
5343
5344static int
5345ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len,
5346                 int reset, int enable)
5347{
5348        return ftrace_set_hash(ops, buf, len, 0, 0, reset, enable);
5349}
5350
5351/**
5352 * ftrace_set_filter - set a function to filter on in ftrace
5353 * @ops: the ops to set the filter with
5354 * @buf: the string that holds the function filter text.
5355 * @len: the length of the string.
5356 * @reset: non zero to reset all filters before applying this filter.
5357 *
5358 * Filters denote which functions should be enabled when tracing is enabled.
5359 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
5360 */
5361int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
5362                       int len, int reset)
5363{
5364        ftrace_ops_init(ops);
5365        return ftrace_set_regex(ops, buf, len, reset, 1);
5366}
5367EXPORT_SYMBOL_GPL(ftrace_set_filter);
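
/*
 * Hedged usage sketch (not part of this file; "my_ops" is hypothetical
 * and assumed to have a valid .func callback): restrict the ops to
 * functions matching a glob before registering it.
 *
 *	char *pattern = "kmem_cache_*";
 *
 *	err = ftrace_set_filter(&my_ops, pattern, strlen(pattern), 1);
 *	if (!err)
 *		err = register_ftrace_function(&my_ops);
 */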
5368
5369/**
5370 * ftrace_set_notrace - set a function to not trace in ftrace
5371 * @ops: the ops to set the notrace filter with
5372 * @buf: the string that holds the function notrace text.
5373 * @len: the length of the string.
5374 * @reset: non zero to reset all filters before applying this filter.
5375 *
5376 * Notrace Filters denote which functions should not be enabled when tracing
5377 * is enabled. If @buf is NULL and reset is set, all functions will be enabled
5378 * for tracing.
5379 */
5380int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
5381                        int len, int reset)
5382{
5383        ftrace_ops_init(ops);
5384        return ftrace_set_regex(ops, buf, len, reset, 0);
5385}
5386EXPORT_SYMBOL_GPL(ftrace_set_notrace);
5387/**
5388 * ftrace_set_global_filter - set a function to filter on with global tracers
5389 * @buf: the string that holds the function filter text.
5390 * @len: the length of the string.
5391 * @reset: non zero to reset all filters before applying this filter.
5392 *
5393 * Filters denote which functions should be enabled when tracing is enabled.
5394 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
5395 */
5396void ftrace_set_global_filter(unsigned char *buf, int len, int reset)
5397{
5398        ftrace_set_regex(&global_ops, buf, len, reset, 1);
5399}
5400EXPORT_SYMBOL_GPL(ftrace_set_global_filter);
5401
5402/**
5403 * ftrace_set_global_notrace - set a function to not trace with global tracers
5404 * @buf: the string that holds the function notrace text.
5405 * @len: the length of the string.
5406 * @reset: non zero to reset all filters before applying this filter.
5407 *
5408 * Notrace Filters denote which functions should not be enabled when tracing
5409 * is enabled. If @buf is NULL and reset is set, all functions will be enabled
5410 * for tracing.
5411 */
5412void ftrace_set_global_notrace(unsigned char *buf, int len, int reset)
5413{
5414        ftrace_set_regex(&global_ops, buf, len, reset, 0);
5415}
5416EXPORT_SYMBOL_GPL(ftrace_set_global_notrace);
5417
5418/*
5419 * command line interface to allow users to set filters on boot up.
5420 */
5421#define FTRACE_FILTER_SIZE              COMMAND_LINE_SIZE
5422static char ftrace_notrace_buf[FTRACE_FILTER_SIZE] __initdata;
5423static char ftrace_filter_buf[FTRACE_FILTER_SIZE] __initdata;
5424
5425/* Used by the function selftest to skip the test when a boot-time filter is set */
5426bool ftrace_filter_param __initdata;
5427
5428static int __init set_ftrace_notrace(char *str)
5429{
5430        ftrace_filter_param = true;
5431        strlcpy(ftrace_notrace_buf, str, FTRACE_FILTER_SIZE);
5432        return 1;
5433}
5434__setup("ftrace_notrace=", set_ftrace_notrace);
5435
5436static int __init set_ftrace_filter(char *str)
5437{
5438        ftrace_filter_param = true;
5439        strlcpy(ftrace_filter_buf, str, FTRACE_FILTER_SIZE);
5440        return 1;
5441}
5442__setup("ftrace_filter=", set_ftrace_filter);
5443
5444#ifdef CONFIG_FUNCTION_GRAPH_TRACER
5445static char ftrace_graph_buf[FTRACE_FILTER_SIZE] __initdata;
5446static char ftrace_graph_notrace_buf[FTRACE_FILTER_SIZE] __initdata;
5447static int ftrace_graph_set_hash(struct ftrace_hash *hash, char *buffer);
5448
5449static int __init set_graph_function(char *str)
5450{
5451        strlcpy(ftrace_graph_buf, str, FTRACE_FILTER_SIZE);
5452        return 1;
5453}
5454__setup("ftrace_graph_filter=", set_graph_function);
5455
5456static int __init set_graph_notrace_function(char *str)
5457{
5458        strlcpy(ftrace_graph_notrace_buf, str, FTRACE_FILTER_SIZE);
5459        return 1;
5460}
5461__setup("ftrace_graph_notrace=", set_graph_notrace_function);
5462
5463static int __init set_graph_max_depth_function(char *str)
5464{
5465        if (!str)
5466                return 0;
5467        fgraph_max_depth = simple_strtoul(str, NULL, 0);
5468        return 1;
5469}
5470__setup("ftrace_graph_max_depth=", set_graph_max_depth_function);
5471
5472static void __init set_ftrace_early_graph(char *buf, int enable)
5473{
5474        int ret;
5475        char *func;
5476        struct ftrace_hash *hash;
5477
5478        hash = alloc_ftrace_hash(FTRACE_HASH_DEFAULT_BITS);
5479        if (WARN_ON(!hash))
5480                return;
5481
5482        while (buf) {
5483                func = strsep(&buf, ",");
5484                /* we allow only one expression at a time */
5485                ret = ftrace_graph_set_hash(hash, func);
5486                if (ret)
5487                        printk(KERN_DEBUG
5488                               "ftrace: function %s not traceable\n", func);
5489        }
5490
5491        if (enable)
5492                ftrace_graph_hash = hash;
5493        else
5494                ftrace_graph_notrace_hash = hash;
5495}
5496#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
5497
5498void __init
5499ftrace_set_early_filter(struct ftrace_ops *ops, char *buf, int enable)
5500{
5501        char *func;
5502
5503        ftrace_ops_init(ops);
5504
5505        while (buf) {
5506                func = strsep(&buf, ",");
5507                ftrace_set_regex(ops, func, strlen(func), 0, enable);
5508        }
5509}
5510
5511static void __init set_ftrace_early_filters(void)
5512{
5513        if (ftrace_filter_buf[0])
5514                ftrace_set_early_filter(&global_ops, ftrace_filter_buf, 1);
5515        if (ftrace_notrace_buf[0])
5516                ftrace_set_early_filter(&global_ops, ftrace_notrace_buf, 0);
5517#ifdef CONFIG_FUNCTION_GRAPH_TRACER
5518        if (ftrace_graph_buf[0])
5519                set_ftrace_early_graph(ftrace_graph_buf, 1);
5520        if (ftrace_graph_notrace_buf[0])
5521                set_ftrace_early_graph(ftrace_graph_notrace_buf, 0);
5522#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
5523}
5524
5525int ftrace_regex_release(struct inode *inode, struct file *file)
5526{
5527        struct seq_file *m = (struct seq_file *)file->private_data;
5528        struct ftrace_iterator *iter;
5529        struct ftrace_hash **orig_hash;
5530        struct trace_parser *parser;
5531        int filter_hash;
5532        int ret;
5533
5534        if (file->f_mode & FMODE_READ) {
5535                iter = m->private;
5536                seq_release(inode, file);
5537        } else
5538                iter = file->private_data;
5539
5540        parser = &iter->parser;
5541        if (trace_parser_loaded(parser)) {
5542                ftrace_match_records(iter->hash, parser->buffer, parser->idx);
5543        }
5544
5545        trace_parser_put(parser);
5546
5547        mutex_lock(&iter->ops->func_hash->regex_lock);
5548
5549        if (file->f_mode & FMODE_WRITE) {
5550                filter_hash = !!(iter->flags & FTRACE_ITER_FILTER);
5551
5552                if (filter_hash) {
5553                        orig_hash = &iter->ops->func_hash->filter_hash;
5554                        if (iter->tr && !list_empty(&iter->tr->mod_trace))
5555                                iter->hash->flags |= FTRACE_HASH_FL_MOD;
5556                } else
5557                        orig_hash = &iter->ops->func_hash->notrace_hash;
5558
5559                mutex_lock(&ftrace_lock);
5560                ret = ftrace_hash_move_and_update_ops(iter->ops, orig_hash,
5561                                                      iter->hash, filter_hash);
5562                mutex_unlock(&ftrace_lock);
5563        } else {
5564                /* For read only, the hash is the ops hash */
5565                iter->hash = NULL;
5566        }
5567
5568        mutex_unlock(&iter->ops->func_hash->regex_lock);
5569        free_ftrace_hash(iter->hash);
5570        kfree(iter);
5571
5572        return 0;
5573}
5574
5575static const struct file_operations ftrace_avail_fops = {
5576        .open = ftrace_avail_open,
5577        .read = seq_read,
5578        .llseek = seq_lseek,
5579        .release = seq_release_private,
5580};
5581
5582static const struct file_operations ftrace_enabled_fops = {
5583        .open = ftrace_enabled_open,
5584        .read = seq_read,
5585        .llseek = seq_lseek,
5586        .release = seq_release_private,
5587};
5588
5589static const struct file_operations ftrace_filter_fops = {
5590        .open = ftrace_filter_open,
5591        .read = seq_read,
5592        .write = ftrace_filter_write,
5593        .llseek = tracing_lseek,
5594        .release = ftrace_regex_release,
5595};
5596
5597static const struct file_operations ftrace_notrace_fops = {
5598        .open = ftrace_notrace_open,
5599        .read = seq_read,
5600        .write = ftrace_notrace_write,
5601        .llseek = tracing_lseek,
5602        .release = ftrace_regex_release,
5603};
5604
5605#ifdef CONFIG_FUNCTION_GRAPH_TRACER
5606
5607static DEFINE_MUTEX(graph_lock);
5608
5609struct ftrace_hash __rcu *ftrace_graph_hash = EMPTY_HASH;
5610struct ftrace_hash __rcu *ftrace_graph_notrace_hash = EMPTY_HASH;
5611
5612enum graph_filter_type {
5613        GRAPH_FILTER_NOTRACE    = 0,
5614        GRAPH_FILTER_FUNCTION,
5615};
5616
5617#define FTRACE_GRAPH_EMPTY      ((void *)1)
5618
5619struct ftrace_graph_data {
5620        struct ftrace_hash              *hash;
5621        struct ftrace_func_entry        *entry;
5622        int                             idx;   /* for hash table iteration */
5623        enum graph_filter_type          type;
5624        struct ftrace_hash              *new_hash;
5625        const struct seq_operations     *seq_ops;
5626        struct trace_parser             parser;
5627};
5628
5629static void *
5630__g_next(struct seq_file *m, loff_t *pos)
5631{
5632        struct ftrace_graph_data *fgd = m->private;
5633        struct ftrace_func_entry *entry = fgd->entry;
5634        struct hlist_head *head;
5635        int i, idx = fgd->idx;
5636
5637        if (*pos >= fgd->hash->count)
5638                return NULL;
5639
5640        if (entry) {
5641                hlist_for_each_entry_continue(entry, hlist) {
5642                        fgd->entry = entry;
5643                        return entry;
5644                }
5645
5646                idx++;
5647        }
5648
5649        for (i = idx; i < 1 << fgd->hash->size_bits; i++) {
5650                head = &fgd->hash->buckets[i];
5651                hlist_for_each_entry(entry, head, hlist) {
5652                        fgd->entry = entry;
5653                        fgd->idx = i;
5654                        return entry;
5655                }
5656        }
5657        return NULL;
5658}
5659
5660static void *
5661g_next(struct seq_file *m, void *v, loff_t *pos)
5662{
5663        (*pos)++;
5664        return __g_next(m, pos);
5665}
5666
5667static void *g_start(struct seq_file *m, loff_t *pos)
5668{
5669        struct ftrace_graph_data *fgd = m->private;
5670
5671        mutex_lock(&graph_lock);
5672
5673        if (fgd->type == GRAPH_FILTER_FUNCTION)
5674                fgd->hash = rcu_dereference_protected(ftrace_graph_hash,
5675                                        lockdep_is_held(&graph_lock));
5676        else
5677                fgd->hash = rcu_dereference_protected(ftrace_graph_notrace_hash,
5678                                        lockdep_is_held(&graph_lock));
5679
5680        /* Nothing to show; tell g_show to print that all functions are enabled */
5681        if (ftrace_hash_empty(fgd->hash) && !*pos)
5682                return FTRACE_GRAPH_EMPTY;
5683
5684        fgd->idx = 0;
5685        fgd->entry = NULL;
5686        return __g_next(m, pos);
5687}
5688
5689static void g_stop(struct seq_file *m, void *p)
5690{
5691        mutex_unlock(&graph_lock);
5692}
5693
5694static int g_show(struct seq_file *m, void *v)
5695{
5696        struct ftrace_func_entry *entry = v;
5697
5698        if (!entry)
5699                return 0;
5700
5701        if (entry == FTRACE_GRAPH_EMPTY) {
5702                struct ftrace_graph_data *fgd = m->private;
5703
5704                if (fgd->type == GRAPH_FILTER_FUNCTION)
5705                        seq_puts(m, "#### all functions enabled ####\n");
5706                else
5707                        seq_puts(m, "#### no functions disabled ####\n");
5708                return 0;
5709        }
5710
5711        seq_printf(m, "%ps\n", (void *)entry->ip);
5712
5713        return 0;
5714}
5715
5716static const struct seq_operations ftrace_graph_seq_ops = {
5717        .start = g_start,
5718        .next = g_next,
5719        .stop = g_stop,
5720        .show = g_show,
5721};
5722
5723static int
5724__ftrace_graph_open(struct inode *inode, struct file *file,
5725                    struct ftrace_graph_data *fgd)
5726{
5727        int ret = 0;
5728        struct ftrace_hash *new_hash = NULL;
5729
5730        if (file->f_mode & FMODE_WRITE) {
5731                const int size_bits = FTRACE_HASH_DEFAULT_BITS;
5732
5733                if (trace_parser_get_init(&fgd->parser, FTRACE_BUFF_MAX))
5734                        return -ENOMEM;
5735
5736                if (file->f_flags & O_TRUNC)
5737                        new_hash = alloc_ftrace_hash(size_bits);
5738                else
5739                        new_hash = alloc_and_copy_ftrace_hash(size_bits,
5740                                                              fgd->hash);
5741                if (!new_hash) {
5742                        ret = -ENOMEM;
5743                        goto out;
5744                }
5745        }
5746
5747        if (file->f_mode & FMODE_READ) {
5748                ret = seq_open(file, &ftrace_graph_seq_ops);
5749                if (!ret) {
5750                        struct seq_file *m = file->private_data;
5751                        m->private = fgd;
5752                } else {
5753                        /* Failed */
5754                        free_ftrace_hash(new_hash);
5755                        new_hash = NULL;
5756                }
5757        } else
5758                file->private_data = fgd;
5759
5760out:
5761        if (ret < 0 && file->f_mode & FMODE_WRITE)
5762                trace_parser_put(&fgd->parser);
5763
5764        fgd->new_hash = new_hash;
5765
5766        /*
5767         * All uses of fgd->hash must be taken with the graph_lock
5768         * held. The graph_lock is going to be released, so force
5769         * fgd->hash to be reinitialized when it is taken again.
5770         */
5771        fgd->hash = NULL;
5772
5773        return ret;
5774}
5775
5776static int
5777ftrace_graph_open(struct inode *inode, struct file *file)
5778{
5779        struct ftrace_graph_data *fgd;
5780        int ret;
5781
5782        if (unlikely(ftrace_disabled))
5783                return -ENODEV;
5784
5785        fgd = kmalloc(sizeof(*fgd), GFP_KERNEL);
5786        if (fgd == NULL)
5787                return -ENOMEM;
5788
5789        mutex_lock(&graph_lock);
5790
5791        fgd->hash = rcu_dereference_protected(ftrace_graph_hash,
5792                                        lockdep_is_held(&graph_lock));
5793        fgd->type = GRAPH_FILTER_FUNCTION;
5794        fgd->seq_ops = &ftrace_graph_seq_ops;
5795
5796        ret = __ftrace_graph_open(inode, file, fgd);
5797        if (ret < 0)
5798                kfree(fgd);
5799
5800        mutex_unlock(&graph_lock);
5801        return ret;
5802}
5803
5804static int
5805ftrace_graph_notrace_open(struct inode *inode, struct file *file)
5806{
5807        struct ftrace_graph_data *fgd;
5808        int ret;
5809
5810        if (unlikely(ftrace_disabled))
5811                return -ENODEV;
5812
5813        fgd = kmalloc(sizeof(*fgd), GFP_KERNEL);
5814        if (fgd == NULL)
5815                return -ENOMEM;
5816
5817        mutex_lock(&graph_lock);
5818
5819        fgd->hash = rcu_dereference_protected(ftrace_graph_notrace_hash,
5820                                        lockdep_is_held(&graph_lock));
5821        fgd->type = GRAPH_FILTER_NOTRACE;
5822        fgd->seq_ops = &ftrace_graph_seq_ops;
5823
5824        ret = __ftrace_graph_open(inode, file, fgd);
5825        if (ret < 0)
5826                kfree(fgd);
5827
5828        mutex_unlock(&graph_lock);
5829        return ret;
5830}
5831
5832static int
5833ftrace_graph_release(struct inode *inode, struct file *file)
5834{
5835        struct ftrace_graph_data *fgd;
5836        struct ftrace_hash *old_hash, *new_hash;
5837        struct trace_parser *parser;
5838        int ret = 0;
5839
5840        if (file->f_mode & FMODE_READ) {
5841                struct seq_file *m = file->private_data;
5842
5843                fgd = m->private;
5844                seq_release(inode, file);
5845        } else {
5846                fgd = file->private_data;
5847        }
5848
5849
5850        if (file->f_mode & FMODE_WRITE) {
5851
5852                parser = &fgd->parser;
5853
5854                if (trace_parser_loaded((parser))) {
5855                        ret = ftrace_graph_set_hash(fgd->new_hash,
5856                                                    parser->buffer);
5857                }
5858
5859                trace_parser_put(parser);
5860
5861                new_hash = __ftrace_hash_move(fgd->new_hash);
5862                if (!new_hash) {
5863                        ret = -ENOMEM;
5864                        goto out;
5865                }
5866
5867                mutex_lock(&graph_lock);
5868
5869                if (fgd->type == GRAPH_FILTER_FUNCTION) {
5870                        old_hash = rcu_dereference_protected(ftrace_graph_hash,
5871                                        lockdep_is_held(&graph_lock));
5872                        rcu_assign_pointer(ftrace_graph_hash, new_hash);
5873                } else {
5874                        old_hash = rcu_dereference_protected(ftrace_graph_notrace_hash,
5875                                        lockdep_is_held(&graph_lock));
5876                        rcu_assign_pointer(ftrace_graph_notrace_hash, new_hash);
5877                }
5878
5879                mutex_unlock(&graph_lock);
5880
5881                /*
5882                 * We need to do a hard force of sched synchronization.
5883                 * This is because we use preempt_disable() to do RCU, but
5884                 * the function tracers can be called where RCU is not watching
5885                 * (like before user_exit()). We can not rely on the RCU
5886                 * infrastructure to do the synchronization, thus we must do it
5887                 * ourselves.
5888                 */
5889                synchronize_rcu_tasks_rude();
5890
5891                free_ftrace_hash(old_hash);
5892        }
5893
5894 out:
5895        free_ftrace_hash(fgd->new_hash);
5896        kfree(fgd);
5897
5898        return ret;
5899}
5900
5901static int
5902ftrace_graph_set_hash(struct ftrace_hash *hash, char *buffer)
5903{
5904        struct ftrace_glob func_g;
5905        struct dyn_ftrace *rec;
5906        struct ftrace_page *pg;
5907        struct ftrace_func_entry *entry;
5908        int fail = 1;
5909        int not;
5910
5911        /* decode regex */
5912        func_g.type = filter_parse_regex(buffer, strlen(buffer),
5913                                         &func_g.search, &not);
5914
5915        func_g.len = strlen(func_g.search);
5916
5917        mutex_lock(&ftrace_lock);
5918
5919        if (unlikely(ftrace_disabled)) {
5920                mutex_unlock(&ftrace_lock);
5921                return -ENODEV;
5922        }
5923
5924        do_for_each_ftrace_rec(pg, rec) {
5925
5926                if (rec->flags & FTRACE_FL_DISABLED)
5927                        continue;
5928
5929                if (ftrace_match_record(rec, &func_g, NULL, 0)) {
5930                        entry = ftrace_lookup_ip(hash, rec->ip);
5931
5932                        if (!not) {
5933                                fail = 0;
5934
5935                                if (entry)
5936                                        continue;
5937                                if (add_hash_entry(hash, rec->ip) < 0)
5938                                        goto out;
5939                        } else {
5940                                if (entry) {
5941                                        free_hash_entry(hash, entry);
5942                                        fail = 0;
5943                                }
5944                        }
5945                }
5946        } while_for_each_ftrace_rec();
5947out:
5948        mutex_unlock(&ftrace_lock);
5949
5950        if (fail)
5951                return -EINVAL;
5952
5953        return 0;
5954}
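
/*
 * Hedged note (not part of this file): the buffer handed to
 * ftrace_graph_set_hash() comes from ftrace_graph_write() below or from
 * the boot-time buffers above, and is decoded by filter_parse_regex(),
 * so the usual ftrace glob forms apply.  For example, via tracefs
 * (typically mounted at /sys/kernel/tracing):
 *
 *	echo 'vfs_*'      > /sys/kernel/tracing/set_graph_function
 *	echo '!vfs_fsync' > /sys/kernel/tracing/set_graph_function
 *
 * A leading '!' sets the "not" flag and removes matching entries from
 * the hash instead of adding them.
 */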
5955
5956static ssize_t
5957ftrace_graph_write(struct file *file, const char __user *ubuf,
5958                   size_t cnt, loff_t *ppos)
5959{
5960        ssize_t read, ret = 0;
5961        struct ftrace_graph_data *fgd = file->private_data;
5962        struct trace_parser *parser;
5963
5964        if (!cnt)
5965                return 0;
5966
5967        /* Read mode uses seq functions */
5968        if (file->f_mode & FMODE_READ) {
5969                struct seq_file *m = file->private_data;
5970                fgd = m->private;
5971        }
5972
5973        parser = &fgd->parser;
5974
5975        read = trace_get_user(parser, ubuf, cnt, ppos);
5976
5977        if (read >= 0 && trace_parser_loaded(parser) &&
5978            !trace_parser_cont(parser)) {
5979
5980                ret = ftrace_graph_set_hash(fgd->new_hash,
5981                                            parser->buffer);
5982                trace_parser_clear(parser);
5983        }
5984
5985        if (!ret)
5986                ret = read;
5987
5988        return ret;
5989}
5990
5991static const struct file_operations ftrace_graph_fops = {
5992        .open           = ftrace_graph_open,
5993        .read           = seq_read,
5994        .write          = ftrace_graph_write,
5995        .llseek         = tracing_lseek,
5996        .release        = ftrace_graph_release,
5997};
5998
5999static const struct file_operations ftrace_graph_notrace_fops = {
6000        .open           = ftrace_graph_notrace_open,
6001        .read           = seq_read,
6002        .write          = ftrace_graph_write,
6003        .llseek         = tracing_lseek,
6004        .release        = ftrace_graph_release,
6005};
6006#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
6007
6008void ftrace_create_filter_files(struct ftrace_ops *ops,
6009                                struct dentry *parent)
6010{
6011
6012        trace_create_file("set_ftrace_filter", 0644, parent,
6013                          ops, &ftrace_filter_fops);
6014
6015        trace_create_file("set_ftrace_notrace", 0644, parent,
6016                          ops, &ftrace_notrace_fops);
6017}
6018
6019/*
6020 * The name "destroy_filter_files" is really a misnomer. Although
6021 * in the future it may actually delete the files, for now it is
6022 * really intended to make sure the ops passed in are disabled
6023 * and that when this function returns, the caller is free to
6024 * free the ops.
6025 *
6026 * The "destroy" name is only to match the "create" name that this
6027 * should be paired with.
6028 */
6029void ftrace_destroy_filter_files(struct ftrace_ops *ops)
6030{
6031        mutex_lock(&ftrace_lock);
6032        if (ops->flags & FTRACE_OPS_FL_ENABLED)
6033                ftrace_shutdown(ops, 0);
6034        ops->flags |= FTRACE_OPS_FL_DELETED;
6035        mutex_unlock(&ftrace_lock);
6036}
6037
6038static __init int ftrace_init_dyn_tracefs(struct dentry *d_tracer)
6039{
6040
6041        trace_create_file("available_filter_functions", 0444,
6042                        d_tracer, NULL, &ftrace_avail_fops);
6043
6044        trace_create_file("enabled_functions", 0444,
6045                        d_tracer, NULL, &ftrace_enabled_fops);
6046
6047        ftrace_create_filter_files(&global_ops, d_tracer);
6048
6049#ifdef CONFIG_FUNCTION_GRAPH_TRACER
6050        trace_create_file("set_graph_function", 0644, d_tracer,
6051                                    NULL,
6052                                    &ftrace_graph_fops);
6053        trace_create_file("set_graph_notrace", 0644, d_tracer,
6054                                    NULL,
6055                                    &ftrace_graph_notrace_fops);
6056#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
6057
6058        return 0;
6059}
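
/*
 * The files created above land directly under the tracefs mount point
 * (typically /sys/kernel/tracing):
 *
 *	available_filter_functions - read-only list of traceable functions
 *	enabled_functions          - read-only list of records with callbacks attached
 *	set_ftrace_filter          - filter hash of the global ops
 *	set_ftrace_notrace         - notrace hash of the global ops
 *	set_graph_function         - function_graph filter (CONFIG_FUNCTION_GRAPH_TRACER)
 *	set_graph_notrace          - function_graph notrace (CONFIG_FUNCTION_GRAPH_TRACER)
 */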
6060
6061static int ftrace_cmp_ips(const void *a, const void *b)
6062{
6063        const unsigned long *ipa = a;
6064        const unsigned long *ipb = b;
6065
6066        if (*ipa > *ipb)
6067                return 1;
6068        if (*ipa < *ipb)
6069                return -1;
6070        return 0;
6071}
6072
6073static int ftrace_process_locs(struct module *mod,
6074                               unsigned long *start,
6075                               unsigned long *end)
6076{
6077        struct ftrace_page *start_pg;
6078        struct ftrace_page *pg;
6079        struct dyn_ftrace *rec;
6080        unsigned long count;
6081        unsigned long *p;
6082        unsigned long addr;
6083        unsigned long flags = 0; /* Shut up gcc */
6084        int ret = -ENOMEM;
6085
6086        count = end - start;
6087
6088        if (!count)
6089                return 0;
6090
6091        sort(start, count, sizeof(*start),
6092             ftrace_cmp_ips, NULL);
6093
6094        start_pg = ftrace_allocate_pages(count);
6095        if (!start_pg)
6096                return -ENOMEM;
6097
6098        mutex_lock(&ftrace_lock);
6099
6100        /*
6101         * The core kernel and each module need their own pages,
6102         * as modules will free them when they are removed.
6103         * Force a new page to be allocated for modules.
6104         */
6105        if (!mod) {
6106                WARN_ON(ftrace_pages || ftrace_pages_start);
6107                /* First initialization */
6108                ftrace_pages = ftrace_pages_start = start_pg;
6109        } else {
6110                if (!ftrace_pages)
6111                        goto out;
6112
6113                if (WARN_ON(ftrace_pages->next)) {
6114                        /* Hmm, we have free pages? */
6115                        while (ftrace_pages->next)
6116                                ftrace_pages = ftrace_pages->next;
6117                }
6118
6119                ftrace_pages->next = start_pg;
6120        }
6121
6122        p = start;
6123        pg = start_pg;
6124        while (p < end) {
6125                addr = ftrace_call_adjust(*p++);
6126                /*
6127                 * Some architecture linkers will pad between
6128                 * the different mcount_loc sections of different
6129                 * object files to satisfy alignments.
6130                 * Skip any NULL pointers.
6131                 */
6132                if (!addr)
6133                        continue;
6134
6135                if (pg->index == pg->size) {
6136                        /* We should have allocated enough */
6137                        if (WARN_ON(!pg->next))
6138                                break;
6139                        pg = pg->next;
6140                }
6141
6142                rec = &pg->records[pg->index++];
6143                rec->ip = addr;
6144        }
6145
6146        /* We should have used all pages */
6147        WARN_ON(pg->next);
6148
6149        /* Assign the last page to ftrace_pages */
6150        ftrace_pages = pg;
6151
6152        /*
6153         * We only need to disable interrupts on start up
6154         * because we are modifying code that an interrupt
6155         * may execute, and the modification is not atomic.
6156         * But for modules, nothing runs the code we modify
6157         * until we are finished with it, and there's no
6158         * reason to cause large interrupt latencies while we do it.
6159         */
6160        if (!mod)
6161                local_irq_save(flags);
6162        ftrace_update_code(mod, start_pg);
6163        if (!mod)
6164                local_irq_restore(flags);
6165        ret = 0;
6166 out:
6167        mutex_unlock(&ftrace_lock);
6168
6169        return ret;
6170}
6171
6172struct ftrace_mod_func {
6173        struct list_head        list;
6174        char                    *name;
6175        unsigned long           ip;
6176        unsigned int            size;
6177};
6178
6179struct ftrace_mod_map {
6180        struct rcu_head         rcu;
6181        struct list_head        list;
6182        struct module           *mod;
6183        unsigned long           start_addr;
6184        unsigned long           end_addr;
6185        struct list_head        funcs;
6186        unsigned int            num_funcs;
6187};
6188
6189#ifdef CONFIG_MODULES
6190
6191#define next_to_ftrace_page(p) container_of(p, struct ftrace_page, next)
6192
6193static LIST_HEAD(ftrace_mod_maps);
6194
6195static int referenced_filters(struct dyn_ftrace *rec)
6196{
6197        struct ftrace_ops *ops;
6198        int cnt = 0;
6199
6200        for (ops = ftrace_ops_list; ops != &ftrace_list_end; ops = ops->next) {
6201                if (ops_references_rec(ops, rec))
6202                        cnt++;
6203        }
6204
6205        return cnt;
6206}
6207
6208static void
6209clear_mod_from_hash(struct ftrace_page *pg, struct ftrace_hash *hash)
6210{
6211        struct ftrace_func_entry *entry;
6212        struct dyn_ftrace *rec;
6213        int i;
6214
6215        if (ftrace_hash_empty(hash))
6216                return;
6217
6218        for (i = 0; i < pg->index; i++) {
6219                rec = &pg->records[i];
6220                entry = __ftrace_lookup_ip(hash, rec->ip);
6221                /*
6222                 * Do not allow this rec to match again.
6223                 * Yeah, it may waste some memory, but will be removed
6224                 * if/when the hash is modified again.
6225                 */
6226                if (entry)
6227                        entry->ip = 0;
6228        }
6229}
6230
6231/* Clear any records from the hashes */
6232static void clear_mod_from_hashes(struct ftrace_page *pg)
6233{
6234        struct trace_array *tr;
6235
6236        mutex_lock(&trace_types_lock);
6237        list_for_each_entry(tr, &ftrace_trace_arrays, list) {
6238                if (!tr->ops || !tr->ops->func_hash)
6239                        continue;
6240                mutex_lock(&tr->ops->func_hash->regex_lock);
6241                clear_mod_from_hash(pg, tr->ops->func_hash->filter_hash);
6242                clear_mod_from_hash(pg, tr->ops->func_hash->notrace_hash);
6243                mutex_unlock(&tr->ops->func_hash->regex_lock);
6244        }
6245        mutex_unlock(&trace_types_lock);
6246}
6247
6248static void ftrace_free_mod_map(struct rcu_head *rcu)
6249{
6250        struct ftrace_mod_map *mod_map = container_of(rcu, struct ftrace_mod_map, rcu);
6251        struct ftrace_mod_func *mod_func;
6252        struct ftrace_mod_func *n;
6253
6254        /* All the contents of mod_map are no longer visible to readers */
6255        list_for_each_entry_safe(mod_func, n, &mod_map->funcs, list) {
6256                kfree(mod_func->name);
6257                list_del(&mod_func->list);
6258                kfree(mod_func);
6259        }
6260
6261        kfree(mod_map);
6262}
6263
6264void ftrace_release_mod(struct module *mod)
6265{
6266        struct ftrace_mod_map *mod_map;
6267        struct ftrace_mod_map *n;
6268        struct dyn_ftrace *rec;
6269        struct ftrace_page **last_pg;
6270        struct ftrace_page *tmp_page = NULL;
6271        struct ftrace_page *pg;
6272        int order;
6273
6274        mutex_lock(&ftrace_lock);
6275
6276        if (ftrace_disabled)
6277                goto out_unlock;
6278
6279        list_for_each_entry_safe(mod_map, n, &ftrace_mod_maps, list) {
6280                if (mod_map->mod == mod) {
6281                        list_del_rcu(&mod_map->list);
6282                        call_rcu(&mod_map->rcu, ftrace_free_mod_map);
6283                        break;
6284                }
6285        }
6286
6287        /*
6288         * Each module has its own ftrace_pages, remove
6289         * them from the list.
6290         */
6291        last_pg = &ftrace_pages_start;
6292        for (pg = ftrace_pages_start; pg; pg = *last_pg) {
6293                rec = &pg->records[0];
6294                if (within_module_core(rec->ip, mod) ||
6295                    within_module_init(rec->ip, mod)) {
6296                        /*
6297                         * As core pages are first, the first
6298                         * page should never be a module page.
6299                         */
6300                        if (WARN_ON(pg == ftrace_pages_start))
6301                                goto out_unlock;
6302
6303                        /* Check if we are deleting the last page */
6304                        if (pg == ftrace_pages)
6305                                ftrace_pages = next_to_ftrace_page(last_pg);
6306
6307                        ftrace_update_tot_cnt -= pg->index;
6308                        *last_pg = pg->next;
6309
6310                        pg->next = tmp_page;
6311                        tmp_page = pg;
6312                } else
6313                        last_pg = &pg->next;
6314        }
6315 out_unlock:
6316        mutex_unlock(&ftrace_lock);
6317
6318        for (pg = tmp_page; pg; pg = tmp_page) {
6319
6320                /* Needs to be called outside of ftrace_lock */
6321                clear_mod_from_hashes(pg);
6322
6323                order = get_count_order(pg->size / ENTRIES_PER_PAGE);
6324                free_pages((unsigned long)pg->records, order);
6325                tmp_page = pg->next;
6326                kfree(pg);
6327                ftrace_number_of_pages -= 1 << order;
6328                ftrace_number_of_groups--;
6329        }
6330}
6331
6332void ftrace_module_enable(struct module *mod)
6333{
6334        struct dyn_ftrace *rec;
6335        struct ftrace_page *pg;
6336
6337        mutex_lock(&ftrace_lock);
6338
6339        if (ftrace_disabled)
6340                goto out_unlock;
6341
6342        /*
6343         * If the tracing is enabled, go ahead and enable the record.
6344         *
6345         * The reason not to enable the record immediately is the
6346         * inherent check of ftrace_make_nop/ftrace_make_call for
6347         * correct previous instructions.  Doing the NOP conversion
6348         * first puts the module into the correct state, thus
6349         * passing the ftrace_make_call check.
6350         *
6351         * We also delay this until after the module code has already
6352         * set the text to read-only, as we now need to set it back to
6353         * read-write so that we can modify the text.
6354         */
6355        if (ftrace_start_up)
6356                ftrace_arch_code_modify_prepare();
6357
6358        do_for_each_ftrace_rec(pg, rec) {
6359                int cnt;
6360                /*
6361                 * do_for_each_ftrace_rec() is a double loop.
6362                 * Module text shares the pg. If a record is
6363                 * not part of this module, then skip this pg,
6364                 * which the "break" will do.
6365                 */
6366                if (!within_module_core(rec->ip, mod) &&
6367                    !within_module_init(rec->ip, mod))
6368                        break;
6369
6370                cnt = 0;
6371
6372                /*
6373                 * When adding a module, we need to check if tracers are
6374                 * currently enabled and if they are, and can trace this record,
6375                 * we need to enable the module functions as well as update the
6376                 * reference counts for those function records.
6377                 */
6378                if (ftrace_start_up)
6379                        cnt += referenced_filters(rec);
6380
6381                /* This clears FTRACE_FL_DISABLED */
6382                rec->flags = cnt;
6383
6384                if (ftrace_start_up && cnt) {
6385                        int failed = __ftrace_replace_code(rec, 1);
6386                        if (failed) {
6387                                ftrace_bug(failed, rec);
6388                                goto out_loop;
6389                        }
6390                }
6391
6392        } while_for_each_ftrace_rec();
6393
6394 out_loop:
6395        if (ftrace_start_up)
6396                ftrace_arch_code_modify_post_process();
6397
6398 out_unlock:
6399        mutex_unlock(&ftrace_lock);
6400
6401        process_cached_mods(mod->name);
6402}
6403
6404void ftrace_module_init(struct module *mod)
6405{
6406        if (ftrace_disabled || !mod->num_ftrace_callsites)
6407                return;
6408
6409        ftrace_process_locs(mod, mod->ftrace_callsites,
6410                            mod->ftrace_callsites + mod->num_ftrace_callsites);
6411}
6412
6413static void save_ftrace_mod_rec(struct ftrace_mod_map *mod_map,
6414                                struct dyn_ftrace *rec)
6415{
6416        struct ftrace_mod_func *mod_func;
6417        unsigned long symsize;
6418        unsigned long offset;
6419        char str[KSYM_SYMBOL_LEN];
6420        char *modname;
6421        const char *ret;
6422
6423        ret = kallsyms_lookup(rec->ip, &symsize, &offset, &modname, str);
6424        if (!ret)
6425                return;
6426
6427        mod_func = kmalloc(sizeof(*mod_func), GFP_KERNEL);
6428        if (!mod_func)
6429                return;
6430
6431        mod_func->name = kstrdup(str, GFP_KERNEL);
6432        if (!mod_func->name) {
6433                kfree(mod_func);
6434                return;
6435        }
6436
6437        mod_func->ip = rec->ip - offset;
6438        mod_func->size = symsize;
6439
6440        mod_map->num_funcs++;
6441
6442        list_add_rcu(&mod_func->list, &mod_map->funcs);
6443}
6444
6445static struct ftrace_mod_map *
6446allocate_ftrace_mod_map(struct module *mod,
6447                        unsigned long start, unsigned long end)
6448{
6449        struct ftrace_mod_map *mod_map;
6450
6451        mod_map = kmalloc(sizeof(*mod_map), GFP_KERNEL);
6452        if (!mod_map)
6453                return NULL;
6454
6455        mod_map->mod = mod;
6456        mod_map->start_addr = start;
6457        mod_map->end_addr = end;
6458        mod_map->num_funcs = 0;
6459
6460        INIT_LIST_HEAD_RCU(&mod_map->funcs);
6461
6462        list_add_rcu(&mod_map->list, &ftrace_mod_maps);
6463
6464        return mod_map;
6465}
6466
6467static const char *
6468ftrace_func_address_lookup(struct ftrace_mod_map *mod_map,
6469                           unsigned long addr, unsigned long *size,
6470                           unsigned long *off, char *sym)
6471{
6472        struct ftrace_mod_func *found_func =  NULL;
6473        struct ftrace_mod_func *mod_func;
6474
6475        list_for_each_entry_rcu(mod_func, &mod_map->funcs, list) {
6476                if (addr >= mod_func->ip &&
6477                    addr < mod_func->ip + mod_func->size) {
6478                        found_func = mod_func;
6479                        break;
6480                }
6481        }
6482
6483        if (found_func) {
6484                if (size)
6485                        *size = found_func->size;
6486                if (off)
6487                        *off = addr - found_func->ip;
6488                if (sym)
6489                        strlcpy(sym, found_func->name, KSYM_NAME_LEN);
6490
6491                return found_func->name;
6492        }
6493
6494        return NULL;
6495}
6496
6497const char *
6498ftrace_mod_address_lookup(unsigned long addr, unsigned long *size,
6499                   unsigned long *off, char **modname, char *sym)
6500{
6501        struct ftrace_mod_map *mod_map;
6502        const char *ret = NULL;
6503
6504        /* mod_map is freed via call_rcu() */
6505        preempt_disable();
6506        list_for_each_entry_rcu(mod_map, &ftrace_mod_maps, list) {
6507                ret = ftrace_func_address_lookup(mod_map, addr, size, off, sym);
6508                if (ret) {
6509                        if (modname)
6510                                *modname = mod_map->mod->name;
6511                        break;
6512                }
6513        }
6514        preempt_enable();
6515
6516        return ret;
6517}
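
/*
 * Illustrative note (not from this file): the lookup above is what lets an
 * address inside a module's already-freed init text still resolve to a
 * symbol.  For example, something like
 *
 *	printk("%pS\n", (void *)addr);
 *
 * can still print "my_mod_init_func [my_module]" because kallsyms falls back
 * to these ftrace_mod_maps.  The symbol and module names here are
 * hypothetical.
 */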
6518
6519int ftrace_mod_get_kallsym(unsigned int symnum, unsigned long *value,
6520                           char *type, char *name,
6521                           char *module_name, int *exported)
6522{
6523        struct ftrace_mod_map *mod_map;
6524        struct ftrace_mod_func *mod_func;
6525
6526        preempt_disable();
6527        list_for_each_entry_rcu(mod_map, &ftrace_mod_maps, list) {
6528
6529                if (symnum >= mod_map->num_funcs) {
6530                        symnum -= mod_map->num_funcs;
6531                        continue;
6532                }
6533
6534                list_for_each_entry_rcu(mod_func, &mod_map->funcs, list) {
6535                        if (symnum > 1) {
6536                                symnum--;
6537                                continue;
6538                        }
6539
6540                        *value = mod_func->ip;
6541                        *type = 'T';
6542                        strlcpy(name, mod_func->name, KSYM_NAME_LEN);
6543                        strlcpy(module_name, mod_map->mod->name, MODULE_NAME_LEN);
6544                        *exported = 1;
6545                        preempt_enable();
6546                        return 0;
6547                }
6548                WARN_ON(1);
6549                break;
6550        }
6551        preempt_enable();
6552        return -ERANGE;
6553}
6554
6555#else
6556static void save_ftrace_mod_rec(struct ftrace_mod_map *mod_map,
6557                                struct dyn_ftrace *rec) { }
6558static inline struct ftrace_mod_map *
6559allocate_ftrace_mod_map(struct module *mod,
6560                        unsigned long start, unsigned long end)
6561{
6562        return NULL;
6563}
6564#endif /* CONFIG_MODULES */
6565
6566struct ftrace_init_func {
6567        struct list_head list;
6568        unsigned long ip;
6569};
6570
6571/* Clear any init ips from hashes */
6572static void
6573clear_func_from_hash(struct ftrace_init_func *func, struct ftrace_hash *hash)
6574{
6575        struct ftrace_func_entry *entry;
6576
6577        if (ftrace_hash_empty(hash))
6578                return;
6579
6580        entry = __ftrace_lookup_ip(hash, func->ip);
6581
6582        /*
6583         * Do not allow this rec to match again.
6584         * This may waste some memory, but it will be removed
6585         * if/when the hash is modified again.
6586         */
6587        if (entry)
6588                entry->ip = 0;
6589}
6590
6591static void
6592clear_func_from_hashes(struct ftrace_init_func *func)
6593{
6594        struct trace_array *tr;
6595
6596        mutex_lock(&trace_types_lock);
6597        list_for_each_entry(tr, &ftrace_trace_arrays, list) {
6598                if (!tr->ops || !tr->ops->func_hash)
6599                        continue;
6600                mutex_lock(&tr->ops->func_hash->regex_lock);
6601                clear_func_from_hash(func, tr->ops->func_hash->filter_hash);
6602                clear_func_from_hash(func, tr->ops->func_hash->notrace_hash);
6603                mutex_unlock(&tr->ops->func_hash->regex_lock);
6604        }
6605        mutex_unlock(&trace_types_lock);
6606}
6607
6608static void add_to_clear_hash_list(struct list_head *clear_list,
6609                                   struct dyn_ftrace *rec)
6610{
6611        struct ftrace_init_func *func;
6612
6613        func = kmalloc(sizeof(*func), GFP_KERNEL);
6614        if (!func) {
6615                WARN_ONCE(1, "alloc failure, ftrace filter could be stale\n");
6616                return;
6617        }
6618
6619        func->ip = rec->ip;
6620        list_add(&func->list, clear_list);
6621}
6622
6623void ftrace_free_mem(struct module *mod, void *start_ptr, void *end_ptr)
6624{
6625        unsigned long start = (unsigned long)(start_ptr);
6626        unsigned long end = (unsigned long)(end_ptr);
6627        struct ftrace_page **last_pg = &ftrace_pages_start;
6628        struct ftrace_page *pg;
6629        struct dyn_ftrace *rec;
6630        struct dyn_ftrace key;
6631        struct ftrace_mod_map *mod_map = NULL;
6632        struct ftrace_init_func *func, *func_next;
6633        struct list_head clear_hash;
6634        int order;
6635
6636        INIT_LIST_HEAD(&clear_hash);
6637
6638        key.ip = start;
6639        key.flags = end;        /* overload flags, as it is unsigned long */
6640
6641        mutex_lock(&ftrace_lock);
6642
6643        /*
6644         * If we are freeing module init memory, then check if
6645         * any tracer is active. If so, save a mapping of the freed
6646         * addresses to the module functions they belong to.
6647         */
6648        if (mod && ftrace_ops_list != &ftrace_list_end)
6649                mod_map = allocate_ftrace_mod_map(mod, start, end);
6650
6651        for (pg = ftrace_pages_start; pg; last_pg = &pg->next, pg = *last_pg) {
6652                if (end < pg->records[0].ip ||
6653                    start >= (pg->records[pg->index - 1].ip + MCOUNT_INSN_SIZE))
6654                        continue;
6655 again:
6656                rec = bsearch(&key, pg->records, pg->index,
6657                              sizeof(struct dyn_ftrace),
6658                              ftrace_cmp_recs);
6659                if (!rec)
6660                        continue;
6661
6662                /* rec will be cleared from hashes after ftrace_lock unlock */
6663                add_to_clear_hash_list(&clear_hash, rec);
6664
6665                if (mod_map)
6666                        save_ftrace_mod_rec(mod_map, rec);
6667
6668                pg->index--;
6669                ftrace_update_tot_cnt--;
6670                if (!pg->index) {
6671                        *last_pg = pg->next;
6672                        order = get_count_order(pg->size / ENTRIES_PER_PAGE);
6673                        free_pages((unsigned long)pg->records, order);
6674                        ftrace_number_of_pages -= 1 << order;
6675                        ftrace_number_of_groups--;
6676                        kfree(pg);
6677                        pg = container_of(last_pg, struct ftrace_page, next);
6678                        if (!(*last_pg))
6679                                ftrace_pages = pg;
6680                        continue;
6681                }
6682                memmove(rec, rec + 1,
6683                        (pg->index - (rec - pg->records)) * sizeof(*rec));
6684                /* More than one function may be in this block */
6685                goto again;
6686        }
6687        mutex_unlock(&ftrace_lock);
6688
6689        list_for_each_entry_safe(func, func_next, &clear_hash, list) {
6690                clear_func_from_hashes(func);
6691                kfree(func);
6692        }
6693}
6694
6695void __init ftrace_free_init_mem(void)
6696{
6697        void *start = (void *)(&__init_begin);
6698        void *end = (void *)(&__init_end);
6699
6700        ftrace_free_mem(NULL, start, end);
6701}
6702
6703void __init ftrace_init(void)
6704{
6705        extern unsigned long __start_mcount_loc[];
6706        extern unsigned long __stop_mcount_loc[];
6707        unsigned long count, flags;
6708        int ret;
6709
6710        local_irq_save(flags);
6711        ret = ftrace_dyn_arch_init();
6712        local_irq_restore(flags);
6713        if (ret)
6714                goto failed;
6715
6716        count = __stop_mcount_loc - __start_mcount_loc;
6717        if (!count) {
6718                pr_info("ftrace: No functions to be traced?\n");
6719                goto failed;
6720        }
6721
6722        pr_info("ftrace: allocating %ld entries in %ld pages\n",
6723                count, count / ENTRIES_PER_PAGE + 1);
6724
6725        last_ftrace_enabled = ftrace_enabled = 1;
6726
6727        ret = ftrace_process_locs(NULL,
6728                                  __start_mcount_loc,
6729                                  __stop_mcount_loc);
6730
6731        pr_info("ftrace: allocated %ld pages with %ld groups\n",
6732                ftrace_number_of_pages, ftrace_number_of_groups);
6733
6734        set_ftrace_early_filters();
6735
6736        return;
6737 failed:
6738        ftrace_disabled = 1;
6739}
6740
6741/* Do nothing if arch does not support this */
6742void __weak arch_ftrace_update_trampoline(struct ftrace_ops *ops)
6743{
6744}
6745
6746static void ftrace_update_trampoline(struct ftrace_ops *ops)
6747{
6748        arch_ftrace_update_trampoline(ops);
6749}
6750
6751void ftrace_init_trace_array(struct trace_array *tr)
6752{
6753        INIT_LIST_HEAD(&tr->func_probes);
6754        INIT_LIST_HEAD(&tr->mod_trace);
6755        INIT_LIST_HEAD(&tr->mod_notrace);
6756}
6757#else
6758
6759static struct ftrace_ops global_ops = {
6760        .func                   = ftrace_stub,
6761        .flags                  = FTRACE_OPS_FL_RECURSION_SAFE |
6762                                  FTRACE_OPS_FL_INITIALIZED |
6763                                  FTRACE_OPS_FL_PID,
6764};
6765
6766static int __init ftrace_nodyn_init(void)
6767{
6768        ftrace_enabled = 1;
6769        return 0;
6770}
6771core_initcall(ftrace_nodyn_init);
6772
6773static inline int ftrace_init_dyn_tracefs(struct dentry *d_tracer) { return 0; }
6774static inline void ftrace_startup_enable(int command) { }
6775static inline void ftrace_startup_all(int command) { }
6776/* Keep as macros so we do not need to define the commands */
6777# define ftrace_startup(ops, command)                                   \
6778        ({                                                              \
6779                int ___ret = __register_ftrace_function(ops);           \
6780                if (!___ret)                                            \
6781                        (ops)->flags |= FTRACE_OPS_FL_ENABLED;          \
6782                ___ret;                                                 \
6783        })
6784# define ftrace_shutdown(ops, command)                                  \
6785        ({                                                              \
6786                int ___ret = __unregister_ftrace_function(ops);         \
6787                if (!___ret)                                            \
6788                        (ops)->flags &= ~FTRACE_OPS_FL_ENABLED;         \
6789                ___ret;                                                 \
6790        })
6791
6792# define ftrace_startup_sysctl()        do { } while (0)
6793# define ftrace_shutdown_sysctl()       do { } while (0)
6794
6795static inline int
6796ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip, void *regs)
6797{
6798        return 1;
6799}
6800
6801static void ftrace_update_trampoline(struct ftrace_ops *ops)
6802{
6803}
6804
6805#endif /* CONFIG_DYNAMIC_FTRACE */
6806
6807__init void ftrace_init_global_array_ops(struct trace_array *tr)
6808{
6809        tr->ops = &global_ops;
6810        tr->ops->private = tr;
6811        ftrace_init_trace_array(tr);
6812}
6813
6814void ftrace_init_array_ops(struct trace_array *tr, ftrace_func_t func)
6815{
6816        /* If we filter on pids, update to use the pid function */
6817        if (tr->flags & TRACE_ARRAY_FL_GLOBAL) {
6818                if (WARN_ON(tr->ops->func != ftrace_stub))
6819                        printk("ftrace ops had %pS for function\n",
6820                               tr->ops->func);
6821        }
6822        tr->ops->func = func;
6823        tr->ops->private = tr;
6824}
6825
6826void ftrace_reset_array_ops(struct trace_array *tr)
6827{
6828        tr->ops->func = ftrace_stub;
6829}
6830
6831static nokprobe_inline void
6832__ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
6833                       struct ftrace_ops *ignored, struct pt_regs *regs)
6834{
6835        struct ftrace_ops *op;
6836        int bit;
6837
6838        bit = trace_test_and_set_recursion(TRACE_LIST_START, TRACE_LIST_MAX);
6839        if (bit < 0)
6840                return;
6841
6842        /*
6843         * Some of the ops may be dynamically allocated;
6844         * they must be freed after a synchronize_rcu().
6845         */
6846        preempt_disable_notrace();
6847
6848        do_for_each_ftrace_op(op, ftrace_ops_list) {
6849                /*
6850                 * Check the following for each op before calling its func:
6851                 *  if the RCU flag is set, then rcu_is_watching() must
6852                 *                          be true
6853                 *  the ip must match the op's filter, as checked by
6854                 *                          ftrace_ops_test()
6855                 *
6856                 * If either of the above fails then op->func() is not executed.
6857                 */
6858                if ((!(op->flags & FTRACE_OPS_FL_RCU) || rcu_is_watching()) &&
6859                    ftrace_ops_test(op, ip, regs)) {
6860                        if (FTRACE_WARN_ON(!op->func)) {
6861                                pr_warn("op=%p %pS\n", op, op);
6862                                goto out;
6863                        }
6864                        op->func(ip, parent_ip, op, regs);
6865                }
6866        } while_for_each_ftrace_op(op);
6867out:
6868        preempt_enable_notrace();
6869        trace_clear_recursion(bit);
6870}
6871
6872/*
6873 * Some archs only support passing ip and parent_ip. Even though
6874 * the list function ignores the op parameter, we do not want any
6875 * C side effects, where a function is called without the caller
6876 * sending a third parameter.
6877 * Archs are to support both the regs and ftrace_ops at the same time.
6878 * If they support ftrace_ops, it is assumed they support regs.
6879 * If callbacks want to use regs, they must either check for regs
6880 * being NULL, or depend on CONFIG_DYNAMIC_FTRACE_WITH_REGS.
6881 * Note, CONFIG_DYNAMIC_FTRACE_WITH_REGS expects a full regs to be saved.
6882 * An architecture can pass partial regs with ftrace_ops and still
6883 * set the ARCH_SUPPORTS_FTRACE_OPS.
6884 */
6885#if ARCH_SUPPORTS_FTRACE_OPS
6886static void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
6887                                 struct ftrace_ops *op, struct pt_regs *regs)
6888{
6889        __ftrace_ops_list_func(ip, parent_ip, NULL, regs);
6890}
6891NOKPROBE_SYMBOL(ftrace_ops_list_func);
6892#else
6893static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip)
6894{
6895        __ftrace_ops_list_func(ip, parent_ip, NULL, NULL);
6896}
6897NOKPROBE_SYMBOL(ftrace_ops_no_ops);
6898#endif
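
/*
 * Illustrative sketch (not part of this file): a callback that wants to use
 * regs, written to cope with an arch that may pass NULL or partial regs as
 * described above.  The name my_regs_callback is hypothetical; like any
 * ftrace callback it must be notrace, and an ops that needs full registers
 * would also set FTRACE_OPS_FL_SAVE_REGS from linux/ftrace.h.
 *
 *        static void notrace my_regs_callback(unsigned long ip,
 *                                             unsigned long parent_ip,
 *                                             struct ftrace_ops *op,
 *                                             struct pt_regs *regs)
 *        {
 *                if (!regs)
 *                        return;
 *                trace_printk("func %pS pc=%lx\n",
 *                             (void *)ip, instruction_pointer(regs));
 *        }
 */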
6899
6900/*
6901 * If there's only one function registered but it does not support
6902 * recursion or needs RCU protection, then this function will be
6903 * called by the mcount trampoline instead of op->func() directly.
6904 */
6905static void ftrace_ops_assist_func(unsigned long ip, unsigned long parent_ip,
6906                                   struct ftrace_ops *op, struct pt_regs *regs)
6907{
6908        int bit;
6909
6910        if ((op->flags & FTRACE_OPS_FL_RCU) && !rcu_is_watching())
6911                return;
6912
6913        bit = trace_test_and_set_recursion(TRACE_LIST_START, TRACE_LIST_MAX);
6914        if (bit < 0)
6915                return;
6916
6917        preempt_disable_notrace();
6918
6919        op->func(ip, parent_ip, op, regs);
6920
6921        preempt_enable_notrace();
6922        trace_clear_recursion(bit);
6923}
6924NOKPROBE_SYMBOL(ftrace_ops_assist_func);
6925
6926/**
6927 * ftrace_ops_get_func - get the function a trampoline should call
6928 * @ops: the ops to get the function for
6929 *
6930 * Normally the mcount trampoline will call the ops->func, but there
6931 * are times that it should not. For example, if the ops does not
6932 * have its own recursion protection, then it should call the
6933 * ftrace_ops_assist_func() instead.
6934 *
6935 * Returns the function that the trampoline should call for @ops.
6936 */
6937ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops)
6938{
6939        /*
6940         * If the function does not handle its own recursion, or needs
6941         * to be RCU safe, then we need to call the assist handler.
6942         */
6943        if (!(ops->flags & FTRACE_OPS_FL_RECURSION_SAFE) ||
6944            ops->flags & FTRACE_OPS_FL_RCU)
6945                return ftrace_ops_assist_func;
6946
6947        return ops->func;
6948}
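
/*
 * Example (illustrative, names hypothetical): an ops that sets
 * FTRACE_OPS_FL_RCU but not FTRACE_OPS_FL_RECURSION_SAFE will have its calls
 * routed through ftrace_ops_assist_func() above, which does the recursion
 * and rcu_is_watching() checks before invoking .func:
 *
 *        static struct ftrace_ops my_rcu_ops = {
 *                .func  = my_callback,
 *                .flags = FTRACE_OPS_FL_RCU,
 *        };
 *
 *        ftrace_func_t entry = ftrace_ops_get_func(&my_rcu_ops);
 *
 * Here "entry" is ftrace_ops_assist_func(), not my_callback().
 */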
6949
6950static void
6951ftrace_filter_pid_sched_switch_probe(void *data, bool preempt,
6952                    struct task_struct *prev, struct task_struct *next)
6953{
6954        struct trace_array *tr = data;
6955        struct trace_pid_list *pid_list;
6956
6957        pid_list = rcu_dereference_sched(tr->function_pids);
6958
6959        this_cpu_write(tr->array_buffer.data->ftrace_ignore_pid,
6960                       trace_ignore_this_task(pid_list, next));
6961}
6962
6963static void
6964ftrace_pid_follow_sched_process_fork(void *data,
6965                                     struct task_struct *self,
6966                                     struct task_struct *task)
6967{
6968        struct trace_pid_list *pid_list;
6969        struct trace_array *tr = data;
6970
6971        pid_list = rcu_dereference_sched(tr->function_pids);
6972        trace_filter_add_remove_task(pid_list, self, task);
6973}
6974
6975static void
6976ftrace_pid_follow_sched_process_exit(void *data, struct task_struct *task)
6977{
6978        struct trace_pid_list *pid_list;
6979        struct trace_array *tr = data;
6980
6981        pid_list = rcu_dereference_sched(tr->function_pids);
6982        trace_filter_add_remove_task(pid_list, NULL, task);
6983}
6984
6985void ftrace_pid_follow_fork(struct trace_array *tr, bool enable)
6986{
6987        if (enable) {
6988                register_trace_sched_process_fork(ftrace_pid_follow_sched_process_fork,
6989                                                  tr);
6990                register_trace_sched_process_exit(ftrace_pid_follow_sched_process_exit,
6991                                                  tr);
6992        } else {
6993                unregister_trace_sched_process_fork(ftrace_pid_follow_sched_process_fork,
6994                                                    tr);
6995                unregister_trace_sched_process_exit(ftrace_pid_follow_sched_process_exit,
6996                                                    tr);
6997        }
6998}
6999
7000static void clear_ftrace_pids(struct trace_array *tr)
7001{
7002        struct trace_pid_list *pid_list;
7003        int cpu;
7004
7005        pid_list = rcu_dereference_protected(tr->function_pids,
7006                                             lockdep_is_held(&ftrace_lock));
7007        if (!pid_list)
7008                return;
7009
7010        unregister_trace_sched_switch(ftrace_filter_pid_sched_switch_probe, tr);
7011
7012        for_each_possible_cpu(cpu)
7013                per_cpu_ptr(tr->array_buffer.data, cpu)->ftrace_ignore_pid = false;
7014
7015        rcu_assign_pointer(tr->function_pids, NULL);
7016
7017        /* Wait till all users are no longer using pid filtering */
7018        synchronize_rcu();
7019
7020        trace_free_pid_list(pid_list);
7021}
7022
7023void ftrace_clear_pids(struct trace_array *tr)
7024{
7025        mutex_lock(&ftrace_lock);
7026
7027        clear_ftrace_pids(tr);
7028
7029        mutex_unlock(&ftrace_lock);
7030}
7031
7032static void ftrace_pid_reset(struct trace_array *tr)
7033{
7034        mutex_lock(&ftrace_lock);
7035        clear_ftrace_pids(tr);
7036
7037        ftrace_update_pid_func();
7038        ftrace_startup_all(0);
7039
7040        mutex_unlock(&ftrace_lock);
7041}
7042
7043/* Greater than any max PID */
7044#define FTRACE_NO_PIDS          (void *)(PID_MAX_LIMIT + 1)
7045
7046static void *fpid_start(struct seq_file *m, loff_t *pos)
7047        __acquires(RCU)
7048{
7049        struct trace_pid_list *pid_list;
7050        struct trace_array *tr = m->private;
7051
7052        mutex_lock(&ftrace_lock);
7053        rcu_read_lock_sched();
7054
7055        pid_list = rcu_dereference_sched(tr->function_pids);
7056
7057        if (!pid_list)
7058                return !(*pos) ? FTRACE_NO_PIDS : NULL;
7059
7060        return trace_pid_start(pid_list, pos);
7061}
7062
7063static void *fpid_next(struct seq_file *m, void *v, loff_t *pos)
7064{
7065        struct trace_array *tr = m->private;
7066        struct trace_pid_list *pid_list = rcu_dereference_sched(tr->function_pids);
7067
7068        if (v == FTRACE_NO_PIDS)
7069                return NULL;
7070
7071        return trace_pid_next(pid_list, v, pos);
7072}
7073
7074static void fpid_stop(struct seq_file *m, void *p)
7075        __releases(RCU)
7076{
7077        rcu_read_unlock_sched();
7078        mutex_unlock(&ftrace_lock);
7079}
7080
7081static int fpid_show(struct seq_file *m, void *v)
7082{
7083        if (v == FTRACE_NO_PIDS) {
7084                seq_puts(m, "no pid\n");
7085                return 0;
7086        }
7087
7088        return trace_pid_show(m, v);
7089}
7090
7091static const struct seq_operations ftrace_pid_sops = {
7092        .start = fpid_start,
7093        .next = fpid_next,
7094        .stop = fpid_stop,
7095        .show = fpid_show,
7096};
7097
7098static int
7099ftrace_pid_open(struct inode *inode, struct file *file)
7100{
7101        struct trace_array *tr = inode->i_private;
7102        struct seq_file *m;
7103        int ret = 0;
7104
7105        if (trace_array_get(tr) < 0)
7106                return -ENODEV;
7107
7108        if ((file->f_mode & FMODE_WRITE) &&
7109            (file->f_flags & O_TRUNC))
7110                ftrace_pid_reset(tr);
7111
7112        ret = seq_open(file, &ftrace_pid_sops);
7113        if (ret < 0) {
7114                trace_array_put(tr);
7115        } else {
7116                m = file->private_data;
7117                /* copy tr over to seq ops */
7118                m->private = tr;
7119        }
7120
7121        return ret;
7122}
7123
7124static void ignore_task_cpu(void *data)
7125{
7126        struct trace_array *tr = data;
7127        struct trace_pid_list *pid_list;
7128
7129        /*
7130         * This function is called by on_each_cpu() while the
7131         * ftrace_lock is held.
7132         */
7133        pid_list = rcu_dereference_protected(tr->function_pids,
7134                                             mutex_is_locked(&ftrace_lock));
7135
7136        this_cpu_write(tr->array_buffer.data->ftrace_ignore_pid,
7137                       trace_ignore_this_task(pid_list, current));
7138}
7139
7140static ssize_t
7141ftrace_pid_write(struct file *filp, const char __user *ubuf,
7142                   size_t cnt, loff_t *ppos)
7143{
7144        struct seq_file *m = filp->private_data;
7145        struct trace_array *tr = m->private;
7146        struct trace_pid_list *filtered_pids = NULL;
7147        struct trace_pid_list *pid_list;
7148        ssize_t ret;
7149
7150        if (!cnt)
7151                return 0;
7152
7153        mutex_lock(&ftrace_lock);
7154
7155        filtered_pids = rcu_dereference_protected(tr->function_pids,
7156                                             lockdep_is_held(&ftrace_lock));
7157
7158        ret = trace_pid_write(filtered_pids, &pid_list, ubuf, cnt);
7159        if (ret < 0)
7160                goto out;
7161
7162        rcu_assign_pointer(tr->function_pids, pid_list);
7163
7164        if (filtered_pids) {
7165                synchronize_rcu();
7166                trace_free_pid_list(filtered_pids);
7167        } else if (pid_list) {
7168                /* Register a probe to set whether to ignore the tracing of a task */
7169                register_trace_sched_switch(ftrace_filter_pid_sched_switch_probe, tr);
7170        }
7171
7172        /*
7173         * Ignoring of pids is done at task switch. But we have to
7174         * check for those tasks that are currently running.
7175         * Always do this in case a pid was appended or removed.
7176         */
7177        on_each_cpu(ignore_task_cpu, tr, 1);
7178
7179        ftrace_update_pid_func();
7180        ftrace_startup_all(0);
7181 out:
7182        mutex_unlock(&ftrace_lock);
7183
7184        if (ret > 0)
7185                *ppos += ret;
7186
7187        return ret;
7188}
7189
7190static int
7191ftrace_pid_release(struct inode *inode, struct file *file)
7192{
7193        struct trace_array *tr = inode->i_private;
7194
7195        trace_array_put(tr);
7196
7197        return seq_release(inode, file);
7198}
7199
7200static const struct file_operations ftrace_pid_fops = {
7201        .open           = ftrace_pid_open,
7202        .write          = ftrace_pid_write,
7203        .read           = seq_read,
7204        .llseek         = tracing_lseek,
7205        .release        = ftrace_pid_release,
7206};
7207
7208void ftrace_init_tracefs(struct trace_array *tr, struct dentry *d_tracer)
7209{
7210        trace_create_file("set_ftrace_pid", 0644, d_tracer,
7211                            tr, &ftrace_pid_fops);
7212}
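
/*
 * Usage sketch (illustrative, user space): writing a pid to the
 * set_ftrace_pid file created above restricts function tracing in this
 * instance to the listed tasks; opening the file for writing with O_TRUNC
 * resets the list via ftrace_pid_reset().  The tracefs mount point below is
 * an assumption (it is commonly /sys/kernel/tracing).
 *
 *        #include <stdio.h>
 *        #include <unistd.h>
 *
 *        int main(void)
 *        {
 *                FILE *f = fopen("/sys/kernel/tracing/set_ftrace_pid", "a");
 *
 *                if (!f)
 *                        return 1;
 *                fprintf(f, "%d\n", (int)getpid());
 *                return fclose(f) ? 1 : 0;
 *        }
 */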
7213
7214void __init ftrace_init_tracefs_toplevel(struct trace_array *tr,
7215                                         struct dentry *d_tracer)
7216{
7217        /* Only the top level directory has the dyn_tracefs and profile */
7218        WARN_ON(!(tr->flags & TRACE_ARRAY_FL_GLOBAL));
7219
7220        ftrace_init_dyn_tracefs(d_tracer);
7221        ftrace_profile_tracefs(d_tracer);
7222}
7223
7224/**
7225 * ftrace_kill - kill ftrace
7226 *
7227 * This function should be used by panic code. It stops ftrace
7228 * but in a not so nice way: it takes no locks and makes no
7229 * attempt to clean up, so it is safe to call from atomic context.
7230 */
7231void ftrace_kill(void)
7232{
7233        ftrace_disabled = 1;
7234        ftrace_enabled = 0;
7235        ftrace_trace_function = ftrace_stub;
7236}
7237
7238/**
7239 * ftrace_is_dead - Test if ftrace is dead or not.
7240 */
7241int ftrace_is_dead(void)
7242{
7243        return ftrace_disabled;
7244}
7245
7246/**
7247 * register_ftrace_function - register a function for profiling
7248 * @ops: ops structure that holds the function for profiling.
7249 *
7250 * Register a function to be called by all functions in the
7251 * kernel.
7252 *
7253 * Note: @ops->func and all the functions it calls must be labeled
7254 *       with "notrace", otherwise it will go into a
7255 *       recursive loop.
7256 */
7257int register_ftrace_function(struct ftrace_ops *ops)
7258{
7259        int ret = -1;
7260
7261        ftrace_ops_init(ops);
7262
7263        mutex_lock(&ftrace_lock);
7264
7265        ret = ftrace_startup(ops, 0);
7266
7267        mutex_unlock(&ftrace_lock);
7268
7269        return ret;
7270}
7271EXPORT_SYMBOL_GPL(register_ftrace_function);
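
/*
 * Minimal illustrative sketch (not part of this file): a module that
 * registers a callback for every traced function.  Names starting with
 * "my_" are hypothetical.  As noted above, the callback and everything it
 * calls must be notrace.  A filter may be installed on the ops with
 * ftrace_set_filter() before registering so the callback only runs for
 * selected functions.
 *
 *        #include <linux/ftrace.h>
 *        #include <linux/module.h>
 *
 *        static void notrace my_callback(unsigned long ip,
 *                                        unsigned long parent_ip,
 *                                        struct ftrace_ops *op,
 *                                        struct pt_regs *regs)
 *        {
 *        }
 *
 *        static struct ftrace_ops my_ops = {
 *                .func  = my_callback,
 *                .flags = FTRACE_OPS_FL_RECURSION_SAFE,
 *        };
 *
 *        static int __init my_init(void)
 *        {
 *                return register_ftrace_function(&my_ops);
 *        }
 *        module_init(my_init);
 *
 *        static void __exit my_exit(void)
 *        {
 *                unregister_ftrace_function(&my_ops);
 *        }
 *        module_exit(my_exit);
 *
 *        MODULE_LICENSE("GPL");
 */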
7272
7273/**
7274 * unregister_ftrace_function - unregister a function for profiling.
7275 * @ops: ops structure that holds the function to unregister
7276 *
7277 * Unregister a function that was added to be called by ftrace profiling.
7278 */
7279int unregister_ftrace_function(struct ftrace_ops *ops)
7280{
7281        int ret;
7282
7283        mutex_lock(&ftrace_lock);
7284        ret = ftrace_shutdown(ops, 0);
7285        mutex_unlock(&ftrace_lock);
7286
7287        return ret;
7288}
7289EXPORT_SYMBOL_GPL(unregister_ftrace_function);
7290
7291static bool is_permanent_ops_registered(void)
7292{
7293        struct ftrace_ops *op;
7294
7295        do_for_each_ftrace_op(op, ftrace_ops_list) {
7296                if (op->flags & FTRACE_OPS_FL_PERMANENT)
7297                        return true;
7298        } while_for_each_ftrace_op(op);
7299
7300        return false;
7301}
7302
7303int
7304ftrace_enable_sysctl(struct ctl_table *table, int write,
7305                     void __user *buffer, size_t *lenp,
7306                     loff_t *ppos)
7307{
7308        int ret = -ENODEV;
7309
7310        mutex_lock(&ftrace_lock);
7311
7312        if (unlikely(ftrace_disabled))
7313                goto out;
7314
7315        ret = proc_dointvec(table, write, buffer, lenp, ppos);
7316
7317        if (ret || !write || (last_ftrace_enabled == !!ftrace_enabled))
7318                goto out;
7319
7320        if (ftrace_enabled) {
7321
7322                /* we are starting ftrace again */
7323                if (rcu_dereference_protected(ftrace_ops_list,
7324                        lockdep_is_held(&ftrace_lock)) != &ftrace_list_end)
7325                        update_ftrace_function();
7326
7327                ftrace_startup_sysctl();
7328
7329        } else {
7330                if (is_permanent_ops_registered()) {
7331                        ftrace_enabled = true;
7332                        ret = -EBUSY;
7333                        goto out;
7334                }
7335
7336                /* stopping ftrace calls (just send to ftrace_stub) */
7337                ftrace_trace_function = ftrace_stub;
7338
7339                ftrace_shutdown_sysctl();
7340        }
7341
7342        last_ftrace_enabled = !!ftrace_enabled;
7343 out:
7344        mutex_unlock(&ftrace_lock);
7345        return ret;
7346}
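
/*
 * Usage sketch (illustrative, user space): ftrace_enable_sysctl() above
 * backs the kernel.ftrace_enabled sysctl, normally visible as
 * /proc/sys/kernel/ftrace_enabled.  Writing 0 disables all function
 * tracing, and the write fails with EBUSY while an ops with
 * FTRACE_OPS_FL_PERMANENT is registered.
 *
 *        #include <stdio.h>
 *
 *        int main(void)
 *        {
 *                FILE *f = fopen("/proc/sys/kernel/ftrace_enabled", "w");
 *
 *                if (!f)
 *                        return 1;
 *                fputs("0\n", f);
 *                return fclose(f) ? 1 : 0;
 *        }
 */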
7347
7348#ifdef CONFIG_FUNCTION_GRAPH_TRACER
7349
7350static struct ftrace_ops graph_ops = {
7351        .func                   = ftrace_stub,
7352        .flags                  = FTRACE_OPS_FL_RECURSION_SAFE |
7353                                   FTRACE_OPS_FL_INITIALIZED |
7354                                   FTRACE_OPS_FL_PID |
7355                                   FTRACE_OPS_FL_STUB,
7356#ifdef FTRACE_GRAPH_TRAMP_ADDR
7357        .trampoline             = FTRACE_GRAPH_TRAMP_ADDR,
7358        /* trampoline_size is only needed for dynamically allocated tramps */
7359#endif
7360        ASSIGN_OPS_HASH(graph_ops, &global_ops.local_hash)
7361};
7362
7363void ftrace_graph_sleep_time_control(bool enable)
7364{
7365        fgraph_sleep_time = enable;
7366}
7367
7368void ftrace_graph_graph_time_control(bool enable)
7369{
7370        fgraph_graph_time = enable;
7371}
7372
7373int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
7374{
7375        return 0;
7376}
7377
7378/* The callbacks that hook a function */
7379trace_func_graph_ret_t ftrace_graph_return =
7380                        (trace_func_graph_ret_t)ftrace_stub;
7381trace_func_graph_ent_t ftrace_graph_entry = ftrace_graph_entry_stub;
7382static trace_func_graph_ent_t __ftrace_graph_entry = ftrace_graph_entry_stub;
7383
7384/* Try to assign a return stack array on FTRACE_RETSTACK_ALLOC_SIZE tasks. */
7385static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
7386{
7387        int i;
7388        int ret = 0;
7389        int start = 0, end = FTRACE_RETSTACK_ALLOC_SIZE;
7390        struct task_struct *g, *t;
7391
7392        for (i = 0; i < FTRACE_RETSTACK_ALLOC_SIZE; i++) {
7393                ret_stack_list[i] =
7394                        kmalloc_array(FTRACE_RETFUNC_DEPTH,
7395                                      sizeof(struct ftrace_ret_stack),
7396                                      GFP_KERNEL);
7397                if (!ret_stack_list[i]) {
7398                        start = 0;
7399                        end = i;
7400                        ret = -ENOMEM;
7401                        goto free;
7402                }
7403        }
7404
7405        read_lock(&tasklist_lock);
7406        do_each_thread(g, t) {
7407                if (start == end) {
7408                        ret = -EAGAIN;
7409                        goto unlock;
7410                }
7411
7412                if (t->ret_stack == NULL) {
7413                        atomic_set(&t->tracing_graph_pause, 0);
7414                        atomic_set(&t->trace_overrun, 0);
7415                        t->curr_ret_stack = -1;
7416                        /* Make sure the tasks see the -1 first: */
7417                        smp_wmb();
7418                        t->ret_stack = ret_stack_list[start++];
7419                }
7420        } while_each_thread(g, t);
7421
7422unlock:
7423        read_unlock(&tasklist_lock);
7424free:
7425        for (i = start; i < end; i++)
7426                kfree(ret_stack_list[i]);
7427        return ret;
7428}
7429
7430static void
7431ftrace_graph_probe_sched_switch(void *ignore, bool preempt,
7432                        struct task_struct *prev, struct task_struct *next)
7433{
7434        unsigned long long timestamp;
7435        int index;
7436
7437        /*
7438         * If the user wants to count the time a function was asleep
7439         * in its duration, do not update the time stamps.
7440         */
7441        if (fgraph_sleep_time)
7442                return;
7443
7444        timestamp = trace_clock_local();
7445
7446        prev->ftrace_timestamp = timestamp;
7447
7448        /* only process tasks that we timestamped */
7449        if (!next->ftrace_timestamp)
7450                return;
7451
7452        /*
7453         * Update all the counters in next to make up for the
7454         * time next was sleeping.
7455         */
7456        timestamp -= next->ftrace_timestamp;
7457
7458        for (index = next->curr_ret_stack; index >= 0; index--)
7459                next->ret_stack[index].calltime += timestamp;
7460}
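
/*
 * Worked example (illustrative): if "next" was switched out for
 * 2,000,000 ns, every pending entry on its ret_stack gets
 * calltime += 2,000,000, so the duration ultimately reported by the
 * graph tracer (rettime - calltime) excludes the time spent asleep.
 */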
7461
7462/* Allocate a return stack for each task */
7463static int start_graph_tracing(void)
7464{
7465        struct ftrace_ret_stack **ret_stack_list;
7466        int ret, cpu;
7467
7468        ret_stack_list = kmalloc_array(FTRACE_RETSTACK_ALLOC_SIZE,
7469                                       sizeof(struct ftrace_ret_stack *),
7470                                       GFP_KERNEL);
7471
7472        if (!ret_stack_list)
7473                return -ENOMEM;
7474
7475        /* The cpu_boot init_task->ret_stack will never be freed */
7476        for_each_online_cpu(cpu) {
7477                if (!idle_task(cpu)->ret_stack)
7478                        ftrace_graph_init_idle_task(idle_task(cpu), cpu);
7479        }
7480
7481        do {
7482                ret = alloc_retstack_tasklist(ret_stack_list);
7483        } while (ret == -EAGAIN);
7484
7485        if (!ret) {
7486                ret = register_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
7487                if (ret)
7488                        pr_info("ftrace_graph: Couldn't activate tracepoint"
7489                                " probe to kernel_sched_switch\n");
7490        }
7491
7492        kfree(ret_stack_list);
7493        return ret;
7494}
7495
7496/*
7497 * Hibernation protection.
7498 * The state of the current task is too unstable during
7499 * suspend/restore to disk. We want to protect against that.
7500 */
7501static int
7502ftrace_suspend_notifier_call(struct notifier_block *bl, unsigned long state,
7503                                                        void *unused)
7504{
7505        switch (state) {
7506        case PM_HIBERNATION_PREPARE:
7507                pause_graph_tracing();
7508                break;
7509
7510        case PM_POST_HIBERNATION:
7511                unpause_graph_tracing();
7512                break;
7513        }
7514        return NOTIFY_DONE;
7515}
7516
7517static int ftrace_graph_entry_test(struct ftrace_graph_ent *trace)
7518{
7519        if (!ftrace_ops_test(&global_ops, trace->func, NULL))
7520                return 0;
7521        return __ftrace_graph_entry(trace);
7522}
7523
7524/*
7525 * The function graph tracer should only trace the functions defined
7526 * by set_ftrace_filter and set_ftrace_notrace. If another function
7527 * tracer ops is registered, the graph tracer must test each function
7528 * against the global ops, and not simply trace every function that
7529 * any ftrace_ops has registered.
7530 */
7531static void update_function_graph_func(void)
7532{
7533        struct ftrace_ops *op;
7534        bool do_test = false;
7535
7536        /*
7537         * The graph and global ops share the same set of functions
7538         * to test. If any other ops is on the list, then graph
7539         * tracing needs to test whether the function is one that
7540         * it should actually call.
7541         */
7542        do_for_each_ftrace_op(op, ftrace_ops_list) {
7543                if (op != &global_ops && op != &graph_ops &&
7544                    op != &ftrace_list_end) {
7545                        do_test = true;
7546                        /* in double loop, break out with goto */
7547                        goto out;
7548                }
7549        } while_for_each_ftrace_op(op);
7550 out:
7551        if (do_test)
7552                ftrace_graph_entry = ftrace_graph_entry_test;
7553        else
7554                ftrace_graph_entry = __ftrace_graph_entry;
7555}
7556
7557static struct notifier_block ftrace_suspend_notifier = {
7558        .notifier_call = ftrace_suspend_notifier_call,
7559};
7560
7561int register_ftrace_graph(trace_func_graph_ret_t retfunc,
7562                        trace_func_graph_ent_t entryfunc)
7563{
7564        int ret = 0;
7565
7566        mutex_lock(&ftrace_lock);
7567
7568        /* we currently allow only one tracer registered at a time */
7569        if (ftrace_graph_active) {
7570                ret = -EBUSY;
7571                goto out;
7572        }
7573
7574        register_pm_notifier(&ftrace_suspend_notifier);
7575
7576        ftrace_graph_active++;
7577        ret = start_graph_tracing();
7578        if (ret) {
7579                ftrace_graph_active--;
7580                goto out;
7581        }
7582
7583        ftrace_graph_return = retfunc;
7584
7585        /*
7586         * Set the indirect function (__ftrace_graph_entry) to entryfunc,
7587         * and the function that actually gets called to the entry test
7588         * first. Then call update_function_graph_func() to determine if
7589         * entryfunc can be called directly or must go through the test.
7590         */
7591        __ftrace_graph_entry = entryfunc;
7592        ftrace_graph_entry = ftrace_graph_entry_test;
7593        update_function_graph_func();
7594
7595        ret = ftrace_startup(&graph_ops, FTRACE_START_FUNC_RET);
7596out:
7597        mutex_unlock(&ftrace_lock);
7598        return ret;
7599}
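
/*
 * Illustrative sketch (names hypothetical): registering entry/return
 * callbacks with the signatures used above.  A nonzero return from the
 * entry callback tells the graph tracer to trace that function and hook
 * its return; only one graph tracer may be registered at a time, as
 * enforced by the -EBUSY check above.
 *
 *        static int notrace my_graph_entry(struct ftrace_graph_ent *trace)
 *        {
 *                return 1;
 *        }
 *
 *        static void notrace my_graph_return(struct ftrace_graph_ret *trace)
 *        {
 *        }
 *
 * An in-kernel tracer would then call
 *
 *        ret = register_ftrace_graph(my_graph_return, my_graph_entry);
 *
 * and later unregister_ftrace_graph() to tear the hooks back down.
 */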
7600
7601void unregister_ftrace_graph(void)
7602{
7603        mutex_lock(&ftrace_lock);
7604
7605        if (unlikely(!ftrace_graph_active))
7606                goto out;
7607
7608        ftrace_graph_active--;
7609        ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub;
7610        ftrace_graph_entry = ftrace_graph_entry_stub;
7611        __ftrace_graph_entry = ftrace_graph_entry_stub;
7612        ftrace_shutdown(&graph_ops, FTRACE_STOP_FUNC_RET);
7613        unregister_pm_notifier(&ftrace_suspend_notifier);
7614        unregister_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
7615
7616 out:
7617        mutex_unlock(&ftrace_lock);
7618}
7619
7620static DEFINE_PER_CPU(struct ftrace_ret_stack *, idle_ret_stack);
7621
7622static void
7623graph_init_task(struct task_struct *t, struct ftrace_ret_stack *ret_stack)
7624{
7625        atomic_set(&t->tracing_graph_pause, 0);
7626        atomic_set(&t->trace_overrun, 0);
7627        t->ftrace_timestamp = 0;
7628        /* make curr_ret_stack visible before we add the ret_stack */
7629        smp_wmb();
7630        t->ret_stack = ret_stack;
7631}
7632
7633/*
7634 * Allocate a return stack for the idle task. May be the first
7635 * time through, or it may be done by CPU hotplug online.
7636 */
7637void ftrace_graph_init_idle_task(struct task_struct *t, int cpu)
7638{
7639        t->curr_ret_stack = -1;
7640        /*
7641         * The idle task has no parent, it either has its own
7642         * stack or no stack at all.
7643         */
7644        if (t->ret_stack)
7645                WARN_ON(t->ret_stack != per_cpu(idle_ret_stack, cpu));
7646
7647        if (ftrace_graph_active) {
7648                struct ftrace_ret_stack *ret_stack;
7649
7650                ret_stack = per_cpu(idle_ret_stack, cpu);
7651                if (!ret_stack) {
7652                        ret_stack =
7653                                kmalloc_array(FTRACE_RETFUNC_DEPTH,
7654                                              sizeof(struct ftrace_ret_stack),
7655                                              GFP_KERNEL);
7656                        if (!ret_stack)
7657                                return;
7658                        per_cpu(idle_ret_stack, cpu) = ret_stack;
7659                }
7660                graph_init_task(t, ret_stack);
7661        }
7662}
7663
7664/* Allocate a return stack for newly created task */
7665void ftrace_graph_init_task(struct task_struct *t)
7666{
7667        /* Make sure we do not use the parent ret_stack */
7668        t->ret_stack = NULL;
7669        t->curr_ret_stack = -1;
7670
7671        if (ftrace_graph_active) {
7672                struct ftrace_ret_stack *ret_stack;
7673
7674                ret_stack = kmalloc_array(FTRACE_RETFUNC_DEPTH,
7675                                          sizeof(struct ftrace_ret_stack),
7676                                          GFP_KERNEL);
7677                if (!ret_stack)
7678                        return;
7679                graph_init_task(t, ret_stack);
7680        }
7681}
7682
7683void ftrace_graph_exit_task(struct task_struct *t)
7684{
7685        struct ftrace_ret_stack *ret_stack = t->ret_stack;
7686
7687        t->ret_stack = NULL;
7688        /* NULL must become visible to IRQs before we free it: */
7689        barrier();
7690
7691        kfree(ret_stack);
7692}
7693#endif
7694