linux/kernel/trace/ftrace.c
/*
 * Infrastructure for profiling code inserted by 'gcc -pg'.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally ported from the -rt patch by:
 *   Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code in the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */

#include <linux/stop_machine.h>
#include <linux/clocksource.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/suspend.h>
#include <linux/tracefs.h>
#include <linux/hardirq.h>
#include <linux/kthread.h>
#include <linux/uaccess.h>
#include <linux/bsearch.h>
#include <linux/module.h>
#include <linux/ftrace.h>
#include <linux/sysctl.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/sort.h>
#include <linux/list.h>
#include <linux/hash.h>
#include <linux/rcupdate.h>

#include <trace/events/sched.h>

#include <asm/setup.h>

#include "trace_output.h"
#include "trace_stat.h"

#define FTRACE_WARN_ON(cond)                    \
        ({                                      \
                int ___r = cond;                \
                if (WARN_ON(___r))              \
                        ftrace_kill();          \
                ___r;                           \
        })

#define FTRACE_WARN_ON_ONCE(cond)               \
        ({                                      \
                int ___r = cond;                \
                if (WARN_ON_ONCE(___r))         \
                        ftrace_kill();          \
                ___r;                           \
        })
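
/*
 * Both macros are statement expressions that evaluate to the tested
 * condition, so they can be used inline. Illustrative sketch (not a
 * line from this file):
 *
 *	if (FTRACE_WARN_ON(!rec))
 *		return;		/* ftrace has been killed, bail out */
 */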

/* hash bits for specific function selection */
#define FTRACE_HASH_BITS 7
#define FTRACE_FUNC_HASHSIZE (1 << FTRACE_HASH_BITS)
#define FTRACE_HASH_DEFAULT_BITS 10
#define FTRACE_HASH_MAX_BITS 12

#define FL_GLOBAL_CONTROL_MASK (FTRACE_OPS_FL_CONTROL)

#ifdef CONFIG_DYNAMIC_FTRACE
#define INIT_OPS_HASH(opsname)  \
        .func_hash              = &opsname.local_hash,                  \
        .local_hash.regex_lock  = __MUTEX_INITIALIZER(opsname.local_hash.regex_lock),
#define ASSIGN_OPS_HASH(opsname, val) \
        .func_hash              = val, \
        .local_hash.regex_lock  = __MUTEX_INITIALIZER(opsname.local_hash.regex_lock),
#else
#define INIT_OPS_HASH(opsname)
#define ASSIGN_OPS_HASH(opsname, val)
#endif

static struct ftrace_ops ftrace_list_end __read_mostly = {
        .func           = ftrace_stub,
        .flags          = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_STUB,
        INIT_OPS_HASH(ftrace_list_end)
};

/* ftrace_enabled is a method to turn ftrace on or off */
int ftrace_enabled __read_mostly;
static int last_ftrace_enabled;

/* Current function tracing op */
struct ftrace_ops *function_trace_op __read_mostly = &ftrace_list_end;
/* What to set function_trace_op to */
static struct ftrace_ops *set_function_trace_op;

/* List for set_ftrace_pid's pids. */
LIST_HEAD(ftrace_pids);
struct ftrace_pid {
        struct list_head list;
        struct pid *pid;
};

static bool ftrace_pids_enabled(void)
{
        return !list_empty(&ftrace_pids);
}

static void ftrace_update_trampoline(struct ftrace_ops *ops);

/*
 * ftrace_disabled is set when an anomaly is discovered.
 * ftrace_disabled is much stronger than ftrace_enabled.
 */
static int ftrace_disabled __read_mostly;

static DEFINE_MUTEX(ftrace_lock);

static struct ftrace_ops *ftrace_control_list __read_mostly = &ftrace_list_end;
static struct ftrace_ops *ftrace_ops_list __read_mostly = &ftrace_list_end;
ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
static struct ftrace_ops global_ops;
static struct ftrace_ops control_ops;

static void ftrace_ops_recurs_func(unsigned long ip, unsigned long parent_ip,
                                   struct ftrace_ops *op, struct pt_regs *regs);

#if ARCH_SUPPORTS_FTRACE_OPS
static void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
                                 struct ftrace_ops *op, struct pt_regs *regs);
#else
/* See comment below, where ftrace_ops_list_func is defined */
static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip);
#define ftrace_ops_list_func ((ftrace_func_t)ftrace_ops_no_ops)
#endif

/*
 * Traverse the ftrace_global_list, invoking all entries.  The reason that we
 * can use rcu_dereference_raw_notrace() is that elements removed from this list
 * are simply leaked, so there is no need to interact with a grace-period
 * mechanism.  The rcu_dereference_raw_notrace() calls are needed to handle
 * concurrent insertions into the ftrace_global_list.
 *
 * Silly Alpha and silly pointer-speculation compiler optimizations!
 */
#define do_for_each_ftrace_op(op, list)                 \
        op = rcu_dereference_raw_notrace(list);                 \
        do

/*
 * Optimized for just a single item in the list (as that is the normal case).
 */
#define while_for_each_ftrace_op(op)                            \
        while (likely(op = rcu_dereference_raw_notrace((op)->next)) &&  \
               unlikely((op) != &ftrace_list_end))
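
/*
 * Usage sketch (mirrors how these macros are used later in this file,
 * e.g. in ftrace_update_pid_func()):
 *
 *	do_for_each_ftrace_op(op, ftrace_ops_list) {
 *		if (op->flags & FTRACE_OPS_FL_ENABLED)
 *			...;
 *	} while_for_each_ftrace_op(op);
 */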

static inline void ftrace_ops_init(struct ftrace_ops *ops)
{
#ifdef CONFIG_DYNAMIC_FTRACE
        if (!(ops->flags & FTRACE_OPS_FL_INITIALIZED)) {
                mutex_init(&ops->local_hash.regex_lock);
                ops->func_hash = &ops->local_hash;
                ops->flags |= FTRACE_OPS_FL_INITIALIZED;
        }
#endif
}

/**
 * ftrace_nr_registered_ops - return number of ops registered
 *
 * Returns the number of ftrace_ops registered and tracing functions
 */
int ftrace_nr_registered_ops(void)
{
        struct ftrace_ops *ops;
        int cnt = 0;

        mutex_lock(&ftrace_lock);

        for (ops = ftrace_ops_list;
             ops != &ftrace_list_end; ops = ops->next)
                cnt++;

        mutex_unlock(&ftrace_lock);

        return cnt;
}

static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip,
                            struct ftrace_ops *op, struct pt_regs *regs)
{
        if (!test_tsk_trace_trace(current))
                return;

        op->saved_func(ip, parent_ip, op, regs);
}

/**
 * clear_ftrace_function - reset the ftrace function
 *
 * This NULLs the ftrace function and in essence stops
 * tracing.  There may be lag before all CPUs stop calling
 * the old function.
 */
void clear_ftrace_function(void)
{
        ftrace_trace_function = ftrace_stub;
}

static void control_ops_disable_all(struct ftrace_ops *ops)
{
        int cpu;

        for_each_possible_cpu(cpu)
                *per_cpu_ptr(ops->disabled, cpu) = 1;
}

static int control_ops_alloc(struct ftrace_ops *ops)
{
        int __percpu *disabled;

        disabled = alloc_percpu(int);
        if (!disabled)
                return -ENOMEM;

        ops->disabled = disabled;
        control_ops_disable_all(ops);
        return 0;
}

static void ftrace_sync(struct work_struct *work)
{
        /*
         * This function is just a stub to implement a hard force
         * of synchronize_sched(). This requires synchronizing
         * tasks even in userspace and idle.
         *
         * Yes, function tracing is rude.
         */
}

static void ftrace_sync_ipi(void *data)
{
        /* Probably not needed, but do it anyway */
        smp_rmb();
}

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static void update_function_graph_func(void);
#else
static inline void update_function_graph_func(void) { }
#endif


static ftrace_func_t ftrace_ops_get_list_func(struct ftrace_ops *ops)
{
        /*
         * If this is a dynamic ops or we force list func,
         * then it needs to call the list anyway.
         */
        if (ops->flags & FTRACE_OPS_FL_DYNAMIC || FTRACE_FORCE_LIST_FUNC)
                return ftrace_ops_list_func;

        return ftrace_ops_get_func(ops);
}

static void update_ftrace_function(void)
{
        ftrace_func_t func;

        /*
         * Prepare the ftrace_ops that the arch callback will use.
         * If there's only one ftrace_ops registered, the ftrace_ops_list
         * will point to the ops we want.
         */
        set_function_trace_op = ftrace_ops_list;

        /* If there's no ftrace_ops registered, just call the stub function */
        if (ftrace_ops_list == &ftrace_list_end) {
                func = ftrace_stub;

        /*
         * If we are at the end of the list and this ops is
         * recursion safe and not dynamic and the arch supports passing ops,
         * then have the mcount trampoline call the function directly.
         */
        } else if (ftrace_ops_list->next == &ftrace_list_end) {
                func = ftrace_ops_get_list_func(ftrace_ops_list);

        } else {
                /* Just use the default ftrace_ops */
                set_function_trace_op = &ftrace_list_end;
                func = ftrace_ops_list_func;
        }

        update_function_graph_func();

        /* If there's no change, then do nothing more here */
        if (ftrace_trace_function == func)
                return;

        /*
         * If we are using the list function, it doesn't care
         * about the function_trace_ops.
         */
        if (func == ftrace_ops_list_func) {
                ftrace_trace_function = func;
                /*
                 * Don't even bother setting function_trace_ops,
                 * it would be racy to do so anyway.
                 */
                return;
        }

#ifndef CONFIG_DYNAMIC_FTRACE
        /*
         * For static tracing, we need to be a bit more careful.
         * The function change takes effect immediately. Thus,
         * we need to coordinate the setting of the function_trace_ops
         * with the setting of the ftrace_trace_function.
         *
         * Set the function to the list ops, which will call the
         * function we want, albeit indirectly, but it handles the
         * ftrace_ops and doesn't depend on function_trace_op.
         */
        ftrace_trace_function = ftrace_ops_list_func;
        /*
         * Make sure all CPUs see this. Yes this is slow, but static
         * tracing is slow and nasty to have enabled.
         */
        schedule_on_each_cpu(ftrace_sync);
        /* Now all cpus are using the list ops. */
        function_trace_op = set_function_trace_op;
        /* Make sure the function_trace_op is visible on all CPUs */
        smp_wmb();
        /* Nasty way to force a rmb on all cpus */
        smp_call_function(ftrace_sync_ipi, NULL, 1);
        /* OK, we are all set to update the ftrace_trace_function now! */
#endif /* !CONFIG_DYNAMIC_FTRACE */

        ftrace_trace_function = func;
}

int using_ftrace_ops_list_func(void)
{
        return ftrace_trace_function == ftrace_ops_list_func;
}

static void add_ftrace_ops(struct ftrace_ops **list, struct ftrace_ops *ops)
{
        ops->next = *list;
        /*
         * We are entering ops into the list but another
         * CPU might be walking that list. We need to make sure
         * the ops->next pointer is valid before another CPU sees
         * the ops pointer included into the list.
         */
        rcu_assign_pointer(*list, ops);
}

static int remove_ftrace_ops(struct ftrace_ops **list, struct ftrace_ops *ops)
{
        struct ftrace_ops **p;

        /*
         * If we are removing the last function, then simply point
         * to the ftrace_stub.
         */
        if (*list == ops && ops->next == &ftrace_list_end) {
                *list = &ftrace_list_end;
                return 0;
        }

        for (p = list; *p != &ftrace_list_end; p = &(*p)->next)
                if (*p == ops)
                        break;

        if (*p != ops)
                return -1;

        *p = (*p)->next;
        return 0;
}

static void add_ftrace_list_ops(struct ftrace_ops **list,
                                struct ftrace_ops *main_ops,
                                struct ftrace_ops *ops)
{
        int first = *list == &ftrace_list_end;
        add_ftrace_ops(list, ops);
        if (first)
                add_ftrace_ops(&ftrace_ops_list, main_ops);
}

static int remove_ftrace_list_ops(struct ftrace_ops **list,
                                  struct ftrace_ops *main_ops,
                                  struct ftrace_ops *ops)
{
        int ret = remove_ftrace_ops(list, ops);
        if (!ret && *list == &ftrace_list_end)
                ret = remove_ftrace_ops(&ftrace_ops_list, main_ops);
        return ret;
}

static void ftrace_update_trampoline(struct ftrace_ops *ops);

static int __register_ftrace_function(struct ftrace_ops *ops)
{
        if (ops->flags & FTRACE_OPS_FL_DELETED)
                return -EINVAL;

        if (WARN_ON(ops->flags & FTRACE_OPS_FL_ENABLED))
                return -EBUSY;

#ifndef CONFIG_DYNAMIC_FTRACE_WITH_REGS
        /*
         * If the ftrace_ops specifies SAVE_REGS, then it only can be used
         * if the arch supports it, or SAVE_REGS_IF_SUPPORTED is also set.
         * Setting SAVE_REGS_IF_SUPPORTED makes SAVE_REGS irrelevant.
         */
        if (ops->flags & FTRACE_OPS_FL_SAVE_REGS &&
            !(ops->flags & FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED))
                return -EINVAL;

        if (ops->flags & FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED)
                ops->flags |= FTRACE_OPS_FL_SAVE_REGS;
#endif

        if (!core_kernel_data((unsigned long)ops))
                ops->flags |= FTRACE_OPS_FL_DYNAMIC;

        if (ops->flags & FTRACE_OPS_FL_CONTROL) {
                if (control_ops_alloc(ops))
                        return -ENOMEM;
                add_ftrace_list_ops(&ftrace_control_list, &control_ops, ops);
                /* The control_ops needs the trampoline update */
                ops = &control_ops;
        } else
                add_ftrace_ops(&ftrace_ops_list, ops);

        /* Always save the function, and reset at unregistering */
        ops->saved_func = ops->func;

        if (ops->flags & FTRACE_OPS_FL_PID && ftrace_pids_enabled())
                ops->func = ftrace_pid_func;

        ftrace_update_trampoline(ops);

        if (ftrace_enabled)
                update_ftrace_function();

        return 0;
}
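
/*
 * Illustrative sketch (not part of this file): callers reach this path
 * through the public register_ftrace_function() API (not shown in this
 * excerpt). A minimal caller might look like:
 *
 *	static void my_callback(unsigned long ip, unsigned long parent_ip,
 *				struct ftrace_ops *op, struct pt_regs *regs)
 *	{
 *		// called on entry of every traced function
 *	}
 *
 *	static struct ftrace_ops my_ops = {
 *		.func  = my_callback,
 *		.flags = FTRACE_OPS_FL_RECURSION_SAFE,
 *	};
 *
 *	register_ftrace_function(&my_ops);
 */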

static int __unregister_ftrace_function(struct ftrace_ops *ops)
{
        int ret;

        if (WARN_ON(!(ops->flags & FTRACE_OPS_FL_ENABLED)))
                return -EBUSY;

        if (ops->flags & FTRACE_OPS_FL_CONTROL) {
                ret = remove_ftrace_list_ops(&ftrace_control_list,
                                             &control_ops, ops);
        } else
                ret = remove_ftrace_ops(&ftrace_ops_list, ops);

        if (ret < 0)
                return ret;

        if (ftrace_enabled)
                update_ftrace_function();

        ops->func = ops->saved_func;

        return 0;
}

static void ftrace_update_pid_func(void)
{
        bool enabled = ftrace_pids_enabled();
        struct ftrace_ops *op;

        /* Only do something if we are tracing something */
        if (ftrace_trace_function == ftrace_stub)
                return;

        do_for_each_ftrace_op(op, ftrace_ops_list) {
                if (op->flags & FTRACE_OPS_FL_PID) {
                        op->func = enabled ? ftrace_pid_func :
                                op->saved_func;
                        ftrace_update_trampoline(op);
                }
        } while_for_each_ftrace_op(op);

        update_ftrace_function();
}

#ifdef CONFIG_FUNCTION_PROFILER
struct ftrace_profile {
        struct hlist_node               node;
        unsigned long                   ip;
        unsigned long                   counter;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
        unsigned long long              time;
        unsigned long long              time_squared;
#endif
};

struct ftrace_profile_page {
        struct ftrace_profile_page      *next;
        unsigned long                   index;
        struct ftrace_profile           records[];
};

struct ftrace_profile_stat {
        atomic_t                        disabled;
        struct hlist_head               *hash;
        struct ftrace_profile_page      *pages;
        struct ftrace_profile_page      *start;
        struct tracer_stat              stat;
};

#define PROFILE_RECORDS_SIZE                                            \
        (PAGE_SIZE - offsetof(struct ftrace_profile_page, records))

#define PROFILES_PER_PAGE                                       \
        (PROFILE_RECORDS_SIZE / sizeof(struct ftrace_profile))
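
/*
 * Back-of-the-envelope (assumes a 4 KiB page and a 64-bit build with
 * CONFIG_FUNCTION_GRAPH_TRACER): the page header (next + index) is 16
 * bytes and each record is roughly 48 bytes (hlist_node + ip + counter
 * + time + time_squared), so PROFILES_PER_PAGE works out to about
 * (4096 - 16) / 48 = 85 records per page.
 */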

static int ftrace_profile_enabled __read_mostly;

/* ftrace_profile_lock - synchronize the enable and disable of the profiler */
static DEFINE_MUTEX(ftrace_profile_lock);

static DEFINE_PER_CPU(struct ftrace_profile_stat, ftrace_profile_stats);

#define FTRACE_PROFILE_HASH_BITS 10
#define FTRACE_PROFILE_HASH_SIZE (1 << FTRACE_PROFILE_HASH_BITS)

static void *
function_stat_next(void *v, int idx)
{
        struct ftrace_profile *rec = v;
        struct ftrace_profile_page *pg;

        pg = (struct ftrace_profile_page *)((unsigned long)rec & PAGE_MASK);

 again:
        if (idx != 0)
                rec++;

        if ((void *)rec >= (void *)&pg->records[pg->index]) {
                pg = pg->next;
                if (!pg)
                        return NULL;
                rec = &pg->records[0];
                if (!rec->counter)
                        goto again;
        }

        return rec;
}

static void *function_stat_start(struct tracer_stat *trace)
{
        struct ftrace_profile_stat *stat =
                container_of(trace, struct ftrace_profile_stat, stat);

        if (!stat || !stat->start)
                return NULL;

        return function_stat_next(&stat->start->records[0], 0);
}

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/* function graph compares on total time */
static int function_stat_cmp(void *p1, void *p2)
{
        struct ftrace_profile *a = p1;
        struct ftrace_profile *b = p2;

        if (a->time < b->time)
                return -1;
        if (a->time > b->time)
                return 1;
        else
                return 0;
}
#else
/* not function graph compares against hits */
static int function_stat_cmp(void *p1, void *p2)
{
        struct ftrace_profile *a = p1;
        struct ftrace_profile *b = p2;

        if (a->counter < b->counter)
                return -1;
        if (a->counter > b->counter)
                return 1;
        else
                return 0;
}
#endif

static int function_stat_headers(struct seq_file *m)
{
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
        seq_puts(m, "  Function                               "
                 "Hit    Time            Avg             s^2\n"
                    "  --------                               "
                 "---    ----            ---             ---\n");
#else
        seq_puts(m, "  Function                               Hit\n"
                    "  --------                               ---\n");
#endif
        return 0;
}

static int function_stat_show(struct seq_file *m, void *v)
{
        struct ftrace_profile *rec = v;
        char str[KSYM_SYMBOL_LEN];
        int ret = 0;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
        static struct trace_seq s;
        unsigned long long avg;
        unsigned long long stddev;
#endif
        mutex_lock(&ftrace_profile_lock);

        /* we raced with function_profile_reset() */
        if (unlikely(rec->counter == 0)) {
                ret = -EBUSY;
                goto out;
        }

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
        avg = rec->time;
        do_div(avg, rec->counter);
        if (tracing_thresh && (avg < tracing_thresh))
                goto out;
#endif

        kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
        seq_printf(m, "  %-30.30s  %10lu", str, rec->counter);

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
        seq_puts(m, "    ");

        /* Sample variance (s^2) */
        if (rec->counter <= 1)
                stddev = 0;
        else {
                /*
                 * Apply Welford's method:
                 * s^2 = 1 / (n * (n-1)) * (n * \Sum (x_i)^2 - (\Sum x_i)^2)
                 */
                stddev = rec->counter * rec->time_squared -
                         rec->time * rec->time;

                /*
                 * Divide only by 1000 for the ns^2 -> us^2 conversion;
                 * trace_print_graph_duration will divide by 1000 again.
                 */
                do_div(stddev, rec->counter * (rec->counter - 1) * 1000);
        }
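        /*
         * Worked example (illustrative): with n = 2 samples of 1000 ns
         * and 3000 ns, time = 4000 and time_squared = 10,000,000, so the
         * numerator is 2 * 10e6 - 16e6 = 4e6 ns^2. Dividing by
         * n * (n-1) * 1000 = 2000 gives 2000, and the final /1000 in
         * trace_print_graph_duration prints s^2 = 2 (us^2).
         */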

        trace_seq_init(&s);
        trace_print_graph_duration(rec->time, &s);
        trace_seq_puts(&s, "    ");
        trace_print_graph_duration(avg, &s);
        trace_seq_puts(&s, "    ");
        trace_print_graph_duration(stddev, &s);
        trace_print_seq(m, &s);
#endif
        seq_putc(m, '\n');
out:
        mutex_unlock(&ftrace_profile_lock);

        return ret;
}

static void ftrace_profile_reset(struct ftrace_profile_stat *stat)
{
        struct ftrace_profile_page *pg;

        pg = stat->pages = stat->start;

        while (pg) {
                memset(pg->records, 0, PROFILE_RECORDS_SIZE);
                pg->index = 0;
                pg = pg->next;
        }

        memset(stat->hash, 0,
               FTRACE_PROFILE_HASH_SIZE * sizeof(struct hlist_head));
}

int ftrace_profile_pages_init(struct ftrace_profile_stat *stat)
{
        struct ftrace_profile_page *pg;
        int functions;
        int pages;
        int i;

        /* If we already allocated, do nothing */
        if (stat->pages)
                return 0;

        stat->pages = (void *)get_zeroed_page(GFP_KERNEL);
        if (!stat->pages)
                return -ENOMEM;

#ifdef CONFIG_DYNAMIC_FTRACE
        functions = ftrace_update_tot_cnt;
#else
        /*
         * We do not know the number of functions that exist because
         * dynamic tracing is what counts them. With past experience
         * we have around 20K functions. That should be more than enough.
         * It is highly unlikely we will execute every function in
         * the kernel.
         */
        functions = 20000;
#endif

        pg = stat->start = stat->pages;

        pages = DIV_ROUND_UP(functions, PROFILES_PER_PAGE);

        for (i = 1; i < pages; i++) {
                pg->next = (void *)get_zeroed_page(GFP_KERNEL);
                if (!pg->next)
                        goto out_free;
                pg = pg->next;
        }

        return 0;

 out_free:
        pg = stat->start;
        while (pg) {
                unsigned long tmp = (unsigned long)pg;

                pg = pg->next;
                free_page(tmp);
        }

        stat->pages = NULL;
        stat->start = NULL;

        return -ENOMEM;
}

static int ftrace_profile_init_cpu(int cpu)
{
        struct ftrace_profile_stat *stat;
        int size;

        stat = &per_cpu(ftrace_profile_stats, cpu);

        if (stat->hash) {
                /* If the profile is already created, simply reset it */
                ftrace_profile_reset(stat);
                return 0;
        }

        /*
         * We are profiling all functions, but usually only a few thousand
         * functions are hit. We'll make a hash of 1024 items.
         */
        size = FTRACE_PROFILE_HASH_SIZE;

        stat->hash = kzalloc(sizeof(struct hlist_head) * size, GFP_KERNEL);

        if (!stat->hash)
                return -ENOMEM;

        /* Preallocate the function profiling pages */
        if (ftrace_profile_pages_init(stat) < 0) {
                kfree(stat->hash);
                stat->hash = NULL;
                return -ENOMEM;
        }

        return 0;
}

static int ftrace_profile_init(void)
{
        int cpu;
        int ret = 0;

        for_each_possible_cpu(cpu) {
                ret = ftrace_profile_init_cpu(cpu);
                if (ret)
                        break;
        }

        return ret;
}

/* interrupts must be disabled */
static struct ftrace_profile *
ftrace_find_profiled_func(struct ftrace_profile_stat *stat, unsigned long ip)
{
        struct ftrace_profile *rec;
        struct hlist_head *hhd;
        unsigned long key;

        key = hash_long(ip, FTRACE_PROFILE_HASH_BITS);
        hhd = &stat->hash[key];

        if (hlist_empty(hhd))
                return NULL;

        hlist_for_each_entry_rcu_notrace(rec, hhd, node) {
                if (rec->ip == ip)
                        return rec;
        }

        return NULL;
}

static void ftrace_add_profile(struct ftrace_profile_stat *stat,
                               struct ftrace_profile *rec)
{
        unsigned long key;

        key = hash_long(rec->ip, FTRACE_PROFILE_HASH_BITS);
        hlist_add_head_rcu(&rec->node, &stat->hash[key]);
}

/*
 * The memory is already allocated, this simply finds a new record to use.
 */
static struct ftrace_profile *
ftrace_profile_alloc(struct ftrace_profile_stat *stat, unsigned long ip)
{
        struct ftrace_profile *rec = NULL;

        /* prevent recursion (from NMIs) */
        if (atomic_inc_return(&stat->disabled) != 1)
                goto out;

        /*
         * Try to find the function again since an NMI
         * could have added it
         */
        rec = ftrace_find_profiled_func(stat, ip);
        if (rec)
                goto out;

        if (stat->pages->index == PROFILES_PER_PAGE) {
                if (!stat->pages->next)
                        goto out;
                stat->pages = stat->pages->next;
        }

        rec = &stat->pages->records[stat->pages->index++];
        rec->ip = ip;
        ftrace_add_profile(stat, rec);

 out:
        atomic_dec(&stat->disabled);

        return rec;
}

static void
function_profile_call(unsigned long ip, unsigned long parent_ip,
                      struct ftrace_ops *ops, struct pt_regs *regs)
{
        struct ftrace_profile_stat *stat;
        struct ftrace_profile *rec;
        unsigned long flags;

        if (!ftrace_profile_enabled)
                return;

        local_irq_save(flags);

        stat = this_cpu_ptr(&ftrace_profile_stats);
        if (!stat->hash || !ftrace_profile_enabled)
                goto out;

        rec = ftrace_find_profiled_func(stat, ip);
        if (!rec) {
                rec = ftrace_profile_alloc(stat, ip);
                if (!rec)
                        goto out;
        }

        rec->counter++;
 out:
        local_irq_restore(flags);
}

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static int profile_graph_entry(struct ftrace_graph_ent *trace)
{
        function_profile_call(trace->func, 0, NULL, NULL);
        return 1;
}

static void profile_graph_return(struct ftrace_graph_ret *trace)
{
        struct ftrace_profile_stat *stat;
        unsigned long long calltime;
        struct ftrace_profile *rec;
        unsigned long flags;

        local_irq_save(flags);
        stat = this_cpu_ptr(&ftrace_profile_stats);
        if (!stat->hash || !ftrace_profile_enabled)
                goto out;

        /* If the calltime was zeroed, ignore it */
        if (!trace->calltime)
                goto out;

        calltime = trace->rettime - trace->calltime;

        if (!(trace_flags & TRACE_ITER_GRAPH_TIME)) {
                int index;

                index = trace->depth;

                /* Append this call time to the parent time to subtract */
                if (index)
                        current->ret_stack[index - 1].subtime += calltime;

                if (current->ret_stack[index].subtime < calltime)
                        calltime -= current->ret_stack[index].subtime;
                else
                        calltime = 0;
        }

        rec = ftrace_find_profiled_func(stat, trace->func);
        if (rec) {
                rec->time += calltime;
                rec->time_squared += calltime * calltime;
        }

 out:
        local_irq_restore(flags);
}

static int register_ftrace_profiler(void)
{
        return register_ftrace_graph(&profile_graph_return,
                                     &profile_graph_entry);
}

static void unregister_ftrace_profiler(void)
{
        unregister_ftrace_graph();
}
#else
static struct ftrace_ops ftrace_profile_ops __read_mostly = {
        .func           = function_profile_call,
        .flags          = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED,
        INIT_OPS_HASH(ftrace_profile_ops)
};

static int register_ftrace_profiler(void)
{
        return register_ftrace_function(&ftrace_profile_ops);
}

static void unregister_ftrace_profiler(void)
{
        unregister_ftrace_function(&ftrace_profile_ops);
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

static ssize_t
ftrace_profile_write(struct file *filp, const char __user *ubuf,
                     size_t cnt, loff_t *ppos)
{
        unsigned long val;
        int ret;

        ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
        if (ret)
                return ret;

        val = !!val;

        mutex_lock(&ftrace_profile_lock);
        if (ftrace_profile_enabled ^ val) {
                if (val) {
                        ret = ftrace_profile_init();
                        if (ret < 0) {
                                cnt = ret;
                                goto out;
                        }

                        ret = register_ftrace_profiler();
                        if (ret < 0) {
                                cnt = ret;
                                goto out;
                        }
                        ftrace_profile_enabled = 1;
                } else {
                        ftrace_profile_enabled = 0;
                        /*
                         * unregister_ftrace_profiler calls stop_machine
                         * so this acts like a synchronize_sched.
                         */
                        unregister_ftrace_profiler();
                }
        }
 out:
        mutex_unlock(&ftrace_profile_lock);

        *ppos += cnt;

        return cnt;
}
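
/*
 * Usage sketch from userspace (paths assume tracefs mounted in the
 * usual place, commonly /sys/kernel/debug/tracing or
 * /sys/kernel/tracing):
 *
 *	echo 1 > function_profile_enabled
 *	cat trace_stat/function0	# per-cpu stats, see below
 *	echo 0 > function_profile_enabled
 */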

static ssize_t
ftrace_profile_read(struct file *filp, char __user *ubuf,
                     size_t cnt, loff_t *ppos)
{
        char buf[64];           /* big enough to hold a number */
        int r;

        r = sprintf(buf, "%u\n", ftrace_profile_enabled);
        return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static const struct file_operations ftrace_profile_fops = {
        .open           = tracing_open_generic,
        .read           = ftrace_profile_read,
        .write          = ftrace_profile_write,
        .llseek         = default_llseek,
};

/* used to initialize the real stat files */
static struct tracer_stat function_stats __initdata = {
        .name           = "functions",
        .stat_start     = function_stat_start,
        .stat_next      = function_stat_next,
        .stat_cmp       = function_stat_cmp,
        .stat_headers   = function_stat_headers,
        .stat_show      = function_stat_show
};

static __init void ftrace_profile_tracefs(struct dentry *d_tracer)
{
        struct ftrace_profile_stat *stat;
        struct dentry *entry;
        char *name;
        int ret;
        int cpu;

        for_each_possible_cpu(cpu) {
                stat = &per_cpu(ftrace_profile_stats, cpu);

                /* allocate enough for function name + cpu number */
                name = kmalloc(32, GFP_KERNEL);
                if (!name) {
                        /*
                         * The files created are permanent; if something
                         * goes wrong we still do not free this memory.
                         */
                        WARN(1,
                             "Could not allocate stat file for cpu %d\n",
                             cpu);
                        return;
                }
                stat->stat = function_stats;
                snprintf(name, 32, "function%d", cpu);
                stat->stat.name = name;
                ret = register_stat_tracer(&stat->stat);
                if (ret) {
                        WARN(1,
                             "Could not register function stat for cpu %d\n",
                             cpu);
                        kfree(name);
                        return;
                }
        }

        entry = tracefs_create_file("function_profile_enabled", 0644,
                                    d_tracer, NULL, &ftrace_profile_fops);
        if (!entry)
                pr_warning("Could not create tracefs "
                           "'function_profile_enabled' entry\n");
}

#else /* CONFIG_FUNCTION_PROFILER */
static __init void ftrace_profile_tracefs(struct dentry *d_tracer)
{
}
#endif /* CONFIG_FUNCTION_PROFILER */

static struct pid * const ftrace_swapper_pid = &init_struct_pid;

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static int ftrace_graph_active;
#else
# define ftrace_graph_active 0
#endif

#ifdef CONFIG_DYNAMIC_FTRACE

static struct ftrace_ops *removed_ops;

/*
 * Set when doing a global update, like enabling all recs or disabling them.
 * It is not set when just updating a single ftrace_ops.
 */
static bool update_all_ops;

#ifndef CONFIG_FTRACE_MCOUNT_RECORD
# error Dynamic ftrace depends on MCOUNT_RECORD
#endif

static struct hlist_head ftrace_func_hash[FTRACE_FUNC_HASHSIZE] __read_mostly;

struct ftrace_func_probe {
        struct hlist_node       node;
        struct ftrace_probe_ops *ops;
        unsigned long           flags;
        unsigned long           ip;
        void                    *data;
        struct list_head        free_list;
};

struct ftrace_func_entry {
        struct hlist_node hlist;
        unsigned long ip;
};

struct ftrace_hash {
        unsigned long           size_bits;
        struct hlist_head       *buckets;
        unsigned long           count;
        struct rcu_head         rcu;
};

/*
 * We make these constant because no one should touch them,
 * but they are used as the default "empty hash", to avoid allocating
 * it all the time. These are in a read only section such that if
 * anyone does try to modify it, it will cause an exception.
 */
static const struct hlist_head empty_buckets[1];
static const struct ftrace_hash empty_hash = {
        .buckets = (struct hlist_head *)empty_buckets,
};
#define EMPTY_HASH      ((struct ftrace_hash *)&empty_hash)

static struct ftrace_ops global_ops = {
        .func                           = ftrace_stub,
        .local_hash.notrace_hash        = EMPTY_HASH,
        .local_hash.filter_hash         = EMPTY_HASH,
        INIT_OPS_HASH(global_ops)
        .flags                          = FTRACE_OPS_FL_RECURSION_SAFE |
                                          FTRACE_OPS_FL_INITIALIZED |
                                          FTRACE_OPS_FL_PID,
};

/*
 * This is used by __kernel_text_address() to return true if the
 * address is on a dynamically allocated trampoline that would
 * not return true for either core_kernel_text() or
 * is_module_text_address().
 */
bool is_ftrace_trampoline(unsigned long addr)
{
        struct ftrace_ops *op;
        bool ret = false;

        /*
         * Some of the ops may be dynamically allocated,
         * they are freed after a synchronize_sched().
         */
        preempt_disable_notrace();

        do_for_each_ftrace_op(op, ftrace_ops_list) {
                /*
                 * This is to check for dynamically allocated trampolines.
                 * Trampolines that are in kernel text will have
                 * core_kernel_text() return true.
                 */
                if (op->trampoline && op->trampoline_size)
                        if (addr >= op->trampoline &&
                            addr < op->trampoline + op->trampoline_size) {
                                ret = true;
                                goto out;
                        }
        } while_for_each_ftrace_op(op);

 out:
        preempt_enable_notrace();

        return ret;
}

struct ftrace_page {
        struct ftrace_page      *next;
        struct dyn_ftrace       *records;
        int                     index;
        int                     size;
};

#define ENTRY_SIZE sizeof(struct dyn_ftrace)
#define ENTRIES_PER_PAGE (PAGE_SIZE / ENTRY_SIZE)

/* estimate from running different kernels */
#define NR_TO_INIT              10000

static struct ftrace_page       *ftrace_pages_start;
static struct ftrace_page       *ftrace_pages;

static bool __always_inline ftrace_hash_empty(struct ftrace_hash *hash)
{
        return !hash || !hash->count;
}

static struct ftrace_func_entry *
ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip)
{
        unsigned long key;
        struct ftrace_func_entry *entry;
        struct hlist_head *hhd;

        if (ftrace_hash_empty(hash))
                return NULL;

        if (hash->size_bits > 0)
                key = hash_long(ip, hash->size_bits);
        else
                key = 0;

        hhd = &hash->buckets[key];

        hlist_for_each_entry_rcu_notrace(entry, hhd, hlist) {
                if (entry->ip == ip)
                        return entry;
        }
        return NULL;
}

static void __add_hash_entry(struct ftrace_hash *hash,
                             struct ftrace_func_entry *entry)
{
        struct hlist_head *hhd;
        unsigned long key;

        if (hash->size_bits)
                key = hash_long(entry->ip, hash->size_bits);
        else
                key = 0;

        hhd = &hash->buckets[key];
        hlist_add_head(&entry->hlist, hhd);
        hash->count++;
}

static int add_hash_entry(struct ftrace_hash *hash, unsigned long ip)
{
        struct ftrace_func_entry *entry;

        entry = kmalloc(sizeof(*entry), GFP_KERNEL);
        if (!entry)
                return -ENOMEM;

        entry->ip = ip;
        __add_hash_entry(hash, entry);

        return 0;
}

static void
free_hash_entry(struct ftrace_hash *hash,
                  struct ftrace_func_entry *entry)
{
        hlist_del(&entry->hlist);
        kfree(entry);
        hash->count--;
}

static void
remove_hash_entry(struct ftrace_hash *hash,
                  struct ftrace_func_entry *entry)
{
        hlist_del(&entry->hlist);
        hash->count--;
}

static void ftrace_hash_clear(struct ftrace_hash *hash)
{
        struct hlist_head *hhd;
        struct hlist_node *tn;
        struct ftrace_func_entry *entry;
        int size = 1 << hash->size_bits;
        int i;

        if (!hash->count)
                return;

        for (i = 0; i < size; i++) {
                hhd = &hash->buckets[i];
                hlist_for_each_entry_safe(entry, tn, hhd, hlist)
                        free_hash_entry(hash, entry);
        }
        FTRACE_WARN_ON(hash->count);
}

static void free_ftrace_hash(struct ftrace_hash *hash)
{
        if (!hash || hash == EMPTY_HASH)
                return;
        ftrace_hash_clear(hash);
        kfree(hash->buckets);
        kfree(hash);
}

static void __free_ftrace_hash_rcu(struct rcu_head *rcu)
{
        struct ftrace_hash *hash;

        hash = container_of(rcu, struct ftrace_hash, rcu);
        free_ftrace_hash(hash);
}

static void free_ftrace_hash_rcu(struct ftrace_hash *hash)
{
        if (!hash || hash == EMPTY_HASH)
                return;
        call_rcu_sched(&hash->rcu, __free_ftrace_hash_rcu);
}

void ftrace_free_filter(struct ftrace_ops *ops)
{
        ftrace_ops_init(ops);
        free_ftrace_hash(ops->func_hash->filter_hash);
        free_ftrace_hash(ops->func_hash->notrace_hash);
}

static struct ftrace_hash *alloc_ftrace_hash(int size_bits)
{
        struct ftrace_hash *hash;
        int size;

        hash = kzalloc(sizeof(*hash), GFP_KERNEL);
        if (!hash)
                return NULL;

        size = 1 << size_bits;
        hash->buckets = kcalloc(size, sizeof(*hash->buckets), GFP_KERNEL);

        if (!hash->buckets) {
                kfree(hash);
                return NULL;
        }

        hash->size_bits = size_bits;

        return hash;
}
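
/*
 * For example, size_bits = FTRACE_HASH_DEFAULT_BITS (10) allocates
 * 1 << 10 = 1024 buckets, and hash_long(ip, 10) in ftrace_lookup_ip()
 * indexes into them.
 */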

static struct ftrace_hash *
alloc_and_copy_ftrace_hash(int size_bits, struct ftrace_hash *hash)
{
        struct ftrace_func_entry *entry;
        struct ftrace_hash *new_hash;
        int size;
        int ret;
        int i;

        new_hash = alloc_ftrace_hash(size_bits);
        if (!new_hash)
                return NULL;

        /* Empty hash? */
        if (ftrace_hash_empty(hash))
                return new_hash;

        size = 1 << hash->size_bits;
        for (i = 0; i < size; i++) {
                hlist_for_each_entry(entry, &hash->buckets[i], hlist) {
                        ret = add_hash_entry(new_hash, entry->ip);
                        if (ret < 0)
                                goto free_hash;
                }
        }

        FTRACE_WARN_ON(new_hash->count != hash->count);

        return new_hash;

 free_hash:
        free_ftrace_hash(new_hash);
        return NULL;
}

static void
ftrace_hash_rec_disable_modify(struct ftrace_ops *ops, int filter_hash);
static void
ftrace_hash_rec_enable_modify(struct ftrace_ops *ops, int filter_hash);

static int ftrace_hash_ipmodify_update(struct ftrace_ops *ops,
                                       struct ftrace_hash *new_hash);

static int
ftrace_hash_move(struct ftrace_ops *ops, int enable,
                 struct ftrace_hash **dst, struct ftrace_hash *src)
{
        struct ftrace_func_entry *entry;
        struct hlist_node *tn;
        struct hlist_head *hhd;
        struct ftrace_hash *new_hash;
        int size = src->count;
        int bits = 0;
        int ret;
        int i;

        /* Reject setting notrace hash on IPMODIFY ftrace_ops */
        if (ops->flags & FTRACE_OPS_FL_IPMODIFY && !enable)
                return -EINVAL;

        /*
         * If the new source is empty, just free dst and assign it
         * the empty_hash.
         */
        if (!src->count) {
                new_hash = EMPTY_HASH;
                goto update;
        }

        /*
         * Make the hash size about 1/2 the # found
         */
        for (size /= 2; size; size >>= 1)
                bits++;

        /* Don't allocate too much */
        if (bits > FTRACE_HASH_MAX_BITS)
                bits = FTRACE_HASH_MAX_BITS;

        new_hash = alloc_ftrace_hash(bits);
        if (!new_hash)
                return -ENOMEM;

        size = 1 << src->size_bits;
        for (i = 0; i < size; i++) {
                hhd = &src->buckets[i];
                hlist_for_each_entry_safe(entry, tn, hhd, hlist) {
                        remove_hash_entry(src, entry);
                        __add_hash_entry(new_hash, entry);
                }
        }

update:
        /* Make sure this can be applied if it is IPMODIFY ftrace_ops */
        if (enable) {
                /* IPMODIFY should be updated only when filter_hash updating */
                ret = ftrace_hash_ipmodify_update(ops, new_hash);
                if (ret < 0) {
                        free_ftrace_hash(new_hash);
                        return ret;
                }
        }

        /*
         * Remove the current set, update the hash and add
         * them back.
         */
        ftrace_hash_rec_disable_modify(ops, enable);

        rcu_assign_pointer(*dst, new_hash);

        ftrace_hash_rec_enable_modify(ops, enable);

        return 0;
}
static bool hash_contains_ip(unsigned long ip,
                             struct ftrace_ops_hash *hash)
{
        /*
         * The function record is a match if it exists in the filter
         * hash and not in the notrace hash. Note, an empty hash is
         * considered a match for the filter hash, but an empty
         * notrace hash is considered not in the notrace hash.
         */
        return (ftrace_hash_empty(hash->filter_hash) ||
                ftrace_lookup_ip(hash->filter_hash, ip)) &&
                (ftrace_hash_empty(hash->notrace_hash) ||
                 !ftrace_lookup_ip(hash->notrace_hash, ip));
}
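
/*
 * For example: with filter_hash = { foo } and notrace_hash = { bar },
 * only foo matches; bar and every other function do not. With both
 * hashes empty, every ip matches (trace everything).
 */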

/*
 * Test the hashes for this ops to see if we want to call
 * the ops->func or not.
 *
 * It's a match if the ip is in the ops->filter_hash or
 * the filter_hash does not exist or is empty,
 *  AND
 * the ip is not in the ops->notrace_hash.
 *
 * This needs to be called with preemption disabled as
 * the hashes are freed with call_rcu_sched().
 */
static int
ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip, void *regs)
{
        struct ftrace_ops_hash hash;
        int ret;

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
        /*
         * There's a small race when adding ops: the ftrace handler
         * that wants regs may be called without them. We cannot
         * allow that handler to be called if regs is NULL.
         */
        if (regs == NULL && (ops->flags & FTRACE_OPS_FL_SAVE_REGS))
                return 0;
#endif

        hash.filter_hash = rcu_dereference_raw_notrace(ops->func_hash->filter_hash);
        hash.notrace_hash = rcu_dereference_raw_notrace(ops->func_hash->notrace_hash);

        if (hash_contains_ip(ip, &hash))
                ret = 1;
        else
                ret = 0;

        return ret;
}

/*
 * This is a double for loop. Do not use 'break' to break out of the loop,
 * you must use a goto.
 */
#define do_for_each_ftrace_rec(pg, rec)                                 \
        for (pg = ftrace_pages_start; pg; pg = pg->next) {              \
                int _____i;                                             \
                for (_____i = 0; _____i < pg->index; _____i++) {        \
                        rec = &pg->records[_____i];

#define while_for_each_ftrace_rec()             \
                }                               \
        }
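
/*
 * Usage sketch (mirrors the calls later in this file):
 *
 *	do_for_each_ftrace_rec(pg, rec) {
 *		if (rec->ip == target)
 *			goto found;	/* 'break' would only exit the inner loop */
 *	} while_for_each_ftrace_rec();
 */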


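/*
 * Note: in the search key, key->ip holds the start of the range and
 * key->flags is overloaded to hold the end (see ftrace_location_range()
 * below). A record matches if it lies within [start, end].
 */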
static int ftrace_cmp_recs(const void *a, const void *b)
{
        const struct dyn_ftrace *key = a;
        const struct dyn_ftrace *rec = b;

        if (key->flags < rec->ip)
                return -1;
        if (key->ip >= rec->ip + MCOUNT_INSN_SIZE)
                return 1;
        return 0;
}

static unsigned long ftrace_location_range(unsigned long start, unsigned long end)
{
        struct ftrace_page *pg;
        struct dyn_ftrace *rec;
        struct dyn_ftrace key;

        key.ip = start;
        key.flags = end;        /* overload flags, as it is unsigned long */

        for (pg = ftrace_pages_start; pg; pg = pg->next) {
                if (end < pg->records[0].ip ||
                    start >= (pg->records[pg->index - 1].ip + MCOUNT_INSN_SIZE))
                        continue;
                rec = bsearch(&key, pg->records, pg->index,
                              sizeof(struct dyn_ftrace),
                              ftrace_cmp_recs);
                if (rec)
                        return rec->ip;
        }

        return 0;
}

/**
 * ftrace_location - return true if the ip given is a traced location
 * @ip: the instruction pointer to check
 *
 * Returns rec->ip if @ip given is a pointer to a ftrace location.
 * That is, the instruction that is either a NOP or call to
 * the function tracer. It checks the ftrace internal tables to
 * determine if the address belongs or not.
 */
unsigned long ftrace_location(unsigned long ip)
{
        return ftrace_location_range(ip, ip);
}
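
/*
 * Illustrative use (callers live outside this excerpt): e.g. the
 * kprobes code calls ftrace_location() to tell whether an address is
 * an mcount/fentry patch site, and therefore must be probed through
 * ftrace rather than by inserting a breakpoint.
 */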

/**
 * ftrace_text_reserved - return true if range contains an ftrace location
 * @start: start of range to search
 * @end: end of range to search (inclusive). @end points to the last byte to check.
 *
 * Returns 1 if @start and @end contains a ftrace location.
 * That is, the instruction that is either a NOP or call to
 * the function tracer. It checks the ftrace internal tables to
 * determine if the address belongs or not.
 */
int ftrace_text_reserved(const void *start, const void *end)
{
        unsigned long ret;

        ret = ftrace_location_range((unsigned long)start,
                                    (unsigned long)end);

        return (int)!!ret;
}

/* Test if ops registered to this rec needs regs */
static bool test_rec_ops_needs_regs(struct dyn_ftrace *rec)
{
        struct ftrace_ops *ops;
        bool keep_regs = false;

        for (ops = ftrace_ops_list;
             ops != &ftrace_list_end; ops = ops->next) {
                /* pass rec in as regs to have non-NULL val */
                if (ftrace_ops_test(ops, rec->ip, rec)) {
                        if (ops->flags & FTRACE_OPS_FL_SAVE_REGS) {
                                keep_regs = true;
                                break;
                        }
                }
        }

        return keep_regs;
}

static void __ftrace_hash_rec_update(struct ftrace_ops *ops,
                                     int filter_hash,
                                     bool inc)
{
        struct ftrace_hash *hash;
        struct ftrace_hash *other_hash;
        struct ftrace_page *pg;
        struct dyn_ftrace *rec;
        int count = 0;
        int all = 0;

        /* Only update if the ops has been registered */
        if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
                return;

        /*
         * In the filter_hash case:
         *   If the count is zero, we update all records.
         *   Otherwise we just update the items in the hash.
         *
         * In the notrace_hash case:
         *   We enable the update in the hash.
         *   As disabling notrace means enabling the tracing,
         *   and enabling notrace means disabling, the inc variable
         *   gets inverted.
         */
        if (filter_hash) {
                hash = ops->func_hash->filter_hash;
                other_hash = ops->func_hash->notrace_hash;
                if (ftrace_hash_empty(hash))
                        all = 1;
        } else {
                inc = !inc;
                hash = ops->func_hash->notrace_hash;
                other_hash = ops->func_hash->filter_hash;
                /*
                 * If the notrace hash has no items,
                 * then there's nothing to do.
                 */
                if (ftrace_hash_empty(hash))
                        return;
        }

        do_for_each_ftrace_rec(pg, rec) {
                int in_other_hash = 0;
                int in_hash = 0;
                int match = 0;

                if (all) {
                        /*
                         * Only the filter_hash affects all records.
                         * Update if the record is not in the notrace hash.
                         */
                        if (!other_hash || !ftrace_lookup_ip(other_hash, rec->ip))
                                match = 1;
                } else {
                        in_hash = !!ftrace_lookup_ip(hash, rec->ip);
                        in_other_hash = !!ftrace_lookup_ip(other_hash, rec->ip);

                        /*
                         * If filter_hash is set, we want to match all functions
                         * that are in the hash but not in the other hash.
                         *
                         * If filter_hash is not set, then we are decrementing.
                         * That means we match anything that is in the hash
                         * and also in the other_hash. That is, we need to turn
                         * off functions in the other hash because they are disabled
                         * by this hash.
                         */
                        if (filter_hash && in_hash && !in_other_hash)
                                match = 1;
                        else if (!filter_hash && in_hash &&
                                 (in_other_hash || ftrace_hash_empty(other_hash)))
                                match = 1;
                }
                if (!match)
                        continue;

                if (inc) {
                        rec->flags++;
                        if (FTRACE_WARN_ON(ftrace_rec_count(rec) == FTRACE_REF_MAX))
                                return;

                        /*
                         * If there's only a single callback registered to a
                         * function, and the ops has a trampoline registered
                         * for it, then we can call it directly.
                         */
                        if (ftrace_rec_count(rec) == 1 && ops->trampoline)
                                rec->flags |= FTRACE_FL_TRAMP;
                        else
                                /*
                                 * If we are adding another function callback
                                 * to this function, and the previous had a
                                 * custom trampoline in use, then we need to go
                                 * back to the default trampoline.
                                 */
                                rec->flags &= ~FTRACE_FL_TRAMP;

                        /*
                         * If any ops wants regs saved for this function
                         * then all ops will get saved regs.
                         */
                        if (ops->flags & FTRACE_OPS_FL_SAVE_REGS)
                                rec->flags |= FTRACE_FL_REGS;
                } else {
                        if (FTRACE_WARN_ON(ftrace_rec_count(rec) == 0))
                                return;
                        rec->flags--;

                        /*
                         * If the rec had REGS enabled and the ops that is
                         * being removed had REGS set, then see if there is
                         * still any ops for this record that wants regs.
                         * If not, we can stop recording them.
                         */
                        if (ftrace_rec_count(rec) > 0 &&
1754                            rec->flags & FTRACE_FL_REGS &&
1755                            ops->flags & FTRACE_OPS_FL_SAVE_REGS) {
1756                                if (!test_rec_ops_needs_regs(rec))
1757                                        rec->flags &= ~FTRACE_FL_REGS;
1758                        }
1759
1760                        /*
1761                         * If the rec had TRAMP enabled, then it needs to
1762         * be cleared. TRAMP can only be enabled if
1763         * there is only a single ops attached to it.
1764         * In other words, always disable it on decrementing.
1765                         * In the future, we may set it if rec count is
1766                         * decremented to one, and the ops that is left
1767                         * has a trampoline.
1768                         */
1769                        rec->flags &= ~FTRACE_FL_TRAMP;
1770
1771                        /*
1772                         * flags will be cleared in ftrace_check_record()
1773                         * if rec count is zero.
1774                         */
1775                }
1776                count++;
1777                /* Shortcut, if we handled all records, we are done. */
1778                if (!all && count == hash->count)
1779                        return;
1780        } while_for_each_ftrace_rec();
1781}
1782
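/*
 * Walkthrough (editor's example, assuming two ops whose filter hashes
 * both contain the same function): the first increment can leave a
 * custom trampoline in place, the second forces the shared list loop.
 *
 *      __ftrace_hash_rec_update(ops1, 1, true);   ref 0 -> 1, may set TRAMP
 *      __ftrace_hash_rec_update(ops2, 1, true);   ref 1 -> 2, clears TRAMP
 *      __ftrace_hash_rec_update(ops2, 1, false);  ref 2 -> 1, TRAMP stays off
 */
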
1783static void ftrace_hash_rec_disable(struct ftrace_ops *ops,
1784                                    int filter_hash)
1785{
1786        __ftrace_hash_rec_update(ops, filter_hash, 0);
1787}
1788
1789static void ftrace_hash_rec_enable(struct ftrace_ops *ops,
1790                                   int filter_hash)
1791{
1792        __ftrace_hash_rec_update(ops, filter_hash, 1);
1793}
1794
1795static void ftrace_hash_rec_update_modify(struct ftrace_ops *ops,
1796                                          int filter_hash, int inc)
1797{
1798        struct ftrace_ops *op;
1799
1800        __ftrace_hash_rec_update(ops, filter_hash, inc);
1801
1802        if (ops->func_hash != &global_ops.local_hash)
1803                return;
1804
1805        /*
1806         * If the ops shares the global_ops hash, then we need to update
1807         * all ops that are enabled and use this hash.
1808         */
1809        do_for_each_ftrace_op(op, ftrace_ops_list) {
1810                /* Already done */
1811                if (op == ops)
1812                        continue;
1813                if (op->func_hash == &global_ops.local_hash)
1814                        __ftrace_hash_rec_update(op, filter_hash, inc);
1815        } while_for_each_ftrace_op(op);
1816}
1817
1818static void ftrace_hash_rec_disable_modify(struct ftrace_ops *ops,
1819                                           int filter_hash)
1820{
1821        ftrace_hash_rec_update_modify(ops, filter_hash, 0);
1822}
1823
1824static void ftrace_hash_rec_enable_modify(struct ftrace_ops *ops,
1825                                          int filter_hash)
1826{
1827        ftrace_hash_rec_update_modify(ops, filter_hash, 1);
1828}
1829
1830/*
1831 * Try to update IPMODIFY flag on each ftrace_rec. Return 0 if it is OK
1832 * or if no update is needed, -EBUSY if it detects a conflict of the flag
1833 * on an ftrace_rec, and -EINVAL if the new_hash tries to trace all recs.
1834 * Note that old_hash and new_hash have the following meanings:
1835 *  - If the hash is NULL, it hits all recs (if IPMODIFY is set, this is rejected)
1836 *  - If the hash is EMPTY_HASH, it hits nothing
1837 *  - Anything else hits the recs which match the hash entries.
1838 */
1839static int __ftrace_hash_update_ipmodify(struct ftrace_ops *ops,
1840                                         struct ftrace_hash *old_hash,
1841                                         struct ftrace_hash *new_hash)
1842{
1843        struct ftrace_page *pg;
1844        struct dyn_ftrace *rec, *end = NULL;
1845        int in_old, in_new;
1846
1847        /* Only update if the ops has been registered */
1848        if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
1849                return 0;
1850
1851        if (!(ops->flags & FTRACE_OPS_FL_IPMODIFY))
1852                return 0;
1853
1854        /*
1855         * Since the IPMODIFY is a very address sensitive action, we do not
1856         * allow ftrace_ops to set all functions to new hash.
1857         */
1858        if (!new_hash || !old_hash)
1859                return -EINVAL;
1860
1861        /* Update rec->flags */
1862        do_for_each_ftrace_rec(pg, rec) {
1863                /* We need to update only differences of filter_hash */
1864                in_old = !!ftrace_lookup_ip(old_hash, rec->ip);
1865                in_new = !!ftrace_lookup_ip(new_hash, rec->ip);
1866                if (in_old == in_new)
1867                        continue;
1868
1869                if (in_new) {
1870                        /* New entries must ensure no others are using it */
1871                        if (rec->flags & FTRACE_FL_IPMODIFY)
1872                                goto rollback;
1873                        rec->flags |= FTRACE_FL_IPMODIFY;
1874                } else /* Removed entry */
1875                        rec->flags &= ~FTRACE_FL_IPMODIFY;
1876        } while_for_each_ftrace_rec();
1877
1878        return 0;
1879
1880rollback:
1881        end = rec;
1882
1883        /* Roll back what we did above */
1884        do_for_each_ftrace_rec(pg, rec) {
1885                if (rec == end)
1886                        goto err_out;
1887
1888                in_old = !!ftrace_lookup_ip(old_hash, rec->ip);
1889                in_new = !!ftrace_lookup_ip(new_hash, rec->ip);
1890                if (in_old == in_new)
1891                        continue;
1892
1893                if (in_new)
1894                        rec->flags &= ~FTRACE_FL_IPMODIFY;
1895                else
1896                        rec->flags |= FTRACE_FL_IPMODIFY;
1897        } while_for_each_ftrace_rec();
1898
1899err_out:
1900        return -EBUSY;
1901}
1902
1903static int ftrace_hash_ipmodify_enable(struct ftrace_ops *ops)
1904{
1905        struct ftrace_hash *hash = ops->func_hash->filter_hash;
1906
1907        if (ftrace_hash_empty(hash))
1908                hash = NULL;
1909
1910        return __ftrace_hash_update_ipmodify(ops, EMPTY_HASH, hash);
1911}
1912
1913/* Disabling always succeeds */
1914static void ftrace_hash_ipmodify_disable(struct ftrace_ops *ops)
1915{
1916        struct ftrace_hash *hash = ops->func_hash->filter_hash;
1917
1918        if (ftrace_hash_empty(hash))
1919                hash = NULL;
1920
1921        __ftrace_hash_update_ipmodify(ops, hash, EMPTY_HASH);
1922}
1923
1924static int ftrace_hash_ipmodify_update(struct ftrace_ops *ops,
1925                                       struct ftrace_hash *new_hash)
1926{
1927        struct ftrace_hash *old_hash = ops->func_hash->filter_hash;
1928
1929        if (ftrace_hash_empty(old_hash))
1930                old_hash = NULL;
1931
1932        if (ftrace_hash_empty(new_hash))
1933                new_hash = NULL;
1934
1935        return __ftrace_hash_update_ipmodify(ops, old_hash, new_hash);
1936}
1937
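/*
 * Example (illustrative): IPMODIFY is exclusive per function. If an
 * enabled ops already set FTRACE_FL_IPMODIFY on a record, enabling a
 * second IPMODIFY ops (opsB below is hypothetical) that filters the
 * same ip rolls back its partial updates and fails:
 *
 *      ret = ftrace_hash_ipmodify_enable(opsB);    returns -EBUSY
 */
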
1938static void print_ip_ins(const char *fmt, unsigned char *p)
1939{
1940        int i;
1941
1942        printk(KERN_CONT "%s", fmt);
1943
1944        for (i = 0; i < MCOUNT_INSN_SIZE; i++)
1945                printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]);
1946}
1947
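/*
 * Editor's note: on x86, where MCOUNT_INSN_SIZE is 5, this prints
 * something like " actual: 0f:1f:44:00:00" (a 5-byte NOP).
 */
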
1948static struct ftrace_ops *
1949ftrace_find_tramp_ops_any(struct dyn_ftrace *rec);
1950
1951/**
1952 * ftrace_bug - report and shutdown function tracer
1953 * @failed: The failed type (EFAULT, EINVAL, EPERM)
1954 * @rec: The record that failed
1955 *
1956 * The arch code that enables or disables the function tracing
1957 * can call ftrace_bug() when it has detected a problem in
1958 * modifying the code. @failed should be one of:
1959 * EFAULT - if the problem happens on reading the @ip address
1960 * EINVAL - if what is read at @ip is not what was expected
1961 * EPERM - if the problem happens on writing to the @ip address
1962 */
1963void ftrace_bug(int failed, struct dyn_ftrace *rec)
1964{
1965        unsigned long ip = rec ? rec->ip : 0;
1966
1967        switch (failed) {
1968        case -EFAULT:
1969                FTRACE_WARN_ON_ONCE(1);
1970                pr_info("ftrace faulted on modifying ");
1971                print_ip_sym(ip);
1972                break;
1973        case -EINVAL:
1974                FTRACE_WARN_ON_ONCE(1);
1975                pr_info("ftrace failed to modify ");
1976                print_ip_sym(ip);
1977                print_ip_ins(" actual: ", (unsigned char *)ip);
1978                pr_cont("\n");
1979                break;
1980        case -EPERM:
1981                FTRACE_WARN_ON_ONCE(1);
1982                pr_info("ftrace faulted on writing ");
1983                print_ip_sym(ip);
1984                break;
1985        default:
1986                FTRACE_WARN_ON_ONCE(1);
1987                pr_info("ftrace faulted on unknown error ");
1988                print_ip_sym(ip);
1989        }
1990        if (rec) {
1991                struct ftrace_ops *ops = NULL;
1992
1993                pr_info("ftrace record flags: %lx\n", rec->flags);
1994                pr_cont(" (%ld)%s", ftrace_rec_count(rec),
1995                        rec->flags & FTRACE_FL_REGS ? " R" : "  ");
1996                if (rec->flags & FTRACE_FL_TRAMP_EN) {
1997                        ops = ftrace_find_tramp_ops_any(rec);
1998                        if (ops)
1999                                pr_cont("\ttramp: %pS",
2000                                        (void *)ops->trampoline);
2001                        else
2002                                pr_cont("\ttramp: ERROR!");
2003
2004                }
2005                ip = ftrace_get_addr_curr(rec);
2006                pr_cont(" expected tramp: %lx\n", ip);
2007        }
2008}
2009
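/*
 * Sketch of arch-side usage (hedged; ftrace_modify_code() stands in for
 * whatever text-patching helper the arch provides): report the failure
 * and let ftrace_bug() dump the record state before ftrace is killed.
 *
 *      ret = ftrace_modify_code(rec->ip, old_insn, new_insn);
 *      if (ret)
 *              ftrace_bug(ret, rec);
 */
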
2010static int ftrace_check_record(struct dyn_ftrace *rec, int enable, int update)
2011{
2012        unsigned long flag = 0UL;
2013
2014        /*
2015         * If we are updating calls:
2016         *
2017         *   If the record has a ref count, then we need to enable it
2018         *   because someone is using it.
2019         *
2020         *   Otherwise we make sure it's disabled.
2021         *
2022         * If we are disabling calls, then disable all records that
2023         * are enabled.
2024         */
2025        if (enable && ftrace_rec_count(rec))
2026                flag = FTRACE_FL_ENABLED;
2027
2028        /*
2029         * If enabling and the REGS flag does not match the REGS_EN, or
2030         * the TRAMP flag doesn't match the TRAMP_EN, then do not ignore
2031         * this record. Set flags to fail the compare against ENABLED.
2032         */
2033        if (flag) {
2034                if (!(rec->flags & FTRACE_FL_REGS) !=
2035                    !(rec->flags & FTRACE_FL_REGS_EN))
2036                        flag |= FTRACE_FL_REGS;
2037
2038                if (!(rec->flags & FTRACE_FL_TRAMP) !=
2039                    !(rec->flags & FTRACE_FL_TRAMP_EN))
2040                        flag |= FTRACE_FL_TRAMP;
2041        }
2042
2043        /* If the state of this record hasn't changed, then do nothing */
2044        if ((rec->flags & FTRACE_FL_ENABLED) == flag)
2045                return FTRACE_UPDATE_IGNORE;
2046
2047        if (flag) {
2048                /* Save off if rec is being enabled (for return value) */
2049                flag ^= rec->flags & FTRACE_FL_ENABLED;
2050
2051                if (update) {
2052                        rec->flags |= FTRACE_FL_ENABLED;
2053                        if (flag & FTRACE_FL_REGS) {
2054                                if (rec->flags & FTRACE_FL_REGS)
2055                                        rec->flags |= FTRACE_FL_REGS_EN;
2056                                else
2057                                        rec->flags &= ~FTRACE_FL_REGS_EN;
2058                        }
2059                        if (flag & FTRACE_FL_TRAMP) {
2060                                if (rec->flags & FTRACE_FL_TRAMP)
2061                                        rec->flags |= FTRACE_FL_TRAMP_EN;
2062                                else
2063                                        rec->flags &= ~FTRACE_FL_TRAMP_EN;
2064                        }
2065                }
2066
2067                /*
2068                 * If this record is being updated from a nop, then
2069                 *   return UPDATE_MAKE_CALL.
2070                 * Otherwise,
2071         *   return UPDATE_MODIFY_CALL to tell the caller to convert
2072         *   from a save-regs to a non-save-regs function (or
2073         *   vice versa), or to/from a trampoline call.
2074                 */
2075                if (flag & FTRACE_FL_ENABLED)
2076                        return FTRACE_UPDATE_MAKE_CALL;
2077
2078                return FTRACE_UPDATE_MODIFY_CALL;
2079        }
2080
2081        if (update) {
2082                /* If there's no more users, clear all flags */
2083                if (!ftrace_rec_count(rec))
2084                        rec->flags = 0;
2085                else
2086                        /*
2087                         * Just disable the record, but keep the ops TRAMP
2088                         * and REGS states. The _EN flags must be disabled though.
2089                         */
2090                        rec->flags &= ~(FTRACE_FL_ENABLED | FTRACE_FL_TRAMP_EN |
2091                                        FTRACE_FL_REGS_EN);
2092        }
2093
2094        return FTRACE_UPDATE_MAKE_NOP;
2095}
2096
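/*
 * Worked example (editor's illustration): a disabled record gains one
 * ops that wants saved regs. REGS != REGS_EN, so @flag picks up
 * FTRACE_FL_REGS, ENABLED toggles, and the caller is told to patch in
 * a call:
 *
 *      ftrace_check_record(rec, 1, 1) == FTRACE_UPDATE_MAKE_CALL
 *
 * If REGS later changes while the record stays enabled, the ENABLED
 * bit no longer toggles and the same call returns
 * FTRACE_UPDATE_MODIFY_CALL instead.
 */
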
2097/**
2098 * ftrace_update_record, set whether a record is now tracing or not
2099 * @rec: the record to update
2100 * @enable: set to 1 if the record is tracing, zero to force disable
2101 *
2102 * The records that represent all functions that can be traced need
2103 * to be updated when tracing has been enabled.
2104 */
2105int ftrace_update_record(struct dyn_ftrace *rec, int enable)
2106{
2107        return ftrace_check_record(rec, enable, 1);
2108}
2109
2110/**
2111 * ftrace_test_record, check if the record has been enabled or not
2112 * @rec: the record to test
2113 * @enable: set to 1 to check if enabled, 0 if it is disabled
2114 *
2115 * The arch code may need to test if a record is already set to
2116 * tracing to determine how to modify the function code that it
2117 * represents.
2118 */
2119int ftrace_test_record(struct dyn_ftrace *rec, int enable)
2120{
2121        return ftrace_check_record(rec, enable, 0);
2122}
2123
2124static struct ftrace_ops *
2125ftrace_find_tramp_ops_any(struct dyn_ftrace *rec)
2126{
2127        struct ftrace_ops *op;
2128        unsigned long ip = rec->ip;
2129
2130        do_for_each_ftrace_op(op, ftrace_ops_list) {
2131
2132                if (!op->trampoline)
2133                        continue;
2134
2135                if (hash_contains_ip(ip, op->func_hash))
2136                        return op;
2137        } while_for_each_ftrace_op(op);
2138
2139        return NULL;
2140}
2141
2142static struct ftrace_ops *
2143ftrace_find_tramp_ops_curr(struct dyn_ftrace *rec)
2144{
2145        struct ftrace_ops *op;
2146        unsigned long ip = rec->ip;
2147
2148        /*
2149         * Need to check removed ops first.
2150         * If they are being removed, and this rec has a tramp,
2151         * and this rec is in the ops list, then it would be the
2152         * one with the tramp.
2153         */
2154        if (removed_ops) {
2155                if (hash_contains_ip(ip, &removed_ops->old_hash))
2156                        return removed_ops;
2157        }
2158
2159        /*
2160         * Need to find the current trampoline for a rec.
2161         * Now, a trampoline is only attached to a rec if there
2162         * was a single 'ops' attached to it. But this can be called
2163         * when we are adding another op to the rec or removing the
2164         * current one. Thus, if the op is being added, we can
2165         * ignore it because it hasn't attached itself to the rec
2166         * yet.
2167         *
2168         * If an ops is being modified (hooking to different functions)
2169         * then we don't care about the new functions that are being
2170         * added, just the old ones (that are probably being removed).
2171         *
2172         * If we are adding an ops to a function that already uses
2173         * a trampoline, that trampoline needs to be removed (trampolines
2174         * are only used when a single ops is connected), so an ops that
2175         * is not being modified also needs to be checked.
2176         */
2177        do_for_each_ftrace_op(op, ftrace_ops_list) {
2178
2179                if (!op->trampoline)
2180                        continue;
2181
2182                /*
2183                 * If the ops is being added, it hasn't gotten to
2184                 * the point to be removed from this tree yet.
2185                 */
2186                if (op->flags & FTRACE_OPS_FL_ADDING)
2187                        continue;
2188
2189
2190                /*
2191                 * If the ops is being modified and is in the old
2192                 * hash, then it is probably being removed from this
2193                 * function.
2194                 */
2195                if ((op->flags & FTRACE_OPS_FL_MODIFYING) &&
2196                    hash_contains_ip(ip, &op->old_hash))
2197                        return op;
2198                /*
2199                 * If the ops is not being added or modified, and it's
2200                 * in its normal filter hash, then this must be the one
2201                 * we want!
2202                 */
2203                if (!(op->flags & FTRACE_OPS_FL_MODIFYING) &&
2204                    hash_contains_ip(ip, op->func_hash))
2205                        return op;
2206
2207        } while_for_each_ftrace_op(op);
2208
2209        return NULL;
2210}
2211
2212static struct ftrace_ops *
2213ftrace_find_tramp_ops_new(struct dyn_ftrace *rec)
2214{
2215        struct ftrace_ops *op;
2216        unsigned long ip = rec->ip;
2217
2218        do_for_each_ftrace_op(op, ftrace_ops_list) {
2219                /* The first ops whose hash contains this ip gets the trampoline */
2220                if (hash_contains_ip(ip, op->func_hash))
2221                        return op;
2222        } while_for_each_ftrace_op(op);
2223
2224        return NULL;
2225}
2226
2227/**
2228 * ftrace_get_addr_new - Get the call address to set to
2229 * @rec:  The ftrace record descriptor
2230 *
2231 * If the record has the FTRACE_FL_REGS set, that means that it
2232 * wants to convert to a callback that saves all regs. If FTRACE_FL_REGS
2233 * is not set, then it wants to convert to the normal callback.
2234 *
2235 * Returns the address of the trampoline to set to
2236 */
2237unsigned long ftrace_get_addr_new(struct dyn_ftrace *rec)
2238{
2239        struct ftrace_ops *ops;
2240
2241        /* Trampolines take precedence over regs */
2242        if (rec->flags & FTRACE_FL_TRAMP) {
2243                ops = ftrace_find_tramp_ops_new(rec);
2244                if (FTRACE_WARN_ON(!ops || !ops->trampoline)) {
2245                        pr_warn("Bad trampoline accounting at: %p (%pS) (%lx)\n",
2246                                (void *)rec->ip, (void *)rec->ip, rec->flags);
2247                        /* Ftrace is shutting down, return anything */
2248                        return (unsigned long)FTRACE_ADDR;
2249                }
2250                return ops->trampoline;
2251        }
2252
2253        if (rec->flags & FTRACE_FL_REGS)
2254                return (unsigned long)FTRACE_REGS_ADDR;
2255        else
2256                return (unsigned long)FTRACE_ADDR;
2257}
2258
2259/**
2260 * ftrace_get_addr_curr - Get the call address that is already there
2261 * @rec:  The ftrace record descriptor
2262 *
2263 * The FTRACE_FL_REGS_EN is set when the record already points to
2264 * a function that saves all the regs. Basically the '_EN' version
2265 * represents the current state of the function.
2266 *
2267 * Returns the address of the trampoline that is currently being called
2268 */
2269unsigned long ftrace_get_addr_curr(struct dyn_ftrace *rec)
2270{
2271        struct ftrace_ops *ops;
2272
2273        /* Trampolines take precedence over regs */
2274        if (rec->flags & FTRACE_FL_TRAMP_EN) {
2275                ops = ftrace_find_tramp_ops_curr(rec);
2276                if (FTRACE_WARN_ON(!ops)) {
2277                        pr_warning("Bad trampoline accounting at: %p (%pS)\n",
2278                                    (void *)rec->ip, (void *)rec->ip);
2279                        /* Ftrace is shutting down, return anything */
2280                        return (unsigned long)FTRACE_ADDR;
2281                }
2282                return ops->trampoline;
2283        }
2284
2285        if (rec->flags & FTRACE_FL_REGS_EN)
2286                return (unsigned long)FTRACE_REGS_ADDR;
2287        else
2288                return (unsigned long)FTRACE_ADDR;
2289}
2290
2291static int
2292__ftrace_replace_code(struct dyn_ftrace *rec, int enable)
2293{
2294        unsigned long ftrace_old_addr;
2295        unsigned long ftrace_addr;
2296        int ret;
2297
2298        ftrace_addr = ftrace_get_addr_new(rec);
2299
2300        /* This needs to be done before we call ftrace_update_record */
2301        ftrace_old_addr = ftrace_get_addr_curr(rec);
2302
2303        ret = ftrace_update_record(rec, enable);
2304
2305        switch (ret) {
2306        case FTRACE_UPDATE_IGNORE:
2307                return 0;
2308
2309        case FTRACE_UPDATE_MAKE_CALL:
2310                return ftrace_make_call(rec, ftrace_addr);
2311
2312        case FTRACE_UPDATE_MAKE_NOP:
2313                return ftrace_make_nop(NULL, rec, ftrace_old_addr);
2314
2315        case FTRACE_UPDATE_MODIFY_CALL:
2316                return ftrace_modify_call(rec, ftrace_old_addr, ftrace_addr);
2317        }
2318
2319        return -1; /* unknown ftrace bug */
2320}
2321
2322void __weak ftrace_replace_code(int enable)
2323{
2324        struct dyn_ftrace *rec;
2325        struct ftrace_page *pg;
2326        int failed;
2327
2328        if (unlikely(ftrace_disabled))
2329                return;
2330
2331        do_for_each_ftrace_rec(pg, rec) {
2332                failed = __ftrace_replace_code(rec, enable);
2333                if (failed) {
2334                        ftrace_bug(failed, rec);
2335                        /* Stop processing */
2336                        return;
2337                }
2338        } while_for_each_ftrace_rec();
2339}
2340
2341struct ftrace_rec_iter {
2342        struct ftrace_page      *pg;
2343        int                     index;
2344};
2345
2346/**
2347 * ftrace_rec_iter_start, start up iterating over traced functions
2348 *
2349 * Returns an iterator handle that is used to iterate over all
2350 * the records that represent address locations where functions
2351 * are traced.
2352 *
2353 * May return NULL if no records are available.
2354 */
2355struct ftrace_rec_iter *ftrace_rec_iter_start(void)
2356{
2357        /*
2358         * We only use a single iterator.
2359         * Protected by the ftrace_lock mutex.
2360         */
2361        static struct ftrace_rec_iter ftrace_rec_iter;
2362        struct ftrace_rec_iter *iter = &ftrace_rec_iter;
2363
2364        iter->pg = ftrace_pages_start;
2365        iter->index = 0;
2366
2367        /* Could have empty pages */
2368        while (iter->pg && !iter->pg->index)
2369                iter->pg = iter->pg->next;
2370
2371        if (!iter->pg)
2372                return NULL;
2373
2374        return iter;
2375}
2376
2377/**
2378 * ftrace_rec_iter_next, get the next record to process.
2379 * @iter: The handle to the iterator.
2380 *
2381 * Returns the next iterator after the given iterator @iter.
2382 */
2383struct ftrace_rec_iter *ftrace_rec_iter_next(struct ftrace_rec_iter *iter)
2384{
2385        iter->index++;
2386
2387        if (iter->index >= iter->pg->index) {
2388                iter->pg = iter->pg->next;
2389                iter->index = 0;
2390
2391                /* Could have empty pages */
2392                while (iter->pg && !iter->pg->index)
2393                        iter->pg = iter->pg->next;
2394        }
2395
2396        if (!iter->pg)
2397                return NULL;
2398
2399        return iter;
2400}
2401
2402/**
2403 * ftrace_rec_iter_record, get the record at the iterator location
2404 * @iter: The current iterator location
2405 *
2406 * Returns the record that the current @iter is at.
2407 */
2408struct dyn_ftrace *ftrace_rec_iter_record(struct ftrace_rec_iter *iter)
2409{
2410        return &iter->pg->records[iter->index];
2411}
2412
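/*
 * Usage sketch for the iterator API (this is how an arch override of
 * ftrace_replace_code() can walk every record under ftrace_lock; the
 * actual patching step is elided):
 *
 *      struct ftrace_rec_iter *iter;
 *      struct dyn_ftrace *rec;
 *
 *      for (iter = ftrace_rec_iter_start(); iter;
 *           iter = ftrace_rec_iter_next(iter)) {
 *              rec = ftrace_rec_iter_record(iter);
 *              ... patch the call site at rec->ip ...
 *      }
 */
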
2413static int
2414ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
2415{
2416        int ret;
2417
2418        if (unlikely(ftrace_disabled))
2419                return 0;
2420
2421        ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
2422        if (ret) {
2423                ftrace_bug(ret, rec);
2424                return 0;
2425        }
2426        return 1;
2427}
2428
2429/*
2430 * archs can override this function if they must do something
2431 * before the modifying code is performed.
2432 */
2433int __weak ftrace_arch_code_modify_prepare(void)
2434{
2435        return 0;
2436}
2437
2438/*
2439 * archs can override this function if they must do something
2440 * after the modifying code is performed.
2441 */
2442int __weak ftrace_arch_code_modify_post_process(void)
2443{
2444        return 0;
2445}
2446
2447void ftrace_modify_all_code(int command)
2448{
2449        int update = command & FTRACE_UPDATE_TRACE_FUNC;
2450        int err = 0;
2451
2452        /*
2453         * If the ftrace_caller calls a ftrace_ops func directly,
2454         * we need to make sure that it only traces functions it
2455         * expects to trace. When doing the switch of functions,
2456         * we need to update to the ftrace_ops_list_func first
2457         * before the transition between old and new calls are set,
2458         * as the ftrace_ops_list_func will check the ops hashes
2459         * to make sure the ops have the right functions
2460         * traced.
2461         */
2462        if (update) {
2463                err = ftrace_update_ftrace_func(ftrace_ops_list_func);
2464                if (FTRACE_WARN_ON(err))
2465                        return;
2466        }
2467
2468        if (command & FTRACE_UPDATE_CALLS)
2469                ftrace_replace_code(1);
2470        else if (command & FTRACE_DISABLE_CALLS)
2471                ftrace_replace_code(0);
2472
2473        if (update && ftrace_trace_function != ftrace_ops_list_func) {
2474                function_trace_op = set_function_trace_op;
2475                smp_wmb();
2476                /* If irqs are disabled, we are in stop machine */
2477                if (!irqs_disabled())
2478                        smp_call_function(ftrace_sync_ipi, NULL, 1);
2479                err = ftrace_update_ftrace_func(ftrace_trace_function);
2480                if (FTRACE_WARN_ON(err))
2481                        return;
2482        }
2483
2484        if (command & FTRACE_START_FUNC_RET)
2485                err = ftrace_enable_ftrace_graph_caller();
2486        else if (command & FTRACE_STOP_FUNC_RET)
2487                err = ftrace_disable_ftrace_graph_caller();
2488        FTRACE_WARN_ON(err);
2489}
2490
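/*
 * Example command composition (illustrative): switching the tracer
 * function while (re)enabling call sites and the graph return hook
 * would be requested as:
 *
 *      ftrace_modify_all_code(FTRACE_UPDATE_CALLS |
 *                             FTRACE_UPDATE_TRACE_FUNC |
 *                             FTRACE_START_FUNC_RET);
 */
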
2491static int __ftrace_modify_code(void *data)
2492{
2493        int *command = data;
2494
2495        ftrace_modify_all_code(*command);
2496
2497        return 0;
2498}
2499
2500/**
2501 * ftrace_run_stop_machine, go back to the stop machine method
2502 * @command: The command to tell ftrace what to do
2503 *
2504 * If an arch needs to fall back to the stop machine method, then
2505 * it can call this function.
2506 */
2507void ftrace_run_stop_machine(int command)
2508{
2509        stop_machine(__ftrace_modify_code, &command, NULL);
2510}
2511
2512/**
2513 * arch_ftrace_update_code, modify the code to trace or not trace
2514 * @command: The command that needs to be done
2515 *
2516 * Archs can override this function if they do not need to
2517 * run stop_machine() to modify code.
2518 */
2519void __weak arch_ftrace_update_code(int command)
2520{
2521        ftrace_run_stop_machine(command);
2522}
2523
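/*
 * Sketch of an arch override (x86 does roughly this, patching live with
 * breakpoints so that stop_machine() is not needed; the bookkeeping
 * around the call is omitted):
 *
 *      void arch_ftrace_update_code(int command)
 *      {
 *              ftrace_modify_all_code(command);
 *      }
 */
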
2524static void ftrace_run_update_code(int command)
2525{
2526        int ret;
2527
2528        ret = ftrace_arch_code_modify_prepare();
2529        FTRACE_WARN_ON(ret);
2530        if (ret)
2531                return;
2532
2533        /*
2534         * By default we use stop_machine() to modify the code.
2535         * But archs can do whatever they want as long as it
2536         * is safe. The stop_machine() is the safest, but also
2537         * produces the most overhead.
2538         */
2539        arch_ftrace_update_code(command);
2540
2541        ret = ftrace_arch_code_modify_post_process();
2542        FTRACE_WARN_ON(ret);
2543}
2544
2545static void ftrace_run_modify_code(struct ftrace_ops *ops, int command,
2546                                   struct ftrace_ops_hash *old_hash)
2547{
2548        ops->flags |= FTRACE_OPS_FL_MODIFYING;
2549        ops->old_hash.filter_hash = old_hash->filter_hash;
2550        ops->old_hash.notrace_hash = old_hash->notrace_hash;
2551        ftrace_run_update_code(command);
2552        ops->old_hash.filter_hash = NULL;
2553        ops->old_hash.notrace_hash = NULL;
2554        ops->flags &= ~FTRACE_OPS_FL_MODIFYING;
2555}
2556
2557static ftrace_func_t saved_ftrace_func;
2558static int ftrace_start_up;
2559
2560void __weak arch_ftrace_trampoline_free(struct ftrace_ops *ops)
2561{
2562}
2563
2564static void control_ops_free(struct ftrace_ops *ops)
2565{
2566        free_percpu(ops->disabled);
2567}
2568
2569static void ftrace_startup_enable(int command)
2570{
2571        if (saved_ftrace_func != ftrace_trace_function) {
2572                saved_ftrace_func = ftrace_trace_function;
2573                command |= FTRACE_UPDATE_TRACE_FUNC;
2574        }
2575
2576        if (!command || !ftrace_enabled)
2577                return;
2578
2579        ftrace_run_update_code(command);
2580}
2581
2582static void ftrace_startup_all(int command)
2583{
2584        update_all_ops = true;
2585        ftrace_startup_enable(command);
2586        update_all_ops = false;
2587}
2588
2589static int ftrace_startup(struct ftrace_ops *ops, int command)
2590{
2591        int ret;
2592
2593        if (unlikely(ftrace_disabled))
2594                return -ENODEV;
2595
2596        ret = __register_ftrace_function(ops);
2597        if (ret)
2598                return ret;
2599
2600        ftrace_start_up++;
2601        command |= FTRACE_UPDATE_CALLS;
2602
2603        /*
2604         * Note that ftrace probes use this to start up
2605         * and modify functions it will probe. But we still
2606         * set the ADDING flag for modification, as probes
2607         * do not have trampolines. If they add them in the
2608         * future, then the probes will need to distinguish
2609         * between adding and updating probes.
2610         */
2611        ops->flags |= FTRACE_OPS_FL_ENABLED | FTRACE_OPS_FL_ADDING;
2612
2613        ret = ftrace_hash_ipmodify_enable(ops);
2614        if (ret < 0) {
2615                /* Rollback registration process */
2616                __unregister_ftrace_function(ops);
2617                ftrace_start_up--;
2618                ops->flags &= ~FTRACE_OPS_FL_ENABLED;
2619                return ret;
2620        }
2621
2622        ftrace_hash_rec_enable(ops, 1);
2623
2624        ftrace_startup_enable(command);
2625
2626        ops->flags &= ~FTRACE_OPS_FL_ADDING;
2627
2628        return 0;
2629}
2630
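/*
 * Registration sketch (editor's example of a client; ftrace_startup()
 * is reached through register_ftrace_function()). my_func and my_ops
 * are hypothetical:
 *
 *      static void my_func(unsigned long ip, unsigned long parent_ip,
 *                          struct ftrace_ops *op, struct pt_regs *regs)
 *      {
 *              ... called on entry of every traced function ...
 *      }
 *
 *      static struct ftrace_ops my_ops = {
 *              .func   = my_func,
 *              .flags  = FTRACE_OPS_FL_RECURSION_SAFE,
 *      };
 *
 *      register_ftrace_function(&my_ops);
 */
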
2631static int ftrace_shutdown(struct ftrace_ops *ops, int command)
2632{
2633        int ret;
2634
2635        if (unlikely(ftrace_disabled))
2636                return -ENODEV;
2637
2638        ret = __unregister_ftrace_function(ops);
2639        if (ret)
2640                return ret;
2641
2642        ftrace_start_up--;
2643        /*
2644         * Just warn in case of imbalance; no need to kill ftrace, it's not
2645         * critical, but the ftrace_call callers may never be nopped again after
2646         * further ftrace uses.
2647         */
2648        WARN_ON_ONCE(ftrace_start_up < 0);
2649
2650        /* Disabling ipmodify never fails */
2651        ftrace_hash_ipmodify_disable(ops);
2652        ftrace_hash_rec_disable(ops, 1);
2653
2654        ops->flags &= ~FTRACE_OPS_FL_ENABLED;
2655
2656        command |= FTRACE_UPDATE_CALLS;
2657
2658        if (saved_ftrace_func != ftrace_trace_function) {
2659                saved_ftrace_func = ftrace_trace_function;
2660                command |= FTRACE_UPDATE_TRACE_FUNC;
2661        }
2662
2663        if (!command || !ftrace_enabled) {
2664                /*
2665                 * If these are control ops, they still need their
2666                 * per_cpu field freed. Since, function tracing is
2667                 * not currently active, we can just free them
2668                 * without synchronizing all CPUs.
2669                 */
2670                if (ops->flags & FTRACE_OPS_FL_CONTROL)
2671                        control_ops_free(ops);
2672                return 0;
2673        }
2674
2675        /*
2676         * If the ops uses a trampoline, then it needs to be
2677         * tested first on update.
2678         */
2679        ops->flags |= FTRACE_OPS_FL_REMOVING;
2680        removed_ops = ops;
2681
2682        /* The trampoline logic checks the old hashes */
2683        ops->old_hash.filter_hash = ops->func_hash->filter_hash;
2684        ops->old_hash.notrace_hash = ops->func_hash->notrace_hash;
2685
2686        ftrace_run_update_code(command);
2687
2688        /*
2689         * If there's no more ops registered with ftrace, run a
2690         * sanity check to make sure all rec flags are cleared.
2691         */
2692        if (ftrace_ops_list == &ftrace_list_end) {
2693                struct ftrace_page *pg;
2694                struct dyn_ftrace *rec;
2695
2696                do_for_each_ftrace_rec(pg, rec) {
2697                        if (FTRACE_WARN_ON_ONCE(rec->flags))
2698                                pr_warn("  %pS flags:%lx\n",
2699                                        (void *)rec->ip, rec->flags);
2700                } while_for_each_ftrace_rec();
2701        }
2702
2703        ops->old_hash.filter_hash = NULL;
2704        ops->old_hash.notrace_hash = NULL;
2705
2706        removed_ops = NULL;
2707        ops->flags &= ~FTRACE_OPS_FL_REMOVING;
2708
2709        /*
2710         * Dynamic ops may be freed, we must make sure that all
2711         * callers are done before leaving this function.
2712         * The same goes for freeing the per_cpu data of the control
2713         * ops.
2714         *
2715         * Again, normal synchronize_sched() is not good enough.
2716         * We need to do a hard force of sched synchronization.
2717         * This is because we use preempt_disable() to do RCU, but
2718         * the function tracers can be called where RCU is not watching
2719         * (like before user_exit()). We can not rely on the RCU
2720         * infrastructure to do the synchronization, thus we must do it
2721         * ourselves.
2722         */
2723        if (ops->flags & (FTRACE_OPS_FL_DYNAMIC | FTRACE_OPS_FL_CONTROL)) {
2724                schedule_on_each_cpu(ftrace_sync);
2725
2726                arch_ftrace_trampoline_free(ops);
2727
2728                if (ops->flags & FTRACE_OPS_FL_CONTROL)
2729                        control_ops_free(ops);
2730        }
2731
2732        return 0;
2733}
2734
2735static void ftrace_startup_sysctl(void)
2736{
2737        int command;
2738
2739        if (unlikely(ftrace_disabled))
2740                return;
2741
2742        /* Force update next time */
2743        saved_ftrace_func = NULL;
2744        /* ftrace_start_up is true if we want ftrace running */
2745        if (ftrace_start_up) {
2746                command = FTRACE_UPDATE_CALLS;
2747                if (ftrace_graph_active)
2748                        command |= FTRACE_START_FUNC_RET;
2749                ftrace_startup_enable(command);
2750        }
2751}
2752
2753static void ftrace_shutdown_sysctl(void)
2754{
2755        int command;
2756
2757        if (unlikely(ftrace_disabled))
2758                return;
2759
2760        /* ftrace_start_up is true if ftrace is running */
2761        if (ftrace_start_up) {
2762                command = FTRACE_DISABLE_CALLS;
2763                if (ftrace_graph_active)
2764                        command |= FTRACE_STOP_FUNC_RET;
2765                ftrace_run_update_code(command);
2766        }
2767}
2768
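/*
 * These two back the kernel.ftrace_enabled sysctl. From userspace
 * (illustrative):
 *
 *      echo 0 > /proc/sys/kernel/ftrace_enabled    -> ftrace_shutdown_sysctl()
 *      echo 1 > /proc/sys/kernel/ftrace_enabled    -> ftrace_startup_sysctl()
 */
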
2769static cycle_t          ftrace_update_time;
2770unsigned long           ftrace_update_tot_cnt;
2771
2772static inline int ops_traces_mod(struct ftrace_ops *ops)
2773{
2774        /*
2775         * An empty filter_hash defaults to tracing the whole module.
2776         * But the notrace hash requires a test of individual module functions.
2777         */
2778        return ftrace_hash_empty(ops->func_hash->filter_hash) &&
2779                ftrace_hash_empty(ops->func_hash->notrace_hash);
2780}
2781
2782/*
2783 * Check if the current ops references the record.
2784 *
2785 * If the ops traces all functions, then it was already accounted for.
2786 * If the ops does not trace the current record function, skip it.
2787 * If the ops ignores the function via notrace filter, skip it.
2788 */
2789static inline bool
2790ops_references_rec(struct ftrace_ops *ops, struct dyn_ftrace *rec)
2791{
2792        /* If ops isn't enabled, ignore it */
2793        if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
2794                return 0;
2795
2796        /* If ops traces all mods, we already accounted for it */
2797        if (ops_traces_mod(ops))
2798                return 0;
2799
2800        /* The function must be in the filter */
2801        if (!ftrace_hash_empty(ops->func_hash->filter_hash) &&
2802            !ftrace_lookup_ip(ops->func_hash->filter_hash, rec->ip))
2803                return 0;
2804
2805        /* If in notrace hash, we ignore it too */
2806        if (ftrace_lookup_ip(ops->func_hash->notrace_hash, rec->ip))
2807                return 0;
2808
2809        return 1;
2810}
2811
2812static int referenced_filters(struct dyn_ftrace *rec)
2813{
2814        struct ftrace_ops *ops;
2815        int cnt = 0;
2816
2817        for (ops = ftrace_ops_list; ops != &ftrace_list_end; ops = ops->next) {
2818                if (ops_references_rec(ops, rec))
2819                    cnt++;
2820        }
2821
2822        return cnt;
2823}
2824
2825static int ftrace_update_code(struct module *mod, struct ftrace_page *new_pgs)
2826{
2827        struct ftrace_page *pg;
2828        struct dyn_ftrace *p;
2829        cycle_t start, stop;
2830        unsigned long update_cnt = 0;
2831        unsigned long ref = 0;
2832        bool test = false;
2833        int i;
2834
2835        /*
2836         * When adding a module, we need to check if tracers are
2837         * currently enabled and if they are set to trace all functions.
2838         * If they are, we need to enable the module functions as well
2839         * as update the reference counts for those function records.
2840         */
2841        if (mod) {
2842                struct ftrace_ops *ops;
2843
2844                for (ops = ftrace_ops_list;
2845                     ops != &ftrace_list_end; ops = ops->next) {
2846                        if (ops->flags & FTRACE_OPS_FL_ENABLED) {
2847                                if (ops_traces_mod(ops))
2848                                        ref++;
2849                                else
2850                                        test = true;
2851                        }
2852                }
2853        }
2854
2855        start = ftrace_now(raw_smp_processor_id());
2856
2857        for (pg = new_pgs; pg; pg = pg->next) {
2858
2859                for (i = 0; i < pg->index; i++) {
2860                        int cnt = ref;
2861
2862                        /* If something went wrong, bail without enabling anything */
2863                        if (unlikely(ftrace_disabled))
2864                                return -1;
2865
2866                        p = &pg->records[i];
2867                        if (test)
2868                                cnt += referenced_filters(p);
2869                        p->flags = cnt;
2870
2871                        /*
2872                         * Do the initial record conversion from mcount jump
2873                         * to the NOP instructions.
2874                         */
2875                        if (!ftrace_code_disable(mod, p))
2876                                break;
2877
2878                        update_cnt++;
2879
2880                        /*
2881                         * If the tracing is enabled, go ahead and enable the record.
2882                         *
2883                         * The reason not to enable the record immediately is the
2884                         * inherent check of ftrace_make_nop/ftrace_make_call for
2885                         * correct previous instructions.  Doing the NOP
2886                         * conversion first puts the module into the correct state,
2887                         * thus passing the ftrace_make_call check.
2888                         */
2889                        if (ftrace_start_up && cnt) {
2890                                int failed = __ftrace_replace_code(p, 1);
2891                                if (failed)
2892                                        ftrace_bug(failed, p);
2893                        }
2894                }
2895        }
2896
2897        stop = ftrace_now(raw_smp_processor_id());
2898        ftrace_update_time = stop - start;
2899        ftrace_update_tot_cnt += update_cnt;
2900
2901        return 0;
2902}
2903
2904static int ftrace_allocate_records(struct ftrace_page *pg, int count)
2905{
2906        int order;
2907        int cnt;
2908
2909        if (WARN_ON(!count))
2910                return -EINVAL;
2911
2912        order = get_count_order(DIV_ROUND_UP(count, ENTRIES_PER_PAGE));
2913
2914        /*
2915         * We want to fill as much as possible. No more than a page
2916         * may be empty.
2917         */
2918        while ((PAGE_SIZE << order) / ENTRY_SIZE >= count + ENTRIES_PER_PAGE)
2919                order--;
2920
2921 again:
2922        pg->records = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
2923
2924        if (!pg->records) {
2925                /* if we can't allocate this size, try something smaller */
2926                if (!order)
2927                        return -ENOMEM;
2928                order >>= 1;
2929                goto again;
2930        }
2931
2932        cnt = (PAGE_SIZE << order) / ENTRY_SIZE;
2933        pg->size = cnt;
2934
2935        if (cnt > count)
2936                cnt = count;
2937
2938        return cnt;
2939}
2940
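/*
 * Worked example (editor's arithmetic, assuming ENTRIES_PER_PAGE == 256):
 * for count == 3000, DIV_ROUND_UP(3000, 256) == 12 pages, so order
 * starts at 4 (16 pages, 4096 entries). 4096 >= 3000 + 256, so the loop
 * trims to order 3 (2048 entries), wasting less than a page; the caller
 * then allocates a further block for the remaining 952 records.
 */
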
2941static struct ftrace_page *
2942ftrace_allocate_pages(unsigned long num_to_init)
2943{
2944        struct ftrace_page *start_pg;
2945        struct ftrace_page *pg;
2946        int order;
2947        int cnt;
2948
2949        if (!num_to_init)
2950                return NULL;
2951
2952        start_pg = pg = kzalloc(sizeof(*pg), GFP_KERNEL);
2953        if (!pg)
2954                return NULL;
2955
2956        /*
2957         * Try to allocate as much as possible in one contiguous
2958         * location that fills in all of the space. We want to
2959         * waste as little space as possible.
2960         */
2961        for (;;) {
2962                cnt = ftrace_allocate_records(pg, num_to_init);
2963                if (cnt < 0)
2964                        goto free_pages;
2965
2966                num_to_init -= cnt;
2967                if (!num_to_init)
2968                        break;
2969
2970                pg->next = kzalloc(sizeof(*pg), GFP_KERNEL);
2971                if (!pg->next)
2972                        goto free_pages;
2973
2974                pg = pg->next;
2975        }
2976
2977        return start_pg;
2978
2979 free_pages:
2980        pg = start_pg;
2981        while (pg) {
2982                order = get_count_order(pg->size / ENTRIES_PER_PAGE);
2983                free_pages((unsigned long)pg->records, order);
2984                start_pg = pg->next;
2985                kfree(pg);
2986                pg = start_pg;
2987        }
2988        pr_info("ftrace: FAILED to allocate memory for functions\n");
2989        return NULL;
2990}
2991
2992#define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */
2993
2994struct ftrace_iterator {
2995        loff_t                          pos;
2996        loff_t                          func_pos;
2997        struct ftrace_page              *pg;
2998        struct dyn_ftrace               *func;
2999        struct ftrace_func_probe        *probe;
3000        struct trace_parser             parser;
3001        struct ftrace_hash              *hash;
3002        struct ftrace_ops               *ops;
3003        int                             hidx;
3004        int                             idx;
3005        unsigned                        flags;
3006};
3007
3008static void *
3009t_hash_next(struct seq_file *m, loff_t *pos)
3010{
3011        struct ftrace_iterator *iter = m->private;
3012        struct hlist_node *hnd = NULL;
3013        struct hlist_head *hhd;
3014
3015        (*pos)++;
3016        iter->pos = *pos;
3017
3018        if (iter->probe)
3019                hnd = &iter->probe->node;
3020 retry:
3021        if (iter->hidx >= FTRACE_FUNC_HASHSIZE)
3022                return NULL;
3023
3024        hhd = &ftrace_func_hash[iter->hidx];
3025
3026        if (hlist_empty(hhd)) {
3027                iter->hidx++;
3028                hnd = NULL;
3029                goto retry;
3030        }
3031
3032        if (!hnd)
3033                hnd = hhd->first;
3034        else {
3035                hnd = hnd->next;
3036                if (!hnd) {
3037                        iter->hidx++;
3038                        goto retry;
3039                }
3040        }
3041
3042        if (WARN_ON_ONCE(!hnd))
3043                return NULL;
3044
3045        iter->probe = hlist_entry(hnd, struct ftrace_func_probe, node);
3046
3047        return iter;
3048}
3049
3050static void *t_hash_start(struct seq_file *m, loff_t *pos)
3051{
3052        struct ftrace_iterator *iter = m->private;
3053        void *p = NULL;
3054        loff_t l;
3055
3056        if (!(iter->flags & FTRACE_ITER_DO_HASH))
3057                return NULL;
3058
3059        if (iter->func_pos > *pos)
3060                return NULL;
3061
3062        iter->hidx = 0;
3063        for (l = 0; l <= (*pos - iter->func_pos); ) {
3064                p = t_hash_next(m, &l);
3065                if (!p)
3066                        break;
3067        }
3068        if (!p)
3069                return NULL;
3070
3071        /* Only set this if we have an item */
3072        iter->flags |= FTRACE_ITER_HASH;
3073
3074        return iter;
3075}
3076
3077static int
3078t_hash_show(struct seq_file *m, struct ftrace_iterator *iter)
3079{
3080        struct ftrace_func_probe *rec;
3081
3082        rec = iter->probe;
3083        if (WARN_ON_ONCE(!rec))
3084                return -EIO;
3085
3086        if (rec->ops->print)
3087                return rec->ops->print(m, rec->ip, rec->ops, rec->data);
3088
3089        seq_printf(m, "%ps:%ps", (void *)rec->ip, (void *)rec->ops->func);
3090
3091        if (rec->data)
3092                seq_printf(m, ":%p", rec->data);
3093        seq_putc(m, '\n');
3094
3095        return 0;
3096}
3097
3098static void *
3099t_next(struct seq_file *m, void *v, loff_t *pos)
3100{
3101        struct ftrace_iterator *iter = m->private;
3102        struct ftrace_ops *ops = iter->ops;
3103        struct dyn_ftrace *rec = NULL;
3104
3105        if (unlikely(ftrace_disabled))
3106                return NULL;
3107
3108        if (iter->flags & FTRACE_ITER_HASH)
3109                return t_hash_next(m, pos);
3110
3111        (*pos)++;
3112        iter->pos = iter->func_pos = *pos;
3113
3114        if (iter->flags & FTRACE_ITER_PRINTALL)
3115                return t_hash_start(m, pos);
3116
3117 retry:
3118        if (iter->idx >= iter->pg->index) {
3119                if (iter->pg->next) {
3120                        iter->pg = iter->pg->next;
3121                        iter->idx = 0;
3122                        goto retry;
3123                }
3124        } else {
3125                rec = &iter->pg->records[iter->idx++];
3126                if (((iter->flags & FTRACE_ITER_FILTER) &&
3127                     !(ftrace_lookup_ip(ops->func_hash->filter_hash, rec->ip))) ||
3128
3129                    ((iter->flags & FTRACE_ITER_NOTRACE) &&
3130                     !ftrace_lookup_ip(ops->func_hash->notrace_hash, rec->ip)) ||
3131
3132                    ((iter->flags & FTRACE_ITER_ENABLED) &&
3133                     !(rec->flags & FTRACE_FL_ENABLED))) {
3134
3135                        rec = NULL;
3136                        goto retry;
3137                }
3138        }
3139
3140        if (!rec)
3141                return t_hash_start(m, pos);
3142
3143        iter->func = rec;
3144
3145        return iter;
3146}
3147
3148static void reset_iter_read(struct ftrace_iterator *iter)
3149{
3150        iter->pos = 0;
3151        iter->func_pos = 0;
3152        iter->flags &= ~(FTRACE_ITER_PRINTALL | FTRACE_ITER_HASH);
3153}
3154
3155static void *t_start(struct seq_file *m, loff_t *pos)
3156{
3157        struct ftrace_iterator *iter = m->private;
3158        struct ftrace_ops *ops = iter->ops;
3159        void *p = NULL;
3160        loff_t l;
3161
3162        mutex_lock(&ftrace_lock);
3163
3164        if (unlikely(ftrace_disabled))
3165                return NULL;
3166
3167        /*
3168         * If an lseek was done, then reset and start from beginning.
3169         */
3170        if (*pos < iter->pos)
3171                reset_iter_read(iter);
3172
3173        /*
3174         * For set_ftrace_filter reading, if we have the filter
3175         * off, we can short-circuit and just print out that all
3176         * functions are enabled.
3177         */
3178        if ((iter->flags & FTRACE_ITER_FILTER &&
3179             ftrace_hash_empty(ops->func_hash->filter_hash)) ||
3180            (iter->flags & FTRACE_ITER_NOTRACE &&
3181             ftrace_hash_empty(ops->func_hash->notrace_hash))) {
3182                if (*pos > 0)
3183                        return t_hash_start(m, pos);
3184                iter->flags |= FTRACE_ITER_PRINTALL;
3185                /* reset in case of seek/pread */
3186                iter->flags &= ~FTRACE_ITER_HASH;
3187                return iter;
3188        }
3189
3190        if (iter->flags & FTRACE_ITER_HASH)
3191                return t_hash_start(m, pos);
3192
3193        /*
3194         * Unfortunately, we need to restart at ftrace_pages_start
3195         * every time we let go of the ftrace_lock. This is because
3196         * those pointers can change without the lock.
3197         */
3198        iter->pg = ftrace_pages_start;
3199        iter->idx = 0;
3200        for (l = 0; l <= *pos; ) {
3201                p = t_next(m, p, &l);
3202                if (!p)
3203                        break;
3204        }
3205
3206        if (!p)
3207                return t_hash_start(m, pos);
3208
3209        return iter;
3210}
3211
3212static void t_stop(struct seq_file *m, void *p)
3213{
3214        mutex_unlock(&ftrace_lock);
3215}
3216
3217void * __weak
3218arch_ftrace_trampoline_func(struct ftrace_ops *ops, struct dyn_ftrace *rec)
3219{
3220        return NULL;
3221}
3222
3223static void add_trampoline_func(struct seq_file *m, struct ftrace_ops *ops,
3224                                struct dyn_ftrace *rec)
3225{
3226        void *ptr;
3227
3228        ptr = arch_ftrace_trampoline_func(ops, rec);
3229        if (ptr)
3230                seq_printf(m, " ->%pS", ptr);
3231}
3232
3233static int t_show(struct seq_file *m, void *v)
3234{
3235        struct ftrace_iterator *iter = m->private;
3236        struct dyn_ftrace *rec;
3237
3238        if (iter->flags & FTRACE_ITER_HASH)
3239                return t_hash_show(m, iter);
3240
3241        if (iter->flags & FTRACE_ITER_PRINTALL) {
3242                if (iter->flags & FTRACE_ITER_NOTRACE)
3243                        seq_puts(m, "#### no functions disabled ####\n");
3244                else
3245                        seq_puts(m, "#### all functions enabled ####\n");
3246                return 0;
3247        }
3248
3249        rec = iter->func;
3250
3251        if (!rec)
3252                return 0;
3253
3254        seq_printf(m, "%ps", (void *)rec->ip);
3255        if (iter->flags & FTRACE_ITER_ENABLED) {
3256                struct ftrace_ops *ops = NULL;
3257
3258                seq_printf(m, " (%ld)%s%s",
3259                           ftrace_rec_count(rec),
3260                           rec->flags & FTRACE_FL_REGS ? " R" : "  ",
3261                           rec->flags & FTRACE_FL_IPMODIFY ? " I" : "  ");
3262                if (rec->flags & FTRACE_FL_TRAMP_EN) {
3263                        ops = ftrace_find_tramp_ops_any(rec);
3264                        if (ops)
3265                                seq_printf(m, "\ttramp: %pS",
3266                                           (void *)ops->trampoline);
3267                        else
3268                                seq_puts(m, "\ttramp: ERROR!");
3269
3270                }
3271                add_trampoline_func(m, ops, rec);
3272        }
3273
3274        seq_putc(m, '\n');
3275
3276        return 0;
3277}
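
/*
 * A sample line from the "enabled_functions" file as produced by
 * t_show() above (illustrative values):
 *
 *        schedule (1) R  I        tramp: 0xffffffffa0002000 ->ftrace_ops_list_func
 *
 * "(1)" is the number of ops attached to the record, "R" is set when
 * the call site saves regs (FTRACE_FL_REGS), "I" when an ops may modify
 * the ip (FTRACE_FL_IPMODIFY), and the trampoline info is only printed
 * when FTRACE_FL_TRAMP_EN is set.
 */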
3278
3279static const struct seq_operations show_ftrace_seq_ops = {
3280        .start = t_start,
3281        .next = t_next,
3282        .stop = t_stop,
3283        .show = t_show,
3284};
3285
3286static int
3287ftrace_avail_open(struct inode *inode, struct file *file)
3288{
3289        struct ftrace_iterator *iter;
3290
3291        if (unlikely(ftrace_disabled))
3292                return -ENODEV;
3293
3294        iter = __seq_open_private(file, &show_ftrace_seq_ops, sizeof(*iter));
3295        if (iter) {
3296                iter->pg = ftrace_pages_start;
3297                iter->ops = &global_ops;
3298        }
3299
3300        return iter ? 0 : -ENOMEM;
3301}
3302
3303static int
3304ftrace_enabled_open(struct inode *inode, struct file *file)
3305{
3306        struct ftrace_iterator *iter;
3307
3308        iter = __seq_open_private(file, &show_ftrace_seq_ops, sizeof(*iter));
3309        if (iter) {
3310                iter->pg = ftrace_pages_start;
3311                iter->flags = FTRACE_ITER_ENABLED;
3312                iter->ops = &global_ops;
3313        }
3314
3315        return iter ? 0 : -ENOMEM;
3316}
3317
3318/**
3319 * ftrace_regex_open - initialize function tracer filter files
3320 * @ops: The ftrace_ops that hold the hash filters
3321 * @flag: The type of filter to process
3322 * @inode: The inode, usually passed in to your open routine
3323 * @file: The file, usually passed in to your open routine
3324 *
3325 * ftrace_regex_open() initializes the filter files for the
3326 * @ops. Depending on @flag it may process the filter hash or
3327 * the notrace hash of @ops. When this is called from the open
3328 * routine, you can use ftrace_filter_write() for the write
3329 * routine if @flag has FTRACE_ITER_FILTER set, or
3330 * ftrace_notrace_write() if @flag has FTRACE_ITER_NOTRACE set.
3331 * tracing_lseek() should be used as the lseek routine, and
3332 * release must call ftrace_regex_release().
3333 */
3334int
3335ftrace_regex_open(struct ftrace_ops *ops, int flag,
3336                  struct inode *inode, struct file *file)
3337{
3338        struct ftrace_iterator *iter;
3339        struct ftrace_hash *hash;
3340        int ret = 0;
3341
3342        ftrace_ops_init(ops);
3343
3344        if (unlikely(ftrace_disabled))
3345                return -ENODEV;
3346
3347        iter = kzalloc(sizeof(*iter), GFP_KERNEL);
3348        if (!iter)
3349                return -ENOMEM;
3350
3351        if (trace_parser_get_init(&iter->parser, FTRACE_BUFF_MAX)) {
3352                kfree(iter);
3353                return -ENOMEM;
3354        }
3355
3356        iter->ops = ops;
3357        iter->flags = flag;
3358
3359        mutex_lock(&ops->func_hash->regex_lock);
3360
3361        if (flag & FTRACE_ITER_NOTRACE)
3362                hash = ops->func_hash->notrace_hash;
3363        else
3364                hash = ops->func_hash->filter_hash;
3365
3366        if (file->f_mode & FMODE_WRITE) {
3367                const int size_bits = FTRACE_HASH_DEFAULT_BITS;
3368
3369                if (file->f_flags & O_TRUNC)
3370                        iter->hash = alloc_ftrace_hash(size_bits);
3371                else
3372                        iter->hash = alloc_and_copy_ftrace_hash(size_bits, hash);
3373
3374                if (!iter->hash) {
3375                        trace_parser_put(&iter->parser);
3376                        kfree(iter);
3377                        ret = -ENOMEM;
3378                        goto out_unlock;
3379                }
3380        }
3381
3382        if (file->f_mode & FMODE_READ) {
3383                iter->pg = ftrace_pages_start;
3384
3385                ret = seq_open(file, &show_ftrace_seq_ops);
3386                if (!ret) {
3387                        struct seq_file *m = file->private_data;
3388                        m->private = iter;
3389                } else {
3390                        /* Failed */
3391                        free_ftrace_hash(iter->hash);
3392                        trace_parser_put(&iter->parser);
3393                        kfree(iter);
3394                }
3395        } else
3396                file->private_data = iter;
3397
3398 out_unlock:
3399        mutex_unlock(&ops->func_hash->regex_lock);
3400
3401        return ret;
3402}
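
/*
 * Illustrative sketch of a caller (hypothetical "my_ops" and
 * "my_filter_open"): wiring ftrace_regex_open() up exactly as the
 * kernel-doc above describes. ftrace_filter_open() below is the
 * in-tree equivalent.
 *
 *        static int my_filter_open(struct inode *inode, struct file *file)
 *        {
 *                return ftrace_regex_open(&my_ops, FTRACE_ITER_FILTER,
 *                                         inode, file);
 *        }
 *
 *        static const struct file_operations my_filter_fops = {
 *                .open    = my_filter_open,
 *                .read    = seq_read,
 *                .write   = ftrace_filter_write,
 *                .llseek  = tracing_lseek,
 *                .release = ftrace_regex_release,
 *        };
 */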
3403
3404static int
3405ftrace_filter_open(struct inode *inode, struct file *file)
3406{
3407        struct ftrace_ops *ops = inode->i_private;
3408
3409        return ftrace_regex_open(ops,
3410                        FTRACE_ITER_FILTER | FTRACE_ITER_DO_HASH,
3411                        inode, file);
3412}
3413
3414static int
3415ftrace_notrace_open(struct inode *inode, struct file *file)
3416{
3417        struct ftrace_ops *ops = inode->i_private;
3418
3419        return ftrace_regex_open(ops, FTRACE_ITER_NOTRACE,
3420                                 inode, file);
3421}
3422
3423static int ftrace_match(char *str, char *regex, int len, int type)
3424{
3425        int matched = 0;
3426        int slen;
3427
3428        switch (type) {
3429        case MATCH_FULL:
3430                if (strcmp(str, regex) == 0)
3431                        matched = 1;
3432                break;
3433        case MATCH_FRONT_ONLY:
3434                if (strncmp(str, regex, len) == 0)
3435                        matched = 1;
3436                break;
3437        case MATCH_MIDDLE_ONLY:
3438                if (strstr(str, regex))
3439                        matched = 1;
3440                break;
3441        case MATCH_END_ONLY:
3442                slen = strlen(str);
3443                if (slen >= len && memcmp(str + slen - len, regex, len) == 0)
3444                        matched = 1;
3445                break;
3446        }
3447
3448        return matched;
3449}
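
/*
 * The match type is produced by filter_parse_regex(), which maps the
 * user's glob pattern to one of the cases above:
 *
 *        "func"        MATCH_FULL           exact string compare
 *        "func*"       MATCH_FRONT_ONLY     prefix compare
 *        "*func*"      MATCH_MIDDLE_ONLY    substring search
 *        "*func"       MATCH_END_ONLY       suffix compare
 */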
3450
3451static int
3452enter_record(struct ftrace_hash *hash, struct dyn_ftrace *rec, int not)
3453{
3454        struct ftrace_func_entry *entry;
3455        int ret = 0;
3456
3457        entry = ftrace_lookup_ip(hash, rec->ip);
3458        if (not) {
3459                /* Do nothing if it doesn't exist */
3460                if (!entry)
3461                        return 0;
3462
3463                free_hash_entry(hash, entry);
3464        } else {
3465                /* Do nothing if it exists */
3466                if (entry)
3467                        return 0;
3468
3469                ret = add_hash_entry(hash, rec->ip);
3470        }
3471        return ret;
3472}
3473
3474static int
3475ftrace_match_record(struct dyn_ftrace *rec, char *mod,
3476                    char *regex, int len, int type)
3477{
3478        char str[KSYM_SYMBOL_LEN];
3479        char *modname;
3480
3481        kallsyms_lookup(rec->ip, NULL, NULL, &modname, str);
3482
3483        if (mod) {
3484                /* module lookup requires matching the module */
3485                if (!modname || strcmp(modname, mod))
3486                        return 0;
3487
3488                /* blank search means to match all funcs in the mod */
3489                if (!len)
3490                        return 1;
3491        }
3492
3493        return ftrace_match(str, regex, len, type);
3494}
3495
3496static int
3497match_records(struct ftrace_hash *hash, char *buff,
3498              int len, char *mod, int not)
3499{
3500        unsigned search_len = 0;
3501        struct ftrace_page *pg;
3502        struct dyn_ftrace *rec;
3503        int type = MATCH_FULL;
3504        char *search = buff;
3505        int found = 0;
3506        int ret;
3507
3508        if (len) {
3509                type = filter_parse_regex(buff, len, &search, &not);
3510                search_len = strlen(search);
3511        }
3512
3513        mutex_lock(&ftrace_lock);
3514
3515        if (unlikely(ftrace_disabled))
3516                goto out_unlock;
3517
3518        do_for_each_ftrace_rec(pg, rec) {
3519                if (ftrace_match_record(rec, mod, search, search_len, type)) {
3520                        ret = enter_record(hash, rec, not);
3521                        if (ret < 0) {
3522                                found = ret;
3523                                goto out_unlock;
3524                        }
3525                        found = 1;
3526                }
3527        } while_for_each_ftrace_rec();
3528 out_unlock:
3529        mutex_unlock(&ftrace_lock);
3530
3531        return found;
3532}
3533
3534static int
3535ftrace_match_records(struct ftrace_hash *hash, char *buff, int len)
3536{
3537        return match_records(hash, buff, len, NULL, 0);
3538}
3539
3540static int
3541ftrace_match_module_records(struct ftrace_hash *hash, char *buff, char *mod)
3542{
3543        int not = 0;
3544
3545        /* blank or '*' mean the same */
3546        if (strcmp(buff, "*") == 0)
3547                buff[0] = 0;
3548
3549        /* handle the case of 'dont filter this module' */
3550        if (strcmp(buff, "!") == 0 || strcmp(buff, "!*") == 0) {
3551                buff[0] = 0;
3552                not = 1;
3553        }
3554
3555        return match_records(hash, buff, strlen(buff), mod, not);
3556}
3557
3558/*
3559 * We register the module command as a template to show others how
3560 * to register a command as well.
3561 */
3562
3563static int
3564ftrace_mod_callback(struct ftrace_hash *hash,
3565                    char *func, char *cmd, char *param, int enable)
3566{
3567        char *mod;
3568        int ret = -EINVAL;
3569
3570        /*
3571         * cmd == 'mod' because we only registered this func
3572         * for the 'mod' ftrace_func_command.
3573         * But if you register one func with multiple commands,
3574         * you can tell which command was used by the cmd
3575         * parameter.
3576         */
3577
3578        /* we must have a module name */
3579        if (!param)
3580                return ret;
3581
3582        mod = strsep(&param, ":");
3583        if (!strlen(mod))
3584                return ret;
3585
3586        ret = ftrace_match_module_records(hash, func, mod);
3587        if (!ret)
3588                ret = -EINVAL;
3589        if (ret < 0)
3590                return ret;
3591
3592        return 0;
3593}
3594
3595static struct ftrace_func_command ftrace_mod_cmd = {
3596        .name                   = "mod",
3597        .func                   = ftrace_mod_callback,
3598};
3599
3600static int __init ftrace_mod_cmd_init(void)
3601{
3602        return register_ftrace_command(&ftrace_mod_cmd);
3603}
3604core_initcall(ftrace_mod_cmd_init);
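
/*
 * From user space the command above is reached through the filter
 * files, e.g. (illustrative):
 *
 *        # echo '*write*:mod:ext3' > set_ftrace_filter
 *
 * which enables tracing of all functions matching '*write*' that are
 * defined in the ext3 module.
 */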
3605
3606static void function_trace_probe_call(unsigned long ip, unsigned long parent_ip,
3607                                      struct ftrace_ops *op, struct pt_regs *pt_regs)
3608{
3609        struct ftrace_func_probe *entry;
3610        struct hlist_head *hhd;
3611        unsigned long key;
3612
3613        key = hash_long(ip, FTRACE_HASH_BITS);
3614
3615        hhd = &ftrace_func_hash[key];
3616
3617        if (hlist_empty(hhd))
3618                return;
3619
3620        /*
3621         * Disable preemption for these calls to prevent an RCU grace
3622         * period from completing. This syncs the hash iteration with the
3623         * freeing of items on the hash. rcu_read_lock() is too dangerous here.
3624         */
3625        preempt_disable_notrace();
3626        hlist_for_each_entry_rcu_notrace(entry, hhd, node) {
3627                if (entry->ip == ip)
3628                        entry->ops->func(ip, parent_ip, &entry->data);
3629        }
3630        preempt_enable_notrace();
3631}
3632
3633static struct ftrace_ops trace_probe_ops __read_mostly =
3634{
3635        .func           = function_trace_probe_call,
3636        .flags          = FTRACE_OPS_FL_INITIALIZED,
3637        INIT_OPS_HASH(trace_probe_ops)
3638};
3639
3640static int ftrace_probe_registered;
3641
3642static void __enable_ftrace_function_probe(struct ftrace_ops_hash *old_hash)
3643{
3644        int ret;
3645        int i;
3646
3647        if (ftrace_probe_registered) {
3648                /* still need to update the function call sites */
3649                if (ftrace_enabled)
3650                        ftrace_run_modify_code(&trace_probe_ops, FTRACE_UPDATE_CALLS,
3651                                               old_hash);
3652                return;
3653        }
3654
3655        for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
3656                struct hlist_head *hhd = &ftrace_func_hash[i];
3657                if (hhd->first)
3658                        break;
3659        }
3660        /* Nothing registered? */
3661        if (i == FTRACE_FUNC_HASHSIZE)
3662                return;
3663
3664        ret = ftrace_startup(&trace_probe_ops, 0);
3665
3666        ftrace_probe_registered = 1;
3667}
3668
3669static void __disable_ftrace_function_probe(void)
3670{
3671        int i;
3672
3673        if (!ftrace_probe_registered)
3674                return;
3675
3676        for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
3677                struct hlist_head *hhd = &ftrace_func_hash[i];
3678                if (hhd->first)
3679                        return;
3680        }
3681
3682        /* no more funcs left */
3683        ftrace_shutdown(&trace_probe_ops, 0);
3684
3685        ftrace_probe_registered = 0;
3686}
3687
3688
3689static void ftrace_free_entry(struct ftrace_func_probe *entry)
3690{
3691        if (entry->ops->free)
3692                entry->ops->free(entry->ops, entry->ip, &entry->data);
3693        kfree(entry);
3694}
3695
3696int
3697register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
3698                              void *data)
3699{
3700        struct ftrace_ops_hash old_hash_ops;
3701        struct ftrace_func_probe *entry;
3702        struct ftrace_hash **orig_hash = &trace_probe_ops.func_hash->filter_hash;
3703        struct ftrace_hash *old_hash = *orig_hash;
3704        struct ftrace_hash *hash;
3705        struct ftrace_page *pg;
3706        struct dyn_ftrace *rec;
3707        int type, len, not;
3708        unsigned long key;
3709        int count = 0;
3710        char *search;
3711        int ret;
3712
3713        type = filter_parse_regex(glob, strlen(glob), &search, &not);
3714        len = strlen(search);
3715
3716        /* we do not support '!' for function probes */
3717        if (WARN_ON(not))
3718                return -EINVAL;
3719
3720        mutex_lock(&trace_probe_ops.func_hash->regex_lock);
3721
3722        old_hash_ops.filter_hash = old_hash;
3723        /* Probes only have filters */
3724        old_hash_ops.notrace_hash = NULL;
3725
3726        hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, old_hash);
3727        if (!hash) {
3728                count = -ENOMEM;
3729                goto out;
3730        }
3731
3732        if (unlikely(ftrace_disabled)) {
3733                count = -ENODEV;
3734                goto out;
3735        }
3736
3737        mutex_lock(&ftrace_lock);
3738
3739        do_for_each_ftrace_rec(pg, rec) {
3740
3741                if (!ftrace_match_record(rec, NULL, search, len, type))
3742                        continue;
3743
3744                entry = kmalloc(sizeof(*entry), GFP_KERNEL);
3745                if (!entry) {
3746                        /* If we did not process any, then return error */
3747                        if (!count)
3748                                count = -ENOMEM;
3749                        goto out_unlock;
3750                }
3751
3752                count++;
3753
3754                entry->data = data;
3755
3756                /*
3757                 * The caller might want to do something special
3758                 * for each function we find. We call the callback
3759                 * to give the caller an opportunity to do so.
3760                 */
3761                if (ops->init) {
3762                        if (ops->init(ops, rec->ip, &entry->data) < 0) {
3763                                /* caller does not like this func */
3764                                kfree(entry);
3765                                continue;
3766                        }
3767                }
3768
3769                ret = enter_record(hash, rec, 0);
3770                if (ret < 0) {
3771                        kfree(entry);
3772                        count = ret;
3773                        goto out_unlock;
3774                }
3775
3776                entry->ops = ops;
3777                entry->ip = rec->ip;
3778
3779                key = hash_long(entry->ip, FTRACE_HASH_BITS);
3780                hlist_add_head_rcu(&entry->node, &ftrace_func_hash[key]);
3781
3782        } while_for_each_ftrace_rec();
3783
3784        ret = ftrace_hash_move(&trace_probe_ops, 1, orig_hash, hash);
3785
3786        __enable_ftrace_function_probe(&old_hash_ops);
3787
3788        if (!ret)
3789                free_ftrace_hash_rcu(old_hash);
3790        else
3791                count = ret;
3792
3793 out_unlock:
3794        mutex_unlock(&ftrace_lock);
3795 out:
3796        mutex_unlock(&trace_probe_ops.func_hash->regex_lock);
3797        free_ftrace_hash(hash);
3798
3799        return count;
3800}
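
/*
 * Illustrative sketch of a probe user (hypothetical names), using the
 * callback signatures that function_trace_probe_call() and the
 * init/free hooks above rely on:
 *
 *        static void my_probe(unsigned long ip, unsigned long parent_ip,
 *                             void **data)
 *        {
 *                ...        called each time a matched function is hit
 *        }
 *
 *        static struct ftrace_probe_ops my_probe_ops = {
 *                .func = my_probe,
 *        };
 *
 *        ret = register_ftrace_function_probe("sched_*", &my_probe_ops, NULL);
 *
 * On success the return value is the number of functions that matched.
 */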
3801
3802enum {
3803        PROBE_TEST_FUNC         = 1,
3804        PROBE_TEST_DATA         = 2
3805};
3806
3807static void
3808__unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
3809                                  void *data, int flags)
3810{
3811        struct ftrace_func_entry *rec_entry;
3812        struct ftrace_func_probe *entry;
3813        struct ftrace_func_probe *p;
3814        struct ftrace_hash **orig_hash = &trace_probe_ops.func_hash->filter_hash;
3815        struct ftrace_hash *old_hash = *orig_hash;
3816        struct list_head free_list;
3817        struct ftrace_hash *hash;
3818        struct hlist_node *tmp;
3819        char str[KSYM_SYMBOL_LEN];
3820        int type = MATCH_FULL;
3821        int i, len = 0;
3822        char *search;
3823        int ret;
3824
3825        if (glob && (strcmp(glob, "*") == 0 || !strlen(glob)))
3826                glob = NULL;
3827        else if (glob) {
3828                int not;
3829
3830                type = filter_parse_regex(glob, strlen(glob), &search, &not);
3831                len = strlen(search);
3832
3833                /* we do not support '!' for function probes */
3834                if (WARN_ON(not))
3835                        return;
3836        }
3837
3838        mutex_lock(&trace_probe_ops.func_hash->regex_lock);
3839
3840        hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash);
3841        if (!hash)
3842                /* Hmm, should report this somehow */
3843                goto out_unlock;
3844
3845        INIT_LIST_HEAD(&free_list);
3846
3847        for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
3848                struct hlist_head *hhd = &ftrace_func_hash[i];
3849
3850                hlist_for_each_entry_safe(entry, tmp, hhd, node) {
3851
3852                        /* break up if statements for readability */
3853                        if ((flags & PROBE_TEST_FUNC) && entry->ops != ops)
3854                                continue;
3855
3856                        if ((flags & PROBE_TEST_DATA) && entry->data != data)
3857                                continue;
3858
3859                        /* do this last, since it is the most expensive */
3860                        if (glob) {
3861                                kallsyms_lookup(entry->ip, NULL, NULL,
3862                                                NULL, str);
3863                                if (!ftrace_match(str, glob, len, type))
3864                                        continue;
3865                        }
3866
3867                        rec_entry = ftrace_lookup_ip(hash, entry->ip);
3868                        /* It is possible more than one entry had this ip */
3869                        if (rec_entry)
3870                                free_hash_entry(hash, rec_entry);
3871
3872                        hlist_del_rcu(&entry->node);
3873                        list_add(&entry->free_list, &free_list);
3874                }
3875        }
3876        mutex_lock(&ftrace_lock);
3877        __disable_ftrace_function_probe();
3878        /*
3879         * Remove after the disable is called. Otherwise, if the last
3880         * probe is removed, a null hash means *all enabled*.
3881         */
3882        ret = ftrace_hash_move(&trace_probe_ops, 1, orig_hash, hash);
3883        synchronize_sched();
3884        if (!ret)
3885                free_ftrace_hash_rcu(old_hash);
3886
3887        list_for_each_entry_safe(entry, p, &free_list, free_list) {
3888                list_del(&entry->free_list);
3889                ftrace_free_entry(entry);
3890        }
3891        mutex_unlock(&ftrace_lock);
3892
3893 out_unlock:
3894        mutex_unlock(&trace_probe_ops.func_hash->regex_lock);
3895        free_ftrace_hash(hash);
3896}
3897
3898void
3899unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
3900                                void *data)
3901{
3902        __unregister_ftrace_function_probe(glob, ops, data,
3903                                          PROBE_TEST_FUNC | PROBE_TEST_DATA);
3904}
3905
3906void
3907unregister_ftrace_function_probe_func(char *glob, struct ftrace_probe_ops *ops)
3908{
3909        __unregister_ftrace_function_probe(glob, ops, NULL, PROBE_TEST_FUNC);
3910}
3911
3912void unregister_ftrace_function_probe_all(char *glob)
3913{
3914        __unregister_ftrace_function_probe(glob, NULL, NULL, 0);
3915}
3916
3917static LIST_HEAD(ftrace_commands);
3918static DEFINE_MUTEX(ftrace_cmd_mutex);
3919
3920/*
3921 * Currently we only register ftrace commands from __init, so mark this
3922 * __init too.
3923 */
3924__init int register_ftrace_command(struct ftrace_func_command *cmd)
3925{
3926        struct ftrace_func_command *p;
3927        int ret = 0;
3928
3929        mutex_lock(&ftrace_cmd_mutex);
3930        list_for_each_entry(p, &ftrace_commands, list) {
3931                if (strcmp(cmd->name, p->name) == 0) {
3932                        ret = -EBUSY;
3933                        goto out_unlock;
3934                }
3935        }
3936        list_add(&cmd->list, &ftrace_commands);
3937 out_unlock:
3938        mutex_unlock(&ftrace_cmd_mutex);
3939
3940        return ret;
3941}
3942
3943/*
3944 * Currently we only unregister ftrace commands from __init, so mark
3945 * this __init too.
3946 */
3947__init int unregister_ftrace_command(struct ftrace_func_command *cmd)
3948{
3949        struct ftrace_func_command *p, *n;
3950        int ret = -ENODEV;
3951
3952        mutex_lock(&ftrace_cmd_mutex);
3953        list_for_each_entry_safe(p, n, &ftrace_commands, list) {
3954                if (strcmp(cmd->name, p->name) == 0) {
3955                        ret = 0;
3956                        list_del_init(&p->list);
3957                        goto out_unlock;
3958                }
3959        }
3960 out_unlock:
3961        mutex_unlock(&ftrace_cmd_mutex);
3962
3963        return ret;
3964}
3965
3966static int ftrace_process_regex(struct ftrace_hash *hash,
3967                                char *buff, int len, int enable)
3968{
3969        char *func, *command, *next = buff;
3970        struct ftrace_func_command *p;
3971        int ret = -EINVAL;
3972
3973        func = strsep(&next, ":");
3974
3975        if (!next) {
3976                ret = ftrace_match_records(hash, func, len);
3977                if (!ret)
3978                        ret = -EINVAL;
3979                if (ret < 0)
3980                        return ret;
3981                return 0;
3982        }
3983
3984        /* command found */
3985
3986        command = strsep(&next, ":");
3987
3988        mutex_lock(&ftrace_cmd_mutex);
3989        list_for_each_entry(p, &ftrace_commands, list) {
3990                if (strcmp(p->name, command) == 0) {
3991                        ret = p->func(hash, func, command, next, enable);
3992                        goto out_unlock;
3993                }
3994        }
3995 out_unlock:
3996        mutex_unlock(&ftrace_cmd_mutex);
3997
3998        return ret;
3999}
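
/*
 * Input format parsed above:
 *
 *        <function-glob>                  plain filter/notrace match
 *        <function-glob>:<cmd>[:<param>]  dispatched to the registered
 *                                         ftrace_func_command, e.g. "mod"
 */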
4000
4001static ssize_t
4002ftrace_regex_write(struct file *file, const char __user *ubuf,
4003                   size_t cnt, loff_t *ppos, int enable)
4004{
4005        struct ftrace_iterator *iter;
4006        struct trace_parser *parser;
4007        ssize_t ret, read;
4008
4009        if (!cnt)
4010                return 0;
4011
4012        if (file->f_mode & FMODE_READ) {
4013                struct seq_file *m = file->private_data;
4014                iter = m->private;
4015        } else
4016                iter = file->private_data;
4017
4018        if (unlikely(ftrace_disabled))
4019                return -ENODEV;
4020
4021        /* iter->hash is a local copy, so we don't need regex_lock */
4022
4023        parser = &iter->parser;
4024        read = trace_get_user(parser, ubuf, cnt, ppos);
4025
4026        if (read >= 0 && trace_parser_loaded(parser) &&
4027            !trace_parser_cont(parser)) {
4028                ret = ftrace_process_regex(iter->hash, parser->buffer,
4029                                           parser->idx, enable);
4030                trace_parser_clear(parser);
4031                if (ret < 0)
4032                        goto out;
4033        }
4034
4035        ret = read;
4036 out:
4037        return ret;
4038}
4039
4040ssize_t
4041ftrace_filter_write(struct file *file, const char __user *ubuf,
4042                    size_t cnt, loff_t *ppos)
4043{
4044        return ftrace_regex_write(file, ubuf, cnt, ppos, 1);
4045}
4046
4047ssize_t
4048ftrace_notrace_write(struct file *file, const char __user *ubuf,
4049                     size_t cnt, loff_t *ppos)
4050{
4051        return ftrace_regex_write(file, ubuf, cnt, ppos, 0);
4052}
4053
4054static int
4055ftrace_match_addr(struct ftrace_hash *hash, unsigned long ip, int remove)
4056{
4057        struct ftrace_func_entry *entry;
4058
4059        if (!ftrace_location(ip))
4060                return -EINVAL;
4061
4062        if (remove) {
4063                entry = ftrace_lookup_ip(hash, ip);
4064                if (!entry)
4065                        return -ENOENT;
4066                free_hash_entry(hash, entry);
4067                return 0;
4068        }
4069
4070        return add_hash_entry(hash, ip);
4071}
4072
4073static void ftrace_ops_update_code(struct ftrace_ops *ops,
4074                                   struct ftrace_ops_hash *old_hash)
4075{
4076        struct ftrace_ops *op;
4077
4078        if (!ftrace_enabled)
4079                return;
4080
4081        if (ops->flags & FTRACE_OPS_FL_ENABLED) {
4082                ftrace_run_modify_code(ops, FTRACE_UPDATE_CALLS, old_hash);
4083                return;
4084        }
4085
4086        /*
4087         * If this is the shared global_ops filter, then we need to
4088         * check if there is another ops that shares it and is enabled.
4089         * If so, we still need to run the modify code.
4090         */
4091        if (ops->func_hash != &global_ops.local_hash)
4092                return;
4093
4094        do_for_each_ftrace_op(op, ftrace_ops_list) {
4095                if (op->func_hash == &global_ops.local_hash &&
4096                    op->flags & FTRACE_OPS_FL_ENABLED) {
4097                        ftrace_run_modify_code(op, FTRACE_UPDATE_CALLS, old_hash);
4098                        /* Only need to do this once */
4099                        return;
4100                }
4101        } while_for_each_ftrace_op(op);
4102}
4103
4104static int
4105ftrace_set_hash(struct ftrace_ops *ops, unsigned char *buf, int len,
4106                unsigned long ip, int remove, int reset, int enable)
4107{
4108        struct ftrace_hash **orig_hash;
4109        struct ftrace_ops_hash old_hash_ops;
4110        struct ftrace_hash *old_hash;
4111        struct ftrace_hash *hash;
4112        int ret;
4113
4114        if (unlikely(ftrace_disabled))
4115                return -ENODEV;
4116
4117        mutex_lock(&ops->func_hash->regex_lock);
4118
4119        if (enable)
4120                orig_hash = &ops->func_hash->filter_hash;
4121        else
4122                orig_hash = &ops->func_hash->notrace_hash;
4123
4124        if (reset)
4125                hash = alloc_ftrace_hash(FTRACE_HASH_DEFAULT_BITS);
4126        else
4127                hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash);
4128
4129        if (!hash) {
4130                ret = -ENOMEM;
4131                goto out_regex_unlock;
4132        }
4133
4134        if (buf && !ftrace_match_records(hash, buf, len)) {
4135                ret = -EINVAL;
4136                goto out_regex_unlock;
4137        }
4138        if (ip) {
4139                ret = ftrace_match_addr(hash, ip, remove);
4140                if (ret < 0)
4141                        goto out_regex_unlock;
4142        }
4143
4144        mutex_lock(&ftrace_lock);
4145        old_hash = *orig_hash;
4146        old_hash_ops.filter_hash = ops->func_hash->filter_hash;
4147        old_hash_ops.notrace_hash = ops->func_hash->notrace_hash;
4148        ret = ftrace_hash_move(ops, enable, orig_hash, hash);
4149        if (!ret) {
4150                ftrace_ops_update_code(ops, &old_hash_ops);
4151                free_ftrace_hash_rcu(old_hash);
4152        }
4153        mutex_unlock(&ftrace_lock);
4154
4155 out_regex_unlock:
4156        mutex_unlock(&ops->func_hash->regex_lock);
4157
4158        free_ftrace_hash(hash);
4159        return ret;
4160}
4161
4162static int
4163ftrace_set_addr(struct ftrace_ops *ops, unsigned long ip, int remove,
4164                int reset, int enable)
4165{
4166        return ftrace_set_hash(ops, 0, 0, ip, remove, reset, enable);
4167}
4168
4169/**
4170 * ftrace_set_filter_ip - set a function to filter on in ftrace by address
4171 * @ops - the ops to set the filter with
4172 * @ip - the address to add to or remove from the filter.
4173 * @remove - non zero to remove the ip from the filter
4174 * @reset - non zero to reset all filters before applying this filter.
4175 *
4176 * Filters denote which functions should be enabled when tracing is enabled.
4177 * If @ip is NULL, it fails to update the filter.
4178 */
4179int ftrace_set_filter_ip(struct ftrace_ops *ops, unsigned long ip,
4180                         int remove, int reset)
4181{
4182        ftrace_ops_init(ops);
4183        return ftrace_set_addr(ops, ip, remove, reset, 1);
4184}
4185EXPORT_SYMBOL_GPL(ftrace_set_filter_ip);
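
/*
 * Illustrative call (hypothetical "my_ops"): filter on exactly one
 * function by address, resetting any previous filter first:
 *
 *        ret = ftrace_set_filter_ip(&my_ops, (unsigned long)schedule, 0, 1);
 *
 * This assumes the architecture places the ftrace call site at the
 * function entry (as x86 fentry does); otherwise ftrace_location()
 * rejects the address with -EINVAL (see ftrace_match_addr() above).
 */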
4186
4187static int
4188ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len,
4189                 int reset, int enable)
4190{
4191        return ftrace_set_hash(ops, buf, len, 0, 0, reset, enable);
4192}
4193
4194/**
4195 * ftrace_set_filter - set a function to filter on in ftrace
4196 * @ops - the ops to set the filter with
4197 * @buf - the string that holds the function filter text.
4198 * @len - the length of the string.
4199 * @reset - non zero to reset all filters before applying this filter.
4200 *
4201 * Filters denote which functions should be enabled when tracing is enabled.
4202 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
4203 */
4204int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
4205                       int len, int reset)
4206{
4207        ftrace_ops_init(ops);
4208        return ftrace_set_regex(ops, buf, len, reset, 1);
4209}
4210EXPORT_SYMBOL_GPL(ftrace_set_filter);
4211
4212/**
4213 * ftrace_set_notrace - set a function to not trace in ftrace
4214 * @ops - the ops to set the notrace filter with
4215 * @buf - the string that holds the function notrace text.
4216 * @len - the length of the string.
4217 * @reset - non zero to reset all filters before applying this filter.
4218 *
4219 * Notrace Filters denote which functions should not be enabled when tracing
4220 * is enabled. If @buf is NULL and reset is set, all functions will be enabled
4221 * for tracing.
4222 */
4223int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
4224                        int len, int reset)
4225{
4226        ftrace_ops_init(ops);
4227        return ftrace_set_regex(ops, buf, len, reset, 0);
4228}
4229EXPORT_SYMBOL_GPL(ftrace_set_notrace);
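
/*
 * Illustrative calls (hypothetical "my_ops"): trace everything under
 * "sched_*" except "sched_clock*":
 *
 *        ftrace_set_filter(&my_ops, "sched_*", strlen("sched_*"), 1);
 *        ftrace_set_notrace(&my_ops, "sched_clock*", strlen("sched_clock*"), 1);
 *
 * A subsequent register_ftrace_function(&my_ops) then only traces the
 * remaining matches.
 */
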
4230/**
4231 * ftrace_set_global_filter - set a function to filter on with global tracers
4232 * @buf - the string that holds the function filter text.
4233 * @len - the length of the string.
4234 * @reset - non zero to reset all filters before applying this filter.
4235 *
4236 * Filters denote which functions should be enabled when tracing is enabled.
4237 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
4238 */
4239void ftrace_set_global_filter(unsigned char *buf, int len, int reset)
4240{
4241        ftrace_set_regex(&global_ops, buf, len, reset, 1);
4242}
4243EXPORT_SYMBOL_GPL(ftrace_set_global_filter);
4244
4245/**
4246 * ftrace_set_global_notrace - set a function to not trace with global tracers
4247 * @buf - the string that holds the function notrace text.
4248 * @len - the length of the string.
4249 * @reset - non zero to reset all filters before applying this filter.
4250 *
4251 * Notrace Filters denote which functions should not be enabled when tracing
4252 * is enabled. If @buf is NULL and reset is set, all functions will be enabled
4253 * for tracing.
4254 */
4255void ftrace_set_global_notrace(unsigned char *buf, int len, int reset)
4256{
4257        ftrace_set_regex(&global_ops, buf, len, reset, 0);
4258}
4259EXPORT_SYMBOL_GPL(ftrace_set_global_notrace);
4260
4261/*
4262 * command line interface to allow users to set filters on boot up.
4263 */
4264#define FTRACE_FILTER_SIZE              COMMAND_LINE_SIZE
4265static char ftrace_notrace_buf[FTRACE_FILTER_SIZE] __initdata;
4266static char ftrace_filter_buf[FTRACE_FILTER_SIZE] __initdata;
4267
4268/* Used by function selftest to not test if filter is set */
4269bool ftrace_filter_param __initdata;
4270
4271static int __init set_ftrace_notrace(char *str)
4272{
4273        ftrace_filter_param = true;
4274        strlcpy(ftrace_notrace_buf, str, FTRACE_FILTER_SIZE);
4275        return 1;
4276}
4277__setup("ftrace_notrace=", set_ftrace_notrace);
4278
4279static int __init set_ftrace_filter(char *str)
4280{
4281        ftrace_filter_param = true;
4282        strlcpy(ftrace_filter_buf, str, FTRACE_FILTER_SIZE);
4283        return 1;
4284}
4285__setup("ftrace_filter=", set_ftrace_filter);
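
/*
 * Example kernel command line usage of the two parameters above;
 * patterns are comma separated, as parsed by ftrace_set_early_filter()
 * below:
 *
 *        ftrace_filter=sched*,*idle*  ftrace_notrace=*spin_lock*
 */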
4286
4287#ifdef CONFIG_FUNCTION_GRAPH_TRACER
4288static char ftrace_graph_buf[FTRACE_FILTER_SIZE] __initdata;
4289static char ftrace_graph_notrace_buf[FTRACE_FILTER_SIZE] __initdata;
4290static int ftrace_set_func(unsigned long *array, int *idx, int size, char *buffer);
4291
4292static unsigned long save_global_trampoline;
4293static unsigned long save_global_flags;
4294
4295static int __init set_graph_function(char *str)
4296{
4297        strlcpy(ftrace_graph_buf, str, FTRACE_FILTER_SIZE);
4298        return 1;
4299}
4300__setup("ftrace_graph_filter=", set_graph_function);
4301
4302static int __init set_graph_notrace_function(char *str)
4303{
4304        strlcpy(ftrace_graph_notrace_buf, str, FTRACE_FILTER_SIZE);
4305        return 1;
4306}
4307__setup("ftrace_graph_notrace=", set_graph_notrace_function);
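
/*
 * Example (illustrative): limit the function graph tracer from boot:
 *
 *        ftrace_graph_filter=kfree,vfs_read  ftrace_graph_notrace=rcu_*
 *
 * Each comma-separated entry is one expression, handled by
 * set_ftrace_early_graph() below.
 */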
4308
4309static void __init set_ftrace_early_graph(char *buf, int enable)
4310{
4311        int ret;
4312        char *func;
4313        unsigned long *table = ftrace_graph_funcs;
4314        int *count = &ftrace_graph_count;
4315
4316        if (!enable) {
4317                table = ftrace_graph_notrace_funcs;
4318                count = &ftrace_graph_notrace_count;
4319        }
4320
4321        while (buf) {
4322                func = strsep(&buf, ",");
4323                /* we allow only one expression at a time */
4324                ret = ftrace_set_func(table, count, FTRACE_GRAPH_MAX_FUNCS, func);
4325                if (ret)
4326                        printk(KERN_DEBUG "ftrace: function %s not traceable\n",
4327                               func);
4328        }
4329}
4330#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
4331
4332void __init
4333ftrace_set_early_filter(struct ftrace_ops *ops, char *buf, int enable)
4334{
4335        char *func;
4336
4337        ftrace_ops_init(ops);
4338
4339        while (buf) {
4340                func = strsep(&buf, ",");
4341                ftrace_set_regex(ops, func, strlen(func), 0, enable);
4342        }
4343}
4344
4345static void __init set_ftrace_early_filters(void)
4346{
4347        if (ftrace_filter_buf[0])
4348                ftrace_set_early_filter(&global_ops, ftrace_filter_buf, 1);
4349        if (ftrace_notrace_buf[0])
4350                ftrace_set_early_filter(&global_ops, ftrace_notrace_buf, 0);
4351#ifdef CONFIG_FUNCTION_GRAPH_TRACER
4352        if (ftrace_graph_buf[0])
4353                set_ftrace_early_graph(ftrace_graph_buf, 1);
4354        if (ftrace_graph_notrace_buf[0])
4355                set_ftrace_early_graph(ftrace_graph_notrace_buf, 0);
4356#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
4357}
4358
4359int ftrace_regex_release(struct inode *inode, struct file *file)
4360{
4361        struct seq_file *m = (struct seq_file *)file->private_data;
4362        struct ftrace_ops_hash old_hash_ops;
4363        struct ftrace_iterator *iter;
4364        struct ftrace_hash **orig_hash;
4365        struct ftrace_hash *old_hash;
4366        struct trace_parser *parser;
4367        int filter_hash;
4368        int ret;
4369
4370        if (file->f_mode & FMODE_READ) {
4371                iter = m->private;
4372                seq_release(inode, file);
4373        } else
4374                iter = file->private_data;
4375
4376        parser = &iter->parser;
4377        if (trace_parser_loaded(parser)) {
4378                parser->buffer[parser->idx] = 0;
4379                ftrace_match_records(iter->hash, parser->buffer, parser->idx);
4380        }
4381
4382        trace_parser_put(parser);
4383
4384        mutex_lock(&iter->ops->func_hash->regex_lock);
4385
4386        if (file->f_mode & FMODE_WRITE) {
4387                filter_hash = !!(iter->flags & FTRACE_ITER_FILTER);
4388
4389                if (filter_hash)
4390                        orig_hash = &iter->ops->func_hash->filter_hash;
4391                else
4392                        orig_hash = &iter->ops->func_hash->notrace_hash;
4393
4394                mutex_lock(&ftrace_lock);
4395                old_hash = *orig_hash;
4396                old_hash_ops.filter_hash = iter->ops->func_hash->filter_hash;
4397                old_hash_ops.notrace_hash = iter->ops->func_hash->notrace_hash;
4398                ret = ftrace_hash_move(iter->ops, filter_hash,
4399                                       orig_hash, iter->hash);
4400                if (!ret) {
4401                        ftrace_ops_update_code(iter->ops, &old_hash_ops);
4402                        free_ftrace_hash_rcu(old_hash);
4403                }
4404                mutex_unlock(&ftrace_lock);
4405        }
4406
4407        mutex_unlock(&iter->ops->func_hash->regex_lock);
4408        free_ftrace_hash(iter->hash);
4409        kfree(iter);
4410
4411        return 0;
4412}
4413
4414static const struct file_operations ftrace_avail_fops = {
4415        .open = ftrace_avail_open,
4416        .read = seq_read,
4417        .llseek = seq_lseek,
4418        .release = seq_release_private,
4419};
4420
4421static const struct file_operations ftrace_enabled_fops = {
4422        .open = ftrace_enabled_open,
4423        .read = seq_read,
4424        .llseek = seq_lseek,
4425        .release = seq_release_private,
4426};
4427
4428static const struct file_operations ftrace_filter_fops = {
4429        .open = ftrace_filter_open,
4430        .read = seq_read,
4431        .write = ftrace_filter_write,
4432        .llseek = tracing_lseek,
4433        .release = ftrace_regex_release,
4434};
4435
4436static const struct file_operations ftrace_notrace_fops = {
4437        .open = ftrace_notrace_open,
4438        .read = seq_read,
4439        .write = ftrace_notrace_write,
4440        .llseek = tracing_lseek,
4441        .release = ftrace_regex_release,
4442};
4443
4444#ifdef CONFIG_FUNCTION_GRAPH_TRACER
4445
4446static DEFINE_MUTEX(graph_lock);
4447
4448int ftrace_graph_count;
4449int ftrace_graph_notrace_count;
4450unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS] __read_mostly;
4451unsigned long ftrace_graph_notrace_funcs[FTRACE_GRAPH_MAX_FUNCS] __read_mostly;
4452
4453struct ftrace_graph_data {
4454        unsigned long *table;
4455        size_t size;
4456        int *count;
4457        const struct seq_operations *seq_ops;
4458};
4459
4460static void *
4461__g_next(struct seq_file *m, loff_t *pos)
4462{
4463        struct ftrace_graph_data *fgd = m->private;
4464
4465        if (*pos >= *fgd->count)
4466                return NULL;
4467        return &fgd->table[*pos];
4468}
4469
4470static void *
4471g_next(struct seq_file *m, void *v, loff_t *pos)
4472{
4473        (*pos)++;
4474        return __g_next(m, pos);
4475}
4476
4477static void *g_start(struct seq_file *m, loff_t *pos)
4478{
4479        struct ftrace_graph_data *fgd = m->private;
4480
4481        mutex_lock(&graph_lock);
4482
4483        /* Nothing set; tell g_show to print that all functions are enabled */
4484        if (!*fgd->count && !*pos)
4485                return (void *)1;
4486
4487        return __g_next(m, pos);
4488}
4489
4490static void g_stop(struct seq_file *m, void *p)
4491{
4492        mutex_unlock(&graph_lock);
4493}
4494
4495static int g_show(struct seq_file *m, void *v)
4496{
4497        unsigned long *ptr = v;
4498
4499        if (!ptr)
4500                return 0;
4501
4502        if (ptr == (unsigned long *)1) {
4503                struct ftrace_graph_data *fgd = m->private;
4504
4505                if (fgd->table == ftrace_graph_funcs)
4506                        seq_puts(m, "#### all functions enabled ####\n");
4507                else
4508                        seq_puts(m, "#### no functions disabled ####\n");
4509                return 0;
4510        }
4511
4512        seq_printf(m, "%ps\n", (void *)*ptr);
4513
4514        return 0;
4515}
4516
4517static const struct seq_operations ftrace_graph_seq_ops = {
4518        .start = g_start,
4519        .next = g_next,
4520        .stop = g_stop,
4521        .show = g_show,
4522};
4523
4524static int
4525__ftrace_graph_open(struct inode *inode, struct file *file,
4526                    struct ftrace_graph_data *fgd)
4527{
4528        int ret = 0;
4529
4530        mutex_lock(&graph_lock);
4531        if ((file->f_mode & FMODE_WRITE) &&
4532            (file->f_flags & O_TRUNC)) {
4533                *fgd->count = 0;
4534                memset(fgd->table, 0, fgd->size * sizeof(*fgd->table));
4535        }
4536        mutex_unlock(&graph_lock);
4537
4538        if (file->f_mode & FMODE_READ) {
4539                ret = seq_open(file, fgd->seq_ops);
4540                if (!ret) {
4541                        struct seq_file *m = file->private_data;
4542                        m->private = fgd;
4543                }
4544        } else
4545                file->private_data = fgd;
4546
4547        return ret;
4548}
4549
4550static int
4551ftrace_graph_open(struct inode *inode, struct file *file)
4552{
4553        struct ftrace_graph_data *fgd;
4554
4555        if (unlikely(ftrace_disabled))
4556                return -ENODEV;
4557
4558        fgd = kmalloc(sizeof(*fgd), GFP_KERNEL);
4559        if (fgd == NULL)
4560                return -ENOMEM;
4561
4562        fgd->table = ftrace_graph_funcs;
4563        fgd->size = FTRACE_GRAPH_MAX_FUNCS;
4564        fgd->count = &ftrace_graph_count;
4565        fgd->seq_ops = &ftrace_graph_seq_ops;
4566
4567        return __ftrace_graph_open(inode, file, fgd);
4568}
4569
4570static int
4571ftrace_graph_notrace_open(struct inode *inode, struct file *file)
4572{
4573        struct ftrace_graph_data *fgd;
4574
4575        if (unlikely(ftrace_disabled))
4576                return -ENODEV;
4577
4578        fgd = kmalloc(sizeof(*fgd), GFP_KERNEL);
4579        if (fgd == NULL)
4580                return -ENOMEM;
4581
4582        fgd->table = ftrace_graph_notrace_funcs;
4583        fgd->size = FTRACE_GRAPH_MAX_FUNCS;
4584        fgd->count = &ftrace_graph_notrace_count;
4585        fgd->seq_ops = &ftrace_graph_seq_ops;
4586
4587        return __ftrace_graph_open(inode, file, fgd);
4588}
4589
4590static int
4591ftrace_graph_release(struct inode *inode, struct file *file)
4592{
4593        if (file->f_mode & FMODE_READ) {
4594                struct seq_file *m = file->private_data;
4595
4596                kfree(m->private);
4597                seq_release(inode, file);
4598        } else {
4599                kfree(file->private_data);
4600        }
4601
4602        return 0;
4603}
4604
4605static int
4606ftrace_set_func(unsigned long *array, int *idx, int size, char *buffer)
4607{
4608        struct dyn_ftrace *rec;
4609        struct ftrace_page *pg;
4610        int search_len;
4611        int fail = 1;
4612        int type, not;
4613        char *search;
4614        bool exists;
4615        int i;
4616
4617        /* decode regex */
4618        type = filter_parse_regex(buffer, strlen(buffer), &search, &not);
4619        if (!not && *idx >= size)
4620                return -EBUSY;
4621
4622        search_len = strlen(search);
4623
4624        mutex_lock(&ftrace_lock);
4625
4626        if (unlikely(ftrace_disabled)) {
4627                mutex_unlock(&ftrace_lock);
4628                return -ENODEV;
4629        }
4630
4631        do_for_each_ftrace_rec(pg, rec) {
4632
4633                if (ftrace_match_record(rec, NULL, search, search_len, type)) {
4634                        /* if it is in the array */
4635                        exists = false;
4636                        for (i = 0; i < *idx; i++) {
4637                                if (array[i] == rec->ip) {
4638                                        exists = true;
4639                                        break;
4640                                }
4641                        }
4642
4643                        if (!not) {
4644                                fail = 0;
4645                                if (!exists) {
4646                                        array[(*idx)++] = rec->ip;
4647                                        if (*idx >= size)
4648                                                goto out;
4649                                }
4650                        } else {
4651                                if (exists) {
4652                                        array[i] = array[--(*idx)];
4653                                        array[*idx] = 0;
4654                                        fail = 0;
4655                                }
4656                        }
4657                }
4658        } while_for_each_ftrace_rec();
4659out:
4660        mutex_unlock(&ftrace_lock);
4661
4662        if (fail)
4663                return -EINVAL;
4664
4665        return 0;
4666}
4667
4668static ssize_t
4669ftrace_graph_write(struct file *file, const char __user *ubuf,
4670                   size_t cnt, loff_t *ppos)
4671{
4672        struct trace_parser parser;
4673        ssize_t read, ret = 0;
4674        struct ftrace_graph_data *fgd = file->private_data;
4675
4676        if (!cnt)
4677                return 0;
4678
4679        if (trace_parser_get_init(&parser, FTRACE_BUFF_MAX))
4680                return -ENOMEM;
4681
4682        read = trace_get_user(&parser, ubuf, cnt, ppos);
4683
4684        if (read >= 0 && trace_parser_loaded(&parser)) {
4685                parser.buffer[parser.idx] = 0;
4686
4687                mutex_lock(&graph_lock);
4688
4689                /* we allow only one expression at a time */
4690                ret = ftrace_set_func(fgd->table, fgd->count, fgd->size,
4691                                      parser.buffer);
4692
4693                mutex_unlock(&graph_lock);
4694        }
4695
4696        if (!ret)
4697                ret = read;
4698
4699        trace_parser_put(&parser);
4700
4701        return ret;
4702}
4703
4704static const struct file_operations ftrace_graph_fops = {
4705        .open           = ftrace_graph_open,
4706        .read           = seq_read,
4707        .write          = ftrace_graph_write,
4708        .llseek         = tracing_lseek,
4709        .release        = ftrace_graph_release,
4710};
4711
4712static const struct file_operations ftrace_graph_notrace_fops = {
4713        .open           = ftrace_graph_notrace_open,
4714        .read           = seq_read,
4715        .write          = ftrace_graph_write,
4716        .llseek         = tracing_lseek,
4717        .release        = ftrace_graph_release,
4718};
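
/*
 * User-space view of the two files above (illustrative session; a
 * glob expands to every matching function in the table):
 *
 *        # echo schedule > set_graph_function
 *        # echo 'kmem_*' >> set_graph_function
 *        # cat set_graph_function
 *        schedule
 *        kmem_cache_alloc
 *
 * Writes go through ftrace_graph_write(); reads walk the table via
 * g_start()/g_next()/g_show().
 */
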
4719#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
4720
4721void ftrace_create_filter_files(struct ftrace_ops *ops,
4722                                struct dentry *parent)
4723{
4724
4725        trace_create_file("set_ftrace_filter", 0644, parent,
4726                          ops, &ftrace_filter_fops);
4727
4728        trace_create_file("set_ftrace_notrace", 0644, parent,
4729                          ops, &ftrace_notrace_fops);
4730}
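
/*
 * The files created above accept the glob syntax of ftrace_match()
 * (illustrative session):
 *
 *        # echo 'vfs_*' > set_ftrace_filter
 *        # echo '!vfs_fstat' >> set_ftrace_filter
 *
 * The '!' prefix removes a previously added match (the "not" path in
 * match_records()).
 */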
4731
4732/*
4733 * The name "destroy_filter_files" is really a misnomer. Although
4734 * in the future it may actually delete the files, currently it is
4735 * really intended to make sure the ops passed in are disabled
4736 * and that when this function returns, the caller is free to
4737 * free the ops.
4738 *
4739 * The "destroy" name is only to match the "create" name that this
4740 * should be paired with.
4741 */
4742void ftrace_destroy_filter_files(struct ftrace_ops *ops)
4743{
4744        mutex_lock(&ftrace_lock);
4745        if (ops->flags & FTRACE_OPS_FL_ENABLED)
4746                ftrace_shutdown(ops, 0);
4747        ops->flags |= FTRACE_OPS_FL_DELETED;
4748        mutex_unlock(&ftrace_lock);
4749}
4750
4751static __init int ftrace_init_dyn_tracefs(struct dentry *d_tracer)
4752{
4753
4754        trace_create_file("available_filter_functions", 0444,
4755                        d_tracer, NULL, &ftrace_avail_fops);
4756
4757        trace_create_file("enabled_functions", 0444,
4758                        d_tracer, NULL, &ftrace_enabled_fops);
4759
4760        ftrace_create_filter_files(&global_ops, d_tracer);
4761
4762#ifdef CONFIG_FUNCTION_GRAPH_TRACER
4763        trace_create_file("set_graph_function", 0444, d_tracer,
4764                                    NULL,
4765                                    &ftrace_graph_fops);
4766        trace_create_file("set_graph_notrace", 0444, d_tracer,
4767                                    NULL,
4768                                    &ftrace_graph_notrace_fops);
4769#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
4770
4771        return 0;
4772}
4773
4774static int ftrace_cmp_ips(const void *a, const void *b)
4775{
4776        const unsigned long *ipa = a;
4777        const unsigned long *ipb = b;
4778
4779        if (*ipa > *ipb)
4780                return 1;
4781        if (*ipa < *ipb)
4782                return -1;
4783        return 0;
4784}
4785
4786static void ftrace_swap_ips(void *a, void *b, int size)
4787{
4788        unsigned long *ipa = a;
4789        unsigned long *ipb = b;
4790        unsigned long t;
4791
4792        t = *ipa;
4793        *ipa = *ipb;
4794        *ipb = t;
4795}
4796
4797static int ftrace_process_locs(struct module *mod,
4798                               unsigned long *start,
4799                               unsigned long *end)
4800{
4801        struct ftrace_page *start_pg;
4802        struct ftrace_page *pg;
4803        struct dyn_ftrace *rec;
4804        unsigned long count;
4805        unsigned long *p;
4806        unsigned long addr;
4807        unsigned long flags = 0; /* Shut up gcc */
4808        int ret = -ENOMEM;
4809
4810        count = end - start;
4811
4812        if (!count)
4813                return 0;
4814
4815        sort(start, count, sizeof(*start),
4816             ftrace_cmp_ips, ftrace_swap_ips);
4817
4818        start_pg = ftrace_allocate_pages(count);
4819        if (!start_pg)
4820                return -ENOMEM;
4821
4822        mutex_lock(&ftrace_lock);
4823
4824        /*
4825         * The core kernel and each module need their own pages, as
4826         * modules will free them when they are removed.
4827         * Force a new page to be allocated for modules.
4828         */
4829        if (!mod) {
4830                WARN_ON(ftrace_pages || ftrace_pages_start);
4831                /* First initialization */
4832                ftrace_pages = ftrace_pages_start = start_pg;
4833        } else {
4834                if (!ftrace_pages)
4835                        goto out;
4836
4837                if (WARN_ON(ftrace_pages->next)) {
4838                        /* Hmm, we have free pages? */
4839                        while (ftrace_pages->next)
4840                                ftrace_pages = ftrace_pages->next;
4841                }
4842
4843                ftrace_pages->next = start_pg;
4844        }
4845
4846        p = start;
4847        pg = start_pg;
4848        while (p < end) {
4849                addr = ftrace_call_adjust(*p++);
4850                /*
4851                 * Some architecture linkers will pad between
4852                 * the different mcount_loc sections of different
4853                 * object files to satisfy alignments.
4854                 * Skip any NULL pointers.
4855                 */
4856                if (!addr)
4857                        continue;
4858
4859                if (pg->index == pg->size) {
4860                        /* We should have allocated enough */
4861                        if (WARN_ON(!pg->next))
4862                                break;
4863                        pg = pg->next;
4864                }
4865
4866                rec = &pg->records[pg->index++];
4867                rec->ip = addr;
4868        }
4869
4870        /* We should have used all pages */
4871        WARN_ON(pg->next);
4872
4873        /* Assign the last page to ftrace_pages */
4874        ftrace_pages = pg;
4875
4876        /*
4877         * We only need to disable interrupts on start up
4878         * because we are modifying code that an interrupt
4879         * may execute, and the modification is not atomic.
4880         * But for modules, nothing runs the code we modify
4881         * until we are finished with it, and there's no
4882         * reason to cause large interrupt latencies while we do it.
4883         */
4884        if (!mod)
4885                local_irq_save(flags);
4886        ftrace_update_code(mod, start_pg);
4887        if (!mod)
4888                local_irq_restore(flags);
4889        ret = 0;
4890 out:
4891        mutex_unlock(&ftrace_lock);
4892
4893        return ret;
4894}
4895
4896#ifdef CONFIG_MODULES
4897
4898#define next_to_ftrace_page(p) container_of(p, struct ftrace_page, next)
4899
4900void ftrace_release_mod(struct module *mod)
4901{
4902        struct dyn_ftrace *rec;
4903        struct ftrace_page **last_pg;
4904        struct ftrace_page *pg;
4905        int order;
4906
4907        mutex_lock(&ftrace_lock);
4908
4909        if (ftrace_disabled)
4910                goto out_unlock;
4911
4912        /*
4913         * Each module has its own ftrace_pages; remove
4914         * them from the list.
4915         */
4916        last_pg = &ftrace_pages_start;
4917        for (pg = ftrace_pages_start; pg; pg = *last_pg) {
4918                rec = &pg->records[0];
4919                if (within_module_core(rec->ip, mod)) {
4920                        /*
4921                         * As core pages are first, the first
4922                         * page should never be a module page.
4923                         */
4924                        if (WARN_ON(pg == ftrace_pages_start))
4925                                goto out_unlock;
4926
4927                        /* Check if we are deleting the last page */
4928                        if (pg == ftrace_pages)
4929                                ftrace_pages = next_to_ftrace_page(last_pg);
4930
4931                        *last_pg = pg->next;
4932                        order = get_count_order(pg->size / ENTRIES_PER_PAGE);
4933                        free_pages((unsigned long)pg->records, order);
4934                        kfree(pg);
4935                } else
4936                        last_pg = &pg->next;
4937        }
4938 out_unlock:
4939        mutex_unlock(&ftrace_lock);
4940}
4941
4942static void ftrace_init_module(struct module *mod,
4943                               unsigned long *start, unsigned long *end)
4944{
4945        if (ftrace_disabled || start == end)
4946                return;
4947        ftrace_process_locs(mod, start, end);
4948}
4949
4950void ftrace_module_init(struct module *mod)
4951{
4952        ftrace_init_module(mod, mod->ftrace_callsites,
4953                           mod->ftrace_callsites +
4954                           mod->num_ftrace_callsites);
4955}
4956
4957static int ftrace_module_notify_exit(struct notifier_block *self,
4958                                     unsigned long val, void *data)
4959{
4960        struct module *mod = data;
4961
4962        if (val == MODULE_STATE_GOING)
4963                ftrace_release_mod(mod);
4964
4965        return 0;
4966}
4967#else
4968static int ftrace_module_notify_exit(struct notifier_block *self,
4969                                     unsigned long val, void *data)
4970{
4971        return 0;
4972}
4973#endif /* CONFIG_MODULES */
4974
4975struct notifier_block ftrace_module_exit_nb = {
4976        .notifier_call = ftrace_module_notify_exit,
4977        .priority = INT_MIN,    /* Run after anything that can remove kprobes */
4978};
4979
4980void __init ftrace_init(void)
4981{
4982        extern unsigned long __start_mcount_loc[];
4983        extern unsigned long __stop_mcount_loc[];
4984        unsigned long count, flags;
4985        int ret;
4986
4987        local_irq_save(flags);
4988        ret = ftrace_dyn_arch_init();
4989        local_irq_restore(flags);
4990        if (ret)
4991                goto failed;
4992
4993        count = __stop_mcount_loc - __start_mcount_loc;
4994        if (!count) {
4995                pr_info("ftrace: No functions to be traced?\n");
4996                goto failed;
4997        }
4998
4999        pr_info("ftrace: allocating %ld entries in %ld pages\n",
5000                count, count / ENTRIES_PER_PAGE + 1);
5001
5002        last_ftrace_enabled = ftrace_enabled = 1;
5003
5004        ret = ftrace_process_locs(NULL,
5005                                  __start_mcount_loc,
5006                                  __stop_mcount_loc);
5007
5008        ret = register_module_notifier(&ftrace_module_exit_nb);
5009        if (ret)
5010                pr_warn("Failed to register trace ftrace module exit notifier\n");
5011
5012        set_ftrace_early_filters();
5013
5014        return;
5015 failed:
5016        ftrace_disabled = 1;
5017}
5018
5019/* Do nothing if arch does not support this */
5020void __weak arch_ftrace_update_trampoline(struct ftrace_ops *ops)
5021{
5022}
5023
5024static void ftrace_update_trampoline(struct ftrace_ops *ops)
5025{
5026
5027/*
5028 * Currently there's no safe way to free a trampoline when the kernel
5029 * is configured with PREEMPT. That is because a task could be preempted
5030 * while executing on the trampoline, and may stay preempted a long time
5031 * depending on the system load, and currently there's no way to know
5032 * when it will be off the trampoline. If the trampoline is freed
5033 * too early, when the task runs again, it will be executing on freed
5034 * memory and crash.
5035 */
5036#ifdef CONFIG_PREEMPT
5037        /* Currently, only non-dynamic ops can have a trampoline */
5038        if (ops->flags & FTRACE_OPS_FL_DYNAMIC)
5039                return;
5040#endif
5041
5042        arch_ftrace_update_trampoline(ops);
5043}
5044
5045#else
5046
5047static struct ftrace_ops global_ops = {
5048        .func                   = ftrace_stub,
5049        .flags                  = FTRACE_OPS_FL_RECURSION_SAFE |
5050                                  FTRACE_OPS_FL_INITIALIZED |
5051                                  FTRACE_OPS_FL_PID,
5052};
5053
5054static int __init ftrace_nodyn_init(void)
5055{
5056        ftrace_enabled = 1;
5057        return 0;
5058}
5059core_initcall(ftrace_nodyn_init);
5060
5061static inline int ftrace_init_dyn_tracefs(struct dentry *d_tracer) { return 0; }
5062static inline void ftrace_startup_enable(int command) { }
5063static inline void ftrace_startup_all(int command) { }
5064/* Keep as macros so we do not need to define the commands */
5065# define ftrace_startup(ops, command)                                   \
5066        ({                                                              \
5067                int ___ret = __register_ftrace_function(ops);           \
5068                if (!___ret)                                            \
5069                        (ops)->flags |= FTRACE_OPS_FL_ENABLED;          \
5070                ___ret;                                                 \
5071        })
5072# define ftrace_shutdown(ops, command)                                  \
5073        ({                                                              \
5074                int ___ret = __unregister_ftrace_function(ops);         \
5075                if (!___ret)                                            \
5076                        (ops)->flags &= ~FTRACE_OPS_FL_ENABLED;         \
5077                ___ret;                                                 \
5078        })
5079
5080# define ftrace_startup_sysctl()        do { } while (0)
5081# define ftrace_shutdown_sysctl()       do { } while (0)
5082
5083static inline int
5084ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip, void *regs)
5085{
5086        return 1;
5087}
5088
5089static void ftrace_update_trampoline(struct ftrace_ops *ops)
5090{
5091}
5092
5093#endif /* CONFIG_DYNAMIC_FTRACE */
5094
5095__init void ftrace_init_global_array_ops(struct trace_array *tr)
5096{
5097        tr->ops = &global_ops;
5098        tr->ops->private = tr;
5099}
5100
5101void ftrace_init_array_ops(struct trace_array *tr, ftrace_func_t func)
5102{
5103        /* If we filter on pids, update to use the pid function */
5104        if (tr->flags & TRACE_ARRAY_FL_GLOBAL) {
5105                if (WARN_ON(tr->ops->func != ftrace_stub))
5106                        pr_warn("ftrace ops had %pS for function\n",
5107                                tr->ops->func);
5108        }
5109        tr->ops->func = func;
5110        tr->ops->private = tr;
5111}
5112
5113void ftrace_reset_array_ops(struct trace_array *tr)
5114{
5115        tr->ops->func = ftrace_stub;
5116}
5117
5118static void
5119ftrace_ops_control_func(unsigned long ip, unsigned long parent_ip,
5120                        struct ftrace_ops *op, struct pt_regs *regs)
5121{
5122        if (unlikely(trace_recursion_test(TRACE_CONTROL_BIT)))
5123                return;
5124
5125        /*
5126         * Some of the ops may be dynamically allocated;
5127         * they must be freed after a synchronize_sched().
5128         */
5129        preempt_disable_notrace();
5130        trace_recursion_set(TRACE_CONTROL_BIT);
5131
5132        /*
5133         * Control functions (e.g. perf) use RCU. Only trace if
5134         * RCU is currently watching.
5135         */
5136        if (!rcu_is_watching())
5137                goto out;
5138
5139        do_for_each_ftrace_op(op, ftrace_control_list) {
5140                if (!(op->flags & FTRACE_OPS_FL_STUB) &&
5141                    !ftrace_function_local_disabled(op) &&
5142                    ftrace_ops_test(op, ip, regs))
5143                        op->func(ip, parent_ip, op, regs);
5144        } while_for_each_ftrace_op(op);
5145 out:
5146        trace_recursion_clear(TRACE_CONTROL_BIT);
5147        preempt_enable_notrace();
5148}
5149
5150static struct ftrace_ops control_ops = {
5151        .func   = ftrace_ops_control_func,
5152        .flags  = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED,
5153        INIT_OPS_HASH(control_ops)
5154};
5155
5156static inline void
5157__ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
5158                       struct ftrace_ops *ignored, struct pt_regs *regs)
5159{
5160        struct ftrace_ops *op;
5161        int bit;
5162
5163        bit = trace_test_and_set_recursion(TRACE_LIST_START, TRACE_LIST_MAX);
5164        if (bit < 0)
5165                return;
5166
5167        /*
5168         * Some of the ops may be dynamically allocated;
5169         * they must be freed after a synchronize_sched().
5170         */
5171        preempt_disable_notrace();
5172        do_for_each_ftrace_op(op, ftrace_ops_list) {
5173                if (ftrace_ops_test(op, ip, regs)) {
5174                        if (FTRACE_WARN_ON(!op->func)) {
5175                                pr_warn("op=%p %pS\n", op, op);
5176                                goto out;
5177                        }
5178                        op->func(ip, parent_ip, op, regs);
5179                }
5180        } while_for_each_ftrace_op(op);
5181out:
5182        preempt_enable_notrace();
5183        trace_clear_recursion(bit);
5184}
5185
5186/*
5187 * Some archs only support passing ip and parent_ip. Even though
5188 * the list function ignores the op parameter, we do not want any
5189 * C side effects, where a function is called without the caller
5190 * sending a third parameter.
5191 * Archs must support both regs and ftrace_ops at the same time:
5192 * if they support ftrace_ops, it is assumed they also support regs.
5193 * If callbacks want to use regs, they must either check for regs
5194 * being NULL, or depend on CONFIG_DYNAMIC_FTRACE_WITH_REGS.
5195 * Note, CONFIG_DYNAMIC_FTRACE_WITH_REGS expects a full pt_regs to be
5196 * saved. An architecture can pass partial regs with ftrace_ops and
5197 * still set ARCH_SUPPORTS_FTRACE_OPS.
5198 */
5199#if ARCH_SUPPORTS_FTRACE_OPS
5200static void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
5201                                 struct ftrace_ops *op, struct pt_regs *regs)
5202{
5203        __ftrace_ops_list_func(ip, parent_ip, NULL, regs);
5204}
5205#else
5206static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip)
5207{
5208        __ftrace_ops_list_func(ip, parent_ip, NULL, NULL);
5209}
5210#endif
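
/*
 * Illustrative sketch (hypothetical callback, not part of this file):
 * per the note above, a callback that wants to look at regs must
 * tolerate a NULL pt_regs unless it can depend on
 * CONFIG_DYNAMIC_FTRACE_WITH_REGS:
 *
 *	static void notrace my_callback(unsigned long ip,
 *					unsigned long parent_ip,
 *					struct ftrace_ops *op,
 *					struct pt_regs *regs)
 *	{
 *		if (!regs)
 *			return;
 *		... inspect regs here ...
 *	}
 */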
5211
5212/*
5213 * If there's only one function registered but it does not support
5214 * recursion, this function will be called by the mcount trampoline.
5215 * This function will handle recursion protection.
5216 */
5217static void ftrace_ops_recurs_func(unsigned long ip, unsigned long parent_ip,
5218                                   struct ftrace_ops *op, struct pt_regs *regs)
5219{
5220        int bit;
5221
5222        bit = trace_test_and_set_recursion(TRACE_LIST_START, TRACE_LIST_MAX);
5223        if (bit < 0)
5224                return;
5225
5226        op->func(ip, parent_ip, op, regs);
5227
5228        trace_clear_recursion(bit);
5229}
5230
5231/**
5232 * ftrace_ops_get_func - get the function a trampoline should call
5233 * @ops: the ops to get the function for
5234 *
5235 * Normally the mcount trampoline will call the ops->func, but there
5236 * are times that it should not. For example, if the ops does not
5237 * have its own recursion protection, then it should call the
5238 * ftrace_ops_recurs_func() instead.
5239 *
5240 * Returns the function that the trampoline should call for @ops.
5241 */
5242ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops)
5243{
5244        /*
5245         * If the func handles its own recursion, call it directly.
5246         * Otherwise call the recursion protected function that
5247         * will call the ftrace ops function.
5248         */
5249        if (!(ops->flags & FTRACE_OPS_FL_RECURSION_SAFE))
5250                return ftrace_ops_recurs_func;
5251
5252        return ops->func;
5253}
5254
5255static void clear_ftrace_swapper(void)
5256{
5257        struct task_struct *p;
5258        int cpu;
5259
5260        get_online_cpus();
5261        for_each_online_cpu(cpu) {
5262                p = idle_task(cpu);
5263                clear_tsk_trace_trace(p);
5264        }
5265        put_online_cpus();
5266}
5267
5268static void set_ftrace_swapper(void)
5269{
5270        struct task_struct *p;
5271        int cpu;
5272
5273        get_online_cpus();
5274        for_each_online_cpu(cpu) {
5275                p = idle_task(cpu);
5276                set_tsk_trace_trace(p);
5277        }
5278        put_online_cpus();
5279}
5280
5281static void clear_ftrace_pid(struct pid *pid)
5282{
5283        struct task_struct *p;
5284
5285        rcu_read_lock();
5286        do_each_pid_task(pid, PIDTYPE_PID, p) {
5287                clear_tsk_trace_trace(p);
5288        } while_each_pid_task(pid, PIDTYPE_PID, p);
5289        rcu_read_unlock();
5290
5291        put_pid(pid);
5292}
5293
5294static void set_ftrace_pid(struct pid *pid)
5295{
5296        struct task_struct *p;
5297
5298        rcu_read_lock();
5299        do_each_pid_task(pid, PIDTYPE_PID, p) {
5300                set_tsk_trace_trace(p);
5301        } while_each_pid_task(pid, PIDTYPE_PID, p);
5302        rcu_read_unlock();
5303}
5304
5305static void clear_ftrace_pid_task(struct pid *pid)
5306{
5307        if (pid == ftrace_swapper_pid)
5308                clear_ftrace_swapper();
5309        else
5310                clear_ftrace_pid(pid);
5311}
5312
5313static void set_ftrace_pid_task(struct pid *pid)
5314{
5315        if (pid == ftrace_swapper_pid)
5316                set_ftrace_swapper();
5317        else
5318                set_ftrace_pid(pid);
5319}
5320
5321static int ftrace_pid_add(int p)
5322{
5323        struct pid *pid;
5324        struct ftrace_pid *fpid;
5325        int ret = -EINVAL;
5326
5327        mutex_lock(&ftrace_lock);
5328
5329        if (!p)
5330                pid = ftrace_swapper_pid;
5331        else
5332                pid = find_get_pid(p);
5333
5334        if (!pid)
5335                goto out;
5336
5337        ret = 0;
5338
5339        list_for_each_entry(fpid, &ftrace_pids, list)
5340                if (fpid->pid == pid)
5341                        goto out_put;
5342
5343        ret = -ENOMEM;
5344
5345        fpid = kmalloc(sizeof(*fpid), GFP_KERNEL);
5346        if (!fpid)
5347                goto out_put;
5348
5349        list_add(&fpid->list, &ftrace_pids);
5350        fpid->pid = pid;
5351
5352        set_ftrace_pid_task(pid);
5353
5354        ftrace_update_pid_func();
5355
5356        ftrace_startup_all(0);
5357
5358        mutex_unlock(&ftrace_lock);
5359        return 0;
5360
5361out_put:
5362        if (pid != ftrace_swapper_pid)
5363                put_pid(pid);
5364
5365out:
5366        mutex_unlock(&ftrace_lock);
5367        return ret;
5368}
5369
5370static void ftrace_pid_reset(void)
5371{
5372        struct ftrace_pid *fpid, *safe;
5373
5374        mutex_lock(&ftrace_lock);
5375        list_for_each_entry_safe(fpid, safe, &ftrace_pids, list) {
5376                struct pid *pid = fpid->pid;
5377
5378                clear_ftrace_pid_task(pid);
5379
5380                list_del(&fpid->list);
5381                kfree(fpid);
5382        }
5383
5384        ftrace_update_pid_func();
5385        ftrace_startup_all(0);
5386
5387        mutex_unlock(&ftrace_lock);
5388}
5389
5390static void *fpid_start(struct seq_file *m, loff_t *pos)
5391{
5392        mutex_lock(&ftrace_lock);
5393
5394        if (!ftrace_pids_enabled() && (!*pos))
5395                return (void *) 1;
5396
5397        return seq_list_start(&ftrace_pids, *pos);
5398}
5399
5400static void *fpid_next(struct seq_file *m, void *v, loff_t *pos)
5401{
5402        if (v == (void *)1)
5403                return NULL;
5404
5405        return seq_list_next(v, &ftrace_pids, pos);
5406}
5407
5408static void fpid_stop(struct seq_file *m, void *p)
5409{
5410        mutex_unlock(&ftrace_lock);
5411}
5412
5413static int fpid_show(struct seq_file *m, void *v)
5414{
5415        const struct ftrace_pid *fpid = list_entry(v, struct ftrace_pid, list);
5416
5417        if (v == (void *)1) {
5418                seq_puts(m, "no pid\n");
5419                return 0;
5420        }
5421
5422        if (fpid->pid == ftrace_swapper_pid)
5423                seq_puts(m, "swapper tasks\n");
5424        else
5425                seq_printf(m, "%u\n", pid_vnr(fpid->pid));
5426
5427        return 0;
5428}
5429
5430static const struct seq_operations ftrace_pid_sops = {
5431        .start = fpid_start,
5432        .next = fpid_next,
5433        .stop = fpid_stop,
5434        .show = fpid_show,
5435};
5436
5437static int
5438ftrace_pid_open(struct inode *inode, struct file *file)
5439{
5440        int ret = 0;
5441
5442        if ((file->f_mode & FMODE_WRITE) &&
5443            (file->f_flags & O_TRUNC))
5444                ftrace_pid_reset();
5445
5446        if (file->f_mode & FMODE_READ)
5447                ret = seq_open(file, &ftrace_pid_sops);
5448
5449        return ret;
5450}
5451
5452static ssize_t
5453ftrace_pid_write(struct file *filp, const char __user *ubuf,
5454                   size_t cnt, loff_t *ppos)
5455{
5456        char buf[64], *tmp;
5457        long val;
5458        int ret;
5459
5460        if (cnt >= sizeof(buf))
5461                return -EINVAL;
5462
5463        if (copy_from_user(&buf, ubuf, cnt))
5464                return -EFAULT;
5465
5466        buf[cnt] = 0;
5467
5468        /*
5469         * Allow "echo > set_ftrace_pid" or "echo -n '' > set_ftrace_pid"
5470         * to clear the filter quietly.
5471         */
5472        tmp = strstrip(buf);
5473        if (strlen(tmp) == 0)
5474                return 1;
5475
5476        ret = kstrtol(tmp, 10, &val);
5477        if (ret < 0)
5478                return ret;
5479
5480        ret = ftrace_pid_add(val);
5481
5482        return ret ? ret : cnt;
5483}
5484
5485static int
5486ftrace_pid_release(struct inode *inode, struct file *file)
5487{
5488        if (file->f_mode & FMODE_READ)
5489                seq_release(inode, file);
5490
5491        return 0;
5492}
5493
5494static const struct file_operations ftrace_pid_fops = {
5495        .open           = ftrace_pid_open,
5496        .write          = ftrace_pid_write,
5497        .read           = seq_read,
5498        .llseek         = tracing_lseek,
5499        .release        = ftrace_pid_release,
5500};
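
/*
 * Usage sketch (assuming tracefs is mounted at /sys/kernel/tracing):
 *
 *	echo $$ > set_ftrace_pid	# trace only this shell's pid
 *	cat set_ftrace_pid		# show the current pid filter
 *	echo > set_ftrace_pid		# clear the filter quietly
 *
 * Writing 0 selects the swapper (idle) tasks, as handled by
 * ftrace_pid_add() above.
 */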
5501
5502static __init int ftrace_init_tracefs(void)
5503{
5504        struct dentry *d_tracer;
5505
5506        d_tracer = tracing_init_dentry();
5507        if (IS_ERR(d_tracer))
5508                return 0;
5509
5510        ftrace_init_dyn_tracefs(d_tracer);
5511
5512        trace_create_file("set_ftrace_pid", 0644, d_tracer,
5513                            NULL, &ftrace_pid_fops);
5514
5515        ftrace_profile_tracefs(d_tracer);
5516
5517        return 0;
5518}
5519fs_initcall(ftrace_init_tracefs);
5520
5521/**
5522 * ftrace_kill - kill ftrace
5523 *
5524 * This function should be used by panic code. It stops ftrace
5525 * but in a not so nice way: it simply marks ftrace as disabled
5526 * and points the trace function at a stub, with no cleanup.
5527 */
5528void ftrace_kill(void)
5529{
5530        ftrace_disabled = 1;
5531        ftrace_enabled = 0;
5532        clear_ftrace_function();
5533}
5534
5535/**
5536 * ftrace_is_dead - Test if ftrace is dead or not.
5537 */
5538int ftrace_is_dead(void)
5539{
5540        return ftrace_disabled;
5541}
5542
5543/**
5544 * register_ftrace_function - register a function for profiling
5545 * @ops: ops structure that holds the function for profiling.
5546 *
5547 * Register a function to be called by all functions in the
5548 * kernel.
5549 *
5550 * Note: @ops->func and all the functions it calls must be labeled
5551 *       with "notrace", otherwise it will go into a
5552 *       recursive loop.
5553 */
5554int register_ftrace_function(struct ftrace_ops *ops)
5555{
5556        int ret = -1;
5557
5558        ftrace_ops_init(ops);
5559
5560        mutex_lock(&ftrace_lock);
5561
5562        ret = ftrace_startup(ops, 0);
5563
5564        mutex_unlock(&ftrace_lock);
5565
5566        return ret;
5567}
5568EXPORT_SYMBOL_GPL(register_ftrace_function);
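
/*
 * Minimal registration sketch (hypothetical ops, not part of this
 * file). The callback and everything it calls must be notrace, per
 * the note above; FTRACE_OPS_FL_RECURSION_SAFE should only be set if
 * the callback handles its own recursion (see ftrace_ops_get_func()).
 *
 *	static void notrace my_trace_func(unsigned long ip,
 *					  unsigned long parent_ip,
 *					  struct ftrace_ops *op,
 *					  struct pt_regs *regs)
 *	{
 *		... act on ip and parent_ip ...
 *	}
 *
 *	static struct ftrace_ops my_ops = {
 *		.func	= my_trace_func,
 *		.flags	= FTRACE_OPS_FL_RECURSION_SAFE,
 *	};
 *
 *	register_ftrace_function(&my_ops);
 */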
5569
5570/**
5571 * unregister_ftrace_function - unregister a function for profiling.
5572 * @ops: ops structure that holds the function to unregister
5573 *
5574 * Unregister a function that was added to be called by ftrace profiling.
5575 */
5576int unregister_ftrace_function(struct ftrace_ops *ops)
5577{
5578        int ret;
5579
5580        mutex_lock(&ftrace_lock);
5581        ret = ftrace_shutdown(ops, 0);
5582        mutex_unlock(&ftrace_lock);
5583
5584        return ret;
5585}
5586EXPORT_SYMBOL_GPL(unregister_ftrace_function);
5587
5588int
5589ftrace_enable_sysctl(struct ctl_table *table, int write,
5590                     void __user *buffer, size_t *lenp,
5591                     loff_t *ppos)
5592{
5593        int ret = -ENODEV;
5594
5595        mutex_lock(&ftrace_lock);
5596
5597        if (unlikely(ftrace_disabled))
5598                goto out;
5599
5600        ret = proc_dointvec(table, write, buffer, lenp, ppos);
5601
5602        if (ret || !write || (last_ftrace_enabled == !!ftrace_enabled))
5603                goto out;
5604
5605        last_ftrace_enabled = !!ftrace_enabled;
5606
5607        if (ftrace_enabled) {
5608
5609                /* we are starting ftrace again */
5610                if (ftrace_ops_list != &ftrace_list_end)
5611                        update_ftrace_function();
5612
5613                ftrace_startup_sysctl();
5614
5615        } else {
5616                /* stopping ftrace calls (just send to ftrace_stub) */
5617                ftrace_trace_function = ftrace_stub;
5618
5619                ftrace_shutdown_sysctl();
5620        }
5621
5622 out:
5623        mutex_unlock(&ftrace_lock);
5624        return ret;
5625}
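
/*
 * Usage sketch: this handler backs the kernel.ftrace_enabled sysctl,
 * e.g.:
 *
 *	sysctl kernel.ftrace_enabled=0		# stop function tracing
 *	echo 1 > /proc/sys/kernel/ftrace_enabled # start it again
 */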
5626
5627#ifdef CONFIG_FUNCTION_GRAPH_TRACER
5628
5629static struct ftrace_ops graph_ops = {
5630        .func                   = ftrace_stub,
5631        .flags                  = FTRACE_OPS_FL_RECURSION_SAFE |
5632                                   FTRACE_OPS_FL_INITIALIZED |
5633                                   FTRACE_OPS_FL_PID |
5634                                   FTRACE_OPS_FL_STUB,
5635#ifdef FTRACE_GRAPH_TRAMP_ADDR
5636        .trampoline             = FTRACE_GRAPH_TRAMP_ADDR,
5637        /* trampoline_size is only needed for dynamically allocated tramps */
5638#endif
5639        ASSIGN_OPS_HASH(graph_ops, &global_ops.local_hash)
5640};
5641
5642int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
5643{
5644        return 0;
5645}
5646
5647/* The callbacks that hook a function */
5648trace_func_graph_ret_t ftrace_graph_return =
5649                        (trace_func_graph_ret_t)ftrace_stub;
5650trace_func_graph_ent_t ftrace_graph_entry = ftrace_graph_entry_stub;
5651static trace_func_graph_ent_t __ftrace_graph_entry = ftrace_graph_entry_stub;
5652
5653/* Try to assign a return stack array to FTRACE_RETSTACK_ALLOC_SIZE tasks. */
5654static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
5655{
5656        int i;
5657        int ret = 0;
5658        unsigned long flags;
5659        int start = 0, end = FTRACE_RETSTACK_ALLOC_SIZE;
5660        struct task_struct *g, *t;
5661
5662        for (i = 0; i < FTRACE_RETSTACK_ALLOC_SIZE; i++) {
5663                ret_stack_list[i] = kmalloc(FTRACE_RETFUNC_DEPTH
5664                                        * sizeof(struct ftrace_ret_stack),
5665                                        GFP_KERNEL);
5666                if (!ret_stack_list[i]) {
5667                        start = 0;
5668                        end = i;
5669                        ret = -ENOMEM;
5670                        goto free;
5671                }
5672        }
5673
5674        read_lock_irqsave(&tasklist_lock, flags);
5675        do_each_thread(g, t) {
5676                if (start == end) {
5677                        ret = -EAGAIN;
5678                        goto unlock;
5679                }
5680
5681                if (t->ret_stack == NULL) {
5682                        atomic_set(&t->tracing_graph_pause, 0);
5683                        atomic_set(&t->trace_overrun, 0);
5684                        t->curr_ret_stack = -1;
5685                        /* Make sure the tasks see the -1 first: */
5686                        smp_wmb();
5687                        t->ret_stack = ret_stack_list[start++];
5688                }
5689        } while_each_thread(g, t);
5690
5691unlock:
5692        read_unlock_irqrestore(&tasklist_lock, flags);
5693free:
5694        for (i = start; i < end; i++)
5695                kfree(ret_stack_list[i]);
5696        return ret;
5697}
5698
5699static void
5700ftrace_graph_probe_sched_switch(void *ignore,
5701                        struct task_struct *prev, struct task_struct *next)
5702{
5703        unsigned long long timestamp;
5704        int index;
5705
5706        /*
5707         * Does the user want to count the time a function was asleep?
5708         * If so, do not update the timestamps.
5709         */
5710        if (trace_flags & TRACE_ITER_SLEEP_TIME)
5711                return;
5712
5713        timestamp = trace_clock_local();
5714
5715        prev->ftrace_timestamp = timestamp;
5716
5717        /* only process tasks that we timestamped */
5718        if (!next->ftrace_timestamp)
5719                return;
5720
5721        /*
5722         * Update all the counters in next to make up for the
5723         * time next was sleeping.
5724         */
5725        timestamp -= next->ftrace_timestamp;
5726
5727        for (index = next->curr_ret_stack; index >= 0; index--)
5728                next->ret_stack[index].calltime += timestamp;
5729}
5730
5731/* Allocate a return stack for each task */
5732static int start_graph_tracing(void)
5733{
5734        struct ftrace_ret_stack **ret_stack_list;
5735        int ret, cpu;
5736
5737        ret_stack_list = kmalloc(FTRACE_RETSTACK_ALLOC_SIZE *
5738                                sizeof(struct ftrace_ret_stack *),
5739                                GFP_KERNEL);
5740
5741        if (!ret_stack_list)
5742                return -ENOMEM;
5743
5744        /* The ret_stack of the boot CPU's idle (init) task is never freed */
5745        for_each_online_cpu(cpu) {
5746                if (!idle_task(cpu)->ret_stack)
5747                        ftrace_graph_init_idle_task(idle_task(cpu), cpu);
5748        }
5749
5750        do {
5751                ret = alloc_retstack_tasklist(ret_stack_list);
5752        } while (ret == -EAGAIN);
5753
5754        if (!ret) {
5755                ret = register_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
5756                if (ret)
5757                        pr_info("ftrace_graph: Couldn't activate tracepoint probe to kernel_sched_switch\n");
5759        }
5760
5761        kfree(ret_stack_list);
5762        return ret;
5763}
5764
5765/*
5766 * Hibernation protection.
5767 * The state of the current task is too unstable during
5768 * suspend/restore to disk. We want to protect against that.
5769 */
5770static int
5771ftrace_suspend_notifier_call(struct notifier_block *bl, unsigned long state,
5772                                                        void *unused)
5773{
5774        switch (state) {
5775        case PM_HIBERNATION_PREPARE:
5776                pause_graph_tracing();
5777                break;
5778
5779        case PM_POST_HIBERNATION:
5780                unpause_graph_tracing();
5781                break;
5782        }
5783        return NOTIFY_DONE;
5784}
5785
5786static int ftrace_graph_entry_test(struct ftrace_graph_ent *trace)
5787{
5788        if (!ftrace_ops_test(&global_ops, trace->func, NULL))
5789                return 0;
5790        return __ftrace_graph_entry(trace);
5791}
5792
5793/*
5794 * The function graph tracer should only trace the functions defined
5795 * by set_ftrace_filter and set_ftrace_notrace. If another function
5796 * tracer ops is registered, the graph tracer must test each
5797 * function against the global ops instead of tracing any function
5798 * that any ftrace_ops has registered.
5799 */
5800static void update_function_graph_func(void)
5801{
5802        struct ftrace_ops *op;
5803        bool do_test = false;
5804
5805        /*
5806         * The graph and global ops share the same set of functions
5807         * to test. If any other ops is on the list, then
5808         * the graph tracing needs to test if it's the function
5809         * it should call.
5810         */
5811        do_for_each_ftrace_op(op, ftrace_ops_list) {
5812                if (op != &global_ops && op != &graph_ops &&
5813                    op != &ftrace_list_end) {
5814                        do_test = true;
5815                        /* in double loop, break out with goto */
5816                        goto out;
5817                }
5818        } while_for_each_ftrace_op(op);
5819 out:
5820        if (do_test)
5821                ftrace_graph_entry = ftrace_graph_entry_test;
5822        else
5823                ftrace_graph_entry = __ftrace_graph_entry;
5824}
5825
5826static struct notifier_block ftrace_suspend_notifier = {
5827        .notifier_call = ftrace_suspend_notifier_call,
5828};
5829
5830int register_ftrace_graph(trace_func_graph_ret_t retfunc,
5831                        trace_func_graph_ent_t entryfunc)
5832{
5833        int ret = 0;
5834
5835        mutex_lock(&ftrace_lock);
5836
5837        /* we currently allow only one tracer registered at a time */
5838        if (ftrace_graph_active) {
5839                ret = -EBUSY;
5840                goto out;
5841        }
5842
5843        register_pm_notifier(&ftrace_suspend_notifier);
5844
5845        ftrace_graph_active++;
5846        ret = start_graph_tracing();
5847        if (ret) {
5848                ftrace_graph_active--;
5849                goto out;
5850        }
5851
5852        ftrace_graph_return = retfunc;
5853
5854        /*
5855         * Update the indirect function to the entryfunc, and set the
5856         * function that actually gets called to the entry_test first.
5857         * Then call update_function_graph_func() to determine if
5858         * the entryfunc should be called directly or not.
5859         */
5860        __ftrace_graph_entry = entryfunc;
5861        ftrace_graph_entry = ftrace_graph_entry_test;
5862        update_function_graph_func();
5863
5864        ret = ftrace_startup(&graph_ops, FTRACE_START_FUNC_RET);
5865out:
5866        mutex_unlock(&ftrace_lock);
5867        return ret;
5868}
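
/*
 * Registration sketch (hypothetical callbacks, not part of this file):
 * the entry handler returns nonzero to trace the function or zero to
 * skip it; the return handler runs when the traced function exits.
 *
 *	static int my_entry(struct ftrace_graph_ent *trace)
 *	{
 *		return 1;	(nonzero: trace this entry)
 *	}
 *
 *	static void my_return(struct ftrace_graph_ret *trace)
 *	{
 *	}
 *
 *	register_ftrace_graph(my_return, my_entry);
 */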
5869
5870void unregister_ftrace_graph(void)
5871{
5872        mutex_lock(&ftrace_lock);
5873
5874        if (unlikely(!ftrace_graph_active))
5875                goto out;
5876
5877        ftrace_graph_active--;
5878        ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub;
5879        ftrace_graph_entry = ftrace_graph_entry_stub;
5880        __ftrace_graph_entry = ftrace_graph_entry_stub;
5881        ftrace_shutdown(&graph_ops, FTRACE_STOP_FUNC_RET);
5882        unregister_pm_notifier(&ftrace_suspend_notifier);
5883        unregister_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
5884
5885#ifdef CONFIG_DYNAMIC_FTRACE
5886        /*
5887         * Function graph does not allocate the trampoline, but
5888         * other global_ops do. We need to reset the ALLOC_TRAMP flag
5889         * if one was used.
5890         */
5891        global_ops.trampoline = save_global_trampoline;
5892        if (save_global_flags & FTRACE_OPS_FL_ALLOC_TRAMP)
5893                global_ops.flags |= FTRACE_OPS_FL_ALLOC_TRAMP;
5894#endif
5895
5896 out:
5897        mutex_unlock(&ftrace_lock);
5898}
5899
5900static DEFINE_PER_CPU(struct ftrace_ret_stack *, idle_ret_stack);
5901
5902static void
5903graph_init_task(struct task_struct *t, struct ftrace_ret_stack *ret_stack)
5904{
5905        atomic_set(&t->tracing_graph_pause, 0);
5906        atomic_set(&t->trace_overrun, 0);
5907        t->ftrace_timestamp = 0;
5908        /* make curr_ret_stack visible before we add the ret_stack */
5909        smp_wmb();
5910        t->ret_stack = ret_stack;
5911}
5912
5913/*
5914 * Allocate a return stack for the idle task. This may be the first
5915 * time through, or it may be run again when a CPU comes online.
5916 */
5917void ftrace_graph_init_idle_task(struct task_struct *t, int cpu)
5918{
5919        t->curr_ret_stack = -1;
5920        /*
5921         * The idle task has no parent, it either has its own
5922         * stack or no stack at all.
5923         */
5924        if (t->ret_stack)
5925                WARN_ON(t->ret_stack != per_cpu(idle_ret_stack, cpu));
5926
5927        if (ftrace_graph_active) {
5928                struct ftrace_ret_stack *ret_stack;
5929
5930                ret_stack = per_cpu(idle_ret_stack, cpu);
5931                if (!ret_stack) {
5932                        ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
5933                                            * sizeof(struct ftrace_ret_stack),
5934                                            GFP_KERNEL);
5935                        if (!ret_stack)
5936                                return;
5937                        per_cpu(idle_ret_stack, cpu) = ret_stack;
5938                }
5939                graph_init_task(t, ret_stack);
5940        }
5941}
5942
5943/* Allocate a return stack for newly created task */
5944void ftrace_graph_init_task(struct task_struct *t)
5945{
5946        /* Make sure we do not use the parent ret_stack */
5947        t->ret_stack = NULL;
5948        t->curr_ret_stack = -1;
5949
5950        if (ftrace_graph_active) {
5951                struct ftrace_ret_stack *ret_stack;
5952
5953                ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
5954                                * sizeof(struct ftrace_ret_stack),
5955                                GFP_KERNEL);
5956                if (!ret_stack)
5957                        return;
5958                graph_init_task(t, ret_stack);
5959        }
5960}
5961
5962void ftrace_graph_exit_task(struct task_struct *t)
5963{
5964        struct ftrace_ret_stack *ret_stack = t->ret_stack;
5965
5966        t->ret_stack = NULL;
5967        /* NULL must become visible to IRQs before we free it: */
5968        barrier();
5969
5970        kfree(ret_stack);
5971}
5972#endif
5973