linux/kernel/trace/ftrace.c
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Infrastructure for profiling code inserted by 'gcc -pg'.
   4 *
   5 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
   6 * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
   7 *
   8 * Originally ported from the -rt patch by:
   9 *   Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
  10 *
  11 * Based on code in the latency_tracer, that is:
  12 *
  13 *  Copyright (C) 2004-2006 Ingo Molnar
  14 *  Copyright (C) 2004 Nadia Yvette Chambers
  15 */
  16
  17#include <linux/stop_machine.h>
  18#include <linux/clocksource.h>
  19#include <linux/sched/task.h>
  20#include <linux/kallsyms.h>
  21#include <linux/security.h>
  22#include <linux/seq_file.h>
  23#include <linux/tracefs.h>
  24#include <linux/hardirq.h>
  25#include <linux/kthread.h>
  26#include <linux/uaccess.h>
  27#include <linux/bsearch.h>
  28#include <linux/module.h>
  29#include <linux/ftrace.h>
  30#include <linux/sysctl.h>
  31#include <linux/slab.h>
  32#include <linux/ctype.h>
  33#include <linux/sort.h>
  34#include <linux/list.h>
  35#include <linux/hash.h>
  36#include <linux/rcupdate.h>
  37#include <linux/kprobes.h>
  38
  39#include <trace/events/sched.h>
  40
  41#include <asm/sections.h>
  42#include <asm/setup.h>
  43
  44#include "ftrace_internal.h"
  45#include "trace_output.h"
  46#include "trace_stat.h"
  47
  48#define FTRACE_WARN_ON(cond)                    \
  49        ({                                      \
  50                int ___r = cond;                \
  51                if (WARN_ON(___r))              \
  52                        ftrace_kill();          \
  53                ___r;                           \
  54        })
  55
  56#define FTRACE_WARN_ON_ONCE(cond)               \
  57        ({                                      \
  58                int ___r = cond;                \
  59                if (WARN_ON_ONCE(___r))         \
  60                        ftrace_kill();          \
  61                ___r;                           \
  62        })
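/*
 * Both macros evaluate to the tested condition itself, so they can gate
 * an error path inline while also shutting ftrace down on failure. A
 * minimal usage sketch (illustrative only; "ret" and some_update() are
 * hypothetical, not part of this file):
 *
 *	ret = some_update();
 *	if (FTRACE_WARN_ON(ret))
 *		return ret;	// WARN fired and ftrace_kill() ran
 */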
  63
  64/* hash bits for specific function selection */
  65#define FTRACE_HASH_DEFAULT_BITS 10
  66#define FTRACE_HASH_MAX_BITS 12
  67
  68#ifdef CONFIG_DYNAMIC_FTRACE
  69#define INIT_OPS_HASH(opsname)  \
  70        .func_hash              = &opsname.local_hash,                  \
  71        .local_hash.regex_lock  = __MUTEX_INITIALIZER(opsname.local_hash.regex_lock),
  72#else
  73#define INIT_OPS_HASH(opsname)
  74#endif
  75
  76enum {
  77        FTRACE_MODIFY_ENABLE_FL         = (1 << 0),
  78        FTRACE_MODIFY_MAY_SLEEP_FL      = (1 << 1),
  79};
  80
  81struct ftrace_ops ftrace_list_end __read_mostly = {
  82        .func           = ftrace_stub,
  83        .flags          = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_STUB,
  84        INIT_OPS_HASH(ftrace_list_end)
  85};
  86
  87/* ftrace_enabled is a method to turn ftrace on or off */
  88int ftrace_enabled __read_mostly;
  89static int last_ftrace_enabled;
  90
  91/* Current function tracing op */
  92struct ftrace_ops *function_trace_op __read_mostly = &ftrace_list_end;
  93/* What to set function_trace_op to */
  94static struct ftrace_ops *set_function_trace_op;
  95
  96static bool ftrace_pids_enabled(struct ftrace_ops *ops)
  97{
  98        struct trace_array *tr;
  99
 100        if (!(ops->flags & FTRACE_OPS_FL_PID) || !ops->private)
 101                return false;
 102
 103        tr = ops->private;
 104
 105        return tr->function_pids != NULL || tr->function_no_pids != NULL;
 106}
 107
 108static void ftrace_update_trampoline(struct ftrace_ops *ops);
 109
 110/*
 111 * ftrace_disabled is set when an anomaly is discovered.
 112 * ftrace_disabled is much stronger than ftrace_enabled.
 113 */
 114static int ftrace_disabled __read_mostly;
 115
 116DEFINE_MUTEX(ftrace_lock);
 117
 118struct ftrace_ops __rcu *ftrace_ops_list __read_mostly = &ftrace_list_end;
 119ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
 120struct ftrace_ops global_ops;
 121
 122#if ARCH_SUPPORTS_FTRACE_OPS
 123static void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
 124                                 struct ftrace_ops *op, struct pt_regs *regs);
 125#else
 126/* See comment below, where ftrace_ops_list_func is defined */
 127static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip);
 128#define ftrace_ops_list_func ((ftrace_func_t)ftrace_ops_no_ops)
 129#endif
 130
 131static inline void ftrace_ops_init(struct ftrace_ops *ops)
 132{
 133#ifdef CONFIG_DYNAMIC_FTRACE
 134        if (!(ops->flags & FTRACE_OPS_FL_INITIALIZED)) {
 135                mutex_init(&ops->local_hash.regex_lock);
 136                ops->func_hash = &ops->local_hash;
 137                ops->flags |= FTRACE_OPS_FL_INITIALIZED;
 138        }
 139#endif
 140}
 141
 142#define FTRACE_PID_IGNORE       -1
 143#define FTRACE_PID_TRACE        -2
 144
 145static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip,
 146                            struct ftrace_ops *op, struct pt_regs *regs)
 147{
 148        struct trace_array *tr = op->private;
 149        int pid;
 150
 151        if (tr) {
 152                pid = this_cpu_read(tr->array_buffer.data->ftrace_ignore_pid);
 153                if (pid == FTRACE_PID_IGNORE)
 154                        return;
 155                if (pid != FTRACE_PID_TRACE &&
 156                    pid != current->pid)
 157                        return;
 158        }
 159
 160        op->saved_func(ip, parent_ip, op, regs);
 161}
 162
 163static void ftrace_sync_ipi(void *data)
 164{
 165        /* Probably not needed, but do it anyway */
 166        smp_rmb();
 167}
 168
 169static ftrace_func_t ftrace_ops_get_list_func(struct ftrace_ops *ops)
 170{
 171        /*
 172         * If this is a dynamic, RCU, or per CPU ops, or we force list func,
 173         * then it needs to call the list anyway.
 174         */
 175        if (ops->flags & (FTRACE_OPS_FL_DYNAMIC | FTRACE_OPS_FL_RCU) ||
 176            FTRACE_FORCE_LIST_FUNC)
 177                return ftrace_ops_list_func;
 178
 179        return ftrace_ops_get_func(ops);
 180}
 181
 182static void update_ftrace_function(void)
 183{
 184        ftrace_func_t func;
 185
 186        /*
 187         * Prepare the ftrace_ops that the arch callback will use.
 188         * If there's only one ftrace_ops registered, the ftrace_ops_list
 189         * will point to the ops we want.
 190         */
 191        set_function_trace_op = rcu_dereference_protected(ftrace_ops_list,
 192                                                lockdep_is_held(&ftrace_lock));
 193
 194        /* If there's no ftrace_ops registered, just call the stub function */
 195        if (set_function_trace_op == &ftrace_list_end) {
 196                func = ftrace_stub;
 197
 198        /*
 199         * If we are at the end of the list and this ops is
 200         * recursion safe and not dynamic and the arch supports passing ops,
 201         * then have the mcount trampoline call the function directly.
 202         */
 203        } else if (rcu_dereference_protected(ftrace_ops_list->next,
 204                        lockdep_is_held(&ftrace_lock)) == &ftrace_list_end) {
 205                func = ftrace_ops_get_list_func(ftrace_ops_list);
 206
 207        } else {
 208                /* Just use the default ftrace_ops */
 209                set_function_trace_op = &ftrace_list_end;
 210                func = ftrace_ops_list_func;
 211        }
 212
 213        update_function_graph_func();
 214
 215        /* If there's no change, then do nothing more here */
 216        if (ftrace_trace_function == func)
 217                return;
 218
 219        /*
 220         * If we are using the list function, it doesn't care
 221         * about the function_trace_ops.
 222         */
 223        if (func == ftrace_ops_list_func) {
 224                ftrace_trace_function = func;
 225                /*
 226                 * Don't even bother setting function_trace_ops,
 227                 * it would be racy to do so anyway.
 228                 */
 229                return;
 230        }
 231
 232#ifndef CONFIG_DYNAMIC_FTRACE
 233        /*
 234         * For static tracing, we need to be a bit more careful.
 235         * The function change takes effect immediately. Thus,
 236         * we need to coordinate the setting of the function_trace_ops
 237         * with the setting of the ftrace_trace_function.
 238         *
 239         * Set the function to the list ops, which will call the
 240         * function we want, albeit indirectly, but it handles the
 241         * ftrace_ops and doesn't depend on function_trace_op.
 242         */
 243        ftrace_trace_function = ftrace_ops_list_func;
 244        /*
 245         * Make sure all CPUs see this. Yes this is slow, but static
 246         * tracing is slow and nasty to have enabled.
 247         */
 248        synchronize_rcu_tasks_rude();
 249        /* Now all cpus are using the list ops. */
 250        function_trace_op = set_function_trace_op;
 251        /* Make sure the function_trace_op is visible on all CPUs */
 252        smp_wmb();
 253        /* Nasty way to force a rmb on all cpus */
 254        smp_call_function(ftrace_sync_ipi, NULL, 1);
 255        /* OK, we are all set to update the ftrace_trace_function now! */
 256#endif /* !CONFIG_DYNAMIC_FTRACE */
 257
 258        ftrace_trace_function = func;
 259}
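/*
 * The static-ftrace ordering above, spelled out as a sketch:
 *
 *	1) point ftrace_trace_function at the list func, which does not
 *	   read function_trace_op at all;
 *	2) synchronize_rcu_tasks_rude() waits until every CPU runs it;
 *	3) function_trace_op can now be switched safely;
 *	4) smp_wmb() plus the ftrace_sync_ipi() broadcast make the new
 *	   value visible everywhere;
 *	5) only then is the real callback installed.
 */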
 260
 261static void add_ftrace_ops(struct ftrace_ops __rcu **list,
 262                           struct ftrace_ops *ops)
 263{
 264        rcu_assign_pointer(ops->next, *list);
 265
 266        /*
 267         * We are entering ops into the list but another
 268         * CPU might be walking that list. We need to make sure
 269         * the ops->next pointer is valid before another CPU sees
 270         * the ops pointer included in the list.
 271         */
 272        rcu_assign_pointer(*list, ops);
 273}
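/*
 * The two rcu_assign_pointer() calls above form the usual RCU publish
 * pattern: initialize the new element (ops->next) before making it
 * reachable (*list). A reader-side sketch under that assumption
 * (illustrative only; real walkers in this file use
 * do_for_each_ftrace_op() with preemption disabled):
 *
 *	rcu_read_lock();
 *	for (op = rcu_dereference(ftrace_ops_list);
 *	     op != &ftrace_list_end;
 *	     op = rcu_dereference(op->next))
 *		;	// op->next is valid once op itself is visible
 *	rcu_read_unlock();
 */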
 274
 275static int remove_ftrace_ops(struct ftrace_ops __rcu **list,
 276                             struct ftrace_ops *ops)
 277{
 278        struct ftrace_ops **p;
 279
 280        /*
 281         * If we are removing the last function, then simply point
 282         * to the ftrace_stub.
 283         */
 284        if (rcu_dereference_protected(*list,
 285                        lockdep_is_held(&ftrace_lock)) == ops &&
 286            rcu_dereference_protected(ops->next,
 287                        lockdep_is_held(&ftrace_lock)) == &ftrace_list_end) {
 288                *list = &ftrace_list_end;
 289                return 0;
 290        }
 291
 292        for (p = list; *p != &ftrace_list_end; p = &(*p)->next)
 293                if (*p == ops)
 294                        break;
 295
 296        if (*p != ops)
 297                return -1;
 298
 299        *p = (*p)->next;
 300        return 0;
 301}
 302
 303static void ftrace_update_trampoline(struct ftrace_ops *ops);
 304
 305int __register_ftrace_function(struct ftrace_ops *ops)
 306{
 307        if (ops->flags & FTRACE_OPS_FL_DELETED)
 308                return -EINVAL;
 309
 310        if (WARN_ON(ops->flags & FTRACE_OPS_FL_ENABLED))
 311                return -EBUSY;
 312
 313#ifndef CONFIG_DYNAMIC_FTRACE_WITH_REGS
 314        /*
 315         * If the ftrace_ops specifies SAVE_REGS, then it only can be used
 316         * if the arch supports it, or SAVE_REGS_IF_SUPPORTED is also set.
 317         * Setting SAVE_REGS_IF_SUPPORTED makes SAVE_REGS irrelevant.
 318         */
 319        if (ops->flags & FTRACE_OPS_FL_SAVE_REGS &&
 320            !(ops->flags & FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED))
 321                return -EINVAL;
 322
 323        if (ops->flags & FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED)
 324                ops->flags |= FTRACE_OPS_FL_SAVE_REGS;
 325#endif
 326        if (!ftrace_enabled && (ops->flags & FTRACE_OPS_FL_PERMANENT))
 327                return -EBUSY;
 328
 329        if (!core_kernel_data((unsigned long)ops))
 330                ops->flags |= FTRACE_OPS_FL_DYNAMIC;
 331
 332        add_ftrace_ops(&ftrace_ops_list, ops);
 333
 334        /* Always save the function, and reset at unregistering */
 335        ops->saved_func = ops->func;
 336
 337        if (ftrace_pids_enabled(ops))
 338                ops->func = ftrace_pid_func;
 339
 340        ftrace_update_trampoline(ops);
 341
 342        if (ftrace_enabled)
 343                update_ftrace_function();
 344
 345        return 0;
 346}
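/*
 * A caller-side registration sketch (illustrative only; my_callback and
 * my_ops are hypothetical, and register_ftrace_function() is the locked
 * wrapper around the function above):
 *
 *	static void my_callback(unsigned long ip, unsigned long parent_ip,
 *				struct ftrace_ops *op, struct pt_regs *regs)
 *	{
 *		// called on entry of every traced function
 *	}
 *
 *	static struct ftrace_ops my_ops = {
 *		.func = my_callback,
 *	};
 *
 *	register_ftrace_function(&my_ops);
 */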
 347
 348int __unregister_ftrace_function(struct ftrace_ops *ops)
 349{
 350        int ret;
 351
 352        if (WARN_ON(!(ops->flags & FTRACE_OPS_FL_ENABLED)))
 353                return -EBUSY;
 354
 355        ret = remove_ftrace_ops(&ftrace_ops_list, ops);
 356
 357        if (ret < 0)
 358                return ret;
 359
 360        if (ftrace_enabled)
 361                update_ftrace_function();
 362
 363        ops->func = ops->saved_func;
 364
 365        return 0;
 366}
 367
 368static void ftrace_update_pid_func(void)
 369{
 370        struct ftrace_ops *op;
 371
 372        /* Only do something if we are tracing something */
 373        if (ftrace_trace_function == ftrace_stub)
 374                return;
 375
 376        do_for_each_ftrace_op(op, ftrace_ops_list) {
 377                if (op->flags & FTRACE_OPS_FL_PID) {
 378                        op->func = ftrace_pids_enabled(op) ?
 379                                ftrace_pid_func : op->saved_func;
 380                        ftrace_update_trampoline(op);
 381                }
 382        } while_for_each_ftrace_op(op);
 383
 384        update_ftrace_function();
 385}
 386
 387#ifdef CONFIG_FUNCTION_PROFILER
 388struct ftrace_profile {
 389        struct hlist_node               node;
 390        unsigned long                   ip;
 391        unsigned long                   counter;
 392#ifdef CONFIG_FUNCTION_GRAPH_TRACER
 393        unsigned long long              time;
 394        unsigned long long              time_squared;
 395#endif
 396};
 397
 398struct ftrace_profile_page {
 399        struct ftrace_profile_page      *next;
 400        unsigned long                   index;
 401        struct ftrace_profile           records[];
 402};
 403
 404struct ftrace_profile_stat {
 405        atomic_t                        disabled;
 406        struct hlist_head               *hash;
 407        struct ftrace_profile_page      *pages;
 408        struct ftrace_profile_page      *start;
 409        struct tracer_stat              stat;
 410};
 411
 412#define PROFILE_RECORDS_SIZE                                            \
 413        (PAGE_SIZE - offsetof(struct ftrace_profile_page, records))
 414
 415#define PROFILES_PER_PAGE                                       \
 416        (PROFILE_RECORDS_SIZE / sizeof(struct ftrace_profile))
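/*
 * Rough capacity math (assuming 4 KiB pages and 64-bit pointers with
 * the graph tracer enabled): sizeof(struct ftrace_profile) is 48 bytes
 * (16 for the hlist_node plus four 8-byte fields), the page header is
 * 16 bytes, so PROFILES_PER_PAGE works out to (4096 - 16) / 48 = 85
 * records per page.
 */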
 417
 418static int ftrace_profile_enabled __read_mostly;
 419
 420/* ftrace_profile_lock - synchronize the enable and disable of the profiler */
 421static DEFINE_MUTEX(ftrace_profile_lock);
 422
 423static DEFINE_PER_CPU(struct ftrace_profile_stat, ftrace_profile_stats);
 424
 425#define FTRACE_PROFILE_HASH_BITS 10
 426#define FTRACE_PROFILE_HASH_SIZE (1 << FTRACE_PROFILE_HASH_BITS)
 427
 428static void *
 429function_stat_next(void *v, int idx)
 430{
 431        struct ftrace_profile *rec = v;
 432        struct ftrace_profile_page *pg;
 433
 434        pg = (struct ftrace_profile_page *)((unsigned long)rec & PAGE_MASK);
 435
 436 again:
 437        if (idx != 0)
 438                rec++;
 439
 440        if ((void *)rec >= (void *)&pg->records[pg->index]) {
 441                pg = pg->next;
 442                if (!pg)
 443                        return NULL;
 444                rec = &pg->records[0];
 445                if (!rec->counter)
 446                        goto again;
 447        }
 448
 449        return rec;
 450}
 451
 452static void *function_stat_start(struct tracer_stat *trace)
 453{
 454        struct ftrace_profile_stat *stat =
 455                container_of(trace, struct ftrace_profile_stat, stat);
 456
 457        if (!stat || !stat->start)
 458                return NULL;
 459
 460        return function_stat_next(&stat->start->records[0], 0);
 461}
 462
 463#ifdef CONFIG_FUNCTION_GRAPH_TRACER
 464/* function graph compares on total time */
 465static int function_stat_cmp(const void *p1, const void *p2)
 466{
 467        const struct ftrace_profile *a = p1;
 468        const struct ftrace_profile *b = p2;
 469
 470        if (a->time < b->time)
 471                return -1;
 472        if (a->time > b->time)
 473                return 1;
 474        else
 475                return 0;
 476}
 477#else
 478/* not function graph compares against hits */
 479static int function_stat_cmp(const void *p1, const void *p2)
 480{
 481        const struct ftrace_profile *a = p1;
 482        const struct ftrace_profile *b = p2;
 483
 484        if (a->counter < b->counter)
 485                return -1;
 486        if (a->counter > b->counter)
 487                return 1;
 488        else
 489                return 0;
 490}
 491#endif
 492
 493static int function_stat_headers(struct seq_file *m)
 494{
 495#ifdef CONFIG_FUNCTION_GRAPH_TRACER
 496        seq_puts(m, "  Function                               "
 497                 "Hit    Time            Avg             s^2\n"
 498                    "  --------                               "
 499                 "---    ----            ---             ---\n");
 500#else
 501        seq_puts(m, "  Function                               Hit\n"
 502                    "  --------                               ---\n");
 503#endif
 504        return 0;
 505}
 506
 507static int function_stat_show(struct seq_file *m, void *v)
 508{
 509        struct ftrace_profile *rec = v;
 510        char str[KSYM_SYMBOL_LEN];
 511        int ret = 0;
 512#ifdef CONFIG_FUNCTION_GRAPH_TRACER
 513        static struct trace_seq s;
 514        unsigned long long avg;
 515        unsigned long long stddev;
 516#endif
 517        mutex_lock(&ftrace_profile_lock);
 518
 519        /* we raced with function_profile_reset() */
 520        if (unlikely(rec->counter == 0)) {
 521                ret = -EBUSY;
 522                goto out;
 523        }
 524
 525#ifdef CONFIG_FUNCTION_GRAPH_TRACER
 526        avg = div64_ul(rec->time, rec->counter);
 527        if (tracing_thresh && (avg < tracing_thresh))
 528                goto out;
 529#endif
 530
 531        kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
 532        seq_printf(m, "  %-30.30s  %10lu", str, rec->counter);
 533
 534#ifdef CONFIG_FUNCTION_GRAPH_TRACER
 535        seq_puts(m, "    ");
 536
 537        /* Sample variance (s^2) */
 538        if (rec->counter <= 1)
 539                stddev = 0;
 540        else {
 541                /*
 542                 * Apply Welford's method:
 543                 * s^2 = 1 / (n * (n-1)) * (n * \Sum (x_i)^2 - (\Sum x_i)^2)
 544                 */
 545                stddev = rec->counter * rec->time_squared -
 546                         rec->time * rec->time;
 547
 548                /*
 549                 * Divide by only 1000 for the ns^2 -> us^2 conversion.
 550                 * trace_print_graph_duration will divide 1000 again.
 551                 */
 552                stddev = div64_ul(stddev,
 553                                  rec->counter * (rec->counter - 1) * 1000);
 554        }
 555
 556        trace_seq_init(&s);
 557        trace_print_graph_duration(rec->time, &s);
 558        trace_seq_puts(&s, "    ");
 559        trace_print_graph_duration(avg, &s);
 560        trace_seq_puts(&s, "    ");
 561        trace_print_graph_duration(stddev, &s);
 562        trace_print_seq(m, &s);
 563#endif
 564        seq_putc(m, '\n');
 565out:
 566        mutex_unlock(&ftrace_profile_lock);
 567
 568        return ret;
 569}
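/*
 * A worked instance of the variance computation above: for n = 2
 * samples of 1000ns and 3000ns, time = 4000 and time_squared =
 * 1000^2 + 3000^2 = 10^7, so n * time_squared - time^2 =
 * 2*10^7 - 1.6*10^7 = 4*10^6, and dividing by n * (n - 1) = 2 gives
 * s^2 = 2*10^6 ns^2. That matches the two samples sitting 1000ns away
 * from their 2000ns mean (stddev ~1414ns).
 */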
 570
 571static void ftrace_profile_reset(struct ftrace_profile_stat *stat)
 572{
 573        struct ftrace_profile_page *pg;
 574
 575        pg = stat->pages = stat->start;
 576
 577        while (pg) {
 578                memset(pg->records, 0, PROFILE_RECORDS_SIZE);
 579                pg->index = 0;
 580                pg = pg->next;
 581        }
 582
 583        memset(stat->hash, 0,
 584               FTRACE_PROFILE_HASH_SIZE * sizeof(struct hlist_head));
 585}
 586
 587int ftrace_profile_pages_init(struct ftrace_profile_stat *stat)
 588{
 589        struct ftrace_profile_page *pg;
 590        int functions;
 591        int pages;
 592        int i;
 593
 594        /* If we already allocated, do nothing */
 595        if (stat->pages)
 596                return 0;
 597
 598        stat->pages = (void *)get_zeroed_page(GFP_KERNEL);
 599        if (!stat->pages)
 600                return -ENOMEM;
 601
 602#ifdef CONFIG_DYNAMIC_FTRACE
 603        functions = ftrace_update_tot_cnt;
 604#else
 605        /*
 606         * We do not know the number of functions that exist because
 607         * dynamic tracing is what counts them. From past experience,
 608         * there are around 20K functions. That should be more than enough.
 609         * It is highly unlikely we will execute every function in
 610         * the kernel.
 611         */
 612        functions = 20000;
 613#endif
 614
 615        pg = stat->start = stat->pages;
 616
 617        pages = DIV_ROUND_UP(functions, PROFILES_PER_PAGE);
 618
 619        for (i = 1; i < pages; i++) {
 620                pg->next = (void *)get_zeroed_page(GFP_KERNEL);
 621                if (!pg->next)
 622                        goto out_free;
 623                pg = pg->next;
 624        }
 625
 626        return 0;
 627
 628 out_free:
 629        pg = stat->start;
 630        while (pg) {
 631                unsigned long tmp = (unsigned long)pg;
 632
 633                pg = pg->next;
 634                free_page(tmp);
 635        }
 636
 637        stat->pages = NULL;
 638        stat->start = NULL;
 639
 640        return -ENOMEM;
 641}
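/*
 * Sizing sketch for the loop above (using the ~85 records per page
 * estimated earlier for 64-bit with the graph tracer): 20000 functions
 * need DIV_ROUND_UP(20000, 85) = 236 pages, i.e. a bit under 1 MiB of
 * profile records per CPU.
 */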
 642
 643static int ftrace_profile_init_cpu(int cpu)
 644{
 645        struct ftrace_profile_stat *stat;
 646        int size;
 647
 648        stat = &per_cpu(ftrace_profile_stats, cpu);
 649
 650        if (stat->hash) {
 651                /* If the profile is already created, simply reset it */
 652                ftrace_profile_reset(stat);
 653                return 0;
 654        }
 655
 656        /*
 657         * We are profiling all functions, but usually only a few thousand
 658         * functions are hit. We'll make a hash of 1024 items.
 659         */
 660        size = FTRACE_PROFILE_HASH_SIZE;
 661
 662        stat->hash = kcalloc(size, sizeof(struct hlist_head), GFP_KERNEL);
 663
 664        if (!stat->hash)
 665                return -ENOMEM;
 666
 667        /* Preallocate the function profiling pages */
 668        if (ftrace_profile_pages_init(stat) < 0) {
 669                kfree(stat->hash);
 670                stat->hash = NULL;
 671                return -ENOMEM;
 672        }
 673
 674        return 0;
 675}
 676
 677static int ftrace_profile_init(void)
 678{
 679        int cpu;
 680        int ret = 0;
 681
 682        for_each_possible_cpu(cpu) {
 683                ret = ftrace_profile_init_cpu(cpu);
 684                if (ret)
 685                        break;
 686        }
 687
 688        return ret;
 689}
 690
 691/* interrupts must be disabled */
 692static struct ftrace_profile *
 693ftrace_find_profiled_func(struct ftrace_profile_stat *stat, unsigned long ip)
 694{
 695        struct ftrace_profile *rec;
 696        struct hlist_head *hhd;
 697        unsigned long key;
 698
 699        key = hash_long(ip, FTRACE_PROFILE_HASH_BITS);
 700        hhd = &stat->hash[key];
 701
 702        if (hlist_empty(hhd))
 703                return NULL;
 704
 705        hlist_for_each_entry_rcu_notrace(rec, hhd, node) {
 706                if (rec->ip == ip)
 707                        return rec;
 708        }
 709
 710        return NULL;
 711}
 712
 713static void ftrace_add_profile(struct ftrace_profile_stat *stat,
 714                               struct ftrace_profile *rec)
 715{
 716        unsigned long key;
 717
 718        key = hash_long(rec->ip, FTRACE_PROFILE_HASH_BITS);
 719        hlist_add_head_rcu(&rec->node, &stat->hash[key]);
 720}
 721
 722/*
 723 * The memory is already allocated; this simply finds a new record to use.
 724 */
 725static struct ftrace_profile *
 726ftrace_profile_alloc(struct ftrace_profile_stat *stat, unsigned long ip)
 727{
 728        struct ftrace_profile *rec = NULL;
 729
 730        /* prevent recursion (from NMIs) */
 731        if (atomic_inc_return(&stat->disabled) != 1)
 732                goto out;
 733
 734        /*
 735         * Try to find the function again since an NMI
 736         * could have added it
 737         */
 738        rec = ftrace_find_profiled_func(stat, ip);
 739        if (rec)
 740                goto out;
 741
 742        if (stat->pages->index == PROFILES_PER_PAGE) {
 743                if (!stat->pages->next)
 744                        goto out;
 745                stat->pages = stat->pages->next;
 746        }
 747
 748        rec = &stat->pages->records[stat->pages->index++];
 749        rec->ip = ip;
 750        ftrace_add_profile(stat, rec);
 751
 752 out:
 753        atomic_dec(&stat->disabled);
 754
 755        return rec;
 756}
 757
 758static void
 759function_profile_call(unsigned long ip, unsigned long parent_ip,
 760                      struct ftrace_ops *ops, struct pt_regs *regs)
 761{
 762        struct ftrace_profile_stat *stat;
 763        struct ftrace_profile *rec;
 764        unsigned long flags;
 765
 766        if (!ftrace_profile_enabled)
 767                return;
 768
 769        local_irq_save(flags);
 770
 771        stat = this_cpu_ptr(&ftrace_profile_stats);
 772        if (!stat->hash || !ftrace_profile_enabled)
 773                goto out;
 774
 775        rec = ftrace_find_profiled_func(stat, ip);
 776        if (!rec) {
 777                rec = ftrace_profile_alloc(stat, ip);
 778                if (!rec)
 779                        goto out;
 780        }
 781
 782        rec->counter++;
 783 out:
 784        local_irq_restore(flags);
 785}
 786
 787#ifdef CONFIG_FUNCTION_GRAPH_TRACER
 788static bool fgraph_graph_time = true;
 789
 790void ftrace_graph_graph_time_control(bool enable)
 791{
 792        fgraph_graph_time = enable;
 793}
 794
 795static int profile_graph_entry(struct ftrace_graph_ent *trace)
 796{
 797        struct ftrace_ret_stack *ret_stack;
 798
 799        function_profile_call(trace->func, 0, NULL, NULL);
 800
 801        /* If function graph is shutting down, ret_stack can be NULL */
 802        if (!current->ret_stack)
 803                return 0;
 804
 805        ret_stack = ftrace_graph_get_ret_stack(current, 0);
 806        if (ret_stack)
 807                ret_stack->subtime = 0;
 808
 809        return 1;
 810}
 811
 812static void profile_graph_return(struct ftrace_graph_ret *trace)
 813{
 814        struct ftrace_ret_stack *ret_stack;
 815        struct ftrace_profile_stat *stat;
 816        unsigned long long calltime;
 817        struct ftrace_profile *rec;
 818        unsigned long flags;
 819
 820        local_irq_save(flags);
 821        stat = this_cpu_ptr(&ftrace_profile_stats);
 822        if (!stat->hash || !ftrace_profile_enabled)
 823                goto out;
 824
 825        /* If the calltime was zero'd, ignore it */
 826        if (!trace->calltime)
 827                goto out;
 828
 829        calltime = trace->rettime - trace->calltime;
 830
 831        if (!fgraph_graph_time) {
 832
 833                /* Append this call time to the parent time to subtract */
 834                ret_stack = ftrace_graph_get_ret_stack(current, 1);
 835                if (ret_stack)
 836                        ret_stack->subtime += calltime;
 837
 838                ret_stack = ftrace_graph_get_ret_stack(current, 0);
 839                if (ret_stack && ret_stack->subtime < calltime)
 840                        calltime -= ret_stack->subtime;
 841                else
 842                        calltime = 0;
 843        }
 844
 845        rec = ftrace_find_profiled_func(stat, trace->func);
 846        if (rec) {
 847                rec->time += calltime;
 848                rec->time_squared += calltime * calltime;
 849        }
 850
 851 out:
 852        local_irq_restore(flags);
 853}
 854
 855static struct fgraph_ops fprofiler_ops = {
 856        .entryfunc = &profile_graph_entry,
 857        .retfunc = &profile_graph_return,
 858};
 859
 860static int register_ftrace_profiler(void)
 861{
 862        return register_ftrace_graph(&fprofiler_ops);
 863}
 864
 865static void unregister_ftrace_profiler(void)
 866{
 867        unregister_ftrace_graph(&fprofiler_ops);
 868}
 869#else
 870static struct ftrace_ops ftrace_profile_ops __read_mostly = {
 871        .func           = function_profile_call,
 872        .flags          = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED,
 873        INIT_OPS_HASH(ftrace_profile_ops)
 874};
 875
 876static int register_ftrace_profiler(void)
 877{
 878        return register_ftrace_function(&ftrace_profile_ops);
 879}
 880
 881static void unregister_ftrace_profiler(void)
 882{
 883        unregister_ftrace_function(&ftrace_profile_ops);
 884}
 885#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
 886
 887static ssize_t
 888ftrace_profile_write(struct file *filp, const char __user *ubuf,
 889                     size_t cnt, loff_t *ppos)
 890{
 891        unsigned long val;
 892        int ret;
 893
 894        ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
 895        if (ret)
 896                return ret;
 897
 898        val = !!val;
 899
 900        mutex_lock(&ftrace_profile_lock);
 901        if (ftrace_profile_enabled ^ val) {
 902                if (val) {
 903                        ret = ftrace_profile_init();
 904                        if (ret < 0) {
 905                                cnt = ret;
 906                                goto out;
 907                        }
 908
 909                        ret = register_ftrace_profiler();
 910                        if (ret < 0) {
 911                                cnt = ret;
 912                                goto out;
 913                        }
 914                        ftrace_profile_enabled = 1;
 915                } else {
 916                        ftrace_profile_enabled = 0;
 917                        /*
 918                         * unregister_ftrace_profiler calls stop_machine
 919                         * so this acts like a synchronize_rcu.
 920                         */
 921                        unregister_ftrace_profiler();
 922                }
 923        }
 924 out:
 925        mutex_unlock(&ftrace_profile_lock);
 926
 927        *ppos += cnt;
 928
 929        return cnt;
 930}
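/*
 * From user space this handler is reached through the tracefs file
 * created below, typically:
 *
 *	echo 1 > /sys/kernel/tracing/function_profile_enabled
 *	cat /sys/kernel/tracing/trace_stat/function0
 */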
 931
 932static ssize_t
 933ftrace_profile_read(struct file *filp, char __user *ubuf,
 934                     size_t cnt, loff_t *ppos)
 935{
 936        char buf[64];           /* big enough to hold a number */
 937        int r;
 938
 939        r = sprintf(buf, "%u\n", ftrace_profile_enabled);
 940        return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
 941}
 942
 943static const struct file_operations ftrace_profile_fops = {
 944        .open           = tracing_open_generic,
 945        .read           = ftrace_profile_read,
 946        .write          = ftrace_profile_write,
 947        .llseek         = default_llseek,
 948};
 949
 950/* used to initialize the real stat files */
 951static struct tracer_stat function_stats __initdata = {
 952        .name           = "functions",
 953        .stat_start     = function_stat_start,
 954        .stat_next      = function_stat_next,
 955        .stat_cmp       = function_stat_cmp,
 956        .stat_headers   = function_stat_headers,
 957        .stat_show      = function_stat_show
 958};
 959
 960static __init void ftrace_profile_tracefs(struct dentry *d_tracer)
 961{
 962        struct ftrace_profile_stat *stat;
 963        struct dentry *entry;
 964        char *name;
 965        int ret;
 966        int cpu;
 967
 968        for_each_possible_cpu(cpu) {
 969                stat = &per_cpu(ftrace_profile_stats, cpu);
 970
 971                name = kasprintf(GFP_KERNEL, "function%d", cpu);
 972                if (!name) {
 973                        /*
 974                         * The files created are permanent; if something
 975                         * goes wrong here, we still do not free the memory.
 976                         */
 977                        WARN(1,
 978                             "Could not allocate stat file for cpu %d\n",
 979                             cpu);
 980                        return;
 981                }
 982                stat->stat = function_stats;
 983                stat->stat.name = name;
 984                ret = register_stat_tracer(&stat->stat);
 985                if (ret) {
 986                        WARN(1,
 987                             "Could not register function stat for cpu %d\n",
 988                             cpu);
 989                        kfree(name);
 990                        return;
 991                }
 992        }
 993
 994        entry = tracefs_create_file("function_profile_enabled", 0644,
 995                                    d_tracer, NULL, &ftrace_profile_fops);
 996        if (!entry)
 997                pr_warn("Could not create tracefs 'function_profile_enabled' entry\n");
 998}
 999
1000#else /* CONFIG_FUNCTION_PROFILER */
1001static __init void ftrace_profile_tracefs(struct dentry *d_tracer)
1002{
1003}
1004#endif /* CONFIG_FUNCTION_PROFILER */
1005
1006#ifdef CONFIG_DYNAMIC_FTRACE
1007
1008static struct ftrace_ops *removed_ops;
1009
1010/*
1011 * Set when doing a global update, like enabling all recs or disabling them.
1012 * It is not set when just updating a single ftrace_ops.
1013 */
1014static bool update_all_ops;
1015
1016#ifndef CONFIG_FTRACE_MCOUNT_RECORD
1017# error Dynamic ftrace depends on MCOUNT_RECORD
1018#endif
1019
1020struct ftrace_func_probe {
1021        struct ftrace_probe_ops *probe_ops;
1022        struct ftrace_ops       ops;
1023        struct trace_array      *tr;
1024        struct list_head        list;
1025        void                    *data;
1026        int                     ref;
1027};
1028
1029/*
1030 * We make these constant because no one should touch them,
1031 * but they are used as the default "empty hash", to avoid allocating
1032 * it all the time. These are in a read-only section such that if
1033 * anyone does try to modify them, it will cause an exception.
1034 */
1035static const struct hlist_head empty_buckets[1];
1036static const struct ftrace_hash empty_hash = {
1037        .buckets = (struct hlist_head *)empty_buckets,
1038};
1039#define EMPTY_HASH      ((struct ftrace_hash *)&empty_hash)
1040
1041struct ftrace_ops global_ops = {
1042        .func                           = ftrace_stub,
1043        .local_hash.notrace_hash        = EMPTY_HASH,
1044        .local_hash.filter_hash         = EMPTY_HASH,
1045        INIT_OPS_HASH(global_ops)
1046        .flags                          = FTRACE_OPS_FL_RECURSION_SAFE |
1047                                          FTRACE_OPS_FL_INITIALIZED |
1048                                          FTRACE_OPS_FL_PID,
1049};
1050
1051/*
1052 * Used by the stack unwinder to know about dynamic ftrace trampolines.
1053 */
1054struct ftrace_ops *ftrace_ops_trampoline(unsigned long addr)
1055{
1056        struct ftrace_ops *op = NULL;
1057
1058        /*
1059         * Some of the ops may be dynamically allocated,
1060         * they are freed after a synchronize_rcu().
1061         */
1062        preempt_disable_notrace();
1063
1064        do_for_each_ftrace_op(op, ftrace_ops_list) {
1065                /*
1066                 * This is to check for dynamically allocated trampolines.
1067                 * Trampolines that are in kernel text will have
1068                 * core_kernel_text() return true.
1069                 */
1070                if (op->trampoline && op->trampoline_size)
1071                        if (addr >= op->trampoline &&
1072                            addr < op->trampoline + op->trampoline_size) {
1073                                preempt_enable_notrace();
1074                                return op;
1075                        }
1076        } while_for_each_ftrace_op(op);
1077        preempt_enable_notrace();
1078
1079        return NULL;
1080}
1081
1082/*
1083 * This is used by __kernel_text_address() to return true if the
1084 * address is on a dynamically allocated trampoline that would
1085 * not return true for either core_kernel_text() or
1086 * is_module_text_address().
1087 */
1088bool is_ftrace_trampoline(unsigned long addr)
1089{
1090        return ftrace_ops_trampoline(addr) != NULL;
1091}
1092
1093struct ftrace_page {
1094        struct ftrace_page      *next;
1095        struct dyn_ftrace       *records;
1096        int                     index;
1097        int                     size;
1098};
1099
1100#define ENTRY_SIZE sizeof(struct dyn_ftrace)
1101#define ENTRIES_PER_PAGE (PAGE_SIZE / ENTRY_SIZE)
1102
1103static struct ftrace_page       *ftrace_pages_start;
1104static struct ftrace_page       *ftrace_pages;
1105
1106static __always_inline unsigned long
1107ftrace_hash_key(struct ftrace_hash *hash, unsigned long ip)
1108{
1109        if (hash->size_bits > 0)
1110                return hash_long(ip, hash->size_bits);
1111
1112        return 0;
1113}
1114
1115/* Only use this function if ftrace_hash_empty() has already been tested */
1116static __always_inline struct ftrace_func_entry *
1117__ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip)
1118{
1119        unsigned long key;
1120        struct ftrace_func_entry *entry;
1121        struct hlist_head *hhd;
1122
1123        key = ftrace_hash_key(hash, ip);
1124        hhd = &hash->buckets[key];
1125
1126        hlist_for_each_entry_rcu_notrace(entry, hhd, hlist) {
1127                if (entry->ip == ip)
1128                        return entry;
1129        }
1130        return NULL;
1131}
1132
1133/**
1134 * ftrace_lookup_ip - Test to see if an ip exists in an ftrace_hash
1135 * @hash: The hash to look at
1136 * @ip: The instruction pointer to test
1137 *
1138 * Search a given @hash to see if a given instruction pointer (@ip)
1139 * exists in it.
1140 *
1141 * Returns the entry that holds the @ip if found. NULL otherwise.
1142 */
1143struct ftrace_func_entry *
1144ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip)
1145{
1146        if (ftrace_hash_empty(hash))
1147                return NULL;
1148
1149        return __ftrace_lookup_ip(hash, ip);
1150}
1151
1152static void __add_hash_entry(struct ftrace_hash *hash,
1153                             struct ftrace_func_entry *entry)
1154{
1155        struct hlist_head *hhd;
1156        unsigned long key;
1157
1158        key = ftrace_hash_key(hash, entry->ip);
1159        hhd = &hash->buckets[key];
1160        hlist_add_head(&entry->hlist, hhd);
1161        hash->count++;
1162}
1163
1164static int add_hash_entry(struct ftrace_hash *hash, unsigned long ip)
1165{
1166        struct ftrace_func_entry *entry;
1167
1168        entry = kmalloc(sizeof(*entry), GFP_KERNEL);
1169        if (!entry)
1170                return -ENOMEM;
1171
1172        entry->ip = ip;
1173        __add_hash_entry(hash, entry);
1174
1175        return 0;
1176}
1177
1178static void
1179free_hash_entry(struct ftrace_hash *hash,
1180                  struct ftrace_func_entry *entry)
1181{
1182        hlist_del(&entry->hlist);
1183        kfree(entry);
1184        hash->count--;
1185}
1186
1187static void
1188remove_hash_entry(struct ftrace_hash *hash,
1189                  struct ftrace_func_entry *entry)
1190{
1191        hlist_del_rcu(&entry->hlist);
1192        hash->count--;
1193}
1194
1195static void ftrace_hash_clear(struct ftrace_hash *hash)
1196{
1197        struct hlist_head *hhd;
1198        struct hlist_node *tn;
1199        struct ftrace_func_entry *entry;
1200        int size = 1 << hash->size_bits;
1201        int i;
1202
1203        if (!hash->count)
1204                return;
1205
1206        for (i = 0; i < size; i++) {
1207                hhd = &hash->buckets[i];
1208                hlist_for_each_entry_safe(entry, tn, hhd, hlist)
1209                        free_hash_entry(hash, entry);
1210        }
1211        FTRACE_WARN_ON(hash->count);
1212}
1213
1214static void free_ftrace_mod(struct ftrace_mod_load *ftrace_mod)
1215{
1216        list_del(&ftrace_mod->list);
1217        kfree(ftrace_mod->module);
1218        kfree(ftrace_mod->func);
1219        kfree(ftrace_mod);
1220}
1221
1222static void clear_ftrace_mod_list(struct list_head *head)
1223{
1224        struct ftrace_mod_load *p, *n;
1225
1226        /* stack tracer isn't supported yet */
1227        if (!head)
1228                return;
1229
1230        mutex_lock(&ftrace_lock);
1231        list_for_each_entry_safe(p, n, head, list)
1232                free_ftrace_mod(p);
1233        mutex_unlock(&ftrace_lock);
1234}
1235
1236static void free_ftrace_hash(struct ftrace_hash *hash)
1237{
1238        if (!hash || hash == EMPTY_HASH)
1239                return;
1240        ftrace_hash_clear(hash);
1241        kfree(hash->buckets);
1242        kfree(hash);
1243}
1244
1245static void __free_ftrace_hash_rcu(struct rcu_head *rcu)
1246{
1247        struct ftrace_hash *hash;
1248
1249        hash = container_of(rcu, struct ftrace_hash, rcu);
1250        free_ftrace_hash(hash);
1251}
1252
1253static void free_ftrace_hash_rcu(struct ftrace_hash *hash)
1254{
1255        if (!hash || hash == EMPTY_HASH)
1256                return;
1257        call_rcu(&hash->rcu, __free_ftrace_hash_rcu);
1258}
1259
1260void ftrace_free_filter(struct ftrace_ops *ops)
1261{
1262        ftrace_ops_init(ops);
1263        free_ftrace_hash(ops->func_hash->filter_hash);
1264        free_ftrace_hash(ops->func_hash->notrace_hash);
1265}
1266
1267static struct ftrace_hash *alloc_ftrace_hash(int size_bits)
1268{
1269        struct ftrace_hash *hash;
1270        int size;
1271
1272        hash = kzalloc(sizeof(*hash), GFP_KERNEL);
1273        if (!hash)
1274                return NULL;
1275
1276        size = 1 << size_bits;
1277        hash->buckets = kcalloc(size, sizeof(*hash->buckets), GFP_KERNEL);
1278
1279        if (!hash->buckets) {
1280                kfree(hash);
1281                return NULL;
1282        }
1283
1284        hash->size_bits = size_bits;
1285
1286        return hash;
1287}
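/*
 * Sizing example (using the constants near the top of this file):
 * alloc_ftrace_hash(FTRACE_HASH_DEFAULT_BITS) allocates 1 << 10 = 1024
 * buckets, and FTRACE_HASH_MAX_BITS caps any hash at 1 << 12 = 4096.
 */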
1288
1289
1290static int ftrace_add_mod(struct trace_array *tr,
1291                          const char *func, const char *module,
1292                          int enable)
1293{
1294        struct ftrace_mod_load *ftrace_mod;
1295        struct list_head *mod_head = enable ? &tr->mod_trace : &tr->mod_notrace;
1296
1297        ftrace_mod = kzalloc(sizeof(*ftrace_mod), GFP_KERNEL);
1298        if (!ftrace_mod)
1299                return -ENOMEM;
1300
1301        ftrace_mod->func = kstrdup(func, GFP_KERNEL);
1302        ftrace_mod->module = kstrdup(module, GFP_KERNEL);
1303        ftrace_mod->enable = enable;
1304
1305        if (!ftrace_mod->func || !ftrace_mod->module)
1306                goto out_free;
1307
1308        list_add(&ftrace_mod->list, mod_head);
1309
1310        return 0;
1311
1312 out_free:
1313        free_ftrace_mod(ftrace_mod);
1314
1315        return -ENOMEM;
1316}
1317
1318static struct ftrace_hash *
1319alloc_and_copy_ftrace_hash(int size_bits, struct ftrace_hash *hash)
1320{
1321        struct ftrace_func_entry *entry;
1322        struct ftrace_hash *new_hash;
1323        int size;
1324        int ret;
1325        int i;
1326
1327        new_hash = alloc_ftrace_hash(size_bits);
1328        if (!new_hash)
1329                return NULL;
1330
1331        if (hash)
1332                new_hash->flags = hash->flags;
1333
1334        /* Empty hash? */
1335        if (ftrace_hash_empty(hash))
1336                return new_hash;
1337
1338        size = 1 << hash->size_bits;
1339        for (i = 0; i < size; i++) {
1340                hlist_for_each_entry(entry, &hash->buckets[i], hlist) {
1341                        ret = add_hash_entry(new_hash, entry->ip);
1342                        if (ret < 0)
1343                                goto free_hash;
1344                }
1345        }
1346
1347        FTRACE_WARN_ON(new_hash->count != hash->count);
1348
1349        return new_hash;
1350
1351 free_hash:
1352        free_ftrace_hash(new_hash);
1353        return NULL;
1354}
1355
1356static void
1357ftrace_hash_rec_disable_modify(struct ftrace_ops *ops, int filter_hash);
1358static void
1359ftrace_hash_rec_enable_modify(struct ftrace_ops *ops, int filter_hash);
1360
1361static int ftrace_hash_ipmodify_update(struct ftrace_ops *ops,
1362                                       struct ftrace_hash *new_hash);
1363
1364static struct ftrace_hash *dup_hash(struct ftrace_hash *src, int size)
1365{
1366        struct ftrace_func_entry *entry;
1367        struct ftrace_hash *new_hash;
1368        struct hlist_head *hhd;
1369        struct hlist_node *tn;
1370        int bits = 0;
1371        int i;
1372
1373        /*
1374         * Make the hash size about 1/2 the # found
1375         */
1376        for (size /= 2; size; size >>= 1)
1377                bits++;
1378
1379        /* Don't allocate too much */
1380        if (bits > FTRACE_HASH_MAX_BITS)
1381                bits = FTRACE_HASH_MAX_BITS;
1382
1383        new_hash = alloc_ftrace_hash(bits);
1384        if (!new_hash)
1385                return NULL;
1386
1387        new_hash->flags = src->flags;
1388
1389        size = 1 << src->size_bits;
1390        for (i = 0; i < size; i++) {
1391                hhd = &src->buckets[i];
1392                hlist_for_each_entry_safe(entry, tn, hhd, hlist) {
1393                        remove_hash_entry(src, entry);
1394                        __add_hash_entry(new_hash, entry);
1395                }
1396        }
1397        return new_hash;
1398}
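/*
 * A worked instance of the sizing loop above: for size = 100 entries,
 * halving once gives 50, which shifts right to zero in 6 steps, so
 * bits = 6 and the new hash gets 1 << 6 = 64 buckets, roughly half the
 * number of entries as the comment intends.
 */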
1399
1400static struct ftrace_hash *
1401__ftrace_hash_move(struct ftrace_hash *src)
1402{
1403        int size = src->count;
1404
1405        /*
1406         * If the new source is empty, just return the empty_hash.
1407         */
1408        if (ftrace_hash_empty(src))
1409                return EMPTY_HASH;
1410
1411        return dup_hash(src, size);
1412}
1413
1414static int
1415ftrace_hash_move(struct ftrace_ops *ops, int enable,
1416                 struct ftrace_hash **dst, struct ftrace_hash *src)
1417{
1418        struct ftrace_hash *new_hash;
1419        int ret;
1420
1421        /* Reject setting notrace hash on IPMODIFY ftrace_ops */
1422        if (ops->flags & FTRACE_OPS_FL_IPMODIFY && !enable)
1423                return -EINVAL;
1424
1425        new_hash = __ftrace_hash_move(src);
1426        if (!new_hash)
1427                return -ENOMEM;
1428
1429        /* Make sure this can be applied if it is IPMODIFY ftrace_ops */
1430        if (enable) {
1431                /* IPMODIFY should be updated only when filter_hash updating */
1432                ret = ftrace_hash_ipmodify_update(ops, new_hash);
1433                if (ret < 0) {
1434                        free_ftrace_hash(new_hash);
1435                        return ret;
1436                }
1437        }
1438
1439        /*
1440         * Remove the current set, update the hash and add
1441         * them back.
1442         */
1443        ftrace_hash_rec_disable_modify(ops, enable);
1444
1445        rcu_assign_pointer(*dst, new_hash);
1446
1447        ftrace_hash_rec_enable_modify(ops, enable);
1448
1449        return 0;
1450}
1451
1452static bool hash_contains_ip(unsigned long ip,
1453                             struct ftrace_ops_hash *hash)
1454{
1455        /*
1456         * The function record is a match if it exists in the filter
1457         * hash and not in the notrace hash. Note, an empty hash is
1458         * considered a match for the filter hash, but an empty
1459         * notrace hash is considered not in the notrace hash.
1460         */
1461        return (ftrace_hash_empty(hash->filter_hash) ||
1462                __ftrace_lookup_ip(hash->filter_hash, ip)) &&
1463                (ftrace_hash_empty(hash->notrace_hash) ||
1464                 !__ftrace_lookup_ip(hash->notrace_hash, ip));
1465}
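/*
 * Concrete cases of the rule above (illustrative): with both hashes
 * empty, every ip matches; with only "foo" in the filter hash, only
 * foo's ip matches; adding foo's ip to the notrace hash as well makes
 * it no longer match, since notrace always wins.
 */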
1466
1467/*
1468 * Test the hashes for this ops to see if we want to call
1469 * the ops->func or not.
1470 *
1471 * It's a match if the ip is in the ops->filter_hash or
1472 * the filter_hash does not exist or is empty,
1473 *  AND
1474 * the ip is not in the ops->notrace_hash.
1475 *
1476 * This needs to be called with preemption disabled as
1477 * the hashes are freed with call_rcu().
1478 */
1479int
1480ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip, void *regs)
1481{
1482        struct ftrace_ops_hash hash;
1483        int ret;
1484
1485#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
1486        /*
1487         * There's a small race when adding ops in which a handler
1488         * that wants regs may be called without them. We cannot
1489         * allow that handler to be called if regs is NULL.
1490         */
1491        if (regs == NULL && (ops->flags & FTRACE_OPS_FL_SAVE_REGS))
1492                return 0;
1493#endif
1494
1495        rcu_assign_pointer(hash.filter_hash, ops->func_hash->filter_hash);
1496        rcu_assign_pointer(hash.notrace_hash, ops->func_hash->notrace_hash);
1497
1498        if (hash_contains_ip(ip, &hash))
1499                ret = 1;
1500        else
1501                ret = 0;
1502
1503        return ret;
1504}
1505
1506/*
1507 * This is a double for loop. Do not use 'break' to break out of it;
1508 * you must use a goto.
1509 */
1510#define do_for_each_ftrace_rec(pg, rec)                                 \
1511        for (pg = ftrace_pages_start; pg; pg = pg->next) {              \
1512                int _____i;                                             \
1513                for (_____i = 0; _____i < pg->index; _____i++) {        \
1514                        rec = &pg->records[_____i];
1515
1516#define while_for_each_ftrace_rec()             \
1517                }                               \
1518        }
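/*
 * Usage sketch for the iterator pair above (illustrative only;
 * "target" is a hypothetical address). Note the goto, since 'break'
 * would only exit the inner loop:
 *
 *	struct ftrace_page *pg;
 *	struct dyn_ftrace *rec;
 *
 *	do_for_each_ftrace_rec(pg, rec) {
 *		if (rec->ip == target)
 *			goto found;
 *	} while_for_each_ftrace_rec();
 * found:
 *	...
 */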
1519
1520
1521static int ftrace_cmp_recs(const void *a, const void *b)
1522{
1523        const struct dyn_ftrace *key = a;
1524        const struct dyn_ftrace *rec = b;
1525
1526        if (key->flags < rec->ip)
1527                return -1;
1528        if (key->ip >= rec->ip + MCOUNT_INSN_SIZE)
1529                return 1;
1530        return 0;
1531}
1532
1533static struct dyn_ftrace *lookup_rec(unsigned long start, unsigned long end)
1534{
1535        struct ftrace_page *pg;
1536        struct dyn_ftrace *rec = NULL;
1537        struct dyn_ftrace key;
1538
1539        key.ip = start;
1540        key.flags = end;        /* overload flags, as it is unsigned long */
1541
1542        for (pg = ftrace_pages_start; pg; pg = pg->next) {
1543                if (end < pg->records[0].ip ||
1544                    start >= (pg->records[pg->index - 1].ip + MCOUNT_INSN_SIZE))
1545                        continue;
1546                rec = bsearch(&key, pg->records, pg->index,
1547                              sizeof(struct dyn_ftrace),
1548                              ftrace_cmp_recs);
1549                if (rec)
1550                        break;
1551        }
1552        return rec;
1553}
1554
1555/**
1556 * ftrace_location_range - return the first address of a traced location
1557 *      if it touches the given ip range
1558 * @start: start of range to search.
1559 * @end: end of range to search (inclusive). @end points to the last byte
1560 *      to check.
1561 *
1562 * Returns rec->ip if the related ftrace location is at least partly within
1563 * the given address range. That is, the first address of the instruction
1564 * that is either a NOP or call to the function tracer. It checks the ftrace
1565 * internal tables to determine if the address belongs or not.
1566 */
1567unsigned long ftrace_location_range(unsigned long start, unsigned long end)
1568{
1569        struct dyn_ftrace *rec;
1570
1571        rec = lookup_rec(start, end);
1572        if (rec)
1573                return rec->ip;
1574
1575        return 0;
1576}
1577
1578/**
1579 * ftrace_location - return the ftrace address if the given ip is a traced location
1580 * @ip: the instruction pointer to check
1581 *
1582 * Returns rec->ip if @ip given is a pointer to a ftrace location.
1583 * That is, the instruction that is either a NOP or call to
1584 * the function tracer. It checks the ftrace internal tables to
1585 * determine if the address belongs or not.
1586 */
1587unsigned long ftrace_location(unsigned long ip)
1588{
1589        return ftrace_location_range(ip, ip);
1590}
1591
1592/**
1593 * ftrace_text_reserved - return true if range contains an ftrace location
1594 * @start: start of range to search
1595 * @end: end of range to search (inclusive). @end points to the last byte to check.
1596 *
1597 * Returns 1 if @start and @end contain a ftrace location.
1598 * That is, the instruction that is either a NOP or call to
1599 * the function tracer. It checks the ftrace internal tables to
1600 * determine if the address belongs or not.
1601 */
1602int ftrace_text_reserved(const void *start, const void *end)
1603{
1604        unsigned long ret;
1605
1606        ret = ftrace_location_range((unsigned long)start,
1607                                    (unsigned long)end);
1608
1609        return (int)!!ret;
1610}
1611
1612/* Test if ops registered to this rec needs regs */
1613static bool test_rec_ops_needs_regs(struct dyn_ftrace *rec)
1614{
1615        struct ftrace_ops *ops;
1616        bool keep_regs = false;
1617
1618        for (ops = ftrace_ops_list;
1619             ops != &ftrace_list_end; ops = ops->next) {
1620                /* pass rec in as regs to have non-NULL val */
1621                if (ftrace_ops_test(ops, rec->ip, rec)) {
1622                        if (ops->flags & FTRACE_OPS_FL_SAVE_REGS) {
1623                                keep_regs = true;
1624                                break;
1625                        }
1626                }
1627        }
1628
1629        return  keep_regs;
1630}
1631
1632static struct ftrace_ops *
1633ftrace_find_tramp_ops_any(struct dyn_ftrace *rec);
1634static struct ftrace_ops *
1635ftrace_find_tramp_ops_next(struct dyn_ftrace *rec, struct ftrace_ops *ops);
1636
1637static bool __ftrace_hash_rec_update(struct ftrace_ops *ops,
1638                                     int filter_hash,
1639                                     bool inc)
1640{
1641        struct ftrace_hash *hash;
1642        struct ftrace_hash *other_hash;
1643        struct ftrace_page *pg;
1644        struct dyn_ftrace *rec;
1645        bool update = false;
1646        int count = 0;
1647        int all = false;
1648
1649        /* Only update if the ops has been registered */
1650        if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
1651                return false;
1652
1653        /*
1654         * In the filter_hash case:
1655         *   If the count is zero, we update all records.
1656         *   Otherwise we just update the items in the hash.
1657         *
1658         * In the notrace_hash case:
1659         *   We enable the update in the hash.
1660         *   As disabling notrace means enabling the tracing,
1661         *   and enabling notrace means disabling, the inc variable
1662         *   is inverted.
1663         */
1664        if (filter_hash) {
1665                hash = ops->func_hash->filter_hash;
1666                other_hash = ops->func_hash->notrace_hash;
1667                if (ftrace_hash_empty(hash))
1668                        all = true;
1669        } else {
1670                inc = !inc;
1671                hash = ops->func_hash->notrace_hash;
1672                other_hash = ops->func_hash->filter_hash;
1673                /*
1674                 * If the notrace hash has no items,
1675                 * then there's nothing to do.
1676                 */
1677                if (ftrace_hash_empty(hash))
1678                        return false;
1679        }
1680
1681        do_for_each_ftrace_rec(pg, rec) {
1682                int in_other_hash = 0;
1683                int in_hash = 0;
1684                int match = 0;
1685
1686                if (rec->flags & FTRACE_FL_DISABLED)
1687                        continue;
1688
1689                if (all) {
1690                        /*
1691                         * Only the filter_hash affects all records.
1692                         * Update if the record is not in the notrace hash.
1693                         */
1694                        if (!other_hash || !ftrace_lookup_ip(other_hash, rec->ip))
1695                                match = 1;
1696                } else {
1697                        in_hash = !!ftrace_lookup_ip(hash, rec->ip);
1698                        in_other_hash = !!ftrace_lookup_ip(other_hash, rec->ip);
1699
1700                        /*
1701                         * If filter_hash is set, we want to match all functions
1702                         * that are in the hash but not in the other hash.
1703                         *
1704                         * If filter_hash is not set, then we are decrementing.
1705                         * That means we match anything that is in the hash
1706                         * and also in the other_hash. That is, we need to turn
1707                         * off functions in the other hash because they are disabled
1708                         * by this hash.
1709                         */
1710                        if (filter_hash && in_hash && !in_other_hash)
1711                                match = 1;
1712                        else if (!filter_hash && in_hash &&
1713                                 (in_other_hash || ftrace_hash_empty(other_hash)))
1714                                match = 1;
1715                }
1716                if (!match)
1717                        continue;
1718
1719                if (inc) {
1720                        rec->flags++;
1721                        if (FTRACE_WARN_ON(ftrace_rec_count(rec) == FTRACE_REF_MAX))
1722                                return false;
1723
1724                        if (ops->flags & FTRACE_OPS_FL_DIRECT)
1725                                rec->flags |= FTRACE_FL_DIRECT;
1726
1727                        /*
1728                         * If there's only a single callback registered to a
1729                         * function, and the ops has a trampoline registered
1730                         * for it, then we can call it directly.
1731                         */
1732                        if (ftrace_rec_count(rec) == 1 && ops->trampoline)
1733                                rec->flags |= FTRACE_FL_TRAMP;
1734                        else
1735                                /*
1736                                 * If we are adding another function callback
1737                                 * to this function, and the previous had a
1738                                 * custom trampoline in use, then we need to go
1739                                 * back to the default trampoline.
1740                                 */
1741                                rec->flags &= ~FTRACE_FL_TRAMP;
1742
1743                        /*
1744                         * If any ops wants regs saved for this function
1745                         * then all ops will get saved regs.
1746                         */
1747                        if (ops->flags & FTRACE_OPS_FL_SAVE_REGS)
1748                                rec->flags |= FTRACE_FL_REGS;
1749                } else {
1750                        if (FTRACE_WARN_ON(ftrace_rec_count(rec) == 0))
1751                                return false;
1752                        rec->flags--;
1753
1754                        /*
1755                         * Only the internal direct_ops should have the
1756                         * DIRECT flag set. Thus, if it is removing a
1757                         * function, then that function should no longer
1758                         * be direct.
1759                         */
1760                        if (ops->flags & FTRACE_OPS_FL_DIRECT)
1761                                rec->flags &= ~FTRACE_FL_DIRECT;
1762
1763                        /*
1764                         * If the rec had REGS enabled and the ops that is
1765                         * being removed had REGS set, then see if there is
1766                         * still any ops for this record that wants regs.
1767                         * If not, we can stop recording them.
1768                         */
1769                        if (ftrace_rec_count(rec) > 0 &&
1770                            rec->flags & FTRACE_FL_REGS &&
1771                            ops->flags & FTRACE_OPS_FL_SAVE_REGS) {
1772                                if (!test_rec_ops_needs_regs(rec))
1773                                        rec->flags &= ~FTRACE_FL_REGS;
1774                        }
1775
1776                        /*
1777                         * The TRAMP flag needs to be set only if the rec
1778                         * count is decremented to one, and the ops that is
1779                         * left has a trampoline, as TRAMP can only be
1780                         * enabled when there is a single ops attached
1781                         * to the record.
1782                         */
1783                        if (ftrace_rec_count(rec) == 1 &&
1784                            ftrace_find_tramp_ops_any(rec))
1785                                rec->flags |= FTRACE_FL_TRAMP;
1786                        else
1787                                rec->flags &= ~FTRACE_FL_TRAMP;
1788
1789                        /*
1790                         * flags will be cleared in ftrace_check_record()
1791                         * if rec count is zero.
1792                         */
1793                }
1794                count++;
1795
1796                /* Must match FTRACE_UPDATE_CALLS in ftrace_modify_all_code() */
1797                update |= ftrace_test_record(rec, true) != FTRACE_UPDATE_IGNORE;
1798
1799                /* Shortcut, if we handled all records, we are done. */
1800                if (!all && count == hash->count)
1801                        return update;
1802        } while_for_each_ftrace_rec();
1803
1804        return update;
1805}
1806
1807static bool ftrace_hash_rec_disable(struct ftrace_ops *ops,
1808                                    int filter_hash)
1809{
1810        return __ftrace_hash_rec_update(ops, filter_hash, 0);
1811}
1812
1813static bool ftrace_hash_rec_enable(struct ftrace_ops *ops,
1814                                   int filter_hash)
1815{
1816        return __ftrace_hash_rec_update(ops, filter_hash, 1);
1817}
1818
1819static void ftrace_hash_rec_update_modify(struct ftrace_ops *ops,
1820                                          int filter_hash, int inc)
1821{
1822        struct ftrace_ops *op;
1823
1824        __ftrace_hash_rec_update(ops, filter_hash, inc);
1825
1826        if (ops->func_hash != &global_ops.local_hash)
1827                return;
1828
1829        /*
1830         * If the ops shares the global_ops hash, then we need to update
1831         * all ops that are enabled and use this hash.
1832         */
1833        do_for_each_ftrace_op(op, ftrace_ops_list) {
1834                /* Already done */
1835                if (op == ops)
1836                        continue;
1837                if (op->func_hash == &global_ops.local_hash)
1838                        __ftrace_hash_rec_update(op, filter_hash, inc);
1839        } while_for_each_ftrace_op(op);
1840}
1841
1842static void ftrace_hash_rec_disable_modify(struct ftrace_ops *ops,
1843                                           int filter_hash)
1844{
1845        ftrace_hash_rec_update_modify(ops, filter_hash, 0);
1846}
1847
1848static void ftrace_hash_rec_enable_modify(struct ftrace_ops *ops,
1849                                          int filter_hash)
1850{
1851        ftrace_hash_rec_update_modify(ops, filter_hash, 1);
1852}
1853
1854/*
1855 * Try to update IPMODIFY flag on each ftrace_rec. Return 0 if it is OK
1856 * or no update is needed, -EBUSY if it detects a conflict of the flag
1857 * on an ftrace_rec, and -EINVAL if the new_hash tries to trace all recs.
1858 * Note that old_hash and new_hash have the following meanings:
1859 *  - If the hash is NULL, it hits all recs (if IPMODIFY is set, this is rejected)
1860 *  - If the hash is EMPTY_HASH, it hits nothing
1861 *  - Anything else hits the recs which match the hash entries.
1862 */
1863static int __ftrace_hash_update_ipmodify(struct ftrace_ops *ops,
1864                                         struct ftrace_hash *old_hash,
1865                                         struct ftrace_hash *new_hash)
1866{
1867        struct ftrace_page *pg;
1868        struct dyn_ftrace *rec, *end = NULL;
1869        int in_old, in_new;
1870
1871        /* Only update if the ops has been registered */
1872        if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
1873                return 0;
1874
1875        if (!(ops->flags & FTRACE_OPS_FL_IPMODIFY))
1876                return 0;
1877
1878        /*
1879         * Since IPMODIFY is a very address-sensitive action, we do not
1880         * allow ftrace_ops to set all functions to a new hash.
1881         */
1882        if (!new_hash || !old_hash)
1883                return -EINVAL;
1884
1885        /* Update rec->flags */
1886        do_for_each_ftrace_rec(pg, rec) {
1887
1888                if (rec->flags & FTRACE_FL_DISABLED)
1889                        continue;
1890
1891                /* We need to update only differences of filter_hash */
1892                in_old = !!ftrace_lookup_ip(old_hash, rec->ip);
1893                in_new = !!ftrace_lookup_ip(new_hash, rec->ip);
1894                if (in_old == in_new)
1895                        continue;
1896
1897                if (in_new) {
1898                        /* New entries must ensure no others are using it */
1899                        if (rec->flags & FTRACE_FL_IPMODIFY)
1900                                goto rollback;
1901                        rec->flags |= FTRACE_FL_IPMODIFY;
1902                } else /* Removed entry */
1903                        rec->flags &= ~FTRACE_FL_IPMODIFY;
1904        } while_for_each_ftrace_rec();
1905
1906        return 0;
1907
1908rollback:
1909        end = rec;
1910
1911        /* Roll back what we did above */
1912        do_for_each_ftrace_rec(pg, rec) {
1913
1914                if (rec->flags & FTRACE_FL_DISABLED)
1915                        continue;
1916
1917                if (rec == end)
1918                        goto err_out;
1919
1920                in_old = !!ftrace_lookup_ip(old_hash, rec->ip);
1921                in_new = !!ftrace_lookup_ip(new_hash, rec->ip);
1922                if (in_old == in_new)
1923                        continue;
1924
1925                if (in_new)
1926                        rec->flags &= ~FTRACE_FL_IPMODIFY;
1927                else
1928                        rec->flags |= FTRACE_FL_IPMODIFY;
1929        } while_for_each_ftrace_rec();
1930
1931err_out:
1932        return -EBUSY;
1933}
1934
1935static int ftrace_hash_ipmodify_enable(struct ftrace_ops *ops)
1936{
1937        struct ftrace_hash *hash = ops->func_hash->filter_hash;
1938
1939        if (ftrace_hash_empty(hash))
1940                hash = NULL;
1941
1942        return __ftrace_hash_update_ipmodify(ops, EMPTY_HASH, hash);
1943}
1944
1945/* Disabling always succeeds */
1946static void ftrace_hash_ipmodify_disable(struct ftrace_ops *ops)
1947{
1948        struct ftrace_hash *hash = ops->func_hash->filter_hash;
1949
1950        if (ftrace_hash_empty(hash))
1951                hash = NULL;
1952
1953        __ftrace_hash_update_ipmodify(ops, hash, EMPTY_HASH);
1954}
1955
1956static int ftrace_hash_ipmodify_update(struct ftrace_ops *ops,
1957                                       struct ftrace_hash *new_hash)
1958{
1959        struct ftrace_hash *old_hash = ops->func_hash->filter_hash;
1960
1961        if (ftrace_hash_empty(old_hash))
1962                old_hash = NULL;
1963
1964        if (ftrace_hash_empty(new_hash))
1965                new_hash = NULL;
1966
1967        return __ftrace_hash_update_ipmodify(ops, old_hash, new_hash);
1968}
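
/*
 * A hedged sketch of an IPMODIFY user (a live-patching style ops;
 * my_handler and my_ops are hypothetical names). The ops must set
 * FTRACE_OPS_FL_IPMODIFY and filter on explicit functions, since an
 * IPMODIFY ops is never allowed to hit all recs (the -EINVAL case
 * above), and two IPMODIFY users on one function get -EBUSY:
 *
 *	static struct ftrace_ops my_ops = {
 *		.func	= my_handler,
 *		.flags	= FTRACE_OPS_FL_SAVE_REGS | FTRACE_OPS_FL_IPMODIFY,
 *	};
 *
 *	ftrace_set_filter(&my_ops, "do_sys_open", strlen("do_sys_open"), 0);
 *	register_ftrace_function(&my_ops);
 */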
1969
1970static void print_ip_ins(const char *fmt, const unsigned char *p)
1971{
1972        int i;
1973
1974        printk(KERN_CONT "%s", fmt);
1975
1976        for (i = 0; i < MCOUNT_INSN_SIZE; i++)
1977                printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]);
1978}
1979
1980enum ftrace_bug_type ftrace_bug_type;
1981const void *ftrace_expected;
1982
1983static void print_bug_type(void)
1984{
1985        switch (ftrace_bug_type) {
1986        case FTRACE_BUG_UNKNOWN:
1987                break;
1988        case FTRACE_BUG_INIT:
1989                pr_info("Initializing ftrace call sites\n");
1990                break;
1991        case FTRACE_BUG_NOP:
1992                pr_info("Setting ftrace call site to NOP\n");
1993                break;
1994        case FTRACE_BUG_CALL:
1995                pr_info("Setting ftrace call site to call ftrace function\n");
1996                break;
1997        case FTRACE_BUG_UPDATE:
1998                pr_info("Updating ftrace call site to call a different ftrace function\n");
1999                break;
2000        }
2001}
2002
2003/**
2004 * ftrace_bug - report and shutdown function tracer
2005 * @failed: The failed type (EFAULT, EINVAL, EPERM)
2006 * @rec: The record that failed
2007 *
2008 * The arch code that enables or disables the function tracing
2009 * can call ftrace_bug() when it has detected a problem in
2010 * modifying the code. @failed should be one of either:
2011 * EFAULT - if the problem happens on reading the @ip address
2012 * EINVAL - if what is read at @ip is not what was expected
2013 * EPERM - if the problem happens on writing to the @ip address
2014 */
2015void ftrace_bug(int failed, struct dyn_ftrace *rec)
2016{
2017        unsigned long ip = rec ? rec->ip : 0;
2018
2019        pr_info("------------[ ftrace bug ]------------\n");
2020
2021        switch (failed) {
2022        case -EFAULT:
2023                pr_info("ftrace faulted on modifying ");
2024                print_ip_sym(KERN_INFO, ip);
2025                break;
2026        case -EINVAL:
2027                pr_info("ftrace failed to modify ");
2028                print_ip_sym(KERN_INFO, ip);
2029                print_ip_ins(" actual:   ", (unsigned char *)ip);
2030                pr_cont("\n");
2031                if (ftrace_expected) {
2032                        print_ip_ins(" expected: ", ftrace_expected);
2033                        pr_cont("\n");
2034                }
2035                break;
2036        case -EPERM:
2037                pr_info("ftrace faulted on writing ");
2038                print_ip_sym(KERN_INFO, ip);
2039                break;
2040        default:
2041                pr_info("ftrace faulted on unknown error ");
2042                print_ip_sym(KERN_INFO, ip);
2043        }
2044        print_bug_type();
2045        if (rec) {
2046                struct ftrace_ops *ops = NULL;
2047
2048                pr_info("ftrace record flags: %lx\n", rec->flags);
2049                pr_cont(" (%ld)%s", ftrace_rec_count(rec),
2050                        rec->flags & FTRACE_FL_REGS ? " R" : "  ");
2051                if (rec->flags & FTRACE_FL_TRAMP_EN) {
2052                        ops = ftrace_find_tramp_ops_any(rec);
2053                        if (ops) {
2054                                do {
2055                                        pr_cont("\ttramp: %pS (%pS)",
2056                                                (void *)ops->trampoline,
2057                                                (void *)ops->func);
2058                                        ops = ftrace_find_tramp_ops_next(rec, ops);
2059                                } while (ops);
2060                        } else
2061                                pr_cont("\ttramp: ERROR!");
2062
2063                }
2064                ip = ftrace_get_addr_curr(rec);
2065                pr_cont("\n expected tramp: %lx\n", ip);
2066        }
2067
2068        FTRACE_WARN_ON_ONCE(1);
2069}
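
/*
 * A hedged sketch of the calling convention: arch code that fails to
 * patch a call site passes the errno from its text-modify routine
 * straight to ftrace_bug(), roughly like:
 *
 *	ret = ftrace_make_nop(mod, rec, addr);
 *	if (ret) {
 *		ftrace_bug(ret, rec);	// print the report, kill ftrace
 *		return;
 *	}
 *
 * (ftrace_make_nop() is the arch-provided patching hook; the snippet
 * is illustrative, not copied from any particular arch.)
 */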
2070
2071static int ftrace_check_record(struct dyn_ftrace *rec, bool enable, bool update)
2072{
2073        unsigned long flag = 0UL;
2074
2075        ftrace_bug_type = FTRACE_BUG_UNKNOWN;
2076
2077        if (rec->flags & FTRACE_FL_DISABLED)
2078                return FTRACE_UPDATE_IGNORE;
2079
2080        /*
2081         * If we are updating calls:
2082         *
2083         *   If the record has a ref count, then we need to enable it
2084         *   because someone is using it.
2085         *
2086         *   Otherwise we make sure it's disabled.
2087         *
2088         * If we are disabling calls, then disable all records that
2089         * are enabled.
2090         */
2091        if (enable && ftrace_rec_count(rec))
2092                flag = FTRACE_FL_ENABLED;
2093
2094        /*
2095         * If enabling and the REGS flag does not match the REGS_EN, or
2096         * the TRAMP flag doesn't match the TRAMP_EN, then do not ignore
2097         * this record. Set flags to fail the compare against ENABLED.
2098         * Same for direct calls.
2099         */
2100        if (flag) {
2101                if (!(rec->flags & FTRACE_FL_REGS) !=
2102                    !(rec->flags & FTRACE_FL_REGS_EN))
2103                        flag |= FTRACE_FL_REGS;
2104
2105                if (!(rec->flags & FTRACE_FL_TRAMP) !=
2106                    !(rec->flags & FTRACE_FL_TRAMP_EN))
2107                        flag |= FTRACE_FL_TRAMP;
2108
2109                /*
2110                 * Direct calls are special, as the count matters.
2111                 * We must test the record for DIRECT when the
2112                 * DIRECT and DIRECT_EN flags do not match, but only
2113                 * if the count is 1. That's because, if the
2114                 * count is anything other than one, we do not
2115                 * want the direct call enabled (it will be done via the
2116                 * direct helper). But if DIRECT_EN is set and
2117                 * the count is not one, we need to clear it.
2118                 */
2119                if (ftrace_rec_count(rec) == 1) {
2120                        if (!(rec->flags & FTRACE_FL_DIRECT) !=
2121                            !(rec->flags & FTRACE_FL_DIRECT_EN))
2122                                flag |= FTRACE_FL_DIRECT;
2123                } else if (rec->flags & FTRACE_FL_DIRECT_EN) {
2124                        flag |= FTRACE_FL_DIRECT;
2125                }
2126        }
2127
2128        /* If the state of this record hasn't changed, then do nothing */
2129        if ((rec->flags & FTRACE_FL_ENABLED) == flag)
2130                return FTRACE_UPDATE_IGNORE;
2131
2132        if (flag) {
2133                /* Save off if rec is being enabled (for return value) */
2134                flag ^= rec->flags & FTRACE_FL_ENABLED;
2135
2136                if (update) {
2137                        rec->flags |= FTRACE_FL_ENABLED;
2138                        if (flag & FTRACE_FL_REGS) {
2139                                if (rec->flags & FTRACE_FL_REGS)
2140                                        rec->flags |= FTRACE_FL_REGS_EN;
2141                                else
2142                                        rec->flags &= ~FTRACE_FL_REGS_EN;
2143                        }
2144                        if (flag & FTRACE_FL_TRAMP) {
2145                                if (rec->flags & FTRACE_FL_TRAMP)
2146                                        rec->flags |= FTRACE_FL_TRAMP_EN;
2147                                else
2148                                        rec->flags &= ~FTRACE_FL_TRAMP_EN;
2149                        }
2150                        if (flag & FTRACE_FL_DIRECT) {
2151                                /*
2152                                 * If there's only one user (direct_ops helper)
2153                                 * then we can call the direct function
2154                                 * directly (no ftrace trampoline).
2155                                 */
2156                                if (ftrace_rec_count(rec) == 1) {
2157                                        if (rec->flags & FTRACE_FL_DIRECT)
2158                                                rec->flags |= FTRACE_FL_DIRECT_EN;
2159                                        else
2160                                                rec->flags &= ~FTRACE_FL_DIRECT_EN;
2161                                } else {
2162                                        /*
2163                                         * Can only call directly if there's
2164                                         * only one callback to the function.
2165                                         */
2166                                        rec->flags &= ~FTRACE_FL_DIRECT_EN;
2167                                }
2168                        }
2169                }
2170
2171                /*
2172                 * If this record is being updated from a nop, then
2173                 *   return UPDATE_MAKE_CALL.
2174                 * Otherwise,
2175                 *   return UPDATE_MODIFY_CALL to tell the caller to convert
2176                 *   from a save-regs to a non-save-regs function, or
2177                 *   vice versa, or from a trampoline call.
2178                 */
2179                if (flag & FTRACE_FL_ENABLED) {
2180                        ftrace_bug_type = FTRACE_BUG_CALL;
2181                        return FTRACE_UPDATE_MAKE_CALL;
2182                }
2183
2184                ftrace_bug_type = FTRACE_BUG_UPDATE;
2185                return FTRACE_UPDATE_MODIFY_CALL;
2186        }
2187
2188        if (update) {
2189                /* If there's no more users, clear all flags */
2190                if (!ftrace_rec_count(rec))
2191                        rec->flags = 0;
2192                else
2193                        /*
2194                         * Just disable the record, but keep the ops TRAMP
2195                         * and REGS states. The _EN flags must be disabled though.
2196                         */
2197                        rec->flags &= ~(FTRACE_FL_ENABLED | FTRACE_FL_TRAMP_EN |
2198                                        FTRACE_FL_REGS_EN | FTRACE_FL_DIRECT_EN);
2199        }
2200
2201        ftrace_bug_type = FTRACE_BUG_NOP;
2202        return FTRACE_UPDATE_MAKE_NOP;
2203}
2204
2205/**
2206 * ftrace_update_record - set a record to be tracing or not
2207 * @rec: the record to update
2208 * @enable: set to true if the record is tracing, false to force disable
2209 *
2210 * The records that represent all functions that can be traced need
2211 * to be updated when tracing has been enabled.
2212 */
2213int ftrace_update_record(struct dyn_ftrace *rec, bool enable)
2214{
2215        return ftrace_check_record(rec, enable, true);
2216}
2217
2218/**
2219 * ftrace_test_record - check if the record has been enabled or not
2220 * @rec: the record to test
2221 * @enable: set to true to check if enabled, false if it is disabled
2222 *
2223 * The arch code may need to test if a record is already set to
2224 * tracing to determine how to modify the function code that it
2225 * represents.
2226 */
2227int ftrace_test_record(struct dyn_ftrace *rec, bool enable)
2228{
2229        return ftrace_check_record(rec, enable, false);
2230}
2231
2232static struct ftrace_ops *
2233ftrace_find_tramp_ops_any(struct dyn_ftrace *rec)
2234{
2235        struct ftrace_ops *op;
2236        unsigned long ip = rec->ip;
2237
2238        do_for_each_ftrace_op(op, ftrace_ops_list) {
2239
2240                if (!op->trampoline)
2241                        continue;
2242
2243                if (hash_contains_ip(ip, op->func_hash))
2244                        return op;
2245        } while_for_each_ftrace_op(op);
2246
2247        return NULL;
2248}
2249
2250static struct ftrace_ops *
2251ftrace_find_tramp_ops_next(struct dyn_ftrace *rec,
2252                           struct ftrace_ops *op)
2253{
2254        unsigned long ip = rec->ip;
2255
2256        while_for_each_ftrace_op(op) {
2257
2258                if (!op->trampoline)
2259                        continue;
2260
2261                if (hash_contains_ip(ip, op->func_hash))
2262                        return op;
2263        }
2264
2265        return NULL;
2266}
2267
2268static struct ftrace_ops *
2269ftrace_find_tramp_ops_curr(struct dyn_ftrace *rec)
2270{
2271        struct ftrace_ops *op;
2272        unsigned long ip = rec->ip;
2273
2274        /*
2275         * Need to check removed ops first.
2276         * If they are being removed, and this rec has a tramp,
2277         * and this rec is in the ops list, then it would be the
2278         * one with the tramp.
2279         */
2280        if (removed_ops) {
2281                if (hash_contains_ip(ip, &removed_ops->old_hash))
2282                        return removed_ops;
2283        }
2284
2285        /*
2286         * Need to find the current trampoline for a rec.
2287         * Now, a trampoline is only attached to a rec if there
2288         * was a single 'ops' attached to it. But this can be called
2289         * when we are adding another op to the rec or removing the
2290         * current one. Thus, if the op is being added, we can
2291         * ignore it because it hasn't attached itself to the rec
2292         * yet.
2293         *
2294         * If an ops is being modified (hooking to different functions)
2295         * then we don't care about the new functions that are being
2296         * added, just the old ones (that are probably being removed).
2297         *
2298         * If we are adding an ops to a function that already uses
2299         * a trampoline, the trampoline needs to be removed (trampolines
2300         * are only for single ops connections), so an ops that is not
2301         * being modified also needs to be checked.
2302         */
2303        do_for_each_ftrace_op(op, ftrace_ops_list) {
2304
2305                if (!op->trampoline)
2306                        continue;
2307
2308                /*
2309                 * If the ops is being added, it hasn't gotten to
2310                 * the point to be removed from this tree yet.
2311                 */
2312                if (op->flags & FTRACE_OPS_FL_ADDING)
2313                        continue;
2314
2315
2316                /*
2317                 * If the ops is being modified and is in the old
2318                 * hash, then it is probably being removed from this
2319                 * function.
2320                 */
2321                if ((op->flags & FTRACE_OPS_FL_MODIFYING) &&
2322                    hash_contains_ip(ip, &op->old_hash))
2323                        return op;
2324                /*
2325                 * If the ops is not being added or modified, and it's
2326                 * in its normal filter hash, then this must be the one
2327                 * we want!
2328                 */
2329                if (!(op->flags & FTRACE_OPS_FL_MODIFYING) &&
2330                    hash_contains_ip(ip, op->func_hash))
2331                        return op;
2332
2333        } while_for_each_ftrace_op(op);
2334
2335        return NULL;
2336}
2337
2338static struct ftrace_ops *
2339ftrace_find_tramp_ops_new(struct dyn_ftrace *rec)
2340{
2341        struct ftrace_ops *op;
2342        unsigned long ip = rec->ip;
2343
2344        do_for_each_ftrace_op(op, ftrace_ops_list) {
2345                /* Return the first ops whose hash contains this ip */
2346                if (hash_contains_ip(ip, op->func_hash))
2347                        return op;
2348        } while_for_each_ftrace_op(op);
2349
2350        return NULL;
2351}
2352
2353#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
2354/* Protected by rcu_tasks for reading, and direct_mutex for writing */
2355static struct ftrace_hash *direct_functions = EMPTY_HASH;
2356static DEFINE_MUTEX(direct_mutex);
2357int ftrace_direct_func_count;
2358
2359/*
2360 * Search the direct_functions hash to see if the given instruction pointer
2361 * has a direct caller attached to it.
2362 */
2363unsigned long ftrace_find_rec_direct(unsigned long ip)
2364{
2365        struct ftrace_func_entry *entry;
2366
2367        entry = __ftrace_lookup_ip(direct_functions, ip);
2368        if (!entry)
2369                return 0;
2370
2371        return entry->direct;
2372}
2373
2374static void call_direct_funcs(unsigned long ip, unsigned long pip,
2375                              struct ftrace_ops *ops, struct pt_regs *regs)
2376{
2377        unsigned long addr;
2378
2379        addr = ftrace_find_rec_direct(ip);
2380        if (!addr)
2381                return;
2382
2383        arch_ftrace_set_direct_caller(regs, addr);
2384}
2385
2386struct ftrace_ops direct_ops = {
2387        .func           = call_direct_funcs,
2388        .flags          = FTRACE_OPS_FL_IPMODIFY | FTRACE_OPS_FL_RECURSION_SAFE
2389                          | FTRACE_OPS_FL_DIRECT | FTRACE_OPS_FL_SAVE_REGS
2390                          | FTRACE_OPS_FL_PERMANENT,
2391};
2392#endif /* CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */
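
/*
 * A hedged usage sketch for direct calls: a user attaches its own
 * assembly trampoline to one function via register_ftrace_direct(),
 * where ip is the ftrace site of the traced function and addr is the
 * custom trampoline (my_tramp is a hypothetical symbol):
 *
 *	ret = register_ftrace_direct((unsigned long)do_sys_open,
 *				     (unsigned long)my_tramp);
 *
 * Behind the scenes this populates direct_functions and registers
 * direct_ops above, so call_direct_funcs() can redirect the return
 * of the ftrace caller to my_tramp.
 */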
2393
2394/**
2395 * ftrace_get_addr_new - Get the call address to set to
2396 * @rec:  The ftrace record descriptor
2397 *
2398 * If the record has the FTRACE_FL_REGS set, that means that it
2399 * wants to convert to a callback that saves all regs. If FTRACE_FL_REGS
2400 * is not set, then it wants to convert to the normal callback.
2401 *
2402 * Returns the address of the trampoline to set to
2403 */
2404unsigned long ftrace_get_addr_new(struct dyn_ftrace *rec)
2405{
2406        struct ftrace_ops *ops;
2407        unsigned long addr;
2408
2409        if ((rec->flags & FTRACE_FL_DIRECT) &&
2410            (ftrace_rec_count(rec) == 1)) {
2411                addr = ftrace_find_rec_direct(rec->ip);
2412                if (addr)
2413                        return addr;
2414                WARN_ON_ONCE(1);
2415        }
2416
2417        /* Trampolines take precedence over regs */
2418        if (rec->flags & FTRACE_FL_TRAMP) {
2419                ops = ftrace_find_tramp_ops_new(rec);
2420                if (FTRACE_WARN_ON(!ops || !ops->trampoline)) {
2421                        pr_warn("Bad trampoline accounting at: %p (%pS) (%lx)\n",
2422                                (void *)rec->ip, (void *)rec->ip, rec->flags);
2423                        /* Ftrace is shutting down, return anything */
2424                        return (unsigned long)FTRACE_ADDR;
2425                }
2426                return ops->trampoline;
2427        }
2428
2429        if (rec->flags & FTRACE_FL_REGS)
2430                return (unsigned long)FTRACE_REGS_ADDR;
2431        else
2432                return (unsigned long)FTRACE_ADDR;
2433}
2434
2435/**
2436 * ftrace_get_addr_curr - Get the call address that is already there
2437 * @rec:  The ftrace record descriptor
2438 *
2439 * The FTRACE_FL_REGS_EN is set when the record already points to
2440 * a function that saves all the regs. Basically the '_EN' version
2441 * represents the current state of the function.
2442 *
2443 * Returns the address of the trampoline that is currently being called
2444 */
2445unsigned long ftrace_get_addr_curr(struct dyn_ftrace *rec)
2446{
2447        struct ftrace_ops *ops;
2448        unsigned long addr;
2449
2450        /* Direct calls take precedence over trampolines */
2451        if (rec->flags & FTRACE_FL_DIRECT_EN) {
2452                addr = ftrace_find_rec_direct(rec->ip);
2453                if (addr)
2454                        return addr;
2455                WARN_ON_ONCE(1);
2456        }
2457
2458        /* Trampolines take precedence over regs */
2459        if (rec->flags & FTRACE_FL_TRAMP_EN) {
2460                ops = ftrace_find_tramp_ops_curr(rec);
2461                if (FTRACE_WARN_ON(!ops)) {
2462                        pr_warn("Bad trampoline accounting at: %p (%pS)\n",
2463                                (void *)rec->ip, (void *)rec->ip);
2464                        /* Ftrace is shutting down, return anything */
2465                        return (unsigned long)FTRACE_ADDR;
2466                }
2467                return ops->trampoline;
2468        }
2469
2470        if (rec->flags & FTRACE_FL_REGS_EN)
2471                return (unsigned long)FTRACE_REGS_ADDR;
2472        else
2473                return (unsigned long)FTRACE_ADDR;
2474}
2475
2476static int
2477__ftrace_replace_code(struct dyn_ftrace *rec, bool enable)
2478{
2479        unsigned long ftrace_old_addr;
2480        unsigned long ftrace_addr;
2481        int ret;
2482
2483        ftrace_addr = ftrace_get_addr_new(rec);
2484
2485        /* This needs to be done before we call ftrace_update_record */
2486        ftrace_old_addr = ftrace_get_addr_curr(rec);
2487
2488        ret = ftrace_update_record(rec, enable);
2489
2490        ftrace_bug_type = FTRACE_BUG_UNKNOWN;
2491
2492        switch (ret) {
2493        case FTRACE_UPDATE_IGNORE:
2494                return 0;
2495
2496        case FTRACE_UPDATE_MAKE_CALL:
2497                ftrace_bug_type = FTRACE_BUG_CALL;
2498                return ftrace_make_call(rec, ftrace_addr);
2499
2500        case FTRACE_UPDATE_MAKE_NOP:
2501                ftrace_bug_type = FTRACE_BUG_NOP;
2502                return ftrace_make_nop(NULL, rec, ftrace_old_addr);
2503
2504        case FTRACE_UPDATE_MODIFY_CALL:
2505                ftrace_bug_type = FTRACE_BUG_UPDATE;
2506                return ftrace_modify_call(rec, ftrace_old_addr, ftrace_addr);
2507        }
2508
2509        return -1; /* unknown ftrace bug */
2510}
2511
2512void __weak ftrace_replace_code(int mod_flags)
2513{
2514        struct dyn_ftrace *rec;
2515        struct ftrace_page *pg;
2516        bool enable = mod_flags & FTRACE_MODIFY_ENABLE_FL;
2517        int schedulable = mod_flags & FTRACE_MODIFY_MAY_SLEEP_FL;
2518        int failed;
2519
2520        if (unlikely(ftrace_disabled))
2521                return;
2522
2523        do_for_each_ftrace_rec(pg, rec) {
2524
2525                if (rec->flags & FTRACE_FL_DISABLED)
2526                        continue;
2527
2528                failed = __ftrace_replace_code(rec, enable);
2529                if (failed) {
2530                        ftrace_bug(failed, rec);
2531                        /* Stop processing */
2532                        return;
2533                }
2534                if (schedulable)
2535                        cond_resched();
2536        } while_for_each_ftrace_rec();
2537}
2538
2539struct ftrace_rec_iter {
2540        struct ftrace_page      *pg;
2541        int                     index;
2542};
2543
2544/**
2545 * ftrace_rec_iter_start - start up iterating over traced functions
2546 *
2547 * Returns an iterator handle that is used to iterate over all
2548 * the records that represent address locations where functions
2549 * are traced.
2550 *
2551 * May return NULL if no records are available.
2552 */
2553struct ftrace_rec_iter *ftrace_rec_iter_start(void)
2554{
2555        /*
2556         * We only use a single iterator.
2557         * Protected by the ftrace_lock mutex.
2558         */
2559        static struct ftrace_rec_iter ftrace_rec_iter;
2560        struct ftrace_rec_iter *iter = &ftrace_rec_iter;
2561
2562        iter->pg = ftrace_pages_start;
2563        iter->index = 0;
2564
2565        /* Could have empty pages */
2566        while (iter->pg && !iter->pg->index)
2567                iter->pg = iter->pg->next;
2568
2569        if (!iter->pg)
2570                return NULL;
2571
2572        return iter;
2573}
2574
2575/**
2576 * ftrace_rec_iter_next - get the next record to process.
2577 * @iter: The handle to the iterator.
2578 *
2579 * Returns the next iterator after the given iterator @iter.
2580 */
2581struct ftrace_rec_iter *ftrace_rec_iter_next(struct ftrace_rec_iter *iter)
2582{
2583        iter->index++;
2584
2585        if (iter->index >= iter->pg->index) {
2586                iter->pg = iter->pg->next;
2587                iter->index = 0;
2588
2589                /* Could have empty pages */
2590                while (iter->pg && !iter->pg->index)
2591                        iter->pg = iter->pg->next;
2592        }
2593
2594        if (!iter->pg)
2595                return NULL;
2596
2597        return iter;
2598}
2599
2600/**
2601 * ftrace_rec_iter_record - get the record at the iterator location
2602 * @iter: The current iterator location
2603 *
2604 * Returns the record that the current @iter is at.
2605 */
2606struct dyn_ftrace *ftrace_rec_iter_record(struct ftrace_rec_iter *iter)
2607{
2608        return &iter->pg->records[iter->index];
2609}
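
/*
 * Typical use, under ftrace_lock, as arch boot code does (a sketch;
 * linux/ftrace.h also provides the for_ftrace_rec_iter() helper macro
 * wrapping exactly this loop):
 *
 *	struct ftrace_rec_iter *iter;
 *	struct dyn_ftrace *rec;
 *
 *	for (iter = ftrace_rec_iter_start(); iter;
 *	     iter = ftrace_rec_iter_next(iter)) {
 *		rec = ftrace_rec_iter_record(iter);
 *		// inspect or patch rec->ip here
 *	}
 */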
2610
2611static int
2612ftrace_nop_initialize(struct module *mod, struct dyn_ftrace *rec)
2613{
2614        int ret;
2615
2616        if (unlikely(ftrace_disabled))
2617                return 0;
2618
2619        ret = ftrace_init_nop(mod, rec);
2620        if (ret) {
2621                ftrace_bug_type = FTRACE_BUG_INIT;
2622                ftrace_bug(ret, rec);
2623                return 0;
2624        }
2625        return 1;
2626}
2627
2628/*
2629 * archs can override this function if they must do something
2630 * before the modifying code is performed.
2631 */
2632int __weak ftrace_arch_code_modify_prepare(void)
2633{
2634        return 0;
2635}
2636
2637/*
2638 * archs can override this function if they must do something
2639 * after the modifying code is performed.
2640 */
2641int __weak ftrace_arch_code_modify_post_process(void)
2642{
2643        return 0;
2644}
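
/*
 * A hedged sketch of an arch override, loosely modeled on x86, which
 * serializes text modification by holding text_mutex across the update:
 *
 *	int ftrace_arch_code_modify_prepare(void)
 *	{
 *		mutex_lock(&text_mutex);
 *		return 0;
 *	}
 *
 *	int ftrace_arch_code_modify_post_process(void)
 *	{
 *		mutex_unlock(&text_mutex);
 *		return 0;
 *	}
 */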
2645
2646void ftrace_modify_all_code(int command)
2647{
2648        int update = command & FTRACE_UPDATE_TRACE_FUNC;
2649        int mod_flags = 0;
2650        int err = 0;
2651
2652        if (command & FTRACE_MAY_SLEEP)
2653                mod_flags = FTRACE_MODIFY_MAY_SLEEP_FL;
2654
2655        /*
2656         * If the ftrace_caller calls a ftrace_ops func directly,
2657         * we need to make sure that it only traces functions it
2658         * expects to trace. When doing the switch of functions,
2659         * we need to update to the ftrace_ops_list_func first
2660         * before the transition between old and new calls is made,
2661         * as the ftrace_ops_list_func will check the ops hashes
2662         * to make sure the ops have the right functions
2663         * traced.
2664         */
2665        if (update) {
2666                err = ftrace_update_ftrace_func(ftrace_ops_list_func);
2667                if (FTRACE_WARN_ON(err))
2668                        return;
2669        }
2670
2671        if (command & FTRACE_UPDATE_CALLS)
2672                ftrace_replace_code(mod_flags | FTRACE_MODIFY_ENABLE_FL);
2673        else if (command & FTRACE_DISABLE_CALLS)
2674                ftrace_replace_code(mod_flags);
2675
2676        if (update && ftrace_trace_function != ftrace_ops_list_func) {
2677                function_trace_op = set_function_trace_op;
2678                smp_wmb();
2679                /* If irqs are disabled, we are in stop machine */
2680                if (!irqs_disabled())
2681                        smp_call_function(ftrace_sync_ipi, NULL, 1);
2682                err = ftrace_update_ftrace_func(ftrace_trace_function);
2683                if (FTRACE_WARN_ON(err))
2684                        return;
2685        }
2686
2687        if (command & FTRACE_START_FUNC_RET)
2688                err = ftrace_enable_ftrace_graph_caller();
2689        else if (command & FTRACE_STOP_FUNC_RET)
2690                err = ftrace_disable_ftrace_graph_caller();
2691        FTRACE_WARN_ON(err);
2692}
2693
2694static int __ftrace_modify_code(void *data)
2695{
2696        int *command = data;
2697
2698        ftrace_modify_all_code(*command);
2699
2700        return 0;
2701}
2702
2703/**
2704 * ftrace_run_stop_machine - go back to the stop machine method
2705 * @command: The command to tell ftrace what to do
2706 *
2707 * If an arch needs to fall back to the stop machine method, it
2708 * can call this function.
2709 */
2710void ftrace_run_stop_machine(int command)
2711{
2712        stop_machine(__ftrace_modify_code, &command, NULL);
2713}
2714
2715/**
2716 * arch_ftrace_update_code - modify the code to trace or not trace
2717 * @command: The command that needs to be done
2718 *
2719 * Archs can override this function if they do not need to
2720 * run stop_machine() to modify code.
2721 */
2722void __weak arch_ftrace_update_code(int command)
2723{
2724        ftrace_run_stop_machine(command);
2725}
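
/*
 * For example, an arch that can patch text safely without stopping the
 * machine (x86 uses breakpoint-based text poking) may simply override
 * it as (a sketch):
 *
 *	void arch_ftrace_update_code(int command)
 *	{
 *		ftrace_modify_all_code(command);
 *	}
 */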
2726
2727static void ftrace_run_update_code(int command)
2728{
2729        int ret;
2730
2731        ret = ftrace_arch_code_modify_prepare();
2732        FTRACE_WARN_ON(ret);
2733        if (ret)
2734                return;
2735
2736        /*
2737         * By default we use stop_machine() to modify the code.
2738         * But archs can do whatever they want as long as it
2739         * is safe. The stop_machine() is the safest, but also
2740         * produces the most overhead.
2741         */
2742        arch_ftrace_update_code(command);
2743
2744        ret = ftrace_arch_code_modify_post_process();
2745        FTRACE_WARN_ON(ret);
2746}
2747
2748static void ftrace_run_modify_code(struct ftrace_ops *ops, int command,
2749                                   struct ftrace_ops_hash *old_hash)
2750{
2751        ops->flags |= FTRACE_OPS_FL_MODIFYING;
2752        ops->old_hash.filter_hash = old_hash->filter_hash;
2753        ops->old_hash.notrace_hash = old_hash->notrace_hash;
2754        ftrace_run_update_code(command);
2755        ops->old_hash.filter_hash = NULL;
2756        ops->old_hash.notrace_hash = NULL;
2757        ops->flags &= ~FTRACE_OPS_FL_MODIFYING;
2758}
2759
2760static ftrace_func_t saved_ftrace_func;
2761static int ftrace_start_up;
2762
2763void __weak arch_ftrace_trampoline_free(struct ftrace_ops *ops)
2764{
2765}
2766
2767static void ftrace_startup_enable(int command)
2768{
2769        if (saved_ftrace_func != ftrace_trace_function) {
2770                saved_ftrace_func = ftrace_trace_function;
2771                command |= FTRACE_UPDATE_TRACE_FUNC;
2772        }
2773
2774        if (!command || !ftrace_enabled)
2775                return;
2776
2777        ftrace_run_update_code(command);
2778}
2779
2780static void ftrace_startup_all(int command)
2781{
2782        update_all_ops = true;
2783        ftrace_startup_enable(command);
2784        update_all_ops = false;
2785}
2786
2787int ftrace_startup(struct ftrace_ops *ops, int command)
2788{
2789        int ret;
2790
2791        if (unlikely(ftrace_disabled))
2792                return -ENODEV;
2793
2794        ret = __register_ftrace_function(ops);
2795        if (ret)
2796                return ret;
2797
2798        ftrace_start_up++;
2799
2800        /*
2801         * Note that ftrace probes use this to start up
2802         * and modify functions they will probe. But we still
2803         * set the ADDING flag for modification, as probes
2804         * do not have trampolines. If they add them in the
2805         * future, then the probes will need to distinguish
2806         * between adding and updating probes.
2807         */
2808        ops->flags |= FTRACE_OPS_FL_ENABLED | FTRACE_OPS_FL_ADDING;
2809
2810        ret = ftrace_hash_ipmodify_enable(ops);
2811        if (ret < 0) {
2812                /* Rollback registration process */
2813                __unregister_ftrace_function(ops);
2814                ftrace_start_up--;
2815                ops->flags &= ~FTRACE_OPS_FL_ENABLED;
2816                return ret;
2817        }
2818
2819        if (ftrace_hash_rec_enable(ops, 1))
2820                command |= FTRACE_UPDATE_CALLS;
2821
2822        ftrace_startup_enable(command);
2823
2824        ops->flags &= ~FTRACE_OPS_FL_ADDING;
2825
2826        return 0;
2827}
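
/*
 * ftrace_startup() is normally reached through register_ftrace_function().
 * A hedged sketch of a minimal client (my_callback and my_ops are
 * hypothetical; the callback signature matches ftrace_func_t in this
 * kernel):
 *
 *	static void my_callback(unsigned long ip, unsigned long parent_ip,
 *				struct ftrace_ops *op, struct pt_regs *regs)
 *	{
 *		// called on entry of every filtered function
 *	}
 *
 *	static struct ftrace_ops my_ops = {
 *		.func	= my_callback,
 *		.flags	= FTRACE_OPS_FL_RECURSION_SAFE,
 *	};
 *
 *	register_ftrace_function(&my_ops);	// ends up in ftrace_startup()
 *	...
 *	unregister_ftrace_function(&my_ops);	// ends up in ftrace_shutdown()
 */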
2828
2829int ftrace_shutdown(struct ftrace_ops *ops, int command)
2830{
2831        int ret;
2832
2833        if (unlikely(ftrace_disabled))
2834                return -ENODEV;
2835
2836        ret = __unregister_ftrace_function(ops);
2837        if (ret)
2838                return ret;
2839
2840        ftrace_start_up--;
2841        /*
2842         * Just warn in case of unbalance; no need to kill ftrace, it's not
2843         * critical, but the ftrace_call callers may never be nopped again after
2844         * further ftrace uses.
2845         */
2846        WARN_ON_ONCE(ftrace_start_up < 0);
2847
2848        /* Disabling ipmodify never fails */
2849        ftrace_hash_ipmodify_disable(ops);
2850
2851        if (ftrace_hash_rec_disable(ops, 1))
2852                command |= FTRACE_UPDATE_CALLS;
2853
2854        ops->flags &= ~FTRACE_OPS_FL_ENABLED;
2855
2856        if (saved_ftrace_func != ftrace_trace_function) {
2857                saved_ftrace_func = ftrace_trace_function;
2858                command |= FTRACE_UPDATE_TRACE_FUNC;
2859        }
2860
2861        if (!command || !ftrace_enabled) {
2862                /*
2863                 * If these are dynamic or per_cpu ops, they still
2864                 * need their data freed. Since function tracing is
2865                 * not currently active, we can just free them
2866                 * without synchronizing all CPUs.
2867                 */
2868                if (ops->flags & FTRACE_OPS_FL_DYNAMIC)
2869                        goto free_ops;
2870
2871                return 0;
2872        }
2873
2874        /*
2875         * If the ops uses a trampoline, then it needs to be
2876         * tested first on update.
2877         */
2878        ops->flags |= FTRACE_OPS_FL_REMOVING;
2879        removed_ops = ops;
2880
2881        /* The trampoline logic checks the old hashes */
2882        ops->old_hash.filter_hash = ops->func_hash->filter_hash;
2883        ops->old_hash.notrace_hash = ops->func_hash->notrace_hash;
2884
2885        ftrace_run_update_code(command);
2886
2887        /*
2888         * If there's no more ops registered with ftrace, run a
2889         * sanity check to make sure all rec flags are cleared.
2890         */
2891        if (rcu_dereference_protected(ftrace_ops_list,
2892                        lockdep_is_held(&ftrace_lock)) == &ftrace_list_end) {
2893                struct ftrace_page *pg;
2894                struct dyn_ftrace *rec;
2895
2896                do_for_each_ftrace_rec(pg, rec) {
2897                        if (FTRACE_WARN_ON_ONCE(rec->flags & ~FTRACE_FL_DISABLED))
2898                                pr_warn("  %pS flags:%lx\n",
2899                                        (void *)rec->ip, rec->flags);
2900                } while_for_each_ftrace_rec();
2901        }
2902
2903        ops->old_hash.filter_hash = NULL;
2904        ops->old_hash.notrace_hash = NULL;
2905
2906        removed_ops = NULL;
2907        ops->flags &= ~FTRACE_OPS_FL_REMOVING;
2908
2909        /*
2910         * Dynamic ops may be freed, we must make sure that all
2911         * callers are done before leaving this function.
2912         * The same goes for freeing the per_cpu data of the per_cpu
2913         * ops.
2914         */
2915        if (ops->flags & FTRACE_OPS_FL_DYNAMIC) {
2916                /*
2917                 * We need to do a hard force of sched synchronization.
2918                 * This is because we use preempt_disable() to do RCU, but
2919                 * the function tracers can be called where RCU is not watching
2920                 * (like before user_exit()). We can not rely on the RCU
2921                 * infrastructure to do the synchronization, thus we must do it
2922                 * ourselves.
2923                 */
2924                synchronize_rcu_tasks_rude();
2925
2926                /*
2927                 * When the kernel is preemptive, tasks can be preempted
2928                 * while on an ftrace trampoline. Just scheduling a task on
2929                 * a CPU is not good enough to flush them. Calling
2930                 * synchronize_rcu_tasks() will wait for those tasks to
2931                 * execute and either schedule voluntarily or enter user space.
2932                 */
2933                if (IS_ENABLED(CONFIG_PREEMPTION))
2934                        synchronize_rcu_tasks();
2935
2936 free_ops:
2937                arch_ftrace_trampoline_free(ops);
2938        }
2939
2940        return 0;
2941}
2942
2943static void ftrace_startup_sysctl(void)
2944{
2945        int command;
2946
2947        if (unlikely(ftrace_disabled))
2948                return;
2949
2950        /* Force update next time */
2951        saved_ftrace_func = NULL;
2952        /* ftrace_start_up is true if we want ftrace running */
2953        if (ftrace_start_up) {
2954                command = FTRACE_UPDATE_CALLS;
2955                if (ftrace_graph_active)
2956                        command |= FTRACE_START_FUNC_RET;
2957                ftrace_startup_enable(command);
2958        }
2959}
2960
2961static void ftrace_shutdown_sysctl(void)
2962{
2963        int command;
2964
2965        if (unlikely(ftrace_disabled))
2966                return;
2967
2968        /* ftrace_start_up is true if ftrace is running */
2969        if (ftrace_start_up) {
2970                command = FTRACE_DISABLE_CALLS;
2971                if (ftrace_graph_active)
2972                        command |= FTRACE_STOP_FUNC_RET;
2973                ftrace_run_update_code(command);
2974        }
2975}
2976
2977static u64              ftrace_update_time;
2978unsigned long           ftrace_update_tot_cnt;
2979unsigned long           ftrace_number_of_pages;
2980unsigned long           ftrace_number_of_groups;
2981
2982static inline int ops_traces_mod(struct ftrace_ops *ops)
2983{
2984        /*
2985         * An empty filter_hash will default to tracing the module.
2986         * But the notrace hash requires a test of individual module functions.
2987         */
2988        return ftrace_hash_empty(ops->func_hash->filter_hash) &&
2989                ftrace_hash_empty(ops->func_hash->notrace_hash);
2990}
2991
2992/*
2993 * Check if the current ops references the record.
2994 *
2995 * If the ops traces all functions, then it was already accounted for.
2996 * If the ops does not trace the current record function, skip it.
2997 * If the ops ignores the function via notrace filter, skip it.
2998 */
2999static inline bool
3000ops_references_rec(struct ftrace_ops *ops, struct dyn_ftrace *rec)
3001{
3002        /* If ops isn't enabled, ignore it */
3003        if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
3004                return false;
3005
3006        /* If ops traces all then it includes this function */
3007        if (ops_traces_mod(ops))
3008                return true;
3009
3010        /* The function must be in the filter */
3011        if (!ftrace_hash_empty(ops->func_hash->filter_hash) &&
3012            !__ftrace_lookup_ip(ops->func_hash->filter_hash, rec->ip))
3013                return false;
3014
3015        /* If in notrace hash, we ignore it too */
3016        if (ftrace_lookup_ip(ops->func_hash->notrace_hash, rec->ip))
3017                return false;
3018
3019        return true;
3020}
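
/*
 * Illustrative cases (hedged): an ops with empty filter and notrace
 * hashes references every rec; an ops with filter_hash = {foo} only
 * references foo's rec; and an ops with notrace_hash = {bar} references
 * everything except bar's rec.
 */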
3021
3022static int ftrace_update_code(struct module *mod, struct ftrace_page *new_pgs)
3023{
3024        struct ftrace_page *pg;
3025        struct dyn_ftrace *p;
3026        u64 start, stop;
3027        unsigned long update_cnt = 0;
3028        unsigned long rec_flags = 0;
3029        int i;
3030
3031        start = ftrace_now(raw_smp_processor_id());
3032
3033        /*
3034         * When a module is loaded, this function is called to convert
3035         * the calls to mcount in its text to nops, and also to create
3036         * an entry in the ftrace data. Now, if ftrace is activated
3037         * after this call, but before the module sets its text to
3038         * read-only, the modification of enabling ftrace can fail if
3039         * the read-only is done while ftrace is converting the calls.
3040         * To prevent this, the module's records are set as disabled
3041         * and will be enabled after the call to set the module's text
3042         * to read-only.
3043         */
3044        if (mod)
3045                rec_flags |= FTRACE_FL_DISABLED;
3046
3047        for (pg = new_pgs; pg; pg = pg->next) {
3048
3049                for (i = 0; i < pg->index; i++) {
3050
3051                        /* If something went wrong, bail without enabling anything */
3052                        if (unlikely(ftrace_disabled))
3053                                return -1;
3054
3055                        p = &pg->records[i];
3056                        p->flags = rec_flags;
3057
3058                        /*
3059                         * Do the initial record conversion from mcount jump
3060                         * to the NOP instructions.
3061                         */
3062                        if (!__is_defined(CC_USING_NOP_MCOUNT) &&
3063                            !ftrace_nop_initialize(mod, p))
3064                                break;
3065
3066                        update_cnt++;
3067                }
3068        }
3069
3070        stop = ftrace_now(raw_smp_processor_id());
3071        ftrace_update_time = stop - start;
3072        ftrace_update_tot_cnt += update_cnt;
3073
3074        return 0;
3075}
3076
3077static int ftrace_allocate_records(struct ftrace_page *pg, int count)
3078{
3079        int order;
3080        int cnt;
3081
3082        if (WARN_ON(!count))
3083                return -EINVAL;
3084
3085        order = get_count_order(DIV_ROUND_UP(count, ENTRIES_PER_PAGE));
3086
3087        /*
3088         * We want to fill as much as possible. No more than a page
3089         * may be empty.
3090         */
3091        while ((PAGE_SIZE << order) / ENTRY_SIZE >= count + ENTRIES_PER_PAGE)
3092                order--;
3093
3094 again:
3095        pg->records = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
3096
3097        if (!pg->records) {
3098                /* if we can't allocate this size, try something smaller */
3099                if (!order)
3100                        return -ENOMEM;
3101                order >>= 1;
3102                goto again;
3103        }
3104
3105        ftrace_number_of_pages += 1 << order;
3106        ftrace_number_of_groups++;
3107
3108        cnt = (PAGE_SIZE << order) / ENTRY_SIZE;
3109        pg->size = cnt;
3110
3111        if (cnt > count)
3112                cnt = count;
3113
3114        return cnt;
3115}
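
/*
 * Worked example (hedged, assuming 4K pages and a 16-byte struct
 * dyn_ftrace, i.e. ENTRIES_PER_PAGE == 256): for count == 1000,
 * DIV_ROUND_UP(1000, 256) == 4, so order starts at 2 (4 pages, 1024
 * entries). The while loop would only shrink the order if
 * 1024 >= 1000 + 256, which is false, so order 2 stands and cnt is
 * clamped from 1024 down to 1000.
 */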
3116
3117static struct ftrace_page *
3118ftrace_allocate_pages(unsigned long num_to_init)
3119{
3120        struct ftrace_page *start_pg;
3121        struct ftrace_page *pg;
3122        int order;
3123        int cnt;
3124
3125        if (!num_to_init)
3126                return NULL;
3127
3128        start_pg = pg = kzalloc(sizeof(*pg), GFP_KERNEL);
3129        if (!pg)
3130                return NULL;
3131
3132        /*
3133         * Try to allocate as much as possible in one contiguous
3134         * location that fills in all of the space. We want to
3135         * waste as little space as possible.
3136         */
3137        for (;;) {
3138                cnt = ftrace_allocate_records(pg, num_to_init);
3139                if (cnt < 0)
3140                        goto free_pages;
3141
3142                num_to_init -= cnt;
3143                if (!num_to_init)
3144                        break;
3145
3146                pg->next = kzalloc(sizeof(*pg), GFP_KERNEL);
3147                if (!pg->next)
3148                        goto free_pages;
3149
3150                pg = pg->next;
3151        }
3152
3153        return start_pg;
3154
3155 free_pages:
3156        pg = start_pg;
3157        while (pg) {
3158                order = get_count_order(pg->size / ENTRIES_PER_PAGE);
3159                free_pages((unsigned long)pg->records, order);
3160                start_pg = pg->next;
3161                kfree(pg);
3162                pg = start_pg;
3163                ftrace_number_of_pages -= 1 << order;
3164                ftrace_number_of_groups--;
3165        }
3166        pr_info("ftrace: FAILED to allocate memory for functions\n");
3167        return NULL;
3168}
3169
3170#define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */
3171
3172struct ftrace_iterator {
3173        loff_t                          pos;
3174        loff_t                          func_pos;
3175        loff_t                          mod_pos;
3176        struct ftrace_page              *pg;
3177        struct dyn_ftrace               *func;
3178        struct ftrace_func_probe        *probe;
3179        struct ftrace_func_entry        *probe_entry;
3180        struct trace_parser             parser;
3181        struct ftrace_hash              *hash;
3182        struct ftrace_ops               *ops;
3183        struct trace_array              *tr;
3184        struct list_head                *mod_list;
3185        int                             pidx;
3186        int                             idx;
3187        unsigned                        flags;
3188};
3189
3190static void *
3191t_probe_next(struct seq_file *m, loff_t *pos)
3192{
3193        struct ftrace_iterator *iter = m->private;
3194        struct trace_array *tr = iter->ops->private;
3195        struct list_head *func_probes;
3196        struct ftrace_hash *hash;
3197        struct list_head *next;
3198        struct hlist_node *hnd = NULL;
3199        struct hlist_head *hhd;
3200        int size;
3201
3202        (*pos)++;
3203        iter->pos = *pos;
3204
3205        if (!tr)
3206                return NULL;
3207
3208        func_probes = &tr->func_probes;
3209        if (list_empty(func_probes))
3210                return NULL;
3211
3212        if (!iter->probe) {
3213                next = func_probes->next;
3214                iter->probe = list_entry(next, struct ftrace_func_probe, list);
3215        }
3216
3217        if (iter->probe_entry)
3218                hnd = &iter->probe_entry->hlist;
3219
3220        hash = iter->probe->ops.func_hash->filter_hash;
3221
3222        /*
3223         * A probe being registered may temporarily have an empty hash
3224         * and it's at the end of the func_probes list.
3225         */
3226        if (!hash || hash == EMPTY_HASH)
3227                return NULL;
3228
3229        size = 1 << hash->size_bits;
3230
3231 retry:
3232        if (iter->pidx >= size) {
3233                if (iter->probe->list.next == func_probes)
3234                        return NULL;
3235                next = iter->probe->list.next;
3236                iter->probe = list_entry(next, struct ftrace_func_probe, list);
3237                hash = iter->probe->ops.func_hash->filter_hash;
3238                size = 1 << hash->size_bits;
3239                iter->pidx = 0;
3240        }
3241
3242        hhd = &hash->buckets[iter->pidx];
3243
3244        if (hlist_empty(hhd)) {
3245                iter->pidx++;
3246                hnd = NULL;
3247                goto retry;
3248        }
3249
3250        if (!hnd)
3251                hnd = hhd->first;
3252        else {
3253                hnd = hnd->next;
3254                if (!hnd) {
3255                        iter->pidx++;
3256                        goto retry;
3257                }
3258        }
3259
3260        if (WARN_ON_ONCE(!hnd))
3261                return NULL;
3262
3263        iter->probe_entry = hlist_entry(hnd, struct ftrace_func_entry, hlist);
3264
3265        return iter;
3266}
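
/*
 * Editorial note: t_probe_next() is an instance of the usual seq_file
 * "cursor over hash buckets" pattern. A minimal standalone sketch of
 * the same shape (hypothetical helper; the caller starts with
 * *idx = 0 and pos = NULL):
 *
 *	static struct hlist_node *
 *	hash_cursor_next(struct hlist_head *buckets, int size,
 *			 int *idx, struct hlist_node *pos)
 *	{
 *		if (pos && pos->next)
 *			return pos->next;
 *		if (pos)
 *			(*idx)++;
 *		for (; *idx < size; (*idx)++)
 *			if (buckets[*idx].first)
 *				return buckets[*idx].first;
 *		return NULL;
 *	}
 */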
3267
3268static void *t_probe_start(struct seq_file *m, loff_t *pos)
3269{
3270        struct ftrace_iterator *iter = m->private;
3271        void *p = NULL;
3272        loff_t l;
3273
3274        if (!(iter->flags & FTRACE_ITER_DO_PROBES))
3275                return NULL;
3276
3277        if (iter->mod_pos > *pos)
3278                return NULL;
3279
3280        iter->probe = NULL;
3281        iter->probe_entry = NULL;
3282        iter->pidx = 0;
3283        for (l = 0; l <= (*pos - iter->mod_pos); ) {
3284                p = t_probe_next(m, &l);
3285                if (!p)
3286                        break;
3287        }
3288        if (!p)
3289                return NULL;
3290
3291        /* Only set this if we have an item */
3292        iter->flags |= FTRACE_ITER_PROBE;
3293
3294        return iter;
3295}
3296
3297static int
3298t_probe_show(struct seq_file *m, struct ftrace_iterator *iter)
3299{
3300        struct ftrace_func_entry *probe_entry;
3301        struct ftrace_probe_ops *probe_ops;
3302        struct ftrace_func_probe *probe;
3303
3304        probe = iter->probe;
3305        probe_entry = iter->probe_entry;
3306
3307        if (WARN_ON_ONCE(!probe || !probe_entry))
3308                return -EIO;
3309
3310        probe_ops = probe->probe_ops;
3311
3312        if (probe_ops->print)
3313                return probe_ops->print(m, probe_entry->ip, probe_ops, probe->data);
3314
3315        seq_printf(m, "%ps:%ps\n", (void *)probe_entry->ip,
3316                   (void *)probe_ops->func);
3317
3318        return 0;
3319}
3320
3321static void *
3322t_mod_next(struct seq_file *m, loff_t *pos)
3323{
3324        struct ftrace_iterator *iter = m->private;
3325        struct trace_array *tr = iter->tr;
3326
3327        (*pos)++;
3328        iter->pos = *pos;
3329
3330        iter->mod_list = iter->mod_list->next;
3331
3332        if (iter->mod_list == &tr->mod_trace ||
3333            iter->mod_list == &tr->mod_notrace) {
3334                iter->flags &= ~FTRACE_ITER_MOD;
3335                return NULL;
3336        }
3337
3338        iter->mod_pos = *pos;
3339
3340        return iter;
3341}
3342
3343static void *t_mod_start(struct seq_file *m, loff_t *pos)
3344{
3345        struct ftrace_iterator *iter = m->private;
3346        void *p = NULL;
3347        loff_t l;
3348
3349        if (iter->func_pos > *pos)
3350                return NULL;
3351
3352        iter->mod_pos = iter->func_pos;
3353
3354        /* probes are only available if tr is set */
3355        if (!iter->tr)
3356                return NULL;
3357
3358        for (l = 0; l <= (*pos - iter->func_pos); ) {
3359                p = t_mod_next(m, &l);
3360                if (!p)
3361                        break;
3362        }
3363        if (!p) {
3364                iter->flags &= ~FTRACE_ITER_MOD;
3365                return t_probe_start(m, pos);
3366        }
3367
3368        /* Only set this if we have an item */
3369        iter->flags |= FTRACE_ITER_MOD;
3370
3371        return iter;
3372}
3373
3374static int
3375t_mod_show(struct seq_file *m, struct ftrace_iterator *iter)
3376{
3377        struct ftrace_mod_load *ftrace_mod;
3378        struct trace_array *tr = iter->tr;
3379
3380        if (WARN_ON_ONCE(!iter->mod_list) ||
3381                         iter->mod_list == &tr->mod_trace ||
3382                         iter->mod_list == &tr->mod_notrace)
3383                return -EIO;
3384
3385        ftrace_mod = list_entry(iter->mod_list, struct ftrace_mod_load, list);
3386
3387        if (ftrace_mod->func)
3388                seq_printf(m, "%s", ftrace_mod->func);
3389        else
3390                seq_putc(m, '*');
3391
3392        seq_printf(m, ":mod:%s\n", ftrace_mod->module);
3393
3394        return 0;
3395}
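
/*
 * Editorial example: given the seq_printf() calls above, a cached
 * module filter shows up in the filter file as "<func>:mod:<module>",
 * e.g. (names illustrative):
 *
 *	ext4_*:mod:ext4
 *	*:mod:btrfs
 *
 * where a lone "*" means every function of that module.
 */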
3396
3397static void *
3398t_func_next(struct seq_file *m, loff_t *pos)
3399{
3400        struct ftrace_iterator *iter = m->private;
3401        struct dyn_ftrace *rec = NULL;
3402
3403        (*pos)++;
3404
3405 retry:
3406        if (iter->idx >= iter->pg->index) {
3407                if (iter->pg->next) {
3408                        iter->pg = iter->pg->next;
3409                        iter->idx = 0;
3410                        goto retry;
3411                }
3412        } else {
3413                rec = &iter->pg->records[iter->idx++];
3414                if (((iter->flags & (FTRACE_ITER_FILTER | FTRACE_ITER_NOTRACE)) &&
3415                     !ftrace_lookup_ip(iter->hash, rec->ip)) ||
3416
3417                    ((iter->flags & FTRACE_ITER_ENABLED) &&
3418                     !(rec->flags & FTRACE_FL_ENABLED))) {
3419
3420                        rec = NULL;
3421                        goto retry;
3422                }
3423        }
3424
3425        if (!rec)
3426                return NULL;
3427
3428        iter->pos = iter->func_pos = *pos;
3429        iter->func = rec;
3430
3431        return iter;
3432}
3433
3434static void *
3435t_next(struct seq_file *m, void *v, loff_t *pos)
3436{
3437        struct ftrace_iterator *iter = m->private;
3438        loff_t l = *pos; /* t_probe_start() must use original pos */
3439        void *ret;
3440
3441        if (unlikely(ftrace_disabled))
3442                return NULL;
3443
3444        if (iter->flags & FTRACE_ITER_PROBE)
3445                return t_probe_next(m, pos);
3446
3447        if (iter->flags & FTRACE_ITER_MOD)
3448                return t_mod_next(m, pos);
3449
3450        if (iter->flags & FTRACE_ITER_PRINTALL) {
3451                /* next must increment pos, and t_mod_start() does not */
3452                (*pos)++;
3453                return t_mod_start(m, &l);
3454        }
3455
3456        ret = t_func_next(m, pos);
3457
3458        if (!ret)
3459                return t_mod_start(m, &l);
3460
3461        return ret;
3462}
3463
3464static void reset_iter_read(struct ftrace_iterator *iter)
3465{
3466        iter->pos = 0;
3467        iter->func_pos = 0;
3468        iter->flags &= ~(FTRACE_ITER_PRINTALL | FTRACE_ITER_PROBE | FTRACE_ITER_MOD);
3469}
3470
3471static void *t_start(struct seq_file *m, loff_t *pos)
3472{
3473        struct ftrace_iterator *iter = m->private;
3474        void *p = NULL;
3475        loff_t l;
3476
3477        mutex_lock(&ftrace_lock);
3478
3479        if (unlikely(ftrace_disabled))
3480                return NULL;
3481
3482        /*
3483         * If an lseek was done, then reset and start from beginning.
3484         */
3485        if (*pos < iter->pos)
3486                reset_iter_read(iter);
3487
3488        /*
3489         * For set_ftrace_filter reading, if the filter hash is
3490         * empty, we can short-circuit and just print out that
3491         * all functions are enabled.
3492         */
3493        if ((iter->flags & (FTRACE_ITER_FILTER | FTRACE_ITER_NOTRACE)) &&
3494            ftrace_hash_empty(iter->hash)) {
3495                iter->func_pos = 1; /* Account for the message */
3496                if (*pos > 0)
3497                        return t_mod_start(m, pos);
3498                iter->flags |= FTRACE_ITER_PRINTALL;
3499                /* reset in case of seek/pread */
3500                iter->flags &= ~FTRACE_ITER_PROBE;
3501                return iter;
3502        }
3503
3504        if (iter->flags & FTRACE_ITER_MOD)
3505                return t_mod_start(m, pos);
3506
3507        /*
3508         * Unfortunately, we need to restart at ftrace_pages_start
3509         * every time we let go of ftrace_lock. This is because
3510         * those pointers can change without the lock.
3511         */
3512        iter->pg = ftrace_pages_start;
3513        iter->idx = 0;
3514        for (l = 0; l <= *pos; ) {
3515                p = t_func_next(m, &l);
3516                if (!p)
3517                        break;
3518        }
3519
3520        if (!p)
3521                return t_mod_start(m, pos);
3522
3523        return iter;
3524}
3525
3526static void t_stop(struct seq_file *m, void *p)
3527{
3528        mutex_unlock(&ftrace_lock);
3529}
3530
3531void * __weak
3532arch_ftrace_trampoline_func(struct ftrace_ops *ops, struct dyn_ftrace *rec)
3533{
3534        return NULL;
3535}
3536
3537static void add_trampoline_func(struct seq_file *m, struct ftrace_ops *ops,
3538                                struct dyn_ftrace *rec)
3539{
3540        void *ptr;
3541
3542        ptr = arch_ftrace_trampoline_func(ops, rec);
3543        if (ptr)
3544                seq_printf(m, " ->%pS", ptr);
3545}
3546
3547static int t_show(struct seq_file *m, void *v)
3548{
3549        struct ftrace_iterator *iter = m->private;
3550        struct dyn_ftrace *rec;
3551
3552        if (iter->flags & FTRACE_ITER_PROBE)
3553                return t_probe_show(m, iter);
3554
3555        if (iter->flags & FTRACE_ITER_MOD)
3556                return t_mod_show(m, iter);
3557
3558        if (iter->flags & FTRACE_ITER_PRINTALL) {
3559                if (iter->flags & FTRACE_ITER_NOTRACE)
3560                        seq_puts(m, "#### no functions disabled ####\n");
3561                else
3562                        seq_puts(m, "#### all functions enabled ####\n");
3563                return 0;
3564        }
3565
3566        rec = iter->func;
3567
3568        if (!rec)
3569                return 0;
3570
3571        seq_printf(m, "%ps", (void *)rec->ip);
3572        if (iter->flags & FTRACE_ITER_ENABLED) {
3573                struct ftrace_ops *ops;
3574
3575                seq_printf(m, " (%ld)%s%s%s",
3576                           ftrace_rec_count(rec),
3577                           rec->flags & FTRACE_FL_REGS ? " R" : "  ",
3578                           rec->flags & FTRACE_FL_IPMODIFY ? " I" : "  ",
3579                           rec->flags & FTRACE_FL_DIRECT ? " D" : "  ");
3580                if (rec->flags & FTRACE_FL_TRAMP_EN) {
3581                        ops = ftrace_find_tramp_ops_any(rec);
3582                        if (ops) {
3583                                do {
3584                                        seq_printf(m, "\ttramp: %pS (%pS)",
3585                                                   (void *)ops->trampoline,
3586                                                   (void *)ops->func);
3587                                        add_trampoline_func(m, ops, rec);
3588                                        ops = ftrace_find_tramp_ops_next(rec, ops);
3589                                } while (ops);
3590                        } else
3591                                seq_puts(m, "\ttramp: ERROR!");
3592                } else {
3593                        add_trampoline_func(m, NULL, rec);
3594                }
3595                if (rec->flags & FTRACE_FL_DIRECT) {
3596                        unsigned long direct;
3597
3598                        direct = ftrace_find_rec_direct(rec->ip);
3599                        if (direct)
3600                                seq_printf(m, "\n\tdirect-->%pS", (void *)direct);
3601                }
3602        }
3603
3604        seq_putc(m, '\n');
3605
3606        return 0;
3607}
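
/*
 * Editorial example: for the enabled_functions file (FTRACE_ITER_ENABLED),
 * the format strings above yield lines like (names, counts and
 * addresses illustrative):
 *
 *	schedule (1) R
 *	vfs_read (2)	tramp: 0xffffffffa0001000 (my_tramp_func)
 *
 * The count is how many ftrace_ops have hooked the record; R, I and D
 * mark FTRACE_FL_REGS, FTRACE_FL_IPMODIFY and FTRACE_FL_DIRECT, and
 * the tramp/direct lines name the attached trampoline.
 */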
3608
3609static const struct seq_operations show_ftrace_seq_ops = {
3610        .start = t_start,
3611        .next = t_next,
3612        .stop = t_stop,
3613        .show = t_show,
3614};
3615
3616static int
3617ftrace_avail_open(struct inode *inode, struct file *file)
3618{
3619        struct ftrace_iterator *iter;
3620        int ret;
3621
3622        ret = security_locked_down(LOCKDOWN_TRACEFS);
3623        if (ret)
3624                return ret;
3625
3626        if (unlikely(ftrace_disabled))
3627                return -ENODEV;
3628
3629        iter = __seq_open_private(file, &show_ftrace_seq_ops, sizeof(*iter));
3630        if (!iter)
3631                return -ENOMEM;
3632
3633        iter->pg = ftrace_pages_start;
3634        iter->ops = &global_ops;
3635
3636        return 0;
3637}
3638
3639static int
3640ftrace_enabled_open(struct inode *inode, struct file *file)
3641{
3642        struct ftrace_iterator *iter;
3643
3644        /*
3645         * This shows us what functions are currently being
3646         * traced and by what. It is not clear that lockdown
3647         * should hide such critical information from an admin;
3648         * it may expose details we don't want people to see,
3649         * but if something is tracing something, we probably
3650         * want to know about it.
3651         */
3652
3653        iter = __seq_open_private(file, &show_ftrace_seq_ops, sizeof(*iter));
3654        if (!iter)
3655                return -ENOMEM;
3656
3657        iter->pg = ftrace_pages_start;
3658        iter->flags = FTRACE_ITER_ENABLED;
3659        iter->ops = &global_ops;
3660
3661        return 0;
3662}
3663
3664/**
3665 * ftrace_regex_open - initialize function tracer filter files
3666 * @ops: The ftrace_ops that hold the hash filters
3667 * @flag: The type of filter to process
3668 * @inode: The inode, usually passed in to your open routine
3669 * @file: The file, usually passed in to your open routine
3670 *
3671 * ftrace_regex_open() initializes the filter files for the
3672 * @ops. Depending on @flag it may process the filter hash or
3673 * the notrace hash of @ops. With this called from the open
3674 * routine, you can use ftrace_filter_write() for the write
3675 * routine if @flag has FTRACE_ITER_FILTER set, or
3676 * ftrace_notrace_write() if @flag has FTRACE_ITER_NOTRACE set.
3677 * tracing_lseek() should be used as the lseek routine, and
3678 * release must call ftrace_regex_release().
3679 */
3680int
3681ftrace_regex_open(struct ftrace_ops *ops, int flag,
3682                  struct inode *inode, struct file *file)
3683{
3684        struct ftrace_iterator *iter;
3685        struct ftrace_hash *hash;
3686        struct list_head *mod_head;
3687        struct trace_array *tr = ops->private;
3688        int ret = -ENOMEM;
3689
3690        ftrace_ops_init(ops);
3691
3692        if (unlikely(ftrace_disabled))
3693                return -ENODEV;
3694
3695        if (tracing_check_open_get_tr(tr))
3696                return -ENODEV;
3697
3698        iter = kzalloc(sizeof(*iter), GFP_KERNEL);
3699        if (!iter)
3700                goto out;
3701
3702        if (trace_parser_get_init(&iter->parser, FTRACE_BUFF_MAX))
3703                goto out;
3704
3705        iter->ops = ops;
3706        iter->flags = flag;
3707        iter->tr = tr;
3708
3709        mutex_lock(&ops->func_hash->regex_lock);
3710
3711        if (flag & FTRACE_ITER_NOTRACE) {
3712                hash = ops->func_hash->notrace_hash;
3713                mod_head = tr ? &tr->mod_notrace : NULL;
3714        } else {
3715                hash = ops->func_hash->filter_hash;
3716                mod_head = tr ? &tr->mod_trace : NULL;
3717        }
3718
3719        iter->mod_list = mod_head;
3720
3721        if (file->f_mode & FMODE_WRITE) {
3722                const int size_bits = FTRACE_HASH_DEFAULT_BITS;
3723
3724                if (file->f_flags & O_TRUNC) {
3725                        iter->hash = alloc_ftrace_hash(size_bits);
3726                        clear_ftrace_mod_list(mod_head);
3727                } else {
3728                        iter->hash = alloc_and_copy_ftrace_hash(size_bits, hash);
3729                }
3730
3731                if (!iter->hash) {
3732                        trace_parser_put(&iter->parser);
3733                        goto out_unlock;
3734                }
3735        } else
3736                iter->hash = hash;
3737
3738        ret = 0;
3739
3740        if (file->f_mode & FMODE_READ) {
3741                iter->pg = ftrace_pages_start;
3742
3743                ret = seq_open(file, &show_ftrace_seq_ops);
3744                if (!ret) {
3745                        struct seq_file *m = file->private_data;
3746                        m->private = iter;
3747                } else {
3748                        /* Failed */
3749                        free_ftrace_hash(iter->hash);
3750                        trace_parser_put(&iter->parser);
3751                }
3752        } else
3753                file->private_data = iter;
3754
3755 out_unlock:
3756        mutex_unlock(&ops->func_hash->regex_lock);
3757
3758 out:
3759        if (ret) {
3760                kfree(iter);
3761                if (tr)
3762                        trace_array_put(tr);
3763        }
3764
3765        return ret;
3766}
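
/*
 * Editorial sketch: how a tracer typically wires these helpers into a
 * tracefs file, per the kernel-doc above ("my_*" names are hypothetical;
 * the helper names are the ones the kernel-doc prescribes):
 *
 *	static int my_filter_open(struct inode *inode, struct file *file)
 *	{
 *		return ftrace_regex_open(&my_ops,
 *			FTRACE_ITER_FILTER | FTRACE_ITER_DO_PROBES,
 *			inode, file);
 *	}
 *
 *	static const struct file_operations my_filter_fops = {
 *		.open    = my_filter_open,
 *		.read    = seq_read,
 *		.write   = ftrace_filter_write,
 *		.llseek  = tracing_lseek,
 *		.release = ftrace_regex_release,
 *	};
 */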
3767
3768static int
3769ftrace_filter_open(struct inode *inode, struct file *file)
3770{
3771        struct ftrace_ops *ops = inode->i_private;
3772
3773        /* Checks for tracefs lockdown */
3774        return ftrace_regex_open(ops,
3775                        FTRACE_ITER_FILTER | FTRACE_ITER_DO_PROBES,
3776                        inode, file);
3777}
3778
3779static int
3780ftrace_notrace_open(struct inode *inode, struct file *file)
3781{
3782        struct ftrace_ops *ops = inode->i_private;
3783
3784        /* Checks for tracefs lockdown */
3785        return ftrace_regex_open(ops, FTRACE_ITER_NOTRACE,
3786                                 inode, file);
3787}
3788
3789/* A glob (basic regex) parsed by filter_parse_regex(), used for quick matching */
3790struct ftrace_glob {
3791        char *search;
3792        unsigned len;
3793        int type;
3794};
3795
3796/*
3797 * If symbols in an architecture don't correspond exactly to the user-visible
3798 * name of what they represent, it is possible to define this function to
3799 * perform the necessary adjustments.
3800 */
3801char * __weak arch_ftrace_match_adjust(char *str, const char *search)
3802{
3803        return str;
3804}
3805
3806static int ftrace_match(char *str, struct ftrace_glob *g)
3807{
3808        int matched = 0;
3809        int slen;
3810
3811        str = arch_ftrace_match_adjust(str, g->search);
3812
3813        switch (g->type) {
3814        case MATCH_FULL:
3815                if (strcmp(str, g->search) == 0)
3816                        matched = 1;
3817                break;
3818        case MATCH_FRONT_ONLY:
3819                if (strncmp(str, g->search, g->len) == 0)
3820                        matched = 1;
3821                break;
3822        case MATCH_MIDDLE_ONLY:
3823                if (strstr(str, g->search))
3824                        matched = 1;
3825                break;
3826        case MATCH_END_ONLY:
3827                slen = strlen(str);
3828                if (slen >= g->len &&
3829                    memcmp(str + slen - g->len, g->search, g->len) == 0)
3830                        matched = 1;
3831                break;
3832        case MATCH_GLOB:
3833                if (glob_match(g->search, str))
3834                        matched = 1;
3835                break;
3836        }
3837
3838        return matched;
3839}
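
/*
 * Editorial example: the match types above map onto the glob forms
 * that filter_parse_regex() produces:
 *
 *	"schedule"	MATCH_FULL		exact string compare
 *	"sched*"	MATCH_FRONT_ONLY	prefix match
 *	"*sched*"	MATCH_MIDDLE_ONLY	substring match
 *	"*sched"	MATCH_END_ONLY		suffix match
 *	"s*ed?le"	MATCH_GLOB		full glob_match()
 *
 * (A purely numeric pattern becomes MATCH_INDEX, handled by
 * add_rec_by_index() below.)
 */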
3840
3841static int
3842enter_record(struct ftrace_hash *hash, struct dyn_ftrace *rec, int clear_filter)
3843{
3844        struct ftrace_func_entry *entry;
3845        int ret = 0;
3846
3847        entry = ftrace_lookup_ip(hash, rec->ip);
3848        if (clear_filter) {
3849                /* Do nothing if it doesn't exist */
3850                if (!entry)
3851                        return 0;
3852
3853                free_hash_entry(hash, entry);
3854        } else {
3855                /* Do nothing if it exists */
3856                if (entry)
3857                        return 0;
3858
3859                ret = add_hash_entry(hash, rec->ip);
3860        }
3861        return ret;
3862}
3863
3864static int
3865add_rec_by_index(struct ftrace_hash *hash, struct ftrace_glob *func_g,
3866                 int clear_filter)
3867{
3868        long index = simple_strtoul(func_g->search, NULL, 0);
3869        struct ftrace_page *pg;
3870        struct dyn_ftrace *rec;
3871
3872        /* The index starts at 1 */
3873        if (--index < 0)
3874                return 0;
3875
3876        do_for_each_ftrace_rec(pg, rec) {
3877                if (pg->index <= index) {
3878                        index -= pg->index;
3879                        /* this is a double loop, break goes to the next page */
3880                        break;
3881                }
3882                rec = &pg->records[index];
3883                enter_record(hash, rec, clear_filter);
3884                return 1;
3885        } while_for_each_ftrace_rec();
3886        return 0;
3887}
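
/*
 * Editorial example: MATCH_INDEX lets tooling select a function by its
 * 1-based position in available_filter_functions instead of by name,
 * e.g. from a shell:
 *
 *	# trace the 127th entry of available_filter_functions
 *	echo 127 > /sys/kernel/tracing/set_ftrace_filter
 */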
3888
3889static int
3890ftrace_match_record(struct dyn_ftrace *rec, struct ftrace_glob *func_g,
3891                struct ftrace_glob *mod_g, int exclude_mod)
3892{
3893        char str[KSYM_SYMBOL_LEN];
3894        char *modname;
3895
3896        kallsyms_lookup(rec->ip, NULL, NULL, &modname, str);
3897
3898        if (mod_g) {
3899                int mod_matches = (modname) ? ftrace_match(modname, mod_g) : 0;
3900
3901                /* blank module name to match all modules */
3902                if (!mod_g->len) {
3903                        /* blank module globbing: modname xor exclude_mod */
3904                        if (!exclude_mod != !modname)
3905                                goto func_match;
3906                        return 0;
3907                }
3908
3909                /*
3910                 * exclude_mod is set to trace everything but the given
3911                 * module. If it is set and the module matches, then
3912                 * return 0. If it is not set and the module doesn't match,
3913                 * also return 0. Otherwise, check the function to see if
3914                 * that matches.
3915                 */
3916                if (!mod_matches == !exclude_mod)
3917                        return 0;
3918func_match:
3919                /* blank search means to match all funcs in the mod */
3920                if (!func_g->len)
3921                        return 1;
3922        }
3923
3924        return ftrace_match(str, func_g);
3925}
3926
3927static int
3928match_records(struct ftrace_hash *hash, char *func, int len, char *mod)
3929{
3930        struct ftrace_page *pg;
3931        struct dyn_ftrace *rec;
3932        struct ftrace_glob func_g = { .type = MATCH_FULL };
3933        struct ftrace_glob mod_g = { .type = MATCH_FULL };
3934        struct ftrace_glob *mod_match = (mod) ? &mod_g : NULL;
3935        int exclude_mod = 0;
3936        int found = 0;
3937        int ret;
3938        int clear_filter = 0;
3939
3940        if (func) {
3941                func_g.type = filter_parse_regex(func, len, &func_g.search,
3942                                                 &clear_filter);
3943                func_g.len = strlen(func_g.search);
3944        }
3945
3946        if (mod) {
3947                mod_g.type = filter_parse_regex(mod, strlen(mod),
3948                                &mod_g.search, &exclude_mod);
3949                mod_g.len = strlen(mod_g.search);
3950        }
3951
3952        mutex_lock(&ftrace_lock);
3953
3954        if (unlikely(ftrace_disabled))
3955                goto out_unlock;
3956
3957        if (func_g.type == MATCH_INDEX) {
3958                found = add_rec_by_index(hash, &func_g, clear_filter);
3959                goto out_unlock;
3960        }
3961
3962        do_for_each_ftrace_rec(pg, rec) {
3963
3964                if (rec->flags & FTRACE_FL_DISABLED)
3965                        continue;
3966
3967                if (ftrace_match_record(rec, &func_g, mod_match, exclude_mod)) {
3968                        ret = enter_record(hash, rec, clear_filter);
3969                        if (ret < 0) {
3970                                found = ret;
3971                                goto out_unlock;
3972                        }
3973                        found = 1;
3974                }
3975        } while_for_each_ftrace_rec();
3976 out_unlock:
3977        mutex_unlock(&ftrace_lock);
3978
3979        return found;
3980}
3981
3982static int
3983ftrace_match_records(struct ftrace_hash *hash, char *buff, int len)
3984{
3985        return match_records(hash, buff, len, NULL);
3986}
3987
3988static void ftrace_ops_update_code(struct ftrace_ops *ops,
3989                                   struct ftrace_ops_hash *old_hash)
3990{
3991        struct ftrace_ops *op;
3992
3993        if (!ftrace_enabled)
3994                return;
3995
3996        if (ops->flags & FTRACE_OPS_FL_ENABLED) {
3997                ftrace_run_modify_code(ops, FTRACE_UPDATE_CALLS, old_hash);
3998                return;
3999        }
4000
4001        /*
4002         * If this is the shared global_ops filter, then we need to
4003         * check if another ops that shares it is enabled.
4004         * If so, we still need to run the modify code.
4005         */
4006        if (ops->func_hash != &global_ops.local_hash)
4007                return;
4008
4009        do_for_each_ftrace_op(op, ftrace_ops_list) {
4010                if (op->func_hash == &global_ops.local_hash &&
4011                    op->flags & FTRACE_OPS_FL_ENABLED) {
4012                        ftrace_run_modify_code(op, FTRACE_UPDATE_CALLS, old_hash);
4013                        /* Only need to do this once */
4014                        return;
4015                }
4016        } while_for_each_ftrace_op(op);
4017}
4018
4019static int ftrace_hash_move_and_update_ops(struct ftrace_ops *ops,
4020                                           struct ftrace_hash **orig_hash,
4021                                           struct ftrace_hash *hash,
4022                                           int enable)
4023{
4024        struct ftrace_ops_hash old_hash_ops;
4025        struct ftrace_hash *old_hash;
4026        int ret;
4027
4028        old_hash = *orig_hash;
4029        old_hash_ops.filter_hash = ops->func_hash->filter_hash;
4030        old_hash_ops.notrace_hash = ops->func_hash->notrace_hash;
4031        ret = ftrace_hash_move(ops, enable, orig_hash, hash);
4032        if (!ret) {
4033                ftrace_ops_update_code(ops, &old_hash_ops);
4034                free_ftrace_hash_rcu(old_hash);
4035        }
4036        return ret;
4037}
4038
4039static bool module_exists(const char *module)
4040{
4041        /* All modules have the symbol __this_module */
4042        static const char this_mod[] = "__this_module";
4043        char modname[MAX_PARAM_PREFIX_LEN + sizeof(this_mod) + 2];
4044        unsigned long val;
4045        int n;
4046
4047        n = snprintf(modname, sizeof(modname), "%s:%s", module, this_mod);
4048
4049        if (n > sizeof(modname) - 1)
4050                return false;
4051
4052        val = module_kallsyms_lookup_name(modname);
4053        return val != 0;
4054}
4055
4056static int cache_mod(struct trace_array *tr,
4057                     const char *func, char *module, int enable)
4058{
4059        struct ftrace_mod_load *ftrace_mod, *n;
4060        struct list_head *head = enable ? &tr->mod_trace : &tr->mod_notrace;
4061        int ret;
4062
4063        mutex_lock(&ftrace_lock);
4064
4065        /* We do not cache inverse filters */
4066        if (func[0] == '!') {
4067                func++;
4068                ret = -EINVAL;
4069
4070                /* Look to remove this hash */
4071                list_for_each_entry_safe(ftrace_mod, n, head, list) {
4072                        if (strcmp(ftrace_mod->module, module) != 0)
4073                                continue;
4074
4075                        /* a func of "*" matches all functions */
4076                        if (strcmp(func, "*") == 0 ||
4077                            (ftrace_mod->func &&
4078                             strcmp(ftrace_mod->func, func) == 0)) {
4079                                ret = 0;
4080                                free_ftrace_mod(ftrace_mod);
4081                                continue;
4082                        }
4083                }
4084                goto out;
4085        }
4086
4087        ret = -EINVAL;
4088        /* We only care about modules that have not been loaded yet */
4089        if (module_exists(module))
4090                goto out;
4091
4092        /* Save this string off, and execute it when the module is loaded */
4093        ret = ftrace_add_mod(tr, func, module, enable);
4094 out:
4095        mutex_unlock(&ftrace_lock);
4096
4097        return ret;
4098}
4099
4100static int
4101ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len,
4102                 int reset, int enable);
4103
4104#ifdef CONFIG_MODULES
4105static void process_mod_list(struct list_head *head, struct ftrace_ops *ops,
4106                             char *mod, bool enable)
4107{
4108        struct ftrace_mod_load *ftrace_mod, *n;
4109        struct ftrace_hash **orig_hash, *new_hash;
4110        LIST_HEAD(process_mods);
4111        char *func;
4112        int ret;
4113
4114        mutex_lock(&ops->func_hash->regex_lock);
4115
4116        if (enable)
4117                orig_hash = &ops->func_hash->filter_hash;
4118        else
4119                orig_hash = &ops->func_hash->notrace_hash;
4120
4121        new_hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS,
4122                                              *orig_hash);
4123        if (!new_hash)
4124                goto out; /* warn? */
4125
4126        mutex_lock(&ftrace_lock);
4127
4128        list_for_each_entry_safe(ftrace_mod, n, head, list) {
4129
4130                if (strcmp(ftrace_mod->module, mod) != 0)
4131                        continue;
4132
4133                if (ftrace_mod->func)
4134                        func = kstrdup(ftrace_mod->func, GFP_KERNEL);
4135                else
4136                        func = kstrdup("*", GFP_KERNEL);
4137
4138                if (!func) /* warn? */
4139                        continue;
4140
4141                list_del(&ftrace_mod->list);
4142                list_add(&ftrace_mod->list, &process_mods);
4143
4144                /* Use the newly allocated func, as it may be "*" */
4145                kfree(ftrace_mod->func);
4146                ftrace_mod->func = func;
4147        }
4148
4149        mutex_unlock(&ftrace_lock);
4150
4151        list_for_each_entry_safe(ftrace_mod, n, &process_mods, list) {
4152
4153                func = ftrace_mod->func;
4154
4155                /* Grabs ftrace_lock, which is why we have this extra step */
4156                match_records(new_hash, func, strlen(func), mod);
4157                free_ftrace_mod(ftrace_mod);
4158        }
4159
4160        if (enable && list_empty(head))
4161                new_hash->flags &= ~FTRACE_HASH_FL_MOD;
4162
4163        mutex_lock(&ftrace_lock);
4164
4165        ret = ftrace_hash_move_and_update_ops(ops, orig_hash,
4166                                              new_hash, enable);
4167        mutex_unlock(&ftrace_lock);
4168
4169 out:
4170        mutex_unlock(&ops->func_hash->regex_lock);
4171
4172        free_ftrace_hash(new_hash);
4173}
4174
4175static void process_cached_mods(const char *mod_name)
4176{
4177        struct trace_array *tr;
4178        char *mod;
4179
4180        mod = kstrdup(mod_name, GFP_KERNEL);
4181        if (!mod)
4182                return;
4183
4184        mutex_lock(&trace_types_lock);
4185        list_for_each_entry(tr, &ftrace_trace_arrays, list) {
4186                if (!list_empty(&tr->mod_trace))
4187                        process_mod_list(&tr->mod_trace, tr->ops, mod, true);
4188                if (!list_empty(&tr->mod_notrace))
4189                        process_mod_list(&tr->mod_notrace, tr->ops, mod, false);
4190        }
4191        mutex_unlock(&trace_types_lock);
4192
4193        kfree(mod);
4194}
4195#endif
4196
4197/*
4198 * We register the module command as a template to show others how
4199 * to register a command as well.
4200 */
4201
4202static int
4203ftrace_mod_callback(struct trace_array *tr, struct ftrace_hash *hash,
4204                    char *func_orig, char *cmd, char *module, int enable)
4205{
4206        char *func;
4207        int ret;
4208
4209        /* match_records() modifies func, and we need the original */
4210        func = kstrdup(func_orig, GFP_KERNEL);
4211        if (!func)
4212                return -ENOMEM;
4213
4214        /*
4215         * cmd == 'mod' because we only registered this func
4216         * for the 'mod' ftrace_func_command.
4217         * But if you register one func with multiple commands,
4218         * you can tell which command was used by the cmd
4219         * parameter.
4220         */
4221        ret = match_records(hash, func, strlen(func), module);
4222        kfree(func);
4223
4224        if (!ret)
4225                return cache_mod(tr, func_orig, module, enable);
4226        if (ret < 0)
4227                return ret;
4228        return 0;
4229}
4230
4231static struct ftrace_func_command ftrace_mod_cmd = {
4232        .name                   = "mod",
4233        .func                   = ftrace_mod_callback,
4234};
4235
4236static int __init ftrace_mod_cmd_init(void)
4237{
4238        return register_ftrace_command(&ftrace_mod_cmd);
4239}
4240core_initcall(ftrace_mod_cmd_init);
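
/*
 * Editorial sketch: using the template above to register a hypothetical
 * command "mycmd" (everything except register_ftrace_command() and
 * struct ftrace_func_command is invented for illustration):
 *
 *	static int
 *	mycmd_callback(struct trace_array *tr, struct ftrace_hash *hash,
 *		       char *func, char *cmd, char *param, int enable)
 *	{
 *		act on "<func>:mycmd:<param>" writes here, then
 *		return 0;
 *	}
 *
 *	static struct ftrace_func_command mycmd = {
 *		.name	= "mycmd",
 *		.func	= mycmd_callback,
 *	};
 *
 *	static int __init mycmd_init(void)
 *	{
 *		return register_ftrace_command(&mycmd);
 *	}
 *	core_initcall(mycmd_init);
 */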
4241
4242static void function_trace_probe_call(unsigned long ip, unsigned long parent_ip,
4243                                      struct ftrace_ops *op, struct pt_regs *pt_regs)
4244{
4245        struct ftrace_probe_ops *probe_ops;
4246        struct ftrace_func_probe *probe;
4247
4248        probe = container_of(op, struct ftrace_func_probe, ops);
4249        probe_ops = probe->probe_ops;
4250
4251        /*
4252         * Disable preemption for these calls to prevent an RCU grace
4253         * period. This syncs the hash iteration and freeing of items
4254         * on the hash. rcu_read_lock is too dangerous here.
4255         */
4256        preempt_disable_notrace();
4257        probe_ops->func(ip, parent_ip, probe->tr, probe_ops, probe->data);
4258        preempt_enable_notrace();
4259}
4260
4261struct ftrace_func_map {
4262        struct ftrace_func_entry        entry;
4263        void                            *data;
4264};
4265
4266struct ftrace_func_mapper {
4267        struct ftrace_hash              hash;
4268};
4269
4270/**
4271 * allocate_ftrace_func_mapper - allocate a new ftrace_func_mapper
4272 *
4273 * Returns a ftrace_func_mapper descriptor that can be used to map ips to data.
4274 */
4275struct ftrace_func_mapper *allocate_ftrace_func_mapper(void)
4276{
4277        struct ftrace_hash *hash;
4278
4279        /*
4280         * The mapper is simply a ftrace_hash, but since the entries
4281         * in the hash are not ftrace_func_entry type, we define it
4282         * as a separate structure.
4283         */
4284        hash = alloc_ftrace_hash(FTRACE_HASH_DEFAULT_BITS);
4285        return (struct ftrace_func_mapper *)hash;
4286}
4287
4288/**
4289 * ftrace_func_mapper_find_ip - Find some data mapped to an ip
4290 * @mapper: The mapper that has the ip maps
4291 * @ip: the instruction pointer to find the data for
4292 *
4293 * Returns the data mapped to @ip if found, otherwise NULL. The return
4294 * is actually the address of the mapper data pointer. The address is
4295 * returned for use cases where the data is no bigger than a long, and
4296 * the user can use the data pointer as its data instead of having to
4297 * allocate more memory for the reference.
4298 */
4299void **ftrace_func_mapper_find_ip(struct ftrace_func_mapper *mapper,
4300                                  unsigned long ip)
4301{
4302        struct ftrace_func_entry *entry;
4303        struct ftrace_func_map *map;
4304
4305        entry = ftrace_lookup_ip(&mapper->hash, ip);
4306        if (!entry)
4307                return NULL;
4308
4309        map = (struct ftrace_func_map *)entry;
4310        return &map->data;
4311}
4312
4313/**
4314 * ftrace_func_mapper_add_ip - Map some data to an ip
4315 * @mapper: The mapper that has the ip maps
4316 * @ip: The instruction pointer address to map @data to
4317 * @data: The data to map to @ip
4318 *
4319 * Returns 0 on success, otherwise an error.
4320 */
4321int ftrace_func_mapper_add_ip(struct ftrace_func_mapper *mapper,
4322                              unsigned long ip, void *data)
4323{
4324        struct ftrace_func_entry *entry;
4325        struct ftrace_func_map *map;
4326
4327        entry = ftrace_lookup_ip(&mapper->hash, ip);
4328        if (entry)
4329                return -EBUSY;
4330
4331        map = kmalloc(sizeof(*map), GFP_KERNEL);
4332        if (!map)
4333                return -ENOMEM;
4334
4335        map->entry.ip = ip;
4336        map->data = data;
4337
4338        __add_hash_entry(&mapper->hash, &map->entry);
4339
4340        return 0;
4341}
4342
4343/**
4344 * ftrace_func_mapper_remove_ip - Remove an ip from the mapping
4345 * @mapper: The mapper that has the ip maps
4346 * @ip: The instruction pointer address to remove the data from
4347 *
4348 * Returns the data if it is found, otherwise NULL.
4349 * Note, if the data pointer is used as the data itself (see
4350 * ftrace_func_mapper_find_ip()), then the return value may be
4351 * meaningless if the data pointer was set to zero.
4352 */
4353void *ftrace_func_mapper_remove_ip(struct ftrace_func_mapper *mapper,
4354                                   unsigned long ip)
4355{
4356        struct ftrace_func_entry *entry;
4357        struct ftrace_func_map *map;
4358        void *data;
4359
4360        entry = ftrace_lookup_ip(&mapper->hash, ip);
4361        if (!entry)
4362                return NULL;
4363
4364        map = (struct ftrace_func_map *)entry;
4365        data = map->data;
4366
4367        remove_hash_entry(&mapper->hash, entry);
4368        kfree(entry);
4369
4370        return data;
4371}
4372
4373/**
4374 * free_ftrace_func_mapper - free a mapping of ips and data
4375 * @mapper: The mapper that has the ip maps
4376 * @free_func: A function to be called on each data item.
4377 *
4378 * This is used to free the function mapper. The @free_func is optional
4379 * and can be used if the data needs to be freed as well.
4380 */
4381void free_ftrace_func_mapper(struct ftrace_func_mapper *mapper,
4382                             ftrace_mapper_func free_func)
4383{
4384        struct ftrace_func_entry *entry;
4385        struct ftrace_func_map *map;
4386        struct hlist_head *hhd;
4387        int size, i;
4388
4389        if (!mapper)
4390                return;
4391
4392        if (free_func && mapper->hash.count) {
4393                size = 1 << mapper->hash.size_bits;
4394                for (i = 0; i < size; i++) {
4395                        hhd = &mapper->hash.buckets[i];
4396                        hlist_for_each_entry(entry, hhd, hlist) {
4397                                map = (struct ftrace_func_map *)entry;
4398                                free_func(map);
4399                        }
4400                }
4401        }
4402        free_ftrace_hash(&mapper->hash);
4403}
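
/*
 * Editorial sketch: typical lifecycle of a mapper, using only the four
 * APIs above (error handling elided; my_free_func is a hypothetical
 * ftrace_mapper_func):
 *
 *	struct ftrace_func_mapper *mapper;
 *	void **val;
 *
 *	mapper = allocate_ftrace_func_mapper();
 *	ftrace_func_mapper_add_ip(mapper, ip, data);
 *	val = ftrace_func_mapper_find_ip(mapper, ip);	NULL if absent
 *	data = ftrace_func_mapper_remove_ip(mapper, ip);
 *	free_ftrace_func_mapper(mapper, my_free_func);
 */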
4404
4405static void release_probe(struct ftrace_func_probe *probe)
4406{
4407        struct ftrace_probe_ops *probe_ops;
4408
4409        mutex_lock(&ftrace_lock);
4410
4411        WARN_ON(probe->ref <= 0);
4412
4413        /* Subtract the ref that was used to protect this instance */
4414        probe->ref--;
4415
4416        if (!probe->ref) {
4417                probe_ops = probe->probe_ops;
4418                /*
4419                 * Sending zero as ip tells probe_ops to free
4420                 * the probe->data itself
4421                 */
4422                if (probe_ops->free)
4423                        probe_ops->free(probe_ops, probe->tr, 0, probe->data);
4424                list_del(&probe->list);
4425                kfree(probe);
4426        }
4427        mutex_unlock(&ftrace_lock);
4428}
4429
4430static void acquire_probe_locked(struct ftrace_func_probe *probe)
4431{
4432        /*
4433         * Add one ref to keep it from being freed when releasing the
4434         * ftrace_lock mutex.
4435         */
4436        probe->ref++;
4437}
4438
4439int
4440register_ftrace_function_probe(char *glob, struct trace_array *tr,
4441                               struct ftrace_probe_ops *probe_ops,
4442                               void *data)
4443{
4444        struct ftrace_func_entry *entry;
4445        struct ftrace_func_probe *probe;
4446        struct ftrace_hash **orig_hash;
4447        struct ftrace_hash *old_hash;
4448        struct ftrace_hash *hash;
4449        int count = 0;
4450        int size;
4451        int ret;
4452        int i;
4453
4454        if (WARN_ON(!tr))
4455                return -EINVAL;
4456
4457        /* We do not support '!' for function probes */
4458        if (WARN_ON(glob[0] == '!'))
4459                return -EINVAL;
4460
4461
4462        mutex_lock(&ftrace_lock);
4463        /* Check if the probe_ops is already registered */
4464        list_for_each_entry(probe, &tr->func_probes, list) {
4465                if (probe->probe_ops == probe_ops)
4466                        break;
4467        }
4468        if (&probe->list == &tr->func_probes) {
4469                probe = kzalloc(sizeof(*probe), GFP_KERNEL);
4470                if (!probe) {
4471                        mutex_unlock(&ftrace_lock);
4472                        return -ENOMEM;
4473                }
4474                probe->probe_ops = probe_ops;
4475                probe->ops.func = function_trace_probe_call;
4476                probe->tr = tr;
4477                ftrace_ops_init(&probe->ops);
4478                list_add(&probe->list, &tr->func_probes);
4479        }
4480
4481        acquire_probe_locked(probe);
4482
4483        mutex_unlock(&ftrace_lock);
4484
4485        /*
4486         * Note, there's a small window here that the func_hash->filter_hash
4487         * may be NULL or empty. Need to be careful when reading the loop.
4488         */
4489        mutex_lock(&probe->ops.func_hash->regex_lock);
4490
4491        orig_hash = &probe->ops.func_hash->filter_hash;
4492        old_hash = *orig_hash;
4493        hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, old_hash);
4494
4495        if (!hash) {
4496                ret = -ENOMEM;
4497                goto out;
4498        }
4499
4500        ret = ftrace_match_records(hash, glob, strlen(glob));
4501
4502        /* Nothing found? */
4503        if (!ret)
4504                ret = -EINVAL;
4505
4506        if (ret < 0)
4507                goto out;
4508
4509        size = 1 << hash->size_bits;
4510        for (i = 0; i < size; i++) {
4511                hlist_for_each_entry(entry, &hash->buckets[i], hlist) {
4512                        if (ftrace_lookup_ip(old_hash, entry->ip))
4513                                continue;
4514                        /*
4515                         * The caller might want to do something special
4516                         * for each function we find. We call the callback
4517                         * to give the caller an opportunity to do so.
4518                         */
4519                        if (probe_ops->init) {
4520                                ret = probe_ops->init(probe_ops, tr,
4521                                                      entry->ip, data,
4522                                                      &probe->data);
4523                                if (ret < 0) {
4524                                        if (probe_ops->free && count)
4525                                                probe_ops->free(probe_ops, tr,
4526                                                                0, probe->data);
4527                                        probe->data = NULL;
4528                                        goto out;
4529                                }
4530                        }
4531                        count++;
4532                }
4533        }
4534
4535        mutex_lock(&ftrace_lock);
4536
4537        if (!count) {
4538                /* Nothing was added? */
4539                ret = -EINVAL;
4540                goto out_unlock;
4541        }
4542
4543        ret = ftrace_hash_move_and_update_ops(&probe->ops, orig_hash,
4544                                              hash, 1);
4545        if (ret < 0)
4546                goto err_unlock;
4547
4548        /* One ref for each new function traced */
4549        probe->ref += count;
4550
4551        if (!(probe->ops.flags & FTRACE_OPS_FL_ENABLED))
4552                ret = ftrace_startup(&probe->ops, 0);
4553
4554 out_unlock:
4555        mutex_unlock(&ftrace_lock);
4556
4557        if (!ret)
4558                ret = count;
4559 out:
4560        mutex_unlock(&probe->ops.func_hash->regex_lock);
4561        free_ftrace_hash(hash);
4562
4563        release_probe(probe);
4564
4565        return ret;
4566
4567 err_unlock:
4568        if (!probe_ops->free || !count)
4569                goto out_unlock;
4570
4571        /* Failed to do the move, need to call the free functions */
4572        for (i = 0; i < size; i++) {
4573                hlist_for_each_entry(entry, &hash->buckets[i], hlist) {
4574                        if (ftrace_lookup_ip(old_hash, entry->ip))
4575                                continue;
4576                        probe_ops->free(probe_ops, tr, entry->ip, probe->data);
4577                }
4578        }
4579        goto out_unlock;
4580}
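
/*
 * Editorial sketch: the shape of a register_ftrace_function_probe()
 * caller. All "my_*" names are hypothetical; the callback signature
 * follows the probe_ops uses above.
 *
 *	static void my_probe_func(unsigned long ip, unsigned long parent_ip,
 *				  struct trace_array *tr,
 *				  struct ftrace_probe_ops *ops, void *data)
 *	{
 *	}
 *
 *	static struct ftrace_probe_ops my_probe_ops = {
 *		.func	= my_probe_func,
 *	};
 *
 *	ret = register_ftrace_function_probe("vfs_*", tr, &my_probe_ops,
 *					     my_data);
 *
 * On success ret is the number of functions hooked; '!' globs are
 * rejected up front, and unregister_ftrace_function_probe_func() tears
 * the probe down again.
 */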
4581
4582int
4583unregister_ftrace_function_probe_func(char *glob, struct trace_array *tr,
4584                                      struct ftrace_probe_ops *probe_ops)
4585{
4586        struct ftrace_ops_hash old_hash_ops;
4587        struct ftrace_func_entry *entry;
4588        struct ftrace_func_probe *probe;
4589        struct ftrace_glob func_g;
4590        struct ftrace_hash **orig_hash;
4591        struct ftrace_hash *old_hash;
4592        struct ftrace_hash *hash = NULL;
4593        struct hlist_node *tmp;
4594        struct hlist_head hhd;
4595        char str[KSYM_SYMBOL_LEN];
4596        int count = 0;
4597        int i, ret = -ENODEV;
4598        int size;
4599
4600        if (!glob || !strlen(glob) || !strcmp(glob, "*"))
4601                func_g.search = NULL;
4602        else {
4603                int not;
4604
4605                func_g.type = filter_parse_regex(glob, strlen(glob),
4606                                                 &func_g.search, &not);
4607                func_g.len = strlen(func_g.search);
4608
4609                /* we do not support '!' for function probes */
4610                if (WARN_ON(not))
4611                        return -EINVAL;
4612        }
4613
4614        mutex_lock(&ftrace_lock);
4615        /* Check if the probe_ops is already registered */
4616        list_for_each_entry(probe, &tr->func_probes, list) {
4617                if (probe->probe_ops == probe_ops)
4618                        break;
4619        }
4620        if (&probe->list == &tr->func_probes)
4621                goto err_unlock_ftrace;
4622
4623        ret = -EINVAL;
4624        if (!(probe->ops.flags & FTRACE_OPS_FL_INITIALIZED))
4625                goto err_unlock_ftrace;
4626
4627        acquire_probe_locked(probe);
4628
4629        mutex_unlock(&ftrace_lock);
4630
4631        mutex_lock(&probe->ops.func_hash->regex_lock);
4632
4633        orig_hash = &probe->ops.func_hash->filter_hash;
4634        old_hash = *orig_hash;
4635
4636        if (ftrace_hash_empty(old_hash))
4637                goto out_unlock;
4638
4639        old_hash_ops.filter_hash = old_hash;
4640        /* Probes only have filters */
4641        old_hash_ops.notrace_hash = NULL;
4642
4643        ret = -ENOMEM;
4644        hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, old_hash);
4645        if (!hash)
4646                goto out_unlock;
4647
4648        INIT_HLIST_HEAD(&hhd);
4649
4650        size = 1 << hash->size_bits;
4651        for (i = 0; i < size; i++) {
4652                hlist_for_each_entry_safe(entry, tmp, &hash->buckets[i], hlist) {
4653
4654                        if (func_g.search) {
4655                                kallsyms_lookup(entry->ip, NULL, NULL,
4656                                                NULL, str);
4657                                if (!ftrace_match(str, &func_g))
4658                                        continue;
4659                        }
4660                        count++;
4661                        remove_hash_entry(hash, entry);
4662                        hlist_add_head(&entry->hlist, &hhd);
4663                }
4664        }
4665
4666        /* Nothing found? */
4667        if (!count) {
4668                ret = -EINVAL;
4669                goto out_unlock;
4670        }
4671
4672        mutex_lock(&ftrace_lock);
4673
4674        WARN_ON(probe->ref < count);
4675
4676        probe->ref -= count;
4677
4678        if (ftrace_hash_empty(hash))
4679                ftrace_shutdown(&probe->ops, 0);
4680
4681        ret = ftrace_hash_move_and_update_ops(&probe->ops, orig_hash,
4682                                              hash, 1);
4683
4684        /* still need to update the function call sites */
4685        if (ftrace_enabled && !ftrace_hash_empty(hash))
4686                ftrace_run_modify_code(&probe->ops, FTRACE_UPDATE_CALLS,
4687                                       &old_hash_ops);
4688        synchronize_rcu();
4689
4690        hlist_for_each_entry_safe(entry, tmp, &hhd, hlist) {
4691                hlist_del(&entry->hlist);
4692                if (probe_ops->free)
4693                        probe_ops->free(probe_ops, tr, entry->ip, probe->data);
4694                kfree(entry);
4695        }
4696        mutex_unlock(&ftrace_lock);
4697
4698 out_unlock:
4699        mutex_unlock(&probe->ops.func_hash->regex_lock);
4700        free_ftrace_hash(hash);
4701
4702        release_probe(probe);
4703
4704        return ret;
4705
4706 err_unlock_ftrace:
4707        mutex_unlock(&ftrace_lock);
4708        return ret;
4709}
4710
4711void clear_ftrace_function_probes(struct trace_array *tr)
4712{
4713        struct ftrace_func_probe *probe, *n;
4714
4715        list_for_each_entry_safe(probe, n, &tr->func_probes, list)
4716                unregister_ftrace_function_probe_func(NULL, tr, probe->probe_ops);
4717}
4718
4719static LIST_HEAD(ftrace_commands);
4720static DEFINE_MUTEX(ftrace_cmd_mutex);
4721
4722/*
4723 * Currently we only register ftrace commands from __init, so mark this
4724 * __init too.
4725 */
4726__init int register_ftrace_command(struct ftrace_func_command *cmd)
4727{
4728        struct ftrace_func_command *p;
4729        int ret = 0;
4730
4731        mutex_lock(&ftrace_cmd_mutex);
4732        list_for_each_entry(p, &ftrace_commands, list) {
4733                if (strcmp(cmd->name, p->name) == 0) {
4734                        ret = -EBUSY;
4735                        goto out_unlock;
4736                }
4737        }
4738        list_add(&cmd->list, &ftrace_commands);
4739 out_unlock:
4740        mutex_unlock(&ftrace_cmd_mutex);
4741
4742        return ret;
4743}
4744
4745/*
4746 * Currently we only unregister ftrace commands from __init, so mark
4747 * this __init too.
4748 */
4749__init int unregister_ftrace_command(struct ftrace_func_command *cmd)
4750{
4751        struct ftrace_func_command *p, *n;
4752        int ret = -ENODEV;
4753
4754        mutex_lock(&ftrace_cmd_mutex);
4755        list_for_each_entry_safe(p, n, &ftrace_commands, list) {
4756                if (strcmp(cmd->name, p->name) == 0) {
4757                        ret = 0;
4758                        list_del_init(&p->list);
4759                        goto out_unlock;
4760                }
4761        }
4762 out_unlock:
4763        mutex_unlock(&ftrace_cmd_mutex);
4764
4765        return ret;
4766}
4767
4768static int ftrace_process_regex(struct ftrace_iterator *iter,
4769                                char *buff, int len, int enable)
4770{
4771        struct ftrace_hash *hash = iter->hash;
4772        struct trace_array *tr = iter->ops->private;
4773        char *func, *command, *next = buff;
4774        struct ftrace_func_command *p;
4775        int ret = -EINVAL;
4776
4777        func = strsep(&next, ":");
4778
4779        if (!next) {
4780                ret = ftrace_match_records(hash, func, len);
4781                if (!ret)
4782                        ret = -EINVAL;
4783                if (ret < 0)
4784                        return ret;
4785                return 0;
4786        }
4787
4788        /* command found */
4789
4790        command = strsep(&next, ":");
4791
4792        mutex_lock(&ftrace_cmd_mutex);
4793        list_for_each_entry(p, &ftrace_commands, list) {
4794                if (strcmp(p->name, command) == 0) {
4795                        ret = p->func(tr, hash, func, command, next, enable);
4796                        goto out_unlock;
4797                }
4798        }
4799 out_unlock:
4800        mutex_unlock(&ftrace_cmd_mutex);
4801
4802        return ret;
4803}
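
/*
 * Editorial example: the strsep() parsing above splits writes of the
 * form "<func>:<command>:<param>". With the "mod" command registered
 * earlier, a write of (module name illustrative)
 *
 *	echo 'write*:mod:ext4' > set_ftrace_filter
 *
 * arrives here as func = "write*", command = "mod", next = "ext4".
 * A plain "<func>" with no ':' goes straight to ftrace_match_records().
 */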
4804
4805static ssize_t
4806ftrace_regex_write(struct file *file, const char __user *ubuf,
4807                   size_t cnt, loff_t *ppos, int enable)
4808{
4809        struct ftrace_iterator *iter;
4810        struct trace_parser *parser;
4811        ssize_t ret, read;
4812
4813        if (!cnt)
4814                return 0;
4815
4816        if (file->f_mode & FMODE_READ) {
4817                struct seq_file *m = file->private_data;
4818                iter = m->private;
4819        } else
4820                iter = file->private_data;
4821
4822        if (unlikely(ftrace_disabled))
4823                return -ENODEV;
4824
4825        /* iter->hash is a local copy, so we don't need regex_lock */
4826
4827        parser = &iter->parser;
4828        read = trace_get_user(parser, ubuf, cnt, ppos);
4829
4830        if (read >= 0 && trace_parser_loaded(parser) &&
4831            !trace_parser_cont(parser)) {
4832                ret = ftrace_process_regex(iter, parser->buffer,
4833                                           parser->idx, enable);
4834                trace_parser_clear(parser);
4835                if (ret < 0)
4836                        goto out;
4837        }
4838
4839        ret = read;
4840 out:
4841        return ret;
4842}
4843
4844ssize_t
4845ftrace_filter_write(struct file *file, const char __user *ubuf,
4846                    size_t cnt, loff_t *ppos)
4847{
4848        return ftrace_regex_write(file, ubuf, cnt, ppos, 1);
4849}
4850
4851ssize_t
4852ftrace_notrace_write(struct file *file, const char __user *ubuf,
4853                     size_t cnt, loff_t *ppos)
4854{
4855        return ftrace_regex_write(file, ubuf, cnt, ppos, 0);
4856}
4857
4858static int
4859ftrace_match_addr(struct ftrace_hash *hash, unsigned long ip, int remove)
4860{
4861        struct ftrace_func_entry *entry;
4862
4863        if (!ftrace_location(ip))
4864                return -EINVAL;
4865
4866        if (remove) {
4867                entry = ftrace_lookup_ip(hash, ip);
4868                if (!entry)
4869                        return -ENOENT;
4870                free_hash_entry(hash, entry);
4871                return 0;
4872        }
4873
4874        return add_hash_entry(hash, ip);
4875}
4876
4877static int
4878ftrace_set_hash(struct ftrace_ops *ops, unsigned char *buf, int len,
4879                unsigned long ip, int remove, int reset, int enable)
4880{
4881        struct ftrace_hash **orig_hash;
4882        struct ftrace_hash *hash;
4883        int ret;
4884
4885        if (unlikely(ftrace_disabled))
4886                return -ENODEV;
4887
4888        mutex_lock(&ops->func_hash->regex_lock);
4889
4890        if (enable)
4891                orig_hash = &ops->func_hash->filter_hash;
4892        else
4893                orig_hash = &ops->func_hash->notrace_hash;
4894
4895        if (reset)
4896                hash = alloc_ftrace_hash(FTRACE_HASH_DEFAULT_BITS);
4897        else
4898                hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash);
4899
4900        if (!hash) {
4901                ret = -ENOMEM;
4902                goto out_regex_unlock;
4903        }
4904
4905        if (buf && !ftrace_match_records(hash, buf, len)) {
4906                ret = -EINVAL;
4907                goto out_regex_unlock;
4908        }
4909        if (ip) {
4910                ret = ftrace_match_addr(hash, ip, remove);
4911                if (ret < 0)
4912                        goto out_regex_unlock;
4913        }
4914
4915        mutex_lock(&ftrace_lock);
4916        ret = ftrace_hash_move_and_update_ops(ops, orig_hash, hash, enable);
4917        mutex_unlock(&ftrace_lock);
4918
4919 out_regex_unlock:
4920        mutex_unlock(&ops->func_hash->regex_lock);
4921
4922        free_ftrace_hash(hash);
4923        return ret;
4924}
4925
4926static int
4927ftrace_set_addr(struct ftrace_ops *ops, unsigned long ip, int remove,
4928                int reset, int enable)
4929{
4930        return ftrace_set_hash(ops, NULL, 0, ip, remove, reset, enable);
4931}
4932
4933#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
4934
4935struct ftrace_direct_func {
4936        struct list_head        next;
4937        unsigned long           addr;
4938        int                     count;
4939};
4940
4941static LIST_HEAD(ftrace_direct_funcs);
4942
4943/**
4944 * ftrace_find_direct_func - check if an address is a registered direct caller
4945 * @addr: The address of a registered direct caller
4946 *
4947 * This searches to see if a ftrace direct caller has been registered
4948 * at a specific address, and if so, it returns a descriptor for it.
4949 *
4950 * This can be used by architecture code to see if an address is
4951 * a direct caller (trampoline) attached to a fentry/mcount location.
4952 * This is useful for the function_graph tracer, as it may need to
4953 * do adjustments if it traced a location that also has a direct
4954 * trampoline attached to it.
4955 */
4956struct ftrace_direct_func *ftrace_find_direct_func(unsigned long addr)
4957{
4958        struct ftrace_direct_func *entry;
4959        bool found = false;
4960
4961        /* May be called by fgraph trampoline (protected by rcu tasks) */
4962        list_for_each_entry_rcu(entry, &ftrace_direct_funcs, next) {
4963                if (entry->addr == addr) {
4964                        found = true;
4965                        break;
4966                }
4967        }
4968        if (found)
4969                return entry;
4970
4971        return NULL;
4972}
4973
4974/**
4975 * register_ftrace_direct - Call a custom trampoline directly
4976 * @ip: The address of the nop at the beginning of a function
4977 * @addr: The address of the trampoline to call at @ip
4978 *
4979 * This is used to connect a direct call from the nop location (@ip)
4980 * at the start of ftrace traced functions. The location that it calls
4981 * (@addr) must be able to handle a direct call, and save the parameters
4982 * of the function being traced, and restore them (or inject new ones
4983 * if needed), before returning.
4984 *
4985 * Returns:
4986 *  0 on success
4987 *  -EBUSY - Another direct function is already attached (there can be only one)
4988 *  -ENODEV - @ip does not point to a ftrace nop location (or not supported)
4989 *  -ENOMEM - There was an allocation failure.
4990 */
4991int register_ftrace_direct(unsigned long ip, unsigned long addr)
4992{
4993        struct ftrace_direct_func *direct;
4994        struct ftrace_func_entry *entry;
4995        struct ftrace_hash *free_hash = NULL;
4996        struct dyn_ftrace *rec;
4997        int ret = -EBUSY;
4998
4999        mutex_lock(&direct_mutex);
5000
5001        /* See if there's a direct function at @ip already */
5002        if (ftrace_find_rec_direct(ip))
5003                goto out_unlock;
5004
5005        ret = -ENODEV;
5006        rec = lookup_rec(ip, ip);
5007        if (!rec)
5008                goto out_unlock;
5009
5010        /*
5011         * Check if the rec says it has a direct call even though
5012         * we didn't find one earlier.
5013         */
5014        if (WARN_ON(rec->flags & FTRACE_FL_DIRECT))
5015                goto out_unlock;
5016
5017        /* Make sure the ip points to the exact record */
5018        if (ip != rec->ip) {
5019                ip = rec->ip;
5020                /* Need to check this ip for a direct. */
5021                if (ftrace_find_rec_direct(ip))
5022                        goto out_unlock;
5023        }
5024
5025        ret = -ENOMEM;
5026        if (ftrace_hash_empty(direct_functions) ||
5027            direct_functions->count > 2 * (1 << direct_functions->size_bits)) {
5028                struct ftrace_hash *new_hash;
5029                int size = ftrace_hash_empty(direct_functions) ? 0 :
5030                        direct_functions->count + 1;
5031
5032                if (size < 32)
5033                        size = 32;
5034
5035                new_hash = dup_hash(direct_functions, size);
5036                if (!new_hash)
5037                        goto out_unlock;
5038
5039                free_hash = direct_functions;
5040                direct_functions = new_hash;
5041        }
5042
5043        entry = kmalloc(sizeof(*entry), GFP_KERNEL);
5044        if (!entry)
5045                goto out_unlock;
5046
5047        direct = ftrace_find_direct_func(addr);
5048        if (!direct) {
5049                direct = kmalloc(sizeof(*direct), GFP_KERNEL);
5050                if (!direct) {
5051                        kfree(entry);
5052                        goto out_unlock;
5053                }
5054                direct->addr = addr;
5055                direct->count = 0;
5056                list_add_rcu(&direct->next, &ftrace_direct_funcs);
5057                ftrace_direct_func_count++;
5058        }
5059
5060        entry->ip = ip;
5061        entry->direct = addr;
5062        __add_hash_entry(direct_functions, entry);
5063
5064        ret = ftrace_set_filter_ip(&direct_ops, ip, 0, 0);
5065        if (ret)
5066                remove_hash_entry(direct_functions, entry);
5067
5068        if (!ret && !(direct_ops.flags & FTRACE_OPS_FL_ENABLED)) {
5069                ret = register_ftrace_function(&direct_ops);
5070                if (ret)
5071                        ftrace_set_filter_ip(&direct_ops, ip, 1, 0);
5072        }
5073
5074        if (ret) {
5075                kfree(entry);
5076                if (!direct->count) {
5077                        list_del_rcu(&direct->next);
5078                        synchronize_rcu_tasks();
5079                        kfree(direct);
5080                        if (free_hash)
5081                                free_ftrace_hash(free_hash);
5082                        free_hash = NULL;
5083                        ftrace_direct_func_count--;
5084                }
5085        } else {
5086                direct->count++;
5087        }
5088 out_unlock:
5089        mutex_unlock(&direct_mutex);
5090
5091        if (free_hash) {
5092                synchronize_rcu_tasks();
5093                free_ftrace_hash(free_hash);
5094        }
5095
5096        return ret;
5097}
5098EXPORT_SYMBOL_GPL(register_ftrace_direct);
5099
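/*
 * A minimal sketch of a user, modeled on samples/ftrace/ftrace-direct.c
 * (my_tramp is a hypothetical architecture-specific assembly trampoline
 * that saves the traced function's arguments, calls a C handler, then
 * restores the registers before returning):
 *
 *	extern void my_tramp(void *);
 *
 *	static int __init my_direct_init(void)
 *	{
 *		return register_ftrace_direct((unsigned long)wake_up_process,
 *					      (unsigned long)my_tramp);
 *	}
 */
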
5100static struct ftrace_func_entry *find_direct_entry(unsigned long *ip,
5101                                                   struct dyn_ftrace **recp)
5102{
5103        struct ftrace_func_entry *entry;
5104        struct dyn_ftrace *rec;
5105
5106        rec = lookup_rec(*ip, *ip);
5107        if (!rec)
5108                return NULL;
5109
5110        entry = __ftrace_lookup_ip(direct_functions, rec->ip);
5111        if (!entry) {
5112                WARN_ON(rec->flags & FTRACE_FL_DIRECT);
5113                return NULL;
5114        }
5115
5116        WARN_ON(!(rec->flags & FTRACE_FL_DIRECT));
5117
5118        /* The passed-in ip just needs to fall within the call site */
5119        *ip = rec->ip;
5120
5121        if (recp)
5122                *recp = rec;
5123
5124        return entry;
5125}
5126
5127int unregister_ftrace_direct(unsigned long ip, unsigned long addr)
5128{
5129        struct ftrace_direct_func *direct;
5130        struct ftrace_func_entry *entry;
5131        int ret = -ENODEV;
5132
5133        mutex_lock(&direct_mutex);
5134
5135        entry = find_direct_entry(&ip, NULL);
5136        if (!entry)
5137                goto out_unlock;
5138
5139        if (direct_functions->count == 1)
5140                unregister_ftrace_function(&direct_ops);
5141
5142        ret = ftrace_set_filter_ip(&direct_ops, ip, 1, 0);
5143
5144        WARN_ON(ret);
5145
5146        remove_hash_entry(direct_functions, entry);
5147
5148        direct = ftrace_find_direct_func(addr);
5149        if (!WARN_ON(!direct)) {
5150                /* This is the good path (see the ! before WARN) */
5151                direct->count--;
5152                WARN_ON(direct->count < 0);
5153                if (!direct->count) {
5154                        list_del_rcu(&direct->next);
5155                        synchronize_rcu_tasks();
5156                        kfree(direct);
5157                        kfree(entry);
5158                        ftrace_direct_func_count--;
5159                }
5160        }
5161 out_unlock:
5162        mutex_unlock(&direct_mutex);
5163
5164        return ret;
5165}
5166EXPORT_SYMBOL_GPL(unregister_ftrace_direct);
5167
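/*
 * Continuing the register_ftrace_direct() sketch above, the module exit
 * path would undo the attachment (same hypothetical my_tramp):
 *
 *	static void __exit my_direct_exit(void)
 *	{
 *		unregister_ftrace_direct((unsigned long)wake_up_process,
 *					 (unsigned long)my_tramp);
 *	}
 */
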
5168static struct ftrace_ops stub_ops = {
5169        .func           = ftrace_stub,
5170};
5171
5172/**
5173 * ftrace_modify_direct_caller - modify ftrace nop directly
5174 * @entry: The ftrace hash entry of the direct helper for @rec
5175 * @rec: The record representing the function site to patch
5176 * @old_addr: The location that the site at @rec->ip currently calls
5177 * @new_addr: The location that the site at @rec->ip should call
5178 *
5179 * An architecture may override this function to optimize the
5180 * changing of the direct callback on an ftrace nop location.
5181 * This is called with the ftrace_lock mutex held, and no other
5182 * ftrace callbacks are on the associated record (@rec). Thus,
5183 * it is safe to modify the ftrace record, where it should be
5184 * currently calling @old_addr directly, to call @new_addr.
5185 *
5186 * Safety checks should be made to ensure that the code at
5187 * @rec->ip is currently calling @old_addr, and this function
5188 * must also update entry->direct to @new_addr.
5189 */
5190int __weak ftrace_modify_direct_caller(struct ftrace_func_entry *entry,
5191                                       struct dyn_ftrace *rec,
5192                                       unsigned long old_addr,
5193                                       unsigned long new_addr)
5194{
5195        unsigned long ip = rec->ip;
5196        int ret;
5197
5198        /*
5199         * The ftrace_lock was used to determine if the record
5200         * had more than one registered user. If it did, we needed
5201         * to prevent that from changing in order to do the quick
5202         * switch. But if it did not (only a direct caller was
5203         * attached) then this function is called. This function can
5204         * cope with other callers attaching to the rec we care
5205         * about, and since it uses standard ftrace calls that take
5206         * the ftrace_lock mutex, we need to release the lock first.
5207         */
5208        mutex_unlock(&ftrace_lock);
5209
5210        /*
5211         * By setting a stub function at the same address, we force
5212         * the code to call the iterator and the direct_ops helper.
5213         * This means that @ip does not call the direct call, and
5214         * we can simply modify it.
5215         */
5216        ret = ftrace_set_filter_ip(&stub_ops, ip, 0, 0);
5217        if (ret)
5218                goto out_lock;
5219
5220        ret = register_ftrace_function(&stub_ops);
5221        if (ret) {
5222                ftrace_set_filter_ip(&stub_ops, ip, 1, 0);
5223                goto out_lock;
5224        }
5225
5226        entry->direct = new_addr;
5227
5228        /*
5229         * By removing the stub, we put back the direct call, calling
5230         * the @new_addr.
5231         */
5232        unregister_ftrace_function(&stub_ops);
5233        ftrace_set_filter_ip(&stub_ops, ip, 1, 0);
5234
5235 out_lock:
5236        mutex_lock(&ftrace_lock);
5237
5238        return ret;
5239}
5240
5241/**
5242 * modify_ftrace_direct - Modify an existing direct call to call something else
5243 * @ip: The instruction pointer to modify
5244 * @old_addr: The address that the current @ip calls directly
5245 * @new_addr: The address that the @ip should call
5246 *
5247 * This modifies a ftrace direct caller at an instruction pointer without
5248 * having to disable it first. The direct call will switch over to the
5249 * @new_addr without missing anything.
5250 *
5251 * Returns: zero on success. Non-zero on error, which includes:
5252 *  -ENODEV : the @ip given has no direct caller attached
5253 *  -EINVAL : the @old_addr does not match the current direct caller
5254 */
5255int modify_ftrace_direct(unsigned long ip,
5256                         unsigned long old_addr, unsigned long new_addr)
5257{
5258        struct ftrace_func_entry *entry;
5259        struct dyn_ftrace *rec;
5260        int ret = -ENODEV;
5261
5262        mutex_lock(&direct_mutex);
5263
5264        mutex_lock(&ftrace_lock);
5265        entry = find_direct_entry(&ip, &rec);
5266        if (!entry)
5267                goto out_unlock;
5268
5269        ret = -EINVAL;
5270        if (entry->direct != old_addr)
5271                goto out_unlock;
5272
5273        /*
5274         * If there's no other ftrace callback on the rec->ip location,
5275         * then it can be changed directly by the architecture.
5276         * If there is another caller, then we just need to change the
5277         * direct caller helper to point to @new_addr.
5278         */
5279        if (ftrace_rec_count(rec) == 1) {
5280                ret = ftrace_modify_direct_caller(entry, rec, old_addr, new_addr);
5281        } else {
5282                entry->direct = new_addr;
5283                ret = 0;
5284        }
5285
5286 out_unlock:
5287        mutex_unlock(&ftrace_lock);
5288        mutex_unlock(&direct_mutex);
5289        return ret;
5290}
5291EXPORT_SYMBOL_GPL(modify_ftrace_direct);
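
/*
 * A sketch in the spirit of samples/ftrace/ftrace-direct-modify.c,
 * atomically switching a call site from the hypothetical my_tramp of
 * the examples above to a second hypothetical trampoline my_tramp2:
 *
 *	ret = modify_ftrace_direct((unsigned long)wake_up_process,
 *				   (unsigned long)my_tramp,
 *				   (unsigned long)my_tramp2);
 */
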
5292#endif /* CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */
5293
5294/**
5295 * ftrace_set_filter_ip - set a function to filter on in ftrace by address
5296 * @ops: the ops to set the filter with
5297 * @ip: the address to add to or remove from the filter.
5298 * @remove: non-zero to remove the ip from the filter
5299 * @reset: non-zero to reset all filters before applying this filter.
5300 *
5301 * Filters denote which functions should be enabled when tracing is enabled.
5302 * If @ip is NULL, it fails to update the filter.
5303 */
5304int ftrace_set_filter_ip(struct ftrace_ops *ops, unsigned long ip,
5305                         int remove, int reset)
5306{
5307        ftrace_ops_init(ops);
5308        return ftrace_set_addr(ops, ip, remove, reset, 1);
5309}
5310EXPORT_SYMBOL_GPL(ftrace_set_filter_ip);
5311
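/*
 * Example sketch (my_ops is a hypothetical ftrace_ops with its ->func
 * callback already set up):
 *
 *	ret = ftrace_set_filter_ip(&my_ops, (unsigned long)schedule, 0, 1);
 *
 * This resets any previous filter on my_ops and then traces only
 * schedule(). Calling it again with @remove set would drop the address
 * from the filter.
 */
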
5312/**
5313 * ftrace_ops_set_global_filter - setup ops to use global filters
5314 * @ops: the ops which will use the global filters
5315 *
5316 * ftrace users who need global function trace filtering should call this.
5317 * It can set the global filter only if ops were not initialized before.
5318 */
5319void ftrace_ops_set_global_filter(struct ftrace_ops *ops)
5320{
5321        if (ops->flags & FTRACE_OPS_FL_INITIALIZED)
5322                return;
5323
5324        ftrace_ops_init(ops);
5325        ops->func_hash = &global_ops.local_hash;
5326}
5327EXPORT_SYMBOL_GPL(ftrace_ops_set_global_filter);
5328
5329static int
5330ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len,
5331                 int reset, int enable)
5332{
5333        return ftrace_set_hash(ops, buf, len, 0, 0, reset, enable);
5334}
5335
5336/**
5337 * ftrace_set_filter - set a function to filter on in ftrace
5338 * @ops: the ops to set the filter with
5339 * @buf: the string that holds the function filter text.
5340 * @len: the length of the string.
5341 * @reset: non-zero to reset all filters before applying this filter.
5342 *
5343 * Filters denote which functions should be enabled when tracing is enabled.
5344 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
5345 */
5346int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
5347                       int len, int reset)
5348{
5349        ftrace_ops_init(ops);
5350        return ftrace_set_regex(ops, buf, len, reset, 1);
5351}
5352EXPORT_SYMBOL_GPL(ftrace_set_filter);
5353
5354/**
5355 * ftrace_set_notrace - set a function to not trace in ftrace
5356 * @ops: the ops to set the notrace filter with
5357 * @buf: the string that holds the function notrace text.
5358 * @len: the length of the string.
5359 * @reset: non-zero to reset all filters before applying this filter.
5360 *
5361 * Notrace filters denote which functions should not be enabled when tracing
5362 * is enabled. If @buf is NULL and reset is set, all functions will be enabled
5363 * for tracing.
5364 */
5365int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
5366                        int len, int reset)
5367{
5368        ftrace_ops_init(ops);
5369        return ftrace_set_regex(ops, buf, len, reset, 0);
5370}
5371EXPORT_SYMBOL_GPL(ftrace_set_notrace);
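
/*
 * The two calls are typically combined; a sketch with a hypothetical
 * my_ops that traces all wakeup functions except one:
 *
 *	ftrace_set_filter(&my_ops, "wake_up*", strlen("wake_up*"), 1);
 *	ftrace_set_notrace(&my_ops, "wake_up_new_task",
 *			   strlen("wake_up_new_task"), 1);
 *	register_ftrace_function(&my_ops);
 */
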
5372/**
5373 * ftrace_set_global_filter - set a function to filter on with global tracers
5374 * @buf: the string that holds the function filter text.
5375 * @len: the length of the string.
5376 * @reset: non-zero to reset all filters before applying this filter.
5377 *
5378 * Filters denote which functions should be enabled when tracing is enabled.
5379 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
5380 */
5381void ftrace_set_global_filter(unsigned char *buf, int len, int reset)
5382{
5383        ftrace_set_regex(&global_ops, buf, len, reset, 1);
5384}
5385EXPORT_SYMBOL_GPL(ftrace_set_global_filter);
5386
5387/**
5388 * ftrace_set_global_notrace - set a function to not trace with global tracers
5389 * @buf: the string that holds the function notrace text.
5390 * @len: the length of the string.
5391 * @reset: non-zero to reset all filters before applying this filter.
5392 *
5393 * Notrace filters denote which functions should not be enabled when tracing
5394 * is enabled. If @buf is NULL and reset is set, all functions will be enabled
5395 * for tracing.
5396 */
5397void ftrace_set_global_notrace(unsigned char *buf, int len, int reset)
5398{
5399        ftrace_set_regex(&global_ops, buf, len, reset, 0);
5400}
5401EXPORT_SYMBOL_GPL(ftrace_set_global_notrace);
5402
5403/*
5404 * command line interface to allow users to set filters on boot up.
5405 */
5406#define FTRACE_FILTER_SIZE              COMMAND_LINE_SIZE
5407static char ftrace_notrace_buf[FTRACE_FILTER_SIZE] __initdata;
5408static char ftrace_filter_buf[FTRACE_FILTER_SIZE] __initdata;
5409
5410/* Used by the function selftest to skip the test when a boot-time filter is set */
5411bool ftrace_filter_param __initdata;
5412
5413static int __init set_ftrace_notrace(char *str)
5414{
5415        ftrace_filter_param = true;
5416        strlcpy(ftrace_notrace_buf, str, FTRACE_FILTER_SIZE);
5417        return 1;
5418}
5419__setup("ftrace_notrace=", set_ftrace_notrace);
5420
5421static int __init set_ftrace_filter(char *str)
5422{
5423        ftrace_filter_param = true;
5424        strlcpy(ftrace_filter_buf, str, FTRACE_FILTER_SIZE);
5425        return 1;
5426}
5427__setup("ftrace_filter=", set_ftrace_filter);
5428
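/*
 * For example, booting with:
 *
 *	ftrace_filter=schedule*,sys_nanosleep ftrace_notrace=rcu_read_lock
 *
 * pre-loads the global filter and notrace hashes before tracing is
 * enabled from user space.
 */
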
5429#ifdef CONFIG_FUNCTION_GRAPH_TRACER
5430static char ftrace_graph_buf[FTRACE_FILTER_SIZE] __initdata;
5431static char ftrace_graph_notrace_buf[FTRACE_FILTER_SIZE] __initdata;
5432static int ftrace_graph_set_hash(struct ftrace_hash *hash, char *buffer);
5433
5434static int __init set_graph_function(char *str)
5435{
5436        strlcpy(ftrace_graph_buf, str, FTRACE_FILTER_SIZE);
5437        return 1;
5438}
5439__setup("ftrace_graph_filter=", set_graph_function);
5440
5441static int __init set_graph_notrace_function(char *str)
5442{
5443        strlcpy(ftrace_graph_notrace_buf, str, FTRACE_FILTER_SIZE);
5444        return 1;
5445}
5446__setup("ftrace_graph_notrace=", set_graph_notrace_function);
5447
5448static int __init set_graph_max_depth_function(char *str)
5449{
5450        if (!str)
5451                return 0;
5452        fgraph_max_depth = simple_strtoul(str, NULL, 0);
5453        return 1;
5454}
5455__setup("ftrace_graph_max_depth=", set_graph_max_depth_function);
5456
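/*
 * For example, booting with:
 *
 *	ftrace_graph_filter=kmem_cache_alloc ftrace_graph_max_depth=2
 *
 * restricts the graph tracer to kmem_cache_alloc() and at most two
 * levels of calls made from it.
 */
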
5457static void __init set_ftrace_early_graph(char *buf, int enable)
5458{
5459        int ret;
5460        char *func;
5461        struct ftrace_hash *hash;
5462
5463        hash = alloc_ftrace_hash(FTRACE_HASH_DEFAULT_BITS);
5464        if (MEM_FAIL(!hash, "Failed to allocate hash\n"))
5465                return;
5466
5467        while (buf) {
5468                func = strsep(&buf, ",");
5469                /* we allow only one expression at a time */
5470                ret = ftrace_graph_set_hash(hash, func);
5471                if (ret)
5472                        printk(KERN_DEBUG
5473                               "ftrace: function %s not traceable\n", func);
5474        }
5475
5476        if (enable)
5477                ftrace_graph_hash = hash;
5478        else
5479                ftrace_graph_notrace_hash = hash;
5480}
5481#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
5482
5483void __init
5484ftrace_set_early_filter(struct ftrace_ops *ops, char *buf, int enable)
5485{
5486        char *func;
5487
5488        ftrace_ops_init(ops);
5489
5490        while (buf) {
5491                func = strsep(&buf, ",");
5492                ftrace_set_regex(ops, func, strlen(func), 0, enable);
5493        }
5494}
5495
5496static void __init set_ftrace_early_filters(void)
5497{
5498        if (ftrace_filter_buf[0])
5499                ftrace_set_early_filter(&global_ops, ftrace_filter_buf, 1);
5500        if (ftrace_notrace_buf[0])
5501                ftrace_set_early_filter(&global_ops, ftrace_notrace_buf, 0);
5502#ifdef CONFIG_FUNCTION_GRAPH_TRACER
5503        if (ftrace_graph_buf[0])
5504                set_ftrace_early_graph(ftrace_graph_buf, 1);
5505        if (ftrace_graph_notrace_buf[0])
5506                set_ftrace_early_graph(ftrace_graph_notrace_buf, 0);
5507#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
5508}
5509
5510int ftrace_regex_release(struct inode *inode, struct file *file)
5511{
5512        struct seq_file *m = (struct seq_file *)file->private_data;
5513        struct ftrace_iterator *iter;
5514        struct ftrace_hash **orig_hash;
5515        struct trace_parser *parser;
5516        int filter_hash;
5517        int ret;
5518
5519        if (file->f_mode & FMODE_READ) {
5520                iter = m->private;
5521                seq_release(inode, file);
5522        } else
5523                iter = file->private_data;
5524
5525        parser = &iter->parser;
5526        if (trace_parser_loaded(parser)) {
5527                ftrace_match_records(iter->hash, parser->buffer, parser->idx);
5528        }
5529
5530        trace_parser_put(parser);
5531
5532        mutex_lock(&iter->ops->func_hash->regex_lock);
5533
5534        if (file->f_mode & FMODE_WRITE) {
5535                filter_hash = !!(iter->flags & FTRACE_ITER_FILTER);
5536
5537                if (filter_hash) {
5538                        orig_hash = &iter->ops->func_hash->filter_hash;
5539                        if (iter->tr && !list_empty(&iter->tr->mod_trace))
5540                                iter->hash->flags |= FTRACE_HASH_FL_MOD;
5541                } else
5542                        orig_hash = &iter->ops->func_hash->notrace_hash;
5543
5544                mutex_lock(&ftrace_lock);
5545                ret = ftrace_hash_move_and_update_ops(iter->ops, orig_hash,
5546                                                      iter->hash, filter_hash);
5547                mutex_unlock(&ftrace_lock);
5548        } else {
5549                /* For read only, the hash is the ops hash */
5550                iter->hash = NULL;
5551        }
5552
5553        mutex_unlock(&iter->ops->func_hash->regex_lock);
5554        free_ftrace_hash(iter->hash);
5555        if (iter->tr)
5556                trace_array_put(iter->tr);
5557        kfree(iter);
5558
5559        return 0;
5560}
5561
5562static const struct file_operations ftrace_avail_fops = {
5563        .open = ftrace_avail_open,
5564        .read = seq_read,
5565        .llseek = seq_lseek,
5566        .release = seq_release_private,
5567};
5568
5569static const struct file_operations ftrace_enabled_fops = {
5570        .open = ftrace_enabled_open,
5571        .read = seq_read,
5572        .llseek = seq_lseek,
5573        .release = seq_release_private,
5574};
5575
5576static const struct file_operations ftrace_filter_fops = {
5577        .open = ftrace_filter_open,
5578        .read = seq_read,
5579        .write = ftrace_filter_write,
5580        .llseek = tracing_lseek,
5581        .release = ftrace_regex_release,
5582};
5583
5584static const struct file_operations ftrace_notrace_fops = {
5585        .open = ftrace_notrace_open,
5586        .read = seq_read,
5587        .write = ftrace_notrace_write,
5588        .llseek = tracing_lseek,
5589        .release = ftrace_regex_release,
5590};
5591
5592#ifdef CONFIG_FUNCTION_GRAPH_TRACER
5593
5594static DEFINE_MUTEX(graph_lock);
5595
5596struct ftrace_hash __rcu *ftrace_graph_hash = EMPTY_HASH;
5597struct ftrace_hash __rcu *ftrace_graph_notrace_hash = EMPTY_HASH;
5598
5599enum graph_filter_type {
5600        GRAPH_FILTER_NOTRACE    = 0,
5601        GRAPH_FILTER_FUNCTION,
5602};
5603
5604#define FTRACE_GRAPH_EMPTY      ((void *)1)
5605
5606struct ftrace_graph_data {
5607        struct ftrace_hash              *hash;
5608        struct ftrace_func_entry        *entry;
5609        int                             idx;   /* for hash table iteration */
5610        enum graph_filter_type          type;
5611        struct ftrace_hash              *new_hash;
5612        const struct seq_operations     *seq_ops;
5613        struct trace_parser             parser;
5614};
5615
5616static void *
5617__g_next(struct seq_file *m, loff_t *pos)
5618{
5619        struct ftrace_graph_data *fgd = m->private;
5620        struct ftrace_func_entry *entry = fgd->entry;
5621        struct hlist_head *head;
5622        int i, idx = fgd->idx;
5623
5624        if (*pos >= fgd->hash->count)
5625                return NULL;
5626
5627        if (entry) {
5628                hlist_for_each_entry_continue(entry, hlist) {
5629                        fgd->entry = entry;
5630                        return entry;
5631                }
5632
5633                idx++;
5634        }
5635
5636        for (i = idx; i < 1 << fgd->hash->size_bits; i++) {
5637                head = &fgd->hash->buckets[i];
5638                hlist_for_each_entry(entry, head, hlist) {
5639                        fgd->entry = entry;
5640                        fgd->idx = i;
5641                        return entry;
5642                }
5643        }
5644        return NULL;
5645}
5646
5647static void *
5648g_next(struct seq_file *m, void *v, loff_t *pos)
5649{
5650        (*pos)++;
5651        return __g_next(m, pos);
5652}
5653
5654static void *g_start(struct seq_file *m, loff_t *pos)
5655{
5656        struct ftrace_graph_data *fgd = m->private;
5657
5658        mutex_lock(&graph_lock);
5659
5660        if (fgd->type == GRAPH_FILTER_FUNCTION)
5661                fgd->hash = rcu_dereference_protected(ftrace_graph_hash,
5662                                        lockdep_is_held(&graph_lock));
5663        else
5664                fgd->hash = rcu_dereference_protected(ftrace_graph_notrace_hash,
5665                                        lockdep_is_held(&graph_lock));
5666
5667        /* Hash is empty; tell g_show to print that all functions are enabled */
5668        if (ftrace_hash_empty(fgd->hash) && !*pos)
5669                return FTRACE_GRAPH_EMPTY;
5670
5671        fgd->idx = 0;
5672        fgd->entry = NULL;
5673        return __g_next(m, pos);
5674}
5675
5676static void g_stop(struct seq_file *m, void *p)
5677{
5678        mutex_unlock(&graph_lock);
5679}
5680
5681static int g_show(struct seq_file *m, void *v)
5682{
5683        struct ftrace_func_entry *entry = v;
5684
5685        if (!entry)
5686                return 0;
5687
5688        if (entry == FTRACE_GRAPH_EMPTY) {
5689                struct ftrace_graph_data *fgd = m->private;
5690
5691                if (fgd->type == GRAPH_FILTER_FUNCTION)
5692                        seq_puts(m, "#### all functions enabled ####\n");
5693                else
5694                        seq_puts(m, "#### no functions disabled ####\n");
5695                return 0;
5696        }
5697
5698        seq_printf(m, "%ps\n", (void *)entry->ip);
5699
5700        return 0;
5701}
5702
5703static const struct seq_operations ftrace_graph_seq_ops = {
5704        .start = g_start,
5705        .next = g_next,
5706        .stop = g_stop,
5707        .show = g_show,
5708};
5709
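/*
 * These seq ops back reads of set_graph_function and set_graph_notrace;
 * with an empty hash, g_start() returns FTRACE_GRAPH_EMPTY and a read
 * shows:
 *
 *	# cat set_graph_function
 *	#### all functions enabled ####
 */
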
5710static int
5711__ftrace_graph_open(struct inode *inode, struct file *file,
5712                    struct ftrace_graph_data *fgd)
5713{
5714        int ret;
5715        struct ftrace_hash *new_hash = NULL;
5716
5717        ret = security_locked_down(LOCKDOWN_TRACEFS);
5718        if (ret)
5719                return ret;
5720
5721        if (file->f_mode & FMODE_WRITE) {
5722                const int size_bits = FTRACE_HASH_DEFAULT_BITS;
5723
5724                if (trace_parser_get_init(&fgd->parser, FTRACE_BUFF_MAX))
5725                        return -ENOMEM;
5726
5727                if (file->f_flags & O_TRUNC)
5728                        new_hash = alloc_ftrace_hash(size_bits);
5729                else
5730                        new_hash = alloc_and_copy_ftrace_hash(size_bits,
5731                                                              fgd->hash);
5732                if (!new_hash) {
5733                        ret = -ENOMEM;
5734                        goto out;
5735                }
5736        }
5737
5738        if (file->f_mode & FMODE_READ) {
5739                ret = seq_open(file, &ftrace_graph_seq_ops);
5740                if (!ret) {
5741                        struct seq_file *m = file->private_data;
5742                        m->private = fgd;
5743                } else {
5744                        /* Failed */
5745                        free_ftrace_hash(new_hash);
5746                        new_hash = NULL;
5747                }
5748        } else
5749                file->private_data = fgd;
5750
5751out:
5752        if (ret < 0 && file->f_mode & FMODE_WRITE)
5753                trace_parser_put(&fgd->parser);
5754
5755        fgd->new_hash = new_hash;
5756
5757        /*
5758         * All uses of fgd->hash must be taken with the graph_lock
5759         * held. The graph_lock is going to be released, so force
5760         * fgd->hash to be reinitialized when it is taken again.
5761         */
5762        fgd->hash = NULL;
5763
5764        return ret;
5765}
5766
5767static int
5768ftrace_graph_open(struct inode *inode, struct file *file)
5769{
5770        struct ftrace_graph_data *fgd;
5771        int ret;
5772
5773        if (unlikely(ftrace_disabled))
5774                return -ENODEV;
5775
5776        fgd = kmalloc(sizeof(*fgd), GFP_KERNEL);
5777        if (fgd == NULL)
5778                return -ENOMEM;
5779
5780        mutex_lock(&graph_lock);
5781
5782        fgd->hash = rcu_dereference_protected(ftrace_graph_hash,
5783                                        lockdep_is_held(&graph_lock));
5784        fgd->type = GRAPH_FILTER_FUNCTION;
5785        fgd->seq_ops = &ftrace_graph_seq_ops;
5786
5787        ret = __ftrace_graph_open(inode, file, fgd);
5788        if (ret < 0)
5789                kfree(fgd);
5790
5791        mutex_unlock(&graph_lock);
5792        return ret;
5793}
5794
5795static int
5796ftrace_graph_notrace_open(struct inode *inode, struct file *file)
5797{
5798        struct ftrace_graph_data *fgd;
5799        int ret;
5800
5801        if (unlikely(ftrace_disabled))
5802                return -ENODEV;
5803
5804        fgd = kmalloc(sizeof(*fgd), GFP_KERNEL);
5805        if (fgd == NULL)
5806                return -ENOMEM;
5807
5808        mutex_lock(&graph_lock);
5809
5810        fgd->hash = rcu_dereference_protected(ftrace_graph_notrace_hash,
5811                                        lockdep_is_held(&graph_lock));
5812        fgd->type = GRAPH_FILTER_NOTRACE;
5813        fgd->seq_ops = &ftrace_graph_seq_ops;
5814
5815        ret = __ftrace_graph_open(inode, file, fgd);
5816        if (ret < 0)
5817                kfree(fgd);
5818
5819        mutex_unlock(&graph_lock);
5820        return ret;
5821}
5822
5823static int
5824ftrace_graph_release(struct inode *inode, struct file *file)
5825{
5826        struct ftrace_graph_data *fgd;
5827        struct ftrace_hash *old_hash, *new_hash;
5828        struct trace_parser *parser;
5829        int ret = 0;
5830
5831        if (file->f_mode & FMODE_READ) {
5832                struct seq_file *m = file->private_data;
5833
5834                fgd = m->private;
5835                seq_release(inode, file);
5836        } else {
5837                fgd = file->private_data;
5838        }
5839
5840
5841        if (file->f_mode & FMODE_WRITE) {
5842
5843                parser = &fgd->parser;
5844
5845                if (trace_parser_loaded(parser)) {
5846                        ret = ftrace_graph_set_hash(fgd->new_hash,
5847                                                    parser->buffer);
5848                }
5849
5850                trace_parser_put(parser);
5851
5852                new_hash = __ftrace_hash_move(fgd->new_hash);
5853                if (!new_hash) {
5854                        ret = -ENOMEM;
5855                        goto out;
5856                }
5857
5858                mutex_lock(&graph_lock);
5859
5860                if (fgd->type == GRAPH_FILTER_FUNCTION) {
5861                        old_hash = rcu_dereference_protected(ftrace_graph_hash,
5862                                        lockdep_is_held(&graph_lock));
5863                        rcu_assign_pointer(ftrace_graph_hash, new_hash);
5864                } else {
5865                        old_hash = rcu_dereference_protected(ftrace_graph_notrace_hash,
5866                                        lockdep_is_held(&graph_lock));
5867                        rcu_assign_pointer(ftrace_graph_notrace_hash, new_hash);
5868                }
5869
5870                mutex_unlock(&graph_lock);
5871
5872                /*
5873                 * We need to do a hard force of sched synchronization.
5874                 * This is because we use preempt_disable() to do RCU, but
5875                 * the function tracers can be called where RCU is not watching
5876                 * (like before user_exit()). We cannot rely on the RCU
5877                 * infrastructure to do the synchronization, thus we must do it
5878                 * ourselves.
5879                 */
5880                synchronize_rcu_tasks_rude();
5881
5882                free_ftrace_hash(old_hash);
5883        }
5884
5885 out:
5886        free_ftrace_hash(fgd->new_hash);
5887        kfree(fgd);
5888
5889        return ret;
5890}
5891
5892static int
5893ftrace_graph_set_hash(struct ftrace_hash *hash, char *buffer)
5894{
5895        struct ftrace_glob func_g;
5896        struct dyn_ftrace *rec;
5897        struct ftrace_page *pg;
5898        struct ftrace_func_entry *entry;
5899        int fail = 1;
5900        int not;
5901
5902        /* decode regex */
5903        func_g.type = filter_parse_regex(buffer, strlen(buffer),
5904                                         &func_g.search, &not);
5905
5906        func_g.len = strlen(func_g.search);
5907
5908        mutex_lock(&ftrace_lock);
5909
5910        if (unlikely(ftrace_disabled)) {
5911                mutex_unlock(&ftrace_lock);
5912                return -ENODEV;
5913        }
5914
5915        do_for_each_ftrace_rec(pg, rec) {
5916
5917                if (rec->flags & FTRACE_FL_DISABLED)
5918                        continue;
5919
5920                if (ftrace_match_record(rec, &func_g, NULL, 0)) {
5921                        entry = ftrace_lookup_ip(hash, rec->ip);
5922
5923                        if (!not) {
5924                                fail = 0;
5925
5926                                if (entry)
5927                                        continue;
5928                                if (add_hash_entry(hash, rec->ip) < 0)
5929                                        goto out;
5930                        } else {
5931                                if (entry) {
5932                                        free_hash_entry(hash, entry);
5933                                        fail = 0;
5934                                }
5935                        }
5936                }
5937        } while_for_each_ftrace_rec();
5938out:
5939        mutex_unlock(&ftrace_lock);
5940
5941        if (fail)
5942                return -EINVAL;
5943
5944        return 0;
5945}
5946
5947static ssize_t
5948ftrace_graph_write(struct file *file, const char __user *ubuf,
5949                   size_t cnt, loff_t *ppos)
5950{
5951        ssize_t read, ret = 0;
5952        struct ftrace_graph_data *fgd = file->private_data;
5953        struct trace_parser *parser;
5954
5955        if (!cnt)
5956                return 0;
5957
5958        /* Read mode uses seq functions */
5959        if (file->f_mode & FMODE_READ) {
5960                struct seq_file *m = file->private_data;
5961                fgd = m->private;
5962        }
5963
5964        parser = &fgd->parser;
5965
5966        read = trace_get_user(parser, ubuf, cnt, ppos);
5967
5968        if (read >= 0 && trace_parser_loaded(parser) &&
5969            !trace_parser_cont(parser)) {
5970
5971                ret = ftrace_graph_set_hash(fgd->new_hash,
5972                                            parser->buffer);
5973                trace_parser_clear(parser);
5974        }
5975
5976        if (!ret)
5977                ret = read;
5978
5979        return ret;
5980}
5981
5982static const struct file_operations ftrace_graph_fops = {
5983        .open           = ftrace_graph_open,
5984        .read           = seq_read,
5985        .write          = ftrace_graph_write,
5986        .llseek         = tracing_lseek,
5987        .release        = ftrace_graph_release,
5988};
5989
5990static const struct file_operations ftrace_graph_notrace_fops = {
5991        .open           = ftrace_graph_notrace_open,
5992        .read           = seq_read,
5993        .write          = ftrace_graph_write,
5994        .llseek         = tracing_lseek,
5995        .release        = ftrace_graph_release,
5996};
5997#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
5998
5999void ftrace_create_filter_files(struct ftrace_ops *ops,
6000                                struct dentry *parent)
6001{
6002
6003        trace_create_file("set_ftrace_filter", 0644, parent,
6004                          ops, &ftrace_filter_fops);
6005
6006        trace_create_file("set_ftrace_notrace", 0644, parent,
6007                          ops, &ftrace_notrace_fops);
6008}
6009
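/*
 * The files created above implement the usual tracefs filter interface,
 * e.g.:
 *
 *	# echo 'schedule*' > set_ftrace_filter
 *	# echo rcu_read_lock > set_ftrace_notrace
 */
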
6010/*
6011 * The name "destroy_filter_files" is really a misnomer. It may
6012 * actually delete the files in the future, but for now it is
6013 * really intended to make sure the ops passed in are disabled
6014 * and that when this function returns, the caller is free to
6015 * free the ops.
6016 *
6017 * The "destroy" name is only to match the "create" name that this
6018 * should be paired with.
6019 */
6020void ftrace_destroy_filter_files(struct ftrace_ops *ops)
6021{
6022        mutex_lock(&ftrace_lock);
6023        if (ops->flags & FTRACE_OPS_FL_ENABLED)
6024                ftrace_shutdown(ops, 0);
6025        ops->flags |= FTRACE_OPS_FL_DELETED;
6026        ftrace_free_filter(ops);
6027        mutex_unlock(&ftrace_lock);
6028}
6029
6030static __init int ftrace_init_dyn_tracefs(struct dentry *d_tracer)
6031{
6032
6033        trace_create_file("available_filter_functions", 0444,
6034                        d_tracer, NULL, &ftrace_avail_fops);
6035
6036        trace_create_file("enabled_functions", 0444,
6037                        d_tracer, NULL, &ftrace_enabled_fops);
6038
6039        ftrace_create_filter_files(&global_ops, d_tracer);
6040
6041#ifdef CONFIG_FUNCTION_GRAPH_TRACER
6042        trace_create_file("set_graph_function", 0644, d_tracer,
6043                                    NULL,
6044                                    &ftrace_graph_fops);
6045        trace_create_file("set_graph_notrace", 0644, d_tracer,
6046                                    NULL,
6047                                    &ftrace_graph_notrace_fops);
6048#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
6049
6050        return 0;
6051}
6052
6053static int ftrace_cmp_ips(const void *a, const void *b)
6054{
6055        const unsigned long *ipa = a;
6056        const unsigned long *ipb = b;
6057
6058        if (*ipa > *ipb)
6059                return 1;
6060        if (*ipa < *ipb)
6061                return -1;
6062        return 0;
6063}
6064
6065static int ftrace_process_locs(struct module *mod,
6066                               unsigned long *start,
6067                               unsigned long *end)
6068{
6069        struct ftrace_page *start_pg;
6070        struct ftrace_page *pg;
6071        struct dyn_ftrace *rec;
6072        unsigned long count;
6073        unsigned long *p;
6074        unsigned long addr;
6075        unsigned long flags = 0; /* Shut up gcc */
6076        int ret = -ENOMEM;
6077
6078        count = end - start;
6079
6080        if (!count)
6081                return 0;
6082
6083        sort(start, count, sizeof(*start),
6084             ftrace_cmp_ips, NULL);
6085
6086        start_pg = ftrace_allocate_pages(count);
6087        if (!start_pg)
6088                return -ENOMEM;
6089
6090        mutex_lock(&ftrace_lock);
6091
6092        /*
6093         * Core and each module need their own pages, as
6094         * modules will free them when they are removed.
6095         * Force a new page to be allocated for modules.
6096         */
6097        if (!mod) {
6098                WARN_ON(ftrace_pages || ftrace_pages_start);
6099                /* First initialization */
6100                ftrace_pages = ftrace_pages_start = start_pg;
6101        } else {
6102                if (!ftrace_pages)
6103                        goto out;
6104
6105                if (WARN_ON(ftrace_pages->next)) {
6106                        /* Hmm, we have free pages? */
6107                        while (ftrace_pages->next)
6108                                ftrace_pages = ftrace_pages->next;
6109                }
6110
6111                ftrace_pages->next = start_pg;
6112        }
6113
6114        p = start;
6115        pg = start_pg;
6116        while (p < end) {
6117                addr = ftrace_call_adjust(*p++);
6118                /*
6119                 * Some architecture linkers will pad between
6120                 * the different mcount_loc sections of different
6121                 * object files to satisfy alignments.
6122                 * Skip any NULL pointers.
6123                 */
6124                if (!addr)
6125                        continue;
6126
6127                if (pg->index == pg->size) {
6128                        /* We should have allocated enough */
6129                        if (WARN_ON(!pg->next))
6130                                break;
6131                        pg = pg->next;
6132                }
6133
6134                rec = &pg->records[pg->index++];
6135                rec->ip = addr;
6136        }
6137
6138        /* We should have used all pages */
6139        WARN_ON(pg->next);
6140
6141        /* Assign the last page to ftrace_pages */
6142        ftrace_pages = pg;
6143
6144        /*
6145         * We only need to disable interrupts on start up
6146         * because we are modifying code that an interrupt
6147         * may execute, and the modification is not atomic.
6148         * But for modules, nothing runs the code we modify
6149         * until we are finished with it, and there's no
6150         * reason to cause large interrupt latencies while we do it.
6151         */
6152        if (!mod)
6153                local_irq_save(flags);
6154        ftrace_update_code(mod, start_pg);
6155        if (!mod)
6156                local_irq_restore(flags);
6157        ret = 0;
6158 out:
6159        mutex_unlock(&ftrace_lock);
6160
6161        return ret;
6162}
6163
6164struct ftrace_mod_func {
6165        struct list_head        list;
6166        char                    *name;
6167        unsigned long           ip;
6168        unsigned int            size;
6169};
6170
6171struct ftrace_mod_map {
6172        struct rcu_head         rcu;
6173        struct list_head        list;
6174        struct module           *mod;
6175        unsigned long           start_addr;
6176        unsigned long           end_addr;
6177        struct list_head        funcs;
6178        unsigned int            num_funcs;
6179};
6180
6181#ifdef CONFIG_MODULES
6182
6183#define next_to_ftrace_page(p) container_of(p, struct ftrace_page, next)
6184
6185static LIST_HEAD(ftrace_mod_maps);
6186
6187static int referenced_filters(struct dyn_ftrace *rec)
6188{
6189        struct ftrace_ops *ops;
6190        int cnt = 0;
6191
6192        for (ops = ftrace_ops_list; ops != &ftrace_list_end; ops = ops->next) {
6193                if (ops_references_rec(ops, rec))
6194                        cnt++;
6195        }
6196
6197        return cnt;
6198}
6199
6200static void
6201clear_mod_from_hash(struct ftrace_page *pg, struct ftrace_hash *hash)
6202{
6203        struct ftrace_func_entry *entry;
6204        struct dyn_ftrace *rec;
6205        int i;
6206
6207        if (ftrace_hash_empty(hash))
6208                return;
6209
6210        for (i = 0; i < pg->index; i++) {
6211                rec = &pg->records[i];
6212                entry = __ftrace_lookup_ip(hash, rec->ip);
6213                /*
6214                 * Do not allow this rec to match again.
6215                 * Yeah, it may waste some memory, but will be removed
6216                 * if/when the hash is modified again.
6217                 */
6218                if (entry)
6219                        entry->ip = 0;
6220        }
6221}
6222
6223/* Clear any records from hashes */
6224static void clear_mod_from_hashes(struct ftrace_page *pg)
6225{
6226        struct trace_array *tr;
6227
6228        mutex_lock(&trace_types_lock);
6229        list_for_each_entry(tr, &ftrace_trace_arrays, list) {
6230                if (!tr->ops || !tr->ops->func_hash)
6231                        continue;
6232                mutex_lock(&tr->ops->func_hash->regex_lock);
6233                clear_mod_from_hash(pg, tr->ops->func_hash->filter_hash);
6234                clear_mod_from_hash(pg, tr->ops->func_hash->notrace_hash);
6235                mutex_unlock(&tr->ops->func_hash->regex_lock);
6236        }
6237        mutex_unlock(&trace_types_lock);
6238}
6239
6240static void ftrace_free_mod_map(struct rcu_head *rcu)
6241{
6242        struct ftrace_mod_map *mod_map = container_of(rcu, struct ftrace_mod_map, rcu);
6243        struct ftrace_mod_func *mod_func;
6244        struct ftrace_mod_func *n;
6245
6246        /* The contents of mod_map are no longer visible to any readers */
6247        list_for_each_entry_safe(mod_func, n, &mod_map->funcs, list) {
6248                kfree(mod_func->name);
6249                list_del(&mod_func->list);
6250                kfree(mod_func);
6251        }
6252
6253        kfree(mod_map);
6254}
6255
6256void ftrace_release_mod(struct module *mod)
6257{
6258        struct ftrace_mod_map *mod_map;
6259        struct ftrace_mod_map *n;
6260        struct dyn_ftrace *rec;
6261        struct ftrace_page **last_pg;
6262        struct ftrace_page *tmp_page = NULL;
6263        struct ftrace_page *pg;
6264        int order;
6265
6266        mutex_lock(&ftrace_lock);
6267
6268        if (ftrace_disabled)
6269                goto out_unlock;
6270
6271        list_for_each_entry_safe(mod_map, n, &ftrace_mod_maps, list) {
6272                if (mod_map->mod == mod) {
6273                        list_del_rcu(&mod_map->list);
6274                        call_rcu(&mod_map->rcu, ftrace_free_mod_map);
6275                        break;
6276                }
6277        }
6278
6279        /*
6280         * Each module has its own ftrace_pages; remove
6281         * them from the list.
6282         */
6283        last_pg = &ftrace_pages_start;
6284        for (pg = ftrace_pages_start; pg; pg = *last_pg) {
6285                rec = &pg->records[0];
6286                if (within_module_core(rec->ip, mod) ||
6287                    within_module_init(rec->ip, mod)) {
6288                        /*
6289                         * As core pages are first, the first
6290                         * page should never be a module page.
6291                         */
6292                        if (WARN_ON(pg == ftrace_pages_start))
6293                                goto out_unlock;
6294
6295                        /* Check if we are deleting the last page */
6296                        if (pg == ftrace_pages)
6297                                ftrace_pages = next_to_ftrace_page(last_pg);
6298
6299                        ftrace_update_tot_cnt -= pg->index;
6300                        *last_pg = pg->next;
6301
6302                        pg->next = tmp_page;
6303                        tmp_page = pg;
6304                } else
6305                        last_pg = &pg->next;
6306        }
6307 out_unlock:
6308        mutex_unlock(&ftrace_lock);
6309
6310        for (pg = tmp_page; pg; pg = tmp_page) {
6311
6312                /* Needs to be called outside of ftrace_lock */
6313                clear_mod_from_hashes(pg);
6314
6315                order = get_count_order(pg->size / ENTRIES_PER_PAGE);
6316                free_pages((unsigned long)pg->records, order);
6317                tmp_page = pg->next;
6318                kfree(pg);
6319                ftrace_number_of_pages -= 1 << order;
6320                ftrace_number_of_groups--;
6321        }
6322}
6323
6324void ftrace_module_enable(struct module *mod)
6325{
6326        struct dyn_ftrace *rec;
6327        struct ftrace_page *pg;
6328
6329        mutex_lock(&ftrace_lock);
6330
6331        if (ftrace_disabled)
6332                goto out_unlock;
6333
6334        /*
6335         * If the tracing is enabled, go ahead and enable the record.
6336         *
6337         * The reason not to enable the record immediately is the
6338         * inherent check of ftrace_make_nop/ftrace_make_call for
6339         * correct previous instructions.  Doing the NOP conversion
6340         * first puts the module into the correct state, thus
6341         * passing the ftrace_make_call check.
6342         *
6343         * We also delay this to after the module code already set the
6344         * text to read-only, as we now need to set it back to read-write
6345         * so that we can modify the text.
6346         */
6347        if (ftrace_start_up)
6348                ftrace_arch_code_modify_prepare();
6349
6350        do_for_each_ftrace_rec(pg, rec) {
6351                int cnt;
6352                /*
6353                 * All records of a module's text share the same pg.
6354                 * If a record is not part of this module, skip the
6355                 * rest of this pg, which is what the "break" does.
6356                 * which the "break" will do.
6357                 */
6358                if (!within_module_core(rec->ip, mod) &&
6359                    !within_module_init(rec->ip, mod))
6360                        break;
6361
6362                cnt = 0;
6363
6364                /*
6365                 * When adding a module, we need to check if tracers are
6366                 * currently enabled and if they are, and can trace this record,
6367                 * we need to enable the module functions as well as update the
6368                 * reference counts for those function records.
6369                 */
6370                if (ftrace_start_up)
6371                        cnt += referenced_filters(rec);
6372
6373                /* This clears FTRACE_FL_DISABLED */
6374                rec->flags = cnt;
6375
6376                if (ftrace_start_up && cnt) {
6377                        int failed = __ftrace_replace_code(rec, 1);
6378                        if (failed) {
6379                                ftrace_bug(failed, rec);
6380                                goto out_loop;
6381                        }
6382                }
6383
6384        } while_for_each_ftrace_rec();
6385
6386 out_loop:
6387        if (ftrace_start_up)
6388                ftrace_arch_code_modify_post_process();
6389
6390 out_unlock:
6391        mutex_unlock(&ftrace_lock);
6392
6393        process_cached_mods(mod->name);
6394}
6395
6396void ftrace_module_init(struct module *mod)
6397{
6398        if (ftrace_disabled || !mod->num_ftrace_callsites)
6399                return;
6400
6401        ftrace_process_locs(mod, mod->ftrace_callsites,
6402                            mod->ftrace_callsites + mod->num_ftrace_callsites);
6403}
6404
6405static void save_ftrace_mod_rec(struct ftrace_mod_map *mod_map,
6406                                struct dyn_ftrace *rec)
6407{
6408        struct ftrace_mod_func *mod_func;
6409        unsigned long symsize;
6410        unsigned long offset;
6411        char str[KSYM_SYMBOL_LEN];
6412        char *modname;
6413        const char *ret;
6414
6415        ret = kallsyms_lookup(rec->ip, &symsize, &offset, &modname, str);
6416        if (!ret)
6417                return;
6418
6419        mod_func = kmalloc(sizeof(*mod_func), GFP_KERNEL);
6420        if (!mod_func)
6421                return;
6422
6423        mod_func->name = kstrdup(str, GFP_KERNEL);
6424        if (!mod_func->name) {
6425                kfree(mod_func);
6426                return;
6427        }
6428
6429        mod_func->ip = rec->ip - offset;
6430        mod_func->size = symsize;
6431
6432        mod_map->num_funcs++;
6433
6434        list_add_rcu(&mod_func->list, &mod_map->funcs);
6435}
6436
6437static struct ftrace_mod_map *
6438allocate_ftrace_mod_map(struct module *mod,
6439                        unsigned long start, unsigned long end)
6440{
6441        struct ftrace_mod_map *mod_map;
6442
6443        mod_map = kmalloc(sizeof(*mod_map), GFP_KERNEL);
6444        if (!mod_map)
6445                return NULL;
6446
6447        mod_map->mod = mod;
6448        mod_map->start_addr = start;
6449        mod_map->end_addr = end;
6450        mod_map->num_funcs = 0;
6451
6452        INIT_LIST_HEAD_RCU(&mod_map->funcs);
6453
6454        list_add_rcu(&mod_map->list, &ftrace_mod_maps);
6455
6456        return mod_map;
6457}
6458
6459static const char *
6460ftrace_func_address_lookup(struct ftrace_mod_map *mod_map,
6461                           unsigned long addr, unsigned long *size,
6462                           unsigned long *off, char *sym)
6463{
6464        struct ftrace_mod_func *found_func =  NULL;
6465        struct ftrace_mod_func *mod_func;
6466
6467        list_for_each_entry_rcu(mod_func, &mod_map->funcs, list) {
6468                if (addr >= mod_func->ip &&
6469                    addr < mod_func->ip + mod_func->size) {
6470                        found_func = mod_func;
6471                        break;
6472                }
6473        }
6474
6475        if (found_func) {
6476                if (size)
6477                        *size = found_func->size;
6478                if (off)
6479                        *off = addr - found_func->ip;
6480                if (sym)
6481                        strlcpy(sym, found_func->name, KSYM_NAME_LEN);
6482
6483                return found_func->name;
6484        }
6485
6486        return NULL;
6487}
6488
6489const char *
6490ftrace_mod_address_lookup(unsigned long addr, unsigned long *size,
6491                   unsigned long *off, char **modname, char *sym)
6492{
6493        struct ftrace_mod_map *mod_map;
6494        const char *ret = NULL;
6495
6496        /* mod_map is freed via call_rcu() */
6497        preempt_disable();
6498        list_for_each_entry_rcu(mod_map, &ftrace_mod_maps, list) {
6499                ret = ftrace_func_address_lookup(mod_map, addr, size, off, sym);
6500                if (ret) {
6501                        if (modname)
6502                                *modname = mod_map->mod->name;
6503                        break;
6504                }
6505        }
6506        preempt_enable();
6507
6508        return ret;
6509}
6510
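    /*
     * Treat the saved mod maps as one flat, zero-based symbol table:
     * @symnum selects the symnum'th saved function across all maps.
     * kallsyms iterates by calling this with an increasing @symnum
     * until it returns -ERANGE.
     */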
6511int ftrace_mod_get_kallsym(unsigned int symnum, unsigned long *value,
6512                           char *type, char *name,
6513                           char *module_name, int *exported)
6514{
6515        struct ftrace_mod_map *mod_map;
6516        struct ftrace_mod_func *mod_func;
6517
6518        preempt_disable();
6519        list_for_each_entry_rcu(mod_map, &ftrace_mod_maps, list) {
6520
6521                if (symnum >= mod_map->num_funcs) {
6522                        symnum -= mod_map->num_funcs;
6523                        continue;
6524                }
6525
6526                list_for_each_entry_rcu(mod_func, &mod_map->funcs, list) {
6527                        if (symnum > 0) {
6528                                symnum--;
6529                                continue;
6530                        }
6531
6532                        *value = mod_func->ip;
6533                        *type = 'T';
6534                        strlcpy(name, mod_func->name, KSYM_NAME_LEN);
6535                        strlcpy(module_name, mod_map->mod->name, MODULE_NAME_LEN);
6536                        *exported = 1;
6537                        preempt_enable();
6538                        return 0;
6539                }
6540                WARN_ON(1);
6541                break;
6542        }
6543        preempt_enable();
6544        return -ERANGE;
6545}
6546
6547#else
6548static void save_ftrace_mod_rec(struct ftrace_mod_map *mod_map,
6549                                struct dyn_ftrace *rec) { }
6550static inline struct ftrace_mod_map *
6551allocate_ftrace_mod_map(struct module *mod,
6552                        unsigned long start, unsigned long end)
6553{
6554        return NULL;
6555}
6556#endif /* CONFIG_MODULES */
6557
6558struct ftrace_init_func {
6559        struct list_head list;
6560        unsigned long ip;
6561};
6562
6563/* Clear any init ips from hashes */
6564static void
6565clear_func_from_hash(struct ftrace_init_func *func, struct ftrace_hash *hash)
6566{
6567        struct ftrace_func_entry *entry;
6568
6569        entry = ftrace_lookup_ip(hash, func->ip);
6570        /*
6571         * Do not allow this rec to match again.
6572         * Yeah, it may waste some memory, but will be removed
6573         * if/when the hash is modified again.
6574         */
6575        if (entry)
6576                entry->ip = 0;
6577}
6578
6579static void
6580clear_func_from_hashes(struct ftrace_init_func *func)
6581{
6582        struct trace_array *tr;
6583
6584        mutex_lock(&trace_types_lock);
6585        list_for_each_entry(tr, &ftrace_trace_arrays, list) {
6586                if (!tr->ops || !tr->ops->func_hash)
6587                        continue;
6588                mutex_lock(&tr->ops->func_hash->regex_lock);
6589                clear_func_from_hash(func, tr->ops->func_hash->filter_hash);
6590                clear_func_from_hash(func, tr->ops->func_hash->notrace_hash);
6591                mutex_unlock(&tr->ops->func_hash->regex_lock);
6592        }
6593        mutex_unlock(&trace_types_lock);
6594}
6595
6596static void add_to_clear_hash_list(struct list_head *clear_list,
6597                                   struct dyn_ftrace *rec)
6598{
6599        struct ftrace_init_func *func;
6600
6601        func = kmalloc(sizeof(*func), GFP_KERNEL);
6602        if (!func) {
6603                MEM_FAIL(1, "alloc failure, ftrace filter could be stale\n");
6604                return;
6605        }
6606
6607        func->ip = rec->ip;
6608        list_add(&func->list, clear_list);
6609}
6610
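    /*
     * Remove every dyn_ftrace record that lands in [start_ptr, end_ptr).
     * The search key overloads dyn_ftrace: key.ip holds the start of the
     * range and key.flags the end, which is the layout ftrace_cmp_recs()
     * uses for a range match. This is called when init memory (core
     * kernel or module) is freed, so that stale records are never
     * patched again.
     */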
6611void ftrace_free_mem(struct module *mod, void *start_ptr, void *end_ptr)
6612{
6613        unsigned long start = (unsigned long)(start_ptr);
6614        unsigned long end = (unsigned long)(end_ptr);
6615        struct ftrace_page **last_pg = &ftrace_pages_start;
6616        struct ftrace_page *pg;
6617        struct dyn_ftrace *rec;
6618        struct dyn_ftrace key;
6619        struct ftrace_mod_map *mod_map = NULL;
6620        struct ftrace_init_func *func, *func_next;
6621        struct list_head clear_hash;
6622        int order;
6623
6624        INIT_LIST_HEAD(&clear_hash);
6625
6626        key.ip = start;
6627        key.flags = end;        /* overload flags, as it is unsigned long */
6628
6629        mutex_lock(&ftrace_lock);
6630
6631        /*
6632         * If we are freeing module init memory, then check if
6633         * any tracer is active. If so, we need to save a mapping of
6634         * the module functions being freed with the address.
6635         */
6636        if (mod && ftrace_ops_list != &ftrace_list_end)
6637                mod_map = allocate_ftrace_mod_map(mod, start, end);
6638
6639        for (pg = ftrace_pages_start; pg; last_pg = &pg->next, pg = *last_pg) {
6640                if (end < pg->records[0].ip ||
6641                    start >= (pg->records[pg->index - 1].ip + MCOUNT_INSN_SIZE))
6642                        continue;
6643 again:
6644                rec = bsearch(&key, pg->records, pg->index,
6645                              sizeof(struct dyn_ftrace),
6646                              ftrace_cmp_recs);
6647                if (!rec)
6648                        continue;
6649
6650                /* rec will be cleared from hashes after ftrace_lock unlock */
6651                add_to_clear_hash_list(&clear_hash, rec);
6652
6653                if (mod_map)
6654                        save_ftrace_mod_rec(mod_map, rec);
6655
6656                pg->index--;
6657                ftrace_update_tot_cnt--;
6658                if (!pg->index) {
6659                        *last_pg = pg->next;
6660                        order = get_count_order(pg->size / ENTRIES_PER_PAGE);
6661                        free_pages((unsigned long)pg->records, order);
6662                        ftrace_number_of_pages -= 1 << order;
6663                        ftrace_number_of_groups--;
6664                        kfree(pg);
6665                        pg = container_of(last_pg, struct ftrace_page, next);
6666                        if (!(*last_pg))
6667                                ftrace_pages = pg;
6668                        continue;
6669                }
6670                memmove(rec, rec + 1,
6671                        (pg->index - (rec - pg->records)) * sizeof(*rec));
6672                /* More than one function may be in this block */
6673                goto again;
6674        }
6675        mutex_unlock(&ftrace_lock);
6676
6677        list_for_each_entry_safe(func, func_next, &clear_hash, list) {
6678                clear_func_from_hashes(func);
6679                kfree(func);
6680        }
6681}
6682
6683void __init ftrace_free_init_mem(void)
6684{
6685        void *start = (void *)(&__init_begin);
6686        void *end = (void *)(&__init_end);
6687
6688        ftrace_free_mem(NULL, start, end);
6689}
6690
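    /*
     * Called once from start_kernel() during early boot: record every
     * compiled-in mcount/__fentry__ call site listed between
     * __start_mcount_loc and __stop_mcount_loc, and turn each site into
     * a nop so it can be patched back in when a tracer registers.
     */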
6691void __init ftrace_init(void)
6692{
6693        extern unsigned long __start_mcount_loc[];
6694        extern unsigned long __stop_mcount_loc[];
6695        unsigned long count, flags;
6696        int ret;
6697
6698        local_irq_save(flags);
6699        ret = ftrace_dyn_arch_init();
6700        local_irq_restore(flags);
6701        if (ret)
6702                goto failed;
6703
6704        count = __stop_mcount_loc - __start_mcount_loc;
6705        if (!count) {
6706                pr_info("ftrace: No functions to be traced?\n");
6707                goto failed;
6708        }
6709
6710        pr_info("ftrace: allocating %ld entries in %ld pages\n",
6711                count, count / ENTRIES_PER_PAGE + 1);
6712
6713        last_ftrace_enabled = ftrace_enabled = 1;
6714
6715        ret = ftrace_process_locs(NULL,
6716                                  __start_mcount_loc,
6717                                  __stop_mcount_loc);
            if (ret) {
                    pr_warn("ftrace: failed to allocate entries for functions\n");
                    goto failed;
            }
6718
6719        pr_info("ftrace: allocated %ld pages with %ld groups\n",
6720                ftrace_number_of_pages, ftrace_number_of_groups);
6721
6722        set_ftrace_early_filters();
6723
6724        return;
6725 failed:
6726        ftrace_disabled = 1;
6727}
6728
6729/* Do nothing if arch does not support this */
6730void __weak arch_ftrace_update_trampoline(struct ftrace_ops *ops)
6731{
6732}
6733
6734static void ftrace_update_trampoline(struct ftrace_ops *ops)
6735{
6736        arch_ftrace_update_trampoline(ops);
6737}
6738
6739void ftrace_init_trace_array(struct trace_array *tr)
6740{
6741        INIT_LIST_HEAD(&tr->func_probes);
6742        INIT_LIST_HEAD(&tr->mod_trace);
6743        INIT_LIST_HEAD(&tr->mod_notrace);
6744}
6745#else
6746
6747struct ftrace_ops global_ops = {
6748        .func                   = ftrace_stub,
6749        .flags                  = FTRACE_OPS_FL_RECURSION_SAFE |
6750                                  FTRACE_OPS_FL_INITIALIZED |
6751                                  FTRACE_OPS_FL_PID,
6752};
6753
6754static int __init ftrace_nodyn_init(void)
6755{
6756        ftrace_enabled = 1;
6757        return 0;
6758}
6759core_initcall(ftrace_nodyn_init);
6760
6761static inline int ftrace_init_dyn_tracefs(struct dentry *d_tracer) { return 0; }
6762static inline void ftrace_startup_enable(int command) { }
6763static inline void ftrace_startup_all(int command) { }
6764
6765# define ftrace_startup_sysctl()        do { } while (0)
6766# define ftrace_shutdown_sysctl()       do { } while (0)
6767
6768static void ftrace_update_trampoline(struct ftrace_ops *ops)
6769{
6770}
6771
6772#endif /* CONFIG_DYNAMIC_FTRACE */
6773
6774__init void ftrace_init_global_array_ops(struct trace_array *tr)
6775{
6776        tr->ops = &global_ops;
6777        tr->ops->private = tr;
6778        ftrace_init_trace_array(tr);
6779}
6780
6781void ftrace_init_array_ops(struct trace_array *tr, ftrace_func_t func)
6782{
6783        /* The global ops should still have the stub as its function */
6784        if (tr->flags & TRACE_ARRAY_FL_GLOBAL) {
6785                if (WARN_ON(tr->ops->func != ftrace_stub))
6786                        pr_warn("ftrace ops had %pS for function\n",
6787                                tr->ops->func);
6788        }
6789        tr->ops->func = func;
6790        tr->ops->private = tr;
6791}
6792
6793void ftrace_reset_array_ops(struct trace_array *tr)
6794{
6795        tr->ops->func = ftrace_stub;
6796}
6797
6798static nokprobe_inline void
6799__ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
6800                       struct ftrace_ops *ignored, struct pt_regs *regs)
6801{
6802        struct ftrace_ops *op;
6803        int bit;
6804
6805        bit = trace_test_and_set_recursion(TRACE_LIST_START, TRACE_LIST_MAX);
6806        if (bit < 0)
6807                return;
6808
6809        /*
6810         * Some of the ops may be dynamically allocated,
6811         * they must be freed after a synchronize_rcu().
6812         */
6813        preempt_disable_notrace();
6814
6815        do_for_each_ftrace_op(op, ftrace_ops_list) {
6816                /* Stub functions don't need to be called nor tested */
6817                if (op->flags & FTRACE_OPS_FL_STUB)
6818                        continue;
6819                /*
6820                 * Check the following for each ops before calling their func:
6821                 *
6822                 *  if RCU flag is set, then rcu_is_watching() must be true
6823                 *
6824                 *  Otherwise test if the ip matches the ops filter
6825                 *
6826                 * If any of the above fails then the op->func() is not executed.
6827                 */
6828                if ((!(op->flags & FTRACE_OPS_FL_RCU) || rcu_is_watching()) &&
6829                    ftrace_ops_test(op, ip, regs)) {
6830                        if (FTRACE_WARN_ON(!op->func)) {
6831                                pr_warn("op=%p %pS\n", op, op);
6832                                goto out;
6833                        }
6834                        op->func(ip, parent_ip, op, regs);
6835                }
6836        } while_for_each_ftrace_op(op);
6837out:
6838        preempt_enable_notrace();
6839        trace_clear_recursion(bit);
6840}
6841
6842/*
6843 * Some archs only support passing ip and parent_ip. Even though
6844 * the list function ignores the op parameter, we do not want any
6845 * C side effects, where a function is called without the caller
6846 * sending a third parameter.
6847 * Archs are expected to support both regs and ftrace_ops at the same
6848 * time: if an arch supports ftrace_ops, it is assumed to support regs.
6849 * If callbacks want to use regs, they must either check for regs
6850 * being NULL, or for CONFIG_DYNAMIC_FTRACE_WITH_REGS.
6851 * Note, CONFIG_DYNAMIC_FTRACE_WITH_REGS expects a full regs to be saved.
6852 * An architecture can pass partial regs with ftrace_ops and still
6853 * set ARCH_SUPPORTS_FTRACE_OPS.
6854 */
6855#if ARCH_SUPPORTS_FTRACE_OPS
6856static void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
6857                                 struct ftrace_ops *op, struct pt_regs *regs)
6858{
6859        __ftrace_ops_list_func(ip, parent_ip, NULL, regs);
6860}
6861NOKPROBE_SYMBOL(ftrace_ops_list_func);
6862#else
6863static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip)
6864{
6865        __ftrace_ops_list_func(ip, parent_ip, NULL, NULL);
6866}
6867NOKPROBE_SYMBOL(ftrace_ops_no_ops);
6868#endif
6869
6870/*
6871 * If there's only one function registered but it does not support
6872 * recursion, or needs RCU protection, then this function will be
6873 * called by the mcount trampoline.
6874 */
6875static void ftrace_ops_assist_func(unsigned long ip, unsigned long parent_ip,
6876                                   struct ftrace_ops *op, struct pt_regs *regs)
6877{
6878        int bit;
6879
6880        if ((op->flags & FTRACE_OPS_FL_RCU) && !rcu_is_watching())
6881                return;
6882
6883        bit = trace_test_and_set_recursion(TRACE_LIST_START, TRACE_LIST_MAX);
6884        if (bit < 0)
6885                return;
6886
6887        preempt_disable_notrace();
6888
6889        op->func(ip, parent_ip, op, regs);
6890
6891        preempt_enable_notrace();
6892        trace_clear_recursion(bit);
6893}
6894NOKPROBE_SYMBOL(ftrace_ops_assist_func);
6895
6896/**
6897 * ftrace_ops_get_func - get the function a trampoline should call
6898 * @ops: the ops to get the function for
6899 *
6900 * Normally the mcount trampoline will call the ops->func, but there
6901 * are times that it should not. For example, if the ops does not
6902 * have its own recursion protection, then it should call the
6903 * ftrace_ops_assist_func() instead.
6904 *
6905 * Returns the function that the trampoline should call for @ops.
6906 */
6907ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops)
6908{
6909        /*
6910         * If the function does not handle recursion or needs to be
6911         * RCU safe, then we need to call the assist handler.
6912         */
6913        if (!(ops->flags & FTRACE_OPS_FL_RECURSION_SAFE) ||
6914            ops->flags & FTRACE_OPS_FL_RCU)
6915                return ftrace_ops_assist_func;
6916
6917        return ops->func;
6918}
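    /*
     * Illustration only (my_ops is a hypothetical ops): if my_ops is
     * registered without FTRACE_OPS_FL_RECURSION_SAFE, its trampoline
     * is pointed at ftrace_ops_assist_func(), so the recursion and
     * preemption guards run before my_ops.func does. An ops that sets
     * RECURSION_SAFE and does not need RCU protection is called
     * directly.
     */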
6919
6920static void
6921ftrace_filter_pid_sched_switch_probe(void *data, bool preempt,
6922                    struct task_struct *prev, struct task_struct *next)
6923{
6924        struct trace_array *tr = data;
6925        struct trace_pid_list *pid_list;
6926        struct trace_pid_list *no_pid_list;
6927
6928        pid_list = rcu_dereference_sched(tr->function_pids);
6929        no_pid_list = rcu_dereference_sched(tr->function_no_pids);
6930
6931        if (trace_ignore_this_task(pid_list, no_pid_list, next))
6932                this_cpu_write(tr->array_buffer.data->ftrace_ignore_pid,
6933                               FTRACE_PID_IGNORE);
6934        else
6935                this_cpu_write(tr->array_buffer.data->ftrace_ignore_pid,
6936                               next->pid);
6937}
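    /*
     * Note that the pid check is done here, at context switch time, and
     * its result cached per CPU in ftrace_ignore_pid. The function entry
     * fast path then only reads one per-CPU word instead of walking the
     * pid lists on every traced call.
     */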
6938
6939static void
6940ftrace_pid_follow_sched_process_fork(void *data,
6941                                     struct task_struct *self,
6942                                     struct task_struct *task)
6943{
6944        struct trace_pid_list *pid_list;
6945        struct trace_array *tr = data;
6946
6947        pid_list = rcu_dereference_sched(tr->function_pids);
6948        trace_filter_add_remove_task(pid_list, self, task);
6949
6950        pid_list = rcu_dereference_sched(tr->function_no_pids);
6951        trace_filter_add_remove_task(pid_list, self, task);
6952}
6953
6954static void
6955ftrace_pid_follow_sched_process_exit(void *data, struct task_struct *task)
6956{
6957        struct trace_pid_list *pid_list;
6958        struct trace_array *tr = data;
6959
6960        pid_list = rcu_dereference_sched(tr->function_pids);
6961        trace_filter_add_remove_task(pid_list, NULL, task);
6962
6963        pid_list = rcu_dereference_sched(tr->function_no_pids);
6964        trace_filter_add_remove_task(pid_list, NULL, task);
6965}
6966
6967void ftrace_pid_follow_fork(struct trace_array *tr, bool enable)
6968{
6969        if (enable) {
6970                register_trace_sched_process_fork(ftrace_pid_follow_sched_process_fork,
6971                                                  tr);
6972                register_trace_sched_process_exit(ftrace_pid_follow_sched_process_exit,
6973                                                  tr);
6974        } else {
6975                unregister_trace_sched_process_fork(ftrace_pid_follow_sched_process_fork,
6976                                                    tr);
6977                unregister_trace_sched_process_exit(ftrace_pid_follow_sched_process_exit,
6978                                                    tr);
6979        }
6980}
6981
6982static void clear_ftrace_pids(struct trace_array *tr, int type)
6983{
6984        struct trace_pid_list *pid_list;
6985        struct trace_pid_list *no_pid_list;
6986        int cpu;
6987
6988        pid_list = rcu_dereference_protected(tr->function_pids,
6989                                             lockdep_is_held(&ftrace_lock));
6990        no_pid_list = rcu_dereference_protected(tr->function_no_pids,
6991                                                lockdep_is_held(&ftrace_lock));
6992
6993        /* Make sure there's something to do */
6994        if (!pid_type_enabled(type, pid_list, no_pid_list))
6995                return;
6996
6997        /* See if the pids still need to be checked after this */
6998        if (!still_need_pid_events(type, pid_list, no_pid_list)) {
6999                unregister_trace_sched_switch(ftrace_filter_pid_sched_switch_probe, tr);
7000                for_each_possible_cpu(cpu)
7001                        per_cpu_ptr(tr->array_buffer.data, cpu)->ftrace_ignore_pid = FTRACE_PID_TRACE;
7002        }
7003
7004        if (type & TRACE_PIDS)
7005                rcu_assign_pointer(tr->function_pids, NULL);
7006
7007        if (type & TRACE_NO_PIDS)
7008                rcu_assign_pointer(tr->function_no_pids, NULL);
7009
7010        /* Wait till all users are no longer using pid filtering */
7011        synchronize_rcu();
7012
7013        if ((type & TRACE_PIDS) && pid_list)
7014                trace_free_pid_list(pid_list);
7015
7016        if ((type & TRACE_NO_PIDS) && no_pid_list)
7017                trace_free_pid_list(no_pid_list);
7018}
7019
7020void ftrace_clear_pids(struct trace_array *tr)
7021{
7022        mutex_lock(&ftrace_lock);
7023
7024        clear_ftrace_pids(tr, TRACE_PIDS | TRACE_NO_PIDS);
7025
7026        mutex_unlock(&ftrace_lock);
7027}
7028
7029static void ftrace_pid_reset(struct trace_array *tr, int type)
7030{
7031        mutex_lock(&ftrace_lock);
7032        clear_ftrace_pids(tr, type);
7033
7034        ftrace_update_pid_func();
7035        ftrace_startup_all(0);
7036
7037        mutex_unlock(&ftrace_lock);
7038}
7039
7040/* Greater than any max PID */
7041#define FTRACE_NO_PIDS          (void *)(PID_MAX_LIMIT + 1)
7042
7043static void *fpid_start(struct seq_file *m, loff_t *pos)
7044        __acquires(RCU)
7045{
7046        struct trace_pid_list *pid_list;
7047        struct trace_array *tr = m->private;
7048
7049        mutex_lock(&ftrace_lock);
7050        rcu_read_lock_sched();
7051
7052        pid_list = rcu_dereference_sched(tr->function_pids);
7053
7054        if (!pid_list)
7055                return !(*pos) ? FTRACE_NO_PIDS : NULL;
7056
7057        return trace_pid_start(pid_list, pos);
7058}
7059
7060static void *fpid_next(struct seq_file *m, void *v, loff_t *pos)
7061{
7062        struct trace_array *tr = m->private;
7063        struct trace_pid_list *pid_list = rcu_dereference_sched(tr->function_pids);
7064
7065        if (v == FTRACE_NO_PIDS) {
7066                (*pos)++;
7067                return NULL;
7068        }
7069        return trace_pid_next(pid_list, v, pos);
7070}
7071
7072static void fpid_stop(struct seq_file *m, void *p)
7073        __releases(RCU)
7074{
7075        rcu_read_unlock_sched();
7076        mutex_unlock(&ftrace_lock);
7077}
7078
7079static int fpid_show(struct seq_file *m, void *v)
7080{
7081        if (v == FTRACE_NO_PIDS) {
7082                seq_puts(m, "no pid\n");
7083                return 0;
7084        }
7085
7086        return trace_pid_show(m, v);
7087}
7088
7089static const struct seq_operations ftrace_pid_sops = {
7090        .start = fpid_start,
7091        .next = fpid_next,
7092        .stop = fpid_stop,
7093        .show = fpid_show,
7094};
7095
7096static void *fnpid_start(struct seq_file *m, loff_t *pos)
7097        __acquires(RCU)
7098{
7099        struct trace_pid_list *pid_list;
7100        struct trace_array *tr = m->private;
7101
7102        mutex_lock(&ftrace_lock);
7103        rcu_read_lock_sched();
7104
7105        pid_list = rcu_dereference_sched(tr->function_no_pids);
7106
7107        if (!pid_list)
7108                return !(*pos) ? FTRACE_NO_PIDS : NULL;
7109
7110        return trace_pid_start(pid_list, pos);
7111}
7112
7113static void *fnpid_next(struct seq_file *m, void *v, loff_t *pos)
7114{
7115        struct trace_array *tr = m->private;
7116        struct trace_pid_list *pid_list = rcu_dereference_sched(tr->function_no_pids);
7117
7118        if (v == FTRACE_NO_PIDS) {
7119                (*pos)++;
7120                return NULL;
7121        }
7122        return trace_pid_next(pid_list, v, pos);
7123}
7124
7125static const struct seq_operations ftrace_no_pid_sops = {
7126        .start = fnpid_start,
7127        .next = fnpid_next,
7128        .stop = fpid_stop,
7129        .show = fpid_show,
7130};
7131
7132static int pid_open(struct inode *inode, struct file *file, int type)
7133{
7134        const struct seq_operations *seq_ops;
7135        struct trace_array *tr = inode->i_private;
7136        struct seq_file *m;
7137        int ret = 0;
7138
7139        ret = tracing_check_open_get_tr(tr);
7140        if (ret)
7141                return ret;
7142
7143        if ((file->f_mode & FMODE_WRITE) &&
7144            (file->f_flags & O_TRUNC))
7145                ftrace_pid_reset(tr, type);
7146
7147        switch (type) {
7148        case TRACE_PIDS:
7149                seq_ops = &ftrace_pid_sops;
7150                break;
7151        case TRACE_NO_PIDS:
7152                seq_ops = &ftrace_no_pid_sops;
7153                break;
7154        default:
7155                trace_array_put(tr);
7156                WARN_ON_ONCE(1);
7157                return -EINVAL;
7158        }
7159
7160        ret = seq_open(file, seq_ops);
7161        if (ret < 0) {
7162                trace_array_put(tr);
7163        } else {
7164                m = file->private_data;
7165                /* copy tr over to seq ops */
7166                m->private = tr;
7167        }
7168
7169        return ret;
7170}
7171
7172static int
7173ftrace_pid_open(struct inode *inode, struct file *file)
7174{
7175        return pid_open(inode, file, TRACE_PIDS);
7176}
7177
7178static int
7179ftrace_no_pid_open(struct inode *inode, struct file *file)
7180{
7181        return pid_open(inode, file, TRACE_NO_PIDS);
7182}
7183
7184static void ignore_task_cpu(void *data)
7185{
7186        struct trace_array *tr = data;
7187        struct trace_pid_list *pid_list;
7188        struct trace_pid_list *no_pid_list;
7189
7190        /*
7191         * This function is called by on_each_cpu() while the
7192         * ftrace_lock is held.
7193         */
7194        pid_list = rcu_dereference_protected(tr->function_pids,
7195                                             mutex_is_locked(&ftrace_lock));
7196        no_pid_list = rcu_dereference_protected(tr->function_no_pids,
7197                                                mutex_is_locked(&ftrace_lock));
7198
7199        if (trace_ignore_this_task(pid_list, no_pid_list, current))
7200                this_cpu_write(tr->array_buffer.data->ftrace_ignore_pid,
7201                               FTRACE_PID_IGNORE);
7202        else
7203                this_cpu_write(tr->array_buffer.data->ftrace_ignore_pid,
7204                               current->pid);
7205}
7206
7207static ssize_t
7208pid_write(struct file *filp, const char __user *ubuf,
7209          size_t cnt, loff_t *ppos, int type)
7210{
7211        struct seq_file *m = filp->private_data;
7212        struct trace_array *tr = m->private;
7213        struct trace_pid_list *filtered_pids;
7214        struct trace_pid_list *other_pids;
7215        struct trace_pid_list *pid_list;
7216        ssize_t ret;
7217
7218        if (!cnt)
7219                return 0;
7220
7221        mutex_lock(&ftrace_lock);
7222
7223        switch (type) {
7224        case TRACE_PIDS:
7225                filtered_pids = rcu_dereference_protected(tr->function_pids,
7226                                             lockdep_is_held(&ftrace_lock));
7227                other_pids = rcu_dereference_protected(tr->function_no_pids,
7228                                             lockdep_is_held(&ftrace_lock));
7229                break;
7230        case TRACE_NO_PIDS:
7231                filtered_pids = rcu_dereference_protected(tr->function_no_pids,
7232                                             lockdep_is_held(&ftrace_lock));
7233                other_pids = rcu_dereference_protected(tr->function_pids,
7234                                             lockdep_is_held(&ftrace_lock));
7235                break;
7236        default:
7237                ret = -EINVAL;
7238                WARN_ON_ONCE(1);
7239                goto out;
7240        }
7241
7242        ret = trace_pid_write(filtered_pids, &pid_list, ubuf, cnt);
7243        if (ret < 0)
7244                goto out;
7245
7246        switch (type) {
7247        case TRACE_PIDS:
7248                rcu_assign_pointer(tr->function_pids, pid_list);
7249                break;
7250        case TRACE_NO_PIDS:
7251                rcu_assign_pointer(tr->function_no_pids, pid_list);
7252                break;
7253        }
7254
7256        if (filtered_pids) {
7257                synchronize_rcu();
7258                trace_free_pid_list(filtered_pids);
7259        } else if (pid_list && !other_pids) {
7260                /* Register a probe to set whether to ignore the tracing of a task */
7261                register_trace_sched_switch(ftrace_filter_pid_sched_switch_probe, tr);
7262        }
7263
7264        /*
7265         * Ignoring of pids is done at task switch. But we have to
7266         * check for those tasks that are currently running.
7267         * Always do this in case a pid was appended or removed.
7268         */
7269        on_each_cpu(ignore_task_cpu, tr, 1);
7270
7271        ftrace_update_pid_func();
7272        ftrace_startup_all(0);
7273 out:
7274        mutex_unlock(&ftrace_lock);
7275
7276        if (ret > 0)
7277                *ppos += ret;
7278
7279        return ret;
7280}
7281
7282static ssize_t
7283ftrace_pid_write(struct file *filp, const char __user *ubuf,
7284                 size_t cnt, loff_t *ppos)
7285{
7286        return pid_write(filp, ubuf, cnt, ppos, TRACE_PIDS);
7287}
7288
7289static ssize_t
7290ftrace_no_pid_write(struct file *filp, const char __user *ubuf,
7291                    size_t cnt, loff_t *ppos)
7292{
7293        return pid_write(filp, ubuf, cnt, ppos, TRACE_NO_PIDS);
7294}
7295
7296static int
7297ftrace_pid_release(struct inode *inode, struct file *file)
7298{
7299        struct trace_array *tr = inode->i_private;
7300
7301        trace_array_put(tr);
7302
7303        return seq_release(inode, file);
7304}
7305
7306static const struct file_operations ftrace_pid_fops = {
7307        .open           = ftrace_pid_open,
7308        .write          = ftrace_pid_write,
7309        .read           = seq_read,
7310        .llseek         = tracing_lseek,
7311        .release        = ftrace_pid_release,
7312};
7313
7314static const struct file_operations ftrace_no_pid_fops = {
7315        .open           = ftrace_no_pid_open,
7316        .write          = ftrace_no_pid_write,
7317        .read           = seq_read,
7318        .llseek         = tracing_lseek,
7319        .release        = ftrace_pid_release,
7320};
7321
7322void ftrace_init_tracefs(struct trace_array *tr, struct dentry *d_tracer)
7323{
7324        trace_create_file("set_ftrace_pid", 0644, d_tracer,
7325                            tr, &ftrace_pid_fops);
7326        trace_create_file("set_ftrace_notrace_pid", 0644, d_tracer,
7327                            tr, &ftrace_no_pid_fops);
7328}
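    /*
     * Example usage from user space (a sketch; paths assume tracefs is
     * mounted at /sys/kernel/tracing):
     *
     *	echo 123 > set_ftrace_pid		# trace only pid 123
     *	echo 456 >> set_ftrace_pid		# also trace pid 456
     *	echo 789 > set_ftrace_notrace_pid	# never trace pid 789
     *
     * Opening with O_TRUNC (a single '>') resets the list first; see
     * pid_open() above.
     */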
7329
7330void __init ftrace_init_tracefs_toplevel(struct trace_array *tr,
7331                                         struct dentry *d_tracer)
7332{
7333        /* Only the top level directory has the dyn_tracefs and profile */
7334        WARN_ON(!(tr->flags & TRACE_ARRAY_FL_GLOBAL));
7335
7336        ftrace_init_dyn_tracefs(d_tracer);
7337        ftrace_profile_tracefs(d_tracer);
7338}
7339
7340/**
7341 * ftrace_kill - kill ftrace
7342 *
7343 * This function should be used by panic code. It stops ftrace
7344 * but in a not so nice way: tracing is shut off immediately, with
7345 * no synchronization with any users of the callbacks.
7346 */
7347void ftrace_kill(void)
7348{
7349        ftrace_disabled = 1;
7350        ftrace_enabled = 0;
7351        ftrace_trace_function = ftrace_stub;
7352}
7353
7354/**
7355 * ftrace_is_dead - Test if ftrace is dead or not.
7356 */
7357int ftrace_is_dead(void)
7358{
7359        return ftrace_disabled;
7360}
7361
7362/**
7363 * register_ftrace_function - register a function for profiling
7364 * @ops: ops structure that holds the function for profiling.
7365 *
7366 * Register a function to be called by all functions in the
7367 * kernel.
7368 *
7369 * Note: @ops->func and all the functions it calls must be labeled
7370 *       with "notrace", otherwise it will go into a
7371 *       recursive loop.
7372 */
7373int register_ftrace_function(struct ftrace_ops *ops)
7374{
7375        int ret = -1;
7376
7377        ftrace_ops_init(ops);
7378
7379        mutex_lock(&ftrace_lock);
7380
7381        ret = ftrace_startup(ops, 0);
7382
7383        mutex_unlock(&ftrace_lock);
7384
7385        return ret;
7386}
7387EXPORT_SYMBOL_GPL(register_ftrace_function);
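    /*
     * Minimal usage sketch (not part of this file; my_callback and
     * my_ops are hypothetical names). As the note above says, the
     * callback and everything it calls should be notrace; ops that do
     * not set FTRACE_OPS_FL_RECURSION_SAFE additionally get the
     * recursion guard of ftrace_ops_assist_func():
     *
     *	static void notrace my_callback(unsigned long ip,
     *					unsigned long parent_ip,
     *					struct ftrace_ops *op,
     *					struct pt_regs *regs)
     *	{
     *		// Keep this cheap: it runs on every traced function.
     *	}
     *
     *	static struct ftrace_ops my_ops = {
     *		.func	= my_callback,
     *	};
     *
     *	register_ftrace_function(&my_ops);
     *	...
     *	unregister_ftrace_function(&my_ops);
     */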
7388
7389/**
7390 * unregister_ftrace_function - unregister a function for profiling.
7391 * @ops: ops structure that holds the function to unregister
7392 *
7393 * Unregister a function that was added to be called by ftrace profiling.
7394 */
7395int unregister_ftrace_function(struct ftrace_ops *ops)
7396{
7397        int ret;
7398
7399        mutex_lock(&ftrace_lock);
7400        ret = ftrace_shutdown(ops, 0);
7401        mutex_unlock(&ftrace_lock);
7402
7403        return ret;
7404}
7405EXPORT_SYMBOL_GPL(unregister_ftrace_function);
7406
7407static bool is_permanent_ops_registered(void)
7408{
7409        struct ftrace_ops *op;
7410
7411        do_for_each_ftrace_op(op, ftrace_ops_list) {
7412                if (op->flags & FTRACE_OPS_FL_PERMANENT)
7413                        return true;
7414        } while_for_each_ftrace_op(op);
7415
7416        return false;
7417}
7418
7419int
7420ftrace_enable_sysctl(struct ctl_table *table, int write,
7421                     void __user *buffer, size_t *lenp,
7422                     loff_t *ppos)
7423{
7424        int ret = -ENODEV;
7425
7426        mutex_lock(&ftrace_lock);
7427
7428        if (unlikely(ftrace_disabled))
7429                goto out;
7430
7431        ret = proc_dointvec(table, write, buffer, lenp, ppos);
7432
7433        if (ret || !write || (last_ftrace_enabled == !!ftrace_enabled))
7434                goto out;
7435
7436        if (ftrace_enabled) {
7437
7438                /* we are starting ftrace again */
7439                if (rcu_dereference_protected(ftrace_ops_list,
7440                        lockdep_is_held(&ftrace_lock)) != &ftrace_list_end)
7441                        update_ftrace_function();
7442
7443                ftrace_startup_sysctl();
7444
7445        } else {
7446                if (is_permanent_ops_registered()) {
7447                        ftrace_enabled = true;
7448                        ret = -EBUSY;
7449                        goto out;
7450                }
7451
7452                /* stopping ftrace calls (just send to ftrace_stub) */
7453                ftrace_trace_function = ftrace_stub;
7454
7455                ftrace_shutdown_sysctl();
7456        }
7457
7458        last_ftrace_enabled = !!ftrace_enabled;
7459 out:
7460        mutex_unlock(&ftrace_lock);
7461        return ret;
7462}
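    /*
     * Example (illustration only): this handler backs the
     * kernel.ftrace_enabled sysctl, so function tracing can be toggled
     * from user space with either of:
     *
     *	sysctl kernel.ftrace_enabled=0
     *	echo 0 > /proc/sys/kernel/ftrace_enabled
     *
     * Turning it off fails with -EBUSY while any FTRACE_OPS_FL_PERMANENT
     * ops (e.g. livepatch) is registered.
     */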
7463